path: root/gfx/skia
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /gfx/skia
parent     Initial commit. (diff)
download   firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
           firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1. (tag: upstream/86.0.1, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/skia'; each row lists file mode, file path, and line count)
-rw-r--r--gfx/skia/LICENSE27
-rw-r--r--gfx/skia/README3
-rw-r--r--gfx/skia/README_COMMITTING10
-rw-r--r--gfx/skia/README_MOZILLA12
-rwxr-xr-xgfx/skia/generate_mozbuild.py425
-rwxr-xr-xgfx/skia/moz.build501
-rw-r--r--gfx/skia/patches/README2
-rw-r--r--gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch66
-rw-r--r--gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch34
-rw-r--r--gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch26
-rw-r--r--gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch38
-rw-r--r--gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch30
-rw-r--r--gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch50
-rw-r--r--gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch39
-rw-r--r--gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch280
-rw-r--r--gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch684
-rw-r--r--gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch36
-rw-r--r--gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch472
-rw-r--r--gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch198
-rw-r--r--gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch147
-rw-r--r--gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch27
-rw-r--r--gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch702
-rw-r--r--gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch168
-rw-r--r--gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch173
-rw-r--r--gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch35
-rw-r--r--gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch28
-rw-r--r--gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch40
-rw-r--r--gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch23
-rw-r--r--gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch36
-rw-r--r--gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch698
-rw-r--r--gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch22
-rw-r--r--gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch28
-rw-r--r--gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch31
-rw-r--r--gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch29
-rw-r--r--gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch22
-rw-r--r--gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch26
-rw-r--r--gfx/skia/patches/archive/0013-Bug-761890-fonts.patch162
-rw-r--r--gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch29
-rw-r--r--gfx/skia/patches/archive/0015-Bug-766017-warnings.patch865
-rw-r--r--gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch400
-rw-r--r--gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch73
-rw-r--r--gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch14
-rw-r--r--gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch39
-rw-r--r--gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch39
-rw-r--r--gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch217
-rw-r--r--gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch26
-rw-r--r--gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch83
-rw-r--r--gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch94
-rw-r--r--gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch39
-rw-r--r--gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch148
-rw-r--r--gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch27
-rw-r--r--gfx/skia/patches/archive/SkPostConfig.patch32
-rw-r--r--gfx/skia/patches/archive/arm-fixes.patch191
-rw-r--r--gfx/skia/patches/archive/arm-opts.patch41
-rw-r--r--gfx/skia/patches/archive/fix-comma-end-enum-list.patch380
-rw-r--r--gfx/skia/patches/archive/fix-gradient-clamp.patch211
-rw-r--r--gfx/skia/patches/archive/getpostextpath.patch70
-rw-r--r--gfx/skia/patches/archive/mingw-fix.patch57
-rw-r--r--gfx/skia/patches/archive/new-aa.patch22
-rw-r--r--gfx/skia/patches/archive/old-android-fonthost.patch530
-rw-r--r--gfx/skia/patches/archive/radial-gradients.patch25
-rw-r--r--gfx/skia/patches/archive/skia_restrict_problem.patch461
-rw-r--r--gfx/skia/patches/archive/uninitialized-margin.patch22
-rw-r--r--gfx/skia/patches/archive/user-config.patch40
-rw-r--r--gfx/skia/skia/include/android/SkAndroidFrameworkUtils.h56
-rw-r--r--gfx/skia/skia/include/android/SkAnimatedImage.h167
-rw-r--r--gfx/skia/skia/include/android/SkBRDAllocator.h29
-rw-r--r--gfx/skia/skia/include/android/SkBitmapRegionDecoder.h92
-rw-r--r--gfx/skia/skia/include/atlastext/SkAtlasTextContext.h42
-rw-r--r--gfx/skia/skia/include/atlastext/SkAtlasTextFont.h38
-rw-r--r--gfx/skia/skia/include/atlastext/SkAtlasTextRenderer.h72
-rw-r--r--gfx/skia/skia/include/atlastext/SkAtlasTextTarget.h100
-rw-r--r--gfx/skia/skia/include/c/sk_canvas.h159
-rw-r--r--gfx/skia/skia/include/c/sk_colorspace.h25
-rw-r--r--gfx/skia/skia/include/c/sk_data.h70
-rw-r--r--gfx/skia/skia/include/c/sk_image.h71
-rw-r--r--gfx/skia/skia/include/c/sk_imageinfo.h62
-rw-r--r--gfx/skia/skia/include/c/sk_maskfilter.h47
-rw-r--r--gfx/skia/skia/include/c/sk_matrix.h49
-rw-r--r--gfx/skia/skia/include/c/sk_paint.h145
-rw-r--r--gfx/skia/skia/include/c/sk_path.h84
-rw-r--r--gfx/skia/skia/include/c/sk_picture.h70
-rw-r--r--gfx/skia/skia/include/c/sk_shader.h143
-rw-r--r--gfx/skia/skia/include/c/sk_surface.h73
-rw-r--r--gfx/skia/skia/include/c/sk_types.h256
-rw-r--r--gfx/skia/skia/include/codec/SkAndroidCodec.h287
-rw-r--r--gfx/skia/skia/include/codec/SkCodec.h954
-rw-r--r--gfx/skia/skia/include/codec/SkCodecAnimation.h43
-rw-r--r--gfx/skia/skia/include/codec/SkEncodedOrigin.h47
-rw-r--r--gfx/skia/skia/include/config/SkUserConfig.h131
-rw-r--r--gfx/skia/skia/include/core/SkAnnotation.h50
-rw-r--r--gfx/skia/skia/include/core/SkBBHFactory.h31
-rw-r--r--gfx/skia/skia/include/core/SkBitmap.h1166
-rw-r--r--gfx/skia/skia/include/core/SkBlendMode.h65
-rw-r--r--gfx/skia/skia/include/core/SkBlurTypes.h22
-rw-r--r--gfx/skia/skia/include/core/SkCanvas.h2789
-rw-r--r--gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h92
-rw-r--r--gfx/skia/skia/include/core/SkClipOp.h33
-rw-r--r--gfx/skia/skia/include/core/SkColor.h414
-rw-r--r--gfx/skia/skia/include/core/SkColorFilter.h167
-rw-r--r--gfx/skia/skia/include/core/SkColorPriv.h151
-rw-r--r--gfx/skia/skia/include/core/SkColorSpace.h249
-rw-r--r--gfx/skia/skia/include/core/SkContourMeasure.h148
-rw-r--r--gfx/skia/skia/include/core/SkCoverageMode.h30
-rw-r--r--gfx/skia/skia/include/core/SkCubicMap.h45
-rw-r--r--gfx/skia/skia/include/core/SkData.h182
-rw-r--r--gfx/skia/skia/include/core/SkDataTable.h119
-rw-r--r--gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h173
-rw-r--r--gfx/skia/skia/include/core/SkDeque.h141
-rw-r--r--gfx/skia/skia/include/core/SkDocument.h91
-rw-r--r--gfx/skia/skia/include/core/SkDrawLooper.h136
-rw-r--r--gfx/skia/skia/include/core/SkDrawable.h160
-rw-r--r--gfx/skia/skia/include/core/SkEncodedImageFormat.h34
-rw-r--r--gfx/skia/skia/include/core/SkExecutor.h34
-rw-r--r--gfx/skia/skia/include/core/SkFilterQuality.h26
-rw-r--r--gfx/skia/skia/include/core/SkFlattenable.h106
-rw-r--r--gfx/skia/skia/include/core/SkFont.h538
-rw-r--r--gfx/skia/skia/include/core/SkFontArguments.h79
-rw-r--r--gfx/skia/skia/include/core/SkFontLCDConfig.h58
-rw-r--r--gfx/skia/skia/include/core/SkFontMetrics.h110
-rw-r--r--gfx/skia/skia/include/core/SkFontMgr.h163
-rw-r--r--gfx/skia/skia/include/core/SkFontParameters.h38
-rw-r--r--gfx/skia/skia/include/core/SkFontStyle.h80
-rw-r--r--gfx/skia/skia/include/core/SkFontTypes.h25
-rw-r--r--gfx/skia/skia/include/core/SkGraphics.h179
-rw-r--r--gfx/skia/skia/include/core/SkICC.h19
-rw-r--r--gfx/skia/skia/include/core/SkImage.h1143
-rw-r--r--gfx/skia/skia/include/core/SkImageEncoder.h70
-rw-r--r--gfx/skia/skia/include/core/SkImageFilter.h164
-rw-r--r--gfx/skia/skia/include/core/SkImageGenerator.h207
-rw-r--r--gfx/skia/skia/include/core/SkImageInfo.h743
-rw-r--r--gfx/skia/skia/include/core/SkMallocPixelRef.h42
-rw-r--r--gfx/skia/skia/include/core/SkMaskFilter.h77
-rw-r--r--gfx/skia/skia/include/core/SkMath.h75
-rw-r--r--gfx/skia/skia/include/core/SkMatrix.h1863
-rw-r--r--gfx/skia/skia/include/core/SkMatrix44.h480
-rw-r--r--gfx/skia/skia/include/core/SkMilestone.h9
-rw-r--r--gfx/skia/skia/include/core/SkMultiPictureDraw.h75
-rw-r--r--gfx/skia/skia/include/core/SkOverdrawCanvas.h74
-rw-r--r--gfx/skia/skia/include/core/SkPaint.h668
-rw-r--r--gfx/skia/skia/include/core/SkPath.h1746
-rw-r--r--gfx/skia/skia/include/core/SkPathEffect.h187
-rw-r--r--gfx/skia/skia/include/core/SkPathMeasure.h88
-rw-r--r--gfx/skia/skia/include/core/SkPathTypes.h54
-rw-r--r--gfx/skia/skia/include/core/SkPicture.h258
-rw-r--r--gfx/skia/skia/include/core/SkPictureRecorder.h125
-rw-r--r--gfx/skia/skia/include/core/SkPixelRef.h129
-rw-r--r--gfx/skia/skia/include/core/SkPixmap.h713
-rw-r--r--gfx/skia/skia/include/core/SkPngChunkReader.h45
-rw-r--r--gfx/skia/skia/include/core/SkPoint.h552
-rw-r--r--gfx/skia/skia/include/core/SkPoint3.h157
-rw-r--r--gfx/skia/skia/include/core/SkPostConfig.h306
-rw-r--r--gfx/skia/skia/include/core/SkPreConfig.h200
-rw-r--r--gfx/skia/skia/include/core/SkPromiseImageTexture.h59
-rw-r--r--gfx/skia/skia/include/core/SkRRect.h519
-rw-r--r--gfx/skia/skia/include/core/SkRSXform.h69
-rw-r--r--gfx/skia/skia/include/core/SkRWBuffer.h111
-rw-r--r--gfx/skia/skia/include/core/SkRasterHandleAllocator.h87
-rw-r--r--gfx/skia/skia/include/core/SkRect.h1335
-rw-r--r--gfx/skia/skia/include/core/SkRefCnt.h370
-rw-r--r--gfx/skia/skia/include/core/SkRegion.h621
-rw-r--r--gfx/skia/skia/include/core/SkScalar.h208
-rw-r--r--gfx/skia/skia/include/core/SkSerialProcs.h73
-rw-r--r--gfx/skia/skia/include/core/SkShader.h154
-rw-r--r--gfx/skia/skia/include/core/SkSize.h92
-rw-r--r--gfx/skia/skia/include/core/SkStream.h515
-rw-r--r--gfx/skia/skia/include/core/SkString.h299
-rw-r--r--gfx/skia/skia/include/core/SkStrokeRec.h154
-rw-r--r--gfx/skia/skia/include/core/SkSurface.h1046
-rw-r--r--gfx/skia/skia/include/core/SkSurfaceCharacterization.h228
-rw-r--r--gfx/skia/skia/include/core/SkSurfaceProps.h89
-rw-r--r--gfx/skia/skia/include/core/SkSwizzle.h19
-rw-r--r--gfx/skia/skia/include/core/SkTextBlob.h418
-rw-r--r--gfx/skia/skia/include/core/SkTileMode.h41
-rw-r--r--gfx/skia/skia/include/core/SkTime.h62
-rw-r--r--gfx/skia/skia/include/core/SkTraceMemoryDump.h90
-rw-r--r--gfx/skia/skia/include/core/SkTypeface.h455
-rw-r--r--gfx/skia/skia/include/core/SkTypes.h226
-rw-r--r--gfx/skia/skia/include/core/SkUnPreMultiply.h56
-rw-r--r--gfx/skia/skia/include/core/SkVertices.h281
-rw-r--r--gfx/skia/skia/include/core/SkYUVAIndex.h81
-rw-r--r--gfx/skia/skia/include/core/SkYUVASizeInfo.h58
-rw-r--r--gfx/skia/skia/include/docs/SkPDFDocument.h202
-rw-r--r--gfx/skia/skia/include/docs/SkXPSDocument.h27
-rw-r--r--gfx/skia/skia/include/effects/Sk1DPathEffect.h77
-rw-r--r--gfx/skia/skia/include/effects/Sk2DPathEffect.h105
-rw-r--r--gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h36
-rw-r--r--gfx/skia/skia/include/effects/SkArithmeticImageFilter.h36
-rw-r--r--gfx/skia/skia/include/effects/SkBlurDrawLooper.h24
-rw-r--r--gfx/skia/skia/include/effects/SkBlurImageFilter.h46
-rw-r--r--gfx/skia/skia/include/effects/SkBlurMaskFilter.h35
-rw-r--r--gfx/skia/skia/include/effects/SkColorFilterImageFilter.h27
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrix.h66
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrixFilter.h25
-rw-r--r--gfx/skia/skia/include/effects/SkComposeImageFilter.h24
-rw-r--r--gfx/skia/skia/include/effects/SkCornerPathEffect.h43
-rw-r--r--gfx/skia/skia/include/effects/SkDashPathEffect.h39
-rw-r--r--gfx/skia/skia/include/effects/SkDiscretePathEffect.h53
-rw-r--r--gfx/skia/skia/include/effects/SkDisplacementMapEffect.h49
-rw-r--r--gfx/skia/skia/include/effects/SkDropShadowImageFilter.h38
-rw-r--r--gfx/skia/skia/include/effects/SkGradientShader.h259
-rw-r--r--gfx/skia/skia/include/effects/SkHighContrastFilter.h84
-rw-r--r--gfx/skia/skia/include/effects/SkImageFilters.h434
-rw-r--r--gfx/skia/skia/include/effects/SkImageSource.h29
-rw-r--r--gfx/skia/skia/include/effects/SkLayerDrawLooper.h150
-rw-r--r--gfx/skia/skia/include/effects/SkLightingImageFilter.h46
-rw-r--r--gfx/skia/skia/include/effects/SkLumaColorFilter.h57
-rw-r--r--gfx/skia/skia/include/effects/SkMagnifierImageFilter.h28
-rw-r--r--gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h88
-rw-r--r--gfx/skia/skia/include/effects/SkMergeImageFilter.h34
-rw-r--r--gfx/skia/skia/include/effects/SkMorphologyImageFilter.h40
-rw-r--r--gfx/skia/skia/include/effects/SkOffsetImageFilter.h26
-rw-r--r--gfx/skia/skia/include/effects/SkOpPathEffect.h38
-rw-r--r--gfx/skia/skia/include/effects/SkOverdrawColorFilter.h54
-rw-r--r--gfx/skia/skia/include/effects/SkPaintImageFilter.h36
-rw-r--r--gfx/skia/skia/include/effects/SkPerlinNoiseShader.h60
-rw-r--r--gfx/skia/skia/include/effects/SkPictureImageFilter.h35
-rw-r--r--gfx/skia/skia/include/effects/SkShaderMaskFilter.h24
-rw-r--r--gfx/skia/skia/include/effects/SkTableColorFilter.h42
-rw-r--r--gfx/skia/skia/include/effects/SkTableMaskFilter.h37
-rw-r--r--gfx/skia/skia/include/effects/SkTileImageFilter.h31
-rw-r--r--gfx/skia/skia/include/effects/SkTrimPathEffect.h41
-rw-r--r--gfx/skia/skia/include/effects/SkXfermodeImageFilter.h34
-rw-r--r--gfx/skia/skia/include/encode/SkEncoder.h42
-rw-r--r--gfx/skia/skia/include/encode/SkJpegEncoder.h97
-rw-r--r--gfx/skia/skia/include/encode/SkPngEncoder.h99
-rw-r--r--gfx/skia/skia/include/encode/SkWebpEncoder.h48
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h44
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendSemaphore.h96
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendSurface.h453
-rw-r--r--gfx/skia/skia/include/gpu/GrConfig.h164
-rw-r--r--gfx/skia/skia/include/gpu/GrContext.h545
-rw-r--r--gfx/skia/skia/include/gpu/GrContextOptions.h270
-rw-r--r--gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h102
-rw-r--r--gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h51
-rw-r--r--gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h43
-rw-r--r--gfx/skia/skia/include/gpu/GrGpuResource.h321
-rw-r--r--gfx/skia/skia/include/gpu/GrSurface.h157
-rw-r--r--gfx/skia/skia/include/gpu/GrTexture.h102
-rw-r--r--gfx/skia/skia/include/gpu/GrTypes.h349
-rw-r--r--gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h40
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h11
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h39
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig.h99
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h22
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLExtensions.h78
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLFunctions.h320
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLInterface.h340
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLTypes.h186
-rw-r--r--gfx/skia/skia/include/gpu/mock/GrMockTypes.h118
-rw-r--r--gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h39
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h76
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkExtensions.h63
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h90
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkTypes.h256
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkVulkan.h32
-rw-r--r--gfx/skia/skia/include/pathops/SkPathOps.h113
-rw-r--r--gfx/skia/skia/include/ports/SkCFObject.h138
-rw-r--r--gfx/skia/skia/include/ports/SkFontConfigInterface.h115
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h20
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_android.h45
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_directory.h21
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_empty.h21
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h22
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h19
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_indirect.h99
-rw-r--r--gfx/skia/skia/include/ports/SkImageGeneratorCG.h20
-rw-r--r--gfx/skia/skia/include/ports/SkImageGeneratorWIC.h35
-rw-r--r--gfx/skia/skia/include/ports/SkRemotableFontMgr.h139
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_cairo.h18
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_mac.h49
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_win.h93
-rw-r--r--gfx/skia/skia/include/private/GrContext_Base.h89
-rw-r--r--gfx/skia/skia/include/private/GrGLTypesPriv.h94
-rw-r--r--gfx/skia/skia/include/private/GrImageContext.h57
-rw-r--r--gfx/skia/skia/include/private/GrRecordingContext.h137
-rw-r--r--gfx/skia/skia/include/private/GrResourceKey.h357
-rw-r--r--gfx/skia/skia/include/private/GrSharedEnums.h37
-rw-r--r--gfx/skia/skia/include/private/GrSingleOwner.h55
-rw-r--r--gfx/skia/skia/include/private/GrTypesPriv.h1341
-rw-r--r--gfx/skia/skia/include/private/GrVkTypesPriv.h50
-rw-r--r--gfx/skia/skia/include/private/SkBitmaskEnum.h48
-rw-r--r--gfx/skia/skia/include/private/SkChecksum.h71
-rw-r--r--gfx/skia/skia/include/private/SkColorData.h444
-rw-r--r--gfx/skia/skia/include/private/SkDeferredDisplayList.h78
-rw-r--r--gfx/skia/skia/include/private/SkEncodedInfo.h247
-rw-r--r--gfx/skia/skia/include/private/SkFixed.h140
-rw-r--r--gfx/skia/skia/include/private/SkFloatBits.h91
-rw-r--r--gfx/skia/skia/include/private/SkFloatingPoint.h265
-rw-r--r--gfx/skia/skia/include/private/SkHalf.h85
-rw-r--r--gfx/skia/skia/include/private/SkImageInfoPriv.h135
-rw-r--r--gfx/skia/skia/include/private/SkMacros.h67
-rw-r--r--gfx/skia/skia/include/private/SkMalloc.h136
-rw-r--r--gfx/skia/skia/include/private/SkMutex.h52
-rw-r--r--gfx/skia/skia/include/private/SkNoncopyable.h30
-rw-r--r--gfx/skia/skia/include/private/SkNx.h439
-rw-r--r--gfx/skia/skia/include/private/SkNx_neon.h740
-rw-r--r--gfx/skia/skia/include/private/SkNx_sse.h894
-rw-r--r--gfx/skia/skia/include/private/SkOnce.h50
-rw-r--r--gfx/skia/skia/include/private/SkPathRef.h492
-rw-r--r--gfx/skia/skia/include/private/SkSafe32.h34
-rw-r--r--gfx/skia/skia/include/private/SkSafe_math.h52
-rw-r--r--gfx/skia/skia/include/private/SkSemaphore.h79
-rw-r--r--gfx/skia/skia/include/private/SkShadowFlags.h23
-rw-r--r--gfx/skia/skia/include/private/SkSpinlock.h57
-rw-r--r--gfx/skia/skia/include/private/SkTArray.h641
-rw-r--r--gfx/skia/skia/include/private/SkTDArray.h372
-rw-r--r--gfx/skia/skia/include/private/SkTFitsIn.h99
-rw-r--r--gfx/skia/skia/include/private/SkTHash.h362
-rw-r--r--gfx/skia/skia/include/private/SkTLogic.h108
-rw-r--r--gfx/skia/skia/include/private/SkTemplates.h455
-rw-r--r--gfx/skia/skia/include/private/SkThreadAnnotations.h80
-rw-r--r--gfx/skia/skia/include/private/SkThreadID.h19
-rw-r--r--gfx/skia/skia/include/private/SkTo.h28
-rw-r--r--gfx/skia/skia/include/private/SkVx.h527
-rw-r--r--gfx/skia/skia/include/private/SkWeakRefCnt.h170
-rw-r--r--gfx/skia/skia/include/svg/SkSVGCanvas.h36
-rw-r--r--gfx/skia/skia/include/third_party/skcms/LICENSE29
-rw-r--r--gfx/skia/skia/include/third_party/skcms/skcms.h332
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/LICENSE29
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vk_platform.h92
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan.h79
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_android.h126
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_core.h7576
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_ios.h58
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_macos.h58
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_win32.h276
-rw-r--r--gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h66
-rw-r--r--gfx/skia/skia/include/utils/Sk3D.h19
-rw-r--r--gfx/skia/skia/include/utils/SkAnimCodecPlayer.h60
-rw-r--r--gfx/skia/skia/include/utils/SkBase64.h39
-rw-r--r--gfx/skia/skia/include/utils/SkCamera.h154
-rw-r--r--gfx/skia/skia/include/utils/SkCanvasStateUtils.h78
-rw-r--r--gfx/skia/skia/include/utils/SkEventTracer.h74
-rw-r--r--gfx/skia/skia/include/utils/SkFrontBufferedStream.h39
-rw-r--r--gfx/skia/skia/include/utils/SkInterpolator.h138
-rw-r--r--gfx/skia/skia/include/utils/SkLua.h69
-rw-r--r--gfx/skia/skia/include/utils/SkLuaCanvas.h72
-rw-r--r--gfx/skia/skia/include/utils/SkNWayCanvas.h94
-rw-r--r--gfx/skia/skia/include/utils/SkNoDrawCanvas.h86
-rw-r--r--gfx/skia/skia/include/utils/SkNullCanvas.h18
-rw-r--r--gfx/skia/skia/include/utils/SkPaintFilterCanvas.h116
-rw-r--r--gfx/skia/skia/include/utils/SkParse.h32
-rw-r--r--gfx/skia/skia/include/utils/SkParsePath.h23
-rw-r--r--gfx/skia/skia/include/utils/SkRandom.h169
-rw-r--r--gfx/skia/skia/include/utils/SkShadowUtils.h56
-rw-r--r--gfx/skia/skia/include/utils/SkTextUtils.h38
-rw-r--r--gfx/skia/skia/include/utils/SkTraceEventPhase.h19
-rw-r--r--gfx/skia/skia/include/utils/mac/SkCGUtils.h86
-rw-r--r--gfx/skia/skia/src/android/SkAndroidFrameworkUtils.cpp82
-rw-r--r--gfx/skia/skia/src/android/SkAnimatedImage.cpp359
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp118
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionCodec.h49
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp47
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h61
-rw-r--r--gfx/skia/skia/src/atlastext/SkAtlasTextContext.cpp17
-rw-r--r--gfx/skia/skia/src/atlastext/SkAtlasTextTarget.cpp255
-rw-r--r--gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.cpp128
-rw-r--r--gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.h83
-rw-r--r--gfx/skia/skia/src/c/sk_c_from_to.h34
-rw-r--r--gfx/skia/skia/src/c/sk_effects.cpp186
-rw-r--r--gfx/skia/skia/src/c/sk_imageinfo.cpp143
-rw-r--r--gfx/skia/skia/src/c/sk_paint.cpp173
-rw-r--r--gfx/skia/skia/src/c/sk_surface.cpp443
-rw-r--r--gfx/skia/skia/src/c/sk_types_priv.h41
-rw-r--r--gfx/skia/skia/src/codec/SkAndroidCodec.cpp416
-rw-r--r--gfx/skia/skia/src/codec/SkAndroidCodecAdapter.cpp30
-rw-r--r--gfx/skia/skia/src/codec/SkAndroidCodecAdapter.h37
-rw-r--r--gfx/skia/skia/src/codec/SkBmpBaseCodec.cpp16
-rw-r--r--gfx/skia/skia/src/codec/SkBmpBaseCodec.h38
-rw-r--r--gfx/skia/skia/src/codec/SkBmpCodec.cpp650
-rw-r--r--gfx/skia/skia/src/codec/SkBmpCodec.h151
-rw-r--r--gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp105
-rw-r--r--gfx/skia/skia/src/codec/SkBmpMaskCodec.h62
-rw-r--r--gfx/skia/skia/src/codec/SkBmpRLECodec.cpp568
-rw-r--r--gfx/skia/skia/src/codec/SkBmpRLECodec.h120
-rw-r--r--gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp341
-rw-r--r--gfx/skia/skia/src/codec/SkBmpStandardCodec.h92
-rw-r--r--gfx/skia/skia/src/codec/SkCodec.cpp867
-rw-r--r--gfx/skia/skia/src/codec/SkCodecAnimationPriv.h32
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp99
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.h47
-rw-r--r--gfx/skia/skia/src/codec/SkCodecPriv.h251
-rw-r--r--gfx/skia/skia/src/codec/SkColorTable.cpp23
-rw-r--r--gfx/skia/skia/src/codec/SkColorTable.h50
-rw-r--r--gfx/skia/skia/src/codec/SkEncodedInfo.cpp28
-rw-r--r--gfx/skia/skia/src/codec/SkFrameHolder.h201
-rw-r--r--gfx/skia/skia/src/codec/SkGifCodec.cpp533
-rw-r--r--gfx/skia/skia/src/codec/SkGifCodec.h156
-rw-r--r--gfx/skia/skia/src/codec/SkHeifCodec.cpp472
-rw-r--r--gfx/skia/skia/src/codec/SkHeifCodec.h128
-rw-r--r--gfx/skia/skia/src/codec/SkIcoCodec.cpp384
-rw-r--r--gfx/skia/skia/src/codec/SkIcoCodec.h99
-rw-r--r--gfx/skia/skia/src/codec/SkJpegCodec.cpp974
-rw-r--r--gfx/skia/skia/src/codec/SkJpegCodec.h144
-rw-r--r--gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp100
-rw-r--r--gfx/skia/skia/src/codec/SkJpegDecoderMgr.h75
-rw-r--r--gfx/skia/skia/src/codec/SkJpegPriv.h53
-rw-r--r--gfx/skia/skia/src/codec/SkJpegUtility.cpp142
-rw-r--r--gfx/skia/skia/src/codec/SkJpegUtility.h44
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.cpp568
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.h72
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.cpp162
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.h86
-rw-r--r--gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp61
-rw-r--r--gfx/skia/skia/src/codec/SkParseEncodedOrigin.h19
-rw-r--r--gfx/skia/skia/src/codec/SkPngCodec.cpp1194
-rw-r--r--gfx/skia/skia/src/codec/SkPngCodec.h121
-rw-r--r--gfx/skia/skia/src/codec/SkPngPriv.h19
-rw-r--r--gfx/skia/skia/src/codec/SkRawCodec.cpp797
-rw-r--r--gfx/skia/skia/src/codec/SkRawCodec.h65
-rw-r--r--gfx/skia/skia/src/codec/SkSampledCodec.cpp352
-rw-r--r--gfx/skia/skia/src/codec/SkSampledCodec.h59
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.cpp64
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.h84
-rw-r--r--gfx/skia/skia/src/codec/SkScalingCodec.h39
-rw-r--r--gfx/skia/skia/src/codec/SkStreamBuffer.cpp88
-rw-r--r--gfx/skia/skia/src/codec/SkStreamBuffer.h116
-rw-r--r--gfx/skia/skia/src/codec/SkStubHeifDecoderAPI.h76
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.cpp1237
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.h222
-rw-r--r--gfx/skia/skia/src/codec/SkWbmpCodec.cpp193
-rw-r--r--gfx/skia/skia/src/codec/SkWbmpCodec.h62
-rw-r--r--gfx/skia/skia/src/codec/SkWebpCodec.cpp567
-rw-r--r--gfx/skia/skia/src/codec/SkWebpCodec.h104
-rw-r--r--gfx/skia/skia/src/codec/SkWuffsCodec.cpp870
-rw-r--r--gfx/skia/skia/src/codec/SkWuffsCodec.h17
-rw-r--r--gfx/skia/skia/src/core/Sk4px.h251
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.cpp2162
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.h137
-rw-r--r--gfx/skia/skia/src/core/SkATrace.cpp83
-rw-r--r--gfx/skia/skia/src/core/SkATrace.h53
-rw-r--r--gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h73
-rw-r--r--gfx/skia/skia/src/core/SkAlphaRuns.cpp78
-rw-r--r--gfx/skia/skia/src/core/SkAnalyticEdge.cpp512
-rw-r--r--gfx/skia/skia/src/core/SkAnalyticEdge.h204
-rw-r--r--gfx/skia/skia/src/core/SkAnnotation.cpp48
-rw-r--r--gfx/skia/skia/src/core/SkAnnotationKeys.h33
-rw-r--r--gfx/skia/skia/src/core/SkAntiRun.h196
-rw-r--r--gfx/skia/skia/src/core/SkArenaAlloc.cpp166
-rw-r--r--gfx/skia/skia/src/core/SkArenaAlloc.h240
-rw-r--r--gfx/skia/skia/src/core/SkArenaAllocList.h80
-rw-r--r--gfx/skia/skia/src/core/SkAutoBlitterChoose.h56
-rw-r--r--gfx/skia/skia/src/core/SkAutoMalloc.h177
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp66
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.h81
-rw-r--r--gfx/skia/skia/src/core/SkBBHFactory.cpp15
-rw-r--r--gfx/skia/skia/src/core/SkBBoxHierarchy.h43
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.cpp71
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.h71
-rw-r--r--gfx/skia/skia/src/core/SkBitmap.cpp630
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.cpp300
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.h67
-rw-r--r--gfx/skia/skia/src/core/SkBitmapController.cpp106
-rw-r--r--gfx/skia/skia/src/core/SkBitmapController.h53
-rw-r--r--gfx/skia/skia/src/core/SkBitmapDevice.cpp820
-rw-r--r--gfx/skia/skia/src/core/SkBitmapDevice.h191
-rw-r--r--gfx/skia/skia/src/core/SkBitmapFilter.h209
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.cpp660
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.h216
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp830
-rw-r--r--gfx/skia/skia/src/core/SkBitmapScaler.cpp254
-rw-r--r--gfx/skia/skia/src/core/SkBitmapScaler.h46
-rw-r--r--gfx/skia/skia/src/core/SkBlendMode.cpp168
-rw-r--r--gfx/skia/skia/src/core/SkBlendModePriv.h50
-rw-r--r--gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h127
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow.h38
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow_D32.cpp314
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.cpp884
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.h321
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_A8.cpp94
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp1420
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_RGB565.cpp139
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_Sprite.cpp215
-rw-r--r--gfx/skia/skia/src/core/SkBlurMF.cpp938
-rw-r--r--gfx/skia/skia/src/core/SkBlurMask.cpp658
-rw-r--r--gfx/skia/skia/src/core/SkBlurMask.h87
-rw-r--r--gfx/skia/skia/src/core/SkBlurPriv.h38
-rw-r--r--gfx/skia/skia/src/core/SkBuffer.cpp87
-rw-r--r--gfx/skia/skia/src/core/SkBuffer.h132
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.cpp177
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.h113
-rw-r--r--gfx/skia/skia/src/core/SkCanvas.cpp3155
-rw-r--r--gfx/skia/skia/src/core/SkCanvasPriv.cpp99
-rw-r--r--gfx/skia/skia/src/core/SkCanvasPriv.h58
-rw-r--r--gfx/skia/skia/src/core/SkClipOpPriv.h21
-rw-r--r--gfx/skia/skia/src/core/SkClipStack.cpp1100
-rw-r--r--gfx/skia/skia/src/core/SkClipStack.h522
-rw-r--r--gfx/skia/skia/src/core/SkClipStackDevice.cpp108
-rw-r--r--gfx/skia/skia/src/core/SkClipStackDevice.h48
-rw-r--r--gfx/skia/skia/src/core/SkColor.cpp165
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter.cpp501
-rw-r--r--gfx/skia/skia/src/core/SkColorFilterPriv.h53
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp135
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter_Matrix.h45
-rw-r--r--gfx/skia/skia/src/core/SkColorSpace.cpp419
-rw-r--r--gfx/skia/skia/src/core/SkColorSpacePriv.h108
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp165
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXformSteps.h62
-rw-r--r--gfx/skia/skia/src/core/SkContourMeasure.cpp651
-rw-r--r--gfx/skia/skia/src/core/SkConvertPixels.cpp234
-rw-r--r--gfx/skia/skia/src/core/SkConvertPixels.h35
-rw-r--r--gfx/skia/skia/src/core/SkConvolver.cpp272
-rw-r--r--gfx/skia/skia/src/core/SkConvolver.h173
-rw-r--r--gfx/skia/skia/src/core/SkCoreBlitters.h179
-rw-r--r--gfx/skia/skia/src/core/SkCoverageModePriv.h43
-rw-r--r--gfx/skia/skia/src/core/SkCpu.cpp160
-rw-r--r--gfx/skia/skia/src/core/SkCpu.h114
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.cpp156
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.h34
-rw-r--r--gfx/skia/skia/src/core/SkCubicMap.cpp109
-rw-r--r--gfx/skia/skia/src/core/SkCubicSolver.h71
-rw-r--r--gfx/skia/skia/src/core/SkData.cpp198
-rw-r--r--gfx/skia/skia/src/core/SkDataTable.cpp131
-rw-r--r--gfx/skia/skia/src/core/SkDebug.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayList.cpp26
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h56
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp302
-rw-r--r--gfx/skia/skia/src/core/SkDeque.cpp307
-rw-r--r--gfx/skia/skia/src/core/SkDescriptor.cpp138
-rw-r--r--gfx/skia/skia/src/core/SkDescriptor.h105
-rw-r--r--gfx/skia/skia/src/core/SkDevice.cpp475
-rw-r--r--gfx/skia/skia/src/core/SkDevice.h452
-rw-r--r--gfx/skia/skia/src/core/SkDiscardableMemory.h65
-rw-r--r--gfx/skia/skia/src/core/SkDistanceFieldGen.cpp562
-rw-r--r--gfx/skia/skia/src/core/SkDistanceFieldGen.h75
-rw-r--r--gfx/skia/skia/src/core/SkDocument.cpp78
-rw-r--r--gfx/skia/skia/src/core/SkDraw.cpp1307
-rw-r--r--gfx/skia/skia/src/core/SkDraw.h170
-rw-r--r--gfx/skia/skia/src/core/SkDrawLooper.cpp108
-rw-r--r--gfx/skia/skia/src/core/SkDrawProcs.h42
-rw-r--r--gfx/skia/skia/src/core/SkDrawShadowInfo.cpp192
-rw-r--r--gfx/skia/skia/src/core/SkDrawShadowInfo.h77
-rw-r--r--gfx/skia/skia/src/core/SkDraw_atlas.cpp120
-rw-r--r--gfx/skia/skia/src/core/SkDraw_text.cpp139
-rw-r--r--gfx/skia/skia/src/core/SkDraw_vertices.cpp376
-rw-r--r--gfx/skia/skia/src/core/SkDrawable.cpp83
-rw-r--r--gfx/skia/skia/src/core/SkEdge.cpp503
-rw-r--r--gfx/skia/skia/src/core/SkEdge.h138
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.cpp405
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.h89
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.cpp557
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.h57
-rw-r--r--gfx/skia/skia/src/core/SkEffectPriv.h30
-rw-r--r--gfx/skia/skia/src/core/SkEndian.h194
-rw-r--r--gfx/skia/skia/src/core/SkEnumerate.h76
-rw-r--r--gfx/skia/skia/src/core/SkExchange.h25
-rw-r--r--gfx/skia/skia/src/core/SkExecutor.cpp151
-rw-r--r--gfx/skia/skia/src/core/SkFDot6.h78
-rw-r--r--gfx/skia/skia/src/core/SkFixed15.h77
-rw-r--r--gfx/skia/skia/src/core/SkFlattenable.cpp145
-rw-r--r--gfx/skia/skia/src/core/SkFont.cpp543
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.cpp148
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.h84
-rw-r--r--gfx/skia/skia/src/core/SkFontLCDConfig.cpp27
-rw-r--r--gfx/skia/skia/src/core/SkFontMgr.cpp308
-rw-r--r--gfx/skia/skia/src/core/SkFontMgrPriv.h14
-rw-r--r--gfx/skia/skia/src/core/SkFontPriv.h104
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.cpp211
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.h49
-rw-r--r--gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp20
-rw-r--r--gfx/skia/skia/src/core/SkFuzzLogging.h23
-rw-r--r--gfx/skia/skia/src/core/SkGaussFilter.cpp109
-rw-r--r--gfx/skia/skia/src/core/SkGaussFilter.h34
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.cpp1491
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.h453
-rw-r--r--gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp18
-rw-r--r--gfx/skia/skia/src/core/SkGlyph.cpp353
-rw-r--r--gfx/skia/skia/src/core/SkGlyph.h320
-rw-r--r--gfx/skia/skia/src/core/SkGlyphBuffer.cpp83
-rw-r--r--gfx/skia/skia/src/core/SkGlyphBuffer.h142
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRun.cpp378
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRun.h175
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRunPainter.cpp932
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRunPainter.h202
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.cpp556
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.h62
-rw-r--r--gfx/skia/skia/src/core/SkGraphics.cpp135
-rw-r--r--gfx/skia/skia/src/core/SkHalf.cpp97
-rw-r--r--gfx/skia/skia/src/core/SkICC.cpp350
-rw-r--r--gfx/skia/skia/src/core/SkICCPriv.h54
-rw-r--r--gfx/skia/skia/src/core/SkIPoint16.h57
-rw-r--r--gfx/skia/skia/src/core/SkImageFilter.cpp713
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.cpp170
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.h75
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterTypes.cpp97
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterTypes.h702
-rw-r--r--gfx/skia/skia/src/core/SkImageFilter_Base.h442
-rw-r--r--gfx/skia/skia/src/core/SkImageGenerator.cpp110
-rw-r--r--gfx/skia/skia/src/core/SkImageInfo.cpp194
-rw-r--r--gfx/skia/skia/src/core/SkImagePriv.h94
-rw-r--r--gfx/skia/skia/src/core/SkLRUCache.h115
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.cpp302
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.h77
-rw-r--r--gfx/skia/skia/src/core/SkLeanWindows.h34
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.cpp276
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.h45
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp55
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h40
-rw-r--r--gfx/skia/skia/src/core/SkMD5.cpp260
-rw-r--r--gfx/skia/skia/src/core/SkMD5.h42
-rw-r--r--gfx/skia/skia/src/core/SkMSAN.h42
-rw-r--r--gfx/skia/skia/src/core/SkMakeUnique.h28
-rw-r--r--gfx/skia/skia/src/core/SkMallocPixelRef.cpp94
-rw-r--r--gfx/skia/skia/src/core/SkMask.cpp114
-rw-r--r--gfx/skia/skia/src/core/SkMask.h243
-rw-r--r--gfx/skia/skia/src/core/SkMaskBlurFilter.cpp1051
-rw-r--r--gfx/skia/skia/src/core/SkMaskBlurFilter.h37
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.cpp185
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.h44
-rw-r--r--gfx/skia/skia/src/core/SkMaskFilter.cpp720
-rw-r--r--gfx/skia/skia/src/core/SkMaskFilterBase.h245
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.cpp125
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.h232
-rw-r--r--gfx/skia/skia/src/core/SkMath.cpp93
-rw-r--r--gfx/skia/skia/src/core/SkMathPriv.h251
-rw-r--r--gfx/skia/skia/src/core/SkMatrix.cpp1833
-rw-r--r--gfx/skia/skia/src/core/SkMatrix44.cpp1032
-rw-r--r--gfx/skia/skia/src/core/SkMatrixImageFilter.cpp131
-rw-r--r--gfx/skia/skia/src/core/SkMatrixImageFilter.h53
-rw-r--r--gfx/skia/skia/src/core/SkMatrixPriv.h155
-rw-r--r--gfx/skia/skia/src/core/SkMatrixUtils.h38
-rw-r--r--gfx/skia/skia/src/core/SkMessageBus.h125
-rw-r--r--gfx/skia/skia/src/core/SkMiniRecorder.cpp137
-rw-r--r--gfx/skia/skia/src/core/SkMiniRecorder.h58
-rw-r--r--gfx/skia/skia/src/core/SkMipMap.cpp778
-rw-r--r--gfx/skia/skia/src/core/SkMipMap.h82
-rw-r--r--gfx/skia/skia/src/core/SkModeColorFilter.cpp141
-rw-r--r--gfx/skia/skia/src/core/SkModeColorFilter.h46
-rw-r--r--gfx/skia/skia/src/core/SkMultiPictureDraw.cpp109
-rw-r--r--gfx/skia/skia/src/core/SkNextID.h21
-rw-r--r--gfx/skia/skia/src/core/SkNormalFlatSource.cpp100
-rw-r--r--gfx/skia/skia/src/core/SkNormalFlatSource.h47
-rw-r--r--gfx/skia/skia/src/core/SkNormalMapSource.cpp257
-rw-r--r--gfx/skia/skia/src/core/SkNormalMapSource.h57
-rw-r--r--gfx/skia/skia/src/core/SkNormalSource.cpp19
-rw-r--r--gfx/skia/skia/src/core/SkNormalSource.h82
-rw-r--r--gfx/skia/skia/src/core/SkOSFile.h99
-rw-r--r--gfx/skia/skia/src/core/SkOpts.cpp150
-rw-r--r--gfx/skia/skia/src/core/SkOpts.h90
-rw-r--r--gfx/skia/skia/src/core/SkOrderedReadBuffer.h9
-rw-r--r--gfx/skia/skia/src/core/SkOverdrawCanvas.cpp274
-rw-r--r--gfx/skia/skia/src/core/SkPaint.cpp552
-rw-r--r--gfx/skia/skia/src/core/SkPaintDefaults.h31
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.cpp86
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.h74
-rw-r--r--gfx/skia/skia/src/core/SkPath.cpp3739
-rw-r--r--gfx/skia/skia/src/core/SkPathEffect.cpp195
-rw-r--r--gfx/skia/skia/src/core/SkPathMakers.h88
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasure.cpp53
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasurePriv.h29
-rw-r--r--gfx/skia/skia/src/core/SkPathPriv.h400
-rw-r--r--gfx/skia/skia/src/core/SkPathRef.cpp702
-rw-r--r--gfx/skia/skia/src/core/SkPath_serial.cpp292
-rw-r--r--gfx/skia/skia/src/core/SkPicture.cpp333
-rw-r--r--gfx/skia/skia/src/core/SkPictureCommon.h92
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.cpp536
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.h170
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.cpp20
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.h208
-rw-r--r--gfx/skia/skia/src/core/SkPictureImageGenerator.cpp126
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.cpp662
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.h65
-rw-r--r--gfx/skia/skia/src/core/SkPicturePriv.h106
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.cpp932
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.h271
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecorder.cpp134
-rw-r--r--gfx/skia/skia/src/core/SkPixelRef.cpp152
-rw-r--r--gfx/skia/skia/src/core/SkPixelRefPriv.h20
-rw-r--r--gfx/skia/skia/src/core/SkPixmap.cpp594
-rw-r--r--gfx/skia/skia/src/core/SkPixmapPriv.h65
-rw-r--r--gfx/skia/skia/src/core/SkPoint.cpp164
-rw-r--r--gfx/skia/skia/src/core/SkPoint3.cpp76
-rw-r--r--gfx/skia/skia/src/core/SkPointPriv.h127
-rw-r--r--gfx/skia/skia/src/core/SkPromiseImageTexture.cpp48
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.cpp73
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.h171
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.cpp117
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.h69
-rw-r--r--gfx/skia/skia/src/core/SkRRect.cpp688
-rw-r--r--gfx/skia/skia/src/core/SkRRectPriv.h45
-rw-r--r--gfx/skia/skia/src/core/SkRTree.cpp178
-rw-r--r--gfx/skia/skia/src/core/SkRTree.h89
-rw-r--r--gfx/skia/skia/src/core/SkRWBuffer.cpp364
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.cpp507
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.h249
-rw-r--r--gfx/skia/skia/src/core/SkRasterClipStack.h174
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.cpp358
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.h298
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp472
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.cpp465
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.h321
-rw-r--r--gfx/skia/skia/src/core/SkReader32.h169
-rw-r--r--gfx/skia/skia/src/core/SkRecord.cpp37
-rw-r--r--gfx/skia/skia/src/core/SkRecord.h194
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.cpp529
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.h82
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.cpp320
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.h32
-rw-r--r--gfx/skia/skia/src/core/SkRecordPattern.h176
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.cpp90
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.h42
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.cpp410
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.h149
-rw-r--r--gfx/skia/skia/src/core/SkRecords.cpp24
-rw-r--r--gfx/skia/skia/src/core/SkRecords.h346
-rw-r--r--gfx/skia/skia/src/core/SkRect.cpp168
-rw-r--r--gfx/skia/skia/src/core/SkRectPriv.h64
-rw-r--r--gfx/skia/skia/src/core/SkRegion.cpp1582
-rw-r--r--gfx/skia/skia/src/core/SkRegionPriv.h259
-rw-r--r--gfx/skia/skia/src/core/SkRegion_path.cpp549
-rw-r--r--gfx/skia/skia/src/core/SkRemoteGlyphCache.cpp891
-rw-r--r--gfx/skia/skia/src/core/SkRemoteGlyphCache.h224
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.cpp605
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.h292
-rw-r--r--gfx/skia/skia/src/core/SkSafeMath.h108
-rw-r--r--gfx/skia/skia/src/core/SkSafeRange.h49
-rw-r--r--gfx/skia/skia/src/core/SkScalar.cpp35
-rw-r--r--gfx/skia/skia/src/core/SkScaleToSides.h71
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.cpp1205
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.h447
-rw-r--r--gfx/skia/skia/src/core/SkScan.cpp111
-rw-r--r--gfx/skia/skia/src/core/SkScan.h136
-rw-r--r--gfx/skia/skia/src/core/SkScanPriv.h100
-rw-r--r--gfx/skia/skia/src/core/SkScan_AAAPath.cpp2011
-rw-r--r--gfx/skia/skia/src/core/SkScan_AntiPath.cpp838
-rw-r--r--gfx/skia/skia/src/core/SkScan_Antihair.cpp1009
-rw-r--r--gfx/skia/skia/src/core/SkScan_Hairline.cpp732
-rw-r--r--gfx/skia/skia/src/core/SkScan_Path.cpp780
-rw-r--r--gfx/skia/skia/src/core/SkScopeExit.h59
-rw-r--r--gfx/skia/skia/src/core/SkSemaphore.cpp80
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.cpp366
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.h106
-rw-r--r--gfx/skia/skia/src/core/SkSpan.h56
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.cpp560
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.h171
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.cpp186
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.h99
-rw-r--r--gfx/skia/skia/src/core/SkSpinlock.cpp47
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter.h49
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp118
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter_RGB565.cpp180
-rw-r--r--gfx/skia/skia/src/core/SkStream.cpp948
-rw-r--r--gfx/skia/skia/src/core/SkStreamPriv.h34
-rw-r--r--gfx/skia/skia/src/core/SkStrike.cpp291
-rw-r--r--gfx/skia/skia/src/core/SkStrike.h205
-rw-r--r--gfx/skia/skia/src/core/SkStrikeCache.cpp581
-rw-r--r--gfx/skia/skia/src/core/SkStrikeCache.h168
-rw-r--r--gfx/skia/skia/src/core/SkStrikeForGPU.cpp27
-rw-r--r--gfx/skia/skia/src/core/SkStrikeForGPU.h83
-rw-r--r--gfx/skia/skia/src/core/SkStrikeSpec.cpp259
-rw-r--r--gfx/skia/skia/src/core/SkStrikeSpec.h131
-rw-r--r--gfx/skia/skia/src/core/SkString.cpp604
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.cpp81
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.h35
-rw-r--r--gfx/skia/skia/src/core/SkStroke.cpp1579
-rw-r--r--gfx/skia/skia/src/core/SkStroke.h79
-rw-r--r--gfx/skia/skia/src/core/SkStrokeRec.cpp170
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.cpp235
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.h43
-rw-r--r--gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp114
-rw-r--r--gfx/skia/skia/src/core/SkSurfacePriv.h27
-rw-r--r--gfx/skia/skia/src/core/SkSwizzle.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkTDPQueue.h219
-rw-r--r--gfx/skia/skia/src/core/SkTDynamicHash.h298
-rw-r--r--gfx/skia/skia/src/core/SkTInternalLList.h302
-rw-r--r--gfx/skia/skia/src/core/SkTLList.h354
-rw-r--r--gfx/skia/skia/src/core/SkTLS.cpp105
-rw-r--r--gfx/skia/skia/src/core/SkTLS.h83
-rw-r--r--gfx/skia/skia/src/core/SkTLazy.h202
-rw-r--r--gfx/skia/skia/src/core/SkTMultiMap.h210
-rw-r--r--gfx/skia/skia/src/core/SkTSearch.cpp116
-rw-r--r--gfx/skia/skia/src/core/SkTSearch.h146
-rw-r--r--gfx/skia/skia/src/core/SkTSort.h216
-rw-r--r--gfx/skia/skia/src/core/SkTTopoSort.h113
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.cpp51
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.h48
-rw-r--r--gfx/skia/skia/src/core/SkTextBlob.cpp947
-rw-r--r--gfx/skia/skia/src/core/SkTextBlobPriv.h268
-rw-r--r--gfx/skia/skia/src/core/SkTextFormatParams.h38
-rw-r--r--gfx/skia/skia/src/core/SkThreadID.cpp16
-rw-r--r--gfx/skia/skia/src/core/SkTime.cpp87
-rw-r--r--gfx/skia/skia/src/core/SkTraceEvent.h366
-rw-r--r--gfx/skia/skia/src/core/SkTraceEventCommon.h291
-rw-r--r--gfx/skia/skia/src/core/SkTypeface.cpp420
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.cpp111
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.h74
-rw-r--r--gfx/skia/skia/src/core/SkTypefacePriv.h38
-rw-r--r--gfx/skia/skia/src/core/SkTypeface_remote.cpp112
-rw-r--r--gfx/skia/skia/src/core/SkTypeface_remote.h141
-rw-r--r--gfx/skia/skia/src/core/SkUnPreMultiply.cpp79
-rw-r--r--gfx/skia/skia/src/core/SkUtils.cpp35
-rw-r--r--gfx/skia/skia/src/core/SkUtils.h98
-rw-r--r--gfx/skia/skia/src/core/SkUtilsArm.cpp8
-rw-r--r--gfx/skia/skia/src/core/SkVM.cpp2221
-rw-r--r--gfx/skia/skia/src/core/SkVM.h521
-rw-r--r--gfx/skia/skia/src/core/SkVMBlitter.cpp446
-rw-r--r--gfx/skia/skia/src/core/SkValidatingReadBuffer.h23
-rw-r--r--gfx/skia/skia/src/core/SkValidationUtils.h36
-rw-r--r--gfx/skia/skia/src/core/SkVertState.cpp106
-rw-r--r--gfx/skia/skia/src/core/SkVertState.h58
-rw-r--r--gfx/skia/skia/src/core/SkVertices.cpp431
-rw-r--r--gfx/skia/skia/src/core/SkVptr.h24
-rw-r--r--gfx/skia/skia/src/core/SkWriteBuffer.cpp260
-rw-r--r--gfx/skia/skia/src/core/SkWriteBuffer.h139
-rw-r--r--gfx/skia/skia/src/core/SkWritePixelsRec.h50
-rw-r--r--gfx/skia/skia/src/core/SkWriter32.cpp90
-rw-r--r--gfx/skia/skia/src/core/SkWriter32.h282
-rw-r--r--gfx/skia/skia/src/core/SkXfermode.cpp153
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp44
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.h30
-rw-r--r--gfx/skia/skia/src/core/SkXfermodePriv.h62
-rw-r--r--gfx/skia/skia/src/core/SkYUVASizeInfo.cpp37
-rw-r--r--gfx/skia/skia/src/core/SkYUVMath.cpp201
-rw-r--r--gfx/skia/skia/src/core/SkYUVMath.h19
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.cpp89
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.h47
-rw-r--r--gfx/skia/skia/src/core/SkZip.h187
-rw-r--r--gfx/skia/skia/src/effects/Sk1DPathEffect.cpp214
-rw-r--r--gfx/skia/skia/src/effects/Sk2DPathEffect.cpp135
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrix.cpp171
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp36
-rw-r--r--gfx/skia/skia/src/effects/SkCornerPathEffect.cpp156
-rw-r--r--gfx/skia/skia/src/effects/SkDashImpl.h42
-rw-r--r--gfx/skia/skia/src/effects/SkDashPathEffect.cpp398
-rw-r--r--gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp145
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.cpp114
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.h19
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp142
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMaskFilter.h48
-rw-r--r--gfx/skia/skia/src/effects/SkHighContrastFilter.cpp361
-rw-r--r--gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp328
-rw-r--r--gfx/skia/skia/src/effects/SkLumaColorFilter.cpp45
-rw-r--r--gfx/skia/skia/src/effects/SkOpPE.h69
-rw-r--r--gfx/skia/skia/src/effects/SkOpPathEffect.cpp124
-rw-r--r--gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp97
-rw-r--r--gfx/skia/skia/src/effects/SkPackBits.cpp112
-rw-r--r--gfx/skia/skia/src/effects/SkPackBits.h45
-rw-r--r--gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp131
-rw-r--r--gfx/skia/skia/src/effects/SkTableColorFilter.cpp432
-rw-r--r--gfx/skia/skia/src/effects/SkTableMaskFilter.cpp168
-rw-r--r--gfx/skia/skia/src/effects/SkTrimPE.h33
-rw-r--r--gfx/skia/skia/src/effects/SkTrimPathEffect.cpp119
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdFilter.cpp266
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp479
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp706
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp157
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp122
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapEffect.cpp675
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp183
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkImageFilters.cpp260
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkImageSource.cpp169
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp2120
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp222
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp574
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp126
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp853
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkOffsetImageFilter.cpp145
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkPaintImageFilter.cpp106
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp146
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp190
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkXfermodeImageFilter.cpp335
-rw-r--r--gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp187
-rw-r--r--gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp23
-rw-r--r--gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.cpp230
-rw-r--r--gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.h65
-rw-r--r--gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.cpp556
-rw-r--r--gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.h69
-rw-r--r--gfx/skia/skia/src/gpu/GrAllocator.h439
-rw-r--r--gfx/skia/skia/src/gpu/GrAppliedClip.h141
-rw-r--r--gfx/skia/skia/src/gpu/GrAuditTrail.cpp200
-rw-r--r--gfx/skia/skia/src/gpu/GrAuditTrail.h181
-rw-r--r--gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h98
-rw-r--r--gfx/skia/skia/src/gpu/GrBackendSurface.cpp924
-rw-r--r--gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.cpp224
-rw-r--r--gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.h87
-rw-r--r--gfx/skia/skia/src/gpu/GrBaseContextPriv.h54
-rw-r--r--gfx/skia/skia/src/gpu/GrBitmapTextureMaker.cpp110
-rw-r--r--gfx/skia/skia/src/gpu/GrBitmapTextureMaker.h36
-rw-r--r--gfx/skia/skia/src/gpu/GrBlend.h151
-rw-r--r--gfx/skia/skia/src/gpu/GrBlurUtils.cpp478
-rw-r--r--gfx/skia/skia/src/gpu/GrBlurUtils.h56
-rw-r--r--gfx/skia/skia/src/gpu/GrBuffer.h36
-rw-r--r--gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp534
-rw-r--r--gfx/skia/skia/src/gpu/GrBufferAllocPool.h317
-rw-r--r--gfx/skia/skia/src/gpu/GrCaps.cpp393
-rw-r--r--gfx/skia/skia/src/gpu/GrCaps.h553
-rw-r--r--gfx/skia/skia/src/gpu/GrClientMappedBufferManager.cpp71
-rw-r--r--gfx/skia/skia/src/gpu/GrClientMappedBufferManager.h72
-rw-r--r--gfx/skia/skia/src/gpu/GrClip.h174
-rw-r--r--gfx/skia/skia/src/gpu/GrClipStackClip.cpp544
-rw-r--r--gfx/skia/skia/src/gpu/GrClipStackClip.h68
-rw-r--r--gfx/skia/skia/src/gpu/GrColor.h113
-rw-r--r--gfx/skia/skia/src/gpu/GrColorInfo.cpp37
-rw-r--r--gfx/skia/skia/src/gpu/GrColorInfo.h51
-rw-r--r--gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp186
-rw-r--r--gfx/skia/skia/src/gpu/GrColorSpaceXform.h103
-rw-r--r--gfx/skia/skia/src/gpu/GrContext.cpp582
-rw-r--r--gfx/skia/skia/src/gpu/GrContextPriv.cpp340
-rw-r--r--gfx/skia/skia/src/gpu/GrContextPriv.h288
-rw-r--r--gfx/skia/skia/src/gpu/GrContextThreadSafeProxy.cpp124
-rw-r--r--gfx/skia/skia/src/gpu/GrContextThreadSafeProxyPriv.h63
-rw-r--r--gfx/skia/skia/src/gpu/GrContext_Base.cpp71
-rw-r--r--gfx/skia/skia/src/gpu/GrCoordTransform.h140
-rw-r--r--gfx/skia/skia/src/gpu/GrCopyRenderTask.cpp84
-rw-r--r--gfx/skia/skia/src/gpu/GrCopyRenderTask.h53
-rw-r--r--gfx/skia/skia/src/gpu/GrCpuBuffer.h45
-rw-r--r--gfx/skia/skia/src/gpu/GrDDLContext.cpp80
-rw-r--r--gfx/skia/skia/src/gpu/GrDataUtils.cpp616
-rw-r--r--gfx/skia/skia/src/gpu/GrDataUtils.h40
-rw-r--r--gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp363
-rw-r--r--gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h97
-rw-r--r--gfx/skia/skia/src/gpu/GrDeferredProxyUploader.h121
-rw-r--r--gfx/skia/skia/src/gpu/GrDeferredUpload.h147
-rw-r--r--gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.cpp870
-rw-r--r--gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawOpAtlas.cpp664
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawOpAtlas.h450
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawOpTest.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawOpTest.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawingManager.cpp939
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawingManager.h246
-rw-r--r--gfx/skia/skia/src/gpu/GrDriverBugWorkarounds.cpp41
-rw-r--r--gfx/skia/skia/src/gpu/GrFPArgs.h97
-rw-r--r--gfx/skia/skia/src/gpu/GrFixedClip.cpp72
-rw-r--r--gfx/skia/skia/src/gpu/GrFixedClip.h57
-rw-r--r--gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp431
-rw-r--r--gfx/skia/skia/src/gpu/GrFragmentProcessor.h475
-rw-r--r--gfx/skia/skia/src/gpu/GrGeometryProcessor.h45
-rw-r--r--gfx/skia/skia/src/gpu/GrGlyph.h103
-rw-r--r--gfx/skia/skia/src/gpu/GrGpu.cpp775
-rw-r--r--gfx/skia/skia/src/gpu/GrGpu.h685
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuBuffer.cpp68
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuBuffer.h103
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResource.cpp214
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h103
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResourcePriv.h95
-rw-r--r--gfx/skia/skia/src/gpu/GrImageContext.cpp47
-rw-r--r--gfx/skia/skia/src/gpu/GrImageContextPriv.h63
-rw-r--r--gfx/skia/skia/src/gpu/GrImageInfo.h92
-rw-r--r--gfx/skia/skia/src/gpu/GrImageTextureMaker.cpp117
-rw-r--r--gfx/skia/skia/src/gpu/GrImageTextureMaker.h75
-rw-r--r--gfx/skia/skia/src/gpu/GrLegacyDirectContext.cpp243
-rw-r--r--gfx/skia/skia/src/gpu/GrMemoryPool.cpp217
-rw-r--r--gfx/skia/skia/src/gpu/GrMemoryPool.h155
-rw-r--r--gfx/skia/skia/src/gpu/GrMesh.h270
-rw-r--r--gfx/skia/skia/src/gpu/GrNativeRect.h98
-rw-r--r--gfx/skia/skia/src/gpu/GrNonAtomicRef.h63
-rw-r--r--gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.cpp134
-rw-r--r--gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.h102
-rw-r--r--gfx/skia/skia/src/gpu/GrOpFlushState.cpp230
-rw-r--r--gfx/skia/skia/src/gpu/GrOpFlushState.h203
-rw-r--r--gfx/skia/skia/src/gpu/GrOpsRenderPass.cpp68
-rw-r--r--gfx/skia/skia/src/gpu/GrOpsRenderPass.h109
-rw-r--r--gfx/skia/skia/src/gpu/GrOpsTask.cpp856
-rw-r--r--gfx/skia/skia/src/gpu/GrOpsTask.h299
-rw-r--r--gfx/skia/skia/src/gpu/GrPaint.cpp67
-rw-r--r--gfx/skia/skia/src/gpu/GrPaint.h136
-rw-r--r--gfx/skia/skia/src/gpu/GrPath.cpp55
-rw-r--r--gfx/skia/skia/src/gpu/GrPath.h58
-rw-r--r--gfx/skia/skia/src/gpu/GrPathProcessor.cpp141
-rw-r--r--gfx/skia/skia/src/gpu/GrPathProcessor.h50
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderer.cpp120
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderer.h208
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendererChain.cpp125
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendererChain.h65
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendering.cpp65
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendering.h134
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendering_none.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrPersistentCacheUtils.h105
-rw-r--r--gfx/skia/skia/src/gpu/GrPipeline.cpp115
-rw-r--r--gfx/skia/skia/src/gpu/GrPipeline.h241
-rw-r--r--gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp74
-rw-r--r--gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h369
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessor.cpp131
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessor.h226
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorAnalysis.cpp48
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorAnalysis.h153
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorSet.cpp253
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorSet.h205
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp25
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorUnitTest.h220
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramDesc.cpp268
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramDesc.h144
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramInfo.cpp121
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramInfo.h115
-rw-r--r--gfx/skia/skia/src/gpu/GrProxyProvider.cpp958
-rw-r--r--gfx/skia/skia/src/gpu/GrProxyProvider.h296
-rw-r--r--gfx/skia/skia/src/gpu/GrRecordingContext.cpp389
-rw-r--r--gfx/skia/skia/src/gpu/GrRecordingContextPriv.h142
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer.h44
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_pow2.h81
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp121
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_skyline.h62
-rw-r--r--gfx/skia/skia/src/gpu/GrReducedClip.cpp991
-rw-r--r--gfx/skia/skia/src/gpu/GrReducedClip.h151
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTarget.cpp100
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTarget.h77
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetContext.cpp2466
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetContext.h657
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetContextPriv.h128
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetPriv.h73
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp157
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetProxy.h179
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetProxyPriv.h53
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTask.cpp301
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTask.h207
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceAllocator.cpp510
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceAllocator.h277
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceCache.cpp945
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceCache.h448
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceHandle.h36
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceProvider.cpp581
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceProvider.h360
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceProviderPriv.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp112
-rw-r--r--gfx/skia/skia/src/gpu/GrSWMaskHelper.h71
-rw-r--r--gfx/skia/skia/src/gpu/GrSamplePatternDictionary.cpp42
-rw-r--r--gfx/skia/skia/src/gpu/GrSamplePatternDictionary.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrSamplerState.h86
-rw-r--r--gfx/skia/skia/src/gpu/GrScissorState.h40
-rw-r--r--gfx/skia/skia/src/gpu/GrSemaphore.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderCaps.cpp172
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderCaps.h314
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderUtils.cpp220
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderUtils.h23
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderVar.cpp61
-rw-r--r--gfx/skia/skia/src/gpu/GrShaderVar.h295
-rw-r--r--gfx/skia/skia/src/gpu/GrSkSLFPFactoryCache.h37
-rw-r--r--gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp395
-rw-r--r--gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h79
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilAttachment.cpp18
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilAttachment.h58
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilClip.h59
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilSettings.cpp506
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilSettings.h135
-rw-r--r--gfx/skia/skia/src/gpu/GrStyle.cpp199
-rw-r--r--gfx/skia/skia/src/gpu/GrStyle.h215
-rw-r--r--gfx/skia/skia/src/gpu/GrSurface.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceContext.cpp661
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceContext.h166
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceContextPriv.h45
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfacePriv.h42
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp523
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceProxy.h445
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceProxyPriv.h65
-rw-r--r--gfx/skia/skia/src/gpu/GrSwizzle.cpp36
-rw-r--r--gfx/skia/skia/src/gpu/GrSwizzle.h151
-rw-r--r--gfx/skia/skia/src/gpu/GrTRecorder.h176
-rw-r--r--gfx/skia/skia/src/gpu/GrTessellator.cpp2415
-rw-r--r--gfx/skia/skia/src/gpu/GrTessellator.h53
-rw-r--r--gfx/skia/skia/src/gpu/GrTestUtils.cpp350
-rw-r--r--gfx/skia/skia/src/gpu/GrTestUtils.h157
-rw-r--r--gfx/skia/skia/src/gpu/GrTexture.cpp127
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureAdjuster.cpp166
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureAdjuster.h58
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureContext.cpp52
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureContext.h57
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureMaker.cpp142
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureMaker.h51
-rw-r--r--gfx/skia/skia/src/gpu/GrTexturePriv.h85
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProducer.cpp306
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProducer.h203
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProxy.cpp197
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProxy.h196
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProxyCacheAccess.h46
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProxyPriv.h51
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.cpp182
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.h84
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureResolveManager.h37
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.cpp100
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.h51
-rw-r--r--gfx/skia/skia/src/gpu/GrTracing.h23
-rw-r--r--gfx/skia/skia/src/gpu/GrTransferFromRenderTask.cpp30
-rw-r--r--gfx/skia/skia/src/gpu/GrTransferFromRenderTask.h60
-rw-r--r--gfx/skia/skia/src/gpu/GrUserStencilSettings.h257
-rw-r--r--gfx/skia/skia/src/gpu/GrUtil.h90
-rw-r--r--gfx/skia/skia/src/gpu/GrVertexWriter.h176
-rw-r--r--gfx/skia/skia/src/gpu/GrWaitRenderTask.cpp28
-rw-r--r--gfx/skia/skia/src/gpu/GrWaitRenderTask.h44
-rw-r--r--gfx/skia/skia/src/gpu/GrWindowRectangles.h123
-rw-r--r--gfx/skia/skia/src/gpu/GrWindowRectsState.h54
-rw-r--r--gfx/skia/skia/src/gpu/GrXferProcessor.cpp191
-rw-r--r--gfx/skia/skia/src/gpu/GrXferProcessor.h323
-rw-r--r--gfx/skia/skia/src/gpu/GrYUVProvider.cpp190
-rw-r--r--gfx/skia/skia/src/gpu/GrYUVProvider.h93
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice.cpp1671
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice.h247
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp608
-rw-r--r--gfx/skia/skia/src/gpu/SkGr.cpp609
-rw-r--r--gfx/skia/skia/src/gpu/SkGr.h232
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.cpp268
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.h177
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.cpp75
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.h81
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.cpp124
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.h47
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.cpp110
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.h49
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.cpp215
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.h298
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.cpp159
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.h48
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.cpp461
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.h147
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.cpp802
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.h143
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCFiller.cpp583
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCFiller.h127
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.cpp442
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.h371
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.cpp250
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.h123
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.cpp611
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.h205
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCPerOpsTaskPaths.h32
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.cpp95
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.h47
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCSTLList.h66
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.cpp583
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.h179
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCStroker.cpp836
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCCStroker.h128
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp374
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.h121
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp23
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.cpp463
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.h54
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.cpp168
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.h115
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.cpp118
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.h33
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.cpp162
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.h84
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.cpp553
-rw-r--r--gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.h38
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.cpp75
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.h35
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnCaps.cpp231
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnCaps.h88
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnGpu.cpp699
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnGpu.h191
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.cpp203
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.h96
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.cpp555
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.h101
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.cpp290
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.h93
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.cpp65
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.h54
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.cpp36
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.h43
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.cpp65
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.h49
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.cpp71
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.h43
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnTexture.cpp201
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnTexture.h63
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.cpp36
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.h55
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.cpp326
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.h65
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnUtil.cpp72
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnUtil.h22
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.cpp108
-rw-r--r--gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.h27
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrAARectEffect.fp65
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrAlphaThresholdFragmentProcessor.fp77
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrAtlasedShaderHelpers.h74
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp462
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBezierEffect.h216
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp255
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h148
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp233
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h80
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBlurredEdgeFragmentProcessor.fp26
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCircleBlurFragmentProcessor.fp292
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCircleEffect.fp79
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrColorMatrixFragmentProcessor.fp90
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrComposeLerpEffect.fp16
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrComposeLerpRedEffect.fp16
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.fp174
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.fp86
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp235
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h92
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp234
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.h65
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp436
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCustomXfermode.h26
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp76
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h67
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp928
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h265
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrEllipseEffect.fp132
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.cpp305
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h104
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrLumaColorFilterEffect.fp27
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMagnifierEffect.fp94
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp352
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h96
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMixerEffect.fp46
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp33
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrOvalEffect.h27
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrOverrideInputFragmentProcessor.fp55
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp945
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.h76
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrPremulInputFragmentProcessor.fp21
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRRectBlurEffect.fp207
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp769
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRRectEffect.h29
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRectBlurEffect.fp205
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSRGBEffect.cpp136
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSRGBEffect.h56
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSaturateProcessor.fp25
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.cpp84
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.h52
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.fp81
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSkSLFP.cpp573
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSkSLFP.h202
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp507
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrTextureDomain.h294
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp524
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.h37
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.cpp252
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.h96
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.cpp118
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.h40
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.cpp126
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.h56
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.cpp60
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.h38
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.cpp350
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h47
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.cpp121
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.h48
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.cpp118
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h82
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.cpp82
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.h51
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.cpp75
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.h55
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.cpp71
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.h159
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.cpp103
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.h65
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.cpp147
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.h57
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.cpp50
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.h43
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.cpp200
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.h59
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.cpp78
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.h71
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.cpp98
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.h70
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.cpp50
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.h40
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.cpp169
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.h136
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.cpp176
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.h136
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.cpp54
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.h41
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.cpp92
-rw-r--r--gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.h75
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrPathUtils.cpp859
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrPathUtils.h217
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrQuad.cpp145
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrQuad.h159
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrQuadBuffer.h378
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrQuadUtils.cpp346
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrQuadUtils.h43
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrRect.h148
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrShape.cpp765
-rw-r--r--gfx/skia/skia/src/gpu/geometry/GrShape.h631
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleGLESInterfaceAutogen.cpp509
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleGLInterfaceAutogen.cpp501
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleHelpers.cpp24
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp42
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleWebGLInterfaceAutogen.cpp240
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp300
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLBuffer.h68
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp4246
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCaps.h691
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLContext.cpp117
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLContext.h109
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLDefines.h1112
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp171
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp71
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGLSL.h20
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp4035
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpu.h664
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp106
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLInterfaceAutogen.cpp742
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLMakeNativeInterface_none.cpp12
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.cpp25
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.h78
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPath.cpp347
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPath.h56
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp266
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h121
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp168
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgram.h174
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp317
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h117
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp263
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h114
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLSemaphore.cpp29
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLSemaphore.h53
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp44
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h67
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp174
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTexture.h91
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp78
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h79
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTypesPriv.cpp67
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp136
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h78
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp658
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUtil.h377
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp34
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h35
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp223
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h127
-rw-r--r--gfx/skia/skia/src/gpu/gl/android/GrGLMakeNativeInterface_android.cpp7
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp609
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h108
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp115
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h32
-rw-r--r--gfx/skia/skia/src/gpu/gl/egl/GrGLMakeNativeInterface_egl.cpp137
-rw-r--r--gfx/skia/skia/src/gpu/gl/glfw/GrGLMakeNativeInterface_glfw.cpp29
-rw-r--r--gfx/skia/skia/src/gpu/gl/glx/GrGLMakeNativeInterface_glx.cpp37
-rw-r--r--gfx/skia/skia/src/gpu/gl/iOS/GrGLMakeNativeInterface_iOS.cpp57
-rw-r--r--gfx/skia/skia/src/gpu/gl/mac/GrGLMakeNativeInterface_mac.cpp63
-rw-r--r--gfx/skia/skia/src/gpu/gl/win/GrGLMakeNativeInterface_win.cpp100
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp101
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSL.h58
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp510
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h28
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h87
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp130
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h211
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp322
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h224
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp150
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h95
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp78
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h151
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp372
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h181
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp32
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h73
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp278
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h257
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h102
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp52
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h19
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp154
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h180
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.cpp91
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.h86
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp173
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h124
-rw-r--r--gfx/skia/skia/src/gpu/gpu_workaround_list.txt17
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrClampedGradientEffect.fp58
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrDualIntervalGradientColorizer.fp62
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.cpp249
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.h62
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrGradientShader.cpp308
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrGradientShader.h65
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrLinearGradientLayout.fp74
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrRadialGradientLayout.fp70
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrSingleIntervalGradientColorizer.fp21
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrSweepGradientLayout.fp83
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrTextureGradientColorizer.fp18
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrTiledGradientEffect.fp59
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrTwoPointConicalGradientLayout.fp302
-rw-r--r--gfx/skia/skia/src/gpu/gradients/GrUnrolledBinaryGradientColorizer.fp188
-rw-r--r--gfx/skia/skia/src/gpu/gradients/README.md71
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.cpp109
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.h64
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.cpp149
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.h46
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.cpp91
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.h44
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.cpp95
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.h44
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.cpp82
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.h40
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.cpp132
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.h48
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.h40
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.cpp85
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.h60
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.cpp305
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.h62
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.cpp381
-rw-r--r--gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h93
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockBuffer.h35
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockCaps.cpp41
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockCaps.h186
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockGpu.cpp310
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockGpu.h160
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockOpsRenderPass.h61
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockStencilAttachment.h29
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockTexture.h199
-rw-r--r--gfx/skia/skia/src/gpu/mock/GrMockTypes.cpp18
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.h58
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.mm188
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlCaps.h188
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlCaps.mm1107
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.h52
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.mm138
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlCppUtil.h22
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.h65
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.mm126
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlGpu.h238
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlGpu.mm1375
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.h105
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.mm420
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.h133
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.mm248
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.h89
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.mm475
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.h84
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.mm362
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.h94
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.mm146
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.h114
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.mm290
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlSampler.h46
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlSampler.mm78
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.h47
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.mm70
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.h54
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.mm72
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTexture.h71
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTexture.mm112
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.h86
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.mm144
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.h28
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.mm25
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.h99
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.mm310
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlUtil.h108
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlUtil.mm351
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.h27
-rw-r--r--gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.mm42
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.cpp880
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.h23
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.cpp1108
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.h292
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.cpp1106
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.h30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.cpp394
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.h23
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.cpp576
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.h179
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrClearOp.cpp65
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrClearOp.h103
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.cpp28
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.h68
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.cpp58
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.h30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDashOp.cpp1279
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDashOp.h38
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.cpp30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.h54
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.cpp714
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.h44
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.cpp346
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.h30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawOp.h63
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawPathOp.cpp154
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawPathOp.h100
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.cpp708
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.h39
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawableOp.cpp34
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrDrawableOp.h53
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrFillRRectOp.cpp826
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrFillRRectOp.h92
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrFillRectOp.cpp447
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrFillRectOp.h58
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrLatticeOp.cpp479
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrLatticeOp.h36
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.cpp106
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.h181
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrOp.cpp79
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrOp.h337
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.cpp3266
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.h68
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrPathStencilSettings.h96
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.cpp1042
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.h121
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrRegionOp.cpp222
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrRegionOp.h30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.cpp692
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.h30
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp201
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.h228
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.cpp999
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.h83
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.cpp171
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.h44
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStencilPathOp.cpp40
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStencilPathOp.h72
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.cpp830
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.h45
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.cpp436
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.h37
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrTextureOp.cpp817
-rw-r--r--gfx/skia/skia/src/gpu/ops/GrTextureOp.h68
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasManager.cpp183
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasManager.h145
-rw-r--r--gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp104
-rw-r--r--gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h35
-rw-r--r--gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.cpp100
-rw-r--r--gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.h24
-rw-r--r--gfx/skia/skia/src/gpu/text/GrStrikeCache.cpp244
-rw-r--r--gfx/skia/skia/src/gpu/text/GrStrikeCache.h138
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlob.cpp453
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlob.h642
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp95
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlobCache.h191
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlobVertexRegenerator.cpp287
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextContext.cpp270
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextContext.h98
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextTarget.h56
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.cpp275
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.h57
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp268
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBuffer.h122
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBufferView.cpp38
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBufferView.h38
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp1747
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCaps.h321
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp942
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h347
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandPool.cpp91
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandPool.h69
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp51
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h50
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp34
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h44
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp336
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h97
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp131
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp57
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h48
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp2558
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpu.h313
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImage.cpp331
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImage.h259
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImageLayout.h32
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp77
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImageView.h53
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp62
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h37
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp483
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkInterface.h235
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp230
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkMemory.h56
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.cpp662
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.h131
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp642
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipeline.h66
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp337
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h150
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp353
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h121
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp120
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp346
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h85
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp266
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h158
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp401
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h182
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResource.h243
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp585
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h284
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp132
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSampler.h88
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp112
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.h75
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp168
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.h113
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSemaphore.cpp80
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSemaphore.h90
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp102
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h58
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp252
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTexture.h81
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp251
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h120
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp61
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h47
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTypesPriv.cpp50
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp103
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h59
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp341
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h120
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp197
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUtil.h97
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp109
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h27
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp73
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h37
-rw-r--r--gfx/skia/skia/src/image/SkImage.cpp535
-rw-r--r--gfx/skia/skia/src/image/SkImage_Base.h128
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.cpp681
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.h75
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuBase.cpp518
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuBase.h103
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp400
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuYUVA.h107
-rw-r--r--gfx/skia/skia/src/image/SkImage_Lazy.cpp568
-rw-r--r--gfx/skia/skia/src/image/SkImage_Lazy.h100
-rw-r--r--gfx/skia/skia/src/image/SkImage_Raster.cpp360
-rw-r--r--gfx/skia/skia/src/image/SkReadPixelsRec.h50
-rw-r--r--gfx/skia/skia/src/image/SkSurface.cpp643
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Base.h169
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.cpp741
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.h69
-rw-r--r--gfx/skia/skia/src/image/SkSurface_GpuMtl.mm62
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Raster.cpp214
-rw-r--r--gfx/skia/skia/src/images/SkImageEncoder.cpp105
-rw-r--r--gfx/skia/skia/src/images/SkImageEncoderFns.h178
-rw-r--r--gfx/skia/skia/src/images/SkImageEncoderPriv.h38
-rw-r--r--gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp67
-rw-r--r--gfx/skia/skia/src/images/SkJPEGWriteUtility.h39
-rw-r--r--gfx/skia/skia/src/images/SkJpegEncoder.cpp257
-rw-r--r--gfx/skia/skia/src/images/SkPngEncoder.cpp450
-rw-r--r--gfx/skia/skia/src/images/SkWebpEncoder.cpp219
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp241
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h67
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_NEON.h56
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_SSE2.h76
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_none.h59
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapFilter_opts.h927
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts.h502
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts.h231
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts.h202
-rw-r--r--gfx/skia/skia/src/opts/SkChecksum_opts.h212
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_avx.cpp41
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_crc32.cpp17
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_hsw.cpp142
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_sse41.cpp31
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_sse42.cpp18
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_ssse3.cpp33
-rw-r--r--gfx/skia/skia/src/opts/SkRasterPipeline_opts.h4557
-rw-r--r--gfx/skia/skia/src/opts/SkSwizzler_opts.h823
-rw-r--r--gfx/skia/skia/src/opts/SkUtils_opts.h66
-rw-r--r--gfx/skia/skia/src/opts/SkXfermode_opts.h138
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.cpp579
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.h17
-rw-r--r--gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp384
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp455
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp44
-rw-r--r--gfx/skia/skia/src/pathops/SkDLineIntersection.cpp339
-rw-r--r--gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp470
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersectionHelper.h113
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.cpp173
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.h333
-rw-r--r--gfx/skia/skia/src/pathops/SkLineParameters.h181
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.cpp1141
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.h146
-rw-r--r--gfx/skia/skia/src/pathops/SkOpBuilder.cpp195
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCoincidence.cpp1448
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCoincidence.h303
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.cpp109
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.h454
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCubicHull.cpp150
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp357
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h75
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.cpp1781
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.h455
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSpan.cpp484
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSpan.h578
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp427
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsBounds.h65
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp333
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.h30
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.cpp190
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.h193
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp750
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.h240
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp145
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.h421
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp3147
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.h495
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.cpp149
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.h39
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsOp.cpp382
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsPoint.h279
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp416
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.h184
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.cpp66
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.h77
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp226
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTCurve.h48
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp2138
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.h372
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp74
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp255
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.h624
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp425
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.cpp434
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.h54
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.cpp283
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.h35
-rw-r--r--gfx/skia/skia/src/pdf/SkBitmapKey.h22
-rw-r--r--gfx/skia/skia/src/pdf/SkClusterator.cpp64
-rw-r--r--gfx/skia/skia/src/pdf/SkClusterator.h47
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.cpp126
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.h53
-rw-r--r--gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp15
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.cpp128
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.h25
-rw-r--r--gfx/skia/skia/src/pdf/SkKeyedImage.cpp49
-rw-r--r--gfx/skia/skia/src/pdf/SkKeyedImage.h46
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.cpp290
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.h22
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.cpp1765
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.h200
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocument.cpp594
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h159
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.cpp690
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.h141
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp39
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.h28
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGlyphUse.h49
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp921
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGradientShader.h66
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp250
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h40
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp140
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.h71
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp206
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h23
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp220
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h28
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.cpp412
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.h29
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp96
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.h50
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.cpp375
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.h67
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp181
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFSubsetFont.h16
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTag.cpp226
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTag.h39
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFType1Font.cpp335
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFType1Font.h11
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.cpp486
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.h204
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUnion.h128
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.cpp395
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.h137
-rw-r--r--gfx/skia/skia/src/pdf/SkUUID.h18
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_android.cpp35
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_stdio.cpp20
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_win.cpp34
-rw-r--r--gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface.cpp33
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp710
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h39
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp16
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigTypeface.h85
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp2034
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp760
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h127
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_cairo.cpp677
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_mac.cpp3011
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_win.cpp2287
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp328
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android.cpp598
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp836
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.h216
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom.cpp289
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom.h161
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp104
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp122
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp27
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp1042
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp505
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp1227
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp58
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp118
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp198
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp157
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp206
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_none.cpp12
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_malloc.cpp80
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp49
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_ios.h47
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_posix.cpp213
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_stdio.cpp188
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_win.cpp282
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary.h14
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp21
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_win.cpp21
-rw-r--r--gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp472
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp1240
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.h106
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_none.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_pthread.cpp25
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_win.cpp80
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp533
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.h171
-rw-r--r--gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h142
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTableTypes.h62
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h108
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h150
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h41
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h52
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h515
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h538
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h547
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h582
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h141
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_fvar.h56
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_gasp.h72
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_glyf.h213
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_head.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_hhea.h54
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_loca.h31
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp.h34
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h30
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h48
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.cpp586
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.h577
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_post.h50
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.cpp239
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.h105
-rw-r--r--gfx/skia/skia/src/sfnt/SkPanose.h527
-rw-r--r--gfx/skia/skia/src/sfnt/SkSFNTHeader.h70
-rw-r--r--gfx/skia/skia/src/sfnt/SkTTCFHeader.h57
-rw-r--r--gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp109
-rw-r--r--gfx/skia/skia/src/shaders/SkBitmapProcShader.h25
-rw-r--r--gfx/skia/skia/src/shaders/SkColorFilterShader.cpp79
-rw-r--r--gfx/skia/skia/src/shaders/SkColorFilterShader.h37
-rw-r--r--gfx/skia/skia/src/shaders/SkColorShader.cpp116
-rw-r--r--gfx/skia/skia/src/shaders/SkColorShader.h71
-rw-r--r--gfx/skia/skia/src/shaders/SkComposeShader.cpp253
-rw-r--r--gfx/skia/skia/src/shaders/SkComposeShader.h101
-rw-r--r--gfx/skia/skia/src/shaders/SkEmptyShader.h46
-rw-r--r--gfx/skia/skia/src/shaders/SkImageShader.cpp614
-rw-r--r--gfx/skia/skia/src/shaders/SkImageShader.h61
-rw-r--r--gfx/skia/skia/src/shaders/SkLightingShader.cpp490
-rw-r--r--gfx/skia/skia/src/shaders/SkLightingShader.h39
-rw-r--r--gfx/skia/skia/src/shaders/SkLights.cpp68
-rw-r--r--gfx/skia/skia/src/shaders/SkLights.h192
-rw-r--r--gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp106
-rw-r--r--gfx/skia/skia/src/shaders/SkLocalMatrixShader.h61
-rw-r--r--gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp1546
-rw-r--r--gfx/skia/skia/src/shaders/SkPictureShader.cpp365
-rw-r--r--gfx/skia/skia/src/shaders/SkPictureShader.h81
-rw-r--r--gfx/skia/skia/src/shaders/SkRTShader.cpp156
-rw-r--r--gfx/skia/skia/src/shaders/SkRTShader.h64
-rw-r--r--gfx/skia/skia/src/shaders/SkShader.cpp199
-rw-r--r--gfx/skia/skia/src/shaders/SkShaderBase.h252
-rw-r--r--gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.cpp313
-rw-r--r--gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.h79
-rw-r--r--gfx/skia/skia/src/shaders/gradients/Sk4fGradientPriv.h105
-rw-r--r--gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.cpp388
-rw-r--r--gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.h40
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp886
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkGradientShaderPriv.h146
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp98
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h44
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp77
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkRadialGradient.h39
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp82
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkSweepGradient.h44
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp248
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.h91
-rw-r--r--gfx/skia/skia/src/sksl/GLSL.std.450.h131
-rw-r--r--gfx/skia/skia/src/sksl/README173
-rw-r--r--gfx/skia/skia/src/sksl/SkSLASTFile.h34
-rw-r--r--gfx/skia/skia/src/sksl/SkSLASTNode.cpp238
-rw-r--r--gfx/skia/skia/src/sksl/SkSLASTNode.h635
-rw-r--r--gfx/skia/skia/src/sksl/SkSLByteCode.cpp1760
-rw-r--r--gfx/skia/skia/src/sksl/SkSLByteCode.h304
-rw-r--r--gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.cpp1665
-rw-r--r--gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.h353
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCFGGenerator.cpp673
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCFGGenerator.h170
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCPP.h57
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.cpp1343
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.h141
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.cpp234
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.h133
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCodeGenerator.h42
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.cpp1676
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.h239
-rw-r--r--gfx/skia/skia/src/sksl/SkSLContext.h429
-rw-r--r--gfx/skia/skia/src/sksl/SkSLDefines.h58
-rw-r--r--gfx/skia/skia/src/sksl/SkSLErrorReporter.h33
-rw-r--r--gfx/skia/skia/src/sksl/SkSLExternalValue.h116
-rw-r--r--gfx/skia/skia/src/sksl/SkSLFileOutputStream.h76
-rw-r--r--gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp1786
-rw-r--r--gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h266
-rw-r--r--gfx/skia/skia/src/sksl/SkSLHCodeGenerator.cpp389
-rw-r--r--gfx/skia/skia/src/sksl/SkSLHCodeGenerator.h86
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp2493
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIRGenerator.h180
-rw-r--r--gfx/skia/skia/src/sksl/SkSLLexer.cpp1037
-rw-r--r--gfx/skia/skia/src/sksl/SkSLLexer.h245
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMain.cpp152
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMemoryLayout.h140
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.cpp1702
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.h288
-rw-r--r--gfx/skia/skia/src/sksl/SkSLOutputStream.cpp30
-rw-r--r--gfx/skia/skia/src/sksl/SkSLOutputStream.h42
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.cpp2150
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.h289
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.cpp297
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.h73
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPosition.h39
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp3257
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h411
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.cpp239
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.h125
-rw-r--r--gfx/skia/skia/src/sksl/SkSLString.cpp274
-rw-r--r--gfx/skia/skia/src/sksl/SkSLString.h153
-rw-r--r--gfx/skia/skia/src/sksl/SkSLStringStream.h87
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.cpp76
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.h396
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h61
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBlock.h63
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h57
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructor.h223
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h43
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLEnum.h60
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpression.h149
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h39
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExtension.h38
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExternalFunctionCall.h62
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExternalValueReference.h43
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLField.h42
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h56
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h67
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLForStatement.h67
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h64
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h117
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h51
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIRNode.h35
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h56
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h89
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h69
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h83
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLLayout.h432
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLModifiers.h116
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h41
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLNop.h40
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLNullLiteral.h51
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h46
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h80
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgram.h298
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h49
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSection.h47
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp22
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSetting.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLStatement.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h58
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h65
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h166
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbol.h43
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp139
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h66
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h53
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.cpp232
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.h442
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h49
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h40
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h115
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationsStatement.h49
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariable.h84
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp114
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h75
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h43
-rw-r--r--gfx/skia/skia/src/sksl/lex/DFA.h37
-rw-r--r--gfx/skia/skia/src/sksl/lex/DFAState.h73
-rw-r--r--gfx/skia/skia/src/sksl/lex/LexUtil.h18
-rw-r--r--gfx/skia/skia/src/sksl/lex/Main.cpp216
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFA.cpp41
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFA.h54
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFAState.h150
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFAtoDFA.h168
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexNode.cpp117
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexNode.h78
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexParser.cpp176
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexParser.h89
-rw-r--r--gfx/skia/skia/src/sksl/lex/sksl.lex98
-rw-r--r--gfx/skia/skia/src/sksl/sksl_enums.inc35
-rw-r--r--gfx/skia/skia/src/sksl/sksl_fp.inc34
-rw-r--r--gfx/skia/skia/src/sksl/sksl_frag.inc23
-rw-r--r--gfx/skia/skia/src/sksl/sksl_geom.inc24
-rw-r--r--gfx/skia/skia/src/sksl/sksl_gpu.inc299
-rw-r--r--gfx/skia/skia/src/sksl/sksl_interp.inc53
-rw-r--r--gfx/skia/skia/src/sksl/sksl_pipeline.inc3
-rw-r--r--gfx/skia/skia/src/sksl/sksl_vert.inc14
-rw-r--r--gfx/skia/skia/src/sksl/spirv.h870
-rw-r--r--gfx/skia/skia/src/svg/SkSVGCanvas.cpp26
-rw-r--r--gfx/skia/skia/src/svg/SkSVGDevice.cpp1048
-rw-r--r--gfx/skia/skia/src/svg/SkSVGDevice.h76
-rw-r--r--gfx/skia/skia/src/utils/Sk3D.cpp64
-rw-r--r--gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp96
-rw-r--r--gfx/skia/skia/src/utils/SkBase64.cpp163
-rw-r--r--gfx/skia/skia/src/utils/SkBitSet.h67
-rw-r--r--gfx/skia/skia/src/utils/SkCallableTraits.h86
-rw-r--r--gfx/skia/skia/src/utils/SkCamera.cpp397
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.cpp105
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.h60
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp318
-rw-r--r--gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp130
-rw-r--r--gfx/skia/skia/src/utils/SkCharToGlyphCache.h62
-rw-r--r--gfx/skia/skia/src/utils/SkDashPath.cpp461
-rw-r--r--gfx/skia/skia/src/utils/SkDashPathPriv.h52
-rw-r--r--gfx/skia/skia/src/utils/SkEventTracer.cpp63
-rw-r--r--gfx/skia/skia/src/utils/SkFloatToDecimal.cpp184
-rw-r--r--gfx/skia/skia/src/utils/SkFloatToDecimal.h34
-rw-r--r--gfx/skia/skia/src/utils/SkFloatUtils.h173
-rw-r--r--gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp214
-rw-r--r--gfx/skia/skia/src/utils/SkInterpolator.cpp272
-rw-r--r--gfx/skia/skia/src/utils/SkJSON.cpp917
-rw-r--r--gfx/skia/skia/src/utils/SkJSON.h361
-rw-r--r--gfx/skia/skia/src/utils/SkJSONWriter.cpp46
-rw-r--r--gfx/skia/skia/src/utils/SkJSONWriter.h358
-rw-r--r--gfx/skia/skia/src/utils/SkLua.cpp1993
-rw-r--r--gfx/skia/skia/src/utils/SkLuaCanvas.cpp288
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.cpp40
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.h31
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp203
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.h44
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h21
-rw-r--r--gfx/skia/skia/src/utils/SkNWayCanvas.cpp365
-rw-r--r--gfx/skia/skia/src/utils/SkNullCanvas.cpp18
-rw-r--r--gfx/skia/skia/src/utils/SkOSPath.cpp45
-rw-r--r--gfx/skia/skia/src/utils/SkOSPath.h55
-rw-r--r--gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp295
-rw-r--r--gfx/skia/skia/src/utils/SkParse.cpp288
-rw-r--r--gfx/skia/skia/src/utils/SkParseColor.cpp280
-rw-r--r--gfx/skia/skia/src/utils/SkParsePath.cpp269
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.cpp369
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.h57
-rw-r--r--gfx/skia/skia/src/utils/SkPolyUtils.cpp1838
-rw-r--r--gfx/skia/skia/src/utils/SkPolyUtils.h112
-rw-r--r--gfx/skia/skia/src/utils/SkShadowTessellator.cpp1169
-rw-r--r--gfx/skia/skia/src/utils/SkShadowTessellator.h44
-rw-r--r--gfx/skia/skia/src/utils/SkShadowUtils.cpp766
-rw-r--r--gfx/skia/skia/src/utils/SkShaperJSONWriter.cpp236
-rw-r--r--gfx/skia/skia/src/utils/SkShaperJSONWriter.h68
-rw-r--r--gfx/skia/skia/src/utils/SkTextUtils.cpp49
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp11
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_win.cpp11
-rw-r--r--gfx/skia/skia/src/utils/SkUTF.cpp253
-rw-r--r--gfx/skia/skia/src/utils/SkUTF.h68
-rw-r--r--gfx/skia/skia/src/utils/SkWhitelistChecksums.inc50
-rw-r--r--gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp273
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp282
-rw-r--r--gfx/skia/skia/src/utils/mac/SkStream_mac.cpp83
-rw-r--r--gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h25
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp32
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h32
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.cpp153
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.h100
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp235
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h88
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp150
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h44
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h31
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.cpp40
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.h62
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.cpp236
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.h101
-rw-r--r--gfx/skia/skia/src/utils/win/SkObjBase.h25
-rw-r--r--gfx/skia/skia/src/utils/win/SkTScopedComPtr.h86
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL.h165
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL_win.cpp508
-rw-r--r--gfx/skia/skia/src/xml/SkDOM.cpp429
-rw-r--r--gfx/skia/skia/src/xml/SkDOM.h95
-rw-r--r--gfx/skia/skia/src/xml/SkXMLParser.cpp216
-rw-r--r--gfx/skia/skia/src/xml/SkXMLParser.h87
-rw-r--r--gfx/skia/skia/src/xml/SkXMLWriter.cpp303
-rw-r--r--gfx/skia/skia/src/xml/SkXMLWriter.h102
-rw-r--r--gfx/skia/skia/src/xps/SkXPSDevice.cpp2018
-rw-r--r--gfx/skia/skia/src/xps/SkXPSDevice.h282
-rw-r--r--gfx/skia/skia/src/xps/SkXPSDocument.cpp86
-rw-r--r--gfx/skia/skia/third_party/skcms/LICENSE29
-rw-r--r--gfx/skia/skia/third_party/skcms/README.chromium5
-rw-r--r--gfx/skia/skia/third_party/skcms/skcms.cc2570
-rw-r--r--gfx/skia/skia/third_party/skcms/skcms.gni8
-rw-r--r--gfx/skia/skia/third_party/skcms/skcms_internal.h47
-rw-r--r--gfx/skia/skia/third_party/skcms/src/Transform_inl.h1545
-rwxr-xr-xgfx/skia/skia/third_party/skcms/version.sha11
2113 files changed, 504915 insertions, 0 deletions
diff --git a/gfx/skia/LICENSE b/gfx/skia/LICENSE
new file mode 100644
index 0000000000..e74c256cba
--- /dev/null
+++ b/gfx/skia/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/gfx/skia/README b/gfx/skia/README
new file mode 100644
index 0000000000..84e4ecc907
--- /dev/null
+++ b/gfx/skia/README
@@ -0,0 +1,3 @@
+Skia is a complete 2D graphics library for drawing text, geometries, and images.
+
+See full details and build instructions at http://code.google.com/p/skia/wiki/DocRoot
diff --git a/gfx/skia/README_COMMITTING b/gfx/skia/README_COMMITTING
new file mode 100644
index 0000000000..4014ea3c7f
--- /dev/null
+++ b/gfx/skia/README_COMMITTING
@@ -0,0 +1,10 @@
+Any change to Skia requires, at a minimum, both a Mozilla bug tagged with the [skia-upstream]
+whiteboard tag and a corresponding upstream bug with a review request. Any patch that ultimately
+lands in mozilla-central must be reviewed by a Skia submodule peer.
+
+See https://wiki.mozilla.org/Modules/Core#Graphics for current peers.
+
+In most cases the patch will need to have an r+ from upstream before it is eligible to land here.
+
+For information on submitting upstream, see:
+https://sites.google.com/site/skiadocs/developer-documentation/contributing-code/how-to-submit-a-patch
diff --git a/gfx/skia/README_MOZILLA b/gfx/skia/README_MOZILLA
new file mode 100644
index 0000000000..17b6048208
--- /dev/null
+++ b/gfx/skia/README_MOZILLA
@@ -0,0 +1,12 @@
+This is an import of Skia. See skia/include/core/SkMilestone.h for the milestone number.
+
+To update to a new version of Skia:
+
+- Clone Skia from upstream using the instructions here: https://skia.org/user/download
+- Copy the entire source tree from a Skia clone to mozilla-central/gfx/skia/skia
+- cd gfx/skia && ./generate_mozbuild.py
+
+Once that's done, use git status to review the files that have changed. Pay particular attention
+to GrUserConfig.h and SkUserConfig.h, which carry local modifications and should generally not be
+overwritten by the upstream versions.
+
+This process will be made more automatic in the future.
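
Until then, the copy-and-regenerate steps above are easy to script. A minimal sketch in Python
(the ~/skia-upstream and ~/mozilla-central paths are placeholders for wherever the clones
actually live):

    import os
    import shutil
    import subprocess

    # Placeholder paths; adjust to the local checkout layout.
    upstream = os.path.expanduser('~/skia-upstream')
    gfx_skia = os.path.expanduser('~/mozilla-central/gfx/skia')
    dest = os.path.join(gfx_skia, 'skia')

    # Replace the vendored tree with the upstream clone. Note this copies
    # .git and other metadata too; prune anything unwanted afterwards, and
    # restore GrUserConfig.h/SkUserConfig.h if they were clobbered.
    shutil.rmtree(dest, ignore_errors=True)
    shutil.copytree(upstream, dest)

    # Regenerate moz.build from the new sources.
    subprocess.check_call(['./generate_mozbuild.py'], cwd=gfx_skia)
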
diff --git a/gfx/skia/generate_mozbuild.py b/gfx/skia/generate_mozbuild.py
new file mode 100755
index 0000000000..d5c409b657
--- /dev/null
+++ b/gfx/skia/generate_mozbuild.py
@@ -0,0 +1,425 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+import locale
+import subprocess
+from collections import defaultdict
+locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+
+header = """
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['CC_TYPE'] == 'clang-cl':
+ skia_opt_flags += ['-O2']
+ elif CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ skia_opt_flags += ['-O3']
+
+"""
+
+footer = """
+
+# We allow warnings for third-party code that can be updated from upstream.
+AllowCompilerWarnings()
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia',
+ 'skia/include/third_party/skcms',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE-related flags.
+
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-msse4.1']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-mavx']
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += ['-mavx2', '-mf16c', '-mfma']
+elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ CXXFLAGS += CONFIG['NEON_FLAGS']
+elif CONFIG['CPU_ARCH'] == 'aarch64' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += ['-march=armv8-a+crc']
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+if CONFIG['MOZ_ENABLE_SKIA_PDF_SFNTLY']:
+ DEFINES['SK_PDF_USE_SFNTLY'] = 1
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Reduce the strength of the synthetic emboldening used in the FreeType
+# backend (see bug 1600470).
+DEFINES['SK_OUTLINE_EMBOLDEN_DIVISOR'] = 48
+
+# Suppress warnings in third-party code.
+CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-shadow',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+]
+if CONFIG['CC_TYPE'] == 'gcc':
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CC_TYPE'] in ('clang', 'clang-cl'):
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk', 'android'):
+ CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
+
+if CONFIG['MOZ_ENABLE_SKIA_PDF_SFNTLY']:
+ LOCAL_INCLUDES += CONFIG['SFNTLY_INCLUDES']
+"""
+
+import json
+
+platforms = ['linux', 'mac', 'android', 'win']
+
+def parse_sources(output):
+ return set(v.replace('//', 'skia/') for v in output.split() if v.endswith('.cpp') or v.endswith('.S'))
+
+def generate_opt_sources():
+ cpus = [('intel', 'x86', [':sse2', ':ssse3', ':sse41', ':sse42', ':avx', ':hsw']),
+ ('arm', 'arm', [':armv7']),
+ ('arm64', 'arm64', [':arm64', ':crc32']),
+ ('none', 'none', [':none'])]
+
+ opt_sources = {}
+ for key, cpu, deps in cpus:
+ subprocess.check_output('cd skia && bin/gn gen out/{0} --args=\'target_cpu="{1}"\''.format(key, cpu), shell=True)
+ opt_sources[key] = set()
+ for dep in deps:
+ try:
+ output = subprocess.check_output('cd skia && bin/gn desc out/{0} {1} sources'.format(key, dep), shell=True)
+ if output:
+ opt_sources[key].update(parse_sources(output))
+ except subprocess.CalledProcessError as e:
+ if e.output.find('source_set') < 0:
+ raise
+
+ return opt_sources
+
+def generate_platform_sources():
+ sources = {}
+ platform_args = {
+ 'win' : 'win_vc="C:/" win_sdk_version="00.0.00000.0" win_toolchain_version="00.00.00000"'
+ }
+ for plat in platforms:
+ args = platform_args.get(plat, '')
+ output = subprocess.check_output('cd skia && bin/gn gen out/{0} --args=\'target_os="{0}" {1}\' > /dev/null && bin/gn desc out/{0} :skia sources'.format(plat, args), shell=True)
+ if output:
+ sources[plat] = parse_sources(output)
+
+ plat_deps = {':fontmgr_win' : 'win', ':fontmgr_win_gdi' : 'win'}
+ for dep, key in plat_deps.items():
+ output = subprocess.check_output('cd skia && bin/gn desc out/{1} {0} sources'.format(dep, key), shell=True)
+ if output:
+ sources[key].update(parse_sources(output))
+
+ deps = {':pdf' : 'pdf'}
+ for dep, key in deps.items():
+ output = subprocess.check_output('cd skia && bin/gn desc out/linux {} sources'.format(dep), shell=True)
+ if output:
+ sources[key] = parse_sources(output)
+
+ return dict(sources.items() + generate_opt_sources().items())
+
+
+def generate_separated_sources(platform_sources):
+ blacklist = [
+ 'skia/src/android/',
+ 'skia/src/atlastext/',
+ 'skia/src/c/',
+ 'skia/src/effects/',
+ 'skia/src/fonts/',
+ 'skia/src/ports/SkImageEncoder',
+ 'skia/src/ports/SkImageGenerator',
+ 'SkBitmapRegion',
+ 'SkLite',
+ 'SkLight',
+ 'SkNormal',
+ 'codec',
+ 'SkWGL',
+ 'SkMemory_malloc',
+ 'third_party',
+ 'Sk3D',
+ 'SkAnimCodecPlayer',
+ 'SkCamera',
+ 'SkCanvasStack',
+ 'SkCanvasStateUtils',
+ 'SkFrontBufferedStream',
+ 'SkInterpolator',
+ 'JSON',
+ 'SkMultiPictureDocument',
+ 'SkNullCanvas',
+ 'SkNWayCanvas',
+ 'SkOverdrawCanvas',
+ 'SkPaintFilterCanvas',
+ 'SkParseColor',
+ 'SkWhitelistTypefaces',
+ 'SkXPS',
+ 'SkCreateCGImageRef',
+ 'skia/src/ports/SkGlobalInitialization',
+ ]
+
+ def isblacklisted(value):
+ for item in blacklist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ separated = defaultdict(set, {
+ 'common': {
+ 'skia/src/codec/SkMasks.cpp',
+ 'skia/src/effects/imagefilters/SkBlurImageFilter.cpp',
+ 'skia/src/effects/imagefilters/SkComposeImageFilter.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkGlobalInitialization_default.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ 'skia/third_party/skcms/skcms.cc',
+ },
+ 'android': {
+ # 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ # 'skia/src/ports/SkFontHost_FreeType.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ # 'skia/src/ports/SkTime_Unix.cpp',
+ # 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ },
+ 'linux': {
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ },
+ 'intel': set(),
+ 'arm': set(),
+ 'arm64': set(),
+ 'none': set(),
+ 'pdf': set()
+ })
+
+ for plat in platform_sources.keys():
+ for value in platform_sources[plat]:
+ if isblacklisted(value):
+ continue
+
+ if value in separated['common']:
+ continue
+
+ key = plat
+
+ if all(value in platform_sources.get(p, {})
+ for p in platforms if p != plat):
+ key = 'common'
+
+ separated[key].add(value)
+
+ return separated
+
+def uniq(seq):
+ seen = set()
+ seen_add = seen.add
+ return [ x for x in seq if x not in seen and not seen_add(x)]
+
+def write_cflags(f, values, subsearch, cflag, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ if isinstance(subsearch, str):
+ subsearch = [ subsearch ]
+
+ def iswhitelisted(value):
+ for item in subsearch:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ for val in val_list:
+ if iswhitelisted(val):
+ write_indent(indent)
+ f.write("SOURCES[\'" + val + "\'].flags += " + cflag + "\n")
+
+opt_whitelist = [
+ 'SkOpts',
+ 'SkBitmapProcState',
+ 'SkBitmapScaler',
+ 'SkBlitRow',
+ 'SkBlitter',
+ 'SkSpriteBlitter',
+ 'SkMatrix.cpp',
+ 'skcms',
+]
+
+# Unfortunately for now the gpu and pathops directories are
+# non-unifiable. Keep track of this and fix it.
+unified_blacklist = [
+ 'FontHost',
+ 'SkBitmapProcState_matrixProcs.cpp',
+ 'SkBlitter_A8.cpp',
+ 'SkBlitter_ARGB32.cpp',
+ 'SkBlitter_RGB16.cpp',
+ 'SkBlitter_Sprite.cpp',
+ 'SkScan_Antihair.cpp',
+ 'SkScan_AntiPath.cpp',
+ 'SkScan_DAAPath.cpp',
+ 'SkParse.cpp',
+ 'SkPDFFont.cpp',
+ 'SkPDFDevice.cpp',
+ 'SkPDFType1Font.cpp',
+ 'SkPictureData.cpp',
+ 'SkColorSpace',
+ 'SkPathOpsDebug.cpp',
+ 'SkParsePath.cpp',
+ 'SkRecorder.cpp',
+ 'SkMiniRecorder.cpp',
+ 'SkXfermode',
+ 'SkMatrix44.cpp',
+ 'SkRTree.cpp',
+ 'SkVertices.cpp',
+ 'SkSLHCodeGenerator.cpp',
+ 'SkSLLexer.cpp',
+] + opt_whitelist
+
+def write_sources(f, values, indent):
+ def isblacklisted(value):
+ for item in unified_blacklist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ sources = {}
+ sources['nonunified'] = set()
+ sources['unified'] = set()
+
+ for item in values:
+ if isblacklisted(item):
+ sources['nonunified'].add(item)
+ else:
+ sources['unified'].add(item)
+
+ write_list(f, "UNIFIED_SOURCES", sources['unified'], indent)
+ write_list(f, "SOURCES", sources['nonunified'], indent)
+
+def write_list(f, name, values, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ write_indent(indent)
+ f.write(name + ' += [\n')
+ for val in val_list:
+ write_indent(indent + 4)
+ f.write('\'' + val + '\',\n')
+
+ write_indent(indent)
+ f.write(']\n')
+
+def write_mozbuild(sources):
+ filename = 'moz.build'
+ f = open(filename, 'w')
+
+ f.write(header)
+
+ write_sources(f, sources['common'], 0)
+ write_cflags(f, sources['common'], opt_whitelist, 'skia_opt_flags', 0)
+
+ f.write("if CONFIG['MOZ_ENABLE_SKIA_PDF']:\n")
+ write_sources(f, sources['pdf'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':\n")
+ write_sources(f, sources['android'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('cocoa', 'uikit'):\n")
+ write_sources(f, sources['mac'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':\n")
+ write_sources(f, sources['linux'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':\n")
+ write_list(f, "SOURCES", sources['win'], 4)
+
+ f.write("if CONFIG['INTEL_ARCHITECTURE']:\n")
+ write_sources(f, sources['intel'], 4)
+ write_cflags(f, sources['intel'], opt_whitelist, 'skia_opt_flags', 4)
+
+ if sources['arm']:
+ f.write("elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):\n")
+ write_sources(f, sources['arm'], 4)
+ write_cflags(f, sources['arm'], opt_whitelist, 'skia_opt_flags', 4)
+
+ if sources['arm64']:
+ f.write("elif CONFIG['CPU_ARCH'] == 'aarch64':\n")
+ write_sources(f, sources['arm64'], 4)
+ write_cflags(f, sources['arm64'], opt_whitelist, 'skia_opt_flags', 4)
+
+ if sources['none']:
+ f.write("else:\n")
+ write_sources(f, sources['none'], 4)
+
+ f.write(footer)
+
+ f.close()
+
+ print('Wrote ' + filename)
+
+def main():
+ platform_sources = generate_platform_sources()
+ separated_sources = generate_separated_sources(platform_sources)
+ write_mozbuild(separated_sources)
+
+
+if __name__ == '__main__':
+ main()
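
The heart of this script is the promotion rule at the end of generate_separated_sources():
a source file that every platform builds is hoisted into the shared 'common' bucket, and only
the leftovers stay platform-specific. A toy run of just that rule, with made-up file sets
standing in for the real gn output:

    from collections import defaultdict

    platforms = ['linux', 'mac', 'android', 'win']
    platform_sources = {
        'linux':   {'SkPath.cpp', 'SkFontHost_cairo.cpp'},
        'mac':     {'SkPath.cpp', 'SkFontHost_mac.cpp'},
        'android': {'SkPath.cpp', 'SkFontHost_cairo.cpp'},
        'win':     {'SkPath.cpp', 'SkFontHost_win.cpp'},
    }

    separated = defaultdict(set)
    for plat, values in platform_sources.items():
        for value in values:
            key = plat
            # Built on every platform? Then it belongs in 'common'.
            if all(value in platform_sources.get(p, {})
                   for p in platforms if p != plat):
                key = 'common'
            separated[key].add(value)

    print(sorted(separated['common']))  # ['SkPath.cpp']
    print(sorted(separated['linux']))   # ['SkFontHost_cairo.cpp']

The real function additionally drops blacklisted files and seeds 'common' with a hand-picked
set of sources before applying the rule.
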
diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
new file mode 100755
index 0000000000..66b21aec25
--- /dev/null
+++ b/gfx/skia/moz.build
@@ -0,0 +1,501 @@
+
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['CC_TYPE'] == 'clang-cl':
+ skia_opt_flags += ['-O2']
+ elif CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ skia_opt_flags += ['-O3']
+
+UNIFIED_SOURCES += [
+ 'skia/src/codec/SkMasks.cpp',
+ 'skia/src/core/SkAAClip.cpp',
+ 'skia/src/core/SkAlphaRuns.cpp',
+ 'skia/src/core/SkAnalyticEdge.cpp',
+ 'skia/src/core/SkAnnotation.cpp',
+ 'skia/src/core/SkArenaAlloc.cpp',
+ 'skia/src/core/SkATrace.cpp',
+ 'skia/src/core/SkAutoPixmapStorage.cpp',
+ 'skia/src/core/SkBBHFactory.cpp',
+ 'skia/src/core/SkBigPicture.cpp',
+ 'skia/src/core/SkBitmap.cpp',
+ 'skia/src/core/SkBitmapCache.cpp',
+ 'skia/src/core/SkBitmapController.cpp',
+ 'skia/src/core/SkBitmapDevice.cpp',
+ 'skia/src/core/SkBlendMode.cpp',
+ 'skia/src/core/SkBlurMask.cpp',
+ 'skia/src/core/SkBlurMF.cpp',
+ 'skia/src/core/SkBuffer.cpp',
+ 'skia/src/core/SkCachedData.cpp',
+ 'skia/src/core/SkCanvas.cpp',
+ 'skia/src/core/SkCanvasPriv.cpp',
+ 'skia/src/core/SkClipStack.cpp',
+ 'skia/src/core/SkClipStackDevice.cpp',
+ 'skia/src/core/SkColor.cpp',
+ 'skia/src/core/SkColorFilter.cpp',
+ 'skia/src/core/SkColorFilter_Matrix.cpp',
+ 'skia/src/core/SkContourMeasure.cpp',
+ 'skia/src/core/SkConvertPixels.cpp',
+ 'skia/src/core/SkConvolver.cpp',
+ 'skia/src/core/SkCpu.cpp',
+ 'skia/src/core/SkCubicClipper.cpp',
+ 'skia/src/core/SkCubicMap.cpp',
+ 'skia/src/core/SkData.cpp',
+ 'skia/src/core/SkDataTable.cpp',
+ 'skia/src/core/SkDebug.cpp',
+ 'skia/src/core/SkDeferredDisplayList.cpp',
+ 'skia/src/core/SkDeferredDisplayListRecorder.cpp',
+ 'skia/src/core/SkDeque.cpp',
+ 'skia/src/core/SkDescriptor.cpp',
+ 'skia/src/core/SkDevice.cpp',
+ 'skia/src/core/SkDistanceFieldGen.cpp',
+ 'skia/src/core/SkDocument.cpp',
+ 'skia/src/core/SkDraw.cpp',
+ 'skia/src/core/SkDraw_atlas.cpp',
+ 'skia/src/core/SkDraw_text.cpp',
+ 'skia/src/core/SkDraw_vertices.cpp',
+ 'skia/src/core/SkDrawable.cpp',
+ 'skia/src/core/SkDrawLooper.cpp',
+ 'skia/src/core/SkDrawShadowInfo.cpp',
+ 'skia/src/core/SkEdge.cpp',
+ 'skia/src/core/SkEdgeBuilder.cpp',
+ 'skia/src/core/SkEdgeClipper.cpp',
+ 'skia/src/core/SkExecutor.cpp',
+ 'skia/src/core/SkFlattenable.cpp',
+ 'skia/src/core/SkFont.cpp',
+ 'skia/src/core/SkFontDescriptor.cpp',
+ 'skia/src/core/SkFontLCDConfig.cpp',
+ 'skia/src/core/SkFontMgr.cpp',
+ 'skia/src/core/SkFontStream.cpp',
+ 'skia/src/core/SkGaussFilter.cpp',
+ 'skia/src/core/SkGeometry.cpp',
+ 'skia/src/core/SkGlobalInitialization_core.cpp',
+ 'skia/src/core/SkGlyph.cpp',
+ 'skia/src/core/SkGlyphBuffer.cpp',
+ 'skia/src/core/SkGlyphRun.cpp',
+ 'skia/src/core/SkGlyphRunPainter.cpp',
+ 'skia/src/core/SkGpuBlurUtils.cpp',
+ 'skia/src/core/SkGraphics.cpp',
+ 'skia/src/core/SkHalf.cpp',
+ 'skia/src/core/SkICC.cpp',
+ 'skia/src/core/SkImageFilter.cpp',
+ 'skia/src/core/SkImageFilterCache.cpp',
+ 'skia/src/core/SkImageFilterTypes.cpp',
+ 'skia/src/core/SkImageGenerator.cpp',
+ 'skia/src/core/SkImageInfo.cpp',
+ 'skia/src/core/SkLatticeIter.cpp',
+ 'skia/src/core/SkLineClipper.cpp',
+ 'skia/src/core/SkLocalMatrixImageFilter.cpp',
+ 'skia/src/core/SkMallocPixelRef.cpp',
+ 'skia/src/core/SkMask.cpp',
+ 'skia/src/core/SkMaskBlurFilter.cpp',
+ 'skia/src/core/SkMaskCache.cpp',
+ 'skia/src/core/SkMaskFilter.cpp',
+ 'skia/src/core/SkMaskGamma.cpp',
+ 'skia/src/core/SkMath.cpp',
+ 'skia/src/core/SkMatrixImageFilter.cpp',
+ 'skia/src/core/SkMD5.cpp',
+ 'skia/src/core/SkMipMap.cpp',
+ 'skia/src/core/SkModeColorFilter.cpp',
+ 'skia/src/core/SkMultiPictureDraw.cpp',
+ 'skia/src/core/SkPaint.cpp',
+ 'skia/src/core/SkPaintPriv.cpp',
+ 'skia/src/core/SkPath.cpp',
+ 'skia/src/core/SkPath_serial.cpp',
+ 'skia/src/core/SkPathEffect.cpp',
+ 'skia/src/core/SkPathMeasure.cpp',
+ 'skia/src/core/SkPathRef.cpp',
+ 'skia/src/core/SkPicture.cpp',
+ 'skia/src/core/SkPictureFlat.cpp',
+ 'skia/src/core/SkPictureImageGenerator.cpp',
+ 'skia/src/core/SkPicturePlayback.cpp',
+ 'skia/src/core/SkPictureRecord.cpp',
+ 'skia/src/core/SkPictureRecorder.cpp',
+ 'skia/src/core/SkPixelRef.cpp',
+ 'skia/src/core/SkPixmap.cpp',
+ 'skia/src/core/SkPoint.cpp',
+ 'skia/src/core/SkPoint3.cpp',
+ 'skia/src/core/SkPromiseImageTexture.cpp',
+ 'skia/src/core/SkPtrRecorder.cpp',
+ 'skia/src/core/SkQuadClipper.cpp',
+ 'skia/src/core/SkRasterClip.cpp',
+ 'skia/src/core/SkRasterPipeline.cpp',
+ 'skia/src/core/SkRasterPipelineBlitter.cpp',
+ 'skia/src/core/SkReadBuffer.cpp',
+ 'skia/src/core/SkRecord.cpp',
+ 'skia/src/core/SkRecordDraw.cpp',
+ 'skia/src/core/SkRecordedDrawable.cpp',
+ 'skia/src/core/SkRecordOpts.cpp',
+ 'skia/src/core/SkRecords.cpp',
+ 'skia/src/core/SkRect.cpp',
+ 'skia/src/core/SkRegion.cpp',
+ 'skia/src/core/SkRegion_path.cpp',
+ 'skia/src/core/SkRemoteGlyphCache.cpp',
+ 'skia/src/core/SkResourceCache.cpp',
+ 'skia/src/core/SkRRect.cpp',
+ 'skia/src/core/SkRWBuffer.cpp',
+ 'skia/src/core/SkScalar.cpp',
+ 'skia/src/core/SkScalerContext.cpp',
+ 'skia/src/core/SkScan.cpp',
+ 'skia/src/core/SkScan_AAAPath.cpp',
+ 'skia/src/core/SkScan_Hairline.cpp',
+ 'skia/src/core/SkScan_Path.cpp',
+ 'skia/src/core/SkSemaphore.cpp',
+ 'skia/src/core/SkSharedMutex.cpp',
+ 'skia/src/core/SkSpecialImage.cpp',
+ 'skia/src/core/SkSpecialSurface.cpp',
+ 'skia/src/core/SkSpinlock.cpp',
+ 'skia/src/core/SkStream.cpp',
+ 'skia/src/core/SkStrike.cpp',
+ 'skia/src/core/SkStrikeCache.cpp',
+ 'skia/src/core/SkStrikeForGPU.cpp',
+ 'skia/src/core/SkStrikeSpec.cpp',
+ 'skia/src/core/SkString.cpp',
+ 'skia/src/core/SkStringUtils.cpp',
+ 'skia/src/core/SkStroke.cpp',
+ 'skia/src/core/SkStrokeRec.cpp',
+ 'skia/src/core/SkStrokerPriv.cpp',
+ 'skia/src/core/SkSurfaceCharacterization.cpp',
+ 'skia/src/core/SkSwizzle.cpp',
+ 'skia/src/core/SkTaskGroup.cpp',
+ 'skia/src/core/SkTextBlob.cpp',
+ 'skia/src/core/SkThreadID.cpp',
+ 'skia/src/core/SkTime.cpp',
+ 'skia/src/core/SkTLS.cpp',
+ 'skia/src/core/SkTSearch.cpp',
+ 'skia/src/core/SkTypeface.cpp',
+ 'skia/src/core/SkTypeface_remote.cpp',
+ 'skia/src/core/SkTypefaceCache.cpp',
+ 'skia/src/core/SkUnPreMultiply.cpp',
+ 'skia/src/core/SkUtils.cpp',
+ 'skia/src/core/SkVertState.cpp',
+ 'skia/src/core/SkVM.cpp',
+ 'skia/src/core/SkVMBlitter.cpp',
+ 'skia/src/core/SkWriteBuffer.cpp',
+ 'skia/src/core/SkWriter32.cpp',
+ 'skia/src/core/SkYUVASizeInfo.cpp',
+ 'skia/src/core/SkYUVMath.cpp',
+ 'skia/src/core/SkYUVPlanesCache.cpp',
+ 'skia/src/effects/imagefilters/SkBlurImageFilter.cpp',
+ 'skia/src/effects/imagefilters/SkComposeImageFilter.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/image/SkImage.cpp',
+ 'skia/src/image/SkImage_Lazy.cpp',
+ 'skia/src/image/SkImage_Raster.cpp',
+ 'skia/src/image/SkSurface.cpp',
+ 'skia/src/image/SkSurface_Raster.cpp',
+ 'skia/src/images/SkImageEncoder.cpp',
+ 'skia/src/lazy/SkDiscardableMemoryPool.cpp',
+ 'skia/src/pathops/SkAddIntersections.cpp',
+ 'skia/src/pathops/SkDConicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicToQuads.cpp',
+ 'skia/src/pathops/SkDLineIntersection.cpp',
+ 'skia/src/pathops/SkDQuadLineIntersection.cpp',
+ 'skia/src/pathops/SkIntersections.cpp',
+ 'skia/src/pathops/SkOpAngle.cpp',
+ 'skia/src/pathops/SkOpBuilder.cpp',
+ 'skia/src/pathops/SkOpCoincidence.cpp',
+ 'skia/src/pathops/SkOpContour.cpp',
+ 'skia/src/pathops/SkOpCubicHull.cpp',
+ 'skia/src/pathops/SkOpEdgeBuilder.cpp',
+ 'skia/src/pathops/SkOpSegment.cpp',
+ 'skia/src/pathops/SkOpSpan.cpp',
+ 'skia/src/pathops/SkPathOpsAsWinding.cpp',
+ 'skia/src/pathops/SkPathOpsCommon.cpp',
+ 'skia/src/pathops/SkPathOpsConic.cpp',
+ 'skia/src/pathops/SkPathOpsCubic.cpp',
+ 'skia/src/pathops/SkPathOpsCurve.cpp',
+ 'skia/src/pathops/SkPathOpsLine.cpp',
+ 'skia/src/pathops/SkPathOpsOp.cpp',
+ 'skia/src/pathops/SkPathOpsQuad.cpp',
+ 'skia/src/pathops/SkPathOpsRect.cpp',
+ 'skia/src/pathops/SkPathOpsSimplify.cpp',
+ 'skia/src/pathops/SkPathOpsTightBounds.cpp',
+ 'skia/src/pathops/SkPathOpsTSect.cpp',
+ 'skia/src/pathops/SkPathOpsTypes.cpp',
+ 'skia/src/pathops/SkPathOpsWinding.cpp',
+ 'skia/src/pathops/SkPathWriter.cpp',
+ 'skia/src/pathops/SkReduceOrder.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkGlobalInitialization_default.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkOSFile_stdio.cpp',
+ 'skia/src/sfnt/SkOTTable_name.cpp',
+ 'skia/src/sfnt/SkOTUtils.cpp',
+ 'skia/src/shaders/gradients/Sk4fGradientBase.cpp',
+ 'skia/src/shaders/gradients/Sk4fLinearGradient.cpp',
+ 'skia/src/shaders/gradients/SkGradientShader.cpp',
+ 'skia/src/shaders/gradients/SkLinearGradient.cpp',
+ 'skia/src/shaders/gradients/SkRadialGradient.cpp',
+ 'skia/src/shaders/gradients/SkSweepGradient.cpp',
+ 'skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp',
+ 'skia/src/shaders/SkBitmapProcShader.cpp',
+ 'skia/src/shaders/SkColorFilterShader.cpp',
+ 'skia/src/shaders/SkColorShader.cpp',
+ 'skia/src/shaders/SkComposeShader.cpp',
+ 'skia/src/shaders/SkImageShader.cpp',
+ 'skia/src/shaders/SkLocalMatrixShader.cpp',
+ 'skia/src/shaders/SkPerlinNoiseShader.cpp',
+ 'skia/src/shaders/SkPictureShader.cpp',
+ 'skia/src/shaders/SkRTShader.cpp',
+ 'skia/src/shaders/SkShader.cpp',
+ 'skia/src/sksl/ir/SkSLSetting.cpp',
+ 'skia/src/sksl/ir/SkSLSymbolTable.cpp',
+ 'skia/src/sksl/ir/SkSLType.cpp',
+ 'skia/src/sksl/ir/SkSLVariableReference.cpp',
+ 'skia/src/sksl/SkSLASTNode.cpp',
+ 'skia/src/sksl/SkSLByteCode.cpp',
+ 'skia/src/sksl/SkSLByteCodeGenerator.cpp',
+ 'skia/src/sksl/SkSLCFGGenerator.cpp',
+ 'skia/src/sksl/SkSLCompiler.cpp',
+ 'skia/src/sksl/SkSLIRGenerator.cpp',
+ 'skia/src/sksl/SkSLParser.cpp',
+ 'skia/src/sksl/SkSLSectionAndParameterHelper.cpp',
+ 'skia/src/sksl/SkSLString.cpp',
+ 'skia/src/sksl/SkSLUtil.cpp',
+ 'skia/src/utils/mac/SkStream_mac.cpp',
+ 'skia/src/utils/SkBase64.cpp',
+ 'skia/src/utils/SkCharToGlyphCache.cpp',
+ 'skia/src/utils/SkDashPath.cpp',
+ 'skia/src/utils/SkEventTracer.cpp',
+ 'skia/src/utils/SkFloatToDecimal.cpp',
+ 'skia/src/utils/SkMatrix22.cpp',
+ 'skia/src/utils/SkOSPath.cpp',
+ 'skia/src/utils/SkPatchUtils.cpp',
+ 'skia/src/utils/SkPolyUtils.cpp',
+ 'skia/src/utils/SkShadowTessellator.cpp',
+ 'skia/src/utils/SkShadowUtils.cpp',
+ 'skia/src/utils/SkTextUtils.cpp',
+ 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ 'skia/src/utils/SkThreadUtils_win.cpp',
+ 'skia/src/utils/SkUTF.cpp',
+ 'skia/src/utils/win/SkAutoCoInitialize.cpp',
+ 'skia/src/utils/win/SkDWrite.cpp',
+ 'skia/src/utils/win/SkDWriteFontFileStream.cpp',
+ 'skia/src/utils/win/SkDWriteGeometrySink.cpp',
+ 'skia/src/utils/win/SkHRESULT.cpp',
+ 'skia/src/utils/win/SkIStream.cpp',
+]
+SOURCES += [
+ 'skia/src/core/SkBitmapProcState.cpp',
+ 'skia/src/core/SkBitmapProcState_matrixProcs.cpp',
+ 'skia/src/core/SkBitmapScaler.cpp',
+ 'skia/src/core/SkBlitRow_D32.cpp',
+ 'skia/src/core/SkBlitter.cpp',
+ 'skia/src/core/SkBlitter_A8.cpp',
+ 'skia/src/core/SkBlitter_ARGB32.cpp',
+ 'skia/src/core/SkBlitter_RGB565.cpp',
+ 'skia/src/core/SkBlitter_Sprite.cpp',
+ 'skia/src/core/SkColorSpace.cpp',
+ 'skia/src/core/SkColorSpaceXformSteps.cpp',
+ 'skia/src/core/SkMatrix.cpp',
+ 'skia/src/core/SkMatrix44.cpp',
+ 'skia/src/core/SkMiniRecorder.cpp',
+ 'skia/src/core/SkOpts.cpp',
+ 'skia/src/core/SkPictureData.cpp',
+ 'skia/src/core/SkRecorder.cpp',
+ 'skia/src/core/SkRTree.cpp',
+ 'skia/src/core/SkScan_Antihair.cpp',
+ 'skia/src/core/SkScan_AntiPath.cpp',
+ 'skia/src/core/SkSpriteBlitter_ARGB32.cpp',
+ 'skia/src/core/SkSpriteBlitter_RGB565.cpp',
+ 'skia/src/core/SkVertices.cpp',
+ 'skia/src/core/SkXfermode.cpp',
+ 'skia/src/core/SkXfermodeInterpretation.cpp',
+ 'skia/src/pathops/SkPathOpsDebug.cpp',
+ 'skia/src/sksl/SkSLLexer.cpp',
+ 'skia/src/utils/SkParse.cpp',
+ 'skia/src/utils/SkParsePath.cpp',
+ 'skia/third_party/skcms/skcms.cc',
+]
+SOURCES['skia/src/core/SkBitmapProcState.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBitmapProcState_matrixProcs.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBitmapScaler.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitRow_D32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_A8.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_ARGB32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_RGB565.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_Sprite.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkMatrix.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkOpts.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter_ARGB32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter_RGB565.cpp'].flags += skia_opt_flags
+SOURCES['skia/third_party/skcms/skcms.cc'].flags += skia_opt_flags
+if CONFIG['MOZ_ENABLE_SKIA_PDF']:
+ UNIFIED_SOURCES += [
+ 'skia/src/pdf/SkClusterator.cpp',
+ 'skia/src/pdf/SkDeflate.cpp',
+ 'skia/src/pdf/SkJpegInfo.cpp',
+ 'skia/src/pdf/SkKeyedImage.cpp',
+ 'skia/src/pdf/SkPDFBitmap.cpp',
+ 'skia/src/pdf/SkPDFDocument.cpp',
+ 'skia/src/pdf/SkPDFFormXObject.cpp',
+ 'skia/src/pdf/SkPDFGradientShader.cpp',
+ 'skia/src/pdf/SkPDFGraphicStackState.cpp',
+ 'skia/src/pdf/SkPDFGraphicState.cpp',
+ 'skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp',
+ 'skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp',
+ 'skia/src/pdf/SkPDFMetadata.cpp',
+ 'skia/src/pdf/SkPDFResourceDict.cpp',
+ 'skia/src/pdf/SkPDFShader.cpp',
+ 'skia/src/pdf/SkPDFSubsetFont.cpp',
+ 'skia/src/pdf/SkPDFTag.cpp',
+ 'skia/src/pdf/SkPDFTypes.cpp',
+ 'skia/src/pdf/SkPDFUtils.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/pdf/SkPDFDevice.cpp',
+ 'skia/src/pdf/SkPDFFont.cpp',
+ 'skia/src/pdf/SkPDFType1Font.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('cocoa', 'uikit'):
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_mac.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ SOURCES += [
+ 'skia/src/ports/SkDebug_win.cpp',
+ 'skia/src/ports/SkFontHost_win.cpp',
+ 'skia/src/ports/SkFontMgr_win_dw.cpp',
+ 'skia/src/ports/SkFontMgr_win_dw_factory.cpp',
+ 'skia/src/ports/SkOSFile_win.cpp',
+ 'skia/src/ports/SkOSLibrary_win.cpp',
+ 'skia/src/ports/SkScalerContext_win_dw.cpp',
+ 'skia/src/ports/SkTLS_win.cpp',
+ 'skia/src/ports/SkTypeface_win_dw.cpp',
+ ]
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES += [
+ 'skia/src/opts/SkOpts_avx.cpp',
+ 'skia/src/opts/SkOpts_hsw.cpp',
+ 'skia/src/opts/SkOpts_sse41.cpp',
+ 'skia/src/opts/SkOpts_sse42.cpp',
+ 'skia/src/opts/SkOpts_ssse3.cpp',
+ ]
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += skia_opt_flags
+elif CONFIG['CPU_ARCH'] == 'aarch64':
+ SOURCES += [
+ 'skia/src/opts/SkOpts_crc32.cpp',
+ ]
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += skia_opt_flags
+
+
+# We allow warnings for third-party code that can be updated from upstream.
+AllowCompilerWarnings()
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia',
+ 'skia/include/third_party/skcms',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE-related flags.
+
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-msse4.1']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-mavx']
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += ['-mavx2', '-mf16c', '-mfma']
+elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ CXXFLAGS += CONFIG['NEON_FLAGS']
+elif CONFIG['CPU_ARCH'] == 'aarch64' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += ['-march=armv8-a+crc']
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+if CONFIG['MOZ_ENABLE_SKIA_PDF_SFNTLY']:
+ DEFINES['SK_PDF_USE_SFNTLY'] = 1
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Suppress warnings in third-party code.
+CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-shadow',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+]
+if CONFIG['CC_TYPE'] == 'gcc':
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CC_TYPE'] in ('clang', 'clang-cl'):
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk', 'android'):
+ CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
+
+if CONFIG['MOZ_ENABLE_SKIA_PDF_SFNTLY']:
+ LOCAL_INCLUDES += CONFIG['SFNTLY_INCLUDES']
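
Because moz.build is autogenerated, any hand edit will be silently lost on the next import.
One way to confirm the checked-in file is still in sync is to rerun the generator and ask git
whether anything changed; a rough sketch, assuming it is run from gfx/skia with the gn setup
the generator expects:

    import subprocess

    # Rewrite moz.build from the current skia/ tree...
    subprocess.check_call(['./generate_mozbuild.py'])

    # ...then fail if the result differs from what is checked in.
    if subprocess.call(['git', 'diff', '--exit-code', '--', 'moz.build']) != 0:
        raise SystemExit('moz.build is out of sync with generate_mozbuild.py')
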
diff --git a/gfx/skia/patches/README b/gfx/skia/patches/README
new file mode 100644
index 0000000000..8fd2c5396a
--- /dev/null
+++ b/gfx/skia/patches/README
@@ -0,0 +1,2 @@
+We no longer keep a local queue of patches against upstream. The protocol now is to upstream
+all patches before they land in mozilla-central.
diff --git a/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
new file mode 100644
index 0000000000..f8e76dbb90
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
@@ -0,0 +1,66 @@
+From 27a914815e757ed12523edf968c9da134dabeaf8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:10:44 -0400
+Subject: [PATCH 01/10] Bug 755869 - [4] Re-apply bug 687189 - Implement
+ SkPaint::getPosTextPath r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPaint.h | 3 +++
+ gfx/skia/src/core/SkPaint.cpp | 27 +++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index 1930db1..ff37d77 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -813,6 +813,9 @@ public:
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const SkGlyph& getGlyphMetrics(uint16_t);
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+index 1b74fa1..4c119aa 100644
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1355,6 +1355,33 @@ void SkPaint::getTextPath(const void* textData, size_t length,
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
new file mode 100644
index 0000000000..8fe0135fbb
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
@@ -0,0 +1,34 @@
+From 2dd8c789fc4ad3b5323c2c29f3e982d185f5b5d9 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 22:33:38 -0400
+Subject: [PATCH 1/9] Bug 777614 - Re-add our SkUserConfig.h r=nrc
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 353272c..fbfbfe0 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -184,5 +184,16 @@
+ directories from your include search path when you're not building the GPU
+ backend. Defaults to 1 (build the GPU code).
+ */
+-//#define SK_SUPPORT_GPU 1
++#define SK_SUPPORT_GPU 0
++
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
new file mode 100644
index 0000000000..20155977e2
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
@@ -0,0 +1,26 @@
+From 81ff1a8f5c2a7cc9e8b853101b995433a0c0fa37 Mon Sep 17 00:00:00 2001
+From: Jacek Caban <jacek@codeweavers.com>
+Date: Thu, 18 Oct 2012 15:25:08 +0200
+Subject: [PATCH] Bug 803063 - Skia cross compilation for Windows fails on
+ case sensitive OS
+
+---
+ gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+index 370616e..b647ada 100644
+--- a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
++++ b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+@@ -13,7 +13,7 @@
+ SK_DEFINE_INST_COUNT(SkAdvancedTypefaceMetrics)
+
+ #if defined(SK_BUILD_FOR_WIN)
+-#include <DWrite.h>
++#include <dwrite.h>
+ #endif
+
+ #if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+--
+1.7.8.6
+
diff --git a/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
new file mode 100644
index 0000000000..aa1fadb435
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
@@ -0,0 +1,38 @@
+From 58861c38751adf1f4ef3f67f8e85f5c36f1c43a5 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 17 Jul 2013 16:28:07 -0400
+Subject: [PATCH] Bug 895086 - Remove unused find_from_uniqueID() function from
+ SkFontHost_linux
+
+---
+ gfx/skia/src/ports/SkFontHost_linux.cpp | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_linux.cpp b/gfx/skia/src/ports/SkFontHost_linux.cpp
+index df21014..05b73dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_linux.cpp
++++ b/gfx/skia/src/ports/SkFontHost_linux.cpp
+@@ -117,20 +117,6 @@ static FamilyRec* find_family(const SkTypeface* member) {
+ return NULL;
+ }
+
+-static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
+- FamilyRec* curr = gFamilyHead;
+- while (curr != NULL) {
+- for (int i = 0; i < 4; i++) {
+- SkTypeface* face = curr->fFaces[i];
+- if (face != NULL && face->uniqueID() == uniqueID) {
+- return face;
+- }
+- }
+- curr = curr->fNext;
+- }
+- return NULL;
+-}
+-
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+--
+1.8.3.1
+
diff --git a/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
new file mode 100644
index 0000000000..d396b4ed12
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
@@ -0,0 +1,30 @@
+From f310d7e8b8d9cf6870c739650324bb585b591c0c Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:11:32 -0400
+Subject: [PATCH 02/10] Bug 755869 - [5] Re-apply bug 688366 - Fix Skia
+ marking radial gradients with the same radius as
+ invalid. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 6de820b..59ba48c 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1911,7 +1911,10 @@ public:
+ SkPMColor* SK_RESTRICT dstC = dstCParam;
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
new file mode 100644
index 0000000000..6ac2c9179d
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
@@ -0,0 +1,50 @@
+From: George Wright <george@mozilla.com>
+Date: Mon, 14 Jan 2013 17:59:09 -0500
+Subject: Bug 848491 - Re-apply Bug 795549 - Move TileProc functions into their own file to ensure they only exist once in a library
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index b9dbf1b..729ce4e 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -37,34 +37,9 @@ static inline void sk_memset32_dither(uint32_t dst[], uint32_t v0, uint32_t v1,
+ }
+ }
+
+-// Clamp
+-
+-static inline SkFixed clamp_tileproc(SkFixed x) {
+- return SkClampMax(x, 0xFFFF);
+-}
+-
+-// Repeat
+-
+-static inline SkFixed repeat_tileproc(SkFixed x) {
+- return x & 0xFFFF;
+-}
+-
+-// Mirror
+-
+-// Visual Studio 2010 (MSC_VER=1600) optimizes bit-shift code incorrectly.
+-// See http://code.google.com/p/skia/issues/detail?id=472
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", off)
+-#endif
+-
+-static inline SkFixed mirror_tileproc(SkFixed x) {
+- int s = x << 15 >> 31;
+- return (x ^ s) & 0xFFFF;
+-}
+-
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", on)
+-#endif
++SkFixed clamp_tileproc(SkFixed x);
++SkFixed repeat_tileproc(SkFixed x);
++SkFixed mirror_tileproc(SkFixed x);
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
new file mode 100644
index 0000000000..dc52a8d3d0
--- /dev/null
+++ b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
@@ -0,0 +1,39 @@
+From ef53776c06cffc7607c3777702f93e04c0852981 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:13:49 -0400
+Subject: [PATCH 03/10] Bug 755869 - [6] Re-apply SkUserConfig (no
+ original bug) r=mattwoodrow
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 9fdbd0a..f98ba85 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -156,6 +156,10 @@
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+@@ -177,4 +181,10 @@
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
new file mode 100644
index 0000000000..179aeded5d
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
@@ -0,0 +1,280 @@
+From 81d61682a94d47be5b47fb7882ea7e7c7e6c3351 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:28 -0400
+Subject: [PATCH 04/10] Bug 755869 - [7] Re-apply bug 722011 - Fix
+ trailing commas at end of enum lists r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkAdvancedTypefaceMetrics.h | 8 ++++----
+ gfx/skia/include/core/SkBlitRow.h | 2 +-
+ gfx/skia/include/core/SkCanvas.h | 2 +-
+ gfx/skia/include/core/SkDevice.h | 2 +-
+ gfx/skia/include/core/SkDeviceProfile.h | 4 ++--
+ gfx/skia/include/core/SkFlattenable.h | 2 +-
+ gfx/skia/include/core/SkFontHost.h | 4 ++--
+ gfx/skia/include/core/SkMaskFilter.h | 2 +-
+ gfx/skia/include/core/SkPaint.h | 4 ++--
+ gfx/skia/include/core/SkScalerContext.h | 9 +++++----
+ gfx/skia/include/core/SkTypes.h | 2 +-
+ gfx/skia/include/effects/SkLayerDrawLooper.h | 2 +-
+ gfx/skia/src/core/SkBitmap.cpp | 2 +-
+ gfx/skia/src/core/SkGlyphCache.cpp | 2 +-
+ 14 files changed, 24 insertions(+), 23 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+index 09fc9a9..5ffdb45 100644
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -34,7 +34,7 @@ public:
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+@@ -56,7 +56,7 @@ public:
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+@@ -75,7 +75,7 @@ public:
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+@@ -84,7 +84,7 @@ public:
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+index 973ab4c..febc405 100644
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -42,7 +42,7 @@ public:
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+index 25cc94a..d942783 100644
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -148,7 +148,7 @@ public:
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+index 1e4e0a3..b4d44bf 100644
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -139,7 +139,7 @@ public:
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+diff --git a/gfx/skia/include/core/SkDeviceProfile.h b/gfx/skia/include/core/SkDeviceProfile.h
+index 46b9781..f6a0bca 100644
+--- a/gfx/skia/include/core/SkDeviceProfile.h
++++ b/gfx/skia/include/core/SkDeviceProfile.h
+@@ -17,7 +17,7 @@ public:
+ kRGB_Horizontal_LCDConfig,
+ kBGR_Horizontal_LCDConfig,
+ kRGB_Vertical_LCDConfig,
+- kBGR_Vertical_LCDConfig,
++ kBGR_Vertical_LCDConfig
+ };
+
+ enum FontHintLevel {
+@@ -25,7 +25,7 @@ public:
+ kSlight_FontHintLevel,
+ kNormal_FontHintLevel,
+ kFull_FontHintLevel,
+- kAuto_FontHintLevel,
++ kAuto_FontHintLevel
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+index 5714f9d..dc115fc 100644
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -272,7 +272,7 @@ public:
+ * Instructs the writer to inline Factory names as they are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+index 732de5c..10f9bdf 100644
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -240,7 +240,7 @@ public:
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+@@ -259,7 +259,7 @@ public:
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+index 9a470a4..3422e27 100644
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -61,7 +61,7 @@ public:
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index ff37d77..7c96e193 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -76,7 +76,7 @@ public:
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+@@ -289,7 +289,7 @@ public:
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+index 2cb171b..3dbce27 100644
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -182,21 +182,22 @@ public:
+ kGenA8FromLCD_Flag = 0x0800,
+
+ #ifdef SK_USE_COLOR_LUMINANCE
+- kLuminance_Bits = 3,
++ kLuminance_Bits = 3
+ #else
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 13, // shift to land in the high 3-bits of Flags
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ #endif
+ };
+
+ // computed values
+ enum {
+- kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ #ifdef SK_USE_COLOR_LUMINANCE
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag
+ #else
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ #endif
+ };
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+index 7963a7d..0c5c2d7 100644
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -438,7 +438,7 @@ public:
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+index 0bc4af2..6cb8ef6 100644
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -41,7 +41,7 @@ public:
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+index 6b99145..aff52fd 100644
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1376,7 +1376,7 @@ enum {
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ /*
+diff --git a/gfx/skia/src/core/SkGlyphCache.cpp b/gfx/skia/src/core/SkGlyphCache.cpp
+index f3363cd..1fddc9d 100644
+--- a/gfx/skia/src/core/SkGlyphCache.cpp
++++ b/gfx/skia/src/core/SkGlyphCache.cpp
+@@ -417,7 +417,7 @@ class SkGlyphCache_Globals {
+ public:
+ enum UseMutex {
+ kNo_UseMutex, // thread-local cache
+- kYes_UseMutex, // shared cache
++ kYes_UseMutex // shared cache
+ };
+
+ SkGlyphCache_Globals(UseMutex um) {
+--
+1.7.5.4
+
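Every hunk in this patch makes the same mechanical change: a comma after the final enumerator is a C++11 allowance, and the C++03-strict compilers the patch targets (e.g. g++ with -pedantic-errors) reject it. A standalone illustration:

    // Rejected by g++ -std=c++98 -pedantic-errors:
    enum Style {
        kFill_Style,
        kStroke_Style,   // <- trailing comma after the last enumerator
    };

    // Accepted by C++03 and C++11 alike:
    enum FixedStyle {
        kFill_FixedStyle,
        kStroke_FixedStyle   // no trailing comma
    };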
diff --git a/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
new file mode 100644
index 0000000000..ad6e181274
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
@@ -0,0 +1,684 @@
+From 0d730a94e9f6676d5cde45f955fe025a4549817e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 23 Aug 2012 16:45:38 -0400
+Subject: [PATCH 4/9] Bug 777614 - Re-apply bug 719872 - Fix crash on Android
+ by reverting to older FontHost r=nrc
+
+---
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 ++++++++++++++++++++++++++
+ 1 file changed, 664 insertions(+)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructors/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++ fFamily = family; // we don't own this, so just record the reference
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++ Initializes all the globals, and registers the system fonts.
++ */
++static void load_system_fonts() {
++ // check if we've already been called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++ // we ref(), since the semantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.11.4
+
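The fallback machinery restored above keeps fontIDs in a zero-terminated array (gFallbackFonts) and walks it linearly in NextLogicalFont(): return the successor of the current ID, or the head of the list if the ID is unknown, with the 0 sentinel meaning no more fallbacks. The same lookup, reduced to a self-contained sketch with made-up IDs:

    #include <cstdint>

    // Zero-terminated fallback list, mirroring gFallbackFonts above.
    static const uint32_t kFallbacks[] = { 17, 23, 42, 0 };

    static uint32_t next_logical_font(uint32_t currFontID) {
        for (int i = 0; kFallbacks[i] != 0; ++i) {
            if (kFallbacks[i] == currFontID) {
                return kFallbacks[i + 1];   // may be the 0 sentinel
            }
        }
        return kFallbacks[0];               // unknown ID: start of the list
    }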
diff --git a/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
new file mode 100644
index 0000000000..e8b5df635b
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
@@ -0,0 +1,36 @@
+From 80350275c72921ed5ac405c029ae33727467d7c5 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:50 -0400
+Subject: [PATCH 05/10] Bug 755869 - [8] Re-apply bug 731384 - Fix compile
+ errors on older versions of clang r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPostConfig.h | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 8316f7a..041fe2a 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -288,9 +288,18 @@
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+--
+1.7.5.4
+
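The ladder above exists because __has_extension only appeared in later clang releases: first try __has_feature(cxx_override_control) (true when the feature is enabled in the current -std mode), then fall back to __has_extension where that macro exists, and finally define SK_OVERRIDE to nothing. The same probe pattern in isolation, under a hypothetical macro name:

    // MY_OVERRIDE is a made-up stand-in for SK_OVERRIDE.
    #if defined(__clang__)
    #  if __has_feature(cxx_override_control)
    #    define MY_OVERRIDE override      // enabled in this language mode
    #  elif defined(__has_extension)
    #    if __has_extension(cxx_override_control)
    #      define MY_OVERRIDE override    // available as an extension
    #    endif
    #  endif
    #endif
    #ifndef MY_OVERRIDE
    #  define MY_OVERRIDE                 // old compilers: expands to nothing
    #endif

    struct Base    { virtual void f() {} virtual ~Base() {} };
    struct Derived : Base { void f() MY_OVERRIDE {} };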
diff --git a/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
new file mode 100644
index 0000000000..4b76fcea1d
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
@@ -0,0 +1,472 @@
+From: George Wright <george@mozilla.com>
+Date: Wed, 1 Aug 2012 16:43:15 -0400
+Subject: Bug 736276 - Add a new SkFontHost that takes a cairo_scaled_font_t r=karl
+
+
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+index 5ebbd2e..7c8cdbf 100644
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -60,15 +60,15 @@ VPATH += \
+ $(NULL)
+
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(CAIRO_FT_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (gtk2,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(MOZ_PANGO_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(MOZ_PANGO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (qt,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(MOZ_PANGO_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(MOZ_PANGO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ ifeq (Linux,$(OS_TARGET))
+ DEFINES += -DSK_USE_POSIX_THREADS=1
+ endif
+diff --git a/gfx/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/include/ports/SkTypeface_cairo.h
+new file mode 100644
+index 0000000..7e44f04
+--- /dev/null
++++ b/gfx/skia/include/ports/SkTypeface_cairo.h
+@@ -0,0 +1,11 @@
++#ifndef SkTypeface_cairo_DEFINED
++#define SkTypeface_cairo_DEFINED
++
++#include <cairo.h>
++
++#include "SkTypeface.h"
++
++SK_API extern SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth);
++
++#endif
++
+diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
+index 9ceba59..66efd52 100644
+--- a/gfx/skia/moz.build
++++ b/gfx/skia/moz.build
+@@ -171,10 +171,12 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ 'SkTime_win.cpp',
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+- 'SkFontHost_linux.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -183,14 +185,15 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'qt':
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkOSFile.cpp',
+ ]
+ if CONFIG['OS_TARGET'] == 'Linux':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_linux.cpp',
+- 'SkFontHost_tables.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -204,11 +207,13 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk':
+ # Separate 'if' from above, since the else below applies to all != 'android'
+ # toolkits.
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+ 'ashmem.cpp',
+ 'SkDebug_android.cpp',
+- 'SkFontHost_android_old.cpp',
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkImageRef_ashmem.cpp',
+ 'SkTime_Unix.cpp',
+diff --git a/gfx/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+new file mode 100644
+index 0000000..bb5b778
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+@@ -0,0 +1,364 @@
++
++/*
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "cairo.h"
++#include "cairo-ft.h"
++
++#include "SkFontHost_FreeType_common.h"
++
++#include "SkAdvancedTypefaceMetrics.h"
++#include "SkFontHost.h"
++#include "SkPath.h"
++#include "SkScalerContext.h"
++#include "SkTypefaceCache.h"
++
++#include <ft2build.h>
++#include FT_FREETYPE_H
++
++static cairo_user_data_key_t kSkTypefaceKey;
++
++class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
++public:
++ SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc);
++ virtual ~SkScalerContext_CairoFT();
++
++protected:
++ virtual unsigned generateGlyphCount() SK_OVERRIDE;
++ virtual uint16_t generateCharToGlyph(SkUnichar uniChar) SK_OVERRIDE;
++ virtual void generateAdvance(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateMetrics(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateImage(const SkGlyph& glyph) SK_OVERRIDE;
++ virtual void generatePath(const SkGlyph& glyph, SkPath* path) SK_OVERRIDE;
++ virtual void generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my) SK_OVERRIDE;
++ virtual SkUnichar generateGlyphToChar(uint16_t glyph) SK_OVERRIDE;
++private:
++ cairo_scaled_font_t* fScaledFont;
++ uint32_t fLoadGlyphFlags;
++};
++
++class CairoLockedFTFace {
++public:
++ CairoLockedFTFace(cairo_scaled_font_t* scaledFont)
++ : fScaledFont(scaledFont)
++ , fFace(cairo_ft_scaled_font_lock_face(scaledFont))
++ {}
++
++ ~CairoLockedFTFace()
++ {
++ cairo_ft_scaled_font_unlock_face(fScaledFont);
++ }
++
++ FT_Face getFace()
++ {
++ return fFace;
++ }
++
++private:
++ cairo_scaled_font_t* fScaledFont;
++ FT_Face fFace;
++};
++
++class SkCairoFTTypeface : public SkTypeface {
++public:
++ static SkTypeface* CreateTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth) {
++ SkASSERT(fontFace != NULL);
++ SkASSERT(cairo_font_face_get_type(fontFace) == CAIRO_FONT_TYPE_FT);
++
++ SkFontID newId = SkTypefaceCache::NewFontID();
++
++ return SkNEW_ARGS(SkCairoFTTypeface, (fontFace, style, newId, isFixedWidth));
++ }
++
++ cairo_font_face_t* getFontFace() {
++ return fFontFace;
++ }
++
++ virtual SkStream* onOpenStream(int*) const SK_OVERRIDE { return NULL; }
++
++ virtual SkAdvancedTypefaceMetrics*
++ onGetAdvancedTypefaceMetrics(SkAdvancedTypefaceMetrics::PerGlyphInfo,
++ const uint32_t*, uint32_t) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedTypefaceMetrics unimplemented\n"));
++ return NULL;
++ }
++
++ virtual SkScalerContext* onCreateScalerContext(const SkDescriptor* desc) const SK_OVERRIDE
++ {
++ return SkNEW_ARGS(SkScalerContext_CairoFT, (const_cast<SkCairoFTTypeface*>(this), desc));
++ }
++
++ virtual void onFilterRec(SkScalerContextRec*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onFilterRec unimplemented\n"));
++ }
++
++ virtual void onGetFontDescriptor(SkFontDescriptor*, bool*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
++ }
++
++
++private:
++
++ SkCairoFTTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, SkFontID id, bool isFixedWidth)
++ : SkTypeface(style, id, isFixedWidth)
++ , fFontFace(fontFace)
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, this, NULL);
++ cairo_font_face_reference(fFontFace);
++ }
++
++ ~SkCairoFTTypeface()
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, NULL, NULL);
++ cairo_font_face_destroy(fFontFace);
++ }
++
++ cairo_font_face_t* fFontFace;
++};
++
++SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth)
++{
++ SkTypeface* typeface = reinterpret_cast<SkTypeface*>(cairo_font_face_get_user_data(fontFace, &kSkTypefaceKey));
++
++ if (typeface) {
++ typeface->ref();
++ } else {
++ typeface = SkCairoFTTypeface::CreateTypeface(fontFace, style, isFixedWidth);
++ SkTypefaceCache::Add(typeface, style);
++ }
++
++ return typeface;
++}
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char famillyName[],
++ SkTypeface::Style style)
++{
++ SkDEBUGFAIL("SkFontHost::FindTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(char const*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypefaceFromFile unimplemented");
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++static bool isLCD(const SkScalerContext::Rec& rec) {
++ switch (rec.fMaskFormat) {
++ case SkMask::kLCD16_Format:
++ case SkMask::kLCD32_Format:
++ return true;
++ default:
++ return false;
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++SkScalerContext_CairoFT::SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc)
++ : SkScalerContext_FreeType_Base(typeface, desc)
++{
++ SkMatrix matrix;
++ fRec.getSingleMatrix(&matrix);
++
++ cairo_font_face_t* fontFace = static_cast<SkCairoFTTypeface*>(typeface)->getFontFace();
++
++ cairo_matrix_t fontMatrix, ctMatrix;
++ cairo_matrix_init(&fontMatrix, matrix.getScaleX(), matrix.getSkewY(), matrix.getSkewX(), matrix.getScaleY(), 0.0, 0.0);
++ cairo_matrix_init_scale(&ctMatrix, 1.0, 1.0);
++
++ // We need to ensure that the font options match for hinting, as generateMetrics()
++ // uses the fScaledFont which uses these font options
++ cairo_font_options_t *fontOptions = cairo_font_options_create();
++
++ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
++
++ if (SkMask::kBW_Format == fRec.fMaskFormat) {
++ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
++ loadFlags = FT_LOAD_TARGET_MONO;
++ if (fRec.getHinting() == SkPaint::kNo_Hinting) {
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ loadFlags = FT_LOAD_NO_HINTING;
++ }
++ } else {
++ switch (fRec.getHinting()) {
++ case SkPaint::kNo_Hinting:
++ loadFlags = FT_LOAD_NO_HINTING;
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ break;
++ case SkPaint::kSlight_Hinting:
++ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_SLIGHT);
++ break;
++ case SkPaint::kNormal_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_MEDIUM);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ break;
++ case SkPaint::kFull_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_FULL);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ if (isLCD(fRec)) {
++ if (SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag)) {
++ loadFlags = FT_LOAD_TARGET_LCD_V;
++ } else {
++ loadFlags = FT_LOAD_TARGET_LCD;
++ }
++ }
++ break;
++ default:
++ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
++ break;
++ }
++ }
++
++ fScaledFont = cairo_scaled_font_create(fontFace, &fontMatrix, &ctMatrix, fontOptions);
++
++ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
++ loadFlags |= FT_LOAD_NO_BITMAP;
++ }
++
++ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
++ // advances, as fontconfig and cairo do.
++ // See http://code.google.com/p/skia/issues/detail?id=222.
++ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
++
++ fLoadGlyphFlags = loadFlags;
++}
++
++SkScalerContext_CairoFT::~SkScalerContext_CairoFT()
++{
++ cairo_scaled_font_destroy(fScaledFont);
++}
++
++unsigned SkScalerContext_CairoFT::generateGlyphCount()
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return faceLock.getFace()->num_glyphs;
++}
++
++uint16_t SkScalerContext_CairoFT::generateCharToGlyph(SkUnichar uniChar)
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return SkToU16(FT_Get_Char_Index(faceLock.getFace(), uniChar));
++}
++
++void SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
++{
++ generateMetrics(glyph);
++}
++
++void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ cairo_text_extents_t extents;
++ cairo_glyph_t cairoGlyph = { glyph->getGlyphID(fBaseGlyphCount), 0.0, 0.0 };
++ cairo_scaled_font_glyph_extents(fScaledFont, &cairoGlyph, 1, &extents);
++
++ glyph->fAdvanceX = SkDoubleToFixed(extents.x_advance);
++ glyph->fAdvanceY = SkDoubleToFixed(extents.y_advance);
++ glyph->fWidth = SkToU16(SkScalarCeil(extents.width));
++ glyph->fHeight = SkToU16(SkScalarCeil(extents.height));
++ glyph->fLeft = SkToS16(SkScalarCeil(extents.x_bearing));
++ glyph->fTop = SkToS16(SkScalarCeil(extents.y_bearing));
++ glyph->fLsbDelta = 0;
++ glyph->fRsbDelta = 0;
++}
++
++void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), fLoadGlyphFlags);
++
++ if (err != 0) {
++ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
++ return;
++ }
++
++ generateGlyphImage(face, glyph);
++}
++
++void SkScalerContext_CairoFT::generatePath(const SkGlyph& glyph, SkPath* path)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ SkASSERT(&glyph && path);
++
++ uint32_t flags = fLoadGlyphFlags;
++ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
++ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), flags);
++
++ if (err != 0) {
++ path->reset();
++ return;
++ }
++
++ generateGlyphPath(face, path);
++}
++
++void SkScalerContext_CairoFT::generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my)
++{
++ SkDEBUGCODE(SkDebugf("SkScalerContext_CairoFT::generateFontMetrics unimplemented\n"));
++}
++
++SkUnichar SkScalerContext_CairoFT::generateGlyphToChar(uint16_t glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_UInt glyphIndex;
++ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
++ while (glyphIndex != 0) {
++ if (glyphIndex == glyph) {
++ return charCode;
++ }
++ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
++ }
++
++ return 0;
++}
++
++#ifdef SK_BUILD_FOR_ANDROID
++SkTypeface* SkAndroidNextLogicalTypeface(SkFontID currFontID,
++ SkFontID origFontID) {
++ return NULL;
++}
++#endif
++
++///////////////////////////////////////////////////////////////////////////////
++
++#include "SkFontMgr.h"
++
++SkFontMgr* SkFontMgr::Factory() {
++ // todo
++ return NULL;
++}
++
+--
+1.7.11.7
+
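A detail worth noting in SkFontHost_cairo.cpp above: SkCreateTypefaceFromCairoFont caches the wrapper on the cairo_font_face_t itself via cairo's keyed user-data API, so wrapping the same face twice yields the same SkTypeface. The round-trip below uses the real cairo calls; the payload struct is a hypothetical stand-in for SkTypeface:

    #include <cairo.h>

    // Only the key's address matters; it identifies our user-data slot.
    static cairo_user_data_key_t kWrapperKey;

    struct Wrapper { int refs; };   // stand-in payload

    static Wrapper* get_or_create(cairo_font_face_t* face) {
        Wrapper* w = static_cast<Wrapper*>(
            cairo_font_face_get_user_data(face, &kWrapperKey));
        if (!w) {
            w = new Wrapper{1};
            // NULL destroy-notify: the patch likewise detaches manually in
            // the typeface destructor rather than letting cairo free it.
            cairo_font_face_set_user_data(face, &kWrapperKey, w, NULL);
        } else {
            ++w->refs;                 // cache hit: reuse the wrapper
        }
        return w;
    }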
diff --git a/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 0000000000..cfcb40b9d7
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,198 @@
+From 1ab13a923399aa638388231baca784ba89f2c82b Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 12 Sep 2012 12:30:29 -0400
+Subject: [PATCH 5/9] Bug 777614 - Re-apply bug 687188 - Expand the gradient
+ cache by 2 to store 0/1 colour stop values for
+ clamping. r=nrc
+
+---
+ .../src/effects/gradients/SkGradientShader.cpp | 22 +++++++++++----
+ .../src/effects/gradients/SkGradientShaderPriv.h | 5 +++-
+ .../src/effects/gradients/SkLinearGradient.cpp | 32 ++++++++++++++++------
+ .../gradients/SkTwoPointConicalGradient.cpp | 11 ++++++--
+ .../effects/gradients/SkTwoPointRadialGradient.cpp | 11 ++++++--
+ 5 files changed, 61 insertions(+), 20 deletions(-)
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index f0dac4d..79e7202 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -426,15 +426,15 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -458,7 +458,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -467,10 +467,22 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 0e7c2fc..7427935 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -133,7 +133,10 @@ public:
+ kDitherStride32 = 0,
+ #endif
+ kDitherStride16 = kCache16Count,
+- kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1
++ kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 2
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index bcebc26..d400b4d 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -126,6 +126,17 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -144,6 +155,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -169,10 +191,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -191,10 +210,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index 3466d2c..764a444 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -123,9 +123,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ }
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index 9362ded..22b028e 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.4
+
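The allocation trick above is the heart of this patch: the pixel ref gets two extra SkPMColor slots, and fCache32 is pointed one entry past the start of the buffer, so index -1 (kCache32ClampLower) and index kCache32Count*2 (kCache32ClampUpper) become valid storage for the exact first- and last-stop colors. A self-contained sketch of that offset-pointer layout, where all names except the two clamp indices are hypothetical:

    #include <cstdint>
    #include <vector>

    enum { kCount = 256, kClampLower = -1, kClampUpper = kCount * 2 };

    int main() {
        // 2*kCount plain/dithered entries plus two clamp slots.
        std::vector<uint32_t> storage(kCount * 2 + 2);
        uint32_t* cache = storage.data() + 1;   // makes slot -1 addressable

        cache[kClampLower] = 0xFF112233;        // exact first-stop color
        cache[kClampUpper] = 0xFFAABBCC;        // exact last-stop color

        // Out-of-range positions hit the clamp slots directly, with no
        // lerp or dither, matching shadeSpan_linear_clamp above.
        int t = 0x12345;                        // 16.16-style position
        uint32_t c = (t < 0)      ? cache[kClampLower]
                   : (t > 0xFFFF) ? cache[kClampUpper]
                                  : cache[t >> 8];
        (void)c;
        return 0;
    }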
diff --git a/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
new file mode 100644
index 0000000000..eb75691ad7
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
@@ -0,0 +1,147 @@
+From 94916fbbc7865c6fe23a57d6edc48c6daf93dda8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:16:08 -0400
+Subject: [PATCH 06/10] Bug 755869 - [9] Re-apply bug 751814 - Various
+ Skia fixes for ARM without EDSP and ARMv6+
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkMath.h | 5 +--
+ gfx/skia/include/core/SkPostConfig.h | 45 ++++++++++++++++++++++
+ gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp | 6 +-
+ gfx/skia/src/opts/SkBlitRow_opts_arm.cpp | 9 ++++
+ 4 files changed, 58 insertions(+), 7 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+index 5889103..7a4b707 100644
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -153,10 +153,7 @@ static inline bool SkIsPow2(int value) {
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 041fe2a..03105e4 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -311,3 +311,48 @@
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+index 20d62e1..deb1bfe 100644
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -11,7 +11,7 @@
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -182,7 +182,7 @@ void SI8_opaque_D32_nofilter_DX_arm(const SkBitmapProcState& s,
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -200,7 +200,7 @@ void SkBitmapProcState::platformProcs() {
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+index 2490371..c928888 100644
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -675,8 +675,13 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+@@ -745,7 +750,11 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
new file mode 100644
index 0000000000..2850000ace
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
@@ -0,0 +1,27 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:40:12 -0400
+Subject: Bug 848491 - Re-apply Bug 777614 - Add our SkUserConfig.h
+
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 63fc90d..c965e91 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -201,4 +201,14 @@
+ */
+ //#define SK_SUPPORT_GPU 1
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
new file mode 100644
index 0000000000..ca34e1a457
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
@@ -0,0 +1,702 @@
+From 6982ad469adcdfa2b7bdbf8bbd843bc22d3832fc Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:52:40 -0400
+Subject: [PATCH 07/10] Bug 755869 - [10] Re-apply bug 719872 - Fix crash
+ on Android by reverting to older FontHost impl
+ r=mattwoodrow
+
+---
+ gfx/skia/Makefile.in | 5 +-
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 +++++++++++++++++++++++++
+ 2 files changed, 668 insertions(+), 1 deletions(-)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+index 9da098a..8184f1c 100644
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -327,7 +327,10 @@ endif
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkDebug_android.cpp \
+- SkFontHost_none.cpp \
++ SkFontHost_android_old.cpp \
++ SkFontHost_gamma.cpp \
++ SkFontHost_FreeType.cpp \
++ SkFontHost_tables.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ SkThread_pthread.cpp \
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructor/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++ fFamily = family; // we don't own this, so just record the reference
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++ Initializes all the globals, and registers the system fonts.
++ */
++static void load_system_fonts() {
++ // check if we've already been called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++ // we ref(), since the semantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 0000000000..73bca9a48d
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,168 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:47:06 -0400
+Subject: Bug 848491 - Re-apply bug 687188 - Expand the gradient cache by 2 to store 0/1 colour stop values for clamping.
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 684355d..27a9c46 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -453,15 +453,15 @@ const uint16_t* SkGradientShaderBase::getCache16() const {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 4;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 4 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+@@ -484,7 +484,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -495,9 +495,21 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 729ce4e..2cb6a9d 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -86,6 +86,9 @@ public:
+ /// if dithering is disabled.
+ kDitherStride32 = kCache32Count,
+ kDitherStride16 = kCache16Count,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 4
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index e0f216c..40ab918 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -127,6 +127,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -154,10 +165,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kCache32Count - 1);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[next_dither_toggle(toggle) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -176,10 +184,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[next_dither_toggle(toggle) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index abd974b..601fff4 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -124,10 +124,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[toggle +
+- (index >> SkGradientShaderBase::kCache32Shift)];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index f70b67d..ec2ae75 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
new file mode 100644
index 0000000000..0f60dbd8ea
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
@@ -0,0 +1,173 @@
+From f941ea32e44a2436d235e83ef1a434289a9d9c1e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 11:40:25 -0400
+Subject: [PATCH 08/10] Bug 755869 - [11] Re-apply bug 687188 - Skia
+ radial gradients should use the 0/1 color stop values
+ for clamping. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 76 +++++++++++++++++++++++------
+ 1 files changed, 61 insertions(+), 15 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 59ba48c..ea05a39 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -204,6 +204,7 @@ private:
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+@@ -507,6 +508,21 @@ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+@@ -628,14 +644,14 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -659,7 +675,7 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -668,10 +684,13 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+@@ -857,6 +876,18 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -875,6 +906,18 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -900,10 +943,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, Gradient_Shader::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV0],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[-1], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -922,10 +963,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV1],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+ }
+ }
+
+@@ -1796,9 +1835,16 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
++ }
++
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 0000000000..58961d6e06
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,35 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:49:45 -0400
+Subject: Bug 848491 - Re-apply 759683 - Handle compilers that don't support SSSE3 intrinsics
+
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 6370058..18f68d6 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
new file mode 100644
index 0000000000..1e9a93f20a
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
@@ -0,0 +1,28 @@
+From df3be24040f7cb2f9c7ed86ad3e47206630e885f Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:49:57 -0400
+Subject: [PATCH 09/10] Bug 755869 - [12] Re-apply bug 749533 - Add
+ support for GNU/kFreeBSD and Hurd in Skia.
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPreConfig.h | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 46c6929..16c4d6c 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -35,7 +35,8 @@
+ #elif defined(ANDROID)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
+- defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__)
++ defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__) || \
++ defined(__GLIBC__) || defined(__GNU__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 0000000000..1da208ed18
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,40 @@
+From 2c5a8cebc806ed287ce7c3723ea64a233266cd9e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 14:55:33 -0400
+Subject: [PATCH 9/9] Bug 777614 - Re-apply 759683 - Handle compilers that
+ don't support SSSE3 intrinsics r=nrc
+
+---
+ gfx/skia/src/opts/opts_check_SSE2.cpp | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 96d0dea..add6d5f 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
new file mode 100644
index 0000000000..9778015c4f
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
@@ -0,0 +1,23 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:52:32 -0400
+Subject: Bug 848491 - Re-apply bug 751418 - Add our own GrUserConfig
+
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index 11d4feb..77ab850 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -43,6 +43,10 @@
+ */
+ //#define GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT 96
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
+ #define GR_GL_PER_GL_FUNC_CALLBACK 1
+
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
new file mode 100644
index 0000000000..bd6604b4bd
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
@@ -0,0 +1,36 @@
+From dc1292fc8c2b9da900ebcac953120eaffd0d329e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:52:36 -0400
+Subject: [PATCH 10/10] Bug 755869 - [13] Re-apply bug 750733 - Use
+ handles in API object hooks where possible
+ r=mattwoodrow
+
+---
+ gfx/skia/src/xml/SkJS.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/xml/SkJS.cpp b/gfx/skia/src/xml/SkJS.cpp
+index f2e7a83..b2717d7 100644
+--- a/gfx/skia/src/xml/SkJS.cpp
++++ b/gfx/skia/src/xml/SkJS.cpp
+@@ -74,7 +74,7 @@ extern "C" {
+ #endif
+
+ static bool
+-global_enumerate(JSContext *cx, JSObject *obj)
++global_enumerate(JSContext *cx, JSHandleObject obj)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ return JS_EnumerateStandardClasses(cx, obj);
+@@ -84,7 +84,7 @@ global_enumerate(JSContext *cx, JSObject *obj)
+ }
+
+ static bool
+-global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp)
++global_resolve(JSContext *cx, JSHandleObject obj, JSHandleId id, unsigned flags, JSObject **objp)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ if ((flags & JSRESOLVE_ASSIGNING) == 0) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
new file mode 100644
index 0000000000..a446037de0
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
@@ -0,0 +1,698 @@
+# HG changeset patch
+# User Rik Cabanier <cabanier@adobe.com>
+# Date 1360273929 -46800
+# Node ID 3ac8edca3a03b3d22240b5a5b95ae3b5ada9877d
+# Parent cbb67fe70b864b36165061e1fd3b083cd09af087
+Bug 836892 - Add new blending modes to SkXfermode. r=gw280
+
+diff --git a/gfx/skia/include/core/SkXfermode.h b/gfx/skia/include/core/SkXfermode.h
+--- a/gfx/skia/include/core/SkXfermode.h
++++ b/gfx/skia/include/core/SkXfermode.h
+@@ -96,33 +96,37 @@ public:
+ kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
+ kXor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
+
+ // all remaining modes are defined in the SVG Compositing standard
+ // http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
+ kPlus_Mode,
+- kMultiply_Mode,
+
+ // all above modes can be expressed as pair of src/dst Coeffs
+ kCoeffModesCnt,
+
+- kScreen_Mode = kCoeffModesCnt,
++ kMultiply_Mode = kCoeffModesCnt,
++ kScreen_Mode,
+ kOverlay_Mode,
+ kDarken_Mode,
+ kLighten_Mode,
+ kColorDodge_Mode,
+ kColorBurn_Mode,
+ kHardLight_Mode,
+ kSoftLight_Mode,
+ kDifference_Mode,
+ kExclusion_Mode,
++ kHue_Mode,
++ kSaturation_Mode,
++ kColor_Mode,
++ kLuminosity_Mode,
+
+- kLastMode = kExclusion_Mode
++ kLastMode = kLuminosity_Mode
+ };
+
+ /**
+ * If the xfermode is one of the modes in the Mode enum, then asMode()
+ * returns true and sets (if not null) mode accordingly. Otherwise it
+ * returns false and ignores the mode parameter.
+ */
+ virtual bool asMode(Mode* mode);
+diff --git a/gfx/skia/src/core/SkXfermode.cpp b/gfx/skia/src/core/SkXfermode.cpp
+--- a/gfx/skia/src/core/SkXfermode.cpp
++++ b/gfx/skia/src/core/SkXfermode.cpp
+@@ -7,16 +7,18 @@
+ */
+
+
+ #include "SkXfermode.h"
+ #include "SkColorPriv.h"
+ #include "SkFlattenableBuffers.h"
+ #include "SkMathPriv.h"
+
++#include <algorithm>
++
+ SK_DEFINE_INST_COUNT(SkXfermode)
+
+ #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
+
+ #if 0
+ // idea for higher precision blends in xfer procs (and slightly faster)
+ // see DstATop as a probable caller
+ static U8CPU mulmuldiv255round(U8CPU a, U8CPU b, U8CPU c, U8CPU d) {
+@@ -176,244 +178,439 @@ static SkPMColor xor_modeproc(SkPMColor
+ static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned b = saturated_add(SkGetPackedB32(src), SkGetPackedB32(dst));
+ unsigned g = saturated_add(SkGetPackedG32(src), SkGetPackedG32(dst));
+ unsigned r = saturated_add(SkGetPackedR32(src), SkGetPackedR32(dst));
+ unsigned a = saturated_add(SkGetPackedA32(src), SkGetPackedA32(dst));
+ return SkPackARGB32(a, r, g, b);
+ }
+
++static inline int srcover_byte(int a, int b) {
++ return a + b - SkAlphaMulAlpha(a, b);
++}
++
++#define blendfunc_byte(sc, dc, sa, da, blendfunc) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendfunc(sc, dc, sa, da))
++
+ // kMultiply_Mode
++static inline int multiply_byte(int sc, int dc, int sa, int da) {
++ return sc * dc;
++}
+ static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, multiply_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, multiply_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, multiply_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kScreen_Mode
+-static inline int srcover_byte(int a, int b) {
+- return a + b - SkAlphaMulAlpha(a, b);
++static inline int screen_byte(int sc, int dc, int sa, int da) {
++ return sc * da + sa * dc - sc * dc;
+ }
+ static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, screen_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, screen_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, screen_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++// kHardLight_Mode
++static inline int hardlight_byte(int sc, int dc, int sa, int da) {
++ if(!sa || !da)
++ return sc * da;
++ float Sc = (float)sc/sa;
++ float Dc = (float)dc/da;
++ if(Sc <= 0.5)
++ Sc *= 2 * Dc;
++ else
++ Sc = -1 + 2 * Sc + 2 * Dc - 2 * Sc * Dc;
++
++ return Sc * sa * da;
++}
++static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, hardlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, hardlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, hardlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kOverlay_Mode
+ static inline int overlay_byte(int sc, int dc, int sa, int da) {
+- int tmp = sc * (255 - da) + dc * (255 - sa);
+- int rc;
+- if (2 * dc <= da) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + tmp);
++ return hardlight_byte(dc, sc, da, sa);
+ }
+ static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, overlay_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, overlay_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, overlay_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDarken_Mode
+ static inline int darken_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd < ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMin32(sc * da, sa * dc);
+ }
+ static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, darken_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, darken_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, darken_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kLighten_Mode
+ static inline int lighten_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd > ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMax32(sc * da, sa * dc);
+ }
+ static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, lighten_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, lighten_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, lighten_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorDodge_Mode
+ static inline int colordodge_byte(int sc, int dc, int sa, int da) {
+- int diff = sa - sc;
+- int rc;
+- if (0 == diff) {
+- rc = sa * da + sc * (255 - da) + dc * (255 - sa);
+- rc = SkDiv255Round(rc);
+- } else {
+- int tmp = (dc * sa << 15) / (da * diff);
+- rc = SkDiv255Round(sa * da) * tmp >> 15;
+- // don't clamp here, since we'll do it in our modeproc
+- }
+- return rc;
++ if (dc == 0)
++ return 0;
++ // Avoid division by 0
++ if (sc == sa)
++ return da * sa;
++
++ return SkMin32(sa * da, sa * sa * dc / (sa - sc));
+ }
+ static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colordodge_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- r = clamp_max(r, a);
+- g = clamp_max(g, a);
+- b = clamp_max(b, a);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colordodge_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colordodge_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colordodge_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorBurn_Mode
+ static inline int colorburn_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (dc == da && 0 == sc) {
+- rc = sa * da + dc * (255 - sa);
+- } else if (0 == sc) {
+- return SkAlphaMulAlpha(dc, 255 - sa);
+- } else {
+- int tmp = (sa * (da - dc) * 256) / (sc * da);
+- if (tmp > 256) {
+- tmp = 256;
+- }
+- int tmp2 = sa * da;
+- rc = tmp2 - (tmp2 * tmp >> 8) + sc * (255 - da) + dc * (255 - sa);
+- }
+- return SkDiv255Round(rc);
++ // Avoid division by 0
++ if(dc == da)
++ return sa * da;
++ if(sc == 0)
++ return 0;
++
++ return sa * da - SkMin32(sa * da, sa * sa * (da - dc) / sc);
+ }
+ static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colorburn_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- return SkPackARGB32(a, r, g, b);
+-}
+-
+-// kHardLight_Mode
+-static inline int hardlight_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (2 * sc <= sa) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+-}
+-static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
+- int sa = SkGetPackedA32(src);
+- int da = SkGetPackedA32(dst);
+- int a = srcover_byte(sa, da);
+- int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colorburn_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colorburn_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colorburn_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // returns 255 * sqrt(n/255)
+ static U8CPU sqrt_unit_byte(U8CPU n) {
+ return SkSqrtBits(n, 15+4);
+ }
+
+ // kSoftLight_Mode
+ static inline int softlight_byte(int sc, int dc, int sa, int da) {
+ int m = da ? dc * 256 / da : 0;
+ int rc;
+- if (2 * sc <= sa) {
+- rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
+- } else if (4 * dc <= da) {
++ if (2 * sc <= sa)
++ return dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
++
++ if (4 * dc <= da) {
+ int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+- } else {
+- int tmp = sqrt_unit_byte(m) - m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
++ return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
++ int tmp = sqrt_unit_byte(m) - m;
++ return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+ static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, softlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, softlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, softlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDifference_Mode
+ static inline int difference_byte(int sc, int dc, int sa, int da) {
+- int tmp = SkMin32(sc * da, dc * sa);
+- return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
++ int tmp = dc * sa - sc * da;
++ if(tmp < 0)
++ return -tmp;
++
++ return tmp;
+ }
+ static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, difference_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, difference_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, difference_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kExclusion_Mode
+ static inline int exclusion_byte(int sc, int dc, int sa, int da) {
+- // this equations is wacky, wait for SVG to confirm it
+- int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
+- return clamp_div255round(r);
++ return sc * da + dc * sa - 2 * dc * sc;
+ }
+ static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, exclusion_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, exclusion_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, exclusion_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
++struct BlendColor {
++ float r;
++ float g;
++ float b;
++
++ BlendColor(): r(0), g(0), b(0)
++ {}
++};
++
++static inline float Lum(BlendColor C)
++{
++ return C.r * 0.3 + C.g * 0.59 + C.b * 0.11;
++}
++
++static inline float SkMinFloat(float a, float b)
++{
++ if (a > b)
++ a = b;
++ return a;
++}
++
++static inline float SkMaxFloat(float a, float b)
++{
++ if (a < b)
++ a = b;
++ return a;
++}
++
++#define minimum(C) SkMinFloat(SkMinFloat(C.r, C.g), C.b)
++#define maximum(C) SkMaxFloat(SkMaxFloat(C.r, C.g), C.b)
++
++static inline float Sat(BlendColor c) {
++ return maximum(c) - minimum(c);
++}
++
++static inline void setSaturationComponents(float& Cmin, float& Cmid, float& Cmax, float s) {
++ if(Cmax > Cmin) {
++ Cmid = (((Cmid - Cmin) * s) / (Cmax - Cmin));
++ Cmax = s;
++ } else {
++ Cmax = 0;
++ Cmid = 0;
++ }
++ Cmin = 0;
++}
++
++static inline BlendColor SetSat(BlendColor C, float s) {
++ if(C.r <= C.g) {
++ if(C.g <= C.b)
++ setSaturationComponents(C.r, C.g, C.b, s);
++ else
++ if(C.r <= C.b)
++ setSaturationComponents(C.r, C.b, C.g, s);
++ else
++ setSaturationComponents(C.b, C.r, C.g, s);
++ } else if(C.r <= C.b)
++ setSaturationComponents(C.g, C.r, C.b, s);
++ else
++ if(C.g <= C.b)
++ setSaturationComponents(C.g, C.b, C.r, s);
++ else
++ setSaturationComponents(C.b, C.g, C.r, s);
++
++ return C;
++}
++
++static inline BlendColor clipColor(BlendColor C) {
++ float L = Lum(C);
++ float n = minimum(C);
++ float x = maximum(C);
++ if(n < 0) {
++ C.r = L + (((C.r - L) * L) / (L - n));
++ C.g = L + (((C.g - L) * L) / (L - n));
++ C.b = L + (((C.b - L) * L) / (L - n));
++ }
++
++ if(x > 1) {
++ C.r = L + (((C.r - L) * (1 - L)) / (x - L));
++ C.g = L + (((C.g - L) * (1 - L)) / (x - L));
++ C.b = L + (((C.b - L) * (1 - L)) / (x - L));
++ }
++ return C;
++}
++
++static inline BlendColor SetLum(BlendColor C, float l) {
++ float d = l - Lum(C);
++ C.r += d;
++ C.g += d;
++ C.b += d;
++
++ return clipColor(C);
++}
++
++#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + (int)(sa * da * blendval))
++
++static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cs, Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cd, Lum(Cs));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ struct ProcCoeff {
+ SkXfermodeProc fProc;
+ SkXfermode::Coeff fSC;
+ SkXfermode::Coeff fDC;
+ };
+@@ -430,27 +627,31 @@ static const ProcCoeff gProcCoeffs[] = {
+ { dstin_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kSA_Coeff },
+ { srcout_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kZero_Coeff },
+ { dstout_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kISA_Coeff },
+ { srcatop_modeproc, SkXfermode::kDA_Coeff, SkXfermode::kISA_Coeff },
+ { dstatop_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kSA_Coeff },
+ { xor_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
+
+ { plus_modeproc, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
+- { multiply_modeproc,SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
++ { multiply_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { screen_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { overlay_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { darken_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { lighten_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colordodge_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colorburn_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hardlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { softlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { difference_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { exclusion_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { hue_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { saturation_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { color_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { luminosity_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkXfermode::asCoeff(Coeff* src, Coeff* dst) {
+ return false;
+ }
+
+@@ -1172,16 +1373,20 @@ static const Proc16Rec gModeProcs16[] =
+ { darken_modeproc16_0, darken_modeproc16_255, NULL }, // darken
+ { lighten_modeproc16_0, lighten_modeproc16_255, NULL }, // lighten
+ { NULL, NULL, NULL }, // colordodge
+ { NULL, NULL, NULL }, // colorburn
+ { NULL, NULL, NULL }, // hardlight
+ { NULL, NULL, NULL }, // softlight
+ { NULL, NULL, NULL }, // difference
+ { NULL, NULL, NULL }, // exclusion
++ { NULL, NULL, NULL }, // hue
++ { NULL, NULL, NULL }, // saturation
++ { NULL, NULL, NULL }, // color
++ { NULL, NULL, NULL }, // luminosity
+ };
+
+ SkXfermodeProc16 SkXfermode::GetProc16(Mode mode, SkColor srcColor) {
+ SkXfermodeProc16 proc16 = NULL;
+ if ((unsigned)mode < kModeCount) {
+ const Proc16Rec& rec = gModeProcs16[mode];
+ unsigned a = SkColorGetA(srcColor);
+
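
The four non-separable modes added above (hue, saturation, color, luminosity) follow the SVG compositing math: convert to unpremultiplied color in [0, 1], transfer saturation and/or luminosity between source and destination, then clip back into gamut. A minimal standalone sketch of the luminosity step, assuming plain floats rather than Skia's packed SkPMColor:

    // Illustrative sketch only, not the patch code: SetLum plus the gamut
    // clip from clipColor(), applied to one unpremultiplied RGB triple.
    #include <algorithm>
    #include <cstdio>

    struct RGB { float r, g, b; };

    static float Lum(const RGB& c) { return c.r * 0.3f + c.g * 0.59f + c.b * 0.11f; }

    // Pull an out-of-range result back into [0, 1] toward its luminosity,
    // mirroring clipColor() above.
    static RGB ClipColor(RGB c) {
        float L = Lum(c);
        float n = std::min(std::min(c.r, c.g), c.b);
        float x = std::max(std::max(c.r, c.g), c.b);
        if (n < 0) {
            c.r = L + ((c.r - L) * L) / (L - n);
            c.g = L + ((c.g - L) * L) / (L - n);
            c.b = L + ((c.b - L) * L) / (L - n);
        }
        if (x > 1) {
            c.r = L + ((c.r - L) * (1 - L)) / (x - L);
            c.g = L + ((c.g - L) * (1 - L)) / (x - L);
            c.b = L + ((c.b - L) * (1 - L)) / (x - L);
        }
        return c;
    }

    // Shift the color so its luminosity becomes l, then clip.
    static RGB SetLum(RGB c, float l) {
        float d = l - Lum(c);
        c.r += d; c.g += d; c.b += d;
        return ClipColor(c);
    }

    int main() {
        RGB src = {1.0f, 0.0f, 0.0f};     // pure red
        RGB dst = {0.5f, 0.5f, 0.5f};     // mid grey, Lum == 0.5
        RGB out = SetLum(src, Lum(dst));  // core step of kLuminosity_Mode
        std::printf("%.3f %.3f %.3f\n", out.r, out.g, out.b);  // ~1.000 0.286 0.286
        return 0;
    }

The 8-bit code in the patch then folds this result back into the usual Porter-Duff over term via the blendfunc_nonsep_byte macro.
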
diff --git a/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
new file mode 100644
index 0000000000..0d44b008d6
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
@@ -0,0 +1,22 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:55:02 -0400
+Subject: Bug 848491 - Re-apply bug 817356 - Patch Skia to recognize uppercase PPC/PPC64.
+
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 11cb223..7e95b99 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -99,7 +99,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+--
+1.7.11.7
+
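
The test above only broadens compile-time detection to toolchains that define the uppercase __PPC__/__PPC64__ spellings. A runtime probe (illustrative only, not Skia API) can cross-check the choice on a given host:

    #include <cstdint>
    #include <cstdio>

    // True on big-endian hosts: the most significant byte comes first in memory.
    static bool HostIsBigEndian() {
        const uint16_t probe = 0x0102;
        return *reinterpret_cast<const uint8_t*>(&probe) == 0x01;
    }

    int main() {
        std::printf("big-endian: %d\n", HostIsBigEndian());
        return 0;
    }
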
diff --git a/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
new file mode 100644
index 0000000000..95cb08a36f
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
@@ -0,0 +1,28 @@
+From cf855f31194ff071f2c787a7413d70a43f15f204 Mon Sep 17 00:00:00 2001
+From: Ehsan Akhgari <ehsan@mozilla.com>
+Date: Tue, 29 May 2012 15:39:55 -0400
+Subject: [PATCH] Bug 755869 - Re-apply patch from bug 719575 to fix clang
+ builds for the new Skia r=gw280
+
+---
+ gfx/skia/src/ports/SkFontHost_mac_coretext.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+index c43d1a6..ce5f409 100644
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -807,8 +807,8 @@ CGRGBPixel* Offscreen::getCG(const SkScalerContext_Mac& context, const SkGlyph&
+ void SkScalerContext_Mac::getVerticalOffset(CGGlyph glyphID, SkIPoint* offset) const {
+ CGSize vertOffset;
+ CTFontGetVerticalTranslationsForGlyphs(fCTVerticalFont, &glyphID, &vertOffset, 1);
+- const SkPoint trans = {SkFloatToScalar(vertOffset.width),
+- SkFloatToScalar(vertOffset.height)};
++ const SkPoint trans = {SkScalar(SkFloatToScalar(vertOffset.width)),
++ SkScalar(SkFloatToScalar(vertOffset.height))};
+ SkPoint floatOffset;
+ fVerticalMatrix.mapPoints(&floatOffset, &trans, 1);
+ if (!isSnowLeopard()) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
new file mode 100644
index 0000000000..854f0b1afe
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
@@ -0,0 +1,31 @@
+# HG changeset patch
+# Parent 2c6da9f02606f7a02f635d99ef8cf669d3bc5c4b
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 839347: Move SkNO_RETURN_HINT out of anonymous namespace so that clang won't warn about it being unused. r=mattwoodrow
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -63,20 +63,18 @@
+ * The clang static analyzer likes to know that when the program is not
+ * expected to continue (crash, assertion failure, etc). It will notice that
+ * some combination of parameters lead to a function call that does not return.
+ * It can then make appropriate assumptions about the parameters in code
+ * executed only if the non-returning function was *not* called.
+ */
+ #if !defined(SkNO_RETURN_HINT)
+ #if SK_HAS_COMPILER_FEATURE(attribute_analyzer_noreturn)
+- namespace {
+- inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
+- inline void SkNO_RETURN_HINT() {}
+- }
++ inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
++ inline void SkNO_RETURN_HINT() {}
+ #else
+ #define SkNO_RETURN_HINT() do {} while (false)
+ #endif
+ #endif
+
+ #if defined(SK_ZLIB_INCLUDE) && defined(SK_SYSTEM_ZLIB)
+ #error "cannot define both SK_ZLIB_INCLUDE and SK_SYSTEM_ZLIB"
+ #elif defined(SK_ZLIB_INCLUDE) || defined(SK_SYSTEM_ZLIB)
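
The fix keeps the analyzer hint as an inline function, just at namespace scope, so clang no longer sees a distinct unused copy per translation unit. A sketch of the same pattern with hypothetical names (MyNoReturnHint is not a Skia symbol):

    // Hypothetical header snippet following the patched shape: feature-test
    // for the clang static analyzer attribute, fall back to a no-op otherwise.
    #if defined(__has_feature)
    # if __has_feature(attribute_analyzer_noreturn)
    inline void MyNoReturnHint() __attribute__((analyzer_noreturn));
    inline void MyNoReturnHint() {}
    #  define MY_NO_RETURN_HINT() MyNoReturnHint()
    # endif
    #endif
    #ifndef MY_NO_RETURN_HINT
    # define MY_NO_RETURN_HINT() do {} while (false)
    #endif
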
diff --git a/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
new file mode 100644
index 0000000000..cde2940950
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
@@ -0,0 +1,29 @@
+From 4c25387e6e6cdb55f19e51631a78c3fa9b4a3c73 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 1 Nov 2012 17:29:50 -0400
+Subject: [PATCH 2/8] Bug 751418 - Add our own GrUserConfig r=mattwoodrow
+
+---
+ gfx/skia/include/gpu/GrUserConfig.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index d514486..b729ab3 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -64,6 +64,12 @@
+ #define GR_TEXT_SCALAR_IS_FIXED 0
+ #define GR_TEXT_SCALAR_IS_FLOAT 1
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
++#define GR_GL_PER_GL_FUNC_CALLBACK 1
++
+ #endif
+
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
new file mode 100644
index 0000000000..dc780c5ec6
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -91,17 +91,17 @@ static bool cachedHasSSE2() {
+
+ static bool cachedHasSSSE3() {
+ static bool gHasSSSE3 = hasSSSE3();
+ return gHasSSSE3;
+ }
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+ } else if (fSampleProc32 == S32_alpha_D32_filter_DX) {
+ fSampleProc32 = S32_alpha_D32_filter_DX_SSSE3;
+ }
+
+ if (fSampleProc32 == S32_opaque_D32_filter_DXDY) {
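
After this change the SSSE3 bodies are compiled only when the build opts in via SK_BUILD_SSSE3, while cachedHasSSSE3() still gates them at runtime. The runtime side amounts to a CPUID probe along these lines (a sketch, not the actual opts_check_SSE2.cpp code):

    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    #include <cpuid.h>

    // SSSE3 support is bit 9 of ECX from CPUID leaf 1.
    static bool HasSSSE3() {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            return false;
        }
        return (ecx & (1u << 9)) != 0;
    }
    #endif
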
diff --git a/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
new file mode 100644
index 0000000000..167e22184d
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
@@ -0,0 +1,26 @@
+From 3d786b1f0c040205ad9ef6d4216ce06b41f7359f Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Mon, 5 Nov 2012 15:49:42 +0000
+Subject: [PATCH 3/8] Bug 751418 - Fix compile error on gcc in Skia/GL
+ r=mattwoodrow
+
+---
+ gfx/skia/src/gpu/gl/GrGLProgram.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/gpu/gl/GrGLProgram.cpp b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+index 2703110..40cadc3 100644
+--- a/gfx/skia/src/gpu/gl/GrGLProgram.cpp
++++ b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+@@ -575,7 +575,7 @@ bool GrGLProgram::genProgram(const GrCustomStage** customStages) {
+ POS_ATTR_NAME);
+
+ builder.fVSCode.appendf("void main() {\n"
+- "\tvec3 pos3 = %s * vec3("POS_ATTR_NAME", 1);\n"
++ "\tvec3 pos3 = %s * vec3(" POS_ATTR_NAME ", 1);\n"
+ "\tgl_Position = vec4(pos3.xy, 0, pos3.z);\n",
+ viewMName);
+
+--
+1.7.11.7
+
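
The underlying issue: since C++11, a macro name glued directly to a string literal parses as a user-defined-literal suffix, so newer GCC rejects the unspaced form. A minimal illustration with a hypothetical NAME macro:

    #define NAME "aPosition"
    const char* ok = "vec3(" NAME ", 1)";    // two literals, concatenated by the compiler
    // const char* bad = "vec3("NAME", 1)";  // C++11: parsed as literal with suffix "NAME"
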
diff --git a/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
new file mode 100644
index 0000000000..f20293d4cc
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
@@ -0,0 +1,162 @@
+# HG changeset patch
+# User Nicholas Cameron <ncameron@mozilla.com>
+# Date 1337146927 -43200
+# Node ID 310209abef2c2667e5de41dd2a1f071e8cd42821
+# Parent 93f3ca4d5707b2aae9c6ae52d5d29c2c802e7ef8
+Bug 746883; changes to the Skia library. r=gw280
+
+diff --git a/gfx/skia/include/core/SkDraw.h b/gfx/skia/include/core/SkDraw.h
+--- a/gfx/skia/include/core/SkDraw.h
++++ b/gfx/skia/include/core/SkDraw.h
+@@ -125,23 +125,24 @@ public:
+ #endif
+ };
+
+ class SkGlyphCache;
+
+ class SkTextToPathIter {
+ public:
+ SkTextToPathIter(const char text[], size_t length, const SkPaint& paint,
+- bool applyStrokeAndPathEffects);
++ bool applyStrokeAndPathEffects, bool useCanonicalTextSize = true);
+ ~SkTextToPathIter();
+
+ const SkPaint& getPaint() const { return fPaint; }
+ SkScalar getPathScale() const { return fScale; }
+
+ const SkPath* next(SkScalar* xpos); //!< returns nil when there are no more paths
++ bool nextWithWhitespace(const SkPath** path, SkScalar* xpos); //!< returns false when there are no more paths
+
+ private:
+ SkGlyphCache* fCache;
+ SkPaint fPaint;
+ SkScalar fScale;
+ SkFixed fPrevAdvance;
+ const char* fText;
+ const char* fStop;
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1359,30 +1359,32 @@ void SkPaint::getPosTextPath(const void*
+ const SkPoint pos[], SkPath* path) const {
+ SkASSERT(length == 0 || textData != NULL);
+
+ const char* text = (const char*)textData;
+ if (text == NULL || length == 0 || path == NULL) {
+ return;
+ }
+
+- SkTextToPathIter iter(text, length, *this, false);
++ SkTextToPathIter iter(text, length, *this, false, false);
+ SkMatrix matrix;
+ SkPoint prevPos;
+ prevPos.set(0, 0);
+
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ path->reset();
+
+ unsigned int i = 0;
+ const SkPath* iterPath;
+- while ((iterPath = iter.next(NULL)) != NULL) {
+- matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
+- path->addPath(*iterPath, matrix);
+- prevPos = pos[i];
++ while (iter.nextWithWhitespace(&iterPath, NULL)) {
++ if (iterPath) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ }
+ i++;
+ }
+ }
+
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+@@ -2118,30 +2120,31 @@ const SkRect& SkPaint::doComputeFastBoun
+
+ static bool has_thick_frame(const SkPaint& paint) {
+ return paint.getStrokeWidth() > 0 &&
+ paint.getStyle() != SkPaint::kFill_Style;
+ }
+
+ SkTextToPathIter::SkTextToPathIter( const char text[], size_t length,
+ const SkPaint& paint,
+- bool applyStrokeAndPathEffects)
++ bool applyStrokeAndPathEffects,
++ bool useCanonicalTextSize)
+ : fPaint(paint) {
+ fGlyphCacheProc = paint.getMeasureCacheProc(SkPaint::kForward_TextBufferDirection,
+ true);
+
+ fPaint.setLinearText(true);
+ fPaint.setMaskFilter(NULL); // don't want this affecting our path-cache lookup
+
+ if (fPaint.getPathEffect() == NULL && !has_thick_frame(fPaint)) {
+ applyStrokeAndPathEffects = false;
+ }
+
+ // can't use our canonical size if we need to apply patheffects
+- if (fPaint.getPathEffect() == NULL) {
++ if (useCanonicalTextSize && fPaint.getPathEffect() == NULL) {
+ fPaint.setTextSize(SkIntToScalar(SkPaint::kCanonicalTextSizeForPaths));
+ fScale = paint.getTextSize() / SkPaint::kCanonicalTextSizeForPaths;
+ if (has_thick_frame(fPaint)) {
+ fPaint.setStrokeWidth(SkScalarDiv(fPaint.getStrokeWidth(), fScale));
+ }
+ } else {
+ fScale = SK_Scalar1;
+ }
+@@ -2185,30 +2188,47 @@ SkTextToPathIter::SkTextToPathIter( cons
+ fXYIndex = paint.isVerticalText() ? 1 : 0;
+ }
+
+ SkTextToPathIter::~SkTextToPathIter() {
+ SkGlyphCache::AttachCache(fCache);
+ }
+
+ const SkPath* SkTextToPathIter::next(SkScalar* xpos) {
+- while (fText < fStop) {
++ const SkPath* result;
++ while (nextWithWhitespace(&result, xpos)) {
++ if (result) {
++ if (xpos) {
++ *xpos = fXPos;
++ }
++ return result;
++ }
++ }
++ return NULL;
++}
++
++bool SkTextToPathIter::nextWithWhitespace(const SkPath** path, SkScalar* xpos) {
++ if (fText < fStop) {
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &fText);
+
+ fXPos += SkScalarMul(SkFixedToScalar(fPrevAdvance + fAutoKern.adjust(glyph)), fScale);
+ fPrevAdvance = advance(glyph, fXYIndex); // + fPaint.getTextTracking();
+
+ if (glyph.fWidth) {
+ if (xpos) {
+ *xpos = fXPos;
+ }
+- return fCache->findPath(glyph);
++ *path = fCache->findPath(glyph);
++ return true;
++ } else {
++ *path = NULL;
++ return true;
+ }
+ }
+- return NULL;
++ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkPaint::nothingToDraw() const {
+ if (fLooper) {
+ return false;
+ }
diff --git a/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
new file mode 100644
index 0000000000..6cc74914d2
--- /dev/null
+++ b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
@@ -0,0 +1,29 @@
+# HG changeset patch
+# Parent 9ded7a9f94a863dfa1f3227d3013367f51b8b522
+# User Nicholas Cameron <ncameron@mozilla.com>
+Bug 765038; fix a Clang compilation bug in Skia; r=jwatt
+
+diff --git a/gfx/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/src/sfnt/SkOTTable_head.h
+--- a/gfx/skia/src/sfnt/SkOTTable_head.h
++++ b/gfx/skia/src/sfnt/SkOTTable_head.h
+@@ -109,18 +109,18 @@ struct SkOTTableHead {
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((FullyMixedDirectionalGlyphs, SkTEndian_SwapBE16(0)))
+ ((OnlyStronglyLTR, SkTEndian_SwapBE16(1)))
+ ((StronglyLTR, SkTEndian_SwapBE16(2)))
+- ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-1))))
+- ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-2))))
++ ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-1)))))
++ ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-2)))))
+ SK_SEQ_END,
+ (value)SK_SEQ_END)
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((ShortOffsets, SkTEndian_SwapBE16(0)))
+ ((LongOffsets, SkTEndian_SwapBE16(1)))
+ SK_SEQ_END,
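
SkTEndian_SwapBE16(-1) pushes a negative int through a 16-bit byte swap, which trips clang's conversion warnings; casting through the unsigned 16-bit type first keeps every intermediate value in range. Roughly, with a hypothetical swap macro:

    #include <cstdint>

    // Hypothetical stand-in for SkTEndian_SwapBE16: swap the two bytes of n.
    #define SWAP_BE16(n) static_cast<uint16_t>((((n) >> 8) & 0xFF) | (((n) & 0xFF) << 8))

    // -1 becomes 0xFFFF before the swap, so no signed value is shifted;
    // all-ones swaps to all-ones, and the final cast restores the enum's type.
    const int16_t onlyStronglyRTL =
        static_cast<int16_t>(SWAP_BE16(static_cast<uint16_t>(-1)));
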
diff --git a/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
new file mode 100644
index 0000000000..174dcb9bce
--- /dev/null
+++ b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
@@ -0,0 +1,865 @@
+From: David Zbarsky <dzbarsky@gmail.com>
+Bug 766017 - Fix some skia warnings r=gw280
+
+diff --git a/gfx/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/include/utils/mac/SkCGUtils.h
+--- a/gfx/skia/include/utils/mac/SkCGUtils.h
++++ b/gfx/skia/include/utils/mac/SkCGUtils.h
+@@ -39,18 +39,16 @@ static inline CGImageRef SkCreateCGImage
+ /**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+ void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output);
+-
+ /**
+ * Return a provider that wraps the specified stream. It will become an
+ * owner of the stream, so the caller must still manage its ownership.
+ *
+ * To hand-off ownership of the stream to the provider, the caller must do
+ * something like the following:
+ *
+ * SkStream* stream = new ...;
+diff --git a/gfx/skia/src/core/SkAAClip.cpp b/gfx/skia/src/core/SkAAClip.cpp
+--- a/gfx/skia/src/core/SkAAClip.cpp
++++ b/gfx/skia/src/core/SkAAClip.cpp
+@@ -246,17 +246,17 @@ static void count_left_right_zeros(const
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+ }
+
+-#ifdef SK_DEBUG
++#if 0
+ static void test_count_left_right_zeros() {
+ static bool gOnce;
+ if (gOnce) {
+ return;
+ }
+ gOnce = true;
+
+ const uint8_t data0[] = { 0, 0, 10, 0xFF };
+@@ -1319,22 +1319,16 @@ bool SkAAClip::setPath(const SkPath& pat
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ typedef void (*RowProc)(SkAAClip::Builder&, int bottom,
+ const uint8_t* rowA, const SkIRect& rectA,
+ const uint8_t* rowB, const SkIRect& rectB);
+
+-static void sectRowProc(SkAAClip::Builder& builder, int bottom,
+- const uint8_t* rowA, const SkIRect& rectA,
+- const uint8_t* rowB, const SkIRect& rectB) {
+-
+-}
+-
+ typedef U8CPU (*AlphaProc)(U8CPU alphaA, U8CPU alphaB);
+
+ static U8CPU sectAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // Multiply
+ return SkMulDiv255Round(alphaA, alphaB);
+ }
+
+ static U8CPU unionAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+@@ -1429,31 +1423,16 @@ private:
+ static void adjust_row(RowIter& iter, int& leftA, int& riteA, int rite) {
+ if (rite == riteA) {
+ iter.next();
+ leftA = iter.left();
+ riteA = iter.right();
+ }
+ }
+
+-static bool intersect(int& min, int& max, int boundsMin, int boundsMax) {
+- SkASSERT(min < max);
+- SkASSERT(boundsMin < boundsMax);
+- if (min >= boundsMax || max <= boundsMin) {
+- return false;
+- }
+- if (min < boundsMin) {
+- min = boundsMin;
+- }
+- if (max > boundsMax) {
+- max = boundsMax;
+- }
+- return true;
+-}
+-
+ static void operatorX(SkAAClip::Builder& builder, int lastY,
+ RowIter& iterA, RowIter& iterB,
+ AlphaProc proc, const SkIRect& bounds) {
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+@@ -1970,34 +1949,33 @@ static void small_bzero(void* dst, size_
+ static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+ }
+ static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha));
++ SkMulDiv255Round(g, alpha),
++ SkMulDiv255Round(b, alpha));
+ }
+ static inline SkPMColor mergeOne(SkPMColor value, unsigned alpha) {
+ unsigned a = SkGetPackedA32(value);
+ unsigned r = SkGetPackedR32(value);
+ unsigned g = SkGetPackedG32(value);
+ unsigned b = SkGetPackedB32(value);
+ return SkPackARGB32(SkMulDiv255Round(a, alpha),
+ SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+ }
+
+ template <typename T> void mergeT(const T* SK_RESTRICT src, int srcN,
+ const uint8_t* SK_RESTRICT row, int rowN,
+ T* SK_RESTRICT dst) {
+- SkDEBUGCODE(int accumulated = 0;)
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = SkMin32(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+diff --git a/gfx/skia/src/core/SkBlitMask_D32.cpp b/gfx/skia/src/core/SkBlitMask_D32.cpp
+--- a/gfx/skia/src/core/SkBlitMask_D32.cpp
++++ b/gfx/skia/src/core/SkBlitMask_D32.cpp
+@@ -268,107 +268,49 @@ bool SkBlitMask::BlitColor(const SkBitma
+ return true;
+ }
+ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////
+
+-static void BW_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- if (m & 0x40) { dst[1] = SkPMSrcOver(src[1], dst[1]); }
+- if (m & 0x20) { dst[2] = SkPMSrcOver(src[2], dst[2]); }
+- if (m & 0x10) { dst[3] = SkPMSrcOver(src[3], dst[3]); }
+- if (m & 0x08) { dst[4] = SkPMSrcOver(src[4], dst[4]); }
+- if (m & 0x04) { dst[5] = SkPMSrcOver(src[5], dst[5]); }
+- if (m & 0x02) { dst[6] = SkPMSrcOver(src[6], dst[6]); }
+- if (m & 0x01) { dst[7] = SkPMSrcOver(src[7], dst[7]); }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+-static void BW_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = src[0]; }
+- if (m & 0x40) { dst[1] = src[1]; }
+- if (m & 0x20) { dst[2] = src[2]; }
+- if (m & 0x10) { dst[3] = src[3]; }
+- if (m & 0x08) { dst[4] = src[4]; }
+- if (m & 0x04) { dst[5] = src[5]; }
+- if (m & 0x02) { dst[6] = src[6]; }
+- if (m & 0x01) { dst[7] = src[7]; }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+ static void A8_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (mask[i]) {
+ dst[i] = SkBlendARGB32(src[i], dst[i], mask[i]);
+ }
+ }
+ }
+
+ // expand the steps that SkAlphaMulQ performs, but this way we can
+-// exand.. add.. combine
++// expand.. add.. combine
+ // instead of
+ // expand..combine add expand..combine
+ //
+ #define EXPAND0(v, m, s) ((v) & (m)) * (s)
+ #define EXPAND1(v, m, s) (((v) >> 8) & (m)) * (s)
+ #define COMBINE(e0, e1, m) ((((e0) >> 8) & (m)) | ((e1) & ~(m)))
+
+ static void A8_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+- const uint32_t rbmask = gMask_00FF00FF;
+ for (int i = 0; i < count; ++i) {
+ int m = mask[i];
+ if (m) {
+ m += (m >> 7);
+ #if 1
+ // this is slightly slower than the expand/combine version, but it
+ // is much closer to the old results, so we use it for now to reduce
+ // rebaselining.
+ dst[i] = SkAlphaMulQ(src[i], m) + SkAlphaMulQ(dst[i], 256 - m);
+ #else
++ const uint32_t rbmask = gMask_00FF00FF;
+ uint32_t v = src[i];
+ uint32_t s0 = EXPAND0(v, rbmask, m);
+ uint32_t s1 = EXPAND1(v, rbmask, m);
+ v = dst[i];
+ uint32_t d0 = EXPAND0(v, rbmask, m);
+ uint32_t d1 = EXPAND1(v, rbmask, m);
+ dst[i] = COMBINE(s0 + d0, s1 + d1, rbmask);
+ #endif
+@@ -559,17 +501,17 @@ SkBlitMask::RowProc SkBlitMask::RowFacto
+ // make this opt-in until chrome can rebaseline
+ RowProc proc = PlatformRowProcs(config, format, flags);
+ if (proc) {
+ return proc;
+ }
+
+ static const RowProc gProcs[] = {
+ // need X coordinate to handle BW
+- NULL, NULL, //(RowProc)BW_RowProc_Blend, (RowProc)BW_RowProc_Opaque,
++ NULL, NULL,
+ (RowProc)A8_RowProc_Blend, (RowProc)A8_RowProc_Opaque,
+ (RowProc)LCD16_RowProc_Blend, (RowProc)LCD16_RowProc_Opaque,
+ (RowProc)LCD32_RowProc_Blend, (RowProc)LCD32_RowProc_Opaque,
+ };
+
+ int index;
+ switch (config) {
+ case SkBitmap::kARGB_8888_Config:
+diff --git a/gfx/skia/src/core/SkConcaveToTriangles.cpp b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+--- a/gfx/skia/src/core/SkConcaveToTriangles.cpp
++++ b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+@@ -37,17 +37,16 @@
+ #include "SkTDArray.h"
+ #include "SkGeometry.h"
+ #include "SkTSort.h"
+
+ // This is used to prevent runaway code bugs, and can probably be removed after
+ // the code has been proven robust.
+ #define kMaxCount 1000
+
+-#define DEBUG
+ #ifdef DEBUG
+ //------------------------------------------------------------------------------
+ // Debugging support
+ //------------------------------------------------------------------------------
+
+ #include <cstdio>
+ #include <stdarg.h>
+
+diff --git a/gfx/skia/src/core/SkPath.cpp b/gfx/skia/src/core/SkPath.cpp
+--- a/gfx/skia/src/core/SkPath.cpp
++++ b/gfx/skia/src/core/SkPath.cpp
+@@ -469,17 +469,16 @@ void SkPath::incReserve(U16CPU inc) {
+ fPts.setReserve(fPts.count() + inc);
+
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+- int vc = fVerbs.count();
+ SkPoint* pt;
+
+ // remember our index
+ fLastMoveToIndex = fPts.count();
+
+ pt = fPts.append();
+ *fVerbs.append() = kMove_Verb;
+ pt->set(x, y);
+@@ -1163,17 +1162,16 @@ void SkPath::reversePathTo(const SkPath&
+ }
+ pts -= gPtsInVerb[verbs[i]];
+ }
+ }
+
+ void SkPath::reverseAddPath(const SkPath& src) {
+ this->incReserve(src.fPts.count());
+
+- const SkPoint* startPts = src.fPts.begin();
+ const SkPoint* pts = src.fPts.end();
+ const uint8_t* startVerbs = src.fVerbs.begin();
+ const uint8_t* verbs = src.fVerbs.end();
+
+ fIsOval = false;
+
+ bool needMove = true;
+ bool needClose = false;
+diff --git a/gfx/skia/src/core/SkRegion.cpp b/gfx/skia/src/core/SkRegion.cpp
+--- a/gfx/skia/src/core/SkRegion.cpp
++++ b/gfx/skia/src/core/SkRegion.cpp
+@@ -920,20 +920,16 @@ static int operate(const SkRegion::RunTy
+ /* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that represents (i.e. number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the const overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+-static int count_to_intervals(int count) {
+- SkASSERT(count >= 6); // a single rect is 6 values
+- return (count - 4) >> 1;
+-}
+
+ /* Given a number of intervals, what is the worst case representation of that
+ many intervals?
+
+ Worst case (from a storage perspective), is a vertical stack of single
+ intervals: TOP + N * (BOTTOM INTERVALCOUNT LEFT RIGHT SENTINEL) + SENTINEL
+ */
+ static int intervals_to_count(int intervals) {
+diff --git a/gfx/skia/src/core/SkScalerContext.cpp b/gfx/skia/src/core/SkScalerContext.cpp
+--- a/gfx/skia/src/core/SkScalerContext.cpp
++++ b/gfx/skia/src/core/SkScalerContext.cpp
+@@ -336,44 +336,16 @@ SK_ERROR:
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ // put a valid value here, in case it was earlier set to
+ // MASK_FORMAT_JUST_ADVANCE
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ }
+
+-static bool isLCD(const SkScalerContext::Rec& rec) {
+- return SkMask::kLCD16_Format == rec.fMaskFormat ||
+- SkMask::kLCD32_Format == rec.fMaskFormat;
+-}
+-
+-static uint16_t a8_to_rgb565(unsigned a8) {
+- return SkPackRGB16(a8 >> 3, a8 >> 2, a8 >> 3);
+-}
+-
+-static void copyToLCD16(const SkBitmap& src, const SkMask& dst) {
+- SkASSERT(SkBitmap::kA8_Config == src.config());
+- SkASSERT(SkMask::kLCD16_Format == dst.fFormat);
+-
+- const int width = dst.fBounds.width();
+- const int height = dst.fBounds.height();
+- const uint8_t* srcP = src.getAddr8(0, 0);
+- size_t srcRB = src.rowBytes();
+- uint16_t* dstP = (uint16_t*)dst.fImage;
+- size_t dstRB = dst.fRowBytes;
+- for (int y = 0; y < height; ++y) {
+- for (int x = 0; x < width; ++x) {
+- dstP[x] = a8_to_rgb565(srcP[x]);
+- }
+- srcP += srcRB;
+- dstP = (uint16_t*)((char*)dstP + dstRB);
+- }
+-}
+-
+ #define SK_FREETYPE_LCD_LERP 160
+
+ static int lerp(int start, int end) {
+ SkASSERT((unsigned)SK_FREETYPE_LCD_LERP <= 256);
+ return start + ((end - start) * (SK_FREETYPE_LCD_LERP) >> 8);
+ }
+
+ static uint16_t packLCD16(unsigned r, unsigned g, unsigned b) {
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -230,52 +230,16 @@ void SuperBlitter::blitH(int x, int y, i
+ fOffsetX);
+
+ #ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+ #endif
+ }
+
+-static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+- int n, U8CPU riteA) {
+- SkASSERT(leftA <= 0xFF);
+- SkASSERT(riteA <= 0xFF);
+-
+- int16_t* run = runs.fRuns;
+- uint8_t* aa = runs.fAlpha;
+-
+- if (ileft > 0) {
+- run[0] = ileft;
+- aa[0] = 0;
+- run += ileft;
+- aa += ileft;
+- }
+-
+- SkASSERT(leftA < 0xFF);
+- if (leftA > 0) {
+- *run++ = 1;
+- *aa++ = leftA;
+- }
+-
+- if (n > 0) {
+- run[0] = n;
+- aa[0] = 0xFF;
+- run += n;
+- aa += n;
+- }
+-
+- SkASSERT(riteA < 0xFF);
+- if (riteA > 0) {
+- *run++ = 1;
+- *aa++ = riteA;
+- }
+- run[0] = 0;
+-}
+-
+ void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -865,45 +865,16 @@ bool Linear_Gradient::setContext(const S
+ } while (0)
+
+ namespace {
+
+ typedef void (*LinearShadeProc)(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* dstC, const SkPMColor* cache,
+ int toggle, int count);
+
+-// This function is deprecated, and will be replaced by
+-// shadeSpan_linear_vertical_lerp() once Chrome has been weaned off of it.
+-void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+- SkPMColor* SK_RESTRICT dstC,
+- const SkPMColor* SK_RESTRICT cache,
+- int toggle, int count) {
+- if (proc == clamp_tileproc) {
+- // Read out clamp values from beginning/end of the cache. No need to lerp
+- // or dither
+- if (fx < 0) {
+- sk_memset32(dstC, cache[-1], count);
+- return;
+- } else if (fx > 0xFFFF) {
+- sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+- return;
+- }
+- }
+-
+- // We're a vertical gradient, so no change in a span.
+- // If colors change sharply across the gradient, dithering is
+- // insufficient (it subsamples the color space) and we need to lerp.
+- unsigned fullIndex = proc(fx);
+- unsigned fi = fullIndex >> (16 - Gradient_Shader::kCache32Bits);
+- sk_memset32_dither(dstC,
+- cache[toggle + fi],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi],
+- count);
+-}
+-
+ // Linear interpolation (lerp) is unnecessary if there are no sharp
+ // discontinuities in the gradient - which must be true if there are
+ // only 2 colors - but it's cheap.
+ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ if (proc == clamp_tileproc) {
+@@ -2131,16 +2102,18 @@ protected:
+ buffer.writePoint(fCenter);
+ }
+
+ private:
+ typedef Gradient_Shader INHERITED;
+ const SkPoint fCenter;
+ };
+
++#ifndef SK_SCALAR_IS_FLOAT
++
+ #ifdef COMPUTE_SWEEP_TABLE
+ #define PI 3.14159265
+ static bool gSweepTableReady;
+ static uint8_t gSweepTable[65];
+
+ /* Our table stores precomputed values for atan: [0...1] -> [0..PI/4]
+ We scale the results to [0..32]
+ */
+@@ -2168,20 +2141,23 @@ static const uint8_t gSweepTable[] = {
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26,
+ 26, 27, 27, 27, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32,
+ 32
+ };
+ static const uint8_t* build_sweep_table() { return gSweepTable; }
+ #endif
+
++#endif
++
+ // divide numer/denom, with a bias of 6bits. Assumes numer <= denom
+ // and denom != 0. Since our table is 6bits big (+1), this is a nice fit.
+ // Same as (but faster than) SkFixedDiv(numer, denom) >> 10
+
++#ifndef SK_SCALAR_IS_FLOAT
+ //unsigned div_64(int numer, int denom);
+ static unsigned div_64(int numer, int denom) {
+ SkASSERT(numer <= denom);
+ SkASSERT(numer > 0);
+ SkASSERT(denom > 0);
+
+ int nbits = SkCLZ(numer);
+ int dbits = SkCLZ(denom);
+@@ -2294,16 +2270,17 @@ static unsigned atan_0_90(SkFixed y, SkF
+ result = 64 - result;
+ // pin to 63
+ result -= result >> 6;
+ }
+
+ SkASSERT(result <= 63);
+ return result;
+ }
++#endif
+
+ // returns angle in a circle [0..2PI) -> [0..255]
+ #ifdef SK_SCALAR_IS_FLOAT
+ static unsigned SkATan2_255(float y, float x) {
+ // static const float g255Over2PI = 255 / (2 * SK_ScalarPI);
+ static const float g255Over2PI = 40.584510488433314f;
+
+ float result = sk_float_atan2(y, x);
+diff --git a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+--- a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
++++ b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+@@ -112,17 +112,17 @@ void BlitRect32_OpaqueWide_SSE2(SkPMColo
+ }
+
+ void ColorRect32_SSE2(SkPMColor* destination,
+ int width, int height,
+ size_t rowBytes, uint32_t color) {
+ if (0 == height || 0 == width || 0 == color) {
+ return;
+ }
+- unsigned colorA = SkGetPackedA32(color);
++ //unsigned colorA = SkGetPackedA32(color);
+ //if (255 == colorA) {
+ //if (width < 31) {
+ //BlitRect32_OpaqueNarrow_SSE2(destination, width, height,
+ //rowBytes, color);
+ //} else {
+ //BlitRect32_OpaqueWide_SSE2(destination, width, height,
+ //rowBytes, color);
+ //}
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -75,20 +75,16 @@ static CGFloat CGRectGetMinY_inline(cons
+ static CGFloat CGRectGetMaxY_inline(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+ }
+
+ static CGFloat CGRectGetWidth_inline(const CGRect& rect) {
+ return rect.size.width;
+ }
+
+-static CGFloat CGRectGetHeight(const CGRect& rect) {
+- return rect.size.height;
+-}
+-
+ ///////////////////////////////////////////////////////////////////////////////
+
+ static void sk_memset_rect32(uint32_t* ptr, uint32_t value, size_t width,
+ size_t height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+@@ -125,28 +121,30 @@ static void sk_memset_rect32(uint32_t* p
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+ }
+
++#if 0
+ // Potentially this should be made (1) public (2) optimized when width is small.
+ // Also might want 16 and 32 bit version
+ //
+ static void sk_memset_rect(void* ptr, U8CPU byte, size_t width, size_t height,
+ size_t rowBytes) {
+ uint8_t* dst = (uint8_t*)ptr;
+ while (height) {
+ memset(dst, byte, width);
+ dst += rowBytes;
+ height -= 1;
+ }
+ }
++#endif
+
+ #include <sys/utsname.h>
+
+ typedef uint32_t CGRGBPixel;
+
+ static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+ }
+@@ -250,23 +248,16 @@ static CGAffineTransform MatrixToCGAffin
+ return CGAffineTransformMake(ScalarToCG(matrix[SkMatrix::kMScaleX]) * sx,
+ -ScalarToCG(matrix[SkMatrix::kMSkewY]) * sy,
+ -ScalarToCG(matrix[SkMatrix::kMSkewX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMScaleY]) * sy,
+ ScalarToCG(matrix[SkMatrix::kMTransX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMTransY]) * sy);
+ }
+
+-static void CGAffineTransformToMatrix(const CGAffineTransform& xform, SkMatrix* matrix) {
+- matrix->setAll(
+- CGToScalar(xform.a), CGToScalar(xform.c), CGToScalar(xform.tx),
+- CGToScalar(xform.b), CGToScalar(xform.d), CGToScalar(xform.ty),
+- 0, 0, SK_Scalar1);
+-}
+-
+ static SkScalar getFontScale(CGFontRef cgFont) {
+ int unitsPerEm = CGFontGetUnitsPerEm(cgFont);
+ return SkScalarInvert(SkIntToScalar(unitsPerEm));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ #define BITMAP_INFO_RGB (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host)
+@@ -1075,16 +1066,17 @@ static const uint8_t* getInverseTable(bo
+ if (!gInited) {
+ build_power_table(gWhiteTable, 1.5f);
+ build_power_table(gTable, 2.2f);
+ gInited = true;
+ }
+ return isWhite ? gWhiteTable : gTable;
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static const uint8_t* getGammaTable(U8CPU luminance) {
+ static uint8_t gGammaTables[4][256];
+ static bool gInited;
+ if (!gInited) {
+ #if 1
+ float start = 1.1;
+ float stop = 2.1;
+ for (int i = 0; i < 4; ++i) {
+@@ -1097,45 +1089,49 @@ static const uint8_t* getGammaTable(U8CP
+ build_power_table(gGammaTables[2], 1);
+ build_power_table(gGammaTables[3], 1);
+ #endif
+ gInited = true;
+ }
+ SkASSERT(0 == (luminance >> 8));
+ return gGammaTables[luminance >> 6];
+ }
++#endif
+
++#ifndef SK_USE_COLOR_LUMINANCE
+ static void invertGammaMask(bool isWhite, CGRGBPixel rgb[], int width,
+ int height, size_t rb) {
+ const uint8_t* table = getInverseTable(isWhite);
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ uint32_t c = rgb[x];
+ int r = (c >> 16) & 0xFF;
+ int g = (c >> 8) & 0xFF;
+ int b = (c >> 0) & 0xFF;
+ rgb[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ rgb = (CGRGBPixel*)((char*)rgb + rb);
+ }
+ }
++#endif
+
+ static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= (CGRGBPixel_getAlpha(*src++) >> 7) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static int lerpScale(int dst, int src, int scale) {
+ return dst + (scale * (src - dst) >> 23);
+ }
+
+ static CGRGBPixel lerpPixel(CGRGBPixel dst, CGRGBPixel src,
+ int scaleR, int scaleG, int scaleB) {
+ int sr = (src >> 16) & 0xFF;
+ int sg = (src >> 8) & 0xFF;
+@@ -1147,37 +1143,31 @@ static CGRGBPixel lerpPixel(CGRGBPixel d
+ int rr = lerpScale(dr, sr, scaleR);
+ int rg = lerpScale(dg, sg, scaleG);
+ int rb = lerpScale(db, sb, scaleB);
+ return (rr << 16) | (rg << 8) | rb;
+ }
+
+ static void lerpPixels(CGRGBPixel dst[], const CGRGBPixel src[], int width,
+ int height, int rowBytes, int lumBits) {
+-#ifdef SK_USE_COLOR_LUMINANCE
+ int scaleR = (1 << 23) * SkColorGetR(lumBits) / 0xFF;
+ int scaleG = (1 << 23) * SkColorGetG(lumBits) / 0xFF;
+ int scaleB = (1 << 23) * SkColorGetB(lumBits) / 0xFF;
+-#else
+- int scale = (1 << 23) * lumBits / SkScalerContext::kLuminance_Max;
+- int scaleR = scale;
+- int scaleG = scale;
+- int scaleB = scale;
+-#endif
+
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ // bit-not the src, since it was drawn from black, so we need the
+ // compliment of those bits
+ dst[x] = lerpPixel(dst[x], ~src[x], scaleR, scaleG, scaleB);
+ }
+ src = (CGRGBPixel*)((char*)src + rowBytes);
+ dst = (CGRGBPixel*)((char*)dst + rowBytes);
+ }
+ }
++#endif
+
+ #if 1
+ static inline int r32_to_16(int x) { return SkR32ToR16(x); }
+ static inline int g32_to_16(int x) { return SkG32ToG16(x); }
+ static inline int b32_to_16(int x) { return SkB32ToB16(x); }
+ #else
+ static inline int round8to5(int x) {
+ return (x + 3 - (x >> 5) + (x >> 7)) >> 3;
+@@ -1212,22 +1202,21 @@ static inline uint32_t rgb_to_lcd32(CGRG
+ return SkPackARGB32(0xFF, r, g, b);
+ }
+
+ #define BLACK_LUMINANCE_LIMIT 0x40
+ #define WHITE_LUMINANCE_LIMIT 0xA0
+
+ void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = (CGGlyph) glyph.getGlyphID(fBaseGlyphCount);
+-
+ const bool isLCD = isLCDFormat(glyph.fMaskFormat);
++#ifdef SK_USE_COLOR_LUMINANCE
+ const bool isBW = SkMask::kBW_Format == glyph.fMaskFormat;
+ const bool isA8 = !isLCD && !isBW;
+-
+-#ifdef SK_USE_COLOR_LUMINANCE
++
+ unsigned lumBits = fRec.getLuminanceColor();
+ uint32_t xorMask = 0;
+
+ if (isA8) {
+ // for A8, we just want a component (they're all the same)
+ lumBits = SkColorGetR(lumBits);
+ }
+ #else
+diff --git a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+--- a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
++++ b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+@@ -163,59 +163,8 @@ private:
+ CGPDFDocumentRef fDoc;
+ };
+
+ static void CGDataProviderReleaseData_FromMalloc(void*, const void* data,
+ size_t size) {
+ sk_free((void*)data);
+ }
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output) {
+- size_t size = stream->getLength();
+- void* ptr = sk_malloc_throw(size);
+- stream->read(ptr, size);
+- CGDataProviderRef data = CGDataProviderCreateWithData(NULL, ptr, size,
+- CGDataProviderReleaseData_FromMalloc);
+- if (NULL == data) {
+- return false;
+- }
+-
+- CGPDFDocumentRef pdf = CGPDFDocumentCreateWithProvider(data);
+- CGDataProviderRelease(data);
+- if (NULL == pdf) {
+- return false;
+- }
+- SkAutoPDFRelease releaseMe(pdf);
+-
+- CGPDFPageRef page = CGPDFDocumentGetPage(pdf, 1);
+- if (NULL == page) {
+- return false;
+- }
+-
+- CGRect bounds = CGPDFPageGetBoxRect(page, kCGPDFMediaBox);
+-
+- int w = (int)CGRectGetWidth(bounds);
+- int h = (int)CGRectGetHeight(bounds);
+-
+- SkBitmap bitmap;
+- bitmap.setConfig(SkBitmap::kARGB_8888_Config, w, h);
+- bitmap.allocPixels();
+- bitmap.eraseColor(SK_ColorWHITE);
+-
+- size_t bitsPerComponent;
+- CGBitmapInfo info;
+- getBitmapInfo(bitmap, &bitsPerComponent, &info, NULL);
+-
+- CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+- CGContextRef ctx = CGBitmapContextCreate(bitmap.getPixels(), w, h,
+- bitsPerComponent, bitmap.rowBytes(),
+- cs, info);
+- CGColorSpaceRelease(cs);
+-
+- if (ctx) {
+- CGContextDrawPDFPage(ctx, page);
+- CGContextRelease(ctx);
+- }
+-
+- output->swap(bitmap);
+- return true;
+-}
+-
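
Besides silencing warnings, the mergeOne() change above fixes a genuine copy-paste bug: the RGB565 path previously packed the red channel into all three slots. A standalone sketch of the corrected per-channel modulation, assuming Skia-style round-to-nearest alpha math:

    #include <cstdint>
    #include <cstdio>

    // alpha in [0, 255]; rounds v * alpha / 255 (the SkMulDiv255Round trick).
    static unsigned MulDiv255Round(unsigned v, unsigned alpha) {
        unsigned p = v * alpha + 128;
        return (p + (p >> 8)) >> 8;
    }

    static uint16_t PackRGB16(unsigned r5, unsigned g6, unsigned b5) {
        return static_cast<uint16_t>((r5 << 11) | (g6 << 5) | b5);
    }

    int main() {
        unsigned r = 31, g = 20, b = 5, alpha = 128;   // arbitrary 5/6/5 components
        uint16_t px = PackRGB16(MulDiv255Round(r, alpha),
                                MulDiv255Round(g, alpha),    // g, not r
                                MulDiv255Round(b, alpha));   // b, not r
        std::printf("0x%04x\n", px);
        return 0;
    }
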
diff --git a/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
new file mode 100644
index 0000000000..e00fd8602e
--- /dev/null
+++ b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
@@ -0,0 +1,400 @@
+# HG changeset patch
+# User Matt Woodrow <mwoodrow@mozilla.com>
+# Date 1339988782 -43200
+# Node ID 1e9dae659ee6c992f719fd4136efbcc5410ded37
+# Parent 946750f6d95febd199fb7b748e9d2c48fd01c8a6
+[mq]: skia-windows-gradients
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -847,16 +847,19 @@ bool Linear_Gradient::setContext(const S
+ fFlags |= SkShader::kConstInY32_Flag;
+ if ((fFlags & SkShader::kHasSpan16_Flag) && !paint.isDither()) {
+ // only claim this if we do have a 16bit mode (i.e. none of our
+ // colors have alpha), and if we are not dithering (which obviously
+ // is not const in Y).
+ fFlags |= SkShader::kConstInY16_Flag;
+ }
+ }
++ if (fStart == fEnd) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ #define NO_CHECK_ITER \
+ do { \
+ unsigned fi = fx >> Gradient_Shader::kCache32Shift; \
+ SkASSERT(fi <= 0xFF); \
+ fx += dx; \
+@@ -976,16 +979,21 @@ void Linear_Gradient::shadeSpan(int x, i
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+ #ifdef USE_DITHER_32BIT_GRADIENT
+ int toggle = ((x ^ y) & 1) * kDitherStride32;
+ #else
+ int toggle = 0;
+ #endif
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1169,16 +1177,21 @@ void Linear_Gradient::shadeSpan16(int x,
+ SkASSERT(count > 0);
+
+ SkPoint srcPt;
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const uint16_t* SK_RESTRICT cache = this->getCache16();
+ int toggle = ((x ^ y) & 1) * kDitherStride16;
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1739,21 +1752,25 @@ void Radial_Gradient::shadeSpan(int x, i
+ possible circles on which the point may fall. Solving for t yields
+ the gradient value to use.
+
+ If a<0, the start circle is entirely contained in the
+ end circle, and one of the roots will be <0 or >1 (off the line
+ segment). If a>0, the start circle falls at least partially
+ outside the end circle (or vice versa), and the gradient
+ defines a "tube" where a point may be on one circle (on the
+- inside of the tube) or the other (outside of the tube). We choose
+- one arbitrarily.
++ inside of the tube) or the other (outside of the tube). We choose
++ the one with the highest t value, as long as the radius that it
++ corresponds to is >=0. In the case where neither root has a positive
++ radius, we don't draw anything.
+
++ XXXmattwoodrow: I've removed this for now since it breaks
++ down when Dr == 0. Is there something else we can do instead?
+ In order to keep the math to within the limits of fixed point,
+- we divide the entire quadratic by Dr^2, and replace
++ we divide the entire quadratic by Dr, and replace
+ (x - Sx)/Dr with x' and (y - Sy)/Dr with y', giving
+
+ [Dx^2 / Dr^2 + Dy^2 / Dr^2 - 1)] * t^2
+ + 2 * [x' * Dx / Dr + y' * Dy / Dr - Sr / Dr] * t
+ + [x'^2 + y'^2 - Sr^2/Dr^2] = 0
+
+ (x' and y' are computed by appending the subtract and scale to the
+ fDstToIndex matrix in the constructor).
+@@ -1763,99 +1780,122 @@ void Radial_Gradient::shadeSpan(int x, i
+ x' and y', if x and y are linear in the span, 'B' can be computed
+ incrementally with a simple delta (db below). If it is not (e.g.,
+ a perspective projection), it must be computed in the loop.
+
+ */
+
+ namespace {
+
+-inline SkFixed two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
+- SkScalar sr2d2, SkScalar foura,
+- SkScalar oneOverTwoA, bool posRoot) {
++inline bool two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
++ SkScalar sr2d2, SkScalar foura,
++ SkScalar oneOverTwoA, SkScalar diffRadius,
++ SkScalar startRadius, SkFixed& t) {
+ SkScalar c = SkScalarSquare(fx) + SkScalarSquare(fy) - sr2d2;
+ if (0 == foura) {
+- return SkScalarToFixed(SkScalarDiv(-c, b));
++ SkScalar result = SkScalarDiv(-c, b);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++ return false;
+ }
+
+ SkScalar discrim = SkScalarSquare(b) - SkScalarMul(foura, c);
+ if (discrim < 0) {
+- discrim = -discrim;
++ return false;
+ }
+ SkScalar rootDiscrim = SkScalarSqrt(discrim);
+- SkScalar result;
+- if (posRoot) {
+- result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
+- } else {
+- result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++
++ // Make sure the results corresponds to a positive radius.
++ SkScalar result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
+ }
+- return SkScalarToFixed(result);
++ result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++
++ return false;
+ }
+
+ typedef void (* TwoPointRadialShadeProc)(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count);
+
+ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+-
+- if (t < 0) {
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else if (t < 0) {
+ *dstC++ = cache[-1];
+ } else if (t > 0xFFFF) {
+ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
+ } else {
+ SkASSERT(t <= 0xFFFF);
+ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
+ }
+
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+ void shadeSpan_twopoint_mirror(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+ void shadeSpan_twopoint_repeat(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+
+
+@@ -1935,17 +1975,16 @@ public:
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar dx, fx = srcPt.fX;
+ SkScalar dy, fy = srcPt.fY;
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+@@ -1954,60 +1993,69 @@ public:
+ dx = SkFixedToScalar(fixedX);
+ dy = SkFixedToScalar(fixedY);
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = fDstToIndex.getScaleX();
+ dy = fDstToIndex.getSkewY();
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalarMul(fDiff.fY, fy) - fStartRadius * fDiffRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+
+ TwoPointRadialShadeProc shadeProc = shadeSpan_twopoint_repeat;
+ if (proc == clamp_tileproc) {
+ shadeProc = shadeSpan_twopoint_clamp;
+ } else if (proc == mirror_tileproc) {
+ shadeProc = shadeSpan_twopoint_mirror;
+ } else {
+ SkASSERT(proc == repeat_tileproc);
+ }
+ (*shadeProc)(fx, dx, fy, dy, b, db,
+- fSr2D2, foura, fOneOverTwoA, posRoot,
++ fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1,
+ dstC, cache, count);
+ } else { // perspective case
+ SkScalar dstX = SkIntToScalar(x);
+ SkScalar dstY = SkIntToScalar(y);
+ for (; count > 0; --count) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ SkScalar fx = srcPt.fX;
+ SkScalar fy = srcPt.fY;
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ }
+ dstX += SK_Scalar1;
+ }
+ }
+ }
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+ // we don't have a span16 proc
+ fFlags &= ~kHasSpan16_Flag;
++
++ // If we might end up wanting to draw nothing as part of the gradient
++ // then we should mark ourselves as not being opaque.
++ if (fA >= 0 || (fDiffRadius == 0 && fCenter1 == fCenter2)) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(Two_Point_Radial_Gradient)
+
+ protected:
+ Two_Point_Radial_Gradient(SkFlattenableReadBuffer& buffer)
+ : INHERITED(buffer),
+@@ -2033,26 +2081,22 @@ private:
+ const SkScalar fRadius1;
+ const SkScalar fRadius2;
+ SkPoint fDiff;
+ SkScalar fStartRadius, fDiffRadius, fSr2D2, fA, fOneOverTwoA;
+
+ void init() {
+ fDiff = fCenter1 - fCenter2;
+ fDiffRadius = fRadius2 - fRadius1;
+- SkScalar inv = SkScalarInvert(fDiffRadius);
+- fDiff.fX = SkScalarMul(fDiff.fX, inv);
+- fDiff.fY = SkScalarMul(fDiff.fY, inv);
+- fStartRadius = SkScalarMul(fRadius1, inv);
++ fStartRadius = fRadius1;
+ fSr2D2 = SkScalarSquare(fStartRadius);
+- fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
++ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SkScalarSquare(fDiffRadius);
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+- fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+@@ -2488,16 +2532,20 @@ SkShader* SkGradientShader::CreateTwoPoi
+ int colorCount,
+ SkShader::TileMode mode,
+ SkUnitMapper* mapper) {
+ if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) {
+ return NULL;
+ }
+ EXPAND_1_COLOR(colorCount);
+
++ if (start == end && startRadius == 0) {
++ return CreateRadial(start, endRadius, colors, pos, colorCount, mode, mapper);
++ }
++
+ return SkNEW_ARGS(Two_Point_Radial_Gradient,
+ (start, startRadius, end, endRadius, colors, pos,
+ colorCount, mode, mapper));
+ }
+
+ SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
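
(For readers following the gradient patch above: a minimal standalone sketch of the new root-selection rule, using plain doubles instead of SkScalar/SkFixed. The function and variable names here are illustrative, not Skia's; the logic mirrors the patched two_point_radial, which keeps a root only when the radius it interpolates, startRadius + t*diffRadius, is non-negative, and whose foura == 0 branch corresponds to the degenerate linear case below.)

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    // Solve a*t^2 + b*t + c = 0 and keep a root t only if the radius it
    // interpolates, startRadius + t*diffRadius, is >= 0. Returns false when
    // no acceptable root exists (the pixel is left undrawn).
    static bool pickGradientRoot(double a, double b, double c,
                                 double startRadius, double diffRadius,
                                 double* t) {
        if (a == 0) {                        // degenerate case: linear in t
            if (b == 0) return false;
            double r = -c / b;
            if (r * diffRadius + startRadius >= 0) { *t = r; return true; }
            return false;
        }
        double discrim = b * b - 4 * a * c;
        if (discrim < 0) return false;       // no real solution: draw nothing
        double root = std::sqrt(discrim);
        // Try the '+' root first, then the '-' root, as the patch does.
        for (double r : { (-b + root) / (2 * a), (-b - root) / (2 * a) }) {
            if (r * diffRadius + startRadius >= 0) { *t = r; return true; }
        }
        return false;
    }

    int main() {
        double t;
        if (pickGradientRoot(1, -3, 2, 0.5, 1.0, &t))
            printf("t = %g\n", t);           // prints t = 2
    }
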
diff --git a/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
new file mode 100644
index 0000000000..719fda1650
--- /dev/null
+++ b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
@@ -0,0 +1,73 @@
+commit 5786f516119bcb677510f3c9256b870c3b5616c8
+Author: George Wright <gwright@mozilla.com>
+Date: Wed Aug 15 23:51:34 2012 -0400
+
+ Bug 740194 - [Skia] Implement a version of SkMemory for Mozilla that uses the infallible mozalloc allocators r=cjones
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index f98ba85..17be191 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -35,6 +35,16 @@
+ commented out, so including it will have no effect.
+ */
+
++/*
++ Override new/delete with Mozilla's allocator, mozalloc
++
++ Ideally we shouldn't need to do this here, but until
++ http://code.google.com/p/skia/issues/detail?id=598 is fixed
++ we need to include this here to override operator new and delete
++*/
++
++#include "mozilla/mozalloc.h"
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* Scalars (the fractional value type in skia) can be implemented either as
+diff --git a/gfx/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+new file mode 100644
+index 0000000..1f16ee5
+--- /dev/null
++++ b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2011 Google Inc.
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "SkTypes.h"
++
++#include "mozilla/mozalloc.h"
++#include "mozilla/mozalloc_abort.h"
++#include "mozilla/mozalloc_oom.h"
++
++void sk_throw() {
++ SkDEBUGFAIL("sk_throw");
++ mozalloc_abort("Abort from sk_throw");
++}
++
++void sk_out_of_memory(void) {
++ SkDEBUGFAIL("sk_out_of_memory");
++ mozalloc_handle_oom(0);
++}
++
++void* sk_malloc_throw(size_t size) {
++ return sk_malloc_flags(size, SK_MALLOC_THROW);
++}
++
++void* sk_realloc_throw(void* addr, size_t size) {
++ return moz_xrealloc(addr, size);
++}
++
++void sk_free(void* p) {
++ free(p);
++}
++
++void* sk_malloc_flags(size_t size, unsigned flags) {
++ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
++}
++
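
(A rough sketch of the allocation-flag dispatch the patch above installs: the THROW flag selects an infallible path that aborts instead of returning NULL, while everything else stays fallible. The xmalloc wrapper below stands in for mozalloc's moz_xmalloc; apart from the moz_* names quoted in the patch, all names here are illustrative.)

    #include <cstdio>
    #include <cstdlib>

    // Stand-in for mozalloc's infallible allocator: never returns NULL.
    static void* xmalloc(size_t size) {
        void* p = malloc(size);
        if (!p) { fprintf(stderr, "OOM: %zu bytes\n", size); abort(); }
        return p;
    }

    enum { MALLOC_THROW = 0x01 };   // mirrors SK_MALLOC_THROW

    // Same shape as the patched sk_malloc_flags.
    static void* my_malloc_flags(size_t size, unsigned flags) {
        return (flags & MALLOC_THROW) ? xmalloc(size) : malloc(size);
    }

    int main() {
        void* p = my_malloc_flags(64, MALLOC_THROW);  // cannot be NULL
        free(p);
    }
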
diff --git a/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
new file mode 100644
index 0000000000..d16ec4b3b4
--- /dev/null
+++ b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
@@ -0,0 +1,14 @@
+Index: gfx/skia/include/core/SkPreConfig.h
+===================================================================
+--- gfx/skia/include/core/SkPreConfig.h (revision 6724)
++++ gfx/skia/include/core/SkPreConfig.h (working copy)
+@@ -94,7 +94,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
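
(The PPC patch above only adds the __PPC__/__PPC64__ spellings so big-endian PowerPC is detected at preprocessing time; a small runtime probe like the following can sanity-check whichever of SK_CPU_BENDIAN/SK_CPU_LENDIAN was picked. This is a generic check, not Skia code.)

    #include <cstdint>
    #include <cstdio>

    // True on big-endian targets: the most significant byte of a
    // multi-byte integer sits at the lowest address.
    static bool isBigEndian() {
        const uint32_t probe = 0x01020304;
        return *reinterpret_cast<const uint8_t*>(&probe) == 0x01;
    }

    int main() {
        printf("big-endian: %s\n", isBigEndian() ? "yes" : "no");
    }
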
diff --git a/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
new file mode 100644
index 0000000000..97404c431b
--- /dev/null
+++ b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
@@ -0,0 +1,39 @@
+From: George Wright <gwright@mozilla.com>
+Date: Thu, 20 Jun 2013 09:21:21 -0400
+Subject: Bug 848491 - Re-apply bug 795538 - Ensure we use the correct colour (and alpha) for the clamp values r=mattwoodrow
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 27a9c46..ce077b5 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -500,15 +500,17 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+
+ // Write the clamp colours into the first and last entries of fCache32
+- fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[0]),
+- SkColorGetG(fOrigColors[0]),
+- SkColorGetB(fOrigColors[0]));
+-
+- fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[fColorCount - 1]),
+- SkColorGetG(fOrigColors[fColorCount - 1]),
+- SkColorGetB(fOrigColors[fColorCount - 1]));
++ fCache32[kCache32ClampLower] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[0]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[fColorCount - 1]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
+
+ return fCache32;
+ }
+--
+1.7.11.7
+
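
(The clamp-colour patch above switches from SkPackARGB32, which ignores the color's own alpha, to premultiplying with the product of the color alpha and the cache alpha. A plain-integer sketch of that computation; mulDiv255Round mirrors Skia's SkMulDiv255Round, and the remaining names are illustrative.)

    #include <cstdint>
    #include <cstdio>

    // Rounded (a*b)/255.
    static uint8_t mulDiv255Round(uint8_t a, uint8_t b) {
        unsigned prod = a * b + 128;
        return (uint8_t)((prod + (prod >> 8)) >> 8);
    }

    // Premultiplied ARGB from an unpremultiplied color plus a paint alpha:
    // combine the two alphas first, then scale each channel by the result.
    static uint32_t premulWithPaintAlpha(uint32_t argb, uint8_t paintAlpha) {
        uint8_t a = mulDiv255Round((uint8_t)(argb >> 24), paintAlpha);
        uint8_t r = mulDiv255Round((uint8_t)(argb >> 16), a);
        uint8_t g = mulDiv255Round((uint8_t)(argb >> 8), a);
        uint8_t b = mulDiv255Round((uint8_t)(argb >> 0), a);
        return ((uint32_t)a << 24) | ((uint32_t)r << 16) |
               ((uint32_t)g << 8) | b;
    }

    int main() {
        // 50%-alpha red under an opaque paint -> premultiplied 0x80800000.
        printf("%08x\n", premulWithPaintAlpha(0x80ff0000, 0xff));
    }
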
diff --git a/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
new file mode 100644
index 0000000000..9bc7ddec46
--- /dev/null
+++ b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# Parent e378875000890099fffcdb4cbc4ab12828ac34ee
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 890539: Annotate SK_COMPILE_ASSERT's typedef as permissibly unused, to fix GCC 4.8 build warning. r=gw280
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -121,18 +121,29 @@ inline void operator delete(void* p) {
+ #define SkDEVCODE(code)
+ #define SK_DEVELOPER_TO_STRING()
+ #endif
+
+ template <bool>
+ struct SkCompileAssert {
+ };
+
++/*
++ * The SK_COMPILE_ASSERT definition creates an otherwise-unused typedef. This
++ * triggers compiler warnings with some versions of gcc, so mark the typedef
++ * as permissibly-unused to disable the warnings.
++ */
++# if defined(__GNUC__)
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
++# else
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE /* nothing */
++# endif
++
+ #define SK_COMPILE_ASSERT(expr, msg) \
+- typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
++ typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE
+
+ /*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
+ *
+ */
+ #define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
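
(For context on the warning being silenced: a condensed sketch of the typedef-based compile assert, with the unused attribute applied as in the patch. COMPILE_ASSERT and ASSERT_UNUSED are illustrative stand-ins for the SK_ names; in C++11, static_assert replaces the whole trick.)

    // An empty template: it exists only so the typedef below has a type
    // that depends on the asserted expression.
    template <bool> struct CompileAssert {};

    // The array extent is 1 when expr holds and -1 (a compile error) when
    // it does not. The unused attribute silences GCC 4.8's new warning
    // about typedefs that are never referenced.
    #if defined(__GNUC__)
    #  define ASSERT_UNUSED __attribute__((unused))
    #else
    #  define ASSERT_UNUSED
    #endif
    #define COMPILE_ASSERT(expr, msg) \
        typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ASSERT_UNUSED

    COMPILE_ASSERT(sizeof(int) >= 4, int_is_at_least_32_bits);  // compiles
    // COMPILE_ASSERT(sizeof(int) == 1, nope);  // error: array size -1

    int main() {}
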
diff --git a/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
new file mode 100644
index 0000000000..864a0af7a9
--- /dev/null
+++ b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
@@ -0,0 +1,217 @@
+diff --git a/gfx/gl/GLContextSkia.cpp b/gfx/gl/GLContextSkia.cpp
+--- a/gfx/gl/GLContextSkia.cpp
++++ b/gfx/gl/GLContextSkia.cpp
+@@ -303,39 +303,47 @@ const GLubyte* glGetString_mozilla(GrGLe
+ if (name == LOCAL_GL_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES 2.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("2.0");
+ }
+ } else if (name == LOCAL_GL_EXTENSIONS) {
+ // Only expose the bare minimum extensions we want to support to ensure a functional Ganesh
+ // as GLContext only exposes certain extensions
+ static bool extensionsStringBuilt = false;
+- static char extensionsString[120];
++ static char extensionsString[256];
+
+ if (!extensionsStringBuilt) {
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_texture_format_BGRA8888)) {
+ strcpy(extensionsString, "GL_EXT_texture_format_BGRA8888 ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_OES_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_EXT_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_rgb8_rgba8)) {
+ strcat(extensionsString, "GL_OES_rgb8_rgba8 ");
+ }
+
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_bgra)) {
++ strcat(extensionsString, "GL_EXT_bgra ");
++ }
++
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_read_format_bgra)) {
++ strcat(extensionsString, "GL_EXT_read_format_bgra ");
++ }
++
+ extensionsStringBuilt = true;
+ }
+
+ return reinterpret_cast<const GLubyte*>(extensionsString);
+
+ } else if (name == LOCAL_GL_SHADING_LANGUAGE_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES GLSL ES 1.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("1.10");
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.cpp b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.cpp
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+@@ -1,18 +1,18 @@
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+-
++#include <algorithm>
+ #include "GrGpuGL.h"
+ #include "GrGLStencilBuffer.h"
+ #include "GrGLPath.h"
+ #include "GrGLShaderBuilder.h"
+ #include "GrTemplates.h"
+ #include "GrTypes.h"
+ #include "SkTemplates.h"
+
+ static const GrGLuint GR_MAX_GLUINT = ~0U;
+ static const GrGLint GR_INVAL_GLINT = ~0;
+@@ -1381,29 +1381,67 @@ bool GrGpuGL::readPixelsWillPayForYFlip(
+ // Note the rowBytes might be tight to the passed in data, but if data
+ // gets clipped in x to the target the rowBytes will no longer be tight.
+ if (left >= 0 && (left + width) < renderTarget->width()) {
+ return 0 == rowBytes ||
+ GrBytesPerPixel(config) * width == rowBytes;
+ } else {
+ return false;
+ }
+ }
+
++static void swizzleRow(void* buffer, int byteLen) {
++ uint8_t* src = (uint8_t*)buffer;
++ uint8_t* end = src + byteLen;
++
++ GrAssert((end - src) % 4 == 0);
++
++ for (; src != end; src += 4) {
++ std::swap(src[0], src[2]);
++ }
++}
++
++bool GrGpuGL::canReadBGRA() const
++{
++ if (kDesktop_GrGLBinding == this->glBinding() ||
++ this->hasExtension("GL_EXT_bgra"))
++ return true;
++
++ if (this->hasExtension("GL_EXT_read_format_bgra")) {
++ GrGLint readFormat = 0;
++ GrGLint readType = 0;
++
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readFormat));
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &readType));
++
++ return readFormat == GR_GL_BGRA && readType == GR_GL_UNSIGNED_BYTE;
++ }
++
++ return false;
++}
++
+ bool GrGpuGL::onReadPixels(GrRenderTarget* target,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ GrGLenum format;
+ GrGLenum type;
+ bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
++ bool needSwizzle = false;
++
++ if (kBGRA_8888_GrPixelConfig == config && !this->canReadBGRA()) {
++ // Read RGBA and swizzle after
++ config = kRGBA_8888_GrPixelConfig;
++ needSwizzle = true;
++ }
++
+ if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
+ return false;
+ }
+ size_t bpp = GrBytesPerPixel(config);
+ if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
+ &left, &top, &width, &height,
+ const_cast<const void**>(&buffer),
+ &rowBytes)) {
+ return false;
+ }
+@@ -1478,35 +1516,46 @@ bool GrGpuGL::onReadPixels(GrRenderTarge
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
++
++ if (needSwizzle) {
++ swizzleRow(top, tightRowBytes);
++ swizzleRow(bottom, tightRowBytes);
++ }
++
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+ } else {
+- GrAssert(readDst != buffer); GrAssert(rowBytes != tightRowBytes);
++ GrAssert(readDst != buffer);
++ GrAssert(rowBytes != tightRowBytes);
+ // copy from readDst to buffer while flipping y
+ // const int halfY = height >> 1;
+ const char* src = reinterpret_cast<const char*>(readDst);
+ char* dst = reinterpret_cast<char*>(buffer);
+ if (flipY) {
+ dst += (height-1) * rowBytes;
+ }
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, tightRowBytes);
++ if (needSwizzle) {
++ swizzleRow(dst, tightRowBytes);
++ }
++
+ src += readDstRowBytes;
+ if (!flipY) {
+ dst += rowBytes;
+ } else {
+ dst -= rowBytes;
+ }
+ }
+ }
+ return true;
+ }
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.h b/gfx/skia/src/gpu/gl/GrGpuGL.h
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.h
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.h
+@@ -243,20 +243,22 @@ private:
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+
+ bool createRenderTargetObjects(int width, int height,
+ GrGLuint texID,
+ GrGLRenderTarget::Desc* desc);
+
+ void fillInConfigRenderableTable();
+
++ bool canReadBGRA() const;
++
+ GrGLContext fGLContext;
+
+ // GL program-related state
+ ProgramCache* fProgramCache;
+ SkAutoTUnref<GrGLProgram> fCurrentProgram;
+
+ ///////////////////////////////////////////////////////////////////////////
+ ///@name Caching of GL State
+ ///@{
+ int fHWActiveTextureUnitIdx;
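
(The readback patch above falls back to reading RGBA and swapping the R and B bytes in place whenever the GL implementation cannot return BGRA directly. A self-contained sketch of that row swizzle, the same shape as the patch's swizzleRow:)

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Swap bytes 0 and 2 of every 4-byte pixel: RGBA <-> BGRA in place.
    static void swizzleRow(void* buffer, int byteLen) {
        assert(byteLen % 4 == 0);
        uint8_t* src = static_cast<uint8_t*>(buffer);
        for (uint8_t* end = src + byteLen; src != end; src += 4) {
            std::swap(src[0], src[2]);
        }
    }

    int main() {
        uint8_t px[4] = { 0x11, 0x22, 0x33, 0x44 };  // R,G,B,A in memory
        swizzleRow(px, sizeof px);
        printf("%02x %02x %02x %02x\n", px[0], px[1], px[2], px[3]);
        // prints: 33 22 11 44
    }
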
diff --git a/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
new file mode 100644
index 0000000000..aff99f75f1
--- /dev/null
+++ b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
@@ -0,0 +1,26 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -325,19 +325,19 @@
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
+ #elif defined(__has_extension)
+ #if __has_extension(cxx_override_control)
+ #define SK_OVERRIDE override
+ #endif
+ #endif
+- #else
+- // Linux GCC ignores "__attribute__((override))" and rejects "override".
+- #define SK_OVERRIDE
++ #endif
++ #ifndef SK_OVERRIDE
++ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_PRINTF_LIKE
+ #if defined(__clang__) || defined(__GNUC__)
+ #define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
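
(The patch above restructures the clang branch so SK_OVERRIDE always ends up defined: to the override keyword where the front end supports it, and to nothing elsewhere. A condensed sketch of the same fallback ladder; MY_OVERRIDE is an illustrative stand-in.)

    // Prefer the real keyword when the compiler advertises it, then fall
    // back to an empty definition so code using the macro still builds.
    #if defined(_MSC_VER)
    #  define MY_OVERRIDE override
    #elif defined(__clang__) && defined(__has_extension)
    #  if __has_extension(cxx_override_control)
    #    define MY_OVERRIDE override
    #  endif
    #endif
    #ifndef MY_OVERRIDE
    #  define MY_OVERRIDE   /* old GCC: expands to nothing */
    #endif

    struct Base {
        virtual void f() {}
        virtual ~Base() {}
    };
    struct Derived : Base {
        void f() MY_OVERRIDE {}  // checked where 'override' is available
    };

    int main() {}
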
diff --git a/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
new file mode 100644
index 0000000000..5c95b54014
--- /dev/null
+++ b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
@@ -0,0 +1,83 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -26,66 +26,78 @@ static void S32A_D565_Opaque(uint16_t* S
+ asm volatile (
+ "1: \n\t"
+ "ldr r3, [%[src]], #4 \n\t"
+ "cmp r3, #0xff000000 \n\t"
+ "blo 2f \n\t"
+ "and r4, r3, #0x0000f8 \n\t"
+ "and r5, r3, #0x00fc00 \n\t"
+ "and r6, r3, #0xf80000 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "lsl r3, r4, #8 \n\t"
+ "orr r3, r3, r5, lsr #5 \n\t"
+ "orr r3, r3, r6, lsr #19 \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "strh r3, [%[dst]], #2 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "2: \n\t"
+ "lsrs r7, r3, #24 \n\t"
+ "beq 3f \n\t"
+ "ldrh r4, [%[dst]] \n\t"
+ "rsb r7, r7, #255 \n\t"
+ "and r6, r4, #0x001f \n\t"
+-#if SK_ARM_ARCH == 6
++#if SK_ARM_ARCH <= 6
+ "lsl r5, r4, #21 \n\t"
+ "lsr r5, r5, #26 \n\t"
+ #else
+ "ubfx r5, r4, #5, #6 \n\t"
+ #endif
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r0, #16] \n\t"
++#endif
+ "lsr r4, r4, #11 \n\t"
+ #ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, r7 \n\t"
+ "smulbb r5, r5, r7 \n\t"
+ "smulbb r4, r4, r7 \n\t"
+ #else
+ "mul r6, r6, r7 \n\t"
+ "mul r5, r5, r7 \n\t"
+ "mul r4, r4, r7 \n\t"
+ #endif
++#if SK_ARM_ARCH >= 6
+ "uxtb r7, r3, ROR #16 \n\t"
+ "uxtb ip, r3, ROR #8 \n\t"
++#else
++ "mov ip, #0xff \n\t"
++ "and r7, ip, r3, ROR #16 \n\t"
++ "and ip, ip, r3, ROR #8 \n\t"
++#endif
+ "and r3, r3, #0xff \n\t"
+ "add r6, r6, #16 \n\t"
+ "add r5, r5, #32 \n\t"
+ "add r4, r4, #16 \n\t"
+ "add r6, r6, r6, lsr #5 \n\t"
+ "add r5, r5, r5, lsr #6 \n\t"
+ "add r4, r4, r4, lsr #5 \n\t"
+ "add r6, r7, r6, lsr #5 \n\t"
+ "add r5, ip, r5, lsr #6 \n\t"
+ "add r4, r3, r4, lsr #5 \n\t"
+ "lsr r6, r6, #3 \n\t"
+ "and r5, r5, #0xfc \n\t"
+ "and r4, r4, #0xf8 \n\t"
+ "orr r6, r6, r5, lsl #3 \n\t"
+ "orr r4, r6, r4, lsl #8 \n\t"
+ "strh r4, [%[dst]], #2 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "subs %[count], %[count], #1 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "3: \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "add %[dst], %[dst], #2 \n\t"
+ "bne 1b \n\t"
+ "4: \n\t"
diff --git a/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
new file mode 100644
index 0000000000..c92bf2aaeb
--- /dev/null
+++ b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
@@ -0,0 +1,94 @@
+# HG changeset patch
+# Parent 979e60d9c09f22eb139643da6de7568b603e1aa1
+
+diff --git a/gfx/skia/include/images/SkImages.h b/gfx/skia/include/images/SkImages.h
+--- a/gfx/skia/include/images/SkImages.h
++++ b/gfx/skia/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
++
+ #include "GrPathRenderer.h"
+
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+@@ -19,8 +22,10 @@ public:
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrReducedClip.h b/gfx/skia/src/gpu/GrReducedClip.h
+--- a/gfx/skia/src/gpu/GrReducedClip.h
++++ b/gfx/skia/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -33,8 +36,10 @@ enum InitialState {
+ void ReduceClipStack(const SkClipStack& stack,
+ const SkIRect& queryBounds,
+ ElementList* result,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
diff --git a/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
new file mode 100644
index 0000000000..f58e7e1659
--- /dev/null
+++ b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# User Ehsan Akhgari <ehsan@mozilla.com>
+
+Bug 945588 - Add include guards to SkConfig8888.h
+
+diff --git a/gfx/skia/src/core/SkConfig8888.h b/gfx/skia/src/core/SkConfig8888.h
+index 96eaef2..36bc9b4 100644
+--- a/gfx/skia/src/core/SkConfig8888.h
++++ b/gfx/skia/src/core/SkConfig8888.h
+@@ -1,16 +1,18 @@
+
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkConfig8888_DEFINED
++#define SkConfig8888_DEFINED
+
+ #include "SkCanvas.h"
+ #include "SkColorPriv.h"
+
+ /**
+ * Converts pixels from one Config8888 to another Config8888
+ */
+ void SkConvertConfig8888Pixels(uint32_t* dstPixels,
+@@ -69,8 +71,10 @@ static inline void SkCopyConfig8888ToBitmap(const SkBitmap& dstBmp,
+ int h = dstBmp.height();
+ size_t dstRowBytes = dstBmp.rowBytes();
+ uint32_t* dstPixels = reinterpret_cast<uint32_t*>(dstBmp.getPixels());
+
+ SkConvertConfig8888Pixels(dstPixels, dstRowBytes, SkCanvas::kNative_Premul_Config8888, srcPixels, srcRowBytes, srcConfig8888, w, h);
+ }
+
+ }
++
++#endif
diff --git a/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
new file mode 100644
index 0000000000..b6b8461213
--- /dev/null
+++ b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
@@ -0,0 +1,148 @@
+# HG changeset patch
+# Parent c8288d0c7a1544a590a0cac9c39397ac10c8a45b
+Bug 974900 - Add missing include guards to Skia headers - r=gw280
+
+diff --git a/gfx/skia/trunk/include/images/SkImages.h b/gfx/skia/trunk/include/images/SkImages.h
+--- a/gfx/skia/trunk/include/images/SkImages.h
++++ b/gfx/skia/trunk/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/core/SkConvolver.h b/gfx/skia/trunk/src/core/SkConvolver.h
+--- a/gfx/skia/trunk/src/core/SkConvolver.h
++++ b/gfx/skia/trunk/src/core/SkConvolver.h
+@@ -8,16 +8,18 @@
+ #include "SkSize.h"
+ #include "SkTypes.h"
+ #include "SkTArray.h"
+
+ // avoid confusion with Mac OS X's math library (Carbon)
+ #if defined(__APPLE__)
+ #undef FloatToConvolutionFixed
+ #undef ConvolutionFixedToFloat
++#undef FloatToFixed
++#undef FixedToFloat
+ #endif
+
+ // Represents a filter in one dimension. Each output pixel has one entry in this
+ // object for the filter values contributing to it. You build up the filter
+ // list by calling AddFilter for each output pixel (in order).
+ //
+ // We do 2-dimensional convolution by first convolving each row by one
+ // SkConvolutionFilter1D, then convolving each column by another one.
+diff --git a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+@@ -3,24 +3,28 @@
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+ #include "GrPathRenderer.h"
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ const GrDrawTarget* target,
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/gpu/GrReducedClip.h b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+--- a/gfx/skia/trunk/src/gpu/GrReducedClip.h
++++ b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -36,8 +39,10 @@ SK_API void ReduceClipStack(const SkClip
+ const SkIRect& queryBounds,
+ ElementList* result,
+ int32_t* resultGenID,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
+diff --git a/gfx/skia/trunk/src/pathops/SkLineParameters.h b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+--- a/gfx/skia/trunk/src/pathops/SkLineParameters.h
++++ b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+@@ -1,14 +1,18 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
++
++#ifndef SkLineParameters_DEFINED
++#define SkLineParameters_DEFINED
++
+ #include "SkPathOpsCubic.h"
+ #include "SkPathOpsLine.h"
+ #include "SkPathOpsQuad.h"
+
+ // Sources
+ // computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
+ // online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+@@ -164,8 +168,10 @@ public:
+ return -a;
+ }
+
+ private:
+ double a;
+ double b;
+ double c;
+ };
++
++#endif
diff --git a/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
new file mode 100644
index 0000000000..05f17000a0
--- /dev/null
+++ b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
@@ -0,0 +1,27 @@
+# HG changeset patch
+# Parent b12f9a408740aa5fd93c296a7d41e1b5f54c1b20
+Bug 974900 - #undef interface defined by windows headers - r=gw280
+
+diff --git a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+--- a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
++++ b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+@@ -9,16 +9,19 @@
+ #ifndef GrGLCaps_DEFINED
+ #define GrGLCaps_DEFINED
+
+ #include "GrDrawTargetCaps.h"
+ #include "GrGLStencilBuffer.h"
+ #include "SkTArray.h"
+ #include "SkTDArray.h"
+
++// defined in Windows headers
++#undef interface
++
+ class GrGLContextInfo;
+
+ /**
+ * Stores some capabilities of a GL context. Most are determined by the GL
+ * version and the extensions string. It also tracks formats that have passed
+ * the FBO completeness test.
+ */
+ class GrGLCaps : public GrDrawTargetCaps {
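
(Background for the #undef above: the Windows COM headers define interface as a macro, so any parameter or member named interface would be rewritten to struct. A minimal reproduction and the fix; the simulated define matches what <objbase.h> effectively does, and the rest of the names are illustrative.)

    #define interface struct   // what the Windows headers effectively do

    // With the macro live, this declaration would not parse:
    //   void setContext(const Interface* interface);

    #undef interface           // the patch's fix

    struct Interface {};
    void setContext(const Interface* interface) { (void)interface; }

    int main() {}
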
diff --git a/gfx/skia/patches/archive/SkPostConfig.patch b/gfx/skia/patches/archive/SkPostConfig.patch
new file mode 100644
index 0000000000..d32341f4ea
--- /dev/null
+++ b/gfx/skia/patches/archive/SkPostConfig.patch
@@ -0,0 +1,32 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -277,19 +277,28 @@
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
diff --git a/gfx/skia/patches/archive/arm-fixes.patch b/gfx/skia/patches/archive/arm-fixes.patch
new file mode 100644
index 0000000000..d9fa430df0
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-fixes.patch
@@ -0,0 +1,191 @@
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -148,20 +148,17 @@ static inline bool SkIsPow2(int value) {
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /** SkMulS16(a, b) multiplies a * b, but requires that a and b are both int16_t.
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+ int32_t product;
+ asm("smulbb %0, %1, %2 \n"
+ : "=r"(product)
+ : "r"(x), "r"(y)
+ );
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -300,8 +300,53 @@
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -6,17 +6,17 @@
+ * found in the LICENSE file.
+ */
+
+
+ #include "SkBitmapProcState.h"
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count,
+ uint16_t* SK_RESTRICT colors) __attribute__((optimize("O1")));
+
+ void SI8_D16_nofilter_DX_arm(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -177,17 +177,17 @@ void SI8_opaque_D32_nofilter_DX_arm(cons
+ : [xx] "+r" (xx), [count] "+r" (count), [colors] "+r" (colors)
+ : [table] "r" (table), [srcAddr] "r" (srcAddr)
+ : "memory", "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11"
+ );
+ }
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* If we replace a sampleproc, then we null-out the associated shaderproc,
+ otherwise the shader won't even look at the matrix/sampler
+ */
+ void SkBitmapProcState::platformProcs() {
+ bool doFilter = fDoFilter;
+@@ -195,17 +195,17 @@ void SkBitmapProcState::platformProcs()
+ bool justDx = false;
+
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ justDx = true;
+ }
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+ fShaderProc16 = NULL;
+ #endif
+ if (isOpaque) {
+ // this one is only very slighty faster than the C version
+ fSampleProc32 = SI8_opaque_D32_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -669,18 +669,23 @@ static void __attribute((noinline,optimi
+ /* Double Loop */
+ "1: \n\t" /* <double loop> */
+ "ldm %[src]!, {r5, r6} \n\t" /* loading src pointers into r5 and r6 */
+ "ldm %[dst], {r7, r8} \n\t" /* loading dst pointers into r7 and r8 */
+
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+ "rsb r10, r10, #256 \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */
+
+ /* ---------------------- */
+
+ /* src1, src1_scale */
+@@ -739,17 +744,21 @@ static void __attribute((noinline,optimi
+ /* else get into the single loop */
+ /* Single Loop */
+ "2: \n\t" /* <single loop> */
+ "ldr r5, [%[src]], #4 \n\t" /* loading src pointer into r5: r5=src */
+ "ldr r7, [%[dst]] \n\t" /* loading dst pointer into r7: r7=dst */
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+ "rsb r6, r6, #256 \n\t" /* r6 = 255 - r6 + 1 */
+
+ /* src, src_scale */
+ "mul r9, r9, %[alpha] \n\t" /* rb = r9 times scale */
+ "and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
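
(The arm-fixes patch above gates the smulbb fast path behind a computed SK_ARM_HAS_EDSP and falls back to a plain mul elsewhere. A portable sketch of the C fallback that the DSP instruction replaces; the int16 precondition is what makes smulbb legal in the first place. mulS16 is an illustrative stand-in for SkMulS16.)

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Multiply two values known to fit in int16_t. EDSP-capable ARM cores
    // can do this in a single smulbb; a plain mul is correct everywhere.
    static int32_t mulS16(int32_t x, int32_t y) {
        assert((int16_t)x == x && (int16_t)y == y);  // SkMulS16's precondition
        return x * y;
    }

    int main() {
        printf("%d\n", mulS16(-123, 456));  // -56088
    }
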
diff --git a/gfx/skia/patches/archive/arm-opts.patch b/gfx/skia/patches/archive/arm-opts.patch
new file mode 100644
index 0000000000..02ad85c9a7
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-opts.patch
@@ -0,0 +1,41 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -549,17 +549,17 @@ static void S32A_Opaque_BlitRow32_neon(S
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_neon
+
+ #else
+
+ #ifdef TEST_SRC_ALPHA
+ #error The ARM asm version of S32A_Opaque_BlitRow32 does not support TEST_SRC_ALPHA
+ #endif
+
+-static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+
+ SkASSERT(255 == alpha);
+
+ /* Does not support the TEST_SRC_ALPHA case */
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+@@ -646,17 +646,17 @@ static void S32A_Opaque_BlitRow32_arm(Sk
+ );
+ }
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_arm
+ #endif
+
+ /*
+ * ARM asm version of S32A_Blend_BlitRow32
+ */
+-static void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+ "beq 3f \n\t" /* if zero exit */
+
+ "mov r12, #0xff \n\t" /* load the 0xff mask in r12 */
+ "orr r12, r12, r12, lsl #16 \n\t" /* convert it to 0xff00ff in r12 */
diff --git a/gfx/skia/patches/archive/fix-comma-end-enum-list.patch b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
new file mode 100644
index 0000000000..dea36377e8
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
@@ -0,0 +1,380 @@
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -29,17 +29,17 @@ public:
+ SkString fFontName;
+
+ enum FontType {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+ // kNotEmbeddable_Font, the per glyph information will never be populated.
+ FontType fType;
+
+ // fMultiMaster may be true for Type1_Font or CFF_Font.
+ bool fMultiMaster;
+@@ -51,17 +51,17 @@ public:
+ kFixedPitch_Style = 0x00001,
+ kSerif_Style = 0x00002,
+ kSymbolic_Style = 0x00004,
+ kScript_Style = 0x00008,
+ kNonsymbolic_Style = 0x00020,
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent; // Max height above baseline, not including accents.
+ int16_t fDescent; // Max depth below baseline (negative).
+ int16_t fStemV; // Thickness of dominant vertical stem.
+@@ -70,26 +70,26 @@ public:
+ SkIRect fBBox; // The bounding box of all glyphs (in font units).
+
+ // The type of advance data wanted.
+ enum PerGlyphInfo {
+ kNo_PerGlyphInfo = 0x0, // Don't populate any per glyph info.
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+ template <typename Data>
+ struct AdvanceMetric {
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+ uint16_t fEndId;
+ SkTDArray<Data> fAdvance;
+ SkTScopedPtr<AdvanceMetric<Data> > fNext;
+ };
+
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -44,17 +44,17 @@ public:
+
+ //! Public entry-point to return a blit function ptr
+ static Proc Factory(unsigned flags, SkBitmap::Config);
+
+ ///////////// D32 version
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -132,17 +132,17 @@ public:
+ * low byte to high byte: B, G, R, A.
+ */
+ kBGRA_Premul_Config8888,
+ kBGRA_Unpremul_Config8888,
+ /**
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+ * On success (returns true), copy the canvas pixels into the bitmap.
+ * On failure, the bitmap parameter is left unchanged and false is
+ * returned.
+ *
+ * The canvas' pixels are converted to the bitmap's config. The only
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -134,17 +134,17 @@ public:
+ * Return the device's origin: its offset in device coordinates from
+ * the default origin in its canvas' matrix/clip
+ */
+ const SkIPoint& getOrigin() const { return fOrigin; }
+
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ SkPaint::Hinting fHinting;
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -216,17 +216,17 @@ public:
+ SkFactorySet* setFactoryRecorder(SkFactorySet*);
+
+ enum Flags {
+ kCrossProcess_Flag = 0x01,
+ /**
+ * Instructs the writer to inline Factory names as there are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+
+ bool isCrossProcess() const {
+ return SkToBool(fFlags & kCrossProcess_Flag);
+ }
+ bool inlineFactoryNames() const {
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -245,17 +245,17 @@ public:
+ vertically. When rendering subpixel glyphs we need to know which way
+ round they are.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+ static LCDOrientation GetSubpixelOrientation();
+
+ /** LCD color elements can vary in order. For subpixel text we need to know
+ the order which the LCDs uses so that the color fringes are in the
+ correct place.
+@@ -264,17 +264,17 @@ public:
+ cache because it'll have the wrong type of masks cached.
+
+ kNONE_LCDOrder means that the subpixel elements are not spatially
+ separated in any usable fashion.
+ */
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+ static LCDOrder GetSubpixelOrder();
+
+ #ifdef SK_BUILD_FOR_ANDROID
+ ///////////////////////////////////////////////////////////////////////////
+
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -57,17 +57,17 @@ public:
+
+ virtual void flatten(SkFlattenableWriteBuffer& ) {}
+
+ enum BlurType {
+ kNone_BlurType, //!< this maskfilter is not a blur
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+ SkScalar fRadius;
+ bool fIgnoreTransform;
+ bool fHighQuality;
+ };
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -70,17 +70,17 @@ public:
+ kFull_Hinting -> <same as kNormalHinting, unless we are rendering
+ subpixel glyphs, in which case TARGET_LCD or
+ TARGET_LCD_V is used>
+ */
+ enum Hinting {
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+ return static_cast<Hinting>(fHinting);
+ }
+
+ void setHinting(Hinting hintingLevel);
+
+@@ -282,17 +282,17 @@ public:
+ results may not appear the same as if it was drawn twice, filled and
+ then stroked.
+ */
+ enum Style {
+ kFill_Style, //!< fill the geometry
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+ geometries are interpreted (except for drawBitmap, which always assumes
+ kFill_Style).
+ @return the paint's Style
+ */
+ Style getStyle() const { return (Style)fStyle; }
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -172,24 +172,24 @@ public:
+ kHintingBit2_Flag = 0x0100,
+
+ // these should only ever be set if fMaskFormat is LCD16 or LCD32
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 11, // to shift into the other flags above
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ };
+
+ struct Rec {
+ uint32_t fOrigFontID;
+ uint32_t fFontID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -433,17 +433,17 @@ public:
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink) {
+ if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
+ return fPtr;
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -36,17 +36,17 @@ public:
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fFlagsMask selects which flags in the layer's paint should be applied.
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1357,17 +1357,17 @@ bool SkBitmap::extractAlpha(SkBitmap* ds
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ enum {
+ SERIALIZE_PIXELTYPE_NONE,
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ static void writeString(SkFlattenableWriteBuffer& buffer, const char str[]) {
+ size_t len = strlen(str);
+ buffer.write32(len);
+ buffer.writePad(str, len);
+ }
+
+diff --git a/gfx/skia/src/core/SkMatrix.cpp b/gfx/skia/src/core/SkMatrix.cpp
+--- a/gfx/skia/src/core/SkMatrix.cpp
++++ b/gfx/skia/src/core/SkMatrix.cpp
+@@ -1715,17 +1715,17 @@ SkScalar SkMatrix::getMaxStretch() const
+ const SkMatrix& SkMatrix::I() {
+ static SkMatrix gIdentity;
+ static bool gOnce;
+ if (!gOnce) {
+ gIdentity.reset();
+ gOnce = true;
+ }
+ return gIdentity;
+-};
++}
+
+ const SkMatrix& SkMatrix::InvalidMatrix() {
+ static SkMatrix gInvalid;
+ static bool gOnce;
+ if (!gOnce) {
+ gInvalid.setAll(SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax);
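The hunks above all cure the same two C++03 pedantic diagnostics: a comma after the final enumerator, and (in SkMatrix.cpp) a stray semicolon after a function body. In miniature, the forms the patch leaves behind (illustrative only, reusing the enum names from SkPaint.h above):

    // Trailing comma removed: C++03 rejects a comma before '}' in an
    // enumerator list (C99 and C++11 allow it).
    enum Style {
        kFill_Style,
        kStroke_Style,
        kStrokeAndFill_Style,
        kStyleCount          // last enumerator, no trailing comma
    };

    // Stray ';' removed: an extra semicolon after a function body is
    // ill-formed at namespace scope under -pedantic.
    static int identity(int x) {
        return x;
    }                        // no ';' here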
diff --git a/gfx/skia/patches/archive/fix-gradient-clamp.patch b/gfx/skia/patches/archive/fix-gradient-clamp.patch
new file mode 100644
index 0000000000..91481c2c12
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-gradient-clamp.patch
@@ -0,0 +1,211 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -167,16 +167,17 @@ private:
+
+ mutable uint16_t* fCache16; // working ptr. If this is NULL, we need to recompute the cache values
+ mutable SkPMColor* fCache32; // working ptr. If this is NULL, we need to recompute the cache values
+
+ mutable uint16_t* fCache16Storage; // storage for fCache16, allocated on demand
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+ void setCacheAlpha(U8CPU alpha) const;
+ void initCommon();
+
+ typedef SkShader INHERITED;
+ };
+@@ -512,16 +513,31 @@ static inline U8CPU dither_fixed_to_8(Sk
+ * For dithering with premultiply, we want to ceiling the alpha component,
+ * to ensure that it is always >= any color component.
+ */
+ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ n >>= 8;
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+
+ // need to apply paintAlpha to our two endpoints
+ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+ SkFixed da;
+ {
+@@ -613,24 +629,24 @@ const uint16_t* Gradient_Shader::getCach
+ }
+ }
+ return fCache16;
+ }
+
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+ } else {
+ Rec* rec = fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> (16 - kCache32Bits);
+@@ -644,28 +660,31 @@ const SkPMColor* Gradient_Shader::getCac
+ }
+ SkASSERT(prevIndex == kCache32Count - 1);
+ }
+
+ if (fMapper) {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+ mapped[i] = linear[index];
+ mapped[i + kCache32Count] = linear[index + kCache32Count];
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++ //Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+ /*
+ * Because our caller might rebuild the same (logically the same) gradient
+ * over and over, we'd like to return exactly the same "bitmap" if possible,
+ * allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
+ * To do that, we maintain a private cache of built-bitmaps, based on our
+@@ -875,28 +894,38 @@ void Linear_Gradient::shadeSpan(int x, i
+ dx = dxStorage[0];
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = SkScalarToFixed(fDstToIndex.getScaleX());
+ }
+
+ if (SkFixedNearlyZero(dx)) {
+ // we're a vertical gradient, so no change in a span
+- unsigned fi = proc(fx) >> (16 - kCache32Bits);
+- sk_memset32_dither(dstC, cache[toggle + fi],
+- cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ if (proc == clamp_tileproc) {
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[kCache32Count * 2], count);
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
+ } else if (proc == clamp_tileproc) {
+ SkClampRange range;
+- range.init(fx, dx, count, 0, 0xFF);
++ range.init(fx, dx, count, cache[-1], cache[kCache32Count * 2]);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV0],
+- count);
++ // Do we really want to dither the clamp values?
++ sk_memset32(dstC, range.fV0, count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+ int unroll = count >> 3;
+ fx = range.fFx1;
+ for (int i = 0; i < unroll; i++) {
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+@@ -905,20 +934,17 @@ void Linear_Gradient::shadeSpan(int x, i
+ }
+ if ((count &= 7) > 0) {
+ do {
+ NO_CHECK_ITER;
+ } while (--count != 0);
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV1],
+- count);
++ sk_memset32(dstC, range.fV1, count);
+ }
+ } else if (proc == mirror_tileproc) {
+ do {
+ unsigned fi = mirror_8bits(fx >> 8);
+ SkASSERT(fi <= 0xFF);
+ fx += dx;
+ *dstC++ = cache[toggle + fi];
+ toggle ^= TOGGLE_MASK;
+@@ -1670,19 +1699,24 @@ public:
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+ if (proc == clamp_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ } else if (proc == mirror_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+ SkFixed index = mirror_tileproc(t);
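The heart of fix-gradient-clamp.patch is the cache layout: the 32-bit gradient table is over-allocated by two slots, and exact, undithered endpoint colors are parked at indices -1 and kCache32Count * 2, so out-of-range parameters read a true clamp color rather than a dithered interior entry. A minimal sketch of that guard-slot scheme, detached from Skia (all names below are illustrative; the dither toggle between the table's two halves is omitted):

    #include <cstdint>
    #include <vector>

    class ClampedCache {
    public:
        // Storage holds count * 2 + 2 entries (the doubling mirrors the
        // patch's dither pairs). fCache points one past the front guard,
        // so fCache[-1] and fCache[count * 2] stay addressable.
        ClampedCache(uint32_t lo, uint32_t hi, int count)
            : fStorage(count * 2 + 2), fCount(count) {
            fCache = fStorage.data() + 1;
            fCache[-1]         = lo;   // read when t < 0
            fCache[fCount * 2] = hi;   // read when t > 0xFFFF
            // interior entries would hold the interpolated ramp
        }

        uint32_t lookup(int32_t t) const {
            if (t < 0)      { return fCache[-1]; }
            if (t > 0xFFFF) { return fCache[fCount * 2]; }
            return fCache[t >> (16 - kCacheBits)];   // 0 .. fCount - 1
        }

    private:
        static constexpr int kCacheBits = 8;   // fCount == 1 << kCacheBits
        std::vector<uint32_t> fStorage;
        uint32_t* fCache;
        int fCount;
    };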
diff --git a/gfx/skia/patches/archive/getpostextpath.patch b/gfx/skia/patches/archive/getpostextpath.patch
new file mode 100644
index 0000000000..7181411ec8
--- /dev/null
+++ b/gfx/skia/patches/archive/getpostextpath.patch
@@ -0,0 +1,70 @@
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -836,16 +836,19 @@ public:
+
+ /** Return the path (outline) for the specified text.
+ Note: just like SkCanvas::drawText, this will respect the Align setting
+ in the paint.
+ */
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const void* findImage(const SkGlyph&);
+
+ uint32_t getGenerationID() const;
+ #endif
+
+ // returns true if the paint's settings (e.g. xfermode + alpha) resolve to
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1242,16 +1242,43 @@ void SkPaint::getTextPath(const void* te
+ const SkPath* iterPath;
+ while ((iterPath = iter.next(&xpos)) != NULL) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ path->addPath(*iterPath, matrix);
+ prevXPos = xpos;
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false, true);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+
+ // SkFontHost can override this choice in FilterRec()
+ static SkMask::Format computeMaskFormat(const SkPaint& paint) {
+ uint32_t flags = paint.getFlags();
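A hypothetical caller for the getPosTextPath() entry point added above; the text, positions, and helper name are invented, standing in for the per-glyph placements a shaping engine would supply:

    #include "SkPaint.h"
    #include "SkPath.h"
    #include "SkPoint.h"

    static void outline_positioned_text(const SkPaint& paint, SkPath* out) {
        static const char text[] = "abc";
        const size_t len = sizeof(text) - 1;

        SkPoint pos[3];                    // one position per glyph
        for (int i = 0; i < 3; ++i) {
            pos[i] = SkPoint::Make(SkIntToScalar(10 * i),  // fake advances
                                   SkIntToScalar(20));     // fake baseline
        }
        paint.getPosTextPath(text, len, pos, out);
    }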
diff --git a/gfx/skia/patches/archive/mingw-fix.patch b/gfx/skia/patches/archive/mingw-fix.patch
new file mode 100644
index 0000000000..d91a16aa70
--- /dev/null
+++ b/gfx/skia/patches/archive/mingw-fix.patch
@@ -0,0 +1,57 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 0135b85..bb108f8 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -253,7 +253,7 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+-#if defined(SK_BUILD_FOR_WIN)
++#if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
+ // Some documentation suggests we should be using __attribute__((override)),
+diff --git a/gfx/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/src/ports/SkFontHost_win.cpp
+index dd9c5dc..ca2c3dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_win.cpp
++++ b/gfx/skia/src/ports/SkFontHost_win.cpp
+@@ -22,7 +22,7 @@
+ #ifdef WIN32
+ #include "windows.h"
+ #include "tchar.h"
+-#include "Usp10.h"
++#include "usp10.h"
+
+ // always packed xxRRGGBB
+ typedef uint32_t SkGdiRGB;
+@@ -1033,6 +1033,10 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+ HFONT designFont = NULL;
+
++ const char stem_chars[] = {'i', 'I', '!', '1'};
++ int16_t min_width;
++ unsigned glyphCount;
++
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+@@ -1046,7 +1050,7 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ goto Error;
+ }
+- const unsigned glyphCount = calculateGlyphCount(hdc);
++ glyphCount = calculateGlyphCount(hdc);
+
+ info = new SkAdvancedTypefaceMetrics;
+ info->fEmSize = otm.otmEMSquare;
+@@ -1115,9 +1119,8 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+- int16_t min_width = SHRT_MAX;
++ min_width = SHRT_MAX;
+ info->fStemV = 0;
+- char stem_chars[] = {'i', 'I', '!', '1'};
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
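The SkFontHost_win.cpp hunks are one fix in three parts: GCC, and therefore MinGW, rejects a goto that jumps past the initialization of a variable still in scope at the label, so the declarations are hoisted above the first `goto Error;` and the initializations become plain assignments (the usp10.h rename and the _MSC_VER gate handle case-sensitive headers and the `override` extension). A freestanding illustration of the goto rule, not code from the tree:

    // The rejected shape:
    //     goto Error;
    //     int w = compute();   // initialization bypassed by the jump
    // Error:
    //     ...                  // 'w' still in scope here
    static int compute() { return 42; }

    static int fixed_form(bool fail) {
        int w;                   // declaration hoisted, as in the patch
        if (fail) {
            goto Error;          // legal now: nothing is skipped
        }
        w = compute();           // assignment instead of initialization
        return w;
    Error:
        return -1;
    }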
diff --git a/gfx/skia/patches/archive/new-aa.patch b/gfx/skia/patches/archive/new-aa.patch
new file mode 100644
index 0000000000..d5e6fbf73d
--- /dev/null
+++ b/gfx/skia/patches/archive/new-aa.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -31,17 +31,17 @@
+ - supersampled coordinates, scale equal to the output * SCALE
+
+ NEW_AA is a set of code-changes to try to make both paths produce identical
+ results. It's not quite there yet, though the remaining differences may be
+ in the subsequent blits, and not in the different masks/runs...
+ */
+ //#define FORCE_SUPERMASK
+ //#define FORCE_RLE
+-//#define SK_SUPPORT_NEW_AA
++#define SK_SUPPORT_NEW_AA
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /// Base class for a single-pass supersampled blitter.
+ class BaseSuperBlitter : public SkBlitter {
+ public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkRegion& clip);
diff --git a/gfx/skia/patches/archive/old-android-fonthost.patch b/gfx/skia/patches/archive/old-android-fonthost.patch
new file mode 100644
index 0000000000..1c64ace7dd
--- /dev/null
+++ b/gfx/skia/patches/archive/old-android-fonthost.patch
@@ -0,0 +1,530 @@
+# HG changeset patch
+# Parent 9ee29e4aace683ddf6cf8ddb2893cd34fcfc772c
+# User James Willcox <jwillcox@mozilla.com>
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -305,21 +305,20 @@ CPPSRCS += \
+ SkFontHost_mac_coretext.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+ endif
+
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkFontHost_FreeType.cpp \
+ SkFontHost_android.cpp \
+ SkFontHost_gamma.cpp \
+- FontHostConfiguration_android.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+
+ DEFINES += -DSK_BUILD_FOR_ANDROID_NDK
+ OS_CXXFLAGS += $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (gtk2,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+diff --git a/gfx/skia/src/ports/SkFontHost_android.cpp b/gfx/skia/src/ports/SkFontHost_android.cpp
+--- a/gfx/skia/src/ports/SkFontHost_android.cpp
++++ b/gfx/skia/src/ports/SkFontHost_android.cpp
+@@ -1,38 +1,31 @@
++
+ /*
+-**
+-** Copyright 2006, The Android Open Source Project
+-**
+-** Licensed under the Apache License, Version 2.0 (the "License");
+-** you may not use this file except in compliance with the License.
+-** You may obtain a copy of the License at
+-**
+-** http://www.apache.org/licenses/LICENSE-2.0
+-**
+-** Unless required by applicable law or agreed to in writing, software
+-** distributed under the License is distributed on an "AS IS" BASIS,
+-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-** See the License for the specific language governing permissions and
+-** limitations under the License.
+-*/
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
+
+ #include "SkFontHost.h"
+ #include "SkDescriptor.h"
+ #include "SkMMapStream.h"
+ #include "SkPaint.h"
+ #include "SkString.h"
+ #include "SkStream.h"
+ #include "SkThread.h"
+ #include "SkTSearch.h"
+-#include "FontHostConfiguration_android.h"
+ #include <stdio.h>
+
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
+ #ifndef SK_FONT_FILE_PREFIX
+ #define SK_FONT_FILE_PREFIX "/fonts/"
+ #endif
+
+ SkTypeface::Style find_name_and_attributes(SkStream* stream, SkString* name,
+ bool* isFixedWidth);
+
+ static void GetFullPathForSysFonts(SkString* full, const char name[]) {
+ full->set(getenv("ANDROID_ROOT"));
+ full->append(SK_FONT_FILE_PREFIX);
+@@ -99,21 +92,21 @@ static SkTypeface* find_best_face(const
+ if (faces[SkTypeface::kNormal] != NULL) {
+ return faces[SkTypeface::kNormal];
+ }
+ // look for anything
+ for (int i = 0; i < 4; i++) {
+ if (faces[i] != NULL) {
+ return faces[i];
+ }
+ }
+ // should never get here, since the faces list should not be empty
+- SkDEBUGFAIL("faces list is empty");
++ SkASSERT(!"faces list is empty");
+ return NULL;
+ }
+
+ static FamilyRec* find_family(const SkTypeface* member) {
+ FamilyRec* curr = gFamilyHead;
+ while (curr != NULL) {
+ for (int i = 0; i < 4; i++) {
+ if (curr->fFaces[i] == member) {
+ return curr;
+ }
+@@ -138,31 +131,27 @@ static SkTypeface* find_from_uniqueID(ui
+ curr = curr->fNext;
+ }
+ return NULL;
+ }
+
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+ static FamilyRec* remove_from_family(const SkTypeface* face) {
+ FamilyRec* family = find_family(face);
+- if (family) {
+- SkASSERT(family->fFaces[face->style()] == face);
+- family->fFaces[face->style()] = NULL;
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
+
+- for (int i = 0; i < 4; i++) {
+- if (family->fFaces[i] != NULL) { // family is non-empty
+- return NULL;
+- }
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
+ }
+- } else {
+-// SkDebugf("remove_from_family(%p) face not found", face);
+ }
+ return family; // return the empty family
+ }
+
+ // maybe we should make FamilyRec be doubly-linked
+ static void detach_and_delete_family(FamilyRec* family) {
+ FamilyRec* curr = gFamilyHead;
+ FamilyRec* prev = NULL;
+
+ while (curr != NULL) {
+@@ -172,21 +161,21 @@ static void detach_and_delete_family(Fam
+ gFamilyHead = next;
+ } else {
+ prev->fNext = next;
+ }
+ SkDELETE(family);
+ return;
+ }
+ prev = curr;
+ curr = next;
+ }
+- SkDEBUGFAIL("Yikes, couldn't find family in our list to remove/delete");
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
+ }
+
+ static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
+ NameFamilyPair* list = gNameList.begin();
+ int count = gNameList.count();
+
+ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
+
+ if (index >= 0) {
+ return find_best_face(list[index].fFamily, style);
+@@ -387,111 +376,90 @@ static bool get_name_and_style(const cha
+ }
+ return false;
+ }
+
+ // used to record our notion of the pre-existing fonts
+ struct FontInitRec {
+ const char* fFileName;
+ const char* const* fNames; // null-terminated list
+ };
+
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
+ // deliberately empty, but we use the address to identify fallback fonts
+ static const char* gFBNames[] = { NULL };
+
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
+
+-/* Fonts are grouped by family, with the first font in a family having the
+- list of names (even if that list is empty), and the following members having
+- null for the list. The names list must be NULL-terminated.
+-*/
+-static FontInitRec *gSystemFonts;
+-static size_t gNumSystemFonts = 0;
+-
+-#define SYSTEM_FONTS_FILE "/system/etc/system_fonts.cfg"
++#define DEFAULT_NAMES gSansNames
+
+ // these globals are assigned (once) by load_system_fonts()
+ static FamilyRec* gDefaultFamily;
+ static SkTypeface* gDefaultNormal;
+-static char** gDefaultNames = NULL;
+-static uint32_t *gFallbackFonts;
+
+-/* Load info from a configuration file that populates the system/fallback font structures
+-*/
+-static void load_font_info() {
+-// load_font_info_xml("/system/etc/system_fonts.xml");
+- SkTDArray<FontFamily*> fontFamilies;
+- getFontFamilies(fontFamilies);
+-
+- SkTDArray<FontInitRec> fontInfo;
+- bool firstInFamily = false;
+- for (int i = 0; i < fontFamilies.count(); ++i) {
+- FontFamily *family = fontFamilies[i];
+- firstInFamily = true;
+- for (int j = 0; j < family->fFileNames.count(); ++j) {
+- FontInitRec fontInfoRecord;
+- fontInfoRecord.fFileName = family->fFileNames[j];
+- if (j == 0) {
+- if (family->fNames.count() == 0) {
+- // Fallback font
+- fontInfoRecord.fNames = (char **)gFBNames;
+- } else {
+- SkTDArray<const char*> names = family->fNames;
+- const char **nameList = (const char**)
+- malloc((names.count() + 1) * sizeof(char*));
+- if (nameList == NULL) {
+- // shouldn't get here
+- break;
+- }
+- if (gDefaultNames == NULL) {
+- gDefaultNames = (char**) nameList;
+- }
+- for (int i = 0; i < names.count(); ++i) {
+- nameList[i] = names[i];
+- }
+- nameList[names.count()] = NULL;
+- fontInfoRecord.fNames = nameList;
+- }
+- } else {
+- fontInfoRecord.fNames = NULL;
+- }
+- *fontInfo.append() = fontInfoRecord;
+- }
+- }
+- gNumSystemFonts = fontInfo.count();
+- gSystemFonts = (FontInitRec*) malloc(gNumSystemFonts * sizeof(FontInitRec));
+- gFallbackFonts = (uint32_t*) malloc((gNumSystemFonts + 1) * sizeof(uint32_t));
+- if (gSystemFonts == NULL) {
+- // shouldn't get here
+- gNumSystemFonts = 0;
+- }
+- for (size_t i = 0; i < gNumSystemFonts; ++i) {
+- gSystemFonts[i].fFileName = fontInfo[i].fFileName;
+- gSystemFonts[i].fNames = fontInfo[i].fNames;
+- }
+- fontFamilies.deleteAll();
+-}
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
+
+ /* Called once (ensured by the sentinel check at the beginning of our body).
+ Initializes all the globals, and register the system fonts.
+ */
+ static void load_system_fonts() {
+ // check if we've already be called
+ if (NULL != gDefaultNormal) {
+ return;
+ }
+
+- load_font_info();
+-
+ const FontInitRec* rec = gSystemFonts;
+ SkTypeface* firstInFamily = NULL;
+ int fallbackCount = 0;
+
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
+ // if we're the first in a new family, clear firstInFamily
+ if (rec[i].fNames != NULL) {
+ firstInFamily = NULL;
+ }
+
+ bool isFixedWidth;
+ SkString name;
+ SkTypeface::Style style;
+
+ // we expect all the fonts, except the "fallback" fonts
+@@ -515,120 +483,75 @@ static void load_system_fonts() {
+ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
+ // rec[i].fFileName, fallbackCount, tf->uniqueID());
+ gFallbackFonts[fallbackCount++] = tf->uniqueID();
+ }
+
+ firstInFamily = tf;
+ FamilyRec* family = find_family(tf);
+ const char* const* names = rec[i].fNames;
+
+ // record the default family if this is it
+- if (names == gDefaultNames) {
++ if (names == DEFAULT_NAMES) {
+ gDefaultFamily = family;
+ }
+ // add the names to map to this family
+ while (*names) {
+ add_name(*names, family);
+ names += 1;
+ }
+ }
+ }
+
+ // do this after all fonts are loaded. This is our default font, and it
+ // acts as a sentinel so we only execute load_system_fonts() once
+ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
+ // now terminate our fallback list with the sentinel value
+ gFallbackFonts[fallbackCount] = 0;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
+- // lookup and record if the font is custom (i.e. not a system font)
+- bool isCustomFont = !((FamilyTypeface*)face)->isSysFont();
+- stream->writeBool(isCustomFont);
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
+
+- if (isCustomFont) {
+- SkStream* fontStream = ((FamilyTypeface*)face)->openStream();
++ stream->write8((uint8_t)face->style());
+
+- // store the length of the custom font
+- uint32_t len = fontStream->getLength();
+- stream->write32(len);
+-
+- // store the entire font in the serialized stream
+- void* fontData = malloc(len);
+-
+- fontStream->read(fontData, len);
+- stream->write(fontData, len);
+-
+- fontStream->unref();
+- free(fontData);
+-// SkDebugf("--- fonthost custom serialize %d %d\n", face->style(), len);
+-
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
+ } else {
+- const char* name = ((FamilyTypeface*)face)->getUniqueString();
+-
+- stream->write8((uint8_t)face->style());
+-
+- if (NULL == name || 0 == *name) {
+- stream->writePackedUInt(0);
+-// SkDebugf("--- fonthost serialize null\n");
+- } else {
+- uint32_t len = strlen(name);
+- stream->writePackedUInt(len);
+- stream->write(name, len);
+-// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+- }
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+ }
+ }
+
+ SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
+ load_system_fonts();
+
+- // check if the font is a custom or system font
+- bool isCustomFont = stream->readBool();
++ int style = stream->readU8();
+
+- if (isCustomFont) {
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
+
+- // read the length of the custom font from the stream
+- uint32_t len = stream->readU32();
+-
+- // generate a new stream to store the custom typeface
+- SkMemoryStream* fontStream = new SkMemoryStream(len);
+- stream->read((void*)fontStream->getMemoryBase(), len);
+-
+- SkTypeface* face = CreateTypefaceFromStream(fontStream);
+-
+- fontStream->unref();
+-
+-// SkDebugf("--- fonthost custom deserialize %d %d\n", face->style(), len);
+- return face;
+-
+- } else {
+- int style = stream->readU8();
+-
+- int len = stream->readPackedUInt();
+- if (len > 0) {
+- SkString str;
+- str.resize(len);
+- stream->read(str.writable_str(), len);
+-
+- const FontInitRec* rec = gSystemFonts;
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
+- if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
+- // backup until we hit the fNames
+- for (int j = i; j >= 0; --j) {
+- if (rec[j].fNames != NULL) {
+- return SkFontHost::CreateTypeface(NULL,
+- rec[j].fNames[0], NULL, 0,
+- (SkTypeface::Style)style);
+- }
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], NULL, 0, (SkTypeface::Style)style);
+ }
+ }
+ }
+ }
+ }
+ return NULL;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -697,49 +620,32 @@ size_t SkFontHost::GetFileName(SkFontID
+ }
+ return size;
+ } else {
+ return 0;
+ }
+ }
+
+ SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
+ load_system_fonts();
+
+- const SkTypeface* origTypeface = find_from_uniqueID(origFontID);
+- const SkTypeface* currTypeface = find_from_uniqueID(currFontID);
+-
+- SkASSERT(origTypeface != 0);
+- SkASSERT(currTypeface != 0);
+-
+- // Our fallback list always stores the id of the plain in each fallback
+- // family, so we transform currFontID to its plain equivalent.
+- currFontID = find_typeface(currTypeface, SkTypeface::kNormal)->uniqueID();
+-
+ /* First see if fontID is already one of our fallbacks. If so, return
+ its successor. If fontID is not in our list, then return the first one
+ in our list. Note: list is zero-terminated, and returning zero means
+ we have no more fonts to use for fallbacks.
+ */
+ const uint32_t* list = gFallbackFonts;
+ for (int i = 0; list[i] != 0; i++) {
+ if (list[i] == currFontID) {
+- if (list[i+1] == 0)
+- return 0;
+- const SkTypeface* nextTypeface = find_from_uniqueID(list[i+1]);
+- return find_typeface(nextTypeface, origTypeface->style())->uniqueID();
++ return list[i+1];
+ }
+ }
+-
+- // If we get here, currFontID was not a fallback, so we start at the
+- // beginning of our list.
+- const SkTypeface* firstTypeface = find_from_uniqueID(list[0]);
+- return find_typeface(firstTypeface, origTypeface->style())->uniqueID();
++ return list[0];
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
+ if (NULL == stream || stream->getLength() <= 0) {
+ return NULL;
+ }
+
+ bool isFixedWidth;
+@@ -754,10 +660,11 @@ SkTypeface* SkFontHost::CreateTypefaceFr
+ }
+
+ SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
+ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
+ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
+ // since we created the stream, we let go of our ref() here
+ stream->unref();
+ return face;
+ }
+
++///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/patches/archive/radial-gradients.patch b/gfx/skia/patches/archive/radial-gradients.patch
new file mode 100644
index 0000000000..183923e83e
--- /dev/null
+++ b/gfx/skia/patches/archive/radial-gradients.patch
@@ -0,0 +1,25 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1665,17 +1665,20 @@ public:
+ }
+ return kRadial2_GradientType;
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+ SkASSERT(count > 0);
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
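The rewritten guard is the whole of radial-gradients.patch: only two fully coincident circles leave a two-point radial gradient undefined, while equal radii with distinct centers still describe a valid tube of interpolated circles. The predicate, restated standalone (a sketch, not Skia's types):

    struct Circle { float cx, cy, r; };

    // Degenerate only when the circles are identical; the old code also
    // bailed out whenever the radii merely matched, which the TODO above
    // questions.
    static bool two_point_radial_is_degenerate(const Circle& c1,
                                               const Circle& c2) {
        return (c2.r - c1.r) == 0 && c1.cx == c2.cx && c1.cy == c2.cy;
    }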
diff --git a/gfx/skia/patches/archive/skia_restrict_problem.patch b/gfx/skia/patches/archive/skia_restrict_problem.patch
new file mode 100644
index 0000000000..c7639ca2ce
--- /dev/null
+++ b/gfx/skia/patches/archive/skia_restrict_problem.patch
@@ -0,0 +1,461 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1184,116 +1184,17 @@ public:
+ {
+ // make sure our table is insync with our current #define for kSQRT_TABLE_SIZE
+ SkASSERT(sizeof(gSqrt8Table) == kSQRT_TABLE_SIZE);
+
+ rad_to_unit_matrix(center, radius, &fPtsToUnit);
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+- virtual void shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- SkPoint srcPt;
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const uint16_t* SK_RESTRICT cache = this->getCache16();
+- int toggle = ((x ^ y) & 1) << kCache16Bits;
+-
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+- SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed storage[2];
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
+- dx = storage[0];
+- dy = storage[1];
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = SkScalarToFixed(fDstToIndex.getScaleX());
+- dy = SkScalarToFixed(fDstToIndex.getSkewY());
+- }
+-
+- if (proc == clamp_tileproc) {
+- const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
+-
+- /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
+- rather than 0xFFFF which is slower. This is a compromise, since it reduces our
+- precision, but that appears to be visually OK. If we decide this is OK for
+- all of our cases, we could (it seems) put this scale-down into fDstToIndex,
+- to avoid having to do these extra shifts each time.
+- */
+- fx >>= 1;
+- dx >>= 1;
+- fy >>= 1;
+- dy >>= 1;
+- if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
+- fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fy *= fy;
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else if (proc == mirror_tileproc) {
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = mirror_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = repeat_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- do {
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+- SkASSERT(fi <= 0xFFFF);
+-
+- int index = fi >> (16 - kCache16Bits);
+- *dstC++ = cache[toggle + index];
+- toggle ^= (1 << kCache16Bits);
+-
+- dstX += SK_Scalar1;
+- } while (--count != 0);
+- }
+- }
++ virtual void shadeSpan16(int x, int y, uint16_t* dstC, int count) SK_OVERRIDE;
+
+ virtual BitmapType asABitmap(SkBitmap* bitmap,
+ SkMatrix* matrix,
+ TileMode* xy,
+ SkScalar* twoPointRadialParams) const SK_OVERRIDE {
+ if (bitmap) {
+ this->commonAsABitmap(bitmap);
+ }
+@@ -1507,16 +1408,117 @@ void Radial_Gradient::shadeSpan(int x, i
+ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[fi >> (16 - kCache32Bits)];
+ dstX += SK_Scalar1;
+ } while (--count != 0);
+ }
+ }
+
++void Radial_Gradient::shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ SkPoint srcPt;
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const uint16_t* SK_RESTRICT cache = this->getCache16();
++ int toggle = ((x ^ y) & 1) << kCache16Bits;
++
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
++ SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed storage[2];
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
++ dx = storage[0];
++ dy = storage[1];
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = SkScalarToFixed(fDstToIndex.getScaleX());
++ dy = SkScalarToFixed(fDstToIndex.getSkewY());
++ }
++
++ if (proc == clamp_tileproc) {
++ const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
++
++ /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
++ rather than 0xFFFF which is slower. This is a compromise, since it reduces our
++ precision, but that appears to be visually OK. If we decide this is OK for
++ all of our cases, we could (it seems) put this scale-down into fDstToIndex,
++ to avoid having to do these extra shifts each time.
++ */
++ fx >>= 1;
++ dx >>= 1;
++ fy >>= 1;
++ dy >>= 1;
++ if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
++ fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fy *= fy;
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else if (proc == mirror_tileproc) {
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = mirror_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = repeat_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ do {
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
++ SkASSERT(fi <= 0xFFFF);
++
++ int index = fi >> (16 - kCache16Bits);
++ *dstC++ = cache[toggle + index];
++ toggle ^= (1 << kCache16Bits);
++
++ dstX += SK_Scalar1;
++ } while (--count != 0);
++ }
++}
++
+ /* Two-point radial gradients are specified by two circles, each with a center
+ point and radius. The gradient can be considered to be a series of
+ concentric circles, with the color interpolated from the start circle
+ (at t=0) to the end circle (at t=1).
+
+ For each point (x, y) in the span, we want to find the
+ interpolated circle that intersects that point. The center
+ of the desired circle (Cx, Cy) falls at some distance t
+@@ -1661,109 +1663,17 @@ public:
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ }
+ return kRadial2_GradientType;
+ }
+
+- virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- // Zero difference between radii: fill with transparent black.
+- // TODO: Is removing this actually correct? Two circles with the
+- // same radius, but different centers doesn't sound like it
+- // should be cleared
+- if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+- sk_bzero(dstC, count * sizeof(*dstC));
+- return;
+- }
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const SkPMColor* SK_RESTRICT cache = this->getCache32();
+-
+- SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkScalar dx, fx = srcPt.fX;
+- SkScalar dy, fy = srcPt.fY;
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed fixedX, fixedY;
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
+- dx = SkFixedToScalar(fixedX);
+- dy = SkFixedToScalar(fixedY);
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = fDstToIndex.getScaleX();
+- dy = fDstToIndex.getSkewY();
+- }
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+- SkScalarMul(fDiff.fY, dy)) * 2;
+- if (proc == clamp_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- if (t < 0) {
+- *dstC++ = cache[-1];
+- } else if (t > 0xFFFF) {
+- *dstC++ = cache[kCache32Count * 2];
+- } else {
+- SkASSERT(t <= 0xFFFF);
+- *dstC++ = cache[t >> (16 - kCache32Bits)];
+- }
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else if (proc == mirror_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- for (; count > 0; --count) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- SkScalar fx = srcPt.fX;
+- SkScalar fy = srcPt.fY;
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- dstX += SK_Scalar1;
+- }
+- }
+- }
++ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+@@ -1817,16 +1727,110 @@ private:
+ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+ fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
++void Two_Point_Radial_Gradient::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ // Zero difference between radii: fill with transparent black.
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const SkPMColor* SK_RESTRICT cache = this->getCache32();
++
++ SkScalar foura = fA * 4;
++ bool posRoot = fDiffRadius < 0;
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkScalar dx, fx = srcPt.fX;
++ SkScalar dy, fy = srcPt.fY;
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed fixedX, fixedY;
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
++ dx = SkFixedToScalar(fixedX);
++ dy = SkFixedToScalar(fixedY);
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = fDstToIndex.getScaleX();
++ dy = fDstToIndex.getSkewY();
++ }
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
++ SkScalarMul(fDiff.fY, dy)) * 2;
++ if (proc == clamp_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else if (proc == mirror_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ for (; count > 0; --count) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ SkScalar fx = srcPt.fX;
++ SkScalar fy = srcPt.fY;
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ dstX += SK_Scalar1;
++ }
++ }
++}
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+ const SkScalar pos[], int count, SkUnitMapper* mapper)
+ : Gradient_Shader(colors, pos, count, SkShader::kClamp_TileMode, mapper),
+ fCenter(SkPoint::Make(cx, cy))
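skia_restrict_problem.patch is mechanical: the SK_RESTRICT-qualified shadeSpan/shadeSpan16 bodies move out of the class definitions, and the in-class declarations drop the qualifier. The resulting shape, sketched (assuming, as the patch implies, that the troublesome compiler only tolerates the qualifier on an out-of-line definition):

    #include <cstdint>

    #ifndef SK_RESTRICT
        #define SK_RESTRICT __restrict   // assumed GCC/MSVC spelling
    #endif

    class SpanShader {
    public:
        // declaration without the qualifier...
        virtual void shadeSpan(int x, int y, uint32_t* dstC, int count);
    };

    // ...definition with it; parameter restrict-ness is not part of the
    // function's type, so declaration and definition still match.
    void SpanShader::shadeSpan(int x, int y,
                               uint32_t* SK_RESTRICT dstC, int count) {
        for (int i = 0; i < count; ++i) {
            dstC[i] = 0;   // placeholder span fill
        }
    }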
diff --git a/gfx/skia/patches/archive/uninitialized-margin.patch b/gfx/skia/patches/archive/uninitialized-margin.patch
new file mode 100644
index 0000000000..b8ab213e7b
--- /dev/null
+++ b/gfx/skia/patches/archive/uninitialized-margin.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkDraw.cpp b/gfx/skia/src/core/SkDraw.cpp
+--- a/gfx/skia/src/core/SkDraw.cpp
++++ b/gfx/skia/src/core/SkDraw.cpp
+@@ -2529,17 +2529,17 @@ static bool compute_bounds(const SkPath&
+
+ // init our bounds from the path
+ {
+ SkRect pathBounds = devPath.getBounds();
+ pathBounds.inset(-SK_ScalarHalf, -SK_ScalarHalf);
+ pathBounds.roundOut(bounds);
+ }
+
+- SkIPoint margin;
++ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fImage = NULL;
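The single changed line above closes an uninitialized read: `margin` was only assigned inside the `if (filter)` branch, leaving the no-filter path to consume indeterminate values downstream. The hazard in miniature, with invented names:

    struct IPoint { int fX, fY; };

    static void outset_bounds(bool haveFilter, int* w, int* h) {
        IPoint margin = {0, 0};    // the patch's fix: start from known zeros
        if (haveFilter) {
            margin.fX = 3;         // filter-dependent outset
            margin.fY = 3;
        }
        *w += 2 * margin.fX;       // read happens on both paths
        *h += 2 * margin.fY;
    }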
diff --git a/gfx/skia/patches/archive/user-config.patch b/gfx/skia/patches/archive/user-config.patch
new file mode 100644
index 0000000000..11c6f1f638
--- /dev/null
+++ b/gfx/skia/patches/archive/user-config.patch
@@ -0,0 +1,40 @@
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -140,16 +140,20 @@
+ /* If SK_DEBUG is defined, then you can optionally define SK_SUPPORT_UNITTEST
+ which will run additional self-tests at startup. These can take a long time,
+ so this flag is optional.
+ */
+ #ifdef SK_DEBUG
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+ SK_TRACE_EVENT0(event)
+ SK_TRACE_EVENT1(event, name1, value1)
+ SK_TRACE_EVENT2(event, name1, value1, name2, value2)
+ src/utils/SkDebugTrace.h has a trivial implementation that writes to
+ the debug output stream. If SK_USER_TRACE_INCLUDE_FILE is not defined,
+@@ -161,9 +165,15 @@
+ */
+ #ifdef SK_SAMPLES_FOR_X
+ #define SK_R32_SHIFT 16
+ #define SK_G32_SHIFT 8
+ #define SK_B32_SHIFT 0
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
diff --git a/gfx/skia/skia/include/android/SkAndroidFrameworkUtils.h b/gfx/skia/skia/include/android/SkAndroidFrameworkUtils.h
new file mode 100644
index 0000000000..d0f7d44fad
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkAndroidFrameworkUtils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAndroidFrameworkUtils_DEFINED
+#define SkAndroidFrameworkUtils_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+class SkCanvas;
+struct SkRect;
+class SkSurface;
+
+/**
+ * SkAndroidFrameworkUtils exposes private APIs used only by the Android framework.
+ */
+class SkAndroidFrameworkUtils {
+public:
+
+#if SK_SUPPORT_GPU
+ /**
+ * clipWithStencil draws the current clip into a stencil buffer with reference value and mask
+ * set to 0x1. This function works only on a GPU canvas.
+ *
+ * @param canvas A GPU canvas that has a non-empty clip.
+ *
+ * @return true on success or false if clip is empty or not a GPU canvas.
+ */
+ static bool clipWithStencil(SkCanvas* canvas);
+#endif //SK_SUPPORT_GPU
+
+ static void SafetyNetLog(const char*);
+
+ static sk_sp<SkSurface> getSurfaceFromCanvas(SkCanvas* canvas);
+
+ static int SaveBehind(SkCanvas* canvas, const SkRect* subset);
+
+ /**
+ * Unrolls a chain of nested SkPaintFilterCanvas to return the base wrapped canvas.
+ *
+ * @param canvas A SkPaintFilterCanvas or any other SkCanvas subclass.
+ *
+ * @return SkCanvas that was found in the innermost SkPaintFilterCanvas.
+ */
+ static SkCanvas* getBaseWrappedCanvas(SkCanvas* canvas);
+};
+
+#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#endif // SkAndroidFrameworkUtils_DEFINED
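A hypothetical use of the helpers above, peeling paint-filter wrappers before asking for the surface that ultimately backs a canvas (the function name is invented):

    #include "include/android/SkAndroidFrameworkUtils.h"
    #include "include/core/SkCanvas.h"
    #include "include/core/SkSurface.h"

    #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    static sk_sp<SkSurface> backing_surface(SkCanvas* canvas) {
        // unwrap any nested SkPaintFilterCanvas layers first
        SkCanvas* base = SkAndroidFrameworkUtils::getBaseWrappedCanvas(canvas);
        return SkAndroidFrameworkUtils::getSurfaceFromCanvas(base);
    }
    #endif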
diff --git a/gfx/skia/skia/include/android/SkAnimatedImage.h b/gfx/skia/skia/include/android/SkAnimatedImage.h
new file mode 100644
index 0000000000..760dabde76
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkAnimatedImage.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnimatedImage_DEFINED
+#define SkAnimatedImage_DEFINED
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+
+class SkAndroidCodec;
+class SkPicture;
+
+/**
+ * Thread unsafe drawable for drawing animated images (e.g. GIF).
+ */
+class SK_API SkAnimatedImage : public SkDrawable {
+public:
+ /**
+ * Create an SkAnimatedImage from the SkAndroidCodec.
+ *
+ * Returns null on failure to allocate pixels. On success, this will
+ * decode the first frame.
+ *
+ * @param info Width and height may require scaling.
+ * @param cropRect Rectangle to crop to after scaling.
+ * @param postProcess Picture to apply after scaling and cropping.
+ */
+ static sk_sp<SkAnimatedImage> Make(std::unique_ptr<SkAndroidCodec>,
+ const SkImageInfo& info, SkIRect cropRect, sk_sp<SkPicture> postProcess);
+
+ /**
+ * Create an SkAnimatedImage from the SkAndroidCodec.
+ *
+ * Returns null on failure to allocate pixels. On success, this will
+ * decode the first frame.
+ *
+ * @param scaledSize Size to draw the image, possibly requiring scaling.
+ * @param cropRect Rectangle to crop to after scaling.
+ * @param postProcess Picture to apply after scaling and cropping.
+ */
+ static sk_sp<SkAnimatedImage> Make(std::unique_ptr<SkAndroidCodec>,
+ SkISize scaledSize, SkIRect cropRect, sk_sp<SkPicture> postProcess);
+
+ /**
+ * Simpler version that uses the default size, no cropping, and no postProcess.
+ */
+ static sk_sp<SkAnimatedImage> Make(std::unique_ptr<SkAndroidCodec>);
+
+ ~SkAnimatedImage() override;
+
+ /**
+ * Reset the animation to the beginning.
+ */
+ void reset();
+
+ /**
+ * Whether the animation completed.
+ *
+ * Returns true after all repetitions are complete, or an error stops the
+ * animation. Gets reset to false if the animation is restarted.
+ */
+ bool isFinished() const { return fFinished; }
+
+ /**
+ * Returned by decodeNextFrame and currentFrameDuration if the animation
+ * is not running.
+ */
+ static constexpr int kFinished = -1;
+
+ /**
+ * Decode the next frame.
+ *
+ * If the animation is on the last frame or has hit an error, returns
+ * kFinished.
+ */
+ int decodeNextFrame();
+
+ /**
+ * How long to display the current frame.
+ *
+ * Useful for the first frame, for which decodeNextFrame is called
+ * internally.
+ */
+ int currentFrameDuration() {
+ return fCurrentFrameDuration;
+ }
+
+ /**
+ * Change the repetition count.
+ *
+ * By default, the image will repeat the number of times indicated in the
+ * encoded data.
+ *
+ * Use SkCodec::kRepetitionCountInfinite for infinite, and 0 to show all
+ * frames once and then stop.
+ */
+ void setRepetitionCount(int count);
+
+ /**
+ * Return the currently set repetition count.
+ */
+ int getRepetitionCount() const {
+ return fRepetitionCount;
+ }
+
+protected:
+ SkRect onGetBounds() override;
+ void onDraw(SkCanvas*) override;
+
+private:
+ struct Frame {
+ SkBitmap fBitmap;
+ int fIndex;
+ SkCodecAnimation::DisposalMethod fDisposalMethod;
+
+ // init() may have to create a new SkPixelRef, if the
+ // current one is already in use by another owner (e.g.
+ // an SkPicture). This determines whether to copy the
+ // existing one to the new one.
+ enum class OnInit {
+ // Restore the image from the old SkPixelRef to the
+ // new one.
+ kRestoreIfNecessary,
+ // No need to restore.
+ kNoRestore,
+ };
+
+ Frame();
+ bool init(const SkImageInfo& info, OnInit);
+ bool copyTo(Frame*) const;
+ };
+
+ std::unique_ptr<SkAndroidCodec> fCodec;
+ const SkISize fScaledSize;
+ const SkImageInfo fDecodeInfo;
+ const SkIRect fCropRect;
+ const sk_sp<SkPicture> fPostProcess;
+ const int fFrameCount;
+ const bool fSimple; // no crop, scale, or postprocess
+ SkMatrix fMatrix; // used only if !fSimple
+
+ bool fFinished;
+ int fCurrentFrameDuration;
+ Frame fDisplayFrame;
+ Frame fDecodingFrame;
+ Frame fRestoreFrame;
+ int fRepetitionCount;
+ int fRepetitionsCompleted;
+
+ SkAnimatedImage(std::unique_ptr<SkAndroidCodec>, SkISize scaledSize,
+ SkImageInfo decodeInfo, SkIRect cropRect, sk_sp<SkPicture> postProcess);
+ SkAnimatedImage(std::unique_ptr<SkAndroidCodec>);
+
+ int computeNextFrame(int current, bool* animationEnded);
+ double finish();
+
+ typedef SkDrawable INHERITED;
+};
+
+#endif // SkAnimatedImage_DEFINED
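
A minimal usage sketch (assumptions: `data` is an sk_sp<SkData> holding an
encoded GIF and `canvas` is a valid SkCanvas*; neither comes from the header
above):

    sk_sp<SkAnimatedImage> anim =
            SkAnimatedImage::Make(SkAndroidCodec::MakeFromData(data));
    if (anim) {
        while (!anim->isFinished()) {
            canvas->drawDrawable(anim.get());               // draw the current frame
            int durationMs = anim->currentFrameDuration();  // display time, in ms
            // ... present the frame for durationMs, then advance ...
            if (anim->decodeNextFrame() == SkAnimatedImage::kFinished) {
                break;
            }
        }
    }
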
diff --git a/gfx/skia/skia/include/android/SkBRDAllocator.h b/gfx/skia/skia/include/android/SkBRDAllocator.h
new file mode 100644
index 0000000000..ae842b74b1
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkBRDAllocator.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBRDAllocator_DEFINED
+#define SkBRDAllocator_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkBitmap.h"
+
+/**
+ * Abstract subclass of SkBitmap's allocator.
+ * Allows the allocator to indicate if the memory it allocates
+ * is zero initialized.
+ */
+class SkBRDAllocator : public SkBitmap::Allocator {
+public:
+
+ /**
+ * Indicates if the memory allocated by this allocator is
+ * zero initialized.
+ */
+ virtual SkCodec::ZeroInitialized zeroInit() const = 0;
+};
+
+#endif // SkBRDAllocator_DEFINED
diff --git a/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h b/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h
new file mode 100644
index 0000000000..18ba90b774
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapRegionDecoder_DEFINED
+#define SkBitmapRegionDecoder_DEFINED
+
+#include "include/android/SkBRDAllocator.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkStream.h"
+
+/*
+ * This class aims to provide an interface to test multiple implementations of
+ * SkBitmapRegionDecoder.
+ */
+class SK_API SkBitmapRegionDecoder {
+public:
+
+ enum Strategy {
+ kAndroidCodec_Strategy, // Uses SkAndroidCodec for scaling and subsetting
+ };
+
+ /*
+ * @param data Refs the data while this object exists, unrefs on destruction
+ * @param strategy Strategy used for scaling and subsetting
+ * @return Tries to create an SkBitmapRegionDecoder, returns NULL on failure
+ */
+ static SkBitmapRegionDecoder* Create(sk_sp<SkData>, Strategy strategy);
+
+ /*
+ * @param stream Takes ownership of the stream
+ * @param strategy Strategy used for scaling and subsetting
+ * @return Tries to create an SkBitmapRegionDecoder, returns NULL on failure
+ */
+ static SkBitmapRegionDecoder* Create(
+ SkStreamRewindable* stream, Strategy strategy);
+
+ /*
+ * Decode a scaled region of the encoded image stream
+ *
+ * @param bitmap Container for decoded pixels. It is assumed that the pixels
+ * are initially unallocated and will be allocated by this function.
+ * @param allocator Allocator for the pixels. If this is NULL, the default
+ * allocator (HeapAllocator) will be used.
+ * @param desiredSubset Subset of the original image to decode.
+ * @param sampleSize An integer downscaling factor for the decode.
+ * @param colorType Preferred output colorType.
+     *                  New implementations should return false if they do not support
+     *                  decoding to this color type.
+ * The old kOriginal_Strategy will decode to a default color type
+ * if this color type is unsupported.
+ * @param requireUnpremul If the image is not opaque, we will use this to determine the
+ * alpha type to use.
+ * @param prefColorSpace If non-null and supported, this is the color space that we will
+ * decode into. Otherwise, we will choose a default.
+ *
+ */
+ virtual bool decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize,
+ SkColorType colorType, bool requireUnpremul,
+ sk_sp<SkColorSpace> prefColorSpace = nullptr) = 0;
+
+ virtual SkEncodedImageFormat getEncodedFormat() = 0;
+
+ virtual SkColorType computeOutputColorType(SkColorType requestedColorType) = 0;
+
+ virtual sk_sp<SkColorSpace> computeOutputColorSpace(SkColorType outputColorType,
+ sk_sp<SkColorSpace> prefColorSpace = nullptr) = 0;
+
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ virtual ~SkBitmapRegionDecoder() {}
+
+protected:
+
+ SkBitmapRegionDecoder(int width, int height)
+ : fWidth(width)
+ , fHeight(height)
+ {}
+
+private:
+ const int fWidth;
+ const int fHeight;
+};
+
+#endif
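
A minimal usage sketch (assumption: `data` is an sk_sp<SkData> with an encoded
image): create a decoder, then pull the top-left quadrant out at half
resolution.

    std::unique_ptr<SkBitmapRegionDecoder> brd(SkBitmapRegionDecoder::Create(
            data, SkBitmapRegionDecoder::kAndroidCodec_Strategy));
    if (brd) {
        SkBitmap bitmap;
        SkIRect subset = SkIRect::MakeWH(brd->width() / 2, brd->height() / 2);
        // A null allocator selects the default HeapAllocator; sampleSize 2
        // halves each dimension of the decoded subset.
        if (brd->decodeRegion(&bitmap, /*allocator=*/nullptr, subset,
                              /*sampleSize=*/2, kN32_SkColorType,
                              /*requireUnpremul=*/false)) {
            // bitmap now holds the decoded region
        }
    }
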
diff --git a/gfx/skia/skia/include/atlastext/SkAtlasTextContext.h b/gfx/skia/skia/include/atlastext/SkAtlasTextContext.h
new file mode 100644
index 0000000000..d13ec51e23
--- /dev/null
+++ b/gfx/skia/skia/include/atlastext/SkAtlasTextContext.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAtlasTextContext_DEFINED
+#define SkAtlasTextContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkAtlasTextRenderer;
+class SkInternalAtlasTextContext;
+
+SkAtlasTextRenderer* SkGetAtlasTextRendererFromInternalContext(class SkInternalAtlasTextContext&);
+
+/**
+ * Class that an Atlas Text client uses to register its SkAtlasTextRenderer implementation and
+ * to create one or more SkAtlasTextTargets (destination surfaces for text rendering).
+ */
+class SK_API SkAtlasTextContext : public SkRefCnt {
+public:
+ static sk_sp<SkAtlasTextContext> Make(sk_sp<SkAtlasTextRenderer>);
+
+ SkAtlasTextRenderer* renderer() const {
+ return SkGetAtlasTextRendererFromInternalContext(*fInternalContext);
+ }
+
+ SkInternalAtlasTextContext& internal() { return *fInternalContext; }
+
+private:
+ SkAtlasTextContext() = delete;
+ SkAtlasTextContext(const SkAtlasTextContext&) = delete;
+ SkAtlasTextContext& operator=(const SkAtlasTextContext&) = delete;
+
+ SkAtlasTextContext(sk_sp<SkAtlasTextRenderer>);
+
+ std::unique_ptr<SkInternalAtlasTextContext> fInternalContext;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/atlastext/SkAtlasTextFont.h b/gfx/skia/skia/include/atlastext/SkAtlasTextFont.h
new file mode 100644
index 0000000000..8bab5bfc85
--- /dev/null
+++ b/gfx/skia/skia/include/atlastext/SkAtlasTextFont.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAtlasTextFont_DEFINED
+#define SkAtlasTextFont_DEFINED
+
+#include "include/core/SkFont.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+
+/** Represents a font at a size. TODO: What else do we need here (skewX, scaleX, vertical, ...)? */
+class SK_API SkAtlasTextFont : public SkRefCnt {
+public:
+ static sk_sp<SkAtlasTextFont> Make(sk_sp<SkTypeface> typeface, SkScalar size) {
+ return sk_sp<SkAtlasTextFont>(new SkAtlasTextFont(std::move(typeface), size));
+ }
+
+ SkTypeface* typeface() const { return fTypeface.get(); }
+
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+ SkScalar size() const { return fSize; }
+
+ SkFont makeFont() const { return SkFont(fTypeface, fSize); }
+
+private:
+ SkAtlasTextFont(sk_sp<SkTypeface> typeface, SkScalar size)
+ : fTypeface(std::move(typeface)), fSize(size) {}
+
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fSize;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/atlastext/SkAtlasTextRenderer.h b/gfx/skia/skia/include/atlastext/SkAtlasTextRenderer.h
new file mode 100644
index 0000000000..e572bb6e52
--- /dev/null
+++ b/gfx/skia/skia/include/atlastext/SkAtlasTextRenderer.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRefCnt.h"
+
+#ifndef SkAtlasTextRenderer_DEFINED
+#define SkAtlasTextRenderer_DEFINED
+
+/**
+ * This is the base class for a renderer implemented by the SkAtlasText client. The
+ * SkAtlasTextContext issues texture creations, deletions, uploads, and vertex draws to the
+ * renderer. The renderer must perform those actions in the order called to correctly render
+ * the text drawn to SkAtlasTextTargets.
+ */
+class SK_API SkAtlasTextRenderer : public SkRefCnt {
+public:
+ enum class AtlasFormat {
+ /** Unsigned normalized 8 bit single channel format. */
+ kA8
+ };
+
+ struct SDFVertex {
+ /** Position in device space (not normalized). The third component is w (not z). */
+ SkPoint3 fPosition;
+ /** Color, same value for all four corners of a glyph quad. */
+ uint32_t fColor;
+ /** Texture coordinate (in texel units, not normalized). */
+ int16_t fTextureCoordX;
+ int16_t fTextureCoordY;
+ };
+
+ virtual ~SkAtlasTextRenderer() = default;
+
+ /**
+ * Create a texture of the provided format with dimensions 'width' x 'height'
+ * and return a unique handle.
+ */
+ virtual void* createTexture(AtlasFormat, int width, int height) = 0;
+
+ /**
+ * Delete the texture with the passed handle.
+ */
+ virtual void deleteTexture(void* textureHandle) = 0;
+
+ /**
+ * Place the pixel data specified by 'data' in the texture with handle
+ * 'textureHandle' in the rectangle ['x', 'x' + 'width') x ['y', 'y' + 'height').
+ * 'rowBytes' specifies the byte offset between successive rows in 'data' and will always be
+ * a multiple of the number of bytes per pixel.
+ * The pixel format of data is the same as that of 'textureHandle'.
+ */
+ virtual void setTextureData(void* textureHandle, const void* data, int x, int y, int width,
+ int height, size_t rowBytes) = 0;
+
+ /**
+ * Draws glyphs using SDFs. The SDF data resides in 'textureHandle'. The array
+ * 'vertices' provides interleaved device-space positions, colors, and
+     * texture coordinates. There are 4 * 'quadCnt' entries in 'vertices'.
+ */
+ virtual void drawSDFGlyphs(void* targetHandle, void* textureHandle, const SDFVertex vertices[],
+ int quadCnt) = 0;
+
+ /** Called when a SkAtlasTextureTarget is destroyed. */
+ virtual void targetDeleted(void* targetHandle) = 0;
+};
+
+#endif
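
A minimal sketch of the shape a client implementation takes; the bodies are
placeholders for whatever graphics API the client actually drives:

    class MyTextRenderer : public SkAtlasTextRenderer {
    public:
        void* createTexture(AtlasFormat, int width, int height) override {
            // allocate an A8 texture and return an opaque handle for it
            return nullptr;  // placeholder
        }
        void deleteTexture(void* textureHandle) override { /* free the texture */ }
        void setTextureData(void* textureHandle, const void* data, int x, int y,
                            int width, int height, size_t rowBytes) override {
            // upload the sub-rectangle, honoring rowBytes between rows
        }
        void drawSDFGlyphs(void* targetHandle, void* textureHandle,
                           const SDFVertex vertices[], int quadCnt) override {
            // draw 4 * quadCnt vertices with an SDF shader into the target
        }
        void targetDeleted(void* targetHandle) override { /* drop per-target state */ }
    };
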
diff --git a/gfx/skia/skia/include/atlastext/SkAtlasTextTarget.h b/gfx/skia/skia/include/atlastext/SkAtlasTextTarget.h
new file mode 100644
index 0000000000..b90dad6fec
--- /dev/null
+++ b/gfx/skia/skia/include/atlastext/SkAtlasTextTarget.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAtlasTextTarget_DEFINED
+#define SkAtlasTextTarget_DEFINED
+
+#include "include/core/SkDeque.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+#include <memory>
+
+class SkAtlasTextContext;
+class SkAtlasTextFont;
+class SkMatrix;
+struct SkPoint;
+
+/** Represents a client-created renderable surface and is used to draw text into the surface. */
+class SK_API SkAtlasTextTarget {
+public:
+ virtual ~SkAtlasTextTarget();
+
+ /**
+     * Creates a text drawing target. 'handle' is used to identify this rendering surface when
+ * draws are flushed to the SkAtlasTextContext's SkAtlasTextRenderer.
+ */
+ static std::unique_ptr<SkAtlasTextTarget> Make(sk_sp<SkAtlasTextContext>,
+ int width,
+ int height,
+ void* handle);
+
+ /**
+ * Enqueues a text draw in the target. The caller provides an array of glyphs and their
+ * positions. The meaning of 'color' here is interpreted by the client's SkAtlasTextRenderer
+ * when it actually renders the text.
+ */
+ virtual void drawText(const SkGlyphID[], const SkPoint[], int glyphCnt, uint32_t color,
+ const SkAtlasTextFont&) = 0;
+
+ /** Issues all queued text draws to SkAtlasTextRenderer. */
+ virtual void flush() = 0;
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ void* handle() const { return fHandle; }
+
+ SkAtlasTextContext* context() const { return fContext.get(); }
+
+ /** Saves the current matrix in a stack. Returns the prior depth of the saved matrix stack. */
+ int save();
+ /** Pops the top matrix on the stack if the stack is not empty. */
+ void restore();
+ /**
+ * Pops the matrix stack until the stack depth is count. Does nothing if the depth is already
+ * less than count.
+ */
+ void restoreToCount(int count);
+
+ /** Pre-translates the current CTM. */
+ void translate(SkScalar dx, SkScalar dy);
+ /** Pre-scales the current CTM. */
+ void scale(SkScalar sx, SkScalar sy);
+ /** Pre-rotates the current CTM about the origin. */
+ void rotate(SkScalar degrees);
+    /** Pre-rotates the current CTM about the point (px, py). */
+ void rotate(SkScalar degrees, SkScalar px, SkScalar py);
+ /** Pre-skews the current CTM. */
+ void skew(SkScalar sx, SkScalar sy);
+ /** Pre-concats the current CTM. */
+ void concat(const SkMatrix& matrix);
+
+protected:
+ SkAtlasTextTarget(sk_sp<SkAtlasTextContext>, int width, int height, void* handle);
+
+ const SkMatrix& ctm() const { return *static_cast<const SkMatrix*>(fMatrixStack.back()); }
+
+ void* const fHandle;
+ const sk_sp<SkAtlasTextContext> fContext;
+ const int fWidth;
+ const int fHeight;
+
+private:
+ SkDeque fMatrixStack;
+ int fSaveCnt;
+
+ SkMatrix* accessCTM() const {
+ return static_cast<SkMatrix*>(const_cast<void*>(fMatrixStack.back()));
+ }
+
+ SkAtlasTextTarget() = delete;
+    SkAtlasTextTarget(const SkAtlasTextTarget&) = delete;
+    SkAtlasTextTarget& operator=(const SkAtlasTextTarget&) = delete;
+};
+
+#endif
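
A minimal sketch tying the atlas-text pieces together (assumptions: `renderer`
is a client SkAtlasTextRenderer subclass like the one above, and `typeface`,
`glyphs`, `positions`, `glyphCnt`, and `fbHandle` come from the client's
shaping and rendering code):

    sk_sp<SkAtlasTextContext> context = SkAtlasTextContext::Make(renderer);
    std::unique_ptr<SkAtlasTextTarget> target =
            SkAtlasTextTarget::Make(context, /*width=*/1024, /*height=*/768, fbHandle);
    sk_sp<SkAtlasTextFont> font = SkAtlasTextFont::Make(typeface, 16.0f);

    target->translate(10.0f, 50.0f);  // position the text via the CTM
    target->drawText(glyphs, positions, glyphCnt, 0xFF000000 /*opaque black*/, *font);
    target->flush();  // hands the queued draws to the SkAtlasTextRenderer
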
diff --git a/gfx/skia/skia/include/c/sk_canvas.h b/gfx/skia/skia/include/c/sk_canvas.h
new file mode 100644
index 0000000000..c739ed4cff
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_canvas.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_canvas_DEFINED
+#define sk_canvas_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Save the current matrix and clip on the canvas. When the
+ balancing call to sk_canvas_restore() is made, the previous matrix
+ and clip are restored.
+*/
+SK_API void sk_canvas_save(sk_canvas_t*);
+/**
+ This behaves the same as sk_canvas_save(), but in addition it
+ allocates an offscreen surface. All drawing calls are directed
+ there, and only when the balancing call to sk_canvas_restore() is
+ made is that offscreen transfered to the canvas (or the previous
+ layer).
+
+ @param sk_rect_t* (may be null) This rect, if non-null, is used as
+ a hint to limit the size of the offscreen, and
+ thus drawing may be clipped to it, though that
+ clipping is not guaranteed to happen. If exact
+ clipping is desired, use sk_canvas_clip_rect().
+ @param sk_paint_t* (may be null) The paint is copied, and is applied
+ to the offscreen when sk_canvas_restore() is
+ called.
+*/
+SK_API void sk_canvas_save_layer(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ This call balances a previous call to sk_canvas_save() or
+ sk_canvas_save_layer(), and is used to remove all modifications to
+ the matrix and clip state since the last save call. It is an
+ error to call sk_canvas_restore() more times than save and
+ save_layer were called.
+*/
+SK_API void sk_canvas_restore(sk_canvas_t*);
+
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified translation.
+*/
+SK_API void sk_canvas_translate(sk_canvas_t*, float dx, float dy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified scale.
+*/
+SK_API void sk_canvas_scale(sk_canvas_t*, float sx, float sy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified rotation in degrees.
+*/
+SK_API void sk_canvas_rotate_degrees(sk_canvas_t*, float degrees);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified rotation in radians.
+*/
+SK_API void sk_canvas_rotate_radians(sk_canvas_t*, float radians);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified skew.
+*/
+SK_API void sk_canvas_skew(sk_canvas_t*, float sx, float sy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified matrix.
+*/
+SK_API void sk_canvas_concat(sk_canvas_t*, const sk_matrix_t*);
+
+/**
+ Modify the current clip with the specified rectangle. The new
+ current clip will be the intersection of the old clip and the
+    rectangle.
+*/
+SK_API void sk_canvas_clip_rect(sk_canvas_t*, const sk_rect_t*);
+/**
+ Modify the current clip with the specified path. The new
+ current clip will be the intersection of the old clip and the
+ path.
+*/
+SK_API void sk_canvas_clip_path(sk_canvas_t*, const sk_path_t*);
+
+/**
+ Fill the entire canvas (restricted to the current clip) with the
+ specified paint.
+*/
+SK_API void sk_canvas_draw_paint(sk_canvas_t*, const sk_paint_t*);
+/**
+ Draw the specified rectangle using the specified paint. The
+ rectangle will be filled or stroked based on the style in the
+ paint.
+*/
+SK_API void sk_canvas_draw_rect(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ * Draw the circle centered at (cx, cy) with radius rad using the specified paint.
+ * The circle will be filled or framed based on the style in the paint
+ */
+SK_API void sk_canvas_draw_circle(sk_canvas_t*, float cx, float cy, float rad, const sk_paint_t*);
+/**
+ Draw the specified oval using the specified paint. The oval will be
+ filled or framed based on the style in the paint
+*/
+SK_API void sk_canvas_draw_oval(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ Draw the specified path using the specified paint. The path will be
+ filled or framed based on the style in the paint
+*/
+SK_API void sk_canvas_draw_path(sk_canvas_t*, const sk_path_t*, const sk_paint_t*);
+/**
+ Draw the specified image, with its top/left corner at (x,y), using
+ the specified paint, transformed by the current matrix.
+
+ @param sk_paint_t* (may be NULL) the paint used to draw the image.
+*/
+SK_API void sk_canvas_draw_image(sk_canvas_t*, const sk_image_t*,
+ float x, float y, const sk_paint_t*);
+/**
+ Draw the specified image, scaling and translating so that it fills
+ the specified dst rect. If the src rect is non-null, only that
+ subset of the image is transformed and drawn.
+
+ @param sk_paint_t* (may be NULL) The paint used to draw the image.
+*/
+SK_API void sk_canvas_draw_image_rect(sk_canvas_t*, const sk_image_t*,
+ const sk_rect_t* src,
+ const sk_rect_t* dst, const sk_paint_t*);
+
+/**
+    Draw the picture into this canvas (replay the picture's drawing commands).
+
+ @param sk_matrix_t* If non-null, apply that matrix to the CTM when
+ drawing this picture. This is logically
+ equivalent to: save, concat, draw_picture,
+ restore.
+
+ @param sk_paint_t* If non-null, draw the picture into a temporary
+ buffer, and then apply the paint's alpha,
+ colorfilter, imagefilter, and xfermode to that
+ buffer as it is drawn to the canvas. This is
+ logically equivalent to save_layer(paint),
+ draw_picture, restore.
+*/
+SK_API void sk_canvas_draw_picture(sk_canvas_t*, const sk_picture_t*,
+ const sk_matrix_t*, const sk_paint_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
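
A minimal sketch of the save/transform/draw/restore pattern (assumption:
`canvas` is a valid sk_canvas_t*, e.g. from sk_surface_get_canvas() in
sk_surface.h):

    sk_paint_t* paint = sk_paint_new();
    sk_paint_set_color(paint, 0xFF2196F3);  /* opaque blue */

    sk_rect_t rect;
    rect.left = 10; rect.top = 10; rect.right = 110; rect.bottom = 60;

    sk_canvas_save(canvas);
    sk_canvas_rotate_degrees(canvas, 15.0f);
    sk_canvas_draw_rect(canvas, &rect, paint);
    sk_canvas_restore(canvas);  /* undoes the rotation */

    sk_paint_delete(paint);
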
diff --git a/gfx/skia/skia/include/c/sk_colorspace.h b/gfx/skia/skia/include/c/sk_colorspace.h
new file mode 100644
index 0000000000..31839840d0
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_colorspace.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_colorspace_DEFINED
+#define sk_colorspace_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+SK_API sk_colorspace_t* sk_colorspace_new_srgb();
+
+SK_API void sk_colorspace_ref(sk_colorspace_t*);
+SK_API void sk_colorspace_unref(sk_colorspace_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
diff --git a/gfx/skia/skia/include/c/sk_data.h b/gfx/skia/skia/include/c/sk_data.h
new file mode 100644
index 0000000000..27c51c4136
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_data.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_data_DEFINED
+#define sk_data_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Returns a new empty sk_data_t. This call must be balanced with a call to
+ sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_empty(void);
+/**
+ Returns a new sk_data_t by copying the specified source data.
+ This call must be balanced with a call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_with_copy(const void* src, size_t length);
+/**
+ Pass ownership of the given memory to a new sk_data_t, which will
+    call free() when the reference count of the data goes to zero. For
+ example:
+ size_t length = 1024;
+ void* buffer = malloc(length);
+ memset(buffer, 'X', length);
+ sk_data_t* data = sk_data_new_from_malloc(buffer, length);
+ This call must be balanced with a call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_from_malloc(const void* memory, size_t length);
+/**
+ Returns a new sk_data_t using a subset of the data in the
+ specified source sk_data_t. This call must be balanced with a
+ call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_subset(const sk_data_t* src, size_t offset, size_t length);
+
+/**
+ Increment the reference count on the given sk_data_t. Must be
+ balanced by a call to sk_data_unref().
+*/
+SK_API void sk_data_ref(const sk_data_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the sk_data_t
+ and the memory it is managing. New sk_data_t are created with a
+ reference count of 1.
+*/
+SK_API void sk_data_unref(const sk_data_t*);
+
+/**
+ Returns the number of bytes stored.
+*/
+SK_API size_t sk_data_get_size(const sk_data_t*);
+/**
+ Returns the pointer to the data.
+ */
+SK_API const void* sk_data_get_data(const sk_data_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
diff --git a/gfx/skia/skia/include/c/sk_image.h b/gfx/skia/skia/include/c/sk_image.h
new file mode 100644
index 0000000000..5106d40a62
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_image.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_image_DEFINED
+#define sk_image_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ * Return a new image that has made a copy of the provided pixels, or NULL on failure.
+ * Balance with a call to sk_image_unref().
+ */
+SK_API sk_image_t* sk_image_new_raster_copy(const sk_imageinfo_t*, const void* pixels, size_t rowBytes);
+
+/**
+ * If the specified data can be interpreted as a compressed image (e.g. PNG or JPEG) then this
+ * returns an image. If the encoded data is not supported, returns NULL.
+ *
+ * On success, the encoded data may be processed immediately, or it may be ref()'d for later
+ * use.
+ */
+SK_API sk_image_t* sk_image_new_from_encoded(const sk_data_t* encoded, const sk_irect_t* subset);
+
+/**
+ * Encode the image's pixels and return the result as a new PNG in a
+ * sk_data_t, which the caller must manage: call sk_data_unref() when
+ * they are done.
+ *
+ * If the image type cannot be encoded, this will return NULL.
+ */
+SK_API sk_data_t* sk_image_encode(const sk_image_t*);
+
+/**
+ * Increment the reference count on the given sk_image_t. Must be
+ * balanced by a call to sk_image_unref().
+*/
+SK_API void sk_image_ref(const sk_image_t*);
+/**
+ * Decrement the reference count. If the reference count is 1 before
+ * the decrement, then release both the memory holding the sk_image_t
+ * and the memory it is managing. New sk_image_t are created with a
+ *  reference count of 1.
+*/
+SK_API void sk_image_unref(const sk_image_t*);
+
+/**
+ *  Return the width of the sk_image_t.
+ */
+SK_API int sk_image_get_width(const sk_image_t*);
+/**
+ *  Return the height of the sk_image_t.
+ */
+SK_API int sk_image_get_height(const sk_image_t*);
+
+/**
+ * Returns a non-zero value unique among all images.
+ */
+SK_API uint32_t sk_image_get_unique_id(const sk_image_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
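
A minimal sketch (assumption: `data` is a sk_data_t* holding encoded PNG or
JPEG bytes); passing NULL for the subset decodes the full image:

    sk_image_t* image = sk_image_new_from_encoded(data, NULL);
    if (image) {
        int w = sk_image_get_width(image);   /* decoded dimensions */
        int h = sk_image_get_height(image);
        /* ... e.g. sk_canvas_draw_image(canvas, image, 0, 0, NULL) ... */
        sk_image_unref(image);
    }
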
diff --git a/gfx/skia/skia/include/c/sk_imageinfo.h b/gfx/skia/skia/include/c/sk_imageinfo.h
new file mode 100644
index 0000000000..6c8e9fff24
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_imageinfo.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_imageinfo_DEFINED
+#define sk_imageinfo_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+typedef enum {
+ UNKNOWN_SK_COLORTYPE,
+ RGBA_8888_SK_COLORTYPE,
+ BGRA_8888_SK_COLORTYPE,
+ ALPHA_8_SK_COLORTYPE,
+ GRAY_8_SK_COLORTYPE,
+ RGBA_F16_SK_COLORTYPE,
+ RGBA_F32_SK_COLORTYPE,
+} sk_colortype_t;
+
+typedef enum {
+ OPAQUE_SK_ALPHATYPE,
+ PREMUL_SK_ALPHATYPE,
+ UNPREMUL_SK_ALPHATYPE,
+} sk_alphatype_t;
+
+/**
+ *  Allocate a new imageinfo object. If colorspace is not null, its owner-count will be
+ * incremented automatically.
+ */
+SK_API sk_imageinfo_t* sk_imageinfo_new(int width, int height, sk_colortype_t ct, sk_alphatype_t at,
+ sk_colorspace_t* cs);
+
+/**
+ * Free the imageinfo object. If it contains a reference to a colorspace, its owner-count will
+ * be decremented automatically.
+ */
+SK_API void sk_imageinfo_delete(sk_imageinfo_t*);
+
+SK_API int32_t sk_imageinfo_get_width(const sk_imageinfo_t*);
+SK_API int32_t sk_imageinfo_get_height(const sk_imageinfo_t*);
+SK_API sk_colortype_t sk_imageinfo_get_colortype(const sk_imageinfo_t*);
+SK_API sk_alphatype_t sk_imageinfo_get_alphatype(const sk_imageinfo_t*);
+
+/**
+ * Return the colorspace object reference contained in the imageinfo, or null if there is none.
+ * Note: this does not modify the owner-count on the colorspace object. If the caller needs to
+ * use the colorspace beyond the lifetime of the imageinfo, it should manually call
+ * sk_colorspace_ref() (and then call unref() when it is done).
+ */
+SK_API sk_colorspace_t* sk_imageinfo_get_colorspace(const sk_imageinfo_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
diff --git a/gfx/skia/skia/include/c/sk_maskfilter.h b/gfx/skia/skia/include/c/sk_maskfilter.h
new file mode 100644
index 0000000000..c8aa7ed445
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_maskfilter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_maskfilter_DEFINED
+#define sk_maskfilter_DEFINED
+
+#include "include/c/sk_types.h"
+
+typedef enum {
+ NORMAL_SK_BLUR_STYLE, //!< fuzzy inside and outside
+ SOLID_SK_BLUR_STYLE, //!< solid inside, fuzzy outside
+ OUTER_SK_BLUR_STYLE, //!< nothing inside, fuzzy outside
+ INNER_SK_BLUR_STYLE, //!< fuzzy inside, nothing outside
+} sk_blurstyle_t;
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Increment the reference count on the given sk_maskfilter_t. Must be
+ balanced by a call to sk_maskfilter_unref().
+*/
+SK_API void sk_maskfilter_ref(sk_maskfilter_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+ sk_maskfilter_t and any other associated resources. New
+ sk_maskfilter_t are created with a reference count of 1.
+*/
+SK_API void sk_maskfilter_unref(sk_maskfilter_t*);
+
+/**
+ Create a blur maskfilter.
+ @param sk_blurstyle_t The SkBlurStyle to use
+ @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0.
+*/
+SK_API sk_maskfilter_t* sk_maskfilter_new_blur(sk_blurstyle_t, float sigma);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
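
A minimal sketch (assumption: `paint` is an existing sk_paint_t*): attach a
normal blur; since sk_paint_set_maskfilter() takes its own reference, the
local one is released afterwards:

    sk_maskfilter_t* blur = sk_maskfilter_new_blur(NORMAL_SK_BLUR_STYLE, 3.0f);
    sk_paint_set_maskfilter(paint, blur);  /* paint ref()s the filter */
    sk_maskfilter_unref(blur);             /* drop the local reference */
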
diff --git a/gfx/skia/skia/include/c/sk_matrix.h b/gfx/skia/skia/include/c/sk_matrix.h
new file mode 100644
index 0000000000..244863c4f2
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_matrix.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_matrix_DEFINED
+#define sk_matrix_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/** Set the matrix to identity */
+void sk_matrix_set_identity(sk_matrix_t*);
+
+/** Set the matrix to translate by (tx, ty). */
+void sk_matrix_set_translate(sk_matrix_t*, float tx, float ty);
+/**
+ Preconcats the matrix with the specified translation.
+ M' = M * T(dx, dy)
+*/
+void sk_matrix_pre_translate(sk_matrix_t*, float tx, float ty);
+/**
+ Postconcats the matrix with the specified translation.
+ M' = T(dx, dy) * M
+*/
+void sk_matrix_post_translate(sk_matrix_t*, float tx, float ty);
+
+/** Set the matrix to scale by sx and sy. */
+void sk_matrix_set_scale(sk_matrix_t*, float sx, float sy);
+/**
+ Preconcats the matrix with the specified scale.
+ M' = M * S(sx, sy)
+*/
+void sk_matrix_pre_scale(sk_matrix_t*, float sx, float sy);
+/**
+ Postconcats the matrix with the specified scale.
+ M' = S(sx, sy) * M
+*/
+void sk_matrix_post_scale(sk_matrix_t*, float sx, float sy);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
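
A minimal sketch contrasting the two concatenation orders: starting from a
scale S, sk_matrix_pre_translate() yields M' = S * T (points are translated
first, then scaled), while sk_matrix_post_translate() yields M' = T * S (the
already-scaled result is shifted):

    sk_matrix_t m;
    sk_matrix_set_scale(&m, 2.0f, 2.0f);
    sk_matrix_pre_translate(&m, 10.0f, 0.0f);  /* applied to points before the scale */
    sk_matrix_post_translate(&m, 5.0f, 0.0f);  /* applied after the scale */
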
diff --git a/gfx/skia/skia/include/c/sk_paint.h b/gfx/skia/skia/include/c/sk_paint.h
new file mode 100644
index 0000000000..98ba4954c1
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_paint.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_paint_DEFINED
+#define sk_paint_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Create a new paint with default settings:
+ antialias : false
+ stroke : false
+ stroke width : 0.0f (hairline)
+ stroke miter : 4.0f
+ stroke cap : BUTT_SK_STROKE_CAP
+ stroke join : MITER_SK_STROKE_JOIN
+ color : opaque black
+ shader : NULL
+ maskfilter : NULL
+ xfermode_mode : SRCOVER_SK_XFERMODE_MODE
+*/
+SK_API sk_paint_t* sk_paint_new(void);
+/**
+ Release the memory storing the sk_paint_t and unref() all
+ associated objects.
+*/
+SK_API void sk_paint_delete(sk_paint_t*);
+
+/**
+ Return true iff the paint has antialiasing enabled.
+*/
+SK_API bool sk_paint_is_antialias(const sk_paint_t*);
+/**
+ Set to true to enable antialiasing, false to disable it on this
+ sk_paint_t.
+*/
+SK_API void sk_paint_set_antialias(sk_paint_t*, bool);
+
+/**
+    Return the paint's current drawing color.
+*/
+SK_API sk_color_t sk_paint_get_color(const sk_paint_t*);
+/**
+    Set the paint's current drawing color.
+*/
+SK_API void sk_paint_set_color(sk_paint_t*, sk_color_t);
+
+/* stroke settings */
+
+/**
+ Return true iff stroking is enabled rather than filling on this
+ sk_paint_t.
+*/
+SK_API bool sk_paint_is_stroke(const sk_paint_t*);
+/**
+ Set to true to enable stroking rather than filling with this
+ sk_paint_t.
+*/
+SK_API void sk_paint_set_stroke(sk_paint_t*, bool);
+
+/**
+ Return the width for stroking. A value of 0 strokes in hairline mode.
+ */
+SK_API float sk_paint_get_stroke_width(const sk_paint_t*);
+/**
+ Set the width for stroking. A value of 0 strokes in hairline mode
+ (always draw 1-pixel wide, regardless of the matrix).
+ */
+SK_API void sk_paint_set_stroke_width(sk_paint_t*, float width);
+
+/**
+ Return the paint's stroke miter value. This is used to control the
+    behavior of miter joins when the join's angle is sharp.
+*/
+SK_API float sk_paint_get_stroke_miter(const sk_paint_t*);
+/**
+ Set the paint's stroke miter value. This is used to control the
+    behavior of miter joins when the join's angle is sharp. This value
+ must be >= 0.
+*/
+SK_API void sk_paint_set_stroke_miter(sk_paint_t*, float miter);
+
+typedef enum {
+ BUTT_SK_STROKE_CAP,
+ ROUND_SK_STROKE_CAP,
+ SQUARE_SK_STROKE_CAP
+} sk_stroke_cap_t;
+
+/**
+ Return the paint's stroke cap type, controlling how the start and
+ end of stroked lines and paths are treated.
+*/
+SK_API sk_stroke_cap_t sk_paint_get_stroke_cap(const sk_paint_t*);
+/**
+ Set the paint's stroke cap type, controlling how the start and
+ end of stroked lines and paths are treated.
+*/
+SK_API void sk_paint_set_stroke_cap(sk_paint_t*, sk_stroke_cap_t);
+
+typedef enum {
+ MITER_SK_STROKE_JOIN,
+ ROUND_SK_STROKE_JOIN,
+ BEVEL_SK_STROKE_JOIN
+} sk_stroke_join_t;
+
+/**
+    Return the paint's stroke join type, which specifies the treatment
+    applied to corners in paths and rectangles.
+ */
+SK_API sk_stroke_join_t sk_paint_get_stroke_join(const sk_paint_t*);
+/**
+    Set the paint's stroke join type, which specifies the treatment
+    applied to corners in paths and rectangles.
+ */
+SK_API void sk_paint_set_stroke_join(sk_paint_t*, sk_stroke_join_t);
+
+/**
+ * Set the paint's shader to the specified parameter. This will automatically call unref() on
+ * any previous value, and call ref() on the new value.
+ */
+SK_API void sk_paint_set_shader(sk_paint_t*, sk_shader_t*);
+
+/**
+ * Set the paint's maskfilter to the specified parameter. This will automatically call unref() on
+ * any previous value, and call ref() on the new value.
+ */
+SK_API void sk_paint_set_maskfilter(sk_paint_t*, sk_maskfilter_t*);
+
+/**
+ * Set the paint's xfermode to the specified parameter.
+ */
+SK_API void sk_paint_set_xfermode_mode(sk_paint_t*, sk_xfermode_mode_t);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
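
A minimal sketch of a stroked, antialiased paint built with these setters:

    sk_paint_t* paint = sk_paint_new();
    sk_paint_set_antialias(paint, true);
    sk_paint_set_stroke(paint, true);
    sk_paint_set_stroke_width(paint, 4.0f);
    sk_paint_set_stroke_cap(paint, ROUND_SK_STROKE_CAP);
    sk_paint_set_stroke_join(paint, ROUND_SK_STROKE_JOIN);
    sk_paint_set_color(paint, sk_color_set_argb(0xFF, 0xE9, 0x1E, 0x63));
    /* ... use with the sk_canvas_draw_* calls, then: */
    sk_paint_delete(paint);
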
diff --git a/gfx/skia/skia/include/c/sk_path.h b/gfx/skia/skia/include/c/sk_path.h
new file mode 100644
index 0000000000..3898b629f3
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_path.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_path_DEFINED
+#define sk_path_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+typedef enum {
+ CW_SK_PATH_DIRECTION,
+ CCW_SK_PATH_DIRECTION,
+} sk_path_direction_t;
+
+/** Create a new, empty path. */
+SK_API sk_path_t* sk_path_new(void);
+/** Release the memory used by a sk_path_t. */
+SK_API void sk_path_delete(sk_path_t*);
+
+/** Set the beginning of the next contour to the point (x,y). */
+SK_API void sk_path_move_to(sk_path_t*, float x, float y);
+/**
+ Add a line from the last point to the specified point (x,y). If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_line_to(sk_path_t*, float x, float y);
+/**
+ Add a quadratic bezier from the last point, approaching control
+ point (x0,y0), and ending at (x1,y1). If no sk_path_move_to() call
+ has been made for this contour, the first point is automatically
+ set to (0,0).
+*/
+SK_API void sk_path_quad_to(sk_path_t*, float x0, float y0, float x1, float y1);
+/**
+ Add a conic curve from the last point, approaching control point
+    (x0,y0), and ending at (x1,y1) with weight w. If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_conic_to(sk_path_t*, float x0, float y0, float x1, float y1, float w);
+/**
+ Add a cubic bezier from the last point, approaching control points
+ (x0,y0) and (x1,y1), and ending at (x2,y2). If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_cubic_to(sk_path_t*,
+ float x0, float y0,
+ float x1, float y1,
+ float x2, float y2);
+/**
+ Close the current contour. If the current point is not equal to the
+ first point of the contour, a line segment is automatically added.
+*/
+SK_API void sk_path_close(sk_path_t*);
+
+/**
+ Add a closed rectangle contour to the path.
+*/
+SK_API void sk_path_add_rect(sk_path_t*, const sk_rect_t*, sk_path_direction_t);
+/**
+ Add a closed oval contour to the path
+*/
+SK_API void sk_path_add_oval(sk_path_t*, const sk_rect_t*, sk_path_direction_t);
+
+/**
+ * If the path is empty, return false and set the rect parameter to [0, 0, 0, 0].
+ *  Otherwise, return true and set the rect parameter to the bounds of the control points
+ * of the path.
+ */
+SK_API bool sk_path_get_bounds(const sk_path_t*, sk_rect_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
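
A minimal sketch: a closed triangle contour and its bounds:

    sk_path_t* path = sk_path_new();
    sk_path_move_to(path, 50.0f, 10.0f);
    sk_path_line_to(path, 90.0f, 90.0f);
    sk_path_line_to(path, 10.0f, 90.0f);
    sk_path_close(path);  /* adds the segment back to (50, 10) */

    sk_rect_t bounds;
    if (sk_path_get_bounds(path, &bounds)) {
        /* bounds is now {10, 10, 90, 90} */
    }
    sk_path_delete(path);
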
diff --git a/gfx/skia/skia/include/c/sk_picture.h b/gfx/skia/skia/include/c/sk_picture.h
new file mode 100644
index 0000000000..e569104074
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_picture.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_picture_DEFINED
+#define sk_picture_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Create a new sk_picture_recorder_t. Its resources should be
+ released with a call to sk_picture_recorder_delete().
+*/
+SK_API sk_picture_recorder_t* sk_picture_recorder_new(void);
+/**
+ Release the memory and other resources used by this
+ sk_picture_recorder_t.
+*/
+SK_API void sk_picture_recorder_delete(sk_picture_recorder_t*);
+
+/**
+ Returns the canvas that records the drawing commands
+
+ @param sk_rect_t* the cull rect used when recording this
+                      picture. Any drawing that falls outside of this
+                      rect is undefined; it may or may not be drawn.
+*/
+SK_API sk_canvas_t* sk_picture_recorder_begin_recording(sk_picture_recorder_t*, const sk_rect_t*);
+/**
+ Signal that the caller is done recording. This invalidates the
+ canvas returned by begin_recording. Ownership of the sk_picture_t
+ is passed to the caller, who must call sk_picture_unref() when
+ they are done using it. The returned picture is immutable.
+*/
+SK_API sk_picture_t* sk_picture_recorder_end_recording(sk_picture_recorder_t*);
+
+/**
+ Increment the reference count on the given sk_picture_t. Must be
+ balanced by a call to sk_picture_unref().
+*/
+SK_API void sk_picture_ref(sk_picture_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+    sk_picture_t and any resources it may be managing. New
+ sk_picture_t are created with a reference count of 1.
+*/
+SK_API void sk_picture_unref(sk_picture_t*);
+
+/**
+ Returns a non-zero value unique among all pictures.
+ */
+SK_API uint32_t sk_picture_get_unique_id(sk_picture_t*);
+
+/**
+ Return the cull rect specified when this picture was recorded.
+*/
+SK_API sk_rect_t sk_picture_get_bounds(sk_picture_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
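
A minimal sketch of record-then-replay (assumptions: `paint` and the
destination `canvas` already exist):

    sk_picture_recorder_t* recorder = sk_picture_recorder_new();
    sk_rect_t cull;
    cull.left = 0; cull.top = 0; cull.right = 100; cull.bottom = 100;

    sk_canvas_t* rec = sk_picture_recorder_begin_recording(recorder, &cull);
    sk_canvas_draw_paint(rec, paint);  /* recorded, not rasterized yet */
    sk_picture_t* picture = sk_picture_recorder_end_recording(recorder);
    sk_picture_recorder_delete(recorder);

    sk_canvas_draw_picture(canvas, picture, NULL, NULL);  /* replay */
    sk_picture_unref(picture);
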
diff --git a/gfx/skia/skia/include/c/sk_shader.h b/gfx/skia/skia/include/c/sk_shader.h
new file mode 100644
index 0000000000..023ccbaeac
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_shader.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_shader_DEFINED
+#define sk_shader_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+SK_API void sk_shader_ref(sk_shader_t*);
+SK_API void sk_shader_unref(sk_shader_t*);
+
+typedef enum {
+ CLAMP_SK_SHADER_TILEMODE,
+ REPEAT_SK_SHADER_TILEMODE,
+ MIRROR_SK_SHADER_TILEMODE,
+} sk_shader_tilemode_t;
+
+/**
+ Returns a shader that generates a linear gradient between the two
+ specified points.
+
+ @param points The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between
+ the two points
+   @param colorPos   May be NULL. array[count] of SkScalars giving
+                     the relative position of each corresponding color
+                     in the colors array. If this is NULL, the
+ colors are distributed evenly between the start
+ and end point. If this is not null, the values
+ must begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param colorCount Must be >=2. The number of colors (and pos if not
+ NULL) entries.
+ @param mode The tiling mode
+*/
+SK_API sk_shader_t* sk_shader_new_linear_gradient(const sk_point_t points[2],
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+
+/**
+ Returns a shader that generates a radial gradient given the center
+ and radius.
+
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this
+ gradient
+ @param colors The array[count] of colors, to be distributed
+ between the center and edge of the circle
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                    array. If this is NULL, the colors are
+ distributed evenly between the center and edge of
+ the circle. If this is not null, the values must
+ begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not
+ NULL) entries
+ @param tileMode The tiling mode
+ @param localMatrix May be NULL
+*/
+SK_API sk_shader_t* sk_shader_new_radial_gradient(const sk_point_t* center,
+ float radius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+/**
+ Returns a shader that generates a sweep gradient given a center.
+
+ @param center The coordinates of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around
+ the center.
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                    array. If this is NULL, the colors are
+ distributed evenly between the center and edge of
+ the circle. If this is not null, the values must
+ begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param colorCount Must be >= 2. The number of colors (and pos if
+ not NULL) entries
+ @param localMatrix May be NULL
+*/
+SK_API sk_shader_t* sk_shader_new_sweep_gradient(const sk_point_t* center,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ const sk_matrix_t* localMatrix);
+
+/**
+ Returns a shader that generates a conical gradient given two circles, or
+ returns NULL if the inputs are invalid. The gradient interprets the
+ two circles according to the following HTML spec.
+ http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+
+ @param start, startRadius Defines the first circle.
+  @param end, endRadius     Defines the second circle.
+ @param colors The array[count] of colors, to be distributed between
+ the two circles.
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                     array. If this is NULL, the colors are
+ distributed evenly between the two circles. If
+ this is not null, the values must begin with 0,
+ end with 1.0, and intermediate values must be
+ strictly increasing.
+ @param colorCount Must be >= 2. The number of colors (and pos if
+ not NULL) entries
+ @param tileMode The tiling mode
+ @param localMatrix May be NULL
+
+*/
+SK_API sk_shader_t* sk_shader_new_two_point_conical_gradient(
+ const sk_point_t* start,
+ float startRadius,
+ const sk_point_t* end,
+ float endRadius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
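
A minimal sketch (assumption: `paint` is an existing sk_paint_t*): a two-stop
horizontal gradient; a NULL colorPos spreads the stops evenly:

    sk_point_t pts[2];
    pts[0].x = 0.0f;   pts[0].y = 0.0f;
    pts[1].x = 256.0f; pts[1].y = 0.0f;
    sk_color_t colors[2] = { 0xFFFF0000 /* red */, 0xFF0000FF /* blue */ };

    sk_shader_t* shader = sk_shader_new_linear_gradient(
            pts, colors, /* colorPos= */ NULL, /* colorCount= */ 2,
            CLAMP_SK_SHADER_TILEMODE, /* localMatrix= */ NULL);
    sk_paint_set_shader(paint, shader);  /* paint ref()s the shader */
    sk_shader_unref(shader);
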
diff --git a/gfx/skia/skia/include/c/sk_surface.h b/gfx/skia/skia/include/c/sk_surface.h
new file mode 100644
index 0000000000..88c8c87b32
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_surface.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_surface_DEFINED
+#define sk_surface_DEFINED
+
+#include "include/c/sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Return a new surface, with the memory for the pixels automatically
+ allocated. If the requested surface cannot be created, or the
+ request is not a supported configuration, NULL will be returned.
+
+ @param sk_imageinfo_t* Specify the width, height, color type, and
+ alpha type for the surface.
+
+ @param sk_surfaceprops_t* If not NULL, specify additional non-default
+ properties of the surface.
+*/
+SK_API sk_surface_t* sk_surface_new_raster(const sk_imageinfo_t*, const sk_surfaceprops_t*);
+
+/**
+ Create a new surface which will draw into the specified pixels
+ with the specified rowbytes. If the requested surface cannot be
+ created, or the request is not a supported configuration, NULL
+ will be returned.
+
+ @param sk_imageinfo_t* Specify the width, height, color type, and
+ alpha type for the surface.
+ @param void* pixels Specify the location in memory where the
+ destination pixels are. This memory must
+ outlast this surface.
+ @param size_t rowBytes Specify the difference, in bytes, between
+ each adjacent row. Should be at least
+ (width * sizeof(one pixel)).
+ @param sk_surfaceprops_t* If not NULL, specify additional non-default
+ properties of the surface.
+*/
+SK_API sk_surface_t* sk_surface_new_raster_direct(const sk_imageinfo_t*,
+ void* pixels, size_t rowBytes,
+ const sk_surfaceprops_t* props);
+
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+ sk_surface_t and any pixel memory it may be managing. New
+ sk_surface_t are created with a reference count of 1.
+*/
+SK_API void sk_surface_unref(sk_surface_t*);
+
+/**
+ * Return the canvas associated with this surface. Note: the canvas is owned by the surface,
+ * so the returned object is only valid while the owning surface is valid.
+ */
+SK_API sk_canvas_t* sk_surface_get_canvas(sk_surface_t*);
+
+/**
+ * Call sk_image_unref() when the returned image is no longer used.
+ */
+SK_API sk_image_t* sk_surface_new_image_snapshot(sk_surface_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
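
A minimal sketch of the full raster pipeline across these C headers: make a
surface, draw into its canvas, snapshot the pixels, and release everything:

    sk_imageinfo_t* info = sk_imageinfo_new(256, 256, RGBA_8888_SK_COLORTYPE,
                                            PREMUL_SK_ALPHATYPE, NULL);
    sk_surface_t* surface = sk_surface_new_raster(info, NULL);
    sk_imageinfo_delete(info);
    if (surface) {
        sk_canvas_t* canvas = sk_surface_get_canvas(surface);  /* owned by surface */
        sk_paint_t* paint = sk_paint_new();
        sk_canvas_draw_paint(canvas, paint);  /* fill with the default opaque black */
        sk_paint_delete(paint);

        sk_image_t* snapshot = sk_surface_new_image_snapshot(surface);
        /* ... use the immutable snapshot ... */
        sk_image_unref(snapshot);
        sk_surface_unref(surface);
    }
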
diff --git a/gfx/skia/skia/include/c/sk_types.h b/gfx/skia/skia/include/c/sk_types.h
new file mode 100644
index 0000000000..852526f2b6
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_types.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_types_DEFINED
+#define sk_types_DEFINED
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+ #define SK_C_PLUS_PLUS_BEGIN_GUARD extern "C" {
+ #define SK_C_PLUS_PLUS_END_GUARD }
+#else
+ #include <stdbool.h>
+ #define SK_C_PLUS_PLUS_BEGIN_GUARD
+ #define SK_C_PLUS_PLUS_END_GUARD
+#endif
+
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+typedef uint32_t sk_color_t;
+
+/* This macro assumes all arguments are >=0 and <=255. */
+#define sk_color_set_argb(a, r, g, b) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+#define sk_color_get_a(c) (((c) >> 24) & 0xFF)
+#define sk_color_get_r(c) (((c) >> 16) & 0xFF)
+#define sk_color_get_g(c) (((c) >> 8) & 0xFF)
+#define sk_color_get_b(c) (((c) >> 0) & 0xFF)
+
+typedef enum {
+ INTERSECT_SK_CLIPTYPE,
+ DIFFERENCE_SK_CLIPTYPE,
+} sk_cliptype_t;
+
+typedef enum {
+ UNKNOWN_SK_PIXELGEOMETRY,
+ RGB_H_SK_PIXELGEOMETRY,
+ BGR_H_SK_PIXELGEOMETRY,
+ RGB_V_SK_PIXELGEOMETRY,
+ BGR_V_SK_PIXELGEOMETRY,
+} sk_pixelgeometry_t;
+
+typedef struct {
+ sk_pixelgeometry_t pixelGeometry;
+} sk_surfaceprops_t;
+
+typedef struct {
+ float x;
+ float y;
+} sk_point_t;
+
+typedef struct {
+ int32_t left;
+ int32_t top;
+ int32_t right;
+ int32_t bottom;
+} sk_irect_t;
+
+typedef struct {
+ float left;
+ float top;
+ float right;
+ float bottom;
+} sk_rect_t;
+
+/**
+ The sk_matrix_t struct holds a 3x3 perspective matrix for
+ transforming coordinates:
+
+ (X,Y) = T[M]((x,y))
+ X = (M[0] * x + M[1] * y + M[2]) / (M[6] * x + M[7] * y + M[8]);
+ Y = (M[3] * x + M[4] * y + M[5]) / (M[6] * x + M[7] * y + M[8]);
+
+ Therefore, the identity matrix is
+
+ sk_matrix_t identity = {{1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1}};
+
+ A matrix that scales by sx and sy is:
+
+ sk_matrix_t scale = {{sx, 0, 0,
+ 0, sy, 0,
+ 0, 0, 1}};
+
+ A matrix that translates by tx and ty is:
+
+ sk_matrix_t translate = {{1, 0, tx,
+ 0, 1, ty,
+ 0, 0, 1}};
+
+ A matrix that rotates around the origin by A radians:
+
+ sk_matrix_t rotate = {{cos(A), -sin(A), 0,
+ sin(A), cos(A), 0,
+ 0, 0, 1}};
+
+  Two matrices can be concatenated by:
+
+ void concat_matrices(sk_matrix_t* dst,
+ const sk_matrix_t* matrixU,
+ const sk_matrix_t* matrixV) {
+ const float* u = matrixU->mat;
+ const float* v = matrixV->mat;
+ sk_matrix_t result = {{
+ u[0] * v[0] + u[1] * v[3] + u[2] * v[6],
+ u[0] * v[1] + u[1] * v[4] + u[2] * v[7],
+ u[0] * v[2] + u[1] * v[5] + u[2] * v[8],
+ u[3] * v[0] + u[4] * v[3] + u[5] * v[6],
+ u[3] * v[1] + u[4] * v[4] + u[5] * v[7],
+ u[3] * v[2] + u[4] * v[5] + u[5] * v[8],
+ u[6] * v[0] + u[7] * v[3] + u[8] * v[6],
+ u[6] * v[1] + u[7] * v[4] + u[8] * v[7],
+ u[6] * v[2] + u[7] * v[5] + u[8] * v[8]
+ }};
+ *dst = result;
+ }
+*/
+typedef struct {
+ float mat[9];
+} sk_matrix_t;
+
+/**
+ A sk_canvas_t encapsulates all of the state about drawing into a
+  destination. This includes a reference to the destination itself,
+ and a stack of matrix/clip values.
+*/
+typedef struct sk_canvas_t sk_canvas_t;
+/**
+  A sk_data_t holds an immutable data buffer.
+*/
+typedef struct sk_data_t sk_data_t;
+/**
+  A sk_image_t is an abstraction for drawing a rectangle of pixels.
+ The content of the image is always immutable, though the actual
+ storage may change, if for example that image can be re-created via
+ encoded data or other means.
+*/
+typedef struct sk_image_t sk_image_t;
+
+/**
+ * Describes the color components. See ICC Profiles.
+ */
+typedef struct sk_colorspace_t sk_colorspace_t;
+
+/**
+ * Describes an image buffer : width, height, pixel type, colorspace, etc.
+ */
+typedef struct sk_imageinfo_t sk_imageinfo_t;
+
+/**
+  A sk_maskfilter_t is an object that performs transformations on an
+  alpha-channel mask before drawing it; it may be installed into a
+  sk_paint_t. Each time a primitive is drawn, it is first
+  scan-converted into an alpha mask, which is handed to the
+  maskfilter, which may create a new mask to render into the
+  destination.
+ */
+typedef struct sk_maskfilter_t sk_maskfilter_t;
+/**
+ A sk_paint_t holds the style and color information about how to
+ draw geometries, text and bitmaps.
+*/
+typedef struct sk_paint_t sk_paint_t;
+/**
+ A sk_path_t encapsulates compound (multiple contour) geometric
+ paths consisting of straight line segments, quadratic curves, and
+ cubic curves.
+*/
+typedef struct sk_path_t sk_path_t;
+/**
+ A sk_picture_t holds recorded canvas drawing commands to be played
+ back at a later time.
+*/
+typedef struct sk_picture_t sk_picture_t;
+/**
+ A sk_picture_recorder_t holds a sk_canvas_t that records commands
+ to create a sk_picture_t.
+*/
+typedef struct sk_picture_recorder_t sk_picture_recorder_t;
+/**
+ A sk_shader_t specifies the source color(s) for what is being drawn. If a
+ paint has no shader, then the paint's color is used. If the paint
+  has a shader, then the shader's color(s) are used instead, but they
+ are modulated by the paint's alpha.
+*/
+typedef struct sk_shader_t sk_shader_t;
+/**
+ A sk_surface_t holds the destination for drawing to a canvas. For
+ raster drawing, the destination is an array of pixels in memory.
+ For GPU drawing, the destination is a texture or a framebuffer.
+*/
+typedef struct sk_surface_t sk_surface_t;
+
+typedef enum {
+ CLEAR_SK_XFERMODE_MODE,
+ SRC_SK_XFERMODE_MODE,
+ DST_SK_XFERMODE_MODE,
+ SRCOVER_SK_XFERMODE_MODE,
+ DSTOVER_SK_XFERMODE_MODE,
+ SRCIN_SK_XFERMODE_MODE,
+ DSTIN_SK_XFERMODE_MODE,
+ SRCOUT_SK_XFERMODE_MODE,
+ DSTOUT_SK_XFERMODE_MODE,
+ SRCATOP_SK_XFERMODE_MODE,
+ DSTATOP_SK_XFERMODE_MODE,
+ XOR_SK_XFERMODE_MODE,
+ PLUS_SK_XFERMODE_MODE,
+ MODULATE_SK_XFERMODE_MODE,
+ SCREEN_SK_XFERMODE_MODE,
+ OVERLAY_SK_XFERMODE_MODE,
+ DARKEN_SK_XFERMODE_MODE,
+ LIGHTEN_SK_XFERMODE_MODE,
+ COLORDODGE_SK_XFERMODE_MODE,
+ COLORBURN_SK_XFERMODE_MODE,
+ HARDLIGHT_SK_XFERMODE_MODE,
+ SOFTLIGHT_SK_XFERMODE_MODE,
+ DIFFERENCE_SK_XFERMODE_MODE,
+ EXCLUSION_SK_XFERMODE_MODE,
+ MULTIPLY_SK_XFERMODE_MODE,
+ HUE_SK_XFERMODE_MODE,
+ SATURATION_SK_XFERMODE_MODE,
+ COLOR_SK_XFERMODE_MODE,
+ LUMINOSITY_SK_XFERMODE_MODE,
+} sk_xfermode_mode_t;
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
diff --git a/gfx/skia/skia/include/codec/SkAndroidCodec.h b/gfx/skia/skia/include/codec/SkAndroidCodec.h
new file mode 100644
index 0000000000..26b3f5e0a8
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkAndroidCodec.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAndroidCodec_DEFINED
+#define SkAndroidCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * Abstract interface defining image codec functionality that is necessary for
+ * Android.
+ */
+class SK_API SkAndroidCodec : SkNoncopyable {
+public:
+ enum class ExifOrientationBehavior {
+ /**
+ * Ignore any exif orientation markers in the data.
+ *
+ * getInfo's width and height will match the header of the image, and
+ * no processing will be done to match the marker.
+ */
+ kIgnore,
+
+ /**
+ * Respect the exif orientation marker.
+ *
+ * getInfo's width and height will represent what they should be after
+ * applying the orientation. For example, if the marker specifies a
+ * rotation by 90 degrees, they will be swapped relative to the header.
+ * getAndroidPixels will apply the orientation as well.
+ */
+ kRespect,
+ };
+
+ /**
+ * Pass ownership of an SkCodec to a newly-created SkAndroidCodec.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromCodec(std::unique_ptr<SkCodec>,
+ ExifOrientationBehavior = ExifOrientationBehavior::kIgnore);
+
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ *
+ * ExifOrientationBehavior is set to kIgnore.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromStream(std::unique_ptr<SkStream>,
+ SkPngChunkReader* = nullptr);
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ *
+ * ExifOrientationBehavior is set to kIgnore.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr);
+
+ virtual ~SkAndroidCodec();
+
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedImageFormat getEncodedFormat() const { return fCodec->getEncodedFormat(); }
+
+ /**
+ * @param requestedColorType Color type requested by the client
+ *
+     *  |requestedColorType| may be overridden. We will default to kF16
+ * for high precision images.
+ *
+ * In the general case, if it is possible to decode to
+ * |requestedColorType|, this returns |requestedColorType|.
+ * Otherwise, this returns a color type that is an appropriate
+     *  match for the encoded data.
+ */
+ SkColorType computeOutputColorType(SkColorType requestedColorType);
+
+ /**
+ * @param requestedUnpremul Indicates if the client requested
+ * unpremultiplied output
+ *
+ * Returns the appropriate alpha type to decode to. If the image
+ * has alpha, the value of requestedUnpremul will be honored.
+ */
+ SkAlphaType computeOutputAlphaType(bool requestedUnpremul);
+
+ /**
+ * @param outputColorType Color type that the client will decode to.
+ * @param prefColorSpace Preferred color space to decode to.
+     *                        This may not return |prefColorSpace| for a couple of reasons.
+ * (1) Android Principles: 565 must be sRGB, F16 must be
+ * linear sRGB, transfer function must be parametric.
+ * (2) Codec Limitations: F16 requires a linear color space.
+ *
+ * Returns the appropriate color space to decode to.
+ */
+ sk_sp<SkColorSpace> computeOutputColorSpace(SkColorType outputColorType,
+ sk_sp<SkColorSpace> prefColorSpace = nullptr);
+
+ /**
+ * Compute the appropriate sample size to get to |size|.
+ *
+ * @param size As an input parameter, the desired output size of
+ * the decode. As an output parameter, the smallest sampled size
+ * larger than the input.
+ * @return the sample size to set AndroidOptions::fSampleSize to decode
+ * to the output |size|.
+ */
+ int computeSampleSize(SkISize* size) const;
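+    // For example (illustrative, editor's addition): for a 400x400 image,
+    // passing a |size| of 100x100 yields a sample size of 4 and leaves
+    // |size| unchanged; passing 90x90 yields the sample size whose output
+    // is the smallest sampled size larger than 90x90, and |size| is updated
+    // to that output.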
+
+ /**
+ * Returns the dimensions of the scaled output image, for an input
+ * sampleSize.
+ *
+ * When the sample size divides evenly into the original dimensions, the
+ * scaled output dimensions will simply be equal to the original
+ * dimensions divided by the sample size.
+ *
+     *  When the sample size does not divide evenly into the original
+ * dimensions, the codec may round up or down, depending on what is most
+ * efficient to decode.
+ *
+ * Finally, the codec will always recommend a non-zero output, so the output
+ * dimension will always be one if the sampleSize is greater than the
+ * original dimension.
+ */
+ SkISize getSampledDimensions(int sampleSize) const;
+
+ /**
+     *  Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if the input subset is invalid.
+ *
+ * @param desiredSubset in/out parameter
+ * As input, a desired subset of the original bounds
+ * (as specified by getInfo).
+ * As output, if true is returned, desiredSubset may
+ * have been modified to a subset which is
+ * supported. Although a particular change may have
+ * been made to desiredSubset to create something
+ * supported, it is possible other changes could
+ * result in a valid subset. If false is returned,
+ * desiredSubset's value is undefined.
+ * @return true If the input desiredSubset is valid.
+ * desiredSubset may be modified to a subset
+ * supported by the codec.
+ * false If desiredSubset is invalid (NULL or not fully
+ * contained within the image).
+ */
+ bool getSupportedSubset(SkIRect* desiredSubset) const;
+ // TODO: Rename SkCodec::getValidSubset() to getSupportedSubset()
+
+ /**
+ * Returns the dimensions of the scaled, partial output image, for an
+ * input sampleSize and subset.
+ *
+ * @param sampleSize Factor to scale down by.
+ * @param subset Must be a valid subset of the original image
+ * dimensions and a subset supported by SkAndroidCodec.
+     *      getSupportedSubset() can be used to obtain a subset supported
+ * by SkAndroidCodec.
+ * @return Size of the scaled partial image. Or zero size
+ * if either of the inputs is invalid.
+ */
+ SkISize getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const;
+
+ /**
+ * Additional options to pass to getAndroidPixels().
+ */
+ // FIXME: It's a bit redundant to name these AndroidOptions when this class is already
+    //        called SkAndroidCodec. On the other hand, it may be a bit confusing to call
+ // these Options when SkCodec has a slightly different set of Options. Maybe these
+ // should be DecodeOptions or SamplingOptions?
+ struct AndroidOptions {
+ AndroidOptions()
+ : fZeroInitialized(SkCodec::kNo_ZeroInitialized)
+ , fSubset(nullptr)
+ , fSampleSize(1)
+ {}
+
+ /**
+         *  Indicates if destination pixel memory is zero initialized.
+ *
+ * The default is SkCodec::kNo_ZeroInitialized.
+ */
+ SkCodec::ZeroInitialized fZeroInitialized;
+
+ /**
+ * If not NULL, represents a subset of the original image to decode.
+ *
+ * Must be within the bounds returned by getInfo().
+ *
+ * If the EncodedFormat is SkEncodedImageFormat::kWEBP, the top and left
+ * values must be even.
+ *
+ * The default is NULL, meaning a decode of the entire image.
+ */
+ SkIRect* fSubset;
+
+ /**
+ * The client may provide an integer downscale factor for the decode.
+ * The codec may implement this downscaling by sampling or another
+ * method if it is more efficient.
+ *
+ * The default is 1, representing no downscaling.
+ */
+ int fSampleSize;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale or subset. If the codec cannot perform this
+ * scaling or subsetting, it will return an error code.
+ *
+ * The AndroidOptions object is also used to specify any requested scaling or subsetting
+ * using options->fSampleSize and options->fSubset. If NULL, the defaults (as specified above
+ * for AndroidOptions) are used.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ // FIXME: It's a bit redundant to name this getAndroidPixels() when this class is already
+    //        called SkAndroidCodec. On the other hand, it may be a bit confusing to call
+ // this getPixels() when it is a slightly different API than SkCodec's getPixels().
+ // Maybe this should be decode() or decodeSubset()?
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions* options);
+
+ /**
+ * Simplified version of getAndroidPixels() where we supply the default AndroidOptions as
+ * specified above for AndroidOptions. It will not perform any scaling or subsetting.
+ */
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
+
+ SkCodec::Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getAndroidPixels(info, pixels, rowBytes);
+ }
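+
+    /**
+     *  Illustrative usage sketch (editor's addition, not upstream API docs;
+     *  assumes SkBitmap.h is also included):
+     *
+     *      auto codec = SkAndroidCodec::MakeFromData(data);  // data: sk_sp<SkData>
+     *      if (codec) {
+     *          SkImageInfo info = codec->getInfo()
+     *                  .makeColorType(codec->computeOutputColorType(kN32_SkColorType))
+     *                  .makeAlphaType(codec->computeOutputAlphaType(false));
+     *          SkBitmap bm;
+     *          bm.allocPixels(info);
+     *          codec->getAndroidPixels(info, bm.getPixels(), bm.rowBytes());
+     *      }
+     */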
+
+ SkCodec* codec() const { return fCodec.get(); }
+
+protected:
+ SkAndroidCodec(SkCodec*, ExifOrientationBehavior = ExifOrientationBehavior::kIgnore);
+
+ virtual SkISize onGetSampledDimensions(int sampleSize) const = 0;
+
+ virtual bool onGetSupportedSubset(SkIRect* desiredSubset) const = 0;
+
+ virtual SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) = 0;
+
+private:
+ const SkImageInfo fInfo;
+ const ExifOrientationBehavior fOrientationBehavior;
+ std::unique_ptr<SkCodec> fCodec;
+};
+#endif // SkAndroidCodec_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkCodec.h b/gfx/skia/skia/include/codec/SkCodec.h
new file mode 100644
index 0000000000..2f9f93c0e7
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkCodec.h
@@ -0,0 +1,954 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodec_DEFINED
+#define SkCodec_DEFINED
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkYUVASizeInfo.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTemplates.h"
+
+#include <vector>
+
+class SkColorSpace;
+class SkData;
+class SkFrameHolder;
+class SkPngChunkReader;
+class SkSampler;
+
+namespace DM {
+class CodecSrc;
+class ColorCodecSrc;
+}
+
+/**
+ * Abstraction layer directly on top of an image codec.
+ */
+class SK_API SkCodec : SkNoncopyable {
+public:
+ /**
+ * Minimum number of bytes that must be buffered in SkStream input.
+ *
+ * An SkStream passed to NewFromStream must be able to use this many
+ * bytes to determine the image type. Then the same SkStream must be
+ * passed to the correct decoder to read from the beginning.
+ *
+ * This can be accomplished by implementing peek() to support peeking
+ * this many bytes, or by implementing rewind() to be able to rewind()
+ * after reading this many bytes.
+ */
+ static constexpr size_t MinBufferedBytesNeeded() { return 32; }
+
+ /**
+ * Error codes for various SkCodec methods.
+ */
+ enum Result {
+ /**
+ * General return value for success.
+ */
+ kSuccess,
+ /**
+ * The input is incomplete. A partial image was generated.
+ */
+ kIncompleteInput,
+ /**
+ * Like kIncompleteInput, except the input had an error.
+ *
+ * If returned from an incremental decode, decoding cannot continue,
+ * even with more data.
+ */
+ kErrorInInput,
+ /**
+ * The generator cannot convert to match the request, ignoring
+ * dimensions.
+ */
+ kInvalidConversion,
+ /**
+ * The generator cannot scale to requested size.
+ */
+ kInvalidScale,
+ /**
+ * Parameters (besides info) are invalid. e.g. NULL pixels, rowBytes
+ * too small, etc.
+ */
+ kInvalidParameters,
+ /**
+ * The input did not contain a valid image.
+ */
+ kInvalidInput,
+ /**
+ * Fulfilling this request requires rewinding the input, which is not
+ * supported for this input.
+ */
+ kCouldNotRewind,
+ /**
+ * An internal error, such as OOM.
+ */
+ kInternalError,
+ /**
+ * This method is not implemented by this codec.
+ * FIXME: Perhaps this should be kUnsupported?
+ */
+ kUnimplemented,
+ };
+
+ /**
+ * Readable string representing the error code.
+ */
+ static const char* ResultToString(Result);
+
+ /**
+ * For container formats that contain both still images and image sequences,
+ * instruct the decoder how the output should be selected. (Refer to comments
+ * for each value for more details.)
+ */
+ enum class SelectionPolicy {
+ /**
+ * If the container format contains both still images and image sequences,
+ * SkCodec should choose one of the still images. This is the default.
+ */
+ kPreferStillImage,
+ /**
+ * If the container format contains both still images and image sequences,
+ * SkCodec should choose one of the image sequences for animation.
+ */
+ kPreferAnimation,
+ };
+
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * As stated above, this call must be able to peek or read
+ * MinBufferedBytesNeeded to determine the correct format, and then start
+ * reading from the beginning. First it will attempt to peek, and it
+ * assumes that if less than MinBufferedBytesNeeded bytes (but more than
+ * zero) are returned, this is because the stream is shorter than this,
+ * so falling back to reading would not provide more data. If peek()
+ * returns zero bytes, this call will instead attempt to read(). This
+ * will require that the stream can be rewind()ed.
+ *
+ * If Result is not NULL, it will be set to either kSuccess if an SkCodec
+ * is returned or a reason for the failure if NULL is returned.
+ *
+ * If SkPngChunkReader is not NULL, take a ref and pass it to libpng if
+ * the image is a png.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(
+ std::unique_ptr<SkStream>, Result* = nullptr,
+ SkPngChunkReader* = nullptr,
+ SelectionPolicy selectionPolicy = SelectionPolicy::kPreferStillImage);
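+
+    /**
+     *  Illustrative sketch (editor's addition): creating a codec from a file
+     *  stream and reporting why creation failed.
+     *
+     *      SkCodec::Result result;
+     *      auto codec = SkCodec::MakeFromStream(
+     *              SkStream::MakeFromFile("image.png"), &result);
+     *      if (!codec) {
+     *          SkDebugf("no codec: %s\n", SkCodec::ResultToString(result));
+     *      }
+     */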
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ */
+ static std::unique_ptr<SkCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr);
+
+ virtual ~SkCodec();
+
+ /**
+ * Return a reasonable SkImageInfo to decode into.
+ */
+ SkImageInfo getInfo() const { return fEncodedInfo.makeImageInfo(); }
+
+ SkISize dimensions() const { return {fEncodedInfo.width(), fEncodedInfo.height()}; }
+ SkIRect bounds() const {
+ return SkIRect::MakeWH(fEncodedInfo.width(), fEncodedInfo.height());
+ }
+
+ /**
+ * Returns the image orientation stored in the EXIF data.
+ * If there is no EXIF data, or if we cannot read the EXIF data, returns kTopLeft.
+ */
+ SkEncodedOrigin getOrigin() const { return fOrigin; }
+
+ /**
+ * Return a size that approximately supports the desired scale factor.
+ * The codec may not be able to scale efficiently to the exact scale
+ * factor requested, so return a size that approximates that scale.
+ * The returned value is the codec's suggestion for the closest valid
+ * scale that it can natively support
+     *  scale that it can natively support.
+ SkISize getScaledDimensions(float desiredScale) const {
+ // Negative and zero scales are errors.
+ SkASSERT(desiredScale > 0.0f);
+ if (desiredScale <= 0.0f) {
+ return SkISize::Make(0, 0);
+ }
+
+ // Upscaling is not supported. Return the original size if the client
+ // requests an upscale.
+ if (desiredScale >= 1.0f) {
+ return this->dimensions();
+ }
+ return this->onGetScaledDimensions(desiredScale);
+ }
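+    // For example (illustrative, editor's addition): for a 100x100 JPEG,
+    // getScaledDimensions(0.5f) may return 50x50, while a codec with no
+    // native scaling simply returns the original 100x100.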
+
+ /**
+     *  Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if this codec cannot decode subsets or anything similar to
+ * desiredSubset.
+ *
+ * @param desiredSubset In/out parameter. As input, a desired subset of
+ * the original bounds (as specified by getInfo). If true is returned,
+ * desiredSubset may have been modified to a subset which is
+ * supported. Although a particular change may have been made to
+ * desiredSubset to create something supported, it is possible other
+ * changes could result in a valid subset.
+ * If false is returned, desiredSubset's value is undefined.
+ * @return true if this codec supports decoding desiredSubset (as
+ * returned, potentially modified)
+ */
+ bool getValidSubset(SkIRect* desiredSubset) const {
+ return this->onGetValidSubset(desiredSubset);
+ }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedImageFormat getEncodedFormat() const { return this->onGetEncodedFormat(); }
+
+ /**
+ * Whether or not the memory passed to getPixels is zero initialized.
+ */
+ enum ZeroInitialized {
+ /**
+ * The memory passed to getPixels is zero initialized. The SkCodec
+ * may take advantage of this by skipping writing zeroes.
+ */
+ kYes_ZeroInitialized,
+ /**
+ * The memory passed to getPixels has not been initialized to zero,
+ * so the SkCodec must write all zeroes to memory.
+ *
+ * This is the default. It will be used if no Options struct is used.
+ */
+ kNo_ZeroInitialized,
+ };
+
+ /**
+ * Additional options to pass to getPixels.
+ */
+ struct Options {
+ Options()
+ : fZeroInitialized(kNo_ZeroInitialized)
+ , fSubset(nullptr)
+ , fFrameIndex(0)
+ , fPriorFrame(kNoFrame)
+ {}
+
+ ZeroInitialized fZeroInitialized;
+ /**
+ * If not NULL, represents a subset of the original image to decode.
+ * Must be within the bounds returned by getInfo().
+ * If the EncodedFormat is SkEncodedImageFormat::kWEBP (the only one which
+ * currently supports subsets), the top and left values must be even.
+ *
+ * In getPixels and incremental decode, we will attempt to decode the
+ * exact rectangular subset specified by fSubset.
+ *
+ * In a scanline decode, it does not make sense to specify a subset
+ * top or subset height, since the client already controls which rows
+ * to get and which rows to skip. During scanline decodes, we will
+ * require that the subset top be zero and the subset height be equal
+ * to the full height. We will, however, use the values of
+ * subset left and subset width to decode partial scanlines on calls
+ * to getScanlines().
+ */
+ const SkIRect* fSubset;
+
+ /**
+ * The frame to decode.
+ *
+ * Only meaningful for multi-frame images.
+ */
+ int fFrameIndex;
+
+ /**
+ * If not kNoFrame, the dst already contains the prior frame at this index.
+ *
+ * Only meaningful for multi-frame images.
+ *
+ * If fFrameIndex needs to be blended with a prior frame (as reported by
+ * getFrameInfo[fFrameIndex].fRequiredFrame), the client can set this to
+ * any non-kRestorePrevious frame in [fRequiredFrame, fFrameIndex) to
+ * indicate that that frame is already in the dst. Options.fZeroInitialized
+ * is ignored in this case.
+ *
+ * If set to kNoFrame, the codec will decode any necessary required frame(s) first.
+ */
+ int fPriorFrame;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will return kInvalidScale.
+ *
+ * If the info contains a non-null SkColorSpace, the codec
+ * will perform the appropriate color space transformation.
+ * If the caller passes in the same color space that was
+ * reported by the codec, the color space transformation is
+ * a no-op.
+ *
+ * If a scanline decode is in progress, scanline mode will end, requiring the client to call
+ * startScanlineDecode() in order to return to decoding scanlines.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options*);
+
+ /**
+ * Simplified version of getPixels() that uses the default Options.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getPixels(info, pixels, rowBytes, nullptr);
+ }
+
+ Result getPixels(const SkPixmap& pm, const Options* opts = nullptr) {
+ return this->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), opts);
+ }
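+
+    /**
+     *  Illustrative sketch (editor's addition) of a one-shot decode into a
+     *  caller-owned buffer (assumes SkBitmap.h is also included):
+     *
+     *      auto codec = SkCodec::MakeFromData(data);  // data: sk_sp<SkData>
+     *      if (codec) {
+     *          SkBitmap bm;
+     *          bm.allocPixels(codec->getInfo());
+     *          SkCodec::Result r =
+     *                  codec->getPixels(bm.info(), bm.getPixels(), bm.rowBytes());
+     *          // r is kSuccess when the whole image decoded.
+     *      }
+     */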
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, V, and A planes. Given current codec
+ * limitations the size of the A plane will always be 0 and the Y, U, V
+ * channels will always be planar.
+ * @param colorSpace Output parameter. If non-NULL this is set to kJPEG,
+ * otherwise this is ignored.
+ */
+ bool queryYUV8(SkYUVASizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ if (nullptr == sizeInfo) {
+ return false;
+ }
+
+ bool result = this->onQueryYUV8(sizeInfo, colorSpace);
+ if (result) {
+ for (int i = 0; i <= 2; ++i) {
+ SkASSERT(sizeInfo->fSizes[i].fWidth > 0 && sizeInfo->fSizes[i].fHeight > 0 &&
+ sizeInfo->fWidthBytes[i] > 0);
+ }
+ SkASSERT(!sizeInfo->fSizes[3].fWidth &&
+ !sizeInfo->fSizes[3].fHeight &&
+ !sizeInfo->fWidthBytes[3]);
+ }
+ return result;
+ }
+
+ /**
+ * Returns kSuccess, or another value explaining the type of failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call queryYUV8().
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param planes Memory for each of the Y, U, and V planes.
+ */
+ Result getYUV8Planes(const SkYUVASizeInfo& sizeInfo, void* planes[SkYUVASizeInfo::kMaxCount]) {
+ if (!planes || !planes[0] || !planes[1] || !planes[2]) {
+ return kInvalidInput;
+ }
+ SkASSERT(!planes[3]); // TODO: is this a fair assumption?
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ return this->onGetYUV8Planes(sizeInfo, planes);
+ }
+
+ /**
+ * Prepare for an incremental decode with the specified options.
+ *
+ * This may require a rewind.
+ *
+ * If kIncompleteInput is returned, may be called again after more data has
+ * been provided to the source SkStream.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param dst Memory to write to. Needs to be large enough to hold the subset,
+ * if present, or the full image as described in dstInfo.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized and whether to decode a subset.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const Options*);
+
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes) {
+ return this->startIncrementalDecode(dstInfo, dst, rowBytes, nullptr);
+ }
+
+ /**
+ * Start/continue the incremental decode.
+ *
+ * Not valid to call before a call to startIncrementalDecode() returns
+ * kSuccess.
+ *
+ * If kIncompleteInput is returned, may be called again after more data has
+ * been provided to the source SkStream.
+ *
+ * Unlike getPixels and getScanlines, this does not do any filling. This is
+ * left up to the caller, since they may be skipping lines or continuing the
+ * decode later. In the latter case, they may choose to initialize all lines
+ * first, or only initialize the remaining lines after the first call.
+ *
+ * @param rowsDecoded Optional output variable returning the total number of
+ * lines initialized. Only meaningful if this method returns kIncompleteInput.
+ * Otherwise the implementation may not set it.
+ * Note that some implementations may have initialized this many rows, but
+ * not necessarily finished those rows (e.g. interlaced PNG). This may be
+ * useful for determining what rows the client needs to initialize.
+ * @return kSuccess if all lines requested in startIncrementalDecode have
+ * been completely decoded. kIncompleteInput otherwise.
+ */
+ Result incrementalDecode(int* rowsDecoded = nullptr) {
+ if (!fStartedIncrementalDecode) {
+ return kInvalidParameters;
+ }
+ return this->onIncrementalDecode(rowsDecoded);
+ }
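+
+    /**
+     *  Illustrative sketch (editor's addition): driving an incremental decode
+     *  from a growing source. haveMoreData() is a hypothetical stand-in for
+     *  whatever signals that more bytes have reached the source SkStream.
+     *
+     *      if (codec->startIncrementalDecode(dstInfo, dst, rowBytes) == kSuccess) {
+     *          Result r = codec->incrementalDecode();
+     *          while (kIncompleteInput == r && haveMoreData()) {
+     *              r = codec->incrementalDecode();  // retry with the new bytes
+     *          }
+     *      }
+     */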
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Prepare for a scanline decode with the specified options.
+ *
+ * After this call, this class will be ready to decode the first scanline.
+ *
+ * This must be called in order to call getScanlines or skipScanlines.
+ *
+ * This may require rewinding the stream.
+ *
+ * Not all SkCodecs support this.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo, const Options* options);
+
+ /**
+ * Simplified version of startScanlineDecode() that uses the default Options.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo) {
+ return this->startScanlineDecode(dstInfo, nullptr);
+ }
+
+ /**
+ * Write the next countLines scanlines into dst.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * @param dst Must be non-null, and large enough to hold countLines
+ * scanlines of size rowBytes.
+ * @param countLines Number of lines to write.
+ * @param rowBytes Number of bytes per row. Must be large enough to hold
+ * a scanline based on the SkImageInfo used to create this object.
+ * @return the number of lines successfully decoded. If this value is
+ * less than countLines, this will fill the remaining lines with a
+ * default value.
+ */
+ int getScanlines(void* dst, int countLines, size_t rowBytes);
+
+ /**
+ * Skip count scanlines.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * The default version just calls onGetScanlines and discards the dst.
+ * NOTE: If skipped lines are the only lines with alpha, this default
+ * will make reallyHasAlpha return true, when it could have returned
+ * false.
+ *
+ * @return true if the scanlines were successfully skipped
+ * false on failure, possible reasons for failure include:
+ * An incomplete input image stream.
+ * Calling this function before calling startScanlineDecode().
+ * If countLines is less than zero or so large that it moves
+ * the current scanline past the end of the image.
+ */
+ bool skipScanlines(int countLines);
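+
+    /**
+     *  Illustrative sketch (editor's addition): keeping every other row of a
+     *  kTopDown image. rowPtr() is a hypothetical helper returning the
+     *  address of a row in the destination buffer.
+     *
+     *      if (codec->startScanlineDecode(dstInfo) == kSuccess) {
+     *          for (int y = 0; y + 1 < dstInfo.height(); y += 2) {
+     *              codec->getScanlines(rowPtr(y / 2), 1, rowBytes);  // keep row y
+     *              codec->skipScanlines(1);                          // drop row y+1
+     *          }
+     *      }
+     */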
+
+ /**
+ * The order in which rows are output from the scanline decoder is not the
+ * same for all variations of all image types. This explains the possible
+ * output row orderings.
+ */
+ enum SkScanlineOrder {
+ /*
+ * By far the most common, this indicates that the image can be decoded
+ * reliably using the scanline decoder, and that rows will be output in
+ * the logical order.
+ */
+ kTopDown_SkScanlineOrder,
+
+ /*
+ * This indicates that the scanline decoder reliably outputs rows, but
+ * they will be returned in reverse order. If the scanline format is
+ * kBottomUp, the nextScanline() API can be used to determine the actual
+ * y-coordinate of the next output row, but the client is not forced
+ * to take advantage of this, given that it's not too tough to keep
+ * track independently.
+ *
+ * For full image decodes, it is safe to get all of the scanlines at
+ * once, since the decoder will handle inverting the rows as it
+ * decodes.
+ *
+ * For subset decodes and sampling, it is simplest to get and skip
+ * scanlines one at a time, using the nextScanline() API. It is
+ * possible to ask for larger chunks at a time, but this should be used
+ * with caution. As with full image decodes, the decoder will handle
+ * inverting the requested rows, but rows will still be delivered
+ * starting from the bottom of the image.
+ *
+ * Upside down bmps are an example.
+ */
+ kBottomUp_SkScanlineOrder,
+ };
+
+ /**
+ * An enum representing the order in which scanlines will be returned by
+ * the scanline decoder.
+ *
+ * This is undefined before startScanlineDecode() is called.
+ */
+ SkScanlineOrder getScanlineOrder() const { return this->onGetScanlineOrder(); }
+
+ /**
+ * Returns the y-coordinate of the next row to be returned by the scanline
+ * decoder.
+ *
+ * This will equal fCurrScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps).
+ *
+ * Results are undefined when not in scanline decoding mode.
+ */
+ int nextScanline() const { return this->outputScanline(fCurrScanline); }
+
+ /**
+ * Returns the output y-coordinate of the row that corresponds to an input
+ * y-coordinate. The input y-coordinate represents where the scanline
+ * is located in the encoded data.
+ *
+ * This will equal inputScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps, interlaced gifs).
+ */
+ int outputScanline(int inputScanline) const;
+
+ /**
+ * Return the number of frames in the image.
+ *
+ * May require reading through the stream.
+ */
+ int getFrameCount() {
+ return this->onGetFrameCount();
+ }
+
+ // Sentinel value used when a frame index implies "no frame":
+ // - FrameInfo::fRequiredFrame set to this value means the frame
+ // is independent.
+ // - Options::fPriorFrame set to this value means no (relevant) prior frame
+ // is residing in dst's memory.
+ static constexpr int kNoFrame = -1;
+
+ // This transitional definition was added in August 2018, and will eventually be removed.
+#ifdef SK_LEGACY_SKCODEC_NONE_ENUM
+ static constexpr int kNone = kNoFrame;
+#endif
+
+ /**
+ * Information about individual frames in a multi-framed image.
+ */
+ struct FrameInfo {
+ /**
+ * The frame that this frame needs to be blended with, or
+ * kNoFrame if this frame is independent (so it can be
+ * drawn over an uninitialized buffer).
+ *
+ * Note that this is the *earliest* frame that can be used
+ * for blending. Any frame from [fRequiredFrame, i) can be
+ * used, unless its fDisposalMethod is kRestorePrevious.
+ */
+ int fRequiredFrame;
+
+ /**
+ * Number of milliseconds to show this frame.
+ */
+ int fDuration;
+
+ /**
+ * Whether the end marker for this frame is contained in the stream.
+ *
+ * Note: this does not guarantee that an attempt to decode will be complete.
+ * There could be an error in the stream.
+ */
+ bool fFullyReceived;
+
+ /**
+ * This is conservative; it will still return non-opaque if e.g. a
+ * color index-based frame has a color with alpha but does not use it.
+ */
+ SkAlphaType fAlphaType;
+
+ /**
+ * How this frame should be modified before decoding the next one.
+ */
+ SkCodecAnimation::DisposalMethod fDisposalMethod;
+ };
+
+ /**
+ * Return info about a single frame.
+ *
+ * Only supported by multi-frame images. Does not read through the stream,
+ * so it should be called after getFrameCount() to parse any frames that
+ * have not already been parsed.
+ */
+ bool getFrameInfo(int index, FrameInfo* info) const {
+ if (index < 0) {
+ return false;
+ }
+ return this->onGetFrameInfo(index, info);
+ }
+
+ /**
+ * Return info about all the frames in the image.
+ *
+ * May require reading through the stream to determine info about the
+ * frames (including the count).
+ *
+ * As such, future decoding calls may require a rewind.
+ *
+ * For still (non-animated) image codecs, this will return an empty vector.
+ */
+ std::vector<FrameInfo> getFrameInfo();
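+
+    /**
+     *  Illustrative sketch (editor's addition): decoding frame |i| of an
+     *  animation when its required frame is already sitting in |pixels|.
+     *
+     *      std::vector<SkCodec::FrameInfo> frames = codec->getFrameInfo();
+     *      SkCodec::Options opts;
+     *      opts.fFrameIndex = i;
+     *      if (frames[i].fRequiredFrame != SkCodec::kNoFrame) {
+     *          opts.fPriorFrame = frames[i].fRequiredFrame;  // already in dst
+     *      }
+     *      codec->getPixels(info, pixels, rowBytes, &opts);
+     */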
+
+ static constexpr int kRepetitionCountInfinite = -1;
+
+ /**
+ * Return the number of times to repeat, if this image is animated. This number does not
+ * include the first play through of each frame. For example, a repetition count of 4 means
+ * that each frame is played 5 times and then the animation stops.
+ *
+ * It can return kRepetitionCountInfinite, a negative number, meaning that the animation
+ * should loop forever.
+ *
+ * May require reading the stream to find the repetition count.
+ *
+ * As such, future decoding calls may require a rewind.
+ *
+ * For still (non-animated) image codecs, this will return 0.
+ */
+ int getRepetitionCount() {
+ return this->onGetRepetitionCount();
+ }
+
+ // Register a decoder at runtime by passing two function pointers:
+ // - peek() to return true if the span of bytes appears to be your encoded format;
+ // - make() to attempt to create an SkCodec from the given stream.
+ // Not thread safe.
+ static void Register(
+ bool (*peek)(const void*, size_t),
+ std::unique_ptr<SkCodec> (*make)(std::unique_ptr<SkStream>, SkCodec::Result*));
+
+protected:
+ const SkEncodedInfo& getEncodedInfo() const { return fEncodedInfo; }
+
+ using XformFormat = skcms_PixelFormat;
+
+ SkCodec(SkEncodedInfo&&,
+ XformFormat srcFormat,
+ std::unique_ptr<SkStream>,
+ SkEncodedOrigin = kTopLeft_SkEncodedOrigin);
+
+ virtual SkISize onGetScaledDimensions(float /*desiredScale*/) const {
+ // By default, scaling is not supported.
+ return this->dimensions();
+ }
+
+ // FIXME: What to do about subsets??
+ /**
+ * Subclasses should override if they support dimensions other than the
+ * srcInfo's.
+ */
+ virtual bool onDimensionsSupported(const SkISize&) {
+ return false;
+ }
+
+ virtual SkEncodedImageFormat onGetEncodedFormat() const = 0;
+
+ /**
+ * @param rowsDecoded When the encoded image stream is incomplete, this function
+ * will return kIncompleteInput and rowsDecoded will be set to
+ * the number of scanlines that were successfully decoded.
+ * This will allow getPixels() to fill the uninitialized memory.
+ */
+ virtual Result onGetPixels(const SkImageInfo& info,
+ void* pixels, size_t rowBytes, const Options&,
+ int* rowsDecoded) = 0;
+
+ virtual bool onQueryYUV8(SkYUVASizeInfo*, SkYUVColorSpace*) const {
+ return false;
+ }
+
+ virtual Result onGetYUV8Planes(const SkYUVASizeInfo&,
+ void*[SkYUVASizeInfo::kMaxCount] /*planes*/) {
+ return kUnimplemented;
+ }
+
+ virtual bool onGetValidSubset(SkIRect* /*desiredSubset*/) const {
+ // By default, subsets are not supported.
+ return false;
+ }
+
+ /**
+ * If the stream was previously read, attempt to rewind.
+ *
+ * If the stream needed to be rewound, call onRewind.
+ * @returns true if the codec is at the right position and can be used.
+ * false if there was a failure to rewind.
+ *
+ * This is called by getPixels(), getYUV8Planes(), startIncrementalDecode() and
+ * startScanlineDecode(). Subclasses may call if they need to rewind at another time.
+ */
+ bool SK_WARN_UNUSED_RESULT rewindIfNeeded();
+
+ /**
+ * Called by rewindIfNeeded, if the stream needed to be rewound.
+ *
+ * Subclasses should do any set up needed after a rewind.
+ */
+ virtual bool onRewind() {
+ return true;
+ }
+
+ /**
+ * Get method for the input stream
+ */
+ SkStream* stream() {
+ return fStream.get();
+ }
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Most images types will be kTopDown and will not need to override this function.
+ */
+ virtual SkScanlineOrder onGetScanlineOrder() const { return kTopDown_SkScanlineOrder; }
+
+ const SkImageInfo& dstInfo() const { return fDstInfo; }
+
+ const Options& options() const { return fOptions; }
+
+ /**
+ * Returns the number of scanlines that have been decoded so far.
+ * This is unaffected by the SkScanlineOrder.
+ *
+ * Returns -1 if we have not started a scanline decode.
+ */
+ int currScanline() const { return fCurrScanline; }
+
+ virtual int onOutputScanline(int inputScanline) const;
+
+ /**
+ * Return whether we can convert to dst.
+ *
+ * Will be called for the appropriate frame, prior to initializing the colorXform.
+ */
+ virtual bool conversionSupported(const SkImageInfo& dst, bool srcIsOpaque,
+ bool needsColorXform);
+
+ // Some classes never need a colorXform e.g.
+ // - ICO uses its embedded codec's colorXform
+ // - WBMP is just Black/White
+ virtual bool usesColorXform() const { return true; }
+ void applyColorXform(void* dst, const void* src, int count) const;
+
+ bool colorXform() const { return fXformTime != kNo_XformTime; }
+ bool xformOnDecode() const { return fXformTime == kDecodeRow_XformTime; }
+
+ virtual int onGetFrameCount() {
+ return 1;
+ }
+
+ virtual bool onGetFrameInfo(int, FrameInfo*) const {
+ return false;
+ }
+
+ virtual int onGetRepetitionCount() {
+ return 0;
+ }
+
+private:
+ const SkEncodedInfo fEncodedInfo;
+ const XformFormat fSrcXformFormat;
+ std::unique_ptr<SkStream> fStream;
+ bool fNeedsRewind;
+ const SkEncodedOrigin fOrigin;
+
+ SkImageInfo fDstInfo;
+ Options fOptions;
+
+ enum XformTime {
+ kNo_XformTime,
+ kPalette_XformTime,
+ kDecodeRow_XformTime,
+ };
+ XformTime fXformTime;
+ XformFormat fDstXformFormat; // Based on fDstInfo.
+ skcms_ICCProfile fDstProfile;
+ skcms_AlphaFormat fDstXformAlphaFormat;
+
+ // Only meaningful during scanline decodes.
+ int fCurrScanline;
+
+ bool fStartedIncrementalDecode;
+
+ bool initializeColorXform(const SkImageInfo& dstInfo, SkEncodedInfo::Alpha, bool srcIsOpaque);
+
+ /**
+ * Return whether these dimensions are supported as a scale.
+ *
+ * The codec may choose to cache the information about scale and subset.
+ * Either way, the same information will be passed to onGetPixels/onStart
+ * on success.
+ *
+ * This must return true for a size returned from getScaledDimensions.
+ */
+ bool dimensionsSupported(const SkISize& dim) {
+ return dim == this->dimensions() || this->onDimensionsSupported(dim);
+ }
+
+ /**
+ * For multi-framed images, return the object with information about the frames.
+ */
+ virtual const SkFrameHolder* getFrameHolder() const {
+ return nullptr;
+ }
+
+ /**
+ * Check for a valid Options.fFrameIndex, and decode prior frames if necessary.
+ */
+ Result handleFrameIndex(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&);
+
+ // Methods for scanline decoding.
+ virtual Result onStartScanlineDecode(const SkImageInfo& /*dstInfo*/,
+ const Options& /*options*/) {
+ return kUnimplemented;
+ }
+
+ virtual Result onStartIncrementalDecode(const SkImageInfo& /*dstInfo*/, void*, size_t,
+ const Options&) {
+ return kUnimplemented;
+ }
+
+ virtual Result onIncrementalDecode(int*) {
+ return kUnimplemented;
+ }
+
+
+ virtual bool onSkipScanlines(int /*countLines*/) { return false; }
+
+ virtual int onGetScanlines(void* /*dst*/, int /*countLines*/, size_t /*rowBytes*/) { return 0; }
+
+ /**
+ * On an incomplete decode, getPixels() and getScanlines() will call this function
+     *  to fill any uninitialized memory.
+ *
+ * @param dstInfo Contains the destination color type
+ * Contains the destination alpha type
+ * Contains the destination width
+ * The height stored in this info is unused
+ * @param dst Pointer to the start of destination pixel memory
+ * @param rowBytes Stride length in destination pixel memory
+ * @param zeroInit Indicates if memory is zero initialized
+ * @param linesRequested Number of lines that the client requested
+ * @param linesDecoded Number of lines that were successfully decoded
+ */
+ void fillIncompleteImage(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ ZeroInitialized zeroInit, int linesRequested, int linesDecoded);
+
+ /**
+ * Return an object which will allow forcing scanline decodes to sample in X.
+ *
+ * May create a sampler, if one is not currently being used. Otherwise, does
+ * not affect ownership.
+ *
+ * Only valid during scanline decoding or incremental decoding.
+ */
+ virtual SkSampler* getSampler(bool /*createIfNecessary*/) { return nullptr; }
+
+ friend class DM::CodecSrc; // for fillIncompleteImage
+ friend class SkSampledCodec;
+ friend class SkIcoCodec;
+ friend class SkAndroidCodec; // for fEncodedInfo
+};
+#endif // SkCodec_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkCodecAnimation.h b/gfx/skia/skia/include/codec/SkCodecAnimation.h
new file mode 100644
index 0000000000..2ddb78deb3
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkCodecAnimation.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecAnimation_DEFINED
+#define SkCodecAnimation_DEFINED
+
+namespace SkCodecAnimation {
+ /**
+ * This specifies how the next frame is based on this frame.
+ *
+ * Names are based on the GIF 89a spec.
+ *
+ * The numbers correspond to values in a GIF.
+ */
+ enum class DisposalMethod {
+ /**
+ * The next frame should be drawn on top of this one.
+ *
+ * In a GIF, a value of 0 (not specified) is also treated as Keep.
+ */
+ kKeep = 1,
+
+ /**
+ * Similar to Keep, except the area inside this frame's rectangle
+ * should be cleared to the BackGround color (transparent) before
+ * drawing the next frame.
+ */
+ kRestoreBGColor = 2,
+
+ /**
+ * The next frame should be drawn on top of the previous frame - i.e.
+ * disregarding this one.
+ *
+ * In a GIF, a value of 4 is also treated as RestorePrevious.
+ */
+ kRestorePrevious = 3,
+ };
+}
+#endif // SkCodecAnimation_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkEncodedOrigin.h b/gfx/skia/skia/include/codec/SkEncodedOrigin.h
new file mode 100644
index 0000000000..2eed1c6ac6
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkEncodedOrigin.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedOrigin_DEFINED
+#define SkEncodedOrigin_DEFINED
+
+#include "include/core/SkMatrix.h"
+
+// These values match the orientation values defined in www.exif.org/Exif2-2.PDF.
+enum SkEncodedOrigin {
+ kTopLeft_SkEncodedOrigin = 1, // Default
+ kTopRight_SkEncodedOrigin = 2, // Reflected across y-axis
+ kBottomRight_SkEncodedOrigin = 3, // Rotated 180
+ kBottomLeft_SkEncodedOrigin = 4, // Reflected across x-axis
+ kLeftTop_SkEncodedOrigin = 5, // Reflected across x-axis, Rotated 90 CCW
+ kRightTop_SkEncodedOrigin = 6, // Rotated 90 CW
+ kRightBottom_SkEncodedOrigin = 7, // Reflected across x-axis, Rotated 90 CW
+ kLeftBottom_SkEncodedOrigin = 8, // Rotated 90 CCW
+ kDefault_SkEncodedOrigin = kTopLeft_SkEncodedOrigin,
+ kLast_SkEncodedOrigin = kLeftBottom_SkEncodedOrigin,
+};
+
+/**
+ * Given an encoded origin and the width and height of the source data, returns a matrix
+ * that transforms the source rectangle [0, 0, w, h] to a correctly oriented destination
+ * rectangle, with the upper left corner still at [0, 0].
+ */
+static inline SkMatrix SkEncodedOriginToMatrix(SkEncodedOrigin origin, int w, int h) {
+ switch (origin) {
+ case kTopLeft_SkEncodedOrigin: return SkMatrix::I();
+ case kTopRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, 1, 0, 0, 0, 1);
+ case kBottomRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, -1, h, 0, 0, 1);
+ case kBottomLeft_SkEncodedOrigin: return SkMatrix::MakeAll( 1, 0, 0, 0, -1, h, 0, 0, 1);
+ case kLeftTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, 1, 0, 0, 0, 0, 1);
+ case kRightTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, h, 1, 0, 0, 0, 0, 1);
+ case kRightBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, h, -1, 0, w, 0, 0, 1);
+ case kLeftBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, -1, 0, w, 0, 0, 1);
+ }
+ SK_ABORT("Unexpected origin");
+}
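+
+/**
+ *  Illustrative sketch (editor's addition): orienting decoded pixels at draw
+ *  time, assuming the canvas is sized for the oriented output.
+ *
+ *      SkMatrix m = SkEncodedOriginToMatrix(codec->getOrigin(), w, h);
+ *      canvas->concat(m);
+ *      canvas->drawBitmap(bitmap, 0, 0);
+ */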
+
+
+#endif // SkEncodedOrigin_DEFINED
diff --git a/gfx/skia/skia/include/config/SkUserConfig.h b/gfx/skia/skia/include/config/SkUserConfig.h
new file mode 100644
index 0000000000..b262143677
--- /dev/null
+++ b/gfx/skia/skia/include/config/SkUserConfig.h
@@ -0,0 +1,131 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkUserConfig_DEFINED
+#define SkUserConfig_DEFINED
+
+/* SkTypes.h, the root of the public header files, includes SkPreConfig.h,
+ then SkUserConfig.h, then SkPostConfig.h.
+
+ SkPreConfig.h runs first, and it is responsible for initializing certain
+ skia defines.
+
+ SkPostConfig.h runs last, and its job is to just check that the final
+ defines are consistent (i.e. that we don't have mutually conflicting
+ defines).
+
+ SkUserConfig.h (this file) runs in the middle. It gets to change or augment
+ the list of flags initially set in preconfig, and then postconfig checks
+ that everything still makes sense.
+
+ Below are optional defines that add, subtract, or change default behavior
+ in Skia. Your port can locally edit this file to enable/disable flags as
+   you choose, or these can be declared on your command line (e.g. -Dfoo).
+
+   By default, this include file ships with all of the flags commented out,
+   so including it has no effect.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Skia has lots of debug-only code. Often this is just null checks or other
+ parameter checking, but sometimes it can be quite intrusive (e.g. check that
+   each 32-bit pixel is in premultiplied form). This code can be very useful
+ during development, but will slow things down in a shipping product.
+
+ By default, these mutually exclusive flags are defined in SkPreConfig.h,
+ based on the presence or absence of NDEBUG, but that decision can be changed
+ here.
+ */
+//#define SK_DEBUG
+//#define SK_RELEASE
+
+/* To write debug messages to a console, skia will call SkDebugf(...) following
+ printf conventions (e.g. const char* format, ...). If you want to redirect
+ this to something other than printf, define yours here
+ */
+//#define SkDebugf(...) MyFunction(__VA_ARGS__)
+
+/*
+ * To specify a different default font cache limit, define this. If this is
+ * undefined, skia will use a built-in value.
+ */
+//#define SK_DEFAULT_FONT_CACHE_LIMIT (1024 * 1024)
+
+/*
+ *  To specify a different default size for the image cache, define this and
+ *  set it to the desired value (in bytes). SkGraphics.h has a runtime API to set this
+ * value as well. If this is undefined, a built-in value will be used.
+ */
+//#define SK_DEFAULT_IMAGE_CACHE_LIMIT (1024 * 1024)
+
+/* Define this to set the upper limit for text to support LCD. Values that
+ are very large increase the cost in the font cache and draw slower, without
+ improving readability. If this is undefined, Skia will use its default
+ value (e.g. 48)
+ */
+//#define SK_MAX_SIZE_FOR_LCDTEXT 48
+
+/* Change the kN32_SkColorType ordering to BGRA to work in X windows.
+ */
+//#define SK_R32_SHIFT 16
+
+
+/* Determines whether to build code that supports the GPU backend. Some classes
+ that are not GPU-specific, such as SkShader subclasses, have optional code
+   that allows them to interact with the GPU backend. If you'd like to
+   omit this code, set SK_SUPPORT_GPU to 0. This also allows you to omit the gpu
+ directories from your include search path when you're not building the GPU
+ backend. Defaults to 1 (build the GPU code).
+ */
+//#define SK_SUPPORT_GPU 1
+
+/* Skia makes use of histogram logging macros to trace the frequency of
+ * events. By default, Skia provides no-op versions of these macros.
+ * Skia consumers can provide their own definitions of these macros to
+ * integrate with their histogram collection backend.
+ */
+//#define SK_HISTOGRAM_BOOLEAN(name, value)
+//#define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
+
+#define MOZ_SKIA
+
+// On all platforms we have this byte order
+#define SK_A32_SHIFT 24
+#define SK_R32_SHIFT 16
+#define SK_G32_SHIFT 8
+#define SK_B32_SHIFT 0
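+// With these shifts a packed 32-bit pixel reads as 0xAARRGGBB; e.g. opaque
+// red is (0xFFu << SK_A32_SHIFT) | (0xFFu << SK_R32_SHIFT) == 0xFFFF0000.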
+
+#define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0
+
+#define SK_RASTERIZE_EVEN_ROUNDING
+
+#define SK_SUPPORT_DEPRECATED_CLIPOPS
+
+#define SK_USE_FREETYPE_EMBOLDEN
+
+#define SK_SUPPORT_GPU 0
+
+#ifndef MOZ_IMPLICIT
+# ifdef MOZ_CLANG_PLUGIN
+# define MOZ_IMPLICIT __attribute__((annotate("moz_implicit")))
+# else
+# define MOZ_IMPLICIT
+# endif
+#endif
+
+#define SK_DISABLE_SLOW_DEBUG_VALIDATION 1
+
+#define SK_IGNORE_MAC_BLENDING_MATCH_FIX
+
+#define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN
+
+#define SK_DISABLE_TYPEFACE_CACHE
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkAnnotation.h b/gfx/skia/skia/include/core/SkAnnotation.h
new file mode 100644
index 0000000000..9048bb6b6d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkAnnotation.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotation_DEFINED
+#define SkAnnotation_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkData;
+struct SkPoint;
+struct SkRect;
+class SkCanvas;
+
+/**
+ * Annotate the canvas by associating the specified URL with the
+ * specified rectangle (in local coordinates, just like drawRect).
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateRectWithURL(SkCanvas*, const SkRect&, SkData*);
+
+/**
+ * Annotate the canvas by associating a name with the specified point.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateNamedDestination(SkCanvas*, const SkPoint&, SkData*);
+
+/**
+ * Annotate the canvas by making the specified rectangle link to a named
+ * destination.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateLinkToDestination(SkCanvas*, const SkRect&, SkData*);
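+
+/**
+ *  Illustrative sketch (editor's addition): tagging a rectangle with a URL
+ *  on a canvas destined for a backend that keeps annotations (e.g. PDF).
+ *
+ *      sk_sp<SkData> url = SkData::MakeWithCString("https://example.org");
+ *      SkAnnotateRectWithURL(canvas, SkRect::MakeWH(100, 20), url.get());
+ */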
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBBHFactory.h b/gfx/skia/skia/include/core/SkBBHFactory.h
new file mode 100644
index 0000000000..afd223fe9e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBBHFactory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBBHFactory_DEFINED
+#define SkBBHFactory_DEFINED
+
+#include "include/core/SkTypes.h"
+class SkBBoxHierarchy;
+struct SkRect;
+
+class SK_API SkBBHFactory {
+public:
+ /**
+ * Allocate a new SkBBoxHierarchy. Return NULL on failure.
+ */
+ virtual SkBBoxHierarchy* operator()() const = 0;
+ virtual ~SkBBHFactory() {}
+};
+
+class SK_API SkRTreeFactory : public SkBBHFactory {
+public:
+ SkBBoxHierarchy* operator()() const override;
+private:
+ typedef SkBBHFactory INHERITED;
+};
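+
+/**
+ *  Illustrative sketch (editor's addition, assumes SkPictureRecorder.h):
+ *  recording with an R-Tree so playback can cull draw ops by their bounds.
+ *
+ *      SkRTreeFactory factory;
+ *      SkPictureRecorder recorder;
+ *      SkCanvas* canvas =
+ *              recorder.beginRecording(SkRect::MakeWH(640, 480), &factory);
+ */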
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBitmap.h b/gfx/skia/skia/include/core/SkBitmap.h
new file mode 100644
index 0000000000..d561321bc0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBitmap.h
@@ -0,0 +1,1166 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmap_DEFINED
+#define SkBitmap_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTileMode.h"
+
+struct SkMask;
+struct SkIRect;
+struct SkRect;
+class SkPaint;
+class SkPixelRef;
+class SkShader;
+class SkString;
+
+/** \class SkBitmap
+ SkBitmap describes a two-dimensional raster pixel array. SkBitmap is built on
+ SkImageInfo, containing integer width and height, SkColorType and SkAlphaType
+ describing the pixel format, and SkColorSpace describing the range of colors.
+ SkBitmap points to SkPixelRef, which describes the physical array of pixels.
+ SkImageInfo bounds may be located anywhere fully inside SkPixelRef bounds.
+
+ SkBitmap can be drawn using SkCanvas. SkBitmap can be a drawing destination for SkCanvas
+ draw member functions. SkBitmap flexibility as a pixel container limits some
+ optimizations available to the target platform.
+
+ If pixel array is primarily read-only, use SkImage for better performance.
+ If pixel array is primarily written to, use SkSurface for better performance.
+
+ Declaring SkBitmap const prevents altering SkImageInfo: the SkBitmap height, width,
+ and so on cannot change. It does not affect SkPixelRef: a caller may write its
+ pixels. Declaring SkBitmap const affects SkBitmap configuration, not its contents.
+
+ SkBitmap is not thread safe. Each thread must have its own copy of SkBitmap fields,
+ although threads may share the underlying pixel array.
+*/
+class SK_API SkBitmap {
+public:
+ class SK_API Allocator;
+
+ /** Creates an empty SkBitmap without pixels, with kUnknown_SkColorType,
+ kUnknown_SkAlphaType, and with a width and height of zero. SkPixelRef origin is
+ set to (0, 0). SkBitmap is not volatile.
+
+ Use setInfo() to associate SkColorType, SkAlphaType, width, and height
+ after SkBitmap has been created.
+
+ @return empty SkBitmap
+ */
+ SkBitmap();
+
+ /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels
+ allocated, so both bitmaps reference the same pixels.
+
+ @param src SkBitmap to copy SkImageInfo, and share SkPixelRef
+ @return copy of src
+ */
+ SkBitmap(const SkBitmap& src);
+
+ /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to
+ SkBitmap.
+
+ @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef
+ @return copy of src
+ */
+ SkBitmap(SkBitmap&& src);
+
+ /** Decrements SkPixelRef reference count, if SkPixelRef is not nullptr.
+ */
+ ~SkBitmap();
+
+ /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels
+ allocated, so both bitmaps reference the same pixels.
+
+ @param src SkBitmap to copy SkImageInfo, and share SkPixelRef
+ @return copy of src
+ */
+ SkBitmap& operator=(const SkBitmap& src);
+
+ /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to
+ SkBitmap.
+
+ @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef
+ @return copy of src
+ */
+ SkBitmap& operator=(SkBitmap&& src);
+
+ /** Swaps the fields of the two bitmaps.
+
+ @param other SkBitmap exchanged with original
+ */
+ void swap(SkBitmap& other);
+
+ /** Returns a constant reference to the SkPixmap holding the SkBitmap pixel
+ address, row bytes, and SkImageInfo.
+
+ @return reference to SkPixmap describing this SkBitmap
+ */
+ const SkPixmap& pixmap() const { return fPixmap; }
+
+ /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace.
+
+ @return reference to SkImageInfo
+ */
+ const SkImageInfo& info() const { return fPixmap.info(); }
+
+    /** Returns pixel count in each row. Should be less than or equal to
+        rowBytes() / info().bytesPerPixel().
+
+ May be less than pixelRef().width(). Will not exceed pixelRef().width() less
+ pixelRefOrigin().fX.
+
+ @return pixel width in SkImageInfo
+ */
+ int width() const { return fPixmap.width(); }
+
+ /** Returns pixel row count.
+
+        May be less than pixelRef().height(). Will not exceed pixelRef().height() less
+ pixelRefOrigin().fY.
+
+ @return pixel height in SkImageInfo
+ */
+ int height() const { return fPixmap.height(); }
+
+ /** Returns SkColorType, one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType.
+
+ @return SkColorType in SkImageInfo
+ */
+ SkColorType colorType() const { return fPixmap.colorType(); }
+
+ /** Returns SkAlphaType, one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType.
+
+ @return SkAlphaType in SkImageInfo
+ */
+ SkAlphaType alphaType() const { return fPixmap.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ @return SkColorSpace in SkImageInfo, or nullptr
+ */
+ SkColorSpace* colorSpace() const { return fPixmap.colorSpace(); }
+
+ /** Returns smart pointer to SkColorSpace, the range of colors, associated with
+ SkImageInfo. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace in SkImageInfo wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const { return fPixmap.info().refColorSpace(); }
+
+ /** Returns number of bytes per pixel required by SkColorType.
+        Returns zero if colorType() is kUnknown_SkColorType.
+
+ @return bytes in pixel
+ */
+ int bytesPerPixel() const { return fPixmap.info().bytesPerPixel(); }
+
+ /** Returns number of pixels that fit on row. Should be greater than or equal to
+ width().
+
+ @return maximum pixels per row
+ */
+ int rowBytesAsPixels() const { return fPixmap.rowBytesAsPixels(); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fPixmap.shiftPerPixel(); }
+
+    /** Returns true if either width() or height() is zero.
+
+ Does not check if SkPixelRef is nullptr; call drawsNothing() to check width(),
+ height(), and SkPixelRef.
+
+ @return true if dimensions do not enclose area
+ */
+ bool empty() const { return fPixmap.info().isEmpty(); }
+
+ /** Returns true if SkPixelRef is nullptr.
+
+ Does not check if width() or height() are zero; call drawsNothing() to check
+ width(), height(), and SkPixelRef.
+
+ @return true if no SkPixelRef is associated
+ */
+ bool isNull() const { return nullptr == fPixelRef; }
+
+    /** Returns true if width() or height() is zero, or if SkPixelRef is nullptr.
+ If true, SkBitmap has no effect when drawn or drawn into.
+
+ @return true if drawing has no effect
+ */
+ bool drawsNothing() const {
+ return this->empty() || this->isNull();
+ }
+
+ /** Returns row bytes, the interval from one pixel row to the next. Row bytes
+ is at least as large as: width() * info().bytesPerPixel().
+
+ Returns zero if colorType() is kUnknown_SkColorType, or if row bytes supplied to
+ setInfo() is not large enough to hold a row of pixels.
+
+ @return byte length of pixel row
+ */
+ size_t rowBytes() const { return fPixmap.rowBytes(); }
+
+ /** Sets SkAlphaType, if alphaType is compatible with SkColorType.
+ Returns true unless alphaType is kUnknown_SkAlphaType and current SkAlphaType
+ is not kUnknown_SkAlphaType.
+
+ Returns true if SkColorType is kUnknown_SkColorType. alphaType is ignored, and
+ SkAlphaType remains kUnknown_SkAlphaType.
+
+ Returns true if SkColorType is kRGB_565_SkColorType or kGray_8_SkColorType.
+ alphaType is ignored, and SkAlphaType remains kOpaque_SkAlphaType.
+
+ If SkColorType is kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: returns true unless
+ alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType.
+ If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored.
+
+ If SkColorType is kAlpha_8_SkColorType, returns true unless
+ alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType.
+ If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored. If alphaType is
+ kUnpremul_SkAlphaType, it is treated as kPremul_SkAlphaType.
+
+ This changes SkAlphaType in SkPixelRef; all bitmaps sharing SkPixelRef
+ are affected.
+
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @return true if SkAlphaType is set
+ */
+ bool setAlphaType(SkAlphaType alphaType);
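+
+    // Illustrative sketch of the compatibility rules above (assumes a native
+    // 32-bit bitmap, for which premultiplied and opaque alpha are both legal):
+    //
+    //     SkBitmap bm;
+    //     bm.allocN32Pixels(4, 4);                        // kPremul_SkAlphaType
+    //     bool ok = bm.setAlphaType(kOpaque_SkAlphaType); // compatible: true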
+
+ /** Returns pixel address, the base address corresponding to the pixel origin.
+
+ @return pixel address
+ */
+ void* getPixels() const { return fPixmap.writable_addr(); }
+
+ /** Returns minimum memory required for pixel storage.
+ Does not include unused memory on last row when rowBytesAsPixels() exceeds width().
+ Returns SIZE_MAX if result does not fit in size_t.
+ Returns zero if height() or width() is 0.
+ Returns height() times rowBytes() if colorType() is kUnknown_SkColorType.
+
+ @return size in bytes of image buffer
+ */
+ size_t computeByteSize() const { return fPixmap.computeByteSize(); }
+
+ /** Returns true if pixels can not change.
+
+ Most immutable SkBitmap checks trigger an assert only on debug builds.
+
+ @return true if pixels are immutable
+ */
+ bool isImmutable() const;
+
+ /** Sets internal flag to mark SkBitmap as immutable. Once set, pixels can not change.
+        Any other bitmaps sharing the same SkPixelRef are also marked immutable.
+ Once SkPixelRef is marked immutable, the setting cannot be cleared.
+
+ Writing to immutable SkBitmap pixels triggers an assert on debug builds.
+ */
+ void setImmutable();
+
+ /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their
+ alpha value is implicitly or explicitly 1.0. If true, and all pixels are
+ not opaque, Skia may draw incorrectly.
+
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkImageInfo SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const {
+ return SkAlphaTypeIsOpaque(this->alphaType());
+ }
+
+ /** Provides a hint to caller that pixels should not be cached. Only true if
+ setIsVolatile() has been called to mark as volatile.
+
+ Volatile state is not shared by other bitmaps sharing the same SkPixelRef.
+
+ @return true if marked volatile
+ */
+ bool isVolatile() const;
+
+    /** Sets if pixels should be read from SkPixelRef on every access. SkBitmap is not
+ volatile by default; a GPU back end may upload pixel values expecting them to be
+ accessed repeatedly. Marking temporary SkBitmap as volatile provides a hint to
+ SkBaseDevice that the SkBitmap pixels should not be cached. This can
+ improve performance by avoiding overhead and reducing resource
+ consumption on SkBaseDevice.
+
+ @param isVolatile true if backing pixels are temporary
+ */
+ void setIsVolatile(bool isVolatile);
+
+ /** Resets to its initial state; all fields are set to zero, as if SkBitmap had
+ been initialized by SkBitmap().
+
+ Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to
+ kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType.
+
+ If SkPixelRef is allocated, its reference count is decreased by one, releasing
+ its memory if SkBitmap is the sole owner.
+ */
+ void reset();
+
+ /** Returns true if all pixels are opaque. SkColorType determines how pixels
+        are encoded, and whether each pixel describes alpha. Returns true for SkColorType
+ without alpha in each pixel; for other SkColorType, returns true if all
+ pixels have alpha values equivalent to 1.0 or greater.
+
+ For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always
+ returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255.
+ For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15.
+ For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or
+ greater.
+
+ Returns false for kUnknown_SkColorType.
+
+ @param bm SkBitmap to check
+ @return true if all pixels have opaque values or SkColorType is opaque
+ */
+ static bool ComputeIsOpaque(const SkBitmap& bm) {
+ return bm.pixmap().computeIsOpaque();
+ }
+
+ /** Returns SkRect { 0, 0, width(), height() }.
+
+ @param bounds container for floating point rectangle
+ */
+ void getBounds(SkRect* bounds) const;
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @param bounds container for integral rectangle
+ */
+ void getBounds(SkIRect* bounds) const;
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return fPixmap.info().bounds(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return fPixmap.info().dimensions(); }
+
+ /** Returns the bounds of this bitmap, offset by its SkPixelRef origin.
+
+ @return bounds within SkPixelRef bounds
+ */
+ SkIRect getSubset() const {
+ SkIPoint origin = this->pixelRefOrigin();
+ return SkIRect::MakeXYWH(origin.x(), origin.y(), this->width(), this->height());
+ }
+
+ /** Sets width, height, SkAlphaType, SkColorType, SkColorSpace, and optional
+ rowBytes. Frees pixels, and returns true if successful.
+
+ imageInfo.alphaType() may be altered to a value permitted by imageInfo.colorSpace().
+ If imageInfo.colorType() is kUnknown_SkColorType, imageInfo.alphaType() is
+ set to kUnknown_SkAlphaType.
+ If imageInfo.colorType() is kAlpha_8_SkColorType and imageInfo.alphaType() is
+ kUnpremul_SkAlphaType, imageInfo.alphaType() is replaced by kPremul_SkAlphaType.
+ If imageInfo.colorType() is kRGB_565_SkColorType or kGray_8_SkColorType,
+ imageInfo.alphaType() is set to kOpaque_SkAlphaType.
+ If imageInfo.colorType() is kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: imageInfo.alphaType() remains
+ unchanged.
+
+        rowBytes must equal or exceed imageInfo.minRowBytes(). If imageInfo.colorType() is
+        kUnknown_SkColorType, rowBytes is ignored and treated as zero; for all other
+        SkColorType values, rowBytes of zero is treated as imageInfo.minRowBytes().
+
+ Calls reset() and returns false if:
+ - rowBytes exceeds 31 bits
+ - imageInfo.width() is negative
+ - imageInfo.height() is negative
+ - rowBytes is positive and less than imageInfo.width() times imageInfo.bytesPerPixel()
+
+ @param imageInfo contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes imageInfo.minRowBytes() or larger; or zero
+ @return true if SkImageInfo set successfully
+ */
+ bool setInfo(const SkImageInfo& imageInfo, size_t rowBytes = 0);
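+
+    // Illustrative sketch (editor's example): setInfo() records geometry only;
+    // rowBytes may add per-row padding, and pixels remain unallocated.
+    //
+    //     SkBitmap bm;
+    //     SkImageInfo info = SkImageInfo::MakeN32Premul(10, 10);
+    //     bm.setInfo(info, info.minRowBytes() + 16);  // 16 padding bytes per row
+    //     SkASSERT(bm.isNull());                      // no SkPixelRef yet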
+
+ /** \enum SkBitmap::AllocFlags
+ AllocFlags is obsolete. We always zero pixel memory when allocated.
+ */
+ enum AllocFlags {
+ kZeroPixels_AllocFlag = 1 << 0, //!< zero pixel memory. No effect. This is the default.
+ };
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. Memory is zeroed.
+
+        Returns false and calls reset() if SkImageInfo could not be set, or memory could
+        not be allocated or zeroed.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of calloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param flags kZeroPixels_AllocFlag, or zero
+ @return true if pixels allocation is successful
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixelsFlags(const SkImageInfo& info, uint32_t flags);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. Memory is zeroed.
+
+        Aborts execution if SkImageInfo could not be set, or memory could not be
+        allocated or zeroed. Abort steps may be provided by the user at compile time
+        by defining SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of calloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param flags kZeroPixels_AllocFlag, or zero
+ */
+ void allocPixelsFlags(const SkImageInfo& info, uint32_t flags);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(),
+ or equal zero. Pass in zero for rowBytes to compute the minimum valid value.
+
+ Returns false and calls reset() if SkImageInfo could not be set, or memory could
+ not be allocated.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes size of pixel row or larger; may be zero
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info, size_t rowBytes);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(),
+ or equal zero. Pass in zero for rowBytes to compute the minimum valid value.
+
+ Aborts execution if SkImageInfo could not be set, or memory could
+ not be allocated. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes size of pixel row or larger; may be zero
+ */
+ void allocPixels(const SkImageInfo& info, size_t rowBytes);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory.
+
+ Returns false and calls reset() if SkImageInfo could not be set, or memory could
+ not be allocated.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info) {
+ return this->tryAllocPixels(info, info.minRowBytes());
+ }
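+
+    // Illustrative sketch: the try- variants report failure instead of aborting,
+    // so callers can recover from allocation failure.
+    //
+    //     SkBitmap bm;
+    //     if (!bm.tryAllocPixels(SkImageInfo::MakeN32Premul(256, 256))) {
+    //         // allocation failed; bm has been reset()
+    //     }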
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory.
+
+ Aborts execution if SkImageInfo could not be set, or memory could
+ not be allocated. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ */
+ void allocPixels(const SkImageInfo& info);
+
+ /** Sets SkImageInfo to width, height, and native color type; and allocates
+ pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType;
+ otherwise, sets to kPremul_SkAlphaType.
+
+ Calls reset() and returns false if width exceeds 29 bits or is negative,
+ or height is negative.
+
+ Returns false if allocation fails.
+
+ Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on
+ the platform. SkBitmap drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param isOpaque true if pixels do not have transparency
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocN32Pixels(int width, int height, bool isOpaque = false);
+
+ /** Sets SkImageInfo to width, height, and the native color type; and allocates
+ pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType;
+ otherwise, sets to kPremul_SkAlphaType.
+
+ Aborts if width exceeds 29 bits or is negative, or height is negative, or
+ allocation fails. Abort steps may be provided by the user at compile time by
+ defining SK_ABORT.
+
+ Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on
+ the platform. SkBitmap drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param isOpaque true if pixels do not have transparency
+ */
+ void allocN32Pixels(int width, int height, bool isOpaque = false);
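+
+    // Illustrative sketch: allocN32Pixels() is shorthand for allocating pixels
+    // in the platform's native 32-bit format.
+    //
+    //     SkBitmap bm;
+    //     bm.allocN32Pixels(100, 100);       // kPremul_SkAlphaType by default
+    //     bm.eraseColor(SK_ColorWHITE);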
+
+ /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef
+ containing pixels and rowBytes. releaseProc, if not nullptr, is called
+ immediately on failure or when pixels are no longer referenced. context may be
+ nullptr.
+
+ If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes():
+ calls releaseProc if present, calls reset(), and returns false.
+
+ Otherwise, if pixels equals nullptr: sets SkImageInfo, calls releaseProc if
+ present, returns true.
+
+ If SkImageInfo is set, pixels is not nullptr, and releaseProc is not nullptr:
+ when pixels are no longer referenced, calls releaseProc with pixels and context
+ as parameters.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address or pixel storage; may be nullptr
+ @param rowBytes size of pixel row or larger
+ @param releaseProc function called when pixels can be deleted; may be nullptr
+ @param context caller state passed to releaseProc; may be nullptr
+ @return true if SkImageInfo is set to info
+ */
+ bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ void (*releaseProc)(void* addr, void* context), void* context);
+
+ /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef
+ containing pixels and rowBytes.
+
+ If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes():
+ calls reset(), and returns false.
+
+ Otherwise, if pixels equals nullptr: sets SkImageInfo, returns true.
+
+ Caller must ensure that pixels are valid for the lifetime of SkBitmap and SkPixelRef.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address or pixel storage; may be nullptr
+ @param rowBytes size of pixel row or larger
+ @return true if SkImageInfo is set to info
+ */
+ bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->installPixels(info, pixels, rowBytes, nullptr, nullptr);
+ }
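+
+    // Illustrative sketch: installPixels() wraps caller-owned memory without
+    // copying; the storage must outlive the SkBitmap.
+    //
+    //     static uint32_t storage[16 * 16];
+    //     SkBitmap bm;
+    //     bm.installPixels(SkImageInfo::MakeN32Premul(16, 16),
+    //                      storage, 16 * sizeof(uint32_t));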
+
+ /** Sets SkImageInfo to pixmap.info() following the rules in setInfo(), and creates
+ SkPixelRef containing pixmap.addr() and pixmap.rowBytes().
+
+ If SkImageInfo could not be set, or pixmap.rowBytes() is less than
+ SkImageInfo::minRowBytes(): calls reset(), and returns false.
+
+ Otherwise, if pixmap.addr() equals nullptr: sets SkImageInfo, returns true.
+
+ Caller must ensure that pixmap is valid for the lifetime of SkBitmap and SkPixelRef.
+
+ @param pixmap SkImageInfo, pixel address, and rowBytes()
+ @return true if SkImageInfo was set to pixmap.info()
+ */
+ bool installPixels(const SkPixmap& pixmap);
+
+ /** Deprecated.
+ */
+ bool installMaskPixels(const SkMask& mask);
+
+ /** Replaces SkPixelRef with pixels, preserving SkImageInfo and rowBytes().
+ Sets SkPixelRef origin to (0, 0).
+
+ If pixels is nullptr, or if info().colorType() equals kUnknown_SkColorType;
+ release reference to SkPixelRef, and set SkPixelRef to nullptr.
+
+        Caller is responsible for managing ownership of pixel memory for the lifetime
+ of SkBitmap and SkPixelRef.
+
+ @param pixels address of pixel storage, managed by caller
+ */
+ void setPixels(void* pixels);
+
+ /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+
+ Returns false if info().colorType() is kUnknown_SkColorType, or allocation fails.
+
+ @return true if the allocation succeeds
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels() {
+ return this->tryAllocPixels((Allocator*)nullptr);
+ }
+
+ /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+
+ Aborts if info().colorType() is kUnknown_SkColorType, or allocation fails.
+ Abort steps may be provided by the user at compile
+ time by defining SK_ABORT.
+ */
+ void allocPixels();
+
+ /** Allocates pixel memory with allocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+ If allocator is nullptr, use HeapAllocator instead.
+
+        Returns false if Allocator::allocPixelRef returns false.
+
+ @param allocator instance of SkBitmap::Allocator instantiation
+ @return true if custom allocator reports success
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(Allocator* allocator);
+
+ /** Allocates pixel memory with allocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+ If allocator is nullptr, use HeapAllocator instead.
+
+        Aborts if Allocator::allocPixelRef returns false. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ @param allocator instance of SkBitmap::Allocator instantiation
+ */
+ void allocPixels(Allocator* allocator);
+
+ /** Returns SkPixelRef, which contains: pixel base address; its dimensions; and
+ rowBytes(), the interval from one row to the next. Does not change SkPixelRef
+ reference count. SkPixelRef may be shared by multiple bitmaps.
+ If SkPixelRef has not been set, returns nullptr.
+
+ @return SkPixelRef, or nullptr
+ */
+ SkPixelRef* pixelRef() const { return fPixelRef.get(); }
+
+ /** Returns origin of pixels within SkPixelRef. SkBitmap bounds is always contained
+ by SkPixelRef bounds, which may be the same size or larger. Multiple SkBitmap
+ can share the same SkPixelRef, where each SkBitmap has different bounds.
+
+ The returned origin added to SkBitmap dimensions equals or is smaller than the
+ SkPixelRef dimensions.
+
+ Returns (0, 0) if SkPixelRef is nullptr.
+
+ @return pixel origin within SkPixelRef
+ */
+ SkIPoint pixelRefOrigin() const;
+
+ /** Replaces pixelRef and origin in SkBitmap. dx and dy specify the offset
+ within the SkPixelRef pixels for the top-left corner of the bitmap.
+
+ Asserts in debug builds if dx or dy are out of range. Pins dx and dy
+ to legal range in release builds.
+
+ The caller is responsible for ensuring that the pixels match the
+ SkColorType and SkAlphaType in SkImageInfo.
+
+ @param pixelRef SkPixelRef describing pixel address and rowBytes()
+ @param dx column offset in SkPixelRef for bitmap origin
+ @param dy row offset in SkPixelRef for bitmap origin
+ */
+ void setPixelRef(sk_sp<SkPixelRef> pixelRef, int dx, int dy);
+
+    /** Returns true if SkBitmap can be drawn.
+
+ @return true if getPixels() is not nullptr
+ */
+ bool readyToDraw() const {
+ return this->getPixels() != nullptr;
+ }
+
+ /** Returns a unique value corresponding to the pixels in SkPixelRef.
+ Returns a different value after notifyPixelsChanged() has been called.
+ Returns zero if SkPixelRef is nullptr.
+
+ Determines if pixels have changed since last examined.
+
+ @return unique value for pixels in SkPixelRef
+ */
+ uint32_t getGenerationID() const;
+
+ /** Marks that pixels in SkPixelRef have changed. Subsequent calls to
+ getGenerationID() return a different value.
+ */
+ void notifyPixelsChanged() const;
+
+ /** Replaces pixel values with c. All pixels contained by bounds() are affected.
+ If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then alpha
+ is ignored; RGB is treated as opaque. If colorType() is kAlpha_8_SkColorType,
+ then RGB is ignored.
+
+ @param c unpremultiplied color
+ */
+ void eraseColor(SkColor c) const;
+
+ /** Replaces pixel values with unpremultiplied color built from a, r, g, and b.
+ All pixels contained by bounds() are affected.
+ If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then a
+ is ignored; r, g, and b are treated as opaque. If colorType() is kAlpha_8_SkColorType,
+ then r, g, and b are ignored.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ */
+ void eraseARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) const {
+ this->eraseColor(SkColorSetARGB(a, r, g, b));
+ }
+
+ /** Replaces pixel values inside area with c. If area does not intersect bounds(),
+ call has no effect.
+
+ If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then alpha
+ is ignored; RGB is treated as opaque. If colorType() is kAlpha_8_SkColorType,
+ then RGB is ignored.
+
+ @param c unpremultiplied color
+ @param area rectangle to fill
+ */
+ void erase(SkColor c, const SkIRect& area) const;
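+
+    // Illustrative sketch: eraseColor() fills the whole bitmap, erase() fills a
+    // clipped rectangle.
+    //
+    //     bm.eraseColor(SK_ColorRED);                             // all pixels
+    //     bm.erase(SK_ColorBLUE, SkIRect::MakeXYWH(0, 0, 8, 8));  // a corner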
+
+ /** Deprecated.
+ */
+ void eraseArea(const SkIRect& area, SkColor c) const {
+ this->erase(c, area);
+ }
+
+ /** Returns pixel at (x, y) as unpremultiplied color.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color; original pixel data may have additional
+ precision.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied color
+ */
+ SkColor getColor(int x, int y) const {
+ return this->pixmap().getColor(x, y);
+ }
+
+ /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1].
+        This is roughly equivalent to SkGetColorA(getColor()), but can be more efficient
+ (and more precise if the pixels store more than 8 bits per component).
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return alpha converted to normalized float
+ */
+ float getAlphaf(int x, int y) const {
+ return this->pixmap().getAlphaf(x, y);
+ }
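+
+    // Illustrative sketch: both accessors read back a single pixel; getAlphaf()
+    // avoids the unpremultiply round-trip when only coverage is needed.
+    //
+    //     SkColor c = bm.getColor(0, 0);   // unpremultiplied ARGB
+    //     float   a = bm.getAlphaf(0, 0);  // alpha alone, in [0..1]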
+
+ /** Returns pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y, or kUnknown_SkColorType,
+ trigger an assert() if built with SK_DEBUG defined. Returns nullptr if
+ SkColorType is kUnknown_SkColorType, or SkPixelRef is nullptr.
+
+ Performs a lookup of pixel size; for better performance, call
+ one of: getAddr8(), getAddr16(), or getAddr32().
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return generic pointer to pixel
+ */
+ void* getAddr(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not four
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 32-bit pointer to pixel at (x, y)
+ */
+ inline uint32_t* getAddr32(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not two
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 16-bit pointer to pixel at (x, y)
+ */
+ inline uint16_t* getAddr16(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not one
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 8-bit pointer to pixel at (x, y)
+ */
+ inline uint8_t* getAddr8(int x, int y) const;
+
+ /** Shares SkPixelRef with dst. Pixels are not copied; SkBitmap and dst point
+ to the same pixels; dst bounds() are set to the intersection of subset
+ and the original bounds().
+
+ subset may be larger than bounds(). Any area outside of bounds() is ignored.
+
+ Any contents of dst are discarded. isVolatile() setting is copied to dst.
+ dst is set to colorType(), alphaType(), and colorSpace().
+
+        Returns false if:
+ - dst is nullptr
+ - SkPixelRef is nullptr
+ - subset does not intersect bounds()
+
+ @param dst SkBitmap set to subset
+ @param subset rectangle of pixels to reference
+ @return true if dst is replaced by subset
+ */
+ bool extractSubset(SkBitmap* dst, const SkIRect& subset) const;
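+
+    // Illustrative sketch: the subset shares the original pixels; only bounds
+    // and the SkPixelRef origin differ.
+    //
+    //     SkBitmap sub;
+    //     if (bm.extractSubset(&sub, SkIRect::MakeXYWH(10, 10, 20, 20))) {
+    //         SkASSERT(sub.pixelRef() == bm.pixelRef());
+    //     }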
+
+ /** Copies a SkRect of pixels from SkBitmap to dstPixels. Copy starts at (srcX, srcY),
+ and does not exceed SkBitmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of
+        destination. dstRowBytes specifies the gap from one destination row to the next.
+ Returns true if pixels are copied. Returns false if:
+ - dstInfo has no address
+ - dstRowBytes is less than dstInfo.minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkBitmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height().
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) const;
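+
+    // Illustrative sketch: reading an 8x8 region into a caller-owned buffer,
+    // converting to native 32-bit premultiplied pixels.
+    //
+    //     SkImageInfo dstInfo = SkImageInfo::MakeN32Premul(8, 8);
+    //     uint32_t buf[8 * 8];
+    //     bool ok = bm.readPixels(dstInfo, buf, dstInfo.minRowBytes(), 0, 0);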
+
+ /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (srcX, srcY), and
+ does not exceed SkBitmap (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+        - dst.rowBytes() is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height().
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY) const;
+
+ /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (0, 0), and
+ does not exceed SkBitmap (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+        - dst.rowBytes() is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst) const {
+ return this->readPixels(dst, 0, 0);
+ }
+
+ /** Copies a SkRect of pixels from src. Copy starts at (dstX, dstY), and does not exceed
+ (src.width(), src.height()).
+
+ src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of source. src.rowBytes() specifies the gap from one source
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - src pixel storage equals nullptr
+        - src.rowBytes() is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ dstX and dstY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(dstX) >= Bitmap width(), or if abs(dstY) >= Bitmap height().
+
+ @param src source SkPixmap: SkImageInfo, pixels, row bytes
+ @param dstX column index whose absolute value is less than width()
+ @param dstY row index whose absolute value is less than height()
+ @return true if src pixels are copied to SkBitmap
+ */
+ bool writePixels(const SkPixmap& src, int dstX, int dstY);
+
+ /** Copies a SkRect of pixels from src. Copy starts at (0, 0), and does not exceed
+ (src.width(), src.height()).
+
+ src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of source. src.rowBytes() specifies the gap from one source
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - src pixel storage equals nullptr
+        - src.rowBytes() is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ @param src source SkPixmap: SkImageInfo, pixels, row bytes
+ @return true if src pixels are copied to SkBitmap
+ */
+ bool writePixels(const SkPixmap& src) {
+ return this->writePixels(src, 0, 0);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ Uses HeapAllocator to reserve memory for dst SkPixelRef.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst) const {
+ return this->extractAlpha(dst, nullptr, nullptr, nullptr);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ If paint is not nullptr and contains SkMaskFilter, SkMaskFilter
+ generates mask alpha from SkBitmap. Uses HeapAllocator to reserve memory for dst
+ SkPixelRef. Sets offset to top-left position for dst for alignment with SkBitmap;
+ (0, 0) unless SkMaskFilter generates mask.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @param paint holds optional SkMaskFilter; may be nullptr
+ @param offset top-left position for dst; may be nullptr
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ SkIPoint* offset) const {
+ return this->extractAlpha(dst, paint, nullptr, offset);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ If paint is not nullptr and contains SkMaskFilter, SkMaskFilter
+ generates mask alpha from SkBitmap. allocator may reference a custom allocation
+ class or be set to nullptr to use HeapAllocator. Sets offset to top-left
+ position for dst for alignment with SkBitmap; (0, 0) unless SkMaskFilter generates
+ mask.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @param paint holds optional SkMaskFilter; may be nullptr
+ @param allocator function to reserve memory for SkPixelRef; may be nullptr
+ @param offset top-left position for dst; may be nullptr
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint, Allocator* allocator,
+ SkIPoint* offset) const;
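+
+    // Illustrative sketch: without a mask filter the result is a kAlpha_8 bitmap
+    // aligned with the source, so offset comes back as (0, 0).
+    //
+    //     SkBitmap alpha;
+    //     SkIPoint offset;
+    //     if (bm.extractAlpha(&alpha, nullptr, nullptr, &offset)) {
+    //         SkASSERT(alpha.colorType() == kAlpha_8_SkColorType);
+    //     }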
+
+ /** Copies SkBitmap pixel address, row bytes, and SkImageInfo to pixmap, if address
+        is available, and returns true. If pixel address is not available, returns
+        false and leaves pixmap unchanged.
+
+ pixmap contents become invalid on any future change to SkBitmap.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkBitmap has direct access to pixels
+ */
+ bool peekPixels(SkPixmap* pixmap) const;
+
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix = nullptr) const;
+    // defaults to Clamp in x and y
+ sk_sp<SkShader> makeShader(const SkMatrix* localMatrix = nullptr) const;
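+
+    // Illustrative sketch: a bitmap-backed shader tiles the pixels under any
+    // geometry drawn with the paint.
+    //
+    //     SkPaint paint;
+    //     paint.setShader(bm.makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat));
+    //     canvas->drawPaint(paint);   // repeats bm across the clip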
+
+ /** Asserts if internal values are illegal or inconsistent. Only available if
+ SK_DEBUG is defined at compile time.
+ */
+ SkDEBUGCODE(void validate() const;)
+
+ /** \class SkBitmap::Allocator
+        Abstract base class for allocating pixel memory; HeapAllocator is its default
+        concrete subclass.
+ */
+ class Allocator : public SkRefCnt {
+ public:
+
+ /** Allocates the pixel memory for the bitmap, given its dimensions and
+ SkColorType. Returns true on success, where success means either setPixels()
+ or setPixelRef() was called.
+
+ @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output
+ @return true if SkPixelRef was allocated
+ */
+ virtual bool allocPixelRef(SkBitmap* bitmap) = 0;
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ /** \class SkBitmap::HeapAllocator
+ Subclass of SkBitmap::Allocator that returns a SkPixelRef that allocates its pixel
+ memory from the heap. This is the default SkBitmap::Allocator invoked by
+ allocPixels().
+ */
+ class HeapAllocator : public Allocator {
+ public:
+
+ /** Allocates the pixel memory for the bitmap, given its dimensions and
+ SkColorType. Returns true on success, where success means either setPixels()
+ or setPixelRef() was called.
+
+ @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output
+ @return true if pixels are allocated
+ */
+ bool allocPixelRef(SkBitmap* bitmap) override;
+ };
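+
+    // Illustrative sketch of a custom Allocator (editor's example; the class
+    // name is hypothetical): delegate to HeapAllocator, then record the size.
+    //
+    //     class TrackingAllocator : public SkBitmap::Allocator {
+    //     public:
+    //         bool allocPixelRef(SkBitmap* bitmap) override {
+    //             SkBitmap::HeapAllocator heap;
+    //             if (!heap.allocPixelRef(bitmap)) { return false; }
+    //             fBytes += bitmap->computeByteSize();
+    //             return true;
+    //         }
+    //         size_t fBytes = 0;
+    //     };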
+
+private:
+ enum Flags {
+ kImageIsVolatile_Flag = 0x02,
+ };
+
+ sk_sp<SkPixelRef> fPixelRef;
+ SkPixmap fPixmap;
+ uint8_t fFlags;
+
+ friend class SkReadBuffer; // unflatten
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+inline uint32_t* SkBitmap::getAddr32(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr32(x, y);
+}
+
+inline uint16_t* SkBitmap::getAddr16(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr16(x, y);
+}
+
+inline uint8_t* SkBitmap::getAddr8(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr8(x, y);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBlendMode.h b/gfx/skia/skia/include/core/SkBlendMode.h
new file mode 100644
index 0000000000..270978f74b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlendMode.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendMode_DEFINED
+#define SkBlendMode_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkBlendMode {
+ kClear, //!< replaces destination with zero: fully transparent
+ kSrc, //!< replaces destination
+ kDst, //!< preserves destination
+ kSrcOver, //!< source over destination
+ kDstOver, //!< destination over source
+ kSrcIn, //!< source trimmed inside destination
+ kDstIn, //!< destination trimmed by source
+ kSrcOut, //!< source trimmed outside destination
+ kDstOut, //!< destination trimmed outside source
+ kSrcATop, //!< source inside destination blended with destination
+ kDstATop, //!< destination inside source blended with source
+ kXor, //!< each of source and destination trimmed outside the other
+ kPlus, //!< sum of colors
+ kModulate, //!< product of premultiplied colors; darkens destination
+ kScreen, //!< multiply inverse of pixels, inverting result; brightens destination
+    kLastCoeffMode = kScreen, //!< last Porter-Duff blend mode
+ kOverlay, //!< multiply or screen, depending on destination
+ kDarken, //!< darker of source and destination
+ kLighten, //!< lighter of source and destination
+ kColorDodge, //!< brighten destination to reflect source
+ kColorBurn, //!< darken destination to reflect source
+ kHardLight, //!< multiply or screen, depending on source
+ kSoftLight, //!< lighten or darken, depending on source
+ kDifference, //!< subtract darker from lighter with higher contrast
+ kExclusion, //!< subtract darker from lighter with lower contrast
+ kMultiply, //!< multiply source with destination, darkening image
+ kLastSeparableMode = kMultiply, //!< last blend mode operating separately on components
+ kHue, //!< hue of source with saturation and luminosity of destination
+ kSaturation, //!< saturation of source with hue and luminosity of destination
+ kColor, //!< hue and saturation of source with luminosity of destination
+ kLuminosity, //!< luminosity of source with hue and saturation of destination
+ kLastMode = kLuminosity, //!< last valid value
+};
+
+/** Returns name of blendMode as null-terminated C string.
+
+ @param blendMode one of:
+ SkBlendMode::kClear, SkBlendMode::kSrc, SkBlendMode::kDst,
+ SkBlendMode::kSrcOver, SkBlendMode::kDstOver, SkBlendMode::kSrcIn,
+ SkBlendMode::kDstIn, SkBlendMode::kSrcOut, SkBlendMode::kDstOut,
+ SkBlendMode::kSrcATop, SkBlendMode::kDstATop, SkBlendMode::kXor,
+ SkBlendMode::kPlus, SkBlendMode::kModulate, SkBlendMode::kScreen,
+ SkBlendMode::kOverlay, SkBlendMode::kDarken, SkBlendMode::kLighten,
+ SkBlendMode::kColorDodge, SkBlendMode::kColorBurn, SkBlendMode::kHardLight,
+ SkBlendMode::kSoftLight, SkBlendMode::kDifference, SkBlendMode::kExclusion,
+ SkBlendMode::kMultiply, SkBlendMode::kHue, SkBlendMode::kSaturation,
+ SkBlendMode::kColor, SkBlendMode::kLuminosity
+ @return C string
+*/
+SK_API const char* SkBlendMode_Name(SkBlendMode blendMode);
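+
+// Illustrative sketch (assuming the conventional name strings, e.g. "SrcOver"):
+//
+//     SkDebugf("%s\n", SkBlendMode_Name(SkBlendMode::kSrcOver));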
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBlurTypes.h b/gfx/skia/skia/include/core/SkBlurTypes.h
new file mode 100644
index 0000000000..aec37b6e68
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlurTypes.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurTypes_DEFINED
+#define SkBlurTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum SkBlurStyle : int {
+ kNormal_SkBlurStyle, //!< fuzzy inside and outside
+ kSolid_SkBlurStyle, //!< solid inside, fuzzy outside
+ kOuter_SkBlurStyle, //!< nothing inside, fuzzy outside
+ kInner_SkBlurStyle, //!< fuzzy inside, nothing outside
+
+ kLastEnum_SkBlurStyle = kInner_SkBlurStyle,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCanvas.h b/gfx/skia/skia/include/core/SkCanvas.h
new file mode 100644
index 0000000000..6f6df382c8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCanvas.h
@@ -0,0 +1,2789 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvas_DEFINED
+#define SkCanvas_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkClipOp.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkDeque.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkMacros.h"
+
+#include <cstring>
+#include <memory>
+
+class GrContext;
+class GrRenderTargetContext;
+class SkBaseDevice;
+class SkBitmap;
+class SkData;
+class SkDrawable;
+struct SkDrawShadowRec;
+class SkFont;
+class SkGlyphRunBuilder;
+class SkImage;
+class SkImageFilter;
+class SkPaintFilterCanvas;
+class SkPath;
+class SkPicture;
+class SkPixmap;
+class SkRegion;
+class SkRRect;
+struct SkRSXform;
+class SkSurface;
+class SkSurface_Base;
+class SkTextBlob;
+
+/** \class SkCanvas
+ SkCanvas provides an interface for drawing, and how the drawing is clipped and transformed.
+ SkCanvas contains a stack of SkMatrix and clip values.
+
+ SkCanvas and SkPaint together provide the state to draw into SkSurface or SkBaseDevice.
+ Each SkCanvas draw call transforms the geometry of the object by the concatenation of all
+ SkMatrix values in the stack. The transformed geometry is clipped by the intersection
+ of all of clip values in the stack. The SkCanvas draw calls use SkPaint to supply drawing
+ state such as color, SkTypeface, text size, stroke width, SkShader and so on.
+
+ To draw to a pixel-based destination, create raster surface or GPU surface.
+ Request SkCanvas from SkSurface to obtain the interface to draw.
+ SkCanvas generated by raster surface draws to memory visible to the CPU.
+ SkCanvas generated by GPU surface uses Vulkan or OpenGL to draw to the GPU.
+
+ To draw to a document, obtain SkCanvas from SVG canvas, document PDF, or SkPictureRecorder.
+ SkDocument based SkCanvas and other SkCanvas subclasses reference SkBaseDevice describing the
+ destination.
+
+ SkCanvas can be constructed to draw to SkBitmap without first creating raster surface.
+ This approach may be deprecated in the future.
+*/
+class SK_API SkCanvas {
+ enum PrivateSaveLayerFlags {
+ kDontClipToLayer_PrivateSaveLayerFlag = 1U << 31,
+ };
+
+public:
+
+ /** Allocates raster SkCanvas that will draw directly into pixels.
+
+ SkCanvas is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are zero or positive;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is zero or large enough to contain info width pixels of SkColorType.
+
+ Pass zero for rowBytes to compute rowBytes from info width and size of pixel.
+ If rowBytes is greater than zero, it must be equal to or greater than
+ info width times bytes required for SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ @param info width, height, SkColorType, SkAlphaType, SkColorSpace, of raster surface;
+ width, or height, or both, may be zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next, or zero
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkCanvas if all parameters are valid; otherwise, nullptr
+ */
+ static std::unique_ptr<SkCanvas> MakeRasterDirect(const SkImageInfo& info, void* pixels,
+ size_t rowBytes,
+ const SkSurfaceProps* props = nullptr);
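+
+    // Illustrative sketch: drawing directly into caller-owned pixel memory.
+    //
+    //     uint32_t storage[64 * 64];
+    //     std::unique_ptr<SkCanvas> canvas = SkCanvas::MakeRasterDirect(
+    //             SkImageInfo::MakeN32Premul(64, 64), storage,
+    //             64 * sizeof(uint32_t));
+    //     if (canvas) { canvas->clear(SK_ColorWHITE); }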
+
+ /** Allocates raster SkCanvas specified by inline image specification. Subsequent SkCanvas
+ calls draw into pixels.
+ SkColorType is set to kN32_SkColorType.
+ SkAlphaType is set to kPremul_SkAlphaType.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ SkCanvas is returned if all parameters are valid.
+ Valid parameters include:
+ width and height are zero or positive;
+ pixels is not nullptr;
+ rowBytes is zero or large enough to contain width pixels of kN32_SkColorType.
+
+ Pass zero for rowBytes to compute rowBytes from width and size of pixel.
+ If rowBytes is greater than zero, it must be equal to or greater than
+ width times bytes required for SkColorType.
+
+ Pixel buffer size should be height times rowBytes.
+
+ @param width pixel column count on raster surface created; must be zero or greater
+ @param height pixel row count on raster surface created; must be zero or greater
+ @param pixels pointer to destination pixels buffer; buffer size should be height
+ times rowBytes
+ @param rowBytes interval from one SkSurface row to the next, or zero
+ @return SkCanvas if all parameters are valid; otherwise, nullptr
+ */
+ static std::unique_ptr<SkCanvas> MakeRasterDirectN32(int width, int height, SkPMColor* pixels,
+ size_t rowBytes) {
+ return MakeRasterDirect(SkImageInfo::MakeN32Premul(width, height), pixels, rowBytes);
+ }
+
+ /** Creates an empty SkCanvas with no backing device or pixels, with
+ a width and height of zero.
+
+ @return empty SkCanvas
+ */
+ SkCanvas();
+
+ /** Creates SkCanvas of the specified dimensions without a SkSurface.
+ Used by subclasses with custom implementations for draw member functions.
+
+ If props equals nullptr, SkSurfaceProps are created with
+ SkSurfaceProps::InitType settings, which choose the pixel striping
+ direction and order. Since a platform may dynamically change its direction when
+ the device is rotated, and since a platform may have multiple monitors with
+ different characteristics, it is best not to rely on this legacy behavior.
+
+ @param width zero or greater
+ @param height zero or greater
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkCanvas placeholder with dimensions
+ */
+ SkCanvas(int width, int height, const SkSurfaceProps* props = nullptr);
+
+ /** Private. For internal use only.
+ */
+ explicit SkCanvas(sk_sp<SkBaseDevice> device);
+
+ /** Constructs a canvas that draws into bitmap.
+ Sets SkSurfaceProps::kLegacyFontHost_InitType in constructed SkSurface.
+
+ SkBitmap is copied so that subsequently editing bitmap will not affect
+ constructed SkCanvas.
+
+ May be deprecated in the future.
+
+ @param bitmap width, height, SkColorType, SkAlphaType, and pixel
+ storage of raster surface
+ @return SkCanvas that can be used to draw into bitmap
+ */
+ explicit SkCanvas(const SkBitmap& bitmap);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Private.
+ */
+ enum class ColorBehavior {
+ kLegacy, //!< placeholder
+ };
+
+ /** Private. For use by Android framework only.
+
+ @param bitmap specifies a bitmap for the canvas to draw into
+ @param behavior specializes this constructor; value is unused
+ @return SkCanvas that can be used to draw into bitmap
+ */
+ SkCanvas(const SkBitmap& bitmap, ColorBehavior behavior);
+#endif
+
+ /** Constructs a canvas that draws into bitmap.
+ Use props to match the device characteristics, like LCD striping.
+
+ bitmap is copied so that subsequently editing bitmap will not affect
+ constructed SkCanvas.
+
+ @param bitmap width, height, SkColorType, SkAlphaType,
+ and pixel storage of raster surface
+ @param props order and orientation of RGB striping; and whether to use
+ device independent fonts
+ @return SkCanvas that can be used to draw into bitmap
+ */
+ SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props);
+
+ /** Draws saved layers, if any.
+ Frees up resources used by SkCanvas.
+ */
+ virtual ~SkCanvas();
+
+ /** Returns SkImageInfo for SkCanvas. If SkCanvas is not associated with raster surface or
+ GPU surface, returned SkColorType is set to kUnknown_SkColorType.
+
+ @return dimensions and SkColorType of SkCanvas
+ */
+ SkImageInfo imageInfo() const;
+
+ /** Copies SkSurfaceProps, if SkCanvas is associated with raster surface or
+        GPU surface, and returns true. Otherwise, returns false and leaves props unchanged.
+
+ @param props storage for writable SkSurfaceProps
+ @return true if SkSurfaceProps was copied
+ */
+ bool getProps(SkSurfaceProps* props) const;
+
+ /** Triggers the immediate execution of all pending draw operations.
+ If SkCanvas is associated with GPU surface, resolves all pending GPU operations.
+ If SkCanvas is associated with raster surface, has no effect; raster draw
+ operations are never deferred.
+ */
+ void flush();
+
+ /** Gets the size of the base or root layer in global canvas coordinates. The
+ origin of the base layer is always (0,0). The area available for drawing may be
+ smaller (due to clipping or saveLayer).
+
+ @return integral width and height of base layer
+ */
+ virtual SkISize getBaseLayerSize() const;
+
+ /** Creates SkSurface matching info and props, and associates it with SkCanvas.
+ Returns nullptr if no match found.
+
+ If props is nullptr, matches SkSurfaceProps in SkCanvas. If props is nullptr and SkCanvas
+ does not have SkSurfaceProps, creates SkSurface with default SkSurfaceProps.
+
+ @param info width, height, SkColorType, SkAlphaType, and SkColorSpace
+ @param props SkSurfaceProps to match; may be nullptr to match SkCanvas
+ @return SkSurface matching info and props, or nullptr if no match is available
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo& info, const SkSurfaceProps* props = nullptr);
+
+ /** Returns GPU context of the GPU surface associated with SkCanvas.
+
+ @return GPU context, if available; nullptr otherwise
+ */
+ virtual GrContext* getGrContext();
+
+ /** Returns the pixel base address, SkImageInfo, rowBytes, and origin if the pixels
+ can be read directly. The returned address is only valid
+ while SkCanvas is in scope and unchanged. Any SkCanvas call or SkSurface call
+ may invalidate the returned address and other returned values.
+
+ If pixels are inaccessible, info, rowBytes, and origin are unchanged.
+
+ @param info storage for writable pixels' SkImageInfo; may be nullptr
+ @param rowBytes storage for writable pixels' row bytes; may be nullptr
+ @param origin storage for SkCanvas top layer origin, its top-left corner;
+ may be nullptr
+ @return address of pixels, or nullptr if inaccessible
+ */
+ void* accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin = nullptr);
+
+ /** Returns custom context that tracks the SkMatrix and clip.
+
+ Use SkRasterHandleAllocator to blend Skia drawing with custom drawing, typically performed
+ by the host platform user interface. The custom context returned is generated by
+ SkRasterHandleAllocator::MakeCanvas, which creates a custom canvas with raster storage for
+ the drawing destination.
+
+ @return context of custom allocation
+ */
+ SkRasterHandleAllocator::Handle accessTopRasterHandle() const;
+
+ /** Returns true if SkCanvas has direct access to its pixels.
+
+ Pixels are readable when SkBaseDevice is raster. Pixels are not readable when SkCanvas
+ is returned from GPU surface, returned by SkDocument::beginPage, returned by
+ SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility class
+ like DebugCanvas.
+
+ pixmap is valid only while SkCanvas is in scope and unchanged. Any
+ SkCanvas or SkSurface call may invalidate the pixmap values.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkCanvas has direct access to pixels
+ */
+ bool peekPixels(SkPixmap* pixmap);
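+
+ /* Sketch: inspecting canvas pixels without copying, via SkPixmap; assumes a
+    raster-backed `canvas`.
+
+        SkPixmap pm;
+        if (canvas->peekPixels(&pm)) {
+            SkColor c = pm.getColor(0, 0);   // read a pixel without copying
+        }
+ */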
+
+ /** Copies SkRect of pixels from SkCanvas into dstPixels. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dstInfo.colorType() and dstInfo.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dstPixels contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - dstRowBytes is too small to contain one row of pixels.
+
+ @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels
+ @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger
+ @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
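+
+ /* Sketch: copying a 16x16 region starting at (srcX, srcY) = (0, 0) into
+    caller-allocated storage; names and sizes are illustrative (<vector> and
+    <cstdint> assumed).
+
+        SkImageInfo dstInfo = SkImageInfo::MakeN32Premul(16, 16);
+        std::vector<uint32_t> pixels(16 * 16);
+        bool ok = canvas->readPixels(dstInfo, pixels.data(),
+                                     dstInfo.minRowBytes(), 0, 0);
+ */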
+
+ /** Copies SkRect of pixels from SkCanvas into pixmap. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (pixmap.width(), pixmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to pixmap.colorType() and pixmap.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Caller must allocate pixel storage in pixmap if needed.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination SkRect
+ are copied. pixmap contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down pixmap.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to pixmap.colorType() or pixmap.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - SkPixmap pixels could not be allocated.
+ - pixmap.rowBytes() is too small to contain one row of pixels.
+
+ @param pixmap storage for pixels copied from SkCanvas
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkPixmap& pixmap, int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkCanvas into bitmap. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to bitmap.colorType() and bitmap.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Caller must allocate pixel storage in bitmap if needed.
+
+ SkBitmap values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkBitmap pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down bitmap.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to bitmap.colorType() or bitmap.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - bitmap pixels could not be allocated.
+ - bitmap.rowBytes() is too small to contain one row of pixels.
+
+ @param bitmap storage for pixels copied from SkCanvas
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkBitmap& bitmap, int srcX, int srcY);
+
+ /** Copies SkRect from pixels to SkCanvas. SkMatrix and clip are ignored.
+ Source SkRect corners are (0, 0) and (info.width(), info.height()).
+ Destination SkRect corners are (x, y) and
+ (imageInfo().width(), imageInfo().height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to imageInfo().colorType() and imageInfo().alphaType() if required.
+
+ Pixels are writable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not writable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkCanvas pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for x or y to offset pixels to the left or
+ above SkCanvas pixels.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - pixels could not be converted to SkCanvas imageInfo().colorType() or
+ imageInfo().alphaType().
+ - SkCanvas pixels are not writable; for instance, SkCanvas is document-based.
+ - rowBytes is too small to contain one row of pixels.
+
+ @param info width, height, SkColorType, and SkAlphaType of pixels
+ @param pixels pixels to copy, of size info.height() times rowBytes, or larger
+ @param rowBytes size of one row of pixels; info.width() times pixel size, or larger
+ @param x offset into SkCanvas writable pixels on x-axis; may be negative
+ @param y offset into SkCanvas writable pixels on y-axis; may be negative
+ @return true if pixels were written to SkCanvas
+ */
+ bool writePixels(const SkImageInfo& info, const void* pixels, size_t rowBytes, int x, int y);
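+
+ /* Sketch: writing an 8x8 caller-provided buffer to the canvas at (10, 10);
+    the buffer contents and coordinates are illustrative.
+
+        SkImageInfo srcInfo = SkImageInfo::MakeN32Premul(8, 8);
+        std::vector<uint32_t> src(8 * 8, 0xFF00FF00);   // one premultiplied color
+        bool ok = canvas->writePixels(srcInfo, src.data(),
+                                      srcInfo.minRowBytes(), 10, 10);
+ */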
+
+ /** Copies SkRect from bitmap pixels to SkCanvas. SkMatrix and clip are ignored.
+ Source SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+
+ Destination SkRect corners are (x, y) and
+ (imageInfo().width(), imageInfo().height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to imageInfo().colorType() and imageInfo().alphaType() if required.
+
+ Pixels are writable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not writable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkCanvas pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for x or y to offset pixels to the left or
+ above SkCanvas pixels.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - bitmap does not have allocated pixels.
+ - bitmap pixels could not be converted to SkCanvas imageInfo().colorType() or
+ imageInfo().alphaType().
+ - SkCanvas pixels are not writable; for instance, SkCanvas is document-based.
+ - bitmap pixels are inaccessible; for instance, bitmap wraps a texture.
+
+ @param bitmap contains pixels copied to SkCanvas
+ @param x offset into SkCanvas writable pixels on x-axis; may be negative
+ @param y offset into SkCanvas writable pixels on y-axis; may be negative
+ @return true if pixels were written to SkCanvas
+ */
+ bool writePixels(const SkBitmap& bitmap, int x, int y);
+
+ /** Saves SkMatrix and clip.
+ Calling restore() discards changes to SkMatrix and clip,
+ restoring the SkMatrix and clip to their state when save() was called.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), setMatrix(),
+ and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), clipPath(), clipRegion().
+
+ Saved SkCanvas state is put on a stack; multiple calls to save() should be balanced
+ by an equal number of calls to restore().
+
+ Call restoreToCount() with result to restore this and subsequent saves.
+
+ @return depth of saved stack
+ */
+ int save();
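+
+ /* Sketch: balanced save()/restore(), plus restoreToCount() to unwind several
+    saves at once; the drawing calls are illustrative.
+
+        int checkpoint = canvas->save();
+        canvas->translate(20, 20);
+        canvas->save();
+        canvas->rotate(45);
+        // ... draw with both transforms applied ...
+        canvas->restoreToCount(checkpoint);   // undoes both saves
+ */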
+
+ /** Saves SkMatrix and clip, and allocates a SkBitmap for subsequent drawing.
+ Calling restore() discards changes to SkMatrix and clip, and draws the SkBitmap.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define the SkBitmap size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and
+ SkBlendMode when restore() is called.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of the layer; may be nullptr
+ @param paint graphics state for layer; may be nullptr
+ @return depth of saved stack
+ */
+ int saveLayer(const SkRect* bounds, const SkPaint* paint);
+
+ /** Saves SkMatrix and clip, and allocates a SkBitmap for subsequent drawing.
+ Calling restore() discards changes to SkMatrix and clip, and draws the SkBitmap.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define the layer size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and
+ SkBlendMode when restore() is called.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of layer; may be nullptr
+ @param paint graphics state for layer; may be nullptr
+ @return depth of saved stack
+ */
+ int saveLayer(const SkRect& bounds, const SkPaint* paint) {
+ return this->saveLayer(&bounds, paint);
+ }
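+
+ /* Sketch: drawing into a layer that is composited at 50% opacity when
+    restore() is called; bounds and alpha are illustrative.
+
+        SkPaint layerPaint;
+        layerPaint.setAlpha(128);
+        canvas->saveLayer(SkRect::MakeWH(100, 100), &layerPaint);
+        // ... draws land in the layer ...
+        canvas->restore();   // the layer is blended onto the canvas here
+ */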
+
+ /** Saves SkMatrix and clip, and allocates SkBitmap for subsequent drawing.
+
+ Calling restore() discards changes to SkMatrix and clip,
+ and blends layer with alpha opacity onto prior layer.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define layer size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ alpha of zero is fully transparent, 255 is fully opaque.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of layer; may be nullptr
+ @param alpha opacity of layer
+ @return depth of saved stack
+ */
+ int saveLayerAlpha(const SkRect* bounds, U8CPU alpha);
+
+ /** \enum SkCanvas::SaveLayerFlagsSet
+ SaveLayerFlags provides options that may be used in any combination in SaveLayerRec,
+ defining how the layer allocated by saveLayer() operates. It may be set to zero
+ or to any combination of the flags below.
+ */
+ enum SaveLayerFlagsSet {
+ kPreserveLCDText_SaveLayerFlag = 1 << 1,
+ kInitWithPrevious_SaveLayerFlag = 1 << 2, //!< initializes with previous contents
+ kMaskAgainstCoverage_EXPERIMENTAL_DONT_USE_SaveLayerFlag =
+ 1 << 3, //!< experimental: do not use
+ // instead of matching previous layer's colortype, use F16
+ kF16ColorType = 1 << 4,
+#ifdef SK_SUPPORT_LEGACY_CLIPTOLAYERFLAG
+ kDontClipToLayer_Legacy_SaveLayerFlag =
+ kDontClipToLayer_PrivateSaveLayerFlag, //!< deprecated
+#endif
+ };
+
+ typedef uint32_t SaveLayerFlags;
+
+ /** \struct SkCanvas::SaveLayerRec
+ SaveLayerRec contains the state used to create the layer.
+ */
+ struct SaveLayerRec {
+
+ /** Sets fBounds, fPaint, and fBackdrop to nullptr. Clears fSaveLayerFlags.
+
+ @return empty SaveLayerRec
+ */
+ SaveLayerRec() {}
+
+ /** Sets fBounds, fPaint, and fSaveLayerFlags; sets fBackdrop to nullptr.
+
+ @param bounds layer dimensions; may be nullptr
+ @param paint applied to layer when overlaying prior layer; may be nullptr
+ @param saveLayerFlags SaveLayerRec options to modify layer
+ @return SaveLayerRec with empty fBackdrop
+ */
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, SaveLayerFlags saveLayerFlags = 0)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fSaveLayerFlags(saveLayerFlags)
+ {}
+
+ /** Sets fBounds, fPaint, fBackdrop, and fSaveLayerFlags.
+
+ @param bounds layer dimensions; may be nullptr
+ @param paint applied to layer when overlaying prior layer;
+ may be nullptr
+ @param backdrop If not null, this causes the current layer to be filtered by
+ backdrop, and then drawn into the new layer
+ (respecting the current clip).
+ If null, the new layer is initialized with transparent-black.
+ @param saveLayerFlags SaveLayerRec options to modify layer
+ @return SaveLayerRec fully specified
+ */
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop,
+ SaveLayerFlags saveLayerFlags)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fBackdrop(backdrop)
+ , fSaveLayerFlags(saveLayerFlags)
+ {}
+
+ /** Experimental. Not ready for general use.
+ Sets fBounds, fPaint, fBackdrop, fClipMask, fClipMatrix, and fSaveLayerFlags.
+ clipMask provides the alpha channel of an image, transformed by clipMatrix, to
+ clip the layer when drawn to SkCanvas.
+
+ @param bounds layer dimensions; may be nullptr
+ @param paint graphics state applied to layer when overlaying prior
+ layer; may be nullptr
+ @param backdrop If not null, this causes the current layer to be filtered by
+ backdrop, and then drawn into the new layer
+ (respecting the current clip).
+ If null, the new layer is initialized with transparent-black.
+ @param clipMask clip applied to layer; may be nullptr
+ @param clipMatrix matrix applied to clipMask; may be nullptr to use
+ identity matrix
+ @param saveLayerFlags SaveLayerRec options to modify layer
+ @return SaveLayerRec fully specified
+ */
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop,
+ const SkImage* clipMask, const SkMatrix* clipMatrix,
+ SaveLayerFlags saveLayerFlags)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fBackdrop(backdrop)
+ , fClipMask(clipMask)
+ , fClipMatrix(clipMatrix)
+ , fSaveLayerFlags(saveLayerFlags)
+ {}
+
+ /** hints at layer size limit */
+ const SkRect* fBounds = nullptr;
+
+ /** modifies overlay */
+ const SkPaint* fPaint = nullptr;
+
+ /**
+ * If not null, this triggers the same initialization behavior as setting
+ * kInitWithPrevious_SaveLayerFlag on fSaveLayerFlags: the current layer is copied into
+ * the new layer, rather than initializing the new layer with transparent-black.
+ * This is then filtered by fBackdrop (respecting the current clip).
+ */
+ const SkImageFilter* fBackdrop = nullptr;
+
+ /** clips layer with mask alpha */
+ const SkImage* fClipMask = nullptr;
+
+ /** transforms mask alpha used to clip */
+ const SkMatrix* fClipMatrix = nullptr;
+
+ /** preserves LCD text, creates with prior layer contents */
+ SaveLayerFlags fSaveLayerFlags = 0;
+ };
+
+ /** Saves SkMatrix and clip, and allocates SkBitmap for subsequent drawing.
+
+ Calling restore() discards changes to SkMatrix and clip,
+ and blends SkBitmap with alpha opacity onto the prior layer.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SaveLayerRec contains the state used to create the layer.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param layerRec layer state
+ @return depth of save state stack before this call was made.
+ */
+ int saveLayer(const SaveLayerRec& layerRec);
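+
+ /* Sketch: SaveLayerRec with kInitWithPrevious_SaveLayerFlag, so the layer
+    starts as a copy of the existing content; values are illustrative.
+
+        SkRect bounds = SkRect::MakeWH(100, 100);
+        SkCanvas::SaveLayerRec rec(&bounds, nullptr,
+                                   SkCanvas::kInitWithPrevious_SaveLayerFlag);
+        canvas->saveLayer(rec);
+        // ... modify the copied content ...
+        canvas->restore();
+ */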
+
+ /** Removes changes to SkMatrix and clip since SkCanvas state was
+ last saved. The state is removed from the stack.
+
+ Does nothing if the stack is empty.
+ */
+ void restore();
+
+ /** Returns the number of saved states, each containing: SkMatrix and clip.
+ Equals the number of save() calls less the number of restore() calls plus one.
+ The save count of a new canvas is one.
+
+ @return depth of save state stack
+ */
+ int getSaveCount() const;
+
+ /** Restores state to SkMatrix and clip values when save(), saveLayer(),
+ saveLayerPreserveLCDTextRequests(), or saveLayerAlpha() returned saveCount.
+
+ Does nothing if saveCount is greater than state stack count.
+ Restores state to initial values if saveCount is less than or equal to one.
+
+ @param saveCount depth of state stack to restore
+ */
+ void restoreToCount(int saveCount);
+
+ /** Translates SkMatrix by dx along the x-axis and dy along the y-axis.
+
+ Mathematically, replaces SkMatrix with a translation matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of moving the drawing by (dx, dy) before transforming
+ the result with SkMatrix.
+
+ @param dx distance to translate on x-axis
+ @param dy distance to translate on y-axis
+ */
+ void translate(SkScalar dx, SkScalar dy);
+
+ /** Scales SkMatrix by sx on the x-axis and sy on the y-axis.
+
+ Mathematically, replaces SkMatrix with a scale matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of scaling the drawing by (sx, sy) before transforming
+ the result with SkMatrix.
+
+ @param sx amount to scale on x-axis
+ @param sy amount to scale on y-axis
+ */
+ void scale(SkScalar sx, SkScalar sy);
+
+ /** Rotates SkMatrix by degrees. Positive degrees rotates clockwise.
+
+ Mathematically, replaces SkMatrix with a rotation matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of rotating the drawing by degrees before transforming
+ the result with SkMatrix.
+
+ @param degrees amount to rotate, in degrees
+ */
+ void rotate(SkScalar degrees);
+
+ /** Rotates SkMatrix by degrees about a point at (px, py). Positive degrees rotates
+ clockwise.
+
+ Mathematically, constructs a rotation matrix; premultiplies the rotation matrix by
+ a translation matrix; then replaces SkMatrix with the resulting matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of rotating the drawing about a given point before
+ transforming the result with SkMatrix.
+
+ @param degrees amount to rotate, in degrees
+ @param px x-axis value of the point to rotate about
+ @param py y-axis value of the point to rotate about
+ */
+ void rotate(SkScalar degrees, SkScalar px, SkScalar py);
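+
+ /* Sketch: rotate(degrees, px, py) is equivalent to this translate/rotate/
+    translate sequence; the 45-degree angle and (50, 50) pivot are
+    illustrative.
+
+        canvas->translate(50, 50);
+        canvas->rotate(45);
+        canvas->translate(-50, -50);
+ */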
+
+ /** Skews SkMatrix by sx on the x-axis and sy on the y-axis. A positive value of sx
+ skews the drawing right as y-axis values increase; a positive value of sy skews
+ the drawing down as x-axis values increase.
+
+ Mathematically, replaces SkMatrix with a skew matrix premultiplied with SkMatrix.
+
+ This has the effect of skewing the drawing by (sx, sy) before transforming
+ the result with SkMatrix.
+
+ @param sx amount to skew on x-axis
+ @param sy amount to skew on y-axis
+ */
+ void skew(SkScalar sx, SkScalar sy);
+
+ /** Replaces SkMatrix with matrix premultiplied with existing SkMatrix.
+
+ This has the effect of transforming the drawn geometry by matrix, before
+ transforming the result with existing SkMatrix.
+
+ @param matrix matrix to premultiply with existing SkMatrix
+ */
+ void concat(const SkMatrix& matrix);
+
+ /** Replaces SkMatrix with matrix.
+ Unlike concat(), any prior matrix state is overwritten.
+
+ @param matrix matrix to copy, replacing existing SkMatrix
+ */
+ void setMatrix(const SkMatrix& matrix);
+
+ /** Sets SkMatrix to the identity matrix.
+ Any prior matrix state is overwritten.
+ */
+ void resetMatrix();
+
+ /** Replaces clip with the intersection or difference of clip and rect,
+ with an aliased or anti-aliased clip edge. rect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRect(const SkRect& rect, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and rect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rect is transformed by SkMatrix before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipRect(const SkRect& rect, SkClipOp op) {
+ this->clipRect(rect, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and rect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRect(const SkRect& rect, bool doAntiAlias = false) {
+ this->clipRect(rect, SkClipOp::kIntersect, doAntiAlias);
+ }
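+
+ /* Sketch: intersecting the clip with an anti-aliased rectangle before
+    drawing; the geometry is illustrative.
+
+        canvas->save();
+        canvas->clipRect(SkRect::MakeXYWH(10, 10, 80, 80), true);  // AA intersect
+        canvas->drawColor(SK_ColorBLUE);   // fills only the clipped area
+        canvas->restore();
+ */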
+
+ /** Sets the maximum clip rectangle, which constrains subsequent calls to clipRect(),
+ clipRRect(), and clipPath(), and intersects the current clip with the specified rect.
+ The maximum clip affects only future clipping operations; it is not retroactive.
+ The clip restriction is not recorded in pictures.
+
+ Pass an empty rect to disable maximum clip.
+ This private API is for use by Android framework only.
+
+ @param rect maximum allowed clip in device coordinates
+ */
+ void androidFramework_setDeviceClipRestriction(const SkIRect& rect);
+
+ /** Replaces clip with the intersection or difference of clip and rrect,
+ with an aliased or anti-aliased clip edge.
+ rrect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRRect(const SkRRect& rrect, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and rrect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rrect is transformed by SkMatrix before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipRRect(const SkRRect& rrect, SkClipOp op) {
+ this->clipRRect(rrect, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and rrect,
+ with an aliased or anti-aliased clip edge.
+ rrect is transformed by SkMatrix before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRRect(const SkRRect& rrect, bool doAntiAlias = false) {
+ this->clipRRect(rrect, SkClipOp::kIntersect, doAntiAlias);
+ }
+
+ /** Replaces clip with the intersection or difference of clip and path,
+ with an aliased or anti-aliased clip edge. SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipPath(const SkPath& path, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and path.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipPath(const SkPath& path, SkClipOp op) {
+ this->clipPath(path, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and path.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipPath(const SkPath& path, bool doAntiAlias = false) {
+ this->clipPath(path, SkClipOp::kIntersect, doAntiAlias);
+ }
+
+ /** Experimental. For testing only.
+ Set to simplify clip stack using PathOps.
+ */
+ void setAllowSimplifyClip(bool allow) {
+ fAllowSimplifyClip = allow;
+ }
+
+ /** Replaces clip with the intersection or difference of clip and SkRegion deviceRgn.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ deviceRgn is unaffected by SkMatrix.
+
+ @param deviceRgn SkRegion to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipRegion(const SkRegion& deviceRgn, SkClipOp op = SkClipOp::kIntersect);
+
+ /** Returns true if SkRect rect, transformed by SkMatrix, can be quickly determined to be
+ outside of clip. May return false even though rect is outside of clip.
+
+ Use to check if an area to be drawn is clipped out, to skip subsequent draw calls.
+
+ @param rect SkRect to compare with clip
+ @return true if rect, transformed by SkMatrix, does not intersect clip
+ */
+ bool quickReject(const SkRect& rect) const;
+
+ /** Returns true if path, transformed by SkMatrix, can be quickly determined to be
+ outside of clip. May return false even though path is outside of clip.
+
+ Use to check if an area to be drawn is clipped out, to skip subsequent draw calls.
+
+ @param path SkPath to compare with clip
+ @return true if path, transformed by SkMatrix, does not intersect clip
+ */
+ bool quickReject(const SkPath& path) const;
+
+ /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty,
+ returns SkRect::MakeEmpty(), where all SkRect sides equal zero.
+
+ SkRect returned is outset by one to account for partial pixel coverage if clip
+ is anti-aliased.
+
+ @return bounds of clip in local coordinates
+ */
+ SkRect getLocalClipBounds() const;
+
+ /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty,
+ returns false, and sets bounds to SkRect::MakeEmpty(), where all SkRect sides equal zero.
+
+ bounds is outset by one to account for partial pixel coverage if clip
+ is anti-aliased.
+
+ @param bounds SkRect of clip in local coordinates
+ @return true if clip bounds is not empty
+ */
+ bool getLocalClipBounds(SkRect* bounds) const {
+ *bounds = this->getLocalClipBounds();
+ return !bounds->isEmpty();
+ }
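+
+ /* Sketch: skipping work that cannot be visible, using quickReject() and the
+    local clip bounds; `contentBounds` is an assumed rectangle.
+
+        SkRect contentBounds = SkRect::MakeWH(200, 200);
+        if (!canvas->quickReject(contentBounds)) {
+            SkRect clip = canvas->getLocalClipBounds();
+            // ... draw only the content intersecting clip ...
+        }
+ */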
+
+ /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty,
+ returns SkIRect::MakeEmpty(), where all SkIRect sides equal zero.
+
+ Unlike getLocalClipBounds(), returned SkIRect is not outset.
+
+ @return bounds of clip in SkBaseDevice coordinates
+ */
+ SkIRect getDeviceClipBounds() const;
+
+ /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty,
+ returns false, and sets bounds to SkIRect::MakeEmpty(), where all SkIRect sides equal zero.
+
+ Unlike getLocalClipBounds(), bounds is not outset.
+
+ @param bounds SkIRect of clip in device coordinates
+ @return true if clip bounds is not empty
+ */
+ bool getDeviceClipBounds(SkIRect* bounds) const {
+ *bounds = this->getDeviceClipBounds();
+ return !bounds->isEmpty();
+ }
+
+ /** Fills clip with color color.
+ mode determines how ARGB is combined with destination.
+
+ @param color unpremultiplied ARGB
+ @param mode SkBlendMode used to combine source color and destination
+ */
+ void drawColor(SkColor color, SkBlendMode mode = SkBlendMode::kSrcOver);
+
+ /** Fills clip with color color using SkBlendMode::kSrc.
+ This has the effect of replacing all pixels contained by clip with color.
+
+ @param color unpremultiplied ARGB
+ */
+ void clear(SkColor color) {
+ this->drawColor(color, SkBlendMode::kSrc);
+ }
+
+ /** Makes SkCanvas contents undefined. Subsequent calls that read SkCanvas pixels,
+ such as drawing with SkBlendMode, return undefined results. discard() does
+ not change clip or SkMatrix.
+
+ discard() may do nothing, depending on the implementation of SkSurface or SkBaseDevice
+ that created SkCanvas.
+
+ discard() allows optimized performance on subsequent draws by removing
+ cached data associated with SkSurface or SkBaseDevice.
+ It is not necessary to call discard() once done with SkCanvas;
+ any cached data is deleted when owning SkSurface or SkBaseDevice is deleted.
+ */
+ void discard() { this->onDiscard(); }
+
+ /** Fills clip with SkPaint paint. SkPaint components SkMaskFilter, SkShader,
+ SkColorFilter, SkImageFilter, and SkBlendMode affect drawing;
+ SkPathEffect in paint is ignored.
+
+ @param paint graphics state used to fill SkCanvas
+ */
+ void drawPaint(const SkPaint& paint);
+
+ /** \enum SkCanvas::PointMode
+ Selects if an array of points is drawn as discrete points, as lines, or as
+ an open polygon.
+ */
+ enum PointMode {
+ kPoints_PointMode, //!< draw each point separately
+ kLines_PointMode, //!< draw each pair of points as a line segment
+ kPolygon_PointMode, //!< draw the array of points as an open polygon
+ };
+
+ /** Draws pts using clip, SkMatrix and SkPaint paint.
+ count is the number of points; if count is less than one, has no effect.
+ mode may be one of: kPoints_PointMode, kLines_PointMode, or kPolygon_PointMode.
+
+ If mode is kPoints_PointMode, the shape of point drawn depends on paint
+ SkPaint::Cap. If paint is set to SkPaint::kRound_Cap, each point draws a
+ circle of diameter SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap
+ or SkPaint::kButt_Cap, each point draws a square of width and height
+ SkPaint stroke width.
+
+ If mode is kLines_PointMode, each pair of points draws a line segment.
+ One line is drawn for every two points; each point is used once. If count is odd,
+ the final point is ignored.
+
+ If mode is kPolygon_PointMode, each adjacent pair of points draws a line segment.
+ count minus one lines are drawn; the first and last point are used once.
+
+ Each line segment respects paint SkPaint::Cap and SkPaint stroke width.
+ SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ Always draws each element one at a time; is not affected by
+ SkPaint::Join, and unlike drawPath(), does not create a mask from all points
+ and lines before drawing.
+
+ @param mode whether pts draws points or lines
+ @param count number of points in the array
+ @param pts array of points to draw
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint);
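+
+ /* Sketch: four points drawn as two separate line segments with
+    kLines_PointMode; coordinates and stroke settings are illustrative.
+
+        SkPoint pts[] = {{10, 10}, {90, 10}, {10, 50}, {90, 50}};
+        SkPaint p;
+        p.setStrokeWidth(4);
+        p.setStrokeCap(SkPaint::kRound_Cap);
+        canvas->drawPoints(SkCanvas::kLines_PointMode, 4, pts, p);
+ */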
+
+ /** Draws point at (x, y) using clip, SkMatrix and SkPaint paint.
+
+ The shape of point drawn depends on paint SkPaint::Cap.
+ If paint is set to SkPaint::kRound_Cap, draw a circle of diameter
+ SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap,
+ draw a square of width and height SkPaint stroke width.
+ SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param x left edge of circle or square
+ @param y top edge of circle or square
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawPoint(SkScalar x, SkScalar y, const SkPaint& paint);
+
+ /** Draws point p using clip, SkMatrix and SkPaint paint.
+
+ The shape of point drawn depends on paint SkPaint::Cap.
+ If paint is set to SkPaint::kRound_Cap, draw a circle of diameter
+ SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap,
+ draw a square of width and height SkPaint stroke width.
+ SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param p top-left edge of circle or square
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawPoint(SkPoint p, const SkPaint& paint) {
+ this->drawPoint(p.x(), p.y(), paint);
+ }
+
+ /** Draws line segment from (x0, y0) to (x1, y1) using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint stroke width describes the line thickness;
+ SkPaint::Cap draws the end rounded or square;
+ SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param x0 start of line segment on x-axis
+ @param y0 start of line segment on y-axis
+ @param x1 end of line segment on x-axis
+ @param y1 end of line segment on y-axis
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1, const SkPaint& paint);
+
+ /** Draws line segment from p0 to p1 using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint stroke width describes the line thickness;
+ SkPaint::Cap draws the end rounded or square;
+ SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param p0 start of line segment
+ @param p1 end of line segment
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawLine(SkPoint p0, SkPoint p1, const SkPaint& paint) {
+ this->drawLine(p0.x(), p0.y(), p1.x(), p1.y(), paint);
+ }
+
+ /** Draws SkRect rect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param rect rectangle to draw
+ @param paint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawRect(const SkRect& rect, const SkPaint& paint);
+
+ /** Draws SkIRect rect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param rect rectangle to draw
+ @param paint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawIRect(const SkIRect& rect, const SkPaint& paint) {
+ SkRect r;
+ r.set(rect); // promotes the ints to scalars
+ this->drawRect(r, paint);
+ }
+
+ /** Draws SkRegion region using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param region region to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawRegion(const SkRegion& region, const SkPaint& paint);
+
+ /** Draws oval oval using clip, SkMatrix, and SkPaint.
+ In paint: SkPaint::Style determines if oval is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param oval SkRect bounds of oval
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawOval(const SkRect& oval, const SkPaint& paint);
+
+ /** Draws SkRRect rrect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rrect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ rrect may represent a rectangle, circle, oval, uniformly rounded rectangle, or
+ may have any combination of positive non-square radii for the four corners.
+
+ @param rrect SkRRect with up to eight corner radii to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawRRect(const SkRRect& rrect, const SkPaint& paint);
+
+ /** Draws SkRRect outer and inner
+ using clip, SkMatrix, and SkPaint paint.
+ outer must contain inner or the drawing is undefined.
+ In paint: SkPaint::Style determines if SkRRect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+ If stroked and SkRRect corner has zero length radii, SkPaint::Join can
+ draw corners rounded or square.
+
+ GPU-backed platforms optimize drawing when both outer and inner are
+ concave and outer contains inner. These platforms may not be able to draw
+ SkPath built with identical data as fast.
+
+ @param outer SkRRect outer bounds to draw
+ @param inner SkRRect inner bounds to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint);
+
+ /** Draws circle at (cx, cy) with radius using clip, SkMatrix, and SkPaint paint.
+ If radius is zero or less, nothing is drawn.
+ In paint: SkPaint::Style determines if circle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param cx circle center on the x-axis
+ @param cy circle center on the y-axis
+ @param radius half the diameter of circle
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawCircle(SkScalar cx, SkScalar cy, SkScalar radius, const SkPaint& paint);
+
+ /** Draws circle at center with radius using clip, SkMatrix, and SkPaint paint.
+ If radius is zero or less, nothing is drawn.
+ In paint: SkPaint::Style determines if circle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param center circle center
+ @param radius half the diameter of circle
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawCircle(SkPoint center, SkScalar radius, const SkPaint& paint) {
+ this->drawCircle(center.x(), center.y(), radius, paint);
+ }
+
+ /** Draws arc using clip, SkMatrix, and SkPaint paint.
+
+ Arc is part of oval bounded by oval, sweeping from startAngle to startAngle plus
+ sweepAngle. startAngle and sweepAngle are in degrees.
+
+ startAngle of zero places start point at the right middle edge of oval.
+ A positive sweepAngle places arc end point clockwise from start point;
+ a negative sweepAngle places arc end point counterclockwise from start point.
+ sweepAngle may exceed 360 degrees, a full circle.
+ If useCenter is true, draw a wedge that includes lines from oval
+ center to arc end points. If useCenter is false, draw arc between end points.
+
+ If SkRect oval is empty or sweepAngle is zero, nothing is drawn.
+
+ @param oval SkRect bounds of oval containing arc to draw
+ @param startAngle angle in degrees where arc begins
+ @param sweepAngle sweep angle in degrees; positive is clockwise
+ @param useCenter if true, include the center of the oval
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint);
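+
+ /* Sketch: the same 270-degree sweep drawn as an open arc and as a pie wedge;
+    oval and angles are illustrative.
+
+        SkRect oval = SkRect::MakeWH(100, 100);
+        SkPaint p;
+        p.setStyle(SkPaint::kStroke_Style);
+        canvas->drawArc(oval, 0, 270, false, p);   // open arc
+        p.setStyle(SkPaint::kFill_Style);
+        canvas->drawArc(oval, 0, 270, true, p);    // wedge including center
+ */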
+
+ /** Draws SkRRect bounded by SkRect rect, with corner radii (rx, ry) using clip,
+ SkMatrix, and SkPaint paint.
+
+ In paint: SkPaint::Style determines if SkRRect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+ If rx or ry are less than zero, they are treated as if they are zero.
+ If rx plus ry exceeds rect width or rect height, radii are scaled down to fit.
+ If rx and ry are zero, SkRRect is drawn as SkRect and if stroked is affected by
+ SkPaint::Join.
+
+ @param rect SkRect bounds of SkRRect to draw
+ @param rx axis length on x-axis of oval describing rounded corners
+ @param ry axis length on y-axis of oval describing rounded corners
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry, const SkPaint& paint);
+
+ /** Draws SkPath path using clip, SkMatrix, and SkPaint paint.
+ SkPath contains an array of path contour, each of which may be open or closed.
+
+ In paint: SkPaint::Style determines if path is stroked or filled:
+ if filled, SkPath::FillType determines whether path contour describes inside or
+ outside of fill; if stroked, SkPaint stroke width describes the line thickness,
+ SkPaint::Cap describes line ends, and SkPaint::Join describes how
+ corners are drawn.
+
+ @param path SkPath to draw
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint);
+
+ /** Draws SkImage image, with its top-left corner at (left, top),
+ using clip, SkMatrix, and optional SkPaint paint.
+
+ This is equivalent to drawImageRect() using a dst rect at (left, top) with the
+ same width and height as the image.
+
+ @param image uncompressed rectangular map of pixels
+ @param left left side of image
+ @param top top side of image
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint = nullptr);
+
+ /** Draws SkImage image, with its top-left corner at (left, top),
+ using clip, SkMatrix, and optional SkPaint paint.
+
+ This is equivalent to drawImageRect() using a dst rect at (left, top) with the
+ same width and height as the image.
+
+ @param image uncompressed rectangular map of pixels
+ @param left left side of image
+ @param top top side of image
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImage(const sk_sp<SkImage>& image, SkScalar left, SkScalar top,
+ const SkPaint* paint = nullptr) {
+ this->drawImage(image.get(), left, top, paint);
+ }
+
+ /** \enum SkCanvas::SrcRectConstraint
+ SrcRectConstraint controls the behavior at the edge of source SkRect,
+ provided to drawImageRect(), trading off speed for precision.
+
+ SkFilterQuality in SkPaint may sample multiple pixels in the image. Source SkRect
+ restricts the bounds of pixels that may be read. SkFilterQuality may slow down if
+ it cannot read outside the bounds, when sampling near the edge of source SkRect.
+ SrcRectConstraint specifies whether an SkImageFilter is allowed to read pixels
+ outside source SkRect.
+ */
+ enum SrcRectConstraint {
+ kStrict_SrcRectConstraint, //!< sample only inside bounds; slower
+ kFast_SrcRectConstraint, //!< sample outside bounds; faster
+ };
+
+ /** Draws SkRect src of SkImage image, scaled and translated to fill SkRect dst.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst). The src rect is only used to access the provided image.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within src; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param src source SkRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within src or draw faster
+ */
+ void drawImageRect(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
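+
+ /* Sketch: drawing the top-left quarter of an image scaled into a 200x200
+    destination with strict sampling; `image` is an assumed sk_sp<SkImage>.
+
+        SkRect src = SkRect::MakeWH(image->width() / 2.0f,
+                                    image->height() / 2.0f);
+        SkRect dst = SkRect::MakeWH(200, 200);
+        canvas->drawImageRect(image.get(), src, dst, nullptr,
+                              SkCanvas::kStrict_SrcRectConstraint);
+ */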
+
+ /** Draws SkIRect isrc of SkImage image, scaled and translated to fill SkRect dst.
+ Note that isrc is on integer pixel boundaries; dst may include fractional
+ boundaries. Additionally transform draw using clip, SkMatrix, and optional SkPaint
+ paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst). The src rect is only used to access the provided image.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within isrc; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param isrc source SkIRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within isrc or draw faster
+ */
+ void drawImageRect(const SkImage* image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /** Draws SkImage image, scaled and translated to fill SkRect dst, using clip, SkMatrix,
+ and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst).
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageRect(const SkImage* image, const SkRect& dst, const SkPaint* paint);
+
+ /** Draws SkRect src of SkImage image, scaled and translated to fill SkRect dst.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst). The src rect is only used to access the provided image.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param src source SkRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within src or draw faster
+ */
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint) {
+ this->drawImageRect(image.get(), src, dst, paint, constraint);
+ }
+
+ /** Draws SkIRect isrc of SkImage image, scaled and translated to fill SkRect dst.
+ isrc is on integer pixel boundaries; dst may include fractional boundaries.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst). The src rect is only used to access the provided image.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within image; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param isrc source SkIRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within image or draw faster
+ */
+ void drawImageRect(const sk_sp<SkImage>& image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint) {
+ this->drawImageRect(image.get(), isrc, dst, paint, constraint);
+ }
+
+ /** Draws SkImage image, scaled and translated to fill SkRect dst,
+ using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ When using a shader or shader mask filter, its coordinate system is based on the
+ current CTM, so will reflect the dst rect geometry and is equivalent to
+ drawRect(dst).
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within image; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& dst, const SkPaint* paint) {
+ this->drawImageRect(image.get(), dst, paint);
+ }
+
+ /** Draws SkImage image stretched proportionally to fit into SkRect dst.
+ SkIRect center divides the image into nine sections: four sides, four corners, and
+ the center. Corners are unmodified or scaled down proportionately if their sides
+ are larger than dst; center and four sides are scaled to fit remaining space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds. If paint
+ SkFilterQuality set to kNone_SkFilterQuality, disable pixel filtering. For all
+ other values of paint SkFilterQuality, use kLow_SkFilterQuality to filter pixels.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param center SkIRect edge of image corners and sides
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = nullptr);
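+
+ /* Sketch: nine-patch stretch with a fixed 8-pixel border; `image` and the
+    destination size are illustrative.
+
+        SkIRect center = SkIRect::MakeLTRB(8, 8,
+                                           image->width() - 8,
+                                           image->height() - 8);
+        canvas->drawImageNine(image.get(), center,
+                              SkRect::MakeWH(300, 120), nullptr);
+ */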
+
+ /** Draws SkImage image stretched proportionally to fit into SkRect dst.
+ SkIRect center divides the image into nine sections: four sides, four corners, and
+ the center. Corners are not scaled, or scaled down proportionately if their sides
+ are larger than dst; center and four sides are scaled to fit remaining space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds. If paint
+ SkFilterQuality set to kNone_SkFilterQuality, disable pixel filtering. For all
+ other values of paint SkFilterQuality, use kLow_SkFilterQuality to filter pixels.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param center SkIRect edge of image corners and sides
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageNine(const sk_sp<SkImage>& image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = nullptr) {
+ this->drawImageNine(image.get(), center, dst, paint);
+ }
+
+ /** Draws SkBitmap bitmap, with its top-left corner at (left, top),
+ using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is not nullptr, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+ @param left left side of bitmap
+ @param top top side of bitmap
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawBitmap(const SkBitmap& bitmap, SkScalar left, SkScalar top,
+ const SkPaint* paint = nullptr);
+
+ /** Draws SkRect src of SkBitmap bitmap, scaled and translated to fill SkRect dst.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within src; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+ @param src source SkRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within src or draw faster
+ */
+ void drawBitmapRect(const SkBitmap& bitmap, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
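+    // Hedged sketch, with 'canvas' and a decoded 'bitmap' assumed: scale the
+    // top-left 32x32 pixels into a 64x64 destination, keeping sampling
+    // strictly inside the source rectangle when filtering.
+    //
+    //     canvas->drawBitmapRect(bitmap, SkRect::MakeWH(32, 32),
+    //                            SkRect::MakeWH(64, 64), nullptr,
+    //                            SkCanvas::kStrict_SrcRectConstraint);
+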
+ /** Draws SkIRect isrc of SkBitmap bitmap, scaled and translated to fill SkRect dst.
+ isrc is on integer pixel boundaries; dst may include fractional boundaries.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within isrc; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+ @param isrc source SkIRect of image to draw from
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint sample strictly within isrc, or draw faster
+ */
+ void drawBitmapRect(const SkBitmap& bitmap, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /** Draws SkBitmap bitmap, scaled and translated to fill SkRect dst.
+        bitmap bounds are on integer pixel boundaries; dst may include fractional boundaries.
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ constraint set to kStrict_SrcRectConstraint limits SkPaint SkFilterQuality to
+ sample within bitmap; set to kFast_SrcRectConstraint allows sampling outside to
+ improve performance.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ @param constraint filter strictly within bitmap or draw faster
+ */
+ void drawBitmapRect(const SkBitmap& bitmap, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /** Draws SkBitmap bitmap stretched proportionally to fit into SkRect dst.
+ SkIRect center divides the bitmap into nine sections: four sides, four corners,
+ and the center. Corners are not scaled, or scaled down proportionately if their
+ sides are larger than dst; center and four sides are scaled to fit remaining
+ space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds. If paint
+        SkFilterQuality is set to kNone_SkFilterQuality, disable pixel filtering. For all
+ other values of paint SkFilterQuality, use kLow_SkFilterQuality to filter pixels.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+        @param center  SkIRect whose edges divide the image into its corners, sides, and center
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = nullptr);
+
+ /** \struct SkCanvas::Lattice
+ SkCanvas::Lattice divides SkBitmap or SkImage into a rectangular grid.
+ Grid entries on even columns and even rows are fixed; these entries are
+ always drawn at their original size if the destination is large enough.
+ If the destination side is too small to hold the fixed entries, all fixed
+ entries are proportionately scaled down to fit.
+ The grid entries not on even columns and rows are scaled to fit the
+ remaining space, if any.
+ */
+ struct Lattice {
+
+ /** \enum SkCanvas::Lattice::RectType
+ Optional setting per rectangular grid entry to make it transparent,
+ or to fill the grid entry with a color.
+ */
+ enum RectType : uint8_t {
+ kDefault = 0, //!< draws SkBitmap into lattice rectangle
+ kTransparent, //!< skips lattice rectangle by making it transparent
+ kFixedColor, //!< draws one of fColors into lattice rectangle
+ };
+
+ const int* fXDivs; //!< x-axis values dividing bitmap
+ const int* fYDivs; //!< y-axis values dividing bitmap
+ const RectType* fRectTypes; //!< array of fill types
+ int fXCount; //!< number of x-coordinates
+ int fYCount; //!< number of y-coordinates
+ const SkIRect* fBounds; //!< source bounds to draw from
+ const SkColor* fColors; //!< array of colors
+ };
+
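+    // Hedged sketch of populating a Lattice for drawBitmapLattice() or
+    // drawImageLattice() below; the div values are illustrative. Two x-divs
+    // and two y-divs produce a 3x3 grid whose middle row and column stretch,
+    // mirroring the nine-patch behavior of drawBitmapNine().
+    //
+    //     const int xDivs[] = { 20, 80 };
+    //     const int yDivs[] = { 20, 80 };
+    //     SkCanvas::Lattice lattice = { xDivs, yDivs, nullptr, 2, 2,
+    //                                   nullptr, nullptr };
+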
+ /** Draws SkBitmap bitmap stretched proportionally to fit into SkRect dst.
+
+ SkCanvas::Lattice lattice divides bitmap into a rectangular grid.
+ Each intersection of an even-numbered row and column is fixed; like the corners
+ of drawBitmapNine(), fixed lattice elements never scale larger than their initial
+ size and shrink proportionately when all fixed elements exceed the bitmap
+ dimension. All other grid elements scale to fill the available space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If bitmap is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from bitmap bounds. If paint
+        SkFilterQuality is set to kNone_SkFilterQuality, disable pixel filtering. For all
+ other values of paint SkFilterQuality, use kLow_SkFilterQuality to filter pixels.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ @param bitmap SkBitmap containing pixels, dimensions, and format
+ @param lattice division of bitmap into fixed and variable rectangles
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint = nullptr);
+
+ /** Draws SkImage image stretched proportionally to fit into SkRect dst.
+
+ SkCanvas::Lattice lattice divides image into a rectangular grid.
+ Each intersection of an even-numbered row and column is fixed; like the corners
+ of drawBitmapNine(), fixed lattice elements never scale larger than their initial
+        size and shrink proportionately when all fixed elements exceed the image
+ dimension. All other grid elements scale to fill the available space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds. If paint
+        SkFilterQuality is set to kNone_SkFilterQuality, disable pixel filtering. For all
+ other values of paint SkFilterQuality, use kLow_SkFilterQuality to filter pixels.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+        If generated mask extends beyond image bounds, replicate image edge colors,
+        just as SkShader made from SkImage::makeShader with
+        SkShader::kClamp_TileMode set replicates the image edge color when it samples
+        outside of its bounds.
+
+ @param image SkImage containing pixels, dimensions, and format
+        @param lattice  division of image into fixed and variable rectangles
+ @param dst destination SkRect of image to draw to
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint = nullptr);
+
+ /**
+ * Experimental. Controls anti-aliasing of each edge of images in an image-set.
+ */
+ enum QuadAAFlags : unsigned {
+ kLeft_QuadAAFlag = 0b0001,
+ kTop_QuadAAFlag = 0b0010,
+ kRight_QuadAAFlag = 0b0100,
+ kBottom_QuadAAFlag = 0b1000,
+
+ kNone_QuadAAFlags = 0b0000,
+ kAll_QuadAAFlags = 0b1111,
+ };
+
+ /** This is used by the experimental API below. */
+ struct SK_API ImageSetEntry {
+ ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ int matrixIndex, float alpha, unsigned aaFlags, bool hasClip);
+
+ ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ float alpha, unsigned aaFlags);
+
+ ImageSetEntry();
+ ~ImageSetEntry();
+ ImageSetEntry(const ImageSetEntry&);
+ ImageSetEntry& operator=(const ImageSetEntry&);
+
+ sk_sp<const SkImage> fImage;
+ SkRect fSrcRect;
+ SkRect fDstRect;
+ int fMatrixIndex = -1; // Index into the preViewMatrices arg, or < 0
+ float fAlpha = 1.f;
+ unsigned fAAFlags = kNone_QuadAAFlags; // QuadAAFlags
+ bool fHasClip = false; // True to use next 4 points in dstClip arg as quad
+ };
+
+ /**
+ * This is an experimental API for the SkiaRenderer Chromium project, and its API will surely
+ * evolve if it is not removed outright.
+ *
+ * This behaves very similarly to drawRect() combined with a clipPath() formed by clip
+ * quadrilateral. 'rect' and 'clip' are in the same coordinate space. If 'clip' is null, then it
+ * is as if the rectangle was not clipped (or, alternatively, clipped to itself). If not null,
+ * then it must provide 4 points.
+ *
+ * In addition to combining the draw and clipping into one operation, this function adds the
+ * additional capability of controlling each of the rectangle's edges anti-aliasing
+ * independently. The edges of the clip will respect the per-edge AA flags. It is required that
+ * 'clip' be contained inside 'rect'. In terms of mapping to edge labels, the 'clip' points
+ * should be ordered top-left, top-right, bottom-right, bottom-left so that the edge between [0]
+ * and [1] is "top", [1] and [2] is "right", [2] and [3] is "bottom", and [3] and [0] is "left".
+ * This ordering matches SkRect::toQuad().
+ *
+ * This API only draws solid color, filled rectangles so it does not accept a full SkPaint.
+ */
+ void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ const SkColor4f& color, SkBlendMode mode);
+ void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ SkColor color, SkBlendMode mode) {
+ this->experimental_DrawEdgeAAQuad(rect, clip, aaFlags, SkColor4f::FromColor(color), mode);
+ }
+
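+    // Hedged sketch, 'canvas' assumed: fill an unclipped rect, anti-aliasing
+    // only its left and top edges, e.g. to avoid seams between adjacent tiles.
+    //
+    //     canvas->experimental_DrawEdgeAAQuad(
+    //             SkRect::MakeWH(256, 256), nullptr,
+    //             SkCanvas::QuadAAFlags(SkCanvas::kLeft_QuadAAFlag |
+    //                                   SkCanvas::kTop_QuadAAFlag),
+    //             SK_ColorBLUE, SkBlendMode::kSrcOver);
+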
+ /**
+     * This is a bulk variant of experimental_DrawEdgeAAQuad() that renders 'cnt' textured quads.
+ * For each entry, 'fDstRect' is rendered with its clip (determined by entry's 'fHasClip' and
+ * the current index in 'dstClip'). The entry's fImage is applied to the destination rectangle
+ * by sampling from 'fSrcRect' sub-image. The corners of 'fSrcRect' map to the corners of
+ * 'fDstRect', just like in drawImageRect(), and they will be properly interpolated when
+ * applying a clip.
+ *
+ * Like experimental_DrawEdgeAAQuad(), each entry can specify edge AA flags that apply to both
+ * the destination rect and its clip.
+ *
+     * If provided, the 'dstClips' array must have length equal to 4 * the number of entries with
+ * fHasClip true. If 'dstClips' is null, every entry must have 'fHasClip' set to false. The
+ * destination clip coordinates will be read consecutively with the image set entries, advancing
+ * by 4 points every time an entry with fHasClip is passed.
+ *
+ * This entry point supports per-entry manipulations to the canvas's current matrix. If an
+ * entry provides 'fMatrixIndex' >= 0, it will be drawn as if the canvas's CTM was
+ * canvas->getTotalMatrix() * preViewMatrices[fMatrixIndex]. If 'fMatrixIndex' is less than 0,
+ * the pre-view matrix transform is implicitly the identity, so it will be drawn using just the
+     * current canvas matrix. The pre-view matrix modifies the canvas's view matrix; it does not
+ * affect the local coordinates of each entry.
+ *
+ * An optional paint may be provided, which supports the same subset of features usable with
+ * drawImageRect (i.e. assumed to be filled and no path effects). When a paint is provided, the
+ * image set is drawn as if each image used the applied paint independently, so each is affected
+ * by the image, color, and/or mask filter.
+ */
+ void experimental_DrawEdgeAAImageSet(const ImageSetEntry imageSet[], int cnt,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint* paint = nullptr,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /** Draws text, with origin at (x, y), using clip, SkMatrix, SkFont font,
+ and SkPaint paint.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ Text meaning depends on SkTextEncoding.
+
+ Text size is affected by SkMatrix and SkFont text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, SkImageFilter, and SkDrawLooper; apply to text. By
+ default, draws filled black glyphs.
+
+ @param text character code points or glyphs drawn
+ @param byteLength byte length of text array
+ @param encoding text encoding used in the text array
+ @param x start of text on x-axis
+ @param y start of text on y-axis
+        @param font  typeface, text size, and so on, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawSimpleText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint);
+
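+    // Hedged sketch, 'canvas' assumed: draw five UTF-8 bytes at (10, 32) with
+    // a 24-point font and a default black fill.
+    //
+    //     SkFont font;
+    //     font.setSize(24);
+    //     canvas->drawSimpleText("hello", 5, SkTextEncoding::kUTF8,
+    //                            10, 32, font, SkPaint());
+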
+ /** Draws null terminated string, with origin at (x, y), using clip, SkMatrix,
+ SkFont font, and SkPaint paint.
+
+ This function uses the default character-to-glyph mapping from the
+ SkTypeface in font. It does not perform typeface fallback for
+ characters not found in the SkTypeface. It does not perform kerning;
+ glyphs are positioned based on their default advances.
+
+ String str is encoded as UTF-8.
+
+ Text size is affected by SkMatrix and font text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, SkImageFilter, and SkDrawLooper; apply to text. By
+ default, draws filled black glyphs.
+
+ @param str character code points drawn,
+ ending with a char value of zero
+ @param x start of string on x-axis
+ @param y start of string on y-axis
+        @param font  typeface, text size, and so on, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawString(const char str[], SkScalar x, SkScalar y, const SkFont& font,
+ const SkPaint& paint) {
+ this->drawSimpleText(str, strlen(str), SkTextEncoding::kUTF8, x, y, font, paint);
+ }
+
+ /** Draws SkString, with origin at (x, y), using clip, SkMatrix, SkFont font,
+ and SkPaint paint.
+
+ This function uses the default character-to-glyph mapping from the
+ SkTypeface in font. It does not perform typeface fallback for
+ characters not found in the SkTypeface. It does not perform kerning;
+ glyphs are positioned based on their default advances.
+
+ SkString str is encoded as UTF-8.
+
+ Text size is affected by SkMatrix and SkFont text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, SkImageFilter, and SkDrawLooper; apply to text. By
+ default, draws filled black glyphs.
+
+ @param str character code points drawn,
+ ending with a char value of zero
+ @param x start of string on x-axis
+ @param y start of string on y-axis
+        @param font  typeface, text size, and so on, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawString(const SkString& str, SkScalar x, SkScalar y, const SkFont& font,
+ const SkPaint& paint) {
+ this->drawSimpleText(str.c_str(), str.size(), SkTextEncoding::kUTF8, x, y, font, paint);
+ }
+
+ /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint.
+
+ blob contains glyphs, their positions, and paint attributes specific to text:
+ SkTypeface, SkPaint text size, SkPaint text scale x,
+ SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold,
+ SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text,
+ and SkPaint subpixel text.
+
+ SkTextEncoding must be set to SkTextEncoding::kGlyphID.
+
+ Elements of paint: anti-alias, SkBlendMode, color including alpha,
+ SkColorFilter, SkPaint dither, SkDrawLooper, SkMaskFilter, SkPathEffect, SkShader, and
+ SkPaint::Style; apply to blob. If SkPaint contains SkPaint::kStroke_Style:
+ SkPaint miter limit, SkPaint::Cap, SkPaint::Join, and SkPaint stroke width;
+ apply to SkPath created from blob.
+
+ @param blob glyphs, positions, and their paints' text size, typeface, and so on
+ @param x horizontal offset applied to blob
+ @param y vertical offset applied to blob
+ @param paint blend, color, stroking, and so on, used to draw
+ */
+ void drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint);
+
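+    // Hedged sketch, 'canvas' assumed: build a blob from a UTF-8 string, with
+    // glyphs positioned by their default advances, then draw it.
+    //
+    //     sk_sp<SkTextBlob> blob =
+    //             SkTextBlob::MakeFromString("blob", SkFont());
+    //     canvas->drawTextBlob(blob, 10, 64, SkPaint());
+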
+ /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint.
+
+ blob contains glyphs, their positions, and paint attributes specific to text:
+ SkTypeface, SkPaint text size, SkPaint text scale x,
+ SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold,
+ SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text,
+ and SkPaint subpixel text.
+
+ SkTextEncoding must be set to SkTextEncoding::kGlyphID.
+
+ Elements of paint: SkPathEffect, SkMaskFilter, SkShader, SkColorFilter,
+ SkImageFilter, and SkDrawLooper; apply to blob.
+
+ @param blob glyphs, positions, and their paints' text size, typeface, and so on
+ @param x horizontal offset applied to blob
+ @param y vertical offset applied to blob
+ @param paint blend, color, stroking, and so on, used to draw
+ */
+ void drawTextBlob(const sk_sp<SkTextBlob>& blob, SkScalar x, SkScalar y, const SkPaint& paint) {
+ this->drawTextBlob(blob.get(), x, y, paint);
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix.
+ Clip and SkMatrix are unchanged by picture contents, as if
+ save() was called before and restore() was called after drawPicture().
+
+ SkPicture records a series of draw commands for later playback.
+
+ @param picture recorded drawing commands to play
+ */
+ void drawPicture(const SkPicture* picture) {
+ this->drawPicture(picture, nullptr, nullptr);
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix.
+ Clip and SkMatrix are unchanged by picture contents, as if
+ save() was called before and restore() was called after drawPicture().
+
+ SkPicture records a series of draw commands for later playback.
+
+ @param picture recorded drawing commands to play
+ */
+ void drawPicture(const sk_sp<SkPicture>& picture) {
+ this->drawPicture(picture.get());
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with
+ SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter,
+ SkImageFilter, and SkBlendMode, if provided.
+
+ matrix transformation is equivalent to: save(), concat(), drawPicture(), restore().
+ paint use is equivalent to: saveLayer(), drawPicture(), restore().
+
+ @param picture recorded drawing commands to play
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+ */
+ void drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint);
+
+ /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with
+ SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter,
+ SkImageFilter, and SkBlendMode, if provided.
+
+ matrix transformation is equivalent to: save(), concat(), drawPicture(), restore().
+ paint use is equivalent to: saveLayer(), drawPicture(), restore().
+
+ @param picture recorded drawing commands to play
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+ */
+ void drawPicture(const sk_sp<SkPicture>& picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ this->drawPicture(picture.get(), matrix, paint);
+ }
+
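+    // Hedged sketch, 'canvas' assumed: record commands into a picture with
+    // SkPictureRecorder (declared in SkPictureRecorder.h), then replay them.
+    //
+    //     SkPictureRecorder recorder;
+    //     SkCanvas* rec = recorder.beginRecording(SkRect::MakeWH(100, 100));
+    //     rec->drawCircle(50, 50, 40, SkPaint());
+    //     sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+    //     canvas->drawPicture(picture);
+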
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix.
+ If vertices texs and vertices colors are defined in vertices, and SkPaint paint
+ contains SkShader, SkBlendMode mode combines vertices colors with SkShader.
+
+ @param vertices triangle mesh to draw
+ @param mode combines vertices colors with SkShader, if both are present
+ @param paint specifies the SkShader, used as SkVertices texture; may be nullptr
+ */
+ void drawVertices(const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint);
+
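+    // Hedged sketch, 'canvas' assumed: one triangle with per-vertex colors;
+    // with no SkShader in the paint, the vertex colors are drawn directly.
+    //
+    //     SkPoint pts[] = { {0, 0}, {100, 0}, {50, 100} };
+    //     SkColor colors[] = { SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE };
+    //     sk_sp<SkVertices> verts = SkVertices::MakeCopy(
+    //             SkVertices::kTriangles_VertexMode, 3, pts, nullptr, colors);
+    //     canvas->drawVertices(verts, SkBlendMode::kSrcOver, SkPaint());
+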
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix.
+ If vertices texs and vertices colors are defined in vertices, and SkPaint paint
+ contains SkShader, SkBlendMode mode combines vertices colors with SkShader.
+
+ @param vertices triangle mesh to draw
+ @param mode combines vertices colors with SkShader, if both are present
+ @param paint specifies the SkShader, used as SkVertices texture, may be nullptr
+ */
+ void drawVertices(const sk_sp<SkVertices>& vertices, SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix. Bone data is used to
+ deform vertices with bone weights.
+ If vertices texs and vertices colors are defined in vertices, and SkPaint paint
+ contains SkShader, SkBlendMode mode combines vertices colors with SkShader.
+        The first element of bones should be an object-to-world-space transformation matrix that
+ will be applied before performing mesh deformations. If no such transformation is needed,
+ it should be the identity matrix.
+ boneCount must be at most 80, and thus the size of bones should be at most 80.
+
+ @param vertices triangle mesh to draw
+ @param bones bone matrix data
+ @param boneCount number of bone matrices
+ @param mode combines vertices colors with SkShader, if both are present
+ @param paint specifies the SkShader, used as SkVertices texture, may be nullptr
+ */
+ void drawVertices(const SkVertices* vertices, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix. Bone data is used to
+ deform vertices with bone weights.
+ If vertices texs and vertices colors are defined in vertices, and SkPaint paint
+ contains SkShader, SkBlendMode mode combines vertices colors with SkShader.
+        The first element of bones should be an object-to-world-space transformation matrix that
+ will be applied before performing mesh deformations. If no such transformation is needed,
+ it should be the identity matrix.
+ boneCount must be at most 80, and thus the size of bones should be at most 80.
+
+ @param vertices triangle mesh to draw
+ @param bones bone matrix data
+ @param boneCount number of bone matrices
+ @param mode combines vertices colors with SkShader, if both are present
+ @param paint specifies the SkShader, used as SkVertices texture, may be nullptr
+ */
+ void drawVertices(const sk_sp<SkVertices>& vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws a Coons patch: the interpolation of four cubics with shared corners,
+ associating a color, and optionally a texture SkPoint, with each corner.
+
+ Coons patch uses clip and SkMatrix, paint SkShader, SkColorFilter,
+ alpha, SkImageFilter, and SkBlendMode. If SkShader is provided it is treated
+        as Coons patch texture; SkBlendMode mode combines the corner colors and SkShader if
+ both are provided.
+
+        SkPoint array cubics specifies four SkPath cubics starting at the top-left corner,
+ in clockwise order, sharing every fourth point. The last SkPath cubic ends at the
+ first point.
+
+        Color array colors associates colors with corners in top-left, top-right,
+ bottom-right, bottom-left order.
+
+ If paint contains SkShader, SkPoint array texCoords maps SkShader as texture to
+ corners in top-left, top-right, bottom-right, bottom-left order.
+
+ @param cubics SkPath cubic array, sharing common points
+ @param colors color array, one for each corner
+ @param texCoords SkPoint array of texture coordinates, mapping SkShader to corners;
+ may be nullptr
+ @param mode SkBlendMode for colors, and for SkShader if paint has one
+ @param paint SkShader, SkColorFilter, SkBlendMode, used to draw
+ */
+ void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws SkPath cubic Coons patch: the interpolation of four cubics with shared corners,
+ associating a color, and optionally a texture SkPoint, with each corner.
+
+ Coons patch uses clip and SkMatrix, paint SkShader, SkColorFilter,
+ alpha, SkImageFilter, and SkBlendMode. If SkShader is provided it is treated
+        as Coons patch texture; SkBlendMode mode combines the corner colors and SkShader if
+ both are provided.
+
+        SkPoint array cubics specifies four SkPath cubics starting at the top-left corner,
+ in clockwise order, sharing every fourth point. The last SkPath cubic ends at the
+ first point.
+
+        Color array colors associates colors with corners in top-left, top-right,
+ bottom-right, bottom-left order.
+
+ If paint contains SkShader, SkPoint array texCoords maps SkShader as texture to
+ corners in top-left, top-right, bottom-right, bottom-left order.
+
+ @param cubics SkPath cubic array, sharing common points
+ @param colors color array, one for each corner
+ @param texCoords SkPoint array of texture coordinates, mapping SkShader to corners;
+ may be nullptr
+ @param paint SkShader, SkColorFilter, SkBlendMode, used to draw
+ */
+ void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], const SkPaint& paint) {
+ this->drawPatch(cubics, colors, texCoords, SkBlendMode::kModulate, paint);
+ }
+
+ /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint.
+ paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode
+ to draw, if present. For each entry in the array, SkRect tex locates sprite in
+ atlas, and SkRSXform xform transforms it into destination space.
+
+        xform, tex, and colors, if present, must contain count entries.
+ Optional colors are applied for each sprite using SkBlendMode mode, treating
+ sprite as source and colors as destination.
+ Optional cullRect is a conservative bounds of all transformed sprites.
+ If cullRect is outside of clip, canvas can skip drawing.
+
+ If atlas is nullptr, this draws nothing.
+
+ @param atlas SkImage containing sprites
+ @param xform SkRSXform mappings for sprites in atlas
+ @param tex SkRect locations of sprites in atlas
+ @param colors one per sprite, blended with sprite using SkBlendMode; may be nullptr
+ @param count number of sprites to draw
+ @param mode SkBlendMode combining colors and sprites
+ @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr
+ @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr
+ */
+ void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode, const SkRect* cullRect,
+ const SkPaint* paint);
+
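+    // Hedged sketch, 'canvas' and an 'atlas' image assumed: draw one 64x64
+    // sprite, translated to (200, 100) with no rotation. SkRSXform packs
+    // (scaled cosine, scaled sine, tx, ty).
+    //
+    //     SkRSXform xform = SkRSXform::Make(1, 0, 200, 100);
+    //     SkRect tex = SkRect::MakeWH(64, 64);
+    //     canvas->drawAtlas(atlas, &xform, &tex, nullptr, 1,
+    //                       SkBlendMode::kDst, nullptr, nullptr);
+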
+ /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint.
+ paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode
+ to draw, if present. For each entry in the array, SkRect tex locates sprite in
+ atlas, and SkRSXform xform transforms it into destination space.
+
+        xform, tex, and colors, if present, must contain count entries.
+        Optional colors are applied for each sprite using SkBlendMode mode.
+ Optional cullRect is a conservative bounds of all transformed sprites.
+ If cullRect is outside of clip, canvas can skip drawing.
+
+ @param atlas SkImage containing sprites
+ @param xform SkRSXform mappings for sprites in atlas
+ @param tex SkRect locations of sprites in atlas
+ @param colors one per sprite, blended with sprite using SkBlendMode; may be nullptr
+ @param count number of sprites to draw
+ @param mode SkBlendMode combining colors and sprites
+ @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr
+ @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr
+ */
+ void drawAtlas(const sk_sp<SkImage>& atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode, const SkRect* cullRect,
+ const SkPaint* paint) {
+ this->drawAtlas(atlas.get(), xform, tex, colors, count, mode, cullRect, paint);
+ }
+
+ /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint.
+ paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode
+ to draw, if present. For each entry in the array, SkRect tex locates sprite in
+ atlas, and SkRSXform xform transforms it into destination space.
+
+        xform and tex must contain count entries.
+ Optional cullRect is a conservative bounds of all transformed sprites.
+ If cullRect is outside of clip, canvas can skip drawing.
+
+ @param atlas SkImage containing sprites
+ @param xform SkRSXform mappings for sprites in atlas
+ @param tex SkRect locations of sprites in atlas
+ @param count number of sprites to draw
+ @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr
+ @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr
+ */
+ void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[], int count,
+ const SkRect* cullRect, const SkPaint* paint) {
+ this->drawAtlas(atlas, xform, tex, nullptr, count, SkBlendMode::kDst, cullRect, paint);
+ }
+
+ /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint.
+ paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode
+ to draw, if present. For each entry in the array, SkRect tex locates sprite in
+ atlas, and SkRSXform xform transforms it into destination space.
+
+        xform and tex must contain count entries.
+ Optional cullRect is a conservative bounds of all transformed sprites.
+ If cullRect is outside of clip, canvas can skip drawing.
+
+ @param atlas SkImage containing sprites
+ @param xform SkRSXform mappings for sprites in atlas
+ @param tex SkRect locations of sprites in atlas
+ @param count number of sprites to draw
+ @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr
+ @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr
+ */
+ void drawAtlas(const sk_sp<SkImage>& atlas, const SkRSXform xform[], const SkRect tex[],
+ int count, const SkRect* cullRect, const SkPaint* paint) {
+ this->drawAtlas(atlas.get(), xform, tex, nullptr, count, SkBlendMode::kDst,
+ cullRect, paint);
+ }
+
+ /** Draws SkDrawable drawable using clip and SkMatrix, concatenated with
+ optional matrix.
+
+ If SkCanvas has an asynchronous implementation, as is the case
+ when it is recording into SkPicture, then drawable will be referenced,
+ so that SkDrawable::draw() can be called when the operation is finalized. To force
+ immediate drawing, call SkDrawable::draw() instead.
+
+ @param drawable custom struct encapsulating drawing commands
+ @param matrix transformation applied to drawing; may be nullptr
+ */
+ void drawDrawable(SkDrawable* drawable, const SkMatrix* matrix = nullptr);
+
+ /** Draws SkDrawable drawable using clip and SkMatrix, offset by (x, y).
+
+ If SkCanvas has an asynchronous implementation, as is the case
+ when it is recording into SkPicture, then drawable will be referenced,
+ so that SkDrawable::draw() can be called when the operation is finalized. To force
+ immediate drawing, call SkDrawable::draw() instead.
+
+ @param drawable custom struct encapsulating drawing commands
+ @param x offset into SkCanvas writable pixels on x-axis
+ @param y offset into SkCanvas writable pixels on y-axis
+ */
+ void drawDrawable(SkDrawable* drawable, SkScalar x, SkScalar y);
+
+ /** Associates SkRect on SkCanvas with an annotation; a key-value pair, where the key is
+ a null-terminated UTF-8 string, and optional value is stored as SkData.
+
+ Only some canvas implementations, such as recording to SkPicture, or drawing to
+ document PDF, use annotations.
+
+ @param rect SkRect extent of canvas to annotate
+ @param key string used for lookup
+ @param value data holding value stored in annotation
+ */
+ void drawAnnotation(const SkRect& rect, const char key[], SkData* value);
+
+    /** Associates SkRect on SkCanvas with an annotation; a key-value pair, where the key is
+ a null-terminated UTF-8 string, and optional value is stored as SkData.
+
+ Only some canvas implementations, such as recording to SkPicture, or drawing to
+ document PDF, use annotations.
+
+ @param rect SkRect extent of canvas to annotate
+ @param key string used for lookup
+ @param value data holding value stored in annotation
+ */
+ void drawAnnotation(const SkRect& rect, const char key[], const sk_sp<SkData>& value) {
+ this->drawAnnotation(rect, key, value.get());
+ }
+
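+    // Hedged sketch, 'canvas' assumed and the key purely illustrative: attach
+    // string data to a rect, e.g. for a PDF backend to turn into a link.
+    //
+    //     sk_sp<SkData> value = SkData::MakeWithCString("https://example.org");
+    //     canvas->drawAnnotation(SkRect::MakeWH(100, 20), "example-key", value);
+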
+ /** Returns true if clip is empty; that is, nothing will draw.
+
+ May do work when called; it should not be called
+ more often than needed. However, once called, subsequent calls perform no
+ work until clip changes.
+
+ @return true if clip is empty
+ */
+ virtual bool isClipEmpty() const;
+
+ /** Returns true if clip is SkRect and not empty.
+ Returns false if the clip is empty, or if it is not SkRect.
+
+ @return true if clip is SkRect and not empty
+ */
+ virtual bool isClipRect() const;
+
+ /** Returns SkMatrix.
+ This does not account for translation by SkBaseDevice or SkSurface.
+
+ @return SkMatrix in SkCanvas
+ */
+ const SkMatrix& getTotalMatrix() const;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ // don't call
+ virtual GrRenderTargetContext* internal_private_accessTopLayerRenderTargetContext();
+ SkIRect internal_private_getTopLayerBounds() const { return getTopLayerBounds(); }
+
+ // TEMP helpers until we switch virtual over to const& for src-rect
+ void legacy_drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+ void legacy_drawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /**
+ * Returns the global clip as a region. If the clip contains AA, then only the bounds
+ * of the clip may be returned.
+ */
+ void temporary_internal_getRgnClip(SkRegion* region);
+
+ void private_draw_shadow_rec(const SkPath&, const SkDrawShadowRec&);
+
+
+protected:
+ // default impl defers to getDevice()->newSurface(info)
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props);
+
+ // default impl defers to its device
+ virtual bool onPeekPixels(SkPixmap* pixmap);
+ virtual bool onAccessTopLayerPixels(SkPixmap* pixmap);
+ virtual SkImageInfo onImageInfo() const;
+ virtual bool onGetProps(SkSurfaceProps* props) const;
+ virtual void onFlush();
+
+ // Subclass save/restore notifiers.
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ // getSaveLayerStrategy()'s return value may suppress full layer allocation.
+ enum SaveLayerStrategy {
+ kFullLayer_SaveLayerStrategy,
+ kNoLayer_SaveLayerStrategy,
+ };
+
+ virtual void willSave() {}
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ virtual SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& ) {
+ return kFullLayer_SaveLayerStrategy;
+ }
+ // returns true if we should actually perform the saveBehind, or false if we should just save.
+ virtual bool onDoSaveBehind(const SkRect*) { return true; }
+ virtual void willRestore() {}
+ virtual void didRestore() {}
+ virtual void didConcat(const SkMatrix& ) {}
+ virtual void didSetMatrix(const SkMatrix& ) {}
+ virtual void didTranslate(SkScalar dx, SkScalar dy) {
+ this->didConcat(SkMatrix::MakeTrans(dx, dy));
+ }
+
+ // NOTE: If you are adding a new onDraw virtual to SkCanvas, PLEASE add an override to
+ // SkCanvasVirtualEnforcer (in SkCanvasVirtualEnforcer.h). This ensures that subclasses using
+ // that mechanism will be required to implement the new function.
+ virtual void onDrawPaint(const SkPaint& paint);
+ virtual void onDrawBehind(const SkPaint& paint);
+ virtual void onDrawRect(const SkRect& rect, const SkPaint& paint);
+ virtual void onDrawRRect(const SkRRect& rrect, const SkPaint& paint);
+ virtual void onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint);
+ virtual void onDrawOval(const SkRect& rect, const SkPaint& paint);
+ virtual void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint);
+ virtual void onDrawPath(const SkPath& path, const SkPaint& paint);
+ virtual void onDrawRegion(const SkRegion& region, const SkPaint& paint);
+
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint);
+
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint);
+ virtual void onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint);
+
+ // TODO: Remove old signature
+ virtual void onDrawVerticesObject(const SkVertices* vertices, SkBlendMode mode,
+ const SkPaint& paint) {
+ this->onDrawVerticesObject(vertices, nullptr, 0, mode, paint);
+ }
+ virtual void onDrawVerticesObject(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode mode, const SkPaint& paint);
+
+ virtual void onDrawImage(const SkImage* image, SkScalar dx, SkScalar dy, const SkPaint* paint);
+ virtual void onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint);
+ virtual void onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint);
+ virtual void onDrawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint);
+
+ virtual void onDrawBitmap(const SkBitmap& bitmap, SkScalar dx, SkScalar dy,
+ const SkPaint* paint);
+ virtual void onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint);
+ virtual void onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint);
+ virtual void onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint);
+
+ virtual void onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect rect[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkRect* cull, const SkPaint* paint);
+
+ virtual void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value);
+ virtual void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&);
+
+ virtual void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix);
+ virtual void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint);
+
+ virtual void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ const SkColor4f& color, SkBlendMode mode);
+ virtual void onDrawEdgeAAImageSet(const ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint* paint, SrcRectConstraint constraint);
+
+ enum ClipEdgeStyle {
+ kHard_ClipEdgeStyle,
+ kSoft_ClipEdgeStyle
+ };
+
+ virtual void onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipRegion(const SkRegion& deviceRgn, SkClipOp op);
+
+ virtual void onDiscard();
+
+ // Clip rectangle bounds. Called internally by saveLayer.
+    // Returns false if the rectangle is entirely clipped out.
+    // If non-NULL, the imageFilter parameter will be used to expand the clip
+    // and offscreen bounds for any margin required by the filter DAG.
+ bool clipRectBounds(const SkRect* bounds, SaveLayerFlags flags, SkIRect* intersection,
+ const SkImageFilter* imageFilter = nullptr);
+
+ SkBaseDevice* getTopDevice() const;
+
+private:
+ /** After calling saveLayer(), there can be any number of devices that make
+ up the top-most drawing area. LayerIter can be used to iterate through
+ those devices. Note that the iterator is only valid until the next API
+ call made on the canvas. Ownership of all pointers in the iterator stays
+ with the canvas, so none of them should be modified or deleted.
+ */
+ class LayerIter /*: SkNoncopyable*/ {
+ public:
+ /** Initialize iterator with canvas, and set values for 1st device */
+ LayerIter(SkCanvas*);
+ ~LayerIter();
+
+ /** Return true if the iterator is done */
+ bool done() const { return fDone; }
+ /** Cycle to the next device */
+ void next();
+
+ // These reflect the current device in the iterator
+
+ SkBaseDevice* device() const;
+ const SkMatrix& matrix() const;
+ SkIRect clipBounds() const;
+ const SkPaint& paint() const;
+ int x() const;
+ int y() const;
+
+ private:
+ // used to embed the SkDrawIter object directly in our instance, w/o
+ // having to expose that class def to the public. There is an assert
+ // in our constructor to ensure that fStorage is large enough
+        // (though it needs to be a compile-time assert!). We use intptr_t to work
+        // safely with 32- and 64-bit machines (to ensure the storage is enough)
+ intptr_t fStorage[32];
+ class SkDrawIter* fImpl; // this points at fStorage
+ SkPaint fDefaultPaint;
+ bool fDone;
+ };
+
+ static bool BoundsAffectsClip(SaveLayerFlags);
+
+ static void DrawDeviceWithFilter(SkBaseDevice* src, const SkImageFilter* filter,
+ SkBaseDevice* dst, const SkIPoint& dstOrigin,
+ const SkMatrix& ctm);
+
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ // notify our surface (if we have one) that we are about to draw, so it
+ // can perform copy-on-write or invalidate any cached images
+ void predrawNotify(bool willOverwritesEntireSurface = false);
+ void predrawNotify(const SkRect* rect, const SkPaint* paint, ShaderOverrideOpacity);
+ void predrawNotify(const SkRect* rect, const SkPaint* paint, bool shaderOverrideIsOpaque) {
+ this->predrawNotify(rect, paint, shaderOverrideIsOpaque ? kOpaque_ShaderOverrideOpacity
+ : kNotOpaque_ShaderOverrideOpacity);
+ }
+
+ SkBaseDevice* getDevice() const;
+
+ class MCRec;
+
+ SkDeque fMCStack;
+ // points to top of stack
+ MCRec* fMCRec;
+
+ // the first N recs that can fit here mean we won't call malloc
+ static constexpr int kMCRecSize = 128; // most recent measurement
+ static constexpr int kMCRecCount = 32; // common depth for save/restores
+ static constexpr int kDeviceCMSize = 224; // most recent measurement
+
+ intptr_t fMCRecStorage[kMCRecSize * kMCRecCount / sizeof(intptr_t)];
+ intptr_t fDeviceCMStorage[kDeviceCMSize / sizeof(intptr_t)];
+
+ const SkSurfaceProps fProps;
+
+ int fSaveCount; // value returned by getSaveCount()
+
+ std::unique_ptr<SkRasterHandleAllocator> fAllocator;
+
+ SkSurface_Base* fSurfaceBase;
+ SkSurface_Base* getSurfaceBase() const { return fSurfaceBase; }
+ void setSurfaceBase(SkSurface_Base* sb) {
+ fSurfaceBase = sb;
+ }
+ friend class SkSurface_Base;
+ friend class SkSurface_Gpu;
+
+ SkIRect fClipRestrictionRect = SkIRect::MakeEmpty();
+
+ void doSave();
+ void checkForDeferredSave();
+ void internalSetMatrix(const SkMatrix&);
+
+ friend class SkAndroidFrameworkUtils;
+ friend class SkCanvasPriv; // needs kDontClipToLayer_PrivateSaveLayerFlag
+ friend class SkDrawIter; // needs setupDrawForLayerDevice()
+ friend class AutoLayerForImageFilter;
+ friend class DebugCanvas; // needs experimental fAllowSimplifyClip
+ friend class SkSurface_Raster; // needs getDevice()
+ friend class SkNoDrawCanvas; // needs resetForNextPicture()
+ friend class SkPictureRecord; // predrawNotify (why does it need it? <reed>)
+ friend class SkOverdrawCanvas;
+ friend class SkRasterHandleAllocator;
+protected:
+ // For use by SkNoDrawCanvas (via SkCanvasVirtualEnforcer, which can't be a friend)
+ SkCanvas(const SkIRect& bounds);
+private:
+ SkCanvas(const SkBitmap&, std::unique_ptr<SkRasterHandleAllocator>,
+ SkRasterHandleAllocator::Handle);
+
+ SkCanvas(SkCanvas&&) = delete;
+ SkCanvas(const SkCanvas&) = delete;
+ SkCanvas& operator=(SkCanvas&&) = delete;
+ SkCanvas& operator=(const SkCanvas&) = delete;
+
+ /** Experimental
+ * Saves the specified subset of the current pixels in the current layer,
+ * and then clears those pixels to transparent black.
+ * Restores the pixels on restore() by drawing them in SkBlendMode::kDstOver.
+ *
+ * @param subset conservative bounds of the area to be saved / restored.
+ * @return depth of save state stack before this call was made.
+ */
+ int only_axis_aligned_saveBehind(const SkRect* subset);
+
+ /**
+ * Like drawPaint, but magically clipped to the most recent saveBehind buffer rectangle.
+ * If there is no active saveBehind, then this draws nothing.
+ */
+ void drawClippedToSaveBehind(const SkPaint&);
+
+ void resetForNextPicture(const SkIRect& bounds);
+
+ // needs gettotalclip()
+ friend class SkCanvasStateUtils;
+
+ // call this each time we attach ourselves to a device
+ // - constructor
+ // - internalSaveLayer
+ void setupDevice(SkBaseDevice*);
+
+ void init(sk_sp<SkBaseDevice>);
+
+ /**
+ * Gets the bounds of the top level layer in global canvas coordinates. We don't want this
+ * to be public because it exposes decisions about layer sizes that are internal to the canvas.
+ */
+ SkIRect getTopLayerBounds() const;
+
+ void internalDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint);
+ void internalDrawPaint(const SkPaint& paint);
+ void internalSaveLayer(const SaveLayerRec&, SaveLayerStrategy);
+ void internalSaveBehind(const SkRect*);
+ void internalDrawDevice(SkBaseDevice*, int x, int y, const SkPaint*, SkImage* clipImage,
+ const SkMatrix& clipMatrix);
+
+ // shared by save() and saveLayer()
+ void internalSave();
+ void internalRestore();
+
+ /*
+ * Returns true if drawing the specified rect (or all if it is null) with the specified
+ * paint (or default if null) would overwrite the entire root device of the canvas
+ * (i.e. the canvas' surface if it had one).
+ */
+ bool wouldOverwriteEntireSurface(const SkRect*, const SkPaint*, ShaderOverrideOpacity) const;
+
+ /**
+     * Returns true if the paint's imagefilter can be invoked directly, without needing a layer.
+ */
+ bool canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkPaint&);
+
+ /**
+ * Returns true if the clip (for any active layer) contains antialiasing.
+ * If the clip is empty, this will return false.
+ */
+ bool androidFramework_isClipAA() const;
+
+ virtual SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const { return nullptr; }
+
+ /**
+     * Keep track of the device clip bounds and whether the matrix is scale-translate. This allows
+ * us to do a fast quick reject in the common case.
+ */
+ bool fIsScaleTranslate;
+ SkRect fDeviceClipBounds;
+
+ bool fAllowSoftClip;
+ bool fAllowSimplifyClip;
+
+ class AutoValidateClip {
+ public:
+ explicit AutoValidateClip(SkCanvas* canvas) : fCanvas(canvas) {
+ fCanvas->validateClip();
+ }
+ ~AutoValidateClip() { fCanvas->validateClip(); }
+
+ private:
+ const SkCanvas* fCanvas;
+
+ AutoValidateClip(AutoValidateClip&&) = delete;
+ AutoValidateClip(const AutoValidateClip&) = delete;
+ AutoValidateClip& operator=(AutoValidateClip&&) = delete;
+ AutoValidateClip& operator=(const AutoValidateClip&) = delete;
+ };
+
+#ifdef SK_DEBUG
+ void validateClip() const;
+#else
+ void validateClip() const {}
+#endif
+
+ std::unique_ptr<SkGlyphRunBuilder> fScratchGlyphRunBuilder;
+
+ typedef SkRefCnt INHERITED;
+};
+
+/** \class SkAutoCanvasRestore
+ Stack helper class calls SkCanvas::restoreToCount when SkAutoCanvasRestore
+ goes out of scope. Use this to guarantee that the canvas is restored to a known
+ state.
+*/
+class SkAutoCanvasRestore {
+public:
+
+ /** Preserves SkCanvas::save() count. Optionally saves SkCanvas clip and SkCanvas matrix.
+
+ @param canvas SkCanvas to guard
+ @param doSave call SkCanvas::save()
+        @return  utility to restore SkCanvas state on destruction
+ */
+ SkAutoCanvasRestore(SkCanvas* canvas, bool doSave) : fCanvas(canvas), fSaveCount(0) {
+ if (fCanvas) {
+ fSaveCount = canvas->getSaveCount();
+ if (doSave) {
+ canvas->save();
+ }
+ }
+ }
+
+ /** Restores SkCanvas to saved state. Destructor is called when container goes out of
+ scope.
+ */
+ ~SkAutoCanvasRestore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ }
+ }
+
+ /** Restores SkCanvas to saved state immediately. Subsequent calls and
+ ~SkAutoCanvasRestore() have no effect.
+ */
+ void restore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ fCanvas = nullptr;
+ }
+ }
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+
+ SkAutoCanvasRestore(SkAutoCanvasRestore&&) = delete;
+ SkAutoCanvasRestore(const SkAutoCanvasRestore&) = delete;
+ SkAutoCanvasRestore& operator=(SkAutoCanvasRestore&&) = delete;
+ SkAutoCanvasRestore& operator=(const SkAutoCanvasRestore&) = delete;
+};
+
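+// Hedged usage sketch, 'canvas' assumed: the save is undone automatically
+// when 'acr' goes out of scope, even on an early return.
+//
+//     {
+//         SkAutoCanvasRestore acr(canvas, true /*doSave*/);
+//         canvas->clipRect(SkRect::MakeWH(100, 100));
+//         canvas->drawColor(SK_ColorRED);
+//     }   // clip and matrix restored here
+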
+// Private
+#define SkAutoCanvasRestore(...) SK_REQUIRE_LOCAL_VAR(SkAutoCanvasRestore)
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h b/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h
new file mode 100644
index 0000000000..5911f383c8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasVirtualEnforcer_DEFINED
+#define SkCanvasVirtualEnforcer_DEFINED
+
+#include "include/core/SkCanvas.h"
+
+// If you would ordinarily want to inherit from Base (eg SkCanvas, SkNWayCanvas), instead
+// inherit from SkCanvasVirtualEnforcer<Base>, which will make the build fail if you forget
+// to override one of SkCanvas' key virtual hooks.
+template <typename Base>
+class SkCanvasVirtualEnforcer : public Base {
+public:
+ using Base::Base;
+
+protected:
+ void onDrawPaint(const SkPaint& paint) override = 0;
+    void onDrawBehind(const SkPaint&) override {} // make pure virtual (= 0) after Android updates
+ void onDrawRect(const SkRect& rect, const SkPaint& paint) override = 0;
+ void onDrawRRect(const SkRRect& rrect, const SkPaint& paint) override = 0;
+ void onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) override = 0;
+ void onDrawOval(const SkRect& rect, const SkPaint& paint) override = 0;
+ void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) override = 0;
+ void onDrawPath(const SkPath& path, const SkPaint& paint) override = 0;
+ void onDrawRegion(const SkRegion& region, const SkPaint& paint) override = 0;
+
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override = 0;
+
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode,
+ const SkPaint& paint) override = 0;
+ void onDrawPoints(SkCanvas::PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) override = 0;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override = 0;
+
+ void onDrawImage(const SkImage* image, SkScalar dx, SkScalar dy,
+ const SkPaint* paint) override = 0;
+ void onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SkCanvas::SrcRectConstraint constraint) override = 0;
+ void onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) override = 0;
+ void onDrawImageLattice(const SkImage* image, const SkCanvas::Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) override = 0;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // This is under active development for Chrome and not used in Android. Hold off on adding
+ // implementations in Android's SkCanvas subclasses until this stabilizes.
+ void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override {}
+ void onDrawEdgeAAImageSet(const SkCanvas::ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[], const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) override {}
+#else
+ void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override = 0;
+ void onDrawEdgeAAImageSet(const SkCanvas::ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[], const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) override = 0;
+#endif
+
+ void onDrawBitmap(const SkBitmap& bitmap, SkScalar dx, SkScalar dy,
+ const SkPaint* paint) override = 0;
+ void onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) override = 0;
+ void onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) override = 0;
+ void onDrawBitmapLattice(const SkBitmap& bitmap, const SkCanvas::Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) override = 0;
+
+ void onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect rect[],
+ const SkColor colors[], int count, SkBlendMode mode, const SkRect* cull,
+ const SkPaint* paint) override = 0;
+
+ void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override = 0;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override = 0;
+
+ void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) override = 0;
+ void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) override = 0;
+};
+
+#endif
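
The class above is an instance of the "virtual enforcer" idiom: a template that re-declares every overridable SkCanvas hook as pure virtual, so a concrete subclass that misses one fails to compile rather than silently inheriting a default. A minimal, self-contained sketch of the idiom, with a hypothetical Base class standing in for SkCanvas:

#include <cstdio>

struct Base {
    virtual ~Base() = default;
    virtual void onDraw() { /* default a subclass could silently inherit */ }
};

// Re-declare the hook as pure virtual; any concrete subclass of
// VirtualEnforcer<Base> must now provide its own onDraw().
template <typename T>
struct VirtualEnforcer : public T {
    void onDraw() override = 0;
};

struct Concrete : public VirtualEnforcer<Base> {
    void onDraw() override { std::printf("drawing\n"); }
};

int main() {
    Concrete c;
    c.onDraw();   // prints "drawing"
}
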
diff --git a/gfx/skia/skia/include/core/SkClipOp.h b/gfx/skia/skia/include/core/SkClipOp.h
new file mode 100644
index 0000000000..a9ea7a04b4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkClipOp.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipOp_DEFINED
+#define SkClipOp_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkClipOp {
+ kDifference = 0,
+ kIntersect = 1,
+
+#ifdef SK_SUPPORT_DEPRECATED_CLIPOPS
+ kUnion_deprecated = 2,
+ kXOR_deprecated = 3,
+ kReverseDifference_deprecated = 4,
+ kReplace_deprecated = 5,
+#else
+ kExtraEnumNeedInternallyPleaseIgnoreWillGoAway2 = 2,
+ kExtraEnumNeedInternallyPleaseIgnoreWillGoAway3 = 3,
+ kExtraEnumNeedInternallyPleaseIgnoreWillGoAway4 = 4,
+ kExtraEnumNeedInternallyPleaseIgnoreWillGoAway5 = 5,
+#endif
+
+ // Used internally for validation, can only shrink to 1 when the deprecated flag is gone
+ kMax_EnumValue = 5,
+};
+
+#endif
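
As a usage sketch, the two supported ops are what clients pass to the canvas clip calls. The SkCanvas and SkRect APIs assumed here are declared elsewhere in this import, and the canvas argument is a hypothetical, already-constructed canvas:

#include "include/core/SkCanvas.h"
#include "include/core/SkRect.h"

void clipExample(SkCanvas* canvas) {
    SkRect keep = SkRect::MakeXYWH(10, 10, 100, 100);
    SkRect hole = SkRect::MakeXYWH(40, 40, 20, 20);

    canvas->clipRect(keep, SkClipOp::kIntersect, /*doAntiAlias=*/true);
    canvas->clipRect(hole, SkClipOp::kDifference, /*doAntiAlias=*/true);
    // Subsequent draws are limited to 'keep' minus 'hole'.
}
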
diff --git a/gfx/skia/skia/include/core/SkColor.h b/gfx/skia/skia/include/core/SkColor.h
new file mode 100644
index 0000000000..bee77d8b60
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColor.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColor_DEFINED
+#define SkColor_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+/** \file SkColor.h
+
+ Types, consts, functions, and macros for colors.
+*/
+
+/** 8-bit type for an alpha value. 255 is 100% opaque, zero is 100% transparent.
+*/
+typedef uint8_t SkAlpha;
+
+/** 32-bit ARGB color value, unpremultiplied. Color components are always in
+ a known order. This is different from SkPMColor, which has its bytes in a configuration
+ dependent order, to match the format of kBGRA_8888_SkColorType bitmaps. SkColor
+ is the type used to specify colors in SkPaint and in gradients.
+
+ Color that is premultiplied has the same component values as color
+ that is unpremultiplied if alpha is 255, fully opaque, although it may have the
+ component values in a different order.
+*/
+typedef uint32_t SkColor;
+
+/** Returns color value from 8-bit component values. Asserts if SK_DEBUG is defined
+ if a, r, g, or b exceed 255. Since color is unpremultiplied, a may be smaller
+ than the largest of r, g, and b.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ @return color and alpha, unpremultiplied
+*/
+static constexpr inline SkColor SkColorSetARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return SkASSERT(a <= 255 && r <= 255 && g <= 255 && b <= 255),
+ (a << 24) | (r << 16) | (g << 8) | (b << 0);
+}
+
+/** Returns color value from 8-bit component values, with alpha set
+ fully opaque to 255.
+*/
+#define SkColorSetRGB(r, g, b) SkColorSetARGB(0xFF, r, g, b)
+
+/** Returns alpha byte from color value.
+*/
+#define SkColorGetA(color) (((color) >> 24) & 0xFF)
+
+/** Returns red component of color, from zero to 255.
+*/
+#define SkColorGetR(color) (((color) >> 16) & 0xFF)
+
+/** Returns green component of color, from zero to 255.
+*/
+#define SkColorGetG(color) (((color) >> 8) & 0xFF)
+
+/** Returns blue component of color, from zero to 255.
+*/
+#define SkColorGetB(color) (((color) >> 0) & 0xFF)
+
+/** Returns unpremultiplied color with red, blue, and green set from c; and alpha set
+ from a. Alpha component of c is ignored and is replaced by a in result.
+
+ @param c packed RGB, eight bits per component
+ @param a alpha: transparent at zero, fully opaque at 255
+ @return color with transparency
+*/
+static constexpr inline SkColor SkColorSetA(SkColor c, U8CPU a) {
+ return (c & 0x00FFFFFF) | (a << 24);
+}
+
+/** Represents fully transparent SkAlpha value. SkAlpha ranges from zero,
+ fully transparent; to 255, fully opaque.
+*/
+constexpr SkAlpha SK_AlphaTRANSPARENT = 0x00;
+
+/** Represents fully opaque SkAlpha value. SkAlpha ranges from zero,
+ fully transparent; to 255, fully opaque.
+*/
+constexpr SkAlpha SK_AlphaOPAQUE = 0xFF;
+
+/** Represents fully transparent SkColor. May be used to initialize a destination
+ containing a mask or a non-rectangular image.
+*/
+constexpr SkColor SK_ColorTRANSPARENT = SkColorSetARGB(0x00, 0x00, 0x00, 0x00);
+
+/** Represents fully opaque black.
+*/
+constexpr SkColor SK_ColorBLACK = SkColorSetARGB(0xFF, 0x00, 0x00, 0x00);
+
+/** Represents fully opaque dark gray.
+ Note that SVG dark gray is equivalent to 0xFFA9A9A9.
+*/
+constexpr SkColor SK_ColorDKGRAY = SkColorSetARGB(0xFF, 0x44, 0x44, 0x44);
+
+/** Represents fully opaque gray.
+ Note that HTML gray is equivalent to 0xFF808080.
+*/
+constexpr SkColor SK_ColorGRAY = SkColorSetARGB(0xFF, 0x88, 0x88, 0x88);
+
+/** Represents fully opaque light gray. HTML silver is equivalent to 0xFFC0C0C0.
+ Note that SVG light gray is equivalent to 0xFFD3D3D3.
+*/
+constexpr SkColor SK_ColorLTGRAY = SkColorSetARGB(0xFF, 0xCC, 0xCC, 0xCC);
+
+/** Represents fully opaque white.
+*/
+constexpr SkColor SK_ColorWHITE = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0xFF);
+
+/** Represents fully opaque red.
+*/
+constexpr SkColor SK_ColorRED = SkColorSetARGB(0xFF, 0xFF, 0x00, 0x00);
+
+/** Represents fully opaque green. HTML lime is equivalent.
+ Note that HTML green is equivalent to 0xFF008000.
+*/
+constexpr SkColor SK_ColorGREEN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0x00);
+
+/** Represents fully opaque blue.
+*/
+constexpr SkColor SK_ColorBLUE = SkColorSetARGB(0xFF, 0x00, 0x00, 0xFF);
+
+/** Represents fully opaque yellow.
+*/
+constexpr SkColor SK_ColorYELLOW = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0x00);
+
+/** Represents fully opaque cyan. HTML aqua is equivalent.
+*/
+constexpr SkColor SK_ColorCYAN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0xFF);
+
+/** Represents fully opaque magenta. HTML fuchsia is equivalent.
+*/
+constexpr SkColor SK_ColorMAGENTA = SkColorSetARGB(0xFF, 0xFF, 0x00, 0xFF);
+
+/** Converts RGB to its HSV components.
+ hsv[0] contains hsv hue, a value from zero to less than 360.
+ hsv[1] contains hsv saturation, a value from zero to one.
+ hsv[2] contains hsv value, a value from zero to one.
+
+ @param red red component value from zero to 255
+ @param green green component value from zero to 255
+ @param blue blue component value from zero to 255
+ @param hsv three element array which holds the resulting HSV components
+*/
+SK_API void SkRGBToHSV(U8CPU red, U8CPU green, U8CPU blue, SkScalar hsv[3]);
+
+/** Converts ARGB to its HSV components. Alpha in ARGB is ignored.
+ hsv[0] contains hsv hue, and is assigned a value from zero to less than 360.
+ hsv[1] contains hsv saturation, a value from zero to one.
+ hsv[2] contains hsv value, a value from zero to one.
+
+ @param color ARGB color to convert
+ @param hsv three element array which holds the resulting HSV components
+*/
+static inline void SkColorToHSV(SkColor color, SkScalar hsv[3]) {
+ SkRGBToHSV(SkColorGetR(color), SkColorGetG(color), SkColorGetB(color), hsv);
+}
+
+/** Converts HSV components to an ARGB color. Alpha is passed through unchanged.
+ hsv[0] represents hsv hue, an angle from zero to less than 360.
+ hsv[1] represents hsv saturation, and varies from zero to one.
+ hsv[2] represents hsv value, and varies from zero to one.
+
+ Out of range hsv values are pinned.
+
+ @param alpha alpha component of the returned ARGB color
+ @param hsv three element array which holds the input HSV components
+ @return ARGB equivalent to HSV
+*/
+SK_API SkColor SkHSVToColor(U8CPU alpha, const SkScalar hsv[3]);
+
+/** Converts HSV components to an ARGB color. Alpha is set to 255.
+ hsv[0] represents hsv hue, an angle from zero to less than 360.
+ hsv[1] represents hsv saturation, and varies from zero to one.
+ hsv[2] represents hsv value, and varies from zero to one.
+
+ Out of range hsv values are pinned.
+
+ @param hsv three element array which holds the input HSV components
+ @return RGB equivalent to HSV
+*/
+static inline SkColor SkHSVToColor(const SkScalar hsv[3]) {
+ return SkHSVToColor(0xFF, hsv);
+}
+
+/** 32-bit ARGB color value, premultiplied. The byte order for this value is
+ configuration dependent, matching the format of kBGRA_8888_SkColorType bitmaps.
+ This is different from SkColor, which is unpremultiplied, and is always in the
+ same byte order.
+*/
+typedef uint32_t SkPMColor;
+
+/** Returns a SkPMColor value from unpremultiplied 8-bit component values.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ @return premultiplied color
+*/
+SK_API SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+/** Returns pmcolor closest to color c. Multiplies c RGB components by the c alpha,
+ and arranges the bytes to match the format of kN32_SkColorType.
+
+ @param c unpremultiplied ARGB color
+ @return premultiplied color
+*/
+SK_API SkPMColor SkPreMultiplyColor(SkColor c);
+
+/** \enum SkColorChannel
+ Describes different color channels one can manipulate
+*/
+enum class SkColorChannel {
+ kR, // the red channel
+ kG, // the green channel
+ kB, // the blue channel
+ kA, // the alpha channel
+
+ kLastEnum = kA,
+};
+
+/** \struct SkRGBA4f
+ RGBA color value, holding four floating point components. Color components are always in
+ a known order. kAT determines if the SkRGBA4f's R, G, and B components are premultiplied
+ by alpha or not.
+
+ Skia's public API always uses unpremultiplied colors, which can be stored as
+ SkRGBA4f<kUnpremul_SkAlphaType>. For convenience, this type can also be referred to
+ as SkColor4f.
+*/
+template <SkAlphaType kAT>
+struct SkRGBA4f {
+ float fR; //!< red component
+ float fG; //!< green component
+ float fB; //!< blue component
+ float fA; //!< alpha component
+
+ /** Compares SkRGBA4f with other, and returns true if all components are equal.
+
+ @param other SkRGBA4f to compare
+ @return true if SkRGBA4f equals other
+ */
+ bool operator==(const SkRGBA4f& other) const {
+ return fA == other.fA && fR == other.fR && fG == other.fG && fB == other.fB;
+ }
+
+ /** Compares SkRGBA4f with other, and returns true if not all components are equal.
+
+ @param other SkRGBA4f to compare
+ @return true if SkRGBA4f is not equal to other
+ */
+ bool operator!=(const SkRGBA4f& other) const {
+ return !(*this == other);
+ }
+
+ /** Returns SkRGBA4f multiplied by scale.
+
+ @param scale value to multiply by
+ @return SkRGBA4f as (fR * scale, fG * scale, fB * scale, fA * scale)
+ */
+ SkRGBA4f operator*(float scale) const {
+ return { fR * scale, fG * scale, fB * scale, fA * scale };
+ }
+
+ /** Returns SkRGBA4f multiplied component-wise by scale.
+
+ @param scale SkRGBA4f to multiply by
+ @return SkRGBA4f as (fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA)
+ */
+ SkRGBA4f operator*(const SkRGBA4f& scale) const {
+ return { fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA };
+ }
+
+ /** Returns a pointer to components of SkRGBA4f, for array access.
+
+ @return pointer to array [fR, fG, fB, fA]
+ */
+ const float* vec() const { return &fR; }
+
+ /** Returns a pointer to components of SkRGBA4f, for array access.
+
+ @return pointer to array [fR, fG, fB, fA]
+ */
+ float* vec() { return &fR; }
+
+ /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined.
+
+ @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA)
+ @return value corresponding to index
+ */
+ float operator[](int index) const {
+ SkASSERT(index >= 0 && index < 4);
+ return this->vec()[index];
+ }
+
+ /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined.
+
+ @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA)
+ @return value corresponding to index
+ */
+ float& operator[](int index) {
+ SkASSERT(index >= 0 && index < 4);
+ return this->vec()[index];
+ }
+
+ /** Returns true if SkRGBA4f is an opaque color. Asserts if fA is out of range and
+ SK_DEBUG is defined.
+
+ @return true if SkRGBA4f is opaque
+ */
+ bool isOpaque() const {
+ SkASSERT(fA <= 1.0f && fA >= 0.0f);
+ return fA == 1.0f;
+ }
+
+ /** Returns true if all channels are in [0, 1]. */
+ bool fitsInBytes() const {
+ SkASSERT(fA >= 0.0f && fA <= 1.0f);
+ return fR >= 0.0f && fR <= 1.0f &&
+ fG >= 0.0f && fG <= 1.0f &&
+ fB >= 0.0f && fB <= 1.0f;
+ }
+
+ /** Returns closest SkRGBA4f to SkColor. Only allowed if SkRGBA4f is unpremultiplied.
+
+ @param color Color with Alpha, red, blue, and green components
+ @return SkColor as SkRGBA4f
+ */
+ static SkRGBA4f FromColor(SkColor color); // impl. depends on kAT
+
+ /** Returns closest SkColor to SkRGBA4f. Only allowed if SkRGBA4f is unpremultiplied.
+
+ @return color as SkColor
+ */
+ SkColor toSkColor() const; // impl. depends on kAT
+
+ /** Returns closest SkRGBA4f to SkPMColor. Only allowed if SkRGBA4f is premultiplied.
+
+ @return SkPMColor as SkRGBA4f
+ */
+ static SkRGBA4f FromPMColor(SkPMColor); // impl. depends on kAT
+
+ /** Returns SkRGBA4f premultiplied by alpha. Asserts at compile time if SkRGBA4f is
+ already premultiplied.
+
+ @return premultiplied color
+ */
+ SkRGBA4f<kPremul_SkAlphaType> premul() const {
+ static_assert(kAT == kUnpremul_SkAlphaType, "");
+ return { fR * fA, fG * fA, fB * fA, fA };
+ }
+
+ /** Returns SkRGBA4f unpremultiplied by alpha. Asserts at compile time if SkRGBA4f is
+ already unpremultiplied.
+
+ @return unpremultiplied color
+ */
+ SkRGBA4f<kUnpremul_SkAlphaType> unpremul() const {
+ static_assert(kAT == kPremul_SkAlphaType, "");
+
+ if (fA == 0.0f) {
+ return { 0, 0, 0, 0 };
+ } else {
+ float invAlpha = 1 / fA;
+ return { fR * invAlpha, fG * invAlpha, fB * invAlpha, fA };
+ }
+ }
+
+ // This produces bytes in RGBA order (eg GrColor). Impl. is the same, regardless of kAT
+ uint32_t toBytes_RGBA() const;
+ static SkRGBA4f FromBytes_RGBA(uint32_t color);
+
+ SkRGBA4f makeOpaque() const {
+ return { fR, fG, fB, 1.0f };
+ }
+};
+
+/** \struct SkColor4f
+ RGBA color value, holding four floating point components. Color components are always in
+ a known order, and are unpremultiplied.
+
+ This is a specialization of SkRGBA4f. For details, @see SkRGBA4f.
+*/
+using SkColor4f = SkRGBA4f<kUnpremul_SkAlphaType>;
+
+template <> SK_API SkColor4f SkColor4f::FromColor(SkColor);
+template <> SK_API SkColor SkColor4f::toSkColor() const;
+
+namespace SkColors {
+constexpr SkColor4f kTransparent = {0, 0, 0, 0};
+constexpr SkColor4f kBlack = {0, 0, 0, 1};
+constexpr SkColor4f kDkGray = {0.25f, 0.25f, 0.25f, 1};
+constexpr SkColor4f kGray = {0.50f, 0.50f, 0.50f, 1};
+constexpr SkColor4f kLtGray = {0.75f, 0.75f, 0.75f, 1};
+constexpr SkColor4f kWhite = {1, 1, 1, 1};
+constexpr SkColor4f kRed = {1, 0, 0, 1};
+constexpr SkColor4f kGreen = {0, 1, 0, 1};
+constexpr SkColor4f kBlue = {0, 0, 1, 1};
+constexpr SkColor4f kYellow = {1, 1, 0, 1};
+constexpr SkColor4f kCyan = {0, 1, 1, 1};
+constexpr SkColor4f kMagenta = {1, 0, 1, 1};
+} // namespace SkColors
+#endif
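
A small sketch exercising the packing helpers and the HSV round trip declared above; the component values are illustrative:

#include "include/core/SkColor.h"

void colorExample() {
    SkColor c = SkColorSetARGB(0x80, 0xFF, 0x00, 0x00);   // half-transparent red
    SkAlpha a = SkColorGetA(c);                           // 0x80
    c = SkColorSetA(c, SK_AlphaOPAQUE);                   // now fully opaque red

    SkScalar hsv[3];
    SkColorToHSV(c, hsv);                                 // hue 0, sat 1, value 1
    SkColor back = SkHSVToColor(hsv);                     // opaque red again

    SkColor4f f = SkColor4f::FromColor(back);             // {1, 0, 0, 1}
    (void)a; (void)f;
}
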
diff --git a/gfx/skia/skia/include/core/SkColorFilter.h b/gfx/skia/skia/include/core/SkColorFilter.h
new file mode 100644
index 0000000000..7f1e979b5d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorFilter.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilter_DEFINED
+#define SkColorFilter_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+
+class GrColorInfo;
+class GrFragmentProcessor;
+class GrRecordingContext;
+class SkBitmap;
+class SkColorMatrix;
+class SkColorSpace;
+struct SkStageRec;
+class SkString;
+
+/**
+ * ColorFilters are optional objects in the drawing pipeline. When present in
+ * a paint, they are called with the "src" colors, and return new colors, which
+ * are then passed onto the next stage (either ImageFilter or Xfermode).
+ *
+ * All subclasses are required to be reentrant-safe: it must be legal to share
+ * the same instance between several threads.
+ */
+class SK_API SkColorFilter : public SkFlattenable {
+public:
+ // DEPRECATED. skbug.com/8941
+
+ bool asColorMode(SkColor* color, SkBlendMode* mode) const {
+ return this->onAsAColorMode(color, mode);
+ }
+
+ /** If the filter can be represented by a source color plus Mode, this
+ * returns true, and sets (if not NULL) the color and mode appropriately.
+ * If not, this returns false and ignores the parameters.
+ */
+ bool asAColorMode(SkColor* color, SkBlendMode* mode) const {
+ return this->onAsAColorMode(color, mode);
+ }
+
+ /** If the filter can be represented by a 5x4 matrix, this
+ * returns true, and sets the matrix appropriately.
+ * If not, this returns false and ignores the parameter.
+ */
+ bool asAColorMatrix(float matrix[20]) const {
+ return this->onAsAColorMatrix(matrix);
+ }
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const;
+
+ enum Flags {
+        /** If set, the filter methods will not change the alpha channel of the colors.
+ */
+ kAlphaUnchanged_Flag = 1 << 0,
+ };
+
+ /** Returns the flags for this filter. Override in subclasses to return custom flags.
+ */
+ virtual uint32_t getFlags() const { return 0; }
+
+ SkColor filterColor(SkColor) const;
+
+ /**
+ * Converts the src color (in src colorspace), into the dst colorspace,
+ * then applies this filter to it, returning the filtered color in the dst colorspace.
+ */
+ SkColor4f filterColor4f(const SkColor4f& srcColor, SkColorSpace* srcCS,
+ SkColorSpace* dstCS) const;
+
+ /** Construct a colorfilter whose effect is to first apply the inner filter and then apply
+ * this filter, applied to the output of the inner filter.
+ *
+ * result = this(inner(...))
+ *
+ * Due to internal limits, it is possible that this will return NULL, so the caller must
+ * always check.
+ */
+ sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter> inner) const;
+
+#if SK_SUPPORT_GPU
+ /**
+ * A subclass may implement this factory function to work with the GPU backend. It returns
+     *  a GrFragmentProcessor that implements the color filter in GPU shader code.
+ *
+ * The fragment processor receives a premultiplied input color and produces a premultiplied
+ * output color.
+ *
+ * A null return indicates that the color filter isn't implemented for the GPU backend.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo& dstColorInfo) const;
+#endif
+
+ bool affectsTransparentBlack() const {
+ return this->filterColor(SK_ColorTRANSPARENT) != SK_ColorTRANSPARENT;
+ }
+
+ static void RegisterFlattenables();
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkColorFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkColorFilter_Type;
+ }
+
+ static sk_sp<SkColorFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkColorFilter>(static_cast<SkColorFilter*>(
+ SkFlattenable::Deserialize(
+ kSkColorFilter_Type, data, size, procs).release()));
+ }
+
+protected:
+ SkColorFilter() {}
+
+ virtual bool onAsAColorMatrix(float[20]) const;
+ virtual bool onAsAColorMode(SkColor* color, SkBlendMode* bmode) const;
+
+private:
+ /*
+ * Returns 1 if this is a single filter (not a composition of other filters), otherwise it
+     *  returns the number of leaf-node filters in a composition. This should be the same value
+ * as the number of GrFragmentProcessors returned by asFragmentProcessors's array parameter.
+ *
+ * e.g. compose(filter, compose(compose(filter, filter), filter)) --> 4
+ */
+ virtual int privateComposedFilterCount() const { return 1; }
+
+ virtual bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const = 0;
+
+ friend class SkComposeColorFilter;
+
+ typedef SkFlattenable INHERITED;
+};
+
+class SK_API SkColorFilters {
+public:
+ static sk_sp<SkColorFilter> Compose(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner) {
+ return outer ? outer->makeComposed(inner) : inner;
+ }
+ static sk_sp<SkColorFilter> Blend(SkColor c, SkBlendMode mode);
+ static sk_sp<SkColorFilter> Matrix(const SkColorMatrix&);
+ static sk_sp<SkColorFilter> Matrix(const float rowMajor[20]);
+
+ // A version of Matrix which operates in HSLA space instead of RGBA.
+ // I.e. HSLA-to-RGBA(Matrix(RGBA-to-HSLA(input))).
+ static sk_sp<SkColorFilter> HSLAMatrix(const float rowMajor[20]);
+
+ static sk_sp<SkColorFilter> LinearToSRGBGamma();
+ static sk_sp<SkColorFilter> SRGBToLinearGamma();
+ static sk_sp<SkColorFilter> Lerp(float t, sk_sp<SkColorFilter> dst, sk_sp<SkColorFilter> src);
+
+private:
+ SkColorFilters() = delete;
+};
+
+#endif
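
A sketch composing two of the factories declared above, honoring the makeComposed contract that the result may be null; the choice of SkBlendMode::kModulate is illustrative:

#include "include/core/SkColorFilter.h"

sk_sp<SkColorFilter> makeTintedGamma() {
    sk_sp<SkColorFilter> gamma = SkColorFilters::LinearToSRGBGamma();
    sk_sp<SkColorFilter> tint  = SkColorFilters::Blend(SK_ColorRED, SkBlendMode::kModulate);
    // result(c) = tint(gamma(c)); Compose tolerates a null outer or inner,
    // but the composed result itself may still come back null.
    return SkColorFilters::Compose(tint, gamma);
}
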
diff --git a/gfx/skia/skia/include/core/SkColorPriv.h b/gfx/skia/skia/include/core/SkColorPriv.h
new file mode 100644
index 0000000000..7078d92ac5
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorPriv.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorPriv_DEFINED
+#define SkColorPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMath.h"
+#include "include/private/SkTo.h"
+
+/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
+ byte into a scale value, so that we can say scale * value >> 8 instead of
+ alpha * value / 255.
+
+ In debugging, asserts that alpha is 0..255
+*/
+static inline unsigned SkAlpha255To256(U8CPU alpha) {
+ SkASSERT(SkToU8(alpha) == alpha);
+    // this one assumes that blending on top of an opaque dst keeps it that way
+ // even though it is less accurate than a+(a>>7) for non-opaque dsts
+ return alpha + 1;
+}
+
+/** Multiply value by 0..256, and shift the result down 8
+ (i.e. return (value * alpha256) >> 8)
+ */
+#define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8)
+
+static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
+ return static_cast<U8CPU>(SkScalarPin(x, 0, 1) * 255 + 0.5);
+}
+
+#define SK_A32_BITS 8
+#define SK_R32_BITS 8
+#define SK_G32_BITS 8
+#define SK_B32_BITS 8
+
+#define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
+#define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
+#define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
+#define SK_B32_MASK ((1 << SK_B32_BITS) - 1)
+
+/*
+ * Skia's 32bit backend only supports 1 swizzle order at a time (compile-time).
+ * This is specified by SK_R32_SHIFT=0 or SK_R32_SHIFT=16.
+ *
+ * For easier compatibility with Skia's GPU backend, we further restrict these
+ * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
+ * not directly correspond to the same shift-order, since we have to take endianness
+ * into account.
+ *
+ * Here we enforce this constraint.
+ */
+
+#define SK_RGBA_R32_SHIFT 0
+#define SK_RGBA_G32_SHIFT 8
+#define SK_RGBA_B32_SHIFT 16
+#define SK_RGBA_A32_SHIFT 24
+
+#define SK_BGRA_B32_SHIFT 0
+#define SK_BGRA_G32_SHIFT 8
+#define SK_BGRA_R32_SHIFT 16
+#define SK_BGRA_A32_SHIFT 24
+
+#if defined(SK_PMCOLOR_IS_RGBA) || defined(SK_PMCOLOR_IS_BGRA)
+ #error "Configure PMCOLOR by setting SK_R32_SHIFT."
+#endif
+
+// Deduce which SK_PMCOLOR_IS_ to define from the _SHIFT defines
+
+#if (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
+ #define SK_PMCOLOR_IS_RGBA
+#elif (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
+ #define SK_PMCOLOR_IS_BGRA
+#else
+ #error "need 32bit packing to be either RGBA or BGRA"
+#endif
+
+#define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
+#define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
+#define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
+#define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
+
+#define SkA32Assert(a) SkASSERT((unsigned)(a) <= SK_A32_MASK)
+#define SkR32Assert(r) SkASSERT((unsigned)(r) <= SK_R32_MASK)
+#define SkG32Assert(g) SkASSERT((unsigned)(g) <= SK_G32_MASK)
+#define SkB32Assert(b) SkASSERT((unsigned)(b) <= SK_B32_MASK)
+
+/**
+ * Pack the components into a SkPMColor, checking (in the debug version) that
+ * the components are 0..255, and are already premultiplied (i.e. alpha >= color)
+ */
+static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+/**
+ * Same as SkPackARGB32, but this version guarantees to not check that the
+ * values are premultiplied in the debug version.
+ */
+static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+static inline
+SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkR32Assert(r);
+ SkG32Assert(g);
+ SkB32Assert(b);
+
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ return SkPackARGB32(a, r, g, b);
+}
+
+// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
+// inlined; forcing inlining significantly improves performance.
+static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
+ uint32_t mask = 0xFF00FF;
+
+ uint32_t rb = ((c & mask) * scale) >> 8;
+ uint32_t ag = ((c >> 8) & mask) * scale;
+ return (rb & mask) | (ag & ~mask);
+}
+
+static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
+ return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
+}
+
+#endif
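
To make the src-over helper concrete, a worked sketch of the arithmetic above with illustrative premultiplied values:

#include "include/core/SkColorPriv.h"

void srcOverExample() {
    // Half-coverage premultiplied gray over opaque black.
    SkPMColor src = SkPackARGB32(0x80, 0x80, 0x80, 0x80);
    SkPMColor dst = SkPackARGB32(0xFF, 0x00, 0x00, 0x00);

    SkPMColor out = SkPMSrcOver(src, dst);
    // scale = SkAlpha255To256(255 - 0x80) = 128, so each dst color channel
    // contributes (0 * 128) >> 8 = 0, and the result alpha is
    // 0x80 + ((0xFF * 128) >> 8) = 0x80 + 0x7F = 0xFF.
    (void)out;
}
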
diff --git a/gfx/skia/skia/include/core/SkColorSpace.h b/gfx/skia/skia/include/core/SkColorSpace.h
new file mode 100644
index 0000000000..9e52e85cc6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorSpace.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpace_DEFINED
+#define SkColorSpace_DEFINED
+
+#include "include/core/SkMatrix44.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkOnce.h"
+#include "include/third_party/skcms/skcms.h"
+#include <memory>
+
+class SkData;
+
+/**
+ * Describes a color gamut with primaries and a white point.
+ */
+struct SK_API SkColorSpacePrimaries {
+ float fRX;
+ float fRY;
+ float fGX;
+ float fGY;
+ float fBX;
+ float fBY;
+ float fWX;
+ float fWY;
+
+ /**
+ * Convert primaries and a white point to a toXYZD50 matrix, the preferred color gamut
+ * representation of SkColorSpace.
+ */
+ bool toXYZD50(skcms_Matrix3x3* toXYZD50) const;
+};
+
+namespace SkNamedTransferFn {
+
+// As with SkNamedGamut::kSRGB, keeping this bitwise-identical to the skcms definition makes things fastest.
+static constexpr skcms_TransferFunction kSRGB =
+ { 2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction k2Dot2 =
+ { 2.2f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction kLinear =
+ { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction kRec2020 =
+ {2.22222f, 0.909672f, 0.0903276f, 0.222222f, 0.0812429f, 0, 0};
+
+static constexpr skcms_TransferFunction kPQ =
+ {-2.0f, -107/128.0f, 1.0f, 32/2523.0f, 2413/128.0f, -2392/128.0f, 8192/1305.0f };
+
+static constexpr skcms_TransferFunction kHLG =
+ {-3.0f, 2.0f, 2.0f, 1/0.17883277f, 0.28466892f, 0.55991073f, 0.0f };
+
+}
+
+namespace SkNamedGamut {
+
+static constexpr skcms_Matrix3x3 kSRGB = {{
+ // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync.
+ // 0.436065674f, 0.385147095f, 0.143066406f,
+ // 0.222488403f, 0.716873169f, 0.060607910f,
+ // 0.013916016f, 0.097076416f, 0.714096069f,
+ { SkFixedToFloat(0x6FA2), SkFixedToFloat(0x6299), SkFixedToFloat(0x24A0) },
+ { SkFixedToFloat(0x38F5), SkFixedToFloat(0xB785), SkFixedToFloat(0x0F84) },
+ { SkFixedToFloat(0x0390), SkFixedToFloat(0x18DA), SkFixedToFloat(0xB6CF) },
+}};
+
+static constexpr skcms_Matrix3x3 kAdobeRGB = {{
+    // ICC fixed-point (16.16) representation of:
+ // 0.60974, 0.20528, 0.14919,
+ // 0.31111, 0.62567, 0.06322,
+ // 0.01947, 0.06087, 0.74457,
+ { SkFixedToFloat(0x9c18), SkFixedToFloat(0x348d), SkFixedToFloat(0x2631) },
+ { SkFixedToFloat(0x4fa5), SkFixedToFloat(0xa02c), SkFixedToFloat(0x102f) },
+ { SkFixedToFloat(0x04fc), SkFixedToFloat(0x0f95), SkFixedToFloat(0xbe9c) },
+}};
+
+static constexpr skcms_Matrix3x3 kDCIP3 = {{
+ { 0.515102f, 0.291965f, 0.157153f },
+ { 0.241182f, 0.692236f, 0.0665819f },
+ { -0.00104941f, 0.0418818f, 0.784378f },
+}};
+
+static constexpr skcms_Matrix3x3 kRec2020 = {{
+ { 0.673459f, 0.165661f, 0.125100f },
+ { 0.279033f, 0.675338f, 0.0456288f },
+ { -0.00193139f, 0.0299794f, 0.797162f },
+}};
+
+static constexpr skcms_Matrix3x3 kXYZ = {{
+ { 1.0f, 0.0f, 0.0f },
+ { 0.0f, 1.0f, 0.0f },
+ { 0.0f, 0.0f, 1.0f },
+}};
+
+}
+
+class SK_API SkColorSpace : public SkNVRefCnt<SkColorSpace> {
+public:
+ /**
+ * Create the sRGB color space.
+ */
+ static sk_sp<SkColorSpace> MakeSRGB();
+
+ /**
+ * Colorspace with the sRGB primaries, but a linear (1.0) gamma.
+ */
+ static sk_sp<SkColorSpace> MakeSRGBLinear();
+
+ /**
+ * Create an SkColorSpace from a transfer function and a row-major 3x3 transformation to XYZ.
+ */
+ static sk_sp<SkColorSpace> MakeRGB(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& toXYZ);
+
+ /**
+ * Create an SkColorSpace from a parsed (skcms) ICC profile.
+ */
+ static sk_sp<SkColorSpace> Make(const skcms_ICCProfile&);
+
+ /**
+ * Convert this color space to an skcms ICC profile struct.
+ */
+ void toProfile(skcms_ICCProfile*) const;
+
+ /**
+ * Returns true if the color space gamma is near enough to be approximated as sRGB.
+ */
+ bool gammaCloseToSRGB() const;
+
+ /**
+ * Returns true if the color space gamma is linear.
+ */
+ bool gammaIsLinear() const;
+
+ /**
+ * If the transfer function can be represented as coefficients to the standard
+ * equation, returns true and sets |fn| to the proper values.
+ *
+ * If not, returns false.
+ */
+ bool isNumericalTransferFn(skcms_TransferFunction* fn) const;
+
+ /**
+ * Returns true and sets |toXYZD50| if the color gamut can be described as a matrix.
+ * Returns false otherwise.
+ */
+ bool toXYZD50(SkMatrix44* toXYZD50) const;
+
+ bool toXYZD50(skcms_Matrix3x3* toXYZD50) const;
+
+ /**
+ * Returns a hash of the gamut transformation to XYZ D50. Allows for fast equality checking
+ * of gamuts, at the (very small) risk of collision.
+ */
+ uint32_t toXYZD50Hash() const { return fToXYZD50Hash; }
+
+ /**
+ * Returns a color space with the same gamut as this one, but with a linear gamma.
+ * For color spaces whose gamut can not be described in terms of XYZ D50, returns
+ * linear sRGB.
+ */
+ sk_sp<SkColorSpace> makeLinearGamma() const;
+
+ /**
+     *  Returns a color space with the same gamut as this one, but with the sRGB transfer
+ * function. For color spaces whose gamut can not be described in terms of XYZ D50, returns
+ * sRGB.
+ */
+ sk_sp<SkColorSpace> makeSRGBGamma() const;
+
+ /**
+ * Returns a color space with the same transfer function as this one, but with the primary
+ * colors rotated. For any XYZ space, this produces a new color space that maps RGB to GBR
+ * (when applied to a source), and maps RGB to BRG (when applied to a destination). For other
+ * types of color spaces, returns nullptr.
+ *
+ * This is used for testing, to construct color spaces that have severe and testable behavior.
+ */
+ sk_sp<SkColorSpace> makeColorSpin() const;
+
+ /**
+ * Returns true if the color space is sRGB.
+ * Returns false otherwise.
+ *
+ * This allows a little bit of tolerance, given that we might see small numerical error
+ * in some cases: converting ICC fixed point to float, converting white point to D50,
+ * rounding decisions on transfer function and matrix.
+ *
+ * This does not consider a 2.2f exponential transfer function to be sRGB. While these
+ * functions are similar (and it is sometimes useful to consider them together), this
+ * function checks for logical equality.
+ */
+ bool isSRGB() const;
+
+ /**
+     *  Returns nullptr on failure. Fails when we fall back to serializing ICC data and
+ * the data is too large to serialize.
+ */
+ sk_sp<SkData> serialize() const;
+
+ /**
+ * If |memory| is nullptr, returns the size required to serialize.
+ * Otherwise, serializes into |memory| and returns the size.
+ */
+ size_t writeToMemory(void* memory) const;
+
+ static sk_sp<SkColorSpace> Deserialize(const void* data, size_t length);
+
+ /**
+ * If both are null, we return true. If one is null and the other is not, we return false.
+ * If both are non-null, we do a deeper compare.
+ */
+ static bool Equals(const SkColorSpace*, const SkColorSpace*);
+
+ void transferFn(float gabcdef[7]) const;
+ void invTransferFn(float gabcdef[7]) const;
+ void gamutTransformTo(const SkColorSpace* dst, float src_to_dst_row_major[9]) const;
+
+ uint32_t transferFnHash() const { return fTransferFnHash; }
+ uint64_t hash() const { return (uint64_t)fTransferFnHash << 32 | fToXYZD50Hash; }
+
+private:
+ friend class SkColorSpaceSingletonFactory;
+
+ SkColorSpace(const float transferFn[7],
+ const skcms_Matrix3x3& toXYZ);
+
+ void computeLazyDstFields() const;
+
+ uint32_t fTransferFnHash;
+ uint32_t fToXYZD50Hash;
+
+ float fTransferFn[7];
+ float fToXYZD50_3x3[9]; // row-major
+
+ mutable float fInvTransferFn[7];
+ mutable float fFromXYZD50_3x3[9]; // row-major
+ mutable SkOnce fLazyDstFieldsOnce;
+};
+
+#endif
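
A sketch of the MakeRGB factory above, pairing a named transfer function with a named gamut and deriving a linear-gamma variant from the result:

#include "include/core/SkColorSpace.h"

sk_sp<SkColorSpace> makeAdobeLikeSpace() {
    sk_sp<SkColorSpace> cs =
            SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, SkNamedGamut::kAdobeRGB);
    sk_sp<SkColorSpace> linear = cs->makeLinearGamma();   // same gamut, gamma 1.0
    SkASSERT(linear->gammaIsLinear());
    return cs;
}
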
diff --git a/gfx/skia/skia/include/core/SkContourMeasure.h b/gfx/skia/skia/include/core/SkContourMeasure.h
new file mode 100644
index 0000000000..dc06122cf0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkContourMeasure.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkContourMeasure_DEFINED
+#define SkContourMeasure_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTDArray.h"
+
+struct SkConic;
+
+class SK_API SkContourMeasure : public SkRefCnt {
+public:
+ /** Return the length of the contour.
+ */
+ SkScalar length() const { return fLength; }
+
+ /** Pins distance to 0 <= distance <= length(), and then computes the corresponding
+ * position and tangent.
+ */
+ bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
+ SkVector* tangent) const;
+
+ enum MatrixFlags {
+ kGetPosition_MatrixFlag = 0x01,
+ kGetTangent_MatrixFlag = 0x02,
+ kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
+ };
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding matrix (by calling getPosTan).
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ matrix is unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags = kGetPosAndTan_MatrixFlag) const;
+
+ /** Given a start and stop distance, return in dst the intervening segment(s).
+ If the segment is zero-length, return false, else return true.
+ startD and stopD are pinned to legal values (0..getLength()). If startD > stopD
+ then return false (and leave dst untouched).
+ Begin the segment with a moveTo if startWithMoveTo is true
+ */
+ bool SK_WARN_UNUSED_RESULT getSegment(SkScalar startD, SkScalar stopD, SkPath* dst,
+ bool startWithMoveTo) const;
+
+    /** Return true if the contour is closed.
+ */
+ bool isClosed() const { return fIsClosed; }
+
+private:
+ struct Segment {
+ SkScalar fDistance; // total distance up to this point
+ unsigned fPtIndex; // index into the fPts array
+ unsigned fTValue : 30;
+ unsigned fType : 2; // actually the enum SkSegType
+ // See SkPathMeasurePriv.h
+
+ SkScalar getScalarT() const;
+
+ static const Segment* Next(const Segment* seg) {
+ unsigned ptIndex = seg->fPtIndex;
+ do {
+ ++seg;
+ } while (seg->fPtIndex == ptIndex);
+ return seg;
+ }
+
+ };
+
+ const SkTDArray<Segment> fSegments;
+ const SkTDArray<SkPoint> fPts; // Points used to define the segments
+
+ const SkScalar fLength;
+ const bool fIsClosed;
+
+ SkContourMeasure(SkTDArray<Segment>&& segs, SkTDArray<SkPoint>&& pts,
+ SkScalar length, bool isClosed);
+ ~SkContourMeasure() override {}
+
+ const Segment* distanceToSegment(SkScalar distance, SkScalar* t) const;
+
+ friend class SkContourMeasureIter;
+};
+
+class SK_API SkContourMeasureIter {
+public:
+ SkContourMeasureIter();
+ /**
+ * Initialize the Iter with a path.
+ * The parts of the path that are needed are copied, so the client is free to modify/delete
+ * the path after this call.
+ */
+ SkContourMeasureIter(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+ ~SkContourMeasureIter();
+
+ /**
+ * Reset the Iter with a path.
+ * The parts of the path that are needed are copied, so the client is free to modify/delete
+ * the path after this call.
+ */
+ void reset(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+
+ /**
+ * Iterates through contours in path, returning a contour-measure object for each contour
+ * in the path. Returns null when it is done.
+ *
+ * This only returns non-zero length contours, where a contour is the segments between
+ * a kMove_Verb and either ...
+ * - the next kMove_Verb
+ * - kClose_Verb (1 or more)
+ * - kDone_Verb
+ * If it encounters a zero-length contour, it is skipped.
+ */
+ sk_sp<SkContourMeasure> next();
+
+private:
+ SkPath::RawIter fIter;
+ SkPath fPath;
+ SkScalar fTolerance;
+ bool fForceClosed;
+
+ // temporary
+ SkTDArray<SkContourMeasure::Segment> fSegments;
+ SkTDArray<SkPoint> fPts; // Points used to define the segments
+
+ SkContourMeasure* buildSegments();
+
+ SkScalar compute_line_seg(SkPoint p0, SkPoint p1, SkScalar distance, unsigned ptIndex);
+ SkScalar compute_quad_segs(const SkPoint pts[3], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex);
+ SkScalar compute_conic_segs(const SkConic& conic, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt,
+ unsigned ptIndex);
+ SkScalar compute_cubic_segs(const SkPoint pts[4], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex);
+
+ SkContourMeasureIter(const SkContourMeasureIter&) = delete;
+ SkContourMeasureIter& operator=(const SkContourMeasureIter&) = delete;
+};
+
+#endif
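
A usage sketch for the iterator above: walk each non-zero-length contour of a path and sample the position and tangent at its midpoint. SkPath is assumed from elsewhere in this import:

#include "include/core/SkContourMeasure.h"

void measureExample(const SkPath& path) {
    SkContourMeasureIter iter(path, /*forceClosed=*/false);
    while (sk_sp<SkContourMeasure> contour = iter.next()) {
        SkPoint pos;
        SkVector tan;
        if (contour->getPosTan(contour->length() * 0.5f, &pos, &tan)) {
            // pos and tan describe the halfway point along this contour.
        }
    }
}
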
diff --git a/gfx/skia/skia/include/core/SkCoverageMode.h b/gfx/skia/skia/include/core/SkCoverageMode.h
new file mode 100644
index 0000000000..ea5b73d1a4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCoverageMode.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoverageMode_DEFINED
+#define SkCoverageMode_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Describes geometric operations (a la SkRegion::Op) that can be applied to coverage bytes.
+ * These can be thought of as variants of Porter-Duff (SkBlendMode) modes, but only applied
+ * to the alpha channel.
+ *
+ * See SkMaskFilter for ways to use these when combining two different masks.
+ */
+enum class SkCoverageMode {
+ kUnion, // A ∪ B A+B-A*B
+ kIntersect, // A ∩ B A*B
+ kDifference, // A - B A*(1-B)
+ kReverseDifference, // B - A B*(1-A)
+ kXor, // A ⊕ B A+B-2*A*B
+
+ kLast = kXor,
+};
+
+#endif
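
The formulas in the comments above translate directly to byte arithmetic once a coverage byte is read as a fraction of 255. A worked sketch for the union case, written as plain arithmetic rather than any Skia API:

#include <cstdint>

static uint8_t coverage_union(uint8_t a, uint8_t b) {
    // A + B - A*B, with the product renormalized from 0..255*255 back to 0..255.
    unsigned prod = (unsigned(a) * b + 127) / 255;
    return uint8_t(unsigned(a) + b - prod);
}
// e.g. coverage_union(255, x) == 255 for any x, and coverage_union(0, x) == x.
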
diff --git a/gfx/skia/skia/include/core/SkCubicMap.h b/gfx/skia/skia/include/core/SkCubicMap.h
new file mode 100644
index 0000000000..7389b92afd
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCubicMap.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCubicMap_DEFINED
+#define SkCubicMap_DEFINED
+
+#include "include/core/SkPoint.h"
+
+/**
+ * Fast evaluation of a cubic ease-in / ease-out curve. This is defined as a parametric cubic
+ * curve inside the unit square.
+ *
+ * pt[0] is implicitly { 0, 0 }
+ * pt[3] is implicitly { 1, 1 }
+ *    pt[1].x and pt[2].x are inside the unit interval [0..1]
+ */
+class SK_API SkCubicMap {
+public:
+ SkCubicMap(SkPoint p1, SkPoint p2);
+
+ static bool IsLinear(SkPoint p1, SkPoint p2) {
+ return SkScalarNearlyEqual(p1.fX, p1.fY) && SkScalarNearlyEqual(p2.fX, p2.fY);
+ }
+
+ float computeYFromX(float x) const;
+
+ SkPoint computeFromT(float t) const;
+
+private:
+ enum Type {
+ kLine_Type, // x == y
+ kCubeRoot_Type, // At^3 == x
+ kSolver_Type, // general monotonic cubic solver
+ };
+
+ SkPoint fCoeff[3];
+ Type fType;
+};
+
+#endif
+
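
A usage sketch: SkCubicMap evaluates like a CSS cubic-bezier(x1, y1, x2, y2) easing curve. The control points below are the common ease-in-out values and are illustrative, not part of this header:

#include "include/core/SkCubicMap.h"

float easeInOut(float t) {
    // A real caller would likely build the map once and reuse it.
    SkCubicMap map(SkPoint::Make(0.42f, 0.0f), SkPoint::Make(0.58f, 1.0f));
    return map.computeYFromX(t);   // t in [0..1] -> eased progress in [0..1]
}
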
diff --git a/gfx/skia/skia/include/core/SkData.h b/gfx/skia/skia/include/core/SkData.h
new file mode 100644
index 0000000000..abf4eea75b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkData.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkData_DEFINED
+#define SkData_DEFINED
+
+#include <stdio.h>
+
+#include "include/core/SkRefCnt.h"
+
+class SkStream;
+
+/**
+ * SkData holds an immutable data buffer. Not only is the data immutable,
+ * but the actual ptr that is returned (by data() or bytes()) is guaranteed
+ * to always be the same for the life of this instance.
+ */
+class SK_API SkData final : public SkNVRefCnt<SkData> {
+public:
+ /**
+ * Returns the number of bytes stored.
+ */
+ size_t size() const { return fSize; }
+
+ bool isEmpty() const { return 0 == fSize; }
+
+ /**
+ * Returns the ptr to the data.
+ */
+ const void* data() const { return fPtr; }
+
+ /**
+ * Like data(), returns a read-only ptr into the data, but in this case
+ * it is cast to uint8_t*, to make it easy to add an offset to it.
+ */
+ const uint8_t* bytes() const {
+ return reinterpret_cast<const uint8_t*>(fPtr);
+ }
+
+ /**
+ * USE WITH CAUTION.
+ * This call will assert that the refcnt is 1, as a precaution against modifying the
+ * contents when another client/thread has access to the data.
+ */
+ void* writable_data() {
+ if (fSize) {
+ // only assert we're unique if we're not empty
+ SkASSERT(this->unique());
+ }
+ return fPtr;
+ }
+
+ /**
+ * Helper to copy a range of the data into a caller-provided buffer.
+ * Returns the actual number of bytes copied, after clamping offset and
+ * length to the size of the data. If buffer is NULL, it is ignored, and
+ * only the computed number of bytes is returned.
+ */
+ size_t copyRange(size_t offset, size_t length, void* buffer) const;
+
+ /**
+ * Returns true if these two objects have the same length and contents,
+ * effectively returning 0 == memcmp(...)
+ */
+ bool equals(const SkData* other) const;
+
+ /**
+ * Function that, if provided, will be called when the SkData goes out
+ * of scope, allowing for custom allocation/freeing of the data's contents.
+ */
+ typedef void (*ReleaseProc)(const void* ptr, void* context);
+
+ /**
+ * Create a new dataref by copying the specified data
+ */
+ static sk_sp<SkData> MakeWithCopy(const void* data, size_t length);
+
+
+ /**
+ * Create a new data with uninitialized contents. The caller should call writable_data()
+ * to write into the buffer, but this must be done before another ref() is made.
+ */
+ static sk_sp<SkData> MakeUninitialized(size_t length);
+
+ /**
+ * Create a new dataref by copying the specified c-string
+ * (a null-terminated array of bytes). The returned SkData will have size()
+ * equal to strlen(cstr) + 1. If cstr is NULL, it will be treated the same
+ * as "".
+ */
+ static sk_sp<SkData> MakeWithCString(const char cstr[]);
+
+ /**
+ * Create a new dataref, taking the ptr as is, and using the
+ * releaseproc to free it. The proc may be NULL.
+ */
+ static sk_sp<SkData> MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx);
+
+ /**
+ * Call this when the data parameter is already const and will outlive the lifetime of the
+     *  SkData. Suitable for use with const globals.
+ */
+ static sk_sp<SkData> MakeWithoutCopy(const void* data, size_t length) {
+ return MakeWithProc(data, length, DummyReleaseProc, nullptr);
+ }
+
+ /**
+ * Create a new dataref from a pointer allocated by malloc. The Data object
+     *  takes ownership of that allocation, and will handle calling sk_free.
+ */
+ static sk_sp<SkData> MakeFromMalloc(const void* data, size_t length);
+
+ /**
+     *  Create a new dataref from the file with the specified path.
+ * If the file cannot be opened, this returns NULL.
+ */
+ static sk_sp<SkData> MakeFromFileName(const char path[]);
+
+ /**
+ * Create a new dataref from a stdio FILE.
+ * This does not take ownership of the FILE, nor close it.
+ * The caller is free to close the FILE at its convenience.
+ * The FILE must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFILE(FILE* f);
+
+ /**
+ * Create a new dataref from a file descriptor.
+ * This does not take ownership of the file descriptor, nor close it.
+ * The caller is free to close the file descriptor at its convenience.
+ * The file descriptor must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFD(int fd);
+
+ /**
+ * Attempt to read size bytes into a SkData. If the read succeeds, return the data,
+ * else return NULL. Either way the stream's cursor may have been changed as a result
+ * of calling read().
+ */
+ static sk_sp<SkData> MakeFromStream(SkStream*, size_t size);
+
+ /**
+ * Create a new dataref using a subset of the data in the specified
+ * src dataref.
+ */
+ static sk_sp<SkData> MakeSubset(const SkData* src, size_t offset, size_t length);
+
+ /**
+ * Returns a new empty dataref (or a reference to a shared empty dataref).
+ * New or shared, the caller must see that unref() is eventually called.
+ */
+ static sk_sp<SkData> MakeEmpty();
+
+private:
+ friend class SkNVRefCnt<SkData>;
+ ReleaseProc fReleaseProc;
+ void* fReleaseProcContext;
+ void* fPtr;
+ size_t fSize;
+
+ SkData(const void* ptr, size_t size, ReleaseProc, void* context);
+ explicit SkData(size_t size); // inplace new/delete
+ ~SkData();
+
+ // Ensure the unsized delete is called.
+ void operator delete(void* p);
+
+ // shared internal factory
+ static sk_sp<SkData> PrivateNewWithCopy(const void* srcOrNull, size_t length);
+
+ static void DummyReleaseProc(const void*, void*); // {}
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
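
A sketch of the three ownership models above: copying, wrapping a long-lived buffer without a copy, and adopting a heap allocation with a custom release proc:

#include "include/core/SkData.h"
#include <cstdlib>
#include <cstring>

void dataExample() {
    static const char kStatic[] = "lives for the whole program";

    sk_sp<SkData> copied   = SkData::MakeWithCopy(kStatic, sizeof(kStatic));
    sk_sp<SkData> zeroCopy = SkData::MakeWithoutCopy(kStatic, sizeof(kStatic));

    void* heap = malloc(16);
    memset(heap, 0, 16);
    sk_sp<SkData> adopted = SkData::MakeWithProc(
            heap, 16,
            [](const void* ptr, void*) { free(const_cast<void*>(ptr)); },
            nullptr);
    // All three buffers are released when the last sk_sp reference drops.
}
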
diff --git a/gfx/skia/skia/include/core/SkDataTable.h b/gfx/skia/skia/include/core/SkDataTable.h
new file mode 100644
index 0000000000..74e7d0ef29
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDataTable.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDataTable_DEFINED
+#define SkDataTable_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/private/SkTDArray.h"
+
+/**
+ * Like SkData, SkDataTable holds an immutable data buffer. The data buffer is
+ * organized into a table of entries, each with a length, so the entries are
+ * not required to all be the same size.
+ */
+class SK_API SkDataTable : public SkRefCnt {
+public:
+ /**
+ * Returns true if the table is empty (i.e. has no entries).
+ */
+ bool isEmpty() const { return 0 == fCount; }
+
+ /**
+ * Return the number of entries in the table. 0 for an empty table
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Return the size of the index'th entry in the table. The caller must
+ * ensure that index is valid for this table.
+ */
+ size_t atSize(int index) const;
+
+ /**
+ * Return a pointer to the data of the index'th entry in the table.
+ * The caller must ensure that index is valid for this table.
+ *
+ * @param size If non-null, this returns the byte size of this entry. This
+ * will be the same value that atSize(index) would return.
+ */
+ const void* at(int index, size_t* size = nullptr) const;
+
+ template <typename T>
+ const T* atT(int index, size_t* size = nullptr) const {
+ return reinterpret_cast<const T*>(this->at(index, size));
+ }
+
+ /**
+ * Returns the index'th entry as a c-string, and assumes that the trailing
+ * null byte had been copied into the table as well.
+ */
+ const char* atStr(int index) const {
+ size_t size;
+ const char* str = this->atT<const char>(index, &size);
+ SkASSERT(strlen(str) + 1 == size);
+ return str;
+ }
+
+ typedef void (*FreeProc)(void* context);
+
+ static sk_sp<SkDataTable> MakeEmpty();
+
+ /**
+ * Return a new DataTable that contains a copy of the data stored in each
+ * "array".
+ *
+     *  @param ptrs array of pointers to each element to be copied into the table.
+ * @param sizes array of byte-lengths for each entry in the corresponding
+ * ptrs[] array.
+ * @param count the number of array elements in ptrs[] and sizes[] to copy.
+ */
+ static sk_sp<SkDataTable> MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count);
+
+ /**
+ * Return a new table that contains a copy of the data in array.
+ *
+ * @param array contiguous array of data for all elements to be copied.
+ * @param elemSize byte-length for a given element.
+ * @param count the number of entries to be copied out of array. The number
+ * of bytes that will be copied is count * elemSize.
+ */
+ static sk_sp<SkDataTable> MakeCopyArray(const void* array, size_t elemSize, int count);
+
+ static sk_sp<SkDataTable> MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context);
+
+private:
+ struct Dir {
+ const void* fPtr;
+ uintptr_t fSize;
+ };
+
+ int fCount;
+ size_t fElemSize;
+ union {
+ const Dir* fDir;
+ const char* fElems;
+ } fU;
+
+ FreeProc fFreeProc;
+ void* fFreeProcContext;
+
+ SkDataTable();
+ SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc, void* context);
+ SkDataTable(const Dir*, int count, FreeProc, void* context);
+ virtual ~SkDataTable();
+
+ friend class SkDataTableBuilder; // access to Dir
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
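
A sketch of MakeCopyArrays with two variable-length entries; the trailing null bytes are copied along with the characters so that atStr() is valid on the result:

#include "include/core/SkDataTable.h"
#include <cstring>

void tableExample() {
    const char* strs[]   = { "alpha", "bravo-longer" };
    const void* ptrs[]   = { strs[0], strs[1] };
    const size_t sizes[] = { strlen(strs[0]) + 1, strlen(strs[1]) + 1 };

    sk_sp<SkDataTable> table = SkDataTable::MakeCopyArrays(ptrs, sizes, 2);
    SkASSERT(table->count() == 2);
    const char* second = table->atStr(1);   // "bravo-longer"
    (void)second;
}
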
diff --git a/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h b/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h
new file mode 100644
index 0000000000..d5e4c0a0e9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayListRecorder_DEFINED
+#define SkDeferredDisplayListRecorder_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkDeferredDisplayList.h"
+
+class GrBackendFormat;
+class GrBackendTexture;
+class GrContext;
+class SkCanvas;
+class SkImage;
+class SkPromiseImageTexture;
+class SkSurface;
+struct SkYUVAIndex;
+struct SkYUVASizeInfo;
+
+/*
+ * This class is intended to be used as:
+ * Get an SkSurfaceCharacterization representing the intended gpu-backed destination SkSurface
+ *      Create one of these (an SkDeferredDisplayListRecorder) on the stack
+ * Get the canvas and render into it
+ * Snap off and hold on to an SkDeferredDisplayList
+ * Once your app actually needs the pixels, call SkSurface::draw(SkDeferredDisplayList*)
+ *
+ * This class never accesses the GPU but performs all the cpu work it can. It
+ * is thread-safe (i.e., one can break a scene into tiles and perform their cpu-side
+ * work in parallel ahead of time).
+ */
+class SK_API SkDeferredDisplayListRecorder {
+public:
+ SkDeferredDisplayListRecorder(const SkSurfaceCharacterization&);
+ ~SkDeferredDisplayListRecorder();
+
+ const SkSurfaceCharacterization& characterization() const {
+ return fCharacterization;
+ }
+
+ // The backing canvas will become invalid (and this entry point will return
+ // null) once 'detach' is called.
+ // Note: ownership of the SkCanvas is not transferred via this call.
+ SkCanvas* getCanvas();
+
+ std::unique_ptr<SkDeferredDisplayList> detach();
+
+ using PromiseImageTextureContext = void*;
+ using PromiseImageTextureFulfillProc =
+ sk_sp<SkPromiseImageTexture> (*)(PromiseImageTextureContext);
+ using PromiseImageTextureReleaseProc = void (*)(PromiseImageTextureContext);
+ using PromiseImageTextureDoneProc = void (*)(PromiseImageTextureContext);
+
+ enum class PromiseImageApiVersion { kLegacy, kNew };
+
+ /**
+ Create a new SkImage that is very similar to an SkImage created by MakeFromTexture. The
+ difference is that the caller need not have created the texture nor populated it with the
+ image pixel data. Moreover, the SkImage may be created on a thread as the creation of the
+ image does not require access to the backend API or GrContext. Instead of passing a
+ GrBackendTexture the client supplies a description of the texture consisting of
+ GrBackendFormat, width, height, and GrMipMapped state. The resulting SkImage can be drawn
+ to a SkDeferredDisplayListRecorder or directly to a GPU-backed SkSurface.
+
+ When the actual texture is required to perform a backend API draw, textureFulfillProc will
+ be called to receive a GrBackendTexture. The properties of the GrBackendTexture must match
+ those set during the SkImage creation, and it must refer to a valid existing texture in the
+ backend API context/device, and be populated with the image pixel data. The texture contents
+ cannot be modified until textureReleaseProc is called. The texture cannot be deleted until
+ textureDoneProc is called.
+
+ When all the following are true:
+ * the promise SkImage is deleted,
+ * any SkDeferredDisplayLists that recorded draws referencing the image are deleted,
+ * and all draws referencing the texture have been flushed (via GrContext::flush or
+ SkSurface::flush)
+ the textureReleaseProc is called. When the following additional constraint is met
+ * the texture is safe to delete in the underlying API
+ the textureDoneProc is called. For some APIs (e.g. GL) the two states are equivalent.
+ However, for others (e.g. Vulkan) they are not as it is not legal to delete a texture until
+ the GPU work referencing it has completed.
+
+ There is at most one call to each of textureFulfillProc, textureReleaseProc, and
+ textureDoneProc. textureDoneProc is always called even if image creation fails or if the
+ image is never fulfilled (e.g. it is never drawn or all draws are clipped out). If
+ textureFulfillProc is called then textureReleaseProc will always be called even if
+ textureFulfillProc failed.
+
+ If 'version' is set to kLegacy then the textureReleaseProc call is delayed until the
+ conditions for textureDoneProc are met and then they are both called.
+
+ This call is only valid if the SkDeferredDisplayListRecorder is backed by a GPU context.
+
+ @param backendFormat format of promised gpu texture
+ @param width width of promised gpu texture
+ @param height height of promised gpu texture
+ @param mipMapped mip mapped state of promised gpu texture
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorType one of: kUnknown_SkColorType, kAlpha_8_SkColorType,
+ kRGB_565_SkColorType, kARGB_4444_SkColorType,
+ kRGBA_8888_SkColorType, kBGRA_8888_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param alphaType one of: kUnknown_SkAlphaType, kOpaque_SkAlphaType,
+ kPremul_SkAlphaType, kUnpremul_SkAlphaType
+ @param colorSpace range of colors; may be nullptr
+ @param textureFulfillProc function called to get actual gpu texture
+ @param textureReleaseProc function called when texture can be released
+ @param textureDoneProc function called when we will no longer call textureFulfillProc
+ @param textureContext state passed to textureFulfillProc and textureReleaseProc
+ @param version controls when textureReleaseProc is called
+ @return created SkImage, or nullptr
+ */
+ sk_sp<SkImage> makePromiseTexture(
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion version = PromiseImageApiVersion::kLegacy);
+
+ /**
+ This entry point operates like 'makePromiseTexture' but it is used to construct a SkImage
+ from YUV[A] data. The source data may be planar (i.e. spread across multiple textures). In
+ the extreme, Y, U, V, and A are all in different planes and thus the image is specified by
+ four textures. 'yuvaIndices' specifies the mapping from texture color channels to Y, U, V,
+ and possibly A components. It therefore indicates how many unique textures compose the full
+ image. Separate textureFulfillProc, textureReleaseProc, and textureDoneProc calls are made
+ for each texture and each texture has its own PromiseImageTextureContext. 'yuvaFormats',
+ 'yuvaSizes', and 'textureContexts' have one entry for each of the up to four textures, as
+ indicated by 'yuvaIndices'.
+ */
+ sk_sp<SkImage> makeYUVAPromiseTexture(
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendFormat yuvaFormats[],
+ const SkISize yuvaSizes[],
+ const SkYUVAIndex yuvaIndices[4],
+ int imageWidth,
+ int imageHeight,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContexts[],
+ PromiseImageApiVersion version = PromiseImageApiVersion::kLegacy);
+
+private:
+ bool init();
+
+ const SkSurfaceCharacterization fCharacterization;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrContext> fContext;
+ sk_sp<SkDeferredDisplayList::LazyProxyData> fLazyProxyData;
+ sk_sp<SkSurface> fSurface;
+#endif
+};
+
+#endif
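// Editorial example (a sketch, not upstream code): the promise-image flow
// documented above. The fulfill/release/done callbacks and the context are
// assumed to be supplied by the caller, and the 256x256 RGBA8888 description
// is illustrative; it must match the texture the fulfill callback provides.
#include "include/core/SkDeferredDisplayListRecorder.h"

template <typename Fulfill, typename Release, typename Done, typename Context>
sk_sp<SkImage> make_promise_image(SkDeferredDisplayListRecorder& recorder,
                                  const GrBackendFormat& backendFormat,
                                  Fulfill fulfill, Release release, Done done,
                                  Context ctx) {
    // Only a description of the texture is given here; the texture itself is
    // produced later, when 'fulfill' runs at flush time.
    return recorder.makePromiseTexture(backendFormat,
                                       /*width=*/256, /*height=*/256,
                                       GrMipMapped::kNo,
                                       kTopLeft_GrSurfaceOrigin,
                                       kRGBA_8888_SkColorType,
                                       kPremul_SkAlphaType,
                                       /*colorSpace=*/nullptr,
                                       fulfill, release, done, ctx);
}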
diff --git a/gfx/skia/skia/include/core/SkDeque.h b/gfx/skia/skia/include/core/SkDeque.h
new file mode 100644
index 0000000000..199d47b60e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDeque.h
@@ -0,0 +1,141 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDeque_DEFINED
+#define SkDeque_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/*
+ * The deque class works by blindly creating memory space of a specified element
+ * size. It manages the memory as a doubly linked list of blocks each of which
+ * can contain multiple elements. Pushes and pops add/remove blocks from the
+ * beginning/end of the list as necessary while each block tracks the used
+ * portion of its memory.
+ * One behavior to be aware of is that the pops do not immediately remove an
+ * empty block from the beginning/end of the list (Presumably so push/pop pairs
+ * on the block boundaries don't cause thrashing). This can result in the first/
+ * last element not residing in the first/last block.
+ */
+class SK_API SkDeque {
+public:
+ /**
+ * elemSize specifies the size of each individual element in the deque
+ * allocCount specifies how many elements are to be allocated as a block
+ */
+ explicit SkDeque(size_t elemSize, int allocCount = 1);
+ SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount = 1);
+ ~SkDeque();
+
+ bool empty() const { return 0 == fCount; }
+ int count() const { return fCount; }
+ size_t elemSize() const { return fElemSize; }
+
+ const void* front() const { return fFront; }
+ const void* back() const { return fBack; }
+
+ void* front() {
+ return (void*)((const SkDeque*)this)->front();
+ }
+
+ void* back() {
+ return (void*)((const SkDeque*)this)->back();
+ }
+
+ /**
+ * push_front and push_back return a pointer to the memory space
+ * for the new element
+ */
+ void* push_front();
+ void* push_back();
+
+ void pop_front();
+ void pop_back();
+
+private:
+ struct Block;
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kFront_IterStart,
+ kBack_IterStart,
+ };
+
+ /**
+ * Creates an uninitialized iterator. It must be reset() before use.
+ */
+ Iter();
+
+ Iter(const SkDeque& d, IterStart startLoc);
+ void* next();
+ void* prev();
+
+ void reset(const SkDeque& d, IterStart startLoc);
+
+ private:
+ SkDeque::Block* fCurBlock;
+ char* fPos;
+ size_t fElemSize;
+ };
+
+ // Inherit privately from Iter to prevent access to reverse iteration
+ class F2BIter : private Iter {
+ public:
+ F2BIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque
+ */
+ F2BIter(const SkDeque& d) : INHERITED(d, kFront_IterStart) {}
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the beginning of the
+ * deque
+ */
+ void reset(const SkDeque& d) {
+ this->INHERITED::reset(d, kFront_IterStart);
+ }
+
+ private:
+ typedef Iter INHERITED;
+ };
+
+private:
+ // allow unit test to call numBlocksAllocated
+ friend class DequeUnitTestHelper;
+
+ void* fFront;
+ void* fBack;
+
+ Block* fFrontBlock;
+ Block* fBackBlock;
+ size_t fElemSize;
+ void* fInitialStorage;
+ int fCount; // number of elements in the deque
+ int fAllocCount; // number of elements to allocate per block
+
+ Block* allocateBlock(int allocCount);
+ void freeBlock(Block* block);
+
+ /**
+ * This returns the number of chunk blocks allocated by the deque. It
+ * can be used to gauge the effectiveness of the selected allocCount.
+ */
+ int numBlocksAllocated() const;
+
+ SkDeque(const SkDeque&) = delete;
+ SkDeque& operator=(const SkDeque&) = delete;
+};
+
+#endif
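// Editorial example (a sketch, not upstream code): SkDeque hands back raw
// storage, so the caller writes and reads elements through typed casts.
#include "include/core/SkDeque.h"

static int sum_deque_of_ints() {
    SkDeque deque(sizeof(int), /*allocCount=*/4);   // four ints per block
    for (int i = 0; i < 10; ++i) {
        *static_cast<int*>(deque.push_back()) = i;  // raw slot, caller fills it
    }
    int sum = 0;
    SkDeque::F2BIter iter(deque);                   // front-to-back only
    while (void* slot = iter.next()) {
        sum += *static_cast<int*>(slot);
    }
    return sum;                                     // 0 + 1 + ... + 9 = 45
}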
diff --git a/gfx/skia/skia/include/core/SkDocument.h b/gfx/skia/skia/include/core/SkDocument.h
new file mode 100644
index 0000000000..0548711c14
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDocument.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDocument_DEFINED
+#define SkDocument_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+class SkCanvas;
+class SkWStream;
+struct SkRect;
+
+/** SK_ScalarDefaultRasterDPI is 72 dots per inch. */
+static constexpr SkScalar SK_ScalarDefaultRasterDPI = 72.0f;
+
+/**
+ * High-level API for creating a document-based canvas. To use:
+ *
+ * 1. Create a document, specifying a stream to store the output.
+ * 2. For each "page" of content:
+ * a. canvas = doc->beginPage(...)
+ * b. draw_my_content(canvas);
+ * c. doc->endPage();
+ * 3. Close the document with doc->close().
+ */
+class SK_API SkDocument : public SkRefCnt {
+public:
+
+ /**
+ * Begin a new page for the document, returning the canvas that will draw
+ * into the page. The document owns this canvas, and it will go out of
+ * scope when endPage() or close() is called, or the document is deleted.
+ */
+ SkCanvas* beginPage(SkScalar width, SkScalar height, const SkRect* content = nullptr);
+
+ /**
+ * Call endPage() when the content for the current page has been drawn
+ * (into the canvas returned by beginPage()). After this call the canvas
+ * returned by beginPage() will be out-of-scope.
+ */
+ void endPage();
+
+ /**
+ * Call close() when all pages have been drawn. This will close the file
+ * or stream holding the document's contents. After close() the document
+ * can no longer add new pages. Deleting the document will automatically
+ * call close() if need be.
+ */
+ void close();
+
+ /**
+ * Call abort() to stop producing the document immediately.
+ * The stream output must be ignored, and should not be trusted.
+ */
+ void abort();
+
+protected:
+ SkDocument(SkWStream*);
+
+ // note: subclasses must call close() in their destructor, as the base class
+ // cannot do this for them.
+ virtual ~SkDocument();
+
+ virtual SkCanvas* onBeginPage(SkScalar width, SkScalar height) = 0;
+ virtual void onEndPage() = 0;
+ virtual void onClose(SkWStream*) = 0;
+ virtual void onAbort() = 0;
+
+ // Allows subclasses to write to the stream as pages are written.
+ SkWStream* getStream() { return fStream; }
+
+ enum State {
+ kBetweenPages_State,
+ kInPage_State,
+ kClosed_State
+ };
+ State getState() const { return fState; }
+
+private:
+ SkWStream* fStream;
+ State fState;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
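// Editorial example (a sketch, not upstream code): the begin/end/close page
// loop described above, shown with the PDF backend's factory from
// include/docs/SkPDFDocument.h; any SkDocument subclass is driven the same way.
#include "include/core/SkCanvas.h"
#include "include/core/SkStream.h"
#include "include/docs/SkPDFDocument.h"

static void write_two_page_pdf(SkWStream* stream) {
    sk_sp<SkDocument> doc = SkPDF::MakeDocument(stream);
    for (int page = 0; page < 2; ++page) {
        SkCanvas* canvas = doc->beginPage(612, 792);  // US Letter at 72 DPI
        canvas->drawColor(SK_ColorWHITE);             // per-page drawing goes here
        doc->endPage();                               // 'canvas' is invalid after this
    }
    doc->close();  // finishes and flushes the stream
}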
diff --git a/gfx/skia/skia/include/core/SkDrawLooper.h b/gfx/skia/skia/include/core/SkDrawLooper.h
new file mode 100644
index 0000000000..ef06c8f7b8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawLooper.h
@@ -0,0 +1,136 @@
+
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawLooper_DEFINED
+#define SkDrawLooper_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPoint.h"
+#include <functional> // std::function
+
+class SkArenaAlloc;
+class SkCanvas;
+class SkPaint;
+struct SkRect;
+class SkString;
+
+/** \class SkDrawLooper
+    Subclasses of SkDrawLooper can be attached to a SkPaint. When one is, and
+    something is drawn to a canvas with that paint, the looper subclass is
+    called, allowing it to modify the canvas and/or paint for that draw call.
+    More than that, via the next() method, the looper can cause the draw to be
+    invoked multiple times (hence the name loop-er), allowing it to perform
+    effects such as shadows or frame/fill combinations that require more than
+    one pass.
+*/
+class SK_API SkDrawLooper : public SkFlattenable {
+public:
+ /**
+ * Holds state during a draw. Users call next() until it returns false.
+ *
+ * Subclasses of SkDrawLooper should create a subclass of this object to
+ * hold state specific to their subclass.
+ */
+ class SK_API Context {
+ public:
+ Context() {}
+ virtual ~Context() {}
+
+ struct Info {
+ SkVector fTranslate;
+ bool fApplyPostCTM;
+
+ void applyToCTM(SkMatrix* ctm) const;
+ void applyToCanvas(SkCanvas*) const;
+ };
+
+ /**
+         * Called in a loop on objects returned by SkDrawLooper::makeContext().
+ * Each time true is returned, the object is drawn (possibly with a modified
+ * canvas and/or paint). When false is finally returned, drawing for the object
+ * stops.
+ *
+ * On each call, the paint will be in its original state, but the
+ * canvas will be as it was following the previous call to next() or
+         * makeContext().
+ *
+ * The implementation must ensure that, when next() finally returns
+         * false, the canvas has been restored to the state it was in
+         * initially, before makeContext() was first called.
+ */
+ virtual bool next(Info*, SkPaint*) = 0;
+
+ private:
+ Context(const Context&) = delete;
+ Context& operator=(const Context&) = delete;
+ };
+
+ /**
+ * Called right before something is being drawn. Returns a Context
+ * whose next() method should be called until it returns false.
+ */
+ virtual Context* makeContext(SkArenaAlloc*) const = 0;
+
+ /**
+ * The fast bounds functions are used to enable the paint to be culled early
+ * in the drawing pipeline. If a subclass can support this feature it must
+ * return true for the canComputeFastBounds() function. If that function
+ * returns false then computeFastBounds behavior is undefined otherwise it
+ * is expected to have the following behavior. Given the parent paint and
+ * the parent's bounding rect the subclass must fill in and return the
+ * storage rect, where the storage rect is with the union of the src rect
+ * and the looper's bounding rect.
+ */
+ bool canComputeFastBounds(const SkPaint& paint) const;
+ void computeFastBounds(const SkPaint& paint, const SkRect& src, SkRect* dst) const;
+
+ struct BlurShadowRec {
+ SkScalar fSigma;
+ SkVector fOffset;
+ SkColor fColor;
+ SkBlurStyle fStyle;
+ };
+ /**
+ * If this looper can be interpreted as having two layers, such that
+ * 1. The first layer (bottom most) just has a blur and translate
+ * 2. The second layer has no modifications to either paint or canvas
+ * 3. No other layers.
+     *  then return true and, if the pointer is not null, fill out the BlurShadowRec.
+ *
+ * If any of the above are not met, return false and ignore the BlurShadowRec parameter.
+ */
+ virtual bool asABlurShadow(BlurShadowRec*) const;
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkDrawLooper_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkDrawLooper_Type;
+ }
+
+ static sk_sp<SkDrawLooper> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkDrawLooper>(static_cast<SkDrawLooper*>(
+ SkFlattenable::Deserialize(
+ kSkDrawLooper_Type, data, size, procs).release()));
+ }
+
+ void apply(SkCanvas* canvas, const SkPaint& paint,
+ std::function<void(SkCanvas*, const SkPaint&)>);
+
+protected:
+ SkDrawLooper() {}
+
+private:
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
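// Editorial example (a sketch, not upstream code): apply() drives the
// Context::next() loop for the caller, re-invoking the draw lambda once per
// iteration. 'looper' is assumed to be some concrete SkDrawLooper subclass.
#include "include/core/SkCanvas.h"
#include "include/core/SkDrawLooper.h"
#include "include/core/SkPaint.h"

static void draw_with_looper(SkCanvas* canvas, sk_sp<SkDrawLooper> looper) {
    SkPaint paint;
    paint.setAntiAlias(true);
    looper->apply(canvas, paint, [](SkCanvas* c, const SkPaint& p) {
        // May run more than once, e.g. a shadow pass followed by a fill pass.
        c->drawCircle(50, 50, 25, p);
    });
}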
diff --git a/gfx/skia/skia/include/core/SkDrawable.h b/gfx/skia/skia/include/core/SkDrawable.h
new file mode 100644
index 0000000000..8d605f80d6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawable.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawable_DEFINED
+#define SkDrawable_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkScalar.h"
+
+class GrBackendDrawableInfo;
+class SkCanvas;
+class SkMatrix;
+class SkPicture;
+enum class GrBackendApi : unsigned;
+struct SkRect;
+
+/**
+ * Base-class for objects that draw into SkCanvas.
+ *
+ * The object has a generation ID, which is guaranteed to be unique across all drawables. To
+ * allow for clients of the drawable that may want to cache the results, the drawable must
+ * change its generation ID whenever its internal state changes such that it will draw differently.
+ */
+class SK_API SkDrawable : public SkFlattenable {
+public:
+ /**
+     *  Draws into the specified canvas. The drawing sequence will be balanced upon return
+     *  (i.e. the saveLevel() on the canvas will match what it was when draw() was called,
+     *  and the current matrix and clip settings will not be changed).
+ */
+ void draw(SkCanvas*, const SkMatrix* = nullptr);
+ void draw(SkCanvas*, SkScalar x, SkScalar y);
+
+ /**
+ * When using the GPU backend it is possible for a drawable to execute using the underlying 3D
+ * API rather than the SkCanvas API. It does so by creating a GpuDrawHandler. The GPU backend
+ * is deferred so the handler will be given access to the 3D API at the correct point in the
+ * drawing stream as the GPU backend flushes. Since the drawable may mutate, each time it is
+ * drawn to a GPU-backed canvas a new handler is snapped, representing the drawable's state at
+ * the time of the snap.
+ *
+ * When the GPU backend flushes to the 3D API it will call the draw method on the
+ * GpuDrawHandler. At this time the drawable may add commands to the stream of GPU commands for
+     *  the underlying 3D API. The draw function takes a GrBackendDrawableInfo which contains
+ * information about the current state of 3D API which the caller must respect. See
+ * GrBackendDrawableInfo for more specific details on what information is sent and the
+ * requirements for different 3D APIs.
+ *
+     *  Additionally there may be a slight delay from when the drawable adds its commands to when
+ * those commands are actually submitted to the GPU. Thus the drawable or GpuDrawHandler is
+ * required to keep any resources that are used by its added commands alive and valid until
+ * those commands are submitted to the GPU. The GpuDrawHandler will be kept alive and then
+ * deleted once the commands are submitted to the GPU. The dtor of the GpuDrawHandler is the
+ * signal to the drawable that the commands have all been submitted. Different 3D APIs may have
+ * additional requirements for certain resources which require waiting for the GPU to finish
+ * all work on those resources before reusing or deleting them. In this case, the drawable can
+ * use the dtor call of the GpuDrawHandler to add a fence to the GPU to track when the GPU work
+ * has completed.
+ *
+ * Currently this is only supported for the GPU Vulkan backend.
+ */
+
+ class GpuDrawHandler {
+ public:
+ virtual ~GpuDrawHandler() {}
+
+ virtual void draw(const GrBackendDrawableInfo&) {}
+ };
+
+ /**
+ * Snaps off a GpuDrawHandler to represent the state of the SkDrawable at the time the snap is
+ * called. This is used for executing GPU backend specific draws intermixed with normal Skia GPU
+ * draws. The GPU API, which will be used for the draw, as well as the full matrix, device clip
+ * bounds and imageInfo of the target buffer are passed in as inputs.
+ */
+ std::unique_ptr<GpuDrawHandler> snapGpuDrawHandler(GrBackendApi backendApi,
+ const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ const SkImageInfo& bufferInfo) {
+ return this->onSnapGpuDrawHandler(backendApi, matrix, clipBounds, bufferInfo);
+ }
+
+ SkPicture* newPictureSnapshot();
+
+ /**
+ * Return a unique value for this instance. If two calls to this return the same value,
+ * it is presumed that calling the draw() method will render the same thing as well.
+ *
+ * Subclasses that change their state should call notifyDrawingChanged() to ensure that
+ * a new value will be returned the next time it is called.
+ */
+ uint32_t getGenerationID();
+
+ /**
+ * Return the (conservative) bounds of what the drawable will draw. If the drawable can
+ * change what it draws (e.g. animation or in response to some external change), then this
+ * must return a bounds that is always valid for all possible states.
+ */
+ SkRect getBounds();
+
+ /**
+ * Calling this invalidates the previous generation ID, and causes a new one to be computed
+ * the next time getGenerationID() is called. Typically this is called by the object itself,
+ * in response to its internal state changing.
+ */
+ void notifyDrawingChanged();
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkDrawable_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkDrawable_Type;
+ }
+
+ static sk_sp<SkDrawable> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkDrawable>(static_cast<SkDrawable*>(
+ SkFlattenable::Deserialize(
+ kSkDrawable_Type, data, size, procs).release()));
+ }
+
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return nullptr; }
+
+protected:
+ SkDrawable();
+
+ virtual SkRect onGetBounds() = 0;
+ virtual void onDraw(SkCanvas*) = 0;
+
+ virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&,
+ const SkIRect& /*clipBounds*/,
+ const SkImageInfo&) {
+ return nullptr;
+ }
+
+ // TODO: Delete this once Android gets updated to take the clipBounds version above.
+ virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&) {
+ return nullptr;
+ }
+
+ /**
+ * Default implementation calls onDraw() with a canvas that records into a picture. Subclasses
+ * may override if they have a more efficient way to return a picture for the current state
+ * of their drawable. Note: this picture must draw the same as what would be drawn from
+ * onDraw().
+ */
+ virtual SkPicture* onNewPictureSnapshot();
+
+private:
+ int32_t fGenerationID;
+};
+
+#endif
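// Editorial example (a sketch, not upstream code): a minimal SkDrawable
// subclass. Only onGetBounds() and onDraw() are required; the base class
// already stubs out getFactory()/getTypeName() above.
#include "include/core/SkCanvas.h"
#include "include/core/SkDrawable.h"
#include "include/core/SkPaint.h"
#include "include/core/SkRect.h"

class CircleDrawable : public SkDrawable {
public:
    void setRadius(SkScalar radius) {
        fRadius = radius;
        this->notifyDrawingChanged();  // invalidate the cached generation ID
    }

protected:
    SkRect onGetBounds() override {
        // Must stay conservative for every state this drawable can take.
        return SkRect::MakeWH(2 * fRadius, 2 * fRadius);
    }
    void onDraw(SkCanvas* canvas) override {
        SkPaint paint;
        paint.setAntiAlias(true);
        canvas->drawCircle(fRadius, fRadius, fRadius, paint);
    }

private:
    SkScalar fRadius = 20;
};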
diff --git a/gfx/skia/skia/include/core/SkEncodedImageFormat.h b/gfx/skia/skia/include/core/SkEncodedImageFormat.h
new file mode 100644
index 0000000000..d0a9e5e0ca
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkEncodedImageFormat.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedImageFormat_DEFINED
+#define SkEncodedImageFormat_DEFINED
+
+#include <stdint.h>
+
+/**
+ * Enum describing format of encoded data.
+ */
+enum class SkEncodedImageFormat {
+#ifdef SK_BUILD_FOR_GOOGLE3
+ kUnknown,
+#endif
+ kBMP,
+ kGIF,
+ kICO,
+ kJPEG,
+ kPNG,
+ kWBMP,
+ kWEBP,
+ kPKM,
+ kKTX,
+ kASTC,
+ kDNG,
+ kHEIF,
+};
+
+#endif // SkEncodedImageFormat_DEFINED
diff --git a/gfx/skia/skia/include/core/SkExecutor.h b/gfx/skia/skia/include/core/SkExecutor.h
new file mode 100644
index 0000000000..b7b823f49e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkExecutor.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkExecutor_DEFINED
+#define SkExecutor_DEFINED
+
+#include <functional>
+#include <memory>
+#include "include/core/SkTypes.h"
+
+class SK_API SkExecutor {
+public:
+ virtual ~SkExecutor();
+
+ // Create a thread pool SkExecutor with a fixed thread count, by default the number of cores.
+ static std::unique_ptr<SkExecutor> MakeFIFOThreadPool(int threads = 0);
+ static std::unique_ptr<SkExecutor> MakeLIFOThreadPool(int threads = 0);
+
+ // There is always a default SkExecutor available by calling SkExecutor::GetDefault().
+ static SkExecutor& GetDefault();
+ static void SetDefault(SkExecutor*); // Does not take ownership. Not thread safe.
+
+ // Add work to execute.
+ virtual void add(std::function<void(void)>) = 0;
+
+ // If it makes sense for this executor, use this thread to execute work for a little while.
+ virtual void borrow() {}
+};
+
+#endif // SkExecutor_DEFINED
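// Editorial example (a sketch, not upstream code): fanning work out to a
// fixed-size FIFO pool. Captured state must outlive the queued jobs; here the
// pool is destroyed (joining its workers) before 'done' goes out of scope.
#include <atomic>
#include "include/core/SkExecutor.h"

static int count_jobs() {
    std::atomic<int> done{0};
    {
        std::unique_ptr<SkExecutor> pool = SkExecutor::MakeFIFOThreadPool(4);
        for (int i = 0; i < 8; ++i) {
            pool->add([&done] { done.fetch_add(1); });
        }
    }   // destroying the pool stops its worker threads
    return done.load();
}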
diff --git a/gfx/skia/skia/include/core/SkFilterQuality.h b/gfx/skia/skia/include/core/SkFilterQuality.h
new file mode 100644
index 0000000000..1f895171f1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFilterQuality.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFilterQuality_DEFINED
+#define SkFilterQuality_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ *  Controls how much filtering is done when scaling/transforming complex colors,
+ *  e.g. images.
+ */
+enum SkFilterQuality {
+ kNone_SkFilterQuality, //!< fastest but lowest quality, typically nearest-neighbor
+ kLow_SkFilterQuality, //!< typically bilerp
+ kMedium_SkFilterQuality, //!< typically bilerp + mipmaps for down-scaling
+ kHigh_SkFilterQuality, //!< slowest but highest quality, typically bicubic or better
+
+ kLast_SkFilterQuality = kHigh_SkFilterQuality,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFlattenable.h b/gfx/skia/skia/include/core/SkFlattenable.h
new file mode 100644
index 0000000000..402517f2f4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFlattenable.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFlattenable_DEFINED
+#define SkFlattenable_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkData;
+class SkReadBuffer;
+class SkWriteBuffer;
+
+struct SkSerialProcs;
+struct SkDeserialProcs;
+
+/** \class SkFlattenable
+
+ SkFlattenable is the base class for objects that need to be flattened
+ into a data stream for either transport or as part of the key to the
+ font cache.
+ */
+class SK_API SkFlattenable : public SkRefCnt {
+public:
+ enum Type {
+ kSkColorFilter_Type,
+ kSkDrawable_Type,
+ kSkDrawLooper_Type,
+ kSkImageFilter_Type,
+ kSkMaskFilter_Type,
+ kSkPathEffect_Type,
+ kSkPixelRef_Type,
+ kSkUnused_Type4, // used to be SkRasterizer
+ kSkShaderBase_Type,
+ kSkUnused_Type, // used to be SkUnitMapper
+ kSkUnused_Type2,
+ kSkNormalSource_Type,
+ };
+
+ typedef sk_sp<SkFlattenable> (*Factory)(SkReadBuffer&);
+
+ SkFlattenable() {}
+
+ /** Implement this to return a factory function pointer that can be called
+ to recreate your class given a buffer (previously written to by your
+        override of flatten()).
+ */
+ virtual Factory getFactory() const = 0;
+
+ /**
+ * Returns the name of the object's class.
+ */
+ virtual const char* getTypeName() const = 0;
+
+ static Factory NameToFactory(const char name[]);
+ static const char* FactoryToName(Factory);
+
+ static void Register(const char name[], Factory);
+
+ /**
+ * Override this if your subclass needs to record data that it will need to recreate itself
+ * from its CreateProc (returned by getFactory()).
+ *
+     *  DEPRECATED public: will move to protected ... use serialize() instead
+ */
+ virtual void flatten(SkWriteBuffer&) const {}
+
+ virtual Type getFlattenableType() const = 0;
+
+ //
+ // public ways to serialize / deserialize
+ //
+ sk_sp<SkData> serialize(const SkSerialProcs* = nullptr) const;
+ size_t serialize(void* memory, size_t memory_size,
+ const SkSerialProcs* = nullptr) const;
+ static sk_sp<SkFlattenable> Deserialize(Type, const void* data, size_t length,
+ const SkDeserialProcs* procs = nullptr);
+
+protected:
+ class PrivateInitializer {
+ public:
+ static void InitEffects();
+ static void InitImageFilters();
+ };
+
+private:
+ static void RegisterFlattenablesIfNeeded();
+ static void Finalize();
+
+ friend class SkGraphics;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#define SK_REGISTER_FLATTENABLE(type) SkFlattenable::Register(#type, type::CreateProc)
+
+#define SK_FLATTENABLE_HOOKS(type) \
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \
+ friend class SkFlattenable::PrivateInitializer; \
+ Factory getFactory() const override { return type::CreateProc; } \
+ const char* getTypeName() const override { return #type; }
+
+#endif
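// Editorial example (a sketch, not upstream code): round-tripping any
// flattenable through the public serialize()/Deserialize() pair. 'fl' is
// assumed to be an instance of a registered subclass (e.g. some effect).
#include "include/core/SkData.h"
#include "include/core/SkFlattenable.h"

static sk_sp<SkFlattenable> clone_via_serialization(const SkFlattenable& fl) {
    sk_sp<SkData> data = fl.serialize();  // uses getFactory()/flatten()
    return SkFlattenable::Deserialize(fl.getFlattenableType(),
                                      data->data(), data->size());
}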
diff --git a/gfx/skia/skia/include/core/SkFont.h b/gfx/skia/skia/include/core/SkFont.h
new file mode 100644
index 0000000000..ba4dc37196
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFont.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFont_DEFINED
+#define SkFont_DEFINED
+
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypeface.h"
+
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkFontMetrics;
+
+/** \class SkFont
+ SkFont controls options applied when drawing and measuring text.
+*/
+class SK_API SkFont {
+public:
+ /** Whether edge pixels draw opaque or with partial transparency.
+ */
+ enum class Edging {
+ kAlias, //!< no transparent pixels on glyph edges
+ kAntiAlias, //!< may have transparent pixels on glyph edges
+ kSubpixelAntiAlias, //!< glyph positioned in pixel using transparency
+ };
+
+ /** Constructs SkFont with default values.
+
+ @return default initialized SkFont
+ */
+ SkFont();
+
+ /** Constructs SkFont with default values with SkTypeface and size in points.
+
+ @param typeface font and style used to draw and measure text
+ @param size typographic height of text
+ @return initialized SkFont
+ */
+ SkFont(sk_sp<SkTypeface> typeface, SkScalar size);
+
+ /** Constructs SkFont with default values with SkTypeface.
+
+ @param typeface font and style used to draw and measure text
+ @return initialized SkFont
+ */
+ explicit SkFont(sk_sp<SkTypeface> typeface);
+
+
+ /** Constructs SkFont with default values with SkTypeface and size in points,
+ horizontal scale, and horizontal skew. Horizontal scale emulates condensed
+ and expanded fonts. Horizontal skew emulates oblique fonts.
+
+ @param typeface font and style used to draw and measure text
+ @param size typographic height of text
+ @param scaleX text horizontal scale
+ @param skewX additional shear on x-axis relative to y-axis
+ @return initialized SkFont
+ */
+ SkFont(sk_sp<SkTypeface> typeface, SkScalar size, SkScalar scaleX, SkScalar skewX);
+
+
+ /** Compares SkFont and font, and returns true if they are equivalent.
+ May return false if SkTypeface has identical contents but different pointers.
+
+ @param font font to compare
+ @return true if SkFont pair are equivalent
+ */
+ bool operator==(const SkFont& font) const;
+
+ /** Compares SkFont and font, and returns true if they are not equivalent.
+ May return true if SkTypeface has identical contents but different pointers.
+
+ @param font font to compare
+ @return true if SkFont pair are not equivalent
+ */
+ bool operator!=(const SkFont& font) const { return !(*this == font); }
+
+ /** If true, instructs the font manager to always hint glyphs.
+ Returned value is only meaningful if platform uses FreeType as the font manager.
+
+ @return true if all glyphs are hinted
+ */
+ bool isForceAutoHinting() const { return SkToBool(fFlags & kForceAutoHinting_PrivFlag); }
+
+ /** Returns true if font engine may return glyphs from font bitmaps instead of from outlines.
+
+ @return true if glyphs may be font bitmaps
+ */
+ bool isEmbeddedBitmaps() const { return SkToBool(fFlags & kEmbeddedBitmaps_PrivFlag); }
+
+ /** Returns true if glyphs may be drawn at sub-pixel offsets.
+
+ @return true if glyphs may be drawn at sub-pixel offsets.
+ */
+ bool isSubpixel() const { return SkToBool(fFlags & kSubpixel_PrivFlag); }
+
+ /** Returns true if font and glyph metrics are requested to be linearly scalable.
+
+ @return true if font and glyph metrics are requested to be linearly scalable.
+ */
+ bool isLinearMetrics() const { return SkToBool(fFlags & kLinearMetrics_PrivFlag); }
+
+ /** Returns true if bold is approximated by increasing the stroke width when creating glyph
+ bitmaps from outlines.
+
+ @return bold is approximated through stroke width
+ */
+ bool isEmbolden() const { return SkToBool(fFlags & kEmbolden_PrivFlag); }
+
+ /** Returns true if baselines will be snapped to pixel positions when the current transformation
+ matrix is axis aligned.
+
+ @return baselines may be snapped to pixels
+ */
+ bool isBaselineSnap() const { return SkToBool(fFlags & kBaselineSnap_PrivFlag); }
+
+ /** Sets whether to always hint glyphs.
+ If forceAutoHinting is set, instructs the font manager to always hint glyphs.
+
+ Only affects platforms that use FreeType as the font manager.
+
+ @param forceAutoHinting setting to always hint glyphs
+ */
+ void setForceAutoHinting(bool forceAutoHinting);
+
+    /** Requests, but does not require, the use of bitmaps in fonts instead of outlines.
+
+ @param embeddedBitmaps setting to use bitmaps in fonts
+ */
+ void setEmbeddedBitmaps(bool embeddedBitmaps);
+
+ /** Requests, but does not require, that glyphs respect sub-pixel positioning.
+
+ @param subpixel setting for sub-pixel positioning
+ */
+ void setSubpixel(bool subpixel);
+
+ /** Requests, but does not require, linearly scalable font and glyph metrics.
+
+ For outline fonts 'true' means font and glyph metrics should ignore hinting and rounding.
+ Note that some bitmap formats may not be able to scale linearly and will ignore this flag.
+
+ @param linearMetrics setting for linearly scalable font and glyph metrics.
+ */
+ void setLinearMetrics(bool linearMetrics);
+
+ /** Increases stroke width when creating glyph bitmaps to approximate a bold typeface.
+
+ @param embolden setting for bold approximation
+ */
+ void setEmbolden(bool embolden);
+
+ /** Requests that baselines be snapped to pixels when the current transformation matrix is axis
+ aligned.
+
+ @param baselineSnap setting for baseline snapping to pixels
+ */
+ void setBaselineSnap(bool baselineSnap);
+
+ /** Whether edge pixels draw opaque or with partial transparency.
+
+ @return one of: Edging::kAlias, Edging::kAntiAlias, Edging::kSubpixelAntiAlias
+ */
+ Edging getEdging() const { return (Edging)fEdging; }
+
+ /** Requests, but does not require, that edge pixels draw opaque or with
+ partial transparency.
+
+ @param edging one of: Edging::kAlias, Edging::kAntiAlias, Edging::kSubpixelAntiAlias
+ */
+ void setEdging(Edging edging);
+
+ /** Sets level of glyph outline adjustment.
+ Does not check for valid values of hintingLevel.
+
+ @param hintingLevel one of: SkFontHinting::kNone, SkFontHinting::kSlight,
+ SkFontHinting::kNormal, SkFontHinting::kFull
+ */
+ void setHinting(SkFontHinting hintingLevel);
+
+ /** Returns level of glyph outline adjustment.
+
+ @return one of: SkFontHinting::kNone, SkFontHinting::kSlight, SkFontHinting::kNormal,
+ SkFontHinting::kFull
+ */
+ SkFontHinting getHinting() const { return (SkFontHinting)fHinting; }
+
+    /** Returns a font with the same attributes as this font, but with the specified size.
+        Per setSize(), a size that is not greater than or equal to zero leaves the size
+        unchanged in the returned font.
+
+ @param size typographic height of text
+ @return initialized SkFont
+ */
+ SkFont makeWithSize(SkScalar size) const;
+
+ /** Returns SkTypeface if set, or nullptr.
+ Does not alter SkTypeface SkRefCnt.
+
+ @return SkTypeface if previously set, nullptr otherwise
+ */
+    SkTypeface* getTypeface() const { return fTypeface.get(); }
+
+ /** Returns SkTypeface if set, or the default typeface.
+ Does not alter SkTypeface SkRefCnt.
+
+        @return SkTypeface if previously set, or a pointer to the default typeface if not
+ previously set.
+ */
+ SkTypeface* getTypefaceOrDefault() const;
+
+ /** Returns text size in points.
+
+ @return typographic height of text
+ */
+ SkScalar getSize() const { return fSize; }
+
+ /** Returns text scale on x-axis.
+ Default value is 1.
+
+ @return text horizontal scale
+ */
+ SkScalar getScaleX() const { return fScaleX; }
+
+ /** Returns text skew on x-axis.
+ Default value is zero.
+
+ @return additional shear on x-axis relative to y-axis
+ */
+ SkScalar getSkewX() const { return fSkewX; }
+
+ /** Increases SkTypeface SkRefCnt by one.
+
+ @return SkTypeface if previously set, nullptr otherwise
+ */
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+ /** Increases SkTypeface SkRefCnt by one.
+
+        @return SkTypeface if previously set, or a pointer to the default typeface if not
+ previously set.
+ */
+ sk_sp<SkTypeface> refTypefaceOrDefault() const;
+
+    /** Sets SkTypeface to tf, decreasing SkRefCnt of the previous SkTypeface.
+ Pass nullptr to clear SkTypeface and use the default typeface. Increments
+ tf SkRefCnt by one.
+
+ @param tf font and style used to draw text
+ */
+ void setTypeface(sk_sp<SkTypeface> tf) { fTypeface = tf; }
+
+ /** Sets text size in points.
+ Has no effect if textSize is not greater than or equal to zero.
+
+ @param textSize typographic height of text
+ */
+ void setSize(SkScalar textSize);
+
+ /** Sets text scale on x-axis.
+ Default value is 1.
+
+ @param scaleX text horizontal scale
+ */
+ void setScaleX(SkScalar scaleX);
+
+ /** Sets text skew on x-axis.
+ Default value is zero.
+
+ @param skewX additional shear on x-axis relative to y-axis
+ */
+ void setSkewX(SkScalar skewX);
+
+ /** Converts text into glyph indices.
+ Returns the number of glyph indices represented by text.
+ SkTextEncoding specifies how text represents characters or glyphs.
+ glyphs may be nullptr, to compute the glyph count.
+
+ Does not check text for valid character codes or valid glyph indices.
+
+ If byteLength equals zero, returns zero.
+ If byteLength includes a partial character, the partial character is ignored.
+
+ If encoding is SkTextEncoding::kUTF8 and text contains an invalid UTF-8 sequence,
+ zero is returned.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32; then each Unicode codepoint is mapped to a
+ single glyph. This function uses the default character-to-glyph
+ mapping from the SkTypeface and maps characters not found in the
+ SkTypeface to zero.
+
+ If maxGlyphCount is not sufficient to store all the glyphs, no glyphs are copied.
+ The total glyph count is returned for subsequent buffer reallocation.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param encoding one of: SkTextEncoding::kUTF8, SkTextEncoding::kUTF16,
+ SkTextEncoding::kUTF32, SkTextEncoding::kGlyphID
+ @param glyphs storage for glyph indices; may be nullptr
+ @param maxGlyphCount storage capacity
+ @return number of glyphs represented by text of length byteLength
+ */
+ int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const;
+
+ /** Returns glyph index for Unicode character.
+
+ If the character is not supported by the SkTypeface, returns 0.
+
+ @param uni Unicode character
+ @return glyph index
+ */
+ SkGlyphID unicharToGlyph(SkUnichar uni) const;
+
+ void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const;
+
+ /** Returns number of glyphs represented by text.
+
+ If encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+        SkTextEncoding::kUTF32, then each Unicode codepoint is mapped to a
+ single glyph.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param encoding one of: SkTextEncoding::kUTF8, SkTextEncoding::kUTF16,
+ SkTextEncoding::kUTF32, SkTextEncoding::kGlyphID
+ @return number of glyphs represented by text of length byteLength
+ */
+ int countText(const void* text, size_t byteLength, SkTextEncoding encoding) const {
+ return this->textToGlyphs(text, byteLength, encoding, nullptr, 0);
+ }
+
+ /** Returns the advance width of text.
+ The advance is the normal distance to move before drawing additional text.
+ Returns the bounding box of text if bounds is not nullptr.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param encoding one of: SkTextEncoding::kUTF8, SkTextEncoding::kUTF16,
+ SkTextEncoding::kUTF32, SkTextEncoding::kGlyphID
+ @param bounds returns bounding box relative to (0, 0) if not nullptr
+ @return number of glyphs represented by text of length byteLength
+ */
+ SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkRect* bounds = nullptr) const {
+ return this->measureText(text, byteLength, encoding, bounds, nullptr);
+ }
+
+ /** Returns the advance width of text.
+ The advance is the normal distance to move before drawing additional text.
+        Returns the bounding box of text if bounds is not nullptr. The paint
+        stroke width or SkPathEffect may modify the advance width.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param encoding one of: SkTextEncoding::kUTF8, SkTextEncoding::kUTF16,
+ SkTextEncoding::kUTF32, SkTextEncoding::kGlyphID
+ @param bounds returns bounding box relative to (0, 0) if not nullptr
+ @param paint optional; may be nullptr
+ @return number of glyphs represented by text of length byteLength
+ */
+ SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const;
+
+ /** DEPRECATED
+ Retrieves the advance and bounds for each glyph in glyphs.
+ Both widths and bounds may be nullptr.
+ If widths is not nullptr, widths must be an array of count entries.
+        If bounds is not nullptr, bounds must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph; may be nullptr
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ */
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[]) const {
+ this->getWidthsBounds(glyphs, count, widths, bounds, nullptr);
+ }
+
+ // DEPRECATED
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], std::nullptr_t) const {
+ this->getWidths(glyphs, count, widths);
+ }
+
+    /** Retrieves the advance for each glyph in glyphs.
+        widths must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph
+ */
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[]) const {
+ this->getWidthsBounds(glyphs, count, widths, nullptr, nullptr);
+ }
+
+ /** Retrieves the advance and bounds for each glyph in glyphs.
+ Both widths and bounds may be nullptr.
+ If widths is not nullptr, widths must be an array of count entries.
+        If bounds is not nullptr, bounds must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph; may be nullptr
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ @param paint optional, specifies stroking, SkPathEffect and SkMaskFilter
+ */
+ void getWidthsBounds(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[],
+ const SkPaint* paint) const;
+
+
+ /** Retrieves the bounds for each glyph in glyphs.
+ bounds must be an array of count entries.
+ If paint is not nullptr, its stroking, SkPathEffect, and SkMaskFilter fields are respected.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ @param paint optional, specifies stroking, SkPathEffect, and SkMaskFilter
+ */
+ void getBounds(const SkGlyphID glyphs[], int count, SkRect bounds[],
+ const SkPaint* paint) const {
+ this->getWidthsBounds(glyphs, count, nullptr, bounds, paint);
+ }
+
+ /** Retrieves the positions for each glyph, beginning at the specified origin. The caller
+        must allocate at least count elements in the pos[] array.
+
+ @param glyphs array of glyph indices to be positioned
+ @param count number of glyphs
+ @param pos returns glyphs positions
+ @param origin location of the first glyph. Defaults to {0, 0}.
+ */
+ void getPos(const SkGlyphID glyphs[], int count, SkPoint pos[], SkPoint origin = {0, 0}) const;
+
+ /** Retrieves the x-positions for each glyph, beginning at the specified origin. The caller
+        must allocate at least count elements in the xpos[] array.
+
+ @param glyphs array of glyph indices to be positioned
+ @param count number of glyphs
+ @param xpos returns glyphs x-positions
+ @param origin x-position of the first glyph. Defaults to 0.
+ */
+ void getXPos(const SkGlyphID glyphs[], int count, SkScalar xpos[], SkScalar origin = 0) const;
+
+ /** Returns path corresponding to glyph outline.
+ If glyph has an outline, copies outline to path and returns true.
+ path returned may be empty.
+ If glyph is described by a bitmap, returns false and ignores path parameter.
+
+ @param glyphID index of glyph
+ @param path pointer to existing SkPath
+ @return true if glyphID is described by path
+ */
+ bool getPath(SkGlyphID glyphID, SkPath* path) const;
+
+ /** Returns path corresponding to glyph array.
+
+ @param glyphIDs array of glyph indices
+ @param count number of glyphs
+ @param glyphPathProc function returning one glyph description as path
+ @param ctx function context
+ */
+ void getPaths(const SkGlyphID glyphIDs[], int count,
+ void (*glyphPathProc)(const SkPath* pathOrNull, const SkMatrix& mx, void* ctx),
+ void* ctx) const;
+
+ /** Returns SkFontMetrics associated with SkTypeface.
+ The return value is the recommended spacing between lines: the sum of metrics
+ descent, ascent, and leading.
+ If metrics is not nullptr, SkFontMetrics is copied to metrics.
+ Results are scaled by text size but does not take into account
+ dimensions required by text scale, text skew, fake bold,
+ style stroke, and SkPathEffect.
+
+ @param metrics storage for SkFontMetrics; may be nullptr
+ @return recommended spacing between lines
+ */
+ SkScalar getMetrics(SkFontMetrics* metrics) const;
+
+ /** Returns the recommended spacing between lines: the sum of metrics
+ descent, ascent, and leading.
+ Result is scaled by text size but does not take into account
+ dimensions required by stroking and SkPathEffect.
+ Returns the same result as getMetrics().
+
+ @return recommended spacing between lines
+ */
+ SkScalar getSpacing() const { return this->getMetrics(nullptr); }
+
+ /** Dumps fields of the font to SkDebugf. May change its output over time, so clients should
+ * not rely on this for anything specific. Used to aid in debugging.
+ */
+ void dump() const;
+
+private:
+ enum PrivFlags {
+ kForceAutoHinting_PrivFlag = 1 << 0,
+ kEmbeddedBitmaps_PrivFlag = 1 << 1,
+ kSubpixel_PrivFlag = 1 << 2,
+ kLinearMetrics_PrivFlag = 1 << 3,
+ kEmbolden_PrivFlag = 1 << 4,
+ kBaselineSnap_PrivFlag = 1 << 5,
+ };
+
+ static constexpr unsigned kAllFlags = kForceAutoHinting_PrivFlag
+ | kEmbeddedBitmaps_PrivFlag
+ | kSubpixel_PrivFlag
+ | kLinearMetrics_PrivFlag
+ | kEmbolden_PrivFlag
+ | kBaselineSnap_PrivFlag;
+
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fSize;
+ SkScalar fScaleX;
+ SkScalar fSkewX;
+ uint8_t fFlags;
+ uint8_t fEdging;
+ uint8_t fHinting;
+
+ SkScalar setupForAsPaths(SkPaint*);
+ bool hasSomeAntiAliasing() const;
+
+ friend class GrTextBlob;
+ friend class SkFontPriv;
+ friend class SkGlyphRunListPainter;
+ friend class SkTextBlobCacheDiffCanvas;
+ friend class SkStrikeSpec;
+};
+
+#endif
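// Editorial example (a sketch, not upstream code): the count-then-fill
// pattern for converting UTF-8 to glyphs, plus per-glyph advances and an
// overall measurement.
#include <vector>
#include "include/core/SkFont.h"
#include "include/core/SkRect.h"

static SkScalar measure_utf8(const SkFont& font, const char* text, size_t len) {
    // First call sizes the buffer (glyphs == nullptr); second call fills it.
    int count = font.countText(text, len, SkTextEncoding::kUTF8);
    std::vector<SkGlyphID> glyphs(count);
    font.textToGlyphs(text, len, SkTextEncoding::kUTF8, glyphs.data(), count);

    std::vector<SkScalar> advances(count);
    font.getWidths(glyphs.data(), count, advances.data());  // per-glyph widths

    SkRect bounds;  // conservative box around the run, relative to (0, 0)
    return font.measureText(text, len, SkTextEncoding::kUTF8, &bounds);
}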
diff --git a/gfx/skia/skia/include/core/SkFontArguments.h b/gfx/skia/skia/include/core/SkFontArguments.h
new file mode 100644
index 0000000000..c17a12014e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontArguments.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontArguments_DEFINED
+#define SkFontArguments_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+/** Represents a set of actual arguments for a font. */
+struct SkFontArguments {
+ struct VariationPosition {
+ struct Coordinate {
+ SkFourByteTag axis;
+ float value;
+ };
+ const Coordinate* coordinates;
+ int coordinateCount;
+ };
+ // deprecated, use VariationPosition::Coordinate instead
+ struct Axis {
+ SkFourByteTag fTag;
+ float fStyleValue;
+ };
+
+ SkFontArguments() : fCollectionIndex(0), fVariationDesignPosition{nullptr, 0} {}
+
+ /** Specify the index of the desired font.
+ *
+ * Font formats like ttc, dfont, cff, cid, pfr, t42, t1, and fon may actually be indexed
+ * collections of fonts.
+ */
+ SkFontArguments& setCollectionIndex(int collectionIndex) {
+ fCollectionIndex = collectionIndex;
+ return *this;
+ }
+
+ // deprecated, use setVariationDesignPosition instead.
+ SkFontArguments& setAxes(const Axis* axes, int axisCount) {
+ fVariationDesignPosition.coordinates =
+ reinterpret_cast<const VariationPosition::Coordinate*>(axes);
+ fVariationDesignPosition.coordinateCount = axisCount;
+ return *this;
+ }
+
+ /** Specify a position in the variation design space.
+ *
+ * Any axis not specified will use the default value.
+ * Any specified axis not actually present in the font will be ignored.
+ *
+     *  @param position not copied. The value must remain valid for the life of SkFontArguments.
+ */
+ SkFontArguments& setVariationDesignPosition(VariationPosition position) {
+ fVariationDesignPosition.coordinates = position.coordinates;
+ fVariationDesignPosition.coordinateCount = position.coordinateCount;
+ return *this;
+ }
+
+ int getCollectionIndex() const {
+ return fCollectionIndex;
+ }
+ // deprecated, use getVariationDesignPosition instead.
+ const Axis* getAxes(int* axisCount) const {
+ *axisCount = fVariationDesignPosition.coordinateCount;
+ return reinterpret_cast<const Axis*>(fVariationDesignPosition.coordinates);
+ }
+ VariationPosition getVariationDesignPosition() const {
+ return fVariationDesignPosition;
+ }
+private:
+ int fCollectionIndex;
+ VariationPosition fVariationDesignPosition;
+};
+
+#endif
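// Editorial example (a sketch, not upstream code): selecting a weight on a
// variable font. The coordinate array is not copied, so it is kept static to
// outlive the returned SkFontArguments.
#include "include/core/SkFontArguments.h"

static SkFontArguments bold_variation_args() {
    static const SkFontArguments::VariationPosition::Coordinate coords[] = {
        { SkSetFourByteTag('w', 'g', 'h', 't'), 700.0f },  // 'wght' axis
    };
    SkFontArguments args;
    args.setCollectionIndex(0)
        .setVariationDesignPosition({coords, 1});
    return args;
}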
diff --git a/gfx/skia/skia/include/core/SkFontLCDConfig.h b/gfx/skia/skia/include/core/SkFontLCDConfig.h
new file mode 100644
index 0000000000..1d40b82fe7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontLCDConfig.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontLCDConfig_DEFINED
+#define SkFontLCDConfig_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SK_API SkFontLCDConfig {
+public:
+    /** LCDs have their color elements arranged either horizontally or
+ vertically. When rendering subpixel glyphs we need to know which way
+ round they are.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+
+ @deprecated use SkPixelGeometry instead.
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+ kVertical_LCDOrientation = 1,
+ };
+
+ /** @deprecated set on Device creation. */
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+ /** @deprecated get from Device. */
+ static LCDOrientation GetSubpixelOrientation();
+
+ /** LCD color elements can vary in order. For subpixel text we need to know
+        the order which the LCD uses so that the color fringes are in the
+ correct place.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+
+ kNONE_LCDOrder means that the subpixel elements are not spatially
+ separated in any usable fashion.
+
+ @deprecated use SkPixelGeometry instead.
+ */
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+ kNONE_LCDOrder = 2,
+ };
+
+ /** @deprecated set on Device creation. */
+ static void SetSubpixelOrder(LCDOrder order);
+ /** @deprecated get from Device. */
+ static LCDOrder GetSubpixelOrder();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontMetrics.h b/gfx/skia/skia/include/core/SkFontMetrics.h
new file mode 100644
index 0000000000..6618577efe
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontMetrics.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMetrics_DEFINED
+#define SkFontMetrics_DEFINED
+
+#include "include/core/SkScalar.h"
+
+/** \class SkFontMetrics
+ The metrics of an SkFont.
+ The metric values are consistent with the Skia y-down coordinate system.
+ */
+struct SK_API SkFontMetrics {
+
+ /** \enum FontMetricsFlags
+ FontMetricsFlags indicate when certain metrics are valid;
+ the underline or strikeout metrics may be valid and zero.
+ Fonts with embedded bitmaps may not have valid underline or strikeout metrics.
+ */
+ enum FontMetricsFlags {
+ kUnderlineThicknessIsValid_Flag = 1 << 0, //!< set if fUnderlineThickness is valid
+ kUnderlinePositionIsValid_Flag = 1 << 1, //!< set if fUnderlinePosition is valid
+ kStrikeoutThicknessIsValid_Flag = 1 << 2, //!< set if fStrikeoutThickness is valid
+ kStrikeoutPositionIsValid_Flag = 1 << 3, //!< set if fStrikeoutPosition is valid
+ };
+
+ uint32_t fFlags; //!< FontMetricsFlags indicating which metrics are valid
+ SkScalar fTop; //!< greatest extent above origin of any glyph bounding box, typically negative; deprecated with variable fonts
+ SkScalar fAscent; //!< distance to reserve above baseline, typically negative
+ SkScalar fDescent; //!< distance to reserve below baseline, typically positive
+ SkScalar fBottom; //!< greatest extent below origin of any glyph bounding box, typically positive; deprecated with variable fonts
+ SkScalar fLeading; //!< distance to add between lines, typically positive or zero
+ SkScalar fAvgCharWidth; //!< average character width, zero if unknown
+ SkScalar fMaxCharWidth; //!< maximum character width, zero if unknown
+ SkScalar fXMin; //!< greatest extent to left of origin of any glyph bounding box, typically negative; deprecated with variable fonts
+ SkScalar fXMax; //!< greatest extent to right of origin of any glyph bounding box, typically positive; deprecated with variable fonts
+ SkScalar fXHeight; //!< height of lower-case 'x', zero if unknown, typically negative
+ SkScalar fCapHeight; //!< height of an upper-case letter, zero if unknown, typically negative
+ SkScalar fUnderlineThickness; //!< underline thickness
+ SkScalar fUnderlinePosition; //!< distance from baseline to top of stroke, typically positive
+ SkScalar fStrikeoutThickness; //!< strikeout thickness
+ SkScalar fStrikeoutPosition; //!< distance from baseline to bottom of stroke, typically negative
+
+ /** Returns true if SkFontMetrics has a valid underline thickness, and sets
+ thickness to that value. If the underline thickness is not valid,
+ return false, and ignore thickness.
+
+ @param thickness storage for underline width
+ @return true if font specifies underline width
+ */
+ bool hasUnderlineThickness(SkScalar* thickness) const {
+ if (SkToBool(fFlags & kUnderlineThicknessIsValid_Flag)) {
+ *thickness = fUnderlineThickness;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid underline position, and sets
+ position to that value. If the underline position is not valid,
+ return false, and ignore position.
+
+ @param position storage for underline position
+ @return true if font specifies underline position
+ */
+ bool hasUnderlinePosition(SkScalar* position) const {
+ if (SkToBool(fFlags & kUnderlinePositionIsValid_Flag)) {
+ *position = fUnderlinePosition;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid strikeout thickness, and sets
+        thickness to that value. If the strikeout thickness is not valid,
+ return false, and ignore thickness.
+
+ @param thickness storage for strikeout width
+ @return true if font specifies strikeout width
+ */
+ bool hasStrikeoutThickness(SkScalar* thickness) const {
+ if (SkToBool(fFlags & kStrikeoutThicknessIsValid_Flag)) {
+ *thickness = fStrikeoutThickness;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid strikeout position, and sets
+        position to that value. If the strikeout position is not valid,
+ return false, and ignore position.
+
+ @param position storage for strikeout position
+ @return true if font specifies strikeout position
+ */
+ bool hasStrikeoutPosition(SkScalar* position) const {
+ if (SkToBool(fFlags & kStrikeoutPositionIsValid_Flag)) {
+ *position = fStrikeoutPosition;
+ return true;
+ }
+ return false;
+ }
+
+};
+
+#endif
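// Editorial example (a sketch, not upstream code): building an underline rect
// from the metrics, with fallbacks when the font does not supply valid
// underline values. The fallback heuristics are assumptions, not Skia policy.
#include "include/core/SkFont.h"
#include "include/core/SkFontMetrics.h"
#include "include/core/SkRect.h"

static SkRect underline_rect(const SkFont& font, SkScalar x,
                             SkScalar baseline, SkScalar advance) {
    SkFontMetrics metrics;
    font.getMetrics(&metrics);

    SkScalar thickness, position;
    if (!metrics.hasUnderlineThickness(&thickness)) {
        thickness = font.getSize() / 18;  // fallback guess
    }
    if (!metrics.hasUnderlinePosition(&position)) {
        position = thickness;             // just below the baseline (y-down)
    }
    return SkRect::MakeXYWH(x, baseline + position, advance, thickness);
}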
diff --git a/gfx/skia/skia/include/core/SkFontMgr.h b/gfx/skia/skia/include/core/SkFontMgr.h
new file mode 100644
index 0000000000..39a6bab36e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontMgr.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_DEFINED
+#define SkFontMgr_DEFINED
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkData;
+class SkFontData;
+class SkStreamAsset;
+class SkString;
+class SkTypeface;
+
+class SK_API SkFontStyleSet : public SkRefCnt {
+public:
+ virtual int count() = 0;
+ virtual void getStyle(int index, SkFontStyle*, SkString* style) = 0;
+ virtual SkTypeface* createTypeface(int index) = 0;
+ virtual SkTypeface* matchStyle(const SkFontStyle& pattern) = 0;
+
+ static SkFontStyleSet* CreateEmpty();
+
+protected:
+ SkTypeface* matchStyleCSS3(const SkFontStyle& pattern);
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+class SK_API SkFontMgr : public SkRefCnt {
+public:
+ int countFamilies() const;
+ void getFamilyName(int index, SkString* familyName) const;
+ SkFontStyleSet* createStyleSet(int index) const;
+
+ /**
+ * The caller must call unref() on the returned object.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * Passing nullptr as the parameter will return the default system family.
+ * Note that most systems don't have a default system family, so passing nullptr will often
+ * result in the empty set.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) due to hidden or auto-activated fonts.
+ */
+ SkFontStyleSet* matchFamily(const char familyName[]) const;
+
+ /**
+ * Find the closest matching typeface to the specified familyName and style
+ * and return a ref to it. The caller must call unref() on the returned
+ * object. Will return nullptr if no 'good' match is found.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) or matchFamily(const char[]) due to hidden or
+ * auto-activated fonts.
+ */
+ SkTypeface* matchFamilyStyle(const char familyName[], const SkFontStyle&) const;
+
+ /**
+ * Use the system fallback to find a typeface for the given character.
+ * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+ * so it is fine to just pass an ISO 639 code here.
+ *
+ * Will return NULL if no family can be found for the character
+ * in the system fallback.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * bcp47[0] is the least significant fallback, bcp47[bcp47Count-1] is the
+ * most significant. If no specified bcp47 codes match, any font with the
+ * requested character will be matched.
+ */
+ SkTypeface* matchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const;
+
+ SkTypeface* matchFaceStyle(const SkTypeface*, const SkFontStyle&) const;
+
+ /**
+ * Create a typeface for the specified data and TTC index (pass 0 for none)
+ * or NULL if the data is not recognized. The caller must call unref() on
+ * the returned object if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromData(sk_sp<SkData>, int ttcIndex = 0) const;
+
+ /**
+ * Create a typeface for the specified stream and TTC index
+ * (pass 0 for none) or NULL if the stream is not recognized. The caller
+ * must call unref() on the returned object if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, int ttcIndex = 0) const;
+
+ /* Experimental, API subject to change. */
+ sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const;
+
+ /**
+ * Create a typeface from the specified font data.
+ * Will return NULL if the typeface could not be created.
+ * The caller must call unref() on the returned object if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromFontData(std::unique_ptr<SkFontData>) const;
+
+ /**
+ * Create a typeface for the specified fileName and TTC index
+ * (pass 0 for none) or NULL if the file is not found, or its contents are
+ * not recognized. The caller must call unref() on the returned object
+ * if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromFile(const char path[], int ttcIndex = 0) const;
+
+ sk_sp<SkTypeface> legacyMakeTypeface(const char familyName[], SkFontStyle style) const;
+
+ /** Return the default fontmgr. */
+ static sk_sp<SkFontMgr> RefDefault();
+
+protected:
+ virtual int onCountFamilies() const = 0;
+ virtual void onGetFamilyName(int index, SkString* familyName) const = 0;
+ virtual SkFontStyleSet* onCreateStyleSet(int index) const = 0;
+
+ /** May return NULL if the name is not found. */
+ virtual SkFontStyleSet* onMatchFamily(const char familyName[]) const = 0;
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle&) const = 0;
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const = 0;
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface*,
+ const SkFontStyle&) const = 0;
+
+ virtual sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const = 0;
+ virtual sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const = 0;
+ virtual sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const;
+ virtual sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData>) const;
+ virtual sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const = 0;
+
+ virtual sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const = 0;
+
+private:
+
+ /** Implemented by porting layer to return the default factory. */
+ static sk_sp<SkFontMgr> Factory();
+
+ typedef SkRefCnt INHERITED;
+};
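+
+/* Example (sketch): resolving a typeface through the default manager; the
+ family name and the fallback below are illustrative assumptions.
+
+ sk_sp<SkFontMgr> mgr = SkFontMgr::RefDefault();
+ // matchFamilyStyle() returns an owned ref, which sk_sp adopts.
+ sk_sp<SkTypeface> face(mgr->matchFamilyStyle("Helvetica", SkFontStyle::Bold()));
+ if (!face) {
+ face = mgr->legacyMakeTypeface(nullptr, SkFontStyle());
+ }
+*/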
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontParameters.h b/gfx/skia/skia/include/core/SkFontParameters.h
new file mode 100644
index 0000000000..3bcb7869b7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontParameters.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontParameters_DEFINED
+#define SkFontParameters_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+struct SkFontParameters {
+ struct Variation {
+ // Parameters in a variation font axis.
+ struct Axis {
+ // Four character identifier of the font axis (weight, width, slant, italic...).
+ SkFourByteTag tag;
+ // Minimum value supported by this axis.
+ float min;
+ // Default value set by this axis.
+ float def;
+ // Maximum value supported by this axis. The maximum can equal the minimum.
+ float max;
+ // Return whether this axis is recommended to remain hidden in user interfaces.
+ bool isHidden() const { return flags & HIDDEN; }
+ // Set whether this axis should remain hidden in user interfaces.
+ void setHidden(bool hidden) { flags = hidden ? (flags | HIDDEN) : (flags & ~HIDDEN); }
+ private:
+ static constexpr uint16_t HIDDEN = 0x0001;
+ // Attributes for a font axis.
+ uint16_t flags;
+ };
+ };
+};
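+
+/* Example (sketch): enumerating a variable font's design axes; assumes a valid
+ sk_sp<SkTypeface> `face` and the getVariationDesignParameters() entry point
+ declared in SkTypeface.h.
+
+ int count = face->getVariationDesignParameters(nullptr, 0);
+ std::vector<SkFontParameters::Variation::Axis> axes(count);
+ face->getVariationDesignParameters(axes.data(), count);
+ for (const auto& axis : axes) {
+ if (!axis.isHidden()) {
+ // expose axis.tag over [axis.min, axis.max], defaulting to axis.def
+ }
+ }
+*/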
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontStyle.h b/gfx/skia/skia/include/core/SkFontStyle.h
new file mode 100644
index 0000000000..50b5bd026d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontStyle.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStyle_DEFINED
+#define SkFontStyle_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SK_API SkFontStyle {
+public:
+ enum Weight {
+ kInvisible_Weight = 0,
+ kThin_Weight = 100,
+ kExtraLight_Weight = 200,
+ kLight_Weight = 300,
+ kNormal_Weight = 400,
+ kMedium_Weight = 500,
+ kSemiBold_Weight = 600,
+ kBold_Weight = 700,
+ kExtraBold_Weight = 800,
+ kBlack_Weight = 900,
+ kExtraBlack_Weight = 1000,
+ };
+
+ enum Width {
+ kUltraCondensed_Width = 1,
+ kExtraCondensed_Width = 2,
+ kCondensed_Width = 3,
+ kSemiCondensed_Width = 4,
+ kNormal_Width = 5,
+ kSemiExpanded_Width = 6,
+ kExpanded_Width = 7,
+ kExtraExpanded_Width = 8,
+ kUltraExpanded_Width = 9,
+ };
+
+ enum Slant {
+ kUpright_Slant,
+ kItalic_Slant,
+ kOblique_Slant,
+ };
+
+ constexpr SkFontStyle(int weight, int width, Slant slant) : fValue(
+ (SkTPin<int>(weight, kInvisible_Weight, kExtraBlack_Weight)) +
+ (SkTPin<int>(width, kUltraCondensed_Width, kUltraExpanded_Width) << 16) +
+ (SkTPin<int>(slant, kUpright_Slant, kOblique_Slant) << 24)
+ ) { }
+
+ constexpr SkFontStyle() : SkFontStyle{kNormal_Weight, kNormal_Width, kUpright_Slant} { }
+
+ bool operator==(const SkFontStyle& rhs) const {
+ return fValue == rhs.fValue;
+ }
+
+ int weight() const { return fValue & 0xFFFF; }
+ int width() const { return (fValue >> 16) & 0xFF; }
+ Slant slant() const { return (Slant)((fValue >> 24) & 0xFF); }
+
+ static constexpr SkFontStyle Normal() {
+ return SkFontStyle(kNormal_Weight, kNormal_Width, kUpright_Slant);
+ }
+ static constexpr SkFontStyle Bold() {
+ return SkFontStyle(kBold_Weight, kNormal_Width, kUpright_Slant);
+ }
+ static constexpr SkFontStyle Italic() {
+ return SkFontStyle(kNormal_Weight, kNormal_Width, kItalic_Slant);
+ }
+ static constexpr SkFontStyle BoldItalic() {
+ return SkFontStyle(kBold_Weight, kNormal_Width, kItalic_Slant);
+ }
+
+private:
+ uint32_t fValue;
+};
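+
+/* Example (sketch): the three attributes round-trip through the packed 32-bit
+ value, matching the shifts in the constructor above.
+
+ SkFontStyle style(SkFontStyle::kSemiBold_Weight,
+ SkFontStyle::kCondensed_Width,
+ SkFontStyle::kItalic_Slant);
+ SkASSERT(style.weight() == SkFontStyle::kSemiBold_Weight); // low 16 bits
+ SkASSERT(style.width() == SkFontStyle::kCondensed_Width); // bits 16..23
+ SkASSERT(style.slant() == SkFontStyle::kItalic_Slant); // bits 24..31
+*/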
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontTypes.h b/gfx/skia/skia/include/core/SkFontTypes.h
new file mode 100644
index 0000000000..76f5dde67f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontTypes.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontTypes_DEFINED
+#define SkFontTypes_DEFINED
+
+enum class SkTextEncoding {
+ kUTF8, //!< uses bytes to represent UTF-8 or ASCII
+ kUTF16, //!< uses two byte words to represent most of Unicode
+ kUTF32, //!< uses four byte words to represent all of Unicode
+ kGlyphID, //!< uses two byte words to represent glyph indices
+};
+
+enum class SkFontHinting {
+ kNone, //!< glyph outlines unchanged
+ kSlight, //!< minimal modification to improve contrast
+ kNormal, //!< glyph outlines modified to improve contrast
+ kFull, //!< modifies glyph outlines for maximum contrast
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkGraphics.h b/gfx/skia/skia/include/core/SkGraphics.h
new file mode 100644
index 0000000000..a7fcfa6d81
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkGraphics.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGraphics_DEFINED
+#define SkGraphics_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkData;
+class SkImageGenerator;
+class SkTraceMemoryDump;
+
+class SK_API SkGraphics {
+public:
+ /**
+ * Call this at process initialization time if your environment does not
+ * permit static global initializers that execute code.
+ * Init() is thread-safe and idempotent.
+ */
+ static void Init();
+
+ // We're in the middle of cleaning this up.
+ static void Term() {}
+
+ /**
+ * Return the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ * This max can be changed by calling SetFontCacheLimit().
+ */
+ static size_t GetFontCacheLimit();
+
+ /**
+ * Specify the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ *
+ * This function returns the previous setting, as if GetFontCacheLimit()
+ * had been called before the new limit was set.
+ */
+ static size_t SetFontCacheLimit(size_t bytes);
+
+ /**
+ * Return the number of bytes currently used by the font cache.
+ */
+ static size_t GetFontCacheUsed();
+
+ /**
+ * Return the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountUsed();
+
+ /**
+ * Return the current limit to the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountLimit();
+
+ /**
+ * Set the limit to the number of entries in the font cache, and return
+ * the previous value. If this new value is lower than the previous,
+ * it will automatically try to purge entries to meet the new limit.
+ */
+ static int SetFontCacheCountLimit(int count);
+
+ /*
+ * Returns the maximum point size for text that may be cached.
+ *
+ * Sizes above this will be drawn directly from the font's outline.
+ * Setting this to a large value may speed up drawing larger text (repeatedly),
+ * but could cause the cache to purge other sizes more often.
+ *
+ * This value is a hint to the font engine, and the actual limit may be different due to
+ * implementation specific details.
+ */
+ static int GetFontCachePointSizeLimit();
+
+ /*
+ * Set the maximum point size for text that may be cached, returning the previous value.
+ *
+ * Sizes above this will be drawn directly from the font's outline.
+ * Setting this to a large value may speed up drawing larger text (repeatedly),
+ * but could cause the cache to purge other sizes more often.
+ *
+ * This value is a hint to the font engine, and the actual limit may be different due to
+ * implementation specific details.
+ */
+ static int SetFontCachePointSizeLimit(int maxPointSize);
+
+ /**
+ * For debugging purposes, this will attempt to purge the font cache. It
+ * does not change the limit, but will cause subsequent font measures and
+ * draws to be recreated, since they will no longer be in the cache.
+ */
+ static void PurgeFontCache();
+
+ /**
+ * Scaling bitmaps with the kHigh_SkFilterQuality setting is
+ * expensive, so the result is saved in the global Scaled Image
+ * Cache.
+ *
+ * This function returns the memory usage of the Scaled Image Cache.
+ */
+ static size_t GetResourceCacheTotalBytesUsed();
+
+ /**
+ * These functions get/set the memory usage limit for the resource cache, used for temporary
+ * bitmaps and other resources. Entries are purged from the cache when the memory usage
+ * exceeds this limit.
+ */
+ static size_t GetResourceCacheTotalByteLimit();
+ static size_t SetResourceCacheTotalByteLimit(size_t newLimit);
+
+ /**
+ * For debugging purposes, this will attempt to purge the resource cache. It
+ * does not change the limit.
+ */
+ static void PurgeResourceCache();
+
+ /**
+ * When the cacheable entry is very large (e.g. a large scaled bitmap), adding it to the cache
+ * can cause most/all of the existing entries to be purged. To avoid this, the client can set
+ * a limit for a single allocation. If a cacheable entry would have been cached, but its size
+ * exceeds this limit, then we do not attempt to cache it at all.
+ *
+ * Zero is the default value, meaning we always attempt to cache entries.
+ */
+ static size_t GetResourceCacheSingleAllocationByteLimit();
+ static size_t SetResourceCacheSingleAllocationByteLimit(size_t newLimit);
+
+ /**
+ * Dumps memory usage of caches using the SkTraceMemoryDump interface. See SkTraceMemoryDump
+ * for usage of this method.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ /**
+ * Free as much globally cached memory as possible. This will purge all private caches in Skia,
+ * including font and image caches.
+ *
+ * If there are caches associated with GPU context, those will not be affected by this call.
+ */
+ static void PurgeAllCaches();
+
+ /**
+ * Applications with command line options may pass optional state, such
+ * as cache sizes, here, for instance:
+ * font-cache-limit=12345678
+ *
+ * The flags format is name=value[;name=value...] with no spaces.
+ * This format is subject to change.
+ */
+ static void SetFlags(const char* flags);
+
+ typedef std::unique_ptr<SkImageGenerator>
+ (*ImageGeneratorFromEncodedDataFactory)(sk_sp<SkData>);
+
+ /**
+ * To instantiate images from encoded data, Skia first looks at this runtime function-ptr. If
+ * it exists, it is called to create an SkImageGenerator from SkData. If there is no
+ * function-ptr, or there is but it returns NULL, then Skia will call its internal default
+ * implementation.
+ *
+ * Returns the previous factory (which could be NULL).
+ */
+ static ImageGeneratorFromEncodedDataFactory
+ SetImageGeneratorFromEncodedDataFactory(ImageGeneratorFromEncodedDataFactory);
+};
+
+class SkAutoGraphics {
+public:
+ SkAutoGraphics() {
+ SkGraphics::Init();
+ }
+};
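+
+/* Example (sketch): process setup with explicit cache budgets; the byte
+ figures are illustrative assumptions, not recommendations.
+
+ SkGraphics::Init(); // thread-safe and idempotent
+ SkGraphics::SetFontCacheLimit(16 * 1024 * 1024); // glyph cache budget
+ SkGraphics::SetResourceCacheTotalByteLimit(64 * 1024 * 1024); // resource cache budget
+*/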
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkICC.h b/gfx/skia/skia/include/core/SkICC.h
new file mode 100644
index 0000000000..cb84c1ffbc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkICC.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkICC_DEFINED
+#define SkICC_DEFINED
+
+#include "include/core/SkData.h"
+
+struct skcms_Matrix3x3;
+struct skcms_TransferFunction;
+
+SK_API sk_sp<SkData> SkWriteICCProfile(const skcms_TransferFunction&,
+ const skcms_Matrix3x3& toXYZD50);
+
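+/* Example (sketch): serializing an sRGB profile; SkNamedTransferFn::kSRGB and
+ SkNamedGamut::kSRGB are assumed to come from SkColorSpace.h.
+
+ sk_sp<SkData> profile = SkWriteICCProfile(SkNamedTransferFn::kSRGB,
+ SkNamedGamut::kSRGB);
+*/
+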
+#endif // SkICC_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImage.h b/gfx/skia/skia/include/core/SkImage.h
new file mode 100644
index 0000000000..c4fa7a5444
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImage.h
@@ -0,0 +1,1143 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_DEFINED
+#define SkImage_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTileMode.h"
+#include "include/gpu/GrTypes.h"
+#include <functional> // std::function
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include <android/hardware_buffer.h>
+#endif
+
+class SkData;
+class SkCanvas;
+class SkImageFilter;
+class SkImageGenerator;
+class SkPaint;
+class SkPicture;
+class SkString;
+class SkSurface;
+class GrBackendTexture;
+class GrContext;
+class GrContextThreadSafeProxy;
+class GrTexture;
+
+struct SkYUVAIndex;
+
+/** \class SkImage
+ SkImage describes a two dimensional array of pixels to draw. The pixels may be
+ decoded in a raster bitmap, encoded in a SkPicture or compressed data stream,
+ or located in GPU memory as a GPU texture.
+
+ SkImage cannot be modified after it is created. SkImage may allocate additional
+ storage as needed; for instance, an encoded SkImage may decode when drawn.
+
+ SkImage width and height are greater than zero. Creating an SkImage with zero width
+ or height returns SkImage equal to nullptr.
+
+ SkImage may be created from SkBitmap, SkPixmap, SkSurface, SkPicture, encoded streams,
+ GPU texture, YUV_ColorSpace data, or hardware buffer. Encoded streams supported
+ include BMP, GIF, HEIF, ICO, JPEG, PNG, WBMP, WebP. Supported encoding details
+ vary with platform.
+*/
+class SK_API SkImage : public SkRefCnt {
+public:
+
+ /** Caller data passed to RasterReleaseProc; may be nullptr.
+ */
+ typedef void* ReleaseContext;
+
+ /** Creates SkImage from SkPixmap and copy of pixels. Since pixels are copied, SkPixmap
+ pixels may be modified or deleted without affecting SkImage.
+
+ SkImage is returned if SkPixmap is valid. Valid SkPixmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @return copy of SkPixmap pixels, or nullptr
+ */
+ static sk_sp<SkImage> MakeRasterCopy(const SkPixmap& pixmap);
+
+ /** Creates SkImage from SkImageInfo, sharing pixels.
+
+ SkImage is returned if SkImageInfo is valid. Valid SkImageInfo parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ rowBytes are large enough to hold one row of pixels;
+ pixels is not nullptr, and contains enough data for SkImage.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address of pixel storage
+ @param rowBytes size of pixel row or larger
+ @return SkImage sharing pixels, or nullptr
+ */
+ static sk_sp<SkImage> MakeRasterData(const SkImageInfo& info, sk_sp<SkData> pixels,
+ size_t rowBytes);
+
+ /** Function called when SkImage no longer shares pixels. ReleaseContext is
+ provided by caller when SkImage is created, and may be nullptr.
+ */
+ typedef void (*RasterReleaseProc)(const void* pixels, ReleaseContext);
+
+ /** Creates SkImage from pixmap, sharing SkPixmap pixels. Pixels must remain valid and
+ unchanged until rasterReleaseProc is called. rasterReleaseProc is passed
+ releaseContext when SkImage is deleted or no longer refers to pixmap pixels.
+
+ Pass nullptr for rasterReleaseProc to share SkPixmap without requiring a callback
+ when SkImage is released. Pass nullptr for releaseContext if rasterReleaseProc
+ does not require state.
+
+ SkImage is returned if pixmap is valid. Valid SkPixmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @param rasterReleaseProc function called when pixels can be released; or nullptr
+ @param releaseContext state passed to rasterReleaseProc; or nullptr
+ @return SkImage sharing pixmap
+ */
+ static sk_sp<SkImage> MakeFromRaster(const SkPixmap& pixmap,
+ RasterReleaseProc rasterReleaseProc,
+ ReleaseContext releaseContext);
+
+ /** Creates SkImage from bitmap, sharing or copying bitmap pixels. If the bitmap
+ is marked immutable, and its pixel memory is shareable, it may be shared
+ instead of copied.
+
+ SkImage is returned if bitmap is valid. Valid SkBitmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param bitmap SkImageInfo, row bytes, and pixels
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromBitmap(const SkBitmap& bitmap);
+
+ /** Creates SkImage from data returned by imageGenerator. Generated data is owned by SkImage and
+ may not be shared or accessed.
+
+ subset allows selecting a portion of the full image. Pass nullptr to select the entire
+ image; otherwise, subset must be contained by image bounds.
+
+ SkImage is returned if generator data is valid. Valid data parameters vary by type of data
+ and platform.
+
+ imageGenerator may wrap SkPicture data, codec data, or custom data.
+
+ @param imageGenerator stock or custom routines to retrieve SkImage
+ @param subset bounds of returned SkImage; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromGenerator(std::unique_ptr<SkImageGenerator> imageGenerator,
+ const SkIRect* subset = nullptr);
+
+ /**
+ * Return an image backed by the encoded data, but attempt to defer decoding until the image
+ * is actually used/drawn. This deferral allows the system to cache the result, either on the
+ * CPU or on the GPU, depending on where the image is drawn. If memory is low, the cache may
+ * be purged, causing the next draw of the image to have to re-decode.
+ *
+ * The subset parameter specifies an area within the decoded image to create the image from.
+ * If subset is null, then the entire image is returned.
+ *
+ * This is similar to DecodeTo[Raster,Texture], but this method will attempt to defer the
+ * actual decode, while the DecodeTo... method explicitly decode and allocate the backend
+ * when the call is made.
+ *
+ * If the encoded format is not supported, or subset is outside of the bounds of the decoded
+ * image, nullptr is returned.
+ *
+ * @param encoded the encoded data
+ * @param length the number of bytes of encoded data
+ * @param subset the bounds of the pixels within the decoded image to return. may be null.
+ * @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromEncoded(sk_sp<SkData> encoded, const SkIRect* subset = nullptr);
+
+ /**
+ * Decode the data in encoded/length into a raster image.
+ *
+ * The subset parameter specifies an area within the decoded image to create the image from.
+ * If subset is null, then the entire image is returned.
+ *
+ * This is similar to MakeFromEncoded, but this method will always decode immediately, and
+ * allocate the memory for the pixels for the lifetime of the returned image.
+ *
+ * If the encoded format is not supported, or subset is outside of the bounds of the decoded
+ * image, nullptr is returned.
+ *
+ * @param encoded the encoded data
+ * @param length the number of bytes of encoded data
+ * @param subset the bounds of the pixels within the decoded image to return. may be null.
+ * @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> DecodeToRaster(const void* encoded, size_t length,
+ const SkIRect* subset = nullptr);
+ static sk_sp<SkImage> DecodeToRaster(const sk_sp<SkData>& data,
+ const SkIRect* subset = nullptr) {
+ return DecodeToRaster(data->data(), data->size(), subset);
+ }
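+
+ /* Example (sketch): eager decode of encoded bytes into a raster image; the
+ file name below is an illustrative assumption.
+
+ sk_sp<SkData> data = SkData::MakeFromFileName("photo.png");
+ sk_sp<SkImage> image = SkImage::DecodeToRaster(data);
+ */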
+
+ /**
+ * Decode the data in encoded/length into a texture-backed image.
+ *
+ * The subset parameter specifies an area within the decoded image to create the image from.
+ * If subset is null, then the entire image is returned.
+ *
+ * This is similar to MakeFromEncoded, but this method will always decode immediately, and
+ * allocate the texture for the pixels for the lifetime of the returned image.
+ *
+ * If the encoded format is not supported, or subset is outside of the bounds of the decoded
+ * image, nullptr is returned.
+ *
+ * @param encoded the encoded data
+ * @param length the number of bytes of encoded data
+ * @param subset the bounds of the pixels within the decoded image to return. may be null.
+ * @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> DecodeToTexture(GrContext* ctx, const void* encoded, size_t length,
+ const SkIRect* subset = nullptr);
+ static sk_sp<SkImage> DecodeToTexture(GrContext* ctx, const sk_sp<SkData>& data,
+ const SkIRect* subset = nullptr) {
+ return DecodeToTexture(ctx, data->data(), data->size(), subset);
+ }
+
+ // Experimental
+ enum CompressionType {
+ kETC1_CompressionType,
+ kLast_CompressionType = kETC1_CompressionType,
+ };
+
+ /** Creates a GPU-backed SkImage from compressed data.
+
+ SkImage is returned if format of the compressed data is supported.
+ Supported formats vary by platform.
+
+ @param context GPU context
+ @param data compressed data to store in SkImage
+ @param width width of full SkImage
+ @param height height of full SkImage
+ @param type type of compression used
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromCompressed(GrContext* context, sk_sp<SkData> data,
+ int width, int height, CompressionType type);
+
+ /** User function called when supplied texture may be deleted.
+ */
+ typedef void (*TextureReleaseProc)(ReleaseContext releaseContext);
+
+ /** Creates SkImage from GPU texture associated with context. Caller is responsible for
+ managing the lifetime of GPU texture.
+
+ SkImage is returned if format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kRGB_888x_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param colorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace) {
+ return MakeFromTexture(context, backendTexture, origin, colorType, alphaType, colorSpace,
+ nullptr, nullptr);
+ }
+
+ /** Creates SkImage from GPU texture associated with context. GPU texture must stay
+ valid and unchanged until textureReleaseProc is called. textureReleaseProc is
+ passed releaseContext when SkImage is deleted or no longer refers to texture.
+
+ SkImage is returned if format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType,
+ kRGB_565_SkColorType, kARGB_4444_SkColorType,
+ kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType,
+ kRGB_101010x_SkColorType, kGray_8_SkColorType,
+ kRGBA_F16_SkColorType
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param colorSpace range of colors; may be nullptr
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext);
+
+ /** Creates SkImage from pixmap. SkImage is uploaded to GPU back-end using context.
+
+ Created SkImage is available to other GPU contexts, and is available across thread
+ boundaries. All contexts must be in the same GPU share group, or otherwise
+ share resources.
+
+ When SkImage is no longer referenced, context releases texture memory
+ asynchronously.
+
+ SkColorSpace of SkImage is determined by pixmap.colorSpace().
+
+ SkImage is returned referring to GPU back-end if context is not nullptr,
+ format of data is recognized and supported, and if context supports moving
+ resources between contexts. Otherwise, pixmap pixel data is copied and SkImage
+ is returned in raster format if possible; nullptr may be returned.
+ Recognized GPU formats vary by platform and GPU back-end.
+
+ @param context GPU context
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @param buildMips create SkImage as mip map if true
+ @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeCrossContextFromPixmap(GrContext* context, const SkPixmap& pixmap,
+ bool buildMips,
+ bool limitToMaxTextureSize = false);
+
+ /** Creates SkImage from backendTexture associated with context. backendTexture and
+ returned SkImage are managed internally, and are released when no longer needed.
+
+ SkImage is returned if format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param surfaceOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType,
+ kRGB_565_SkColorType, kARGB_4444_SkColorType,
+ kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType,
+ kRGB_101010x_SkColorType, kGray_8_SkColorType,
+ kRGBA_F16_SkColorType
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param colorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAdoptedTexture(GrContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin surfaceOrigin,
+ SkColorType colorType,
+ SkAlphaType alphaType = kPremul_SkAlphaType,
+ sk_sp<SkColorSpace> colorSpace = nullptr);
+
+ /** Creates an SkImage by flattening the specified YUVA planes into a single, interleaved RGBA
+ image.
+
+ @param context GPU context
+ @param yuvColorSpace How the YUV values are converted to RGB. One of:
+ kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param yuvaTextures array of (up to four) YUVA textures on GPU which contain the,
+ possibly interleaved, YUVA planes
+ @param yuvaIndices array indicating which texture in yuvaTextures, and channel
+ in that texture, maps to each component of YUVA.
+ @param imageSize size of the resulting image
+ @param imageOrigin origin of the resulting image. One of: kBottomLeft_GrSurfaceOrigin,
+ kTopLeft_GrSurfaceOrigin
+ @param imageColorSpace range of colors of the resulting image; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVATexturesCopy(GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** Creates an SkImage by flattening the specified YUVA planes into a single, interleaved RGBA
+ image. 'backendTexture' is used to store the result of the flattening.
+
+ @param context GPU context
+ @param yuvColorSpace How the YUV values are converted to RGB. One of:
+ kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param yuvaTextures array of (up to four) YUVA textures on GPU which contain the,
+ possibly interleaved, YUVA planes
+ @param yuvaIndices array indicating which texture in yuvaTextures, and channel
+ in that texture, maps to each component of YUVA.
+ @param imageSize size of the resulting image
+ @param imageOrigin origin of the resulting image. One of:
+ kBottomLeft_GrSurfaceOrigin,
+ kTopLeft_GrSurfaceOrigin
+ @param backendTexture the resource that stores the final pixels
+ @param imageColorSpace range of colors of the resulting image; may be nullptr
+ @param textureReleaseProc function called when backendTexture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVATexturesCopyWithExternalBackend(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Creates an SkImage by storing the specified YUVA planes into an image, to be rendered
+ via multitexturing.
+
+ @param context GPU context
+ @param yuvColorSpace How the YUV values are converted to RGB. One of:
+ kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param yuvaTextures array of (up to four) YUVA textures on GPU which contain the,
+ possibly interleaved, YUVA planes
+ @param yuvaIndices array indicating which texture in yuvaTextures, and channel
+ in that texture, maps to each component of YUVA.
+ @param imageSize size of the resulting image
+ @param imageOrigin origin of the resulting image. One of: kBottomLeft_GrSurfaceOrigin,
+ kTopLeft_GrSurfaceOrigin
+ @param imageColorSpace range of colors of the resulting image; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVATextures(GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** Creates SkImage from pixmap array representing YUVA data.
+ SkImage is uploaded to GPU back-end using context.
+
+ Each GrBackendTexture created from yuvaPixmaps array is uploaded to match SkSurface
+ using SkColorSpace of SkPixmap. SkColorSpace of SkImage is determined by imageColorSpace.
+
+ SkImage is returned referring to GPU back-end if context is not nullptr and
+ format of data is recognized and supported. Otherwise, nullptr is returned.
+ Recognized GPU formats vary by platform and GPU back-end.
+
+ @param context GPU context
+ @param yuvColorSpace How the YUV values are converted to RGB. One of:
+ kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param yuvaPixmaps array of (up to four) SkPixmap which contain the,
+ possibly interleaved, YUVA planes
+ @param yuvaIndices array indicating which pixmap in yuvaPixmaps, and channel
+ in that pixmap, maps to each component of YUVA.
+ @param imageSize size of the resulting image
+ @param imageOrigin origin of the resulting image. One of:
+ kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param buildMips create internal YUVA textures as mip map if true
+ @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary
+ @param imageColorSpace range of colors of the resulting image; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVAPixmaps(
+ GrContext* context, SkYUVColorSpace yuvColorSpace, const SkPixmap yuvaPixmaps[],
+ const SkYUVAIndex yuvaIndices[4], SkISize imageSize, GrSurfaceOrigin imageOrigin,
+ bool buildMips, bool limitToMaxTextureSize = false,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** To be deprecated.
+ */
+ static sk_sp<SkImage> MakeFromYUVTexturesCopy(GrContext* context, SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvTextures[3],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** To be deprecated.
+ */
+ static sk_sp<SkImage> MakeFromYUVTexturesCopyWithExternalBackend(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvTextures[3],
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** Creates SkImage from copy of nv12Textures, an array of textures on GPU.
+ nv12Textures[0] contains pixels for YUV component y plane.
+ nv12Textures[1] contains pixels for YUV component u plane,
+ followed by pixels for YUV component v plane.
+ Returned SkImage has the dimensions nv12Textures[2].
+ yuvColorSpace describes how YUV colors convert to RGB colors.
+
+ @param context GPU context
+ @param yuvColorSpace one of: kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param nv12Textures array of YUV textures on GPU
+ @param imageOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param imageColorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromNV12TexturesCopy(GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture nv12Textures[2],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr);
+
+ /** Creates SkImage from copy of nv12Textures, an array of textures on GPU.
+ nv12Textures[0] contains pixels for YUV component y plane.
+ nv12Textures[1] contains pixels for YUV component u plane,
+ followed by pixels for YUV component v plane.
+ Returned SkImage has the dimensions nv12Textures[2] and stores pixels in backendTexture.
+ yuvColorSpace describes how YUV colors convert to RGB colors.
+
+ @param context GPU context
+ @param yuvColorSpace one of: kJPEG_SkYUVColorSpace, kRec601_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace, kIdentity_SkYUVColorSpace
+ @param nv12Textures array of YUV textures on GPU
+ @param imageOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param backendTexture the resource that stores the final pixels
+ @param imageColorSpace range of colors; may be nullptr
+ @param textureReleaseProc function called when backendTexture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromNV12TexturesCopyWithExternalBackend(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture nv12Textures[2],
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace = nullptr,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ enum class BitDepth {
+ kU8, //!< uses 8-bit unsigned int per color component
+ kF16, //!< uses 16-bit float per color component
+ };
+
+ /** Creates SkImage from picture. Returned SkImage width and height are set by dimensions.
+ SkImage draws picture with matrix and paint, set to bitDepth and colorSpace.
+
+ If matrix is nullptr, draws with identity SkMatrix. If paint is nullptr, draws
+ with default SkPaint. colorSpace may be nullptr.
+
+ @param picture stream of drawing commands
+ @param dimensions width and height
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+ @param bitDepth 8-bit integer or 16-bit float per component
+ @param colorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth,
+ sk_sp<SkColorSpace> colorSpace);
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+ /** (See Skia bug 7447)
+ Creates SkImage from Android hardware buffer.
+ Returned SkImage takes a reference on the buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAHardwareBuffer(
+ AHardwareBuffer* hardwareBuffer,
+ SkAlphaType alphaType = kPremul_SkAlphaType,
+ sk_sp<SkColorSpace> colorSpace = nullptr,
+ GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin);
+
+ /** Creates SkImage from Android hardware buffer and uploads the data from the SkPixmap to it.
+ Returned SkImage takes a reference on the buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ @param pixmap SkPixmap that contains data to be uploaded to the AHardwareBuffer
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param surfaceOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAHardwareBufferWithData(
+ GrContext* context,
+ const SkPixmap& pixmap,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin);
+#endif
+
+ /** Returns a SkImageInfo describing the width, height, color type, alpha type, and color space
+ of the SkImage.
+
+ @return image info of SkImage.
+ */
+ const SkImageInfo& imageInfo() const { return fInfo; }
+
+ /** Returns pixel count in each row.
+
+ @return pixel width in SkImage
+ */
+ int width() const { return fInfo.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height in SkImage
+ */
+ int height() const { return fInfo.height(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return SkISize::Make(fInfo.width(), fInfo.height()); }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeWH(fInfo.width(), fInfo.height()); }
+
+ /** Returns value unique to image. SkImage contents cannot change after SkImage is
+ created. Any operation to create a new SkImage will generate a new
+ unique number.
+
+ @return unique identifier
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns SkAlphaType, one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType.
+
+ SkAlphaType returned was a parameter to an SkImage constructor,
+ or was parsed from encoded data.
+
+ @return SkAlphaType in SkImage
+ */
+ SkAlphaType alphaType() const;
+
+ /** Returns SkColorType if known; otherwise, returns kUnknown_SkColorType.
+
+ @return SkColorType of SkImage
+ */
+ SkColorType colorType() const;
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImage. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ SkColorSpace returned was passed to an SkImage constructor,
+ or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage
+ is drawn, depending on the capabilities of the SkSurface receiving the drawing.
+
+ @return SkColorSpace in SkImage, or nullptr
+ */
+ SkColorSpace* colorSpace() const;
+
+ /** Returns a smart pointer to SkColorSpace, the range of colors, associated with
+ SkImage. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ SkColorSpace returned was passed to an SkImage constructor,
+ or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage
+ is drawn, depending on the capabilities of the SkSurface receiving the drawing.
+
+ @return SkColorSpace in SkImage, or nullptr, wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const;
+
+ /** Returns true if SkImage pixels represent transparency only. If true, each pixel
+ is packed in 8 bits as defined by kAlpha_8_SkColorType.
+
+ @return true if pixels represent a transparency mask
+ */
+ bool isAlphaOnly() const;
+
+ /** Returns true if pixels ignore their alpha value and are treated as fully opaque.
+
+ @return true if SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const { return SkAlphaTypeIsOpaque(this->alphaType()); }
+
+ /** Creates SkShader from SkImage. SkShader dimensions are taken from SkImage. SkShader uses
+ SkTileMode rules to fill drawn area outside SkImage. localMatrix permits
+ transforming SkImage before SkCanvas matrix is applied.
+
+ @param tmx tiling in the x direction
+ @param tmy tiling in the y direction
+ @param localMatrix SkImage transformation, or nullptr
+ @return SkShader containing SkImage
+ */
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix = nullptr) const;
+
+ /** Creates SkShader from SkImage. SkShader dimensions are taken from SkImage. SkShader uses
+ SkTileMode::kClamp to fill drawn area outside SkImage. localMatrix permits
+ transforming SkImage before SkCanvas matrix is applied.
+
+ @param localMatrix SkImage transformation, or nullptr
+ @return SkShader containing SkImage
+ */
+ sk_sp<SkShader> makeShader(const SkMatrix* localMatrix = nullptr) const {
+ return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, localMatrix);
+ }
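+
+ /* Example (sketch): tiling an image through a paint; `image` and `canvas`
+ are assumed to be a valid sk_sp<SkImage> and SkCanvas*.
+
+ SkPaint paint;
+ paint.setShader(image->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat));
+ canvas->drawRect(SkRect::MakeWH(512, 512), paint);
+ */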
+
+ /** Copies SkImage pixel address, row bytes, and SkImageInfo to pixmap, if address
+ is available, and returns true. If pixel address is not available, return
+ false and leave pixmap unchanged.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkImage has direct access to pixels
+ */
+ bool peekPixels(SkPixmap* pixmap) const;
+
+ /** Deprecated.
+ */
+ GrTexture* getTexture() const;
+
+ /** Returns true if the contents of SkImage were created on or uploaded to GPU memory,
+ and is available as a GPU texture.
+
+ @return true if SkImage is a GPU texture
+ */
+ bool isTextureBacked() const;
+
+ /** Returns true if SkImage can be drawn on either raster surface or GPU surface.
+ If context is nullptr, tests if SkImage draws on raster surface;
+ otherwise, tests if SkImage draws on GPU surface associated with context.
+
+ SkImage backed by GPU texture may become invalid if associated GrContext is
+ invalid. A lazy image may be invalid and may not draw to raster surface,
+ GPU surface, or both.
+
+ @param context GPU context
+ @return true if SkImage can be drawn
+ */
+ bool isValid(GrContext* context) const;
+
+ /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not
+ texture-backed (including promise texture images) or if the GrContext does not
+ have the same context ID as the context backing the image then this is a no-op.
+
+ If the image was not used in any non-culled draws recorded on the passed GrContext then
+ this is a no-op unless the GrFlushInfo contains semaphores, a finish proc, or uses
+ kSyncCpu_GrFlushFlag. Those are respected even when the image has not been used.
+
+ @param context the context on which to flush pending usages of the image.
+ @param flushInfo flush options
+ @return one of: GrSemaphoresSubmitted::kYes, GrSemaphoresSubmitted::kNo
+ */
+ GrSemaphoresSubmitted flush(GrContext* context, const GrFlushInfo& flushInfo);
+
+ /** Version of flush() that uses a default GrFlushInfo. */
+ void flush(GrContext*);
+
+ /** Retrieves the back-end texture. If SkImage has no back-end texture, an invalid
+ object is returned. Call GrBackendTexture::isValid to determine if the result
+ is valid.
+
+ If flushPendingGrContextIO is true, completes deferred I/O operations.
+
+ If origin is not nullptr, copies location of content drawn into SkImage.
+
+ @param flushPendingGrContextIO flag to flush outstanding requests
+ @param origin storage for one of: kTopLeft_GrSurfaceOrigin,
+ kBottomLeft_GrSurfaceOrigin; or nullptr
+ @return back-end API texture handle; invalid on failure
+ */
+ GrBackendTexture getBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin = nullptr) const;
+
+ /** \enum SkImage::CachingHint
+ CachingHint selects whether Skia may internally cache SkBitmap generated by
+ decoding SkImage, or by copying SkImage from GPU to CPU. The default behavior
+ allows caching SkBitmap.
+
+ Choose kDisallow_CachingHint if SkImage pixels are to be used only once, or
+ if SkImage pixels reside in a cache outside of Skia, or to reduce memory pressure.
+
+ Choosing kAllow_CachingHint does not ensure that pixels will be cached.
+ SkImage pixels may not be cached if memory requirements are too large or
+ pixels are not accessible.
+ */
+ enum CachingHint {
+ kAllow_CachingHint, //!< allows internally caching decoded and copied pixels
+ kDisallow_CachingHint, //!< disallows internally caching decoded and copied pixels
+ };
+
+ /** Copies SkRect of pixels from SkImage to dstPixels. Copy starts at offset (srcX, srcY),
+ and does not exceed SkImage (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of
+ destination. dstRowBytes specifies the gap from one destination row to the next.
+ Returns true if pixels are copied. Returns false if:
+ - dstPixels equals nullptr
+ - dstRowBytes is less than dstInfo.minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height().
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @param cachingHint one of: kAllow_CachingHint, kDisallow_CachingHint
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint cachingHint = kAllow_CachingHint) const;
+
+ /** Copies a SkRect of pixels from SkImage to dst. Copy starts at (srcX, srcY), and
+ does not exceed SkImage (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+ and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+ - dst.rowBytes is less than SkImageInfo::minRowBytes
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height().
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @param cachingHint one of: kAllow_CachingHint, kDisallow_CachingHint
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY,
+ CachingHint cachingHint = kAllow_CachingHint) const;
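+
+ /* Example (sketch): reading the full image into caller-owned N32 memory;
+ `image` is assumed to be a valid sk_sp<SkImage>.
+
+ SkImageInfo info = SkImageInfo::MakeN32Premul(image->width(), image->height());
+ std::vector<uint8_t> storage(info.computeMinByteSize());
+ bool ok = image->readPixels(info, storage.data(), info.minRowBytes(), 0, 0);
+ */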
+
+ /** Copies SkImage to dst, scaling pixels to fit dst.width() and dst.height(), and
+ converting pixels to match dst.colorType() and dst.alphaType(). Returns true if
+ pixels are copied. Returns false if dst.addr() is nullptr, or dst.rowBytes() is
+ less than dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ Scales the image, with filterQuality, to match dst.width() and dst.height().
+ filterQuality kNone_SkFilterQuality is fastest, typically implemented with
+ nearest neighbor filter. kLow_SkFilterQuality is typically implemented with
+ bilerp filter. kMedium_SkFilterQuality is typically implemented with
+ bilerp filter, and mip-map filter when size is reduced.
+ kHigh_SkFilterQuality is slowest, typically implemented with bicubic filter.
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @param filterQuality one of: kNone_SkFilterQuality, kLow_SkFilterQuality,
+ kMedium_SkFilterQuality, kHigh_SkFilterQuality
+ @param cachingHint one of: kAllow_CachingHint, kDisallow_CachingHint
+ @return true if pixels are scaled to fit dst
+ */
+ bool scalePixels(const SkPixmap& dst, SkFilterQuality filterQuality,
+ CachingHint cachingHint = kAllow_CachingHint) const;
+
+ /** Encodes SkImage pixels, returning result as SkData.
+
+ Returns nullptr if encoding fails, or if encodedImageFormat is not supported.
+
+ SkImage encoding in a format requires both building with one or more of:
+ SK_HAS_JPEG_LIBRARY, SK_HAS_PNG_LIBRARY, SK_HAS_WEBP_LIBRARY; and platform support
+ for the encoded format.
+
+ If SK_BUILD_FOR_MAC or SK_BUILD_FOR_IOS is defined, encodedImageFormat can
+ additionally be one of: SkEncodedImageFormat::kICO, SkEncodedImageFormat::kBMP,
+ SkEncodedImageFormat::kGIF.
+
+ quality is a platform and format specific metric trading off size and encoding
+ error. When used, quality equaling 100 encodes with the least error. quality may
+ be ignored by the encoder.
+
+ @param encodedImageFormat one of: SkEncodedImageFormat::kJPEG, SkEncodedImageFormat::kPNG,
+ SkEncodedImageFormat::kWEBP
+ @param quality encoder specific metric with 100 equaling best
+ @return encoded SkImage, or nullptr
+ */
+ sk_sp<SkData> encodeToData(SkEncodedImageFormat encodedImageFormat, int quality) const;
+
+ /** Encodes SkImage pixels, returning result as SkData. Returns existing encoded data
+ if present; otherwise, SkImage is encoded with SkEncodedImageFormat::kPNG. Skia
+ must be built with SK_HAS_PNG_LIBRARY to encode SkImage.
+
+ Returns nullptr if existing encoded data is missing or invalid, and
+ encoding fails.
+
+ @return encoded SkImage, or nullptr
+ */
+ sk_sp<SkData> encodeToData() const;
+
+ /** Returns encoded SkImage pixels as SkData, if SkImage was created from supported
+ encoded stream format. Platform support for formats varies and may require building
+ with one or more of: SK_HAS_JPEG_LIBRARY, SK_HAS_PNG_LIBRARY, SK_HAS_WEBP_LIBRARY.
+
+ Returns nullptr if SkImage contents are not encoded.
+
+ @return encoded SkImage, or nullptr
+ */
+ sk_sp<SkData> refEncodedData() const;
+
+ /** Returns subset of SkImage. subset must be fully contained by SkImage dimensions().
+ The implementation may share pixels, or may copy them.
+
+ Returns nullptr if subset is empty, or subset is not contained by bounds, or
+ pixels in SkImage could not be read or copied.
+
+ @param subset bounds of returned SkImage
+ @return partial or full SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeSubset(const SkIRect& subset) const;
+
+ /** Returns SkImage backed by GPU texture associated with context. The returned
+ SkImage respects the mipMapped setting; if mipMapped equals GrMipMapped::kYes,
+ the backing texture allocates mip map levels. Returns original SkImage if
+ context matches and mipMapped is compatible with the backing GPU texture.
+
+ Returns nullptr if context is nullptr, or if SkImage was created with another
+ GrContext.
+
+ @param context GPU context
+ @param mipMapped whether created SkImage texture must allocate mip map levels
+ @return created SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeTextureImage(GrContext* context, GrMipMapped = GrMipMapped::kNo) const;
+
+ /** Returns raster image or lazy image. Copies SkImage backed by GPU texture into
+ CPU memory if needed. Returns original SkImage if decoded in raster bitmap,
+ or if encoded in a stream.
+
+ Returns nullptr if backed by GPU texture and copy fails.
+
+ @return raster image, lazy image, or nullptr
+ */
+ sk_sp<SkImage> makeNonTextureImage() const;
+
+ /** Returns raster image. Copies SkImage backed by GPU texture into CPU memory,
+ or decodes SkImage from lazy image. Returns original SkImage if decoded in
+ raster bitmap.
+
+ Returns nullptr if copy, decode, or pixel read fails.
+
+ @return raster image, or nullptr
+ */
+ sk_sp<SkImage> makeRasterImage() const;
+
+ /** Creates filtered SkImage. filter processes original SkImage, potentially changing
+ color, position, and size. subset is the bounds of original SkImage processed
+ by filter. clipBounds is the expected bounds of the filtered SkImage. outSubset
+ is required storage for the actual bounds of the filtered SkImage. offset is
+ required storage for translation of returned SkImage.
+
+ Returns nullptr if SkImage could not be created. If nullptr is returned, outSubset
+ and offset are undefined.
+
+ Useful for animation of SkImageFilter that varies size from frame to frame.
+ Returned SkImage is created larger than required by filter so that GPU texture
+ can be reused with different sized effects. outSubset describes the valid bounds
+ of GPU texture returned. offset translates the returned SkImage to keep subsequent
+ animation frames aligned with respect to each other.
+
+ @param context the GrContext in play - if it exists
+ @param filter how SkImage is sampled when transformed
+ @param subset bounds of SkImage processed by filter
+ @param clipBounds expected bounds of filtered SkImage
+ @param outSubset storage for returned SkImage bounds
+ @param offset storage for returned SkImage translation
+ @return filtered SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeWithFilter(GrContext* context,
+ const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const;
+
+ /** To be deprecated.
+ */
+ sk_sp<SkImage> makeWithFilter(const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const;
+
+ /** Defines a callback function, taking one parameter of type GrBackendTexture with
+ no return value. Function is called when back-end texture is to be released.
+ */
+ typedef std::function<void(GrBackendTexture)> BackendTextureReleaseProc;
+
+ /** Creates a GrBackendTexture from the provided SkImage. Returns true and
+ stores result in backendTexture and backendTextureReleaseProc if
+ texture is created; otherwise, returns false and leaves
+ backendTexture and backendTextureReleaseProc unmodified.
+
+ Call backendTextureReleaseProc after deleting backendTexture.
+ backendTextureReleaseProc cleans up auxiliary data related to returned
+ backendTexture. The caller must delete returned backendTexture after use.
+
+ If SkImage is both texture backed and singly referenced, image is returned in
+ backendTexture without conversion or making a copy. SkImage is singly referenced
+ if it was transferred solely using std::move().
+
+ If SkImage is not texture backed, returns texture with SkImage contents.
+
+ @param context GPU context
+ @param image SkImage used for texture
+ @param backendTexture storage for back-end texture
+ @param backendTextureReleaseProc storage for clean up function
+ @return true if back-end texture was created
+ */
+ static bool MakeBackendTextureFromSkImage(GrContext* context,
+ sk_sp<SkImage> image,
+ GrBackendTexture* backendTexture,
+ BackendTextureReleaseProc* backendTextureReleaseProc);
+
+ /** Deprecated.
+ */
+ enum LegacyBitmapMode {
+ kRO_LegacyBitmapMode, //!< returned bitmap is read-only and immutable
+ };
+
+ /** Deprecated.
+ Creates raster SkBitmap with same pixels as SkImage. If legacyBitmapMode is
+ kRO_LegacyBitmapMode, returned bitmap is read-only and immutable.
+ Returns true if SkBitmap is stored in bitmap. Returns false and resets bitmap if
+ SkBitmap write did not succeed.
+
+ @param bitmap storage for legacy SkBitmap
+ @param legacyBitmapMode bitmap is read-only and immutable
+ @return true if SkBitmap was created
+ */
+ bool asLegacyBitmap(SkBitmap* bitmap,
+ LegacyBitmapMode legacyBitmapMode = kRO_LegacyBitmapMode) const;
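+
+ /* A usage sketch (hypothetical "image"):
+
+ SkBitmap bitmap;
+ if (image->asLegacyBitmap(&bitmap)) {
+ // bitmap is now a read-only, immutable view of the pixels
+ }
+ */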
+
+ /** Returns true if SkImage is backed by an image-generator or other service that creates
+ and caches its pixels or texture on-demand.
+
+ @return true if SkImage is created as needed
+ */
+ bool isLazyGenerated() const;
+
+ /** Creates SkImage in target SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorSpace.
+ Otherwise, converts pixels from SkImage SkColorSpace to target SkColorSpace.
+ If SkImage colorSpace() returns nullptr, SkImage SkColorSpace is assumed to be sRGB.
+
+ @param target SkColorSpace describing color range of returned SkImage
+ @return created SkImage in target SkColorSpace
+ */
+ sk_sp<SkImage> makeColorSpace(sk_sp<SkColorSpace> target) const;
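+
+ /* A usage sketch (hypothetical "image"): convert to sRGB.
+
+ sk_sp<SkImage> srgb = image->makeColorSpace(SkColorSpace::MakeSRGB());
+ */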
+
+ /** Experimental.
+ Creates SkImage in target SkColorType and SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorType and SkColorSpace.
+
+ @param targetColorType SkColorType of returned SkImage
+ @param targetColorSpace SkColorSpace of returned SkImage
+ @return created SkImage in target SkColorType and SkColorSpace
+ */
+ sk_sp<SkImage> makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace) const;
+
+ /** Creates a new SkImage identical to this one, but with a different SkColorSpace.
+ This does not convert the underlying pixel data, so the resulting image will draw
+ differently.
+ */
+ sk_sp<SkImage> reinterpretColorSpace(sk_sp<SkColorSpace> newColorSpace) const;
+
+private:
+ SkImage(const SkImageInfo& info, uint32_t uniqueID);
+ friend class SkImage_Base;
+
+ SkImageInfo fInfo;
+ const uint32_t fUniqueID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageEncoder.h b/gfx/skia/skia/include/core/SkImageEncoder.h
new file mode 100644
index 0000000000..ec5cfdbde6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageEncoder.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoder_DEFINED
+#define SkImageEncoder_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkStream.h"
+
+/**
+ * Encode SkPixmap in the given binary image format.
+ *
+ * @param dst results are written to this stream.
+ * @param src source pixels.
+ * @param format image format, not all formats are supported.
+ * @param quality range from 0-100, this is supported by jpeg and webp.
+ * higher values correspond to improved visual quality, but less compression.
+ *
+ * @return false iff input is bad or format is unsupported.
+ *
+ * Will always return false if Skia is compiled without image
+ * encoders.
+ *
+ * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise
+ * it will use lossy.
+ *
+ * For examples of encoding an image to a file or to a block of memory,
+ * see tools/ToolUtils.h.
+ */
+SK_API bool SkEncodeImage(SkWStream* dst, const SkPixmap& src,
+ SkEncodedImageFormat format, int quality);
+
+/**
+ * The following helper function wraps SkEncodeImage().
+ */
+inline bool SkEncodeImage(SkWStream* dst, const SkBitmap& src, SkEncodedImageFormat f, int q) {
+ SkPixmap pixmap;
+ return src.peekPixels(&pixmap) && SkEncodeImage(dst, pixmap, f, q);
+}
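+
+/*
+ * A usage sketch (hypothetical file name and bitmap), writing a bitmap to a
+ * PNG file via the SkFILEWStream declared in SkStream.h:
+ *
+ * SkFILEWStream out("output.png");
+ * bool ok = out.isValid() &&
+ * SkEncodeImage(&out, bitmap, SkEncodedImageFormat::kPNG, 100);
+ */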
+
+/**
+ * Encode SkPixmap in the given binary image format.
+ *
+ * @param src source pixels.
+ * @param format image format, not all formats are supported.
+ * @param quality range from 0-100, this is supported by jpeg and webp.
+ * higher values correspond to improved visual quality, but less compression.
+ *
+ * @return encoded data or nullptr if input is bad or format is unsupported.
+ *
+ * Will always return nullptr if Skia is compiled without image
+ * encoders.
+ *
+ * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise
+ * it will use lossy.
+ */
+SK_API sk_sp<SkData> SkEncodePixmap(const SkPixmap& src, SkEncodedImageFormat format, int quality);
+
+/**
+ * Helper that extracts the pixmap from the bitmap, and then calls SkEncodePixmap()
+ */
+SK_API sk_sp<SkData> SkEncodeBitmap(const SkBitmap& src, SkEncodedImageFormat format, int quality);
+
+#endif // SkImageEncoder_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImageFilter.h b/gfx/skia/skia/include/core/SkImageFilter.h
new file mode 100644
index 0000000000..d6da4d7238
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageFilter.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilter_DEFINED
+#define SkImageFilter_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+
+class SkColorFilter;
+
+/**
+ * Base class for image filters. If one is installed in the paint, then all drawing occurs as
+ * usual, but it is as if the drawing happened into an offscreen (before the xfermode is applied).
+ * This offscreen bitmap will then be handed to the imagefilter, which in turn creates a new bitmap
+ * which is what will finally be drawn to the device (using the original xfermode).
+ *
+ * The local space of image filters matches the local space of the drawn geometry. For instance if
+ * there is rotation on the canvas, the blur will be computed along those rotated axes and not in
+ * the device space. In order to achieve this result, the actual drawing of the geometry may happen
+ * in an unrotated coordinate system so that the filtered image can be computed more easily, and
+ * then it will be post transformed to match what would have been produced if the geometry were
+ * drawn with the total canvas matrix to begin with.
+ */
+class SK_API SkImageFilter : public SkFlattenable {
+public:
+ class CropRect {
+ public:
+ enum CropEdge {
+ kHasLeft_CropEdge = 0x01,
+ kHasTop_CropEdge = 0x02,
+ kHasWidth_CropEdge = 0x04,
+ kHasHeight_CropEdge = 0x08,
+ kHasAll_CropEdge = 0x0F,
+ };
+ CropRect() {}
+ explicit CropRect(const SkRect& rect, uint32_t flags = kHasAll_CropEdge)
+ : fRect(rect), fFlags(flags) {}
+ uint32_t flags() const { return fFlags; }
+ const SkRect& rect() const { return fRect; }
+
+ /**
+ * Apply this cropRect to the imageBounds. If a given edge of the cropRect is not set, then
+ * the corresponding edge from imageBounds will be used. If "embiggen" is true, the crop
+ * rect is allowed to enlarge the size of the rect, otherwise it may only reduce the rect.
+ * Filters that can affect transparent black should pass "true", while all other filters
+ * should pass "false".
+ *
+ * Note: imageBounds is in "device" space, as the output cropped rectangle will be, so the
+ * matrix is ignored for those. It is only applied to the cropRect's bounds.
+ */
+ void applyTo(const SkIRect& imageBounds, const SkMatrix& matrix, bool embiggen,
+ SkIRect* cropped) const;
+
+ private:
+ SkRect fRect;
+ uint32_t fFlags;
+ };
+
+ enum MapDirection {
+ kForward_MapDirection,
+ kReverse_MapDirection,
+ };
+ /**
+ * Map a device-space rect recursively forward or backward through the filter DAG.
+ * kForward_MapDirection is used to determine which pixels of the destination canvas a source
+ * image rect would touch after filtering. kReverse_MapDirection is used to determine which rect
+ * of the source image would be required to fill the given rect (typically, clip bounds). Used
+ * for clipping and temp-buffer allocations, so the result need not be exact, but should never
+ * be smaller than the real answer. The default implementation recursively unions all input
+ * bounds, or returns the source rect if no inputs.
+ *
+ * In kReverse mode, 'inputRect' is the device-space bounds of the input pixels. In kForward
+ * mode it should always be null. If 'inputRect' is null in kReverse mode the resulting answer
+ * may be incorrect.
+ */
+ SkIRect filterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect = nullptr) const;
+
+ /**
+ * Returns whether this image filter is a color filter and puts the color filter into the
+ * "filterPtr" parameter if it can. Does nothing otherwise.
+ * If this returns false, then the filterPtr is unchanged.
+ * If this returns true, then if filterPtr is not null, it must be set to a ref'd colorfilter
+ * (i.e. it may not be set to NULL).
+ */
+ bool isColorFilterNode(SkColorFilter** filterPtr) const;
+
+ // DEPRECATED : use isColorFilterNode() instead
+ bool asColorFilter(SkColorFilter** filterPtr) const {
+ return this->isColorFilterNode(filterPtr);
+ }
+
+ /**
+ * Returns true (and optionally returns a ref'd filter) if this imagefilter can be completely
+ * replaced by the returned colorfilter. i.e. the two effects will affect drawing in the same
+ * way.
+ */
+ bool asAColorFilter(SkColorFilter** filterPtr) const;
+
+ /**
+ * Returns the number of inputs this filter will accept (some inputs can be NULL).
+ */
+ int countInputs() const;
+
+ /**
+ * Returns the input filter at a given index, or NULL if no input is connected. The indices
+ * used are filter-specific.
+ */
+ const SkImageFilter* getInput(int i) const;
+
+ // Default impl returns union of all input bounds.
+ virtual SkRect computeFastBounds(const SkRect& bounds) const;
+
+ // Can this filter DAG compute the resulting bounds of an object-space rectangle?
+ bool canComputeFastBounds() const;
+
+ /**
+ * If this filter can be represented by another filter + a localMatrix, return that filter,
+ * else return null.
+ */
+ sk_sp<SkImageFilter> makeWithLocalMatrix(const SkMatrix& matrix) const;
+
+ /**
+ * Return an imagefilter which transforms its input by the given matrix.
+ * DEPRECATED: Use include/effects/SkImageFilters::MatrixTransform
+ */
+ static sk_sp<SkImageFilter> MakeMatrixFilter(const SkMatrix& matrix,
+ SkFilterQuality quality,
+ sk_sp<SkImageFilter> input);
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkImageFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkImageFilter_Type;
+ }
+
+ static sk_sp<SkImageFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkImageFilter>(static_cast<SkImageFilter*>(
+ SkFlattenable::Deserialize(kSkImageFilter_Type, data, size, procs).release()));
+ }
+
+protected:
+
+ sk_sp<SkImageFilter> refMe() const {
+ return sk_ref_sp(const_cast<SkImageFilter*>(this));
+ }
+
+private:
+ friend class SkImageFilter_Base;
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageGenerator.h b/gfx/skia/skia/include/core/SkImageGenerator.h
new file mode 100644
index 0000000000..77eb6b7810
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageGenerator.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageGenerator_DEFINED
+#define SkImageGenerator_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkYUVAIndex.h"
+#include "include/core/SkYUVASizeInfo.h"
+
+class GrRecordingContext;
+class GrTextureProxy;
+class GrSamplerState;
+class SkBitmap;
+class SkData;
+class SkMatrix;
+class SkPaint;
+class SkPicture;
+
+class SK_API SkImageGenerator {
+public:
+ /**
+ * The PixelRef which takes ownership of this SkImageGenerator
+ * will call the image generator's destructor.
+ */
+ virtual ~SkImageGenerator() { }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * Return a ref to the encoded (i.e. compressed) representation
+ * of this data.
+ *
+     *  If non-NULL is returned, the reference is managed by the returned
+     *  sk_sp, which unref()s the data automatically.
+ */
+ sk_sp<SkData> refEncodedData() {
+ return this->onRefEncodedData();
+ }
+
+ /**
+ * Return the ImageInfo associated with this generator.
+ */
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Can this generator be used to produce images that will be drawable to the specified context
+ * (or to CPU, if context is nullptr)?
+ */
+ bool isValid(GrContext* context) const {
+ return this->onIsValid(context);
+ }
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will return false.
+ *
+ * @return true on success.
+ */
+ bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
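+
+ /* A usage sketch (hypothetical generator "gen"): decode into caller-owned
+ memory at the generator's native size.
+
+ const SkImageInfo& info = gen->getInfo();
+ std::vector<uint8_t> storage(info.computeMinByteSize());
+ bool ok = gen->getPixels(info, storage.data(), info.minRowBytes());
+ */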
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, V, and A planes.
+ * @param yuvaIndices How the YUVA planes are organized/used
+ * @param colorSpace Output parameter.
+ */
+ bool queryYUVA8(SkYUVASizeInfo* sizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace) const;
+
+ /**
+ * Returns true on success and false on failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call queryYUVA8().
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param yuvaIndices Needs to exactly match the values returned by the query.
+ * @param planes Memory for the Y, U, V, and A planes. Note that, depending on the
+ * settings in yuvaIndices, anywhere from 1..4 planes could be returned.
+ */
+ bool getYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+ const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ void* planes[]);
+
+#if SK_SUPPORT_GPU
+ /**
+ * If the generator can natively/efficiently return its pixels as a GPU image (backed by a
+ * texture) this will return that image. If not, this will return NULL.
+ *
+ * This routine also supports retrieving only a subset of the pixels. That subset is specified
+ * by the following rectangle:
+ *
+ * subset = SkIRect::MakeXYWH(origin.x(), origin.y(), info.width(), info.height())
+ *
+     *  If subset is not contained inside the generator's bounds, this returns nullptr:
+     *
+     *      whole = SkIRect::MakeWH(getInfo().width(), getInfo().height())
+     *      if (!whole.contains(subset)) {
+     *          return nullptr;
+     *      }
+ *
+ * Regarding the GrContext parameter:
+ *
+ * It must be non-NULL. The generator should only succeed if:
+ * - its internal context is the same
+ * - it can somehow convert its texture into one that is valid for the provided context.
+ *
+ * If the willNeedMipMaps flag is true, the generator should try to create a TextureProxy that
+ * at least has the mip levels allocated and the base layer filled in. If this is not possible,
+ * the generator is allowed to return a non mipped proxy, but this will have some additional
+ * overhead in later allocating mips and copying of the base layer.
+ */
+ sk_sp<GrTextureProxy> generateTexture(GrRecordingContext*, const SkImageInfo& info,
+ const SkIPoint& origin,
+ bool willNeedMipMaps);
+#endif
+
+ /**
+ * If the default image decoder system can interpret the specified (encoded) data, then
+ * this returns a new ImageGenerator for it. Otherwise this returns NULL. Either way
+ * the caller is still responsible for managing their ownership of the data.
+ */
+ static std::unique_ptr<SkImageGenerator> MakeFromEncoded(sk_sp<SkData>);
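+
+ /* A usage sketch (hypothetical file name):
+
+ sk_sp<SkData> data = SkData::MakeFromFileName("image.png");
+ std::unique_ptr<SkImageGenerator> gen =
+ SkImageGenerator::MakeFromEncoded(std::move(data));
+ */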
+
+ /** Return a new image generator backed by the specified picture. If the size is empty or
+ * the picture is NULL, this returns NULL.
+ * The optional matrix and paint arguments are passed to drawPicture() at rasterization
+ * time.
+ */
+ static std::unique_ptr<SkImageGenerator> MakeFromPicture(const SkISize&, sk_sp<SkPicture>,
+ const SkMatrix*, const SkPaint*,
+ SkImage::BitDepth,
+ sk_sp<SkColorSpace>);
+
+protected:
+ static constexpr int kNeedNewImageUniqueID = 0;
+
+ SkImageGenerator(const SkImageInfo& info, uint32_t uniqueId = kNeedNewImageUniqueID);
+
+ virtual sk_sp<SkData> onRefEncodedData() { return nullptr; }
+ struct Options {};
+ virtual bool onGetPixels(const SkImageInfo&, void*, size_t, const Options&) { return false; }
+ virtual bool onIsValid(GrContext*) const { return true; }
+ virtual bool onQueryYUVA8(SkYUVASizeInfo*, SkYUVAIndex[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace*) const { return false; }
+ virtual bool onGetYUVA8Planes(const SkYUVASizeInfo&, const SkYUVAIndex[SkYUVAIndex::kIndexCount],
+ void*[4] /*planes*/) { return false; }
+#if SK_SUPPORT_GPU
+ enum class TexGenType {
+        kNone,      // image generator does not implement onGenerateTexture
+        kCheap,     // onGenerateTexture is implemented and it is fast (does not render offscreen)
+        kExpensive, // onGenerateTexture is implemented and it is relatively slow
+ };
+
+ virtual TexGenType onCanGenerateTexture() const { return TexGenType::kNone; }
+ virtual sk_sp<GrTextureProxy> onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
+ const SkIPoint&,
+ bool willNeedMipMaps); // returns nullptr
+#endif
+
+private:
+ const SkImageInfo fInfo;
+ const uint32_t fUniqueID;
+
+ friend class SkImage_Lazy;
+
+ // This is our default impl, which may be different on different platforms.
+    // It is called from MakeFromEncoded() after it has checked for any runtime factory.
+    // The SkData will never be NULL, as that will have been checked by MakeFromEncoded.
+ static std::unique_ptr<SkImageGenerator> MakeFromEncodedImpl(sk_sp<SkData>);
+
+ SkImageGenerator(SkImageGenerator&&) = delete;
+ SkImageGenerator(const SkImageGenerator&) = delete;
+ SkImageGenerator& operator=(SkImageGenerator&&) = delete;
+ SkImageGenerator& operator=(const SkImageGenerator&) = delete;
+};
+
+#endif // SkImageGenerator_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImageInfo.h b/gfx/skia/skia/include/core/SkImageInfo.h
new file mode 100644
index 0000000000..fa4578439d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageInfo.h
@@ -0,0 +1,743 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageInfo_DEFINED
+#define SkImageInfo_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+
+#include "include/private/SkTFitsIn.h"
+#include "include/private/SkTo.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+/** \enum SkImageInfo::SkAlphaType
+ Describes how to interpret the alpha component of a pixel. A pixel may be
+ opaque, or may carry an alpha value describing multiple levels of transparency.
+
+ In simple blending, alpha weights the draw color and the destination
+ color to create a new color. If alpha describes a weight from zero to one:
+
+ new color = draw color * alpha + destination color * (1 - alpha)
+
+ In practice alpha is encoded in two or more bits, where 1.0 equals all bits set.
+
+ RGB may have alpha included in each component value; the stored
+ value is the original RGB multiplied by alpha. Premultiplied color
+ components improve performance.
+*/
+enum SkAlphaType {
+ kUnknown_SkAlphaType, //!< uninitialized
+ kOpaque_SkAlphaType, //!< pixel is opaque
+ kPremul_SkAlphaType, //!< pixel components are premultiplied by alpha
+ kUnpremul_SkAlphaType, //!< pixel components are independent of alpha
+ kLastEnum_SkAlphaType = kUnpremul_SkAlphaType, //!< last valid value
+};
+
+/** Returns true if SkAlphaType equals kOpaque_SkAlphaType. kOpaque_SkAlphaType is a
+ hint that the SkColorType is opaque, or that all alpha values are set to
+ their 1.0 equivalent. If SkAlphaType is kOpaque_SkAlphaType, and SkColorType is not
+ opaque, then the result of drawing any pixel with an alpha value less than
+ 1.0 is undefined.
+
+ @param at one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @return true if at equals kOpaque_SkAlphaType
+*/
+static inline bool SkAlphaTypeIsOpaque(SkAlphaType at) {
+ return kOpaque_SkAlphaType == at;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Temporary macro that allows us to add new color types without breaking Chrome compile. */
+#define SK_EXTENDED_COLOR_TYPES
+
+/** \enum SkImageInfo::SkColorType
+ Describes how pixel bits encode color. A pixel may be an alpha mask, a
+ grayscale, RGB, or ARGB.
+
+ kN32_SkColorType selects the native 32-bit ARGB format. On little endian
+ processors, pixels containing 8-bit ARGB components pack into 32-bit
+ kBGRA_8888_SkColorType. On big endian processors, pixels pack into 32-bit
+ kRGBA_8888_SkColorType.
+*/
+enum SkColorType {
+ kUnknown_SkColorType, //!< uninitialized
+ kAlpha_8_SkColorType, //!< pixel with alpha in 8-bit byte
+ kRGB_565_SkColorType, //!< pixel with 5 bits red, 6 bits green, 5 bits blue, in 16-bit word
+ kARGB_4444_SkColorType, //!< pixel with 4 bits for alpha, red, green, blue; in 16-bit word
+ kRGBA_8888_SkColorType, //!< pixel with 8 bits for red, green, blue, alpha; in 32-bit word
+ kRGB_888x_SkColorType, //!< pixel with 8 bits each for red, green, blue; in 32-bit word
+ kBGRA_8888_SkColorType, //!< pixel with 8 bits for blue, green, red, alpha; in 32-bit word
+ kRGBA_1010102_SkColorType, //!< 10 bits for red, green, blue; 2 bits for alpha; in 32-bit word
+ kRGB_101010x_SkColorType, //!< pixel with 10 bits each for red, green, blue; in 32-bit word
+ kGray_8_SkColorType, //!< pixel with grayscale level in 8-bit byte
+ kRGBA_F16Norm_SkColorType, //!< pixel with half floats in [0,1] for red, green, blue, alpha; in 64-bit word
+ kRGBA_F16_SkColorType, //!< pixel with half floats for red, green, blue, alpha; in 64-bit word
+ kRGBA_F32_SkColorType, //!< pixel using C float for red, green, blue, alpha; in 128-bit word
+
+    // The following 6 color types are just for reading from - not for rendering to
+    kR8G8_unorm_SkColorType,         //!< pixel with a uint8_t for red and green
+
+    kA16_float_SkColorType,          //!< pixel with a half float for alpha
+    kR16G16_float_SkColorType,       //!< pixel with a half float for red and green
+
+    kA16_unorm_SkColorType,          //!< pixel with a little endian uint16_t for alpha
+    kR16G16_unorm_SkColorType,       //!< pixel with a little endian uint16_t for red and green
+    kR16G16B16A16_unorm_SkColorType, //!< pixel with a little endian uint16_t for red, green, blue, and alpha
+
+ kLastEnum_SkColorType = kR16G16B16A16_unorm_SkColorType, //!< last valid value
+
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+ kN32_SkColorType = kBGRA_8888_SkColorType,//!< native ARGB 32-bit encoding
+
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+ kN32_SkColorType = kRGBA_8888_SkColorType,//!< native ARGB 32-bit encoding
+
+#else
+ kN32_SkColorType = kBGRA_8888_SkColorType,
+#endif
+};
+
+/** Returns the number of bytes required to store a pixel, including unused padding.
+ Returns zero if ct is kUnknown_SkColorType or invalid.
+
+ @param ct one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @return bytes per pixel
+*/
+SK_API int SkColorTypeBytesPerPixel(SkColorType ct);
+
+/** Returns true if SkColorType always decodes alpha to 1.0, making the pixel
+ fully opaque. If true, SkColorType does not reserve bits to encode alpha.
+
+ @param ct one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @return true if alpha is always set to 1.0
+*/
+SK_API bool SkColorTypeIsAlwaysOpaque(SkColorType ct);
+
+/** Returns true if canonical can be set to a valid SkAlphaType for colorType. If
+ there is more than one valid canonical SkAlphaType, set to alphaType, if valid.
+ If true is returned and canonical is not nullptr, store valid SkAlphaType.
+
+ Returns false only if alphaType is kUnknown_SkAlphaType, color type is not
+ kUnknown_SkColorType, and SkColorType is not always opaque. If false is returned,
+ canonical is ignored.
+
+ For kUnknown_SkColorType: set canonical to kUnknown_SkAlphaType and return true.
+ For kAlpha_8_SkColorType: set canonical to kPremul_SkAlphaType or
+ kOpaque_SkAlphaType and return true if alphaType is not kUnknown_SkAlphaType.
+ For kRGB_565_SkColorType, kRGB_888x_SkColorType, kRGB_101010x_SkColorType, and
+ kGray_8_SkColorType: set canonical to kOpaque_SkAlphaType and return true.
+ For kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, and kRGBA_F16_SkColorType: set canonical to alphaType
+ and return true if alphaType is not kUnknown_SkAlphaType.
+
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param alphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param canonical storage for SkAlphaType
+ @return true if valid SkAlphaType can be associated with colorType
+*/
+SK_API bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical = nullptr);
+
+/** \enum SkImageInfo::SkYUVColorSpace
+ Describes color range of YUV pixels. The color mapping from YUV to RGB varies
+ depending on the source. YUV pixels may be generated by JPEG images, standard
+ video streams, or high definition video streams. Each has its own mapping from
+ YUV to RGB.
+
+ JPEG YUV values encode the full range of 0 to 255 for all three components.
+ Video YUV values range from 16 to 235 for all three components. Details of
+ encoding and conversion to RGB are described in YCbCr color space.
+
+ The identity colorspace exists to provide a utility mapping from Y to R, U to G and V to B.
+ It can be used to visualize the YUV planes or to explicitly post process the YUV channels.
+*/
+enum SkYUVColorSpace {
+ kJPEG_SkYUVColorSpace, //!< describes full range
+ kRec601_SkYUVColorSpace, //!< describes SDTV range
+ kRec709_SkYUVColorSpace, //!< describes HDTV range
+ kIdentity_SkYUVColorSpace, //!< maps Y->R, U->G, V->B
+
+ kLastEnum_SkYUVColorSpace = kIdentity_SkYUVColorSpace, //!< last valid value
+};
+
+/** \struct SkColorInfo
+ Describes pixel and encoding. SkImageInfo can be created from SkColorInfo by
+ providing dimensions.
+
+ It encodes how pixel bits describe alpha, transparency; color components red, blue,
+ and green; and SkColorSpace, the range and linearity of colors.
+*/
+class SK_API SkColorInfo {
+public:
+ /** Creates an SkColorInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ and no SkColorSpace.
+
+ @return empty SkColorInfo
+ */
+ SkColorInfo() = default;
+
+ /** Creates SkColorInfo from SkColorType ct, SkAlphaType at, and optionally SkColorSpace cs.
+
+ If SkColorSpace cs is nullptr and SkColorInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+ @return created SkColorInfo
+ */
+ SkColorInfo(SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs)
+ : fColorSpace(std::move(cs)), fColorType(ct), fAlphaType(at) {}
+
+ SkColorInfo(const SkColorInfo&) = default;
+ SkColorInfo(SkColorInfo&&) = default;
+
+ SkColorInfo& operator=(const SkColorInfo&) = default;
+ SkColorInfo& operator=(SkColorInfo&&) = default;
+
+ SkColorSpace* colorSpace() const { return fColorSpace.get(); }
+ sk_sp<SkColorSpace> refColorSpace() const { return fColorSpace; }
+ SkColorType colorType() const { return fColorType; }
+ SkAlphaType alphaType() const { return fAlphaType; }
+
+ bool isOpaque() const { return SkAlphaTypeIsOpaque(fAlphaType); }
+
+ bool gammaCloseToSRGB() const { return fColorSpace && fColorSpace->gammaCloseToSRGB(); }
+
+ /** Does other represent the same color type, alpha type, and color space? */
+ bool operator==(const SkColorInfo& other) const {
+ return fColorType == other.fColorType && fAlphaType == other.fAlphaType &&
+ SkColorSpace::Equals(fColorSpace.get(), other.fColorSpace.get());
+ }
+
+ /** Does other represent a different color type, alpha type, or color space? */
+ bool operator!=(const SkColorInfo& other) const { return !(*this == other); }
+
+ /** Creates SkColorInfo with same SkColorType, SkColorSpace, with SkAlphaType set
+ to newAlphaType.
+
+ Created SkColorInfo contains newAlphaType even if it is incompatible with
+ SkColorType, in which case SkAlphaType in SkColorInfo is ignored.
+ */
+ SkColorInfo makeAlphaType(SkAlphaType newAlphaType) const {
+ return SkColorInfo(this->colorType(), newAlphaType, this->refColorSpace());
+ }
+
+ /** Creates new SkColorInfo with same SkAlphaType, SkColorSpace, with SkColorType
+ set to newColorType.
+ */
+ SkColorInfo makeColorType(SkColorType newColorType) const {
+ return SkColorInfo(newColorType, this->alphaType(), this->refColorSpace());
+ }
+
+ /** Creates SkColorInfo with same SkAlphaType, SkColorType, with SkColorSpace
+ set to cs. cs may be nullptr.
+ */
+ SkColorInfo makeColorSpace(sk_sp<SkColorSpace> cs) const {
+ return SkColorInfo(this->colorType(), this->alphaType(), std::move(cs));
+ }
+
+ /** Returns number of bytes per pixel required by SkColorType.
+ Returns zero if colorType() is kUnknown_SkColorType.
+
+ @return bytes in pixel
+ */
+ int bytesPerPixel() const;
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3, 4; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const;
+
+private:
+ sk_sp<SkColorSpace> fColorSpace;
+ SkColorType fColorType = kUnknown_SkColorType;
+ SkAlphaType fAlphaType = kUnknown_SkAlphaType;
+};
+
+/** \struct SkImageInfo
+ Describes pixel dimensions and encoding. SkBitmap, SkImage, SkPixmap, and SkSurface
+ can be created from SkImageInfo. SkImageInfo can be retrieved from SkBitmap and
+ SkPixmap, but not from SkImage and SkSurface. For example, SkImage and SkSurface
+ implementations may defer pixel depth, so may not completely specify SkImageInfo.
+
+ SkImageInfo contains dimensions, the pixel integral width and height. It encodes
+ how pixel bits describe alpha, transparency; color components red, blue,
+ and green; and SkColorSpace, the range and linearity of colors.
+*/
+struct SK_API SkImageInfo {
+public:
+
+ /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ a width and height of zero, and no SkColorSpace.
+
+ @return empty SkImageInfo
+ */
+ SkImageInfo() = default;
+
+ /** Creates SkImageInfo from integral dimensions width and height, SkColorType ct,
+ SkAlphaType at, and optionally SkColorSpace cs.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param ct one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param at one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo Make(int width, int height, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs = nullptr) {
+ return SkImageInfo({width, height}, {ct, at, std::move(cs)});
+ }
+ static SkImageInfo Make(SkISize dimensions, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs = nullptr) {
+ return SkImageInfo(dimensions, {ct, at, std::move(cs)});
+ }
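+
+ /* A usage sketch (hypothetical values):
+
+ SkImageInfo info = SkImageInfo::Make(640, 480, kRGBA_8888_SkColorType,
+ kPremul_SkAlphaType);
+ */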
+
+ /** Creates SkImageInfo from integral dimensions and SkColorInfo colorInfo.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param dimensions pixel column and row count; each must be zero or greater
+ @param colorInfo the pixel encoding consisting of SkColorType, SkAlphaType, and
+ SkColorSpace (which may be nullptr)
+ @return created SkImageInfo
+ */
+ static SkImageInfo Make(SkISize dimensions, const SkColorInfo& colorInfo) {
+ return SkImageInfo(dimensions, colorInfo);
+ }
+ static SkImageInfo Make(SkISize dimensions, SkColorInfo&& colorInfo) {
+ return SkImageInfo(dimensions, std::move(colorInfo));
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ SkAlphaType at, and optionally SkColorSpace cs. kN32_SkColorType will equal either
+ kBGRA_8888_SkColorType or kRGBA_8888_SkColorType, whichever is optimal.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param at one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32(int width, int height, SkAlphaType at,
+ sk_sp<SkColorSpace> cs = nullptr) {
+ return Make({width, height}, kN32_SkColorType, at, std::move(cs));
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ SkAlphaType at, with sRGB SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param at one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeS32(int width, int height, SkAlphaType at);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ kPremul_SkAlphaType, with optional SkColorSpace.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32Premul(int width, int height, sk_sp<SkColorSpace> cs = nullptr) {
+ return Make({width, height}, kN32_SkColorType, kPremul_SkAlphaType, std::move(cs));
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ kPremul_SkAlphaType, with SkColorSpace set to nullptr.
+
+ If SkImageInfo is part of drawing source: SkColorSpace defaults to sRGB, mapping
+ into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param size width and height, each must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32Premul(const SkISize& size) {
+ return MakeN32Premul(size.width(), size.height());
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kAlpha_8_SkColorType,
+ kPremul_SkAlphaType, with SkColorSpace set to nullptr.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeA8(int width, int height) {
+ return Make({width, height}, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr);
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kUnknown_SkColorType,
+ kUnknown_SkAlphaType, with SkColorSpace set to nullptr.
+
+ Returned SkImageInfo as part of source does not draw, and as part of destination
+ can not be drawn to.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeUnknown(int width, int height) {
+ return Make({width, height}, kUnknown_SkColorType, kUnknown_SkAlphaType, nullptr);
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height set to zero,
+ kUnknown_SkColorType, kUnknown_SkAlphaType, with SkColorSpace set to nullptr.
+
+ Returned SkImageInfo as part of source does not draw, and as part of destination
+ can not be drawn to.
+
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeUnknown() {
+ return MakeUnknown(0, 0);
+ }
+
+ /** Returns pixel count in each row.
+
+ @return pixel width
+ */
+ int width() const { return fDimensions.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height
+ */
+ int height() const { return fDimensions.height(); }
+
+ /** Returns SkColorType, one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType.
+
+ @return SkColorType
+ */
+ SkColorType colorType() const { return fColorInfo.colorType(); }
+
+ /** Returns SkAlphaType, one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType.
+
+ @return SkAlphaType
+ */
+ SkAlphaType alphaType() const { return fColorInfo.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors. The reference count of
+ SkColorSpace is unchanged. The returned SkColorSpace is immutable.
+
+ @return SkColorSpace, or nullptr
+ */
+ SkColorSpace* colorSpace() const { return fColorInfo.colorSpace(); }
+
+ /** Returns smart pointer to SkColorSpace, the range of colors. The smart pointer
+ tracks the number of objects sharing this SkColorSpace reference so the memory
+ is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const { return fColorInfo.refColorSpace(); }
+
+ /** Returns true if SkImageInfo describes an empty area of pixels: either
+ width or height is zero or smaller.
+
+ @return true if either dimension is zero or smaller
+ */
+ bool isEmpty() const { return fDimensions.isEmpty(); }
+
+ /** Returns the dimensionless SkColorInfo that represents the same color type,
+ alpha type, and color space as this SkImageInfo.
+ */
+ const SkColorInfo& colorInfo() const { return fColorInfo; }
+
+ /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their
+ alpha value is implicitly or explicitly 1.0. If true, and all pixels are
+ not opaque, Skia may draw incorrectly.
+
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const { return fColorInfo.isOpaque(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return fDimensions; }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeSize(fDimensions); }
+
+ /** Returns true if associated SkColorSpace is not nullptr, and SkColorSpace gamma
+ is approximately the same as sRGB.
+
+ @return true if SkColorSpace gamma is approximately the same as sRGB
+ */
+ bool gammaCloseToSRGB() const { return fColorInfo.gammaCloseToSRGB(); }
+
+ /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType,
+ with dimensions set to width and height.
+
+ @param newWidth pixel column count; must be zero or greater
+ @param newHeight pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ SkImageInfo makeWH(int newWidth, int newHeight) const {
+ return Make({newWidth, newHeight}, fColorInfo);
+ }
+
+ /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType,
+ with dimensions set to newDimensions.
+
+ @param newSize pixel column and row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ SkImageInfo makeDimensions(SkISize newSize) const {
+ return Make(newSize, fColorInfo);
+ }
+
+ /** Creates SkImageInfo with same SkColorType, SkColorSpace, width, and height,
+ with SkAlphaType set to newAlphaType.
+
+ Created SkImageInfo contains newAlphaType even if it is incompatible with
+ SkColorType, in which case SkAlphaType in SkImageInfo is ignored.
+
+ @param newAlphaType one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType
+ @return created SkImageInfo
+ */
+ SkImageInfo makeAlphaType(SkAlphaType newAlphaType) const {
+ return Make(fDimensions, fColorInfo.makeAlphaType(newAlphaType));
+ }
+
+ /** Creates SkImageInfo with same SkAlphaType, SkColorSpace, width, and height,
+ with SkColorType set to newColorType.
+
+ @param newColorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType,
+ kRGB_101010x_SkColorType, kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @return created SkImageInfo
+ */
+ SkImageInfo makeColorType(SkColorType newColorType) const {
+ return Make(fDimensions, fColorInfo.makeColorType(newColorType));
+ }
+
+ /** Creates SkImageInfo with same SkAlphaType, SkColorType, width, and height,
+ with SkColorSpace set to cs.
+
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ SkImageInfo makeColorSpace(sk_sp<SkColorSpace> cs) const {
+ return Make(fDimensions, fColorInfo.makeColorSpace(std::move(cs)));
+ }
+
+ /** Returns number of bytes per pixel required by SkColorType.
+ Returns zero if colorType() is kUnknown_SkColorType.
+
+ @return bytes in pixel
+ */
+ int bytesPerPixel() const { return fColorInfo.bytesPerPixel(); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3, 4; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fColorInfo.shiftPerPixel(); }
+
+ /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which
+ specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit
+ in 31 bits.
+
+ @return width() times bytesPerPixel() as unsigned 64-bit integer
+ */
+ uint64_t minRowBytes64() const { return sk_64_mul(this->width(), this->bytesPerPixel()); }
+
+ /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which
+ specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit
+ in 31 bits.
+
+ @return width() times bytesPerPixel() as signed 32-bit integer
+ */
+ size_t minRowBytes() const {
+ uint64_t minRowBytes = this->minRowBytes64();
+ if (!SkTFitsIn<int32_t>(minRowBytes)) {
+ return 0;
+ }
+ return SkTo<int32_t>(minRowBytes);
+ }
+
+ /** Returns byte offset of pixel from pixel base address.
+
+ Asserts in debug build if x or y is outside of bounds. Does not assert if
+ rowBytes is smaller than minRowBytes(), even though result may be incorrect.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @param rowBytes size of pixel row or larger
+ @return offset within pixel array
+ */
+ size_t computeOffset(int x, int y, size_t rowBytes) const;
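+
+ /* Conceptually (a sketch, not a normative definition), for in-bounds x and y:
+
+ offset == y * rowBytes + (x << shiftPerPixel())
+
+ e.g. for kN32 (4 bytes per pixel, shift 2) with x = 3, y = 2, rowBytes = 400:
+ 2 * 400 + (3 << 2) == 812.
+ */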
+
+ /** Compares SkImageInfo with other, and returns true if width, height, SkColorType,
+ SkAlphaType, and SkColorSpace are equivalent.
+
+ @param other SkImageInfo to compare
+ @return true if SkImageInfo equals other
+ */
+ bool operator==(const SkImageInfo& other) const {
+ return fDimensions == other.fDimensions && fColorInfo == other.fColorInfo;
+ }
+
+ /** Compares SkImageInfo with other, and returns true if width, height, SkColorType,
+ SkAlphaType, and SkColorSpace are not equivalent.
+
+ @param other SkImageInfo to compare
+ @return true if SkImageInfo is not equal to other
+ */
+ bool operator!=(const SkImageInfo& other) const {
+ return !(*this == other);
+ }
+
+ /** Returns storage required by pixel array, given SkImageInfo dimensions, SkColorType,
+ and rowBytes. rowBytes is assumed to be at least as large as minRowBytes().
+
+ Returns zero if height is zero.
+ Returns SIZE_MAX if answer exceeds the range of size_t.
+
+ @param rowBytes size of pixel row or larger
+ @return memory required by pixel buffer
+ */
+ size_t computeByteSize(size_t rowBytes) const;
+
+ /** Returns storage required by pixel array, given SkImageInfo dimensions, and
+ SkColorType. Uses minRowBytes() to compute bytes for pixel row.
+
+ Returns zero if height is zero.
+ Returns SIZE_MAX if answer exceeds the range of size_t.
+
+ @return least memory required by pixel buffer
+ */
+ size_t computeMinByteSize() const {
+ return this->computeByteSize(this->minRowBytes());
+ }
+
+ /** Returns true if byteSize equals SIZE_MAX. computeByteSize() and
+ computeMinByteSize() return SIZE_MAX if size_t can not hold buffer size.
+
+ @param byteSize result of computeByteSize() or computeMinByteSize()
+ @return true if computeByteSize() or computeMinByteSize() result exceeds size_t
+ */
+ static bool ByteSizeOverflowed(size_t byteSize) {
+ return SIZE_MAX == byteSize;
+ }
+
+ /** Returns true if rowBytes is at least as large as width times pixel size.
+
+ @param rowBytes size of pixel row or larger
+ @return true if rowBytes is large enough to contain pixel row
+ */
+ bool validRowBytes(size_t rowBytes) const {
+ return rowBytes >= this->minRowBytes64();
+ }
+
+ /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ a width and height of zero, and no SkColorSpace.
+ */
+ void reset() { *this = {}; }
+
+ /** Asserts if internal values are illegal or inconsistent. Only available if
+ SK_DEBUG is defined at compile time.
+ */
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ SkColorInfo fColorInfo;
+ SkISize fDimensions = {0, 0};
+
+ SkImageInfo(SkISize dimensions, const SkColorInfo& colorInfo)
+ : fColorInfo(colorInfo), fDimensions(dimensions) {}
+
+ SkImageInfo(SkISize dimensions, SkColorInfo&& colorInfo)
+ : fColorInfo(std::move(colorInfo)), fDimensions(dimensions) {}
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMallocPixelRef.h b/gfx/skia/skia/include/core/SkMallocPixelRef.h
new file mode 100644
index 0000000000..df6ba0d90a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMallocPixelRef.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMallocPixelRef_DEFINED
+#define SkMallocPixelRef_DEFINED
+
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+class SkData;
+struct SkImageInfo;
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+*/
+namespace SkMallocPixelRef {
+ /**
+ * Return a new SkMallocPixelRef, automatically allocating storage for the
+ * pixels. If rowBytes are 0, an optimal value will be chosen automatically.
+ * If rowBytes is > 0, then it will be respected, or NULL will be returned
+ * if rowBytes is invalid for the specified info.
+ *
+ * All pixel bytes are zeroed.
+ *
+ * Returns NULL on failure.
+ */
+ SK_API sk_sp<SkPixelRef> MakeAllocate(const SkImageInfo&, size_t rowBytes);
+
+ /**
+ * Return a new SkMallocPixelRef that will use the provided SkData and
+ * rowBytes as pixel storage. The SkData will be ref()ed and on
+ * destruction of the PixelRef, the SkData will be unref()ed.
+ *
+ * Returns NULL on failure.
+ */
+ SK_API sk_sp<SkPixelRef> MakeWithData(const SkImageInfo&, size_t rowBytes, sk_sp<SkData> data);
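+
+    /* A usage sketch (hypothetical dimensions): allocate zeroed 64x64 N32 storage.
+     *
+     *     SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
+     *     sk_sp<SkPixelRef> ref =
+     *             SkMallocPixelRef::MakeAllocate(info, info.minRowBytes());
+     */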
+}
+#endif
diff --git a/gfx/skia/skia/include/core/SkMaskFilter.h b/gfx/skia/skia/include/core/SkMaskFilter.h
new file mode 100644
index 0000000000..ffb30e4cbe
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMaskFilter.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskFilter_DEFINED
+#define SkMaskFilter_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkCoverageMode.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkScalar.h"
+
+class SkMatrix;
+struct SkRect;
+class SkString;
+
+/** \class SkMaskFilter
+
+ SkMaskFilter is the base class for objects that perform transformations on
+ the mask before drawing it. An example subclass is Blur.
+*/
+class SK_API SkMaskFilter : public SkFlattenable {
+public:
+ /** Create a blur maskfilter.
+ * @param style The SkBlurStyle to use
+ * @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0.
+ * @param respectCTM if true the blur's sigma is modified by the CTM.
+ * @return The new blur maskfilter
+ */
+ static sk_sp<SkMaskFilter> MakeBlur(SkBlurStyle style, SkScalar sigma,
+ bool respectCTM = true);
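+
+    /* A usage sketch (hypothetical sigma): blur subsequent draws through a paint.
+     *
+     *     SkPaint paint;
+     *     paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 3.0f));
+     */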
+
+ /**
+ * Construct a maskfilter whose effect is to first apply the inner filter and then apply
+ * the outer filter to the result of the inner's. Returns nullptr on failure.
+ */
+ static sk_sp<SkMaskFilter> MakeCompose(sk_sp<SkMaskFilter> outer, sk_sp<SkMaskFilter> inner);
+
+ /**
+ * Compose two maskfilters together using a coverage mode. Returns nullptr on failure.
+ */
+ static sk_sp<SkMaskFilter> MakeCombine(sk_sp<SkMaskFilter> filterA, sk_sp<SkMaskFilter> filterB,
+ SkCoverageMode mode);
+
+ /**
+ * Construct a maskfilter with an additional transform.
+ *
+ * Note: unlike shader local matrices, this transform composes next to the CTM.
+ *
+ * TotalMatrix = CTM x MaskFilterMatrix x (optional/downstream) ShaderLocalMatrix
+ */
+ sk_sp<SkMaskFilter> makeWithMatrix(const SkMatrix&) const;
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkMaskFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkMaskFilter_Type;
+ }
+
+ static sk_sp<SkMaskFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkMaskFilter>(static_cast<SkMaskFilter*>(
+ SkFlattenable::Deserialize(
+ kSkMaskFilter_Type, data, size, procs).release()));
+ }
+
+private:
+ static void RegisterFlattenables();
+ friend class SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMath.h b/gfx/skia/skia/include/core/SkMath.h
new file mode 100644
index 0000000000..33d929e604
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMath.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMath_DEFINED
+#define SkMath_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// 64bit -> 32bit utilities
+
+// Handy utility that can be passed two ints; it automatically promotes them
+// to 64 bits before the multiply, so the caller doesn't have to remember to
+// cast, e.g. (int64_t)a * b.
+static inline int64_t sk_64_mul(int64_t a, int64_t b) {
+ return a * b;
+}
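+
+// Illustrative sketch (editor's example): without the promotion, two large
+// ints would overflow in 32 bits before widening.
+//
+//     int w = 50000, h = 50000;
+//     int64_t area = sk_64_mul(w, h);   // 2500000000; plain w * h overflows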
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Given an integer and a positive (max) integer, return the value
+ * pinned against 0 and max, inclusive.
+ * @param value The value we want returned pinned between [0...max]
+ * @param max The positive max value
+ * @return 0 if value < 0, max if value > max, else value
+ */
+static inline int SkClampMax(int value, int max) {
+ // ensure that max is positive
+ SkASSERT(max >= 0);
+ if (value < 0) {
+ value = 0;
+ }
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
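+
+// Illustrative sketch (editor's example):
+//
+//     SkClampMax(-5, 255);    // returns 0
+//     SkClampMax(300, 255);   // returns 255
+//     SkClampMax(42, 255);    // returns 42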
+
+/**
+ * Returns true if value is a power of 2. Does not explicitly check for
+ * value <= 0.
+ */
+template <typename T> constexpr inline bool SkIsPow2(T value) {
+ return (value & (value - 1)) == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return a*b/((1 << shift) - 1), rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8
+ */
+static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ SkASSERT(shift > 0 && shift <= 8);
+ unsigned prod = a*b + (1 << (shift - 1));
+ return (prod + (prod >> shift)) >> shift;
+}
+
+/**
+ * Return a*b/255, rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767.
+ */
+static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ unsigned prod = a*b + 128;
+ return (prod + (prod >> 8)) >> 8;
+}
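+
+// Illustrative worked example (editor's note): for a = 200, b = 128,
+// prod = 200*128 + 128 = 25728, and (25728 + (25728 >> 8)) >> 8 = 100,
+// which matches round(200 * 128 / 255.0) = round(100.39...) = 100.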
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMatrix.h b/gfx/skia/skia/include/core/SkMatrix.h
new file mode 100644
index 0000000000..0e89ca82cf
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMatrix.h
@@ -0,0 +1,1863 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix_DEFINED
+#define SkMatrix_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTo.h"
+
+struct SkRSXform;
+struct SkPoint3;
+class SkString;
+
+/** \class SkMatrix
+ SkMatrix holds a 3x3 matrix for transforming coordinates. This allows mapping
+ SkPoint and vectors with translation, scaling, skewing, rotation, and
+ perspective.
+
+ SkMatrix elements are in row major order. The default constructor and reset()
+ initialize SkMatrix to identity, which has no effect on mapped points.
+ setTranslate(), setScale(), setSkew(), setRotate(), set9() and setAll()
+ initialize all SkMatrix elements with the corresponding mapping.
+
+ SkMatrix includes a hidden variable that classifies the type of matrix to
+ improve performance. SkMatrix is not thread safe unless getType() is called first.
+*/
+SK_BEGIN_REQUIRE_DENSE
+class SK_API SkMatrix {
+public:
+
+ /** Creates an identity SkMatrix:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+ */
+ constexpr SkMatrix() : SkMatrix(1,0,0, 0,1,0, 0,0,1, kIdentity_Mask | kRectStaysRect_Mask) {}
+
+ /** Sets SkMatrix to scale by (sx, sy). Returned matrix is:
+
+ | sx 0 0 |
+ | 0 sy 0 |
+ | 0 0 1 |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @return SkMatrix with scale
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeScale(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setScale(sx, sy);
+ return m;
+ }
+
+ /** Sets SkMatrix to scale by (scale, scale). Returned matrix is:
+
+ | scale 0 0 |
+ | 0 scale 0 |
+ | 0 0 1 |
+
+ @param scale horizontal and vertical scale factor
+ @return SkMatrix with scale
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeScale(SkScalar scale) {
+ SkMatrix m;
+ m.setScale(scale, scale);
+ return m;
+ }
+
+ /** Sets SkMatrix to translate by (dx, dy). Returned matrix is:
+
+ | 1 0 dx |
+ | 0 1 dy |
+ | 0 0 1 |
+
+ @param dx horizontal translation
+ @param dy vertical translation
+ @return SkMatrix with translation
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeTrans(SkScalar dx, SkScalar dy) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ return m;
+ }
+
+ /** Sets SkMatrix to:
+
+ | scaleX skewX transX |
+ | skewY scaleY transY |
+ | pers0 pers1 pers2 |
+
+ @param scaleX horizontal scale factor
+ @param skewX horizontal skew factor
+ @param transX horizontal translation
+ @param skewY vertical skew factor
+ @param scaleY vertical scale factor
+ @param transY vertical translation
+ @param pers0 input x-axis perspective factor
+ @param pers1 input y-axis perspective factor
+ @param pers2 perspective scale factor
+ @return SkMatrix constructed from parameters
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeAll(SkScalar scaleX, SkScalar skewX, SkScalar transX,
+ SkScalar skewY, SkScalar scaleY, SkScalar transY,
+ SkScalar pers0, SkScalar pers1, SkScalar pers2) {
+ SkMatrix m;
+ m.setAll(scaleX, skewX, transX, skewY, scaleY, transY, pers0, pers1, pers2);
+ return m;
+ }
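+
+ // Illustrative sketch (editor's example): with the last row (0, 0, 1),
+ // MakeAll builds an ordinary affine matrix; here, scale by 2 with a
+ // translation of (10, 20):
+ //
+ //     SkMatrix m = SkMatrix::MakeAll(2, 0, 10,
+ //                                    0, 2, 20,
+ //                                    0, 0,  1);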
+
+ /** \enum SkMatrix::TypeMask
+ Enum of bit fields for mask returned by getType().
+ Used to identify the complexity of SkMatrix, to optimize performance.
+ */
+ enum TypeMask {
+ kIdentity_Mask = 0, //!< identity SkMatrix; all bits clear
+ kTranslate_Mask = 0x01, //!< translation SkMatrix
+ kScale_Mask = 0x02, //!< scale SkMatrix
+ kAffine_Mask = 0x04, //!< skew or rotate SkMatrix
+ kPerspective_Mask = 0x08, //!< perspective SkMatrix
+ };
+
+ /** Returns a bit field describing the transformations the matrix may
+ perform. The bit field is computed conservatively, so it may include
+ false positives. For example, when kPerspective_Mask is set, all
+ other bits are set.
+
+ @return kIdentity_Mask, or combinations of: kTranslate_Mask, kScale_Mask,
+ kAffine_Mask, kPerspective_Mask
+ */
+ TypeMask getType() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ // only return the public masks
+ return (TypeMask)(fTypeMask & 0xF);
+ }
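+
+ // Illustrative sketch (editor's example, for some SkMatrix m): TypeMask is a
+ // bit field, so test membership with bitwise AND rather than equality:
+ //
+ //     if (m.getType() & SkMatrix::kPerspective_Mask) {
+ //         // take the fully general mapping path
+ //     }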
+
+ /** Returns true if SkMatrix is identity. Identity matrix is:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ @return true if SkMatrix has no effect
+ */
+ bool isIdentity() const {
+ return this->getType() == 0;
+ }
+
+ /** Returns true if SkMatrix at most scales and translates. SkMatrix may be identity,
+ contain only scale elements, only translate elements, or both. SkMatrix form is:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ @return true if SkMatrix is identity; or scales, translates, or both
+ */
+ bool isScaleTranslate() const {
+ return !(this->getType() & ~(kScale_Mask | kTranslate_Mask));
+ }
+
+ /** Returns true if SkMatrix is identity, or translates. SkMatrix form is:
+
+ | 1 0 translate-x |
+ | 0 1 translate-y |
+ | 0 0 1 |
+
+ @return true if SkMatrix is identity, or translates
+ */
+ bool isTranslate() const { return !(this->getType() & ~(kTranslate_Mask)); }
+
+ /** Returns true if SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity,
+ or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all
+ cases, SkMatrix may also have translation. SkMatrix form is either:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ or
+
+ | 0 rotate-x translate-x |
+ | rotate-y 0 translate-y |
+ | 0 0 1 |
+
+ for non-zero values of scale-x, scale-y, rotate-x, and rotate-y.
+
+ Also called preservesAxisAlignment(); use the one that provides better inline
+ documentation.
+
+ @return true if SkMatrix maps one SkRect into another
+ */
+ bool rectStaysRect() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ return (fTypeMask & kRectStaysRect_Mask) != 0;
+ }
+
+ /** Returns true if SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity,
+ or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all
+ cases, SkMatrix may also have translation. SkMatrix form is either:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ or
+
+ | 0 rotate-x translate-x |
+ | rotate-y 0 translate-y |
+ | 0 0 1 |
+
+ for non-zero values of scale-x, scale-y, rotate-x, and rotate-y.
+
+ Also called rectStaysRect(); use the one that provides better inline
+ documentation.
+
+ @return true if SkMatrix maps one SkRect into another
+ */
+ bool preservesAxisAlignment() const { return this->rectStaysRect(); }
+
+ /** Returns true if the matrix contains perspective elements. SkMatrix form is:
+
+ | -- -- -- |
+ | -- -- -- |
+ | perspective-x perspective-y perspective-scale |
+
+ where perspective-x or perspective-y is non-zero, or perspective-scale is
+ not one. All other elements may have any value.
+
+ @return true if SkMatrix is in most general form
+ */
+ bool hasPerspective() const {
+ return SkToBool(this->getPerspectiveTypeMaskOnly() &
+ kPerspective_Mask);
+ }
+
+ /** Returns true if SkMatrix contains only translation, rotation, reflection, and
+ uniform scale.
+ Returns false if SkMatrix contains different scales, skewing, perspective, or
+ degenerate forms that collapse to a line or point.
+
+ When true, rendering with and without the matrix is visually alike; a
+ transformed circle remains a circle. Mathematically, this is referred to as a
+ similarity of a Euclidean space, or a similarity transformation.
+
+ Preserves right angles, keeping the arms of the angle equal lengths.
+
+ @param tol to be deprecated
+ @return true if SkMatrix only rotates, uniformly scales, translates
+ */
+ bool isSimilarity(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ /** Returns true if SkMatrix contains only translation, rotation, reflection, and
+ scale. Scale may differ along rotated axes.
+ Returns false if SkMatrix contains skewing, perspective, or degenerate forms
+ that collapse to a line or point.
+
+ Preserves right angles, but does not require that the arms of the angle
+ retain equal lengths.
+
+ @param tol to be deprecated
+ @return true if SkMatrix only rotates, scales, translates
+ */
+ bool preservesRightAngles(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ /** SkMatrix organizes its values in row order. These members correspond to
+ each value in SkMatrix.
+ */
+ static constexpr int kMScaleX = 0; //!< horizontal scale factor
+ static constexpr int kMSkewX = 1; //!< horizontal skew factor
+ static constexpr int kMTransX = 2; //!< horizontal translation
+ static constexpr int kMSkewY = 3; //!< vertical skew factor
+ static constexpr int kMScaleY = 4; //!< vertical scale factor
+ static constexpr int kMTransY = 5; //!< vertical translation
+ static constexpr int kMPersp0 = 6; //!< input x perspective factor
+ static constexpr int kMPersp1 = 7; //!< input y perspective factor
+ static constexpr int kMPersp2 = 8; //!< perspective bias
+
+ /** Affine arrays are in column major order to match the matrix used by
+ PDF and XPS.
+ */
+ static constexpr int kAScaleX = 0; //!< horizontal scale factor
+ static constexpr int kASkewY = 1; //!< vertical skew factor
+ static constexpr int kASkewX = 2; //!< horizontal skew factor
+ static constexpr int kAScaleY = 3; //!< vertical scale factor
+ static constexpr int kATransX = 4; //!< horizontal translation
+ static constexpr int kATransY = 5; //!< vertical translation
+
+ /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is
+ defined.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return value corresponding to index
+ */
+ SkScalar operator[](int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is
+ defined.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return value corresponding to index
+ */
+ SkScalar get(int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ /** Returns scale factor multiplied by x-axis input, contributing to x-axis output.
+ With mapPoints(), scales SkPoint along the x-axis.
+
+ @return horizontal scale factor
+ */
+ SkScalar getScaleX() const { return fMat[kMScaleX]; }
+
+ /** Returns scale factor multiplied by y-axis input, contributing to y-axis output.
+ With mapPoints(), scales SkPoint along the y-axis.
+
+ @return vertical scale factor
+ */
+ SkScalar getScaleY() const { return fMat[kMScaleY]; }
+
+ /** Returns scale factor multiplied by x-axis input, contributing to y-axis output.
+ With mapPoints(), skews SkPoint along the y-axis.
+ Skewing both axes can rotate SkPoint.
+
+ @return vertical skew factor
+ */
+ SkScalar getSkewY() const { return fMat[kMSkewY]; }
+
+ /** Returns scale factor multiplied by y-axis input, contributing to x-axis output.
+ With mapPoints(), skews SkPoint along the x-axis.
+ Skewing both axes can rotate SkPoint.
+
+ @return horizontal skew factor
+ */
+ SkScalar getSkewX() const { return fMat[kMSkewX]; }
+
+ /** Returns translation contributing to x-axis output.
+ With mapPoints(), moves SkPoint along the x-axis.
+
+ @return horizontal translation factor
+ */
+ SkScalar getTranslateX() const { return fMat[kMTransX]; }
+
+ /** Returns translation contributing to y-axis output.
+ With mapPoints(), moves SkPoint along the y-axis.
+
+ @return vertical translation factor
+ */
+ SkScalar getTranslateY() const { return fMat[kMTransY]; }
+
+ /** Returns factor scaling input x-axis relative to input y-axis.
+
+ @return input x-axis perspective factor
+ */
+ SkScalar getPerspX() const { return fMat[kMPersp0]; }
+
+ /** Returns factor scaling input y-axis relative to input x-axis.
+
+ @return input y-axis perspective factor
+ */
+ SkScalar getPerspY() const { return fMat[kMPersp1]; }
+
+ /** Returns writable SkMatrix value. Asserts if index is out of range and SK_DEBUG is
+ defined. Clears internal cache anticipating that caller will change SkMatrix value.
+
+ Next call to read SkMatrix state may recompute cache; subsequent writes to SkMatrix
+ value must be followed by dirtyMatrixTypeCache().
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return writable value corresponding to index
+ */
+ SkScalar& operator[](int index) {
+ SkASSERT((unsigned)index < 9);
+ this->setTypeMask(kUnknown_Mask);
+ return fMat[index];
+ }
+
+ /** Sets SkMatrix value. Asserts if index is out of range and SK_DEBUG is
+ defined. Safer than operator[]; internal cache is always maintained.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @param value scalar to store in SkMatrix
+ */
+ SkMatrix& set(int index, SkScalar value) {
+ SkASSERT((unsigned)index < 9);
+ fMat[index] = value;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+ }
+
+ /** Sets horizontal scale factor.
+
+ @param v horizontal scale factor to store
+ */
+ SkMatrix& setScaleX(SkScalar v) { return this->set(kMScaleX, v); }
+
+ /** Sets vertical scale factor.
+
+ @param v vertical scale factor to store
+ */
+ SkMatrix& setScaleY(SkScalar v) { return this->set(kMScaleY, v); }
+
+ /** Sets vertical skew factor.
+
+ @param v vertical skew factor to store
+ */
+ SkMatrix& setSkewY(SkScalar v) { return this->set(kMSkewY, v); }
+
+ /** Sets horizontal skew factor.
+
+ @param v horizontal skew factor to store
+ */
+ SkMatrix& setSkewX(SkScalar v) { return this->set(kMSkewX, v); }
+
+ /** Sets horizontal translation.
+
+ @param v horizontal translation to store
+ */
+ SkMatrix& setTranslateX(SkScalar v) { return this->set(kMTransX, v); }
+
+ /** Sets vertical translation.
+
+ @param v vertical translation to store
+ */
+ SkMatrix& setTranslateY(SkScalar v) { return this->set(kMTransY, v); }
+
+ /** Sets input x-axis perspective factor, which causes mapXY() to vary input x-axis values
+ inversely proportional to input y-axis values.
+
+ @param v perspective factor
+ */
+ SkMatrix& setPerspX(SkScalar v) { return this->set(kMPersp0, v); }
+
+ /** Sets input y-axis perspective factor, which causes mapXY() to vary input y-axis values
+ inversely proportional to input x-axis values.
+
+ @param v perspective factor
+ */
+ SkMatrix& setPerspY(SkScalar v) { return this->set(kMPersp1, v); }
+
+ /** Sets all values from parameters. Sets matrix to:
+
+ | scaleX skewX transX |
+ | skewY scaleY transY |
+ | persp0 persp1 persp2 |
+
+ @param scaleX horizontal scale factor to store
+ @param skewX horizontal skew factor to store
+ @param transX horizontal translation to store
+ @param skewY vertical skew factor to store
+ @param scaleY vertical scale factor to store
+ @param transY vertical translation to store
+ @param persp0 input x-axis values perspective factor to store
+ @param persp1 input y-axis values perspective factor to store
+ @param persp2 perspective scale factor to store
+ */
+ SkMatrix& setAll(SkScalar scaleX, SkScalar skewX, SkScalar transX,
+ SkScalar skewY, SkScalar scaleY, SkScalar transY,
+ SkScalar persp0, SkScalar persp1, SkScalar persp2) {
+ fMat[kMScaleX] = scaleX;
+ fMat[kMSkewX] = skewX;
+ fMat[kMTransX] = transX;
+ fMat[kMSkewY] = skewY;
+ fMat[kMScaleY] = scaleY;
+ fMat[kMTransY] = transY;
+ fMat[kMPersp0] = persp0;
+ fMat[kMPersp1] = persp1;
+ fMat[kMPersp2] = persp2;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+ }
+
+ /** Copies nine scalar values contained by SkMatrix into buffer, in member value
+ ascending order: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2.
+
+ @param buffer storage for nine scalar values
+ */
+ void get9(SkScalar buffer[9]) const {
+ memcpy(buffer, fMat, 9 * sizeof(SkScalar));
+ }
+
+ /** Sets SkMatrix to nine scalar values in buffer, in member value ascending order:
+ kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, kMPersp0, kMPersp1,
+ kMPersp2.
+
+ Sets matrix to:
+
+ | buffer[0] buffer[1] buffer[2] |
+ | buffer[3] buffer[4] buffer[5] |
+ | buffer[6] buffer[7] buffer[8] |
+
+ In the future, set9 followed by get9 may not return the same values. Since SkMatrix
+ maps non-homogeneous coordinates, scaling all nine values produces an equivalent
+ transformation, possibly improving precision.
+
+ @param buffer nine scalar values
+ */
+ SkMatrix& set9(const SkScalar buffer[9]);
+
+ /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ Also called setIdentity(); use the one that provides better inline
+ documentation.
+ */
+ SkMatrix& reset();
+
+ /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ Also called reset(); use the one that provides better inline
+ documentation.
+ */
+ SkMatrix& setIdentity() { return this->reset(); }
+
+ /** Sets SkMatrix to translate by (dx, dy).
+
+ @param dx horizontal translation
+ @param dy vertical translation
+ */
+ SkMatrix& setTranslate(SkScalar dx, SkScalar dy);
+
+ /** Sets SkMatrix to translate by (v.fX, v.fY).
+
+ @param v vector containing horizontal and vertical translation
+ */
+ SkMatrix& setTranslate(const SkVector& v) { return this->setTranslate(v.fX, v.fY); }
+
+ /** Sets SkMatrix to scale by sx and sy, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to scale by sx and sy, about a pivot point at (0, 0).
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& setScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to rotate by degrees about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to rotate by degrees about a pivot point at (0, 0).
+ Positive degrees rotates clockwise.
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& setRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1).
+ Vector length specifies scale.
+
+ @param sinValue rotation vector x-axis component
+ @param cosValue rotation vector y-axis component
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue,
+ SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (0, 0).
+
+ Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1).
+ Vector length specifies scale.
+
+ @param sinValue rotation vector x-axis component
+ @param cosValue rotation vector y-axis component
+ */
+ SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue);
+
+ /** Sets SkMatrix to rotate, scale, and translate using a compressed matrix form.
+
+ Vector (rsxForm.fSSin, rsxForm.fSCos) describes the angle of rotation relative
+ to (0, 1). Vector length specifies scale. Mapped point is rotated and scaled
+ by vector, then translated by (rsxForm.fTx, rsxForm.fTy).
+
+ @param rsxForm compressed SkRSXform matrix
+ @return reference to SkMatrix
+ */
+ SkMatrix& setRSXform(const SkRSXform& rsxForm);
+
+ /** Sets SkMatrix to skew by kx and ky, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to skew by kx and ky, about a pivot point at (0, 0).
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& setSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix a multiplied by SkMatrix b. Either a or b may be this.
+
+ Given:
+
+ | A B C | | J K L |
+ a = | D E F |, b = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param a SkMatrix on left side of multiply expression
+ @param b SkMatrix on right side of multiply expression
+ */
+ SkMatrix& setConcat(const SkMatrix& a, const SkMatrix& b);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from translation (dx, dy).
+ This can be thought of as moving the point to be mapped before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 0 dx |
+ Matrix = | D E F |, T(dx, dy) = | 0 1 dy |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | 1 0 dx | | A B A*dx+B*dy+C |
+ Matrix * T(dx, dy) = | D E F | | 0 1 dy | = | D E D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G H G*dx+H*dy+I |
+
+ @param dx x-axis translation before applying SkMatrix
+ @param dy y-axis translation before applying SkMatrix
+ */
+ SkMatrix& preTranslate(SkScalar dx, SkScalar dy);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy)
+ about pivot point (px, py).
+ This can be thought of as scaling about a pivot point before applying SkMatrix.
+
+ Given:
+
+ | A B C | | sx 0 dx |
+ Matrix = | D E F |, S(sx, sy, px, py) = | 0 sy dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ dx = px - sx * px
+ dy = py - sy * py
+
+ sets SkMatrix to:
+
+ | A B C | | sx 0 dx | | A*sx B*sy A*dx+B*dy+C |
+ Matrix * S(sx, sy, px, py) = | D E F | | 0 sy dy | = | D*sx E*sy D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G*sx H*sy G*dx+H*dy+I |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy)
+ about pivot point (0, 0).
+ This can be thought of as scaling about the origin before applying SkMatrix.
+
+ Given:
+
+ | A B C | | sx 0 0 |
+ Matrix = | D E F |, S(sx, sy) = | 0 sy 0 |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | sx 0 0 | | A*sx B*sy C |
+ Matrix * S(sx, sy) = | D E F | | 0 sy 0 | = | D*sx E*sy F |
+ | G H I | | 0 0 1 | | G*sx H*sy I |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& preScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees
+ about pivot point (px, py).
+ This can be thought of as rotating about a pivot point before applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | A B C | | c -s dx |
+ Matrix = | D E F |, R(degrees, px, py) = | s c dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+ dx = s * py + (1 - c) * px
+ dy = -s * px + (1 - c) * py
+
+ sets SkMatrix to:
+
+ | A B C | | c -s dx | | Ac+Bs -As+Bc A*dx+B*dy+C |
+ Matrix * R(degrees, px, py) = | D E F | | s c dy | = | Dc+Es -Ds+Ec D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc G*dx+H*dy+I |
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees
+ about pivot point (0, 0).
+ This can be thought of as rotating about the origin before applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | A B C | | c -s 0 |
+ Matrix = | D E F |, R(degrees) = | s c 0 |
+ | G H I | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+
+ sets SkMatrix to:
+
+ | A B C | | c -s 0 | | Ac+Bs -As+Bc C |
+ Matrix * R(degrees) = | D E F | | s c 0 | = | Dc+Es -Ds+Ec F |
+ | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc I |
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& preRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky)
+ about pivot point (px, py).
+ This can be thought of as skewing about a pivot point before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 kx dx |
+ Matrix = | D E F |, K(kx, ky, px, py) = | ky 1 dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ dx = -kx * py
+ dy = -ky * px
+
+ sets SkMatrix to:
+
+ | A B C | | 1 kx dx | | A+B*ky A*kx+B A*dx+B*dy+C |
+ Matrix * K(kx, ky, px, py) = | D E F | | ky 1 dy | = | D+E*ky D*kx+E D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G+H*ky G*kx+H G*dx+H*dy+I |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky)
+ about pivot point (0, 0).
+ This can be thought of as skewing about the origin before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 kx 0 |
+ Matrix = | D E F |, K(kx, ky) = | ky 1 0 |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | 1 kx 0 | | A+B*ky A*kx+B C |
+ Matrix * K(kx, ky) = | D E F | | ky 1 0 | = | D+E*ky D*kx+E F |
+ | G H I | | 0 0 1 | | G+H*ky G*kx+H I |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& preSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix other.
+ This can be thought of as mapping by other before applying SkMatrix.
+
+ Given:
+
+ | A B C | | J K L |
+ Matrix = | D E F |, other = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ Matrix * other = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param other SkMatrix on right side of multiply expression
+ */
+ SkMatrix& preConcat(const SkMatrix& other);
+
+ /** Sets SkMatrix to SkMatrix constructed from translation (dx, dy) multiplied by SkMatrix.
+ This can be thought of as moving the point to be mapped after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 0 dx |
+ Matrix = | M N O |, T(dx, dy) = | 0 1 dy |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | 1 0 dx | | J K L | | J+dx*P K+dx*Q L+dx*R |
+ T(dx, dy) * Matrix = | 0 1 dy | | M N O | = | M+dy*P N+dy*Q O+dy*R |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param dx x-axis translation after applying SkMatrix
+ @param dy y-axis translation after applying SkMatrix
+ */
+ SkMatrix& postTranslate(SkScalar dx, SkScalar dy);
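+
+ // Illustrative sketch (editor's example) contrasting pre- and post-ops on a
+ // scale-by-2 matrix; the point (5, 0) maps differently in each case:
+ //
+ //     SkMatrix a = SkMatrix::MakeScale(2, 2);
+ //     a.preTranslate(10, 0);    // translate, then scale: (5,0) -> (30,0)
+ //     SkMatrix b = SkMatrix::MakeScale(2, 2);
+ //     b.postTranslate(10, 0);   // scale, then translate: (5,0) -> (20,0)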
+
+ /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as scaling about a pivot point after applying SkMatrix.
+
+ Given:
+
+ | J K L | | sx 0 dx |
+ Matrix = | M N O |, S(sx, sy, px, py) = | 0 sy dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ dx = px - sx * px
+ dy = py - sy * py
+
+ sets SkMatrix to:
+
+ | sx 0 dx | | J K L | | sx*J+dx*P sx*K+dx*Q sx*L+dx*R |
+ S(sx, sy, px, py) * Matrix = | 0 sy dy | | M N O | = | sy*M+dy*P sy*N+dy*Q sy*O+dy*R |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as scaling about the origin after applying SkMatrix.
+
+ Given:
+
+ | J K L | | sx 0 0 |
+ Matrix = | M N O |, S(sx, sy) = | 0 sy 0 |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | sx 0 0 | | J K L | | sx*J sx*K sx*L |
+ S(sx, sy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& postScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to SkMatrix constructed from scaling by (1/divx, 1/divy),
+ about pivot point (0, 0), multiplied by SkMatrix.
+
+ Returns false if either divx or divy is zero.
+
+ Given:
+
+ | J K L | | sx 0 0 |
+ Matrix = | M N O |, I(divx, divy) = | 0 sy 0 |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ sx = 1 / divx
+ sy = 1 / divy
+
+ sets SkMatrix to:
+
+ | sx 0 0 | | J K L | | sx*J sx*K sx*L |
+ I(divx, divy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param divx integer divisor for inverse scale in x
+ @param divy integer divisor for inverse scale in y
+ @return true on successful scale
+ */
+ bool postIDiv(int divx, int divy);
+
+ /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as rotating about a pivot point after applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | J K L | | c -s dx |
+ Matrix = | M N O |, R(degrees, px, py) = | s c dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+ dx = s * py + (1 - c) * px
+ dy = -s * px + (1 - c) * py
+
+ sets SkMatrix to:
+
+ |c -s dx| |J K L| |cJ-sM+dx*P cK-sN+dx*Q cL-sO+dx*R|
+ R(degrees, px, py) * Matrix = |s c dy| |M N O| = |sJ+cM+dy*P sK+cN+dy*Q sL+cO+dy*R|
+ |0 0 1| |P Q R| | P Q R|
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as rotating about the origin after applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | J K L | | c -s 0 |
+ Matrix = | M N O |, R(degrees) = | s c 0 |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+
+ sets SkMatrix to:
+
+ | c -s 0 | | J K L | | cJ-sM cK-sN cL-sO |
+ R(degrees) * Matrix = | s c 0 | | M N O | = | sJ+cM sK+cN sL+cO |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& postRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as skewing about a pivot point after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 kx dx |
+ Matrix = | M N O |, K(kx, ky, px, py) = | ky 1 dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ dx = -kx * py
+ dy = -ky * px
+
+ sets SkMatrix to:
+
+ | 1 kx dx| |J K L| |J+kx*M+dx*P K+kx*N+dx*Q L+kx*O+dx*R|
+ K(kx, ky, px, py) * Matrix = |ky 1 dy| |M N O| = |ky*J+M+dy*P ky*K+N+dy*Q ky*L+O+dy*R|
+ | 0 0 1| |P Q R| | P Q R|
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as skewing about the origin after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 kx 0 |
+ Matrix = | M N O |, K(kx, ky) = | ky 1 0 |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | 1 kx 0 | | J K L | | J+kx*M K+kx*N L+kx*O |
+ K(kx, ky) * Matrix = | ky 1 0 | | M N O | = | ky*J+M ky*K+N ky*L+O |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& postSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix other multiplied by SkMatrix.
+ This can be thought of as mapping by other after applying SkMatrix.
+
+ Given:
+
+ | J K L | | A B C |
+ Matrix = | M N O |, other = | D E F |
+ | P Q R | | G H I |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ other * Matrix = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param other SkMatrix on left side of multiply expression
+ */
+ SkMatrix& postConcat(const SkMatrix& other);
+
+ /** \enum SkMatrix::ScaleToFit
+ ScaleToFit describes how SkMatrix is constructed to map one SkRect to another.
+ ScaleToFit may allow SkMatrix to have unequal horizontal and vertical scaling,
+ or may restrict SkMatrix to square scaling. If restricted, ScaleToFit specifies
+ how SkMatrix maps to the side or center of the destination SkRect.
+ */
+ enum ScaleToFit {
+ kFill_ScaleToFit, //!< scales in x and y to fill destination SkRect
+ kStart_ScaleToFit, //!< scales and aligns to left and top
+ kCenter_ScaleToFit, //!< scales and aligns to center
+ kEnd_ScaleToFit, //!< scales and aligns to right and bottom
+ };
+
+ /** Sets SkMatrix to scale and translate src SkRect to dst SkRect. stf selects whether
+ mapping completely fills dst or preserves the aspect ratio, and how to align
+ src within dst. Returns false if src is empty, and sets SkMatrix to identity.
+ Returns true if dst is empty, and sets SkMatrix to:
+
+ | 0 0 0 |
+ | 0 0 0 |
+ | 0 0 1 |
+
+ @param src SkRect to map from
+ @param dst SkRect to map to
+ @param stf one of: kFill_ScaleToFit, kStart_ScaleToFit,
+ kCenter_ScaleToFit, kEnd_ScaleToFit
+ @return true if SkMatrix can represent SkRect mapping
+ */
+ bool setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf);
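+
+ // Illustrative sketch (editor's example): fit a 100x100 source into a
+ // 50x200 destination. kFill_ScaleToFit stretches (sx = 0.5, sy = 2), while
+ // kCenter_ScaleToFit scales uniformly by 0.5 and centers the result:
+ //
+ //     SkMatrix m;
+ //     m.setRectToRect(SkRect::MakeWH(100, 100), SkRect::MakeWH(50, 200),
+ //                     SkMatrix::kCenter_ScaleToFit);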
+
+ /** Returns SkMatrix set to scale and translate src SkRect to dst SkRect. stf selects
+ whether mapping completely fills dst or preserves the aspect ratio, and how to
+ align src within dst. Returns the identity SkMatrix if src is empty. If dst is
+ empty, returns SkMatrix set to:
+
+ | 0 0 0 |
+ | 0 0 0 |
+ | 0 0 1 |
+
+ @param src SkRect to map from
+ @param dst SkRect to map to
+ @param stf one of: kFill_ScaleToFit, kStart_ScaleToFit,
+ kCenter_ScaleToFit, kEnd_ScaleToFit
+ @return SkMatrix mapping src to dst
+ */
+ static SkMatrix MakeRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf) {
+ SkMatrix m;
+ m.setRectToRect(src, dst, stf);
+ return m;
+ }
+
+ /** Sets SkMatrix to map src to dst. count must be zero or greater, and four or less.
+
+ If count is zero, sets SkMatrix to identity and returns true.
+ If count is one, sets SkMatrix to translate and returns true.
+ If count is two or more, sets SkMatrix to map SkPoint if possible; returns false
+ if SkMatrix cannot be constructed. If count is four, SkMatrix may include
+ perspective.
+
+ @param src SkPoint to map from
+ @param dst SkPoint to map to
+ @param count number of SkPoint in src and dst
+ @return true if SkMatrix was constructed successfully
+ */
+ bool setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count);
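+
+ // Illustrative sketch (editor's example): with count = 4 the result may
+ // include perspective, e.g. mapping a unit square onto a quadrilateral:
+ //
+ //     SkPoint src[4] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+ //     SkPoint dst[4] = {{0, 0}, {4, 1}, {3, 3}, {1, 2}};
+ //     SkMatrix m;
+ //     if (m.setPolyToPoly(src, dst, 4)) { /* m maps src[i] to dst[i] */ }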
+
+ /** Sets inverse to the inverse of SkMatrix, returning true if SkMatrix can be inverted.
+ Geometrically, if SkMatrix maps from source to destination, inverse SkMatrix
+ maps from destination to source. If SkMatrix can not be inverted, inverse is
+ unchanged.
+
+ @param inverse storage for inverted SkMatrix; may be nullptr
+ @return true if SkMatrix can be inverted
+ */
+ bool SK_WARN_UNUSED_RESULT invert(SkMatrix* inverse) const {
+ // Allow the trivial case to be inlined.
+ if (this->isIdentity()) {
+ if (inverse) {
+ inverse->reset();
+ }
+ return true;
+ }
+ return this->invertNonIdentity(inverse);
+ }
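+
+ // Illustrative sketch (editor's example): map a device-space point back to
+ // local coordinates via the inverse, guarding against singular matrices
+ // (m, deviceX, and deviceY are hypothetical names in scope):
+ //
+ //     SkMatrix inverse;
+ //     if (m.invert(&inverse)) {
+ //         SkPoint local = inverse.mapXY(deviceX, deviceY);
+ //     }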
+
+ /** Fills affine with identity values in column major order.
+ Sets affine to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+
+ Affine 3 by 2 matrices in column major order are used by OpenGL and XPS.
+
+ @param affine storage for 3 by 2 affine matrix
+ */
+ static void SetAffineIdentity(SkScalar affine[6]);
+
+ /** Fills affine in column major order. Sets affine to:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+
+ If SkMatrix contains perspective, returns false and leaves affine unchanged.
+
+ @param affine storage for 3 by 2 affine matrix; may be nullptr
+ @return true if SkMatrix does not contain perspective
+ */
+ bool SK_WARN_UNUSED_RESULT asAffine(SkScalar affine[6]) const;
+
+ /** Sets SkMatrix to affine values, passed in column major order. Given affine,
+ column, then row, as:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+
+ SkMatrix is set, row, then column, to:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+ | 0 0 1 |
+
+ @param affine 3 by 2 affine matrix
+ */
+ SkMatrix& setAffine(const SkScalar affine[6]);
+
+ /** Maps src SkPoint array of length count to dst SkPoint array of equal or greater
+ length. SkPoint are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = src[i].fX
+ y = src[i].fY
+ }
+
+ each dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ src and dst may point to the same storage.
+
+ @param dst storage for mapped SkPoint
+ @param src SkPoint to transform
+ @param count number of SkPoint to transform
+ */
+ void mapPoints(SkPoint dst[], const SkPoint src[], int count) const;
+
+ /** Maps pts SkPoint array of length count in place. SkPoint are mapped by multiplying
+ each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = pts[i].fX
+ y = pts[i].fY
+ }
+
+ each resulting pts SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param pts storage for mapped SkPoint
+ @param count number of SkPoint to transform
+ */
+ void mapPoints(SkPoint pts[], int count) const {
+ this->mapPoints(pts, pts, count);
+ }
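+
+ // Illustrative sketch (editor's example): map a quad's corners in place
+ // through some SkMatrix m:
+ //
+ //     SkPoint quad[4] = {{0, 0}, {10, 0}, {10, 10}, {0, 10}};
+ //     m.mapPoints(quad, 4);   // each corner is multiplied by m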
+
+ /** Maps src SkPoint3 array of length count to dst SkPoint3 array, which must be of length count or
+ greater. SkPoint3 array is mapped by multiplying each SkPoint3 by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, src = | y |
+ | G H I | | z |
+
+ each resulting dst SkPoint is computed as:
+
+ |A B C| |x|
+ Matrix * src = |D E F| |y| = |Ax+By+Cz Dx+Ey+Fz Gx+Hy+Iz|
+ |G H I| |z|
+
+ @param dst storage for mapped SkPoint3 array
+ @param src SkPoint3 array to transform
+ @param count items in SkPoint3 array to transform
+ */
+ void mapHomogeneousPoints(SkPoint3 dst[], const SkPoint3 src[], int count) const;
+
+ /** Maps SkPoint (x, y) to result. SkPoint is mapped by multiplying by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param x x-axis value of SkPoint to map
+ @param y y-axis value of SkPoint to map
+ @param result storage for mapped SkPoint
+ */
+ void mapXY(SkScalar x, SkScalar y, SkPoint* result) const;
+
+ /** Returns SkPoint (x, y) multiplied by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param x x-axis value of SkPoint to map
+ @param y y-axis value of SkPoint to map
+ @return mapped SkPoint
+ */
+ SkPoint mapXY(SkScalar x, SkScalar y) const {
+ SkPoint result;
+ this->mapXY(x,y, &result);
+ return result;
+ }
+
+ /** Maps src vector array of length count to vector SkPoint array of equal or greater
+ length. Vectors are mapped by multiplying each vector by SkMatrix, treating
+ SkMatrix translation as zero. Given:
+
+ | A B 0 | | x |
+ Matrix = | D E 0 |, src = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = src[i].fX
+ y = src[i].fY
+ }
+
+ each dst vector is computed as:
+
+ |A B 0| |x| Ax+By Dx+Ey
+ Matrix * src = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ src and dst may point to the same storage.
+
+ @param dst storage for mapped vectors
+ @param src vectors to transform
+ @param count number of vectors to transform
+ */
+ void mapVectors(SkVector dst[], const SkVector src[], int count) const;
+
+ /** Maps vecs vector array of length count in place, multiplying each vector by
+ SkMatrix, treating SkMatrix translation as zero. Given:
+
+ | A B 0 | | x |
+ Matrix = | D E 0 |, vec = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = vecs[i].fX
+ y = vecs[i].fY
+ }
+
+ each result vector is computed as:
+
+ |A B 0| |x| Ax+By Dx+Ey
+ Matrix * vec = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param vecs vectors to transform, and storage for mapped vectors
+ @param count number of vectors to transform
+ */
+ void mapVectors(SkVector vecs[], int count) const {
+ this->mapVectors(vecs, vecs, count);
+ }
+
+ /** Maps vector (dx, dy) to result. Vector is mapped by multiplying by SkMatrix,
+ treating SkMatrix translation as zero. Given:
+
+ | A B 0 | | dx |
+ Matrix = | D E 0 |, vec = | dy |
+ | G H I | | 1 |
+
+ each result vector is computed as:
+
+ |A B 0| |dx| A*dx+B*dy D*dx+E*dy
+ Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , -----------
+ |G H I| | 1| G*dx+H*dy+I G*dx+H*dy+I
+
+ @param dx x-axis value of vector to map
+ @param dy y-axis value of vector to map
+ @param result storage for mapped vector
+ */
+ void mapVector(SkScalar dx, SkScalar dy, SkVector* result) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(result, &vec, 1);
+ }
+
+ /** Returns vector (dx, dy) multiplied by SkMatrix, treating SkMatrix translation as zero.
+ Given:
+
+ | A B 0 | | dx |
+ Matrix = | D E 0 |, vec = | dy |
+ | G H I | | 1 |
+
+ each result vector is computed as:
+
+ |A B 0| |dx| A*dx+B*dy D*dx+E*dy
+ Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , -----------
+ |G H I| | 1| G*dx+H*dy+I G*dx+H*dy+I
+
+ @param dx x-axis value of vector to map
+ @param dy y-axis value of vector to map
+ @return mapped vector
+ */
+ SkVector mapVector(SkScalar dx, SkScalar dy) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(&vec, &vec, 1);
+ return vec;
+ }
+
+ /** Sets dst to bounds of src corners mapped by SkMatrix.
+ Returns true if mapped corners are dst corners.
+
+ Returned value is the same as calling rectStaysRect().
+
+ @param dst storage for bounds of mapped SkPoint
+ @param src SkRect to map
+ @return true if dst is equivalent to mapped src
+ */
+ bool mapRect(SkRect* dst, const SkRect& src) const;
+
+ /** Sets rect to bounds of rect corners mapped by SkMatrix.
+ Returns true if mapped corners are computed rect corners.
+
+ Returned value is the same as calling rectStaysRect().
+
+ @param rect rectangle to map, and storage for bounds of mapped corners
+ @return true if result is equivalent to mapped rect
+ */
+ bool mapRect(SkRect* rect) const {
+ return this->mapRect(rect, *rect);
+ }
+
+ /** Returns bounds of src corners mapped by SkMatrix.
+
+ @param src rectangle to map
+ @return mapped bounds
+ */
+ SkRect mapRect(const SkRect& src) const {
+ SkRect dst;
+ (void)this->mapRect(&dst, src);
+ return dst;
+ }
+
+ /** Maps four corners of rect to dst. SkPoint are mapped by multiplying each
+ rect corner by SkMatrix. rect corner is processed in this order:
+ (rect.fLeft, rect.fTop), (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom),
+ (rect.fLeft, rect.fBottom).
+
+ rect may be empty: rect.fLeft may be greater than or equal to rect.fRight;
+ rect.fTop may be greater than or equal to rect.fBottom.
+
+ Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where pt is initialized from each of (rect.fLeft, rect.fTop),
+ (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom), (rect.fLeft, rect.fBottom),
+ each dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param dst storage for mapped corner SkPoint
+ @param rect SkRect to map
+ */
+ void mapRectToQuad(SkPoint dst[4], const SkRect& rect) const {
+ // This could potentially be faster if we only transformed each x and y of the rect once.
+ rect.toQuad(dst);
+ this->mapPoints(dst, 4);
+ }
+
+ /** Sets dst to bounds of src corners mapped by SkMatrix. If matrix contains
+ elements other than scale or translate: asserts if SK_DEBUG is defined;
+ otherwise, results are undefined.
+
+ @param dst storage for bounds of mapped SkPoint
+ @param src SkRect to map
+ */
+ void mapRectScaleTranslate(SkRect* dst, const SkRect& src) const;
+
+ /** Returns geometric mean radius of ellipse formed by constructing circle of
+ size radius, and mapping constructed circle with SkMatrix. The result squared is
+ equal to the major axis length times the minor axis length.
+ Result is not meaningful if SkMatrix contains perspective elements.
+
+ @param radius circle size to map
+ @return average mapped radius
+ */
+ SkScalar mapRadius(SkScalar radius) const;
+
+ /** Returns true if a unit step on x-axis at some y-axis value mapped through SkMatrix
+ can be represented by a constant vector. Returns true if getType() returns
+ kIdentity_Mask, or combinations of: kTranslate_Mask, kScale_Mask, and kAffine_Mask.
+
+ May return true if getType() returns kPerspective_Mask, but only when SkMatrix
+ does not include rotation or skewing along the y-axis.
+
+ @return true if SkMatrix does not have complex perspective
+ */
+ bool isFixedStepInX() const;
+
+ /** Returns vector representing a unit step on x-axis at y mapped through SkMatrix.
+ If isFixedStepInX() is false, returned value is undefined.
+
+ @param y position of line parallel to x-axis
+ @return vector advance of mapped unit step on x-axis
+ */
+ SkVector fixedStepInX(SkScalar y) const;
+
+ /** Returns true if SkMatrix equals m, using an efficient comparison.
+
+ Returns false when the sign of zero values differs: when one
+ matrix has a positive zero value and the other has a negative zero value.
+
+ Returns true even when both SkMatrix contain NaN.
+
+ NaN never equals any value, including itself. To improve performance, NaN values
+ are treated as bit patterns that are equal if their bit patterns are equal.
+
+ @param m SkMatrix to compare
+ @return true if m and SkMatrix are represented by identical bit patterns
+ */
+ bool cheapEqualTo(const SkMatrix& m) const {
+ return 0 == memcmp(fMat, m.fMat, sizeof(fMat));
+ }
+
+ /** Compares a and b; returns true if a and b are numerically equal. Returns true
+ even if the signs of zero values differ. Returns false if either SkMatrix
+ contains NaN, even if the other SkMatrix also contains NaN.
+
+ @param a SkMatrix to compare
+ @param b SkMatrix to compare
+ @return true if SkMatrix a and SkMatrix b are numerically equal
+ */
+ friend SK_API bool operator==(const SkMatrix& a, const SkMatrix& b);
+
+ /** Compares a and b; returns true if a and b are not numerically equal. Returns false
+ even if the signs of zero values differ. Returns true if either SkMatrix
+ contains NaN, even if the other SkMatrix also contains NaN.
+
+ @param a SkMatrix to compare
+ @param b SkMatrix to compare
+ @return true if SkMatrix a and SkMatrix b are numerically not equal
+ */
+ friend SK_API bool operator!=(const SkMatrix& a, const SkMatrix& b) {
+ return !(a == b);
+ }
+
+ /** Writes text representation of SkMatrix to standard output. Floating point values
+ are written with limited precision; it may not be possible to reconstruct
+ original SkMatrix from output.
+ */
+ void dump() const;
+
+ /** Returns the minimum scaling factor of SkMatrix by decomposing the scaling and
+ skewing elements.
+ Returns -1 if scale factor overflows or SkMatrix contains perspective.
+
+ @return minimum scale factor
+ */
+ SkScalar getMinScale() const;
+
+ /** Returns the maximum scaling factor of SkMatrix by decomposing the scaling and
+ skewing elements.
+ Returns -1 if scale factor overflows or SkMatrix contains perspective.
+
+ @return maximum scale factor
+ */
+ SkScalar getMaxScale() const;
+
+ /** Sets scaleFactors[0] to the minimum scaling factor, and scaleFactors[1] to the
+ maximum scaling factor. Scaling factors are computed by decomposing
+ the SkMatrix scaling and skewing elements.
+
+ Returns true if scaleFactors are found; otherwise, returns false and sets
+ scaleFactors to undefined values.
+
+ @param scaleFactors storage for minimum and maximum scale factors
+ @return true if scale factors were computed correctly
+ */
+ bool SK_WARN_UNUSED_RESULT getMinMaxScales(SkScalar scaleFactors[2]) const;
+
+ /** Decomposes SkMatrix into scale components and whatever remains. Returns false if
+ SkMatrix could not be decomposed.
+
+ Sets scale to the portion of SkMatrix that scales axes. Sets remaining to SkMatrix
+ with scaling factored out. remaining may be passed as nullptr
+ to determine if SkMatrix can be decomposed without computing remainder.
+
+ Returns true if scale components are found. scale and remaining are
+ unchanged if SkMatrix contains perspective, if scale factors are not finite,
+ or if they are nearly zero.
+
+ On success: Matrix = Remaining * scale.
+
+ @param scale axes scaling factors; may be nullptr
+ @param remaining SkMatrix without scaling; may be nullptr
+ @return true if scale can be computed
+ */
+ bool decomposeScale(SkSize* scale, SkMatrix* remaining = nullptr) const;
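+
+ // Illustrative sketch (editor's example): split the scale out of a matrix,
+ // e.g. to choose a mip level from the scale and apply the rest separately:
+ //
+ //     SkSize scale;
+ //     SkMatrix remaining;
+ //     if (m.decomposeScale(&scale, &remaining)) {
+ //         // m == remaining * SkMatrix::MakeScale(scale.width(), scale.height())
+ //     }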
+
+ /** Returns reference to const identity SkMatrix. Returned SkMatrix is set to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ @return const identity SkMatrix
+ */
+ static const SkMatrix& I();
+
+ /** Returns reference to a const SkMatrix with invalid values. Returned SkMatrix is set
+ to:
+
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+
+ @return const invalid SkMatrix
+ */
+ static const SkMatrix& InvalidMatrix();
+
+ /** Returns SkMatrix a multiplied by SkMatrix b.
+
+ Given:
+
+ | A B C | | J K L |
+ a = | D E F |, b = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param a SkMatrix on left side of multiply expression
+ @param b SkMatrix on right side of multiply expression
+ @return SkMatrix computed from a times b
+ */
+ static SkMatrix Concat(const SkMatrix& a, const SkMatrix& b) {
+ SkMatrix result;
+ result.setConcat(a, b);
+ return result;
+ }
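+
+ // Illustrative sketch (editor's example): Concat(a, b) applies b first and
+ // then a, matching the a * b matrix product (viewMatrix and localMatrix are
+ // hypothetical names):
+ //
+ //     SkMatrix total = SkMatrix::Concat(viewMatrix, localMatrix);
+ //     // mapping a point with total == mapping with localMatrix, then viewMatrix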
+
+ /** Sets internal cache to unknown state. Use to force update after repeated
+ modifications to SkMatrix element reference returned by operator[](int index).
+ */
+ void dirtyMatrixTypeCache() {
+ this->setTypeMask(kUnknown_Mask);
+ }
+
+ /** Initializes SkMatrix with scale and translate elements.
+
+ | sx 0 tx |
+ | 0 sy ty |
+ | 0 0 1 |
+
+ @param sx horizontal scale factor to store
+ @param sy vertical scale factor to store
+ @param tx horizontal translation to store
+ @param ty vertical translation to store
+ */
+ void setScaleTranslate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) {
+ fMat[kMScaleX] = sx;
+ fMat[kMSkewX] = 0;
+ fMat[kMTransX] = tx;
+
+ fMat[kMSkewY] = 0;
+ fMat[kMScaleY] = sy;
+ fMat[kMTransY] = ty;
+
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ unsigned mask = 0;
+ if (sx != 1 || sy != 1) {
+ mask |= kScale_Mask;
+ }
+ if (tx || ty) {
+ mask |= kTranslate_Mask;
+ }
+ this->setTypeMask(mask | kRectStaysRect_Mask);
+ }
+
+ /** Returns true if all elements of the matrix are finite. Returns false if any
+ element is infinity, or NaN.
+
+ @return true if matrix has only finite elements
+ */
+ bool isFinite() const { return SkScalarsAreFinite(fMat, 9); }
+
+private:
+ /** Set if the matrix will map a rectangle to another rectangle. This
+ can be true if the matrix is scale-only, or rotates a multiple of
+ 90 degrees.
+
+ This bit will be set on identity matrices.
+ */
+ static constexpr int kRectStaysRect_Mask = 0x10;
+
+ /** Set if the perspective bit is valid even though the rest of
+ the matrix is Unknown.
+ */
+ static constexpr int kOnlyPerspectiveValid_Mask = 0x40;
+
+ static constexpr int kUnknown_Mask = 0x80;
+
+ static constexpr int kORableMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask;
+
+ static constexpr int kAllMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask |
+ kRectStaysRect_Mask;
+
+ SkScalar fMat[9];
+ mutable uint32_t fTypeMask;
+
+ constexpr SkMatrix(SkScalar sx, SkScalar kx, SkScalar tx,
+ SkScalar ky, SkScalar sy, SkScalar ty,
+ SkScalar p0, SkScalar p1, SkScalar p2, uint32_t typeMask)
+ : fMat{sx, kx, tx,
+ ky, sy, ty,
+ p0, p1, p2}
+ , fTypeMask(typeMask) {}
+
+ static void ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp);
+
+ uint8_t computeTypeMask() const;
+ uint8_t computePerspectiveTypeMask() const;
+
+ void setTypeMask(int mask) {
+ // allow kUnknown or a valid mask
+ SkASSERT(kUnknown_Mask == mask || (mask & kAllMasks) == mask ||
+ ((kUnknown_Mask | kOnlyPerspectiveValid_Mask) & mask)
+ == (kUnknown_Mask | kOnlyPerspectiveValid_Mask));
+ fTypeMask = SkToU8(mask);
+ }
+
+ void orTypeMask(int mask) {
+ SkASSERT((mask & kORableMasks) == mask);
+ fTypeMask = SkToU8(fTypeMask | mask);
+ }
+
+ void clearTypeMask(int mask) {
+ // only allow a valid mask
+ SkASSERT((mask & kAllMasks) == mask);
+ fTypeMask = fTypeMask & ~mask;
+ }
+
+ TypeMask getPerspectiveTypeMaskOnly() const {
+ if ((fTypeMask & kUnknown_Mask) &&
+ !(fTypeMask & kOnlyPerspectiveValid_Mask)) {
+ fTypeMask = this->computePerspectiveTypeMask();
+ }
+ return (TypeMask)(fTypeMask & 0xF);
+ }
+
+ /** Returns true if we already know that the matrix is identity;
+ false otherwise.
+ */
+ bool isTriviallyIdentity() const {
+ if (fTypeMask & kUnknown_Mask) {
+ return false;
+ }
+ return ((fTypeMask & 0xF) == 0);
+ }
+
+ inline void updateTranslateMask() {
+ if ((fMat[kMTransX] != 0) | (fMat[kMTransY] != 0)) {
+ fTypeMask |= kTranslate_Mask;
+ } else {
+ fTypeMask &= ~kTranslate_Mask;
+ }
+ }
+
+ typedef void (*MapXYProc)(const SkMatrix& mat, SkScalar x, SkScalar y,
+ SkPoint* result);
+
+ static MapXYProc GetMapXYProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapXYProcs[mask & kAllMasks];
+ }
+
+ MapXYProc getMapXYProc() const {
+ return GetMapXYProc(this->getType());
+ }
+
+ typedef void (*MapPtsProc)(const SkMatrix& mat, SkPoint dst[],
+ const SkPoint src[], int count);
+
+ static MapPtsProc GetMapPtsProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapPtsProcs[mask & kAllMasks];
+ }
+
+ MapPtsProc getMapPtsProc() const {
+ return GetMapPtsProc(this->getType());
+ }
+
+ bool SK_WARN_UNUSED_RESULT invertNonIdentity(SkMatrix* inverse) const;
+
+ static bool Poly2Proc(const SkPoint[], SkMatrix*);
+ static bool Poly3Proc(const SkPoint[], SkMatrix*);
+ static bool Poly4Proc(const SkPoint[], SkMatrix*);
+
+ static void Identity_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Trans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Scale_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void ScaleTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Rot_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void RotTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Persp_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+
+ static const MapXYProc gMapXYProcs[];
+
+ static void Identity_pts(const SkMatrix&, SkPoint[], const SkPoint[], int);
+ static void Trans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void Scale_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void ScaleTrans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[],
+ int count);
+ static void Persp_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static void Affine_vpts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static const MapPtsProc gMapPtsProcs[];
+
+ // return the number of bytes written, whether or not buffer is null
+ size_t writeToMemory(void* buffer) const;
+ /**
+ * Reads data from the buffer parameter
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ friend class SkPerspIter;
+ friend class SkMatrixPriv;
+ friend class SkReader32;
+ friend class SerializationTest;
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMatrix44.h b/gfx/skia/skia/include/core/SkMatrix44.h
new file mode 100644
index 0000000000..4e17b5f1ba
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMatrix44.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix44_DEFINED
+#define SkMatrix44_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkScalar.h"
+
+#include <atomic>
+#include <cstring>
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+#ifdef SK_MSCALAR_IS_FLOAT
+ #error "can't define MSCALAR both as DOUBLE and FLOAT"
+#endif
+ typedef double SkMScalar;
+
+ static inline double SkFloatToMScalar(float x) {
+ return static_cast<double>(x);
+ }
+ static inline float SkMScalarToFloat(double x) {
+ return static_cast<float>(x);
+ }
+ static inline double SkDoubleToMScalar(double x) {
+ return x;
+ }
+ static inline double SkMScalarToDouble(double x) {
+ return x;
+ }
+ static inline double SkMScalarAbs(double x) {
+ return fabs(x);
+ }
+ static const SkMScalar SK_MScalarPI = 3.141592653589793;
+ static const SkMScalar SK_MScalarNaN = SK_DoubleNaN;
+
+ #define SkMScalarFloor(x) sk_double_floor(x)
+ #define SkMScalarCeil(x) sk_double_ceil(x)
+ #define SkMScalarRound(x) sk_double_round(x)
+
+ #define SkMScalarFloorToInt(x) sk_double_floor2int(x)
+ #define SkMScalarCeilToInt(x) sk_double_ceil2int(x)
+ #define SkMScalarRoundToInt(x) sk_double_round2int(x)
+
+
+#elif defined SK_MSCALAR_IS_FLOAT
+#ifdef SK_MSCALAR_IS_DOUBLE
+ #error "can't define MSCALAR both as DOUBLE and FLOAT"
+#endif
+ typedef float SkMScalar;
+
+ static inline float SkFloatToMScalar(float x) {
+ return x;
+ }
+ static inline float SkMScalarToFloat(float x) {
+ return x;
+ }
+ static inline float SkDoubleToMScalar(double x) {
+ return sk_double_to_float(x);
+ }
+ static inline double SkMScalarToDouble(float x) {
+ return static_cast<double>(x);
+ }
+ static inline float SkMScalarAbs(float x) {
+ return sk_float_abs(x);
+ }
+ static const SkMScalar SK_MScalarPI = 3.14159265f;
+ static const SkMScalar SK_MScalarNaN = SK_FloatNaN;
+
+ #define SkMScalarFloor(x) sk_float_floor(x)
+ #define SkMScalarCeil(x) sk_float_ceil(x)
+ #define SkMScalarRound(x) sk_float_round(x)
+
+ #define SkMScalarFloorToInt(x) sk_float_floor2int(x)
+ #define SkMScalarCeilToInt(x) sk_float_ceil2int(x)
+ #define SkMScalarRoundToInt(x) sk_float_round2int(x)
+
+#endif
+
+#define SkIntToMScalar(n) static_cast<SkMScalar>(n)
+
+#define SkMScalarToScalar(x) SkMScalarToFloat(x)
+#define SkScalarToMScalar(x) SkFloatToMScalar(x)
+
+static const SkMScalar SK_MScalar1 = 1;
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkVector4 {
+ SkScalar fData[4];
+
+ SkVector4() {
+ this->set(0, 0, 0, 1);
+ }
+ SkVector4(const SkVector4& src) {
+ memcpy(fData, src.fData, sizeof(fData));
+ }
+ SkVector4(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ fData[0] = x;
+ fData[1] = y;
+ fData[2] = z;
+ fData[3] = w;
+ }
+
+ SkVector4& operator=(const SkVector4& src) {
+ memcpy(fData, src.fData, sizeof(fData));
+ return *this;
+ }
+
+ bool operator==(const SkVector4& v) const {
+ return fData[0] == v.fData[0] && fData[1] == v.fData[1] &&
+ fData[2] == v.fData[2] && fData[3] == v.fData[3];
+ }
+ bool operator!=(const SkVector4& v) const { return !(*this == v); }
+ bool equals(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ return fData[0] == x && fData[1] == y &&
+ fData[2] == z && fData[3] == w;
+ }
+
+ void set(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ fData[0] = x;
+ fData[1] = y;
+ fData[2] = z;
+ fData[3] = w;
+ }
+};
+
+/** \class SkMatrix44
+
+ The SkMatrix44 class holds a 4x4 matrix.
+
+*/
+class SK_API SkMatrix44 {
+public:
+
+ enum Uninitialized_Constructor {
+ kUninitialized_Constructor
+ };
+ enum Identity_Constructor {
+ kIdentity_Constructor
+ };
+ enum NaN_Constructor {
+ kNaN_Constructor
+ };
+
+ SkMatrix44(Uninitialized_Constructor) {} // ironically, cannot be constexpr
+
+ constexpr SkMatrix44(Identity_Constructor)
+ : fMat{{ 1, 0, 0, 0, },
+ { 0, 1, 0, 0, },
+ { 0, 0, 1, 0, },
+ { 0, 0, 0, 1, }}
+ , fTypeMask(kIdentity_Mask) {}
+
+ SkMatrix44(NaN_Constructor)
+ : fMat{{ SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN },
+ { SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN },
+ { SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN },
+ { SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN }}
+ , fTypeMask(kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask) {}
+
+ constexpr SkMatrix44() : SkMatrix44{kIdentity_Constructor} {}
+
+ SkMatrix44(const SkMatrix44& src) = default;
+
+ SkMatrix44& operator=(const SkMatrix44& src) = default;
+
+ SkMatrix44(const SkMatrix44& a, const SkMatrix44& b) {
+ this->setConcat(a, b);
+ }
+
+ bool operator==(const SkMatrix44& other) const;
+ bool operator!=(const SkMatrix44& other) const {
+ return !(other == *this);
+ }
+
+    /* When converting from SkMatrix44 to SkMatrix, the third row and
+     * column are dropped. When converting from SkMatrix to SkMatrix44,
+     * the third row and column remain as identity:
+ * [ a b c ] [ a b 0 c ]
+ * [ d e f ] -> [ d e 0 f ]
+ * [ g h i ] [ 0 0 1 0 ]
+ * [ g h 0 i ]
+ */
+ SkMatrix44(const SkMatrix&);
+ SkMatrix44& operator=(const SkMatrix& src);
+ operator SkMatrix() const;
+
+ /**
+ * Return a reference to a const identity matrix
+ */
+ static const SkMatrix44& I();
+
+ using TypeMask = uint8_t;
+ enum : TypeMask {
+ kIdentity_Mask = 0,
+ kTranslate_Mask = 1 << 0, //!< set if the matrix has translation
+ kScale_Mask = 1 << 1, //!< set if the matrix has any scale != 1
+ kAffine_Mask = 1 << 2, //!< set if the matrix skews or rotates
+ kPerspective_Mask = 1 << 3, //!< set if the matrix is in perspective
+ };
+
+ /**
+ * Returns a bitfield describing the transformations the matrix may
+ * perform. The bitfield is computed conservatively, so it may include
+ * false positives. For example, when kPerspective_Mask is true, all
+ * other bits may be set to true even in the case of a pure perspective
+ * transform.
+ */
+ inline TypeMask getType() const { return fTypeMask; }
+
+ /**
+ * Return true if the matrix is identity.
+ */
+ inline bool isIdentity() const {
+ return kIdentity_Mask == this->getType();
+ }
+
+ /**
+ * Return true if the matrix contains translate or is identity.
+ */
+ inline bool isTranslate() const {
+ return !(this->getType() & ~kTranslate_Mask);
+ }
+
+ /**
+ * Return true if the matrix only contains scale or translate or is identity.
+ */
+ inline bool isScaleTranslate() const {
+ return !(this->getType() & ~(kScale_Mask | kTranslate_Mask));
+ }
+
+ /**
+ * Returns true if the matrix only contains scale or is identity.
+ */
+ inline bool isScale() const {
+ return !(this->getType() & ~kScale_Mask);
+ }
+
+ inline bool hasPerspective() const {
+ return SkToBool(this->getType() & kPerspective_Mask);
+ }
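Since the mask is conservative, these predicates are best treated as fast-path gates: a true result is reliable, while a false result may just mean extra bits were set. A minimal sketch (the helper name is illustrative):

    bool canUseScaleTranslateFastPath(const SkMatrix44& m) {
        // May return false for a matrix that is in fact scale+translate but
        // was built through a general setter; never returns a false positive.
        return m.isScaleTranslate();
    }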
+
+ void setIdentity();
+    inline void reset() { this->setIdentity(); }
+
+ /**
+ * get a value from the matrix. The row,col parameters work as follows:
+ * (0, 0) scale-x
+ * (0, 3) translate-x
+ * (3, 0) perspective-x
+ */
+ inline SkMScalar get(int row, int col) const {
+ SkASSERT((unsigned)row <= 3);
+ SkASSERT((unsigned)col <= 3);
+ return fMat[col][row];
+ }
+
+ /**
+ * set a value in the matrix. The row,col parameters work as follows:
+ * (0, 0) scale-x
+ * (0, 3) translate-x
+ * (3, 0) perspective-x
+ */
+ inline void set(int row, int col, SkMScalar value) {
+ SkASSERT((unsigned)row <= 3);
+ SkASSERT((unsigned)col <= 3);
+ fMat[col][row] = value;
+ this->recomputeTypeMask();
+ }
+
+ inline double getDouble(int row, int col) const {
+ return SkMScalarToDouble(this->get(row, col));
+ }
+ inline void setDouble(int row, int col, double value) {
+ this->set(row, col, SkDoubleToMScalar(value));
+ }
+ inline float getFloat(int row, int col) const {
+ return SkMScalarToFloat(this->get(row, col));
+ }
+ inline void setFloat(int row, int col, float value) {
+ this->set(row, col, SkFloatToMScalar(value));
+ }
+
+ /** These methods allow one to efficiently read matrix entries into an
+ * array. The given array must have room for exactly 16 entries. Whenever
+ * possible, they will try to use memcpy rather than an entry-by-entry
+ * copy.
+ *
+ * Col major indicates that consecutive elements of columns will be stored
+ * contiguously in memory. Row major indicates that consecutive elements
+ * of rows will be stored contiguously in memory.
+ */
+ void asColMajorf(float[]) const;
+ void asColMajord(double[]) const;
+ void asRowMajorf(float[]) const;
+ void asRowMajord(double[]) const;
+
+ /** These methods allow one to efficiently set all matrix entries from an
+ * array. The given array must have room for exactly 16 entries. Whenever
+ * possible, they will try to use memcpy rather than an entry-by-entry
+ * copy.
+ *
+ * Col major indicates that input memory will be treated as if consecutive
+ * elements of columns are stored contiguously in memory. Row major
+ * indicates that input memory will be treated as if consecutive elements
+ * of rows are stored contiguously in memory.
+ */
+ void setColMajorf(const float[]);
+ void setColMajord(const double[]);
+ void setRowMajorf(const float[]);
+ void setRowMajord(const double[]);
+
+#ifdef SK_MSCALAR_IS_FLOAT
+ void setColMajor(const SkMScalar data[]) { this->setColMajorf(data); }
+ void setRowMajor(const SkMScalar data[]) { this->setRowMajorf(data); }
+#else
+ void setColMajor(const SkMScalar data[]) { this->setColMajord(data); }
+ void setRowMajor(const SkMScalar data[]) { this->setRowMajord(data); }
+#endif
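To make the two layouts concrete: the entry at row r, column c lands at index c*4 + r in a column-major array and at r*4 + c in a row-major one, so the same value appears at different offsets. A minimal sketch:

    void dumpBothLayouts(const SkMatrix44& m) {
        float colMajor[16];
        float rowMajor[16];
        m.asColMajorf(colMajor);    // colMajor[12] holds the x-translation (row 0, col 3)
        m.asRowMajorf(rowMajor);    // rowMajor[3] holds the same value
    }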
+
+ /* This sets the top-left of the matrix and clears the translation and
+ * perspective components (with [3][3] set to 1). m_ij is interpreted
+ * as the matrix entry at row = i, col = j. */
+ void set3x3(SkMScalar m_00, SkMScalar m_10, SkMScalar m_20,
+ SkMScalar m_01, SkMScalar m_11, SkMScalar m_21,
+ SkMScalar m_02, SkMScalar m_12, SkMScalar m_22);
+ void set3x3RowMajorf(const float[]);
+
+ void set4x4(SkMScalar m_00, SkMScalar m_10, SkMScalar m_20, SkMScalar m_30,
+ SkMScalar m_01, SkMScalar m_11, SkMScalar m_21, SkMScalar m_31,
+ SkMScalar m_02, SkMScalar m_12, SkMScalar m_22, SkMScalar m_32,
+ SkMScalar m_03, SkMScalar m_13, SkMScalar m_23, SkMScalar m_33);
+
+ void setTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+ void preTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+ void postTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+
+ void setScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+ void preScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+ void postScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+
+ inline void setScale(SkMScalar scale) {
+ this->setScale(scale, scale, scale);
+ }
+ inline void preScale(SkMScalar scale) {
+ this->preScale(scale, scale, scale);
+ }
+ inline void postScale(SkMScalar scale) {
+ this->postScale(scale, scale, scale);
+ }
+
+ void setRotateDegreesAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar degrees) {
+ this->setRotateAbout(x, y, z, degrees * SK_MScalarPI / 180);
+ }
+
+    /** Rotate about the vector [x,y,z]. If that vector is not unit-length,
+        it will be automatically normalized to unit length.
+    */
+ void setRotateAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians);
+ /** Rotate about the vector [x,y,z]. Does not check the length of the
+ vector, assuming it is unit-length.
+ */
+ void setRotateAboutUnit(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians);
+
+ void setConcat(const SkMatrix44& a, const SkMatrix44& b);
+ inline void preConcat(const SkMatrix44& m) {
+ this->setConcat(*this, m);
+ }
+ inline void postConcat(const SkMatrix44& m) {
+ this->setConcat(m, *this);
+ }
+
+ friend SkMatrix44 operator*(const SkMatrix44& a, const SkMatrix44& b) {
+ return SkMatrix44(a, b);
+ }
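The pre/post distinction determines which side of the multiply the existing matrix occupies; with points mapped as column vectors, the right-hand factor is applied to points first. A minimal sketch:

    void concatSides(SkMatrix44& m, const SkMatrix44& t) {
        SkMatrix44 pre(m);
        pre.preConcat(t);     // pre  == m * t : t transforms points first, then m
        SkMatrix44 post(m);
        post.postConcat(t);   // post == t * m : t transforms the output of m
    }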
+
+ /** If this is invertible, return that in inverse and return true. If it is
+ not invertible, return false and leave the inverse parameter in an
+ unspecified state.
+ */
+ bool invert(SkMatrix44* inverse) const;
+
+ /** Transpose this matrix in place. */
+ void transpose();
+
+ /** Apply the matrix to the src vector, returning the new vector in dst.
+ It is legal for src and dst to point to the same memory.
+ */
+ void mapScalars(const SkScalar src[4], SkScalar dst[4]) const;
+ inline void mapScalars(SkScalar vec[4]) const {
+ this->mapScalars(vec, vec);
+ }
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+ void mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const;
+#elif defined SK_MSCALAR_IS_FLOAT
+ inline void mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const {
+ this->mapScalars(src, dst);
+ }
+#endif
+ inline void mapMScalars(SkMScalar vec[4]) const {
+ this->mapMScalars(vec, vec);
+ }
+
+ friend SkVector4 operator*(const SkMatrix44& m, const SkVector4& src) {
+ SkVector4 dst;
+ m.mapScalars(src.fData, dst.fData);
+ return dst;
+ }
+
+ /**
+ * map an array of [x, y, 0, 1] through the matrix, returning an array
+ * of [x', y', z', w'].
+ *
+ * @param src2 array of [x, y] pairs, with implied z=0 and w=1
+ * @param count number of [x, y] pairs in src2
+ * @param dst4 array of [x', y', z', w'] quads as the output.
+ */
+ void map2(const float src2[], int count, float dst4[]) const;
+ void map2(const double src2[], int count, double dst4[]) const;
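A minimal sketch of map2(): each input pair is treated as [x, y, 0, 1] and expanded to a homogeneous quad, which the caller divides by w to recover 2d coordinates under perspective:

    void mapTwoPoints(const SkMatrix44& m) {
        const float src2[4] = { 0.0f, 0.0f, 10.0f, 20.0f };   // two (x, y) pairs
        float dst4[8];                                        // two (x', y', z', w') quads
        m.map2(src2, /*count=*/2, dst4);
        // Under perspective, the first mapped point is (dst4[0]/dst4[3], dst4[1]/dst4[3]).
    }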
+
+    /** Returns true if transforming an axis-aligned square in 2d by this matrix
+ will produce another 2d axis-aligned square; typically means the matrix
+ is a scale with perhaps a 90-degree rotation. A 3d rotation through 90
+ degrees into a perpendicular plane collapses a square to a line, but
+ is still considered to be axis-aligned.
+
+ By default, tolerates very slight error due to float imprecisions;
+ a 90-degree rotation can still end up with 10^-17 of
+ "non-axis-aligned" result.
+ */
+ bool preserves2dAxisAlignment(SkMScalar epsilon = SK_ScalarNearlyZero) const;
+
+ void dump() const;
+
+ double determinant() const;
+
+private:
+ /* This is indexed by [col][row]. */
+ SkMScalar fMat[4][4];
+ TypeMask fTypeMask;
+
+ static constexpr int kAllPublic_Masks = 0xF;
+
+ void as3x4RowMajorf(float[]) const;
+ void set3x4RowMajorf(const float[]);
+
+ SkMScalar transX() const { return fMat[3][0]; }
+ SkMScalar transY() const { return fMat[3][1]; }
+ SkMScalar transZ() const { return fMat[3][2]; }
+
+ SkMScalar scaleX() const { return fMat[0][0]; }
+ SkMScalar scaleY() const { return fMat[1][1]; }
+ SkMScalar scaleZ() const { return fMat[2][2]; }
+
+ SkMScalar perspX() const { return fMat[0][3]; }
+ SkMScalar perspY() const { return fMat[1][3]; }
+ SkMScalar perspZ() const { return fMat[2][3]; }
+
+ void recomputeTypeMask();
+
+ inline void setTypeMask(TypeMask mask) {
+ SkASSERT(0 == (~kAllPublic_Masks & mask));
+ fTypeMask = mask;
+ }
+
+ inline const SkMScalar* values() const { return &fMat[0][0]; }
+
+ friend class SkColorSpace;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMilestone.h b/gfx/skia/skia/include/core/SkMilestone.h
new file mode 100644
index 0000000000..6ac2a14b07
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMilestone.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SK_MILESTONE
+#define SK_MILESTONE 79
+#endif
diff --git a/gfx/skia/skia/include/core/SkMultiPictureDraw.h b/gfx/skia/skia/include/core/SkMultiPictureDraw.h
new file mode 100644
index 0000000000..70c4e16c1f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMultiPictureDraw.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDraw_DEFINED
+#define SkMultiPictureDraw_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/private/SkTDArray.h"
+
+class SkCanvas;
+class SkPaint;
+class SkPicture;
+
+/** \class SkMultiPictureDraw
+
+ The MultiPictureDraw object accepts several picture/canvas pairs and
+ then attempts to optimally draw the pictures into the canvases, sharing
+ as many resources as possible.
+*/
+class SK_API SkMultiPictureDraw {
+public:
+ /**
+ * Create an object to optimize the drawing of multiple pictures.
+ * @param reserve Hint for the number of add calls expected to be issued
+ */
+ SkMultiPictureDraw(int reserve = 0);
+ ~SkMultiPictureDraw() { this->reset(); }
+
+ /**
+ * Add a canvas/picture pair for later rendering.
+ * @param canvas the canvas in which to draw picture
+ * @param picture the picture to draw into canvas
+ * @param matrix if non-NULL, applied to the CTM when drawing
+ * @param paint if non-NULL, draw picture to a temporary buffer
+ * and then apply the paint when the result is drawn
+ */
+ void add(SkCanvas* canvas,
+ const SkPicture* picture,
+ const SkMatrix* matrix = nullptr,
+ const SkPaint* paint = nullptr);
+
+ /**
+ * Perform all the previously added draws. This will reset the state
+ * of this object. If flush is true, all canvases are flushed after
+ * draw.
+ */
+ void draw(bool flush = false);
+
+ /**
+ * Abandon all buffered draws and reset to the initial state.
+ */
+ void reset();
+
+private:
+ struct DrawData {
+ SkCanvas* fCanvas;
+ const SkPicture* fPicture; // reffed
+ SkMatrix fMatrix;
+ SkPaint* fPaint; // owned
+
+ void init(SkCanvas*, const SkPicture*, const SkMatrix*, const SkPaint*);
+ void draw();
+
+ static void Reset(SkTDArray<DrawData>&);
+ };
+
+ SkTDArray<DrawData> fThreadSafeDrawData;
+ SkTDArray<DrawData> fGPUDrawData;
+};
+
+#endif
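A minimal usage sketch for the class above, assuming the canvases and picture are obtained elsewhere:

    void drawToBoth(SkCanvas* c1, SkCanvas* c2, const SkPicture* pic) {
        SkMultiPictureDraw mpd(/*reserve=*/2);
        mpd.add(c1, pic);           // default matrix (nullptr) and paint (nullptr)
        mpd.add(c2, pic);
        mpd.draw(/*flush=*/true);   // renders every pair, flushes, then resets the object
    }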
diff --git a/gfx/skia/skia/include/core/SkOverdrawCanvas.h b/gfx/skia/skia/include/core/SkOverdrawCanvas.h
new file mode 100644
index 0000000000..87f581a204
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkOverdrawCanvas.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOverdrawCanvas_DEFINED
+#define SkOverdrawCanvas_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/utils/SkNWayCanvas.h"
+
+/**
+ * Captures all drawing commands. Rather than draw the actual content, this device
+ * increments the alpha channel of each pixel every time it would have been touched
+ * by a draw call. This is useful for detecting overdraw.
+ */
+class SK_API SkOverdrawCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> {
+public:
+ /* Does not take ownership of canvas */
+ SkOverdrawCanvas(SkCanvas*);
+
+ void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override;
+ void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode,
+ const SkPaint&) override;
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint& paint) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int, SkBlendMode, const SkRect*, const SkPaint*) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawImage(const SkImage*, SkScalar, SkScalar, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect&, const SkRect&, const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice&, const SkRect&, const SkPaint*) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar, SkScalar, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect&, const SkRect&, const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice&, const SkRect&,
+ const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawAnnotation(const SkRect&, const char key[], SkData* value) override;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], SkCanvas::QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkPaint*, SrcRectConstraint) override;
+
+private:
+ void drawPosTextCommon(const SkGlyphID[], int, const SkScalar[], int, const SkPoint&,
+ const SkFont&, const SkPaint&);
+
+ inline SkPaint overdrawPaint(const SkPaint& paint);
+
+ SkPaint fPaint;
+
+ typedef SkCanvasVirtualEnforcer<SkNWayCanvas> INHERITED;
+};
+
+#endif
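A minimal sketch of using the class above to measure overdraw; the alpha-8 raster surface is an assumption chosen so that each pixel's alpha acts as a draw counter:

    #include "include/core/SkSurface.h"

    void measureOverdraw(int w, int h, const SkPicture* pic) {
        // Each pixel of this A8 surface counts how many times it was touched.
        sk_sp<SkSurface> surface = SkSurface::MakeRaster(SkImageInfo::MakeA8(w, h));
        SkOverdrawCanvas overdraw(surface->getCanvas());
        overdraw.drawPicture(pic);   // each forwarded draw bumps touched pixels' alpha by one
        // Read back the surface pixels: the value at each pixel is its overdraw count.
    }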
diff --git a/gfx/skia/skia/include/core/SkPaint.h b/gfx/skia/skia/include/core/SkPaint.h
new file mode 100644
index 0000000000..36f8751ea3
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPaint.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaint_DEFINED
+#define SkPaint_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTo.h"
+
+class SkColorFilter;
+class SkColorSpace;
+struct SkRect;
+class SkImageFilter;
+class SkMaskFilter;
+class SkPath;
+class SkPathEffect;
+class SkShader;
+
+/** \class SkPaint
+ SkPaint controls options applied when drawing. SkPaint collects all
+ options outside of the SkCanvas clip and SkCanvas matrix.
+
+ Various options apply to strokes and fills, and images.
+
+ SkPaint collects effects and filters that describe single-pass and multiple-pass
+ algorithms that alter the drawing geometry, color, and transparency. For instance,
+ SkPaint does not directly implement dashing or blur, but contains the objects that do so.
+*/
+class SK_API SkPaint {
+public:
+
+ /** Constructs SkPaint with default values.
+
+ @return default initialized SkPaint
+ */
+ SkPaint();
+
+ /** Constructs SkPaint with default values and the given color.
+
+ Sets alpha and RGB used when stroking and filling. The color is four floating
+ point values, unpremultiplied. The color values are interpreted as being in
+ the colorSpace. If colorSpace is nullptr, then color is assumed to be in the
+ sRGB color space.
+
+ @param color unpremultiplied RGBA
+ @param colorSpace SkColorSpace describing the encoding of color
+ @return SkPaint with the given color
+ */
+ explicit SkPaint(const SkColor4f& color, SkColorSpace* colorSpace = nullptr);
+
+ /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter are shared
+ between the original paint and the copy. Objects containing SkRefCnt increment
+ their references by one.
+
+ The referenced objects SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ and SkImageFilter cannot be modified after they are created.
+ This prevents objects with SkRefCnt from being modified once SkPaint refers to them.
+
+ @param paint original to copy
+ @return shallow copy of paint
+ */
+ SkPaint(const SkPaint& paint);
+
+ /** Implements a move constructor to avoid increasing the reference counts
+ of objects referenced by the paint.
+
+ After the call, paint is undefined, and can be safely destructed.
+
+ @param paint original to move
+ @return content of paint
+ */
+ SkPaint(SkPaint&& paint);
+
+ /** Decreases SkPaint SkRefCnt of owned objects: SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter. If the
+ objects containing SkRefCnt go to zero, they are deleted.
+ */
+ ~SkPaint();
+
+ /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter are shared
+ between the original paint and the copy. Objects containing SkRefCnt in the
+ prior destination are decreased by one, and the referenced objects are deleted if the
+ resulting count is zero. Objects containing SkRefCnt in the parameter paint
+ are increased by one. paint is unmodified.
+
+ @param paint original to copy
+ @return content of paint
+ */
+ SkPaint& operator=(const SkPaint& paint);
+
+ /** Moves the paint to avoid increasing the reference counts
+ of objects referenced by the paint parameter. Objects containing SkRefCnt in the
+ prior destination are decreased by one; those objects are deleted if the resulting count
+ is zero.
+
+ After the call, paint is undefined, and can be safely destructed.
+
+ @param paint original to move
+ @return content of paint
+ */
+ SkPaint& operator=(SkPaint&& paint);
+
+ /** Compares a and b, and returns true if a and b are equivalent. May return false
+ if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ or SkImageFilter have identical contents but different pointers.
+
+ @param a SkPaint to compare
+ @param b SkPaint to compare
+ @return true if SkPaint pair are equivalent
+ */
+ SK_API friend bool operator==(const SkPaint& a, const SkPaint& b);
+
+ /** Compares a and b, and returns true if a and b are not equivalent. May return true
+ if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ or SkImageFilter have identical contents but different pointers.
+
+ @param a SkPaint to compare
+ @param b SkPaint to compare
+ @return true if SkPaint pair are not equivalent
+ */
+ friend bool operator!=(const SkPaint& a, const SkPaint& b) {
+ return !(a == b);
+ }
+
+ /** Returns a hash generated from SkPaint values and pointers.
+ Identical hashes guarantee that the paints are
+ equivalent, but differing hashes do not guarantee that the paints have differing
+ contents.
+
+ If operator==(const SkPaint& a, const SkPaint& b) returns true for two paints,
+ their hashes are also equal.
+
+ The hash returned is platform and implementation specific.
+
+ @return a shallow hash
+ */
+ uint32_t getHash() const;
+
+ /** Sets all SkPaint contents to their initial values. This is equivalent to replacing
+ SkPaint with the result of SkPaint().
+ */
+ void reset();
+
+ /** Returns true if pixels on the active edges of SkPath may be drawn with partial transparency.
+ @return antialiasing state
+ */
+ bool isAntiAlias() const {
+ return SkToBool(fBitfields.fAntiAlias);
+ }
+
+ /** Requests, but does not require, that edge pixels draw opaque or with
+ partial transparency.
+ @param aa setting for antialiasing
+ */
+ void setAntiAlias(bool aa) { fBitfields.fAntiAlias = static_cast<unsigned>(aa); }
+
+ /** Returns true if color error may be distributed to smooth color transition.
+ @return dithering state
+ */
+ bool isDither() const {
+ return SkToBool(fBitfields.fDither);
+ }
+
+ /** Requests, but does not require, to distribute color error.
+        @param dither setting for dithering
+ */
+ void setDither(bool dither) { fBitfields.fDither = static_cast<unsigned>(dither); }
+
+ /** Returns SkFilterQuality, the image filtering level. A lower setting
+ draws faster; a higher setting looks better when the image is scaled.
+
+ @return one of: kNone_SkFilterQuality, kLow_SkFilterQuality,
+ kMedium_SkFilterQuality, kHigh_SkFilterQuality
+ */
+ SkFilterQuality getFilterQuality() const {
+ return (SkFilterQuality)fBitfields.fFilterQuality;
+ }
+
+ /** Sets SkFilterQuality, the image filtering level. A lower setting
+ draws faster; a higher setting looks better when the image is scaled.
+ Does not check to see if quality is valid.
+
+ @param quality one of: kNone_SkFilterQuality, kLow_SkFilterQuality,
+ kMedium_SkFilterQuality, kHigh_SkFilterQuality
+ */
+ void setFilterQuality(SkFilterQuality quality);
+
+ /** \enum SkPaint::Style
+        Set Style to fill, stroke, or both fill and stroke geometry.
+        The stroke and fill share all paint attributes; for instance, they are
+        drawn with the same color.
+
+ Use kStrokeAndFill_Style to avoid hitting the same pixels twice with a stroke draw and
+ a fill draw.
+ */
+ enum Style : uint8_t {
+ kFill_Style, //!< set to fill geometry
+ kStroke_Style, //!< set to stroke geometry
+ kStrokeAndFill_Style, //!< sets to stroke and fill geometry
+ };
+
+ /** May be used to verify that SkPaint::Style is a legal value.
+ */
+ static constexpr int kStyleCount = kStrokeAndFill_Style + 1;
+
+ /** Returns whether the geometry is filled, stroked, or filled and stroked.
+
+        @return one of: kFill_Style, kStroke_Style, kStrokeAndFill_Style
+ */
+ Style getStyle() const { return (Style)fBitfields.fStyle; }
+
+ /** Sets whether the geometry is filled, stroked, or filled and stroked.
+ Has no effect if style is not a legal SkPaint::Style value.
+
+ @param style one of: kFill_Style, kStroke_Style, kStrokeAndFill_Style
+ */
+ void setStyle(Style style);
+
+ /** Retrieves alpha and RGB, unpremultiplied, packed into 32 bits.
+ Use helpers SkColorGetA(), SkColorGetR(), SkColorGetG(), and SkColorGetB() to extract
+ a color component.
+
+ @return unpremultiplied ARGB
+ */
+ SkColor getColor() const { return fColor4f.toSkColor(); }
+
+    /** Retrieves alpha and RGB, unpremultiplied, as four floating point values. RGB are
+        extended sRGB values (sRGB gamut, and encoded with the sRGB transfer function).
+
+ @return unpremultiplied RGBA
+ */
+ SkColor4f getColor4f() const { return fColor4f; }
+
+ /** Sets alpha and RGB used when stroking and filling. The color is a 32-bit value,
+ unpremultiplied, packing 8-bit components for alpha, red, blue, and green.
+
+ @param color unpremultiplied ARGB
+ */
+ void setColor(SkColor color);
+
+ /** Sets alpha and RGB used when stroking and filling. The color is four floating
+ point values, unpremultiplied. The color values are interpreted as being in
+ the colorSpace. If colorSpace is nullptr, then color is assumed to be in the
+ sRGB color space.
+
+ @param color unpremultiplied RGBA
+ @param colorSpace SkColorSpace describing the encoding of color
+ */
+ void setColor(const SkColor4f& color, SkColorSpace* colorSpace = nullptr);
+
+ void setColor4f(const SkColor4f& color, SkColorSpace* colorSpace = nullptr) {
+ this->setColor(color, colorSpace);
+ }
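Both setters feed the same internal four-float color; a minimal sketch showing the packed and float forms producing an identical paint color (the helper name is illustrative):

    void equivalentReds() {
        SkPaint p;
        p.setColor(SK_ColorRED);                         // packed 32-bit unpremultiplied ARGB
        SkPaint q;
        q.setColor(SkColor4f{1.0f, 0.0f, 0.0f, 1.0f});   // floats, treated as sRGB (colorSpace == nullptr)
        // p.getColor4f() and q.getColor4f() now compare equal.
    }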
+
+ /** Retrieves alpha from the color used when stroking and filling.
+
+        @return alpha ranging from zero, fully transparent, to 1.0, fully opaque
+ */
+ float getAlphaf() const { return fColor4f.fA; }
+
+ // Helper that scales the alpha by 255.
+ uint8_t getAlpha() const { return sk_float_round2int(this->getAlphaf() * 255); }
+
+ /** Replaces alpha, leaving RGB
+ unchanged. An out of range value triggers an assert in the debug
+ build. a is a value from 0.0 to 1.0.
+ a set to zero makes color fully transparent; a set to 1.0 makes color
+ fully opaque.
+
+ @param a alpha component of color
+ */
+ void setAlphaf(float a);
+
+ // Helper that accepts an int between 0 and 255, and divides it by 255.0
+ void setAlpha(U8CPU a) {
+ this->setAlphaf(a * (1.0f / 255));
+ }
+
+ /** Sets color used when drawing solid fills. The color components range from 0 to 255.
+ The color is unpremultiplied; alpha sets the transparency independent of RGB.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ */
+ void setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+ /** Returns the thickness of the pen used by SkPaint to
+ outline the shape.
+
+ @return zero for hairline, greater than zero for pen thickness
+ */
+ SkScalar getStrokeWidth() const { return fWidth; }
+
+ /** Sets the thickness of the pen used by the paint to
+ outline the shape.
+ Has no effect if width is less than zero.
+
+ @param width zero thickness for hairline; greater than zero for pen thickness
+ */
+ void setStrokeWidth(SkScalar width);
+
+ /** Returns the limit at which a sharp corner is drawn beveled.
+
+        @return miter limit, zero or greater
+ */
+ SkScalar getStrokeMiter() const { return fMiterLimit; }
+
+ /** Sets the limit at which a sharp corner is drawn beveled.
+ Valid values are zero and greater.
+ Has no effect if miter is less than zero.
+
+        @param miter miter limit, zero or greater
+ */
+ void setStrokeMiter(SkScalar miter);
+
+ /** \enum SkPaint::Cap
+ Cap draws at the beginning and end of an open path contour.
+ */
+ enum Cap {
+ kButt_Cap, //!< no stroke extension
+ kRound_Cap, //!< adds circle
+ kSquare_Cap, //!< adds square
+ kLast_Cap = kSquare_Cap, //!< largest Cap value
+ kDefault_Cap = kButt_Cap, //!< equivalent to kButt_Cap
+ };
+
+ /** May be used to verify that SkPaint::Cap is a legal value.
+ */
+ static constexpr int kCapCount = kLast_Cap + 1;
+
+ /** \enum SkPaint::Join
+ Join specifies how corners are drawn when a shape is stroked. Join
+ affects the four corners of a stroked rectangle, and the connected segments in a
+ stroked path.
+
+ Choose miter join to draw sharp corners. Choose round join to draw a circle with a
+ radius equal to the stroke width on top of the corner. Choose bevel join to minimally
+ connect the thick strokes.
+
+ The fill path constructed to describe the stroked path respects the join setting but may
+ not contain the actual join. For instance, a fill path constructed with round joins does
+ not necessarily include circles at each connected segment.
+ */
+ enum Join : uint8_t {
+ kMiter_Join, //!< extends to miter limit
+ kRound_Join, //!< adds circle
+ kBevel_Join, //!< connects outside edges
+ kLast_Join = kBevel_Join, //!< equivalent to the largest value for Join
+ kDefault_Join = kMiter_Join, //!< equivalent to kMiter_Join
+ };
+
+ /** May be used to verify that SkPaint::Join is a legal value.
+ */
+ static constexpr int kJoinCount = kLast_Join + 1;
+
+ /** Returns the geometry drawn at the beginning and end of strokes.
+
+ @return one of: kButt_Cap, kRound_Cap, kSquare_Cap
+ */
+ Cap getStrokeCap() const { return (Cap)fBitfields.fCapType; }
+
+ /** Sets the geometry drawn at the beginning and end of strokes.
+
+ @param cap one of: kButt_Cap, kRound_Cap, kSquare_Cap;
+ has no effect if cap is not valid
+ */
+ void setStrokeCap(Cap cap);
+
+ /** Returns the geometry drawn at the corners of strokes.
+
+ @return one of: kMiter_Join, kRound_Join, kBevel_Join
+ */
+ Join getStrokeJoin() const { return (Join)fBitfields.fJoinType; }
+
+ /** Sets the geometry drawn at the corners of strokes.
+
+ @param join one of: kMiter_Join, kRound_Join, kBevel_Join;
+ otherwise, has no effect
+ */
+ void setStrokeJoin(Join join);
+
+ /** Returns the filled equivalent of the stroked path.
+
+ @param src SkPath read to create a filled version
+ @param dst resulting SkPath; may be the same as src, but may not be nullptr
+ @param cullRect optional limit passed to SkPathEffect
+ @param resScale if > 1, increase precision, else if (0 < resScale < 1) reduce precision
+ to favor speed and size
+ @return true if the path represents style fill, or false if it represents hairline
+ */
+ bool getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect,
+ SkScalar resScale = 1) const;
+
+ /** Returns the filled equivalent of the stroked path.
+
+ Replaces dst with the src path modified by SkPathEffect and style stroke.
+ SkPathEffect, if any, is not culled. stroke width is created with default precision.
+
+ @param src SkPath read to create a filled version
+        @param dst resulting SkPath; may be the same as src, but may not be nullptr
+ @return true if the path represents style fill, or false if it represents hairline
+ */
+ bool getFillPath(const SkPath& src, SkPath* dst) const {
+ return this->getFillPath(src, dst, nullptr, 1);
+ }
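A minimal sketch of the common use of getFillPath(): converting a stroked outline into a fill that can be hit-tested or clipped:

    bool strokeToFill(const SkPath& src, SkPath* outline) {
        SkPaint stroke;
        stroke.setStyle(SkPaint::kStroke_Style);
        stroke.setStrokeWidth(4.0f);
        // On success, outline covers the pixels the stroke would; a false
        // return means the result must be drawn as a hairline instead.
        return stroke.getFillPath(src, outline);
    }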
+
+ /** Returns optional colors used when filling a path, such as a gradient.
+
+ Does not alter SkShader SkRefCnt.
+
+ @return SkShader if previously set, nullptr otherwise
+ */
+ SkShader* getShader() const { return fShader.get(); }
+
+ /** Returns optional colors used when filling a path, such as a gradient.
+
+ Increases SkShader SkRefCnt by one.
+
+ @return SkShader if previously set, nullptr otherwise
+ */
+ sk_sp<SkShader> refShader() const;
+
+ /** Sets optional colors used when filling a path, such as a gradient.
+
+ Sets SkShader to shader, decreasing SkRefCnt of the previous SkShader.
+ Increments shader SkRefCnt by one.
+
+ @param shader how geometry is filled with color; if nullptr, color is used instead
+ */
+ void setShader(sk_sp<SkShader> shader);
+
+ /** Returns SkColorFilter if set, or nullptr.
+ Does not alter SkColorFilter SkRefCnt.
+
+ @return SkColorFilter if previously set, nullptr otherwise
+ */
+ SkColorFilter* getColorFilter() const { return fColorFilter.get(); }
+
+ /** Returns SkColorFilter if set, or nullptr.
+ Increases SkColorFilter SkRefCnt by one.
+
+ @return SkColorFilter if set, or nullptr
+ */
+ sk_sp<SkColorFilter> refColorFilter() const;
+
+ /** Sets SkColorFilter to filter, decreasing SkRefCnt of the previous
+ SkColorFilter. Pass nullptr to clear SkColorFilter.
+
+ Increments filter SkRefCnt by one.
+
+ @param colorFilter SkColorFilter to apply to subsequent draw
+ */
+ void setColorFilter(sk_sp<SkColorFilter> colorFilter);
+
+ /** Returns SkBlendMode.
+ By default, returns SkBlendMode::kSrcOver.
+
+ @return mode used to combine source color with destination color
+ */
+ SkBlendMode getBlendMode() const { return (SkBlendMode)fBitfields.fBlendMode; }
+
+ /** Returns true if SkBlendMode is SkBlendMode::kSrcOver, the default.
+
+ @return true if SkBlendMode is SkBlendMode::kSrcOver
+ */
+ bool isSrcOver() const { return (SkBlendMode)fBitfields.fBlendMode == SkBlendMode::kSrcOver; }
+
+ /** Sets SkBlendMode to mode.
+ Does not check for valid input.
+
+ @param mode SkBlendMode used to combine source color and destination
+ */
+ void setBlendMode(SkBlendMode mode) { fBitfields.fBlendMode = (unsigned)mode; }
+
+ /** Returns SkPathEffect if set, or nullptr.
+ Does not alter SkPathEffect SkRefCnt.
+
+ @return SkPathEffect if previously set, nullptr otherwise
+ */
+ SkPathEffect* getPathEffect() const { return fPathEffect.get(); }
+
+ /** Returns SkPathEffect if set, or nullptr.
+ Increases SkPathEffect SkRefCnt by one.
+
+ @return SkPathEffect if previously set, nullptr otherwise
+ */
+ sk_sp<SkPathEffect> refPathEffect() const;
+
+ /** Sets SkPathEffect to pathEffect, decreasing SkRefCnt of the previous
+ SkPathEffect. Pass nullptr to leave the path geometry unaltered.
+
+ Increments pathEffect SkRefCnt by one.
+
+ @param pathEffect replace SkPath with a modification when drawn
+ */
+ void setPathEffect(sk_sp<SkPathEffect> pathEffect);
+
+ /** Returns SkMaskFilter if set, or nullptr.
+ Does not alter SkMaskFilter SkRefCnt.
+
+ @return SkMaskFilter if previously set, nullptr otherwise
+ */
+ SkMaskFilter* getMaskFilter() const { return fMaskFilter.get(); }
+
+ /** Returns SkMaskFilter if set, or nullptr.
+
+ Increases SkMaskFilter SkRefCnt by one.
+
+ @return SkMaskFilter if previously set, nullptr otherwise
+ */
+ sk_sp<SkMaskFilter> refMaskFilter() const;
+
+ /** Sets SkMaskFilter to maskFilter, decreasing SkRefCnt of the previous
+ SkMaskFilter. Pass nullptr to clear SkMaskFilter and leave SkMaskFilter effect on
+ mask alpha unaltered.
+
+ Increments maskFilter SkRefCnt by one.
+
+ @param maskFilter modifies clipping mask generated from drawn geometry
+ */
+ void setMaskFilter(sk_sp<SkMaskFilter> maskFilter);
+
+ /** Returns SkImageFilter if set, or nullptr.
+ Does not alter SkImageFilter SkRefCnt.
+
+ @return SkImageFilter if previously set, nullptr otherwise
+ */
+ SkImageFilter* getImageFilter() const { return fImageFilter.get(); }
+
+ /** Returns SkImageFilter if set, or nullptr.
+ Increases SkImageFilter SkRefCnt by one.
+
+ @return SkImageFilter if previously set, nullptr otherwise
+ */
+ sk_sp<SkImageFilter> refImageFilter() const;
+
+ /** Sets SkImageFilter to imageFilter, decreasing SkRefCnt of the previous
+ SkImageFilter. Pass nullptr to clear SkImageFilter, and remove SkImageFilter effect
+ on drawing.
+
+ Increments imageFilter SkRefCnt by one.
+
+ @param imageFilter how SkImage is sampled when transformed
+ */
+ void setImageFilter(sk_sp<SkImageFilter> imageFilter);
+
+ /** Returns true if SkPaint prevents all drawing;
+ otherwise, the SkPaint may or may not allow drawing.
+
+ Returns true if, for example, SkBlendMode combined with alpha computes a
+ new alpha of zero.
+
+ @return true if SkPaint prevents all drawing
+ */
+ bool nothingToDraw() const;
+
+ /** (to be made private)
+ Returns true if SkPaint does not include elements requiring extensive computation
+ to compute SkBaseDevice bounds of drawn geometry. For instance, SkPaint with SkPathEffect
+ always returns false.
+
+ @return true if SkPaint allows for fast computation of bounds
+ */
+ bool canComputeFastBounds() const;
+
+ /** (to be made private)
+ Only call this if canComputeFastBounds() returned true. This takes a
+ raw rectangle (the raw bounds of a shape), and adjusts it for stylistic
+ effects in the paint (e.g. stroking). If needed, it uses the storage
+ parameter. It returns the adjusted bounds that can then be used
+ for SkCanvas::quickReject tests.
+
+        The returned SkRect will be either orig or storage; thus the caller
+        should not rely on storage being set to the result, but should always
+ use the returned value. It is legal for orig and storage to be the same
+ SkRect.
+ For example:
+ if (!path.isInverseFillType() && paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (canvas->quickReject(paint.computeFastBounds(path.getBounds(), &storage))) {
+ return; // do not draw the path
+ }
+ }
+ // draw the path
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry; may not be nullptr
+ @return fast computed bounds
+ */
+ const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const {
+ // Things like stroking, etc... will do math on the bounds rect, assuming that it's sorted.
+ SkASSERT(orig.isSorted());
+ SkPaint::Style style = this->getStyle();
+ // ultra fast-case: filling with no effects that affect geometry
+ if (kFill_Style == style) {
+ uintptr_t effects = 0;
+ effects |= reinterpret_cast<uintptr_t>(this->getMaskFilter());
+ effects |= reinterpret_cast<uintptr_t>(this->getPathEffect());
+ effects |= reinterpret_cast<uintptr_t>(this->getImageFilter());
+ if (!effects) {
+ return orig;
+ }
+ }
+
+ return this->doComputeFastBounds(orig, storage, style);
+ }
+
+ /** (to be made private)
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry
+ @return fast computed bounds
+ */
+ const SkRect& computeFastStrokeBounds(const SkRect& orig,
+ SkRect* storage) const {
+ return this->doComputeFastBounds(orig, storage, kStroke_Style);
+ }
+
+ /** (to be made private)
+ Computes the bounds, overriding the SkPaint SkPaint::Style. This can be used to
+ account for additional width required by stroking orig, without
+ altering SkPaint::Style set to fill.
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry
+ @param style overrides SkPaint::Style
+ @return fast computed bounds
+ */
+ const SkRect& doComputeFastBounds(const SkRect& orig, SkRect* storage,
+ Style style) const;
+
+private:
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkShader> fShader;
+ sk_sp<SkMaskFilter> fMaskFilter;
+ sk_sp<SkColorFilter> fColorFilter;
+ sk_sp<SkImageFilter> fImageFilter;
+
+ SkColor4f fColor4f;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ union {
+ struct {
+ unsigned fAntiAlias : 1;
+ unsigned fDither : 1;
+ unsigned fCapType : 2;
+ unsigned fJoinType : 2;
+ unsigned fStyle : 2;
+ unsigned fFilterQuality : 2;
+ unsigned fBlendMode : 8; // only need 5-6?
+ unsigned fPadding : 14; // 14==32-1-1-2-2-2-2-8
+ } fBitfields;
+ uint32_t fBitfieldsUInt;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPath.h b/gfx/skia/skia/include/core/SkPath.h
new file mode 100644
index 0000000000..6a87648685
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPath.h
@@ -0,0 +1,1746 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPath_DEFINED
+#define SkPath_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPathTypes.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/SkTo.h"
+
+#include <initializer_list>
+
+class SkAutoPathBoundsUpdate;
+class SkData;
+class SkRRect;
+class SkWStream;
+
+/** \class SkPath
+    SkPath contains geometry. SkPath may be empty, or contain one or more verbs that
+ outline a figure. SkPath always starts with a move verb to a Cartesian coordinate,
+ and may be followed by additional verbs that add lines or curves.
+ Adding a close verb makes the geometry into a continuous loop, a closed contour.
+ SkPath may contain any number of contours, each beginning with a move verb.
+
+ SkPath contours may contain only a move verb, or may also contain lines,
+ quadratic beziers, conics, and cubic beziers. SkPath contours may be open or
+ closed.
+
+ When used to draw a filled area, SkPath describes whether the fill is inside or
+ outside the geometry. SkPath also describes the winding rule used to fill
+ overlapping contours.
+
+    Internally, SkPath lazily computes metrics like bounds and convexity. Call
+ SkPath::updateBoundsCache to make SkPath thread safe.
+*/
+class SK_API SkPath {
+public:
+
+ /** \enum SkPath::Direction
+ Direction describes whether contour is clockwise or counterclockwise.
+ When SkPath contains multiple overlapping contours, Direction together with
+ FillType determines whether overlaps are filled or form holes.
+
+ Direction also determines how contour is measured. For instance, dashing
+ measures along SkPath to determine where to start and stop stroke; Direction
+ will change dashed results as it steps clockwise or counterclockwise.
+
+ Closed contours like SkRect, SkRRect, circle, and oval added with
+ kCW_Direction travel clockwise; the same added with kCCW_Direction
+ travel counterclockwise.
+ */
+ enum Direction : int {
+ kCW_Direction = static_cast<int>(SkPathDirection::kCW),
+ kCCW_Direction = static_cast<int>(SkPathDirection::kCCW)
+ };
+
+ /** Constructs an empty SkPath. By default, SkPath has no verbs, no SkPoint, and no weights.
+ SkPath::FillType is set to kWinding_FillType.
+
+ @return empty SkPath
+ */
+ SkPath();
+
+ /** Constructs a copy of an existing path.
+ Copy constructor makes two paths identical by value. Internally, path and
+ the returned result share pointer values. The underlying verb array, SkPoint array
+ and weights are copied when modified.
+
+ Creating a SkPath copy is very efficient and never allocates memory.
+        SkPath is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param path SkPath to copy by value
+ @return copy of SkPath
+ */
+ SkPath(const SkPath& path);
+
+ /** Releases ownership of any shared data and deletes data if SkPath is sole owner.
+ */
+ ~SkPath();
+
+ /** Constructs a copy of an existing path.
+ SkPath assignment makes two paths identical by value. Internally, assignment
+ shares pointer values. The underlying verb array, SkPoint array and weights
+ are copied when modified.
+
+ Copying SkPath by assignment is very efficient and never allocates memory.
+        SkPath is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param path verb array, SkPoint array, weights, and SkPath::FillType to copy
+ @return SkPath copied by value
+ */
+ SkPath& operator=(const SkPath& path);
+
+ /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights
+ are equivalent.
+
+ @param a SkPath to compare
+ @param b SkPath to compare
+ @return true if SkPath pair are equivalent
+ */
+ friend SK_API bool operator==(const SkPath& a, const SkPath& b);
+
+ /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights
+ are not equivalent.
+
+ @param a SkPath to compare
+ @param b SkPath to compare
+ @return true if SkPath pair are not equivalent
+ */
+ friend bool operator!=(const SkPath& a, const SkPath& b) {
+ return !(a == b);
+ }
+
+    /** Returns true if this SkPath and compare contain equal verbs and equal weights.
+        If either SkPath contains one or more conics, the weights must match.
+
+ conicTo() may add different verbs depending on conic weight, so it is not
+ trivial to interpolate a pair of SkPath containing conics with different
+ conic weight values.
+
+ @param compare SkPath to compare
+ @return true if SkPath verb array and weights are equivalent
+ */
+ bool isInterpolatable(const SkPath& compare) const;
+
+ /** Interpolates between SkPath with SkPoint array of equal size.
+        Copies verb array and weights to out, and sets the out SkPoint array to a weighted
+        average of this SkPoint array and the ending SkPoint array, using the formula:
+        (this SkPoint * weight) + ending SkPoint * (1 - weight).
+
+        weight is most useful when between zero (ending SkPoint array) and
+        one (this SkPoint array); it will work with values outside of this
+        range.
+
+ interpolate() returns false and leaves out unchanged if SkPoint array is not
+ the same size as ending SkPoint array. Call isInterpolatable() to check SkPath
+ compatibility prior to calling interpolate().
+
+ @param ending SkPoint array averaged with this SkPoint array
+ @param weight contribution of this SkPoint array, and
+ one minus contribution of ending SkPoint array
+ @param out SkPath replaced by interpolated averages
+ @return true if SkPath contain same number of SkPoint
+ */
+ bool interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const;
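A minimal sketch of the weighted average above, gated on isInterpolatable() as the documentation recommends (the helper name is illustrative):

    bool halfwayPath(const SkPath& start, const SkPath& end, SkPath* out) {
        if (!start.isInterpolatable(end)) {
            return false;    // point arrays differ in size; out is left unchanged
        }
        // a weight of 0.5 yields the midpoint of each corresponding SkPoint pair
        return start.interpolate(end, 0.5f, out);
    }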
+
+ /** \enum SkPath::FillType
+ FillType selects the rule used to fill SkPath. SkPath set to kWinding_FillType
+ fills if the sum of contour edges is not zero, where clockwise edges add one, and
+ counterclockwise edges subtract one. SkPath set to kEvenOdd_FillType fills if the
+ number of contour edges is odd. Each FillType has an inverse variant that
+ reverses the rule:
+ kInverseWinding_FillType fills where the sum of contour edges is zero;
+ kInverseEvenOdd_FillType fills where the number of contour edges is even.
+ */
+ enum FillType {
+ kWinding_FillType = static_cast<int>(SkPathFillType::kWinding),
+ kEvenOdd_FillType = static_cast<int>(SkPathFillType::kEvenOdd),
+ kInverseWinding_FillType = static_cast<int>(SkPathFillType::kInverseWinding),
+ kInverseEvenOdd_FillType = static_cast<int>(SkPathFillType::kInverseEvenOdd)
+ };
+
+ /** Returns FillType, the rule used to fill SkPath. FillType of a new SkPath is
+ kWinding_FillType.
+
+ @return one of: kWinding_FillType, kEvenOdd_FillType, kInverseWinding_FillType,
+ kInverseEvenOdd_FillType
+ */
+ FillType getFillType() const { return (FillType)fFillType; }
+
+ /** Sets FillType, the rule used to fill SkPath. While there is no check
+ that ft is legal, values outside of FillType are not supported.
+
+ @param ft one of: kWinding_FillType, kEvenOdd_FillType, kInverseWinding_FillType,
+ kInverseEvenOdd_FillType
+ */
+ void setFillType(FillType ft) {
+ fFillType = SkToU8(ft);
+ }
+
+    /** Returns true if FillType describes the area outside SkPath geometry. The inverse fill area
+ extends indefinitely.
+
+ @return true if FillType is kInverseWinding_FillType or kInverseEvenOdd_FillType
+ */
+ bool isInverseFillType() const { return IsInverseFillType((FillType)fFillType); }
+
+ /** Replaces FillType with its inverse. The inverse of FillType describes the area
+ unmodified by the original FillType.
+ */
+ void toggleInverseFillType() {
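+        // kInverse fill types equal their non-inverse counterparts plus 2,
+        // so flipping bit 1 toggles between the plain and inverse rules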
+ fFillType ^= 2;
+ }
+
+ /** \enum SkPath::Convexity
+ SkPath is convex if it contains one contour and contour loops no more than
+ 360 degrees, and contour angles all have same Direction. Convex SkPath
+ may have better performance and require fewer resources on GPU surface.
+
+ SkPath is concave when either at least one Direction change is clockwise and
+ another is counterclockwise, or the sum of the changes in Direction is not 360
+ degrees.
+
+ Initially SkPath Convexity is kUnknown_Convexity. SkPath Convexity is computed
+ if needed by destination SkSurface.
+ */
+ enum Convexity : uint8_t {
+ kUnknown_Convexity = static_cast<int>(SkPathConvexityType::kUnknown),
+ kConvex_Convexity = static_cast<int>(SkPathConvexityType::kConvex),
+ kConcave_Convexity = static_cast<int>(SkPathConvexityType::kConcave),
+ };
+
+ /** Computes SkPath::Convexity if required, and returns stored value.
+ SkPath::Convexity is computed if stored value is kUnknown_Convexity,
+ or if SkPath has been altered since SkPath::Convexity was computed or set.
+
+ @return computed or stored SkPath::Convexity
+ */
+ Convexity getConvexity() const {
+ Convexity convexity = this->getConvexityOrUnknown();
+ if (convexity != kUnknown_Convexity) {
+ return convexity;
+ }
+ return this->internalGetConvexity();
+ }
+
+ /** Returns last computed SkPath::Convexity, or kUnknown_Convexity if
+ SkPath has been altered since SkPath::Convexity was computed or set.
+
+ @return stored SkPath::Convexity
+ */
+ Convexity getConvexityOrUnknown() const { return fConvexity.load(std::memory_order_relaxed); }
+
+ /** Stores convexity so that it is later returned by getConvexity() or getConvexityOrUnknown().
+ convexity may differ from getConvexity(), although setting an incorrect value may
+ cause incorrect or inefficient drawing.
+
+ If convexity is kUnknown_Convexity: getConvexity() will
+ compute SkPath::Convexity, and getConvexityOrUnknown() will return kUnknown_Convexity.
+
+ If convexity is kConvex_Convexity or kConcave_Convexity, getConvexity()
+ and getConvexityOrUnknown() will return convexity until the path is
+ altered.
+
+ @param convexity one of: kUnknown_Convexity, kConvex_Convexity, or kConcave_Convexity
+ */
+ void setConvexity(Convexity convexity);
+
+ /** Computes SkPath::Convexity if required, and returns true if value is kConvex_Convexity.
+ If setConvexity() was called with kConvex_Convexity or kConcave_Convexity, and
+ the path has not been altered, SkPath::Convexity is not recomputed.
+
+ @return true if SkPath::Convexity stored or computed is kConvex_Convexity
+ */
+ bool isConvex() const {
+ return kConvex_Convexity == this->getConvexity();
+ }
+
+ /** Returns true if this path is recognized as an oval or circle.
+
+ bounds receives bounds of oval.
+
+ bounds is unmodified if oval is not found.
+
+ @param bounds storage for bounding SkRect of oval; may be nullptr
+ @return true if SkPath is recognized as an oval or circle
+ */
+ bool isOval(SkRect* bounds) const;
+
+ /** Returns true if path is representable as SkRRect.
+ Returns false if path is representable as oval, circle, or SkRect.
+
+ rrect receives bounds of SkRRect.
+
+ rrect is unmodified if SkRRect is not found.
+
+        @param rrect  storage for SkRRect; may be nullptr
+ @return true if SkPath contains only SkRRect
+ */
+ bool isRRect(SkRRect* rrect) const;
+
+ /** Sets SkPath to its initial state.
+ Removes verb array, SkPoint array, and weights, and sets FillType to kWinding_FillType.
+ Internal storage associated with SkPath is released.
+
+ @return reference to SkPath
+ */
+ SkPath& reset();
+
+ /** Sets SkPath to its initial state, preserving internal storage.
+ Removes verb array, SkPoint array, and weights, and sets FillType to kWinding_FillType.
+ Internal storage associated with SkPath is retained.
+
+ Use rewind() instead of reset() if SkPath storage will be reused and performance
+ is critical.
+
+ @return reference to SkPath
+ */
+ SkPath& rewind();
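+
+    // Editor's sketch (not upstream Skia code): rewind() keeps allocations
+    // that reset() would release, so it suits a path rebuilt repeatedly;
+    // buildFrame() is a hypothetical helper:
+    //
+    //   SkPath scratch;
+    //   for (int frame = 0; frame < 60; ++frame) {
+    //       scratch.rewind();            // empty the path, keep its storage
+    //       buildFrame(&scratch, frame); // hypothetical: refill the path
+    //       // ... draw scratch ...
+    //   }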
+
+ /** Returns if SkPath is empty.
+ Empty SkPath may have FillType but has no SkPoint, SkPath::Verb, or conic weight.
+ SkPath() constructs empty SkPath; reset() and rewind() make SkPath empty.
+
+        @return true if the verb array is empty
+ */
+ bool isEmpty() const {
+ SkDEBUGCODE(this->validate();)
+ return 0 == fPathRef->countVerbs();
+ }
+
+ /** Returns if contour is closed.
+ Contour is closed if SkPath SkPath::Verb array was last modified by close(). When stroked,
+ closed contour draws SkPaint::Join instead of SkPaint::Cap at first and last SkPoint.
+
+ @return true if the last contour ends with a kClose_Verb
+ */
+ bool isLastContourClosed() const;
+
+ /** Returns true for finite SkPoint array values between negative SK_ScalarMax and
+ positive SK_ScalarMax. Returns false for any SkPoint array value of
+ SK_ScalarInfinity, SK_ScalarNegativeInfinity, or SK_ScalarNaN.
+
+ @return true if all SkPoint values are finite
+ */
+ bool isFinite() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->isFinite();
+ }
+
+    /** Returns true if the path is volatile; a volatile path may be altered or discarded
+        by the caller after it is drawn. SkPath has volatile set false by default, allowing
+        SkSurface to attach a cache of data which speeds repeated drawing. If true, SkSurface
+        may not speed repeated drawing.
+
+ @return true if caller will alter SkPath after drawing
+ */
+ bool isVolatile() const {
+ return SkToBool(fIsVolatile);
+ }
+
+    /** Specifies whether SkPath is volatile; that is, whether it will be altered or discarded
+        by the caller after it is drawn. SkPath has volatile set false by default, allowing
+ SkBaseDevice to attach a cache of data which speeds repeated drawing.
+
+ Mark temporary paths, discarded or modified after use, as volatile
+ to inform SkBaseDevice that the path need not be cached.
+
+ Mark animating SkPath volatile to improve performance.
+ Mark unchanging SkPath non-volatile to improve repeated rendering.
+
+ raster surface SkPath draws are affected by volatile for some shadows.
+ GPU surface SkPath draws are affected by volatile for some shadows and concave geometries.
+
+ @param isVolatile true if caller will alter SkPath after drawing
+ */
+ void setIsVolatile(bool isVolatile) {
+ fIsVolatile = isVolatile;
+ }
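+
+    // Editor's sketch (not upstream Skia code): per the guidance above, a path
+    // rebuilt every frame is marked volatile so no drawing cache is built:
+    //
+    //   SkPath animated;
+    //   animated.setIsVolatile(true);  // altered after drawing; skip caching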
+
+ /** Tests if line between SkPoint pair is degenerate.
+ Line with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ exact changes the equality test. If true, returns true only if p1 equals p2.
+ If false, returns true if p1 equals or nearly equals p2.
+
+ @param p1 line start point
+ @param p2 line end point
+ @param exact if false, allow nearly equals
+ @return true if line is degenerate; its length is effectively zero
+ */
+ static bool IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact);
+
+ /** Tests if quad is degenerate.
+ Quad with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ @param p1 quad start point
+ @param p2 quad control point
+ @param p3 quad end point
+ @param exact if true, returns true only if p1, p2, and p3 are equal;
+ if false, returns true if p1, p2, and p3 are equal or nearly equal
+ @return true if quad is degenerate; its length is effectively zero
+ */
+ static bool IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, bool exact);
+
+ /** Tests if cubic is degenerate.
+ Cubic with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ @param p1 cubic start point
+ @param p2 cubic control point 1
+ @param p3 cubic control point 2
+ @param p4 cubic end point
+ @param exact if true, returns true only if p1, p2, p3, and p4 are equal;
+ if false, returns true if p1, p2, p3, and p4 are equal or nearly equal
+ @return true if cubic is degenerate; its length is effectively zero
+ */
+ static bool IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, const SkPoint& p4, bool exact);
+
+ /** Returns true if SkPath contains only one line;
+ SkPath::Verb array has two entries: kMove_Verb, kLine_Verb.
+ If SkPath contains one line and line is not nullptr, line is set to
+ line start point and line end point.
+ Returns false if SkPath is not one line; line is unaltered.
+
+ @param line storage for line. May be nullptr
+ @return true if SkPath contains exactly one line
+ */
+ bool isLine(SkPoint line[2]) const;
+
+ /** Returns the number of points in SkPath.
+ SkPoint count is initially zero.
+
+ @return SkPath SkPoint array length
+ */
+ int countPoints() const;
+
+ /** Returns SkPoint at index in SkPoint array. Valid range for index is
+ 0 to countPoints() - 1.
+ Returns (0, 0) if index is out of range.
+
+ @param index SkPoint array element selector
+ @return SkPoint array value or (0, 0)
+ */
+ SkPoint getPoint(int index) const;
+
+ /** Returns number of points in SkPath. Up to max points are copied.
+ points may be nullptr; then, max must be zero.
+ If max is greater than number of points, excess points storage is unaltered.
+
+ @param points storage for SkPath SkPoint array. May be nullptr
+ @param max maximum to copy; must be greater than or equal to zero
+ @return SkPath SkPoint array length
+ */
+ int getPoints(SkPoint points[], int max) const;
+
+    /** Returns the number of verbs added to SkPath: kMove_Verb, kLine_Verb, kQuad_Verb,
+        kConic_Verb, kCubic_Verb, and kClose_Verb.
+
+ @return length of verb array
+ */
+ int countVerbs() const;
+
+ /** Returns the number of verbs in the path. Up to max verbs are copied. The
+ verbs are copied as one byte per verb.
+
+ @param verbs storage for verbs, may be nullptr
+ @param max maximum number to copy into verbs
+ @return the actual number of verbs in the path
+ */
+ int getVerbs(uint8_t verbs[], int max) const;
+
+ /** Returns the approximate byte size of the SkPath in memory.
+
+ @return approximate size
+ */
+ size_t approximateBytesUsed() const;
+
+ /** Exchanges the verb array, SkPoint array, weights, and SkPath::FillType with other.
+ Cached state is also exchanged. swap() internally exchanges pointers, so
+ it is lightweight and does not allocate memory.
+
+ swap() usage has largely been replaced by operator=(const SkPath& path).
+ SkPath do not copy their content on assignment until they are written to,
+ making assignment as efficient as swap().
+
+ @param other SkPath exchanged by value
+ */
+ void swap(SkPath& other);
+
+ /** Returns minimum and maximum axes values of SkPoint array.
+ Returns (0, 0, 0, 0) if SkPath contains no points. Returned bounds width and height may
+ be larger or smaller than area affected when SkPath is drawn.
+
+ SkRect returned includes all SkPoint added to SkPath, including SkPoint associated with
+ kMove_Verb that define empty contours.
+
+ @return bounds of all SkPoint in SkPoint array
+ */
+ const SkRect& getBounds() const {
+ return fPathRef->getBounds();
+ }
+
+ /** Updates internal bounds so that subsequent calls to getBounds() are instantaneous.
+ Unaltered copies of SkPath may also access cached bounds through getBounds().
+
+ For now, identical to calling getBounds() and ignoring the returned value.
+
+ Call to prepare SkPath subsequently drawn from multiple threads,
+ to avoid a race condition where each draw separately computes the bounds.
+ */
+ void updateBoundsCache() const {
+ // for now, just calling getBounds() is sufficient
+ this->getBounds();
+ }
+
+ /** Returns minimum and maximum axes values of the lines and curves in SkPath.
+ Returns (0, 0, 0, 0) if SkPath contains no points.
+ Returned bounds width and height may be larger or smaller than area affected
+ when SkPath is drawn.
+
+ Includes SkPoint associated with kMove_Verb that define empty
+ contours.
+
+ Behaves identically to getBounds() when SkPath contains
+ only lines. If SkPath contains curves, computed bounds includes
+ the maximum extent of the quad, conic, or cubic; is slower than getBounds();
+ and unlike getBounds(), does not cache the result.
+
+ @return tight bounds of curves in SkPath
+ */
+ SkRect computeTightBounds() const;
+
+ /** Returns true if rect is contained by SkPath.
+ May return false when rect is contained by SkPath.
+
+ For now, only returns true if SkPath has one contour and is convex.
+ rect may share points and edges with SkPath and be contained.
+ Returns true if rect is empty, that is, it has zero width or height; and
+ the SkPoint or line described by rect is contained by SkPath.
+
+ @param rect SkRect, line, or SkPoint checked for containment
+ @return true if rect is contained
+ */
+ bool conservativelyContainsRect(const SkRect& rect) const;
+
+ /** Grows SkPath verb array and SkPoint array to contain extraPtCount additional SkPoint.
+ May improve performance and use less memory by
+ reducing the number and size of allocations when creating SkPath.
+
+ @param extraPtCount number of additional SkPoint to allocate
+ */
+ void incReserve(int extraPtCount);
+
+ /** Shrinks SkPath verb array and SkPoint array storage to discard unused capacity.
+ May reduce the heap overhead for SkPath known to be fully constructed.
+ */
+ void shrinkToFit();
+
+ /** Adds beginning of contour at SkPoint (x, y).
+
+ @param x x-axis value of contour start
+ @param y y-axis value of contour start
+ @return reference to SkPath
+ */
+ SkPath& moveTo(SkScalar x, SkScalar y);
+
+ /** Adds beginning of contour at SkPoint p.
+
+ @param p contour start
+ @return reference to SkPath
+ */
+ SkPath& moveTo(const SkPoint& p) {
+ return this->moveTo(p.fX, p.fY);
+ }
+
+ /** Adds beginning of contour relative to last point.
+ If SkPath is empty, starts contour at (dx, dy).
+ Otherwise, start contour at last point offset by (dx, dy).
+ Function name stands for "relative move to".
+
+ @param dx offset from last point to contour start on x-axis
+ @param dy offset from last point to contour start on y-axis
+ @return reference to SkPath
+ */
+ SkPath& rMoveTo(SkScalar dx, SkScalar dy);
+
+ /** Adds line from last point to (x, y). If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ lineTo() appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+ lineTo() then appends kLine_Verb to verb array and (x, y) to SkPoint array.
+
+ @param x end of added line on x-axis
+ @param y end of added line on y-axis
+ @return reference to SkPath
+ */
+ SkPath& lineTo(SkScalar x, SkScalar y);
+
+ /** Adds line from last point to SkPoint p. If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ lineTo() first appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+ lineTo() then appends kLine_Verb to verb array and SkPoint p to SkPoint array.
+
+ @param p end SkPoint of added line
+ @return reference to SkPath
+ */
+ SkPath& lineTo(const SkPoint& p) {
+ return this->lineTo(p.fX, p.fY);
+ }
+
+ /** Adds line from last point to vector (dx, dy). If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kLine_Verb to verb array and line end to SkPoint array.
+ Line end is last point plus vector (dx, dy).
+ Function name stands for "relative line to".
+
+ @param dx offset from last point to line end on x-axis
+ @param dy offset from last point to line end on y-axis
+ @return reference to SkPath
+ */
+ SkPath& rLineTo(SkScalar dx, SkScalar dy);
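+
+    // Editor's sketch (not upstream Skia code): absolute and relative verbs
+    // mix freely; rLineTo() offsets from the last point, so a and b below
+    // describe the same triangle:
+    //
+    //   SkPath a, b;
+    //   a.moveTo(10, 10); a.lineTo(30, 10);  a.lineTo(20, 30);   a.close();
+    //   b.moveTo(10, 10); b.rLineTo(20, 0);  b.rLineTo(-10, 20); b.close();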
+
+ /** Adds quad from last point towards (x1, y1), to (x2, y2).
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kQuad_Verb to verb array; and (x1, y1), (x2, y2)
+ to SkPoint array.
+
+ @param x1 control SkPoint of quad on x-axis
+ @param y1 control SkPoint of quad on y-axis
+ @param x2 end SkPoint of quad on x-axis
+ @param y2 end SkPoint of quad on y-axis
+ @return reference to SkPath
+ */
+ SkPath& quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2);
+
+ /** Adds quad from last point towards SkPoint p1, to SkPoint p2.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kQuad_Verb to verb array; and SkPoint p1, p2
+ to SkPoint array.
+
+ @param p1 control SkPoint of added quad
+ @param p2 end SkPoint of added quad
+ @return reference to SkPath
+ */
+ SkPath& quadTo(const SkPoint& p1, const SkPoint& p2) {
+ return this->quadTo(p1.fX, p1.fY, p2.fX, p2.fY);
+ }
+
+ /** Adds quad from last point towards vector (dx1, dy1), to vector (dx2, dy2).
+ If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed; then appends kQuad_Verb to verb array; and appends quad
+ control and quad end to SkPoint array.
+ Quad control is last point plus vector (dx1, dy1).
+ Quad end is last point plus vector (dx2, dy2).
+ Function name stands for "relative quad to".
+
+ @param dx1 offset from last point to quad control on x-axis
+ @param dy1 offset from last point to quad control on y-axis
+ @param dx2 offset from last point to quad end on x-axis
+ @param dy2 offset from last point to quad end on y-axis
+ @return reference to SkPath
+ */
+ SkPath& rQuadTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2);
+
+ /** Adds conic from last point towards (x1, y1), to (x2, y2), weighted by w.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+
+ If w is finite and not one, appends kConic_Verb to verb array;
+ and (x1, y1), (x2, y2) to SkPoint array; and w to conic weights.
+
+ If w is one, appends kQuad_Verb to verb array, and
+ (x1, y1), (x2, y2) to SkPoint array.
+
+ If w is not finite, appends kLine_Verb twice to verb array, and
+ (x1, y1), (x2, y2) to SkPoint array.
+
+ @param x1 control SkPoint of conic on x-axis
+ @param y1 control SkPoint of conic on y-axis
+ @param x2 end SkPoint of conic on x-axis
+ @param y2 end SkPoint of conic on y-axis
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w);
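+
+    // Editor's sketch (not upstream Skia code): a conic with weight
+    // SK_ScalarRoot2Over2 traces a quarter circle; per the rules above, a
+    // weight of one would be stored as a quad instead:
+    //
+    //   SkPath arc;
+    //   arc.moveTo(100, 0);
+    //   arc.conicTo(100, 100, 0, 100, SK_ScalarRoot2Over2);  // 90-degree arc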
+
+ /** Adds conic from last point towards SkPoint p1, to SkPoint p2, weighted by w.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+
+ If w is finite and not one, appends kConic_Verb to verb array;
+ and SkPoint p1, p2 to SkPoint array; and w to conic weights.
+
+ If w is one, appends kQuad_Verb to verb array, and SkPoint p1, p2
+ to SkPoint array.
+
+ If w is not finite, appends kLine_Verb twice to verb array, and
+ SkPoint p1, p2 to SkPoint array.
+
+ @param p1 control SkPoint of added conic
+ @param p2 end SkPoint of added conic
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& conicTo(const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ return this->conicTo(p1.fX, p1.fY, p2.fX, p2.fY, w);
+ }
+
+ /** Adds conic from last point towards vector (dx1, dy1), to vector (dx2, dy2),
+ weighted by w. If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed.
+
+ If w is finite and not one, next appends kConic_Verb to verb array,
+ and w is recorded as conic weight; otherwise, if w is one, appends
+ kQuad_Verb to verb array; or if w is not finite, appends kLine_Verb
+ twice to verb array.
+
+ In all cases appends SkPoint control and end to SkPoint array.
+ control is last point plus vector (dx1, dy1).
+ end is last point plus vector (dx2, dy2).
+
+ Function name stands for "relative conic to".
+
+ @param dx1 offset from last point to conic control on x-axis
+ @param dy1 offset from last point to conic control on y-axis
+ @param dx2 offset from last point to conic end on x-axis
+ @param dy2 offset from last point to conic end on y-axis
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w);
+
+ /** Adds cubic from last point towards (x1, y1), then towards (x2, y2), ending at
+ (x3, y3). If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to
+ (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kCubic_Verb to verb array; and (x1, y1), (x2, y2), (x3, y3)
+ to SkPoint array.
+
+ @param x1 first control SkPoint of cubic on x-axis
+ @param y1 first control SkPoint of cubic on y-axis
+ @param x2 second control SkPoint of cubic on x-axis
+ @param y2 second control SkPoint of cubic on y-axis
+ @param x3 end SkPoint of cubic on x-axis
+ @param y3 end SkPoint of cubic on y-axis
+ @return reference to SkPath
+ */
+ SkPath& cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3);
+
+ /** Adds cubic from last point towards SkPoint p1, then towards SkPoint p2, ending at
+ SkPoint p3. If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to
+ (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kCubic_Verb to verb array; and SkPoint p1, p2, p3
+ to SkPoint array.
+
+ @param p1 first control SkPoint of cubic
+ @param p2 second control SkPoint of cubic
+ @param p3 end SkPoint of cubic
+ @return reference to SkPath
+ */
+ SkPath& cubicTo(const SkPoint& p1, const SkPoint& p2, const SkPoint& p3) {
+ return this->cubicTo(p1.fX, p1.fY, p2.fX, p2.fY, p3.fX, p3.fY);
+ }
+
+ /** Adds cubic from last point towards vector (dx1, dy1), then towards
+ vector (dx2, dy2), to vector (dx3, dy3).
+ If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed; then appends kCubic_Verb to verb array; and appends cubic
+ control and cubic end to SkPoint array.
+ Cubic control is last point plus vector (dx1, dy1).
+ Cubic end is last point plus vector (dx2, dy2).
+ Function name stands for "relative cubic to".
+
+ @param dx1 offset from last point to first cubic control on x-axis
+ @param dy1 offset from last point to first cubic control on y-axis
+ @param dx2 offset from last point to second cubic control on x-axis
+ @param dy2 offset from last point to second cubic control on y-axis
+ @param dx3 offset from last point to cubic end on x-axis
+ @param dy3 offset from last point to cubic end on y-axis
+ @return reference to SkPath
+ */
+ SkPath& rCubicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar dx3, SkScalar dy3);
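+
+    // Editor's sketch (not upstream Skia code): one cubic segment; the curve
+    // leaves the start toward the first control point and arrives at the end
+    // from the second, giving an S shape here:
+    //
+    //   SkPath wave;
+    //   wave.moveTo(0, 50);
+    //   wave.cubicTo(25, 0, 75, 100, 100, 50);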
+
+ /** Appends arc to SkPath. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+ positive x-axis, and positive sweeps extends arc clockwise.
+
+ arcTo() adds line connecting SkPath last SkPoint to initial arc SkPoint if forceMoveTo
+ is false and SkPath is not empty. Otherwise, added contour begins with first point
+ of arc. Angles greater than -360 and less than 360 are treated modulo 360.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngle starting angle of arc in degrees
+ @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360
+ @param forceMoveTo true to start a new contour with arc
+ @return reference to SkPath
+ */
+ SkPath& arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool forceMoveTo);
+
+ /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic
+ weighted to describe part of circle. Arc is contained by tangent from
+ last SkPath point to (x1, y1), and tangent from (x1, y1) to (x2, y2). Arc
+ is part of circle sized to radius, positioned so it touches both tangent lines.
+
+        If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath.
+        The length of vector from (x1, y1) to (x2, y2) does not affect arc.
+
+        Arc sweep is always less than 180 degrees. If radius is zero, or if
+        tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to (x1, y1).
+
+        arcTo() appends at most one line and one conic.
+        arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo.
+
+ @param x1 x-axis value common to pair of tangents
+ @param y1 y-axis value common to pair of tangents
+ @param x2 x-axis value end of second tangent
+ @param y2 y-axis value end of second tangent
+ @param radius distance from arc to circle center
+ @return reference to SkPath
+ */
+ SkPath& arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius);
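+
+    // Editor's sketch (not upstream Skia code): the tangent form above rounds
+    // the corner at (100, 100) between two edges, as with PostScript arct:
+    //
+    //   SkPath corner;
+    //   corner.moveTo(0, 100);
+    //   corner.arcTo(100, 100, 100, 0, 20);  // line toward the corner, then a
+    //   corner.lineTo(100, 0);               // radius-20 arc onto the next edge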
+
+ /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic
+ weighted to describe part of circle. Arc is contained by tangent from
+ last SkPath point to p1, and tangent from p1 to p2. Arc
+ is part of circle sized to radius, positioned so it touches both tangent lines.
+
+ If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath.
+ The length of vector from p1 to p2 does not affect arc.
+
+ Arc sweep is always less than 180 degrees. If radius is zero, or if
+ tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to p1.
+
+ arcTo() appends at most one line and one conic.
+ arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo.
+
+ @param p1 SkPoint common to pair of tangents
+ @param p2 end of second tangent
+ @param radius distance from arc to circle center
+ @return reference to SkPath
+ */
+ SkPath& arcTo(const SkPoint p1, const SkPoint p2, SkScalar radius) {
+ return this->arcTo(p1.fX, p1.fY, p2.fX, p2.fY, radius);
+ }
+
+ /** \enum SkPath::ArcSize
+        Four oval parts with radii (rx, ry) start at last SkPath SkPoint and end at (x, y).
+ ArcSize and Direction select one of the four oval parts.
+ */
+ enum ArcSize {
+ kSmall_ArcSize, //!< smaller of arc pair
+ kLarge_ArcSize, //!< larger of arc pair
+ };
+
+ /** Appends arc to SkPath. Arc is implemented by one or more conics weighted to
+ describe part of oval with radii (rx, ry) rotated by xAxisRotate degrees. Arc
+ curves from last SkPath SkPoint to (x, y), choosing one of four possible routes:
+ clockwise or counterclockwise, and smaller or larger.
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to (x, y) if
+ either radii are zero, or if last SkPath SkPoint equals (x, y). arcTo() scales radii
+ (rx, ry) to fit last SkPath SkPoint and (x, y) if both are greater than zero but
+        too small to describe an arc.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of SVG arc, although SVG sweep-flag value
+ is opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise,
+ while kCW_Direction cast to int is zero.
+
+ @param rx radius on x-axis before x-axis rotation
+ @param ry radius on y-axis before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param x end of arc
+ @param y end of arc
+ @return reference to SkPath
+ */
+ SkPath& arcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ Direction sweep, SkScalar x, SkScalar y);
+
+ /** Appends arc to SkPath. Arc is implemented by one or more conic weighted to describe
+ part of oval with radii (r.fX, r.fY) rotated by xAxisRotate degrees. Arc curves
+ from last SkPath SkPoint to (xy.fX, xy.fY), choosing one of four possible routes:
+ clockwise or counterclockwise,
+ and smaller or larger.
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to xy if either
+ radii are zero, or if last SkPath SkPoint equals (xy.fX, xy.fY). arcTo() scales radii r to
+ fit last SkPath SkPoint and xy if both are greater than zero but too small to describe
+ an arc.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of SVG arc, although SVG sweep-flag value is
+ opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, while
+ kCW_Direction cast to int is zero.
+
+ @param r radii on axes before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param xy end of arc
+ @return reference to SkPath
+ */
+ SkPath& arcTo(const SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, Direction sweep,
+ const SkPoint xy) {
+ return this->arcTo(r.fX, r.fY, xAxisRotate, largeArc, sweep, xy.fX, xy.fY);
+ }
+
+ /** Appends arc to SkPath, relative to last SkPath SkPoint. Arc is implemented by one or
+ more conic, weighted to describe part of oval with radii (rx, ry) rotated by
+ xAxisRotate degrees. Arc curves from last SkPath SkPoint to relative end SkPoint:
+ (dx, dy), choosing one of four possible routes: clockwise or
+ counterclockwise, and smaller or larger. If SkPath is empty, the start arc SkPoint
+ is (0, 0).
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to end SkPoint
+ if either radii are zero, or if last SkPath SkPoint equals end SkPoint.
+ arcTo() scales radii (rx, ry) to fit last SkPath SkPoint and end SkPoint if both are
+ greater than zero but too small to describe an arc.
+
+ arcTo() appends up to four conic curves.
+        arcTo() implements the functionality of SVG arc, although SVG "sweep-flag" value is
+ opposite the integer value of sweep; SVG "sweep-flag" uses 1 for clockwise, while
+ kCW_Direction cast to int is zero.
+
+ @param rx radius before x-axis rotation
+ @param ry radius before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param dx x-axis offset end of arc from last SkPath SkPoint
+ @param dy y-axis offset end of arc from last SkPath SkPoint
+ @return reference to SkPath
+ */
+ SkPath& rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ Direction sweep, SkScalar dx, SkScalar dy);
+
+ /** Appends kClose_Verb to SkPath. A closed contour connects the first and last SkPoint
+ with line, forming a continuous loop. Open and closed contour draw the same
+ with SkPaint::kFill_Style. With SkPaint::kStroke_Style, open contour draws
+ SkPaint::Cap at contour start and end; closed contour draws
+ SkPaint::Join at contour start and end.
+
+ close() has no effect if SkPath is empty or last SkPath SkPath::Verb is kClose_Verb.
+
+ @return reference to SkPath
+ */
+ SkPath& close();
+
+ /** Returns true if fill is inverted and SkPath with fill represents area outside
+ of its geometric bounds.
+
+ @param fill one of: kWinding_FillType, kEvenOdd_FillType,
+ kInverseWinding_FillType, kInverseEvenOdd_FillType
+ @return true if SkPath fills outside its bounds
+ */
+ static bool IsInverseFillType(FillType fill) {
+ static_assert(0 == kWinding_FillType, "fill_type_mismatch");
+ static_assert(1 == kEvenOdd_FillType, "fill_type_mismatch");
+ static_assert(2 == kInverseWinding_FillType, "fill_type_mismatch");
+ static_assert(3 == kInverseEvenOdd_FillType, "fill_type_mismatch");
+ return (fill & 2) != 0;
+ }
+
+ /** Returns equivalent SkPath::FillType representing SkPath fill inside its bounds.
+
+ @param fill one of: kWinding_FillType, kEvenOdd_FillType,
+ kInverseWinding_FillType, kInverseEvenOdd_FillType
+ @return fill, or kWinding_FillType or kEvenOdd_FillType if fill is inverted
+ */
+ static FillType ConvertToNonInverseFillType(FillType fill) {
+ static_assert(0 == kWinding_FillType, "fill_type_mismatch");
+ static_assert(1 == kEvenOdd_FillType, "fill_type_mismatch");
+ static_assert(2 == kInverseWinding_FillType, "fill_type_mismatch");
+ static_assert(3 == kInverseEvenOdd_FillType, "fill_type_mismatch");
+ return (FillType)(fill & 1);
+ }
+
+ /** Approximates conic with quad array. Conic is constructed from start SkPoint p0,
+ control SkPoint p1, end SkPoint p2, and weight w.
+ Quad array is stored in pts; this storage is supplied by caller.
+        Maximum quad count is 2 to the power of pow2.
+ Every third point in array shares last SkPoint of previous quad and first SkPoint of
+ next quad. Maximum pts storage size is given by:
+ (1 + 2 * (1 << pow2)) * sizeof(SkPoint).
+
+        Returns quad count used for the approximation, which may be smaller
+ than the number requested.
+
+ conic weight determines the amount of influence conic control point has on the curve.
+ w less than one represents an elliptical section. w greater than one represents
+ a hyperbolic section. w equal to one represents a parabolic section.
+
+ Two quad curves are sufficient to approximate an elliptical conic with a sweep
+ of up to 90 degrees; in this case, set pow2 to one.
+
+ @param p0 conic start SkPoint
+ @param p1 conic control SkPoint
+ @param p2 conic end SkPoint
+ @param w conic weight
+ @param pts storage for quad array
+ @param pow2 quad count, as power of two, normally 0 to 5 (1 to 32 quad curves)
+ @return number of quad curves written to pts
+ */
+ static int ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkScalar w, SkPoint pts[], int pow2);
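+
+    // Editor's sketch (not upstream Skia code): sizing pts from the formula
+    // above; pow2 == 1 needs (1 + 2 * (1 << 1)) == 5 SkPoint for up to two
+    // quads. p0, p1, and p2 are assumed:
+    //
+    //   SkPoint quads[5];
+    //   int n = SkPath::ConvertConicToQuads(p0, p1, p2, SK_ScalarRoot2Over2,
+    //                                       quads, 1);
+    //   // quad i uses quads[2 * i] .. quads[2 * i + 2]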
+
+ /** Returns true if SkPath is equivalent to SkRect when filled.
+ If false: rect, isClosed, and direction are unchanged.
+ If true: rect, isClosed, and direction are written to if not nullptr.
+
+ rect may be smaller than the SkPath bounds. SkPath bounds may include kMove_Verb points
+ that do not alter the area drawn by the returned rect.
+
+ @param rect storage for bounds of SkRect; may be nullptr
+ @param isClosed storage set to true if SkPath is closed; may be nullptr
+ @param direction storage set to SkRect direction; may be nullptr
+ @return true if SkPath contains SkRect
+ */
+ bool isRect(SkRect* rect, bool* isClosed = nullptr, Direction* direction = nullptr) const;
+
+ /** Adds SkRect to SkPath, appending kMove_Verb, three kLine_Verb, and kClose_Verb,
+ starting with top-left corner of SkRect; followed by top-right, bottom-right,
+ and bottom-left if dir is kCW_Direction; or followed by bottom-left,
+ bottom-right, and top-right if dir is kCCW_Direction.
+
+ @param rect SkRect to add as a closed contour
+ @param dir SkPath::Direction to wind added contour
+ @return reference to SkPath
+ */
+ SkPath& addRect(const SkRect& rect, Direction dir = kCW_Direction);
+
+ /** Adds SkRect to SkPath, appending kMove_Verb, three kLine_Verb, and kClose_Verb.
+ If dir is kCW_Direction, SkRect corners are added clockwise; if dir is
+ kCCW_Direction, SkRect corners are added counterclockwise.
+ start determines the first corner added.
+
+ @param rect SkRect to add as a closed contour
+ @param dir SkPath::Direction to wind added contour
+ @param start initial corner of SkRect to add
+ @return reference to SkPath
+ */
+ SkPath& addRect(const SkRect& rect, Direction dir, unsigned start);
+
+ /** Adds SkRect (left, top, right, bottom) to SkPath,
+ appending kMove_Verb, three kLine_Verb, and kClose_Verb,
+ starting with top-left corner of SkRect; followed by top-right, bottom-right,
+ and bottom-left if dir is kCW_Direction; or followed by bottom-left,
+ bottom-right, and top-right if dir is kCCW_Direction.
+
+ @param left smaller x-axis value of SkRect
+ @param top smaller y-axis value of SkRect
+ @param right larger x-axis value of SkRect
+ @param bottom larger y-axis value of SkRect
+ @param dir SkPath::Direction to wind added contour
+ @return reference to SkPath
+ */
+ SkPath& addRect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom,
+ Direction dir = kCW_Direction);
+
+ /** Adds oval to path, appending kMove_Verb, four kConic_Verb, and kClose_Verb.
+ Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width
+ and half oval height. Oval begins at (oval.fRight, oval.centerY()) and continues
+ clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction.
+
+ @param oval bounds of ellipse added
+ @param dir SkPath::Direction to wind ellipse
+ @return reference to SkPath
+ */
+ SkPath& addOval(const SkRect& oval, Direction dir = kCW_Direction);
+
+ /** Adds oval to SkPath, appending kMove_Verb, four kConic_Verb, and kClose_Verb.
+ Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width
+ and half oval height. Oval begins at start and continues
+ clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction.
+
+ @param oval bounds of ellipse added
+ @param dir SkPath::Direction to wind ellipse
+ @param start index of initial point of ellipse
+ @return reference to SkPath
+ */
+ SkPath& addOval(const SkRect& oval, Direction dir, unsigned start);
+
+ /** Adds circle centered at (x, y) of size radius to SkPath, appending kMove_Verb,
+ four kConic_Verb, and kClose_Verb. Circle begins at: (x + radius, y), continuing
+ clockwise if dir is kCW_Direction, and counterclockwise if dir is kCCW_Direction.
+
+ Has no effect if radius is zero or negative.
+
+ @param x center of circle
+ @param y center of circle
+ @param radius distance from center to edge
+ @param dir SkPath::Direction to wind circle
+ @return reference to SkPath
+ */
+ SkPath& addCircle(SkScalar x, SkScalar y, SkScalar radius,
+ Direction dir = kCW_Direction);
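+
+    // Editor's sketch (not upstream Skia code): overlapping contours wound in
+    // opposite directions; with kWinding_FillType the inner circle cuts a
+    // hole, leaving a ring:
+    //
+    //   SkPath ring;
+    //   ring.addCircle(50, 50, 40, SkPath::kCW_Direction);   // outer contour
+    //   ring.addCircle(50, 50, 20, SkPath::kCCW_Direction);  // inner, reversed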
+
+ /** Appends arc to SkPath, as the start of new contour. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+ positive x-axis, and positive sweeps extends arc clockwise.
+
+        If sweepAngle <= -360 or sweepAngle >= 360, and startAngle modulo 90 is nearly
+        zero, appends oval instead of arc. Otherwise, sweepAngle values are treated
+ modulo 360, and arc may or may not draw depending on numeric rounding.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngle starting angle of arc in degrees
+ @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360
+ @return reference to SkPath
+ */
+ SkPath& addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle);
+
+ /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds
+ equal to rect; each corner is 90 degrees of an ellipse with radii (rx, ry). If
+ dir is kCW_Direction, SkRRect starts at top-left of the lower-left corner and
+ winds clockwise. If dir is kCCW_Direction, SkRRect starts at the bottom-left
+ of the upper-left corner and winds counterclockwise.
+
+ If either rx or ry is too large, rx and ry are scaled uniformly until the
+ corners fit. If rx or ry is less than or equal to zero, addRoundRect() appends
+ SkRect rect to SkPath.
+
+ After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect.
+
+ @param rect bounds of SkRRect
+ @param rx x-axis radius of rounded corners on the SkRRect
+ @param ry y-axis radius of rounded corners on the SkRRect
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ Direction dir = kCW_Direction);
+
+ /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds
+ equal to rect; each corner is 90 degrees of an ellipse with radii from the
+ array.
+
+ @param rect bounds of SkRRect
+ @param radii array of 8 SkScalar values, a radius pair for each corner
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRoundRect(const SkRect& rect, const SkScalar radii[],
+ Direction dir = kCW_Direction);
+
+ /** Adds rrect to SkPath, creating a new closed contour. If
+ dir is kCW_Direction, rrect starts at top-left of the lower-left corner and
+ winds clockwise. If dir is kCCW_Direction, rrect starts at the bottom-left
+ of the upper-left corner and winds counterclockwise.
+
+ After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect.
+
+ @param rrect bounds and radii of rounded rectangle
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRRect(const SkRRect& rrect, Direction dir = kCW_Direction);
+
+ /** Adds rrect to SkPath, creating a new closed contour. If dir is kCW_Direction, rrect
+ winds clockwise; if dir is kCCW_Direction, rrect winds counterclockwise.
+ start determines the first point of rrect to add.
+
+ @param rrect bounds and radii of rounded rectangle
+ @param dir SkPath::Direction to wind SkRRect
+ @param start index of initial point of SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRRect(const SkRRect& rrect, Direction dir, unsigned start);
+
+ /** Adds contour created from line array, adding (count - 1) line segments.
+ Contour added starts at pts[0], then adds a line for every additional SkPoint
+ in pts array. If close is true, appends kClose_Verb to SkPath, connecting
+ pts[count - 1] and pts[0].
+
+        Has no effect if count is less than one.
+
+ @param pts array of line sharing end and start SkPoint
+ @param count length of SkPoint array
+ @param close true to add line connecting contour end and start
+ @return reference to SkPath
+ */
+ SkPath& addPoly(const SkPoint pts[], int count, bool close);
+
+ /** Adds contour created from list. Contour added starts at list[0], then adds a line
+ for every additional SkPoint in list. If close is true, appends kClose_Verb to SkPath,
+ connecting last and first SkPoint in list.
+
+        Has no effect if list is empty.
+
+ @param list array of SkPoint
+ @param close true to add line connecting contour end and start
+ @return reference to SkPath
+ */
+ SkPath& addPoly(const std::initializer_list<SkPoint>& list, bool close) {
+ return this->addPoly(list.begin(), SkToInt(list.size()), close);
+ }
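+
+    // Editor's sketch (not upstream Skia code): a closed triangle from an
+    // initializer list; close == true appends the final kClose_Verb:
+    //
+    //   SkPath tri;
+    //   tri.addPoly({{10, 10}, {90, 10}, {50, 80}}, true);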
+
+ /** \enum SkPath::AddPathMode
+ AddPathMode chooses how addPath() appends. Adding one SkPath to another can extend
+ the last contour or start a new contour.
+ */
+ enum AddPathMode {
+ kAppend_AddPathMode, //!< appended to destination unaltered
+ kExtend_AddPathMode, //!< add line if prior contour is not closed
+ };
+
+ /** Appends src to SkPath, offset by (dx, dy).
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param dx offset added to src SkPoint array x-axis coordinates
+ @param dy offset added to src SkPoint array y-axis coordinates
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, SkScalar dx, SkScalar dy,
+ AddPathMode mode = kAppend_AddPathMode);
+
+ /** Appends src to SkPath.
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, AddPathMode mode = kAppend_AddPathMode) {
+ SkMatrix m;
+ m.reset();
+ return this->addPath(src, m, mode);
+ }
+
+ /** Appends src to SkPath, transformed by matrix. Transformed curves may have different
+ verbs, SkPoint, and conic weights.
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param matrix transform applied to src
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, const SkMatrix& matrix,
+ AddPathMode mode = kAppend_AddPathMode);
+
+ /** Appends src to SkPath, from back to front.
+ Reversed src always appends a new contour to SkPath.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @return reference to SkPath
+ */
+ SkPath& reverseAddPath(const SkPath& src);
+
+ /** Offsets SkPoint array by (dx, dy). Offset SkPath replaces dst.
+ If dst is nullptr, SkPath is replaced by offset data.
+
+ @param dx offset added to SkPoint array x-axis coordinates
+ @param dy offset added to SkPoint array y-axis coordinates
+ @param dst overwritten, translated copy of SkPath; may be nullptr
+ */
+ void offset(SkScalar dx, SkScalar dy, SkPath* dst) const;
+
+ /** Offsets SkPoint array by (dx, dy). SkPath is replaced by offset data.
+
+ @param dx offset added to SkPoint array x-axis coordinates
+ @param dy offset added to SkPoint array y-axis coordinates
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ this->offset(dx, dy, this);
+ }
+
+ /** Transforms verb array, SkPoint array, and weight by matrix.
+ transform may change verbs and increase their number.
+ Transformed SkPath replaces dst; if dst is nullptr, original data
+ is replaced.
+
+ @param matrix SkMatrix to apply to SkPath
+ @param dst overwritten, transformed copy of SkPath; may be nullptr
+ */
+ void transform(const SkMatrix& matrix, SkPath* dst) const;
+
+ /** Transforms verb array, SkPoint array, and weight by matrix.
+ transform may change verbs and increase their number.
+ SkPath is replaced by transformed data.
+
+ @param matrix SkMatrix to apply to SkPath
+ */
+ void transform(const SkMatrix& matrix) {
+ this->transform(matrix, this);
+ }
+
+ /** Returns last point on SkPath in lastPt. Returns false if SkPoint array is empty,
+ storing (0, 0) if lastPt is not nullptr.
+
+ @param lastPt storage for final SkPoint in SkPoint array; may be nullptr
+ @return true if SkPoint array contains one or more SkPoint
+ */
+ bool getLastPt(SkPoint* lastPt) const;
+
+ /** Sets last point to (x, y). If SkPoint array is empty, append kMove_Verb to
+ verb array and append (x, y) to SkPoint array.
+
+ @param x set x-axis value of last point
+ @param y set y-axis value of last point
+ */
+ void setLastPt(SkScalar x, SkScalar y);
+
+ /** Sets the last point on the path. If SkPoint array is empty, append kMove_Verb to
+ verb array and append p to SkPoint array.
+
+ @param p set value of last point
+ */
+ void setLastPt(const SkPoint& p) {
+ this->setLastPt(p.fX, p.fY);
+ }
+
+ /** \enum SkPath::SegmentMask
+ SegmentMask constants correspond to each drawing Verb type in SkPath; for
+ instance, if SkPath only contains lines, only the kLine_SegmentMask bit is set.
+ */
+ enum SegmentMask {
+ kLine_SegmentMask = kLine_SkPathSegmentMask,
+ kQuad_SegmentMask = kQuad_SkPathSegmentMask,
+ kConic_SegmentMask = kConic_SkPathSegmentMask,
+ kCubic_SegmentMask = kCubic_SkPathSegmentMask,
+ };
+
+ /** Returns a mask, where each set bit corresponds to a SegmentMask constant
+ if SkPath contains one or more verbs of that type.
+        Returns zero if SkPath contains no lines or curves: quads, conics, or cubics.
+
+ getSegmentMasks() returns a cached result; it is very fast.
+
+ @return SegmentMask bits or zero
+ */
+ uint32_t getSegmentMasks() const { return fPathRef->getSegmentMasks(); }
+
+ /** \enum SkPath::Verb
+ Verb instructs SkPath how to interpret one or more SkPoint and optional conic weight;
+ manage contour, and terminate SkPath.
+ */
+ enum Verb {
+ kMove_Verb = static_cast<int>(SkPathVerb::kMove),
+ kLine_Verb = static_cast<int>(SkPathVerb::kLine),
+ kQuad_Verb = static_cast<int>(SkPathVerb::kQuad),
+ kConic_Verb = static_cast<int>(SkPathVerb::kConic),
+ kCubic_Verb = static_cast<int>(SkPathVerb::kCubic),
+ kClose_Verb = static_cast<int>(SkPathVerb::kClose),
+ kDone_Verb = static_cast<int>(SkPathVerb::kDone),
+ };
+
+ /** \class SkPath::Iter
+ Iterates through verb array, and associated SkPoint array and conic weight.
+ Provides options to treat open contours as closed, and to ignore
+ degenerate data.
+ */
+ class SK_API Iter {
+ public:
+
+ /** Initializes SkPath::Iter with an empty SkPath. next() on SkPath::Iter returns
+ kDone_Verb.
+ Call setPath to initialize SkPath::Iter at a later time.
+
+ @return SkPath::Iter of empty SkPath
+ */
+ Iter();
+
+ /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in
+ path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each
+ open contour. path is not altered.
+
+ @param path SkPath to iterate
+ @param forceClose true if open contours generate kClose_Verb
+ @return SkPath::Iter of path
+ */
+ Iter(const SkPath& path, bool forceClose);
+
+ /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in
+ path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each
+ open contour. path is not altered.
+
+ @param path SkPath to iterate
+ @param forceClose true if open contours generate kClose_Verb
+ */
+ void setPath(const SkPath& path, bool forceClose);
+
+ /** Returns next SkPath::Verb in verb array, and advances SkPath::Iter.
+ When verb array is exhausted, returns kDone_Verb.
+
+ Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb.
+
+ @param pts storage for SkPoint data describing returned SkPath::Verb
+ @return next SkPath::Verb from verb array
+ */
+ Verb next(SkPoint pts[4]);
+
+ // DEPRECATED
+ Verb next(SkPoint pts[4], bool /*doConsumeDegenerates*/, bool /*exact*/ = false) {
+ return this->next(pts);
+ }
+
+ /** Returns conic weight if next() returned kConic_Verb.
+
+ If next() has not been called, or next() did not return kConic_Verb,
+ result is undefined.
+
+ @return conic weight for conic SkPoint returned by next()
+ */
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ /** Returns true if last kLine_Verb returned by next() was generated
+ by kClose_Verb. When true, the end point returned by next() is
+ also the start point of contour.
+
+ If next() has not been called, or next() did not return kLine_Verb,
+ result is undefined.
+
+ @return true if last kLine_Verb was generated by kClose_Verb
+ */
+ bool isCloseLine() const { return SkToBool(fCloseLine); }
+
+ /** Returns true if subsequent calls to next() return kClose_Verb before returning
+            kMove_Verb. If true, the contour SkPath::Iter is processing may end with kClose_Verb, or
+ SkPath::Iter may have been initialized with force close set to true.
+
+ @return true if contour is closed
+ */
+ bool isClosedContour() const;
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ SkPoint fMoveTo;
+ SkPoint fLastPt;
+ bool fForceClose;
+ bool fNeedClose;
+ bool fCloseLine;
+ enum SegmentState : uint8_t {
+ /** The current contour is empty. Starting processing or have just closed a contour. */
+ kEmptyContour_SegmentState,
+ /** Have seen a move, but nothing else. */
+ kAfterMove_SegmentState,
+ /** Have seen a primitive but not yet closed the path. Also the initial state. */
+ kAfterPrimitive_SegmentState
+ };
+ SegmentState fSegmentState;
+
+ inline const SkPoint& cons_moveTo();
+ Verb autoClose(SkPoint pts[2]);
+ };
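+
+    // Editor's sketch (not upstream Skia code): the conventional loop over a
+    // path with SkPath::Iter; path is assumed, and pts receives up to four
+    // points per verb:
+    //
+    //   SkPath::Iter iter(path, false);
+    //   SkPoint pts[4];
+    //   for (SkPath::Verb v = iter.next(pts); v != SkPath::kDone_Verb;
+    //        v = iter.next(pts)) {
+    //       switch (v) {
+    //           case SkPath::kMove_Verb:  /* pts[0] */             break;
+    //           case SkPath::kLine_Verb:  /* pts[0], pts[1] */     break;
+    //           case SkPath::kQuad_Verb:  /* pts[0]..pts[2] */     break;
+    //           case SkPath::kConic_Verb: /* plus conicWeight() */ break;
+    //           case SkPath::kCubic_Verb: /* pts[0]..pts[3] */     break;
+    //           case SkPath::kClose_Verb:                          break;
+    //           default:                                           break;
+    //       }
+    //   }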
+
+ /** \class SkPath::RawIter
+ Iterates through verb array, and associated SkPoint array and conic weight.
+ verb array, SkPoint array, and conic weight are returned unaltered.
+ */
+ class SK_API RawIter {
+ public:
+
+ /** Initializes RawIter with an empty SkPath. next() on RawIter returns kDone_Verb.
+            Call setPath to initialize RawIter at a later time.
+
+ @return RawIter of empty SkPath
+ */
+ RawIter() {}
+
+ /** Sets RawIter to return elements of verb array, SkPoint array, and conic weight in path.
+
+ @param path SkPath to iterate
+ @return RawIter of path
+ */
+ RawIter(const SkPath& path) {
+ setPath(path);
+ }
+
+        /** Sets RawIter to return elements of verb array, SkPoint array, and conic weight in
+ path.
+
+ @param path SkPath to iterate
+ */
+ void setPath(const SkPath& path) {
+ fRawIter.setPathRef(*path.fPathRef.get());
+ }
+
+ /** Returns next SkPath::Verb in verb array, and advances RawIter.
+ When verb array is exhausted, returns kDone_Verb.
+ Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb.
+
+ @param pts storage for SkPoint data describing returned SkPath::Verb
+ @return next SkPath::Verb from verb array
+ */
+ Verb next(SkPoint pts[4]) {
+ return (Verb) fRawIter.next(pts);
+ }
+
+ /** Returns next SkPath::Verb, but does not advance RawIter.
+
+ @return next SkPath::Verb from verb array
+ */
+ Verb peek() const {
+ return (Verb) fRawIter.peek();
+ }
+
+ /** Returns conic weight if next() returned kConic_Verb.
+
+ If next() has not been called, or next() did not return kConic_Verb,
+ result is undefined.
+
+ @return conic weight for conic SkPoint returned by next()
+ */
+ SkScalar conicWeight() const {
+ return fRawIter.conicWeight();
+ }
+
+ private:
+ SkPathRef::Iter fRawIter;
+ friend class SkPath;
+
+ };
+
+ /** Returns true if the point (x, y) is contained by SkPath, taking into
+ account FillType.
+
+ @param x x-axis value of containment test
+ @param y y-axis value of containment test
+ @return true if SkPoint is in SkPath
+ */
+ bool contains(SkScalar x, SkScalar y) const;
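+
+    // Editor's sketch (not upstream Skia code): hit testing honors the fill
+    // rule, so an inverse fill reports points outside the geometry as
+    // contained:
+    //
+    //   SkPath c;
+    //   c.addCircle(0, 0, 10);
+    //   bool inside = c.contains(0, 0);     // true
+    //   c.toggleInverseFillType();
+    //   bool outside = c.contains(100, 0);  // also true with inverse fill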
+
+ /** Writes text representation of SkPath to stream. If stream is nullptr, writes to
+ standard output. Set forceClose to true to get edges used to fill SkPath.
+ Set dumpAsHex true to generate exact binary representations
+ of floating point numbers used in SkPoint array and conic weights.
+
+ @param stream writable SkWStream receiving SkPath text representation; may be nullptr
+ @param forceClose true if missing kClose_Verb is output
+ @param dumpAsHex true if SkScalar values are written as hexadecimal
+ */
+ void dump(SkWStream* stream, bool forceClose, bool dumpAsHex) const;
+
+ /** Writes text representation of SkPath to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ with limited precision; it may not be possible to reconstruct original SkPath
+ from output.
+ */
+ void dump() const;
+
+ /** Writes text representation of SkPath to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ in hexadecimal to preserve their exact bit pattern. The output reconstructs the
+ original SkPath.
+
+        Use instead of dump() when submitting a bug report, so the path can be reproduced exactly.
+ */
+ void dumpHex() const;
+
+ /** Writes SkPath to buffer, returning the number of bytes written.
+ Pass nullptr to obtain the storage size.
+
+ Writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally writes computed information like SkPath::Convexity and bounds.
+
+        Use only in concert with readFromMemory();
+ the format used for SkPath in memory is not guaranteed.
+
+ @param buffer storage for SkPath; may be nullptr
+ @return size of storage required for SkPath; always a multiple of 4
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Writes SkPath to buffer, returning the buffer written to, wrapped in SkData.
+
+ serialize() writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally writes computed information like SkPath::Convexity and bounds.
+
+ serialize() should only be used in concert with readFromMemory().
+ The format used for SkPath in memory is not guaranteed.
+
+ @return SkPath data wrapped in SkData buffer
+ */
+ sk_sp<SkData> serialize() const;
+
+    /** Initializes SkPath from buffer of size length. Returns zero if the buffer
+ data is inconsistent, or the length is too small.
+
+ Reads SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally reads computed information like SkPath::Convexity and bounds.
+
+ Used only in concert with writeToMemory();
+ the format used for SkPath in memory is not guaranteed.
+
+ @param buffer storage for SkPath
+ @param length buffer size in bytes; must be multiple of 4
+ @return number of bytes read, or zero on failure
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
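+
+    // Editor's sketch (not upstream Skia code): a write/read round trip using
+    // the paired calls above; std::vector stands in for any writable buffer:
+    //
+    //   std::vector<uint8_t> buf(path.writeToMemory(nullptr));  // query size
+    //   path.writeToMemory(buf.data());
+    //   SkPath copy;
+    //   size_t read = copy.readFromMemory(buf.data(), buf.size());
+    //   // read is non-zero on success; copy now matches path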
+
+ /** (See Skia bug 1762.)
+ Returns a non-zero, globally unique value. A different value is returned
+ if verb array, SkPoint array, or conic weight changes.
+
+        Setting SkPath::FillType does not change generation identifier, except on the
+        Android framework, where SkPath::FillType does affect generation identifier.
+
+        Each time the path is modified, a different generation identifier will be returned.
+
+ @return non-zero, globally unique value
+ */
+ uint32_t getGenerationID() const;
+
+ /** Returns if SkPath data is consistent. Corrupt SkPath data is detected if
+ internal values are out of range or internal storage does not match
+ array dimensions.
+
+ @return true if SkPath data is consistent
+ */
+ bool isValid() const { return this->isValidImpl() && fPathRef->isValid(); }
+
+private:
+ sk_sp<SkPathRef> fPathRef;
+ int fLastMoveToIndex;
+ mutable std::atomic<Convexity> fConvexity;
+ mutable std::atomic<uint8_t> fFirstDirection; // really an SkPathPriv::FirstDirection
+ uint8_t fFillType : 2;
+ uint8_t fIsVolatile : 1;
+
+ /** Resets all fields other than fPathRef to their initial 'empty' values.
+ * Assumes the caller has already emptied fPathRef.
+     * On Android increments fGenerationID without resetting it.
+ */
+ void resetFields();
+
+ /** Sets all fields other than fPathRef to the values in 'that'.
+ * Assumes the caller has already set fPathRef.
+ * Doesn't change fGenerationID or fSourcePath on Android.
+ */
+ void copyFields(const SkPath& that);
+
+ size_t writeToMemoryAsRRect(void* buffer) const;
+ size_t readAsRRect(const void*, size_t);
+ size_t readFromMemory_EQ4Or5(const void*, size_t);
+
+ friend class Iter;
+ friend class SkPathPriv;
+ friend class SkPathStroker;
+
+ /* Append, in reverse order, the first contour of path, ignoring path's
+ last point. If no moveTo() call has been made for this contour, the
+ first point is automatically set to (0,0).
+ */
+ SkPath& reversePathTo(const SkPath&);
+
+ // called before we add points for lineTo, quadTo, cubicTo, checking to see
+ // if we need to inject a leading moveTo first
+ //
+ // SkPath path; path.lineTo(...); <--- need a leading moveTo(0, 0)
+ // SkPath path; ... path.close(); path.lineTo(...) <-- need a moveTo(previous moveTo)
+ //
+ inline void injectMoveToIfNeeded();
+
+ inline bool hasOnlyMoveTos() const;
+
+ Convexity internalGetConvexity() const;
+
+ /** Asserts if SkPath data is inconsistent.
+ Debugging check intended for internal use only.
+ */
+ SkDEBUGCODE(void validate() const { SkASSERT(this->isValidImpl()); } )
+ bool isValidImpl() const;
+ SkDEBUGCODE(void validateRef() const { fPathRef->validate(); } )
+
+ // called by stroker to see if all points (in the last contour) are equal and worthy of a cap
+ bool isZeroLengthSincePoint(int startPtIndex) const;
+
+    /** Returns whether the path can return its bounds at no cost (true) or must
+        perform some computation to determine them (false).
+     */
+ bool hasComputedBounds() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->hasComputedBounds();
+ }
+
+
+ // 'rect' needs to be sorted
+ void setBounds(const SkRect& rect) {
+ SkPathRef::Editor ed(&fPathRef);
+
+ ed.setBounds(rect);
+ }
+
+ void setPt(int index, SkScalar x, SkScalar y);
+
+ // Bottlenecks for working with fConvexity and fFirstDirection.
+ // Notice the setters are const... these are mutable atomic fields.
+ void setConvexity(Convexity) const;
+ void setFirstDirection(uint8_t) const;
+ uint8_t getFirstDirection() const;
+
+ friend class SkAutoPathBoundsUpdate;
+ friend class SkAutoDisableOvalCheck;
+ friend class SkAutoDisableDirectionCheck;
+ friend class SkPathEdgeIter;
+ friend class SkPathWriter;
+ friend class SkOpBuilder;
+ friend class SkBench_AddPathTest; // perf test reversePathTo
+ friend class PathTest_Private; // unit test reversePathTo
+ friend class ForceIsRRect_Private; // unit test isRRect
+ friend class FuzzPath; // for legacy access to validateRef
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathEffect.h b/gfx/skia/skia/include/core/SkPathEffect.h
new file mode 100644
index 0000000000..1a96810f00
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathEffect.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathEffect_DEFINED
+#define SkPathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkPath;
+class SkStrokeRec;
+
+/** \class SkPathEffect
+
+ SkPathEffect is the base class for objects in the SkPaint that affect
+ the geometry of a drawing primitive before it is transformed by the
+ canvas' matrix and drawn.
+
+ Dashing is implemented as a subclass of SkPathEffect.
+*/
+class SK_API SkPathEffect : public SkFlattenable {
+public:
+ /**
+     * Returns a patheffect that applies each effect (first and second) to the original path,
+ * and returns a path with the sum of these.
+ *
+ * result = first(path) + second(path)
+ *
+ */
+ static sk_sp<SkPathEffect> MakeSum(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second);
+
+ /**
+ * Returns a patheffect that applies the inner effect to the path, and then applies the
+     * outer effect to the inner effect's result.
+ *
+ * result = outer(inner(path))
+ */
+ static sk_sp<SkPathEffect> MakeCompose(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner);
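+
+    /* A minimal composition sketch using two concrete effects (both declared in
+       include/effects/, not in this header), assuming an SkPaint 'paint': dash the
+       path, then round the resulting corners.
+
+           #include "include/effects/SkCornerPathEffect.h"
+           #include "include/effects/SkDashPathEffect.h"
+
+           const SkScalar intervals[] = {10, 5};   // 10 on, 5 off
+           sk_sp<SkPathEffect> dash = SkDashPathEffect::Make(intervals, 2, 0);
+           sk_sp<SkPathEffect> corner = SkCornerPathEffect::Make(4);
+           // result = corner(dash(path))
+           sk_sp<SkPathEffect> composed = SkPathEffect::MakeCompose(corner, dash);
+           paint.setPathEffect(composed);
+    */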
+
+ /**
+ * Given a src path (input) and a stroke-rec (input and output), apply
+ * this effect to the src path, returning the new path in dst, and return
+ * true. If this effect cannot be applied, return false and ignore dst
+ * and stroke-rec.
+ *
+ * The stroke-rec specifies the initial request for stroking (if any).
+ * The effect can treat this as input only, or it can choose to change
+ * the rec as well. For example, the effect can decide to change the
+ * stroke's width or join, or the effect can change the rec from stroke
+ * to fill (or fill to stroke) in addition to returning a new (dst) path.
+ *
+ * If this method returns true, the caller will apply (as needed) the
+ * resulting stroke-rec to dst and then draw.
+ */
+ bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect* cullR) const;
+
+ /**
+ * Compute a conservative bounds for its effect, given the src bounds.
+ * The baseline implementation just assigns src to dst.
+ */
+ void computeFastBounds(SkRect* dst, const SkRect& src) const;
+
+ /** \class PointData
+
+ PointData aggregates all the information needed to draw the point
+ primitives returned by an 'asPoints' call.
+ */
+ class PointData {
+ public:
+ PointData()
+ : fFlags(0)
+ , fPoints(nullptr)
+ , fNumPoints(0) {
+ fSize.set(SK_Scalar1, SK_Scalar1);
+ // 'asPoints' needs to initialize/fill-in 'fClipRect' if it sets
+ // the kUseClip flag
+ }
+ ~PointData() {
+ delete [] fPoints;
+ }
+
+ // TODO: consider using passed-in flags to limit the work asPoints does.
+ // For example, a kNoPath flag could indicate don't bother generating
+ // stamped solutions.
+
+ // Currently none of these flags are supported.
+ enum PointFlags {
+ kCircles_PointFlag = 0x01, // draw points as circles (instead of rects)
+ kUsePath_PointFlag = 0x02, // draw points as stamps of the returned path
+ kUseClip_PointFlag = 0x04, // apply 'fClipRect' before drawing the points
+ };
+
+ uint32_t fFlags; // flags that impact the drawing of the points
+ SkPoint* fPoints; // the center point of each generated point
+ int fNumPoints; // number of points in fPoints
+ SkVector fSize; // the size to draw the points
+ SkRect fClipRect; // clip required to draw the points (if kUseClip is set)
+ SkPath fPath; // 'stamp' to be used at each point (if kUsePath is set)
+
+ SkPath fFirst; // If not empty, contains geometry for first point
+ SkPath fLast; // If not empty, contains geometry for last point
+ };
+
+ /**
+ * Does applying this path effect to 'src' yield a set of points? If so,
+ * optionally return the points in 'results'.
+ */
+ bool asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec&, const SkMatrix&,
+ const SkRect* cullR) const;
+
+ /**
+     * If the PathEffect can be represented as a dash pattern, asADash will return
+     * kDash_DashType, and kNone_DashType otherwise. If a non-null info is passed in, it
+     * will be filled in with the dash information, provided the PathEffect is a dash
+     * pattern. If the passed-in info has an fCount equal to or greater than that of the
+     * effect, the values of the dash intervals will be copied into it. The general
+     * approach is therefore to call asADash once with a default-constructed info to get
+     * the DashType and fCount; if the effect can be represented as a dash pattern,
+     * allocate space for the intervals in info, then call asADash again with the same
+     * info so the intervals are copied in.
+ */
+
+ enum DashType {
+ kNone_DashType, //!< ignores the info parameter
+ kDash_DashType, //!< fills in all of the info parameter
+ };
+
+ struct DashInfo {
+ DashInfo() : fIntervals(nullptr), fCount(0), fPhase(0) {}
+ DashInfo(SkScalar* intervals, int32_t count, SkScalar phase)
+ : fIntervals(intervals), fCount(count), fPhase(phase) {}
+
+ SkScalar* fIntervals; //!< Length of on/off intervals for dashed lines
+                                        //   Even indices are "on" lengths, odd indices "off"
+        int32_t     fCount;             //!< Number of intervals in the dash. Should be an even number
+ SkScalar fPhase; //!< Offset into the dashed interval pattern
+ // mod the sum of all intervals
+ };
+
+ DashType asADash(DashInfo* info) const;
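+
+    /* A sketch of the two-call pattern described above, assuming 'effect' is a
+       valid sk_sp<SkPathEffect> and <vector> is available:
+
+           SkPathEffect::DashInfo info;    // fIntervals == nullptr: nothing copied yet
+           if (effect->asADash(&info) == SkPathEffect::kDash_DashType) {
+               std::vector<SkScalar> intervals(info.fCount);
+               info.fIntervals = intervals.data();
+               effect->asADash(&info);     // second call copies the intervals in
+           }
+    */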
+
+ static void RegisterFlattenables();
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkPathEffect_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkPathEffect_Type;
+ }
+
+ static sk_sp<SkPathEffect> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkPathEffect>(static_cast<SkPathEffect*>(
+ SkFlattenable::Deserialize(
+ kSkPathEffect_Type, data, size, procs).release()));
+ }
+
+protected:
+ SkPathEffect() {}
+
+ virtual bool onFilterPath(SkPath*, const SkPath&, SkStrokeRec*, const SkRect*) const = 0;
+ virtual SkRect onComputeFastBounds(const SkRect& src) const {
+ return src;
+ }
+ virtual bool onAsPoints(PointData*, const SkPath&, const SkStrokeRec&, const SkMatrix&,
+ const SkRect*) const {
+ return false;
+ }
+ virtual DashType onAsADash(DashInfo*) const {
+ return kNone_DashType;
+ }
+
+private:
+ // illegal
+ SkPathEffect(const SkPathEffect&);
+ SkPathEffect& operator=(const SkPathEffect&);
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathMeasure.h b/gfx/skia/skia/include/core/SkPathMeasure.h
new file mode 100644
index 0000000000..f799041bff
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathMeasure.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasure_DEFINED
+#define SkPathMeasure_DEFINED
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPath.h"
+#include "include/private/SkTDArray.h"
+
+class SK_API SkPathMeasure {
+public:
+ SkPathMeasure();
+ /** Initialize the pathmeasure with the specified path. The parts of the path that are needed
+ * are copied, so the client is free to modify/delete the path after this call.
+ *
+     * resScale controls the precision of the measure. Values > 1 increase the
+     * precision (and possibly slow down the computation).
+ */
+ SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+ ~SkPathMeasure();
+
+ /** Reset the pathmeasure with the specified path. The parts of the path that are needed
+     * are copied, so the client is free to modify/delete the path after this call.
+ */
+ void setPath(const SkPath*, bool forceClosed);
+
+ /** Return the total length of the current contour, or 0 if no path
+        is associated (e.g. setPath(nullptr, false))
+ */
+ SkScalar getLength();
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding position and tangent.
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ position and tangent are unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
+ SkVector* tangent);
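+
+    /* A minimal sampling sketch, assuming a non-empty 'path' and n >= 2: place n
+       points evenly along the first contour.
+
+           SkPathMeasure measure(path, false);
+           SkScalar length = measure.getLength();
+           for (int i = 0; i < n; ++i) {
+               SkPoint pos;
+               SkVector tan;
+               if (measure.getPosTan(length * i / (n - 1), &pos, &tan)) {
+                   // use pos/tan, e.g. to orient a glyph along the path
+               }
+           }
+    */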
+
+ enum MatrixFlags {
+ kGetPosition_MatrixFlag = 0x01,
+ kGetTangent_MatrixFlag = 0x02,
+ kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
+ };
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding matrix (by calling getPosTan).
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ matrix is unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags = kGetPosAndTan_MatrixFlag);
+
+ /** Given a start and stop distance, return in dst the intervening segment(s).
+ If the segment is zero-length, return false, else return true.
+ startD and stopD are pinned to legal values (0..getLength()). If startD > stopD
+ then return false (and leave dst untouched).
+ Begin the segment with a moveTo if startWithMoveTo is true
+ */
+ bool getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo);
+
+    /** Return true if the current contour is closed.
+ */
+ bool isClosed();
+
+ /** Move to the next contour in the path. Return true if one exists, or false if
+ we're done with the path.
+ */
+ bool nextContour();
+
+#ifdef SK_DEBUG
+ void dump();
+#endif
+
+private:
+ SkContourMeasureIter fIter;
+ sk_sp<SkContourMeasure> fContour;
+
+ SkPathMeasure(const SkPathMeasure&) = delete;
+ SkPathMeasure& operator=(const SkPathMeasure&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathTypes.h b/gfx/skia/skia/include/core/SkPathTypes.h
new file mode 100644
index 0000000000..4f5adf458c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathTypes.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathTypes_DEFINED
+#define SkPathTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkPathFillType {
+ /** Specifies that "inside" is computed by a non-zero sum of signed edge crossings */
+ kWinding,
+ /** Specifies that "inside" is computed by an odd number of edge crossings */
+ kEvenOdd,
+ /** Same as Winding, but draws outside of the path, rather than inside */
+ kInverseWinding,
+ /** Same as EvenOdd, but draws outside of the path, rather than inside */
+ kInverseEvenOdd
+};
+
+enum class SkPathConvexityType {
+ kUnknown,
+ kConvex,
+ kConcave
+};
+
+enum class SkPathDirection {
+ /** clockwise direction for adding closed contours */
+ kCW,
+ /** counter-clockwise direction for adding closed contours */
+ kCCW,
+};
+
+enum SkPathSegmentMask {
+ kLine_SkPathSegmentMask = 1 << 0,
+ kQuad_SkPathSegmentMask = 1 << 1,
+ kConic_SkPathSegmentMask = 1 << 2,
+ kCubic_SkPathSegmentMask = 1 << 3,
+};
+
+enum class SkPathVerb {
+ kMove, //!< iter.next returns 1 point
+ kLine, //!< iter.next returns 2 points
+ kQuad, //!< iter.next returns 3 points
+ kConic, //!< iter.next returns 3 points + iter.conicWeight()
+ kCubic, //!< iter.next returns 4 points
+ kClose, //!< iter.next returns 1 point (contour's moveTo pt)
+ kDone, //!< iter.next returns 0 points
+};
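+
+/* These verbs parallel SkPath::Verb as returned by SkPath's iterators. A minimal
+   walking sketch using SkPath::RawIter (declared in SkPath.h), assuming 'path':
+
+       SkPath::RawIter iter(path);
+       SkPoint pts[4];
+       int lines = 0, conics = 0;
+       for (SkPath::Verb v = iter.next(pts); v != SkPath::kDone_Verb; v = iter.next(pts)) {
+           if (v == SkPath::kLine_Verb)  { ++lines; }                    // pts[0..1] valid
+           if (v == SkPath::kConic_Verb) { ++conics; (void)iter.conicWeight(); }
+       }
+*/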
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPicture.h b/gfx/skia/skia/include/core/SkPicture.h
new file mode 100644
index 0000000000..a7ec138b58
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPicture.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicture_DEFINED
+#define SkPicture_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+
+class SkCanvas;
+class SkData;
+struct SkDeserialProcs;
+class SkImage;
+class SkMatrix;
+struct SkSerialProcs;
+class SkShader;
+class SkStream;
+class SkWStream;
+
+/** \class SkPicture
+ SkPicture records drawing commands made to SkCanvas. The command stream may be
+ played in whole or in part at a later time.
+
+ SkPicture is an abstract class. SkPicture may be generated by SkPictureRecorder
+ or SkDrawable, or from SkPicture previously saved to SkData or SkStream.
+
+ SkPicture may contain any SkCanvas drawing command, as well as one or more
+ SkCanvas matrix or SkCanvas clip. SkPicture has a cull SkRect, which is used as
+ a bounding box hint. To limit SkPicture bounds, use SkCanvas clip when
+ recording or drawing SkPicture.
+*/
+class SK_API SkPicture : public SkRefCnt {
+public:
+
+ /** Recreates SkPicture that was serialized into a stream. Returns constructed SkPicture
+ if successful; otherwise, returns nullptr. Fails if data does not permit
+ constructing valid SkPicture.
+
+ procs->fPictureProc permits supplying a custom function to decode SkPicture.
+ If procs->fPictureProc is nullptr, default decoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to data, data byte length, and user context.
+
+ @param stream container for serial data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from stream data
+ */
+ static sk_sp<SkPicture> MakeFromStream(SkStream* stream,
+ const SkDeserialProcs* procs = nullptr);
+
+ /** Recreates SkPicture that was serialized into data. Returns constructed SkPicture
+ if successful; otherwise, returns nullptr. Fails if data does not permit
+ constructing valid SkPicture.
+
+ procs->fPictureProc permits supplying a custom function to decode SkPicture.
+ If procs->fPictureProc is nullptr, default decoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to data, data byte length, and user context.
+
+ @param data container for serial data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from data
+ */
+ static sk_sp<SkPicture> MakeFromData(const SkData* data,
+ const SkDeserialProcs* procs = nullptr);
+
+ /**
+        Recreates SkPicture that was serialized into data at the given address and size.
+        Returns constructed SkPicture if successful; otherwise, returns nullptr. Fails if
+        data does not permit constructing valid SkPicture.
+
+ @param data pointer to serial data
+ @param size size of data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from data
+ */
+ static sk_sp<SkPicture> MakeFromData(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr);
+
+ /** \class SkPicture::AbortCallback
+        AbortCallback is an abstract class. An implementation of AbortCallback may be
+        passed as a parameter to SkPicture::playback, to stop it before all drawing
+ commands have been processed.
+
+ If AbortCallback::abort returns true, SkPicture::playback is interrupted.
+ */
+ class SK_API AbortCallback {
+ public:
+
+        /** Has no effect. AbortCallback is an abstract class and can only be
+            constructed by a subclass.
+        */
+ AbortCallback() {}
+
+ /** Has no effect.
+ */
+ virtual ~AbortCallback() {}
+
+ /** Stops SkPicture playback when some condition is met. A subclass of
+ AbortCallback provides an override for abort() that can stop SkPicture::playback.
+
+ The part of SkPicture drawn when aborted is undefined. SkPicture instantiations are
+ free to stop drawing at different points during playback.
+
+ If the abort happens inside one or more calls to SkCanvas::save(), stack
+ of SkCanvas matrix and SkCanvas clip values is restored to its state before
+ SkPicture::playback was called.
+
+ @return true to stop playback
+ */
+ virtual bool abort() = 0;
+ };
+
+ /** Replays the drawing commands on the specified canvas. In the case that the
+ commands are recorded, each command in the SkPicture is sent separately to canvas.
+
+ To add a single command to draw SkPicture to recording canvas, call
+ SkCanvas::drawPicture instead.
+
+ @param canvas receiver of drawing commands
+ @param callback allows interruption of playback
+ */
+ virtual void playback(SkCanvas* canvas, AbortCallback* callback = nullptr) const = 0;
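+
+    /* A minimal interruption sketch: an AbortCallback subclass that stops playback
+       after a fixed number of abort() queries ('StopAfterN' is illustrative only;
+       'picture' and 'canvas' are assumed).
+
+           class StopAfterN : public SkPicture::AbortCallback {
+           public:
+               explicit StopAfterN(int n) : fRemaining(n) {}
+               bool abort() override { return --fRemaining < 0; }
+           private:
+               int fRemaining;
+           };
+
+           StopAfterN callback(100);
+           picture->playback(canvas, &callback);
+    */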
+
+ /** Returns cull SkRect for this picture, passed in when SkPicture was created.
+ Returned SkRect does not specify clipping SkRect for SkPicture; cull is hint
+ of SkPicture bounds.
+
+ SkPicture is free to discard recorded drawing commands that fall outside
+ cull.
+
+ @return bounds passed when SkPicture was created
+ */
+ virtual SkRect cullRect() const = 0;
+
+    /** Returns a non-zero value unique among SkPictures in the current Skia process.
+
+ @return identifier for SkPicture
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns storage containing SkData describing SkPicture, using optional custom
+ encoders.
+
+ procs->fPictureProc permits supplying a custom function to encode SkPicture.
+ If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to SkPicture and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @return storage containing serialized SkPicture
+ */
+ sk_sp<SkData> serialize(const SkSerialProcs* procs = nullptr) const;
+
+ /** Writes picture to stream, using optional custom encoders.
+
+ procs->fPictureProc permits supplying a custom function to encode SkPicture.
+ If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to SkPicture and user context.
+
+ @param stream writable serial data stream
+ @param procs custom serial data encoders; may be nullptr
+ */
+ void serialize(SkWStream* stream, const SkSerialProcs* procs = nullptr) const;
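+
+    /* A minimal serialization round-trip sketch, assuming a previously recorded
+       'picture':
+
+           sk_sp<SkData> data = picture->serialize();
+           sk_sp<SkPicture> copy = SkPicture::MakeFromData(data.get());
+           // copy replays the same commands; copy->cullRect() == picture->cullRect()
+    */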
+
+ /** Returns a placeholder SkPicture. Result does not draw, and contains only
+ cull SkRect, a hint of its bounds. Result is immutable; it cannot be changed
+ later. Result identifier is unique.
+
+ Returned placeholder can be intercepted during playback to insert other
+ commands into SkCanvas draw stream.
+
+ @param cull placeholder dimensions
+ @return placeholder with unique identifier
+ */
+ static sk_sp<SkPicture> MakePlaceholder(SkRect cull);
+
+ /** Returns the approximate number of operations in SkPicture. Returned value
+ may be greater or less than the number of SkCanvas calls
+ recorded: some calls may be recorded as more than one operation, other
+ calls may be optimized away.
+
+ @return approximate operation count
+ */
+ virtual int approximateOpCount() const = 0;
+
+ /** Returns the approximate byte size of SkPicture. Does not include large objects
+ referenced by SkPicture.
+
+ @return approximate size
+ */
+ virtual size_t approximateBytesUsed() const = 0;
+
+ /** Return a new shader that will draw with this picture.
+ *
+ * @param tmx The tiling mode to use when sampling in the x-direction.
+ * @param tmy The tiling mode to use when sampling in the y-direction.
+ * @param localMatrix Optional matrix used when sampling
+ * @param tile The tile rectangle in picture coordinates: this represents the subset
+ * (or superset) of the picture used when building a tile. It is not
+ * affected by localMatrix and does not imply scaling (only translation
+ * and cropping). If null, the tile rect is considered equal to the picture
+ * bounds.
+ * @return Returns a new shader object. Note: this function never returns null.
+ */
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tileRect) const;
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix = nullptr) const;
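+
+    /* A minimal tiling sketch, assuming 'picture' holds recorded content and
+       'paint' is an SkPaint:
+
+           sk_sp<SkShader> shader =
+                   picture->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat);
+           paint.setShader(shader);
+           // drawing with 'paint' now fills with repeated copies of the picture
+    */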
+
+private:
+ // Subclass whitelist.
+ SkPicture();
+ friend class SkBigPicture;
+ friend class SkEmptyPicture;
+ friend class SkPicturePriv;
+ template <typename> friend class SkMiniPicture;
+
+ void serialize(SkWStream*, const SkSerialProcs*, class SkRefCntSet* typefaces,
+ bool textBlobsOnly=false) const;
+ static sk_sp<SkPicture> MakeFromStream(SkStream*, const SkDeserialProcs*,
+ class SkTypefacePlayback*);
+ friend class SkPictureData;
+
+ /** Return true if the SkStream/Buffer represents a serialized picture, and
+ fills out SkPictInfo. After this function returns, the data source is not
+ rewound so it will have to be manually reset before passing to
+ MakeFromStream or MakeFromBuffer. Note, MakeFromStream and
+ MakeFromBuffer perform this check internally so these entry points are
+ intended for stand alone tools.
+ If false is returned, SkPictInfo is unmodified.
+ */
+ static bool StreamIsSKP(SkStream*, struct SkPictInfo*);
+ static bool BufferIsSKP(class SkReadBuffer*, struct SkPictInfo*);
+ friend bool SkPicture_StreamIsSKP(SkStream*, struct SkPictInfo*);
+
+ // Returns NULL if this is not an SkBigPicture.
+ virtual const class SkBigPicture* asSkBigPicture() const { return nullptr; }
+
+ friend struct SkPathCounter;
+
+ static bool IsValidPictInfo(const struct SkPictInfo& info);
+ static sk_sp<SkPicture> Forwardport(const struct SkPictInfo&,
+ const class SkPictureData*,
+ class SkReadBuffer* buffer);
+
+ struct SkPictInfo createHeader() const;
+ class SkPictureData* backport() const;
+
+ uint32_t fUniqueID;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPictureRecorder.h b/gfx/skia/skia/include/core/SkPictureRecorder.h
new file mode 100644
index 0000000000..1b08f079b0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPictureRecorder.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecorder_DEFINED
+#define SkPictureRecorder_DEFINED
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRefCnt.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+namespace android {
+ class Picture;
+};
+#endif
+
+class GrContext;
+class SkCanvas;
+class SkDrawable;
+class SkMiniRecorder;
+class SkPictureRecord;
+class SkRecord;
+class SkRecorder;
+
+class SK_API SkPictureRecorder {
+public:
+ SkPictureRecorder();
+ ~SkPictureRecorder();
+
+ enum RecordFlags {
+ // If you call drawPicture() or drawDrawable() on the recording canvas, this flag forces
+ // that object to playback its contents immediately rather than reffing the object.
+ kPlaybackDrawPicture_RecordFlag = 1 << 0,
+ };
+
+ enum FinishFlags {
+ };
+
+ /** Returns the canvas that records the drawing commands.
+        @param bounds the cull rect used when recording this picture. Any drawing that falls
+                      outside of this rect is undefined, and may or may not be drawn.
+ @param bbhFactory factory to create desired acceleration structure
+ @param recordFlags optional flags that control recording.
+ @return the canvas.
+ */
+ SkCanvas* beginRecording(const SkRect& bounds,
+ SkBBHFactory* bbhFactory = nullptr,
+ uint32_t recordFlags = 0);
+
+ SkCanvas* beginRecording(SkScalar width, SkScalar height,
+ SkBBHFactory* bbhFactory = nullptr,
+ uint32_t recordFlags = 0) {
+ return this->beginRecording(SkRect::MakeWH(width, height), bbhFactory, recordFlags);
+ }
+
+ /** Returns the recording canvas if one is active, or NULL if recording is
+ not active. This does not alter the refcnt on the canvas (if present).
+ */
+ SkCanvas* getRecordingCanvas();
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * The returned picture is immutable. If during recording drawables were added to the canvas,
+ * these will have been "drawn" into a recording canvas, so that this resulting picture will
+ * reflect their current state, but will not contain a live reference to the drawables
+ * themselves.
+ */
+ sk_sp<SkPicture> finishRecordingAsPicture(uint32_t endFlags = 0);
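+
+    /* A minimal recording sketch: record one command and snap an immutable picture.
+
+           SkPictureRecorder recorder;
+           SkCanvas* canvas = recorder.beginRecording(SkRect::MakeWH(100, 100));
+           canvas->drawColor(SK_ColorWHITE);
+           sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+    */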
+
+ /**
+ * Signal that the caller is done recording, and update the cull rect to use for bounding
+ * box hierarchy (BBH) generation. The behavior is the same as calling
+ * finishRecordingAsPicture(), except that this method updates the cull rect initially passed
+ * into beginRecording.
+ * @param cullRect the new culling rectangle to use as the overall bound for BBH generation
+ * and subsequent culling operations.
+ * @return the picture containing the recorded content.
+ */
+ sk_sp<SkPicture> finishRecordingAsPictureWithCull(const SkRect& cullRect,
+ uint32_t endFlags = 0);
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * Unlike finishRecordingAsPicture(), which returns an immutable picture, the returned drawable
+ * may contain live references to other drawables (if they were added to the recording canvas)
+ * and therefore this drawable will reflect the current state of those nested drawables anytime
+ * it is drawn or a new picture is snapped from it (by calling drawable->newPictureSnapshot()).
+ */
+ sk_sp<SkDrawable> finishRecordingAsDrawable(uint32_t endFlags = 0);
+
+private:
+ void reset();
+
+ /** Replay the current (partially recorded) operation stream into
+ canvas. This call doesn't close the current recording.
+ */
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ friend class android::Picture;
+#endif
+ friend class SkPictureRecorderReplayTester; // for unit testing
+ void partialReplay(SkCanvas* canvas) const;
+
+ bool fActivelyRecording;
+ uint32_t fFlags;
+ SkRect fCullRect;
+ sk_sp<SkBBoxHierarchy> fBBH;
+ std::unique_ptr<SkRecorder> fRecorder;
+ sk_sp<SkRecord> fRecord;
+ std::unique_ptr<SkMiniRecorder> fMiniRecorder;
+
+ SkPictureRecorder(SkPictureRecorder&&) = delete;
+ SkPictureRecorder& operator=(SkPictureRecorder&&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixelRef.h b/gfx/skia/skia/include/core/SkPixelRef.h
new file mode 100644
index 0000000000..48ad377acc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixelRef.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixelRef_DEFINED
+#define SkPixelRef_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTDArray.h"
+
+#include <atomic>
+
+struct SkIRect;
+
+class GrTexture;
+class SkDiscardableMemory;
+
+/** \class SkPixelRef
+
+ This class is the smart container for pixel memory, and is used with SkBitmap.
+ This class can be shared/accessed between multiple threads.
+*/
+class SK_API SkPixelRef : public SkRefCnt {
+public:
+ SkPixelRef(int width, int height, void* addr, size_t rowBytes);
+ ~SkPixelRef() override;
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ void* pixels() const { return fPixels; }
+ size_t rowBytes() const { return fRowBytes; }
+
+ /** Returns a non-zero, unique value corresponding to the pixels in this
+ pixelref. Each time the pixels are changed (and notifyPixelsChanged is
+ called), a different generation ID will be returned.
+ */
+ uint32_t getGenerationID() const;
+
+ /**
+ * Call this if you have changed the contents of the pixels. This will in-
+ * turn cause a different generation ID value to be returned from
+ * getGenerationID().
+ */
+ void notifyPixelsChanged();
+
+ /** Returns true if this pixelref is marked as immutable, meaning that the
+ contents of its pixels will not change for the lifetime of the pixelref.
+ */
+ bool isImmutable() const { return fMutability != kMutable; }
+
+    /** Marks this pixelref as immutable, meaning that the contents of its
+ pixels will not change for the lifetime of the pixelref. This state can
+ be set on a pixelref, but it cannot be cleared once it is set.
+ */
+ void setImmutable();
+
+ // Register a listener that may be called the next time our generation ID changes.
+ //
+ // We'll only call the listener if we're confident that we are the only SkPixelRef with this
+ // generation ID. If our generation ID changes and we decide not to call the listener, we'll
+ // never call it: you must add a new listener for each generation ID change. We also won't call
+ // the listener when we're certain no one knows what our generation ID is.
+ //
+ // This can be used to invalidate caches keyed by SkPixelRef generation ID.
+ struct GenIDChangeListener {
+ virtual ~GenIDChangeListener() {}
+ virtual void onChange() = 0;
+ };
+
+ // Takes ownership of listener. Threadsafe.
+ void addGenIDChangeListener(GenIDChangeListener* listener);
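+
+    /* A minimal invalidation sketch: a listener that erases a cache entry keyed by
+       this pixelref's generation ID ('myCache' and 'CacheInvalidator' are
+       illustrative only; 'pixelRef' is assumed).
+
+           struct CacheInvalidator : SkPixelRef::GenIDChangeListener {
+               uint32_t fKey;
+               explicit CacheInvalidator(uint32_t key) : fKey(key) {}
+               void onChange() override { myCache.erase(fKey); }
+           };
+
+           // ownership of the listener passes to the pixelref:
+           pixelRef->addGenIDChangeListener(
+                   new CacheInvalidator(pixelRef->getGenerationID()));
+    */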
+
+ // Call when this pixelref is part of the key to a resourcecache entry. This allows the cache
+ // to know automatically those entries can be purged when this pixelref is changed or deleted.
+ void notifyAddedToCache() {
+ fAddedToCache.store(true);
+ }
+
+ virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; }
+
+protected:
+ void android_only_reset(int width, int height, size_t rowBytes);
+
+private:
+ int fWidth;
+ int fHeight;
+ void* fPixels;
+ size_t fRowBytes;
+
+ // Bottom bit indicates the Gen ID is unique.
+ bool genIDIsUnique() const { return SkToBool(fTaggedGenID.load() & 1); }
+ mutable std::atomic<uint32_t> fTaggedGenID;
+
+ SkMutex fGenIDChangeListenersMutex;
+ SkTDArray<GenIDChangeListener*> fGenIDChangeListeners; // pointers are owned
+
+ // Set true by caches when they cache content that's derived from the current pixels.
+ std::atomic<bool> fAddedToCache;
+
+ enum Mutability {
+ kMutable, // PixelRefs begin mutable.
+ kTemporarilyImmutable, // Considered immutable, but can revert to mutable.
+ kImmutable, // Once set to this state, it never leaves.
+ } fMutability : 8; // easily fits inside a byte
+
+ void needsNewGenID();
+ void callGenIDChangeListeners();
+
+ void setTemporarilyImmutable();
+ void restoreMutability();
+ friend class SkSurface_Raster; // For the two methods above.
+
+ void setImmutableWithID(uint32_t genID);
+ friend void SkBitmapCache_setImmutableWithID(SkPixelRef*, uint32_t);
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixmap.h b/gfx/skia/skia/include/core/SkPixmap.h
new file mode 100644
index 0000000000..7d460275ff
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixmap.h
@@ -0,0 +1,713 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixmap_DEFINED
+#define SkPixmap_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImageInfo.h"
+
+class SkData;
+struct SkMask;
+
+/** \class SkPixmap
+ SkPixmap provides a utility to pair SkImageInfo with pixels and row bytes.
+ SkPixmap is a low level class which provides convenience functions to access
+ raster destinations. SkCanvas can not draw SkPixmap, nor does SkPixmap provide
+ a direct drawing destination.
+
+ Use SkBitmap to draw pixels referenced by SkPixmap; use SkSurface to draw into
+ pixels referenced by SkPixmap.
+
+ SkPixmap does not try to manage the lifetime of the pixel memory. Use SkPixelRef
+ to manage pixel memory; SkPixelRef is safe across threads.
+*/
+class SK_API SkPixmap {
+public:
+
+ /** Creates an empty SkPixmap without pixels, with kUnknown_SkColorType, with
+ kUnknown_SkAlphaType, and with a width and height of zero. Use
+ reset() to associate pixels, SkColorType, SkAlphaType, width, and height
+ after SkPixmap has been created.
+
+ @return empty SkPixmap
+ */
+ SkPixmap()
+ : fPixels(nullptr), fRowBytes(0), fInfo(SkImageInfo::MakeUnknown(0, 0))
+ {}
+
+ /** Creates SkPixmap from info width, height, SkAlphaType, and SkColorType.
+ addr points to pixels, or nullptr. rowBytes should be info.width() times
+ info.bytesPerPixel(), or larger.
+
+ No parameter checking is performed; it is up to the caller to ensure that
+ addr and rowBytes agree with info.
+
+ The memory lifetime of pixels is managed by the caller. When SkPixmap goes
+ out of scope, addr is unaffected.
+
+ SkPixmap may be later modified by reset() to change its size, pixel type, or
+ storage.
+
+ @param info width, height, SkAlphaType, SkColorType of SkImageInfo
+ @param addr pointer to pixels allocated by caller; may be nullptr
+ @param rowBytes size of one row of addr; width times pixel size, or larger
+ @return initialized SkPixmap
+ */
+ SkPixmap(const SkImageInfo& info, const void* addr, size_t rowBytes)
+ : fPixels(addr), fRowBytes(rowBytes), fInfo(info)
+ {}
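+
+    /* A minimal construction sketch over caller-owned storage (the buffer must
+       outlive the pixmap), assuming <vector> is available:
+
+           SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
+           std::vector<uint32_t> storage(64 * 64);
+           SkPixmap pixmap(info, storage.data(), info.minRowBytes());
+    */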
+
+ /** Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to
+ kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType.
+
+ The prior pixels are unaffected; it is up to the caller to release pixels
+ memory if desired.
+ */
+ void reset();
+
+ /** Sets width, height, SkAlphaType, and SkColorType from info.
+ Sets pixel address from addr, which may be nullptr.
+ Sets row bytes from rowBytes, which should be info.width() times
+ info.bytesPerPixel(), or larger.
+
+ Does not check addr. Asserts if built with SK_DEBUG defined and if rowBytes is
+ too small to hold one row of pixels.
+
+        The memory lifetime of pixels is managed by the caller. When SkPixmap goes
+ out of scope, addr is unaffected.
+
+ @param info width, height, SkAlphaType, SkColorType of SkImageInfo
+ @param addr pointer to pixels allocated by caller; may be nullptr
+ @param rowBytes size of one row of addr; width times pixel size, or larger
+ */
+ void reset(const SkImageInfo& info, const void* addr, size_t rowBytes);
+
+ /** Changes SkColorSpace in SkImageInfo; preserves width, height, SkAlphaType, and
+ SkColorType in SkImage, and leaves pixel address and row bytes unchanged.
+ SkColorSpace reference count is incremented.
+
+ @param colorSpace SkColorSpace moved to SkImageInfo
+ */
+ void setColorSpace(sk_sp<SkColorSpace> colorSpace);
+
+ /** Deprecated.
+ */
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask);
+
+    /** Sets subset width, height, pixel address to intersection of SkPixmap with area,
+        if intersection is not empty; and returns true. Otherwise, leaves subset unchanged
+        and returns false.
+
+ Failing to read the return value generates a compile time warning.
+
+ @param subset storage for width, height, pixel address of intersection
+ @param area bounds to intersect with SkPixmap
+ @return true if intersection of SkPixmap and area is not empty
+ */
+ bool SK_WARN_UNUSED_RESULT extractSubset(SkPixmap* subset, const SkIRect& area) const;
+
+ /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace.
+
+ @return reference to SkImageInfo
+ */
+ const SkImageInfo& info() const { return fInfo; }
+
+ /** Returns row bytes, the interval from one pixel row to the next. Row bytes
+ is at least as large as: width() * info().bytesPerPixel().
+
+ Returns zero if colorType() is kUnknown_SkColorType.
+ It is up to the SkBitmap creator to ensure that row bytes is a useful value.
+
+ @return byte length of pixel row
+ */
+ size_t rowBytes() const { return fRowBytes; }
+
+ /** Returns pixel address, the base address corresponding to the pixel origin.
+
+ It is up to the SkPixmap creator to ensure that pixel address is a useful value.
+
+ @return pixel address
+ */
+ const void* addr() const { return fPixels; }
+
+ /** Returns pixel count in each pixel row. Should be equal or less than:
+ rowBytes() / info().bytesPerPixel().
+
+ @return pixel width in SkImageInfo
+ */
+ int width() const { return fInfo.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height in SkImageInfo
+ */
+ int height() const { return fInfo.height(); }
+
+ /**
+ * Return the dimensions of the pixmap (from its ImageInfo)
+ */
+ SkISize dimensions() const { return fInfo.dimensions(); }
+
+ /** Returns SkColorType, one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType, kRGB_888x_SkColorType,
+ kBGRA_8888_SkColorType, kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType.
+
+ @return SkColorType in SkImageInfo
+ */
+ SkColorType colorType() const { return fInfo.colorType(); }
+
+ /** Returns SkAlphaType, one of:
+ kUnknown_SkAlphaType, kOpaque_SkAlphaType, kPremul_SkAlphaType,
+ kUnpremul_SkAlphaType.
+
+ @return SkAlphaType in SkImageInfo
+ */
+ SkAlphaType alphaType() const { return fInfo.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ @return SkColorSpace in SkImageInfo, or nullptr
+ */
+ SkColorSpace* colorSpace() const { return fInfo.colorSpace(); }
+
+ /** Returns smart pointer to SkColorSpace, the range of colors, associated with
+ SkImageInfo. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace in SkImageInfo wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const { return fInfo.refColorSpace(); }
+
+ /** Returns true if SkAlphaType is kOpaque_SkAlphaType.
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkImageInfo has opaque SkAlphaType
+ */
+ bool isOpaque() const { return fInfo.isOpaque(); }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeWH(this->width(), this->height()); }
+
+ /** Returns number of pixels that fit on row. Should be greater than or equal to
+ width().
+
+ @return maximum pixels per row
+ */
+ int rowBytesAsPixels() const { return int(fRowBytes >> this->shiftPerPixel()); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fInfo.shiftPerPixel(); }
+
+ /** Returns minimum memory required for pixel storage.
+ Does not include unused memory on last row when rowBytesAsPixels() exceeds width().
+ Returns SIZE_MAX if result does not fit in size_t.
+ Returns zero if height() or width() is 0.
+ Returns height() times rowBytes() if colorType() is kUnknown_SkColorType.
+
+ @return size in bytes of image buffer
+ */
+ size_t computeByteSize() const { return fInfo.computeByteSize(fRowBytes); }
+
+ /** Returns true if all pixels are opaque. SkColorType determines how pixels
+ are encoded, and whether pixel describes alpha. Returns true for SkColorType
+ without alpha in each pixel; for other SkColorType, returns true if all
+ pixels have alpha values equivalent to 1.0 or greater.
+
+ For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always
+ returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255.
+ For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15.
+ For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or
+ greater.
+
+ Returns false for kUnknown_SkColorType.
+
+ @return true if all pixels have opaque values or SkColorType is opaque
+ */
+ bool computeIsOpaque() const;
+
+ /** Returns pixel at (x, y) as unpremultiplied color.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color; original pixel data may have additional
+ precision.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied color
+ */
+ SkColor getColor(int x, int y) const;
+
+ /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1].
+        This is roughly equivalent to SkColorGetA(getColor()), but can be more efficient
+ (and more precise if the pixels store more than 8 bits per component).
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return alpha converted to normalized float
+ */
+ float getAlphaf(int x, int y) const;
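+
+    /* A minimal readback sketch for an 8-bit color type, assuming (x, y) lies
+       inside bounds():
+
+           SkColor c = pixmap.getColor(3, 7);       // unpremultiplied ARGB
+           float   a = pixmap.getAlphaf(3, 7);      // alpha normalized to [0..1]
+           // for 8-bit formats these agree: a ~= SkColorGetA(c) / 255.0f
+    */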
+
+ /** Returns readable pixel address at (x, y). Returns nullptr if SkPixelRef is nullptr.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined. Returns nullptr if SkColorType is kUnknown_SkColorType.
+
+ Performs a lookup of pixel size; for better performance, call
+ one of: addr8, addr16, addr32, addr64, or addrF16().
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable generic pointer to pixel
+ */
+ const void* addr(int x, int y) const {
+ return (const char*)fPixels + fInfo.computeOffset(x, y, fRowBytes);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 8-bit bytes.
+ Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or
+ kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ One byte corresponds to one pixel.
+
+ @return readable unsigned 8-bit pointer to pixels
+ */
+ const uint8_t* addr8() const {
+ SkASSERT(1 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint8_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words.
+ Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or
+ kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 16-bit pointer to pixels
+ */
+ const uint16_t* addr16() const {
+ SkASSERT(2 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 32-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or
+ kBGRA_8888_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 32-bit pointer to pixels
+ */
+ const uint32_t* addr32() const {
+ SkASSERT(4 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint32_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 64-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 64-bit pointer to pixels
+ */
+ const uint64_t* addr64() const {
+ SkASSERT(8 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint64_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ Each word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @return readable unsigned 16-bit pointer to first component of pixels
+ */
+ const uint16_t* addrF16() const {
+ SkASSERT(8 == fInfo.bytesPerPixel());
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() ||
+ kRGBA_F16Norm_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or
+ kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 8-bit pointer to pixel at (x, y)
+ */
+ const uint8_t* addr8(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint8_t*)((const char*)this->addr8() + y * fRowBytes + (x << 0));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or
+ kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 16-bit pointer to pixel at (x, y)
+ */
+ const uint16_t* addr16(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint16_t*)((const char*)this->addr16() + y * fRowBytes + (x << 1));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or
+ kBGRA_8888_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 32-bit pointer to pixel at (x, y)
+ */
+ const uint32_t* addr32(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint32_t*)((const char*)this->addr32() + y * fRowBytes + (x << 2));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 64-bit pointer to pixel at (x, y)
+ */
+ const uint64_t* addr64(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint64_t*)((const char*)this->addr64() + y * fRowBytes + (x << 3));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ Each unsigned 16-bit word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 16-bit pointer to pixel component at (x, y)
+ */
+ const uint16_t* addrF16(int x, int y) const {
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() ||
+ kRGBA_F16Norm_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(this->addr64(x, y));
+ }
+
+ /** Returns writable base pixel address.
+
+ @return writable generic base pointer to pixels
+ */
+ void* writable_addr() const { return const_cast<void*>(fPixels); }
+
+ /** Returns writable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined. Returns zero if SkColorType is kUnknown_SkColorType.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable generic pointer to pixel
+ */
+ void* writable_addr(int x, int y) const {
+ return const_cast<void*>(this->addr(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 8-bit bytes. Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType
+ or kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ One byte corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 8-bit pointer to pixels
+ */
+ uint8_t* writable_addr8(int x, int y) const {
+ return const_cast<uint8_t*>(this->addr8(x, y));
+ }
+
+    /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 16-bit words. Will trigger an assert() if SkColorType is not kRGB_565_SkColorType
+ or kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 16-bit pointer to pixel
+ */
+ uint16_t* writable_addr16(int x, int y) const {
+ return const_cast<uint16_t*>(this->addr16(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 32-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_8888_SkColorType or kBGRA_8888_SkColorType, and is built with SK_DEBUG
+ defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 32-bit pointer to pixel
+ */
+ uint32_t* writable_addr32(int x, int y) const {
+ return const_cast<uint32_t*>(this->addr32(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 64-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_F16_SkColorType and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 64-bit pointer to pixel
+ */
+ uint64_t* writable_addr64(int x, int y) const {
+ return const_cast<uint64_t*>(this->addr64(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 16-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_F16_SkColorType and is built with SK_DEBUG defined.
+
+ Each word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 16-bit pointer to first component of pixel
+ */
+ uint16_t* writable_addrF16(int x, int y) const {
+ return reinterpret_cast<uint16_t*>(writable_addr64(x, y));
+ }
+
+ /** Copies a SkRect of pixels to dstPixels. Copy starts at (0, 0), and does not
+ exceed SkPixmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and
+        SkColorSpace of destination. dstRowBytes specifies the gap from one destination
+        row to the next. Returns true if pixels are copied. Returns false if
+        dstPixels equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes().
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ Returns false if SkPixmap width() or height() is zero or negative.
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes) const {
+ return this->readPixels(dstInfo, dstPixels, dstRowBytes, 0, 0);
+ }
+
+ /** Copies a SkRect of pixels to dstPixels. Copy starts at (srcX, srcY), and does not
+ exceed SkPixmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and
+        SkColorSpace of destination. dstRowBytes specifies the gap from one destination
+        row to the next. Returns true if pixels are copied. Returns false if
+        dstPixels equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes().
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if SkPixmap width() or height() is zero or negative. Returns false if:
+ abs(srcX) >= Pixmap width(), or if abs(srcY) >= Pixmap height().
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, int srcX,
+ int srcY) const;
+
+ /** Copies a SkRect of pixels to dst. Copy starts at (srcX, srcY), and does not
+ exceed SkPixmap (width(), height()). dst specifies width, height, SkColorType,
+ SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied.
+ Returns false if dst address equals nullptr, or dst.rowBytes() is less than
+ dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+        kGray_8_SkColorType or kAlpha_8_SkColorType, dst.info().colorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst.info().colorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst.info().alphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst.info().colorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+        false if SkPixmap width() or height() is zero or negative. Returns false if:
+        abs(srcX) >= SkPixmap width(), or if abs(srcY) >= SkPixmap height().
+
+ @param dst SkImageInfo and pixel address to write to
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY);
+ }
+
+ /** Copies pixels inside bounds() to dst. dst specifies width, height, SkColorType,
+ SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied.
+ Returns false if dst address equals nullptr, or dst.rowBytes() is less than
+ dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+        kGray_8_SkColorType or kAlpha_8_SkColorType, dst SkColorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ Returns false if SkPixmap width() or height() is zero or negative.
+
+ @param dst SkImageInfo and pixel address to write to
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), 0, 0);
+ }
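+
+    /* Editor's sketch (not upstream documentation): reading the full pixmap
+       into a caller-owned BGRA buffer; 'src' is an assumed valid SkPixmap.
+
+           SkImageInfo dstInfo = SkImageInfo::Make(src.width(), src.height(),
+                                                   kBGRA_8888_SkColorType,
+                                                   kPremul_SkAlphaType);
+           std::vector<uint32_t> buffer(src.width() * src.height());
+           bool ok = src.readPixels(dstInfo, buffer.data(), dstInfo.minRowBytes());
+    */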
+
+    /** Copies SkPixmap to dst, scaling pixels to fit dst.width() and dst.height(), and
+ converting pixels to match dst.colorType() and dst.alphaType(). Returns true if
+ pixels are copied. Returns false if dst address is nullptr, or dst.rowBytes() is
+ less than dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+        kGray_8_SkColorType or kAlpha_8_SkColorType, dst SkColorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+        Returns false if SkPixmap width() or height() is zero or negative.
+
+ Scales the image, with filterQuality, to match dst.width() and dst.height().
+ filterQuality kNone_SkFilterQuality is fastest, typically implemented with
+ nearest neighbor filter. kLow_SkFilterQuality is typically implemented with
+ bilerp filter. kMedium_SkFilterQuality is typically implemented with
+ bilerp filter, and mip-map filter when size is reduced.
+ kHigh_SkFilterQuality is slowest, typically implemented with bicubic filter.
+
+ @param dst SkImageInfo and pixel address to write to
+ @param filterQuality one of: kNone_SkFilterQuality, kLow_SkFilterQuality,
+ kMedium_SkFilterQuality, kHigh_SkFilterQuality
+ @return true if pixels are scaled to fit dst
+ */
+ bool scalePixels(const SkPixmap& dst, SkFilterQuality filterQuality) const;
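+
+    /* Editor's sketch: downscaling to half size with a medium filter.
+       'src' is an assumed valid SkPixmap; 'dstStorage' is assumed to be
+       caller-allocated memory of at least halfInfo.computeMinByteSize() bytes.
+
+           SkImageInfo halfInfo = src.info().makeWH(src.width() / 2,
+                                                    src.height() / 2);
+           SkPixmap dst(halfInfo, dstStorage, halfInfo.minRowBytes());
+           bool ok = src.scalePixels(dst, kMedium_SkFilterQuality);
+    */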
+
+ /** Writes color to pixels bounded by subset; returns true on success.
+ Returns false if colorType() is kUnknown_SkColorType, or if subset does
+ not intersect bounds().
+
+ @param color unpremultiplied color to write
+ @param subset bounding integer SkRect of written pixels
+ @return true if pixels are changed
+ */
+ bool erase(SkColor color, const SkIRect& subset) const;
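+
+    /* Editor's sketch: clearing a 16x16 region in the top-left corner of an
+       assumed valid 'pixmap' to opaque red.
+
+           bool ok = pixmap.erase(SK_ColorRED, SkIRect::MakeWH(16, 16));
+    */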
+
+ /** Writes color to pixels inside bounds(); returns true on success.
+ Returns false if colorType() is kUnknown_SkColorType, or if bounds()
+ is empty.
+
+ @param color unpremultiplied color to write
+ @return true if pixels are changed
+ */
+ bool erase(SkColor color) const { return this->erase(color, this->bounds()); }
+
+ /** Writes color to pixels bounded by subset; returns true on success.
+        If subset is nullptr, writes color to pixels inside bounds(). Returns false if
+ colorType() is kUnknown_SkColorType, if subset is not nullptr and does
+ not intersect bounds(), or if subset is nullptr and bounds() is empty.
+
+ @param color unpremultiplied color to write
+ @param subset bounding integer SkRect of pixels to write; may be nullptr
+ @return true if pixels are changed
+ */
+ bool erase(const SkColor4f& color, const SkIRect* subset = nullptr) const;
+
+private:
+ const void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+
+ friend class SkPixmapPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPngChunkReader.h b/gfx/skia/skia/include/core/SkPngChunkReader.h
new file mode 100644
index 0000000000..0ee8a9ecc7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPngChunkReader.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngChunkReader_DEFINED
+#define SkPngChunkReader_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * SkPngChunkReader
+ *
+ * Base class for optional callbacks to retrieve meta/chunk data out of a PNG
+ * encoded image as it is being decoded.
+ * Used by SkCodec.
+ */
+class SkPngChunkReader : public SkRefCnt {
+public:
+ /**
+ * This will be called by the decoder when it sees an unknown chunk.
+ *
+ * Use by SkCodec:
+ * Depending on the location of the unknown chunks, this callback may be
+ * called by
+ * - the factory (NewFromStream/NewFromData)
+ * - getPixels
+ * - startScanlineDecode
+ * - the first call to getScanlines/skipScanlines
+ * The callback may be called from a different thread (e.g. if the SkCodec
+ * is passed to another thread), and it may be called multiple times, if
+ * the SkCodec is used multiple times.
+ *
+ * @param tag Name for this type of chunk.
+ * @param data Data to be interpreted by the subclass.
+ * @param length Number of bytes of data in the chunk.
+ * @return true to continue decoding, or false to indicate an error, which
+ * will cause the decoder to not return the image.
+ */
+ virtual bool readChunk(const char tag[], const void* data, size_t length) = 0;
+};
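+
+/* Editor's sketch: a minimal reader that records PNG tEXt chunks; the class
+   name 'TextChunkReader' and the member 'fText' are hypothetical.
+
+       class TextChunkReader : public SkPngChunkReader {
+       public:
+           bool readChunk(const char tag[], const void* data, size_t length) override {
+               if (0 == strcmp(tag, "tEXt")) {
+                   fText.append(static_cast<const char*>(data), length);
+               }
+               return true;  // keep decoding
+           }
+       private:
+           SkString fText;
+       };
+*/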
+#endif // SkPngChunkReader_DEFINED
diff --git a/gfx/skia/skia/include/core/SkPoint.h b/gfx/skia/skia/include/core/SkPoint.h
new file mode 100644
index 0000000000..62516db4c4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint.h
@@ -0,0 +1,552 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint_DEFINED
+#define SkPoint_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkSafe32.h"
+
+struct SkIPoint;
+
+/** SkIVector provides an alternative name for SkIPoint. SkIVector and SkIPoint
+ can be used interchangeably for all purposes.
+*/
+typedef SkIPoint SkIVector;
+
+/** \struct SkIPoint
+ SkIPoint holds two 32-bit integer coordinates.
+*/
+struct SkIPoint {
+ int32_t fX; //!< x-axis value
+ int32_t fY; //!< y-axis value
+
+ /** Sets fX to x, fY to y.
+
+ @param x integer x-axis value of constructed SkIPoint
+ @param y integer y-axis value of constructed SkIPoint
+ @return SkIPoint (x, y)
+ */
+ static constexpr SkIPoint Make(int32_t x, int32_t y) {
+ return {x, y};
+ }
+
+ /** Returns x-axis value of SkIPoint.
+
+ @return fX
+ */
+ constexpr int32_t x() const { return fX; }
+
+ /** Returns y-axis value of SkIPoint.
+
+ @return fY
+ */
+ constexpr int32_t y() const { return fY; }
+
+ /** Returns true if fX and fY are both zero.
+
+ @return true if fX is zero and fY is zero
+ */
+ bool isZero() const { return (fX | fY) == 0; }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(int32_t x, int32_t y) {
+ fX = x;
+ fY = y;
+ }
+
+ /** Returns SkIPoint changing the signs of fX and fY.
+
+ @return SkIPoint as (-fX, -fY)
+ */
+ SkIPoint operator-() const {
+ return {-fX, -fY};
+ }
+
+ /** Offsets SkIPoint by ivector v. Sets SkIPoint to (fX + v.fX, fY + v.fY).
+
+ @param v ivector to add
+ */
+ void operator+=(const SkIVector& v) {
+ fX = Sk32_sat_add(fX, v.fX);
+ fY = Sk32_sat_add(fY, v.fY);
+ }
+
+ /** Subtracts ivector v from SkIPoint. Sets SkIPoint to: (fX - v.fX, fY - v.fY).
+
+ @param v ivector to subtract
+ */
+ void operator-=(const SkIVector& v) {
+ fX = Sk32_sat_sub(fX, v.fX);
+ fY = Sk32_sat_sub(fY, v.fY);
+ }
+
+ /** Returns true if SkIPoint is equivalent to SkIPoint constructed from (x, y).
+
+ @param x value compared with fX
+ @param y value compared with fY
+ @return true if SkIPoint equals (x, y)
+ */
+ bool equals(int32_t x, int32_t y) const {
+ return fX == x && fY == y;
+ }
+
+ /** Returns true if a is equivalent to b.
+
+ @param a SkIPoint to compare
+ @param b SkIPoint to compare
+ @return true if a.fX == b.fX and a.fY == b.fY
+ */
+ friend bool operator==(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ /** Returns true if a is not equivalent to b.
+
+ @param a SkIPoint to compare
+ @param b SkIPoint to compare
+ @return true if a.fX != b.fX or a.fY != b.fY
+ */
+ friend bool operator!=(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Returns ivector from b to a; computed as (a.fX - b.fX, a.fY - b.fY).
+
+ Can also be used to subtract ivector from ivector, returning ivector.
+
+ @param a SkIPoint or ivector to subtract from
+ @param b ivector to subtract
+ @return ivector from b to a
+ */
+ friend SkIVector operator-(const SkIPoint& a, const SkIPoint& b) {
+ return { Sk32_sat_sub(a.fX, b.fX), Sk32_sat_sub(a.fY, b.fY) };
+ }
+
+ /** Returns SkIPoint resulting from SkIPoint a offset by ivector b, computed as:
+ (a.fX + b.fX, a.fY + b.fY).
+
+ Can also be used to offset SkIPoint b by ivector a, returning SkIPoint.
+ Can also be used to add ivector to ivector, returning ivector.
+
+ @param a SkIPoint or ivector to add to
+ @param b SkIPoint or ivector to add
+ @return SkIPoint equal to a offset by b
+ */
+ friend SkIPoint operator+(const SkIPoint& a, const SkIVector& b) {
+ return { Sk32_sat_add(a.fX, b.fX), Sk32_sat_add(a.fY, b.fY) };
+ }
+};
+
+struct SkPoint;
+
+/** SkVector provides an alternative name for SkPoint. SkVector and SkPoint can
+ be used interchangeably for all purposes.
+*/
+typedef SkPoint SkVector;
+
+/** \struct SkPoint
+ SkPoint holds two 32-bit floating point coordinates.
+*/
+struct SK_API SkPoint {
+ SkScalar fX; //!< x-axis value
+ SkScalar fY; //!< y-axis value
+
+ /** Sets fX to x, fY to y. Used both to set SkPoint and vector.
+
+ @param x SkScalar x-axis value of constructed SkPoint or vector
+ @param y SkScalar y-axis value of constructed SkPoint or vector
+ @return SkPoint (x, y)
+ */
+ static constexpr SkPoint Make(SkScalar x, SkScalar y) {
+ return {x, y};
+ }
+
+ /** Returns x-axis value of SkPoint or vector.
+
+ @return fX
+ */
+ SkScalar x() const { return fX; }
+
+ /** Returns y-axis value of SkPoint or vector.
+
+ @return fY
+ */
+ SkScalar y() const { return fY; }
+
+ /** Returns true if fX and fY are both zero.
+
+ @return true if fX is zero and fY is zero
+ */
+ bool isZero() const { return (0 == fX) & (0 == fY); }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(SkScalar x, SkScalar y) {
+ fX = x;
+ fY = y;
+ }
+
+ /** Sets fX to x and fY to y, promoting integers to SkScalar values.
+
+ Assigning a large integer value directly to fX or fY may cause a compiler
+ error, triggered by narrowing conversion of int to SkScalar. This safely
+ casts x and y to avoid the error.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void iset(int32_t x, int32_t y) {
+ fX = SkIntToScalar(x);
+ fY = SkIntToScalar(y);
+ }
+
+ /** Sets fX to p.fX and fY to p.fY, promoting integers to SkScalar values.
+
+ Assigning an SkIPoint containing a large integer value directly to fX or fY may
+ cause a compiler error, triggered by narrowing conversion of int to SkScalar.
+ This safely casts p.fX and p.fY to avoid the error.
+
+ @param p SkIPoint members promoted to SkScalar
+ */
+ void iset(const SkIPoint& p) {
+ fX = SkIntToScalar(p.fX);
+ fY = SkIntToScalar(p.fY);
+ }
+
+ /** Sets fX to absolute value of pt.fX; and fY to absolute value of pt.fY.
+
+ @param pt members providing magnitude for fX and fY
+ */
+ void setAbs(const SkPoint& pt) {
+ fX = SkScalarAbs(pt.fX);
+ fY = SkScalarAbs(pt.fY);
+ }
+
+ /** Adds offset to each SkPoint in points array with count entries.
+
+ @param points SkPoint array
+ @param count entries in array
+ @param offset vector added to points
+ */
+ static void Offset(SkPoint points[], int count, const SkVector& offset) {
+ Offset(points, count, offset.fX, offset.fY);
+ }
+
+ /** Adds offset (dx, dy) to each SkPoint in points array of length count.
+
+ @param points SkPoint array
+ @param count entries in array
+ @param dx added to fX in points
+ @param dy added to fY in points
+ */
+ static void Offset(SkPoint points[], int count, SkScalar dx, SkScalar dy) {
+ for (int i = 0; i < count; ++i) {
+ points[i].offset(dx, dy);
+ }
+ }
+
+ /** Adds offset (dx, dy) to SkPoint.
+
+ @param dx added to fX
+ @param dy added to fY
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fX += dx;
+ fY += dy;
+ }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(fX * fX + fY * fY)
+
+ @return straight-line distance to origin
+ */
+ SkScalar length() const { return SkPoint::Length(fX, fY); }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(fX * fX + fY * fY)
+
+ @return straight-line distance to origin
+ */
+ SkScalar distanceToOrigin() const { return this->length(); }
+
+ /** Scales (fX, fY) so that length() returns one, while preserving ratio of fX to fY,
+ if possible. If prior length is nearly zero, sets vector to (0, 0) and returns
+ false; otherwise returns true.
+
+ @return true if former length is not zero or nearly zero
+ */
+ bool normalize();
+
+ /** Sets vector to (x, y) scaled so length() returns one, and so that
+ (fX, fY) is proportional to (x, y). If (x, y) length is nearly zero,
+ sets vector to (0, 0) and returns false; otherwise returns true.
+
+ @param x proportional value for fX
+ @param y proportional value for fY
+ @return true if (x, y) length is not zero or nearly zero
+ */
+ bool setNormalize(SkScalar x, SkScalar y);
+
+ /** Scales vector so that distanceToOrigin() returns length, if possible. If former
+        length is nearly zero, sets vector to (0, 0) and returns false; otherwise returns
+ true.
+
+ @param length straight-line distance to origin
+ @return true if former length is not zero or nearly zero
+ */
+ bool setLength(SkScalar length);
+
+ /** Sets vector to (x, y) scaled to length, if possible. If former
+        length is nearly zero, sets vector to (0, 0) and returns false; otherwise returns
+ true.
+
+ @param x proportional value for fX
+ @param y proportional value for fY
+ @param length straight-line distance to origin
+ @return true if (x, y) length is not zero or nearly zero
+ */
+ bool setLength(SkScalar x, SkScalar y, SkScalar length);
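+
+    /* Editor's sketch: building a unit direction vector from two points,
+       with a fallback when they nearly coincide.
+
+           SkVector dir = b - a;
+           if (!dir.normalize()) {
+               dir = {1, 0};  // degenerate input; pick an arbitrary direction
+           }
+    */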
+
+ /** Sets dst to SkPoint times scale. dst may be SkPoint to modify SkPoint in place.
+
+ @param scale factor to multiply SkPoint by
+ @param dst storage for scaled SkPoint
+ */
+ void scale(SkScalar scale, SkPoint* dst) const;
+
+ /** Scales SkPoint in place by scale.
+
+ @param value factor to multiply SkPoint by
+ */
+ void scale(SkScalar value) { this->scale(value, this); }
+
+ /** Changes the sign of fX and fY.
+ */
+ void negate() {
+ fX = -fX;
+ fY = -fY;
+ }
+
+ /** Returns SkPoint changing the signs of fX and fY.
+
+ @return SkPoint as (-fX, -fY)
+ */
+ SkPoint operator-() const {
+ return {-fX, -fY};
+ }
+
+ /** Adds vector v to SkPoint. Sets SkPoint to: (fX + v.fX, fY + v.fY).
+
+ @param v vector to add
+ */
+ void operator+=(const SkVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ /** Subtracts vector v from SkPoint. Sets SkPoint to: (fX - v.fX, fY - v.fY).
+
+ @param v vector to subtract
+ */
+ void operator-=(const SkVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ /** Returns SkPoint multiplied by scale.
+
+ @param scale scalar to multiply by
+ @return SkPoint as (fX * scale, fY * scale)
+ */
+ SkPoint operator*(SkScalar scale) const {
+ return {fX * scale, fY * scale};
+ }
+
+ /** Multiplies SkPoint by scale. Sets SkPoint to: (fX * scale, fY * scale).
+
+ @param scale scalar to multiply by
+ @return reference to SkPoint
+ */
+ SkPoint& operator*=(SkScalar scale) {
+ fX *= scale;
+ fY *= scale;
+ return *this;
+ }
+
+ /** Returns true if both fX and fY are measurable values.
+
+ @return true for values other than infinities and NaN
+ */
+ bool isFinite() const {
+ SkScalar accum = 0;
+ accum *= fX;
+ accum *= fY;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns true if SkPoint is equivalent to SkPoint constructed from (x, y).
+
+ @param x value compared with fX
+ @param y value compared with fY
+ @return true if SkPoint equals (x, y)
+ */
+ bool equals(SkScalar x, SkScalar y) const {
+ return fX == x && fY == y;
+ }
+
+ /** Returns true if a is equivalent to b.
+
+ @param a SkPoint to compare
+ @param b SkPoint to compare
+ @return true if a.fX == b.fX and a.fY == b.fY
+ */
+ friend bool operator==(const SkPoint& a, const SkPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ /** Returns true if a is not equivalent to b.
+
+ @param a SkPoint to compare
+ @param b SkPoint to compare
+ @return true if a.fX != b.fX or a.fY != b.fY
+ */
+ friend bool operator!=(const SkPoint& a, const SkPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Returns vector from b to a, computed as (a.fX - b.fX, a.fY - b.fY).
+
+ Can also be used to subtract vector from SkPoint, returning SkPoint.
+ Can also be used to subtract vector from vector, returning vector.
+
+ @param a SkPoint to subtract from
+ @param b SkPoint to subtract
+ @return vector from b to a
+ */
+ friend SkVector operator-(const SkPoint& a, const SkPoint& b) {
+ return {a.fX - b.fX, a.fY - b.fY};
+ }
+
+ /** Returns SkPoint resulting from SkPoint a offset by vector b, computed as:
+ (a.fX + b.fX, a.fY + b.fY).
+
+ Can also be used to offset SkPoint b by vector a, returning SkPoint.
+ Can also be used to add vector to vector, returning vector.
+
+ @param a SkPoint or vector to add to
+ @param b SkPoint or vector to add
+ @return SkPoint equal to a offset by b
+ */
+ friend SkPoint operator+(const SkPoint& a, const SkVector& b) {
+ return {a.fX + b.fX, a.fY + b.fY};
+ }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(x * x + y * y)
+
+ @param x component of length
+ @param y component of length
+ @return straight-line distance to origin
+ */
+ static SkScalar Length(SkScalar x, SkScalar y);
+
+ /** Scales (vec->fX, vec->fY) so that length() returns one, while preserving ratio of vec->fX
+ to vec->fY, if possible. If original length is nearly zero, sets vec to (0, 0) and returns
+ zero; otherwise, returns length of vec before vec is scaled.
+
+        Returned prior length may be SK_ScalarInfinity if it cannot be represented by SkScalar.
+
+ Note that normalize() is faster if prior length is not required.
+
+ @param vec normalized to unit length
+ @return original vec length
+ */
+ static SkScalar Normalize(SkVector* vec);
+
+ /** Returns the Euclidean distance between a and b.
+
+ @param a line end point
+ @param b line end point
+ @return straight-line distance from a to b
+ */
+ static SkScalar Distance(const SkPoint& a, const SkPoint& b) {
+ return Length(a.fX - b.fX, a.fY - b.fY);
+ }
+
+ /** Returns the dot product of vector a and vector b.
+
+ @param a left side of dot product
+ @param b right side of dot product
+ @return product of input magnitudes and cosine of the angle between them
+ */
+ static SkScalar DotProduct(const SkVector& a, const SkVector& b) {
+ return a.fX * b.fX + a.fY * b.fY;
+ }
+
+ /** Returns the cross product of vector a and vector b.
+
+ a and b form three-dimensional vectors with z-axis value equal to zero. The
+ cross product is a three-dimensional vector with x-axis and y-axis values equal
+ to zero. The cross product z-axis component is returned.
+
+ @param a left side of cross product
+ @param b right side of cross product
+ @return area spanned by vectors signed by angle direction
+ */
+ static SkScalar CrossProduct(const SkVector& a, const SkVector& b) {
+ return a.fX * b.fY - a.fY * b.fX;
+ }
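+
+    /* Editor's sketch: the sign of the cross product gives the turn direction
+       at b along the path a -> b -> c (which sign means which direction
+       depends on the y-axis orientation).
+
+           SkVector ab = b - a;
+           SkVector bc = c - b;
+           bool turnsOneWay = SkPoint::CrossProduct(ab, bc) > 0;
+    */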
+
+ /** Returns the cross product of vector and vec.
+
+ Vector and vec form three-dimensional vectors with z-axis value equal to zero.
+ The cross product is a three-dimensional vector with x-axis and y-axis values
+ equal to zero. The cross product z-axis component is returned.
+
+ @param vec right side of cross product
+ @return area spanned by vectors signed by angle direction
+ */
+ SkScalar cross(const SkVector& vec) const {
+ return CrossProduct(*this, vec);
+ }
+
+ /** Returns the dot product of vector and vector vec.
+
+ @param vec right side of dot product
+ @return product of input magnitudes and cosine of the angle between them
+ */
+ SkScalar dot(const SkVector& vec) const {
+ return DotProduct(*this, vec);
+ }
+
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPoint3.h b/gfx/skia/skia/include/core/SkPoint3.h
new file mode 100644
index 0000000000..15b082a427
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint3.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint3_DEFINED
+#define SkPoint3_DEFINED
+
+#include "include/core/SkPoint.h"
+
+struct SK_API SkPoint3 {
+ SkScalar fX, fY, fZ;
+
+ static SkPoint3 Make(SkScalar x, SkScalar y, SkScalar z) {
+ SkPoint3 pt;
+ pt.set(x, y, z);
+ return pt;
+ }
+
+ SkScalar x() const { return fX; }
+ SkScalar y() const { return fY; }
+ SkScalar z() const { return fZ; }
+
+ void set(SkScalar x, SkScalar y, SkScalar z) { fX = x; fY = y; fZ = z; }
+
+ friend bool operator==(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX == b.fX && a.fY == b.fY && a.fZ == b.fZ;
+ }
+
+ friend bool operator!=(const SkPoint3& a, const SkPoint3& b) {
+ return !(a == b);
+ }
+
+    /** Returns the Euclidean distance from (0,0,0) to (x,y,z)
+ */
+ static SkScalar Length(SkScalar x, SkScalar y, SkScalar z);
+
+    /** Returns the Euclidean distance from (0,0,0) to the point
+ */
+ SkScalar length() const { return SkPoint3::Length(fX, fY, fZ); }
+
+ /** Set the point (vector) to be unit-length in the same direction as it
+ already points. If the point has a degenerate length (i.e., nearly 0)
+ then set it to (0,0,0) and return false; otherwise return true.
+ */
+ bool normalize();
+
+ /** Return a new point whose X, Y and Z coordinates are scaled.
+ */
+ SkPoint3 makeScale(SkScalar scale) const {
+ SkPoint3 p;
+ p.set(scale * fX, scale * fY, scale * fZ);
+ return p;
+ }
+
+ /** Scale the point's coordinates by scale.
+ */
+ void scale(SkScalar value) {
+ fX *= value;
+ fY *= value;
+ fZ *= value;
+ }
+
+ /** Return a new point whose X, Y and Z coordinates are the negative of the
+ original point's
+ */
+ SkPoint3 operator-() const {
+ SkPoint3 neg;
+ neg.fX = -fX;
+ neg.fY = -fY;
+ neg.fZ = -fZ;
+ return neg;
+ }
+
+ /** Returns a new point whose coordinates are the difference between
+ a and b (i.e., a - b)
+ */
+ friend SkPoint3 operator-(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 v;
+ v.set(a.fX - b.fX, a.fY - b.fY, a.fZ - b.fZ);
+ return v;
+ }
+
+ /** Returns a new point whose coordinates are the sum of a and b (a + b)
+ */
+ friend SkPoint3 operator+(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 v;
+ v.set(a.fX + b.fX, a.fY + b.fY, a.fZ + b.fZ);
+ return v;
+ }
+
+ /** Add v's coordinates to the point's
+ */
+ void operator+=(const SkPoint3& v) {
+ fX += v.fX;
+ fY += v.fY;
+ fZ += v.fZ;
+ }
+
+ /** Subtract v's coordinates from the point's
+ */
+ void operator-=(const SkPoint3& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ fZ -= v.fZ;
+ }
+
+ /** Returns true if fX, fY, and fZ are measurable values.
+
+ @return true for values other than infinities and NaN
+ */
+ bool isFinite() const {
+ SkScalar accum = 0;
+ accum *= fX;
+ accum *= fY;
+ accum *= fZ;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns the dot product of a and b, treating them as 3D vectors
+ */
+ static SkScalar DotProduct(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ;
+ }
+
+ SkScalar dot(const SkPoint3& vec) const {
+ return DotProduct(*this, vec);
+ }
+
+ /** Returns the cross product of a and b, treating them as 3D vectors
+ */
+ static SkPoint3 CrossProduct(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 result;
+ result.fX = a.fY*b.fZ - a.fZ*b.fY;
+ result.fY = a.fZ*b.fX - a.fX*b.fZ;
+ result.fZ = a.fX*b.fY - a.fY*b.fX;
+
+ return result;
+ }
+
+ SkPoint3 cross(const SkPoint3& vec) const {
+ return CrossProduct(*this, vec);
+ }
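+
+    /* Editor's sketch: computing a unit face normal from triangle vertices
+       p0, p1, p2.
+
+           SkPoint3 n = SkPoint3::CrossProduct(p1 - p0, p2 - p0);
+           if (!n.normalize()) {
+               // degenerate triangle; no well-defined normal
+           }
+    */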
+};
+
+typedef SkPoint3 SkVector3;
+typedef SkPoint3 SkColor3f;
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPostConfig.h b/gfx/skia/skia/include/core/SkPostConfig.h
new file mode 100644
index 0000000000..d314aea758
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPostConfig.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// IWYU pragma: private, include "SkTypes.h"
+
+#ifndef SkPostConfig_DEFINED
+#define SkPostConfig_DEFINED
+
+#if !defined(SK_DEBUG) && !defined(SK_RELEASE)
+ #ifdef NDEBUG
+ #define SK_RELEASE
+ #else
+ #define SK_DEBUG
+ #endif
+#endif
+
+#if defined(SK_DEBUG) && defined(SK_RELEASE)
+# error "cannot define both SK_DEBUG and SK_RELEASE"
+#elif !defined(SK_DEBUG) && !defined(SK_RELEASE)
+# error "must define either SK_DEBUG or SK_RELEASE"
+#endif
+
+/**
+ * Matrix calculations may be float or double.
+ * The default is float, as that's what Chromium's using.
+ */
+#if defined(SK_MSCALAR_IS_DOUBLE) && defined(SK_MSCALAR_IS_FLOAT)
+# error "cannot define both SK_MSCALAR_IS_DOUBLE and SK_MSCALAR_IS_FLOAT"
+#elif !defined(SK_MSCALAR_IS_DOUBLE) && !defined(SK_MSCALAR_IS_FLOAT)
+# define SK_MSCALAR_IS_FLOAT
+#endif
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+# error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN"
+#elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+# error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN"
+#endif
+
+#if defined(SK_CPU_BENDIAN) && !defined(I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN)
+ #error "The Skia team is not endian-savvy enough to support big-endian CPUs."
+ #error "If you still want to use Skia,"
+ #error "please define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN."
+#endif
+
+#if !defined(SK_HAS_COMPILER_FEATURE)
+# if defined(__has_feature)
+# define SK_HAS_COMPILER_FEATURE(x) __has_feature(x)
+# else
+# define SK_HAS_COMPILER_FEATURE(x) 0
+# endif
+#endif
+
+#if !defined(SK_ATTRIBUTE)
+# if defined(__clang__) || defined(__GNUC__)
+# define SK_ATTRIBUTE(attr) __attribute__((attr))
+# else
+# define SK_ATTRIBUTE(attr)
+# endif
+#endif
+
+#if !defined(SK_SUPPORT_GPU)
+# define SK_SUPPORT_GPU 1
+#endif
+
+/**
+ * If GPU is enabled but no GPU backends are enabled then enable GL by default.
+ * Traditionally clients have relied on Skia always building with the GL backend
+ * and opting in to additional backends. TODO: Require explicit opt in for GL.
+ */
+#if SK_SUPPORT_GPU
+# if !defined(SK_GL) && !defined(SK_VULKAN) && !defined(SK_METAL)
+# define SK_GL
+# endif
+#endif
+
+#if !defined(SK_SUPPORT_ATLAS_TEXT)
+# define SK_SUPPORT_ATLAS_TEXT 0
+#elif SK_SUPPORT_ATLAS_TEXT && !SK_SUPPORT_GPU
+# error "SK_SUPPORT_ATLAS_TEXT requires SK_SUPPORT_GPU"
+#endif
+
+/**
+ * The clang static analyzer likes to know that when the program is not
+ * expected to continue (crash, assertion failure, etc). It will notice that
+ * some combination of parameters lead to a function call that does not return.
+ * It can then make appropriate assumptions about the parameters in code
+ * executed only if the non-returning function was *not* called.
+ */
+#if !defined(SkNO_RETURN_HINT)
+# if SK_HAS_COMPILER_FEATURE(attribute_analyzer_noreturn)
+ static inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
+ static inline void SkNO_RETURN_HINT() {}
+# else
+# define SkNO_RETURN_HINT() do {} while (false)
+# endif
+#endif
+
+#if !defined(SkUNREACHABLE)
+# if defined(_MSC_VER) && !defined(__clang__)
+# define SkUNREACHABLE __assume(false)
+# else
+# define SkUNREACHABLE __builtin_unreachable()
+# endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ void SkDebugfForDumpStackTrace(const char* data, void* unused);
+ void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg);
+# define SK_DUMP_GOOGLE3_STACK() DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr)
+#else
+# define SK_DUMP_GOOGLE3_STACK()
+#endif
+
+#ifdef SK_BUILD_FOR_WIN
+// permits visual studio to follow error back to source
+#define SK_DUMP_LINE_FORMAT(message) \
+ SkDebugf("%s(%d): fatal error: \"%s\"\n", __FILE__, __LINE__, message)
+#else
+#define SK_DUMP_LINE_FORMAT(message) \
+ SkDebugf("%s:%d: fatal error: \"%s\"\n", __FILE__, __LINE__, message)
+#endif
+
+#ifndef SK_ABORT
+# define SK_ABORT(message) \
+ do { if (sk_abort_is_enabled()) { \
+ SkNO_RETURN_HINT(); \
+ SK_DUMP_LINE_FORMAT(message); \
+ SK_DUMP_GOOGLE3_STACK(); \
+ sk_abort_no_print(); \
+ SkUNREACHABLE; \
+ } } while (false)
+#endif
+
+// If SK_R32_SHIFT is set, we'll use that to choose RGBA or BGRA.
+// If not, we'll default to RGBA everywhere except BGRA on Windows.
+#if defined(SK_R32_SHIFT)
+ static_assert(SK_R32_SHIFT == 0 || SK_R32_SHIFT == 16, "");
+#elif defined(SK_BUILD_FOR_WIN)
+ #define SK_R32_SHIFT 16
+#else
+ #define SK_R32_SHIFT 0
+#endif
+
+#if defined(SK_B32_SHIFT)
+ static_assert(SK_B32_SHIFT == (16-SK_R32_SHIFT), "");
+#else
+ #define SK_B32_SHIFT (16-SK_R32_SHIFT)
+#endif
+
+#define SK_G32_SHIFT 8
+#define SK_A32_SHIFT 24
+
+/**
+ * SkColor has well defined shift values, but SkPMColor is configurable. This
+ * macro is a convenience that returns true if the shift values are equal while
+ * ignoring the machine's endianness.
+ */
+#define SK_COLOR_MATCHES_PMCOLOR_BYTE_ORDER \
+ (SK_A32_SHIFT == 24 && SK_R32_SHIFT == 16 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 0)
+
+/**
+ * SK_PMCOLOR_BYTE_ORDER can be used to query the byte order of SkPMColor at compile time. The
+ * relationship between the byte order and shift values depends on machine endianness. If the shift
+ * order is R=0, G=8, B=16, A=24 then ((char*)&pmcolor)[0] will produce the R channel on a little
+ * endian machine and the A channel on a big endian machine. Thus, given those shifts values,
+ * SK_PMCOLOR_BYTE_ORDER(R,G,B,A) will be true on a little endian machine and
+ * SK_PMCOLOR_BYTE_ORDER(A,B,G,R) will be true on a big endian machine.
+ */
+#ifdef SK_CPU_BENDIAN
+# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \
+ (SK_ ## C3 ## 32_SHIFT == 0 && \
+ SK_ ## C2 ## 32_SHIFT == 8 && \
+ SK_ ## C1 ## 32_SHIFT == 16 && \
+ SK_ ## C0 ## 32_SHIFT == 24)
+#else
+# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \
+ (SK_ ## C0 ## 32_SHIFT == 0 && \
+ SK_ ## C1 ## 32_SHIFT == 8 && \
+ SK_ ## C2 ## 32_SHIFT == 16 && \
+ SK_ ## C3 ## 32_SHIFT == 24)
+#endif
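+
+// Editor's sketch: selecting a code path from the SkPMColor byte order at
+// compile time.
+//
+//     #if SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+//         // bytes in memory are R, G, B, A
+//     #elif SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+//         // bytes in memory are B, G, R, A
+//     #endif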
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined SK_DEBUG && defined SK_BUILD_FOR_WIN
+ #ifdef free
+ #undef free
+ #endif
+ #include <crtdbg.h>
+ #undef free
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_UNUSED)
+# if !defined(__clang__) && defined(_MSC_VER)
+# define SK_UNUSED __pragma(warning(suppress:4189))
+# else
+# define SK_UNUSED SK_ATTRIBUTE(unused)
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_ALWAYS_INLINE to force inlining. E.g.
+ * inline void someMethod() { ... } // may not be inlined
+ * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined
+ */
+#if !defined(SK_ALWAYS_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_ALWAYS_INLINE __forceinline
+# else
+# define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_NEVER_INLINE to prevent inlining.
+ */
+#if !defined(SK_NEVER_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_NEVER_INLINE __declspec(noinline)
+# else
+# define SK_NEVER_INLINE SK_ATTRIBUTE(noinline)
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #define SK_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0)
+ #define SK_WRITE_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0)
+#elif defined(__GNUC__)
+ #define SK_PREFETCH(ptr) __builtin_prefetch(ptr)
+ #define SK_WRITE_PREFETCH(ptr) __builtin_prefetch(ptr, 1)
+#else
+ #define SK_PREFETCH(ptr)
+ #define SK_WRITE_PREFETCH(ptr)
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_PRINTF_LIKE
+# if defined(__clang__) || defined(__GNUC__)
+# define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
+# else
+# define SK_PRINTF_LIKE(A, B)
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_SIZE_T_SPECIFIER
+# if defined(_MSC_VER) && !defined(__clang__)
+# define SK_SIZE_T_SPECIFIER "%Iu"
+# else
+# define SK_SIZE_T_SPECIFIER "%zu"
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_GAMMA_EXPONENT)
+ #define SK_GAMMA_EXPONENT (0.0f) // SRGB
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef GR_TEST_UTILS
+# define GR_TEST_UTILS 0
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if defined(SK_HISTOGRAM_ENUMERATION) && defined(SK_HISTOGRAM_BOOLEAN)
+# define SK_HISTOGRAMS_ENABLED 1
+#else
+# define SK_HISTOGRAMS_ENABLED 0
+#endif
+
+#ifndef SK_HISTOGRAM_BOOLEAN
+# define SK_HISTOGRAM_BOOLEAN(name, value)
+#endif
+
+#ifndef SK_HISTOGRAM_ENUMERATION
+# define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
+#endif
+
+#ifndef SK_DISABLE_LEGACY_SHADERCONTEXT
+#define SK_ENABLE_LEGACY_SHADERCONTEXT
+#endif
+
+#endif // SkPostConfig_DEFINED
diff --git a/gfx/skia/skia/include/core/SkPreConfig.h b/gfx/skia/skia/include/core/SkPreConfig.h
new file mode 100644
index 0000000000..923d9650de
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPreConfig.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// IWYU pragma: private, include "SkTypes.h"
+
+#ifndef SkPreConfig_DEFINED
+#define SkPreConfig_DEFINED
+
+// Allows embedders that want to disable macros that take arguments to just
+// define that symbol to be one of these
+#define SK_NOTHING_ARG1(arg1)
+#define SK_NOTHING_ARG2(arg1, arg2)
+#define SK_NOTHING_ARG3(arg1, arg2, arg3)
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN) && \
+ !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC)
+
+ #ifdef __APPLE__
+ #include "TargetConditionals.h"
+ #endif
+
+ #if defined(_WIN32) || defined(__SYMBIAN32__)
+ #define SK_BUILD_FOR_WIN
+ #elif defined(ANDROID) || defined(__ANDROID__)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__Fuchsia__) || \
+ defined(__GLIBC__) || defined(__GNU__) || defined(__unix__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+ #else
+ #define SK_BUILD_FOR_MAC
+ #endif
+
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if defined(SK_BUILD_FOR_WIN) && !defined(__clang__)
+ #if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict
+ #endif
+ #if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT
+ #endif
+#endif
+
+#if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict__
+#endif
+
+#if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+ #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ #define SK_CPU_BENDIAN
+ #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ #define SK_CPU_LENDIAN
+ #elif defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || \
+ defined(__ppc__) || defined(__hppa) || \
+ defined(__PPC__) || defined(__PPC64__) || \
+ defined(_MIPSEB) || defined(__ARMEB__) || \
+ defined(__s390__) || \
+ (defined(__sh__) && defined(__BIG_ENDIAN__)) || \
+ (defined(__ia64) && defined(__BIG_ENDIAN__))
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+ #endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+ #define SK_CPU_X86 1
+#endif
+
+/**
+ * SK_CPU_SSE_LEVEL
+ *
+ * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level.
+ * On non-intel CPU this should be undefined.
+ */
+
+#define SK_CPU_SSE_LEVEL_SSE1 10
+#define SK_CPU_SSE_LEVEL_SSE2 20
+#define SK_CPU_SSE_LEVEL_SSE3 30
+#define SK_CPU_SSE_LEVEL_SSSE3 31
+#define SK_CPU_SSE_LEVEL_SSE41 41
+#define SK_CPU_SSE_LEVEL_SSE42 42
+#define SK_CPU_SSE_LEVEL_AVX 51
+#define SK_CPU_SSE_LEVEL_AVX2 52
+#define SK_CPU_SSE_LEVEL_AVX512 60
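+
+// Editor's sketch: guarding a vectorized code path on the detected level.
+//
+//     #if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+//         // SSSE3 implementation
+//     #else
+//         // portable fallback
+//     #endif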
+
+// When targeting iOS and using gyp to generate the build files, it is not
+// possible to select files to build depending on the architecture (i.e. it
+// is not possible to use hand-optimized assembly implementations). In that
+// configuration SK_BUILD_NO_OPTS is defined, so optimizations are disabled here.
+#ifdef SK_BUILD_NO_OPTS
+ #define SK_CPU_SSE_LEVEL 0
+#endif
+
+// Are we in GCC/Clang?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level.
+ #if defined(__AVX512F__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX512
+ #elif defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(__SSE4_2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42
+ #elif defined(__SSE4_1__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41
+ #elif defined(__SSSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3
+ #elif defined(__SSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3
+ #elif defined(__SSE2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #endif
+#endif
+
+// Are we in VisualStudio?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+    // available SSE level. 64-bit Intel guarantees at least SSE2 support.
+ #if defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(_M_X64) || defined(_M_AMD64)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif defined(_M_IX86_FP)
+ #if _M_IX86_FP >= 2
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif _M_IX86_FP == 1
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1
+ #endif
+ #endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+// ARM defines
+
+#if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR)
+ #define SK_CPU_ARM32
+#elif defined(__aarch64__) && !defined(SK_BUILD_NO_OPTS)
+ #define SK_CPU_ARM64
+#endif
+
+// All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too.
+#if !defined(SK_ARM_HAS_NEON) && !defined(SK_BUILD_NO_OPTS) && defined(__ARM_NEON)
+ #define SK_ARM_HAS_NEON
+#endif
+
+// Really this __APPLE__ check shouldn't be necessary, but it seems that Apple's Clang defines
+// __ARM_FEATURE_CRC32 for -arch arm64, even though their chips don't support those instructions!
+#if defined(__ARM_FEATURE_CRC32) && !defined(__APPLE__)
+ #define SK_ARM_HAS_CRC32
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SKIA_IMPLEMENTATION)
+ #define SKIA_IMPLEMENTATION 0
+#endif
+
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPromiseImageTexture.h b/gfx/skia/skia/include/core/SkPromiseImageTexture.h
new file mode 100644
index 0000000000..467e501bee
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPromiseImageTexture.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPromiseImageTexture_DEFINED
+#define SkPromiseImageTexture_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/private/GrResourceKey.h"
+
+#if SK_SUPPORT_GPU
+/**
+ * This type is used to fulfill textures for PromiseImages. Once an instance is returned from a
+ * PromiseImageTextureFulfillProc it must remain valid until the corresponding
+ * PromiseImageTextureReleaseProc is called. For performance reasons it is recommended that the
+ * the client reuse a single PromiseImageTexture every time a given texture is returned by
+ * the PromiseImageTextureFulfillProc rather than recreating PromiseImageTextures representing
+ * the same underlying backend API texture.
+ */
+class SK_API SkPromiseImageTexture : public SkNVRefCnt<SkPromiseImageTexture> {
+public:
+ SkPromiseImageTexture() = delete;
+ SkPromiseImageTexture(const SkPromiseImageTexture&) = delete;
+ SkPromiseImageTexture(SkPromiseImageTexture&&) = delete;
+ ~SkPromiseImageTexture();
+ SkPromiseImageTexture& operator=(const SkPromiseImageTexture&) = delete;
+ SkPromiseImageTexture& operator=(SkPromiseImageTexture&&) = delete;
+
+ static sk_sp<SkPromiseImageTexture> Make(const GrBackendTexture& backendTexture) {
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+ return sk_sp<SkPromiseImageTexture>(new SkPromiseImageTexture(backendTexture));
+ }
+
+ const GrBackendTexture& backendTexture() const { return fBackendTexture; }
+
+ void addKeyToInvalidate(uint32_t contextID, const GrUniqueKey& key);
+ uint32_t uniqueID() const { return fUniqueID; }
+
+#if GR_TEST_UTILS
+ SkTArray<GrUniqueKey> testingOnly_uniqueKeysToInvalidate() const;
+#endif
+
+private:
+ explicit SkPromiseImageTexture(const GrBackendTexture& backendTexture);
+
+ SkSTArray<1, GrUniqueKeyInvalidatedMessage> fMessages;
+ GrBackendTexture fBackendTexture;
+ uint32_t fUniqueID = SK_InvalidUniqueID;
+ static std::atomic<uint32_t> gUniqueID;
+};
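+
+/* Editor's sketch of the recommended reuse pattern: wrap a given backend
+   texture once and return the same wrapper from every fulfill call. The
+   cache member 'fCachedPromiseTexture' is hypothetical.
+
+       sk_sp<SkPromiseImageTexture> fulfill(const GrBackendTexture& tex) {
+           if (!fCachedPromiseTexture) {
+               fCachedPromiseTexture = SkPromiseImageTexture::Make(tex);
+           }
+           return fCachedPromiseTexture;
+       }
+*/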
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRRect.h b/gfx/skia/skia/include/core/SkRRect.h
new file mode 100644
index 0000000000..7bc838784a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRRect.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRect_DEFINED
+#define SkRRect_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkPath;
+class SkMatrix;
+
+/** \class SkRRect
+ SkRRect describes a rounded rectangle with a bounds and a pair of radii for each corner.
+ The bounds and radii can be set so that SkRRect describes: a rectangle with sharp corners;
+ a circle; an oval; or a rectangle with one or more rounded corners.
+
+ SkRRect allows implementing CSS properties that describe rounded corners.
+ SkRRect may have up to eight different radii, one for each axis on each of its four
+ corners.
+
+ SkRRect may modify the provided parameters when initializing bounds and radii.
+    If either axis radius is zero or less, both radii for that corner are stored as
+    zero and the corner is square.
+ If corner curves overlap, radii are proportionally reduced to fit within bounds.
+*/
+class SK_API SkRRect {
+public:
+
+ /** Initializes bounds at (0, 0), the origin, with zero width and height.
+ Initializes corner radii to (0, 0), and sets type of kEmpty_Type.
+
+ @return empty SkRRect
+ */
+ SkRRect() = default;
+
+ /** Initializes to copy of rrect bounds and corner radii.
+
+ @param rrect bounds and corner to copy
+ @return copy of rrect
+ */
+ SkRRect(const SkRRect& rrect) = default;
+
+ /** Copies rrect bounds and corner radii.
+
+ @param rrect bounds and corner to copy
+ @return copy of rrect
+ */
+ SkRRect& operator=(const SkRRect& rrect) = default;
+
+ /** \enum SkRRect::Type
+ Type describes possible specializations of SkRRect. Each Type is
+ exclusive; a SkRRect may only have one type.
+
+ Type members become progressively less restrictive; larger values of
+ Type have more degrees of freedom than smaller values.
+ */
+ enum Type {
+ kEmpty_Type, //!< zero width or height
+ kRect_Type, //!< non-zero width and height, and zeroed radii
+ kOval_Type, //!< non-zero width and height filled with radii
+ kSimple_Type, //!< non-zero width and height with equal radii
+ kNinePatch_Type, //!< non-zero width and height with axis-aligned radii
+ kComplex_Type, //!< non-zero width and height with arbitrary radii
+ kLastType = kComplex_Type, //!< largest Type value
+ };
+
+ /** Returns SkRRect::Type, one of:
+ kEmpty_Type, kRect_Type, kOval_Type, kSimple_Type, kNinePatch_Type,
+ kComplex_Type.
+
+ @return SkRRect::Type
+ */
+ Type getType() const {
+ SkASSERT(this->isValid());
+ return static_cast<Type>(fType);
+ }
+
+ /** Returns SkRRect::Type, one of:
+ kEmpty_Type, kRect_Type, kOval_Type, kSimple_Type, kNinePatch_Type,
+ kComplex_Type.
+
+ @return SkRRect::Type
+ */
+ Type type() const { return this->getType(); }
+
+ inline bool isEmpty() const { return kEmpty_Type == this->getType(); }
+ inline bool isRect() const { return kRect_Type == this->getType(); }
+ inline bool isOval() const { return kOval_Type == this->getType(); }
+ inline bool isSimple() const { return kSimple_Type == this->getType(); }
+ inline bool isNinePatch() const { return kNinePatch_Type == this->getType(); }
+ inline bool isComplex() const { return kComplex_Type == this->getType(); }
+
+ /** Returns span on the x-axis. This does not check if result fits in 32-bit float;
+ result may be infinity.
+
+ @return rect().fRight minus rect().fLeft
+ */
+ SkScalar width() const { return fRect.width(); }
+
+ /** Returns span on the y-axis. This does not check if result fits in 32-bit float;
+ result may be infinity.
+
+ @return rect().fBottom minus rect().fTop
+ */
+ SkScalar height() const { return fRect.height(); }
+
+ /** Returns top-left corner radii. If type() returns kEmpty_Type, kRect_Type,
+ kOval_Type, or kSimple_Type, returns a value representative of all corner radii.
+ If type() returns kNinePatch_Type or kComplex_Type, at least one of the
+ remaining three corners has a different value.
+
+ @return corner radii for simple types
+ */
+ SkVector getSimpleRadii() const {
+ return fRadii[0];
+ }
+
+ /** Sets bounds to zero width and height at (0, 0), the origin. Sets
+ corner radii to zero and sets type to kEmpty_Type.
+ */
+ void setEmpty() { *this = SkRRect(); }
+
+ /** Sets bounds to sorted rect, and sets corner radii to zero.
+        If the sorted bounds have width and height, sets type to kRect_Type;
+ otherwise, sets type to kEmpty_Type.
+
+ @param rect bounds to set
+ */
+ void setRect(const SkRect& rect) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kRect_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ /** Initializes bounds at (0, 0), the origin, with zero width and height.
+ Initializes corner radii to (0, 0), and sets type of kEmpty_Type.
+
+ @return empty SkRRect
+ */
+ static SkRRect MakeEmpty() { return SkRRect(); }
+
+ /** Initializes to copy of r bounds and zeroes corner radii.
+
+ @param r bounds to copy
+ @return copy of r
+ */
+ static SkRRect MakeRect(const SkRect& r) {
+ SkRRect rr;
+ rr.setRect(r);
+ return rr;
+ }
+
+ /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii
+ to half oval.height(). If oval bounds is empty, sets to kEmpty_Type.
+ Otherwise, sets to kOval_Type.
+
+ @param oval bounds of oval
+ @return oval
+ */
+ static SkRRect MakeOval(const SkRect& oval) {
+ SkRRect rr;
+ rr.setOval(oval);
+ return rr;
+ }
+
+ /** Sets to rounded rectangle with the same radii for all four corners.
+ If rect is empty, sets to kEmpty_Type.
+        Otherwise, if xRad or yRad is zero, sets to kRect_Type.
+ Otherwise, if xRad is at least half rect.width() and yRad is at least half
+ rect.height(), sets to kOval_Type.
+ Otherwise, sets to kSimple_Type.
+
+ @param rect bounds of rounded rectangle
+ @param xRad x-axis radius of corners
+ @param yRad y-axis radius of corners
+ @return rounded rectangle
+ */
+ static SkRRect MakeRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ SkRRect rr;
+ rr.setRectXY(rect, xRad, yRad);
+ return rr;
+ }
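+
+    /* Editor's sketch: a 100x40 round rect with uniform 8x8 corner radii
+       classifies as kSimple_Type.
+
+           SkRRect rr = SkRRect::MakeRectXY(SkRect::MakeWH(100, 40), 8, 8);
+           SkASSERT(rr.type() == SkRRect::kSimple_Type);
+    */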
+
+ /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii
+ to half oval.height(). If oval bounds is empty, sets to kEmpty_Type.
+ Otherwise, sets to kOval_Type.
+
+ @param oval bounds of oval
+ */
+ void setOval(const SkRect& oval) {
+ if (!this->initializeRect(oval)) {
+ return;
+ }
+
+ SkScalar xRad = SkScalarHalf(fRect.width());
+ SkScalar yRad = SkScalarHalf(fRect.height());
+
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kOval_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ /** Sets to rounded rectangle with the same radii for all four corners.
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if xRad or yRad is zero, sets to kRect_Type.
+ Otherwise, if xRad is at least half rect.width() and yRad is at least half
+ rect.height(), sets to kOval_Type.
+ Otherwise, sets to kSimple_Type.
+
+ @param rect bounds of rounded rectangle
+ @param xRad x-axis radius of corners
+ @param yRad y-axis radius of corners
+ */
+ void setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad);
+
+ /** Sets bounds to rect. Sets radii to (leftRad, topRad), (rightRad, topRad),
+ (rightRad, bottomRad), (leftRad, bottomRad).
+
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if leftRad and rightRad are zero, sets to kRect_Type.
+ Otherwise, if topRad and bottomRad are zero, sets to kRect_Type.
+ Otherwise, if leftRad and rightRad are equal and at least half rect.width(), and
+        topRad and bottomRad are equal and at least half rect.height(), sets to kOval_Type.
+ Otherwise, if leftRad and rightRad are equal, and topRad and bottomRad are equal,
+ sets to kSimple_Type. Otherwise, sets to kNinePatch_Type.
+
+ Nine patch refers to the nine parts defined by the radii: one center rectangle,
+ four edge patches, and four corner patches.
+
+ @param rect bounds of rounded rectangle
+ @param leftRad left-top and left-bottom x-axis radius
+ @param topRad left-top and right-top y-axis radius
+ @param rightRad right-top and right-bottom x-axis radius
+ @param bottomRad left-bottom and right-bottom y-axis radius
+ */
+ void setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad);
+
+    /** Sets bounds to rect. Sets radii array for individual control of all four corners.
+
+ If rect is empty, sets to kEmpty_Type.
+        Otherwise, if one of each corner's radii is zero, sets to kRect_Type.
+        Otherwise, if all x-axis radii are equal and at least half rect.width(), and
+        all y-axis radii are equal and at least half rect.height(), sets to kOval_Type.
+ Otherwise, if all x-axis radii are equal, and all y-axis radii are equal,
+ sets to kSimple_Type. Otherwise, sets to kNinePatch_Type.
+
+ @param rect bounds of rounded rectangle
+ @param radii corner x-axis and y-axis radii
+ */
+ void setRectRadii(const SkRect& rect, const SkVector radii[4]);
+
+ /** \enum SkRRect::Corner
+ The radii are stored: top-left, top-right, bottom-right, bottom-left.
+ */
+ enum Corner {
+ kUpperLeft_Corner, //!< index of top-left corner radii
+ kUpperRight_Corner, //!< index of top-right corner radii
+ kLowerRight_Corner, //!< index of bottom-right corner radii
+ kLowerLeft_Corner, //!< index of bottom-left corner radii
+ };
+
+ /** Returns bounds. Bounds may have zero width or zero height. Bounds right is
+ greater than or equal to left; bounds bottom is greater than or equal to top.
+ Result is identical to getBounds().
+
+ @return bounding box
+ */
+ const SkRect& rect() const { return fRect; }
+
+ /** Returns scalar pair for radius of curve on x-axis and y-axis for one corner.
+ Both radii may be zero. If not zero, both are positive and finite.
+
+ @param corner one of: kUpperLeft_Corner, kUpperRight_Corner,
+ kLowerRight_Corner, kLowerLeft_Corner
+ @return x-axis and y-axis radii for one corner
+ */
+ SkVector radii(Corner corner) const { return fRadii[corner]; }
+
+ /** Returns bounds. Bounds may have zero width or zero height. Bounds right is
+ greater than or equal to left; bounds bottom is greater than or equal to top.
+ Result is identical to rect().
+
+ @return bounding box
+ */
+ const SkRect& getBounds() const { return fRect; }
+
+ /** Returns true if bounds and radii in a are equal to bounds and radii in b.
+
+        a and b are not equal if either contains NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect bounds and radii to compare
+ @param b SkRect bounds and radii to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkRRect& a, const SkRRect& b) {
+ return a.fRect == b.fRect && SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8);
+ }
+
+ /** Returns true if bounds and radii in a are not equal to bounds and radii in b.
+
+        a and b are not equal if either contains NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect bounds and radii to compare
+ @param b SkRect bounds and radii to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkRRect& a, const SkRRect& b) {
+ return a.fRect != b.fRect || !SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8);
+ }
+
+ /** Copies SkRRect to dst, then insets dst bounds by dx and dy, and adjusts dst
+        radii by dx and dy. dx and dy may be positive, negative, or zero. dst may be
+        this SkRRect, to modify it in place.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half dst bounds width, dst bounds left and right are set to
+ bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, dst bounds is zeroed.
+
+ @param dx added to rect().fLeft, and subtracted from rect().fRight
+ @param dy added to rect().fTop, and subtracted from rect().fBottom
+ @param dst insets bounds and radii
+ */
+ void inset(SkScalar dx, SkScalar dy, SkRRect* dst) const;
+
+ /** Insets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be
+ positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half bounds width, bounds left and right are set to
+ bounds x-axis center. If dy exceeds half bounds height, bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, bounds is zeroed.
+
+ @param dx added to rect().fLeft, and subtracted from rect().fRight
+ @param dy added to rect().fTop, and subtracted from rect().fBottom
+ */
+ void inset(SkScalar dx, SkScalar dy) {
+ this->inset(dx, dy, this);
+ }
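+
+    // Illustrative sketch (not part of the upstream header): insetting shrinks
+    // both the bounds and the radii, pinning radii at zero:
+    //
+    //     SkRRect rr;
+    //     rr.setNinePatch(SkRect::MakeWH(100, 60), 10, 10, 10, 10);
+    //     rr.inset(4, 4);   // bounds become (4, 4, 96, 56); radii shrink to 6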
+
+    /** Copies SkRRect to dst, then outsets dst bounds by dx and dy, and adjusts dst
+        radii by dx and dy. dx and dy may be positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half dst bounds width, dst bounds left and right are set to
+ bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, dst bounds is zeroed.
+
+ @param dx subtracted from rect().fLeft, and added to rect().fRight
+ @param dy subtracted from rect().fTop, and added to rect().fBottom
+        @param dst outsets bounds and radii
+ */
+ void outset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ this->inset(-dx, -dy, dst);
+ }
+
+ /** Outsets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be
+ positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half bounds width, bounds left and right are set to
+ bounds x-axis center. If dy exceeds half bounds height, bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, bounds is zeroed.
+
+ @param dx subtracted from rect().fLeft, and added to rect().fRight
+ @param dy subtracted from rect().fTop, and added to rect().fBottom
+ */
+ void outset(SkScalar dx, SkScalar dy) {
+ this->inset(-dx, -dy, this);
+ }
+
+ /** Translates SkRRect by (dx, dy).
+
+ @param dx offset added to rect().fLeft and rect().fRight
+ @param dy offset added to rect().fTop and rect().fBottom
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fRect.offset(dx, dy);
+ }
+
+ /** Returns SkRRect translated by (dx, dy).
+
+ @param dx offset added to rect().fLeft and rect().fRight
+ @param dy offset added to rect().fTop and rect().fBottom
+ @return SkRRect bounds offset by (dx, dy), with unchanged corner radii
+ */
+ SkRRect SK_WARN_UNUSED_RESULT makeOffset(SkScalar dx, SkScalar dy) const {
+ return SkRRect(fRect.makeOffset(dx, dy), fRadii, fType);
+ }
+
+ /** Returns true if rect is inside the bounds and corner radii, and if
+ SkRRect and rect are not empty.
+
+ @param rect area tested for containment
+ @return true if SkRRect contains rect
+ */
+ bool contains(const SkRect& rect) const;
+
+    /** Returns true if bounds and radii values are finite and describe a SkRRect
+        whose SkRRect::Type matches getType(). All SkRRect methods construct valid types,
+ even if the input values are not valid. Invalid SkRRect data can only
+ be generated by corrupting memory.
+
+ @return true if bounds and radii match type()
+ */
+ bool isValid() const;
+
+ static constexpr size_t kSizeInMemory = 12 * sizeof(SkScalar);
+
+ /** Writes SkRRect to buffer. Writes kSizeInMemory bytes, and returns
+ kSizeInMemory, the number of bytes written.
+
+ @param buffer storage for SkRRect
+ @return bytes written, kSizeInMemory
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Reads SkRRect from buffer, reading kSizeInMemory bytes.
+        Returns kSizeInMemory, the number of bytes read, if length is at least kSizeInMemory.
+ Otherwise, returns zero.
+
+ @param buffer memory to read from
+ @param length size of buffer
+ @return bytes read, or 0 if length is less than kSizeInMemory
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
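+
+    // Illustrative round-trip sketch (not part of the upstream header):
+    //
+    //     char storage[SkRRect::kSizeInMemory];
+    //     size_t written = rr.writeToMemory(storage);           // kSizeInMemory
+    //     SkRRect copy;
+    //     size_t read = copy.readFromMemory(storage, written);  // kSizeInMemory
+    //     // copy == rr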
+
+    /** Transforms SkRRect by matrix, storing the result in dst.
+ Returns true if SkRRect transformed can be represented by another SkRRect.
+ Returns false if matrix contains transformations that are not axis aligned.
+
+        Asserts in debug builds if dst is this SkRRect.
+
+ @param matrix SkMatrix specifying the transform
+ @param dst SkRRect to store the result
+ @return true if transformation succeeded.
+ */
+ bool transform(const SkMatrix& matrix, SkRRect* dst) const;
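+
+    // Illustrative sketch (not part of the upstream header; SkMatrix::MakeScale
+    // is declared in SkMatrix.h): an axis-aligned scale keeps the result
+    // representable as an SkRRect, so transform() succeeds:
+    //
+    //     SkRRect scaled;
+    //     if (rr.transform(SkMatrix::MakeScale(2, 2), &scaled)) {
+    //         // scaled has doubled bounds and doubled radii
+    //     }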
+
+ /** Writes text representation of SkRRect to standard output.
+ Set asHex true to generate exact binary representations
+ of floating point numbers.
+
+ @param asHex true if SkScalar values are written as hexadecimal
+ */
+ void dump(bool asHex) const;
+
+ /** Writes text representation of SkRRect to standard output. The representation
+ may be directly compiled as C++ code. Floating point values are written
+ with limited precision; it may not be possible to reconstruct original
+ SkRRect from output.
+ */
+ void dump() const { this->dump(false); }
+
+ /** Writes text representation of SkRRect to standard output. The representation
+ may be directly compiled as C++ code. Floating point values are written
+ in hexadecimal to preserve their exact bit pattern. The output reconstructs the
+ original SkRRect.
+ */
+ void dumpHex() const { this->dump(true); }
+
+private:
+ static bool AreRectAndRadiiValid(const SkRect&, const SkVector[4]);
+
+ SkRRect(const SkRect& rect, const SkVector radii[4], int32_t type)
+ : fRect(rect)
+ , fRadii{radii[0], radii[1], radii[2], radii[3]}
+ , fType(type) {}
+
+ /**
+     * Initializes fRect. If the passed-in rect is not finite or is empty, the rrect will be fully
+     * initialized and false is returned. Otherwise, just fRect is initialized and true is returned.
+ */
+ bool initializeRect(const SkRect&);
+
+ void computeType();
+ bool checkCornerContainment(SkScalar x, SkScalar y) const;
+ void scaleRadii(const SkRect& rect);
+
+ SkRect fRect = SkRect::MakeEmpty();
+ // Radii order is UL, UR, LR, LL. Use Corner enum to index into fRadii[]
+    SkVector fRadii[4] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};
+ // use an explicitly sized type so we're sure the class is dense (no uninitialized bytes)
+ int32_t fType = kEmpty_Type;
+ // TODO: add padding so we can use memcpy for flattening and not copy uninitialized data
+
+ // to access fRadii directly
+ friend class SkPath;
+ friend class SkRRectPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRSXform.h b/gfx/skia/skia/include/core/SkRSXform.h
new file mode 100644
index 0000000000..91653311d9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRSXform.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRSXform_DEFINED
+#define SkRSXform_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkSize.h"
+
+/**
+ * A compressed form of a rotation+scale matrix.
+ *
+ * [ fSCos -fSSin fTx ]
+ * [ fSSin fSCos fTy ]
+ * [ 0 0 1 ]
+ */
+struct SkRSXform {
+ static SkRSXform Make(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ SkRSXform xform = { scos, ssin, tx, ty };
+ return xform;
+ }
+
+ /*
+ * Initialize a new xform based on the scale, rotation (in radians), final tx,ty location
+ * and anchor-point ax,ay within the src quad.
+ *
+ * Note: the anchor point is not normalized (e.g. 0...1) but is in pixels of the src image.
+ */
+ static SkRSXform MakeFromRadians(SkScalar scale, SkScalar radians, SkScalar tx, SkScalar ty,
+ SkScalar ax, SkScalar ay) {
+ const SkScalar s = SkScalarSin(radians) * scale;
+ const SkScalar c = SkScalarCos(radians) * scale;
+ return Make(c, s, tx + -c * ax + s * ay, ty + -s * ax - c * ay);
+ }
+
+ SkScalar fSCos;
+ SkScalar fSSin;
+ SkScalar fTx;
+ SkScalar fTy;
+
+ bool rectStaysRect() const {
+ return 0 == fSCos || 0 == fSSin;
+ }
+
+ void setIdentity() {
+ fSCos = 1;
+ fSSin = fTx = fTy = 0;
+ }
+
+ void set(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ fSCos = scos;
+ fSSin = ssin;
+ fTx = tx;
+ fTy = ty;
+ }
+
+ void toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const;
+ void toQuad(const SkSize& size, SkPoint quad[4]) const {
+ this->toQuad(size.width(), size.height(), quad);
+ }
+ void toTriStrip(SkScalar width, SkScalar height, SkPoint strip[4]) const;
+};
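+
+// Illustrative sketch (not part of the upstream header; SK_ScalarPI comes from
+// SkScalar.h): position a 16x16 sprite rotated 90 degrees about its center:
+//
+//     SkRSXform xf = SkRSXform::MakeFromRadians(1, SK_ScalarPI / 2,
+//                                               50, 50, 8, 8);
+//     SkPoint quad[4];
+//     xf.toQuad(16, 16, quad);  // the four mapped corners of the sprite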
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkRWBuffer.h b/gfx/skia/skia/include/core/SkRWBuffer.h
new file mode 100644
index 0000000000..c0d24f6c5f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRWBuffer.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRWBuffer_DEFINED
+#define SkRWBuffer_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+struct SkBufferBlock;
+struct SkBufferHead;
+class SkRWBuffer;
+class SkStreamAsset;
+
+/**
+ * Contains a read-only, thread-sharable block of memory. To access the memory, the caller must
+ * instantiate a local iterator, as the memory is stored in 1 or more contiguous blocks.
+ */
+class SK_API SkROBuffer : public SkRefCnt {
+public:
+ /**
+ * Return the logical length of the data owned/shared by this buffer. It may be stored in
+ * multiple contiguous blocks, accessible via the iterator.
+ */
+ size_t size() const { return fAvailable; }
+
+ class SK_API Iter {
+ public:
+ Iter(const SkROBuffer*);
+ Iter(const sk_sp<SkROBuffer>&);
+
+ void reset(const SkROBuffer*);
+
+ /**
+         *  Returns the current contiguous block of memory, or nullptr if the iterator is exhausted.
+ */
+ const void* data() const;
+
+ /**
+         *  Returns the number of bytes in the current contiguous block of memory, or 0 if the
+ * iterator is exhausted.
+ */
+ size_t size() const;
+
+ /**
+ * Advance to the next contiguous block of memory, returning true if there is another
+ * block, or false if the iterator is exhausted.
+ */
+ bool next();
+
+ private:
+ const SkBufferBlock* fBlock;
+ size_t fRemaining;
+ const SkROBuffer* fBuffer;
+ };
+
+private:
+ SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* fTail);
+ virtual ~SkROBuffer();
+
+ const SkBufferHead* fHead;
+ const size_t fAvailable;
+ const SkBufferBlock* fTail;
+
+ friend class SkRWBuffer;
+};
+
+/**
+ * Accumulates bytes of memory that are "appended" to it, growing internal storage as needed.
+ *  The growth is done such that at any time in the writer's thread, an SkROBuffer or
+ *  SkStreamAsset can be snapped off (and safely passed to another thread). The snapshot
+ *  can see the previously stored bytes, but will be unaware of any future writes.
+ */
+class SK_API SkRWBuffer {
+public:
+ SkRWBuffer(size_t initialCapacity = 0);
+ ~SkRWBuffer();
+
+ size_t size() const { return fTotalUsed; }
+
+ /**
+ * Append |length| bytes from |buffer|.
+ *
+ * If the caller knows in advance how much more data they are going to append, they can
+ * pass a |reserve| hint (representing the number of upcoming bytes *in addition* to the
+ * current append), to minimize the number of internal allocations.
+ */
+ void append(const void* buffer, size_t length, size_t reserve = 0);
+
+ sk_sp<SkROBuffer> makeROBufferSnapshot() const {
+ return sk_sp<SkROBuffer>(new SkROBuffer(fHead, fTotalUsed, fTail));
+ }
+
+ std::unique_ptr<SkStreamAsset> makeStreamSnapshot() const;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ SkBufferHead* fHead;
+ SkBufferBlock* fTail;
+ size_t fTotalUsed;
+};
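+
+// Illustrative sketch (not part of the upstream header): append in the writer
+// thread, snapshot, and walk the snapshot's contiguous blocks:
+//
+//     SkRWBuffer writer;
+//     writer.append("hello", 5);
+//     sk_sp<SkROBuffer> snap = writer.makeROBufferSnapshot();
+//     writer.append(" world", 6);  // invisible to snap
+//     SkROBuffer::Iter iter(snap);
+//     do {
+//         // iter.data() points at iter.size() contiguous bytes
+//     } while (iter.next());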
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRasterHandleAllocator.h b/gfx/skia/skia/include/core/SkRasterHandleAllocator.h
new file mode 100644
index 0000000000..6f5fe682d6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRasterHandleAllocator.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterHandleAllocator_DEFINED
+#define SkRasterHandleAllocator_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+class SkBitmap;
+class SkCanvas;
+class SkMatrix;
+
+/**
+ * If a client wants to control the allocation of raster layers in a canvas, it should subclass
+ * SkRasterHandleAllocator. This allocator performs two tasks:
+ * 1. controls how the memory for the pixels is allocated
+ * 2. associates a "handle" to a private object that can track the matrix/clip of the SkCanvas
+ *
+ * This example allocates a canvas, and defers to the allocator to create the base layer.
+ *
+ * std::unique_ptr<SkCanvas> canvas = SkRasterHandleAllocator::MakeCanvas(
+ * SkImageInfo::Make(...),
+ * skstd::make_unique<MySubclassRasterHandleAllocator>(...),
+ * nullptr);
+ *
+ * If you have already allocated the base layer (and its handle, release-proc etc.) then you
+ * can pass those in using the last parameter to MakeCanvas().
+ *
+ * Regardless of how the base layer is allocated, each time canvas->saveLayer() is called,
+ * your allocator's allocHandle() will be called.
+ */
+class SK_API SkRasterHandleAllocator {
+public:
+ virtual ~SkRasterHandleAllocator() {}
+
+ // The value that is returned to clients of the canvas that has this allocator installed.
+ typedef void* Handle;
+
+ struct Rec {
+ // When the allocation goes out of scope, this proc is called to free everything associated
+ // with it: the pixels, the "handle", etc. This is passed the pixel address and fReleaseCtx.
+ void (*fReleaseProc)(void* pixels, void* ctx);
+ void* fReleaseCtx; // context passed to fReleaseProc
+ void* fPixels; // pixels for this allocation
+ size_t fRowBytes; // rowbytes for these pixels
+ Handle fHandle; // public handle returned by SkCanvas::accessTopRasterHandle()
+ };
+
+ /**
+ * Given a requested info, allocate the corresponding pixels/rowbytes, and whatever handle
+ * is desired to give clients access to those pixels. The rec also contains a proc and context
+ * which will be called when this allocation goes out of scope.
+ *
+ * e.g.
+ * when canvas->saveLayer() is called, the allocator will be called to allocate the pixels
+ * for the layer. When canvas->restore() is called, the fReleaseProc will be called.
+ */
+ virtual bool allocHandle(const SkImageInfo&, Rec*) = 0;
+
+ /**
+ * Clients access the handle for a given layer by calling SkCanvas::accessTopRasterHandle().
+     *  To allow the handle to reflect the current matrix/clip in the canvas, updateHandle()
+     *  is called. The subclass is responsible for updating the handle as it sees fit.
+ */
+ virtual void updateHandle(Handle, const SkMatrix&, const SkIRect&) = 0;
+
+ /**
+ * This creates a canvas which will use the allocator to manage pixel allocations, including
+ * all calls to saveLayer().
+ *
+ * If rec is non-null, then it will be used as the base-layer of pixels/handle.
+ * If rec is null, then the allocator will be called for the base-layer as well.
+ */
+ static std::unique_ptr<SkCanvas> MakeCanvas(std::unique_ptr<SkRasterHandleAllocator>,
+ const SkImageInfo&, const Rec* rec = nullptr);
+
+private:
+ friend class SkBitmapDevice;
+
+ Handle allocBitmap(const SkImageInfo&, SkBitmap*);
+};
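+
+// A minimal subclass skeleton (illustrative only; MyAllocator and its storage
+// strategy are hypothetical, and SkImageInfo::computeMinByteSize() is declared
+// in SkImageInfo.h):
+//
+//     class MyAllocator : public SkRasterHandleAllocator {
+//         bool allocHandle(const SkImageInfo& info, Rec* rec) override {
+//             // allocate info.computeMinByteSize() bytes; fill in rec->fPixels,
+//             // rec->fRowBytes, rec->fHandle, rec->fReleaseProc, rec->fReleaseCtx
+//             return true;
+//         }
+//         void updateHandle(Handle h, const SkMatrix& ctm,
+//                           const SkIRect& clip) override {
+//             // mirror the canvas matrix/clip into the object behind h
+//         }
+//     };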
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRect.h b/gfx/skia/skia/include/core/SkRect.h
new file mode 100644
index 0000000000..eff1c0a69f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRect.h
@@ -0,0 +1,1335 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRect_DEFINED
+#define SkRect_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkSafe32.h"
+#include "include/private/SkTFitsIn.h"
+
+#include <utility>
+
+struct SkRect;
+
+/** \struct SkIRect
+ SkIRect holds four 32-bit integer coordinates describing the upper and
+ lower bounds of a rectangle. SkIRect may be created from outer bounds or
+ from position, width, and height. SkIRect describes an area; if its right
+ is less than or equal to its left, or if its bottom is less than or equal to
+ its top, it is considered empty.
+*/
+struct SK_API SkIRect {
+ int32_t fLeft; //!< smaller x-axis bounds
+ int32_t fTop; //!< smaller y-axis bounds
+ int32_t fRight; //!< larger x-axis bounds
+ int32_t fBottom; //!< larger y-axis bounds
+
+ /** Returns constructed SkIRect set to (0, 0, 0, 0).
+        Many other rectangles are also empty: any rectangle whose left is equal to or
+        greater than its right, or whose top is equal to or greater than its bottom,
+        is empty. Setting all members to zero is a convenience, but does not
+        designate a special empty rectangle.
+
+ @return bounds (0, 0, 0, 0)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ return SkIRect{0, 0, 0, 0};
+ }
+
+ /** Returns constructed SkIRect set to (0, 0, w, h). Does not validate input; w or h
+ may be negative.
+
+ @param w width of constructed SkIRect
+ @param h height of constructed SkIRect
+ @return bounds (0, 0, w, h)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeWH(int32_t w, int32_t h) {
+ return SkIRect{0, 0, w, h};
+ }
+
+ /** Returns constructed SkIRect set to (0, 0, size.width(), size.height()).
+ Does not validate input; size.width() or size.height() may be negative.
+
+ @param size values for SkIRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeSize(const SkISize& size) {
+ return SkIRect{0, 0, size.fWidth, size.fHeight};
+ }
+
+ /** Returns constructed SkIRect set to (l, t, r, b). Does not sort input; SkIRect may
+ result in fLeft greater than fRight, or fTop greater than fBottom.
+
+ @param l integer stored in fLeft
+ @param t integer stored in fTop
+ @param r integer stored in fRight
+ @param b integer stored in fBottom
+ @return bounds (l, t, r, b)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeLTRB(int32_t l, int32_t t,
+ int32_t r, int32_t b) {
+ return SkIRect{l, t, r, b};
+ }
+
+ /** Returns constructed SkIRect set to: (x, y, x + w, y + h).
+ Does not validate input; w or h may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param w added to x and stored in fRight
+ @param h added to y and stored in fBottom
+ @return bounds at (x, y) with width w and height h
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeXYWH(int32_t x, int32_t y,
+ int32_t w, int32_t h) {
+ return { x, y, Sk32_sat_add(x, w), Sk32_sat_add(y, h) };
+ }
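+
+    // Illustrative sketch (not part of the upstream header; INT32_MAX is from
+    // <cstdint>): Sk32_sat_add saturates rather than wrapping, so an oversized
+    // rectangle clamps:
+    //
+    //     SkIRect r = SkIRect::MakeXYWH(INT32_MAX - 10, 0, 100, 50);
+    //     // r.fRight is INT32_MAX, not a wrapped negative value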
+
+ /** Returns left edge of SkIRect, if sorted.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ int32_t left() const { return fLeft; }
+
+ /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ int32_t top() const { return fTop; }
+
+ /** Returns right edge of SkIRect, if sorted.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fRight
+ */
+ int32_t right() const { return fRight; }
+
+ /** Returns bottom edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fBottom
+ */
+ int32_t bottom() const { return fBottom; }
+
+ /** Returns left edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ int32_t x() const { return fLeft; }
+
+ /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ int32_t y() const { return fTop; }
+
+ // Experimental
+ SkIPoint topLeft() const { return {fLeft, fTop}; }
+
+ /** Returns span on the x-axis. This does not check if SkIRect is sorted, or if
+ result fits in 32-bit signed integer; result may be negative.
+
+ @return fRight minus fLeft
+ */
+ int32_t width() const { return Sk32_can_overflow_sub(fRight, fLeft); }
+
+ /** Returns span on the y-axis. This does not check if SkIRect is sorted, or if
+ result fits in 32-bit signed integer; result may be negative.
+
+ @return fBottom minus fTop
+ */
+ int32_t height() const { return Sk32_can_overflow_sub(fBottom, fTop); }
+
+ /** Returns spans on the x-axis and y-axis. This does not check if SkIRect is sorted,
+ or if result fits in 32-bit signed integer; result may be negative.
+
+ @return SkISize (width, height)
+ */
+ SkISize size() const { return SkISize::Make(this->width(), this->height()); }
+
+ /** Returns span on the x-axis. This does not check if SkIRect is sorted, so the
+ result may be negative. This is safer than calling width() since width() might
+ overflow in its calculation.
+
+ @return fRight minus fLeft cast to int64_t
+ */
+ int64_t width64() const { return (int64_t)fRight - (int64_t)fLeft; }
+
+ /** Returns span on the y-axis. This does not check if SkIRect is sorted, so the
+ result may be negative. This is safer than calling height() since height() might
+ overflow in its calculation.
+
+ @return fBottom minus fTop cast to int64_t
+ */
+ int64_t height64() const { return (int64_t)fBottom - (int64_t)fTop; }
+
+ /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
+ to or greater than fBottom. Call sort() to reverse rectangles with negative
+ width64() or height64().
+
+ @return true if width64() or height64() are zero or negative
+ */
+ bool isEmpty64() const { return fRight <= fLeft || fBottom <= fTop; }
+
+    /** Returns true if width() or height() are zero or negative, or if either
+        does not fit in a 32-bit signed integer.
+
+ @return true if width() or height() are zero or negative
+ */
+ bool isEmpty() const {
+ int64_t w = this->width64();
+ int64_t h = this->height64();
+ if (w <= 0 || h <= 0) {
+ return true;
+ }
+ // Return true if either exceeds int32_t
+ return !SkTFitsIn<int32_t>(w | h);
+ }
+
+ /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are
+ identical to corresponding members in b.
+
+ @param a SkIRect to compare
+ @param b SkIRect to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkIRect& a, const SkIRect& b) {
+ return !memcmp(&a, &b, sizeof(a));
+ }
+
+ /** Returns true if any member in a: fLeft, fTop, fRight, and fBottom; is not
+ identical to the corresponding member in b.
+
+ @param a SkIRect to compare
+ @param b SkIRect to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkIRect& a, const SkIRect& b) {
+ return !(a == b);
+ }
+
+ /** Sets SkIRect to (0, 0, 0, 0).
+
+        Many other rectangles are also empty: any rectangle whose left is equal to or
+        greater than its right, or whose top is equal to or greater than its bottom,
+        is empty. Setting all members to zero is a convenience, but does not
+        designate a special empty rectangle.
+ */
+ void setEmpty() { memset(this, 0, sizeof(*this)); }
+
+ /** Sets SkIRect to (left, top, right, bottom).
+ left and right are not sorted; left is not necessarily less than right.
+ top and bottom are not sorted; top is not necessarily less than bottom.
+
+ @param left stored in fLeft
+ @param top stored in fTop
+ @param right stored in fRight
+ @param bottom stored in fBottom
+ */
+ void setLTRB(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ /** Sets SkIRect to: (x, y, x + width, y + height).
+ Does not validate input; width or height may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param width added to x and stored in fRight
+ @param height added to y and stored in fBottom
+ */
+ void setXYWH(int32_t x, int32_t y, int32_t width, int32_t height) {
+ fLeft = x;
+ fTop = y;
+ fRight = Sk32_sat_add(x, width);
+ fBottom = Sk32_sat_add(y, height);
+ }
+
+    /** Sets SkIRect to (0, 0, width, height). Does not validate input;
+        width or height may be negative.
+
+        @param width stored in fRight
+        @param height stored in fBottom
+    */
+    void setWH(int32_t width, int32_t height) {
+        fLeft = 0;
+        fTop = 0;
+        fRight = width;
+        fBottom = height;
+    }
+
+ /** Returns SkIRect offset by (dx, dy).
+
+ If dx is negative, SkIRect returned is moved to the left.
+ If dx is positive, SkIRect returned is moved to the right.
+ If dy is negative, SkIRect returned is moved upward.
+ If dy is positive, SkIRect returned is moved downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ @return SkIRect offset by dx and dy, with original width and height
+ */
+ constexpr SkIRect makeOffset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy),
+ Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy),
+ };
+ }
+
+ /** Returns SkIRect offset by (offset.x(), offset.y()).
+
+ If offset.x() is negative, SkIRect returned is moved to the left.
+ If offset.x() is positive, SkIRect returned is moved to the right.
+ If offset.y() is negative, SkIRect returned is moved upward.
+ If offset.y() is positive, SkIRect returned is moved downward.
+
+ @param offset translation vector
+ @return SkIRect translated by offset, with original width and height
+ */
+ constexpr SkIRect makeOffset(SkIVector offset) const {
+ return this->makeOffset(offset.x(), offset.y());
+ }
+
+ /** Returns SkIRect, inset by (dx, dy).
+
+ If dx is negative, SkIRect returned is wider.
+ If dx is positive, SkIRect returned is narrower.
+ If dy is negative, SkIRect returned is taller.
+ If dy is positive, SkIRect returned is shorter.
+
+ @param dx offset added to fLeft and subtracted from fRight
+ @param dy offset added to fTop and subtracted from fBottom
+ @return SkIRect inset symmetrically left and right, top and bottom
+ */
+ SkIRect makeInset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy),
+ Sk32_sat_sub(fRight, dx), Sk32_sat_sub(fBottom, dy),
+ };
+ }
+
+ /** Returns SkIRect, outset by (dx, dy).
+
+ If dx is negative, SkIRect returned is narrower.
+ If dx is positive, SkIRect returned is wider.
+ If dy is negative, SkIRect returned is shorter.
+ If dy is positive, SkIRect returned is taller.
+
+        @param dx offset subtracted from fLeft and added to fRight
+        @param dy offset subtracted from fTop and added to fBottom
+ @return SkIRect outset symmetrically left and right, top and bottom
+ */
+ SkIRect makeOutset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_sub(fLeft, dx), Sk32_sat_sub(fTop, dy),
+ Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy),
+ };
+ }
+
+ /** Offsets SkIRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
+
+ If dx is negative, moves SkIRect returned to the left.
+ If dx is positive, moves SkIRect returned to the right.
+ If dy is negative, moves SkIRect returned upward.
+ If dy is positive, moves SkIRect returned downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ */
+ void offset(int32_t dx, int32_t dy) {
+ fLeft = Sk32_sat_add(fLeft, dx);
+ fTop = Sk32_sat_add(fTop, dy);
+ fRight = Sk32_sat_add(fRight, dx);
+ fBottom = Sk32_sat_add(fBottom, dy);
+ }
+
+ /** Offsets SkIRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to
+ fTop, fBottom.
+
+ If delta.fX is negative, moves SkIRect returned to the left.
+ If delta.fX is positive, moves SkIRect returned to the right.
+ If delta.fY is negative, moves SkIRect returned upward.
+ If delta.fY is positive, moves SkIRect returned downward.
+
+ @param delta offset added to SkIRect
+ */
+ void offset(const SkIPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /** Offsets SkIRect so that fLeft equals newX, and fTop equals newY. width and height
+ are unchanged.
+
+ @param newX stored in fLeft, preserving width()
+ @param newY stored in fTop, preserving height()
+ */
+ void offsetTo(int32_t newX, int32_t newY) {
+ fRight = Sk64_pin_to_s32((int64_t)fRight + newX - fLeft);
+ fBottom = Sk64_pin_to_s32((int64_t)fBottom + newY - fTop);
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Insets SkIRect by (dx,dy).
+
+ If dx is positive, makes SkIRect narrower.
+ If dx is negative, makes SkIRect wider.
+ If dy is positive, makes SkIRect shorter.
+ If dy is negative, makes SkIRect taller.
+
+ @param dx offset added to fLeft and subtracted from fRight
+ @param dy offset added to fTop and subtracted from fBottom
+ */
+ void inset(int32_t dx, int32_t dy) {
+ fLeft = Sk32_sat_add(fLeft, dx);
+ fTop = Sk32_sat_add(fTop, dy);
+ fRight = Sk32_sat_sub(fRight, dx);
+ fBottom = Sk32_sat_sub(fBottom, dy);
+ }
+
+ /** Outsets SkIRect by (dx, dy).
+
+ If dx is positive, makes SkIRect wider.
+ If dx is negative, makes SkIRect narrower.
+ If dy is positive, makes SkIRect taller.
+ If dy is negative, makes SkIRect shorter.
+
+        @param dx subtracted from fLeft and added to fRight
+        @param dy subtracted from fTop and added to fBottom
+ */
+ void outset(int32_t dx, int32_t dy) { this->inset(-dx, -dy); }
+
+ /** Adjusts SkIRect by adding dL to fLeft, dT to fTop, dR to fRight, and dB to fBottom.
+
+ If dL is positive, narrows SkIRect on the left. If negative, widens it on the left.
+ If dT is positive, shrinks SkIRect on the top. If negative, lengthens it on the top.
+ If dR is positive, narrows SkIRect on the right. If negative, widens it on the right.
+ If dB is positive, shrinks SkIRect on the bottom. If negative, lengthens it on the bottom.
+
+ The resulting SkIRect is not checked for validity. Thus, if the resulting SkIRect left is
+ greater than right, the SkIRect will be considered empty. Call sort() after this call
+ if that is not the desired behavior.
+
+ @param dL offset added to fLeft
+ @param dT offset added to fTop
+ @param dR offset added to fRight
+ @param dB offset added to fBottom
+ */
+ void adjust(int32_t dL, int32_t dT, int32_t dR, int32_t dB) {
+ fLeft = Sk32_sat_add(fLeft, dL);
+ fTop = Sk32_sat_add(fTop, dT);
+ fRight = Sk32_sat_add(fRight, dR);
+ fBottom = Sk32_sat_add(fBottom, dB);
+ }
+
+ /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
+ Returns false if SkIRect is empty.
+
+ Considers input to describe constructed SkIRect: (x, y, x + 1, y + 1) and
+ returns true if constructed area is completely enclosed by SkIRect area.
+
+ @param x test SkIPoint x-coordinate
+ @param y test SkIPoint y-coordinate
+ @return true if (x, y) is inside SkIRect
+ */
+ bool contains(int32_t x, int32_t y) const {
+ return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
+ }
+
+ /** Returns true if SkIRect contains r.
+ Returns false if SkIRect is empty or r is empty.
+
+ SkIRect contains r when SkIRect area completely includes r area.
+
+ @param r SkIRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ bool contains(const SkIRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkIRect contains r.
+ Returns false if SkIRect is empty or r is empty.
+
+ SkIRect contains r when SkIRect area completely includes r area.
+
+ @param r SkRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ inline bool contains(const SkRect& r) const;
+
+    /** Returns true if SkIRect contains r.
+        Asserts if SkIRect or r is empty when SK_DEBUG is defined.
+
+        Return is undefined if SkIRect or r is empty.
+
+ @param r SkIRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ bool containsNoEmptyCheck(const SkIRect& r) const {
+ SkASSERT(fLeft < fRight && fTop < fBottom);
+ SkASSERT(r.fLeft < r.fRight && r.fTop < r.fBottom);
+ return fLeft <= r.fLeft && fTop <= r.fTop && fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkIRect intersects r, and sets SkIRect to intersection.
+ Returns false if SkIRect does not intersect r, and leaves SkIRect unchanged.
+
+ Returns false if either r or SkIRect is empty, leaving SkIRect unchanged.
+
+ @param r limit of result
+ @return true if r and SkIRect have area in common
+ */
+ bool intersect(const SkIRect& r) {
+ return this->intersect(*this, r);
+ }
+
+ /** Returns true if a intersects b, and sets SkIRect to intersection.
+ Returns false if a does not intersect b, and leaves SkIRect unchanged.
+
+ Returns false if either a or b is empty, leaving SkIRect unchanged.
+
+ @param a SkIRect to intersect
+ @param b SkIRect to intersect
+ @return true if a and b have area in common
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& a, const SkIRect& b);
+
+ /** Returns true if a intersects b.
+ Returns false if either a or b is empty, or do not intersect.
+
+ @param a SkIRect to intersect
+ @param b SkIRect to intersect
+ @return true if a and b have area in common
+ */
+ static bool Intersects(const SkIRect& a, const SkIRect& b) {
+ SkIRect dummy;
+ return dummy.intersect(a, b);
+ }
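+
+    // Illustrative sketch (not part of the upstream header): test first with
+    // Intersects(), or intersect in place and use the boolean result:
+    //
+    //     SkIRect a = SkIRect::MakeLTRB(0, 0, 10, 10);
+    //     SkIRect b = SkIRect::MakeLTRB(5, 5, 15, 15);
+    //     SkIRect both;
+    //     if (both.intersect(a, b)) {
+    //         // both is (5, 5, 10, 10)
+    //     }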
+
+ /** Sets SkIRect to the union of itself and r.
+
+ Has no effect if r is empty. Otherwise, if SkIRect is empty, sets SkIRect to r.
+
+ @param r expansion SkIRect
+ */
+ void join(const SkIRect& r);
+
+ /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
+ fTop and fBottom if fTop is greater than fBottom. Result may be empty,
+ and width() and height() will be zero or positive.
+ */
+ void sort() {
+ using std::swap;
+ if (fLeft > fRight) {
+ swap(fLeft, fRight);
+ }
+ if (fTop > fBottom) {
+ swap(fTop, fBottom);
+ }
+ }
+
+ /** Returns SkIRect with fLeft and fRight swapped if fLeft is greater than fRight; and
+ with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+
+ @return sorted SkIRect
+ */
+ SkIRect makeSorted() const {
+ return MakeLTRB(SkMin32(fLeft, fRight), SkMin32(fTop, fBottom),
+ SkMax32(fLeft, fRight), SkMax32(fTop, fBottom));
+ }
+
+ /** Returns a reference to immutable empty SkIRect, set to (0, 0, 0, 0).
+
+ @return global SkIRect set to all zeroes
+ */
+ static const SkIRect& SK_WARN_UNUSED_RESULT EmptyIRect() {
+ static const SkIRect gEmpty = { 0, 0, 0, 0 };
+ return gEmpty;
+ }
+};
+
+/** \struct SkRect
+ SkRect holds four SkScalar coordinates describing the upper and
+ lower bounds of a rectangle. SkRect may be created from outer bounds or
+ from position, width, and height. SkRect describes an area; if its right
+ is less than or equal to its left, or if its bottom is less than or equal to
+ its top, it is considered empty.
+*/
+struct SK_API SkRect {
+ SkScalar fLeft; //!< smaller x-axis bounds
+ SkScalar fTop; //!< smaller y-axis bounds
+ SkScalar fRight; //!< larger x-axis bounds
+ SkScalar fBottom; //!< larger y-axis bounds
+
+ /** Returns constructed SkRect set to (0, 0, 0, 0).
+        Many other rectangles are also empty: any rectangle whose left is equal to or
+        greater than its right, or whose top is equal to or greater than its bottom,
+        is empty. Setting all members to zero is a convenience, but does not
+        designate a special empty rectangle.
+
+ @return bounds (0, 0, 0, 0)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ return SkRect{0, 0, 0, 0};
+ }
+
+ /** Returns constructed SkRect set to SkScalar values (0, 0, w, h). Does not
+ validate input; w or h may be negative.
+
+ Passing integer values may generate a compiler warning since SkRect cannot
+ represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle.
+
+ @param w SkScalar width of constructed SkRect
+ @param h SkScalar height of constructed SkRect
+ @return bounds (0, 0, w, h)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeWH(SkScalar w, SkScalar h) {
+ return SkRect{0, 0, w, h};
+ }
+
+ /** Returns constructed SkRect set to integer values (0, 0, w, h). Does not validate
+ input; w or h may be negative.
+
+ Use to avoid a compiler warning that input may lose precision when stored.
+ Use SkIRect for an exact integer rectangle.
+
+ @param w integer width of constructed SkRect
+ @param h integer height of constructed SkRect
+ @return bounds (0, 0, w, h)
+ */
+ static SkRect SK_WARN_UNUSED_RESULT MakeIWH(int w, int h) {
+ return {0, 0, SkIntToScalar(w), SkIntToScalar(h)};
+ }
+
+ /** Returns constructed SkRect set to (0, 0, size.width(), size.height()). Does not
+ validate input; size.width() or size.height() may be negative.
+
+ @param size SkScalar values for SkRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeSize(const SkSize& size) {
+ return SkRect{0, 0, size.fWidth, size.fHeight};
+ }
+
+ /** Returns constructed SkRect set to (l, t, r, b). Does not sort input; SkRect may
+ result in fLeft greater than fRight, or fTop greater than fBottom.
+
+ @param l SkScalar stored in fLeft
+ @param t SkScalar stored in fTop
+ @param r SkScalar stored in fRight
+ @param b SkScalar stored in fBottom
+ @return bounds (l, t, r, b)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeLTRB(SkScalar l, SkScalar t, SkScalar r,
+ SkScalar b) {
+ return SkRect {l, t, r, b};
+ }
+
+ /** Returns constructed SkRect set to (x, y, x + w, y + h).
+ Does not validate input; w or h may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param w added to x and stored in fRight
+ @param h added to y and stored in fBottom
+ @return bounds at (x, y) with width w and height h
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeXYWH(SkScalar x, SkScalar y, SkScalar w,
+ SkScalar h) {
+ return SkRect {x, y, x + w, y + h};
+ }
+
+    /** Returns constructed SkRect set to (0, 0, size.width(), size.height()).
+ Does not validate input; size.width() or size.height() may be negative.
+
+ @param size integer values for SkRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static SkRect Make(const SkISize& size) {
+ return MakeIWH(size.width(), size.height());
+ }
+
+    /** Returns constructed SkRect set to irect, promoting integers to scalar.
+ Does not validate input; fLeft may be greater than fRight, fTop may be greater
+ than fBottom.
+
+ @param irect integer unsorted bounds
+ @return irect members converted to SkScalar
+ */
+ static SkRect SK_WARN_UNUSED_RESULT Make(const SkIRect& irect) {
+ return {
+ SkIntToScalar(irect.fLeft), SkIntToScalar(irect.fTop),
+ SkIntToScalar(irect.fRight), SkIntToScalar(irect.fBottom)
+ };
+ }
+
+ /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
+ to or greater than fBottom. Call sort() to reverse rectangles with negative
+ width() or height().
+
+ @return true if width() or height() are zero or negative
+ */
+ bool isEmpty() const {
+ // We write it as the NOT of a non-empty rect, so we will return true if any values
+ // are NaN.
+ return !(fLeft < fRight && fTop < fBottom);
+ }
+
+    /** Returns true if fLeft is equal to or less than fRight, and fTop is equal
+        to or less than fBottom. Call sort() to reverse rectangles with negative
+        width() or height().
+
+        @return true if width() and height() are zero or positive
+ */
+ bool isSorted() const { return fLeft <= fRight && fTop <= fBottom; }
+
+ /** Returns true if all values in the rectangle are finite: SK_ScalarMin or larger,
+ and SK_ScalarMax or smaller.
+
+ @return true if no member is infinite or NaN
+ */
+ bool isFinite() const {
+ float accum = 0;
+ accum *= fLeft;
+ accum *= fTop;
+ accum *= fRight;
+ accum *= fBottom;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ SkScalar x() const { return fLeft; }
+
+ /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ SkScalar y() const { return fTop; }
+
+ /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ SkScalar left() const { return fLeft; }
+
+ /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ SkScalar top() const { return fTop; }
+
+ /** Returns right edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fRight
+ */
+ SkScalar right() const { return fRight; }
+
+ /** Returns bottom edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fBottom
+ */
+ SkScalar bottom() const { return fBottom; }
+
+ /** Returns span on the x-axis. This does not check if SkRect is sorted, or if
+ result fits in 32-bit float; result may be negative or infinity.
+
+ @return fRight minus fLeft
+ */
+ SkScalar width() const { return fRight - fLeft; }
+
+ /** Returns span on the y-axis. This does not check if SkRect is sorted, or if
+ result fits in 32-bit float; result may be negative or infinity.
+
+ @return fBottom minus fTop
+ */
+ SkScalar height() const { return fBottom - fTop; }
+
+ /** Returns average of left edge and right edge. Result does not change if SkRect
+ is sorted. Result may overflow to infinity if SkRect is far from the origin.
+
+ @return midpoint on x-axis
+ */
+ SkScalar centerX() const {
+        // don't use SkScalarHalf(fLeft + fRight) as that might overflow before the 0.5
+ return SkScalarHalf(fLeft) + SkScalarHalf(fRight);
+ }
+
+ /** Returns average of top edge and bottom edge. Result does not change if SkRect
+ is sorted.
+
+ @return midpoint on y-axis
+ */
+ SkScalar centerY() const {
+ // don't use SkScalarHalf(fTop + fBottom) as that might overflow before the 0.5
+ return SkScalarHalf(fTop) + SkScalarHalf(fBottom);
+ }
+
+ /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are
+ equal to the corresponding members in b.
+
+        a and b are not equal if either contains NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect to compare
+ @param b SkRect to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkRect& a, const SkRect& b) {
+ return SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4);
+ }
+
+    /** Returns true if any member in a: fLeft, fTop, fRight, and fBottom; does not
+        equal the corresponding member in b.
+
+        a and b are not equal if either contains NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect to compare
+ @param b SkRect to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkRect& a, const SkRect& b) {
+ return !SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4);
+ }
+
+ /** Returns four points in quad that enclose SkRect ordered as: top-left, top-right,
+ bottom-right, bottom-left.
+
+ TODO: Consider adding parameter to control whether quad is clockwise or counterclockwise.
+
+ @param quad storage for corners of SkRect
+ */
+ void toQuad(SkPoint quad[4]) const;
+
+ /** Sets SkRect to (0, 0, 0, 0).
+
+        Many other rectangles are also empty: any rectangle whose left is equal to or
+        greater than its right, or whose top is equal to or greater than its bottom,
+        is empty. Setting all members to zero is a convenience, but does not
+        designate a special empty rectangle.
+ */
+ void setEmpty() { *this = MakeEmpty(); }
+
+ /** Sets SkRect to src, promoting src members from integer to scalar.
+ Very large values in src may lose precision.
+
+ @param src integer SkRect
+ */
+ void set(const SkIRect& src) {
+ fLeft = SkIntToScalar(src.fLeft);
+ fTop = SkIntToScalar(src.fTop);
+ fRight = SkIntToScalar(src.fRight);
+ fBottom = SkIntToScalar(src.fBottom);
+ }
+
+ /** Sets SkRect to (left, top, right, bottom).
+ left and right are not sorted; left is not necessarily less than right.
+ top and bottom are not sorted; top is not necessarily less than bottom.
+
+ @param left stored in fLeft
+ @param top stored in fTop
+ @param right stored in fRight
+ @param bottom stored in fBottom
+ */
+ void setLTRB(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ /** Sets to bounds of SkPoint array with count entries. If count is zero or smaller,
+ or if SkPoint array contains an infinity or NaN, sets to (0, 0, 0, 0).
+
+ Result is either empty or sorted: fLeft is less than or equal to fRight, and
+ fTop is less than or equal to fBottom.
+
+ @param pts SkPoint array
+ @param count entries in array
+ */
+ void setBounds(const SkPoint pts[], int count) {
+ (void)this->setBoundsCheck(pts, count);
+ }
+
+ /** Sets to bounds of SkPoint array with count entries. Returns false if count is
+ zero or smaller, or if SkPoint array contains an infinity or NaN; in these cases
+ sets SkRect to (0, 0, 0, 0).
+
+ Result is either empty or sorted: fLeft is less than or equal to fRight, and
+ fTop is less than or equal to fBottom.
+
+ @param pts SkPoint array
+ @param count entries in array
+ @return true if all SkPoint values are finite
+ */
+ bool setBoundsCheck(const SkPoint pts[], int count);
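+
+    // Illustrative sketch (not part of the upstream header): compute the
+    // sorted bounds of a point array, rejecting non-finite input:
+    //
+    //     SkPoint pts[] = {{10, 40}, {-5, 3}, {22, 7}};
+    //     SkRect bounds;
+    //     if (bounds.setBoundsCheck(pts, 3)) {
+    //         // bounds is (-5, 3, 22, 40)
+    //     }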
+
+ /** Sets to bounds of SkPoint pts array with count entries. If any SkPoint in pts
+ contains infinity or NaN, all SkRect dimensions are set to NaN.
+
+ @param pts SkPoint array
+ @param count entries in array
+ */
+ void setBoundsNoCheck(const SkPoint pts[], int count);
+
+ /** Sets bounds to the smallest SkRect enclosing SkPoint p0 and p1. The result is
+ sorted and may be empty. Does not check to see if values are finite.
+
+ @param p0 corner to include
+ @param p1 corner to include
+ */
+ void set(const SkPoint& p0, const SkPoint& p1) {
+ fLeft = SkMinScalar(p0.fX, p1.fX);
+ fRight = SkMaxScalar(p0.fX, p1.fX);
+ fTop = SkMinScalar(p0.fY, p1.fY);
+ fBottom = SkMaxScalar(p0.fY, p1.fY);
+ }
+
+ /** Sets SkRect to (x, y, x + width, y + height).
+ Does not validate input; width or height may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param width added to x and stored in fRight
+ @param height added to y and stored in fBottom
+ */
+ void setXYWH(SkScalar x, SkScalar y, SkScalar width, SkScalar height) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + width;
+ fBottom = y + height;
+ }
+
+ /** Sets SkRect to (0, 0, width, height). Does not validate input;
+ width or height may be negative.
+
+ @param width stored in fRight
+ @param height stored in fBottom
+ */
+ void setWH(SkScalar width, SkScalar height) {
+ fLeft = 0;
+ fTop = 0;
+ fRight = width;
+ fBottom = height;
+ }
+
+    /** Sets SkRect to (0, 0, width, height), promoting integers to scalar.
+        Does not validate input; width or height may be negative.
+
+        @param width stored in fRight
+        @param height stored in fBottom
+    */
+    void setIWH(int32_t width, int32_t height) {
+        this->setWH(SkIntToScalar(width), SkIntToScalar(height));
+    }
+
+ /** Returns SkRect offset by (dx, dy).
+
+ If dx is negative, SkRect returned is moved to the left.
+ If dx is positive, SkRect returned is moved to the right.
+ If dy is negative, SkRect returned is moved upward.
+ If dy is positive, SkRect returned is moved downward.
+
+ @param dx added to fLeft and fRight
+ @param dy added to fTop and fBottom
+ @return SkRect offset on axes, with original width and height
+ */
+ SkRect makeOffset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
+ }
+
+ /** Returns SkRect, inset by (dx, dy).
+
+ If dx is negative, SkRect returned is wider.
+ If dx is positive, SkRect returned is narrower.
+ If dy is negative, SkRect returned is taller.
+ If dy is positive, SkRect returned is shorter.
+
+ @param dx added to fLeft and subtracted from fRight
+ @param dy added to fTop and subtracted from fBottom
+ @return SkRect inset symmetrically left and right, top and bottom
+ */
+ SkRect makeInset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
+ }
+
+ /** Returns SkRect, outset by (dx, dy).
+
+ If dx is negative, SkRect returned is narrower.
+ If dx is positive, SkRect returned is wider.
+ If dy is negative, SkRect returned is shorter.
+ If dy is positive, SkRect returned is taller.
+
+        @param dx subtracted from fLeft and added to fRight
+        @param dy subtracted from fTop and added to fBottom
+ @return SkRect outset symmetrically left and right, top and bottom
+ */
+ SkRect makeOutset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
+ }
+
+ /** Offsets SkRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
+
+ If dx is negative, moves SkRect to the left.
+ If dx is positive, moves SkRect to the right.
+ If dy is negative, moves SkRect upward.
+ If dy is positive, moves SkRect downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight += dx;
+ fBottom += dy;
+ }
+
+ /** Offsets SkRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to
+ fTop, fBottom.
+
+ If delta.fX is negative, moves SkRect to the left.
+ If delta.fX is positive, moves SkRect to the right.
+ If delta.fY is negative, moves SkRect upward.
+ If delta.fY is positive, moves SkRect downward.
+
+ @param delta added to SkRect
+ */
+ void offset(const SkPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /** Offsets SkRect so that fLeft equals newX, and fTop equals newY. width and height
+ are unchanged.
+
+ @param newX stored in fLeft, preserving width()
+ @param newY stored in fTop, preserving height()
+ */
+ void offsetTo(SkScalar newX, SkScalar newY) {
+ fRight += newX - fLeft;
+ fBottom += newY - fTop;
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Insets SkRect by (dx, dy).
+
+ If dx is positive, makes SkRect narrower.
+ If dx is negative, makes SkRect wider.
+ If dy is positive, makes SkRect shorter.
+ If dy is negative, makes SkRect taller.
+
+ @param dx added to fLeft and subtracted from fRight
+ @param dy added to fTop and subtracted from fBottom
+ */
+ void inset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight -= dx;
+ fBottom -= dy;
+ }
+
+ /** Outsets SkRect by (dx, dy).
+
+ If dx is positive, makes SkRect wider.
+ If dx is negative, makes SkRect narrower.
+ If dy is positive, makes SkRect taller.
+ If dy is negative, makes SkRect shorter.
+
+        @param dx subtracted from fLeft and added to fRight
+        @param dy subtracted from fTop and added to fBottom
+ */
+ void outset(SkScalar dx, SkScalar dy) { this->inset(-dx, -dy); }
+
+ /** Returns true if SkRect intersects r, and sets SkRect to intersection.
+ Returns false if SkRect does not intersect r, and leaves SkRect unchanged.
+
+ Returns false if either r or SkRect is empty, leaving SkRect unchanged.
+
+ @param r limit of result
+ @return true if r and SkRect have area in common
+ */
+ bool intersect(const SkRect& r);
+
+ /** Returns true if a intersects b, and sets SkRect to intersection.
+ Returns false if a does not intersect b, and leaves SkRect unchanged.
+
+ Returns false if either a or b is empty, leaving SkRect unchanged.
+
+ @param a SkRect to intersect
+ @param b SkRect to intersect
+ @return true if a and b have area in common
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkRect& a, const SkRect& b);
+
+private:
+ static bool Intersects(SkScalar al, SkScalar at, SkScalar ar, SkScalar ab,
+ SkScalar bl, SkScalar bt, SkScalar br, SkScalar bb) {
+ SkScalar L = SkMaxScalar(al, bl);
+ SkScalar R = SkMinScalar(ar, br);
+ SkScalar T = SkMaxScalar(at, bt);
+ SkScalar B = SkMinScalar(ab, bb);
+ return L < R && T < B;
+ }
+
+public:
+
+ /** Returns true if SkRect intersects r.
+ Returns false if either r or SkRect is empty, or do not intersect.
+
+ @param r SkRect to intersect
+ @return true if r and SkRect have area in common
+ */
+ bool intersects(const SkRect& r) const {
+ return Intersects(fLeft, fTop, fRight, fBottom,
+ r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /** Returns true if a intersects b.
+ Returns false if either a or b is empty, or do not intersect.
+
+ @param a SkRect to intersect
+ @param b SkRect to intersect
+ @return true if a and b have area in common
+ */
+ static bool Intersects(const SkRect& a, const SkRect& b) {
+ return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom,
+ b.fLeft, b.fTop, b.fRight, b.fBottom);
+ }
+
+ /** Sets SkRect to the union of itself and r.
+
+ Has no effect if r is empty. Otherwise, if SkRect is empty, sets
+ SkRect to r.
+
+ @param r expansion SkRect
+ */
+ void join(const SkRect& r);
+
+ /** Sets SkRect to the union of itself and r.
+
+ Asserts if r is empty and SK_DEBUG is defined.
+ If SkRect is empty, sets SkRect to r.
+
+ May produce incorrect results if r is empty.
+
+ @param r expansion SkRect
+ */
+ void joinNonEmptyArg(const SkRect& r) {
+ SkASSERT(!r.isEmpty());
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ *this = r;
+ } else {
+ this->joinPossiblyEmptyRect(r);
+ }
+ }
+
+    /** Sets SkRect to the union of itself and r.
+
+ May produce incorrect results if SkRect or r is empty.
+
+ @param r expansion SkRect
+ */
+ void joinPossiblyEmptyRect(const SkRect& r) {
+ fLeft = SkMinScalar(fLeft, r.left());
+ fTop = SkMinScalar(fTop, r.top());
+ fRight = SkMaxScalar(fRight, r.right());
+ fBottom = SkMaxScalar(fBottom, r.bottom());
+ }
+
+ /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
+ Returns false if SkRect is empty.
+
+ @param x test SkPoint x-coordinate
+ @param y test SkPoint y-coordinate
+ @return true if (x, y) is inside SkRect
+ */
+ bool contains(SkScalar x, SkScalar y) const {
+ return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
+ }
+
+ /** Returns true if SkRect contains r.
+ Returns false if SkRect is empty or r is empty.
+
+ SkRect contains r when SkRect area completely includes r area.
+
+ @param r SkRect contained
+ @return true if all sides of SkRect are outside r
+ */
+ bool contains(const SkRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkRect contains r.
+ Returns false if SkRect is empty or r is empty.
+
+ SkRect contains r when SkRect area completely includes r area.
+
+ @param r SkIRect contained
+ @return true if all sides of SkRect are outside r
+ */
+ bool contains(const SkIRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= SkIntToScalar(r.fLeft) && fTop <= SkIntToScalar(r.fTop) &&
+ fRight >= SkIntToScalar(r.fRight) && fBottom >= SkIntToScalar(r.fBottom);
+ }
+
+ /** Sets SkIRect by adding 0.5 and discarding the fractional portion of SkRect
+ members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void round(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom));
+ }
+
+ /** Sets SkIRect by discarding the fractional portion of fLeft and fTop; and rounding
+ up fRight and fBottom, using
+ (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void roundOut(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom));
+ }
+
+ /** Sets SkRect by discarding the fractional portion of fLeft and fTop; and rounding
+ up fRight and fBottom, using
+ (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)).
+
+ @param dst storage for SkRect
+ */
+ void roundOut(SkRect* dst) const {
+ dst->setLTRB(SkScalarFloorToScalar(fLeft), SkScalarFloorToScalar(fTop),
+ SkScalarCeilToScalar(fRight), SkScalarCeilToScalar(fBottom));
+ }
+
+ /** Sets SkRect by rounding up fLeft and fTop; and discarding the fractional portion
+ of fRight and fBottom, using
+ (SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void roundIn(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom));
+ }
+
+ /** Returns SkIRect by adding 0.5 and discarding the fractional portion of SkRect
+ members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)).
+
+ @return rounded SkIRect
+ */
+ SkIRect round() const {
+ SkIRect ir;
+ this->round(&ir);
+ return ir;
+ }
+
+ /** Sets SkIRect by discarding the fractional portion of fLeft and fTop; and rounding
+ up fRight and fBottom, using
+ (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)).
+
+ @return rounded SkIRect
+ */
+ SkIRect roundOut() const {
+ SkIRect ir;
+ this->roundOut(&ir);
+ return ir;
+ }
+
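+    // A minimal usage sketch (illustrative, not part of the API): how the
+    // rounding variants above differ for the same rect.
+    //
+    //   SkRect r = SkRect::MakeLTRB(0.4f, 0.4f, 2.6f, 2.6f);
+    //   SkIRect a = r.round();     // (0, 0, 3, 3): round each side to nearest
+    //   SkIRect b = r.roundOut();  // (0, 0, 3, 3): floor mins, ceil maxes
+    //   SkIRect c;
+    //   r.roundIn(&c);             // (1, 1, 2, 2): ceil mins, floor maxes
+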
+ /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
+ fTop and fBottom if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+ */
+ void sort() {
+ using std::swap;
+ if (fLeft > fRight) {
+ swap(fLeft, fRight);
+ }
+
+ if (fTop > fBottom) {
+ swap(fTop, fBottom);
+ }
+ }
+
+ /** Returns SkRect with fLeft and fRight swapped if fLeft is greater than fRight; and
+ with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+
+ @return sorted SkRect
+ */
+ SkRect makeSorted() const {
+ return MakeLTRB(SkMinScalar(fLeft, fRight), SkMinScalar(fTop, fBottom),
+ SkMaxScalar(fLeft, fRight), SkMaxScalar(fTop, fBottom));
+ }
+
+ /** Returns pointer to first scalar in SkRect, to treat it as an array with four
+ entries.
+
+ @return pointer to fLeft
+ */
+ const SkScalar* asScalars() const { return &fLeft; }
+
+ /** Writes text representation of SkRect to standard output. Set asHex to true to
+ generate exact binary representations of floating point numbers.
+
+ @param asHex true if SkScalar values are written as hexadecimal
+ */
+ void dump(bool asHex) const;
+
+ /** Writes text representation of SkRect to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ with limited precision; it may not be possible to reconstruct original SkRect
+ from output.
+ */
+ void dump() const { this->dump(false); }
+
+ /** Writes text representation of SkRect to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ in hexadecimal to preserve their exact bit pattern. The output reconstructs the
+ original SkRect.
+
+        Use instead of dump() when the output must reconstruct the exact SkRect.
+ */
+ void dumpHex() const { this->dump(true); }
+};
+
+inline bool SkIRect::contains(const SkRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ (SkScalar)fLeft <= r.fLeft && (SkScalar)fTop <= r.fTop &&
+ (SkScalar)fRight >= r.fRight && (SkScalar)fBottom >= r.fBottom;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRefCnt.h b/gfx/skia/skia/include/core/SkRefCnt.h
new file mode 100644
index 0000000000..2fab49fe6c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRefCnt.h
@@ -0,0 +1,370 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRefCnt_DEFINED
+#define SkRefCnt_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <atomic> // std::atomic, std::memory_order_*
+#include <cstddef> // std::nullptr_t
+#include <iosfwd> // std::basic_ostream
+#include <memory> // TODO: unused
+#include <type_traits> // std::enable_if, std::is_convertible
+#include <utility> // std::forward, std::swap
+
+/** \class SkRefCntBase
+
+ SkRefCntBase is the base class for objects that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+*/
+class SK_API SkRefCntBase {
+public:
+ /** Default construct, initializing the reference count to 1.
+ */
+ SkRefCntBase() : fRefCnt(1) {}
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~SkRefCntBase() {
+ #ifdef SK_DEBUG
+ SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt());
+ // illegal value, to catch us if we reuse after delete
+ fRefCnt.store(0, std::memory_order_relaxed);
+ #endif
+ }
+
+ /** May return true if the caller is the only owner.
+     *  Ensures that all previous owners' actions are complete.
+ */
+ bool unique() const {
+ if (1 == fRefCnt.load(std::memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count. Must be balanced by a call to unref().
+ */
+ void ref() const {
+ SkASSERT(this->getRefCnt() > 0);
+ // No barrier required.
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ */
+ void unref() const {
+ SkASSERT(this->getRefCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+private:
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const {
+ return fRefCnt.load(std::memory_order_relaxed);
+ }
+#endif
+
+ /**
+ * Called when the ref count goes to 0.
+ */
+ virtual void internal_dispose() const {
+ #ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1, std::memory_order_relaxed);
+ #endif
+ delete this;
+ }
+
+ // The following friends are those which override internal_dispose()
+ // and conditionally call SkRefCnt::internal_dispose().
+ friend class SkWeakRefCnt;
+
+ mutable std::atomic<int32_t> fRefCnt;
+
+ SkRefCntBase(SkRefCntBase&&) = delete;
+ SkRefCntBase(const SkRefCntBase&) = delete;
+ SkRefCntBase& operator=(SkRefCntBase&&) = delete;
+ SkRefCntBase& operator=(const SkRefCntBase&) = delete;
+};
+
+#ifdef SK_REF_CNT_MIXIN_INCLUDE
+// It is the responsibility of the following include to define the type SkRefCnt.
+// This SkRefCnt should normally derive from SkRefCntBase.
+#include SK_REF_CNT_MIXIN_INCLUDE
+#else
+class SK_API SkRefCnt : public SkRefCntBase {
+ // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
+ #if defined(SK_BUILD_FOR_GOOGLE3)
+ public:
+ void deref() const { this->unref(); }
+ #endif
+};
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Call obj->ref() and return obj. The obj must not be nullptr.
+ */
+template <typename T> static inline T* SkRef(T* obj) {
+ SkASSERT(obj);
+ obj->ref();
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
+ */
+template <typename T> static inline T* SkSafeRef(T* obj) {
+ if (obj) {
+ obj->ref();
+ }
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->unref()
+ */
+template <typename T> static inline void SkSafeUnref(T* obj) {
+ if (obj) {
+ obj->unref();
+ }
+}
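+
+// A minimal usage sketch (illustrative, not part of the API): manual reference
+// management with the helpers above. MyType is a hypothetical SkRefCnt
+// subclass.
+//
+//   MyType* obj = new MyType;     // refcount == 1
+//   MyType* same = SkRef(obj);    // refcount == 2, returns obj
+//   SkSafeUnref(same);            // refcount == 1
+//   SkSafeUnref(obj);             // refcount == 0, obj is deleted
+//   SkSafeRef(static_cast<MyType*>(nullptr));  // safe no-op, returns nullptr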
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
+// There's only benefit to using this if the deriving class does not otherwise need a vtable.
+template <typename Derived>
+class SkNVRefCnt {
+public:
+ SkNVRefCnt() : fRefCnt(1) {}
+ ~SkNVRefCnt() {
+ #ifdef SK_DEBUG
+ int rc = fRefCnt.load(std::memory_order_relaxed);
+ SkASSERTF(rc == 1, "NVRefCnt was %d", rc);
+ #endif
+ }
+
+ // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
+ // - unique() needs acquire when it returns true, and no barrier if it returns false;
+ // - ref() doesn't need any barrier;
+ // - unref() needs a release barrier, and an acquire if it's going to call delete.
+
+ bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
+ void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
+ void unref() const {
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // restore the 1 for our destructor's assert
+ SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
+ delete (const Derived*)this;
+ }
+ }
+ void deref() const { this->unref(); }
+
+private:
+ mutable std::atomic<int32_t> fRefCnt;
+
+ SkNVRefCnt(SkNVRefCnt&&) = delete;
+ SkNVRefCnt(const SkNVRefCnt&) = delete;
+ SkNVRefCnt& operator=(SkNVRefCnt&&) = delete;
+ SkNVRefCnt& operator=(const SkNVRefCnt&) = delete;
+};
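+
+// A minimal usage sketch (illustrative, not part of the API): SkNVRefCnt is
+// parameterized on the deriving class so unref() can delete the most-derived
+// type without a virtual destructor. MyBlob is a hypothetical value class.
+//
+//   class MyBlob : public SkNVRefCnt<MyBlob> { /* no vtable needed */ };
+//   sk_sp<MyBlob> blob(new MyBlob);  // interoperates with sk_sp, like SkRefCnt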
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Shared pointer class to wrap classes that support a ref()/unref() interface.
+ *
+ * This can be used for classes inheriting from SkRefCnt, but it also works for other
+ * classes that match the interface, but have different internal choices: e.g. the hosted class
+ * may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
+ */
+template <typename T> class sk_sp {
+public:
+ using element_type = T;
+
+ constexpr sk_sp() : fPtr(nullptr) {}
+ constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}
+
+ /**
+     *  Shares the underlying object by calling ref(), so that both the argument and the newly
+     *  created sk_sp have a reference to it.
+ */
+ sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created sk_sp. Afterwards only
+ * the new sk_sp will have a reference to the object, and the argument will point to null.
+ * No call to ref() or unref() will be made.
+ */
+ sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}
+
+ /**
+ * Adopt the bare pointer into the newly created sk_sp.
+ * No call to ref() or unref() will be made.
+ */
+ explicit sk_sp(T* obj) : fPtr(obj) {}
+
+ /**
+ * Calls unref() on the underlying object pointer.
+ */
+ ~sk_sp() {
+ SkSafeUnref(fPtr);
+ SkDEBUGCODE(fPtr = nullptr);
+ }
+
+ sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling ref() on it. If this
+ * sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
+ * object.
+ */
+ sk_sp<T>& operator=(const sk_sp<T>& that) {
+ if (this != &that) {
+ this->reset(SkSafeRef(that.get()));
+ }
+ return *this;
+ }
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp<T>& operator=(const sk_sp<U>& that) {
+ this->reset(SkSafeRef(that.get()));
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
+ * a reference to another object, unref() will be called on that object. No call to ref()
+ * will be made.
+ */
+ sk_sp<T>& operator=(sk_sp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp<T>& operator=(sk_sp<U>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ T& operator*() const {
+ SkASSERT(this->get() != nullptr);
+ return *this->get();
+ }
+
+ explicit operator bool() const { return this->get() != nullptr; }
+
+ T* get() const { return fPtr; }
+ T* operator->() const { return fPtr; }
+
+ /**
+ * Adopt the new bare pointer, and call unref() on any previously held object (if not null).
+ * No call to ref() will be made.
+ */
+ void reset(T* ptr = nullptr) {
+ // Calling fPtr->unref() may call this->~() or this->reset(T*).
+ // http://wg21.cmeerw.net/lwg/issue998
+ // http://wg21.cmeerw.net/lwg/issue2262
+ T* oldPtr = fPtr;
+ fPtr = ptr;
+ SkSafeUnref(oldPtr);
+ }
+
+ /**
+ * Return the bare pointer, and set the internal object pointer to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to unref() will be made.
+ */
+ T* SK_WARN_UNUSED_RESULT release() {
+ T* ptr = fPtr;
+ fPtr = nullptr;
+ return ptr;
+ }
+
+ void swap(sk_sp<T>& that) /*noexcept*/ {
+ using std::swap;
+ swap(fPtr, that.fPtr);
+ }
+
+private:
+ T* fPtr;
+};
+
+template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
+ a.swap(b);
+}
+
+template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() == b.get();
+}
+template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return !a;
+}
+template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return !b;
+}
+
+template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() != b.get();
+}
+template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return static_cast<bool>(a);
+}
+template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return static_cast<bool>(b);
+}
+
+template <typename C, typename CT, typename T>
+auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) {
+ return os << sp.get();
+}
+
+template <typename T, typename... Args>
+sk_sp<T> sk_make_sp(Args&&... args) {
+ return sk_sp<T>(new T(std::forward<Args>(args)...));
+}
+
+/*
+ * Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
+ *
+ *  This is different from the semantics of the constructor for sk_sp, which just wraps the ptr,
+ * effectively "adopting" it.
+ */
+template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
+ return sk_sp<T>(SkSafeRef(obj));
+}
+
+template <typename T> sk_sp<T> sk_ref_sp(const T* obj) {
+ return sk_sp<T>(const_cast<T*>(SkSafeRef(obj)));
+}
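+
+// A minimal usage sketch (illustrative, not part of the API): the ownership
+// difference between adopting and ref'ing. MyType is a hypothetical SkRefCnt
+// subclass.
+//
+//   sk_sp<MyType> a = sk_make_sp<MyType>();  // new object, refcount == 1
+//   sk_sp<MyType> b = a;                     // copy refs: refcount == 2
+//   sk_sp<MyType> c(a.get());                // BUG: adopts without ref'ing,
+//                                            // leading to a later over-unref
+//   sk_sp<MyType> d = sk_ref_sp(a.get());    // correct: refs, refcount == 3
+//   MyType* raw = d.release();               // d -> nullptr, no unref()
+//   SkSafeUnref(raw);                        // caller balances the ref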
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRegion.h b/gfx/skia/skia/include/core/SkRegion.h
new file mode 100644
index 0000000000..30d8f8984e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRegion.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2005 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRegion_DEFINED
+#define SkRegion_DEFINED
+
+#include "include/core/SkRect.h"
+
+class SkPath;
+class SkRgnBuilder;
+
+/** \class SkRegion
+ SkRegion describes the set of pixels used to clip SkCanvas. SkRegion is compact,
+ efficiently storing a single integer rectangle, or a run length encoded array
+ of rectangles. SkRegion may reduce the current SkCanvas clip, or may be drawn as
+ one or more integer rectangles. SkRegion iterator returns the scan lines or
+ rectangles contained by it, optionally intersecting a bounding rectangle.
+*/
+class SK_API SkRegion {
+ typedef int32_t RunType;
+public:
+
+ /** Constructs an empty SkRegion. SkRegion is set to empty bounds
+ at (0, 0) with zero width and height.
+
+ @return empty SkRegion
+ */
+ SkRegion();
+
+ /** Constructs a copy of an existing region.
+ Copy constructor makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+        SkRegion is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+ @return copy of SkRegion
+ */
+ SkRegion(const SkRegion& region);
+
+ /** Constructs a rectangular SkRegion matching the bounds of rect.
+
+ @param rect bounds of constructed SkRegion
+ @return rectangular SkRegion
+ */
+ explicit SkRegion(const SkIRect& rect);
+
+ /** Releases ownership of any shared data and deletes data if SkRegion is sole owner.
+ */
+ ~SkRegion();
+
+ /** Constructs a copy of an existing region.
+ Makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+        SkRegion is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+        @return reference to SkRegion
+ */
+ SkRegion& operator=(const SkRegion& region);
+
+ /** Compares SkRegion and other; returns true if they enclose exactly
+ the same area.
+
+ @param other SkRegion to compare
+ @return true if SkRegion pair are equivalent
+ */
+ bool operator==(const SkRegion& other) const;
+
+ /** Compares SkRegion and other; returns true if they do not enclose the same area.
+
+ @param other SkRegion to compare
+ @return true if SkRegion pair are not equivalent
+ */
+ bool operator!=(const SkRegion& other) const {
+ return !(*this == other);
+ }
+
+ /** Sets SkRegion to src, and returns true if src bounds is not empty.
+ This makes SkRegion and src identical by value. Internally,
+ SkRegion and src share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+        SkRegion is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param src SkRegion to copy
+        @return true if src is not empty
+ */
+ bool set(const SkRegion& src) {
+ *this = src;
+ return !this->isEmpty();
+ }
+
+ /** Exchanges SkIRect array of SkRegion and other. swap() internally exchanges pointers,
+ so it is lightweight and does not allocate memory.
+
+ swap() usage has largely been replaced by operator=(const SkRegion& region).
+        SkRegion does not copy its content on assignment until it is written to,
+        making assignment as efficient as swap().
+
+        @param other  SkRegion to swap with
+ */
+ void swap(SkRegion& other);
+
+ /** Returns true if SkRegion is empty.
+ Empty SkRegion has bounds width or height less than or equal to zero.
+ SkRegion() constructs empty SkRegion; setEmpty()
+ and setRect() with dimensionless data make SkRegion empty.
+
+ @return true if bounds has no width or height
+ */
+ bool isEmpty() const { return fRunHead == emptyRunHeadPtr(); }
+
+ /** Returns true if SkRegion is one SkIRect with positive dimensions.
+
+ @return true if SkRegion contains one SkIRect
+ */
+ bool isRect() const { return fRunHead == kRectRunHeadPtr; }
+
+ /** Returns true if SkRegion is described by more than one rectangle.
+
+ @return true if SkRegion contains more than one SkIRect
+ */
+ bool isComplex() const { return !this->isEmpty() && !this->isRect(); }
+
+ /** Returns minimum and maximum axes values of SkIRect array.
+ Returns (0, 0, 0, 0) if SkRegion is empty.
+
+ @return combined bounds of all SkIRect elements
+ */
+ const SkIRect& getBounds() const { return fBounds; }
+
+ /** Returns a value that increases with the number of
+ elements in SkRegion. Returns zero if SkRegion is empty.
+ Returns one if SkRegion equals SkIRect; otherwise, returns
+ value greater than one indicating that SkRegion is complex.
+
+ Call to compare SkRegion for relative complexity.
+
+ @return relative complexity
+ */
+ int computeRegionComplexity() const;
+
+ /** Appends outline of SkRegion to path.
+ Returns true if SkRegion is not empty; otherwise, returns false, and leaves path
+ unmodified.
+
+ @param path SkPath to append to
+ @return true if path changed
+ */
+ bool getBoundaryPath(SkPath* path) const;
+
+ /** Constructs an empty SkRegion. SkRegion is set to empty bounds
+ at (0, 0) with zero width and height. Always returns false.
+
+ @return false
+ */
+ bool setEmpty();
+
+ /** Constructs a rectangular SkRegion matching the bounds of rect.
+ If rect is empty, constructs empty and returns false.
+
+ @param rect bounds of constructed SkRegion
+ @return true if rect is not empty
+ */
+ bool setRect(const SkIRect& rect);
+
+ /** Constructs SkRegion as the union of SkIRect in rects array. If count is
+ zero, constructs empty SkRegion. Returns false if constructed SkRegion is empty.
+
+ May be faster than repeated calls to op().
+
+ @param rects array of SkIRect
+ @param count array size
+ @return true if constructed SkRegion is not empty
+ */
+ bool setRects(const SkIRect rects[], int count);
+
+ /** Constructs a copy of an existing region.
+ Makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+        SkRegion is always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+        @return true if constructed SkRegion is not empty
+ */
+ bool setRegion(const SkRegion& region);
+
+ /** Constructs SkRegion to match outline of path within clip.
+ Returns false if constructed SkRegion is empty.
+
+ Constructed SkRegion draws the same pixels as path through clip when
+ anti-aliasing is disabled.
+
+ @param path SkPath providing outline
+ @param clip SkRegion containing path
+ @return true if constructed SkRegion is not empty
+ */
+ bool setPath(const SkPath& path, const SkRegion& clip);
+
+ /** Returns true if SkRegion intersects rect.
+        Returns false if either rect or SkRegion is empty, or if they do not intersect.
+
+ @param rect SkIRect to intersect
+ @return true if rect and SkRegion have area in common
+ */
+ bool intersects(const SkIRect& rect) const;
+
+ /** Returns true if SkRegion intersects other.
+        Returns false if either other or SkRegion is empty, or if they do not intersect.
+
+ @param other SkRegion to intersect
+ @return true if other and SkRegion have area in common
+ */
+ bool intersects(const SkRegion& other) const;
+
+ /** Returns true if SkIPoint (x, y) is inside SkRegion.
+ Returns false if SkRegion is empty.
+
+ @param x test SkIPoint x-coordinate
+ @param y test SkIPoint y-coordinate
+ @return true if (x, y) is inside SkRegion
+ */
+ bool contains(int32_t x, int32_t y) const;
+
+ /** Returns true if other is completely inside SkRegion.
+ Returns false if SkRegion or other is empty.
+
+ @param other SkIRect to contain
+ @return true if other is inside SkRegion
+ */
+ bool contains(const SkIRect& other) const;
+
+ /** Returns true if other is completely inside SkRegion.
+ Returns false if SkRegion or other is empty.
+
+ @param other SkRegion to contain
+ @return true if other is inside SkRegion
+ */
+ bool contains(const SkRegion& other) const;
+
+ /** Returns true if SkRegion is a single rectangle and contains r.
+ May return false even though SkRegion contains r.
+
+ @param r SkIRect to contain
+        @return true if SkRegion is a single SkIRect that contains r
+ */
+ bool quickContains(const SkIRect& r) const {
+ SkASSERT(this->isEmpty() == fBounds.isEmpty()); // valid region
+
+ return r.fLeft < r.fRight && r.fTop < r.fBottom &&
+ fRunHead == kRectRunHeadPtr && // this->isRect()
+ /* fBounds.contains(left, top, right, bottom); */
+ fBounds.fLeft <= r.fLeft && fBounds.fTop <= r.fTop &&
+ fBounds.fRight >= r.fRight && fBounds.fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkRegion does not intersect rect.
+ Returns true if rect is empty or SkRegion is empty.
+ May return false even though SkRegion does not intersect rect.
+
+ @param rect SkIRect to intersect
+ @return true if rect does not intersect
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return this->isEmpty() || rect.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rect);
+ }
+
+ /** Returns true if SkRegion does not intersect rgn.
+ Returns true if rgn is empty or SkRegion is empty.
+ May return false even though SkRegion does not intersect rgn.
+
+ @param rgn SkRegion to intersect
+ @return true if rgn does not intersect
+ */
+ bool quickReject(const SkRegion& rgn) const {
+ return this->isEmpty() || rgn.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rgn.fBounds);
+ }
+
+ /** Offsets SkRegion by ivector (dx, dy). Has no effect if SkRegion is empty.
+
+ @param dx x-axis offset
+ @param dy y-axis offset
+ */
+ void translate(int dx, int dy) { this->translate(dx, dy, this); }
+
+ /** Offsets SkRegion by ivector (dx, dy), writing result to dst. SkRegion may be passed
+ as dst parameter, translating SkRegion in place. Has no effect if dst is nullptr.
+ If SkRegion is empty, sets dst to empty.
+
+ @param dx x-axis offset
+ @param dy y-axis offset
+ @param dst translated result
+ */
+ void translate(int dx, int dy, SkRegion* dst) const;
+
+ /** \enum SkRegion::Op
+ The logical operations that can be performed when combining two SkRegion.
+ */
+ enum Op {
+ kDifference_Op, //!< target minus operand
+ kIntersect_Op, //!< target intersected with operand
+ kUnion_Op, //!< target unioned with operand
+ kXOR_Op, //!< target exclusive or with operand
+ kReverseDifference_Op, //!< operand minus target
+ kReplace_Op, //!< replace target with operand
+ kLastOp = kReplace_Op, //!< last operator
+ };
+
+ static const int kOpCnt = kLastOp + 1;
+
+ /** Replaces SkRegion with the result of SkRegion op rect.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rect SkIRect operand
+ @param op operator, one of:
+ kDifference_Op, kIntersect_Op, kUnion_Op, kXOR_Op, kReverseDifference_Op,
+ kReplace_Op
+ @return false if result is empty
+ */
+ bool op(const SkIRect& rect, Op op) {
+ if (this->isRect() && kIntersect_Op == op) {
+ if (!fBounds.intersect(rect)) {
+ return this->setEmpty();
+ }
+ return true;
+ }
+ return this->op(*this, rect, op);
+ }
+
+ /** Replaces SkRegion with the result of SkRegion op rgn.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgn SkRegion operand
+ @param op operator, one of:
+ kDifference_Op, kIntersect_Op, kUnion_Op, kXOR_Op, kReverseDifference_Op,
+ kReplace_Op
+ @return false if result is empty
+ */
+ bool op(const SkRegion& rgn, Op op) { return this->op(*this, rgn, op); }
+
+ /** Replaces SkRegion with the result of rect op rgn.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rect SkIRect operand
+ @param rgn SkRegion operand
+ @param op operator, one of:
+ kDifference_Op, kIntersect_Op, kUnion_Op, kXOR_Op, kReverseDifference_Op,
+ kReplace_Op
+ @return false if result is empty
+ */
+ bool op(const SkIRect& rect, const SkRegion& rgn, Op op);
+
+ /** Replaces SkRegion with the result of rgn op rect.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgn SkRegion operand
+ @param rect SkIRect operand
+ @param op operator, one of:
+ kDifference_Op, kIntersect_Op, kUnion_Op, kXOR_Op, kReverseDifference_Op,
+ kReplace_Op
+ @return false if result is empty
+ */
+ bool op(const SkRegion& rgn, const SkIRect& rect, Op op);
+
+ /** Replaces SkRegion with the result of rgna op rgnb.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgna SkRegion operand
+ @param rgnb SkRegion operand
+ @param op operator, one of:
+ kDifference_Op, kIntersect_Op, kUnion_Op, kXOR_Op, kReverseDifference_Op,
+ kReplace_Op
+ @return false if result is empty
+ */
+ bool op(const SkRegion& rgna, const SkRegion& rgnb, Op op);
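+
+    // A minimal usage sketch (illustrative, not part of the API): combining
+    // two regions with op().
+    //
+    //   SkRegion a(SkIRect::MakeLTRB(0, 0, 10, 10));
+    //   SkRegion b(SkIRect::MakeLTRB(5, 5, 15, 15));
+    //   SkRegion out;
+    //   out.op(a, b, SkRegion::kIntersect_Op);  // bounds: (5, 5, 10, 10)
+    //   a.op(b, SkRegion::kUnion_Op);           // a now covers both rects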
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Private. Android framework only.
+
+ @return string representation of SkRegion
+ */
+ char* toString();
+#endif
+
+ /** \class SkRegion::Iterator
+ Returns sequence of rectangles, sorted along y-axis, then x-axis, that make
+ up SkRegion.
+ */
+ class SK_API Iterator {
+ public:
+
+ /** Initializes SkRegion::Iterator with an empty SkRegion. done() on SkRegion::Iterator
+ returns true.
+            Call reset() to initialize SkRegion::Iterator at a later time.
+
+ @return empty SkRegion iterator
+ */
+ Iterator() : fRgn(nullptr), fDone(true) {}
+
+ /** Sets SkRegion::Iterator to return elements of SkIRect array in region.
+
+ @param region SkRegion to iterate
+ @return SkRegion iterator
+ */
+ Iterator(const SkRegion& region);
+
+        /** Points SkRegion::Iterator at the start of SkRegion.
+ Returns true if SkRegion was set; otherwise, returns false.
+
+ @return true if SkRegion was set
+ */
+ bool rewind();
+
+ /** Resets iterator, using the new SkRegion.
+
+ @param region SkRegion to iterate
+ */
+ void reset(const SkRegion& region);
+
+ /** Returns true if SkRegion::Iterator is pointing to final SkIRect in SkRegion.
+
+ @return true if data parsing is complete
+ */
+ bool done() const { return fDone; }
+
+ /** Advances SkRegion::Iterator to next SkIRect in SkRegion if it is not done.
+ */
+ void next();
+
+ /** Returns SkIRect element in SkRegion. Does not return predictable results if SkRegion
+ is empty.
+
+ @return part of SkRegion as SkIRect
+ */
+ const SkIRect& rect() const { return fRect; }
+
+ /** Returns SkRegion if set; otherwise, returns nullptr.
+
+ @return iterated SkRegion
+ */
+ const SkRegion* rgn() const { return fRgn; }
+
+ private:
+ const SkRegion* fRgn;
+ const SkRegion::RunType* fRuns;
+ SkIRect fRect = {0, 0, 0, 0};
+ bool fDone;
+ };
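+
+    // A minimal usage sketch (illustrative, not part of the API): visiting
+    // every rectangle in a region rgn, in y-then-x order.
+    //
+    //   for (SkRegion::Iterator it(rgn); !it.done(); it.next()) {
+    //       const SkIRect& r = it.rect();
+    //       // ... use r ...
+    //   }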
+
+ /** \class SkRegion::Cliperator
+ Returns the sequence of rectangles, sorted along y-axis, then x-axis, that make
+ up SkRegion intersected with the specified clip rectangle.
+ */
+ class SK_API Cliperator {
+ public:
+
+ /** Sets SkRegion::Cliperator to return elements of SkIRect array in SkRegion within clip.
+
+ @param region SkRegion to iterate
+ @param clip bounds of iteration
+ @return SkRegion iterator
+ */
+ Cliperator(const SkRegion& region, const SkIRect& clip);
+
+ /** Returns true if SkRegion::Cliperator is pointing to final SkIRect in SkRegion.
+
+ @return true if data parsing is complete
+ */
+ bool done() { return fDone; }
+
+ /** Advances iterator to next SkIRect in SkRegion contained by clip.
+ */
+ void next();
+
+ /** Returns SkIRect element in SkRegion, intersected with clip passed to
+ SkRegion::Cliperator constructor. Does not return predictable results if SkRegion
+ is empty.
+
+ @return part of SkRegion inside clip as SkIRect
+ */
+ const SkIRect& rect() const { return fRect; }
+
+ private:
+ Iterator fIter;
+ SkIRect fClip;
+ SkIRect fRect = {0, 0, 0, 0};
+ bool fDone;
+ };
+
+ /** \class SkRegion::Spanerator
+ Returns the line segment ends within SkRegion that intersect a horizontal line.
+ */
+ class Spanerator {
+ public:
+
+ /** Sets SkRegion::Spanerator to return line segments in SkRegion on scan line.
+
+ @param region SkRegion to iterate
+ @param y horizontal line to intersect
+ @param left bounds of iteration
+ @param right bounds of iteration
+ @return SkRegion iterator
+ */
+ Spanerator(const SkRegion& region, int y, int left, int right);
+
+ /** Advances iterator to next span intersecting SkRegion within line segment provided
+ in constructor. Returns true if interval was found.
+
+ @param left pointer to span start; may be nullptr
+ @param right pointer to span end; may be nullptr
+ @return true if interval was found
+ */
+ bool next(int* left, int* right);
+
+ private:
+ const SkRegion::RunType* fRuns;
+ int fLeft, fRight;
+ bool fDone;
+ };
+
+ /** Writes SkRegion to buffer, and returns number of bytes written.
+        If buffer is nullptr, returns the number of bytes that would be written.
+
+ @param buffer storage for binary data
+ @return size of SkRegion
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Constructs SkRegion from buffer of size length. Returns bytes read.
+ Returned value will be multiple of four or zero if length was too small.
+
+ @param buffer storage for binary data
+ @param length size of buffer
+ @return bytes read
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+private:
+ static constexpr int kOpCount = kReplace_Op + 1;
+
+ // T
+ // [B N L R S]
+ // S
+ static constexpr int kRectRegionRuns = 7;
+
+ struct RunHead;
+
+ static RunHead* emptyRunHeadPtr() { return (SkRegion::RunHead*) -1; }
+ static constexpr RunHead* kRectRunHeadPtr = nullptr;
+
+ // allocate space for count runs
+ void allocateRuns(int count);
+ void allocateRuns(int count, int ySpanCount, int intervalCount);
+ void allocateRuns(const RunHead& src);
+
+ SkDEBUGCODE(void dump() const;)
+
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ void freeRuns();
+
+ /**
+ * Return the runs from this region, consing up fake runs if the region
+ * is empty or a rect. In those 2 cases, we use tmpStorage to hold the
+ * run data.
+ */
+ const RunType* getRuns(RunType tmpStorage[], int* intervals) const;
+
+ // This is called with runs[] that do not yet have their interval-count
+ // field set on each scanline. That is computed as part of this call
+ // (inside ComputeRunBounds).
+ bool setRuns(RunType runs[], int count);
+
+ int count_runtype_values(int* itop, int* ibot) const;
+
+ bool isValid() const;
+
+ static void BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]);
+
+ // If the runs define a simple rect, return true and set bounds to that
+ // rect. If not, return false and ignore bounds.
+ static bool RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds);
+
+ /**
+ * If the last arg is null, just return if the result is non-empty,
+ * else store the result in the last arg.
+ */
+ static bool Oper(const SkRegion&, const SkRegion&, SkRegion::Op, SkRegion*);
+
+ friend struct RunHead;
+ friend class Iterator;
+ friend class Spanerator;
+ friend class SkRegionPriv;
+ friend class SkRgnBuilder;
+ friend class SkFlatRegion;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkScalar.h b/gfx/skia/skia/include/core/SkScalar.h
new file mode 100644
index 0000000000..64fea69df4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkScalar.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalar_DEFINED
+#define SkScalar_DEFINED
+
+#include "include/private/SkFloatingPoint.h"
+
+#undef SK_SCALAR_IS_FLOAT
+#define SK_SCALAR_IS_FLOAT 1
+
+typedef float SkScalar;
+
+#define SK_Scalar1 1.0f
+#define SK_ScalarHalf 0.5f
+#define SK_ScalarSqrt2 SK_FloatSqrt2
+#define SK_ScalarPI SK_FloatPI
+#define SK_ScalarTanPIOver8 0.414213562f
+#define SK_ScalarRoot2Over2 0.707106781f
+#define SK_ScalarMax 3.402823466e+38f
+#define SK_ScalarInfinity SK_FloatInfinity
+#define SK_ScalarNegativeInfinity SK_FloatNegativeInfinity
+#define SK_ScalarNaN SK_FloatNaN
+
+#define SkScalarFloorToScalar(x) sk_float_floor(x)
+#define SkScalarCeilToScalar(x) sk_float_ceil(x)
+#define SkScalarRoundToScalar(x) sk_float_floor((x) + 0.5f)
+#define SkScalarTruncToScalar(x) sk_float_trunc(x)
+
+#define SkScalarFloorToInt(x) sk_float_floor2int(x)
+#define SkScalarCeilToInt(x) sk_float_ceil2int(x)
+#define SkScalarRoundToInt(x) sk_float_round2int(x)
+
+#define SkScalarAbs(x) sk_float_abs(x)
+#define SkScalarCopySign(x, y) sk_float_copysign(x, y)
+#define SkScalarMod(x, y) sk_float_mod(x,y)
+#define SkScalarSqrt(x) sk_float_sqrt(x)
+#define SkScalarPow(b, e) sk_float_pow(b, e)
+
+#define SkScalarSin(radians) (float)sk_float_sin(radians)
+#define SkScalarCos(radians) (float)sk_float_cos(radians)
+#define SkScalarTan(radians) (float)sk_float_tan(radians)
+#define SkScalarASin(val) (float)sk_float_asin(val)
+#define SkScalarACos(val) (float)sk_float_acos(val)
+#define SkScalarATan2(y, x) (float)sk_float_atan2(y,x)
+#define SkScalarExp(x) (float)sk_float_exp(x)
+#define SkScalarLog(x) (float)sk_float_log(x)
+#define SkScalarLog2(x) (float)sk_float_log2(x)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define SkIntToScalar(x) static_cast<SkScalar>(x)
+#define SkIntToFloat(x) static_cast<float>(x)
+#define SkScalarTruncToInt(x) sk_float_saturate2int(x)
+
+#define SkScalarToFloat(x) static_cast<float>(x)
+#define SkFloatToScalar(x) static_cast<SkScalar>(x)
+#define SkScalarToDouble(x) static_cast<double>(x)
+#define SkDoubleToScalar(x) sk_double_to_float(x)
+
+#define SK_ScalarMin (-SK_ScalarMax)
+
+static inline bool SkScalarIsNaN(SkScalar x) { return x != x; }
+
+/** Returns true if x is not NaN and not infinite
+ */
+static inline bool SkScalarIsFinite(SkScalar x) { return sk_float_isfinite(x); }
+
+static inline bool SkScalarsAreFinite(SkScalar a, SkScalar b) {
+ return sk_floats_are_finite(a, b);
+}
+
+static inline bool SkScalarsAreFinite(const SkScalar array[], int count) {
+ return sk_floats_are_finite(array, count);
+}
+
+/**
+ * Variant of SkScalarRoundToInt, that performs the rounding step (adding 0.5) explicitly using
+ * double, to avoid possibly losing the low bit(s) of the answer before calling floor().
+ *
+ * This routine will likely be slower than SkScalarRoundToInt(), and should only be used when the
+ * extra precision is known to be valuable.
+ *
+ * In particular, this catches the following case:
+ * SkScalar x = 0.49999997;
+ * int ix = SkScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- fails
+ * ix = SkDScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- succeeds
+ */
+static inline int SkDScalarRoundToInt(SkScalar x) {
+ double xx = x;
+ xx += 0.5;
+ return (int)floor(xx);
+}
+
+/** Returns the fractional part of the scalar. */
+static inline SkScalar SkScalarFraction(SkScalar x) {
+ return x - SkScalarTruncToScalar(x);
+}
+
+static inline SkScalar SkScalarClampMax(SkScalar x, SkScalar max) {
+ x = SkTMin(x, max);
+ x = SkTMax<SkScalar>(x, 0);
+ return x;
+}
+
+static inline SkScalar SkScalarPin(SkScalar x, SkScalar min, SkScalar max) {
+ return SkTPin(x, min, max);
+}
+
+static inline SkScalar SkScalarSquare(SkScalar x) { return x * x; }
+
+#define SkScalarInvert(x) sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(SK_Scalar1, (x))
+#define SkScalarAve(a, b) (((a) + (b)) * SK_ScalarHalf)
+#define SkScalarHalf(a) ((a) * SK_ScalarHalf)
+
+#define SkDegreesToRadians(degrees) ((degrees) * (SK_ScalarPI / 180))
+#define SkRadiansToDegrees(radians) ((radians) * (180 / SK_ScalarPI))
+
+static inline SkScalar SkMaxScalar(SkScalar a, SkScalar b) { return a > b ? a : b; }
+static inline SkScalar SkMinScalar(SkScalar a, SkScalar b) { return a < b ? a : b; }
+
+static inline bool SkScalarIsInt(SkScalar x) {
+ return x == SkScalarFloorToScalar(x);
+}
+
+/**
+ *  Returns -1, 0, or 1 depending on the sign of x:
+ * -1 if x < 0
+ * 0 if x == 0
+ * 1 if x > 0
+ */
+static inline int SkScalarSignAsInt(SkScalar x) {
+ return x < 0 ? -1 : (x > 0);
+}
+
+// Scalar result version of above
+static inline SkScalar SkScalarSignAsScalar(SkScalar x) {
+ return x < 0 ? -SK_Scalar1 : ((x > 0) ? SK_Scalar1 : 0);
+}
+
+#define SK_ScalarNearlyZero (SK_Scalar1 / (1 << 12))
+
+static inline bool SkScalarNearlyZero(SkScalar x,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x) <= tolerance;
+}
+
+static inline bool SkScalarNearlyEqual(SkScalar x, SkScalar y,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x-y) <= tolerance;
+}
+
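+// A minimal usage sketch (illustrative, not part of the API): tolerance-based
+// comparison with the default tolerance of SK_ScalarNearlyZero (1/4096).
+//
+//   SkScalar x = SkDegreesToRadians(180.0f) / SK_ScalarPI;  // ~1; rounding may intrude
+//   bool same = SkScalarNearlyEqual(x, 1.0f);               // true within tolerance
+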
+static inline float SkScalarSinSnapToZero(SkScalar radians) {
+ float v = SkScalarSin(radians);
+ return SkScalarNearlyZero(v) ? 0.0f : v;
+}
+
+static inline float SkScalarCosSnapToZero(SkScalar radians) {
+ float v = SkScalarCos(radians);
+ return SkScalarNearlyZero(v) ? 0.0f : v;
+}
+
+/** Linearly interpolate between A and B, based on t.
+ If t is 0, return A
+ If t is 1, return B
+ else interpolate.
+ t must be [0..SK_Scalar1]
+*/
+static inline SkScalar SkScalarInterp(SkScalar A, SkScalar B, SkScalar t) {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+ return A + (B - A) * t;
+}
+
+/** Interpolate along the function described by (keys[length], values[length])
+    for the passed searchKey. SearchKeys outside the range keys[0]-keys[length-1]
+ clamp to the min or max value. This function was inspired by a desire
+ to change the multiplier for thickness in fakeBold; therefore it assumes
+ the number of pairs (length) will be small, and a linear search is used.
+ Repeated keys are allowed for discontinuous functions (so long as keys is
+ monotonically increasing), and if key is the value of a repeated scalar in
+ keys, the first one will be used. However, that may change if a binary
+ search is used.
+*/
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length);
+
+/*
+ * Helper to compare an array of scalars.
+ */
+static inline bool SkScalarsEqual(const SkScalar a[], const SkScalar b[], int n) {
+ SkASSERT(n >= 0);
+ for (int i = 0; i < n; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSerialProcs.h b/gfx/skia/skia/include/core/SkSerialProcs.h
new file mode 100644
index 0000000000..87e10d847c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSerialProcs.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSerialProcs_DEFINED
+#define SkSerialProcs_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkTypeface.h"
+
+/**
+ * A serial-proc is asked to serialize the specified object (e.g. picture or image).
+ * If a data object is returned, it will be used (even if it is zero-length).
+ * If null is returned, then Skia will take its default action.
+ *
+ * The default action for pictures is to use Skia's internal format.
+ * The default action for images is to encode either in its native format or PNG.
+ * The default action for typefaces is to use Skia's internal format.
+ */
+
+typedef sk_sp<SkData> (*SkSerialPictureProc)(SkPicture*, void* ctx);
+typedef sk_sp<SkData> (*SkSerialImageProc)(SkImage*, void* ctx);
+typedef sk_sp<SkData> (*SkSerialTypefaceProc)(SkTypeface*, void* ctx);
+
+/**
+ * Called with the encoded form of a picture (previously written with a custom
+ * SkSerialPictureProc proc). Return a picture object, or nullptr indicating failure.
+ */
+typedef sk_sp<SkPicture> (*SkDeserialPictureProc)(const void* data, size_t length, void* ctx);
+
+/**
+ *  Called with the encoded form of an image. The proc can return an image object, or if it
+ * returns nullptr, then Skia will take its default action to try to create an image from the data.
+ *
+ *  Note that unlike SkDeserialPictureProc and SkDeserialTypefaceProc, returning nullptr from this
+ * does not indicate failure, but is a signal for Skia to take its default action.
+ */
+typedef sk_sp<SkImage> (*SkDeserialImageProc)(const void* data, size_t length, void* ctx);
+
+/**
+ * Called with the encoded form of a typeface (previously written with a custom
+ * SkSerialTypefaceProc proc). Return a typeface object, or nullptr indicating failure.
+ */
+typedef sk_sp<SkTypeface> (*SkDeserialTypefaceProc)(const void* data, size_t length, void* ctx);
+
+struct SK_API SkSerialProcs {
+ SkSerialPictureProc fPictureProc = nullptr;
+ void* fPictureCtx = nullptr;
+
+ SkSerialImageProc fImageProc = nullptr;
+ void* fImageCtx = nullptr;
+
+ SkSerialTypefaceProc fTypefaceProc = nullptr;
+ void* fTypefaceCtx = nullptr;
+};
+
+struct SK_API SkDeserialProcs {
+ SkDeserialPictureProc fPictureProc = nullptr;
+ void* fPictureCtx = nullptr;
+
+ SkDeserialImageProc fImageProc = nullptr;
+ void* fImageCtx = nullptr;
+
+ SkDeserialTypefaceProc fTypefaceProc = nullptr;
+ void* fTypefaceCtx = nullptr;
+};
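+
+// A minimal usage sketch (illustrative, not part of the API): routing image
+// serialization through a custom callback. encodeWithMyCodec and myCodecState
+// are hypothetical.
+//
+//   static sk_sp<SkData> myImageProc(SkImage* img, void* ctx) {
+//       return encodeWithMyCodec(img, ctx);  // nullptr -> Skia default action
+//   }
+//
+//   SkSerialProcs procs;
+//   procs.fImageProc = myImageProc;
+//   procs.fImageCtx  = myCodecState;
+//   // then pass &procs to a serializer, e.g. SkPicture::serialize(&procs)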
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkShader.h b/gfx/skia/skia/include/core/SkShader.h
new file mode 100644
index 0000000000..ef6ba1bdb7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkShader.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShader_DEFINED
+#define SkShader_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkTileMode.h"
+
+class SkArenaAlloc;
+class SkBitmap;
+class SkColorFilter;
+class SkColorSpace;
+class SkImage;
+class SkPath;
+class SkPicture;
+class SkRasterPipeline;
+class GrContext;
+class GrFragmentProcessor;
+
+/** \class SkShader
+ *
+ * Shaders specify the source color(s) for what is being drawn. If a paint
+ * has no shader, then the paint's color is used. If the paint has a
+ *  shader, then the shader's color(s) are used instead, but they are
+ * modulated by the paint's alpha. This makes it easy to create a shader
+ * once (e.g. bitmap tiling or gradient) and then change its transparency
+ * w/o having to modify the original shader... only the paint's alpha needs
+ * to be modified.
+ */
+class SK_API SkShader : public SkFlattenable {
+public:
+ /**
+ * Returns true if the shader is guaranteed to produce only opaque
+ * colors, subject to the SkPaint using the shader to apply an opaque
+ * alpha value. Subclasses should override this to allow some
+ * optimizations.
+ */
+ virtual bool isOpaque() const { return false; }
+
+ /**
+ * Iff this shader is backed by a single SkImage, return its ptr (the caller must ref this
+ * if they want to keep it longer than the lifetime of the shader). If not, return nullptr.
+ */
+ SkImage* isAImage(SkMatrix* localMatrix, SkTileMode xy[2]) const;
+
+ bool isAImage() const {
+ return this->isAImage(nullptr, (SkTileMode*)nullptr) != nullptr;
+ }
+
+ /**
+ * If the shader subclass can be represented as a gradient, asAGradient
+ * returns the matching GradientType enum (or kNone_GradientType if it
+ * cannot). Also, if info is not null, asAGradient populates info with
+ * the relevant (see below) parameters for the gradient. fColorCount
+ * is both an input and output parameter. On input, it indicates how
+ * many entries in fColors and fColorOffsets can be used, if they are
+ * non-NULL. After asAGradient has run, fColorCount indicates how
+ * many color-offset pairs there are in the gradient. If there is
+ * insufficient space to store all of the color-offset pairs, fColors
+ * and fColorOffsets will not be altered. fColorOffsets specifies
+ * where on the range of 0 to 1 to transition to the given color.
+     *  The meaning of fPoint and fRadius is dependent on the type of gradient.
+ *
+ * None:
+ * info is ignored.
+ * Color:
+ * fColorOffsets[0] is meaningless.
+ * Linear:
+ * fPoint[0] and fPoint[1] are the end-points of the gradient
+ * Radial:
+ * fPoint[0] and fRadius[0] are the center and radius
+ * Conical:
+ * fPoint[0] and fRadius[0] are the center and radius of the 1st circle
+ * fPoint[1] and fRadius[1] are the center and radius of the 2nd circle
+ * Sweep:
+ * fPoint[0] is the center of the sweep.
+ */
+
+ enum GradientType {
+ kNone_GradientType,
+ kColor_GradientType,
+ kLinear_GradientType,
+ kRadial_GradientType,
+ kSweep_GradientType,
+ kConical_GradientType,
+ kLast_GradientType = kConical_GradientType,
+ };
+
+ struct GradientInfo {
+ int fColorCount; //!< In-out parameter, specifies passed size
+ // of fColors/fColorOffsets on input, and
+ // actual number of colors/offsets on
+ // output.
+ SkColor* fColors; //!< The colors in the gradient.
+ SkScalar* fColorOffsets; //!< The unit offset for color transitions.
+ SkPoint fPoint[2]; //!< Type specific, see above.
+ SkScalar fRadius[2]; //!< Type specific, see above.
+ SkTileMode fTileMode;
+ uint32_t fGradientFlags; //!< see SkGradientShader::Flags
+ };
+
+ // DEPRECATED. skbug.com/8941
+ virtual GradientType asAGradient(GradientInfo* info) const;
+
+ //////////////////////////////////////////////////////////////////////////
+ // Methods to create combinations or variants of shaders
+
+ /**
+ * Return a shader that will apply the specified localMatrix to this shader.
+ * The specified matrix will be applied before any matrix associated with this shader.
+ */
+ sk_sp<SkShader> makeWithLocalMatrix(const SkMatrix&) const;
+
+ /**
+ * Create a new shader that produces the same colors as invoking this shader and then applying
+ * the colorfilter.
+ */
+ sk_sp<SkShader> makeWithColorFilter(sk_sp<SkColorFilter>) const;
+
+private:
+ SkShader() = default;
+ friend class SkShaderBase;
+
+ typedef SkFlattenable INHERITED;
+};
+
+class SK_API SkShaders {
+public:
+ static sk_sp<SkShader> Empty();
+ static sk_sp<SkShader> Color(SkColor);
+ static sk_sp<SkShader> Color(const SkColor4f&, sk_sp<SkColorSpace>);
+ static sk_sp<SkShader> Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* localMatrix = nullptr);
+ static sk_sp<SkShader> Lerp(float t, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* localMatrix = nullptr);
+
+ static sk_sp<SkShader> Lerp(sk_sp<SkShader> red, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* localMatrix = nullptr);
+
+private:
+ SkShaders() = delete;
+};
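+
+// A minimal usage sketch (illustrative, not part of the API): building a
+// shader from the factories above and attaching it to a paint.
+//
+//   sk_sp<SkShader> red  = SkShaders::Color(SK_ColorRED);
+//   sk_sp<SkShader> blue = SkShaders::Color(SK_ColorBLUE);
+//   sk_sp<SkShader> mix  = SkShaders::Lerp(0.25f, red, blue);
+//
+//   SkPaint paint;
+//   paint.setShader(mix);  // paint alpha still modulates the shader's output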
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSize.h b/gfx/skia/skia/include/core/SkSize.h
new file mode 100644
index 0000000000..87be93d007
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSize.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSize_DEFINED
+#define SkSize_DEFINED
+
+#include "include/core/SkScalar.h"
+
+struct SkISize {
+ int32_t fWidth;
+ int32_t fHeight;
+
+ static constexpr SkISize Make(int32_t w, int32_t h) { return {w, h}; }
+
+ static constexpr SkISize MakeEmpty() { return {0, 0}; }
+
+ void set(int32_t w, int32_t h) { *this = SkISize{w, h}; }
+
+ /** Returns true iff fWidth == 0 && fHeight == 0
+ */
+ bool isZero() const { return 0 == fWidth && 0 == fHeight; }
+
+    /** Returns true if either width or height is <= 0 */
+ bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; }
+
+ /** Set the width and height to 0 */
+ void setEmpty() { fWidth = fHeight = 0; }
+
+ int32_t width() const { return fWidth; }
+ int32_t height() const { return fHeight; }
+
+ bool equals(int32_t w, int32_t h) const { return fWidth == w && fHeight == h; }
+};
+
+static inline bool operator==(const SkISize& a, const SkISize& b) {
+ return a.fWidth == b.fWidth && a.fHeight == b.fHeight;
+}
+
+static inline bool operator!=(const SkISize& a, const SkISize& b) { return !(a == b); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkSize {
+ SkScalar fWidth;
+ SkScalar fHeight;
+
+ static SkSize Make(SkScalar w, SkScalar h) { return {w, h}; }
+
+ static SkSize Make(const SkISize& src) {
+ return {SkIntToScalar(src.width()), SkIntToScalar(src.height())};
+ }
+
+ SkSize& operator=(const SkISize& src) {
+ return *this = SkSize{SkIntToScalar(src.fWidth), SkIntToScalar(src.fHeight)};
+ }
+
+ static SkSize MakeEmpty() { return {0, 0}; }
+
+ void set(SkScalar w, SkScalar h) { *this = SkSize{w, h}; }
+
+ /** Returns true iff fWidth == 0 && fHeight == 0
+ */
+ bool isZero() const { return 0 == fWidth && 0 == fHeight; }
+
+    /** Returns true if either width or height is <= 0 */
+ bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; }
+
+ /** Set the width and height to 0 */
+ void setEmpty() { *this = SkSize{0, 0}; }
+
+ SkScalar width() const { return fWidth; }
+ SkScalar height() const { return fHeight; }
+
+ bool equals(SkScalar w, SkScalar h) const { return fWidth == w && fHeight == h; }
+
+ SkISize toRound() const { return {SkScalarRoundToInt(fWidth), SkScalarRoundToInt(fHeight)}; }
+
+ SkISize toCeil() const { return {SkScalarCeilToInt(fWidth), SkScalarCeilToInt(fHeight)}; }
+
+ SkISize toFloor() const { return {SkScalarFloorToInt(fWidth), SkScalarFloorToInt(fHeight)}; }
+};
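+
+// A minimal usage sketch (illustrative, not part of the API): converting a
+// fractional SkSize to integer dimensions.
+//
+//   SkSize s = SkSize::Make(10.2f, 20.7f);
+//   SkISize r = s.toRound();  // {10, 21}
+//   SkISize c = s.toCeil();   // {11, 21}
+//   SkISize f = s.toFloor();  // {10, 20}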
+
+static inline bool operator==(const SkSize& a, const SkSize& b) {
+ return a.fWidth == b.fWidth && a.fHeight == b.fHeight;
+}
+
+static inline bool operator!=(const SkSize& a, const SkSize& b) { return !(a == b); }
+#endif
diff --git a/gfx/skia/skia/include/core/SkStream.h b/gfx/skia/skia/include/core/SkStream.h
new file mode 100644
index 0000000000..659880d2d6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStream.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStream_DEFINED
+#define SkStream_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkTo.h"
+
+#include <memory.h>
+
+class SkStream;
+class SkStreamRewindable;
+class SkStreamSeekable;
+class SkStreamAsset;
+class SkStreamMemory;
+
+/**
+ * SkStream -- abstraction for a source of bytes. Subclasses can be backed by
+ * memory, or a file, or something else.
+ *
+ * NOTE:
+ *
+ * Classic "streams" APIs are sort of async, in that on a request for N
+ * bytes, they may return fewer than N bytes on a given call, in which case
+ * the caller can "try again" to get more bytes, eventually (modulo an error)
+ * receiving their total N bytes.
+ *
+ * Skia streams behave differently. They are effectively synchronous, and will
+ * always return all N bytes of the request if possible. If they return fewer
+ * (the read() call returns the number of bytes read) then that means there is
+ * no more data (at EOF or hit an error). The caller should *not* call again
+ * in hopes of fulfilling more of the request.
+ */
+class SK_API SkStream {
+public:
+ virtual ~SkStream() {}
+ SkStream() {}
+
+ /**
+ * Attempts to open the specified file as a stream, returns nullptr on failure.
+ */
+ static std::unique_ptr<SkStreamAsset> MakeFromFile(const char path[]);
+
+ /** Reads or skips size number of bytes.
+ * If buffer == NULL, skip size bytes, return how many were skipped.
+ * If buffer != NULL, copy size bytes into buffer, return how many were copied.
+ * @param buffer when NULL skip size bytes, otherwise copy size bytes into buffer
+ * @param size the number of bytes to skip or copy
+ * @return the number of bytes actually read.
+ */
+ virtual size_t read(void* buffer, size_t size) = 0;
+
+ /** Skip size number of bytes.
+     *  @return the actual number of bytes that could be skipped.
+ */
+ size_t skip(size_t size) {
+ return this->read(nullptr, size);
+ }
+
+ /**
+ * Attempt to peek at size bytes.
+ * If this stream supports peeking, copy min(size, peekable bytes) into
+ * buffer, and return the number of bytes copied.
+ * If the stream does not support peeking, or cannot peek any bytes,
+ * return 0 and leave buffer unchanged.
+ * The stream is guaranteed to be in the same visible state after this
+ * call, regardless of success or failure.
+ * @param buffer Must not be NULL, and must be at least size bytes. Destination
+ * to copy bytes.
+ * @param size Number of bytes to copy.
+ * @return The number of bytes peeked/copied.
+ */
+ virtual size_t peek(void* /*buffer*/, size_t /*size*/) const { return 0; }
+
+ /** Returns true when all the bytes in the stream have been read.
+ * This may return true early (when there are no more bytes to be read)
+ * or late (after the first unsuccessful read).
+ */
+ virtual bool isAtEnd() const = 0;
+
+ bool SK_WARN_UNUSED_RESULT readS8(int8_t*);
+ bool SK_WARN_UNUSED_RESULT readS16(int16_t*);
+ bool SK_WARN_UNUSED_RESULT readS32(int32_t*);
+
+ bool SK_WARN_UNUSED_RESULT readU8(uint8_t* i) { return this->readS8((int8_t*)i); }
+ bool SK_WARN_UNUSED_RESULT readU16(uint16_t* i) { return this->readS16((int16_t*)i); }
+ bool SK_WARN_UNUSED_RESULT readU32(uint32_t* i) { return this->readS32((int32_t*)i); }
+
+ bool SK_WARN_UNUSED_RESULT readBool(bool* b) {
+ uint8_t i;
+ if (!this->readU8(&i)) { return false; }
+ *b = (i != 0);
+ return true;
+ }
+ bool SK_WARN_UNUSED_RESULT readScalar(SkScalar*);
+ bool SK_WARN_UNUSED_RESULT readPackedUInt(size_t*);
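+
+    // A minimal usage sketch (illustrative, not part of the API): given a
+    // SkStream* stream, each typed read reports success, so a short or failed
+    // stream is detected instead of being silently truncated.
+    //
+    //   uint32_t magic;
+    //   SkScalar width;
+    //   if (!stream->readU32(&magic) || !stream->readScalar(&width)) {
+    //       return false;  // stream ended early or hit an error
+    //   }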
+
+//SkStreamRewindable
+ /** Rewinds to the beginning of the stream. Returns true if the stream is known
+ * to be at the beginning after this call returns.
+ */
+ virtual bool rewind() { return false; }
+
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned at the beginning of its data.
+ */
+ std::unique_ptr<SkStream> duplicate() const {
+ return std::unique_ptr<SkStream>(this->onDuplicate());
+ }
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned the same as this stream.
+ */
+ std::unique_ptr<SkStream> fork() const {
+ return std::unique_ptr<SkStream>(this->onFork());
+ }
+
+//SkStreamSeekable
+    /** Returns true if this stream can report its current position. */
+ virtual bool hasPosition() const { return false; }
+ /** Returns the current position in the stream. If this cannot be done, returns 0. */
+ virtual size_t getPosition() const { return 0; }
+
+ /** Seeks to an absolute position in the stream. If this cannot be done, returns false.
+ * If an attempt is made to seek past the end of the stream, the position will be set
+ * to the end of the stream.
+ */
+ virtual bool seek(size_t /*position*/) { return false; }
+
+    /** Seeks to a relative offset in the stream. If this cannot be done, returns false.
+ * If an attempt is made to move to a position outside the stream, the position will be set
+ * to the closest point within the stream (beginning or end).
+ */
+ virtual bool move(long /*offset*/) { return false; }
+
+//SkStreamAsset
+    /** Returns true if this stream can report its total length. */
+ virtual bool hasLength() const { return false; }
+ /** Returns the total length of the stream. If this cannot be done, returns 0. */
+ virtual size_t getLength() const { return 0; }
+
+//SkStreamMemory
+ /** Returns the starting address for the data. If this cannot be done, returns NULL. */
+ //TODO: replace with virtual const SkData* getData()
+ virtual const void* getMemoryBase() { return nullptr; }
+
+private:
+ virtual SkStream* onDuplicate() const { return nullptr; }
+ virtual SkStream* onFork() const { return nullptr; }
+
+ SkStream(SkStream&&) = delete;
+ SkStream(const SkStream&) = delete;
+ SkStream& operator=(SkStream&&) = delete;
+ SkStream& operator=(const SkStream&) = delete;
+};
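+
+// A minimal usage sketch of the synchronous contract described above. The
+// file path and buffering are illustrative assumptions (std::vector needs
+// <vector>); error handling is elided.
+//
+//     std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile("data.bin");
+//     if (stream) {
+//         std::vector<uint8_t> buf(stream->getLength());
+//         size_t bytesRead = stream->read(buf.data(), buf.size());
+//         // bytesRead < buf.size() means EOF or an error; per the contract
+//         // above, do not call read() again hoping for more bytes.
+//     }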
+
+/** SkStreamRewindable is a SkStream for which rewind and duplicate are required. */
+class SK_API SkStreamRewindable : public SkStream {
+public:
+ bool rewind() override = 0;
+ std::unique_ptr<SkStreamRewindable> duplicate() const {
+ return std::unique_ptr<SkStreamRewindable>(this->onDuplicate());
+ }
+private:
+ SkStreamRewindable* onDuplicate() const override = 0;
+};
+
+/** SkStreamSeekable is a SkStreamRewindable for which position, seek, move, and fork are required. */
+class SK_API SkStreamSeekable : public SkStreamRewindable {
+public:
+ std::unique_ptr<SkStreamSeekable> duplicate() const {
+ return std::unique_ptr<SkStreamSeekable>(this->onDuplicate());
+ }
+
+ bool hasPosition() const override { return true; }
+ size_t getPosition() const override = 0;
+ bool seek(size_t position) override = 0;
+ bool move(long offset) override = 0;
+
+ std::unique_ptr<SkStreamSeekable> fork() const {
+ return std::unique_ptr<SkStreamSeekable>(this->onFork());
+ }
+private:
+ SkStreamSeekable* onDuplicate() const override = 0;
+ SkStreamSeekable* onFork() const override = 0;
+};
+
+/** SkStreamAsset is a SkStreamSeekable for which getLength is required. */
+class SK_API SkStreamAsset : public SkStreamSeekable {
+public:
+ bool hasLength() const override { return true; }
+ size_t getLength() const override = 0;
+
+ std::unique_ptr<SkStreamAsset> duplicate() const {
+ return std::unique_ptr<SkStreamAsset>(this->onDuplicate());
+ }
+ std::unique_ptr<SkStreamAsset> fork() const {
+ return std::unique_ptr<SkStreamAsset>(this->onFork());
+ }
+private:
+ SkStreamAsset* onDuplicate() const override = 0;
+ SkStreamAsset* onFork() const override = 0;
+};
+
+/** SkStreamMemory is a SkStreamAsset for which getMemoryBase is required. */
+class SK_API SkStreamMemory : public SkStreamAsset {
+public:
+ const void* getMemoryBase() override = 0;
+
+ std::unique_ptr<SkStreamMemory> duplicate() const {
+ return std::unique_ptr<SkStreamMemory>(this->onDuplicate());
+ }
+ std::unique_ptr<SkStreamMemory> fork() const {
+ return std::unique_ptr<SkStreamMemory>(this->onFork());
+ }
+private:
+ SkStreamMemory* onDuplicate() const override = 0;
+ SkStreamMemory* onFork() const override = 0;
+};
+
+class SK_API SkWStream {
+public:
+ virtual ~SkWStream();
+ SkWStream() {}
+
+ /** Called to write bytes to a SkWStream. Returns true on success
+ @param buffer the address of at least size bytes to be written to the stream
+ @param size The number of bytes in buffer to write to the stream
+ @return true on success
+ */
+ virtual bool write(const void* buffer, size_t size) = 0;
+ virtual void flush();
+
+ virtual size_t bytesWritten() const = 0;
+
+ // helpers
+
+ bool write8(U8CPU value) {
+ uint8_t v = SkToU8(value);
+ return this->write(&v, 1);
+ }
+ bool write16(U16CPU value) {
+ uint16_t v = SkToU16(value);
+ return this->write(&v, 2);
+ }
+ bool write32(uint32_t v) {
+ return this->write(&v, 4);
+ }
+
+ bool writeText(const char text[]) {
+ SkASSERT(text);
+ return this->write(text, strlen(text));
+ }
+
+ bool newline() { return this->write("\n", strlen("\n")); }
+
+ bool writeDecAsText(int32_t);
+ bool writeBigDecAsText(int64_t, int minDigits = 0);
+ bool writeHexAsText(uint32_t, int minDigits = 0);
+ bool writeScalarAsText(SkScalar);
+
+ bool writeBool(bool v) { return this->write8(v); }
+ bool writeScalar(SkScalar);
+ bool writePackedUInt(size_t);
+
+ bool writeStream(SkStream* input, size_t length);
+
+ /**
+ * This returns the number of bytes in the stream required to store
+ * 'value'.
+ */
+ static int SizeOfPackedUInt(size_t value);
+
+private:
+ SkWStream(const SkWStream&) = delete;
+ SkWStream& operator=(const SkWStream&) = delete;
+};
+
+class SK_API SkNullWStream : public SkWStream {
+public:
+ SkNullWStream() : fBytesWritten(0) {}
+
+    bool write(const void*, size_t n) override { fBytesWritten += n; return true; }
+ void flush() override {}
+ size_t bytesWritten() const override { return fBytesWritten; }
+
+private:
+ size_t fBytesWritten;
+};
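+
+// Because SkNullWStream discards its input while still counting it, it can
+// serve as a first pass to measure an output size. 'serialize' is a
+// hypothetical caller-supplied function taking an SkWStream*.
+//
+//     SkNullWStream counter;
+//     serialize(&counter, thing);               // measure only; nothing stored
+//     size_t byteCount = counter.bytesWritten();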
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+#include <stdio.h>
+
+/** A stream that wraps a C FILE* file stream. */
+class SK_API SkFILEStream : public SkStreamAsset {
+public:
+ /** Initialize the stream by calling sk_fopen on the specified path.
+ * This internal stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(const char path[] = nullptr);
+
+ /** Initialize the stream with an existing C FILE stream.
+ * The current position of the C FILE stream will be considered the
+ * beginning of the SkFILEStream.
+ * The C FILE stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(FILE* file);
+
+ ~SkFILEStream() override;
+
+ static std::unique_ptr<SkFILEStream> Make(const char path[]) {
+ std::unique_ptr<SkFILEStream> stream(new SkFILEStream(path));
+ return stream->isValid() ? std::move(stream) : nullptr;
+ }
+
+ /** Returns true if the current path could be opened. */
+ bool isValid() const { return fFILE != nullptr; }
+
+ /** Close this SkFILEStream. */
+ void close();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ bool rewind() override;
+ std::unique_ptr<SkStreamAsset> duplicate() const {
+ return std::unique_ptr<SkStreamAsset>(this->onDuplicate());
+ }
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+
+ std::unique_ptr<SkStreamAsset> fork() const {
+ return std::unique_ptr<SkStreamAsset>(this->onFork());
+ }
+
+ size_t getLength() const override;
+
+private:
+ explicit SkFILEStream(std::shared_ptr<FILE>, size_t size, size_t offset);
+ explicit SkFILEStream(std::shared_ptr<FILE>, size_t size, size_t offset, size_t originalOffset);
+
+ SkStreamAsset* onDuplicate() const override;
+ SkStreamAsset* onFork() const override;
+
+ std::shared_ptr<FILE> fFILE;
+    // My own counsel will I keep on sizes and offsets.
+ size_t fSize;
+ size_t fOffset;
+ size_t fOriginalOffset;
+
+ typedef SkStreamAsset INHERITED;
+};
+
+class SK_API SkMemoryStream : public SkStreamMemory {
+public:
+ SkMemoryStream();
+
+ /** We allocate (and free) the memory. Write to it via getMemoryBase() */
+ SkMemoryStream(size_t length);
+
+ /** If copyData is true, the stream makes a private copy of the data. */
+ SkMemoryStream(const void* data, size_t length, bool copyData = false);
+
+ /** Creates the stream to read from the specified data */
+ SkMemoryStream(sk_sp<SkData> data);
+
+ /** Returns a stream with a copy of the input data. */
+ static std::unique_ptr<SkMemoryStream> MakeCopy(const void* data, size_t length);
+
+ /** Returns a stream with a bare pointer reference to the input data. */
+ static std::unique_ptr<SkMemoryStream> MakeDirect(const void* data, size_t length);
+
+ /** Returns a stream with a shared reference to the input data. */
+ static std::unique_ptr<SkMemoryStream> Make(sk_sp<SkData> data);
+
+ /** Resets the stream to the specified data and length,
+ just like the constructor.
+        If copyData is true, the stream makes a private copy of the data.
+ */
+ virtual void setMemory(const void* data, size_t length,
+ bool copyData = false);
+ /** Replace any memory buffer with the specified buffer. The caller
+ must have allocated data with sk_malloc or sk_realloc, since it
+ will be freed with sk_free.
+ */
+ void setMemoryOwned(const void* data, size_t length);
+
+ sk_sp<SkData> asData() const { return fData; }
+ void setData(sk_sp<SkData> data);
+
+ void skipToAlign4();
+ const void* getAtPos();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ size_t peek(void* buffer, size_t size) const override;
+
+ bool rewind() override;
+
+ std::unique_ptr<SkMemoryStream> duplicate() const {
+ return std::unique_ptr<SkMemoryStream>(this->onDuplicate());
+ }
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+
+ std::unique_ptr<SkMemoryStream> fork() const {
+ return std::unique_ptr<SkMemoryStream>(this->onFork());
+ }
+
+ size_t getLength() const override;
+
+ const void* getMemoryBase() override;
+
+private:
+ SkMemoryStream* onDuplicate() const override;
+ SkMemoryStream* onFork() const override;
+
+ sk_sp<SkData> fData;
+ size_t fOffset;
+
+ typedef SkStreamMemory INHERITED;
+};
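+
+// A sketch contrasting peek() and read() on SkMemoryStream, which supports
+// peeking; the literal data is an illustrative assumption.
+//
+//     const char kData[] = "abcdef";
+//     SkMemoryStream stream(kData, sizeof(kData) - 1, /*copyData=*/false);
+//     char buf[4];
+//     size_t viaPeek = stream.peek(buf, sizeof(buf));  // position unchanged
+//     size_t viaRead = stream.read(buf, sizeof(buf));  // position advances by 4
+//     SkASSERT(viaPeek == viaRead);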
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+class SK_API SkFILEWStream : public SkWStream {
+public:
+ SkFILEWStream(const char path[]);
+ ~SkFILEWStream() override;
+
+ /** Returns true if the current path could be opened.
+ */
+ bool isValid() const { return fFILE != nullptr; }
+
+ bool write(const void* buffer, size_t size) override;
+ void flush() override;
+ void fsync();
+ size_t bytesWritten() const override;
+
+private:
+ FILE* fFILE;
+
+ typedef SkWStream INHERITED;
+};
+
+class SK_API SkDynamicMemoryWStream : public SkWStream {
+public:
+ SkDynamicMemoryWStream() = default;
+ SkDynamicMemoryWStream(SkDynamicMemoryWStream&&);
+ SkDynamicMemoryWStream& operator=(SkDynamicMemoryWStream&&);
+ ~SkDynamicMemoryWStream() override;
+
+ bool write(const void* buffer, size_t size) override;
+ size_t bytesWritten() const override;
+
+ bool read(void* buffer, size_t offset, size_t size);
+
+ /** More efficient version of read(dst, 0, bytesWritten()). */
+ void copyTo(void* dst) const;
+ bool writeToStream(SkWStream* dst) const;
+
+ /** Equivalent to copyTo() followed by reset(), but may save memory use. */
+ void copyToAndReset(void* dst);
+
+ /** Equivalent to writeToStream() followed by reset(), but may save memory use. */
+ bool writeToAndReset(SkWStream* dst);
+
+ /** Equivalent to writeToStream() followed by reset(), but may save memory use.
+ When the dst is also a SkDynamicMemoryWStream, the implementation is constant time. */
+ bool writeToAndReset(SkDynamicMemoryWStream* dst);
+
+ /** Prepend this stream to dst, resetting this. */
+ void prependToAndReset(SkDynamicMemoryWStream* dst);
+
+ /** Return the contents as SkData, and then reset the stream. */
+ sk_sp<SkData> detachAsData();
+
+ /** Reset, returning a reader stream with the current content. */
+ std::unique_ptr<SkStreamAsset> detachAsStream();
+
+ /** Reset the stream to its original, empty, state. */
+ void reset();
+ void padToAlign4();
+private:
+ struct Block;
+ Block* fHead = nullptr;
+ Block* fTail = nullptr;
+ size_t fBytesWrittenBeforeTail = 0;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ // For access to the Block type.
+ friend class SkBlockMemoryStream;
+ friend class SkBlockMemoryRefCnt;
+
+ typedef SkWStream INHERITED;
+};
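+
+// A sketch of the SkWStream helpers against an in-memory stream; the values
+// written are illustrative. Note that write32() stores host byte order.
+//
+//     SkDynamicMemoryWStream wstream;
+//     wstream.write32(42);                      // 4 bytes, host endianness
+//     wstream.writeText("hello");               // strlen("hello") bytes, no '\0'
+//     sk_sp<SkData> blob = wstream.detachAsData();  // take contents; stream resets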
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkString.h b/gfx/skia/skia/include/core/SkString.h
new file mode 100644
index 0000000000..fef0aa14a0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkString.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkString_DEFINED
+#define SkString_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTo.h"
+
+#include <stdarg.h>
+#include <string.h>
+#include <atomic>
+
+/* Some helper functions for C strings */
+static inline bool SkStrStartsWith(const char string[], const char prefixStr[]) {
+ SkASSERT(string);
+ SkASSERT(prefixStr);
+ return !strncmp(string, prefixStr, strlen(prefixStr));
+}
+static inline bool SkStrStartsWith(const char string[], const char prefixChar) {
+ SkASSERT(string);
+ return (prefixChar == *string);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]);
+bool SkStrEndsWith(const char string[], const char suffixChar);
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]);
+
+static inline int SkStrFind(const char string[], const char substring[]) {
+ const char *first = strstr(string, substring);
+ if (nullptr == first) return -1;
+ return SkToInt(first - &string[0]);
+}
+
+static inline int SkStrFindLastOf(const char string[], const char subchar) {
+ const char* last = strrchr(string, subchar);
+ if (nullptr == last) return -1;
+ return SkToInt(last - &string[0]);
+}
+
+static inline bool SkStrContains(const char string[], const char substring[]) {
+ SkASSERT(string);
+ SkASSERT(substring);
+ return (-1 != SkStrFind(string, substring));
+}
+static inline bool SkStrContains(const char string[], const char subchar) {
+ SkASSERT(string);
+ char tmp[2];
+ tmp[0] = subchar;
+ tmp[1] = '\0';
+ return (-1 != SkStrFind(string, tmp));
+}
+
+static inline char *SkStrDup(const char string[]) {
+    char* ret = (char*)sk_malloc_throw(strlen(string) + 1);
+    memcpy(ret, string, strlen(string) + 1);
+ return ret;
+}
+
+/*
+ * The SkStrAppend... methods will write into the provided buffer, assuming it is large enough.
+ * Each method has an associated const (e.g. SkStrAppendU32_MaxSize) which will be the largest
+ * value needed for that method's buffer.
+ *
+ * char storage[SkStrAppendU32_MaxSize];
+ * SkStrAppendU32(storage, value);
+ *
+ * Note : none of the SkStrAppend... methods write a terminating 0 to their buffers. Instead,
+ * the methods return the ptr to the end of the written part of the buffer. This can be used
+ * to compute the length, and/or know where to write a 0 if that is desired.
+ *
+ * char storage[SkStrAppendU32_MaxSize + 1];
+ * char* stop = SkStrAppendU32(storage, value);
+ * size_t len = stop - storage;
+ * *stop = 0; // valid, since storage was 1 byte larger than the max.
+ */
+
+#define SkStrAppendU32_MaxSize 10
+char* SkStrAppendU32(char buffer[], uint32_t);
+#define SkStrAppendU64_MaxSize 20
+char* SkStrAppendU64(char buffer[], uint64_t, int minDigits);
+
+#define SkStrAppendS32_MaxSize (SkStrAppendU32_MaxSize + 1)
+char* SkStrAppendS32(char buffer[], int32_t);
+#define SkStrAppendS64_MaxSize (SkStrAppendU64_MaxSize + 1)
+char* SkStrAppendS64(char buffer[], int64_t, int minDigits);
+
+/**
+ * Floats have at most 8 significant digits, so we limit our %g to that.
+ * However, the total string could be 15 characters: -1.2345678e-005
+ *
+ * In theory we should only expect up to 2 digits for the exponent, but on
+ * some platforms we have seen 3 (as in the example above).
+ */
+#define SkStrAppendScalar_MaxSize 15
+
+/**
+ *  Write the scalar in decimal format into buffer, and return a pointer to
+ * the next char after the last one written. Note: a terminating 0 is not
+ * written into buffer, which must be at least SkStrAppendScalar_MaxSize.
+ * Thus if the caller wants to add a 0 at the end, buffer must be at least
+ * SkStrAppendScalar_MaxSize + 1 bytes large.
+ */
+#define SkStrAppendScalar SkStrAppendFloat
+
+char* SkStrAppendFloat(char buffer[], float);
+
+/** \class SkString
+
+    Lightweight class for managing strings. Uses reference
+ counting to make string assignments and copies very fast
+ with no extra RAM cost. Assumes UTF8 encoding.
+*/
+class SK_API SkString {
+public:
+ SkString();
+ explicit SkString(size_t len);
+ explicit SkString(const char text[]);
+ SkString(const char text[], size_t len);
+ SkString(const SkString&);
+ SkString(SkString&&);
+ ~SkString();
+
+ bool isEmpty() const { return 0 == fRec->fLength; }
+ size_t size() const { return (size_t) fRec->fLength; }
+ const char* c_str() const { return fRec->data(); }
+ char operator[](size_t n) const { return this->c_str()[n]; }
+
+ bool equals(const SkString&) const;
+ bool equals(const char text[]) const;
+ bool equals(const char text[], size_t len) const;
+
+ bool startsWith(const char prefixStr[]) const {
+ return SkStrStartsWith(fRec->data(), prefixStr);
+ }
+ bool startsWith(const char prefixChar) const {
+ return SkStrStartsWith(fRec->data(), prefixChar);
+ }
+ bool endsWith(const char suffixStr[]) const {
+ return SkStrEndsWith(fRec->data(), suffixStr);
+ }
+ bool endsWith(const char suffixChar) const {
+ return SkStrEndsWith(fRec->data(), suffixChar);
+ }
+ bool contains(const char substring[]) const {
+ return SkStrContains(fRec->data(), substring);
+ }
+ bool contains(const char subchar) const {
+ return SkStrContains(fRec->data(), subchar);
+ }
+ int find(const char substring[]) const {
+ return SkStrFind(fRec->data(), substring);
+ }
+ int findLastOf(const char subchar) const {
+ return SkStrFindLastOf(fRec->data(), subchar);
+ }
+
+ friend bool operator==(const SkString& a, const SkString& b) {
+ return a.equals(b);
+ }
+ friend bool operator!=(const SkString& a, const SkString& b) {
+ return !a.equals(b);
+ }
+
+ // these methods edit the string
+
+ SkString& operator=(const SkString&);
+ SkString& operator=(SkString&&);
+ SkString& operator=(const char text[]);
+
+ char* writable_str();
+ char& operator[](size_t n) { return this->writable_str()[n]; }
+
+ void reset();
+ /** Destructive resize, does not preserve contents. */
+ void resize(size_t len) { this->set(nullptr, len); }
+ void set(const SkString& src) { *this = src; }
+ void set(const char text[]);
+ void set(const char text[], size_t len);
+
+ void insert(size_t offset, const SkString& src) { this->insert(offset, src.c_str(), src.size()); }
+ void insert(size_t offset, const char text[]);
+ void insert(size_t offset, const char text[], size_t len);
+ void insertUnichar(size_t offset, SkUnichar);
+ void insertS32(size_t offset, int32_t value);
+ void insertS64(size_t offset, int64_t value, int minDigits = 0);
+ void insertU32(size_t offset, uint32_t value);
+ void insertU64(size_t offset, uint64_t value, int minDigits = 0);
+ void insertHex(size_t offset, uint32_t value, int minDigits = 0);
+ void insertScalar(size_t offset, SkScalar);
+
+ void append(const SkString& str) { this->insert((size_t)-1, str); }
+ void append(const char text[]) { this->insert((size_t)-1, text); }
+ void append(const char text[], size_t len) { this->insert((size_t)-1, text, len); }
+ void appendUnichar(SkUnichar uni) { this->insertUnichar((size_t)-1, uni); }
+ void appendS32(int32_t value) { this->insertS32((size_t)-1, value); }
+ void appendS64(int64_t value, int minDigits = 0) { this->insertS64((size_t)-1, value, minDigits); }
+ void appendU32(uint32_t value) { this->insertU32((size_t)-1, value); }
+ void appendU64(uint64_t value, int minDigits = 0) { this->insertU64((size_t)-1, value, minDigits); }
+ void appendHex(uint32_t value, int minDigits = 0) { this->insertHex((size_t)-1, value, minDigits); }
+ void appendScalar(SkScalar value) { this->insertScalar((size_t)-1, value); }
+
+ void prepend(const SkString& str) { this->insert(0, str); }
+ void prepend(const char text[]) { this->insert(0, text); }
+ void prepend(const char text[], size_t len) { this->insert(0, text, len); }
+ void prependUnichar(SkUnichar uni) { this->insertUnichar(0, uni); }
+ void prependS32(int32_t value) { this->insertS32(0, value); }
+    void prependS64(int64_t value, int minDigits = 0) { this->insertS64(0, value, minDigits); }
+ void prependHex(uint32_t value, int minDigits = 0) { this->insertHex(0, value, minDigits); }
+    void prependScalar(SkScalar value) { this->insertScalar(0, value); }
+
+ void printf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void appendf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void appendVAList(const char format[], va_list);
+ void prependf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void prependVAList(const char format[], va_list);
+
+ void remove(size_t offset, size_t length);
+
+ SkString& operator+=(const SkString& s) { this->append(s); return *this; }
+ SkString& operator+=(const char text[]) { this->append(text); return *this; }
+ SkString& operator+=(const char c) { this->append(&c, 1); return *this; }
+
+ /**
+ * Swap contents between this and other. This function is guaranteed
+ * to never fail or throw.
+ */
+ void swap(SkString& other);
+
+private:
+ struct Rec {
+ public:
+ constexpr Rec(uint32_t len, int32_t refCnt)
+ : fLength(len), fRefCnt(refCnt), fBeginningOfData(0)
+ { }
+ static sk_sp<Rec> Make(const char text[], size_t len);
+ uint32_t fLength; // logically size_t, but we want it to stay 32bits
+ mutable std::atomic<int32_t> fRefCnt;
+ char fBeginningOfData;
+
+ char* data() { return &fBeginningOfData; }
+ const char* data() const { return &fBeginningOfData; }
+
+ void ref() const;
+ void unref() const;
+ bool unique() const;
+ private:
+ // Ensure the unsized delete is called.
+ void operator delete(void* p) { ::operator delete(p); }
+ };
+ sk_sp<Rec> fRec;
+
+#ifdef SK_DEBUG
+ const SkString& validate() const;
+#else
+ const SkString& validate() const { return *this; }
+#endif
+
+ static const Rec gEmptyRec;
+};
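+
+// A sketch of common SkString building calls; the values are illustrative.
+//
+//     SkString s("width=");
+//     s.appendS32(1024);
+//     s.appendf(" (%s)", "px");        // printf-style append
+//     SkDebugf("%s\n", s.c_str());     // prints: width=1024 (px)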
+
+/// Creates a new string and writes into it using a printf()-style format.
+SkString SkStringPrintf(const char* format, ...);
+/// This makes it easier to write a caller as a VAR_ARGS function where the format string is
+/// optional.
+static inline SkString SkStringPrintf() { return SkString(); }
+
+static inline void swap(SkString& a, SkString& b) {
+ a.swap(b);
+}
+
+enum SkStrSplitMode {
+ // Strictly return all results. If the input is ",," and the separator is ',' this will return
+ // an array of three empty strings.
+ kStrict_SkStrSplitMode,
+
+ // Only nonempty results will be added to the results. Multiple separators will be
+ // coalesced. Separators at the beginning and end of the input will be ignored. If the input is
+ // ",," and the separator is ',', this will return an empty vector.
+ kCoalesce_SkStrSplitMode
+};
+
+// Split str on any characters in delimiters into out. (Think, strtok with a sane API.)
+void SkStrSplit(const char* str, const char* delimiters, SkStrSplitMode splitMode,
+ SkTArray<SkString>* out);
+inline void SkStrSplit(const char* str, const char* delimiters, SkTArray<SkString>* out) {
+ SkStrSplit(str, delimiters, kCoalesce_SkStrSplitMode, out);
+}
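+
+// A sketch of the two split modes on the same input; the literals are
+// illustrative assumptions.
+//
+//     SkTArray<SkString> strict, coalesced;
+//     SkStrSplit("a,,b", ",", kStrict_SkStrSplitMode, &strict);      // "a", "", "b"
+//     SkStrSplit("a,,b", ",", kCoalesce_SkStrSplitMode, &coalesced); // "a", "b"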
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkStrokeRec.h b/gfx/skia/skia/include/core/SkStrokeRec.h
new file mode 100644
index 0000000000..6fa3872469
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStrokeRec.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrokeRec_DEFINED
+#define SkStrokeRec_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/private/SkMacros.h"
+
+class SkPath;
+
+SK_BEGIN_REQUIRE_DENSE
+class SK_API SkStrokeRec {
+public:
+ enum InitStyle {
+ kHairline_InitStyle,
+ kFill_InitStyle
+ };
+ SkStrokeRec(InitStyle style);
+ SkStrokeRec(const SkPaint&, SkPaint::Style, SkScalar resScale = 1);
+ explicit SkStrokeRec(const SkPaint&, SkScalar resScale = 1);
+
+ enum Style {
+ kHairline_Style,
+ kFill_Style,
+ kStroke_Style,
+ kStrokeAndFill_Style
+ };
+
+ static constexpr int kStyleCount = kStrokeAndFill_Style + 1;
+
+ Style getStyle() const;
+ SkScalar getWidth() const { return fWidth; }
+ SkScalar getMiter() const { return fMiterLimit; }
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+
+ bool isHairlineStyle() const {
+ return kHairline_Style == this->getStyle();
+ }
+
+ bool isFillStyle() const {
+ return kFill_Style == this->getStyle();
+ }
+
+ void setFillStyle();
+ void setHairlineStyle();
+ /**
+ * Specify the strokewidth, and optionally if you want stroke + fill.
+ * Note, if width==0, then this request is taken to mean:
+ * strokeAndFill==true -> new style will be Fill
+ * strokeAndFill==false -> new style will be Hairline
+ */
+ void setStrokeStyle(SkScalar width, bool strokeAndFill = false);
+
+ void setStrokeParams(SkPaint::Cap cap, SkPaint::Join join, SkScalar miterLimit) {
+ fCap = cap;
+ fJoin = join;
+ fMiterLimit = miterLimit;
+ }
+
+ SkScalar getResScale() const {
+ return fResScale;
+ }
+
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
+
+ /**
+     *  Returns true if this specifies any thick stroking, i.e. applyToPath()
+ * will return true.
+ */
+ bool needToApply() const {
+ Style style = this->getStyle();
+ return (kStroke_Style == style) || (kStrokeAndFill_Style == style);
+ }
+
+ /**
+ * Apply these stroke parameters to the src path, returning the result
+ * in dst.
+ *
+ * If there was no change (i.e. style == hairline or fill) this returns
+ * false and dst is unchanged. Otherwise returns true and the result is
+ * stored in dst.
+ *
+ * src and dst may be the same path.
+ */
+ bool applyToPath(SkPath* dst, const SkPath& src) const;
+
+ /**
+ * Apply these stroke parameters to a paint.
+ */
+ void applyToPaint(SkPaint* paint) const;
+
+ /**
+     *  Gives a conservative value for the outset that should be applied to a
+     *  geometry's bounds to account for any inflation due to applying this
+     *  strokeRec to the geometry.
+ */
+ SkScalar getInflationRadius() const;
+
+ /**
+ * Equivalent to:
+ * SkStrokeRec rec(paint, style);
+ * rec.getInflationRadius();
+ * This does not account for other effects on the paint (i.e. path
+ * effect).
+ */
+ static SkScalar GetInflationRadius(const SkPaint&, SkPaint::Style);
+
+ static SkScalar GetInflationRadius(SkPaint::Join, SkScalar miterLimit, SkPaint::Cap,
+ SkScalar strokeWidth);
+
+ /**
+ * Compare if two SkStrokeRecs have an equal effect on a path.
+ * Equal SkStrokeRecs produce equal paths. Equality of produced
+ * paths does not take the ResScale parameter into account.
+ */
+ bool hasEqualEffect(const SkStrokeRec& other) const {
+ if (!this->needToApply()) {
+ return this->getStyle() == other.getStyle();
+ }
+ return fWidth == other.fWidth &&
+ fMiterLimit == other.fMiterLimit &&
+ fCap == other.fCap &&
+ fJoin == other.fJoin &&
+ fStrokeAndFill == other.fStrokeAndFill;
+ }
+
+private:
+ void init(const SkPaint&, SkPaint::Style, SkScalar resScale);
+
+ SkScalar fResScale;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ // The following three members are packed together into a single u32.
+ // This is to avoid unnecessary padding and ensure binary equality for
+ // hashing (because the padded areas might contain garbage values).
+ //
+ // fCap and fJoin are larger than needed to avoid having to initialize
+ // any pad values
+ uint32_t fCap : 16; // SkPaint::Cap
+ uint32_t fJoin : 15; // SkPaint::Join
+ uint32_t fStrokeAndFill : 1; // bool
+};
+SK_END_REQUIRE_DENSE
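+
+// A sketch of deriving stroke geometry with SkStrokeRec; 'original' stands in
+// for a caller-supplied SkPath. needToApply() is false for hairline and fill
+// styles, in which case applyToPath() would leave dst unchanged.
+//
+//     SkPaint paint;
+//     paint.setStyle(SkPaint::kStroke_Style);
+//     paint.setStrokeWidth(4.0f);
+//     SkStrokeRec rec(paint);
+//     SkPath stroked;
+//     if (rec.needToApply()) {
+//         rec.applyToPath(&stroked, original);  // outline that fills the stroke
+//     }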
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurface.h b/gfx/skia/skia/include/core/SkSurface.h
new file mode 100644
index 0000000000..eaf530490d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurface.h
@@ -0,0 +1,1046 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_DEFINED
+#define SkSurface_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+#include "include/gpu/GrTypes.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include <android/hardware_buffer.h>
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+class SkCanvas;
+class SkDeferredDisplayList;
+class SkPaint;
+class SkSurfaceCharacterization;
+class GrBackendRenderTarget;
+class GrBackendSemaphore;
+class GrBackendTexture;
+class GrContext;
+class GrRecordingContext;
+class GrRenderTarget;
+
+/** \class SkSurface
+ SkSurface is responsible for managing the pixels that a canvas draws into. The pixels can be
+ allocated either in CPU memory (a raster surface) or on the GPU (a GrRenderTarget surface).
+ SkSurface takes care of allocating a SkCanvas that will draw into the surface. Call
+ surface->getCanvas() to use that canvas (but don't delete it, it is owned by the surface).
+ SkSurface always has non-zero dimensions. If there is a request for a new surface, and either
+ of the requested dimensions are zero, then nullptr will be returned.
+*/
+class SK_API SkSurface : public SkRefCnt {
+public:
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is large enough to contain info width pixels of SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterDirect(const SkImageInfo& imageInfo, void* pixels,
+ size_t rowBytes,
+ const SkSurfaceProps* surfaceProps = nullptr);
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ releaseProc is called with pixels and context when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is large enough to contain info width pixels of SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next
+ @param releaseProc called when SkSurface is deleted; may be nullptr
+ @param context passed to releaseProc; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterDirectReleaseProc(const SkImageInfo& imageInfo, void* pixels,
+ size_t rowBytes,
+ void (*releaseProc)(void* pixels, void* context),
+ void* context, const SkSurfaceProps* surfaceProps = nullptr);
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times
+ rowBytes, or times imageInfo.minRowBytes() if rowBytes is zero.
+ Pixel memory is deleted when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ rowBytes is large enough to contain info width pixels of SkColorType, or is zero.
+
+ If rowBytes is not zero, subsequent images returned by makeImageSnapshot()
+ have the same rowBytes.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param rowBytes interval from one SkSurface row to the next; may be zero
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo, size_t rowBytes,
+ const SkSurfaceProps* surfaceProps);
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times
+ imageInfo.minRowBytes().
+ Pixel memory is deleted when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo,
+ const SkSurfaceProps* props = nullptr) {
+ return MakeRaster(imageInfo, 0, props);
+ }
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is height times width times
+ four. Pixel memory is deleted when SkSurface is deleted.
+
+ Internally, sets SkImageInfo to width, height, native color type, and
+ kPremul_SkAlphaType.
+
+ SkSurface is returned if width and height are greater than zero.
+
+ Use to create SkSurface that matches SkPMColor, the native pixel arrangement on
+ the platform. SkSurface drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be greater than zero
+ @param height pixel row count; must be greater than zero
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterN32Premul(int width, int height,
+ const SkSurfaceProps* surfaceProps = nullptr);
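+
+    /** A sketch of the typical raster round trip (dimensions illustrative):
+        create a surface, draw through its canvas, then snapshot an image.
+
+            sk_sp<SkSurface> surface = SkSurface::MakeRasterN32Premul(256, 256);
+            if (surface) {
+                SkCanvas* canvas = surface->getCanvas();  // owned by the surface
+                canvas->clear(SK_ColorWHITE);
+                sk_sp<SkImage> snapshot = surface->makeImageSnapshot();
+            }
+    */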
+
+ /** Caller data passed to RenderTarget/TextureReleaseProc; may be nullptr. */
+ typedef void* ReleaseContext;
+
+ /** User function called when supplied render target may be deleted. */
+ typedef void (*RenderTargetReleaseProc)(ReleaseContext releaseContext);
+
+ /** User function called when supplied texture may be deleted. */
+ typedef void (*TextureReleaseProc)(ReleaseContext releaseContext);
+
+ /** Wraps a GPU-backed texture into SkSurface. Caller must ensure the texture is
+        valid for the lifetime of returned SkSurface. If sampleCnt is greater than zero,
+ creates an intermediate MSAA SkSurface which is used for drawing backendTexture.
+
+ SkSurface is returned if all parameters are valid. backendTexture is valid if
+ its pixel configuration agrees with colorSpace and context; for instance, if
+ backendTexture has an sRGB configuration, then context must support sRGB,
+ and colorSpace must be present. Further, backendTexture width and height must
+ not exceed context capabilities, and the context must be able to support
+ back-end textures.
+
+ If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kRGB_888x_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendTexture(GrContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin, int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Wraps a GPU-backed buffer into SkSurface. Caller must ensure backendRenderTarget
+ is valid for the lifetime of returned SkSurface.
+
+ SkSurface is returned if all parameters are valid. backendRenderTarget is valid if
+ its pixel configuration agrees with colorSpace and context; for instance, if
+ backendRenderTarget has an sRGB configuration, then context must support sRGB,
+ and colorSpace must be present. Further, backendRenderTarget width and height must
+ not exceed context capabilities, and the context must be able to support
+ back-end render targets.
+
+ If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param backendRenderTarget GPU intermediate memory buffer
+ @param origin one of:
+ kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType,
+ kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kRGB_888x_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param colorSpace range of colors
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+        @param releaseProc            function called when backendRenderTarget can be released
+        @param releaseContext         state passed to releaseProc
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendRenderTarget(GrContext* context,
+ const GrBackendRenderTarget& backendRenderTarget,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ RenderTargetReleaseProc releaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Wraps a GPU-backed texture into SkSurface. Caller must ensure backendTexture is
+        valid for the lifetime of returned SkSurface. If sampleCnt is greater than zero,
+ creates an intermediate MSAA SkSurface which is used for drawing backendTexture.
+
+ SkSurface is returned if all parameters are valid. backendTexture is valid if
+ its pixel configuration agrees with colorSpace and context; for instance, if
+ backendTexture has an sRGB configuration, then context must support sRGB,
+ and colorSpace must be present. Further, backendTexture width and height must
+ not exceed context capabilities.
+
+ Returned SkSurface is available only for drawing into, and cannot generate an
+ SkImage.
+
+ If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kRGB_888x_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendTextureAsRenderTarget(GrContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps);
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+ /** Private.
+ Creates SkSurface from Android hardware buffer.
+ Returned SkSurface takes a reference on the buffer. The ref on the buffer will be released
+ when the SkSurface is destroyed and there is no pending work on the GPU involving the
+ buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ Currently this is only supported for buffers that can be textured as well as rendered to.
+ In other words that must have both AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT and
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE usage bits.
+
+ @param context GPU context
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return created SkSurface, or nullptr
+ */
+ static sk_sp<SkSurface> MakeFromAHardwareBuffer(GrContext* context,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps);
+#endif
+
+#ifdef SK_METAL
+ /** Private.
+ Creates SkSurface from CAMetalLayer.
+ Returned SkSurface takes a reference on the CAMetalLayer. The ref on the layer will be
+ released when the SkSurface is destroyed.
+
+ Only available when Metal API is enabled.
+
+ Will grab the current drawable from the layer and use its texture as a backendRT to
+ create a renderable surface.
+
+ @param context GPU context
+ @param layer GrMTLHandle (expected to be a CAMetalLayer*)
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorType one of:
+ kUnknown_SkColorType, kAlpha_8_SkColorType, kRGB_565_SkColorType,
+ kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kRGB_888x_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_1010102_SkColorType, kRGB_101010x_SkColorType,
+ kGray_8_SkColorType, kRGBA_F16_SkColorType
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param drawable Pointer to drawable to be filled in when this surface is
+ instantiated; may not be nullptr
+ @return created SkSurface, or nullptr
+ */
+ static sk_sp<SkSurface> MakeFromCAMetalLayer(GrContext* context,
+ GrMTLHandle layer,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ GrMTLHandle* drawable);
+
+#endif
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ sampleCount requests the number of samples per pixel.
+ Pass zero to disable multi-sample anti-aliasing. The request is rounded
+ up to the next supported count, or rounded down if it is larger than the
+ maximum supported count.
+
+ surfaceOrigin pins either the top-left or the bottom-left corner to the origin.
+
+        shouldCreateWithMips hints that the SkImage returned by makeImageSnapshot() is mip mapped.
+
+ If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param budgeted one of: SkBudgeted::kNo, SkBudgeted::kYes
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace;
+ width, or height, or both, may be zero
+ @param sampleCount samples per pixel, or 0 to disable full scene anti-aliasing
+ @param surfaceOrigin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param shouldCreateWithMips hint that SkSurface will host mip map images
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& imageInfo,
+ int sampleCount, GrSurfaceOrigin surfaceOrigin,
+ const SkSurfaceProps* surfaceProps,
+ bool shouldCreateWithMips = false);
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ sampleCount requests the number of samples per pixel.
+ Pass zero to disable multi-sample anti-aliasing. The request is rounded
+ up to the next supported count, or rounded down if it is larger than the
+ maximum supported count.
+
+ SkSurface bottom-left corner is pinned to the origin.
+
+ @param context GPU context
+ @param budgeted one of: SkBudgeted::kNo, SkBudgeted::kYes
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width, or height, or both, may be zero
+ @param sampleCount samples per pixel, or 0 to disable multi-sample anti-aliasing
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& imageInfo, int sampleCount,
+ const SkSurfaceProps* surfaceProps) {
+ return MakeRenderTarget(context, budgeted, imageInfo, sampleCount,
+ kBottomLeft_GrSurfaceOrigin, surfaceProps);
+ }
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ SkSurface bottom-left corner is pinned to the origin.
+
+ @param context GPU context
+ @param budgeted one of: SkBudgeted::kNo, SkBudgeted::kYes
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width, or height, or both, may be zero
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& imageInfo) {
+ if (!imageInfo.width() || !imageInfo.height()) {
+ return nullptr;
+ }
+ return MakeRenderTarget(context, budgeted, imageInfo, 0, kBottomLeft_GrSurfaceOrigin,
+ nullptr);
+ }
+
+ /** Returns SkSurface on GPU indicated by context that is compatible with the provided
+ characterization. budgeted selects whether allocation for pixels is tracked by context.
+
+ @param context GPU context
+ @param characterization description of the desired SkSurface
+ @param budgeted one of: SkBudgeted::kNo, SkBudgeted::kYes
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context,
+ const SkSurfaceCharacterization& characterization,
+ SkBudgeted budgeted);
+
+ /** Wraps a backend texture in an SkSurface - setting up the surface to match the provided
+ characterization. The caller must ensure the texture is valid for the lifetime of
+ returned SkSurface.
+
+ If the backend texture and surface characterization are incompatible then null will
+ be returned.
+
+ Usually, the GrContext::createBackendTexture variant that takes a surface characterization
+ should be used to create the backend texture. If not,
+ SkSurfaceCharacterization::isCompatible can be used to determine if a given backend texture
+ is compatible with a specific surface characterization.
+
+ @param context GPU context
+ @param characterization characterization of the desired surface
+ @param backendTexture texture residing on GPU
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return SkSurface if all parameters are compatible; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendTexture(GrContext* context,
+                                            const SkSurfaceCharacterization& characterization,
+ const GrBackendTexture& backendTexture,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Is this surface compatible with the provided characterization?
+
+ This method can be used to determine if an existing SkSurface is a viable destination
+ for an SkDeferredDisplayList.
+
+ @param characterization The characterization for which a compatibility check is desired
+ @return true if this surface is compatible with the characterization;
+ false otherwise
+ */
+ bool isCompatible(const SkSurfaceCharacterization& characterization) const;
+
+ /** Returns SkSurface without backing pixels. Drawing to SkCanvas returned from SkSurface
+ has no effect. Calling makeImageSnapshot() on returned SkSurface returns nullptr.
+
+ @param width one or greater
+ @param height one or greater
+ @return SkSurface if width and height are positive; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeNull(int width, int height);
+
+ /** Returns pixel count in each row; may be zero or greater.
+
+ @return number of pixel columns
+ */
+ int width() const { return fWidth; }
+
+ /** Returns pixel row count; may be zero or greater.
+
+ @return number of pixel rows
+ */
+ int height() const { return fHeight; }
+
+ /** Returns an ImageInfo describing the surface.
+ */
+ SkImageInfo imageInfo();
+
+ /** Returns unique value identifying the content of SkSurface. Returned value changes
+ each time the content changes. Content is changed by drawing, or by calling
+ notifyContentWillChange().
+
+ @return unique content identifier
+ */
+ uint32_t generationID();
+
+ /** \enum SkSurface::ContentChangeMode
+ ContentChangeMode members are parameters to notifyContentWillChange().
+ */
+ enum ContentChangeMode {
+ kDiscard_ContentChangeMode, //!< discards surface on change
+ kRetain_ContentChangeMode, //!< preserves surface on change
+ };
+
+ /** Notifies that SkSurface contents will be changed by code outside of Skia.
+ Subsequent calls to generationID() return a different value.
+
+ TODO: Can kRetain_ContentChangeMode be deprecated?
+
+ @param mode one of: kDiscard_ContentChangeMode, kRetain_ContentChangeMode
+ */
+ void notifyContentWillChange(ContentChangeMode mode);
+
+ enum BackendHandleAccess {
+ kFlushRead_BackendHandleAccess, //!< back-end object is readable
+ kFlushWrite_BackendHandleAccess, //!< back-end object is writable
+ kDiscardWrite_BackendHandleAccess, //!< back-end object must be overwritten
+ };
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kFlushRead_TextureHandleAccess =
+ kFlushRead_BackendHandleAccess;
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kFlushWrite_TextureHandleAccess =
+ kFlushWrite_BackendHandleAccess;
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kDiscardWrite_TextureHandleAccess =
+ kDiscardWrite_BackendHandleAccess;
+
+ /** Retrieves the back-end texture. If SkSurface has no back-end texture, an invalid
+ object is returned. Call GrBackendTexture::isValid to determine if the result
+ is valid.
+
+ The returned GrBackendTexture should be discarded if the SkSurface is drawn to or deleted.
+
+ @param backendHandleAccess one of: kFlushRead_BackendHandleAccess,
+ kFlushWrite_BackendHandleAccess,
+ kDiscardWrite_BackendHandleAccess
+ @return GPU texture reference; invalid on failure
+ */
+ GrBackendTexture getBackendTexture(BackendHandleAccess backendHandleAccess);
+
+ /** Retrieves the back-end render target. If SkSurface has no back-end render target, an invalid
+ object is returned. Call GrBackendRenderTarget::isValid to determine if the result
+ is valid.
+
+ The returned GrBackendRenderTarget should be discarded if the SkSurface is drawn to
+ or deleted.
+
+ @param backendHandleAccess one of: kFlushRead_BackendHandleAccess,
+ kFlushWrite_BackendHandleAccess,
+ kDiscardWrite_BackendHandleAccess
+ @return GPU render target reference; invalid on failure
+ */
+ GrBackendRenderTarget getBackendRenderTarget(BackendHandleAccess backendHandleAccess);
+
+    /** If the surface was made via MakeFromBackendTexture then its backing texture may be
+ substituted with a different texture. The contents of the previous backing texture are
+ copied into the new texture. SkCanvas state is preserved. The original sample count is
+        used. The GrBackendFormat and dimensions of the replacement texture must match those of
+        the original.
+
+ @param backendTexture the new backing texture for the surface.
+ @param origin one of: kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ */
+ bool replaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Returns SkCanvas that draws into SkSurface. Subsequent calls return the same SkCanvas.
+ SkCanvas returned is managed and owned by SkSurface, and is deleted when SkSurface
+ is deleted.
+
+ @return drawing SkCanvas for SkSurface
+ */
+ SkCanvas* getCanvas();
+
+ /** Returns a compatible SkSurface, or nullptr. Returned SkSurface contains
+ the same raster, GPU, or null properties as the original. Returned SkSurface
+ does not share the same pixels.
+
+ Returns nullptr if imageInfo width or height are zero, or if imageInfo
+ is incompatible with SkSurface.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of SkSurface; width and height must be greater than zero
+ @return compatible SkSurface or nullptr
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo& imageInfo);
+
+ /** Calls makeSurface(ImageInfo) with the same ImageInfo as this surface, but with the
+ * specified width and height.
+ */
+ sk_sp<SkSurface> makeSurface(int width, int height);
+
+ /** Returns SkImage capturing SkSurface contents. Subsequent drawing to SkSurface contents
+        is not captured. SkImage allocation is accounted for if SkSurface was created with
+ SkBudgeted::kYes.
+
+ @return SkImage initialized with SkSurface contents
+ */
+ sk_sp<SkImage> makeImageSnapshot();
+
+ /**
+ * Like the no-parameter version, this returns an image of the current surface contents.
+ * This variant takes a rectangle specifying the subset of the surface that is of interest.
+ * These bounds will be sanitized before being used.
+ * - If bounds extends beyond the surface, it will be trimmed to just the intersection of
+ * it and the surface.
+ * - If bounds does not intersect the surface, then this returns nullptr.
+ * - If bounds == the surface, then this is the same as calling the no-parameter variant.
+ */
+ sk_sp<SkImage> makeImageSnapshot(const SkIRect& bounds);
+
+ /** Draws SkSurface contents to canvas, with its top-left corner at (x, y).
+
+ If SkPaint paint is not nullptr, apply SkColorFilter, alpha, SkImageFilter,
+ SkBlendMode, and SkDrawLooper.
+
+ @param canvas SkCanvas drawn into
+ @param x horizontal offset in SkCanvas
+ @param y vertical offset in SkCanvas
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint);
+
+ /** Copies SkSurface pixel address, row bytes, and SkImageInfo to SkPixmap, if address
+        is available, and returns true. If pixel address is not available, returns
+        false and leaves SkPixmap unchanged.
+
+ pixmap contents become invalid on any future change to SkSurface.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkSurface has direct access to pixels
+ */
+ bool peekPixels(SkPixmap* pixmap);
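+
+    // Example (sketch): zero-copy access to raster pixels. GPU-backed surfaces
+    // typically return false here, so the access must be guarded.
+    //
+    //     SkPixmap pm;
+    //     if (surface->peekPixels(&pm)) {
+    //         const void* addr = pm.addr();      // direct pointer into the surface;
+    //         size_t rowBytes = pm.rowBytes();   // invalidated by any future change
+    //     }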
+
+ /** Copies SkRect of pixels to dst.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (dst.width(), dst.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dst.colorType() and dst.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dst contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkPixmap pixels could not be allocated.
+ - dst.rowBytes() is too small to contain one row of pixels.
+
+ @param dst storage for pixels copied from SkSurface
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY);
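+
+    // Example (sketch): copy the full surface into caller-allocated storage by
+    // wrapping that storage in a SkPixmap.
+    //
+    //     SkImageInfo info = SkImageInfo::MakeN32Premul(surface->width(), surface->height());
+    //     std::vector<uint32_t> storage(info.width() * info.height());
+    //     SkPixmap dst(info, storage.data(), info.minRowBytes());
+    //     bool ok = surface->readPixels(dst, 0, 0);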
+
+    /** Copies SkRect of pixels from SkSurface into dstPixels.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dstInfo.colorType() and dstInfo.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dstPixels contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkSurface pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType().
+ - dstRowBytes is too small to contain one row of pixels.
+
+ @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels
+ @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger
+ @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkSurface into bitmap.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to bitmap.colorType() and bitmap.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dst contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkSurface pixels could not be converted to dst.colorType() or dst.alphaType().
+ - dst pixels could not be allocated.
+ - dst.rowBytes() is too small to contain one row of pixels.
+
+ @param dst storage for pixels copied from SkSurface
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkBitmap& dst, int srcX, int srcY);
+
+ /** The result from asyncRescaleAndReadPixels() or asyncRescaleAndReadPixelsYUV420(). */
+ class AsyncReadResult {
+ public:
+ AsyncReadResult(const AsyncReadResult&) = delete;
+ AsyncReadResult(AsyncReadResult&&) = delete;
+ AsyncReadResult& operator=(const AsyncReadResult&) = delete;
+ AsyncReadResult& operator=(AsyncReadResult&&) = delete;
+
+ virtual ~AsyncReadResult() = default;
+ virtual int count() const = 0;
+ virtual const void* data(int i) const = 0;
+ virtual size_t rowBytes(int i) const = 0;
+
+ protected:
+ AsyncReadResult() = default;
+ };
+
+ /** Client-provided context that is passed to client-provided ReadPixelsContext. */
+ using ReadPixelsContext = void*;
+
+ /** Client-provided callback to asyncRescaleAndReadPixels() or
+ asyncRescaleAndReadPixelsYUV420() that is called when read result is ready or on failure.
+ */
+ using ReadPixelsCallback = void(ReadPixelsContext, std::unique_ptr<const AsyncReadResult>);
+
+ /** Controls the gamma that rescaling occurs in for asyncRescaleAndReadPixels() and
+ asyncRescaleAndReadPixelsYUV420().
+ */
+ enum RescaleGamma : bool { kSrc, kLinear };
+
+ /** Makes surface pixel data available to caller, possibly asynchronously. It can also rescale
+ the surface pixels.
+
+ Currently asynchronous reads are only supported on the GPU backend and only when the
+ underlying 3D API supports transfer buffers and CPU/GPU synchronization primitives. In all
+ other cases this operates synchronously.
+
+ Data is read from the source sub-rectangle, is optionally converted to a linear gamma, is
+ rescaled to the size indicated by 'info', is then converted to the color space, color type,
+ and alpha type of 'info'. A 'srcRect' that is not contained by the bounds of the surface
+ causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing pixel data in the requested color type, alpha type, and color
+ space. The AsyncReadResult will have count() == 1. Upon failure the callback is called
+ with nullptr for AsyncReadResult.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the
+ SkSurface is GPU-backed the data is immediately invalidated if the GrContext is abandoned
+ or destroyed.
+
+ @param info info of the requested pixels
+ @param srcRect subrectangle of surface to read
+ @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleQuality controls the quality (and cost) of the rescaling
+ @param callback function to call with result of the read
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixels(const SkImageInfo& info, const SkIRect& srcRect,
+ RescaleGamma rescaleGamma, SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback, ReadPixelsContext context);
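+
+    // Example (sketch, assuming a GPU-backed surface): request a scaled-down
+    // readback; the callback name and the null context are placeholders.
+    //
+    //     static void onRead(SkSurface::ReadPixelsContext,
+    //                        std::unique_ptr<const SkSurface::AsyncReadResult> result) {
+    //         if (result) {
+    //             // result->data(0) / result->rowBytes(0) hold the pixels
+    //         }
+    //     }
+    //     ...
+    //     surface->asyncRescaleAndReadPixels(
+    //             SkImageInfo::MakeN32Premul(32, 32),
+    //             SkIRect::MakeWH(surface->width(), surface->height()),
+    //             SkSurface::RescaleGamma::kSrc, kNone_SkFilterQuality,
+    //             onRead, /*context=*/nullptr);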
+
+ /** Legacy version of asyncRescaleAndReadPixels() that passes data directly to the callback
+ rather than using AsyncReadResult. The data is only valid during the lifetime of the
+ callback.
+
+ Deprecated.
+ */
+ using LegacyReadPixelsCallback = void(ReadPixelsContext, const void* data, size_t rowBytes);
+ void asyncRescaleAndReadPixels(const SkImageInfo& info, const SkIRect& srcRect,
+ RescaleGamma rescaleGamma, SkFilterQuality rescaleQuality,
+ LegacyReadPixelsCallback callback, ReadPixelsContext context);
+
+ /**
+ Similar to asyncRescaleAndReadPixels but performs an additional conversion to YUV. The
+ RGB->YUV conversion is controlled by 'yuvColorSpace'. The YUV data is returned as three
+ planes ordered y, u, v. The u and v planes are half the width and height of the resized
+ rectangle. The y, u, and v values are single bytes. Currently this fails if 'dstSize'
+ width and height are not even. A 'srcRect' that is not contained by the bounds of the
+ surface causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing the planar data. The AsyncReadResult will have count() == 3.
+ Upon failure the callback is called with nullptr for AsyncReadResult.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the
+ SkSurface is GPU-backed the data is immediately invalidated if the GrContext is abandoned
+ or destroyed.
+
+ @param yuvColorSpace The transformation from RGB to YUV. Applied to the resized image
+ after it is converted to dstColorSpace.
+ @param dstColorSpace The color space to convert the resized image to, after rescaling.
+ @param srcRect The portion of the surface to rescale and convert to YUV planes.
+ @param dstSize The size to rescale srcRect to
+ @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleQuality controls the quality (and cost) of the rescaling
+ @param callback function to call with the planar read result
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext);
+
+ /** Legacy version of asyncRescaleAndReadPixelsYUV420() that passes data directly to the
+ callback rather than using AsyncReadResult. The data is only valid during the lifetime of
+ the callback.
+
+ Deprecated.
+ */
+ using LegacyReadPixelsCallbackYUV420 = void(ReadPixelsContext, const void* data[3],
+ size_t rowBytes[3]);
+ void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ int dstW, int dstH,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ LegacyReadPixelsCallbackYUV420 callback,
+ ReadPixelsContext);
+
+ /** Copies SkRect of pixels from the src SkPixmap to the SkSurface.
+
+ Source SkRect corners are (0, 0) and (src.width(), src.height()).
+ Destination SkRect corners are (dstX, dstY) and
+ (dstX + Surface width(), dstY + Surface height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to SkSurface colorType() and SkSurface alphaType() if required.
+
+ @param src storage for pixels to copy to SkSurface
+ @param dstX x-axis position relative to SkSurface to begin copy; may be negative
+ @param dstY y-axis position relative to SkSurface to begin copy; may be negative
+ */
+ void writePixels(const SkPixmap& src, int dstX, int dstY);
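+
+    // Example (sketch): upload one caller-owned premultiplied pixel to the
+    // top-left corner of the surface.
+    //
+    //     uint32_t pixel = 0xFF0000FF;
+    //     SkPixmap src(SkImageInfo::MakeN32Premul(1, 1), &pixel, sizeof(pixel));
+    //     surface->writePixels(src, 0, 0);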
+
+ /** Copies SkRect of pixels from the src SkBitmap to the SkSurface.
+
+ Source SkRect corners are (0, 0) and (src.width(), src.height()).
+ Destination SkRect corners are (dstX, dstY) and
+ (dstX + Surface width(), dstY + Surface height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to SkSurface colorType() and SkSurface alphaType() if required.
+
+ @param src storage for pixels to copy to SkSurface
+ @param dstX x-axis position relative to SkSurface to begin copy; may be negative
+ @param dstY y-axis position relative to SkSurface to begin copy; may be negative
+ */
+ void writePixels(const SkBitmap& src, int dstX, int dstY);
+
+ /** Returns SkSurfaceProps for surface.
+
+ @return LCD striping orientation and setting for device independent fonts
+ */
+ const SkSurfaceProps& props() const { return fProps; }
+
+ /** Issues pending SkSurface commands to the GPU-backed API and resolves any SkSurface MSAA.
+
+ Skia flushes as needed, so it is not necessary to call this if Skia manages
+ drawing and object lifetime. Call when interleaving Skia calls with native
+ GPU calls.
+ */
+ void flush();
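+
+    // Example (sketch): interleaving Skia rendering with native GPU work.
+    //
+    //     surface->getCanvas()->drawCircle(50, 50, 25, SkPaint());
+    //     surface->flush();  // issue Skia's pending commands to the GPU API
+    //     // ... issue native GL/Vulkan commands targeting the same surface ...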
+
+ enum class BackendSurfaceAccess {
+ kNoAccess, //!< back-end object will not be used by client
+ kPresent, //!< back-end surface will be used for presenting to screen
+ };
+
+ /** Issues pending SkSurface commands to the GPU-backed API and resolves any SkSurface MSAA.
+ The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
+ passed in.
+
+ If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
+
+ If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
+ treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
+ SkSurface will be transferred back to its original queue. If the SkSurface was created by
+ wrapping a VkImage, the queue will be set to the queue which was originally passed in on
+ the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
+ layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
+
+ The GrFlushInfo describes additional options to flush. Please see documentation at
+ GrFlushInfo for more info.
+
+ If GrSemaphoresSubmitted::kNo is returned, the GPU back-end did not create or
+ add any semaphores to signal on the GPU; the caller should not instruct the GPU
+ to wait on any of the semaphores passed in the GrFlushInfo.
+
+ Pending surface commands are flushed regardless of the return result.
+
+ @param access type of access the call will do on the backend object after flush
+ @param info flush options
+ @return one of: GrSemaphoresSubmitted::kYes, GrSemaphoresSubmitted::kNo
+ */
+ GrSemaphoresSubmitted flush(BackendSurfaceAccess access, const GrFlushInfo& info);
+
+ /** Deprecated
+ */
+ GrSemaphoresSubmitted flush(BackendSurfaceAccess access, GrFlushFlags flags,
+ int numSemaphores, GrBackendSemaphore signalSemaphores[],
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /** The below enum and flush call are deprecated
+ */
+ enum FlushFlags {
+ kNone_FlushFlags = 0,
+        // flush will wait until all submitted GPU work is finished before returning.
+ kSyncCpu_FlushFlag = 0x1,
+ };
+ GrSemaphoresSubmitted flush(BackendSurfaceAccess access, FlushFlags flags,
+ int numSemaphores, GrBackendSemaphore signalSemaphores[]);
+
+ /** Deprecated.
+ */
+ GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
+ GrBackendSemaphore signalSemaphores[]);
+
+ /** Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ executing any more commands on the GPU for this surface. Skia will take ownership of the
+ underlying semaphores and delete them once they have been signaled and waited on.
+ If this call returns false, then the GPU back-end will not wait on any passed in semaphores,
+ and the client will still own the semaphores.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores);
+
+ /** Initializes SkSurfaceCharacterization that can be used to perform GPU back-end
+ processing in a separate thread. Typically this is used to divide drawing
+ into multiple tiles. SkDeferredDisplayListRecorder records the drawing commands
+ for each tile.
+
+        Returns true if SkSurface supports characterization. A raster surface returns false.
+
+ @param characterization properties for parallel drawing
+ @return true if supported
+ */
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+
+ /** Draws deferred display list created using SkDeferredDisplayListRecorder.
+ Has no effect and returns false if SkSurfaceCharacterization stored in
+ deferredDisplayList is not compatible with SkSurface.
+
+        A raster surface returns false.
+
+ @param deferredDisplayList drawing commands
+ @return false if deferredDisplayList is not compatible
+ */
+ bool draw(SkDeferredDisplayList* deferredDisplayList);
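+
+    // Example (sketch): record against a characterization, possibly on another
+    // thread, then replay on the surface's thread. Error handling omitted.
+    //
+    //     SkSurfaceCharacterization c;
+    //     if (surface->characterize(&c)) {
+    //         SkDeferredDisplayListRecorder recorder(c);
+    //         recorder.getCanvas()->clear(SK_ColorGREEN);  // record
+    //         std::unique_ptr<SkDeferredDisplayList> ddl = recorder.detach();
+    //         surface->draw(ddl.get());                    // replay
+    //     }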
+
+protected:
+ SkSurface(int width, int height, const SkSurfaceProps* surfaceProps);
+ SkSurface(const SkImageInfo& imageInfo, const SkSurfaceProps* surfaceProps);
+
+ // called by subclass if their contents have changed
+ void dirtyGenerationID() {
+ fGenerationID = 0;
+ }
+
+private:
+ const SkSurfaceProps fProps;
+ const int fWidth;
+ const int fHeight;
+ uint32_t fGenerationID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurfaceCharacterization.h b/gfx/skia/skia/include/core/SkSurfaceCharacterization.h
new file mode 100644
index 0000000000..4588df9e3d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurfaceCharacterization.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceCharacterization_DEFINED
+#define SkSurfaceCharacterization_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+class SkColorSpace;
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrBackendSurface.h"
+// TODO: remove the GrContext.h include once Flutter is updated
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+
+/** \class SkSurfaceCharacterization
+    A surface characterization contains all the information Ganesh requires to make its internal
+ rendering decisions. When passed into a SkDeferredDisplayListRecorder it will copy the
+ data and pass it on to the SkDeferredDisplayList if/when it is created. Note that both of
+ those objects (the Recorder and the DisplayList) will take a ref on the
+ GrContextThreadSafeProxy and SkColorSpace objects.
+*/
+class SK_API SkSurfaceCharacterization {
+public:
+ enum class Textureable : bool { kNo = false, kYes = true };
+ enum class MipMapped : bool { kNo = false, kYes = true };
+ enum class UsesGLFBO0 : bool { kNo = false, kYes = true };
+ // This flag indicates if the surface is wrapping a raw Vulkan secondary command buffer.
+ enum class VulkanSecondaryCBCompatible : bool { kNo = false, kYes = true };
+
+ SkSurfaceCharacterization()
+ : fCacheMaxResourceBytes(0)
+ , fOrigin(kBottomLeft_GrSurfaceOrigin)
+ , fSampleCnt(0)
+ , fIsTextureable(Textureable::kYes)
+ , fIsMipMapped(MipMapped::kYes)
+ , fUsesGLFBO0(UsesGLFBO0::kNo)
+ , fVulkanSecondaryCBCompatible(VulkanSecondaryCBCompatible::kNo)
+ , fIsProtected(GrProtected::kNo)
+ , fSurfaceProps(0, kUnknown_SkPixelGeometry) {
+ }
+
+ SkSurfaceCharacterization(SkSurfaceCharacterization&&) = default;
+ SkSurfaceCharacterization& operator=(SkSurfaceCharacterization&&) = default;
+
+ SkSurfaceCharacterization(const SkSurfaceCharacterization&) = default;
+ SkSurfaceCharacterization& operator=(const SkSurfaceCharacterization& other) = default;
+ bool operator==(const SkSurfaceCharacterization& other) const;
+ bool operator!=(const SkSurfaceCharacterization& other) const {
+ return !(*this == other);
+ }
+
+ /*
+ * Return a new surface characterization with the only difference being a different width
+ * and height
+ */
+ SkSurfaceCharacterization createResized(int width, int height) const;
+
+ /*
+ * Return a new surface characterization with only a replaced color space
+ */
+ SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const;
+
+ GrContextThreadSafeProxy* contextInfo() const { return fContextInfo.get(); }
+ sk_sp<GrContextThreadSafeProxy> refContextInfo() const { return fContextInfo; }
+ size_t cacheMaxResourceBytes() const { return fCacheMaxResourceBytes; }
+
+ bool isValid() const { return kUnknown_SkColorType != fImageInfo.colorType(); }
+
+ const SkImageInfo& imageInfo() const { return fImageInfo; }
+ const GrBackendFormat& backendFormat() const { return fBackendFormat; }
+ GrSurfaceOrigin origin() const { return fOrigin; }
+ int width() const { return fImageInfo.width(); }
+ int height() const { return fImageInfo.height(); }
+ SkColorType colorType() const { return fImageInfo.colorType(); }
+ int sampleCount() const { return fSampleCnt; }
+ bool isTextureable() const { return Textureable::kYes == fIsTextureable; }
+ bool isMipMapped() const { return MipMapped::kYes == fIsMipMapped; }
+ bool usesGLFBO0() const { return UsesGLFBO0::kYes == fUsesGLFBO0; }
+ bool vulkanSecondaryCBCompatible() const {
+ return VulkanSecondaryCBCompatible::kYes == fVulkanSecondaryCBCompatible;
+ }
+ GrProtected isProtected() const { return fIsProtected; }
+ SkColorSpace* colorSpace() const { return fImageInfo.colorSpace(); }
+ sk_sp<SkColorSpace> refColorSpace() const { return fImageInfo.refColorSpace(); }
+    const SkSurfaceProps& surfaceProps() const { return fSurfaceProps; }
+
+ // Is the provided backend texture compatible with this surface characterization?
+ bool isCompatible(const GrBackendTexture&) const;
+
+private:
+ friend class SkSurface_Gpu; // for 'set' & 'config'
+ friend class GrVkSecondaryCBDrawContext; // for 'set' & 'config'
+ friend class GrContextThreadSafeProxy; // for private ctor
+ friend class SkDeferredDisplayListRecorder; // for 'config'
+ friend class SkSurface; // for 'config'
+
+ SkDEBUGCODE(void validate() const;)
+
+ SkSurfaceCharacterization(sk_sp<GrContextThreadSafeProxy> contextInfo,
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ Textureable isTextureable,
+ MipMapped isMipMapped,
+ UsesGLFBO0 usesGLFBO0,
+ VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible,
+ GrProtected isProtected,
+ const SkSurfaceProps& surfaceProps)
+ : fContextInfo(std::move(contextInfo))
+ , fCacheMaxResourceBytes(cacheMaxResourceBytes)
+ , fImageInfo(ii)
+ , fBackendFormat(backendFormat)
+ , fOrigin(origin)
+ , fSampleCnt(sampleCnt)
+ , fIsTextureable(isTextureable)
+ , fIsMipMapped(isMipMapped)
+ , fUsesGLFBO0(usesGLFBO0)
+ , fVulkanSecondaryCBCompatible(vulkanSecondaryCBCompatible)
+ , fIsProtected(isProtected)
+ , fSurfaceProps(surfaceProps) {
+ SkDEBUGCODE(this->validate());
+ }
+
+ void set(sk_sp<GrContextThreadSafeProxy> contextInfo,
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ Textureable isTextureable,
+ MipMapped isMipMapped,
+ UsesGLFBO0 usesGLFBO0,
+ VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible,
+ GrProtected isProtected,
+ const SkSurfaceProps& surfaceProps) {
+ SkASSERT(MipMapped::kNo == isMipMapped || Textureable::kYes == isTextureable);
+ SkASSERT(Textureable::kNo == isTextureable || UsesGLFBO0::kNo == usesGLFBO0);
+
+ SkASSERT(VulkanSecondaryCBCompatible::kNo == vulkanSecondaryCBCompatible ||
+ UsesGLFBO0::kNo == usesGLFBO0);
+ SkASSERT(Textureable::kNo == isTextureable ||
+ VulkanSecondaryCBCompatible::kNo == vulkanSecondaryCBCompatible);
+
+ fContextInfo = contextInfo;
+ fCacheMaxResourceBytes = cacheMaxResourceBytes;
+
+ fImageInfo = ii;
+ fBackendFormat = backendFormat;
+ fOrigin = origin;
+ fSampleCnt = sampleCnt;
+ fIsTextureable = isTextureable;
+ fIsMipMapped = isMipMapped;
+ fUsesGLFBO0 = usesGLFBO0;
+ fVulkanSecondaryCBCompatible = vulkanSecondaryCBCompatible;
+ fIsProtected = isProtected;
+ fSurfaceProps = surfaceProps;
+
+ SkDEBUGCODE(this->validate());
+ }
+
+ sk_sp<GrContextThreadSafeProxy> fContextInfo;
+ size_t fCacheMaxResourceBytes;
+
+ SkImageInfo fImageInfo;
+ GrBackendFormat fBackendFormat;
+ GrSurfaceOrigin fOrigin;
+ int fSampleCnt;
+ Textureable fIsTextureable;
+ MipMapped fIsMipMapped;
+ UsesGLFBO0 fUsesGLFBO0;
+ VulkanSecondaryCBCompatible fVulkanSecondaryCBCompatible;
+ GrProtected fIsProtected;
+ SkSurfaceProps fSurfaceProps;
+};
+
+#else // !SK_SUPPORT_GPU
+
+class SK_API SkSurfaceCharacterization {
+public:
+ SkSurfaceCharacterization() : fSurfaceProps(0, kUnknown_SkPixelGeometry) { }
+
+ SkSurfaceCharacterization createResized(int width, int height) const {
+ return *this;
+ }
+
+ SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const {
+ return *this;
+ }
+
+ bool operator==(const SkSurfaceCharacterization& other) const { return false; }
+ bool operator!=(const SkSurfaceCharacterization& other) const {
+ return !(*this == other);
+ }
+
+ size_t cacheMaxResourceBytes() const { return 0; }
+
+ bool isValid() const { return false; }
+
+ int width() const { return 0; }
+ int height() const { return 0; }
+ int stencilCount() const { return 0; }
+ bool isTextureable() const { return false; }
+ bool isMipMapped() const { return false; }
+ bool usesGLFBO0() const { return false; }
+ bool vulkanSecondaryCBCompatible() const { return false; }
+ SkColorSpace* colorSpace() const { return nullptr; }
+ sk_sp<SkColorSpace> refColorSpace() const { return nullptr; }
+    const SkSurfaceProps& surfaceProps() const { return fSurfaceProps; }
+
+private:
+ SkSurfaceProps fSurfaceProps;
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurfaceProps.h b/gfx/skia/skia/include/core/SkSurfaceProps.h
new file mode 100644
index 0000000000..6eda6a563d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurfaceProps.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceProps_DEFINED
+#define SkSurfaceProps_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Description of how the LCD strips are arranged for each pixel. If this is unknown, or the
+ * pixels are meant to be "portable" and/or transformed before showing (e.g. rotated, scaled)
+ * then use kUnknown_SkPixelGeometry.
+ */
+enum SkPixelGeometry {
+ kUnknown_SkPixelGeometry,
+ kRGB_H_SkPixelGeometry,
+ kBGR_H_SkPixelGeometry,
+ kRGB_V_SkPixelGeometry,
+ kBGR_V_SkPixelGeometry,
+};
+
+// Returns true iff geo is a known geometry and is RGB.
+static inline bool SkPixelGeometryIsRGB(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kRGB_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is BGR.
+static inline bool SkPixelGeometryIsBGR(SkPixelGeometry geo) {
+ return kBGR_H_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is horizontal.
+static inline bool SkPixelGeometryIsH(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kBGR_H_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is vertical.
+static inline bool SkPixelGeometryIsV(SkPixelGeometry geo) {
+ return kRGB_V_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+/**
+ * Describes properties and constraints of a given SkSurface. The rendering engine can parse these
+ * during drawing, and can sometimes optimize its performance (e.g. disabling an expensive
+ * feature).
+ */
+class SK_API SkSurfaceProps {
+public:
+ enum Flags {
+ kUseDeviceIndependentFonts_Flag = 1 << 0,
+ };
+ /** Deprecated alias used by Chromium. Will be removed. */
+ static const Flags kUseDistanceFieldFonts_Flag = kUseDeviceIndependentFonts_Flag;
+
+ SkSurfaceProps(uint32_t flags, SkPixelGeometry);
+
+ enum InitType {
+ kLegacyFontHost_InitType
+ };
+ SkSurfaceProps(InitType);
+ SkSurfaceProps(uint32_t flags, InitType);
+ SkSurfaceProps(const SkSurfaceProps& other);
+
+ uint32_t flags() const { return fFlags; }
+ SkPixelGeometry pixelGeometry() const { return fPixelGeometry; }
+
+ bool isUseDeviceIndependentFonts() const {
+ return SkToBool(fFlags & kUseDeviceIndependentFonts_Flag);
+ }
+
+ bool operator==(const SkSurfaceProps& that) const {
+ return fFlags == that.fFlags && fPixelGeometry == that.fPixelGeometry;
+ }
+
+ bool operator!=(const SkSurfaceProps& that) const {
+ return !(*this == that);
+ }
+private:
+ SkSurfaceProps();
+
+ uint32_t fFlags;
+ SkPixelGeometry fPixelGeometry;
+};
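+
+// Example (sketch): props for a surface whose pixels may be rotated or scaled
+// before display, so device-independent (non-LCD) glyph rendering is requested.
+//
+//     SkSurfaceProps props(SkSurfaceProps::kUseDeviceIndependentFonts_Flag,
+//                          kUnknown_SkPixelGeometry);
+//     sk_sp<SkSurface> surface = SkSurface::MakeRasterN32Premul(64, 64, &props);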
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSwizzle.h b/gfx/skia/skia/include/core/SkSwizzle.h
new file mode 100644
index 0000000000..61e93b2da7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSwizzle.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzle_DEFINED
+#define SkSwizzle_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ Swizzles byte order of |count| 32-bit pixels, swapping R and B.
+ (RGBA <-> BGRA)
+*/
+SK_API void SkSwapRB(uint32_t* dest, const uint32_t* src, int count);
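+
+// Example (sketch): swap R and B of two pixels into a separate buffer.
+//
+//     uint32_t src[2] = {0xAABBCCDD, 0x11223344};
+//     uint32_t dst[2];
+//     SkSwapRB(dst, src, 2);  // each 32-bit pixel has its R and B bytes swapped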
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTextBlob.h b/gfx/skia/skia/include/core/SkTextBlob.h
new file mode 100644
index 0000000000..de4856efdc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTextBlob.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextBlob_DEFINED
+#define SkTextBlob_DEFINED
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/private/SkTemplates.h"
+
+#include <atomic>
+
+struct SkRSXform;
+struct SkSerialProcs;
+struct SkDeserialProcs;
+
+/** \class SkTextBlob
+ SkTextBlob combines multiple text runs into an immutable container. Each text
+ run consists of glyphs, SkPaint, and position. Only parts of SkPaint related to
+ fonts and text rendering are used by run.
+*/
+class SK_API SkTextBlob final : public SkNVRefCnt<SkTextBlob> {
+private:
+ class RunRecord;
+
+public:
+
+ /** Returns conservative bounding box. Uses SkPaint associated with each glyph to
+ determine glyph bounds, and unions all bounds. Returned bounds may be
+ larger than the bounds of all glyphs in runs.
+
+ @return conservative bounding box
+ */
+ const SkRect& bounds() const { return fBounds; }
+
+ /** Returns a non-zero value unique among all text blobs.
+
+ @return identifier for SkTextBlob
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns the number of intervals that intersect bounds.
+ bounds describes a pair of lines parallel to the text advance.
+ The return count is zero or a multiple of two, and is at most twice the number of glyphs in
+        the blob.
+
+ Pass nullptr for intervals to determine the size of the interval array.
+
+ Runs within the blob that contain SkRSXform are ignored when computing intercepts.
+
+ @param bounds lower and upper line parallel to the advance
+ @param intervals returned intersections; may be nullptr
+        @param paint      specifies stroking and SkPathEffect that affect the result; may be nullptr
+ @return number of intersections; may be zero
+ */
+ int getIntercepts(const SkScalar bounds[2], SkScalar intervals[],
+ const SkPaint* paint = nullptr) const;
+
+ /** Creates SkTextBlob with a single run.
+
+ font contains attributes used to define the run text.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ @param text character code points or glyphs drawn
+ @param byteLength byte length of text array
+ @param font text size, typeface, text scale, and so on, used to draw
+ @param encoding text encoding used in the text array
+ @return SkTextBlob constructed from one run
+ */
+ static sk_sp<SkTextBlob> MakeFromText(const void* text, size_t byteLength, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Creates SkTextBlob with a single run. string meaning depends on SkTextEncoding;
+ by default, string is encoded as UTF-8.
+
+ font contains attributes used to define the run text.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ @param string character code points or glyphs drawn
+ @param font text size, typeface, text scale, and so on, used to draw
+ @param encoding text encoding used in the text array
+ @return SkTextBlob constructed from one run
+ */
+ static sk_sp<SkTextBlob> MakeFromString(const char* string, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8) {
+ if (!string) {
+ return nullptr;
+ }
+ return MakeFromText(string, strlen(string), font, encoding);
+ }
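+
+    // Example (sketch): build a blob from a UTF-8 string and draw it; the
+    // canvas is assumed to exist elsewhere, and the default SkFont is arbitrary.
+    //
+    //     sk_sp<SkTextBlob> blob = SkTextBlob::MakeFromString("Hello", SkFont());
+    //     canvas->drawTextBlob(blob, /*x=*/10, /*y=*/32, SkPaint());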
+
+ /** Returns a textblob built from a single run of text with x-positions and a single y value.
+ This is equivalent to using SkTextBlobBuilder and calling allocRunPosH().
+ Returns nullptr if byteLength is zero.
+
+ @param text character code points or glyphs drawn (based on encoding)
+ @param byteLength byte length of text array
+ @param xpos array of x-positions, must contain values for all of the character points.
+ @param constY shared y-position for each character point, to be paired with each xpos.
+ @param font SkFont used for this run
+ @param encoding specifies the encoding of the text array.
+ @return new textblob or nullptr
+ */
+ static sk_sp<SkTextBlob> MakeFromPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Returns a textblob built from a single run of text with positions.
+ This is equivalent to using SkTextBlobBuilder and calling allocRunPos().
+ Returns nullptr if byteLength is zero.
+
+ @param text character code points or glyphs drawn (based on encoding)
+ @param byteLength byte length of text array
+ @param pos array of positions, must contain values for all of the character points.
+ @param font SkFont used for this run
+ @param encoding specifies the encoding of the text array.
+ @return new textblob or nullptr
+ */
+ static sk_sp<SkTextBlob> MakeFromPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ static sk_sp<SkTextBlob> MakeFromRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Writes data to allow later reconstruction of SkTextBlob. memory points to storage
+ to receive the encoded data, and memory_size describes the size of storage.
+ Returns bytes used if provided storage is large enough to hold all data;
+ otherwise, returns zero.
+
+ procs.fTypefaceProc permits supplying a custom function to encode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @param memory storage for data
+ @param memory_size size of storage
+ @return bytes written, or zero if required storage is larger than memory_size
+ */
+ size_t serialize(const SkSerialProcs& procs, void* memory, size_t memory_size) const;
+
+ /** Returns storage containing SkData describing SkTextBlob, using optional custom
+ encoders.
+
+ procs.fTypefaceProc permits supplying a custom function to encode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @return storage containing serialized SkTextBlob
+ */
+ sk_sp<SkData> serialize(const SkSerialProcs& procs) const;
+
+ /** Recreates SkTextBlob that was serialized into data. Returns constructed SkTextBlob
+ if successful; otherwise, returns nullptr. Fails if size is smaller than
+ required data length, or if data does not permit constructing valid SkTextBlob.
+
+ procs.fTypefaceProc permits supplying a custom function to decode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default decoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface data, data byte length, and user context.
+
+ @param data pointer for serial data
+ @param size size of data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkTextBlob constructed from data in memory
+ */
+ static sk_sp<SkTextBlob> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs& procs);
+
+ class SK_API Iter {
+ public:
+ struct Run {
+ SkTypeface* fTypeface;
+ int fGlyphCount;
+ const uint16_t* fGlyphIndices;
+ };
+
+ Iter(const SkTextBlob&);
+
+ /**
+ * Returns true for each "run" inside the textblob, setting the Run fields (if not null).
+ * If this returns false, there are no more runs, and the Run parameter will be ignored.
+ */
+ bool next(Run*);
+
+ private:
+ const RunRecord* fRunRecord;
+ };
+
+private:
+ friend class SkNVRefCnt<SkTextBlob>;
+
+ enum GlyphPositioning : uint8_t;
+
+ explicit SkTextBlob(const SkRect& bounds);
+
+ ~SkTextBlob();
+
+ // Memory for objects of this class is created with sk_malloc rather than operator new and must
+ // be freed with sk_free.
+ void operator delete(void* p);
+ void* operator new(size_t);
+ void* operator new(size_t, void* p);
+
+ static unsigned ScalarsPerGlyph(GlyphPositioning pos);
+
+ // Call when this blob is part of the key to a cache entry. This allows the cache
+ // to know automatically those entries can be purged when this SkTextBlob is deleted.
+ void notifyAddedToCache(uint32_t cacheID) const {
+ fCacheID.store(cacheID);
+ }
+
+ friend class SkGlyphRunList;
+ friend class GrTextBlobCache;
+ friend class SkTextBlobBuilder;
+ friend class SkTextBlobPriv;
+ friend class SkTextBlobRunIterator;
+
+ const SkRect fBounds;
+ const uint32_t fUniqueID;
+ mutable std::atomic<uint32_t> fCacheID;
+
+ SkDEBUGCODE(size_t fStorageSize;)
+
+ // The actual payload resides in externally-managed storage, following the object.
+ // (see the .cpp for more details)
+
+ typedef SkRefCnt INHERITED;
+};
+
+/** \class SkTextBlobBuilder
+ Helper class for constructing SkTextBlob.
+*/
+class SK_API SkTextBlobBuilder {
+public:
+
+ /** Constructs empty SkTextBlobBuilder. By default, SkTextBlobBuilder has no runs.
+
+ @return empty SkTextBlobBuilder
+ */
+ SkTextBlobBuilder();
+
+ /** Deletes data allocated internally by SkTextBlobBuilder.
+ */
+ ~SkTextBlobBuilder();
+
+ /** Returns SkTextBlob built from runs of glyphs added by builder. Returned
+ SkTextBlob is immutable; it may be copied, but its contents may not be altered.
+ Returns nullptr if no runs of glyphs were added by builder.
+
+ Resets SkTextBlobBuilder to its initial empty state, allowing it to be
+ reused to build a new set of runs.
+
+ @return SkTextBlob or nullptr
+ */
+ sk_sp<SkTextBlob> make();
+
+ /** \struct SkTextBlobBuilder::RunBuffer
+ RunBuffer supplies storage for glyphs and positions within a run.
+
+ A run is a sequence of glyphs sharing font metrics and positioning.
+ Each run may position its glyphs in one of three ways:
+ by specifying where the first glyph is drawn, and allowing font metrics to
+ determine the advance to subsequent glyphs; by specifying a baseline, and
+ the position on that baseline for each glyph in run; or by providing SkPoint
+ array, one per glyph.
+ */
+ struct RunBuffer {
+ SkGlyphID* glyphs; //!< storage for glyphs in run
+ SkScalar* pos; //!< storage for positions in run
+ char* utf8text; //!< reserved for future use
+ uint32_t* clusters; //!< reserved for future use
+
+ // Helpers, since the "pos" field can be different types (always some number of floats).
+ SkPoint* points() const { return reinterpret_cast<SkPoint*>(pos); }
+ SkRSXform* xforms() const { return reinterpret_cast<SkRSXform*>(pos); }
+ };
+
+ /** Returns run with storage for glyphs. Caller must write count glyphs to
+ RunBuffer::glyphs before next call to SkTextBlobBuilder.
+
+        RunBuffer::utf8text and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at (x, y), using font metrics to
+ determine their relative placement.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from (x, y) and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param x horizontal offset within the blob
+ @param y vertical offset within the blob
+ @param bounds optional run bounding box
+ @return writable glyph buffer
+ */
+ const RunBuffer& allocRun(const SkFont& font, int count, SkScalar x, SkScalar y,
+ const SkRect* bounds = nullptr);
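+
+    // Example (sketch): convert text to glyph IDs with SkFont and write them
+    // into the buffer returned by allocRun.
+    //
+    //     SkTextBlobBuilder builder;
+    //     SkFont font;
+    //     const char* text = "abc";
+    //     int count = font.countText(text, 3, SkTextEncoding::kUTF8);
+    //     const auto& run = builder.allocRun(font, count, /*x=*/0, /*y=*/20);
+    //     font.textToGlyphs(text, 3, SkTextEncoding::kUTF8, run.glyphs, count);
+    //     sk_sp<SkTextBlob> blob = builder.make();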
+
+ /** Returns run with storage for glyphs and positions along baseline. Caller must
+        write count glyphs to RunBuffer::glyphs, and count scalars to RunBuffer::pos,
+        before the next call to SkTextBlobBuilder.
+
+        RunBuffer::utf8text and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at y, using x-axis positions written by
+ caller to RunBuffer::pos.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from y, RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param y vertical offset within the blob
+ @param bounds optional run bounding box
+ @return writable glyph buffer and x-axis position buffer
+ */
+ const RunBuffer& allocRunPosH(const SkFont& font, int count, SkScalar y,
+ const SkRect* bounds = nullptr);
+
+ /** Returns run with storage for glyphs and SkPoint positions. Caller must
+        write count glyphs to RunBuffer::glyphs, and count SkPoint to RunBuffer::pos,
+        before the next call to SkTextBlobBuilder.
+
+        RunBuffer::utf8text and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned using SkPoint written by caller to RunBuffer::pos, using
+ two scalar values for each SkPoint.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param bounds optional run bounding box
+ @return writable glyph buffer and SkPoint buffer
+ */
+ const RunBuffer& allocRunPos(const SkFont& font, int count,
+ const SkRect* bounds = nullptr);
+
+ // RunBuffer.pos points to SkRSXform array
+ const RunBuffer& allocRunRSXform(const SkFont& font, int count);
+
+private:
+ const RunBuffer& allocRunText(const SkFont& font,
+ int count,
+ SkScalar x,
+ SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds = nullptr);
+ const RunBuffer& allocRunTextPosH(const SkFont& font, int count, SkScalar y,
+ int textByteCount, SkString lang,
+ const SkRect* bounds = nullptr);
+ const RunBuffer& allocRunTextPos(const SkFont& font, int count,
+ int textByteCount, SkString lang,
+ const SkRect* bounds = nullptr);
+ const RunBuffer& allocRunRSXform(const SkFont& font, int count,
+ int textByteCount, SkString lang,
+ const SkRect* bounds = nullptr);
+
+ void reserve(size_t size);
+ void allocInternal(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ int count, int textBytes, SkPoint offset, const SkRect* bounds);
+ bool mergeRun(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ uint32_t count, SkPoint offset);
+ void updateDeferredBounds();
+
+ static SkRect ConservativeRunBounds(const SkTextBlob::RunRecord&);
+ static SkRect TightRunBounds(const SkTextBlob::RunRecord&);
+
+ friend class SkTextBlobPriv;
+ friend class SkTextBlobBuilderPriv;
+
+ SkAutoTMalloc<uint8_t> fStorage;
+ size_t fStorageSize;
+ size_t fStorageUsed;
+
+ SkRect fBounds;
+ int fRunCount;
+ bool fDeferredBounds;
+ size_t fLastRun; // index into fStorage
+
+ RunBuffer fCurrentRunBuffer;
+};
+
+#endif // SkTextBlob_DEFINED
diff --git a/gfx/skia/skia/include/core/SkTileMode.h b/gfx/skia/skia/include/core/SkTileMode.h
new file mode 100644
index 0000000000..8a9d020958
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTileMode.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTileModes_DEFINED
+#define SkTileModes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkTileMode {
+ /**
+ * Replicate the edge color if the shader draws outside of its
+ * original bounds.
+ */
+ kClamp,
+
+ /**
+ * Repeat the shader's image horizontally and vertically.
+ */
+ kRepeat,
+
+ /**
+ * Repeat the shader's image horizontally and vertically, alternating
+ * mirror images so that adjacent images always seam.
+ */
+ kMirror,
+
+ /**
+ * Only draw within the original domain, return transparent-black everywhere else.
+ */
+ kDecal,
+
+ kLastTileMode = kDecal,
+};
+
+static constexpr int kSkTileModeCount = static_cast<int>(SkTileMode::kLastTileMode) + 1;
+
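+// Example (sketch): tile modes are consumed by shader factories to control
+// sampling outside the shader's domain; SkGradientShader (from
+// include/effects/SkGradientShader.h) is one such factory.
+//
+//     SkPoint pts[2] = {{0, 0}, {100, 0}};
+//     SkColor colors[2] = {SK_ColorRED, SK_ColorBLUE};
+//     sk_sp<SkShader> shader = SkGradientShader::MakeLinear(
+//             pts, colors, nullptr, 2, SkTileMode::kMirror);
+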
+#endif
diff --git a/gfx/skia/skia/include/core/SkTime.h b/gfx/skia/skia/include/core/SkTime.h
new file mode 100644
index 0000000000..1f033ef3ad
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTime.h
@@ -0,0 +1,62 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTime_DEFINED
+#define SkTime_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMacros.h"
+
+class SkString;
+
+/** \class SkTime
+ Platform-implemented utilities to return time of day, and millisecond counter.
+*/
+class SK_API SkTime {
+public:
+ struct DateTime {
+ int16_t fTimeZoneMinutes; // The number of minutes that GetDateTime()
+ // is ahead of or behind UTC.
+ uint16_t fYear; //!< e.g. 2005
+ uint8_t fMonth; //!< 1..12
+ uint8_t fDayOfWeek; //!< 0..6, 0==Sunday
+ uint8_t fDay; //!< 1..31
+ uint8_t fHour; //!< 0..23
+ uint8_t fMinute; //!< 0..59
+ uint8_t fSecond; //!< 0..59
+
+ void toISO8601(SkString* dst) const;
+ };
+ static void GetDateTime(DateTime*);
+
+ static double GetSecs() { return GetNSecs() * 1e-9; }
+ static double GetMSecs() { return GetNSecs() * 1e-6; }
+ static double GetNSecs();
+};
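+
+// Example (sketch): measuring elapsed wall-clock time.
+//
+//     double start = SkTime::GetMSecs();
+//     // ... work ...
+//     double elapsedMs = SkTime::GetMSecs() - start;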
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAutoTime {
+public:
+ // The label is not deep-copied, so its address must remain valid for the
+ // lifetime of this object
+ SkAutoTime(const char* label = nullptr)
+ : fLabel(label)
+ , fNow(SkTime::GetMSecs()) {}
+ ~SkAutoTime() {
+ uint64_t dur = static_cast<uint64_t>(SkTime::GetMSecs() - fNow);
+        SkDebugf("%s %llu\n", fLabel ? fLabel : "", static_cast<unsigned long long>(dur));
+ }
+private:
+ const char* fLabel;
+ double fNow;
+};
+#define SkAutoTime(...) SK_REQUIRE_LOCAL_VAR(SkAutoTime)
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTraceMemoryDump.h b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
new file mode 100644
index 0000000000..10f28b4f01
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTraceMemoryDump_DEFINED
+#define SkTraceMemoryDump_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkDiscardableMemory;
+
+/**
+ * Interface for memory tracing.
+ * This interface is meant to be passed as argument to the memory dump methods of Skia objects.
+ * The implementation of this interface is provided by the embedder.
+ */
+class SK_API SkTraceMemoryDump {
+public:
+ /**
+ * Enum to specify the level of the requested details for the dump from the Skia objects.
+ */
+ enum LevelOfDetail {
+ // Dump only the minimal details to get the total memory usage (Usually just the totals).
+ kLight_LevelOfDetail,
+
+ // Dump the detailed breakdown of the objects in the caches.
+ kObjectsBreakdowns_LevelOfDetail
+ };
+
+ /**
+ * Appends a new memory dump (i.e. a row) to the trace memory infrastructure.
+ * If dumpName does not exist yet, a new one is created. Otherwise, a new column is appended to
+ * the previously created dump.
+ * Arguments:
+     *    dumpName: an absolute, slash-separated name for the item being dumped
+ * e.g., "skia/CacheX/EntryY".
+ * valueName: a string indicating the name of the column.
+ * e.g., "size", "active_size", "number_of_objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * units: a string indicating the units for the value.
+ * e.g., "bytes", "objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * value: the actual value being dumped.
+ */
+ virtual void dumpNumericValue(const char* dumpName,
+ const char* valueName,
+ const char* units,
+ uint64_t value) = 0;
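+
+    //  Example call (sketch; the dump name and values are illustrative only):
+    //
+    //      dump->dumpNumericValue("skia/sk_resource_cache", "size", "bytes", usedBytes);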
+
+ virtual void dumpStringValue(const char* /*dumpName*/,
+ const char* /*valueName*/,
+ const char* /*value*/) { }
+
+ /**
+ * Sets the memory backing for an existing dump.
+ * backingType and backingObjectId are used by the embedder to associate the memory dumped via
+ * dumpNumericValue with the corresponding dump that backs the memory.
+ */
+ virtual void setMemoryBacking(const char* dumpName,
+ const char* backingType,
+ const char* backingObjectId) = 0;
+
+ /**
+ * Specialization for memory backed by discardable memory.
+ */
+ virtual void setDiscardableMemoryBacking(
+ const char* dumpName,
+ const SkDiscardableMemory& discardableMemoryObject) = 0;
+
+ /**
+ * Returns the type of details requested in the dump. The granularity of the dump is supposed to
+ * match the LevelOfDetail argument. The level of detail must not affect the total size
+     *  reported, but only the granularity of the child entries.
+ */
+ virtual LevelOfDetail getRequestedDetails() const = 0;
+
+ /**
+ * Returns true if we should dump wrapped objects. Wrapped objects come from outside Skia, and
+ * may be independently tracked there.
+ */
+ virtual bool shouldDumpWrappedObjects() const { return true; }
+
+protected:
+ virtual ~SkTraceMemoryDump() { }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTypeface.h b/gfx/skia/skia/include/core/SkTypeface.h
new file mode 100644
index 0000000000..77f7445aae
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypeface.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_DEFINED
+#define SkTypeface_DEFINED
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontParameters.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkWeakRefCnt.h"
+
+class SkData;
+class SkDescriptor;
+class SkFontData;
+class SkFontDescriptor;
+class SkScalerContext;
+class SkStream;
+class SkStreamAsset;
+class SkWStream;
+struct SkAdvancedTypefaceMetrics;
+struct SkScalerContextEffects;
+struct SkScalerContextRec;
+
+typedef uint32_t SkFontID;
+/** Machine endian. */
+typedef uint32_t SkFontTableTag;
+
+/** \class SkTypeface
+
+ The SkTypeface class specifies the typeface and intrinsic style of a font.
+ This is used in the paint, along with optionally algorithmic settings like
+ textSize, textSkewX, textScaleX, kFakeBoldText_Mask, to specify
+ how text appears when drawn (and measured).
+
+ Typeface objects are immutable, and so they can be shared between threads.
+*/
+class SK_API SkTypeface : public SkWeakRefCnt {
+public:
+ /** Returns the typeface's intrinsic style attributes. */
+ SkFontStyle fontStyle() const {
+ return fStyle;
+ }
+
+ /** Returns true if style() has the kBold bit set. */
+ bool isBold() const { return fStyle.weight() >= SkFontStyle::kSemiBold_Weight; }
+
+ /** Returns true if style() has the kItalic bit set. */
+ bool isItalic() const { return fStyle.slant() != SkFontStyle::kUpright_Slant; }
+
+ /** Returns true if the typeface claims to be fixed-pitch.
+ * This is a style bit, advance widths may vary even if this returns true.
+ */
+ bool isFixedPitch() const { return fIsFixedPitch; }
+
+ /** Copy into 'coordinates' (allocated by the caller) the design variation coordinates.
+ *
+ * @param coordinates the buffer into which to write the design variation coordinates.
+ * @param coordinateCount the number of entries available through 'coordinates'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'coordinates != nullptr' and 'coordinateCount >= numAxes' then 'coordinates' will be
+ * filled with the variation coordinates describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual position
+ * cannot.
+ */
+ int getVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const;
+
+ /** Copy into 'parameters' (allocated by the caller) the design variation parameters.
+ *
+ * @param parameters the buffer into which to write the design variation parameters.
+     *  @param parameterCount the number of entries available through 'parameters'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'parameters != nullptr' and 'parameterCount >= numAxes' then 'parameters' will be
+ * filled with the variation parameters describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual parameters
+ * cannot.
+ */
+ int getVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const;
+
+ /** Return a 32bit value for this typeface, unique for the underlying font
+ data. Will never return 0.
+ */
+ SkFontID uniqueID() const { return fUniqueID; }
+
+ /** Return the uniqueID for the specified typeface. If the face is null,
+ resolve it to the default font and return its uniqueID. Will never
+ return 0.
+ */
+ static SkFontID UniqueID(const SkTypeface* face);
+
+ /** Returns true if the two typefaces reference the same underlying font,
+ handling either being null (treating null as the default font)
+ */
+ static bool Equal(const SkTypeface* facea, const SkTypeface* faceb);
+
+ /** Returns the default normal typeface, which is never nullptr. */
+ static sk_sp<SkTypeface> MakeDefault();
+
+ /** Creates a new reference to the typeface that most closely matches the
+ requested familyName and fontStyle. This method allows extended font
+ face specifiers as in the SkFontStyle type. Will never return null.
+
+ @param familyName May be NULL. The name of the font family.
+ @param fontStyle The style of the typeface.
+        @return reference to the closest-matching typeface. The caller must call
+                unref() when done.
+ */
+ static sk_sp<SkTypeface> MakeFromName(const char familyName[], SkFontStyle fontStyle);
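+
+    // Example (sketch): request the bold style of a family; the font manager
+    // returns the closest available match.
+    //
+    //     sk_sp<SkTypeface> face =
+    //             SkTypeface::MakeFromName("Helvetica", SkFontStyle::Bold());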
+
+ /** Return a new typeface given a file. If the file does not exist, or is
+ not a valid font file, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFile(const char path[], int index = 0);
+
+ /** Return a new typeface given a stream. If the stream is
+ not a valid font file, returns nullptr. Ownership of the stream is
+ transferred, so the caller must not reference it again.
+ */
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset> stream, int index = 0);
+
+ /** Return a new typeface given a SkData. If the data is null, or is not a valid font file,
+ * returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromData(sk_sp<SkData>, int index = 0);
+
+ /** Return a new typeface given font data and configuration. If the data
+ is not valid font data, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFontData(std::unique_ptr<SkFontData>);
+
+ /** Return a new typeface based on this typeface but parameterized as specified in the
+ SkFontArguments. If the SkFontArguments does not supply an argument for a parameter
+ in the font then the value from this typeface will be used as the value for that
+        argument. If the cloned typeface would be exactly the same as this typeface then
+ this typeface may be ref'ed and returned. May return nullptr on failure.
+ */
+ sk_sp<SkTypeface> makeClone(const SkFontArguments&) const;
+
+ /**
+ * A typeface can serialize just a descriptor (names, etc.), or it can also include the
+ * actual font data (which can be large). This enum controls how serialize() decides what
+ * to serialize.
+ */
+ enum class SerializeBehavior {
+ kDoIncludeData,
+ kDontIncludeData,
+ kIncludeDataIfLocal,
+ };
+
+ /** Write a unique signature to a stream, sufficient to reconstruct a
+ typeface referencing the same font when Deserialize is called.
+ */
+ void serialize(SkWStream*, SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /**
+ * Same as serialize(SkWStream*, ...) but returns the serialized data in SkData, instead of
+ * writing it to a stream.
+ */
+ sk_sp<SkData> serialize(SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /** Given the data previously written by serialize(), return a new instance
+ of a typeface referring to the same font. If that font is not available,
+ return nullptr.
+ Does not affect ownership of SkStream.
+ */
+ static sk_sp<SkTypeface> MakeDeserialize(SkStream*);
+
+ /**
+ * Given an array of UTF32 character codes, return their corresponding glyph IDs.
+ *
+ * @param chars pointer to the array of UTF32 chars
+     * @param count the number of chars and glyphs
+ * @param glyphs returns the corresponding glyph IDs for each character.
+ */
+ void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const;
+
+ /**
+ * Return the glyphID that corresponds to the specified unicode code-point
+ * (in UTF32 encoding). If the unichar is not supported, returns 0.
+ *
+ * This is a short-cut for calling unicharsToGlyphs().
+ */
+ SkGlyphID unicharToGlyph(SkUnichar unichar) const;
+
+ /**
+ * Return the number of glyphs in the typeface.
+ */
+ int countGlyphs() const;
+
+ // Table getters -- may fail if the underlying font format is not organized
+ // as 4-byte tables.
+
+ /** Return the number of tables in the font. */
+ int countTables() const;
+
+ /** Copy into tags[] (allocated by the caller) the list of table tags in
+     * the font, and return the number. This will be the same as countTables()
+     * or 0 if an error occurred. If tags == NULL, this only returns the count
+ * (the same as calling countTables()).
+ */
+ int getTableTags(SkFontTableTag tags[]) const;
+
+ /** Given a table tag, return the size of its contents, or 0 if not present
+ */
+ size_t getTableSize(SkFontTableTag) const;
+
+ /** Copy the contents of a table into data (allocated by the caller). Note
+ * that the contents of the table will be in their native endian order
+ * (which for most truetype tables is big endian). If the table tag is
+ * not found, or there is an error copying the data, then 0 is returned.
+ * If this happens, it is possible that some or all of the memory pointed
+     * to by data may have been written to, even though an error has occurred.
+ *
+ * @param tag The table tag whose contents are to be copied
+ * @param offset The offset in bytes into the table's contents where the
+ * copy should start from.
+ * @param length The number of bytes, starting at offset, of table data
+ * to copy.
+ * @param data storage address where the table contents are copied to
+ * @return the number of bytes actually copied into data. If offset+length
+ * exceeds the table's size, then only the bytes up to the table's
+ * size are actually copied, and this is the value returned. If
+ * offset > the table's size, or tag is not a valid table,
+ * then 0 is returned.
+ */
+ size_t getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const;
+
+ /**
+ * Return an immutable copy of the requested font table, or nullptr if that table was
+ * not found. This can sometimes be faster than calling getTableData() twice: once to find
+ * the length, and then again to copy the data.
+ *
+ * @param tag The table tag whose contents are to be copied
+ * @return an immutable copy of the table's data, or nullptr.
+ */
+ sk_sp<SkData> copyTableData(SkFontTableTag tag) const;
+
+ /**
+ * Return the units-per-em value for this typeface, or zero if there is an
+ * error.
+ */
+ int getUnitsPerEm() const;
+
+ /**
+ * Given a run of glyphs, return the associated horizontal adjustments.
+ * Adjustments are in "design units", which are integers relative to the
+ * typeface's units per em (see getUnitsPerEm).
+ *
+ * Some typefaces are known to never support kerning. Calling this method
+     * with all zeros (e.g. getKerningPairAdjustments(NULL, 0, NULL)) returns
+ * a boolean indicating if the typeface might support kerning. If it
+ * returns false, then it will always return false (no kerning) for all
+ * possible glyph runs. If it returns true, then it *may* return true for
+     * some glyph runs.
+ *
+ * If count is non-zero, then the glyphs parameter must point to at least
+ * [count] valid glyph IDs, and the adjustments parameter must be
+ * sized to at least [count - 1] entries. If the method returns true, then
+ * [count-1] entries in the adjustments array will be set. If the method
+ * returns false, then no kerning should be applied, and the adjustments
+ * array will be in an undefined state (possibly some values may have been
+ * written, but none of them should be interpreted as valid values).
+ */
+ bool getKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
+
+ struct LocalizedString {
+ SkString fString;
+ SkString fLanguage;
+ };
+ class LocalizedStrings {
+ public:
+ LocalizedStrings() = default;
+ virtual ~LocalizedStrings() { }
+ virtual bool next(LocalizedString* localizedString) = 0;
+ void unref() { delete this; }
+
+ private:
+ LocalizedStrings(const LocalizedStrings&) = delete;
+ LocalizedStrings& operator=(const LocalizedStrings&) = delete;
+ };
+ /**
+ * Returns an iterator which will attempt to enumerate all of the
+ * family names specified by the font.
+ * It is the caller's responsibility to unref() the returned pointer.
+ */
+ LocalizedStrings* createFamilyNameIterator() const;
+
+ /**
+ * Return the family name for this typeface. It will always be returned
+ * encoded as UTF8, but the language of the name is whatever the host
+ * platform chooses.
+ */
+ void getFamilyName(SkString* name) const;
+
+ /**
+ * Return a stream for the contents of the font data, or NULL on failure.
+ * If ttcIndex is not null, it is set to the TrueTypeCollection index
+ * of this typeface within the stream, or 0 if the stream is not a
+ * collection.
+ * The caller is responsible for deleting the stream.
+ */
+ std::unique_ptr<SkStreamAsset> openStream(int* ttcIndex) const;
+
+ /**
+ * Return the font data, or nullptr on failure.
+ */
+ std::unique_ptr<SkFontData> makeFontData() const;
+
+ /**
+ * Return a scalercontext for the given descriptor. If this fails, then
+ * if allowFailure is true, this returns NULL, else it returns a
+ * dummy scalercontext that will not crash, but will draw nothing.
+ */
+ std::unique_ptr<SkScalerContext> createScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*,
+ bool allowFailure = false) const;
+
+ /**
+ * Return a rectangle (scaled to 1-pt) that represents the union of the bounds of all
+     * of the glyphs, but each one positioned at (0, 0). This may be conservatively large, and
+ * will not take into account any hinting or other size-specific adjustments.
+ */
+ SkRect getBounds() const;
+
+    /**
+ * Returns whether this typeface has color glyphs and therefore cannot be
+ * rendered as a path. e.g. Emojis.
+ */
+ virtual bool hasColorGlyphs() const { return false; }
+
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void filterRec(SkScalerContextRec* rec) const {
+ this->onFilterRec(rec);
+ }
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void getFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ this->onGetFontDescriptor(desc, isLocal);
+ }
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void* internal_private_getCTFontRef() const {
+ return this->onGetCTFontRef();
+ }
+
+protected:
+ /** uniqueID must be unique and non-zero
+ */
+ SkTypeface(const SkFontStyle& style, bool isFixedPitch = false);
+ virtual ~SkTypeface();
+
+ virtual sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const = 0;
+
+ /** Sets the fixedPitch bit. If used, must be called in the constructor. */
+ void setIsFixedPitch(bool isFixedPitch) { fIsFixedPitch = isFixedPitch; }
+ /** Sets the font style. If used, must be called in the constructor. */
+ void setFontStyle(SkFontStyle style) { fStyle = style; }
+
+ virtual SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const = 0;
+ virtual void onFilterRec(SkScalerContextRec*) const = 0;
+ friend class SkScalerContext; // onFilterRec
+
+ // Subclasses *must* override this method to work with the PDF backend.
+ virtual std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const = 0;
+ // For type1 postscript fonts only, set the glyph names for each glyph.
+ // destination array is non-null, and points to an array of size this->countGlyphs().
+    // Backends that do not support type1 fonts should not override.
+ virtual void getPostScriptGlyphNames(SkString*) const = 0;
+
+ // The mapping from glyph to Unicode; array indices are glyph ids.
+ // For each glyph, give the default Unicode value, if it exists.
+ // dstArray is non-null, and points to an array of size this->countGlyphs().
+ virtual void getGlyphToUnicodeMap(SkUnichar* dstArray) const = 0;
+
+ virtual std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const = 0;
+ // TODO: make pure virtual.
+ virtual std::unique_ptr<SkFontData> onMakeFontData() const;
+
+ virtual int onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const = 0;
+
+ virtual int onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const = 0;
+
+ virtual void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const = 0;
+
+ virtual void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const = 0;
+ virtual int onCountGlyphs() const = 0;
+
+ virtual int onGetUPEM() const = 0;
+ virtual bool onGetKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
+
+ /** Returns the family name of the typeface as known by its font manager.
+ * This name may or may not be produced by the family name iterator.
+ */
+ virtual void onGetFamilyName(SkString* familyName) const = 0;
+
+ /** Returns an iterator over the family names in the font. */
+ virtual LocalizedStrings* onCreateFamilyNameIterator() const = 0;
+
+ virtual int onGetTableTags(SkFontTableTag tags[]) const = 0;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const = 0;
+ virtual sk_sp<SkData> onCopyTableData(SkFontTableTag) const;
+
+ virtual bool onComputeBounds(SkRect*) const;
+
+ virtual void* onGetCTFontRef() const { return nullptr; }
+
+private:
+ /** Retrieve detailed typeface metrics. Used by the PDF backend. */
+ std::unique_ptr<SkAdvancedTypefaceMetrics> getAdvancedMetrics() const;
+ friend class SkRandomTypeface; // getAdvancedMetrics
+ friend class SkPDFFont; // getAdvancedMetrics
+
+ /** Style specifies the intrinsic style attributes of a given typeface */
+ enum Style {
+ kNormal = 0,
+ kBold = 0x01,
+ kItalic = 0x02,
+
+ // helpers
+ kBoldItalic = 0x03
+ };
+ static SkFontStyle FromOldStyle(Style oldStyle);
+ static SkTypeface* GetDefaultTypeface(Style style = SkTypeface::kNormal);
+
+ friend class SkFontPriv; // GetDefaultTypeface
+ friend class SkPaintPriv; // GetDefaultTypeface
+ friend class SkFont; // getGlyphToUnicodeMap
+
+private:
+ SkFontID fUniqueID;
+ SkFontStyle fStyle;
+ mutable SkRect fBounds;
+ mutable SkOnce fBoundsOnce;
+ bool fIsFixedPitch;
+
+ typedef SkWeakRefCnt INHERITED;
+};
+#endif
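
Editorial aside, not part of the patch: a minimal sketch of how the SkTypeface API above hangs together. The family name is an arbitrary example; whether it resolves depends on the host platform's font manager.

```cpp
#include "include/core/SkString.h"
#include "include/core/SkTypeface.h"

// Look up a typeface, inspect it, and map a character to a glyph.
void DescribeTypeface() {
    // MakeFromName never returns null; a nullptr family falls back to the default.
    sk_sp<SkTypeface> face = SkTypeface::MakeFromName("Helvetica", SkFontStyle::Bold());

    SkString family;
    face->getFamilyName(&family);  // UTF-8; language chosen by the platform
    SkDebugf("family=%s glyphs=%d upem=%d\n",
             family.c_str(), face->countGlyphs(), face->getUnitsPerEm());

    SkGlyphID glyph = face->unicharToGlyph('A');  // 0 if 'A' is unsupported
    SkDebugf("glyph for 'A' = %u\n", glyph);
}
```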
diff --git a/gfx/skia/skia/include/core/SkTypes.h b/gfx/skia/skia/include/core/SkTypes.h
new file mode 100644
index 0000000000..e0dbbaf7a4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypes.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypes_DEFINED
+#define SkTypes_DEFINED
+
+// IWYU pragma: begin_exports
+#include "include/core/SkPreConfig.h"
+#if defined (SK_USER_CONFIG_HEADER)
+ #include SK_USER_CONFIG_HEADER
+#else
+ #include "include/config/SkUserConfig.h"
+#endif
+#include "include/core/SkPostConfig.h"
+#include <stddef.h>
+#include <stdint.h>
+// IWYU pragma: end_exports
+
+/** \file SkTypes.h
+*/
+
+/** Called internally if we hit an unrecoverable error.
+ The platform implementation must not return, but should either throw
+ an exception or otherwise exit.
+*/
+SK_API extern void sk_abort_no_print(void);
+SK_API extern bool sk_abort_is_enabled();
+
+#ifndef SkDebugf
+ SK_API void SkDebugf(const char format[], ...);
+#endif
+
+// SkASSERT, SkASSERTF and SkASSERT_RELEASE can be used as standalone assertion expressions, e.g.
+// uint32_t foo(int x) {
+// SkASSERT(x > 4);
+// return x - 4;
+// }
+// and are also written to be compatible with constexpr functions:
+// constexpr uint32_t foo(int x) {
+// return SkASSERT(x > 4),
+// x - 4;
+// }
+#define SkASSERT_RELEASE(cond) \
+ static_cast<void>( (cond) ? (void)0 : []{ SK_ABORT("assert(" #cond ")"); }() )
+
+#ifdef SK_DEBUG
+ #define SkASSERT(cond) SkASSERT_RELEASE(cond)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>( (cond) ? (void)0 : [&]{ \
+ SkDebugf(fmt"\n", __VA_ARGS__); \
+ SK_ABORT("assert(" #cond ")"); \
+ }() )
+ #define SkDEBUGFAIL(message) SK_ABORT(message)
+ #define SkDEBUGFAILF(fmt, ...) SkASSERTF(false, fmt, ##__VA_ARGS__)
+ #define SkDEBUGCODE(...) __VA_ARGS__
+ #define SkDEBUGF(...) SkDebugf(__VA_ARGS__)
+ #define SkAssertResult(cond) SkASSERT(cond)
+#else
+ #define SkASSERT(cond) static_cast<void>(0)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>(0)
+ #define SkDEBUGFAIL(message)
+ #define SkDEBUGFAILF(fmt, ...)
+ #define SkDEBUGCODE(...)
+ #define SkDEBUGF(...)
+
+ // unlike SkASSERT, this macro executes its condition in the non-debug build.
+ // The if is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT.
+ #define SkAssertResult(cond) if (cond) {} do {} while(false)
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** Fast type for unsigned 8 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U8CPU;
+
+/** Fast type for unsigned 16 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U16CPU;
+
+/** @return false or true based on the condition
+*/
+template <typename T> static constexpr bool SkToBool(const T& x) { return 0 != x; }
+
+static constexpr int16_t SK_MaxS16 = INT16_MAX;
+static constexpr int16_t SK_MinS16 = -SK_MaxS16;
+
+static constexpr int32_t SK_MaxS32 = INT32_MAX;
+static constexpr int32_t SK_MinS32 = -SK_MaxS32;
+static constexpr int32_t SK_NaN32 = INT32_MIN;
+
+static constexpr int64_t SK_MaxS64 = INT64_MAX;
+static constexpr int64_t SK_MinS64 = -SK_MaxS64;
+
+static inline constexpr int32_t SkLeftShift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+static inline constexpr int64_t SkLeftShift(int64_t value, int32_t shift) {
+ return (int64_t) ((uint64_t) value << shift);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** @return the number of entries in an array (not a pointer)
+*/
+template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N];
+#define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array)))
+
+////////////////////////////////////////////////////////////////////////////////
+
+template <typename T> static constexpr T SkAlign2(T x) { return (x + 1) >> 1 << 1; }
+template <typename T> static constexpr T SkAlign4(T x) { return (x + 3) >> 2 << 2; }
+template <typename T> static constexpr T SkAlign8(T x) { return (x + 7) >> 3 << 3; }
+
+template <typename T> static constexpr bool SkIsAlign2(T x) { return 0 == (x & 1); }
+template <typename T> static constexpr bool SkIsAlign4(T x) { return 0 == (x & 3); }
+template <typename T> static constexpr bool SkIsAlign8(T x) { return 0 == (x & 7); }
+
+template <typename T> static constexpr T SkAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x);
+}
+template <typename T> static constexpr bool SkIsAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x);
+}
+
+typedef uint32_t SkFourByteTag;
+static inline constexpr SkFourByteTag SkSetFourByteTag(char a, char b, char c, char d) {
+ return (((uint8_t)a << 24) | ((uint8_t)b << 16) | ((uint8_t)c << 8) | (uint8_t)d);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** 32-bit integer to hold a Unicode value
+*/
+typedef int32_t SkUnichar;
+
+/** 16-bit unsigned integer to hold a glyph index
+*/
+typedef uint16_t SkGlyphID;
+
+/** 32-bit value to hold a millisecond duration
+ Note that SK_MSecMax is about 25 days.
+*/
+typedef uint32_t SkMSec;
+
+/** Maximum representable milliseconds; 24d 20h 31m 23.647s.
+*/
+static constexpr SkMSec SK_MSecMax = INT32_MAX;
+
+/** The generation IDs in Skia reserve 0 as an invalid marker.
+*/
+static constexpr uint32_t SK_InvalidGenID = 0;
+
+/** The unique IDs in Skia reserve 0 as an invalid marker.
+*/
+static constexpr uint32_t SK_InvalidUniqueID = 0;
+
+static inline int32_t SkAbs32(int32_t value) {
+ SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated.
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+template <typename T> static inline T SkTAbs(T value) {
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+static inline int32_t SkMax32(int32_t a, int32_t b) {
+ if (a < b)
+ a = b;
+ return a;
+}
+
+static inline int32_t SkMin32(int32_t a, int32_t b) {
+ if (a > b)
+ a = b;
+ return a;
+}
+
+template <typename T> constexpr const T& SkTMin(const T& a, const T& b) {
+ return (a < b) ? a : b;
+}
+
+template <typename T> constexpr const T& SkTMax(const T& a, const T& b) {
+ return (b < a) ? a : b;
+}
+
+template <typename T> constexpr const T& SkTClamp(const T& x, const T& lo, const T& hi) {
+ return (x < lo) ? lo : SkTMin(x, hi);
+}
+
+/** @return value pinned (clamped) between min and max, inclusively.
+*/
+template <typename T> static constexpr const T& SkTPin(const T& value, const T& min, const T& max) {
+ return SkTMax(SkTMin(value, max), min);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** Indicates whether an allocation should count against a cache budget.
+*/
+enum class SkBudgeted : bool {
+ kNo = false,
+ kYes = true
+};
+
+/** Indicates whether a backing store needs to be an exact match or can be
+ larger than is strictly necessary
+*/
+enum class SkBackingFit {
+ kApprox,
+ kExact
+};
+
+#endif
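
Editorial aside, not part of the patch: the helpers declared above compose like this; all values are small worked examples.

```cpp
#include "include/core/SkTypes.h"

void TypesHelpers() {
    // Four-byte tags pack big-endian, matching the table tags used by SkTypeface.
    constexpr SkFourByteTag kHead = SkSetFourByteTag('h', 'e', 'a', 'd');
    static_assert(kHead == 0x68656164, "packs as big-endian");

    // Alignment helpers round up to the given power of two.
    static_assert(SkAlign4(5) == 8, "(5 + 3) >> 2 << 2");
    static_assert(SkIsAlign8(16), "16 is already 8-aligned");

    // SkTPin clamps inclusively; SK_ARRAY_COUNT only accepts true arrays.
    int values[] = {3, 1, 4, 1, 5};
    for (size_t i = 0; i < SK_ARRAY_COUNT(values); ++i) {
        values[i] = SkTPin(values[i], 1, 4);
    }
    SkASSERT(4 == values[4]);
}
```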
diff --git a/gfx/skia/skia/include/core/SkUnPreMultiply.h b/gfx/skia/skia/include/core/SkUnPreMultiply.h
new file mode 100644
index 0000000000..b492619d07
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkUnPreMultiply.h
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+
+
+#ifndef SkUnPreMultiply_DEFINED
+#define SkUnPreMultiply_DEFINED
+
+#include "include/core/SkColor.h"
+
+class SK_API SkUnPreMultiply {
+public:
+ typedef uint32_t Scale;
+
+ // index this table with alpha [0..255]
+ static const Scale* GetScaleTable() {
+ return gTable;
+ }
+
+ static Scale GetScale(U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ return gTable[alpha];
+ }
+
+ /** Usage:
+
+ const Scale* table = SkUnPreMultiply::GetScaleTable();
+
+ for (...) {
+ unsigned a = ...
+ SkUnPreMultiply::Scale scale = table[a];
+
+ red = SkUnPreMultiply::ApplyScale(scale, red);
+ ...
+ // now red is unpremultiplied
+ }
+ */
+ static U8CPU ApplyScale(Scale scale, U8CPU component) {
+ SkASSERT(component <= 255);
+ return (scale * component + (1 << 23)) >> 24;
+ }
+
+ static SkColor PMColorToColor(SkPMColor c);
+
+private:
+ static const uint32_t gTable[256];
+};
+
+#endif
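
Editorial aside, not part of the patch: a sketch of both entry points, following the usage comment inside the class.

```cpp
#include "include/core/SkUnPreMultiply.h"

void UnpremulExamples() {
    // Whole-color form: premultiplied ARGB back to unpremultiplied ARGB.
    SkPMColor pm = SkPreMultiplyColor(SkColorSetARGB(128, 100, 50, 25));
    SkColor unpm = SkUnPreMultiply::PMColorToColor(pm);

    // Per-component form: one table lookup per alpha, then scale each channel.
    SkUnPreMultiply::Scale scale = SkUnPreMultiply::GetScale(128);
    U8CPU red = SkUnPreMultiply::ApplyScale(scale, 50);  // ~= 50 * 255 / 128
    (void)unpm; (void)red;
}
```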
diff --git a/gfx/skia/skia/include/core/SkVertices.h b/gfx/skia/skia/include/core/SkVertices.h
new file mode 100644
index 0000000000..c07e2dae63
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkVertices.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVertices_DEFINED
+#define SkVertices_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+
+/**
+ * An immutable set of vertex data that can be used with SkCanvas::drawVertices.
+ */
+class SK_API SkVertices : public SkNVRefCnt<SkVertices> {
+public:
+ // BoneIndices indicates which (of a maximum of 4 bones) a given vertex will interpolate
+ // between. To indicate that a slot is not used, the convention is to assign the bone index
+ // to 0.
+ struct BoneIndices {
+ uint32_t indices[4];
+
+ uint32_t& operator[] (int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 4);
+ return indices[i];
+ }
+
+ const uint32_t& operator[] (int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 4);
+ return indices[i];
+ }
+ };
+
+ // BoneWeights stores the interpolation weight for each of the (maximum of 4) bones a given
+ // vertex interpolates between. To indicate that a slot is not used, the weight for that
+ // slot should be 0.
+ struct BoneWeights {
+ float weights[4];
+
+ float& operator[] (int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 4);
+ return weights[i];
+ }
+
+ const float& operator[] (int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 4);
+ return weights[i];
+ }
+ };
+
+ // Bone stores a 3x2 transformation matrix in column major order:
+ // | scaleX skewX transX |
+ // | skewY scaleY transY |
+    // SkRSXform is insufficient because bones can have non-uniform scale.
+ struct Bone {
+ float values[6];
+
+ float& operator[] (int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 6);
+ return values[i];
+ }
+
+ const float& operator[] (int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < 6);
+ return values[i];
+ }
+
+ SkPoint mapPoint(const SkPoint& point) const {
+ float x = values[0] * point.x() + values[2] * point.y() + values[4];
+ float y = values[1] * point.x() + values[3] * point.y() + values[5];
+ return SkPoint::Make(x, y);
+ }
+
+ SkRect mapRect(const SkRect& rect) const {
+ SkRect dst = SkRect::MakeEmpty();
+ SkPoint quad[4];
+ rect.toQuad(quad);
+ for (int i = 0; i < 4; i ++) {
+ quad[i] = mapPoint(quad[i]);
+ }
+ dst.setBoundsNoCheck(quad, 4);
+ return dst;
+ }
+ };
+
+ enum VertexMode {
+ kTriangles_VertexMode,
+ kTriangleStrip_VertexMode,
+ kTriangleFan_VertexMode,
+
+ kLast_VertexMode = kTriangleFan_VertexMode,
+ };
+
+ /**
+     * Create a vertices object by copying the specified arrays. texs, colors, boneIndices, and
+ * boneWeights may be nullptr, and indices is ignored if indexCount == 0.
+ *
+ * boneIndices and boneWeights must either both be nullptr or both point to valid data.
+ * If specified, they must both contain 'vertexCount' entries.
+ */
+ static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texs[],
+ const SkColor colors[],
+ const BoneIndices boneIndices[],
+ const BoneWeights boneWeights[],
+ int indexCount,
+ const uint16_t indices[],
+ bool isVolatile = true);
+
+ static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texs[],
+ const SkColor colors[],
+ const BoneIndices boneIndices[],
+ const BoneWeights boneWeights[],
+ bool isVolatile = true) {
+ return MakeCopy(mode,
+ vertexCount,
+ positions,
+ texs,
+ colors,
+ boneIndices,
+ boneWeights,
+ 0,
+ nullptr,
+ isVolatile);
+ }
+
+ static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texs[],
+ const SkColor colors[],
+ int indexCount,
+ const uint16_t indices[],
+ bool isVolatile = true) {
+ return MakeCopy(mode,
+ vertexCount,
+ positions,
+ texs,
+ colors,
+ nullptr,
+ nullptr,
+ indexCount,
+ indices,
+ isVolatile);
+ }
+
+ static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texs[],
+ const SkColor colors[],
+ bool isVolatile = true) {
+ return MakeCopy(mode, vertexCount, positions, texs, colors, nullptr, nullptr, isVolatile);
+ }
+
+ struct Sizes;
+
+ enum BuilderFlags {
+ kHasTexCoords_BuilderFlag = 1 << 0,
+ kHasColors_BuilderFlag = 1 << 1,
+ kHasBones_BuilderFlag = 1 << 2,
+ kIsNonVolatile_BuilderFlag = 1 << 3,
+ };
+ class Builder {
+ public:
+ Builder(VertexMode mode, int vertexCount, int indexCount, uint32_t flags);
+
+ bool isValid() const { return fVertices != nullptr; }
+
+ // if the builder is invalid, these will return 0
+ int vertexCount() const;
+ int indexCount() const;
+ bool isVolatile() const;
+ SkPoint* positions();
+ SkPoint* texCoords(); // returns null if there are no texCoords
+ SkColor* colors(); // returns null if there are no colors
+ BoneIndices* boneIndices(); // returns null if there are no bone indices
+ BoneWeights* boneWeights(); // returns null if there are no bone weights
+ uint16_t* indices(); // returns null if there are no indices
+
+ // Detach the built vertices object. After the first call, this will always return null.
+ sk_sp<SkVertices> detach();
+
+ private:
+ Builder(VertexMode mode, int vertexCount, int indexCount, bool isVolatile, const Sizes&);
+
+ void init(VertexMode mode, int vertexCount, int indexCount, bool isVolatile, const Sizes&);
+
+ // holds a partially complete object. only completed in detach()
+ sk_sp<SkVertices> fVertices;
+ // Extra storage for intermediate vertices in the case where the client specifies indexed
+ // triangle fans. These get converted to indexed triangles when the Builder is finalized.
+ std::unique_ptr<uint8_t[]> fIntermediateFanIndices;
+
+ friend class SkVertices;
+ };
+
+ uint32_t uniqueID() const { return fUniqueID; }
+ VertexMode mode() const { return fMode; }
+ const SkRect& bounds() const { return fBounds; }
+
+ bool hasColors() const { return SkToBool(this->colors()); }
+ bool hasTexCoords() const { return SkToBool(this->texCoords()); }
+ bool hasBones() const { return SkToBool(this->boneIndices()); }
+ bool hasIndices() const { return SkToBool(this->indices()); }
+
+ int vertexCount() const { return fVertexCnt; }
+ const SkPoint* positions() const { return fPositions; }
+ const SkPoint* texCoords() const { return fTexs; }
+ const SkColor* colors() const { return fColors; }
+
+ const BoneIndices* boneIndices() const { return fBoneIndices; }
+ const BoneWeights* boneWeights() const { return fBoneWeights; }
+
+ int indexCount() const { return fIndexCnt; }
+ const uint16_t* indices() const { return fIndices; }
+
+ bool isVolatile() const { return fIsVolatile; }
+
+ sk_sp<SkVertices> applyBones(const Bone bones[], int boneCount) const;
+
+ // returns approximate byte size of the vertices object
+ size_t approximateSize() const;
+
+ /**
+ * Recreate a vertices from a buffer previously created by calling encode().
+ * Returns null if the data is corrupt or the length is incorrect for the contents.
+ */
+ static sk_sp<SkVertices> Decode(const void* buffer, size_t length);
+
+ /**
+ * Pack the vertices object into a byte buffer. This can be used to recreate the vertices
+ * by calling Decode() with the buffer.
+ */
+ sk_sp<SkData> encode() const;
+
+private:
+ SkVertices() {}
+
+ // these are needed since we've manually sized our allocation (see Builder::init)
+ friend class SkNVRefCnt<SkVertices>;
+ void operator delete(void* p);
+
+ static sk_sp<SkVertices> Alloc(int vCount, int iCount, uint32_t builderFlags,
+ size_t* arraySize);
+
+ // we store this first, to pair with the refcnt in our base-class, so we don't have an
+ // unnecessary pad between it and the (possibly 8-byte aligned) ptrs.
+ uint32_t fUniqueID;
+
+ // these point inside our allocation, so none of these can be "freed"
+ SkPoint* fPositions;
+ SkPoint* fTexs;
+ SkColor* fColors;
+ BoneIndices* fBoneIndices;
+ BoneWeights* fBoneWeights;
+ uint16_t* fIndices;
+
+ SkRect fBounds; // computed to be the union of the fPositions[]
+ int fVertexCnt;
+ int fIndexCnt;
+
+ bool fIsVolatile;
+
+ VertexMode fMode;
+ // below here is where the actual array data is stored.
+};
+
+#endif
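
Editorial aside, not part of the patch: drawing one colored triangle through this API. A sketch assuming the SkCanvas::drawVertices(const sk_sp&lt;SkVertices&gt;&amp;, SkBlendMode, const SkPaint&amp;) overload from SkCanvas.h.

```cpp
#include "include/core/SkCanvas.h"
#include "include/core/SkVertices.h"

// One triangle with per-vertex colors; no texs, bones, or index buffer.
void DrawTriangle(SkCanvas* canvas) {
    const SkPoint positions[] = {{10, 10}, {100, 20}, {50, 90}};
    const SkColor colors[]    = {SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE};

    sk_sp<SkVertices> verts = SkVertices::MakeCopy(
            SkVertices::kTriangles_VertexMode, /*vertexCount=*/3,
            positions, /*texs=*/nullptr, colors);

    SkPaint paint;
    // The blend mode combines the vertex colors with the paint's color/shader.
    canvas->drawVertices(verts, SkBlendMode::kModulate, paint);
}
```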
diff --git a/gfx/skia/skia/include/core/SkYUVAIndex.h b/gfx/skia/skia/include/core/SkYUVAIndex.h
new file mode 100644
index 0000000000..310c9756dc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkYUVAIndex.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVAIndex_DEFINED
+#define SkYUVAIndex_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+
+/** \struct SkYUVAIndex
+ Describes from which image source and which channel to read each individual YUVA plane.
+
+ SkYUVAIndex contains an index for which image source to read from and an enum for which channel
+ to read from.
+*/
+struct SK_API SkYUVAIndex {
+ bool operator==(const SkYUVAIndex& that) const {
+ return this->fIndex == that.fIndex && this->fChannel == that.fChannel;
+ }
+
+ bool operator!=(const SkYUVAIndex& that) const {
+ return !(*this == that);
+ }
+
+ // Index in the array of SkYUVAIndex
+ // TODO: rename as Component
+ enum Index {
+ kY_Index = 0,
+ kU_Index = 1,
+ kV_Index = 2,
+ kA_Index = 3,
+
+ kLast_Index = kA_Index
+ };
+ static constexpr int kIndexCount = kLast_Index + 1;
+
+    /** The index is a number between -1 and 3 that defines which image source to read from, where -1
+     * means the image source doesn't exist. The assumption is that we will always have image sources
+     * for each of the YUV planes, but only optionally an image source for the A plane. */
+ int fIndex;
+    /** The channel describes which channel to read the info from. Currently we only deal with
+ * YUV and NV12 and channel info is ignored. */
+ SkColorChannel fChannel;
+
+ static bool AreValidIndices(const SkYUVAIndex yuvaIndices[4], int* numPlanes) {
+ // Note that 'numPlanes' is always filled in even if the indices are not valid.
+ // This means it can always be used to process the backing resources (but be careful
+ // of empty intervening slots).
+ int maxSlotUsed = -1;
+ bool used[4] = { false, false, false, false };
+ bool valid = true;
+ for (int i = 0; i < 4; ++i) {
+ if (yuvaIndices[i].fIndex < 0) {
+ if (SkYUVAIndex::kA_Index != i) {
+ valid = false; // only the 'A' plane can be omitted
+ }
+ } else if (yuvaIndices[i].fIndex > 3) {
+ valid = false; // A maximum of four input textures is allowed
+ } else {
+ maxSlotUsed = SkTMax(yuvaIndices[i].fIndex, maxSlotUsed);
+ used[i] = true;
+ }
+ }
+
+ // All the used slots should be packed starting at 0 with no gaps
+ for (int i = 0; i <= maxSlotUsed; ++i) {
+ if (!used[i]) {
+ valid = false;
+ }
+ }
+
+ *numPlanes = maxSlotUsed + 1;
+ return valid;
+ }
+};
+
+#endif
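
Editorial aside, not part of the patch: describing an NV12 layout with these indices. The plane numbers and channels below are the conventional NV12 mapping (Y plane plus an interleaved UV plane).

```cpp
#include "include/core/SkYUVAIndex.h"

// NV12: Y in plane 0, interleaved UV in plane 1, no alpha plane.
bool DescribeNV12(SkYUVAIndex indices[4], int* numPlanes) {
    indices[SkYUVAIndex::kY_Index] = { 0, SkColorChannel::kR};
    indices[SkYUVAIndex::kU_Index] = { 1, SkColorChannel::kR};
    indices[SkYUVAIndex::kV_Index] = { 1, SkColorChannel::kG};
    indices[SkYUVAIndex::kA_Index] = {-1, SkColorChannel::kR};  // alpha omitted

    return SkYUVAIndex::AreValidIndices(indices, numPlanes);  // *numPlanes == 2
}
```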
diff --git a/gfx/skia/skia/include/core/SkYUVASizeInfo.h b/gfx/skia/skia/include/core/SkYUVASizeInfo.h
new file mode 100644
index 0000000000..b723dc6f28
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkYUVASizeInfo.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVASizeInfo_DEFINED
+#define SkYUVASizeInfo_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkSize.h"
+
+struct SK_API SkYUVASizeInfo {
+ static constexpr auto kMaxCount = 4;
+
+ SkISize fSizes[kMaxCount];
+
+ /**
+ * While the widths of the Y, U, V and A planes are not restricted, the
+ * implementation often requires that the width of the memory allocated
+ * for each plane be a multiple of 8.
+ *
+     * This struct allows us to inform the client how many "widthBytes"
+     * we need. Note that we use the idea of "widthBytes" here
+     * because it is distinct from "rowBytes" (used elsewhere in
+     * Skia). "rowBytes" allows the last row of the allocation to omit
+     * any extra padding, while, in this case, every single row of
+     * the allocation must be at least "widthBytes" wide.
+ */
+ size_t fWidthBytes[kMaxCount];
+
+ /**
+ * YUVA data often comes from formats like JPEG that support EXIF orientation.
+ * Code that operates on the raw YUV data often needs to know that orientation.
+ */
+ SkEncodedOrigin fOrigin = kDefault_SkEncodedOrigin;
+
+ bool operator==(const SkYUVASizeInfo& that) const {
+ for (int i = 0; i < kMaxCount; ++i) {
+ SkASSERT((!fSizes[i].isEmpty() && fWidthBytes[i]) ||
+ (fSizes[i].isEmpty() && !fWidthBytes[i]));
+ if (fSizes[i] != that.fSizes[i] || fWidthBytes[i] != that.fWidthBytes[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ size_t computeTotalBytes() const;
+
+ void computePlanes(void* base, void* planes[kMaxCount]) const;
+
+};
+
+#endif // SkYUVASizeInfo_DEFINED
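
Editorial aside, not part of the patch: a sketch of carving one allocation into planes; std::malloc stands in for whatever allocator a real client would use, and the caller is assumed to free the block.

```cpp
#include "include/core/SkYUVASizeInfo.h"
#include <cstdlib>

// Allocate one contiguous block sized for every plane, then compute the
// per-plane base pointers into it. The caller owns (and frees) 'base'.
void* AllocPlanes(const SkYUVASizeInfo& info,
                  void* planes[SkYUVASizeInfo::kMaxCount]) {
    void* base = std::malloc(info.computeTotalBytes());
    if (base) {
        info.computePlanes(base, planes);  // fills 'planes' from 'base'
    }
    return base;
}
```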
diff --git a/gfx/skia/skia/include/docs/SkPDFDocument.h b/gfx/skia/skia/include/docs/SkPDFDocument.h
new file mode 100644
index 0000000000..c208b7f156
--- /dev/null
+++ b/gfx/skia/skia/include/docs/SkPDFDocument.h
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFDocument_DEFINED
+#define SkPDFDocument_DEFINED
+
+#include "include/core/SkDocument.h"
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTime.h"
+
+class SkExecutor;
+
+namespace SkPDF {
+
+/** Table 333 in PDF 32000-1:2008 §14.8.4.2
+*/
+enum class DocumentStructureType {
+ kDocument, //!< Document
+ kPart, //!< Part
+ kArt, //!< Article
+ kSect, //!< Section
+ kDiv, //!< Division
+ kBlockQuote, //!< Block quotation
+ kCaption, //!< Caption
+ kTOC, //!< Table of Contents
+ kTOCI, //!< Table of Contents Item
+ kIndex, //!< Index
+ kNonStruct, //!< Nonstructural element
+ kPrivate, //!< Private element
+ kH, //!< Heading
+ kH1, //!< Heading level 1
+ kH2, //!< Heading level 2
+ kH3, //!< Heading level 3
+ kH4, //!< Heading level 4
+ kH5, //!< Heading level 5
+ kH6, //!< Heading level 6
+ kP, //!< Paragraph
+ kL, //!< List
+ kLI, //!< List item
+ kLbl, //!< List item label
+ kLBody, //!< List item body
+ kTable, //!< Table
+ kTR, //!< Table row
+ kTH, //!< Table header cell
+ kTD, //!< Table data cell
+ kTHead, //!< Table header row group
+ kTBody, //!< Table body row group
+    kTFoot,       //!< Table footer row group
+ kSpan, //!< Span
+ kQuote, //!< Quotation
+ kNote, //!< Note
+ kReference, //!< Reference
+ kBibEntry, //!< Bibliography entry
+ kCode, //!< Code
+ kLink, //!< Link
+ kAnnot, //!< Annotation
+ kRuby, //!< Ruby annotation
+ kRB, //!< Ruby base text
+ kRT, //!< Ruby annotation text
+ kRP, //!< Ruby punctuation
+ kWarichu, //!< Warichu annotation
+ kWT, //!< Warichu text
+ kWP, //!< Warichu punctuation
+ kFigure, //!< Figure
+ kFormula, //!< Formula
+ kForm, //!< Form control (not like an HTML FORM element)
+};
+
+/** A node in a PDF structure tree, giving a semantic representation
+ of the content. Each node ID is associated with content
+ by passing the SkCanvas and node ID to SkPDF::SetNodeId() when drawing.
+ NodeIDs should be unique within each tree.
+*/
+struct StructureElementNode {
+ const StructureElementNode* fChildren = nullptr;
+ size_t fChildCount;
+ int fNodeId;
+ DocumentStructureType fType;
+};
+
+/** Optional metadata to be passed into the PDF factory function.
+*/
+struct Metadata {
+ /** The document's title.
+ */
+ SkString fTitle;
+
+ /** The name of the person who created the document.
+ */
+ SkString fAuthor;
+
+ /** The subject of the document.
+ */
+ SkString fSubject;
+
+ /** Keywords associated with the document. Commas may be used to delineate
+ keywords within the string.
+ */
+ SkString fKeywords;
+
+ /** If the document was converted to PDF from another format,
+ the name of the conforming product that created the
+ original document from which it was converted.
+ */
+ SkString fCreator;
+
+ /** The product that is converting this document to PDF.
+ Leave fProducer empty to get the default, correct value.
+ */
+ SkString fProducer;
+
+ /** The date and time the document was created.
+ The zero default value represents an unknown/unset time.
+ */
+ SkTime::DateTime fCreation = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ /** The date and time the document was most recently modified.
+ The zero default value represents an unknown/unset time.
+ */
+ SkTime::DateTime fModified = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ /** The DPI (pixels-per-inch) at which features without native PDF support
+ will be rasterized (e.g. draw image with perspective, draw text with
+        perspective, ...). A larger DPI would create a PDF that reflects the
+        original intent with better fidelity, but it can make for larger PDF
+        files too, which would use more memory while rendering, and it would be
+        slower to process or to send online or to a printer.
+ */
+ SkScalar fRasterDPI = SK_ScalarDefaultRasterDPI;
+
+ /** If true, include XMP metadata, a document UUID, and sRGB output intent
+ information. This adds length to the document and makes it
+        non-reproducible, but these features are necessary for PDF/A-2b conformance.
+ */
+ bool fPDFA = false;
+
+ /** Encoding quality controls the trade-off between size and quality. By
+        default this is set to 101, which corresponds to lossless
+        encoding. If this is set to a value <= 100, and the image is
+ opaque, it will be encoded (using JPEG) with that quality setting.
+ */
+ int fEncodingQuality = 101;
+
+ /** An optional tree of structured document tags that provide
+ a semantic representation of the content. The caller
+ should retain ownership.
+ */
+ const StructureElementNode* fStructureElementTreeRoot = nullptr;
+
+ /** Executor to handle threaded work within PDF Backend. If this is nullptr,
+ then all work will be done serially on the main thread. To have worker
+ threads assist with various tasks, set this to a valid SkExecutor
+        instance. Currently used for executing the Deflate algorithm in parallel.
+
+ If set, the PDF output will be non-reproducible in the order and
+ internal numbering of objects, but should render the same.
+
+ Experimental.
+ */
+ SkExecutor* fExecutor = nullptr;
+
+ /** Preferred Subsetter. Only respected if both are compiled in.
+ Experimental.
+ */
+ enum Subsetter {
+ kHarfbuzz_Subsetter,
+ kSfntly_Subsetter,
+ } fSubsetter = kHarfbuzz_Subsetter;
+};
+
+/** Associate a node ID with subsequent drawing commands in an
+ SkCanvas. The same node ID can appear in a StructureElementNode
+ in order to associate a document's structure element tree with
+ its content.
+
+ A node ID of zero indicates no node ID.
+
+    @param dst    The canvas used to draw to the PDF.
+    @param nodeID The node ID for subsequent drawing commands.
+*/
+SK_API void SetNodeId(SkCanvas* dst, int nodeID);
+
+/** Create a PDF-backed document, writing the results into a SkWStream.
+
+ PDF pages are sized in point units. 1 pt == 1/72 inch == 127/360 mm.
+
+ @param stream A PDF document will be written to this stream. The document may write
+ to the stream at anytime during its lifetime, until either close() is
+ called or the document is deleted.
+    @param metadata an SkPDF::Metadata object. Any fields may be left empty.
+
+ @returns NULL if there is an error, otherwise a newly created PDF-backed SkDocument.
+*/
+SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream, const Metadata& metadata);
+
+static inline sk_sp<SkDocument> MakeDocument(SkWStream* stream) {
+ return MakeDocument(stream, Metadata());
+}
+
+} // namespace SkPDF
+#endif // SkPDFDocument_DEFINED
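
Editorial aside, not part of the patch: the usual lifecycle of a PDF document. A sketch with an arbitrary output path and US Letter page size.

```cpp
#include "include/core/SkCanvas.h"
#include "include/core/SkStream.h"
#include "include/docs/SkPDFDocument.h"

// Write a one-page PDF: MakeDocument -> beginPage -> draw -> endPage -> close.
bool WriteOnePagePdf(const char* path) {
    SkFILEWStream stream(path);
    if (!stream.isValid()) { return false; }

    SkPDF::Metadata metadata;
    metadata.fTitle = "Example";

    sk_sp<SkDocument> doc = SkPDF::MakeDocument(&stream, metadata);
    if (!doc) { return false; }

    SkCanvas* page = doc->beginPage(612, 792);  // US Letter, in points
    SkPaint paint;
    paint.setColor(SK_ColorBLUE);
    page->drawCircle(306, 396, 100, paint);

    doc->endPage();
    doc->close();  // finishes writing to the stream
    return true;
}
```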
diff --git a/gfx/skia/skia/include/docs/SkXPSDocument.h b/gfx/skia/skia/include/docs/SkXPSDocument.h
new file mode 100644
index 0000000000..5cd0777c9b
--- /dev/null
+++ b/gfx/skia/skia/include/docs/SkXPSDocument.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXPSDocument_DEFINED
+#define SkXPSDocument_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "include/core/SkDocument.h"
+
+struct IXpsOMObjectFactory;
+
+namespace SkXPS {
+
+SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream,
+ IXpsOMObjectFactory* xpsFactory,
+ SkScalar dpi = SK_ScalarDefaultRasterDPI);
+
+} // namespace SkXPS
+#endif // SK_BUILD_FOR_WIN
+#endif // SkXPSDocument_DEFINED
diff --git a/gfx/skia/skia/include/effects/Sk1DPathEffect.h b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
new file mode 100644
index 0000000000..34edfaa91f
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk1DPathEffect_DEFINED
+#define Sk1DPathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+
+class SkPathMeasure;
+
+// This class is not exported to java.
+class SK_API Sk1DPathEffect : public SkPathEffect {
+public:
+protected:
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+ /** Called at the start of each contour, returns the initial offset
+ into that contour.
+ */
+ virtual SkScalar begin(SkScalar contourLength) const = 0;
+ /** Called with the current distance along the path, with the current matrix
+ for the point/tangent at the specified distance.
+ Return the distance to travel for the next call. If return <= 0, then that
+ contour is done.
+ */
+ virtual SkScalar next(SkPath* dst, SkScalar dist, SkPathMeasure&) const = 0;
+
+private:
+ typedef SkPathEffect INHERITED;
+};
+
+class SK_API SkPath1DPathEffect : public Sk1DPathEffect {
+public:
+ enum Style {
+ kTranslate_Style, // translate the shape to each position
+ kRotate_Style, // rotate the shape about its center
+ kMorph_Style, // transform each point, and turn lines into curves
+
+ kLastEnum_Style = kMorph_Style,
+ };
+
+ /** Dash by replicating the specified path.
+ @param path The path to replicate (dash)
+ @param advance The space between instances of path
+ @param phase distance (mod advance) along path for its initial position
+ @param style how to transform path at each point (based on the current
+ position and tangent)
+ */
+ static sk_sp<SkPathEffect> Make(const SkPath& path, SkScalar advance, SkScalar phase, Style);
+
+protected:
+ SkPath1DPathEffect(const SkPath& path, SkScalar advance, SkScalar phase, Style);
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath*, const SkPath&, SkStrokeRec*, const SkRect*) const override;
+
+ // overrides from Sk1DPathEffect
+ SkScalar begin(SkScalar contourLength) const override;
+ SkScalar next(SkPath*, SkScalar, SkPathMeasure&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPath1DPathEffect)
+
+ SkPath fPath; // copied from constructor
+ SkScalar fAdvance; // copied from constructor
+ SkScalar fInitialOffset; // computed from phase
+ Style fStyle; // copied from constructor
+
+ typedef Sk1DPathEffect INHERITED;
+};
+
+#endif
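
Editorial aside, not part of the patch: stamping a small shape along a contour with SkPath1DPathEffect; the stamp shape and advance are arbitrary.

```cpp
#include "include/core/SkPaint.h"
#include "include/core/SkPath.h"
#include "include/effects/Sk1DPathEffect.h"

// Replicate a small triangle every 24 units along any path drawn with 'paint',
// rotating the stamp to follow the path's tangent.
void ApplyTriangleStamp(SkPaint* paint) {
    SkPath stamp;
    stamp.moveTo(-4, 0);
    stamp.lineTo(4, 0);
    stamp.lineTo(0, -6);
    stamp.close();

    paint->setPathEffect(SkPath1DPathEffect::Make(
            stamp, /*advance=*/24, /*phase=*/0, SkPath1DPathEffect::kRotate_Style));
}
```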
diff --git a/gfx/skia/skia/include/effects/Sk2DPathEffect.h b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
new file mode 100644
index 0000000000..ea9057dfa1
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk2DPathEffect_DEFINED
+#define Sk2DPathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+
+class SK_API Sk2DPathEffect : public SkPathEffect {
+protected:
+ /** New virtual, to be overridden by subclasses.
+ This is called once from filterPath, and provides the
+ uv parameter bounds for the path. Subsequent calls to
+ next() will receive u and v values within these bounds,
+ and then a call to end() will signal the end of processing.
+ */
+ virtual void begin(const SkIRect& uvBounds, SkPath* dst) const;
+ virtual void next(const SkPoint& loc, int u, int v, SkPath* dst) const;
+ virtual void end(SkPath* dst) const;
+
+ /** Low-level virtual called per span of locations in the u-direction.
+ The default implementation calls next() repeatedly with each
+ location.
+ */
+ virtual void nextSpan(int u, int v, int ucount, SkPath* dst) const;
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+
+ // protected so that subclasses can call this during unflattening
+ explicit Sk2DPathEffect(const SkMatrix& mat);
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath*, const SkPath&, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SkMatrix fMatrix, fInverse;
+ bool fMatrixIsInvertible;
+
+ // illegal
+ Sk2DPathEffect(const Sk2DPathEffect&);
+ Sk2DPathEffect& operator=(const Sk2DPathEffect&);
+
+ friend class Sk2DPathEffectBlitter;
+ typedef SkPathEffect INHERITED;
+};
+
+class SK_API SkLine2DPathEffect : public Sk2DPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(SkScalar width, const SkMatrix& matrix) {
+ if (!(width >= 0)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkLine2DPathEffect(width, matrix));
+ }
+
+
+protected:
+ SkLine2DPathEffect(SkScalar width, const SkMatrix& matrix)
+ : Sk2DPathEffect(matrix), fWidth(width) {
+ SkASSERT(width >= 0);
+ }
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+ void nextSpan(int u, int v, int ucount, SkPath*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLine2DPathEffect)
+
+ SkScalar fWidth;
+
+ typedef Sk2DPathEffect INHERITED;
+};
+
+class SK_API SkPath2DPathEffect : public Sk2DPathEffect {
+public:
+ /**
+ * Stamp the specified path to fill the shape, using the matrix to define
+     * the lattice.
+ */
+ static sk_sp<SkPathEffect> Make(const SkMatrix& matrix, const SkPath& path) {
+ return sk_sp<SkPathEffect>(new SkPath2DPathEffect(matrix, path));
+ }
+
+protected:
+ SkPath2DPathEffect(const SkMatrix&, const SkPath&);
+ void flatten(SkWriteBuffer&) const override;
+
+ void next(const SkPoint&, int u, int v, SkPath*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPath2DPathEffect)
+
+ SkPath fPath;
+
+ typedef Sk2DPathEffect INHERITED;
+};
+
+#endif
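
Editorial aside, not part of the patch: filling a shape with a lattice of dots via SkPath2DPathEffect. The 8x8 cell size is arbitrary; SkMatrix::MakeScale comes from SkMatrix.h.

```cpp
#include "include/core/SkMatrix.h"
#include "include/core/SkPaint.h"
#include "include/core/SkPath.h"
#include "include/effects/Sk2DPathEffect.h"

// Stamp a small circle on every cell of an 8x8 lattice covering the shape.
void ApplyDotLattice(SkPaint* paint) {
    SkMatrix lattice = SkMatrix::MakeScale(8, 8);  // lattice cell size

    SkPath dot;
    dot.addCircle(0, 0, 2);

    paint->setPathEffect(SkPath2DPathEffect::Make(lattice, dot));
}
```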
diff --git a/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h b/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h
new file mode 100644
index 0000000000..cd3be18eec
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAlphaThresholdFilter_DEFINED
+#define SkAlphaThresholdFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+class SkRegion;
+
+// DEPRECATED: Use include/effects/SkImageFilters::AlphaThreshold
+class SK_API SkAlphaThresholdFilter {
+public:
+ /**
+ * Creates an image filter that samples a region. If the sample is inside the
+ * region the alpha of the image is boosted up to a threshold value. If it is
+ * outside the region then the alpha is decreased to the threshold value.
+ * The 0,0 point of the region corresponds to the upper left corner of the
+ * source image.
+ */
+ static sk_sp<SkImageFilter> Make(const SkRegion& region, SkScalar innerMin,
+ SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+
+ static void RegisterFlattenables();
+
+private:
+ SkAlphaThresholdFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkArithmeticImageFilter.h b/gfx/skia/skia/include/effects/SkArithmeticImageFilter.h
new file mode 100644
index 0000000000..2945a131c4
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkArithmeticImageFilter.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArithmeticImageFilter_DEFINED
+#define SkArithmeticImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+struct ArithmeticFPInputs {
+ ArithmeticFPInputs() {
+ memset(this, 0, sizeof(*this));
+ }
+
+ float k[4];
+ bool enforcePMColor;
+};
+
+// DEPRECATED: Use include/effects/SkImageFilters::Arithmetic
+class SK_API SkArithmeticImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect);
+
+ static void RegisterFlattenables();
+
+private:
+ SkArithmeticImageFilter(); // can't instantiate
+};
+
+#endif
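
Editorial aside, not part of the patch: this filter combines its inputs per channel as result = k1*fg*bg + k2*fg + k3*bg + k4 (the formula documented for the non-deprecated SkImageFilters::Arithmetic), so a 50/50 crossfade needs only k2 and k3.

```cpp
#include "include/effects/SkArithmeticImageFilter.h"
#include <utility>

// 0.5*foreground + 0.5*background per channel (k1 = k4 = 0).
// Null inputs mean "use the source being drawn".
sk_sp<SkImageFilter> MakeCrossfade(sk_sp<SkImageFilter> background,
                                   sk_sp<SkImageFilter> foreground) {
    return SkArithmeticImageFilter::Make(
            /*k1=*/0.0f, /*k2=*/0.5f, /*k3=*/0.5f, /*k4=*/0.0f,
            /*enforcePMColor=*/true,
            std::move(background), std::move(foreground),
            /*cropRect=*/nullptr);
}
```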
diff --git a/gfx/skia/skia/include/effects/SkBlurDrawLooper.h b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
new file mode 100644
index 0000000000..4a1ae83142
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkBlurDrawLooper_DEFINED
+#define SkBlurDrawLooper_DEFINED
+
+#include "include/core/SkDrawLooper.h"
+
+/**
+ * Draws a shadow of the object (possibly offset), and then draws the original object in
+ * its original position.
+ */
+namespace SkBlurDrawLooper {
+ sk_sp<SkDrawLooper> SK_API Make(SkColor4f color, SkColorSpace* cs,
+ SkScalar sigma, SkScalar dx, SkScalar dy);
+ sk_sp<SkDrawLooper> SK_API Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkBlurImageFilter.h b/gfx/skia/skia/include/effects/SkBlurImageFilter.h
new file mode 100644
index 0000000000..72335dee47
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurImageFilter.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurImageFilter_DEFINED
+#define SkBlurImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+enum class SkTileMode;
+
+// DEPRECATED: Use include/effects/SkImageFilters::Blur
+class SK_API SkBlurImageFilter {
+public:
+ /*! \enum TileMode
+ * DEPRECATED: Use SkTileMode instead. */
+ enum TileMode {
+ kClamp_TileMode = 0, /*!< Clamp to the image's edge pixels. */
+ /*!< This re-weights the filter so samples outside have no effect */
+ kRepeat_TileMode, /*!< Wrap around to the image's opposite edge. */
+ kClampToBlack_TileMode, /*!< Fill with transparent black. */
+ kLast_TileMode = kClampToBlack_TileMode,
+
+ // TODO: remove kMax - it is non-standard but Chromium uses it
+ kMax_TileMode = kClampToBlack_TileMode
+ };
+
+ static sk_sp<SkImageFilter> Make(SkScalar sigmaX, SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr,
+ TileMode tileMode = TileMode::kClampToBlack_TileMode);
+ // EXPERIMENTAL: kMirror is not yet supported
+ static sk_sp<SkImageFilter> Make(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkBlurImageFilter() = delete;
+};
+
+#endif
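
Editorial aside, not part of the patch: attaching a blur to a paint; the sigma values are arbitrary.

```cpp
#include "include/core/SkPaint.h"
#include "include/effects/SkBlurImageFilter.h"

// Gaussian-blur everything drawn with 'paint' by a sigma of 4 in x and y.
// A null input means the filter applies to the source being drawn.
void ApplyBlur(SkPaint* paint) {
    paint->setImageFilter(
            SkBlurImageFilter::Make(/*sigmaX=*/4.0f, /*sigmaY=*/4.0f,
                                    /*input=*/nullptr));
}
```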
diff --git a/gfx/skia/skia/include/effects/SkBlurMaskFilter.h b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
new file mode 100644
index 0000000000..1b9319869e
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMaskFilter_DEFINED
+#define SkBlurMaskFilter_DEFINED
+
+// we include this since our callers will need to at least be able to ref/unref
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+
+class SkRRect;
+
+class SK_API SkBlurMaskFilter {
+public:
+#ifdef SK_SUPPORT_LEGACY_EMBOSSMASKFILTER
+ /** Create an emboss maskfilter
+ @param blurSigma standard deviation of the Gaussian blur to apply
+ before applying lighting (e.g. 3)
+ @param direction array of 3 scalars [x, y, z] specifying the direction of the light source
+ @param ambient 0...1 amount of ambient light
+ @param specular coefficient for specular highlights (e.g. 8)
+ @return the emboss maskfilter
+ */
+ static sk_sp<SkMaskFilter> MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular);
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h b/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h
new file mode 100644
index 0000000000..345910fa97
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterImageFilter_DEFINED
+#define SkColorFilterImageFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::ColorFilter
+class SK_API SkColorFilterImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkColorFilterImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrix.h b/gfx/skia/skia/include/effects/SkColorMatrix.h
new file mode 100644
index 0000000000..f2b7964b5f
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrix.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrix_DEFINED
+#define SkColorMatrix_DEFINED
+
+#include "include/core/SkTypes.h"
+#include <memory.h>
+
+class SK_API SkColorMatrix {
+public:
+ void setIdentity();
+ void setScale(float rScale, float gScale, float bScale, float aScale = 1.0f);
+
+ void setRowMajor(const float src[20]) {
+ memcpy(fMat, src, sizeof(fMat));
+ }
+
+ void getRowMajor(float dst[20]) const {
+ memcpy(dst, fMat, sizeof(fMat));
+ }
+
+ enum Axis {
+ kR_Axis = 0,
+ kG_Axis = 1,
+ kB_Axis = 2
+ };
+ void setRotate(Axis, float degrees);
+ void setSinCos(Axis, float sine, float cosine);
+ void preRotate(Axis, float degrees);
+ void postRotate(Axis, float degrees);
+ void postTranslate(float dr, float dg, float db, float da);
+
+ void setConcat(const SkColorMatrix& a, const SkColorMatrix& b);
+ void preConcat(const SkColorMatrix& mat) { this->setConcat(*this, mat); }
+ void postConcat(const SkColorMatrix& mat) { this->setConcat(mat, *this); }
+
+ void setSaturation(float sat);
+ void setRGB2YUV();
+ void setYUV2RGB();
+
+ bool operator==(const SkColorMatrix& other) const {
+ return 0 == memcmp(fMat, other.fMat, sizeof(fMat));
+ }
+
+ bool operator!=(const SkColorMatrix& other) const { return !((*this) == other); }
+
+ float* get20(float m[20]) const {
+ memcpy(m, fMat, sizeof(fMat));
+ return m;
+ }
+ void set20(const float m[20]) {
+ memcpy(fMat, m, sizeof(fMat));
+ }
+
+private:
+ float fMat[20];
+
+ friend class SkColorFilters;
+};
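+
+// Example (editorial sketch, not part of the upstream header): composing a
+// saturation change with a small brightness offset, then handing the 20-entry
+// row-major matrix to a color filter. SkColorFilters::Matrix is assumed from
+// include/core/SkColorFilter.h.
+//
+//   SkColorMatrix cm;
+//   cm.setIdentity();
+//   cm.setSaturation(0.5f);                  // halve saturation
+//   cm.postTranslate(0.1f, 0.1f, 0.1f, 0);   // then brighten RGB slightly
+//   float rowMajor[20];
+//   cm.getRowMajor(rowMajor);
+//   sk_sp<SkColorFilter> cf = SkColorFilters::Matrix(rowMajor);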
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrixFilter.h b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
new file mode 100644
index 0000000000..144c9685f4
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrixFilter_DEFINED
+#define SkColorMatrixFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/effects/SkColorMatrix.h"
+
+class SK_API SkColorMatrixFilter : public SkColorFilter {
+public:
+ /**
+ * Create a colorfilter that multiplies the RGB channels by one color, and
+ * then adds a second color, pinning the result for each component to
+ * [0..255]. The alpha components of the mul and add arguments
+ * are ignored.
+ */
+ static sk_sp<SkColorFilter> MakeLightingFilter(SkColor mul, SkColor add);
+};
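+
+// Example (editorial sketch, not part of the upstream header): a lighting filter
+// that halves the RGB channels and then adds a faint red, clamped per component:
+//
+//   sk_sp<SkColorFilter> lighting =
+//       SkColorMatrixFilter::MakeLightingFilter(SkColorSetRGB(128, 128, 128),  // mul
+//                                               SkColorSetRGB(32, 0, 0));      // add
+//   SkPaint paint;
+//   paint.setColorFilter(std::move(lighting));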
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkComposeImageFilter.h b/gfx/skia/skia/include/effects/SkComposeImageFilter.h
new file mode 100644
index 0000000000..a2f51e2d20
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkComposeImageFilter.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkComposeImageFilter_DEFINED
+#define SkComposeImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Compose
+class SK_API SkComposeImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner);
+
+ static void RegisterFlattenables();
+
+private:
+ SkComposeImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkCornerPathEffect.h b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
new file mode 100644
index 0000000000..ac041b588d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCornerPathEffect_DEFINED
+#define SkCornerPathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPathEffect.h"
+
+/** \class SkCornerPathEffect
+
+ SkCornerPathEffect is a subclass of SkPathEffect that can turn sharp corners
+ into various treatments (e.g. rounded corners)
+*/
+class SK_API SkCornerPathEffect : public SkPathEffect {
+public:
+ /** radius must be > 0 to have an effect. It specifies the distance from each corner
+ that should be "rounded".
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar radius) {
+ return radius > 0 ? sk_sp<SkPathEffect>(new SkCornerPathEffect(radius)) : nullptr;
+ }
+
+protected:
+ ~SkCornerPathEffect() override;
+
+ explicit SkCornerPathEffect(SkScalar radius);
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkCornerPathEffect)
+
+ SkScalar fRadius;
+
+ typedef SkPathEffect INHERITED;
+};
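+
+// Example (editorial sketch, not part of the upstream header): rounding the
+// corners of a drawn rectangle with a 10-unit radius:
+//
+//   SkPaint paint;
+//   paint.setPathEffect(SkCornerPathEffect::Make(10.0f));
+//   canvas->drawRect(SkRect::MakeWH(100, 100), paint);  // corners come out rounded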
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDashPathEffect.h b/gfx/skia/skia/include/effects/SkDashPathEffect.h
new file mode 100644
index 0000000000..d6ca9122e6
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDashPathEffect.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathEffect_DEFINED
+#define SkDashPathEffect_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+class SK_API SkDashPathEffect {
+public:
+ /** intervals: array containing an even number of entries (>=2), with
+ the even indices specifying the length of "on" intervals, and the odd
+ indices specifying the length of "off" intervals. This array will be
+ copied in Make, and can be disposed of freely after.
+ count: number of elements in the intervals array
+ phase: offset into the intervals array (mod the sum of all of the
+ intervals).
+
+ For example: if intervals[] = {10, 20}, count = 2, and phase = 25,
+ this will set up a dashed path like so:
+ 5 pixels off
+ 10 pixels on
+ 20 pixels off
+ 10 pixels on
+ 20 pixels off
+ ...
+ A phase of -5, 25, 55, 85, etc. would all result in the same path,
+ because the sum of all the intervals is 30.
+
+ Note: only affects stroked paths.
+ */
+ static sk_sp<SkPathEffect> Make(const SkScalar intervals[], int count, SkScalar phase);
+};
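+
+// Example (editorial sketch, not part of the upstream header): the {10, 20} /
+// phase 25 dash described above, applied to a stroked line (dashing only affects
+// stroked paths):
+//
+//   const SkScalar intervals[] = { 10, 20 };  // 10 on, 20 off
+//   SkPaint paint;
+//   paint.setStyle(SkPaint::kStroke_Style);
+//   paint.setPathEffect(SkDashPathEffect::Make(intervals, 2, 25.0f));
+//   canvas->drawLine(0, 0, 300, 0, paint);    // 5 off, 10 on, 20 off, 10 on, ...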
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDiscretePathEffect.h b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
new file mode 100644
index 0000000000..007844be85
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscretePathEffect_DEFINED
+#define SkDiscretePathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPathEffect.h"
+
+/** \class SkDiscretePathEffect
+
+ This path effect chops a path into discrete segments, and randomly displaces them.
+*/
+class SK_API SkDiscretePathEffect : public SkPathEffect {
+public:
+    /** Break the path into segments of length segLength, and randomly move the endpoints
+ away from the original path by a maximum of deviation.
+ Note: works on filled or framed paths
+
+ @param seedAssist This is a caller-supplied seedAssist that modifies
+ the seed value that is used to randomize the path
+ segments' endpoints. If not supplied it defaults to 0,
+ in which case filtering a path multiple times will
+ result in the same set of segments (this is useful for
+ testing). If a caller does not want this behaviour
+ they can pass in a different seedAssist to get a
+ different set of path segments.
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar segLength, SkScalar dev, uint32_t seedAssist = 0);
+
+protected:
+ SkDiscretePathEffect(SkScalar segLength,
+ SkScalar deviation,
+ uint32_t seedAssist);
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkDiscretePathEffect)
+
+ SkScalar fSegLength, fPerterb;
+
+ /* Caller-supplied 32 bit seed assist */
+ uint32_t fSeedAssist;
+
+ typedef SkPathEffect INHERITED;
+};
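+
+// Example (editorial sketch, not part of the upstream header): a "hand drawn"
+// wobble that chops the path into ~8-unit segments displaced by up to 2 units.
+// Passing a different seedAssist would yield a different wobble:
+//
+//   SkPaint paint;
+//   paint.setStyle(SkPaint::kStroke_Style);
+//   paint.setPathEffect(SkDiscretePathEffect::Make(8.0f /*segLength*/, 2.0f /*dev*/));
+//   canvas->drawCircle(50, 50, 40, paint);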
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h b/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h
new file mode 100644
index 0000000000..35f0d4d4b1
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDisplacementMapEffect_DEFINED
+#define SkDisplacementMapEffect_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+enum class SkColorChannel;
+
+// DEPRECATED: Use include/effects/SkImageFilters::DisplacementMap
+class SK_API SkDisplacementMapEffect {
+public:
+
+ // DEPRECATED - Use SkColorChannel instead.
+ enum ChannelSelectorType {
+ kUnknown_ChannelSelectorType,
+ kR_ChannelSelectorType,
+ kG_ChannelSelectorType,
+ kB_ChannelSelectorType,
+ kA_ChannelSelectorType,
+
+ kLast_ChannelSelectorType = kA_ChannelSelectorType
+ };
+
+ static sk_sp<SkImageFilter> Make(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> Make(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkDisplacementMapEffect() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h b/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h
new file mode 100644
index 0000000000..9d5160f69b
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDropShadowImageFilter_DEFINED
+#define SkDropShadowImageFilter_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkScalar.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::DropShadow and DropShadowOnly
+class SK_API SkDropShadowImageFilter {
+public:
+ enum ShadowMode {
+ kDrawShadowAndForeground_ShadowMode,
+ kDrawShadowOnly_ShadowMode,
+
+ kLast_ShadowMode = kDrawShadowOnly_ShadowMode
+ };
+
+ static const int kShadowModeCount = kLast_ShadowMode+1;
+
+ static sk_sp<SkImageFilter> Make(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, ShadowMode shadowMode,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkDropShadowImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkGradientShader.h b/gfx/skia/skia/include/effects/SkGradientShader.h
new file mode 100644
index 0000000000..daa6ed0808
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkGradientShader.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShader_DEFINED
+#define SkGradientShader_DEFINED
+
+#include "include/core/SkShader.h"
+
+/** \class SkGradientShader
+
+ SkGradientShader hosts factories for creating subclasses of SkShader that
+ render linear and radial gradients. In general, degenerate cases should not
+ produce surprising results, but there are several types of degeneracies:
+
+ * A linear gradient made from the same two points.
+ * A radial gradient with a radius of zero.
+ * A sweep gradient where the start and end angle are the same.
+ * A two point conical gradient where the two centers and the two radii are
+ the same.
+
+ For any degenerate gradient with a decal tile mode, it will draw empty since the interpolating
+ region is zero area and the outer region is discarded by the decal mode.
+
+ For any degenerate gradient with a repeat or mirror tile mode, it will draw a solid color that
+ is the average gradient color, since infinitely many repetitions of the gradients will fill the
+ shape.
+
+ For a clamped gradient, every type is well-defined at the limit except for linear gradients. The
+ radial gradient with zero radius becomes the last color. The sweep gradient draws the sector
+ from 0 to the provided angle with the first color, with a hardstop switching to the last color.
+ When the provided angle is 0, this is just the solid last color again. Similarly, the two point
+ conical gradient becomes a circle filled with the first color, sized to the provided radius,
+ with a hardstop switching to the last color. When the two radii are both zero, this is just the
+ solid last color.
+
+ As a linear gradient approaches the degenerate case, its shader will approach the appearance of
+ two half planes, each filled by the first and last colors of the gradient. The planes will be
+ oriented perpendicular to the vector between the two defining points of the gradient. However,
+ once they become the same point, Skia cannot reconstruct what that expected orientation is. To
+ provide a stable and predictable color in this case, Skia just uses the last color as a solid
+ fill to be similar to many of the other degenerate gradients' behaviors in clamp mode.
+*/
+class SK_API SkGradientShader {
+public:
+ enum Flags {
+ /** By default gradients will interpolate their colors in unpremul space
+ * and then premultiply each of the results. By setting this flag, the
+ * gradients will premultiply their colors first, and then interpolate
+ * between them.
+ */
+ kInterpolateColorsInPremul_Flag = 1 << 0,
+ };
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+        @param pos     May be NULL. array[count] of SkScalars of the relative position of
+                       each corresponding color in the colors array. If this is NULL,
+                       the colors are distributed evenly between the start and end point.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode) {
+ return MakeLinear(pts, colors, pos, count, mode, 0, nullptr);
+ }
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+        @param pos     May be NULL. array[count] of SkScalars of the relative position of
+                       each corresponding color in the colors array. If this is NULL,
+                       the colors are distributed evenly between the start and end point.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode) {
+ return MakeLinear(pts, colors, std::move(colorSpace), pos, count, mode, 0, nullptr);
+ }
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+        @param pos     May be NULL. The array[count] of SkScalars of the relative position of
+                       each corresponding color in the colors array. If this is NULL,
+                       the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode) {
+ return MakeRadial(center, radius, colors, pos, count, mode, 0, nullptr);
+ }
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+        @param pos     May be NULL. The array[count] of SkScalars of the relative position of
+                       each corresponding color in the colors array. If this is NULL,
+                       the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode) {
+ return MakeRadial(center, radius, colors, std::move(colorSpace), pos, count, mode,
+ 0, nullptr);
+ }
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[],
+ int count, SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[],
+ int count, SkTileMode mode) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors, pos, count, mode,
+ 0, nullptr);
+ }
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkTileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkTileMode mode) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors,
+ std::move(colorSpace), pos, count, mode, 0, nullptr);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+        @param cy      The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center, within
+ the gradient angle range.
+        @param pos     May be NULL. The array[count] of SkScalars of the relative
+ position of each corresponding color in the colors array. If this is
+ NULL, then the colors are distributed evenly within the angular range.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode Tiling mode: controls drawing outside of the gradient angular range.
+ @param startAngle Start of the angular range, corresponding to pos == 0.
+ @param endAngle End of the angular range, corresponding to pos == 1.
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ SkScalar startAngle, SkScalar endAngle,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeSweep(cx, cy, colors, pos, count, SkTileMode::kClamp, 0, 360, flags,
+ localMatrix);
+ }
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count) {
+ return MakeSweep(cx, cy, colors, pos, count, 0, nullptr);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+        @param cy      The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center, within
+ the gradient angle range.
+        @param pos     May be NULL. The array[count] of SkScalars of the relative
+ position of each corresponding color in the colors array. If this is
+ NULL, then the colors are distributed evenly within the angular range.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode Tiling mode: controls drawing outside of the gradient angular range.
+ @param startAngle Start of the angular range, corresponding to pos == 0.
+ @param endAngle End of the angular range, corresponding to pos == 1.
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ SkTileMode mode,
+ SkScalar startAngle, SkScalar endAngle,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count,
+ SkTileMode::kClamp, 0, 360, flags, localMatrix);
+ }
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count) {
+ return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, 0, nullptr);
+ }
+
+ static void RegisterFlattenables();
+};
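+
+// Example (editorial sketch, not part of the upstream header): a two-stop linear
+// gradient from black to white across 100 units, clamped at the ends. A null
+// 'pos' distributes the colors evenly:
+//
+//   const SkPoint pts[2] = { {0, 0}, {100, 0} };
+//   const SkColor colors[2] = { SK_ColorBLACK, SK_ColorWHITE };
+//   SkPaint paint;
+//   paint.setShader(SkGradientShader::MakeLinear(pts, colors, nullptr /*pos*/, 2,
+//                                                SkTileMode::kClamp));
+//   canvas->drawRect(SkRect::MakeWH(100, 100), paint);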
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkHighContrastFilter.h b/gfx/skia/skia/include/effects/SkHighContrastFilter.h
new file mode 100644
index 0000000000..a8860c22cc
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkHighContrastFilter.h
@@ -0,0 +1,84 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkHighContrastFilter_DEFINED
+#define SkHighContrastFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+
+/**
+ * Configuration struct for SkHighContrastFilter.
+ *
+ * Provides transformations to improve contrast for users with low vision.
+ */
+struct SkHighContrastConfig {
+ enum class InvertStyle {
+ kNoInvert,
+ kInvertBrightness,
+ kInvertLightness,
+
+ kLast = kInvertLightness
+ };
+
+ SkHighContrastConfig() {
+ fGrayscale = false;
+ fInvertStyle = InvertStyle::kNoInvert;
+ fContrast = 0.0f;
+ }
+
+ SkHighContrastConfig(bool grayscale,
+ InvertStyle invertStyle,
+ SkScalar contrast)
+ : fGrayscale(grayscale),
+ fInvertStyle(invertStyle),
+ fContrast(contrast) {}
+
+ // Returns true if all of the fields are set within the valid range.
+ bool isValid() const {
+ return fInvertStyle >= InvertStyle::kNoInvert &&
+ fInvertStyle <= InvertStyle::kInvertLightness &&
+ fContrast >= -1.0 &&
+ fContrast <= 1.0;
+ }
+
+ // If true, the color will be converted to grayscale.
+ bool fGrayscale;
+
+ // Whether to invert brightness, lightness, or neither.
+ InvertStyle fInvertStyle;
+
+ // After grayscale and inverting, the contrast can be adjusted linearly.
+ // The valid range is -1.0 through 1.0, where 0.0 is no adjustment.
+ SkScalar fContrast;
+};
+
+/**
+ * Color filter that provides transformations to improve contrast
+ * for users with low vision.
+ *
+ * Applies the following transformations in this order. Each of these
+ * can be configured using SkHighContrastConfig.
+ *
+ * - Conversion to grayscale
+ * - Color inversion (either in RGB or HSL space)
+ * - Increasing the resulting contrast.
+ *
+ * Calling SkHighContrastFilter::Make will return nullptr if the config is
+ * not valid, e.g. if you try to call it with a contrast outside the range of
+ * -1.0 to 1.0.
+ */
+
+class SK_API SkHighContrastFilter {
+public:
+ // Returns the filter, or nullptr if the config is invalid.
+ static sk_sp<SkColorFilter> Make(const SkHighContrastConfig& config);
+
+ static void RegisterFlattenables();
+};
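+
+// Example (editorial sketch, not part of the upstream header): grayscale plus
+// lightness inversion with a mild contrast boost. Make() returns nullptr for an
+// out-of-range config, so the result should be checked:
+//
+//   SkHighContrastConfig config(/*grayscale=*/true,
+//                               SkHighContrastConfig::InvertStyle::kInvertLightness,
+//                               /*contrast=*/0.2f);
+//   if (sk_sp<SkColorFilter> cf = SkHighContrastFilter::Make(config)) {
+//       SkPaint paint;
+//       paint.setColorFilter(std::move(cf));
+//   }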
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkImageFilters.h b/gfx/skia/skia/include/effects/SkImageFilters.h
new file mode 100644
index 0000000000..5dd6c2bd43
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkImageFilters.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilters_DEFINED
+#define SkImageFilters_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTileMode.h"
+
+class SkColorFilter;
+class SkPaint;
+class SkRegion;
+
+// A set of factory functions providing useful SkImageFilter effects. For image filters that take an
+// input filter, providing nullptr means it will automatically use the dynamic source image. This
+// source depends on how the filter is applied, but is either the contents of a saved layer when
+// drawing with SkCanvas, or an explicit SkImage if using SkImage::makeWithFilter.
+class SK_API SkImageFilters {
+public:
+ /**
+ * Create a filter that updates the alpha of the image based on 'region'. Pixels inside the
+ * region are made more opaque and pixels outside are made more transparent.
+ *
+ * Specifically, if a pixel is inside the region, its alpha will be set to
+ * max(innerMin, pixel's alpha). If a pixel is outside the region, its alpha will be updated to
+ * min(outerMax, pixel's alpha).
+ * @param region The geometric region controlling the inner and outer alpha thresholds.
+ * @param innerMin The minimum alpha value for pixels inside 'region'.
+ * @param outerMax The maximum alpha value for pixels outside of 'region'.
+ * @param input The input filter, or uses the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> AlphaThreshold(const SkRegion& region, SkScalar innerMin,
+ SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that implements a custom blend mode. Each output pixel is the result of
+ * combining the corresponding background and foreground pixels using the 4 coefficients:
+ * k1 * foreground * background + k2 * foreground + k3 * background + k4
+ * @param k1, k2, k3, k4 The four coefficients used to combine the foreground and background.
+ * @param enforcePMColor If true, the RGB channels will be clamped to the calculated alpha.
+ * @param background The background content, using the source bitmap when this is null.
+ * @param foreground The foreground content, using the source bitmap when this is null.
+ * @param cropRect Optional rectangle that crops the inputs and output.
+ */
+ static sk_sp<SkImageFilter> Arithmetic(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that blurs its input by the separate X and Y sigmas. The provided tile mode
+ * is used when the blur kernel goes outside the input image.
+ * @param sigmaX The Gaussian sigma value for blurring along the X axis.
+ * @param sigmaY The Gaussian sigma value for blurring along the Y axis.
+     *  @param tileMode The tile mode applied at edges.
+ * TODO (michaelludwig) - kMirror is not supported yet
+ * @param input The input filter that is blurred, uses source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect = nullptr);
+ // As above, but defaults to the decal tile mode.
+ static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr) {
+ return Blur(sigmaX, sigmaY, SkTileMode::kDecal, std::move(input), cropRect);
+ }
+
+ /**
+ * Create a filter that applies the color filter to the input filter results.
+ * @param cf The color filter that transforms the input image.
+ * @param input The input filter, or uses the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> ColorFilter(sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that composes 'inner' with 'outer', such that the results of 'inner' are
+ * treated as the source bitmap passed to 'outer', i.e. result = outer(inner(source)).
+ * @param outer The outer filter that evaluates the results of inner.
+ * @param inner The inner filter that produces the input to outer.
+ */
+ static sk_sp<SkImageFilter> Compose(sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner);
+
+ /**
+ * Create a filter that moves each pixel in its color input based on an (x,y) vector encoded
+ * in its displacement input filter. Two color components of the displacement image are
+ * mapped into a vector as scale * (color[xChannel], color[yChannel]), where the channel
+ * selectors are one of R, G, B, or A.
+ * @param xChannelSelector RGBA channel that encodes the x displacement per pixel.
+ * @param yChannelSelector RGBA channel that encodes the y displacement per pixel.
+ * @param scale Scale applied to displacement extracted from image.
+ * @param displacement The filter defining the displacement image, or null to use source.
+ * @param color The filter providing the color pixels to be displaced.
+ * @param cropRect Optional rectangle that crops the color input and output.
+ */
+ static sk_sp<SkImageFilter> DisplacementMap(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ SkScalar scale, sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that draws a drop shadow under the input content. This filter produces an
+     *  image that includes the input's content.
+ * @param dx The X offset of the shadow.
+ * @param dy The Y offset of the shadow.
+ * @param sigmaX The blur radius for the shadow, along the X axis.
+ * @param sigmaY The blur radius for the shadow, along the Y axis.
+ * @param color The color of the drop shadow.
+ * @param input The input filter, or will use the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DropShadow(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that renders a drop shadow, in exactly the same manner as ::DropShadow,
+ * except that the resulting image does not include the input content. This allows the shadow
+ * and input to be composed by a filter DAG in a more flexible manner.
+ * @param dx The X offset of the shadow.
+ * @param dy The Y offset of the shadow.
+ * @param sigmaX The blur radius for the shadow, along the X axis.
+ * @param sigmaY The blur radius for the shadow, along the Y axis.
+ * @param color The color of the drop shadow.
+ * @param input The input filter, or will use the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DropShadowOnly(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that draws the 'srcRect' portion of image into 'dstRect' using the given
+ * filter quality. Similar to SkCanvas::drawImageRect. Returns null if 'image' is null.
+ * @param image The image that is output by the filter, subset by 'srcRect'.
+ * @param srcRect The source pixels sampled into 'dstRect'
+ * @param dstRect The local rectangle to draw the image into.
+ * @param filterQuality The filter quality that is used when sampling the image.
+ */
+ static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, SkFilterQuality filterQuality);
+ /**
+ * Create a filter that produces the image contents.
+ * @param image The image that is output by the filter.
+ */
+ static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image) {
+ // Defaults to kHigh_SkFilterQuality because the dstRect of the image filter will be mapped
+ // by the layer matrix set during filtering. If that has a scale factor, then the image
+        // will not be drawn at a 1-to-1 pixel scale, even though that is what this appears to
+        // create here.
+ SkRect r = image ? SkRect::MakeWH(image->width(), image->height()) : SkRect::MakeEmpty();
+ return Image(std::move(image), r, r, kHigh_SkFilterQuality);
+ }
+
+ /**
+ * Create a filter that mimics a zoom/magnifying lens effect.
+     *  @param srcRect  The source rectangle of the content that is magnified.
+     *  @param inset    The inset from the edges of 'srcRect' over which the magnified content
+     *                  transitions back to the unmagnified input.
+ * @param input The input filter that is magnified, if null the source bitmap is used.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Magnifier(const SkRect& srcRect, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that applies an NxM image processing kernel to the input image. This can be
+ * used to produce effects such as sharpening, blurring, edge detection, etc.
+ * @param kernelSize The kernel size in pixels, in each dimension (N by M).
+ * @param kernel The image processing kernel. Must contain N * M elements, in row order.
+ * @param gain A scale factor applied to each pixel after convolution. This can be
+ * used to normalize the kernel, if it does not already sum to 1.
+ * @param bias A bias factor added to each pixel after convolution.
+ * @param kernelOffset An offset applied to each pixel coordinate before convolution.
+ * This can be used to center the kernel over the image
+ * (e.g., a 3x3 kernel should have an offset of {1, 1}).
+ * @param tileMode How accesses outside the image are treated.
+ * TODO (michaelludwig) - kMirror is not supported yet
+ * @param convolveAlpha If true, all channels are convolved. If false, only the RGB channels
+ * are convolved, and alpha is copied from the source image.
+ * @param input The input image filter, if null the source bitmap is used instead.
+ * @param cropRect Optional rectangle to which the output processing will be limited.
+ */
+ static sk_sp<SkImageFilter> MatrixConvolution(const SkISize& kernelSize,
+ const SkScalar kernel[], SkScalar gain,
+ SkScalar bias, const SkIPoint& kernelOffset,
+ SkTileMode tileMode, bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that transforms the input image by 'matrix'. This matrix transforms the
+ * local space, which means it effectively happens prior to any transformation coming from the
+ * SkCanvas initiating the filtering.
+ * @param matrix The matrix to apply to the original content.
+ * @param filterQuality The filter quality to use when sampling the input image.
+ * @param input The image filter to transform, or null to use the source image.
+ */
+ static sk_sp<SkImageFilter> MatrixTransform(const SkMatrix& matrix,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input);
+
+ /**
+ * Create a filter that merges the 'count' filters together by drawing their results in order
+ * with src-over blending.
+ * @param filters The input filter array to merge, which must have 'count' elements. Any null
+ * filter pointers will use the source bitmap instead.
+ * @param count The number of input filters to be merged.
+ * @param cropRect Optional rectangle that crops all input filters and the output.
+ */
+ static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter>* const filters, int count,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that merges the results of the two filters together with src-over blending.
+ * @param first The first input filter, or the source bitmap if this is null.
+     *  @param second    The second input filter, or the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the inputs and output.
+ */
+ static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter> first, sk_sp<SkImageFilter> second,
+ const SkIRect* cropRect = nullptr) {
+ sk_sp<SkImageFilter> array[] = { std::move(first), std::move(second) };
+ return Merge(array, 2, cropRect);
+ }
+
+ /**
+ * Create a filter that offsets the input filter by the given vector.
+ * @param dx The x offset in local space that the image is shifted.
+ * @param dy The y offset in local space that the image is shifted.
+ * @param input The input that will be moved, if null the source bitmap is used instead.
+ * @param cropRect Optional rectangle to crop the input and output.
+ */
+ static sk_sp<SkImageFilter> Offset(SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that fills the output with the given paint.
+ * @param paint The paint to fill
+ * @param cropRect Optional rectangle that will be filled. If null, the source bitmap's bounds
+ * are filled even though the source bitmap itself is not used.
+ */
+ static sk_sp<SkImageFilter> Paint(const SkPaint& paint, const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that produces the SkPicture as its output, drawn into targetRect. Note that
+ * the targetRect is not the same as the SkIRect cropRect that many filters accept. Returns
+ * null if 'pic' is null.
+ * @param pic The picture that is drawn for the filter output.
+ * @param targetRect The drawing region for the picture.
+ */
+ static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic, const SkRect& targetRect);
+ // As above, but uses SkPicture::cullRect for the drawing region.
+ static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic) {
+ SkRect target = pic ? pic->cullRect() : SkRect::MakeEmpty();
+ return Picture(std::move(pic), target);
+ }
+
+ /**
+ * Create a tile image filter.
+ * @param src Defines the pixels to tile
+ * @param dst Defines the pixel region that the tiles will be drawn to
+ * @param input The input that will be tiled, if null the source bitmap is used instead.
+ */
+ static sk_sp<SkImageFilter> Tile(const SkRect& src, const SkRect& dst,
+ sk_sp<SkImageFilter> input);
+
+ /**
+ * This filter takes an SkBlendMode and uses it to composite the two filters together.
+ * @param background The Dst pixels used in blending, if null the source bitmap is used.
+ * @param foreground The Src pixels used in blending, if null the source bitmap is used.
+     *  @param cropRect   Optional rectangle to crop the input and output.
+ */
+ static sk_sp<SkImageFilter> Xfermode(SkBlendMode, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground = nullptr,
+ const SkIRect* cropRect = nullptr);
+
+ // Morphology filter effects
+
+ /**
+ * Create a filter that dilates each input pixel's channel values to the max value within the
+ * given radii along the x and y axes.
+ * @param radiusX The distance to dilate along the x axis to either side of each pixel.
+ * @param radiusY The distance to dilate along the y axis to either side of each pixel.
+ * @param input The image filter that is dilated, using source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Dilate(int radiusX, int radiusY, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that erodes each input pixel's channel values to the minimum channel value
+ * within the given radii along the x and y axes.
+ * @param radiusX The distance to erode along the x axis to either side of each pixel.
+ * @param radiusY The distance to erode along the y axis to either side of each pixel.
+ * @param input The image filter that is eroded, using source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Erode(int radiusX, int radiusY, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ // Lighting filter effects
+
+ /**
+ * Create a filter that calculates the diffuse illumination from a distant light source,
+ * interpreting the alpha channel of the input as the height profile of the surface (to
+ * approximate normal vectors).
+     *  @param direction    The direction to the distant light.
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DistantLitDiffuse(const SkPoint3& direction, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that calculates the diffuse illumination from a point light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors).
+ * @param location The location of the point light.
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> PointLitDiffuse(const SkPoint3& location, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that calculates the diffuse illumination from a spot light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between
+ * the location and target.
+ * @param location The location of the spot light.
+     *  @param target           The location that the spot light points towards
+ * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle
+ * @param cutoffAngle Maximum angle from lighting direction that receives full light
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> SpotLitDiffuse(const SkPoint3& location, const SkPoint3& target,
+ SkScalar falloffExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale,
+ SkScalar kd, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ /**
+ * Create a filter that calculates the specular illumination from a distant light source,
+ * interpreting the alpha channel of the input as the height profile of the surface (to
+ * approximate normal vectors).
+     *  @param direction    The direction to the distant light.
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DistantLitSpecular(const SkPoint3& direction, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that calculates the specular illumination from a point light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors).
+ * @param location The location of the point light.
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> PointLitSpecular(const SkPoint3& location, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+ /**
+ * Create a filter that calculates the specular illumination from a spot light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between
+ * the location and target.
+ * @param location The location of the spot light.
+     *  @param target           The location that the spot light points towards
+ * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle
+ * @param cutoffAngle Maximum angle from lighting direction that receives full light
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> SpotLitSpecular(const SkPoint3& location, const SkPoint3& target,
+ SkScalar falloffExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale,
+ SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkImageFilters() = delete;
+};
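+
+// Example (editorial sketch, not part of the upstream header): a small filter DAG
+// that blurs a drop shadow. Passing nullptr as an input uses the dynamic source
+// image, as described at the top of this class:
+//
+//   sk_sp<SkImageFilter> shadow = SkImageFilters::DropShadow(
+//           4, 4, 2.0f, 2.0f, SK_ColorBLACK, nullptr /*use source image*/);
+//   sk_sp<SkImageFilter> blurred = SkImageFilters::Blur(1.0f, 1.0f, std::move(shadow));
+//   SkPaint layerPaint;
+//   layerPaint.setImageFilter(std::move(blurred));
+//   canvas->saveLayer(nullptr, &layerPaint);
+//   // ... draw the content to be shadowed ...
+//   canvas->restore();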
+
+#endif // SkImageFilters_DEFINED
diff --git a/gfx/skia/skia/include/effects/SkImageSource.h b/gfx/skia/skia/include/effects/SkImageSource.h
new file mode 100644
index 0000000000..1572df717d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkImageSource.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageSource_DEFINED
+#define SkImageSource_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Image
+class SK_API SkImageSource {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImage> image);
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImage> image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality);
+
+ static void RegisterFlattenables();
+
+private:
+ SkImageSource() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
new file mode 100644
index 0000000000..8d10e8c2d6
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLayerDrawLooper_DEFINED
+#define SkLayerDrawLooper_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkDrawLooper.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+
+class SK_API SkLayerDrawLooper : public SkDrawLooper {
+public:
+ ~SkLayerDrawLooper() override;
+
+ /**
+ * Bits specifies which aspects of the layer's paint should replace the
+ * corresponding aspects on the draw's paint.
+ * kEntirePaint_Bits means use the layer's paint completely.
+ * 0 means ignore the layer's paint... except for fColorMode, which is
+ * always applied.
+ */
+ enum Bits {
+ kStyle_Bit = 1 << 0, //!< use this layer's Style/stroke settings
+ kPathEffect_Bit = 1 << 2, //!< use this layer's patheffect
+ kMaskFilter_Bit = 1 << 3, //!< use this layer's maskfilter
+ kShader_Bit = 1 << 4, //!< use this layer's shader
+ kColorFilter_Bit = 1 << 5, //!< use this layer's colorfilter
+ kXfermode_Bit = 1 << 6, //!< use this layer's xfermode
+
+ // unsupported kTextSkewX_Bit = 1 << 1,
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Color is always computed using the LayerInfo's fColorMode.
+ */
+ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fColorMode controls how we compute the final color for the layer:
+ * The layer's paint's color is treated as the SRC
+ * The draw's paint's color is treated as the DST
+ * final-color = Mode(layers-color, draws-color);
+ * Any SkBlendMode will work. Two common choices are:
+ * kSrc: to use the layer's color, ignoring the draw's
+ * kDst: to just keep the draw's color, ignoring the layer's
+ */
+ struct SK_API LayerInfo {
+ BitFlags fPaintBits;
+ SkBlendMode fColorMode;
+ SkVector fOffset;
+ bool fPostTranslate; //!< applies to fOffset
+
+ /**
+         * Initialize the LayerInfo. Defaults to settings that will draw the
+         * layer with no changes, e.g.
+ * fPaintBits == 0
+ * fColorMode == kDst_Mode
+ * fOffset == (0, 0)
+ */
+ LayerInfo();
+ };
+
+ SkDrawLooper::Context* makeContext(SkArenaAlloc*) const override;
+
+ bool asABlurShadow(BlurShadowRec* rec) const override;
+
+protected:
+ SkLayerDrawLooper();
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLayerDrawLooper)
+
+ struct Rec {
+ Rec* fNext;
+ SkPaint fPaint;
+ LayerInfo fInfo;
+ };
+ Rec* fRecs;
+ int fCount;
+
+ // state-machine during the init/next cycle
+ class LayerDrawLooperContext : public SkDrawLooper::Context {
+ public:
+ explicit LayerDrawLooperContext(const SkLayerDrawLooper* looper);
+
+ protected:
+ bool next(Info*, SkPaint* paint) override;
+
+ private:
+ Rec* fCurrRec;
+
+ static void ApplyInfo(SkPaint* dst, const SkPaint& src, const LayerInfo&);
+ };
+
+ typedef SkDrawLooper INHERITED;
+
+public:
+ class SK_API Builder {
+ public:
+ Builder();
+ ~Builder();
+
+ /**
+ * Call for each layer you want to add (from top to bottom).
+ * This returns a paint you can modify, but that ptr is only valid until
+ * the next call made to addLayer().
+ */
+ SkPaint* addLayer(const LayerInfo&);
+
+ /**
+ * This layer will draw with the original paint, at the specified offset
+ */
+ void addLayer(SkScalar dx, SkScalar dy);
+
+ /**
+         * This layer will draw with the original paint and no offset.
+ */
+ void addLayer() { this->addLayer(0, 0); }
+
+ /// Similar to addLayer, but adds a layer to the top.
+ SkPaint* addLayerOnTop(const LayerInfo&);
+
+ /**
+ * Pass list of layers on to newly built looper and return it. This will
+ * also reset the builder, so it can be used to build another looper.
+ */
+ sk_sp<SkDrawLooper> detach();
+
+ private:
+ Rec* fRecs;
+ Rec* fTopRec;
+ int fCount;
+ };
+};
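+
+// Example (editorial sketch, not part of the upstream header): a two-layer looper
+// with the original paint on top and a translucent offset "shadow" underneath,
+// following the top-to-bottom ordering documented on addLayer():
+//
+//   SkLayerDrawLooper::Builder builder;
+//   builder.addLayer();                      // top layer: the original paint
+//   SkLayerDrawLooper::LayerInfo info;       // defaults: fPaintBits == 0
+//   info.fColorMode = SkBlendMode::kSrc;     // take the shadow color from the layer paint
+//   info.fOffset.set(3, 3);
+//   builder.addLayer(info)->setColor(0x40000000);  // bottom layer: the shadow
+//   SkPaint paint;
+//   paint.setDrawLooper(builder.detach());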
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkLightingImageFilter.h b/gfx/skia/skia/include/effects/SkLightingImageFilter.h
new file mode 100644
index 0000000000..024dd4d1dd
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLightingImageFilter.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLightingImageFilter_DEFINED
+#define SkLightingImageFilter_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageFilter.h"
+
+struct SkPoint3;
+
+// DEPRECATED: Use include/effects/SkImageFilters::[Diffuse|Specular]Light[Distant|Point|Spot]
+class SK_API SkLightingImageFilter {
+public:
+ static sk_sp<SkImageFilter> MakeDistantLitDiffuse(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakePointLitDiffuse(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeSpotLitDiffuse(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeDistantLitSpecular(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakePointLitSpecular(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeSpotLitSpecular(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkLightingImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkLumaColorFilter.h b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
new file mode 100644
index 0000000000..d35540c728
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLumaColorFilter_DEFINED
+#define SkLumaColorFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkRefCnt.h"
+
+class SkRasterPipeline;
+
+/**
+ * SkLumaColorFilter multiplies the luma of its input into the alpha channel,
+ * and sets the red, green, and blue channels to zero.
+ *
+ * SkLumaColorFilter(r,g,b,a) = {0,0,0, a * luma(r,g,b)}
+ *
+ * This is similar to a luminanceToAlpha feColorMatrix,
+ * but note how this filter folds in the previous alpha,
+ * something an feColorMatrix cannot do.
+ *
+ * feColorMatrix(luminanceToAlpha; r,g,b,a) = {0,0,0, luma(r,g,b)}
+ *
+ * (Despite its name, an feColorMatrix using luminanceToAlpha does
+ * actually compute luma, a dot-product of gamma-encoded color channels,
+ * not luminance, a dot-product of linear color channels. So at least
+ * SkLumaColorFilter and feColorMatrix+luminanceToAlpha agree there.)
+ */
+
+#include "include/core/SkFlattenable.h"
+
+class SK_API SkLumaColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make();
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLumaColorFilter)
+
+ SkLumaColorFilter();
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
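
A short sketch of the intended use, assuming an ordinary raster draw; the paint setup is illustrative:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkLumaColorFilter.h"

    SkPaint paint;
    paint.setColorFilter(SkLumaColorFilter::Make());
    // Anything drawn with this paint outputs {0,0,0, a * luma(r,g,b)},
    // which makes it a convenient source for alpha masks.
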
diff --git a/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h b/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h
new file mode 100644
index 0000000000..89550f5179
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMagnifierImageFilter_DEFINED
+#define SkMagnifierImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkRect.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Magnifier
+class SK_API SkMagnifierImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(const SkRect& srcRect, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkMagnifierImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h b/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h
new file mode 100644
index 0000000000..dfb2defa64
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixConvolutionImageFilter_DEFINED
+#define SkMatrixConvolutionImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+
+class SkBitmap;
+enum class SkTileMode;
+
+/*! \class SkMatrixConvolutionImageFilter
+ Matrix convolution image filter. This filter applies an NxM image
+ processing kernel to a given input image. This can be used to produce
+ effects such as sharpening, blurring, edge detection, etc.
+
+ DEPRECATED: Use include/effects/SkImageFilters::MatrixConvolution
+ */
+
+class SK_API SkMatrixConvolutionImageFilter {
+public:
+ /*! \enum TileMode
+ * DEPRECATED: Use SkTileMode instead. */
+ enum TileMode {
+ kClamp_TileMode = 0, /*!< Clamp to the image's edge pixels. */
+ kRepeat_TileMode, /*!< Wrap around to the image's opposite edge. */
+ kClampToBlack_TileMode, /*!< Fill with transparent black. */
+ kLast_TileMode = kClampToBlack_TileMode,
+
+ // TODO: remove kMax - it is non-standard but used by Chromium!
+ kMax_TileMode = kClampToBlack_TileMode
+ };
+
+ static sk_sp<SkImageFilter> Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ /** Construct a matrix convolution image filter.
+ @param kernelSize The kernel size in pixels, in each dimension (N by M).
+ @param kernel The image processing kernel. Must contain N * M
+ elements, in row order.
+ @param gain A scale factor applied to each pixel after
+ convolution. This can be used to normalize the
+ kernel, if it does not sum to 1.
+ @param bias A bias factor added to each pixel after convolution.
+ @param kernelOffset An offset applied to each pixel coordinate before
+ convolution. This can be used to center the kernel
+ over the image (e.g., a 3x3 kernel should have an
+ offset of {1, 1}).
+ @param tileMode How accesses outside the image are treated. (@see
+ TileMode). EXPERIMENTAL: kMirror not supported yet.
+ @param convolveAlpha If true, all channels are convolved. If false,
+ only the RGB channels are convolved, and
+ alpha is copied from the source image.
+ @param input The input image filter. If NULL, the src bitmap
+ passed to filterImage() is used instead.
+ @param cropRect The rectangle to which the output processing will be limited.
+ */
+ static sk_sp<SkImageFilter> Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ SkTileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkMatrixConvolutionImageFilter() = delete;
+};
+
+#endif
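
A minimal sketch of the SkTileMode overload, using a common 3x3 sharpen kernel; the kernel values are illustrative:

    #include "include/core/SkTileMode.h"
    #include "include/effects/SkMatrixConvolutionImageFilter.h"

    // 3x3 sharpen: the weights sum to 1, so gain 1 and bias 0 preserve
    // overall brightness. kernelOffset {1, 1} centers the kernel.
    const SkScalar kernel[9] = { 0, -1,  0,
                                -1,  5, -1,
                                 0, -1,  0 };
    sk_sp<SkImageFilter> sharpen = SkMatrixConvolutionImageFilter::Make(
            SkISize::Make(3, 3), kernel, /*gain=*/1.0f, /*bias=*/0.0f,
            SkIPoint::Make(1, 1), SkTileMode::kClamp,
            /*convolveAlpha=*/false, /*input=*/nullptr);
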
diff --git a/gfx/skia/skia/include/effects/SkMergeImageFilter.h b/gfx/skia/skia/include/effects/SkMergeImageFilter.h
new file mode 100644
index 0000000000..2ad9a79086
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMergeImageFilter.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMergeImageFilter_DEFINED
+#define SkMergeImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Merge
+class SK_API SkMergeImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter>* const filters, int count,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter> first, sk_sp<SkImageFilter> second,
+ const SkImageFilter::CropRect* cropRect = nullptr) {
+ sk_sp<SkImageFilter> array[] = {
+ std::move(first),
+ std::move(second),
+ };
+ return Make(array, 2, cropRect);
+ }
+
+ static void RegisterFlattenables();
+
+private:
+ SkMergeImageFilter() = delete;
+};
+
+#endif
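
A small sketch of how the two-input overload composes with other filters; the 8-pixel offset is an illustrative assumption:

    #include "include/effects/SkMergeImageFilter.h"
    #include "include/effects/SkOffsetImageFilter.h"

    // Draw the source shifted by (8, 8), then the unfiltered source on top;
    // a null entry stands in for the source image.
    sk_sp<SkImageFilter> shifted = SkOffsetImageFilter::Make(8, 8, nullptr);
    sk_sp<SkImageFilter> merged =
            SkMergeImageFilter::Make(std::move(shifted), nullptr);
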
diff --git a/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h b/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h
new file mode 100644
index 0000000000..ea72e0eaca
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMorphologyImageFilter_DEFINED
+#define SkMorphologyImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// DEPRECATED: Use include/effects/SkImageFilters::Dilate
+class SK_API SkDilateImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ // Registers all morphology filter implementations
+ static void RegisterFlattenables();
+
+private:
+ SkDilateImageFilter() = delete;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// DEPRECATED: Use include/effects/SkImageFilters::Erode
+class SK_API SkErodeImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+private:
+ SkErodeImageFilter() = delete;
+};
+
+#endif
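
A minimal sketch chaining the two morphology filters; the 2-pixel radii are illustrative:

    #include "include/effects/SkMorphologyImageFilter.h"

    // Grow opaque regions by 2px in each direction, then shrink them back;
    // the net effect closes small gaps in the source.
    sk_sp<SkImageFilter> dilated = SkDilateImageFilter::Make(2, 2, nullptr);
    sk_sp<SkImageFilter> closed =
            SkErodeImageFilter::Make(2, 2, std::move(dilated));
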
diff --git a/gfx/skia/skia/include/effects/SkOffsetImageFilter.h b/gfx/skia/skia/include/effects/SkOffsetImageFilter.h
new file mode 100644
index 0000000000..b8ad790763
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOffsetImageFilter.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOffsetImageFilter_DEFINED
+#define SkOffsetImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Offset
+class SK_API SkOffsetImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkOffsetImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkOpPathEffect.h b/gfx/skia/skia/include/effects/SkOpPathEffect.h
new file mode 100644
index 0000000000..9686644d76
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOpPathEffect.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpPathEffect_DEFINED
+#define SkOpPathEffect_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/pathops/SkPathOps.h"
+
+class SK_API SkMergePathEffect {
+public:
+ /* Defers to two other patheffects, and then combines their outputs using the specified op.
+ * e.g.
+ * result = output_one op output_two
+ *
+ * If either one or two is nullptr, then the original path is passed through to the op.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op);
+};
+
+class SK_API SkMatrixPathEffect {
+public:
+ static sk_sp<SkPathEffect> MakeTranslate(SkScalar dx, SkScalar dy);
+ static sk_sp<SkPathEffect> Make(const SkMatrix&);
+};
+
+class SK_API SkStrokePathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(SkScalar width, SkPaint::Join, SkPaint::Cap,
+ SkScalar miter = 4);
+};
+
+#endif
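
A sketch combining these effects to keep only the rim of a stroke; the width and join/cap choices are illustrative:

    #include "include/effects/SkOpPathEffect.h"

    // Convert the path to its 10px stroke outline, then subtract the
    // original fill (the nullptr input passes the source path through),
    // leaving just the part of the stroke outside the shape.
    sk_sp<SkPathEffect> stroke = SkStrokePathEffect::Make(
            10, SkPaint::kRound_Join, SkPaint::kButt_Cap);
    sk_sp<SkPathEffect> rim = SkMergePathEffect::Make(
            std::move(stroke), /*two=*/nullptr, kDifference_SkPathOp);
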
diff --git a/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h b/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h
new file mode 100644
index 0000000000..a34c3763c9
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+
+#ifndef SkOverdrawColorFilter_DEFINED
+#define SkOverdrawColorFilter_DEFINED
+
+/**
+ * Uses the value in the src alpha channel to set the dst pixel.
+ * 0 -> fColors[0]
+ * 1 -> fColors[1]
+ * ...
+ * 5 (or larger) -> fColors[5]
+ *
+ */
+class SK_API SkOverdrawColorFilter : public SkColorFilter {
+public:
+ static constexpr int kNumColors = 6;
+
+ static sk_sp<SkOverdrawColorFilter> Make(const SkPMColor colors[kNumColors]) {
+ return sk_sp<SkOverdrawColorFilter>(new SkOverdrawColorFilter(colors));
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+ static void RegisterFlattenables();
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkOverdrawColorFilter)
+
+ SkOverdrawColorFilter(const SkPMColor colors[kNumColors]) {
+ memcpy(fColors, colors, kNumColors * sizeof(SkPMColor));
+ }
+
+ bool onAppendStages(const SkStageRec&, bool) const override;
+
+ SkPMColor fColors[kNumColors];
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif // SkOverdrawColorFilter_DEFINED
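
A sketch of a typical overdraw visualization; the color ramp is an illustrative assumption:

    #include "include/core/SkColor.h"
    #include "include/effects/SkOverdrawColorFilter.h"

    // Map the src alpha (a draw count when used for overdraw debugging) to
    // premultiplied colors: untouched pixels stay transparent, increasingly
    // "hot" colors mark heavier overdraw.
    const SkPMColor ramp[SkOverdrawColorFilter::kNumColors] = {
        SkPreMultiplyColor(0x00000000), SkPreMultiplyColor(0xFF0000FF),
        SkPreMultiplyColor(0xFF00FF00), SkPreMultiplyColor(0xFFFFFF00),
        SkPreMultiplyColor(0xFFFF8800), SkPreMultiplyColor(0xFFFF0000),
    };
    sk_sp<SkColorFilter> overdraw = SkOverdrawColorFilter::Make(ramp);
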
diff --git a/gfx/skia/skia/include/effects/SkPaintImageFilter.h b/gfx/skia/skia/include/effects/SkPaintImageFilter.h
new file mode 100644
index 0000000000..9430bc4ebf
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPaintImageFilter.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintImageFilter_DEFINED
+#define SkPaintImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+class SkPaint;
+
+// DEPRECATED: Use include/effects/SkImageFilters::Paint
+class SK_API SkPaintImageFilter {
+public:
+ /** Create a new image filter which fills the given rectangle using the
+ * given paint. If no rectangle is specified, an output is produced with
+ * the same bounds as the input primitive (even though the input
+ * primitive's pixels are not used for processing).
+ * @param paint Paint to use when filling the rect.
+ * @param rect Rectangle of output pixels. If NULL or a given crop edge is
+ * not specified, the source primitive's bounds are used
+ * instead.
+ */
+ static sk_sp<SkImageFilter> Make(const SkPaint& paint,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkPaintImageFilter() = delete;
+};
+
+#endif
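
A minimal sketch of filling a crop rect with a solid paint; the rect size and color are illustrative:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkPaintImageFilter.h"

    // Output 100x100 pixels of solid red regardless of the input pixels.
    SkPaint red;
    red.setColor(SK_ColorRED);
    SkImageFilter::CropRect crop(SkRect::MakeWH(100, 100));
    sk_sp<SkImageFilter> fill = SkPaintImageFilter::Make(red, &crop);
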
diff --git a/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
new file mode 100644
index 0000000000..0765763ba4
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPerlinNoiseShader_DEFINED
+#define SkPerlinNoiseShader_DEFINED
+
+#include "include/core/SkShader.h"
+
+/** \class SkPerlinNoiseShader
+
+ SkPerlinNoiseShader creates an image using the Perlin turbulence function.
+
+ It can produce tileable noise if asked to stitch tiles and provided a tile size.
+ In order to fill a large area with repeating noise, set the stitchTiles flag to
+ true, and render exactly a single tile of noise. Without this flag, the result
+ will contain visible seams between tiles.
+
+    The algorithm used is described here:
+ http://www.w3.org/TR/SVG/filters.html#feTurbulenceElement
+*/
+class SK_API SkPerlinNoiseShader {
+public:
+ /**
+ * This will construct Perlin noise of the given type (Fractal Noise or Turbulence).
+ *
+ * Both base frequencies (X and Y) have a usual range of (0..1) and must be non-negative.
+ *
+ * The number of octaves provided should be fairly small, with a limit of 255 enforced.
+ * Each octave doubles the frequency, so 10 octaves would produce noise from
+ * baseFrequency * 1, * 2, * 4, ..., * 512, which quickly yields insignificantly small
+ * periods and resembles regular unstructured noise rather than Perlin noise.
+ *
+ * If tileSize isn't NULL or an empty size, the tileSize parameter will be used to modify
+ * the frequencies so that the noise will be tileable for the given tile size. If tileSize
+ * is NULL or an empty size, the frequencies will be used as is without modification.
+ */
+ static sk_sp<SkShader> MakeFractalNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+ static sk_sp<SkShader> MakeTurbulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+ /**
+ * Creates an Improved Perlin Noise shader. The z value is roughly equivalent to the seed of the
+ * other two types, but minor variations to z will only slightly change the noise.
+ */
+ static sk_sp<SkShader> MakeImprovedNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar z);
+
+ static void RegisterFlattenables();
+
+private:
+ SkPerlinNoiseShader() = delete;
+};
+
+#endif
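
A minimal sketch of tileable fractal noise, following the stitching advice above; the frequencies, octave count, and 256x256 tile size are illustrative:

    #include "include/core/SkPaint.h"
    #include "include/core/SkSize.h"
    #include "include/effects/SkPerlinNoiseShader.h"

    // Render exactly one 256x256 tile; stitching makes the result wrap
    // seamlessly when the tile is repeated.
    SkISize tile = SkISize::Make(256, 256);
    SkPaint paint;
    paint.setShader(SkPerlinNoiseShader::MakeFractalNoise(
            0.05f, 0.05f, /*numOctaves=*/4, /*seed=*/0.0f, &tile));
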
diff --git a/gfx/skia/skia/include/effects/SkPictureImageFilter.h b/gfx/skia/skia/include/effects/SkPictureImageFilter.h
new file mode 100644
index 0000000000..9ceaabcc2d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPictureImageFilter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureImageFilter_DEFINED
+#define SkPictureImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+class SkPicture;
+
+// DEPRECATED: Use include/effects/SkImageFilters::Picture
+class SK_API SkPictureImageFilter {
+public:
+ /**
+ * Refs the passed-in picture.
+ */
+ static sk_sp<SkImageFilter> Make(sk_sp<SkPicture> picture);
+
+ /**
+ * Refs the passed-in picture. cropRect can be used to crop or expand the destination rect when
+ * the picture is drawn. (No scaling is implied by the dest rect; only the CTM is applied.)
+ */
+ static sk_sp<SkImageFilter> Make(sk_sp<SkPicture> picture, const SkRect& cropRect);
+
+ static void RegisterFlattenables();
+
+private:
+ SkPictureImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkShaderMaskFilter.h b/gfx/skia/skia/include/effects/SkShaderMaskFilter.h
new file mode 100644
index 0000000000..2b25d367a7
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkShaderMaskFilter.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaderMaskFilter_DEFINED
+#define SkShaderMaskFilter_DEFINED
+
+#include "include/core/SkMaskFilter.h"
+
+class SkShader;
+
+class SK_API SkShaderMaskFilter {
+public:
+ static sk_sp<SkMaskFilter> Make(sk_sp<SkShader> shader);
+
+private:
+ static void RegisterFlattenables();
+ friend class SkFlattenable;
+};
+
+#endif
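
A short sketch combining this with the Perlin noise shader declared earlier in this patch; the parameters are illustrative:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkPerlinNoiseShader.h"
    #include "include/effects/SkShaderMaskFilter.h"

    // The shader's alpha channel modulates the mask coverage, here giving
    // strokes and fills a noisy, weathered look.
    SkPaint paint;
    paint.setMaskFilter(SkShaderMaskFilter::Make(
            SkPerlinNoiseShader::MakeFractalNoise(0.1f, 0.1f, 2, 0.0f)));
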
diff --git a/gfx/skia/skia/include/effects/SkTableColorFilter.h b/gfx/skia/skia/include/effects/SkTableColorFilter.h
new file mode 100644
index 0000000000..ab964aa20d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableColorFilter.h
@@ -0,0 +1,42 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkTableColorFilter_DEFINED
+#define SkTableColorFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+
+class SK_API SkTableColorFilter {
+public:
+ /**
+ * Create a table colorfilter, copying the table into the filter, and
+ * applying it to all 4 components.
+ * a' = table[a];
+ * r' = table[r];
+ * g' = table[g];
+ * b' = table[b];
+     * Components are operated on in unpremultiplied space. If the incoming
+     * colors are premultiplied, they are temporarily unpremultiplied, then
+ * the table is applied, and then the result is remultiplied.
+ */
+ static sk_sp<SkColorFilter> Make(const uint8_t table[256]);
+
+ /**
+ * Create a table colorfilter, with a different table for each
+ * component [A, R, G, B]. If a given table is NULL, then it is
+ * treated as identity, with the component left unchanged. If a table
+ * is not null, then its contents are copied into the filter.
+ */
+ static sk_sp<SkColorFilter> MakeARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]);
+
+ static void RegisterFlattenables();
+};
+
+#endif
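
A minimal sketch of a per-channel table; the invert table is illustrative:

    #include "include/effects/SkTableColorFilter.h"

    // Invert R, G, and B through a lookup table; a null alpha table means
    // alpha passes through unchanged.
    uint8_t invert[256];
    for (int i = 0; i < 256; ++i) {
        invert[i] = 255 - i;
    }
    sk_sp<SkColorFilter> cf = SkTableColorFilter::MakeARGB(
            /*tableA=*/nullptr, invert, invert, invert);
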
diff --git a/gfx/skia/skia/include/effects/SkTableMaskFilter.h b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
new file mode 100644
index 0000000000..03535a6f98
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTableMaskFilter_DEFINED
+#define SkTableMaskFilter_DEFINED
+
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkScalar.h"
+
+/** \class SkTableMaskFilter
+
+ Applies a table lookup on each of the alpha values in the mask.
+ Helper methods create some common tables (e.g. gamma, clipping)
+ */
+class SK_API SkTableMaskFilter {
+public:
+    /** Utility that fills table with a gamma lookup:
+        table[i] = round(255 * (i/255)^gamma)
+     */
+ static void MakeGammaTable(uint8_t table[256], SkScalar gamma);
+
+ /** Utility that creates a clipping table: clamps values below min to 0
+ and above max to 255, and rescales the remaining into 0..255
+ */
+ static void MakeClipTable(uint8_t table[256], uint8_t min, uint8_t max);
+
+ static SkMaskFilter* Create(const uint8_t table[256]);
+ static SkMaskFilter* CreateGamma(SkScalar gamma);
+ static SkMaskFilter* CreateClip(uint8_t min, uint8_t max);
+
+ SkTableMaskFilter() = delete;
+};
+
+#endif
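
A short sketch using the clip helper; the 64/192 thresholds are illustrative:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkTableMaskFilter.h"

    // Harden anti-aliased edges: coverage below 64 clamps to 0, above 192
    // clamps to 255, with the values in between rescaled across 0..255.
    SkPaint paint;
    paint.setMaskFilter(
            sk_sp<SkMaskFilter>(SkTableMaskFilter::CreateClip(64, 192)));
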
diff --git a/gfx/skia/skia/include/effects/SkTileImageFilter.h b/gfx/skia/skia/include/effects/SkTileImageFilter.h
new file mode 100644
index 0000000000..348e4f1eff
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTileImageFilter.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTileImageFilter_DEFINED
+#define SkTileImageFilter_DEFINED
+
+#include "include/core/SkImageFilter.h"
+
+// DEPRECATED: Use include/effects/SkImageFilters::Tile
+class SK_API SkTileImageFilter {
+public:
+ /** Create a tile image filter
+ @param src Defines the pixels to tile
+ @param dst Defines the pixels where tiles are drawn
+ @param input Input from which the subregion defined by srcRect will be tiled
+ */
+ static sk_sp<SkImageFilter> Make(const SkRect& src,
+ const SkRect& dst,
+ sk_sp<SkImageFilter> input);
+
+ static void RegisterFlattenables();
+
+private:
+ SkTileImageFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTrimPathEffect.h b/gfx/skia/skia/include/effects/SkTrimPathEffect.h
new file mode 100644
index 0000000000..705d1c2be7
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTrimPathEffect.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTrimPathEffect_DEFINED
+#define SkTrimPathEffect_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+class SK_API SkTrimPathEffect {
+public:
+ enum class Mode {
+ kNormal, // return the subset path [start,stop]
+ kInverted, // return the complement/subset paths [0,start] + [stop,1]
+ };
+
+ /**
+ * Take start and stop "t" values (values between 0...1), and return a path that is that
+ * subset of the original path.
+ *
+ * e.g.
+ * Make(0.5, 1.0) --> return the 2nd half of the path
+ * Make(0.33333, 0.66667) --> return the middle third of the path
+ *
+ * The trim values apply to the entire path, so if it contains several contours, all of them
+     * are included in the calculation.
+ *
+ * startT and stopT must be 0..1 inclusive. If they are outside of that interval, they will
+ * be pinned to the nearest legal value. If either is NaN, null will be returned.
+ *
+ * Note: for Mode::kNormal, this will return one (logical) segment (even if it is spread
+ * across multiple contours). For Mode::kInverted, this will return 2 logical
+ * segments: 0...stopT and startT...1
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar startT, SkScalar stopT, Mode = Mode::kNormal);
+};
+
+#endif
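
A minimal sketch; the 1/3 and 2/3 trim values are illustrative:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkTrimPathEffect.h"

    // Stroke only the middle third of each path; with Mode::kInverted the
    // first and last thirds would be kept instead.
    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);
    paint.setPathEffect(SkTrimPathEffect::Make(1 / 3.0f, 2 / 3.0f));
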
diff --git a/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h b/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h
new file mode 100644
index 0000000000..f2d5ec438c
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodeImageFilter_DEFINED
+#define SkXfermodeImageFilter_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkImageFilter.h"
+
+/**
+ * This filter takes a SkBlendMode, and uses it to composite the foreground over the background.
+ * If foreground or background is NULL, the input bitmap (src) is used instead.
+ * DEPRECATED: Use include/effects/SkImageFilters::Xfermode
+ */
+class SK_API SkXfermodeImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(SkBlendMode, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect);
+ static sk_sp<SkImageFilter> Make(SkBlendMode mode, sk_sp<SkImageFilter> background) {
+ return Make(mode, std::move(background), nullptr, nullptr);
+ }
+
+ static void RegisterFlattenables();
+
+private:
+ SkXfermodeImageFilter(); // can't instantiate
+};
+
+#endif
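
A short sketch of the two-argument convenience overload; the blend mode is illustrative:

    #include "include/effects/SkXfermodeImageFilter.h"

    // Composite the unfiltered source (foreground) over itself with
    // kMultiply, darkening wherever content overlaps.
    sk_sp<SkImageFilter> darkened =
            SkXfermodeImageFilter::Make(SkBlendMode::kMultiply,
                                        /*background=*/nullptr);
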
diff --git a/gfx/skia/skia/include/encode/SkEncoder.h b/gfx/skia/skia/include/encode/SkEncoder.h
new file mode 100644
index 0000000000..1a9c37e7f5
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkEncoder.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncoder_DEFINED
+#define SkEncoder_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTemplates.h"
+
+class SK_API SkEncoder : SkNoncopyable {
+public:
+
+ /**
+ * Encode |numRows| rows of input. If the caller requests more rows than are remaining
+ * in the src, this will encode all of the remaining rows. |numRows| must be greater
+ * than zero.
+ */
+ bool encodeRows(int numRows);
+
+ virtual ~SkEncoder() {}
+
+protected:
+
+ virtual bool onEncodeRows(int numRows) = 0;
+
+ SkEncoder(const SkPixmap& src, size_t storageBytes)
+ : fSrc(src)
+ , fCurrRow(0)
+ , fStorage(storageBytes)
+ {}
+
+ const SkPixmap& fSrc;
+ int fCurrRow;
+ SkAutoTMalloc<uint8_t> fStorage;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/encode/SkJpegEncoder.h b/gfx/skia/skia/include/encode/SkJpegEncoder.h
new file mode 100644
index 0000000000..e900cd9df0
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkJpegEncoder.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegEncoder_DEFINED
+#define SkJpegEncoder_DEFINED
+
+#include "include/encode/SkEncoder.h"
+
+class SkJpegEncoderMgr;
+class SkWStream;
+
+class SK_API SkJpegEncoder : public SkEncoder {
+public:
+
+ enum class AlphaOption {
+ kIgnore,
+ kBlendOnBlack,
+ };
+
+ enum class Downsample {
+ /**
+ * Reduction by a factor of two in both the horizontal and vertical directions.
+ */
+ k420,
+
+ /**
+ * Reduction by a factor of two in the horizontal direction.
+ */
+ k422,
+
+ /**
+ * No downsampling.
+ */
+ k444,
+ };
+
+ struct Options {
+ /**
+ * |fQuality| must be in [0, 100] where 0 corresponds to the lowest quality.
+ */
+ int fQuality = 100;
+
+ /**
+ * Choose the downsampling factor for the U and V components. This is only
+ * meaningful if the |src| is not kGray, since kGray will not be encoded as YUV.
+ *
+ * Our default value matches the libjpeg-turbo default.
+ */
+ Downsample fDownsample = Downsample::k420;
+
+ /**
+ * Jpegs must be opaque. This instructs the encoder on how to handle input
+ * images with alpha.
+ *
+ * The default is to ignore the alpha channel and treat the image as opaque.
+ * Another option is to blend the pixels onto a black background before encoding.
+ * In the second case, the encoder supports linear or legacy blending.
+ */
+ AlphaOption fAlphaOption = AlphaOption::kIgnore;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+
+ /**
+ * Create a jpeg encoder that will encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * |dst| is unowned but must remain valid for the lifetime of the object.
+ *
+ * This returns nullptr on an invalid or unsupported |src|.
+ */
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options);
+
+ ~SkJpegEncoder() override;
+
+protected:
+ bool onEncodeRows(int numRows) override;
+
+private:
+ SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr>, const SkPixmap& src);
+
+ std::unique_ptr<SkJpegEncoderMgr> fEncoderMgr;
+ typedef SkEncoder INHERITED;
+};
+
+#endif
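
A minimal sketch of the one-shot Encode() entry point; the helper name, file path handling, and quality value are illustrative assumptions:

    #include "include/core/SkBitmap.h"
    #include "include/core/SkStream.h"
    #include "include/encode/SkJpegEncoder.h"

    // Hypothetical helper: write |bitmap| to |path| as a JPEG.
    bool SaveJpeg(const SkBitmap& bitmap, const char* path) {
        SkPixmap src;
        if (!bitmap.peekPixels(&src)) {
            return false;  // pixels not directly addressable
        }
        SkFILEWStream dst(path);
        SkJpegEncoder::Options options;
        options.fQuality = 90;  // trade a little quality for a smaller file
        return dst.isValid() && SkJpegEncoder::Encode(&dst, src, options);
    }
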
diff --git a/gfx/skia/skia/include/encode/SkPngEncoder.h b/gfx/skia/skia/include/encode/SkPngEncoder.h
new file mode 100644
index 0000000000..bf24bc5cce
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkPngEncoder.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngEncoder_DEFINED
+#define SkPngEncoder_DEFINED
+
+#include "include/core/SkDataTable.h"
+#include "include/encode/SkEncoder.h"
+
+class SkPngEncoderMgr;
+class SkWStream;
+
+class SK_API SkPngEncoder : public SkEncoder {
+public:
+
+ enum class FilterFlag : int {
+ kZero = 0x00,
+ kNone = 0x08,
+ kSub = 0x10,
+ kUp = 0x20,
+ kAvg = 0x40,
+ kPaeth = 0x80,
+ kAll = kNone | kSub | kUp | kAvg | kPaeth,
+ };
+
+ struct Options {
+ /**
+ * Selects which filtering strategies to use.
+ *
+ * If a single filter is chosen, libpng will use that filter for every row.
+ *
+ * If multiple filters are chosen, libpng will use a heuristic to guess which filter
+         * will encode smallest, then apply that filter. This happens on a per-row basis;
+         * different rows can use different filters.
+         *
+         * Using a single filter (or fewer filters) is typically faster. Trying all of the
+ * filters may help minimize the output file size.
+ *
+ * Our default value matches libpng's default.
+ */
+ FilterFlag fFilterFlags = FilterFlag::kAll;
+
+ /**
+ * Must be in [0, 9] where 9 corresponds to maximal compression. This value is passed
+ * directly to zlib. 0 is a special case to skip zlib entirely, creating dramatically
+ * larger pngs.
+ *
+ * Our default value matches libpng's default.
+ */
+ int fZLibLevel = 6;
+
+ /**
+ * Represents comments in the tEXt ancillary chunk of the png.
+ * The 2i-th entry is the keyword for the i-th comment,
+ * and the (2i + 1)-th entry is the text for the i-th comment.
+ */
+ sk_sp<SkDataTable> fComments;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+
+ /**
+ * Create a png encoder that will encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * |dst| is unowned but must remain valid for the lifetime of the object.
+ *
+ * This returns nullptr on an invalid or unsupported |src|.
+ */
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options);
+
+ ~SkPngEncoder() override;
+
+protected:
+ bool onEncodeRows(int numRows) override;
+
+ SkPngEncoder(std::unique_ptr<SkPngEncoderMgr>, const SkPixmap& src);
+
+ std::unique_ptr<SkPngEncoderMgr> fEncoderMgr;
+ typedef SkEncoder INHERITED;
+};
+
+static inline SkPngEncoder::FilterFlag operator|(SkPngEncoder::FilterFlag x,
+ SkPngEncoder::FilterFlag y) {
+ return (SkPngEncoder::FilterFlag)((int)x | (int)y);
+}
+
+#endif
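
A sketch of the incremental Make()/encodeRows() path, which also exercises the FilterFlag operator| defined above; the 16-row chunk size is an illustrative assumption:

    #include <algorithm>

    #include "include/core/SkStream.h"
    #include "include/encode/SkPngEncoder.h"

    bool EncodePngInChunks(SkWStream* dst, const SkPixmap& src) {
        SkPngEncoder::Options options;
        options.fFilterFlags =
                SkPngEncoder::FilterFlag::kSub | SkPngEncoder::FilterFlag::kUp;
        std::unique_ptr<SkEncoder> encoder =
                SkPngEncoder::Make(dst, src, options);
        if (!encoder) {
            return false;  // invalid or unsupported |src|
        }
        // Encode 16 rows at a time, e.g. to yield between slices of work.
        for (int row = 0; row < src.height(); row += 16) {
            if (!encoder->encodeRows(std::min(16, src.height() - row))) {
                return false;
            }
        }
        return true;
    }
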
diff --git a/gfx/skia/skia/include/encode/SkWebpEncoder.h b/gfx/skia/skia/include/encode/SkWebpEncoder.h
new file mode 100644
index 0000000000..25f986a9d5
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkWebpEncoder.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWebpEncoder_DEFINED
+#define SkWebpEncoder_DEFINED
+
+#include "include/encode/SkEncoder.h"
+
+class SkWStream;
+
+namespace SkWebpEncoder {
+
+ enum class Compression {
+ kLossy,
+ kLossless,
+ };
+
+ struct SK_API Options {
+ /**
+ * |fCompression| determines whether we will use webp lossy or lossless compression.
+ *
+ * |fQuality| must be in [0.0f, 100.0f].
+ * If |fCompression| is kLossy, |fQuality| corresponds to the visual quality of the
+ * encoding. Decreasing the quality will result in a smaller encoded image.
+ * If |fCompression| is kLossless, |fQuality| corresponds to the amount of effort
+ * put into the encoding. Lower values will compress faster into larger files,
+ * while larger values will compress slower into smaller files.
+ *
+ * This scheme is designed to match the libwebp API.
+ */
+ Compression fCompression = Compression::kLossy;
+ float fQuality = 100.0f;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ SK_API bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+}
+
+#endif
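
A minimal sketch of lossless encoding to an in-memory stream; the quality/effort value and helper name are illustrative:

    #include "include/core/SkStream.h"
    #include "include/encode/SkWebpEncoder.h"

    // |src| is assumed to be a valid SkPixmap from the surrounding code.
    bool EncodeLosslessWebp(const SkPixmap& src, SkDynamicMemoryWStream* dst) {
        SkWebpEncoder::Options options;
        options.fCompression = SkWebpEncoder::Compression::kLossless;
        options.fQuality = 50.0f;  // for kLossless this is effort, not fidelity
        return SkWebpEncoder::Encode(dst, src, options);
    }
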
diff --git a/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
new file mode 100644
index 0000000000..bda1e769fd
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendDrawableInfo_DEFINED
+#define GrBackendDrawableInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+class SK_API GrBackendDrawableInfo {
+public:
+ // Creates an invalid backend drawable info.
+ GrBackendDrawableInfo() : fIsValid(false) {}
+
+ GrBackendDrawableInfo(const GrVkDrawableInfo& info)
+ : fIsValid(true)
+ , fBackend(GrBackendApi::kVulkan)
+ , fVkInfo(info) {}
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ GrBackendApi backend() const { return fBackend; }
+
+ bool getVkDrawableInfo(GrVkDrawableInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *outInfo = fVkInfo;
+ return true;
+ }
+ return false;
+ }
+
+private:
+ bool fIsValid;
+ GrBackendApi fBackend;
+ GrVkDrawableInfo fVkInfo;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrBackendSemaphore.h b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
new file mode 100644
index 0000000000..cbeb2e974c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSemaphore_DEFINED
+#define GrBackendSemaphore_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/gpu/mtl/GrMtlTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+/**
+ * Wrapper class for passing into and receiving data from Ganesh about a backend semaphore object.
+ */
+class GrBackendSemaphore {
+public:
+    // For convenience we just set the backend here to OpenGL. The GrBackendSemaphore cannot be
+    // used until initGL, initVulkan, or initMetal is called, which sets the appropriate backend.
+ GrBackendSemaphore() : fBackend(GrBackendApi::kOpenGL), fGLSync(0), fIsInitialized(false) {}
+
+ void initGL(GrGLsync sync) {
+ fBackend = GrBackendApi::kOpenGL;
+ fGLSync = sync;
+ fIsInitialized = true;
+ }
+
+ void initVulkan(VkSemaphore semaphore) {
+ fBackend = GrBackendApi::kVulkan;
+ fVkSemaphore = semaphore;
+#ifdef SK_VULKAN
+ fIsInitialized = true;
+#else
+ fIsInitialized = false;
+#endif
+ }
+
+ // It is the creator's responsibility to ref the MTLEvent passed in here, via __bridge_retained.
+ // The other end will wrap this BackendSemaphore and take the ref, via __bridge_transfer.
+ void initMetal(GrMTLHandle event, uint64_t value) {
+ fBackend = GrBackendApi::kMetal;
+ fMtlEvent = event;
+ fMtlValue = value;
+#ifdef SK_METAL
+ fIsInitialized = true;
+#else
+ fIsInitialized = false;
+#endif
+ }
+
+ bool isInitialized() const { return fIsInitialized; }
+
+ GrGLsync glSync() const {
+ if (!fIsInitialized || GrBackendApi::kOpenGL != fBackend) {
+ return 0;
+ }
+ return fGLSync;
+ }
+
+ VkSemaphore vkSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kVulkan != fBackend) {
+ return VK_NULL_HANDLE;
+ }
+ return fVkSemaphore;
+ }
+
+ GrMTLHandle mtlSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return nullptr;
+ }
+ return fMtlEvent;
+ }
+
+ uint64_t mtlValue() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return 0;
+ }
+ return fMtlValue;
+ }
+
+private:
+ GrBackendApi fBackend;
+ union {
+ GrGLsync fGLSync;
+ VkSemaphore fVkSemaphore;
+ GrMTLHandle fMtlEvent; // Expected to be an id<MTLEvent>
+ };
+ uint64_t fMtlValue;
+ bool fIsInitialized;
+};
+
+#endif
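
A sketch of wrapping an existing GL fence for Skia to wait on; glSyncHandle is a hypothetical sync object created by the surrounding GL code (e.g. via glFenceSync):

    #include "include/gpu/GrBackendSemaphore.h"

    GrBackendSemaphore semaphore;
    semaphore.initGL(glSyncHandle);  // hypothetical GrGLsync from the caller
    SkASSERT(semaphore.isInitialized());
    // The semaphore can now be handed to Skia, e.g. via SkSurface::wait().
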
diff --git a/gfx/skia/skia/include/gpu/GrBackendSurface.h b/gfx/skia/skia/include/gpu/GrBackendSurface.h
new file mode 100644
index 0000000000..e98f6af2c2
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSurface.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSurface_DEFINED
+#define GrBackendSurface_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/gpu/mock/GrMockTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/GrGLTypesPriv.h"
+#include "include/private/GrVkTypesPriv.h"
+
+#ifdef SK_DAWN
+#include "include/gpu/dawn/GrDawnTypes.h"
+#endif
+
+class GrVkImageLayout;
+class GrGLTextureParameters;
+
+#ifdef SK_DAWN
+#include "dawn/dawncpp.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#if GR_TEST_UTILS
+class SkString;
+#endif
+
+#if !SK_SUPPORT_GPU
+
+// SkSurface and SkImage rely on a minimal version of these always being available
+class SK_API GrBackendTexture {
+public:
+ GrBackendTexture() {}
+
+ bool isValid() const { return false; }
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+ GrBackendRenderTarget() {}
+
+ bool isValid() const { return false; }
+};
+#else
+
+enum class GrGLFormat;
+
+class SK_API GrBackendFormat {
+public:
+ // Creates an invalid backend format.
+ GrBackendFormat() {}
+
+ GrBackendFormat(const GrBackendFormat& src);
+
+ static GrBackendFormat MakeGL(GrGLenum format, GrGLenum target) {
+ return GrBackendFormat(format, target);
+ }
+
+ static GrBackendFormat MakeVk(VkFormat format) {
+ return GrBackendFormat(format, GrVkYcbcrConversionInfo());
+ }
+
+ static GrBackendFormat MakeVk(const GrVkYcbcrConversionInfo& ycbcrInfo);
+
+#ifdef SK_DAWN
+ static GrBackendFormat MakeDawn(dawn::TextureFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+#ifdef SK_METAL
+ static GrBackendFormat MakeMtl(GrMTLPixelFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+ static GrBackendFormat MakeMock(GrColorType colorType) {
+ return GrBackendFormat(colorType);
+ }
+
+ bool operator==(const GrBackendFormat& that) const;
+ bool operator!=(const GrBackendFormat& that) const { return !(*this == that); }
+
+ GrBackendApi backend() const { return fBackend; }
+ GrTextureType textureType() const { return fTextureType; }
+
+ /**
+ * If the backend API is GL this gets the format as a GrGLFormat. Otherwise, returns
+ * GrGLFormat::kUnknown.
+ */
+ GrGLFormat asGLFormat() const;
+
+ /**
+ * If the backend API is Vulkan this gets the format as a VkFormat and returns true. Otherwise,
+ * returns false.
+ */
+ bool asVkFormat(VkFormat*) const;
+
+ const GrVkYcbcrConversionInfo* getVkYcbcrConversionInfo() const;
+
+#ifdef SK_DAWN
+ /**
+ * If the backend API is Dawn this gets the format as a dawn::TextureFormat and returns true.
+ * Otherwise, returns false.
+ */
+ bool asDawnFormat(dawn::TextureFormat*) const;
+#endif
+
+#ifdef SK_METAL
+ /**
+     * If the backend API is Metal this gets the format as a GrMTLPixelFormat. Otherwise,
+     * returns MTLPixelFormatInvalid.
+ */
+ GrMTLPixelFormat asMtlFormat() const;
+#endif
+
+ /**
+ * If the backend API is Mock this gets the format as a GrColorType. Otherwise, returns
+ * GrColorType::kUnknown.
+ */
+ GrColorType asMockColorType() const;
+
+ // If possible, copies the GrBackendFormat and forces the texture type to be Texture2D. If the
+ // GrBackendFormat was for Vulkan and it originally had a GrVkYcbcrConversionInfo, we will
+ // remove the conversion and set the format to be VK_FORMAT_R8G8B8A8_UNORM.
+ GrBackendFormat makeTexture2D() const;
+
+ // Returns true if the backend format has been initialized.
+ bool isValid() const { return fValid; }
+
+#if GR_TEST_UTILS
+ SkString toStr() const;
+#endif
+
+private:
+ GrBackendFormat(GrGLenum format, GrGLenum target);
+
+ GrBackendFormat(const VkFormat vkFormat, const GrVkYcbcrConversionInfo&);
+
+#ifdef SK_DAWN
+ GrBackendFormat(dawn::TextureFormat format);
+#endif
+
+#ifdef SK_METAL
+ GrBackendFormat(const GrMTLPixelFormat mtlFormat);
+#endif
+
+ GrBackendFormat(GrColorType colorType);
+
+ GrBackendApi fBackend = GrBackendApi::kMock;
+ bool fValid = false;
+
+ union {
+ GrGLenum fGLFormat; // the sized, internal format of the GL resource
+ struct {
+ VkFormat fFormat;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ } fVk;
+#ifdef SK_DAWN
+ dawn::TextureFormat fDawnFormat;
+#endif
+
+#ifdef SK_METAL
+ GrMTLPixelFormat fMtlFormat;
+#endif
+ GrColorType fMockColorType;
+ };
+ GrTextureType fTextureType = GrTextureType::kNone;
+};
+
+class SK_API GrBackendTexture {
+public:
+ // Creates an invalid backend texture.
+ GrBackendTexture() : fIsValid(false) {}
+
+ // The GrGLTextureInfo must have a valid fFormat.
+ GrBackendTexture(int width,
+ int height,
+ GrMipMapped,
+ const GrGLTextureInfo& glInfo);
+
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo);
+
+#ifdef SK_METAL
+ GrBackendTexture(int width,
+ int height,
+ GrMipMapped,
+ const GrMtlTextureInfo& mtlInfo);
+#endif
+
+#ifdef SK_DAWN
+ GrBackendTexture(int width,
+ int height,
+ const GrDawnImageInfo& dawnInfo);
+#endif
+
+ GrBackendTexture(int width,
+ int height,
+ GrMipMapped,
+ const GrMockTextureInfo& mockInfo);
+
+ GrBackendTexture(const GrBackendTexture& that);
+
+ ~GrBackendTexture();
+
+ GrBackendTexture& operator=(const GrBackendTexture& that);
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ bool hasMipMaps() const { return GrMipMapped::kYes == fMipMapped; }
+ GrBackendApi backend() const {return fBackend; }
+
+ // If the backend API is GL, copies a snapshot of the GrGLTextureInfo struct into the passed in
+ // pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLTextureInfo(GrGLTextureInfo*) const;
+
+ // Call this to indicate that the texture parameters have been modified in the GL context
+ // externally to GrContext.
+ void glTextureParametersModified();
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnImageInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnImageInfo(GrDawnImageInfo*) const;
+#endif
+
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+ // Get the GrBackendFormat for this texture (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockTextureInfo(GrMockTextureInfo*) const;
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ // Returns true if both textures are valid and refer to the same API texture.
+ bool isSameTexture(const GrBackendTexture&);
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendTexture& , const GrBackendTexture&);
+#endif
+
+private:
+
+#ifdef SK_GL
+ friend class GrGLTexture;
+ friend class GrGLGpu; // for getGLTextureParams
+ GrBackendTexture(int width,
+ int height,
+ GrMipMapped,
+ const GrGLTextureInfo,
+ sk_sp<GrGLTextureParameters>);
+ sk_sp<GrGLTextureParameters> getGLTextureParams() const;
+#endif
+
+#ifdef SK_VULKAN
+ friend class GrVkTexture;
+ friend class GrVkGpu; // for getGrVkImageLayout
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<GrVkImageLayout> layout);
+ sk_sp<GrVkImageLayout> getGrVkImageLayout() const;
+#endif
+
+    // Free and release any resources being held by the GrBackendTexture.
+ void cleanup();
+
+ bool fIsValid;
+    int fWidth;         //!< width in pixels
+    int fHeight;        //!< height in pixels
+ GrMipMapped fMipMapped;
+ GrBackendApi fBackend;
+
+ union {
+#ifdef SK_GL
+ GrGLBackendTextureInfo fGLInfo;
+#endif
+ GrVkBackendSurfaceInfo fVkInfo;
+ GrMockTextureInfo fMockInfo;
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnImageInfo fDawnInfo;
+#endif
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+    // Creates an invalid backend render target.
+ GrBackendRenderTarget() : fIsValid(false) {}
+
+    // The GrGLFramebufferInfo must have a valid fFormat.
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrGLFramebufferInfo& glInfo);
+
+#ifdef SK_DAWN
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrDawnImageInfo& dawnInfo);
+#endif
+
+ /** Deprecated, use version that does not take stencil bits. */
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrVkImageInfo& vkInfo);
+ GrBackendRenderTarget(int width, int height, int sampleCnt, const GrVkImageInfo& vkInfo);
+
+#ifdef SK_METAL
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrMtlTextureInfo& mtlInfo);
+#endif
+
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrMockRenderTargetInfo& mockInfo);
+
+ ~GrBackendRenderTarget();
+
+ GrBackendRenderTarget(const GrBackendRenderTarget& that);
+ GrBackendRenderTarget& operator=(const GrBackendRenderTarget&);
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ int sampleCnt() const { return fSampleCnt; }
+ int stencilBits() const { return fStencilBits; }
+ GrBackendApi backend() const {return fBackend; }
+
+ // If the backend API is GL, copies a snapshot of the GrGLFramebufferInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLFramebufferInfo(GrGLFramebufferInfo*) const;
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnImageInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnImageInfo(GrDawnImageInfo*) const;
+#endif
+
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendRenderTarget, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+ // Get the GrBackendFormat for this render target (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockRenderTargetInfo(GrMockRenderTargetInfo*) const;
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendRenderTarget&, const GrBackendRenderTarget&);
+#endif
+
+private:
+ friend class GrVkGpu; // for getGrVkImageLayout
+ sk_sp<GrVkImageLayout> getGrVkImageLayout() const;
+
+ friend class GrVkRenderTarget;
+ GrBackendRenderTarget(int width, int height, int sampleCnt, const GrVkImageInfo& vkInfo,
+ sk_sp<GrVkImageLayout> layout);
+
+    // Free and release any resources being held by the GrBackendRenderTarget.
+ void cleanup();
+
+ bool fIsValid;
+    int fWidth;         //!< width in pixels
+    int fHeight;        //!< height in pixels
+
+ int fSampleCnt;
+ int fStencilBits;
+
+ GrBackendApi fBackend;
+
+ union {
+#ifdef SK_GL
+ GrGLFramebufferInfo fGLInfo;
+#endif
+ GrVkBackendSurfaceInfo fVkInfo;
+ GrMockRenderTargetInfo fMockInfo;
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnImageInfo fDawnInfo;
+#endif
+};
+
+#endif
+
+#endif
+
diff --git a/gfx/skia/skia/include/gpu/GrConfig.h b/gfx/skia/skia/include/gpu/GrConfig.h
new file mode 100644
index 0000000000..3daae6139c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrConfig.h
@@ -0,0 +1,164 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrConfig_DEFINED
+#define GrConfig_DEFINED
+
+#include "include/core/SkTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// preconfig section:
+//
+// All the work before including GrUserConfig.h should center around guessing
+// what platform we're on, and defining low-level symbols based on that.
+//
+// A build environment may have already defined symbols, so we first check
+// for that
+//
+
+// hack to ensure we know what sort of Apple platform we're on
+#if defined(__APPLE_CPP__) || defined(__APPLE_CC__)
+ #include <TargetConditionals.h>
+#endif
+
+/**
+ * Gr defines are set to 0 or 1, rather than being undefined or defined
+ */
+
+#if !defined(GR_CACHE_STATS)
+ #if defined(SK_DEBUG) || defined(SK_DUMP_STATS)
+ #define GR_CACHE_STATS 1
+ #else
+ #define GR_CACHE_STATS 0
+ #endif
+#endif
+
+#if !defined(GR_GPU_STATS)
+ #if defined(SK_DEBUG) || defined(SK_DUMP_STATS) || defined(GR_TEST_UTILS)
+ #define GR_GPU_STATS 1
+ #else
+ #define GR_GPU_STATS 0
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Include stdint.h with defines that trigger declaration of C99 limit/const
+ * macros here before anyone else has a chance to include stdint.h without
+ * these.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+#include <stdint.h>
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+// postconfig section:
+//
+
+/**
+ * GR_STRING makes a string of X where X is expanded before conversion to a string
+ * if X itself contains macros.
+ */
+#define GR_STRING(X) GR_STRING_IMPL(X)
+#define GR_STRING_IMPL(X) #X
+
+/**
+ * GR_CONCAT concatenates X and Y where each is expanded before
+ * concatenation if either contains macros.
+ */
+#define GR_CONCAT(X,Y) GR_CONCAT_IMPL(X,Y)
+#define GR_CONCAT_IMPL(X,Y) X##Y
+
+/**
+ * Creates a string of the form "<filename>(<linenumber>) : "
+ */
+#define GR_FILE_AND_LINE_STR __FILE__ "(" GR_STRING(__LINE__) ") : "
+
+/**
+ * Compilers have different ways of issuing warnings. This macro
+ * attempts to abstract them, but may need to be specialized for your
+ * particular compiler.
+ * To insert compiler warnings use "#pragma message GR_WARN(<string>)"
+ */
+#if defined(_MSC_VER)
+ #define GR_WARN(MSG) (GR_FILE_AND_LINE_STR "WARNING: " MSG)
+#else//__GNUC__ - may need other defines for different compilers
+ #define GR_WARN(MSG) ("WARNING: " MSG)
+#endif
+
+/**
+ * GR_ALWAYSBREAK is an unconditional break in all builds.
+ */
+#if !defined(GR_ALWAYSBREAK)
+ #if defined(SK_BUILD_FOR_WIN)
+ #define GR_ALWAYSBREAK SkNO_RETURN_HINT(); __debugbreak()
+ #else
+ // TODO: do other platforms really not have continuable breakpoints?
+ // sign extend for 64bit architectures to be sure this is
+ // in the high address range
+ #define GR_ALWAYSBREAK SkNO_RETURN_HINT(); *((int*)(int64_t)(int32_t)0xbeefcafe) = 0;
+ #endif
+#endif
+
+/**
+ * GR_DEBUGBREAK is an unconditional break in debug builds.
+ */
+#if !defined(GR_DEBUGBREAK)
+ #ifdef SK_DEBUG
+ #define GR_DEBUGBREAK GR_ALWAYSBREAK
+ #else
+ #define GR_DEBUGBREAK
+ #endif
+#endif
+
+/**
+ * GR_ALWAYSASSERT is an assertion in all builds.
+ */
+#if !defined(GR_ALWAYSASSERT)
+ #define GR_ALWAYSASSERT(COND) \
+ do { \
+ if (!(COND)) { \
+ SkDebugf("%s %s failed\n", GR_FILE_AND_LINE_STR, #COND); \
+ GR_ALWAYSBREAK; \
+ } \
+ } while (false)
+#endif
+
+/**
+ * GR_DEBUGASSERT is an assertion in debug builds only.
+ */
+#if !defined(GR_DEBUGASSERT)
+ #ifdef SK_DEBUG
+ #define GR_DEBUGASSERT(COND) GR_ALWAYSASSERT(COND)
+ #else
+ #define GR_DEBUGASSERT(COND)
+ #endif
+#endif
+
+/**
+ * Prettier forms of the above macros.
+ */
+#define GrAlwaysAssert(COND) GR_ALWAYSASSERT(COND)
+
+/**
+ * GR_STATIC_ASSERT is a compile time assertion. Depending on the platform
+ * it may print the message in the compiler log. Obviously, the condition must
+ * be evaluatable at compile time.
+ */
+#define GR_STATIC_ASSERT(CONDITION) static_assert(CONDITION, "bug")
+
+#endif
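
A small sketch of the expansion-order guarantee these macros provide; the WIDTH macro is an illustrative assumption:

    #include "include/gpu/GrConfig.h"

    #define WIDTH 256
    // GR_STRING expands its argument first, so this yields "256", not
    // "WIDTH"; a plain #X would stringize the unexpanded token.
    static const char* kWidthStr = GR_STRING(WIDTH);
    GR_STATIC_ASSERT(sizeof(int32_t) == 4);
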
diff --git a/gfx/skia/skia/include/gpu/GrContext.h b/gfx/skia/skia/include/gpu/GrContext.h
new file mode 100644
index 0000000000..405195476d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContext.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContext_DEFINED
+#define GrContext_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/private/GrRecordingContext.h"
+
+// We shouldn't need this but currently Android is relying on this being include transitively.
+#include "include/core/SkUnPreMultiply.h"
+
+class GrAtlasManager;
+class GrBackendSemaphore;
+class GrCaps;
+class GrClientMappedBufferManager;
+class GrContextPriv;
+class GrContextThreadSafeProxy;
+class GrFragmentProcessor;
+struct GrGLInterface;
+class GrGpu;
+struct GrMockOptions;
+class GrPath;
+class GrRenderTargetContext;
+class GrResourceCache;
+class GrResourceProvider;
+class GrSamplerState;
+class GrSkSLFPFactoryCache;
+class GrSurfaceProxy;
+class GrSwizzle;
+class GrTextContext;
+class GrTextureProxy;
+struct GrVkBackendContext;
+
+class SkImage;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+class SkTaskGroup;
+class SkTraceMemoryDump;
+
+class SK_API GrContext : public GrRecordingContext {
+public:
+ /**
+ * Creates a GrContext for a backend context. If no GrGLInterface is provided then the result of
+ * GrGLMakeNativeInterface() is used if it succeeds.
+ */
+ static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
+ static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>);
+ static sk_sp<GrContext> MakeGL(const GrContextOptions&);
+ static sk_sp<GrContext> MakeGL();
+
+ static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
+ static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&);
+
+#ifdef SK_METAL
+ /**
+ * Makes a GrContext which uses Metal as the backend. The device parameter is an MTLDevice
+ * and queue is an MTLCommandQueue which should be used by the backend. These objects must
+ * have a ref on them which can be transferred to Ganesh which will release the ref when the
+ * GrContext is destroyed.
+ */
+ static sk_sp<GrContext> MakeMetal(void* device, void* queue, const GrContextOptions& options);
+ static sk_sp<GrContext> MakeMetal(void* device, void* queue);
+#endif
+
+#ifdef SK_DAWN
+ static sk_sp<GrContext> MakeDawn(const dawn::Device& device, const GrContextOptions& options);
+ static sk_sp<GrContext> MakeDawn(const dawn::Device& device);
+#endif
+
+ static sk_sp<GrContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
+ static sk_sp<GrContext> MakeMock(const GrMockOptions*);
+
+ ~GrContext() override;
+
+ sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
+
+ /**
+ * The GrContext normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the context that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+     * The flag bits, state, are dependent on which backend is used by the
+ * context, either GL or D3D (possible in future).
+ */
+ void resetContext(uint32_t state = kAll_GrBackendState);
+
+ /**
+ * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
+ * the GrContext has modified the bound texture will have texture id 0 bound. This does not
+ * flush the GrContext. Calling resetContext() does not change the set that will be bound
+ * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
+ * all unit/target combinations are considered to have unmodified bindings until the GrContext
+ * subsequently modifies them (meaning if this is called twice in a row with no intervening
+ * GrContext usage then the second call is a no-op.)
+ */
+ void resetGLTextureBindings();
+
+ /**
+ * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
+ * usable. Call this if you have lost the associated GPU context, and thus internal texture,
+ * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
+ * GrContext and any of its created resource objects will not make backend 3D API calls. Content
+ * rendered but not previously flushed may be lost. After this function is called all subsequent
+ * calls on the GrContext will fail or be no-ops.
+ *
+ * The typical use case for this function is that the underlying 3D context was lost and further
+ * API calls may crash.
+ */
+ void abandonContext() override;
+
+ /**
+ * Returns true if the context was abandoned.
+ */
+ using GrImageContext::abandoned;
+
+ /**
+ * This is similar to abandonContext() however the underlying 3D context is not yet lost and
+ * the GrContext will cleanup all allocated resources before returning. After returning it will
+ * assume that the underlying context may no longer be valid.
+ *
+ * The typical use case for this function is that the client is going to destroy the 3D context
+ * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
+ * elsewhere by either the client or Skia objects).
+ */
+ virtual void releaseResourcesAndAbandonContext();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Resource Cache
+
+ /** DEPRECATED
+ * Return the current GPU resource cache limits.
+ *
+ * @param maxResources If non-null, will be set to -1.
+ * @param maxResourceBytes If non-null, returns maximum number of bytes of
+ * video memory that can be held in the cache.
+ */
+ void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
+
+ /**
+ * Return the current GPU resource cache limit in bytes.
+ */
+ size_t getResourceCacheLimit() const;
+
+ /**
+ * Gets the current GPU resource cache usage.
+ *
+ * @param resourceCount If non-null, returns the number of resources that are held in the
+ * cache.
+     * @param resourceBytes If non-null, returns the total number of bytes of video memory held
+ * in the cache.
+ */
+ void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
+
+ /**
+ * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
+ */
+ size_t getResourceCachePurgeableBytes() const;
+
+ /** DEPRECATED
+ * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
+ * limit, it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResources Unused.
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
+
+ /**
+ * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
+ * it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimit(size_t maxResourceBytes);
+
+ /**
+     * Frees GPU resources created by the context. Can be called to reduce GPU
+     * memory pressure.
+ */
+ virtual void freeGpuResources();
+
+ /**
+ * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
+ * otherwise marked for deletion, regardless of whether the context is under budget.
+ */
+ void performDeferredCleanup(std::chrono::milliseconds msNotUsed);
+
+ // Temporary compatibility API for Android.
+ void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
+ this->performDeferredCleanup(msNotUsed);
+ }
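+
+    // Editorial usage sketch (not upstream): applications commonly call this on a timer
+    // to trim resources that have sat idle; the five-second threshold below is arbitrary.
+    //
+    //   context->performDeferredCleanup(std::chrono::milliseconds(5000));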
+
+ /**
+     * Purge unlocked resources from the cache until the provided byte count has been reached
+ * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
+ * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
+ * resource types.
+ *
+     * @param bytesToPurge the desired number of bytes to be purged.
+ * @param preferScratchResources If true scratch resources will be purged prior to other
+ * resource types.
+ */
+ void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
+
+ /**
+ * This entry point is intended for instances where an app has been backgrounded or
+ * suspended.
+ * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
+ * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
+ * then all unlocked resources will be purged.
+ * In either case, after the unlocked resources are purged a separate pass will be made to
+ * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
+ * some resources with persistent data may be purged to be under budget).
+ *
+     * @param scratchResourcesOnly If true, only unlocked scratch resources will be purged
+     *                             prior to enforcing the budget requirements.
+ */
+ void purgeUnlockedResources(bool scratchResourcesOnly);
+
+ /**
+ * Gets the maximum supported texture size.
+ */
+ int maxTextureSize() const;
+
+ /**
+ * Gets the maximum supported render target size.
+ */
+ int maxRenderTargetSize() const;
+
+ /**
+ * Can a SkImage be created with the given color type.
+ */
+ bool colorTypeSupportedAsImage(SkColorType) const;
+
+ /**
+ * Can a SkSurface be created with the given color type. To check whether MSAA is supported
+ * use maxSurfaceSampleCountForColorType().
+ */
+ bool colorTypeSupportedAsSurface(SkColorType colorType) const {
+ if (kR8G8_unorm_SkColorType == colorType ||
+ kR16G16_unorm_SkColorType == colorType ||
+ kA16_unorm_SkColorType == colorType ||
+ kA16_float_SkColorType == colorType ||
+ kR16G16_float_SkColorType == colorType ||
+ kR16G16B16A16_unorm_SkColorType == colorType ||
+ kGray_8_SkColorType == colorType) {
+ return false;
+ }
+
+ return this->maxSurfaceSampleCountForColorType(colorType) > 0;
+ }
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ int maxSurfaceSampleCountForColorType(SkColorType) const;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Misc.
+
+
+ /**
+ * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ * executing any more commands on the GPU. Skia will take ownership of the underlying semaphores
+ * and delete them once they have been signaled and waited on. If this call returns false, then
+ * the GPU back-end will not wait on any passed in semaphores, and the client will still own the
+ * semaphores.
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores);
+
+ /**
+ * Call to ensure all drawing to the context has been issued to the underlying 3D API.
+ */
+ void flush() {
+ this->flush(GrFlushInfo(), GrPrepareForExternalIORequests());
+ }
+
+ /**
+ * Call to ensure all drawing to the context has been issued to the underlying 3D API.
+ *
+ * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
+ * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
+ * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
+ * context will still be flushed. It should be emphasized that a return value of
+ * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
+ * no semaphores submitted to the GPU. A caller should only take this as a failure if they
+ * passed in semaphores to be submitted.
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo& info) {
+ return this->flush(info, GrPrepareForExternalIORequests());
+ }
+
+ /**
+ * Call to ensure all drawing to the context has been issued to the underlying 3D API.
+ *
+ * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
+ * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
+ * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
+ * context will still be flushed. It should be emphasized that a return value of
+ * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
+ * no semaphores submitted to the GPU. A caller should only take this as a failure if they
+ * passed in semaphores to be submitted.
+ *
+ * If the GrPrepareForExternalIORequests contains valid gpu backed SkSurfaces or SkImages, Skia
+ * will put the underlying backend objects into a state that is ready for external uses. See
+     * declaration of GrPrepareForExternalIORequests for more details.
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo&, const GrPrepareForExternalIORequests&);
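+
+    // Editorial usage sketch (not upstream): flushing with one signal semaphore and
+    // checking whether it was actually submitted. 'semaphore' is a hypothetical
+    // GrBackendSemaphore initialized for the active backend.
+    //
+    //   GrFlushInfo info;
+    //   info.fNumSemaphores = 1;
+    //   info.fSignalSemaphores = &semaphore;
+    //   if (context->flush(info) == GrSemaphoresSubmitted::kNo) {
+    //       // The flush still occurred, but nothing should wait on 'semaphore'.
+    //   }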
+
+ /**
+ * Deprecated.
+ */
+ GrSemaphoresSubmitted flush(GrFlushFlags flags, int numSemaphores,
+ GrBackendSemaphore signalSemaphores[],
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr) {
+ GrFlushInfo info;
+ info.fFlags = flags;
+ info.fNumSemaphores = numSemaphores;
+ info.fSignalSemaphores = signalSemaphores;
+ info.fFinishedProc = finishedProc;
+ info.fFinishedContext = finishedContext;
+ return this->flush(info);
+ }
+
+ /**
+ * Deprecated.
+ */
+ GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
+ GrBackendSemaphore signalSemaphores[]) {
+ GrFlushInfo info;
+ info.fNumSemaphores = numSemaphores;
+ info.fSignalSemaphores = signalSemaphores;
+ return this->flush(info);
+ }
+
+ /**
+ * Checks whether any asynchronous work is complete and if so calls related callbacks.
+ */
+ void checkAsyncWorkCompletion();
+
+ // Provides access to functions that aren't part of the public API.
+ GrContextPriv priv();
+ const GrContextPriv priv() const;
+
+ /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
+ // Chrome is using this!
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ bool supportsDistanceFieldText() const;
+
+ void storeVkPipelineCacheData();
+
+    // Returns the gpu memory size of the texture that backs the passed in SkImage. Returns 0 if
+ // the SkImage is not texture backed.
+ static size_t ComputeImageSize(sk_sp<SkImage> image, GrMipMapped, bool useNextPow2 = false);
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ /*
+ * The explicitly allocated backend texture API allows clients to use Skia to create backend
+ * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
+ *
+ * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
+ * before deleting the GrContext used to create them. Additionally, clients should only
+ * delete these objects on the thread for which that GrContext is active.
+ *
+ * The client is responsible for ensuring synchronization between different uses
+ * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
+     * surface, rewrapping it in an image and drawing the image will require explicit
+     * synchronization on the client's part).
+ */
+
+ // If possible, create an uninitialized backend texture. The client should ensure that the
+ // returned backend texture is valid.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_UNDEFINED.
+ GrBackendTexture createBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ GrMipMapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo);
+
+ // If possible, create an uninitialized backend texture. The client should ensure that the
+ // returned backend texture is valid.
+ // If successful, the created backend texture will be compatible with the provided
+ // SkColorType.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_UNDEFINED.
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ GrMipMapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo);
+
+
+ // If possible, create an uninitialized backend texture that is compatible with the
+ // provided characterization. The client should ensure that the returned backend texture
+ // is valid.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_UNDEFINED.
+ GrBackendTexture createBackendTexture(const SkSurfaceCharacterization& characterization);
+
+ // If possible, create a backend texture initialized to a particular color. The client should
+ // ensure that the returned backend texture is valid.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL if renderable is kNo
+ // and VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL if renderable is kYes
+ GrBackendTexture createBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const SkColor4f& color,
+ GrMipMapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo);
+
+ // If possible, create a backend texture initialized to a particular color. The client should
+ // ensure that the returned backend texture is valid.
+ // If successful, the created backend texture will be compatible with the provided
+ // SkColorType.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL if renderable is kNo
+ // and VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL if renderable is kYes
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ const SkColor4f& color,
+ GrMipMapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo);
+
+ // If possible, create a backend texture initialized to a particular color that is
+ // compatible with the provided characterization. The client should ensure that the
+ // returned backend texture is valid.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ GrBackendTexture createBackendTexture(const SkSurfaceCharacterization& characterization,
+ const SkColor4f& color);
+
+ // If possible, create a backend texture initialized with the provided pixmap data. The client
+ // should ensure that the returned backend texture is valid.
+ // If successful, the created backend texture will be compatible with the provided
+ // pixmap(s). Compatible, in this case, means that the backend format will be the result
+ // of calling defaultBackendFormat on the base pixmap's colortype.
+ // If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
+ // the data for all the mipmap levels must be provided. In the mipmapped case all the
+ // colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
+ // must be sized correctly (please see SkMipMap::ComputeLevelSize and ComputeLevelCount).
+ // Note: the pixmap's alphatypes and colorspaces are ignored.
+ // For the Vulkan backend the layout of the created VkImage will be:
+ // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ // regardless of the renderability setting
+ GrBackendTexture createBackendTexture(const SkPixmap srcData[], int numLevels,
+ GrRenderable, GrProtected);
+
+ // Helper version of above for a single level.
+ GrBackendTexture createBackendTexture(const SkPixmap& srcData,
+ GrRenderable renderable,
+ GrProtected isProtected) {
+ return this->createBackendTexture(&srcData, 1, renderable, isProtected);
+ }
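+
+    // Editorial usage sketch (not upstream): creating a non-mipmapped backend texture
+    // from CPU pixel data. 'pixmap' is a hypothetical, fully initialized SkPixmap.
+    //
+    //   GrBackendTexture tex =
+    //           context->createBackendTexture(pixmap, GrRenderable::kNo, GrProtected::kNo);
+    //   if (!tex.isValid()) {
+    //       // Creation failed; do not wrap or use the texture.
+    //   }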
+
+ void deleteBackendTexture(GrBackendTexture);
+
+ // This interface allows clients to pre-compile shaders and populate the runtime program cache.
+ // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
+ //
+ // Steps to use this API:
+ //
+ // 1) Create a GrContext as normal, but set fPersistentCache on GrContextOptions to something
+ // that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This will
+ // ensure that the blobs are SkSL, and are suitable for pre-compilation.
+ // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
+ //
+ // 3) Switch over to shipping your application. Include the key/data pairs from above.
+ // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
+ // This will compile the SkSL to create a GL program, and populate the runtime cache.
+ //
+ // This is only guaranteed to work if the context/device used in step #2 are created in the
+ // same way as the one used in step #4, and the same GrContextOptions are specified.
+    // Using cached shader blobs on a different device or driver is undefined.
+ bool precompileShader(const SkData& key, const SkData& data);
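+
+    // Editorial usage sketch (not upstream): replaying SkSL key/data pairs captured in
+    // a previous run (step #4 above). 'savedPairs' is a hypothetical container of
+    // sk_sp<SkData> pairs recorded through the PersistentCache.
+    //
+    //   for (const auto& [key, data] : savedPairs) {
+    //       context->precompileShader(*key, *data);
+    //   }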
+
+#ifdef SK_ENABLE_DUMP_GPU
+ /** Returns a string with detailed information about the context & GPU, in JSON format. */
+ SkString dump() const;
+#endif
+
+protected:
+ GrContext(GrBackendApi, const GrContextOptions&, int32_t contextID = SK_InvalidGenID);
+
+ bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;
+
+ GrContext* asDirectContext() override { return this; }
+
+ virtual GrAtlasManager* onGetAtlasManager() = 0;
+
+ sk_sp<GrContextThreadSafeProxy> fThreadSafeProxy;
+
+private:
+ // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
+ // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
+ // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
+ // invoked after objects they depend upon have already been destroyed.
+ std::unique_ptr<SkTaskGroup> fTaskGroup;
+ sk_sp<GrGpu> fGpu;
+ GrResourceCache* fResourceCache;
+ GrResourceProvider* fResourceProvider;
+
+ bool fDidTestPMConversions;
+ // true if the PM/UPM conversion succeeded; false otherwise
+ bool fPMUPMConversionsRoundTrip;
+
+ GrContextOptions::PersistentCache* fPersistentCache;
+ GrContextOptions::ShaderErrorHandler* fShaderErrorHandler;
+
+ std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
+
+ // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
+ friend class GrContextPriv;
+
+ typedef GrRecordingContext INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrContextOptions.h b/gfx/skia/skia/include/gpu/GrContextOptions.h
new file mode 100644
index 0000000000..6d1d6ca841
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextOptions.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextOptions_DEFINED
+#define GrContextOptions_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrDriverBugWorkarounds.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+#include <vector>
+
+class SkExecutor;
+
+#if SK_SUPPORT_GPU
+struct SK_API GrContextOptions {
+ enum class Enable {
+ /** Forces an option to be disabled. */
+ kNo,
+ /** Forces an option to be enabled. */
+ kYes,
+ /**
+ * Uses Skia's default behavior, which may use runtime properties (e.g. driver version).
+ */
+ kDefault
+ };
+
+ enum class ShaderCacheStrategy {
+ kSkSL,
+ kBackendSource,
+ kBackendBinary,
+ };
+
+ /**
+ * Abstract class which stores Skia data in a cache that persists between sessions. Currently,
+ * Skia stores compiled shader binaries (only when glProgramBinary / glGetProgramBinary are
+ * supported) when provided a persistent cache, but this may extend to other data in the future.
+ */
+ class SK_API PersistentCache {
+ public:
+ virtual ~PersistentCache() {}
+
+ /**
+ * Returns the data for the key if it exists in the cache, otherwise returns null.
+ */
+ virtual sk_sp<SkData> load(const SkData& key) = 0;
+
+ virtual void store(const SkData& key, const SkData& data) = 0;
+ };
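+
+    // Editorial sketch of a minimal in-memory implementation (not upstream); a real
+    // cache would persist its entries to disk between sessions, and the std::string
+    // keying below is a simplification that copies the key bytes.
+    //
+    //   class MemoryCache : public GrContextOptions::PersistentCache {
+    //   public:
+    //       sk_sp<SkData> load(const SkData& key) override {
+    //           auto it = fMap.find(std::string((const char*)key.data(), key.size()));
+    //           return it == fMap.end() ? nullptr : it->second;
+    //       }
+    //       void store(const SkData& key, const SkData& data) override {
+    //           fMap[std::string((const char*)key.data(), key.size())] =
+    //                   SkData::MakeWithCopy(data.data(), data.size());
+    //       }
+    //   private:
+    //       std::map<std::string, sk_sp<SkData>> fMap;
+    //   };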
+
+ /**
+ * Abstract class to report errors when compiling shaders. If fShaderErrorHandler is present,
+ * it will be called to report any compilation failures. Otherwise, failures will be reported
+ * via SkDebugf and asserts.
+ */
+ class SK_API ShaderErrorHandler {
+ public:
+ virtual ~ShaderErrorHandler() {}
+ virtual void compileError(const char* shader, const char* errors) = 0;
+ };
+
+ GrContextOptions() {}
+
+ // Suppress prints for the GrContext.
+ bool fSuppressPrints = false;
+
+ /** Overrides: These options override feature detection using backend API queries. These
+ overrides can only reduce the feature set or limits, never increase them beyond the
+ detected values. */
+
+ int fMaxTextureSizeOverride = SK_MaxS32;
+
+    /** The threshold in bytes above which we will use a buffer mapping API to map vertex and index
+ buffers to CPU memory in order to update them. A value of -1 means the GrContext should
+ deduce the optimal value for this platform. */
+ int fBufferMapThreshold = -1;
+
+ /**
+ * Executor to handle threaded work within Ganesh. If this is nullptr, then all work will be
+ * done serially on the main thread. To have worker threads assist with various tasks, set this
+ * to a valid SkExecutor instance. Currently, used for software path rendering, but may be used
+ * for other tasks.
+ */
+ SkExecutor* fExecutor = nullptr;
+
+ /** Construct mipmaps manually, via repeated downsampling draw-calls. This is used when
+ the driver's implementation (glGenerateMipmap) contains bugs. This requires mipmap
+ level and LOD control (ie desktop or ES3). */
+ bool fDoManualMipmapping = false;
+
+ /**
+ * Disables the use of coverage counting shortcuts to render paths. Coverage counting can cause
+ * artifacts along shared edges if care isn't taken to ensure both contours wind in the same
+ * direction.
+ */
+ // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
+ bool fDisableCoverageCountingPaths = true;
+
+ /**
+ * Disables distance field rendering for paths. Distance field computation can be expensive,
+ * and yields no benefit if a path is not rendered multiple times with different transforms.
+ */
+ bool fDisableDistanceFieldPaths = false;
+
+ /**
+ * If true this allows path mask textures to be cached. This is only really useful if paths
+ * are commonly rendered at the same scale and fractional translation.
+ */
+ bool fAllowPathMaskCaching = true;
+
+ /**
+ * If true, the GPU will not be used to perform YUV -> RGB conversion when generating
+ * textures from codec-backed images.
+ */
+ bool fDisableGpuYUVConversion = false;
+
+ /**
+ * The maximum size of cache textures used for Skia's Glyph cache.
+ */
+ size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4;
+
+ /**
+ * Below this threshold size in device space distance field fonts won't be used. Distance field
+ * fonts don't support hinting which is more important at smaller sizes. A negative value means
+ * use the default threshold.
+ */
+ float fMinDistanceFieldFontSize = -1.f;
+
+ /**
+ * Above this threshold size in device space glyphs are drawn as individual paths. A negative
+ * value means use the default threshold.
+ */
+ float fGlyphsAsPathsFontSize = -1.f;
+
+ /**
+     * Can the glyph atlas use multiple textures. If allowed, each texture's size is bounded by
+     * fGlyphCacheTextureMaximumBytes.
+ */
+ Enable fAllowMultipleGlyphCacheTextures = Enable::kDefault;
+
+ /**
+ * Bugs on certain drivers cause stencil buffers to leak. This flag causes Skia to avoid
+ * allocating stencil buffers and use alternate rasterization paths, avoiding the leak.
+ */
+ bool fAvoidStencilBuffers = false;
+
+ /**
+ * If true, texture fetches from mip-mapped textures will be biased to read larger MIP levels.
+ * This has the effect of sharpening those textures, at the cost of some aliasing, and possible
+ * performance impact.
+ */
+ bool fSharpenMipmappedTextures = false;
+
+ /**
+ * Enables driver workaround to use draws instead of HW clears, e.g. glClear on the GL backend.
+ */
+ Enable fUseDrawInsteadOfClear = Enable::kDefault;
+
+ /**
+ * Allow Ganesh to more aggressively reorder operations.
+ * Eventually this will just be what is done and will not be optional.
+ */
+ Enable fReduceOpsTaskSplitting = Enable::kDefault;
+
+ /**
+ * Some ES3 contexts report the ES2 external image extension, but not the ES3 version.
+ * If support for external images is critical, enabling this option will cause Ganesh to limit
+ * shaders to the ES2 shading language in that situation.
+ */
+ bool fPreferExternalImagesOverES3 = false;
+
+ /**
+ * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
+     * This does not affect code path choices that are made for performance reasons nor does it
+ * override other GrContextOption settings.
+ */
+ bool fDisableDriverCorrectnessWorkarounds = false;
+
+ /**
+ * Maximum number of GPU programs or pipelines to keep active in the runtime cache.
+ */
+ int fRuntimeProgramCacheSize = 256;
+
+ /**
+ * Cache in which to store compiled shader binaries between runs.
+ */
+ PersistentCache* fPersistentCache = nullptr;
+
+ /**
+ * This affects the usage of the PersistentCache. We can cache SkSL, backend source (GLSL), or
+ * backend binaries (GL program binaries). By default we cache binaries, but if the driver's
+ * binary loading/storing is believed to have bugs, this can be limited to caching GLSL.
+ * Caching GLSL strings still saves CPU work when a GL program is created.
+ */
+ ShaderCacheStrategy fShaderCacheStrategy = ShaderCacheStrategy::kBackendBinary;
+
+ /**
+ * If present, use this object to report shader compilation failures. If not, report failures
+ * via SkDebugf and assert.
+ */
+ ShaderErrorHandler* fShaderErrorHandler = nullptr;
+
+ /**
+ * Specifies the number of samples Ganesh should use when performing internal draws with MSAA or
+ * mixed samples (hardware capabilities permitting).
+ *
+ * If 0, Ganesh will disable internal code paths that use multisampling.
+ */
+ int fInternalMultisampleCount = 4;
+
+#if GR_TEST_UTILS
+ /**
+ * Private options that are only meant for testing within Skia's tools.
+ */
+
+ /**
+ * If non-zero, overrides the maximum size of a tile for sw-backed images and bitmaps rendered
+ * by SkGpuDevice.
+ */
+ int fMaxTileSizeOverride = 0;
+
+ /**
+ * Prevents use of dual source blending, to test that all xfer modes work correctly without it.
+ */
+ bool fSuppressDualSourceBlending = false;
+
+ /**
+ * If true, the caps will never support geometry shaders.
+ */
+ bool fSuppressGeometryShaders = false;
+
+ /**
+ * Render everything in wireframe
+ */
+ bool fWireframeMode = false;
+
+ /**
+ * Enforces clearing of all textures when they're created.
+ */
+ bool fClearAllTextures = false;
+
+ /**
+ * Include or exclude specific GPU path renderers.
+ */
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kAll;
+#endif
+
+#if SK_SUPPORT_ATLAS_TEXT
+ /**
+ * Controls whether distance field glyph vertices always have 3 components even when the view
+ * matrix does not have perspective.
+ */
+ Enable fDistanceFieldGlyphVerticesAlwaysHaveW = Enable::kDefault;
+#endif
+
+ GrDriverBugWorkarounds fDriverBugWorkarounds;
+};
+#else
+struct GrContextOptions {
+ struct PersistentCache {};
+};
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
new file mode 100644
index 0000000000..b329da0c8a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextThreadSafeProxy_DEFINED
+#define GrContextThreadSafeProxy_DEFINED
+
+#include "include/private/GrContext_Base.h"
+
+class GrBackendFormat;
+class GrContextThreadSafeProxyPriv;
+struct SkImageInfo;
+class SkSurfaceCharacterization;
+
+/**
+ * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
+ * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
+ */
+class SK_API GrContextThreadSafeProxy : public GrContext_Base {
+public:
+ ~GrContextThreadSafeProxy() override;
+
+ /**
+ * Create a surface characterization for a DDL that will be replayed into the GrContext
+ * that created this proxy. On failure the resulting characterization will be invalid (i.e.,
+ * "!c.isValid()").
+ *
+ * @param cacheMaxResourceBytes The max resource bytes limit that will be in effect when the
+ * DDL created with this characterization is replayed.
+ * Note: the contract here is that the DDL will be created as
+ * if it had a full 'cacheMaxResourceBytes' to use. If replayed
+ * into a GrContext that already has locked GPU memory, the
+ * replay can exceed the budget. To rephrase, all resource
+ * allocation decisions are made at record time and at playback
+ * time the budget limits will be ignored.
+ * @param ii The image info specifying properties of the SkSurface that
+ * the DDL created with this characterization will be replayed
+ * into.
+ * Note: Ganesh doesn't make use of the SkImageInfo's alphaType
+ * @param backendFormat Information about the format of the GPU surface that will
+ * back the SkSurface upon replay
+ * @param sampleCount The sample count of the SkSurface that the DDL created with
+ * this characterization will be replayed into
+ * @param origin The origin of the SkSurface that the DDL created with this
+ * characterization will be replayed into
+ * @param surfaceProps The surface properties of the SkSurface that the DDL created
+ * with this characterization will be replayed into
+ * @param isMipMapped Will the surface the DDL will be replayed into have space
+ * allocated for mipmaps?
+ * @param willUseGLFBO0 Will the surface the DDL will be replayed into be backed by GL
+     *                          FBO 0. This flag is only valid if using a GL backend.
+ * @param isTextureable Will the surface be able to act as a texture?
+ * @param isProtected Will the (Vulkan) surface be DRM protected?
+ */
+ SkSurfaceCharacterization createCharacterization(
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii, const GrBackendFormat& backendFormat,
+ int sampleCount, GrSurfaceOrigin origin,
+ const SkSurfaceProps& surfaceProps,
+ bool isMipMapped,
+ bool willUseGLFBO0 = false,
+ bool isTextureable = true,
+ GrProtected isProtected = GrProtected::kNo);
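+
+    // Editorial usage sketch (not upstream): characterizing a 1024x768 N32 surface for
+    // DDL recording. 'format' is a hypothetical GrBackendFormat, e.g. obtained from
+    // defaultBackendFormat() below.
+    //
+    //   SkImageInfo ii = SkImageInfo::MakeN32Premul(1024, 768);
+    //   SkSurfaceCharacterization c = proxy->createCharacterization(
+    //           64 * 1024 * 1024, ii, format, 1, kTopLeft_GrSurfaceOrigin,
+    //           SkSurfaceProps(0, kUnknown_SkPixelGeometry), /*isMipMapped=*/false);
+    //   if (!c.isValid()) {
+    //       // This combination is not supported by the generating context.
+    //   }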
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ bool operator==(const GrContextThreadSafeProxy& that) const {
+ // Each GrContext should only ever have a single thread-safe proxy.
+ SkASSERT((this == &that) == (this->contextID() == that.contextID()));
+ return this == &that;
+ }
+
+ bool operator!=(const GrContextThreadSafeProxy& that) const { return !(*this == that); }
+
+ // Provides access to functions that aren't part of the public API.
+ GrContextThreadSafeProxyPriv priv();
+ const GrContextThreadSafeProxyPriv priv() const;
+
+private:
+ friend class GrContextThreadSafeProxyPriv; // for ctor and hidden methods
+
+ // DDL TODO: need to add unit tests for backend & maybe options
+ GrContextThreadSafeProxy(GrBackendApi, const GrContextOptions&, uint32_t contextID);
+
+ bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;
+
+ typedef GrContext_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
new file mode 100644
index 0000000000..3f370346fd
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDriverBugWorkarounds_DEFINED
+#define GrDriverBugWorkarounds_DEFINED
+
+// External embedders of Skia can override this to use their own list
+// of workaround names.
+#ifdef SK_GPU_WORKAROUNDS_HEADER
+#include SK_GPU_WORKAROUNDS_HEADER
+#else
+// To regenerate this file, set gn arg "skia_generate_workarounds = true".
+// This is not rebuilt by default to avoid embedders having to have extra
+// build steps.
+#include "include/gpu/GrDriverBugWorkaroundsAutogen.h"
+#endif
+
+#include "include/core/SkTypes.h"
+
+#include <stdint.h>
+#include <vector>
+
+enum GrDriverBugWorkaroundType {
+#define GPU_OP(type, name) type,
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES
+};
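+
+// Editorial note: GPU_DRIVER_BUG_WORKAROUNDS is an X-macro list. An entry such as
+// GPU_OP(GL_CLEAR_BROKEN, gl_clear_broken) expands here to the enumerator
+// GL_CLEAR_BROKEN and, inside GrDriverBugWorkarounds below, to the member
+// "bool gl_clear_broken = false;".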
+
+class SK_API GrDriverBugWorkarounds {
+ public:
+ GrDriverBugWorkarounds();
+ explicit GrDriverBugWorkarounds(const std::vector<int32_t>& workarounds);
+
+ GrDriverBugWorkarounds& operator=(const GrDriverBugWorkarounds&) = default;
+
+ // Turn on any workarounds listed in |workarounds| (but don't turn any off).
+ void applyOverrides(const GrDriverBugWorkarounds& workarounds);
+
+ ~GrDriverBugWorkarounds();
+
+#define GPU_OP(type, name) bool name = false;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
new file mode 100644
index 0000000000..a4db30e66d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from build_workaround_header.py
+// DO NOT EDIT!
+
+#define GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) \
+ GPU_OP(ADD_AND_TRUE_TO_LOOP_CONDITION, \
+ add_and_true_to_loop_condition) \
+ GPU_OP(DISABLE_BLEND_EQUATION_ADVANCED, \
+ disable_blend_equation_advanced) \
+ GPU_OP(DISABLE_DISCARD_FRAMEBUFFER, \
+ disable_discard_framebuffer) \
+ GPU_OP(DISABLE_TEXTURE_STORAGE, \
+ disable_texture_storage) \
+ GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \
+ disallow_large_instanced_draw) \
+ GPU_OP(EMULATE_ABS_INT_FUNCTION, \
+ emulate_abs_int_function) \
+ GPU_OP(FLUSH_ON_FRAMEBUFFER_CHANGE, \
+ flush_on_framebuffer_change) \
+ GPU_OP(GL_CLEAR_BROKEN, \
+ gl_clear_broken) \
+ GPU_OP(MAX_FRAGMENT_UNIFORM_VECTORS_32, \
+ max_fragment_uniform_vectors_32) \
+ GPU_OP(MAX_MSAA_SAMPLE_COUNT_4, \
+ max_msaa_sample_count_4) \
+ GPU_OP(MAX_TEXTURE_SIZE_LIMIT_4096, \
+ max_texture_size_limit_4096) \
+ GPU_OP(PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER, \
+ pack_parameters_workaround_with_pack_buffer) \
+ GPU_OP(REMOVE_POW_WITH_CONSTANT_EXPONENT, \
+ remove_pow_with_constant_exponent) \
+ GPU_OP(RESTORE_SCISSOR_ON_FBO_CHANGE, \
+ restore_scissor_on_fbo_change) \
+ GPU_OP(REWRITE_DO_WHILE_LOOPS, \
+ rewrite_do_while_loops) \
+ GPU_OP(UNBIND_ATTACHMENTS_ON_BOUND_RENDER_FBO_DELETE, \
+ unbind_attachments_on_bound_render_fbo_delete) \
+ GPU_OP(UNFOLD_SHORT_CIRCUIT_AS_TERNARY_OPERATION, \
+ unfold_short_circuit_as_ternary_operation) \
+// The End
diff --git a/gfx/skia/skia/include/gpu/GrGpuResource.h b/gfx/skia/skia/include/gpu/GrGpuResource.h
new file mode 100644
index 0000000000..3eb98381e8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrGpuResource.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResource_DEFINED
+#define GrGpuResource_DEFINED
+
+#include "include/private/GrResourceKey.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkNoncopyable.h"
+
+class GrContext;
+class GrGpu;
+class GrResourceCache;
+class SkTraceMemoryDump;
+
+/**
+ * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
+ * Separated out as a base class to isolate the ref-cnting behavior and provide friendship without
+ * exposing all of GrGpuResource.
+ *
+ * PRIOR to the last ref being removed DERIVED::notifyRefCntWillBeZero() will be called
+ * (static polymorphism using CRTP). It is legal for additional refs to be added
+ * during this time. AFTER the ref count reaches zero DERIVED::notifyRefCntIsZero() will be
+ * called.
+ */
+template <typename DERIVED> class GrIORef : public SkNoncopyable {
+public:
+ bool unique() const { return fRefCnt == 1; }
+
+ void ref() const {
+ // Only the cache should be able to add the first ref to a resource.
+ SkASSERT(this->getRefCnt() > 0);
+ // No barrier required.
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ void unref() const {
+ SkASSERT(this->getRefCnt() > 0);
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // At this point we better be the only thread accessing this resource.
+ // Trick out the notifyRefCntWillBeZero() call by adding back one more ref.
+ fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ static_cast<const DERIVED*>(this)->notifyRefCntWillBeZero();
+ // notifyRefCntWillBeZero() could have done anything, including re-refing this and
+ // passing on to another thread. Take away the ref-count we re-added above and see
+ // if we're back to zero.
+ // TODO: Consider making it so that refs can't be added and merge
+ // notifyRefCntWillBeZero()/willRemoveLastRef() with notifyRefCntIsZero().
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ static_cast<const DERIVED*>(this)->notifyRefCntIsZero();
+ }
+ }
+ }
+
+#if GR_TEST_UTILS
+ int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
+#endif
+
+protected:
+ friend class GrResourceCache; // for internalHasRef
+
+ GrIORef() : fRefCnt(1) {}
+
+ bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
+
+ // Privileged method that allows going from ref count = 0 to ref count = 1.
+ void addInitialRef() const {
+ SkASSERT(fRefCnt >= 0);
+ // No barrier required.
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+private:
+ int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }
+
+ mutable std::atomic<int32_t> fRefCnt;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+/**
+ * Base class for objects that can be kept in the GrResourceCache.
+ */
+class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
+public:
+ /**
+     * Tests whether an object has been abandoned or released. All objects will
+ * be in this state after their creating GrContext is destroyed or has
+ * contextLost called. It's up to the client to test wasDestroyed() before
+ * attempting to use an object if it holds refs on objects across
+ * ~GrContext, freeResources with the force flag, or contextLost.
+ *
+ * @return true if the object has been released or abandoned,
+ * false otherwise.
+ */
+ bool wasDestroyed() const { return nullptr == fGpu; }
+
+ /**
+ * Retrieves the context that owns the object. Note that it is possible for
+ * this to return NULL. When objects have been release()ed or abandon()ed
+ * they no longer have an owning context. Destroying a GrContext
+ * automatically releases all its resources.
+ */
+ const GrContext* getContext() const;
+ GrContext* getContext();
+
+ /**
+ * Retrieves the amount of GPU memory used by this resource in bytes. It is
+ * approximate since we aren't aware of additional padding or copies made
+ * by the driver.
+ *
+ * @return the amount of GPU memory used in bytes
+ */
+ size_t gpuMemorySize() const {
+ if (kInvalidGpuMemorySize == fGpuMemorySize) {
+ fGpuMemorySize = this->onGpuMemorySize();
+ SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
+ }
+ return fGpuMemorySize;
+ }
+
+ class UniqueID {
+ public:
+ UniqueID() = default;
+
+ explicit UniqueID(uint32_t id) : fID(id) {}
+
+ uint32_t asUInt() const { return fID; }
+
+ bool operator==(const UniqueID& other) const { return fID == other.fID; }
+ bool operator!=(const UniqueID& other) const { return !(*this == other); }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isInvalid() const { return fID == SK_InvalidUniqueID; }
+
+ protected:
+ uint32_t fID = SK_InvalidUniqueID;
+ };
+
+ /**
+ * Gets an id that is unique for this GrGpuResource object. It is static in that it does
+ * not change when the content of the GrGpuResource object changes. This will never return
+ * 0.
+ */
+ UniqueID uniqueID() const { return fUniqueID; }
+
+ /** Returns the current unique key for the resource. It will be invalid if the resource has no
+ associated unique key. */
+ const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by the cache.
+ */
+ class CacheAccess;
+ inline CacheAccess cacheAccess();
+ inline const CacheAccess cacheAccess() const;
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
+ */
+ class ProxyAccess;
+ inline ProxyAccess proxyAccess();
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by internal code.
+ */
+ class ResourcePriv;
+ inline ResourcePriv resourcePriv();
+ inline const ResourcePriv resourcePriv() const;
+
+ /**
+ * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
+ * Typically, subclasses should not need to override this, and should only
+ * need to override setMemoryBacking.
+ **/
+ virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ /**
+ * Describes the type of gpu resource that is represented by the implementing
+ * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
+ * purposes by dumpMemoryStatistics().
+ *
+ * The value returned is expected to be long lived and will not be copied by the caller.
+ */
+ virtual const char* getResourceType() const = 0;
+
+ static uint32_t CreateUniqueID();
+
+protected:
+    // This must be called by every non-wrapped GrGpuResource. It should be called once the object is
+ // fully initialized (i.e. only from the constructors of the final class).
+ void registerWithCache(SkBudgeted);
+
+    // This must be called by every GrGpuResource that references any wrapped backend objects. It
+ // should be called once the object is fully initialized (i.e. only from the constructors of the
+ // final class).
+ void registerWithCacheWrapped(GrWrapCacheable);
+
+ GrGpuResource(GrGpu*);
+ virtual ~GrGpuResource();
+
+ GrGpu* getGpu() const { return fGpu; }
+
+ /** Overridden to free GPU resources in the backend API. */
+ virtual void onRelease() { }
+ /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
+ This may be called when the underlying 3D context is no longer valid and so no
+ backend API calls should be made. */
+ virtual void onAbandon() { }
+
+ /**
+ * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
+ **/
+ virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
+
+ /**
+ * Returns a string that uniquely identifies this resource.
+ */
+ SkString getResourceName() const;
+
+ /**
+     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
+ * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
+ * to customize various inputs.
+ */
+ void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
+ const char* type, size_t size) const;
+
+
+private:
+ bool isPurgeable() const;
+ bool hasRef() const;
+
+ /**
+ * Called by the registerWithCache if the resource is available to be used as scratch.
+ * Resource subclasses should override this if the instances should be recycled as scratch
+ * resources and populate the scratchKey with the key.
+ * By default resources are not recycled as scratch.
+ **/
+ virtual void computeScratchKey(GrScratchKey*) const {}
+
+ /**
+ * Removes references to objects in the underlying 3D API without freeing them.
+ * Called by CacheAccess.
+ */
+ void abandon();
+
+ /**
+ * Frees the object in the underlying 3D API. Called by CacheAccess.
+ */
+ void release();
+
+ virtual size_t onGpuMemorySize() const = 0;
+
+ /**
+ * Called by GrIORef when a resource is about to lose its last ref
+ */
+ virtual void willRemoveLastRef() {}
+
+ // See comments in CacheAccess and ResourcePriv.
+ void setUniqueKey(const GrUniqueKey&);
+ void removeUniqueKey();
+ void notifyRefCntWillBeZero() const;
+ void notifyRefCntIsZero() const;
+ void removeScratchKey();
+ void makeBudgeted();
+ void makeUnbudgeted();
+
+#ifdef SK_DEBUG
+ friend class GrGpu; // for assert in GrGpu to access getGpu
+#endif
+
+ // An index into a heap when this resource is purgeable or an array when not. This is maintained
+ // by the cache.
+ int fCacheArrayIndex;
+ // This value reflects how recently this resource was accessed in the cache. This is maintained
+ // by the cache.
+ uint32_t fTimestamp;
+ GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;
+
+ static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
+ GrScratchKey fScratchKey;
+ GrUniqueKey fUniqueKey;
+
+ // This is not ref'ed but abandon() or release() will be called before the GrGpu object
+    // is destroyed. Those calls will set this to NULL.
+ GrGpu* fGpu;
+ mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;
+
+ GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
+ bool fRefsWrappedObjects = false;
+ const UniqueID fUniqueID;
+
+ typedef GrIORef<GrGpuResource> INHERITED;
+ friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and notifyRefCntIsZero.
+};
+
+class GrGpuResource::ProxyAccess {
+private:
+ ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
+
+ /** Proxies are allowed to take a resource from no refs to one ref. */
+ void ref(GrResourceCache* cache);
+
+ // No taking addresses of this type.
+ const CacheAccess* operator&() const = delete;
+ CacheAccess* operator&() = delete;
+
+ GrGpuResource* fResource;
+
+ friend class GrGpuResource;
+ friend class GrSurfaceProxy;
+};
+
+inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrSurface.h b/gfx/skia/skia/include/gpu/GrSurface.h
new file mode 100644
index 0000000000..b57ef02b6b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrSurface.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurface_DEFINED
+#define GrSurface_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrGpuResource.h"
+#include "include/gpu/GrTypes.h"
+
+class GrRenderTarget;
+class GrSurfacePriv;
+class GrTexture;
+
+class GrSurface : public GrGpuResource {
+public:
+ /**
+ * Retrieves the width of the surface.
+ */
+ int width() const { return fWidth; }
+
+ /**
+ * Retrieves the height of the surface.
+ */
+ int height() const { return fHeight; }
+
+ /**
+ * Helper that gets the width and height of the surface as a bounding rectangle.
+ */
+ SkRect getBoundsRect() const { return SkRect::MakeIWH(this->width(), this->height()); }
+
+ /**
+ * Retrieves the pixel config specified when the surface was created.
+ * For render targets this can be kUnknown_GrPixelConfig
+ * if client asked us to render to a target that has a pixel
+ * config that isn't equivalent with one of our configs.
+ */
+ GrPixelConfig config() const { return fConfig; }
+
+ virtual GrBackendFormat backendFormat() const = 0;
+
+ SK_API void setRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
+ this->onSetRelease(releaseHelper);
+ fReleaseHelper = std::move(releaseHelper);
+ }
+
+ // These match the definitions in SkImage, from whence they came.
+ // TODO: Remove Chrome's need to call this on a GrTexture
+ typedef void* ReleaseCtx;
+ typedef void (*ReleaseProc)(ReleaseCtx);
+ SK_API void setRelease(ReleaseProc proc, ReleaseCtx ctx) {
+ sk_sp<GrRefCntedCallback> helper(new GrRefCntedCallback(proc, ctx));
+ this->setRelease(std::move(helper));
+ }
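+
+    // Editorial usage sketch (not upstream): notifying client code once Skia is done
+    // with a wrapped backend object. 'MyState' is a hypothetical client-side type.
+    //
+    //   static void OnSurfaceReleased(void* ctx) {
+    //       delete static_cast<MyState*>(ctx);
+    //   }
+    //   surface->setRelease(OnSurfaceReleased, new MyState);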
+
+ /**
+ * @return the texture associated with the surface, may be null.
+ */
+ virtual GrTexture* asTexture() { return nullptr; }
+ virtual const GrTexture* asTexture() const { return nullptr; }
+
+ /**
+ * @return the render target underlying this surface, may be null.
+ */
+ virtual GrRenderTarget* asRenderTarget() { return nullptr; }
+ virtual const GrRenderTarget* asRenderTarget() const { return nullptr; }
+
+ /** Access methods that are only to be used within Skia code. */
+ inline GrSurfacePriv surfacePriv();
+ inline const GrSurfacePriv surfacePriv() const;
+
+ static size_t ComputeSize(const GrCaps&, const GrBackendFormat&, int width, int height,
+ int colorSamplesPerPixel, GrMipMapped, bool binSize = false);
+
+ /**
+ * The pixel values of this surface cannot be modified (e.g. doesn't support write pixels or
+ * MIP map level regen).
+ */
+ bool readOnly() const { return fSurfaceFlags & GrInternalSurfaceFlags::kReadOnly; }
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const { return fIsProtected == GrProtected::kYes; }
+
+protected:
+ void setGLRTFBOIDIs0() {
+ SkASSERT(!this->requiresManualMSAAResolve());
+ SkASSERT(!this->asTexture());
+ SkASSERT(this->asRenderTarget());
+ fSurfaceFlags |= GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ }
+ bool glRTFBOIDis0() const {
+ return fSurfaceFlags & GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ }
+
+ void setRequiresManualMSAAResolve() {
+ SkASSERT(!this->glRTFBOIDis0());
+ SkASSERT(this->asRenderTarget());
+ fSurfaceFlags |= GrInternalSurfaceFlags::kRequiresManualMSAAResolve;
+ }
+ bool requiresManualMSAAResolve() const {
+ return fSurfaceFlags & GrInternalSurfaceFlags::kRequiresManualMSAAResolve;
+ }
+
+ void setReadOnly() {
+ SkASSERT(!this->asRenderTarget());
+ fSurfaceFlags |= GrInternalSurfaceFlags::kReadOnly;
+ }
+
+ // Provides access to methods that should be public within Skia code.
+ friend class GrSurfacePriv;
+
+ GrSurface(GrGpu* gpu, const SkISize& size, GrPixelConfig config, GrProtected isProtected)
+ : INHERITED(gpu)
+ , fConfig(config)
+ , fWidth(size.width())
+ , fHeight(size.height())
+ , fSurfaceFlags(GrInternalSurfaceFlags::kNone)
+ , fIsProtected(isProtected) {}
+
+ ~GrSurface() override {
+ // check that invokeReleaseProc has been called (if needed)
+ SkASSERT(!fReleaseHelper);
+ }
+
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ const char* getResourceType() const override { return "Surface"; }
+
+ // Unmanaged backends (e.g. Vulkan) may want to specially handle the release proc in order to
+ // ensure it isn't called until GPU work related to the resource is completed.
+ virtual void onSetRelease(sk_sp<GrRefCntedCallback>) {}
+
+ void invokeReleaseProc() {
+ // Depending on the ref count of fReleaseHelper this may or may not actually trigger the
+ // ReleaseProc to be called.
+ fReleaseHelper.reset();
+ }
+
+ GrPixelConfig fConfig;
+ int fWidth;
+ int fHeight;
+ GrInternalSurfaceFlags fSurfaceFlags;
+ GrProtected fIsProtected;
+ sk_sp<GrRefCntedCallback> fReleaseHelper;
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTexture.h b/gfx/skia/skia/include/gpu/GrTexture.h
new file mode 100644
index 0000000000..ad861d5aee
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTexture.h
@@ -0,0 +1,102 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTexture_DEFINED
+#define GrTexture_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrSurface.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrTexturePriv;
+
+class GrTexture : virtual public GrSurface {
+public:
+ GrTexture* asTexture() override { return this; }
+ const GrTexture* asTexture() const override { return this; }
+
+ virtual GrBackendTexture getBackendTexture() const = 0;
+
+ /**
+ * This function indicates that the texture parameters (wrap mode, filtering, ...) have been
+ * changed externally to Skia.
+ */
+ SK_API virtual void textureParamsModified() = 0;
+
+ /**
+ * This function steals the backend texture from a uniquely owned GrTexture with no pending
+ * IO, passing it out to the caller. The GrTexture is deleted in the process.
+ *
+ * Note that if the GrTexture is not uniquely owned (no other refs), or has pending IO, this
+ * function will fail.
+ */
+ static bool StealBackendTexture(sk_sp<GrTexture>,
+ GrBackendTexture*,
+ SkImage::BackendTextureReleaseProc*);
+
+ /** See addIdleProc. */
+ enum class IdleState {
+ kFlushed,
+ kFinished
+ };
+ /**
+ * Installs a proc on this texture. It will be called when the texture becomes "idle". There
+ * are two types of idle states as indicated by IdleState. For managed backends (e.g. GL where
+ * a driver typically handles CPU/GPU synchronization of resource access) there is no difference
+ * between the two. They both mean "all work related to the resource has been flushed to the
+ * backend API and the texture is not owned outside the resource cache".
+ *
+ * If the API is unmanaged (e.g. Vulkan) then kFinished has the additional constraint that the
+ * work flushed to the GPU is finished.
+ */
+ virtual void addIdleProc(sk_sp<GrRefCntedCallback> idleProc, IdleState) {
+ // This is the default implementation for the managed case where the IdleState can be
+ // ignored. Unmanaged backends, e.g. Vulkan, must override this to consider IdleState.
+ fIdleProcs.push_back(std::move(idleProc));
+ }
+ /** Helper version of addIdleProc that creates the ref-counted wrapper. */
+ void addIdleProc(GrRefCntedCallback::Callback callback,
+ GrRefCntedCallback::Context context,
+ IdleState state) {
+ this->addIdleProc(sk_make_sp<GrRefCntedCallback>(callback, context), state);
+ }
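+
+    // Editorial usage sketch (not upstream): recycling a client allocation once all GPU
+    // work that touches the texture has finished. 'MyBacking' and 'backing' are
+    // hypothetical.
+    //
+    //   static void RecycleBacking(void* ctx) {
+    //       static_cast<MyBacking*>(ctx)->recycle();
+    //   }
+    //   texture->addIdleProc(RecycleBacking, backing, GrTexture::IdleState::kFinished);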
+
+ /** Access methods that are only to be used within Skia code. */
+ inline GrTexturePriv texturePriv();
+ inline const GrTexturePriv texturePriv() const;
+
+protected:
+ GrTexture(GrGpu*, const SkISize&, GrPixelConfig, GrProtected, GrTextureType, GrMipMapsStatus);
+
+ virtual bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) = 0;
+
+ SkTArray<sk_sp<GrRefCntedCallback>> fIdleProcs;
+
+ void willRemoveLastRef() override {
+ // We're about to be idle in the resource cache. Do our part to trigger the idle callbacks.
+ fIdleProcs.reset();
+ }
+ void computeScratchKey(GrScratchKey*) const override;
+
+private:
+ size_t onGpuMemorySize() const override;
+ void markMipMapsDirty();
+ void markMipMapsClean();
+
+ GrTextureType fTextureType;
+ GrMipMapsStatus fMipMapsStatus;
+ int fMaxMipMapLevel;
+ friend class GrTexturePriv;
+
+ typedef GrSurface INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTypes.h b/gfx/skia/skia/include/gpu/GrTypes.h
new file mode 100644
index 0000000000..679a0fa96b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTypes.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypes_DEFINED
+#define GrTypes_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrConfig.h"
+
+class GrBackendSemaphore;
+class SkImage;
+class SkSurface;
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Defines overloaded bitwise operators to make it easier to use an enum as a
+ * bitfield.
+ */
+#define GR_MAKE_BITFIELD_OPS(X) \
+ inline X operator |(X a, X b) { \
+ return (X) (+a | +b); \
+ } \
+ inline X& operator |=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ inline X operator &(X a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ inline X& operator &=(X& a, X b) { \
+ return (a = a & b); \
+ } \
+ template <typename T> \
+ inline X operator &(T a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ template <typename T> \
+ inline X operator &(X a, T b) { \
+ return (X) (+a & +b); \
+ } \
+
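+// For example, applying GR_MAKE_BITFIELD_OPS to a hypothetical unscoped enum
+// enables the following:
+//
+//   enum GrWidgetFlags {
+//       kVisible_GrWidgetFlag = 1 << 0,
+//       kEnabled_GrWidgetFlag = 1 << 1,
+//   };
+//   GR_MAKE_BITFIELD_OPS(GrWidgetFlags)
+//
+//   GrWidgetFlags f = kVisible_GrWidgetFlag | kEnabled_GrWidgetFlag;
+//   f = f & ~kEnabled_GrWidgetFlag; // the templated & accepts the promoted int
+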
+#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
+ friend X operator |(X a, X b); \
+ friend X& operator |=(X& a, X b); \
+ \
+ friend X operator &(X a, X b); \
+ friend X& operator &=(X& a, X b); \
+ \
+ template <typename T> \
+ friend X operator &(T a, X b); \
+ \
+ template <typename T> \
+ friend X operator &(X a, T b); \
+
+/**
+ * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
+ * masking with type safety. Instances are created by applying the ~ operator to a flag value.
+ */
+template<typename TFlags> class GrTFlagsMask {
+public:
+ constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
+ constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
+ constexpr int value() const { return fValue; }
+private:
+ const int fValue;
+};
+
+// Or-ing a mask always returns another mask.
+template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
+ GrTFlagsMask<TFlags> b) {
+ return GrTFlagsMask<TFlags>(a.value() | b.value());
+}
+template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
+ TFlags b) {
+ return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
+}
+template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
+ GrTFlagsMask<TFlags> b) {
+ return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
+}
+template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
+ GrTFlagsMask<TFlags> b) {
+ return (a = a | b);
+}
+
+// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
+template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
+ GrTFlagsMask<TFlags> b) {
+ return GrTFlagsMask<TFlags>(a.value() & b.value());
+}
+template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
+ return static_cast<TFlags>(a.value() & static_cast<int>(b));
+}
+template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
+ return static_cast<TFlags>(static_cast<int>(a) & b.value());
+}
+template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
+ return (a = a & b);
+}
+
+/**
+ * Defines bitwise operators that make it possible to use an enum class as a
+ * basic bitfield.
+ */
+#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
+ constexpr GrTFlagsMask<X> operator~(X a) { \
+ return GrTFlagsMask<X>(~static_cast<int>(a)); \
+ } \
+ constexpr X operator|(X a, X b) { \
+ return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
+ } \
+ inline X& operator|=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ constexpr bool operator&(X a, X b) { \
+ return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
+ } \
+
+#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
+ friend constexpr GrTFlagsMask<X> operator ~(X); \
+ friend constexpr X operator |(X, X); \
+ friend X& operator |=(X&, X); \
+ friend constexpr bool operator &(X, X)
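+
+// A short sketch with a hypothetical enum class: after
+// GR_MAKE_BITFIELD_CLASS_OPS(GrIOType), the following is legal:
+//
+//   enum class GrIOType { kRead = 1 << 0, kWrite = 1 << 1 };
+//
+//   GrIOType t = GrIOType::kRead | GrIOType::kWrite;
+//   t &= ~GrIOType::kWrite;             // ~ yields a GrTFlagsMask<GrIOType>
+//   bool canRead = t & GrIOType::kRead; // & of two flags yields bool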
+
+////////////////////////////////////////////////////////////////////////////////
+
+// compile-time versions of min/max
+#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
+#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))
+
+/**
+ * divide, rounding up
+ */
+static inline constexpr int32_t GrIDivRoundUp(int x, int y) {
+ SkASSERT(y > 0);
+ return (x + (y-1)) / y;
+}
+static inline constexpr uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
+ return (x + (y-1)) / y;
+}
+static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
+
+/**
+ * align up
+ */
+static inline constexpr uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
+ return GrUIDivRoundUp(x, alignment) * alignment;
+}
+static inline constexpr size_t GrSizeAlignUp(size_t x, size_t alignment) {
+ return GrSizeDivRoundUp(x, alignment) * alignment;
+}
+
+/**
+ * amount of pad needed to align up
+ */
+static inline constexpr uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
+ return (alignment - x % alignment) % alignment;
+}
+static inline constexpr size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
+ return (alignment - x % alignment) % alignment;
+}
+
+/**
+ * align down
+ */
+static inline constexpr uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
+ return (x / alignment) * alignment;
+}
+static inline constexpr size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
+ return (x / alignment) * alignment;
+}
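+
+// Worked examples: GrUIDivRoundUp(10, 4) == 3, GrUIAlignUp(10, 4) == 12,
+// GrUIAlignUpPad(10, 4) == 2, and GrUIAlignDown(10, 4) == 8.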
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Possible 3D APIs that may be used by Ganesh.
+ */
+enum class GrBackendApi : unsigned {
+ kMetal,
+ kDawn,
+ kOpenGL,
+ kVulkan,
+ /**
+ * Mock is a backend that does not draw anything. It is used for unit tests
+ * and to measure CPU overhead.
+ */
+ kMock,
+
+ /**
+ * Added here to support the legacy GrBackend enum value and clients who referenced it using
+ * GrBackend::kOpenGL_GrBackend.
+ */
+ kOpenGL_GrBackend = kOpenGL,
+};
+
+/**
+ * Previously the above enum was not an enum class but a normal enum. To support the legacy use of
+ * the enum values we define them below so that no clients break.
+ */
+typedef GrBackendApi GrBackend;
+
+static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
+static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
+static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Used to say whether a texture has mip levels allocated or not.
+ */
+enum class GrMipMapped : bool {
+ kNo = false,
+ kYes = true
+};
+
+/*
+ * Can a GrBackendObject be rendered to?
+ */
+enum class GrRenderable : bool {
+ kNo = false,
+ kYes = true
+};
+
+/*
+ * Used to say whether a texture is backed by protected memory.
+ */
+enum class GrProtected : bool {
+ kNo = false,
+ kYes = true
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * GPU SkImages and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
+ * either the top-left or bottom-left content pixel.
+ */
+enum GrSurfaceOrigin : int {
+ kTopLeft_GrSurfaceOrigin,
+ kBottomLeft_GrSurfaceOrigin,
+};
+
+/**
+ * A GrContext's cache of backend context state can be partially invalidated.
+ * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
+ */
+enum GrGLBackendState {
+ kRenderTarget_GrGLBackendState = 1 << 0,
+ // Also includes samplers bound to texture units.
+ kTextureBinding_GrGLBackendState = 1 << 1,
+    // View state covers the scissor and viewport.
+ kView_GrGLBackendState = 1 << 2,
+ kBlend_GrGLBackendState = 1 << 3,
+ kMSAAEnable_GrGLBackendState = 1 << 4,
+ kVertex_GrGLBackendState = 1 << 5,
+ kStencil_GrGLBackendState = 1 << 6,
+ kPixelStore_GrGLBackendState = 1 << 7,
+ kProgram_GrGLBackendState = 1 << 8,
+ kFixedFunction_GrGLBackendState = 1 << 9,
+ kMisc_GrGLBackendState = 1 << 10,
+ kPathRendering_GrGLBackendState = 1 << 11,
+ kALL_GrGLBackendState = 0xffff
+};
+
+/**
+ * This value translates to resetting all the context state for any backend.
+ */
+static const uint32_t kAll_GrBackendState = 0xffffffff;
+
+enum GrFlushFlags {
+ kNone_GrFlushFlags = 0,
+    // flush will wait until all submitted GPU work is finished before returning.
+ kSyncCpu_GrFlushFlag = 0x1,
+};
+
+typedef void* GrGpuFinishedContext;
+typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);
+
+/**
+ * Struct to supply options to flush calls.
+ *
+ * After issuing all commands, fNumSemaphores semaphores will be signaled by the GPU. The client
+ * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
+ * can be either initialized or not. If they are initialized, the backend uses the passed-in
+ * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
+ * object is initialized with that semaphore.
+ *
+ * The client will own and be responsible for deleting the underlying semaphores that are stored
+ * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
+ * themselves can be deleted as soon as this function returns.
+ *
+ * If a finishedProc is provided, the finishedProc will be called when all work submitted to the gpu
+ * from this flush call and all previous flush calls has finished on the GPU. If the flush call
+ * fails due to an error and nothing ends up getting sent to the GPU, the finished proc is called
+ * immediately.
+ */
+struct GrFlushInfo {
+ GrFlushFlags fFlags = kNone_GrFlushFlags;
+ int fNumSemaphores = 0;
+ GrBackendSemaphore* fSignalSemaphores = nullptr;
+ GrGpuFinishedProc fFinishedProc = nullptr;
+ GrGpuFinishedContext fFinishedContext = nullptr;
+};
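+
+// A hedged sketch of populating a GrFlushInfo with a finished proc ('onFinished'
+// and 'myContext' are hypothetical):
+//
+//   static void onFinished(GrGpuFinishedContext ctx) { /* signal a waiter */ }
+//
+//   GrFlushInfo info;
+//   info.fFlags = kSyncCpu_GrFlushFlag;
+//   info.fFinishedProc = onFinished;
+//   info.fFinishedContext = myContext;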
+
+/**
+ * Enum used as return value when flush with semaphores so the client knows whether the semaphores
+ * were submitted to GPU or not.
+ */
+enum class GrSemaphoresSubmitted : bool {
+ kNo = false,
+ kYes = true
+};
+
+/**
+ * Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
+ * flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
+ *
+ * If fPrepareSurfaceForPresent is not nullptr, then it must be an array of fNumSurfaces entries.
+ * Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array. If
+ * an entry is true, then that surface will be prepared for both external use and present.
+ *
+ * Currently this only has an effect if the backend API is Vulkan. In this case, all the underlying
+ * VkImages associated with the SkImages and SkSurfaces will be transitioned into the VkQueueFamily
+ * in which they were originally wrapped or created. This allows a client to wrap a VkImage
+ * from a queue which is different from the graphics queue and then have Skia transition it back to
+ * that queue without needing to delete the SkImage or SkSurface. If an SkSurface is also
+ * flagged to be prepared for present, then its VkImageLayout will be set to
+ * VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if the VK_KHR_swapchain extension has been enabled for the
+ * GrContext and the original queue is not VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
+ *
+ * If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue and
+ * whatever layout is needed for its use.
+ */
+struct GrPrepareForExternalIORequests {
+ int fNumImages = 0;
+ SkImage** fImages = nullptr;
+ int fNumSurfaces = 0;
+ SkSurface** fSurfaces = nullptr;
+ bool* fPrepareSurfaceForPresent = nullptr;
+
+ bool hasRequests() const { return fNumImages || fNumSurfaces; }
+};
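+
+// A hedged sketch of a request that prepares one hypothetical GPU-backed
+// surface for external use and present:
+//
+//   SkSurface* surface = /* a GPU-backed surface */;
+//   bool present = true;
+//   GrPrepareForExternalIORequests reqs;
+//   reqs.fNumSurfaces = 1;
+//   reqs.fSurfaces = &surface;
+//   reqs.fPrepareSurfaceForPresent = &present;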
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
new file mode 100644
index 0000000000..f8936c29d8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTypes_DEFINED
+#define GrDawnTypes_DEFINED
+
+#ifdef Always
+#undef Always
+#endif
+#include "dawn/dawncpp.h"
+
+struct GrDawnImageInfo {
+ dawn::Texture fTexture;
+ dawn::TextureFormat fFormat;
+ uint32_t fLevelCount;
+ GrDawnImageInfo() : fTexture(nullptr), fFormat(), fLevelCount(0) {
+ }
+ GrDawnImageInfo(const GrDawnImageInfo& other)
+ : fTexture(other.fTexture)
+ , fFormat(other.fFormat)
+ , fLevelCount(other.fLevelCount) {
+ }
+ GrDawnImageInfo& operator=(const GrDawnImageInfo& other) {
+ fTexture = other.fTexture;
+ fFormat = other.fFormat;
+ fLevelCount = other.fLevelCount;
+ return *this;
+ }
+ bool operator==(const GrDawnImageInfo& other) const {
+ return fTexture.Get() == other.fTexture.Get() &&
+ fFormat == other.fFormat &&
+ fLevelCount == other.fLevelCount;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
new file mode 100644
index 0000000000..bfa2aea376
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+
+void GrGetEGLQueryAndDisplay(GrEGLQueryStringFn** queryString, GrEGLDisplay* display,
+ void* ctx, GrGLGetProc get);
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
new file mode 100644
index 0000000000..4f9f9f9ee0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLInterface.h"
+
+typedef GrGLFuncPtr (*GrGLGetProc)(void* ctx, const char name[]);
+
+/**
+ * Generic function for creating a GrGLInterface for either an OpenGL or GLES context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL (but not GLES) context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL ES (but not OpenGL) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for a WebGL (similar to OpenGL ES) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get);
+
+/** Deprecated version of GrGLMakeAssembledInterface() that returns a bare pointer. */
+SK_API const GrGLInterface* GrGLAssembleInterface(void *ctx, GrGLGetProc get);
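+
+// A minimal sketch of assembling an interface in an EGL environment (assumes
+// eglGetProcAddress is the platform's function-lookup entry point):
+//
+//   static GrGLFuncPtr egl_get(void* ctx, const char name[]) {
+//       return (GrGLFuncPtr)eglGetProcAddress(name);
+//   }
+//   sk_sp<const GrGLInterface> iface = GrGLMakeAssembledInterface(nullptr, egl_get);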
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
new file mode 100644
index 0000000000..d6c49f3405
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLConfig_DEFINED
+#define GrGLConfig_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Optional GL config file.
+ */
+#ifdef GR_GL_CUSTOM_SETUP_HEADER
+ #include GR_GL_CUSTOM_SETUP_HEADER
+#endif
+
+#if !defined(GR_GL_FUNCTION_TYPE)
+ #if defined(SK_BUILD_FOR_WIN)
+ #define GR_GL_FUNCTION_TYPE __stdcall
+ #else
+ #define GR_GL_FUNCTION_TYPE
+ #endif
+#endif
+
+/**
+ * The following are optional defines that can be enabled at the compiler
+ * command line, in an IDE project, in a GrUserConfig.h file, or in a GL custom
+ * file (if one is in use). If a GR_GL_CUSTOM_SETUP_HEADER is used they can
+ * also be placed there.
+ *
+ * GR_GL_LOG_CALLS: if 1 Gr can print every GL call using SkDebugf. Defaults to
+ * 1 if SK_DEBUG is set, otherwise 0. Logging can be enabled and disabled at
+ * runtime using a debugger via the global gLogCallsGL. The initial value of
+ * gLogCallsGL is controlled by GR_GL_LOG_CALLS_START.
+ *
+ * GR_GL_LOG_CALLS_START: controls the initial value of gLogCallsGL when
+ * GR_GL_LOG_CALLS is 1. Defaults to 0.
+ *
+ * GR_GL_CHECK_ERROR: if enabled Gr can do a glGetError() after every GL call.
+ * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
+ * this can be toggled in a debugger using the gCheckErrorGL global. The initial
+ * value of gCheckErrorGL is controlled by GR_GL_CHECK_ERROR_START.
+ *
+ * GR_GL_CHECK_ERROR_START: controls the initial value of gCheckErrorGL
+ * when GR_GL_CHECK_ERROR is 1. Defaults to 1.
+ *
+ * GR_GL_CHECK_ALLOC_WITH_GET_ERROR: If set to 1, then glTexImage,
+ * glBufferData, glRenderbufferStorage, etc. will be checked for errors. This
+ * amounts to ensuring the error is GL_NO_ERROR, calling the allocating
+ * function, and then checking that the error is still GL_NO_ERROR. When the
+ * value is 0 we will assume no error was generated without checking.
+ *
+ * GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT: We will normally check the FBO status
+ * every time we bind a texture or renderbuffer to an FBO. However, in some
+ * environments CheckFrameBufferStatus is very expensive. If this is set we will
+ * check the first time we use a color format or a combination of color /
+ * stencil formats as attachments. If the FBO is complete we will assume
+ * subsequent attachments with the same formats are complete as well.
+ *
+ * GR_GL_MUST_USE_VBO: Indicates that all vertices and indices must be rendered
+ * from VBOs. Chromium's command buffer doesn't allow glVertexAttribArray with
+ * ARRAY_BUFFER 0 bound or glDrawElements with ELEMENT_ARRAY_BUFFER 0 bound.
+ */
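+
+/*
+ * For example, a GrUserConfig.h or GR_GL_CUSTOM_SETUP_HEADER might contain the
+ * following to enable call logging from startup (a sketch, not a default):
+ *
+ *   #define GR_GL_LOG_CALLS 1
+ *   #define GR_GL_LOG_CALLS_START 1
+ */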
+
+#if !defined(GR_GL_LOG_CALLS)
+ #ifdef SK_DEBUG
+ #define GR_GL_LOG_CALLS 1
+ #else
+ #define GR_GL_LOG_CALLS 0
+ #endif
+#endif
+
+#if !defined(GR_GL_LOG_CALLS_START)
+ #define GR_GL_LOG_CALLS_START 0
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR)
+ #ifdef SK_DEBUG
+ #define GR_GL_CHECK_ERROR 1
+ #else
+ #define GR_GL_CHECK_ERROR 0
+ #endif
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR_START)
+ #define GR_GL_CHECK_ERROR_START 1
+#endif
+
+#if !defined(GR_GL_CHECK_ALLOC_WITH_GET_ERROR)
+ #define GR_GL_CHECK_ALLOC_WITH_GET_ERROR 1
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
new file mode 100644
index 0000000000..683cd97884
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrGLConfig_chrome_DEFINED
+#define GrGLConfig_chrome_DEFINED
+
+// glGetError() forces a sync with the GPU process in Chrome.
+#define GR_GL_CHECK_ERROR_START 0
+
+// Checking errors is even more expensive in Chrome (it causes a command
+// buffer flush). The compositor also doesn't check its allocations.
+#define GR_GL_CHECK_ALLOC_WITH_GET_ERROR 0
+
+#if !defined(GR_GL_IGNORE_ES3_MSAA)
+ #define GR_GL_IGNORE_ES3_MSAA 1
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
new file mode 100644
index 0000000000..1e2823f71a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLExtensions_DEFINED
+#define GrGLExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+#include "include/private/SkTArray.h"
+
+#include <utility>
+
+struct GrGLInterface;
+class SkJSONWriter;
+
+/**
+ * This helper queries the current GL context for its extensions, remembers them, and can be
+ * queried. It supports both glGetString- and glGetStringi-style extension string APIs and will
+ * use the latter if it is available. It will also query for EGL extensions if an eglQueryString
+ * implementation is provided.
+ */
+class SK_API GrGLExtensions {
+public:
+ GrGLExtensions() {}
+
+ GrGLExtensions(const GrGLExtensions&);
+
+ GrGLExtensions& operator=(const GrGLExtensions&);
+
+ void swap(GrGLExtensions* that) {
+ using std::swap;
+ swap(fStrings, that->fStrings);
+ swap(fInitialized, that->fInitialized);
+ }
+
+ /**
+ * We sometimes need to use this class without having yet created a GrGLInterface. This version
+     * of init expects that getString is always non-NULL, and that getIntegerv and getStringi are
+     * non-NULL when on desktop GL version 3.0 or higher. Otherwise init() will fail.
+ */
+ bool init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringFn> getString,
+ GrGLFunction<GrGLGetStringiFn> getStringi,
+ GrGLFunction<GrGLGetIntegervFn> getIntegerv,
+ GrGLFunction<GrEGLQueryStringFn> queryString = nullptr,
+ GrEGLDisplay eglDisplay = nullptr);
+
+ bool isInitialized() const { return fInitialized; }
+
+ /**
+ * Queries whether an extension is present. This will fail if init() has not been called.
+ */
+ bool has(const char[]) const;
+
+ /**
+ * Removes an extension if present. Returns true if the extension was present before the call.
+ */
+ bool remove(const char[]);
+
+ /**
+     * Adds an extension to the list.
+ */
+ void add(const char[]);
+
+ void reset() { fStrings.reset(); }
+
+ void dumpJSON(SkJSONWriter*) const;
+
+private:
+ bool fInitialized = false;
+ SkTArray<SkString> fStrings;
+};
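+
+// A brief usage sketch, assuming 'gl' is an initialized GrGLInterface (its
+// fStandard and fFunctions members are declared in GrGLInterface.h):
+//
+//   GrGLExtensions ext;
+//   if (ext.init(gl->fStandard, gl->fFunctions.fGetString,
+//                gl->fFunctions.fGetStringi, gl->fFunctions.fGetIntegerv)) {
+//       bool hasS3TC = ext.has("GL_EXT_texture_compression_s3tc");
+//   }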
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
new file mode 100644
index 0000000000..b0585fd3d8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
@@ -0,0 +1,320 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLFunctions_DEFINED
+#define GrGLFunctions_DEFINED
+
+#include <cstring>
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/SkTLogic.h"
+
+
+extern "C" {
+
+///////////////////////////////////////////////////////////////////////////////
+
+using GrGLActiveTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum texture);
+using GrGLAttachShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint shader);
+using GrGLBeginQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint id);
+using GrGLBindAttribLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint index, const char* name);
+using GrGLBindBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint buffer);
+using GrGLBindFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint framebuffer);
+using GrGLBindRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint renderbuffer);
+using GrGLBindTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint texture);
+using GrGLBindFragDataLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name);
+using GrGLBindFragDataLocationIndexedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar* name);
+using GrGLBindSamplerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint unit, GrGLuint sampler);
+using GrGLBindVertexArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint array);
+using GrGLBlendBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLBlendColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLBlendEquationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLBlendFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum sfactor, GrGLenum dfactor);
+using GrGLBlitFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter);
+using GrGLBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage);
+using GrGLBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data);
+using GrGLCheckFramebufferStatusFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLClearFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield mask);
+using GrGLClearColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLClearStencilFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint s);
+using GrGLClearTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLClearTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLColorMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha);
+using GrGLCompileShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLCompressedTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCompressedTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCopyTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLCreateProgramFn = GrGLuint GR_GL_FUNCTION_TYPE();
+using GrGLCreateShaderFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLenum type);
+using GrGLCullFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDeleteBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* buffers);
+using GrGLDeleteFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* framebuffers);
+using GrGLDeleteProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLDeleteQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* ids);
+using GrGLDeleteRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* renderbuffers);
+using GrGLDeleteSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, const GrGLuint* samplers);
+using GrGLDeleteShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLDeleteTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* textures);
+using GrGLDeleteVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* arrays);
+using GrGLDepthMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean flag);
+using GrGLDisableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLDisableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLDrawArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count);
+using GrGLDrawArraysInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount);
+using GrGLDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect);
+using GrGLDrawBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDrawBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLenum* bufs);
+using GrGLDrawElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLDrawElementsInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices, GrGLsizei primcount);
+using GrGLDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect);
+using GrGLDrawRangeElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLEnableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLEnableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLEndQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLFinishFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFlushFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFlushMappedBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLFramebufferRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
+using GrGLFramebufferTexture2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+using GrGLFramebufferTexture2DMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples);
+using GrGLFrontFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLGenBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* buffers);
+using GrGLGenFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* framebuffers);
+using GrGLGenerateMipmapFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLGenQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* ids);
+using GrGLGenRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* renderbuffers);
+using GrGLGenSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, GrGLuint* samplers);
+using GrGLGenTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* textures);
+using GrGLGenVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* arrays);
+using GrGLGetBufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetErrorFn = GrGLenum GR_GL_FUNCTION_TYPE();
+using GrGLGetFramebufferAttachmentParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params);
+using GrGLGetIntegervFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint* params);
+using GrGLGetMultisamplefvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLuint index, GrGLfloat* val);
+using GrGLGetProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, GrGLenum* binaryFormat, void* binary);
+using GrGLGetProgramInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetProgramivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum GLtarget, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjecti64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint64* params);
+using GrGLGetQueryObjectivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjectui64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint64* params);
+using GrGLGetQueryObjectuivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint* params);
+using GrGLGetRenderbufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetShaderivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderPrecisionFormatFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum shadertype, GrGLenum precisiontype, GrGLint* range, GrGLint* precision);
+using GrGLGetStringFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name);
+using GrGLGetStringiFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name, GrGLuint index);
+using GrGLGetTexLevelParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params);
+using GrGLGetUniformLocationFn = GrGLint GR_GL_FUNCTION_TYPE(GrGLuint program, const char* name);
+using GrGLInsertEventMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLInvalidateBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer);
+using GrGLInvalidateBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLInvalidateFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLInvalidateSubFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLInvalidateTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level);
+using GrGLInvalidateTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth);
+using GrGLIsTextureFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint texture);
+using GrGLLineWidthFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLfloat width);
+using GrGLLinkProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLMapBufferFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum access);
+using GrGLMapBufferRangeFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
+using GrGLMapBufferSubDataFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access);
+using GrGLMapTexSubImage2DFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access);
+using GrGLPixelStoreiFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint param);
+using GrGLPolygonModeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum mode);
+using GrGLPopGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum binaryFormat, void* binary, GrGLsizei length);
+using GrGLProgramParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint value);
+using GrGLPushGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLQueryCounterFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum target);
+using GrGLReadBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum src);
+using GrGLReadPixelsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels);
+using GrGLRenderbufferStorageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLRenderbufferStorageMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLResolveMultisampleFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLSamplerParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLint params);
+using GrGLSamplerParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, const GrGLint* params);
+using GrGLScissorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+// GL_CHROMIUM_bind_uniform_location
+using GrGLBindUniformLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, const char* name);
+using GrGLShaderSourceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei count, const char* const* str, const GrGLint* length);
+using GrGLStencilFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilFuncSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint mask);
+using GrGLStencilMaskSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLuint mask);
+using GrGLStencilOpFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLStencilOpSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLTexBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer);
+using GrGLTexBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size);
+using GrGLTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTexParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLfloat param);
+using GrGLTexParameterfvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLfloat* params);
+using GrGLTexParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint param);
+using GrGLTexParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLint* params);
+using GrGLTexStorage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLDiscardFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTextureBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLUniform1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0);
+using GrGLUniform1iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0);
+using GrGLUniform1fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform1ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform2fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1);
+using GrGLUniform2iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1);
+using GrGLUniform2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform2ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform3fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2);
+using GrGLUniform3iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2);
+using GrGLUniform3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform3ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform4fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3);
+using GrGLUniform4iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3);
+using GrGLUniform4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform4ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniformMatrix2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUnmapBufferFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLUnmapBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUnmapTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUseProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLVertexAttrib1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat value);
+using GrGLVertexAttrib2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttribDivisorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index, GrGLuint divisor);
+using GrGLVertexAttribIPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLVertexAttribPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLViewportFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+
+/* GL_NV_path_rendering */
+using GrGLMatrixLoadfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum matrixMode, const GrGLfloat* m);
+using GrGLMatrixLoadIdentityFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum);
+using GrGLPathCommandsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLsizei numCommands, const GrGLubyte* commands, GrGLsizei numCoords, GrGLenum coordType, const GrGLvoid* coords);
+using GrGLPathParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLenum pname, GrGLint value);
+using GrGLPathParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLenum pname, GrGLfloat value);
+using GrGLGenPathsFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLsizei range);
+using GrGLDeletePathsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLsizei range);
+using GrGLIsPathFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint path);
+using GrGLPathStencilFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilFillPathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLenum fillMode, GrGLuint mask);
+using GrGLStencilStrokePathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLint reference, GrGLuint mask);
+using GrGLStencilFillPathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum transformType, const GrGLfloat* transformValues);
+using GrGLStencilStrokePathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum transformType, const GrGLfloat* transformValues);
+using GrGLCoverFillPathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLenum coverMode);
+using GrGLCoverStrokePathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint name, GrGLenum coverMode);
+using GrGLCoverFillPathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues);
+using GrGLCoverStrokePathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues);
+// NV_path_rendering v1.2
+using GrGLStencilThenCoverFillPathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode);
+using GrGLStencilThenCoverStrokePathFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint path, GrGLint reference, GrGLuint mask, GrGLenum coverMode);
+using GrGLStencilThenCoverFillPathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues);
+using GrGLStencilThenCoverStrokePathInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid* paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues);
+// NV_path_rendering v1.3
+using GrGLProgramPathFragmentInputGenFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, GrGLenum genMode, GrGLint components, const GrGLfloat* coeffs);
+// CHROMIUM_path_rendering
+using GrGLBindFragmentInputLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, const GrGLchar* name);
+
+/* ARB_program_interface_query */
+using GrGLGetProgramResourceLocationFn = GrGLint GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum programInterface, const GrGLchar* name);
+
+/* GL_NV_framebuffer_mixed_samples */
+using GrGLCoverageModulationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum components);
+
+/* EXT_multi_draw_indirect */
+using GrGLMultiDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+using GrGLMultiDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+
+/* ARB_sync */
+using GrGLFenceSyncFn = GrGLsync GR_GL_FUNCTION_TYPE(GrGLenum condition, GrGLbitfield flags);
+using GrGLIsSyncFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLsync sync);
+using GrGLClientWaitSyncFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLWaitSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLDeleteSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync);
+
+/* ARB_internalformat_query */
+using GrGLGetInternalformativFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLenum pname, GrGLsizei bufSize, GrGLint* params);
+
+/* KHR_debug */
+using GrGLDebugMessageControlFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled);
+using GrGLDebugMessageInsertFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf);
+using GrGLDebugMessageCallbackFn = GrGLvoid GR_GL_FUNCTION_TYPE(GRGLDEBUGPROC callback, const GrGLvoid* userParam);
+using GrGLGetDebugMessageLogFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog);
+using GrGLPushDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar* message);
+using GrGLPopDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLObjectLabelFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar* label);
+
+/** EXT_window_rectangles */
+using GrGLWindowRectanglesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, const GrGLint box[]);
+
+/** GL_QCOM_tiled_rendering */
+using GrGLStartTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint x, GrGLuint y, GrGLuint width, GrGLuint height, GrGLbitfield preserveMask);
+using GrGLEndTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield preserveMask);
+
+/** EGL functions */
+using GrEGLQueryStringFn = const char* GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLint name);
+using GrEGLGetCurrentDisplayFn = GrEGLDisplay GR_GL_FUNCTION_TYPE();
+using GrEGLCreateImageFn = GrEGLImage GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLContext ctx, GrEGLenum target, GrEGLClientBuffer buffer, const GrEGLint* attrib_list);
+using GrEGLDestroyImageFn = GrEGLBoolean GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLImage image);
+} // extern "C"
+
+// This is a lighter-weight std::function, trying to reduce code size and compile time
+// by only supporting the exact use cases we require.
+template <typename T> class GrGLFunction;
+
+template <typename R, typename... Args>
+class GrGLFunction<R GR_GL_FUNCTION_TYPE(Args...)> {
+public:
+ using Fn = R GR_GL_FUNCTION_TYPE(Args...);
+ // Construct empty.
+ GrGLFunction() = default;
+ GrGLFunction(std::nullptr_t) {}
+
+ // Construct from a simple function pointer.
+ GrGLFunction(Fn* fn_ptr) {
+ static_assert(sizeof(fn_ptr) <= sizeof(fBuf), "fBuf is too small");
+ if (fn_ptr) {
+ memcpy(fBuf, &fn_ptr, sizeof(fn_ptr));
+ fCall = [](const void* buf, Args... args) {
+ return (*(Fn**)buf)(std::forward<Args>(args)...);
+ };
+ }
+ }
+
+ // Construct from a small closure.
+ template <typename Closure>
+ GrGLFunction(Closure closure) : GrGLFunction() {
+ static_assert(sizeof(Closure) <= sizeof(fBuf), "fBuf is too small");
+#if defined(__APPLE__) // I am having serious trouble getting these to work with all STLs...
+ static_assert(std::is_trivially_copyable<Closure>::value, "");
+ static_assert(std::is_trivially_destructible<Closure>::value, "");
+#endif
+
+ memcpy(fBuf, &closure, sizeof(closure));
+ fCall = [](const void* buf, Args... args) {
+ auto closure = (const Closure*)buf;
+ return (*closure)(args...);
+ };
+ }
+
+ R operator()(Args... args) const {
+ SkASSERT(fCall);
+ return fCall(fBuf, std::forward<Args>(args)...);
+ }
+
+ explicit operator bool() const { return fCall != nullptr; }
+
+ void reset() { fCall = nullptr; }
+
+private:
+ using Call = R(const void* buf, Args...);
+ Call* fCall = nullptr;
+ size_t fBuf[4];
+};
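+
+// A small sketch of both construction paths ('someFinishFnPtr' is a
+// hypothetical pointer to a function with GrGLFinishFn's signature):
+//
+//   GrGLFunction<GrGLFinishFn> finish1 = someFinishFnPtr;      // function pointer
+//   GrGLFunction<GrGLFinishFn> finish2 = []() { /* no-op */ }; // small closure
+//   if (finish1) { finish1(); }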
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
new file mode 100644
index 0000000000..99735b7c94
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLInterface_DEFINED
+#define GrGLInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef void(*GrGLFuncPtr)();
+struct GrGLInterface;
+
+
+/**
+ * Rather than depend on platform-specific GL headers and libraries, we require
+ * the client to provide a struct of GL function pointers. This struct can be
+ * specified per-GrContext as a parameter to GrContext::MakeGL. If no interface is
+ * passed to MakeGL then a default GL interface is created using GrGLMakeNativeInterface().
+ * If this returns nullptr then GrContext::MakeGL() will fail.
+ *
+ * The implementation of GrGLMakeNativeInterface is platform-specific. Several
+ * implementations have been provided (for GLX, WGL, EGL, etc), along with an
+ * implementation that simply returns nullptr. Clients should select the most
+ * appropriate one to build.
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeNativeInterface();
+// Deprecated alternative to GrGLMakeNativeInterface().
+SK_API const GrGLInterface* GrGLCreateNativeInterface();
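+
+// A hedged sketch of the typical flow (GrContext::MakeGL is declared in
+// GrContext.h, assumed here):
+//
+//   sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
+//   sk_sp<GrContext> context = GrContext::MakeGL(iface); // or MakeGL() for the
+//                                                        // native default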
+
+/**
+ * GrContext uses the following interface to make all calls into OpenGL. When a
+ * GrContext is created it is given a GrGLInterface. The interface's function
+ * pointers must be valid for the OpenGL context associated with the GrContext.
+ * On some platforms, such as Windows, function pointers for OpenGL extensions
+ * may vary between OpenGL contexts. So the caller must be careful to use a
+ * GrGLInterface initialized for the correct context. All functions that should
+ * be available based on the OpenGL version and extension string must be
+ * non-NULL or GrContext creation will fail. This can be tested with the
+ * validate() method when the OpenGL context has been made current.
+ */
+struct SK_API GrGLInterface : public SkRefCnt {
+private:
+ typedef SkRefCnt INHERITED;
+
+public:
+ GrGLInterface();
+
+ // Validates that the GrGLInterface supports its advertised standard. This means the necessary
+ // function pointers have been initialized for both the GL version and any advertised
+ // extensions.
+ bool validate() const;
+
+ // Indicates the type of GL implementation
+ union {
+ GrGLStandard fStandard;
+        GrGLStandard fBindingsExported; // Legacy name, will be removed when Chromium is updated.
+ };
+
+ GrGLExtensions fExtensions;
+
+ bool hasExtension(const char ext[]) const { return fExtensions.has(ext); }
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ GrGLFunction<GrGLActiveTextureFn> fActiveTexture;
+ GrGLFunction<GrGLAttachShaderFn> fAttachShader;
+ GrGLFunction<GrGLBeginQueryFn> fBeginQuery;
+ GrGLFunction<GrGLBindAttribLocationFn> fBindAttribLocation;
+ GrGLFunction<GrGLBindBufferFn> fBindBuffer;
+ GrGLFunction<GrGLBindFragDataLocationFn> fBindFragDataLocation;
+ GrGLFunction<GrGLBindFragDataLocationIndexedFn> fBindFragDataLocationIndexed;
+ GrGLFunction<GrGLBindFramebufferFn> fBindFramebuffer;
+ GrGLFunction<GrGLBindRenderbufferFn> fBindRenderbuffer;
+ GrGLFunction<GrGLBindSamplerFn> fBindSampler;
+ GrGLFunction<GrGLBindTextureFn> fBindTexture;
+ GrGLFunction<GrGLBindVertexArrayFn> fBindVertexArray;
+ GrGLFunction<GrGLBlendBarrierFn> fBlendBarrier;
+ GrGLFunction<GrGLBlendColorFn> fBlendColor;
+ GrGLFunction<GrGLBlendEquationFn> fBlendEquation;
+ GrGLFunction<GrGLBlendFuncFn> fBlendFunc;
+ GrGLFunction<GrGLBlitFramebufferFn> fBlitFramebuffer;
+ GrGLFunction<GrGLBufferDataFn> fBufferData;
+ GrGLFunction<GrGLBufferSubDataFn> fBufferSubData;
+ GrGLFunction<GrGLCheckFramebufferStatusFn> fCheckFramebufferStatus;
+ GrGLFunction<GrGLClearFn> fClear;
+ GrGLFunction<GrGLClearColorFn> fClearColor;
+ GrGLFunction<GrGLClearStencilFn> fClearStencil;
+ GrGLFunction<GrGLClearTexImageFn> fClearTexImage;
+ GrGLFunction<GrGLClearTexSubImageFn> fClearTexSubImage;
+ GrGLFunction<GrGLColorMaskFn> fColorMask;
+ GrGLFunction<GrGLCompileShaderFn> fCompileShader;
+ GrGLFunction<GrGLCompressedTexImage2DFn> fCompressedTexImage2D;
+ GrGLFunction<GrGLCompressedTexSubImage2DFn> fCompressedTexSubImage2D;
+ GrGLFunction<GrGLCopyTexSubImage2DFn> fCopyTexSubImage2D;
+ GrGLFunction<GrGLCreateProgramFn> fCreateProgram;
+ GrGLFunction<GrGLCreateShaderFn> fCreateShader;
+ GrGLFunction<GrGLCullFaceFn> fCullFace;
+ GrGLFunction<GrGLDeleteBuffersFn> fDeleteBuffers;
+ GrGLFunction<GrGLDeleteFramebuffersFn> fDeleteFramebuffers;
+ GrGLFunction<GrGLDeleteProgramFn> fDeleteProgram;
+ GrGLFunction<GrGLDeleteQueriesFn> fDeleteQueries;
+ GrGLFunction<GrGLDeleteRenderbuffersFn> fDeleteRenderbuffers;
+ GrGLFunction<GrGLDeleteSamplersFn> fDeleteSamplers;
+ GrGLFunction<GrGLDeleteShaderFn> fDeleteShader;
+ GrGLFunction<GrGLDeleteTexturesFn> fDeleteTextures;
+ GrGLFunction<GrGLDeleteVertexArraysFn> fDeleteVertexArrays;
+ GrGLFunction<GrGLDepthMaskFn> fDepthMask;
+ GrGLFunction<GrGLDisableFn> fDisable;
+ GrGLFunction<GrGLDisableVertexAttribArrayFn> fDisableVertexAttribArray;
+ GrGLFunction<GrGLDrawArraysFn> fDrawArrays;
+ GrGLFunction<GrGLDrawArraysIndirectFn> fDrawArraysIndirect;
+ GrGLFunction<GrGLDrawArraysInstancedFn> fDrawArraysInstanced;
+ GrGLFunction<GrGLDrawBufferFn> fDrawBuffer;
+ GrGLFunction<GrGLDrawBuffersFn> fDrawBuffers;
+ GrGLFunction<GrGLDrawElementsFn> fDrawElements;
+ GrGLFunction<GrGLDrawElementsIndirectFn> fDrawElementsIndirect;
+ GrGLFunction<GrGLDrawElementsInstancedFn> fDrawElementsInstanced;
+ GrGLFunction<GrGLDrawRangeElementsFn> fDrawRangeElements;
+ GrGLFunction<GrGLEnableFn> fEnable;
+ GrGLFunction<GrGLEnableVertexAttribArrayFn> fEnableVertexAttribArray;
+ GrGLFunction<GrGLEndQueryFn> fEndQuery;
+ GrGLFunction<GrGLFinishFn> fFinish;
+ GrGLFunction<GrGLFlushFn> fFlush;
+ GrGLFunction<GrGLFlushMappedBufferRangeFn> fFlushMappedBufferRange;
+ GrGLFunction<GrGLFramebufferRenderbufferFn> fFramebufferRenderbuffer;
+ GrGLFunction<GrGLFramebufferTexture2DFn> fFramebufferTexture2D;
+ GrGLFunction<GrGLFramebufferTexture2DMultisampleFn> fFramebufferTexture2DMultisample;
+ GrGLFunction<GrGLFrontFaceFn> fFrontFace;
+ GrGLFunction<GrGLGenBuffersFn> fGenBuffers;
+ GrGLFunction<GrGLGenFramebuffersFn> fGenFramebuffers;
+ GrGLFunction<GrGLGenerateMipmapFn> fGenerateMipmap;
+ GrGLFunction<GrGLGenQueriesFn> fGenQueries;
+ GrGLFunction<GrGLGenRenderbuffersFn> fGenRenderbuffers;
+ GrGLFunction<GrGLGenSamplersFn> fGenSamplers;
+ GrGLFunction<GrGLGenTexturesFn> fGenTextures;
+ GrGLFunction<GrGLGenVertexArraysFn> fGenVertexArrays;
+ GrGLFunction<GrGLGetBufferParameterivFn> fGetBufferParameteriv;
+ GrGLFunction<GrGLGetErrorFn> fGetError;
+ GrGLFunction<GrGLGetFramebufferAttachmentParameterivFn> fGetFramebufferAttachmentParameteriv;
+ GrGLFunction<GrGLGetIntegervFn> fGetIntegerv;
+ GrGLFunction<GrGLGetMultisamplefvFn> fGetMultisamplefv;
+ GrGLFunction<GrGLGetProgramBinaryFn> fGetProgramBinary;
+ GrGLFunction<GrGLGetProgramInfoLogFn> fGetProgramInfoLog;
+ GrGLFunction<GrGLGetProgramivFn> fGetProgramiv;
+ GrGLFunction<GrGLGetQueryObjecti64vFn> fGetQueryObjecti64v;
+ GrGLFunction<GrGLGetQueryObjectivFn> fGetQueryObjectiv;
+ GrGLFunction<GrGLGetQueryObjectui64vFn> fGetQueryObjectui64v;
+ GrGLFunction<GrGLGetQueryObjectuivFn> fGetQueryObjectuiv;
+ GrGLFunction<GrGLGetQueryivFn> fGetQueryiv;
+ GrGLFunction<GrGLGetRenderbufferParameterivFn> fGetRenderbufferParameteriv;
+ GrGLFunction<GrGLGetShaderInfoLogFn> fGetShaderInfoLog;
+ GrGLFunction<GrGLGetShaderivFn> fGetShaderiv;
+ GrGLFunction<GrGLGetShaderPrecisionFormatFn> fGetShaderPrecisionFormat;
+ GrGLFunction<GrGLGetStringFn> fGetString;
+ GrGLFunction<GrGLGetStringiFn> fGetStringi;
+ GrGLFunction<GrGLGetTexLevelParameterivFn> fGetTexLevelParameteriv;
+ GrGLFunction<GrGLGetUniformLocationFn> fGetUniformLocation;
+ GrGLFunction<GrGLInsertEventMarkerFn> fInsertEventMarker;
+ GrGLFunction<GrGLInvalidateBufferDataFn> fInvalidateBufferData;
+ GrGLFunction<GrGLInvalidateBufferSubDataFn> fInvalidateBufferSubData;
+ GrGLFunction<GrGLInvalidateFramebufferFn> fInvalidateFramebuffer;
+ GrGLFunction<GrGLInvalidateSubFramebufferFn> fInvalidateSubFramebuffer;
+ GrGLFunction<GrGLInvalidateTexImageFn> fInvalidateTexImage;
+ GrGLFunction<GrGLInvalidateTexSubImageFn> fInvalidateTexSubImage;
+ GrGLFunction<GrGLIsTextureFn> fIsTexture;
+ GrGLFunction<GrGLLineWidthFn> fLineWidth;
+ GrGLFunction<GrGLLinkProgramFn> fLinkProgram;
+ GrGLFunction<GrGLProgramBinaryFn> fProgramBinary;
+ GrGLFunction<GrGLProgramParameteriFn> fProgramParameteri;
+ GrGLFunction<GrGLMapBufferFn> fMapBuffer;
+ GrGLFunction<GrGLMapBufferRangeFn> fMapBufferRange;
+ GrGLFunction<GrGLMapBufferSubDataFn> fMapBufferSubData;
+ GrGLFunction<GrGLMapTexSubImage2DFn> fMapTexSubImage2D;
+ GrGLFunction<GrGLMultiDrawArraysIndirectFn> fMultiDrawArraysIndirect;
+ GrGLFunction<GrGLMultiDrawElementsIndirectFn> fMultiDrawElementsIndirect;
+ GrGLFunction<GrGLPixelStoreiFn> fPixelStorei;
+ GrGLFunction<GrGLPolygonModeFn> fPolygonMode;
+ GrGLFunction<GrGLPopGroupMarkerFn> fPopGroupMarker;
+ GrGLFunction<GrGLPushGroupMarkerFn> fPushGroupMarker;
+ GrGLFunction<GrGLQueryCounterFn> fQueryCounter;
+ GrGLFunction<GrGLReadBufferFn> fReadBuffer;
+ GrGLFunction<GrGLReadPixelsFn> fReadPixels;
+ GrGLFunction<GrGLRenderbufferStorageFn> fRenderbufferStorage;
+
+ // On OpenGL ES there are multiple incompatible extensions that add support for MSAA,
+ // and ES3 adds MSAA support to the standard. On an ES3 driver we may still use the
+ // older extensions for performance reasons or due to ES3 driver bugs. We want the function
+ // that creates the GrGLInterface to provide all available functions and internally
+ // we will select among them. They all have a method called glRenderbufferStorageMultisample*.
+ // So we have separate function pointers for GL_IMG/EXT_multisampled_to_texture,
+ // GL_CHROMIUM/ANGLE_framebuffer_multisample/ES3, and GL_APPLE_framebuffer_multisample
+ // variations.
+ //
+ // If a driver supports multiple GL_ARB_framebuffer_multisample-style extensions then we will
+ // assume the function pointers for the standard (or equivalent GL_ARB) version have
+ // been preferred over GL_EXT, GL_CHROMIUM, or GL_ANGLE variations that have reduced
+ // functionality.
+
+ // GL_EXT_multisampled_render_to_texture (preferred) or GL_IMG_multisampled_render_to_texture
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2EXT;
+ // GL_APPLE_framebuffer_multisample
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2APPLE;
+
+ // This is used to store the pointer for GL_ARB/EXT/ANGLE/CHROMIUM_framebuffer_multisample or
+ // the standard function in ES3+ or GL 3.0+.
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisample;
+
+ // Pointer to BindUniformLocationCHROMIUM from the GL_CHROMIUM_bind_uniform_location extension.
+ GrGLFunction<GrGLBindUniformLocationFn> fBindUniformLocation;
+
+ GrGLFunction<GrGLResolveMultisampleFramebufferFn> fResolveMultisampleFramebuffer;
+ GrGLFunction<GrGLSamplerParameteriFn> fSamplerParameteri;
+ GrGLFunction<GrGLSamplerParameterivFn> fSamplerParameteriv;
+ GrGLFunction<GrGLScissorFn> fScissor;
+ GrGLFunction<GrGLShaderSourceFn> fShaderSource;
+ GrGLFunction<GrGLStencilFuncFn> fStencilFunc;
+ GrGLFunction<GrGLStencilFuncSeparateFn> fStencilFuncSeparate;
+ GrGLFunction<GrGLStencilMaskFn> fStencilMask;
+ GrGLFunction<GrGLStencilMaskSeparateFn> fStencilMaskSeparate;
+ GrGLFunction<GrGLStencilOpFn> fStencilOp;
+ GrGLFunction<GrGLStencilOpSeparateFn> fStencilOpSeparate;
+ GrGLFunction<GrGLTexBufferFn> fTexBuffer;
+ GrGLFunction<GrGLTexBufferRangeFn> fTexBufferRange;
+ GrGLFunction<GrGLTexImage2DFn> fTexImage2D;
+ GrGLFunction<GrGLTexParameterfFn> fTexParameterf;
+ GrGLFunction<GrGLTexParameterfvFn> fTexParameterfv;
+ GrGLFunction<GrGLTexParameteriFn> fTexParameteri;
+ GrGLFunction<GrGLTexParameterivFn> fTexParameteriv;
+ GrGLFunction<GrGLTexSubImage2DFn> fTexSubImage2D;
+ GrGLFunction<GrGLTexStorage2DFn> fTexStorage2D;
+ GrGLFunction<GrGLTextureBarrierFn> fTextureBarrier;
+ GrGLFunction<GrGLDiscardFramebufferFn> fDiscardFramebuffer;
+ GrGLFunction<GrGLUniform1fFn> fUniform1f;
+ GrGLFunction<GrGLUniform1iFn> fUniform1i;
+ GrGLFunction<GrGLUniform1fvFn> fUniform1fv;
+ GrGLFunction<GrGLUniform1ivFn> fUniform1iv;
+ GrGLFunction<GrGLUniform2fFn> fUniform2f;
+ GrGLFunction<GrGLUniform2iFn> fUniform2i;
+ GrGLFunction<GrGLUniform2fvFn> fUniform2fv;
+ GrGLFunction<GrGLUniform2ivFn> fUniform2iv;
+ GrGLFunction<GrGLUniform3fFn> fUniform3f;
+ GrGLFunction<GrGLUniform3iFn> fUniform3i;
+ GrGLFunction<GrGLUniform3fvFn> fUniform3fv;
+ GrGLFunction<GrGLUniform3ivFn> fUniform3iv;
+ GrGLFunction<GrGLUniform4fFn> fUniform4f;
+ GrGLFunction<GrGLUniform4iFn> fUniform4i;
+ GrGLFunction<GrGLUniform4fvFn> fUniform4fv;
+ GrGLFunction<GrGLUniform4ivFn> fUniform4iv;
+ GrGLFunction<GrGLUniformMatrix2fvFn> fUniformMatrix2fv;
+ GrGLFunction<GrGLUniformMatrix3fvFn> fUniformMatrix3fv;
+ GrGLFunction<GrGLUniformMatrix4fvFn> fUniformMatrix4fv;
+ GrGLFunction<GrGLUnmapBufferFn> fUnmapBuffer;
+ GrGLFunction<GrGLUnmapBufferSubDataFn> fUnmapBufferSubData;
+ GrGLFunction<GrGLUnmapTexSubImage2DFn> fUnmapTexSubImage2D;
+ GrGLFunction<GrGLUseProgramFn> fUseProgram;
+ GrGLFunction<GrGLVertexAttrib1fFn> fVertexAttrib1f;
+ GrGLFunction<GrGLVertexAttrib2fvFn> fVertexAttrib2fv;
+ GrGLFunction<GrGLVertexAttrib3fvFn> fVertexAttrib3fv;
+ GrGLFunction<GrGLVertexAttrib4fvFn> fVertexAttrib4fv;
+ GrGLFunction<GrGLVertexAttribDivisorFn> fVertexAttribDivisor;
+ GrGLFunction<GrGLVertexAttribIPointerFn> fVertexAttribIPointer;
+ GrGLFunction<GrGLVertexAttribPointerFn> fVertexAttribPointer;
+ GrGLFunction<GrGLViewportFn> fViewport;
+
+ /* GL_NV_path_rendering */
+ GrGLFunction<GrGLMatrixLoadfFn> fMatrixLoadf;
+ GrGLFunction<GrGLMatrixLoadIdentityFn> fMatrixLoadIdentity;
+ GrGLFunction<GrGLGetProgramResourceLocationFn> fGetProgramResourceLocation;
+ GrGLFunction<GrGLPathCommandsFn> fPathCommands;
+ GrGLFunction<GrGLPathParameteriFn> fPathParameteri;
+ GrGLFunction<GrGLPathParameterfFn> fPathParameterf;
+ GrGLFunction<GrGLGenPathsFn> fGenPaths;
+ GrGLFunction<GrGLDeletePathsFn> fDeletePaths;
+ GrGLFunction<GrGLIsPathFn> fIsPath;
+ GrGLFunction<GrGLPathStencilFuncFn> fPathStencilFunc;
+ GrGLFunction<GrGLStencilFillPathFn> fStencilFillPath;
+ GrGLFunction<GrGLStencilStrokePathFn> fStencilStrokePath;
+ GrGLFunction<GrGLStencilFillPathInstancedFn> fStencilFillPathInstanced;
+ GrGLFunction<GrGLStencilStrokePathInstancedFn> fStencilStrokePathInstanced;
+ GrGLFunction<GrGLCoverFillPathFn> fCoverFillPath;
+ GrGLFunction<GrGLCoverStrokePathFn> fCoverStrokePath;
+ GrGLFunction<GrGLCoverFillPathInstancedFn> fCoverFillPathInstanced;
+ GrGLFunction<GrGLCoverStrokePathInstancedFn> fCoverStrokePathInstanced;
+ // NV_path_rendering v1.2
+ GrGLFunction<GrGLStencilThenCoverFillPathFn> fStencilThenCoverFillPath;
+ GrGLFunction<GrGLStencilThenCoverStrokePathFn> fStencilThenCoverStrokePath;
+ GrGLFunction<GrGLStencilThenCoverFillPathInstancedFn> fStencilThenCoverFillPathInstanced;
+ GrGLFunction<GrGLStencilThenCoverStrokePathInstancedFn> fStencilThenCoverStrokePathInstanced;
+ // NV_path_rendering v1.3
+ GrGLFunction<GrGLProgramPathFragmentInputGenFn> fProgramPathFragmentInputGen;
+ // CHROMIUM_path_rendering
+ GrGLFunction<GrGLBindFragmentInputLocationFn> fBindFragmentInputLocation;
+
+ /* NV_framebuffer_mixed_samples */
+ GrGLFunction<GrGLCoverageModulationFn> fCoverageModulation;
+
+ /* ARB_sync */
+ GrGLFunction<GrGLFenceSyncFn> fFenceSync;
+ GrGLFunction<GrGLIsSyncFn> fIsSync;
+ GrGLFunction<GrGLClientWaitSyncFn> fClientWaitSync;
+ GrGLFunction<GrGLWaitSyncFn> fWaitSync;
+ GrGLFunction<GrGLDeleteSyncFn> fDeleteSync;
+
+ /* ARB_internalforamt_query */
+ GrGLFunction<GrGLGetInternalformativFn> fGetInternalformativ;
+
+ /* KHR_debug */
+ GrGLFunction<GrGLDebugMessageControlFn> fDebugMessageControl;
+ GrGLFunction<GrGLDebugMessageInsertFn> fDebugMessageInsert;
+ GrGLFunction<GrGLDebugMessageCallbackFn> fDebugMessageCallback;
+ GrGLFunction<GrGLGetDebugMessageLogFn> fGetDebugMessageLog;
+ GrGLFunction<GrGLPushDebugGroupFn> fPushDebugGroup;
+ GrGLFunction<GrGLPopDebugGroupFn> fPopDebugGroup;
+ GrGLFunction<GrGLObjectLabelFn> fObjectLabel;
+
+ /* EXT_window_rectangles */
+ GrGLFunction<GrGLWindowRectanglesFn> fWindowRectangles;
+
+ /* GL_QCOM_tiled_rendering */
+ GrGLFunction<GrGLStartTilingFn> fStartTiling;
+ GrGLFunction<GrGLEndTilingFn> fEndTiling;
+ } fFunctions;
+
+#if GR_TEST_UTILS
+ // This exists for internal testing.
+ virtual void abandon() const;
+#endif
+};
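+
+// Usage sketch (illustrative, not part of the interface): a client typically
+// obtains a populated interface from a platform factory and is expected to
+// call validate() before handing it to Skia. GrGLMakeNativeInterface() is the
+// factory Skia ships for native contexts; availability depends on the build.
+//
+//     sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
+//     if (iface && iface->validate()) {
+//         if (iface->hasExtension("GL_EXT_multisampled_render_to_texture")) {
+//             // fFunctions.fRenderbufferStorageMultisampleES2EXT is usable
+//         }
+//     }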
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLTypes.h b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
new file mode 100644
index 0000000000..1dba115afe
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
@@ -0,0 +1,186 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLTypes_DEFINED
+#define GrGLTypes_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLConfig.h"
+
+/**
+ * Classifies GL contexts by which standard they implement (currently as OpenGL vs. OpenGL ES).
+ */
+enum GrGLStandard {
+ kNone_GrGLStandard,
+ kGL_GrGLStandard,
+ kGLES_GrGLStandard,
+ kWebGL_GrGLStandard,
+};
+static const int kGrGLStandardCnt = 4;
+
+// The following allow certain interfaces to be turned off at compile time
+// (for example, to lower code size).
+#if SK_ASSUME_GL_ES
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) true
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_GL
+ #define GR_IS_GR_GL(standard) true
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_WEBGL
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) true
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_GL_INTERFACE 1
+#else
+ #define GR_IS_GR_GL(standard) (kGL_GrGLStandard == standard)
+ #define GR_IS_GR_GL_ES(standard) (kGLES_GrGLStandard == standard)
+ #define GR_IS_GR_WEBGL(standard) (kWebGL_GrGLStandard == standard)
+#endif
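+
+// Illustrative sketch: the macros above are used to branch on the context's
+// standard and collapse to compile-time constants when one of the SK_ASSUME_*
+// defines is set. The helper below is hypothetical.
+//
+//     bool supportsTexStorage(GrGLStandard standard, int major, int minor) {
+//         if (GR_IS_GR_GL(standard)) {     // core in GL 4.2
+//             return major > 4 || (major == 4 && minor >= 2);
+//         }
+//         if (GR_IS_GR_GL_ES(standard)) {  // core in ES 3.0
+//             return major >= 3;
+//         }
+//         return false;
+//     }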
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The supported GL formats represented as an enum. Actual support by GrContext depends on GL
+ * context version and extensions.
+ */
+enum class GrGLFormat {
+ kUnknown,
+
+ kRGBA8,
+ kR8,
+ kALPHA8,
+ kLUMINANCE8,
+ kBGRA8,
+ kRGB565,
+ kRGBA16F,
+ kR16F,
+ kRGB8,
+ kRG8,
+ kRGB10_A2,
+ kRGBA4,
+ kSRGB8_ALPHA8,
+ kCOMPRESSED_RGB8_ETC2,
+ kCOMPRESSED_ETC1_RGB8,
+ kR16,
+ kRG16,
+ kRGBA16,
+ kRG16F,
+ kLUMINANCE16F,
+
+ kLast = kLUMINANCE16F
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Declares typedefs for all the GL types used in GrGLInterface
+ */
+
+typedef unsigned int GrGLenum;
+typedef unsigned char GrGLboolean;
+typedef unsigned int GrGLbitfield;
+typedef signed char GrGLbyte;
+typedef char GrGLchar;
+typedef short GrGLshort;
+typedef int GrGLint;
+typedef int GrGLsizei;
+typedef int64_t GrGLint64;
+typedef unsigned char GrGLubyte;
+typedef unsigned short GrGLushort;
+typedef unsigned int GrGLuint;
+typedef uint64_t GrGLuint64;
+typedef unsigned short int GrGLhalf;
+typedef float GrGLfloat;
+typedef float GrGLclampf;
+typedef double GrGLdouble;
+typedef double GrGLclampd;
+typedef void GrGLvoid;
+#ifdef _WIN64
+typedef signed long long int GrGLintptr;
+typedef signed long long int GrGLsizeiptr;
+#else
+typedef signed long int GrGLintptr;
+typedef signed long int GrGLsizeiptr;
+#endif
+typedef void* GrGLeglImage;
+typedef struct __GLsync* GrGLsync;
+
+struct GrGLDrawArraysIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirst;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+GR_STATIC_ASSERT(16 == sizeof(GrGLDrawArraysIndirectCommand));
+
+struct GrGLDrawElementsIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirstIndex;
+ GrGLuint fBaseVertex;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+GR_STATIC_ASSERT(20 == sizeof(GrGLDrawElementsIndirectCommand));
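+
+// Illustrative sketch: these structs mirror the layout GL expects in an
+// indirect draw buffer, which is why their sizes are pinned by the asserts
+// above. A hypothetical fill for a single non-instanced draw:
+//
+//     GrGLDrawArraysIndirectCommand cmd;
+//     cmd.fCount         = vertexCount;
+//     cmd.fInstanceCount = 1;
+//     cmd.fFirst         = 0;
+//     cmd.fBaseInstance  = 0;  // must stay 0 on ES without EXT_base_instance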
+
+/**
+ * KHR_debug
+ */
+typedef void (GR_GL_FUNCTION_TYPE* GRGLDEBUGPROC)(GrGLenum source,
+ GrGLenum type,
+ GrGLuint id,
+ GrGLenum severity,
+ GrGLsizei length,
+ const GrGLchar* message,
+ const void* userParam);
+
+/**
+ * EGL types.
+ */
+typedef void* GrEGLImage;
+typedef void* GrEGLDisplay;
+typedef void* GrEGLContext;
+typedef void* GrEGLClientBuffer;
+typedef unsigned int GrEGLenum;
+typedef int32_t GrEGLint;
+typedef unsigned int GrEGLBoolean;
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Types for interacting with GL resources created externally to Skia. GrBackendObjects for GL
+ * textures are really const GrGLTextureInfo*. The fFormat here should be a sized, internal format
+ * for the texture. We will try to use the sized format if the GL Context supports it, otherwise
+ * we will internally fall back to using the base internal formats.
+ */
+struct GrGLTextureInfo {
+ GrGLenum fTarget;
+ GrGLuint fID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLTextureInfo& that) const {
+ return fTarget == that.fTarget && fID == that.fID && fFormat == that.fFormat;
+ }
+};
+
+struct GrGLFramebufferInfo {
+ GrGLuint fFBOID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLFramebufferInfo& that) const {
+ return fFBOID == that.fFBOID && fFormat == that.fFormat;
+ }
+};
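+
+// Illustrative sketch: wrapping a texture created outside Skia. The numeric
+// values are the standard GL enums; GrBackendTexture is the usual consumer of
+// this struct.
+//
+//     GrGLTextureInfo info;
+//     info.fTarget = 0x0DE1;      // GL_TEXTURE_2D
+//     info.fID     = externalTexID;
+//     info.fFormat = 0x8058;      // GL_RGBA8, a sized internal format
+//     GrBackendTexture backendTex(width, height, GrMipMapped::kNo, info);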
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/mock/GrMockTypes.h b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
new file mode 100644
index 0000000000..335b48411d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockOptions_DEFINED
+#define GrMockOptions_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrBackendFormat;
+
+struct GrMockTextureInfo {
+ GrMockTextureInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fID(0) {}
+
+ GrMockTextureInfo(GrColorType colorType, int id)
+ : fColorType(colorType)
+ , fID(id) {
+ SkASSERT(fID);
+ }
+
+ bool operator==(const GrMockTextureInfo& that) const {
+ return fColorType == that.fColorType &&
+ fID == that.fID;
+ }
+
+ GrPixelConfig pixelConfig() const {
+ return GrColorTypeToPixelConfig(fColorType);
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ GrColorType fColorType;
+ int fID;
+};
+
+struct GrMockRenderTargetInfo {
+ GrMockRenderTargetInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fID(0) {}
+
+ GrMockRenderTargetInfo(GrColorType colorType, int id)
+ : fColorType(colorType)
+ , fID(id) {
+ SkASSERT(fID);
+ }
+
+ bool operator==(const GrMockRenderTargetInfo& that) const {
+ return fColorType == that.fColorType &&
+ fID == that.fID;
+ }
+
+ GrPixelConfig pixelConfig() const {
+ return GrColorTypeToPixelConfig(fColorType);
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ GrColorType colorType() const { return fColorType; }
+
+private:
+ GrColorType fColorType;
+ int fID;
+};
+
+/**
+ * A pointer to this type is used as the GrBackendContext when creating a Mock GrContext. It can be
+ * used to specify capability options for the mock context. If nullptr is used a default constructed
+ * GrMockOptions is used.
+ */
+struct GrMockOptions {
+ GrMockOptions() {
+ using Renderability = ConfigOptions::Renderability;
+ // By default RGBA_8888 and BGRA_8888 are texturable and renderable, and
+ // A8 and RGB565 are texturable.
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fRenderability = Renderability::kNonMSAA;
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kAlpha_8].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kBGR_565].fTexturable = true;
+
+ fConfigOptions[(int)GrColorType::kBGRA_8888] = fConfigOptions[(int)GrColorType::kRGBA_8888];
+ }
+
+ struct ConfigOptions {
+ enum Renderability { kNo, kNonMSAA, kMSAA };
+ Renderability fRenderability = kNo;
+ bool fTexturable = false;
+ };
+
+ // GrCaps options.
+ bool fMipMapSupport = false;
+ bool fInstanceAttribSupport = false;
+ bool fHalfFloatVertexAttributeSupport = false;
+ uint32_t fMapBufferFlags = 0;
+ int fMaxTextureSize = 2048;
+ int fMaxRenderTargetSize = 2048;
+ int fMaxVertexAttributes = 16;
+ ConfigOptions fConfigOptions[kGrColorTypeCnt];
+
+ // GrShaderCaps options.
+ bool fGeometryShaderSupport = false;
+ bool fIntegerSupport = false;
+ bool fFlatInterpolationSupport = false;
+ int fMaxVertexSamplers = 0;
+ int fMaxFragmentSamplers = 8;
+ bool fShaderDerivativeSupport = true;
+ bool fDualSourceBlendingSupport = false;
+
+ // GrMockGpu options.
+ bool fFailTextureAllocations = false;
+};
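+
+// Illustrative sketch: the mock backend is created by passing these options to
+// GrContext::MakeMock(); passing nullptr there selects the defaults set up above.
+//
+//     GrMockOptions mockOptions;
+//     mockOptions.fMaxTextureSize = 4096;
+//     sk_sp<GrContext> ctx = GrContext::MakeMock(&mockOptions);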
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
new file mode 100644
index 0000000000..773ff9dc75
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTypes_DEFINED
+#define GrMtlTypes_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/ports/SkCFObject.h"
+
+/**
+ * Declares typedefs for Metal types used in Ganesh cpp code
+ */
+typedef unsigned int GrMTLPixelFormat;
+typedef const void* GrMTLHandle;
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_METAL
+/**
+ * Types for interacting with Metal resources created externally to Skia.
+ * This is used by GrBackendObjects.
+ */
+struct GrMtlTextureInfo {
+public:
+ GrMtlTextureInfo() {}
+
+ sk_cf_obj<const void*> fTexture;
+
+ bool operator==(const GrMtlTextureInfo& that) const {
+ return fTexture == that.fTexture;
+ }
+};
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
new file mode 100644
index 0000000000..a4fd336ff7
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBackendContext_DEFINED
+#define GrVkBackendContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkMemoryAllocator.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrVkExtensions;
+
+enum GrVkExtensionFlags {
+ kEXT_debug_report_GrVkExtensionFlag = 0x0001,
+ kNV_glsl_shader_GrVkExtensionFlag = 0x0002,
+ kKHR_surface_GrVkExtensionFlag = 0x0004,
+ kKHR_swapchain_GrVkExtensionFlag = 0x0008,
+ kKHR_win32_surface_GrVkExtensionFlag = 0x0010,
+ kKHR_android_surface_GrVkExtensionFlag = 0x0020,
+ kKHR_xcb_surface_GrVkExtensionFlag = 0x0040,
+};
+
+enum GrVkFeatureFlags {
+ kGeometryShader_GrVkFeatureFlag = 0x0001,
+ kDualSrcBlend_GrVkFeatureFlag = 0x0002,
+ kSampleRateShading_GrVkFeatureFlag = 0x0004,
+};
+
+// It is not guaranteed VkPhysicalDeviceFeatures2 will be in the client's header so we forward
+// declare it here to be safe.
+struct VkPhysicalDeviceFeatures2;
+
+// The BackendContext contains all of the base Vulkan objects needed by the GrVkGpu. The assumption
+// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice
+// created must support at least one graphics queue, which is passed in as well.
+// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool
+// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) need to be created
+// in or transitioned to that family. The refs held by members of this struct must be released
+// (either by deleting the struct or manually releasing the refs) before the underlying Vulkan
+// device and instance are destroyed.
+struct SK_API GrVkBackendContext {
+ VkInstance fInstance;
+ VkPhysicalDevice fPhysicalDevice;
+ VkDevice fDevice;
+ VkQueue fQueue;
+ uint32_t fGraphicsQueueIndex;
+ uint32_t fMinAPIVersion; // Deprecated. Set fInstanceVersion instead.
+ uint32_t fInstanceVersion = 0; // Deprecated. Set fMaxAPIVersion instead.
+ // The max API version set here should match the value set in VkApplicationInfo::apiVersion when
+ // the VkInstance was created.
+ uint32_t fMaxAPIVersion = 0;
+ uint32_t fExtensions = 0; // Deprecated. Use fVkExtensions instead.
+ const GrVkExtensions* fVkExtensions = nullptr;
+ uint32_t fFeatures; // Deprecated. Use fDeviceFeatures[2] instead.
+ // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or
+ // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The
+ // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension
+ // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both
+ // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled.
+ const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr;
+ const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr;
+ sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
+ GrVkGetProc fGetProc = nullptr;
+ // This is deprecated and should be set to false. The client is responsible for managing the
+ // lifetime of the VkInstance and VkDevice objects.
+ bool fOwnsInstanceAndDevice = false;
+ // Indicates that we are working with protected content and all CommandPool and Queue operations
+ // should be done in a protected context.
+ GrProtected fProtectedContext = GrProtected::kNo;
+};
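+
+// Illustrative sketch (error handling elided): a minimal context for a client
+// that created its own VkInstance/VkDevice, using the non-deprecated fields.
+//
+//     GrVkBackendContext backendContext;
+//     backendContext.fInstance           = instance;
+//     backendContext.fPhysicalDevice     = physDev;
+//     backendContext.fDevice             = device;
+//     backendContext.fQueue              = graphicsQueue;
+//     backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
+//     backendContext.fMaxAPIVersion      = VK_API_VERSION_1_1;
+//     backendContext.fVkExtensions       = &extensions;  // a GrVkExtensions
+//     backendContext.fDeviceFeatures2    = &features2;   // as used at device creation
+//     backendContext.fGetProc            = getProc;
+//     sk_sp<GrContext> ctx = GrContext::MakeVulkan(backendContext);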
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
new file mode 100644
index 0000000000..6b14c47e71
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkExtensions_DEFINED
+#define GrVkExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkTArray.h"
+
+/**
+ * Helper class that takes in an array of extension strings for instance and device and allows for
+ * quicker querying of whether an extension is present.
+ */
+class SK_API GrVkExtensions {
+public:
+ GrVkExtensions() {}
+
+ void init(GrVkGetProc, VkInstance, VkPhysicalDevice,
+ uint32_t instanceExtensionCount, const char* const* instanceExtensions,
+ uint32_t deviceExtensionCount, const char* const* deviceExtensions);
+
+ bool hasExtension(const char[], uint32_t minVersion) const;
+
+ struct Info {
+ Info() {}
+ Info(const char* name) : fName(name), fSpecVersion(0) {}
+
+ SkString fName;
+ uint32_t fSpecVersion;
+
+ struct Less {
+ bool operator() (const Info& a, const SkString& b) {
+ return strcmp(a.fName.c_str(), b.c_str()) < 0;
+ }
+ bool operator() (const SkString& a, const GrVkExtensions::Info& b) {
+ return strcmp(a.c_str(), b.fName.c_str()) < 0;
+ }
+ };
+ };
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("**Vulkan Extensions**\n");
+ for (int i = 0; i < fExtensions.count(); ++i) {
+ SkDebugf("%s. Version: %d\n",
+ fExtensions[i].fName.c_str(), fExtensions[i].fSpecVersion);
+ }
+ SkDebugf("**End Vulkan Extensions**\n");
+ }
+#endif
+
+private:
+ void getSpecVersions(GrVkGetProc getProc, VkInstance, VkPhysicalDevice);
+
+ SkTArray<Info> fExtensions;
+};
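+
+// Illustrative sketch: init() looks up spec versions itself through the
+// getProc, so the caller only supplies the extension name lists it enabled.
+//
+//     GrVkExtensions extensions;
+//     extensions.init(getProc, instance, physDev,
+//                     instanceExtCount, instanceExtNames,
+//                     deviceExtCount, deviceExtNames);
+//     if (extensions.hasExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, 1)) {
+//         // swapchain entry points may be fetched
+//     }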
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
new file mode 100644
index 0000000000..5102496b83
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkMemoryAllocator_DEFINED
+#define GrVkMemoryAllocator_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrVkMemoryAllocator : public SkRefCnt {
+public:
+ enum class AllocationPropertyFlags {
+ kNone = 0,
+ // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger
+ // block.
+ kDedicatedAllocation = 0x1,
+ // Says that the backing memory can only be accessed by the device. Additionally the device
+ // may lazily allocate the memory. This cannot be used with buffers that will be host
+ // visible. Setting this flag does not guarantee that we will allocate memory that respects
+ // it, but we will try to prefer memory that can respect it.
+ kLazyAllocation = 0x2,
+ // The allocation will be mapped immediately and stay mapped until it is destroyed. This
+ // flag is only valid for buffers which are host visible (i.e. must have a usage other than
+ // BufferUsage::kGpuOnly).
+ kPersistentlyMapped = 0x4,
+ // Allocation can only be accessed by the device using a protected context.
+ kProtected = 0x8,
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(AllocationPropertyFlags);
+
+ enum class BufferUsage {
+ // Buffers that will only be accessed from the device (large const buffers). Will always be
+ // in device local memory.
+ kGpuOnly,
+ // Buffers that will be accessed on the host and copied to and from a GPU resource (transfer
+ // buffers). Will always be mappable and coherent memory.
+ kCpuOnly,
+ // Buffers that typically will be updated multiple times by the host and read on the gpu
+ // (e.g. uniform or vertex buffers). Will always be mappable memory, and will prefer to be
+ // in device local memory.
+ kCpuWritesGpuReads,
+ // Buffers which are typically written to by the GPU and then read on the host. Will always
+ // be mappable memory, and will prefer coherent and cached memory.
+ kGpuWritesCpuReads,
+ };
+
+ virtual bool allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
+ GrVkBackendMemory*) = 0;
+
+ virtual bool allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
+ AllocationPropertyFlags flags, GrVkBackendMemory*) = 0;
+
+ // Fills out the passed in GrVkAlloc struct for the passed in GrVkBackendMemory.
+ virtual void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const = 0;
+
+ // Maps the entire allocation and returns a pointer to the start of the allocation. The
+ // implementation may map more memory than just the allocation, but the returned pointer must
+ // point at the start of the memory for the requested allocation.
+ virtual void* mapMemory(const GrVkBackendMemory&) = 0;
+ virtual void unmapMemory(const GrVkBackendMemory&) = 0;
+
+ // The following two calls are used for managing non-coherent memory. The offset is relative to
+ // the start of the allocation and not the underlying VkDeviceMemory. Additionally the client
+ // must make sure that the offset + size passed in is less than or equal to the allocation size.
+ // It is the responsibility of the implementation to make sure all alignment requirements are
+ // followed. The client should not have to deal with any sort of alignment issues.
+ virtual void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) = 0;
+ virtual void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) = 0;
+
+ virtual void freeMemory(const GrVkBackendMemory&) = 0;
+
+ // Returns the total amount of memory that is allocated and in use by an allocation for this
+ // allocator.
+ virtual uint64_t totalUsedMemory() const = 0;
+
+ // Returns the total amount of memory that is allocated by this allocator.
+ virtual uint64_t totalAllocatedMemory() const = 0;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrVkMemoryAllocator::AllocationPropertyFlags)
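+
+// Illustrative sketch of the non-coherent mapping contract described above:
+// offsets are relative to the allocation, not the underlying VkDeviceMemory,
+// and alignment is the implementation's responsibility.
+//
+//     void* ptr = allocator->mapMemory(backendMemory);
+//     memcpy(ptr, srcData, byteSize);
+//     allocator->flushMappedMemory(backendMemory, /*offset=*/0, byteSize);
+//     allocator->unmapMemory(backendMemory);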
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkTypes.h b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
new file mode 100644
index 0000000000..9abf14d8d9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
@@ -0,0 +1,256 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypes_DEFINED
+#define GrVkTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/gpu/vk/GrVkVulkan.h"
+
+#ifndef VK_VERSION_1_1
+#error Skia requires the use of Vulkan 1.1 headers
+#endif
+
+#include <functional>
+#include "include/gpu/GrTypes.h"
+
+typedef intptr_t GrVkBackendMemory;
+
+/**
+ * Types for interacting with Vulkan resources created externally to Skia. GrBackendObjects for
+ * Vulkan textures are really const GrVkImageInfo*
+ */
+struct GrVkAlloc {
+ GrVkAlloc()
+ : fMemory(VK_NULL_HANDLE)
+ , fOffset(0)
+ , fSize(0)
+ , fFlags(0)
+ , fBackendMemory(0)
+ , fUsesSystemHeap(false) {}
+
+ GrVkAlloc(VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, uint32_t flags)
+ : fMemory(memory)
+ , fOffset(offset)
+ , fSize(size)
+ , fFlags(flags)
+ , fBackendMemory(0)
+ , fUsesSystemHeap(false) {}
+
+ VkDeviceMemory fMemory; // can be VK_NULL_HANDLE iff this is an RT and is borrowed
+ VkDeviceSize fOffset;
+ VkDeviceSize fSize; // this can be indeterminate iff the texture uses borrow semantics
+ uint32_t fFlags;
+ GrVkBackendMemory fBackendMemory; // handle to memory allocated via GrVkMemoryAllocator.
+
+ enum Flag {
+ kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping
+ kMappable_Flag = 0x2, // memory is able to be mapped.
+ };
+
+ bool operator==(const GrVkAlloc& that) const {
+ return fMemory == that.fMemory && fOffset == that.fOffset && fSize == that.fSize &&
+ fFlags == that.fFlags && fUsesSystemHeap == that.fUsesSystemHeap;
+ }
+
+private:
+ friend class GrVkHeap; // For access to usesSystemHeap
+ bool fUsesSystemHeap;
+};
+
+// This struct is used to pass in the necessary information to create a VkSamplerYcbcrConversion
+// object for a VkExternalFormatANDROID.
+struct GrVkYcbcrConversionInfo {
+ GrVkYcbcrConversionInfo()
+ : fFormat(VK_FORMAT_UNDEFINED)
+ , fExternalFormat(0)
+ , fYcbcrModel(VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY)
+ , fYcbcrRange(VK_SAMPLER_YCBCR_RANGE_ITU_FULL)
+ , fXChromaOffset(VK_CHROMA_LOCATION_COSITED_EVEN)
+ , fYChromaOffset(VK_CHROMA_LOCATION_COSITED_EVEN)
+ , fChromaFilter(VK_FILTER_NEAREST)
+ , fForceExplicitReconstruction(false) {}
+
+ GrVkYcbcrConversionInfo(VkFormat format,
+ int64_t externalFormat,
+ VkSamplerYcbcrModelConversion ycbcrModel,
+ VkSamplerYcbcrRange ycbcrRange,
+ VkChromaLocation xChromaOffset,
+ VkChromaLocation yChromaOffset,
+ VkFilter chromaFilter,
+ VkBool32 forceExplicitReconstruction,
+ VkFormatFeatureFlags formatFeatures)
+ : fFormat(format)
+ , fExternalFormat(externalFormat)
+ , fYcbcrModel(ycbcrModel)
+ , fYcbcrRange(ycbcrRange)
+ , fXChromaOffset(xChromaOffset)
+ , fYChromaOffset(yChromaOffset)
+ , fChromaFilter(chromaFilter)
+ , fForceExplicitReconstruction(forceExplicitReconstruction)
+ , fFormatFeatures(formatFeatures) {
+ SkASSERT(fYcbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY);
+ // Either format or externalFormat must be specified.
+ SkASSERT((fFormat != VK_FORMAT_UNDEFINED) ^ (externalFormat != 0));
+ }
+
+ GrVkYcbcrConversionInfo(VkSamplerYcbcrModelConversion ycbcrModel,
+ VkSamplerYcbcrRange ycbcrRange,
+ VkChromaLocation xChromaOffset,
+ VkChromaLocation yChromaOffset,
+ VkFilter chromaFilter,
+ VkBool32 forceExplicitReconstruction,
+ uint64_t externalFormat,
+ VkFormatFeatureFlags externalFormatFeatures)
+ : GrVkYcbcrConversionInfo(VK_FORMAT_UNDEFINED, externalFormat, ycbcrModel, ycbcrRange,
+ xChromaOffset, yChromaOffset, chromaFilter,
+ forceExplicitReconstruction, externalFormatFeatures) {}
+
+ bool operator==(const GrVkYcbcrConversionInfo& that) const {
+ // Invalid objects are not required to have all other fields initialized or matching.
+ if (!this->isValid() && !that.isValid()) {
+ return true;
+ }
+ return this->fFormat == that.fFormat &&
+ this->fExternalFormat == that.fExternalFormat &&
+ this->fYcbcrModel == that.fYcbcrModel &&
+ this->fYcbcrRange == that.fYcbcrRange &&
+ this->fXChromaOffset == that.fXChromaOffset &&
+ this->fYChromaOffset == that.fYChromaOffset &&
+ this->fChromaFilter == that.fChromaFilter &&
+ this->fForceExplicitReconstruction == that.fForceExplicitReconstruction;
+ }
+ bool operator!=(const GrVkYcbcrConversionInfo& that) const { return !(*this == that); }
+
+ bool isValid() const { return fYcbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; }
+
+ // Format of the source image. Must be set to VK_FORMAT_UNDEFINED for external images or
+ // a valid image format otherwise.
+ VkFormat fFormat;
+
+ // The external format. Must be non-zero for external images, zero otherwise.
+ // Should be compatible for use in a VkExternalFormatANDROID struct.
+ uint64_t fExternalFormat;
+
+ VkSamplerYcbcrModelConversion fYcbcrModel;
+ VkSamplerYcbcrRange fYcbcrRange;
+ VkChromaLocation fXChromaOffset;
+ VkChromaLocation fYChromaOffset;
+ VkFilter fChromaFilter;
+ VkBool32 fForceExplicitReconstruction;
+
+ // For external images, the format features here should be those returned in
+ // VkAndroidHardwareBufferFormatPropertiesANDROID by vkGetAndroidHardwareBufferPropertiesANDROID.
+ VkFormatFeatureFlags fFormatFeatures;
+};
+
+struct GrVkImageInfo {
+ VkImage fImage;
+ GrVkAlloc fAlloc;
+ VkImageTiling fImageTiling;
+ VkImageLayout fImageLayout;
+ VkFormat fFormat;
+ uint32_t fLevelCount;
+ uint32_t fCurrentQueueFamily;
+ GrProtected fProtected;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+
+ GrVkImageInfo()
+ : fImage(VK_NULL_HANDLE)
+ , fAlloc()
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fImageLayout(VK_IMAGE_LAYOUT_UNDEFINED)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fLevelCount(0)
+ , fCurrentQueueFamily(VK_QUEUE_FAMILY_IGNORED)
+ , fProtected(GrProtected::kNo)
+ , fYcbcrConversionInfo() {}
+
+ GrVkImageInfo(VkImage image,
+ GrVkAlloc alloc,
+ VkImageTiling imageTiling,
+ VkImageLayout layout,
+ VkFormat format,
+ uint32_t levelCount,
+ uint32_t currentQueueFamily = VK_QUEUE_FAMILY_IGNORED,
+ GrProtected isProtected = GrProtected::kNo,
+ GrVkYcbcrConversionInfo ycbcrConversionInfo = GrVkYcbcrConversionInfo())
+ : fImage(image)
+ , fAlloc(alloc)
+ , fImageTiling(imageTiling)
+ , fImageLayout(layout)
+ , fFormat(format)
+ , fLevelCount(levelCount)
+ , fCurrentQueueFamily(currentQueueFamily)
+ , fProtected(isProtected)
+ , fYcbcrConversionInfo(ycbcrConversionInfo) {}
+
+ GrVkImageInfo(const GrVkImageInfo& info, VkImageLayout layout)
+ : fImage(info.fImage)
+ , fAlloc(info.fAlloc)
+ , fImageTiling(info.fImageTiling)
+ , fImageLayout(layout)
+ , fFormat(info.fFormat)
+ , fLevelCount(info.fLevelCount)
+ , fCurrentQueueFamily(info.fCurrentQueueFamily)
+ , fProtected(info.fProtected)
+ , fYcbcrConversionInfo(info.fYcbcrConversionInfo) {}
+
+ // This gives a way for a client to update the layout of the Image if they change the layout
+ // while we're still holding onto the wrapped texture. They will first need to get a handle
+ // to our internal GrVkImageInfo by calling getTextureHandle on a GrVkTexture.
+ void updateImageLayout(VkImageLayout layout) { fImageLayout = layout; }
+
+ bool operator==(const GrVkImageInfo& that) const {
+ return fImage == that.fImage && fAlloc == that.fAlloc &&
+ fImageTiling == that.fImageTiling && fImageLayout == that.fImageLayout &&
+ fFormat == that.fFormat && fLevelCount == that.fLevelCount &&
+ fCurrentQueueFamily == that.fCurrentQueueFamily && fProtected == that.fProtected &&
+ fYcbcrConversionInfo == that.fYcbcrConversionInfo;
+ }
+};
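+
+// Illustrative sketch: wrapping an externally created VkImage. The layout
+// recorded here must match the image's actual layout when Skia first uses it.
+//
+//     GrVkImageInfo imageInfo(vkImage, alloc,
+//                             VK_IMAGE_TILING_OPTIMAL,
+//                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+//                             VK_FORMAT_R8G8B8A8_UNORM,
+//                             /*levelCount=*/1);
+//     GrBackendTexture backendTex(width, height, imageInfo);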
+
+using GrVkGetProc = std::function<PFN_vkVoidFunction(
+ const char*, // function name
+ VkInstance, // instance or VK_NULL_HANDLE
+ VkDevice // device or VK_NULL_HANDLE
+ )>;
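+
+// Illustrative sketch: a GrVkGetProc is commonly built on the loader's two
+// entry points, dispatching on whether a device handle is supplied.
+//
+//     GrVkGetProc getProc = [](const char* name, VkInstance instance, VkDevice device) {
+//         if (device != VK_NULL_HANDLE) {
+//             return vkGetDeviceProcAddr(device, name);
+//         }
+//         return vkGetInstanceProcAddr(instance, name);
+//     };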
+
+/**
+ * This object is wrapped in a GrBackendDrawableInfo and passed in as an argument to
+ * drawBackendGpu() calls on an SkDrawable. The drawable will use this info to inject direct
+ * Vulkan calls into our stream of GPU draws.
+ *
+ * The SkDrawable is given a secondary VkCommandBuffer in which to record draws. The GPU backend
+ * will then execute that command buffer within a render pass it is using for its own draws. The
+ * drawable is also given the attachment of the color index, a compatible VkRenderPass, and the
+ * VkFormat of the color attachment so that it can make VkPipeline objects for the draws. The
+ * SkDrawable must not alter the state of the VkRenderpass or sub pass.
+ *
+ * Additionally, the SkDrawable may fill in the passed in fDrawBounds with the bounds of the draws
+ * that it submits to the command buffer. This will be used by the GPU backend for setting the
+ * bounds in vkCmdBeginRenderPass. If fDrawBounds is not updated, we will assume that the entire
+ * attachment may have been written to.
+ *
+ * The SkDrawable is always allowed to create its own command buffers and submit them to the queue
+ * to render offscreen textures which will be sampled in draws added to the passed in
+ * VkCommandBuffer. If this is done the SkDrawable is in charge of adding the required memory
+ * barriers to the queue for the sampled images since the Skia backend will not do this.
+ *
+ * The VkImage is informational only and should not be used or modified in any way.
+ */
+struct GrVkDrawableInfo {
+ VkCommandBuffer fSecondaryCommandBuffer;
+ uint32_t fColorAttachmentIndex;
+ VkRenderPass fCompatibleRenderPass;
+ VkFormat fFormat;
+ VkRect2D* fDrawBounds;
+ VkImage fImage;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkVulkan.h b/gfx/skia/skia/include/gpu/vk/GrVkVulkan.h
new file mode 100644
index 0000000000..65cbf9b8ba
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkVulkan.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkVulkan_DEFINED
+#define GrVkVulkan_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_core.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan
+// headers stay up to date for our needs
+#include <vulkan/vulkan_core.h>
+#endif
+
+#ifdef SK_BUILD_FOR_ANDROID
+// This is needed to get android extensions for external memory
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_android.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan
+// headers stay up to date for our needs
+#include <vulkan/vulkan_android.h>
+#endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/pathops/SkPathOps.h b/gfx/skia/skia/include/pathops/SkPathOps.h
new file mode 100644
index 0000000000..2c799d90a0
--- /dev/null
+++ b/gfx/skia/skia/include/pathops/SkPathOps.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOps_DEFINED
+#define SkPathOps_DEFINED
+
+#include "include/core/SkPreConfig.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+
+class SkPath;
+struct SkRect;
+
+
+// FIXME: move everything below into the SkPath class
+/**
+ * The logical operations that can be performed when combining two paths.
+ */
+enum SkPathOp {
+ kDifference_SkPathOp, //!< subtract the op path from the first path
+ kIntersect_SkPathOp, //!< intersect the two paths
+ kUnion_SkPathOp, //!< union (inclusive-or) the two paths
+ kXOR_SkPathOp, //!< exclusive-or the two paths
+ kReverseDifference_SkPathOp, //!< subtract the first path from the op path
+};
+
+/** Set this path to the result of applying the Op to this path and the
+ specified path: this = (this op operand).
+ The resulting path will be constructed from non-overlapping contours.
+ The curve order is reduced where possible so that cubics may be turned
+ into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param one The first operand (for difference, the minuend)
+ @param two The second operand (for difference, the subtrahend)
+ @param op The operator to apply.
+ @param result The product of the operands. The result may be one of the
+ inputs.
+ @return True if the operation succeeded.
+ */
+bool SK_API Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result);
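+
+// Illustrative sketch: intersecting two paths into a third (the result may
+// alias an input).
+//
+//     SkPath rect, circle, clipped;
+//     rect.addRect({0, 0, 100, 100});
+//     circle.addCircle(100, 100, 50);
+//     if (Op(rect, circle, kIntersect_SkPathOp, &clipped)) {
+//         // clipped now holds the quarter-disc overlap
+//     }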
+
+/** Set this path to a set of non-overlapping contours that describe the
+ same area as the original path.
+ The curve order is reduced where possible so that cubics may
+ be turned into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param path The path to simplify.
+ @param result The simplified path. The result may be the input.
+ @return True if simplification succeeded.
+ */
+bool SK_API Simplify(const SkPath& path, SkPath* result);
+
+/** Set the resulting rectangle to the tight bounds of the path.
+
+ @param path The path measured.
+ @param result The tight bounds of the path.
+ @return True if the bounds could be computed.
+ */
+bool SK_API TightBounds(const SkPath& path, SkRect* result);
+
+/** Set the result, with fill type winding, to an area equivalent to path.
+ Returns true if successful. Does not detect if path contains contours which
+ contain self-crossings or cross other contours; in these cases, may return
+ true even though the result does not fill the same area as path.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified. The result may be the input.
+
+ @param path The path typically with fill type set to even odd.
+ @param result The equivalent path with fill type set to winding.
+ @return True if winding path was set.
+ */
+bool SK_API AsWinding(const SkPath& path, SkPath* result);
+
+/** Perform a series of path operations, optimized for unioning many paths together.
+ */
+class SK_API SkOpBuilder {
+public:
+ /** Add one or more paths and their operand. The builder is empty before the first
+ path is added, so the result of a single add is (emptyPath OP path).
+
+ @param path The second operand.
+ @param _operator The operator to apply to the existing and supplied paths.
+ */
+ void add(const SkPath& path, SkPathOp _operator);
+
+ /** Computes the sum of all paths and operands, and resets the builder to its
+ initial state.
+
+ @param result The product of the operands.
+ @return True if the operation succeeded.
+ */
+ bool resolve(SkPath* result);
+
+private:
+ SkTArray<SkPath> fPathRefs;
+ SkTDArray<SkPathOp> fOps;
+
+ static bool FixWinding(SkPath* path);
+ static void ReversePath(SkPath* path);
+ void reset();
+};
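+
+// Illustrative sketch: unioning many paths through the builder instead of
+// chaining pairwise Op() calls.
+//
+//     SkOpBuilder builder;
+//     for (const SkPath& p : paths) {
+//         builder.add(p, kUnion_SkPathOp);
+//     }
+//     SkPath merged;
+//     if (builder.resolve(&merged)) {
+//         // merged holds the union of every added path
+//     }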
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkCFObject.h b/gfx/skia/skia/include/ports/SkCFObject.h
new file mode 100644
index 0000000000..c1517037f4
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkCFObject.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCFObject_DEFINED
+#define SkCFObject_DEFINED
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#import <CoreFoundation/CoreFoundation.h>
+
+/**
+ * Wrapper class for managing lifetime of CoreFoundation objects. It will call
+ * CFRetain and CFRelease appropriately on creation, assignment, and deletion.
+ * Based on sk_sp<>.
+ */
+template <typename T> static inline T SkCFSafeRetain(T obj) {
+ if (obj) {
+ CFRetain(obj);
+ }
+ return obj;
+}
+
+template <typename T> static inline void SkCFSafeRelease(T obj) {
+ if (obj) {
+ CFRelease(obj);
+ }
+}
+
+template <typename T> class sk_cf_obj {
+public:
+ using element_type = T;
+
+ constexpr sk_cf_obj() : fObject(nullptr) {}
+
+ /**
+ * Shares the underlying object by calling CFRetain(), so that the argument and the newly
+ * created sk_cf_obj both have a reference to it.
+ */
+ sk_cf_obj(const sk_cf_obj<T>& that) : fObject(SkCFSafeRetain(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created sk_cf_obj. Afterwards only
+ * the new sk_cf_obj will have a reference to the object, and the argument will point to null.
+ * No call to CFRetain() or CFRelease() will be made.
+ */
+ sk_cf_obj(sk_cf_obj<T>&& that) : fObject(that.release()) {}
+
+ /**
+ * Adopt the bare object into the newly created sk_cf_obj.
+ * No call to CFRetain() or CFRelease() will be made.
+ */
+ explicit sk_cf_obj(T obj) {
+ fObject = obj;
+ }
+
+ /**
+ * Calls CFRelease() on the underlying object pointer.
+ */
+ ~sk_cf_obj() {
+ SkCFSafeRelease(fObject);
+ SkDEBUGCODE(fObject = nullptr);
+ }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling CFRetain() on it. If this
+ * sk_cf_obj previously had a reference to an object (i.e. not null) it will call CFRelease()
+ * on that object.
+ */
+ sk_cf_obj<T>& operator=(const sk_cf_obj<T>& that) {
+ if (this != &that) {
+ this->reset(SkCFSafeRetain(that.get()));
+ }
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the sk_cf_obj. If the sk_cf_obj
+ * previously held a reference to another object, CFRelease() will be called on that object.
+ * No call to CFRetain() will be made.
+ */
+ sk_cf_obj<T>& operator=(sk_cf_obj<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ T get() const { return fObject; }
+
+ /**
+ * Adopt the new object, and call CFRelease() on any previously held object (if not null).
+ * No call to CFRetain() will be made.
+ */
+ void reset(T object = nullptr) {
+ T oldObject = fObject;
+ fObject = object;
+ SkCFSafeRelease(oldObject);
+ }
+
+ /**
+ * Shares the new object by calling CFRetain() on it. If this sk_cf_obj previously had a
+ * reference to an object (i.e. not null) it will call CFRelease() on that object.
+ */
+ void retain(T object) {
+ if (this->fObject != object) {
+ this->reset(SkCFSafeRetain(object));
+ }
+ }
+
+ /**
+ * Return the original object, and set the internal object to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to CFRelease() will be made.
+ */
+ T SK_WARN_UNUSED_RESULT release() {
+ T obj = fObject;
+ fObject = nullptr;
+ return obj;
+ }
+
+private:
+ T fObject;
+};
+
+template <typename T> inline bool operator==(const sk_cf_obj<T>& a,
+ const sk_cf_obj<T>& b) {
+ return a.get() == b.get();
+}
+
+template <typename T> inline bool operator!=(const sk_cf_obj<T>& a,
+ const sk_cf_obj<T>& b) {
+ return a.get() != b.get();
+}
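+
+// Illustrative sketch: adopting a newly created CF object (which arrives with
+// a +1 reference from the Create call) so CFRelease() runs at scope exit.
+//
+//     sk_cf_obj<CFStringRef> str(CFStringCreateWithCString(
+//             kCFAllocatorDefault, "skia", kCFStringEncodingUTF8));
+//     if (str.get()) {
+//         CFIndex len = CFStringGetLength(str.get());
+//     }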
+
+#endif // SK_BUILD_FOR_MAC || SK_BUILD_FOR_IOS
+#endif // SkCFObject_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
new file mode 100644
index 0000000000..557381b7b2
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontConfigInterface_DEFINED
+#define SkFontConfigInterface_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+
+class SkFontMgr;
+
+/**
+ * \class SkFontConfigInterface
+ *
+ * A simple interface for remotable font management.
+ * The global instance can be found with RefGlobal().
+ */
+class SK_API SkFontConfigInterface : public SkRefCnt {
+public:
+
+ /**
+ * Returns the global SkFontConfigInterface instance. If it is not
+ * nullptr, calls ref() on it. The caller must balance this with a call to
+ * unref(). The default SkFontConfigInterface is the result of calling
+ * GetSingletonDirectInterface.
+ */
+ static sk_sp<SkFontConfigInterface> RefGlobal();
+
+ /**
+ * Replace the current global instance with the specified one.
+ */
+ static void SetGlobal(sk_sp<SkFontConfigInterface> fc);
+
+ /**
+ * This should be treated as private to the impl of SkFontConfigInterface.
+ * Callers should not change or expect any particular values. It is meant
+ * to be a union of possible storage types to aid the impl.
+ */
+ struct FontIdentity {
+ FontIdentity() : fID(0), fTTCIndex(0) {}
+
+ bool operator==(const FontIdentity& other) const {
+ return fID == other.fID &&
+ fTTCIndex == other.fTTCIndex &&
+ fString == other.fString;
+ }
+ bool operator!=(const FontIdentity& other) const {
+ return !(*this == other);
+ }
+
+ uint32_t fID;
+ int32_t fTTCIndex;
+ SkString fString;
+ SkFontStyle fStyle;
+
+ // If buffer is NULL, just return the number of bytes that would have
+ // been written. Will pad contents to a multiple of 4.
+ size_t writeToMemory(void* buffer = nullptr) const;
+
+ // Recreate from a flattened buffer, returning the number of bytes read.
+ size_t readFromMemory(const void* buffer, size_t length);
+ };
+
+ /**
+ * Given a familyName and style, find the best match.
+ *
+ * If a match is found, return true and set outFontIdentifier to it.
+ * If outFamilyName is not null, assign the found familyName to it
+ * (which may differ from the requested familyName).
+ * If outStyle is not null, assign the found style to it
+ * (which may differ from the requested style).
+ *
+ * If a match is not found, return false, and ignore all out parameters.
+ */
+ virtual bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) = 0;
+
+ /**
+ * Given a FontIdentity, open a stream to access its data, or return null
+ * if the identity's data is not available. The caller is responsible for
+ * deleting the stream when it is done accessing the data.
+ */
+ virtual SkStreamAsset* openStream(const FontIdentity&) = 0;
+
+ /**
+ * Return an SkTypeface for the given FontIdentity.
+ *
+ * The default implementation simply returns a new typeface built using data obtained from
+ * openStream(), but derived classes may implement more complex caching schemes.
+ */
+ virtual sk_sp<SkTypeface> makeTypeface(const FontIdentity& identity) {
+ return SkTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset>(this->openStream(identity)),
+ identity.fTTCIndex);
+
+ }
+
+ /**
+ * Return a singleton instance of a direct subclass that calls into
+ * libfontconfig. This does not affect the refcnt of the returned instance.
+ */
+ static SkFontConfigInterface* GetSingletonDirectInterface();
+
+ typedef SkRefCnt INHERITED;
+};
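+
+// Illustrative sketch: resolving a family name through the global instance.
+//
+//     sk_sp<SkFontConfigInterface> fci = SkFontConfigInterface::RefGlobal();
+//     SkFontConfigInterface::FontIdentity identity;
+//     SkString foundFamily;
+//     SkFontStyle foundStyle;
+//     if (fci && fci->matchFamilyName("sans-serif", SkFontStyle::Normal(),
+//                                     &identity, &foundFamily, &foundStyle)) {
+//         sk_sp<SkTypeface> face = fci->makeTypeface(identity);
+//     }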
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
new file mode 100644
index 0000000000..05771257d2
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_FontConfigInterface_DEFINED
+#define SkFontMgr_FontConfigInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+class SkFontConfigInterface;
+
+/** Creates a SkFontMgr which wraps a SkFontConfigInterface. */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci);
+
+#endif // #ifndef SkFontMgr_FontConfigInterface_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_android.h b/gfx/skia/skia/include/ports/SkFontMgr_android.h
new file mode 100644
index 0000000000..d68f3ba034
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_android.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_DEFINED
+#define SkFontMgr_android_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkFontMgr;
+
+struct SkFontMgr_Android_CustomFonts {
+ /** When specifying custom fonts, indicates how to use system fonts. */
+ enum SystemFontUse {
+ kOnlyCustom, /** Use only custom fonts. NDK compliant. */
+ kPreferCustom, /** Use custom fonts before system fonts. */
+ kPreferSystem /** Use system fonts before custom fonts. */
+ };
+ /** Whether or not to use system fonts. */
+ SystemFontUse fSystemFontUse;
+
+ /** Base path to resolve relative font file names. If a directory, should end with '/'. */
+ const char* fBasePath;
+
+ /** Optional custom configuration file to use. */
+ const char* fFontsXml;
+
+ /** Optional custom configuration file for fonts which provide fallback.
+ * If the new-style (version > 21) fontsXml format is used, this should be NULL.
+ */
+ const char* fFallbackFontsXml;
+
+ /** Optional custom flag. If set to true, the SkFontMgr will acquire all requisite
+ * system IO resources on initialization.
+ */
+ bool fIsolated;
+};
+
+/** Create a font manager for Android. If 'custom' is NULL, use only system fonts. */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom);
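+
+// Illustrative sketch: a custom-font setup that still falls back to system
+// fonts. The paths are hypothetical.
+//
+//     SkFontMgr_Android_CustomFonts custom;
+//     custom.fSystemFontUse    = SkFontMgr_Android_CustomFonts::kPreferCustom;
+//     custom.fBasePath         = "/data/fonts/";
+//     custom.fFontsXml         = "/data/fonts/fonts.xml";
+//     custom.fFallbackFontsXml = nullptr;
+//     custom.fIsolated         = false;
+//     sk_sp<SkFontMgr> mgr = SkFontMgr_New_Android(&custom);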
+
+#endif // SkFontMgr_android_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_directory.h b/gfx/skia/skia/include/ports/SkFontMgr_directory.h
new file mode 100644
index 0000000000..b1a60fb4da
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_directory.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_directory_DEFINED
+#define SkFontMgr_directory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager which scans a given directory for font files.
+ * This font manager uses FreeType for rendering.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Directory(const char* dir);
+
+#endif // SkFontMgr_directory_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_empty.h b/gfx/skia/skia/include/ports/SkFontMgr_empty.h
new file mode 100644
index 0000000000..e5756421d0
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_empty.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_empty_DEFINED
+#define SkFontMgr_empty_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager that contains no built-in fonts.
+ * This font manager uses FreeType for rendering.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Empty();
+
+#endif // SkFontMgr_empty_DEFINED
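
Since this manager has no built-in fonts, every face must come from the caller, e.g. via SkFontMgr::makeFromFile(). A sketch with a placeholder path:

```cpp
#include "include/core/SkFontMgr.h"
#include "include/core/SkTypeface.h"
#include "include/ports/SkFontMgr_empty.h"

// Load an application-bundled font through the empty manager's FreeType backend.
sk_sp<SkTypeface> load_embedded_face() {
    sk_sp<SkFontMgr> mgr = SkFontMgr_New_Custom_Empty();
    return mgr->makeFromFile("resources/MyFont.ttf", /*ttcIndex=*/0);
}
```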
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
new file mode 100644
index 0000000000..4b2bb2d297
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_fontconfig_DEFINED
+#define SkFontMgr_fontconfig_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include <fontconfig/fontconfig.h>
+
+class SkFontMgr;
+
+/** Create a font manager around a FontConfig instance.
+ * If 'fc' is NULL, will use a new default config.
+ * Takes ownership of 'fc' and will call FcConfigDestroy on it.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FontConfig(FcConfig* fc);
+
+#endif // #ifndef SkFontMgr_fontconfig_DEFINED
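
A minimal sketch; FcInitLoadConfigAndFonts() is the standard FontConfig call for building a caller-owned config, and ownership passes to Skia as the header comment states:

```cpp
#include <fontconfig/fontconfig.h>
#include "include/core/SkFontMgr.h"
#include "include/ports/SkFontMgr_fontconfig.h"

sk_sp<SkFontMgr> make_fontconfig_fontmgr() {
    FcConfig* fc = FcInitLoadConfigAndFonts();  // nullptr would select the default config
    return SkFontMgr_New_FontConfig(fc);        // takes ownership, calls FcConfigDestroy()
}
```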
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h b/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h
new file mode 100644
index 0000000000..d20530af72
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_fuchsia_DEFINED
+#define SkFontMgr_fuchsia_DEFINED
+
+#include <fuchsia/fonts/cpp/fidl.h>
+
+#include "include/core/SkRefCnt.h"
+
+class SkFontMgr;
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider);
+
+#endif // SkFontMgr_fuchsia_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_indirect.h b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
new file mode 100644
index 0000000000..4cdd445afb
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_indirect_DEFINED
+#define SkFontMgr_indirect_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTArray.h"
+
+class SkData;
+class SkFontStyle;
+class SkStreamAsset;
+class SkString;
+
+class SK_API SkFontMgr_Indirect : public SkFontMgr {
+public:
+ // TODO: The SkFontMgr is only used for createFromStream/File/Data.
+ // In the future these calls should be broken out into their own interface
+ // with a name like SkFontRenderer.
+ SkFontMgr_Indirect(sk_sp<SkFontMgr> impl, sk_sp<SkRemotableFontMgr> proxy)
+ : fImpl(std::move(impl)), fProxy(std::move(proxy))
+ { }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override;
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle&,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override;
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const override;
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ SkTypeface* createTypefaceFromFontId(const SkFontIdentity& fontId) const;
+
+ sk_sp<SkFontMgr> fImpl;
+ sk_sp<SkRemotableFontMgr> fProxy;
+
+ struct DataEntry {
+ uint32_t fDataId; // key1
+ uint32_t fTtcIndex; // key2
+ SkTypeface* fTypeface; // value: weak ref to typeface
+
+ DataEntry() { }
+
+ DataEntry(DataEntry&& that)
+ : fDataId(that.fDataId)
+ , fTtcIndex(that.fTtcIndex)
+ , fTypeface(that.fTypeface)
+ {
+ SkDEBUGCODE(that.fDataId = SkFontIdentity::kInvalidDataId;)
+ SkDEBUGCODE(that.fTtcIndex = 0xbbadbeef;)
+ that.fTypeface = nullptr;
+ }
+
+ ~DataEntry() {
+ if (fTypeface) {
+ fTypeface->weak_unref();
+ }
+ }
+ };
+ /**
+ * This cache is essentially { dataId: { ttcIndex: typeface } }
+ * For data caching we want a mapping from data id to weak references to
+ * typefaces with that data id. By storing the index next to the typeface,
+ * this data cache also acts as a typeface cache.
+ */
+ mutable SkTArray<DataEntry> fDataCache;
+ mutable SkMutex fDataCacheMutex;
+
+ friend class SkStyleSet_Indirect;
+};
+
+#endif
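
A sketch of how the pieces are meant to compose, using the DirectWrite factories declared in SkTypeface_win.h further below; on other platforms any SkFontMgr/SkRemotableFontMgr pair would do:

```cpp
#include "include/core/SkFontMgr.h"
#include "include/ports/SkFontMgr_indirect.h"
#include "include/ports/SkRemotableFontMgr.h"
#include "include/ports/SkTypeface_win.h"

// Lookups go through the remotable proxy; glyphs are rendered by the impl.
sk_sp<SkFontMgr> make_indirect_fontmgr() {
    sk_sp<SkRemotableFontMgr> proxy = SkRemotableFontMgr_New_DirectWrite();
    if (!proxy) {
        return nullptr;  // DirectWrite could not be initialized
    }
    sk_sp<SkFontMgr> impl = SkFontMgr_New_DirectWriteRenderer(proxy);
    return impl ? sk_make_sp<SkFontMgr_Indirect>(std::move(impl), std::move(proxy))
                : nullptr;
}
```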
diff --git a/gfx/skia/skia/include/ports/SkImageGeneratorCG.h b/gfx/skia/skia/include/ports/SkImageGeneratorCG.h
new file mode 100644
index 0000000000..756ebb9386
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkImageGeneratorCG.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+#include <memory>
+
+namespace SkImageGeneratorCG {
+SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedCG(sk_sp<SkData>);
+}
+
+#endif //defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
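
A minimal decode sketch; SkImage::MakeFromGenerator() wraps the generator in a lazily-decoded image in this Skia version:

```cpp
#include "include/core/SkData.h"
#include "include/core/SkImage.h"
#include "include/ports/SkImageGeneratorCG.h"

// Decode an encoded image (PNG, JPEG, ...) through Core Graphics.
sk_sp<SkImage> decode_with_cg(const char* path) {
    sk_sp<SkData> encoded = SkData::MakeFromFileName(path);
    if (!encoded) {
        return nullptr;
    }
    auto gen = SkImageGeneratorCG::MakeFromEncodedCG(std::move(encoded));
    return gen ? SkImage::MakeFromGenerator(std::move(gen)) : nullptr;
}
```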
diff --git a/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h b/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h
new file mode 100644
index 0000000000..eb57a20956
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+#include <memory>
+
+/*
+ * Any Windows program that uses COM must initialize the COM library by calling
+ * the CoInitializeEx function. In addition, each thread that uses a COM
+ * interface must make a separate call to this function.
+ *
+ * For every successful call to CoInitializeEx, the thread must call
+ * CoUninitialize before it exits.
+ *
+ * SkImageGeneratorWIC requires the COM library and leaves it to the client to
+ * initialize COM for their application.
+ *
+ * For more information on initializing COM, please see:
+ * https://msdn.microsoft.com/en-us/library/windows/desktop/ff485844.aspx
+ */
+namespace SkImageGeneratorWIC {
+SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedWIC(sk_sp<SkData>);
+}
+
+#endif // SK_BUILD_FOR_WIN
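
A sketch of the COM contract described above. The decode is lazy, so in real code COM must remain initialized for as long as the returned image may be read; CoUninitialize() belongs at thread shutdown:

```cpp
#include <objbase.h>  // CoInitializeEx

#include "include/core/SkData.h"
#include "include/core/SkImage.h"
#include "include/ports/SkImageGeneratorWIC.h"

// Assumes this runs on a thread that has not yet initialized COM.
sk_sp<SkImage> decode_with_wic(sk_sp<SkData> encoded) {
    if (FAILED(CoInitializeEx(nullptr, COINIT_MULTITHREADED))) {
        return nullptr;
    }
    // Balance with CoUninitialize() when this thread is done using WIC.
    auto gen = SkImageGeneratorWIC::MakeFromEncodedWIC(std::move(encoded));
    return gen ? SkImage::MakeFromGenerator(std::move(gen)) : nullptr;
}
```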
diff --git a/gfx/skia/skia/include/ports/SkRemotableFontMgr.h b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
new file mode 100644
index 0000000000..ebe1bd07d6
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemotableFontMgr_DEFINED
+#define SkRemotableFontMgr_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTemplates.h"
+
+class SkDataTable;
+class SkStreamAsset;
+
+struct SK_API SkFontIdentity {
+ static const uint32_t kInvalidDataId = 0xFFFFFFFF;
+
+ // Note that fDataId is a data identifier, not a font identifier.
+ // (fDataID, fTtcIndex) can be seen as a font identifier.
+ uint32_t fDataId;
+ uint32_t fTtcIndex;
+
+    // On Linux/FontConfig there is also the ability to specify preferences for rendering:
+    // antialias, embedded bitmaps, autohint, hinting, hintstyle, and lcd rendering may
+    // each be set explicitly or left as no-preference.
+    // (No-preference is resolved against globals set by the platform.)
+    // Since they may be selected against, these are really 'extensions' to SkFontStyle,
+    // and SkFontStyle should pick these up.
+ SkFontStyle fFontStyle;
+};
+
+class SK_API SkRemotableFontIdentitySet : public SkRefCnt {
+public:
+ SkRemotableFontIdentitySet(int count, SkFontIdentity** data);
+
+ int count() const { return fCount; }
+ const SkFontIdentity& at(int index) const { return fData[index]; }
+
+ static SkRemotableFontIdentitySet* NewEmpty();
+
+private:
+ SkRemotableFontIdentitySet() : fCount(0), fData() { }
+
+ friend SkRemotableFontIdentitySet* sk_remotable_font_identity_set_new();
+
+ int fCount;
+ SkAutoTMalloc<SkFontIdentity> fData;
+
+ typedef SkRefCnt INHERITED;
+};
+
+class SK_API SkRemotableFontMgr : public SkRefCnt {
+public:
+ /**
+ * Returns all of the fonts with the given familyIndex.
+ * Returns NULL if the index is out of bounds.
+ * Returns empty if there are no fonts at the given index.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* getIndex(int familyIndex) const = 0;
+
+ /**
+ * Returns the closest match to the given style in the given index.
+ * If there are no available fonts at the given index, the return value's
+ * data id will be kInvalidDataId.
+ */
+ virtual SkFontIdentity matchIndexStyle(int familyIndex, const SkFontStyle&) const = 0;
+
+ /**
+ * Returns all the fonts on the system with the given name.
+ * If the given name is NULL, will return the default font family.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * It is possible that this will return fonts not accessible from
+ * getIndex(int) or matchIndexStyle(int, SkFontStyle) due to
+ * hidden or auto-activated fonts.
+ *
+     * The matching may be done in a system-dependent way. The name may be
+     * matched case-insensitively, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return fonts which are somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* matchName(const char familyName[]) const = 0;
+
+ /**
+ * Returns the closest matching font to the specified name and style.
+ * If there are no available fonts which match the name, the return value's
+ * data id will be kInvalidDataId.
+ * If the given name is NULL, the match will be against any default fonts.
+ *
+ * It is possible that this will return a font identity not accessible from
+ * methods returning sets due to hidden or auto-activated fonts.
+ *
+     * The matching may be done in a system-dependent way. The name may be
+     * matched case-insensitively, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return a font which is somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkFontIdentity matchNameStyle(const char familyName[], const SkFontStyle&) const = 0;
+
+ /**
+ * Use the system fall-back to find a font for the given character.
+ * If no font can be found for the character, the return value's data id
+ * will be kInvalidDataId.
+ * If the name is NULL, the match will start against any default fonts.
+     * If bcp47 is NULL, a default locale will be assumed.
+     *
+     * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+     * so it is fine to just pass an ISO 639 code here.
+ */
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+                                                   SkUnichar character) const = 0;
+
+ /**
+ * Returns the data for the given data id.
+ * Will return NULL if the data id is invalid.
+ * Note that this is a data id, not a font id.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkStreamAsset* getData(int dataId) const = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
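
A client-side sketch against this interface; 'remote' stands in for any concrete backend (for example the DirectWrite one declared in SkTypeface_win.h below), and the unique_ptr wrap is one reasonable way to take ownership of the returned stream:

```cpp
#include <memory>

#include "include/core/SkFontStyle.h"
#include "include/core/SkStream.h"
#include "include/ports/SkRemotableFontMgr.h"

// Resolve a family+style to the backing font data via the remote interface.
std::unique_ptr<SkStreamAsset> fetch_font_data(const SkRemotableFontMgr* remote) {
    SkFontIdentity id = remote->matchNameStyle("Segoe UI", SkFontStyle::Normal());
    if (id.fDataId == SkFontIdentity::kInvalidDataId) {
        return nullptr;  // no font matched on this system
    }
    return std::unique_ptr<SkStreamAsset>(remote->getData(id.fDataId));
}
```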
diff --git a/gfx/skia/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
new file mode 100644
index 0000000000..f70fb07ef5
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
@@ -0,0 +1,18 @@
+#ifndef SkTypeface_cairo_DEFINED
+#define SkTypeface_cairo_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "include/core/SkSurfaceProps.h"
+
+struct FT_FaceRec_;
+typedef FT_FaceRec_* FT_Face;
+
+SK_API extern void SkInitCairoFT(bool fontHintingEnabled);
+
+SK_API extern SkTypeface* SkCreateTypefaceFromCairoFTFont(
+ FT_Face face = nullptr, void* faceContext = nullptr,
+ SkPixelGeometry pixelGeometry = kUnknown_SkPixelGeometry,
+ uint8_t lcdFilter = 0);
+
+#endif
+
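
This header is a Mozilla addition rather than stock Skia, so the sketch below is an assumption-laden illustration: it presumes the FT_Face outlives the typeface and that the returned pointer is a new reference:

```cpp
#include "include/core/SkSurfaceProps.h"
#include "include/core/SkTypeface.h"
#include "include/ports/SkTypeface_cairo.h"

// Adopt a FreeType face that cairo is already using into Skia.
sk_sp<SkTypeface> wrap_ft_face(FT_Face face) {
    SkInitCairoFT(/*fontHintingEnabled=*/true);  // assumed one-time global setup
    return sk_sp<SkTypeface>(SkCreateTypefaceFromCairoFTFont(
            face, /*faceContext=*/nullptr, kRGB_H_SkPixelGeometry, /*lcdFilter=*/0));
}
```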
diff --git a/gfx/skia/skia/include/ports/SkTypeface_mac.h b/gfx/skia/skia/include/ports/SkTypeface_mac.h
new file mode 100644
index 0000000000..c17b821900
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_mac.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_mac_DEFINED
+#define SkTypeface_mac_DEFINED
+
+#include "include/core/SkTypeface.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#endif
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified CTFontRef. The caller must call
+ * unref() when it is finished.
+ *
+ * The CFTypeRef parameter, if provided, will be kept referenced for the
+ * lifetime of the SkTypeface. This was introduced as a means to work around
+ * https://crbug.com/413332 .
+ */
+SK_API extern SkTypeface* SkCreateTypefaceFromCTFont(CTFontRef, CFTypeRef = NULL);
+
+/**
+ * Returns the platform-specific CTFontRef handle for a
+ * given SkTypeface. Note that the returned CTFontRef gets
+ * released when the source SkTypeface is destroyed.
+ *
+ * This method is deprecated. It may only be used by Blink Mac
+ * legacy code in special cases related to text-shaping
+ * with AAT fonts, clipboard handling and font fallback.
+ * See https://code.google.com/p/skia/issues/detail?id=3408
+ */
+SK_API extern CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkTypeface_mac_DEFINED
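
A round-trip sketch between CoreText and Skia; it assumes the usual Core Foundation convention that the typeface retains its own reference to the CTFont, so the caller's +1 reference can be released:

```cpp
#include <CoreText/CoreText.h>

#include "include/core/SkTypeface.h"
#include "include/ports/SkTypeface_mac.h"

sk_sp<SkTypeface> make_helvetica(CGFloat size) {
    CTFontRef ctFont = CTFontCreateWithName(CFSTR("Helvetica"), size, nullptr);
    sk_sp<SkTypeface> face(SkCreateTypefaceFromCTFont(ctFont));  // adopts the new ref
    CFRelease(ctFont);  // the typeface holds its own retain on the CTFont
    return face;
}
```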
diff --git a/gfx/skia/skia/include/ports/SkTypeface_win.h b/gfx/skia/skia/include/ports/SkTypeface_win.h
new file mode 100644
index 0000000000..c9d1a3ee6a
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_win.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_DEFINED
+#define SkTypeface_win_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#ifdef UNICODE
+typedef struct tagLOGFONTW LOGFONTW;
+typedef LOGFONTW LOGFONT;
+#else
+typedef struct tagLOGFONTA LOGFONTA;
+typedef LOGFONTA LOGFONT;
+#endif // UNICODE
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified logfont. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT&);
+
+/**
+ * Copy the LOGFONT associated with this typeface into the lf parameter. Note
+ * that the lfHeight will need to be set afterwards, since the typeface does
+ * not track this (the paint does).
+ * typeface may be NULL, in which case we return the logfont for the default font.
+ */
+SK_API void SkLOGFONTFromTypeface(const SkTypeface* typeface, LOGFONT* lf);
+
+/**
+ * Set an optional callback to ensure that the data behind a LOGFONT is loaded.
+ * This will get called if Skia tries to access the data but hits a failure.
+ * Normally this is null, and is only required if the font data needs to be
+ * remotely (re)loaded.
+ */
+SK_API void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*)(const LOGFONT&));
+
+// Experimental!
+//
+class SkFontMgr;
+class SkRemotableFontMgr;
+struct IDWriteFactory;
+struct IDWriteFontCollection;
+struct IDWriteFontFallback;
+struct IDWriteFontFace;
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified dwrite font. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ int aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel);
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_GDI();
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory = NULL,
+ IDWriteFontCollection* collection = NULL);
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback);
+
+/**
+ * Creates an SkFontMgr which renders using DirectWrite and obtains its data
+ * from the SkRemotableFontMgr.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWriteRenderer(sk_sp<SkRemotableFontMgr>);
+
+/**
+ * Creates an SkRemotableFontMgr backed by DirectWrite using the default
+ * system font collection in the current locale.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API sk_sp<SkRemotableFontMgr> SkRemotableFontMgr_New_DirectWrite();
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTypeface_win_DEFINED
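
A small selection sketch using the defaulted arguments declared above:

```cpp
#include "include/core/SkFontMgr.h"
#include "include/ports/SkTypeface_win.h"

// Prefer DirectWrite with the process-default factory and system collection;
// fall back to GDI where DirectWrite is unavailable.
sk_sp<SkFontMgr> make_windows_fontmgr() {
    if (sk_sp<SkFontMgr> dw = SkFontMgr_New_DirectWrite()) {
        return dw;
    }
    return SkFontMgr_New_GDI();
}
```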
diff --git a/gfx/skia/skia/include/private/GrContext_Base.h b/gfx/skia/skia/include/private/GrContext_Base.h
new file mode 100644
index 0000000000..eae2c8f655
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrContext_Base.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContext_Base_DEFINED
+#define GrContext_Base_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrTypes.h"
+
+class GrBaseContextPriv;
+class GrCaps;
+class GrContext;
+class GrImageContext;
+class GrRecordingContext;
+class GrSkSLFPFactoryCache;
+
+class GrContext_Base : public SkRefCnt {
+public:
+ virtual ~GrContext_Base();
+
+ /*
+ * The 3D API backing this context
+ */
+ SK_API GrBackendApi backend() const { return fBackend; }
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+     * It is guaranteed that this backend format will be the one used by the GrContext's
+     * SkColorType- and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType, GrRenderable) const;
+
+ // Provides access to functions that aren't part of the public API.
+ GrBaseContextPriv priv();
+ const GrBaseContextPriv priv() const;
+
+protected:
+ friend class GrBaseContextPriv; // for hidden functions
+
+ GrContext_Base(GrBackendApi backend, const GrContextOptions& options, uint32_t contextID);
+
+ virtual bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>);
+
+ /**
+ * An identifier for this context. The id is used by all compatible contexts. For example,
+ * if SkImages are created on one thread using an image creation context, then fed into a
+ * DDL Recorder on second thread (which has a recording context) and finally replayed on
+ * a third thread with a direct context, then all three contexts will report the same id.
+ * It is an error for an image to be used with contexts that report different ids.
+ */
+ uint32_t contextID() const { return fContextID; }
+
+ bool matches(GrContext_Base* candidate) const {
+ return candidate->contextID() == this->contextID();
+ }
+
+ /*
+ * The options in effect for this context
+ */
+ const GrContextOptions& options() const { return fOptions; }
+
+ const GrCaps* caps() const;
+ sk_sp<const GrCaps> refCaps() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ virtual GrImageContext* asImageContext() { return nullptr; }
+ virtual GrRecordingContext* asRecordingContext() { return nullptr; }
+ virtual GrContext* asDirectContext() { return nullptr; }
+
+private:
+ const GrBackendApi fBackend;
+ const GrContextOptions fOptions;
+ const uint32_t fContextID;
+ sk_sp<const GrCaps> fCaps;
+ sk_sp<GrSkSLFPFactoryCache> fFPFactoryCache;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrGLTypesPriv.h b/gfx/skia/skia/include/private/GrGLTypesPriv.h
new file mode 100644
index 0000000000..14f4606346
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrGLTypesPriv.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLTypes.h"
+
+#ifndef GrGLTypesPriv_DEFINED
+#define GrGLTypesPriv_DEFINED
+
+static constexpr int kGrGLFormatCount = static_cast<int>(GrGLFormat::kLast) + 1;
+
+class GrGLTextureParameters : public SkNVRefCnt<GrGLTextureParameters> {
+public:
+    // We currently consider texture parameters invalid on all textures after
+    // GrContext::resetContext(). We use this type to track whether instances of
+ // GrGLTextureParameters were updated before or after the most recent resetContext(). At 10
+ // resets / frame and 60fps a 64bit timestamp will overflow in about a billion years.
+ // TODO: Require clients to use GrBackendTexture::glTextureParametersModified() to invalidate
+ // texture parameters and get rid of timestamp checking.
+ using ResetTimestamp = uint64_t;
+
+ // This initializes the params to have an expired timestamp. They'll be considered invalid the
+ // first time the texture is used unless set() is called.
+ GrGLTextureParameters() = default;
+
+ // This is texture parameter state that is overridden when a non-zero sampler object is bound.
+ struct SamplerOverriddenState {
+ SamplerOverriddenState();
+ void invalidate();
+
+ GrGLenum fMinFilter;
+ GrGLenum fMagFilter;
+ GrGLenum fWrapS;
+ GrGLenum fWrapT;
+ GrGLfloat fMinLOD;
+ GrGLfloat fMaxLOD;
+ // We always want the border color to be transparent black, so no need to store 4 floats.
+        // Just track whether it has been invalidated and is no longer the default.
+ bool fBorderColorInvalid;
+ };
+
+ // Texture parameter state that is not overridden by a bound sampler object.
+ struct NonsamplerState {
+ NonsamplerState();
+ void invalidate();
+
+ uint32_t fSwizzleKey;
+ GrGLint fBaseMipMapLevel;
+ GrGLint fMaxMipMapLevel;
+ };
+
+ void invalidate();
+
+ ResetTimestamp resetTimestamp() const { return fResetTimestamp; }
+ const SamplerOverriddenState& samplerOverriddenState() const { return fSamplerOverriddenState; }
+ const NonsamplerState& nonsamplerState() const { return fNonsamplerState; }
+
+ // SamplerOverriddenState is optional because we don't track it when we're using sampler
+ // objects.
+ void set(const SamplerOverriddenState* samplerState,
+ const NonsamplerState& nonsamplerState,
+ ResetTimestamp currTimestamp);
+
+private:
+ static constexpr ResetTimestamp kExpiredTimestamp = 0;
+
+ SamplerOverriddenState fSamplerOverriddenState;
+ NonsamplerState fNonsamplerState;
+ ResetTimestamp fResetTimestamp = kExpiredTimestamp;
+};
+
+class GrGLBackendTextureInfo {
+public:
+ GrGLBackendTextureInfo(const GrGLTextureInfo& info, GrGLTextureParameters* params)
+ : fInfo(info), fParams(params) {}
+ GrGLBackendTextureInfo(const GrGLBackendTextureInfo&) = delete;
+ GrGLBackendTextureInfo& operator=(const GrGLBackendTextureInfo&) = delete;
+ const GrGLTextureInfo& info() const { return fInfo; }
+ GrGLTextureParameters* parameters() const { return fParams; }
+ sk_sp<GrGLTextureParameters> refParameters() const { return sk_ref_sp(fParams); }
+
+ void cleanup();
+ void assign(const GrGLBackendTextureInfo&, bool thisIsValid);
+
+private:
+ GrGLTextureInfo fInfo;
+ GrGLTextureParameters* fParams;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrImageContext.h b/gfx/skia/skia/include/private/GrImageContext.h
new file mode 100644
index 0000000000..e651964896
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrImageContext.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageContext_DEFINED
+#define GrImageContext_DEFINED
+
+#include "include/private/GrContext_Base.h"
+#include "include/private/GrSingleOwner.h"
+
+class GrImageContextPriv;
+class GrProxyProvider;
+
+class GrImageContext : public GrContext_Base {
+public:
+ ~GrImageContext() override;
+
+ GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ // Provides access to functions that aren't part of the public API.
+ GrImageContextPriv priv();
+ const GrImageContextPriv priv() const;
+
+protected:
+ friend class GrImageContextPriv; // for hidden functions
+
+ GrImageContext(GrBackendApi, const GrContextOptions&, uint32_t contextID);
+
+ SK_API virtual void abandonContext();
+ SK_API bool abandoned() const;
+
+ GrProxyProvider* proxyProvider() { return fProxyProvider.get(); }
+ const GrProxyProvider* proxyProvider() const { return fProxyProvider.get(); }
+
+ /** This is only useful for debug purposes */
+ GrSingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ GrImageContext* asImageContext() override { return this; }
+
+private:
+ std::unique_ptr<GrProxyProvider> fProxyProvider;
+ bool fAbandoned = false;
+
+ // In debug builds we guard against improper thread handling
+ // This guard is passed to the GrDrawingManager and, from there to all the
+ // GrRenderTargetContexts. It is also passed to the GrResourceProvider and SkGpuDevice.
+ mutable GrSingleOwner fSingleOwner;
+
+ typedef GrContext_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrRecordingContext.h b/gfx/skia/skia/include/private/GrRecordingContext.h
new file mode 100644
index 0000000000..1f44bee169
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrRecordingContext.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRecordingContext_DEFINED
+#define GrRecordingContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/GrImageContext.h"
+
+class GrAuditTrail;
+class GrBackendFormat;
+class GrDrawingManager;
+class GrOnFlushCallbackObject;
+class GrOpMemoryPool;
+class GrRecordingContextPriv;
+class GrStrikeCache;
+class GrSurfaceContext;
+class GrSurfaceProxy;
+class GrTextBlobCache;
+class GrTextureContext;
+
+class GrRecordingContext : public GrImageContext {
+public:
+ ~GrRecordingContext() override;
+
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ // Provides access to functions that aren't part of the public API.
+ GrRecordingContextPriv priv();
+ const GrRecordingContextPriv priv() const;
+
+protected:
+ friend class GrRecordingContextPriv; // for hidden functions
+
+ GrRecordingContext(GrBackendApi, const GrContextOptions&, uint32_t contextID);
+ bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;
+ void setupDrawingManager(bool sortOpsTasks, bool reduceOpsTaskSplitting);
+
+ void abandonContext() override;
+
+ GrDrawingManager* drawingManager();
+
+ sk_sp<GrOpMemoryPool> refOpMemoryPool();
+ GrOpMemoryPool* opMemoryPool();
+
+ GrStrikeCache* getGrStrikeCache() { return fStrikeCache.get(); }
+ GrTextBlobCache* getTextBlobCache();
+ const GrTextBlobCache* getTextBlobCache() const;
+
+ /**
+ * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
+ *
+ * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
+ * ensure its lifetime is tied to that of the context.
+ */
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+ std::unique_ptr<GrSurfaceContext> makeWrappedSurfaceContext(sk_sp<GrSurfaceProxy>,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace> = nullptr,
+ const SkSurfaceProps* = nullptr);
+
+ /** Create a new texture context backed by a deferred-style GrTextureProxy. */
+ std::unique_ptr<GrTextureContext> makeDeferredTextureContext(
+ SkBackingFit,
+ int width,
+ int height,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected = GrProtected::kNo);
+
+ /*
+ * Create a new render target context backed by a deferred-style
+ * GrRenderTargetProxy. We guarantee that "asTextureProxy" will succeed for
+ * renderTargetContexts created via this entry point.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /*
+ * This method will attempt to create a renderTargetContext that has, at least, the number of
+     * channels and precision per channel as requested in 'colorType' (e.g., A8 and 888 can be
+ * converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
+ * SRGB-ness will be preserved.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted budgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ GrAuditTrail* auditTrail() { return fAuditTrail.get(); }
+
+ GrRecordingContext* asRecordingContext() override { return this; }
+
+private:
+ std::unique_ptr<GrDrawingManager> fDrawingManager;
+ // All the GrOp-derived classes use this pool.
+ sk_sp<GrOpMemoryPool> fOpMemoryPool;
+
+ std::unique_ptr<GrStrikeCache> fStrikeCache;
+ std::unique_ptr<GrTextBlobCache> fTextBlobCache;
+
+ std::unique_ptr<GrAuditTrail> fAuditTrail;
+
+ typedef GrImageContext INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrResourceKey.h b/gfx/skia/skia/include/private/GrResourceKey.h
new file mode 100644
index 0000000000..d67fa7d226
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrResourceKey.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceKey_DEFINED
+#define GrResourceKey_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+
+#include <new>
+
+uint32_t GrResourceKeyHash(const uint32_t* data, size_t size);
+
+/**
+ * Base class for all GrGpuResource cache keys. There are two types of cache keys. Refer to the
+ * comments for each key type below.
+ */
+class GrResourceKey {
+public:
+ uint32_t hash() const {
+ this->validate();
+ return fKey[kHash_MetaDataIdx];
+ }
+
+ size_t size() const {
+ this->validate();
+ SkASSERT(this->isValid());
+ return this->internalSize();
+ }
+
+protected:
+ static const uint32_t kInvalidDomain = 0;
+
+ GrResourceKey() { this->reset(); }
+
+ /** Reset to an invalid key. */
+ void reset() {
+ GR_STATIC_ASSERT((uint16_t)kInvalidDomain == kInvalidDomain);
+ fKey.reset(kMetaDataCnt);
+ fKey[kHash_MetaDataIdx] = 0;
+ fKey[kDomainAndSize_MetaDataIdx] = kInvalidDomain;
+ }
+
+ bool operator==(const GrResourceKey& that) const {
+ return this->hash() == that.hash() && 0 == memcmp(&fKey[kHash_MetaDataIdx + 1],
+ &that.fKey[kHash_MetaDataIdx + 1],
+ this->internalSize() - sizeof(uint32_t));
+ }
+
+ GrResourceKey& operator=(const GrResourceKey& that) {
+ if (this != &that) {
+ if (!that.isValid()) {
+ this->reset();
+ } else {
+ size_t bytes = that.size();
+ SkASSERT(SkIsAlign4(bytes));
+ fKey.reset(SkToInt(bytes / sizeof(uint32_t)));
+ memcpy(fKey.get(), that.fKey.get(), bytes);
+ this->validate();
+ }
+ }
+ return *this;
+ }
+
+ bool isValid() const { return kInvalidDomain != this->domain(); }
+
+ uint32_t domain() const { return fKey[kDomainAndSize_MetaDataIdx] & 0xffff; }
+
+ /** size of the key data, excluding meta-data (hash, domain, etc). */
+ size_t dataSize() const { return this->size() - 4 * kMetaDataCnt; }
+
+ /** ptr to the key data, excluding meta-data (hash, domain, etc). */
+ const uint32_t* data() const {
+ this->validate();
+ return &fKey[kMetaDataCnt];
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ if (!this->isValid()) {
+ SkDebugf("Invalid Key\n");
+ } else {
+ SkDebugf("hash: %d ", this->hash());
+ SkDebugf("domain: %d ", this->domain());
+ SkDebugf("size: %dB ", this->internalSize());
+ for (size_t i = 0; i < this->internalSize(); ++i) {
+ SkDebugf("%d ", fKey[SkTo<int>(i)]);
+ }
+ SkDebugf("\n");
+ }
+ }
+#endif
+
+ /** Used to initialize a key. */
+ class Builder {
+ public:
+ Builder(GrResourceKey* key, uint32_t domain, int data32Count) : fKey(key) {
+ SkASSERT(data32Count >= 0);
+ SkASSERT(domain != kInvalidDomain);
+ key->fKey.reset(kMetaDataCnt + data32Count);
+ int size = (data32Count + kMetaDataCnt) * sizeof(uint32_t);
+ SkASSERT(SkToU16(size) == size);
+ SkASSERT(SkToU16(domain) == domain);
+ key->fKey[kDomainAndSize_MetaDataIdx] = domain | (size << 16);
+ }
+
+ ~Builder() { this->finish(); }
+
+ void finish() {
+ if (nullptr == fKey) {
+ return;
+ }
+ GR_STATIC_ASSERT(0 == kHash_MetaDataIdx);
+ uint32_t* hash = &fKey->fKey[kHash_MetaDataIdx];
+ *hash = GrResourceKeyHash(hash + 1, fKey->internalSize() - sizeof(uint32_t));
+ fKey->validate();
+ fKey = nullptr;
+ }
+
+ uint32_t& operator[](int dataIdx) {
+ SkASSERT(fKey);
+ SkDEBUGCODE(size_t dataCount = fKey->internalSize() / sizeof(uint32_t) - kMetaDataCnt;)
+ SkASSERT(SkToU32(dataIdx) < dataCount);
+ return fKey->fKey[kMetaDataCnt + dataIdx];
+ }
+
+ private:
+ GrResourceKey* fKey;
+ };
+
+private:
+ enum MetaDataIdx {
+ kHash_MetaDataIdx,
+ // The key domain and size are packed into a single uint32_t.
+ kDomainAndSize_MetaDataIdx,
+
+ kLastMetaDataIdx = kDomainAndSize_MetaDataIdx
+ };
+ static const uint32_t kMetaDataCnt = kLastMetaDataIdx + 1;
+
+ size_t internalSize() const { return fKey[kDomainAndSize_MetaDataIdx] >> 16; }
+
+ void validate() const {
+ SkASSERT(this->isValid());
+ SkASSERT(fKey[kHash_MetaDataIdx] ==
+ GrResourceKeyHash(&fKey[kHash_MetaDataIdx] + 1,
+ this->internalSize() - sizeof(uint32_t)));
+ SkASSERT(SkIsAlign4(this->internalSize()));
+ }
+
+ friend class TestResource; // For unit test to access kMetaDataCnt.
+
+ // bmp textures require 5 uint32_t values.
+ SkAutoSTMalloc<kMetaDataCnt + 5, uint32_t> fKey;
+};
+
+/**
+ * A key used for scratch resources. There are three important rules about scratch keys:
+ * * Multiple resources can share the same scratch key. Therefore resources assigned the same
+ * scratch key should be interchangeable with respect to the code that uses them.
+ * * A resource can have at most one scratch key and it is set at resource creation by the
+ * resource itself.
+ * * When a scratch resource is ref'ed it will not be returned from the
+ * cache for a subsequent cache request until all refs are released. This facilitates using
+ * a scratch key for multiple render-to-texture scenarios. An example is a separable blur:
+ *
+ * GrTexture* texture[2];
+ * texture[0] = get_scratch_texture(scratchKey);
+ * texture[1] = get_scratch_texture(scratchKey); // texture[0] is already owned so we will get a
+ * // different one for texture[1]
+ * draw_mask(texture[0], path); // draws path mask to texture[0]
+ *  blur_x(texture[0], texture[1]); // blurs texture[0] in x and stores result in texture[1]
+ * blur_y(texture[1], texture[0]); // blurs texture[1] in y and stores result in texture[0]
+ * texture[1]->unref(); // texture 1 can now be recycled for the next request with scratchKey
+ * consume_blur(texture[0]);
+ * texture[0]->unref(); // texture 0 can now be recycled for the next request with scratchKey
+ */
+class GrScratchKey : public GrResourceKey {
+private:
+ typedef GrResourceKey INHERITED;
+
+public:
+ /** Uniquely identifies the type of resource that is cached as scratch. */
+ typedef uint32_t ResourceType;
+
+ /** Generate a unique ResourceType. */
+ static ResourceType GenerateResourceType();
+
+ /** Creates an invalid scratch key. It must be initialized using a Builder object before use. */
+ GrScratchKey() {}
+
+ GrScratchKey(const GrScratchKey& that) { *this = that; }
+
+ /** reset() returns the key to the invalid state. */
+ using INHERITED::reset;
+
+ using INHERITED::isValid;
+
+ ResourceType resourceType() const { return this->domain(); }
+
+ GrScratchKey& operator=(const GrScratchKey& that) {
+ this->INHERITED::operator=(that);
+ return *this;
+ }
+
+ bool operator==(const GrScratchKey& that) const { return this->INHERITED::operator==(that); }
+ bool operator!=(const GrScratchKey& that) const { return !(*this == that); }
+
+ class Builder : public INHERITED::Builder {
+ public:
+ Builder(GrScratchKey* key, ResourceType type, int data32Count)
+ : INHERITED::Builder(key, type, data32Count) {}
+ };
+};
+
+/**
+ * A key that allows for exclusive use of a resource for a use case (AKA "domain"). There are three
+ * rules governing the use of unique keys:
+ * * Only one resource can have a given unique key at a time. Hence, "unique".
+ * * A resource can have at most one unique key at a time.
+ * * Unlike scratch keys, multiple requests for a unique key will return the same
+ * resource even if the resource already has refs.
+ * This key type allows a code path to create cached resources for which it is the exclusive user.
+ * The code path creates a domain which it sets on its keys. This guarantees that there are no
+ * cross-domain collisions.
+ *
+ * Unique keys preempt scratch keys. While a resource has a unique key it is inaccessible via its
+ * scratch key. It can become scratch again if the unique key is removed.
+ */
+class GrUniqueKey : public GrResourceKey {
+private:
+ typedef GrResourceKey INHERITED;
+
+public:
+ typedef uint32_t Domain;
+ /** Generate a Domain for unique keys. */
+ static Domain GenerateDomain();
+
+ /** Creates an invalid unique key. It must be initialized using a Builder object before use. */
+ GrUniqueKey() : fTag(nullptr) {}
+
+ GrUniqueKey(const GrUniqueKey& that) { *this = that; }
+
+ /** reset() returns the key to the invalid state. */
+ using INHERITED::reset;
+
+ using INHERITED::isValid;
+
+ GrUniqueKey& operator=(const GrUniqueKey& that) {
+ this->INHERITED::operator=(that);
+ this->setCustomData(sk_ref_sp(that.getCustomData()));
+ fTag = that.fTag;
+ return *this;
+ }
+
+ bool operator==(const GrUniqueKey& that) const { return this->INHERITED::operator==(that); }
+ bool operator!=(const GrUniqueKey& that) const { return !(*this == that); }
+
+ void setCustomData(sk_sp<SkData> data) { fData = std::move(data); }
+ SkData* getCustomData() const { return fData.get(); }
+
+ const char* tag() const { return fTag; }
+
+#ifdef SK_DEBUG
+ void dump(const char* label) const {
+ SkDebugf("%s tag: %s\n", label, fTag ? fTag : "None");
+ this->INHERITED::dump();
+ }
+#endif
+
+ class Builder : public INHERITED::Builder {
+ public:
+ Builder(GrUniqueKey* key, Domain type, int data32Count, const char* tag = nullptr)
+ : INHERITED::Builder(key, type, data32Count) {
+ key->fTag = tag;
+ }
+
+ /** Used to build a key that wraps another key and adds additional data. */
+ Builder(GrUniqueKey* key, const GrUniqueKey& innerKey, Domain domain, int extraData32Cnt,
+ const char* tag = nullptr)
+ : INHERITED::Builder(key, domain, Data32CntForInnerKey(innerKey) + extraData32Cnt) {
+ SkASSERT(&innerKey != key);
+ // add the inner key to the end of the key so that op[] can be indexed normally.
+ uint32_t* innerKeyData = &this->operator[](extraData32Cnt);
+ const uint32_t* srcData = innerKey.data();
+ (*innerKeyData++) = innerKey.domain();
+ memcpy(innerKeyData, srcData, innerKey.dataSize());
+ key->fTag = tag;
+ }
+
+ private:
+ static int Data32CntForInnerKey(const GrUniqueKey& innerKey) {
+ // key data + domain
+ return SkToInt((innerKey.dataSize() >> 2) + 1);
+ }
+ };
+
+private:
+ sk_sp<SkData> fData;
+ const char* fTag;
+};
+
+/**
+ * It is common to need a frequently reused GrUniqueKey where the only requirement is that the key
+ * is unique. These macros create such a key in a thread safe manner so the key can be truly global
+ * and only constructed once.
+ */
+
+/** Place outside of function/class definitions. */
+#define GR_DECLARE_STATIC_UNIQUE_KEY(name) static SkOnce name##_once
+
+/** Place inside function where the key is used. */
+#define GR_DEFINE_STATIC_UNIQUE_KEY(name) \
+ static SkAlignedSTStorage<1, GrUniqueKey> name##_storage; \
+ name##_once(gr_init_static_unique_key_once, &name##_storage); \
+ static const GrUniqueKey& name = *reinterpret_cast<GrUniqueKey*>(name##_storage.get())
+
+static inline void gr_init_static_unique_key_once(SkAlignedSTStorage<1, GrUniqueKey>* keyStorage) {
+ GrUniqueKey* key = new (keyStorage->get()) GrUniqueKey;
+ GrUniqueKey::Builder builder(key, GrUniqueKey::GenerateDomain(), 0);
+}
+
+// The cache listens for these messages to purge junk resources proactively.
+class GrUniqueKeyInvalidatedMessage {
+public:
+ GrUniqueKeyInvalidatedMessage() = default;
+ GrUniqueKeyInvalidatedMessage(const GrUniqueKey& key, uint32_t contextUniqueID)
+ : fKey(key), fContextID(contextUniqueID) {
+ SkASSERT(SK_InvalidUniqueID != contextUniqueID);
+ }
+
+ GrUniqueKeyInvalidatedMessage(const GrUniqueKeyInvalidatedMessage&) = default;
+
+ GrUniqueKeyInvalidatedMessage& operator=(const GrUniqueKeyInvalidatedMessage&) = default;
+
+ const GrUniqueKey& key() const { return fKey; }
+ uint32_t contextID() const { return fContextID; }
+
+private:
+ GrUniqueKey fKey;
+ uint32_t fContextID = SK_InvalidUniqueID;
+};
+
+static inline bool SkShouldPostMessageToBus(const GrUniqueKeyInvalidatedMessage& msg,
+ uint32_t msgBusUniqueID) {
+ return msg.contextID() == msgBusUniqueID;
+}
+
+#endif
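
A sketch of building a unique key by hand with the Builder documented above; the domain name and key contents are illustrative. Note the Builder must go out of scope (or have finish() called) before the key is read, since that is when the hash is computed:

```cpp
#include "include/private/GrResourceKey.h"

static GrUniqueKey make_blur_mask_key(uint32_t genID, uint32_t radius) {
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
    GrUniqueKey key;
    {
        GrUniqueKey::Builder builder(&key, kDomain, /*data32Count=*/2, "BlurMask");
        builder[0] = genID;
        builder[1] = radius;
    }  // ~Builder() hashes and validates the key
    return key;
}
```

For a data-free key that only needs to exist once per process, the GR_DECLARE_STATIC_UNIQUE_KEY / GR_DEFINE_STATIC_UNIQUE_KEY macros above perform the same construction thread-safely.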
diff --git a/gfx/skia/skia/include/private/GrSharedEnums.h b/gfx/skia/skia/include/private/GrSharedEnums.h
new file mode 100644
index 0000000000..d745b70bd4
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrSharedEnums.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSharedEnums_DEFINED
+#define GrSharedEnums_DEFINED
+
+/*************************************************************************************************/
+/* This file is used from both C++ and SkSL, so we need to stick to syntax compatible with both. */
+/*************************************************************************************************/
+
+/**
+ * We have coverage effects that clip rendering to the edge of some geometric primitive.
+ * This enum specifies how that clipping is performed. Not all factories that take a
+ * GrProcessorEdgeType will succeed with all values and it is up to the caller to check for
+ * a NULL return.
+ */
+enum class GrClipEdgeType {
+ kFillBW,
+ kFillAA,
+ kInverseFillBW,
+ kInverseFillAA,
+ kHairlineAA,
+
+ kLast = kHairlineAA
+};
+
+enum class PMConversion {
+ kToPremul = 0,
+ kToUnpremul = 1,
+ kPMConversionCnt = 2
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrSingleOwner.h b/gfx/skia/skia/include/private/GrSingleOwner.h
new file mode 100644
index 0000000000..6369ae82c8
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrSingleOwner.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSingleOwner_DEFINED
+#define GrSingleOwner_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_DEBUG
+#include "include/private/SkMutex.h"
+#include "include/private/SkThreadID.h"
+
+// This is a debug tool to verify an object is only being used from one thread at a time.
+class GrSingleOwner {
+public:
+ GrSingleOwner() : fOwner(kIllegalThreadID), fReentranceCount(0) {}
+
+ struct AutoEnforce {
+ AutoEnforce(GrSingleOwner* so) : fSO(so) { fSO->enter(); }
+ ~AutoEnforce() { fSO->exit(); }
+
+ GrSingleOwner* fSO;
+ };
+
+private:
+ void enter() {
+ SkAutoMutexExclusive lock(fMutex);
+ SkThreadID self = SkGetThreadID();
+ SkASSERT(fOwner == self || fOwner == kIllegalThreadID);
+ fReentranceCount++;
+ fOwner = self;
+ }
+
+ void exit() {
+ SkAutoMutexExclusive lock(fMutex);
+ SkASSERT(fOwner == SkGetThreadID());
+ fReentranceCount--;
+ if (fReentranceCount == 0) {
+ fOwner = kIllegalThreadID;
+ }
+ }
+
+ SkMutex fMutex;
+ SkThreadID fOwner SK_GUARDED_BY(fMutex);
+ int fReentranceCount SK_GUARDED_BY(fMutex);
+};
+#else
+class GrSingleOwner {}; // Provide a dummy implementation so we can pass pointers to constructors
+#endif
+
+#endif
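
A usage sketch mirroring how Skia guards single-threaded objects; AutoEnforce only exists in debug builds, hence the SkDEBUGCODE wrapper:

```cpp
#include "include/core/SkTypes.h"
#include "include/private/GrSingleOwner.h"

class MyCache {
public:
    void insert(int value) {
        SkDEBUGCODE(GrSingleOwner::AutoEnforce enforce(&fSingleOwner);)
        fLast = value;  // asserts in debug builds if two threads race in here
    }

private:
    GrSingleOwner fSingleOwner;  // empty dummy class in release builds
    int fLast = 0;
};
```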
diff --git a/gfx/skia/skia/include/private/GrTypesPriv.h b/gfx/skia/skia/include/private/GrTypesPriv.h
new file mode 100644
index 0000000000..90718038e0
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrTypesPriv.h
@@ -0,0 +1,1341 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypesPriv_DEFINED
+#define GrTypesPriv_DEFINED
+
+#include <chrono>
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrSharedEnums.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkWeakRefCnt.h"
+
+class GrBackendFormat;
+class GrCaps;
+
+#ifdef MOZ_SKIA
+#include "mozilla/TimeStamp.h"
+
+struct GrStdSteadyClock
+{
+ typedef mozilla::TimeStamp time_point;
+
+ static time_point now() {
+ return mozilla::TimeStamp::NowLoRes();
+ }
+};
+
+static inline GrStdSteadyClock::time_point
+operator-(GrStdSteadyClock::time_point t, std::chrono::milliseconds ms) {
+ return t - mozilla::TimeDuration::FromMilliseconds(ms.count());
+}
+
+#else
+
+// The old libstdc++ uses the draft name "monotonic_clock" rather than "steady_clock". This might
+// not actually be monotonic, depending on how libstdc++ was built. However, this is only currently
+// used for idle resource purging so it shouldn't cause a correctness problem.
+#if defined(__GLIBCXX__) && (__GLIBCXX__ < 20130000)
+using GrStdSteadyClock = std::chrono::monotonic_clock;
+#else
+using GrStdSteadyClock = std::chrono::steady_clock;
+#endif
+
+#endif
+
+/**
+ * Pixel configurations. This type conflates texture formats, CPU pixel formats, and
+ * premultipliedness. We are moving away from it towards SkColorType and backend API (GL, Vulkan)
+ * texture formats in the public API. Right now this mostly refers to texture formats as we're
+ * migrating.
+ */
+enum GrPixelConfig {
+ kUnknown_GrPixelConfig,
+ kAlpha_8_GrPixelConfig,
+ kAlpha_8_as_Alpha_GrPixelConfig,
+ kAlpha_8_as_Red_GrPixelConfig,
+ kGray_8_GrPixelConfig,
+ kGray_8_as_Lum_GrPixelConfig,
+ kGray_8_as_Red_GrPixelConfig,
+ kRGB_565_GrPixelConfig,
+ kRGBA_4444_GrPixelConfig,
+ kRGBA_8888_GrPixelConfig,
+ kRGB_888_GrPixelConfig,
+ kRGB_888X_GrPixelConfig,
+ kRG_88_GrPixelConfig,
+ kBGRA_8888_GrPixelConfig,
+ kSRGBA_8888_GrPixelConfig,
+ kRGBA_1010102_GrPixelConfig,
+ kAlpha_half_GrPixelConfig,
+ kAlpha_half_as_Lum_GrPixelConfig,
+ kAlpha_half_as_Red_GrPixelConfig,
+ kRGBA_half_GrPixelConfig,
+ kRGBA_half_Clamped_GrPixelConfig,
+ kRGB_ETC1_GrPixelConfig,
+ kAlpha_16_GrPixelConfig,
+ kRG_1616_GrPixelConfig,
+ kRGBA_16161616_GrPixelConfig,
+ kRG_half_GrPixelConfig,
+
+ kLast_GrPixelConfig = kRG_half_GrPixelConfig
+};
+static const int kGrPixelConfigCnt = kLast_GrPixelConfig + 1;
+
+// Aliases for pixel configs that match skia's byte order.
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+static const GrPixelConfig kSkia8888_GrPixelConfig = kBGRA_8888_GrPixelConfig;
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+static const GrPixelConfig kSkia8888_GrPixelConfig = kRGBA_8888_GrPixelConfig;
+#else
+static const GrPixelConfig kSkia8888_GrPixelConfig = kBGRA_8888_GrPixelConfig;
+#endif
+
+/**
+ * Geometric primitives used for drawing.
+ */
+enum class GrPrimitiveType {
+ kTriangles,
+ kTriangleStrip,
+ kPoints,
+ kLines, // 1 pix wide only
+ kLineStrip, // 1 pix wide only
+ kPath
+};
+static constexpr int kNumGrPrimitiveTypes = (int)GrPrimitiveType::kPath + 1;
+
+static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) {
+ return GrPrimitiveType::kLines == type || GrPrimitiveType::kLineStrip == type;
+}
+
+static constexpr bool GrIsPrimTypeTris(GrPrimitiveType type) {
+ return GrPrimitiveType::kTriangles == type || GrPrimitiveType::kTriangleStrip == type;
+}
+
+enum class GrPrimitiveRestart : bool {
+ kNo = false,
+ kYes = true
+};
+
+/**
+ * Formats for masks, used by the font cache. Important that these are 0-based.
+ */
+enum GrMaskFormat {
+ kA8_GrMaskFormat, //!< 1-byte per pixel
+ kA565_GrMaskFormat, //!< 2-bytes per pixel, RGB represent 3-channel LCD coverage
+ kARGB_GrMaskFormat, //!< 4-bytes per pixel, color format
+
+ kLast_GrMaskFormat = kARGB_GrMaskFormat
+};
+static const int kMaskFormatCount = kLast_GrMaskFormat + 1;
+
+/**
+ * Return the number of bytes-per-pixel for the specified mask format.
+ */
+static inline int GrMaskFormatBytesPerPixel(GrMaskFormat format) {
+ SkASSERT(format < kMaskFormatCount);
+ // kA8 (0) -> 1
+ // kA565 (1) -> 2
+ // kARGB (2) -> 4
+ static const int sBytesPerPixel[] = {1, 2, 4};
+ static_assert(SK_ARRAY_COUNT(sBytesPerPixel) == kMaskFormatCount, "array_size_mismatch");
+ static_assert(kA8_GrMaskFormat == 0, "enum_order_dependency");
+ static_assert(kA565_GrMaskFormat == 1, "enum_order_dependency");
+ static_assert(kARGB_GrMaskFormat == 2, "enum_order_dependency");
+
+ return sBytesPerPixel[(int)format];
+}
+
+/**
+ * Describes a surface to be created.
+ */
+struct GrSurfaceDesc {
+ GrSurfaceDesc() : fWidth(0), fHeight(0), fConfig(kUnknown_GrPixelConfig) {}
+
+ int fWidth; //!< Width of the texture
+ int fHeight; //!< Height of the texture
+
+ /**
+     * Format of source data of the texture. Not guaranteed to be the same as
+     * the internal format used by the 3D API.
+ */
+ GrPixelConfig fConfig;
+};
+
+/** Ownership rules for external GPU resources imported into Skia. */
+enum GrWrapOwnership {
+ /** Skia will assume the client will keep the resource alive and Skia will not free it. */
+ kBorrow_GrWrapOwnership,
+
+ /** Skia will assume ownership of the resource and free it. */
+ kAdopt_GrWrapOwnership,
+};
+
+enum class GrWrapCacheable : bool {
+ /**
+ * The wrapped resource will be removed from the cache as soon as it becomes purgeable. It may
+ * still be assigned and found by a unique key, but the presence of the key will not be used to
+ * keep the resource alive when it has no references.
+ */
+ kNo = false,
+ /**
+ * The wrapped resource is allowed to remain in the GrResourceCache when it has no references
+ * but has a unique key. Such resources should only be given unique keys when it is known that
+ * the key will eventually be removed from the resource or invalidated via the message bus.
+ */
+ kYes = true
+};
+
+enum class GrBudgetedType : uint8_t {
+ /** The resource is budgeted and is subject to purging under budget pressure. */
+ kBudgeted,
+ /**
+ * The resource is unbudgeted and is purged as soon as it has no refs regardless of whether
+ * it has a unique or scratch key.
+ */
+ kUnbudgetedUncacheable,
+ /**
+ * The resource is unbudgeted and is allowed to remain in the cache with no refs if it
+ * has a unique key. Scratch keys are ignored.
+ */
+ kUnbudgetedCacheable,
+};
+
+/**
+ * Clips are composed from these objects.
+ */
+enum GrClipType {
+ kRect_ClipType,
+ kPath_ClipType
+};
+
+enum class GrScissorTest : bool {
+ kDisabled = false,
+ kEnabled = true
+};
+
+struct GrMipLevel {
+ const void* fPixels = nullptr;
+ size_t fRowBytes = 0;
+};
+
+/**
+ * This enum is used to specify the load operation to be used when an GrOpsTask/GrOpsRenderPass
+ * begins execution.
+ */
+enum class GrLoadOp {
+ kLoad,
+ kClear,
+ kDiscard,
+};
+
+/**
+ * This enum is used to specify the store operation to be used when an GrOpsTask/GrOpsRenderPass
+ * ends execution.
+ */
+enum class GrStoreOp {
+ kStore,
+ kDiscard,
+};
+
+/**
+ * Used to control antialiasing in draw calls.
+ */
+enum class GrAA : bool {
+ kNo = false,
+ kYes = true
+};
+
+enum class GrFillRule : bool {
+ kNonzero,
+ kEvenOdd
+};
+
+inline GrFillRule GrFillRuleForSkPath(const SkPath& path) {
+ switch (path.getFillType()) {
+ case SkPath::kWinding_FillType:
+ case SkPath::kInverseWinding_FillType:
+ return GrFillRule::kNonzero;
+ case SkPath::kEvenOdd_FillType:
+ case SkPath::kInverseEvenOdd_FillType:
+ return GrFillRule::kEvenOdd;
+ }
+ SkUNREACHABLE;
+}
+
+/** This enum indicates the type of antialiasing to be performed. */
+enum class GrAAType : unsigned {
+ /** No antialiasing */
+ kNone,
+ /** Use fragment shader code or mixed samples to blend with a fractional pixel coverage. */
+ kCoverage,
+ /** Use normal MSAA. */
+ kMSAA
+};
+
+static constexpr bool GrAATypeIsHW(GrAAType type) {
+ switch (type) {
+ case GrAAType::kNone:
+ return false;
+ case GrAAType::kCoverage:
+ return false;
+ case GrAAType::kMSAA:
+ return true;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Some pixel configs are inherently clamped to [0,1], some are allowed to go outside that range,
+ * and some are FP but manually clamped in the XP.
+ */
+enum class GrClampType {
+ kAuto, // Normalized, fixed-point configs
+ kManual, // Clamped FP configs
+ kNone, // Normal (unclamped) FP configs
+};
+
+/**
+ * A number of rectangle/quadrilateral drawing APIs can control anti-aliasing on a per edge basis.
+ * These masks specify which edges are AA'ed. The intent for this is to support tiling with seamless
+ * boundaries, where the inner edges are non-AA and the outer edges are AA. Regular draws (where AA
+ * is specified by GrAA) are almost equivalent to kNone or kAll, with the exception of how MSAA is
+ * handled.
+ *
+ * When tiling and there is MSAA, mixed edge rectangles are processed with MSAA, so in order for the
+ * tiled edges to remain seamless, inner tiles with kNone must also be processed with MSAA. In
+ * regular drawing, however, kNone should disable MSAA (if it's supported) to match the expected
+ * appearance.
+ *
+ * Therefore, APIs that use per-edge AA flags also take a GrAA value so that they can differentiate
+ * between the regular and tiling use case behaviors. Tiling operations should always pass
+ * GrAA::kYes while regular options should pass GrAA based on the SkPaint's anti-alias state.
+ */
+enum class GrQuadAAFlags {
+ kLeft = SkCanvas::kLeft_QuadAAFlag,
+ kTop = SkCanvas::kTop_QuadAAFlag,
+ kRight = SkCanvas::kRight_QuadAAFlag,
+ kBottom = SkCanvas::kBottom_QuadAAFlag,
+
+ kNone = SkCanvas::kNone_QuadAAFlags,
+ kAll = SkCanvas::kAll_QuadAAFlags
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrQuadAAFlags)
+
+static inline GrQuadAAFlags SkToGrQuadAAFlags(unsigned flags) {
+ return static_cast<GrQuadAAFlags>(flags);
+}
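+
+// Illustrative sketch of the tiling use case: a tile on the top border of a tiled
+// image AAs only its outward-facing edge while shared edges stay seamless:
+//     GrQuadAAFlags edgeAA = GrQuadAAFlags::kTop;   // outer edge of the top row
+//     bool aaTop = edgeAA & GrQuadAAFlags::kTop;    // the class ops define & as bool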
+
+/**
+ * Types of shader-language-specific boxed variables we can create.
+ */
+enum GrSLType {
+ kVoid_GrSLType,
+ kBool_GrSLType,
+ kByte_GrSLType,
+ kByte2_GrSLType,
+ kByte3_GrSLType,
+ kByte4_GrSLType,
+ kUByte_GrSLType,
+ kUByte2_GrSLType,
+ kUByte3_GrSLType,
+ kUByte4_GrSLType,
+ kShort_GrSLType,
+ kShort2_GrSLType,
+ kShort3_GrSLType,
+ kShort4_GrSLType,
+ kUShort_GrSLType,
+ kUShort2_GrSLType,
+ kUShort3_GrSLType,
+ kUShort4_GrSLType,
+ kFloat_GrSLType,
+ kFloat2_GrSLType,
+ kFloat3_GrSLType,
+ kFloat4_GrSLType,
+ kFloat2x2_GrSLType,
+ kFloat3x3_GrSLType,
+ kFloat4x4_GrSLType,
+ kHalf_GrSLType,
+ kHalf2_GrSLType,
+ kHalf3_GrSLType,
+ kHalf4_GrSLType,
+ kHalf2x2_GrSLType,
+ kHalf3x3_GrSLType,
+ kHalf4x4_GrSLType,
+ kInt_GrSLType,
+ kInt2_GrSLType,
+ kInt3_GrSLType,
+ kInt4_GrSLType,
+ kUint_GrSLType,
+ kUint2_GrSLType,
+ kTexture2DSampler_GrSLType,
+ kTextureExternalSampler_GrSLType,
+ kTexture2DRectSampler_GrSLType,
+ kTexture2D_GrSLType,
+ kSampler_GrSLType,
+
+ kLast_GrSLType = kSampler_GrSLType
+};
+static const int kGrSLTypeCount = kLast_GrSLType + 1;
+
+/**
+ * The type of texture. Backends other than GL currently only use the 2D value but the type must
+ * still be known at the API-neutral layer as it is used to determine whether MIP maps, renderability,
+ * and sampling parameters are legal for proxies that will be instantiated with wrapped textures.
+ */
+enum class GrTextureType {
+ kNone,
+ k2D,
+ /* Rectangle uses unnormalized texture coordinates. */
+ kRectangle,
+ kExternal
+};
+
+enum GrShaderType {
+ kVertex_GrShaderType,
+ kGeometry_GrShaderType,
+ kFragment_GrShaderType,
+
+ kLastkFragment_GrShaderType = kFragment_GrShaderType
+};
+static const int kGrShaderTypeCount = kLastkFragment_GrShaderType + 1;
+
+enum GrShaderFlags {
+ kNone_GrShaderFlags = 0,
+ kVertex_GrShaderFlag = 1 << kVertex_GrShaderType,
+ kGeometry_GrShaderFlag = 1 << kGeometry_GrShaderType,
+ kFragment_GrShaderFlag = 1 << kFragment_GrShaderType
+};
+GR_MAKE_BITFIELD_OPS(GrShaderFlags)
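+
+// For example, a uniform visible to both the vertex and fragment stages would
+// carry the combined mask:
+//     GrShaderFlags visibility = kVertex_GrShaderFlag | kFragment_GrShaderFlag;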
+
+/** Is the shading language type float (including vectors/matrices)? */
+static constexpr bool GrSLTypeIsFloatType(GrSLType type) {
+ switch (type) {
+ case kFloat_GrSLType:
+ case kFloat2_GrSLType:
+ case kFloat3_GrSLType:
+ case kFloat4_GrSLType:
+ case kFloat2x2_GrSLType:
+ case kFloat3x3_GrSLType:
+ case kFloat4x4_GrSLType:
+ case kHalf_GrSLType:
+ case kHalf2_GrSLType:
+ case kHalf3_GrSLType:
+ case kHalf4_GrSLType:
+ case kHalf2x2_GrSLType:
+ case kHalf3x3_GrSLType:
+ case kHalf4x4_GrSLType:
+ return true;
+
+ case kVoid_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kBool_GrSLType:
+ case kByte_GrSLType:
+ case kByte2_GrSLType:
+ case kByte3_GrSLType:
+ case kByte4_GrSLType:
+ case kUByte_GrSLType:
+ case kUByte2_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ case kShort_GrSLType:
+ case kShort2_GrSLType:
+ case kShort3_GrSLType:
+ case kShort4_GrSLType:
+ case kUShort_GrSLType:
+ case kUShort2_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ case kInt_GrSLType:
+ case kInt2_GrSLType:
+ case kInt3_GrSLType:
+ case kInt4_GrSLType:
+ case kUint_GrSLType:
+ case kUint2_GrSLType:
+ case kTexture2D_GrSLType:
+ case kSampler_GrSLType:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+/** If the type represents a single value or vector return the vector length, else -1. */
+static constexpr int GrSLTypeVecLength(GrSLType type) {
+ switch (type) {
+ case kFloat_GrSLType:
+ case kHalf_GrSLType:
+ case kBool_GrSLType:
+ case kByte_GrSLType:
+ case kUByte_GrSLType:
+ case kShort_GrSLType:
+ case kUShort_GrSLType:
+ case kInt_GrSLType:
+ case kUint_GrSLType:
+ return 1;
+
+ case kFloat2_GrSLType:
+ case kHalf2_GrSLType:
+ case kByte2_GrSLType:
+ case kUByte2_GrSLType:
+ case kShort2_GrSLType:
+ case kUShort2_GrSLType:
+ case kInt2_GrSLType:
+ case kUint2_GrSLType:
+ return 2;
+
+ case kFloat3_GrSLType:
+ case kHalf3_GrSLType:
+ case kByte3_GrSLType:
+ case kUByte3_GrSLType:
+ case kShort3_GrSLType:
+ case kUShort3_GrSLType:
+ case kInt3_GrSLType:
+ return 3;
+
+ case kFloat4_GrSLType:
+ case kHalf4_GrSLType:
+ case kByte4_GrSLType:
+ case kUByte4_GrSLType:
+ case kShort4_GrSLType:
+ case kUShort4_GrSLType:
+ case kInt4_GrSLType:
+ return 4;
+
+ case kFloat2x2_GrSLType:
+ case kFloat3x3_GrSLType:
+ case kFloat4x4_GrSLType:
+ case kHalf2x2_GrSLType:
+ case kHalf3x3_GrSLType:
+ case kHalf4x4_GrSLType:
+ case kVoid_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ case kSampler_GrSLType:
+ return -1;
+ }
+ SkUNREACHABLE;
+}
+
+static inline GrSLType GrSLCombinedSamplerTypeForTextureType(GrTextureType type) {
+ switch (type) {
+ case GrTextureType::k2D:
+ return kTexture2DSampler_GrSLType;
+ case GrTextureType::kRectangle:
+ return kTexture2DRectSampler_GrSLType;
+ case GrTextureType::kExternal:
+ return kTextureExternalSampler_GrSLType;
+ default:
+ SK_ABORT("Unexpected texture type");
+ }
+}
+
+/** Rectangle and external textures only support the clamp wrap mode and do not support
+ * MIP maps.
+ */
+static inline bool GrTextureTypeHasRestrictedSampling(GrTextureType type) {
+ switch (type) {
+ case GrTextureType::k2D:
+ return false;
+ case GrTextureType::kRectangle:
+ return true;
+ case GrTextureType::kExternal:
+ return true;
+ default:
+ SK_ABORT("Unexpected texture type");
+ }
+}
+
+static constexpr bool GrSLTypeIsCombinedSamplerType(GrSLType type) {
+ switch (type) {
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ return true;
+
+ case kVoid_GrSLType:
+ case kFloat_GrSLType:
+ case kFloat2_GrSLType:
+ case kFloat3_GrSLType:
+ case kFloat4_GrSLType:
+ case kFloat2x2_GrSLType:
+ case kFloat3x3_GrSLType:
+ case kFloat4x4_GrSLType:
+ case kHalf_GrSLType:
+ case kHalf2_GrSLType:
+ case kHalf3_GrSLType:
+ case kHalf4_GrSLType:
+ case kHalf2x2_GrSLType:
+ case kHalf3x3_GrSLType:
+ case kHalf4x4_GrSLType:
+ case kInt_GrSLType:
+ case kInt2_GrSLType:
+ case kInt3_GrSLType:
+ case kInt4_GrSLType:
+ case kUint_GrSLType:
+ case kUint2_GrSLType:
+ case kBool_GrSLType:
+ case kByte_GrSLType:
+ case kByte2_GrSLType:
+ case kByte3_GrSLType:
+ case kByte4_GrSLType:
+ case kUByte_GrSLType:
+ case kUByte2_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ case kShort_GrSLType:
+ case kShort2_GrSLType:
+ case kShort3_GrSLType:
+ case kShort4_GrSLType:
+ case kUShort_GrSLType:
+ case kUShort2_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ case kTexture2D_GrSLType:
+ case kSampler_GrSLType:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Types used to describe format of vertices in arrays.
+ */
+enum GrVertexAttribType {
+ kFloat_GrVertexAttribType = 0,
+ kFloat2_GrVertexAttribType,
+ kFloat3_GrVertexAttribType,
+ kFloat4_GrVertexAttribType,
+ kHalf_GrVertexAttribType,
+ kHalf2_GrVertexAttribType,
+ kHalf3_GrVertexAttribType,
+ kHalf4_GrVertexAttribType,
+
+ kInt2_GrVertexAttribType, // vector of 2 32-bit ints
+ kInt3_GrVertexAttribType, // vector of 3 32-bit ints
+ kInt4_GrVertexAttribType, // vector of 4 32-bit ints
+
+ kByte_GrVertexAttribType, // signed byte
+ kByte2_GrVertexAttribType, // vector of 2 8-bit signed bytes
+ kByte3_GrVertexAttribType, // vector of 3 8-bit signed bytes
+ kByte4_GrVertexAttribType, // vector of 4 8-bit signed bytes
+ kUByte_GrVertexAttribType, // unsigned byte
+ kUByte2_GrVertexAttribType, // vector of 2 8-bit unsigned bytes
+ kUByte3_GrVertexAttribType, // vector of 3 8-bit unsigned bytes
+ kUByte4_GrVertexAttribType, // vector of 4 8-bit unsigned bytes
+
+ kUByte_norm_GrVertexAttribType, // unsigned byte, e.g. coverage, 0 -> 0.0f, 255 -> 1.0f.
+ kUByte4_norm_GrVertexAttribType, // vector of 4 unsigned bytes, e.g. colors, 0 -> 0.0f,
+ // 255 -> 1.0f.
+
+ kShort2_GrVertexAttribType, // vector of 2 16-bit shorts.
+ kShort4_GrVertexAttribType, // vector of 4 16-bit shorts.
+
+ kUShort2_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0, 65535 -> 65535.
+ kUShort2_norm_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kInt_GrVertexAttribType,
+ kUint_GrVertexAttribType,
+
+ kUShort_norm_GrVertexAttribType,
+
+ kUShort4_norm_GrVertexAttribType, // vector of 4 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kLast_GrVertexAttribType = kUShort4_norm_GrVertexAttribType
+};
+static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1;
+
+//////////////////////////////////////////////////////////////////////////////
+
+static const int kGrClipEdgeTypeCnt = (int) GrClipEdgeType::kLast + 1;
+
+static constexpr bool GrProcessorEdgeTypeIsFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillAA == edgeType || GrClipEdgeType::kFillBW == edgeType);
+}
+
+static constexpr bool GrProcessorEdgeTypeIsInverseFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kInverseFillAA == edgeType ||
+ GrClipEdgeType::kInverseFillBW == edgeType);
+}
+
+static constexpr bool GrProcessorEdgeTypeIsAA(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillBW != edgeType &&
+ GrClipEdgeType::kInverseFillBW != edgeType);
+}
+
+static inline GrClipEdgeType GrInvertProcessorEdgeType(const GrClipEdgeType edgeType) {
+ switch (edgeType) {
+ case GrClipEdgeType::kFillBW:
+ return GrClipEdgeType::kInverseFillBW;
+ case GrClipEdgeType::kFillAA:
+ return GrClipEdgeType::kInverseFillAA;
+ case GrClipEdgeType::kInverseFillBW:
+ return GrClipEdgeType::kFillBW;
+ case GrClipEdgeType::kInverseFillAA:
+ return GrClipEdgeType::kFillAA;
+ case GrClipEdgeType::kHairlineAA:
+ SK_ABORT("Hairline fill isn't invertible.");
+ }
+ return GrClipEdgeType::kFillAA; // suppress warning.
+}
+
+/**
+ * Indicates the type of pending IO operations that can be recorded for gpu resources.
+ */
+enum GrIOType {
+ kRead_GrIOType,
+ kWrite_GrIOType,
+ kRW_GrIOType
+};
+
+/**
+ * Indicates the type of data that a GPU buffer will be used for.
+ */
+enum class GrGpuBufferType {
+ kVertex,
+ kIndex,
+ kXferCpuToGpu,
+ kXferGpuToCpu,
+};
+static const int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kXferGpuToCpu) + 1;
+
+/**
+ * Provides a performance hint regarding the frequency at which a data store will be accessed.
+ */
+enum GrAccessPattern {
+ /** Data store will be respecified repeatedly and used many times. */
+ kDynamic_GrAccessPattern,
+ /** Data store will be specified once and used many times. (Thus disqualified from caching.) */
+ kStatic_GrAccessPattern,
+ /** Data store will be specified once and used at most a few times. (Also can't be cached.) */
+ kStream_GrAccessPattern,
+
+ kLast_GrAccessPattern = kStream_GrAccessPattern
+};
+
+// Flags shared between the GrSurface & GrSurfaceProxy class hierarchies
+enum class GrInternalSurfaceFlags {
+ kNone = 0,
+
+ // Texture-level
+
+ // Means the pixels in the texture are read-only. Cannot also be a GrRenderTarget[Proxy].
+ kReadOnly = 1 << 0,
+
+ // RT-level
+
+ // This flag is for use with GL only. It tells us that the internal render target wraps FBO 0.
+ kGLRTFBOIDIs0 = 1 << 1,
+
+ // This means the render target is multisampled, and internally holds a non-msaa texture for
+ // resolving into. The render target resolves itself by blitting into this internal texture.
+ // (asTexture() might or might not return the internal texture, but if it does, we always
+ // resolve the render target before accessing this texture's data.)
+ kRequiresManualMSAAResolve = 1 << 2,
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrInternalSurfaceFlags)
+
+// 'GR_MAKE_BITFIELD_CLASS_OPS' defines the & operator on GrInternalSurfaceFlags to return bool.
+// We want to find the bitwise & with these masks, so we declare them as ints.
+constexpr static int kGrInternalTextureFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kReadOnly);
+
+constexpr static int kGrInternalRenderTargetFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kGLRTFBOIDIs0 | GrInternalSurfaceFlags::kRequiresManualMSAAResolve);
+
+constexpr static int kGrInternalTextureRenderTargetFlagsMask =
+ kGrInternalTextureFlagsMask | kGrInternalRenderTargetFlagsMask;
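+
+// Illustrative masking sketch (`surfaceFlags` is a hypothetical local): since
+// operator& on the enum class yields bool, masking is done in int space:
+//     int rtFlags = static_cast<int>(surfaceFlags) & kGrInternalRenderTargetFlagsMask;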
+
+#ifdef SK_DEBUG
+// Takes a pointer to a GrCaps, and will suppress prints if required
+#define GrCapsDebugf(caps, ...) if (!(caps)->suppressPrints()) SkDebugf(__VA_ARGS__)
+#else
+#define GrCapsDebugf(caps, ...) do {} while (0)
+#endif
+
+/**
+ * Specifies whether the holder owns the backend (OpenGL or Vulkan) object.
+ */
+enum class GrBackendObjectOwnership : bool {
+ /** Holder does not destroy the backend object. */
+ kBorrowed = false,
+ /** Holder destroys the backend object. */
+ kOwned = true
+};
+
+template <typename T>
+T* const* unique_ptr_address_as_pointer_address(std::unique_ptr<T> const* up) {
+ static_assert(sizeof(T*) == sizeof(std::unique_ptr<T>), "unique_ptr not expected size.");
+ return reinterpret_cast<T* const*>(up);
+}
+
+/*
+ * Object for CPU-GPU synchronization
+ */
+typedef uint64_t GrFence;
+
+/**
+ * Used to include or exclude specific GPU path renderers for testing purposes.
+ */
+enum class GpuPathRenderers {
+ kNone = 0, // Always use software masks and/or GrDefaultPathRenderer.
+ kDashLine = 1 << 0,
+ kStencilAndCover = 1 << 1,
+ kCoverageCounting = 1 << 2,
+ kAAHairline = 1 << 3,
+ kAAConvex = 1 << 4,
+ kAALinearizing = 1 << 5,
+ kSmall = 1 << 6,
+ kTessellating = 1 << 7,
+
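+    // kTessellating is the highest flag bit, so OR-ing it with (kTessellating - 1)
+    // turns on every lower flag too: 0x80 | 0x7F == 0xFF.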
+ kAll = (kTessellating | (kTessellating - 1)),
+ kDefault = kAll & ~kCoverageCounting
+
+};
+
+/**
+ * Used to describe the current state of Mips on a GrTexture
+ */
+enum class GrMipMapsStatus {
+ kNotAllocated, // Mips have not been allocated
+ kDirty, // Mips are allocated but the full mip tree does not have valid data
+ kValid, // All levels fully allocated and have valid data in them
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GpuPathRenderers)
+
+/**
+ * Utility functions for GrPixelConfig
+ */
+
+static constexpr GrPixelConfig GrCompressionTypePixelConfig(SkImage::CompressionType compression) {
+ switch (compression) {
+ case SkImage::kETC1_CompressionType: return kRGB_ETC1_GrPixelConfig;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Returns true if the pixel config is a GPU-specific compressed format
+ * representation.
+ */
+static constexpr bool GrPixelConfigIsCompressed(GrPixelConfig config) {
+ switch (config) {
+ case kRGB_ETC1_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Returns the data size for the given SkImage::CompressionType
+ */
+static inline size_t GrCompressedFormatDataSize(SkImage::CompressionType compressionType,
+ int width, int height) {
+ switch (compressionType) {
+ case SkImage::kETC1_CompressionType:
+ SkASSERT((width & 3) == 0);
+ SkASSERT((height & 3) == 0);
+ return (width >> 2) * (height >> 2) * 8;
+ }
+
+ SK_ABORT("Invalid pixel config");
+}
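+
+// Worked example: a 256x256 ETC1 image is (256 >> 2) * (256 >> 2) == 4096 blocks
+// of 4x4 pixels at 8 bytes each, i.e. 32768 bytes.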
+
+/**
+ * Like SkColorType this describes a layout of pixel data in CPU memory. It specifies the channels,
+ * their type, and width. This exists so that the GPU backend can have private types that have no
+ * analog in the public facing SkColorType enum and omit types not implemented in the GPU backend.
+ * It does not refer to a texture format and the mapping to texture formats may be many-to-many.
+ * It does not specify the sRGB encoding of the stored values. The components are listed in order of
+ * where they appear in memory. In other words the first component listed is in the low bits and
+ * the last component in the high bits.
+ */
+enum class GrColorType {
+ kUnknown,
+ kAlpha_8,
+ kBGR_565,
+ kABGR_4444, // This name differs from SkColorType. kARGB_4444_SkColorType is misnamed.
+ kRGBA_8888,
+ kRGBA_8888_SRGB,
+ kRGB_888x,
+ kRG_88,
+ kBGRA_8888,
+ kRGBA_1010102,
+ kGray_8,
+ kAlpha_F16,
+ kRGBA_F16,
+ kRGBA_F16_Clamped,
+ kRGBA_F32,
+
+ kAlpha_16,
+ kRG_1616,
+ kRG_F16,
+ kRGBA_16161616,
+
+ // Unusual formats that come up after reading back in cases where we are reassigning the meaning
+ // of a texture format's channels to use for a particular color format but have to read back the
+ // data to a full RGBA quadruple. (e.g. using a R8 texture format as A8 color type but the API
+ // only supports reading to RGBA8.) None of these have SkColorType equivalents.
+ kAlpha_8xxx,
+ kAlpha_F32xxx,
+ kGray_8xxx,
+
+ kLast = kGray_8xxx
+};
+
+static const int kGrColorTypeCnt = static_cast<int>(GrColorType::kLast) + 1;
+
+static constexpr SkColorType GrColorTypeToSkColorType(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_8: return kAlpha_8_SkColorType;
+ case GrColorType::kBGR_565: return kRGB_565_SkColorType;
+ case GrColorType::kABGR_4444: return kARGB_4444_SkColorType;
+ case GrColorType::kRGBA_8888: return kRGBA_8888_SkColorType;
+ // Once we add kRGBA_8888_SRGB_SkColorType we should return that here.
+ case GrColorType::kRGBA_8888_SRGB: return kRGBA_8888_SkColorType;
+ case GrColorType::kRGB_888x: return kRGB_888x_SkColorType;
+ case GrColorType::kRG_88: return kR8G8_unorm_SkColorType;
+ case GrColorType::kBGRA_8888: return kBGRA_8888_SkColorType;
+ case GrColorType::kRGBA_1010102: return kRGBA_1010102_SkColorType;
+ case GrColorType::kGray_8: return kGray_8_SkColorType;
+ case GrColorType::kAlpha_F16: return kA16_float_SkColorType;
+ case GrColorType::kRGBA_F16: return kRGBA_F16_SkColorType;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_F16Norm_SkColorType;
+ case GrColorType::kRGBA_F32: return kRGBA_F32_SkColorType;
+ case GrColorType::kAlpha_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_F32xxx: return kUnknown_SkColorType;
+ case GrColorType::kGray_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_16: return kA16_unorm_SkColorType;
+ case GrColorType::kRG_1616: return kR16G16_unorm_SkColorType;
+ case GrColorType::kRGBA_16161616: return kR16G16B16A16_unorm_SkColorType;
+ case GrColorType::kRG_F16: return kR16G16_float_SkColorType;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrColorType SkColorTypeToGrColorType(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return GrColorType::kUnknown;
+ case kAlpha_8_SkColorType: return GrColorType::kAlpha_8;
+ case kRGB_565_SkColorType: return GrColorType::kBGR_565;
+ case kARGB_4444_SkColorType: return GrColorType::kABGR_4444;
+ case kRGBA_8888_SkColorType: return GrColorType::kRGBA_8888;
+ case kRGB_888x_SkColorType: return GrColorType::kRGB_888x;
+ case kBGRA_8888_SkColorType: return GrColorType::kBGRA_8888;
+ case kGray_8_SkColorType: return GrColorType::kGray_8;
+ case kRGBA_F16Norm_SkColorType: return GrColorType::kRGBA_F16_Clamped;
+ case kRGBA_F16_SkColorType: return GrColorType::kRGBA_F16;
+ case kRGBA_1010102_SkColorType: return GrColorType::kRGBA_1010102;
+ case kRGB_101010x_SkColorType: return GrColorType::kUnknown;
+ case kRGBA_F32_SkColorType: return GrColorType::kRGBA_F32;
+ case kR8G8_unorm_SkColorType: return GrColorType::kRG_88;
+ case kA16_unorm_SkColorType: return GrColorType::kAlpha_16;
+ case kR16G16_unorm_SkColorType: return GrColorType::kRG_1616;
+ case kA16_float_SkColorType: return GrColorType::kAlpha_F16;
+ case kR16G16_float_SkColorType: return GrColorType::kRG_F16;
+ case kR16G16B16A16_unorm_SkColorType: return GrColorType::kRGBA_16161616;
+ }
+ SkUNREACHABLE;
+}
+
+// This is a temporary means of mapping an SkColorType and format to a
+// GrColorType::kRGBA_8888_SRGB. Once we have an SRGB SkColorType this can go away.
+GrColorType SkColorTypeAndFormatToGrColorType(const GrCaps* caps,
+ SkColorType skCT,
+ const GrBackendFormat& format);
+
+static constexpr uint32_t GrColorTypeComponentFlags(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return kAlpha_SkColorTypeComponentFlag;
+ case GrColorType::kBGR_565: return kRGB_SkColorTypeComponentFlags;
+ case GrColorType::kABGR_4444: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGBA_8888: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGBA_8888_SRGB: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGB_888x: return kRGB_SkColorTypeComponentFlags;
+ case GrColorType::kRG_88: return kRed_SkColorTypeComponentFlag |
+ kGreen_SkColorTypeComponentFlag;
+ case GrColorType::kBGRA_8888: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGBA_1010102: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kGray_8: return kGray_SkColorTypeComponentFlag;
+ case GrColorType::kAlpha_F16: return kAlpha_SkColorTypeComponentFlag;
+ case GrColorType::kRGBA_F16: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRGBA_F32: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kAlpha_8xxx: return kAlpha_SkColorTypeComponentFlag;
+ case GrColorType::kAlpha_F32xxx: return kAlpha_SkColorTypeComponentFlag;
+ case GrColorType::kGray_8xxx: return kGray_SkColorTypeComponentFlag;
+ case GrColorType::kAlpha_16: return kAlpha_SkColorTypeComponentFlag;
+ case GrColorType::kRG_1616: return kRed_SkColorTypeComponentFlag |
+ kGreen_SkColorTypeComponentFlag;
+ case GrColorType::kRGBA_16161616: return kRGBA_SkColorTypeComponentFlags;
+ case GrColorType::kRG_F16: return kRed_SkColorTypeComponentFlag |
+ kGreen_SkColorTypeComponentFlag;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Describes the encoding of channel data in a GrColorType.
+ */
+enum class GrColorTypeEncoding {
+ kUnorm,
+ kSRGBUnorm,
+ // kSnorm,
+ kFloat,
+ // kSint
+ // kUint
+};
+
+/**
+ * Describes a GrColorType by how many bits are used for each color component and how they are
+ * encoded. Currently all the non-zero channels share a single GrColorTypeEncoding. This could be
+ * expanded to store separate encodings and to indicate which bits belong to which components.
+ */
+struct GrColorTypeDesc {
+public:
+ static constexpr GrColorTypeDesc MakeRGBA(int rgba, GrColorTypeEncoding e) {
+ return {rgba, rgba, rgba, rgba, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeRGBA(int rgb, int a, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, a, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeRGB(int rgb, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, 0, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeRGB(int r, int g, int b, GrColorTypeEncoding e) {
+ return {r, g, b, 0, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeAlpha(int a, GrColorTypeEncoding e) {
+ return {0, 0, 0, a, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeR(int r, GrColorTypeEncoding e) {
+ return {r, 0, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeRG(int rg, GrColorTypeEncoding e) {
+ return {rg, rg, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeGray(int grayBits, GrColorTypeEncoding e) {
+ return {0, 0, 0, 0, grayBits, e};
+ }
+
+ static constexpr GrColorTypeDesc MakeInvalid() { return {}; }
+
+ constexpr int r() const { return fRBits; }
+ constexpr int g() const { return fGBits; }
+ constexpr int b() const { return fBBits; }
+ constexpr int a() const { return fABits; }
+ constexpr int operator[](int c) const {
+ switch (c) {
+ case 0: return this->r();
+ case 1: return this->g();
+ case 2: return this->b();
+ case 3: return this->a();
+ }
+ SkUNREACHABLE;
+ }
+
+ constexpr int gray() const { return fGrayBits; }
+
+ constexpr GrColorTypeEncoding encoding() const { return fEncoding; }
+
+private:
+ int fRBits = 0;
+ int fGBits = 0;
+ int fBBits = 0;
+ int fABits = 0;
+ int fGrayBits = 0;
+ GrColorTypeEncoding fEncoding = GrColorTypeEncoding::kUnorm;
+
+ constexpr GrColorTypeDesc() = default;
+
+ constexpr GrColorTypeDesc(int r, int g, int b, int a, int gray, GrColorTypeEncoding encoding)
+ : fRBits(r), fGBits(g), fBBits(b), fABits(a), fGrayBits(gray), fEncoding(encoding) {
+ SkASSERT(r >= 0 && g >= 0 && b >= 0 && a >= 0 && gray >= 0);
+ SkASSERT(!gray || (!r && !g && !b));
+ SkASSERT(r || g || b || a || gray);
+ }
+};
+
+static constexpr GrColorTypeDesc GrGetColorTypeDesc(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown:
+ return GrColorTypeDesc::MakeInvalid();
+ case GrColorType::kAlpha_8:
+ return GrColorTypeDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGR_565:
+ return GrColorTypeDesc::MakeRGB(5, 6, 5, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kABGR_4444:
+ return GrColorTypeDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888:
+ return GrColorTypeDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888_SRGB:
+ return GrColorTypeDesc::MakeRGBA(8, GrColorTypeEncoding::kSRGBUnorm);
+ case GrColorType::kRGB_888x:
+ return GrColorTypeDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_88:
+ return GrColorTypeDesc::MakeRG(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_8888:
+ return GrColorTypeDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_1010102:
+ return GrColorTypeDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kGray_8:
+ return GrColorTypeDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F16:
+ return GrColorTypeDesc::MakeAlpha(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16:
+ return GrColorTypeDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16_Clamped:
+ return GrColorTypeDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F32:
+ return GrColorTypeDesc::MakeRGBA(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kAlpha_8xxx:
+ return GrColorTypeDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F32xxx:
+ return GrColorTypeDesc::MakeAlpha(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kGray_8xxx:
+ return GrColorTypeDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_16:
+ return GrColorTypeDesc::MakeAlpha(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_1616:
+ return GrColorTypeDesc::MakeRG(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_16161616:
+ return GrColorTypeDesc::MakeRGBA(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_F16:
+ return GrColorTypeDesc::MakeRG(16, GrColorTypeEncoding::kFloat);
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrClampType GrColorTypeClampType(GrColorType colorType) {
+ if (GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kUnorm ||
+ GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kSRGBUnorm) {
+ return GrClampType::kAuto;
+ }
+ return GrColorType::kRGBA_F16_Clamped == colorType ? GrClampType::kManual : GrClampType::kNone;
+}
+
+// Consider a color type "wider" than n if it has more than n bits for any of its representable
+// channels.
+static constexpr bool GrColorTypeIsWiderThan(GrColorType colorType, int n) {
+ SkASSERT(n > 0);
+ auto desc = GrGetColorTypeDesc(colorType);
+    return (desc.r() && desc.r() > n) ||
+ (desc.g() && desc.g() > n) ||
+ (desc.b() && desc.b() > n) ||
+ (desc.a() && desc.a() > n) ||
+ (desc.gray() && desc.gray() > n);
+}
+
+static constexpr bool GrColorTypeIsAlphaOnly(GrColorType ct) {
+ return kAlpha_SkColorTypeComponentFlag == GrColorTypeComponentFlags(ct);
+}
+
+static constexpr bool GrColorTypeHasAlpha(GrColorType ct) {
+ return kAlpha_SkColorTypeComponentFlag & GrColorTypeComponentFlags(ct);
+}
+
+static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return 1;
+ case GrColorType::kBGR_565: return 2;
+ case GrColorType::kABGR_4444: return 2;
+ case GrColorType::kRGBA_8888: return 4;
+ case GrColorType::kRGBA_8888_SRGB: return 4;
+ case GrColorType::kRGB_888x: return 4;
+ case GrColorType::kRG_88: return 2;
+ case GrColorType::kBGRA_8888: return 4;
+ case GrColorType::kRGBA_1010102: return 4;
+ case GrColorType::kGray_8: return 1;
+ case GrColorType::kAlpha_F16: return 2;
+ case GrColorType::kRGBA_F16: return 8;
+ case GrColorType::kRGBA_F16_Clamped: return 8;
+ case GrColorType::kRGBA_F32: return 16;
+ case GrColorType::kAlpha_8xxx: return 4;
+ case GrColorType::kAlpha_F32xxx: return 16;
+ case GrColorType::kGray_8xxx: return 4;
+ case GrColorType::kAlpha_16: return 2;
+ case GrColorType::kRG_1616: return 4;
+ case GrColorType::kRGBA_16161616: return 8;
+ case GrColorType::kRG_F16: return 4;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrColorType GrPixelConfigToColorType(GrPixelConfig config) {
+ switch (config) {
+ case kUnknown_GrPixelConfig:
+ return GrColorType::kUnknown;
+ case kAlpha_8_GrPixelConfig:
+ return GrColorType::kAlpha_8;
+ case kGray_8_GrPixelConfig:
+ return GrColorType::kGray_8;
+ case kRGB_565_GrPixelConfig:
+ return GrColorType::kBGR_565;
+ case kRGBA_4444_GrPixelConfig:
+ return GrColorType::kABGR_4444;
+ case kRGBA_8888_GrPixelConfig:
+ return GrColorType::kRGBA_8888;
+ case kRGB_888_GrPixelConfig:
+ return GrColorType::kRGB_888x;
+ case kRGB_888X_GrPixelConfig:
+ return GrColorType::kRGB_888x;
+ case kRG_88_GrPixelConfig:
+ return GrColorType::kRG_88;
+ case kBGRA_8888_GrPixelConfig:
+ return GrColorType::kBGRA_8888;
+ case kSRGBA_8888_GrPixelConfig:
+ return GrColorType::kRGBA_8888_SRGB;
+ case kRGBA_1010102_GrPixelConfig:
+ return GrColorType::kRGBA_1010102;
+ case kAlpha_half_GrPixelConfig:
+ return GrColorType::kAlpha_F16;
+ case kRGBA_half_GrPixelConfig:
+ return GrColorType::kRGBA_F16;
+ case kRGBA_half_Clamped_GrPixelConfig:
+ return GrColorType::kRGBA_F16_Clamped;
+ case kRGB_ETC1_GrPixelConfig:
+ // We may need a roughly equivalent color type for a compressed texture. This should be
+ // the logical format for decompressing the data into.
+ return GrColorType::kRGB_888x;
+ case kAlpha_8_as_Alpha_GrPixelConfig:
+ return GrColorType::kAlpha_8;
+ case kAlpha_8_as_Red_GrPixelConfig:
+ return GrColorType::kAlpha_8;
+ case kAlpha_half_as_Lum_GrPixelConfig: // fall through
+ case kAlpha_half_as_Red_GrPixelConfig:
+ return GrColorType::kAlpha_F16;
+ case kGray_8_as_Lum_GrPixelConfig:
+ return GrColorType::kGray_8;
+ case kGray_8_as_Red_GrPixelConfig:
+ return GrColorType::kGray_8;
+ case kAlpha_16_GrPixelConfig:
+ return GrColorType::kAlpha_16;
+ case kRG_1616_GrPixelConfig:
+ return GrColorType::kRG_1616;
+ case kRGBA_16161616_GrPixelConfig:
+ return GrColorType::kRGBA_16161616;
+ case kRG_half_GrPixelConfig:
+ return GrColorType::kRG_F16;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrPixelConfig GrColorTypeToPixelConfig(GrColorType colorType) {
+ switch (colorType) {
+ case GrColorType::kUnknown: return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_8: return kAlpha_8_GrPixelConfig;
+ case GrColorType::kGray_8: return kGray_8_GrPixelConfig;
+ case GrColorType::kBGR_565: return kRGB_565_GrPixelConfig;
+ case GrColorType::kABGR_4444: return kRGBA_4444_GrPixelConfig;
+ case GrColorType::kRGBA_8888: return kRGBA_8888_GrPixelConfig;
+ case GrColorType::kRGBA_8888_SRGB: return kSRGBA_8888_GrPixelConfig;
+ case GrColorType::kRGB_888x: return kRGB_888_GrPixelConfig;
+ case GrColorType::kRG_88: return kRG_88_GrPixelConfig;
+ case GrColorType::kBGRA_8888: return kBGRA_8888_GrPixelConfig;
+ case GrColorType::kRGBA_1010102: return kRGBA_1010102_GrPixelConfig;
+ case GrColorType::kRGBA_F32: return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_F16: return kAlpha_half_GrPixelConfig;
+ case GrColorType::kRGBA_F16: return kRGBA_half_GrPixelConfig;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_half_Clamped_GrPixelConfig;
+ case GrColorType::kAlpha_8xxx: return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_F32xxx: return kUnknown_GrPixelConfig;
+ case GrColorType::kGray_8xxx: return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_16: return kAlpha_16_GrPixelConfig;
+ case GrColorType::kRG_1616: return kRG_1616_GrPixelConfig;
+ case GrColorType::kRGBA_16161616: return kRGBA_16161616_GrPixelConfig;
+ case GrColorType::kRG_F16: return kRG_half_GrPixelConfig;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Ref-counted object that calls a callback from its destructor.
+ */
+class GrRefCntedCallback : public SkRefCnt {
+public:
+ using Context = void*;
+ using Callback = void (*)(Context);
+
+ GrRefCntedCallback(Callback proc, Context ctx) : fReleaseProc(proc), fReleaseCtx(ctx) {
+ SkASSERT(proc);
+ }
+ ~GrRefCntedCallback() override { fReleaseProc ? fReleaseProc(fReleaseCtx) : void(); }
+
+ Context context() const { return fReleaseCtx; }
+
+private:
+ Callback fReleaseProc;
+ Context fReleaseCtx;
+};
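+
+// Usage sketch (ReleaseBacking and backing are hypothetical): the callback runs
+// exactly once, when the final ref is dropped:
+//     static void ReleaseBacking(void* ctx) { /* free the backing allocation */ }
+//     sk_sp<GrRefCntedCallback> cb(new GrRefCntedCallback(ReleaseBacking, backing));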
+
+#if GR_TEST_UTILS || defined(SK_ENABLE_DUMP_GPU)
+static constexpr const char* GrBackendApiToStr(GrBackendApi api) {
+ switch (api) {
+ case GrBackendApi::kMetal: return "Metal";
+ case GrBackendApi::kDawn: return "Dawn";
+ case GrBackendApi::kOpenGL: return "OpenGL";
+ case GrBackendApi::kVulkan: return "Vulkan";
+ case GrBackendApi::kMock: return "Mock";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrColorTypeToStr(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return "kUnknown";
+ case GrColorType::kAlpha_8: return "kAlpha_8";
+ case GrColorType::kBGR_565: return "kRGB_565";
+ case GrColorType::kABGR_4444: return "kABGR_4444";
+ case GrColorType::kRGBA_8888: return "kRGBA_8888";
+ case GrColorType::kRGBA_8888_SRGB: return "kRGBA_8888_SRGB";
+ case GrColorType::kRGB_888x: return "kRGB_888x";
+ case GrColorType::kRG_88: return "kRG_88";
+ case GrColorType::kBGRA_8888: return "kBGRA_8888";
+ case GrColorType::kRGBA_1010102: return "kRGBA_1010102";
+ case GrColorType::kGray_8: return "kGray_8";
+ case GrColorType::kAlpha_F16: return "kAlpha_F16";
+ case GrColorType::kRGBA_F16: return "kRGBA_F16";
+ case GrColorType::kRGBA_F16_Clamped: return "kRGBA_F16_Clamped";
+ case GrColorType::kRGBA_F32: return "kRGBA_F32";
+ case GrColorType::kAlpha_8xxx: return "kAlpha_8xxx";
+ case GrColorType::kAlpha_F32xxx: return "kAlpha_F32xxx";
+ case GrColorType::kGray_8xxx: return "kGray_8xxx";
+ case GrColorType::kAlpha_16: return "kAlpha_16";
+ case GrColorType::kRG_1616: return "kRG_1616";
+ case GrColorType::kRGBA_16161616: return "kRGBA_16161616";
+ case GrColorType::kRG_F16: return "kRG_F16";
+ }
+ SkUNREACHABLE;
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrVkTypesPriv.h b/gfx/skia/skia/include/private/GrVkTypesPriv.h
new file mode 100644
index 0000000000..49b392c52b
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrVkTypesPriv.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypesPriv_DEFINED
+#define GrVkTypesPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrVkImageLayout;
+
+// This struct is used to store the actual information about the Vulkan backend image on the
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getVkImageInfo on a
+// GrBackendTexture/RenderTarget, we use the GrVkBackendSurfaceInfo to create a snapshot
+// GrVkImageInfo object. Internally, this uses a ref-counted GrVkImageLayout object to track the
+// current VkImageLayout, which can be shared with an internal GrVkImage so that layout updates
+// can be seen by all users of the image.
+struct GrVkBackendSurfaceInfo {
+ GrVkBackendSurfaceInfo(GrVkImageInfo info, GrVkImageLayout* layout)
+ : fImageInfo(info), fLayout(layout) {}
+
+ void cleanup();
+
+ GrVkBackendSurfaceInfo& operator=(const GrVkBackendSurfaceInfo&) = delete;
+
+    // Assigns the passed-in GrVkBackendSurfaceInfo to this object. If isValid is true we will
+    // also attempt to unref the old fLayout on this object.
+ void assign(const GrVkBackendSurfaceInfo&, bool isValid);
+
+ void setImageLayout(VkImageLayout layout);
+
+ sk_sp<GrVkImageLayout> getGrVkImageLayout() const;
+
+ GrVkImageInfo snapImageInfo() const;
+
+ bool isProtected() const { return fImageInfo.fProtected == GrProtected::kYes; }
+#if GR_TEST_UTILS
+ bool operator==(const GrVkBackendSurfaceInfo& that) const;
+#endif
+
+private:
+ GrVkImageInfo fImageInfo;
+ GrVkImageLayout* fLayout;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkBitmaskEnum.h b/gfx/skia/skia/include/private/SkBitmaskEnum.h
new file mode 100644
index 0000000000..71022b2ce5
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkBitmaskEnum.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEnumOperators_DEFINED
+#define SkEnumOperators_DEFINED
+
+#include <type_traits>
+
+namespace skstd {
+template <typename T> struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, bool>::type constexpr Any(E e) {
+ return static_cast<typename std::underlying_type<E>::type>(e) != 0;
+}
+} // namespace skstd
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, E>::type constexpr operator|(E l, E r) {
+ using U = typename std::underlying_type<E>::type;
+ return static_cast<E>(static_cast<U>(l) | static_cast<U>(r));
+}
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, E&>::type constexpr operator|=(E& l, E r) {
+ return l = l | r;
+}
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, E>::type constexpr operator&(E l, E r) {
+ using U = typename std::underlying_type<E>::type;
+ return static_cast<E>(static_cast<U>(l) & static_cast<U>(r));
+}
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, E&>::type constexpr operator&=(E& l, E r) {
+ return l = l & r;
+}
+
+template <typename E>
+typename std::enable_if<skstd::is_bitmask_enum<E>::value, E>::type constexpr operator~(E e) {
+ return static_cast<E>(~static_cast<typename std::underlying_type<E>::type>(e));
+}
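+
+// Opting in (MyFlags is a hypothetical scoped enum): specialize is_bitmask_enum
+// to enable these operators:
+//     enum class MyFlags { kNone = 0, kA = 1 << 0, kB = 1 << 1 };
+//     namespace skstd {
+//     template <> struct is_bitmask_enum<MyFlags> : std::true_type {};
+//     }
+//     MyFlags f = MyFlags::kA | MyFlags::kB;   // now well-formed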
+
+#endif // SkEnumOperators_DEFINED
diff --git a/gfx/skia/skia/include/private/SkChecksum.h b/gfx/skia/skia/include/private/SkChecksum.h
new file mode 100644
index 0000000000..553bfcdfab
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkChecksum.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_DEFINED
+#define SkChecksum_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTLogic.h"
+
+// #include "src/core/SkOpts.h"
+// It's awkward to include SkOpts.h here, so we just re-declare what we need.
+namespace SkOpts {
+ extern uint32_t (*hash_fn)(const void*, size_t, uint32_t);
+}
+
+class SkChecksum : SkNoncopyable {
+public:
+ /**
+     *  uint32_t -> uint32_t hash, useful when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This is the Murmur3 finalizer.
+ */
+ static uint32_t Mix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+ return hash;
+ }
+
+ /**
+     *  uint32_t -> uint32_t hash, useful when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+     *  This version is two lines cheaper than Mix, but seems to be sufficient for the font cache.
+ */
+ static uint32_t CheapMix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 16;
+ return hash;
+ }
+};
+
+// SkGoodHash should usually be your first choice in hashing data.
+// It should be both reasonably fast and high quality.
+struct SkGoodHash {
+ template <typename K>
+ SK_WHEN(sizeof(K) == 4, uint32_t) operator()(const K& k) const {
+ return SkChecksum::Mix(*(const uint32_t*)&k);
+ }
+
+ template <typename K>
+ SK_WHEN(sizeof(K) != 4, uint32_t) operator()(const K& k) const {
+ return SkOpts::hash_fn(&k, sizeof(K), 0);
+ }
+
+ uint32_t operator()(const SkString& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+};
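+
+// Typical use (illustrative): SkGoodHash is the default hasher for Skia's private
+// hash tables; a 4-byte key takes the Mix() path, anything else hits hash_fn:
+//     SkTHashMap<uint32_t, SkString> map;   // SkTHashMap defaults to SkGoodHash
+//     map.set(42u, SkString("answer"));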
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkColorData.h b/gfx/skia/skia/include/private/SkColorData.h
new file mode 100644
index 0000000000..00a28952f1
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkColorData.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorData_DEFINED
+#define SkColorData_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTo.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 16bit pixel to a 32bit pixel
+
+#define SK_R16_BITS 5
+#define SK_G16_BITS 6
+#define SK_B16_BITS 5
+
+#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT (SK_B16_BITS)
+#define SK_B16_SHIFT 0
+
+#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+ return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+ return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+ return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
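+
+// Worked example: the bit replication above maps the 5-bit maximum 31 to
+// (31 << 3) | (31 >> 2) == 248 | 7 == 255, so channel extremes are preserved.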
+
+#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
+
+//////////////////////////////////////////////////////////////////////////////
+
+#define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFF))
+
+// Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note that
+// the pair occupy the same two slots in both RGBA and BGRA, so there is no need
+// to pass the color type to this function.
+static inline uint32_t SkSwizzle_RB(uint32_t c) {
+ static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT);
+
+ unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF;
+ unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF;
+ return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) |
+ (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) |
+ (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT);
+}
+
+static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+///@{
+/** See ITU-R Recommendation BT.709 at http://www.itu.int/rec/R-REC-BT.709/ .*/
+#define SK_ITU_BT709_LUM_COEFF_R (0.2126f)
+#define SK_ITU_BT709_LUM_COEFF_G (0.7152f)
+#define SK_ITU_BT709_LUM_COEFF_B (0.0722f)
+///@}
+
+///@{
+/** A float value which specifies this channel's contribution to luminance. */
+#define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R
+#define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G
+#define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B
+///@}
+
+/** Computes the luminance from the given r, g, and b in accordance with
+ SK_LUM_COEFF_X. For correct results, r, g, and b should be in linear space.
+*/
+static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) {
+    // The following is
+    // r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B
+    // with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256).
+ return (r * 54 + g * 183 + b * 19) >> 8;
+}
+
+/** Calculates 256 - (value * alpha256) / 255 in range [0,256],
+ * for [0,255] value and [0,256] alpha256.
+ */
+static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) {
+ unsigned prod = 0xFFFF - value * alpha256;
+ return (prod + (prod >> 8)) >> 8;
+}
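+
+// Worked example: value == 255 and alpha256 == 256 give prod == 0xFFFF - 65280
+// == 255, and (255 + (255 >> 8)) >> 8 == 0: full coverage leaves no dst weight.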
+
+// The caller may want negative values, so keep all params signed (int)
+// so we don't accidentally slip into unsigned math and lose the sign
+// extension when we shift (in SkAlphaMul)
+static inline int SkAlphaBlend(int src, int dst, int scale256) {
+ SkASSERT((unsigned)scale256 <= 256);
+ return dst + SkAlphaMul(src - dst, scale256);
+}
+
+static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
+ SkASSERT(r <= SK_R16_MASK);
+ SkASSERT(g <= SK_G16_MASK);
+ SkASSERT(b <= SK_B16_MASK);
+
+ return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
+}
+
+#define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT)
+#define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT)
+#define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT)
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ * srcWeight is [0..256], unlike SkFourByteInterp which takes [0..255]
+ */
+static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst,
+ unsigned scale) {
+ unsigned a = SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale);
+ unsigned r = SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale);
+ unsigned g = SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale);
+ unsigned b = SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale);
+
+ return SkPackARGB32(a, r, g, b);
+}
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ */
+static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst,
+ U8CPU srcWeight) {
+ unsigned scale = SkAlpha255To256(srcWeight);
+ return SkFourByteInterp256(src, dst, scale);
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG, 0x00RR00BB
+ */
+static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) {
+ const uint32_t mask = 0x00FF00FF;
+ *ag = (color >> 8) & mask;
+ *rb = color & mask;
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG00RR00BB
+ * (note, ARGB -> AGRB)
+ */
+static inline uint64_t SkSplay(uint32_t color) {
+ const uint32_t mask = 0x00FF00FF;
+ uint64_t agrb = (color >> 8) & mask; // 0x0000000000AA00GG
+ agrb <<= 32; // 0x00AA00GG00000000
+ agrb |= color & mask; // 0x00AA00GG00RR00BB
+ return agrb;
+}
+
+/**
+ * 0xAAxxGGxx, 0xRRxxBBxx-> 0xAARRGGBB
+ */
+static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) {
+ const uint32_t mask = 0xFF00FF00;
+ return (ag & mask) | ((rb & mask) >> 8);
+}
+
+/**
+ * 0xAAxxGGxxRRxxBBxx -> 0xAARRGGBB
+ * (note, AGRB -> ARGB)
+ */
+static inline uint32_t SkUnsplay(uint64_t agrb) {
+ const uint32_t mask = 0xFF00FF00;
+ return SkPMColor(
+ ((agrb & mask) >> 8) | // 0x00RR00BB
+ ((agrb >> 32) & mask)); // 0xAARRGGBB
+}
+
+static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+
+ // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide.
+ uint32_t src_ag, src_rb, dst_ag, dst_rb;
+ SkSplay(src, &src_ag, &src_rb);
+ SkSplay(dst, &dst_ag, &dst_rb);
+
+ const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
+ const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
+
+ return SkUnsplay(ret_ag, ret_rb);
+}
+
+static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+ // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide.
+ return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst));
+}
+
+// TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere.
+
+/**
+ * Same as SkFourByteInterp256, but faster.
+ */
+static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) {
+ // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine.
+ if (sizeof(void*) == 4) {
+ return SkFastFourByteInterp256_32(src, dst, scale);
+ } else {
+ return SkFastFourByteInterp256_64(src, dst, scale);
+ }
+}
+
+/**
+ * Nearly the same as SkFourByteInterp, but faster and a touch more accurate, due to better
+ * srcWeight scaling to [0, 256].
+ */
+static inline SkPMColor SkFastFourByteInterp(SkPMColor src,
+ SkPMColor dst,
+ U8CPU srcWeight) {
+ SkASSERT(srcWeight <= 255);
+ // scale = srcWeight + (srcWeight >> 7) is more accurate than
+ // scale = srcWeight + 1, but 7% slower
+ return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7));
+}
+
+/**
+ * Interpolates between colors src and dst using [0,256] scale.
+ */
+static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) {
+ return SkFastFourByteInterp256(src, dst, scale);
+}
+
+static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
+ SkASSERT((unsigned)aa <= 255);
+
+ unsigned src_scale = SkAlpha255To256(aa);
+ unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale);
+
+ const uint32_t mask = 0xFF00FF;
+
+ uint32_t src_rb = (src & mask) * src_scale;
+ uint32_t src_ag = ((src >> 8) & mask) * src_scale;
+
+ uint32_t dst_rb = (dst & mask) * dst_scale;
+ uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale;
+
+ return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask);
+}
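+
+// The 0xFF00FF mask processes two channels per 32-bit multiply (r|b in one word,
+// a|g in the other), with 8 spare bits per lane; with premultiplied src the
+// per-lane sum stays below 1 << 16, so lanes cannot carry into each other.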
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 32bit pixel to a 16bit pixel (no dither)
+
+#define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
+#define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
+#define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))
+
+#ifdef SK_DEBUG
+ static inline unsigned SkR32ToR16(unsigned r) {
+ SkR32Assert(r);
+ return SkR32ToR16_MACRO(r);
+ }
+ static inline unsigned SkG32ToG16(unsigned g) {
+ SkG32Assert(g);
+ return SkG32ToG16_MACRO(g);
+ }
+ static inline unsigned SkB32ToB16(unsigned b) {
+ SkB32Assert(b);
+ return SkB32ToB16_MACRO(b);
+ }
+#else
+ #define SkR32ToR16(r) SkR32ToR16_MACRO(r)
+ #define SkG32ToG16(g) SkG32ToG16_MACRO(g)
+ #define SkB32ToB16(b) SkB32ToB16_MACRO(b)
+#endif
+
+static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
+ unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
+ unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
+ unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
+ return r | g | b;
+}
+
+static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
+ return (SkR32ToR16(r) << SK_R16_SHIFT) |
+ (SkG32ToG16(g) << SK_G16_SHIFT) |
+ (SkB32ToB16(b) << SK_B16_SHIFT);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+/* SrcOver the 32bit src color with the 16bit dst, returning a 16bit value
+ (with dirt in the high 16bits, so caller beware).
+*/
+static inline U16CPU SkSrcOver32To16(SkPMColor src, uint16_t dst) {
+ unsigned sr = SkGetPackedR32(src);
+ unsigned sg = SkGetPackedG32(src);
+ unsigned sb = SkGetPackedB32(src);
+
+ unsigned dr = SkGetPackedR16(dst);
+ unsigned dg = SkGetPackedG16(dst);
+ unsigned db = SkGetPackedB16(dst);
+
+ unsigned isa = 255 - SkGetPackedA32(src);
+
+ dr = (sr + SkMul16ShiftRound(dr, isa, SK_R16_BITS)) >> (8 - SK_R16_BITS);
+ dg = (sg + SkMul16ShiftRound(dg, isa, SK_G16_BITS)) >> (8 - SK_G16_BITS);
+ db = (sb + SkMul16ShiftRound(db, isa, SK_B16_BITS)) >> (8 - SK_B16_BITS);
+
+ return SkPackRGB16(dr, dg, db);
+}
+
+static inline SkColor SkPixel16ToColor(U16CPU src) {
+ SkASSERT(src == SkToU16(src));
+
+ unsigned r = SkPacked16ToR32(src);
+ unsigned g = SkPacked16ToG32(src);
+ unsigned b = SkPacked16ToB32(src);
+
+ SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
+ SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
+ SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
+
+ return SkColorSetRGB(r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef uint16_t SkPMColor16;
+
+// Put in OpenGL order (r g b a)
+#define SK_A4444_SHIFT 0
+#define SK_R4444_SHIFT 12
+#define SK_G4444_SHIFT 8
+#define SK_B4444_SHIFT 4
+
+static inline U8CPU SkReplicateNibble(unsigned nib) {
+ SkASSERT(nib <= 0xF);
+ return (nib << 4) | nib;
+}
+
+#define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
+#define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
+#define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
+#define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
+
+#define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c))
+
+static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
+ uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
+ (SkGetPackedR4444(c) << SK_R32_SHIFT) |
+ (SkGetPackedG4444(c) << SK_G32_SHIFT) |
+ (SkGetPackedB4444(c) << SK_B32_SHIFT);
+ return d | (d << 4);
+}
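+
+// The final `d | (d << 4)` replicates each low nibble into a full byte in one
+// step (e.g. 0xF -> 0xFF), matching SkReplicateNibble applied per channel.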
+
+static inline Sk4f swizzle_rb(const Sk4f& x) {
+ return SkNx_shuffle<2, 1, 0, 3>(x);
+}
+
+static inline Sk4f swizzle_rb_if_bgra(const Sk4f& x) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return swizzle_rb(x);
+#else
+ return x;
+#endif
+}
+
+static inline Sk4f Sk4f_fromL32(uint32_t px) {
+ return SkNx_cast<float>(Sk4b::Load(&px)) * (1 / 255.0f);
+}
+
+static inline uint32_t Sk4f_toL32(const Sk4f& px) {
+ Sk4f v = px;
+
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ // SkNx_cast<uint8_t, int32_t>() pins, and we don't anticipate giant floats
+#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
+ // SkNx_cast<uint8_t, int32_t>() pins, and so does Sk4f_round().
+#else
+ // No guarantee of a pin.
+ v = Sk4f::Max(0, Sk4f::Min(v, 1));
+#endif
+
+ uint32_t l32;
+ SkNx_cast<uint8_t>(Sk4f_round(v * 255.0f)).store(&l32);
+ return l32;
+}
+
+using SkPMColor4f = SkRGBA4f<kPremul_SkAlphaType>;
+
+constexpr SkPMColor4f SK_PMColor4fTRANSPARENT = { 0, 0, 0, 0 };
+constexpr SkPMColor4f SK_PMColor4fWHITE = { 1, 1, 1, 1 };
+constexpr SkPMColor4f SK_PMColor4fILLEGAL = { SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity };
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkDeferredDisplayList.h b/gfx/skia/skia/include/private/SkDeferredDisplayList.h
new file mode 100644
index 0000000000..e8fc35b611
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkDeferredDisplayList.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayList_DEFINED
+#define SkDeferredDisplayList_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/core/SkTypes.h"
+
+class SkDeferredDisplayListPriv;
+
+#if SK_SUPPORT_GPU
+#include "include/private/SkTArray.h"
+#include <map>
+class GrRenderTask;
+class GrRenderTargetProxy;
+struct GrCCPerOpsTaskPaths;
+#endif
+
+/*
+ * This class contains pre-processed gpu operations that can be replayed into
+ * an SkSurface via draw(SkDeferredDisplayList*).
+ *
+ * TODO: we probably need to expose this class so users can query it for memory usage.
+ */
+class SkDeferredDisplayList {
+public:
+
+#if SK_SUPPORT_GPU
+ // This object is the source from which the lazy proxy backing the DDL will pull its backing
+    // texture when the DDL is replayed. It has to be separately ref counted because the lazy proxy
+ // can outlive the DDL.
+ class SK_API LazyProxyData : public SkRefCnt {
+ public:
+        // Upon replay, this field will be filled in (by the DrawingManager) with the proxy
+        // backing the destination SkSurface. Note that, since there is no good place to clear
+        // it, it can become a dangling pointer.
+ GrRenderTargetProxy* fReplayDest = nullptr;
+ };
+#else
+ class SK_API LazyProxyData : public SkRefCnt {};
+#endif
+
+ SK_API SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
+ sk_sp<LazyProxyData>);
+ SK_API ~SkDeferredDisplayList();
+
+ SK_API const SkSurfaceCharacterization& characterization() const {
+ return fCharacterization;
+ }
+
+ // Provides access to functions that aren't part of the public API.
+ SkDeferredDisplayListPriv priv();
+ const SkDeferredDisplayListPriv priv() const;
+
+private:
+ friend class GrDrawingManager; // for access to 'fRenderTasks' and 'fLazyProxyData'
+ friend class SkDeferredDisplayListRecorder; // for access to 'fLazyProxyData'
+ friend class SkDeferredDisplayListPriv;
+
+ const SkSurfaceCharacterization fCharacterization;
+
+#if SK_SUPPORT_GPU
+ // This needs to match the same type in GrCoverageCountingPathRenderer.h
+ using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
+
+ SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
+ PendingPathsMap fPendingPaths; // This is the path data from CCPR.
+#endif
+ sk_sp<LazyProxyData> fLazyProxyData;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkEncodedInfo.h b/gfx/skia/skia/include/private/SkEncodedInfo.h
new file mode 100644
index 0000000000..887198c1e1
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkEncodedInfo.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedInfo_DEFINED
+#define SkEncodedInfo_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/third_party/skcms/skcms.h"
+
+struct SkEncodedInfo {
+public:
+ class ICCProfile {
+ public:
+ static std::unique_ptr<ICCProfile> Make(sk_sp<SkData>);
+ static std::unique_ptr<ICCProfile> Make(const skcms_ICCProfile&);
+
+ const skcms_ICCProfile* profile() const { return &fProfile; }
+ private:
+ ICCProfile(const skcms_ICCProfile&, sk_sp<SkData> = nullptr);
+
+ skcms_ICCProfile fProfile;
+ sk_sp<SkData> fData;
+ };
+
+ enum Alpha {
+ kOpaque_Alpha,
+ kUnpremul_Alpha,
+
+ // Each pixel is either fully opaque or fully transparent.
+ // There is no difference between requesting kPremul or kUnpremul.
+ kBinary_Alpha,
+ };
+
+ /*
+ * We strive to make the number of components per pixel obvious through
+ * our naming conventions.
+ * Ex: kRGB has 3 components. kRGBA has 4 components.
+ *
+ * This sometimes results in redundant Alpha and Color information.
+ * Ex: kRGB images must also be kOpaque.
+ */
+ enum Color {
+ // PNG, WBMP
+ kGray_Color,
+
+ // PNG
+ kGrayAlpha_Color,
+
+ // PNG with Skia-specific sBIT
+ // Like kGrayAlpha, except this expects to be treated as
+ // kAlpha_8_SkColorType, which ignores the gray component. If
+ // decoded to full color (e.g. kN32), the gray component is respected
+ // (so it can share code with kGrayAlpha).
+ kXAlpha_Color,
+
+ // PNG
+ // 565 images may be encoded to PNG by specifying the number of
+ // significant bits for each channel. This is a strange 565
+ // representation because the image is still encoded with 8 bits per
+ // component.
+ k565_Color,
+
+ // PNG, GIF, BMP
+ kPalette_Color,
+
+ // PNG, RAW
+ kRGB_Color,
+ kRGBA_Color,
+
+ // BMP
+ kBGR_Color,
+ kBGRX_Color,
+ kBGRA_Color,
+
+ // JPEG, WEBP
+ kYUV_Color,
+
+ // WEBP
+ kYUVA_Color,
+
+ // JPEG
+ // Photoshop actually writes inverted CMYK data into JPEGs, where zero
+ // represents 100% ink coverage. For this reason, we treat CMYK JPEGs
+ // as having inverted CMYK. libjpeg-turbo warns that this may break
+ // other applications, but the CMYK JPEGs we see on the web expect to
+ // be treated as inverted CMYK.
+ kInvertedCMYK_Color,
+ kYCCK_Color,
+ };
+
+ static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha,
+ int bitsPerComponent) {
+ return Make(width, height, color, alpha, bitsPerComponent, nullptr);
+ }
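+
+    // For example, a decoder seeing an opaque, 8-bit-per-channel RGB stream
+    // might report SkEncodedInfo::Make(w, h, kRGB_Color, kOpaque_Alpha, 8),
+    // where w and h stand in for the decoded dimensions.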
+
+ static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha,
+ int bitsPerComponent, std::unique_ptr<ICCProfile> profile) {
+ SkASSERT(1 == bitsPerComponent ||
+ 2 == bitsPerComponent ||
+ 4 == bitsPerComponent ||
+ 8 == bitsPerComponent ||
+ 16 == bitsPerComponent);
+
+ switch (color) {
+ case kGray_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ break;
+ case kGrayAlpha_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ break;
+ case kPalette_Color:
+ SkASSERT(16 != bitsPerComponent);
+ break;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kBGRX_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kYUV_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kRGBA_Color:
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kBGRA_Color:
+ case kYUVA_Color:
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kXAlpha_Color:
+ SkASSERT(kUnpremul_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case k565_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+
+ return SkEncodedInfo(width, height, color, alpha, bitsPerComponent, std::move(profile));
+ }
+
+ /*
+ * Returns a recommended SkImageInfo.
+ *
+ * TODO: Leave this up to the client.
+ */
+ SkImageInfo makeImageInfo() const {
+ auto ct = kGray_Color == fColor ? kGray_8_SkColorType :
+ kXAlpha_Color == fColor ? kAlpha_8_SkColorType :
+ k565_Color == fColor ? kRGB_565_SkColorType :
+ kN32_SkColorType ;
+ auto alpha = kOpaque_Alpha == fAlpha ? kOpaque_SkAlphaType
+ : kUnpremul_SkAlphaType;
+ sk_sp<SkColorSpace> cs = fProfile ? SkColorSpace::Make(*fProfile->profile())
+ : nullptr;
+ if (!cs) {
+ cs = SkColorSpace::MakeSRGB();
+ }
+ return SkImageInfo::Make(fWidth, fHeight, ct, alpha, std::move(cs));
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ Color color() const { return fColor; }
+ Alpha alpha() const { return fAlpha; }
+ bool opaque() const { return fAlpha == kOpaque_Alpha; }
+ const skcms_ICCProfile* profile() const {
+ if (!fProfile) return nullptr;
+ return fProfile->profile();
+ }
+
+ uint8_t bitsPerComponent() const { return fBitsPerComponent; }
+
+ uint8_t bitsPerPixel() const {
+ switch (fColor) {
+ case kGray_Color:
+ return fBitsPerComponent;
+ case kXAlpha_Color:
+ case kGrayAlpha_Color:
+ return 2 * fBitsPerComponent;
+ case kPalette_Color:
+ return fBitsPerComponent;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kYUV_Color:
+ case k565_Color:
+ return 3 * fBitsPerComponent;
+ case kRGBA_Color:
+ case kBGRA_Color:
+ case kBGRX_Color:
+ case kYUVA_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ return 4 * fBitsPerComponent;
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+ }
+
+ SkEncodedInfo(const SkEncodedInfo& orig) = delete;
+ SkEncodedInfo& operator=(const SkEncodedInfo&) = delete;
+
+ SkEncodedInfo(SkEncodedInfo&& orig) = default;
+ SkEncodedInfo& operator=(SkEncodedInfo&&) = default;
+
+ // Explicit copy method, to avoid accidental copying.
+ SkEncodedInfo copy() const {
+ auto copy = SkEncodedInfo::Make(fWidth, fHeight, fColor, fAlpha, fBitsPerComponent);
+ if (fProfile) {
+ copy.fProfile.reset(new ICCProfile(*fProfile.get()));
+ }
+ return copy;
+ }
+
+private:
+ SkEncodedInfo(int width, int height, Color color, Alpha alpha,
+ uint8_t bitsPerComponent, std::unique_ptr<ICCProfile> profile)
+ : fWidth(width)
+ , fHeight(height)
+ , fColor(color)
+ , fAlpha(alpha)
+ , fBitsPerComponent(bitsPerComponent)
+ , fProfile(std::move(profile))
+ {}
+
+ int fWidth;
+ int fHeight;
+ Color fColor;
+ Alpha fAlpha;
+ uint8_t fBitsPerComponent;
+ std::unique_ptr<ICCProfile> fProfile;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkFixed.h b/gfx/skia/skia/include/private/SkFixed.h
new file mode 100644
index 0000000000..af858efc89
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFixed.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFixed_DEFINED
+#define SkFixed_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSafe_math.h"
+#include "include/private/SkTo.h"
+
+/** \file SkFixed.h
+
+ Types and macros for 16.16 fixed point
+*/
+
+/** 32 bit signed integer used to represent fractional values with 16 bits to the right of the decimal point
+*/
+typedef int32_t SkFixed;
+#define SK_Fixed1 (1 << 16)
+#define SK_FixedHalf (1 << 15)
+#define SK_FixedQuarter (1 << 14)
+#define SK_FixedMax (0x7FFFFFFF)
+#define SK_FixedMin (-SK_FixedMax)
+#define SK_FixedPI (0x3243F)
+#define SK_FixedSqrt2 (92682)
+#define SK_FixedTanPIOver8 (0x6A0A)
+#define SK_FixedRoot2Over2 (0xB505)
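+
+// A value v is stored as (int32_t)(v * 65536); e.g. 1.5f is 0x00018000 and
+// -0.25f is 0xFFFFC000 (two's complement).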
+
+// NOTE: SkFixedToFloat is exact. SkFloatToFixed seems to lack a rounding step. For all fixed-point
+// values, this version is as accurate as possible for (fixed -> float -> fixed). Rounding reduces
+// accuracy if the intermediate floats are in the range that only holds integers (adding 0.5f to an
+// odd integer then snaps to nearest even). Using double for the rounding math gives maximum
+// accuracy for (float -> fixed -> float), but that's usually overkill.
+#define SkFixedToFloat(x) ((x) * 1.52587890625e-5f)
+#define SkFloatToFixed(x) sk_float_saturate2int((x) * SK_Fixed1)
+
+#ifdef SK_DEBUG
+ static inline SkFixed SkFloatToFixed_Check(float x) {
+ int64_t n64 = (int64_t)(x * SK_Fixed1);
+ SkFixed n32 = (SkFixed)n64;
+ SkASSERT(n64 == n32);
+ return n32;
+ }
+#else
+ #define SkFloatToFixed_Check(x) SkFloatToFixed(x)
+#endif
+
+#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
+#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+/** Converts an integer to a SkFixed, asserting that the result does not overflow
+ a 32 bit signed integer
+*/
+#ifdef SK_DEBUG
+ inline SkFixed SkIntToFixed(int n)
+ {
+ SkASSERT(n >= -32768 && n <= 32767);
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting.
+ return (unsigned)n << 16;
+ }
+#else
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting. Then we force the cast to SkFixed to ensure that the answer is signed (like the
+ // debug version).
+ #define SkIntToFixed(n) (SkFixed)((unsigned)(n) << 16)
+#endif
+
+#define SkFixedRoundToInt(x) (((x) + SK_FixedHalf) >> 16)
+#define SkFixedCeilToInt(x) (((x) + SK_Fixed1 - 1) >> 16)
+#define SkFixedFloorToInt(x) ((x) >> 16)
+
+static inline SkFixed SkFixedRoundToFixed(SkFixed x) {
+ return (x + SK_FixedHalf) & 0xFFFF0000;
+}
+static inline SkFixed SkFixedCeilToFixed(SkFixed x) {
+ return (x + SK_Fixed1 - 1) & 0xFFFF0000;
+}
+static inline SkFixed SkFixedFloorToFixed(SkFixed x) {
+ return x & 0xFFFF0000;
+}
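+
+// For example, with x = 1.25 as SkFixed (0x00014000):
+//   SkFixedFloorToInt(x) == 1, SkFixedRoundToInt(x) == 1, SkFixedCeilToInt(x) == 2.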
+
+#define SkFixedAbs(x) SkAbs32(x)
+#define SkFixedAve(a, b) (((a) + (b)) >> 1)
+
+// The divide may exceed 32 bits. Clamp to a signed 32 bit result.
+#define SkFixedDiv(numer, denom) \
+ SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)(numer), 16) / (denom)), SK_MinS32, SK_MaxS32))
+
+static inline SkFixed SkFixedMul(SkFixed a, SkFixed b) {
+ return (SkFixed)((int64_t)a * b >> 16);
+}
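+
+// e.g. SkFixedMul(SK_FixedHalf, SK_FixedHalf) == SK_FixedQuarter: the 64-bit
+// product 0x8000 * 0x8000 == 0x40000000, and >> 16 leaves 0x4000.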
+
+///////////////////////////////////////////////////////////////////////////////
+// Platform-specific alternatives to our portable versions.
+
+// The VCVT float-to-fixed instruction is part of the VFPv3 instruction set.
+#if defined(__ARM_VFPV3__)
+    /* This guy does not handle NaN or other obscurities, but is faster
+       than (int)(x*65536). When built on Android with -Os, it needs to be
+       forced inline or we lose the speed benefit.
+ */
+ SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
+ {
+ int32_t y;
+ asm("vcvt.s32.f32 %0, %0, #16": "+w"(x));
+ memcpy(&y, &x, sizeof(y));
+ return y;
+ }
+ #undef SkFloatToFixed
+ #define SkFloatToFixed(x) SkFloatToFixed_arm(x)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkFixedToScalar(x) SkFixedToFloat(x)
+#define SkScalarToFixed(x) SkFloatToFixed(x)
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int64_t SkFixed3232; // 32.32
+
+#define SkFixed3232Max SK_MaxS64
+#define SkFixed3232Min (-SkFixed3232Max)
+
+#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32))
+#define SkFixed3232ToInt(x) ((int)((x) >> 32))
+#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16))
+#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16))
+#define SkFloatToFixed3232(x) sk_float_saturate2int64((x) * (65536.0f * 65536.0f))
+#define SkFixed3232ToFloat(x) ((x) * (1 / (65536.0f * 65536.0f)))
+
+#define SkScalarToFixed3232(x) SkFloatToFixed3232(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkFloatBits.h b/gfx/skia/skia/include/private/SkFloatBits.h
new file mode 100644
index 0000000000..89eea4b9e3
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFloatBits.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatBits_DEFINED
+#define SkFloatBits_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSafe_math.h"
+
+#include <float.h>
+
+/** Convert a sign-bit int (i.e. float interpreted as int) into a 2s complement
+ int. This also converts -0 (0x80000000) to 0. Doing this to a float allows
+ it to be compared using normal C operators (<, <=, etc.)
+*/
+static inline int32_t SkSignBitTo2sCompliment(int32_t x) {
+ if (x < 0) {
+ x &= 0x7FFFFFFF;
+ x = -x;
+ }
+ return x;
+}
+
+/** Convert a 2s complement int to a sign-bit (i.e. int interpreted as float).
+ This undoes the result of SkSignBitTo2sCompliment().
+ */
+static inline int32_t Sk2sComplimentToSignBit(int32_t x) {
+ int sign = x >> 31;
+ // make x positive
+ x = (x ^ sign) - sign;
+ // set the sign bit as needed
+ x |= SkLeftShift(sign, 31);
+ return x;
+}
+
+union SkFloatIntUnion {
+ float fFloat;
+ int32_t fSignBitInt;
+};
+
+// Helper to see a float as its bit pattern (w/o aliasing warnings)
+static inline int32_t SkFloat2Bits(float x) {
+ SkFloatIntUnion data;
+ data.fFloat = x;
+ return data.fSignBitInt;
+}
+
+// Helper to see a bit pattern as a float (w/o aliasing warnings)
+static inline float SkBits2Float(int32_t floatAsBits) {
+ SkFloatIntUnion data;
+ data.fSignBitInt = floatAsBits;
+ return data.fFloat;
+}
+
+constexpr int32_t gFloatBits_exponent_mask = 0x7F800000;
+constexpr int32_t gFloatBits_matissa_mask = 0x007FFFFF;
+
+static inline bool SkFloatBits_IsFinite(int32_t bits) {
+ return (bits & gFloatBits_exponent_mask) != gFloatBits_exponent_mask;
+}
+
+static inline bool SkFloatBits_IsInf(int32_t bits) {
+ return ((bits & gFloatBits_exponent_mask) == gFloatBits_exponent_mask) &&
+ (bits & gFloatBits_matissa_mask) == 0;
+}
+
+/** Return the float as a 2s complement int. Just to be used to compare floats
+ to each other or against positive float-bit-constants (like 0). This does
+ not return the int equivalent of the float, just something cheaper for
+ compares-only.
+ */
+static inline int32_t SkFloatAs2sCompliment(float x) {
+ return SkSignBitTo2sCompliment(SkFloat2Bits(x));
+}
+
+/** Return the 2s complement int as a float. This undoes the result of
+ SkFloatAs2sCompliment
+ */
+static inline float Sk2sComplimentAsFloat(int32_t x) {
+ return SkBits2Float(Sk2sComplimentToSignBit(x));
+}
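+
+// For example, for any two finite floats a and b (including -0 vs +0),
+// a < b exactly when SkFloatAs2sCompliment(a) < SkFloatAs2sCompliment(b).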
+
+// Scalar wrappers for float-bit routines
+
+#define SkScalarAs2sCompliment(x) SkFloatAs2sCompliment(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkFloatingPoint.h b/gfx/skia/skia/include/private/SkFloatingPoint.h
new file mode 100644
index 0000000000..7b34ac147e
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFloatingPoint.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatingPoint_DEFINED
+#define SkFloatingPoint_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkSafe_math.h"
+#include <float.h>
+#include <math.h>
+#include <cmath>
+#include <cstring>
+#include <limits>
+
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #include <xmmintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+// For _POSIX_VERSION
+#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
+#include <unistd.h>
+#endif
+
+constexpr float SK_FloatSqrt2 = 1.41421356f;
+constexpr float SK_FloatPI = 3.14159265f;
+constexpr double SK_DoublePI = 3.14159265358979323846264338327950288;
+
+// C++98 cmath std::pow seems to be the earliest portable way to get float pow.
+// However, on Linux including cmath undefines isfinite.
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14608
+static inline float sk_float_pow(float base, float exp) {
+ return powf(base, exp);
+}
+
+#define sk_float_sqrt(x) sqrtf(x)
+#define sk_float_sin(x) sinf(x)
+#define sk_float_cos(x) cosf(x)
+#define sk_float_tan(x) tanf(x)
+#define sk_float_floor(x) floorf(x)
+#define sk_float_ceil(x) ceilf(x)
+#define sk_float_trunc(x) truncf(x)
+#ifdef SK_BUILD_FOR_MAC
+# define sk_float_acos(x) static_cast<float>(acos(x))
+# define sk_float_asin(x) static_cast<float>(asin(x))
+#else
+# define sk_float_acos(x) acosf(x)
+# define sk_float_asin(x) asinf(x)
+#endif
+#define sk_float_atan2(y,x) atan2f(y,x)
+#define sk_float_abs(x) fabsf(x)
+#define sk_float_copysign(x, y) copysignf(x, y)
+#define sk_float_mod(x,y) fmodf(x,y)
+#define sk_float_exp(x) expf(x)
+#define sk_float_log(x) logf(x)
+
+constexpr float sk_float_degrees_to_radians(float degrees) {
+ return degrees * (SK_FloatPI / 180);
+}
+
+constexpr float sk_float_radians_to_degrees(float radians) {
+ return radians * (180 / SK_FloatPI);
+}
+
+#define sk_float_round(x) sk_float_floor((x) + 0.5f)
+
+// can't find log2f on android, but maybe that's just a tool bug?
+#ifdef SK_BUILD_FOR_ANDROID
+ static inline float sk_float_log2(float x) {
+ const double inv_ln_2 = 1.44269504088896;
+ return (float)(log(x) * inv_ln_2);
+ }
+#else
+ #define sk_float_log2(x) log2f(x)
+#endif
+
+static inline bool sk_float_isfinite(float x) {
+ return SkFloatBits_IsFinite(SkFloat2Bits(x));
+}
+
+static inline bool sk_floats_are_finite(float a, float b) {
+ return sk_float_isfinite(a) && sk_float_isfinite(b);
+}
+
+static inline bool sk_floats_are_finite(const float array[], int count) {
+ float prod = 0;
+ for (int i = 0; i < count; ++i) {
+ prod *= array[i];
+ }
+ // At this point, prod will either be NaN or 0
+ return prod == 0; // if prod is NaN, this check will return false
+}
+
+static inline bool sk_float_isinf(float x) {
+ return SkFloatBits_IsInf(SkFloat2Bits(x));
+}
+
+#ifdef SK_BUILD_FOR_WIN
+ #define sk_float_isnan(x) _isnan(x)
+#elif defined(__clang__) || defined(__GNUC__)
+ #define sk_float_isnan(x) __builtin_isnan(x)
+#else
+ #define sk_float_isnan(x) isnan(x)
+#endif
+
+#define sk_double_isnan(a) sk_float_isnan(a)
+
+#define SK_MaxS32FitsInFloat 2147483520
+#define SK_MinS32FitsInFloat (-SK_MaxS32FitsInFloat)
+
+#define SK_MaxS64FitsInFloat (SK_MaxS64 >> (63-24) << (63-24))   // 0x7fffff8000000000
+#define SK_MinS64FitsInFloat (-SK_MaxS64FitsInFloat)
+
+/**
+ * Return the closest int for the given float. Returns SK_MaxS32FitsInFloat for NaN.
+ */
+static inline int sk_float_saturate2int(float x) {
+ x = SkTMin<float>(x, SK_MaxS32FitsInFloat);
+ x = SkTMax<float>(x, SK_MinS32FitsInFloat);
+ return (int)x;
+}
+
+/**
+ * Return the closest int for the given double. Returns SK_MaxS32 for NaN.
+ */
+static inline int sk_double_saturate2int(double x) {
+ x = SkTMin<double>(x, SK_MaxS32);
+ x = SkTMax<double>(x, SK_MinS32);
+ return (int)x;
+}
+
+/**
+ * Return the closest int64_t for the given float. Returns SK_MaxS64FitsInFloat for NaN.
+ */
+static inline int64_t sk_float_saturate2int64(float x) {
+ x = SkTMin<float>(x, SK_MaxS64FitsInFloat);
+ x = SkTMax<float>(x, SK_MinS64FitsInFloat);
+ return (int64_t)x;
+}
+
+#define sk_float_floor2int(x) sk_float_saturate2int(sk_float_floor(x))
+#define sk_float_round2int(x) sk_float_saturate2int(sk_float_floor((x) + 0.5f))
+#define sk_float_ceil2int(x) sk_float_saturate2int(sk_float_ceil(x))
+
+#define sk_float_floor2int_no_saturate(x) (int)sk_float_floor(x)
+#define sk_float_round2int_no_saturate(x) (int)sk_float_floor((x) + 0.5f)
+#define sk_float_ceil2int_no_saturate(x) (int)sk_float_ceil(x)
+
+#define sk_double_floor(x) floor(x)
+#define sk_double_round(x) floor((x) + 0.5)
+#define sk_double_ceil(x) ceil(x)
+#define sk_double_floor2int(x) (int)floor(x)
+#define sk_double_round2int(x) (int)floor((x) + 0.5)
+#define sk_double_ceil2int(x) (int)ceil(x)
+
+// Cast double to float, ignoring any warning about too-large finite values being cast to float.
+// Clang thinks this is undefined, but it's actually implementation defined to return either
+// the largest float or infinity (one of the two bracketing representable floats). Good enough!
+#if defined(__clang__) && (__clang_major__ * 1000 + __clang_minor__) >= 3007
+__attribute__((no_sanitize("float-cast-overflow")))
+#endif
+static inline float sk_double_to_float(double x) {
+ return static_cast<float>(x);
+}
+
+#define SK_FloatNaN std::numeric_limits<float>::quiet_NaN()
+#define SK_FloatInfinity (+std::numeric_limits<float>::infinity())
+#define SK_FloatNegativeInfinity (-std::numeric_limits<float>::infinity())
+
+#define SK_DoubleNaN std::numeric_limits<double>::quiet_NaN()
+
+// Returns false if any of the floats are outside of [0...1]
+// Returns true if count is 0
+bool sk_floats_are_unit(const float array[], size_t count);
+
+static inline float sk_float_rsqrt_portable(float x) {
+ // Get initial estimate.
+ int i;
+ memcpy(&i, &x, 4);
+ i = 0x5F1FFFF9 - (i>>1);
+ float estimate;
+ memcpy(&estimate, &i, 4);
+
+ // One step of Newton's method to refine.
+ const float estimate_sq = estimate*estimate;
+ estimate *= 0.703952253f*(2.38924456f-x*estimate_sq);
+ return estimate;
+}
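+
+// For example, for x == 4.0f the refined estimate lands within about 0.07% of
+// the exact 0.5; the tuned constants above reportedly hold the maximum
+// relative error to roughly 0.065%.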
+
+// Fast, approximate inverse square root.
+// Compare to name-brand "1.0f / sk_float_sqrt(x)". Should be around 10x faster on SSE, 2x on NEON.
+static inline float sk_float_rsqrt(float x) {
+// We want all this inlined, so we'll inline SIMD and just take the hit when we don't know we've got
+// it at compile time. This is going to be too fast to productively hide behind a function pointer.
+//
+// We do one step of Newton's method to refine the estimates in the NEON and portable paths. No
+// refinement is faster but very inaccurate. Two steps are more accurate, but slower than 1/sqrt.
+//
+// Optimized constants in the portable path courtesy of http://rrrola.wz.cz/inv_sqrt.html
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));
+#elif defined(SK_ARM_HAS_NEON)
+ // Get initial estimate.
+ const float32x2_t xx = vdup_n_f32(x); // Clever readers will note we're doing everything 2x.
+ float32x2_t estimate = vrsqrte_f32(xx);
+
+ // One step of Newton's method to refine.
+ const float32x2_t estimate_sq = vmul_f32(estimate, estimate);
+ estimate = vmul_f32(estimate, vrsqrts_f32(xx, estimate_sq));
+ return vget_lane_f32(estimate, 0); // 1 will work fine too; the answer's in both places.
+#else
+ return sk_float_rsqrt_portable(x);
+#endif
+}
+
+// This is the number of significant digits we can print in a string such that when we read that
+// string back we get the floating point number we expect. The minimum value C requires is 6, but
+// most compilers support 9.
+#ifdef FLT_DECIMAL_DIG
+#define SK_FLT_DECIMAL_DIG FLT_DECIMAL_DIG
+#else
+#define SK_FLT_DECIMAL_DIG 9
+#endif
+
+// IEEE defines how float divide behaves for non-finite values and zero-denoms, but C does not,
+// so we have a helper that suppresses the possible undefined-behavior warnings.
+
+#ifdef __clang__
+__attribute__((no_sanitize("float-divide-by-zero")))
+#endif
+static inline float sk_ieee_float_divide(float numer, float denom) {
+ return numer / denom;
+}
+
+#ifdef __clang__
+__attribute__((no_sanitize("float-divide-by-zero")))
+#endif
+static inline double sk_ieee_double_divide(double numer, double denom) {
+ return numer / denom;
+}
+
+// While we clean up divide by zero, we'll replace places that do divide by zero with this TODO.
+static inline float sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(float n, float d) {
+ return sk_ieee_float_divide(n,d);
+}
+static inline double sk_ieee_double_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(double n, double d) {
+ return sk_ieee_double_divide(n,d);
+}
+
+static inline float sk_fmaf(float f, float m, float a) {
+#if defined(FP_FAST_FMA)
+ return std::fmaf(f,m,a);
+#else
+ return f*m+a;
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkHalf.h b/gfx/skia/skia/include/private/SkHalf.h
new file mode 100644
index 0000000000..d951891310
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkHalf.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHalf_DEFINED
+#define SkHalf_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkNx.h"
+
+// 16-bit floating point value
+// format is 1 bit sign, 5 bits exponent, 10 bits mantissa
+// only used for storage
+typedef uint16_t SkHalf;
+
+static constexpr uint16_t SK_HalfMin = 0x0400; // 2^-14 (minimum positive normal value)
+static constexpr uint16_t SK_HalfMax = 0x7bff; // 65504
+static constexpr uint16_t SK_HalfEpsilon = 0x1400; // 2^-10
+static constexpr uint16_t SK_Half1 = 0x3C00; // 1
+
+// convert between half and single precision floating point
+float SkHalfToFloat(SkHalf h);
+SkHalf SkFloatToHalf(float f);
+
+// Convert between half and single precision floating point,
+// assuming inputs and outputs are both finite, and may
+// flush values which would be denormal half floats to zero.
+static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t);
+static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f&);
+
+// ~~~~~~~~~~~ impl ~~~~~~~~~~~~~~ //
+
+// Like the serial versions in SkHalf.cpp, these are based on
+// https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+
+// GCC 4.9 lacks the intrinsics to use ARMv8 f16<->f32 instructions, so we use inline assembly.
+
+static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t rgba) {
+ Sk4h hs = Sk4h::Load(&rgba);
+#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
+ float32x4_t fs;
+ asm ("fcvtl %[fs].4s, %[hs].4h \n" // vcvt_f32_f16(...)
+ : [fs] "=w" (fs) // =w: write-only NEON register
+ : [hs] "w" (hs.fVec)); // w: read-only NEON register
+ return fs;
+#else
+ Sk4i bits = SkNx_cast<int>(hs), // Expand to 32 bit.
+ sign = bits & 0x00008000, // Save the sign bit for later...
+ positive = bits ^ sign, // ...but strip it off for now.
+ is_norm = 0x03ff < positive; // Exponent > 0?
+
+ // For normal half floats, extend the mantissa by 13 zero bits,
+ // then adjust the exponent from 15 bias to 127 bias.
+ Sk4i norm = (positive << 13) + ((127 - 15) << 23);
+
+ Sk4i merged = (sign << 16) | (norm & is_norm);
+ return Sk4f::Load(&merged);
+#endif
+}
+
+static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f& fs) {
+#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
+ float32x4_t vec = fs.fVec;
+ asm ("fcvtn %[vec].4h, %[vec].4s \n" // vcvt_f16_f32(vec)
+ : [vec] "+w" (vec)); // +w: read-write NEON register
+ return vreinterpret_u16_f32(vget_low_f32(vec));
+#else
+ Sk4i bits = Sk4i::Load(&fs),
+ sign = bits & 0x80000000, // Save the sign bit for later...
+ positive = bits ^ sign, // ...but strip it off for now.
+ will_be_norm = 0x387fdfff < positive; // greater than largest denorm half?
+
+ // For normal half floats, adjust the exponent from 127 bias to 15 bias,
+ // then drop the bottom 13 mantissa bits.
+ Sk4i norm = (positive - ((127 - 15) << 23)) >> 13;
+
+ Sk4i merged = (sign >> 16) | (will_be_norm & norm);
+ return SkNx_cast<uint16_t>(merged);
+#endif
+}
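+
+// Worked example: 1.0f is 0x3F800000; subtracting the (127-15) exponent bias
+// gives 0x07800000, and dropping 13 mantissa bits leaves 0x3C00 == SK_Half1.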
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkImageInfoPriv.h b/gfx/skia/skia/include/private/SkImageInfoPriv.h
new file mode 100644
index 0000000000..d0ff6fd8df
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkImageInfoPriv.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageInfoPriv_DEFINED
+#define SkImageInfoPriv_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+enum SkColorTypeComponentFlag {
+ kRed_SkColorTypeComponentFlag = 0x1,
+ kGreen_SkColorTypeComponentFlag = 0x2,
+ kBlue_SkColorTypeComponentFlag = 0x4,
+ kAlpha_SkColorTypeComponentFlag = 0x8,
+ kGray_SkColorTypeComponentFlag = 0x10,
+ kRG_SkColorTypeComponentFlags = kRed_SkColorTypeComponentFlag |
+ kGreen_SkColorTypeComponentFlag,
+ kRGB_SkColorTypeComponentFlags = kRed_SkColorTypeComponentFlag |
+ kGreen_SkColorTypeComponentFlag |
+ kBlue_SkColorTypeComponentFlag,
+ kRGBA_SkColorTypeComponentFlags = kRGB_SkColorTypeComponentFlags |
+ kAlpha_SkColorTypeComponentFlag,
+};
+
+static inline uint32_t SkColorTypeComponentFlags(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return kAlpha_SkColorTypeComponentFlag;
+ case kRGB_565_SkColorType: return kRGB_SkColorTypeComponentFlags;
+ case kARGB_4444_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGBA_8888_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGB_888x_SkColorType: return kRGB_SkColorTypeComponentFlags;
+ case kBGRA_8888_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGBA_1010102_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGB_101010x_SkColorType: return kRGB_SkColorTypeComponentFlags;
+ case kGray_8_SkColorType: return kGray_SkColorTypeComponentFlag;
+ case kRGBA_F16Norm_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGBA_F16_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kRGBA_F32_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ case kR8G8_unorm_SkColorType: return kRG_SkColorTypeComponentFlags;
+ case kA16_unorm_SkColorType: return kAlpha_SkColorTypeComponentFlag;
+ case kR16G16_unorm_SkColorType: return kRG_SkColorTypeComponentFlags;
+ case kA16_float_SkColorType: return kAlpha_SkColorTypeComponentFlag;
+ case kR16G16_float_SkColorType: return kRG_SkColorTypeComponentFlags;
+ case kR16G16B16A16_unorm_SkColorType: return kRGBA_SkColorTypeComponentFlags;
+ }
+ SkUNREACHABLE;
+}
+
+static inline bool SkColorTypeIsAlphaOnly(SkColorType ct) {
+ return kAlpha_SkColorTypeComponentFlag == SkColorTypeComponentFlags(ct);
+}
+
+static inline bool SkAlphaTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkAlphaType;
+}
+
+static inline bool SkColorTypeIsGray(SkColorType ct) {
+ auto flags = SkColorTypeComponentFlags(ct);
+ // Currently assuming that a color type has only gray or does not have gray.
+ SkASSERT(!(kGray_SkColorTypeComponentFlag & flags) || kGray_SkColorTypeComponentFlag == flags);
+ return kGray_SkColorTypeComponentFlag == flags;
+}
+
+static int SkColorTypeShiftPerPixel(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return 0;
+ case kRGB_565_SkColorType: return 1;
+ case kARGB_4444_SkColorType: return 1;
+ case kRGBA_8888_SkColorType: return 2;
+ case kRGB_888x_SkColorType: return 2;
+ case kBGRA_8888_SkColorType: return 2;
+ case kRGBA_1010102_SkColorType: return 2;
+ case kRGB_101010x_SkColorType: return 2;
+ case kGray_8_SkColorType: return 0;
+ case kRGBA_F16Norm_SkColorType: return 3;
+ case kRGBA_F16_SkColorType: return 3;
+ case kRGBA_F32_SkColorType: return 4;
+ case kR8G8_unorm_SkColorType: return 1;
+ case kA16_unorm_SkColorType: return 1;
+ case kR16G16_unorm_SkColorType: return 2;
+ case kA16_float_SkColorType: return 1;
+ case kR16G16_float_SkColorType: return 2;
+ case kR16G16B16A16_unorm_SkColorType: return 3;
+ }
+ SkUNREACHABLE;
+}
+
+static inline size_t SkColorTypeMinRowBytes(SkColorType ct, int width) {
+ return width * SkColorTypeBytesPerPixel(ct);
+}
+
+static inline bool SkColorTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkColorType;
+}
+
+static inline size_t SkColorTypeComputeOffset(SkColorType ct, int x, int y, size_t rowBytes) {
+ if (kUnknown_SkColorType == ct) {
+ return 0;
+ }
+ return y * rowBytes + (x << SkColorTypeShiftPerPixel(ct));
+}
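+
+// For example, with kRGBA_8888_SkColorType (shift 2, i.e. 4 bytes per pixel)
+// and rowBytes == 256, the pixel at (x=3, y=2) starts at byte offset
+// 2*256 + (3 << 2) == 524.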
+
+/**
+ * Returns true if |info| contains a valid combination of width, height, colorType, and alphaType.
+ */
+static inline bool SkImageInfoIsValid(const SkImageInfo& info) {
+ if (info.width() <= 0 || info.height() <= 0) {
+ return false;
+ }
+
+ const int kMaxDimension = SK_MaxS32 >> 2;
+ if (info.width() > kMaxDimension || info.height() > kMaxDimension) {
+ return false;
+ }
+
+ if (kUnknown_SkColorType == info.colorType() || kUnknown_SkAlphaType == info.alphaType()) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Returns true if Skia has defined a pixel conversion from the |src| to the |dst|.
+ * Returns false otherwise.
+ */
+static inline bool SkImageInfoValidConversion(const SkImageInfo& dst, const SkImageInfo& src) {
+ return SkImageInfoIsValid(dst) && SkImageInfoIsValid(src);
+}
+#endif // SkImageInfoPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/SkMacros.h b/gfx/skia/skia/include/private/SkMacros.h
new file mode 100644
index 0000000000..a68193228f
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkMacros.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMacros_DEFINED
+#define SkMacros_DEFINED
+
+/*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use it directly.
+ *
+ */
+#define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
+#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y) X ## Y
+
+/*
+ * Usage: SK_MACRO_APPEND_LINE(foo) to make foo123, where 123 is the current
+ * line number. Easy way to construct
+ * unique names for local functions or
+ * variables.
+ */
+#define SK_MACRO_APPEND_LINE(name) SK_MACRO_CONCAT(name, __LINE__)
+
+/**
+ * For some classes, it's almost always an error to instantiate one without a name, e.g.
+ * {
+ * SkAutoMutexAcquire(&mutex);
+ * <some code>
+ * }
+ * In this case, the writer meant to hold mutex while the rest of the code in the block runs,
+ * but instead the mutex is acquired and then immediately released. The correct usage is
+ * {
+ * SkAutoMutexAcquire lock(&mutex);
+ * <some code>
+ * }
+ *
+ * To prevent callers from instantiating your class without a name, use SK_REQUIRE_LOCAL_VAR
+ * like this:
+ * class classname {
+ * <your class>
+ * };
+ * #define classname(...) SK_REQUIRE_LOCAL_VAR(classname)
+ *
+ * This won't work with templates, and you must inline the class' constructors and destructors.
+ * Take a look at SkAutoFree and SkAutoMalloc for examples.
+ */
+#define SK_REQUIRE_LOCAL_VAR(classname) \
+ static_assert(false, "missing name for " #classname)
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Can be used to bracket data types that must be dense, e.g. hash keys.
+#if defined(__clang__) // This should work on GCC too, but GCC diagnostic pop didn't seem to work!
+ #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic error \"-Wpadded\"")
+ #define SK_END_REQUIRE_DENSE _Pragma("GCC diagnostic pop")
+#else
+ #define SK_BEGIN_REQUIRE_DENSE
+ #define SK_END_REQUIRE_DENSE
+#endif
+
+#define SK_INIT_TO_AVOID_WARNING = 0
+
+#endif // SkMacros_DEFINED
diff --git a/gfx/skia/skia/include/private/SkMalloc.h b/gfx/skia/skia/include/private/SkMalloc.h
new file mode 100644
index 0000000000..cb7bbb7035
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkMalloc.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMalloc_DEFINED
+#define SkMalloc_DEFINED
+
+#include <stddef.h>
+#include <string.h>
+
+#include "include/core/SkTypes.h"
+
+/*
+ memory wrappers to be implemented by the porting layer (platform)
+*/
+
+
+/** Free memory returned by sk_malloc(). It is safe to pass null. */
+SK_API extern void sk_free(void*);
+
+/**
+ * Called internally if we run out of memory. The platform implementation must
+ * not return, but should either throw an exception or otherwise exit.
+ */
+SK_API extern void sk_out_of_memory(void);
+
+enum {
+ /**
+ * If this bit is set, the returned buffer must be zero-initialized. If this bit is not set
+ * the buffer can be uninitialized.
+ */
+ SK_MALLOC_ZERO_INITIALIZE = 1 << 0,
+
+ /**
+ * If this bit is set, the implementation must throw/crash/quit if the request cannot
+ * be fulfilled. If this bit is not set, then it should return nullptr on failure.
+ */
+ SK_MALLOC_THROW = 1 << 1,
+};
+/**
+ * Return a block of memory (at least 4-byte aligned) of at least the specified size.
+ * If the requested memory cannot be returned, either return nullptr or throw/exit, depending
+ * on the SK_MALLOC_THROW bit. If the allocation succeeds, the memory will be zero-initialized
+ * if the SK_MALLOC_ZERO_INITIALIZE bit was set.
+ *
+ * To free the memory, call sk_free()
+ */
+SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
+
+/** Same as standard realloc(), but this one never returns null on failure. It will throw
+ * an exception if it fails.
+ */
+SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
+
+static inline void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+static inline void* sk_calloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_ZERO_INITIALIZE);
+}
+
+static inline void* sk_calloc_canfail(size_t size) {
+#if defined(IS_FUZZING_WITH_LIBFUZZER)
+ // The Libfuzzer environment is very susceptible to OOM, so to avoid those
+ // just pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, SK_MALLOC_ZERO_INITIALIZE);
+}
+
+// Performs a safe multiply count * elemSize, checking for overflow
+SK_API extern void* sk_calloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_malloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize);
+
+/**
+ * These variants return nullptr on failure
+ */
+static inline void* sk_malloc_canfail(size_t size) {
+#if defined(IS_FUZZING_WITH_LIBFUZZER)
+ // The Libfuzzer environment is very susceptible to OOM, so to avoid those
+ // just pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, 0);
+}
+SK_API extern void* sk_malloc_canfail(size_t count, size_t elemSize);
+
+// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
+static inline void sk_bzero(void* buffer, size_t size) {
+    // Please see sk_careful_memcpy. It's undefined behavior to call memset(null, 0, 0).
+ if (size) {
+ memset(buffer, 0, size);
+ }
+}
+
+/**
+ * sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior.
+ *
+ * It is undefined behavior to call memcpy() with null dst or src, even if len is 0.
+ * If an optimizer is "smart" enough, it can exploit this to do unexpected things.
+ * memcpy(dst, src, 0);
+ * if (src) {
+ * printf("%x\n", *src);
+ * }
+ * In this code the compiler can assume src is not null and omit the if (src) {...} check,
+ * unconditionally running the printf, crashing the program if src really is null.
+ * Of the compilers we pay attention to only GCC performs this optimization in practice.
+ */
+static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcpy when len == 0.
+ if (len) {
+ memcpy(dst,src,len);
+ }
+ return dst;
+}
+
+static inline void* sk_careful_memmove(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+    // So we just need to skip calling memmove when len == 0.
+ if (len) {
+ memmove(dst,src,len);
+ }
+ return dst;
+}
+
+#endif // SkMalloc_DEFINED
diff --git a/gfx/skia/skia/include/private/SkMutex.h b/gfx/skia/skia/include/private/SkMutex.h
new file mode 100644
index 0000000000..a9af46fdc1
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkMutex.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMutex_DEFINED
+#define SkMutex_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkSemaphore.h"
+#include "include/private/SkThreadAnnotations.h"
+#include "include/private/SkThreadID.h"
+
+class SK_CAPABILITY("mutex") SkMutex {
+public:
+ constexpr SkMutex() = default;
+
+ void acquire() SK_ACQUIRE() {
+ fSemaphore.wait();
+ SkDEBUGCODE(fOwner = SkGetThreadID();)
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ this->assertHeld();
+ SkDEBUGCODE(fOwner = kIllegalThreadID;)
+ fSemaphore.signal();
+ }
+
+ void assertHeld() SK_ASSERT_CAPABILITY(this) {
+ SkASSERT(fOwner == SkGetThreadID());
+ }
+
+private:
+ SkSemaphore fSemaphore{1};
+ SkDEBUGCODE(SkThreadID fOwner{kIllegalThreadID};)
+};
+
+class SK_SCOPED_CAPABILITY SkAutoMutexExclusive {
+public:
+ SkAutoMutexExclusive(SkMutex& mutex) SK_ACQUIRE(mutex) : fMutex(mutex) { fMutex.acquire(); }
+ ~SkAutoMutexExclusive() SK_RELEASE_CAPABILITY() { fMutex.release(); }
+
+private:
+ SkMutex& fMutex;
+};
+
+#define SkAutoMutexExclusive(...) SK_REQUIRE_LOCAL_VAR(SkAutoMutexExclusive)
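+
+// The macro above rejects the unnamed form SkAutoMutexExclusive(mutex) at
+// compile time, since that would acquire and immediately release the lock.
+// Typical named usage (illustrative; gMutex is a stand-in):
+//     SkMutex gMutex;
+//     void f() {
+//         SkAutoMutexExclusive lock(gMutex);  // held until end of scope
+//     }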
+
+#endif // SkMutex_DEFINED
diff --git a/gfx/skia/skia/include/private/SkNoncopyable.h b/gfx/skia/skia/include/private/SkNoncopyable.h
new file mode 100644
index 0000000000..bda5d50bb6
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkNoncopyable.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoncopyable_DEFINED
+#define SkNoncopyable_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/** \class SkNoncopyable
+
+ SkNoncopyable is the base class for objects that do not want to
+ be copied. It hides its copy-constructor and its assignment-operator.
+*/
+class SK_API SkNoncopyable {
+public:
+ SkNoncopyable() = default;
+
+ SkNoncopyable(SkNoncopyable&&) = default;
+ SkNoncopyable& operator =(SkNoncopyable&&) = default;
+
+private:
+ SkNoncopyable(const SkNoncopyable&) = delete;
+ SkNoncopyable& operator=(const SkNoncopyable&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkNx.h b/gfx/skia/skia/include/private/SkNx.h
new file mode 100644
index 0000000000..4bc0cfa72e
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkNx.h
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_DEFINED
+#define SkNx_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSafe_math.h"
+
+#include <algorithm>
+#include <limits>
+#include <type_traits>
+
+// Every single SkNx method wants to be fully inlined. (We know better than MSVC).
+#define AI SK_ALWAYS_INLINE
+
+namespace { // NOLINT(google-build-namespaces)
+
+// The default SkNx<N,T> just proxies down to a pair of SkNx<N/2, T>.
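+// For example, Sk8f is a pair of Sk4f halves: an Sk8f operation recurses into
+// two Sk4f operations, bottoming out at SkNx<1,T> scalars or, when available,
+// at the platform specializations included at the end of this file.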
+template <int N, typename T>
+struct SkNx {
+ typedef SkNx<N/2, T> Half;
+
+ Half fLo, fHi;
+
+ AI SkNx() = default;
+ AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}
+
+ AI SkNx(T v) : fLo(v), fHi(v) {}
+
+ AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }
+ AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }
+ AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {
+ static_assert(N==8, "");
+ }
+ AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
+ T i, T j, T k, T l, T m, T n, T o, T p)
+ : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) {
+ static_assert(N==16, "");
+ }
+
+ AI T operator[](int k) const {
+ SkASSERT(0 <= k && k < N);
+ return k < N/2 ? fLo[k] : fHi[k-N/2];
+ }
+
+ AI static SkNx Load(const void* vptr) {
+ auto ptr = (const char*)vptr;
+ return { Half::Load(ptr), Half::Load(ptr + N/2*sizeof(T)) };
+ }
+ AI void store(void* vptr) const {
+ auto ptr = (char*)vptr;
+ fLo.store(ptr);
+ fHi.store(ptr + N/2*sizeof(T));
+ }
+
+ AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
+ auto ptr = (const char*)vptr;
+ Half al, bl, cl, dl,
+ ah, bh, ch, dh;
+ Half::Load4(ptr , &al, &bl, &cl, &dl);
+ Half::Load4(ptr + 4*N/2*sizeof(T), &ah, &bh, &ch, &dh);
+ *a = SkNx{al, ah};
+ *b = SkNx{bl, bh};
+ *c = SkNx{cl, ch};
+ *d = SkNx{dl, dh};
+ }
+ AI static void Load3(const void* vptr, SkNx* a, SkNx* b, SkNx* c) {
+ auto ptr = (const char*)vptr;
+ Half al, bl, cl,
+ ah, bh, ch;
+ Half::Load3(ptr , &al, &bl, &cl);
+ Half::Load3(ptr + 3*N/2*sizeof(T), &ah, &bh, &ch);
+ *a = SkNx{al, ah};
+ *b = SkNx{bl, bh};
+ *c = SkNx{cl, ch};
+ }
+ AI static void Load2(const void* vptr, SkNx* a, SkNx* b) {
+ auto ptr = (const char*)vptr;
+ Half al, bl,
+ ah, bh;
+ Half::Load2(ptr , &al, &bl);
+ Half::Load2(ptr + 2*N/2*sizeof(T), &ah, &bh);
+ *a = SkNx{al, ah};
+ *b = SkNx{bl, bh};
+ }
+ AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ auto ptr = (char*)vptr;
+ Half::Store4(ptr, a.fLo, b.fLo, c.fLo, d.fLo);
+ Half::Store4(ptr + 4*N/2*sizeof(T), a.fHi, b.fHi, c.fHi, d.fHi);
+ }
+ AI static void Store3(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c) {
+ auto ptr = (char*)vptr;
+ Half::Store3(ptr, a.fLo, b.fLo, c.fLo);
+ Half::Store3(ptr + 3*N/2*sizeof(T), a.fHi, b.fHi, c.fHi);
+ }
+ AI static void Store2(void* vptr, const SkNx& a, const SkNx& b) {
+ auto ptr = (char*)vptr;
+ Half::Store2(ptr, a.fLo, b.fLo);
+ Half::Store2(ptr + 2*N/2*sizeof(T), a.fHi, b.fHi);
+ }
+
+ AI T min() const { return SkTMin(fLo.min(), fHi.min()); }
+ AI T max() const { return SkTMax(fLo.max(), fHi.max()); }
+ AI bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
+ AI bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
+
+ AI SkNx abs() const { return { fLo. abs(), fHi. abs() }; }
+ AI SkNx sqrt() const { return { fLo. sqrt(), fHi. sqrt() }; }
+ AI SkNx rsqrt() const { return { fLo. rsqrt(), fHi. rsqrt() }; }
+ AI SkNx floor() const { return { fLo. floor(), fHi. floor() }; }
+ AI SkNx invert() const { return { fLo.invert(), fHi.invert() }; }
+
+ AI SkNx operator!() const { return { !fLo, !fHi }; }
+ AI SkNx operator-() const { return { -fLo, -fHi }; }
+ AI SkNx operator~() const { return { ~fLo, ~fHi }; }
+
+ AI SkNx operator<<(int bits) const { return { fLo << bits, fHi << bits }; }
+ AI SkNx operator>>(int bits) const { return { fLo >> bits, fHi >> bits }; }
+
+ AI SkNx operator+(const SkNx& y) const { return { fLo + y.fLo, fHi + y.fHi }; }
+ AI SkNx operator-(const SkNx& y) const { return { fLo - y.fLo, fHi - y.fHi }; }
+ AI SkNx operator*(const SkNx& y) const { return { fLo * y.fLo, fHi * y.fHi }; }
+ AI SkNx operator/(const SkNx& y) const { return { fLo / y.fLo, fHi / y.fHi }; }
+
+ AI SkNx operator&(const SkNx& y) const { return { fLo & y.fLo, fHi & y.fHi }; }
+ AI SkNx operator|(const SkNx& y) const { return { fLo | y.fLo, fHi | y.fHi }; }
+ AI SkNx operator^(const SkNx& y) const { return { fLo ^ y.fLo, fHi ^ y.fHi }; }
+
+ AI SkNx operator==(const SkNx& y) const { return { fLo == y.fLo, fHi == y.fHi }; }
+ AI SkNx operator!=(const SkNx& y) const { return { fLo != y.fLo, fHi != y.fHi }; }
+ AI SkNx operator<=(const SkNx& y) const { return { fLo <= y.fLo, fHi <= y.fHi }; }
+ AI SkNx operator>=(const SkNx& y) const { return { fLo >= y.fLo, fHi >= y.fHi }; }
+ AI SkNx operator< (const SkNx& y) const { return { fLo < y.fLo, fHi < y.fHi }; }
+ AI SkNx operator> (const SkNx& y) const { return { fLo > y.fLo, fHi > y.fHi }; }
+
+ AI SkNx saturatedAdd(const SkNx& y) const {
+ return { fLo.saturatedAdd(y.fLo), fHi.saturatedAdd(y.fHi) };
+ }
+
+ AI SkNx mulHi(const SkNx& m) const {
+ return { fLo.mulHi(m.fLo), fHi.mulHi(m.fHi) };
+ }
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return { fLo.thenElse(t.fLo, e.fLo), fHi.thenElse(t.fHi, e.fHi) };
+ }
+ AI static SkNx Min(const SkNx& x, const SkNx& y) {
+ return { Half::Min(x.fLo, y.fLo), Half::Min(x.fHi, y.fHi) };
+ }
+ AI static SkNx Max(const SkNx& x, const SkNx& y) {
+ return { Half::Max(x.fLo, y.fLo), Half::Max(x.fHi, y.fHi) };
+ }
+};
+
+// The N -> N/2 recursion bottoms out at N == 1, a scalar value.
+template <typename T>
+struct SkNx<1,T> {
+ T fVal;
+
+ AI SkNx() = default;
+ AI SkNx(T v) : fVal(v) {}
+
+    // Android complains about unused parameters, so we guard it
+ AI T operator[](int SkDEBUGCODE(k)) const {
+ SkASSERT(k == 0);
+ return fVal;
+ }
+
+ AI static SkNx Load(const void* ptr) {
+ SkNx v;
+ memcpy(&v, ptr, sizeof(T));
+ return v;
+ }
+ AI void store(void* ptr) const { memcpy(ptr, &fVal, sizeof(T)); }
+
+ AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
+ auto ptr = (const char*)vptr;
+ *a = Load(ptr + 0*sizeof(T));
+ *b = Load(ptr + 1*sizeof(T));
+ *c = Load(ptr + 2*sizeof(T));
+ *d = Load(ptr + 3*sizeof(T));
+ }
+ AI static void Load3(const void* vptr, SkNx* a, SkNx* b, SkNx* c) {
+ auto ptr = (const char*)vptr;
+ *a = Load(ptr + 0*sizeof(T));
+ *b = Load(ptr + 1*sizeof(T));
+ *c = Load(ptr + 2*sizeof(T));
+ }
+ AI static void Load2(const void* vptr, SkNx* a, SkNx* b) {
+ auto ptr = (const char*)vptr;
+ *a = Load(ptr + 0*sizeof(T));
+ *b = Load(ptr + 1*sizeof(T));
+ }
+ AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ auto ptr = (char*)vptr;
+ a.store(ptr + 0*sizeof(T));
+ b.store(ptr + 1*sizeof(T));
+ c.store(ptr + 2*sizeof(T));
+ d.store(ptr + 3*sizeof(T));
+ }
+ AI static void Store3(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c) {
+ auto ptr = (char*)vptr;
+ a.store(ptr + 0*sizeof(T));
+ b.store(ptr + 1*sizeof(T));
+ c.store(ptr + 2*sizeof(T));
+ }
+ AI static void Store2(void* vptr, const SkNx& a, const SkNx& b) {
+ auto ptr = (char*)vptr;
+ a.store(ptr + 0*sizeof(T));
+ b.store(ptr + 1*sizeof(T));
+ }
+
+ AI T min() const { return fVal; }
+ AI T max() const { return fVal; }
+ AI bool anyTrue() const { return fVal != 0; }
+ AI bool allTrue() const { return fVal != 0; }
+
+ AI SkNx abs() const { return Abs(fVal); }
+ AI SkNx sqrt() const { return Sqrt(fVal); }
+ AI SkNx rsqrt() const { return T(1) / this->sqrt(); }
+ AI SkNx floor() const { return Floor(fVal); }
+ AI SkNx invert() const { return T(1) / *this; }
+
+ AI SkNx operator!() const { return !fVal; }
+ AI SkNx operator-() const { return -fVal; }
+ AI SkNx operator~() const { return FromBits(~ToBits(fVal)); }
+
+ AI SkNx operator<<(int bits) const { return fVal << bits; }
+ AI SkNx operator>>(int bits) const { return fVal >> bits; }
+
+ AI SkNx operator+(const SkNx& y) const { return fVal + y.fVal; }
+ AI SkNx operator-(const SkNx& y) const { return fVal - y.fVal; }
+ AI SkNx operator*(const SkNx& y) const { return fVal * y.fVal; }
+ AI SkNx operator/(const SkNx& y) const { return fVal / y.fVal; }
+
+ AI SkNx operator&(const SkNx& y) const { return FromBits(ToBits(fVal) & ToBits(y.fVal)); }
+ AI SkNx operator|(const SkNx& y) const { return FromBits(ToBits(fVal) | ToBits(y.fVal)); }
+ AI SkNx operator^(const SkNx& y) const { return FromBits(ToBits(fVal) ^ ToBits(y.fVal)); }
+
+ AI SkNx operator==(const SkNx& y) const { return FromBits(fVal == y.fVal ? ~0 : 0); }
+ AI SkNx operator!=(const SkNx& y) const { return FromBits(fVal != y.fVal ? ~0 : 0); }
+ AI SkNx operator<=(const SkNx& y) const { return FromBits(fVal <= y.fVal ? ~0 : 0); }
+ AI SkNx operator>=(const SkNx& y) const { return FromBits(fVal >= y.fVal ? ~0 : 0); }
+ AI SkNx operator< (const SkNx& y) const { return FromBits(fVal < y.fVal ? ~0 : 0); }
+ AI SkNx operator> (const SkNx& y) const { return FromBits(fVal > y.fVal ? ~0 : 0); }
+
+ AI static SkNx Min(const SkNx& x, const SkNx& y) { return x.fVal < y.fVal ? x : y; }
+ AI static SkNx Max(const SkNx& x, const SkNx& y) { return x.fVal > y.fVal ? x : y; }
+
+ AI SkNx saturatedAdd(const SkNx& y) const {
+ static_assert(std::is_unsigned<T>::value, "");
+ T sum = fVal + y.fVal;
+ return sum < fVal ? std::numeric_limits<T>::max() : sum;
+ }
+
+ AI SkNx mulHi(const SkNx& m) const {
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(sizeof(T) <= 4, "");
+ return static_cast<T>((static_cast<uint64_t>(fVal) * m.fVal) >> (sizeof(T)*8));
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
+
+private:
+ // Helper functions to choose the right float/double methods. (In <cmath> madness lies...)
+ AI static int Abs(int val) { return val < 0 ? -val : val; }
+
+ AI static float Abs(float val) { return ::fabsf(val); }
+ AI static float Sqrt(float val) { return ::sqrtf(val); }
+ AI static float Floor(float val) { return ::floorf(val); }
+
+ AI static double Abs(double val) { return ::fabs(val); }
+ AI static double Sqrt(double val) { return ::sqrt(val); }
+ AI static double Floor(double val) { return ::floor(val); }
+
+ // Helper functions for working with floats/doubles as bit patterns.
+ template <typename U>
+ AI static U ToBits(U v) { return v; }
+ AI static int32_t ToBits(float v) { int32_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+ AI static int64_t ToBits(double v) { int64_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+
+ template <typename Bits>
+ AI static T FromBits(Bits bits) {
+ static_assert(std::is_pod<T >::value &&
+ std::is_pod<Bits>::value &&
+ sizeof(T) <= sizeof(Bits), "");
+ T val;
+ memcpy(&val, &bits, sizeof(T));
+ return val;
+ }
+};
+
+// Allow scalars on the left or right of binary operators, and things like +=, &=, etc.
+#define V template <int N, typename T> AI static SkNx<N,T>
+ V operator+ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) + y; }
+ V operator- (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) - y; }
+ template <int N> AI static SkNx<N, float> operator-(int x, const SkNx<N, float>& y) { return SkNx<N,float>(x) - y; }
+ template <int N> AI static SkNx<N, uint16_t> operator-(int x, const SkNx<N, uint16_t>& y) { return SkNx<N,uint16_t>(x) - y; }
+ V operator* (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) * y; }
+ template <int N> AI static SkNx<N, uint16_t> operator*(int x, const SkNx<N, uint16_t>& y) { return SkNx<N,uint16_t>(x) * y; }
+ V operator/ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) / y; }
+ V operator& (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) & y; }
+ V operator| (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) | y; }
+ V operator^ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) ^ y; }
+ V operator==(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) == y; }
+ V operator!=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) != y; }
+ V operator<=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) <= y; }
+ template <int N> AI static SkNx<N, float> operator<=(int x, const SkNx<N, float>& y) { return SkNx<N,float>(x) <= y; }
+ V operator>=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) >= y; }
+ V operator< (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) < y; }
+ V operator> (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) > y; }
+
+ V operator+ (const SkNx<N,T>& x, T y) { return x + SkNx<N,T>(y); }
+ V operator- (const SkNx<N,T>& x, T y) { return x - SkNx<N,T>(y); }
+ V operator* (const SkNx<N,T>& x, T y) { return x * SkNx<N,T>(y); }
+ V operator/ (const SkNx<N,T>& x, T y) { return x / SkNx<N,T>(y); }
+ V operator& (const SkNx<N,T>& x, T y) { return x & SkNx<N,T>(y); }
+ V operator| (const SkNx<N,T>& x, T y) { return x | SkNx<N,T>(y); }
+ V operator^ (const SkNx<N,T>& x, T y) { return x ^ SkNx<N,T>(y); }
+ V operator==(const SkNx<N,T>& x, T y) { return x == SkNx<N,T>(y); }
+ V operator!=(const SkNx<N,T>& x, T y) { return x != SkNx<N,T>(y); }
+ V operator<=(const SkNx<N,T>& x, T y) { return x <= SkNx<N,T>(y); }
+ V operator>=(const SkNx<N,T>& x, T y) { return x >= SkNx<N,T>(y); }
+ V operator< (const SkNx<N,T>& x, T y) { return x < SkNx<N,T>(y); }
+ V operator> (const SkNx<N,T>& x, T y) { return x > SkNx<N,T>(y); }
+
+ V& operator<<=(SkNx<N,T>& x, int bits) { return (x = x << bits); }
+ V& operator>>=(SkNx<N,T>& x, int bits) { return (x = x >> bits); }
+
+ V& operator +=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x + y); }
+ V& operator -=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x - y); }
+ V& operator *=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x * y); }
+ V& operator /=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x / y); }
+ V& operator &=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x & y); }
+ V& operator |=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x | y); }
+ V& operator ^=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x ^ y); }
+
+ V& operator +=(SkNx<N,T>& x, T y) { return (x = x + SkNx<N,T>(y)); }
+ V& operator -=(SkNx<N,T>& x, T y) { return (x = x - SkNx<N,T>(y)); }
+ V& operator *=(SkNx<N,T>& x, T y) { return (x = x * SkNx<N,T>(y)); }
+ V& operator /=(SkNx<N,T>& x, T y) { return (x = x / SkNx<N,T>(y)); }
+ V& operator &=(SkNx<N,T>& x, T y) { return (x = x & SkNx<N,T>(y)); }
+ V& operator |=(SkNx<N,T>& x, T y) { return (x = x | SkNx<N,T>(y)); }
+ V& operator ^=(SkNx<N,T>& x, T y) { return (x = x ^ SkNx<N,T>(y)); }
+#undef V
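+// So, for example, 2.0f * Sk4f{1,2,3,4} and Sk4f{1,2,3,4} * 2.0f both yield
+// Sk4f{2,4,6,8}, and v *= 2.0f updates v in place via the compound forms above.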
+
+// SkNx<N,T> ~~> SkNx<N/2,T> + SkNx<N/2,T>
+template <int N, typename T>
+AI static void SkNx_split(const SkNx<N,T>& v, SkNx<N/2,T>* lo, SkNx<N/2,T>* hi) {
+ *lo = v.fLo;
+ *hi = v.fHi;
+}
+
+// SkNx<N/2,T> + SkNx<N/2,T> ~~> SkNx<N,T>
+template <int N, typename T>
+AI static SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) {
+ return { lo, hi };
+}
+
+// A very generic shuffle. Can reorder, duplicate, contract, expand...
+// Sk4f v = { R,G,B,A };
+// SkNx_shuffle<2,1,0,3>(v) ~~> {B,G,R,A}
+// SkNx_shuffle<2,1>(v) ~~> {B,G}
+// SkNx_shuffle<2,1,2,1,2,1,2,1>(v) ~~> {B,G,B,G,B,G,B,G}
+// SkNx_shuffle<3,3,3,3>(v) ~~> {A,A,A,A}
+template <int... Ix, int N, typename T>
+AI static SkNx<sizeof...(Ix),T> SkNx_shuffle(const SkNx<N,T>& v) {
+ return { v[Ix]... };
+}
+
+// Cast from SkNx<N, Src> to SkNx<N, Dst>, as if you called static_cast<Dst>(Src).
+template <typename Dst, typename Src, int N>
+AI static SkNx<N,Dst> SkNx_cast(const SkNx<N,Src>& v) {
+ return { SkNx_cast<Dst>(v.fLo), SkNx_cast<Dst>(v.fHi) };
+}
+template <typename Dst, typename Src>
+AI static SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
+ return static_cast<Dst>(v.fVal);
+}
+
+template <int N, typename T>
+AI static SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) {
+ return f*m+a;
+}
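+// Note: this generic form rounds twice (once for f*m, once for the add); the
+// platform headers included below provide a single-rounding fused overload on ARMv8.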
+
+} // namespace
+
+typedef SkNx<2, float> Sk2f;
+typedef SkNx<4, float> Sk4f;
+typedef SkNx<8, float> Sk8f;
+typedef SkNx<16, float> Sk16f;
+
+typedef SkNx<2, SkScalar> Sk2s;
+typedef SkNx<4, SkScalar> Sk4s;
+typedef SkNx<8, SkScalar> Sk8s;
+typedef SkNx<16, SkScalar> Sk16s;
+
+typedef SkNx<4, uint8_t> Sk4b;
+typedef SkNx<8, uint8_t> Sk8b;
+typedef SkNx<16, uint8_t> Sk16b;
+
+typedef SkNx<4, uint16_t> Sk4h;
+typedef SkNx<8, uint16_t> Sk8h;
+typedef SkNx<16, uint16_t> Sk16h;
+
+typedef SkNx<4, int32_t> Sk4i;
+typedef SkNx<8, int32_t> Sk8i;
+typedef SkNx<4, uint32_t> Sk4u;
+typedef SkNx<8, uint32_t> Sk8u;
+
+// Include platform specific specializations if available.
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "include/private/SkNx_sse.h"
+#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
+ #include "include/private/SkNx_neon.h"
+#else
+
+AI static Sk4i Sk4f_round(const Sk4f& x) {
+ return { (int) lrintf (x[0]),
+ (int) lrintf (x[1]),
+ (int) lrintf (x[2]),
+ (int) lrintf (x[3]), };
+}
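+// lrintf() rounds according to the current FP environment, which is
+// round-to-nearest-even by default.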
+
+#endif
+
+AI static void Sk4f_ToBytes(uint8_t p[16],
+ const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
+ SkNx_cast<uint8_t>(SkNx_join(SkNx_join(a,b), SkNx_join(c,d))).store(p);
+}
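+// i.e. 16 floats in, 16 bytes out: the four vectors are joined into one Sk16f,
+// cast lane-wise to uint8_t, and stored contiguously at p.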
+
+#undef AI
+
+#endif//SkNx_DEFINED
diff --git a/gfx/skia/skia/include/private/SkNx_neon.h b/gfx/skia/skia/include/private/SkNx_neon.h
new file mode 100644
index 0000000000..a14a57c277
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkNx_neon.h
@@ -0,0 +1,740 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_neon_DEFINED
+#define SkNx_neon_DEFINED
+
+#include <arm_neon.h>
+
+namespace { // NOLINT(google-build-namespaces)
+
+// ARMv8 has vrndm(q)_f32 to floor floats; for older ARM we emulate it:
+//   - roundtrip through integers via truncation
+//   - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
+AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {
+ auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ auto too_big = vcgtq_f32(roundtrip, v);
+ return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
+}
+AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {
+ auto roundtrip = vcvt_f32_s32(vcvt_s32_f32(v));
+ auto too_big = vcgt_f32(roundtrip, v);
+ return vsub_f32(roundtrip, (float32x2_t)vand_u32(too_big, (uint32x2_t)vdup_n_f32(1)));
+}
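+// Why this works: the integer roundtrip truncates toward zero, so for negative
+// non-integers it lands one above floor(v). vcgt leaves all-ones in exactly
+// those lanes, and all-ones ANDed with the bits of 1.0f is 1.0f, so the final
+// subtract nudges only the offending lanes down by one.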
+
+template <>
+class SkNx<2, float> {
+public:
+ AI SkNx(float32x2_t vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
+ AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }
+
+ AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
+ AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }
+
+ AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
+ float32x2x2_t xy = vld2_f32((const float*) ptr);
+ *x = xy.val[0];
+ *y = xy.val[1];
+ }
+
+ AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {
+ float32x2x2_t ab = {{
+ a.fVec,
+ b.fVec,
+ }};
+ vst2_f32((float*) dst, ab);
+ }
+
+ AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {
+ float32x2x3_t abc = {{
+ a.fVec,
+ b.fVec,
+ c.fVec,
+ }};
+ vst3_f32((float*) dst, abc);
+ }
+
+ AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ float32x2x4_t abcd = {{
+ a.fVec,
+ b.fVec,
+ c.fVec,
+ d.fVec,
+ }};
+ vst4_f32((float*) dst, abcd);
+ }
+
+ AI SkNx invert() const {
+ float32x2_t est0 = vrecpe_f32(fVec),
+ est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
+ return est1;
+ }
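+    // vrecpe_f32 gives only a rough (roughly 8-bit) reciprocal estimate; each
+    // vrecps_f32 step is one Newton-Raphson refinement. invert() stops after
+    // one step for speed, while operator/ below takes two for accuracy.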
+
+ AI SkNx operator - () const { return vneg_f32(fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdiv_f32(fVec, o.fVec);
+ #else
+ float32x2_t est0 = vrecpe_f32(o.fVec),
+ est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
+ est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
+ AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
+ AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
+ AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
+ AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
+ AI SkNx operator!=(const SkNx& o) const {
+ return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
+ }
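+    // Each comparison returns a lane mask (all-ones or all-zeros bits)
+    // reinterpreted as float, which is exactly what thenElse() expects.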
+
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }
+
+ AI SkNx abs() const { return vabs_f32(fVec); }
+ AI SkNx floor() const {
+ #if defined(SK_CPU_ARM64)
+ return vrndm_f32(fVec);
+ #else
+ return emulate_vrndm_f32(fVec);
+ #endif
+ }
+
+ AI SkNx rsqrt() const {
+ float32x2_t est0 = vrsqrte_f32(fVec);
+ return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
+ }
+
+ AI SkNx sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrt_f32(fVec);
+ #else
+ float32x2_t est0 = vrsqrte_f32(fVec),
+ est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
+ est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ AI float operator[](int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { float32x2_t v; float fs[2]; } pun = {fVec};
+ return pun.fs[k&1];
+ }
+
+ AI bool allTrue() const {
+ #if defined(SK_CPU_ARM64)
+ return 0 != vminv_u32(vreinterpret_u32_f32(fVec));
+ #else
+ auto v = vreinterpret_u32_f32(fVec);
+ return vget_lane_u32(v,0) && vget_lane_u32(v,1);
+ #endif
+ }
+ AI bool anyTrue() const {
+ #if defined(SK_CPU_ARM64)
+ return 0 != vmaxv_u32(vreinterpret_u32_f32(fVec));
+ #else
+ auto v = vreinterpret_u32_f32(fVec);
+ return vget_lane_u32(v,0) || vget_lane_u32(v,1);
+ #endif
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec);
+ }
+
+ float32x2_t fVec;
+};
+
+template <>
+class SkNx<4, float> {
+public:
+ AI SkNx(float32x4_t vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
+ AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
+
+ AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
+ AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
+
+ AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
+ float32x4x2_t xy = vld2q_f32((const float*) ptr);
+ *x = xy.val[0];
+ *y = xy.val[1];
+ }
+
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ float32x4x4_t rgba = vld4q_f32((const float*) ptr);
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+ }
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ float32x4x4_t rgba = {{
+ r.fVec,
+ g.fVec,
+ b.fVec,
+ a.fVec,
+ }};
+ vst4q_f32((float*) dst, rgba);
+ }
+
+ AI SkNx invert() const {
+ float32x4_t est0 = vrecpeq_f32(fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
+ return est1;
+ }
+
+ AI SkNx operator - () const { return vnegq_f32(fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdivq_f32(fVec, o.fVec);
+ #else
+ float32x4_t est0 = vrecpeq_f32(o.fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
+ est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
+ AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
+ AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
+ AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
+ AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
+ AI SkNx operator!=(const SkNx& o) const {
+ return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
+ }
+
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }
+
+ AI SkNx abs() const { return vabsq_f32(fVec); }
+ AI SkNx floor() const {
+ #if defined(SK_CPU_ARM64)
+ return vrndmq_f32(fVec);
+ #else
+ return emulate_vrndmq_f32(fVec);
+ #endif
+ }
+
+ AI SkNx rsqrt() const {
+ float32x4_t est0 = vrsqrteq_f32(fVec);
+ return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
+ }
+
+ AI SkNx sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrtq_f32(fVec);
+ #else
+ float32x4_t est0 = vrsqrteq_f32(fVec),
+ est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
+ est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ AI float operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { float32x4_t v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&3];
+ }
+
+ AI float min() const {
+ #if defined(SK_CPU_ARM64)
+ return vminvq_f32(fVec);
+ #else
+ SkNx min = Min(*this, vrev64q_f32(fVec));
+ return SkTMin(min[0], min[2]);
+ #endif
+ }
+
+ AI float max() const {
+ #if defined(SK_CPU_ARM64)
+ return vmaxvq_f32(fVec);
+ #else
+ SkNx max = Max(*this, vrev64q_f32(fVec));
+ return SkTMax(max[0], max[2]);
+ #endif
+ }
+
+ AI bool allTrue() const {
+ #if defined(SK_CPU_ARM64)
+ return 0 != vminvq_u32(vreinterpretq_u32_f32(fVec));
+ #else
+ auto v = vreinterpretq_u32_f32(fVec);
+ return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
+ && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
+ #endif
+ }
+ AI bool anyTrue() const {
+ #if defined(SK_CPU_ARM64)
+ return 0 != vmaxvq_u32(vreinterpretq_u32_f32(fVec));
+ #else
+ auto v = vreinterpretq_u32_f32(fVec);
+ return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
+ || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
+ #endif
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
+ }
+
+ float32x4_t fVec;
+};
+
+#if defined(SK_CPU_ARM64)
+ AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
+ return vfmaq_f32(a.fVec, f.fVec, m.fVec);
+ }
+#endif
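+// The fused form rounds once instead of twice, so on ARMv8 results can differ
+// from the generic f*m+a in the last ulp.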
+
+// It's possible that for our current use cases, representing this as
+// half a uint16x8_t might be better than representing it as a uint16x4_t.
+// It'd make conversion to Sk4b one step simpler.
+template <>
+class SkNx<4, uint16_t> {
+public:
+ AI SkNx(const uint16x4_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+ fVec = (uint16x4_t) { a,b,c,d };
+ }
+
+ AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
+ AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }
+
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+ }
+ AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
+ uint16x4x3_t rgba = vld3_u16((const uint16_t*)ptr);
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ }
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ uint16x4x4_t rgba = {{
+ r.fVec,
+ g.fVec,
+ b.fVec,
+ a.fVec,
+ }};
+ vst4_u16((uint16_t*) dst, rgba);
+ }
+
+ AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
+
+ AI uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbsl_u16(fVec, t.fVec, e.fVec);
+ }
+
+ uint16x4_t fVec;
+};
+
+template <>
+class SkNx<8, uint16_t> {
+public:
+ AI SkNx(const uint16x8_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
+ AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
+
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
+ fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
+ }
+
+ AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }
+
+ AI uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ AI SkNx mulHi(const SkNx& m) const {
+ uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec));
+ uint32x4_t lo = vmull_u16( vget_low_u16(fVec), vget_low_u16(m.fVec));
+
+ return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) };
+ }
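+    // i.e. the high 16 bits of each 16x16->32 product: vmull_u16 widens, and
+    // vshrn_n_u32(..., 16) narrows back keeping bits 16..31.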
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u16(fVec, t.fVec, e.fVec);
+ }
+
+ uint16x8_t fVec;
+};
+
+template <>
+class SkNx<4, uint8_t> {
+public:
+ typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;
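+    // The aligned(1) typedef lets Load/store move 4 bytes through a pointer of
+    // any alignment without tripping the compiler's alignment assumptions.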
+
+ AI SkNx(const uint8x8_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
+ fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
+ }
+ AI static SkNx Load(const void* ptr) {
+ return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
+ }
+ AI void store(void* ptr) const {
+ return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
+ }
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ // TODO as needed
+
+ uint8x8_t fVec;
+};
+
+template <>
+class SkNx<8, uint8_t> {
+public:
+ AI SkNx(const uint8x8_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h) {
+ fVec = (uint8x8_t) { a,b,c,d, e,f,g,h };
+ }
+
+ AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); }
+ AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ uint8x8_t fVec;
+};
+
+template <>
+class SkNx<16, uint8_t> {
+public:
+ AI SkNx(const uint8x16_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
+ fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
+ }
+
+ AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
+ AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }
+
+ AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u8(fVec, t.fVec, e.fVec);
+ }
+
+ uint8x16_t fVec;
+};
+
+template <>
+class SkNx<4, int32_t> {
+public:
+ AI SkNx(const int32x4_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(int32_t v) {
+ fVec = vdupq_n_s32(v);
+ }
+ AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
+ fVec = (int32x4_t){a,b,c,d};
+ }
+ AI static SkNx Load(const void* ptr) {
+ return vld1q_s32((const int32_t*)ptr);
+ }
+ AI void store(void* ptr) const {
+ return vst1q_s32((int32_t*)ptr, fVec);
+ }
+ AI int32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { int32x4_t v; int32_t is[4]; } pun = {fVec};
+ return pun.is[k&3];
+ }
+
+ AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
+
+ AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ AI SkNx operator == (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
+ }
+ AI SkNx operator < (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
+ }
+ AI SkNx operator > (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
+ }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
+ AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
+ // TODO as needed
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
+ }
+
+ AI SkNx abs() const { return vabsq_s32(fVec); }
+
+ int32x4_t fVec;
+};
+
+template <>
+class SkNx<4, uint32_t> {
+public:
+ AI SkNx(const uint32x4_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint32_t v) {
+ fVec = vdupq_n_u32(v);
+ }
+ AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ fVec = (uint32x4_t){a,b,c,d};
+ }
+ AI static SkNx Load(const void* ptr) {
+ return vld1q_u32((const uint32_t*)ptr);
+ }
+ AI void store(void* ptr) const {
+ return vst1q_u32((uint32_t*)ptr, fVec);
+ }
+ AI uint32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
+
+ AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
+ // TODO as needed
+
+ AI SkNx mulHi(const SkNx& m) const {
+ uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec));
+ uint64x2_t lo = vmull_u32( vget_low_u32(fVec), vget_low_u32(m.fVec));
+
+ return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) };
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u32(fVec, t.fVec, e.fVec);
+ }
+
+ uint32x4_t fVec;
+};
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+    return vcvtq_s32_f32(src.fVec);
+}
+template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+ return vcvtq_f32_s32(src.fVec);
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+ return SkNx_cast<float>(Sk4i::Load(&src));
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint64_t>(const SkNx<4, uint64_t>& src) {
+ return Sk4f(float(int32_t(src[0])), float(int32_t(src[1])), float(int32_t(src[2])), float(int32_t(src[3])));
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+ return vqmovn_u32(vcvtq_u32_f32(src.fVec));
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+ return vcvtq_f32_u32(vmovl_u16(src.fVec));
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+ uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
+ uint16x4_t _16 = vqmovn_u32(_32);
+ return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
+ uint16x8_t _16 = vmovl_u8(src.fVec);
+ return vmovl_u16(vget_low_u16(_16));
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
+ return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+ return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
+}
+
+template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+ return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
+ (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
+ vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
+ (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
+}
+
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
+ Sk4i a, b;
+ SkNx_split(src, &a, &b);
+ uint16x4_t a16 = vqmovun_s32(a.fVec);
+ uint16x4_t b16 = vqmovun_s32(b.fVec);
+
+ return vqmovn_u16(vcombine_u16(a16, b16));
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+ return vget_low_u16(vmovl_u8(src.fVec));
+}
+
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
+ return vmovl_u8(src.fVec);
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+ return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
+}
+
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
+ return vqmovn_u16(src.fVec);
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+ uint16x4_t _16 = vqmovun_s32(src.fVec);
+ return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
+ uint16x4_t _16 = vqmovn_u32(src.fVec);
+ return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+ return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+ return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+ return vreinterpretq_s32_u32(src.fVec);
+}
+
+AI static Sk4i Sk4f_round(const Sk4f& x) {
+ return vcvtq_s32_f32((x + 0.5f).fVec);
+}
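+// Note: adding 0.5 and truncating rounds halfway cases away from zero only for
+// non-negative inputs; negative values get pulled toward zero. Presumably fine
+// for the non-negative pixel data this is used on.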
+
+} // namespace
+
+#endif//SkNx_neon_DEFINED
diff --git a/gfx/skia/skia/include/private/SkNx_sse.h b/gfx/skia/skia/include/private/SkNx_sse.h
new file mode 100644
index 0000000000..cc6ace3680
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkNx_sse.h
@@ -0,0 +1,894 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_sse_DEFINED
+#define SkNx_sse_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #include <smmintrin.h>
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #include <tmmintrin.h>
+#else
+ #include <emmintrin.h>
+#endif
+
+// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+// If you do, make sure the guarded code is in a static inline function... anywhere else risks violating the ODR.
+
+namespace { // NOLINT(google-build-namespaces)
+
+// Emulate _mm_floor_ps() with SSE2:
+// - roundtrip through integers via truncation
+// - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31.
+// Seems plenty big.
+AI static __m128 emulate_mm_floor_ps(__m128 v) {
+ __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+ __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
+ return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+}
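+// Same reasoning as the NEON emulation: truncation overshoots floor() by one
+// for negative non-integers, the compare mask flags exactly those lanes, and
+// the masked subtract of 1.0f corrects them.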
+
+template <>
+class SkNx<2, float> {
+public:
+ AI SkNx(const __m128& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
+ AI static SkNx Load(const void* ptr) {
+ return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
+ }
+ AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
+
+ AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }
+
+ AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
+ const float* m = (const float*)ptr;
+ *x = SkNx{m[0], m[2]};
+ *y = SkNx{m[1], m[3]};
+ }
+
+ AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {
+ auto vals = _mm_unpacklo_ps(a.fVec, b.fVec);
+ _mm_storeu_ps((float*)dst, vals);
+ }
+
+ AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {
+ auto lo = _mm_setr_ps(a[0], b[0], c[0], a[1]),
+ hi = _mm_setr_ps(b[1], c[1], 0, 0);
+ _mm_storeu_ps((float*)dst, lo);
+ _mm_storel_pi(((__m64*)dst) + 2, hi);
+ }
+
+ AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ auto lo = _mm_setr_ps(a[0], b[0], c[0], d[0]),
+ hi = _mm_setr_ps(a[1], b[1], c[1], d[1]);
+ _mm_storeu_ps((float*)dst, lo);
+ _mm_storeu_ps(((float*)dst) + 4, hi);
+ }
+
+ AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
+ AI SkNx floor() const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_floor_ps(fVec);
+ #else
+ return emulate_mm_floor_ps(fVec);
+ #endif
+ }
+
+ AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ AI SkNx invert() const { return _mm_rcp_ps(fVec); }
+
+ AI float operator[](int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&1];
+ }
+
+ AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+ AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_ps(e.fVec, t.fVec, fVec);
+ #else
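+        // Classic SSE2 select: (mask & t) | (~mask & e). Comparison results
+        // here are all-ones or all-zeros per lane, so this matches _mm_blendv_ps.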
+ return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
+ _mm_andnot_ps(fVec, e.fVec));
+ #endif
+ }
+
+ __m128 fVec;
+};
+
+template <>
+class SkNx<4, float> {
+public:
+ AI SkNx(const __m128& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
+ AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
+
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
+ AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
+
+ AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
+ SkNx lo = SkNx::Load((const float*)ptr+0),
+ hi = SkNx::Load((const float*)ptr+4);
+ *x = SkNx{lo[0], lo[2], hi[0], hi[2]};
+ *y = SkNx{lo[1], lo[3], hi[1], hi[3]};
+ }
+
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ __m128 v0 = _mm_loadu_ps(((float*)ptr) + 0),
+ v1 = _mm_loadu_ps(((float*)ptr) + 4),
+ v2 = _mm_loadu_ps(((float*)ptr) + 8),
+ v3 = _mm_loadu_ps(((float*)ptr) + 12);
+ _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
+ *r = v0;
+ *g = v1;
+ *b = v2;
+ *a = v3;
+ }
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ __m128 v0 = r.fVec,
+ v1 = g.fVec,
+ v2 = b.fVec,
+ v3 = a.fVec;
+ _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
+ _mm_storeu_ps(((float*) dst) + 0, v0);
+ _mm_storeu_ps(((float*) dst) + 4, v1);
+ _mm_storeu_ps(((float*) dst) + 8, v2);
+ _mm_storeu_ps(((float*) dst) + 12, v3);
+ }
+
+ AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }
+ AI SkNx operator ~ () const { return _mm_xor_ps(fVec, _mm_castsi128_ps(_mm_cmpeq_epi32(_mm_castps_si128(fVec), _mm_castps_si128(fVec)))); }
+ AI SkNx operator ! () const { return ~*this; }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_ps(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_ps(fVec, o.fVec); }
+
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
+ AI SkNx floor() const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_floor_ps(fVec);
+ #else
+ return emulate_mm_floor_ps(fVec);
+ #endif
+ }
+
+ AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ AI SkNx invert() const { return _mm_rcp_ps(fVec); }
+
+ AI float operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&3];
+ }
+
+ AI float min() const {
+ SkNx min = Min(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1)));
+ min = Min(min, _mm_shuffle_ps(min.fVec, min.fVec, _MM_SHUFFLE(0,1,2,3)));
+ return min[0];
+ }
+
+ AI float max() const {
+ SkNx max = Max(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1)));
+ max = Max(max, _mm_shuffle_ps(max.fVec, max.fVec, _MM_SHUFFLE(0,1,2,3)));
+ return max[0];
+ }
+
+ AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+ AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_ps(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
+ _mm_andnot_ps(fVec, e.fVec));
+ #endif
+ }
+
+ operator __m128() const { return fVec; }
+
+ __m128 fVec;
+};
+
+AI static __m128i mullo32(__m128i a, __m128i b) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_mullo_epi32(a, b);
+#else
+ __m128i mul20 = _mm_mul_epu32(a, b),
+ mul31 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
+ return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
+ _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
+#endif
+}
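+// SSE2 lacks a 32-bit lane-wise multiply. _mm_mul_epu32 forms 64-bit products
+// from lanes {0,2}; the 4-byte srli feeds lanes {1,3} through a second pass,
+// and the shuffles gather the low 32 bits of all four products back in order.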
+
+template <>
+class SkNx<4, int32_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }
+
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+
+ AI SkNx operator ~ () const { return _mm_xor_si128(fVec, _mm_cmpeq_epi32(fVec, fVec)); }
+
+ AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
+
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
+
+ AI int32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; int32_t is[4]; } pun = {fVec};
+ return pun.is[k&3];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ #endif
+ }
+
+ AI SkNx abs() const {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ return _mm_abs_epi32(fVec);
+#else
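+        // mask is 0 for non-negative lanes and ~0 (-1) for negative ones, so
+        // (x ^ mask) - mask is a branch-free two's-complement |x|.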
+ SkNx mask = (*this) >> 31;
+ return (mask ^ (*this)) - mask;
+#endif
+ }
+
+ AI static SkNx Min(const SkNx& x, const SkNx& y) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_min_epi32(x.fVec, y.fVec);
+#else
+ return (x < y).thenElse(x, y);
+#endif
+ }
+
+ AI static SkNx Max(const SkNx& x, const SkNx& y) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_max_epi32(x.fVec, y.fVec);
+#else
+ return (x > y).thenElse(x, y);
+#endif
+ }
+
+ operator __m128i() const { return fVec; }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<2, uint32_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI SkNx(uint32_t a, uint32_t b) : fVec(_mm_setr_epi32(a,b,0,0)) {}
+
+ AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }
+
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
+
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; }
+    // operator < and > take a little extra fiddling to make them work for unsigned ints.
+
+ AI uint32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128i v; uint32_t us[4]; } pun = {fVec};
+ return pun.us[k&1];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
+#else
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+#endif
+ }
+
+ AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<4, uint32_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }
+
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+
+ AI SkNx operator ~ () const { return _mm_xor_si128(fVec, _mm_cmpeq_epi32(fVec, fVec)); }
+
+ AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
+
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; }
+
+    // operator < and > take a little extra fiddling to make them work for unsigned ints.
+
+ AI uint32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint32_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ #endif
+ }
+
+ AI SkNx mulHi(SkNx m) const {
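+        // _mm_mul_epu32 forms 64-bit products from lanes {0,2}; the 4-byte
+        // shifts route lanes {1,3} through the same path. The odd 32-bit lanes
+        // of v20/v31 are then the high halves of all four products.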
+ SkNx v20{_mm_mul_epu32(m.fVec, fVec)};
+ SkNx v31{_mm_mul_epu32(_mm_srli_si128(m.fVec, 4), _mm_srli_si128(fVec, 4))};
+
+ return SkNx{v20[1], v31[1], v20[3], v31[3]};
+ }
+
+ operator __m128i() const { return fVec; }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<4, uint16_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
+ : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
+
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
+
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
+ hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
+ __m128i even = _mm_unpacklo_epi16(lo, hi), // r0 r2 g0 g2 b0 b2 a0 a2
+ odd = _mm_unpackhi_epi16(lo, hi); // r1 r3 ...
+ __m128i rg = _mm_unpacklo_epi16(even, odd), // r0 r1 r2 r3 g0 g1 g2 g3
+ ba = _mm_unpackhi_epi16(even, odd); // b0 b1 ... a0 a1 ...
+ *r = rg;
+ *g = _mm_srli_si128(rg, 8);
+ *b = ba;
+ *a = _mm_srli_si128(ba, 8);
+ }
+ AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
+ // The idea here is to get 4 vectors that are R G B _ _ _ _ _.
+ // The second load is at a funny location to make sure we don't read past
+        // the bounds of memory. This is fine; we just need to shift it a little bit.
+ const uint8_t* ptr8 = (const uint8_t*) ptr;
+ __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0));
+ __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
+ __m128i rgb2 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 4*2)), 2*2);
+ __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
+
+ __m128i rrggbb01 = _mm_unpacklo_epi16(rgb0, rgb1);
+ __m128i rrggbb23 = _mm_unpacklo_epi16(rgb2, rgb3);
+ *r = _mm_unpacklo_epi32(rrggbb01, rrggbb23);
+ *g = _mm_srli_si128(r->fVec, 4*2);
+ *b = _mm_unpackhi_epi32(rrggbb01, rrggbb23);
+ }
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
+ __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
+ __m128i lo = _mm_unpacklo_epi32(rg, ba);
+ __m128i hi = _mm_unpackhi_epi32(rg, ba);
+ _mm_storeu_si128(((__m128i*) dst) + 0, lo);
+ _mm_storeu_si128(((__m128i*) dst) + 1, hi);
+ }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+
+ AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ AI uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ operator __m128i() const { return fVec; }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<8, uint16_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h)
+ : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
+
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
+ _23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
+ _45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
+ _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
+
+ __m128i _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
+ _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3
+ _46 = _mm_unpacklo_epi16(_45, _67),
+ _57 = _mm_unpackhi_epi16(_45, _67);
+
+ __m128i rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
+ ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3
+ rg4567 = _mm_unpacklo_epi16(_46, _57),
+ ba4567 = _mm_unpackhi_epi16(_46, _57);
+
+ *r = _mm_unpacklo_epi64(rg0123, rg4567);
+ *g = _mm_unpackhi_epi64(rg0123, rg4567);
+ *b = _mm_unpacklo_epi64(ba0123, ba4567);
+ *a = _mm_unpackhi_epi64(ba0123, ba4567);
+ }
+ AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
+ const uint8_t* ptr8 = (const uint8_t*) ptr;
+ __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0*2));
+ __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
+ __m128i rgb2 = _mm_loadu_si128((const __m128i*) (ptr8 + 6*2));
+ __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
+ __m128i rgb4 = _mm_loadu_si128((const __m128i*) (ptr8 + 12*2));
+ __m128i rgb5 = _mm_srli_si128(rgb4, 3*2);
+ __m128i rgb6 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 16*2)), 2*2);
+ __m128i rgb7 = _mm_srli_si128(rgb6, 3*2);
+
+ __m128i rgb01 = _mm_unpacklo_epi16(rgb0, rgb1);
+ __m128i rgb23 = _mm_unpacklo_epi16(rgb2, rgb3);
+ __m128i rgb45 = _mm_unpacklo_epi16(rgb4, rgb5);
+ __m128i rgb67 = _mm_unpacklo_epi16(rgb6, rgb7);
+
+ __m128i rg03 = _mm_unpacklo_epi32(rgb01, rgb23);
+ __m128i bx03 = _mm_unpackhi_epi32(rgb01, rgb23);
+ __m128i rg47 = _mm_unpacklo_epi32(rgb45, rgb67);
+ __m128i bx47 = _mm_unpackhi_epi32(rgb45, rgb67);
+
+ *r = _mm_unpacklo_epi64(rg03, rg47);
+ *g = _mm_unpackhi_epi64(rg03, rg47);
+ *b = _mm_unpacklo_epi64(bx03, bx47);
+ }
+ AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec), // r0 g0 r1 g1 r2 g2 r3 g3
+ rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec), // r4 g4 r5 g5 r6 g6 r7 g7
+ ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
+ ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);
+
+ _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
+ _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
+ _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
+ _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
+ }
+
+ AI SkNx operator ~ () const { return _mm_xor_si128(fVec, _mm_cmpeq_epi16(fVec, fVec)); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const {
+ auto flip = _mm_set1_epi16(short(0x8000));
+ return _mm_cmplt_epi16(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
+ }
+ AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) {
+ // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
+ // signed version, _mm_min_epi16, then shift back.
+ const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
+ const __m128i top_8x = _mm_set1_epi16(top);
+ return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
+ _mm_sub_epi8(b.fVec, top_8x)));
+ }
+ AI static SkNx Max(const SkNx& a, const SkNx& b) {
+ const uint16_t top = 0x8000;
+ const __m128i top_8x = _mm_set1_epi16(top);
+ return _mm_add_epi8(top_8x, _mm_max_epi16(_mm_sub_epi8(a.fVec, top_8x),
+ _mm_sub_epi8(b.fVec, top_8x)));
+ }
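+    // The byte-wise _mm_add_epi8/_mm_sub_epi8 are safe on this 16-bit data:
+    // the bias's low byte is zero, so no carry or borrow ever crosses from the
+    // low byte into the high byte, and only each lane's sign bit flips.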
+
+ AI SkNx mulHi(const SkNx& m) const {
+ return _mm_mulhi_epu16(fVec, m.fVec);
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ AI uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ operator __m128i() const { return fVec; }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<4, uint8_t> {
+public:
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
+ : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}
+
+ AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
+ AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ // TODO as needed
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<8, uint8_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h)
+ : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}
+
+ AI void store(void* ptr) const {_mm_storel_epi64((__m128i*)ptr, fVec);}
+
+ AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const {
+ // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
+ auto flip = _mm_set1_epi8(char(0x80));
+ return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
+ }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<16, uint8_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p)
+ : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}
+
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const {
+ // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
+ auto flip = _mm_set1_epi8(char(0x80));
+ return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
+ }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ __m128i fVec;
+};
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+ return _mm_cvtepi32_ps(src.fVec);
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+ return SkNx_cast<float>(Sk4i::Load(&src));
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint64_t>(const SkNx<4, uint64_t>& src) {
+ return Sk4f(float(int32_t(src[0])), float(int32_t(src[1])), float(int32_t(src[2])), float(int32_t(src[3])));
+}
+
+template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+ return _mm_cvttps_epi32(src.fVec);
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_packus_epi32(src.fVec, src.fVec);
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
+ const int _ = ~0;
+ return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
+#else
+ // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
+ __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
+ return _mm_packs_epi32(x,x);
+#endif
+}
+
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, int32_t>(const Sk8i& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_packus_epi32(src.fLo.fVec, src.fHi.fVec);
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
+ const int _ = ~0;
+ __m128i mask = _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_);
+ return _mm_unpacklo_epi64(_mm_shuffle_epi8(src.fLo.fVec, mask),
+ _mm_shuffle_epi8(src.fHi.fVec, mask));
+#else
+ // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
+ __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fLo.fVec, 16), 16);
+ __m128i y = _mm_srai_epi32(_mm_slli_epi32(src.fHi.fVec, 16), 16);
+ return _mm_packs_epi32(x,y);
+#endif
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+ return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
+}
+
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, float>(const Sk8f& src) {
+ return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+ auto _32 = _mm_cvttps_epi32(src.fVec);
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ const int _ = ~0;
+ return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
+#else
+ auto _16 = _mm_packus_epi16(_32, _32);
+ return _mm_packus_epi16(_16, _16);
+#endif
+}
+
+template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ const int _ = ~0;
+ return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
+#else
+ auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+ return _mm_unpacklo_epi16(_16, _mm_setzero_si128());
+#endif
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
+ return SkNx_cast<uint32_t>(src).fVec;
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+ return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec);
+}
+
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+ auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+ return _mm_cvtepi32_ps(_32);
+}
+
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
+ Sk4i lo, hi;
+ SkNx_split(src, &lo, &hi);
+
+ auto t = _mm_packs_epi32(lo.fVec, hi.fVec);
+ return _mm_packus_epi16(t, t);
+}
+
+template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+
+ return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+ _mm_cvttps_epi32(b.fVec)),
+ _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+ _mm_cvttps_epi32(d.fVec)));
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+ return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
+ return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+ return _mm_packus_epi16(src.fVec, src.fVec);
+}
+
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
+ return _mm_packus_epi16(src.fVec, src.fVec);
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+ return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+}
+
+template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint16_t>(const Sk4h& src) {
+ return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+ return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
+}
+
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
+ return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
+}
+
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+ return src.fVec;
+}
+
+template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, int32_t>(const Sk4i& src) {
+ return src.fVec;
+}
+
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint32_t>(const Sk4u& src) {
+ return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
+}
+
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint32_t>(const Sk8u& src) {
+ return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
+}
+
+template<> AI /*static*/ Sk8u SkNx_cast<uint32_t, uint16_t>(const Sk8h& src) {
+ return Sk8u(_mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()),
+ _mm_unpackhi_epi16(src.fVec, _mm_setzero_si128()));
+}
+
+AI static Sk4i Sk4f_round(const Sk4f& x) {
+ return _mm_cvtps_epi32(x.fVec);
+}
+
+} // namespace
+
+#endif//SkNx_sse_DEFINED
diff --git a/gfx/skia/skia/include/private/SkOnce.h b/gfx/skia/skia/include/private/SkOnce.h
new file mode 100644
index 0000000000..662bffb1cd
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkOnce.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+#include <atomic>
+#include <utility>
+
+// SkOnce provides call-once guarantees for Skia, much like std::once_flag/std::call_once().
+//
+// There are no particularly error-prone gotchas when using SkOnce.
+// It works correctly as a class member, a local, a global, a function-scoped static, whatever.
+
+class SkOnce {
+public:
+ constexpr SkOnce() = default;
+
+ template <typename Fn, typename... Args>
+ void operator()(Fn&& fn, Args&&... args) {
+ auto state = fState.load(std::memory_order_acquire);
+
+ if (state == Done) {
+ return;
+ }
+
+ // If it looks like no one has started calling fn(), try to claim that job.
+ if (state == NotStarted && fState.compare_exchange_strong(state, Claimed,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
+ // Great! We'll run fn() then notify the other threads by releasing Done into fState.
+ fn(std::forward<Args>(args)...);
+ return fState.store(Done, std::memory_order_release);
+ }
+
+ // Some other thread is calling fn().
+ // We'll just spin here acquiring until it releases Done into fState.
+ while (fState.load(std::memory_order_acquire) != Done) { /*spin*/ }
+ }
+
+private:
+    enum State : uint8_t { NotStarted, Claimed, Done };
+ std::atomic<uint8_t> fState{NotStarted};
+};
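+
+// Example usage, as a minimal sketch (the names maybe_setup, gOnce, and expensive_setup
+// are hypothetical):
+//
+//     void maybe_setup() {
+//         static SkOnce gOnce;
+//         gOnce([] { expensive_setup(); });   // expensive_setup() runs at most once,
+//     }                                       // even when called from many threads.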
+
+#endif // SkOnce_DEFINED
diff --git a/gfx/skia/skia/include/private/SkPathRef.h b/gfx/skia/skia/include/private/SkPathRef.h
new file mode 100644
index 0000000000..4775ad08ff
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkPathRef.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathRef_DEFINED
+#define SkPathRef_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include <atomic>
+#include <limits>
+
+class SkRBuffer;
+class SkWBuffer;
+
+/**
+ * Holds the path verbs and points. It is versioned by a generation ID. None of its public methods
+ * modify the contents. To modify or append to the verbs/points wrap the SkPathRef in an
+ * SkPathRef::Editor object. Installing the editor resets the generation ID. It also performs
+ * copy-on-write if the SkPathRef is shared by multiple SkPaths. The caller passes the Editor's
+ * constructor a pointer to a sk_sp<SkPathRef>, which may be updated to point to a new SkPathRef
+ * after the editor's constructor returns.
+ *
+ * The points, verbs, and conic weights are stored in separate arrays (see fPoints, fVerbs, and
+ * fConicWeights below), each in normal front-to-back order. To access verb i use
+ * ref.verbsBegin()[i]; verbsEnd() returns a pointer one past the last verb.
+ */
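+
+// Example Editor usage, as a minimal sketch ('path' is a hypothetical sk_sp<SkPathRef>;
+// 0 is SkPath::kMove_Verb):
+//
+//     sk_sp<SkPathRef> path(SkPathRef::CreateEmpty());
+//     {
+//         SkPathRef::Editor editor(&path);   // may copy-on-write; resets the gen ID
+//         SkPoint* pt = editor.growForVerb(0 /*SkPath::kMove_Verb*/);
+//         pt->set(10.0f, 20.0f);
+//     }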
+
+class SK_API SkPathRef final : public SkNVRefCnt<SkPathRef> {
+public:
+ class Editor {
+ public:
+ Editor(sk_sp<SkPathRef>* pathRef,
+ int incReserveVerbs = 0,
+ int incReservePoints = 0);
+
+ ~Editor() { SkDEBUGCODE(fPathRef->fEditorsAttached--;) }
+
+ /**
+ * Returns the array of points.
+ */
+ SkPoint* writablePoints() { return fPathRef->getWritablePoints(); }
+ const SkPoint* points() const { return fPathRef->points(); }
+
+ /**
+ * Gets the ith point. Shortcut for this->points() + i
+ */
+ SkPoint* atPoint(int i) { return fPathRef->getWritablePoints() + i; }
+ const SkPoint* atPoint(int i) const { return &fPathRef->fPoints[i]; }
+
+ /**
+ * Adds the verb and allocates space for the number of points indicated by the verb. The
+ * return value is a pointer to where the points for the verb should be written.
+ * 'weight' is only used if 'verb' is kConic_Verb
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight = 0) {
+ SkDEBUGCODE(fPathRef->validate();)
+ return fPathRef->growForVerb(verb, weight);
+ }
+
+ /**
+ * Allocates space for multiple instances of a particular verb and the
+ * requisite points & weights.
+ * The return pointer points at the first new point (indexed normally [<i>]).
+ * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * space for the conic weights (indexed normally).
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights = nullptr) {
+ return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
+ }
+
+ /**
+ * Resets the path ref to a new verb and point count. The new verbs and points are
+ * uninitialized.
+ */
+ void resetToSize(int newVerbCnt, int newPointCnt, int newConicCount) {
+ fPathRef->resetToSize(newVerbCnt, newPointCnt, newConicCount);
+ }
+
+ /**
+ * Gets the path ref that is wrapped in the Editor.
+ */
+ SkPathRef* pathRef() { return fPathRef; }
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fPathRef->setIsOval(isOval, isCCW, start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fPathRef->setIsRRect(isRRect, isCCW, start);
+ }
+
+ void setBounds(const SkRect& rect) { fPathRef->setBounds(rect); }
+
+ private:
+ SkPathRef* fPathRef;
+ };
+
+ class SK_API Iter {
+ public:
+ Iter();
+ Iter(const SkPathRef&);
+
+ void setPathRef(const SkPathRef&);
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ If any point in the path is non-finite, return kDone_Verb immediately.
+
+ @param pts The points representing the current verb and/or segment
+ This must not be NULL.
+ @return The verb for the current segment
+ */
+ uint8_t next(SkPoint pts[4]);
+ uint8_t peek() const;
+
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ };
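+
+    // Example iteration, as a minimal sketch ('ref' is a hypothetical SkPathRef;
+    // 6 is SkPath::kDone_Verb):
+    //
+    //     SkPathRef::Iter iter(ref);
+    //     SkPoint pts[4];
+    //     for (uint8_t verb = iter.next(pts); verb != 6 /*kDone_Verb*/; verb = iter.next(pts)) {
+    //         // pts holds the points for this verb; conicWeight() is valid for conic verbs.
+    //     }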
+
+public:
+ /**
+ * Gets a path ref with no verbs or points.
+ */
+ static SkPathRef* CreateEmpty();
+
+ /**
+ * Returns true if all of the points in this path are finite, meaning there
+ * are no infinities and no NaNs.
+ */
+ bool isFinite() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return SkToBool(fIsFinite);
+ }
+
+ /**
+ * Returns a mask, where each bit corresponding to a SegmentMask is
+ * set if the path contains 1 or more segments of that type.
+ * Returns 0 for an empty path (no segments).
+ */
+ uint32_t getSegmentMasks() const { return fSegmentMask; }
+
+ /** Returns true if the path is an oval.
+ *
+ * @param rect returns the bounding rect of this oval. It's a circle
+ * if the height and width are the same.
+ * @param isCCW is the oval CCW (or CW if false).
+ * @param start indicates where the contour starts on the oval (see
+     *                 SkPath::addOval for interpretation of the index).
+ *
+ * @return true if this path is an oval.
+ * Tracking whether a path is an oval is considered an
+ * optimization for performance and so some paths that are in
+ * fact ovals can report false.
+ */
+ bool isOval(SkRect* rect, bool* isCCW, unsigned* start) const {
+ if (fIsOval) {
+ if (rect) {
+ *rect = this->getBounds();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+
+ return SkToBool(fIsOval);
+ }
+
+ bool isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const {
+ if (fIsRRect) {
+ if (rrect) {
+ *rrect = this->getRRect();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+ return SkToBool(fIsRRect);
+ }
+
+
+ bool hasComputedBounds() const {
+ return !fBoundsIsDirty;
+ }
+
+ /** Returns the bounds of the path's points. If the path contains 0 or 1
+        points, the bounds are set to (0,0,0,0), and isEmpty() will return true.
+        Note: these bounds may be larger than the actual shape, since curves
+ do not extend as far as their control points.
+ */
+ const SkRect& getBounds() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return fBounds;
+ }
+
+ SkRRect getRRect() const;
+
+ /**
+ * Transforms a path ref by a matrix, allocating a new one only if necessary.
+ */
+ static void CreateTransformedCopy(sk_sp<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix);
+
+ // static SkPathRef* CreateFromBuffer(SkRBuffer* buffer);
+
+ /**
+     * Rolls back a path ref to zero verbs and points with the assumption that the path ref will be
+ * repopulated with approximately the same number of verbs and points. A new path ref is created
+ * only if necessary.
+ */
+ static void Rewind(sk_sp<SkPathRef>* pathRef);
+
+ ~SkPathRef();
+ int countPoints() const { return fPoints.count(); }
+ int countVerbs() const { return fVerbs.count(); }
+ int countWeights() const { return fConicWeights.count(); }
+
+    /**
+     * Returns a const pointer to the first verb.
+     */
+    const uint8_t* verbsBegin() const { return fVerbs.begin(); }
+
+    /**
+     * Returns a const pointer one past the last verb.
+     */
+    const uint8_t* verbsEnd() const { return fVerbs.end(); }
+
+ /**
+ * Returns a const pointer to the first point.
+ */
+ const SkPoint* points() const { return fPoints.begin(); }
+
+ /**
+ * Shortcut for this->points() + this->countPoints()
+ */
+ const SkPoint* pointsEnd() const { return this->points() + this->countPoints(); }
+
+ const SkScalar* conicWeights() const { return fConicWeights.begin(); }
+ const SkScalar* conicWeightsEnd() const { return fConicWeights.end(); }
+
+ /**
+ * Convenience methods for getting to a verb or point by index.
+ */
+ uint8_t atVerb(int index) const { return fVerbs[index]; }
+ const SkPoint& atPoint(int index) const { return fPoints[index]; }
+
+ bool operator== (const SkPathRef& ref) const;
+
+ /**
+ * Writes the path points and verbs to a buffer.
+ */
+ void writeToBuffer(SkWBuffer* buffer) const;
+
+ /**
+     * Gets the number of bytes that would be written in writeToBuffer()
+ */
+ uint32_t writeSize() const;
+
+ void interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const;
+
+ /**
+ * Gets an ID that uniquely identifies the contents of the path ref. If two path refs have the
+ * same ID then they have the same verbs and points. However, two path refs may have the same
+ * contents but different genIDs.
+ */
+ uint32_t genID() const;
+
+ class GenIDChangeListener : public SkRefCnt {
+ public:
+ GenIDChangeListener() : fShouldUnregisterFromPath(false) {}
+ virtual ~GenIDChangeListener() {}
+
+ virtual void onChange() = 0;
+
+ // The caller can use this method to notify the path that it no longer needs to listen. Once
+ // called, the path will remove this listener from the list at some future point.
+ void markShouldUnregisterFromPath() {
+ fShouldUnregisterFromPath.store(true, std::memory_order_relaxed);
+ }
+ bool shouldUnregisterFromPath() {
+ return fShouldUnregisterFromPath.load(std::memory_order_acquire);
+ }
+
+ private:
+ std::atomic<bool> fShouldUnregisterFromPath;
+ };
+
+ void addGenIDChangeListener(sk_sp<GenIDChangeListener>); // Threadsafe.
+
+ bool isValid() const;
+ SkDEBUGCODE(void validate() const { SkASSERT(this->isValid()); } )
+
+private:
+ enum SerializationOffsets {
+ kLegacyRRectOrOvalStartIdx_SerializationShift = 28, // requires 3 bits, ignored.
+ kLegacyRRectOrOvalIsCCW_SerializationShift = 27, // requires 1 bit, ignored.
+ kLegacyIsRRect_SerializationShift = 26, // requires 1 bit, ignored.
+ kIsFinite_SerializationShift = 25, // requires 1 bit
+ kLegacyIsOval_SerializationShift = 24, // requires 1 bit, ignored.
+ kSegmentMask_SerializationShift = 0 // requires 4 bits (deprecated)
+ };
+
+ SkPathRef() {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = kEmptyGenID;
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ SkDEBUGCODE(fEditorsAttached.store(0);)
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void copy(const SkPathRef& ref, int additionalReserveVerbs, int additionalReservePoints);
+
+ // Doesn't read fSegmentMask, but (re)computes it from the verbs array
+ unsigned computeSegmentMask() const;
+
+ // Return true if the computed bounds are finite.
+ static bool ComputePtBounds(SkRect* bounds, const SkPathRef& ref) {
+ return bounds->setBoundsCheck(ref.points(), ref.countPoints());
+ }
+
+ // called, if dirty, by getBounds()
+ void computeBounds() const {
+ SkDEBUGCODE(this->validate();)
+ // TODO(mtklein): remove fBoundsIsDirty and fIsFinite,
+ // using an inverted rect instead of fBoundsIsDirty and always recalculating fIsFinite.
+ SkASSERT(fBoundsIsDirty);
+
+ fIsFinite = ComputePtBounds(&fBounds, *this);
+ fBoundsIsDirty = false;
+ }
+
+ void setBounds(const SkRect& rect) {
+ SkASSERT(rect.fLeft <= rect.fRight && rect.fTop <= rect.fBottom);
+ fBounds = rect;
+ fBoundsIsDirty = false;
+ fIsFinite = fBounds.isFinite();
+ }
+
+ /** Makes additional room but does not change the counts or change the genID */
+ void incReserve(int additionalVerbs, int additionalPoints) {
+ SkDEBUGCODE(this->validate();)
+ fPoints.setReserve(fPoints.count() + additionalPoints);
+ fVerbs.setReserve(fVerbs.count() + additionalVerbs);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /** Resets the path ref with verbCount verbs and pointCount points, all uninitialized. Also
+ * allocates space for reserveVerb additional verbs and reservePoints additional points.*/
+ void resetToSize(int verbCount, int pointCount, int conicCount,
+ int reserveVerbs = 0, int reservePoints = 0) {
+ SkDEBUGCODE(this->validate();)
+ this->callGenIDChangeListeners();
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0;
+
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+
+ fPoints.setReserve(pointCount + reservePoints);
+ fPoints.setCount(pointCount);
+ fVerbs.setReserve(verbCount + reserveVerbs);
+ fVerbs.setCount(verbCount);
+ fConicWeights.setCount(conicCount);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Increases the verb count by numVbs and point count by the required amount.
+ * The new points are uninitialized. All the new verbs are set to the specified
+ * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * uninitialized conic weights.
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
+
+ /**
+ * Increases the verb count 1, records the new verb, and creates room for the requisite number
+ * of additional points. A pointer to the first point is returned. Any new points are
+ * uninitialized.
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight);
+
+ /**
+     * Private, non-const-ptr version of the public function verbsBegin().
+ */
+ uint8_t* verbsBeginWritable() { return fVerbs.begin(); }
+
+ /**
+ * Called the first time someone calls CreateEmpty to actually create the singleton.
+ */
+ friend SkPathRef* sk_create_empty_pathref();
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fIsOval = isOval;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fIsRRect = isRRect;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ // called only by the editor. Note that this is not a const function.
+ SkPoint* getWritablePoints() {
+ SkDEBUGCODE(this->validate();)
+ fIsOval = false;
+ fIsRRect = false;
+ return fPoints.begin();
+ }
+
+ const SkPoint* getPoints() const {
+ SkDEBUGCODE(this->validate();)
+ return fPoints.begin();
+ }
+
+ void callGenIDChangeListeners();
+
+ enum {
+ kMinSize = 256,
+ };
+
+ mutable SkRect fBounds;
+
+ SkTDArray<SkPoint> fPoints;
+ SkTDArray<uint8_t> fVerbs;
+ SkTDArray<SkScalar> fConicWeights;
+
+ enum {
+ kEmptyGenID = 1, // GenID reserved for path ref with zero points and zero verbs.
+ };
+ mutable uint32_t fGenerationID;
+ SkDEBUGCODE(std::atomic<int> fEditorsAttached;) // assert only one editor in use at any time.
+
+ SkMutex fGenIDChangeListenersMutex;
+ SkTDArray<GenIDChangeListener*> fGenIDChangeListeners; // pointers are reffed
+
+ mutable uint8_t fBoundsIsDirty;
+ mutable bool fIsFinite; // only meaningful if bounds are valid
+
+ bool fIsOval;
+ bool fIsRRect;
+    // Both the oval and rrect special cases have a notion of direction and starting point.
+ // The next two variables store that information for either.
+ bool fRRectOrOvalIsCCW;
+ uint8_t fRRectOrOvalStartIdx;
+ uint8_t fSegmentMask;
+
+ friend class PathRefTest_Private;
+ friend class ForceIsRRect_Private; // unit test isRRect
+ friend class SkPath;
+ friend class SkPathPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSafe32.h b/gfx/skia/skia/include/private/SkSafe32.h
new file mode 100644
index 0000000000..7e59f2b004
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSafe32.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafe32_DEFINED
+#define SkSafe32_DEFINED
+
+#include "include/core/SkTypes.h"
+
+static constexpr int32_t Sk64_pin_to_s32(int64_t x) {
+ return x < SK_MinS32 ? SK_MinS32 : (x > SK_MaxS32 ? SK_MaxS32 : (int32_t)x);
+}
+
+static constexpr int32_t Sk32_sat_add(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a + (int64_t)b);
+}
+
+static constexpr int32_t Sk32_sat_sub(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a - (int64_t)b);
+}
+
+// To avoid UBSAN complaints about two's complement overflows
+//
+static constexpr int32_t Sk32_can_overflow_add(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a + (uint32_t)b);
+}
+static constexpr int32_t Sk32_can_overflow_sub(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a - (uint32_t)b);
+}
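+
+// Examples, as a minimal sketch of the behavior:
+//
+//     Sk32_sat_add(SK_MaxS32, 1) == SK_MaxS32   // clamps rather than overflowing
+//     Sk32_sat_sub(SK_MinS32, 1) == SK_MinS32
+//     Sk32_can_overflow_add(SK_MaxS32, 1)       // wraps (in practice to SK_MinS32) without UB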
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSafe_math.h b/gfx/skia/skia/include/private/SkSafe_math.h
new file mode 100644
index 0000000000..144b28a4a3
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSafe_math.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafe_math_DEFINED
+#define SkSafe_math_DEFINED
+
+// This file protects against known bugs in ucrt\math.h.
+// Namely, that header defines inline methods without marking them static,
+// which makes it very easy to cause ODR violations and ensuing chaos.
+//
+// TODO: other headers? Here are some potential problem headers:
+// $ grep -R __inline * | grep -v static | cut -f 1 -d: | sort | uniq
+// corecrt.h
+// corecrt_stdio_config.h
+// ctype.h
+// fenv.h
+// locale.h
+// malloc.h
+// math.h
+// tchar.h
+// wchar.h
+// I took a quick look through other headers outside math.h.
+// Nothing looks anywhere near as likely to be used by Skia as math.h.
+
+#if defined(_MSC_VER) && !defined(_INC_MATH)
+ // Our strategy here is to simply inject "static" into the headers
+ // where it should have been written, just before __inline.
+ //
+ // Most inline-but-not-static methods in math.h are 32-bit only,
+    // but not all of them (see frexpf, hypotf, ldexpf...). So to
+ // be safe, 32- and 64-bit builds both get this treatment.
+
+ #define __inline static __inline
+ #include <math.h>
+ #undef __inline
+
+ #if !defined(_INC_MATH)
+ #error Hmm. Looks like math.h has changed its header guards.
+ #endif
+
+ #define INC_MATH_IS_SAFE_NOW
+
+#else
+ #include <math.h>
+
+#endif
+
+#endif//SkSafe_math_DEFINED
diff --git a/gfx/skia/skia/include/private/SkSemaphore.h b/gfx/skia/skia/include/private/SkSemaphore.h
new file mode 100644
index 0000000000..29bbca6fa9
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSemaphore.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSemaphore_DEFINED
+#define SkSemaphore_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkOnce.h"
+#include <atomic>
+
+class SkSemaphore {
+public:
+ constexpr SkSemaphore(int count = 0) : fCount(count), fOSSemaphore(nullptr) {}
+
+    // Clean up the underlying OS semaphore.
+ ~SkSemaphore();
+
+ // Increment the counter n times.
+ // Generally it's better to call signal(n) instead of signal() n times.
+ void signal(int n = 1);
+
+ // Decrement the counter by 1,
+ // then if the counter is < 0, sleep this thread until the counter is >= 0.
+ void wait();
+
+ // If the counter is positive, decrement it by 1 and return true, otherwise return false.
+ bool try_wait();
+
+private:
+ // This implementation follows the general strategy of
+ // 'A Lightweight Semaphore with Partial Spinning'
+ // found here
+ // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+ // That article (and entire blog) are very much worth reading.
+ //
+ // We wrap an OS-provided semaphore with a user-space atomic counter that
+ // lets us avoid interacting with the OS semaphore unless strictly required:
+ // moving the count from >=0 to <0 or vice-versa, i.e. sleeping or waking threads.
+ struct OSSemaphore;
+
+ void osSignal(int n);
+ void osWait();
+
+ std::atomic<int> fCount;
+ SkOnce fOSSemaphoreOnce;
+ OSSemaphore* fOSSemaphore;
+};
+
+inline void SkSemaphore::signal(int n) {
+ int prev = fCount.fetch_add(n, std::memory_order_release);
+
+ // We only want to call the OS semaphore when our logical count crosses
+ // from <0 to >=0 (when we need to wake sleeping threads).
+ //
+ // This is easiest to think about with specific examples of prev and n.
+ // If n == 5 and prev == -3, there are 3 threads sleeping and we signal
+ // SkTMin(-(-3), 5) == 3 times on the OS semaphore, leaving the count at 2.
+ //
+ // If prev >= 0, no threads are waiting, SkTMin(-prev, n) is always <= 0,
+ // so we don't call the OS semaphore, leaving the count at (prev + n).
+ int toSignal = SkTMin(-prev, n);
+ if (toSignal > 0) {
+ this->osSignal(toSignal);
+ }
+}
+
+inline void SkSemaphore::wait() {
+ // Since this fetches the value before the subtract, zero and below means that there are no
+ // resources left, so the thread needs to wait.
+ if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) {
+ this->osWait();
+ }
+}
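+
+// Example usage, as a minimal sketch (gWorkReady and the two threads are hypothetical):
+//
+//     SkSemaphore gWorkReady;
+//     // producer thread: enqueue an item, then gWorkReady.signal();
+//     // consumer thread: gWorkReady.wait();  // sleeps only if nothing has been signaled yet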
+
+#endif//SkSemaphore_DEFINED
diff --git a/gfx/skia/skia/include/private/SkShadowFlags.h b/gfx/skia/skia/include/private/SkShadowFlags.h
new file mode 100644
index 0000000000..8caf632988
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkShadowFlags.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowFlags_DEFINED
+#define SkShadowFlags_DEFINED
+
+// A set of flags shared between the SkAmbientShadowMaskFilter and the SkSpotShadowMaskFilter
+enum SkShadowFlags {
+ kNone_ShadowFlag = 0x00,
+ /** The occluding object is not opaque. Knowing that the occluder is opaque allows
+ * us to cull shadow geometry behind it and improve performance. */
+ kTransparentOccluder_ShadowFlag = 0x01,
+ /** Don't try to use analytic shadows. */
+ kGeometricOnly_ShadowFlag = 0x02,
+ /** mask for all shadow flags */
+ kAll_ShadowFlag = 0x03
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSpinlock.h b/gfx/skia/skia/include/private/SkSpinlock.h
new file mode 100644
index 0000000000..e1d5011681
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSpinlock.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpinlock_DEFINED
+#define SkSpinlock_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkThreadAnnotations.h"
+#include <atomic>
+
+class SK_CAPABILITY("mutex") SkSpinlock {
+public:
+ constexpr SkSpinlock() = default;
+
+ void acquire() SK_ACQUIRE() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Fall back to an out-of-line spin loop.
+ this->contendedAcquire();
+ }
+ }
+
+ // Acquire the lock or fail (quickly). Lets the caller decide to do something other than wait.
+ bool tryAcquire() SK_TRY_ACQUIRE(true) {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Let the caller decide what to do.
+ return false;
+ }
+ return true;
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ // To act as a mutex, we need a release barrier when we release the lock.
+ fLocked.store(false, std::memory_order_release);
+ }
+
+private:
+ SK_API void contendedAcquire();
+
+ std::atomic<bool> fLocked{false};
+};
+
+class SK_SCOPED_CAPABILITY SkAutoSpinlock {
+public:
+ SkAutoSpinlock(SkSpinlock& mutex) SK_ACQUIRE(mutex) : fSpinlock(mutex) { fSpinlock.acquire(); }
+ ~SkAutoSpinlock() SK_RELEASE_CAPABILITY() { fSpinlock.release(); }
+
+private:
+ SkSpinlock& fSpinlock;
+};
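+
+// Example usage, as a minimal sketch (gLock, gCounter, and bump are hypothetical):
+//
+//     SkSpinlock gLock;
+//     int gCounter = 0;
+//
+//     void bump() {
+//         SkAutoSpinlock lock(gLock);   // acquired here, released when 'lock' leaves scope
+//         gCounter++;
+//     }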
+
+#endif//SkSpinlock_DEFINED
diff --git a/gfx/skia/skia/include/private/SkTArray.h b/gfx/skia/skia/include/private/SkTArray.h
new file mode 100644
index 0000000000..e526921471
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTArray.h
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTArray_DEFINED
+#define SkTArray_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkSafe32.h"
+#include "include/private/SkTLogic.h"
+#include "include/private/SkTemplates.h"
+
+#include <string.h>
+#include <memory>
+#include <new>
+#include <utility>
+
+/** When MEM_MOVE is true, T will be bit-copied when moved.
+ When MEM_MOVE is false, T will be copy constructed / destructed.
+ In all cases T will be default-initialized on allocation,
+ and its destructor will be called from this object's destructor.
+*/
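+
+// Example, as a minimal sketch: MEM_MOVE can be set to true for trivially movable element
+// types (SkPoint here comes from include/core/SkPoint.h), letting the array relocate
+// elements with memcpy instead of move-construct + destroy.
+//
+//     SkTArray<SkPoint, true> pts;           // plain-data elements, safe to bit-copy
+//     pts.push_back(SkPoint::Make(1, 2));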
+template <typename T, bool MEM_MOVE = false> class SkTArray {
+public:
+ /**
+ * Creates an empty array with no initial storage
+ */
+ SkTArray() { this->init(); }
+
+ /**
+ * Creates an empty array that will preallocate space for reserveCount
+ * elements.
+ */
+ explicit SkTArray(int reserveCount) { this->init(0, reserveCount); }
+
+ /**
+ * Copies one array to another. The new array will be heap allocated.
+ */
+ SkTArray(const SkTArray& that) {
+ this->init(that.fCount);
+ this->copy(that.fItemArray);
+ }
+
+ SkTArray(SkTArray&& that) {
+ // TODO: If 'that' owns its memory why don't we just steal the pointer?
+ this->init(that.fCount);
+ that.move(fMemArray);
+ that.fCount = 0;
+ }
+
+ /**
+ * Creates a SkTArray by copying contents of a standard C array. The new
+ * array will be heap allocated. Be careful not to use this constructor
+ * when you really want the (void*, int) version.
+ */
+ SkTArray(const T* array, int count) {
+ this->init(count);
+ this->copy(array);
+ }
+
+ SkTArray& operator=(const SkTArray& that) {
+ if (this == &that) {
+ return *this;
+ }
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(that.count());
+ fCount = that.count();
+ this->copy(that.fItemArray);
+ return *this;
+ }
+ SkTArray& operator=(SkTArray&& that) {
+ if (this == &that) {
+ return *this;
+ }
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(that.count());
+ fCount = that.count();
+ that.move(fMemArray);
+ that.fCount = 0;
+ return *this;
+ }
+
+ ~SkTArray() {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ if (fOwnMemory) {
+ sk_free(fMemArray);
+ }
+ }
+
+ /**
+ * Resets to count() == 0 and resets any reserve count.
+ */
+ void reset() {
+ this->pop_back_n(fCount);
+ fReserved = false;
+ }
+
+ /**
+ * Resets to count() = n newly constructed T objects and resets any reserve count.
+ */
+ void reset(int n) {
+ SkASSERT(n >= 0);
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ // Set fCount to 0 before calling checkRealloc so that no elements are moved.
+ fCount = 0;
+ this->checkRealloc(n);
+ fCount = n;
+ for (int i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T;
+ }
+ fReserved = false;
+ }
+
+ /**
+ * Resets to a copy of a C array and resets any reserve count.
+ */
+ void reset(const T* array, int count) {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(count);
+ fCount = count;
+ this->copy(array);
+ fReserved = false;
+ }
+
+ /**
+     * Ensures there is enough reserved space for n additional elements. This is guaranteed at least
+ * until the array size grows above n and subsequently shrinks below n, any version of reset()
+ * is called, or reserve() is called again.
+ */
+ void reserve(int n) {
+ SkASSERT(n >= 0);
+ if (n > 0) {
+ this->checkRealloc(n);
+ fReserved = fOwnMemory;
+ } else {
+ fReserved = false;
+ }
+ }
+
+ void removeShuffle(int n) {
+ SkASSERT(n < fCount);
+ int newCount = fCount - 1;
+ fCount = newCount;
+ fItemArray[n].~T();
+ if (n != newCount) {
+ this->move(n, newCount);
+ }
+ }
+
+ /**
+ * Number of elements in the array.
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Is the array empty.
+ */
+ bool empty() const { return !fCount; }
+
+ /**
+ * Adds 1 new default-initialized T value and returns it by reference. Note
+ * the reference only remains valid until the next call that adds or removes
+ * elements.
+ */
+ T& push_back() {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the new item
+ */
+ T& push_back(const T& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(t);
+ }
+
+ /**
+ * Version of above that uses a move constructor to initialize the new item
+ */
+ T& push_back(T&& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::move(t));
+ }
+
+ /**
+ * Construct a new T at the back of this array.
+ */
+ template<class... Args> T& emplace_back(Args&&... args) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Allocates n more default-initialized T values, and returns the address of
+ * the start of that new range. Note: this address is only valid until the
+ * next API call made on the array that might add or remove elements.
+ */
+ T* push_back_n(int n) {
+ SkASSERT(n >= 0);
+ void* newTs = this->push_back_raw(n);
+ for (int i = 0; i < n; ++i) {
+ new (static_cast<char*>(newTs) + i * sizeof(T)) T;
+ }
+ return static_cast<T*>(newTs);
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize all n items
+ * to the same T.
+ */
+ T* push_back_n(int n, const T& t) {
+ SkASSERT(n >= 0);
+ void* newTs = this->push_back_raw(n);
+ for (int i = 0; i < n; ++i) {
+ new (static_cast<char*>(newTs) + i * sizeof(T)) T(t);
+ }
+ return static_cast<T*>(newTs);
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the n items
+ * to separate T values.
+ */
+ T* push_back_n(int n, const T t[]) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n);
+ for (int i = 0; i < n; ++i) {
+ new (fItemArray + fCount + i) T(t[i]);
+ }
+ fCount += n;
+ return fItemArray + fCount - n;
+ }
+
+ /**
+ * Version of above that uses the move constructor to set n items.
+ */
+ T* move_back_n(int n, T* t) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n);
+ for (int i = 0; i < n; ++i) {
+ new (fItemArray + fCount + i) T(std::move(t[i]));
+ }
+ fCount += n;
+ return fItemArray + fCount - n;
+ }
+
+ /**
+ * Removes the last element. Not safe to call when count() == 0.
+ */
+ void pop_back() {
+ SkASSERT(fCount > 0);
+ --fCount;
+ fItemArray[fCount].~T();
+ this->checkRealloc(0);
+ }
+
+ /**
+ * Removes the last n elements. Not safe to call when count() < n.
+ */
+ void pop_back_n(int n) {
+ SkASSERT(n >= 0);
+ SkASSERT(fCount >= n);
+ fCount -= n;
+ for (int i = 0; i < n; ++i) {
+ fItemArray[fCount + i].~T();
+ }
+ this->checkRealloc(0);
+ }
+
+ /**
+ * Pushes or pops from the back to resize. Pushes will be default
+ * initialized.
+ */
+ void resize_back(int newCount) {
+ SkASSERT(newCount >= 0);
+
+ if (newCount > fCount) {
+ this->push_back_n(newCount - fCount);
+ } else if (newCount < fCount) {
+ this->pop_back_n(fCount - newCount);
+ }
+ }
+
+ /** Swaps the contents of this array with that array. Does a pointer swap if possible,
+ otherwise copies the T values. */
+ void swap(SkTArray& that) {
+ using std::swap;
+ if (this == &that) {
+ return;
+ }
+ if (fOwnMemory && that.fOwnMemory) {
+ swap(fItemArray, that.fItemArray);
+ swap(fCount, that.fCount);
+ swap(fAllocCount, that.fAllocCount);
+ } else {
+ // This could be more optimal...
+ SkTArray copy(std::move(that));
+ that = std::move(*this);
+ *this = std::move(copy);
+ }
+ }
+
+ T* begin() {
+ return fItemArray;
+ }
+ const T* begin() const {
+ return fItemArray;
+ }
+ T* end() {
+ return fItemArray ? fItemArray + fCount : nullptr;
+ }
+ const T* end() const {
+ return fItemArray ? fItemArray + fCount : nullptr;
+ }
+ T* data() { return fItemArray; }
+ const T* data() const { return fItemArray; }
+ size_t size() const { return (size_t)fCount; }
+ void resize(size_t count) { this->resize_back((int)count); }
+
+ /**
+ * Get the i^th element.
+ */
+ T& operator[] (int i) {
+ SkASSERT(i < fCount);
+ SkASSERT(i >= 0);
+ return fItemArray[i];
+ }
+
+ const T& operator[] (int i) const {
+ SkASSERT(i < fCount);
+ SkASSERT(i >= 0);
+ return fItemArray[i];
+ }
+
+ /**
+ * equivalent to operator[](0)
+ */
+ T& front() { SkASSERT(fCount > 0); return fItemArray[0];}
+
+ const T& front() const { SkASSERT(fCount > 0); return fItemArray[0];}
+
+ /**
+ * equivalent to operator[](count() - 1)
+ */
+ T& back() { SkASSERT(fCount); return fItemArray[fCount - 1];}
+
+ const T& back() const { SkASSERT(fCount > 0); return fItemArray[fCount - 1];}
+
+ /**
+ * equivalent to operator[](count()-1-i)
+ */
+ T& fromBack(int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+ const T& fromBack(int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+ bool operator==(const SkTArray<T, MEM_MOVE>& right) const {
+ int leftCount = this->count();
+ if (leftCount != right.count()) {
+ return false;
+ }
+ for (int index = 0; index < leftCount; ++index) {
+ if (fItemArray[index] != right.fItemArray[index]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const SkTArray<T, MEM_MOVE>& right) const {
+ return !(*this == right);
+ }
+
+ inline int allocCntForTest() const;
+
+protected:
+ /**
+ * Creates an empty array that will use the passed storage block until it
+ * is insufficiently large to hold the entire array.
+ */
+ template <int N>
+ SkTArray(SkAlignedSTStorage<N,T>* storage) {
+ this->initWithPreallocatedStorage(0, storage->get(), N);
+ }
+
+ /**
+ * Copy another array, using preallocated storage if preAllocCount >=
+ * array.count(). Otherwise storage will only be used when array shrinks
+ * to fit.
+ */
+ template <int N>
+ SkTArray(const SkTArray& array, SkAlignedSTStorage<N,T>* storage) {
+ this->initWithPreallocatedStorage(array.fCount, storage->get(), N);
+ this->copy(array.fItemArray);
+ }
+
+ /**
+ * Move another array, using preallocated storage if preAllocCount >=
+ * array.count(). Otherwise storage will only be used when array shrinks
+ * to fit.
+ */
+ template <int N>
+ SkTArray(SkTArray&& array, SkAlignedSTStorage<N,T>* storage) {
+ this->initWithPreallocatedStorage(array.fCount, storage->get(), N);
+ array.move(fMemArray);
+ array.fCount = 0;
+ }
+
+ /**
+ * Copy a C array, using preallocated storage if preAllocCount >=
+ * count. Otherwise storage will only be used when array shrinks
+ * to fit.
+ */
+ template <int N>
+ SkTArray(const T* array, int count, SkAlignedSTStorage<N,T>* storage) {
+ this->initWithPreallocatedStorage(count, storage->get(), N);
+ this->copy(array);
+ }
+
+private:
+ void init(int count = 0, int reserveCount = 0) {
+ SkASSERT(count >= 0);
+ SkASSERT(reserveCount >= 0);
+ fCount = count;
+ if (!count && !reserveCount) {
+ fAllocCount = 0;
+ fMemArray = nullptr;
+ fOwnMemory = true;
+ fReserved = false;
+ } else {
+ fAllocCount = SkTMax(count, SkTMax(kMinHeapAllocCount, reserveCount));
+ fMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
+ fOwnMemory = true;
+ fReserved = reserveCount > 0;
+ }
+ }
+
+ void initWithPreallocatedStorage(int count, void* preallocStorage, int preallocCount) {
+ SkASSERT(count >= 0);
+ SkASSERT(preallocCount > 0);
+ SkASSERT(preallocStorage);
+ fCount = count;
+ fMemArray = nullptr;
+ fReserved = false;
+ if (count > preallocCount) {
+ fAllocCount = SkTMax(count, kMinHeapAllocCount);
+ fMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
+ fOwnMemory = true;
+ } else {
+ fAllocCount = preallocCount;
+ fMemArray = preallocStorage;
+ fOwnMemory = false;
+ }
+ }
+
+ /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
+ * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
+ */
+ void copy(const T* src) {
+        // Some types may be trivially copyable, in which case we *could* use memcpy; but
+ // MEM_MOVE == true implies that the type is trivially movable, and not necessarily
+ // trivially copyable (think sk_sp<>). So short of adding another template arg, we
+ // must be conservative and use copy construction.
+ for (int i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(src[i]);
+ }
+ }
+
+ template <bool E = MEM_MOVE> SK_WHEN(E, void) move(int dst, int src) {
+ memcpy(&fItemArray[dst], &fItemArray[src], sizeof(T));
+ }
+ template <bool E = MEM_MOVE> SK_WHEN(E, void) move(void* dst) {
+ sk_careful_memcpy(dst, fMemArray, fCount * sizeof(T));
+ }
+
+ template <bool E = MEM_MOVE> SK_WHEN(!E, void) move(int dst, int src) {
+ new (&fItemArray[dst]) T(std::move(fItemArray[src]));
+ fItemArray[src].~T();
+ }
+ template <bool E = MEM_MOVE> SK_WHEN(!E, void) move(void* dst) {
+ for (int i = 0; i < fCount; ++i) {
+ new (static_cast<char*>(dst) + sizeof(T) * i) T(std::move(fItemArray[i]));
+ fItemArray[i].~T();
+ }
+ }
+
+ static constexpr int kMinHeapAllocCount = 8;
+
+ // Helper function that makes space for n objects, adjusts the count, but does not initialize
+ // the new objects.
+ void* push_back_raw(int n) {
+ this->checkRealloc(n);
+ void* ptr = fItemArray + fCount;
+ fCount += n;
+ return ptr;
+ }
+
+ void checkRealloc(int delta) {
+ SkASSERT(fCount >= 0);
+ SkASSERT(fAllocCount >= 0);
+ SkASSERT(-delta <= fCount);
+
+        // Move into 64-bit math temporarily, to avoid local overflows.
+ int64_t newCount = fCount + delta;
+
+ // We allow fAllocCount to be in the range [newCount, 3*newCount]. We also never shrink
+ // when we're currently using preallocated memory, would allocate less than
+ // kMinHeapAllocCount, or a reserve count was specified that has yet to be exceeded.
+ bool mustGrow = newCount > fAllocCount;
+ bool shouldShrink = fAllocCount > 3 * newCount && fOwnMemory && !fReserved;
+ if (!mustGrow && !shouldShrink) {
+ return;
+ }
+
+ // Whether we're growing or shrinking, we leave at least 50% extra space for future growth.
+ int64_t newAllocCount = newCount + ((newCount + 1) >> 1);
+ // Align the new allocation count to kMinHeapAllocCount.
+ static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");
+ newAllocCount = (newAllocCount + (kMinHeapAllocCount - 1)) & ~(kMinHeapAllocCount - 1);
+ // At small sizes the old and new alloc count can both be kMinHeapAllocCount.
+ if (newAllocCount == fAllocCount) {
+ return;
+ }
+
+ fAllocCount = Sk64_pin_to_s32(newAllocCount);
+ SkASSERT(fAllocCount >= newCount);
+ void* newMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
+ this->move(newMemArray);
+ if (fOwnMemory) {
+            sk_free(fMemArray);
+        }
+ fMemArray = newMemArray;
+ fOwnMemory = true;
+ fReserved = false;
+ }
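+
+    // Worked example of the sizing above, as a sketch: for newCount == 21,
+    // newAllocCount == 21 + ((21 + 1) >> 1) == 32, which is already a multiple of
+    // kMinHeapAllocCount (8), so fAllocCount becomes 32.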
+
+ union {
+ T* fItemArray;
+ void* fMemArray;
+ };
+ int fCount;
+ int fAllocCount;
+ bool fOwnMemory : 1;
+ bool fReserved : 1;
+};
+
+template <typename T, bool M> static inline void swap(SkTArray<T, M>& a, SkTArray<T, M>& b) {
+ a.swap(b);
+}
+
+template<typename T, bool MEM_MOVE> constexpr int SkTArray<T, MEM_MOVE>::kMinHeapAllocCount;
+
+/**
+ * Subclass of SkTArray that contains a preallocated memory block for the array.
+ */
+template <int N, typename T, bool MEM_MOVE = false>
+class SkSTArray : public SkTArray<T, MEM_MOVE> {
+private:
+ typedef SkTArray<T, MEM_MOVE> INHERITED;
+
+public:
+ SkSTArray() : INHERITED(&fStorage) {
+ }
+
+ SkSTArray(const SkSTArray& array)
+ : INHERITED(array, &fStorage) {
+ }
+
+ SkSTArray(SkSTArray&& array)
+ : INHERITED(std::move(array), &fStorage) {
+ }
+
+ explicit SkSTArray(const INHERITED& array)
+ : INHERITED(array, &fStorage) {
+ }
+
+ SkSTArray(INHERITED&& array)
+ : INHERITED(std::move(array), &fStorage) {
+ }
+
+ explicit SkSTArray(int reserveCount)
+ : INHERITED(reserveCount) {
+ }
+
+ SkSTArray(const T* array, int count)
+ : INHERITED(array, count, &fStorage) {
+ }
+
+ SkSTArray& operator=(const SkSTArray& array) {
+ INHERITED::operator=(array);
+ return *this;
+ }
+
+ SkSTArray& operator=(SkSTArray&& array) {
+ INHERITED::operator=(std::move(array));
+ return *this;
+ }
+
+ SkSTArray& operator=(const INHERITED& array) {
+ INHERITED::operator=(array);
+ return *this;
+ }
+
+ SkSTArray& operator=(INHERITED&& array) {
+ INHERITED::operator=(std::move(array));
+ return *this;
+ }
+
+private:
+ SkAlignedSTStorage<N,T> fStorage;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkTDArray.h b/gfx/skia/skia/include/private/SkTDArray.h
new file mode 100644
index 0000000000..59c180bcde
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTDArray.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTDArray_DEFINED
+#define SkTDArray_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+
+#include <initializer_list>
+#include <utility>
+
+template <typename T> class SkTDArray {
+public:
+ SkTDArray() : fArray(nullptr), fReserve(0), fCount(0) {}
+ SkTDArray(const T src[], int count) {
+ SkASSERT(src || count == 0);
+
+ fReserve = fCount = 0;
+ fArray = nullptr;
+ if (count) {
+ fArray = (T*)sk_malloc_throw(count * sizeof(T));
+ memcpy(fArray, src, sizeof(T) * count);
+ fReserve = fCount = count;
+ }
+ }
+ SkTDArray(const std::initializer_list<T>& list) : SkTDArray(list.begin(), list.size()) {}
+ SkTDArray(const SkTDArray<T>& src) : fArray(nullptr), fReserve(0), fCount(0) {
+ SkTDArray<T> tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+ }
+ SkTDArray(SkTDArray<T>&& src) : fArray(nullptr), fReserve(0), fCount(0) {
+ this->swap(src);
+ }
+ ~SkTDArray() {
+ sk_free(fArray);
+ }
+
+ SkTDArray<T>& operator=(const SkTDArray<T>& src) {
+ if (this != &src) {
+ if (src.fCount > fReserve) {
+ SkTDArray<T> tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+ } else {
+ sk_careful_memcpy(fArray, src.fArray, sizeof(T) * src.fCount);
+ fCount = src.fCount;
+ }
+ }
+ return *this;
+ }
+ SkTDArray<T>& operator=(SkTDArray<T>&& src) {
+ if (this != &src) {
+ this->swap(src);
+ src.reset();
+ }
+ return *this;
+ }
+
+ friend bool operator==(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return a.fCount == b.fCount &&
+ (a.fCount == 0 ||
+ !memcmp(a.fArray, b.fArray, a.fCount * sizeof(T)));
+ }
+ friend bool operator!=(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return !(a == b);
+ }
+
+ void swap(SkTDArray<T>& that) {
+ using std::swap;
+ swap(fArray, that.fArray);
+ swap(fReserve, that.fReserve);
+ swap(fCount, that.fCount);
+ }
+
+ bool isEmpty() const { return fCount == 0; }
+ bool empty() const { return this->isEmpty(); }
+
+ /**
+ * Return the number of elements in the array
+ */
+ int count() const { return fCount; }
+ size_t size() const { return fCount; }
+
+ /**
+ * Return the total number of elements allocated.
+ * reserved() - count() gives you the number of elements you can add
+ * without causing an allocation.
+ */
+ int reserved() const { return fReserve; }
+
+ /**
+ * return the number of bytes in the array: count * sizeof(T)
+ */
+ size_t bytes() const { return fCount * sizeof(T); }
+
+ T* begin() { return fArray; }
+ const T* begin() const { return fArray; }
+ T* end() { return fArray ? fArray + fCount : nullptr; }
+ const T* end() const { return fArray ? fArray + fCount : nullptr; }
+
+ T& operator[](int index) {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+ const T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ T& getAt(int index) {
+ return (*this)[index];
+ }
+
+
+ void reset() {
+ if (fArray) {
+ sk_free(fArray);
+ fArray = nullptr;
+ fReserve = fCount = 0;
+ } else {
+ SkASSERT(fReserve == 0 && fCount == 0);
+ }
+ }
+
+ void rewind() {
+ // same as setCount(0)
+ fCount = 0;
+ }
+
+ /**
+ * Sets the number of elements in the array.
+ * If the array does not have space for count elements, it will increase
+ * the storage allocated to some amount greater than that required.
+ * It will never shrink the storage.
+ */
+ void setCount(int count) {
+ SkASSERT(count >= 0);
+ if (count > fReserve) {
+ this->resizeStorageToAtLeast(count);
+ }
+ fCount = count;
+ }
+
+ void setReserve(int reserve) {
+ SkASSERT(reserve >= 0);
+ if (reserve > fReserve) {
+ this->resizeStorageToAtLeast(reserve);
+ }
+ }
+ void reserve(size_t n) {
+ SkASSERT_RELEASE(SkTFitsIn<int>(n));
+ this->setReserve(SkToInt(n));
+ }
+
+ T* prepend() {
+ this->adjustCount(1);
+ memmove(fArray + 1, fArray, (fCount - 1) * sizeof(T));
+ return fArray;
+ }
+
+ T* append() {
+ return this->append(1, nullptr);
+ }
+ T* append(int count, const T* src = nullptr) {
+ int oldCount = fCount;
+ if (count) {
+ SkASSERT(src == nullptr || fArray == nullptr ||
+ src + count <= fArray || fArray + oldCount <= src);
+
+ this->adjustCount(count);
+ if (src) {
+ memcpy(fArray + oldCount, src, sizeof(T) * count);
+ }
+ }
+ return fArray + oldCount;
+ }
+
+ T* insert(int index) {
+ return this->insert(index, 1, nullptr);
+ }
+ T* insert(int index, int count, const T* src = nullptr) {
+ SkASSERT(count);
+ SkASSERT(index <= fCount);
+ size_t oldCount = fCount;
+ this->adjustCount(count);
+ T* dst = fArray + index;
+ memmove(dst + count, dst, sizeof(T) * (oldCount - index));
+ if (src) {
+ memcpy(dst, src, sizeof(T) * count);
+ }
+ return dst;
+ }
+
+ void remove(int index, int count = 1) {
+ SkASSERT(index + count <= fCount);
+ fCount = fCount - count;
+ memmove(fArray + index, fArray + index + count, sizeof(T) * (fCount - index));
+ }
+
+ void removeShuffle(int index) {
+ SkASSERT(index < fCount);
+ int newCount = fCount - 1;
+ fCount = newCount;
+ if (index != newCount) {
+ memcpy(fArray + index, fArray + newCount, sizeof(T));
+ }
+ }
+
+ int find(const T& elem) const {
+ const T* iter = fArray;
+ const T* stop = fArray + fCount;
+
+ for (; iter < stop; iter++) {
+ if (*iter == elem) {
+ return SkToInt(iter - fArray);
+ }
+ }
+ return -1;
+ }
+
+ int rfind(const T& elem) const {
+ const T* iter = fArray + fCount;
+ const T* stop = fArray;
+
+ while (iter > stop) {
+ if (*--iter == elem) {
+ return SkToInt(iter - stop);
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Returns true iff the array contains this element.
+ */
+ bool contains(const T& elem) const {
+ return (this->find(elem) >= 0);
+ }
+
+ /**
+ * Copies up to max elements into dst. The number of items copied is
+ * capped by count - index. The actual number copied is returned.
+ */
+ int copyRange(T* dst, int index, int max) const {
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ if (index >= fCount) {
+ return 0;
+ }
+ int count = SkMin32(max, fCount - index);
+ memcpy(dst, fArray + index, sizeof(T) * count);
+ return count;
+ }
+
+ void copy(T* dst) const {
+ this->copyRange(dst, 0, fCount);
+ }
+
+ // routines to treat the array like a stack
+ void push_back(const T& v) { *this->append() = v; }
+ T* push() { return this->append(); }
+ const T& top() const { return (*this)[fCount - 1]; }
+ T& top() { return (*this)[fCount - 1]; }
+ void pop(T* elem) { SkASSERT(fCount > 0); if (elem) *elem = (*this)[fCount - 1]; --fCount; }
+ void pop() { SkASSERT(fCount > 0); --fCount; }
+
+ void deleteAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ delete *iter;
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void freeAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ sk_free(*iter);
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void unrefAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ (*iter)->unref();
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void safeUnrefAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ SkSafeUnref(*iter);
+ iter += 1;
+ }
+ this->reset();
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT((fReserve == 0 && fArray == nullptr) ||
+ (fReserve > 0 && fArray != nullptr));
+ SkASSERT(fCount <= fReserve);
+ }
+#endif
+
+ void shrinkToFit() {
+ fReserve = fCount;
+ fArray = (T*)sk_realloc_throw(fArray, fReserve * sizeof(T));
+ }
+
+private:
+ T* fArray;
+ int fReserve; // size of the allocation in fArray (#elements)
+ int fCount; // logical number of elements (fCount <= fReserve)
+
+ /**
+ * Adjusts the number of elements in the array.
+ * This is the same as calling setCount(count() + delta).
+ */
+ void adjustCount(int delta) {
+ SkASSERT(delta > 0);
+
+ // We take care to avoid overflow here.
+ // The sum of fCount and delta is at most 4294967294, which fits fine in uint32_t.
+ uint32_t count = (uint32_t)fCount + (uint32_t)delta;
+ SkASSERT_RELEASE( SkTFitsIn<int>(count) );
+
+ this->setCount(SkTo<int>(count));
+ }
+
+ /**
+ * Increase the storage allocation such that it can hold (fCount + extra)
+ * elements.
+ * It never shrinks the allocation, and it may increase the allocation by
+ * more than is strictly required, based on a private growth heuristic.
+ *
+ * note: does NOT modify fCount
+ */
+ void resizeStorageToAtLeast(int count) {
+ SkASSERT(count > fReserve);
+
+ // We take care to avoid overflow here.
+ // The maximum value we can get for reserve here is 2684354563, which fits in uint32_t.
+ uint32_t reserve = (uint32_t)count + 4;
+ reserve += reserve / 4;
+ SkASSERT_RELEASE( SkTFitsIn<int>(reserve) );
+
+ fReserve = SkTo<int>(reserve);
+ fArray = (T*)sk_realloc_throw(fArray, fReserve * sizeof(T));
+ }
+};
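+
+// Example usage, as a minimal sketch:
+//
+//     SkTDArray<int> values;
+//     *values.append() = 3;          // grow by one element and write the new slot
+//     values.push_back(7);
+//     int last;
+//     values.pop(&last);             // last == 7; count() drops back to 1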
+
+template <typename T> static inline void swap(SkTDArray<T>& a, SkTDArray<T>& b) {
+ a.swap(b);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkTFitsIn.h b/gfx/skia/skia/include/private/SkTFitsIn.h
new file mode 100644
index 0000000000..a912f13e08
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTFitsIn.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTFitsIn_DEFINED
+#define SkTFitsIn_DEFINED
+
+#include <limits>
+#include <stdint.h>
+#include <type_traits>
+
+/**
+ * std::underlying_type is only defined for enums. For integral types, we just want the type.
+ */
+template <typename T, class Enable = void>
+struct sk_strip_enum {
+ typedef T type;
+};
+
+template <typename T>
+struct sk_strip_enum<T, typename std::enable_if<std::is_enum<T>::value>::type> {
+ typedef typename std::underlying_type<T>::type type;
+};
+
+
+/**
+ * In C++, an unsigned-to-signed cast where the source value cannot be represented in the
+ * destination type results in an implementation-defined destination value. Unlike C, C++ does not
+ * allow a trap. This makes "(S)(D)s == s" a possibly useful test. However, there are two cases
+ * where this is incorrect:
+ *
+ * when testing if a value of a smaller signed type can be represented in a larger unsigned type
+ * (int8_t)(uint16_t)-1 == -1 => (int8_t)0xFFFF == -1 => [implementation defined] == -1
+ *
+ * when testing if a value of a larger unsigned type can be represented in a smaller signed type
+ * (uint16_t)(int8_t)0xFFFF == 0xFFFF => (uint16_t)-1 == 0xFFFF => 0xFFFF == 0xFFFF => true.
+ *
+ * Consider the cases:
+ * u = unsigned, less digits
+ * U = unsigned, more digits
+ * s = signed, less digits
+ * S = signed, more digits
+ * v is the value we're considering.
+ *
+ * u -> U: (u)(U)v == v, trivially true
+ * U -> u: (U)(u)v == v, both casts well defined, test works
+ * s -> S: (s)(S)v == v, trivially true
+ *  S -> s: (S)(s)v == v, first cast yields an implementation-defined value, second cast defined, test works
+ * s -> U: (s)(U)v == v, *this is bad*, the second cast results in implementation defined value
+ * S -> u: (S)(u)v == v, the second cast is required to prevent promotion of rhs to unsigned
+ * u -> S: (u)(S)v == v, trivially true
+ * U -> s: (U)(s)v == v, *this is bad*,
+ * first cast results in implementation defined value,
+ * second cast is defined. However, this creates false positives
+ * uint16_t x = 0xFFFF
+ * (uint16_t)(int8_t)x == x
+ * => (uint16_t)-1 == x
+ * => 0xFFFF == x
+ * => true
+ *
+ * So for the eight cases three are trivially true, three more are valid casts, and two are special.
+ * The two 'full' checks which otherwise require two comparisons are valid cast checks.
+ * The two remaining checks s -> U [v >= 0] and U -> s [v <= max(s)] can be done with one op.
+ */
+
+template <typename D, typename S>
+static constexpr inline
+typename std::enable_if<(std::is_integral<S>::value || std::is_enum<S>::value) &&
+ (std::is_integral<D>::value || std::is_enum<D>::value), bool>::type
+/*bool*/ SkTFitsIn(S src) {
+ // SkTFitsIn() is used in public headers, so needs to be written targeting at most C++11.
+ return
+
+ // E.g. (int8_t)(uint8_t) int8_t(-1) == -1, but the uint8_t == 255, not -1.
+ (std::is_signed<S>::value && std::is_unsigned<D>::value && sizeof(S) <= sizeof(D)) ?
+ (S)0 <= src :
+
+ // E.g. (uint8_t)(int8_t) uint8_t(255) == 255, but the int8_t == -1.
+ (std::is_signed<D>::value && std::is_unsigned<S>::value && sizeof(D) <= sizeof(S)) ?
+ src <= (S)std::numeric_limits<typename sk_strip_enum<D>::type>::max() :
+
+#if !defined(SK_DEBUG) && !defined(__MSVC_RUNTIME_CHECKS)
+ // Correct (simple) version. This trips up MSVC's /RTCc run-time checking.
+ (S)(D)src == src;
+#else
+ // More complex version that's safe with /RTCc. Used in all debug builds, for coverage.
+ (std::is_signed<S>::value) ?
+ (intmax_t)src >= (intmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::min() &&
+ (intmax_t)src <= (intmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::max() :
+
+ // std::is_unsigned<S> ?
+ (uintmax_t)src <= (uintmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::max();
+#endif
+}
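+
+// Illustrative sanity checks (not part of the upstream header):
+//   SkTFitsIn<int8_t  >((uint16_t)0xFFFF) == false   // 65535 doesn't fit in int8_t
+//   SkTFitsIn<uint16_t>((int8_t)-1)       == false   // negative never fits in unsigned
+//   SkTFitsIn<int32_t >((uint16_t)0xFFFF) == true    // 65535 fits fine in int32_t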
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkTHash.h b/gfx/skia/skia/include/private/SkTHash.h
new file mode 100644
index 0000000000..bc563d168c
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTHash.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTHash_DEFINED
+#define SkTHash_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/SkTemplates.h"
+#include <new>
+
+// Before trying to use SkTHashTable, look below to see if SkTHashMap or SkTHashSet works for you.
+// They're easier to use, usually perform the same, and have fewer sharp edges.
+
+// T and K are treated as ordinary copyable C++ types.
+// Traits must have:
+// - static K GetKey(T)
+// - static uint32_t Hash(K)
+// If the key is large and stored inside T, you may want to make K a const&.
+// Similarly, if T is large you might want it to be a pointer.
+template <typename T, typename K, typename Traits = T>
+class SkTHashTable {
+public:
+ SkTHashTable() : fCount(0), fCapacity(0) {}
+ SkTHashTable(SkTHashTable&& other)
+ : fCount(other.fCount)
+ , fCapacity(other.fCapacity)
+ , fSlots(std::move(other.fSlots)) { other.fCount = other.fCapacity = 0; }
+
+ SkTHashTable& operator=(SkTHashTable&& other) {
+ if (this != &other) {
+ this->~SkTHashTable();
+ new (this) SkTHashTable(std::move(other));
+ }
+ return *this;
+ }
+
+ // Clear the table.
+ void reset() { *this = SkTHashTable(); }
+
+ // How many entries are in the table?
+ int count() const { return fCount; }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fCapacity * sizeof(Slot); }
+
+ // !!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!
+ // set(), find() and foreach() all allow mutable access to table entries.
+ // If you change an entry so that it no longer has the same key, all hell
+ // will break loose. Do not do that!
+ //
+ // Please prefer to use SkTHashMap or SkTHashSet, which do not have this danger.
+
+ // The pointers returned by set() and find() are valid only until the next call to set().
+ // The pointers you receive in foreach() are only valid for its duration.
+
+ // Copy val into the hash table, returning a pointer to the copy now in the table.
+ // If there already is an entry in the table with the same key, we overwrite it.
+ T* set(T val) {
+ if (4 * fCount >= 3 * fCapacity) {
+ this->resize(fCapacity > 0 ? fCapacity * 2 : 4);
+ }
+ return this->uncheckedSet(std::move(val));
+ }
+
+ // If there is an entry in the table with this key, return a pointer to it. If not, null.
+ T* find(const K& key) const {
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ if (s.empty()) {
+ return nullptr;
+ }
+ if (hash == s.hash && key == Traits::GetKey(s.val)) {
+ return &s.val;
+ }
+ index = this->next(index);
+ }
+ SkASSERT(fCapacity == 0);
+ return nullptr;
+ }
+
+ // If there is an entry in the table with this key, return it. If not, null.
+    // This only works for pointer type T, and cannot be used to find a nullptr entry.
+ T findOrNull(const K& key) const {
+ if (T* p = this->find(key)) {
+ return *p;
+ }
+ return nullptr;
+ }
+
+ // Remove the value with this key from the hash table.
+ void remove(const K& key) {
+ SkASSERT(this->find(key));
+
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ SkASSERT(!s.empty());
+ if (hash == s.hash && key == Traits::GetKey(s.val)) {
+ fCount--;
+ break;
+ }
+ index = this->next(index);
+ }
+
+ // Rearrange elements to restore the invariants for linear probing.
+ for (;;) {
+ Slot& emptySlot = fSlots[index];
+ int emptyIndex = index;
+ int originalIndex;
+ // Look for an element that can be moved into the empty slot.
+            // If the empty slot lies between where an element landed and its native slot,
+            // move that element into the empty slot. Don't move it if its native slot lies
+            // between where the element landed and the empty slot.
+ // [native] <= [empty] < [candidate] == GOOD, can move candidate to empty slot
+ // [empty] < [native] < [candidate] == BAD, need to leave candidate where it is
+ do {
+ index = this->next(index);
+ Slot& s = fSlots[index];
+ if (s.empty()) {
+ // We're done shuffling elements around. Clear the last empty slot.
+ emptySlot = Slot();
+ return;
+ }
+ originalIndex = s.hash & (fCapacity - 1);
+ } while ((index <= originalIndex && originalIndex < emptyIndex)
+ || (originalIndex < emptyIndex && emptyIndex < index)
+ || (emptyIndex < index && index <= originalIndex));
+ // Move the element to the empty slot.
+ Slot& moveFrom = fSlots[index];
+ emptySlot = std::move(moveFrom);
+ }
+ }
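+    // Illustrative trace (not in the upstream source): with capacity 4, suppose keys A
+    // and B both hash to slot 2, so A occupies slot 2 and B, probing downward, slot 1.
+    // remove(A) empties slot 2, finds B at slot 1 whose native slot (2) is not between
+    // the two, moves B back up to slot 2, and finally clears slot 1 once it reaches an
+    // empty slot below.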
+
+ // Call fn on every entry in the table. You may mutate the entries, but be very careful.
+ template <typename Fn> // f(T*)
+ void foreach(Fn&& fn) {
+ for (int i = 0; i < fCapacity; i++) {
+ if (!fSlots[i].empty()) {
+ fn(&fSlots[i].val);
+ }
+ }
+ }
+
+ // Call fn on every entry in the table. You may not mutate anything.
+ template <typename Fn> // f(T) or f(const T&)
+ void foreach(Fn&& fn) const {
+ for (int i = 0; i < fCapacity; i++) {
+ if (!fSlots[i].empty()) {
+ fn(fSlots[i].val);
+ }
+ }
+ }
+
+private:
+ T* uncheckedSet(T&& val) {
+ const K& key = Traits::GetKey(val);
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ if (s.empty()) {
+ // New entry.
+ s.val = std::move(val);
+ s.hash = hash;
+ fCount++;
+ return &s.val;
+ }
+ if (hash == s.hash && key == Traits::GetKey(s.val)) {
+ // Overwrite previous entry.
+ // Note: this triggers extra copies when adding the same value repeatedly.
+ s.val = std::move(val);
+ return &s.val;
+ }
+
+ index = this->next(index);
+ }
+ SkASSERT(false);
+ return nullptr;
+ }
+
+ void resize(int capacity) {
+ int oldCapacity = fCapacity;
+ SkDEBUGCODE(int oldCount = fCount);
+
+ fCount = 0;
+ fCapacity = capacity;
+ SkAutoTArray<Slot> oldSlots = std::move(fSlots);
+ fSlots = SkAutoTArray<Slot>(capacity);
+
+ for (int i = 0; i < oldCapacity; i++) {
+ Slot& s = oldSlots[i];
+ if (!s.empty()) {
+ this->uncheckedSet(std::move(s.val));
+ }
+ }
+ SkASSERT(fCount == oldCount);
+ }
+
+ int next(int index) const {
+ index--;
+ if (index < 0) { index += fCapacity; }
+ return index;
+ }
+
+ static uint32_t Hash(const K& key) {
+ uint32_t hash = Traits::Hash(key) & 0xffffffff;
+ return hash ? hash : 1; // We reserve hash 0 to mark empty.
+ }
+
+ struct Slot {
+ Slot() : val{}, hash(0) {}
+ Slot(T&& v, uint32_t h) : val(std::move(v)), hash(h) {}
+ Slot(Slot&& o) { *this = std::move(o); }
+ Slot& operator=(Slot&& o) {
+ val = std::move(o.val);
+ hash = o.hash;
+ return *this;
+ }
+
+ bool empty() const { return this->hash == 0; }
+
+ T val;
+ uint32_t hash;
+ };
+
+ int fCount, fCapacity;
+ SkAutoTArray<Slot> fSlots;
+
+ SkTHashTable(const SkTHashTable&) = delete;
+ SkTHashTable& operator=(const SkTHashTable&) = delete;
+};
+
+// Maps K->V. A more user-friendly wrapper around SkTHashTable, suitable for most use cases.
+// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two.
+template <typename K, typename V, typename HashK = SkGoodHash>
+class SkTHashMap {
+public:
+ SkTHashMap() {}
+ SkTHashMap(SkTHashMap&&) = default;
+ SkTHashMap& operator=(SkTHashMap&&) = default;
+
+ // Clear the map.
+ void reset() { fTable.reset(); }
+
+ // How many key/value pairs are in the table?
+ int count() const { return fTable.count(); }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+ // N.B. The pointers returned by set() and find() are valid only until the next call to set().
+
+ // Set key to val in the table, replacing any previous value with the same key.
+ // We copy both key and val, and return a pointer to the value copy now in the table.
+ V* set(K key, V val) {
+ Pair* out = fTable.set({std::move(key), std::move(val)});
+ return &out->val;
+ }
+
+ // If there is key/value entry in the table with this key, return a pointer to the value.
+ // If not, return null.
+ V* find(const K& key) const {
+ if (Pair* p = fTable.find(key)) {
+ return &p->val;
+ }
+ return nullptr;
+ }
+
+ // Remove the key/value entry in the table with this key.
+ void remove(const K& key) {
+ SkASSERT(this->find(key));
+ fTable.remove(key);
+ }
+
+ // Call fn on every key/value pair in the table. You may mutate the value but not the key.
+ template <typename Fn> // f(K, V*) or f(const K&, V*)
+ void foreach(Fn&& fn) {
+ fTable.foreach([&fn](Pair* p){ fn(p->key, &p->val); });
+ }
+
+ // Call fn on every key/value pair in the table. You may not mutate anything.
+ template <typename Fn> // f(K, V), f(const K&, V), f(K, const V&) or f(const K&, const V&).
+ void foreach(Fn&& fn) const {
+ fTable.foreach([&fn](const Pair& p){ fn(p.key, p.val); });
+ }
+
+private:
+ struct Pair {
+ K key;
+ V val;
+ static const K& GetKey(const Pair& p) { return p.key; }
+ static auto Hash(const K& key) { return HashK()(key); }
+ };
+
+ SkTHashTable<Pair, K> fTable;
+
+ SkTHashMap(const SkTHashMap&) = delete;
+ SkTHashMap& operator=(const SkTHashMap&) = delete;
+};
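+
+// Usage sketch (illustrative; not part of the upstream header):
+//   SkTHashMap<int, float> map;
+//   map.set(17, 1.5f);
+//   if (float* v = map.find(17)) { *v += 1; }   // pointer valid only until the next set()
+//   map.remove(17);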
+
+// A set of T. T is treated as an ordinary copyable C++ type.
+template <typename T, typename HashT = SkGoodHash>
+class SkTHashSet {
+public:
+ SkTHashSet() {}
+ SkTHashSet(SkTHashSet&&) = default;
+ SkTHashSet& operator=(SkTHashSet&&) = default;
+
+ // Clear the set.
+ void reset() { fTable.reset(); }
+
+ // How many items are in the set?
+ int count() const { return fTable.count(); }
+
+ // Is empty?
+ bool empty() const { return fTable.count() == 0; }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+ // Copy an item into the set.
+ void add(T item) { fTable.set(std::move(item)); }
+
+ // Is this item in the set?
+ bool contains(const T& item) const { return SkToBool(this->find(item)); }
+
+ // If an item equal to this is in the set, return a pointer to it, otherwise null.
+ // This pointer remains valid until the next call to add().
+ const T* find(const T& item) const { return fTable.find(item); }
+
+ // Remove the item in the set equal to this.
+ void remove(const T& item) {
+ SkASSERT(this->contains(item));
+ fTable.remove(item);
+ }
+
+ // Call fn on every item in the set. You may not mutate anything.
+ template <typename Fn> // f(T), f(const T&)
+ void foreach (Fn&& fn) const {
+ fTable.foreach(fn);
+ }
+
+private:
+ struct Traits {
+ static const T& GetKey(const T& item) { return item; }
+ static auto Hash(const T& item) { return HashT()(item); }
+ };
+ SkTHashTable<T, T, Traits> fTable;
+
+ SkTHashSet(const SkTHashSet&) = delete;
+ SkTHashSet& operator=(const SkTHashSet&) = delete;
+};
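+
+// Usage sketch (illustrative; not part of the upstream header):
+//   SkTHashSet<uint32_t> seen;
+//   seen.add(42);
+//   SkASSERT(seen.contains(42));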
+
+#endif//SkTHash_DEFINED
diff --git a/gfx/skia/skia/include/private/SkTLogic.h b/gfx/skia/skia/include/private/SkTLogic.h
new file mode 100644
index 0000000000..527f8dde23
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTLogic.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ *
+ * This header provides some of the helpers (like std::enable_if_t) that become
+ * available with C++14 in the <type_traits> header, exposed here in the skstd
+ * namespace. It also provides several Skia-specific additions such as SK_WHEN
+ * and the sknonstd namespace.
+ */
+
+#ifndef SkTLogic_DEFINED
+#define SkTLogic_DEFINED
+
+#include <cstddef>
+#include <new> // see bug 981264
+#include <type_traits>
+
+namespace skstd {
+
+template <bool B> using bool_constant = std::integral_constant<bool, B>;
+
+template <bool B, typename T, typename F> using conditional_t = typename std::conditional<B, T, F>::type;
+template <bool B, typename T = void> using enable_if_t = typename std::enable_if<B, T>::type;
+
+template <typename T> using remove_const_t = typename std::remove_const<T>::type;
+template <typename T> using remove_volatile_t = typename std::remove_volatile<T>::type;
+template <typename T> using remove_cv_t = typename std::remove_cv<T>::type;
+template <typename T> using remove_pointer_t = typename std::remove_pointer<T>::type;
+template <typename T> using remove_reference_t = typename std::remove_reference<T>::type;
+template <typename T> using remove_extent_t = typename std::remove_extent<T>::type;
+
+template <typename T> using add_const_t = typename std::add_const<T>::type;
+template <typename T> using add_volatile_t = typename std::add_volatile<T>::type;
+template <typename T> using add_cv_t = typename std::add_cv<T>::type;
+template <typename T> using add_pointer_t = typename std::add_pointer<T>::type;
+template <typename T> using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
+
+template <typename T> using result_of_t = typename std::result_of<T>::type;
+
+template <typename... T> using common_type_t = typename std::common_type<T...>::type;
+
+template <std::size_t... Ints> struct index_sequence {
+ using type = index_sequence;
+ using value_type = std::size_t;
+ static constexpr std::size_t size() noexcept { return sizeof...(Ints); }
+};
+
+template <typename S1, typename S2> struct make_index_sequence_combine;
+template <std::size_t... I1, std::size_t... I2>
+struct make_index_sequence_combine<skstd::index_sequence<I1...>, skstd::index_sequence<I2...>>
+ : skstd::index_sequence<I1..., (sizeof...(I1)+I2)...>
+{ };
+
+template <std::size_t N> struct make_index_sequence
+ : make_index_sequence_combine<typename skstd::make_index_sequence< N/2>::type,
+ typename skstd::make_index_sequence<N - N/2>::type>{};
+template<> struct make_index_sequence<0> : skstd::index_sequence< >{};
+template<> struct make_index_sequence<1> : skstd::index_sequence<0>{};
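+
+// Illustrative expansion (not part of the upstream header):
+//   make_index_sequence<4> splits into two make_index_sequence<2> halves, bottoms out
+//   at the <0> and <1> specializations, and combines to index_sequence<0, 1, 2, 3>.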
+
+struct monostate {};
+
+template<typename...> struct conjunction : std::true_type { };
+template<typename B0> struct conjunction<B0> : B0 { };
+template<typename B0, typename... Bs>
+struct conjunction<B0, Bs...> : std::conditional<bool(B0::value), conjunction<Bs...>, B0>::type { };
+} // namespace skstd
+
+// The sknonstd namespace contains things we would like to be proposed and feel std-ish.
+namespace sknonstd {
+
+// The name 'copy' here is fraught with peril. In this case it means 'append', not 'overwrite'.
+// Alternate proposed names are 'propagate', 'augment', or 'append' (and 'add', but already taken).
+// std::experimental::propagate_const already exists for other purposes in TSv2.
+// These also follow the <dest, source> pattern used by boost.
+template <typename D, typename S> struct copy_const {
+ using type = skstd::conditional_t<std::is_const<S>::value, skstd::add_const_t<D>, D>;
+};
+template <typename D, typename S> using copy_const_t = typename copy_const<D, S>::type;
+
+template <typename D, typename S> struct copy_volatile {
+ using type = skstd::conditional_t<std::is_volatile<S>::value, skstd::add_volatile_t<D>, D>;
+};
+template <typename D, typename S> using copy_volatile_t = typename copy_volatile<D, S>::type;
+
+template <typename D, typename S> struct copy_cv {
+ using type = copy_volatile_t<copy_const_t<D, S>, S>;
+};
+template <typename D, typename S> using copy_cv_t = typename copy_cv<D, S>::type;
+
+// The name 'same' here means 'overwrite'.
+// Alternate proposed names are 'replace', 'transfer', or 'qualify_from'.
+// same_xxx<D, S> can be written as copy_xxx<remove_xxx_t<D>, S>
+template <typename D, typename S> using same_const = copy_const<skstd::remove_const_t<D>, S>;
+template <typename D, typename S> using same_const_t = typename same_const<D, S>::type;
+template <typename D, typename S> using same_volatile = copy_volatile<skstd::remove_volatile_t<D>, S>;
+template <typename D, typename S> using same_volatile_t = typename same_volatile<D, S>::type;
+template <typename D, typename S> using same_cv = copy_cv<skstd::remove_cv_t<D>, S>;
+template <typename D, typename S> using same_cv_t = typename same_cv<D, S>::type;
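+
+// Illustrative examples (not part of the upstream header):
+//   copy_const_t<int, const float>  is  const int   (qualifier appended)
+//   same_const_t<const int, float>  is  int         (qualifier overwritten)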
+
+} // namespace sknonstd
+
+// Just a pithier wrapper for enable_if_t.
+#define SK_WHEN(condition, T) skstd::enable_if_t<!!(condition), T>
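+
+// Usage sketch (illustrative; not part of the upstream header):
+//   template <typename T> SK_WHEN(std::is_integral<T>::value, T) twice(T x) { return x + x; }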
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkTemplates.h b/gfx/skia/skia/include/private/SkTemplates.h
new file mode 100644
index 0000000000..94b335cda1
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTemplates.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTemplates_DEFINED
+#define SkTemplates_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTLogic.h"
+
+#include <string.h>
+#include <array>
+#include <cstddef>
+#include <memory>
+#include <new>
+#include <utility>
+
+/** \file SkTemplates.h
+
+ This file contains light-weight template classes for type-safe and exception-safe
+ resource management.
+*/
+
+/**
+ * Marks a local variable as known to be unused (to avoid warnings).
+ * Note that this does *not* prevent the local variable from being optimized away.
+ */
+template<typename T> inline void sk_ignore_unused_variable(const T&) { }
+
+/**
+ * Returns a pointer to a D which comes immediately after S[count].
+ */
+template <typename D, typename S> static D* SkTAfter(S* ptr, size_t count = 1) {
+ return reinterpret_cast<D*>(ptr + count);
+}
+
+/**
+ * Returns a pointer to a D which comes byteOffset bytes after S.
+ */
+template <typename D, typename S> static D* SkTAddOffset(S* ptr, size_t byteOffset) {
+ // The intermediate char* has the same cv-ness as D as this produces better error messages.
+ // This relies on the fact that reinterpret_cast can add constness, but cannot remove it.
+ return reinterpret_cast<D*>(reinterpret_cast<sknonstd::same_cv_t<char, D>*>(ptr) + byteOffset);
+}
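+
+// Illustrative use (not part of the upstream header), assuming hypothetical types
+// Header and Payload laid out back-to-back in one allocation:
+//   Payload* p = SkTAfter<Payload>(header);         // just past header[1]
+//   Payload* q = SkTAddOffset<Payload>(base, 16);   // 16 bytes past base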
+
+// TODO: when C++17 is available, use template <auto P>
+template <typename T, T* P> struct SkFunctionWrapper {
+ template <typename... Args>
+ auto operator()(Args&&... args) const -> decltype(P(std::forward<Args>(args)...)) {
+ return P(std::forward<Args>(args)...);
+ }
+};
+
+/** \class SkAutoTCallVProc
+
+ Call a function when this goes out of scope. The template uses two
+ parameters, the object, and a function that is to be called in the destructor.
+ If release() is called, the object reference is set to null. If the object
+ reference is null when the destructor is called, we do not call the
+ function.
+*/
+template <typename T, void (*P)(T*)> class SkAutoTCallVProc
+ : public std::unique_ptr<T, SkFunctionWrapper<skstd::remove_pointer_t<decltype(P)>, P>> {
+public:
+ SkAutoTCallVProc(T* obj)
+ : std::unique_ptr<T, SkFunctionWrapper<skstd::remove_pointer_t<decltype(P)>, P>>(obj) {}
+
+ operator T*() const { return this->get(); }
+};
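+
+// Usage sketch (illustrative; not part of the upstream header), assuming a
+// hypothetical C-style API with void CloseThing(Thing*):
+//   SkAutoTCallVProc<Thing, CloseThing> thing(OpenThing());
+//   ... use thing ...                    // CloseThing(thing) runs at scope exit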
+
+/** Allocate an array of T elements, and free the array in the destructor
+ */
+template <typename T> class SkAutoTArray {
+public:
+ SkAutoTArray() {}
+ /** Allocate count number of T elements
+ */
+ explicit SkAutoTArray(int count) {
+ SkASSERT(count >= 0);
+ if (count) {
+ fArray.reset(new T[count]);
+ }
+ SkDEBUGCODE(fCount = count;)
+ }
+
+ SkAutoTArray(SkAutoTArray&& other) : fArray(std::move(other.fArray)) {
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ SkAutoTArray& operator=(SkAutoTArray&& other) {
+ if (this != &other) {
+ fArray = std::move(other.fArray);
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ return *this;
+ }
+
+ /** Reallocates given a new count. Reallocation occurs even if new count equals old count.
+ */
+ void reset(int count) { *this = SkAutoTArray(count); }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray.get(); }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return fArray[index];
+ }
+
+private:
+ std::unique_ptr<T[]> fArray;
+ SkDEBUGCODE(int fCount = 0;)
+};
+
+/** Wraps SkAutoTArray, with room for kCountRequested elements preallocated.
+ */
+template <int kCountRequested, typename T> class SkAutoSTArray {
+public:
+ SkAutoSTArray(SkAutoSTArray&&) = delete;
+ SkAutoSTArray(const SkAutoSTArray&) = delete;
+ SkAutoSTArray& operator=(SkAutoSTArray&&) = delete;
+ SkAutoSTArray& operator=(const SkAutoSTArray&) = delete;
+
+ /** Initialize with no objects */
+ SkAutoSTArray() {
+ fArray = nullptr;
+ fCount = 0;
+ }
+
+ /** Allocate count number of T elements
+ */
+ SkAutoSTArray(int count) {
+ fArray = nullptr;
+ fCount = 0;
+ this->reset(count);
+ }
+
+ ~SkAutoSTArray() {
+ this->reset(0);
+ }
+
+ /** Destroys previous objects in the array and default constructs count number of objects */
+ void reset(int count) {
+ T* start = fArray;
+ T* iter = start + fCount;
+ while (iter > start) {
+ (--iter)->~T();
+ }
+
+ SkASSERT(count >= 0);
+ if (fCount != count) {
+ if (fCount > kCount) {
+ // 'fArray' was allocated last time so free it now
+ SkASSERT((T*) fStorage != fArray);
+ sk_free(fArray);
+ }
+
+ if (count > kCount) {
+ fArray = (T*) sk_malloc_throw(count, sizeof(T));
+ } else if (count > 0) {
+ fArray = (T*) fStorage;
+ } else {
+ fArray = nullptr;
+ }
+
+ fCount = count;
+ }
+
+ iter = fArray;
+ T* stop = fArray + count;
+ while (iter < stop) {
+ new (iter++) T;
+ }
+ }
+
+ /** Return the number of T elements in the array
+ */
+ int count() const { return fCount; }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray; }
+
+ T* begin() { return fArray; }
+
+ const T* begin() const { return fArray; }
+
+ T* end() { return fArray + fCount; }
+
+ const T* end() const { return fArray + fCount; }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+private:
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const int kMaxBytes = 4 * 1024;
+ static const int kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountRequested;
+#else
+ static const int kCount = kCountRequested;
+#endif
+
+ int fCount;
+ T* fArray;
+ // since we come right after fArray, fStorage should be properly aligned
+ char fStorage[kCount * sizeof(T)];
+};
+
+/** Manages an array of T elements, freeing the array in the destructor.
+ * Does NOT call any constructors/destructors on T (T must be POD).
+ */
+template <typename T> class SkAutoTMalloc {
+public:
+ /** Takes ownership of the ptr. The ptr must be a value which can be passed to sk_free. */
+ explicit SkAutoTMalloc(T* ptr = nullptr) : fPtr(ptr) {}
+
+ /** Allocates space for 'count' Ts. */
+ explicit SkAutoTMalloc(size_t count)
+ : fPtr(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr) {}
+
+ SkAutoTMalloc(SkAutoTMalloc&&) = default;
+ SkAutoTMalloc& operator=(SkAutoTMalloc&&) = default;
+
+ /** Resize the memory area pointed to by the current ptr preserving contents. */
+ void realloc(size_t count) {
+ fPtr.reset(count ? (T*)sk_realloc_throw(fPtr.release(), count * sizeof(T)) : nullptr);
+ }
+
+ /** Resize the memory area pointed to by the current ptr without preserving contents. */
+ T* reset(size_t count = 0) {
+ fPtr.reset(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr);
+ return this->get();
+ }
+
+ T* get() const { return fPtr.get(); }
+
+ operator T*() { return fPtr.get(); }
+
+ operator const T*() const { return fPtr.get(); }
+
+ T& operator[](int index) { return fPtr.get()[index]; }
+
+ const T& operator[](int index) const { return fPtr.get()[index]; }
+
+ /**
+ * Transfer ownership of the ptr to the caller, setting the internal
+ * pointer to NULL. Note that this differs from get(), which also returns
+ * the pointer, but it does not transfer ownership.
+ */
+ T* release() { return fPtr.release(); }
+
+private:
+ std::unique_ptr<T, SkFunctionWrapper<void(void*), sk_free>> fPtr;
+};
+
+template <size_t kCountRequested, typename T> class SkAutoSTMalloc {
+public:
+ SkAutoSTMalloc() : fPtr(fTStorage) {}
+
+ SkAutoSTMalloc(size_t count) {
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ }
+
+ SkAutoSTMalloc(SkAutoSTMalloc&&) = delete;
+ SkAutoSTMalloc(const SkAutoSTMalloc&) = delete;
+ SkAutoSTMalloc& operator=(SkAutoSTMalloc&&) = delete;
+ SkAutoSTMalloc& operator=(const SkAutoSTMalloc&) = delete;
+
+ ~SkAutoSTMalloc() {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ // doesn't preserve contents
+ T* reset(size_t count) {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ return fPtr;
+ }
+
+ T* get() const { return fPtr; }
+
+ operator T*() {
+ return fPtr;
+ }
+
+ operator const T*() const {
+ return fPtr;
+ }
+
+ T& operator[](int index) {
+ return fPtr[index];
+ }
+
+ const T& operator[](int index) const {
+ return fPtr[index];
+ }
+
+ // Reallocs the array, can be used to shrink the allocation. Makes no attempt to be intelligent
+ void realloc(size_t count) {
+ if (count > kCount) {
+ if (fPtr == fTStorage) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ memcpy(fPtr, fTStorage, kCount * sizeof(T));
+ } else {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else if (count) {
+ if (fPtr != fTStorage) {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else {
+ this->reset(0);
+ }
+ }
+
+private:
+ // Since we use uint32_t storage, we might be able to get more elements for free.
+ static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T);
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountWithPadding;
+#else
+ static const size_t kCount = kCountWithPadding;
+#endif
+
+ T* fPtr;
+ union {
+ uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2];
+ T fTStorage[1]; // do NOT want to invoke T::T()
+ };
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Pass the object and the storage that was offered during SkInPlaceNewCheck, and this will
+ * safely destroy (and free if it was dynamically allocated) the object.
+ */
+template <typename T> void SkInPlaceDeleteCheck(T* obj, void* storage) {
+ if (storage == obj) {
+ obj->~T();
+ } else {
+ delete obj;
+ }
+}
+
+/**
+ * Allocates T, using storage if it is large enough, and allocating on the heap (via new) if
+ * storage is not large enough.
+ *
+ * obj = SkInPlaceNewCheck<Type>(storage, size);
+ * ...
+ * SkInPlaceDeleteCheck(obj, storage);
+ */
+template<typename T, typename... Args>
+T* SkInPlaceNewCheck(void* storage, size_t size, Args&&... args) {
+ return (sizeof(T) <= size) ? new (storage) T(std::forward<Args>(args)...)
+ : new T(std::forward<Args>(args)...);
+}
+/**
+ * Reserves memory that is aligned on double and pointer boundaries.
+ * Hopefully this is sufficient for all practical purposes.
+ */
+template <size_t N> class SkAlignedSStorage {
+public:
+ SkAlignedSStorage() {}
+ SkAlignedSStorage(SkAlignedSStorage&&) = delete;
+ SkAlignedSStorage(const SkAlignedSStorage&) = delete;
+ SkAlignedSStorage& operator=(SkAlignedSStorage&&) = delete;
+ SkAlignedSStorage& operator=(const SkAlignedSStorage&) = delete;
+
+ size_t size() const { return N; }
+ void* get() { return fData; }
+ const void* get() const { return fData; }
+
+private:
+ union {
+ void* fPtr;
+ double fDouble;
+ char fData[N];
+ };
+};
+
+/**
+ * Reserves memory that is aligned on double and pointer boundaries.
+ * Hopefully this is sufficient for all practical purposes. Otherwise,
+ * we have to do some arcane trickery to determine alignment of non-POD
+ * types. Lifetime of the memory is the lifetime of the object.
+ */
+template <int N, typename T> class SkAlignedSTStorage {
+public:
+ SkAlignedSTStorage() {}
+ SkAlignedSTStorage(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage(const SkAlignedSTStorage&) = delete;
+ SkAlignedSTStorage& operator=(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage& operator=(const SkAlignedSTStorage&) = delete;
+
+ /**
+ * Returns void* because this object does not initialize the
+     * memory. Use placement new for types that require a constructor.
+ */
+ void* get() { return fStorage.get(); }
+ const void* get() const { return fStorage.get(); }
+private:
+ SkAlignedSStorage<sizeof(T)*N> fStorage;
+};
+
+using SkAutoFree = std::unique_ptr<void, SkFunctionWrapper<void(void*), sk_free>>;
+
+template<typename C, std::size_t... Is>
+constexpr auto SkMakeArrayFromIndexSequence(C c, skstd::index_sequence<Is...>)
+-> std::array<skstd::result_of_t<C(std::size_t)>, sizeof...(Is)> {
+ return {{ c(Is)... }};
+}
+
+template<size_t N, typename C> constexpr auto SkMakeArray(C c)
+-> std::array<skstd::result_of_t<C(std::size_t)>, N> {
+ return SkMakeArrayFromIndexSequence(c, skstd::make_index_sequence<N>{});
+}
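+
+// Usage sketch (illustrative; not part of the upstream header):
+//   auto squares = SkMakeArray<4>([](std::size_t i) { return i * i; });
+//   // squares is std::array<std::size_t, 4>{0, 1, 4, 9}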
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkThreadAnnotations.h b/gfx/skia/skia/include/private/SkThreadAnnotations.h
new file mode 100644
index 0000000000..fd312b5885
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkThreadAnnotations.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadAnnotations_DEFINED
+#define SkThreadAnnotations_DEFINED
+
+// The bulk of this code is cribbed from:
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if defined(__clang__) && (!defined(SWIG))
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
+#else
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
+#endif
+
+#define SK_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(capability(x))
+
+#define SK_SCOPED_CAPABILITY \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable)
+
+#define SK_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
+
+#define SK_PT_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
+
+#define SK_ACQUIRED_BEFORE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
+
+#define SK_ACQUIRED_AFTER(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
+
+#define SK_REQUIRES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__))
+
+#define SK_REQUIRES_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_shared_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_shared_capability(__VA_ARGS__))
+
+// Would be SK_RELEASE, but that is already in use by SkPostConfig.
+#define SK_RELEASE_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_capability(__VA_ARGS__))
+
+// For symmetry with SK_RELEASE_CAPABILITY.
+#define SK_RELEASE_SHARED_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_shared_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_shared_capability(__VA_ARGS__))
+
+#define SK_EXCLUDES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__))
+
+#define SK_ASSERT_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_capability(x))
+
+#define SK_ASSERT_SHARED_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_capability(x))
+
+#define SK_RETURN_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x))
+
+#define SK_NO_THREAD_SAFETY_ANALYSIS \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
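+
+// Usage sketch (illustrative; not part of the upstream header), assuming Skia's
+// SkMutex and SkAutoMutexExclusive from include/private/SkMutex.h:
+//   class Counter {
+//       SkMutex fMu;                             // a type annotated as a capability
+//       int     fCount SK_GUARDED_BY(fMu) = 0;
+//       void    inc() SK_EXCLUDES(fMu) { SkAutoMutexExclusive lock(fMu); fCount++; }
+//   };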
+
+
+#endif // SkThreadAnnotations_DEFINED
diff --git a/gfx/skia/skia/include/private/SkThreadID.h b/gfx/skia/skia/include/private/SkThreadID.h
new file mode 100644
index 0000000000..06b9be7317
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkThreadID.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadID_DEFINED
+#define SkThreadID_DEFINED
+
+#include "include/core/SkTypes.h"
+
+typedef int64_t SkThreadID;
+
+SkThreadID SkGetThreadID();
+
+const SkThreadID kIllegalThreadID = 0;
+
+#endif // SkThreadID_DEFINED
diff --git a/gfx/skia/skia/include/private/SkTo.h b/gfx/skia/skia/include/private/SkTo.h
new file mode 100644
index 0000000000..d788f7b269
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTo.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTo_DEFINED
+#define SkTo_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTFitsIn.h"
+
+template <typename D, typename S> constexpr D SkTo(S s) {
+ return SkASSERT(SkTFitsIn<D>(s)),
+ static_cast<D>(s);
+}
+
+template <typename S> constexpr int8_t SkToS8(S x) { return SkTo<int8_t>(x); }
+template <typename S> constexpr uint8_t SkToU8(S x) { return SkTo<uint8_t>(x); }
+template <typename S> constexpr int16_t SkToS16(S x) { return SkTo<int16_t>(x); }
+template <typename S> constexpr uint16_t SkToU16(S x) { return SkTo<uint16_t>(x); }
+template <typename S> constexpr int32_t SkToS32(S x) { return SkTo<int32_t>(x); }
+template <typename S> constexpr uint32_t SkToU32(S x) { return SkTo<uint32_t>(x); }
+template <typename S> constexpr int SkToInt(S x) { return SkTo<int>(x); }
+template <typename S> constexpr unsigned SkToUInt(S x) { return SkTo<unsigned>(x); }
+template <typename S> constexpr size_t SkToSizeT(S x) { return SkTo<size_t>(x); }
+
+#endif // SkTo_DEFINED
diff --git a/gfx/skia/skia/include/private/SkVx.h b/gfx/skia/skia/include/private/SkVx.h
new file mode 100644
index 0000000000..7e71da6e43
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkVx.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKVX_DEFINED
+#define SKVX_DEFINED
+
+// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
+//
+// This time we're leaning a bit less on platform-specific intrinsics and a bit
+// more on Clang/GCC vector extensions, but still keeping the option open to
+// drop in platform-specific intrinsics, actually more easily than before.
+//
+// We've also fixed a few of the caveats that used to make SkNx awkward to work
+// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size
+// and alignment[1][2] and is safe to use across translation units freely.
+//
+// [1] Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.
+// [2] Some compilers barf if we try to use N*sizeof(T), so instead we leave them at T.
+
+// Please try to keep this file independent of Skia headers.
+#include <algorithm> // std::min, std::max
+#include <cmath> // std::ceil, std::floor, std::trunc, std::round, std::sqrt, etc.
+#include <cstdint> // intXX_t
+#include <cstring> // memcpy()
+#include <initializer_list> // std::initializer_list
+
+#if defined(__SSE__)
+ #include <immintrin.h>
+#elif defined(__ARM_NEON)
+ #include <arm_neon.h>
+#endif
+
+#if !defined(__clang__) && defined(__GNUC__) && defined(__mips64)
+ // GCC 7 hits an internal compiler error when targeting MIPS64.
+ #define SKVX_ALIGNMENT
+#elif !defined(__clang__) && defined(_MSC_VER) && defined(_M_IX86)
+ // Our SkVx unit tests fail when built by MSVC for 32-bit x86.
+ #define SKVX_ALIGNMENT
+#else
+ #define SKVX_ALIGNMENT alignas(N * sizeof(T))
+#endif
+
+
+namespace skvx {
+
+// All Vec have the same simple memory layout, the same as `T vec[N]`.
+// This gives Vec a consistent ABI, letting them pass between files compiled with
+// different instruction sets (e.g. SSE2 and AVX2) without fear of ODR violation.
+template <int N, typename T>
+struct SKVX_ALIGNMENT Vec {
+ static_assert((N & (N-1)) == 0, "N must be a power of 2.");
+ static_assert(sizeof(T) >= alignof(T), "What kind of crazy T is this?");
+
+ Vec<N/2,T> lo, hi;
+
+ // Methods belong here in the class declaration of Vec only if:
+ // - they must be here, like constructors or operator[];
+ // - they'll definitely never want a specialized implementation.
+ // Other operations on Vec should be defined outside the type.
+
+ Vec() = default;
+
+ template <typename U,
+ typename=typename std::enable_if<std::is_convertible<U,T>::value>::type>
+ Vec(U x) : lo(x), hi(x) {}
+
+ Vec(std::initializer_list<T> xs) {
+ T vals[N] = {0};
+ memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));
+
+ lo = Vec<N/2,T>::Load(vals + 0);
+ hi = Vec<N/2,T>::Load(vals + N/2);
+ }
+
+ T operator[](int i) const { return i < N/2 ? lo[i] : hi[i-N/2]; }
+ T& operator[](int i) { return i < N/2 ? lo[i] : hi[i-N/2]; }
+
+ static Vec Load(const void* ptr) {
+ Vec v;
+ memcpy(&v, ptr, sizeof(Vec));
+ return v;
+ }
+ void store(void* ptr) const {
+ memcpy(ptr, this, sizeof(Vec));
+ }
+};
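+
+// Usage sketch (illustrative; not part of the upstream header), relying on the
+// operators defined later in this file:
+//   skvx::Vec<4,float> v = {1,2,3,4},
+//                      w = v * 2.0f + 1.0f;      // lanewise: {3,5,7,9}
+//   float out[4];
+//   w.store(out);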
+
+template <typename T>
+struct Vec<1,T> {
+ T val;
+
+ Vec() = default;
+
+ template <typename U,
+ typename=typename std::enable_if<std::is_convertible<U,T>::value>::type>
+ Vec(U x) : val(x) {}
+
+ Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}
+
+ T operator[](int) const { return val; }
+ T& operator[](int) { return val; }
+
+ static Vec Load(const void* ptr) {
+ Vec v;
+ memcpy(&v, ptr, sizeof(Vec));
+ return v;
+ }
+ void store(void* ptr) const {
+ memcpy(ptr, this, sizeof(Vec));
+ }
+};
+
+#if defined(__GNUC__) && !defined(__clang__) && defined(__SSE__)
+ // GCC warns about ABI changes when returning >= 32 byte vectors when -mavx is not enabled.
+ // This only happens for types like VExt whose ABI we don't care about, not for Vec itself.
+ #pragma GCC diagnostic ignored "-Wpsabi"
+#endif
+
+// Helps tamp down on the repetitive boilerplate.
+#define SIT template < typename T> static inline
+#define SINT template <int N, typename T> static inline
+#define SINTU template <int N, typename T, typename U, \
+ typename=typename std::enable_if<std::is_convertible<U,T>::value>::type> \
+ static inline
+
+template <typename D, typename S>
+static inline D bit_pun(const S& s) {
+ static_assert(sizeof(D) == sizeof(S), "");
+ D d;
+ memcpy(&d, &s, sizeof(D));
+ return d;
+}
+
+// Translate from a value type T to its corresponding Mask, the result of a comparison.
+template <typename T> struct Mask { using type = T; };
+template <> struct Mask<float > { using type = int32_t; };
+template <> struct Mask<double> { using type = int64_t; };
+template <typename T> using M = typename Mask<T>::type;
+
+// Join two Vec<N,T> into one Vec<2N,T>.
+SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
+ Vec<2*N,T> v;
+ v.lo = lo;
+ v.hi = hi;
+ return v;
+}
+
+// We have two default strategies for implementing most operations:
+// 1) lean on Clang/GCC vector extensions when available;
+// 2) recurse to scalar portable implementations when not.
+// At the end we can drop in platform-specific implementations that override either default.
+
+#if !defined(SKNX_NO_SIMD) && (defined(__clang__) || defined(__GNUC__))
+
+ // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
+ // N.B. VExt<N,T> alignment is N*alignof(T), stricter than Vec<N,T>'s alignof(T).
+ #if defined(__clang__)
+ template <int N, typename T>
+ using VExt = T __attribute__((ext_vector_type(N)));
+
+ #elif defined(__GNUC__)
+ template <int N, typename T>
+ struct VExtHelper {
+ typedef T __attribute__((vector_size(N*sizeof(T)))) type;
+ };
+
+ template <int N, typename T>
+ using VExt = typename VExtHelper<N,T>::type;
+
+ // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
+ // to_vec<N,T>() below for N=4 and T=float. This workaround seems to help...
+ static inline Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
+ #endif
+
+ SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
+ SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }
+
+ SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) + to_vext(y)); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) - to_vext(y)); }
+ SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) * to_vext(y)); }
+ SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) / to_vext(y)); }
+
+ SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) ^ to_vext(y)); }
+ SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) & to_vext(y)); }
+ SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) | to_vext(y)); }
+
+ SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
+ SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }
+
+ SINT Vec<N,T> operator<<(const Vec<N,T>& x, int bits) { return to_vec<N,T>(to_vext(x) << bits); }
+ SINT Vec<N,T> operator>>(const Vec<N,T>& x, int bits) { return to_vec<N,T>(to_vext(x) >> bits); }
+
+ SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y)); }
+ SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y)); }
+ SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y)); }
+ SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y)); }
+ SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) < to_vext(y)); }
+ SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) > to_vext(y)); }
+
+#else
+
+ // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
+ // We'll implement things portably, in a way that should be easily autovectorizable.
+
+ // N == 1 scalar implementations.
+ SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
+ SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
+ SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
+ SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }
+
+ SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
+ SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
+ SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }
+
+ SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
+ SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
+ SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }
+
+ SIT Vec<1,T> operator<<(const Vec<1,T>& x, int bits) { return x.val << bits; }
+ SIT Vec<1,T> operator>>(const Vec<1,T>& x, int bits) { return x.val >> bits; }
+
+ SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val == y.val ? ~0 : 0; }
+ SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val != y.val ? ~0 : 0; }
+ SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val <= y.val ? ~0 : 0; }
+ SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val >= y.val ? ~0 : 0; }
+ SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) { return x.val < y.val ? ~0 : 0; }
+ SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) { return x.val > y.val ? ~0 : 0; }
+
+ // All default N != 1 implementations just recurse on lo and hi halves.
+ SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo + y.lo, x.hi + y.hi); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo - y.lo, x.hi - y.hi); }
+ SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo * y.lo, x.hi * y.hi); }
+ SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo / y.lo, x.hi / y.hi); }
+
+ SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo ^ y.lo, x.hi ^ y.hi); }
+ SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo & y.lo, x.hi & y.hi); }
+ SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo | y.lo, x.hi | y.hi); }
+
+ SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
+ SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }
+
+ SINT Vec<N,T> operator<<(const Vec<N,T>& x, int bits) { return join(x.lo << bits, x.hi << bits); }
+ SINT Vec<N,T> operator>>(const Vec<N,T>& x, int bits) { return join(x.lo >> bits, x.hi >> bits); }
+
+ SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo == y.lo, x.hi == y.hi); }
+ SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo != y.lo, x.hi != y.hi); }
+ SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo <= y.lo, x.hi <= y.hi); }
+ SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo >= y.lo, x.hi >= y.hi); }
+ SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo < y.lo, x.hi < y.hi); }
+ SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo > y.lo, x.hi > y.hi); }
+#endif
+
+// Some operations we want are not expressible with Clang/GCC vector
+// extensions, so we implement them using the recursive approach.
+
+// N == 1 scalar implementations.
+SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
+ auto t_bits = bit_pun<M<T>>(t),
+ e_bits = bit_pun<M<T>>(e);
+ return bit_pun<T>( (cond.val & t_bits) | (~cond.val & e_bits) );
+}
+
+SIT bool any(const Vec<1,T>& x) { return x.val != 0; }
+SIT bool all(const Vec<1,T>& x) { return x.val != 0; }
+
+SIT T min(const Vec<1,T>& x) { return x.val; }
+SIT T max(const Vec<1,T>& x) { return x.val; }
+
+SIT Vec<1,T> min(const Vec<1,T>& x, const Vec<1,T>& y) { return std::min(x.val, y.val); }
+SIT Vec<1,T> max(const Vec<1,T>& x, const Vec<1,T>& y) { return std::max(x.val, y.val); }
+
+SIT Vec<1,T> ceil(const Vec<1,T>& x) { return std:: ceil(x.val); }
+SIT Vec<1,T> floor(const Vec<1,T>& x) { return std::floor(x.val); }
+SIT Vec<1,T> trunc(const Vec<1,T>& x) { return std::trunc(x.val); }
+SIT Vec<1,T> round(const Vec<1,T>& x) { return std::round(x.val); }
+SIT Vec<1,T> sqrt(const Vec<1,T>& x) { return std:: sqrt(x.val); }
+SIT Vec<1,T> abs(const Vec<1,T>& x) { return std:: abs(x.val); }
+
+SIT Vec<1,T> rcp(const Vec<1,T>& x) { return 1 / x.val; }
+SIT Vec<1,T> rsqrt(const Vec<1,T>& x) { return rcp(sqrt(x)); }
+SIT Vec<1,T> mad(const Vec<1,T>& f,
+ const Vec<1,T>& m,
+ const Vec<1,T>& a) { return f*m+a; }
+
+// All default N != 1 implementations just recurse on lo and hi halves.
+SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
+ return join(if_then_else(cond.lo, t.lo, e.lo),
+ if_then_else(cond.hi, t.hi, e.hi));
+}
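+
+// Illustrative use (not part of the upstream header): a lanewise absolute value,
+//   skvx::Vec<4,float> a = if_then_else(x < 0.0f, -x, x);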
+
+SINT bool any(const Vec<N,T>& x) { return any(x.lo) || any(x.hi); }
+SINT bool all(const Vec<N,T>& x) { return all(x.lo) && all(x.hi); }
+
+SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
+SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }
+
+SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return join(min(x.lo, y.lo), min(x.hi, y.hi)); }
+SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return join(max(x.lo, y.lo), max(x.hi, y.hi)); }
+
+SINT Vec<N,T> ceil(const Vec<N,T>& x) { return join( ceil(x.lo), ceil(x.hi)); }
+SINT Vec<N,T> floor(const Vec<N,T>& x) { return join(floor(x.lo), floor(x.hi)); }
+SINT Vec<N,T> trunc(const Vec<N,T>& x) { return join(trunc(x.lo), trunc(x.hi)); }
+SINT Vec<N,T> round(const Vec<N,T>& x) { return join(round(x.lo), round(x.hi)); }
+SINT Vec<N,T> sqrt(const Vec<N,T>& x) { return join( sqrt(x.lo), sqrt(x.hi)); }
+SINT Vec<N,T> abs(const Vec<N,T>& x) { return join( abs(x.lo), abs(x.hi)); }
+
+SINT Vec<N,T> rcp(const Vec<N,T>& x) { return join( rcp(x.lo), rcp(x.hi)); }
+SINT Vec<N,T> rsqrt(const Vec<N,T>& x) { return join(rsqrt(x.lo), rsqrt(x.hi)); }
+SINT Vec<N,T> mad(const Vec<N,T>& f,
+ const Vec<N,T>& m,
+ const Vec<N,T>& a) { return join(mad(f.lo, m.lo, a.lo), mad(f.hi, m.hi, a.hi)); }
+
+
+// Scalar/vector operations just splat the scalar to a vector...
+SINTU Vec<N,T> operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) + y; }
+SINTU Vec<N,T> operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) - y; }
+SINTU Vec<N,T> operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) * y; }
+SINTU Vec<N,T> operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) / y; }
+SINTU Vec<N,T> operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^ y; }
+SINTU Vec<N,T> operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) & y; }
+SINTU Vec<N,T> operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) | y; }
+SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
+SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
+SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
+SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
+SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) < y; }
+SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) > y; }
+SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
+SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
+
+// ... and same deal for vector/scalar operations.
+SINTU Vec<N,T> operator+ (const Vec<N,T>& x, U y) { return x + Vec<N,T>(y); }
+SINTU Vec<N,T> operator- (const Vec<N,T>& x, U y) { return x - Vec<N,T>(y); }
+SINTU Vec<N,T> operator* (const Vec<N,T>& x, U y) { return x * Vec<N,T>(y); }
+SINTU Vec<N,T> operator/ (const Vec<N,T>& x, U y) { return x / Vec<N,T>(y); }
+SINTU Vec<N,T> operator^ (const Vec<N,T>& x, U y) { return x ^ Vec<N,T>(y); }
+SINTU Vec<N,T> operator& (const Vec<N,T>& x, U y) { return x & Vec<N,T>(y); }
+SINTU Vec<N,T> operator| (const Vec<N,T>& x, U y) { return x | Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x < Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x > Vec<N,T>(y); }
+SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
+SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
+
+// All vector/scalar combinations for mad() with at least one vector.
+SINTU Vec<N,T> mad(U f, const Vec<N,T>& m, const Vec<N,T>& a) { return Vec<N,T>(f)*m + a; }
+SINTU Vec<N,T> mad(const Vec<N,T>& f, U m, const Vec<N,T>& a) { return f*Vec<N,T>(m) + a; }
+SINTU Vec<N,T> mad(const Vec<N,T>& f, const Vec<N,T>& m, U a) { return f*m + Vec<N,T>(a); }
+SINTU Vec<N,T> mad(const Vec<N,T>& f, U m, U a) { return f*Vec<N,T>(m) + Vec<N,T>(a); }
+SINTU Vec<N,T> mad(U f, const Vec<N,T>& m, U a) { return Vec<N,T>(f)*m + Vec<N,T>(a); }
+SINTU Vec<N,T> mad(U f, U m, const Vec<N,T>& a) { return Vec<N,T>(f)*Vec<N,T>(m) + a; }
+
+// The various op= operators, for vectors...
+SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
+SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
+SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
+SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
+SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
+SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
+SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }
+
+// ... for scalars...
+SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }
+
+// ... and for shifts.
+SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
+SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }
+
+// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
+template <typename D, typename S>
+static inline Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }
+
+template <typename D, int N, typename S>
+static inline Vec<N,D> cast(const Vec<N,S>& src) {
+#if !defined(SKNX_NO_SIMD) && defined(__clang__)
+ return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
+#else
+ return join(cast<D>(src.lo), cast<D>(src.hi));
+#endif
+}
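+
+// For example, each lane converts exactly as a C-cast would:
+//     cast<int>(skvx::Vec<4,float>{1.5f, -2.5f, 3.0f, 4.9f}) ~> {1, -2, 3, 4}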
+
+// Shuffle values from a vector pretty arbitrarily:
+// skvx::Vec<4,float> rgba = {R,G,B,A};
+// shuffle<2,1,0,3> (rgba) ~> {B,G,R,A}
+// shuffle<2,1> (rgba) ~> {B,G}
+// shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
+// shuffle<3,3,3,3> (rgba) ~> {A,A,A,A}
+// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
+template <int... Ix, int N, typename T>
+static inline Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
+#if !defined(SKNX_NO_SIMD) && defined(__clang__)
+ return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
+#else
+ return { x[Ix]... };
+#endif
+}
+
+// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
+template <int N>
+static inline Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
+ return cast<uint8_t>( (x+127)/255 );
+}
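+
+// For example, lane-wise: 255*255 = 65025 ~> 255 exactly, and the +127 term
+// rounds to nearest, so 128 ~> 1 while 127 ~> 0.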
+
+// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
+// and is always perfect when x or y is 0 or 255.
+template <int N>
+static inline Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
+ // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
+ // We happen to have historically picked (x*y+x)/256.
+ auto X = cast<uint16_t>(x),
+ Y = cast<uint16_t>(y);
+ return cast<uint8_t>( (X*Y+X)/256 );
+}
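+
+// For example, lane-wise: approx_scale(255,255) = (65025+255)/256 = 255
+// exactly, and approx_scale(128,128) = (16384+128)/256 = 64, which matches
+// div255(mull(128,128)) = 64 here.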
+
+#if !defined(SKNX_NO_SIMD) && defined(__ARM_NEON)
+ // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
+ static inline Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x,
+ const Vec<8,uint8_t>& y) {
+ return to_vec<8,uint16_t>(vmull_u8(to_vext(x),
+ to_vext(y)));
+ }
+
+ template <int N>
+ static inline typename std::enable_if<(N < 8),
+ Vec<N,uint16_t>>::type mull(const Vec<N,uint8_t>& x,
+ const Vec<N,uint8_t>& y) {
+ // N < 8 --> double up data until N == 8, returning the part we need.
+ return mull(join(x,x),
+ join(y,y)).lo;
+ }
+
+ template <int N>
+ static inline typename std::enable_if<(N > 8),
+ Vec<N,uint16_t>>::type mull(const Vec<N,uint8_t>& x,
+ const Vec<N,uint8_t>& y) {
+ // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8.
+ return join(mull(x.lo, y.lo),
+ mull(x.hi, y.hi));
+ }
+#else
+ // Nothing special when we don't have NEON... just cast up to 16-bit and multiply.
+ template <int N>
+ static inline Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
+ const Vec<N,uint8_t>& y) {
+ return cast<uint16_t>(x)
+ * cast<uint16_t>(y);
+ }
+#endif
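+
+// For example, mull(200, 200) ~> 40000 lane-wise: the product is formed at
+// full 16-bit width, where an 8-bit multiply would have overflowed.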
+
+#if !defined(SKNX_NO_SIMD)
+
+ // Platform-specific specializations and overloads can now drop in here.
+
+ #if defined(__SSE__)
+ static inline Vec<4,float> sqrt(const Vec<4,float>& x) {
+ return bit_pun<Vec<4,float>>(_mm_sqrt_ps(bit_pun<__m128>(x)));
+ }
+ static inline Vec<4,float> rsqrt(const Vec<4,float>& x) {
+ return bit_pun<Vec<4,float>>(_mm_rsqrt_ps(bit_pun<__m128>(x)));
+ }
+ static inline Vec<4,float> rcp(const Vec<4,float>& x) {
+ return bit_pun<Vec<4,float>>(_mm_rcp_ps(bit_pun<__m128>(x)));
+ }
+
+ static inline Vec<2,float> sqrt(const Vec<2,float>& x) {
+ return shuffle<0,1>( sqrt(shuffle<0,1,0,1>(x)));
+ }
+ static inline Vec<2,float> rsqrt(const Vec<2,float>& x) {
+ return shuffle<0,1>(rsqrt(shuffle<0,1,0,1>(x)));
+ }
+ static inline Vec<2,float> rcp(const Vec<2,float>& x) {
+ return shuffle<0,1>( rcp(shuffle<0,1,0,1>(x)));
+ }
+ #endif
+
+ #if defined(__SSE4_1__)
+ static inline Vec<4,float> if_then_else(const Vec<4,int >& c,
+ const Vec<4,float>& t,
+ const Vec<4,float>& e) {
+ return bit_pun<Vec<4,float>>(_mm_blendv_ps(bit_pun<__m128>(e),
+ bit_pun<__m128>(t),
+ bit_pun<__m128>(c)));
+ }
+ #elif defined(__SSE__)
+ static inline Vec<4,float> if_then_else(const Vec<4,int >& c,
+ const Vec<4,float>& t,
+ const Vec<4,float>& e) {
+ return bit_pun<Vec<4,float>>(_mm_or_ps(_mm_and_ps (bit_pun<__m128>(c),
+ bit_pun<__m128>(t)),
+ _mm_andnot_ps(bit_pun<__m128>(c),
+ bit_pun<__m128>(e))));
+ }
+ #elif defined(__ARM_NEON)
+ static inline Vec<4,float> if_then_else(const Vec<4,int >& c,
+ const Vec<4,float>& t,
+ const Vec<4,float>& e) {
+ return bit_pun<Vec<4,float>>(vbslq_f32(bit_pun<uint32x4_t> (c),
+ bit_pun<float32x4_t>(t),
+ bit_pun<float32x4_t>(e)));
+ }
+ #endif
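+
+    // For example (an illustrative sketch): with the all-bits lane mask from a
+    // comparison, c = (x < 0.0f), if_then_else(c, -x, x) is a lane-wise abs(x).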
+
+#endif // !defined(SKNX_NO_SIMD)
+
+} // namespace skvx
+
+#undef SINTU
+#undef SINT
+#undef SIT
+#undef SKVX_ALIGNMENT
+
+#endif//SKVX_DEFINED
diff --git a/gfx/skia/skia/include/private/SkWeakRefCnt.h b/gfx/skia/skia/include/private/SkWeakRefCnt.h
new file mode 100644
index 0000000000..c360697cd9
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkWeakRefCnt.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include <atomic>
+
+/** \class SkWeakRefCnt
+
+  SkWeakRefCnt is the base class for objects that may be shared by multiple
+  owners. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference, weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+        SkWeakRefCnt* myRef = strongRef;
+        myRef->weak_ref();
+        ... // strongRef->unref() may or may not be called
+        if (myRef->try_ref()) {
+            ... // use myRef
+            myRef->unref();
+        } else {
+            // myRef is in the disposed state
+        }
+        myRef->weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ ~SkWeakRefCnt() override {
+#ifdef SK_DEBUG
+ SkASSERT(getWeakCnt() == 1);
+ fWeakCnt.store(0, std::memory_order_relaxed);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the weak reference count. */
+ int32_t getWeakCnt() const {
+ return fWeakCnt.load(std::memory_order_relaxed);
+ }
+#endif
+
+private:
+ /** If fRefCnt is 0, returns 0.
+ * Otherwise increments fRefCnt, acquires, and returns the old value.
+ */
+ int32_t atomic_conditional_acquire_strong_ref() const {
+ int32_t prev = fRefCnt.load(std::memory_order_relaxed);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
+ std::memory_order_relaxed));
+ return prev;
+ }
+
+public:
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+        is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (atomic_conditional_acquire_strong_ref() != 0) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from happening before the increment.
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(getRefCnt() > 0);
+ SkASSERT(getWeakCnt() > 0);
+ // No barrier required.
+ (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then call delete on the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(getWeakCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like try_ref(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt.store(1, std::memory_order_relaxed);
+#endif
+ this->INHERITED::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt.load(std::memory_order_relaxed) == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ void internal_dispose() const override {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable std::atomic<int32_t> fWeakCnt;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/svg/SkSVGCanvas.h b/gfx/skia/skia/include/svg/SkSVGCanvas.h
new file mode 100644
index 0000000000..86e8975230
--- /dev/null
+++ b/gfx/skia/skia/include/svg/SkSVGCanvas.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSVGCanvas_DEFINED
+#define SkSVGCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+
+class SkWStream;
+
+class SK_API SkSVGCanvas {
+public:
+ enum {
+ kConvertTextToPaths_Flag = 0x01, // emit text as <path>s
+ kNoPrettyXML_Flag = 0x02, // suppress newlines and tabs in output
+ };
+
+ /**
+ * Returns a new canvas that will generate SVG commands from its draw calls, and send
+     * them to the provided stream. Ownership of the stream is not transferred, and it must
+ * remain valid for the lifetime of the returned canvas.
+ *
+ * The canvas may buffer some drawing calls, so the output is not guaranteed to be valid
+ * or complete until the canvas instance is deleted.
+ *
+ * The 'bounds' parameter defines an initial SVG viewport (viewBox attribute on the root
+ * SVG element).
+ */
+ static std::unique_ptr<SkCanvas> Make(const SkRect& bounds, SkWStream*, uint32_t flags = 0);
+};
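+
+// Illustrative usage (an editor's sketch; SkFILEWStream comes from
+// include/core/SkStream.h, which is not included by this header):
+//     SkFILEWStream stream("out.svg");
+//     auto canvas = SkSVGCanvas::Make(SkRect::MakeWH(100, 100), &stream);
+//     canvas->drawCircle(50, 50, 25, SkPaint());
+//     canvas.reset();   // deleting the canvas flushes any buffered output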
+
+#endif
diff --git a/gfx/skia/skia/include/third_party/skcms/LICENSE b/gfx/skia/skia/include/third_party/skcms/LICENSE
new file mode 100644
index 0000000000..6c7c5be360
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/skcms/LICENSE
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
diff --git a/gfx/skia/skia/include/third_party/skcms/skcms.h b/gfx/skia/skia/include/third_party/skcms/skcms.h
new file mode 100644
index 0000000000..f458784558
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/skcms/skcms.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+// skcms.h contains the entire public API for skcms.
+
+#ifndef SKCMS_API
+ #define SKCMS_API
+#endif
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A row-major 3x3 matrix (i.e., vals[row][col])
+typedef struct skcms_Matrix3x3 {
+ float vals[3][3];
+} skcms_Matrix3x3;
+
+// It is _not_ safe to alias the pointers to invert in-place.
+SKCMS_API bool skcms_Matrix3x3_invert(const skcms_Matrix3x3*, skcms_Matrix3x3*);
+SKCMS_API skcms_Matrix3x3 skcms_Matrix3x3_concat(const skcms_Matrix3x3*, const skcms_Matrix3x3*);
+
+// A row-major 3x4 matrix (i.e., vals[row][col])
+typedef struct skcms_Matrix3x4 {
+ float vals[3][4];
+} skcms_Matrix3x4;
+
+// A transfer function mapping encoded values to linear values,
+// represented by this 7-parameter piecewise function:
+//
+// linear = sign(encoded) * (c*|encoded| + f) , 0 <= |encoded| < d
+// = sign(encoded) * ((a*|encoded| + b)^g + e), d <= |encoded|
+//
+// (A simple gamma transfer function sets g to gamma and a to 1.)
+typedef struct skcms_TransferFunction {
+ float g, a,b,c,d,e,f;
+} skcms_TransferFunction;
+
+SKCMS_API float skcms_TransferFunction_eval (const skcms_TransferFunction*, float);
+SKCMS_API bool skcms_TransferFunction_invert(const skcms_TransferFunction*,
+ skcms_TransferFunction*);
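+
+// For example, an illustrative sketch of the piecewise form above (the real
+// entry point is skcms_TransferFunction_eval; fabsf/powf come from <math.h>):
+//     float s = encoded < 0 ? -1.0f : 1.0f,
+//           x = fabsf(encoded);
+//     float linear = s * (x < tf->d ? tf->c*x + tf->f
+//                                   : powf(tf->a*x + tf->b, tf->g) + tf->e);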
+
+// We can jam a couple alternate transfer function forms into skcms_TransferFunction,
+// including those matching the general forms of the SMPTE ST 2084 PQ function or HLG.
+//
+// PQish:
+// max(A + B|encoded|^C, 0)
+// linear = sign(encoded) * (------------------------) ^ F
+// D + E|encoded|^C
+SKCMS_API bool skcms_TransferFunction_makePQish(skcms_TransferFunction*,
+ float A, float B, float C,
+ float D, float E, float F);
+// HLGish:
+// { sign(encoded) * ( (R|encoded|)^G ) when 0 <= |encoded| <= 1/R
+// linear = { sign(encoded) * ( e^(a(|encoded|-c)) + b ) when 1/R < |encoded|
+SKCMS_API bool skcms_TransferFunction_makeHLGish(skcms_TransferFunction*,
+ float R, float G,
+ float a, float b, float c);
+
+// PQ mapping encoded [0,1] to linear [0,1].
+static inline bool skcms_TransferFunction_makePQ(skcms_TransferFunction* tf) {
+ return skcms_TransferFunction_makePQish(tf, -107/128.0f, 1.0f, 32/2523.0f
+ , 2413/128.0f, -2392/128.0f, 8192/1305.0f);
+}
+// HLG mapping encoded [0,1] to linear [0,12].
+static inline bool skcms_TransferFunction_makeHLG(skcms_TransferFunction* tf) {
+ return skcms_TransferFunction_makeHLGish(tf, 2.0f, 2.0f
+ , 1/0.17883277f, 0.28466892f, 0.55991073f);
+}
+
+// Unified representation of 'curv' or 'para' tag data, or a 1D table from 'mft1' or 'mft2'
+typedef union skcms_Curve {
+ struct {
+ uint32_t alias_of_table_entries;
+ skcms_TransferFunction parametric;
+ };
+ struct {
+ uint32_t table_entries;
+ const uint8_t* table_8;
+ const uint8_t* table_16;
+ };
+} skcms_Curve;
+
+typedef struct skcms_A2B {
+ // Optional: N 1D curves, followed by an N-dimensional CLUT.
+    // If input_channels == 0, these curves and CLUT are skipped.
+    // Otherwise, input_channels must be in [1, 4].
+ uint32_t input_channels;
+ skcms_Curve input_curves[4];
+ uint8_t grid_points[4];
+ const uint8_t* grid_8;
+ const uint8_t* grid_16;
+
+ // Optional: 3 1D curves, followed by a color matrix.
+    // If matrix_channels == 0, these curves and matrix are skipped.
+    // Otherwise, matrix_channels must be 3.
+ uint32_t matrix_channels;
+ skcms_Curve matrix_curves[3];
+ skcms_Matrix3x4 matrix;
+
+ // Required: 3 1D curves. Always present, and output_channels must be 3.
+ uint32_t output_channels;
+ skcms_Curve output_curves[3];
+} skcms_A2B;
+
+typedef struct skcms_ICCProfile {
+ const uint8_t* buffer;
+
+ uint32_t size;
+ uint32_t data_color_space;
+ uint32_t pcs;
+ uint32_t tag_count;
+
+ // skcms_Parse() will set commonly-used fields for you when possible:
+
+ // If we can parse red, green and blue transfer curves from the profile,
+ // trc will be set to those three curves, and has_trc will be true.
+ bool has_trc;
+ skcms_Curve trc[3];
+
+ // If this profile's gamut can be represented by a 3x3 transform to XYZD50,
+ // skcms_Parse() sets toXYZD50 to that transform and has_toXYZD50 to true.
+ bool has_toXYZD50;
+ skcms_Matrix3x3 toXYZD50;
+
+ // If the profile has a valid A2B0 tag, skcms_Parse() sets A2B to that data,
+ // and has_A2B to true.
+ bool has_A2B;
+ skcms_A2B A2B;
+} skcms_ICCProfile;
+
+// The sRGB color profile is so commonly used that we offer a canonical skcms_ICCProfile for it.
+SKCMS_API const skcms_ICCProfile* skcms_sRGB_profile(void);
+// Ditto for XYZD50, the most common profile connection space.
+SKCMS_API const skcms_ICCProfile* skcms_XYZD50_profile(void);
+
+SKCMS_API const skcms_TransferFunction* skcms_sRGB_TransferFunction(void);
+SKCMS_API const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction(void);
+SKCMS_API const skcms_TransferFunction* skcms_Identity_TransferFunction(void);
+
+// Practical equality test for two skcms_ICCProfiles.
+// The implementation is subject to change, but it will always try to answer
+// "can I substitute A for B?" and "can I skip transforming from A to B?".
+SKCMS_API bool skcms_ApproximatelyEqualProfiles(const skcms_ICCProfile* A,
+ const skcms_ICCProfile* B);
+
+// Practical test that answers: Is curve roughly the inverse of inv_tf? Typically used by passing
+// the inverse of a known parametric transfer function (like sRGB), to determine if a particular
+// curve is very close to sRGB.
+SKCMS_API bool skcms_AreApproximateInverses(const skcms_Curve* curve,
+ const skcms_TransferFunction* inv_tf);
+
+// Similar to above, answering the question for all three TRC curves of the given profile. Again,
+// passing skcms_sRGB_Inverse_TransferFunction() as inv_tf will answer the question:
+// "Does this profile have a transfer function that is very close to sRGB?"
+SKCMS_API bool skcms_TRCs_AreApproximateInverse(const skcms_ICCProfile* profile,
+ const skcms_TransferFunction* inv_tf);
+
+// Parse an ICC profile and return true if possible; otherwise return false.
+// The buffer is not copied; it must remain valid for as long as the
+// skcms_ICCProfile is in use.
+SKCMS_API bool skcms_Parse(const void*, size_t, skcms_ICCProfile*);
+
+SKCMS_API bool skcms_ApproximateCurve(const skcms_Curve* curve,
+ skcms_TransferFunction* approx,
+ float* max_error);
+
+typedef struct skcms_ICCTag {
+ uint32_t signature;
+ uint32_t type;
+ uint32_t size;
+ const uint8_t* buf;
+} skcms_ICCTag;
+
+SKCMS_API void skcms_GetTagByIndex (const skcms_ICCProfile*, uint32_t idx, skcms_ICCTag*);
+SKCMS_API bool skcms_GetTagBySignature(const skcms_ICCProfile*, uint32_t sig, skcms_ICCTag*);
+
+// These are common ICC signature values
+enum {
+ // data_color_space
+ skcms_Signature_CMYK = 0x434D594B,
+ skcms_Signature_Gray = 0x47524159,
+ skcms_Signature_RGB = 0x52474220,
+
+ // pcs
+ skcms_Signature_Lab = 0x4C616220,
+ skcms_Signature_XYZ = 0x58595A20,
+};
+
+typedef enum skcms_PixelFormat {
+ skcms_PixelFormat_A_8,
+ skcms_PixelFormat_A_8_,
+ skcms_PixelFormat_G_8,
+ skcms_PixelFormat_G_8_,
+ skcms_PixelFormat_RGBA_8888_Palette8,
+ skcms_PixelFormat_BGRA_8888_Palette8,
+
+ skcms_PixelFormat_RGB_565,
+ skcms_PixelFormat_BGR_565,
+
+ skcms_PixelFormat_ABGR_4444,
+ skcms_PixelFormat_ARGB_4444,
+
+ skcms_PixelFormat_RGB_888,
+ skcms_PixelFormat_BGR_888,
+ skcms_PixelFormat_RGBA_8888,
+ skcms_PixelFormat_BGRA_8888,
+
+ skcms_PixelFormat_RGBA_1010102,
+ skcms_PixelFormat_BGRA_1010102,
+
+ skcms_PixelFormat_RGB_161616LE, // Little-endian. Pointers must be 16-bit aligned.
+ skcms_PixelFormat_BGR_161616LE,
+ skcms_PixelFormat_RGBA_16161616LE,
+ skcms_PixelFormat_BGRA_16161616LE,
+
+ skcms_PixelFormat_RGB_161616BE, // Big-endian. Pointers must be 16-bit aligned.
+ skcms_PixelFormat_BGR_161616BE,
+ skcms_PixelFormat_RGBA_16161616BE,
+ skcms_PixelFormat_BGRA_16161616BE,
+
+ skcms_PixelFormat_RGB_hhh_Norm, // 1-5-10 half-precision float in [0,1]
+ skcms_PixelFormat_BGR_hhh_Norm, // Pointers must be 16-bit aligned.
+ skcms_PixelFormat_RGBA_hhhh_Norm,
+ skcms_PixelFormat_BGRA_hhhh_Norm,
+
+ skcms_PixelFormat_RGB_hhh, // 1-5-10 half-precision float.
+ skcms_PixelFormat_BGR_hhh, // Pointers must be 16-bit aligned.
+ skcms_PixelFormat_RGBA_hhhh,
+ skcms_PixelFormat_BGRA_hhhh,
+
+ skcms_PixelFormat_RGB_fff, // 1-8-23 single-precision float (the normal kind).
+ skcms_PixelFormat_BGR_fff, // Pointers must be 32-bit aligned.
+ skcms_PixelFormat_RGBA_ffff,
+ skcms_PixelFormat_BGRA_ffff,
+} skcms_PixelFormat;
+
+// We always store any alpha channel linearly. In the chart below, tf-1() is the inverse
+// transfer function for the given color profile (applying the transfer function linearizes).
+
+// We treat opaque as a strong requirement, not just a performance hint: we will ignore
+// any source alpha and treat it as 1.0, and will make sure that any destination alpha
+// channel is filled with the equivalent of 1.0.
+
+// We used to offer multiple types of premultiplication, but now just one, PremulAsEncoded.
+// This is the premul you're probably used to working with.
+
+typedef enum skcms_AlphaFormat {
+ skcms_AlphaFormat_Opaque, // alpha is always opaque
+ // tf-1(r), tf-1(g), tf-1(b), 1.0
+ skcms_AlphaFormat_Unpremul, // alpha and color are unassociated
+ // tf-1(r), tf-1(g), tf-1(b), a
+ skcms_AlphaFormat_PremulAsEncoded, // premultiplied while encoded
+ // tf-1(r)*a, tf-1(g)*a, tf-1(b)*a, a
+} skcms_AlphaFormat;
+
+// Convert npixels pixels from src format and color profile to dst format and color profile
+// and return true; otherwise return false. It is safe to alias dst == src if dstFmt == srcFmt.
+SKCMS_API bool skcms_Transform(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels);
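+
+// Illustrative use (an editor's sketch; icc_buf, icc_len, pixels, and npixels
+// are placeholders): convert unpremultiplied RGBA pixels from a parsed profile
+// to sRGB in place, aliasing dst == src since the formats match:
+//     skcms_ICCProfile src;
+//     if (skcms_Parse(icc_buf, icc_len, &src)) {
+//         skcms_Transform(pixels, skcms_PixelFormat_RGBA_8888,
+//                         skcms_AlphaFormat_Unpremul, &src,
+//                         pixels, skcms_PixelFormat_RGBA_8888,
+//                         skcms_AlphaFormat_Unpremul, skcms_sRGB_profile(),
+//                         npixels);
+//     }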
+
+// As skcms_Transform(), supporting srcFmts with a palette.
+SKCMS_API bool skcms_TransformWithPalette(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels,
+ const void* palette);
+
+// If profile can be used as a destination in skcms_Transform, return true. Otherwise, attempt to
+// rewrite it with approximations where reasonable. If successful, return true. If no reasonable
+// approximation exists, leave the profile unchanged and return false.
+SKCMS_API bool skcms_MakeUsableAsDestination(skcms_ICCProfile* profile);
+
+// If profile can be used as a destination with a single parametric transfer function (ie for
+// rasterization), return true. Otherwise, attempt to rewrite it with approximations where
+// reasonable. If successful, return true. If no reasonable approximation exists, leave the
+// profile unchanged and return false.
+SKCMS_API bool skcms_MakeUsableAsDestinationWithSingleCurve(skcms_ICCProfile* profile);
+
+SKCMS_API bool skcms_PrimariesToXYZD50(float rx, float ry,
+ float gx, float gy,
+ float bx, float by,
+ float wx, float wy,
+ skcms_Matrix3x3* toXYZD50);
+
+// Utilities for programmatically constructing profiles
+static inline void skcms_Init(skcms_ICCProfile* p) {
+ memset(p, 0, sizeof(*p));
+ p->data_color_space = skcms_Signature_RGB;
+ p->pcs = skcms_Signature_XYZ;
+}
+
+static inline void skcms_SetTransferFunction(skcms_ICCProfile* p,
+ const skcms_TransferFunction* tf) {
+ p->has_trc = true;
+ for (int i = 0; i < 3; ++i) {
+ p->trc[i].table_entries = 0;
+ p->trc[i].parametric = *tf;
+ }
+}
+
+static inline void skcms_SetXYZD50(skcms_ICCProfile* p, const skcms_Matrix3x3* m) {
+ p->has_toXYZD50 = true;
+ p->toXYZD50 = *m;
+}
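+
+// For example (an illustrative sketch), the utilities above can build a minimal
+// sRGB-like profile from the sRGB primaries and D65 white point:
+//     skcms_ICCProfile p;
+//     skcms_Init(&p);
+//     skcms_SetTransferFunction(&p, skcms_sRGB_TransferFunction());
+//     skcms_Matrix3x3 m;
+//     if (skcms_PrimariesToXYZD50(0.64f,0.33f, 0.30f,0.60f, 0.15f,0.06f,
+//                                 0.3127f,0.3290f, &m)) {
+//         skcms_SetXYZD50(&p, &m);
+//     }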
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/gfx/skia/skia/include/third_party/vulkan/LICENSE b/gfx/skia/skia/include/third_party/vulkan/LICENSE
new file mode 100644
index 0000000000..6c7c5be360
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/LICENSE
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vk_platform.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vk_platform.h
new file mode 100644
index 0000000000..7289299240
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vk_platform.h
@@ -0,0 +1,92 @@
+//
+// File: vk_platform.h
+//
+/*
+** Copyright (c) 2014-2017 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#ifndef VK_PLATFORM_H_
+#define VK_PLATFORM_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ * Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ * Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+ // On Windows, Vulkan commands use the stdcall convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL __stdcall
+ #define VKAPI_PTR VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
+ #error "Vulkan isn't supported for the 'armeabi' NDK ABI"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
+ // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
+ // calling convention, i.e. float parameters are passed in registers. This
+ // is true even if the rest of the application passes floats on the stack,
+ // as it does by default when compiling for the armeabi-v7a NDK ABI.
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+ #define VKAPI_CALL
+ #define VKAPI_PTR VKAPI_ATTR
+#else
+ // On other platforms, use the default calling convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL
+ #define VKAPI_PTR
+#endif
+
+#include <stddef.h>
+
+#if !defined(VK_NO_STDINT_H)
+ #if defined(_MSC_VER) && (_MSC_VER < 1600)
+ typedef signed __int8 int8_t;
+ typedef unsigned __int8 uint8_t;
+ typedef signed __int16 int16_t;
+ typedef unsigned __int16 uint16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int32 uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+ #else
+ #include <stdint.h>
+ #endif
+#endif // !defined(VK_NO_STDINT_H)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan.h
new file mode 100644
index 0000000000..4fba7c977a
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan.h
@@ -0,0 +1,79 @@
+#ifndef VULKAN_H_
+#define VULKAN_H_ 1
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include "include/third_party/vulkan/vulkan/vk_platform.h"
+#include "include/third_party/vulkan/vulkan/vulkan_core.h"
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#include "include/third_party/vulkan/vulkan/vulkan_android.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+#include "include/third_party/vulkan/vulkan/vulkan_ios.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+#include "include/third_party/vulkan/vulkan/vulkan_macos.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#include <mir_toolkit/client_types.h>
+#include "vulkan_mir.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_VI_NN
+#include "vulkan_vi.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#include <wayland-client.h>
+#include "vulkan_wayland.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#include <windows.h>
+#include "include/third_party/vulkan/vulkan/vulkan_win32.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#include <xcb/xcb.h>
+#include "include/third_party/vulkan/vulkan/vulkan_xcb.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#include <X11/Xlib.h>
+#include "vulkan_xlib.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#include <X11/Xlib.h>
+#include <X11/extensions/Xrandr.h>
+#include "vulkan_xlib_xrandr.h"
+#endif
+
+#endif // VULKAN_H_
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_android.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_android.h
new file mode 100644
index 0000000000..07aaeda28e
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_android.h
@@ -0,0 +1,126 @@
+#ifndef VULKAN_ANDROID_H_
+#define VULKAN_ANDROID_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_KHR_android_surface 1
+struct ANativeWindow;
+
+#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
+#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
+
+typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
+
+typedef struct VkAndroidSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkAndroidSurfaceCreateFlagsKHR flags;
+ struct ANativeWindow* window;
+} VkAndroidSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
+ VkInstance instance,
+ const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+
+#define VK_ANDROID_external_memory_android_hardware_buffer 1
+struct AHardwareBuffer;
+
+#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3
+#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer"
+
+typedef struct VkAndroidHardwareBufferUsageANDROID {
+ VkStructureType sType;
+ void* pNext;
+ uint64_t androidHardwareBufferUsage;
+} VkAndroidHardwareBufferUsageANDROID;
+
+typedef struct VkAndroidHardwareBufferPropertiesANDROID {
+ VkStructureType sType;
+ void* pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeBits;
+} VkAndroidHardwareBufferPropertiesANDROID;
+
+typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID {
+ VkStructureType sType;
+ void* pNext;
+ VkFormat format;
+ uint64_t externalFormat;
+ VkFormatFeatureFlags formatFeatures;
+ VkComponentMapping samplerYcbcrConversionComponents;
+ VkSamplerYcbcrModelConversion suggestedYcbcrModel;
+ VkSamplerYcbcrRange suggestedYcbcrRange;
+ VkChromaLocation suggestedXChromaOffset;
+ VkChromaLocation suggestedYChromaOffset;
+} VkAndroidHardwareBufferFormatPropertiesANDROID;
+
+typedef struct VkImportAndroidHardwareBufferInfoANDROID {
+ VkStructureType sType;
+ const void* pNext;
+ struct AHardwareBuffer* buffer;
+} VkImportAndroidHardwareBufferInfoANDROID;
+
+typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+} VkMemoryGetAndroidHardwareBufferInfoANDROID;
+
+typedef struct VkExternalFormatANDROID {
+ VkStructureType sType;
+ void* pNext;
+ uint64_t externalFormat;
+} VkExternalFormatANDROID;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID(
+ VkDevice device,
+ const struct AHardwareBuffer* buffer,
+ VkAndroidHardwareBufferPropertiesANDROID* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID(
+ VkDevice device,
+ const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
+ struct AHardwareBuffer** pBuffer);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_core.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_core.h
new file mode 100644
index 0000000000..4d8762f8e6
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_core.h
@@ -0,0 +1,7576 @@
+#ifndef VULKAN_CORE_H_
+#define VULKAN_CORE_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_VERSION_1_0 1
+#include "include/third_party/vulkan/vulkan/vk_platform.h"
+
+#define VK_MAKE_VERSION(major, minor, patch) \
+ (((major) << 22) | ((minor) << 12) | (patch))
+
+// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead.
+//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0
+
+// Vulkan 1.0 version number
+#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0
+
+#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
+#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
+// Version of this file
+#define VK_HEADER_VERSION 77
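+
+// For example: VK_MAKE_VERSION(1, 0, 77) packs to 0x0040004D, and
+// VK_VERSION_MAJOR/MINOR/PATCH recover 1, 0, and 77 from that value.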
+
+
+#define VK_NULL_HANDLE 0
+
+
+
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+
+
+#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE)
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
+#else
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
+#endif
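+
+// For example, VkBuffer below expands to an opaque struct-pointer type on
+// 64-bit targets and to a plain uint64_t on 32-bit targets, so non-dispatchable
+// handles are 64 bits wide either way.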
+
+
+
+typedef uint32_t VkFlags;
+typedef uint32_t VkBool32;
+typedef uint64_t VkDeviceSize;
+typedef uint32_t VkSampleMask;
+
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+VK_DEFINE_HANDLE(VkDevice)
+VK_DEFINE_HANDLE(VkQueue)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)
+VK_DEFINE_HANDLE(VkCommandBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
+
+#define VK_LOD_CLAMP_NONE 1000.0f
+#define VK_REMAINING_MIP_LEVELS (~0U)
+#define VK_REMAINING_ARRAY_LAYERS (~0U)
+#define VK_WHOLE_SIZE (~0ULL)
+#define VK_ATTACHMENT_UNUSED (~0U)
+#define VK_TRUE 1
+#define VK_FALSE 0
+#define VK_QUEUE_FAMILY_IGNORED (~0U)
+#define VK_SUBPASS_EXTERNAL (~0U)
+#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256
+#define VK_UUID_SIZE 16
+#define VK_MAX_MEMORY_TYPES 32
+#define VK_MAX_MEMORY_HEAPS 16
+#define VK_MAX_EXTENSION_NAME_SIZE 256
+#define VK_MAX_DESCRIPTION_SIZE 256
+
+
+typedef enum VkPipelineCacheHeaderVersion {
+ VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
+ VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1),
+ VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCacheHeaderVersion;
+
+typedef enum VkResult {
+ VK_SUCCESS = 0,
+ VK_NOT_READY = 1,
+ VK_TIMEOUT = 2,
+ VK_EVENT_SET = 3,
+ VK_EVENT_RESET = 4,
+ VK_INCOMPLETE = 5,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_DEVICE_LOST = -4,
+ VK_ERROR_MEMORY_MAP_FAILED = -5,
+ VK_ERROR_LAYER_NOT_PRESENT = -6,
+ VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+ VK_ERROR_FEATURE_NOT_PRESENT = -8,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+ VK_ERROR_TOO_MANY_OBJECTS = -10,
+ VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+ VK_ERROR_FRAGMENTED_POOL = -12,
+ VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003,
+ VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+ VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+ VK_SUBOPTIMAL_KHR = 1000001003,
+ VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+ VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
+ VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
+ VK_ERROR_INVALID_SHADER_NV = -1000012000,
+ VK_ERROR_FRAGMENTATION_EXT = -1000161000,
+ VK_ERROR_NOT_PERMITTED_EXT = -1000174001,
+ VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ VK_RESULT_BEGIN_RANGE = VK_ERROR_FRAGMENTED_POOL,
+ VK_RESULT_END_RANGE = VK_INCOMPLETE,
+ VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FRAGMENTED_POOL + 1),
+ VK_RESULT_MAX_ENUM = 0x7FFFFFFF
+} VkResult;
+
+typedef enum VkStructureType {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,
+ VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,
+ VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,
+ VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001,
+ VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002,
+ VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000,
+ VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003,
+ VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = 1000120000,
+ VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002,
+ VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000,
+ VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002,
+ VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000,
+ VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001,
+ VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000,
+ VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = 1000063000,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007,
+ VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009,
+ VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012,
+ VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,
+ VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,
+ VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,
+ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+ VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+ VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+ VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
+ VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
+ VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002,
+ VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001,
+ VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,
+ VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
+ VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001,
+ VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002,
+ VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000,
+ VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001,
+ VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002,
+ VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000,
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001,
+ VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002,
+ VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003,
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000,
+ VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,
+ VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,
+ VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000,
+ VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001,
+ VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002,
+ VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003,
+ VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004,
+ VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000,
+ VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000,
+ VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000,
+ VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001,
+ VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003,
+ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000,
+ VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001,
+ VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000,
+ VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000,
+ VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000,
+ VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001,
+ VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002,
+ VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000,
+ VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000,
+ VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001,
+ VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002,
+ VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000,
+ VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001,
+ VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002,
+ VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003,
+ VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004,
+ VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000,
+ VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004,
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000,
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001,
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002,
+ VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003,
+ VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004,
+ VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = 1000130000,
+ VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = 1000130001,
+ VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000,
+ VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001,
+ VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003,
+ VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = 1000147000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002,
+ VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000,
+ VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000,
+ VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = 1000161000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = 1000161001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = 1000161002,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = 1000161003,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = 1000161004,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000,
+ VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
+ VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
+ VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
+ VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
+ VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
+ VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
+ VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES,
+ VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+ VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
+ VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO,
+ VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES,
+ VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
+ VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
+ VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
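+
+/*
+ * Editor's sketch, not part of the upstream header: every extensible Vulkan
+ * struct is tagged with one of the VkStructureType values above through its
+ * leading sType member before it is passed to the API, e.g.
+ *
+ *     VkApplicationInfo appInfo = {0};
+ *     appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ *     appInfo.apiVersion = VK_API_VERSION_1_1;
+ *
+ * The large extension values follow the registry rule
+ * 1000000000 + (extension_number - 1) * 1000 + offset; for example
+ * 1000074000 is offset 0 of extension 75, VK_KHR_external_memory_fd.
+ */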
+
+typedef enum VkSystemAllocationScope {
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+ VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+ VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+ VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE,
+ VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1),
+ VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+ VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
+ VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1),
+ VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkInternalAllocationType;
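+
+/*
+ * Editor's sketch, not part of the upstream header: both allocation enums
+ * above are parameters of the host-allocator callbacks an application may
+ * install via VkAllocationCallbacks; PFN_vkAllocationFunction has the shape
+ *
+ *     void* (VKAPI_PTR *)(void* pUserData, size_t size, size_t alignment,
+ *                         VkSystemAllocationScope allocationScope);
+ *
+ * so an allocator can bucket allocations by expected lifetime (command,
+ * object, cache, device, or instance).
+ */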
+
+typedef enum VkFormat {
+ VK_FORMAT_UNDEFINED = 0,
+ VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+ VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+ VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+ VK_FORMAT_R8_UNORM = 9,
+ VK_FORMAT_R8_SNORM = 10,
+ VK_FORMAT_R8_USCALED = 11,
+ VK_FORMAT_R8_SSCALED = 12,
+ VK_FORMAT_R8_UINT = 13,
+ VK_FORMAT_R8_SINT = 14,
+ VK_FORMAT_R8_SRGB = 15,
+ VK_FORMAT_R8G8_UNORM = 16,
+ VK_FORMAT_R8G8_SNORM = 17,
+ VK_FORMAT_R8G8_USCALED = 18,
+ VK_FORMAT_R8G8_SSCALED = 19,
+ VK_FORMAT_R8G8_UINT = 20,
+ VK_FORMAT_R8G8_SINT = 21,
+ VK_FORMAT_R8G8_SRGB = 22,
+ VK_FORMAT_R8G8B8_UNORM = 23,
+ VK_FORMAT_R8G8B8_SNORM = 24,
+ VK_FORMAT_R8G8B8_USCALED = 25,
+ VK_FORMAT_R8G8B8_SSCALED = 26,
+ VK_FORMAT_R8G8B8_UINT = 27,
+ VK_FORMAT_R8G8B8_SINT = 28,
+ VK_FORMAT_R8G8B8_SRGB = 29,
+ VK_FORMAT_B8G8R8_UNORM = 30,
+ VK_FORMAT_B8G8R8_SNORM = 31,
+ VK_FORMAT_B8G8R8_USCALED = 32,
+ VK_FORMAT_B8G8R8_SSCALED = 33,
+ VK_FORMAT_B8G8R8_UINT = 34,
+ VK_FORMAT_B8G8R8_SINT = 35,
+ VK_FORMAT_B8G8R8_SRGB = 36,
+ VK_FORMAT_R8G8B8A8_UNORM = 37,
+ VK_FORMAT_R8G8B8A8_SNORM = 38,
+ VK_FORMAT_R8G8B8A8_USCALED = 39,
+ VK_FORMAT_R8G8B8A8_SSCALED = 40,
+ VK_FORMAT_R8G8B8A8_UINT = 41,
+ VK_FORMAT_R8G8B8A8_SINT = 42,
+ VK_FORMAT_R8G8B8A8_SRGB = 43,
+ VK_FORMAT_B8G8R8A8_UNORM = 44,
+ VK_FORMAT_B8G8R8A8_SNORM = 45,
+ VK_FORMAT_B8G8R8A8_USCALED = 46,
+ VK_FORMAT_B8G8R8A8_SSCALED = 47,
+ VK_FORMAT_B8G8R8A8_UINT = 48,
+ VK_FORMAT_B8G8R8A8_SINT = 49,
+ VK_FORMAT_B8G8R8A8_SRGB = 50,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+ VK_FORMAT_R16_UNORM = 70,
+ VK_FORMAT_R16_SNORM = 71,
+ VK_FORMAT_R16_USCALED = 72,
+ VK_FORMAT_R16_SSCALED = 73,
+ VK_FORMAT_R16_UINT = 74,
+ VK_FORMAT_R16_SINT = 75,
+ VK_FORMAT_R16_SFLOAT = 76,
+ VK_FORMAT_R16G16_UNORM = 77,
+ VK_FORMAT_R16G16_SNORM = 78,
+ VK_FORMAT_R16G16_USCALED = 79,
+ VK_FORMAT_R16G16_SSCALED = 80,
+ VK_FORMAT_R16G16_UINT = 81,
+ VK_FORMAT_R16G16_SINT = 82,
+ VK_FORMAT_R16G16_SFLOAT = 83,
+ VK_FORMAT_R16G16B16_UNORM = 84,
+ VK_FORMAT_R16G16B16_SNORM = 85,
+ VK_FORMAT_R16G16B16_USCALED = 86,
+ VK_FORMAT_R16G16B16_SSCALED = 87,
+ VK_FORMAT_R16G16B16_UINT = 88,
+ VK_FORMAT_R16G16B16_SINT = 89,
+ VK_FORMAT_R16G16B16_SFLOAT = 90,
+ VK_FORMAT_R16G16B16A16_UNORM = 91,
+ VK_FORMAT_R16G16B16A16_SNORM = 92,
+ VK_FORMAT_R16G16B16A16_USCALED = 93,
+ VK_FORMAT_R16G16B16A16_SSCALED = 94,
+ VK_FORMAT_R16G16B16A16_UINT = 95,
+ VK_FORMAT_R16G16B16A16_SINT = 96,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+ VK_FORMAT_R32_UINT = 98,
+ VK_FORMAT_R32_SINT = 99,
+ VK_FORMAT_R32_SFLOAT = 100,
+ VK_FORMAT_R32G32_UINT = 101,
+ VK_FORMAT_R32G32_SINT = 102,
+ VK_FORMAT_R32G32_SFLOAT = 103,
+ VK_FORMAT_R32G32B32_UINT = 104,
+ VK_FORMAT_R32G32B32_SINT = 105,
+ VK_FORMAT_R32G32B32_SFLOAT = 106,
+ VK_FORMAT_R32G32B32A32_UINT = 107,
+ VK_FORMAT_R32G32B32A32_SINT = 108,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+ VK_FORMAT_R64_UINT = 110,
+ VK_FORMAT_R64_SINT = 111,
+ VK_FORMAT_R64_SFLOAT = 112,
+ VK_FORMAT_R64G64_UINT = 113,
+ VK_FORMAT_R64G64_SINT = 114,
+ VK_FORMAT_R64G64_SFLOAT = 115,
+ VK_FORMAT_R64G64B64_UINT = 116,
+ VK_FORMAT_R64G64B64_SINT = 117,
+ VK_FORMAT_R64G64B64_SFLOAT = 118,
+ VK_FORMAT_R64G64B64A64_UINT = 119,
+ VK_FORMAT_R64G64B64A64_SINT = 120,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+ VK_FORMAT_D16_UNORM = 124,
+ VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+ VK_FORMAT_D32_SFLOAT = 126,
+ VK_FORMAT_S8_UINT = 127,
+ VK_FORMAT_D16_UNORM_S8_UINT = 128,
+ VK_FORMAT_D24_UNORM_S8_UINT = 129,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+ VK_FORMAT_BC2_UNORM_BLOCK = 135,
+ VK_FORMAT_BC2_SRGB_BLOCK = 136,
+ VK_FORMAT_BC3_UNORM_BLOCK = 137,
+ VK_FORMAT_BC3_SRGB_BLOCK = 138,
+ VK_FORMAT_BC4_UNORM_BLOCK = 139,
+ VK_FORMAT_BC4_SNORM_BLOCK = 140,
+ VK_FORMAT_BC5_UNORM_BLOCK = 141,
+ VK_FORMAT_BC5_SNORM_BLOCK = 142,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+ VK_FORMAT_BC7_UNORM_BLOCK = 145,
+ VK_FORMAT_BC7_SRGB_BLOCK = 146,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+ VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000,
+ VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003,
+ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004,
+ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005,
+ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006,
+ VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007,
+ VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008,
+ VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
+ VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
+ VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
+ VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017,
+ VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018,
+ VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
+ VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
+ VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
+ VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027,
+ VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028,
+ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029,
+ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030,
+ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,
+ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,
+ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033,
+ VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
+ VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
+ VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
+ VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
+ VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
+ VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
+ VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
+ VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
+ VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,
+ VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
+ VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16,
+ VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
+ VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
+ VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
+ VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16,
+ VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16,
+ VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
+ VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
+ VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM,
+ VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+ VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED,
+ VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1),
+ VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
+} VkFormat;
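+
+/*
+ * Editor's sketch, not part of the upstream header: support for a VkFormat is
+ * a per-physical-device property and must be queried rather than assumed:
+ *
+ *     VkFormatProperties props;
+ *     vkGetPhysicalDeviceFormatProperties(physicalDevice,
+ *                                         VK_FORMAT_B8G8R8A8_UNORM, &props);
+ *
+ * (physicalDevice is assumed to be a previously enumerated handle; the
+ * returned masks are tested against VkFormatFeatureFlagBits, defined below.)
+ */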
+
+typedef enum VkImageType {
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2,
+ VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+ VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum VkImageTiling {
+ VK_IMAGE_TILING_OPTIMAL = 0,
+ VK_IMAGE_TILING_LINEAR = 1,
+ VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR,
+ VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1),
+ VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum VkPhysicalDeviceType {
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+ VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+ VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU,
+ VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1),
+ VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+ VK_QUERY_TYPE_TIMESTAMP = 2,
+ VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+ VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
+ VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
+ VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+ VK_SHARING_MODE_EXCLUSIVE = 0,
+ VK_SHARING_MODE_CONCURRENT = 1,
+ VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE,
+ VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT,
+ VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1),
+ VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+ VK_IMAGE_LAYOUT_UNDEFINED = 0,
+ VK_IMAGE_LAYOUT_GENERAL = 1,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+ VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+ VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000,
+ VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+ VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000,
+ VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
+ VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
+ VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED,
+ VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1),
+ VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
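+
+/*
+ * Editor's sketch, not part of the upstream header: images move between the
+ * VkImageLayout values above through barriers recorded on a command buffer.
+ * A minimal transfer-destination to shader-read transition might look like:
+ *
+ *     VkImageMemoryBarrier barrier = {0};
+ *     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ *     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ *     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ *     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ *     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ *     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.image = image;   (assumed: an existing VkImage)
+ *     barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ *     barrier.subresourceRange.levelCount = 1;
+ *     barrier.subresourceRange.layerCount = 1;
+ *     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ *                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
+ *                          0, NULL, 0, NULL, 1, &barrier);
+ */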
+
+typedef enum VkImageViewType {
+ VK_IMAGE_VIEW_TYPE_1D = 0,
+ VK_IMAGE_VIEW_TYPE_2D = 1,
+ VK_IMAGE_VIEW_TYPE_3D = 2,
+ VK_IMAGE_VIEW_TYPE_CUBE = 3,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+ VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1),
+ VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+ VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+ VK_COMPONENT_SWIZZLE_ZERO = 1,
+ VK_COMPONENT_SWIZZLE_ONE = 2,
+ VK_COMPONENT_SWIZZLE_R = 3,
+ VK_COMPONENT_SWIZZLE_G = 4,
+ VK_COMPONENT_SWIZZLE_B = 5,
+ VK_COMPONENT_SWIZZLE_A = 6,
+ VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A,
+ VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1),
+ VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+ VK_VERTEX_INPUT_RATE_VERTEX = 0,
+ VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+ VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX,
+ VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE,
+ VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1),
+ VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
+typedef enum VkPrimitiveTopology {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,
+ VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+ VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1),
+ VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF
+} VkPrimitiveTopology;
+
+typedef enum VkPolygonMode {
+ VK_POLYGON_MODE_FILL = 0,
+ VK_POLYGON_MODE_LINE = 1,
+ VK_POLYGON_MODE_POINT = 2,
+ VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000,
+ VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL,
+ VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT,
+ VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1),
+ VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPolygonMode;
+
+typedef enum VkFrontFace {
+ VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+ VK_FRONT_FACE_CLOCKWISE = 1,
+ VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE,
+ VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1),
+ VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF
+} VkFrontFace;
+
+typedef enum VkCompareOp {
+ VK_COMPARE_OP_NEVER = 0,
+ VK_COMPARE_OP_LESS = 1,
+ VK_COMPARE_OP_EQUAL = 2,
+ VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+ VK_COMPARE_OP_GREATER = 4,
+ VK_COMPARE_OP_NOT_EQUAL = 5,
+ VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+ VK_COMPARE_OP_ALWAYS = 7,
+ VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER,
+ VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS,
+ VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1),
+ VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkCompareOp;
+
+typedef enum VkStencilOp {
+ VK_STENCIL_OP_KEEP = 0,
+ VK_STENCIL_OP_ZERO = 1,
+ VK_STENCIL_OP_REPLACE = 2,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+ VK_STENCIL_OP_INVERT = 5,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,
+ VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
+ VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP,
+ VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1),
+ VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF
+} VkStencilOp;
+
+typedef enum VkLogicOp {
+ VK_LOGIC_OP_CLEAR = 0,
+ VK_LOGIC_OP_AND = 1,
+ VK_LOGIC_OP_AND_REVERSE = 2,
+ VK_LOGIC_OP_COPY = 3,
+ VK_LOGIC_OP_AND_INVERTED = 4,
+ VK_LOGIC_OP_NO_OP = 5,
+ VK_LOGIC_OP_XOR = 6,
+ VK_LOGIC_OP_OR = 7,
+ VK_LOGIC_OP_NOR = 8,
+ VK_LOGIC_OP_EQUIVALENT = 9,
+ VK_LOGIC_OP_INVERT = 10,
+ VK_LOGIC_OP_OR_REVERSE = 11,
+ VK_LOGIC_OP_COPY_INVERTED = 12,
+ VK_LOGIC_OP_OR_INVERTED = 13,
+ VK_LOGIC_OP_NAND = 14,
+ VK_LOGIC_OP_SET = 15,
+ VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR,
+ VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
+ VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1),
+ VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF
+} VkLogicOp;
+
+typedef enum VkBlendFactor {
+ VK_BLEND_FACTOR_ZERO = 0,
+ VK_BLEND_FACTOR_ONE = 1,
+ VK_BLEND_FACTOR_SRC_COLOR = 2,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+ VK_BLEND_FACTOR_DST_COLOR = 4,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+ VK_BLEND_FACTOR_SRC_ALPHA = 6,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ VK_BLEND_FACTOR_DST_ALPHA = 8,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+ VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+ VK_BLEND_FACTOR_SRC1_COLOR = 15,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+ VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
+ VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+ VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1),
+ VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+ VK_BLEND_OP_ADD = 0,
+ VK_BLEND_OP_SUBTRACT = 1,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+ VK_BLEND_OP_MIN = 3,
+ VK_BLEND_OP_MAX = 4,
+ VK_BLEND_OP_ZERO_EXT = 1000148000,
+ VK_BLEND_OP_SRC_EXT = 1000148001,
+ VK_BLEND_OP_DST_EXT = 1000148002,
+ VK_BLEND_OP_SRC_OVER_EXT = 1000148003,
+ VK_BLEND_OP_DST_OVER_EXT = 1000148004,
+ VK_BLEND_OP_SRC_IN_EXT = 1000148005,
+ VK_BLEND_OP_DST_IN_EXT = 1000148006,
+ VK_BLEND_OP_SRC_OUT_EXT = 1000148007,
+ VK_BLEND_OP_DST_OUT_EXT = 1000148008,
+ VK_BLEND_OP_SRC_ATOP_EXT = 1000148009,
+ VK_BLEND_OP_DST_ATOP_EXT = 1000148010,
+ VK_BLEND_OP_XOR_EXT = 1000148011,
+ VK_BLEND_OP_MULTIPLY_EXT = 1000148012,
+ VK_BLEND_OP_SCREEN_EXT = 1000148013,
+ VK_BLEND_OP_OVERLAY_EXT = 1000148014,
+ VK_BLEND_OP_DARKEN_EXT = 1000148015,
+ VK_BLEND_OP_LIGHTEN_EXT = 1000148016,
+ VK_BLEND_OP_COLORDODGE_EXT = 1000148017,
+ VK_BLEND_OP_COLORBURN_EXT = 1000148018,
+ VK_BLEND_OP_HARDLIGHT_EXT = 1000148019,
+ VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020,
+ VK_BLEND_OP_DIFFERENCE_EXT = 1000148021,
+ VK_BLEND_OP_EXCLUSION_EXT = 1000148022,
+ VK_BLEND_OP_INVERT_EXT = 1000148023,
+ VK_BLEND_OP_INVERT_RGB_EXT = 1000148024,
+ VK_BLEND_OP_LINEARDODGE_EXT = 1000148025,
+ VK_BLEND_OP_LINEARBURN_EXT = 1000148026,
+ VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027,
+ VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028,
+ VK_BLEND_OP_PINLIGHT_EXT = 1000148029,
+ VK_BLEND_OP_HARDMIX_EXT = 1000148030,
+ VK_BLEND_OP_HSL_HUE_EXT = 1000148031,
+ VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032,
+ VK_BLEND_OP_HSL_COLOR_EXT = 1000148033,
+ VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034,
+ VK_BLEND_OP_PLUS_EXT = 1000148035,
+ VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036,
+ VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037,
+ VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038,
+ VK_BLEND_OP_MINUS_EXT = 1000148039,
+ VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040,
+ VK_BLEND_OP_CONTRAST_EXT = 1000148041,
+ VK_BLEND_OP_INVERT_OVG_EXT = 1000148042,
+ VK_BLEND_OP_RED_EXT = 1000148043,
+ VK_BLEND_OP_GREEN_EXT = 1000148044,
+ VK_BLEND_OP_BLUE_EXT = 1000148045,
+ VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD,
+ VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX,
+ VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1),
+ VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF
+} VkBlendOp;
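+
+/*
+ * Editor's sketch, not part of the upstream header: the factor/op pairs above
+ * express the classic "source over" blend,
+ * dst.rgb = src.rgb * src.a + dst.rgb * (1 - src.a), as
+ *
+ *     VkPipelineColorBlendAttachmentState att = {0};
+ *     att.blendEnable = VK_TRUE;
+ *     att.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ *     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ *     att.colorBlendOp = VK_BLEND_OP_ADD;
+ *     att.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ *     att.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ *     att.alphaBlendOp = VK_BLEND_OP_ADD;
+ *     att.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
+ *                        | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+ *
+ * (the advanced _EXT ops require VK_EXT_blend_operation_advanced.)
+ */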
+
+typedef enum VkDynamicState {
+ VK_DYNAMIC_STATE_VIEWPORT = 0,
+ VK_DYNAMIC_STATE_SCISSOR = 1,
+ VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+ VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+ VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000,
+ VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000,
+ VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000,
+ VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
+ VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
+} VkDynamicState;
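+
+/*
+ * Editor's sketch, not part of the upstream header: a state named in
+ * VkPipelineDynamicStateCreateInfo::pDynamicStates is ignored at pipeline
+ * creation and set at record time instead, e.g. for VK_DYNAMIC_STATE_VIEWPORT:
+ *
+ *     VkViewport vp = { 0.0f, 0.0f, 1920.0f, 1080.0f, 0.0f, 1.0f };
+ *     vkCmdSetViewport(cmd, 0, 1, &vp);
+ *
+ * (cmd is assumed to be a command buffer in the recording state; the
+ * dimensions are illustrative.)
+ */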
+
+typedef enum VkFilter {
+ VK_FILTER_NEAREST = 0,
+ VK_FILTER_LINEAR = 1,
+ VK_FILTER_CUBIC_IMG = 1000015000,
+ VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST,
+ VK_FILTER_END_RANGE = VK_FILTER_LINEAR,
+ VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1),
+ VK_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+ VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+ VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,
+ VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+ VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+ VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1),
+ VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerMipmapMode;
+
+typedef enum VkSamplerAddressMode {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+ VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+ VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
+ VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerAddressMode;
+
+typedef enum VkBorderColor {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+ VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+ VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef enum VkDescriptorType {
+ VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+ VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
+ VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+ VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
+ VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorType;
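+
+/*
+ * Editor's sketch, not part of the upstream header: each VkDescriptorType is
+ * used when declaring bindings in a descriptor set layout, e.g. a single
+ * combined image sampler visible to the fragment stage:
+ *
+ *     VkDescriptorSetLayoutBinding binding = {0};
+ *     binding.binding = 0;
+ *     binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ *     binding.descriptorCount = 1;
+ *     binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ */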
+
+typedef enum VkAttachmentLoadOp {
+ VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+ VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+ VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1),
+ VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+ VK_ATTACHMENT_STORE_OP_STORE = 0,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+ VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
+ VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1),
+ VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentStoreOp;
+
+typedef enum VkPipelineBindPoint {
+ VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+ VK_PIPELINE_BIND_POINT_COMPUTE = 1,
+ VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
+ VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
+ VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineBindPoint;
+
+typedef enum VkCommandBufferLevel {
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,
+ VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+ VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1),
+ VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferLevel;
+
+typedef enum VkIndexType {
+ VK_INDEX_TYPE_UINT16 = 0,
+ VK_INDEX_TYPE_UINT32 = 1,
+ VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
+ VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
+ VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
+ VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkIndexType;
+
+typedef enum VkSubpassContents {
+ VK_SUBPASS_CONTENTS_INLINE = 0,
+ VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+ VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE,
+ VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
+ VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1),
+ VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassContents;
+
+typedef enum VkObjectType {
+ VK_OBJECT_TYPE_UNKNOWN = 0,
+ VK_OBJECT_TYPE_INSTANCE = 1,
+ VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2,
+ VK_OBJECT_TYPE_DEVICE = 3,
+ VK_OBJECT_TYPE_QUEUE = 4,
+ VK_OBJECT_TYPE_SEMAPHORE = 5,
+ VK_OBJECT_TYPE_COMMAND_BUFFER = 6,
+ VK_OBJECT_TYPE_FENCE = 7,
+ VK_OBJECT_TYPE_DEVICE_MEMORY = 8,
+ VK_OBJECT_TYPE_BUFFER = 9,
+ VK_OBJECT_TYPE_IMAGE = 10,
+ VK_OBJECT_TYPE_EVENT = 11,
+ VK_OBJECT_TYPE_QUERY_POOL = 12,
+ VK_OBJECT_TYPE_BUFFER_VIEW = 13,
+ VK_OBJECT_TYPE_IMAGE_VIEW = 14,
+ VK_OBJECT_TYPE_SHADER_MODULE = 15,
+ VK_OBJECT_TYPE_PIPELINE_CACHE = 16,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17,
+ VK_OBJECT_TYPE_RENDER_PASS = 18,
+ VK_OBJECT_TYPE_PIPELINE = 19,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20,
+ VK_OBJECT_TYPE_SAMPLER = 21,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET = 23,
+ VK_OBJECT_TYPE_FRAMEBUFFER = 24,
+ VK_OBJECT_TYPE_COMMAND_POOL = 25,
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000,
+ VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
+ VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000,
+ VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001,
+ VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000,
+ VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000,
+ VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001,
+ VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000,
+ VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE,
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION,
+ VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN,
+ VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL,
+ VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1),
+ VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkObjectType;
+
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef enum VkFormatFeatureFlagBits {
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+ VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,
+ VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000,
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000,
+ VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000,
+ VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000,
+ VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = 0x00010000,
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT,
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
+ VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT,
+ VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT,
+ VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT,
+ VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
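+
+/*
+ * Editor's sketch, not part of the upstream header: these bits are tested
+ * against the per-tiling masks returned by vkGetPhysicalDeviceFormatProperties
+ * (sketched after VkFormat above); e.g. linear filtering of an optimally
+ * tiled image is only legal when
+ *
+ *     (props.optimalTilingFeatures &
+ *      VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0
+ */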
+
+typedef enum VkImageUsageFlagBits {
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+ VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+ VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
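+
+/*
+ * Editor's sketch, not part of the upstream header: usage must be declared up
+ * front in VkImageCreateInfo::usage; a texture uploaded once and then sampled
+ * typically requests
+ *
+ *     info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ *
+ * (info is assumed to be a VkImageCreateInfo being filled in; using an image
+ * in a way it was not created for is invalid.)
+ */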
+
+typedef enum VkImageCreateFlagBits {
+ VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,
+ VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400,
+ VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040,
+ VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020,
+ VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080,
+ VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100,
+ VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800,
+ VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200,
+ VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000,
+ VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT,
+ VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT,
+ VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT,
+ VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT,
+ VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT,
+ VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT,
+ VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef enum VkSampleCountFlagBits {
+ VK_SAMPLE_COUNT_1_BIT = 0x00000001,
+ VK_SAMPLE_COUNT_2_BIT = 0x00000002,
+ VK_SAMPLE_COUNT_4_BIT = 0x00000004,
+ VK_SAMPLE_COUNT_8_BIT = 0x00000008,
+ VK_SAMPLE_COUNT_16_BIT = 0x00000010,
+ VK_SAMPLE_COUNT_32_BIT = 0x00000020,
+ VK_SAMPLE_COUNT_64_BIT = 0x00000040,
+ VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSampleCountFlagBits;
+typedef VkFlags VkSampleCountFlags;
+
+typedef enum VkQueueFlagBits {
+ VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+ VK_QUEUE_COMPUTE_BIT = 0x00000002,
+ VK_QUEUE_TRANSFER_BIT = 0x00000004,
+ VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,
+ VK_QUEUE_PROTECTED_BIT = 0x00000010,
+ VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
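+
+/*
+ * Editor's sketch, not part of the upstream header: queue capabilities are
+ * discovered per family and matched against the flags above, e.g. to find a
+ * graphics-capable family:
+ *
+ *     uint32_t count = 0, family = UINT32_MAX;
+ *     vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, NULL);
+ *     VkQueueFamilyProperties fams[16];   (fixed cap for illustration only)
+ *     if (count > 16) count = 16;
+ *     vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, fams);
+ *     for (uint32_t i = 0; i < count; ++i)
+ *         if (fams[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { family = i; break; }
+ */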
+
+typedef enum VkMemoryPropertyFlagBits {
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+ VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020,
+ VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
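+
+/*
+ * Editor's sketch, not part of the upstream header: allocations pick a memory
+ * type whose propertyFlags contain the requested bits and whose index is
+ * permitted by the resource's VkMemoryRequirements::memoryTypeBits:
+ *
+ *     uint32_t findMemoryType(const VkPhysicalDeviceMemoryProperties* mem,
+ *                             uint32_t typeBits, VkMemoryPropertyFlags want) {
+ *         for (uint32_t i = 0; i < mem->memoryTypeCount; ++i)
+ *             if ((typeBits & (1u << i)) &&
+ *                 (mem->memoryTypes[i].propertyFlags & want) == want)
+ *                 return i;
+ *         return UINT32_MAX;   (no suitable type; caller must handle this)
+ *     }
+ */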
+
+typedef enum VkMemoryHeapFlagBits {
+ VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,
+ VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002,
+ VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT,
+ VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+typedef VkFlags VkDeviceCreateFlags;
+
+typedef enum VkDeviceQueueCreateFlagBits {
+ VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001,
+ VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDeviceQueueCreateFlagBits;
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef enum VkPipelineStageFlagBits {
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+ VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,
+ VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+ VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000,
+ VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+typedef VkFlags VkMemoryMapFlags;
+
+typedef enum VkImageAspectFlagBits {
+ VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+ VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+ VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+ VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+ VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010,
+ VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020,
+ VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040,
+ VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT,
+ VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT,
+ VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT,
+ VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageAspectFlagBits;
+typedef VkFlags VkImageAspectFlags;
+
+typedef enum VkSparseImageFormatFlagBits {
+ VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,
+ VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+ VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,
+ VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef enum VkSparseMemoryBindFlagBits {
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,
+ VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef enum VkFenceCreateFlagBits {
+ VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+ VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef VkFlags VkEventCreateFlags;
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+ VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum VkQueryResultFlagBits {
+ VK_QUERY_RESULT_64_BIT = 0x00000001,
+ VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+ VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+ VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef enum VkBufferCreateFlagBits {
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008,
+ VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef enum VkBufferUsageFlagBits {
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+ VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
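+
+/*
+ * Editor's sketch, not part of the upstream header: buffer usage is likewise
+ * declared at creation time, e.g. a vertex buffer filled by a staging copy:
+ *
+ *     VkBufferCreateInfo info = {0};
+ *     info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ *     info.size = 65536;   (size in bytes; illustrative)
+ *     info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ *                  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ *     info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ */
+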
+typedef VkFlags VkBufferViewCreateFlags;
+typedef VkFlags VkImageViewCreateFlags;
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef enum VkPipelineCreateFlagBits {
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+ VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,
+ VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010,
+ VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,
+ VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,
+ VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+typedef VkFlags VkPipelineShaderStageCreateFlags;
+
+typedef enum VkShaderStageFlagBits {
+ VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004,
+ VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,
+ VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,
+ VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
+ VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F,
+ VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+ VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkShaderStageFlagBits;
+typedef VkFlags VkPipelineVertexInputStateCreateFlags;
+typedef VkFlags VkPipelineInputAssemblyStateCreateFlags;
+typedef VkFlags VkPipelineTessellationStateCreateFlags;
+typedef VkFlags VkPipelineViewportStateCreateFlags;
+typedef VkFlags VkPipelineRasterizationStateCreateFlags;
+
+typedef enum VkCullModeFlagBits {
+ VK_CULL_MODE_NONE = 0,
+ VK_CULL_MODE_FRONT_BIT = 0x00000001,
+ VK_CULL_MODE_BACK_BIT = 0x00000002,
+ VK_CULL_MODE_FRONT_AND_BACK = 0x00000003,
+ VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCullModeFlagBits;
+typedef VkFlags VkCullModeFlags;
+typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+typedef VkFlags VkPipelineColorBlendStateCreateFlags;
+
+typedef enum VkColorComponentFlagBits {
+ VK_COLOR_COMPONENT_R_BIT = 0x00000001,
+ VK_COLOR_COMPONENT_G_BIT = 0x00000002,
+ VK_COLOR_COMPONENT_B_BIT = 0x00000004,
+ VK_COLOR_COMPONENT_A_BIT = 0x00000008,
+ VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkColorComponentFlagBits;
+typedef VkFlags VkColorComponentFlags;
+typedef VkFlags VkPipelineDynamicStateCreateFlags;
+typedef VkFlags VkPipelineLayoutCreateFlags;
+typedef VkFlags VkShaderStageFlags;
+typedef VkFlags VkSamplerCreateFlags;
+
+typedef enum VkDescriptorSetLayoutCreateFlagBits {
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001,
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = 0x00000002,
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorSetLayoutCreateFlagBits;
+typedef VkFlags VkDescriptorSetLayoutCreateFlags;
+
+typedef enum VkDescriptorPoolCreateFlagBits {
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,
+ VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = 0x00000002,
+ VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorPoolCreateFlagBits;
+typedef VkFlags VkDescriptorPoolCreateFlags;
+typedef VkFlags VkDescriptorPoolResetFlags;
+typedef VkFlags VkFramebufferCreateFlags;
+typedef VkFlags VkRenderPassCreateFlags;
+
+typedef enum VkAttachmentDescriptionFlagBits {
+ VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001,
+ VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentDescriptionFlagBits;
+typedef VkFlags VkAttachmentDescriptionFlags;
+
+typedef enum VkSubpassDescriptionFlagBits {
+ VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001,
+ VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002,
+ VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassDescriptionFlagBits;
+typedef VkFlags VkSubpassDescriptionFlags;
+
+typedef enum VkAccessFlagBits {
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001,
+ VK_ACCESS_INDEX_READ_BIT = 0x00000002,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004,
+ VK_ACCESS_UNIFORM_READ_BIT = 0x00000008,
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010,
+ VK_ACCESS_SHADER_READ_BIT = 0x00000020,
+ VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400,
+ VK_ACCESS_TRANSFER_READ_BIT = 0x00000800,
+ VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000,
+ VK_ACCESS_HOST_READ_BIT = 0x00002000,
+ VK_ACCESS_HOST_WRITE_BIT = 0x00004000,
+ VK_ACCESS_MEMORY_READ_BIT = 0x00008000,
+ VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,
+ VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000,
+ VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000,
+ VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkAccessFlagBits;
+typedef VkFlags VkAccessFlags;
+
+typedef enum VkDependencyFlagBits {
+ VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,
+ VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004,
+ VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002,
+ VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT,
+ VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT,
+ VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDependencyFlagBits;
+typedef VkFlags VkDependencyFlags;
+
+typedef enum VkCommandPoolCreateFlagBits {
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+ VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004,
+ VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolCreateFlagBits;
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef enum VkCommandPoolResetFlagBits {
+ VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+ VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolResetFlagBits;
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef enum VkCommandBufferUsageFlagBits {
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,
+ VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferUsageFlagBits;
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef enum VkQueryControlFlagBits {
+ VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,
+ VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryControlFlagBits;
+typedef VkFlags VkQueryControlFlags;
+
+typedef enum VkCommandBufferResetFlagBits {
+ VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+ VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferResetFlagBits;
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef enum VkStencilFaceFlagBits {
+ VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
+ VK_STENCIL_FACE_BACK_BIT = 0x00000002,
+ VK_STENCIL_FRONT_AND_BACK = 0x00000003,
+ VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkStencilFaceFlagBits;
+typedef VkFlags VkStencilFaceFlags;
+
+typedef struct VkApplicationInfo {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pApplicationName;
+ uint32_t applicationVersion;
+ const char* pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkInstanceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkInstanceCreateFlags flags;
+ const VkApplicationInfo* pApplicationInfo;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+ void* pUserData,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+ void* pUserData,
+ void* pOriginal,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+ void* pUserData,
+ void* pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef struct VkAllocationCallbacks {
+ void* pUserData;
+ PFN_vkAllocationFunction pfnAllocation;
+ PFN_vkReallocationFunction pfnReallocation;
+ PFN_vkFreeFunction pfnFree;
+ PFN_vkInternalAllocationNotification pfnInternalAllocation;
+ PFN_vkInternalFreeNotification pfnInternalFree;
+} VkAllocationCallbacks;
+
+typedef struct VkPhysicalDeviceFeatures {
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSrcBlend;
+ VkBool32 logicOp;
+ VkBool32 multiDrawIndirect;
+ VkBool32 drawIndirectFirstInstance;
+ VkBool32 depthClamp;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 alphaToOne;
+ VkBool32 multiViewport;
+ VkBool32 samplerAnisotropy;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 occlusionQueryPrecise;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexPipelineStoresAndAtomics;
+ VkBool32 fragmentStoresAndAtomics;
+ VkBool32 shaderTessellationAndGeometryPointSize;
+ VkBool32 shaderImageGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageImageReadWithoutFormat;
+ VkBool32 shaderStorageImageWriteWithoutFormat;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLod;
+ VkBool32 sparseBinding;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyAliased;
+ VkBool32 variableMultisampleRate;
+ VkBool32 inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkFormatProperties {
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+ VkFormatFeatureFlags bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkExtent3D {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+} VkExtent3D;
+
+typedef struct VkImageFormatProperties {
+ VkExtent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArrayLayers;
+ VkSampleCountFlags sampleCounts;
+ VkDeviceSize maxResourceSize;
+} VkImageFormatProperties;
+
+typedef struct VkPhysicalDeviceLimits {
+ uint32_t maxImageDimension1D;
+ uint32_t maxImageDimension2D;
+ uint32_t maxImageDimension3D;
+ uint32_t maxImageDimensionCube;
+ uint32_t maxImageArrayLayers;
+ uint32_t maxTexelBufferElements;
+ uint32_t maxUniformBufferRange;
+ uint32_t maxStorageBufferRange;
+ uint32_t maxPushConstantsSize;
+ uint32_t maxMemoryAllocationCount;
+ uint32_t maxSamplerAllocationCount;
+ VkDeviceSize bufferImageGranularity;
+ VkDeviceSize sparseAddressSpaceSize;
+ uint32_t maxBoundDescriptorSets;
+ uint32_t maxPerStageDescriptorSamplers;
+ uint32_t maxPerStageDescriptorUniformBuffers;
+ uint32_t maxPerStageDescriptorStorageBuffers;
+ uint32_t maxPerStageDescriptorSampledImages;
+ uint32_t maxPerStageDescriptorStorageImages;
+ uint32_t maxPerStageDescriptorInputAttachments;
+ uint32_t maxPerStageResources;
+ uint32_t maxDescriptorSetSamplers;
+ uint32_t maxDescriptorSetUniformBuffers;
+ uint32_t maxDescriptorSetUniformBuffersDynamic;
+ uint32_t maxDescriptorSetStorageBuffers;
+ uint32_t maxDescriptorSetStorageBuffersDynamic;
+ uint32_t maxDescriptorSetSampledImages;
+ uint32_t maxDescriptorSetStorageImages;
+ uint32_t maxDescriptorSetInputAttachments;
+ uint32_t maxVertexInputAttributes;
+ uint32_t maxVertexInputBindings;
+ uint32_t maxVertexInputAttributeOffset;
+ uint32_t maxVertexInputBindingStride;
+ uint32_t maxVertexOutputComponents;
+ uint32_t maxTessellationGenerationLevel;
+ uint32_t maxTessellationPatchSize;
+ uint32_t maxTessellationControlPerVertexInputComponents;
+ uint32_t maxTessellationControlPerVertexOutputComponents;
+ uint32_t maxTessellationControlPerPatchOutputComponents;
+ uint32_t maxTessellationControlTotalOutputComponents;
+ uint32_t maxTessellationEvaluationInputComponents;
+ uint32_t maxTessellationEvaluationOutputComponents;
+ uint32_t maxGeometryShaderInvocations;
+ uint32_t maxGeometryInputComponents;
+ uint32_t maxGeometryOutputComponents;
+ uint32_t maxGeometryOutputVertices;
+ uint32_t maxGeometryTotalOutputComponents;
+ uint32_t maxFragmentInputComponents;
+ uint32_t maxFragmentOutputAttachments;
+ uint32_t maxFragmentDualSrcAttachments;
+ uint32_t maxFragmentCombinedOutputResources;
+ uint32_t maxComputeSharedMemorySize;
+ uint32_t maxComputeWorkGroupCount[3];
+ uint32_t maxComputeWorkGroupInvocations;
+ uint32_t maxComputeWorkGroupSize[3];
+ uint32_t subPixelPrecisionBits;
+ uint32_t subTexelPrecisionBits;
+ uint32_t mipmapPrecisionBits;
+ uint32_t maxDrawIndexedIndexValue;
+ uint32_t maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32_t maxViewports;
+ uint32_t maxViewportDimensions[2];
+ float viewportBoundsRange[2];
+ uint32_t viewportSubPixelBits;
+ size_t minMemoryMapAlignment;
+ VkDeviceSize minTexelBufferOffsetAlignment;
+ VkDeviceSize minUniformBufferOffsetAlignment;
+ VkDeviceSize minStorageBufferOffsetAlignment;
+ int32_t minTexelOffset;
+ uint32_t maxTexelOffset;
+ int32_t minTexelGatherOffset;
+ uint32_t maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32_t subPixelInterpolationOffsetBits;
+ uint32_t maxFramebufferWidth;
+ uint32_t maxFramebufferHeight;
+ uint32_t maxFramebufferLayers;
+ VkSampleCountFlags framebufferColorSampleCounts;
+ VkSampleCountFlags framebufferDepthSampleCounts;
+ VkSampleCountFlags framebufferStencilSampleCounts;
+ VkSampleCountFlags framebufferNoAttachmentsSampleCounts;
+ uint32_t maxColorAttachments;
+ VkSampleCountFlags sampledImageColorSampleCounts;
+ VkSampleCountFlags sampledImageIntegerSampleCounts;
+ VkSampleCountFlags sampledImageDepthSampleCounts;
+ VkSampleCountFlags sampledImageStencilSampleCounts;
+ VkSampleCountFlags storageImageSampleCounts;
+ uint32_t maxSampleMaskWords;
+ VkBool32 timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32_t maxClipDistances;
+ uint32_t maxCullDistances;
+ uint32_t maxCombinedClipAndCullDistances;
+ uint32_t discreteQueuePriorities;
+ float pointSizeRange[2];
+ float lineWidthRange[2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ VkBool32 strictLines;
+ VkBool32 standardSampleLocations;
+ VkDeviceSize optimalBufferCopyOffsetAlignment;
+ VkDeviceSize optimalBufferCopyRowPitchAlignment;
+ VkDeviceSize nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+ VkBool32 residencyStandard2DBlockShape;
+ VkBool32 residencyStandard2DMultisampleBlockShape;
+ VkBool32 residencyStandard3DBlockShape;
+ VkBool32 residencyAlignedMipSize;
+ VkBool32 residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceProperties {
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+ uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+ VkPhysicalDeviceLimits limits;
+ VkPhysicalDeviceSparseProperties sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkQueueFamilyProperties {
+ VkQueueFlags queueFlags;
+ uint32_t queueCount;
+ uint32_t timestampValidBits;
+ VkExtent3D minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
+typedef struct VkMemoryType {
+ VkMemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+ uint32_t memoryTypeCount;
+ VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ uint32_t memoryHeapCount;
+ VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+typedef struct VkDeviceQueueCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+ const float* pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkDeviceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceCreateFlags flags;
+ uint32_t queueCreateInfoCount;
+ const VkDeviceQueueCreateInfo* pQueueCreateInfos;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures* pEnabledFeatures;
+} VkDeviceCreateInfo;
+
+typedef struct VkExtensionProperties {
+ char extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+ char layerName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+ uint32_t implementationVersion;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+} VkLayerProperties;
+
+typedef struct VkSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ const VkPipelineStageFlags* pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ const VkCommandBuffer* pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkSubmitInfo;
+
+typedef struct VkMemoryAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMappedMemoryRange {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkMappedMemoryRange;
+
+typedef struct VkMemoryRequirements {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+ VkImageAspectFlags aspectMask;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+ VkSparseImageFormatProperties formatProperties;
+ uint32_t imageMipTailFirstLod;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkSparseMemoryBind {
+ VkDeviceSize resourceOffset;
+ VkDeviceSize size;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+ VkBuffer buffer;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkImageSubresource {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkOffset3D {
+ int32_t x;
+ int32_t y;
+ int32_t z;
+} VkOffset3D;
+
+typedef struct VkSparseImageMemoryBind {
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseImageMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseImageMemoryBind* pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t bufferBindCount;
+ const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ uint32_t imageOpaqueBindCount;
+ const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ uint32_t imageBindCount;
+ const VkSparseImageMemoryBindInfo* pImageBinds;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkFenceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+typedef struct VkSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkEventCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueryPoolCreateFlags flags;
+ VkQueryType queryType;
+ uint32_t queryCount;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkBufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferCreateFlags flags;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferViewCreateFlags flags;
+ VkBuffer buffer;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageCreateFlags flags;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ uint32_t mipLevels;
+ uint32_t arrayLayers;
+ VkSampleCountFlagBits samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkImageLayout initialLayout;
+} VkImageCreateInfo;
+
+typedef struct VkSubresourceLayout {
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VkDeviceSize rowPitch;
+ VkDeviceSize arrayPitch;
+ VkDeviceSize depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkComponentMapping {
+ VkComponentSwizzle r;
+ VkComponentSwizzle g;
+ VkComponentSwizzle b;
+ VkComponentSwizzle a;
+} VkComponentMapping;
+
+typedef struct VkImageSubresourceRange {
+ VkImageAspectFlags aspectMask;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceRange;
+
+typedef struct VkImageViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageViewCreateFlags flags;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkComponentMapping components;
+ VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkShaderModuleCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderModuleCreateFlags flags;
+ size_t codeSize;
+ const uint32_t* pCode;
+} VkShaderModuleCreateInfo;
+
+typedef struct VkPipelineCacheCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCacheCreateFlags flags;
+ size_t initialDataSize;
+ const void* pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkSpecializationMapEntry {
+ uint32_t constantID;
+ uint32_t offset;
+ size_t size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+ uint32_t mapEntryCount;
+ const VkSpecializationMapEntry* pMapEntries;
+ size_t dataSize;
+ const void* pData;
+} VkSpecializationInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineShaderStageCreateFlags flags;
+ VkShaderStageFlagBits stage;
+ VkShaderModule module;
+ const char* pName;
+ const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkVertexInputBindingDescription {
+ uint32_t binding;
+ uint32_t stride;
+ VkVertexInputRate inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+ uint32_t location;
+ uint32_t binding;
+ VkFormat format;
+ uint32_t offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineVertexInputStateCreateFlags flags;
+ uint32_t vertexBindingDescriptionCount;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ uint32_t vertexAttributeDescriptionCount;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineInputAssemblyStateCreateFlags flags;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineTessellationStateCreateFlags flags;
+ uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkViewport {
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+} VkViewport;
+
+typedef struct VkOffset2D {
+ int32_t x;
+ int32_t y;
+} VkOffset2D;
+
+typedef struct VkExtent2D {
+ uint32_t width;
+ uint32_t height;
+} VkExtent2D;
+
+typedef struct VkRect2D {
+ VkOffset2D offset;
+ VkExtent2D extent;
+} VkRect2D;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineViewportStateCreateFlags flags;
+ uint32_t viewportCount;
+ const VkViewport* pViewports;
+ uint32_t scissorCount;
+ const VkRect2D* pScissors;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct VkPipelineRasterizationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineRasterizationStateCreateFlags flags;
+ VkBool32 depthClampEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkPolygonMode polygonMode;
+ VkCullModeFlags cullMode;
+ VkFrontFace frontFace;
+ VkBool32 depthBiasEnable;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineMultisampleStateCreateFlags flags;
+ VkSampleCountFlagBits rasterizationSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ const VkSampleMask* pSampleMask;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkStencilOpState {
+ VkStencilOp failOp;
+ VkStencilOp passOp;
+ VkStencilOp depthFailOp;
+ VkCompareOp compareOp;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
+} VkStencilOpState;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDepthStencilStateCreateFlags flags;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsTestEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+ float minDepthBounds;
+ float maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+ VkBool32 blendEnable;
+ VkBlendFactor srcColorBlendFactor;
+ VkBlendFactor dstColorBlendFactor;
+ VkBlendOp colorBlendOp;
+ VkBlendFactor srcAlphaBlendFactor;
+ VkBlendFactor dstAlphaBlendFactor;
+ VkBlendOp alphaBlendOp;
+ VkColorComponentFlags colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineColorBlendStateCreateFlags flags;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ uint32_t attachmentCount;
+ const VkPipelineColorBlendAttachmentState* pAttachments;
+ float blendConstants[4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDynamicStateCreateFlags flags;
+ uint32_t dynamicStateCount;
+ const VkDynamicState* pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ const VkPipelineViewportStateCreateInfo* pViewportState;
+ const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
+ const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ const VkPipelineDynamicStateCreateInfo* pDynamicState;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ VkPipelineShaderStageCreateInfo stage;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPushConstantRange {
+ VkShaderStageFlags stageFlags;
+ uint32_t offset;
+ uint32_t size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineLayoutCreateFlags flags;
+ uint32_t setLayoutCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const VkPushConstantRange* pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSamplerCreateFlags flags;
+ VkFilter magFilter;
+ VkFilter minFilter;
+ VkSamplerMipmapMode mipmapMode;
+ VkSamplerAddressMode addressModeU;
+ VkSamplerAddressMode addressModeV;
+ VkSamplerAddressMode addressModeW;
+ float mipLodBias;
+ VkBool32 anisotropyEnable;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+ VkBool32 unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+ uint32_t binding;
+ VkDescriptorType descriptorType;
+ uint32_t descriptorCount;
+ VkShaderStageFlags stageFlags;
+ const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSetLayoutCreateFlags flags;
+ uint32_t bindingCount;
+ const VkDescriptorSetLayoutBinding* pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolSize {
+ VkDescriptorType type;
+ uint32_t descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPoolCreateFlags flags;
+ uint32_t maxSets;
+ uint32_t poolSizeCount;
+ const VkDescriptorPoolSize* pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkDescriptorSetAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPool descriptorPool;
+ uint32_t descriptorSetCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkDescriptorImageInfo {
+ VkSampler sampler;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkDescriptorBufferInfo {
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ const VkDescriptorImageInfo* pImageInfo;
+ const VkDescriptorBufferInfo* pBufferInfo;
+ const VkBufferView* pTexelBufferView;
+} VkWriteDescriptorSet;
+
+typedef struct VkCopyDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+} VkCopyDescriptorSet;
+
+typedef struct VkFramebufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFramebufferCreateFlags flags;
+ VkRenderPass renderPass;
+ uint32_t attachmentCount;
+ const VkImageView* pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkAttachmentDescription {
+ VkAttachmentDescriptionFlags flags;
+ VkFormat format;
+ VkSampleCountFlagBits samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkAttachmentReference {
+ uint32_t attachment;
+ VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct VkSubpassDescription {
+ VkSubpassDescriptionFlags flags;
+ VkPipelineBindPoint pipelineBindPoint;
+ uint32_t inputAttachmentCount;
+ const VkAttachmentReference* pInputAttachments;
+ uint32_t colorAttachmentCount;
+ const VkAttachmentReference* pColorAttachments;
+ const VkAttachmentReference* pResolveAttachments;
+ const VkAttachmentReference* pDepthStencilAttachment;
+ uint32_t preserveAttachmentCount;
+ const uint32_t* pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+ uint32_t srcSubpass;
+ uint32_t dstSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkDependencyFlags dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPassCreateFlags flags;
+ uint32_t attachmentCount;
+ const VkAttachmentDescription* pAttachments;
+ uint32_t subpassCount;
+ const VkSubpassDescription* pSubpasses;
+ uint32_t dependencyCount;
+ const VkSubpassDependency* pDependencies;
+} VkRenderPassCreateInfo;
+
+typedef struct VkCommandPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPoolCreateFlags flags;
+ uint32_t queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPool commandPool;
+ VkCommandBufferLevel level;
+ uint32_t commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkFramebuffer framebuffer;
+ VkBool32 occlusionQueryEnable;
+ VkQueryControlFlags queryFlags;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandBufferUsageFlags flags;
+ const VkCommandBufferInheritanceInfo* pInheritanceInfo;
+} VkCommandBufferBeginInfo;
+
+typedef struct VkBufferCopy {
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+} VkBufferCopy;
+
+typedef struct VkImageSubresourceLayers {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageCopy {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffsets[2];
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresourceLayers imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
+
+typedef union VkClearColorValue {
+ float float32[4];
+ int32_t int32[4];
+ uint32_t uint32[4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+ float depth;
+ uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+ VkClearColorValue color;
+ VkClearDepthStencilValue depthStencil;
+} VkClearValue;
+
+typedef struct VkClearAttachment {
+ VkImageAspectFlags aspectMask;
+ uint32_t colorAttachment;
+ VkClearValue clearValue;
+} VkClearAttachment;
+
+typedef struct VkClearRect {
+ VkRect2D rect;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkClearRect;
+
+typedef struct VkImageResolve {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
+
+typedef struct VkRenderPassBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ uint32_t clearValueCount;
+ const VkClearValue* pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef struct VkDispatchIndirectCommand {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t vertexOffset;
+ uint32_t firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDrawIndirectCommand {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+} VkDrawIndirectCommand;
+
+typedef struct VkBaseOutStructure {
+ VkStructureType sType;
+ struct VkBaseOutStructure* pNext;
+} VkBaseOutStructure;
+
+typedef struct VkBaseInStructure {
+ VkStructureType sType;
+ const struct VkBaseInStructure* pNext;
+} VkBaseInStructure;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
+typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
+typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);
+typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
+typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);
+typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);
+typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
+ const VkInstanceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkInstance* pInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
+ VkInstance instance,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceCount,
+ VkPhysicalDevice* pPhysicalDevices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageFormatProperties* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
+ VkDevice device,
+ const char* pName);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
+ VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDevice* pDevice);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
+ VkDevice device,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
+ VkPhysicalDevice physicalDevice,
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
+ VkDevice device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue* pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
+ VkQueue queue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
+ VkDevice device);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
+ VkDevice device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDeviceMemory* pMemory);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void** ppData);
+
+VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
+ VkDevice device,
+ VkDeviceMemory memory);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize* pCommittedMemoryInBytes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
+ VkDevice device,
+ VkImage image,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
+ VkDevice device,
+ VkBuffer buffer,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkSampleCountFlagBits samples,
+ VkImageUsageFlags usage,
+ VkImageTiling tiling,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
+ VkQueue queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo* pBindInfo,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
+ VkDevice device,
+ VkFence fence,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
+ VkDevice device,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences,
+ VkBool32 waitAll,
+ uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSemaphore* pSemaphore);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
+ VkDevice device,
+ VkSemaphore semaphore,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkEvent* pEvent);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(
+ VkDevice device,
+ VkEvent event,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkQueryPool* pQueryPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(
+ VkDevice device,
+ VkQueryPool queryPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
+ VkDevice device,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ size_t dataSize,
+ void* pData,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
+ VkDevice device,
+ const VkBufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBuffer* pBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
+ VkDevice device,
+ VkBuffer buffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
+ VkDevice device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBufferView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(
+ VkDevice device,
+ VkBufferView bufferView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImage* pImage);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
+ VkDevice device,
+ VkImage image,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
+ VkDevice device,
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceLayout* pLayout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImageView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(
+ VkDevice device,
+ VkImageView imageView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
+ VkDevice device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkShaderModule* pShaderModule);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(
+ VkDevice device,
+ VkShaderModule shaderModule,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
+ VkDevice device,
+ const VkPipelineCacheCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineCache* pPipelineCache);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ size_t* pDataSize,
+ void* pData);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
+ VkDevice device,
+ VkPipelineCache dstCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache* pSrcCaches);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(
+ VkDevice device,
+ VkPipeline pipeline,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
+ VkDevice device,
+ const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineLayout* pPipelineLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(
+ VkDevice device,
+ VkPipelineLayout pipelineLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSampler* pSampler);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySampler(
+ VkDevice device,
+ VkSampler sampler,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorSetLayout* pSetLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(
+ VkDevice device,
+ VkDescriptorSetLayout descriptorSetLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
+ VkDevice device,
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorPool* pDescriptorPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(
+ VkDevice device,
+ const VkDescriptorSetAllocateInfo* pAllocateInfo,
+ VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
+ VkDevice device,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFramebuffer* pFramebuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
+ VkDevice device,
+ VkFramebuffer framebuffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkRenderPass* pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
+ VkDevice device,
+ VkRenderPass renderPass,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(
+ VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D* pGranularity);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
+ VkDevice device,
+ const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkCommandPool* pCommandPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
+ VkDevice device,
+ const VkCommandBufferAllocateInfo* pAllocateInfo,
+ VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
+ VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo* pBeginInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(
+ VkCommandBuffer commandBuffer,
+ float lineWidth);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
+ VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(
+ VkCommandBuffer commandBuffer,
+ const float blendConstants[4]);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
+ VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
+ VkCommandBuffer commandBuffer,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
+ VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
+ VkCommandBuffer commandBuffer,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkFilter filter);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize size,
+ uint32_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
+ VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkClearAttachment* pAttachments,
+ uint32_t rectCount,
+ const VkClearRect* pRects);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+#endif
+
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0) // Patch version should always be set to 0
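+// Illustrative note (not part of the upstream header): with the packing
+// VK_MAKE_VERSION uses earlier in this header,
+//   (((major) << 22) | ((minor) << 12) | (patch)),
+// VK_API_VERSION_1_1 expands to (1 << 22) | (1 << 12) | 0 = 0x00401000,
+// the value an application passes in VkApplicationInfo::apiVersion.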
+
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+
+#define VK_MAX_DEVICE_GROUP_SIZE 32
+#define VK_LUID_SIZE 8
+#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1)
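+// Illustrative note (not part of the upstream header): (~0U-1) is
+// 0xFFFFFFFE, one below VK_QUEUE_FAMILY_IGNORED (~0U) defined earlier in
+// this header, so the two sentinel values cannot collide with real queue
+// family indices.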
+
+
+typedef enum VkPointClippingBehavior {
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+ VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+ VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+ VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+ VK_POINT_CLIPPING_BEHAVIOR_END_RANGE = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+ VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES + 1),
+ VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+ VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+ VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+ VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT + 1),
+ VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY + 1),
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+ VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+ VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,
+ VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+ VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+ VK_SAMPLER_YCBCR_RANGE_BEGIN_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+ VK_SAMPLER_YCBCR_RANGE_END_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+ VK_SAMPLER_YCBCR_RANGE_RANGE_SIZE = (VK_SAMPLER_YCBCR_RANGE_ITU_NARROW - VK_SAMPLER_YCBCR_RANGE_ITU_FULL + 1),
+ VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrRange;
+
+typedef enum VkChromaLocation {
+ VK_CHROMA_LOCATION_COSITED_EVEN = 0,
+ VK_CHROMA_LOCATION_MIDPOINT = 1,
+ VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN,
+ VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT,
+ VK_CHROMA_LOCATION_BEGIN_RANGE = VK_CHROMA_LOCATION_COSITED_EVEN,
+ VK_CHROMA_LOCATION_END_RANGE = VK_CHROMA_LOCATION_MIDPOINT,
+ VK_CHROMA_LOCATION_RANGE_SIZE = (VK_CHROMA_LOCATION_MIDPOINT - VK_CHROMA_LOCATION_COSITED_EVEN + 1),
+ VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF
+} VkChromaLocation;
+
+typedef enum VkDescriptorUpdateTemplateType {
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET + 1),
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorUpdateTemplateType;
+
+
+typedef enum VkSubgroupFeatureFlagBits {
+ VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,
+ VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,
+ VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,
+ VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,
+ VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,
+ VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,
+ VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,
+ VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,
+ VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100,
+ VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubgroupFeatureFlagBits;
+typedef VkFlags VkSubgroupFeatureFlags;
+
+typedef enum VkPeerMemoryFeatureFlagBits {
+ VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001,
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002,
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004,
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008,
+ VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT,
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT,
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT,
+ VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPeerMemoryFeatureFlagBits;
+typedef VkFlags VkPeerMemoryFeatureFlags;
+
+typedef enum VkMemoryAllocateFlagBits {
+ VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001,
+ VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT,
+ VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryAllocateFlagBits;
+typedef VkFlags VkMemoryAllocateFlags;
+typedef VkFlags VkCommandPoolTrimFlags;
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlags;
+
+typedef enum VkExternalMemoryHandleTypeFlagBits {
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBits;
+typedef VkFlags VkExternalMemoryHandleTypeFlags;
+
+typedef enum VkExternalMemoryFeatureFlagBits {
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004,
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+ VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBits;
+typedef VkFlags VkExternalMemoryFeatureFlags;
+
+typedef enum VkExternalFenceHandleTypeFlagBits {
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceHandleTypeFlagBits;
+typedef VkFlags VkExternalFenceHandleTypeFlags;
+
+typedef enum VkExternalFenceFeatureFlagBits {
+ VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+ VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+ VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT,
+ VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT,
+ VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceFeatureFlagBits;
+typedef VkFlags VkExternalFenceFeatureFlags;
+
+typedef enum VkFenceImportFlagBits {
+ VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001,
+ VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT,
+ VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceImportFlagBits;
+typedef VkFlags VkFenceImportFlags;
+
+typedef enum VkSemaphoreImportFlagBits {
+ VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001,
+ VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+ VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreImportFlagBits;
+typedef VkFlags VkSemaphoreImportFlags;
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBits {
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreHandleTypeFlagBits;
+typedef VkFlags VkExternalSemaphoreHandleTypeFlags;
+
+typedef enum VkExternalSemaphoreFeatureFlagBits {
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreFeatureFlagBits;
+typedef VkFlags VkExternalSemaphoreFeatureFlags;
+
+typedef struct VkPhysicalDeviceSubgroupProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t subgroupSize;
+ VkShaderStageFlags supportedStages;
+ VkSubgroupFeatureFlags supportedOperations;
+ VkBool32 quadOperationsInAllStages;
+} VkPhysicalDeviceSubgroupProperties;
+
+typedef struct VkBindBufferMemoryInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer buffer;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfo;
+
+typedef struct VkBindImageMemoryInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfo;
+
+typedef struct VkPhysicalDevice16BitStorageFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 storageBuffer16BitAccess;
+ VkBool32 uniformAndStorageBuffer16BitAccess;
+ VkBool32 storagePushConstant16;
+ VkBool32 storageInputOutput16;
+} VkPhysicalDevice16BitStorageFeatures;
+
+typedef struct VkMemoryDedicatedRequirements {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 prefersDedicatedAllocation;
+ VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirements;
+
+typedef struct VkMemoryDedicatedAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfo;
+
+typedef struct VkMemoryAllocateFlagsInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryAllocateFlags flags;
+ uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfo;
+
+typedef struct VkDeviceGroupRenderPassBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceMask;
+ uint32_t deviceRenderAreaCount;
+ const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfo;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfo;
+
+typedef struct VkDeviceGroupSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const uint32_t* pWaitSemaphoreDeviceIndices;
+ uint32_t commandBufferCount;
+ const uint32_t* pCommandBufferDeviceMasks;
+ uint32_t signalSemaphoreCount;
+ const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfo;
+
+typedef struct VkDeviceGroupBindSparseInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t resourceDeviceIndex;
+ uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfo;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfo;
+
+typedef struct VkBindImageMemoryDeviceGroupInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+ uint32_t splitInstanceBindRegionCount;
+ const VkRect2D* pSplitInstanceBindRegions;
+} VkBindImageMemoryDeviceGroupInfo;
+
+typedef struct VkPhysicalDeviceGroupProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t physicalDeviceCount;
+ VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE];
+ VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupProperties;
+
+typedef struct VkDeviceGroupDeviceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t physicalDeviceCount;
+ const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfo;
+
+typedef struct VkBufferMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2;
+
+typedef struct VkImageMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+} VkImageMemoryRequirementsInfo2;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+} VkImageSparseMemoryRequirementsInfo2;
+
+typedef struct VkMemoryRequirements2 {
+ VkStructureType sType;
+ void* pNext;
+ VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2;
+
+typedef struct VkSparseImageMemoryRequirements2 {
+ VkStructureType sType;
+ void* pNext;
+ VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2;
+
+typedef struct VkPhysicalDeviceFeatures2 {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2;
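+// Illustrative sketch (not part of the upstream header): the "2" structs
+// above and below are extended through their pNext chains. Assuming a valid
+// VkPhysicalDevice `phys`, the multiview feature struct declared further
+// down can be queried by linking it into VkPhysicalDeviceFeatures2:
+//
+//   VkPhysicalDeviceMultiviewFeatures mv = {
+//       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES };
+//   VkPhysicalDeviceFeatures2 f2 = {
+//       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, &mv };
+//   vkGetPhysicalDeviceFeatures2(phys, &f2);
+//   // mv.multiview etc. now report implementation support.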
+
+typedef struct VkPhysicalDeviceProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2;
+
+typedef struct VkFormatProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkFormatProperties formatProperties;
+} VkFormatProperties2;
+
+typedef struct VkImageFormatProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ VkImageType type;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2;
+
+typedef struct VkQueueFamilyProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2;
+
+typedef struct VkPhysicalDeviceMemoryProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2;
+
+typedef struct VkSparseImageFormatProperties2 {
+ VkStructureType sType;
+ void* pNext;
+ VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ VkImageType type;
+ VkSampleCountFlagBits samples;
+ VkImageUsageFlags usage;
+ VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2;
+
+typedef struct VkPhysicalDevicePointClippingProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkPointClippingBehavior pointClippingBehavior;
+} VkPhysicalDevicePointClippingProperties;
+
+typedef struct VkInputAttachmentAspectReference {
+ uint32_t subpass;
+ uint32_t inputAttachmentIndex;
+ VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReference;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t aspectReferenceCount;
+ const VkInputAttachmentAspectReference* pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfo;
+
+typedef struct VkImageViewUsageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfo;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkTessellationDomainOrigin domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfo;
+
+typedef struct VkRenderPassMultiviewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t subpassCount;
+ const uint32_t* pViewMasks;
+ uint32_t dependencyCount;
+ const int32_t* pViewOffsets;
+ uint32_t correlationMaskCount;
+ const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfo;
+
+typedef struct VkPhysicalDeviceMultiviewFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 multiview;
+ VkBool32 multiviewGeometryShader;
+ VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeatures;
+
+typedef struct VkPhysicalDeviceMultiviewProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxMultiviewViewCount;
+ uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewProperties;
+
+typedef struct VkPhysicalDeviceVariablePointerFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 variablePointersStorageBuffer;
+ VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointerFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 protectedMemory;
+} VkPhysicalDeviceProtectedMemoryFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 protectedNoFault;
+} VkPhysicalDeviceProtectedMemoryProperties;
+
+typedef struct VkDeviceQueueInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueIndex;
+} VkDeviceQueueInfo2;
+
+typedef struct VkProtectedSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 protectedSubmit;
+} VkProtectedSubmitInfo;
+
+typedef struct VkSamplerYcbcrConversionCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ VkSamplerYcbcrModelConversion ycbcrModel;
+ VkSamplerYcbcrRange ycbcrRange;
+ VkComponentMapping components;
+ VkChromaLocation xChromaOffset;
+ VkChromaLocation yChromaOffset;
+ VkFilter chromaFilter;
+ VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSamplerYcbcrConversion conversion;
+} VkSamplerYcbcrConversionInfo;
+
+typedef struct VkBindImagePlaneMemoryInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfo;
+
+typedef struct VkImagePlaneMemoryRequirementsInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfo;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+
+typedef struct VkSamplerYcbcrConversionImageFormatProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatProperties;
+
+typedef struct VkDescriptorUpdateTemplateEntry {
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ size_t offset;
+ size_t stride;
+} VkDescriptorUpdateTemplateEntry;
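+// Illustrative note (not part of the upstream header): offset and stride
+// are byte positions within the raw host memory blob later handed to
+// vkUpdateDescriptorSetWithTemplate as pData; e.g. an array of
+// VkDescriptorImageInfo packed back to back would use
+// stride = sizeof(VkDescriptorImageInfo).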
+
+typedef struct VkDescriptorUpdateTemplateCreateInfo {
+ VkStructureType sType;
+ void* pNext;
+ VkDescriptorUpdateTemplateCreateFlags flags;
+ uint32_t descriptorUpdateEntryCount;
+ const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
+ VkDescriptorUpdateTemplateType templateType;
+ VkDescriptorSetLayout descriptorSetLayout;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkPipelineLayout pipelineLayout;
+ uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfo;
+
+typedef struct VkExternalMemoryProperties {
+ VkExternalMemoryFeatureFlags externalMemoryFeatures;
+ VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalMemoryHandleTypeFlags compatibleHandleTypes;
+} VkExternalMemoryProperties;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalImageFormatInfo;
+
+typedef struct VkExternalImageFormatProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalImageFormatProperties;
+
+typedef struct VkPhysicalDeviceExternalBufferInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferCreateFlags flags;
+ VkBufferUsageFlags usage;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalBufferInfo;
+
+typedef struct VkExternalBufferProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalBufferProperties;
+
+typedef struct VkPhysicalDeviceIDProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint8_t deviceUUID[VK_UUID_SIZE];
+ uint8_t driverUUID[VK_UUID_SIZE];
+ uint8_t deviceLUID[VK_LUID_SIZE];
+ uint32_t deviceNodeMask;
+ VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDProperties;
+
+typedef struct VkExternalMemoryImageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryImageCreateInfo;
+
+typedef struct VkExternalMemoryBufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryBufferCreateInfo;
+
+typedef struct VkExportMemoryAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExportMemoryAllocateInfo;
+
+typedef struct VkPhysicalDeviceExternalFenceInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalFenceHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalFenceInfo;
+
+typedef struct VkExternalFenceProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalFenceHandleTypeFlags compatibleHandleTypes;
+ VkExternalFenceFeatureFlags externalFenceFeatures;
+} VkExternalFenceProperties;
+
+typedef struct VkExportFenceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalFenceHandleTypeFlags handleTypes;
+} VkExportFenceCreateInfo;
+
+typedef struct VkExportSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalSemaphoreHandleTypeFlags handleTypes;
+} VkExportSemaphoreCreateInfo;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalSemaphoreInfo;
+
+typedef struct VkExternalSemaphoreProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes;
+ VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes;
+ VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures;
+} VkExternalSemaphoreProperties;
+
+typedef struct VkPhysicalDeviceMaintenance3Properties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxPerSetDescriptors;
+ VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceMaintenance3Properties;
+
+typedef struct VkDescriptorSetLayoutSupport {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 supported;
+} VkDescriptorSetLayoutSupport;
+
+typedef struct VkPhysicalDeviceShaderDrawParameterFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceShaderDrawParameterFeatures;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(
+ uint32_t* pApiVersion);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfo* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfo* pBindInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(
+ VkDevice device,
+ uint32_t heapIndex,
+ uint32_t localDeviceIndex,
+ uint32_t remoteDeviceIndex,
+ VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(
+ VkCommandBuffer commandBuffer,
+ uint32_t baseGroupX,
+ uint32_t baseGroupY,
+ uint32_t baseGroupZ,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(
+ VkDevice device,
+ const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(
+ VkDevice device,
+ const VkBufferMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(
+ VkDevice device,
+ const VkImageSparseMemoryRequirementsInfo2* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties2* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(
+ VkDevice device,
+ const VkDeviceQueueInfo2* pQueueInfo,
+ VkQueue* pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(
+ VkDevice device,
+ const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSamplerYcbcrConversion* pYcbcrConversion);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(
+ VkDevice device,
+ VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(
+ VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(
+ VkDevice device,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(
+ VkDevice device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+ VkExternalBufferProperties* pExternalBufferProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+ VkExternalFenceProperties* pExternalFenceProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+ VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ VkDescriptorSetLayoutSupport* pSupport);
+#endif
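+
+// Editorial note: many of the queries above use the count/array "two-call
+// idiom". A minimal sketch, not part of the upstream header: `physicalDevice`
+// is assumed to be a valid VkPhysicalDevice from vkEnumeratePhysicalDevices,
+// <stdlib.h> is assumed for calloc, and error handling is elided.
+//
+//     uint32_t count = 0;
+//     // First call: pass NULL to ask only for the element count.
+//     vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, &count, NULL);
+//     VkQueueFamilyProperties2* props =
+//         (VkQueueFamilyProperties2*)calloc(count, sizeof(*props));
+//     for (uint32_t i = 0; i < count; ++i) {
+//         props[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2;
+//         props[i].pNext = NULL;  // extension structs may be chained here
+//     }
+//     // Second call: the array is filled in place.
+//     vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, &count, props);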
+
+#define VK_KHR_surface 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+
+#define VK_KHR_SURFACE_SPEC_VERSION 25
+#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface"
+#define VK_COLORSPACE_SRGB_NONLINEAR_KHR VK_COLOR_SPACE_SRGB_NONLINEAR_KHR
+
+
+typedef enum VkColorSpaceKHR {
+ VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,
+ VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,
+ VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003,
+ VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,
+ VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005,
+ VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,
+ VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,
+ VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,
+ VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,
+ VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,
+ VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,
+ VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,
+ VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,
+ VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014,
+ VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1),
+ VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkColorSpaceKHR;
+
+typedef enum VkPresentModeKHR {
+ VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+ VK_PRESENT_MODE_MAILBOX_KHR = 1,
+ VK_PRESENT_MODE_FIFO_KHR = 2,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,
+ VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000,
+ VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001,
+ VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1),
+ VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkPresentModeKHR;
+
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+ VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+ VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+ VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+ VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+ VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+ VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSurfaceTransformFlagBitsKHR;
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,
+ VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkCompositeAlphaFlagBitsKHR;
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+
+typedef struct VkSurfaceCapabilitiesKHR {
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
+
+typedef struct VkSurfaceFormatKHR {
+ VkFormat format;
+ VkColorSpaceKHR colorSpace;
+} VkSurfaceFormatKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
+ VkInstance instance,
+ VkSurfaceKHR surface,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface,
+ VkBool32* pSupported);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormatKHR* pSurfaceFormats);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pPresentModeCount,
+ VkPresentModeKHR* pPresentModes);
+#endif
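+
+// Editorial note: a minimal sketch of vetting a surface before creating a
+// swapchain, not part of the upstream header. `physicalDevice`, `surface`
+// and `queueFamilyIndex` are assumed to exist; error handling is elided.
+//
+//     VkBool32 presentSupported = VK_FALSE;
+//     vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex,
+//                                          surface, &presentSupported);
+//     VkSurfaceCapabilitiesKHR caps;
+//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, &caps);
+//     uint32_t formatCount = 0;  // same two-call idiom as the core queries
+//     vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface,
+//                                          &formatCount, NULL);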
+
+#define VK_KHR_swapchain 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)
+
+#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70
+#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain"
+
+
+typedef enum VkSwapchainCreateFlagBitsKHR {
+ VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001,
+ VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002,
+ VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSwapchainCreateFlagBitsKHR;
+typedef VkFlags VkSwapchainCreateFlagsKHR;
+
+typedef enum VkDeviceGroupPresentModeFlagBitsKHR {
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001,
+ VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002,
+ VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004,
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008,
+ VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkDeviceGroupPresentModeFlagBitsKHR;
+typedef VkFlags VkDeviceGroupPresentModeFlagsKHR;
+
+typedef struct VkSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainCreateFlagsKHR flags;
+ VkSurfaceKHR surface;
+ uint32_t minImageCount;
+ VkFormat imageFormat;
+ VkColorSpaceKHR imageColorSpace;
+ VkExtent2D imageExtent;
+ uint32_t imageArrayLayers;
+ VkImageUsageFlags imageUsage;
+ VkSharingMode imageSharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ VkPresentModeKHR presentMode;
+ VkBool32 clipped;
+ VkSwapchainKHR oldSwapchain;
+} VkSwapchainCreateInfoKHR;
+
+typedef struct VkPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t swapchainCount;
+ const VkSwapchainKHR* pSwapchains;
+ const uint32_t* pImageIndices;
+ VkResult* pResults;
+} VkPresentInfoKHR;
+
+typedef struct VkImageSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+} VkImageSwapchainCreateInfoKHR;
+
+typedef struct VkBindImageMemorySwapchainInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+ uint32_t imageIndex;
+} VkBindImageMemorySwapchainInfoKHR;
+
+typedef struct VkAcquireNextImageInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+ uint64_t timeout;
+ VkSemaphore semaphore;
+ VkFence fence;
+ uint32_t deviceMask;
+} VkAcquireNextImageInfoKHR;
+
+typedef struct VkDeviceGroupPresentCapabilitiesKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE];
+ VkDeviceGroupPresentModeFlagsKHR modes;
+} VkDeviceGroupPresentCapabilitiesKHR;
+
+typedef struct VkDeviceGroupPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const uint32_t* pDeviceMasks;
+ VkDeviceGroupPresentModeFlagBitsKHR mode;
+} VkDeviceGroupPresentInfoKHR;
+
+typedef struct VkDeviceGroupSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceGroupPresentModeFlagsKHR modes;
+} VkDeviceGroupSwapchainCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
+typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
+ VkDevice device,
+ const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchain);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pSwapchainImageCount,
+ VkImage* pSwapchainImages);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint64_t timeout,
+ VkSemaphore semaphore,
+ VkFence fence,
+ uint32_t* pImageIndex);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
+ VkQueue queue,
+ const VkPresentInfoKHR* pPresentInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR(
+ VkDevice device,
+ VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(
+ VkDevice device,
+ VkSurfaceKHR surface,
+ VkDeviceGroupPresentModeFlagsKHR* pModes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pRectCount,
+ VkRect2D* pRects);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR(
+ VkDevice device,
+ const VkAcquireNextImageInfoKHR* pAcquireInfo,
+ uint32_t* pImageIndex);
+#endif
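+
+// Editorial note: the acquire/present skeleton these entry points imply, as
+// a hypothetical sketch rather than part of the upstream header. `device`,
+// `swapchain`, the semaphores and `presentQueue` are assumed to exist, and
+// the command submission between the two calls is elided.
+//
+//     uint32_t imageIndex;
+//     VkResult r = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
+//                                        acquireSemaphore, VK_NULL_HANDLE,
+//                                        &imageIndex);
+//     if (r == VK_ERROR_OUT_OF_DATE_KHR) {
+//         // recreate the swapchain, passing the old one as oldSwapchain
+//     }
+//     // ... record and submit work that signals renderDoneSemaphore ...
+//     VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
+//     present.waitSemaphoreCount = 1;
+//     present.pWaitSemaphores    = &renderDoneSemaphore;
+//     present.swapchainCount     = 1;
+//     present.pSwapchains        = &swapchain;
+//     present.pImageIndices      = &imageIndex;
+//     vkQueuePresentKHR(presentQueue, &present);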
+
+#define VK_KHR_display 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)
+
+#define VK_KHR_DISPLAY_SPEC_VERSION 21
+#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display"
+
+
+typedef enum VkDisplayPlaneAlphaFlagBitsKHR {
+ VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008,
+ VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkDisplayPlaneAlphaFlagBitsKHR;
+typedef VkFlags VkDisplayPlaneAlphaFlagsKHR;
+typedef VkFlags VkDisplayModeCreateFlagsKHR;
+typedef VkFlags VkDisplaySurfaceCreateFlagsKHR;
+
+typedef struct VkDisplayPropertiesKHR {
+ VkDisplayKHR display;
+ const char* displayName;
+ VkExtent2D physicalDimensions;
+ VkExtent2D physicalResolution;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkBool32 planeReorderPossible;
+ VkBool32 persistentContent;
+} VkDisplayPropertiesKHR;
+
+typedef struct VkDisplayModeParametersKHR {
+ VkExtent2D visibleRegion;
+ uint32_t refreshRate;
+} VkDisplayModeParametersKHR;
+
+typedef struct VkDisplayModePropertiesKHR {
+ VkDisplayModeKHR displayMode;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModePropertiesKHR;
+
+typedef struct VkDisplayModeCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayModeCreateFlagsKHR flags;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModeCreateInfoKHR;
+
+typedef struct VkDisplayPlaneCapabilitiesKHR {
+ VkDisplayPlaneAlphaFlagsKHR supportedAlpha;
+ VkOffset2D minSrcPosition;
+ VkOffset2D maxSrcPosition;
+ VkExtent2D minSrcExtent;
+ VkExtent2D maxSrcExtent;
+ VkOffset2D minDstPosition;
+ VkOffset2D maxDstPosition;
+ VkExtent2D minDstExtent;
+ VkExtent2D maxDstExtent;
+} VkDisplayPlaneCapabilitiesKHR;
+
+typedef struct VkDisplayPlanePropertiesKHR {
+ VkDisplayKHR currentDisplay;
+ uint32_t currentStackIndex;
+} VkDisplayPlanePropertiesKHR;
+
+typedef struct VkDisplaySurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplaySurfaceCreateFlagsKHR flags;
+ VkDisplayModeKHR displayMode;
+ uint32_t planeIndex;
+ uint32_t planeStackIndex;
+ VkSurfaceTransformFlagBitsKHR transform;
+ float globalAlpha;
+ VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
+ VkExtent2D imageExtent;
+} VkDisplaySurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPlanePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t planeIndex,
+ uint32_t* pDisplayCount,
+ VkDisplayKHR* pDisplays);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t* pPropertyCount,
+ VkDisplayModePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ const VkDisplayModeCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDisplayModeKHR* pMode);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayModeKHR mode,
+ uint32_t planeIndex,
+ VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
+ VkInstance instance,
+ const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+
+#define VK_KHR_display_swapchain 1
+#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9
+#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain"
+
+typedef struct VkDisplayPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkRect2D srcRect;
+ VkRect2D dstRect;
+ VkBool32 persistent;
+} VkDisplayPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
+ VkDevice device,
+ uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchains);
+#endif
+
+#define VK_KHR_sampler_mirror_clamp_to_edge 1
+#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge"
+
+
+#define VK_KHR_multiview 1
+#define VK_KHR_MULTIVIEW_SPEC_VERSION 1
+#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview"
+
+typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR;
+
+typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR;
+
+typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR;
+
+
+
+#define VK_KHR_get_physical_device_properties2 1
+#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2"
+
+typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR;
+
+typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR;
+
+typedef VkFormatProperties2 VkFormatProperties2KHR;
+
+typedef VkImageFormatProperties2 VkImageFormatProperties2KHR;
+
+typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR;
+
+typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR;
+
+typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR;
+
+typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR;
+
+typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties2* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties2* pProperties);
+#endif
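+
+// Editorial note: the "2" variants exist so extension structs can be chained
+// through pNext. A minimal sketch, not part of the upstream header, using
+// the core VkPhysicalDeviceIDProperties as the chained struct; a valid
+// `physicalDevice` is assumed.
+//
+//     VkPhysicalDeviceIDProperties idProps = {
+//         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES };
+//     VkPhysicalDeviceProperties2 props2 = {
+//         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 };
+//     props2.pNext = &idProps;           // chain the extension struct
+//     vkGetPhysicalDeviceProperties2KHR(physicalDevice, &props2);
+//     // idProps.deviceUUID / driverUUID are now filled in alongside the
+//     // core properties in props2.properties.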
+
+#define VK_KHR_device_group 1
+#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 3
+#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group"
+
+typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR;
+
+typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR;
+
+typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR;
+
+typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR;
+
+
+typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR;
+
+typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR;
+
+typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR;
+
+typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR;
+
+typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR;
+
+typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR;
+
+typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR(
+ VkDevice device,
+ uint32_t heapIndex,
+ uint32_t localDeviceIndex,
+ uint32_t remoteDeviceIndex,
+ VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR(
+ VkCommandBuffer commandBuffer,
+ uint32_t baseGroupX,
+ uint32_t baseGroupY,
+ uint32_t baseGroupZ,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+#endif
+
+#define VK_KHR_shader_draw_parameters 1
+#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1
+#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters"
+
+
+#define VK_KHR_maintenance1 1
+#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2
+#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1"
+
+typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlags flags);
+#endif
+
+#define VK_KHR_device_group_creation 1
+#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1
+#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation"
+#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE
+
+typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR;
+
+typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+#endif
+
+#define VK_KHR_external_memory_capabilities 1
+#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities"
+#define VK_LUID_SIZE_KHR VK_LUID_SIZE
+
+typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR;
+
+typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR;
+
+typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR;
+
+typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR;
+
+
+typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR;
+
+typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR;
+
+typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR;
+
+typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR;
+
+typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR;
+
+typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+ VkExternalBufferProperties* pExternalBufferProperties);
+#endif
+
+#define VK_KHR_external_memory 1
+#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory"
+#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL
+
+typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR;
+
+typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR;
+
+typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR;
+
+
+
+#define VK_KHR_external_memory_fd 1
+#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd"
+
+typedef struct VkImportMemoryFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+ int fd;
+} VkImportMemoryFdInfoKHR;
+
+typedef struct VkMemoryFdPropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t memoryTypeBits;
+} VkMemoryFdPropertiesKHR;
+
+typedef struct VkMemoryGetFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkMemoryGetFdInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR(
+ VkDevice device,
+ const VkMemoryGetFdInfoKHR* pGetFdInfo,
+ int* pFd);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR(
+ VkDevice device,
+ VkExternalMemoryHandleTypeFlagBits handleType,
+ int fd,
+ VkMemoryFdPropertiesKHR* pMemoryFdProperties);
+#endif
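+
+// Editorial note: a minimal export sketch for this extension, not part of
+// the upstream header. `device` and `memory` are assumed to be a valid
+// VkDevice and a VkDeviceMemory allocated with an exportable handle type;
+// on success the returned fd is owned by the caller.
+//
+//     VkMemoryGetFdInfoKHR getFd = { VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR };
+//     getFd.memory     = memory;
+//     getFd.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+//     int fd = -1;
+//     VkResult r = vkGetMemoryFdKHR(device, &getFd, &fd);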
+
+#define VK_KHR_external_semaphore_capabilities 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities"
+
+typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR;
+
+typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR;
+
+typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR;
+
+typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR;
+
+
+typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR;
+
+typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+ VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+#endif
+
+#define VK_KHR_external_semaphore 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore"
+
+typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR;
+
+typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR;
+
+
+typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR;
+
+
+
+#define VK_KHR_external_semaphore_fd 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd"
+
+typedef struct VkImportSemaphoreFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkSemaphoreImportFlags flags;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+ int fd;
+} VkImportSemaphoreFdInfoKHR;
+
+typedef struct VkSemaphoreGetFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkSemaphoreGetFdInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR(
+ VkDevice device,
+ const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR(
+ VkDevice device,
+ const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
+ int* pFd);
+#endif
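+
+// Editorial note: a hypothetical export/import round trip, not part of the
+// upstream header. `device` and both semaphores are assumed to exist, the
+// exporting semaphore having been created with VkExportSemaphoreCreateInfo.
+//
+//     VkSemaphoreGetFdInfoKHR getInfo = {
+//         VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR };
+//     getInfo.semaphore  = exportSem;
+//     getInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+//     int fd = -1;
+//     vkGetSemaphoreFdKHR(device, &getInfo, &fd);
+//
+//     VkImportSemaphoreFdInfoKHR importInfo = {
+//         VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR };
+//     importInfo.semaphore  = importSem;
+//     importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+//     importInfo.fd         = fd;  // a successful import takes fd ownership
+//     vkImportSemaphoreFdKHR(device, &importInfo);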
+
+#define VK_KHR_push_descriptor 1
+#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2
+#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor"
+
+typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxPushDescriptors;
+} VkPhysicalDevicePushDescriptorPropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites);
+typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR(
+ VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ VkPipelineLayout layout,
+ uint32_t set,
+ const void* pData);
+#endif
+
+#define VK_KHR_16bit_storage 1
+#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1
+#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage"
+
+typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR;
+
+
+
+#define VK_KHR_incremental_present 1
+#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1
+#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present"
+
+typedef struct VkRectLayerKHR {
+ VkOffset2D offset;
+ VkExtent2D extent;
+ uint32_t layer;
+} VkRectLayerKHR;
+
+typedef struct VkPresentRegionKHR {
+ uint32_t rectangleCount;
+ const VkRectLayerKHR* pRectangles;
+} VkPresentRegionKHR;
+
+typedef struct VkPresentRegionsKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentRegionKHR* pRegions;
+} VkPresentRegionsKHR;
+
+
+
+#define VK_KHR_descriptor_update_template 1
+typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR;
+
+
+#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1
+#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template"
+
+typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR;
+
+
+typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR;
+
+
+typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR;
+
+typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR(
+ VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR(
+ VkDevice device,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR(
+ VkDevice device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const void* pData);
+#endif
+
+#define VK_KHR_shared_presentable_image 1
+#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1
+#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image"
+
+typedef struct VkSharedPresentSurfaceCapabilitiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkImageUsageFlags sharedPresentSupportedUsageFlags;
+} VkSharedPresentSurfaceCapabilitiesKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain);
+#endif
+
+#define VK_KHR_external_fence_capabilities 1
+#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities"
+
+typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR;
+
+typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR;
+
+typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR;
+
+typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR;
+
+
+typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR;
+
+typedef VkExternalFenceProperties VkExternalFencePropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+ VkExternalFenceProperties* pExternalFenceProperties);
+#endif
+
+#define VK_KHR_external_fence 1
+#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence"
+
+typedef VkFenceImportFlags VkFenceImportFlagsKHR;
+
+typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR;
+
+
+typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR;
+
+
+
+#define VK_KHR_external_fence_fd 1
+#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd"
+
+typedef struct VkImportFenceFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFence fence;
+ VkFenceImportFlags flags;
+ VkExternalFenceHandleTypeFlagBits handleType;
+ int fd;
+} VkImportFenceFdInfoKHR;
+
+typedef struct VkFenceGetFdInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFence fence;
+ VkExternalFenceHandleTypeFlagBits handleType;
+} VkFenceGetFdInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR(
+ VkDevice device,
+ const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR(
+ VkDevice device,
+ const VkFenceGetFdInfoKHR* pGetFdInfo,
+ int* pFd);
+#endif
+
+#define VK_KHR_maintenance2 1
+#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2"
+
+typedef VkPointClippingBehavior VkPointClippingBehaviorKHR;
+
+typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR;
+
+
+typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR;
+
+typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR;
+
+typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR;
+
+typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR;
+
+typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR;
+
+
+
+#define VK_KHR_get_surface_capabilities2 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2"
+
+typedef struct VkPhysicalDeviceSurfaceInfo2KHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSurfaceKHR surface;
+} VkPhysicalDeviceSurfaceInfo2KHR;
+
+typedef struct VkSurfaceCapabilities2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkSurfaceCapabilitiesKHR surfaceCapabilities;
+} VkSurfaceCapabilities2KHR;
+
+typedef struct VkSurfaceFormat2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkSurfaceFormatKHR surfaceFormat;
+} VkSurfaceFormat2KHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormat2KHR* pSurfaceFormats);
+#endif
+
+#define VK_KHR_variable_pointers 1
+#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1
+#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers"
+
+typedef VkPhysicalDeviceVariablePointerFeatures VkPhysicalDeviceVariablePointerFeaturesKHR;
+
+
+
+#define VK_KHR_get_display_properties2 1
+#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2"
+
+typedef struct VkDisplayProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkDisplayPropertiesKHR displayProperties;
+} VkDisplayProperties2KHR;
+
+typedef struct VkDisplayPlaneProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkDisplayPlanePropertiesKHR displayPlaneProperties;
+} VkDisplayPlaneProperties2KHR;
+
+typedef struct VkDisplayModeProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkDisplayModePropertiesKHR displayModeProperties;
+} VkDisplayModeProperties2KHR;
+
+typedef struct VkDisplayPlaneInfo2KHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayModeKHR mode;
+ uint32_t planeIndex;
+} VkDisplayPlaneInfo2KHR;
+
+typedef struct VkDisplayPlaneCapabilities2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkDisplayPlaneCapabilitiesKHR capabilities;
+} VkDisplayPlaneCapabilities2KHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayProperties2KHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPlaneProperties2KHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t* pPropertyCount,
+ VkDisplayModeProperties2KHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
+ VkDisplayPlaneCapabilities2KHR* pCapabilities);
+#endif
+
+#define VK_KHR_dedicated_allocation 1
+#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3
+#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation"
+
+typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR;
+
+typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR;
+
+
+
+#define VK_KHR_storage_buffer_storage_class 1
+#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1
+#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class"
+
+
+#define VK_KHR_relaxed_block_layout 1
+#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1
+#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout"
+
+
+#define VK_KHR_get_memory_requirements2 1
+#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1
+#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2"
+
+typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR;
+
+typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR;
+
+typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR;
+
+typedef VkMemoryRequirements2 VkMemoryRequirements2KHR;
+
+typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR(
+ VkDevice device,
+ const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR(
+ VkDevice device,
+ const VkBufferMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR(
+ VkDevice device,
+ const VkImageSparseMemoryRequirementsInfo2* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+#endif
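+
+// Editorial note: a minimal sketch of combining this query with the
+// VK_KHR_dedicated_allocation structs defined below, not part of the
+// upstream header; `device` and `image` are assumed to be valid.
+//
+//     VkMemoryDedicatedRequirements dedicated = {
+//         VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
+//     VkMemoryRequirements2 reqs = {
+//         VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
+//     reqs.pNext = &dedicated;           // chain the output struct
+//     VkImageMemoryRequirementsInfo2 info = {
+//         VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 };
+//     info.image = image;
+//     vkGetImageMemoryRequirements2KHR(device, &info, &reqs);
+//     // dedicated.prefersDedicatedAllocation / requiresDedicatedAllocation
+//     // now indicate whether the image wants its own VkDeviceMemory.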
+
+#define VK_KHR_image_format_list 1
+#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1
+#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list"
+
+typedef struct VkImageFormatListCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t viewFormatCount;
+ const VkFormat* pViewFormats;
+} VkImageFormatListCreateInfoKHR;
+
+
+
+#define VK_KHR_sampler_ycbcr_conversion 1
+typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR;
+
+
+#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion"
+
+typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR;
+
+typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR;
+
+typedef VkChromaLocation VkChromaLocationKHR;
+
+
+typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR;
+
+typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR;
+
+typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR;
+
+typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR;
+
+typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR;
+
+typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR(
+ VkDevice device,
+ const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSamplerYcbcrConversion* pYcbcrConversion);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR(
+ VkDevice device,
+ VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks* pAllocator);
+#endif
+
+#define VK_KHR_bind_memory2 1
+#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1
+#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2"
+
+typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR;
+
+typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfo* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfo* pBindInfos);
+#endif
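+
+// Editorial note: a minimal batch-bind sketch, not part of the upstream
+// header; `device`, the `buffers` and `memories` arrays are assumed to
+// exist, with offsets satisfying each buffer's alignment requirements.
+//
+//     VkBindBufferMemoryInfo binds[2];
+//     for (int i = 0; i < 2; ++i) {
+//         binds[i] = (VkBindBufferMemoryInfo){
+//             VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO };
+//         binds[i].buffer       = buffers[i];
+//         binds[i].memory       = memories[i];
+//         binds[i].memoryOffset = 0;
+//     }
+//     vkBindBufferMemory2KHR(device, 2, binds);  // one call, both bindings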
+
+#define VK_KHR_maintenance3 1
+#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3"
+
+typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR;
+
+typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ VkDescriptorSetLayoutSupport* pSupport);
+#endif
+
+#define VK_KHR_draw_indirect_count 1
+#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
+#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count"
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+#endif
+
+#define VK_EXT_debug_report 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
+
+#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9
+#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
+#define VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT
+#define VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT
+
+
+typedef enum VkDebugReportObjectTypeEXT {
+ VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
+ VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,
+ VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32,
+ VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1),
+ VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportObjectTypeEXT;
+
+
+typedef enum VkDebugReportFlagBitsEXT {
+ VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,
+ VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,
+ VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportFlagBitsEXT;
+typedef VkFlags VkDebugReportFlagsEXT;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage,
+ void* pUserData);
+
+typedef struct VkDebugReportCallbackCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportFlagsEXT flags;
+ PFN_vkDebugReportCallbackEXT pfnCallback;
+ void* pUserData;
+} VkDebugReportCallbackCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);
+typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
+ VkInstance instance,
+ const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDebugReportCallbackEXT* pCallback);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
+ VkInstance instance,
+ VkDebugReportCallbackEXT callback,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
+ VkInstance instance,
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage);
+#endif
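+
+/* Illustrative sketch only: installing a debug report callback on an
+ * existing instance. myCallback is an assumed user function matching
+ * PFN_vkDebugReportCallbackEXT; it should return VK_FALSE so the
+ * triggering call is not aborted. */
+#if 0
+VkDebugReportCallbackCreateInfoEXT ci = {
+    VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, NULL,
+    VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT,
+    myCallback, NULL /* pUserData */ };
+VkDebugReportCallbackEXT cb;
+PFN_vkCreateDebugReportCallbackEXT pfnCreate =
+    (PFN_vkCreateDebugReportCallbackEXT)
+        vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");
+VkResult res = pfnCreate(instance, &ci, NULL, &cb);
+#endif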
+
+#define VK_NV_glsl_shader 1
+#define VK_NV_GLSL_SHADER_SPEC_VERSION 1
+#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader"
+
+
+#define VK_EXT_depth_range_unrestricted 1
+#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1
+#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted"
+
+
+#define VK_IMG_filter_cubic 1
+#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1
+#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic"
+
+
+#define VK_AMD_rasterization_order 1
+#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1
+#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order"
+
+
+typedef enum VkRasterizationOrderAMD {
+ VK_RASTERIZATION_ORDER_STRICT_AMD = 0,
+ VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,
+ VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD,
+ VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD,
+ VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1),
+ VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkRasterizationOrderAMD;
+
+typedef struct VkPipelineRasterizationStateRasterizationOrderAMD {
+ VkStructureType sType;
+ const void* pNext;
+ VkRasterizationOrderAMD rasterizationOrder;
+} VkPipelineRasterizationStateRasterizationOrderAMD;
+
+
+
+#define VK_AMD_shader_trinary_minmax 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax"
+
+
+#define VK_AMD_shader_explicit_vertex_parameter 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter"
+
+
+#define VK_EXT_debug_marker 1
+#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4
+#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker"
+
+typedef struct VkDebugMarkerObjectNameInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ const char* pObjectName;
+} VkDebugMarkerObjectNameInfoEXT;
+
+typedef struct VkDebugMarkerObjectTagInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ uint64_t tagName;
+ size_t tagSize;
+ const void* pTag;
+} VkDebugMarkerObjectTagInfoEXT;
+
+typedef struct VkDebugMarkerMarkerInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pMarkerName;
+ float color[4];
+} VkDebugMarkerMarkerInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT(
+ VkDevice device,
+ const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT(
+ VkDevice device,
+ const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT(
+ VkCommandBuffer commandBuffer,
+ const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT(
+ VkCommandBuffer commandBuffer,
+ const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+#endif
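+
+/* Illustrative sketch only: naming a buffer and bracketing commands in a
+ * labelled region. buf, device and cmd are assumed handles; the pfn*
+ * pointers are assumed to have been fetched via vkGetDeviceProcAddr. */
+#if 0
+VkDebugMarkerObjectNameInfoEXT name = {
+    VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT, NULL,
+    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, (uint64_t)buf, "vertex-buffer" };
+pfnSetObjectName(device, &name);
+VkDebugMarkerMarkerInfoEXT marker = {
+    VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, NULL,
+    "shadow pass", { 1.0f, 0.0f, 0.0f, 1.0f } };
+pfnMarkerBegin(cmd, &marker);
+/* ... record the pass ... */
+pfnMarkerEnd(cmd);
+#endif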
+
+#define VK_AMD_gcn_shader 1
+#define VK_AMD_GCN_SHADER_SPEC_VERSION 1
+#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader"
+
+
+#define VK_NV_dedicated_allocation 1
+#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1
+#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation"
+
+typedef struct VkDedicatedAllocationImageCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationImageCreateInfoNV;
+
+typedef struct VkDedicatedAllocationBufferCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationBufferCreateInfoNV;
+
+typedef struct VkDedicatedAllocationMemoryAllocateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkBuffer buffer;
+} VkDedicatedAllocationMemoryAllocateInfoNV;
+
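+/* Illustrative sketch only: requesting a dedicated allocation for an
+ * image by chaining the NV struct into VkMemoryAllocateInfo. image,
+ * memReqs (from vkGetImageMemoryRequirements) and memoryTypeIndex are
+ * assumed; the image must have been created with
+ * VkDedicatedAllocationImageCreateInfoNV::dedicatedAllocation = VK_TRUE. */
+#if 0
+VkDedicatedAllocationMemoryAllocateInfoNV dedicated = {
+    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV, NULL,
+    image, VK_NULL_HANDLE /* buffer */ };
+VkMemoryAllocateInfo alloc = {
+    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &dedicated,
+    memReqs.size, memoryTypeIndex };
+VkDeviceMemory mem;
+VkResult res = vkAllocateMemory(device, &alloc, NULL, &mem);
+#endif
+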
+
+
+#define VK_AMD_draw_indirect_count 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count"
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+#endif
+
+#define VK_AMD_negative_viewport_height 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height"
+
+
+#define VK_AMD_gpu_shader_half_float 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float"
+
+
+#define VK_AMD_shader_ballot 1
+#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1
+#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot"
+
+
+#define VK_AMD_texture_gather_bias_lod 1
+#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1
+#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod"
+
+typedef struct VkTextureLODGatherFormatPropertiesAMD {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 supportsTextureGatherLODBiasAMD;
+} VkTextureLODGatherFormatPropertiesAMD;
+
+
+
+#define VK_AMD_shader_info 1
+#define VK_AMD_SHADER_INFO_SPEC_VERSION 1
+#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info"
+
+
+typedef enum VkShaderInfoTypeAMD {
+ VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0,
+ VK_SHADER_INFO_TYPE_BINARY_AMD = 1,
+ VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2,
+ VK_SHADER_INFO_TYPE_BEGIN_RANGE_AMD = VK_SHADER_INFO_TYPE_STATISTICS_AMD,
+ VK_SHADER_INFO_TYPE_END_RANGE_AMD = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD,
+ VK_SHADER_INFO_TYPE_RANGE_SIZE_AMD = (VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD - VK_SHADER_INFO_TYPE_STATISTICS_AMD + 1),
+ VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkShaderInfoTypeAMD;
+
+typedef struct VkShaderResourceUsageAMD {
+ uint32_t numUsedVgprs;
+ uint32_t numUsedSgprs;
+ uint32_t ldsSizePerLocalWorkGroup;
+ size_t ldsUsageSizeInBytes;
+ size_t scratchMemUsageInBytes;
+} VkShaderResourceUsageAMD;
+
+typedef struct VkShaderStatisticsInfoAMD {
+ VkShaderStageFlags shaderStageMask;
+ VkShaderResourceUsageAMD resourceUsage;
+ uint32_t numPhysicalVgprs;
+ uint32_t numPhysicalSgprs;
+ uint32_t numAvailableVgprs;
+ uint32_t numAvailableSgprs;
+ uint32_t computeWorkGroupSize[3];
+} VkShaderStatisticsInfoAMD;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD(
+ VkDevice device,
+ VkPipeline pipeline,
+ VkShaderStageFlagBits shaderStage,
+ VkShaderInfoTypeAMD infoType,
+ size_t* pInfoSize,
+ void* pInfo);
+#endif
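+
+/* Illustrative sketch only: fetching VGPR/SGPR statistics for the
+ * fragment stage of an assumed pipeline. For the fixed-size statistics
+ * query a single call with a preallocated struct suffices; pfnGetShaderInfo
+ * is assumed to have been fetched via vkGetDeviceProcAddr. */
+#if 0
+VkShaderStatisticsInfoAMD stats;
+size_t size = sizeof(stats);
+VkResult res = pfnGetShaderInfo(device, pipeline,
+                                VK_SHADER_STAGE_FRAGMENT_BIT,
+                                VK_SHADER_INFO_TYPE_STATISTICS_AMD,
+                                &size, &stats);
+/* on success stats.resourceUsage.numUsedVgprs etc. are valid */
+#endif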
+
+#define VK_AMD_shader_image_load_store_lod 1
+#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1
+#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod"
+
+
+#define VK_IMG_format_pvrtc 1
+#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1
+#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc"
+
+
+#define VK_NV_external_memory_capabilities 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities"
+
+
+typedef enum VkExternalMemoryHandleTypeFlagBitsNV {
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBitsNV;
+typedef VkFlags VkExternalMemoryHandleTypeFlagsNV;
+
+typedef enum VkExternalMemoryFeatureFlagBitsNV {
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004,
+ VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBitsNV;
+typedef VkFlags VkExternalMemoryFeatureFlagsNV;
+
+typedef struct VkExternalImageFormatPropertiesNV {
+ VkImageFormatProperties imageFormatProperties;
+ VkExternalMemoryFeatureFlagsNV externalMemoryFeatures;
+ VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes;
+ VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes;
+} VkExternalImageFormatPropertiesNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkExternalMemoryHandleTypeFlagsNV externalHandleType,
+ VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+#endif
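+
+/* Illustrative sketch only: asking whether an optimally tiled 2D RGBA8
+ * sampled image can be exported as an opaque Win32 handle. pfnQuery is
+ * assumed fetched via vkGetInstanceProcAddr on physicalDevice's instance. */
+#if 0
+VkExternalImageFormatPropertiesNV props;
+VkResult res = pfnQuery(physicalDevice, VK_FORMAT_R8G8B8A8_UNORM,
+                        VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
+                        VK_IMAGE_USAGE_SAMPLED_BIT, 0,
+                        VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV,
+                        &props);
+if (res == VK_SUCCESS &&
+    (props.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV)) {
+    /* export is supported for this format/usage combination */
+}
+#endif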
+
+#define VK_NV_external_memory 1
+#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory"
+
+typedef struct VkExternalMemoryImageCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExternalMemoryImageCreateInfoNV;
+
+typedef struct VkExportMemoryAllocateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExportMemoryAllocateInfoNV;
+
+
+
+#define VK_EXT_validation_flags 1
+#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags"
+
+
+typedef enum VkValidationCheckEXT {
+ VK_VALIDATION_CHECK_ALL_EXT = 0,
+ VK_VALIDATION_CHECK_SHADERS_EXT = 1,
+ VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT,
+ VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_SHADERS_EXT,
+ VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_SHADERS_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1),
+ VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationCheckEXT;
+
+typedef struct VkValidationFlagsEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t disabledValidationCheckCount;
+ VkValidationCheckEXT* pDisabledValidationChecks;
+} VkValidationFlagsEXT;
+
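+/* Illustrative sketch only: disabling shader validation by chaining
+ * VkValidationFlagsEXT into an assumed, otherwise fully populated
+ * VkInstanceCreateInfo before vkCreateInstance. */
+#if 0
+VkValidationCheckEXT disabled[1] = { VK_VALIDATION_CHECK_SHADERS_EXT };
+VkValidationFlagsEXT vf = {
+    VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, NULL, 1, disabled };
+instanceCreateInfo.pNext = &vf;
+VkResult res = vkCreateInstance(&instanceCreateInfo, NULL, &instance);
+#endif
+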
+
+
+#define VK_EXT_shader_subgroup_ballot 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot"
+
+
+#define VK_EXT_shader_subgroup_vote 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote"
+
+
+#define VK_NVX_device_generated_commands 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX)
+
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands"
+
+
+typedef enum VkIndirectCommandsTokenTypeNVX {
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX = 0,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX = 1,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX = 2,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX = 3,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX = 4,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX = 5,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX = 6,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX = 7,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX + 1),
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsTokenTypeNVX;
+
+typedef enum VkObjectEntryTypeNVX {
+ VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX = 0,
+ VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX = 1,
+ VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX = 2,
+ VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX = 3,
+ VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX = 4,
+ VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX,
+ VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX,
+ VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX + 1),
+ VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryTypeNVX;
+
+
+typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX {
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsLayoutUsageFlagBitsNVX;
+typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX;
+
+typedef enum VkObjectEntryUsageFlagBitsNVX {
+ VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001,
+ VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002,
+ VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryUsageFlagBitsNVX;
+typedef VkFlags VkObjectEntryUsageFlagsNVX;
+
+typedef struct VkDeviceGeneratedCommandsFeaturesNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 computeBindingPointSupport;
+} VkDeviceGeneratedCommandsFeaturesNVX;
+
+typedef struct VkDeviceGeneratedCommandsLimitsNVX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t maxIndirectCommandsLayoutTokenCount;
+ uint32_t maxObjectEntryCounts;
+ uint32_t minSequenceCountBufferOffsetAlignment;
+ uint32_t minSequenceIndexBufferOffsetAlignment;
+ uint32_t minCommandsTokenBufferOffsetAlignment;
+} VkDeviceGeneratedCommandsLimitsNVX;
+
+typedef struct VkIndirectCommandsTokenNVX {
+ VkIndirectCommandsTokenTypeNVX tokenType;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+} VkIndirectCommandsTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutTokenNVX {
+ VkIndirectCommandsTokenTypeNVX tokenType;
+ uint32_t bindingUnit;
+ uint32_t dynamicCount;
+ uint32_t divisor;
+} VkIndirectCommandsLayoutTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutCreateInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkIndirectCommandsLayoutUsageFlagsNVX flags;
+ uint32_t tokenCount;
+ const VkIndirectCommandsLayoutTokenNVX* pTokens;
+} VkIndirectCommandsLayoutCreateInfoNVX;
+
+typedef struct VkCmdProcessCommandsInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectTableNVX objectTable;
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t indirectCommandsTokenCount;
+ const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens;
+ uint32_t maxSequencesCount;
+ VkCommandBuffer targetCommandBuffer;
+ VkBuffer sequencesCountBuffer;
+ VkDeviceSize sequencesCountOffset;
+ VkBuffer sequencesIndexBuffer;
+ VkDeviceSize sequencesIndexOffset;
+} VkCmdProcessCommandsInfoNVX;
+
+typedef struct VkCmdReserveSpaceForCommandsInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectTableNVX objectTable;
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t maxSequencesCount;
+} VkCmdReserveSpaceForCommandsInfoNVX;
+
+typedef struct VkObjectTableCreateInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t objectCount;
+ const VkObjectEntryTypeNVX* pObjectEntryTypes;
+ const uint32_t* pObjectEntryCounts;
+ const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags;
+ uint32_t maxUniformBuffersPerDescriptor;
+ uint32_t maxStorageBuffersPerDescriptor;
+ uint32_t maxStorageImagesPerDescriptor;
+ uint32_t maxSampledImagesPerDescriptor;
+ uint32_t maxPipelineLayouts;
+} VkObjectTableCreateInfoNVX;
+
+typedef struct VkObjectTableEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+} VkObjectTableEntryNVX;
+
+typedef struct VkObjectTablePipelineEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipeline pipeline;
+} VkObjectTablePipelineEntryNVX;
+
+typedef struct VkObjectTableDescriptorSetEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipelineLayout pipelineLayout;
+ VkDescriptorSet descriptorSet;
+} VkObjectTableDescriptorSetEntryNVX;
+
+typedef struct VkObjectTableVertexBufferEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkBuffer buffer;
+} VkObjectTableVertexBufferEntryNVX;
+
+typedef struct VkObjectTableIndexBufferEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkBuffer buffer;
+ VkIndexType indexType;
+} VkObjectTableIndexBufferEntryNVX;
+
+typedef struct VkObjectTablePushConstantEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipelineLayout pipelineLayout;
+ VkShaderStageFlags stageFlags;
+} VkObjectTablePushConstantEntryNVX;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable);
+typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices);
+typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX(
+ VkCommandBuffer commandBuffer,
+ const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX(
+ VkCommandBuffer commandBuffer,
+ const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX(
+ VkDevice device,
+ const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX(
+ VkDevice device,
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX(
+ VkDevice device,
+ const VkObjectTableCreateInfoNVX* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkObjectTableNVX* pObjectTable);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ uint32_t objectCount,
+ const VkObjectTableEntryNVX* const* ppObjectTableEntries,
+ const uint32_t* pObjectIndices);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ uint32_t objectCount,
+ const VkObjectEntryTypeNVX* pObjectEntryTypes,
+ const uint32_t* pObjectIndices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
+ VkPhysicalDevice physicalDevice,
+ VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
+ VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+#endif
+
+#define VK_NV_clip_space_w_scaling 1
+#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1
+#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling"
+
+typedef struct VkViewportWScalingNV {
+ float xcoeff;
+ float ycoeff;
+} VkViewportWScalingNV;
+
+typedef struct VkPipelineViewportWScalingStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 viewportWScalingEnable;
+ uint32_t viewportCount;
+ const VkViewportWScalingNV* pViewportWScalings;
+} VkPipelineViewportWScalingStateCreateInfoNV;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewportWScalingNV* pViewportWScalings);
+#endif
+
+#define VK_EXT_direct_mode_display 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display);
+#endif
+
+#define VK_EXT_display_surface_counter 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter"
+#define VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT
+
+
+typedef enum VkSurfaceCounterFlagBitsEXT {
+ VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001,
+ VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSurfaceCounterFlagBitsEXT;
+typedef VkFlags VkSurfaceCounterFlagsEXT;
+
+typedef struct VkSurfaceCapabilities2EXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+ VkSurfaceCounterFlagsEXT supportedSurfaceCounters;
+} VkSurfaceCapabilities2EXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+#endif
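+
+/* Illustrative sketch only: querying surface capabilities together with
+ * the supported counter bits. physicalDevice and surface are assumed;
+ * pfnGetCaps2 is assumed fetched via vkGetInstanceProcAddr. */
+#if 0
+VkSurfaceCapabilities2EXT caps;
+caps.sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT;
+caps.pNext = NULL;
+VkResult res = pfnGetCaps2(physicalDevice, surface, &caps);
+if (res == VK_SUCCESS &&
+    (caps.supportedSurfaceCounters & VK_SURFACE_COUNTER_VBLANK_EXT)) {
+    /* a vblank counter can be enabled on swapchains of this surface */
+}
+#endif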
+
+#define VK_EXT_display_control 1
+#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control"
+
+
+typedef enum VkDisplayPowerStateEXT {
+ VK_DISPLAY_POWER_STATE_OFF_EXT = 0,
+ VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1,
+ VK_DISPLAY_POWER_STATE_ON_EXT = 2,
+ VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT,
+ VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT,
+ VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = (VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1),
+ VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayPowerStateEXT;
+
+typedef enum VkDeviceEventTypeEXT {
+ VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0,
+ VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+ VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+ VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1),
+ VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceEventTypeEXT;
+
+typedef enum VkDisplayEventTypeEXT {
+ VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0,
+ VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+ VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+ VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1),
+ VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayEventTypeEXT;
+
+typedef struct VkDisplayPowerInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayPowerStateEXT powerState;
+} VkDisplayPowerInfoEXT;
+
+typedef struct VkDeviceEventInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceEventTypeEXT deviceEvent;
+} VkDeviceEventInfoEXT;
+
+typedef struct VkDisplayEventInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayEventTypeEXT displayEvent;
+} VkDisplayEventInfoEXT;
+
+typedef struct VkSwapchainCounterCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkSurfaceCounterFlagsEXT surfaceCounters;
+} VkSwapchainCounterCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT(
+ VkDevice device,
+ VkDisplayKHR display,
+ const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT(
+ VkDevice device,
+ const VkDeviceEventInfoEXT* pDeviceEventInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT(
+ VkDevice device,
+ VkDisplayKHR display,
+ const VkDisplayEventInfoEXT* pDisplayEventInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ VkSurfaceCounterFlagBitsEXT counter,
+ uint64_t* pCounterValue);
+#endif
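+
+/* Illustrative sketch only: powering a direct-mode display off, then
+ * obtaining a fence that signals on the next display hot-plug. device
+ * and display are assumed handles; the pfn* pointers are assumed fetched
+ * via vkGetDeviceProcAddr. */
+#if 0
+VkDisplayPowerInfoEXT power = {
+    VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT, NULL,
+    VK_DISPLAY_POWER_STATE_OFF_EXT };
+pfnDisplayPowerControl(device, display, &power);
+VkDeviceEventInfoEXT evt = {
+    VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT, NULL,
+    VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT };
+VkFence fence;
+VkResult res = pfnRegisterDeviceEvent(device, &evt, NULL, &fence);
+#endif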
+
+#define VK_GOOGLE_display_timing 1
+#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1
+#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing"
+
+typedef struct VkRefreshCycleDurationGOOGLE {
+ uint64_t refreshDuration;
+} VkRefreshCycleDurationGOOGLE;
+
+typedef struct VkPastPresentationTimingGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+ uint64_t actualPresentTime;
+ uint64_t earliestPresentTime;
+ uint64_t presentMargin;
+} VkPastPresentationTimingGOOGLE;
+
+typedef struct VkPresentTimeGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+} VkPresentTimeGOOGLE;
+
+typedef struct VkPresentTimesInfoGOOGLE {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentTimeGOOGLE* pTimes;
+} VkPresentTimesInfoGOOGLE;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pPresentationTimingCount,
+ VkPastPresentationTimingGOOGLE* pPresentationTimings);
+#endif
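+
+/* Illustrative sketch only: the usual count-then-fetch idiom for past
+ * presentation timings, using a fixed-size buffer for brevity. device
+ * and swapchain are assumed; pfnGetPastTimings is assumed fetched. */
+#if 0
+uint32_t count = 0;
+pfnGetPastTimings(device, swapchain, &count, NULL);
+VkPastPresentationTimingGOOGLE timings[64];
+if (count > 64) count = 64; /* clamp to the sketch's fixed buffer */
+pfnGetPastTimings(device, swapchain, &count, timings);
+/* timings[i].actualPresentTime - desiredPresentTime gives the slip */
+#endif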
+
+#define VK_NV_sample_mask_override_coverage 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage"
+
+
+#define VK_NV_geometry_shader_passthrough 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough"
+
+
+#define VK_NV_viewport_array2 1
+#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2"
+
+
+#define VK_NVX_multiview_per_view_attributes 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes"
+
+typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 perViewPositionAllComponents;
+} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX;
+
+
+
+#define VK_NV_viewport_swizzle 1
+#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle"
+
+
+typedef enum VkViewportCoordinateSwizzleNV {
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1),
+ VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkViewportCoordinateSwizzleNV;
+
+typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV;
+
+typedef struct VkViewportSwizzleNV {
+ VkViewportCoordinateSwizzleNV x;
+ VkViewportCoordinateSwizzleNV y;
+ VkViewportCoordinateSwizzleNV z;
+ VkViewportCoordinateSwizzleNV w;
+} VkViewportSwizzleNV;
+
+typedef struct VkPipelineViewportSwizzleStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineViewportSwizzleStateCreateFlagsNV flags;
+ uint32_t viewportCount;
+ const VkViewportSwizzleNV* pViewportSwizzles;
+} VkPipelineViewportSwizzleStateCreateInfoNV;
+
+
+
+#define VK_EXT_discard_rectangles 1
+#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1
+#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles"
+
+
+typedef enum VkDiscardRectangleModeEXT {
+ VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0,
+ VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1,
+ VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT,
+ VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT,
+ VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1),
+ VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDiscardRectangleModeEXT;
+
+typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT;
+
+typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxDiscardRectangles;
+} VkPhysicalDeviceDiscardRectanglePropertiesEXT;
+
+typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDiscardRectangleStateCreateFlagsEXT flags;
+ VkDiscardRectangleModeEXT discardRectangleMode;
+ uint32_t discardRectangleCount;
+ const VkRect2D* pDiscardRectangles;
+} VkPipelineDiscardRectangleStateCreateInfoEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount,
+ const VkRect2D* pDiscardRectangles);
+#endif
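+
+/* Illustrative sketch only: excluding one 256x256 screen-space rectangle
+ * from rasterization. Assumes the bound pipeline was created with
+ * exclusive discard-rectangle state and VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT,
+ * and that pfnCmdSetDiscardRectangle was fetched via vkGetDeviceProcAddr. */
+#if 0
+VkRect2D rect = { { 0, 0 }, { 256, 256 } };
+pfnCmdSetDiscardRectangle(cmd, 0 /* first */, 1 /* count */, &rect);
+#endif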
+
+#define VK_EXT_conservative_rasterization 1
+#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1
+#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization"
+
+
+typedef enum VkConservativeRasterizationModeEXT {
+ VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0,
+ VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1,
+ VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2,
+ VK_CONSERVATIVE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
+ VK_CONSERVATIVE_RASTERIZATION_MODE_END_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT,
+ VK_CONSERVATIVE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT - VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT + 1),
+ VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkConservativeRasterizationModeEXT;
+
+typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT;
+
+typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ float primitiveOverestimationSize;
+ float maxExtraPrimitiveOverestimationSize;
+ float extraPrimitiveOverestimationSizeGranularity;
+ VkBool32 primitiveUnderestimation;
+ VkBool32 conservativePointAndLineRasterization;
+ VkBool32 degenerateTrianglesRasterized;
+ VkBool32 degenerateLinesRasterized;
+ VkBool32 fullyCoveredFragmentShaderInputVariable;
+ VkBool32 conservativeRasterizationPostDepthCoverage;
+} VkPhysicalDeviceConservativeRasterizationPropertiesEXT;
+
+typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineRasterizationConservativeStateCreateFlagsEXT flags;
+ VkConservativeRasterizationModeEXT conservativeRasterizationMode;
+ float extraPrimitiveOverestimationSize;
+} VkPipelineRasterizationConservativeStateCreateInfoEXT;
+
+
+
+#define VK_EXT_swapchain_colorspace 1
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 3
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace"
+
+
+#define VK_EXT_hdr_metadata 1
+#define VK_EXT_HDR_METADATA_SPEC_VERSION 1
+#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata"
+
+typedef struct VkXYColorEXT {
+ float x;
+ float y;
+} VkXYColorEXT;
+
+typedef struct VkHdrMetadataEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkXYColorEXT displayPrimaryRed;
+ VkXYColorEXT displayPrimaryGreen;
+ VkXYColorEXT displayPrimaryBlue;
+ VkXYColorEXT whitePoint;
+ float maxLuminance;
+ float minLuminance;
+ float maxContentLightLevel;
+ float maxFrameAverageLightLevel;
+} VkHdrMetadataEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT(
+ VkDevice device,
+ uint32_t swapchainCount,
+ const VkSwapchainKHR* pSwapchains,
+ const VkHdrMetadataEXT* pMetadata);
+#endif
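+
+/* Illustrative sketch only: tagging one swapchain with BT.2020-style
+ * primaries and mastering luminance. The numbers are sample data, not
+ * recommendations; device and swapchain are assumed, and pfnSetHdrMetadata
+ * is assumed fetched via vkGetDeviceProcAddr. */
+#if 0
+VkHdrMetadataEXT hdr = {
+    VK_STRUCTURE_TYPE_HDR_METADATA_EXT, NULL,
+    { 0.708f, 0.292f },    /* displayPrimaryRed   */
+    { 0.170f, 0.797f },    /* displayPrimaryGreen */
+    { 0.131f, 0.046f },    /* displayPrimaryBlue  */
+    { 0.3127f, 0.3290f },  /* whitePoint (D65)    */
+    1000.0f, 0.001f,       /* max/min luminance in nits */
+    1000.0f, 400.0f };     /* maxCLL / maxFALL */
+pfnSetHdrMetadata(device, 1, &swapchain, &hdr);
+#endif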
+
+#define VK_EXT_external_memory_dma_buf 1
+#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1
+#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf"
+
+
+#define VK_EXT_queue_family_foreign 1
+#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1
+#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign"
+#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2)
+
+
+#define VK_EXT_debug_utils 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT)
+
+#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 1
+#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils"
+
+typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT;
+typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT;
+
+typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT {
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001,
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010,
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100,
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000,
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugUtilsMessageSeverityFlagBitsEXT;
+typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT;
+
+typedef enum VkDebugUtilsMessageTypeFlagBitsEXT {
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugUtilsMessageTypeFlagBitsEXT;
+typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT;
+
+typedef struct VkDebugUtilsObjectNameInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectType objectType;
+ uint64_t objectHandle;
+ const char* pObjectName;
+} VkDebugUtilsObjectNameInfoEXT;
+
+typedef struct VkDebugUtilsObjectTagInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectType objectType;
+ uint64_t objectHandle;
+ uint64_t tagName;
+ size_t tagSize;
+ const void* pTag;
+} VkDebugUtilsObjectTagInfoEXT;
+
+typedef struct VkDebugUtilsLabelEXT {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pLabelName;
+ float color[4];
+} VkDebugUtilsLabelEXT;
+
+typedef struct VkDebugUtilsMessengerCallbackDataEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugUtilsMessengerCallbackDataFlagsEXT flags;
+ const char* pMessageIdName;
+ int32_t messageIdNumber;
+ const char* pMessage;
+ uint32_t queueLabelCount;
+ VkDebugUtilsLabelEXT* pQueueLabels;
+ uint32_t cmdBufLabelCount;
+ VkDebugUtilsLabelEXT* pCmdBufLabels;
+ uint32_t objectCount;
+ VkDebugUtilsObjectNameInfoEXT* pObjects;
+} VkDebugUtilsMessengerCallbackDataEXT;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)(
+ VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* pUserData);
+
+typedef struct VkDebugUtilsMessengerCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugUtilsMessengerCreateFlagsEXT flags;
+ VkDebugUtilsMessageSeverityFlagsEXT messageSeverity;
+ VkDebugUtilsMessageTypeFlagsEXT messageType;
+ PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback;
+ void* pUserData;
+} VkDebugUtilsMessengerCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo);
+typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
+typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue);
+typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger);
+typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT(
+ VkDevice device,
+ const VkDebugUtilsObjectNameInfoEXT* pNameInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT(
+ VkDevice device,
+ const VkDebugUtilsObjectTagInfoEXT* pTagInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT(
+ VkQueue queue,
+ const VkDebugUtilsLabelEXT* pLabelInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT(
+ VkQueue queue);
+
+VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT(
+ VkQueue queue,
+ const VkDebugUtilsLabelEXT* pLabelInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT(
+ VkCommandBuffer commandBuffer,
+ const VkDebugUtilsLabelEXT* pLabelInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT(
+ VkCommandBuffer commandBuffer,
+ const VkDebugUtilsLabelEXT* pLabelInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT(
+ VkInstance instance,
+ const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDebugUtilsMessengerEXT* pMessenger);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT(
+ VkInstance instance,
+ VkDebugUtilsMessengerEXT messenger,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT(
+ VkInstance instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageTypes,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);
+#endif
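+
+/* Illustrative sketch only: creating a messenger that reports warnings
+ * and errors from general and validation messages. myUtilsCallback is an
+ * assumed user function matching PFN_vkDebugUtilsMessengerCallbackEXT. */
+#if 0
+VkDebugUtilsMessengerCreateInfoEXT ci = {
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, NULL, 0,
+    VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+        VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+    VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+        VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
+    myUtilsCallback, NULL /* pUserData */ };
+VkDebugUtilsMessengerEXT messenger;
+PFN_vkCreateDebugUtilsMessengerEXT pfnCreate =
+    (PFN_vkCreateDebugUtilsMessengerEXT)
+        vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT");
+VkResult res = pfnCreate(instance, &ci, NULL, &messenger);
+#endif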
+
+#define VK_EXT_sampler_filter_minmax 1
+#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 1
+#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax"
+
+
+typedef enum VkSamplerReductionModeEXT {
+ VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = 0,
+ VK_SAMPLER_REDUCTION_MODE_MIN_EXT = 1,
+ VK_SAMPLER_REDUCTION_MODE_MAX_EXT = 2,
+ VK_SAMPLER_REDUCTION_MODE_BEGIN_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT,
+ VK_SAMPLER_REDUCTION_MODE_END_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_MAX_EXT,
+ VK_SAMPLER_REDUCTION_MODE_RANGE_SIZE_EXT = (VK_SAMPLER_REDUCTION_MODE_MAX_EXT - VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT + 1),
+ VK_SAMPLER_REDUCTION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSamplerReductionModeEXT;
+
+typedef struct VkSamplerReductionModeCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkSamplerReductionModeEXT reductionMode;
+} VkSamplerReductionModeCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 filterMinmaxSingleComponentFormats;
+ VkBool32 filterMinmaxImageComponentMapping;
+} VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT;
+
+
+
+#define VK_AMD_gpu_shader_int16 1
+#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 1
+#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16"
+
+
+#define VK_AMD_mixed_attachment_samples 1
+#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1
+#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples"
+
+
+#define VK_AMD_shader_fragment_mask 1
+#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1
+#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask"
+
+
+#define VK_EXT_shader_stencil_export 1
+#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1
+#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export"
+
+
+#define VK_EXT_sample_locations 1
+#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1
+#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations"
+
+typedef struct VkSampleLocationEXT {
+ float x;
+ float y;
+} VkSampleLocationEXT;
+
+typedef struct VkSampleLocationsInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkSampleCountFlagBits sampleLocationsPerPixel;
+ VkExtent2D sampleLocationGridSize;
+ uint32_t sampleLocationsCount;
+ const VkSampleLocationEXT* pSampleLocations;
+} VkSampleLocationsInfoEXT;
+
+typedef struct VkAttachmentSampleLocationsEXT {
+ uint32_t attachmentIndex;
+ VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkAttachmentSampleLocationsEXT;
+
+typedef struct VkSubpassSampleLocationsEXT {
+ uint32_t subpassIndex;
+ VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkSubpassSampleLocationsEXT;
+
+typedef struct VkRenderPassSampleLocationsBeginInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t attachmentInitialSampleLocationsCount;
+ const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations;
+ uint32_t postSubpassSampleLocationsCount;
+ const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations;
+} VkRenderPassSampleLocationsBeginInfoEXT;
+
+typedef struct VkPipelineSampleLocationsStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 sampleLocationsEnable;
+ VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkPipelineSampleLocationsStateCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkSampleCountFlags sampleLocationSampleCounts;
+ VkExtent2D maxSampleLocationGridSize;
+ float sampleLocationCoordinateRange[2];
+ uint32_t sampleLocationSubPixelBits;
+ VkBool32 variableSampleLocations;
+} VkPhysicalDeviceSampleLocationsPropertiesEXT;
+
+typedef struct VkMultisamplePropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkExtent2D maxSampleLocationGridSize;
+} VkMultisamplePropertiesEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT(
+ VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT(
+ VkPhysicalDevice physicalDevice,
+ VkSampleCountFlagBits samples,
+ VkMultisamplePropertiesEXT* pMultisampleProperties);
+#endif
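+
+/* Illustrative sketch only: overriding the four sample positions of a
+ * 4x-MSAA pipeline for subsequent draws. Assumes the pipeline enables
+ * sample locations with dynamic state, and that pfnCmdSetSampleLocations
+ * was fetched via vkGetDeviceProcAddr. */
+#if 0
+VkSampleLocationEXT locs[4] = {
+    { 0.125f, 0.125f }, { 0.875f, 0.125f },
+    { 0.125f, 0.875f }, { 0.875f, 0.875f } };
+VkSampleLocationsInfoEXT info = {
+    VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, NULL,
+    VK_SAMPLE_COUNT_4_BIT, { 1, 1 } /* grid */, 4, locs };
+pfnCmdSetSampleLocations(cmd, &info);
+#endif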
+
+#define VK_EXT_blend_operation_advanced 1
+#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2
+#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced"
+
+
+typedef enum VkBlendOverlapEXT {
+ VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0,
+ VK_BLEND_OVERLAP_DISJOINT_EXT = 1,
+ VK_BLEND_OVERLAP_CONJOINT_EXT = 2,
+ VK_BLEND_OVERLAP_BEGIN_RANGE_EXT = VK_BLEND_OVERLAP_UNCORRELATED_EXT,
+ VK_BLEND_OVERLAP_END_RANGE_EXT = VK_BLEND_OVERLAP_CONJOINT_EXT,
+ VK_BLEND_OVERLAP_RANGE_SIZE_EXT = (VK_BLEND_OVERLAP_CONJOINT_EXT - VK_BLEND_OVERLAP_UNCORRELATED_EXT + 1),
+ VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkBlendOverlapEXT;
+
+typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 advancedBlendCoherentOperations;
+} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT;
+
+typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t advancedBlendMaxColorAttachments;
+ VkBool32 advancedBlendIndependentBlend;
+ VkBool32 advancedBlendNonPremultipliedSrcColor;
+ VkBool32 advancedBlendNonPremultipliedDstColor;
+ VkBool32 advancedBlendCorrelatedOverlap;
+ VkBool32 advancedBlendAllOperations;
+} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT;
+
+typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 srcPremultiplied;
+ VkBool32 dstPremultiplied;
+ VkBlendOverlapEXT blendOverlap;
+} VkPipelineColorBlendAdvancedStateCreateInfoEXT;
+
+
+
+#define VK_NV_fragment_coverage_to_color 1
+#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1
+#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color"
+
+typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV;
+
+typedef struct VkPipelineCoverageToColorStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCoverageToColorStateCreateFlagsNV flags;
+ VkBool32 coverageToColorEnable;
+ uint32_t coverageToColorLocation;
+} VkPipelineCoverageToColorStateCreateInfoNV;
+
+
+
+#define VK_NV_framebuffer_mixed_samples 1
+#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1
+#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples"
+
+
+typedef enum VkCoverageModulationModeNV {
+ VK_COVERAGE_MODULATION_MODE_NONE_NV = 0,
+ VK_COVERAGE_MODULATION_MODE_RGB_NV = 1,
+ VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2,
+ VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3,
+ VK_COVERAGE_MODULATION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_MODULATION_MODE_NONE_NV,
+ VK_COVERAGE_MODULATION_MODE_END_RANGE_NV = VK_COVERAGE_MODULATION_MODE_RGBA_NV,
+ VK_COVERAGE_MODULATION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_MODULATION_MODE_RGBA_NV - VK_COVERAGE_MODULATION_MODE_NONE_NV + 1),
+ VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkCoverageModulationModeNV;
+
+typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV;
+
+typedef struct VkPipelineCoverageModulationStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCoverageModulationStateCreateFlagsNV flags;
+ VkCoverageModulationModeNV coverageModulationMode;
+ VkBool32 coverageModulationTableEnable;
+ uint32_t coverageModulationTableCount;
+ const float* pCoverageModulationTable;
+} VkPipelineCoverageModulationStateCreateInfoNV;
+
+
+
+#define VK_NV_fill_rectangle 1
+#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1
+#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle"
+
+
+#define VK_EXT_post_depth_coverage 1
+#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1
+#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage"
+
+
+#define VK_EXT_validation_cache 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT)
+
+#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache"
+#define VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT
+
+
+typedef enum VkValidationCacheHeaderVersionEXT {
+ VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1,
+ VK_VALIDATION_CACHE_HEADER_VERSION_BEGIN_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT,
+ VK_VALIDATION_CACHE_HEADER_VERSION_END_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT,
+ VK_VALIDATION_CACHE_HEADER_VERSION_RANGE_SIZE_EXT = (VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT - VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + 1),
+ VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationCacheHeaderVersionEXT;
+
+typedef VkFlags VkValidationCacheCreateFlagsEXT;
+
+typedef struct VkValidationCacheCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkValidationCacheCreateFlagsEXT flags;
+ size_t initialDataSize;
+ const void* pInitialData;
+} VkValidationCacheCreateInfoEXT;
+
+typedef struct VkShaderModuleValidationCacheCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkValidationCacheEXT validationCache;
+} VkShaderModuleValidationCacheCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT(
+ VkDevice device,
+ const VkValidationCacheCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkValidationCacheEXT* pValidationCache);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT(
+ VkDevice device,
+ VkValidationCacheEXT validationCache,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT(
+ VkDevice device,
+ VkValidationCacheEXT dstCache,
+ uint32_t srcCacheCount,
+ const VkValidationCacheEXT* pSrcCaches);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT(
+ VkDevice device,
+ VkValidationCacheEXT validationCache,
+ size_t* pDataSize,
+ void* pData);
+#endif
+
+#define VK_EXT_descriptor_indexing 1
+#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2
+#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing"
+
+
+typedef enum VkDescriptorBindingFlagBitsEXT {
+ VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = 0x00000001,
+ VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = 0x00000002,
+ VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = 0x00000004,
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x00000008,
+ VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDescriptorBindingFlagBitsEXT;
+typedef VkFlags VkDescriptorBindingFlagsEXT;
+
+typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t bindingCount;
+ const VkDescriptorBindingFlagsEXT* pBindingFlags;
+} VkDescriptorSetLayoutBindingFlagsCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderInputAttachmentArrayDynamicIndexing;
+ VkBool32 shaderUniformTexelBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageTexelBufferArrayDynamicIndexing;
+ VkBool32 shaderUniformBufferArrayNonUniformIndexing;
+ VkBool32 shaderSampledImageArrayNonUniformIndexing;
+ VkBool32 shaderStorageBufferArrayNonUniformIndexing;
+ VkBool32 shaderStorageImageArrayNonUniformIndexing;
+ VkBool32 shaderInputAttachmentArrayNonUniformIndexing;
+ VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing;
+ VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing;
+ VkBool32 descriptorBindingUniformBufferUpdateAfterBind;
+ VkBool32 descriptorBindingSampledImageUpdateAfterBind;
+ VkBool32 descriptorBindingStorageImageUpdateAfterBind;
+ VkBool32 descriptorBindingStorageBufferUpdateAfterBind;
+ VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind;
+ VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind;
+ VkBool32 descriptorBindingUpdateUnusedWhilePending;
+ VkBool32 descriptorBindingPartiallyBound;
+ VkBool32 descriptorBindingVariableDescriptorCount;
+ VkBool32 runtimeDescriptorArray;
+} VkPhysicalDeviceDescriptorIndexingFeaturesEXT;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxUpdateAfterBindDescriptorsInAllPools;
+ VkBool32 shaderUniformBufferArrayNonUniformIndexingNative;
+ VkBool32 shaderSampledImageArrayNonUniformIndexingNative;
+ VkBool32 shaderStorageBufferArrayNonUniformIndexingNative;
+ VkBool32 shaderStorageImageArrayNonUniformIndexingNative;
+ VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative;
+ VkBool32 robustBufferAccessUpdateAfterBind;
+ VkBool32 quadDivergentImplicitLod;
+ uint32_t maxPerStageDescriptorUpdateAfterBindSamplers;
+ uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers;
+ uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers;
+ uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages;
+ uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages;
+ uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments;
+ uint32_t maxPerStageUpdateAfterBindResources;
+ uint32_t maxDescriptorSetUpdateAfterBindSamplers;
+ uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers;
+ uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
+ uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers;
+ uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
+ uint32_t maxDescriptorSetUpdateAfterBindSampledImages;
+ uint32_t maxDescriptorSetUpdateAfterBindStorageImages;
+ uint32_t maxDescriptorSetUpdateAfterBindInputAttachments;
+} VkPhysicalDeviceDescriptorIndexingPropertiesEXT;
+
+typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t descriptorSetCount;
+ const uint32_t* pDescriptorCounts;
+} VkDescriptorSetVariableDescriptorCountAllocateInfoEXT;
+
+typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupportEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxVariableDescriptorCount;
+} VkDescriptorSetVariableDescriptorCountLayoutSupportEXT;
+
+
+
+#define VK_EXT_shader_viewport_index_layer 1
+#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1
+#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer"
+
+
+#define VK_EXT_global_priority 1
+#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2
+#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority"
+
+
+typedef enum VkQueueGlobalPriorityEXT {
+ VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128,
+ VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256,
+ VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512,
+ VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024,
+ VK_QUEUE_GLOBAL_PRIORITY_BEGIN_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT,
+ VK_QUEUE_GLOBAL_PRIORITY_END_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT,
+ VK_QUEUE_GLOBAL_PRIORITY_RANGE_SIZE_EXT = (VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT - VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT + 1),
+ VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkQueueGlobalPriorityEXT;
+
+typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueueGlobalPriorityEXT globalPriority;
+} VkDeviceQueueGlobalPriorityCreateInfoEXT;
+
+
+
+#define VK_EXT_external_memory_host 1
+#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1
+#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host"
+
+typedef struct VkImportMemoryHostPointerInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+ void* pHostPointer;
+} VkImportMemoryHostPointerInfoEXT;
+
+typedef struct VkMemoryHostPointerPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t memoryTypeBits;
+} VkMemoryHostPointerPropertiesEXT;
+
+typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkDeviceSize minImportedHostPointerAlignment;
+} VkPhysicalDeviceExternalMemoryHostPropertiesEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT(
+ VkDevice device,
+ VkExternalMemoryHandleTypeFlagBits handleType,
+ const void* pHostPointer,
+ VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties);
+#endif
+
+#define VK_AMD_buffer_marker 1
+#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1
+#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker"
+
+typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ uint32_t marker);
+#endif
+
+#define VK_AMD_shader_core_properties 1
+#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1
+#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties"
+
+typedef struct VkPhysicalDeviceShaderCorePropertiesAMD {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t shaderEngineCount;
+ uint32_t shaderArraysPerEngineCount;
+ uint32_t computeUnitsPerShaderArray;
+ uint32_t simdPerComputeUnit;
+ uint32_t wavefrontsPerSimd;
+ uint32_t wavefrontSize;
+ uint32_t sgprsPerSimd;
+ uint32_t minSgprAllocation;
+ uint32_t maxSgprAllocation;
+ uint32_t sgprAllocationGranularity;
+ uint32_t vgprsPerSimd;
+ uint32_t minVgprAllocation;
+ uint32_t maxVgprAllocation;
+ uint32_t vgprAllocationGranularity;
+} VkPhysicalDeviceShaderCorePropertiesAMD;
+
+
+
+#define VK_EXT_vertex_attribute_divisor 1
+#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 1
+#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor"
+
+typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxVertexAttribDivisor;
+} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT;
+
+typedef struct VkVertexInputBindingDivisorDescriptionEXT {
+ uint32_t binding;
+ uint32_t divisor;
+} VkVertexInputBindingDivisorDescriptionEXT;
+
+typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t vertexBindingDivisorCount;
+ const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors;
+} VkPipelineVertexInputDivisorStateCreateInfoEXT;
+
+
+
+#define VK_NV_shader_subgroup_partitioned 1
+#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1
+#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned"
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
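
The validation-cache entry points declared above follow Vulkan's usual two-call pattern for opaque blobs. A minimal usage sketch, assuming a VkDevice created with VK_EXT_validation_cache enabled and the extension entry points already resolved (real code fetches them through vkGetDeviceProcAddr); error handling is elided:

    #include <vulkan/vulkan.h>
    #include <vector>

    // Illustrative helper: spirv/spirvSize are assumed to point at valid SPIR-V.
    void buildWithValidationCache(VkDevice device,
                                  const uint32_t* spirv, size_t spirvSize) {
        VkValidationCacheCreateInfoEXT cacheInfo = {};
        cacheInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
        cacheInfo.initialDataSize = 0;      // no previously saved cache blob
        cacheInfo.pInitialData = nullptr;

        VkValidationCacheEXT cache;
        vkCreateValidationCacheEXT(device, &cacheInfo, nullptr, &cache);

        // Chain the cache into shader-module creation so validation work
        // can be reused across runs.
        VkShaderModuleValidationCacheCreateInfoEXT vcInfo = {};
        vcInfo.sType =
            VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT;
        vcInfo.validationCache = cache;

        VkShaderModuleCreateInfo smInfo = {};
        smInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
        smInfo.pNext = &vcInfo;
        smInfo.codeSize = spirvSize;
        smInfo.pCode = spirv;

        VkShaderModule module;
        vkCreateShaderModule(device, &smInfo, nullptr, &module);

        // Two-call pattern: query the size, then fetch the data to persist.
        size_t dataSize = 0;
        vkGetValidationCacheDataEXT(device, cache, &dataSize, nullptr);
        std::vector<uint8_t> blob(dataSize);
        vkGetValidationCacheDataEXT(device, cache, &dataSize, blob.data());

        vkDestroyShaderModule(device, module, nullptr);
        vkDestroyValidationCacheEXT(device, cache, nullptr);
    }
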
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_ios.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_ios.h
new file mode 100644
index 0000000000..a0924816d5
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_ios.h
@@ -0,0 +1,58 @@
+#ifndef VULKAN_IOS_H_
+#define VULKAN_IOS_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_MVK_ios_surface 1
+#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
+
+typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
+
+typedef struct VkIOSSurfaceCreateInfoMVK {
+ VkStructureType sType;
+ const void* pNext;
+ VkIOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+} VkIOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
+ VkInstance instance,
+ const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_macos.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_macos.h
new file mode 100644
index 0000000000..ff0b701801
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_macos.h
@@ -0,0 +1,58 @@
+#ifndef VULKAN_MACOS_H_
+#define VULKAN_MACOS_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_MVK_macos_surface 1
+#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
+
+typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
+
+typedef struct VkMacOSSurfaceCreateInfoMVK {
+ VkStructureType sType;
+ const void* pNext;
+ VkMacOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+} VkMacOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
+ VkInstance instance,
+ const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
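
The iOS and macOS headers above each expose one MoltenVK surface extension; apart from the MVK names they are used identically. A minimal sketch for the macOS path, assuming an instance created with VK_KHR_surface and VK_MVK_macos_surface enabled, vulkan.h compiled with VK_USE_PLATFORM_MACOS_MVK defined, and an NSView backed by a CAMetalLayer:

    #include <vulkan/vulkan.h>

    VkSurfaceKHR makeMacSurface(VkInstance instance, const void* nsView) {
        VkMacOSSurfaceCreateInfoMVK createInfo = {};
        createInfo.sType = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK;
        createInfo.pView = nsView;          // the CAMetalLayer-backed view

        VkSurfaceKHR surface = VK_NULL_HANDLE;
        if (vkCreateMacOSSurfaceMVK(instance, &createInfo, nullptr,
                                    &surface) != VK_SUCCESS) {
            return VK_NULL_HANDLE;          // creation failed
        }
        return surface;
    }
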
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_win32.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_win32.h
new file mode 100644
index 0000000000..6a85409ebe
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_win32.h
@@ -0,0 +1,276 @@
+#ifndef VULKAN_WIN32_H_
+#define VULKAN_WIN32_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_KHR_win32_surface 1
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+
+typedef struct VkWin32SurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkWin32SurfaceCreateFlagsKHR flags;
+ HINSTANCE hinstance;
+ HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+ VkInstance instance,
+ const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex);
+#endif
+
+#define VK_KHR_external_memory_win32 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32"
+
+typedef struct VkImportMemoryWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+ HANDLE handle;
+ LPCWSTR name;
+} VkImportMemoryWin32HandleInfoKHR;
+
+typedef struct VkExportMemoryWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+} VkExportMemoryWin32HandleInfoKHR;
+
+typedef struct VkMemoryWin32HandlePropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t memoryTypeBits;
+} VkMemoryWin32HandlePropertiesKHR;
+
+typedef struct VkMemoryGetWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkMemoryGetWin32HandleInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(
+ VkDevice device,
+ const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+ HANDLE* pHandle);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(
+ VkDevice device,
+ VkExternalMemoryHandleTypeFlagBits handleType,
+ HANDLE handle,
+ VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+#endif
+
+#define VK_KHR_win32_keyed_mutex 1
+#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex"
+
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t acquireCount;
+ const VkDeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeouts;
+ uint32_t releaseCount;
+ const VkDeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoKHR;
+
+
+
+#define VK_KHR_external_semaphore_win32 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32"
+
+typedef struct VkImportSemaphoreWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkSemaphoreImportFlags flags;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+ HANDLE handle;
+ LPCWSTR name;
+} VkImportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkExportSemaphoreWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+} VkExportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkD3D12FenceSubmitInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreValuesCount;
+ const uint64_t* pWaitSemaphoreValues;
+ uint32_t signalSemaphoreValuesCount;
+ const uint64_t* pSignalSemaphoreValues;
+} VkD3D12FenceSubmitInfoKHR;
+
+typedef struct VkSemaphoreGetWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkSemaphoreGetWin32HandleInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(
+ VkDevice device,
+ const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(
+ VkDevice device,
+ const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+ HANDLE* pHandle);
+#endif
+
+#define VK_KHR_external_fence_win32 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
+
+typedef struct VkImportFenceWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFence fence;
+ VkFenceImportFlags flags;
+ VkExternalFenceHandleTypeFlagBits handleType;
+ HANDLE handle;
+ LPCWSTR name;
+} VkImportFenceWin32HandleInfoKHR;
+
+typedef struct VkExportFenceWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+} VkExportFenceWin32HandleInfoKHR;
+
+typedef struct VkFenceGetWin32HandleInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFence fence;
+ VkExternalFenceHandleTypeFlagBits handleType;
+} VkFenceGetWin32HandleInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
+ VkDevice device,
+ const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
+ VkDevice device,
+ const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+ HANDLE* pHandle);
+#endif
+
+#define VK_NV_external_memory_win32 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
+
+typedef struct VkImportMemoryWin32HandleInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleType;
+ HANDLE handle;
+} VkImportMemoryWin32HandleInfoNV;
+
+typedef struct VkExportMemoryWin32HandleInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+} VkExportMemoryWin32HandleInfoNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkExternalMemoryHandleTypeFlagsNV handleType,
+ HANDLE* pHandle);
+#endif
+
+#define VK_NV_win32_keyed_mutex 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
+
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t acquireCount;
+ const VkDeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeoutMilliseconds;
+ uint32_t releaseCount;
+ const VkDeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoNV;
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
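
A minimal sketch of the external-memory export path above, assuming a device created with VK_KHR_external_memory_win32 enabled, memory allocated with a matching VkExportMemoryAllocateInfo, and vulkan.h compiled with VK_USE_PLATFORM_WIN32_KHR defined:

    #include <vulkan/vulkan.h>

    HANDLE exportMemoryHandle(VkDevice device, VkDeviceMemory memory) {
        VkMemoryGetWin32HandleInfoKHR getInfo = {};
        getInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
        getInfo.memory = memory;
        getInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT;

        HANDLE handle = nullptr;
        if (vkGetMemoryWin32HandleKHR(device, &getInfo, &handle) != VK_SUCCESS) {
            return nullptr;
        }
        // The caller owns the handle and must CloseHandle() it when done.
        return handle;
    }
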
diff --git a/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h
new file mode 100644
index 0000000000..ba03600602
--- /dev/null
+++ b/gfx/skia/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h
@@ -0,0 +1,66 @@
+#ifndef VULKAN_XCB_H_
+#define VULKAN_XCB_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2018 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_KHR_xcb_surface 1
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+
+typedef struct VkXcbSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkXcbSurfaceCreateFlagsKHR flags;
+ xcb_connection_t* connection;
+ xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+ VkInstance instance,
+ const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ xcb_connection_t* connection,
+ xcb_visualid_t visual_id);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
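
A minimal sketch of the xcb surface path, assuming an instance created with VK_KHR_surface and VK_KHR_xcb_surface enabled, vulkan.h compiled with VK_USE_PLATFORM_XCB_KHR defined, and an already-created xcb window:

    #include <vulkan/vulkan.h>
    #include <xcb/xcb.h>

    VkSurfaceKHR makeXcbSurface(VkInstance instance,
                                xcb_connection_t* connection,
                                xcb_window_t window) {
        VkXcbSurfaceCreateInfoKHR createInfo = {};
        createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
        createInfo.connection = connection;
        createInfo.window = window;

        VkSurfaceKHR surface = VK_NULL_HANDLE;
        vkCreateXcbSurfaceKHR(instance, &createInfo, nullptr, &surface);
        // Presentation support should be verified per queue family with
        // vkGetPhysicalDeviceXcbPresentationSupportKHR().
        return surface;
    }
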
diff --git a/gfx/skia/skia/include/utils/Sk3D.h b/gfx/skia/skia/include/utils/Sk3D.h
new file mode 100644
index 0000000000..9b233ba0c9
--- /dev/null
+++ b/gfx/skia/skia/include/utils/Sk3D.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk3D_DEFINED
+#define Sk3D_DEFINED
+
+#include "include/core/SkMatrix44.h"
+#include "include/core/SkPoint3.h"
+
+SK_API void Sk3LookAt(SkMatrix44* dst, const SkPoint3& eye, const SkPoint3& center, const SkPoint3& up);
+SK_API bool Sk3Perspective(SkMatrix44* dst, float near, float far, float angle);
+SK_API void Sk3MapPts(SkPoint dst[], const SkMatrix44& m4, const SkPoint3 src[], int count);
+
+#endif
+
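
A short sketch of how the three Sk3D helpers compose, with an illustrative camera placed at (0, 0, 3) looking at the origin; the angle parameter is assumed to be the field of view in radians:

    #include "include/utils/Sk3D.h"

    void projectQuad(SkPoint out[4]) {
        SkMatrix44 camera, persp, viewProj;
        Sk3LookAt(&camera, {0, 0, 3}, {0, 0, 0}, {0, 1, 0});  // eye/center/up
        Sk3Perspective(&persp, 0.1f, 10.0f, SkDegreesToRadians(45));
        viewProj.setConcat(persp, camera);

        const SkPoint3 corners[4] = {
            {-1, -1, 0}, {1, -1, 0}, {1, 1, 0}, {-1, 1, 0},
        };
        Sk3MapPts(out, viewProj, corners, 4);  // perspective-divided 2D points
    }
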
diff --git a/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h b/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h
new file mode 100644
index 0000000000..40660e13a5
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnimCodecPlayer_DEFINED
+#define SkAnimCodecPlayer_DEFINED
+
+#include "include/codec/SkCodec.h"
+
+class SkImage;
+
+class SkAnimCodecPlayer {
+public:
+ SkAnimCodecPlayer(std::unique_ptr<SkCodec> codec);
+ ~SkAnimCodecPlayer();
+
+ /**
+ * Returns the current frame of the animation. This defaults to the first frame for
+ * animated codecs (i.e. msec = 0). Calling this multiple times (without calling seek())
+ * will always return the same image object (or null if there was an error).
+ */
+ sk_sp<SkImage> getFrame();
+
+ /**
+ * Return the size of the image(s) that will be returned by getFrame().
+ */
+ SkISize dimensions();
+
+ /**
+ * Returns the total duration of the animation in milliseconds. Returns 0 for a single-frame
+ * image.
+ */
+ uint32_t duration() { return fTotalDuration; }
+
+ /**
+ * Finds the closest frame associated with the time code (in milliseconds) and sets that
+ * to be the current frame (call getFrame() to retrieve that image).
+ * Returns true iff this call to seek() changed the "current frame" for the animation.
+ * Thus if seek() returns false, then getFrame() will return the same image as it did
+ * before this call to seek().
+ */
+ bool seek(uint32_t msec);
+
+
+private:
+ std::unique_ptr<SkCodec> fCodec;
+ SkImageInfo fImageInfo;
+ std::vector<SkCodec::FrameInfo> fFrameInfos;
+ std::vector<sk_sp<SkImage> > fImages;
+ int fCurrIndex = 0;
+ uint32_t fTotalDuration;
+
+ sk_sp<SkImage> getFrameAt(int index);
+};
+
+#endif
+
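
A usage sketch for the player: step an animated image (a GIF, say) along a roughly 60 fps timeline, redrawing only when seek() reports a frame change. `data` and `canvas` are assumed to exist:

    #include "include/core/SkCanvas.h"
    #include "include/utils/SkAnimCodecPlayer.h"

    void playOnce(sk_sp<SkData> data, SkCanvas* canvas) {
        SkAnimCodecPlayer player(SkCodec::MakeFromData(std::move(data)));
        canvas->drawImage(player.getFrame(), 0, 0);     // frame at msec = 0
        for (uint32_t msec = 16; msec < player.duration(); msec += 16) {
            if (player.seek(msec)) {                    // true iff frame changed
                canvas->drawImage(player.getFrame(), 0, 0);
            }
        }
    }
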
diff --git a/gfx/skia/skia/include/utils/SkBase64.h b/gfx/skia/skia/include/utils/SkBase64.h
new file mode 100644
index 0000000000..d547cb4d0a
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkBase64.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBase64_DEFINED
+#define SkBase64_DEFINED
+
+#include "include/core/SkTypes.h"
+
+struct SkBase64 {
+public:
+ enum Error {
+ kNoError,
+ kPadError,
+ kBadCharError
+ };
+
+ SkBase64();
+ Error decode(const char* src, size_t length);
+ char* getData() { return fData; }
+ /**
+       Base64 encodes src into dest. encode is a pointer to at least 65 chars.
+ encode[64] will be used as the pad character. Encodings other than the
+ default encoding cannot be decoded.
+ */
+ static size_t Encode(const void* src, size_t length, void* dest, const char* encode = nullptr);
+
+private:
+ Error decode(const void* srcPtr, size_t length, bool writeDestination);
+
+ size_t fLength;
+ char* fData;
+ friend class SkImageBaseBitmap;
+};
+
+#endif // SkBase64_DEFINED
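
A round-trip sketch. It assumes the common Skia calling convention that Encode() with a null destination returns the number of bytes required:

    #include "include/core/SkString.h"
    #include "include/utils/SkBase64.h"
    #include <cstring>

    bool roundTrip(const char* src) {
        size_t encodedSize = SkBase64::Encode(src, strlen(src), nullptr);
        SkString encoded(encodedSize);
        SkBase64::Encode(src, strlen(src), encoded.writable_str());

        SkBase64 decoder;
        if (decoder.decode(encoded.c_str(), encoded.size()) !=
                SkBase64::kNoError) {
            return false;
        }
        // getData() exposes the buffer allocated by decode().
        return 0 == memcmp(decoder.getData(), src, strlen(src));
    }
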
diff --git a/gfx/skia/skia/include/utils/SkCamera.h b/gfx/skia/skia/include/utils/SkCamera.h
new file mode 100644
index 0000000000..c216b6d6c4
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCamera.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Inspired by Rob Johnson's most excellent QuickDraw GX sample code
+
+#ifndef SkCamera_DEFINED
+#define SkCamera_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkCanvas;
+
+struct SkUnit3D {
+ SkScalar fX, fY, fZ;
+
+ void set(SkScalar x, SkScalar y, SkScalar z) {
+ fX = x; fY = y; fZ = z;
+ }
+ static SkScalar Dot(const SkUnit3D&, const SkUnit3D&);
+ static void Cross(const SkUnit3D&, const SkUnit3D&, SkUnit3D* cross);
+};
+
+struct SkPoint3D {
+ SkScalar fX, fY, fZ;
+
+ void set(SkScalar x, SkScalar y, SkScalar z) {
+ fX = x; fY = y; fZ = z;
+ }
+ SkScalar normalize(SkUnit3D*) const;
+};
+typedef SkPoint3D SkVector3D;
+
+struct SkMatrix3D {
+ SkScalar fMat[3][4];
+
+ void reset();
+
+ void setRow(int row, SkScalar a, SkScalar b, SkScalar c, SkScalar d = 0) {
+ SkASSERT((unsigned)row < 3);
+ fMat[row][0] = a;
+ fMat[row][1] = b;
+ fMat[row][2] = c;
+ fMat[row][3] = d;
+ }
+
+ void setRotateX(SkScalar deg);
+ void setRotateY(SkScalar deg);
+ void setRotateZ(SkScalar deg);
+ void setTranslate(SkScalar x, SkScalar y, SkScalar z);
+
+ void preRotateX(SkScalar deg);
+ void preRotateY(SkScalar deg);
+ void preRotateZ(SkScalar deg);
+ void preTranslate(SkScalar x, SkScalar y, SkScalar z);
+
+ void setConcat(const SkMatrix3D& a, const SkMatrix3D& b);
+ void mapPoint(const SkPoint3D& src, SkPoint3D* dst) const;
+ void mapVector(const SkVector3D& src, SkVector3D* dst) const;
+
+ void mapPoint(SkPoint3D* v) const {
+ this->mapPoint(*v, v);
+ }
+
+ void mapVector(SkVector3D* v) const {
+ this->mapVector(*v, v);
+ }
+};
+
+class SkPatch3D {
+public:
+ SkPatch3D();
+
+ void reset();
+ void transform(const SkMatrix3D&, SkPatch3D* dst = nullptr) const;
+
+ // dot a unit vector with the patch's normal
+ SkScalar dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const;
+ SkScalar dotWith(const SkVector3D& v) const {
+ return this->dotWith(v.fX, v.fY, v.fZ);
+ }
+
+ // deprecated, but still here for animator (for now)
+ void rotate(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+ void rotateDegrees(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+
+private:
+public: // make public for SkDraw3D for now
+ SkVector3D fU, fV;
+ SkPoint3D fOrigin;
+
+ friend class SkCamera3D;
+};
+
+class SkCamera3D {
+public:
+ SkCamera3D();
+
+ void reset();
+ void update();
+ void patchToMatrix(const SkPatch3D&, SkMatrix* matrix) const;
+
+ SkPoint3D fLocation; // origin of the camera's space
+ SkPoint3D fAxis; // view direction
+ SkPoint3D fZenith; // up direction
+ SkPoint3D fObserver; // eye position (may not be the same as the origin)
+
+private:
+ mutable SkMatrix fOrientation;
+ mutable bool fNeedToUpdate;
+
+ void doUpdate() const;
+};
+
+class SK_API Sk3DView : SkNoncopyable {
+public:
+ Sk3DView();
+ ~Sk3DView();
+
+ void save();
+ void restore();
+
+ void translate(SkScalar x, SkScalar y, SkScalar z);
+ void rotateX(SkScalar deg);
+ void rotateY(SkScalar deg);
+ void rotateZ(SkScalar deg);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ void setCameraLocation(SkScalar x, SkScalar y, SkScalar z);
+ SkScalar getCameraLocationX() const;
+ SkScalar getCameraLocationY() const;
+ SkScalar getCameraLocationZ() const;
+#endif
+
+ void getMatrix(SkMatrix*) const;
+ void applyToCanvas(SkCanvas*) const;
+
+ SkScalar dotWithNormal(SkScalar dx, SkScalar dy, SkScalar dz) const;
+
+private:
+ struct Rec {
+ Rec* fNext;
+ SkMatrix3D fMatrix;
+ };
+ Rec* fRec;
+ Rec fInitialRec;
+ SkCamera3D fCamera;
+};
+
+#endif
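
A sketch of the typical Sk3DView pattern: rotate in 3D, then let the view concat the derived 3x3 matrix onto a canvas. The rectangle and angles are illustrative:

    #include "include/core/SkCanvas.h"
    #include "include/utils/SkCamera.h"

    void drawTiltedCard(SkCanvas* canvas, const SkPaint& paint) {
        Sk3DView view;
        view.save();
        view.rotateX(30);                // degrees
        view.rotateY(-15);

        canvas->save();
        view.applyToCanvas(canvas);      // applies the projected matrix
        canvas->drawRect(SkRect::MakeWH(200, 120), paint);
        canvas->restore();
        view.restore();
    }
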
diff --git a/gfx/skia/skia/include/utils/SkCanvasStateUtils.h b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
new file mode 100644
index 0000000000..8b5c65ec08
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStateUtils_DEFINED
+#define SkCanvasStateUtils_DEFINED
+
+#include "include/core/SkCanvas.h"
+
+class SkCanvasState;
+
+/**
+ * A set of functions that are useful for copying the state of an SkCanvas
+ * across a library boundary where the Skia library on the other side of the
+ * boundary may be newer. The expected usage is outlined below...
+ *
+ * Lib Boundary
+ * CaptureCanvasState(...) |||
+ * SkCanvas --> SkCanvasState |||
+ *                       |||  MakeFromCanvasState(...)
+ * ||| SkCanvasState --> SkCanvas`
+ * ||| Draw into SkCanvas`
+ * ||| Unref SkCanvas`
+ * ReleaseCanvasState(...) |||
+ *
+ */
+class SK_API SkCanvasStateUtils {
+public:
+ /**
+ * Captures the current state of the canvas into an opaque ptr that is safe
+ * to pass to a different instance of Skia (which may be the same version,
+ * or may be newer). The function will return NULL in the event that one of the
+     * following conditions is true.
+ * 1) the canvas device type is not supported (currently only raster is supported)
+ * 2) the canvas clip type is not supported (currently only non-AA clips are supported)
+ *
+ * It is recommended that the original canvas also not be used until all
+ * canvases that have been created using its captured state have been dereferenced.
+ *
+ * Finally, it is important to note that any draw filters attached to the
+ * canvas are NOT currently captured.
+ *
+ * @param canvas The canvas you wish to capture the current state of.
+     * @return NULL or an opaque ptr that can be passed to MakeFromCanvasState
+ * to reconstruct the canvas. The caller is responsible for calling
+ * ReleaseCanvasState to free the memory associated with this state.
+ */
+ static SkCanvasState* CaptureCanvasState(SkCanvas* canvas);
+
+ /**
+ * Create a new SkCanvas from the captured state of another SkCanvas. The
+ * function will return NULL in the event that one of the
+     * following conditions is true.
+ * 1) the captured state is in an unrecognized format
+ * 2) the captured canvas device type is not supported
+ *
+ * @param state Opaque object created by CaptureCanvasState.
+ * @return NULL or an SkCanvas* whose devices and matrix/clip state are
+     *         identical to the captured canvas, returned as a std::unique_ptr
+     *         that owns it.
+ */
+ static std::unique_ptr<SkCanvas> MakeFromCanvasState(const SkCanvasState* state);
+
+ /**
+ * Free the memory associated with the captured canvas state. The state
+ * should not be released until all SkCanvas objects created using that
+ * state have been dereferenced. Must be called from the same library
+ * instance that created the state via CaptureCanvasState.
+ *
+ * @param state The captured state you wish to dispose of.
+ */
+ static void ReleaseCanvasState(SkCanvasState* state);
+};
+
+#endif
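
A sketch of the handshake the comment block above describes; in practice the middle section would run in a different shared library built against its own copy of Skia:

    #include "include/utils/SkCanvasStateUtils.h"

    void crossBoundaryDraw(SkCanvas* canvas) {
        SkCanvasState* state = SkCanvasStateUtils::CaptureCanvasState(canvas);
        if (!state) {
            return;   // unsupported device or clip type
        }
        {   // --- other side of the library boundary ---
            std::unique_ptr<SkCanvas> remote =
                    SkCanvasStateUtils::MakeFromCanvasState(state);
            if (remote) {
                remote->drawColor(SK_ColorRED);
            }
        }   // the reconstructed canvas is destroyed before the state
        SkCanvasStateUtils::ReleaseCanvasState(state);
    }
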
diff --git a/gfx/skia/skia/include/utils/SkEventTracer.h b/gfx/skia/skia/include/utils/SkEventTracer.h
new file mode 100644
index 0000000000..03e9d5564d
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkEventTracer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2014 Google Inc. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEventTracer_DEFINED
+#define SkEventTracer_DEFINED
+
+// The class in this header defines the interface between Skia's internal
+// tracing macros and an external entity (e.g., Chrome) that will consume them.
+// Such an entity should subclass SkEventTracer and provide an instance of
+// that event to SkEventTracer::SetInstance.
+
+// If you're looking for the tracing macros to instrument Skia itself, those
+// live in src/core/SkTraceEvent.h
+
+#include "include/core/SkTypes.h"
+
+class SK_API SkEventTracer {
+public:
+
+ typedef uint64_t Handle;
+
+ /**
+ * If this is the first call to SetInstance or GetInstance then the passed instance is
+ * installed and true is returned. Otherwise, false is returned. In either case ownership of the
+ * tracer is transferred and it will be deleted when no longer needed.
+ */
+ static bool SetInstance(SkEventTracer*);
+
+ /**
+     * Gets the event tracer. If this is the first call to SetInstance or GetInstance then a default
+ * event tracer is installed and returned.
+ */
+ static SkEventTracer* GetInstance();
+
+ virtual ~SkEventTracer() { }
+
+ // The pointer returned from GetCategoryGroupEnabled() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in trace_event.h in chromium.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ };
+
+ virtual const uint8_t* getCategoryGroupEnabled(const char* name) = 0;
+ virtual const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) = 0;
+
+ virtual SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int32_t numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) = 0;
+
+ virtual void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) = 0;
+};
+
+#endif // SkEventTracer_DEFINED
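
A minimal subclass sketch: a tracer that reports every category as disabled, which also serves as a starting template for forwarding events into a host profiler:

    #include "include/utils/SkEventTracer.h"

    class NullEventTracer : public SkEventTracer {
    public:
        const uint8_t* getCategoryGroupEnabled(const char*) override {
            static const uint8_t kDisabled = 0;  // no CategoryGroupEnabledFlags
            return &kDisabled;
        }
        const char* getCategoryGroupName(const uint8_t*) override {
            return "disabled";
        }
        Handle addTraceEvent(char, const uint8_t*, const char*, uint64_t,
                             int32_t, const char**, const uint8_t*,
                             const uint64_t*, uint8_t) override {
            return 0;
        }
        void updateTraceEventDuration(const uint8_t*, const char*,
                                      Handle) override {}
    };

    // Ownership transfers to Skia whether or not this instance wins the race.
    bool installed = SkEventTracer::SetInstance(new NullEventTracer);
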
diff --git a/gfx/skia/skia/include/utils/SkFrontBufferedStream.h b/gfx/skia/skia/include/utils/SkFrontBufferedStream.h
new file mode 100644
index 0000000000..52a0178b63
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkFrontBufferedStream.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFrontBufferedStream_DEFINED
+#define SkFrontBufferedStream_DEFINED
+
+#include "include/core/SkStream.h"
+
+/**
+ * Specialized stream that buffers the first X bytes of a stream,
+ * where X is passed in by the user. Note that unlike some buffered
+ * stream APIs, once more bytes than can fit in the buffer are read,
+ * no more buffering is done. This stream is designed for a use case
+ * where the caller knows that rewind will only be called from within
+ * X bytes (inclusive), and the wrapped stream is not necessarily
+ * able to rewind at all.
+ */
+class SK_API SkFrontBufferedStream {
+public:
+ /**
+ * Creates a new stream that wraps and buffers an SkStream.
+ * @param stream SkStream to buffer. If stream is NULL, NULL is
+ * returned. When this call succeeds (i.e. returns non NULL),
+ * SkFrontBufferedStream is expected to be the only owner of
+     *               stream, so it should no longer be used directly.
+ * SkFrontBufferedStream will delete stream upon deletion.
+ * @param minBufferSize Minimum size of buffer required.
+ * @return An SkStream that can buffer at least minBufferSize, or
+     *         NULL on failure. The returned std::unique_ptr owns the stream
+     *         and deletes it when it goes out of scope.
+ */
+ static std::unique_ptr<SkStreamRewindable> Make(std::unique_ptr<SkStream> stream,
+ size_t minBufferSize);
+};
+#endif // SkFrontBufferedStream_DEFINED
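
A sketch of the intended use: image-format sniffing needs rewind(), but a network-backed stream cannot seek. MakeNetworkStream() is a placeholder for any non-rewindable SkStream source:

    #include "include/codec/SkCodec.h"
    #include "include/utils/SkFrontBufferedStream.h"

    std::unique_ptr<SkStream> MakeNetworkStream();  // hypothetical source

    std::unique_ptr<SkCodec> decodeFromNetwork() {
        auto buffered = SkFrontBufferedStream::Make(
                MakeNetworkStream(), SkCodec::MinBufferedBytesNeeded());
        return SkCodec::MakeFromStream(std::move(buffered));
    }
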
diff --git a/gfx/skia/skia/include/utils/SkInterpolator.h b/gfx/skia/skia/include/utils/SkInterpolator.h
new file mode 100644
index 0000000000..ac08ca44f3
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkInterpolator.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkInterpolator_DEFINED
+#define SkInterpolator_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTo.h"
+
+class SK_API SkInterpolatorBase : SkNoncopyable {
+public:
+ enum Result {
+ kNormal_Result,
+ kFreezeStart_Result,
+ kFreezeEnd_Result
+ };
+protected:
+ SkInterpolatorBase();
+ ~SkInterpolatorBase();
+public:
+ void reset(int elemCount, int frameCount);
+
+ /** Return the start and end time for this interpolator.
+ If there are no key frames, return false.
+ @param startTime If not null, returns the time (in milliseconds) of the
+ first keyframe. If there are no keyframes, this param
+ is ignored (left unchanged).
+ @param endTime If not null, returns the time (in milliseconds) of the
+ last keyframe. If there are no keyframes, this parameter
+ is ignored (left unchanged).
+ @return True if there are key frames, or false if there are none.
+ */
+ bool getDuration(SkMSec* startTime, SkMSec* endTime) const;
+
+
+    /** Set whether the repeat is mirrored.
+ @param mirror If true, the odd repeats interpolate from the last key
+ frame and the first.
+ */
+ void setMirror(bool mirror) {
+ fFlags = SkToU8((fFlags & ~kMirror) | (int)mirror);
+ }
+
+ /** Set the repeat count. The repeat count may be fractional.
+ @param repeatCount Multiplies the total time by this scalar.
+ */
+ void setRepeatCount(SkScalar repeatCount) { fRepeat = repeatCount; }
+
+    /** Set whether the animation resets at each repeat.
+        @param reset If true, each repeat restarts from the first key frame
+                     instead of continuing from the previous end state.
+ */
+ void setReset(bool reset) {
+        fFlags = SkToU8((fFlags & ~kReset) | ((int)reset << 1));  // kReset is bit 1
+ }
+
+ Result timeToT(SkMSec time, SkScalar* T, int* index, bool* exact) const;
+
+protected:
+ enum Flags {
+ kMirror = 1,
+ kReset = 2,
+ kHasBlend = 4
+ };
+ static SkScalar ComputeRelativeT(SkMSec time, SkMSec prevTime, SkMSec nextTime,
+ const SkScalar blend[4] = nullptr);
+ int16_t fFrameCount;
+ uint8_t fElemCount;
+ uint8_t fFlags;
+ SkScalar fRepeat;
+ struct SkTimeCode {
+ SkMSec fTime;
+ SkScalar fBlend[4];
+ };
+ SkTimeCode* fTimes; // pointer into fStorage
+ void* fStorage;
+#ifdef SK_DEBUG
+ SkTimeCode(* fTimesArray)[10];
+#endif
+};
+
+class SK_API SkInterpolator : public SkInterpolatorBase {
+public:
+ SkInterpolator();
+ SkInterpolator(int elemCount, int frameCount);
+ void reset(int elemCount, int frameCount);
+
+ /** Add or replace a key frame, copying the values[] data into the
+ interpolator.
+ @param index The index of this frame (frames must be ordered by time)
+ @param time The millisecond time for this frame
+ @param values The array of values [elemCount] for this frame. The data
+ is copied into the interpolator.
+ @param blend A positive scalar specifying how to blend between this
+ and the next key frame. [0...1) is a cubic lag/log/lag
+ blend (slow to change at the beginning and end)
+ 1 is a linear blend (default)
+ */
+ bool setKeyFrame(int index, SkMSec time, const SkScalar values[],
+ const SkScalar blend[4] = nullptr);
+
+ /** Return the computed values given the specified time. Return whether
+ those values are the result of pinning to either the first
+        (kFreezeStart) or last (kFreezeEnd), or from interpolating the two
+ nearest key values (kNormal).
+ @param time The time to sample (in milliseconds)
+        @param values (may be null) Where to write the computed values.
+ */
+ Result timeToValues(SkMSec time, SkScalar values[] = nullptr) const;
+
+private:
+ SkScalar* fValues; // pointer into fStorage
+#ifdef SK_DEBUG
+ SkScalar(* fScalarsArray)[10];
+#endif
+ typedef SkInterpolatorBase INHERITED;
+};
+
+/** Interpolate a cubic curve, typically to provide an ease-in ease-out transition.
+ All the parameters are in the range of [0...1].
+ The input value is treated as the x-coordinate of the cubic.
+ The output value is the y-coordinate on the cubic at the x-coordinate.
+
+ @param value The x-coordinate pinned between [0..1].
+ @param bx,by,cx,cy The cubic control points where the cubic is specified
+ as (0,0) (bx,by) (cx,cy) (1,1)
+ @return the corresponding y-coordinate value, from [0..1].
+*/
+SkScalar SkUnitCubicInterp(SkScalar value, SkScalar bx, SkScalar by,
+ SkScalar cx, SkScalar cy);
+
+#endif
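
A two-keyframe sketch: interpolate an (x, y) pair over one second, then sample the midpoint. With the default linear blend the values land halfway:

    #include "include/utils/SkInterpolator.h"

    void sampleMidpoint(SkScalar out[2]) {
        SkInterpolator interp(2 /*elemCount*/, 2 /*frameCount*/);
        const SkScalar start[2] = {0, 0};
        const SkScalar end[2] = {100, 50};
        interp.setKeyFrame(0, 0 /*msec*/, start);
        interp.setKeyFrame(1, 1000 /*msec*/, end);

        interp.timeToValues(500, out);   // out is roughly {50, 25}

        // The freestanding helper evaluates a unit cubic directly, e.g. a
        // CSS-style ease curve:
        SkScalar eased = SkUnitCubicInterp(0.5f, 0.25f, 0.1f, 0.25f, 1.0f);
        (void)eased;
    }
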
diff --git a/gfx/skia/skia/include/utils/SkLua.h b/gfx/skia/skia/include/utils/SkLua.h
new file mode 100644
index 0000000000..2d963eff13
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkLua.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLua_DEFINED
+#define SkLua_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+
+struct lua_State;
+
+class SkCanvas;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkRect;
+class SkRRect;
+class SkTextBlob;
+
+#define SkScalarToLua(x) SkScalarToDouble(x)
+#define SkLuaToScalar(x) SkDoubleToScalar(x)
+
+class SkLua {
+public:
+ static void Load(lua_State*);
+
+ SkLua(const char termCode[] = nullptr); // creates a new L, will close it
+ SkLua(lua_State*); // uses L, will not close it
+ ~SkLua();
+
+ lua_State* get() const { return fL; }
+ lua_State* operator*() const { return fL; }
+ lua_State* operator->() const { return fL; }
+
+ bool runCode(const char code[]);
+ bool runCode(const void* code, size_t size);
+
+ void pushBool(bool, const char tableKey[] = nullptr);
+ void pushString(const char[], const char tableKey[] = nullptr);
+ void pushString(const char[], size_t len, const char tableKey[] = nullptr);
+ void pushString(const SkString&, const char tableKey[] = nullptr);
+ void pushArrayU16(const uint16_t[], int count, const char tableKey[] = nullptr);
+ void pushArrayPoint(const SkPoint[], int count, const char key[] = nullptr);
+ void pushArrayScalar(const SkScalar[], int count, const char key[] = nullptr);
+ void pushColor(SkColor, const char tableKey[] = nullptr);
+ void pushU32(uint32_t, const char tableKey[] = nullptr);
+ void pushScalar(SkScalar, const char tableKey[] = nullptr);
+ void pushRect(const SkRect&, const char tableKey[] = nullptr);
+ void pushRRect(const SkRRect&, const char tableKey[] = nullptr);
+ void pushDash(const SkPathEffect::DashInfo&, const char tableKey[] = nullptr);
+ void pushMatrix(const SkMatrix&, const char tableKey[] = nullptr);
+ void pushPaint(const SkPaint&, const char tableKey[] = nullptr);
+ void pushPath(const SkPath&, const char tableKey[] = nullptr);
+ void pushCanvas(SkCanvas*, const char tableKey[] = nullptr);
+ void pushTextBlob(const SkTextBlob*, const char tableKey[] = nullptr);
+
+private:
+ lua_State* fL;
+ SkString fTermCode;
+ bool fWeOwnL;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkLuaCanvas.h b/gfx/skia/skia/include/utils/SkLuaCanvas.h
new file mode 100644
index 0000000000..f75c8f219c
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkLuaCanvas.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLuaCanvas_DEFINED
+#define SkLuaCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkString.h"
+#include "include/core/SkVertices.h"
+
+struct lua_State;
+
+class SkLuaCanvas : public SkCanvas {
+public:
+ void pushThis();
+
+ SkLuaCanvas(int width, int height, lua_State*, const char function[]);
+ ~SkLuaCanvas() override;
+
+protected:
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+
+private:
+ lua_State* fL;
+ SkString fFunc;
+
+ void sendverb(const char verb[]);
+
+ typedef SkCanvas INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkNWayCanvas.h b/gfx/skia/skia/include/utils/SkNWayCanvas.h
new file mode 100644
index 0000000000..72bed680bf
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNWayCanvas.h
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNWayCanvas_DEFINED
+#define SkNWayCanvas_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/private/SkTDArray.h"
+#include "include/utils/SkNoDrawCanvas.h"
+
+class SK_API SkNWayCanvas : public SkCanvasVirtualEnforcer<SkNoDrawCanvas> {
+public:
+ SkNWayCanvas(int width, int height);
+ ~SkNWayCanvas() override;
+
+ virtual void addCanvas(SkCanvas*);
+ virtual void removeCanvas(SkCanvas*);
+ virtual void removeAll();
+
+protected:
+ SkTDArray<SkCanvas*> fList;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice&, const SkRect&,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice&, const SkRect&, const SkPaint*) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int, SkBlendMode, const SkRect*, const SkPaint*) override;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkPaint*, SrcRectConstraint) override;
+
+ void onFlush() override;
+
+ class Iter;
+
+private:
+ typedef SkCanvasVirtualEnforcer<SkNoDrawCanvas> INHERITED;
+};
+
+
+#endif
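SkNWayCanvas fans each draw call out to every canvas registered via addCanvas(). A minimal usage sketch (not part of the imported sources; sizes and colors are arbitrary):

    #include "include/core/SkPaint.h"
    #include "include/core/SkRect.h"
    #include "include/utils/SkNWayCanvas.h"

    // Mirror one stream of draw calls onto two destination canvases.
    void drawToBoth(SkCanvas* screen, SkCanvas* recorder) {
        SkNWayCanvas nway(640, 480);
        nway.addCanvas(screen);
        nway.addCanvas(recorder);

        SkPaint paint;
        paint.setColor(SK_ColorRED);
        nway.drawRect(SkRect::MakeWH(100, 100), paint);  // forwarded to both

        nway.removeAll();  // detach before the destinations are destroyed
    }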
diff --git a/gfx/skia/skia/include/utils/SkNoDrawCanvas.h b/gfx/skia/skia/include/utils/SkNoDrawCanvas.h
new file mode 100644
index 0000000000..a68d41a0fe
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNoDrawCanvas.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoDrawCanvas_DEFINED
+#define SkNoDrawCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkVertices.h"
+
+struct SkIRect;
+
+// SkNoDrawCanvas is a helper for SkCanvas subclasses which do not need to
+// actually rasterize (e.g., analysis of the draw calls).
+//
+// It provides the following simplifications:
+//
+// * not backed by any device/pixels
+// * conservative clipping (clipping calls only use rectangles)
+//
+class SK_API SkNoDrawCanvas : public SkCanvasVirtualEnforcer<SkCanvas> {
+public:
+ SkNoDrawCanvas(int width, int height);
+ SkNoDrawCanvas(const SkIRect&);
+
+ explicit SkNoDrawCanvas(sk_sp<SkBaseDevice> device);
+
+ // Optimization to reset state to be the same as after construction.
+ void resetCanvas(int w, int h) { this->resetForNextPicture(SkIRect::MakeWH(w, h)); }
+ void resetCanvas(const SkIRect& rect) { this->resetForNextPicture(rect); }
+
+protected:
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& rec) override;
+ bool onDoSaveBehind(const SkRect*) override;
+
+ // No-op overrides for aborting rasterization earlier than SkNullBlitter.
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override {}
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override {}
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override {}
+ void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override {}
+ void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode,
+ const SkPaint&) override {}
+
+ void onDrawPaint(const SkPaint&) override {}
+ void onDrawBehind(const SkPaint&) override {}
+ void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override {}
+ void onDrawRect(const SkRect&, const SkPaint&) override {}
+ void onDrawRegion(const SkRegion&, const SkPaint&) override {}
+ void onDrawOval(const SkRect&, const SkPaint&) override {}
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override {}
+ void onDrawRRect(const SkRRect&, const SkPaint&) override {}
+ void onDrawPath(const SkPath&, const SkPaint&) override {}
+ void onDrawBitmap(const SkBitmap&, SkScalar, SkScalar, const SkPaint*) override {}
+ void onDrawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override {}
+ void onDrawImage(const SkImage*, SkScalar, SkScalar, const SkPaint*) override {}
+ void onDrawImageRect(const SkImage*, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override {}
+ void onDrawImageNine(const SkImage*, const SkIRect&, const SkRect&, const SkPaint*) override {}
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect&, const SkRect&,
+ const SkPaint*) override {}
+ void onDrawImageLattice(const SkImage*, const Lattice&, const SkRect&,
+ const SkPaint*) override {}
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice&, const SkRect&,
+ const SkPaint*) override {}
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone[], int, SkBlendMode,
+ const SkPaint&) override {}
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int, SkBlendMode, const SkRect*, const SkPaint*) override {}
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override {}
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override {}
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override {}
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int, const SkPoint[],
+ const SkMatrix[], const SkPaint*, SrcRectConstraint) override {}
+
+private:
+ typedef SkCanvasVirtualEnforcer<SkCanvas> INHERITED;
+};
+
+#endif // SkNoDrawCanvas_DEFINED
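Because SkNoDrawCanvas stubs out every draw virtual, an analysis pass only needs to override the calls it cares about. A sketch (the class name is hypothetical):

    #include "include/utils/SkNoDrawCanvas.h"

    // Counts rect draws without rasterizing anything.
    class RectCounter : public SkNoDrawCanvas {
    public:
        RectCounter(int w, int h) : SkNoDrawCanvas(w, h) {}
        int count() const { return fCount; }

    protected:
        void onDrawRect(const SkRect&, const SkPaint&) override { ++fCount; }

    private:
        int fCount = 0;
    };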
diff --git a/gfx/skia/skia/include/utils/SkNullCanvas.h b/gfx/skia/skia/include/utils/SkNullCanvas.h
new file mode 100644
index 0000000000..d63bf5c414
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNullCanvas.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNullCanvas_DEFINED
+#define SkNullCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+
+/**
+ * Creates a canvas that draws nothing. This is useful for performance testing.
+ */
+SK_API std::unique_ptr<SkCanvas> SkMakeNullCanvas();
+
+#endif
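A short sketch of the intended use, measuring draw-side overhead with no rasterization cost (the loop count is arbitrary):

    #include <memory>

    #include "include/core/SkPaint.h"
    #include "include/core/SkPath.h"
    #include "include/utils/SkNullCanvas.h"

    void stressDrawPath(const SkPath& path) {
        std::unique_ptr<SkCanvas> canvas = SkMakeNullCanvas();
        SkPaint paint;
        for (int i = 0; i < 10000; ++i) {
            canvas->drawPath(path, paint);  // only call overhead, nothing is drawn
        }
    }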
diff --git a/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
new file mode 100644
index 0000000000..3583e904ee
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintFilterCanvas_DEFINED
+#define SkPaintFilterCanvas_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/utils/SkNWayCanvas.h"
+
+class SkAndroidFrameworkUtils;
+
+/** \class SkPaintFilterCanvas
+
+ A utility proxy base class for implementing draw/paint filters.
+*/
+class SK_API SkPaintFilterCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> {
+public:
+ /**
+ * The new SkPaintFilterCanvas is configured for forwarding to the
+ * specified canvas. Also copies the target canvas matrix and clip bounds.
+ */
+ SkPaintFilterCanvas(SkCanvas* canvas);
+
+ enum Type {
+ kPicture_Type,
+ };
+
+ // Forwarded to the wrapped canvas.
+ SkISize getBaseLayerSize() const override { return proxy()->getBaseLayerSize(); }
+ GrContext* getGrContext() override { return proxy()->getGrContext(); }
+ GrRenderTargetContext* internal_private_accessTopLayerRenderTargetContext() override {
+ return proxy()->internal_private_accessTopLayerRenderTargetContext();
+ }
+
+protected:
+ /**
+ * Called with the paint that will be used to draw the specified type.
+     * The implementation may modify the paint as it wishes.
+ *
+ * The result bool is used to determine whether the draw op is to be
+ * executed (true) or skipped (false).
+ *
+ * Note: The base implementation calls onFilter() for top-level/explicit paints only.
+ * To also filter encapsulated paints (e.g. SkPicture, SkTextBlob), clients may need to
+ * override the relevant methods (i.e. drawPicture, drawTextBlob).
+ */
+ virtual bool onFilter(SkPaint& paint) const = 0;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice&, const SkRect&,
+ const SkPaint*) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice&, const SkRect&,
+ const SkPaint*) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode,
+ const SkPaint& paint) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int, SkBlendMode, const SkRect*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override;
+ void onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkPaint*, SrcRectConstraint) override;
+
+ // Forwarded to the wrapped canvas.
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ bool onPeekPixels(SkPixmap* pixmap) override;
+ bool onAccessTopLayerPixels(SkPixmap* pixmap) override;
+ SkImageInfo onImageInfo() const override;
+ bool onGetProps(SkSurfaceProps* props) const override;
+
+private:
+ class AutoPaintFilter;
+
+ SkCanvas* proxy() const { SkASSERT(fList.count() == 1); return fList[0]; }
+
+ SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const override {
+ return const_cast<SkPaintFilterCanvas*>(this);
+ }
+
+ friend class SkAndroidFrameworkUtils;
+};
+
+#endif
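The onFilter() contract is easiest to see in a small subclass; a sketch (assumes the wrapped canvas outlives the filter canvas):

    #include "include/utils/SkPaintFilterCanvas.h"

    // Forces every top-level paint to stroke; returning true keeps the draw op.
    class StrokeOnlyCanvas : public SkPaintFilterCanvas {
    public:
        explicit StrokeOnlyCanvas(SkCanvas* target) : SkPaintFilterCanvas(target) {}

    protected:
        bool onFilter(SkPaint& paint) const override {
            paint.setStyle(SkPaint::kStroke_Style);
            return true;  // false would skip the draw op entirely
        }
    };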
diff --git a/gfx/skia/skia/include/utils/SkParse.h b/gfx/skia/skia/include/utils/SkParse.h
new file mode 100644
index 0000000000..9a738bace1
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParse.h
@@ -0,0 +1,32 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParse_DEFINED
+#define SkParse_DEFINED
+
+#include "include/core/SkColor.h"
+
+class SK_API SkParse {
+public:
+ static int Count(const char str[]); // number of scalars or int values
+ static int Count(const char str[], char separator);
+ static const char* FindColor(const char str[], SkColor* value);
+ static const char* FindHex(const char str[], uint32_t* value);
+ static const char* FindMSec(const char str[], SkMSec* value);
+ static const char* FindNamedColor(const char str[], size_t len, SkColor* color);
+ static const char* FindS32(const char str[], int32_t* value);
+ static const char* FindScalar(const char str[], SkScalar* value);
+ static const char* FindScalars(const char str[], SkScalar value[], int count);
+
+ static bool FindBool(const char str[], bool* value);
+ // return the index of str in list[], or -1 if not found
+ static int FindList(const char str[], const char list[]);
+};
+
+#endif
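The Find* helpers return a pointer just past the parsed token on success and nullptr on failure, so calls can be chained. A sketch under that assumption (the input format is an example):

    #include "include/utils/SkParse.h"

    // Parses e.g. "#ff8800 0.5" into a color and an opacity scalar.
    bool parseColorAndOpacity(const char str[], SkColor* color, SkScalar* opacity) {
        const char* rest = SkParse::FindColor(str, color);
        return rest && SkParse::FindScalar(rest, opacity);
    }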
diff --git a/gfx/skia/skia/include/utils/SkParsePath.h b/gfx/skia/skia/include/utils/SkParsePath.h
new file mode 100644
index 0000000000..9a79f945d9
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParsePath.h
@@ -0,0 +1,23 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParsePath_DEFINED
+#define SkParsePath_DEFINED
+
+#include "include/core/SkPath.h"
+
+class SkString;
+
+class SK_API SkParsePath {
+public:
+ static bool FromSVGString(const char str[], SkPath*);
+ static void ToSVGString(const SkPath&, SkString*);
+};
+
+#endif
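A round-trip sketch for the two entry points (the path data string is an arbitrary example):

    #include "include/core/SkPath.h"
    #include "include/core/SkString.h"
    #include "include/utils/SkParsePath.h"

    bool roundTripSVGPath() {
        SkPath path;
        if (!SkParsePath::FromSVGString("M0 0 L20 20 Z", &path)) {
            return false;
        }
        SkString out;
        SkParsePath::ToSVGString(path, &out);  // serialize back to SVG path data
        return !out.isEmpty();
    }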
diff --git a/gfx/skia/skia/include/utils/SkRandom.h b/gfx/skia/skia/include/utils/SkRandom.h
new file mode 100644
index 0000000000..0678010362
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkRandom.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRandom_DEFINED
+#define SkRandom_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkFloatBits.h"
+
+/** \class SkRandom
+
+ Utility class that implements pseudo random 32bit numbers using Marsaglia's
+ multiply-with-carry "mother of all" algorithm. Unlike rand(), this class holds
+ its own state, so that multiple instances can be used with no side-effects.
+
+ Has a large period and all bits are well-randomized.
+ */
+class SkRandom {
+public:
+ SkRandom() { init(0); }
+ SkRandom(uint32_t seed) { init(seed); }
+ SkRandom(const SkRandom& rand) : fK(rand.fK), fJ(rand.fJ) {}
+
+ SkRandom& operator=(const SkRandom& rand) {
+ fK = rand.fK;
+ fJ = rand.fJ;
+
+ return *this;
+ }
+
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() {
+ fK = kKMul*(fK & 0xffff) + (fK >> 16);
+ fJ = kJMul*(fJ & 0xffff) + (fJ >> 16);
+ return (((fK << 16) | (fK >> 16)) + fJ);
+ }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /**
+ * Returns value [0...1) as an IEEE float
+ */
+ float nextF() {
+ unsigned int floatint = 0x3f800000 | (this->nextU() >> 9);
+ float f = SkBits2Float(floatint) - 1.0f;
+ return f;
+ }
+
+ /**
+ * Returns value [min...max) as a float
+ */
+ float nextRangeF(float min, float max) {
+ return min + this->nextF() * (max - min);
+ }
+
+ /** Return the next pseudo random number, as an unsigned value of
+ at most bitCount bits.
+ @param bitCount The maximum number of bits to be returned
+ */
+ uint32_t nextBits(unsigned bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ return this->nextU() >> (32 - bitCount);
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [min, max] inclusive.
+ */
+ uint32_t nextRangeU(uint32_t min, uint32_t max) {
+ SkASSERT(min <= max);
+ uint32_t range = max - min + 1;
+ if (0 == range) {
+ return this->nextU();
+ } else {
+ return min + this->nextU() % range;
+ }
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [0, count).
+ */
+ uint32_t nextULessThan(uint32_t count) {
+ SkASSERT(count > 0);
+ return this->nextRangeU(0, count - 1);
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [0..SK_Scalar1).
+ */
+ SkScalar nextUScalar1() { return SkFixedToScalar(this->nextUFixed1()); }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [min..max).
+ */
+ SkScalar nextRangeScalar(SkScalar min, SkScalar max) {
+ return this->nextUScalar1() * (max - min) + min;
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+ /** Return the next pseudo random number as a bool.
+ */
+ bool nextBool() { return this->nextU() >= 0x80000000; }
+
+    /** A biased version of nextBool(): returns true with probability
+        fractionTrue.
+     */
+ bool nextBiasedBool(SkScalar fractionTrue) {
+ SkASSERT(fractionTrue >= 0 && fractionTrue <= SK_Scalar1);
+ return this->nextUScalar1() <= fractionTrue;
+ }
+
+ /** Reset the random object.
+ */
+ void setSeed(uint32_t seed) { init(seed); }
+
+private:
+ // Initialize state variables with LCG.
+ // We must ensure that both J and K are non-zero, otherwise the
+ // multiply-with-carry step will forevermore return zero.
+ void init(uint32_t seed) {
+ fK = NextLCG(seed);
+ if (0 == fK) {
+ fK = NextLCG(fK);
+ }
+ fJ = NextLCG(fK);
+ if (0 == fJ) {
+ fJ = NextLCG(fJ);
+ }
+ SkASSERT(0 != fK && 0 != fJ);
+ }
+ static uint32_t NextLCG(uint32_t seed) { return kMul*seed + kAdd; }
+
+ /** Return the next pseudo random number expressed as an unsigned SkFixed
+ in the range [0..SK_Fixed1).
+ */
+ SkFixed nextUFixed1() { return this->nextU() >> 16; }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ // For the LCG that sets the initial state from a seed
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ // Constants for the multiply-with-carry steps
+ enum {
+ kKMul = 30345,
+ kJMul = 18000,
+ };
+
+ uint32_t fK;
+ uint32_t fJ;
+};
+
+#endif
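Since each instance holds its own state, a fixed seed reproduces the same sequence, which makes the class usable in tests. A usage sketch:

    #include "include/core/SkPoint.h"
    #include "include/utils/SkRandom.h"

    // Deterministic jitter: the same seed always yields the same offsets.
    void jitter(SkPoint pts[], int count, uint32_t seed) {
        SkRandom rand(seed);
        for (int i = 0; i < count; ++i) {
            pts[i].offset(rand.nextRangeF(-0.5f, 0.5f),
                          rand.nextRangeF(-0.5f, 0.5f));
        }
    }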
diff --git a/gfx/skia/skia/include/utils/SkShadowUtils.h b/gfx/skia/skia/include/utils/SkShadowUtils.h
new file mode 100644
index 0000000000..d659ffccf4
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkShadowUtils.h
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkShadowUtils_DEFINED
+#define SkShadowUtils_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkShadowFlags.h"
+
+class SkCanvas;
+class SkPath;
+class SkResourceCache;
+
+class SK_API SkShadowUtils {
+public:
+ /**
+ * Draw an offset spot shadow and outlining ambient shadow for the given path using a disc
+ * light. The shadow may be cached, depending on the path type and canvas matrix. If the
+ * matrix is perspective or the path is volatile, it will not be cached.
+ *
+ * @param canvas The canvas on which to draw the shadows.
+ * @param path The occluder used to generate the shadows.
+ * @param zPlaneParams Values for the plane function which returns the Z offset of the
+ * occluder from the canvas based on local x and y values (the current matrix is not applied).
+ * @param lightPos The 3D position of the light relative to the canvas plane. This is
+ * independent of the canvas's current matrix.
+ * @param lightRadius The radius of the disc light.
+ * @param ambientColor The color of the ambient shadow.
+ * @param spotColor The color of the spot shadow.
+ * @param flags Options controlling opaque occluder optimizations and shadow appearance. See
+ * SkShadowFlags.
+ */
+ static void DrawShadow(SkCanvas* canvas, const SkPath& path, const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ SkColor ambientColor, SkColor spotColor,
+ uint32_t flags = SkShadowFlags::kNone_ShadowFlag);
+
+ /**
+ * Helper routine to compute color values for one-pass tonal alpha.
+ *
+ * @param inAmbientColor Original ambient color
+ * @param inSpotColor Original spot color
+ * @param outAmbientColor Modified ambient color
+ * @param outSpotColor Modified spot color
+ */
+ static void ComputeTonalColors(SkColor inAmbientColor, SkColor inSpotColor,
+ SkColor* outAmbientColor, SkColor* outSpotColor);
+};
+
+#endif
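An example call for DrawShadow(); the occluder geometry, light position, and shadow colors below are made-up illustration values, not recommendations:

    #include "include/core/SkCanvas.h"
    #include "include/core/SkPath.h"
    #include "include/core/SkRRect.h"
    #include "include/utils/SkShadowUtils.h"

    void drawCardShadow(SkCanvas* canvas) {
        SkPath occluder;
        occluder.addRRect(SkRRect::MakeRectXY(SkRect::MakeWH(200, 120), 8, 8));
        const SkPoint3 zPlane   = {0, 0, 4};         // occluder sits 4 units above the canvas
        const SkPoint3 lightPos = {250, -300, 500};  // disc light up and to the right
        SkShadowUtils::DrawShadow(canvas, occluder, zPlane, lightPos,
                                  /*lightRadius=*/40,
                                  SkColorSetARGB(0x40, 0, 0, 0),   // ambient color
                                  SkColorSetARGB(0x80, 0, 0, 0));  // spot color
    }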
diff --git a/gfx/skia/skia/include/utils/SkTextUtils.h b/gfx/skia/skia/include/utils/SkTextUtils.h
new file mode 100644
index 0000000000..6cd3771e3d
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkTextUtils.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextUtils_DEFINED
+#define SkTextUtils_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkString.h"
+
+class SkPath;
+
+class SK_API SkTextUtils {
+public:
+ enum Align {
+ kLeft_Align,
+ kCenter_Align,
+ kRight_Align,
+ };
+
+ static void Draw(SkCanvas*, const void* text, size_t size, SkTextEncoding,
+ SkScalar x, SkScalar y, const SkFont&, const SkPaint&, Align = kLeft_Align);
+
+ static void DrawString(SkCanvas* canvas, const char text[], SkScalar x, SkScalar y,
+ const SkFont& font, const SkPaint& paint, Align align = kLeft_Align) {
+ Draw(canvas, text, strlen(text), SkTextEncoding::kUTF8, x, y, font, paint, align);
+ }
+
+ static void GetPath(const void* text, size_t length, SkTextEncoding, SkScalar x, SkScalar y,
+ const SkFont&, SkPath*);
+};
+
+#endif
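DrawString() is the UTF-8 convenience over Draw(); a sketch of a centered label:

    #include "include/core/SkCanvas.h"
    #include "include/utils/SkTextUtils.h"

    void drawCenteredLabel(SkCanvas* canvas, const char* label) {
        SkFont font;
        font.setSize(14);
        SkPaint paint;
        SkTextUtils::DrawString(canvas, label, /*x=*/128, /*y=*/32,
                                font, paint, SkTextUtils::kCenter_Align);
    }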
diff --git a/gfx/skia/skia/include/utils/SkTraceEventPhase.h b/gfx/skia/skia/include/utils/SkTraceEventPhase.h
new file mode 100644
index 0000000000..38457be24b
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkTraceEventPhase.h
@@ -0,0 +1,19 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef SkTraceEventPhase_DEFINED
+#define SkTraceEventPhase_DEFINED
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+
+#endif // SkTraceEventPhase_DEFINED
diff --git a/gfx/skia/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
new file mode 100644
index 0000000000..a8bde9950d
--- /dev/null
+++ b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCGUtils_DEFINED
+#define SkCGUtils_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkSize.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+class SkBitmap;
+class SkData;
+class SkPixmap;
+class SkStreamRewindable;
+
+SK_API CGContextRef SkCreateCGContext(const SkPixmap&);
+
+/**
+ * Given a CGImage, allocate an SkBitmap and copy the image's pixels into it. If scaleToFit is not
+ * null, use it to determine the size of the bitmap, and scale the image to fill the bitmap.
+ * Otherwise use the image's width/height.
+ *
+ * On failure, return false, and leave bitmap unchanged.
+ */
+SK_API bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef src);
+
+SK_API sk_sp<SkImage> SkMakeImageFromCGImage(CGImageRef);
+
+/**
+ * Copy the pixels from src into the memory specified by info/rowBytes/dstPixels. On failure,
+ * return false (e.g. ImageInfo incompatible with src).
+ */
+SK_API bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* dstPixels,
+ CGImageRef src);
+static inline bool SkCopyPixelsFromCGImage(const SkPixmap& dst, CGImageRef src) {
+ return SkCopyPixelsFromCGImage(dst.info(), dst.rowBytes(), dst.writable_addr(), src);
+}
+
+/**
+ * Create an imageref from the specified bitmap using the specified colorspace.
+ * If space is NULL, then CGColorSpaceCreateDeviceRGB() is used.
+ */
+SK_API CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef space);
+
+/**
+ * Create an imageref from the specified bitmap using the colorspace returned
+ * by CGColorSpaceCreateDeviceRGB()
+ */
+static inline CGImageRef SkCreateCGImageRef(const SkBitmap& bm) {
+ return SkCreateCGImageRefWithColorspace(bm, NULL);
+}
+
+/**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+/**
+ * Return a provider that wraps the specified stream.
+ * When the provider is finally deleted, it will delete the stream.
+ */
+CGDataProviderRef SkCreateDataProviderFromStream(std::unique_ptr<SkStreamRewindable>);
+
+CGDataProviderRef SkCreateDataProviderFromData(sk_sp<SkData>);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkCGUtils_DEFINED
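On macOS/iOS builds, the hand-off to CoreGraphics can be as small as the following sketch (the caller owns the returned image):

    #include "include/core/SkBitmap.h"
    #include "include/utils/mac/SkCGUtils.h"

    #if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
    CGImageRef toCGImage(const SkBitmap& bm) {
        // Uses the device-RGB colorspace; release with CGImageRelease() when done.
        return SkCreateCGImageRef(bm);
    }
    #endif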
diff --git a/gfx/skia/skia/src/android/SkAndroidFrameworkUtils.cpp b/gfx/skia/skia/src/android/SkAndroidFrameworkUtils.cpp
new file mode 100644
index 0000000000..d5d533365b
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkAndroidFrameworkUtils.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/android/SkAndroidFrameworkUtils.h"
+#include "include/core/SkCanvas.h"
+#include "include/utils/SkPaintFilterCanvas.h"
+#include "src/core/SkDevice.h"
+#include "src/image/SkSurface_Base.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/effects/GrDisableColorXP.h"
+#endif //SK_SUPPORT_GPU
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#include <log/log.h>
+
+#if SK_SUPPORT_GPU
+bool SkAndroidFrameworkUtils::clipWithStencil(SkCanvas* canvas) {
+ SkRegion clipRegion;
+ canvas->temporary_internal_getRgnClip(&clipRegion);
+ if (clipRegion.isEmpty()) {
+ return false;
+ }
+ SkBaseDevice* device = canvas->getDevice();
+ if (!device) {
+ return false;
+ }
+ GrRenderTargetContext* rtc = device->accessRenderTargetContext();
+ if (!rtc) {
+ return false;
+ }
+ GrPaint grPaint;
+ grPaint.setXPFactory(GrDisableColorXPFactory::Get());
+ GrNoClip noClip;
+ static constexpr GrUserStencilSettings kDrawToStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x1,
+ GrUserStencilTest::kAlways,
+ 0x1,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0x1>()
+ );
+ rtc->drawRegion(noClip, std::move(grPaint), GrAA::kNo, SkMatrix::I(), clipRegion,
+ GrStyle::SimpleFill(), &kDrawToStencil);
+ return true;
+}
+#endif //SK_SUPPORT_GPU
+
+void SkAndroidFrameworkUtils::SafetyNetLog(const char* bugNumber) {
+ android_errorWriteLog(0x534e4554, bugNumber);
+}
+
+sk_sp<SkSurface> SkAndroidFrameworkUtils::getSurfaceFromCanvas(SkCanvas* canvas) {
+ sk_sp<SkSurface> surface(SkSafeRef(canvas->getSurfaceBase()));
+ return surface;
+}
+
+int SkAndroidFrameworkUtils::SaveBehind(SkCanvas* canvas, const SkRect* subset) {
+ return canvas->only_axis_aligned_saveBehind(subset);
+}
+
+SkCanvas* SkAndroidFrameworkUtils::getBaseWrappedCanvas(SkCanvas* canvas) {
+ auto pfc = canvas->internal_private_asPaintFilterCanvas();
+ auto result = canvas;
+ while (pfc) {
+ result = pfc->proxy();
+ pfc = result->internal_private_asPaintFilterCanvas();
+ }
+ return result;
+}
+#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK
+
diff --git a/gfx/skia/skia/src/android/SkAnimatedImage.cpp b/gfx/skia/skia/src/android/SkAnimatedImage.cpp
new file mode 100644
index 0000000000..6db6afde4a
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkAnimatedImage.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/android/SkAnimatedImage.h"
+#include "include/codec/SkAndroidCodec.h"
+#include "include/codec/SkCodec.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkPixelRef.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/core/SkImagePriv.h"
+
+#include <limits.h>
+#include <utility>
+
+sk_sp<SkAnimatedImage> SkAnimatedImage::Make(std::unique_ptr<SkAndroidCodec> codec,
+ SkISize scaledSize, SkIRect cropRect, sk_sp<SkPicture> postProcess) {
+ if (!codec) {
+ return nullptr;
+ }
+ auto info = codec->getInfo().makeDimensions(scaledSize);
+ return Make(std::move(codec), info, cropRect, std::move(postProcess));
+}
+
+sk_sp<SkAnimatedImage> SkAnimatedImage::Make(std::unique_ptr<SkAndroidCodec> codec,
+ const SkImageInfo& requestedInfo, SkIRect cropRect, sk_sp<SkPicture> postProcess) {
+ if (!codec) {
+ return nullptr;
+ }
+
+ auto scaledSize = requestedInfo.dimensions();
+ auto decodeInfo = requestedInfo;
+ if (codec->getEncodedFormat() != SkEncodedImageFormat::kWEBP
+ || scaledSize.width() >= decodeInfo.width()
+ || scaledSize.height() >= decodeInfo.height()) {
+ // Only libwebp can decode to arbitrary smaller sizes.
+ auto dims = codec->getInfo().dimensions();
+ decodeInfo = decodeInfo.makeDimensions(dims);
+ }
+
+ auto image = sk_sp<SkAnimatedImage>(new SkAnimatedImage(std::move(codec), scaledSize,
+ decodeInfo, cropRect, std::move(postProcess)));
+ if (!image->fDisplayFrame.fBitmap.getPixels()) {
+ // tryAllocPixels failed.
+ return nullptr;
+ }
+
+ return image;
+}
+
+sk_sp<SkAnimatedImage> SkAnimatedImage::Make(std::unique_ptr<SkAndroidCodec> codec) {
+ if (!codec) {
+ return nullptr;
+ }
+
+ const auto decodeInfo = codec->getInfo();
+ const auto scaledSize = decodeInfo.dimensions();
+ const auto cropRect = SkIRect::MakeSize(scaledSize);
+ auto image = sk_sp<SkAnimatedImage>(new SkAnimatedImage(std::move(codec), scaledSize,
+ decodeInfo, cropRect, nullptr));
+
+ if (!image->fDisplayFrame.fBitmap.getPixels()) {
+ // tryAllocPixels failed.
+ return nullptr;
+ }
+
+ SkASSERT(image->fSimple);
+ return image;
+}
+
+SkAnimatedImage::SkAnimatedImage(std::unique_ptr<SkAndroidCodec> codec, SkISize scaledSize,
+ SkImageInfo decodeInfo, SkIRect cropRect, sk_sp<SkPicture> postProcess)
+ : fCodec(std::move(codec))
+ , fScaledSize(scaledSize)
+ , fDecodeInfo(decodeInfo)
+ , fCropRect(cropRect)
+ , fPostProcess(std::move(postProcess))
+ , fFrameCount(fCodec->codec()->getFrameCount())
+ , fSimple(fScaledSize == fDecodeInfo.dimensions() && !fPostProcess
+ && fCropRect == fDecodeInfo.bounds())
+ , fFinished(false)
+ , fRepetitionCount(fCodec->codec()->getRepetitionCount())
+ , fRepetitionsCompleted(0)
+{
+ if (!fDecodingFrame.fBitmap.tryAllocPixels(fDecodeInfo)) {
+ return;
+ }
+
+ if (!fSimple) {
+ fMatrix = SkMatrix::MakeTrans(-fCropRect.fLeft, -fCropRect.fTop);
+ float scaleX = (float) fScaledSize.width() / fDecodeInfo.width();
+ float scaleY = (float) fScaledSize.height() / fDecodeInfo.height();
+ fMatrix.preConcat(SkMatrix::MakeScale(scaleX, scaleY));
+ }
+ this->decodeNextFrame();
+}
+
+SkAnimatedImage::~SkAnimatedImage() { }
+
+SkRect SkAnimatedImage::onGetBounds() {
+ return SkRect::MakeIWH(fCropRect.width(), fCropRect.height());
+}
+
+SkAnimatedImage::Frame::Frame()
+ : fIndex(SkCodec::kNoFrame)
+{}
+
+bool SkAnimatedImage::Frame::init(const SkImageInfo& info, OnInit onInit) {
+ if (fBitmap.getPixels()) {
+ if (fBitmap.pixelRef()->unique()) {
+ SkAssertResult(fBitmap.setAlphaType(info.alphaType()));
+ return true;
+ }
+
+ // An SkCanvas provided to onDraw is still holding a reference.
+ // Copy before we decode to ensure that we don't overwrite the
+ // expected contents of the image.
+ if (OnInit::kRestoreIfNecessary == onInit) {
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return false;
+ }
+
+ memcpy(tmp.getPixels(), fBitmap.getPixels(), fBitmap.computeByteSize());
+ using std::swap;
+ swap(tmp, fBitmap);
+ return true;
+ }
+ }
+
+ return fBitmap.tryAllocPixels(info);
+}
+
+bool SkAnimatedImage::Frame::copyTo(Frame* dst) const {
+ if (!dst->init(fBitmap.info(), OnInit::kNoRestore)) {
+ return false;
+ }
+
+ memcpy(dst->fBitmap.getPixels(), fBitmap.getPixels(), fBitmap.computeByteSize());
+ dst->fIndex = fIndex;
+ dst->fDisposalMethod = fDisposalMethod;
+ return true;
+}
+
+void SkAnimatedImage::reset() {
+ fFinished = false;
+ fRepetitionsCompleted = 0;
+ if (fDisplayFrame.fIndex != 0) {
+ fDisplayFrame.fIndex = SkCodec::kNoFrame;
+ this->decodeNextFrame();
+ }
+}
+
+static bool is_restore_previous(SkCodecAnimation::DisposalMethod dispose) {
+ return SkCodecAnimation::DisposalMethod::kRestorePrevious == dispose;
+}
+
+int SkAnimatedImage::computeNextFrame(int current, bool* animationEnded) {
+ SkASSERT(animationEnded != nullptr);
+ *animationEnded = false;
+
+ const int frameToDecode = current + 1;
+ if (frameToDecode == fFrameCount - 1) {
+ // Final frame. Check to determine whether to stop.
+ fRepetitionsCompleted++;
+ if (fRepetitionCount != SkCodec::kRepetitionCountInfinite
+ && fRepetitionsCompleted > fRepetitionCount) {
+ *animationEnded = true;
+ }
+ } else if (frameToDecode == fFrameCount) {
+ return 0;
+ }
+ return frameToDecode;
+}
+
+double SkAnimatedImage::finish() {
+ fFinished = true;
+ fCurrentFrameDuration = kFinished;
+ return kFinished;
+}
+
+int SkAnimatedImage::decodeNextFrame() {
+ if (fFinished) {
+ return kFinished;
+ }
+
+ bool animationEnded = false;
+ const int frameToDecode = this->computeNextFrame(fDisplayFrame.fIndex, &animationEnded);
+
+ SkCodec::FrameInfo frameInfo;
+ if (fCodec->codec()->getFrameInfo(frameToDecode, &frameInfo)) {
+ if (!frameInfo.fFullyReceived) {
+ SkCodecPrintf("Frame %i not fully received\n", frameToDecode);
+ return this->finish();
+ }
+
+ fCurrentFrameDuration = frameInfo.fDuration;
+ } else {
+ animationEnded = true;
+ if (0 == frameToDecode) {
+ // Static image. This is okay.
+ frameInfo.fRequiredFrame = SkCodec::kNoFrame;
+ frameInfo.fAlphaType = fCodec->getInfo().alphaType();
+ frameInfo.fDisposalMethod = SkCodecAnimation::DisposalMethod::kKeep;
+ // These fields won't be read.
+ frameInfo.fDuration = INT_MAX;
+ frameInfo.fFullyReceived = true;
+ fCurrentFrameDuration = kFinished;
+ } else {
+ SkCodecPrintf("Error getting frameInfo for frame %i\n",
+ frameToDecode);
+ return this->finish();
+ }
+ }
+
+ if (frameToDecode == fDisplayFrame.fIndex) {
+ if (animationEnded) {
+ return this->finish();
+ }
+ return fCurrentFrameDuration;
+ }
+
+ for (Frame* frame : { &fRestoreFrame, &fDecodingFrame }) {
+ if (frameToDecode == frame->fIndex) {
+ using std::swap;
+ swap(fDisplayFrame, *frame);
+ if (animationEnded) {
+ return this->finish();
+ }
+ return fCurrentFrameDuration;
+ }
+ }
+
+ // The following code makes an effort to avoid overwriting a frame that will
+ // be used again. If frame |i| is_restore_previous, frame |i+1| will not
+ // depend on frame |i|, so do not overwrite frame |i-1|, which may be needed
+ // for frame |i+1|.
+ // We could be even smarter about which frames to save by looking at the
+ // entire dependency chain.
+ SkCodec::Options options;
+ options.fFrameIndex = frameToDecode;
+ if (frameInfo.fRequiredFrame == SkCodec::kNoFrame) {
+ if (is_restore_previous(frameInfo.fDisposalMethod)) {
+ // frameToDecode will be discarded immediately after drawing, so
+ // do not overwrite a frame which could possibly be used in the
+ // future.
+ if (fDecodingFrame.fIndex != SkCodec::kNoFrame &&
+ !is_restore_previous(fDecodingFrame.fDisposalMethod)) {
+ using std::swap;
+ swap(fDecodingFrame, fRestoreFrame);
+ }
+ }
+ } else {
+ auto validPriorFrame = [&frameInfo, &frameToDecode](const Frame& frame) {
+ if (SkCodec::kNoFrame == frame.fIndex ||
+ is_restore_previous(frame.fDisposalMethod)) {
+ return false;
+ }
+
+ return frame.fIndex >= frameInfo.fRequiredFrame && frame.fIndex < frameToDecode;
+ };
+ if (validPriorFrame(fDecodingFrame)) {
+ if (is_restore_previous(frameInfo.fDisposalMethod)) {
+ // fDecodingFrame is a good frame to use for this one, but we
+ // don't want to overwrite it.
+ fDecodingFrame.copyTo(&fRestoreFrame);
+ }
+ options.fPriorFrame = fDecodingFrame.fIndex;
+ } else if (validPriorFrame(fDisplayFrame)) {
+ if (!fDisplayFrame.copyTo(&fDecodingFrame)) {
+ SkCodecPrintf("Failed to allocate pixels for frame\n");
+ return this->finish();
+ }
+ options.fPriorFrame = fDecodingFrame.fIndex;
+ } else if (validPriorFrame(fRestoreFrame)) {
+ if (!is_restore_previous(frameInfo.fDisposalMethod)) {
+ using std::swap;
+ swap(fDecodingFrame, fRestoreFrame);
+ } else if (!fRestoreFrame.copyTo(&fDecodingFrame)) {
+ SkCodecPrintf("Failed to restore frame\n");
+ return this->finish();
+ }
+ options.fPriorFrame = fDecodingFrame.fIndex;
+ }
+ }
+
+ auto alphaType = kOpaque_SkAlphaType == frameInfo.fAlphaType ?
+ kOpaque_SkAlphaType : kPremul_SkAlphaType;
+ auto info = fDecodeInfo.makeAlphaType(alphaType);
+ SkBitmap* dst = &fDecodingFrame.fBitmap;
+ if (!fDecodingFrame.init(info, Frame::OnInit::kRestoreIfNecessary)) {
+ return this->finish();
+ }
+
+ auto result = fCodec->codec()->getPixels(dst->info(), dst->getPixels(), dst->rowBytes(),
+ &options);
+ if (result != SkCodec::kSuccess) {
+ SkCodecPrintf("error %i, frame %i of %i\n", result, frameToDecode, fFrameCount);
+ return this->finish();
+ }
+
+ fDecodingFrame.fIndex = frameToDecode;
+ fDecodingFrame.fDisposalMethod = frameInfo.fDisposalMethod;
+
+ using std::swap;
+ swap(fDecodingFrame, fDisplayFrame);
+ fDisplayFrame.fBitmap.notifyPixelsChanged();
+
+ if (animationEnded) {
+ return this->finish();
+ } else if (fCodec->getEncodedFormat() == SkEncodedImageFormat::kHEIF) {
+ // HEIF doesn't know the frame duration until after decoding. Update to
+ // the correct value. Note that earlier returns in this method either
+ // return kFinished, or fCurrentFrameDuration. If they return the
+ // latter, it is a frame that was previously decoded, so it has the
+ // updated value.
+ if (fCodec->codec()->getFrameInfo(frameToDecode, &frameInfo)) {
+ fCurrentFrameDuration = frameInfo.fDuration;
+ } else {
+ SkCodecPrintf("Failed to getFrameInfo on second attempt (HEIF)");
+ }
+ }
+ return fCurrentFrameDuration;
+}
+
+void SkAnimatedImage::onDraw(SkCanvas* canvas) {
+ auto image = SkMakeImageFromRasterBitmap(fDisplayFrame.fBitmap,
+ kNever_SkCopyPixelsMode);
+
+ if (fSimple) {
+ canvas->drawImage(image, 0, 0);
+ return;
+ }
+
+ SkRect bounds = this->getBounds();
+ if (fPostProcess) {
+ canvas->saveLayer(&bounds, nullptr);
+ }
+ {
+ SkAutoCanvasRestore acr(canvas, fPostProcess != nullptr);
+ canvas->concat(fMatrix);
+ SkPaint paint;
+ paint.setFilterQuality(kLow_SkFilterQuality);
+ canvas->drawImage(image, 0, 0, &paint);
+ }
+ if (fPostProcess) {
+ canvas->drawPicture(fPostProcess);
+ canvas->restore();
+ }
+}
+
+void SkAnimatedImage::setRepetitionCount(int newCount) {
+ fRepetitionCount = newCount;
+}
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp b/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp
new file mode 100644
index 0000000000..5cb9ccb0c4
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkAndroidCodec.h"
+#include "src/android/SkBitmapRegionCodec.h"
+#include "src/android/SkBitmapRegionDecoderPriv.h"
+#include "src/codec/SkCodecPriv.h"
+
+SkBitmapRegionCodec::SkBitmapRegionCodec(SkAndroidCodec* codec)
+ : INHERITED(codec->getInfo().width(), codec->getInfo().height())
+ , fCodec(codec)
+{}
+
+bool SkBitmapRegionCodec::decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize, SkColorType dstColorType,
+ bool requireUnpremul, sk_sp<SkColorSpace> dstColorSpace) {
+
+ // Fix the input sampleSize if necessary.
+ if (sampleSize < 1) {
+ sampleSize = 1;
+ }
+
+ // The size of the output bitmap is determined by the size of the
+ // requested subset, not by the size of the intersection of the subset
+ // and the image dimensions.
+ // If inputX is negative, we will need to place decoded pixels into the
+ // output bitmap starting at a left offset. Call this outX.
+ // If outX is non-zero, subsetX must be zero.
+ // If inputY is negative, we will need to place decoded pixels into the
+ // output bitmap starting at a top offset. Call this outY.
+ // If outY is non-zero, subsetY must be zero.
+ int outX;
+ int outY;
+ SkIRect subset = desiredSubset;
+ SubsetType type = adjust_subset_rect(fCodec->getInfo().dimensions(), &subset, &outX, &outY);
+ if (SubsetType::kOutside_SubsetType == type) {
+ return false;
+ }
+
+ // Ask the codec for a scaled subset
+ if (!fCodec->getSupportedSubset(&subset)) {
+ SkCodecPrintf("Error: Could not get subset.\n");
+ return false;
+ }
+ SkISize scaledSize = fCodec->getSampledSubsetDimensions(sampleSize, subset);
+
+ // Create the image info for the decode
+ SkAlphaType dstAlphaType = fCodec->computeOutputAlphaType(requireUnpremul);
+ SkImageInfo decodeInfo =
+ SkImageInfo::Make(scaledSize, dstColorType, dstAlphaType, dstColorSpace);
+
+ // Initialize the destination bitmap
+ int scaledOutX = 0;
+ int scaledOutY = 0;
+ int scaledOutWidth = scaledSize.width();
+ int scaledOutHeight = scaledSize.height();
+ if (SubsetType::kPartiallyInside_SubsetType == type) {
+ scaledOutX = outX / sampleSize;
+ scaledOutY = outY / sampleSize;
+ // We need to be safe here because getSupportedSubset() may have modified the subset.
+ const int extraX = SkTMax(0, desiredSubset.width() - outX - subset.width());
+ const int extraY = SkTMax(0, desiredSubset.height() - outY - subset.height());
+ const int scaledExtraX = extraX / sampleSize;
+ const int scaledExtraY = extraY / sampleSize;
+ scaledOutWidth += scaledOutX + scaledExtraX;
+ scaledOutHeight += scaledOutY + scaledExtraY;
+ }
+ SkImageInfo outInfo = decodeInfo.makeWH(scaledOutWidth, scaledOutHeight);
+ if (kGray_8_SkColorType == dstColorType) {
+ // The legacy implementations of BitmapFactory and BitmapRegionDecoder
+ // used kAlpha8 for grayscale images (before kGray8 existed). While
+ // the codec recognizes kGray8, we need to decode into a kAlpha8
+ // bitmap in order to avoid a behavior change.
+ outInfo = outInfo.makeColorType(kAlpha_8_SkColorType).makeAlphaType(kPremul_SkAlphaType);
+ }
+ bitmap->setInfo(outInfo);
+ if (!bitmap->tryAllocPixels(allocator)) {
+ SkCodecPrintf("Error: Could not allocate pixels.\n");
+ return false;
+ }
+
+ // Zero the bitmap if the region is not completely within the image.
+ // TODO (msarett): Can we make this faster by implementing it to only
+ // zero parts of the image that we won't overwrite with
+ // pixels?
+ SkCodec::ZeroInitialized zeroInit = allocator ? allocator->zeroInit() :
+ SkCodec::kNo_ZeroInitialized;
+ if (SubsetType::kPartiallyInside_SubsetType == type &&
+ SkCodec::kNo_ZeroInitialized == zeroInit) {
+ void* pixels = bitmap->getPixels();
+ size_t bytes = outInfo.computeByteSize(bitmap->rowBytes());
+ memset(pixels, 0, bytes);
+ }
+
+ // Decode into the destination bitmap
+ SkAndroidCodec::AndroidOptions options;
+ options.fSampleSize = sampleSize;
+ options.fSubset = &subset;
+ options.fZeroInitialized = zeroInit;
+ void* dst = bitmap->getAddr(scaledOutX, scaledOutY);
+
+ SkCodec::Result result = fCodec->getAndroidPixels(decodeInfo, dst, bitmap->rowBytes(),
+ &options);
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ SkCodecPrintf("Error: Could not get pixels with message \"%s\".\n",
+ SkCodec::ResultToString(result));
+ return false;
+ }
+}
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionCodec.h b/gfx/skia/skia/src/android/SkBitmapRegionCodec.h
new file mode 100644
index 0000000000..21859514af
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionCodec.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapRegionCodec_DEFINED
+#define SkBitmapRegionCodec_DEFINED
+
+#include "include/android/SkBitmapRegionDecoder.h"
+#include "include/codec/SkAndroidCodec.h"
+#include "include/core/SkBitmap.h"
+
+/*
+ * This class implements SkBitmapRegionDecoder using an SkAndroidCodec.
+ */
+class SkBitmapRegionCodec : public SkBitmapRegionDecoder {
+public:
+
+ /*
+ * Takes ownership of pointer to codec
+ */
+ SkBitmapRegionCodec(SkAndroidCodec* codec);
+
+ bool decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize,
+ SkColorType colorType, bool requireUnpremul,
+ sk_sp<SkColorSpace> prefColorSpace) override;
+
+ SkEncodedImageFormat getEncodedFormat() override { return fCodec->getEncodedFormat(); }
+
+ SkColorType computeOutputColorType(SkColorType requestedColorType) override {
+ return fCodec->computeOutputColorType(requestedColorType);
+ }
+
+ sk_sp<SkColorSpace> computeOutputColorSpace(SkColorType outputColorType,
+ sk_sp<SkColorSpace> prefColorSpace = nullptr) override {
+ return fCodec->computeOutputColorSpace(outputColorType, prefColorSpace);
+ }
+
+private:
+
+ std::unique_ptr<SkAndroidCodec> fCodec;
+
+ typedef SkBitmapRegionDecoder INHERITED;
+
+};
+#endif // SkBitmapRegionCodec_DEFINED
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp b/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp
new file mode 100644
index 0000000000..328daa3bc9
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/android/SkBitmapRegionDecoder.h"
+#include "include/codec/SkAndroidCodec.h"
+#include "include/codec/SkCodec.h"
+#include "src/android/SkBitmapRegionCodec.h"
+#include "src/codec/SkCodecPriv.h"
+
+SkBitmapRegionDecoder* SkBitmapRegionDecoder::Create(
+ sk_sp<SkData> data, Strategy strategy) {
+ return SkBitmapRegionDecoder::Create(new SkMemoryStream(data),
+ strategy);
+}
+
+SkBitmapRegionDecoder* SkBitmapRegionDecoder::Create(
+ SkStreamRewindable* stream, Strategy strategy) {
+ std::unique_ptr<SkStreamRewindable> streamDeleter(stream);
+ switch (strategy) {
+ case kAndroidCodec_Strategy: {
+ auto codec = SkAndroidCodec::MakeFromStream(std::move(streamDeleter));
+ if (nullptr == codec) {
+ SkCodecPrintf("Error: Failed to create codec.\n");
+ return nullptr;
+ }
+
+ switch ((SkEncodedImageFormat)codec->getEncodedFormat()) {
+ case SkEncodedImageFormat::kJPEG:
+ case SkEncodedImageFormat::kPNG:
+ case SkEncodedImageFormat::kWEBP:
+ case SkEncodedImageFormat::kHEIF:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return new SkBitmapRegionCodec(codec.release());
+ }
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
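A sketch tying Create() to decodeRegion() (the tile rectangle and color type are example values; passing a null allocator falls back to heap allocation):

    #include <memory>

    #include "include/android/SkBitmapRegionDecoder.h"
    #include "include/core/SkBitmap.h"
    #include "include/core/SkData.h"

    bool decodeTile(sk_sp<SkData> encoded, SkBitmap* tile) {
        std::unique_ptr<SkBitmapRegionDecoder> brd(SkBitmapRegionDecoder::Create(
                std::move(encoded), SkBitmapRegionDecoder::kAndroidCodec_Strategy));
        if (!brd) {
            return false;
        }
        return brd->decodeRegion(tile, /*allocator=*/nullptr,
                                 SkIRect::MakeXYWH(0, 0, 256, 256), /*sampleSize=*/1,
                                 kN32_SkColorType, /*requireUnpremul=*/false,
                                 /*prefColorSpace=*/nullptr);
    }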
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h b/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h
new file mode 100644
index 0000000000..5f1613a1a6
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapRegionDecoderPriv_DEFINED
+#define SkBitmapRegionDecoderPriv_DEFINED
+
+#include "include/core/SkRect.h"
+
+enum SubsetType {
+ kFullyInside_SubsetType,
+ kPartiallyInside_SubsetType,
+ kOutside_SubsetType,
+};
+
+/*
+ * Corrects image subset offsets and dimensions in order to perform a valid decode.
+ * Also indicates if the image subset should be placed at an offset within the
+ * output bitmap.
+ *
+ * Values of output variables are undefined if the SubsetType is kOutside_SubsetType.
+ *
+ * @param imageDims Original image dimensions.
+ * @param subset As input, the subset that the client requested.
+ * As output, the image subset that we will decode.
+ * @param outX The left offset of the image subset within the output bitmap.
+ * @param outY The top offset of the image subset within the output bitmap.
+ *
+ * @return An indication of how the subset is contained in the image.
+ *         If the return value is kOutside_SubsetType, values of output variables are undefined.
+ */
+inline SubsetType adjust_subset_rect(const SkISize& imageDims, SkIRect* subset, int* outX,
+ int* outY) {
+ // These must be at least zero, we can't start decoding the image at a negative coordinate.
+ int left = SkTMax(0, subset->fLeft);
+ int top = SkTMax(0, subset->fTop);
+
+ // If input offsets are less than zero, we decode to an offset location in the output bitmap.
+ *outX = left - subset->fLeft;
+ *outY = top - subset->fTop;
+
+ // Make sure we don't decode pixels past the edge of the image or past the edge of the subset.
+ int width = SkTMin(imageDims.width() - left, subset->width() - *outX);
+ int height = SkTMin(imageDims.height() - top, subset->height() - *outY);
+ if (width <= 0 || height <= 0) {
+ return SubsetType::kOutside_SubsetType;
+ }
+
+ subset->setXYWH(left, top, width, height);
+ if ((*outX != 0) || (*outY != 0) || (width != subset->width()) ||
+ (height != subset->height())) {
+ return SubsetType::kPartiallyInside_SubsetType;
+ }
+
+ return SubsetType::kFullyInside_SubsetType;
+}
+
+#endif // SkBitmapRegionDecoderPriv_DEFINED
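A worked example of the arithmetic in adjust_subset_rect(), for a 100x100 request whose origin lies 25 pixels above and to the left of a 50x50 image:

    #include "src/android/SkBitmapRegionDecoderPriv.h"

    void subsetExample() {
        SkIRect subset = SkIRect::MakeXYWH(-25, -25, 100, 100);
        int outX, outY;
        SubsetType type = adjust_subset_rect(SkISize::Make(50, 50), &subset, &outX, &outY);
        // outX == outY == 25: decoded pixels land at an inset in the output bitmap.
        // subset is clamped to {0, 0, 50, 50}, the portion inside the image.
        // type == kPartiallyInside_SubsetType, since the offsets are non-zero.
        (void)type;
    }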
diff --git a/gfx/skia/skia/src/atlastext/SkAtlasTextContext.cpp b/gfx/skia/skia/src/atlastext/SkAtlasTextContext.cpp
new file mode 100644
index 0000000000..91e7713c12
--- /dev/null
+++ b/gfx/skia/skia/src/atlastext/SkAtlasTextContext.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/atlastext/SkAtlasTextContext.h"
+#include "include/atlastext/SkAtlasTextRenderer.h"
+#include "src/atlastext/SkInternalAtlasTextContext.h"
+
+sk_sp<SkAtlasTextContext> SkAtlasTextContext::Make(sk_sp<SkAtlasTextRenderer> renderer) {
+ return sk_sp<SkAtlasTextContext>(new SkAtlasTextContext(std::move(renderer)));
+}
+
+SkAtlasTextContext::SkAtlasTextContext(sk_sp<SkAtlasTextRenderer> renderer)
+ : fInternalContext(SkInternalAtlasTextContext::Make(std::move(renderer))) {}
diff --git a/gfx/skia/skia/src/atlastext/SkAtlasTextTarget.cpp b/gfx/skia/skia/src/atlastext/SkAtlasTextTarget.cpp
new file mode 100644
index 0000000000..80ef553333
--- /dev/null
+++ b/gfx/skia/skia/src/atlastext/SkAtlasTextTarget.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/atlastext/SkAtlasTextTarget.h"
+
+#include "include/atlastext/SkAtlasTextContext.h"
+#include "include/atlastext/SkAtlasTextFont.h"
+#include "include/atlastext/SkAtlasTextRenderer.h"
+#include "src/atlastext/SkInternalAtlasTextContext.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/ops/GrAtlasTextOp.h"
+#include "src/gpu/text/GrTextContext.h"
+
+static constexpr int kMaxBatchLookBack = 10;
+
+SkAtlasTextTarget::SkAtlasTextTarget(sk_sp<SkAtlasTextContext> context, int width, int height,
+ void* handle)
+ : fHandle(handle)
+ , fContext(std::move(context))
+ , fWidth(width)
+ , fHeight(height)
+ , fMatrixStack(sizeof(SkMatrix), 4)
+ , fSaveCnt(0) {
+ fMatrixStack.push_back();
+ this->accessCTM()->reset();
+}
+
+SkAtlasTextTarget::~SkAtlasTextTarget() { fContext->renderer()->targetDeleted(fHandle); }
+
+int SkAtlasTextTarget::save() {
+ const auto& currCTM = this->ctm();
+ *static_cast<SkMatrix*>(fMatrixStack.push_back()) = currCTM;
+ return fSaveCnt++;
+}
+
+void SkAtlasTextTarget::restore() {
+ if (fSaveCnt) {
+ fMatrixStack.pop_back();
+ fSaveCnt--;
+ }
+}
+
+void SkAtlasTextTarget::restoreToCount(int count) {
+ while (fSaveCnt > count) {
+ this->restore();
+ }
+}
+
+void SkAtlasTextTarget::translate(SkScalar dx, SkScalar dy) {
+ this->accessCTM()->preTranslate(dx, dy);
+}
+
+void SkAtlasTextTarget::scale(SkScalar sx, SkScalar sy) { this->accessCTM()->preScale(sx, sy); }
+
+void SkAtlasTextTarget::rotate(SkScalar degrees) { this->accessCTM()->preRotate(degrees); }
+
+void SkAtlasTextTarget::rotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ this->accessCTM()->preRotate(degrees, px, py);
+}
+
+void SkAtlasTextTarget::skew(SkScalar sx, SkScalar sy) { this->accessCTM()->preSkew(sx, sy); }
+
+void SkAtlasTextTarget::concat(const SkMatrix& matrix) { this->accessCTM()->preConcat(matrix); }
+
+//////////////////////////////////////////////////////////////////////////////
+
+static const GrColorInfo kColorInfo(GrColorType::kRGBA_8888, kPremul_SkAlphaType, nullptr);
+static const SkSurfaceProps kProps(
+ SkSurfaceProps::kUseDistanceFieldFonts_Flag, kUnknown_SkPixelGeometry);
+
+//////////////////////////////////////////////////////////////////////////////
+
+class SkInternalAtlasTextTarget : public GrTextTarget, public SkAtlasTextTarget {
+public:
+ SkInternalAtlasTextTarget(sk_sp<SkAtlasTextContext> context, int width, int height,
+ void* handle)
+ : GrTextTarget(width, height, kColorInfo)
+ , SkAtlasTextTarget(std::move(context), width, height, handle)
+ , fGlyphPainter(kProps, kColorInfo) {
+ fOpMemoryPool = fContext->internal().grContext()->priv().refOpMemoryPool();
+ }
+
+ ~SkInternalAtlasTextTarget() override {
+ this->deleteOps();
+ }
+
+ /** GrTextTarget overrides */
+
+ void addDrawOp(const GrClip&, std::unique_ptr<GrAtlasTextOp> op) override;
+
+ void drawShape(const GrClip&, const SkPaint&, const SkMatrix& viewMatrix,
+ const GrShape&) override {
+ SkDebugf("Path glyph??");
+ }
+
+ void makeGrPaint(GrMaskFormat, const SkPaint& skPaint, const SkMatrix&,
+ GrPaint* grPaint) override {
+ grPaint->setColor4f(skPaint.getColor4f().premul());
+ }
+
+ GrContext* getContext() override {
+ return this->context()->internal().grContext();
+ }
+
+ SkGlyphRunListPainter* glyphPainter() override {
+ return &fGlyphPainter;
+ }
+
+ /** SkAtlasTextTarget overrides */
+
+ void drawText(const SkGlyphID[], const SkPoint[], int glyphCnt, uint32_t color,
+ const SkAtlasTextFont&) override;
+ void flush() override;
+
+private:
+ void deleteOps();
+
+ uint32_t fColor;
+ using SkAtlasTextTarget::fWidth;
+ using SkAtlasTextTarget::fHeight;
+ SkTArray<std::unique_ptr<GrAtlasTextOp>, true> fOps;
+ sk_sp<GrOpMemoryPool> fOpMemoryPool;
+ SkGlyphRunListPainter fGlyphPainter;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkAtlasTextTarget> SkAtlasTextTarget::Make(sk_sp<SkAtlasTextContext> context,
+ int width, int height, void* handle) {
+ return std::unique_ptr<SkAtlasTextTarget>(
+ new SkInternalAtlasTextTarget(std::move(context), width, height, handle));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkInternalAtlasTextTarget::drawText(const SkGlyphID glyphs[], const SkPoint positions[],
+ int glyphCnt, uint32_t color,
+ const SkAtlasTextFont& font) {
+ SkPaint paint;
+ paint.setAntiAlias(true);
+
+    // The atlas text context munges the paint color. We store the client's color here
+ // and then overwrite the generated op's color when addDrawOp() is called.
+ fColor = color;
+
+ SkSurfaceProps props(SkSurfaceProps::kUseDistanceFieldFonts_Flag, kUnknown_SkPixelGeometry);
+ auto grContext = this->context()->internal().grContext();
+ auto atlasTextContext = grContext->priv().drawingManager()->getTextContext();
+ SkGlyphRunBuilder builder;
+ builder.drawGlyphsWithPositions(paint, font.makeFont(),
+ SkSpan<const SkGlyphID>{glyphs, SkTo<size_t>(glyphCnt)},
+ positions);
+ auto glyphRunList = builder.useGlyphRunList();
+ if (!glyphRunList.empty()) {
+ atlasTextContext->drawGlyphRunList(grContext, this, GrNoClip(), this->ctm(), props,
+ glyphRunList);
+ }
+}
+
+void SkInternalAtlasTextTarget::addDrawOp(const GrClip& clip, std::unique_ptr<GrAtlasTextOp> op) {
+ SkASSERT(clip.quickContains(SkRect::MakeIWH(fWidth, fHeight)));
+ // The SkAtlasTextRenderer currently only handles grayscale SDF glyphs.
+ if (op->maskType() != GrAtlasTextOp::kGrayscaleDistanceField_MaskType) {
+ return;
+ }
+ const GrCaps& caps = *this->context()->internal().grContext()->priv().caps();
+ op->finalizeForTextTarget(fColor, caps);
+ int n = SkTMin(kMaxBatchLookBack, fOps.count());
+ for (int i = 0; i < n; ++i) {
+ GrAtlasTextOp* other = fOps.fromBack(i).get();
+ if (other->combineIfPossible(op.get(), caps) == GrOp::CombineResult::kMerged) {
+ fOpMemoryPool->release(std::move(op));
+ return;
+ }
+ if (GrRectsOverlap(op->bounds(), other->bounds())) {
+ break;
+ }
+ }
+ fOps.emplace_back(std::move(op));
+}
+
+void SkInternalAtlasTextTarget::deleteOps() {
+ for (int i = 0; i < fOps.count(); ++i) {
+ if (fOps[i]) {
+ fOpMemoryPool->release(std::move(fOps[i]));
+ }
+ }
+ fOps.reset();
+}
+
+void SkInternalAtlasTextTarget::flush() {
+ for (int i = 0; i < fOps.count(); ++i) {
+ fOps[i]->executeForTextTarget(this);
+ }
+ this->context()->internal().flush();
+ this->deleteOps();
+}
+
+void GrAtlasTextOp::finalizeForTextTarget(uint32_t color, const GrCaps& caps) {
+ // TODO4F: Odd handling of client colors among AtlasTextTarget and AtlasTextRenderer
+ SkPMColor4f color4f = SkPMColor4f::FromBytes_RGBA(color);
+ for (int i = 0; i < fGeoCount; ++i) {
+ fGeoData[i].fColor = color4f;
+ }
+ // Atlas text doesn't use MSAA, so no need to handle mixed samples.
+ // Also, no need to support normalized F16 with manual clamp?
+ this->finalize(caps, nullptr /* applied clip */, false /* mixed samples */, GrClampType::kAuto);
+}
+
+void GrAtlasTextOp::executeForTextTarget(SkAtlasTextTarget* target) {
+ FlushInfo flushInfo;
+ SkExclusiveStrikePtr autoGlyphCache;
+ auto& context = target->context()->internal();
+ auto glyphCache = context.grContext()->priv().getGrStrikeCache();
+ auto atlasManager = context.grContext()->priv().getAtlasManager();
+ auto resourceProvider = context.grContext()->priv().resourceProvider();
+
+ unsigned int numProxies;
+ if (!atlasManager->getProxies(kA8_GrMaskFormat, &numProxies)) {
+ return;
+ }
+
+ for (int i = 0; i < fGeoCount; ++i) {
+ // TODO4F: Preserve float colors
+ GrTextBlob::VertexRegenerator regenerator(
+ resourceProvider, fGeoData[i].fBlob, fGeoData[i].fRun, fGeoData[i].fSubRun,
+ fGeoData[i].fViewMatrix, fGeoData[i].fX, fGeoData[i].fY,
+ fGeoData[i].fColor.toBytes_RGBA(), &context, glyphCache, atlasManager,
+ &autoGlyphCache);
+ bool done = false;
+ while (!done) {
+ GrTextBlob::VertexRegenerator::Result result;
+ if (!regenerator.regenerate(&result)) {
+ break;
+ }
+ done = result.fFinished;
+
+ context.recordDraw(result.fFirstVertex, result.fGlyphsRegenerated,
+ fGeoData[i].fViewMatrix, target->handle());
+ if (!result.fFinished) {
+ // Make space in the atlas so we can continue generating vertices.
+ context.flush();
+ }
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.cpp b/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.cpp
new file mode 100644
index 0000000000..725be72e6d
--- /dev/null
+++ b/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/atlastext/SkAtlasTextContext.h"
+#include "include/atlastext/SkAtlasTextRenderer.h"
+#include "include/gpu/GrContext.h"
+#include "src/atlastext/SkInternalAtlasTextContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/text/GrStrikeCache.h"
+
+SkAtlasTextRenderer* SkGetAtlasTextRendererFromInternalContext(
+ class SkInternalAtlasTextContext& internal) {
+ return internal.renderer();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkInternalAtlasTextContext> SkInternalAtlasTextContext::Make(
+ sk_sp<SkAtlasTextRenderer> renderer) {
+ return std::unique_ptr<SkInternalAtlasTextContext>(
+ new SkInternalAtlasTextContext(std::move(renderer)));
+}
+
+SkInternalAtlasTextContext::SkInternalAtlasTextContext(sk_sp<SkAtlasTextRenderer> renderer)
+ : fRenderer(std::move(renderer)) {
+ GrContextOptions options;
+ options.fAllowMultipleGlyphCacheTextures = GrContextOptions::Enable::kNo;
+ options.fMinDistanceFieldFontSize = 0.f;
+ options.fGlyphsAsPathsFontSize = SK_ScalarInfinity;
+ options.fDistanceFieldGlyphVerticesAlwaysHaveW = GrContextOptions::Enable::kYes;
+ fGrContext = GrContext::MakeMock(nullptr, options);
+}
+
+SkInternalAtlasTextContext::~SkInternalAtlasTextContext() {
+ if (fDistanceFieldAtlas.fProxy) {
+#ifdef SK_DEBUG
+ auto atlasManager = fGrContext->priv().getAtlasManager();
+ if (atlasManager) {
+ unsigned int numProxies;
+ atlasManager->getProxies(kA8_GrMaskFormat, &numProxies);
+ SkASSERT(1 == numProxies);
+ }
+#endif
+ fRenderer->deleteTexture(fDistanceFieldAtlas.fTextureHandle);
+ }
+}
+
+GrStrikeCache* SkInternalAtlasTextContext::glyphCache() {
+ return fGrContext->priv().getGrStrikeCache();
+}
+
+GrTextBlobCache* SkInternalAtlasTextContext::textBlobCache() {
+ return fGrContext->priv().getTextBlobCache();
+}
+
+GrDeferredUploadToken SkInternalAtlasTextContext::addInlineUpload(
+ GrDeferredTextureUploadFn&& upload) {
+ auto token = fTokenTracker.nextDrawToken();
+ fInlineUploads.append(&fArena, InlineUpload{std::move(upload), token});
+ return token;
+}
+
+GrDeferredUploadToken SkInternalAtlasTextContext::addASAPUpload(
+ GrDeferredTextureUploadFn&& upload) {
+ fASAPUploads.append(&fArena, std::move(upload));
+ return fTokenTracker.nextTokenToFlush();
+}
+
+void SkInternalAtlasTextContext::recordDraw(const void* srcVertexData, int glyphCnt,
+ const SkMatrix& matrix, void* targetHandle) {
+ auto vertexDataSize = sizeof(SkAtlasTextRenderer::SDFVertex) * 4 * glyphCnt;
+ auto vertexData = fArena.makeArrayDefault<char>(vertexDataSize);
+ memcpy(vertexData, srcVertexData, vertexDataSize);
+ for (int i = 0; i < 4 * glyphCnt; ++i) {
+ auto* vertex = reinterpret_cast<SkAtlasTextRenderer::SDFVertex*>(vertexData) + i;
+ // GrTextContext encodes a texture index into the lower bit of each texture coord.
+ // This isn't expected by SkAtlasTextRenderer subclasses.
+ vertex->fTextureCoordX /= 2;
+ vertex->fTextureCoordY /= 2;
+ matrix.mapHomogeneousPoints(&vertex->fPosition, &vertex->fPosition, 1);
+ }
+ fDraws.append(&fArena,
+ Draw{glyphCnt, fTokenTracker.issueDrawToken(), targetHandle, vertexData});
+}
+
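+// Replays the recorded work in token order: ASAP uploads run first, then each
+// draw is preceded by any inline uploads that share its token, so the atlas
+// texture is up to date before the renderer consumes the vertices.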
+void SkInternalAtlasTextContext::flush() {
+ auto* atlasManager = fGrContext->priv().getAtlasManager();
+ if (!fDistanceFieldAtlas.fProxy) {
+ unsigned int numProxies;
+ fDistanceFieldAtlas.fProxy = atlasManager->getProxies(kA8_GrMaskFormat, &numProxies)->get();
+ SkASSERT(1 == numProxies);
+ fDistanceFieldAtlas.fTextureHandle =
+ fRenderer->createTexture(SkAtlasTextRenderer::AtlasFormat::kA8,
+ fDistanceFieldAtlas.fProxy->width(),
+ fDistanceFieldAtlas.fProxy->height());
+ }
+ GrDeferredTextureUploadWritePixelsFn writePixelsFn =
+ [this](GrTextureProxy* proxy, int left, int top, int width, int height,
+ GrColorType colorType, const void* data, size_t rowBytes) -> bool {
+ SkASSERT(GrColorType::kAlpha_8 == colorType);
+ SkASSERT(proxy == this->fDistanceFieldAtlas.fProxy);
+ void* handle = fDistanceFieldAtlas.fTextureHandle;
+ this->fRenderer->setTextureData(handle, data, left, top, width, height, rowBytes);
+ return true;
+ };
+ for (const auto& upload : fASAPUploads) {
+ upload(writePixelsFn);
+ }
+ auto inlineUpload = fInlineUploads.begin();
+ for (const auto& draw : fDraws) {
+ while (inlineUpload != fInlineUploads.end() && inlineUpload->fToken == draw.fToken) {
+ inlineUpload->fUpload(writePixelsFn);
+ ++inlineUpload;
+ }
+ auto vertices = reinterpret_cast<const SkAtlasTextRenderer::SDFVertex*>(draw.fVertexData);
+ fRenderer->drawSDFGlyphs(draw.fTargetHandle, fDistanceFieldAtlas.fTextureHandle, vertices,
+ draw.fGlyphCnt);
+ fTokenTracker.flushToken();
+ }
+ fASAPUploads.reset();
+ fInlineUploads.reset();
+ fDraws.reset();
+ fArena.reset();
+}
diff --git a/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.h b/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.h
new file mode 100644
index 0000000000..d0f7f23d2f
--- /dev/null
+++ b/gfx/skia/skia/src/atlastext/SkInternalAtlasTextContext.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkInternalAtlasTextContext_DEFINED
+#define SkInternalAtlasTextContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkArenaAllocList.h"
+#include "src/gpu/GrDeferredUpload.h"
+
+class GrContext;
+class GrStrikeCache;
+class GrTextBlobCache;
+
+class SkAtlasTextRenderer;
+class SkMatrix;
+
+/**
+ * The implementation of SkAtlasTextContext. This exists to hide the details from the public
+ * class and to be able to use other private types.
+ */
+class SkInternalAtlasTextContext : public GrDeferredUploadTarget {
+public:
+ static std::unique_ptr<SkInternalAtlasTextContext> Make(sk_sp<SkAtlasTextRenderer>);
+
+ ~SkInternalAtlasTextContext() override;
+
+ SkAtlasTextRenderer* renderer() const { return fRenderer.get(); }
+
+ GrContext* grContext() const { return fGrContext.get(); }
+ GrStrikeCache* glyphCache();
+ GrTextBlobCache* textBlobCache();
+
+ const GrTokenTracker* tokenTracker() final { return &fTokenTracker; }
+ GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
+ GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
+
+ void recordDraw(const void* vertexData, int glyphCnt, const SkMatrix&, void* targetHandle);
+
+ void flush();
+
+private:
+ class DeferredUploader;
+ SkInternalAtlasTextContext() = delete;
+ SkInternalAtlasTextContext(const SkInternalAtlasTextContext&) = delete;
+ SkInternalAtlasTextContext& operator=(const SkInternalAtlasTextContext&) = delete;
+
+ SkInternalAtlasTextContext(sk_sp<SkAtlasTextRenderer>);
+
+ sk_sp<SkAtlasTextRenderer> fRenderer;
+
+ struct AtlasTexture {
+ void* fTextureHandle = nullptr;
+ GrTextureProxy* fProxy = nullptr;
+ };
+
+ struct Draw {
+ int fGlyphCnt;
+ GrDeferredUploadToken fToken;
+ void* fTargetHandle;
+ const void* fVertexData;
+ };
+
+ struct InlineUpload {
+ GrDeferredTextureUploadFn fUpload;
+ GrDeferredUploadToken fToken;
+ };
+
+ GrTokenTracker fTokenTracker;
+ SkArenaAllocList<InlineUpload> fInlineUploads;
+ SkArenaAllocList<Draw> fDraws;
+ SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
+ SkArenaAlloc fArena{1024 * 40};
+ sk_sp<GrContext> fGrContext;
+ AtlasTexture fDistanceFieldAtlas;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/c/sk_c_from_to.h b/gfx/skia/skia/src/c/sk_c_from_to.h
new file mode 100644
index 0000000000..19fda37a26
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_c_from_to.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+static bool find_sk(CType from, SKType* to) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(CTypeSkTypeMap); ++i) {
+ if (CTypeSkTypeMap[i].fC == from) {
+ if (to) {
+ *to = CTypeSkTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool find_c(SKType from, CType* to) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(CTypeSkTypeMap); ++i) {
+ if (CTypeSkTypeMap[i].fSK == from) {
+ if (to) {
+ *to = CTypeSkTypeMap[i].fC;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+#undef CType
+#undef SKType
+#undef CTypeSkTypeMap
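+// Illustrative usage sketch: a consumer defines the three macros and then
+// includes this header to stamp out a matching find_sk()/find_c() pair for
+// that type, as src/c/sk_paint.cpp does for stroke caps and joins:
+//
+//   #define CType          sk_stroke_cap_t
+//   #define SKType         SkPaint::Cap
+//   #define CTypeSkTypeMap g_sk_stroke_cap_t_map
+//   #include "src/c/sk_c_from_to.h"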
diff --git a/gfx/skia/skia/src/c/sk_effects.cpp b/gfx/skia/skia/src/c/sk_effects.cpp
new file mode 100644
index 0000000000..7e226cd756
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_effects.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "src/c/sk_types_priv.h"
+
+static void from_c_matrix(const sk_matrix_t* cmatrix, SkMatrix* matrix) {
+ matrix->setAll(cmatrix->mat[0], cmatrix->mat[1], cmatrix->mat[2],
+ cmatrix->mat[3], cmatrix->mat[4], cmatrix->mat[5],
+ cmatrix->mat[6], cmatrix->mat[7], cmatrix->mat[8]);
+}
+
+#include "include/c/sk_shader.h"
+#include "include/effects/SkGradientShader.h"
+
+const struct {
+ sk_shader_tilemode_t fC;
+ SkTileMode fSK;
+} gTileModeMap[] = {
+ { CLAMP_SK_SHADER_TILEMODE, SkTileMode::kClamp },
+ { REPEAT_SK_SHADER_TILEMODE, SkTileMode::kRepeat },
+ { MIRROR_SK_SHADER_TILEMODE, SkTileMode::kMirror },
+};
+
+static bool from_c_tilemode(sk_shader_tilemode_t cMode, SkTileMode* skMode) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gTileModeMap); ++i) {
+ if (cMode == gTileModeMap[i].fC) {
+ if (skMode) {
+ *skMode = gTileModeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+void sk_shader_ref(sk_shader_t* cshader) {
+ SkSafeRef(AsShader(cshader));
+}
+
+void sk_shader_unref(sk_shader_t* cshader) {
+ SkSafeUnref(AsShader(cshader));
+}
+
+sk_shader_t* sk_shader_new_linear_gradient(const sk_point_t pts[2],
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkTileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return nullptr;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ return (sk_shader_t*)SkGradientShader::MakeLinear(reinterpret_cast<const SkPoint*>(pts),
+ reinterpret_cast<const SkColor*>(colors),
+ colorPos, colorCount,
+ mode, 0, &matrix).release();
+}
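+// Illustrative caller-side sketch (names are hypothetical, compiled out). A
+// NULL colorPos spreads the stops evenly; a NULL matrix falls back to
+// identity, as handled above.
+#if 0
+static sk_shader_t* make_red_to_blue_gradient(void) {
+    sk_point_t pts[2] = {{0.0f, 0.0f}, {256.0f, 0.0f}};
+    sk_color_t colors[2] = {0xFFFF0000, 0xFF0000FF};  // opaque red, opaque blue (ARGB)
+    return sk_shader_new_linear_gradient(pts, colors, /*colorPos=*/NULL, 2,
+                                         CLAMP_SK_SHADER_TILEMODE, /*cmatrix=*/NULL);
+}
+#endif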
+
+static const SkPoint& to_skpoint(const sk_point_t& p) {
+ return reinterpret_cast<const SkPoint&>(p);
+}
+
+sk_shader_t* sk_shader_new_radial_gradient(const sk_point_t* ccenter,
+ float radius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkTileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return nullptr;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ SkPoint center = to_skpoint(*ccenter);
+ return (sk_shader_t*)SkGradientShader::MakeRadial(center, (SkScalar)radius,
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, mode, 0, &matrix).release();
+}
+
+sk_shader_t* sk_shader_new_sweep_gradient(const sk_point_t* ccenter,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ const sk_matrix_t* cmatrix) {
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ return (sk_shader_t*)SkGradientShader::MakeSweep((SkScalar)(ccenter->x),
+ (SkScalar)(ccenter->y),
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, 0, &matrix).release();
+}
+
+sk_shader_t* sk_shader_new_two_point_conical_gradient(const sk_point_t* start,
+ float startRadius,
+ const sk_point_t* end,
+ float endRadius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkTileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return nullptr;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ SkPoint skstart = to_skpoint(*start);
+ SkPoint skend = to_skpoint(*end);
+ return (sk_shader_t*)SkGradientShader::MakeTwoPointConical(skstart, (SkScalar)startRadius,
+ skend, (SkScalar)endRadius,
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, mode, 0, &matrix).release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/c/sk_maskfilter.h"
+#include "include/core/SkMaskFilter.h"
+
+const struct {
+ sk_blurstyle_t fC;
+ SkBlurStyle fSk;
+} gBlurStylePairs[] = {
+ { NORMAL_SK_BLUR_STYLE, kNormal_SkBlurStyle },
+ { SOLID_SK_BLUR_STYLE, kSolid_SkBlurStyle },
+ { OUTER_SK_BLUR_STYLE, kOuter_SkBlurStyle },
+ { INNER_SK_BLUR_STYLE, kInner_SkBlurStyle },
+};
+
+static bool find_blurstyle(sk_blurstyle_t csrc, SkBlurStyle* dst) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gBlurStylePairs); ++i) {
+ if (gBlurStylePairs[i].fC == csrc) {
+ if (dst) {
+ *dst = gBlurStylePairs[i].fSk;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+void sk_maskfilter_ref(sk_maskfilter_t* cfilter) {
+ SkSafeRef(AsMaskFilter(cfilter));
+}
+
+void sk_maskfilter_unref(sk_maskfilter_t* cfilter) {
+ SkSafeUnref(AsMaskFilter(cfilter));
+}
+
+sk_maskfilter_t* sk_maskfilter_new_blur(sk_blurstyle_t cstyle, float sigma) {
+ SkBlurStyle style;
+ if (!find_blurstyle(cstyle, &style)) {
+ return nullptr;
+ }
+ return ToMaskFilter(SkMaskFilter::MakeBlur(style, sigma).release());
+}
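+// Illustrative caller-side sketch (names are hypothetical, compiled out). The
+// paint takes its own reference in sk_paint_set_maskfilter(), so the caller
+// releases its reference afterwards.
+#if 0
+static void apply_blur_to_paint(sk_paint_t* paint) {
+    sk_maskfilter_t* blur = sk_maskfilter_new_blur(NORMAL_SK_BLUR_STYLE, 3.0f);
+    if (blur) {
+        sk_paint_set_maskfilter(paint, blur);
+        sk_maskfilter_unref(blur);
+    }
+}
+#endif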
diff --git a/gfx/skia/skia/src/c/sk_imageinfo.cpp b/gfx/skia/skia/src/c/sk_imageinfo.cpp
new file mode 100644
index 0000000000..160c7f9ab7
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_imageinfo.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+
+#include "include/c/sk_colorspace.h"
+#include "include/c/sk_imageinfo.h"
+
+const struct {
+ sk_colortype_t fC;
+ SkColorType fSK;
+} gColorTypeMap[] = {
+ { UNKNOWN_SK_COLORTYPE, kUnknown_SkColorType },
+ { RGBA_8888_SK_COLORTYPE, kRGBA_8888_SkColorType },
+ { BGRA_8888_SK_COLORTYPE, kBGRA_8888_SkColorType },
+ { ALPHA_8_SK_COLORTYPE, kAlpha_8_SkColorType },
+ { GRAY_8_SK_COLORTYPE, kGray_8_SkColorType },
+ { RGBA_F16_SK_COLORTYPE, kRGBA_F16_SkColorType },
+ { RGBA_F32_SK_COLORTYPE, kRGBA_F32_SkColorType },
+};
+
+const struct {
+ sk_alphatype_t fC;
+ SkAlphaType fSK;
+} gAlphaTypeMap[] = {
+ { OPAQUE_SK_ALPHATYPE, kOpaque_SkAlphaType },
+ { PREMUL_SK_ALPHATYPE, kPremul_SkAlphaType },
+ { UNPREMUL_SK_ALPHATYPE, kUnpremul_SkAlphaType },
+};
+
+static bool from_c_colortype(sk_colortype_t cCT, SkColorType* skCT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gColorTypeMap); ++i) {
+ if (gColorTypeMap[i].fC == cCT) {
+ if (skCT) {
+ *skCT = gColorTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool to_c_colortype(SkColorType skCT, sk_colortype_t* cCT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gColorTypeMap); ++i) {
+ if (gColorTypeMap[i].fSK == skCT) {
+ if (cCT) {
+ *cCT = gColorTypeMap[i].fC;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool from_c_alphatype(sk_alphatype_t cAT, SkAlphaType* skAT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gAlphaTypeMap); ++i) {
+ if (gAlphaTypeMap[i].fC == cAT) {
+ if (skAT) {
+ *skAT = gAlphaTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool to_c_alphatype(SkAlphaType skAT, sk_alphatype_t* cAT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gAlphaTypeMap); ++i) {
+ if (gAlphaTypeMap[i].fSK == skAT) {
+ if (cAT) {
+ *cAT = gAlphaTypeMap[i].fC;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+const SkImageInfo* ToImageInfo(const sk_imageinfo_t* cinfo) {
+ return reinterpret_cast<const SkImageInfo*>(cinfo);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_imageinfo_t* sk_imageinfo_new(int w, int h, sk_colortype_t cct, sk_alphatype_t cat,
+ sk_colorspace_t* ccs) {
+ SkColorType ct;
+ SkAlphaType at;
+ if (!from_c_colortype(cct, &ct) || !from_c_alphatype(cat, &at)) {
+ return nullptr;
+ }
+ SkColorSpace* cs = (SkColorSpace*)ccs;
+
+ SkImageInfo* info = new SkImageInfo(SkImageInfo::Make(w, h, ct, at, sk_ref_sp(cs)));
+ return reinterpret_cast<sk_imageinfo_t*>(info);
+}
+
+void sk_imageinfo_delete(sk_imageinfo_t* cinfo) {
+ delete ToImageInfo(cinfo);
+}
+
+int32_t sk_imageinfo_get_width(const sk_imageinfo_t* cinfo) {
+ return ToImageInfo(cinfo)->width();
+}
+
+int32_t sk_imageinfo_get_height(const sk_imageinfo_t* cinfo) {
+ return ToImageInfo(cinfo)->height();
+}
+
+sk_colortype_t sk_imageinfo_get_colortype(const sk_imageinfo_t* cinfo) {
+ sk_colortype_t ct;
+ return to_c_colortype(ToImageInfo(cinfo)->colorType(), &ct) ? ct : UNKNOWN_SK_COLORTYPE;
+}
+
+sk_alphatype_t sk_imageinfo_get_alphatype(const sk_imageinfo_t* cinfo) {
+ sk_alphatype_t at;
+ // odd that we return premul on failure...
+ return to_c_alphatype(ToImageInfo(cinfo)->alphaType(), &at) ? at : PREMUL_SK_ALPHATYPE;
+}
+
+sk_colorspace_t* sk_imageinfo_get_colorspace(const sk_imageinfo_t* cinfo) {
+ return reinterpret_cast<sk_colorspace_t*>(ToImageInfo(cinfo)->colorSpace());
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_colorspace_t* sk_colorspace_new_srgb() {
+ return reinterpret_cast<sk_colorspace_t*>(SkColorSpace::MakeSRGB().release());
+}
+
+void sk_colorspace_ref(sk_colorspace_t* cs) {
+ SkSafeRef(reinterpret_cast<SkColorSpace*>(cs));
+}
+
+void sk_colorspace_unref(sk_colorspace_t* cs) {
+ SkSafeUnref(reinterpret_cast<SkColorSpace*>(cs));
+}
+
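+// Illustrative round-trip sketch (names are hypothetical, compiled out).
+// sk_imageinfo_new() takes its own reference to the color space, so the
+// caller's reference is released independently.
+#if 0
+static void imageinfo_roundtrip(void) {
+    sk_colorspace_t* srgb = sk_colorspace_new_srgb();
+    sk_imageinfo_t* info = sk_imageinfo_new(640, 480, RGBA_8888_SK_COLORTYPE,
+                                            PREMUL_SK_ALPHATYPE, srgb);
+    SkASSERT(640 == sk_imageinfo_get_width(info));
+    SkASSERT(PREMUL_SK_ALPHATYPE == sk_imageinfo_get_alphatype(info));
+    sk_imageinfo_delete(info);
+    sk_colorspace_unref(srgb);
+}
+#endif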
diff --git a/gfx/skia/skia/src/c/sk_paint.cpp b/gfx/skia/skia/src/c/sk_paint.cpp
new file mode 100644
index 0000000000..0236270b15
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_paint.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+
+#include "include/c/sk_paint.h"
+#include "src/c/sk_types_priv.h"
+
+#define MAKE_FROM_TO_NAME(FROM) g_ ## FROM ## _map
+
+const struct {
+ sk_stroke_cap_t fC;
+ SkPaint::Cap fSK;
+} MAKE_FROM_TO_NAME(sk_stroke_cap_t)[] = {
+ { BUTT_SK_STROKE_CAP, SkPaint::kButt_Cap },
+ { ROUND_SK_STROKE_CAP, SkPaint::kRound_Cap },
+ { SQUARE_SK_STROKE_CAP, SkPaint::kSquare_Cap },
+};
+
+const struct {
+ sk_stroke_join_t fC;
+ SkPaint::Join fSK;
+} MAKE_FROM_TO_NAME(sk_stroke_join_t)[] = {
+ { MITER_SK_STROKE_JOIN, SkPaint::kMiter_Join },
+ { ROUND_SK_STROKE_JOIN, SkPaint::kRound_Join },
+ { BEVEL_SK_STROKE_JOIN, SkPaint::kBevel_Join },
+};
+
+#define CType sk_stroke_cap_t
+#define SKType SkPaint::Cap
+#define CTypeSkTypeMap MAKE_FROM_TO_NAME(sk_stroke_cap_t)
+#include "src/c/sk_c_from_to.h"
+
+#define CType sk_stroke_join_t
+#define SKType SkPaint::Join
+#define CTypeSkTypeMap MAKE_FROM_TO_NAME(sk_stroke_join_t)
+#include "src/c/sk_c_from_to.h"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_paint_t* sk_paint_new() { return (sk_paint_t*)new SkPaint; }
+
+void sk_paint_delete(sk_paint_t* cpaint) { delete AsPaint(cpaint); }
+
+bool sk_paint_is_antialias(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).isAntiAlias();
+}
+
+void sk_paint_set_antialias(sk_paint_t* cpaint, bool aa) {
+ AsPaint(cpaint)->setAntiAlias(aa);
+}
+
+sk_color_t sk_paint_get_color(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getColor();
+}
+
+void sk_paint_set_color(sk_paint_t* cpaint, sk_color_t c) {
+ AsPaint(cpaint)->setColor(c);
+}
+
+void sk_paint_set_shader(sk_paint_t* cpaint, sk_shader_t* cshader) {
+ AsPaint(cpaint)->setShader(sk_ref_sp(AsShader(cshader)));
+}
+
+void sk_paint_set_maskfilter(sk_paint_t* cpaint, sk_maskfilter_t* cfilter) {
+ AsPaint(cpaint)->setMaskFilter(sk_ref_sp(AsMaskFilter(cfilter)));
+}
+
+bool sk_paint_is_stroke(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStyle() != SkPaint::kFill_Style;
+}
+
+void sk_paint_set_stroke(sk_paint_t* cpaint, bool doStroke) {
+ AsPaint(cpaint)->setStyle(doStroke ? SkPaint::kStroke_Style : SkPaint::kFill_Style);
+}
+
+float sk_paint_get_stroke_width(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStrokeWidth();
+}
+
+void sk_paint_set_stroke_width(sk_paint_t* cpaint, float width) {
+ AsPaint(cpaint)->setStrokeWidth(width);
+}
+
+float sk_paint_get_stroke_miter(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStrokeMiter();
+}
+
+void sk_paint_set_stroke_miter(sk_paint_t* cpaint, float miter) {
+ AsPaint(cpaint)->setStrokeMiter(miter);
+}
+
+sk_stroke_cap_t sk_paint_get_stroke_cap(const sk_paint_t* cpaint) {
+ sk_stroke_cap_t ccap;
+    if (!find_c(AsPaint(*cpaint).getStrokeCap(), &ccap)) {
+        // Fall back to the default cap when the lookup fails.
+        ccap = BUTT_SK_STROKE_CAP;
+    }
+ return ccap;
+}
+
+void sk_paint_set_stroke_cap(sk_paint_t* cpaint, sk_stroke_cap_t ccap) {
+ SkPaint::Cap skcap;
+ if (find_sk(ccap, &skcap)) {
+ AsPaint(cpaint)->setStrokeCap(skcap);
+ } else {
+ // unknown ccap
+ }
+}
+
+sk_stroke_join_t sk_paint_get_stroke_join(const sk_paint_t* cpaint) {
+ sk_stroke_join_t cjoin;
+    if (!find_c(AsPaint(*cpaint).getStrokeJoin(), &cjoin)) {
+        // Fall back to the default join when the lookup fails.
+        cjoin = MITER_SK_STROKE_JOIN;
+    }
+ return cjoin;
+}
+
+void sk_paint_set_stroke_join(sk_paint_t* cpaint, sk_stroke_join_t cjoin) {
+ SkPaint::Join skjoin;
+ if (find_sk(cjoin, &skjoin)) {
+ AsPaint(cpaint)->setStrokeJoin(skjoin);
+ } else {
+ // unknown cjoin
+ }
+}
+
+void sk_paint_set_xfermode_mode(sk_paint_t* paint, sk_xfermode_mode_t mode) {
+ SkASSERT(paint);
+ SkBlendMode skmode;
+ switch (mode) {
+ #define MAP(X, Y) case (X): skmode = (Y); break
+ MAP( CLEAR_SK_XFERMODE_MODE, SkBlendMode::kClear );
+ MAP( SRC_SK_XFERMODE_MODE, SkBlendMode::kSrc );
+ MAP( DST_SK_XFERMODE_MODE, SkBlendMode::kDst );
+ MAP( SRCOVER_SK_XFERMODE_MODE, SkBlendMode::kSrcOver );
+ MAP( DSTOVER_SK_XFERMODE_MODE, SkBlendMode::kDstOver );
+ MAP( SRCIN_SK_XFERMODE_MODE, SkBlendMode::kSrcIn );
+ MAP( DSTIN_SK_XFERMODE_MODE, SkBlendMode::kDstIn );
+ MAP( SRCOUT_SK_XFERMODE_MODE, SkBlendMode::kSrcOut );
+ MAP( DSTOUT_SK_XFERMODE_MODE, SkBlendMode::kDstOut );
+ MAP( SRCATOP_SK_XFERMODE_MODE, SkBlendMode::kSrcATop );
+ MAP( DSTATOP_SK_XFERMODE_MODE, SkBlendMode::kDstATop );
+ MAP( XOR_SK_XFERMODE_MODE, SkBlendMode::kXor );
+ MAP( PLUS_SK_XFERMODE_MODE, SkBlendMode::kPlus );
+ MAP( MODULATE_SK_XFERMODE_MODE, SkBlendMode::kModulate );
+ MAP( SCREEN_SK_XFERMODE_MODE, SkBlendMode::kScreen );
+ MAP( OVERLAY_SK_XFERMODE_MODE, SkBlendMode::kOverlay );
+ MAP( DARKEN_SK_XFERMODE_MODE, SkBlendMode::kDarken );
+ MAP( LIGHTEN_SK_XFERMODE_MODE, SkBlendMode::kLighten );
+ MAP( COLORDODGE_SK_XFERMODE_MODE, SkBlendMode::kColorDodge );
+ MAP( COLORBURN_SK_XFERMODE_MODE, SkBlendMode::kColorBurn );
+ MAP( HARDLIGHT_SK_XFERMODE_MODE, SkBlendMode::kHardLight );
+ MAP( SOFTLIGHT_SK_XFERMODE_MODE, SkBlendMode::kSoftLight );
+ MAP( DIFFERENCE_SK_XFERMODE_MODE, SkBlendMode::kDifference );
+ MAP( EXCLUSION_SK_XFERMODE_MODE, SkBlendMode::kExclusion );
+ MAP( MULTIPLY_SK_XFERMODE_MODE, SkBlendMode::kMultiply );
+ MAP( HUE_SK_XFERMODE_MODE, SkBlendMode::kHue );
+ MAP( SATURATION_SK_XFERMODE_MODE, SkBlendMode::kSaturation );
+ MAP( COLOR_SK_XFERMODE_MODE, SkBlendMode::kColor );
+ MAP( LUMINOSITY_SK_XFERMODE_MODE, SkBlendMode::kLuminosity );
+ #undef MAP
+ default:
+ return;
+ }
+ AsPaint(paint)->setBlendMode(skmode);
+}
diff --git a/gfx/skia/skia/src/c/sk_surface.cpp b/gfx/skia/skia/src/c/sk_surface.cpp
new file mode 100644
index 0000000000..b3f203fc24
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_surface.cpp
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkSurface.h"
+
+#include "include/c/sk_canvas.h"
+#include "include/c/sk_data.h"
+#include "include/c/sk_image.h"
+#include "include/c/sk_paint.h"
+#include "include/c/sk_path.h"
+#include "include/c/sk_picture.h"
+#include "include/c/sk_surface.h"
+#include "src/c/sk_types_priv.h"
+
+const struct {
+ sk_pixelgeometry_t fC;
+ SkPixelGeometry fSK;
+} gPixelGeometryMap[] = {
+ { UNKNOWN_SK_PIXELGEOMETRY, kUnknown_SkPixelGeometry },
+ { RGB_H_SK_PIXELGEOMETRY, kRGB_H_SkPixelGeometry },
+ { BGR_H_SK_PIXELGEOMETRY, kBGR_H_SkPixelGeometry },
+ { RGB_V_SK_PIXELGEOMETRY, kRGB_V_SkPixelGeometry },
+ { BGR_V_SK_PIXELGEOMETRY, kBGR_V_SkPixelGeometry },
+};
+
+
+static bool from_c_pixelgeometry(sk_pixelgeometry_t cGeom, SkPixelGeometry* skGeom) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPixelGeometryMap); ++i) {
+ if (gPixelGeometryMap[i].fC == cGeom) {
+ if (skGeom) {
+ *skGeom = gPixelGeometryMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static void from_c_matrix(const sk_matrix_t* cmatrix, SkMatrix* matrix) {
+ matrix->setAll(cmatrix->mat[0], cmatrix->mat[1], cmatrix->mat[2],
+ cmatrix->mat[3], cmatrix->mat[4], cmatrix->mat[5],
+ cmatrix->mat[6], cmatrix->mat[7], cmatrix->mat[8]);
+}
+
+const struct {
+ sk_path_direction_t fC;
+ SkPath::Direction fSk;
+} gPathDirMap[] = {
+ { CW_SK_PATH_DIRECTION, SkPath::kCW_Direction },
+ { CCW_SK_PATH_DIRECTION, SkPath::kCCW_Direction },
+};
+
+static bool from_c_path_direction(sk_path_direction_t cdir, SkPath::Direction* dir) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPathDirMap); ++i) {
+ if (gPathDirMap[i].fC == cdir) {
+ if (dir) {
+ *dir = gPathDirMap[i].fSk;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static SkData* AsData(const sk_data_t* cdata) {
+ return reinterpret_cast<SkData*>(const_cast<sk_data_t*>(cdata));
+}
+
+static sk_data_t* ToData(SkData* data) {
+ return reinterpret_cast<sk_data_t*>(data);
+}
+
+static sk_rect_t ToRect(const SkRect& rect) {
+ return reinterpret_cast<const sk_rect_t&>(rect);
+}
+
+static const SkRect& AsRect(const sk_rect_t& crect) {
+ return reinterpret_cast<const SkRect&>(crect);
+}
+
+static const SkPath& AsPath(const sk_path_t& cpath) {
+ return reinterpret_cast<const SkPath&>(cpath);
+}
+
+static SkPath* as_path(sk_path_t* cpath) {
+ return reinterpret_cast<SkPath*>(cpath);
+}
+
+static const SkImage* AsImage(const sk_image_t* cimage) {
+ return reinterpret_cast<const SkImage*>(cimage);
+}
+
+static sk_image_t* ToImage(SkImage* cimage) {
+ return reinterpret_cast<sk_image_t*>(cimage);
+}
+
+static sk_canvas_t* ToCanvas(SkCanvas* canvas) {
+ return reinterpret_cast<sk_canvas_t*>(canvas);
+}
+
+static SkCanvas* AsCanvas(sk_canvas_t* ccanvas) {
+ return reinterpret_cast<SkCanvas*>(ccanvas);
+}
+
+static SkPictureRecorder* AsPictureRecorder(sk_picture_recorder_t* crec) {
+ return reinterpret_cast<SkPictureRecorder*>(crec);
+}
+
+static sk_picture_recorder_t* ToPictureRecorder(SkPictureRecorder* rec) {
+ return reinterpret_cast<sk_picture_recorder_t*>(rec);
+}
+
+static const SkPicture* AsPicture(const sk_picture_t* cpic) {
+ return reinterpret_cast<const SkPicture*>(cpic);
+}
+
+static SkPicture* AsPicture(sk_picture_t* cpic) {
+ return reinterpret_cast<SkPicture*>(cpic);
+}
+
+static sk_picture_t* ToPicture(SkPicture* pic) {
+ return reinterpret_cast<sk_picture_t*>(pic);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_image_t* sk_image_new_raster_copy(const sk_imageinfo_t* cinfo, const void* pixels,
+ size_t rowBytes) {
+ const SkImageInfo* info = reinterpret_cast<const SkImageInfo*>(cinfo);
+ return (sk_image_t*)SkImage::MakeRasterCopy(SkPixmap(*info, pixels, rowBytes)).release();
+}
+
+sk_image_t* sk_image_new_from_encoded(const sk_data_t* cdata, const sk_irect_t* subset) {
+ return ToImage(SkImage::MakeFromEncoded(sk_ref_sp(AsData(cdata)),
+ reinterpret_cast<const SkIRect*>(subset)).release());
+}
+
+sk_data_t* sk_image_encode(const sk_image_t* cimage) {
+ return ToData(AsImage(cimage)->encodeToData().release());
+}
+
+void sk_image_ref(const sk_image_t* cimage) {
+ AsImage(cimage)->ref();
+}
+
+void sk_image_unref(const sk_image_t* cimage) {
+ AsImage(cimage)->unref();
+}
+
+int sk_image_get_width(const sk_image_t* cimage) {
+ return AsImage(cimage)->width();
+}
+
+int sk_image_get_height(const sk_image_t* cimage) {
+ return AsImage(cimage)->height();
+}
+
+uint32_t sk_image_get_unique_id(const sk_image_t* cimage) {
+ return AsImage(cimage)->uniqueID();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_path_t* sk_path_new() { return (sk_path_t*)new SkPath; }
+
+void sk_path_delete(sk_path_t* cpath) { delete as_path(cpath); }
+
+void sk_path_move_to(sk_path_t* cpath, float x, float y) {
+ as_path(cpath)->moveTo(x, y);
+}
+
+void sk_path_line_to(sk_path_t* cpath, float x, float y) {
+ as_path(cpath)->lineTo(x, y);
+}
+
+void sk_path_quad_to(sk_path_t* cpath, float x0, float y0, float x1, float y1) {
+ as_path(cpath)->quadTo(x0, y0, x1, y1);
+}
+
+void sk_path_conic_to(sk_path_t* cpath, float x0, float y0, float x1, float y1, float w) {
+ as_path(cpath)->conicTo(x0, y0, x1, y1, w);
+}
+
+void sk_path_cubic_to(sk_path_t* cpath, float x0, float y0, float x1, float y1, float x2, float y2) {
+ as_path(cpath)->cubicTo(x0, y0, x1, y1, x2, y2);
+}
+
+void sk_path_close(sk_path_t* cpath) {
+ as_path(cpath)->close();
+}
+
+void sk_path_add_rect(sk_path_t* cpath, const sk_rect_t* crect, sk_path_direction_t cdir) {
+ SkPath::Direction dir;
+ if (!from_c_path_direction(cdir, &dir)) {
+ return;
+ }
+ as_path(cpath)->addRect(AsRect(*crect), dir);
+}
+
+void sk_path_add_oval(sk_path_t* cpath, const sk_rect_t* crect, sk_path_direction_t cdir) {
+ SkPath::Direction dir;
+ if (!from_c_path_direction(cdir, &dir)) {
+ return;
+ }
+ as_path(cpath)->addOval(AsRect(*crect), dir);
+}
+
+bool sk_path_get_bounds(const sk_path_t* cpath, sk_rect_t* crect) {
+ const SkPath& path = AsPath(*cpath);
+
+ if (path.isEmpty()) {
+ if (crect) {
+ *crect = ToRect(SkRect::MakeEmpty());
+ }
+ return false;
+ }
+
+ if (crect) {
+ *crect = ToRect(path.getBounds());
+ }
+ return true;
+}
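+// Illustrative caller-side sketch (names are hypothetical, compiled out).
+#if 0
+static void triangle_bounds(void) {
+    sk_path_t* path = sk_path_new();
+    sk_path_move_to(path, 0.0f, 0.0f);
+    sk_path_line_to(path, 100.0f, 0.0f);
+    sk_path_line_to(path, 50.0f, 80.0f);
+    sk_path_close(path);
+    sk_rect_t bounds;
+    if (sk_path_get_bounds(path, &bounds)) {
+        // bounds is now {0, 0, 100, 80}.
+    }
+    sk_path_delete(path);
+}
+#endif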
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+void sk_canvas_save(sk_canvas_t* ccanvas) {
+ AsCanvas(ccanvas)->save();
+}
+
+void sk_canvas_save_layer(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+    // Forward to SkCanvas::saveLayer(); both the bounds and the paint may be NULL.
+    AsCanvas(ccanvas)->saveLayer(crect ? &AsRect(*crect) : nullptr, AsPaint(cpaint));
+}
+
+void sk_canvas_restore(sk_canvas_t* ccanvas) {
+ AsCanvas(ccanvas)->restore();
+}
+
+void sk_canvas_translate(sk_canvas_t* ccanvas, float dx, float dy) {
+ AsCanvas(ccanvas)->translate(dx, dy);
+}
+
+void sk_canvas_scale(sk_canvas_t* ccanvas, float sx, float sy) {
+ AsCanvas(ccanvas)->scale(sx, sy);
+}
+
+void sk_canvas_rotate_degress(sk_canvas_t* ccanvas, float degrees) {
+ AsCanvas(ccanvas)->rotate(degrees);
+}
+
+void sk_canvas_rotate_radians(sk_canvas_t* ccanvas, float radians) {
+ AsCanvas(ccanvas)->rotate(SkRadiansToDegrees(radians));
+}
+
+void sk_canvas_skew(sk_canvas_t* ccanvas, float sx, float sy) {
+ AsCanvas(ccanvas)->skew(sx, sy);
+}
+
+void sk_canvas_concat(sk_canvas_t* ccanvas, const sk_matrix_t* cmatrix) {
+ SkASSERT(cmatrix);
+ SkMatrix matrix;
+ from_c_matrix(cmatrix, &matrix);
+ AsCanvas(ccanvas)->concat(matrix);
+}
+
+void sk_canvas_clip_rect(sk_canvas_t* ccanvas, const sk_rect_t* crect) {
+ AsCanvas(ccanvas)->clipRect(AsRect(*crect));
+}
+
+void sk_canvas_clip_path(sk_canvas_t* ccanvas, const sk_path_t* cpath) {
+ AsCanvas(ccanvas)->clipPath(AsPath(*cpath));
+}
+
+void sk_canvas_draw_paint(sk_canvas_t* ccanvas, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawPaint(AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_rect(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawRect(AsRect(*crect), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_circle(sk_canvas_t* ccanvas, float cx, float cy, float rad,
+ const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawCircle(cx, cy, rad, AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_oval(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawOval(AsRect(*crect), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_path(sk_canvas_t* ccanvas, const sk_path_t* cpath, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawPath(AsPath(*cpath), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_image(sk_canvas_t* ccanvas, const sk_image_t* cimage, float x, float y,
+ const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawImage(AsImage(cimage), x, y, AsPaint(cpaint));
+}
+
+void sk_canvas_draw_image_rect(sk_canvas_t* ccanvas, const sk_image_t* cimage,
+ const sk_rect_t* csrcR, const sk_rect_t* cdstR,
+ const sk_paint_t* cpaint) {
+ SkCanvas* canvas = AsCanvas(ccanvas);
+ const SkImage* image = AsImage(cimage);
+ const SkRect& dst = AsRect(*cdstR);
+ const SkPaint* paint = AsPaint(cpaint);
+
+ if (csrcR) {
+ canvas->drawImageRect(image, AsRect(*csrcR), dst, paint);
+ } else {
+ canvas->drawImageRect(image, dst, paint);
+ }
+}
+
+void sk_canvas_draw_picture(sk_canvas_t* ccanvas, const sk_picture_t* cpicture,
+ const sk_matrix_t* cmatrix, const sk_paint_t* cpaint) {
+ const SkMatrix* matrixPtr = NULL;
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ matrixPtr = &matrix;
+ }
+ AsCanvas(ccanvas)->drawPicture(AsPicture(cpicture), matrixPtr, AsPaint(cpaint));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_surface_t* sk_surface_new_raster(const sk_imageinfo_t* cinfo,
+ const sk_surfaceprops_t* props) {
+ const SkImageInfo* info = reinterpret_cast<const SkImageInfo*>(cinfo);
+ SkPixelGeometry geo = kUnknown_SkPixelGeometry;
+ if (props && !from_c_pixelgeometry(props->pixelGeometry, &geo)) {
+ return NULL;
+ }
+
+ SkSurfaceProps surfProps(0, geo);
+ return (sk_surface_t*)SkSurface::MakeRaster(*info, &surfProps).release();
+}
+
+sk_surface_t* sk_surface_new_raster_direct(const sk_imageinfo_t* cinfo, void* pixels,
+ size_t rowBytes,
+ const sk_surfaceprops_t* props) {
+ const SkImageInfo* info = reinterpret_cast<const SkImageInfo*>(cinfo);
+ SkPixelGeometry geo = kUnknown_SkPixelGeometry;
+ if (props && !from_c_pixelgeometry(props->pixelGeometry, &geo)) {
+ return NULL;
+ }
+
+ SkSurfaceProps surfProps(0, geo);
+ return (sk_surface_t*)SkSurface::MakeRasterDirect(*info, pixels, rowBytes, &surfProps).release();
+}
+
+void sk_surface_unref(sk_surface_t* csurf) {
+ SkSafeUnref((SkSurface*)csurf);
+}
+
+sk_canvas_t* sk_surface_get_canvas(sk_surface_t* csurf) {
+ SkSurface* surf = (SkSurface*)csurf;
+ return (sk_canvas_t*)surf->getCanvas();
+}
+
+sk_image_t* sk_surface_new_image_snapshot(sk_surface_t* csurf) {
+ SkSurface* surf = (SkSurface*)csurf;
+ return (sk_image_t*)surf->makeImageSnapshot().release();
+}
+
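+// Illustrative end-to-end sketch (names are hypothetical, compiled out);
+// sk_imageinfo_new() lives in sk_imageinfo.cpp alongside this file.
+#if 0
+static sk_image_t* render_solid_green_image(void) {
+    sk_imageinfo_t* info = sk_imageinfo_new(64, 64, RGBA_8888_SK_COLORTYPE,
+                                            PREMUL_SK_ALPHATYPE, /*ccs=*/NULL);
+    sk_surface_t* surface = sk_surface_new_raster(info, /*props=*/NULL);
+    sk_imageinfo_delete(info);
+    if (!surface) {
+        return NULL;
+    }
+    sk_paint_t* paint = sk_paint_new();
+    sk_paint_set_color(paint, 0xFF00FF00);  // opaque green (ARGB)
+    sk_canvas_draw_paint(sk_surface_get_canvas(surface), paint);
+    sk_paint_delete(paint);
+    sk_image_t* snapshot = sk_surface_new_image_snapshot(surface);
+    sk_surface_unref(surface);  // the snapshot keeps the pixels alive
+    return snapshot;
+}
+#endif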
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_picture_recorder_t* sk_picture_recorder_new() {
+ return ToPictureRecorder(new SkPictureRecorder);
+}
+
+void sk_picture_recorder_delete(sk_picture_recorder_t* crec) {
+ delete AsPictureRecorder(crec);
+}
+
+sk_canvas_t* sk_picture_recorder_begin_recording(sk_picture_recorder_t* crec,
+ const sk_rect_t* cbounds) {
+ return ToCanvas(AsPictureRecorder(crec)->beginRecording(AsRect(*cbounds)));
+}
+
+sk_picture_t* sk_picture_recorder_end_recording(sk_picture_recorder_t* crec) {
+ return ToPicture(AsPictureRecorder(crec)->finishRecordingAsPicture().release());
+}
+
+void sk_picture_ref(sk_picture_t* cpic) {
+ SkSafeRef(AsPicture(cpic));
+}
+
+void sk_picture_unref(sk_picture_t* cpic) {
+ SkSafeUnref(AsPicture(cpic));
+}
+
+uint32_t sk_picture_get_unique_id(sk_picture_t* cpic) {
+ return AsPicture(cpic)->uniqueID();
+}
+
+sk_rect_t sk_picture_get_bounds(sk_picture_t* cpic) {
+ return ToRect(AsPicture(cpic)->cullRect());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_data_t* sk_data_new_with_copy(const void* src, size_t length) {
+ return ToData(SkData::MakeWithCopy(src, length).release());
+}
+
+sk_data_t* sk_data_new_from_malloc(const void* memory, size_t length) {
+ return ToData(SkData::MakeFromMalloc(memory, length).release());
+}
+
+sk_data_t* sk_data_new_subset(const sk_data_t* csrc, size_t offset, size_t length) {
+ return ToData(SkData::MakeSubset(AsData(csrc), offset, length).release());
+}
+
+void sk_data_ref(const sk_data_t* cdata) {
+ SkSafeRef(AsData(cdata));
+}
+
+void sk_data_unref(const sk_data_t* cdata) {
+ SkSafeUnref(AsData(cdata));
+}
+
+size_t sk_data_get_size(const sk_data_t* cdata) {
+ return AsData(cdata)->size();
+}
+
+const void* sk_data_get_data(const sk_data_t* cdata) {
+ return AsData(cdata)->data();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/c/sk_types_priv.h b/gfx/skia/skia/src/c/sk_types_priv.h
new file mode 100644
index 0000000000..ea62646700
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_types_priv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef sk_types_priv_DEFINED
+#define sk_types_priv_DEFINED
+
+#include "include/c/sk_types.h"
+
+class SkMaskFilter;
+class SkPaint;
+class SkShader;
+
+static inline const SkPaint& AsPaint(const sk_paint_t& cpaint) {
+ return reinterpret_cast<const SkPaint&>(cpaint);
+}
+
+static inline const SkPaint* AsPaint(const sk_paint_t* cpaint) {
+ return reinterpret_cast<const SkPaint*>(cpaint);
+}
+
+static inline SkPaint* AsPaint(sk_paint_t* cpaint) {
+ return reinterpret_cast<SkPaint*>(cpaint);
+}
+
+static inline SkMaskFilter* AsMaskFilter(sk_maskfilter_t* cfilter) {
+ return reinterpret_cast<SkMaskFilter*>(cfilter);
+}
+
+static inline sk_maskfilter_t* ToMaskFilter(SkMaskFilter* filter) {
+ return reinterpret_cast<sk_maskfilter_t*>(filter);
+}
+
+static inline SkShader* AsShader(sk_shader_t* cshader) {
+ return reinterpret_cast<SkShader*>(cshader);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkAndroidCodec.cpp b/gfx/skia/skia/src/codec/SkAndroidCodec.cpp
new file mode 100644
index 0000000000..b90dbe7af0
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkAndroidCodec.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkAndroidCodec.h"
+#include "include/codec/SkCodec.h"
+#include "include/core/SkPixmap.h"
+#include "src/codec/SkAndroidCodecAdapter.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkSampledCodec.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkPixmapPriv.h"
+
+static bool is_valid_sample_size(int sampleSize) {
+ // FIXME: As Leon has mentioned elsewhere, surely there is also a maximum sampleSize?
+ return sampleSize > 0;
+}
+
+/**
+ * Loads the gamut as a set of three points (triangle).
+ */
+static void load_gamut(SkPoint rgb[], const skcms_Matrix3x3& xyz) {
+ // rx = rX / (rX + rY + rZ)
+ // ry = rY / (rX + rY + rZ)
+    // gx, gy, bx, and by are calculated similarly.
+ for (int rgbIdx = 0; rgbIdx < 3; rgbIdx++) {
+ float sum = xyz.vals[rgbIdx][0] + xyz.vals[rgbIdx][1] + xyz.vals[rgbIdx][2];
+ rgb[rgbIdx].fX = xyz.vals[rgbIdx][0] / sum;
+ rgb[rgbIdx].fY = xyz.vals[rgbIdx][1] / sum;
+ }
+}
+
+/**
+ * Calculates the area of the triangular gamut.
+ */
+static float calculate_area(SkPoint abc[]) {
+ SkPoint a = abc[0];
+ SkPoint b = abc[1];
+ SkPoint c = abc[2];
+ return 0.5f * SkTAbs(a.fX*b.fY + b.fX*c.fY - a.fX*c.fY - c.fX*b.fY - b.fX*a.fY);
+}
+
+static constexpr float kSRGB_D50_GamutArea = 0.084f;
+
+static bool is_wide_gamut(const skcms_ICCProfile& profile) {
+ // Determine if the source image has a gamut that is wider than sRGB. If so, we
+ // will use P3 as the output color space to avoid clipping the gamut.
+ if (profile.has_toXYZD50) {
+ SkPoint rgb[3];
+ load_gamut(rgb, profile.toXYZD50);
+ return calculate_area(rgb) > kSRGB_D50_GamutArea;
+ }
+
+ return false;
+}
+
+static inline SkImageInfo adjust_info(SkCodec* codec,
+ SkAndroidCodec::ExifOrientationBehavior orientationBehavior) {
+ auto info = codec->getInfo();
+ if (orientationBehavior == SkAndroidCodec::ExifOrientationBehavior::kIgnore
+ || !SkPixmapPriv::ShouldSwapWidthHeight(codec->getOrigin())) {
+ return info;
+ }
+ return SkPixmapPriv::SwapWidthHeight(info);
+}
+
+SkAndroidCodec::SkAndroidCodec(SkCodec* codec, ExifOrientationBehavior orientationBehavior)
+ : fInfo(adjust_info(codec, orientationBehavior))
+ , fOrientationBehavior(orientationBehavior)
+ , fCodec(codec)
+{}
+
+SkAndroidCodec::~SkAndroidCodec() {}
+
+std::unique_ptr<SkAndroidCodec> SkAndroidCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ SkPngChunkReader* chunkReader) {
+ auto codec = SkCodec::MakeFromStream(std::move(stream), nullptr, chunkReader);
+ return MakeFromCodec(std::move(codec));
+}
+
+std::unique_ptr<SkAndroidCodec> SkAndroidCodec::MakeFromCodec(std::unique_ptr<SkCodec> codec,
+ ExifOrientationBehavior orientationBehavior) {
+ if (nullptr == codec) {
+ return nullptr;
+ }
+
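+    // Formats whose SkCodec cannot natively decode at reduced scales are
+    // wrapped in SkSampledCodec, which implements sampling on top of full
+    // decodes; the WEBP, DNG and (with wuffs) GIF codecs support scaled
+    // decodes directly, so they only need the thin SkAndroidCodecAdapter.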
+ switch ((SkEncodedImageFormat)codec->getEncodedFormat()) {
+ case SkEncodedImageFormat::kPNG:
+ case SkEncodedImageFormat::kICO:
+ case SkEncodedImageFormat::kJPEG:
+#ifndef SK_HAS_WUFFS_LIBRARY
+ case SkEncodedImageFormat::kGIF:
+#endif
+ case SkEncodedImageFormat::kBMP:
+ case SkEncodedImageFormat::kWBMP:
+ case SkEncodedImageFormat::kHEIF:
+ return skstd::make_unique<SkSampledCodec>(codec.release(), orientationBehavior);
+#ifdef SK_HAS_WUFFS_LIBRARY
+ case SkEncodedImageFormat::kGIF:
+#endif
+#ifdef SK_HAS_WEBP_LIBRARY
+ case SkEncodedImageFormat::kWEBP:
+#endif
+#ifdef SK_CODEC_DECODES_RAW
+ case SkEncodedImageFormat::kDNG:
+#endif
+#if defined(SK_HAS_WEBP_LIBRARY) || defined(SK_CODEC_DECODES_RAW) || defined(SK_HAS_WUFFS_LIBRARY)
+ return skstd::make_unique<SkAndroidCodecAdapter>(codec.release(), orientationBehavior);
+#endif
+
+ default:
+ return nullptr;
+ }
+}
+
+std::unique_ptr<SkAndroidCodec> SkAndroidCodec::MakeFromData(sk_sp<SkData> data,
+ SkPngChunkReader* chunkReader) {
+ if (!data) {
+ return nullptr;
+ }
+
+ return MakeFromStream(SkMemoryStream::Make(std::move(data)), chunkReader);
+}
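+// Illustrative decode sketch (names are hypothetical, compiled out): decode
+// at roughly quarter size in each dimension.
+#if 0
+static bool decode_quarter_size(sk_sp<SkData> encoded, SkBitmap* dst) {
+    auto codec = SkAndroidCodec::MakeFromData(std::move(encoded));
+    if (!codec) {
+        return false;
+    }
+    SkISize size = codec->getSampledDimensions(4);
+    SkImageInfo info = codec->getInfo().makeWH(size.width(), size.height());
+    if (!dst->tryAllocPixels(info)) {
+        return false;
+    }
+    SkAndroidCodec::AndroidOptions options;
+    options.fSampleSize = 4;
+    return SkCodec::kSuccess == codec->getAndroidPixels(info, dst->getPixels(),
+                                                        dst->rowBytes(), &options);
+}
+#endif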
+
+SkColorType SkAndroidCodec::computeOutputColorType(SkColorType requestedColorType) {
+ bool highPrecision = fCodec->getEncodedInfo().bitsPerComponent() > 8;
+ switch (requestedColorType) {
+ case kARGB_4444_SkColorType:
+ return kN32_SkColorType;
+ case kN32_SkColorType:
+ break;
+ case kAlpha_8_SkColorType:
+ // Fall through to kGray_8. Before kGray_8_SkColorType existed,
+ // we allowed clients to request kAlpha_8 when they wanted a
+ // grayscale decode.
+ case kGray_8_SkColorType:
+ if (kGray_8_SkColorType == this->getInfo().colorType()) {
+ return kGray_8_SkColorType;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ if (kOpaque_SkAlphaType == this->getInfo().alphaType()) {
+ return kRGB_565_SkColorType;
+ }
+ break;
+ case kRGBA_F16_SkColorType:
+ return kRGBA_F16_SkColorType;
+ default:
+ break;
+ }
+
+ // F16 is the Android default for high precision images.
+ return highPrecision ? kRGBA_F16_SkColorType : kN32_SkColorType;
+}
+
+SkAlphaType SkAndroidCodec::computeOutputAlphaType(bool requestedUnpremul) {
+ if (kOpaque_SkAlphaType == this->getInfo().alphaType()) {
+ return kOpaque_SkAlphaType;
+ }
+ return requestedUnpremul ? kUnpremul_SkAlphaType : kPremul_SkAlphaType;
+}
+
+sk_sp<SkColorSpace> SkAndroidCodec::computeOutputColorSpace(SkColorType outputColorType,
+ sk_sp<SkColorSpace> prefColorSpace) {
+ switch (outputColorType) {
+ case kRGBA_F16_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ // If |prefColorSpace| is supplied, choose it.
+ if (prefColorSpace) {
+ return prefColorSpace;
+ }
+
+ const skcms_ICCProfile* encodedProfile = fCodec->getEncodedInfo().profile();
+ if (encodedProfile) {
+ if (auto encodedSpace = SkColorSpace::Make(*encodedProfile)) {
+ // Leave the pixels in the encoded color space. Color space conversion
+ // will be handled after decode time.
+ return encodedSpace;
+ }
+
+ if (is_wide_gamut(*encodedProfile)) {
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, SkNamedGamut::kDCIP3);
+ }
+ }
+
+ return SkColorSpace::MakeSRGB();
+ }
+ default:
+ // Color correction not supported for kGray.
+ return nullptr;
+ }
+}
+
+static bool supports_any_down_scale(const SkCodec* codec) {
+ return codec->getEncodedFormat() == SkEncodedImageFormat::kWEBP;
+}
+
+// There are a variety of ways two SkISizes could be compared. This method
+// returns true if either dimension of a is less than the corresponding
+// dimension of b. computeSampleSize() also uses the negation, which means
+// that both dimensions of a are >= those of b.
+static inline bool smaller_than(const SkISize& a, const SkISize& b) {
+ return a.width() < b.width() || a.height() < b.height();
+}
+
+// Returns true if both dimensions of a are > those of b.
+static inline bool strictly_bigger_than(const SkISize& a, const SkISize& b) {
+ return a.width() > b.width() && a.height() > b.height();
+}
+
+int SkAndroidCodec::computeSampleSize(SkISize* desiredSize) const {
+ SkASSERT(desiredSize);
+
+ if (!desiredSize || *desiredSize == fInfo.dimensions()) {
+ return 1;
+ }
+
+ if (smaller_than(fInfo.dimensions(), *desiredSize)) {
+ *desiredSize = fInfo.dimensions();
+ return 1;
+ }
+
+ // Handle bad input:
+ if (desiredSize->width() < 1 || desiredSize->height() < 1) {
+ *desiredSize = SkISize::Make(std::max(1, desiredSize->width()),
+ std::max(1, desiredSize->height()));
+ }
+
+ if (supports_any_down_scale(fCodec.get())) {
+ return 1;
+ }
+
+ int sampleX = fInfo.width() / desiredSize->width();
+ int sampleY = fInfo.height() / desiredSize->height();
+ int sampleSize = std::min(sampleX, sampleY);
+ auto computedSize = this->getSampledDimensions(sampleSize);
+ if (computedSize == *desiredSize) {
+ return sampleSize;
+ }
+
+ if (computedSize == fInfo.dimensions() || sampleSize == 1) {
+ // Cannot downscale
+ *desiredSize = computedSize;
+ return 1;
+ }
+
+ if (strictly_bigger_than(computedSize, *desiredSize)) {
+ // See if there is a tighter fit.
+ while (true) {
+ auto smaller = this->getSampledDimensions(sampleSize + 1);
+ if (smaller == *desiredSize) {
+ return sampleSize + 1;
+ }
+ if (smaller == computedSize || smaller_than(smaller, *desiredSize)) {
+ // Cannot get any smaller without being smaller than desired.
+ *desiredSize = computedSize;
+ return sampleSize;
+ }
+
+ sampleSize++;
+ computedSize = smaller;
+ }
+
+ SkASSERT(false);
+ }
+
+ if (!smaller_than(computedSize, *desiredSize)) {
+ // This means one of the computed dimensions is equal to desired, and
+ // the other is bigger. This is as close as we can get.
+ *desiredSize = computedSize;
+ return sampleSize;
+ }
+
+ // computedSize is too small. Make it larger.
+ while (sampleSize > 2) {
+ auto bigger = this->getSampledDimensions(sampleSize - 1);
+ if (bigger == *desiredSize || !smaller_than(bigger, *desiredSize)) {
+ *desiredSize = bigger;
+ return sampleSize - 1;
+ }
+ sampleSize--;
+ }
+
+ *desiredSize = fInfo.dimensions();
+ return 1;
+}
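+// A worked illustration: for a 1000x1000 source and a desired 250x250,
+// sampleX = sampleY = 4 and getSampledDimensions(4) is exactly 250x250, so 4
+// is returned. For a desired 330x330, sampleSize starts at 3 (computed size
+// 333x333, strictly bigger); probing sampleSize 4 gives 250x250, which is
+// smaller than desired, so *desiredSize becomes 333x333 and 3 is returned.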
+
+SkISize SkAndroidCodec::getSampledDimensions(int sampleSize) const {
+ if (!is_valid_sample_size(sampleSize)) {
+ return {0, 0};
+ }
+
+ // Fast path for when we are not scaling.
+ if (1 == sampleSize) {
+ return fInfo.dimensions();
+ }
+
+ auto dims = this->onGetSampledDimensions(sampleSize);
+ if (fOrientationBehavior == SkAndroidCodec::ExifOrientationBehavior::kIgnore
+ || !SkPixmapPriv::ShouldSwapWidthHeight(fCodec->getOrigin())) {
+ return dims;
+ }
+
+ return { dims.height(), dims.width() };
+}
+
+bool SkAndroidCodec::getSupportedSubset(SkIRect* desiredSubset) const {
+ if (!desiredSubset || !is_valid_subset(*desiredSubset, fInfo.dimensions())) {
+ return false;
+ }
+
+ return this->onGetSupportedSubset(desiredSubset);
+}
+
+SkISize SkAndroidCodec::getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const {
+ if (!is_valid_sample_size(sampleSize)) {
+ return {0, 0};
+ }
+
+ // We require that the input subset is a subset that is supported by SkAndroidCodec.
+ // We test this by calling getSupportedSubset() and verifying that no modifications
+ // are made to the subset.
+ SkIRect copySubset = subset;
+ if (!this->getSupportedSubset(&copySubset) || copySubset != subset) {
+ return {0, 0};
+ }
+
+ // If the subset is the entire image, for consistency, use getSampledDimensions().
+ if (fInfo.dimensions() == subset.size()) {
+ return this->getSampledDimensions(sampleSize);
+ }
+
+ // This should perhaps call a virtual function, but currently both of our subclasses
+ // want the same implementation.
+ return {get_scaled_dimension(subset.width(), sampleSize),
+ get_scaled_dimension(subset.height(), sampleSize)};
+}
+
+static bool acceptable_result(SkCodec::Result result) {
+ switch (result) {
+ // These results mean a partial or complete image. They should be considered
+ // a success by SkPixmapPriv.
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ return false;
+ }
+}
+
+SkCodec::Result SkAndroidCodec::getAndroidPixels(const SkImageInfo& requestInfo,
+ void* requestPixels, size_t requestRowBytes, const AndroidOptions* options) {
+ if (!requestPixels) {
+ return SkCodec::kInvalidParameters;
+ }
+ if (requestRowBytes < requestInfo.minRowBytes()) {
+ return SkCodec::kInvalidParameters;
+ }
+
+ SkImageInfo adjustedInfo = fInfo;
+ if (ExifOrientationBehavior::kRespect == fOrientationBehavior
+ && SkPixmapPriv::ShouldSwapWidthHeight(fCodec->getOrigin())) {
+ adjustedInfo = SkPixmapPriv::SwapWidthHeight(adjustedInfo);
+ }
+
+ AndroidOptions defaultOptions;
+ if (!options) {
+ options = &defaultOptions;
+ } else if (options->fSubset) {
+ if (!is_valid_subset(*options->fSubset, adjustedInfo.dimensions())) {
+ return SkCodec::kInvalidParameters;
+ }
+
+ if (SkIRect::MakeSize(adjustedInfo.dimensions()) == *options->fSubset) {
+ // The caller wants the whole thing, rather than a subset. Modify
+ // the AndroidOptions passed to onGetAndroidPixels to not specify
+ // a subset.
+ defaultOptions = *options;
+ defaultOptions.fSubset = nullptr;
+ options = &defaultOptions;
+ }
+ }
+
+ if (ExifOrientationBehavior::kIgnore == fOrientationBehavior) {
+ return this->onGetAndroidPixels(requestInfo, requestPixels, requestRowBytes, *options);
+ }
+
+ SkCodec::Result result;
+ auto decode = [this, options, &result](const SkPixmap& pm) {
+ result = this->onGetAndroidPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), *options);
+ return acceptable_result(result);
+ };
+
+ SkPixmap dst(requestInfo, requestPixels, requestRowBytes);
+ if (SkPixmapPriv::Orient(dst, fCodec->getOrigin(), decode)) {
+ return result;
+ }
+
+ // Orient returned false. If onGetAndroidPixels succeeded, then Orient failed internally.
+ if (acceptable_result(result)) {
+ return SkCodec::kInternalError;
+ }
+
+ return result;
+}
+
+SkCodec::Result SkAndroidCodec::getAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes) {
+ return this->getAndroidPixels(info, pixels, rowBytes, nullptr);
+}
diff --git a/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.cpp b/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.cpp
new file mode 100644
index 0000000000..5551136fa6
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkAndroidCodecAdapter.h"
+#include "src/codec/SkCodecPriv.h"
+
+SkAndroidCodecAdapter::SkAndroidCodecAdapter(SkCodec* codec, ExifOrientationBehavior behavior)
+ : INHERITED(codec, behavior)
+{}
+
+SkISize SkAndroidCodecAdapter::onGetSampledDimensions(int sampleSize) const {
+ float scale = get_scale_from_sample_size(sampleSize);
+ return this->codec()->getScaledDimensions(scale);
+}
+
+bool SkAndroidCodecAdapter::onGetSupportedSubset(SkIRect* desiredSubset) const {
+ return this->codec()->getValidSubset(desiredSubset);
+}
+
+SkCodec::Result SkAndroidCodecAdapter::onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ SkCodec::Options codecOptions;
+ codecOptions.fZeroInitialized = options.fZeroInitialized;
+ codecOptions.fSubset = options.fSubset;
+ return this->codec()->getPixels(info, pixels, rowBytes, &codecOptions);
+}
diff --git a/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.h b/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.h
new file mode 100644
index 0000000000..7a5d093d5b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkAndroidCodecAdapter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkAndroidCodecAdapter_DEFINED
+#define SkAndroidCodecAdapter_DEFINED
+
+#include "include/codec/SkAndroidCodec.h"
+
+/**
+ * This class wraps SkCodec to implement the functionality of SkAndroidCodec.
+ * The underlying SkCodec implements sampled decodes. SkCodecs that do not
+ * implement them are wrapped with SkSampledCodec instead.
+ */
+class SkAndroidCodecAdapter : public SkAndroidCodec {
+public:
+
+ explicit SkAndroidCodecAdapter(SkCodec*, ExifOrientationBehavior);
+
+ ~SkAndroidCodecAdapter() override {}
+
+protected:
+
+ SkISize onGetSampledDimensions(int sampleSize) const override;
+
+ bool onGetSupportedSubset(SkIRect* desiredSubset) const override;
+
+ SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) override;
+
+private:
+
+ typedef SkAndroidCodec INHERITED;
+};
+#endif // SkAndroidCodecAdapter_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkBmpBaseCodec.cpp b/gfx/skia/skia/src/codec/SkBmpBaseCodec.cpp
new file mode 100644
index 0000000000..32a1d37384
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpBaseCodec.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/private/SkMalloc.h"
+#include "src/codec/SkBmpBaseCodec.h"
+
+SkBmpBaseCodec::~SkBmpBaseCodec() {}
+
+SkBmpBaseCodec::SkBmpBaseCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(std::move(info), std::move(stream), bitsPerPixel, rowOrder)
+ , fSrcBuffer(sk_malloc_canfail(this->srcRowBytes()))
+{}
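+
+// Note: sk_malloc_canfail() returns nullptr on allocation failure instead of
+// aborting, so fSrcBuffer may be null after construction. This is why
+// SkBmpCodec::ReadHeader() calls didCreateSrcBuffer() on the newly created
+// codec and reports kInvalidInput when the buffer could not be allocated.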
diff --git a/gfx/skia/skia/src/codec/SkBmpBaseCodec.h b/gfx/skia/skia/src/codec/SkBmpBaseCodec.h
new file mode 100644
index 0000000000..1c57ce6de4
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpBaseCodec.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBmpBaseCodec_DEFINED
+#define SkBmpBaseCodec_DEFINED
+
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkBmpCodec.h"
+
+/*
+ * Common base class for SkBmpStandardCodec and SkBmpMaskCodec.
+ */
+class SkBmpBaseCodec : public SkBmpCodec {
+public:
+ ~SkBmpBaseCodec() override;
+
+ /*
+ * Whether fSrcBuffer was successfully created.
+ *
+ * If false, this Codec must not be used.
+ */
+ bool didCreateSrcBuffer() const { return fSrcBuffer != nullptr; }
+
+protected:
+ SkBmpBaseCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream>,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder);
+
+ uint8_t* srcBuffer() { return reinterpret_cast<uint8_t*>(fSrcBuffer.get()); }
+
+private:
+ SkAutoFree fSrcBuffer;
+
+ typedef SkBmpCodec INHERITED;
+};
+#endif // SkBmpBaseCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkBmpCodec.cpp b/gfx/skia/skia/src/codec/SkBmpCodec.cpp
new file mode 100644
index 0000000000..615c8a4212
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpCodec.cpp
@@ -0,0 +1,650 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkBmpCodec.h"
+#include "src/codec/SkBmpMaskCodec.h"
+#include "src/codec/SkBmpRLECodec.h"
+#include "src/codec/SkBmpStandardCodec.h"
+#include "src/codec/SkCodecPriv.h"
+
+/*
+ * Defines the version and type of the second bitmap header
+ */
+enum BmpHeaderType {
+ kInfoV1_BmpHeaderType,
+ kInfoV2_BmpHeaderType,
+ kInfoV3_BmpHeaderType,
+ kInfoV4_BmpHeaderType,
+ kInfoV5_BmpHeaderType,
+ kOS2V1_BmpHeaderType,
+ kOS2VX_BmpHeaderType,
+ kUnknown_BmpHeaderType
+};
+
+/*
+ * Possible bitmap compression types
+ */
+enum BmpCompressionMethod {
+ kNone_BmpCompressionMethod = 0,
+ k8BitRLE_BmpCompressionMethod = 1,
+ k4BitRLE_BmpCompressionMethod = 2,
+ kBitMasks_BmpCompressionMethod = 3,
+ kJpeg_BmpCompressionMethod = 4,
+ kPng_BmpCompressionMethod = 5,
+ kAlphaBitMasks_BmpCompressionMethod = 6,
+ kCMYK_BmpCompressionMethod = 11,
+ kCMYK8BitRLE_BmpCompressionMethod = 12,
+ kCMYK4BitRLE_BmpCompressionMethod = 13
+};
+
+/*
+ * Used to define the input format of the bmp
+ */
+enum BmpInputFormat {
+ kStandard_BmpInputFormat,
+ kRLE_BmpInputFormat,
+ kBitMask_BmpInputFormat,
+ kUnknown_BmpInputFormat
+};
+
+/*
+ * Checks the start of the stream to see if the image is a bitmap
+ */
+bool SkBmpCodec::IsBmp(const void* buffer, size_t bytesRead) {
+ // TODO: Support "IC", "PT", "CI", "CP", "BA"
+ const char bmpSig[] = { 'B', 'M' };
+ return bytesRead >= sizeof(bmpSig) && !memcmp(buffer, bmpSig, sizeof(bmpSig));
+}
+
+/*
+ * Assumes IsBmp was called and returned true
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+std::unique_ptr<SkCodec> SkBmpCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ return SkBmpCodec::MakeFromStream(std::move(stream), result, false);
+}
+
+/*
+ * Creates a bmp decoder for a bmp embedded in ico
+ * Reads enough of the stream to determine the image format
+ */
+std::unique_ptr<SkCodec> SkBmpCodec::MakeFromIco(std::unique_ptr<SkStream> stream, Result* result) {
+ return SkBmpCodec::MakeFromStream(std::move(stream), result, true);
+}
+
+// Header size constants
+static constexpr uint32_t kBmpHeaderBytes = 14;
+static constexpr uint32_t kBmpHeaderBytesPlusFour = kBmpHeaderBytes + 4;
+static constexpr uint32_t kBmpOS2V1Bytes = 12;
+static constexpr uint32_t kBmpOS2V2Bytes = 64;
+static constexpr uint32_t kBmpInfoBaseBytes = 16;
+static constexpr uint32_t kBmpInfoV1Bytes = 40;
+static constexpr uint32_t kBmpInfoV2Bytes = 52;
+static constexpr uint32_t kBmpInfoV3Bytes = 56;
+static constexpr uint32_t kBmpInfoV4Bytes = 108;
+static constexpr uint32_t kBmpInfoV5Bytes = 124;
+static constexpr uint32_t kBmpMaskBytes = 12;
+
+static BmpHeaderType get_header_type(size_t infoBytes) {
+ if (infoBytes >= kBmpInfoBaseBytes) {
+ // Check the version of the header
+ switch (infoBytes) {
+ case kBmpInfoV1Bytes:
+ return kInfoV1_BmpHeaderType;
+ case kBmpInfoV2Bytes:
+ return kInfoV2_BmpHeaderType;
+ case kBmpInfoV3Bytes:
+ return kInfoV3_BmpHeaderType;
+ case kBmpInfoV4Bytes:
+ return kInfoV4_BmpHeaderType;
+ case kBmpInfoV5Bytes:
+ return kInfoV5_BmpHeaderType;
+ case 16:
+ case 20:
+ case 24:
+ case 28:
+ case 32:
+ case 36:
+ case 42:
+ case 46:
+ case 48:
+ case 60:
+ case kBmpOS2V2Bytes:
+ return kOS2VX_BmpHeaderType;
+ default:
+ SkCodecPrintf("Error: unknown bmp header format.\n");
+ return kUnknown_BmpHeaderType;
+ }
+ } else if (infoBytes >= kBmpOS2V1Bytes) {
+ // The OS2V1 is treated separately because it has a unique format
+ return kOS2V1_BmpHeaderType;
+ } else {
+ // There are no valid bmp headers
+ SkCodecPrintf("Error: second bitmap header size is invalid.\n");
+ return kUnknown_BmpHeaderType;
+ }
+}
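+
+// For reference, the size-based dispatch above means, for example:
+// get_header_type(40) -> kInfoV1_BmpHeaderType (Windows BITMAPINFOHEADER)
+// get_header_type(124) -> kInfoV5_BmpHeaderType (Windows BITMAPV5HEADER)
+// get_header_type(12) -> kOS2V1_BmpHeaderType (OS/2 BITMAPCOREHEADER)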
+
+SkCodec::Result SkBmpCodec::ReadHeader(SkStream* stream, bool inIco,
+ std::unique_ptr<SkCodec>* codecOut) {
+ // The total bytes in the bmp file
+ // We only need to use this value for RLE decoding, so we will only
+ // check that it is valid in the RLE case.
+ uint32_t totalBytes;
+ // The offset from the start of the file where the pixel data begins
+ uint32_t offset;
+ // The size of the second (info) header in bytes
+ uint32_t infoBytes;
+
+ // Bmps embedded in Icos skip the first Bmp header
+ if (!inIco) {
+ // Read the first header and the size of the second header
+ uint8_t hBuffer[kBmpHeaderBytesPlusFour];
+ if (stream->read(hBuffer, kBmpHeaderBytesPlusFour) !=
+ kBmpHeaderBytesPlusFour) {
+ SkCodecPrintf("Error: unable to read first bitmap header.\n");
+ return kIncompleteInput;
+ }
+
+ totalBytes = get_int(hBuffer, 2);
+ offset = get_int(hBuffer, 10);
+ if (offset < kBmpHeaderBytes + kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid starting location for pixel data\n");
+ return kInvalidInput;
+ }
+
+ // The size of the second (info) header in bytes
+ // The size is the first field of the second header, so we have
+ // already read its first four bytes.
+ infoBytes = get_int(hBuffer, 14);
+ if (infoBytes < kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid second header size.\n");
+ return kInvalidInput;
+ }
+ } else {
+ // This value is only used by RLE compression. Bmp in Ico files do not
+ // use RLE. If the compression field is incorrectly signaled as RLE,
+ // we will catch this and signal an error below.
+ totalBytes = 0;
+
+ // Bmps in Ico cannot specify an offset. We will always assume that
+ // pixel data begins immediately after the color table. This value
+ // will be corrected below.
+ offset = 0;
+
+ // Read the size of the second header
+ uint8_t hBuffer[4];
+ if (stream->read(hBuffer, 4) != 4) {
+ SkCodecPrintf("Error: unable to read size of second bitmap header.\n");
+ return kIncompleteInput;
+ }
+ infoBytes = get_int(hBuffer, 0);
+ if (infoBytes < kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid second header size.\n");
+ return kInvalidInput;
+ }
+ }
+
+ // Determine image information depending on second header format
+ const BmpHeaderType headerType = get_header_type(infoBytes);
+ if (kUnknown_BmpHeaderType == headerType) {
+ return kInvalidInput;
+ }
+
+ // We already read the first four bytes of the info header to get the size
+ const uint32_t infoBytesRemaining = infoBytes - 4;
+
+ // Read the second header
+ std::unique_ptr<uint8_t[]> iBuffer(new uint8_t[infoBytesRemaining]);
+ if (stream->read(iBuffer.get(), infoBytesRemaining) != infoBytesRemaining) {
+ SkCodecPrintf("Error: unable to read second bitmap header.\n");
+ return kIncompleteInput;
+ }
+
+ // The number of bits used per pixel in the pixel data
+ uint16_t bitsPerPixel;
+
+ // The compression method for the pixel data
+ uint32_t compression = kNone_BmpCompressionMethod;
+
+ // Number of colors in the color table, defaults to 0 or max (see below)
+ uint32_t numColors = 0;
+
+ // Bytes per color in the color table, early versions use 3, most use 4
+ uint32_t bytesPerColor;
+
+ // The image width and height
+ int width, height;
+
+ switch (headerType) {
+ case kInfoV1_BmpHeaderType:
+ case kInfoV2_BmpHeaderType:
+ case kInfoV3_BmpHeaderType:
+ case kInfoV4_BmpHeaderType:
+ case kInfoV5_BmpHeaderType:
+ case kOS2VX_BmpHeaderType:
+ // We check the size of the header before entering the if statement.
+ // We should not reach this point unless the size is large enough for
+ // these required fields.
+ SkASSERT(infoBytesRemaining >= 12);
+ width = get_int(iBuffer.get(), 0);
+ height = get_int(iBuffer.get(), 4);
+ bitsPerPixel = get_short(iBuffer.get(), 10);
+
+ // Some versions do not have these fields, so we check before
+ // overwriting the default value.
+ if (infoBytesRemaining >= 16) {
+ compression = get_int(iBuffer.get(), 12);
+ if (infoBytesRemaining >= 32) {
+ numColors = get_int(iBuffer.get(), 28);
+ }
+ }
+
+ // All of the headers that reach this point store color table entries
+ // using 4 bytes per color.
+ bytesPerColor = 4;
+ break;
+ case kOS2V1_BmpHeaderType:
+ // The OS2V1 is treated separately because it has a unique format
+ width = (int) get_short(iBuffer.get(), 0);
+ height = (int) get_short(iBuffer.get(), 2);
+ bitsPerPixel = get_short(iBuffer.get(), 6);
+ bytesPerColor = 3;
+ break;
+ case kUnknown_BmpHeaderType:
+ // We'll exit above in this case.
+ SkASSERT(false);
+ return kInvalidInput;
+ }
+
+ // Check for valid dimensions from header
+ SkCodec::SkScanlineOrder rowOrder = SkCodec::kBottomUp_SkScanlineOrder;
+ if (height < 0) {
+ // We can't negate INT32_MIN.
+ if (height == INT32_MIN) {
+ return kInvalidInput;
+ }
+
+ height = -height;
+ rowOrder = SkCodec::kTopDown_SkScanlineOrder;
+ }
+ // The height field for a bmp in ico is double the actual height because
+ // the pixel data contains an XOR mask followed by an AND mask
+ if (inIco) {
+ height /= 2;
+ }
+
+ // Arbitrary maximum. Matches Chromium.
+ constexpr int kMaxDim = 1 << 16;
+ if (width <= 0 || height <= 0 || width >= kMaxDim || height >= kMaxDim) {
+ SkCodecPrintf("Error: invalid bitmap dimensions.\n");
+ return kInvalidInput;
+ }
+
+ // Create mask struct
+ SkMasks::InputMasks inputMasks;
+ memset(&inputMasks, 0, sizeof(SkMasks::InputMasks));
+
+ // Determine the input compression format and set bit masks if necessary
+ uint32_t maskBytes = 0;
+ BmpInputFormat inputFormat = kUnknown_BmpInputFormat;
+ switch (compression) {
+ case kNone_BmpCompressionMethod:
+ inputFormat = kStandard_BmpInputFormat;
+
+ // In addition to more standard pixel compression formats, bmp supports
+ // the use of bit masks to determine pixel components. The standard
+ // format for representing 16-bit colors is 555 (XRRRRRGGGGGBBBBB),
+ // which does not map well to any Skia color formats. For this reason,
+ // we will always enable mask mode with 16 bits per pixel.
+ if (16 == bitsPerPixel) {
+ inputMasks.red = 0x7C00;
+ inputMasks.green = 0x03E0;
+ inputMasks.blue = 0x001F;
+ inputFormat = kBitMask_BmpInputFormat;
+ }
+ break;
+ case k8BitRLE_BmpCompressionMethod:
+ if (bitsPerPixel != 8) {
+ SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
+ bitsPerPixel = 8;
+ }
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ case k4BitRLE_BmpCompressionMethod:
+ if (bitsPerPixel != 4) {
+ SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
+ bitsPerPixel = 4;
+ }
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ case kAlphaBitMasks_BmpCompressionMethod:
+ case kBitMasks_BmpCompressionMethod:
+ // Load the masks
+ inputFormat = kBitMask_BmpInputFormat;
+ switch (headerType) {
+ case kInfoV1_BmpHeaderType: {
+ // The V1 header stores the bit masks after the header
+ uint8_t buffer[kBmpMaskBytes];
+ if (stream->read(buffer, kBmpMaskBytes) != kBmpMaskBytes) {
+ SkCodecPrintf("Error: unable to read bit inputMasks.\n");
+ return kIncompleteInput;
+ }
+ maskBytes = kBmpMaskBytes;
+ inputMasks.red = get_int(buffer, 0);
+ inputMasks.green = get_int(buffer, 4);
+ inputMasks.blue = get_int(buffer, 8);
+ break;
+ }
+ case kInfoV2_BmpHeaderType:
+ case kInfoV3_BmpHeaderType:
+ case kInfoV4_BmpHeaderType:
+ case kInfoV5_BmpHeaderType:
+ // Header types are matched based on size. If the header
+ // is V2+, we are guaranteed to be able to read at least
+ // this size.
+ SkASSERT(infoBytesRemaining >= 48);
+ inputMasks.red = get_int(iBuffer.get(), 36);
+ inputMasks.green = get_int(iBuffer.get(), 40);
+ inputMasks.blue = get_int(iBuffer.get(), 44);
+
+ if (kInfoV2_BmpHeaderType == headerType ||
+ (kInfoV3_BmpHeaderType == headerType && !inIco)) {
+ break;
+ }
+
+ // V3+ bmp files introduce an alpha mask and allow the creator of the image
+ // to use the alpha channels. However, many of these images leave the
+ // alpha channel blank and expect to be rendered as opaque. This is the
+ // case for almost all V3 images, so we ignore the alpha mask. For V4+
+ // images in kMask mode, we will use the alpha mask. Additionally, V3
+ // bmps-in-ico expect us to use the alpha mask.
+ //
+ // skbug.com/4116: We should perhaps also apply the alpha mask in kStandard
+ // mode. We just haven't seen any images that expect this
+ // behavior.
+ //
+ // Header types are matched based on size. If the header is
+ // V3+, we are guaranteed to be able to read at least this size.
+ SkASSERT(infoBytesRemaining >= 52);
+ inputMasks.alpha = get_int(iBuffer.get(), 48);
+ break;
+ case kOS2VX_BmpHeaderType:
+ // TODO: Decide if we intend to support this.
+ // It is unsupported in the previous version and
+ // in chromium. I have not come across a test case
+ // that uses this format.
+ SkCodecPrintf("Error: huffman format unsupported.\n");
+ return kUnimplemented;
+ default:
+ SkCodecPrintf("Error: invalid bmp bit masks header.\n");
+ return kInvalidInput;
+ }
+ break;
+ case kJpeg_BmpCompressionMethod:
+ if (24 == bitsPerPixel) {
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ }
+ // Fall through
+ case kPng_BmpCompressionMethod:
+ // TODO: Decide if we intend to support this.
+ // It is unsupported in the previous version and
+ // in chromium. I think it is used mostly for printers.
+ SkCodecPrintf("Error: compression format not supported.\n");
+ return kUnimplemented;
+ case kCMYK_BmpCompressionMethod:
+ case kCMYK8BitRLE_BmpCompressionMethod:
+ case kCMYK4BitRLE_BmpCompressionMethod:
+ // TODO: Same as above.
+ SkCodecPrintf("Error: CMYK not supported for bitmap decoding.\n");
+ return kUnimplemented;
+ default:
+ SkCodecPrintf("Error: invalid format for bitmap decoding.\n");
+ return kInvalidInput;
+ }
+ iBuffer.reset();
+
+ // Calculate the number of bytes read so far
+ const uint32_t bytesRead = kBmpHeaderBytes + infoBytes + maskBytes;
+ if (!inIco && offset < bytesRead) {
+ // TODO (msarett): Do we really want to fail if the offset in the header is invalid?
+ // Seems like we can just assume that the offset is zero and try to decode?
+ // Maybe we don't want to try to decode corrupt images?
+ SkCodecPrintf("Error: pixel data offset less than header size.\n");
+ return kInvalidInput;
+ }
+
+ switch (inputFormat) {
+ case kStandard_BmpInputFormat: {
+ // BMPs are generally opaque, however BMPs-in-ICOs may contain
+ // a transparency mask after the image. Therefore, we mark the
+ // alpha as kBinary if the BMP is contained in an ICO.
+ // We use |isOpaque| to indicate if the BMP itself is opaque.
+ SkEncodedInfo::Alpha alpha = inIco ? SkEncodedInfo::kBinary_Alpha :
+ SkEncodedInfo::kOpaque_Alpha;
+ bool isOpaque = true;
+
+ SkEncodedInfo::Color color;
+ uint8_t bitsPerComponent;
+ switch (bitsPerPixel) {
+ // Palette formats
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ // In the case of ICO, kBGRA is actually the closest match,
+ // since we will need to apply a transparency mask.
+ if (inIco) {
+ color = SkEncodedInfo::kBGRA_Color;
+ bitsPerComponent = 8;
+ } else {
+ color = SkEncodedInfo::kPalette_Color;
+ bitsPerComponent = (uint8_t) bitsPerPixel;
+ }
+ break;
+ case 24:
+ // In the case of ICO, kBGRA is actually the closest match,
+ // since we will need to apply a transparency mask.
+ color = inIco ? SkEncodedInfo::kBGRA_Color : SkEncodedInfo::kBGR_Color;
+ bitsPerComponent = 8;
+ break;
+ case 32:
+ // 32-bit BMP-in-ICOs actually use the alpha channel in place of a
+ // transparency mask.
+ if (inIco) {
+ isOpaque = false;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ color = SkEncodedInfo::kBGRA_Color;
+ } else {
+ color = SkEncodedInfo::kBGRX_Color;
+ }
+ bitsPerComponent = 8;
+ break;
+ default:
+ SkCodecPrintf("Error: invalid input value for bits per pixel.\n");
+ return kInvalidInput;
+ }
+
+ if (codecOut) {
+ // We require streams to have a memory base for Bmp-in-Ico decodes.
+ SkASSERT(!inIco || nullptr != stream->getMemoryBase());
+
+ // Set the image info and create a codec.
+ auto info = SkEncodedInfo::Make(width, height, color, alpha, bitsPerComponent);
+ codecOut->reset(new SkBmpStandardCodec(std::move(info),
+ std::unique_ptr<SkStream>(stream),
+ bitsPerPixel, numColors, bytesPerColor,
+ offset - bytesRead, rowOrder, isOpaque,
+ inIco));
+ return static_cast<SkBmpStandardCodec*>(codecOut->get())->didCreateSrcBuffer()
+ ? kSuccess : kInvalidInput;
+ }
+ return kSuccess;
+ }
+
+ case kBitMask_BmpInputFormat: {
+ // Bmp-in-Ico must be standard mode
+ if (inIco) {
+ SkCodecPrintf("Error: Icos may not use bit mask format.\n");
+ return kInvalidInput;
+ }
+
+ switch (bitsPerPixel) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ SkCodecPrintf("Error: invalid input value for bits per pixel.\n");
+ return kInvalidInput;
+ }
+
+ // Skip to the start of the pixel array.
+ // We can do this here because there is no color table to read
+ // in bit mask mode.
+ if (stream->skip(offset - bytesRead) != offset - bytesRead) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return kIncompleteInput;
+ }
+
+ if (codecOut) {
+ // Check that input bit masks are valid and create the masks object
+ SkASSERT(bitsPerPixel % 8 == 0);
+ std::unique_ptr<SkMasks> masks(SkMasks::CreateMasks(inputMasks, bitsPerPixel/8));
+ if (nullptr == masks) {
+ SkCodecPrintf("Error: invalid input masks.\n");
+ return kInvalidInput;
+ }
+
+ // Masked bmps are not a great fit for SkEncodedInfo, since they have
+ // arbitrary component orderings and bits per component. Here we choose
+ // somewhat reasonable values - it's ok that we don't match exactly
+ // because SkBmpMaskCodec has its own mask swizzler anyway.
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ if (masks->getAlphaMask()) {
+ color = SkEncodedInfo::kBGRA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ } else {
+ color = SkEncodedInfo::kBGR_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ auto info = SkEncodedInfo::Make(width, height, color, alpha, 8);
+ codecOut->reset(new SkBmpMaskCodec(std::move(info),
+ std::unique_ptr<SkStream>(stream), bitsPerPixel,
+ masks.release(), rowOrder));
+ return static_cast<SkBmpMaskCodec*>(codecOut->get())->didCreateSrcBuffer()
+ ? kSuccess : kInvalidInput;
+ }
+ return kSuccess;
+ }
+
+ case kRLE_BmpInputFormat: {
+ // We should not reach this point without a valid value of bitsPerPixel.
+ SkASSERT(4 == bitsPerPixel || 8 == bitsPerPixel || 24 == bitsPerPixel);
+
+ // Check for a valid number of total bytes when in RLE mode
+ if (totalBytes <= offset) {
+ SkCodecPrintf("Error: RLE requires valid input size.\n");
+ return kInvalidInput;
+ }
+
+ // Bmp-in-Ico must be standard mode
+ // When inIco is true, this line cannot be reached, since we
+ // require that RLE Bmps have a valid number of totalBytes, and
+ // Icos skip the header that contains totalBytes.
+ SkASSERT(!inIco);
+
+ if (codecOut) {
+ // RLE inputs may skip pixels, leaving them as transparent. This
+ // is uncommon, but we cannot be certain that an RLE bmp will be
+ // opaque or that we will be able to represent it with a palette.
+ // For that reason, we always indicate that we are kBGRA.
+ auto info = SkEncodedInfo::Make(width, height, SkEncodedInfo::kBGRA_Color,
+ SkEncodedInfo::kBinary_Alpha, 8);
+ codecOut->reset(new SkBmpRLECodec(std::move(info),
+ std::unique_ptr<SkStream>(stream), bitsPerPixel,
+ numColors, bytesPerColor, offset - bytesRead,
+ rowOrder));
+ }
+ return kSuccess;
+ }
+ default:
+ SkASSERT(false);
+ return kInvalidInput;
+ }
+}
+
+/*
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+std::unique_ptr<SkCodec> SkBmpCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result, bool inIco) {
+ std::unique_ptr<SkCodec> codec;
+ *result = ReadHeader(stream.get(), inIco, &codec);
+ if (codec) {
+ // codec has taken ownership of stream, so we do not need to delete it.
+ stream.release();
+ }
+ return kSuccess == *result ? std::move(codec) : nullptr;
+}
+
+SkBmpCodec::SkBmpCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(std::move(info), kXformSrcColorFormat, std::move(stream))
+ , fBitsPerPixel(bitsPerPixel)
+ , fRowOrder(rowOrder)
+ , fSrcRowBytes(SkAlign4(compute_row_bytes(this->dimensions().width(), fBitsPerPixel)))
+ , fXformBuffer(nullptr)
+{}
+
+bool SkBmpCodec::onRewind() {
+ return SkBmpCodec::ReadHeader(this->stream(), this->inIco(), nullptr) == kSuccess;
+}
+
+int32_t SkBmpCodec::getDstRow(int32_t y, int32_t height) const {
+ if (SkCodec::kTopDown_SkScanlineOrder == fRowOrder) {
+ return y;
+ }
+ SkASSERT(SkCodec::kBottomUp_SkScanlineOrder == fRowOrder);
+ return height - y - 1;
+}
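+
+// Example: for a kBottomUp image of height 10, the first encoded row lands at
+// the bottom of the output (getDstRow(0, 10) == 9), while kTopDown rows map
+// through unchanged.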
+
+SkCodec::Result SkBmpCodec::prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ return this->onPrepareToDecode(dstInfo, options);
+}
+
+SkCodec::Result SkBmpCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ return prepareToDecode(dstInfo, options);
+}
+
+int SkBmpCodec::onGetScanlines(void* dst, int count, size_t rowBytes) {
+ // Create a new image info representing the portion of the image to decode
+ SkImageInfo rowInfo = this->dstInfo().makeWH(this->dstInfo().width(), count);
+
+ // Decode the requested rows
+ return this->decodeRows(rowInfo, dst, rowBytes, this->options());
+}
+
+bool SkBmpCodec::skipRows(int count) {
+ const size_t bytesToSkip = count * fSrcRowBytes;
+ return this->stream()->skip(bytesToSkip) == bytesToSkip;
+}
+
+bool SkBmpCodec::onSkipScanlines(int count) {
+ return this->skipRows(count);
+}
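+
+// --- Editorial sketch, not part of upstream Skia ----------------------------
+// A hedged example of driving the scanline entry points above through
+// SkCodec's public API; the helper name and the abbreviated error handling
+// are illustrative. Kept static and unused.
+static int example_scanline_decode(std::unique_ptr<SkStream> stream,
+ void* dst, size_t rowBytes) {
+ std::unique_ptr<SkCodec> codec = SkCodec::MakeFromStream(std::move(stream));
+ if (!codec) {
+ return 0;
+ }
+ const SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType);
+ if (codec->startScanlineDecode(info) != SkCodec::kSuccess) {
+ return 0;
+ }
+ // For kBottomUp BMPs the rows come back in encoded (bottom-up) order;
+ // SkCodec::outputScanline() maps an encoded row to its output position.
+ return codec->getScanlines(dst, info.height(), rowBytes);
+}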
diff --git a/gfx/skia/skia/src/codec/SkBmpCodec.h b/gfx/skia/skia/src/codec/SkBmpCodec.h
new file mode 100644
index 0000000000..97a15b6d23
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpCodec.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBmpCodec_DEFINED
+#define SkBmpCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkSwizzler.h"
+
+/*
+ * This class enables code sharing between its bmp codec subclasses. The
+ * subclasses actually do the work.
+ */
+class SkBmpCodec : public SkCodec {
+public:
+ static bool IsBmp(const void*, size_t);
+
+ /*
+ * Assumes IsBmp was called and returned true
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+ /*
+ * Creates a bmp decoder for a bmp embedded in ico
+ * Reads enough of the stream to determine the image format
+ */
+ static std::unique_ptr<SkCodec> MakeFromIco(std::unique_ptr<SkStream>, Result*);
+
+protected:
+
+ SkBmpCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream>,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder);
+
+ SkEncodedImageFormat onGetEncodedFormat() const override { return SkEncodedImageFormat::kBMP; }
+
+ /*
+ * Read enough of the stream to initialize the SkBmpCodec.
+ * On kSuccess, if codecOut is not nullptr, it will be set to a new SkBmpCodec.
+ */
+ static Result ReadHeader(SkStream*, bool inIco, std::unique_ptr<SkCodec>* codecOut);
+
+ bool onRewind() override;
+
+ /*
+ * Returns whether this BMP is part of an ICO image.
+ */
+ bool inIco() const {
+ return this->onInIco();
+ }
+
+ virtual bool onInIco() const {
+ return false;
+ }
+
+ /*
+ * Get the destination row number corresponding to the encoded row number.
+ * For kTopDown, we simply return y, but for kBottomUp, the rows will be
+ * decoded in reverse order.
+ *
+ * @param y Iterates from 0 to height, indicating the current row.
+ * @param height The height of the current subset of the image that we are
+ * decoding. This is generally equal to the full height
+ * when we want to decode the full image, or one when we
+ * are sampling.
+ */
+ int32_t getDstRow(int32_t y, int32_t height) const;
+
+ /*
+ * Accessors used by subclasses
+ */
+ uint16_t bitsPerPixel() const { return fBitsPerPixel; }
+ SkScanlineOrder onGetScanlineOrder() const override { return fRowOrder; }
+ size_t srcRowBytes() const { return fSrcRowBytes; }
+
+ /*
+ * To be overridden by bmp subclasses, which provide unique implementations.
+ * Performs subclass specific setup.
+ *
+ * @param dstInfo Contains output information. Height specifies
+ * the total number of rows that will be decoded.
+ * @param options Additional options to pass to the decoder.
+ */
+ virtual SkCodec::Result onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) = 0;
+ SkCodec::Result prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options);
+
+ uint32_t* xformBuffer() const { return fXformBuffer.get(); }
+ void resetXformBuffer(int count) { fXformBuffer.reset(new uint32_t[count]); }
+
+ /*
+ * BMPs are typically encoded as BGRA/BGR so this is a more efficient choice
+ * than RGBA.
+ */
+ static constexpr SkColorType kXformSrcColorType = kBGRA_8888_SkColorType;
+ static constexpr auto kXformSrcColorFormat = skcms_PixelFormat_BGRA_8888;
+
+private:
+
+ /*
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*, bool inIco);
+
+ /*
+ * Decodes the next dstInfo.height() lines.
+ *
+ * onGetPixels() uses this for full image decodes.
+ * SkScaledCodec::onGetPixels() uses the scanline decoder to call this with
+ * dstInfo.height() = 1, in order to implement sampling.
+ * A potential future use is to allow the caller to decode a subset of the
+ * lines in the image.
+ *
+ * @param dstInfo Contains output information. Height specifies the
+ * number of rows to decode at this time.
+ * @param dst Memory location to store output pixels
+ * @param dstRowBytes Bytes in a row of the destination
+ * @return Number of rows successfully decoded
+ */
+ virtual int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) = 0;
+
+ virtual bool skipRows(int count);
+
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options&) override;
+
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+
+ bool onSkipScanlines(int count) override;
+
+ const uint16_t fBitsPerPixel;
+ const SkScanlineOrder fRowOrder;
+ const size_t fSrcRowBytes;
+ std::unique_ptr<uint32_t[]> fXformBuffer;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp b/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp
new file mode 100644
index 0000000000..874056a08d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "src/codec/SkBmpMaskCodec.h"
+#include "src/codec/SkCodecPriv.h"
+
+/*
+ * Creates an instance of the decoder
+ */
+SkBmpMaskCodec::SkBmpMaskCodec(SkEncodedInfo&& info,
+ std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, SkMasks* masks,
+ SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(std::move(info), std::move(stream), bitsPerPixel, rowOrder)
+ , fMasks(masks)
+ , fMaskSwizzler(nullptr)
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpMaskCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+ if (dstInfo.dimensions() != this->dimensions()) {
+ SkCodecPrintf("Error: scaling not supported.\n");
+ return kInvalidScale;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ *rowsDecoded = rows;
+ return kIncompleteInput;
+ }
+ return kSuccess;
+}
+
+SkCodec::Result SkBmpMaskCodec::onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ if (this->colorXform()) {
+ this->resetXformBuffer(dstInfo.width());
+ }
+
+ SkImageInfo swizzlerInfo = dstInfo;
+ if (this->colorXform()) {
+ swizzlerInfo = swizzlerInfo.makeColorType(kXformSrcColorType);
+ if (kPremul_SkAlphaType == dstInfo.alphaType()) {
+ swizzlerInfo = swizzlerInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+ }
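+
+ // When a color transform is present, the swizzler is asked for
+ // unpremultiplied kBGRA output; applyColorXform() in decodeRows() then
+ // converts to the destination format, premultiplying if requested.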
+
+ bool srcIsOpaque = this->getEncodedInfo().opaque();
+ fMaskSwizzler.reset(SkMaskSwizzler::CreateMaskSwizzler(swizzlerInfo, srcIsOpaque,
+ fMasks.get(), this->bitsPerPixel(), options));
+ SkASSERT(fMaskSwizzler);
+
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the decoding
+ */
+int SkBmpMaskCodec::decodeRows(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ // Iterate over rows of the image
+ uint8_t* srcRow = this->srcBuffer();
+ const int height = dstInfo.height();
+ for (int y = 0; y < height; y++) {
+ // Read a row of the input
+ if (this->stream()->read(srcRow, this->srcRowBytes()) != this->srcRowBytes()) {
+ SkCodecPrintf("Warning: incomplete input stream.\n");
+ return y;
+ }
+
+ // Decode the row in destination format
+ uint32_t row = this->getDstRow(y, height);
+ void* dstRow = SkTAddOffset<void>(dst, row * dstRowBytes);
+
+ if (this->colorXform()) {
+ fMaskSwizzler->swizzle(this->xformBuffer(), srcRow);
+ this->applyColorXform(dstRow, this->xformBuffer(), fMaskSwizzler->swizzleWidth());
+ } else {
+ fMaskSwizzler->swizzle(dstRow, srcRow);
+ }
+ }
+
+ // Finished decoding the entire image
+ return height;
+}
diff --git a/gfx/skia/skia/src/codec/SkBmpMaskCodec.h b/gfx/skia/skia/src/codec/SkBmpMaskCodec.h
new file mode 100644
index 0000000000..eaef50d20b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpMaskCodec.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBmpMaskCodec_DEFINED
+#define SkBmpMaskCodec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkBmpBaseCodec.h"
+#include "src/codec/SkMaskSwizzler.h"
+
+/*
+ * This class implements the decoding for bmp images using bit masks
+ */
+class SkBmpMaskCodec : public SkBmpBaseCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::MakeFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param masks color masks for certain bmp formats
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ */
+ SkBmpMaskCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream>,
+ uint16_t bitsPerPixel, SkMasks* masks,
+ SkCodec::SkScanlineOrder rowOrder);
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&,
+ int*) override;
+
+ SkCodec::Result onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) override;
+
+private:
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fMaskSwizzler);
+ return fMaskSwizzler.get();
+ }
+
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+
+ std::unique_ptr<SkMasks> fMasks;
+ std::unique_ptr<SkMaskSwizzler> fMaskSwizzler;
+
+ typedef SkBmpBaseCodec INHERITED;
+};
+#endif // SkBmpMaskCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp b/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp
new file mode 100644
index 0000000000..fc5d298e2f
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkBmpRLECodec.h"
+#include "src/codec/SkCodecPriv.h"
+
+/*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+SkBmpRLECodec::SkBmpRLECodec(SkEncodedInfo&& info,
+ std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, uint32_t numColors,
+ uint32_t bytesPerColor, uint32_t offset,
+ SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(std::move(info), std::move(stream), bitsPerPixel, rowOrder)
+ , fColorTable(nullptr)
+ , fNumColors(numColors)
+ , fBytesPerColor(bytesPerColor)
+ , fOffset(offset)
+ , fBytesBuffered(0)
+ , fCurrRLEByte(0)
+ , fSampleX(1)
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpRLECodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ // Perform the decode
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ // We set rowsDecoded equal to the height because the background has already
+ // been filled. RLE encodings sometimes skip pixels, so we always start by
+ // filling the background.
+ *rowsDecoded = dstInfo.height();
+ return kIncompleteInput;
+ }
+
+ return kSuccess;
+}
+
+/*
+ * Process the color table for the bmp input
+ */
+bool SkBmpRLECodec::createColorTable(SkColorType dstColorType) {
+ // Allocate memory for color table
+ uint32_t colorBytes = 0;
+ SkPMColor colorTable[256];
+ if (this->bitsPerPixel() <= 8) {
+ // Inform the caller of the number of colors
+ uint32_t maxColors = 1 << this->bitsPerPixel();
+ // Don't bother reading more than maxColors.
+ const uint32_t numColorsToRead =
+ fNumColors == 0 ? maxColors : SkTMin(fNumColors, maxColors);
+
+ // Read the color table from the stream
+ colorBytes = numColorsToRead * fBytesPerColor;
+ std::unique_ptr<uint8_t[]> cBuffer(new uint8_t[colorBytes]);
+ if (stream()->read(cBuffer.get(), colorBytes) != colorBytes) {
+ SkCodecPrintf("Error: unable to read color table.\n");
+ return false;
+ }
+
+ // Fill in the color table
+ PackColorProc packARGB = choose_pack_color_proc(false, dstColorType);
+ uint32_t i = 0;
+ for (; i < numColorsToRead; i++) {
+ uint8_t blue = get_byte(cBuffer.get(), i*fBytesPerColor);
+ uint8_t green = get_byte(cBuffer.get(), i*fBytesPerColor + 1);
+ uint8_t red = get_byte(cBuffer.get(), i*fBytesPerColor + 2);
+ colorTable[i] = packARGB(0xFF, red, green, blue);
+ }
+
+ // To avoid segmentation faults on bad pixel data, fill the end of the
+ // color table with black. This is the same behavior as the
+ // Chromium decoder.
+ for (; i < maxColors; i++) {
+ colorTable[i] = SkPackARGB32NoCheck(0xFF, 0, 0, 0);
+ }
+
+ // Set the color table
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ }
+
+ // Check that we have not read past the pixel array offset
+ if (fOffset < colorBytes) {
+ // This may occur on OS 2.1 and other old versions where the color
+ // table defaults to max size, and the bmp tries to use a smaller
+ // color table. This is invalid, and our decision is to indicate
+ // an error, rather than try to guess the intended size of the
+ // color table.
+ SkCodecPrintf("Error: pixel data offset less than color table size.\n");
+ return false;
+ }
+
+ // After reading the color table, skip to the start of the pixel array
+ if (stream()->skip(fOffset - colorBytes) != fOffset - colorBytes) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return false;
+ }
+
+ // Return true on success
+ return true;
+}
+
+bool SkBmpRLECodec::initializeStreamBuffer() {
+ fBytesBuffered = this->stream()->read(fStreamBuffer, kBufferSize);
+ if (fBytesBuffered == 0) {
+ SkCodecPrintf("Error: could not read RLE image data.\n");
+ return false;
+ }
+ fCurrRLEByte = 0;
+ return true;
+}
+
+/*
+ * @return the number of bytes remaining in the stream buffer after
+ * attempting to read more bytes from the stream
+ */
+size_t SkBmpRLECodec::checkForMoreData() {
+ const size_t remainingBytes = fBytesBuffered - fCurrRLEByte;
+ uint8_t* buffer = fStreamBuffer;
+
+ // We will be reusing the same buffer, starting over from the beginning.
+ // Move any remaining bytes to the start of the buffer.
+ // We use memmove() instead of memcpy() because there is risk that the dst
+ // and src memory will overlap in corrupt images.
+ memmove(buffer, SkTAddOffset<uint8_t>(buffer, fCurrRLEByte), remainingBytes);
+
+ // Adjust the buffer ptr to the start of the unfilled data.
+ buffer += remainingBytes;
+
+ // Try to read additional bytes from the stream. There are fCurrRLEByte
+ // bytes of additional space remaining in the buffer, assuming that we
+ // have already copied remainingBytes to the start of the buffer.
+ size_t additionalBytes = this->stream()->read(buffer, fCurrRLEByte);
+
+ // Update counters and return the number of bytes we currently have
+ // available. We are at the start of the buffer again.
+ fCurrRLEByte = 0;
+ fBytesBuffered = remainingBytes + additionalBytes;
+ return fBytesBuffered;
+}
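+
+// Example: with kBufferSize == 4096, fCurrRLEByte == 4090 and fBytesBuffered
+// == 4096, the six leftover bytes move to the front of fStreamBuffer, up to
+// 4090 fresh bytes are read from the stream, and the cursor resets to 0.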
+
+/*
+ * Set an RLE pixel using the color table
+ */
+void SkBmpRLECodec::setPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t index) {
+ if (dst && is_coord_necessary(x, fSampleX, dstInfo.width())) {
+ // Set the row
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ // Set the pixel based on destination color type
+ const int dstX = get_dst_coord(x, fSampleX);
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = fColorTable->operator[](index);
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = SkTAddOffset<uint16_t>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPixel32ToPixel16(fColorTable->operator[](index));
+ break;
+ }
+ default:
+ // This case should not be reached. We should catch an invalid
+ // color type when we check that the conversion is possible.
+ SkASSERT(false);
+ break;
+ }
+ }
+}
+
+/*
+ * Set an RLE pixel from R, G, B values
+ */
+void SkBmpRLECodec::setRGBPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x,
+ uint32_t y, uint8_t red, uint8_t green,
+ uint8_t blue) {
+ if (dst && is_coord_necessary(x, fSampleX, dstInfo.width())) {
+ // Set the row
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ // Set the pixel based on destination color type
+ const int dstX = get_dst_coord(x, fSampleX);
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ break;
+ }
+ case kBGRA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = SkTAddOffset<uint16_t>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPack888ToRGB16(red, green, blue);
+ break;
+ }
+ default:
+ // This case should not be reached. We should catch an invalid
+ // color type when we check that the conversion is possible.
+ SkASSERT(false);
+ break;
+ }
+ }
+}
+
+SkCodec::Result SkBmpRLECodec::onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ // FIXME: Support subsets for scanline decodes.
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ // Reset fSampleX. If it needs to be a value other than 1, it will get modified by
+ // the sampler.
+ fSampleX = 1;
+ fLinesToSkip = 0;
+
+ SkColorType colorTableColorType = dstInfo.colorType();
+ if (this->colorXform()) {
+ // Just set a known colorType for the colorTable. No need to actually transform
+ // the colors in the colorTable.
+ colorTableColorType = kBGRA_8888_SkColorType;
+ }
+
+ // Create the color table if necessary and prepare the stream for decode
+ // Note that if it is non-NULL, inputColorCount will be modified
+ if (!this->createColorTable(colorTableColorType)) {
+ SkCodecPrintf("Error: could not create color table.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ // Initialize a buffer for encoded RLE data
+ if (!this->initializeStreamBuffer()) {
+ SkCodecPrintf("Error: cannot initialize stream buffer.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the bitmap decoding for RLE input format
+ * RLE decoding is performed all at once, rather than one row at a time
+ */
+int SkBmpRLECodec::decodeRows(const SkImageInfo& info, void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ int height = info.height();
+
+ // Account for sampling.
+ SkImageInfo dstInfo = info.makeWH(this->fillWidth(), height);
+
+ // Set the background as transparent. Then, if the RLE code skips pixels,
+ // the skipped pixels will be transparent.
+ if (dst) {
+ SkSampler::Fill(dstInfo, dst, dstRowBytes, opts.fZeroInitialized);
+ }
+
+ // Adjust the height and the dst if the previous call to decodeRows() left us
+ // with lines that need to be skipped.
+ if (height > fLinesToSkip) {
+ height -= fLinesToSkip;
+ if (dst) {
+ dst = SkTAddOffset<void>(dst, fLinesToSkip * dstRowBytes);
+ }
+ fLinesToSkip = 0;
+
+ dstInfo = dstInfo.makeWH(dstInfo.width(), height);
+ } else {
+ fLinesToSkip -= height;
+ return height;
+ }
+
+ void* decodeDst = dst;
+ size_t decodeRowBytes = dstRowBytes;
+ SkImageInfo decodeInfo = dstInfo;
+ if (decodeDst) {
+ if (this->colorXform()) {
+ decodeInfo = decodeInfo.makeColorType(kXformSrcColorType);
+ if (kRGBA_F16_SkColorType == dstInfo.colorType()) {
+ int count = height * dstInfo.width();
+ this->resetXformBuffer(count);
+ sk_bzero(this->xformBuffer(), count * sizeof(uint32_t));
+ decodeDst = this->xformBuffer();
+ decodeRowBytes = dstInfo.width() * sizeof(uint32_t);
+ }
+ }
+ }
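+
+ // Note: when color-transforming to kRGBA_F16, the pixels cannot be
+ // written into dst directly (F16 is 8 bytes per pixel vs. the 4-byte
+ // BGRA decode format), so they are decoded into xformBuffer() first;
+ // other destinations are decoded in place and transformed row by row.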
+
+ int decodedHeight = this->decodeRLE(decodeInfo, decodeDst, decodeRowBytes);
+ if (this->colorXform() && decodeDst) {
+ for (int y = 0; y < decodedHeight; y++) {
+ this->applyColorXform(dst, decodeDst, dstInfo.width());
+ decodeDst = SkTAddOffset<void>(decodeDst, decodeRowBytes);
+ dst = SkTAddOffset<void>(dst, dstRowBytes);
+ }
+ }
+
+ return decodedHeight;
+}
+
+int SkBmpRLECodec::decodeRLE(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes) {
+ // Use the original width to count the number of pixels in each row.
+ const int width = this->dimensions().width();
+
+ // This tells us the number of rows that we are meant to decode.
+ const int height = dstInfo.height();
+
+ // Set RLE flags
+ constexpr uint8_t RLE_ESCAPE = 0;
+ constexpr uint8_t RLE_EOL = 0;
+ constexpr uint8_t RLE_EOF = 1;
+ constexpr uint8_t RLE_DELTA = 2;
+
+ // Destination parameters
+ int x = 0;
+ int y = 0;
+
+ while (true) {
+ // If we have reached a row that is beyond the requested height, we have
+ // succeeded.
+ if (y >= height) {
+ // It would be better to check for the EOF marker before indicating
+ // success, but we may be performing a scanline decode, which
+ // would require us to stop before decoding the full height.
+ return height;
+ }
+
+ // Every entry takes at least two bytes
+ if ((int) fBytesBuffered - fCurrRLEByte < 2) {
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+
+ // Read the next two bytes. These bytes have different meanings
+ // depending on their values. In the first interpretation, the first
+ // byte is an escape flag and the second byte indicates what special
+ // task to perform.
+ const uint8_t flag = fStreamBuffer[fCurrRLEByte++];
+ const uint8_t task = fStreamBuffer[fCurrRLEByte++];
+
+ // Perform decoding
+ if (RLE_ESCAPE == flag) {
+ switch (task) {
+ case RLE_EOL:
+ x = 0;
+ y++;
+ break;
+ case RLE_EOF:
+ return height;
+ case RLE_DELTA: {
+ // Two bytes are needed to specify delta
+ if ((int) fBytesBuffered - fCurrRLEByte < 2) {
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+ // Modify x and y
+ const uint8_t dx = fStreamBuffer[fCurrRLEByte++];
+ const uint8_t dy = fStreamBuffer[fCurrRLEByte++];
+ x += dx;
+ y += dy;
+ if (x > width) {
+ SkCodecPrintf("Warning: invalid RLE input.\n");
+ return y - dy;
+ } else if (y > height) {
+ fLinesToSkip = y - height;
+ return height;
+ }
+ break;
+ }
+ default: {
+ // If task does not match any of the above signals, it
+ // indicates that we have a sequence of non-RLE pixels.
+ // Furthermore, the value of task is equal to the number
+ // of pixels to interpret.
+ uint8_t numPixels = task;
+ const size_t rowBytes = compute_row_bytes(numPixels,
+ this->bitsPerPixel());
+ // Abort if setting numPixels moves us off the edge of the
+ // image.
+ if (x + numPixels > width) {
+ SkCodecPrintf("Warning: invalid RLE input.\n");
+ return y;
+ }
+
+ // Also abort if there are not enough bytes
+ // remaining in the stream to set numPixels.
+
+ // At most, alignedRowBytes can be 255 (max uint8_t) *
+ // 3 (max bytes per pixel) + 1 (aligned) = 766. If
+ // fStreamBuffer was smaller than this,
+ // checkForMoreData would never succeed for some bmps.
+ static_assert(255 * 3 + 1 < kBufferSize,
+ "kBufferSize needs to be larger!");
+ const size_t alignedRowBytes = SkAlign2(rowBytes);
+ if ((int) fBytesBuffered - fCurrRLEByte < alignedRowBytes) {
+ SkASSERT(alignedRowBytes < kBufferSize);
+ if (this->checkForMoreData() < alignedRowBytes) {
+ return y;
+ }
+ }
+ // Set numPixels number of pixels
+ while (numPixels > 0) {
+ switch(this->bitsPerPixel()) {
+ case 4: {
+ SkASSERT(fCurrRLEByte < fBytesBuffered);
+ uint8_t val = fStreamBuffer[fCurrRLEByte++];
+ setPixel(dst, dstRowBytes, dstInfo, x++,
+ y, val >> 4);
+ numPixels--;
+ if (numPixels != 0) {
+ setPixel(dst, dstRowBytes, dstInfo,
+ x++, y, val & 0xF);
+ numPixels--;
+ }
+ break;
+ }
+ case 8:
+ SkASSERT(fCurrRLEByte < fBytesBuffered);
+ setPixel(dst, dstRowBytes, dstInfo, x++,
+ y, fStreamBuffer[fCurrRLEByte++]);
+ numPixels--;
+ break;
+ case 24: {
+ SkASSERT(fCurrRLEByte + 2 < fBytesBuffered);
+ uint8_t blue = fStreamBuffer[fCurrRLEByte++];
+ uint8_t green = fStreamBuffer[fCurrRLEByte++];
+ uint8_t red = fStreamBuffer[fCurrRLEByte++];
+ setRGBPixel(dst, dstRowBytes, dstInfo,
+ x++, y, red, green, blue);
+ numPixels--;
+ break;
+ }
+ default:
+ SkASSERT(false);
+ return y;
+ }
+ }
+ // Skip a byte if necessary to maintain alignment
+ if (!SkIsAlign2(rowBytes)) {
+ fCurrRLEByte++;
+ }
+ break;
+ }
+ }
+ } else {
+ // If the first byte read is not a flag, it indicates the number of
+ // pixels to set in RLE mode.
+ const uint8_t numPixels = flag;
+ const int endX = SkTMin<int>(x + numPixels, width);
+
+ if (24 == this->bitsPerPixel()) {
+ // In RLE24, the second byte read is part of the pixel color.
+ // There are two more required bytes to finish encoding the
+ // color.
+ if ((int) fBytesBuffered - fCurrRLEByte < 2) {
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+
+ // Fill the pixels up to endX with the specified color
+ uint8_t blue = task;
+ uint8_t green = fStreamBuffer[fCurrRLEByte++];
+ uint8_t red = fStreamBuffer[fCurrRLEByte++];
+ while (x < endX) {
+ setRGBPixel(dst, dstRowBytes, dstInfo, x++, y, red, green, blue);
+ }
+ } else {
+ // In RLE8 or RLE4, the second byte read gives the index in the
+ // color table to look up the pixel color.
+ // RLE8 has one color index that gets repeated
+ // RLE4 has two color indexes in the upper and lower 4 bits of
+ // the bytes, which are alternated
+ uint8_t indices[2] = { task, task };
+ if (4 == this->bitsPerPixel()) {
+ indices[0] >>= 4;
+ indices[1] &= 0xf;
+ }
+
+ // Set the indicated number of pixels
+ for (int which = 0; x < endX; x++) {
+ setPixel(dst, dstRowBytes, dstInfo, x, y, indices[which]);
+ which = !which;
+ }
+ }
+ }
+ }
+}
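+
+// Worked example (RLE8): the byte sequence 03 1F 00 02 05 01 00 01 decodes as
+// "repeat palette index 0x1F three times", then an escape (00) with task 02
+// (delta: move right 5, down 1), then an escape with task 01 (end of file).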
+
+bool SkBmpRLECodec::skipRows(int count) {
+ const SkImageInfo rowInfo = SkImageInfo::Make(this->dimensions().width(), count,
+ kN32_SkColorType, kUnpremul_SkAlphaType);
+ return count == this->decodeRows(rowInfo, nullptr, 0, this->options());
+}
+
+// FIXME: Make SkBmpRLECodec have no knowledge of sampling.
+// Or it should do all sampling natively.
+// It currently is a hybrid that needs to know what SkScaledCodec is doing.
+class SkBmpRLESampler : public SkSampler {
+public:
+ SkBmpRLESampler(SkBmpRLECodec* codec)
+ : fCodec(codec)
+ {
+ SkASSERT(fCodec);
+ }
+
+ int fillWidth() const override {
+ return fCodec->fillWidth();
+ }
+
+private:
+ int onSetSampleX(int sampleX) override {
+ return fCodec->setSampleX(sampleX);
+ }
+
+ // Unowned pointer. fCodec will delete this class in its destructor.
+ SkBmpRLECodec* fCodec;
+};
+
+SkSampler* SkBmpRLECodec::getSampler(bool createIfNecessary) {
+ if (!fSampler && createIfNecessary) {
+ fSampler.reset(new SkBmpRLESampler(this));
+ }
+
+ return fSampler.get();
+}
+
+int SkBmpRLECodec::setSampleX(int sampleX) {
+ fSampleX = sampleX;
+ return this->fillWidth();
+}
+
+int SkBmpRLECodec::fillWidth() const {
+ return get_scaled_dimension(this->dimensions().width(), fSampleX);
+}
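+
+// Example: for a 100-pixel-wide BMP with sampleX == 3, fillWidth() returns
+// get_scaled_dimension(100, 3) == 33, i.e. the output row keeps roughly every
+// third source pixel.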
diff --git a/gfx/skia/skia/src/codec/SkBmpRLECodec.h b/gfx/skia/skia/src/codec/SkBmpRLECodec.h
new file mode 100644
index 0000000000..80acf423ce
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpRLECodec.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBmpRLECodec_DEFINED
+#define SkBmpRLECodec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkBmpCodec.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkSampler.h"
+
+/*
+ * This class implements the decoding for bmp images that use an RLE encoding
+ */
+class SkBmpRLECodec : public SkBmpCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::MakeFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param numColors the number of colors in the color table
+ * @param bytesPerColor the number of bytes in the stream used to represent
+ * each color in the color table
+ * @param offset the offset of the image pixel data from the end of the
+ * headers
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ */
+ SkBmpRLECodec(SkEncodedInfo&& info, std::unique_ptr<SkStream>,
+ uint16_t bitsPerPixel, uint32_t numColors, uint32_t bytesPerColor,
+ uint32_t offset, SkCodec::SkScanlineOrder rowOrder);
+
+ int setSampleX(int);
+
+ int fillWidth() const;
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&,
+ int*) override;
+
+ SkCodec::Result onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) override;
+
+private:
+
+ /*
+ * Creates the color table
+ * Reads the table from the stream and skips to the start of the pixel array
+ */
+ bool createColorTable(SkColorType dstColorType);
+
+ bool initializeStreamBuffer();
+
+ /*
+ * Before signalling kIncompleteInput, we should attempt to load the
+ * stream buffer with additional data.
+ *
+ * @return the number of bytes remaining in the stream buffer after
+ * attempting to read more bytes from the stream
+ */
+ size_t checkForMoreData();
+
+ /*
+ * Set an RLE pixel using the color table
+ */
+ void setPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t index);
+ /*
+ * Set an RLE24 pixel from R, G, B values
+ */
+ void setRGBPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t red, uint8_t green, uint8_t blue);
+
+ /*
+ * If dst is NULL, this is a signal to skip the rows.
+ */
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+ int decodeRLE(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes);
+
+ bool skipRows(int count) override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+
+ sk_sp<SkColorTable> fColorTable;
+ // fNumColors is the number specified in the header, or 0 if not present in the header.
+ const uint32_t fNumColors;
+ const uint32_t fBytesPerColor;
+ const uint32_t fOffset;
+
+ static constexpr size_t kBufferSize = 4096;
+ uint8_t fStreamBuffer[kBufferSize];
+ size_t fBytesBuffered;
+
+ uint32_t fCurrRLEByte;
+ int fSampleX;
+ std::unique_ptr<SkSampler> fSampler;
+
+ // Scanline decodes allow the client to ask for a single scanline at a time.
+ // This can be tricky when the RLE encoding instructs the decoder to jump down
+ // multiple lines. This field keeps track of lines that need to be skipped
+ // on subsequent calls to decodeRows().
+ int fLinesToSkip;
+
+ typedef SkBmpCodec INHERITED;
+};
+#endif // SkBmpRLECodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp b/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp
new file mode 100644
index 0000000000..e60100a50f
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkBmpStandardCodec.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/core/SkMathPriv.h"
+
+/*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+SkBmpStandardCodec::SkBmpStandardCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, uint32_t numColors,
+ uint32_t bytesPerColor, uint32_t offset,
+ SkCodec::SkScanlineOrder rowOrder,
+ bool isOpaque, bool inIco)
+ : INHERITED(std::move(info), std::move(stream), bitsPerPixel, rowOrder)
+ , fColorTable(nullptr)
+ , fNumColors(numColors)
+ , fBytesPerColor(bytesPerColor)
+ , fOffset(offset)
+ , fSwizzler(nullptr)
+ , fIsOpaque(isOpaque)
+ , fInIco(inIco)
+ , fAndMaskRowBytes(fInIco ? SkAlign4(compute_row_bytes(this->dimensions().width(), 1)) : 0)
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpStandardCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+ if (dstInfo.dimensions() != this->dimensions()) {
+ SkCodecPrintf("Error: scaling not supported.\n");
+ return kInvalidScale;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts);
+ if (kSuccess != result) {
+ return result;
+ }
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ *rowsDecoded = rows;
+ return kIncompleteInput;
+ }
+ return kSuccess;
+}
+
+/*
+ * Process the color table for the bmp input
+ */
+bool SkBmpStandardCodec::createColorTable(SkColorType dstColorType, SkAlphaType dstAlphaType) {
+ // Allocate memory for color table
+ uint32_t colorBytes = 0;
+ SkPMColor colorTable[256];
+ if (this->bitsPerPixel() <= 8) {
+ // Inform the caller of the number of colors
+ uint32_t maxColors = 1 << this->bitsPerPixel();
+ // Don't bother reading more than maxColors.
+ const uint32_t numColorsToRead =
+ fNumColors == 0 ? maxColors : SkTMin(fNumColors, maxColors);
+
+ // Read the color table from the stream
+ colorBytes = numColorsToRead * fBytesPerColor;
+ std::unique_ptr<uint8_t[]> cBuffer(new uint8_t[colorBytes]);
+ if (stream()->read(cBuffer.get(), colorBytes) != colorBytes) {
+ SkCodecPrintf("Error: unable to read color table.\n");
+ return false;
+ }
+
+ SkColorType packColorType = dstColorType;
+ SkAlphaType packAlphaType = dstAlphaType;
+ if (this->colorXform()) {
+ packColorType = kBGRA_8888_SkColorType;
+ packAlphaType = kUnpremul_SkAlphaType;
+ }
+
+ // Choose the proper packing function
+ bool isPremul = (kPremul_SkAlphaType == packAlphaType) && !fIsOpaque;
+ PackColorProc packARGB = choose_pack_color_proc(isPremul, packColorType);
+
+ // Fill in the color table
+ uint32_t i = 0;
+ for (; i < numColorsToRead; i++) {
+ uint8_t blue = get_byte(cBuffer.get(), i*fBytesPerColor);
+ uint8_t green = get_byte(cBuffer.get(), i*fBytesPerColor + 1);
+ uint8_t red = get_byte(cBuffer.get(), i*fBytesPerColor + 2);
+ uint8_t alpha;
+ if (fIsOpaque) {
+ alpha = 0xFF;
+ } else {
+ alpha = get_byte(cBuffer.get(), i*fBytesPerColor + 3);
+ }
+ colorTable[i] = packARGB(alpha, red, green, blue);
+ }
+
+ // To avoid segmentation faults on bad pixel data, fill the end of the
+        // color table with black. This is the same behavior as the
+        // Chromium decoder.
+ for (; i < maxColors; i++) {
+ colorTable[i] = SkPackARGB32NoCheck(0xFF, 0, 0, 0);
+ }
+
+ if (this->colorXform() && !this->xformOnDecode()) {
+ this->applyColorXform(colorTable, colorTable, maxColors);
+ }
+
+ // Set the color table
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ }
+
+ // Bmp-in-Ico files do not use an offset to indicate where the pixel data
+ // begins. Pixel data always begins immediately after the color table.
+ if (!fInIco) {
+ // Check that we have not read past the pixel array offset
+        if (fOffset < colorBytes) {
+ // This may occur on OS 2.1 and other old versions where the color
+ // table defaults to max size, and the bmp tries to use a smaller
+ // color table. This is invalid, and our decision is to indicate
+ // an error, rather than try to guess the intended size of the
+ // color table.
+ SkCodecPrintf("Error: pixel data offset less than color table size.\n");
+ return false;
+ }
+
+ // After reading the color table, skip to the start of the pixel array
+ if (stream()->skip(fOffset - colorBytes) != fOffset - colorBytes) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return false;
+ }
+ }
+
+ // Return true on success
+ return true;
+}
+
+static SkEncodedInfo make_info(SkEncodedInfo::Color color,
+ SkEncodedInfo::Alpha alpha, int bitsPerPixel) {
+ // This is just used for the swizzler, which does not need the width or height.
+ return SkEncodedInfo::Make(0, 0, color, alpha, bitsPerPixel);
+}
+
+SkEncodedInfo SkBmpStandardCodec::swizzlerInfo() const {
+ const auto& info = this->getEncodedInfo();
+ if (fInIco) {
+ if (this->bitsPerPixel() <= 8) {
+ return make_info(SkEncodedInfo::kPalette_Color,
+ info.alpha(), this->bitsPerPixel());
+ }
+ if (this->bitsPerPixel() == 24) {
+ return make_info(SkEncodedInfo::kBGR_Color,
+ SkEncodedInfo::kOpaque_Alpha, 8);
+ }
+ }
+
+ return make_info(info.color(), info.alpha(), info.bitsPerComponent());
+}
+
+void SkBmpStandardCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& opts) {
+ // In the case of bmp-in-icos, we will report BGRA to the client,
+ // since we may be required to apply an alpha mask after the decode.
+ // However, the swizzler needs to know the actual format of the bmp.
+ SkEncodedInfo encodedInfo = this->swizzlerInfo();
+
+ // Get a pointer to the color table if it exists
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+
+ SkImageInfo swizzlerInfo = dstInfo;
+ SkCodec::Options swizzlerOptions = opts;
+ if (this->colorXform()) {
+ swizzlerInfo = swizzlerInfo.makeColorType(kXformSrcColorType);
+ if (kPremul_SkAlphaType == dstInfo.alphaType()) {
+ swizzlerInfo = swizzlerInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+
+ swizzlerOptions.fZeroInitialized = kNo_ZeroInitialized;
+ }
+
+ fSwizzler = SkSwizzler::Make(encodedInfo, colorPtr, swizzlerInfo, swizzlerOptions);
+ SkASSERT(fSwizzler);
+}
+
+SkCodec::Result SkBmpStandardCodec::onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ if (this->xformOnDecode()) {
+ this->resetXformBuffer(dstInfo.width());
+ }
+
+ // Create the color table if necessary and prepare the stream for decode
+    // The color table depends on the destination color type and alpha type
+ if (!this->createColorTable(dstInfo.colorType(), dstInfo.alphaType())) {
+ SkCodecPrintf("Error: could not create color table.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ // Initialize a swizzler
+ this->initializeSwizzler(dstInfo, options);
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the bitmap decoding for standard input format
+ */
+int SkBmpStandardCodec::decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ // Iterate over rows of the image
+ const int height = dstInfo.height();
+ for (int y = 0; y < height; y++) {
+ // Read a row of the input
+ if (this->stream()->read(this->srcBuffer(), this->srcRowBytes()) != this->srcRowBytes()) {
+ SkCodecPrintf("Warning: incomplete input stream.\n");
+ return y;
+ }
+
+ // Decode the row in destination format
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ void* dstRow = SkTAddOffset<void>(dst, row * dstRowBytes);
+
+ if (this->xformOnDecode()) {
+ SkASSERT(this->colorXform());
+ fSwizzler->swizzle(this->xformBuffer(), this->srcBuffer());
+ this->applyColorXform(dstRow, this->xformBuffer(), fSwizzler->swizzleWidth());
+ } else {
+ fSwizzler->swizzle(dstRow, this->srcBuffer());
+ }
+ }
+
+ if (fInIco && fIsOpaque) {
+ const int startScanline = this->currScanline();
+ if (startScanline < 0) {
+ // We are not performing a scanline decode.
+ // Just decode the entire ICO mask and return.
+ decodeIcoMask(this->stream(), dstInfo, dst, dstRowBytes);
+ return height;
+ }
+
+ // In order to perform a scanline ICO decode, we must be able
+ // to skip ahead in the stream in order to apply the AND mask
+ // to the requested scanlines.
+ // We will do this by taking advantage of the fact that
+ // SkIcoCodec always uses a SkMemoryStream as its underlying
+ // representation of the stream.
+ const void* memoryBase = this->stream()->getMemoryBase();
+ SkASSERT(nullptr != memoryBase);
+ SkASSERT(this->stream()->hasLength());
+ SkASSERT(this->stream()->hasPosition());
+
+ const size_t length = this->stream()->getLength();
+ const size_t currPosition = this->stream()->getPosition();
+
+ // Calculate how many bytes we must skip to reach the AND mask.
+ const int remainingScanlines = this->dimensions().height() - startScanline - height;
+ const size_t bytesToSkip = remainingScanlines * this->srcRowBytes() +
+ startScanline * fAndMaskRowBytes;
+ const size_t subStreamStartPosition = currPosition + bytesToSkip;
+ if (subStreamStartPosition >= length) {
+ // FIXME: How can we indicate that this decode was actually incomplete?
+ return height;
+ }
+
+ // Create a subStream to pass to decodeIcoMask(). It is useful to encapsulate
+ // the memory base into a stream in order to safely handle incomplete images
+ // without reading out of bounds memory.
+ const void* subStreamMemoryBase = SkTAddOffset<const void>(memoryBase,
+ subStreamStartPosition);
+ const size_t subStreamLength = length - subStreamStartPosition;
+ // This call does not transfer ownership of the subStreamMemoryBase.
+ SkMemoryStream subStream(subStreamMemoryBase, subStreamLength, false);
+
+ // FIXME: If decodeIcoMask does not succeed, is there a way that we can
+ // indicate the decode was incomplete?
+ decodeIcoMask(&subStream, dstInfo, dst, dstRowBytes);
+ }
+
+ return height;
+}
+
+void SkBmpStandardCodec::decodeIcoMask(SkStream* stream, const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes) {
+    // BMPs in ICO files have transparency, so this cannot be 565. The code
+    // below depends on the output being an SkPMColor.
+ SkASSERT(kRGBA_8888_SkColorType == dstInfo.colorType() ||
+ kBGRA_8888_SkColorType == dstInfo.colorType() ||
+ kRGBA_F16_SkColorType == dstInfo.colorType());
+
+ // If we are sampling, make sure that we only mask the sampled pixels.
+ // We do not need to worry about sampling in the y-dimension because that
+ // should be handled by SkSampledCodec.
+ const int sampleX = fSwizzler->sampleX();
+ const int sampledWidth = get_scaled_dimension(this->dimensions().width(), sampleX);
+ const int srcStartX = get_start_coord(sampleX);
+
+
+ SkPMColor* dstPtr = (SkPMColor*) dst;
+ for (int y = 0; y < dstInfo.height(); y++) {
+        // The srcBuffer is at least large enough to hold a row of the AND mask
+ if (stream->read(this->srcBuffer(), fAndMaskRowBytes) != fAndMaskRowBytes) {
+ SkCodecPrintf("Warning: incomplete AND mask for bmp-in-ico.\n");
+ return;
+ }
+
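+        // In the AND mask, a bit of 1 marks a transparent pixel. Since bit
+        // is either 0 or 1, (bit - 1) is all ones for an opaque pixel and
+        // all zeros for a transparent one, so the bitwise AND below clears
+        // transparent pixels and leaves opaque pixels untouched.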
+ auto applyMask = [dstInfo](void* dstRow, int x, uint64_t bit) {
+ if (kRGBA_F16_SkColorType == dstInfo.colorType()) {
+ uint64_t* dst64 = (uint64_t*) dstRow;
+ dst64[x] &= bit - 1;
+ } else {
+ uint32_t* dst32 = (uint32_t*) dstRow;
+ dst32[x] &= bit - 1;
+ }
+ };
+
+ int row = this->getDstRow(y, dstInfo.height());
+
+ void* dstRow = SkTAddOffset<SkPMColor>(dstPtr, row * dstRowBytes);
+
+ int srcX = srcStartX;
+ for (int dstX = 0; dstX < sampledWidth; dstX++) {
+ int quotient;
+ int modulus;
+ SkTDivMod(srcX, 8, &quotient, &modulus);
+ uint32_t shift = 7 - modulus;
+ uint64_t alphaBit = (this->srcBuffer()[quotient] >> shift) & 0x1;
+ applyMask(dstRow, dstX, alphaBit);
+ srcX += sampleX;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkBmpStandardCodec.h b/gfx/skia/skia/src/codec/SkBmpStandardCodec.h
new file mode 100644
index 0000000000..966330ef4a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpStandardCodec.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBmpStandardCodec_DEFINED
+#define SkBmpStandardCodec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkBmpBaseCodec.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkSwizzler.h"
+
+/*
+ * This class implements the decoding for bmp images that use "standard" modes,
+ * which essentially means they do not contain bit masks or RLE codes.
+ */
+class SkBmpStandardCodec : public SkBmpBaseCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::MakeFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param numColors the number of colors in the color table
+ * @param bytesPerColor the number of bytes in the stream used to represent
+ *                      each color in the color table
+ * @param offset the offset of the image pixel data from the end of the
+ * headers
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ * @param isOpaque indicates if the bmp itself is opaque (before applying
+ *                      the ico mask, if there is one)
+ * @param inIco indicates if the bmp is embedded in an ico file
+ */
+ SkBmpStandardCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ uint16_t bitsPerPixel, uint32_t numColors, uint32_t bytesPerColor,
+ uint32_t offset, SkCodec::SkScanlineOrder rowOrder,
+ bool isOpaque, bool inIco);
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&,
+ int*) override;
+
+ bool onInIco() const override {
+ return fInIco;
+ }
+
+ SkCodec::Result onPrepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) override;
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler);
+ return fSwizzler.get();
+ }
+
+private:
+ bool createColorTable(SkColorType colorType, SkAlphaType alphaType);
+ SkEncodedInfo swizzlerInfo() const;
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options& opts);
+
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+
+ /*
+ * @param stream This may be a pointer to the stream owned by the parent SkCodec
+ * or a sub-stream of the stream owned by the parent SkCodec.
+ * Either way, this stream is unowned.
+ */
+ void decodeIcoMask(SkStream* stream, const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes);
+
+ sk_sp<SkColorTable> fColorTable;
+ // fNumColors is the number specified in the header, or 0 if not present in the header.
+ const uint32_t fNumColors;
+ const uint32_t fBytesPerColor;
+ const uint32_t fOffset;
+ std::unique_ptr<SkSwizzler> fSwizzler;
+ const bool fIsOpaque;
+ const bool fInIco;
+ const size_t fAndMaskRowBytes; // only used for fInIco decodes
+
+ typedef SkBmpBaseCodec INHERITED;
+};
+#endif // SkBmpStandardCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkCodec.cpp b/gfx/skia/skia/src/codec/SkCodec.cpp
new file mode 100644
index 0000000000..cc5b9bff6e
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodec.cpp
@@ -0,0 +1,867 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/private/SkHalf.h"
+#include "src/codec/SkBmpCodec.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkFrameHolder.h"
+#ifdef SK_HAS_HEIF_LIBRARY
+#include "src/codec/SkHeifCodec.h"
+#endif
+#include "src/codec/SkIcoCodec.h"
+#include "src/codec/SkJpegCodec.h"
+#ifdef SK_HAS_PNG_LIBRARY
+#include "src/codec/SkPngCodec.h"
+#endif
+#include "include/core/SkStream.h"
+#include "src/codec/SkRawCodec.h"
+#include "src/codec/SkWbmpCodec.h"
+#include "src/codec/SkWebpCodec.h"
+#ifdef SK_HAS_WUFFS_LIBRARY
+#include "src/codec/SkWuffsCodec.h"
+#else
+#include "src/codec/SkGifCodec.h"
+#endif
+
+struct DecoderProc {
+ bool (*IsFormat)(const void*, size_t);
+ std::unique_ptr<SkCodec> (*MakeFromStream)(std::unique_ptr<SkStream>, SkCodec::Result*);
+};
+
+static std::vector<DecoderProc>* decoders() {
+ static auto* decoders = new std::vector<DecoderProc> {
+ #ifdef SK_HAS_JPEG_LIBRARY
+ { SkJpegCodec::IsJpeg, SkJpegCodec::MakeFromStream },
+ #endif
+ #ifdef SK_HAS_WEBP_LIBRARY
+ { SkWebpCodec::IsWebp, SkWebpCodec::MakeFromStream },
+ #endif
+ #ifdef SK_HAS_WUFFS_LIBRARY
+ { SkWuffsCodec_IsFormat, SkWuffsCodec_MakeFromStream },
+ #else
+ { SkGifCodec::IsGif, SkGifCodec::MakeFromStream },
+ #endif
+ #ifdef SK_HAS_PNG_LIBRARY
+ { SkIcoCodec::IsIco, SkIcoCodec::MakeFromStream },
+ #endif
+ { SkBmpCodec::IsBmp, SkBmpCodec::MakeFromStream },
+ { SkWbmpCodec::IsWbmp, SkWbmpCodec::MakeFromStream },
+ };
+ return decoders;
+}
+
+void SkCodec::Register(
+ bool (*peek)(const void*, size_t),
+ std::unique_ptr<SkCodec> (*make)(std::unique_ptr<SkStream>, SkCodec::Result*)) {
+ decoders()->push_back(DecoderProc{peek, make});
+}
+
+std::unique_ptr<SkCodec> SkCodec::MakeFromStream(
+ std::unique_ptr<SkStream> stream, Result* outResult,
+ SkPngChunkReader* chunkReader, SelectionPolicy selectionPolicy) {
+ Result resultStorage;
+ if (!outResult) {
+ outResult = &resultStorage;
+ }
+
+ if (!stream) {
+ *outResult = kInvalidInput;
+ return nullptr;
+ }
+
+ if (selectionPolicy != SelectionPolicy::kPreferStillImage
+ && selectionPolicy != SelectionPolicy::kPreferAnimation) {
+ *outResult = kInvalidParameters;
+ return nullptr;
+ }
+
+ constexpr size_t bytesToRead = MinBufferedBytesNeeded();
+
+ char buffer[bytesToRead];
+ size_t bytesRead = stream->peek(buffer, bytesToRead);
+
+ // It is also possible to have a complete image less than bytesToRead bytes
+ // (e.g. a 1 x 1 wbmp), meaning peek() would return less than bytesToRead.
+ // Assume that if bytesRead < bytesToRead, but > 0, the stream is shorter
+ // than bytesToRead, so pass that directly to the decoder.
+ // It also is possible the stream uses too small a buffer for peeking, but
+ // we trust the caller to use a large enough buffer.
+
+ if (0 == bytesRead) {
+ // TODO: After implementing peek in CreateJavaOutputStreamAdaptor.cpp, this
+ // printf could be useful to notice failures.
+ // SkCodecPrintf("Encoded image data failed to peek!\n");
+
+ // It is possible the stream does not support peeking, but does support
+ // rewinding.
+ // Attempt to read() and pass the actual amount read to the decoder.
+ bytesRead = stream->read(buffer, bytesToRead);
+ if (!stream->rewind()) {
+ SkCodecPrintf("Encoded image data could not peek or rewind to determine format!\n");
+ *outResult = kCouldNotRewind;
+ return nullptr;
+ }
+ }
+
+ // PNG is special, since we want to be able to supply an SkPngChunkReader.
+ // But this code follows the same pattern as the loop.
+#ifdef SK_HAS_PNG_LIBRARY
+ if (SkPngCodec::IsPng(buffer, bytesRead)) {
+ return SkPngCodec::MakeFromStream(std::move(stream), outResult, chunkReader);
+ } else
+#endif
+ {
+ for (DecoderProc proc : *decoders()) {
+ if (proc.IsFormat(buffer, bytesRead)) {
+ return proc.MakeFromStream(std::move(stream), outResult);
+ }
+ }
+
+#ifdef SK_HAS_HEIF_LIBRARY
+ if (SkHeifCodec::IsHeif(buffer, bytesRead)) {
+ return SkHeifCodec::MakeFromStream(std::move(stream), selectionPolicy, outResult);
+ }
+#endif
+
+#ifdef SK_CODEC_DECODES_RAW
+ // Try to treat the input as RAW if all the other checks failed.
+ return SkRawCodec::MakeFromStream(std::move(stream), outResult);
+#endif
+ }
+
+ if (bytesRead < bytesToRead) {
+ *outResult = kIncompleteInput;
+ } else {
+ *outResult = kUnimplemented;
+ }
+
+ return nullptr;
+}
+
+std::unique_ptr<SkCodec> SkCodec::MakeFromData(sk_sp<SkData> data, SkPngChunkReader* reader) {
+ if (!data) {
+ return nullptr;
+ }
+ return MakeFromStream(SkMemoryStream::Make(std::move(data)), nullptr, reader);
+}
+
+SkCodec::SkCodec(SkEncodedInfo&& info, XformFormat srcFormat, std::unique_ptr<SkStream> stream,
+ SkEncodedOrigin origin)
+ : fEncodedInfo(std::move(info))
+ , fSrcXformFormat(srcFormat)
+ , fStream(std::move(stream))
+ , fNeedsRewind(false)
+ , fOrigin(origin)
+ , fDstInfo()
+ , fOptions()
+ , fCurrScanline(-1)
+ , fStartedIncrementalDecode(false)
+{}
+
+SkCodec::~SkCodec() {}
+
+bool SkCodec::conversionSupported(const SkImageInfo& dst, bool srcIsOpaque, bool needsColorXform) {
+ if (!valid_alpha(dst.alphaType(), srcIsOpaque)) {
+ return false;
+ }
+
+ switch (dst.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return true;
+ case kRGBA_F16_SkColorType:
+ return dst.colorSpace();
+ case kRGB_565_SkColorType:
+ return srcIsOpaque;
+ case kGray_8_SkColorType:
+ return SkEncodedInfo::kGray_Color == fEncodedInfo.color() && srcIsOpaque;
+ case kAlpha_8_SkColorType:
+ // conceptually we can convert anything into alpha_8, but we haven't actually coded
+ // all of those other conversions yet.
+ return SkEncodedInfo::kXAlpha_Color == fEncodedInfo.color();
+ default:
+ return false;
+ }
+}
+
+bool SkCodec::rewindIfNeeded() {
+ // Store the value of fNeedsRewind so we can update it. Next read will
+ // require a rewind.
+ const bool needsRewind = fNeedsRewind;
+ fNeedsRewind = true;
+ if (!needsRewind) {
+ return true;
+ }
+
+ // startScanlineDecode will need to be called before decoding scanlines.
+ fCurrScanline = -1;
+ // startIncrementalDecode will need to be called before incrementalDecode.
+ fStartedIncrementalDecode = false;
+
+ // Some codecs do not have a stream. They may hold onto their own data or another codec.
+ // They must handle rewinding themselves.
+ if (fStream && !fStream->rewind()) {
+ return false;
+ }
+
+ return this->onRewind();
+}
+
+static SkIRect frame_rect_on_screen(SkIRect frameRect,
+ const SkIRect& screenRect) {
+ if (!frameRect.intersect(screenRect)) {
+ return SkIRect::MakeEmpty();
+ }
+
+ return frameRect;
+}
+
+bool zero_rect(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ SkISize srcDimensions, SkIRect prevRect) {
+ prevRect = frame_rect_on_screen(prevRect, SkIRect::MakeSize(srcDimensions));
+ if (prevRect.isEmpty()) {
+ return true;
+ }
+ const auto dimensions = dstInfo.dimensions();
+ if (dimensions != srcDimensions) {
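+        // This is a scaled decode; map prevRect from the source coordinate
+        // space into the destination space before erasing it.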
+ SkRect src = SkRect::Make(srcDimensions);
+ SkRect dst = SkRect::Make(dimensions);
+ SkMatrix map = SkMatrix::MakeRectToRect(src, dst, SkMatrix::kCenter_ScaleToFit);
+ SkRect asRect = SkRect::Make(prevRect);
+ if (!map.mapRect(&asRect)) {
+ return false;
+ }
+ asRect.roundIn(&prevRect);
+ if (prevRect.isEmpty()) {
+ // Down-scaling shrank the empty portion to nothing,
+ // so nothing to zero.
+ return true;
+ }
+ }
+
+ const SkImageInfo info = dstInfo.makeDimensions(prevRect.size());
+ const size_t bpp = dstInfo.bytesPerPixel();
+ const size_t offset = prevRect.x() * bpp + prevRect.y() * rowBytes;
+ void* eraseDst = SkTAddOffset<void>(pixels, offset);
+ SkSampler::Fill(info, eraseDst, rowBytes, SkCodec::kNo_ZeroInitialized);
+ return true;
+}
+
+SkCodec::Result SkCodec::handleFrameIndex(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options& options) {
+ const int index = options.fFrameIndex;
+ if (0 == index) {
+ return this->initializeColorXform(info, fEncodedInfo.alpha(), fEncodedInfo.opaque())
+ ? kSuccess : kInvalidConversion;
+ }
+
+ if (index < 0) {
+ return kInvalidParameters;
+ }
+
+ if (options.fSubset) {
+ // If we add support for this, we need to update the code that zeroes
+ // a kRestoreBGColor frame.
+ return kInvalidParameters;
+ }
+
+ if (index >= this->onGetFrameCount()) {
+ return kIncompleteInput;
+ }
+
+ const auto* frameHolder = this->getFrameHolder();
+ SkASSERT(frameHolder);
+
+ const auto* frame = frameHolder->getFrame(index);
+ SkASSERT(frame);
+
+ const int requiredFrame = frame->getRequiredFrame();
+ if (requiredFrame != kNoFrame) {
+ if (options.fPriorFrame != kNoFrame) {
+ // Check for a valid frame as a starting point. Alternatively, we could
+ // treat an invalid frame as not providing one, but rejecting it will
+ // make it easier to catch the mistake.
+ if (options.fPriorFrame < requiredFrame || options.fPriorFrame >= index) {
+ return kInvalidParameters;
+ }
+ const auto* prevFrame = frameHolder->getFrame(options.fPriorFrame);
+ switch (prevFrame->getDisposalMethod()) {
+ case SkCodecAnimation::DisposalMethod::kRestorePrevious:
+ return kInvalidParameters;
+ case SkCodecAnimation::DisposalMethod::kRestoreBGColor:
+ // If a frame after the required frame is provided, there is no
+ // need to clear, since it must be covered by the desired frame.
+ if (options.fPriorFrame == requiredFrame) {
+ SkIRect prevRect = prevFrame->frameRect();
+ if (!zero_rect(info, pixels, rowBytes, this->dimensions(), prevRect)) {
+ return kInternalError;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ Options prevFrameOptions(options);
+ prevFrameOptions.fFrameIndex = requiredFrame;
+ prevFrameOptions.fZeroInitialized = kNo_ZeroInitialized;
+ const Result result = this->getPixels(info, pixels, rowBytes, &prevFrameOptions);
+ if (result != kSuccess) {
+ return result;
+ }
+ const auto* prevFrame = frameHolder->getFrame(requiredFrame);
+ const auto disposalMethod = prevFrame->getDisposalMethod();
+ if (disposalMethod == SkCodecAnimation::DisposalMethod::kRestoreBGColor) {
+ auto prevRect = prevFrame->frameRect();
+ if (!zero_rect(info, pixels, rowBytes, this->dimensions(), prevRect)) {
+ return kInternalError;
+ }
+ }
+ }
+ }
+
+ return this->initializeColorXform(info, frame->reportedAlpha(), !frame->hasAlpha())
+ ? kSuccess : kInvalidConversion;
+}
+
+SkCodec::Result SkCodec::getPixels(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ const Options* options) {
+ SkImageInfo info = dstInfo;
+ if (!info.colorSpace()) {
+ info = info.makeColorSpace(SkColorSpace::MakeSRGB());
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ return kInvalidConversion;
+ }
+ if (nullptr == pixels) {
+ return kInvalidParameters;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return kInvalidParameters;
+ }
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Default options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else {
+ if (options->fSubset) {
+ SkIRect subset(*options->fSubset);
+ if (!this->onGetValidSubset(&subset) || subset != *options->fSubset) {
+ // FIXME: How to differentiate between not supporting subset at all
+ // and not supporting this particular subset?
+ return kUnimplemented;
+ }
+ }
+ }
+
+ const Result frameIndexResult = this->handleFrameIndex(info, pixels, rowBytes,
+ *options);
+ if (frameIndexResult != kSuccess) {
+ return frameIndexResult;
+ }
+
+ // FIXME: Support subsets somehow? Note that this works for SkWebpCodec
+ // because it supports arbitrary scaling/subset combinations.
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ fDstInfo = info;
+ fOptions = *options;
+
+ // On an incomplete decode, the subclass will specify the number of scanlines that it decoded
+ // successfully.
+ int rowsDecoded = 0;
+ const Result result = this->onGetPixels(info, pixels, rowBytes, *options, &rowsDecoded);
+
+ // A return value of kIncompleteInput indicates a truncated image stream.
+ // In this case, we will fill any uninitialized memory with a default value.
+ // Some subclasses will take care of filling any uninitialized memory on
+ // their own. They indicate that all of the memory has been filled by
+ // setting rowsDecoded equal to the height.
+ if ((kIncompleteInput == result || kErrorInInput == result) && rowsDecoded != info.height()) {
+ // FIXME: (skbug.com/5772) fillIncompleteImage will fill using the swizzler's width, unless
+ // there is a subset. In that case, it will use the width of the subset. From here, the
+ // subset will only be non-null in the case of SkWebpCodec, but it treats the subset
+        // differently from the other codecs, and it needs to use the width specified by the info.
+ // Set the subset to null so SkWebpCodec uses the correct width.
+ fOptions.fSubset = nullptr;
+ this->fillIncompleteImage(info, pixels, rowBytes, options->fZeroInitialized, info.height(),
+ rowsDecoded);
+ }
+
+ return result;
+}
+
+SkCodec::Result SkCodec::startIncrementalDecode(const SkImageInfo& dstInfo, void* pixels,
+ size_t rowBytes, const SkCodec::Options* options) {
+ fStartedIncrementalDecode = false;
+
+ SkImageInfo info = dstInfo;
+ if (!info.colorSpace()) {
+ info = info.makeColorSpace(SkColorSpace::MakeSRGB());
+ }
+ if (kUnknown_SkColorType == info.colorType()) {
+ return kInvalidConversion;
+ }
+ if (nullptr == pixels) {
+ return kInvalidParameters;
+ }
+
+ // FIXME: If the rows come after the rows of a previous incremental decode,
+ // we might be able to skip the rewind, but only the implementation knows
+ // that. (e.g. PNG will always need to rewind, since we called longjmp, but
+ // a bottom-up BMP could skip rewinding if the new rows are above the old
+ // rows.)
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Set options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else {
+ if (options->fSubset) {
+ SkIRect size = SkIRect::MakeSize(info.dimensions());
+ if (!size.contains(*options->fSubset)) {
+ return kInvalidParameters;
+ }
+
+ const int top = options->fSubset->top();
+ const int bottom = options->fSubset->bottom();
+ if (top < 0 || top >= info.height() || top >= bottom || bottom > info.height()) {
+ return kInvalidParameters;
+ }
+ }
+ }
+
+ const Result frameIndexResult = this->handleFrameIndex(info, pixels, rowBytes,
+ *options);
+ if (frameIndexResult != kSuccess) {
+ return frameIndexResult;
+ }
+
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ fDstInfo = info;
+ fOptions = *options;
+
+ const Result result = this->onStartIncrementalDecode(info, pixels, rowBytes, fOptions);
+ if (kSuccess == result) {
+ fStartedIncrementalDecode = true;
+ } else if (kUnimplemented == result) {
+ // FIXME: This is temporarily necessary, until we transition SkCodec
+ // implementations from scanline decoding to incremental decoding.
+ // SkAndroidCodec will first attempt to use incremental decoding, but
+ // will fall back to scanline decoding if incremental returns
+ // kUnimplemented. rewindIfNeeded(), above, set fNeedsRewind to true
+ // (after potentially rewinding), but we do not want the next call to
+ // startScanlineDecode() to do a rewind.
+ fNeedsRewind = false;
+ }
+ return result;
+}
+
+
+SkCodec::Result SkCodec::startScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options* options) {
+ // Reset fCurrScanline in case of failure.
+ fCurrScanline = -1;
+
+ SkImageInfo info = dstInfo;
+ if (!info.colorSpace()) {
+ info = info.makeColorSpace(SkColorSpace::MakeSRGB());
+ }
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Set options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else if (options->fSubset) {
+ SkIRect size = SkIRect::MakeSize(info.dimensions());
+ if (!size.contains(*options->fSubset)) {
+ return kInvalidInput;
+ }
+
+ // We only support subsetting in the x-dimension for scanline decoder.
+ // Subsetting in the y-dimension can be accomplished using skipScanlines().
+ if (options->fSubset->top() != 0 || options->fSubset->height() != info.height()) {
+ return kInvalidInput;
+ }
+ }
+
+ // Scanline decoding only supports decoding the first frame.
+ if (options->fFrameIndex != 0) {
+ return kUnimplemented;
+ }
+
+    // The void* dst and rowBytes in handleFrameIndex are only used for decoding prior
+    // frames, which is not supported here anyway, so it is safe to pass nullptr/0.
+ const Result frameIndexResult = this->handleFrameIndex(info, nullptr, 0, *options);
+ if (frameIndexResult != kSuccess) {
+ return frameIndexResult;
+ }
+
+ // FIXME: Support subsets somehow?
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ const Result result = this->onStartScanlineDecode(info, *options);
+ if (result != SkCodec::kSuccess) {
+ return result;
+ }
+
+ fCurrScanline = 0;
+ fDstInfo = info;
+ fOptions = *options;
+ return kSuccess;
+}
+
+int SkCodec::getScanlines(void* dst, int countLines, size_t rowBytes) {
+ if (fCurrScanline < 0) {
+ return 0;
+ }
+
+ SkASSERT(!fDstInfo.isEmpty());
+ if (countLines <= 0 || fCurrScanline + countLines > fDstInfo.height()) {
+ return 0;
+ }
+
+ const int linesDecoded = this->onGetScanlines(dst, countLines, rowBytes);
+ if (linesDecoded < countLines) {
+ this->fillIncompleteImage(this->dstInfo(), dst, rowBytes, this->options().fZeroInitialized,
+ countLines, linesDecoded);
+ }
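+    // Note that fCurrScanline advances by the full requested count even when
+    // the decode was incomplete; the filled rows are treated as consumed.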
+ fCurrScanline += countLines;
+ return linesDecoded;
+}
+
+bool SkCodec::skipScanlines(int countLines) {
+ if (fCurrScanline < 0) {
+ return false;
+ }
+
+ SkASSERT(!fDstInfo.isEmpty());
+ if (countLines < 0 || fCurrScanline + countLines > fDstInfo.height()) {
+        // Arguably, we could just skip the remaining scanlines and
+        // return true. We choose to return false so the client
+        // can catch their bug.
+ return false;
+ }
+
+ bool result = this->onSkipScanlines(countLines);
+ fCurrScanline += countLines;
+ return result;
+}
+
+int SkCodec::outputScanline(int inputScanline) const {
+ SkASSERT(0 <= inputScanline && inputScanline < fEncodedInfo.height());
+ return this->onOutputScanline(inputScanline);
+}
+
+int SkCodec::onOutputScanline(int inputScanline) const {
+ switch (this->getScanlineOrder()) {
+ case kTopDown_SkScanlineOrder:
+ return inputScanline;
+ case kBottomUp_SkScanlineOrder:
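+            // For example, with a height of 100, input scanline 0 maps to
+            // output scanline 99.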
+ return fEncodedInfo.height() - inputScanline - 1;
+ default:
+ // This case indicates an interlaced gif and is implemented by SkGifCodec.
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+void SkCodec::fillIncompleteImage(const SkImageInfo& info, void* dst, size_t rowBytes,
+ ZeroInitialized zeroInit, int linesRequested, int linesDecoded) {
+ if (kYes_ZeroInitialized == zeroInit) {
+ return;
+ }
+
+ const int linesRemaining = linesRequested - linesDecoded;
+ SkSampler* sampler = this->getSampler(false);
+
+ const int fillWidth = sampler ? sampler->fillWidth() :
+ fOptions.fSubset ? fOptions.fSubset->width() :
+ info.width() ;
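+    // For bottom-up images the rows are decoded starting from the bottom of
+    // the destination, so the rows that were never decoded are at the start
+    // of the buffer; for top-down images they follow the decoded rows.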
+ void* fillDst = this->getScanlineOrder() == kBottomUp_SkScanlineOrder ? dst :
+ SkTAddOffset<void>(dst, linesDecoded * rowBytes);
+ const auto fillInfo = info.makeWH(fillWidth, linesRemaining);
+ SkSampler::Fill(fillInfo, fillDst, rowBytes, kNo_ZeroInitialized);
+}
+
+bool sk_select_xform_format(SkColorType colorType, bool forColorTable,
+ skcms_PixelFormat* outFormat) {
+ SkASSERT(outFormat);
+
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ *outFormat = skcms_PixelFormat_RGBA_8888;
+ break;
+ case kBGRA_8888_SkColorType:
+ *outFormat = skcms_PixelFormat_BGRA_8888;
+ break;
+ case kRGB_565_SkColorType:
+ if (forColorTable) {
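+                // The color table entries are 32-bit SkPMColors even when
+                // the final destination is 565 (they are converted to 565
+                // during swizzling), so transform the table in SkPMColor
+                // order here.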
+#ifdef SK_PMCOLOR_IS_RGBA
+ *outFormat = skcms_PixelFormat_RGBA_8888;
+#else
+ *outFormat = skcms_PixelFormat_BGRA_8888;
+#endif
+ break;
+ }
+ *outFormat = skcms_PixelFormat_BGR_565;
+ break;
+ case kRGBA_F16_SkColorType:
+ *outFormat = skcms_PixelFormat_RGBA_hhhh;
+ break;
+ case kGray_8_SkColorType:
+ *outFormat = skcms_PixelFormat_G_8;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool SkCodec::initializeColorXform(const SkImageInfo& dstInfo, SkEncodedInfo::Alpha encodedAlpha,
+ bool srcIsOpaque) {
+ fXformTime = kNo_XformTime;
+ bool needsColorXform = false;
+ if (this->usesColorXform() && dstInfo.colorSpace()) {
+ dstInfo.colorSpace()->toProfile(&fDstProfile);
+ if (kRGBA_F16_SkColorType == dstInfo.colorType()) {
+ needsColorXform = true;
+ } else {
+ const auto* srcProfile = fEncodedInfo.profile();
+ if (!srcProfile) {
+ srcProfile = skcms_sRGB_profile();
+ }
+            if (!skcms_ApproximatelyEqualProfiles(srcProfile, &fDstProfile)) {
+ needsColorXform = true;
+ }
+ }
+ }
+
+ if (!this->conversionSupported(dstInfo, srcIsOpaque, needsColorXform)) {
+ return false;
+ }
+
+ if (needsColorXform) {
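+        // Palette images can be transformed once, on the color table itself,
+        // unless the destination is F16 (the table holds 8888 colors). In
+        // that case, and for non-palette images, each row is transformed as
+        // it is decoded.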
+ fXformTime = SkEncodedInfo::kPalette_Color != fEncodedInfo.color()
+ || kRGBA_F16_SkColorType == dstInfo.colorType()
+ ? kDecodeRow_XformTime : kPalette_XformTime;
+ if (!sk_select_xform_format(dstInfo.colorType(), fXformTime == kPalette_XformTime,
+ &fDstXformFormat)) {
+ return false;
+ }
+ if (encodedAlpha == SkEncodedInfo::kUnpremul_Alpha
+ && dstInfo.alphaType() == kPremul_SkAlphaType) {
+ fDstXformAlphaFormat = skcms_AlphaFormat_PremulAsEncoded;
+ } else {
+ fDstXformAlphaFormat = skcms_AlphaFormat_Unpremul;
+ }
+ }
+ return true;
+}
+
+void SkCodec::applyColorXform(void* dst, const void* src, int count) const {
+ // It is okay for srcProfile to be null. This will use sRGB.
+ const auto* srcProfile = fEncodedInfo.profile();
+ SkAssertResult(skcms_Transform(src, fSrcXformFormat, skcms_AlphaFormat_Unpremul, srcProfile,
+ dst, fDstXformFormat, fDstXformAlphaFormat, &fDstProfile,
+ count));
+}
+
+std::vector<SkCodec::FrameInfo> SkCodec::getFrameInfo() {
+ const int frameCount = this->getFrameCount();
+ SkASSERT(frameCount >= 0);
+ if (frameCount <= 0) {
+ return std::vector<FrameInfo>{};
+ }
+
+ if (frameCount == 1 && !this->onGetFrameInfo(0, nullptr)) {
+ // Not animated.
+ return std::vector<FrameInfo>{};
+ }
+
+ std::vector<FrameInfo> result(frameCount);
+ for (int i = 0; i < frameCount; ++i) {
+ SkAssertResult(this->onGetFrameInfo(i, &result[i]));
+ }
+ return result;
+}
+
+const char* SkCodec::ResultToString(Result result) {
+ switch (result) {
+ case kSuccess:
+ return "success";
+ case kIncompleteInput:
+ return "incomplete input";
+ case kErrorInInput:
+ return "error in input";
+ case kInvalidConversion:
+ return "invalid conversion";
+ case kInvalidScale:
+ return "invalid scale";
+ case kInvalidParameters:
+ return "invalid parameters";
+ case kInvalidInput:
+ return "invalid input";
+ case kCouldNotRewind:
+ return "could not rewind";
+ case kInternalError:
+ return "internal error";
+ case kUnimplemented:
+ return "unimplemented";
+ default:
+ SkASSERT(false);
+ return "bogus result value";
+ }
+}
+
+static bool independent(const SkFrame& frame) {
+ return frame.getRequiredFrame() == SkCodec::kNoFrame;
+}
+
+static bool restore_bg(const SkFrame& frame) {
+ return frame.getDisposalMethod() == SkCodecAnimation::DisposalMethod::kRestoreBGColor;
+}
+
+// As its name suggests, this method computes a frame's alpha (e.g. completely
+// opaque, unpremul, binary) and its required frame (a preceding frame that
+// this frame depends on, to draw the complete image at this frame's point in
+// the animation stream), and calls this frame's setter methods with that
+// computed information.
+//
+// A required frame of kNoFrame means that this frame is independent: drawing
+// the complete image at this frame's point in the animation stream does not
+// require first preparing the pixel buffer based on another frame. Instead,
+// drawing can start from an uninitialized pixel buffer.
+//
+// "Uninitialized" is from the SkCodec's caller's point of view. In the SkCodec
+// implementation, for independent frames, first party Skia code (in src/codec)
+// will typically fill the buffer with a uniform background color (e.g.
+// transparent black) before calling into third party codec-specific code (e.g.
+// libjpeg or libpng). Pixels outside of the frame's rect will remain this
+// background color after drawing this frame. For incomplete decodes, pixels
+// inside that rect may be (at least temporarily) set to that background color.
+// In an incremental decode, later passes may then overwrite that background
+// color.
+//
+// Determining kNoFrame or otherwise involves testing a number of conditions
+// sequentially. The first satisfied condition results in setting the required
+// frame to kNoFrame (an "INDx" condition) or to a non-negative frame number (a
+// "DEPx" condition), and the function returning early. Those "INDx" and "DEPx"
+// labels also map to comments in the function body.
+//
+// - IND1: this frame is the first frame.
+// - IND2: this frame fills out the whole image, and it is completely opaque
+// or it overwrites (not blends with) the previous frame.
+// - IND3: all preceding frames' disposals are kRestorePrevious.
+// - IND4: the prevFrame's disposal is kRestoreBGColor, and it fills out the
+// whole image or it is itself otherwise independent.
+// - DEP5: this frame reports alpha (it is not completely opaque) and it
+// blends with (not overwrites) the previous frame.
+// - IND6: this frame's rect covers the rects of all preceding frames back to
+// and including the most recent independent frame before this frame.
+// - DEP7: unconditional.
+//
+// The "prevFrame" variable initially points to the previous frame (also known
+// as the prior frame), but that variable may iterate further backwards over
+// the course of this computation.
+void SkFrameHolder::setAlphaAndRequiredFrame(SkFrame* frame) {
+ const bool reportsAlpha = frame->reportedAlpha() != SkEncodedInfo::kOpaque_Alpha;
+ const auto screenRect = SkIRect::MakeWH(fScreenWidth, fScreenHeight);
+ const auto frameRect = frame_rect_on_screen(frame->frameRect(), screenRect);
+
+ const int i = frame->frameId();
+ if (0 == i) {
+ frame->setHasAlpha(reportsAlpha || frameRect != screenRect);
+ frame->setRequiredFrame(SkCodec::kNoFrame); // IND1
+ return;
+ }
+
+
+ const bool blendWithPrevFrame = frame->getBlend() == SkCodecAnimation::Blend::kPriorFrame;
+ if ((!reportsAlpha || !blendWithPrevFrame) && frameRect == screenRect) {
+ frame->setHasAlpha(reportsAlpha);
+ frame->setRequiredFrame(SkCodec::kNoFrame); // IND2
+ return;
+ }
+
+ const SkFrame* prevFrame = this->getFrame(i-1);
+ while (prevFrame->getDisposalMethod() == SkCodecAnimation::DisposalMethod::kRestorePrevious) {
+ const int prevId = prevFrame->frameId();
+ if (0 == prevId) {
+ frame->setHasAlpha(true);
+ frame->setRequiredFrame(SkCodec::kNoFrame); // IND3
+ return;
+ }
+
+ prevFrame = this->getFrame(prevId - 1);
+ }
+
+ const bool clearPrevFrame = restore_bg(*prevFrame);
+ auto prevFrameRect = frame_rect_on_screen(prevFrame->frameRect(), screenRect);
+
+ if (clearPrevFrame) {
+ if (prevFrameRect == screenRect || independent(*prevFrame)) {
+ frame->setHasAlpha(true);
+ frame->setRequiredFrame(SkCodec::kNoFrame); // IND4
+ return;
+ }
+ }
+
+ if (reportsAlpha && blendWithPrevFrame) {
+ // Note: We could be more aggressive here. If prevFrame clears
+ // to background color and covers its required frame (and that
+ // frame is independent), prevFrame could be marked independent.
+ // Would this extra complexity be worth it?
+ frame->setRequiredFrame(prevFrame->frameId()); // DEP5
+ frame->setHasAlpha(prevFrame->hasAlpha() || clearPrevFrame);
+ return;
+ }
+
+ while (frameRect.contains(prevFrameRect)) {
+ const int prevRequiredFrame = prevFrame->getRequiredFrame();
+ if (prevRequiredFrame == SkCodec::kNoFrame) {
+ frame->setRequiredFrame(SkCodec::kNoFrame); // IND6
+ frame->setHasAlpha(true);
+ return;
+ }
+
+ prevFrame = this->getFrame(prevRequiredFrame);
+ prevFrameRect = frame_rect_on_screen(prevFrame->frameRect(), screenRect);
+ }
+
+ frame->setRequiredFrame(prevFrame->frameId()); // DEP7
+ if (restore_bg(*prevFrame)) {
+ frame->setHasAlpha(true);
+ return;
+ }
+ SkASSERT(prevFrame->getDisposalMethod() == SkCodecAnimation::DisposalMethod::kKeep);
+ frame->setHasAlpha(prevFrame->hasAlpha() || (reportsAlpha && !blendWithPrevFrame));
+}
+
diff --git a/gfx/skia/skia/src/codec/SkCodecAnimationPriv.h b/gfx/skia/skia/src/codec/SkCodecAnimationPriv.h
new file mode 100644
index 0000000000..233a79b211
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecAnimationPriv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecAnimationPriv_DEFINED
+#define SkCodecAnimationPriv_DEFINED
+
+namespace SkCodecAnimation {
+ /**
+ * How to blend the current frame.
+ */
+ enum class Blend {
+ /**
+ * Blend with the prior frame. This is the typical case, supported
+ * by all animated image types.
+ */
+ kPriorFrame,
+
+ /**
+ * Do not blend.
+ *
+         * This frame's pixels overwrite previous pixels, "blending" with
+         * a background color of transparent.
+ */
+ kBG,
+ };
+
+}
+#endif // SkCodecAnimationPriv_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
new file mode 100644
index 0000000000..b7909479a1
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkYUVAIndex.h"
+#include "src/codec/SkCodecImageGenerator.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkPixmapPriv.h"
+
+std::unique_ptr<SkImageGenerator> SkCodecImageGenerator::MakeFromEncodedCodec(sk_sp<SkData> data) {
+ auto codec = SkCodec::MakeFromData(data);
+ if (nullptr == codec) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<SkImageGenerator>(new SkCodecImageGenerator(std::move(codec), data));
+}
+
+std::unique_ptr<SkImageGenerator>
+SkCodecImageGenerator::MakeFromCodec(std::unique_ptr<SkCodec> codec) {
+ return codec
+ ? std::unique_ptr<SkImageGenerator>(new SkCodecImageGenerator(std::move(codec), nullptr))
+ : nullptr;
+}
+
+static SkImageInfo adjust_info(SkCodec* codec) {
+ SkImageInfo info = codec->getInfo();
+ if (kUnpremul_SkAlphaType == info.alphaType()) {
+ info = info.makeAlphaType(kPremul_SkAlphaType);
+ }
+ if (SkPixmapPriv::ShouldSwapWidthHeight(codec->getOrigin())) {
+ info = SkPixmapPriv::SwapWidthHeight(info);
+ }
+ return info;
+}
+
+SkCodecImageGenerator::SkCodecImageGenerator(std::unique_ptr<SkCodec> codec, sk_sp<SkData> data)
+ : INHERITED(adjust_info(codec.get()))
+ , fCodec(std::move(codec))
+ , fData(std::move(data))
+{}
+
+sk_sp<SkData> SkCodecImageGenerator::onRefEncodedData() {
+ return fData;
+}
+
+bool SkCodecImageGenerator::onGetPixels(const SkImageInfo& requestInfo, void* requestPixels,
+ size_t requestRowBytes, const Options&) {
+ SkPixmap dst(requestInfo, requestPixels, requestRowBytes);
+
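+    // Incomplete or corrupt input still counts as success below: the codec
+    // fills any rows it could not decode, so the generator can return usable
+    // pixels for the rest of the image.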
+ auto decode = [this](const SkPixmap& pm) {
+ SkCodec::Result result = fCodec->getPixels(pm);
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ return SkPixmapPriv::Orient(dst, fCodec->getOrigin(), decode);
+}
+
+bool SkCodecImageGenerator::onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace) const {
+ // This image generator always returns 3 separate non-interleaved planes
+ yuvaIndices[SkYUVAIndex::kY_Index].fIndex = 0;
+ yuvaIndices[SkYUVAIndex::kY_Index].fChannel = SkColorChannel::kR;
+ yuvaIndices[SkYUVAIndex::kU_Index].fIndex = 1;
+ yuvaIndices[SkYUVAIndex::kU_Index].fChannel = SkColorChannel::kR;
+ yuvaIndices[SkYUVAIndex::kV_Index].fIndex = 2;
+ yuvaIndices[SkYUVAIndex::kV_Index].fChannel = SkColorChannel::kR;
+ yuvaIndices[SkYUVAIndex::kA_Index].fIndex = -1;
+ yuvaIndices[SkYUVAIndex::kA_Index].fChannel = SkColorChannel::kR;
+
+ return fCodec->queryYUV8(sizeInfo, colorSpace);
+}
+
+bool SkCodecImageGenerator::onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+ const SkYUVAIndex indices[SkYUVAIndex::kIndexCount],
+ void* planes[]) {
+ SkCodec::Result result = fCodec->getYUV8Planes(sizeInfo, planes);
+ // TODO: check indices
+
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.h b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
new file mode 100644
index 0000000000..3d8404f2dc
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCodecImageGenerator_DEFINED
+#define SkCodecImageGenerator_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+class SkCodecImageGenerator : public SkImageGenerator {
+public:
+ /*
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkCodecImageGenerator. Otherwise return nullptr.
+ */
+ static std::unique_ptr<SkImageGenerator> MakeFromEncodedCodec(sk_sp<SkData>);
+
+ static std::unique_ptr<SkImageGenerator> MakeFromCodec(std::unique_ptr<SkCodec>);
+
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(
+ const SkImageInfo& info, void* pixels, size_t rowBytes, const Options& opts) override;
+
+ bool onQueryYUVA8(
+ SkYUVASizeInfo*, SkYUVAIndex[SkYUVAIndex::kIndexCount], SkYUVColorSpace*) const override;
+
+ bool onGetYUVA8Planes(const SkYUVASizeInfo&, const SkYUVAIndex[SkYUVAIndex::kIndexCount],
+ void* planes[]) override;
+
+private:
+ /*
+ * Takes ownership of codec
+ */
+ SkCodecImageGenerator(std::unique_ptr<SkCodec>, sk_sp<SkData>);
+
+ std::unique_ptr<SkCodec> fCodec;
+ sk_sp<SkData> fData;
+
+ typedef SkImageGenerator INHERITED;
+};
+#endif // SkCodecImageGenerator_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkCodecPriv.h b/gfx/skia/skia/src/codec/SkCodecPriv.h
new file mode 100644
index 0000000000..d2f2a37f31
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecPriv.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecPriv_DEFINED
+#define SkCodecPriv_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkEncodedInfo.h"
+#include "src/codec/SkColorTable.h"
+#include "src/core/SkEndian.h"
+
+#ifdef SK_PRINT_CODEC_MESSAGES
+ #define SkCodecPrintf SkDebugf
+#else
+ #define SkCodecPrintf(...)
+#endif
+
+// Defined in SkCodec.cpp
+bool sk_select_xform_format(SkColorType colorType, bool forColorTable,
+ skcms_PixelFormat* outFormat);
+
+// FIXME: Consider sharing with dm, nanbench, and tools.
+static inline float get_scale_from_sample_size(int sampleSize) {
+ return 1.0f / ((float) sampleSize);
+}
+
+static inline bool is_valid_subset(const SkIRect& subset, const SkISize& imageDims) {
+ return SkIRect::MakeSize(imageDims).contains(subset);
+}
+
+/*
+ * returns a scaled dimension based on the original dimension and the sampleSize
+ * NOTE: we round down here for scaled dimension to match the behavior of SkImageDecoder
+ * FIXME: I think we should call this get_sampled_dimension().
+ */
+static inline int get_scaled_dimension(int srcDimension, int sampleSize) {
+ if (sampleSize > srcDimension) {
+ return 1;
+ }
+ return srcDimension / sampleSize;
+}
+
+/*
+ * Returns the first coordinate that we will keep during a scaled decode.
+ * The output can be interpreted as an x-coordinate or a y-coordinate.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline int get_start_coord(int sampleFactor) { return sampleFactor / 2; }
+
+/*
+ * Given a coordinate in the original image, this returns the corresponding
+ * coordinate in the scaled image. This function is meaningless if
+ * is_coord_necessary returns false.
+ * The output can be interpreted as an x-coordinate or a y-coordinate.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline int get_dst_coord(int srcCoord, int sampleFactor) { return srcCoord / sampleFactor; }
+
+/*
+ * When scaling, we will discard certain y-coordinates (rows) and
+ * x-coordinates (columns). This function returns true if we should keep the
+ * coordinate and false otherwise.
+ * The inputs may be x-coordinates or y-coordinates.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline bool is_coord_necessary(int srcCoord, int sampleFactor, int scaledDim) {
+ // Get the first coordinate that we want to keep
+ int startCoord = get_start_coord(sampleFactor);
+
+ // Return false on edge cases
+ if (srcCoord < startCoord || get_dst_coord(srcCoord, sampleFactor) >= scaledDim) {
+ return false;
+ }
+
+ // Every sampleFactor rows are necessary
+ return ((srcCoord - startCoord) % sampleFactor) == 0;
+}
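+
+// For example, with sampleFactor == 4 and a source dimension of 10:
+// get_scaled_dimension() returns 2, get_start_coord() returns 2, and
+// is_coord_necessary() keeps only coordinates 2 and 6, which map to dst
+// coordinates 0 and 1.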
+
+static inline bool valid_alpha(SkAlphaType dstAlpha, bool srcIsOpaque) {
+ if (kUnknown_SkAlphaType == dstAlpha) {
+ return false;
+ }
+
+ if (srcIsOpaque) {
+ if (kOpaque_SkAlphaType != dstAlpha) {
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ }
+ return true;
+ }
+
+ return dstAlpha != kOpaque_SkAlphaType;
+}
+
+/*
+ * If there is a color table, get a pointer to the colors, otherwise return nullptr
+ */
+static inline const SkPMColor* get_color_ptr(SkColorTable* colorTable) {
+ return nullptr != colorTable ? colorTable->readColors() : nullptr;
+}
+
+/*
+ * Compute row bytes for an image using pixels per byte
+ */
+static inline size_t compute_row_bytes_ppb(int width, uint32_t pixelsPerByte) {
+ return (width + pixelsPerByte - 1) / pixelsPerByte;
+}
+
+/*
+ * Compute row bytes for an image using bytes per pixel
+ */
+static inline size_t compute_row_bytes_bpp(int width, uint32_t bytesPerPixel) {
+ return width * bytesPerPixel;
+}
+
+/*
+ * Compute row bytes for an image
+ */
+static inline size_t compute_row_bytes(int width, uint32_t bitsPerPixel) {
+ if (bitsPerPixel < 16) {
+ SkASSERT(0 == 8 % bitsPerPixel);
+ const uint32_t pixelsPerByte = 8 / bitsPerPixel;
+ return compute_row_bytes_ppb(width, pixelsPerByte);
+ } else {
+ SkASSERT(0 == bitsPerPixel % 8);
+ const uint32_t bytesPerPixel = bitsPerPixel / 8;
+ return compute_row_bytes_bpp(width, bytesPerPixel);
+ }
+}
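+
+// For example, a 7 pixel wide row at 4 bits per pixel occupies
+// (7 + 1) / 2 = 4 bytes, and a 5 pixel wide row at 24 bits per pixel
+// occupies 5 * 3 = 15 bytes.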
+
+/*
+ * Get a byte from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint8_t get_byte(uint8_t* buffer, uint32_t i) {
+ return buffer[i];
+}
+
+/*
+ * Get a short from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint16_t get_short(uint8_t* buffer, uint32_t i) {
+ uint16_t result;
+ memcpy(&result, &(buffer[i]), 2);
+#ifdef SK_CPU_BENDIAN
+ return SkEndianSwap16(result);
+#else
+ return result;
+#endif
+}
+
+/*
+ * Get an int from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint32_t get_int(uint8_t* buffer, uint32_t i) {
+ uint32_t result;
+ memcpy(&result, &(buffer[i]), 4);
+#ifdef SK_CPU_BENDIAN
+ return SkEndianSwap32(result);
+#else
+ return result;
+#endif
+}
+
+/*
+ * @param data Buffer to read bytes from
+ * @param isLittleEndian Output parameter
+ * Indicates if the data is little endian
+ * Is unaffected on false returns
+ */
+static inline bool is_valid_endian_marker(const uint8_t* data, bool* isLittleEndian) {
+    // II indicates Intel (little endian) and MM indicates Motorola (big endian).
+ if (('I' != data[0] || 'I' != data[1]) && ('M' != data[0] || 'M' != data[1])) {
+ return false;
+ }
+
+ *isLittleEndian = ('I' == data[0]);
+ return true;
+}
+
+static inline uint16_t get_endian_short(const uint8_t* data, bool littleEndian) {
+ if (littleEndian) {
+ return (data[1] << 8) | (data[0]);
+ }
+
+ return (data[0] << 8) | (data[1]);
+}
+
+static inline SkPMColor premultiply_argb_as_rgba(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+
+ return SkPackARGB_as_RGBA(a, r, g, b);
+}
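+
+// For example, premultiply_argb_as_rgba(0x80, 0xFF, 0x00, 0x00) scales the
+// red channel to 0x80, yielding a half-transparent premultiplied red.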
+
+static inline SkPMColor premultiply_argb_as_bgra(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+
+ return SkPackARGB_as_BGRA(a, r, g, b);
+}
+
+static inline bool is_rgba(SkColorType colorType) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return (kBGRA_8888_SkColorType != colorType);
+#else
+ return (kRGBA_8888_SkColorType == colorType);
+#endif
+}
+
+// Method for converting to a 32-bit pixel.
+typedef uint32_t (*PackColorProc)(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+static inline PackColorProc choose_pack_color_proc(bool isPremul, SkColorType colorType) {
+ bool isRGBA = is_rgba(colorType);
+ if (isPremul) {
+ if (isRGBA) {
+ return &premultiply_argb_as_rgba;
+ } else {
+ return &premultiply_argb_as_bgra;
+ }
+ } else {
+ if (isRGBA) {
+ return &SkPackARGB_as_RGBA;
+ } else {
+ return &SkPackARGB_as_BGRA;
+ }
+ }
+}
+
+bool is_orientation_marker(const uint8_t* data, size_t data_length, SkEncodedOrigin* orientation);
+
+#endif // SkCodecPriv_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkColorTable.cpp b/gfx/skia/skia/src/codec/SkColorTable.cpp
new file mode 100644
index 0000000000..c15c452478
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkColorTable.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+#include "src/codec/SkColorTable.h"
+
+SkColorTable::SkColorTable(const SkPMColor colors[], int count) {
+ SkASSERT(0 == count || colors);
+ SkASSERT(count >= 0 && count <= 256);
+
+ fCount = count;
+ fColors = reinterpret_cast<SkPMColor*>(sk_malloc_throw(count * sizeof(SkPMColor)));
+
+ memcpy(fColors, colors, count * sizeof(SkPMColor));
+}
+
+SkColorTable::~SkColorTable() {
+ sk_free(fColors);
+}
diff --git a/gfx/skia/skia/src/codec/SkColorTable.h b/gfx/skia/skia/src/codec/SkColorTable.h
new file mode 100644
index 0000000000..e83498ead8
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkColorTable.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorTable_DEFINED
+#define SkColorTable_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+
+/** \class SkColorTable
+
+ SkColorTable holds an array of SkPMColors (premultiplied 32-bit colors) used by
+ 8-bit bitmaps, where the bitmap bytes are interpreted as indices into the color table.
+
+ SkColorTable is thread-safe.
+*/
+class SkColorTable : public SkRefCnt {
+public:
+ /** Copy up to 256 colors into a new SkColorTable.
+ */
+ SkColorTable(const SkPMColor colors[], int count);
+ ~SkColorTable() override;
+
+ /** Returns the number of colors in the table.
+ */
+ int count() const { return fCount; }
+
+ /** Returns the specified color from the table. In the debug build, this asserts that
+ * the index is in range (0 <= index < count).
+ */
+ SkPMColor operator[](int index) const {
+ SkASSERT(fColors != nullptr && (unsigned)index < (unsigned)fCount);
+ return fColors[index];
+ }
+
+ /** Return the array of colors for reading. */
+ const SkPMColor* readColors() const { return fColors; }
+
+private:
+ SkPMColor* fColors;
+ int fCount;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkEncodedInfo.cpp b/gfx/skia/skia/src/codec/SkEncodedInfo.cpp
new file mode 100644
index 0000000000..75c4d3061d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkEncodedInfo.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkEncodedInfo.h"
+
+std::unique_ptr<SkEncodedInfo::ICCProfile> SkEncodedInfo::ICCProfile::Make(sk_sp<SkData> data) {
+ if (data) {
+ skcms_ICCProfile profile;
+ if (skcms_Parse(data->data(), data->size(), &profile)) {
+ return std::unique_ptr<ICCProfile>(new ICCProfile(profile, std::move(data)));
+ }
+ }
+ return nullptr;
+}
+
+std::unique_ptr<SkEncodedInfo::ICCProfile> SkEncodedInfo::ICCProfile::Make(
+ const skcms_ICCProfile& profile) {
+ return std::unique_ptr<ICCProfile>(new ICCProfile(profile));
+}
+
+SkEncodedInfo::ICCProfile::ICCProfile(const skcms_ICCProfile& profile, sk_sp<SkData> data)
+ : fProfile(profile)
+ , fData(std::move(data))
+{}
diff --git a/gfx/skia/skia/src/codec/SkFrameHolder.h b/gfx/skia/skia/src/codec/SkFrameHolder.h
new file mode 100644
index 0000000000..c44d2e048c
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkFrameHolder.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFrameHolder_DEFINED
+#define SkFrameHolder_DEFINED
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/codec/SkCodecAnimationPriv.h"
+
+/**
+ * Base class for a single frame of an animated image.
+ *
+ * Separate from SkCodec::FrameInfo, which is a pared down
+ * interface that only contains the info the client needs.
+ */
+class SkFrame : public SkNoncopyable {
+public:
+ SkFrame(int id)
+ : fId(id)
+ , fHasAlpha(false)
+ , fRequiredFrame(kUninitialized)
+ , fDisposalMethod(SkCodecAnimation::DisposalMethod::kKeep)
+ , fDuration(0)
+ , fBlend(SkCodecAnimation::Blend::kPriorFrame)
+ {
+ fRect.setEmpty();
+ }
+
+ virtual ~SkFrame() {}
+
+ /**
+ * An explicit move constructor, as
+ * https://en.cppreference.com/w/cpp/language/move_constructor says that
+ * there is no implicit move constructor if there are user-declared
+ * destructors, and we have one, immediately above.
+ *
+ * Without a move constructor, it is harder to use an SkFrame, or an
+ * SkFrame subclass, inside a std::vector.
+ */
+ SkFrame(SkFrame&&) = default;
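+
+ /**
+ * For example (sketch only; MyFrame stands in for a concrete subclass,
+ * since SkFrame itself is abstract):
+ *
+ * std::vector<MyFrame> frames;
+ * frames.emplace_back(frames.size());
+ * // Growth relocates existing elements via this move constructor.
+ */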
+
+ /**
+ * 0-based index of the frame in the image sequence.
+ */
+ int frameId() const { return fId; }
+
+ /**
+ * How this frame reports its alpha.
+ *
+ * This only considers the rectangle of this frame, and
+ * considers it to have alpha even if it is opaque once
+ * blended with the frame behind it.
+ */
+ SkEncodedInfo::Alpha reportedAlpha() const {
+ return this->onReportedAlpha();
+ }
+
+ /**
+ * Cached value representing whether the frame has alpha,
+ * after compositing with the prior frame.
+ */
+ bool hasAlpha() const { return fHasAlpha; }
+
+ /**
+ * Cache whether the finished frame has alpha.
+ */
+ void setHasAlpha(bool alpha) { fHasAlpha = alpha; }
+
+ /**
+ * Whether enough of the frame has been read to determine
+ * fRequiredFrame and fHasAlpha.
+ */
+ bool reachedStartOfData() const { return fRequiredFrame != kUninitialized; }
+
+ /**
+ * The frame this one depends on.
+ *
+ * Must not be called until fRequiredFrame has been set properly.
+ */
+ int getRequiredFrame() const {
+ SkASSERT(this->reachedStartOfData());
+ return fRequiredFrame;
+ }
+
+ /**
+ * Set the frame that this frame depends on.
+ */
+ void setRequiredFrame(int req) { fRequiredFrame = req; }
+
+ /**
+ * Set the rectangle that is updated by this frame.
+ */
+ void setXYWH(int x, int y, int width, int height) {
+ fRect.setXYWH(x, y, width, height);
+ }
+
+ /**
+ * The rectangle that is updated by this frame.
+ */
+ SkIRect frameRect() const { return fRect; }
+
+ int xOffset() const { return fRect.x(); }
+ int yOffset() const { return fRect.y(); }
+ int width() const { return fRect.width(); }
+ int height() const { return fRect.height(); }
+
+ SkCodecAnimation::DisposalMethod getDisposalMethod() const {
+ return fDisposalMethod;
+ }
+
+ void setDisposalMethod(SkCodecAnimation::DisposalMethod disposalMethod) {
+ fDisposalMethod = disposalMethod;
+ }
+
+ /**
+ * Set the duration (in ms) to show this frame.
+ */
+ void setDuration(int duration) {
+ fDuration = duration;
+ }
+
+ /**
+ * Duration in ms to show this frame.
+ */
+ int getDuration() const {
+ return fDuration;
+ }
+
+ void setBlend(SkCodecAnimation::Blend blend) {
+ fBlend = blend;
+ }
+
+ SkCodecAnimation::Blend getBlend() const {
+ return fBlend;
+ }
+
+protected:
+ virtual SkEncodedInfo::Alpha onReportedAlpha() const = 0;
+
+private:
+ static constexpr int kUninitialized = -2;
+
+ const int fId;
+ bool fHasAlpha;
+ int fRequiredFrame;
+ SkIRect fRect;
+ SkCodecAnimation::DisposalMethod fDisposalMethod;
+ int fDuration;
+ SkCodecAnimation::Blend fBlend;
+};
+
+/**
+ * Base class for an object which holds the SkFrames of an
+ * image sequence.
+ */
+class SkFrameHolder : public SkNoncopyable {
+public:
+ SkFrameHolder()
+ : fScreenWidth(0)
+ , fScreenHeight(0)
+ {}
+
+ virtual ~SkFrameHolder() {}
+
+ /**
+ * Size of the image. Each frame will be contained in
+ * these dimensions (possibly after clipping).
+ */
+ int screenWidth() const { return fScreenWidth; }
+ int screenHeight() const { return fScreenHeight; }
+
+ /**
+ * Compute the opacity and required frame, based on
+ * the frame's reportedAlpha and how it blends
+ * with prior frames.
+ */
+ void setAlphaAndRequiredFrame(SkFrame*);
+
+ /**
+ * Return the frame with frameId i.
+ */
+ const SkFrame* getFrame(int i) const {
+ return this->onGetFrame(i);
+ }
+
+protected:
+ int fScreenWidth;
+ int fScreenHeight;
+
+ virtual const SkFrame* onGetFrame(int i) const = 0;
+};
+
+#endif // SkFrameHolder_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkGifCodec.cpp b/gfx/skia/skia/src/codec/SkGifCodec.cpp
new file mode 100644
index 0000000000..b2185d184e
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkGifCodec.cpp
@@ -0,0 +1,533 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkGifCodec.h"
+#include "src/codec/SkSwizzler.h"
+#include "src/core/SkMakeUnique.h"
+
+#include <algorithm>
+
+#define GIF87_STAMP "GIF87a"
+#define GIF89_STAMP "GIF89a"
+#define GIF_STAMP_LEN 6
+
+/*
+ * Checks the start of the stream to see if the image is a gif
+ */
+bool SkGifCodec::IsGif(const void* buf, size_t bytesRead) {
+ if (bytesRead >= GIF_STAMP_LEN) {
+ if (memcmp(GIF87_STAMP, buf, GIF_STAMP_LEN) == 0 ||
+ memcmp(GIF89_STAMP, buf, GIF_STAMP_LEN) == 0)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Error function
+ */
+static SkCodec::Result gif_error(const char* msg, SkCodec::Result result = SkCodec::kInvalidInput) {
+ SkCodecPrintf("Gif Error: %s\n", msg);
+ return result;
+}
+
+std::unique_ptr<SkCodec> SkGifCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ std::unique_ptr<SkGifImageReader> reader(new SkGifImageReader(std::move(stream)));
+ *result = reader->parse(SkGifImageReader::SkGIFSizeQuery);
+ if (*result != kSuccess) {
+ return nullptr;
+ }
+
+ // If no images are in the data, or the first header is not yet defined, we cannot
+ // create a codec. In either case, the width and height are not yet known.
+ auto* frame = reader->frameContext(0);
+ if (!frame || !frame->isHeaderDefined()) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ // isHeaderDefined() will not return true if the screen size is empty.
+ SkASSERT(reader->screenHeight() > 0 && reader->screenWidth() > 0);
+
+ const auto alpha = reader->firstFrameHasAlpha() ? SkEncodedInfo::kBinary_Alpha
+ : SkEncodedInfo::kOpaque_Alpha;
+ // Use kPalette since Gifs are encoded with a color table.
+ // FIXME: Gifs can actually be encoded with 4 bits per pixel. Using 8 works, but we could skip
+ // expanding to 8 bits and take advantage of the SkSwizzler to work from 4.
+ auto encodedInfo = SkEncodedInfo::Make(reader->screenWidth(), reader->screenHeight(),
+ SkEncodedInfo::kPalette_Color, alpha, 8);
+ return std::unique_ptr<SkCodec>(new SkGifCodec(std::move(encodedInfo), reader.release()));
+}
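+
+// Client-side sketch (illustrative): callers normally reach this through the
+// public SkCodec::MakeFromStream, which calls IsGif on the buffered stream
+// and then dispatches here.
+//
+// std::unique_ptr<SkCodec> codec = SkCodec::MakeFromStream(std::move(stream));
+// if (codec) {
+// int frames = codec->getFrameCount(); // triggers further parsing
+// }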
+
+bool SkGifCodec::onRewind() {
+ fReader->clearDecodeState();
+ return true;
+}
+
+SkGifCodec::SkGifCodec(SkEncodedInfo&& encodedInfo, SkGifImageReader* reader)
+ : INHERITED(std::move(encodedInfo), skcms_PixelFormat_RGBA_8888, nullptr)
+ , fReader(reader)
+ , fTmpBuffer(nullptr)
+ , fSwizzler(nullptr)
+ , fCurrColorTable(nullptr)
+ , fCurrColorTableIsReal(false)
+ , fFilledBackground(false)
+ , fFirstCallToIncrementalDecode(false)
+ , fDst(nullptr)
+ , fDstRowBytes(0)
+ , fRowsDecoded(0)
+{
+ reader->setClient(this);
+}
+
+int SkGifCodec::onGetFrameCount() {
+ fReader->parse(SkGifImageReader::SkGIFFrameCountQuery);
+ return fReader->imagesCount();
+}
+
+bool SkGifCodec::onGetFrameInfo(int i, SkCodec::FrameInfo* frameInfo) const {
+ if (i >= fReader->imagesCount()) {
+ return false;
+ }
+
+ const SkGIFFrameContext* frameContext = fReader->frameContext(i);
+ SkASSERT(frameContext->reachedStartOfData());
+
+ if (frameInfo) {
+ frameInfo->fDuration = frameContext->getDuration();
+ frameInfo->fRequiredFrame = frameContext->getRequiredFrame();
+ frameInfo->fFullyReceived = frameContext->isComplete();
+ frameInfo->fAlphaType = frameContext->hasAlpha() ? kUnpremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ frameInfo->fDisposalMethod = frameContext->getDisposalMethod();
+ }
+ return true;
+}
+
+int SkGifCodec::onGetRepetitionCount() {
+ fReader->parse(SkGifImageReader::SkGIFLoopCountQuery);
+ return fReader->loopCount();
+}
+
+static constexpr SkColorType kXformSrcColorType = kRGBA_8888_SkColorType;
+
+void SkGifCodec::initializeColorTable(const SkImageInfo& dstInfo, int frameIndex) {
+ SkColorType colorTableColorType = dstInfo.colorType();
+ if (this->colorXform()) {
+ colorTableColorType = kXformSrcColorType;
+ }
+
+ sk_sp<SkColorTable> currColorTable = fReader->getColorTable(colorTableColorType, frameIndex);
+ fCurrColorTableIsReal = static_cast<bool>(currColorTable);
+ if (!fCurrColorTableIsReal) {
+ // This is possible for an empty frame. Create a dummy with one value (transparent).
+ SkPMColor color = SK_ColorTRANSPARENT;
+ fCurrColorTable.reset(new SkColorTable(&color, 1));
+ } else if (this->colorXform() && !this->xformOnDecode()) {
+ SkPMColor dstColors[256];
+ this->applyColorXform(dstColors, currColorTable->readColors(),
+ currColorTable->count());
+ fCurrColorTable.reset(new SkColorTable(dstColors, currColorTable->count()));
+ } else {
+ fCurrColorTable = std::move(currColorTable);
+ }
+}
+
+
+SkCodec::Result SkGifCodec::prepareToDecode(const SkImageInfo& dstInfo, const Options& opts) {
+ if (opts.fSubset) {
+ return gif_error("Subsets not supported.\n", kUnimplemented);
+ }
+
+ const int frameIndex = opts.fFrameIndex;
+ if (frameIndex > 0 && kRGB_565_SkColorType == dstInfo.colorType()) {
+ // FIXME: In theory, we might be able to support this, but it's not clear that it
+ // is necessary (Chromium does not decode to 565, and Android does not decode
+ // frames beyond the first). Disabling it because it is somewhat difficult:
+ // - If there is a transparent pixel, and this frame draws on top of another frame
+ // (if the frame is independent with a transparent pixel, we should not decode to
+ // 565 anyway, since it is not opaque), we need to skip drawing the transparent
+ // pixels (see writeTransparentPixels in haveDecodedRow). We currently do this by
+ // first swizzling into temporary memory, then copying into the destination. (We
+ // let the swizzler handle it first because it may need to sample.) After
+ // swizzling to 565, we do not know which pixels in our temporary memory
+ // correspond to the transparent pixel, so we do not know what to skip. We could
+ // special case the non-sampled case (no need to swizzle), but as this is
+ // currently unused we can just not support it.
+ return gif_error("Cannot decode multiframe gif (except frame 0) as 565.\n",
+ kInvalidConversion);
+ }
+
+ const auto* frame = fReader->frameContext(frameIndex);
+ SkASSERT(frame);
+ if (0 == frameIndex) {
+ // SkCodec does not have a way to just parse through frame 0, so we
+ // have to do so manually, here.
+ fReader->parse((SkGifImageReader::SkGIFParseQuery) 0);
+ if (!frame->reachedStartOfData()) {
+ // We have parsed enough to know that there is a color map, but cannot
+ // parse the map itself yet. Exit now, so we do not build an incorrect
+ // table.
+ return gif_error("color map not available yet\n", kIncompleteInput);
+ }
+ } else {
+ // Parsing happened in SkCodec::getPixels.
+ SkASSERT(frameIndex < fReader->imagesCount());
+ SkASSERT(frame->reachedStartOfData());
+ }
+
+ if (this->xformOnDecode()) {
+ fXformBuffer.reset(new uint32_t[dstInfo.width()]);
+ sk_bzero(fXformBuffer.get(), dstInfo.width() * sizeof(uint32_t));
+ }
+
+ fTmpBuffer.reset(new uint8_t[dstInfo.minRowBytes()]);
+
+ this->initializeColorTable(dstInfo, frameIndex);
+ this->initializeSwizzler(dstInfo, frameIndex);
+
+ SkASSERT(fCurrColorTable);
+ return kSuccess;
+}
+
+void SkGifCodec::initializeSwizzler(const SkImageInfo& dstInfo, int frameIndex) {
+ const SkGIFFrameContext* frame = fReader->frameContext(frameIndex);
+ // This is only called by prepareToDecode, which ensures frameIndex is in range.
+ SkASSERT(frame);
+
+ const int xBegin = frame->xOffset();
+ const int xEnd = std::min(frame->frameRect().right(), fReader->screenWidth());
+
+ // CreateSwizzler only reads left and right of the frame. We cannot use the frame's raw
+ // frameRect, since it might extend beyond the edge of the frame.
+ SkIRect swizzleRect = SkIRect::MakeLTRB(xBegin, 0, xEnd, 0);
+
+ SkImageInfo swizzlerInfo = dstInfo;
+ if (this->colorXform()) {
+ swizzlerInfo = swizzlerInfo.makeColorType(kXformSrcColorType);
+ if (kPremul_SkAlphaType == dstInfo.alphaType()) {
+ swizzlerInfo = swizzlerInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+ }
+
+ // The default Options should be fine:
+ // - we'll ignore whether the memory is zero initialized - unless this is the first frame, it
+ // won't matter anyway.
+ // - subsets are not supported for gif
+ // - the swizzler does not need to know about the frame.
+ // We may not be able to use the real Options anyway, since getPixels does not store it (due to
+ // a bug).
+ fSwizzler = SkSwizzler::Make(this->getEncodedInfo(), fCurrColorTable->readColors(),
+ swizzlerInfo, Options(), &swizzleRect);
+ SkASSERT(fSwizzler.get());
+}
+
+/*
+ * Initiates the gif decode
+ */
+SkCodec::Result SkGifCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* pixels, size_t dstRowBytes,
+ const Options& opts,
+ int* rowsDecoded) {
+ Result result = this->prepareToDecode(dstInfo, opts);
+ switch (result) {
+ case kSuccess:
+ break;
+ case kIncompleteInput:
+ // onStartIncrementalDecode treats this as incomplete, since it may
+ // provide more data later, but in this case, no more data will be
+ // provided, and there is nothing to draw. We also cannot return
+ // kIncompleteInput, which will make SkCodec attempt to fill
+ // remaining rows, but that requires an SkSwizzler, which we have
+ // not created.
+ return kInvalidInput;
+ default:
+ return result;
+ }
+
+ if (dstInfo.dimensions() != this->dimensions()) {
+ return gif_error("Scaling not supported.\n", kInvalidScale);
+ }
+
+ fDst = pixels;
+ fDstRowBytes = dstRowBytes;
+
+ return this->decodeFrame(true, opts, rowsDecoded);
+}
+
+SkCodec::Result SkGifCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* pixels, size_t dstRowBytes,
+ const SkCodec::Options& opts) {
+ Result result = this->prepareToDecode(dstInfo, opts);
+ if (result != kSuccess) {
+ return result;
+ }
+
+ fDst = pixels;
+ fDstRowBytes = dstRowBytes;
+
+ fFirstCallToIncrementalDecode = true;
+
+ return kSuccess;
+}
+
+SkCodec::Result SkGifCodec::onIncrementalDecode(int* rowsDecoded) {
+ // It is possible the client has appended more data. Parse, if needed.
+ const auto& options = this->options();
+ const int frameIndex = options.fFrameIndex;
+ fReader->parse((SkGifImageReader::SkGIFParseQuery) frameIndex);
+
+ const bool firstCallToIncrementalDecode = fFirstCallToIncrementalDecode;
+ fFirstCallToIncrementalDecode = false;
+ return this->decodeFrame(firstCallToIncrementalDecode, options, rowsDecoded);
+}
+
+SkCodec::Result SkGifCodec::decodeFrame(bool firstAttempt, const Options& opts, int* rowsDecoded) {
+ const SkImageInfo& dstInfo = this->dstInfo();
+ const int scaledHeight = get_scaled_dimension(dstInfo.height(), fSwizzler->sampleY());
+
+ const int frameIndex = opts.fFrameIndex;
+ SkASSERT(frameIndex < fReader->imagesCount());
+ const SkGIFFrameContext* frameContext = fReader->frameContext(frameIndex);
+ if (firstAttempt) {
+ // rowsDecoded reports how many rows have been initialized, so a layer above
+ // can fill the rest. In some cases, we fill the background before decoding
+ // (or it is already filled for us), so we report rowsDecoded to be the full
+ // height.
+ bool filledBackground = false;
+ if (frameContext->getRequiredFrame() == kNoFrame) {
+ // We may need to clear to transparent for one of the following reasons:
+ // - The frameRect does not cover the full bounds. haveDecodedRow will
+ // only draw inside the frameRect, so we need to clear the rest.
+ // - The frame is interlaced. There is no obvious way to fill
+ // afterwards for an incomplete image. (FIXME: Does the first pass
+ // cover all rows? If so, we do not have to fill here.)
+ // - There is no color table for this frame. In that case we will not
+ // draw anything, so we need to fill.
+ if (frameContext->frameRect() != this->bounds()
+ || frameContext->interlaced() || !fCurrColorTableIsReal) {
+ auto fillInfo = dstInfo.makeWH(fSwizzler->fillWidth(), scaledHeight);
+ SkSampler::Fill(fillInfo, fDst, fDstRowBytes, opts.fZeroInitialized);
+ filledBackground = true;
+ }
+ } else {
+ // Not independent.
+ // SkCodec ensured that the prior frame has been decoded.
+ filledBackground = true;
+ }
+
+ fFilledBackground = filledBackground;
+ if (filledBackground) {
+ // Report the full (scaled) height, since the client will never need to fill.
+ fRowsDecoded = scaledHeight;
+ } else {
+ // This will be updated by haveDecodedRow.
+ fRowsDecoded = 0;
+ }
+ }
+
+ if (!fCurrColorTableIsReal) {
+ // Nothing to draw this frame.
+ return kSuccess;
+ }
+
+ bool frameDecoded = false;
+ const bool fatalError = !fReader->decode(frameIndex, &frameDecoded);
+ if (fatalError || !frameDecoded || fRowsDecoded != scaledHeight) {
+ if (rowsDecoded) {
+ *rowsDecoded = fRowsDecoded;
+ }
+ if (fatalError) {
+ return kErrorInInput;
+ }
+ return kIncompleteInput;
+ }
+
+ return kSuccess;
+}
+
+void SkGifCodec::applyXformRow(const SkImageInfo& dstInfo, void* dst, const uint8_t* src) const {
+ if (this->xformOnDecode()) {
+ SkASSERT(this->colorXform());
+ fSwizzler->swizzle(fXformBuffer.get(), src);
+
+ const int xformWidth = get_scaled_dimension(dstInfo.width(), fSwizzler->sampleX());
+ this->applyColorXform(dst, fXformBuffer.get(), xformWidth);
+ } else {
+ fSwizzler->swizzle(dst, src);
+ }
+}
+
+template <typename T>
+static void blend_line(void* dstAsVoid, const void* srcAsVoid, int width) {
+ T* dst = reinterpret_cast<T*>(dstAsVoid);
+ const T* src = reinterpret_cast<const T*>(srcAsVoid);
+ while (width --> 0) {
+ if (*src != 0) { // GIF pixels are either transparent (== 0) or opaque (!= 0).
+ *dst = *src;
+ }
+ src++;
+ dst++;
+ }
+}
+
+void SkGifCodec::haveDecodedRow(int frameIndex, const unsigned char* rowBegin,
+ int rowNumber, int repeatCount, bool writeTransparentPixels)
+{
+ const SkGIFFrameContext* frameContext = fReader->frameContext(frameIndex);
+ // The pixel data and coordinates supplied to us are relative to the frame's
+ // origin within the entire image size, i.e.
+ // (frameContext->xOffset, frameContext->yOffset). There is no guarantee
+ // that width == (size().width() - frameContext->xOffset), so
+ // we must ensure we don't run off the end of either the source data or the
+ // row's X-coordinates.
+ const int width = frameContext->width();
+ const int xBegin = frameContext->xOffset();
+ const int yBegin = frameContext->yOffset() + rowNumber;
+ const int xEnd = std::min(xBegin + width, this->dimensions().width());
+ const int yEnd = std::min(yBegin + repeatCount, this->dimensions().height());
+ // FIXME: No need to make the checks on width/xBegin/xEnd for every row. We could instead do
+ // this once in prepareToDecode.
+ if (!width || (xBegin < 0) || (yBegin < 0) || (xEnd <= xBegin) || (yEnd <= yBegin))
+ return;
+
+ // yBegin is the first row in the non-sampled image. dstRow will be the row in the output,
+ // after potentially scaling it.
+ int dstRow = yBegin;
+
+ const int sampleY = fSwizzler->sampleY();
+ if (sampleY > 1) {
+ // Check to see whether this row or one that falls in the repeatCount is needed in the
+ // output.
+ bool foundNecessaryRow = false;
+ for (int i = 0; i < repeatCount; i++) {
+ const int potentialRow = yBegin + i;
+ if (fSwizzler->rowNeeded(potentialRow)) {
+ dstRow = potentialRow / sampleY;
+ const int scaledHeight = get_scaled_dimension(this->dstInfo().height(), sampleY);
+ if (dstRow >= scaledHeight) {
+ return;
+ }
+
+ foundNecessaryRow = true;
+ repeatCount -= i;
+
+ repeatCount = (repeatCount - 1) / sampleY + 1;
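+ // This rounds up: ceil(repeatCount / sampleY). e.g. 5 remaining rows
+ // at sampleY == 3 still touch 2 output rows.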
+
+ // Make sure the repeatCount does not take us beyond the end of the dst
+ if (dstRow + repeatCount > scaledHeight) {
+ repeatCount = scaledHeight - dstRow;
+ SkASSERT(repeatCount >= 1);
+ }
+ break;
+ }
+ }
+
+ if (!foundNecessaryRow) {
+ return;
+ }
+ } else {
+ // Make sure the repeatCount does not take us beyond the end of the dst
+ SkASSERT(this->dstInfo().height() >= yBegin);
+ repeatCount = SkTMin(repeatCount, this->dstInfo().height() - yBegin);
+ }
+
+ if (!fFilledBackground) {
+ // At this point, we are definitely going to write the row, so count it towards the number
+ // of rows decoded.
+ // We do not consider the repeatCount, which only happens for interlaced, in which case we
+ // have already set fRowsDecoded to the proper value (reflecting that we have filled the
+ // background).
+ fRowsDecoded++;
+ }
+
+ // decodeFrame will early exit if this is false, so this method will not be
+ // called.
+ SkASSERT(fCurrColorTableIsReal);
+
+ // The swizzler takes care of offsetting into the dst width-wise.
+ void* dstLine = SkTAddOffset<void>(fDst, dstRow * fDstRowBytes);
+
+ // We may or may not need to write transparent pixels to the buffer.
+ // If we're compositing against a previous image, it's wrong, but if
+ // we're decoding an interlaced gif and displaying it "Haeberli"-style,
+ // we must write these for passes beyond the first, or the initial passes
+ // will "show through" the later ones.
+ const auto dstInfo = this->dstInfo();
+ if (writeTransparentPixels) {
+ this->applyXformRow(dstInfo, dstLine, rowBegin);
+ } else {
+ this->applyXformRow(dstInfo, fTmpBuffer.get(), rowBegin);
+
+ size_t offsetBytes = fSwizzler->swizzleOffsetBytes();
+ if (dstInfo.colorType() == kRGBA_F16_SkColorType) {
+ // Account for the fact that post-swizzling we converted to F16,
+ // which is twice as wide.
+ offsetBytes *= 2;
+ }
+ const void* src = SkTAddOffset<void>(fTmpBuffer.get(), offsetBytes);
+ void* dst = SkTAddOffset<void>(dstLine, offsetBytes);
+
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType:
+ blend_line<uint32_t>(dst, src, fSwizzler->swizzleWidth());
+ break;
+ case kRGBA_F16_SkColorType:
+ blend_line<uint64_t>(dst, src, fSwizzler->swizzleWidth());
+ break;
+ default:
+ SkASSERT(false);
+ return;
+ }
+ }
+
+ // Tell the frame to copy the row data if need be.
+ if (repeatCount > 1) {
+ const size_t bytesPerPixel = this->dstInfo().bytesPerPixel();
+ const size_t bytesToCopy = fSwizzler->swizzleWidth() * bytesPerPixel;
+ void* copiedLine = SkTAddOffset<void>(dstLine, fSwizzler->swizzleOffsetBytes());
+ void* dst = copiedLine;
+ for (int i = 1; i < repeatCount; i++) {
+ dst = SkTAddOffset<void>(dst, fDstRowBytes);
+ memcpy(dst, copiedLine, bytesToCopy);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkGifCodec.h b/gfx/skia/skia/src/codec/SkGifCodec.h
new file mode 100644
index 0000000000..1a825fa166
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkGifCodec.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkGifCodec_DEFINED
+#define SkGifCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkSwizzler.h"
+
+#include "third_party/gif/SkGifImageReader.h"
+
+/*
+ *
+ * This class implements the decoding for gif images
+ *
+ */
+class SkGifCodec : public SkCodec {
+public:
+ static bool IsGif(const void*, size_t);
+
+ /*
+ * Assumes IsGif was called and returned true
+ * Reads enough of the stream to determine the image format
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+ // Callback for SkGifImageReader when a row is available.
+ void haveDecodedRow(int frameIndex, const unsigned char* rowBegin,
+ int rowNumber, int repeatCount, bool writeTransparentPixels);
+protected:
+ /*
+ * Performs the full gif decode
+ */
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&,
+ int*) override;
+
+ SkEncodedImageFormat onGetEncodedFormat() const override {
+ return SkEncodedImageFormat::kGIF;
+ }
+
+ bool onRewind() override;
+
+ int onGetFrameCount() override;
+ bool onGetFrameInfo(int, FrameInfo*) const override;
+ int onGetRepetitionCount() override;
+
+ Result onStartIncrementalDecode(const SkImageInfo& /*dstInfo*/, void*, size_t,
+ const SkCodec::Options&) override;
+
+ Result onIncrementalDecode(int*) override;
+
+ const SkFrameHolder* getFrameHolder() const override {
+ return fReader.get();
+ }
+
+private:
+
+ /*
+ * Initializes the color table that we will use for decoding.
+ *
+ * @param dstInfo Contains the requested dst color type.
+ * @param frameIndex Frame whose color table to use.
+ */
+ void initializeColorTable(const SkImageInfo& dstInfo, int frameIndex);
+
+ /*
+ * Does necessary setup, including setting up the color table and swizzler.
+ */
+ Result prepareToDecode(const SkImageInfo& dstInfo, const Options& opts);
+
+ /*
+ * Initializes the swizzler.
+ *
+ * @param dstInfo Output image information. Dimensions may have been
+ * adjusted if the image frame size does not match the size
+ * indicated in the header.
+ * @param frameIndex Which frame we are decoding. This determines the frameRect
+ * to use.
+ */
+ void initializeSwizzler(const SkImageInfo& dstInfo, int frameIndex);
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler);
+ return fSwizzler.get();
+ }
+
+ /*
+ * Recursive function to decode a frame.
+ *
+ * @param firstAttempt Whether this is the first call to decodeFrame since
+ * starting. e.g. true in onGetPixels, and true in the
+ * first call to onIncrementalDecode after calling
+ * onStartIncrementalDecode.
+ * When true, this method may have to initialize the
+ * frame, for example by filling or decoding the prior
+ * frame.
+ * @param opts Options for decoding. May be different from
+ * this->options() for decoding prior frames. Specifies
+ * the frame to decode and whether the prior frame has
+ * already been decoded to fDst. If not, and the frame
+ * is not independent, this method will recursively
+ * decode the frame it depends on.
+ * @param rowsDecoded Out-parameter to report the total number of rows
+ * that have been decoded (or at least written to, if
+ * it had to fill), including rows decoded by prior
+ * calls to onIncrementalDecode.
+ * @return kSuccess if the frame is complete, kIncompleteInput
+ * otherwise.
+ */
+ Result decodeFrame(bool firstAttempt, const Options& opts, int* rowsDecoded);
+
+ /*
+ * Swizzles and color xforms (if necessary) into dst.
+ */
+ void applyXformRow(const SkImageInfo& dstInfo, void* dst, const uint8_t* src) const;
+
+ /*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ * Takes ownership of the SkGifImageReader
+ */
+ SkGifCodec(SkEncodedInfo&&, SkGifImageReader*);
+
+ std::unique_ptr<SkGifImageReader> fReader;
+ std::unique_ptr<uint8_t[]> fTmpBuffer;
+ std::unique_ptr<SkSwizzler> fSwizzler;
+ sk_sp<SkColorTable> fCurrColorTable;
+ // If the input data contains no color map, we create a "dummy" table so
+ // that the client and the swizzler still have something to draw, and we
+ // set this value to false, letting us skip a lot of decoding work that
+ // would not be meaningful anyway.
+ bool fCurrColorTableIsReal;
+ // Whether the background was filled.
+ bool fFilledBackground;
+ // True on the first call to onIncrementalDecode. This value is passed to
+ // decodeFrame.
+ bool fFirstCallToIncrementalDecode;
+
+ void* fDst;
+ size_t fDstRowBytes;
+
+ // Updated inside haveDecodedRow when rows are decoded, unless we filled
+ // the background, in which case it is set once and left alone.
+ int fRowsDecoded;
+ std::unique_ptr<uint32_t[]> fXformBuffer;
+
+ typedef SkCodec INHERITED;
+};
+#endif // SkGifCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkHeifCodec.cpp b/gfx/skia/skia/src/codec/SkHeifCodec.cpp
new file mode 100644
index 0000000000..de51f1b408
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkHeifCodec.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_HAS_HEIF_LIBRARY
+#include "include/codec/SkCodec.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkHeifCodec.h"
+#include "src/core/SkEndian.h"
+
+#define FOURCC(c1, c2, c3, c4) \
+ ((c1) << 24 | (c2) << 16 | (c3) << 8 | (c4))
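+// e.g. FOURCC('f', 't', 'y', 'p') == 0x66747970, the box type as it appears
+// big-endian in the file.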
+
+bool SkHeifCodec::IsHeif(const void* buffer, size_t bytesRead) {
+ // Parse the ftyp box up to bytesRead to determine if this is HEIF.
+ // Any valid ftyp box should have at least 8 bytes.
+ if (bytesRead < 8) {
+ return false;
+ }
+
+ uint32_t* ptr = (uint32_t*)buffer;
+ uint64_t chunkSize = SkEndian_SwapBE32(ptr[0]);
+ uint32_t chunkType = SkEndian_SwapBE32(ptr[1]);
+
+ if (chunkType != FOURCC('f', 't', 'y', 'p')) {
+ return false;
+ }
+
+ int64_t offset = 8;
+ if (chunkSize == 1) {
+ // This indicates that the next 8 bytes represent the chunk size,
+ // and chunk data comes after that.
+ if (bytesRead < 16) {
+ return false;
+ }
+ auto* chunkSizePtr = SkTAddOffset<const uint64_t>(buffer, offset);
+ chunkSize = SkEndian_SwapBE64(*chunkSizePtr);
+ if (chunkSize < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return false;
+ }
+ offset += 8;
+ } else if (chunkSize < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ return false;
+ }
+
+ if (chunkSize > bytesRead) {
+ chunkSize = bytesRead;
+ }
+ int64_t chunkDataSize = chunkSize - offset;
+ // It should at least have a major brand (4 bytes) and a minor version (4 bytes).
+ // The rest of the chunk (if any) is a list of (4-byte) compatible brands.
+ if (chunkDataSize < 8) {
+ return false;
+ }
+
+ uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
+ for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+ if (i == 1) {
+ // Skip this index, it refers to the minorVersion,
+ // not a brand.
+ continue;
+ }
+ auto* brandPtr = SkTAddOffset<const uint32_t>(buffer, offset + 4 * i);
+ uint32_t brand = SkEndian_SwapBE32(*brandPtr);
+ if (brand == FOURCC('m', 'i', 'f', '1') || brand == FOURCC('h', 'e', 'i', 'c')
+ || brand == FOURCC('m', 's', 'f', '1') || brand == FOURCC('h', 'e', 'v', 'c')) {
+ return true;
+ }
+ }
+ return false;
+}
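+
+// For reference, a minimal 'ftyp' box that this check accepts (bytes are a
+// constructed example, shown as they appear in the file):
+//
+// 00 00 00 18 66 74 79 70 chunkSize = 24, chunkType = 'ftyp'
+// 6d 69 66 31 00 00 00 00 major brand 'mif1', minor version 0
+// 6d 69 66 31 68 65 69 63 compatible brands 'mif1', 'heic'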
+
+static SkEncodedOrigin get_orientation(const HeifFrameInfo& frameInfo) {
+ switch (frameInfo.mRotationAngle) {
+ case 0: return kTopLeft_SkEncodedOrigin;
+ case 90: return kRightTop_SkEncodedOrigin;
+ case 180: return kBottomRight_SkEncodedOrigin;
+ case 270: return kLeftBottom_SkEncodedOrigin;
+ }
+ return kDefault_SkEncodedOrigin;
+}
+
+struct SkHeifStreamWrapper : public HeifStream {
+ SkHeifStreamWrapper(SkStream* stream) : fStream(stream) {}
+
+ ~SkHeifStreamWrapper() override {}
+
+ size_t read(void* buffer, size_t size) override {
+ return fStream->read(buffer, size);
+ }
+
+ bool rewind() override {
+ return fStream->rewind();
+ }
+
+ bool seek(size_t position) override {
+ return fStream->seek(position);
+ }
+
+ bool hasLength() const override {
+ return fStream->hasLength();
+ }
+
+ size_t getLength() const override {
+ return fStream->getLength();
+ }
+
+private:
+ std::unique_ptr<SkStream> fStream;
+};
+
+static void releaseProc(const void* ptr, void* context) {
+ delete reinterpret_cast<std::vector<uint8_t>*>(context);
+}
+
+std::unique_ptr<SkCodec> SkHeifCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ SkCodec::SelectionPolicy selectionPolicy, Result* result) {
+ std::unique_ptr<HeifDecoder> heifDecoder(createHeifDecoder());
+ if (heifDecoder.get() == nullptr) {
+ *result = kInternalError;
+ return nullptr;
+ }
+
+ HeifFrameInfo heifInfo;
+ if (!heifDecoder->init(new SkHeifStreamWrapper(stream.release()), &heifInfo)) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ size_t frameCount = 1;
+ if (selectionPolicy == SkCodec::SelectionPolicy::kPreferAnimation) {
+ HeifFrameInfo sequenceInfo;
+ if (heifDecoder->getSequenceInfo(&sequenceInfo, &frameCount) &&
+ frameCount > 1) {
+ heifInfo = std::move(sequenceInfo);
+ }
+ }
+
+ std::unique_ptr<SkEncodedInfo::ICCProfile> profile = nullptr;
+ if (heifInfo.mIccData.size() > 0) {
+ auto iccData = new std::vector<uint8_t>(std::move(heifInfo.mIccData));
+ auto icc = SkData::MakeWithProc(iccData->data(), iccData->size(), releaseProc, iccData);
+ profile = SkEncodedInfo::ICCProfile::Make(std::move(icc));
+ }
+ if (profile && profile->profile()->data_color_space != skcms_Signature_RGB) {
+ // This will result in sRGB.
+ profile = nullptr;
+ }
+
+ SkEncodedInfo info = SkEncodedInfo::Make(heifInfo.mWidth, heifInfo.mHeight,
+ SkEncodedInfo::kYUV_Color, SkEncodedInfo::kOpaque_Alpha, 8, std::move(profile));
+ SkEncodedOrigin orientation = get_orientation(heifInfo);
+
+ *result = kSuccess;
+ return std::unique_ptr<SkCodec>(new SkHeifCodec(
+ std::move(info), heifDecoder.release(), orientation, frameCount > 1));
+}
+
+SkHeifCodec::SkHeifCodec(
+ SkEncodedInfo&& info,
+ HeifDecoder* heifDecoder,
+ SkEncodedOrigin origin,
+ bool useAnimation)
+ : INHERITED(std::move(info), skcms_PixelFormat_RGBA_8888, nullptr, origin)
+ , fHeifDecoder(heifDecoder)
+ , fSwizzleSrcRow(nullptr)
+ , fColorXformSrcRow(nullptr)
+ , fUseAnimation(useAnimation)
+{}
+
+bool SkHeifCodec::conversionSupported(const SkImageInfo& dstInfo, bool srcIsOpaque,
+ bool needsColorXform) {
+ SkASSERT(srcIsOpaque);
+
+ if (kUnknown_SkAlphaType == dstInfo.alphaType()) {
+ return false;
+ }
+
+ if (kOpaque_SkAlphaType != dstInfo.alphaType()) {
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ }
+
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ return fHeifDecoder->setOutputColor(kHeifColorFormat_RGBA_8888);
+
+ case kBGRA_8888_SkColorType:
+ return fHeifDecoder->setOutputColor(kHeifColorFormat_BGRA_8888);
+
+ case kRGB_565_SkColorType:
+ if (needsColorXform) {
+ return fHeifDecoder->setOutputColor(kHeifColorFormat_RGBA_8888);
+ } else {
+ return fHeifDecoder->setOutputColor(kHeifColorFormat_RGB565);
+ }
+
+ case kRGBA_F16_SkColorType:
+ SkASSERT(needsColorXform);
+ return fHeifDecoder->setOutputColor(kHeifColorFormat_RGBA_8888);
+
+ default:
+ return false;
+ }
+}
+
+int SkHeifCodec::readRows(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, int count,
+ const Options& opts) {
+ // When fSwizzleSrcRow is non-null, it means that we need to swizzle. In this case,
+ // we will always decode into fSwizzleSrcRow before swizzling into the next buffer.
+ // We can never swizzle "in place" because the swizzler may perform sampling and/or
+ // subsetting.
+ // When fColorXformSrcRow is non-null, it means that we need to color xform and that
+ // we cannot color xform "in place" (many times we can, but not when the dst is F16).
+ // In this case, we will color xform from fColorXformSrcRow into the dst.
+ uint8_t* decodeDst = (uint8_t*) dst;
+ uint32_t* swizzleDst = (uint32_t*) dst;
+ size_t decodeDstRowBytes = rowBytes;
+ size_t swizzleDstRowBytes = rowBytes;
+ int dstWidth = opts.fSubset ? opts.fSubset->width() : dstInfo.width();
+ if (fSwizzleSrcRow && fColorXformSrcRow) {
+ decodeDst = fSwizzleSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ } else if (fColorXformSrcRow) {
+ decodeDst = (uint8_t*) fColorXformSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ } else if (fSwizzleSrcRow) {
+ decodeDst = fSwizzleSrcRow;
+ decodeDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ }
+
+ for (int y = 0; y < count; y++) {
+ if (!fHeifDecoder->getScanline(decodeDst)) {
+ return y;
+ }
+
+ if (fSwizzler) {
+ fSwizzler->swizzle(swizzleDst, decodeDst);
+ }
+
+ if (this->colorXform()) {
+ this->applyColorXform(dst, swizzleDst, dstWidth);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ }
+
+ decodeDst = SkTAddOffset<uint8_t>(decodeDst, decodeDstRowBytes);
+ swizzleDst = SkTAddOffset<uint32_t>(swizzleDst, swizzleDstRowBytes);
+ }
+
+ return count;
+}
+
+int SkHeifCodec::onGetFrameCount() {
+ if (!fUseAnimation) {
+ return 1;
+ }
+
+ if (fFrameHolder.size() == 0) {
+ size_t frameCount;
+ HeifFrameInfo frameInfo;
+ if (!fHeifDecoder->getSequenceInfo(&frameInfo, &frameCount)
+ || frameCount <= 1) {
+ fUseAnimation = false;
+ return 1;
+ }
+ fFrameHolder.reserve(frameCount);
+ for (size_t i = 0; i < frameCount; i++) {
+ Frame* frame = fFrameHolder.appendNewFrame();
+ frame->setXYWH(0, 0, frameInfo.mWidth, frameInfo.mHeight);
+ frame->setDisposalMethod(SkCodecAnimation::DisposalMethod::kKeep);
+ // Currently we don't know the duration until the frame is actually
+ // decoded (onGetFrameInfo is also called before the frame is decoded).
+ // For now, fill it based on the value reported for the sequence.
+ frame->setDuration(frameInfo.mDurationUs / 1000);
+ frame->setRequiredFrame(SkCodec::kNoFrame);
+ frame->setHasAlpha(false);
+ }
+ }
+
+ return fFrameHolder.size();
+}
+
+const SkFrame* SkHeifCodec::FrameHolder::onGetFrame(int i) const {
+ return static_cast<const SkFrame*>(this->frame(i));
+}
+
+SkHeifCodec::Frame* SkHeifCodec::FrameHolder::appendNewFrame() {
+ const int i = this->size();
+ fFrames.emplace_back(i); // TODO: need to handle frame duration here
+ return &fFrames[i];
+}
+
+const SkHeifCodec::Frame* SkHeifCodec::FrameHolder::frame(int i) const {
+ SkASSERT(i >= 0 && i < this->size());
+ return &fFrames[i];
+}
+
+SkHeifCodec::Frame* SkHeifCodec::FrameHolder::editFrameAt(int i) {
+ SkASSERT(i >= 0 && i < this->size());
+ return &fFrames[i];
+}
+
+bool SkHeifCodec::onGetFrameInfo(int i, FrameInfo* frameInfo) const {
+ if (i >= fFrameHolder.size()) {
+ return false;
+ }
+
+ const Frame* frame = fFrameHolder.frame(i);
+ if (!frame) {
+ return false;
+ }
+
+ if (frameInfo) {
+ frameInfo->fRequiredFrame = SkCodec::kNoFrame;
+ frameInfo->fDuration = frame->getDuration();
+ frameInfo->fFullyReceived = true;
+ frameInfo->fAlphaType = kOpaque_SkAlphaType;
+ frameInfo->fDisposalMethod = SkCodecAnimation::DisposalMethod::kKeep;
+ }
+
+ return true;
+}
+
+int SkHeifCodec::onGetRepetitionCount() {
+ return kRepetitionCountInfinite;
+}
+
+/*
+ * Performs the heif decode
+ */
+SkCodec::Result SkHeifCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& options,
+ int* rowsDecoded) {
+ if (options.fSubset) {
+ // Not supporting subsets on this path for now.
+ // TODO: if the heif has tiles, we can support subset here, but
+ // need to retrieve tile config from metadata retriever first.
+ return kUnimplemented;
+ }
+
+ bool success;
+ if (fUseAnimation) {
+ success = fHeifDecoder->decodeSequence(options.fFrameIndex, &fFrameInfo);
+ fFrameHolder.editFrameAt(options.fFrameIndex)->setDuration(
+ fFrameInfo.mDurationUs / 1000);
+ } else {
+ success = fHeifDecoder->decode(&fFrameInfo);
+ }
+
+ if (!success) {
+ return kInvalidInput;
+ }
+
+ fSwizzler.reset(nullptr);
+ this->allocateStorage(dstInfo);
+
+ int rows = this->readRows(dstInfo, dst, dstRowBytes, dstInfo.height(), options);
+ if (rows < dstInfo.height()) {
+ *rowsDecoded = rows;
+ return kIncompleteInput;
+ }
+
+ return kSuccess;
+}
+
+void SkHeifCodec::allocateStorage(const SkImageInfo& dstInfo) {
+ int dstWidth = dstInfo.width();
+
+ size_t swizzleBytes = 0;
+ if (fSwizzler) {
+ swizzleBytes = fFrameInfo.mBytesPerPixel * fFrameInfo.mWidth;
+ dstWidth = fSwizzler->swizzleWidth();
+ SkASSERT(!this->colorXform() || SkIsAlign4(swizzleBytes));
+ }
+
+ size_t xformBytes = 0;
+ if (this->colorXform() && (kRGBA_F16_SkColorType == dstInfo.colorType() ||
+ kRGB_565_SkColorType == dstInfo.colorType())) {
+ xformBytes = dstWidth * sizeof(uint32_t);
+ }
+
+ size_t totalBytes = swizzleBytes + xformBytes;
+ fStorage.reset(totalBytes);
+ if (totalBytes > 0) {
+ fSwizzleSrcRow = (swizzleBytes > 0) ? fStorage.get() : nullptr;
+ fColorXformSrcRow = (xformBytes > 0) ?
+ SkTAddOffset<uint32_t>(fStorage.get(), swizzleBytes) : nullptr;
+ }
+}
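+
+// Layout sketch (sizes illustrative): when both scratch rows are needed,
+// fStorage holds them back to back:
+//
+// fStorage: [ swizzleBytes -> fSwizzleSrcRow ][ xformBytes -> fColorXformSrcRow ]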
+
+void SkHeifCodec::initializeSwizzler(
+ const SkImageInfo& dstInfo, const Options& options) {
+ SkImageInfo swizzlerDstInfo = dstInfo;
+ if (this->colorXform()) {
+ // The color xform will be expecting RGBA 8888 input.
+ swizzlerDstInfo = swizzlerDstInfo.makeColorType(kRGBA_8888_SkColorType);
+ }
+
+ int srcBPP = 4;
+ if (dstInfo.colorType() == kRGB_565_SkColorType && !this->colorXform()) {
+ srcBPP = 2;
+ }
+
+ fSwizzler = SkSwizzler::MakeSimple(srcBPP, swizzlerDstInfo, options);
+ SkASSERT(fSwizzler);
+}
+
+SkSampler* SkHeifCodec::getSampler(bool createIfNecessary) {
+ if (!createIfNecessary || fSwizzler) {
+ SkASSERT(!fSwizzler || (fSwizzleSrcRow && fStorage.get() == fSwizzleSrcRow));
+ return fSwizzler.get();
+ }
+
+ this->initializeSwizzler(this->dstInfo(), this->options());
+ this->allocateStorage(this->dstInfo());
+ return fSwizzler.get();
+}
+
+bool SkHeifCodec::onRewind() {
+ fSwizzler.reset(nullptr);
+ fSwizzleSrcRow = nullptr;
+ fColorXformSrcRow = nullptr;
+ fStorage.reset();
+
+ return true;
+}
+
+SkCodec::Result SkHeifCodec::onStartScanlineDecode(
+ const SkImageInfo& dstInfo, const Options& options) {
+ // TODO: For now, just decode the whole thing even when there is a subset.
+ // If the heif image has tiles, we could potentially do this much faster,
+ // but the tile configuration needs to be retrieved from the metadata.
+ if (!fHeifDecoder->decode(&fFrameInfo)) {
+ return kInvalidInput;
+ }
+
+ if (options.fSubset) {
+ this->initializeSwizzler(dstInfo, options);
+ } else {
+ fSwizzler.reset(nullptr);
+ }
+
+ this->allocateStorage(dstInfo);
+
+ return kSuccess;
+}
+
+int SkHeifCodec::onGetScanlines(void* dst, int count, size_t dstRowBytes) {
+ return this->readRows(this->dstInfo(), dst, dstRowBytes, count, this->options());
+}
+
+bool SkHeifCodec::onSkipScanlines(int count) {
+ return count == (int) fHeifDecoder->skipScanlines(count);
+}
+
+#endif // SK_HAS_HEIF_LIBRARY
diff --git a/gfx/skia/skia/src/codec/SkHeifCodec.h b/gfx/skia/skia/src/codec/SkHeifCodec.h
new file mode 100644
index 0000000000..bbcf722295
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkHeifCodec.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHeifCodec_DEFINED
+#define SkHeifCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkStream.h"
+#include "src/codec/SkFrameHolder.h"
+#include "src/codec/SkSwizzler.h"
+
+#if __has_include("HeifDecoderAPI.h")
+ #include "HeifDecoderAPI.h"
+#else
+ #include "src/codec/SkStubHeifDecoderAPI.h"
+#endif
+
+class SkHeifCodec : public SkCodec {
+public:
+ static bool IsHeif(const void*, size_t);
+
+ /*
+ * Assumes IsHeif was called and returned true.
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(
+ std::unique_ptr<SkStream>, SkCodec::SelectionPolicy selectionPolicy, Result*);
+
+protected:
+
+ Result onGetPixels(
+ const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& options,
+ int* rowsDecoded) override;
+
+ SkEncodedImageFormat onGetEncodedFormat() const override {
+ return SkEncodedImageFormat::kHEIF;
+ }
+
+ int onGetFrameCount() override;
+ bool onGetFrameInfo(int, FrameInfo*) const override;
+ int onGetRepetitionCount() override;
+ const SkFrameHolder* getFrameHolder() const override {
+ return &fFrameHolder;
+ }
+
+ bool conversionSupported(const SkImageInfo&, bool, bool) override;
+
+ bool onRewind() override;
+
+private:
+ /*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+ SkHeifCodec(SkEncodedInfo&&, HeifDecoder*, SkEncodedOrigin, bool animation);
+
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options& options);
+ void allocateStorage(const SkImageInfo& dstInfo);
+ int readRows(const SkImageInfo& dstInfo, void* dst,
+ size_t rowBytes, int count, const Options&);
+
+ /*
+ * Scanline decoding.
+ */
+ SkSampler* getSampler(bool createIfNecessary) override;
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options) override;
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+ bool onSkipScanlines(int count) override;
+
+ std::unique_ptr<HeifDecoder> fHeifDecoder;
+ HeifFrameInfo fFrameInfo;
+ SkAutoTMalloc<uint8_t> fStorage;
+ uint8_t* fSwizzleSrcRow;
+ uint32_t* fColorXformSrcRow;
+
+ std::unique_ptr<SkSwizzler> fSwizzler;
+ bool fUseAnimation;
+
+ class Frame : public SkFrame {
+ public:
+ Frame(int i) : INHERITED(i) {}
+
+ protected:
+ SkEncodedInfo::Alpha onReportedAlpha() const override {
+ return SkEncodedInfo::Alpha::kOpaque_Alpha;
+ }
+
+ private:
+ typedef SkFrame INHERITED;
+ };
+
+ class FrameHolder : public SkFrameHolder {
+ public:
+ ~FrameHolder() override {}
+ void setScreenSize(int w, int h) {
+ fScreenWidth = w;
+ fScreenHeight = h;
+ }
+ Frame* appendNewFrame();
+ const Frame* frame(int i) const;
+ Frame* editFrameAt(int i);
+ int size() const {
+ return static_cast<int>(fFrames.size());
+ }
+ void reserve(int size) {
+ fFrames.reserve(size);
+ }
+
+ protected:
+ const SkFrame* onGetFrame(int i) const override;
+
+ private:
+ std::vector<Frame> fFrames;
+ };
+
+ FrameHolder fFrameHolder;
+ typedef SkCodec INHERITED;
+};
+
+#endif // SkHeifCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkIcoCodec.cpp b/gfx/skia/skia/src/codec/SkIcoCodec.cpp
new file mode 100644
index 0000000000..e70a9cded4
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkIcoCodec.cpp
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTDArray.h"
+#include "src/codec/SkBmpCodec.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkIcoCodec.h"
+#include "src/codec/SkPngCodec.h"
+#include "src/core/SkTSort.h"
+
+/*
+ * Checks the start of the stream to see if the image is an Ico or Cur
+ */
+bool SkIcoCodec::IsIco(const void* buffer, size_t bytesRead) {
+ const char icoSig[] = { '\x00', '\x00', '\x01', '\x00' };
+ const char curSig[] = { '\x00', '\x00', '\x02', '\x00' };
+ return bytesRead >= sizeof(icoSig) &&
+ (!memcmp(buffer, icoSig, sizeof(icoSig)) ||
+ !memcmp(buffer, curSig, sizeof(curSig)));
+}
+
+std::unique_ptr<SkCodec> SkIcoCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ // Header size constants
+ constexpr uint32_t kIcoDirectoryBytes = 6;
+ constexpr uint32_t kIcoDirEntryBytes = 16;
+
+ // Read the directory header
+ std::unique_ptr<uint8_t[]> dirBuffer(new uint8_t[kIcoDirectoryBytes]);
+ if (stream->read(dirBuffer.get(), kIcoDirectoryBytes) != kIcoDirectoryBytes) {
+ SkCodecPrintf("Error: unable to read ico directory header.\n");
+ *result = kIncompleteInput;
+ return nullptr;
+ }
+
+ // Process the directory header
+ const uint16_t numImages = get_short(dirBuffer.get(), 4);
+ if (0 == numImages) {
+ SkCodecPrintf("Error: No images embedded in ico.\n");
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ // This structure is used to represent the vital information about entries
+ // in the directory header. We will obtain this information for each
+ // directory entry.
+ struct Entry {
+ uint32_t offset;
+ uint32_t size;
+ };
+ SkAutoFree dirEntryBuffer(sk_malloc_canfail(sizeof(Entry) * numImages));
+ if (!dirEntryBuffer) {
+ SkCodecPrintf("Error: OOM allocating ICO directory for %i images.\n",
+ numImages);
+ *result = kInternalError;
+ return nullptr;
+ }
+ auto* directoryEntries = reinterpret_cast<Entry*>(dirEntryBuffer.get());
+
+ // Iterate over directory entries
+ for (uint32_t i = 0; i < numImages; i++) {
+ uint8_t entryBuffer[kIcoDirEntryBytes];
+ if (stream->read(entryBuffer, kIcoDirEntryBytes) != kIcoDirEntryBytes) {
+ SkCodecPrintf("Error: Dir entries truncated in ico.\n");
+ *result = kIncompleteInput;
+ return nullptr;
+ }
+
+ // The directory entry contains information such as width, height,
+ // bits per pixel, and number of colors in the color palette. We will
+ // ignore these fields since they are repeated in the header of the
+ // embedded image. In the event of an inconsistency, we would always
+ // defer to the value in the embedded header anyway.
+
+ // Specifies the size of the embedded image, including the header
+ uint32_t size = get_int(entryBuffer, 8);
+
+ // Specifies the offset of the embedded image from the start of file.
+ // It does not indicate the start of the pixel data, but rather the
+ // start of the embedded image header.
+ uint32_t offset = get_int(entryBuffer, 12);
+
+ // Save the vital fields
+ directoryEntries[i].offset = offset;
+ directoryEntries[i].size = size;
+ }
+
+ // Default Result, if no valid embedded codecs are found.
+ *result = kInvalidInput;
+
+ // It is "customary" that the embedded images will be stored in order of
+ // increasing offset. However, the specification does not indicate that
+ // they must be stored in this order, so we will not trust that this is the
+ // case. Here we sort the embedded images by increasing offset.
+ struct EntryLessThan {
+ bool operator() (Entry a, Entry b) const {
+ return a.offset < b.offset;
+ }
+ };
+ EntryLessThan lessThan;
+ SkTQSort(directoryEntries, &directoryEntries[numImages - 1], lessThan);
+
+ // Now will construct a candidate codec for each of the embedded images
+ uint32_t bytesRead = kIcoDirectoryBytes + numImages * kIcoDirEntryBytes;
+ std::unique_ptr<SkTArray<std::unique_ptr<SkCodec>, true>> codecs(
+ new SkTArray<std::unique_ptr<SkCodec>, true>(numImages));
+ for (uint32_t i = 0; i < numImages; i++) {
+ uint32_t offset = directoryEntries[i].offset;
+ uint32_t size = directoryEntries[i].size;
+
+ // Ensure that the offset is valid
+ if (offset < bytesRead) {
+ SkCodecPrintf("Warning: invalid ico offset.\n");
+ continue;
+ }
+
+ // If we cannot skip, assume we have reached the end of the stream and
+ // stop trying to make codecs
+ if (stream->skip(offset - bytesRead) != offset - bytesRead) {
+ SkCodecPrintf("Warning: could not skip to ico offset.\n");
+ break;
+ }
+ bytesRead = offset;
+
+ // Create a new stream for the embedded codec
+ SkAutoFree buffer(sk_malloc_canfail(size));
+ if (!buffer) {
+ SkCodecPrintf("Warning: OOM trying to create embedded stream.\n");
+ break;
+ }
+
+ if (stream->read(buffer.get(), size) != size) {
+ SkCodecPrintf("Warning: could not create embedded stream.\n");
+ *result = kIncompleteInput;
+ break;
+ }
+
+ sk_sp<SkData> data(SkData::MakeFromMalloc(buffer.release(), size));
+ auto embeddedStream = SkMemoryStream::Make(data);
+ bytesRead += size;
+
+ // Check if the embedded codec is bmp or png and create the codec
+ std::unique_ptr<SkCodec> codec;
+ Result dummyResult;
+ if (SkPngCodec::IsPng((const char*) data->bytes(), data->size())) {
+ codec = SkPngCodec::MakeFromStream(std::move(embeddedStream), &dummyResult);
+ } else {
+ codec = SkBmpCodec::MakeFromIco(std::move(embeddedStream), &dummyResult);
+ }
+
+ // Save a valid codec
+ if (nullptr != codec) {
+ codecs->push_back().reset(codec.release());
+ }
+ }
+
+ // Recognize if there are no valid codecs
+ if (0 == codecs->count()) {
+ SkCodecPrintf("Error: could not find any valid embedded ico codecs.\n");
+ return nullptr;
+ }
+
+ // Use the largest codec as a "suggestion" for image info
+ size_t maxSize = 0;
+ int maxIndex = 0;
+ for (int i = 0; i < codecs->count(); i++) {
+ SkImageInfo info = codecs->operator[](i)->getInfo();
+ size_t size = info.computeMinByteSize();
+
+ if (size > maxSize) {
+ maxSize = size;
+ maxIndex = i;
+ }
+ }
+
+ auto maxInfo = codecs->operator[](maxIndex)->getEncodedInfo().copy();
+
+ *result = kSuccess;
+ // The original stream is no longer needed, because the embedded codecs own their
+ // own streams.
+ return std::unique_ptr<SkCodec>(new SkIcoCodec(std::move(maxInfo), codecs.release()));
+}
+
+SkIcoCodec::SkIcoCodec(SkEncodedInfo&& info, SkTArray<std::unique_ptr<SkCodec>, true>* codecs)
+ // The source skcms_PixelFormat will not be used. The embedded
+    // codecs' formats will be used instead.
+ : INHERITED(std::move(info), skcms_PixelFormat(), nullptr)
+ , fEmbeddedCodecs(codecs)
+ , fCurrCodec(nullptr)
+{}
+
+/*
+ * Chooses the best dimensions given the desired scale
+ */
+SkISize SkIcoCodec::onGetScaledDimensions(float desiredScale) const {
+ // We set the dimensions to the largest candidate image by default.
+ // Regardless of the scale request, this is the largest image that we
+ // will decode.
+ int origWidth = this->dimensions().width();
+ int origHeight = this->dimensions().height();
+ float desiredSize = desiredScale * origWidth * origHeight;
+ // At least one image will have smaller error than this initial value
+ float minError = ((float) (origWidth * origHeight)) - desiredSize + 1.0f;
+ int32_t minIndex = -1;
+ for (int32_t i = 0; i < fEmbeddedCodecs->count(); i++) {
+ auto dimensions = fEmbeddedCodecs->operator[](i)->dimensions();
+ int width = dimensions.width();
+ int height = dimensions.height();
+ float error = SkTAbs(((float) (width * height)) - desiredSize);
+ if (error < minError) {
+ minError = error;
+ minIndex = i;
+ }
+ }
+ SkASSERT(minIndex >= 0);
+
+ return fEmbeddedCodecs->operator[](minIndex)->dimensions();
+}
+
+int SkIcoCodec::chooseCodec(const SkISize& requestedSize, int startIndex) {
+ SkASSERT(startIndex >= 0);
+
+ // FIXME: Cache the index from onGetScaledDimensions?
+ for (int i = startIndex; i < fEmbeddedCodecs->count(); i++) {
+ if (fEmbeddedCodecs->operator[](i)->dimensions() == requestedSize) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+bool SkIcoCodec::onDimensionsSupported(const SkISize& dim) {
+ return this->chooseCodec(dim, 0) >= 0;
+}
+
+/*
+ * Initiates the Ico decode
+ */
+SkCodec::Result SkIcoCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ int index = 0;
+ SkCodec::Result result = kInvalidScale;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index).get();
+ result = embeddedCodec->getPixels(dstInfo, dst, dstRowBytes, &opts);
+ switch (result) {
+ case kSuccess:
+ case kIncompleteInput:
+ // The embedded codec will handle filling incomplete images, so we will indicate
+ // that all of the rows are initialized.
+ *rowsDecoded = dstInfo.height();
+ return result;
+ default:
+ // Continue trying to find a valid embedded codec on a failed decode.
+ break;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return result;
+}
+
+SkCodec::Result SkIcoCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ int index = 0;
+ SkCodec::Result result = kInvalidScale;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index).get();
+ result = embeddedCodec->startScanlineDecode(dstInfo, &options);
+ if (kSuccess == result) {
+ fCurrCodec = embeddedCodec;
+ return result;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return result;
+}
+
+int SkIcoCodec::onGetScanlines(void* dst, int count, size_t rowBytes) {
+ SkASSERT(fCurrCodec);
+ return fCurrCodec->getScanlines(dst, count, rowBytes);
+}
+
+bool SkIcoCodec::onSkipScanlines(int count) {
+ SkASSERT(fCurrCodec);
+ return fCurrCodec->skipScanlines(count);
+}
+
+SkCodec::Result SkIcoCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* pixels, size_t rowBytes, const SkCodec::Options& options) {
+ int index = 0;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index).get();
+ switch (embeddedCodec->startIncrementalDecode(dstInfo,
+ pixels, rowBytes, &options)) {
+ case kSuccess:
+ fCurrCodec = embeddedCodec;
+ return kSuccess;
+ case kUnimplemented:
+ // FIXME: embeddedCodec is a BMP. If scanline decoding would work,
+ // return kUnimplemented so that SkSampledCodec will fall through
+ // to use the scanline decoder.
+ // Note that calling startScanlineDecode will require an extra
+ // rewind. The embedded codec has an SkMemoryStream, which is
+ // cheap to rewind, though it will do extra work re-reading the
+ // header.
+ // Also note that we pass nullptr for Options. This is because
+ // Options that are valid for incremental decoding may not be
+ // valid for scanline decoding.
+ // Once BMP supports incremental decoding this workaround can go
+ // away.
+ if (embeddedCodec->startScanlineDecode(dstInfo) == kSuccess) {
+ return kUnimplemented;
+ }
+ // Move on to the next embedded codec.
+ break;
+ default:
+ break;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return kInvalidScale;
+}
+
+SkCodec::Result SkIcoCodec::onIncrementalDecode(int* rowsDecoded) {
+ SkASSERT(fCurrCodec);
+ return fCurrCodec->incrementalDecode(rowsDecoded);
+}
+
+SkCodec::SkScanlineOrder SkIcoCodec::onGetScanlineOrder() const {
+ // FIXME: This function will possibly return the wrong value if it is called
+ // before startScanlineDecode()/startIncrementalDecode().
+ if (fCurrCodec) {
+ return fCurrCodec->getScanlineOrder();
+ }
+
+ return INHERITED::onGetScanlineOrder();
+}
+
+SkSampler* SkIcoCodec::getSampler(bool createIfNecessary) {
+ if (fCurrCodec) {
+ return fCurrCodec->getSampler(createIfNecessary);
+ }
+
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/codec/SkIcoCodec.h b/gfx/skia/skia/src/codec/SkIcoCodec.h
new file mode 100644
index 0000000000..c1b27dc50d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkIcoCodec.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIcoCodec_DEFINED
+#define SkIcoCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTArray.h"
+
+/*
+ * This class implements the decoding for ico images
+ */
+class SkIcoCodec : public SkCodec {
+public:
+ static bool IsIco(const void*, size_t);
+
+ /*
+ * Assumes IsIco was called and returned true
+ * Creates an Ico decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+protected:
+
+ /*
+ * Chooses the best dimensions given the desired scale
+ */
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ /*
+ * Initiates the Ico decode
+ */
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ int*) override;
+
+ SkEncodedImageFormat onGetEncodedFormat() const override {
+ return SkEncodedImageFormat::kICO;
+ }
+
+ SkScanlineOrder onGetScanlineOrder() const override;
+
+ bool conversionSupported(const SkImageInfo&, bool, bool) override {
+ // This will be checked by the embedded codec.
+ return true;
+ }
+
+ // Handled by the embedded codec.
+ bool usesColorXform() const override { return false; }
+private:
+
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) override;
+
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+
+ bool onSkipScanlines(int count) override;
+
+ Result onStartIncrementalDecode(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ const SkCodec::Options&) override;
+
+ Result onIncrementalDecode(int* rowsDecoded) override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+
+ /*
+ * Searches fEmbeddedCodecs for a codec that matches requestedSize.
+ * The search starts at startIndex and ends when an appropriate codec
+ * is found, or we have reached the end of the array.
+ *
+ * @return the index of the matching codec or -1 if there is no
+ * matching codec between startIndex and the end of
+ * the array.
+ */
+ int chooseCodec(const SkISize& requestedSize, int startIndex);
+
+ /*
+     * Constructor called by MakeFromStream
+ * @param embeddedCodecs codecs for the embedded images, takes ownership
+ */
+ SkIcoCodec(SkEncodedInfo&& info, SkTArray<std::unique_ptr<SkCodec>, true>* embeddedCodecs);
+
+ std::unique_ptr<SkTArray<std::unique_ptr<SkCodec>, true>> fEmbeddedCodecs;
+
+ // fCurrCodec is owned by this class, but should not be an
+ // std::unique_ptr. It will be deleted by the destructor of fEmbeddedCodecs.
+ SkCodec* fCurrCodec;
+
+ typedef SkCodec INHERITED;
+};
+#endif // SkIcoCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkJpegCodec.cpp b/gfx/skia/skia/src/codec/SkJpegCodec.cpp
new file mode 100644
index 0000000000..45bd35dd5d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegCodec.cpp
@@ -0,0 +1,974 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkJpegCodec.h"
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkJpegDecoderMgr.h"
+#include "src/codec/SkParseEncodedOrigin.h"
+#include "src/pdf/SkJpegInfo.h"
+
+// stdio is needed for libjpeg-turbo
+#include <stdio.h>
+#include "src/codec/SkJpegUtility.h"
+
+// This warning triggers false positives far too often in this file.
+#if defined(__GNUC__) && !defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wclobbered"
+#endif
+
+extern "C" {
+ #include "jerror.h"
+ #include "jpeglib.h"
+}
+
+bool SkJpegCodec::IsJpeg(const void* buffer, size_t bytesRead) {
+ constexpr uint8_t jpegSig[] = { 0xFF, 0xD8, 0xFF };
+ return bytesRead >= 3 && !memcmp(buffer, jpegSig, sizeof(jpegSig));
+}
+
+const uint32_t kExifHeaderSize = 14;
+const uint32_t kExifMarker = JPEG_APP0 + 1;
+
+static bool is_orientation_marker(jpeg_marker_struct* marker, SkEncodedOrigin* orientation) {
+ if (kExifMarker != marker->marker || marker->data_length < kExifHeaderSize) {
+ return false;
+ }
+
+ constexpr uint8_t kExifSig[] { 'E', 'x', 'i', 'f', '\0' };
+ if (memcmp(marker->data, kExifSig, sizeof(kExifSig))) {
+ return false;
+ }
+
+ // Account for 'E', 'x', 'i', 'f', '\0', '<fill byte>'.
+ constexpr size_t kOffset = 6;
+ return SkParseEncodedOrigin(marker->data + kOffset, marker->data_length - kOffset,
+ orientation);
+}
+
+static SkEncodedOrigin get_exif_orientation(jpeg_decompress_struct* dinfo) {
+ SkEncodedOrigin orientation;
+ for (jpeg_marker_struct* marker = dinfo->marker_list; marker; marker = marker->next) {
+ if (is_orientation_marker(marker, &orientation)) {
+ return orientation;
+ }
+ }
+
+ return kDefault_SkEncodedOrigin;
+}
+
+static bool is_icc_marker(jpeg_marker_struct* marker) {
+ if (kICCMarker != marker->marker || marker->data_length < kICCMarkerHeaderSize) {
+ return false;
+ }
+
+ return !memcmp(marker->data, kICCSig, sizeof(kICCSig));
+}
+
+/*
+ * ICC profiles may be stored using a sequence of multiple markers. We obtain the ICC profile
+ * in two steps:
+ * (1) Discover all ICC profile markers and verify that they are numbered properly.
+ * (2) Copy the data from each marker into a contiguous ICC profile.
+ */
+static std::unique_ptr<SkEncodedInfo::ICCProfile> read_color_profile(jpeg_decompress_struct* dinfo)
+{
+    // Note that 256 will be enough storage space since each markerIndex is stored in 8 bits.
+ jpeg_marker_struct* markerSequence[256];
+ memset(markerSequence, 0, sizeof(markerSequence));
+ uint8_t numMarkers = 0;
+ size_t totalBytes = 0;
+
+ // Discover any ICC markers and verify that they are numbered properly.
+ for (jpeg_marker_struct* marker = dinfo->marker_list; marker; marker = marker->next) {
+ if (is_icc_marker(marker)) {
+ // Verify that numMarkers is valid and consistent.
+ if (0 == numMarkers) {
+ numMarkers = marker->data[13];
+ if (0 == numMarkers) {
+ SkCodecPrintf("ICC Profile Error: numMarkers must be greater than zero.\n");
+ return nullptr;
+ }
+ } else if (numMarkers != marker->data[13]) {
+ SkCodecPrintf("ICC Profile Error: numMarkers must be consistent.\n");
+ return nullptr;
+ }
+
+ // Verify that the markerIndex is valid and unique. Note that zero is not
+ // a valid index.
+ uint8_t markerIndex = marker->data[12];
+ if (markerIndex == 0 || markerIndex > numMarkers) {
+ SkCodecPrintf("ICC Profile Error: markerIndex is invalid.\n");
+ return nullptr;
+ }
+ if (markerSequence[markerIndex]) {
+ SkCodecPrintf("ICC Profile Error: Duplicate value of markerIndex.\n");
+ return nullptr;
+ }
+ markerSequence[markerIndex] = marker;
+ SkASSERT(marker->data_length >= kICCMarkerHeaderSize);
+ totalBytes += marker->data_length - kICCMarkerHeaderSize;
+ }
+ }
+
+ if (0 == totalBytes) {
+ // No non-empty ICC profile markers were found.
+ return nullptr;
+ }
+
+ // Combine the ICC marker data into a contiguous profile.
+ sk_sp<SkData> iccData = SkData::MakeUninitialized(totalBytes);
+ void* dst = iccData->writable_data();
+ for (uint32_t i = 1; i <= numMarkers; i++) {
+ jpeg_marker_struct* marker = markerSequence[i];
+ if (!marker) {
+ SkCodecPrintf("ICC Profile Error: Missing marker %d of %d.\n", i, numMarkers);
+ return nullptr;
+ }
+
+ void* src = SkTAddOffset<void>(marker->data, kICCMarkerHeaderSize);
+ size_t bytes = marker->data_length - kICCMarkerHeaderSize;
+ memcpy(dst, src, bytes);
+ dst = SkTAddOffset<void>(dst, bytes);
+ }
+
+ return SkEncodedInfo::ICCProfile::Make(std::move(iccData));
+}
+
+SkCodec::Result SkJpegCodec::ReadHeader(SkStream* stream, SkCodec** codecOut,
+ JpegDecoderMgr** decoderMgrOut,
+ std::unique_ptr<SkEncodedInfo::ICCProfile> defaultColorProfile) {
+
+ // Create a JpegDecoderMgr to own all of the decompress information
+ std::unique_ptr<JpegDecoderMgr> decoderMgr(new JpegDecoderMgr(stream));
+
+ // libjpeg errors will be caught and reported here
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(decoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return decoderMgr->returnFailure("ReadHeader", kInvalidInput);
+ }
+
+ // Initialize the decompress info and the source manager
+ decoderMgr->init();
+ auto* dinfo = decoderMgr->dinfo();
+
+ // Instruct jpeg library to save the markers that we care about. Since
+ // the orientation and color profile will not change, we can skip this
+ // step on rewinds.
+ if (codecOut) {
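+        // 0xFFFF is the maximum amount of marker data that libjpeg will
+        // retain per marker.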
+ jpeg_save_markers(dinfo, kExifMarker, 0xFFFF);
+ jpeg_save_markers(dinfo, kICCMarker, 0xFFFF);
+ }
+
+ // Read the jpeg header
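+    // (passing true for the second argument rejects tables-only streams,
+    // which contain no image)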
+ switch (jpeg_read_header(dinfo, true)) {
+ case JPEG_HEADER_OK:
+ break;
+ case JPEG_SUSPENDED:
+ return decoderMgr->returnFailure("ReadHeader", kIncompleteInput);
+ default:
+ return decoderMgr->returnFailure("ReadHeader", kInvalidInput);
+ }
+
+ if (codecOut) {
+ // Get the encoded color type
+ SkEncodedInfo::Color color;
+ if (!decoderMgr->getEncodedColor(&color)) {
+ return kInvalidInput;
+ }
+
+ SkEncodedOrigin orientation = get_exif_orientation(dinfo);
+ auto profile = read_color_profile(dinfo);
+ if (profile) {
+ auto type = profile->profile()->data_color_space;
+ switch (decoderMgr->dinfo()->jpeg_color_space) {
+ case JCS_CMYK:
+ case JCS_YCCK:
+ if (type != skcms_Signature_CMYK) {
+ profile = nullptr;
+ }
+ break;
+ case JCS_GRAYSCALE:
+ if (type != skcms_Signature_Gray &&
+ type != skcms_Signature_RGB)
+ {
+ profile = nullptr;
+ }
+ break;
+ default:
+ if (type != skcms_Signature_RGB) {
+ profile = nullptr;
+ }
+ break;
+ }
+ }
+ if (!profile) {
+ profile = std::move(defaultColorProfile);
+ }
+
+ SkEncodedInfo info = SkEncodedInfo::Make(dinfo->image_width, dinfo->image_height,
+ color, SkEncodedInfo::kOpaque_Alpha, 8,
+ std::move(profile));
+
+ SkJpegCodec* codec = new SkJpegCodec(std::move(info), std::unique_ptr<SkStream>(stream),
+ decoderMgr.release(), orientation);
+ *codecOut = codec;
+ } else {
+ SkASSERT(nullptr != decoderMgrOut);
+ *decoderMgrOut = decoderMgr.release();
+ }
+ return kSuccess;
+}
+
+std::unique_ptr<SkCodec> SkJpegCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ return SkJpegCodec::MakeFromStream(std::move(stream), result, nullptr);
+}
+
+std::unique_ptr<SkCodec> SkJpegCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result, std::unique_ptr<SkEncodedInfo::ICCProfile> defaultColorProfile) {
+ SkCodec* codec = nullptr;
+ *result = ReadHeader(stream.get(), &codec, nullptr, std::move(defaultColorProfile));
+ if (kSuccess == *result) {
+ // Codec has taken ownership of the stream, we do not need to delete it
+ SkASSERT(codec);
+ stream.release();
+ return std::unique_ptr<SkCodec>(codec);
+ }
+ return nullptr;
+}
+
+SkJpegCodec::SkJpegCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ JpegDecoderMgr* decoderMgr, SkEncodedOrigin origin)
+ : INHERITED(std::move(info), skcms_PixelFormat_RGBA_8888, std::move(stream), origin)
+ , fDecoderMgr(decoderMgr)
+ , fReadyState(decoderMgr->dinfo()->global_state)
+ , fSwizzleSrcRow(nullptr)
+ , fColorXformSrcRow(nullptr)
+ , fSwizzlerSubset(SkIRect::MakeEmpty())
+{}
+
+/*
+ * Return the row bytes of a particular image type and width
+ */
+static size_t get_row_bytes(const j_decompress_ptr dinfo) {
+ const size_t colorBytes = (dinfo->out_color_space == JCS_RGB565) ? 2 :
+ dinfo->out_color_components;
+    return dinfo->output_width * colorBytes;
+}
+
+/*
+ * Calculate output dimensions based on the provided factors.
+ *
+ * Not to be used on the actual jpeg_decompress_struct used for decoding, since it will
+ * incorrectly modify num_components.
+ */
+void calc_output_dimensions(jpeg_decompress_struct* dinfo, unsigned int num, unsigned int denom) {
+ dinfo->num_components = 0;
+ dinfo->scale_num = num;
+ dinfo->scale_denom = denom;
+ jpeg_calc_output_dimensions(dinfo);
+}
+
+/*
+ * Return a valid set of output dimensions for this decoder, given an input scale
+ */
+SkISize SkJpegCodec::onGetScaledDimensions(float desiredScale) const {
+ // libjpeg-turbo supports scaling by 1/8, 1/4, 3/8, 1/2, 5/8, 3/4, 7/8, and 1/1, so we will
+ // support these as well
+ unsigned int num;
+ unsigned int denom = 8;
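+    // Each cutoff below is the midpoint between two adjacent supported
+    // scales, so we round to the nearest num/denom (e.g. 0.9375f = (7/8 + 8/8) / 2).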
+    if (desiredScale >= 0.9375f) {
+        num = 8;
+    } else if (desiredScale >= 0.8125f) {
+ num = 7;
+ } else if (desiredScale >= 0.6875f) {
+ num = 6;
+ } else if (desiredScale >= 0.5625f) {
+ num = 5;
+ } else if (desiredScale >= 0.4375f) {
+ num = 4;
+ } else if (desiredScale >= 0.3125f) {
+ num = 3;
+ } else if (desiredScale >= 0.1875f) {
+ num = 2;
+ } else {
+ num = 1;
+ }
+
+ // Set up a fake decompress struct in order to use libjpeg to calculate output dimensions
+ jpeg_decompress_struct dinfo;
+ sk_bzero(&dinfo, sizeof(dinfo));
+ dinfo.image_width = this->dimensions().width();
+ dinfo.image_height = this->dimensions().height();
+ dinfo.global_state = fReadyState;
+ calc_output_dimensions(&dinfo, num, denom);
+
+ // Return the calculated output dimensions for the given scale
+ return SkISize::Make(dinfo.output_width, dinfo.output_height);
+}
+
+bool SkJpegCodec::onRewind() {
+ JpegDecoderMgr* decoderMgr = nullptr;
+ if (kSuccess != ReadHeader(this->stream(), nullptr, &decoderMgr, nullptr)) {
+ return fDecoderMgr->returnFalse("onRewind");
+ }
+ SkASSERT(nullptr != decoderMgr);
+ fDecoderMgr.reset(decoderMgr);
+
+ fSwizzler.reset(nullptr);
+ fSwizzleSrcRow = nullptr;
+ fColorXformSrcRow = nullptr;
+ fStorage.reset();
+
+ return true;
+}
+
+bool SkJpegCodec::conversionSupported(const SkImageInfo& dstInfo, bool srcIsOpaque,
+ bool needsColorXform) {
+ SkASSERT(srcIsOpaque);
+
+ if (kUnknown_SkAlphaType == dstInfo.alphaType()) {
+ return false;
+ }
+
+ if (kOpaque_SkAlphaType != dstInfo.alphaType()) {
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ }
+
+ J_COLOR_SPACE encodedColorType = fDecoderMgr->dinfo()->jpeg_color_space;
+
+ // Check for valid color types and set the output color space
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ break;
+ case kBGRA_8888_SkColorType:
+ if (needsColorXform) {
+ // Always using RGBA as the input format for color xforms makes the
+ // implementation a little simpler.
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ } else {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_BGRA;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ if (needsColorXform) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ } else {
+ fDecoderMgr->dinfo()->dither_mode = JDITHER_NONE;
+ fDecoderMgr->dinfo()->out_color_space = JCS_RGB565;
+ }
+ break;
+ case kGray_8_SkColorType:
+ if (JCS_GRAYSCALE != encodedColorType) {
+ return false;
+ }
+
+ if (needsColorXform) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ } else {
+ fDecoderMgr->dinfo()->out_color_space = JCS_GRAYSCALE;
+ }
+ break;
+ case kRGBA_F16_SkColorType:
+ SkASSERT(needsColorXform);
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ break;
+ default:
+ return false;
+ }
+
+ // Check if we will decode to CMYK. libjpeg-turbo does not convert CMYK to RGBA, so
+ // we must do it ourselves.
+ if (JCS_CMYK == encodedColorType || JCS_YCCK == encodedColorType) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_CMYK;
+ }
+
+ return true;
+}
+
+/*
+ * Checks if we can natively scale to the requested dimensions and natively scales the
+ * dimensions if possible
+ */
+bool SkJpegCodec::onDimensionsSupported(const SkISize& size) {
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return fDecoderMgr->returnFalse("onDimensionsSupported");
+ }
+
+ const unsigned int dstWidth = size.width();
+ const unsigned int dstHeight = size.height();
+
+ // Set up a fake decompress struct in order to use libjpeg to calculate output dimensions
+ // FIXME: Why is this necessary?
+ jpeg_decompress_struct dinfo;
+ sk_bzero(&dinfo, sizeof(dinfo));
+ dinfo.image_width = this->dimensions().width();
+ dinfo.image_height = this->dimensions().height();
+ dinfo.global_state = fReadyState;
+
+ // libjpeg-turbo can scale to 1/8, 1/4, 3/8, 1/2, 5/8, 3/4, 7/8, and 1/1
+ unsigned int num = 8;
+ const unsigned int denom = 8;
+ calc_output_dimensions(&dinfo, num, denom);
+ while (dinfo.output_width != dstWidth || dinfo.output_height != dstHeight) {
+
+ // Return a failure if we have tried all of the possible scales
+ if (1 == num || dstWidth > dinfo.output_width || dstHeight > dinfo.output_height) {
+ return false;
+ }
+
+ // Try the next scale
+ num -= 1;
+ calc_output_dimensions(&dinfo, num, denom);
+ }
+
+ fDecoderMgr->dinfo()->scale_num = num;
+ fDecoderMgr->dinfo()->scale_denom = denom;
+ return true;
+}
+
+int SkJpegCodec::readRows(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, int count,
+ const Options& opts) {
+ // Set the jump location for libjpeg-turbo errors
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return 0;
+ }
+
+ // When fSwizzleSrcRow is non-null, it means that we need to swizzle. In this case,
+    // we will always decode into fSwizzleSrcRow before swizzling into the next buffer.
+ // We can never swizzle "in place" because the swizzler may perform sampling and/or
+ // subsetting.
+ // When fColorXformSrcRow is non-null, it means that we need to color xform and that
+ // we cannot color xform "in place" (many times we can, but not when the src and dst
+ // are different sizes).
+ // In this case, we will color xform from fColorXformSrcRow into the dst.
+ JSAMPLE* decodeDst = (JSAMPLE*) dst;
+ uint32_t* swizzleDst = (uint32_t*) dst;
+ size_t decodeDstRowBytes = rowBytes;
+ size_t swizzleDstRowBytes = rowBytes;
+ int dstWidth = opts.fSubset ? opts.fSubset->width() : dstInfo.width();
+ if (fSwizzleSrcRow && fColorXformSrcRow) {
+ decodeDst = (JSAMPLE*) fSwizzleSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ } else if (fColorXformSrcRow) {
+ decodeDst = (JSAMPLE*) fColorXformSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ } else if (fSwizzleSrcRow) {
+ decodeDst = (JSAMPLE*) fSwizzleSrcRow;
+ decodeDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ }
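+    // Note that a row-bytes step of zero above makes decodeDst/swizzleDst
+    // reuse the same temporary row on every iteration of the loop below.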
+
+ for (int y = 0; y < count; y++) {
+ uint32_t lines = jpeg_read_scanlines(fDecoderMgr->dinfo(), &decodeDst, 1);
+ if (0 == lines) {
+ return y;
+ }
+
+ if (fSwizzler) {
+ fSwizzler->swizzle(swizzleDst, decodeDst);
+ }
+
+ if (this->colorXform()) {
+ this->applyColorXform(dst, swizzleDst, dstWidth);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ }
+
+ decodeDst = SkTAddOffset<JSAMPLE>(decodeDst, decodeDstRowBytes);
+ swizzleDst = SkTAddOffset<uint32_t>(swizzleDst, swizzleDstRowBytes);
+ }
+
+ return count;
+}
+
+/*
+ * This is a bit tricky. We only need the swizzler to do format conversion if the jpeg is
+ * encoded as CMYK.
+ * Even then, we still may not need it: if the jpeg has a CMYK color profile and a color
+ * xform, the color xform will handle the CMYK->RGB conversion.
+ */
+static inline bool needs_swizzler_to_convert_from_cmyk(J_COLOR_SPACE jpegColorType,
+ const skcms_ICCProfile* srcProfile,
+ bool hasColorSpaceXform) {
+ if (JCS_CMYK != jpegColorType) {
+ return false;
+ }
+
+ bool hasCMYKColorSpace = srcProfile && srcProfile->data_color_space == skcms_Signature_CMYK;
+ return !hasCMYKColorSpace || !hasColorSpaceXform;
+}
+
+/*
+ * Performs the jpeg decode
+ */
+SkCodec::Result SkJpegCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& options,
+ int* rowsDecoded) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ // Get a pointer to the decompress info since we will use it quite frequently
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+
+ // Set the jump location for libjpeg errors
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return fDecoderMgr->returnFailure("setjmp", kInvalidInput);
+ }
+
+ if (!jpeg_start_decompress(dinfo)) {
+ return fDecoderMgr->returnFailure("startDecompress", kInvalidInput);
+ }
+
+ // The recommended output buffer height should always be 1 in high quality modes.
+ // If it's not, we want to know because it means our strategy is not optimal.
+ SkASSERT(1 == dinfo->rec_outbuf_height);
+
+ if (needs_swizzler_to_convert_from_cmyk(dinfo->out_color_space,
+ this->getEncodedInfo().profile(), this->colorXform())) {
+ this->initializeSwizzler(dstInfo, options, true);
+ }
+
+ this->allocateStorage(dstInfo);
+
+ int rows = this->readRows(dstInfo, dst, dstRowBytes, dstInfo.height(), options);
+ if (rows < dstInfo.height()) {
+ *rowsDecoded = rows;
+ return fDecoderMgr->returnFailure("Incomplete image data", kIncompleteInput);
+ }
+
+ return kSuccess;
+}
+
+void SkJpegCodec::allocateStorage(const SkImageInfo& dstInfo) {
+ int dstWidth = dstInfo.width();
+
+ size_t swizzleBytes = 0;
+ if (fSwizzler) {
+ swizzleBytes = get_row_bytes(fDecoderMgr->dinfo());
+ dstWidth = fSwizzler->swizzleWidth();
+ SkASSERT(!this->colorXform() || SkIsAlign4(swizzleBytes));
+ }
+
+ size_t xformBytes = 0;
+
+ if (this->colorXform() && sizeof(uint32_t) != dstInfo.bytesPerPixel()) {
+ xformBytes = dstWidth * sizeof(uint32_t);
+ }
+
+ size_t totalBytes = swizzleBytes + xformBytes;
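+    // fStorage is laid out as the swizzle source row followed by the
+    // color xform source row.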
+ if (totalBytes > 0) {
+ fStorage.reset(totalBytes);
+ fSwizzleSrcRow = (swizzleBytes > 0) ? fStorage.get() : nullptr;
+ fColorXformSrcRow = (xformBytes > 0) ?
+ SkTAddOffset<uint32_t>(fStorage.get(), swizzleBytes) : nullptr;
+ }
+}
+
+void SkJpegCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& options,
+ bool needsCMYKToRGB) {
+ Options swizzlerOptions = options;
+ if (options.fSubset) {
+ // Use fSwizzlerSubset if this is a subset decode. This is necessary in the case
+ // where libjpeg-turbo provides a subset and then we need to subset it further.
+ // Also, verify that fSwizzlerSubset is initialized and valid.
+ SkASSERT(!fSwizzlerSubset.isEmpty() && fSwizzlerSubset.x() <= options.fSubset->x() &&
+ fSwizzlerSubset.width() == options.fSubset->width());
+ swizzlerOptions.fSubset = &fSwizzlerSubset;
+ }
+
+ SkImageInfo swizzlerDstInfo = dstInfo;
+ if (this->colorXform()) {
+ // The color xform will be expecting RGBA 8888 input.
+ swizzlerDstInfo = swizzlerDstInfo.makeColorType(kRGBA_8888_SkColorType);
+ }
+
+ if (needsCMYKToRGB) {
+        // The swizzler is used to convert from CMYK.
+        // The swizzler does not use the width or height of the SkEncodedInfo.
+ auto swizzlerInfo = SkEncodedInfo::Make(0, 0, SkEncodedInfo::kInvertedCMYK_Color,
+ SkEncodedInfo::kOpaque_Alpha, 8);
+ fSwizzler = SkSwizzler::Make(swizzlerInfo, nullptr, swizzlerDstInfo, swizzlerOptions);
+ } else {
+ int srcBPP = 0;
+ switch (fDecoderMgr->dinfo()->out_color_space) {
+ case JCS_EXT_RGBA:
+ case JCS_EXT_BGRA:
+ case JCS_CMYK:
+ srcBPP = 4;
+ break;
+ case JCS_RGB565:
+ srcBPP = 2;
+ break;
+ case JCS_GRAYSCALE:
+ srcBPP = 1;
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ fSwizzler = SkSwizzler::MakeSimple(srcBPP, swizzlerDstInfo, swizzlerOptions);
+ }
+ SkASSERT(fSwizzler);
+}
+
+SkSampler* SkJpegCodec::getSampler(bool createIfNecessary) {
+ if (!createIfNecessary || fSwizzler) {
+ SkASSERT(!fSwizzler || (fSwizzleSrcRow && fStorage.get() == fSwizzleSrcRow));
+ return fSwizzler.get();
+ }
+
+ bool needsCMYKToRGB = needs_swizzler_to_convert_from_cmyk(
+ fDecoderMgr->dinfo()->out_color_space, this->getEncodedInfo().profile(),
+ this->colorXform());
+ this->initializeSwizzler(this->dstInfo(), this->options(), needsCMYKToRGB);
+ this->allocateStorage(this->dstInfo());
+ return fSwizzler.get();
+}
+
+SkCodec::Result SkJpegCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options) {
+ // Set the jump location for libjpeg errors
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ SkCodecPrintf("setjmp: Error from libjpeg\n");
+ return kInvalidInput;
+ }
+
+ if (!jpeg_start_decompress(fDecoderMgr->dinfo())) {
+ SkCodecPrintf("start decompress failed\n");
+ return kInvalidInput;
+ }
+
+ bool needsCMYKToRGB = needs_swizzler_to_convert_from_cmyk(
+ fDecoderMgr->dinfo()->out_color_space, this->getEncodedInfo().profile(),
+ this->colorXform());
+ if (options.fSubset) {
+ uint32_t startX = options.fSubset->x();
+ uint32_t width = options.fSubset->width();
+
+ // libjpeg-turbo may need to align startX to a multiple of the IDCT
+ // block size. If this is the case, it will decrease the value of
+ // startX to the appropriate alignment and also increase the value
+ // of width so that the right edge of the requested subset remains
+ // the same.
+ jpeg_crop_scanline(fDecoderMgr->dinfo(), &startX, &width);
+
+ SkASSERT(startX <= (uint32_t) options.fSubset->x());
+ SkASSERT(width >= (uint32_t) options.fSubset->width());
+ SkASSERT(startX + width >= (uint32_t) options.fSubset->right());
+
+ // Instruct the swizzler (if it is necessary) to further subset the
+ // output provided by libjpeg-turbo.
+ //
+ // We set this here (rather than in the if statement below), so that
+ // if (1) we don't need a swizzler for the subset, and (2) we need a
+ // swizzler for CMYK, the swizzler will still use the proper subset
+ // dimensions.
+ //
+ // Note that the swizzler will ignore the y and height parameters of
+ // the subset. Since the scanline decoder (and the swizzler) handle
+ // one row at a time, only the subsetting in the x-dimension matters.
+ fSwizzlerSubset.setXYWH(options.fSubset->x() - startX, 0,
+ options.fSubset->width(), options.fSubset->height());
+
+ // We will need a swizzler if libjpeg-turbo cannot provide the exact
+ // subset that we request.
+ if (startX != (uint32_t) options.fSubset->x() ||
+ width != (uint32_t) options.fSubset->width()) {
+ this->initializeSwizzler(dstInfo, options, needsCMYKToRGB);
+ }
+ }
+
+ // Make sure we have a swizzler if we are converting from CMYK.
+ if (!fSwizzler && needsCMYKToRGB) {
+ this->initializeSwizzler(dstInfo, options, true);
+ }
+
+ this->allocateStorage(dstInfo);
+
+ return kSuccess;
+}
+
+int SkJpegCodec::onGetScanlines(void* dst, int count, size_t dstRowBytes) {
+ int rows = this->readRows(this->dstInfo(), dst, dstRowBytes, count, this->options());
+ if (rows < count) {
+ // This allows us to skip calling jpeg_finish_decompress().
+ fDecoderMgr->dinfo()->output_scanline = this->dstInfo().height();
+ }
+
+ return rows;
+}
+
+bool SkJpegCodec::onSkipScanlines(int count) {
+ // Set the jump location for libjpeg errors
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return fDecoderMgr->returnFalse("onSkipScanlines");
+ }
+
+ return (uint32_t) count == jpeg_skip_scanlines(fDecoderMgr->dinfo(), count);
+}
+
+static bool is_yuv_supported(jpeg_decompress_struct* dinfo) {
+ // Scaling is not supported in raw data mode.
+ SkASSERT(dinfo->scale_num == dinfo->scale_denom);
+
+ // I can't imagine that this would ever change, but we do depend on it.
+ static_assert(8 == DCTSIZE, "DCTSIZE (defined in jpeg library) should always be 8.");
+
+ if (JCS_YCbCr != dinfo->jpeg_color_space) {
+ return false;
+ }
+
+ SkASSERT(3 == dinfo->num_components);
+ SkASSERT(dinfo->comp_info);
+
+ // It is possible to perform a YUV decode for any combination of
+ // horizontal and vertical sampling that is supported by
+ // libjpeg/libjpeg-turbo. However, we will start by supporting only the
+ // common cases (where U and V have samp_factors of one).
+ //
+ // The definition of samp_factor is kind of the opposite of what SkCodec
+ // thinks of as a sampling factor. samp_factor is essentially a
+    // multiplier, and the larger the samp_factor is, the more samples
+    // there will be. Ex:
+ // U_plane_width = image_width * (U_h_samp_factor / max_h_samp_factor)
+ //
+ // Supporting cases where the samp_factors for U or V were larger than
+ // that of Y would be an extremely difficult change, given that clients
+ // allocate memory as if the size of the Y plane is always the size of the
+ // image. However, this case is very, very rare.
+ if ((1 != dinfo->comp_info[1].h_samp_factor) ||
+ (1 != dinfo->comp_info[1].v_samp_factor) ||
+ (1 != dinfo->comp_info[2].h_samp_factor) ||
+ (1 != dinfo->comp_info[2].v_samp_factor))
+ {
+ return false;
+ }
+
+ // Support all common cases of Y samp_factors.
+ // TODO (msarett): As mentioned above, it would be possible to support
+ // more combinations of samp_factors. The issues are:
+ // (1) Are there actually any images that are not covered
+ // by these cases?
+ // (2) How much complexity would be added to the
+ // implementation in order to support these rare
+ // cases?
+ int hSampY = dinfo->comp_info[0].h_samp_factor;
+ int vSampY = dinfo->comp_info[0].v_samp_factor;
+ return (1 == hSampY && 1 == vSampY) ||
+ (2 == hSampY && 1 == vSampY) ||
+ (2 == hSampY && 2 == vSampY) ||
+ (1 == hSampY && 2 == vSampY) ||
+ (4 == hSampY && 1 == vSampY) ||
+ (4 == hSampY && 2 == vSampY);
+}
+
+bool SkJpegCodec::onQueryYUV8(SkYUVASizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+ if (!is_yuv_supported(dinfo)) {
+ return false;
+ }
+
+    jpeg_component_info* comp_info = dinfo->comp_info;
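+    // fWidthBytes is padded out to a whole number of DCT blocks
+    // (DCTSIZE pixels each).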
+ for (int i = 0; i < 3; ++i) {
+ sizeInfo->fSizes[i].set(comp_info[i].downsampled_width, comp_info[i].downsampled_height);
+ sizeInfo->fWidthBytes[i] = comp_info[i].width_in_blocks * DCTSIZE;
+ }
+
+ // JPEG never has an alpha channel
+ sizeInfo->fSizes[3].fHeight = sizeInfo->fSizes[3].fWidth = sizeInfo->fWidthBytes[3] = 0;
+
+ sizeInfo->fOrigin = this->getOrigin();
+
+ if (colorSpace) {
+ *colorSpace = kJPEG_SkYUVColorSpace;
+ }
+
+ return true;
+}
+
+SkCodec::Result SkJpegCodec::onGetYUV8Planes(const SkYUVASizeInfo& sizeInfo,
+ void* planes[SkYUVASizeInfo::kMaxCount]) {
+ SkYUVASizeInfo defaultInfo;
+
+    // This will check is_yuv_supported(), so we don't need to check it here.
+ bool supportsYUV = this->onQueryYUV8(&defaultInfo, nullptr);
+ if (!supportsYUV ||
+ sizeInfo.fSizes[0] != defaultInfo.fSizes[0] ||
+ sizeInfo.fSizes[1] != defaultInfo.fSizes[1] ||
+ sizeInfo.fSizes[2] != defaultInfo.fSizes[2] ||
+ sizeInfo.fWidthBytes[0] < defaultInfo.fWidthBytes[0] ||
+ sizeInfo.fWidthBytes[1] < defaultInfo.fWidthBytes[1] ||
+ sizeInfo.fWidthBytes[2] < defaultInfo.fWidthBytes[2]) {
+ return fDecoderMgr->returnFailure("onGetYUV8Planes", kInvalidInput);
+ }
+
+ // Set the jump location for libjpeg errors
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fDecoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return fDecoderMgr->returnFailure("setjmp", kInvalidInput);
+ }
+
+ // Get a pointer to the decompress info since we will use it quite frequently
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+
+ dinfo->raw_data_out = TRUE;
+ if (!jpeg_start_decompress(dinfo)) {
+ return fDecoderMgr->returnFailure("startDecompress", kInvalidInput);
+ }
+
+ // A previous implementation claims that the return value of is_yuv_supported()
+ // may change after calling jpeg_start_decompress(). It looks to me like this
+ // was caused by a bug in the old code, but we'll be safe and check here.
+ SkASSERT(is_yuv_supported(dinfo));
+
+ // Currently, we require that the Y plane dimensions match the image dimensions
+ // and that the U and V planes are the same dimensions.
+ SkASSERT(sizeInfo.fSizes[1] == sizeInfo.fSizes[2]);
+ SkASSERT((uint32_t) sizeInfo.fSizes[0].width() == dinfo->output_width &&
+ (uint32_t) sizeInfo.fSizes[0].height() == dinfo->output_height);
+
+ // Build a JSAMPIMAGE to handle output from libjpeg-turbo. A JSAMPIMAGE has
+ // a 2-D array of pixels for each of the components (Y, U, V) in the image.
+ // Cheat Sheet:
+ // JSAMPIMAGE == JSAMPLEARRAY* == JSAMPROW** == JSAMPLE***
+ JSAMPARRAY yuv[3];
+
+ // Set aside enough space for pointers to rows of Y, U, and V.
+ JSAMPROW rowptrs[2 * DCTSIZE + DCTSIZE + DCTSIZE];
+ yuv[0] = &rowptrs[0]; // Y rows (DCTSIZE or 2 * DCTSIZE)
+ yuv[1] = &rowptrs[2 * DCTSIZE]; // U rows (DCTSIZE)
+ yuv[2] = &rowptrs[3 * DCTSIZE]; // V rows (DCTSIZE)
+
+ // Initialize rowptrs.
+ int numYRowsPerBlock = DCTSIZE * dinfo->comp_info[0].v_samp_factor;
+ for (int i = 0; i < numYRowsPerBlock; i++) {
+ rowptrs[i] = SkTAddOffset<JSAMPLE>(planes[0], i * sizeInfo.fWidthBytes[0]);
+ }
+ for (int i = 0; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] =
+ SkTAddOffset<JSAMPLE>(planes[1], i * sizeInfo.fWidthBytes[1]);
+ rowptrs[i + 3 * DCTSIZE] =
+ SkTAddOffset<JSAMPLE>(planes[2], i * sizeInfo.fWidthBytes[2]);
+ }
+
+ // After each loop iteration, we will increment pointers to Y, U, and V.
+ size_t blockIncrementY = numYRowsPerBlock * sizeInfo.fWidthBytes[0];
+ size_t blockIncrementU = DCTSIZE * sizeInfo.fWidthBytes[1];
+ size_t blockIncrementV = DCTSIZE * sizeInfo.fWidthBytes[2];
+
+ uint32_t numRowsPerBlock = numYRowsPerBlock;
+
+ // We intentionally round down here, as this first loop will only handle
+ // full block rows. As a special case at the end, we will handle any
+ // remaining rows that do not make up a full block.
+ const int numIters = dinfo->output_height / numRowsPerBlock;
+ for (int i = 0; i < numIters; i++) {
+ JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock);
+ if (linesRead < numRowsPerBlock) {
+ // FIXME: Handle incomplete YUV decodes without signalling an error.
+ return kInvalidInput;
+ }
+
+ // Update rowptrs.
+ for (int i = 0; i < numYRowsPerBlock; i++) {
+ rowptrs[i] += blockIncrementY;
+ }
+ for (int i = 0; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] += blockIncrementU;
+ rowptrs[i + 3 * DCTSIZE] += blockIncrementV;
+ }
+ }
+
+ uint32_t remainingRows = dinfo->output_height - dinfo->output_scanline;
+ SkASSERT(remainingRows == dinfo->output_height % numRowsPerBlock);
+ SkASSERT(dinfo->output_scanline == numIters * numRowsPerBlock);
+ if (remainingRows > 0) {
+ // libjpeg-turbo needs memory to be padded by the block sizes. We will fulfill
+ // this requirement using a dummy row buffer.
+ // FIXME: Should SkCodec have an extra memory buffer that can be shared among
+ // all of the implementations that use temporary/garbage memory?
+ SkAutoTMalloc<JSAMPLE> dummyRow(sizeInfo.fWidthBytes[0]);
+ for (int i = remainingRows; i < numYRowsPerBlock; i++) {
+ rowptrs[i] = dummyRow.get();
+ }
+ int remainingUVRows = dinfo->comp_info[1].downsampled_height - DCTSIZE * numIters;
+ for (int i = remainingUVRows; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] = dummyRow.get();
+ rowptrs[i + 3 * DCTSIZE] = dummyRow.get();
+ }
+
+ JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock);
+ if (linesRead < remainingRows) {
+ // FIXME: Handle incomplete YUV decodes without signalling an error.
+ return kInvalidInput;
+ }
+ }
+
+ return kSuccess;
+}
+
+// This function is declared in SkJpegInfo.h, used by SkPDF.
+bool SkGetJpegInfo(const void* data, size_t len,
+ SkISize* size,
+ SkEncodedInfo::Color* colorType,
+ SkEncodedOrigin* orientation) {
+ if (!SkJpegCodec::IsJpeg(data, len)) {
+ return false;
+ }
+
+ SkMemoryStream stream(data, len);
+ JpegDecoderMgr decoderMgr(&stream);
+ // libjpeg errors will be caught and reported here
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(decoderMgr.errorMgr());
+ if (setjmp(jmp)) {
+ return false;
+ }
+ decoderMgr.init();
+ jpeg_decompress_struct* dinfo = decoderMgr.dinfo();
+ jpeg_save_markers(dinfo, kExifMarker, 0xFFFF);
+ jpeg_save_markers(dinfo, kICCMarker, 0xFFFF);
+ if (JPEG_HEADER_OK != jpeg_read_header(dinfo, true)) {
+ return false;
+ }
+ SkEncodedInfo::Color encodedColorType;
+ if (!decoderMgr.getEncodedColor(&encodedColorType)) {
+ return false; // Unable to interpret the color channels as colors.
+ }
+ if (colorType) {
+ *colorType = encodedColorType;
+ }
+ if (orientation) {
+ *orientation = get_exif_orientation(dinfo);
+ }
+ if (size) {
+ *size = {SkToS32(dinfo->image_width), SkToS32(dinfo->image_height)};
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/codec/SkJpegCodec.h b/gfx/skia/skia/src/codec/SkJpegCodec.h
new file mode 100644
index 0000000000..986d283b3a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegCodec.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegCodec_DEFINED
+#define SkJpegCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkSwizzler.h"
+
+class JpegDecoderMgr;
+
+/*
+ * This class implements the decoding for jpeg images
+ */
+class SkJpegCodec : public SkCodec {
+public:
+ static bool IsJpeg(const void*, size_t);
+
+ /*
+ * Assumes IsJpeg was called and returned true
+ * Takes ownership of the stream
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+protected:
+
+ /*
+ * Recommend a set of destination dimensions given a requested scale
+ */
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ /*
+ * Initiates the jpeg decode
+ */
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ int*) override;
+
+ bool onQueryYUV8(SkYUVASizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override;
+
+ Result onGetYUV8Planes(const SkYUVASizeInfo& sizeInfo,
+ void* planes[SkYUVASizeInfo::kMaxCount]) override;
+
+ SkEncodedImageFormat onGetEncodedFormat() const override {
+ return SkEncodedImageFormat::kJPEG;
+ }
+
+ bool onRewind() override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ bool conversionSupported(const SkImageInfo&, bool, bool) override;
+
+private:
+ /*
+ * Allows SkRawCodec to communicate the color profile from the exif data.
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*,
+ std::unique_ptr<SkEncodedInfo::ICCProfile> defaultColorProfile);
+
+ /*
+ * Read enough of the stream to initialize the SkJpegCodec.
+     * Returns kSuccess on success; otherwise returns an error Result.
+ *
+ * @param codecOut
+     *      If this returns kSuccess, and codecOut was not nullptr,
+ * codecOut will be set to a new SkJpegCodec.
+ *
+ * @param decoderMgrOut
+     *      If this returns kSuccess, and codecOut was nullptr,
+ * decoderMgrOut must be non-nullptr and decoderMgrOut will be set to a new
+ * JpegDecoderMgr pointer.
+ *
+ * @param stream
+ * Deleted on failure.
+ * codecOut will take ownership of it in the case where we created a codec.
+ * Ownership is unchanged when we set decoderMgrOut.
+ *
+ * @param defaultColorProfile
+ * If the jpeg does not have an embedded color profile, the image data should
+ * be tagged with this color profile.
+ */
+ static Result ReadHeader(SkStream* stream, SkCodec** codecOut,
+ JpegDecoderMgr** decoderMgrOut,
+ std::unique_ptr<SkEncodedInfo::ICCProfile> defaultColorProfile);
+
+ /*
+ * Creates an instance of the decoder
+     * Called only by MakeFromStream
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the encoded image data
+ * @param decoderMgr holds decompress struct, src manager, and error manager
+ * takes ownership
+ */
+ SkJpegCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ JpegDecoderMgr* decoderMgr, SkEncodedOrigin origin);
+
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options& options,
+ bool needsCMYKToRGB);
+ void allocateStorage(const SkImageInfo& dstInfo);
+ int readRows(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, int count, const Options&);
+
+ /*
+ * Scanline decoding.
+ */
+ SkSampler* getSampler(bool createIfNecessary) override;
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options) override;
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+ bool onSkipScanlines(int count) override;
+
+ std::unique_ptr<JpegDecoderMgr> fDecoderMgr;
+
+ // We will save the state of the decompress struct after reading the header.
+ // This allows us to safely call onGetScaledDimensions() at any time.
+ const int fReadyState;
+
+ SkAutoTMalloc<uint8_t> fStorage;
+ uint8_t* fSwizzleSrcRow;
+ uint32_t* fColorXformSrcRow;
+
+ // libjpeg-turbo provides some subsetting. In the case that libjpeg-turbo
+    // cannot take the exact subset that we need, we will use the swizzler
+ // to further subset the output from libjpeg-turbo.
+ SkIRect fSwizzlerSubset;
+
+ std::unique_ptr<SkSwizzler> fSwizzler;
+
+ friend class SkRawCodec;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp
new file mode 100644
index 0000000000..0ada6d8592
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkJpegDecoderMgr.h"
+
+#include "src/codec/SkJpegUtility.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #include "include/android/SkAndroidFrameworkUtils.h"
+#endif
+
+/*
+ * Print information, warning, and error messages
+ */
+static void print_message(const j_common_ptr info, const char caller[]) {
+ char buffer[JMSG_LENGTH_MAX];
+ info->err->format_message(info, buffer);
+ SkCodecPrintf("libjpeg error %d <%s> from %s\n", info->err->msg_code, buffer, caller);
+}
+
+/*
+ * Reporting function for error and warning messages.
+ */
+static void output_message(j_common_ptr info) {
+ print_message(info, "output_message");
+}
+
+static void progress_monitor(j_common_ptr info) {
+ int scan = ((j_decompress_ptr)info)->input_scan_number;
+ // Progressive images with a very large number of scans can cause the
+ // decoder to hang. Here we use the progress monitor to abort on
+ // a very large number of scans. 100 is arbitrary, but much larger
+ // than the number of scans we might expect in a normal image.
+ if (scan >= 100) {
+ skjpeg_err_exit(info);
+ }
+}
+
+bool JpegDecoderMgr::returnFalse(const char caller[]) {
+ print_message((j_common_ptr) &fDInfo, caller);
+ return false;
+}
+
+SkCodec::Result JpegDecoderMgr::returnFailure(const char caller[], SkCodec::Result result) {
+ print_message((j_common_ptr) &fDInfo, caller);
+ return result;
+}
+
+bool JpegDecoderMgr::getEncodedColor(SkEncodedInfo::Color* outColor) {
+ switch (fDInfo.jpeg_color_space) {
+ case JCS_GRAYSCALE:
+ *outColor = SkEncodedInfo::kGray_Color;
+ return true;
+ case JCS_YCbCr:
+ *outColor = SkEncodedInfo::kYUV_Color;
+ return true;
+ case JCS_RGB:
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkAndroidFrameworkUtils::SafetyNetLog("118372692");
+#endif
+ *outColor = SkEncodedInfo::kRGB_Color;
+ return true;
+ case JCS_YCCK:
+ *outColor = SkEncodedInfo::kYCCK_Color;
+ return true;
+ case JCS_CMYK:
+ *outColor = SkEncodedInfo::kInvertedCMYK_Color;
+ return true;
+ default:
+ return false;
+ }
+}
+
+JpegDecoderMgr::JpegDecoderMgr(SkStream* stream)
+ : fSrcMgr(stream)
+ , fInit(false)
+{
+    // Error manager must be set before any calls to libjpeg in order to handle failures
+ fDInfo.err = jpeg_std_error(&fErrorMgr);
+ fErrorMgr.error_exit = skjpeg_err_exit;
+}
+
+void JpegDecoderMgr::init() {
+ jpeg_create_decompress(&fDInfo);
+ fInit = true;
+ fDInfo.src = &fSrcMgr;
+ fDInfo.err->output_message = &output_message;
+ fDInfo.progress = &fProgressMgr;
+ fProgressMgr.progress_monitor = &progress_monitor;
+}
+
+JpegDecoderMgr::~JpegDecoderMgr() {
+ if (fInit) {
+ jpeg_destroy_decompress(&fDInfo);
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h
new file mode 100644
index 0000000000..f992bf5411
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegDecoderMgr_DEFINED
+#define SkJpegDecoderMgr_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "src/codec/SkCodecPriv.h"
+#include <stdio.h>
+#include "src/codec/SkJpegUtility.h"
+
+extern "C" {
+ #include "jpeglib.h"
+}
+
+class JpegDecoderMgr : SkNoncopyable {
+public:
+
+ /*
+ * Print a useful error message and return false
+ */
+ bool returnFalse(const char caller[]);
+
+ /*
+ * Print a useful error message and return a decode failure
+ */
+ SkCodec::Result returnFailure(const char caller[], SkCodec::Result result);
+
+ /*
+ * Create the decode manager
+ * Does not take ownership of stream
+ */
+ JpegDecoderMgr(SkStream* stream);
+
+ /*
+ * Initialize decompress struct
+ * Initialize the source manager
+ */
+ void init();
+
+ /*
+ * Returns true if it successfully sets outColor to the encoded color,
+ * and false otherwise.
+ */
+ bool getEncodedColor(SkEncodedInfo::Color* outColor);
+
+ /*
+ * Free memory used by the decode manager
+ */
+ ~JpegDecoderMgr();
+
+ /*
+ * Get the skjpeg_error_mgr in order to set an error return jmp_buf
+ */
+ skjpeg_error_mgr* errorMgr() { return &fErrorMgr; }
+
+ /*
+ * Get function for the decompress info struct
+ */
+ jpeg_decompress_struct* dinfo() { return &fDInfo; }
+
+private:
+
+ jpeg_decompress_struct fDInfo;
+ skjpeg_source_mgr fSrcMgr;
+ skjpeg_error_mgr fErrorMgr;
+ jpeg_progress_mgr fProgressMgr;
+ bool fInit;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkJpegPriv.h b/gfx/skia/skia/src/codec/SkJpegPriv.h
new file mode 100644
index 0000000000..2e36397714
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegPriv.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegPriv_DEFINED
+#define SkJpegPriv_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/private/SkTArray.h"
+
+#include <setjmp.h>
+// stdio is needed for jpeglib
+#include <stdio.h>
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+static constexpr uint32_t kICCMarker = JPEG_APP0 + 2;
+static constexpr uint32_t kICCMarkerHeaderSize = 14;
+static constexpr uint8_t kICCSig[] = {
+ 'I', 'C', 'C', '_', 'P', 'R', 'O', 'F', 'I', 'L', 'E', '\0',
+};
+
+/*
+ * Error handling struct
+ */
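+// The jmp_buf stack supports nested libjpeg calls. The usage pattern, seen
+// throughout this patch (e.g. SkJpegCodec.cpp), is:
+//     skjpeg_error_mgr::AutoPushJmpBuf jmp(decoderMgr->errorMgr());
+//     if (setjmp(jmp)) { /* a libjpeg error occurred */ }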
+struct skjpeg_error_mgr : jpeg_error_mgr {
+ class AutoPushJmpBuf {
+ public:
+ AutoPushJmpBuf(skjpeg_error_mgr* mgr) : fMgr(mgr) {
+ fMgr->fJmpBufStack.push_back(&fJmpBuf);
+ }
+ ~AutoPushJmpBuf() {
+ SkASSERT(fMgr->fJmpBufStack.back() == &fJmpBuf);
+ fMgr->fJmpBufStack.pop_back();
+ }
+ operator jmp_buf&() { return fJmpBuf; }
+
+ private:
+ skjpeg_error_mgr* const fMgr;
+ jmp_buf fJmpBuf;
+ };
+
+ SkSTArray<4, jmp_buf*> fJmpBufStack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkJpegUtility.cpp b/gfx/skia/skia/src/codec/SkJpegUtility.cpp
new file mode 100644
index 0000000000..d313892cc3
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegUtility.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkJpegUtility.h"
+
+#include "src/codec/SkCodecPriv.h"
+
+/*
+ * Call longjmp to continue execution on an error
+ */
+void skjpeg_err_exit(j_common_ptr dinfo) {
+ // Simply return to Skia client code
+ // JpegDecoderMgr will take care of freeing memory
+ skjpeg_error_mgr* error = (skjpeg_error_mgr*) dinfo->err;
+ (*error->output_message) (dinfo);
+ if (error->fJmpBufStack.empty()) {
+ SK_ABORT("JPEG error with no jmp_buf set.");
+ }
+ longjmp(*error->fJmpBufStack.back(), 1);
+}
+
+// Functions for buffered sources //
+
+/*
+ * Initialize the buffered source manager
+ */
+static void sk_init_buffered_source(j_decompress_ptr dinfo) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = 0;
+}
+
+/*
+ * Fill the input buffer from the stream
+ */
+static boolean sk_fill_buffered_input_buffer(j_decompress_ptr dinfo) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ size_t bytes = src->fStream->read(src->fBuffer, skjpeg_source_mgr::kBufferSize);
+
+ // libjpeg is still happy with a less than full read, as long as the result is non-zero
+ if (bytes == 0) {
+ // Let libjpeg know that the buffer needs to be refilled
+ src->next_input_byte = nullptr;
+ src->bytes_in_buffer = 0;
+ return false;
+ }
+
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = bytes;
+ return true;
+}
+
+/*
+ * Skip a certain number of bytes in the stream
+ */
+static void sk_skip_buffered_input_data(j_decompress_ptr dinfo, long numBytes) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ size_t bytes = (size_t) numBytes;
+
+ if (bytes > src->bytes_in_buffer) {
+ size_t bytesToSkip = bytes - src->bytes_in_buffer;
+ if (bytesToSkip != src->fStream->skip(bytesToSkip)) {
+ SkCodecPrintf("Failure to skip.\n");
+ dinfo->err->error_exit((j_common_ptr) dinfo);
+ return;
+ }
+
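+        // Leave the buffer empty so that libjpeg calls fill_input_buffer()
+        // before the next read.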
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = 0;
+ } else {
+ src->next_input_byte += numBytes;
+ src->bytes_in_buffer -= numBytes;
+ }
+}
+
+/*
+ * We do not need to do anything to terminate our stream
+ */
+static void sk_term_source(j_decompress_ptr dinfo)
+{
+ // The current implementation of SkJpegCodec does not call
+ // jpeg_finish_decompress(), so this function is never called.
+ // If we want to modify this function to do something, we also
+ // need to modify SkJpegCodec to call jpeg_finish_decompress().
+}
+
+// Functions for memory backed sources //
+
+/*
+ * Initialize the mem backed source manager
+ */
+static void sk_init_mem_source(j_decompress_ptr dinfo) {
+    /* no work necessary here; everything is done in the constructor */
+}
+
+static void sk_skip_mem_input_data (j_decompress_ptr cinfo, long num_bytes) {
+ jpeg_source_mgr* src = cinfo->src;
+ size_t bytes = static_cast<size_t>(num_bytes);
+ if(bytes > src->bytes_in_buffer) {
+ src->next_input_byte = nullptr;
+ src->bytes_in_buffer = 0;
+ } else {
+ src->next_input_byte += bytes;
+ src->bytes_in_buffer -= bytes;
+ }
+}
+
+static boolean sk_fill_mem_input_buffer (j_decompress_ptr cinfo) {
+    /* The whole JPEG data is expected to reside in the supplied memory
+ * buffer, so any request for more data beyond the given buffer size
+ * is treated as an error.
+ */
+ return false;
+}
+
+/*
+ * Constructor for the source manager that we provide to libjpeg
+ * We provide skia implementations of all of the stream processing functions required by libjpeg
+ */
+skjpeg_source_mgr::skjpeg_source_mgr(SkStream* stream)
+ : fStream(stream)
+{
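+    // If the stream's full contents are already in memory, hand them to
+    // libjpeg directly; otherwise read through the fixed-size fBuffer.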
+ if (stream->hasLength() && stream->getMemoryBase()) {
+ init_source = sk_init_mem_source;
+ fill_input_buffer = sk_fill_mem_input_buffer;
+ skip_input_data = sk_skip_mem_input_data;
+ resync_to_restart = jpeg_resync_to_restart;
+ term_source = sk_term_source;
+ bytes_in_buffer = static_cast<size_t>(stream->getLength());
+ next_input_byte = static_cast<const JOCTET*>(stream->getMemoryBase());
+ } else {
+ init_source = sk_init_buffered_source;
+ fill_input_buffer = sk_fill_buffered_input_buffer;
+ skip_input_data = sk_skip_buffered_input_data;
+ resync_to_restart = jpeg_resync_to_restart;
+ term_source = sk_term_source;
+ }
+}
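+
+/*
+ * Illustrative sketch (not part of the original file): the decoder installs
+ * this source manager roughly as follows (error handling elided; stream is a
+ * placeholder for the codec's SkStream).
+ *
+ *     jpeg_decompress_struct dinfo;
+ *     jpeg_create_decompress(&dinfo);
+ *     skjpeg_source_mgr srcMgr(stream);
+ *     dinfo.src = &srcMgr;  // libjpeg now pulls bytes through the callbacks above
+ */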
diff --git a/gfx/skia/skia/src/codec/SkJpegUtility.h b/gfx/skia/skia/src/codec/SkJpegUtility.h
new file mode 100644
index 0000000000..350f15484a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegUtility.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegUtility_codec_DEFINED
+#define SkJpegUtility_codec_DEFINED
+
+#include "include/core/SkStream.h"
+#include "src/codec/SkJpegPriv.h"
+
+#include <setjmp.h>
+// stdio is needed for jpeglib
+#include <stdio.h>
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+/*
+ * Error handling function
+ */
+void skjpeg_err_exit(j_common_ptr cinfo);
+
+/*
+ * Source-handling struct that allows libjpeg to use our stream object
+ */
+struct skjpeg_source_mgr : jpeg_source_mgr {
+ skjpeg_source_mgr(SkStream* stream);
+
+ SkStream* fStream; // unowned
+ enum {
+ // TODO (msarett): Experiment with different buffer sizes.
+ // This size was chosen because it matches SkImageDecoder.
+ kBufferSize = 1024
+ };
+ uint8_t fBuffer[kBufferSize];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
new file mode 100644
index 0000000000..d3f1f36c51
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkMaskSwizzler.h"
+
+static void swizzle_mask16_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+// TODO (msarett): We have promoted a two-byte-per-pixel image to 8888, only to
+// convert it back to 565. Instead, we should swizzle to 565 directly.
+static void swizzle_mask16_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+        uint32_t p = srcRow[0] | (srcRow[1] << 8) | (srcRow[2] << 16);
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+/*
+ *
+ * Create a new mask swizzler
+ *
+ */
+SkMaskSwizzler* SkMaskSwizzler::CreateMaskSwizzler(const SkImageInfo& dstInfo,
+ bool srcIsOpaque, SkMasks* masks, uint32_t bitsPerPixel,
+ const SkCodec::Options& options) {
+
+ // Choose the appropriate row procedure
+ RowProc proc = nullptr;
+ switch (bitsPerPixel) {
+ case 16:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask16_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask16_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask16_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask16_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask16_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask16_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask16_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 24:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask24_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask24_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask24_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask24_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask24_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask24_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask24_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 32:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask32_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask32_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask32_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (srcIsOpaque) {
+ proc = &swizzle_mask32_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask32_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask32_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask32_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+
+ int srcOffset = 0;
+ int srcWidth = dstInfo.width();
+ if (options.fSubset) {
+ srcOffset = options.fSubset->left();
+ srcWidth = options.fSubset->width();
+ }
+
+ return new SkMaskSwizzler(masks, proc, srcOffset, srcWidth);
+}
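+
+/*
+ * Illustrative usage sketch (not part of the original file): a BMP with
+ * 16-bit 565 pixels might construct the swizzler like so (dstInfo and
+ * options come from the codec; ownership and cleanup are elided).
+ *
+ *     SkMasks::InputMasks in = { 0xF800, 0x07E0, 0x001F, 0 };
+ *     SkMasks* masks = SkMasks::CreateMasks(in, 2);  // 2 bytes per pixel
+ *     SkMaskSwizzler* swizzler = SkMaskSwizzler::CreateMaskSwizzler(
+ *             dstInfo, true, masks, 16, options);    // srcIsOpaque = true
+ */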
+
+/*
+ *
+ * Constructor for mask swizzler
+ *
+ */
+SkMaskSwizzler::SkMaskSwizzler(SkMasks* masks, RowProc proc, int srcOffset, int subsetWidth)
+ : fMasks(masks)
+ , fRowProc(proc)
+ , fSubsetWidth(subsetWidth)
+ , fDstWidth(subsetWidth)
+ , fSampleX(1)
+ , fSrcOffset(srcOffset)
+ , fX0(srcOffset)
+{}
+
+int SkMaskSwizzler::onSetSampleX(int sampleX) {
+ // FIXME: Share this function with SkSwizzler?
+ SkASSERT(sampleX > 0); // Surely there is an upper limit? Should there be
+                           // a way to report failure?
+ fSampleX = sampleX;
+ fX0 = get_start_coord(sampleX) + fSrcOffset;
+ fDstWidth = get_scaled_dimension(fSubsetWidth, sampleX);
+
+ // check that fX0 is valid
+ SkASSERT(fX0 >= 0);
+ return fDstWidth;
+}
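+
+/*
+ * Worked example (illustrative): assuming the SkCodecPriv helpers implement
+ * centered sampling (get_start_coord(s) == s / 2 and
+ * get_scaled_dimension(w, s) == w / s), then with fSubsetWidth = 10,
+ * fSrcOffset = 0, and sampleX = 4 we get fX0 = 2 and fDstWidth = 2, so the
+ * row procs read source pixels 2 and 6.
+ */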
+
+/*
+ *
+ * Swizzle the specified row
+ *
+ */
+void SkMaskSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
+ SkASSERT(nullptr != dst && nullptr != src);
+ fRowProc(dst, src, fDstWidth, fMasks, fX0, fSampleX);
+}
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.h b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
new file mode 100644
index 0000000000..c7c44fd354
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMaskSwizzler_DEFINED
+#define SkMaskSwizzler_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/codec/SkMasks.h"
+#include "src/codec/SkSampler.h"
+#include "src/codec/SkSwizzler.h"
+
+/*
+ *
+ * Used to swizzle images whose pixel components are extracted by bit masks
+ * Currently only used by the BMP decoder
+ *
+ */
+class SkMaskSwizzler : public SkSampler {
+public:
+
+ /*
+ * @param masks Unowned pointer to helper class
+ */
+ static SkMaskSwizzler* CreateMaskSwizzler(const SkImageInfo& dstInfo,
+ bool srcIsOpaque,
+ SkMasks* masks,
+ uint32_t bitsPerPixel,
+ const SkCodec::Options& options);
+
+ /*
+ * Swizzle a row
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ int fillWidth() const override {
+ return fDstWidth;
+ }
+
+ /**
+     *  Returns the width that we write to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ * A similar function exists on SkSwizzler.
+ */
+ int swizzleWidth() const { return fDstWidth; }
+
+private:
+
+ /*
+ * Row procedure used for swizzle
+ */
+ typedef void (*RowProc)(void* dstRow, const uint8_t* srcRow, int width,
+ SkMasks* masks, uint32_t startX, uint32_t sampleX);
+
+    SkMaskSwizzler(SkMasks* masks, RowProc proc, int srcOffset, int subsetWidth);
+
+ int onSetSampleX(int) override;
+
+ SkMasks* fMasks; // unowned
+ const RowProc fRowProc;
+
+ // FIXME: Can this class share more with SkSwizzler? These variables are all the same.
+ const int fSubsetWidth; // Width of the subset of source before any sampling.
+ int fDstWidth; // Width of dst, which may differ with sampling.
+ int fSampleX;
+ int fSrcOffset;
+ int fX0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkMasks.cpp b/gfx/skia/skia/src/codec/SkMasks.cpp
new file mode 100644
index 0000000000..e10551cfe4
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkMasks.h"
+
+/*
+ *
+ * Used to convert 1-7 bit color components into 8-bit color components
+ *
+ */
+static constexpr uint8_t n_bit_to_8_bit_lookup_table[] = {
+ // 1 bit
+ 0, 255,
+ // 2 bits
+ 0, 85, 170, 255,
+ // 3 bits
+ 0, 36, 73, 109, 146, 182, 219, 255,
+ // 4 bits
+ 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255,
+ // 5 bits
+ 0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140,
+ 148, 156, 165, 173, 181, 189, 197, 206, 214, 222, 230, 239, 247, 255,
+ // 6 bits
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73,
+ 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138,
+ 142, 146, 150, 154, 158, 162, 166, 170, 174, 178, 182, 186, 190, 194, 198,
+ 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, 255,
+ // 7 bits
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38,
+ 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110,
+ 112, 114, 116, 118, 120, 122, 124, 126, 129, 131, 133, 135, 137, 139, 141,
+ 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171,
+ 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201,
+ 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231,
+ 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255
+};
+
+/*
+ *
+ * Convert an n bit component to an 8-bit component
+ *
+ */
+static uint8_t convert_to_8(uint8_t component, uint32_t n) {
+ if (0 == n) {
+ return 0;
+ } else if (8 > n) {
+ return n_bit_to_8_bit_lookup_table[(1 << n) - 2 + component];
+ } else {
+ SkASSERT(8 == n);
+ return component;
+ }
+}
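+
+/*
+ * Worked example (illustrative): the blocks above are laid out back to back,
+ * so the n-bit block starts at index (1 << n) - 2 (1-bit at 0, 2-bit at 2,
+ * 3-bit at 6, ...). convert_to_8(16, 5) therefore reads index 30 + 16 = 46,
+ * which holds 132 -- the rounded value of 16 * 255 / 31.
+ */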
+
+static uint8_t get_comp(uint32_t pixel, uint32_t mask, uint32_t shift,
+ uint32_t size) {
+ return convert_to_8((pixel & mask) >> shift, size);
+}
+
+/*
+ *
+ * Get a color component
+ *
+ */
+uint8_t SkMasks::getRed(uint32_t pixel) const {
+ return get_comp(pixel, fRed.mask, fRed.shift, fRed.size);
+}
+uint8_t SkMasks::getGreen(uint32_t pixel) const {
+ return get_comp(pixel, fGreen.mask, fGreen.shift, fGreen.size);
+}
+uint8_t SkMasks::getBlue(uint32_t pixel) const {
+ return get_comp(pixel, fBlue.mask, fBlue.shift, fBlue.size);
+}
+uint8_t SkMasks::getAlpha(uint32_t pixel) const {
+ return get_comp(pixel, fAlpha.mask, fAlpha.shift, fAlpha.size);
+}
+
+/*
+ *
+ * Process an input mask to obtain the necessary information
+ *
+ */
+static SkMasks::MaskInfo process_mask(uint32_t mask) {
+ // Determine properties of the mask
+ uint32_t tempMask = mask;
+ uint32_t shift = 0;
+ uint32_t size = 0;
+ if (tempMask != 0) {
+ // Count trailing zeros on masks
+ for (; (tempMask & 1) == 0; tempMask >>= 1) {
+ shift++;
+ }
+ // Count the size of the mask
+ for (; tempMask & 1; tempMask >>= 1) {
+ size++;
+ }
+ // Verify that the mask is continuous
+ if (tempMask) {
+ SkCodecPrintf("Warning: Bit mask is not continuous.\n");
+ // Finish processing the mask
+ for (; tempMask; tempMask >>= 1) {
+ size++;
+ }
+ }
+ // Truncate masks greater than 8 bits
+ if (size > 8) {
+ shift += size - 8;
+ size = 8;
+ mask &= 0xFF << shift;
+ }
+ }
+
+ return { mask, shift, size };
+}
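+
+/*
+ * Worked example (illustrative): for the 565-style green mask 0x07E0,
+ * process_mask() counts five trailing zeros (shift = 5) and six consecutive
+ * set bits (size = 6), so get_comp() extracts (pixel & 0x07E0) >> 5 and
+ * widens the 6-bit result to 8 bits via the lookup table above.
+ */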
+
+/*
+ *
+ * Create the masks object
+ *
+ */
+SkMasks* SkMasks::CreateMasks(InputMasks masks, int bytesPerPixel) {
+ SkASSERT(0 < bytesPerPixel && bytesPerPixel <= 4);
+
+ // Trim the input masks to match bytesPerPixel.
+ if (bytesPerPixel < 4) {
+ int bitsPerPixel = 8*bytesPerPixel;
+ masks.red &= (1 << bitsPerPixel) - 1;
+ masks.green &= (1 << bitsPerPixel) - 1;
+ masks.blue &= (1 << bitsPerPixel) - 1;
+ masks.alpha &= (1 << bitsPerPixel) - 1;
+ }
+
+ // Check that masks do not overlap.
+ if (((masks.red & masks.green) |
+ (masks.red & masks.blue ) |
+ (masks.red & masks.alpha) |
+ (masks.green & masks.blue ) |
+ (masks.green & masks.alpha) |
+ (masks.blue & masks.alpha) ) != 0) {
+ return nullptr;
+ }
+
+ return new SkMasks(process_mask(masks.red ),
+ process_mask(masks.green),
+ process_mask(masks.blue ),
+ process_mask(masks.alpha));
+}
+
+
+SkMasks::SkMasks(const MaskInfo& red, const MaskInfo& green,
+ const MaskInfo& blue, const MaskInfo& alpha)
+ : fRed(red)
+ , fGreen(green)
+ , fBlue(blue)
+ , fAlpha(alpha)
+{}
diff --git a/gfx/skia/skia/src/codec/SkMasks.h b/gfx/skia/skia/src/codec/SkMasks.h
new file mode 100644
index 0000000000..473d6f8baf
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMasks_DEFINED
+#define SkMasks_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/*
+ *
+ * Contains useful mask routines for SkMaskSwizzler
+ *
+ */
+class SkMasks {
+public:
+
+ /*
+ *
+ * Input bit masks format
+ *
+ */
+ struct InputMasks {
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t alpha;
+ };
+
+ /*
+ *
+ * Contains all of the information for a single mask
+ *
+ */
+ struct MaskInfo {
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t size;
+ };
+
+ /*
+ *
+ * Create the masks object
+ *
+ */
+ static SkMasks* CreateMasks(InputMasks masks, int bytesPerPixel);
+
+ /*
+ *
+ * Get a color component
+ *
+ */
+ uint8_t getRed(uint32_t pixel) const;
+ uint8_t getGreen(uint32_t pixel) const;
+ uint8_t getBlue(uint32_t pixel) const;
+ uint8_t getAlpha(uint32_t pixel) const;
+
+ /*
+ *
+ * Getter for the alpha mask
+ * The alpha mask may be used in other decoding modes
+ *
+ */
+ uint32_t getAlphaMask() const {
+ return fAlpha.mask;
+ }
+
+private:
+
+ /*
+ *
+ * Constructor
+ *
+ */
+ SkMasks(const MaskInfo& red, const MaskInfo& green, const MaskInfo& blue,
+ const MaskInfo& alpha);
+
+ const MaskInfo fRed;
+ const MaskInfo fGreen;
+ const MaskInfo fBlue;
+ const MaskInfo fAlpha;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp
new file mode 100644
index 0000000000..aaee8c2d38
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/codec/SkCodecPriv.h"
+
+bool SkParseEncodedOrigin(const uint8_t* data, size_t data_length, SkEncodedOrigin* orientation) {
+ SkASSERT(orientation);
+ bool littleEndian;
+ // We need eight bytes to read the endian marker and the offset, below.
+ if (data_length < 8 || !is_valid_endian_marker(data, &littleEndian)) {
+ return false;
+ }
+
+ auto getEndianInt = [](const uint8_t* data, bool littleEndian) -> uint32_t {
+ if (littleEndian) {
+ return (data[3] << 24) | (data[2] << 16) | (data[1] << 8) | (data[0]);
+ }
+
+ return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | (data[3]);
+ };
+
+ // Get the offset from the start of the marker.
+ // Though this only reads four bytes, use a larger int in case it overflows.
+ uint64_t offset = getEndianInt(data + 4, littleEndian);
+
+ // Require that the marker is at least large enough to contain the number of entries.
+ if (data_length < offset + 2) {
+ return false;
+ }
+ uint32_t numEntries = get_endian_short(data + offset, littleEndian);
+
+ // Tag (2 bytes), Datatype (2 bytes), Number of elements (4 bytes), Data (4 bytes)
+ const uint32_t kEntrySize = 12;
+ const auto max = SkTo<uint32_t>((data_length - offset - 2) / kEntrySize);
+ numEntries = SkTMin(numEntries, max);
+
+ // Advance the data to the start of the entries.
+ data += offset + 2;
+
+ const uint16_t kOriginTag = 0x112;
+ const uint16_t kOriginType = 3;
+ for (uint32_t i = 0; i < numEntries; i++, data += kEntrySize) {
+ uint16_t tag = get_endian_short(data, littleEndian);
+ uint16_t type = get_endian_short(data + 2, littleEndian);
+ uint32_t count = getEndianInt(data + 4, littleEndian);
+ if (kOriginTag == tag && kOriginType == type && 1 == count) {
+ uint16_t val = get_endian_short(data + 8, littleEndian);
+ if (0 < val && val <= kLast_SkEncodedOrigin) {
+ *orientation = (SkEncodedOrigin) val;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
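+
+/*
+ * Illustrative usage sketch (not part of the original file): a codec that has
+ * located the TIFF-style directory inside an EXIF blob might call this as
+ * follows; ifdData and ifdLength are placeholders.
+ *
+ *     SkEncodedOrigin origin = kTopLeft_SkEncodedOrigin;
+ *     if (SkParseEncodedOrigin(ifdData, ifdLength, &origin)) {
+ *         // origin now holds one of the eight EXIF orientations
+ *     }
+ */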
diff --git a/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h
new file mode 100644
index 0000000000..4891557a19
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkParseEncodedOrigin_DEFINED
+#define SkParseEncodedOrigin_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+
+/**
+ * If |data| is an EXIF tag representing an SkEncodedOrigin, return true and set |out|
+ * appropriately. Otherwise return false.
+ */
+bool SkParseEncodedOrigin(const uint8_t* data, size_t data_length, SkEncodedOrigin* out);
+
+#endif // SkParseEncodedOrigin_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkPngCodec.cpp b/gfx/skia/skia/src/codec/SkPngCodec.cpp
new file mode 100644
index 0000000000..4eaa034e64
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPngCodec.cpp
@@ -0,0 +1,1194 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkPngCodec.h"
+#include "src/codec/SkPngPriv.h"
+#include "src/codec/SkSwizzler.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkUtils.h"
+
+#include "png.h"
+#include <algorithm>
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #include "include/android/SkAndroidFrameworkUtils.h"
+#endif
+
+// This warning triggers false positives way too often here.
+#if defined(__GNUC__) && !defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wclobbered"
+#endif
+
+// FIXME (scroggo): We can use png_jumpbuf directly once Google3 is on 1.6
+#define PNG_JMPBUF(x) png_jmpbuf((png_structp) x)
+
+///////////////////////////////////////////////////////////////////////////////
+// Callback functions
+///////////////////////////////////////////////////////////////////////////////
+
+// When setjmp is first called, it returns 0, meaning longjmp was not called.
+constexpr int kSetJmpOkay = 0;
+// An error internal to libpng.
+constexpr int kPngError = 1;
+// Passed to longjmp when we have decoded as many lines as we need.
+constexpr int kStopDecoding = 2;
+
+static void sk_error_fn(png_structp png_ptr, png_const_charp msg) {
+ SkCodecPrintf("------ png error %s\n", msg);
+ longjmp(PNG_JMPBUF(png_ptr), kPngError);
+}
+
+void sk_warning_fn(png_structp, png_const_charp msg) {
+ SkCodecPrintf("----- png warning %s\n", msg);
+}
+
+#ifdef PNG_READ_UNKNOWN_CHUNKS_SUPPORTED
+static int sk_read_user_chunk(png_structp png_ptr, png_unknown_chunkp chunk) {
+ SkPngChunkReader* chunkReader = (SkPngChunkReader*)png_get_user_chunk_ptr(png_ptr);
+ // readChunk() returning true means continue decoding
+ return chunkReader->readChunk((const char*)chunk->name, chunk->data, chunk->size) ? 1 : -1;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Helpers
+///////////////////////////////////////////////////////////////////////////////
+
+class AutoCleanPng : public SkNoncopyable {
+public:
+ /*
+ * This class does not take ownership of stream or reader, but if codecPtr
+ * is non-NULL, and decodeBounds succeeds, it will have created a new
+ * SkCodec (pointed to by *codecPtr) which will own/ref them, as well as
+ * the png_ptr and info_ptr.
+ */
+ AutoCleanPng(png_structp png_ptr, SkStream* stream, SkPngChunkReader* reader,
+ SkCodec** codecPtr)
+ : fPng_ptr(png_ptr)
+ , fInfo_ptr(nullptr)
+ , fStream(stream)
+ , fChunkReader(reader)
+ , fOutCodec(codecPtr)
+ {}
+
+ ~AutoCleanPng() {
+ // fInfo_ptr will never be non-nullptr unless fPng_ptr is.
+ if (fPng_ptr) {
+ png_infopp info_pp = fInfo_ptr ? &fInfo_ptr : nullptr;
+ png_destroy_read_struct(&fPng_ptr, info_pp, nullptr);
+ }
+ }
+
+ void setInfoPtr(png_infop info_ptr) {
+ SkASSERT(nullptr == fInfo_ptr);
+ fInfo_ptr = info_ptr;
+ }
+
+ /**
+ * Reads enough of the input stream to decode the bounds.
+ * @return false if the stream is not a valid PNG (or too short).
+ * true if it read enough of the stream to determine the bounds.
+ * In the latter case, the stream may have been read beyond the
+ * point to determine the bounds, and the png_ptr will have saved
+ * any extra data. Further, if the codecPtr supplied to the
+ * constructor was not NULL, it will now point to a new SkCodec,
+ * which owns (or refs, in the case of the SkPngChunkReader) the
+ * inputs. If codecPtr was NULL, the png_ptr and info_ptr are
+ * unowned, and it is up to the caller to destroy them.
+ */
+ bool decodeBounds();
+
+private:
+ png_structp fPng_ptr;
+ png_infop fInfo_ptr;
+ SkStream* fStream;
+ SkPngChunkReader* fChunkReader;
+ SkCodec** fOutCodec;
+
+ void infoCallback(size_t idatLength);
+
+ void releasePngPtrs() {
+ fPng_ptr = nullptr;
+ fInfo_ptr = nullptr;
+ }
+};
+#define AutoCleanPng(...) SK_REQUIRE_LOCAL_VAR(AutoCleanPng)
+
+static inline bool is_chunk(const png_byte* chunk, const char* tag) {
+ return memcmp(chunk + 4, tag, 4) == 0;
+}
+
+static inline bool process_data(png_structp png_ptr, png_infop info_ptr,
+ SkStream* stream, void* buffer, size_t bufferSize, size_t length) {
+ while (length > 0) {
+ const size_t bytesToProcess = std::min(bufferSize, length);
+ const size_t bytesRead = stream->read(buffer, bytesToProcess);
+ png_process_data(png_ptr, info_ptr, (png_bytep) buffer, bytesRead);
+ if (bytesRead < bytesToProcess) {
+ return false;
+ }
+ length -= bytesToProcess;
+ }
+ return true;
+}
+
+bool AutoCleanPng::decodeBounds() {
+ if (setjmp(PNG_JMPBUF(fPng_ptr))) {
+ return false;
+ }
+
+ png_set_progressive_read_fn(fPng_ptr, nullptr, nullptr, nullptr, nullptr);
+
+ // Arbitrary buffer size, though note that it matches (below)
+ // SkPngCodec::processData(). FIXME: Can we better suit this to the size of
+ // the PNG header?
+ constexpr size_t kBufferSize = 4096;
+ char buffer[kBufferSize];
+
+ {
+ // Parse the signature.
+ if (fStream->read(buffer, 8) < 8) {
+ return false;
+ }
+
+ png_process_data(fPng_ptr, fInfo_ptr, (png_bytep) buffer, 8);
+ }
+
+ while (true) {
+ // Parse chunk length and type.
+ if (fStream->read(buffer, 8) < 8) {
+ // We have read to the end of the input without decoding bounds.
+ break;
+ }
+
+ png_byte* chunk = reinterpret_cast<png_byte*>(buffer);
+ const size_t length = png_get_uint_32(chunk);
+
+ if (is_chunk(chunk, "IDAT")) {
+ this->infoCallback(length);
+ return true;
+ }
+
+ png_process_data(fPng_ptr, fInfo_ptr, chunk, 8);
+ // Process the full chunk + CRC.
+ if (!process_data(fPng_ptr, fInfo_ptr, fStream, buffer, kBufferSize, length + 4)) {
+ return false;
+ }
+ }
+
+ return false;
+}
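+
+/*
+ * Illustrative note (not part of the original file): a PNG chunk is laid out
+ * as
+ *
+ *     | length (4, big-endian) | type (4) | data (length bytes) | CRC (4) |
+ *
+ * which is why the loop above reads 8 bytes to get length + type and then
+ * forwards length + 4 more bytes (data plus CRC) to libpng.
+ */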
+
+bool SkPngCodec::processData() {
+ switch (setjmp(PNG_JMPBUF(fPng_ptr))) {
+ case kPngError:
+ // There was an error. Stop processing data.
+ // FIXME: Do we need to discard png_ptr?
+ return false;
+ case kStopDecoding:
+ // We decoded all the lines we want.
+ return true;
+ case kSetJmpOkay:
+ // Everything is okay.
+ break;
+ default:
+ // No other values should be passed to longjmp.
+ SkASSERT(false);
+ }
+
+ // Arbitrary buffer size
+ constexpr size_t kBufferSize = 4096;
+ char buffer[kBufferSize];
+
+ bool iend = false;
+ while (true) {
+ size_t length;
+ if (fDecodedIdat) {
+ // Parse chunk length and type.
+ if (this->stream()->read(buffer, 8) < 8) {
+ break;
+ }
+
+ png_byte* chunk = reinterpret_cast<png_byte*>(buffer);
+ png_process_data(fPng_ptr, fInfo_ptr, chunk, 8);
+ if (is_chunk(chunk, "IEND")) {
+ iend = true;
+ }
+
+ length = png_get_uint_32(chunk);
+ } else {
+ length = fIdatLength;
+ png_byte idat[] = {0, 0, 0, 0, 'I', 'D', 'A', 'T'};
+ png_save_uint_32(idat, length);
+ png_process_data(fPng_ptr, fInfo_ptr, idat, 8);
+ fDecodedIdat = true;
+ }
+
+ // Process the full chunk + CRC.
+ if (!process_data(fPng_ptr, fInfo_ptr, this->stream(), buffer, kBufferSize, length + 4)
+ || iend) {
+ break;
+ }
+ }
+
+ return true;
+}
+
+static constexpr SkColorType kXformSrcColorType = kRGBA_8888_SkColorType;
+
+static inline bool needs_premul(SkAlphaType dstAT, SkEncodedInfo::Alpha encodedAlpha) {
+ return kPremul_SkAlphaType == dstAT && SkEncodedInfo::kUnpremul_Alpha == encodedAlpha;
+}
+
+// Note: SkColorTable claims to store SkPMColors, which is not necessarily the case here.
+bool SkPngCodec::createColorTable(const SkImageInfo& dstInfo) {
+
+ int numColors;
+ png_color* palette;
+ if (!png_get_PLTE(fPng_ptr, fInfo_ptr, &palette, &numColors)) {
+ return false;
+ }
+
+ // Contents depend on tableColorType and our choice of if/when to premultiply:
+ // { kPremul, kUnpremul, kOpaque } x { RGBA, BGRA }
+ SkPMColor colorTable[256];
+ SkColorType tableColorType = this->colorXform() ? kXformSrcColorType : dstInfo.colorType();
+
+ png_bytep alphas;
+ int numColorsWithAlpha = 0;
+ if (png_get_tRNS(fPng_ptr, fInfo_ptr, &alphas, &numColorsWithAlpha, nullptr)) {
+ bool premultiply = needs_premul(dstInfo.alphaType(), this->getEncodedInfo().alpha());
+
+        // Choose which function to use to create the color table. If the final destination's
+        // alpha type is unpremultiplied, the color table will store unpremultiplied colors.
+ PackColorProc proc = choose_pack_color_proc(premultiply, tableColorType);
+
+ for (int i = 0; i < numColorsWithAlpha; i++) {
+ // We don't have a function in SkOpts that combines a set of alphas with a set
+ // of RGBs. We could write one, but it's hardly worth it, given that this
+ // is such a small fraction of the total decode time.
+ colorTable[i] = proc(alphas[i], palette->red, palette->green, palette->blue);
+ palette++;
+ }
+ }
+
+ if (numColorsWithAlpha < numColors) {
+ // The optimized code depends on a 3-byte png_color struct with the colors
+ // in RGB order. These checks make sure it is safe to use.
+ static_assert(3 == sizeof(png_color), "png_color struct has changed. Opts are broken.");
+#ifdef SK_DEBUG
+ SkASSERT(&palette->red < &palette->green);
+ SkASSERT(&palette->green < &palette->blue);
+#endif
+
+ if (is_rgba(tableColorType)) {
+ SkOpts::RGB_to_RGB1(colorTable + numColorsWithAlpha, (const uint8_t*)palette,
+ numColors - numColorsWithAlpha);
+ } else {
+ SkOpts::RGB_to_BGR1(colorTable + numColorsWithAlpha, (const uint8_t*)palette,
+ numColors - numColorsWithAlpha);
+ }
+ }
+
+ if (this->colorXform() && !this->xformOnDecode()) {
+ this->applyColorXform(colorTable, colorTable, numColors);
+ }
+
+ // Pad the color table with the last color in the table (or black) in the case that
+ // invalid pixel indices exceed the number of colors in the table.
+ const int maxColors = 1 << fBitDepth;
+ if (numColors < maxColors) {
+ SkPMColor lastColor = numColors > 0 ? colorTable[numColors - 1] : SK_ColorBLACK;
+ sk_memset32(colorTable + numColors, lastColor, maxColors - numColors);
+ }
+
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Creation
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPngCodec::IsPng(const char* buf, size_t bytesRead) {
+ return !png_sig_cmp((png_bytep) buf, (png_size_t)0, bytesRead);
+}
+
+#if (PNG_LIBPNG_VER_MAJOR > 1) || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 6)
+
+static float png_fixed_point_to_float(png_fixed_point x) {
+ // We multiply by the same factor that libpng used to convert
+ // fixed point -> double. Since we want floats, we choose to
+ // do the conversion ourselves rather than convert
+ // fixed point -> double -> float.
+ return ((float) x) * 0.00001f;
+}
+
+static float png_inverted_fixed_point_to_float(png_fixed_point x) {
+ // This is necessary because the gAMA chunk actually stores 1/gamma.
+ return 1.0f / png_fixed_point_to_float(x);
+}
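+
+/*
+ * Worked example (illustrative): png_fixed_point values are scaled by
+ * 100,000, and gAMA stores 1/gamma, so the common gAMA value 45455 decodes
+ * as 1.0f / (45455 * 0.00001f) = 1.0f / 0.45455f, i.e. roughly gamma 2.2.
+ */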
+
+#endif // LIBPNG >= 1.6
+
+// If there is no color profile information, it will use sRGB.
+std::unique_ptr<SkEncodedInfo::ICCProfile> read_color_profile(png_structp png_ptr,
+ png_infop info_ptr) {
+
+#if (PNG_LIBPNG_VER_MAJOR > 1) || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 6)
+ // First check for an ICC profile
+ png_bytep profile;
+ png_uint_32 length;
+    // The variables below are unused; however, we need to pass them in anyway or
+    // png_get_iCCP() will return nothing.
+ // Could knowing the |name| of the profile ever be interesting? Maybe for debugging?
+ png_charp name;
+ // The |compression| is uninteresting since:
+ // (1) libpng has already decompressed the profile for us.
+ // (2) "deflate" is the only mode of decompression that libpng supports.
+ int compression;
+ if (PNG_INFO_iCCP == png_get_iCCP(png_ptr, info_ptr, &name, &compression, &profile,
+ &length)) {
+ auto data = SkData::MakeWithCopy(profile, length);
+ return SkEncodedInfo::ICCProfile::Make(std::move(data));
+ }
+
+ // Second, check for sRGB.
+    // Note that Blink does this first. This code checks ICC first, with the thinking that
+    // an image that has both truly wants the potentially more specific ICC chunk, with sRGB as a
+ // backup in case the decoder does not support full color management.
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
+ // sRGB chunks also store a rendering intent: Absolute, Relative,
+ // Perceptual, and Saturation.
+ // FIXME (scroggo): Extract this information from the sRGB chunk once
+ // we are able to handle this information in
+ // skcms_ICCProfile
+ return nullptr;
+ }
+
+    // Default to the sRGB gamut.
+ skcms_Matrix3x3 toXYZD50 = skcms_sRGB_profile()->toXYZD50;
+ // Next, check for chromaticities.
+ png_fixed_point chrm[8];
+ png_fixed_point gamma;
+ if (png_get_cHRM_fixed(png_ptr, info_ptr, &chrm[0], &chrm[1], &chrm[2], &chrm[3], &chrm[4],
+ &chrm[5], &chrm[6], &chrm[7]))
+ {
+ float rx = png_fixed_point_to_float(chrm[2]);
+ float ry = png_fixed_point_to_float(chrm[3]);
+ float gx = png_fixed_point_to_float(chrm[4]);
+ float gy = png_fixed_point_to_float(chrm[5]);
+ float bx = png_fixed_point_to_float(chrm[6]);
+ float by = png_fixed_point_to_float(chrm[7]);
+ float wx = png_fixed_point_to_float(chrm[0]);
+ float wy = png_fixed_point_to_float(chrm[1]);
+
+ skcms_Matrix3x3 tmp;
+ if (skcms_PrimariesToXYZD50(rx, ry, gx, gy, bx, by, wx, wy, &tmp)) {
+ toXYZD50 = tmp;
+ } else {
+ // Note that Blink simply returns nullptr in this case. We'll fall
+ // back to srgb.
+ }
+ }
+
+ skcms_TransferFunction fn;
+ if (PNG_INFO_gAMA == png_get_gAMA_fixed(png_ptr, info_ptr, &gamma)) {
+ fn.a = 1.0f;
+ fn.b = fn.c = fn.d = fn.e = fn.f = 0.0f;
+ fn.g = png_inverted_fixed_point_to_float(gamma);
+ } else {
+ // Default to sRGB gamma if the image has color space information,
+ // but does not specify gamma.
+ // Note that Blink would again return nullptr in this case.
+ fn = *skcms_sRGB_TransferFunction();
+ }
+
+ skcms_ICCProfile skcmsProfile;
+ skcms_Init(&skcmsProfile);
+ skcms_SetTransferFunction(&skcmsProfile, &fn);
+ skcms_SetXYZD50(&skcmsProfile, &toXYZD50);
+
+ return SkEncodedInfo::ICCProfile::Make(skcmsProfile);
+#else // LIBPNG >= 1.6
+ return nullptr;
+#endif // LIBPNG >= 1.6
+}
+
+void SkPngCodec::allocateStorage(const SkImageInfo& dstInfo) {
+ switch (fXformMode) {
+ case kSwizzleOnly_XformMode:
+ break;
+ case kColorOnly_XformMode:
+ // Intentional fall through. A swizzler hasn't been created yet, but one will
+ // be created later if we are sampling. We'll go ahead and allocate
+ // enough memory to swizzle if necessary.
+ case kSwizzleColor_XformMode: {
+ const int bitsPerPixel = this->getEncodedInfo().bitsPerPixel();
+
+ // If we have more than 8-bits (per component) of precision, we will keep that
+ // extra precision. Otherwise, we will swizzle to RGBA_8888 before transforming.
+ const size_t bytesPerPixel = (bitsPerPixel > 32) ? bitsPerPixel / 8 : 4;
+ const size_t colorXformBytes = dstInfo.width() * bytesPerPixel;
+ fStorage.reset(colorXformBytes);
+ fColorXformSrcRow = fStorage.get();
+ break;
+ }
+ }
+}
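+
+/*
+ * Worked example (illustrative): a 16-bit-per-component RGBA PNG has
+ * bitsPerPixel = 64, so bytesPerPixel = 8 and the scratch row holds
+ * width * 8 bytes. An 8-bit paletted or gray PNG has bitsPerPixel <= 32, so
+ * the row is sized for the 4-byte RGBA_8888 intermediate format instead.
+ */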
+
+static skcms_PixelFormat png_select_xform_format(const SkEncodedInfo& info) {
+ // We use kRGB and kRGBA formats because color PNGs are always RGB or RGBA.
+ if (16 == info.bitsPerComponent()) {
+ if (SkEncodedInfo::kRGBA_Color == info.color()) {
+ return skcms_PixelFormat_RGBA_16161616BE;
+ } else if (SkEncodedInfo::kRGB_Color == info.color()) {
+ return skcms_PixelFormat_RGB_161616BE;
+ }
+ } else if (SkEncodedInfo::kGray_Color == info.color()) {
+ return skcms_PixelFormat_G_8;
+ }
+
+ return skcms_PixelFormat_RGBA_8888;
+}
+
+void SkPngCodec::applyXformRow(void* dst, const void* src) {
+ switch (fXformMode) {
+ case kSwizzleOnly_XformMode:
+ fSwizzler->swizzle(dst, (const uint8_t*) src);
+ break;
+ case kColorOnly_XformMode:
+ this->applyColorXform(dst, src, fXformWidth);
+ break;
+ case kSwizzleColor_XformMode:
+ fSwizzler->swizzle(fColorXformSrcRow, (const uint8_t*) src);
+ this->applyColorXform(dst, fColorXformSrcRow, fXformWidth);
+ break;
+ }
+}
+
+static SkCodec::Result log_and_return_error(bool success) {
+ if (success) return SkCodec::kIncompleteInput;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkAndroidFrameworkUtils::SafetyNetLog("117838472");
+#endif
+ return SkCodec::kErrorInInput;
+}
+
+class SkPngNormalDecoder : public SkPngCodec {
+public:
+ SkPngNormalDecoder(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ SkPngChunkReader* reader, png_structp png_ptr, png_infop info_ptr, int bitDepth)
+ : INHERITED(std::move(info), std::move(stream), reader, png_ptr, info_ptr, bitDepth)
+ , fRowsWrittenToOutput(0)
+ , fDst(nullptr)
+ , fRowBytes(0)
+ , fFirstRow(0)
+ , fLastRow(0)
+ {}
+
+ static void AllRowsCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int /*pass*/) {
+ GetDecoder(png_ptr)->allRowsCallback(row, rowNum);
+ }
+
+ static void RowCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int /*pass*/) {
+ GetDecoder(png_ptr)->rowCallback(row, rowNum);
+ }
+
+private:
+ int fRowsWrittenToOutput;
+ void* fDst;
+ size_t fRowBytes;
+
+ // Variables for partial decode
+ int fFirstRow; // FIXME: Move to baseclass?
+ int fLastRow;
+ int fRowsNeeded;
+
+ typedef SkPngCodec INHERITED;
+
+ static SkPngNormalDecoder* GetDecoder(png_structp png_ptr) {
+ return static_cast<SkPngNormalDecoder*>(png_get_progressive_ptr(png_ptr));
+ }
+
+ Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) override {
+ const int height = this->dimensions().height();
+ png_set_progressive_read_fn(this->png_ptr(), this, nullptr, AllRowsCallback, nullptr);
+ fDst = dst;
+ fRowBytes = rowBytes;
+
+ fRowsWrittenToOutput = 0;
+ fFirstRow = 0;
+ fLastRow = height - 1;
+
+ const bool success = this->processData();
+ if (success && fRowsWrittenToOutput == height) {
+ return kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fRowsWrittenToOutput;
+ }
+
+ return log_and_return_error(success);
+ }
+
+ void allRowsCallback(png_bytep row, int rowNum) {
+ SkASSERT(rowNum == fRowsWrittenToOutput);
+ fRowsWrittenToOutput++;
+ this->applyXformRow(fDst, row);
+ fDst = SkTAddOffset<void>(fDst, fRowBytes);
+ }
+
+ void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) override {
+ png_set_progressive_read_fn(this->png_ptr(), this, nullptr, RowCallback, nullptr);
+ fFirstRow = firstRow;
+ fLastRow = lastRow;
+ fDst = dst;
+ fRowBytes = rowBytes;
+ fRowsWrittenToOutput = 0;
+ fRowsNeeded = fLastRow - fFirstRow + 1;
+ }
+
+ Result decode(int* rowsDecoded) override {
+ if (this->swizzler()) {
+ const int sampleY = this->swizzler()->sampleY();
+ fRowsNeeded = get_scaled_dimension(fLastRow - fFirstRow + 1, sampleY);
+ }
+
+ const bool success = this->processData();
+ if (success && fRowsWrittenToOutput == fRowsNeeded) {
+ return kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fRowsWrittenToOutput;
+ }
+
+ return log_and_return_error(success);
+ }
+
+ void rowCallback(png_bytep row, int rowNum) {
+ if (rowNum < fFirstRow) {
+ // Ignore this row.
+ return;
+ }
+
+ SkASSERT(rowNum <= fLastRow);
+ SkASSERT(fRowsWrittenToOutput < fRowsNeeded);
+
+ // If there is no swizzler, all rows are needed.
+ if (!this->swizzler() || this->swizzler()->rowNeeded(rowNum - fFirstRow)) {
+ this->applyXformRow(fDst, row);
+ fDst = SkTAddOffset<void>(fDst, fRowBytes);
+ fRowsWrittenToOutput++;
+ }
+
+ if (fRowsWrittenToOutput == fRowsNeeded) {
+ // Fake error to stop decoding scanlines.
+ longjmp(PNG_JMPBUF(this->png_ptr()), kStopDecoding);
+ }
+ }
+};
+
+class SkPngInterlacedDecoder : public SkPngCodec {
+public:
+ SkPngInterlacedDecoder(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ SkPngChunkReader* reader, png_structp png_ptr,
+ png_infop info_ptr, int bitDepth, int numberPasses)
+ : INHERITED(std::move(info), std::move(stream), reader, png_ptr, info_ptr, bitDepth)
+ , fNumberPasses(numberPasses)
+ , fFirstRow(0)
+ , fLastRow(0)
+ , fLinesDecoded(0)
+ , fInterlacedComplete(false)
+ , fPng_rowbytes(0)
+ {}
+
+ static void InterlacedRowCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int pass) {
+ auto decoder = static_cast<SkPngInterlacedDecoder*>(png_get_progressive_ptr(png_ptr));
+ decoder->interlacedRowCallback(row, rowNum, pass);
+ }
+
+private:
+ const int fNumberPasses;
+ int fFirstRow;
+ int fLastRow;
+ void* fDst;
+ size_t fRowBytes;
+ int fLinesDecoded;
+ bool fInterlacedComplete;
+ size_t fPng_rowbytes;
+ SkAutoTMalloc<png_byte> fInterlaceBuffer;
+
+ typedef SkPngCodec INHERITED;
+
+ // FIXME: Currently sharing interlaced callback for all rows and subset. It's not
+ // as expensive as the subset version of non-interlaced, but it still does extra
+ // work.
+ void interlacedRowCallback(png_bytep row, int rowNum, int pass) {
+ if (rowNum < fFirstRow || rowNum > fLastRow || fInterlacedComplete) {
+ // Ignore this row
+ return;
+ }
+
+ png_bytep oldRow = fInterlaceBuffer.get() + (rowNum - fFirstRow) * fPng_rowbytes;
+ png_progressive_combine_row(this->png_ptr(), oldRow, row);
+
+ if (0 == pass) {
+ // The first pass initializes all rows.
+ SkASSERT(row);
+ SkASSERT(fLinesDecoded == rowNum - fFirstRow);
+ fLinesDecoded++;
+ } else {
+ SkASSERT(fLinesDecoded == fLastRow - fFirstRow + 1);
+ if (fNumberPasses - 1 == pass && rowNum == fLastRow) {
+ // Last pass, and we have read all of the rows we care about.
+ fInterlacedComplete = true;
+ if (fLastRow != this->dimensions().height() - 1 ||
+ (this->swizzler() && this->swizzler()->sampleY() != 1)) {
+ // Fake error to stop decoding scanlines. Only stop if we're not decoding the
+ // whole image, in which case processing the rest of the image might be
+ // expensive. When decoding the whole image, read through the IEND chunk to
+ // preserve Android behavior of leaving the input stream in the right place.
+ longjmp(PNG_JMPBUF(this->png_ptr()), kStopDecoding);
+ }
+ }
+ }
+ }
+
+ Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) override {
+ const int height = this->dimensions().height();
+ this->setUpInterlaceBuffer(height);
+ png_set_progressive_read_fn(this->png_ptr(), this, nullptr, InterlacedRowCallback,
+ nullptr);
+
+ fFirstRow = 0;
+ fLastRow = height - 1;
+ fLinesDecoded = 0;
+
+ const bool success = this->processData();
+ png_bytep srcRow = fInterlaceBuffer.get();
+ // FIXME: When resuming, this may rewrite rows that did not change.
+ for (int rowNum = 0; rowNum < fLinesDecoded; rowNum++) {
+ this->applyXformRow(dst, srcRow);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ srcRow = SkTAddOffset<png_byte>(srcRow, fPng_rowbytes);
+ }
+ if (success && fInterlacedComplete) {
+ return kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fLinesDecoded;
+ }
+
+ return log_and_return_error(success);
+ }
+
+ void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) override {
+ // FIXME: We could skip rows in the interlace buffer that we won't put in the output.
+ this->setUpInterlaceBuffer(lastRow - firstRow + 1);
+ png_set_progressive_read_fn(this->png_ptr(), this, nullptr, InterlacedRowCallback, nullptr);
+ fFirstRow = firstRow;
+ fLastRow = lastRow;
+ fDst = dst;
+ fRowBytes = rowBytes;
+ fLinesDecoded = 0;
+ }
+
+ Result decode(int* rowsDecoded) override {
+ const bool success = this->processData();
+
+ // Now apply Xforms on all the rows that were decoded.
+ if (!fLinesDecoded) {
+ if (rowsDecoded) {
+ *rowsDecoded = 0;
+ }
+ return log_and_return_error(success);
+ }
+
+ const int sampleY = this->swizzler() ? this->swizzler()->sampleY() : 1;
+ const int rowsNeeded = get_scaled_dimension(fLastRow - fFirstRow + 1, sampleY);
+
+ // FIXME: For resuming interlace, we may swizzle a row that hasn't changed. But it
+ // may be too tricky/expensive to handle that correctly.
+
+ // Offset srcRow by get_start_coord rows. We do not need to account for fFirstRow,
+ // since the first row in fInterlaceBuffer corresponds to fFirstRow.
+ int srcRow = get_start_coord(sampleY);
+ void* dst = fDst;
+ int rowsWrittenToOutput = 0;
+ while (rowsWrittenToOutput < rowsNeeded && srcRow < fLinesDecoded) {
+ png_bytep src = SkTAddOffset<png_byte>(fInterlaceBuffer.get(), fPng_rowbytes * srcRow);
+ this->applyXformRow(dst, src);
+ dst = SkTAddOffset<void>(dst, fRowBytes);
+
+ rowsWrittenToOutput++;
+ srcRow += sampleY;
+ }
+
+ if (success && fInterlacedComplete) {
+ return kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = rowsWrittenToOutput;
+ }
+ return log_and_return_error(success);
+ }
+
+ void setUpInterlaceBuffer(int height) {
+ fPng_rowbytes = png_get_rowbytes(this->png_ptr(), this->info_ptr());
+ fInterlaceBuffer.reset(fPng_rowbytes * height);
+ fInterlacedComplete = false;
+ }
+};
+
+// Reads the header and initializes the output fields, if not NULL.
+//
+// @param stream Input data. Will be read to get enough information to properly
+// setup the codec.
+// @param chunkReader SkPngChunkReader, for reading unknown chunks. May be NULL.
+// If not NULL, png_ptr will hold an *unowned* pointer to it. The caller is
+// expected to continue to own it for the lifetime of the png_ptr.
+// @param outCodec Optional output variable. If non-NULL, will be set to a new
+// SkPngCodec on success.
+// @param png_ptrp Optional output variable. If non-NULL, will be set to a new
+// png_structp on success.
+// @param info_ptrp Optional output variable. If non-NULL, will be set to a new
+// png_infop on success.
+// @return If kSuccess, the caller is responsible for calling
+// png_destroy_read_struct(png_ptrp, info_ptrp).
+// Otherwise, the passed-in fields (except stream) are unchanged.
+static SkCodec::Result read_header(SkStream* stream, SkPngChunkReader* chunkReader,
+ SkCodec** outCodec,
+ png_structp* png_ptrp, png_infop* info_ptrp) {
+ // The image is known to be a PNG. Decode enough to know the SkImageInfo.
+ png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr,
+ sk_error_fn, sk_warning_fn);
+ if (!png_ptr) {
+ return SkCodec::kInternalError;
+ }
+
+#ifdef PNG_SET_OPTION_SUPPORTED
+ // This setting ensures that we display images with incorrect CMF bytes.
+ // See crbug.com/807324.
+ png_set_option(png_ptr, PNG_MAXIMUM_INFLATE_WINDOW, PNG_OPTION_ON);
+#endif
+
+ AutoCleanPng autoClean(png_ptr, stream, chunkReader, outCodec);
+
+ png_infop info_ptr = png_create_info_struct(png_ptr);
+ if (info_ptr == nullptr) {
+ return SkCodec::kInternalError;
+ }
+
+ autoClean.setInfoPtr(info_ptr);
+
+ if (setjmp(PNG_JMPBUF(png_ptr))) {
+ return SkCodec::kInvalidInput;
+ }
+
+#ifdef PNG_READ_UNKNOWN_CHUNKS_SUPPORTED
+    // Hook up our chunkReader so we can see any user chunks the caller may be interested in.
+ // This needs to be installed before we read the png header. Android may store ninepatch
+ // chunks in the header.
+ if (chunkReader) {
+ png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_ALWAYS, (png_byte*)"", 0);
+ png_set_read_user_chunk_fn(png_ptr, (png_voidp) chunkReader, sk_read_user_chunk);
+ }
+#endif
+
+ const bool decodedBounds = autoClean.decodeBounds();
+
+ if (!decodedBounds) {
+ return SkCodec::kIncompleteInput;
+ }
+
+ // On success, decodeBounds releases ownership of png_ptr and info_ptr.
+ if (png_ptrp) {
+ *png_ptrp = png_ptr;
+ }
+ if (info_ptrp) {
+ *info_ptrp = info_ptr;
+ }
+
+ // decodeBounds takes care of setting outCodec
+ if (outCodec) {
+ SkASSERT(*outCodec);
+ }
+ return SkCodec::kSuccess;
+}
+
+void AutoCleanPng::infoCallback(size_t idatLength) {
+ png_uint_32 origWidth, origHeight;
+ int bitDepth, encodedColorType;
+ png_get_IHDR(fPng_ptr, fInfo_ptr, &origWidth, &origHeight, &bitDepth,
+ &encodedColorType, nullptr, nullptr, nullptr);
+
+    // TODO: Should we support 16 bits of precision for gray images?
+ if (bitDepth == 16 && (PNG_COLOR_TYPE_GRAY == encodedColorType ||
+ PNG_COLOR_TYPE_GRAY_ALPHA == encodedColorType)) {
+ bitDepth = 8;
+ png_set_strip_16(fPng_ptr);
+ }
+
+ // Now determine the default colorType and alphaType and set the required transforms.
+ // Often, we depend on SkSwizzler to perform any transforms that we need. However, we
+ // still depend on libpng for many of the rare and PNG-specific cases.
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ switch (encodedColorType) {
+ case PNG_COLOR_TYPE_PALETTE:
+ // Extract multiple pixels with bit depths of 1, 2, and 4 from a single
+ // byte into separate bytes (useful for paletted and grayscale images).
+ if (bitDepth < 8) {
+ // TODO: Should we use SkSwizzler here?
+ bitDepth = 8;
+ png_set_packing(fPng_ptr);
+ }
+
+ color = SkEncodedInfo::kPalette_Color;
+            // Set the alpha depending on whether a transparency chunk exists.
+ alpha = png_get_valid(fPng_ptr, fInfo_ptr, PNG_INFO_tRNS) ?
+ SkEncodedInfo::kUnpremul_Alpha : SkEncodedInfo::kOpaque_Alpha;
+ break;
+ case PNG_COLOR_TYPE_RGB:
+ if (png_get_valid(fPng_ptr, fInfo_ptr, PNG_INFO_tRNS)) {
+ // Convert to RGBA if transparency chunk exists.
+ png_set_tRNS_to_alpha(fPng_ptr);
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kBinary_Alpha;
+ } else {
+ color = SkEncodedInfo::kRGB_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case PNG_COLOR_TYPE_GRAY:
+ // Expand grayscale images to the full 8 bits from 1, 2, or 4 bits/pixel.
+ if (bitDepth < 8) {
+ // TODO: Should we use SkSwizzler here?
+ bitDepth = 8;
+ png_set_expand_gray_1_2_4_to_8(fPng_ptr);
+ }
+
+ if (png_get_valid(fPng_ptr, fInfo_ptr, PNG_INFO_tRNS)) {
+ png_set_tRNS_to_alpha(fPng_ptr);
+ color = SkEncodedInfo::kGrayAlpha_Color;
+ alpha = SkEncodedInfo::kBinary_Alpha;
+ } else {
+ color = SkEncodedInfo::kGray_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case PNG_COLOR_TYPE_GRAY_ALPHA:
+ color = SkEncodedInfo::kGrayAlpha_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ case PNG_COLOR_TYPE_RGBA:
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ default:
+ // All the color types have been covered above.
+ SkASSERT(false);
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ }
+
+ const int numberPasses = png_set_interlace_handling(fPng_ptr);
+
+ if (fOutCodec) {
+ SkASSERT(nullptr == *fOutCodec);
+ auto profile = read_color_profile(fPng_ptr, fInfo_ptr);
+ if (profile) {
+ switch (profile->profile()->data_color_space) {
+ case skcms_Signature_CMYK:
+ profile = nullptr;
+ break;
+ case skcms_Signature_Gray:
+ if (SkEncodedInfo::kGray_Color != color &&
+ SkEncodedInfo::kGrayAlpha_Color != color)
+ {
+ profile = nullptr;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (encodedColorType == PNG_COLOR_TYPE_GRAY_ALPHA) {
+ png_color_8p sigBits;
+ if (png_get_sBIT(fPng_ptr, fInfo_ptr, &sigBits)) {
+ if (8 == sigBits->alpha && kGraySigBit_GrayAlphaIsJustAlpha == sigBits->gray) {
+ color = SkEncodedInfo::kXAlpha_Color;
+ }
+ }
+ } else if (SkEncodedInfo::kOpaque_Alpha == alpha) {
+ png_color_8p sigBits;
+ if (png_get_sBIT(fPng_ptr, fInfo_ptr, &sigBits)) {
+ if (5 == sigBits->red && 6 == sigBits->green && 5 == sigBits->blue) {
+ // Recommend a decode to 565 if the sBIT indicates 565.
+ color = SkEncodedInfo::k565_Color;
+ }
+ }
+ }
+
+ SkEncodedInfo encodedInfo = SkEncodedInfo::Make(origWidth, origHeight, color, alpha,
+ bitDepth, std::move(profile));
+ if (1 == numberPasses) {
+ *fOutCodec = new SkPngNormalDecoder(std::move(encodedInfo),
+ std::unique_ptr<SkStream>(fStream), fChunkReader, fPng_ptr, fInfo_ptr, bitDepth);
+ } else {
+ *fOutCodec = new SkPngInterlacedDecoder(std::move(encodedInfo),
+ std::unique_ptr<SkStream>(fStream), fChunkReader, fPng_ptr, fInfo_ptr, bitDepth,
+ numberPasses);
+ }
+ static_cast<SkPngCodec*>(*fOutCodec)->setIdatLength(idatLength);
+ }
+
+    // Release the pointers: they are now owned by the codec, or else the caller is expected
+    // to take ownership of them.
+ this->releasePngPtrs();
+}
+
+SkPngCodec::SkPngCodec(SkEncodedInfo&& encodedInfo, std::unique_ptr<SkStream> stream,
+ SkPngChunkReader* chunkReader, void* png_ptr, void* info_ptr, int bitDepth)
+ : INHERITED(std::move(encodedInfo), png_select_xform_format(encodedInfo), std::move(stream))
+ , fPngChunkReader(SkSafeRef(chunkReader))
+ , fPng_ptr(png_ptr)
+ , fInfo_ptr(info_ptr)
+ , fColorXformSrcRow(nullptr)
+ , fBitDepth(bitDepth)
+ , fIdatLength(0)
+ , fDecodedIdat(false)
+{}
+
+SkPngCodec::~SkPngCodec() {
+ this->destroyReadStruct();
+}
+
+void SkPngCodec::destroyReadStruct() {
+ if (fPng_ptr) {
+ // We will never have a nullptr fInfo_ptr with a non-nullptr fPng_ptr
+ SkASSERT(fInfo_ptr);
+ png_destroy_read_struct((png_struct**)&fPng_ptr, (png_info**)&fInfo_ptr, nullptr);
+ fPng_ptr = nullptr;
+ fInfo_ptr = nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Getting the pixels
+///////////////////////////////////////////////////////////////////////////////
+
+SkCodec::Result SkPngCodec::initializeXforms(const SkImageInfo& dstInfo, const Options& options) {
+ if (setjmp(PNG_JMPBUF((png_struct*)fPng_ptr))) {
+ SkCodecPrintf("Failed on png_read_update_info.\n");
+ return kInvalidInput;
+ }
+ png_read_update_info(fPng_ptr, fInfo_ptr);
+
+ // Reset fSwizzler and this->colorXform(). We can't do this in onRewind() because the
+ // interlaced scanline decoder may need to rewind.
+ fSwizzler.reset(nullptr);
+
+ // If skcms directly supports the encoded PNG format, we should skip format
+ // conversion in the swizzler (or skip swizzling altogether).
+ bool skipFormatConversion = false;
+ switch (this->getEncodedInfo().color()) {
+ case SkEncodedInfo::kRGB_Color:
+ if (this->getEncodedInfo().bitsPerComponent() != 16) {
+ break;
+ }
+
+ // Fall through
+ case SkEncodedInfo::kRGBA_Color:
+ case SkEncodedInfo::kGray_Color:
+ skipFormatConversion = this->colorXform();
+ break;
+ default:
+ break;
+ }
+ if (skipFormatConversion && !options.fSubset) {
+ fXformMode = kColorOnly_XformMode;
+ return kSuccess;
+ }
+
+ if (SkEncodedInfo::kPalette_Color == this->getEncodedInfo().color()) {
+ if (!this->createColorTable(dstInfo)) {
+ return kInvalidInput;
+ }
+ }
+
+ this->initializeSwizzler(dstInfo, options, skipFormatConversion);
+ return kSuccess;
+}
+
+void SkPngCodec::initializeXformParams() {
+ switch (fXformMode) {
+ case kColorOnly_XformMode:
+ fXformWidth = this->dstInfo().width();
+ break;
+ case kSwizzleColor_XformMode:
+ fXformWidth = this->swizzler()->swizzleWidth();
+ break;
+ default:
+ break;
+ }
+}
+
+void SkPngCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& options,
+ bool skipFormatConversion) {
+ SkImageInfo swizzlerInfo = dstInfo;
+ Options swizzlerOptions = options;
+ fXformMode = kSwizzleOnly_XformMode;
+ if (this->colorXform() && this->xformOnDecode()) {
+ if (SkEncodedInfo::kGray_Color == this->getEncodedInfo().color()) {
+ swizzlerInfo = swizzlerInfo.makeColorType(kGray_8_SkColorType);
+ } else {
+ swizzlerInfo = swizzlerInfo.makeColorType(kXformSrcColorType);
+ }
+ if (kPremul_SkAlphaType == dstInfo.alphaType()) {
+ swizzlerInfo = swizzlerInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+
+ fXformMode = kSwizzleColor_XformMode;
+
+ // Here, we swizzle into temporary memory, which is not zero initialized.
+ // FIXME (msarett):
+ // Is this a problem?
+ swizzlerOptions.fZeroInitialized = kNo_ZeroInitialized;
+ }
+
+ if (skipFormatConversion) {
+ // We cannot skip format conversion when there is a color table.
+ SkASSERT(!fColorTable);
+ int srcBPP = 0;
+ switch (this->getEncodedInfo().color()) {
+ case SkEncodedInfo::kRGB_Color:
+ SkASSERT(this->getEncodedInfo().bitsPerComponent() == 16);
+ srcBPP = 6;
+ break;
+ case SkEncodedInfo::kRGBA_Color:
+ srcBPP = this->getEncodedInfo().bitsPerComponent() / 2;
+ break;
+ case SkEncodedInfo::kGray_Color:
+ srcBPP = 1;
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ fSwizzler = SkSwizzler::MakeSimple(srcBPP, swizzlerInfo, swizzlerOptions);
+ } else {
+ const SkPMColor* colors = get_color_ptr(fColorTable.get());
+ fSwizzler = SkSwizzler::Make(this->getEncodedInfo(), colors, swizzlerInfo,
+ swizzlerOptions);
+ }
+ SkASSERT(fSwizzler);
+}
+
+SkSampler* SkPngCodec::getSampler(bool createIfNecessary) {
+ if (fSwizzler || !createIfNecessary) {
+ return fSwizzler.get();
+ }
+
+ this->initializeSwizzler(this->dstInfo(), this->options(), true);
+ return fSwizzler.get();
+}
+
+bool SkPngCodec::onRewind() {
+ // This sets fPng_ptr and fInfo_ptr to nullptr. If read_header
+ // succeeds, they will be repopulated, and if it fails, they will
+    // remain nullptr. Any future accesses to fPng_ptr and fInfo_ptr will
+    // come through this function, which will rewind and again attempt
+    // to reinitialize them.
+ this->destroyReadStruct();
+
+ png_structp png_ptr;
+ png_infop info_ptr;
+ if (kSuccess != read_header(this->stream(), fPngChunkReader.get(), nullptr,
+ &png_ptr, &info_ptr)) {
+ return false;
+ }
+
+ fPng_ptr = png_ptr;
+ fInfo_ptr = info_ptr;
+ fDecodedIdat = false;
+ return true;
+}
+
+SkCodec::Result SkPngCodec::onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t rowBytes, const Options& options,
+ int* rowsDecoded) {
+ Result result = this->initializeXforms(dstInfo, options);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ if (options.fSubset) {
+ return kUnimplemented;
+ }
+
+ this->allocateStorage(dstInfo);
+ this->initializeXformParams();
+ return this->decodeAllRows(dst, rowBytes, rowsDecoded);
+}
+
+SkCodec::Result SkPngCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* dst, size_t rowBytes, const SkCodec::Options& options) {
+ Result result = this->initializeXforms(dstInfo, options);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ this->allocateStorage(dstInfo);
+
+ int firstRow, lastRow;
+ if (options.fSubset) {
+ firstRow = options.fSubset->top();
+ lastRow = options.fSubset->bottom() - 1;
+ } else {
+ firstRow = 0;
+ lastRow = dstInfo.height() - 1;
+ }
+ this->setRange(firstRow, lastRow, dst, rowBytes);
+ return kSuccess;
+}
+
+SkCodec::Result SkPngCodec::onIncrementalDecode(int* rowsDecoded) {
+ // FIXME: Only necessary on the first call.
+ this->initializeXformParams();
+
+ return this->decode(rowsDecoded);
+}
+
+std::unique_ptr<SkCodec> SkPngCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result, SkPngChunkReader* chunkReader) {
+ SkCodec* outCodec = nullptr;
+ *result = read_header(stream.get(), chunkReader, &outCodec, nullptr, nullptr);
+ if (kSuccess == *result) {
+ // Codec has taken ownership of the stream.
+ SkASSERT(outCodec);
+ stream.release();
+ }
+ return std::unique_ptr<SkCodec>(outCodec);
+}
diff --git a/gfx/skia/skia/src/codec/SkPngCodec.h b/gfx/skia/skia/src/codec/SkPngCodec.h
new file mode 100644
index 0000000000..423331647c
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPngCodec.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPngCodec_DEFINED
+#define SkPngCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPngChunkReader.h"
+#include "include/core/SkRefCnt.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkSwizzler.h"
+
+class SkStream;
+
+class SkPngCodec : public SkCodec {
+public:
+ static bool IsPng(const char*, size_t);
+
+ // Assume IsPng was called and returned true.
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*,
+ SkPngChunkReader* = nullptr);
+
+ // FIXME (scroggo): Temporarily needed by AutoCleanPng.
+ void setIdatLength(size_t len) { fIdatLength = len; }
+
+ ~SkPngCodec() override;
+
+protected:
+ // We hold the png_ptr and info_ptr as voidp to avoid having to include png.h
+ // or forward declare their types here. voidp auto-casts to the real pointer types.
+ struct voidp {
+ voidp(void* ptr) : fPtr(ptr) {}
+
+ template <typename T>
+ operator T*() const { return (T*)fPtr; }
+
+ explicit operator bool() const { return fPtr != nullptr; }
+
+ void* fPtr;
+ };
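+    // Illustrative usage: a voidp converts implicitly to whatever pointer type libpng
+    // expects, e.g. "png_structp png = fPng_ptr;", and it can be tested directly as a
+    // boolean: "if (fPng_ptr) { ... }".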
+
+ SkPngCodec(SkEncodedInfo&&, std::unique_ptr<SkStream>, SkPngChunkReader*,
+ void* png_ptr, void* info_ptr, int bitDepth);
+
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&, int*)
+ override;
+ SkEncodedImageFormat onGetEncodedFormat() const override { return SkEncodedImageFormat::kPNG; }
+ bool onRewind() override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+ void applyXformRow(void* dst, const void* src);
+
+ voidp png_ptr() { return fPng_ptr; }
+ voidp info_ptr() { return fInfo_ptr; }
+
+ SkSwizzler* swizzler() { return fSwizzler.get(); }
+
+ // Initialize variables used by applyXformRow.
+ void initializeXformParams();
+
+ /**
+ * Pass available input to libpng to process it.
+ *
+ * libpng will call any relevant callbacks installed. This will continue decoding
+ * until it reaches the end of the file, or until a callback tells libpng to stop.
+ */
+ bool processData();
+
+ Result onStartIncrementalDecode(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ const SkCodec::Options&) override;
+ Result onIncrementalDecode(int*) override;
+
+ sk_sp<SkPngChunkReader> fPngChunkReader;
+ voidp fPng_ptr;
+ voidp fInfo_ptr;
+
+ // These are stored here so they can be used both by normal decoding and scanline decoding.
+ sk_sp<SkColorTable> fColorTable; // May be unpremul.
+ std::unique_ptr<SkSwizzler> fSwizzler;
+ SkAutoTMalloc<uint8_t> fStorage;
+ void* fColorXformSrcRow;
+ const int fBitDepth;
+
+private:
+
+ enum XformMode {
+ // Requires only a swizzle pass.
+ kSwizzleOnly_XformMode,
+
+ // Requires only a color xform pass.
+ kColorOnly_XformMode,
+
+ // Requires a swizzle and a color xform.
+ kSwizzleColor_XformMode,
+ };
+
+ bool createColorTable(const SkImageInfo& dstInfo);
+ // Helper to set up swizzler, color xforms, and color table. Also calls png_read_update_info.
+ SkCodec::Result initializeXforms(const SkImageInfo& dstInfo, const Options&);
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options&, bool skipFormatConversion);
+ void allocateStorage(const SkImageInfo& dstInfo);
+ void destroyReadStruct();
+
+ virtual Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) = 0;
+ virtual void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) = 0;
+ virtual Result decode(int* rowsDecoded) = 0;
+
+ XformMode fXformMode;
+ int fXformWidth;
+
+ size_t fIdatLength;
+ bool fDecodedIdat;
+
+ typedef SkCodec INHERITED;
+};
+#endif // SkPngCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkPngPriv.h b/gfx/skia/skia/src/codec/SkPngPriv.h
new file mode 100644
index 0000000000..5760179b60
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPngPriv.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngPriv_DEFINED
+#define SkPngPriv_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// We store kAlpha_8 images as GrayAlpha in png. Our private signal is significant bits for gray.
+// If that is set to 1, we assume the gray channel can be ignored, and we output just alpha.
+// We tried 0 at first, but png doesn't like a 0 sigbit for a channel it expects, hence we chose 1.
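+// For example, AutoCleanPng::infoCallback in SkPngCodec.cpp checks sBIT for GrayAlpha PNGs
+// and, when the gray channel's significant bits equal this constant (and alpha's equal 8),
+// decodes to kXAlpha_Color.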
+
+static constexpr int kGraySigBit_GrayAlphaIsJustAlpha = 1;
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkRawCodec.cpp b/gfx/skia/skia/src/codec/SkRawCodec.cpp
new file mode 100644
index 0000000000..f8dff7a862
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawCodec.cpp
@@ -0,0 +1,797 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkJpegCodec.h"
+#include "src/codec/SkRawCodec.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/core/SkTaskGroup.h"
+
+#include "dng_area_task.h"
+#include "dng_color_space.h"
+#include "dng_errors.h"
+#include "dng_exceptions.h"
+#include "dng_host.h"
+#include "dng_info.h"
+#include "dng_memory.h"
+#include "dng_render.h"
+#include "dng_stream.h"
+
+#include "src/piex.h"
+
+#include <cmath> // for std::round,floor,ceil
+#include <limits>
+
+namespace {
+
+// Calculates the number of tiles of tile_size that fit into the area in vertical and horizontal
+// directions.
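+// For example (illustrative numbers): a 600 (v) x 1000 (h) area with 256 x 256 tiles yields
+// dng_point(3, 4).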
+dng_point num_tiles_in_area(const dng_point &areaSize,
+ const dng_point_real64 &tileSize) {
+ // FIXME: Add a ceil_div() helper in SkCodecPriv.h
+ return dng_point(static_cast<int32>((areaSize.v + tileSize.v - 1) / tileSize.v),
+ static_cast<int32>((areaSize.h + tileSize.h - 1) / tileSize.h));
+}
+
+int num_tasks_required(const dng_point& tilesInTask,
+ const dng_point& tilesInArea) {
+ return ((tilesInArea.v + tilesInTask.v - 1) / tilesInTask.v) *
+ ((tilesInArea.h + tilesInTask.h - 1) / tilesInTask.h);
+}
+
+// Calculate the number of tiles to process per task, taking into account the maximum number of
+// tasks. It prefers to increase horizontally for better locality of reference.
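+// For example (illustrative numbers): with maxTasks == 4 and tilesInArea == {v:3, h:4}, the
+// loop below grows tilesInTask horizontally to {1, 4}, so only 3 tasks are required.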
+dng_point num_tiles_per_task(const int maxTasks,
+ const dng_point &tilesInArea) {
+ dng_point tilesInTask = {1, 1};
+ while (num_tasks_required(tilesInTask, tilesInArea) > maxTasks) {
+ if (tilesInTask.h < tilesInArea.h) {
+ ++tilesInTask.h;
+ } else if (tilesInTask.v < tilesInArea.v) {
+ ++tilesInTask.v;
+ } else {
+ ThrowProgramError("num_tiles_per_task calculation is wrong.");
+ }
+ }
+ return tilesInTask;
+}
+
+std::vector<dng_rect> compute_task_areas(const int maxTasks, const dng_rect& area,
+ const dng_point& tileSize) {
+ std::vector<dng_rect> taskAreas;
+ const dng_point tilesInArea = num_tiles_in_area(area.Size(), tileSize);
+ const dng_point tilesPerTask = num_tiles_per_task(maxTasks, tilesInArea);
+ const dng_point taskAreaSize = {tilesPerTask.v * tileSize.v,
+ tilesPerTask.h * tileSize.h};
+ for (int v = 0; v < tilesInArea.v; v += tilesPerTask.v) {
+ for (int h = 0; h < tilesInArea.h; h += tilesPerTask.h) {
+ dng_rect taskArea;
+ taskArea.t = area.t + v * tileSize.v;
+ taskArea.l = area.l + h * tileSize.h;
+ taskArea.b = Min_int32(taskArea.t + taskAreaSize.v, area.b);
+ taskArea.r = Min_int32(taskArea.l + taskAreaSize.h, area.r);
+
+ taskAreas.push_back(taskArea);
+ }
+ }
+ return taskAreas;
+}
+
+class SkDngHost : public dng_host {
+public:
+    explicit SkDngHost(dng_memory_allocator* allocator) : dng_host(allocator) {}
+
+ void PerformAreaTask(dng_area_task& task, const dng_rect& area) override {
+ SkTaskGroup taskGroup;
+
+ // tileSize is typically 256x256
+ const dng_point tileSize(task.FindTileSize(area));
+ const std::vector<dng_rect> taskAreas = compute_task_areas(this->PerformAreaTaskThreads(),
+ area, tileSize);
+ const int numTasks = static_cast<int>(taskAreas.size());
+
+ SkMutex mutex;
+ SkTArray<dng_exception> exceptions;
+ task.Start(numTasks, tileSize, &Allocator(), Sniffer());
+ for (int taskIndex = 0; taskIndex < numTasks; ++taskIndex) {
+ taskGroup.add([&mutex, &exceptions, &task, this, taskIndex, taskAreas, tileSize] {
+ try {
+ task.ProcessOnThread(taskIndex, taskAreas[taskIndex], tileSize, this->Sniffer());
+ } catch (dng_exception& exception) {
+ SkAutoMutexExclusive lock(mutex);
+ exceptions.push_back(exception);
+ } catch (...) {
+ SkAutoMutexExclusive lock(mutex);
+ exceptions.push_back(dng_exception(dng_error_unknown));
+ }
+ });
+ }
+
+ taskGroup.wait();
+ task.Finish(numTasks);
+
+ // We only re-throw the first exception.
+ if (!exceptions.empty()) {
+ Throw_dng_error(exceptions.front().ErrorCode(), nullptr, nullptr);
+ }
+ }
+
+ uint32 PerformAreaTaskThreads() override {
+#ifdef SK_BUILD_FOR_ANDROID
+ // Only use 1 thread. DNGs with the warp effect require a lot of memory,
+ // and the amount of memory required scales linearly with the number of
+ // threads. The sample used in CTS requires over 500 MB, so even two
+ // threads is significantly expensive. There is no good way to tell
+ // whether the image has the warp effect.
+ return 1;
+#else
+ return kMaxMPThreads;
+#endif
+ }
+
+private:
+ typedef dng_host INHERITED;
+};
+
+// T must be an unsigned type.
+template <class T>
+bool safe_add_to_size_t(T arg1, T arg2, size_t* result) {
+ SkASSERT(arg1 >= 0);
+ SkASSERT(arg2 >= 0);
+ if (arg1 >= 0 && arg2 <= std::numeric_limits<T>::max() - arg1) {
+ T sum = arg1 + arg2;
+ if (sum <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(sum);
+ return true;
+ }
+ }
+ return false;
+}
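+// Illustrative usage: guard an (offset + length) computation against unsigned wraparound:
+//   size_t end;
+//   if (!safe_add_to_size_t(offset, length, &end)) { /* reject: would overflow */ }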
+
+bool is_asset_stream(const SkStream& stream) {
+ return stream.hasLength() && stream.hasPosition();
+}
+
+} // namespace
+
+class SkRawStream {
+public:
+ virtual ~SkRawStream() {}
+
+ /*
+ * Gets the length of the stream. Depending on the type of stream, this may require reading to
+ * the end of the stream.
+ */
+ virtual uint64 getLength() = 0;
+
+ virtual bool read(void* data, size_t offset, size_t length) = 0;
+
+ /*
+     * Creates an SkMemoryStream of the given size, starting at the given offset.
+     * Note: for performance reasons, this function is destructive to the SkRawStream. One should
+     * abandon the current object after the call.
+ */
+ virtual std::unique_ptr<SkMemoryStream> transferBuffer(size_t offset, size_t size) = 0;
+};
+
+class SkRawLimitedDynamicMemoryWStream : public SkDynamicMemoryWStream {
+public:
+ ~SkRawLimitedDynamicMemoryWStream() override {}
+
+ bool write(const void* buffer, size_t size) override {
+ size_t newSize;
+ if (!safe_add_to_size_t(this->bytesWritten(), size, &newSize) ||
+ newSize > kMaxStreamSize)
+ {
+ SkCodecPrintf("Error: Stream size exceeds the limit.\n");
+ return false;
+ }
+ return this->INHERITED::write(buffer, size);
+ }
+
+private:
+    // Most valid RAW images will not be larger than 100MB. This limit helps avoid buffering
+    // an overly large data chunk. We can always adjust the limit here if needed.
+ const size_t kMaxStreamSize = 100 * 1024 * 1024; // 100MB
+
+ typedef SkDynamicMemoryWStream INHERITED;
+};
+
+// Note: the maximum buffer size is 100MB (limited by SkRawLimitedDynamicMemoryWStream).
+class SkRawBufferedStream : public SkRawStream {
+public:
+ explicit SkRawBufferedStream(std::unique_ptr<SkStream> stream)
+ : fStream(std::move(stream))
+ , fWholeStreamRead(false)
+ {
+ // Only use SkRawBufferedStream when the stream is not an asset stream.
+ SkASSERT(!is_asset_stream(*fStream));
+ }
+
+ ~SkRawBufferedStream() override {}
+
+ uint64 getLength() override {
+ if (!this->bufferMoreData(kReadToEnd)) { // read whole stream
+ ThrowReadFile();
+ }
+ return fStreamBuffer.bytesWritten();
+ }
+
+ bool read(void* data, size_t offset, size_t length) override {
+ if (length == 0) {
+ return true;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, length, &sum)) {
+ return false;
+ }
+
+ return this->bufferMoreData(sum) && fStreamBuffer.read(data, offset, length);
+ }
+
+ std::unique_ptr<SkMemoryStream> transferBuffer(size_t offset, size_t size) override {
+ sk_sp<SkData> data(SkData::MakeUninitialized(size));
+ if (offset > fStreamBuffer.bytesWritten()) {
+ // If the offset is not buffered, read from fStream directly and skip the buffering.
+ const size_t skipLength = offset - fStreamBuffer.bytesWritten();
+ if (fStream->skip(skipLength) != skipLength) {
+ return nullptr;
+ }
+ const size_t bytesRead = fStream->read(data->writable_data(), size);
+ if (bytesRead < size) {
+ data = SkData::MakeSubset(data.get(), 0, bytesRead);
+ }
+ } else {
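+            // The offset lies within the buffered region: copy whatever is already buffered,
+            // then read any remainder directly from the underlying stream.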
+ const size_t alreadyBuffered = SkTMin(fStreamBuffer.bytesWritten() - offset, size);
+ if (alreadyBuffered > 0 &&
+ !fStreamBuffer.read(data->writable_data(), offset, alreadyBuffered)) {
+ return nullptr;
+ }
+
+ const size_t remaining = size - alreadyBuffered;
+ if (remaining) {
+ auto* dst = static_cast<uint8_t*>(data->writable_data()) + alreadyBuffered;
+ const size_t bytesRead = fStream->read(dst, remaining);
+ size_t newSize;
+ if (bytesRead < remaining) {
+ if (!safe_add_to_size_t(alreadyBuffered, bytesRead, &newSize)) {
+ return nullptr;
+ }
+ data = SkData::MakeSubset(data.get(), 0, newSize);
+ }
+ }
+ }
+ return SkMemoryStream::Make(data);
+ }
+
+private:
+    // Note: if newSize == kReadToEnd (0), this function will read to the end of the stream.
+ bool bufferMoreData(size_t newSize) {
+ if (newSize == kReadToEnd) {
+ if (fWholeStreamRead) { // already read-to-end.
+ return true;
+ }
+
+ // TODO: optimize for the special case when the input is SkMemoryStream.
+ return SkStreamCopy(&fStreamBuffer, fStream.get());
+ }
+
+ if (newSize <= fStreamBuffer.bytesWritten()) { // already buffered to newSize
+ return true;
+ }
+ if (fWholeStreamRead) { // newSize is larger than the whole stream.
+ return false;
+ }
+
+        // Try to read at least 8192 bytes to avoid too many small reads.
+ const size_t kMinSizeToRead = 8192;
+ const size_t sizeRequested = newSize - fStreamBuffer.bytesWritten();
+ const size_t sizeToRead = SkTMax(kMinSizeToRead, sizeRequested);
+ SkAutoSTMalloc<kMinSizeToRead, uint8> tempBuffer(sizeToRead);
+ const size_t bytesRead = fStream->read(tempBuffer.get(), sizeToRead);
+ if (bytesRead < sizeRequested) {
+ return false;
+ }
+ return fStreamBuffer.write(tempBuffer.get(), bytesRead);
+ }
+
+ std::unique_ptr<SkStream> fStream;
+ bool fWholeStreamRead;
+
+    // Use a size-limited stream to avoid holding an overly large buffer.
+ SkRawLimitedDynamicMemoryWStream fStreamBuffer;
+
+ const size_t kReadToEnd = 0;
+};
+
+class SkRawAssetStream : public SkRawStream {
+public:
+ explicit SkRawAssetStream(std::unique_ptr<SkStream> stream)
+ : fStream(std::move(stream))
+ {
+ // Only use SkRawAssetStream when the stream is an asset stream.
+ SkASSERT(is_asset_stream(*fStream));
+ }
+
+ ~SkRawAssetStream() override {}
+
+ uint64 getLength() override {
+ return fStream->getLength();
+ }
+
+ bool read(void* data, size_t offset, size_t length) override {
+ if (length == 0) {
+ return true;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, length, &sum)) {
+ return false;
+ }
+
+ return fStream->seek(offset) && (fStream->read(data, length) == length);
+ }
+
+ std::unique_ptr<SkMemoryStream> transferBuffer(size_t offset, size_t size) override {
+ if (fStream->getLength() < offset) {
+ return nullptr;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, size, &sum)) {
+ return nullptr;
+ }
+
+        // This allows reading less than the requested size, because the JPEG codec can also
+        // handle a partial JPEG file.
+ const size_t bytesToRead = SkTMin(sum, fStream->getLength()) - offset;
+ if (bytesToRead == 0) {
+ return nullptr;
+ }
+
+ if (fStream->getMemoryBase()) { // directly copy if getMemoryBase() is available.
+ sk_sp<SkData> data(SkData::MakeWithCopy(
+ static_cast<const uint8_t*>(fStream->getMemoryBase()) + offset, bytesToRead));
+ fStream.reset();
+ return SkMemoryStream::Make(data);
+ } else {
+ sk_sp<SkData> data(SkData::MakeUninitialized(bytesToRead));
+ if (!fStream->seek(offset)) {
+ return nullptr;
+ }
+ const size_t bytesRead = fStream->read(data->writable_data(), bytesToRead);
+ if (bytesRead < bytesToRead) {
+ data = SkData::MakeSubset(data.get(), 0, bytesRead);
+ }
+ return SkMemoryStream::Make(data);
+ }
+ }
+private:
+ std::unique_ptr<SkStream> fStream;
+};
+
+class SkPiexStream : public ::piex::StreamInterface {
+public:
+    // Will NOT take ownership of the stream.
+ explicit SkPiexStream(SkRawStream* stream) : fStream(stream) {}
+
+ ~SkPiexStream() override {}
+
+ ::piex::Error GetData(const size_t offset, const size_t length,
+ uint8* data) override {
+ return fStream->read(static_cast<void*>(data), offset, length) ?
+ ::piex::Error::kOk : ::piex::Error::kFail;
+ }
+
+private:
+ SkRawStream* fStream;
+};
+
+class SkDngStream : public dng_stream {
+public:
+    // Will NOT take ownership of the stream.
+ SkDngStream(SkRawStream* stream) : fStream(stream) {}
+
+ ~SkDngStream() override {}
+
+ uint64 DoGetLength() override { return fStream->getLength(); }
+
+ void DoRead(void* data, uint32 count, uint64 offset) override {
+ size_t sum;
+ if (!safe_add_to_size_t(static_cast<uint64>(count), offset, &sum) ||
+ !fStream->read(data, static_cast<size_t>(offset), static_cast<size_t>(count))) {
+ ThrowReadFile();
+ }
+ }
+
+private:
+ SkRawStream* fStream;
+};
+
+class SkDngImage {
+public:
+ /*
+     * First attempts to initialize the object with the information from PIEX. This saves time
+     * and storage when obtaining the DNG dimensions and the color filter array (CFA) pattern,
+     * which is essential for demosaicing the sensor image.
+     * Note: this takes ownership of the stream.
+ */
+ static SkDngImage* NewFromStream(SkRawStream* stream) {
+ std::unique_ptr<SkDngImage> dngImage(new SkDngImage(stream));
+#if defined(IS_FUZZING_WITH_LIBFUZZER)
+        // Libfuzzer easily runs out of memory after this point. To avoid that,
+        // we just pretend all streams are invalid. Our AFL fuzzer
+        // should still exercise this code; it is more resistant to OOM.
+ return nullptr;
+#endif
+ if (!dngImage->initFromPiex() && !dngImage->readDng()) {
+ return nullptr;
+ }
+
+ return dngImage.release();
+ }
+
+ /*
+     * Renders the DNG image to roughly the given size. The DNG SDK only allows scaling close
+     * to integer factors, down to 80 pixels on the short edge. The rendered image will be close
+     * to the specified size, but there is no guarantee that any of the edges will match the
+     * requested size. E.g.
+ * 100% size: 4000 x 3000
+ * requested size: 1600 x 1200
+ * returned size could be: 2000 x 1500
+ */
+ dng_image* render(int width, int height) {
+ if (!fHost || !fInfo || !fNegative || !fDngStream) {
+ if (!this->readDng()) {
+ return nullptr;
+ }
+ }
+
+ // DNG SDK preserves the aspect ratio, so it only needs to know the longer dimension.
+ const int preferredSize = SkTMax(width, height);
+ try {
+ // render() takes ownership of fHost, fInfo, fNegative and fDngStream when available.
+ std::unique_ptr<dng_host> host(fHost.release());
+ std::unique_ptr<dng_info> info(fInfo.release());
+ std::unique_ptr<dng_negative> negative(fNegative.release());
+ std::unique_ptr<dng_stream> dngStream(fDngStream.release());
+
+ host->SetPreferredSize(preferredSize);
+ host->ValidateSizes();
+
+ negative->ReadStage1Image(*host, *dngStream, *info);
+
+ if (info->fMaskIndex != -1) {
+ negative->ReadTransparencyMask(*host, *dngStream, *info);
+ }
+
+ negative->ValidateRawImageDigest(*host);
+ if (negative->IsDamaged()) {
+ return nullptr;
+ }
+
+ const int32 kMosaicPlane = -1;
+ negative->BuildStage2Image(*host);
+ negative->BuildStage3Image(*host, kMosaicPlane);
+
+ dng_render render(*host, *negative);
+ render.SetFinalSpace(dng_space_sRGB::Get());
+ render.SetFinalPixelType(ttByte);
+
+ dng_point stage3_size = negative->Stage3Image()->Size();
+ render.SetMaximumSize(SkTMax(stage3_size.h, stage3_size.v));
+
+ return render.Render();
+ } catch (...) {
+ return nullptr;
+ }
+ }
+
+ int width() const {
+ return fWidth;
+ }
+
+ int height() const {
+ return fHeight;
+ }
+
+ bool isScalable() const {
+ return fIsScalable;
+ }
+
+ bool isXtransImage() const {
+ return fIsXtransImage;
+ }
+
+    // Quick check that the image contains a valid TIFF header, as required by the DNG format.
+ // Does not affect ownership of stream.
+ static bool IsTiffHeaderValid(SkRawStream* stream) {
+ const size_t kHeaderSize = 4;
+ unsigned char header[kHeaderSize];
+ if (!stream->read(header, 0 /* offset */, kHeaderSize)) {
+ return false;
+ }
+
+ // Check if the header is valid (endian info and magic number "42").
+ bool littleEndian;
+ if (!is_valid_endian_marker(header, &littleEndian)) {
+ return false;
+ }
+
+ return 0x2A == get_endian_short(header + 2, littleEndian);
+ }
+
+private:
+ bool init(int width, int height, const dng_point& cfaPatternSize) {
+ fWidth = width;
+ fHeight = height;
+
+ // The DNG SDK scales only during demosaicing, so scaling is only possible when
+ // a mosaic info is available.
+ fIsScalable = cfaPatternSize.v != 0 && cfaPatternSize.h != 0;
+ fIsXtransImage = fIsScalable ? (cfaPatternSize.v == 6 && cfaPatternSize.h == 6) : false;
+
+ return width > 0 && height > 0;
+ }
+
+ bool initFromPiex() {
+        // Does not take ownership of rawStream.
+ SkPiexStream piexStream(fStream.get());
+ ::piex::PreviewImageData imageData;
+ if (::piex::IsRaw(&piexStream)
+ && ::piex::GetPreviewImageData(&piexStream, &imageData) == ::piex::Error::kOk)
+ {
+ dng_point cfaPatternSize(imageData.cfa_pattern_dim[1], imageData.cfa_pattern_dim[0]);
+ return this->init(static_cast<int>(imageData.full_width),
+ static_cast<int>(imageData.full_height), cfaPatternSize);
+ }
+ return false;
+ }
+
+ bool readDng() {
+ try {
+            // Due to a limitation of the DNG SDK, we need to reset the host and info.
+ fHost.reset(new SkDngHost(&fAllocator));
+ fInfo.reset(new dng_info);
+ fDngStream.reset(new SkDngStream(fStream.get()));
+
+ fHost->ValidateSizes();
+ fInfo->Parse(*fHost, *fDngStream);
+ fInfo->PostParse(*fHost);
+ if (!fInfo->IsValidDNG()) {
+ return false;
+ }
+
+ fNegative.reset(fHost->Make_dng_negative());
+ fNegative->Parse(*fHost, *fDngStream, *fInfo);
+ fNegative->PostParse(*fHost, *fDngStream, *fInfo);
+ fNegative->SynchronizeMetadata();
+
+ dng_point cfaPatternSize(0, 0);
+ if (fNegative->GetMosaicInfo() != nullptr) {
+ cfaPatternSize = fNegative->GetMosaicInfo()->fCFAPatternSize;
+ }
+ return this->init(static_cast<int>(fNegative->DefaultCropSizeH().As_real64()),
+ static_cast<int>(fNegative->DefaultCropSizeV().As_real64()),
+ cfaPatternSize);
+ } catch (...) {
+ return false;
+ }
+ }
+
+ SkDngImage(SkRawStream* stream)
+ : fStream(stream)
+ {}
+
+ dng_memory_allocator fAllocator;
+ std::unique_ptr<SkRawStream> fStream;
+ std::unique_ptr<dng_host> fHost;
+ std::unique_ptr<dng_info> fInfo;
+ std::unique_ptr<dng_negative> fNegative;
+ std::unique_ptr<dng_stream> fDngStream;
+
+ int fWidth;
+ int fHeight;
+ bool fIsScalable;
+ bool fIsXtransImage;
+};
+
+/*
+ * Tries to handle the image with PIEX. If PIEX returns kOk and finds the preview image, creates
+ * an SkJpegCodec. If PIEX returns kFail, the file is invalid and this returns nullptr. In other
+ * cases, falls back to creating an SkRawCodec for DNG images.
+ */
+std::unique_ptr<SkCodec> SkRawCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ std::unique_ptr<SkRawStream> rawStream;
+ if (is_asset_stream(*stream)) {
+ rawStream.reset(new SkRawAssetStream(std::move(stream)));
+ } else {
+ rawStream.reset(new SkRawBufferedStream(std::move(stream)));
+ }
+
+    // Does not take ownership of rawStream.
+ SkPiexStream piexStream(rawStream.get());
+ ::piex::PreviewImageData imageData;
+ if (::piex::IsRaw(&piexStream)) {
+ ::piex::Error error = ::piex::GetPreviewImageData(&piexStream, &imageData);
+ if (error == ::piex::Error::kFail) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ std::unique_ptr<SkEncodedInfo::ICCProfile> profile;
+ if (imageData.color_space == ::piex::PreviewImageData::kAdobeRgb) {
+ skcms_ICCProfile skcmsProfile;
+ skcms_Init(&skcmsProfile);
+ skcms_SetTransferFunction(&skcmsProfile, &SkNamedTransferFn::k2Dot2);
+ skcms_SetXYZD50(&skcmsProfile, &SkNamedGamut::kAdobeRGB);
+ profile = SkEncodedInfo::ICCProfile::Make(skcmsProfile);
+ }
+
+        // Theoretically PIEX can return a JPEG-compressed image or an uncompressed RGB image.
+        // We only handle the JPEG-compressed preview image here.
+ if (error == ::piex::Error::kOk && imageData.preview.length > 0 &&
+ imageData.preview.format == ::piex::Image::kJpegCompressed)
+ {
+ // transferBuffer() is destructive to the rawStream. Abandon the rawStream after this
+ // function call.
+ // FIXME: one may avoid the copy of memoryStream and use the buffered rawStream.
+ auto memoryStream = rawStream->transferBuffer(imageData.preview.offset,
+ imageData.preview.length);
+ if (!memoryStream) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+ return SkJpegCodec::MakeFromStream(std::move(memoryStream), result,
+ std::move(profile));
+ }
+ }
+
+ if (!SkDngImage::IsTiffHeaderValid(rawStream.get())) {
+ *result = kUnimplemented;
+ return nullptr;
+ }
+
+    // Takes ownership of the rawStream.
+ std::unique_ptr<SkDngImage> dngImage(SkDngImage::NewFromStream(rawStream.release()));
+ if (!dngImage) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ *result = kSuccess;
+ return std::unique_ptr<SkCodec>(new SkRawCodec(dngImage.release()));
+}
+
+SkCodec::Result SkRawCodec::onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options& options,
+ int* rowsDecoded) {
+ const int width = dstInfo.width();
+ const int height = dstInfo.height();
+ std::unique_ptr<dng_image> image(fDngImage->render(width, height));
+ if (!image) {
+ return kInvalidInput;
+ }
+
+    // Because the DNG SDK cannot guarantee rendering at the requested size, we allow a small
+ // difference. Only the overlapping region will be converted.
+ const float maxDiffRatio = 1.03f;
+ const dng_point& imageSize = image->Size();
+ if (imageSize.h / (float) width > maxDiffRatio || imageSize.h < width ||
+ imageSize.v / (float) height > maxDiffRatio || imageSize.v < height) {
+ return SkCodec::kInvalidScale;
+ }
+
+ void* dstRow = dst;
+ SkAutoTMalloc<uint8_t> srcRow(width * 3);
+
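+    // Describe one row of interleaved 8-bit RGB (three planes, column step 3) for the DNG SDK
+    // to fill via image->Get() below.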
+ dng_pixel_buffer buffer;
+ buffer.fData = &srcRow[0];
+ buffer.fPlane = 0;
+ buffer.fPlanes = 3;
+ buffer.fColStep = buffer.fPlanes;
+ buffer.fPlaneStep = 1;
+ buffer.fPixelType = ttByte;
+ buffer.fPixelSize = sizeof(uint8_t);
+ buffer.fRowStep = width * 3;
+
+ constexpr auto srcFormat = skcms_PixelFormat_RGB_888;
+ skcms_PixelFormat dstFormat;
+ if (!sk_select_xform_format(dstInfo.colorType(), false, &dstFormat)) {
+ return kInvalidConversion;
+ }
+
+ const skcms_ICCProfile* const srcProfile = this->getEncodedInfo().profile();
+ skcms_ICCProfile dstProfileStorage;
+ const skcms_ICCProfile* dstProfile = nullptr;
+ if (auto cs = dstInfo.colorSpace()) {
+ cs->toProfile(&dstProfileStorage);
+ dstProfile = &dstProfileStorage;
+ }
+
+ for (int i = 0; i < height; ++i) {
+ buffer.fArea = dng_rect(i, 0, i + 1, width);
+
+ try {
+ image->Get(buffer, dng_image::edge_zero);
+ } catch (...) {
+ *rowsDecoded = i;
+ return kIncompleteInput;
+ }
+
+ if (!skcms_Transform(&srcRow[0], srcFormat, skcms_AlphaFormat_Unpremul, srcProfile,
+ dstRow, dstFormat, skcms_AlphaFormat_Unpremul, dstProfile,
+ dstInfo.width())) {
+ SkDebugf("failed to transform\n");
+ *rowsDecoded = i;
+ return kInternalError;
+ }
+
+ dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
+ }
+ return kSuccess;
+}
+
+SkISize SkRawCodec::onGetScaledDimensions(float desiredScale) const {
+ SkASSERT(desiredScale <= 1.f);
+
+ const SkISize dim = this->dimensions();
+ SkASSERT(dim.fWidth != 0 && dim.fHeight != 0);
+
+ if (!fDngImage->isScalable()) {
+ return dim;
+ }
+
+    // Limit the minimum size to 80 pixels on the short edge.
+ const float shortEdge = static_cast<float>(SkTMin(dim.fWidth, dim.fHeight));
+ if (desiredScale < 80.f / shortEdge) {
+ desiredScale = 80.f / shortEdge;
+ }
+
+ // For Xtrans images, the integer-factor scaling does not support the half-size scaling case
+    // (stronger downscalings are fine). In this case, return factor-3 scaling instead.
+ if (fDngImage->isXtransImage() && desiredScale > 1.f / 3.f && desiredScale < 1.f) {
+ desiredScale = 1.f / 3.f;
+ }
+
+    // Round to an integer scaling factor.
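+    // For example (illustrative numbers): desiredScale == 0.3f gives finalScale == 3.0f, so a
+    // 4000 x 3000 image reports 1333 x 1000.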
+    const float finalScale = std::floor(1.f / desiredScale);
+ return SkISize::Make(static_cast<int32_t>(std::floor(dim.fWidth / finalScale)),
+ static_cast<int32_t>(std::floor(dim.fHeight / finalScale)));
+}
+
+bool SkRawCodec::onDimensionsSupported(const SkISize& dim) {
+ const SkISize fullDim = this->dimensions();
+ const float fullShortEdge = static_cast<float>(SkTMin(fullDim.fWidth, fullDim.fHeight));
+ const float shortEdge = static_cast<float>(SkTMin(dim.fWidth, dim.fHeight));
+
+ SkISize sizeFloor = this->onGetScaledDimensions(1.f / std::floor(fullShortEdge / shortEdge));
+ SkISize sizeCeil = this->onGetScaledDimensions(1.f / std::ceil(fullShortEdge / shortEdge));
+ return sizeFloor == dim || sizeCeil == dim;
+}
+
+SkRawCodec::~SkRawCodec() {}
+
+SkRawCodec::SkRawCodec(SkDngImage* dngImage)
+ : INHERITED(SkEncodedInfo::Make(dngImage->width(), dngImage->height(),
+ SkEncodedInfo::kRGB_Color,
+ SkEncodedInfo::kOpaque_Alpha, 8),
+ skcms_PixelFormat_RGBA_8888, nullptr)
+ , fDngImage(dngImage) {}
diff --git a/gfx/skia/skia/src/codec/SkRawCodec.h b/gfx/skia/skia/src/codec/SkRawCodec.h
new file mode 100644
index 0000000000..d1c5131afa
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawCodec.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRawCodec_DEFINED
+#define SkRawCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+
+class SkDngImage;
+class SkStream;
+
+/*
+ * This class implements the decoding for RAW images.
+ */
+class SkRawCodec : public SkCodec {
+public:
+
+ /*
+ * Creates a RAW decoder
+ * Takes ownership of the stream
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+ ~SkRawCodec() override;
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ int*) override;
+
+ SkEncodedImageFormat onGetEncodedFormat() const override {
+ return SkEncodedImageFormat::kDNG;
+ }
+
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ // SkCodec only applies the colorXform if it's necessary for color space
+ // conversion. SkRawCodec will always convert, so tell SkCodec not to.
+ bool usesColorXform() const override { return false; }
+
+private:
+
+ /*
+ * Creates an instance of the decoder
+     * Called only by MakeFromStream, takes ownership of dngImage.
+ */
+ SkRawCodec(SkDngImage* dngImage);
+
+ std::unique_ptr<SkDngImage> fDngImage;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkSampledCodec.cpp b/gfx/skia/skia/src/codec/SkSampledCodec.cpp
new file mode 100644
index 0000000000..c0c8cf02c4
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampledCodec.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkMath.h"
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkSampledCodec.h"
+#include "src/codec/SkSampler.h"
+#include "src/core/SkMathPriv.h"
+
+SkSampledCodec::SkSampledCodec(SkCodec* codec, ExifOrientationBehavior behavior)
+ : INHERITED(codec, behavior)
+{}
+
+SkISize SkSampledCodec::accountForNativeScaling(int* sampleSizePtr, int* nativeSampleSize) const {
+ SkISize preSampledSize = this->codec()->dimensions();
+ int sampleSize = *sampleSizePtr;
+ SkASSERT(sampleSize > 1);
+
+ if (nativeSampleSize) {
+ *nativeSampleSize = 1;
+ }
+
+ // Only JPEG supports native downsampling.
+ if (this->codec()->getEncodedFormat() == SkEncodedImageFormat::kJPEG) {
+ // See if libjpeg supports this scale directly
+ switch (sampleSize) {
+ case 2:
+ case 4:
+ case 8:
+ // This class does not need to do any sampling.
+ *sampleSizePtr = 1;
+ return this->codec()->getScaledDimensions(get_scale_from_sample_size(sampleSize));
+ default:
+ break;
+ }
+
+ // Check if sampleSize is a multiple of something libjpeg can support.
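+        // For example (illustrative numbers): sampleSize == 12 matches supportedSampleSize == 4,
+        // so libjpeg scales by 1/4 and this class samples the result by 3.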
+ int remainder;
+ const int sampleSizes[] = { 8, 4, 2 };
+ for (int supportedSampleSize : sampleSizes) {
+ int actualSampleSize;
+ SkTDivMod(sampleSize, supportedSampleSize, &actualSampleSize, &remainder);
+ if (0 == remainder) {
+ float scale = get_scale_from_sample_size(supportedSampleSize);
+
+ // this->codec() will scale to this size.
+ preSampledSize = this->codec()->getScaledDimensions(scale);
+
+ // And then this class will sample it.
+ *sampleSizePtr = actualSampleSize;
+ if (nativeSampleSize) {
+ *nativeSampleSize = supportedSampleSize;
+ }
+ break;
+ }
+ }
+ }
+
+ return preSampledSize;
+}
+
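+// For example (illustrative numbers): a 100 x 75 JPEG with sampleSize == 3, which libjpeg cannot
+// scale natively, reports sampled dimensions of 33 x 25.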
+SkISize SkSampledCodec::onGetSampledDimensions(int sampleSize) const {
+ const SkISize size = this->accountForNativeScaling(&sampleSize);
+ return SkISize::Make(get_scaled_dimension(size.width(), sampleSize),
+ get_scaled_dimension(size.height(), sampleSize));
+}
+
+SkCodec::Result SkSampledCodec::onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ // Create an Options struct for the codec.
+ SkCodec::Options codecOptions;
+ codecOptions.fZeroInitialized = options.fZeroInitialized;
+
+ SkIRect* subset = options.fSubset;
+ if (!subset || subset->size() == this->codec()->dimensions()) {
+ if (this->codec()->dimensionsSupported(info.dimensions())) {
+ return this->codec()->getPixels(info, pixels, rowBytes, &codecOptions);
+ }
+
+ // If the native codec does not support the requested scale, scale by sampling.
+ return this->sampledDecode(info, pixels, rowBytes, options);
+ }
+
+ // We are performing a subset decode.
+ int sampleSize = options.fSampleSize;
+ SkISize scaledSize = this->getSampledDimensions(sampleSize);
+ if (!this->codec()->dimensionsSupported(scaledSize)) {
+ // If the native codec does not support the requested scale, scale by sampling.
+ return this->sampledDecode(info, pixels, rowBytes, options);
+ }
+
+ // Calculate the scaled subset bounds.
+ int scaledSubsetX = subset->x() / sampleSize;
+ int scaledSubsetY = subset->y() / sampleSize;
+ int scaledSubsetWidth = info.width();
+ int scaledSubsetHeight = info.height();
+
+ const SkImageInfo scaledInfo = info.makeDimensions(scaledSize);
+
+ {
+ // Although startScanlineDecode expects the bottom and top to match the
+ // SkImageInfo, startIncrementalDecode uses them to determine which rows to
+ // decode.
+ SkIRect incrementalSubset = SkIRect::MakeXYWH(scaledSubsetX, scaledSubsetY,
+ scaledSubsetWidth, scaledSubsetHeight);
+ codecOptions.fSubset = &incrementalSubset;
+ const SkCodec::Result startResult = this->codec()->startIncrementalDecode(
+ scaledInfo, pixels, rowBytes, &codecOptions);
+ if (SkCodec::kSuccess == startResult) {
+ int rowsDecoded = 0;
+ const SkCodec::Result incResult = this->codec()->incrementalDecode(&rowsDecoded);
+ if (incResult == SkCodec::kSuccess) {
+ return SkCodec::kSuccess;
+ }
+ SkASSERT(incResult == SkCodec::kIncompleteInput || incResult == SkCodec::kErrorInInput);
+
+ // FIXME: Can zero initialized be read from SkCodec::fOptions?
+ this->codec()->fillIncompleteImage(scaledInfo, pixels, rowBytes,
+ options.fZeroInitialized, scaledSubsetHeight, rowsDecoded);
+ return incResult;
+ } else if (startResult != SkCodec::kUnimplemented) {
+ return startResult;
+ }
+ // Otherwise fall down to use the old scanline decoder.
+ // codecOptions.fSubset will be reset below, so it will not continue to
+ // point to the object that is no longer on the stack.
+ }
+
+ // Start the scanline decode.
+ SkIRect scanlineSubset = SkIRect::MakeXYWH(scaledSubsetX, 0, scaledSubsetWidth,
+ scaledSize.height());
+ codecOptions.fSubset = &scanlineSubset;
+
+ SkCodec::Result result = this->codec()->startScanlineDecode(scaledInfo,
+ &codecOptions);
+ if (SkCodec::kSuccess != result) {
+ return result;
+ }
+
+ // At this point, we are only concerned with subsetting. Either no scale was
+    // requested, or this->codec() is handling the scale.
+ // Note that subsetting is only supported for kTopDown, so this code will not be
+ // reached for other orders.
+ SkASSERT(this->codec()->getScanlineOrder() == SkCodec::kTopDown_SkScanlineOrder);
+ if (!this->codec()->skipScanlines(scaledSubsetY)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ scaledSubsetHeight, 0);
+ return SkCodec::kIncompleteInput;
+ }
+
+ int decodedLines = this->codec()->getScanlines(pixels, scaledSubsetHeight, rowBytes);
+ if (decodedLines != scaledSubsetHeight) {
+ return SkCodec::kIncompleteInput;
+ }
+ return SkCodec::kSuccess;
+}
+
+SkCodec::Result SkSampledCodec::sampledDecode(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ // We should only call this function when sampling.
+ SkASSERT(options.fSampleSize > 1);
+
+ // Create options struct for the codec.
+ SkCodec::Options sampledOptions;
+ sampledOptions.fZeroInitialized = options.fZeroInitialized;
+
+ // FIXME: This was already called by onGetAndroidPixels. Can we reduce that?
+ int sampleSize = options.fSampleSize;
+ int nativeSampleSize;
+ SkISize nativeSize = this->accountForNativeScaling(&sampleSize, &nativeSampleSize);
+
+ // Check if there is a subset.
+ SkIRect subset;
+ int subsetY = 0;
+ int subsetWidth = nativeSize.width();
+ int subsetHeight = nativeSize.height();
+ if (options.fSubset) {
+ // We will need to know about subsetting in the y-dimension in order to use the
+ // scanline decoder.
+ // Update the subset to account for scaling done by this->codec().
+ const SkIRect* subsetPtr = options.fSubset;
+
+ // Do the divide ourselves, instead of calling get_scaled_dimension. If
+ // X and Y are 0, they should remain 0, rather than being upgraded to 1
+ // due to being smaller than the sampleSize.
+ const int subsetX = subsetPtr->x() / nativeSampleSize;
+ subsetY = subsetPtr->y() / nativeSampleSize;
+
+ subsetWidth = get_scaled_dimension(subsetPtr->width(), nativeSampleSize);
+ subsetHeight = get_scaled_dimension(subsetPtr->height(), nativeSampleSize);
+
+ // The scanline decoder only needs to be aware of subsetting in the x-dimension.
+ subset.setXYWH(subsetX, 0, subsetWidth, nativeSize.height());
+ sampledOptions.fSubset = &subset;
+ }
+
+ // Since we guarantee that output dimensions are always at least one (even if the sampleSize
+ // is greater than a given dimension), the input sampleSize is not always the sampleSize that
+ // we use in practice.
+ const int sampleX = subsetWidth / info.width();
+ const int sampleY = subsetHeight / info.height();
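+    // For example (illustrative numbers): a 10-pixel-wide subset with sampleSize == 8 produces
+    // an output width of 1, so sampleX == 10 here rather than 8.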
+
+ const int samplingOffsetY = get_start_coord(sampleY);
+ const int startY = samplingOffsetY + subsetY;
+ const int dstHeight = info.height();
+
+ const SkImageInfo nativeInfo = info.makeDimensions(nativeSize);
+
+ {
+ // Although startScanlineDecode expects the bottom and top to match the
+ // SkImageInfo, startIncrementalDecode uses them to determine which rows to
+ // decode.
+ SkCodec::Options incrementalOptions = sampledOptions;
+ SkIRect incrementalSubset;
+ if (sampledOptions.fSubset) {
+ incrementalSubset.fTop = subsetY;
+ incrementalSubset.fBottom = subsetY + subsetHeight;
+ incrementalSubset.fLeft = sampledOptions.fSubset->fLeft;
+ incrementalSubset.fRight = sampledOptions.fSubset->fRight;
+ incrementalOptions.fSubset = &incrementalSubset;
+ }
+ const SkCodec::Result startResult = this->codec()->startIncrementalDecode(nativeInfo,
+ pixels, rowBytes, &incrementalOptions);
+ if (SkCodec::kSuccess == startResult) {
+ SkSampler* sampler = this->codec()->getSampler(true);
+ if (!sampler) {
+ return SkCodec::kUnimplemented;
+ }
+
+ if (sampler->setSampleX(sampleX) != info.width()) {
+ return SkCodec::kInvalidScale;
+ }
+ if (get_scaled_dimension(subsetHeight, sampleY) != info.height()) {
+ return SkCodec::kInvalidScale;
+ }
+
+ sampler->setSampleY(sampleY);
+
+ int rowsDecoded = 0;
+ const SkCodec::Result incResult = this->codec()->incrementalDecode(&rowsDecoded);
+ if (incResult == SkCodec::kSuccess) {
+ return SkCodec::kSuccess;
+ }
+ SkASSERT(incResult == SkCodec::kIncompleteInput || incResult == SkCodec::kErrorInInput);
+
+ SkASSERT(rowsDecoded <= info.height());
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ info.height(), rowsDecoded);
+ return incResult;
+ } else if (startResult == SkCodec::kIncompleteInput
+ || startResult == SkCodec::kErrorInInput) {
+ return SkCodec::kInvalidInput;
+ } else if (startResult != SkCodec::kUnimplemented) {
+ return startResult;
+ } // kUnimplemented means use the old method.
+ }
+
+ // Start the scanline decode.
+ SkCodec::Result result = this->codec()->startScanlineDecode(nativeInfo,
+ &sampledOptions);
+ if (SkCodec::kIncompleteInput == result || SkCodec::kErrorInInput == result) {
+ return SkCodec::kInvalidInput;
+ } else if (SkCodec::kSuccess != result) {
+ return result;
+ }
+
+ SkSampler* sampler = this->codec()->getSampler(true);
+ if (!sampler) {
+ return SkCodec::kUnimplemented;
+ }
+
+ if (sampler->setSampleX(sampleX) != info.width()) {
+ return SkCodec::kInvalidScale;
+ }
+ if (get_scaled_dimension(subsetHeight, sampleY) != info.height()) {
+ return SkCodec::kInvalidScale;
+ }
+
+ switch(this->codec()->getScanlineOrder()) {
+ case SkCodec::kTopDown_SkScanlineOrder: {
+ if (!this->codec()->skipScanlines(startY)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ dstHeight, 0);
+ return SkCodec::kIncompleteInput;
+ }
+ void* pixelPtr = pixels;
+ for (int y = 0; y < dstHeight; y++) {
+ if (1 != this->codec()->getScanlines(pixelPtr, 1, rowBytes)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes,
+ options.fZeroInitialized, dstHeight, y + 1);
+ return SkCodec::kIncompleteInput;
+ }
+ if (y < dstHeight - 1) {
+ if (!this->codec()->skipScanlines(sampleY - 1)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes,
+ options.fZeroInitialized, dstHeight, y + 1);
+ return SkCodec::kIncompleteInput;
+ }
+ }
+ pixelPtr = SkTAddOffset<void>(pixelPtr, rowBytes);
+ }
+ return SkCodec::kSuccess;
+ }
+ case SkCodec::kBottomUp_SkScanlineOrder: {
+ // Note that these modes do not support subsetting.
+ SkASSERT(0 == subsetY && nativeSize.height() == subsetHeight);
+ int y;
+ for (y = 0; y < nativeSize.height(); y++) {
+ int srcY = this->codec()->nextScanline();
+ if (is_coord_necessary(srcY, sampleY, dstHeight)) {
+ void* pixelPtr = SkTAddOffset<void>(pixels,
+ rowBytes * get_dst_coord(srcY, sampleY));
+ if (1 != this->codec()->getScanlines(pixelPtr, 1, rowBytes)) {
+ break;
+ }
+ } else {
+ if (!this->codec()->skipScanlines(1)) {
+ break;
+ }
+ }
+ }
+
+ if (nativeSize.height() == y) {
+ return SkCodec::kSuccess;
+ }
+
+ // We handle filling uninitialized memory here instead of using this->codec().
+ // this->codec() does not know that we are sampling.
+ const SkImageInfo fillInfo = info.makeWH(info.width(), 1);
+ for (; y < nativeSize.height(); y++) {
+ int srcY = this->codec()->outputScanline(y);
+ if (!is_coord_necessary(srcY, sampleY, dstHeight)) {
+ continue;
+ }
+
+ void* rowPtr = SkTAddOffset<void>(pixels, rowBytes * get_dst_coord(srcY, sampleY));
+ SkSampler::Fill(fillInfo, rowPtr, rowBytes, options.fZeroInitialized);
+ }
+ return SkCodec::kIncompleteInput;
+ }
+ default:
+ SkASSERT(false);
+ return SkCodec::kUnimplemented;
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkSampledCodec.h b/gfx/skia/skia/src/codec/SkSampledCodec.h
new file mode 100644
index 0000000000..c92f944a62
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampledCodec.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSampledCodec_DEFINED
+#define SkSampledCodec_DEFINED
+
+#include "include/codec/SkAndroidCodec.h"
+#include "include/codec/SkCodec.h"
+
+/**
+ * This class implements the functionality of SkAndroidCodec. Scaling will
+ * be provided by sampling if it cannot be provided by fCodec.
+ */
+class SkSampledCodec : public SkAndroidCodec {
+public:
+ explicit SkSampledCodec(SkCodec*, ExifOrientationBehavior);
+
+ ~SkSampledCodec() override {}
+
+protected:
+
+ SkISize onGetSampledDimensions(int sampleSize) const override;
+
+ bool onGetSupportedSubset(SkIRect* desiredSubset) const override { return true; }
+
+ SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) override;
+
+private:
+ /**
+ * Find the best way to account for native scaling.
+ *
+ * Return a size that fCodec can scale to, and adjust sampleSize to finish scaling.
+ *
+ * @param sampleSize As an input, the requested sample size.
+ * As an output, sampling needed after letting fCodec
+ * scale to the returned dimensions.
+ * @param nativeSampleSize Optional output parameter. Will be set to the
+     *                          effective sample size applied by fCodec.
+ * @return SkISize The size that fCodec should scale to.
+ */
+ SkISize accountForNativeScaling(int* sampleSize, int* nativeSampleSize = nullptr) const;
+
+ /**
+ * This fulfills the same contract as onGetAndroidPixels().
+ *
+ * We call this function from onGetAndroidPixels() if we have determined
+ * that fCodec does not support the requested scale, and we need to
+ * provide the scale by sampling.
+ */
+ SkCodec::Result sampledDecode(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options);
+
+ typedef SkAndroidCodec INHERITED;
+};
+#endif // SkSampledCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkSampler.cpp b/gfx/skia/skia/src/codec/SkSampler.cpp
new file mode 100644
index 0000000000..3820d71495
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkSampler.h"
+#include "src/core/SkUtils.h"
+
+void SkSampler::Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ SkCodec::ZeroInitialized zeroInit) {
+ SkASSERT(dst != nullptr);
+
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ return;
+ }
+
+ const int width = info.width();
+ const int numRows = info.height();
+
+ // Use the proper memset routine to fill the remaining bytes
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ uint32_t* dstRow = (uint32_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset32(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint32_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = (uint16_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset16(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint16_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kGray_8_SkColorType: {
+ uint8_t* dstRow = (uint8_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ memset(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint8_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kRGBA_F16_SkColorType: {
+ uint64_t* dstRow = (uint64_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset64(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint64_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ default:
+ SkCodecPrintf("Error: Unsupported dst color type for fill(). Doing nothing.\n");
+ SkASSERT(false);
+ break;
+ }
+}
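+
+// Usage sketch (illustrative; `dst` and `width` are hypothetical values
+// supplied by a caller, with `dst` pointing at four writable RGBA rows):
+//
+//     SkImageInfo fillInfo = SkImageInfo::MakeN32Premul(width, 4);
+//     SkSampler::Fill(fillInfo, dst, fillInfo.minRowBytes(),
+//                     SkCodec::kNo_ZeroInitialized);  // memsets the rows to 0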
diff --git a/gfx/skia/skia/src/codec/SkSampler.h b/gfx/skia/skia/src/codec/SkSampler.h
new file mode 100644
index 0000000000..d03b80aa26
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSampler_DEFINED
+#define SkSampler_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkCodecPriv.h"
+
+class SkSampler : public SkNoncopyable {
+public:
+ /**
+ * Update the sampler to sample every sampleX'th pixel. Returns the
+ * width after sampling.
+ */
+ int setSampleX(int sampleX) {
+ return this->onSetSampleX(sampleX);
+ }
+
+ /**
+ * Update the sampler to sample every sampleY'th row.
+ */
+ void setSampleY(int sampleY) {
+ fSampleY = sampleY;
+ }
+
+ /**
+ * Retrieve the value set for sampleY.
+ */
+ int sampleY() const {
+ return fSampleY;
+ }
+
+ /**
+ * Based on fSampleY, return whether this row belongs in the output.
+ *
+ * @param row Row of the image, starting with the first row in the subset.
+ */
+ bool rowNeeded(int row) const {
+ return (row - get_start_coord(fSampleY)) % fSampleY == 0;
+ }
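+
+    // Example (illustrative): with fSampleY = 3, get_start_coord(3) == 1, so
+    // rowNeeded() returns true for rows 1, 4, 7, ... and false for the rest.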
+
+ /**
+ * Fill the remainder of the destination with 0.
+ *
+ * 0 has a different meaning depending on the SkColorType. For color types
+ * with transparency, this means transparent. For k565 and kGray, 0 is
+ * black.
+ *
+ * @param info
+ * Contains the color type of the rows to fill.
+     *      Contains the pixel width of the destination rows to fill.
+ * Contains the number of rows that we need to fill.
+ *
+ * @param dst
+ * The destination row to fill.
+ *
+ * @param rowBytes
+ * Stride in bytes of the destination.
+ *
+ * @param zeroInit
+ * Indicates whether memory is already zero initialized.
+ */
+ static void Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ SkCodec::ZeroInitialized zeroInit);
+
+ virtual int fillWidth() const = 0;
+
+ SkSampler()
+ : fSampleY(1)
+ {}
+
+ virtual ~SkSampler() {}
+private:
+ int fSampleY;
+
+ virtual int onSetSampleX(int) = 0;
+};
+
+#endif // SkSampler_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkScalingCodec.h b/gfx/skia/skia/src/codec/SkScalingCodec.h
new file mode 100644
index 0000000000..799ca3852b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkScalingCodec.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkScalingCodec_DEFINED
+#define SkScalingCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+
+// Helper class for an SkCodec that supports arbitrary downscaling.
+class SkScalingCodec : public SkCodec {
+protected:
+ SkScalingCodec(SkEncodedInfo&& info, XformFormat srcFormat, std::unique_ptr<SkStream> stream,
+ SkEncodedOrigin origin = kTopLeft_SkEncodedOrigin)
+ : INHERITED(std::move(info), srcFormat, std::move(stream), origin) {}
+
+ SkISize onGetScaledDimensions(float desiredScale) const override {
+ SkISize dim = this->dimensions();
+ // SkCodec treats zero dimensional images as errors, so the minimum size
+ // that we will recommend is 1x1.
+ dim.fWidth = SkTMax(1, SkScalarRoundToInt(desiredScale * dim.fWidth));
+ dim.fHeight = SkTMax(1, SkScalarRoundToInt(desiredScale * dim.fHeight));
+ return dim;
+ }
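+
+    // Example (illustrative): for a 100x200 image and desiredScale = 0.375,
+    // this returns {38, 75}, since SkScalarRoundToInt(0.375 * 100) == 38 and
+    // SkScalarRoundToInt(0.375 * 200) == 75, and both stay >= 1.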
+
+ bool onDimensionsSupported(const SkISize& requested) override {
+ SkISize dim = this->dimensions();
+ int w = requested.width();
+ int h = requested.height();
+ return 1 <= w && w <= dim.width() && 1 <= h && h <= dim.height();
+ }
+
+private:
+ typedef SkCodec INHERITED;
+};
+
+#endif // SkScalingCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkStreamBuffer.cpp b/gfx/skia/skia/src/codec/SkStreamBuffer.cpp
new file mode 100644
index 0000000000..cdac862fdd
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkStreamBuffer.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkStreamBuffer.h"
+
+SkStreamBuffer::SkStreamBuffer(std::unique_ptr<SkStream> stream)
+ : fStream(std::move(stream))
+ , fPosition(0)
+ , fBytesBuffered(0)
+ , fHasLengthAndPosition(fStream->hasLength() && fStream->hasPosition())
+ , fTrulyBuffered(0)
+{}
+
+SkStreamBuffer::~SkStreamBuffer() {
+ fMarkedData.foreach([](size_t, SkData** data) { (*data)->unref(); });
+}
+
+const char* SkStreamBuffer::get() const {
+ SkASSERT(fBytesBuffered >= 1);
+ if (fHasLengthAndPosition && fTrulyBuffered < fBytesBuffered) {
+ const size_t bytesToBuffer = fBytesBuffered - fTrulyBuffered;
+ char* dst = SkTAddOffset<char>(const_cast<char*>(fBuffer), fTrulyBuffered);
+ SkDEBUGCODE(const size_t bytesRead =)
+ // This stream is rewindable, so it should be safe to call the non-const
+ // read()
+ const_cast<SkStream*>(fStream.get())->read(dst, bytesToBuffer);
+ SkASSERT(bytesRead == bytesToBuffer);
+ fTrulyBuffered = fBytesBuffered;
+ }
+ return fBuffer;
+}
+
+bool SkStreamBuffer::buffer(size_t totalBytesToBuffer) {
+ // FIXME (scroggo): What should we do if the client tries to read too much?
+ // Should not be a problem in GIF.
+ SkASSERT(totalBytesToBuffer <= kMaxSize);
+
+ if (totalBytesToBuffer <= fBytesBuffered) {
+ return true;
+ }
+
+ if (fHasLengthAndPosition) {
+ const size_t remaining = fStream->getLength() - fStream->getPosition() + fTrulyBuffered;
+ fBytesBuffered = SkTMin(remaining, totalBytesToBuffer);
+ } else {
+ const size_t extraBytes = totalBytesToBuffer - fBytesBuffered;
+ const size_t bytesBuffered = fStream->read(fBuffer + fBytesBuffered, extraBytes);
+ fBytesBuffered += bytesBuffered;
+ }
+ return fBytesBuffered == totalBytesToBuffer;
+}
+
+size_t SkStreamBuffer::markPosition() {
+ SkASSERT(fBytesBuffered >= 1);
+ if (!fHasLengthAndPosition) {
+ sk_sp<SkData> data(SkData::MakeWithCopy(fBuffer, fBytesBuffered));
+ SkASSERT(nullptr == fMarkedData.find(fPosition));
+ fMarkedData.set(fPosition, data.release());
+ }
+ return fPosition;
+}
+
+sk_sp<SkData> SkStreamBuffer::getDataAtPosition(size_t position, size_t length) {
+ if (!fHasLengthAndPosition) {
+ SkData** data = fMarkedData.find(position);
+ SkASSERT(data);
+ SkASSERT((*data)->size() == length);
+ return sk_ref_sp<SkData>(*data);
+ }
+
+ SkASSERT(length <= fStream->getLength() &&
+ position <= fStream->getLength() - length);
+
+ const size_t oldPosition = fStream->getPosition();
+ if (!fStream->seek(position)) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+ void* dst = data->writable_data();
+ const bool success = fStream->read(dst, length) == length;
+ fStream->seek(oldPosition);
+ return success ? data : nullptr;
+}
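+
+// Call-pattern sketch (illustrative of how a reader like GIFImageReader might
+// drive this class; the 256 * 3 byte count is the color-table upper bound):
+//
+//     SkStreamBuffer streamBuffer(std::move(stream));
+//     if (streamBuffer.buffer(256 * 3)) {            // buffer a color table
+//         size_t pos = streamBuffer.markPosition();  // remember where it is
+//         streamBuffer.flush();                      // move past it
+//         // ... later, during decoding ...
+//         sk_sp<SkData> ctable = streamBuffer.getDataAtPosition(pos, 256 * 3);
+//     }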
diff --git a/gfx/skia/skia/src/codec/SkStreamBuffer.h b/gfx/skia/skia/src/codec/SkStreamBuffer.h
new file mode 100644
index 0000000000..465d2f54ee
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkStreamBuffer.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStreamBuffer_DEFINED
+#define SkStreamBuffer_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTHash.h"
+
+/**
+ * Helper class for reading from a stream that may not have all its data
+ * available yet.
+ *
+ * Used by GIFImageReader, and currently set up for that use case.
+ *
+ * Buffers up to 256 * 3 bytes (256 colors, with 3 bytes each) to support GIF.
+ * FIXME (scroggo): Make this more general purpose?
+ */
+class SkStreamBuffer : SkNoncopyable {
+public:
+ SkStreamBuffer(std::unique_ptr<SkStream>);
+ ~SkStreamBuffer();
+
+ /**
+     *  Return a pointer to the buffered data.
+ *
+ * The number of bytes buffered is the number passed to buffer()
+ * after the last call to flush().
+ */
+ const char* get() const;
+
+ /**
+ * Buffer from the stream into our buffer.
+ *
+ * If this call returns true, get() can be used to access |bytes| bytes
+ * from the stream. In addition, markPosition() can be called to mark this
+     *  position and enable calling getDataAtPosition() later to retrieve |bytes|
+ * bytes.
+ *
+ * @param bytes Total number of bytes desired.
+ *
+ * @return Whether all bytes were successfully buffered.
+ */
+ bool buffer(size_t bytes);
+
+ /**
+ * Flush the buffer.
+ *
+ * After this call, no bytes are buffered.
+ */
+ void flush() {
+ if (fHasLengthAndPosition) {
+ if (fTrulyBuffered < fBytesBuffered) {
+ fStream->move(fBytesBuffered - fTrulyBuffered);
+ }
+ fTrulyBuffered = 0;
+ }
+ fPosition += fBytesBuffered;
+ fBytesBuffered = 0;
+ }
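+
+    // Example (illustrative): if buffer(10) succeeded with the buffer starting
+    // at stream position P, get() exposes bytes [P, P + 10) and flush()
+    // advances fPosition to P + 10, so the next buffer() starts there.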
+
+ /**
+ * Mark the current position in the stream to return to it later.
+ *
+     *  This is the position of the start of the buffer. After this call, a
+     *  client can call getDataAtPosition() to retrieve all the bytes currently
+ * buffered.
+ *
+ * @return size_t Position which can be passed to getDataAtPosition later
+ * to retrieve the data currently buffered.
+ */
+ size_t markPosition();
+
+ /**
+ * Retrieve data at position, as previously marked by markPosition().
+ *
+ * @param position Position to retrieve data, as marked by markPosition().
+ * @param length Amount of data required at position.
+ * @return SkData The data at position.
+ */
+ sk_sp<SkData> getDataAtPosition(size_t position, size_t length);
+
+private:
+ static constexpr size_t kMaxSize = 256 * 3;
+
+ std::unique_ptr<SkStream> fStream;
+ size_t fPosition;
+ char fBuffer[kMaxSize];
+ size_t fBytesBuffered;
+ // If the stream has a length and position, we can make two optimizations:
+ // - We can skip buffering
+ // - During parsing, we can store the position and size of data that is
+ // needed later during decoding.
+ const bool fHasLengthAndPosition;
+ // When fHasLengthAndPosition is true, we do not need to actually buffer
+ // inside buffer(). We'll buffer inside get(). This keeps track of how many
+ // bytes we've buffered inside get(), for the (non-existent) case of:
+ // buffer(n)
+ // get()
+ // buffer(n + u)
+ // get()
+ // The second call to get() needs to only truly buffer the part that was
+ // not already buffered.
+ mutable size_t fTrulyBuffered;
+ // Only used if !fHasLengthAndPosition. In that case, markPosition will
+ // copy into an SkData, stored here.
+ SkTHashMap<size_t, SkData*> fMarkedData;
+};
+#endif // SkStreamBuffer_DEFINED
+
diff --git a/gfx/skia/skia/src/codec/SkStubHeifDecoderAPI.h b/gfx/skia/skia/src/codec/SkStubHeifDecoderAPI.h
new file mode 100644
index 0000000000..413ec62800
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkStubHeifDecoderAPI.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStubHeifDecoderAPI_DEFINED
+#define SkStubHeifDecoderAPI_DEFINED
+
+// This stub implementation of HeifDecoderAPI.h lets us compile SkHeifCodec.cpp
+// even when libheif is not available. It, of course, does nothing and fails to decode.
+
+#include <memory>
+#include <stddef.h>
+#include <stdint.h>
+#include <vector>   // HeifFrameInfo::mIccData below uses std::vector
+
+enum HeifColorFormat {
+ kHeifColorFormat_RGB565,
+ kHeifColorFormat_RGBA_8888,
+ kHeifColorFormat_BGRA_8888,
+};
+
+struct HeifStream {
+ virtual ~HeifStream() {}
+
+ virtual size_t read(void*, size_t) = 0;
+ virtual bool rewind() = 0;
+ virtual bool seek(size_t) = 0;
+ virtual bool hasLength() const = 0;
+ virtual size_t getLength() const = 0;
+};
+
+struct HeifFrameInfo {
+ uint32_t mWidth;
+ uint32_t mHeight;
+ int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
+ uint32_t mBytesPerPixel; // Number of bytes for one pixel
+ int64_t mDurationUs; // Duration of the frame in us
+ std::vector<uint8_t> mIccData; // ICC data array
+};
+
+struct HeifDecoder {
+ bool init(HeifStream* stream, HeifFrameInfo*) {
+ delete stream;
+ return false;
+ }
+
+ bool getSequenceInfo(HeifFrameInfo* frameInfo, size_t *frameCount) {
+ return false;
+ }
+
+ bool decode(HeifFrameInfo*) {
+ return false;
+ }
+
+ bool decodeSequence(int frameIndex, HeifFrameInfo* frameInfo) {
+ return false;
+ }
+
+ bool setOutputColor(HeifColorFormat) {
+ return false;
+ }
+
+ bool getScanline(uint8_t*) {
+ return false;
+ }
+
+ int skipScanlines(int) {
+ return 0;
+ }
+};
+
+static inline HeifDecoder* createHeifDecoder() { return new HeifDecoder; }
+
+#endif // SkStubHeifDecoderAPI_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.cpp b/gfx/skia/skia/src/codec/SkSwizzler.cpp
new file mode 100644
index 0000000000..e29c41428b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.cpp
@@ -0,0 +1,1237 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkTemplates.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkSwizzler.h"
+#include "src/core/SkOpts.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #include "include/android/SkAndroidFrameworkUtils.h"
+#endif
+
+static void copy(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ memcpy(dst, src + offset, width * bpp);
+}
+
+static void sample1(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst8[x] = *src;
+ src += deltaSrc;
+ }
+}
+
+static void sample2(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* dst16 = (uint16_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst16[x] = *((const uint16_t*) src);
+ src += deltaSrc;
+ }
+}
+
+static void sample4(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = *((const uint32_t*) src);
+ src += deltaSrc;
+ }
+}
+
+static void sample6(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*) dst;
+ for (int x = 0; x < width; x++) {
+ memcpy(dst8, src, 6);
+ dst8 += 6;
+ src += deltaSrc;
+ }
+}
+
+static void sample8(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint64_t* dst64 = (uint64_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst64[x] = *((const uint64_t*) src);
+ src += deltaSrc;
+ }
+}
+
+// kBit
+// These routines exclusively choose between white and black
+
+#define GRAYSCALE_BLACK 0
+#define GRAYSCALE_WHITE 0xFF
+
+
+// same as swizzle_bit_to_index and swizzle_bit_to_n32 except for value assigned to dst[x]
+static void swizzle_bit_to_grayscale(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+
+ uint8_t* SK_RESTRICT dst = (uint8_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+    dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+
+    for (int x = 1; x < dstWidth; x++) {
+        int bitOffset = bitIndex + deltaSrc;
+        bitIndex = bitOffset % 8;
+        currByte = *(src += bitOffset / 8);
+        dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+ }
+}
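+
+// Worked example (illustrative): with offset = 10 bits and deltaSrc = 2 bits
+// (sampleX = 2 at one bit per pixel), the routine starts at src[1], bit 2,
+// then advances two bits per destination pixel, moving to the next source
+// byte each time bitIndex wraps past 7.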
+
+#undef GRAYSCALE_BLACK
+#undef GRAYSCALE_WHITE
+
+// same as swizzle_bit_to_grayscale and swizzle_bit_to_index except for value assigned to dst[x]
+static void swizzle_bit_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+ }
+}
+
+#define RGB565_BLACK 0
+#define RGB565_WHITE 0xFFFF
+
+static void swizzle_bit_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ uint16_t* SK_RESTRICT dst = (uint16_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+ }
+}
+
+#undef RGB565_BLACK
+#undef RGB565_WHITE
+
+static void swizzle_bit_to_f16(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ constexpr uint64_t kWhite = (((uint64_t) SK_Half1) << 0) |
+ (((uint64_t) SK_Half1) << 16) |
+ (((uint64_t) SK_Half1) << 32) |
+ (((uint64_t) SK_Half1) << 48);
+ constexpr uint64_t kBlack = (((uint64_t) 0) << 0) |
+ (((uint64_t) 0) << 16) |
+ (((uint64_t) 0) << 32) |
+ (((uint64_t) SK_Half1) << 48);
+
+ uint64_t* SK_RESTRICT dst = (uint64_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? kWhite : kBlack;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? kWhite : kBlack;
+ }
+}
+
+// kIndex1, kIndex2, kIndex4
+
+static void swizzle_small_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ uint16_t* dst = (uint16_t*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = SkPixel32ToPixel16(ctable[index]);
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = SkPixel32ToPixel16(ctable[index]);
+ }
+}
+
+static void swizzle_small_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ SkPMColor* dst = (SkPMColor*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = ctable[index];
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = ctable[index];
+ }
+}
+
+// kIndex
+
+static void swizzle_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ dst[x] = c;
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_n32_skipZ(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ if (c != 0) {
+ dst[x] = c;
+ }
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPixel32ToPixel16(ctable[*src]);
+ src += deltaSrc;
+ }
+}
+
+// kGray
+
+static void swizzle_gray_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB32NoCheck(0xFF, *src, *src, *src);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_gray_to_n32(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::gray_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_gray_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kGrayAlpha
+
+static void swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = SkPackARGB32NoCheck(src[1], src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_RGBA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ uint8_t pmgray = SkMulDiv255Round(src[1], src[0]);
+ dst32[x] = SkPackARGB32NoCheck(src[1], pmgray, pmgray, pmgray);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between rgb and bgr.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_rgbA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_grayalpha_to_a8(void* dst, const uint8_t* src, int width, int bpp,
+ int deltaSrc, int offset, const SkPMColor[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*)dst;
+ for (int x = 0; x < width; ++x) {
+ dst8[x] = src[1]; // src[0] is gray, ignored
+ src += deltaSrc;
+ }
+}
+
+// kBGR
+
+static void swizzle_bgr_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[2], src[1], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kRGB
+
+static void swizzle_rgb_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_RGBA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_BGRA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgb_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void fast_swizzle_rgb_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_BGR1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_rgb_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+// kRGBA
+
+static void swizzle_rgba_to_rgba_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_rgba(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba_to_bgra_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_bgra(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_rgba_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_rgbA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void fast_swizzle_rgba_to_bgra_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_bgrA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void swizzle_rgba_to_bgra_unpremul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint32_t* SK_RESTRICT dst = reinterpret_cast<uint32_t*>(dstRow);
+ for (int x = 0; x < dstWidth; x++) {
+ unsigned alpha = src[3];
+ dst[x] = SkPackARGB_as_BGRA(alpha, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_bgra_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_BGRA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+// 16-bits per component kRGB and kRGBA
+
+static void swizzle_rgb16_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return 0xFF000000 | (ptr[4] << 16) | (ptr[2] << 8) | ptr[0];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb16_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return 0xFF000000 | (ptr[0] << 16) | (ptr[2] << 8) | ptr[4];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb16_to_565(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to565 = [](const uint8_t* ptr) {
+ return SkPack888ToRGB16(ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint16_t* dst16 = (uint16_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst16[x] = strip16to565(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_rgba_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return (ptr[6] << 24) | (ptr[4] << 16) | (ptr[2] << 8) | ptr[0];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_rgba_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto stripAndPremul16to8 = [](const uint8_t* ptr) {
+ return premultiply_argb_as_rgba(ptr[6], ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = stripAndPremul16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_bgra_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return (ptr[6] << 24) | (ptr[0] << 16) | (ptr[2] << 8) | ptr[4];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_bgra_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto stripAndPremul16to8 = [](const uint8_t* ptr) {
+ return premultiply_argb_as_bgra(ptr[6], ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = stripAndPremul16to8(src);
+ src += deltaSrc;
+ }
+}
+
+// kCMYK
+//
+// CMYK is stored as four bytes per pixel.
+//
+// We will implement a crude conversion from CMYK -> RGB using formulas
+// from easyrgb.com.
+//
+// CMYK -> CMY
+// C = C * (1 - K) + K
+// M = M * (1 - K) + K
+// Y = Y * (1 - K) + K
+//
+// libjpeg actually gives us inverted CMYK, so we must subtract the
+// original terms from 1.
+// CMYK -> CMY
+// C = (1 - C) * (1 - (1 - K)) + (1 - K)
+// M = (1 - M) * (1 - (1 - K)) + (1 - K)
+// Y = (1 - Y) * (1 - (1 - K)) + (1 - K)
+//
+// Simplifying the above expression.
+// CMYK -> CMY
+// C = 1 - CK
+// M = 1 - MK
+// Y = 1 - YK
+//
+// CMY -> RGB
+// R = (1 - C) * 255
+// G = (1 - M) * 255
+// B = (1 - Y) * 255
+//
+// Therefore the full conversion is below. This can be verified at
+// www.rapidtables.com (assuming inverted CMYK).
+// CMYK -> RGB
+// R = C * K * 255
+// G = M * K * 255
+// B = Y * K * 255
+//
+// As a final note, we have treated the CMYK values as if they were on
+// a scale from 0-1, when in fact they are 8-bit ints scaling from 0-255.
+// We must divide each CMYK component by 255 to obtain the true conversion
+// we should perform.
+// CMYK -> RGB
+// R = C * K / 255
+// G = M * K / 255
+// B = Y * K / 255
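+//
+// Worked example (illustrative): an inverted-CMYK source pixel
+// (C, M, Y, K) = (200, 100, 50, 230) becomes
+// R = SkMulDiv255Round(200, 230) = 180
+// G = SkMulDiv255Round(100, 230) = 90
+// B = SkMulDiv255Round(50, 230) = 45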
+static void swizzle_cmyk_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_RGBA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_cmyk_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_BGRA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_cmyk_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_RGB1((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void fast_swizzle_cmyk_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_BGR1((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void swizzle_cmyk_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPack888ToRGB16(r, g, b);
+ src += deltaSrc;
+ }
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeadingGrayAlphaZerosThen(
+ void* dst, const uint8_t* src, int width,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ const uint16_t* src16 = (const uint16_t*) (src + offset);
+ uint32_t* dst32 = (uint32_t*) dst;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FF which is not zero but becomes zero after premultiplication.
+ while (width > 0 && *src16 == 0x0000) {
+ width--;
+ dst32++;
+ src16 += deltaSrc / 2;
+ }
+ proc(dst32, (const uint8_t*)src16, width, bpp, deltaSrc, 0, ctable);
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeading8888ZerosThen(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ auto src32 = (const uint32_t*)(src+offset);
+ auto dst32 = (uint32_t*)dstRow;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FFFFFF which is not zero but becomes zero after premultiplication.
+ while (dstWidth > 0 && *src32 == 0x00000000) {
+ dstWidth--;
+ dst32++;
+ src32 += deltaSrc/4;
+ }
+ proc(dst32, (const uint8_t*)src32, dstWidth, bpp, deltaSrc, 0, ctable);
+}
+
+std::unique_ptr<SkSwizzler> SkSwizzler::MakeSimple(int srcBPP, const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ RowProc proc = nullptr;
+ switch (srcBPP) {
+ case 1: // kGray_8_SkColorType
+ proc = &sample1;
+ break;
+ case 2: // kRGB_565_SkColorType
+ proc = &sample2;
+ break;
+ case 4: // kRGBA_8888_SkColorType
+ // kBGRA_8888_SkColorType
+ proc = &sample4;
+ break;
+ case 6: // 16 bit PNG no alpha
+ proc = &sample6;
+ break;
+ case 8: // 16 bit PNG with alpha
+ proc = &sample8;
+ break;
+ default:
+ return nullptr;
+ }
+
+ return Make(dstInfo, &copy, proc, nullptr /*ctable*/, srcBPP,
+ dstInfo.bytesPerPixel(), options, nullptr /*frame*/);
+}
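+
+// Illustrative note: a decode that needs no format conversion, e.g. 565
+// encoded data to a kRGB_565_SkColorType destination, takes the srcBPP == 2
+// case above, so unsampled rows run through `copy` and sampled rows through
+// `sample2`.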
+
+std::unique_ptr<SkSwizzler> SkSwizzler::Make(const SkEncodedInfo& encodedInfo,
+ const SkPMColor* ctable,
+ const SkImageInfo& dstInfo,
+ const SkCodec::Options& options,
+ const SkIRect* frame) {
+ if (SkEncodedInfo::kPalette_Color == encodedInfo.color() && nullptr == ctable) {
+ return nullptr;
+ }
+
+ RowProc fastProc = nullptr;
+ RowProc proc = nullptr;
+ SkCodec::ZeroInitialized zeroInit = options.fZeroInitialized;
+ const bool premultiply = (SkEncodedInfo::kOpaque_Alpha != encodedInfo.alpha()) &&
+ (kPremul_SkAlphaType == dstInfo.alphaType());
+
+ switch (encodedInfo.color()) {
+ case SkEncodedInfo::kGray_Color:
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_bit_to_n32;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bit_to_565;
+ break;
+ case kGray_8_SkColorType:
+ proc = &swizzle_bit_to_grayscale;
+ break;
+ case kRGBA_F16_SkColorType:
+ proc = &swizzle_bit_to_f16;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_gray_to_n32;
+ fastProc = &fast_swizzle_gray_to_n32;
+ break;
+ case kGray_8_SkColorType:
+ proc = &sample1;
+ fastProc = &copy;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_gray_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kXAlpha_Color:
+ case SkEncodedInfo::kGrayAlpha_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_premul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_premul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_premul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_unpremul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_unpremul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_unpremul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_unpremul;
+ }
+ }
+ break;
+ case kAlpha_8_SkColorType:
+ proc = &swizzle_grayalpha_to_a8;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kPalette_Color:
+ // We assume that the color table is premultiplied and swizzled
+ // as desired.
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ case 2:
+ case 4:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_small_index_to_n32;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_small_index_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &swizzle_index_to_n32_skipZ;
+ } else {
+ proc = &swizzle_index_to_n32;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_index_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::k565_Color:
+ // Treat 565 exactly like RGB (since it's still encoded as 8 bits per component).
+ // We just mark as 565 when we have a hint that there are only 5/6/5 "significant"
+ // bits in each channel.
+ case SkEncodedInfo::kRGB_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_rgba;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_bgra;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_565;
+ break;
+ }
+
+ proc = &swizzle_rgb_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kRGBA_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = premultiply ? &swizzle_rgba16_to_rgba_premul :
+ &swizzle_rgba16_to_rgba_unpremul;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = premultiply ? &swizzle_rgba16_to_bgra_premul :
+ &swizzle_rgba16_to_bgra_unpremul;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGR_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRX_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRA_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kRGBA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kInvertedCMYK_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_rgba;
+ fastProc = &fast_swizzle_cmyk_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_bgra;
+ fastProc = &fast_swizzle_cmyk_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_cmyk_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+
+    // Store bpp in bytes if bitsPerPixel is a multiple of 8; otherwise store bits.
+ uint8_t bitsPerPixel = encodedInfo.bitsPerPixel();
+ int srcBPP = SkIsAlign8(bitsPerPixel) ? bitsPerPixel / 8 : bitsPerPixel;
+ int dstBPP = dstInfo.bytesPerPixel();
+ return Make(dstInfo, fastProc, proc, ctable, srcBPP, dstBPP, options, frame);
+}
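+
+// Illustrative examples of the bpp bookkeeping above: 24-bit RGB has
+// bitsPerPixel == 24, a multiple of 8, so srcBPP is stored as 3 bytes; a
+// 4-bit palette image is not byte-aligned, so srcBPP stays 4 bits.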
+
+std::unique_ptr<SkSwizzler> SkSwizzler::Make(const SkImageInfo& dstInfo,
+ RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcBPP,
+ int dstBPP, const SkCodec::Options& options, const SkIRect* frame) {
+ int srcOffset = 0;
+ int srcWidth = dstInfo.width();
+ int dstOffset = 0;
+ int dstWidth = srcWidth;
+ if (options.fSubset) {
+ // We do not currently support subset decodes for image types that may have
+ // frames (gif).
+ SkASSERT(!frame);
+ srcOffset = options.fSubset->left();
+ srcWidth = options.fSubset->width();
+ dstWidth = srcWidth;
+ } else if (frame) {
+ dstOffset = frame->left();
+ srcWidth = frame->width();
+ }
+
+ return std::unique_ptr<SkSwizzler>(new SkSwizzler(fastProc, proc, ctable, srcOffset, srcWidth,
+ dstOffset, dstWidth, srcBPP, dstBPP));
+}
+
+SkSwizzler::SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP)
+ : fFastProc(fastProc)
+ , fSlowProc(proc)
+ , fActualProc(fFastProc ? fFastProc : fSlowProc)
+ , fColorTable(ctable)
+ , fSrcOffset(srcOffset)
+ , fDstOffset(dstOffset)
+ , fSrcOffsetUnits(srcOffset * srcBPP)
+ , fDstOffsetBytes(dstOffset * dstBPP)
+ , fSrcWidth(srcWidth)
+ , fDstWidth(dstWidth)
+ , fSwizzleWidth(srcWidth)
+ , fAllocatedWidth(dstWidth)
+ , fSampleX(1)
+ , fSrcBPP(srcBPP)
+ , fDstBPP(dstBPP)
+{}
+
+int SkSwizzler::onSetSampleX(int sampleX) {
+ SkASSERT(sampleX > 0);
+
+ fSampleX = sampleX;
+ fDstOffsetBytes = (fDstOffset / sampleX) * fDstBPP;
+ fSwizzleWidth = get_scaled_dimension(fSrcWidth, sampleX);
+ fAllocatedWidth = get_scaled_dimension(fDstWidth, sampleX);
+
+ int frameSampleX = sampleX;
+ if (fSrcWidth < fDstWidth) {
+ // Although SkSampledCodec adjusted sampleX so that it will never be
+ // larger than the width of the image (or subset, if applicable), it
+ // doesn't account for the width of a subset frame (i.e. gif). As a
+ // result, get_start_coord(sampleX) could result in fSrcOffsetUnits
+ // being wider than fSrcWidth. Compute a sampling rate based on the
+ // frame width to ensure that fSrcOffsetUnits is sensible.
+ frameSampleX = fSrcWidth / fSwizzleWidth;
+ }
+ fSrcOffsetUnits = (get_start_coord(frameSampleX) + fSrcOffset) * fSrcBPP;
+
+ if (fDstOffsetBytes > 0) {
+ const size_t dstSwizzleBytes = fSwizzleWidth * fDstBPP;
+ const size_t dstAllocatedBytes = fAllocatedWidth * fDstBPP;
+ if (fDstOffsetBytes + dstSwizzleBytes > dstAllocatedBytes) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkAndroidFrameworkUtils::SafetyNetLog("118143775");
+#endif
+ SkASSERT(dstSwizzleBytes <= dstAllocatedBytes);
+ fDstOffsetBytes = dstAllocatedBytes - dstSwizzleBytes;
+ }
+ }
+
+ // The optimized swizzler functions do not support sampling. Sampled swizzles
+ // are already fast because they skip pixels. We haven't seen a situation
+ // where speeding up sampling has a significant impact on total decode time.
+ if (1 == fSampleX && fFastProc) {
+ fActualProc = fFastProc;
+ } else {
+ fActualProc = fSlowProc;
+ }
+
+ return fAllocatedWidth;
+}
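+
+// Worked example (illustrative): a full-width decode with fSrcWidth ==
+// fDstWidth == 10 and sampleX == 3 yields fSwizzleWidth ==
+// get_scaled_dimension(10, 3) == 3 and a start coordinate of
+// get_start_coord(3) == 1, so source pixels 1, 4, and 7 are written.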
+
+void SkSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
+ SkASSERT(nullptr != dst && nullptr != src);
+ fActualProc(SkTAddOffset<void>(dst, fDstOffsetBytes), src, fSwizzleWidth, fSrcBPP,
+ fSampleX * fSrcBPP, fSrcOffsetUnits, fColorTable);
+}
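+
+// Usage sketch (illustrative; `swizzler`, `dst`, `rowBytes`, `scaledHeight`,
+// and `srcRow()` are hypothetical stand-ins for a codec's own state):
+//
+//     void* dstRow = dst;
+//     for (int y = 0; y < scaledHeight; y++) {
+//         swizzler->swizzle(dstRow, srcRow(y));   // convert one scanline
+//         dstRow = SkTAddOffset<void>(dstRow, rowBytes);
+//     }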
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.h b/gfx/skia/skia/src/codec/SkSwizzler.h
new file mode 100644
index 0000000000..c71b93a464
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_DEFINED
+#define SkSwizzler_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "src/codec/SkSampler.h"
+
+class SkSwizzler : public SkSampler {
+public:
+ /**
+ * Create a new SkSwizzler.
+ * @param encodedInfo Description of the format of the encoded data.
+ * @param ctable Unowned pointer to an array of up to 256 colors for an
+ * index source.
+ * @param dstInfo Describes the destination.
+ * @param options Contains partial scanline information and whether the dst is zero-
+ * initialized.
+ * @param frame Is non-NULL if the source pixels are part of an image
+ * frame that is a subset of the full image.
+ *
+ * Note that a deeper discussion of partial scanline subsets and image frame
+ * subsets is below. Currently, we do not support both simultaneously. If
+ * options->fSubset is non-NULL, frame must be NULL.
+ *
+ * @return A new SkSwizzler or nullptr on failure.
+ */
+ static std::unique_ptr<SkSwizzler> Make(const SkEncodedInfo& encodedInfo,
+ const SkPMColor* ctable, const SkImageInfo& dstInfo, const SkCodec::Options&,
+ const SkIRect* frame = nullptr);
+
+ /**
+ * Create a simplified swizzler that does not need to do format conversion. The swizzler
+ * only needs to sample and/or subset.
+ *
+ * @param srcBPP Bytes per pixel of the source.
+ * @param dstInfo Describes the destination.
+ * @param options Contains partial scanline information and whether the dst is zero-
+ * initialized.
+ * @return A new SkSwizzler or nullptr on failure.
+ */
+ static std::unique_ptr<SkSwizzler> MakeSimple(int srcBPP, const SkImageInfo& dstInfo,
+ const SkCodec::Options&);
+
+ /**
+ * Swizzle a line. Generally this will be called height times, once
+ * for each row of source.
+ * By allowing the caller to pass in the dst pointer, we give the caller
+ * flexibility to use the swizzler even when the encoded data does not
+ * store the rows in order. This also improves usability for scaled and
+ * subset decodes.
+ * @param dst Where we write the output.
+ * @param src The next row of the source data.
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ int fillWidth() const override {
+ return fAllocatedWidth;
+ }
+
+ /**
+ * If fSampleX > 1, the swizzler is sampling every fSampleX'th pixel and
+ * discarding the rest.
+ *
+ * This getter is currently used by SkBmpStandardCodec for Bmp-in-Ico decodes.
+ * Ideally, the subclasses of SkCodec would have no knowledge of sampling, but
+ * this allows us to apply a transparency mask to pixels after swizzling.
+ */
+ int sampleX() const { return fSampleX; }
+
+ /**
+ * Returns the actual number of pixels written to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ */
+ int swizzleWidth() const { return fSwizzleWidth; }
+
+ /**
+ * Returns the byte offset at which we write to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ */
+ size_t swizzleOffsetBytes() const { return fDstOffsetBytes; }
+
+private:
+
+ /**
+ * Method for converting raw data to Skia pixels.
+ * @param dstRow Row in which to write the resulting pixels.
+ * @param src Row of src data, in format specified by SrcConfig
+ * @param dstWidth Width in pixels of the destination
+     *  @param bpp       Size of one source pixel: bytesPerPixel when
+     *                   bitsPerPixel % 8 == 0; otherwise, bitsPerPixel.
+ * @param deltaSrc bpp * sampleX
+ * @param ctable Colors (used for kIndex source).
+ * @param offset The offset before the first pixel to sample.
+ Is in bytes or bits based on what deltaSrc is in.
+ */
+ typedef void (*RowProc)(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeading8888ZerosThen(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeadingGrayAlphaZerosThen(void* dst, const uint8_t* src, int width, int bpp,
+ int deltaSrc, int offset, const SkPMColor ctable[]);
+
+ // May be NULL. We have not implemented optimized functions for all supported transforms.
+ const RowProc fFastProc;
+ // Always non-NULL. Supports sampling.
+ const RowProc fSlowProc;
+    // The actual RowProc we are using. This depends on whether fFastProc is
+    // non-NULL and on whether we are sampling.
+ RowProc fActualProc;
+
+ const SkPMColor* fColorTable; // Unowned pointer
+
+ // Subset Swizzles
+ // There are two types of subset swizzles that we support. We do not
+ // support both at the same time.
+ // TODO: If we want to support partial scanlines for gifs (which may
+ // use frame subsets), we will need to support both subsetting
+ // modes at the same time.
+ // (1) Partial Scanlines
+ // The client only wants to write a subset of the source pixels
+    //      to the destination. This subset is specified to Make
+ // using options->fSubset. We will store subset information in
+ // the following fields.
+ //
+ // fSrcOffset: The starting pixel of the source.
+ // fSrcOffsetUnits: Derived from fSrcOffset with two key
+ // differences:
+ // (1) This takes the size of source pixels into
+ // account by multiplying by fSrcBPP. This may
+ // be measured in bits or bytes depending on
+ // which is natural for the SrcConfig.
+ // (2) If we are sampling, this will be larger
+ // than fSrcOffset * fSrcBPP, since sampling
+ // implies that we will skip some pixels.
+ // fDstOffset: Will be zero. There is no destination offset
+ // for this type of subset.
+ // fDstOffsetBytes: Will be zero.
+ // fSrcWidth: The width of the desired subset of source
+ // pixels, before any sampling is performed.
+ // fDstWidth: Will be equal to fSrcWidth, since this is also
+ // calculated before any sampling is performed.
+ // For this type of subset, the destination width
+ // matches the desired subset of the source.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth/fDstWidth.
+ // fAllocatedWidth: Will be equal to fSwizzleWidth. For this type
+ // of subset, the number of pixels written is the
+ // same as the actual width of the destination.
+ // (2) Frame Subset
+ // The client will decode the entire width of the source into a
+ // subset of destination memory. This subset is specified to
+    //      Make in the "frame" parameter. We store subset
+ // information in the following fields.
+ //
+ // fSrcOffset: Will be zero. The starting pixel of the source.
+ // fSrcOffsetUnits: Will only be non-zero if we are sampling,
+ // since sampling implies that we will skip some
+ // pixels. Note that this is measured in bits
+ // or bytes depending on which is natural for
+ // SrcConfig.
+ // fDstOffset: First pixel to write in destination.
+ // fDstOffsetBytes: fDstOffset * fDstBPP.
+ // fSrcWidth: The entire width of the source pixels, before
+ // any sampling is performed.
+ // fDstWidth: The entire width of the destination memory,
+ // before any sampling is performed.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth.
+ // fAllocatedWidth: The actual number of pixels in destination
+ // memory. This is a scaled version of
+ // fDstWidth.
+ //
+ // If we are not subsetting, these fields are more straightforward.
+    //  fSrcOffset = fDstOffset = fDstOffsetBytes = 0
+    //  fSrcOffsetUnits may be non-zero (we will skip the first few pixels when sampling)
+    //  fSrcWidth = fDstWidth = Full original width
+    //  fSwizzleWidth = fAllocatedWidth = Scaled width (if we are sampling)
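+    //
+    // Worked example (illustrative values): for a partial scanline subset
+    // of a 100 pixel wide image, with options->fSubset starting at x = 10,
+    // subset width 50, no sampling, and 4-byte source pixels:
+    //  fSrcOffset = 10, fSrcOffsetUnits = 40 (10 * fSrcBPP),
+    //  fDstOffset = fDstOffsetBytes = 0,
+    //  fSrcWidth = fDstWidth = 50,
+    //  fSwizzleWidth = fAllocatedWidth = 50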
+ const int fSrcOffset;
+ const int fDstOffset;
+ int fSrcOffsetUnits;
+ int fDstOffsetBytes;
+ const int fSrcWidth;
+ const int fDstWidth;
+ int fSwizzleWidth;
+ int fAllocatedWidth;
+
+ int fSampleX; // Step between X samples
+ const int fSrcBPP; // Bits/bytes per pixel for the SrcConfig
+                            // if bitsPerPixel % 8 == 0
+                            //     fSrcBPP is bytesPerPixel
+                            // else
+                            //     fSrcBPP is bitsPerPixel
+ const int fDstBPP; // Bytes per pixel for the destination color type
+
+ SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP);
+ static std::unique_ptr<SkSwizzler> Make(const SkImageInfo& dstInfo, RowProc fastProc,
+ RowProc proc, const SkPMColor* ctable, int srcBPP, int dstBPP,
+ const SkCodec::Options& options, const SkIRect* frame);
+
+ int onSetSampleX(int) override;
+
+};
+#endif // SkSwizzler_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkWbmpCodec.cpp b/gfx/skia/skia/src/codec/SkWbmpCodec.cpp
new file mode 100644
index 0000000000..8190c50322
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWbmpCodec.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkWbmpCodec.h"
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkColorTable.h"
+
+// Each bit represents a pixel, so width is actually a number of bits.
+// A row will always be stored in bytes, so we round width up to the
+// nearest multiple of 8 to get the number of bits actually in the row.
+// We then divide by 8 to convert to bytes.
+static inline size_t get_src_row_bytes(int width) {
+ return SkAlign8(width) >> 3;
+}
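+
+// For example, get_src_row_bytes(10) == 2: a 10 pixel row needs 10 bits,
+// SkAlign8 rounds that up to 16 bits, and 16 >> 3 == 2 bytes.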
+
+static inline bool valid_color_type(const SkImageInfo& dstInfo) {
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kGray_8_SkColorType:
+ case kRGB_565_SkColorType:
+ return true;
+ case kRGBA_F16_SkColorType:
+ return dstInfo.colorSpace();
+ default:
+ return false;
+ }
+}
+
+static bool read_byte(SkStream* stream, uint8_t* data)
+{
+ return stream->read(data, 1) == 1;
+}
+
+// http://en.wikipedia.org/wiki/Variable-length_quantity
+static bool read_mbf(SkStream* stream, uint64_t* value) {
+ uint64_t n = 0;
+ uint8_t data;
+ const uint64_t kLimit = 0xFE00000000000000;
+ SkASSERT(kLimit == ~((~static_cast<uint64_t>(0)) >> 7));
+ do {
+ if (n & kLimit) { // Will overflow on shift by 7.
+ return false;
+ }
+ if (stream->read(&data, 1) != 1) {
+ return false;
+ }
+ n = (n << 7) | (data & 0x7F);
+ } while (data & 0x80);
+ *value = n;
+ return true;
+}
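+
+// As a worked example of read_mbf, the byte sequence 0x96 0x78 decodes as
+//   ((0x96 & 0x7F) << 7) | (0x78 & 0x7F) == (0x16 << 7) | 0x78 == 2936,
+// since 0x96 has the continuation bit (0x80) set and 0x78 does not.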
+
+static bool read_header(SkStream* stream, SkISize* size) {
+ {
+ uint8_t data;
+ if (!read_byte(stream, &data) || data != 0) { // unknown type
+ return false;
+ }
+ if (!read_byte(stream, &data) || (data & 0x9F)) { // skip fixed header
+ return false;
+ }
+ }
+
+ uint64_t width, height;
+ if (!read_mbf(stream, &width) || width > 0xFFFF || !width) {
+ return false;
+ }
+ if (!read_mbf(stream, &height) || height > 0xFFFF || !height) {
+ return false;
+ }
+ if (size) {
+ *size = SkISize::Make(SkToS32(width), SkToS32(height));
+ }
+ return true;
+}
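+
+// As a worked example of read_header, the bytes 0x00 0x00 0x10 0x10 form a
+// valid header for a 16x16 image: a type byte of 0, a fixed-header byte of
+// 0, then single-byte multi-byte-field values of 16 for width and height.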
+
+bool SkWbmpCodec::onRewind() {
+ return read_header(this->stream(), nullptr);
+}
+
+bool SkWbmpCodec::readRow(uint8_t* row) {
+ return this->stream()->read(row, fSrcRowBytes) == fSrcRowBytes;
+}
+
+SkWbmpCodec::SkWbmpCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream)
+ // Wbmp does not need a colorXform, so choose an arbitrary srcFormat.
+ : INHERITED(std::move(info), skcms_PixelFormat(),
+ std::move(stream))
+ , fSrcRowBytes(get_src_row_bytes(this->dimensions().width()))
+ , fSwizzler(nullptr)
+{}
+
+SkEncodedImageFormat SkWbmpCodec::onGetEncodedFormat() const {
+ return SkEncodedImageFormat::kWBMP;
+}
+
+bool SkWbmpCodec::conversionSupported(const SkImageInfo& dst, bool srcIsOpaque,
+ bool /*needsColorXform*/) {
+ return valid_color_type(dst) && valid_alpha(dst.alphaType(), srcIsOpaque);
+}
+
+SkCodec::Result SkWbmpCodec::onGetPixels(const SkImageInfo& info,
+ void* dst,
+ size_t rowBytes,
+ const Options& options,
+ int* rowsDecoded) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ // Initialize the swizzler
+ std::unique_ptr<SkSwizzler> swizzler = SkSwizzler::Make(this->getEncodedInfo(), nullptr, info,
+ options);
+ SkASSERT(swizzler);
+
+ // Perform the decode
+ SkISize size = info.dimensions();
+ SkAutoTMalloc<uint8_t> src(fSrcRowBytes);
+ void* dstRow = dst;
+ for (int y = 0; y < size.height(); ++y) {
+ if (!this->readRow(src.get())) {
+ *rowsDecoded = y;
+ return kIncompleteInput;
+ }
+ swizzler->swizzle(dstRow, src.get());
+ dstRow = SkTAddOffset<void>(dstRow, rowBytes);
+ }
+ return kSuccess;
+}
+
+bool SkWbmpCodec::IsWbmp(const void* buffer, size_t bytesRead) {
+ SkMemoryStream stream(buffer, bytesRead, false);
+ return read_header(&stream, nullptr);
+}
+
+std::unique_ptr<SkCodec> SkWbmpCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ SkISize size;
+ if (!read_header(stream.get(), &size)) {
+        // This already succeeded in IsWbmp, so this stream was corrupted in or
+        // after rewind.
+ *result = kCouldNotRewind;
+ return nullptr;
+ }
+ *result = kSuccess;
+ auto info = SkEncodedInfo::Make(size.width(), size.height(), SkEncodedInfo::kGray_Color,
+ SkEncodedInfo::kOpaque_Alpha, 1);
+ return std::unique_ptr<SkCodec>(new SkWbmpCodec(std::move(info), std::move(stream)));
+}
+
+int SkWbmpCodec::onGetScanlines(void* dst, int count, size_t dstRowBytes) {
+ void* dstRow = dst;
+ for (int y = 0; y < count; ++y) {
+ if (!this->readRow(fSrcBuffer.get())) {
+ return y;
+ }
+ fSwizzler->swizzle(dstRow, fSrcBuffer.get());
+ dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
+ }
+ return count;
+}
+
+bool SkWbmpCodec::onSkipScanlines(int count) {
+ const size_t bytesToSkip = count * fSrcRowBytes;
+ return this->stream()->skip(bytesToSkip) == bytesToSkip;
+}
+
+SkCodec::Result SkWbmpCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ fSwizzler = SkSwizzler::Make(this->getEncodedInfo(), nullptr, dstInfo, options);
+ SkASSERT(fSwizzler);
+
+ fSrcBuffer.reset(fSrcRowBytes);
+
+ return kSuccess;
+}
diff --git a/gfx/skia/skia/src/codec/SkWbmpCodec.h b/gfx/skia/skia/src/codec/SkWbmpCodec.h
new file mode 100644
index 0000000000..30af5d7a5f
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWbmpCodec.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodec_wbmp_DEFINED
+#define SkCodec_wbmp_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorSpace.h"
+#include "src/codec/SkSwizzler.h"
+
+class SkWbmpCodec final : public SkCodec {
+public:
+ static bool IsWbmp(const void*, size_t);
+
+ /*
+ * Assumes IsWbmp was called and returned true
+ * Creates a wbmp codec
+ * Takes ownership of the stream
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+
+protected:
+ SkEncodedImageFormat onGetEncodedFormat() const override;
+ Result onGetPixels(const SkImageInfo&, void*, size_t,
+ const Options&, int*) override;
+ bool onRewind() override;
+ bool conversionSupported(const SkImageInfo& dst, bool srcIsOpaque,
+ bool needsXform) override;
+ // No need to Xform; all pixels are either black or white.
+ bool usesColorXform() const override { return false; }
+private:
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler || !createIfNecessary);
+ return fSwizzler.get();
+ }
+
+ /*
+ * Read a src row from the encoded stream
+ */
+ bool readRow(uint8_t* row);
+
+ SkWbmpCodec(SkEncodedInfo&&, std::unique_ptr<SkStream>);
+
+ const size_t fSrcRowBytes;
+
+ // Used for scanline decodes:
+ std::unique_ptr<SkSwizzler> fSwizzler;
+ SkAutoTMalloc<uint8_t> fSrcBuffer;
+
+ int onGetScanlines(void* dst, int count, size_t dstRowBytes) override;
+ bool onSkipScanlines(int count) override;
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options) override;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif // SkCodec_wbmp_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkWebpCodec.cpp b/gfx/skia/skia/src/codec/SkWebpCodec.cpp
new file mode 100644
index 0000000000..a90d452b68
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpCodec.cpp
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkWebpCodec.h"
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/codec/SkCodecAnimationPriv.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkParseEncodedOrigin.h"
+#include "src/codec/SkSampler.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkStreamPriv.h"
+
+// A WebP decoder on top of (subset of) libwebp
+// For more information on WebP image format, and libwebp library, see:
+// https://code.google.com/speed/webp/
+// http://www.webmproject.org/code/#libwebp-webp-image-library
+// https://chromium.googlesource.com/webm/libwebp
+
+// If moving libwebp out of skia source tree, path for webp headers must be
+// updated accordingly. Here, we enforce using local copy in webp sub-directory.
+#include "webp/decode.h"
+#include "webp/demux.h"
+#include "webp/encode.h"
+
+bool SkWebpCodec::IsWebp(const void* buf, size_t bytesRead) {
+ // WEBP starts with the following:
+ // RIFFXXXXWEBPVP
+ // Where XXXX is unspecified.
+ const char* bytes = static_cast<const char*>(buf);
+ return bytesRead >= 14 && !memcmp(bytes, "RIFF", 4) && !memcmp(&bytes[8], "WEBPVP", 6);
+}
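+
+// For example, a lossy WebP file begins
+//   'R' 'I' 'F' 'F' <4-byte riff size> 'W' 'E' 'B' 'P' 'V' 'P' '8' ' '
+// where the "VP" matched above is the start of the "VP8 " (lossy),
+// "VP8L" (lossless), or "VP8X" (extended) chunk tag.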
+
+// Parse headers of RIFF container, and check for valid Webp (VP8) content.
+// Returns an SkWebpCodec on success
+std::unique_ptr<SkCodec> SkWebpCodec::MakeFromStream(std::unique_ptr<SkStream> stream,
+ Result* result) {
+ // Webp demux needs a contiguous data buffer.
+ sk_sp<SkData> data = nullptr;
+ if (stream->getMemoryBase()) {
+ // It is safe to make without copy because we'll hold onto the stream.
+ data = SkData::MakeWithoutCopy(stream->getMemoryBase(), stream->getLength());
+ } else {
+ data = SkCopyStreamToData(stream.get());
+
+ // If we are forced to copy the stream to a data, we can go ahead and delete the stream.
+ stream.reset(nullptr);
+ }
+
+ // It's a little strange that the |demux| will outlive |webpData|, though it needs the
+ // pointer in |webpData| to remain valid. This works because the pointer remains valid
+ // until the SkData is freed.
+ WebPData webpData = { data->bytes(), data->size() };
+ WebPDemuxState state;
+ SkAutoTCallVProc<WebPDemuxer, WebPDemuxDelete> demux(WebPDemuxPartial(&webpData, &state));
+ switch (state) {
+ case WEBP_DEMUX_PARSE_ERROR:
+ *result = kInvalidInput;
+ return nullptr;
+ case WEBP_DEMUX_PARSING_HEADER:
+ *result = kIncompleteInput;
+ return nullptr;
+ case WEBP_DEMUX_PARSED_HEADER:
+ case WEBP_DEMUX_DONE:
+ SkASSERT(demux);
+ break;
+ }
+
+ const int width = WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH);
+ const int height = WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT);
+
+ // Sanity check for image size that's about to be decoded.
+ {
+ const int64_t size = sk_64_mul(width, height);
+        // now check that, at 4 bytes per pixel, the byte size also fits in int32_t
+ if (!SkTFitsIn<int32_t>(size) || SkTo<int32_t>(size) > (0x7FFFFFFF >> 2)) {
+ *result = kInvalidInput;
+ return nullptr;
+ }
+ }
+
+ std::unique_ptr<SkEncodedInfo::ICCProfile> profile = nullptr;
+ {
+ WebPChunkIterator chunkIterator;
+ SkAutoTCallVProc<WebPChunkIterator, WebPDemuxReleaseChunkIterator> autoCI(&chunkIterator);
+ if (WebPDemuxGetChunk(demux, "ICCP", 1, &chunkIterator)) {
+ // FIXME: I think this could be MakeWithoutCopy
+ auto chunk = SkData::MakeWithCopy(chunkIterator.chunk.bytes, chunkIterator.chunk.size);
+ profile = SkEncodedInfo::ICCProfile::Make(std::move(chunk));
+ }
+ if (profile && profile->profile()->data_color_space != skcms_Signature_RGB) {
+ profile = nullptr;
+ }
+ }
+
+ SkEncodedOrigin origin = kDefault_SkEncodedOrigin;
+ {
+ WebPChunkIterator chunkIterator;
+ SkAutoTCallVProc<WebPChunkIterator, WebPDemuxReleaseChunkIterator> autoCI(&chunkIterator);
+ if (WebPDemuxGetChunk(demux, "EXIF", 1, &chunkIterator)) {
+ SkParseEncodedOrigin(chunkIterator.chunk.bytes, chunkIterator.chunk.size, &origin);
+ }
+ }
+
+ // Get the first frame and its "features" to determine the color and alpha types.
+ WebPIterator frame;
+ SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoFrame(&frame);
+ if (!WebPDemuxGetFrame(demux, 1, &frame)) {
+ *result = kIncompleteInput;
+ return nullptr;
+ }
+
+ WebPBitstreamFeatures features;
+ switch (WebPGetFeatures(frame.fragment.bytes, frame.fragment.size, &features)) {
+ case VP8_STATUS_OK:
+ break;
+ case VP8_STATUS_SUSPENDED:
+ case VP8_STATUS_NOT_ENOUGH_DATA:
+ *result = kIncompleteInput;
+ return nullptr;
+ default:
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ const bool hasAlpha = SkToBool(frame.has_alpha)
+ || frame.width != width || frame.height != height;
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ switch (features.format) {
+ case 0:
+ // This indicates a "mixed" format. We could see this for
+ // animated webps (multiple fragments).
+ // We could also guess kYUV here, but I think it makes more
+ // sense to guess kBGRA which is likely closer to the final
+ // output. Otherwise, we might end up converting
+ // BGRA->YUVA->BGRA.
+ // Fallthrough:
+ case 2:
+ // This is the lossless format (BGRA).
+ if (hasAlpha) {
+ color = SkEncodedInfo::kBGRA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ } else {
+ color = SkEncodedInfo::kBGRX_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case 1:
+ // This is the lossy format (YUV).
+ if (hasAlpha) {
+ color = SkEncodedInfo::kYUVA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ } else {
+ color = SkEncodedInfo::kYUV_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ default:
+ *result = kInvalidInput;
+ return nullptr;
+ }
+
+ *result = kSuccess;
+ SkEncodedInfo info = SkEncodedInfo::Make(width, height, color, alpha, 8, std::move(profile));
+ return std::unique_ptr<SkCodec>(new SkWebpCodec(std::move(info), std::move(stream),
+ demux.release(), std::move(data), origin));
+}
+
+static WEBP_CSP_MODE webp_decode_mode(SkColorType dstCT, bool premultiply) {
+ switch (dstCT) {
+ case kBGRA_8888_SkColorType:
+ return premultiply ? MODE_bgrA : MODE_BGRA;
+ case kRGBA_8888_SkColorType:
+ return premultiply ? MODE_rgbA : MODE_RGBA;
+ case kRGB_565_SkColorType:
+ return MODE_RGB_565;
+ default:
+ return MODE_LAST;
+ }
+}
+
+SkWebpCodec::Frame* SkWebpCodec::FrameHolder::appendNewFrame(bool hasAlpha) {
+ const int i = this->size();
+ fFrames.emplace_back(i, hasAlpha ? SkEncodedInfo::kUnpremul_Alpha
+ : SkEncodedInfo::kOpaque_Alpha);
+ return &fFrames[i];
+}
+
+bool SkWebpCodec::onGetValidSubset(SkIRect* desiredSubset) const {
+ if (!desiredSubset) {
+ return false;
+ }
+
+ if (!this->bounds().contains(*desiredSubset)) {
+ return false;
+ }
+
+ // As stated below, libwebp snaps to even left and top. Make sure top and left are even, so we
+ // decode this exact subset.
+ // Leave right and bottom unmodified, so we suggest a slightly larger subset than requested.
+ desiredSubset->fLeft = (desiredSubset->fLeft >> 1) << 1;
+ desiredSubset->fTop = (desiredSubset->fTop >> 1) << 1;
+ return true;
+}
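+
+// For example, a desired subset with fLeft == 3 and fTop == 5 is snapped to
+// (2, 4) above; since fRight and fBottom are left unmodified, the suggested
+// subset still covers every requested pixel.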
+
+int SkWebpCodec::onGetRepetitionCount() {
+ auto flags = WebPDemuxGetI(fDemux.get(), WEBP_FF_FORMAT_FLAGS);
+ if (!(flags & ANIMATION_FLAG)) {
+ return 0;
+ }
+
+ const int repCount = WebPDemuxGetI(fDemux.get(), WEBP_FF_LOOP_COUNT);
+ if (0 == repCount) {
+ return kRepetitionCountInfinite;
+ }
+
+ return repCount;
+}
+
+int SkWebpCodec::onGetFrameCount() {
+ auto flags = WebPDemuxGetI(fDemux.get(), WEBP_FF_FORMAT_FLAGS);
+ if (!(flags & ANIMATION_FLAG)) {
+ return 1;
+ }
+
+ const uint32_t oldFrameCount = fFrameHolder.size();
+ if (fFailed) {
+ return oldFrameCount;
+ }
+
+ const uint32_t frameCount = WebPDemuxGetI(fDemux, WEBP_FF_FRAME_COUNT);
+ if (oldFrameCount == frameCount) {
+ // We have already parsed this.
+ return frameCount;
+ }
+
+ fFrameHolder.reserve(frameCount);
+
+ for (uint32_t i = oldFrameCount; i < frameCount; i++) {
+ WebPIterator iter;
+ SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoIter(&iter);
+
+ if (!WebPDemuxGetFrame(fDemux.get(), i + 1, &iter)) {
+ fFailed = true;
+ break;
+ }
+
+ // libwebp only reports complete frames of an animated image.
+ SkASSERT(iter.complete);
+
+ Frame* frame = fFrameHolder.appendNewFrame(iter.has_alpha);
+ frame->setXYWH(iter.x_offset, iter.y_offset, iter.width, iter.height);
+ frame->setDisposalMethod(iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ?
+ SkCodecAnimation::DisposalMethod::kRestoreBGColor :
+ SkCodecAnimation::DisposalMethod::kKeep);
+ frame->setDuration(iter.duration);
+ if (WEBP_MUX_BLEND != iter.blend_method) {
+ frame->setBlend(SkCodecAnimation::Blend::kBG);
+ }
+ fFrameHolder.setAlphaAndRequiredFrame(frame);
+ }
+
+    return fFrameHolder.size();
+}
+
+const SkFrame* SkWebpCodec::FrameHolder::onGetFrame(int i) const {
+ return static_cast<const SkFrame*>(this->frame(i));
+}
+
+const SkWebpCodec::Frame* SkWebpCodec::FrameHolder::frame(int i) const {
+ SkASSERT(i >= 0 && i < this->size());
+ return &fFrames[i];
+}
+
+bool SkWebpCodec::onGetFrameInfo(int i, FrameInfo* frameInfo) const {
+ if (i >= fFrameHolder.size()) {
+ return false;
+ }
+
+ const Frame* frame = fFrameHolder.frame(i);
+ if (!frame) {
+ return false;
+ }
+
+ if (frameInfo) {
+ frameInfo->fRequiredFrame = frame->getRequiredFrame();
+ frameInfo->fDuration = frame->getDuration();
+ // libwebp only reports fully received frames for an
+ // animated image.
+ frameInfo->fFullyReceived = true;
+ frameInfo->fAlphaType = frame->hasAlpha() ? kUnpremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ frameInfo->fDisposalMethod = frame->getDisposalMethod();
+ }
+
+ return true;
+}
+
+static bool is_8888(SkColorType colorType) {
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Requires that the src input be unpremultiplied (or opaque).
+static void blend_line(SkColorType dstCT, void* dst,
+ SkColorType srcCT, const void* src,
+ SkAlphaType dstAt,
+ bool srcHasAlpha,
+ int width) {
+ SkRasterPipeline_MemoryCtx dst_ctx = { (void*)dst, 0 },
+ src_ctx = { (void*)src, 0 };
+
+ SkRasterPipeline_<256> p;
+
+ p.append_load_dst(dstCT, &dst_ctx);
+ if (kUnpremul_SkAlphaType == dstAt) {
+ p.append(SkRasterPipeline::premul_dst);
+ }
+
+ p.append_load(srcCT, &src_ctx);
+ if (srcHasAlpha) {
+ p.append(SkRasterPipeline::premul);
+ }
+
+ p.append(SkRasterPipeline::srcover);
+
+ if (kUnpremul_SkAlphaType == dstAt) {
+ p.append(SkRasterPipeline::unpremul);
+ }
+ p.append_store(dstCT, &dst_ctx);
+
+ p.run(0,0, width,1);
+}
+
+SkCodec::Result SkWebpCodec::onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const Options& options, int* rowsDecodedPtr) {
+ const int index = options.fFrameIndex;
+ SkASSERT(0 == index || index < fFrameHolder.size());
+ SkASSERT(0 == index || !options.fSubset);
+
+ WebPDecoderConfig config;
+ if (0 == WebPInitDecoderConfig(&config)) {
+ // ABI mismatch.
+ // FIXME: New enum for this?
+ return kInvalidInput;
+ }
+
+ // Free any memory associated with the buffer. Must be called last, so we declare it first.
+ SkAutoTCallVProc<WebPDecBuffer, WebPFreeDecBuffer> autoFree(&(config.output));
+
+ WebPIterator frame;
+ SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoFrame(&frame);
+ // If this succeeded in onGetFrameCount(), it should succeed again here.
+ SkAssertResult(WebPDemuxGetFrame(fDemux, index + 1, &frame));
+
+ const bool independent = index == 0 ? true :
+ (fFrameHolder.frame(index)->getRequiredFrame() == kNoFrame);
+ // Get the frameRect. libwebp will have already signaled an error if this is not fully
+ // contained by the canvas.
+ auto frameRect = SkIRect::MakeXYWH(frame.x_offset, frame.y_offset, frame.width, frame.height);
+ SkASSERT(this->bounds().contains(frameRect));
+ const bool frameIsSubset = frameRect != this->bounds();
+ if (independent && frameIsSubset) {
+ SkSampler::Fill(dstInfo, dst, rowBytes, options.fZeroInitialized);
+ }
+
+ int dstX = frameRect.x();
+ int dstY = frameRect.y();
+ int subsetWidth = frameRect.width();
+ int subsetHeight = frameRect.height();
+ if (options.fSubset) {
+ SkIRect subset = *options.fSubset;
+ SkASSERT(this->bounds().contains(subset));
+ SkASSERT(SkIsAlign2(subset.fLeft) && SkIsAlign2(subset.fTop));
+ SkASSERT(this->getValidSubset(&subset) && subset == *options.fSubset);
+
+ if (!SkIRect::Intersects(subset, frameRect)) {
+ return kSuccess;
+ }
+
+ int minXOffset = SkTMin(dstX, subset.x());
+ int minYOffset = SkTMin(dstY, subset.y());
+ dstX -= minXOffset;
+ dstY -= minYOffset;
+ frameRect.offset(-minXOffset, -minYOffset);
+ subset.offset(-minXOffset, -minYOffset);
+
+ // Just like we require that the requested subset x and y offset are even, libwebp
+ // guarantees that the frame x and y offset are even (it's actually impossible to specify
+ // an odd frame offset). So we can still guarantee that the adjusted offsets are even.
+ SkASSERT(SkIsAlign2(subset.fLeft) && SkIsAlign2(subset.fTop));
+
+ SkIRect intersection;
+ SkAssertResult(intersection.intersect(frameRect, subset));
+ subsetWidth = intersection.width();
+ subsetHeight = intersection.height();
+
+ config.options.use_cropping = 1;
+ config.options.crop_left = subset.x();
+ config.options.crop_top = subset.y();
+ config.options.crop_width = subsetWidth;
+ config.options.crop_height = subsetHeight;
+ }
+
+ // Ignore the frame size and offset when determining if scaling is necessary.
+ int scaledWidth = subsetWidth;
+ int scaledHeight = subsetHeight;
+ SkISize srcSize = options.fSubset ? options.fSubset->size() : this->dimensions();
+ if (srcSize != dstInfo.dimensions()) {
+ config.options.use_scaling = 1;
+
+ if (frameIsSubset) {
+ float scaleX = ((float) dstInfo.width()) / srcSize.width();
+ float scaleY = ((float) dstInfo.height()) / srcSize.height();
+
+ // We need to be conservative here and floor rather than round.
+ // Otherwise, we may find ourselves decoding off the end of memory.
+ dstX = scaleX * dstX;
+ scaledWidth = scaleX * scaledWidth;
+ dstY = scaleY * dstY;
+ scaledHeight = scaleY * scaledHeight;
+ if (0 == scaledWidth || 0 == scaledHeight) {
+ return kSuccess;
+ }
+ } else {
+ scaledWidth = dstInfo.width();
+ scaledHeight = dstInfo.height();
+ }
+
+ config.options.scaled_width = scaledWidth;
+ config.options.scaled_height = scaledHeight;
+ }
+
+ const bool blendWithPrevFrame = !independent && frame.blend_method == WEBP_MUX_BLEND
+ && frame.has_alpha;
+
+ SkBitmap webpDst;
+ auto webpInfo = dstInfo;
+ if (!frame.has_alpha) {
+ webpInfo = webpInfo.makeAlphaType(kOpaque_SkAlphaType);
+ }
+ if (this->colorXform()) {
+ // Swizzling between RGBA and BGRA is zero cost in a color transform. So when we have a
+ // color transform, we should decode to whatever is easiest for libwebp, and then let the
+ // color transform swizzle if necessary.
+ // Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost). Lossless webp is
+ // encoded as BGRA. This means decoding to BGRA is either faster or the same cost as RGBA.
+ webpInfo = webpInfo.makeColorType(kBGRA_8888_SkColorType);
+
+ if (webpInfo.alphaType() == kPremul_SkAlphaType) {
+ webpInfo = webpInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+ }
+
+ if ((this->colorXform() && !is_8888(dstInfo.colorType())) || blendWithPrevFrame) {
+ // We will decode the entire image and then perform the color transform. libwebp
+ // does not provide a row-by-row API. This is a shame particularly when we do not want
+ // 8888, since we will need to create another image sized buffer.
+ webpDst.allocPixels(webpInfo);
+ } else {
+ // libwebp can decode directly into the output memory.
+ webpDst.installPixels(webpInfo, dst, rowBytes);
+ }
+
+ config.output.colorspace = webp_decode_mode(webpInfo.colorType(),
+ frame.has_alpha && dstInfo.alphaType() == kPremul_SkAlphaType && !this->colorXform());
+ config.output.is_external_memory = 1;
+
+ config.output.u.RGBA.rgba = reinterpret_cast<uint8_t*>(webpDst.getAddr(dstX, dstY));
+ config.output.u.RGBA.stride = static_cast<int>(webpDst.rowBytes());
+ config.output.u.RGBA.size = webpDst.computeByteSize();
+
+ SkAutoTCallVProc<WebPIDecoder, WebPIDelete> idec(WebPIDecode(nullptr, 0, &config));
+ if (!idec) {
+ return kInvalidInput;
+ }
+
+ int rowsDecoded = 0;
+ SkCodec::Result result;
+ switch (WebPIUpdate(idec, frame.fragment.bytes, frame.fragment.size)) {
+ case VP8_STATUS_OK:
+ rowsDecoded = scaledHeight;
+ result = kSuccess;
+ break;
+ case VP8_STATUS_SUSPENDED:
+ if (!WebPIDecGetRGB(idec, &rowsDecoded, nullptr, nullptr, nullptr)
+ || rowsDecoded <= 0) {
+ return kInvalidInput;
+ }
+ *rowsDecodedPtr = rowsDecoded + dstY;
+ result = kIncompleteInput;
+ break;
+ default:
+ return kInvalidInput;
+ }
+
+ const size_t dstBpp = dstInfo.bytesPerPixel();
+ dst = SkTAddOffset<void>(dst, dstBpp * dstX + rowBytes * dstY);
+ const size_t srcRowBytes = config.output.u.RGBA.stride;
+
+ const auto dstCT = dstInfo.colorType();
+ if (this->colorXform()) {
+ uint32_t* xformSrc = (uint32_t*) config.output.u.RGBA.rgba;
+ SkBitmap tmp;
+ void* xformDst;
+
+ if (blendWithPrevFrame) {
+ // Xform into temporary bitmap big enough for one row.
+ tmp.allocPixels(dstInfo.makeWH(scaledWidth, 1));
+ xformDst = tmp.getPixels();
+ } else {
+ xformDst = dst;
+ }
+
+ for (int y = 0; y < rowsDecoded; y++) {
+ this->applyColorXform(xformDst, xformSrc, scaledWidth);
+ if (blendWithPrevFrame) {
+ blend_line(dstCT, dst, dstCT, xformDst,
+ dstInfo.alphaType(), frame.has_alpha, scaledWidth);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ } else {
+ xformDst = SkTAddOffset<void>(xformDst, rowBytes);
+ }
+ xformSrc = SkTAddOffset<uint32_t>(xformSrc, srcRowBytes);
+ }
+ } else if (blendWithPrevFrame) {
+ const uint8_t* src = config.output.u.RGBA.rgba;
+
+ for (int y = 0; y < rowsDecoded; y++) {
+ blend_line(dstCT, dst, webpDst.colorType(), src,
+ dstInfo.alphaType(), frame.has_alpha, scaledWidth);
+ src = SkTAddOffset<const uint8_t>(src, srcRowBytes);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ }
+ }
+
+ return result;
+}
+
+SkWebpCodec::SkWebpCodec(SkEncodedInfo&& info, std::unique_ptr<SkStream> stream,
+ WebPDemuxer* demux, sk_sp<SkData> data, SkEncodedOrigin origin)
+ : INHERITED(std::move(info), skcms_PixelFormat_BGRA_8888, std::move(stream),
+ origin)
+ , fDemux(demux)
+ , fData(std::move(data))
+ , fFailed(false)
+{
+ const auto& eInfo = this->getEncodedInfo();
+ fFrameHolder.setScreenSize(eInfo.width(), eInfo.height());
+}
diff --git a/gfx/skia/skia/src/codec/SkWebpCodec.h b/gfx/skia/skia/src/codec/SkWebpCodec.h
new file mode 100644
index 0000000000..36ff5357f0
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpCodec.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWebpCodec_DEFINED
+#define SkWebpCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkEncodedImageFormat.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkFrameHolder.h"
+#include "src/codec/SkScalingCodec.h"
+
+#include <vector>
+
+class SkStream;
+extern "C" {
+ struct WebPDemuxer;
+ void WebPDemuxDelete(WebPDemuxer* dmux);
+}
+
+class SkWebpCodec final : public SkScalingCodec {
+public:
+ // Assumes IsWebp was called and returned true.
+ static std::unique_ptr<SkCodec> MakeFromStream(std::unique_ptr<SkStream>, Result*);
+ static bool IsWebp(const void*, size_t);
+protected:
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&, int*) override;
+ SkEncodedImageFormat onGetEncodedFormat() const override { return SkEncodedImageFormat::kWEBP; }
+
+ bool onGetValidSubset(SkIRect* /* desiredSubset */) const override;
+
+ int onGetFrameCount() override;
+ bool onGetFrameInfo(int, FrameInfo*) const override;
+ int onGetRepetitionCount() override;
+
+ const SkFrameHolder* getFrameHolder() const override {
+ return &fFrameHolder;
+ }
+
+private:
+ SkWebpCodec(SkEncodedInfo&&, std::unique_ptr<SkStream>, WebPDemuxer*, sk_sp<SkData>,
+ SkEncodedOrigin);
+
+ SkAutoTCallVProc<WebPDemuxer, WebPDemuxDelete> fDemux;
+
+ // fDemux has a pointer into this data.
+ // This should not be freed until the decode is completed.
+ sk_sp<SkData> fData;
+
+ class Frame : public SkFrame {
+ public:
+ Frame(int i, SkEncodedInfo::Alpha alpha)
+ : INHERITED(i)
+ , fReportedAlpha(alpha)
+ {}
+
+ protected:
+ SkEncodedInfo::Alpha onReportedAlpha() const override {
+ return fReportedAlpha;
+ }
+
+ private:
+ const SkEncodedInfo::Alpha fReportedAlpha;
+
+ typedef SkFrame INHERITED;
+ };
+
+ class FrameHolder : public SkFrameHolder {
+ public:
+ ~FrameHolder() override {}
+ void setScreenSize(int w, int h) {
+ fScreenWidth = w;
+ fScreenHeight = h;
+ }
+ Frame* appendNewFrame(bool hasAlpha);
+ const Frame* frame(int i) const;
+ int size() const {
+ return static_cast<int>(fFrames.size());
+ }
+ void reserve(int size) {
+ fFrames.reserve(size);
+ }
+
+ protected:
+ const SkFrame* onGetFrame(int i) const override;
+
+ private:
+ std::vector<Frame> fFrames;
+ };
+
+ FrameHolder fFrameHolder;
+ // Set to true if WebPDemuxGetFrame fails. This only means
+ // that we will cap the frame count to the frames that
+ // succeed.
+ bool fFailed;
+
+ typedef SkScalingCodec INHERITED;
+};
+#endif // SkWebpCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkWuffsCodec.cpp b/gfx/skia/skia/src/codec/SkWuffsCodec.cpp
new file mode 100644
index 0000000000..f7ada1f548
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWuffsCodec.cpp
@@ -0,0 +1,870 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkWuffsCodec.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/private/SkMalloc.h"
+#include "src/codec/SkFrameHolder.h"
+#include "src/codec/SkSampler.h"
+#include "src/codec/SkScalingCodec.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkUtils.h"
+
+#include <limits.h>
+
+// Wuffs ships as a "single file C library" or "header file library" as per
+// https://github.com/nothings/stb/blob/master/docs/stb_howto.txt
+//
+// As we have not #define'd WUFFS_IMPLEMENTATION, the #include here is
+// including a header file, even though that file name ends in ".c".
+#if defined(WUFFS_IMPLEMENTATION)
+#error "SkWuffsCodec should not #define WUFFS_IMPLEMENTATION"
+#endif
+#include "wuffs-v0.2.c"
+#if WUFFS_VERSION_BUILD_METADATA_COMMIT_COUNT < 1942
+#error "Wuffs version is too old. Upgrade to the latest version."
+#endif
+
+#define SK_WUFFS_CODEC_BUFFER_SIZE 4096
+
+static bool fill_buffer(wuffs_base__io_buffer* b, SkStream* s) {
+ b->compact();
+ size_t num_read = s->read(b->data.ptr + b->meta.wi, b->data.len - b->meta.wi);
+ b->meta.wi += num_read;
+ b->meta.closed = s->isAtEnd();
+ return num_read > 0;
+}
+
+static bool seek_buffer(wuffs_base__io_buffer* b, SkStream* s, uint64_t pos) {
+ // Try to re-position the io_buffer's meta.ri read-index first, which is
+ // cheaper than seeking in the backing SkStream.
+ if ((pos >= b->meta.pos) && (pos - b->meta.pos <= b->meta.wi)) {
+ b->meta.ri = pos - b->meta.pos;
+ return true;
+ }
+ // Seek in the backing SkStream.
+ if ((pos > SIZE_MAX) || (!s->seek(pos))) {
+ return false;
+ }
+ b->meta.wi = 0;
+ b->meta.ri = 0;
+ b->meta.pos = pos;
+ b->meta.closed = false;
+ return true;
+}
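+
+// As a worked example of seek_buffer: with b->meta.pos == 4096 and
+// b->meta.wi == 1024, a seek to pos == 4200 lands inside the buffered
+// window, so b->meta.ri is simply set to 104 and the backing SkStream is
+// never touched.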
+
+static SkEncodedInfo::Alpha wuffs_blend_to_skia_alpha(wuffs_base__animation_blend w) {
+ return (w == WUFFS_BASE__ANIMATION_BLEND__OPAQUE) ? SkEncodedInfo::kOpaque_Alpha
+ : SkEncodedInfo::kUnpremul_Alpha;
+}
+
+static SkCodecAnimation::Blend wuffs_blend_to_skia_blend(wuffs_base__animation_blend w) {
+ return (w == WUFFS_BASE__ANIMATION_BLEND__SRC) ? SkCodecAnimation::Blend::kBG
+ : SkCodecAnimation::Blend::kPriorFrame;
+}
+
+static SkCodecAnimation::DisposalMethod wuffs_disposal_to_skia_disposal(
+ wuffs_base__animation_disposal w) {
+ switch (w) {
+ case WUFFS_BASE__ANIMATION_DISPOSAL__RESTORE_BACKGROUND:
+ return SkCodecAnimation::DisposalMethod::kRestoreBGColor;
+ case WUFFS_BASE__ANIMATION_DISPOSAL__RESTORE_PREVIOUS:
+ return SkCodecAnimation::DisposalMethod::kRestorePrevious;
+ default:
+ return SkCodecAnimation::DisposalMethod::kKeep;
+ }
+}
+
+// -------------------------------- Class definitions
+
+class SkWuffsCodec;
+
+class SkWuffsFrame final : public SkFrame {
+public:
+ SkWuffsFrame(wuffs_base__frame_config* fc);
+
+ SkCodec::FrameInfo frameInfo(bool fullyReceived) const;
+ uint64_t ioPosition() const;
+
+ // SkFrame overrides.
+ SkEncodedInfo::Alpha onReportedAlpha() const override;
+
+private:
+ uint64_t fIOPosition;
+ SkEncodedInfo::Alpha fReportedAlpha;
+
+ typedef SkFrame INHERITED;
+};
+
+// SkWuffsFrameHolder is a trivial indirector that forwards its calls onto a
+// SkWuffsCodec. It is a separate class as SkWuffsCodec would otherwise
+// inherit from both SkCodec and SkFrameHolder, and Skia style discourages
+// multiple inheritance (e.g. with its "typedef Foo INHERITED" convention).
+class SkWuffsFrameHolder final : public SkFrameHolder {
+public:
+ SkWuffsFrameHolder() : INHERITED() {}
+
+ void init(SkWuffsCodec* codec, int width, int height);
+
+ // SkFrameHolder overrides.
+ const SkFrame* onGetFrame(int i) const override;
+
+private:
+ const SkWuffsCodec* fCodec;
+
+ typedef SkFrameHolder INHERITED;
+};
+
+class SkWuffsCodec final : public SkScalingCodec {
+public:
+ SkWuffsCodec(SkEncodedInfo&& encodedInfo,
+ std::unique_ptr<SkStream> stream,
+ std::unique_ptr<wuffs_gif__decoder, decltype(&sk_free)> dec,
+ std::unique_ptr<uint8_t, decltype(&sk_free)> pixbuf_ptr,
+ std::unique_ptr<uint8_t, decltype(&sk_free)> workbuf_ptr,
+ size_t workbuf_len,
+ wuffs_base__image_config imgcfg,
+ wuffs_base__pixel_buffer pixbuf,
+ wuffs_base__io_buffer iobuf);
+
+ const SkWuffsFrame* frame(int i) const;
+
+private:
+ // SkCodec overrides.
+ SkEncodedImageFormat onGetEncodedFormat() const override;
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&, int*) override;
+ const SkFrameHolder* getFrameHolder() const override;
+ Result onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* dst,
+ size_t rowBytes,
+ const SkCodec::Options& options) override;
+ Result onIncrementalDecode(int* rowsDecoded) override;
+ int onGetFrameCount() override;
+ bool onGetFrameInfo(int, FrameInfo*) const override;
+ int onGetRepetitionCount() override;
+
+ void readFrames();
+ Result seekFrame(int frameIndex);
+
+ Result resetDecoder();
+ const char* decodeFrameConfig();
+ const char* decodeFrame();
+ void updateNumFullyReceivedFrames();
+
+ SkWuffsFrameHolder fFrameHolder;
+ std::unique_ptr<SkStream> fStream;
+ std::unique_ptr<wuffs_gif__decoder, decltype(&sk_free)> fDecoder;
+ std::unique_ptr<uint8_t, decltype(&sk_free)> fPixbufPtr;
+ std::unique_ptr<uint8_t, decltype(&sk_free)> fWorkbufPtr;
+ size_t fWorkbufLen;
+
+ const uint64_t fFirstFrameIOPosition;
+ wuffs_base__frame_config fFrameConfig;
+ wuffs_base__pixel_buffer fPixelBuffer;
+ wuffs_base__io_buffer fIOBuffer;
+
+ // Incremental decoding state.
+ uint8_t* fIncrDecDst;
+ size_t fIncrDecRowBytes;
+ bool fFirstCallToIncrementalDecode;
+
+ uint64_t fNumFullyReceivedFrames;
+ std::vector<SkWuffsFrame> fFrames;
+ bool fFramesComplete;
+
+ // If calling an fDecoder method returns an incomplete status, then
+ // fDecoder is suspended in a coroutine (i.e. waiting on I/O or halted on a
+ // non-recoverable error). To keep its internal proof-of-safety invariants
+ // consistent, there's only two things you can safely do with a suspended
+ // Wuffs object: resume the coroutine, or reset all state (memset to zero
+ // and start again).
+ //
+ // If fDecoderIsSuspended, and we aren't sure that we're going to resume
+ // the coroutine, then we will need to call this->resetDecoder before
+ // calling other fDecoder methods.
+ bool fDecoderIsSuspended;
+
+ uint8_t fBuffer[SK_WUFFS_CODEC_BUFFER_SIZE];
+
+ typedef SkScalingCodec INHERITED;
+};
+
+// -------------------------------- SkWuffsFrame implementation
+
+SkWuffsFrame::SkWuffsFrame(wuffs_base__frame_config* fc)
+ : INHERITED(fc->index()),
+ fIOPosition(fc->io_position()),
+ fReportedAlpha(wuffs_blend_to_skia_alpha(fc->blend())) {
+ wuffs_base__rect_ie_u32 r = fc->bounds();
+ this->setXYWH(r.min_incl_x, r.min_incl_y, r.width(), r.height());
+ this->setDisposalMethod(wuffs_disposal_to_skia_disposal(fc->disposal()));
+ this->setDuration(fc->duration() / WUFFS_BASE__FLICKS_PER_MILLISECOND);
+ this->setBlend(wuffs_blend_to_skia_blend(fc->blend()));
+}
+
+SkCodec::FrameInfo SkWuffsFrame::frameInfo(bool fullyReceived) const {
+ SkCodec::FrameInfo ret;
+ ret.fRequiredFrame = getRequiredFrame();
+ ret.fDuration = getDuration();
+ ret.fFullyReceived = fullyReceived;
+ ret.fAlphaType = hasAlpha() ? kUnpremul_SkAlphaType : kOpaque_SkAlphaType;
+ ret.fDisposalMethod = getDisposalMethod();
+ return ret;
+}
+
+uint64_t SkWuffsFrame::ioPosition() const {
+ return fIOPosition;
+}
+
+SkEncodedInfo::Alpha SkWuffsFrame::onReportedAlpha() const {
+ return fReportedAlpha;
+}
+
+// -------------------------------- SkWuffsFrameHolder implementation
+
+void SkWuffsFrameHolder::init(SkWuffsCodec* codec, int width, int height) {
+ fCodec = codec;
+ // Initialize SkFrameHolder's (the superclass) fields.
+ fScreenWidth = width;
+ fScreenHeight = height;
+}
+
+const SkFrame* SkWuffsFrameHolder::onGetFrame(int i) const {
+ return fCodec->frame(i);
+}
+
+// -------------------------------- SkWuffsCodec implementation
+
+SkWuffsCodec::SkWuffsCodec(SkEncodedInfo&& encodedInfo,
+ std::unique_ptr<SkStream> stream,
+ std::unique_ptr<wuffs_gif__decoder, decltype(&sk_free)> dec,
+ std::unique_ptr<uint8_t, decltype(&sk_free)> pixbuf_ptr,
+ std::unique_ptr<uint8_t, decltype(&sk_free)> workbuf_ptr,
+ size_t workbuf_len,
+ wuffs_base__image_config imgcfg,
+ wuffs_base__pixel_buffer pixbuf,
+ wuffs_base__io_buffer iobuf)
+ : INHERITED(std::move(encodedInfo),
+ skcms_PixelFormat_RGBA_8888,
+ // Pass a nullptr SkStream to the SkCodec constructor. We
+ // manage the stream ourselves, as the default SkCodec behavior
+ // is too trigger-happy on rewinding the stream.
+ nullptr),
+ fFrameHolder(),
+ fStream(std::move(stream)),
+ fDecoder(std::move(dec)),
+ fPixbufPtr(std::move(pixbuf_ptr)),
+ fWorkbufPtr(std::move(workbuf_ptr)),
+ fWorkbufLen(workbuf_len),
+ fFirstFrameIOPosition(imgcfg.first_frame_io_position()),
+ fFrameConfig(wuffs_base__null_frame_config()),
+ fPixelBuffer(pixbuf),
+ fIOBuffer(wuffs_base__empty_io_buffer()),
+ fIncrDecDst(nullptr),
+ fIncrDecRowBytes(0),
+ fFirstCallToIncrementalDecode(false),
+ fNumFullyReceivedFrames(0),
+ fFramesComplete(false),
+ fDecoderIsSuspended(false) {
+ fFrameHolder.init(this, imgcfg.pixcfg.width(), imgcfg.pixcfg.height());
+
+ // Initialize fIOBuffer's fields, copying any outstanding data from iobuf to
+ // fIOBuffer, as iobuf's backing array may not be valid for the lifetime of
+ // this SkWuffsCodec object, but fIOBuffer's backing array (fBuffer) is.
+ SkASSERT(iobuf.data.len == SK_WUFFS_CODEC_BUFFER_SIZE);
+ memmove(fBuffer, iobuf.data.ptr, iobuf.meta.wi);
+ fIOBuffer.data = wuffs_base__make_slice_u8(fBuffer, SK_WUFFS_CODEC_BUFFER_SIZE);
+ fIOBuffer.meta = iobuf.meta;
+}
+
+const SkWuffsFrame* SkWuffsCodec::frame(int i) const {
+ if ((0 <= i) && (static_cast<size_t>(i) < fFrames.size())) {
+ return &fFrames[i];
+ }
+ return nullptr;
+}
+
+SkEncodedImageFormat SkWuffsCodec::onGetEncodedFormat() const {
+ return SkEncodedImageFormat::kGIF;
+}
+
+SkCodec::Result SkWuffsCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst,
+ size_t rowBytes,
+ const Options& options,
+ int* rowsDecoded) {
+ SkCodec::Result result = this->onStartIncrementalDecode(dstInfo, dst, rowBytes, options);
+ if (result != kSuccess) {
+ return result;
+ }
+ return this->onIncrementalDecode(rowsDecoded);
+}
+
+const SkFrameHolder* SkWuffsCodec::getFrameHolder() const {
+ return &fFrameHolder;
+}
+
+SkCodec::Result SkWuffsCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* dst,
+ size_t rowBytes,
+ const SkCodec::Options& options) {
+ if (!dst) {
+ return SkCodec::kInvalidParameters;
+ }
+ if (options.fSubset) {
+ return SkCodec::kUnimplemented;
+ }
+ if (options.fFrameIndex > 0 && SkColorTypeIsAlwaysOpaque(dstInfo.colorType())) {
+ return SkCodec::kInvalidConversion;
+ }
+ SkCodec::Result result = this->seekFrame(options.fFrameIndex);
+ if (result != SkCodec::kSuccess) {
+ return result;
+ }
+
+ const char* status = this->decodeFrameConfig();
+ if (status == wuffs_base__suspension__short_read) {
+ return SkCodec::kIncompleteInput;
+ } else if (status != nullptr) {
+ SkCodecPrintf("decodeFrameConfig: %s", status);
+ return SkCodec::kErrorInInput;
+ }
+
+ uint32_t src_bits_per_pixel =
+ wuffs_base__pixel_format__bits_per_pixel(fPixelBuffer.pixcfg.pixel_format());
+ if ((src_bits_per_pixel == 0) || (src_bits_per_pixel % 8 != 0)) {
+ return SkCodec::kInternalError;
+ }
+ size_t src_bytes_per_pixel = src_bits_per_pixel / 8;
+
+ // Zero-initialize Wuffs' buffer covering the frame rect.
+ wuffs_base__rect_ie_u32 frame_rect = fFrameConfig.bounds();
+ wuffs_base__table_u8 pixels = fPixelBuffer.plane(0);
+ for (uint32_t y = frame_rect.min_incl_y; y < frame_rect.max_excl_y; y++) {
+ sk_bzero(pixels.ptr + (y * pixels.stride) + (frame_rect.min_incl_x * src_bytes_per_pixel),
+ frame_rect.width() * src_bytes_per_pixel);
+ }
+
+ fIncrDecDst = static_cast<uint8_t*>(dst);
+ fIncrDecRowBytes = rowBytes;
+ fFirstCallToIncrementalDecode = true;
+ return SkCodec::kSuccess;
+}
+
+static SkAlphaType to_alpha_type(bool opaque) {
+ return opaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+}
+
+SkCodec::Result SkWuffsCodec::onIncrementalDecode(int* rowsDecoded) {
+ if (!fIncrDecDst) {
+ return SkCodec::kInternalError;
+ }
+
+ SkCodec::Result result = SkCodec::kSuccess;
+ const char* status = this->decodeFrame();
+ bool independent;
+ SkAlphaType alphaType;
+ const int index = options().fFrameIndex;
+ if (index == 0) {
+ independent = true;
+ alphaType = to_alpha_type(getEncodedInfo().opaque());
+ } else {
+ const SkWuffsFrame* f = this->frame(index);
+ independent = f->getRequiredFrame() == SkCodec::kNoFrame;
+ alphaType = to_alpha_type(f->reportedAlpha() == SkEncodedInfo::kOpaque_Alpha);
+ }
+ if (status != nullptr) {
+ if (status == wuffs_base__suspension__short_read) {
+ result = SkCodec::kIncompleteInput;
+ } else {
+ SkCodecPrintf("decodeFrame: %s", status);
+ result = SkCodec::kErrorInInput;
+ }
+
+ if (!independent) {
+ // For a dependent frame, we cannot blend the partial result, since
+ // that will overwrite the contribution from prior frames.
+ return result;
+ }
+ }
+
+ uint32_t src_bits_per_pixel =
+ wuffs_base__pixel_format__bits_per_pixel(fPixelBuffer.pixcfg.pixel_format());
+ if ((src_bits_per_pixel == 0) || (src_bits_per_pixel % 8 != 0)) {
+ return SkCodec::kInternalError;
+ }
+ size_t src_bytes_per_pixel = src_bits_per_pixel / 8;
+
+ wuffs_base__rect_ie_u32 frame_rect = fFrameConfig.bounds();
+ if (fFirstCallToIncrementalDecode) {
+ if (frame_rect.width() > (SIZE_MAX / src_bytes_per_pixel)) {
+ return SkCodec::kInternalError;
+ }
+
+ auto bounds = SkIRect::MakeLTRB(frame_rect.min_incl_x, frame_rect.min_incl_y,
+ frame_rect.max_excl_x, frame_rect.max_excl_y);
+
+ // If the frame rect does not fill the output, ensure that those pixels are not
+ // left uninitialized.
+ if (independent && (bounds != this->bounds() || result != kSuccess)) {
+ SkSampler::Fill(dstInfo(), fIncrDecDst, fIncrDecRowBytes,
+ options().fZeroInitialized);
+ }
+ fFirstCallToIncrementalDecode = false;
+ } else {
+ // Existing clients intend to only show frames beyond the first if they
+ // are complete (based on FrameInfo::fFullyReceived), since it might
+ // look jarring to draw a partial frame over an existing frame. If they
+ // changed their behavior and expected to continue decoding a partial
+ // frame after the first one, we'll need to update our blending code.
+ // Otherwise, if the frame were interlaced and not independent, the
+ // second pass may have an overlapping dirty_rect with the first,
+ // resulting in blending with the first pass.
+ SkASSERT(index == 0);
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = dstInfo().height();
+ }
+
+ // If the frame's dirty rect is empty, no need to swizzle.
+ wuffs_base__rect_ie_u32 dirty_rect = fDecoder->frame_dirty_rect();
+ if (!dirty_rect.is_empty()) {
+ wuffs_base__table_u8 pixels = fPixelBuffer.plane(0);
+
+ // The Wuffs model is that the dst buffer is the image, not the frame.
+ // The expectation is that you allocate the buffer once, but re-use it
+ // for the N frames, regardless of each frame's top-left co-ordinate.
+ //
+ // To get from the start (in the X-direction) of the image to the start
+ // of the dirty_rect, we adjust s by (dirty_rect.min_incl_x * src_bytes_per_pixel).
+ uint8_t* s = pixels.ptr + (dirty_rect.min_incl_y * pixels.stride)
+ + (dirty_rect.min_incl_x * src_bytes_per_pixel);
+
+ // Currently, this is only used for GIF, which will never have an ICC profile. When it is
+ // used for other formats that might have one, we will need to transform from profiles that
+ // do not have corresponding SkColorSpaces.
+ SkASSERT(!getEncodedInfo().profile());
+
+ auto srcInfo = getInfo().makeWH(dirty_rect.width(), dirty_rect.height())
+ .makeAlphaType(alphaType);
+ SkBitmap src;
+ src.installPixels(srcInfo, s, pixels.stride);
+ SkPaint paint;
+ if (independent) {
+ paint.setBlendMode(SkBlendMode::kSrc);
+ }
+
+ SkDraw draw;
+ draw.fDst.reset(dstInfo(), fIncrDecDst, fIncrDecRowBytes);
+ SkMatrix matrix = SkMatrix::MakeRectToRect(SkRect::Make(this->dimensions()),
+ SkRect::Make(this->dstInfo().dimensions()),
+ SkMatrix::kFill_ScaleToFit);
+ draw.fMatrix = &matrix;
+ SkRasterClip rc(SkIRect::MakeSize(this->dstInfo().dimensions()));
+ draw.fRC = &rc;
+
+ SkMatrix translate = SkMatrix::MakeTrans(dirty_rect.min_incl_x, dirty_rect.min_incl_y);
+ draw.drawBitmap(src, translate, nullptr, paint);
+ }
+
+ if (result == SkCodec::kSuccess) {
+ fIncrDecDst = nullptr;
+ fIncrDecRowBytes = 0;
+ }
+ return result;
+}
+
+int SkWuffsCodec::onGetFrameCount() {
+ // It is valid, in terms of the SkCodec API, to call SkCodec::getFrameCount
+ // while in an incremental decode (after onStartIncrementalDecode returns
+ // and before onIncrementalDecode returns kSuccess).
+ //
+    // We should not advance the SkWuffsCodec's stream while doing so, even
+ // though other SkCodec implementations can return increasing values from
+ // onGetFrameCount when given more data. If we tried to do so, the
+ // subsequent resume of the incremental decode would continue reading from
+ // a different position in the I/O stream, leading to an incorrect error.
+ //
+ // Other SkCodec implementations can move the stream forward during
+ // onGetFrameCount because they assume that the stream is rewindable /
+ // seekable. For example, an alternative GIF implementation may choose to
+ // store, for each frame walked past when merely counting the number of
+ // frames, the I/O position of each of the frame's GIF data blocks. (A GIF
+ // frame's compressed data can have multiple data blocks, each at most 255
+ // bytes in length). Obviously, this can require O(numberOfFrames) extra
+ // memory to store these I/O positions. The constant factor is small, but
+ // it's still O(N), not O(1).
+ //
+    // Wuffs and SkWuffsCodec try to minimize relying on the rewindable /
+ // seekable assumption. By design, Wuffs per se aims for O(1) memory use
+ // (after any pixel buffers are allocated) instead of O(N), and its I/O
+ // type, wuffs_base__io_buffer, is not necessarily rewindable or seekable.
+ //
+ // The Wuffs API provides a limited, optional form of seeking, to the start
+ // of an animation frame's data, but does not provide arbitrary save and
+ // load of its internal state whilst in the middle of an animation frame.
+ bool incrementalDecodeIsInProgress = fIncrDecDst != nullptr;
+
+ if (!fFramesComplete && !incrementalDecodeIsInProgress) {
+ this->readFrames();
+ this->updateNumFullyReceivedFrames();
+ }
+ return fFrames.size();
+}
+
+bool SkWuffsCodec::onGetFrameInfo(int i, SkCodec::FrameInfo* frameInfo) const {
+ const SkWuffsFrame* f = this->frame(i);
+ if (!f) {
+ return false;
+ }
+ if (frameInfo) {
+ *frameInfo = f->frameInfo(static_cast<uint64_t>(i) < this->fNumFullyReceivedFrames);
+ }
+ return true;
+}
+
+int SkWuffsCodec::onGetRepetitionCount() {
+    // Convert from Wuffs' loop count to Skia's repeat count. Wuffs' uint32_t
+ // number is how many times to play the loop. Skia's int number is how many
+ // times to play the loop *after the first play*. Wuffs and Skia use 0 and
+ // kRepetitionCountInfinite respectively to mean loop forever.
+ uint32_t n = fDecoder->num_animation_loops();
+ if (n == 0) {
+ return SkCodec::kRepetitionCountInfinite;
+ }
+ n--;
+ return n < INT_MAX ? n : INT_MAX;
+}
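+
+// For example, a Wuffs loop count of 3 ("play the loop three times") maps to
+// a Skia repetition count of 2 ("repeat twice after the first play"), while 0
+// maps to kRepetitionCountInfinite.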
+
+void SkWuffsCodec::readFrames() {
+ size_t n = fFrames.size();
+ int i = n ? n - 1 : 0;
+ if (this->seekFrame(i) != SkCodec::kSuccess) {
+ return;
+ }
+
+ // Iterate through the frames, converting from Wuffs'
+ // wuffs_base__frame_config type to Skia's SkWuffsFrame type.
+ for (; i < INT_MAX; i++) {
+ const char* status = this->decodeFrameConfig();
+ if (status == nullptr) {
+ // No-op.
+ } else if (status == wuffs_base__warning__end_of_data) {
+ break;
+ } else {
+ return;
+ }
+
+ if (static_cast<size_t>(i) < fFrames.size()) {
+ continue;
+ }
+ fFrames.emplace_back(&fFrameConfig);
+ SkWuffsFrame* f = &fFrames[fFrames.size() - 1];
+ fFrameHolder.setAlphaAndRequiredFrame(f);
+ }
+
+ fFramesComplete = true;
+}
+
+SkCodec::Result SkWuffsCodec::seekFrame(int frameIndex) {
+ if (fDecoderIsSuspended) {
+ SkCodec::Result res = this->resetDecoder();
+ if (res != SkCodec::kSuccess) {
+ return res;
+ }
+ }
+
+ uint64_t pos = 0;
+ if (frameIndex < 0) {
+ return SkCodec::kInternalError;
+ } else if (frameIndex == 0) {
+ pos = fFirstFrameIOPosition;
+ } else if (static_cast<size_t>(frameIndex) < fFrames.size()) {
+ pos = fFrames[frameIndex].ioPosition();
+ } else {
+ return SkCodec::kInternalError;
+ }
+
+ if (!seek_buffer(&fIOBuffer, fStream.get(), pos)) {
+ return SkCodec::kInternalError;
+ }
+ const char* status = fDecoder->restart_frame(frameIndex, fIOBuffer.reader_io_position());
+ if (status != nullptr) {
+ return SkCodec::kInternalError;
+ }
+ return SkCodec::kSuccess;
+}
+
+// An overview of the Wuffs decoding API:
+//
+// An animated image (such as GIF) has an image header and then N frames. The
+// image header gives e.g. the overall image's width and height. Each frame
+// consists of a frame header (e.g. frame rectangle bounds, display duration)
+// and a payload (the pixels).
+//
+// In Wuffs terminology, there is one image config and then N pairs of
+// (frame_config, frame). To decode everything (without knowing N in advance)
+// sequentially:
+// - call wuffs_gif__decoder::decode_image_config
+// - while (true) {
+// - call wuffs_gif__decoder::decode_frame_config
+// - if that returned wuffs_base__warning__end_of_data, break
+// - call wuffs_gif__decoder::decode_frame
+// - }
+//
+// The first argument to each decode_foo method is the destination struct to
+// store the decoded information.
+//
+// For random (instead of sequential) access to an image's frames, call
+// wuffs_gif__decoder::restart_frame to prepare to decode the i'th frame.
+// Essentially, it restores the state to be at the top of the while loop above.
+// The wuffs_base__io_buffer's reader position will also need to be set at the
+// right point in the source data stream. The position for the i'th frame is
+// calculated by the i'th decode_frame_config call. You can only call
+// restart_frame after decode_image_config is called, explicitly or implicitly
+// (see below), as decoding a single frame might require for-all-frames
+// information like the overall image dimensions and the global palette.
+//
+// All of those decode_xxx calls are optional. For example, if
+// decode_image_config is not called, then the first decode_frame_config call
+// will implicitly parse and verify the image header, before parsing the first
+// frame's header. Similarly, you can call only decode_frame N times, without
+// calling decode_image_config or decode_frame_config, if you already know
+// metadata like N and each frame's rectangle bounds by some other means (e.g.
+// this is a first party, statically known image).
+//
+// Specifically, starting with an unknown (but rewindable) GIF image, if you
+// want to just find N (i.e. count the number of frames), you can loop calling
+// only the decode_frame_config method and avoid calling the more expensive
+// decode_frame method. In terms of the underlying GIF image format, this will
+// skip over the LZW-encoded pixel data, avoiding the costly LZW decompression.
+//
+// Those decode_xxx methods are also suspendible. They will return early (with
+// a status code that is_suspendible and therefore isn't is_complete) if there
+// isn't enough source data to complete the operation: an incremental decode.
+// Calling decode_xxx again with additional source data will resume the
+// previous operation, instead of starting a new operation. Calling decode_yyy
+// whilst decode_xxx is suspended will result in an error.
+//
+// Once an error is encountered, whether from invalid source data or from a
+// programming error such as calling decode_yyy while suspended in decode_xxx,
+// all subsequent calls will be no-ops that return an error. To reset the
+// decoder into something that does productive work, memset the entire struct
+// to zero, check the Wuffs version and then, in order to be able to call
+// restart_frame, call decode_image_config. The io_buffer and its associated
+// stream will also need to be rewound.
+
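+// Editor's sketch (hedged, not part of the Skia or Wuffs sources): the
+// sequential decode loop described above, used to count frames by looping
+// decode_frame_config and never paying for decode_frame. It assumes an
+// initialized decoder and an io_buffer b that already holds the whole image.
+#if 0
+static int count_frames(wuffs_gif__decoder* dec, wuffs_base__io_buffer* b) {
+ wuffs_base__image_config imgcfg = wuffs_base__null_image_config();
+ if (dec->decode_image_config(&imgcfg, b) != nullptr) {
+ return -1; // invalid or unsupported image header
+ }
+ int n = 0;
+ while (true) {
+ wuffs_base__frame_config fc = {}; // zero-initialized destination struct
+ const char* status = dec->decode_frame_config(&fc, b);
+ if (status == wuffs_base__warning__end_of_data) {
+ break; // no more frames
+ } else if (status != nullptr) {
+ return -1; // an error (or a suspension; we assumed whole input above)
+ }
+ n++; // skipping decode_frame avoids the costly LZW decompression
+ }
+ return n;
+}
+#endif
+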
+static SkCodec::Result reset_and_decode_image_config(wuffs_gif__decoder* decoder,
+ wuffs_base__image_config* imgcfg,
+ wuffs_base__io_buffer* b,
+ SkStream* s) {
+ // Calling decoder->initialize will memset it to zero.
+ const char* status = decoder->initialize(sizeof__wuffs_gif__decoder(), WUFFS_VERSION, 0);
+ if (status != nullptr) {
+ SkCodecPrintf("initialize: %s", status);
+ return SkCodec::kInternalError;
+ }
+ while (true) {
+ status = decoder->decode_image_config(imgcfg, b);
+ if (status == nullptr) {
+ break;
+ } else if (status != wuffs_base__suspension__short_read) {
+ SkCodecPrintf("decode_image_config: %s", status);
+ return SkCodec::kErrorInInput;
+ } else if (!fill_buffer(b, s)) {
+ return SkCodec::kIncompleteInput;
+ }
+ }
+
+ // A GIF image's natural color model is indexed color: 1 byte per pixel,
+ // indexing a 256-element palette.
+ //
+ // For Skia, we override that to decode to 4 bytes per pixel, BGRA or RGBA.
+ wuffs_base__pixel_format pixfmt = 0;
+ switch (kN32_SkColorType) {
+ case kBGRA_8888_SkColorType:
+ pixfmt = WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL;
+ break;
+ case kRGBA_8888_SkColorType:
+ pixfmt = WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL;
+ break;
+ default:
+ return SkCodec::kInternalError;
+ }
+ if (imgcfg) {
+ imgcfg->pixcfg.set(pixfmt, WUFFS_BASE__PIXEL_SUBSAMPLING__NONE, imgcfg->pixcfg.width(),
+ imgcfg->pixcfg.height());
+ }
+
+ return SkCodec::kSuccess;
+}
+
+SkCodec::Result SkWuffsCodec::resetDecoder() {
+ if (!fStream->rewind()) {
+ return SkCodec::kInternalError;
+ }
+ fIOBuffer.meta = wuffs_base__empty_io_buffer_meta();
+
+ SkCodec::Result result =
+ reset_and_decode_image_config(fDecoder.get(), nullptr, &fIOBuffer, fStream.get());
+ if (result == SkCodec::kIncompleteInput) {
+ return SkCodec::kInternalError;
+ } else if (result != SkCodec::kSuccess) {
+ return result;
+ }
+
+ fDecoderIsSuspended = false;
+ return SkCodec::kSuccess;
+}
+
+const char* SkWuffsCodec::decodeFrameConfig() {
+ while (true) {
+ const char* status = fDecoder->decode_frame_config(&fFrameConfig, &fIOBuffer);
+ if ((status == wuffs_base__suspension__short_read) &&
+ fill_buffer(&fIOBuffer, fStream.get())) {
+ continue;
+ }
+ fDecoderIsSuspended = !wuffs_base__status__is_complete(status);
+ this->updateNumFullyReceivedFrames();
+ return status;
+ }
+}
+
+const char* SkWuffsCodec::decodeFrame() {
+ while (true) {
+ const char* status =
+ fDecoder->decode_frame(&fPixelBuffer, &fIOBuffer,
+ wuffs_base__make_slice_u8(fWorkbufPtr.get(), fWorkbufLen), NULL);
+ if ((status == wuffs_base__suspension__short_read) &&
+ fill_buffer(&fIOBuffer, fStream.get())) {
+ continue;
+ }
+ fDecoderIsSuspended = !wuffs_base__status__is_complete(status);
+ this->updateNumFullyReceivedFrames();
+ return status;
+ }
+}
+
+void SkWuffsCodec::updateNumFullyReceivedFrames() {
+ // num_decoded_frames's return value, n, can change over time, both up and
+ // down, as we seek back and forth in the underlying stream.
+ // fNumFullyReceivedFrames is the highest n we've seen.
+ uint64_t n = fDecoder->num_decoded_frames();
+ if (fNumFullyReceivedFrames < n) {
+ fNumFullyReceivedFrames = n;
+ }
+}
+
+// -------------------------------- SkWuffsCodec.h functions
+
+bool SkWuffsCodec_IsFormat(const void* buf, size_t bytesRead) {
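+ // This 4-byte prefix matches both GIF signatures, "GIF87a" and "GIF89a".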
+ constexpr const char* gif_ptr = "GIF8";
+ constexpr size_t gif_len = 4;
+ return (bytesRead >= gif_len) && (memcmp(buf, gif_ptr, gif_len) == 0);
+}
+
+std::unique_ptr<SkCodec> SkWuffsCodec_MakeFromStream(std::unique_ptr<SkStream> stream,
+ SkCodec::Result* result) {
+ uint8_t buffer[SK_WUFFS_CODEC_BUFFER_SIZE];
+ wuffs_base__io_buffer iobuf =
+ wuffs_base__make_io_buffer(wuffs_base__make_slice_u8(buffer, SK_WUFFS_CODEC_BUFFER_SIZE),
+ wuffs_base__empty_io_buffer_meta());
+ wuffs_base__image_config imgcfg = wuffs_base__null_image_config();
+
+ // Wuffs is primarily a C library, not a C++ one. Furthermore, outside of
+ // the wuffs_base__etc types, the sizeof a file format specific type like
+ // GIF's wuffs_gif__decoder can vary between Wuffs versions. If p is of
+ // type wuffs_gif__decoder*, then the supported API treats p as a pointer
+ // to an opaque type: a private implementation detail. The API is always
+ // "set_foo(p, etc)" and not "p->foo = etc".
+ //
+ // See https://en.wikipedia.org/wiki/Opaque_pointer#C
+ //
+ // Thus, we don't use C++'s new operator (which requires knowing the sizeof
+ // the struct at compile time). Instead, we use sk_malloc_canfail, with
+ // sizeof__wuffs_gif__decoder returning the appropriate value for the
+ // (statically or dynamically) linked version of the Wuffs library.
+ //
+ // As a C (not C++) library, none of the Wuffs types have constructors or
+ // destructors.
+ //
+ // In RAII style, we can still use std::unique_ptr with these pointers, but
+ // we pair the pointer with sk_free instead of C++'s delete.
+ void* decoder_raw = sk_malloc_canfail(sizeof__wuffs_gif__decoder());
+ if (!decoder_raw) {
+ *result = SkCodec::kInternalError;
+ return nullptr;
+ }
+ std::unique_ptr<wuffs_gif__decoder, decltype(&sk_free)> decoder(
+ reinterpret_cast<wuffs_gif__decoder*>(decoder_raw), &sk_free);
+
+ SkCodec::Result reset_result =
+ reset_and_decode_image_config(decoder.get(), &imgcfg, &iobuf, stream.get());
+ if (reset_result != SkCodec::kSuccess) {
+ *result = reset_result;
+ return nullptr;
+ }
+
+ uint32_t width = imgcfg.pixcfg.width();
+ uint32_t height = imgcfg.pixcfg.height();
+ if ((width == 0) || (width > INT_MAX) || (height == 0) || (height > INT_MAX)) {
+ *result = SkCodec::kInvalidInput;
+ return nullptr;
+ }
+
+ uint64_t workbuf_len = decoder->workbuf_len().max_incl;
+ void* workbuf_ptr_raw = nullptr;
+ if (workbuf_len) {
+ workbuf_ptr_raw = workbuf_len <= SIZE_MAX ? sk_malloc_canfail(workbuf_len) : nullptr;
+ if (!workbuf_ptr_raw) {
+ *result = SkCodec::kInternalError;
+ return nullptr;
+ }
+ }
+ std::unique_ptr<uint8_t, decltype(&sk_free)> workbuf_ptr(
+ reinterpret_cast<uint8_t*>(workbuf_ptr_raw), &sk_free);
+
+ uint64_t pixbuf_len = imgcfg.pixcfg.pixbuf_len();
+ void* pixbuf_ptr_raw = pixbuf_len <= SIZE_MAX ? sk_malloc_canfail(pixbuf_len) : nullptr;
+ if (!pixbuf_ptr_raw) {
+ *result = SkCodec::kInternalError;
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t, decltype(&sk_free)> pixbuf_ptr(
+ reinterpret_cast<uint8_t*>(pixbuf_ptr_raw), &sk_free);
+ wuffs_base__pixel_buffer pixbuf = wuffs_base__null_pixel_buffer();
+
+ const char* status = pixbuf.set_from_slice(
+ &imgcfg.pixcfg, wuffs_base__make_slice_u8(pixbuf_ptr.get(), SkToSizeT(pixbuf_len)));
+ if (status != nullptr) {
+ SkCodecPrintf("set_from_slice: %s", status);
+ *result = SkCodec::kInternalError;
+ return nullptr;
+ }
+
+ SkEncodedInfo::Color color =
+ (imgcfg.pixcfg.pixel_format() == WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL)
+ ? SkEncodedInfo::kBGRA_Color
+ : SkEncodedInfo::kRGBA_Color;
+
+ // In Skia's API, the alpha we calculate here and return is only for the
+ // first frame.
+ SkEncodedInfo::Alpha alpha = imgcfg.first_frame_is_opaque() ? SkEncodedInfo::kOpaque_Alpha
+ : SkEncodedInfo::kBinary_Alpha;
+
+ SkEncodedInfo encodedInfo = SkEncodedInfo::Make(width, height, color, alpha, 8);
+
+ *result = SkCodec::kSuccess;
+ return std::unique_ptr<SkCodec>(new SkWuffsCodec(
+ std::move(encodedInfo), std::move(stream), std::move(decoder), std::move(pixbuf_ptr),
+ std::move(workbuf_ptr), workbuf_len, imgcfg, pixbuf, iobuf));
+}
diff --git a/gfx/skia/skia/src/codec/SkWuffsCodec.h b/gfx/skia/skia/src/codec/SkWuffsCodec.h
new file mode 100644
index 0000000000..373c89c131
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWuffsCodec.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWuffsCodec_DEFINED
+#define SkWuffsCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+
+// These functions' types match DecoderProc in SkCodec.cpp.
+bool SkWuffsCodec_IsFormat(const void*, size_t);
+std::unique_ptr<SkCodec> SkWuffsCodec_MakeFromStream(std::unique_ptr<SkStream>, SkCodec::Result*);
+
+#endif // SkWuffsCodec_DEFINED
diff --git a/gfx/skia/skia/src/core/Sk4px.h b/gfx/skia/skia/src/core/Sk4px.h
new file mode 100644
index 0000000000..f2c4ea4e6b
--- /dev/null
+++ b/gfx/skia/skia/src/core/Sk4px.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4px_DEFINED
+#define Sk4px_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkNx.h"
+
+// This file may be included multiple times by .cpp files with different flags, leading
+// to different definitions. Usually that doesn't matter because it's all inlined, but
+// in Debug modes the compilers may not inline everything. So wrap everything in an
+// anonymous namespace to give each includer their own silo of this code (or the linker
+// will probably pick one randomly for us, which is rarely correct).
+namespace { // NOLINT(google-build-namespaces)
+
+// 1, 2 or 4 SkPMColors, generally vectorized.
+class Sk4px : public Sk16b {
+public:
+ Sk4px(const Sk16b& v) : INHERITED(v) {}
+
+ static Sk4px DupPMColor(SkPMColor c) {
+ Sk4u splat(c);
+
+ Sk4px v;
+ memcpy(&v, &splat, 16);
+ return v;
+ }
+
+ Sk4px alphas() const; // ARGB argb XYZW xyzw -> AAAA aaaa XXXX xxxx
+ Sk4px inv() const { return Sk16b(255) - *this; }
+
+ // When loading or storing fewer than 4 SkPMColors, we use the low lanes.
+ static Sk4px Load4(const SkPMColor px[4]) {
+ Sk4px v;
+ memcpy(&v, px, 16);
+ return v;
+ }
+ static Sk4px Load2(const SkPMColor px[2]) {
+ Sk4px v;
+ memcpy(&v, px, 8);
+ return v;
+ }
+ static Sk4px Load1(const SkPMColor px[1]) {
+ Sk4px v;
+ memcpy(&v, px, 4);
+ return v;
+ }
+
+ // Ditto for Alphas... Load2Alphas fills the low two lanes of Sk4px.
+ static Sk4px Load4Alphas(const SkAlpha[4]); // AaXx -> AAAA aaaa XXXX xxxx
+ static Sk4px Load2Alphas(const SkAlpha[2]); // Aa -> AAAA aaaa ???? ????
+
+ void store4(SkPMColor px[4]) const { memcpy(px, this, 16); }
+ void store2(SkPMColor px[2]) const { memcpy(px, this, 8); }
+ void store1(SkPMColor px[1]) const { memcpy(px, this, 4); }
+
+ // 1, 2, or 4 SkPMColors with 16-bit components.
+ // This is most useful as the result of a multiply, e.g. from mulWiden().
+ class Wide : public Sk16h {
+ public:
+ Wide(const Sk16h& v) : Sk16h(v) {}
+
+ // Add, then pack the top byte of each component back down into 4 SkPMColors.
+ Sk4px addNarrowHi(const Sk16h&) const;
+
+ // Rounds, i.e. (x+127) / 255.
+ Sk4px div255() const;
+
+ // These just keep the types as Wide so the user doesn't have to keep casting.
+ Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
+ Wide operator + (const Wide& o) const { return INHERITED::operator+(o); }
+ Wide operator - (const Wide& o) const { return INHERITED::operator-(o); }
+ Wide operator >> (int bits) const { return INHERITED::operator>>(bits); }
+ Wide operator << (int bits) const { return INHERITED::operator<<(bits); }
+
+ private:
+ typedef Sk16h INHERITED;
+ };
+
+ Wide widen() const; // Widen 8-bit values to low 8-bits of 16-bit lanes.
+ Wide mulWiden(const Sk16b&) const; // 8-bit x 8-bit -> 16-bit components.
+
+ // The only 8-bit multiply we use is 8-bit x 8-bit -> 16-bit. Might as well make it pithy.
+ Wide operator * (const Sk4px& o) const { return this->mulWiden(o); }
+
+ // These just keep the types as Sk4px so the user doesn't have to keep casting.
+ Sk4px operator + (const Sk4px& o) const { return INHERITED::operator+(o); }
+ Sk4px operator - (const Sk4px& o) const { return INHERITED::operator-(o); }
+ Sk4px operator < (const Sk4px& o) const { return INHERITED::operator<(o); }
+ Sk4px thenElse(const Sk4px& t, const Sk4px& e) const { return INHERITED::thenElse(t,e); }
+
+ // Generally faster than (*this * o).div255().
+ // May be incorrect by +-1, but is always exactly correct when *this or o is 0 or 255.
+ Sk4px approxMulDiv255(const Sk16b& o) const {
+ // (x*y + x) / 256 meets these criteria. (As of course does (x*y + y) / 256 by symmetry.)
+ // FYI: (x*y + 255) / 256 also meets these criteria. In my brief testing, it was slower.
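+ // Worked example: x = 255, o = 128 computes (255 + 255*128) >> 8 = 128,
+ // matching the exact 255*128/255 = 128; x == 0 or o == 0 yields 0.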
+ return this->widen().addNarrowHi(*this * o);
+ }
+
+ // A generic driver that maps fn over a src array into a dst array.
+ // fn should take an Sk4px (4 src pixels) and return an Sk4px (4 dst pixels).
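+ // Example (editor's note): invert every pixel of a row, 4 at a time:
+ // Sk4px::MapSrc(n, dst, src, [](const Sk4px& px) { return px.inv(); });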
+ template <typename Fn>
+ static void MapSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ // This looks a bit odd, but it helps loop-invariant hoisting across different calls to fn.
+ // Basically, we need to make sure we keep things inside a single loop.
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(src+0)),
+ dst4 = fn(Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4).
+ template <typename Fn>
+ static void MapDstSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, alpha4).
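+ // Example (editor's note): scale dst by a coverage array:
+ // Sk4px::MapDstAlpha(n, dst, aa, [](const Sk4px& d, const Sk4px& a) {
+ // return d.approxMulDiv255(a);
+ // });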
+ template <typename Fn>
+ static void MapDstAlpha(int n, SkPMColor* dst, const SkAlpha* a, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4Alphas(a)).store4(dst);
+ dst += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2Alphas(a)).store2(dst);
+ dst += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Sk16b(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4, alpha4).
+ template <typename Fn>
+ static void MapDstSrcAlpha(int n, SkPMColor* dst, const SkPMColor* src, const SkAlpha* a,
+ const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src), Load4Alphas(a)).store4(dst);
+ dst += 4; src += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src), Load2Alphas(a)).store2(dst);
+ dst += 2; src += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src), Sk16b(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+private:
+ Sk4px() = default;
+
+ typedef Sk16b INHERITED;
+};
+
+} // namespace
+
+#ifdef SKNX_NO_SIMD
+ #include "src/opts/Sk4px_none.h"
+#else
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "src/opts/Sk4px_SSE2.h"
+ #elif defined(SK_ARM_HAS_NEON)
+ #include "src/opts/Sk4px_NEON.h"
+ #else
+ #include "src/opts/Sk4px_none.h"
+ #endif
+#endif
+
+#endif  // Sk4px_DEFINED
diff --git a/gfx/skia/skia/src/core/SkAAClip.cpp b/gfx/skia/skia/src/core/SkAAClip.cpp
new file mode 100644
index 0000000000..2f39a24457
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.cpp
@@ -0,0 +1,2162 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkAAClip.h"
+
+#include "include/core/SkPath.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScan.h"
+#include "src/utils/SkUTF.h"
+#include <atomic>
+#include <utility>
+
+class AutoAAClipValidate {
+public:
+ AutoAAClipValidate(const SkAAClip& clip) : fClip(clip) {
+ fClip.validate();
+ }
+ ~AutoAAClipValidate() {
+ fClip.validate();
+ }
+private:
+ const SkAAClip& fClip;
+};
+
+#ifdef SK_DEBUG
+ #define AUTO_AACLIP_VALIDATE(clip) AutoAAClipValidate acv(clip)
+#else
+ #define AUTO_AACLIP_VALIDATE(clip)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxInt32 0x7FFFFFFF
+
+#ifdef SK_DEBUG
+static inline bool x_in_rect(int x, const SkIRect& rect) {
+ return (unsigned)(x - rect.fLeft) < (unsigned)rect.width();
+}
+#endif
+
+static inline bool y_in_rect(int y, const SkIRect& rect) {
+ return (unsigned)(y - rect.fTop) < (unsigned)rect.height();
+}
+
+/*
+ * Data runs are packed [count, alpha]
+ */
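+//
+// For example, a fully opaque 300-pixel-wide row packs into two runs,
+// [255, 0xFF] [45, 0xFF], i.e. 4 bytes.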
+
+struct SkAAClip::YOffset {
+ int32_t fY;
+ uint32_t fOffset;
+};
+
+struct SkAAClip::RunHead {
+ std::atomic<int32_t> fRefCnt;
+ int32_t fRowCount;
+ size_t fDataSize;
+
+ YOffset* yoffsets() {
+ return (YOffset*)((char*)this + sizeof(RunHead));
+ }
+ const YOffset* yoffsets() const {
+ return (const YOffset*)((const char*)this + sizeof(RunHead));
+ }
+ uint8_t* data() {
+ return (uint8_t*)(this->yoffsets() + fRowCount);
+ }
+ const uint8_t* data() const {
+ return (const uint8_t*)(this->yoffsets() + fRowCount);
+ }
+
+ static RunHead* Alloc(int rowCount, size_t dataSize) {
+ size_t size = sizeof(RunHead) + rowCount * sizeof(YOffset) + dataSize;
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt.store(1);
+ head->fRowCount = rowCount;
+ head->fDataSize = dataSize;
+ return head;
+ }
+
+ static int ComputeRowSizeForWidth(int width) {
+ // 2 bytes per segment, where each segment can store up to 255 for count
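+ // e.g. width == 600 -> segments of 255 + 255 + 90 -> 3 * 2 == 6 bytes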
+ int segments = 0;
+ while (width > 0) {
+ segments += 1;
+ int n = SkMin32(width, 255);
+ width -= n;
+ }
+ return segments * 2; // each segment is row[0] + row[1] (n + alpha)
+ }
+
+ static RunHead* AllocRect(const SkIRect& bounds) {
+ SkASSERT(!bounds.isEmpty());
+ int width = bounds.width();
+ size_t rowSize = ComputeRowSizeForWidth(width);
+ RunHead* head = RunHead::Alloc(1, rowSize);
+ YOffset* yoff = head->yoffsets();
+ yoff->fY = bounds.height() - 1;
+ yoff->fOffset = 0;
+ uint8_t* row = head->data();
+ while (width > 0) {
+ int n = SkMin32(width, 255);
+ row[0] = n;
+ row[1] = 0xFF;
+ width -= n;
+ row += 2;
+ }
+ return head;
+ }
+};
+
+class SkAAClip::Iter {
+public:
+ Iter(const SkAAClip&);
+
+ bool done() const { return fDone; }
+ int top() const { return fTop; }
+ int bottom() const { return fBottom; }
+ const uint8_t* data() const { return fData; }
+ void next();
+
+private:
+ const YOffset* fCurrYOff;
+ const YOffset* fStopYOff;
+ const uint8_t* fData;
+
+ int fTop, fBottom;
+ bool fDone;
+};
+
+SkAAClip::Iter::Iter(const SkAAClip& clip) {
+ if (clip.isEmpty()) {
+ fDone = true;
+ fTop = fBottom = clip.fBounds.fBottom;
+ fData = nullptr;
+ fCurrYOff = nullptr;
+ fStopYOff = nullptr;
+ return;
+ }
+
+ const RunHead* head = clip.fRunHead;
+ fCurrYOff = head->yoffsets();
+ fStopYOff = fCurrYOff + head->fRowCount;
+ fData = head->data() + fCurrYOff->fOffset;
+
+ // setup first value
+ fTop = clip.fBounds.fTop;
+ fBottom = clip.fBounds.fTop + fCurrYOff->fY + 1;
+ fDone = false;
+}
+
+void SkAAClip::Iter::next() {
+ if (!fDone) {
+ const YOffset* prev = fCurrYOff;
+ const YOffset* curr = prev + 1;
+ SkASSERT(curr <= fStopYOff);
+
+ fTop = fBottom;
+ if (curr >= fStopYOff) {
+ fDone = true;
+ fBottom = kMaxInt32;
+ fData = nullptr;
+ } else {
+ fBottom += curr->fY - prev->fY;
+ fData += curr->fOffset - prev->fOffset;
+ fCurrYOff = curr;
+ }
+ }
+}
+
+#ifdef SK_DEBUG
+// assert we're exactly width-wide, and then return the number of bytes used
+static size_t compute_row_length(const uint8_t row[], int width) {
+ const uint8_t* origRow = row;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+ return row - origRow;
+}
+
+void SkAAClip::validate() const {
+ if (nullptr == fRunHead) {
+ SkASSERT(fBounds.isEmpty());
+ return;
+ }
+ SkASSERT(!fBounds.isEmpty());
+
+ const RunHead* head = fRunHead;
+ SkASSERT(head->fRefCnt.load() > 0);
+ SkASSERT(head->fRowCount > 0);
+
+ const YOffset* yoff = head->yoffsets();
+ const YOffset* ystop = yoff + head->fRowCount;
+ const int lastY = fBounds.height() - 1;
+
+ // Y and offset must be monotonic
+ int prevY = -1;
+ int32_t prevOffset = -1;
+ while (yoff < ystop) {
+ SkASSERT(prevY < yoff->fY);
+ SkASSERT(yoff->fY <= lastY);
+ prevY = yoff->fY;
+ SkASSERT(prevOffset < (int32_t)yoff->fOffset);
+ prevOffset = yoff->fOffset;
+ const uint8_t* row = head->data() + yoff->fOffset;
+ size_t rowLength = compute_row_length(row, fBounds.width());
+ SkASSERT(yoff->fOffset + rowLength <= head->fDataSize);
+ yoff += 1;
+ }
+ // check the last entry;
+ --yoff;
+ SkASSERT(yoff->fY == lastY);
+}
+
+static void dump_one_row(const uint8_t* SK_RESTRICT row,
+ int width, int leading_num) {
+ if (leading_num) {
+ SkDebugf("%03d ", leading_num);
+ }
+ while (width > 0) {
+ int n = row[0];
+ int val = row[1];
+ char out = '.';
+ if (val == 0xff) {
+ out = '*';
+ } else if (val > 0) {
+ out = '+';
+ }
+ for (int i = 0; i < n; i++) {
+ SkDebugf("%c", out);
+ }
+ row += 2;
+ width -= n;
+ }
+ SkDebugf("\n");
+}
+
+void SkAAClip::debug(bool compress_y) const {
+ Iter iter(*this);
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ if (compress_y) {
+ dump_one_row(iter.data(), width, iter.bottom() - iter.top() + 1);
+ } else {
+ do {
+ dump_one_row(iter.data(), width, 0);
+ } while (++y < iter.bottom());
+ }
+ iter.next();
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Count the number of zeros on the left and right edges of the passed in
+// RLE row. If 'row' is all zeros return 'width' in both variables.
+static void count_left_right_zeros(const uint8_t* row, int width,
+ int* leftZ, int* riteZ) {
+ int zeros = 0;
+ do {
+ if (row[1]) {
+ break;
+ }
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ zeros += n;
+ row += 2;
+ width -= n;
+ } while (width > 0);
+ *leftZ = zeros;
+
+ if (0 == width) {
+ // this line is completely empty; return 'width' in both variables
+ *riteZ = *leftZ;
+ return;
+ }
+
+ zeros = 0;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (0 == row[1]) {
+ zeros += n;
+ } else {
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+}
+
+// modify row in place, trimming off (zeros) from the left and right sides.
+// return the number of bytes that were completely eliminated from the left
+static int trim_row_left_right(uint8_t* row, int width, int leftZ, int riteZ) {
+ int trim = 0;
+ while (leftZ > 0) {
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ if (n > leftZ) {
+ row[-2] = n - leftZ;
+ break;
+ }
+ trim += 2;
+ leftZ -= n;
+ SkASSERT(leftZ >= 0);
+ }
+
+ if (riteZ) {
+ // walk row to the end, and then we'll back up to trim riteZ
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ }
+ // now skip whole runs of zeros
+ do {
+ row -= 2;
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (n > riteZ) {
+ row[0] = n - riteZ;
+ break;
+ }
+ riteZ -= n;
+ SkASSERT(riteZ >= 0);
+ } while (riteZ > 0);
+ }
+
+ return trim;
+}
+
+bool SkAAClip::trimLeftRight() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ uint8_t* base = head->data();
+
+ // After this loop, 'leftZeros' & 'rightZeros' will contain the minimum
+ // number of zeros on the left and right of the clip. This information
+ // can be used to shrink the bounding box.
+ int leftZeros = width;
+ int riteZeros = width;
+ while (yoff < stop) {
+ int L, R;
+ count_left_right_zeros(base + yoff->fOffset, width, &L, &R);
+ SkASSERT(L + R < width || (L == width && R == width));
+ if (L < leftZeros) {
+ leftZeros = L;
+ }
+ if (R < riteZeros) {
+ riteZeros = R;
+ }
+ if (0 == (leftZeros | riteZeros)) {
+ // no trimming to do
+ return true;
+ }
+ yoff += 1;
+ }
+
+ SkASSERT(leftZeros || riteZeros);
+ if (width == leftZeros) {
+ SkASSERT(width == riteZeros);
+ return this->setEmpty();
+ }
+
+ this->validate();
+
+ fBounds.fLeft += leftZeros;
+ fBounds.fRight -= riteZeros;
+ SkASSERT(!fBounds.isEmpty());
+
+ // For now we don't realloc the storage (for time), we just shrink in place
+ // This means we don't have to do any memmoves either, since we can just
+ // play tricks with the yoff->fOffset for each row
+ yoff = head->yoffsets();
+ while (yoff < stop) {
+ uint8_t* row = base + yoff->fOffset;
+ SkDEBUGCODE((void)compute_row_length(row, width);)
+ yoff->fOffset += trim_row_left_right(row, width, leftZeros, riteZeros);
+ SkDEBUGCODE((void)compute_row_length(base + yoff->fOffset, width - leftZeros - riteZeros);)
+ yoff += 1;
+ }
+ return true;
+}
+
+static bool row_is_all_zeros(const uint8_t* row, int width) {
+ SkASSERT(width > 0);
+ do {
+ if (row[1]) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ SkASSERT(0 == width);
+ return true;
+}
+
+bool SkAAClip::trimTopBottom() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ this->validate();
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ const uint8_t* base = head->data();
+
+ // Look to trim away empty rows from the top.
+ //
+ int skip = 0;
+ while (yoff < stop) {
+ const uint8_t* data = base + yoff->fOffset;
+ if (!row_is_all_zeros(data, width)) {
+ break;
+ }
+ skip += 1;
+ yoff += 1;
+ }
+ SkASSERT(skip <= head->fRowCount);
+ if (skip == head->fRowCount) {
+ return this->setEmpty();
+ }
+ if (skip > 0) {
+ // adjust fRowCount and fBounds.fTop, and slide all the data up
+ // as we remove [skip] number of YOffset entries
+ yoff = head->yoffsets();
+ int dy = yoff[skip - 1].fY + 1;
+ for (int i = skip; i < head->fRowCount; ++i) {
+ SkASSERT(yoff[i].fY >= dy);
+ yoff[i].fY -= dy;
+ }
+ YOffset* dst = head->yoffsets();
+ size_t size = head->fRowCount * sizeof(YOffset) + head->fDataSize;
+ memmove(dst, dst + skip, size - skip * sizeof(YOffset));
+
+ fBounds.fTop += dy;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+
+ this->validate();
+ // need to reset this after the memmove
+ base = head->data();
+ }
+
+ // Look to trim away empty rows from the bottom.
+ // We know that we have at least one non-zero row, so we can just walk
+ // backwards without checking for running past the start.
+ //
+ stop = yoff = head->yoffsets() + head->fRowCount;
+ do {
+ yoff -= 1;
+ } while (row_is_all_zeros(base + yoff->fOffset, width));
+ skip = SkToInt(stop - yoff - 1);
+ SkASSERT(skip >= 0 && skip < head->fRowCount);
+ if (skip > 0) {
+ // removing from the bottom is easier than from the top, as we don't
+ // have to adjust any of the Y values, we just have to trim the array
+ memmove(stop - skip, stop, head->fDataSize);
+
+ fBounds.fBottom = fBounds.fTop + yoff->fY + 1;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+ }
+ this->validate();
+
+ return true;
+}
+
+// can't validate before we're done, since trimming is part of the process of
+// making us valid after the Builder. Since we build from top to bottom, it's
+// possible our fBounds.fBottom is bigger than our last scanline of data, so
+// we trim fBounds.fBottom back up.
+//
+// TODO: check for duplicates in X and Y to further compress our data
+//
+bool SkAAClip::trimBounds() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ const YOffset* yoff = head->yoffsets();
+
+ SkASSERT(head->fRowCount > 0);
+ const YOffset& lastY = yoff[head->fRowCount - 1];
+ SkASSERT(lastY.fY + 1 <= fBounds.height());
+ fBounds.fBottom = fBounds.fTop + lastY.fY + 1;
+ SkASSERT(lastY.fY + 1 == fBounds.height());
+ SkASSERT(!fBounds.isEmpty());
+
+ return this->trimTopBottom() && this->trimLeftRight();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkAAClip::freeRuns() {
+ if (fRunHead) {
+ SkASSERT(fRunHead->fRefCnt.load() >= 1);
+ if (1 == fRunHead->fRefCnt--) {
+ sk_free(fRunHead);
+ }
+ }
+}
+
+SkAAClip::SkAAClip() {
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+}
+
+SkAAClip::SkAAClip(const SkAAClip& src) {
+ SkDEBUGCODE(fBounds.setEmpty();) // need this for validate
+ fRunHead = nullptr;
+ *this = src;
+}
+
+SkAAClip::~SkAAClip() {
+ this->freeRuns();
+}
+
+SkAAClip& SkAAClip::operator=(const SkAAClip& src) {
+ AUTO_AACLIP_VALIDATE(*this);
+ src.validate();
+
+ if (this != &src) {
+ this->freeRuns();
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (fRunHead) {
+ fRunHead->fRefCnt++;
+ }
+ }
+ return *this;
+}
+
+bool operator==(const SkAAClip& a, const SkAAClip& b) {
+ a.validate();
+ b.validate();
+
+ if (&a == &b) {
+ return true;
+ }
+ if (a.fBounds != b.fBounds) {
+ return false;
+ }
+
+ const SkAAClip::RunHead* ah = a.fRunHead;
+ const SkAAClip::RunHead* bh = b.fRunHead;
+
+ // this catches empties and rects being equal
+ if (ah == bh) {
+ return true;
+ }
+
+ // now we insist that both are complex (but different ptrs)
+ if (!a.fRunHead || !b.fRunHead) {
+ return false;
+ }
+
+ return ah->fRowCount == bh->fRowCount &&
+ ah->fDataSize == bh->fDataSize &&
+ !memcmp(ah->data(), bh->data(), ah->fDataSize);
+}
+
+void SkAAClip::swap(SkAAClip& other) {
+ AUTO_AACLIP_VALIDATE(*this);
+ other.validate();
+
+ using std::swap;
+ swap(fBounds, other.fBounds);
+ swap(fRunHead, other.fRunHead);
+}
+
+bool SkAAClip::set(const SkAAClip& src) {
+ *this = src;
+ return !this->isEmpty();
+}
+
+bool SkAAClip::setEmpty() {
+ this->freeRuns();
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+ return false;
+}
+
+bool SkAAClip::setRect(const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+#if 0
+ SkRect r;
+ r.set(bounds);
+ SkPath path;
+ path.addRect(r);
+ return this->setPath(path);
+#else
+ this->freeRuns();
+ fBounds = bounds;
+ fRunHead = RunHead::AllocRect(bounds);
+ SkASSERT(!this->isEmpty());
+ return true;
+#endif
+}
+
+bool SkAAClip::isRect() const {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ if (head->fRowCount != 1) {
+ return false;
+ }
+ const YOffset* yoff = head->yoffsets();
+ if (yoff->fY != fBounds.fBottom - 1) {
+ return false;
+ }
+
+ const uint8_t* row = head->data() + yoff->fOffset;
+ int width = fBounds.width();
+ do {
+ if (row[1] != 0xFF) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ return true;
+}
+
+bool SkAAClip::setRect(const SkRect& r, bool doAA) {
+ if (r.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ // TODO: special case this
+
+ SkPath path;
+ path.addRect(r);
+ return this->setPath(path, nullptr, doAA);
+}
+
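+// Splits a long run into [count, value] pairs of at most 255 pixels each,
+// e.g. count == 600 with value 0xFF appends [255,FF] [255,FF] [90,FF].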
+static void append_run(SkTDArray<uint8_t>& array, uint8_t value, int count) {
+ SkASSERT(count >= 0);
+ while (count > 0) {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* data = array.append(2);
+ data[0] = n;
+ data[1] = value;
+ count -= n;
+ }
+}
+
+bool SkAAClip::setRegion(const SkRegion& rgn) {
+ if (rgn.isEmpty()) {
+ return this->setEmpty();
+ }
+ if (rgn.isRect()) {
+ return this->setRect(rgn.getBounds());
+ }
+
+#if 0
+ SkAAClip clip;
+ SkRegion::Iterator iter(rgn);
+ for (; !iter.done(); iter.next()) {
+ clip.op(iter.rect(), SkRegion::kUnion_Op);
+ }
+ this->swap(clip);
+ return !this->isEmpty();
+#else
+ const SkIRect& bounds = rgn.getBounds();
+ const int offsetX = bounds.fLeft;
+ const int offsetY = bounds.fTop;
+
+ SkTDArray<YOffset> yArray;
+ SkTDArray<uint8_t> xArray;
+
+ yArray.setReserve(SkMin32(bounds.height(), 1024));
+ xArray.setReserve(SkMin32(bounds.width(), 512) * 128);
+
+ SkRegion::Iterator iter(rgn);
+ int prevRight = 0;
+ int prevBot = 0;
+ YOffset* currY = nullptr;
+
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ int bot = r.fBottom - offsetY;
+ SkASSERT(bot >= prevBot);
+ if (bot > prevBot) {
+ if (currY) {
+ // flush current row
+ append_run(xArray, 0, bounds.width() - prevRight);
+ }
+ // did we introduce an empty-gap from the prev row?
+ int top = r.fTop - offsetY;
+ if (top > prevBot) {
+ currY = yArray.append();
+ currY->fY = top - 1;
+ currY->fOffset = xArray.count();
+ append_run(xArray, 0, bounds.width());
+ }
+ // create a new record for this Y value
+ currY = yArray.append();
+ currY->fY = bot - 1;
+ currY->fOffset = xArray.count();
+ prevRight = 0;
+ prevBot = bot;
+ }
+
+ int x = r.fLeft - offsetX;
+ append_run(xArray, 0, x - prevRight);
+
+ int w = r.fRight - r.fLeft;
+ append_run(xArray, 0xFF, w);
+ prevRight = x + w;
+ SkASSERT(prevRight <= bounds.width());
+ }
+ // flush last row
+ append_run(xArray, 0, bounds.width() - prevRight);
+
+ // now pack everything into a RunHead
+ RunHead* head = RunHead::Alloc(yArray.count(), xArray.bytes());
+ memcpy(head->yoffsets(), yArray.begin(), yArray.bytes());
+ memcpy(head->data(), xArray.begin(), xArray.bytes());
+
+ this->setEmpty();
+ fBounds = bounds;
+ fRunHead = head;
+ this->validate();
+ return true;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const uint8_t* SkAAClip::findRow(int y, int* lastYForRow) const {
+ SkASSERT(fRunHead);
+
+ if (!y_in_rect(y, fBounds)) {
+ return nullptr;
+ }
+ y -= fBounds.y(); // our yoffs values are relative to the top
+
+ const YOffset* yoff = fRunHead->yoffsets();
+ while (yoff->fY < y) {
+ yoff += 1;
+ SkASSERT(yoff - fRunHead->yoffsets() < fRunHead->fRowCount);
+ }
+
+ if (lastYForRow) {
+ *lastYForRow = fBounds.y() + yoff->fY;
+ }
+ return fRunHead->data() + yoff->fOffset;
+}
+
+const uint8_t* SkAAClip::findX(const uint8_t data[], int x, int* initialCount) const {
+ SkASSERT(x_in_rect(x, fBounds));
+ x -= fBounds.x();
+
+ // first skip up to X
+ for (;;) {
+ int n = data[0];
+ if (x < n) {
+ if (initialCount) {
+ *initialCount = n - x;
+ }
+ break;
+ }
+ data += 2;
+ x -= n;
+ }
+ return data;
+}
+
+bool SkAAClip::quickContains(int left, int top, int right, int bottom) const {
+ if (this->isEmpty()) {
+ return false;
+ }
+ if (!fBounds.contains(SkIRect{left, top, right, bottom})) {
+ return false;
+ }
+#if 0
+ if (this->isRect()) {
+ return true;
+ }
+#endif
+
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = this->findRow(top, &lastY);
+ if (lastY < bottom) {
+ return false;
+ }
+ // now just need to check in X
+ int count;
+ row = this->findX(row, left, &count);
+#if 0
+ return count >= (right - left) && 0xFF == row[1];
+#else
+ int rectWidth = right - left;
+ while (0xFF == row[1]) {
+ if (count >= rectWidth) {
+ return true;
+ }
+ rectWidth -= count;
+ row += 2;
+ count = row[0];
+ }
+ return false;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClip::Builder {
+ SkIRect fBounds;
+ struct Row {
+ int fY;
+ int fWidth;
+ SkTDArray<uint8_t>* fData;
+ };
+ SkTDArray<Row> fRows;
+ Row* fCurrRow;
+ int fPrevY;
+ int fWidth;
+ int fMinY;
+
+public:
+ Builder(const SkIRect& bounds) : fBounds(bounds) {
+ fPrevY = -1;
+ fWidth = bounds.width();
+ fCurrRow = nullptr;
+ fMinY = bounds.fTop;
+ }
+
+ ~Builder() {
+ Row* row = fRows.begin();
+ Row* stop = fRows.end();
+ while (row < stop) {
+ delete row->fData;
+ row += 1;
+ }
+ }
+
+ const SkIRect& getBounds() const { return fBounds; }
+
+ void addRun(int x, int y, U8CPU alpha, int count) {
+ SkASSERT(count > 0);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fBounds.contains(x + count - 1, y));
+
+ x -= fBounds.left();
+ y -= fBounds.top();
+
+ Row* row = fCurrRow;
+ if (y != fPrevY) {
+ SkASSERT(y > fPrevY);
+ fPrevY = y;
+ row = this->flushRow(true);
+ row->fY = y;
+ row->fWidth = 0;
+ SkASSERT(row->fData);
+ SkASSERT(0 == row->fData->count());
+ fCurrRow = row;
+ }
+
+ SkASSERT(row->fWidth <= x);
+ SkASSERT(row->fWidth < fBounds.width());
+
+ SkTDArray<uint8_t>& data = *row->fData;
+
+ int gap = x - row->fWidth;
+ if (gap) {
+ AppendRun(data, 0, gap);
+ row->fWidth += gap;
+ SkASSERT(row->fWidth < fBounds.width());
+ }
+
+ AppendRun(data, alpha, count);
+ row->fWidth += count;
+ SkASSERT(row->fWidth <= fBounds.width());
+ }
+
+ void addColumn(int x, int y, U8CPU alpha, int height) {
+ SkASSERT(fBounds.contains(x, y + height - 1));
+
+ this->addRun(x, y, alpha, 1);
+ this->flushRowH(fCurrRow);
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addRectRun(int x, int y, int width, int height) {
+ SkASSERT(fBounds.contains(x + width - 1, y + height - 1));
+ this->addRun(x, y, 0xFF, width);
+
+ // we assume the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addAntiRectRun(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ // According to SkBlitter.cpp, no matter whether leftAlpha is 0 or positive,
+ // we should always consider [x, x+1] as the left-most column and [x+1, x+1+width]
+ // as the rect with full alpha.
+ SkASSERT(fBounds.contains(x + width + (rightAlpha > 0 ? 1 : 0),
+ y + height - 1));
+ SkASSERT(width >= 0);
+
+ // Conceptually we're always adding 3 runs, but we should
+ // merge or omit them if possible.
+ if (leftAlpha == 0xFF) {
+ width++;
+ } else if (leftAlpha > 0) {
+ this->addRun(x++, y, leftAlpha, 1);
+ } else {
+ // leftAlpha is 0, ignore the left column
+ x++;
+ }
+ if (rightAlpha == 0xFF) {
+ width++;
+ }
+ if (width > 0) {
+ this->addRun(x, y, 0xFF, width);
+ }
+ if (rightAlpha > 0 && rightAlpha < 255) {
+ this->addRun(x + width, y, rightAlpha, 1);
+ }
+
+ // if we never called addRun, we might not have a fCurrRow yet
+ if (fCurrRow) {
+ // we assume the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+ }
+
+ bool finish(SkAAClip* target) {
+ this->flushRow(false);
+
+ const Row* row = fRows.begin();
+ const Row* stop = fRows.end();
+
+ size_t dataSize = 0;
+ while (row < stop) {
+ dataSize += row->fData->count();
+ row += 1;
+ }
+
+ if (0 == dataSize) {
+ return target->setEmpty();
+ }
+
+ SkASSERT(fMinY >= fBounds.fTop);
+ SkASSERT(fMinY < fBounds.fBottom);
+ int adjustY = fMinY - fBounds.fTop;
+ fBounds.fTop = fMinY;
+
+ RunHead* head = RunHead::Alloc(fRows.count(), dataSize);
+ YOffset* yoffset = head->yoffsets();
+ uint8_t* data = head->data();
+ uint8_t* baseData = data;
+
+ row = fRows.begin();
+ SkDEBUGCODE(int prevY = row->fY - 1;)
+ while (row < stop) {
+ SkASSERT(prevY < row->fY); // must be monotonic
+ SkDEBUGCODE(prevY = row->fY);
+
+ yoffset->fY = row->fY - adjustY;
+ yoffset->fOffset = SkToU32(data - baseData);
+ yoffset += 1;
+
+ size_t n = row->fData->count();
+ memcpy(data, row->fData->begin(), n);
+#ifdef SK_DEBUG
+ size_t bytesNeeded = compute_row_length(data, fBounds.width());
+ SkASSERT(bytesNeeded == n);
+#endif
+ data += n;
+
+ row += 1;
+ }
+
+ target->freeRuns();
+ target->fBounds = fBounds;
+ target->fRunHead = head;
+ return target->trimBounds();
+ }
+
+ void dump() {
+ this->validate();
+ int y;
+ for (y = 0; y < fRows.count(); ++y) {
+ const Row& row = fRows[y];
+ SkDebugf("Y:%3d W:%3d", row.fY, row.fWidth);
+ const SkTDArray<uint8_t>& data = *row.fData;
+ int count = data.count();
+ SkASSERT(!(count & 1));
+ const uint8_t* ptr = data.begin();
+ for (int x = 0; x < count; x += 2) {
+ SkDebugf(" [%3d:%02X]", ptr[0], ptr[1]);
+ ptr += 2;
+ }
+ SkDebugf("\n");
+ }
+ }
+
+ void validate() {
+#ifdef SK_DEBUG
+ int prevY = -1;
+ for (int i = 0; i < fRows.count(); ++i) {
+ const Row& row = fRows[i];
+ SkASSERT(prevY < row.fY);
+ SkASSERT(fWidth == row.fWidth);
+ int count = row.fData->count();
+ const uint8_t* ptr = row.fData->begin();
+ SkASSERT(!(count & 1));
+ int w = 0;
+ for (int x = 0; x < count; x += 2) {
+ int n = ptr[0];
+ SkASSERT(n > 0);
+ w += n;
+ SkASSERT(w <= fWidth);
+ ptr += 2;
+ }
+ SkASSERT(w == fWidth);
+ prevY = row.fY;
+ }
+#endif
+ }
+
+ // only called by BuilderBlitter
+ void setMinY(int y) {
+ fMinY = y;
+ }
+
+private:
+ void flushRowH(Row* row) {
+ // flush current row if needed
+ if (row->fWidth < fWidth) {
+ AppendRun(*row->fData, 0, fWidth - row->fWidth);
+ row->fWidth = fWidth;
+ }
+ }
+
+ Row* flushRow(bool readyForAnother) {
+ Row* next = nullptr;
+ int count = fRows.count();
+ if (count > 0) {
+ this->flushRowH(&fRows[count - 1]);
+ }
+ if (count > 1) {
+ // are our last two runs the same?
+ Row* prev = &fRows[count - 2];
+ Row* curr = &fRows[count - 1];
+ SkASSERT(prev->fWidth == fWidth);
+ SkASSERT(curr->fWidth == fWidth);
+ if (*prev->fData == *curr->fData) {
+ prev->fY = curr->fY;
+ if (readyForAnother) {
+ curr->fData->rewind();
+ next = curr;
+ } else {
+ delete curr->fData;
+ fRows.removeShuffle(count - 1);
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ return next;
+ }
+
+ static void AppendRun(SkTDArray<uint8_t>& data, U8CPU alpha, int count) {
+ do {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* ptr = data.append(2);
+ ptr[0] = n;
+ ptr[1] = alpha;
+ count -= n;
+ } while (count > 0);
+ }
+};
+
+class SkAAClip::BuilderBlitter : public SkBlitter {
+ int fLastY;
+
+ /*
+ If we see a gap of 1 or more empty scanlines while building in Y-order,
+ we inject an explicit empty scanline (alpha==0)
+
+ See AAClipTest.cpp : test_path_with_hole()
+ */
+ void checkForYGap(int y) {
+ SkASSERT(y >= fLastY);
+ if (fLastY > -SK_MaxS32) {
+ int gap = y - fLastY;
+ if (gap > 1) {
+ fBuilder->addRun(fLeft, y - 1, 0, fRight - fLeft);
+ }
+ }
+ fLastY = y;
+ }
+
+public:
+
+ BuilderBlitter(Builder* builder) {
+ fBuilder = builder;
+ fLeft = builder->getBounds().fLeft;
+ fRight = builder->getBounds().fRight;
+ fMinY = SK_MaxS32;
+ fLastY = -SK_MaxS32; // sentinel
+ }
+
+ void finish() {
+ if (fMinY < SK_MaxS32) {
+ fBuilder->setMinY(fMinY);
+ }
+ }
+
+ /**
+ Must evaluate clips in scan-line order, so we don't want to allow blitV();
+ but an AAClip can be clipped down to a single pixel wide, so we
+ must support it (given AntiRect semantics: minimum width is 2).
+ Instead we rely on the runtime asserts to guarantee Y monotonicity;
+ any failure cases they miss may produce minor artifacts.
+ */
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ if (height == 1) {
+ // We're still in scan-line order if height is 1
+ // This is useful for Analytic AA
+ const SkAlpha alphas[2] = {alpha, 0};
+ const int16_t runs[2] = {1, 0};
+ this->blitAntiH(x, y, alphas, runs);
+ } else {
+ this->recordMinY(y);
+ fBuilder->addColumn(x, y, alpha, height);
+ fLastY = y + height - 1;
+ }
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRectRun(x, y, width, height);
+ fLastY = y + height - 1;
+ }
+
+ virtual void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addAntiRectRun(x, y, width, height, leftAlpha, rightAlpha);
+ fLastY = y + height - 1;
+ }
+
+ void blitMask(const SkMask&, const SkIRect& clip) override
+ { unexpected(); }
+
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override {
+ return nullptr;
+ }
+
+ void blitH(int x, int y, int width) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRun(x, y, 0xFF, width);
+ }
+
+ virtual void blitAntiH(int x, int y, const SkAlpha alpha[],
+ const int16_t runs[]) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ return;
+ }
+
+ // The supersampler's buffer can be the width of the device, so
+ // we may have to trim the run to our bounds. Previously, we asserted that
+ // the extra spans were always alpha==0.
+ // However, analytic AA is too sensitive to precision errors:
+ // it may produce extra spans with very tiny alpha because, after several
+ // arithmetic operations, the edge may bleed past the path boundary a little.
+ // Therefore, instead of always asserting alpha==0, we assert alpha < 0x10.
+ int localX = x;
+ int localCount = count;
+ if (x < fLeft) {
+ SkASSERT(0x10 > *alpha);
+ int gap = fLeft - x;
+ SkASSERT(gap <= count);
+ localX += gap;
+ localCount -= gap;
+ }
+ int right = x + count;
+ if (right > fRight) {
+ SkASSERT(0x10 > *alpha);
+ localCount -= right - fRight;
+ SkASSERT(localCount >= 0);
+ }
+
+ if (localCount) {
+ fBuilder->addRun(localX, y, *alpha, localCount);
+ }
+ // Next run
+ runs += count;
+ alpha += count;
+ x += count;
+ }
+ }
+
+private:
+ Builder* fBuilder;
+ int fLeft; // cache of builder's bounds' left edge
+ int fRight;
+ int fMinY;
+
+ /*
+ * We track this in case the scan converter skipped some number of
+ * scanlines at the top (relative to the bounds it was given). This allows
+ * the builder, during finish(), to trim its bounds down to the "real"
+ * top.
+ */
+ void recordMinY(int y) {
+ if (y < fMinY) {
+ fMinY = y;
+ }
+ }
+
+ void unexpected() {
+ SK_ABORT("---- did not expect to get called here");
+ }
+};
+
+bool SkAAClip::setPath(const SkPath& path, const SkRegion* clip, bool doAA) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (clip && clip->isEmpty()) {
+ return this->setEmpty();
+ }
+
+ SkIRect ibounds;
+ path.getBounds().roundOut(&ibounds);
+
+ SkRegion tmpClip;
+ if (nullptr == clip) {
+ tmpClip.setRect(ibounds);
+ clip = &tmpClip;
+ }
+
+ // Since we assert that the BuilderBlitter will never blit outside the intersection
+ // of clip and ibounds, we create this snugClip to be that intersection and send it
+ // to the scan-converter.
+ SkRegion snugClip(*clip);
+
+ if (path.isInverseFillType()) {
+ ibounds = clip->getBounds();
+ } else {
+ if (ibounds.isEmpty() || !ibounds.intersect(clip->getBounds())) {
+ return this->setEmpty();
+ }
+ snugClip.op(ibounds, SkRegion::kIntersect_Op);
+ }
+
+ Builder builder(ibounds);
+ BuilderBlitter blitter(&builder);
+
+ if (doAA) {
+ SkScan::AntiFillPath(path, snugClip, &blitter, true);
+ } else {
+ SkScan::FillPath(path, snugClip, &blitter);
+ }
+
+ blitter.finish();
+ return builder.finish(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef void (*RowProc)(SkAAClip::Builder&, int bottom,
+ const uint8_t* rowA, const SkIRect& rectA,
+ const uint8_t* rowB, const SkIRect& rectB);
+
+typedef U8CPU (*AlphaProc)(U8CPU alphaA, U8CPU alphaB);
+
+static U8CPU sectAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // Multiply
+ return SkMulDiv255Round(alphaA, alphaB);
+}
+
+static U8CPU unionAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // SrcOver
+ return alphaA + alphaB - SkMulDiv255Round(alphaA, alphaB);
+}
+
+static U8CPU diffAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // SrcOut
+ return SkMulDiv255Round(alphaA, 0xFF - alphaB);
+}
+
+static U8CPU xorAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // XOR
+ return alphaA + alphaB - 2 * SkMulDiv255Round(alphaA, alphaB);
+}
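+//
+// For example, with alphaA == 0xFF and alphaB == 0xFF these give:
+// intersect -> 0xFF, union -> 0xFF, difference -> 0x00, xor -> 0x00.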
+
+static AlphaProc find_alpha_proc(SkRegion::Op op) {
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ return sectAlphaProc;
+ case SkRegion::kDifference_Op:
+ return diffAlphaProc;
+ case SkRegion::kUnion_Op:
+ return unionAlphaProc;
+ case SkRegion::kXOR_Op:
+ return xorAlphaProc;
+ default:
+ SkDEBUGFAIL("unexpected region op");
+ return sectAlphaProc;
+ }
+}
+
+class RowIter {
+public:
+ RowIter(const uint8_t* row, const SkIRect& bounds) {
+ fRow = row;
+ fLeft = bounds.fLeft;
+ fBoundsRight = bounds.fRight;
+ if (row) {
+ fRight = bounds.fLeft + row[0];
+ SkASSERT(fRight <= fBoundsRight);
+ fAlpha = row[1];
+ fDone = false;
+ } else {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ }
+ }
+
+ bool done() const { return fDone; }
+ int left() const { return fLeft; }
+ int right() const { return fRight; }
+ U8CPU alpha() const { return fAlpha; }
+ void next() {
+ if (!fDone) {
+ fLeft = fRight;
+ if (fRight == fBoundsRight) {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ } else {
+ fRow += 2;
+ fRight += fRow[0];
+ fAlpha = fRow[1];
+ SkASSERT(fRight <= fBoundsRight);
+ }
+ }
+ }
+
+private:
+ const uint8_t* fRow;
+ int fLeft;
+ int fRight;
+ int fBoundsRight;
+ bool fDone;
+ uint8_t fAlpha;
+};
+
+static void adjust_row(RowIter& iter, int& leftA, int& riteA, int rite) {
+ if (rite == riteA) {
+ iter.next();
+ leftA = iter.left();
+ riteA = iter.right();
+ }
+}
+
+#if 0 // UNUSED
+static bool intersect(int& min, int& max, int boundsMin, int boundsMax) {
+ SkASSERT(min < max);
+ SkASSERT(boundsMin < boundsMax);
+ if (min >= boundsMax || max <= boundsMin) {
+ return false;
+ }
+ if (min < boundsMin) {
+ min = boundsMin;
+ }
+ if (max > boundsMax) {
+ max = boundsMax;
+ }
+ return true;
+}
+#endif
+
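+// Merge two RLE rows with a zipper walk: each step emits the span up to the
+// nearer right edge, combining the two coverages with proc, then advances
+// whichever iterator's run just ended (adjust_row). Editor's example: with
+// A = [0,4)@0x80 and B = [2,6)@0xFF under intersect, the emitted runs are
+// [0,2)@0x00, [2,4)@0x80, [4,6)@0x00.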
+static void operatorX(SkAAClip::Builder& builder, int lastY,
+ RowIter& iterA, RowIter& iterB,
+ AlphaProc proc, const SkIRect& bounds) {
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+ int prevRite = bounds.fLeft;
+
+ do {
+ U8CPU alphaA = 0;
+ U8CPU alphaB = 0;
+ int left, rite;
+
+ if (leftA < leftB) {
+ left = leftA;
+ alphaA = iterA.alpha();
+ if (riteA <= leftB) {
+ rite = riteA;
+ } else {
+ rite = leftA = leftB;
+ }
+ } else if (leftB < leftA) {
+ left = leftB;
+ alphaB = iterB.alpha();
+ if (riteB <= leftA) {
+ rite = riteB;
+ } else {
+ rite = leftB = leftA;
+ }
+ } else {
+ left = leftA; // or leftB, since leftA == leftB
+ rite = leftA = leftB = SkMin32(riteA, riteB);
+ alphaA = iterA.alpha();
+ alphaB = iterB.alpha();
+ }
+
+ if (left >= bounds.fRight) {
+ break;
+ }
+ if (rite > bounds.fRight) {
+ rite = bounds.fRight;
+ }
+
+ if (left >= bounds.fLeft) {
+ SkASSERT(rite > left);
+ builder.addRun(left, lastY, proc(alphaA, alphaB), rite - left);
+ prevRite = rite;
+ }
+
+ adjust_row(iterA, leftA, riteA, rite);
+ adjust_row(iterB, leftB, riteB, rite);
+ } while (!iterA.done() || !iterB.done());
+
+ if (prevRite < bounds.fRight) {
+ builder.addRun(prevRite, lastY, 0, bounds.fRight - prevRite);
+ }
+}
+
+static void adjust_iter(SkAAClip::Iter& iter, int& topA, int& botA, int bot) {
+ if (bot == botA) {
+ iter.next();
+ topA = botA;
+ SkASSERT(botA == iter.top());
+ botA = iter.bottom();
+ }
+}
+
+static void operateY(SkAAClip::Builder& builder, const SkAAClip& A,
+ const SkAAClip& B, SkRegion::Op op) {
+ AlphaProc proc = find_alpha_proc(op);
+ const SkIRect& bounds = builder.getBounds();
+
+ SkAAClip::Iter iterA(A);
+ SkAAClip::Iter iterB(B);
+
+ SkASSERT(!iterA.done());
+ int topA = iterA.top();
+ int botA = iterA.bottom();
+ SkASSERT(!iterB.done());
+ int topB = iterB.top();
+ int botB = iterB.bottom();
+
+ do {
+ const uint8_t* rowA = nullptr;
+ const uint8_t* rowB = nullptr;
+ int top, bot;
+
+ if (topA < topB) {
+ top = topA;
+ rowA = iterA.data();
+ if (botA <= topB) {
+ bot = botA;
+ } else {
+ bot = topA = topB;
+ }
+
+ } else if (topB < topA) {
+ top = topB;
+ rowB = iterB.data();
+ if (botB <= topA) {
+ bot = botB;
+ } else {
+ bot = topB = topA;
+ }
+ } else {
+ top = topA; // or topB, since topA == topB
+ bot = topA = topB = SkMin32(botA, botB);
+ rowA = iterA.data();
+ rowB = iterB.data();
+ }
+
+ if (top >= bounds.fBottom) {
+ break;
+ }
+
+ if (bot > bounds.fBottom) {
+ bot = bounds.fBottom;
+ }
+ SkASSERT(top < bot);
+
+ if (!rowA && !rowB) {
+ builder.addRun(bounds.fLeft, bot - 1, 0, bounds.width());
+ } else if (top >= bounds.fTop) {
+ SkASSERT(bot <= bounds.fBottom);
+ RowIter rowIterA(rowA, rowA ? A.getBounds() : bounds);
+ RowIter rowIterB(rowB, rowB ? B.getBounds() : bounds);
+ operatorX(builder, bot - 1, rowIterA, rowIterB, proc, bounds);
+ }
+
+ adjust_iter(iterA, topA, botA, bot);
+ adjust_iter(iterB, topB, botB, bot);
+ } while (!iterA.done() || !iterB.done());
+}
+
+bool SkAAClip::op(const SkAAClip& clipAOrig, const SkAAClip& clipBOrig,
+ SkRegion::Op op) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (SkRegion::kReplace_Op == op) {
+ return this->set(clipBOrig);
+ }
+
+ const SkAAClip* clipA = &clipAOrig;
+ const SkAAClip* clipB = &clipBOrig;
+
+ if (SkRegion::kReverseDifference_Op == op) {
+ using std::swap;
+ swap(clipA, clipB);
+ op = SkRegion::kDifference_Op;
+ }
+
+ bool a_empty = clipA->isEmpty();
+ bool b_empty = clipB->isEmpty();
+
+ SkIRect bounds;
+ switch (op) {
+ case SkRegion::kDifference_Op:
+ if (a_empty) {
+ return this->setEmpty();
+ }
+ if (b_empty || !SkIRect::Intersects(clipA->fBounds, clipB->fBounds)) {
+ return this->set(*clipA);
+ }
+ bounds = clipA->fBounds;
+ break;
+
+ case SkRegion::kIntersect_Op:
+ if ((a_empty | b_empty) || !bounds.intersect(clipA->fBounds,
+ clipB->fBounds)) {
+ return this->setEmpty();
+ }
+ break;
+
+ case SkRegion::kUnion_Op:
+ case SkRegion::kXOR_Op:
+ if (a_empty) {
+ return this->set(*clipB);
+ }
+ if (b_empty) {
+ return this->set(*clipA);
+ }
+ bounds = clipA->fBounds;
+ bounds.join(clipB->fBounds);
+ break;
+
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return !this->isEmpty();
+ }
+
+ SkASSERT(SkIRect::Intersects(bounds, clipA->fBounds));
+ SkASSERT(SkIRect::Intersects(bounds, clipB->fBounds));
+
+ Builder builder(bounds);
+ operateY(builder, *clipA, *clipB, op);
+
+ return builder.finish(this);
+}
+
+/*
+ * It can be expensive to build a local aaclip before applying the op, so
+ * we first see if we can restrict the bounds of the new rect to our current
+ * bounds, or note that the new rect subsumes our current clip.
+ */
+
+bool SkAAClip::op(const SkIRect& rOrig, SkRegion::Op op) {
+ SkIRect rStorage;
+ const SkIRect* r = &rOrig;
+
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ if (!rStorage.intersect(rOrig, fBounds)) {
+ // no overlap, so we're empty
+ return this->setEmpty();
+ }
+ if (rStorage == fBounds) {
+ // we were wholly inside the rect, no change
+ return !this->isEmpty();
+ }
+ if (this->quickContains(rStorage)) {
+ // the intersection is wholly inside us, we're a rect
+ return this->setRect(rStorage);
+ }
+ r = &rStorage; // use the intersected bounds
+ break;
+ case SkRegion::kDifference_Op:
+ break;
+ case SkRegion::kUnion_Op:
+ if (rOrig.contains(fBounds)) {
+ return this->setRect(rOrig);
+ }
+ break;
+ default:
+ break;
+ }
+
+ SkAAClip clip;
+ clip.setRect(*r);
+ return this->op(*this, clip, op);
+}
+
+bool SkAAClip::op(const SkRect& rOrig, SkRegion::Op op, bool doAA) {
+ SkRect rStorage, boundsStorage;
+ const SkRect* r = &rOrig;
+
+ boundsStorage.set(fBounds);
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kDifference_Op:
+ if (!rStorage.intersect(rOrig, boundsStorage)) {
+ if (SkRegion::kIntersect_Op == op) {
+ return this->setEmpty();
+ } else { // kDifference
+ return !this->isEmpty();
+ }
+ }
+ r = &rStorage; // use the intersected bounds
+ break;
+ case SkRegion::kUnion_Op:
+ if (rOrig.contains(boundsStorage)) {
+ return this->setRect(rOrig);
+ }
+ break;
+ default:
+ break;
+ }
+
+ SkAAClip clip;
+ clip.setRect(*r, doAA);
+ return this->op(*this, clip, op);
+}
+
+bool SkAAClip::op(const SkAAClip& clip, SkRegion::Op op) {
+ return this->op(*this, clip, op);
+}
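+
+// A minimal usage sketch for the op() overloads above (somePath/otherClip are
+// hypothetical; setPath and setRect are declared in SkAAClip.h):
+//
+//     SkAAClip clip;
+//     clip.setPath(somePath);                        // doAA defaults to true
+//     clip.op(SkIRect::MakeWH(100, 100), SkRegion::kIntersect_Op);
+//     clip.op(otherClip, SkRegion::kUnion_Op);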
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkAAClip::translate(int dx, int dy, SkAAClip* dst) const {
+ if (nullptr == dst) {
+ return !this->isEmpty();
+ }
+
+ if (this->isEmpty()) {
+ return dst->setEmpty();
+ }
+
+ if (this != dst) {
+ fRunHead->fRefCnt++;
+ dst->freeRuns();
+ dst->fRunHead = fRunHead;
+ dst->fBounds = fBounds;
+ }
+ dst->fBounds.offset(dx, dy);
+ return true;
+}
+
+static void expand_row_to_mask(uint8_t* SK_RESTRICT mask,
+ const uint8_t* SK_RESTRICT row,
+ int width) {
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(width >= n);
+ memset(mask, row[1], n);
+ mask += n;
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+}
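+
+// Each row is run-length encoded as (count, alpha) byte pairs covering the full
+// row width. For example, the row {3, 0x00, 2, 0xFF} expands into the five mask
+// bytes {0x00, 0x00, 0x00, 0xFF, 0xFF}.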
+
+void SkAAClip::copyToMask(SkMask* mask) const {
+ mask->fFormat = SkMask::kA8_Format;
+ if (this->isEmpty()) {
+ mask->fBounds.setEmpty();
+ mask->fImage = nullptr;
+ mask->fRowBytes = 0;
+ return;
+ }
+
+ mask->fBounds = fBounds;
+ mask->fRowBytes = fBounds.width();
+ size_t size = mask->computeImageSize();
+ mask->fImage = SkMask::AllocImage(size);
+
+ Iter iter(*this);
+ uint8_t* dst = mask->fImage;
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ do {
+ expand_row_to_mask(dst, iter.data(), width);
+ dst += mask->fRowBytes;
+ } while (++y < iter.bottom());
+ iter.next();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static void expandToRuns(const uint8_t* SK_RESTRICT data, int initialCount, int width,
+ int16_t* SK_RESTRICT runs, SkAlpha* SK_RESTRICT aa) {
+ // we don't read our initial n from data, since the caller may have had to
+ // clip it, hence the initialCount parameter.
+ int n = initialCount;
+ for (;;) {
+ if (n > width) {
+ n = width;
+ }
+ SkASSERT(n > 0);
+ runs[0] = n;
+ runs += n;
+
+ aa[0] = data[1];
+ aa += n;
+
+ data += 2;
+ width -= n;
+ if (0 == width) {
+ break;
+ }
+ // load the next count
+ n = data[0];
+ }
+ runs[0] = 0; // sentinel
+}
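+
+// For example (hypothetical values): data = {3, 0x80, 2, 0xFF} with
+// initialCount == 3 and width == 5 yields runs = {3, _, _, 2, _, 0} and
+// aa = {0x80, _, _, 0xFF, _}, where the underscores are slots that
+// run-walking consumers such as blitAntiH skip over.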
+
+SkAAClipBlitter::~SkAAClipBlitter() {
+ sk_free(fScanlineScratch);
+}
+
+void SkAAClipBlitter::ensureRunsAndAA() {
+ if (nullptr == fScanlineScratch) {
+ // add 1 so we can store the terminating run count of 0
+ int count = fAAClipBounds.width() + 1;
+        // we use this either for fRuns + fAA, or a scanline of a mask
+        // which may be as deep as 32 bits
+ fScanlineScratch = sk_malloc_throw(count * sizeof(SkPMColor));
+ fRuns = (int16_t*)fScanlineScratch;
+ fAA = (SkAlpha*)(fRuns + count);
+ }
+}
+
+void SkAAClipBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+ SkASSERT(fAAClipBounds.contains(x, y));
+ SkASSERT(fAAClipBounds.contains(x + width - 1, y));
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ if (initialCount >= width) {
+ SkAlpha alpha = row[1];
+ if (0 == alpha) {
+ return;
+ }
+ if (0xFF == alpha) {
+ fBlitter->blitH(x, y, width);
+ return;
+ }
+ }
+
+ this->ensureRunsAndAA();
+ expandToRuns(row, initialCount, width, fRuns, fAA);
+
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+static void merge(const uint8_t* SK_RESTRICT row, int rowN,
+ const SkAlpha* SK_RESTRICT srcAA,
+ const int16_t* SK_RESTRICT srcRuns,
+ SkAlpha* SK_RESTRICT dstAA,
+ int16_t* SK_RESTRICT dstRuns,
+ int width) {
+ SkDEBUGCODE(int accumulated = 0;)
+ int srcN = srcRuns[0];
+    // defensively bail out if the src runs are empty, so the loop below never reads past them
+ if (0 == srcN) {
+ return;
+ }
+
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ unsigned newAlpha = SkMulDiv255Round(srcAA[0], row[1]);
+ int minN = SkMin32(srcN, rowN);
+ dstRuns[0] = minN;
+ dstRuns += minN;
+ dstAA[0] = newAlpha;
+ dstAA += minN;
+
+ if (0 == (srcN -= minN)) {
+ srcN = srcRuns[0]; // refresh
+ srcRuns += srcN;
+ srcAA += srcN;
+ srcN = srcRuns[0]; // reload
+ if (0 == srcN) {
+ break;
+ }
+ }
+ if (0 == (rowN -= minN)) {
+ row += 2;
+ rowN = row[0]; // reload
+ }
+
+ SkDEBUGCODE(accumulated += minN;)
+ SkASSERT(accumulated <= width);
+ }
+ dstRuns[0] = 0;
+}
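+
+// Worked example (hypothetical values): a clip row {3, 0xFF, 2, 0x80} entered
+// with rowN == 3, merged with srcRuns = {5, _, _, _, _, 0} / srcAA = {0x80},
+// yields dstRuns = {3, _, _, 2, _, 0} and dstAA = {0x80, _, _, 0x40, _},
+// since SkMulDiv255Round(0x80, 0xFF) == 0x80 and
+// SkMulDiv255Round(0x80, 0x80) == 0x40.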
+
+void SkAAClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ this->ensureRunsAndAA();
+
+ merge(row, initialCount, aa, runs, fAA, fRuns, fAAClipBounds.width());
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+void SkAAClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (fAAClip->quickContains(x, y, x + 1, y + height)) {
+ fBlitter->blitV(x, y, height, alpha);
+ return;
+ }
+
+ for (;;) {
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &lastY);
+ int dy = lastY - y + 1;
+ if (dy > height) {
+ dy = height;
+ }
+ height -= dy;
+
+ row = fAAClip->findX(row, x);
+ SkAlpha newAlpha = SkMulDiv255Round(alpha, row[1]);
+ if (newAlpha) {
+ fBlitter->blitV(x, y, dy, newAlpha);
+ }
+ SkASSERT(height >= 0);
+ if (height <= 0) {
+ break;
+ }
+ y = lastY + 1;
+ }
+}
+
+void SkAAClipBlitter::blitRect(int x, int y, int width, int height) {
+ if (fAAClip->quickContains(x, y, x + width, y + height)) {
+ fBlitter->blitRect(x, y, width, height);
+ return;
+ }
+
+ while (--height >= 0) {
+ this->blitH(x, y, width);
+ y += 1;
+ }
+}
+
+typedef void (*MergeAAProc)(const void* src, int width, const uint8_t* row,
+ int initialRowCount, void* dst);
+
+static void small_memcpy(void* dst, const void* src, size_t n) {
+ memcpy(dst, src, n);
+}
+
+static void small_bzero(void* dst, size_t n) {
+ sk_bzero(dst, n);
+}
+
+static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+}
+
+static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+}
+
+template <typename T>
+void mergeT(const void* inSrc, int srcN, const uint8_t* SK_RESTRICT row, int rowN, void* inDst) {
+ const T* SK_RESTRICT src = static_cast<const T*>(inSrc);
+ T* SK_RESTRICT dst = static_cast<T*>(inDst);
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = SkMin32(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+ } else if (0 == rowA) {
+ small_bzero(dst, n * sizeof(T));
+ } else {
+ for (int i = 0; i < n; ++i) {
+ dst[i] = mergeOne(src[i], rowA);
+ }
+ }
+
+ if (0 == (srcN -= n)) {
+ break;
+ }
+
+ src += n;
+ dst += n;
+
+ SkASSERT(rowN == n);
+ row += 2;
+ rowN = row[0];
+ }
+}
+
+static MergeAAProc find_merge_aa_proc(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ case SkMask::kA8_Format:
+ case SkMask::k3D_Format:
+            return mergeT<uint8_t>;
+ case SkMask::kLCD16_Format:
+ return mergeT<uint16_t>;
+ default:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ }
+}
+
+static U8CPU bit2byte(int bitInAByte) {
+ SkASSERT(bitInAByte <= 0xFF);
+    // negation turns any non-zero value into 0xFFFFFF??, so shifting down by
+    // 8 or more bits leaves a full 0xFF byte
+ return -bitInAByte >> 8;
+}
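+
+// For example, bit2byte(0x80) yields an all-ones value that stores as 0xFF,
+// while bit2byte(0) == 0; upscaleBW2A8 below uses this to turn each BW mask
+// bit into a full A8 byte without branching.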
+
+static void upscaleBW2A8(SkMask* dstMask, const SkMask& srcMask) {
+ SkASSERT(SkMask::kBW_Format == srcMask.fFormat);
+ SkASSERT(SkMask::kA8_Format == dstMask->fFormat);
+
+ const int width = srcMask.fBounds.width();
+ const int height = srcMask.fBounds.height();
+
+ const uint8_t* SK_RESTRICT src = (const uint8_t*)srcMask.fImage;
+ const size_t srcRB = srcMask.fRowBytes;
+ uint8_t* SK_RESTRICT dst = (uint8_t*)dstMask->fImage;
+ const size_t dstRB = dstMask->fRowBytes;
+
+ const int wholeBytes = width >> 3;
+ const int leftOverBits = width & 7;
+
+ for (int y = 0; y < height; ++y) {
+ uint8_t* SK_RESTRICT d = dst;
+ for (int i = 0; i < wholeBytes; ++i) {
+ int srcByte = src[i];
+ d[0] = bit2byte(srcByte & (1 << 7));
+ d[1] = bit2byte(srcByte & (1 << 6));
+ d[2] = bit2byte(srcByte & (1 << 5));
+ d[3] = bit2byte(srcByte & (1 << 4));
+ d[4] = bit2byte(srcByte & (1 << 3));
+ d[5] = bit2byte(srcByte & (1 << 2));
+ d[6] = bit2byte(srcByte & (1 << 1));
+ d[7] = bit2byte(srcByte & (1 << 0));
+ d += 8;
+ }
+ if (leftOverBits) {
+ int srcByte = src[wholeBytes];
+ for (int x = 0; x < leftOverBits; ++x) {
+ *d++ = bit2byte(srcByte & 0x80);
+ srcByte <<= 1;
+ }
+ }
+ src += srcRB;
+ dst += dstRB;
+ }
+}
+
+void SkAAClipBlitter::blitMask(const SkMask& origMask, const SkIRect& clip) {
+ SkASSERT(fAAClip->getBounds().contains(clip));
+
+ if (fAAClip->quickContains(clip)) {
+ fBlitter->blitMask(origMask, clip);
+ return;
+ }
+
+ const SkMask* mask = &origMask;
+
+ // if we're BW, we need to upscale to A8 (ugh)
+ SkMask grayMask;
+ if (SkMask::kBW_Format == origMask.fFormat) {
+ grayMask.fFormat = SkMask::kA8_Format;
+ grayMask.fBounds = origMask.fBounds;
+ grayMask.fRowBytes = origMask.fBounds.width();
+ size_t size = grayMask.computeImageSize();
+ grayMask.fImage = (uint8_t*)fGrayMaskScratch.reset(size,
+ SkAutoMalloc::kReuse_OnShrink);
+
+ upscaleBW2A8(&grayMask, origMask);
+ mask = &grayMask;
+ }
+
+ this->ensureRunsAndAA();
+
+ // HACK -- we are devolving 3D into A8, need to copy the rest of the 3D
+ // data into a temp block to support it better (ugh)
+
+ const void* src = mask->getAddr(clip.fLeft, clip.fTop);
+ const size_t srcRB = mask->fRowBytes;
+ const int width = clip.width();
+ MergeAAProc mergeProc = find_merge_aa_proc(mask->fFormat);
+
+ SkMask rowMask;
+ rowMask.fFormat = SkMask::k3D_Format == mask->fFormat ? SkMask::kA8_Format : mask->fFormat;
+ rowMask.fBounds.fLeft = clip.fLeft;
+ rowMask.fBounds.fRight = clip.fRight;
+ rowMask.fRowBytes = mask->fRowBytes; // doesn't matter, since our height==1
+ rowMask.fImage = (uint8_t*)fScanlineScratch;
+
+ int y = clip.fTop;
+ const int stopY = y + clip.height();
+
+ do {
+ int localStopY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &localStopY);
+ // findRow returns last Y, not stop, so we add 1
+ localStopY = SkMin32(localStopY + 1, stopY);
+
+ int initialCount;
+ row = fAAClip->findX(row, clip.fLeft, &initialCount);
+ do {
+ mergeProc(src, width, row, initialCount, rowMask.fImage);
+ rowMask.fBounds.fTop = y;
+ rowMask.fBounds.fBottom = y + 1;
+ fBlitter->blitMask(rowMask, rowMask.fBounds);
+ src = (const void*)((const char*)src + srcRB);
+ } while (++y < localStopY);
+ } while (y < stopY);
+}
+
+const SkPixmap* SkAAClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkAAClip.h b/gfx/skia/skia/src/core/SkAAClip.h
new file mode 100644
index 0000000000..3c5521fb81
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAAClip_DEFINED
+#define SkAAClip_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkBlitter.h"
+
+class SkAAClip {
+public:
+ SkAAClip();
+ SkAAClip(const SkAAClip&);
+ ~SkAAClip();
+
+ SkAAClip& operator=(const SkAAClip&);
+ friend bool operator==(const SkAAClip&, const SkAAClip&);
+ friend bool operator!=(const SkAAClip& a, const SkAAClip& b) {
+ return !(a == b);
+ }
+
+ void swap(SkAAClip&);
+
+ bool isEmpty() const { return nullptr == fRunHead; }
+ const SkIRect& getBounds() const { return fBounds; }
+
+ // Returns true iff the clip is not empty, and is just a hard-edged rect (no partial alpha).
+ // If true, getBounds() can be used in place of this clip.
+ bool isRect() const;
+
+ bool setEmpty();
+ bool setRect(const SkIRect&);
+ bool setRect(const SkRect&, bool doAA = true);
+ bool setPath(const SkPath&, const SkRegion* clip = nullptr, bool doAA = true);
+ bool setRegion(const SkRegion&);
+ bool set(const SkAAClip&);
+
+ bool op(const SkAAClip&, const SkAAClip&, SkRegion::Op);
+
+ // Helpers for op()
+ bool op(const SkIRect&, SkRegion::Op);
+ bool op(const SkRect&, SkRegion::Op, bool doAA);
+ bool op(const SkAAClip&, SkRegion::Op);
+
+ bool translate(int dx, int dy, SkAAClip* dst) const;
+ bool translate(int dx, int dy) {
+ return this->translate(dx, dy, this);
+ }
+
+ /**
+ * Allocates a mask the size of the aaclip, and expands its data into
+ * the mask, using kA8_Format
+ */
+ void copyToMask(SkMask*) const;
+
+ // called internally
+
+ bool quickContains(int left, int top, int right, int bottom) const;
+ bool quickContains(const SkIRect& r) const {
+ return this->quickContains(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ const uint8_t* findRow(int y, int* lastYForRow = nullptr) const;
+ const uint8_t* findX(const uint8_t data[], int x, int* initialCount = nullptr) const;
+
+ class Iter;
+ struct RunHead;
+ struct YOffset;
+ class Builder;
+
+#ifdef SK_DEBUG
+ void validate() const;
+ void debug(bool compress_y=false) const;
+#else
+ void validate() const {}
+ void debug(bool compress_y=false) const {}
+#endif
+
+private:
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ void freeRuns();
+ bool trimBounds();
+ bool trimTopBottom();
+ bool trimLeftRight();
+
+ friend class Builder;
+ class BuilderBlitter;
+ friend class BuilderBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClipBlitter : public SkBlitter {
+public:
+ SkAAClipBlitter() : fScanlineScratch(nullptr) {}
+ ~SkAAClipBlitter() override;
+
+ void init(SkBlitter* blitter, const SkAAClip* aaclip) {
+ SkASSERT(aaclip && !aaclip->isEmpty());
+ fBlitter = blitter;
+ fAAClip = aaclip;
+ fAAClipBounds = aaclip->getBounds();
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+private:
+ SkBlitter* fBlitter;
+ const SkAAClip* fAAClip;
+ SkIRect fAAClipBounds;
+
+ // point into fScanlineScratch
+ int16_t* fRuns;
+ SkAlpha* fAA;
+
+ enum {
+ kSize = 32 * 32
+ };
+ SkAutoSMalloc<kSize> fGrayMaskScratch; // used for blitMask
+ void* fScanlineScratch; // enough for a mask at 32bit, or runs+aa
+
+ void ensureRunsAndAA();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkATrace.cpp b/gfx/skia/skia/src/core/SkATrace.cpp
new file mode 100644
index 0000000000..87cdca9289
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkATrace.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkATrace.h"
+
+#include "src/core/SkTraceEvent.h"
+
+#include "src/core/SkTraceEventCommon.h"
+
+#ifdef SK_BUILD_FOR_ANDROID
+#include <dlfcn.h>
+#endif
+
+SkATrace::SkATrace() : fBeginSection(nullptr), fEndSection(nullptr), fIsEnabled(nullptr) {
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ fIsEnabled = []{ return static_cast<bool>(CC_UNLIKELY(ATRACE_ENABLED())); };
+ fBeginSection = [](const char* name){ ATRACE_BEGIN(name); };
+ fEndSection = []{ ATRACE_END(); };
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (void* lib = dlopen("libandroid.so", RTLD_NOW | RTLD_LOCAL)) {
+ fBeginSection = (decltype(fBeginSection))dlsym(lib, "ATrace_beginSection");
+ fEndSection = (decltype(fEndSection))dlsym(lib, "ATrace_endSection");
+ fIsEnabled = (decltype(fIsEnabled))dlsym(lib, "ATrace_isEnabled");
+ }
+#endif
+
+ if (!fIsEnabled) {
+ fIsEnabled = []{ return false; };
+ }
+}
+
+SkEventTracer::Handle SkATrace::addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) {
+ if (fIsEnabled()) {
+ if (TRACE_EVENT_PHASE_COMPLETE == phase ||
+ TRACE_EVENT_PHASE_INSTANT == phase) {
+ fBeginSection(name);
+ }
+
+ if (TRACE_EVENT_PHASE_INSTANT == phase) {
+ fEndSection();
+ }
+ }
+ return 0;
+}
+
+void SkATrace::updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) {
+ // This is only ever called from a scoped trace event so we will just end the ATrace section.
+ if (fIsEnabled()) {
+ fEndSection();
+ }
+}
+
+const uint8_t* SkATrace::getCategoryGroupEnabled(const char* name) {
+    // Chrome tracing is set up to not repeatedly call this function once it has been initialized,
+    // so we can't use it to check whether ATrace is enabled. Thus we always return yes here, and
+    // instead check ATrace's enabled state when beginning and ending a section.
+ static uint8_t yes = SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags;
+ return &yes;
+}
+
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+bool SkAndroidFrameworkTraceUtil::gEnableAndroidTracing = false;
+
+#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+
+
diff --git a/gfx/skia/skia/src/core/SkATrace.h b/gfx/skia/skia/src/core/SkATrace.h
new file mode 100644
index 0000000000..eae6d31a5d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkATrace.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkATrace_DEFINED
+#define SkATrace_DEFINED
+
+#include "include/utils/SkEventTracer.h"
+
+/**
+ * This class is used to support ATrace in android apps. It hooks into the SkEventTracer system. It
+ * currently supports the macros TRACE_EVENT*, TRACE_EVENT_INSTANT*, and TRACE_EVENT_BEGIN/END*.
+ * For versions of these calls that take additional args and value pairs we currently just drop
+ * them and report only the name. Since ATrace is a simple push and pop system (all traces are fully
+ * nested), if using BEGIN and END you should also make sure your calls are properly nested (i.e. if
+ * startA is before startB, then endB is before endA).
+ */
+class SkATrace : public SkEventTracer {
+public:
+ SkATrace();
+
+ SkEventTracer::Handle addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) override;
+
+
+ void updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) override;
+
+ const uint8_t* getCategoryGroupEnabled(const char* name) override;
+
+ const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) override {
+ static const char* category = "skiaATrace";
+ return category;
+ }
+
+private:
+ void (*fBeginSection)(const char*);
+ void (*fEndSection)(void);
+ bool (*fIsEnabled)(void);
+};
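+
+// A minimal registration sketch (assuming the usual SkEventTracer::SetInstance
+// entry point; once installed, the TRACE_EVENT* macros route through SkATrace):
+//
+//     SkEventTracer::SetInstance(new SkATrace);  // tracer is installed globally
+//     TRACE_EVENT0("skia", "MyScopedWork");      // hypothetical trace point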
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
new file mode 100644
index 0000000000..84fd020af7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAdvancedTypefaceMetrics_DEFINED
+#define SkAdvancedTypefaceMetrics_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/private/SkBitmaskEnum.h"
+
+/** \class SkAdvancedTypefaceMetrics
+
+ The SkAdvancedTypefaceMetrics class is used by the PDF backend to correctly
+ embed typefaces. This class is created and filled in with information by
+ SkTypeface::getAdvancedMetrics.
+*/
+struct SkAdvancedTypefaceMetrics {
+ // The PostScript name of the font. See `FontName` and `BaseFont` in PDF standard.
+ SkString fPostScriptName;
+ SkString fFontName;
+
+ // These enum values match the values used in the PDF file format.
+ enum StyleFlags : uint32_t {
+ kFixedPitch_Style = 0x00000001,
+ kSerif_Style = 0x00000002,
+ kScript_Style = 0x00000008,
+ kItalic_Style = 0x00000040,
+ kAllCaps_Style = 0x00010000,
+ kSmallCaps_Style = 0x00020000,
+ kForceBold_Style = 0x00040000
+ };
+ StyleFlags fStyle = (StyleFlags)0; // Font style characteristics.
+
+ enum FontType : uint8_t {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font the per glyph
+ // information will never be populated.
+ FontType fType = kOther_Font;
+
+ enum FontFlags : uint8_t {
+ kMultiMaster_FontFlag = 0x01, //!<May be true for Type1, CFF, or TrueType fonts.
+ kNotEmbeddable_FontFlag = 0x02, //!<May not be embedded.
+ kNotSubsettable_FontFlag = 0x04, //!<May not be subset.
+ };
+ FontFlags fFlags = (FontFlags)0; // Global font flags.
+
+ int16_t fItalicAngle = 0; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent = 0; // Max height above baseline, not including accents.
+ int16_t fDescent = 0; // Max depth below baseline (negative).
+ int16_t fStemV = 0; // Thickness of dominant vertical stem.
+ int16_t fCapHeight = 0; // Height (from baseline) of top of flat capitals.
+
+ SkIRect fBBox = {0, 0, 0, 0}; // The bounding box of all glyphs (in font units).
+};
+
+namespace skstd {
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::FontFlags> : std::true_type {};
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::StyleFlags> : std::true_type {};
+}
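+
+// With the is_bitmask_enum specializations above, the flag enums compose with
+// the usual bitwise operators, e.g. (illustrative values):
+//
+//     metrics.fStyle = SkAdvancedTypefaceMetrics::kSerif_Style |
+//                      SkAdvancedTypefaceMetrics::kItalic_Style;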
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAlphaRuns.cpp b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
new file mode 100644
index 0000000000..4d98ececdc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTo.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkUtils.h"
+
+void SkAlphaRuns::reset(int width) {
+ SkASSERT(width > 0);
+
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ sk_memset16((uint16_t*)fRuns, (uint16_t)(-42), width);
+#endif
+#endif
+ fRuns[0] = SkToS16(width);
+ fRuns[width] = 0;
+ fAlpha[0] = 0;
+
+ SkDEBUGCODE(fWidth = width;)
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+ void SkAlphaRuns::assertValid(int y, int maxStep) const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ int max = (y + 1) * maxStep - (y == maxStep - 1);
+
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ while (*runs) {
+ SkASSERT(*alpha <= max);
+ alpha += *runs;
+ runs += *runs;
+ }
+#endif
+ }
+
+ void SkAlphaRuns::dump() const {
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ SkDebugf("Runs");
+ while (*runs) {
+ int n = *runs;
+
+ SkDebugf(" %02x", *alpha);
+ if (n > 1) {
+ SkDebugf(",%d", n);
+ }
+ alpha += n;
+ runs += n;
+ }
+ SkDebugf("\n");
+ }
+
+ void SkAlphaRuns::validate() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ SkASSERT(fWidth > 0);
+
+ int count = 0;
+ const int16_t* runs = fRuns;
+
+ while (*runs) {
+ SkASSERT(*runs > 0);
+ count += *runs;
+ SkASSERT(count <= fWidth);
+ runs += *runs;
+ }
+ SkASSERT(count == fWidth);
+#endif
+ }
+#endif
diff --git a/gfx/skia/skia/src/core/SkAnalyticEdge.cpp b/gfx/skia/skia/src/core/SkAnalyticEdge.cpp
new file mode 100644
index 0000000000..2062512501
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnalyticEdge.cpp
@@ -0,0 +1,512 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTo.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkMathPriv.h"
+#include <utility>
+
+static const int kInverseTableSize = 1024; // SK_FDot6One * 16
+
+static inline SkFixed quick_inverse(SkFDot6 x) {
+ SkASSERT(SkAbs32(x) < kInverseTableSize);
+ static const int32_t table[kInverseTableSize * 2] = {
+ -4096, -4100, -4104, -4108, -4112, -4116, -4120, -4124, -4128, -4132, -4136,
+ -4140, -4144, -4148, -4152, -4156, -4161, -4165, -4169, -4173, -4177, -4181,
+ -4185, -4190, -4194, -4198, -4202, -4206, -4211, -4215, -4219, -4223, -4228,
+ -4232, -4236, -4240, -4245, -4249, -4253, -4258, -4262, -4266, -4271, -4275,
+ -4279, -4284, -4288, -4293, -4297, -4301, -4306, -4310, -4315, -4319, -4324,
+ -4328, -4332, -4337, -4341, -4346, -4350, -4355, -4359, -4364, -4369, -4373,
+ -4378, -4382, -4387, -4391, -4396, -4401, -4405, -4410, -4415, -4419, -4424,
+ -4429, -4433, -4438, -4443, -4447, -4452, -4457, -4462, -4466, -4471, -4476,
+ -4481, -4485, -4490, -4495, -4500, -4505, -4510, -4514, -4519, -4524, -4529,
+ -4534, -4539, -4544, -4549, -4554, -4559, -4563, -4568, -4573, -4578, -4583,
+ -4588, -4593, -4599, -4604, -4609, -4614, -4619, -4624, -4629, -4634, -4639,
+ -4644, -4650, -4655, -4660, -4665, -4670, -4675, -4681, -4686, -4691, -4696,
+ -4702, -4707, -4712, -4718, -4723, -4728, -4733, -4739, -4744, -4750, -4755,
+ -4760, -4766, -4771, -4777, -4782, -4788, -4793, -4798, -4804, -4809, -4815,
+ -4821, -4826, -4832, -4837, -4843, -4848, -4854, -4860, -4865, -4871, -4877,
+ -4882, -4888, -4894, -4899, -4905, -4911, -4917, -4922, -4928, -4934, -4940,
+ -4946, -4951, -4957, -4963, -4969, -4975, -4981, -4987, -4993, -4999, -5005,
+ -5011, -5017, -5023, -5029, -5035, -5041, -5047, -5053, -5059, -5065, -5071,
+ -5077, -5084, -5090, -5096, -5102, -5108, -5115, -5121, -5127, -5133, -5140,
+ -5146, -5152, -5159, -5165, -5171, -5178, -5184, -5190, -5197, -5203, -5210,
+ -5216, -5223, -5229, -5236, -5242, -5249, -5256, -5262, -5269, -5275, -5282,
+ -5289, -5295, -5302, -5309, -5315, -5322, -5329, -5336, -5343, -5349, -5356,
+ -5363, -5370, -5377, -5384, -5391, -5398, -5405, -5412, -5418, -5426, -5433,
+ -5440, -5447, -5454, -5461, -5468, -5475, -5482, -5489, -5497, -5504, -5511,
+ -5518, -5526, -5533, -5540, -5548, -5555, -5562, -5570, -5577, -5584, -5592,
+ -5599, -5607, -5614, -5622, -5629, -5637, -5645, -5652, -5660, -5667, -5675,
+ -5683, -5691, -5698, -5706, -5714, -5722, -5729, -5737, -5745, -5753, -5761,
+ -5769, -5777, -5785, -5793, -5801, -5809, -5817, -5825, -5833, -5841, -5849,
+ -5857, -5866, -5874, -5882, -5890, -5899, -5907, -5915, -5924, -5932, -5940,
+ -5949, -5957, -5966, -5974, -5983, -5991, -6000, -6009, -6017, -6026, -6034,
+ -6043, -6052, -6061, -6069, -6078, -6087, -6096, -6105, -6114, -6123, -6132,
+ -6141, -6150, -6159, -6168, -6177, -6186, -6195, -6204, -6213, -6223, -6232,
+ -6241, -6250, -6260, -6269, -6278, -6288, -6297, -6307, -6316, -6326, -6335,
+ -6345, -6355, -6364, -6374, -6384, -6393, -6403, -6413, -6423, -6432, -6442,
+ -6452, -6462, -6472, -6482, -6492, -6502, -6512, -6523, -6533, -6543, -6553,
+ -6563, -6574, -6584, -6594, -6605, -6615, -6626, -6636, -6647, -6657, -6668,
+ -6678, -6689, -6700, -6710, -6721, -6732, -6743, -6754, -6765, -6775, -6786,
+ -6797, -6808, -6820, -6831, -6842, -6853, -6864, -6875, -6887, -6898, -6909,
+ -6921, -6932, -6944, -6955, -6967, -6978, -6990, -7002, -7013, -7025, -7037,
+ -7049, -7061, -7073, -7084, -7096, -7108, -7121, -7133, -7145, -7157, -7169,
+ -7182, -7194, -7206, -7219, -7231, -7244, -7256, -7269, -7281, -7294, -7307,
+ -7319, -7332, -7345, -7358, -7371, -7384, -7397, -7410, -7423, -7436, -7449,
+ -7463, -7476, -7489, -7503, -7516, -7530, -7543, -7557, -7570, -7584, -7598,
+ -7612, -7626, -7639, -7653, -7667, -7681, -7695, -7710, -7724, -7738, -7752,
+ -7767, -7781, -7796, -7810, -7825, -7839, -7854, -7869, -7884, -7898, -7913,
+ -7928, -7943, -7958, -7973, -7989, -8004, -8019, -8035, -8050, -8065, -8081,
+ -8097, -8112, -8128, -8144, -8160, -8176, -8192, -8208, -8224, -8240, -8256,
+ -8272, -8289, -8305, -8322, -8338, -8355, -8371, -8388, -8405, -8422, -8439,
+ -8456, -8473, -8490, -8507, -8525, -8542, -8559, -8577, -8594, -8612, -8630,
+ -8648, -8665, -8683, -8701, -8719, -8738, -8756, -8774, -8793, -8811, -8830,
+ -8848, -8867, -8886, -8905, -8924, -8943, -8962, -8981, -9000, -9020, -9039,
+ -9058, -9078, -9098, -9118, -9137, -9157, -9177, -9198, -9218, -9238, -9258,
+ -9279, -9300, -9320, -9341, -9362, -9383, -9404, -9425, -9446, -9467, -9489,
+ -9510, -9532, -9554, -9576, -9597, -9619, -9642, -9664, -9686, -9709, -9731,
+ -9754, -9776, -9799, -9822, -9845, -9868, -9892, -9915, -9939, -9962, -9986,
+ -10010, -10034, -10058, -10082, -10106, -10131, -10155, -10180, -10205, -10230,
+ -10255, -10280, -10305, -10330, -10356, -10381, -10407, -10433, -10459, -10485,
+ -10512, -10538, -10564, -10591, -10618, -10645, -10672, -10699, -10727, -10754,
+ -10782, -10810, -10837, -10866, -10894, -10922, -10951, -10979, -11008, -11037,
+ -11066, -11096, -11125, -11155, -11184, -11214, -11244, -11275, -11305, -11335,
+ -11366, -11397, -11428, -11459, -11491, -11522, -11554, -11586, -11618, -11650,
+ -11683, -11715, -11748, -11781, -11814, -11848, -11881, -11915, -11949, -11983,
+ -12018, -12052, -12087, -12122, -12157, -12192, -12228, -12264, -12300, -12336,
+ -12372, -12409, -12446, -12483, -12520, -12557, -12595, -12633, -12671, -12710,
+ -12748, -12787, -12826, -12865, -12905, -12945, -12985, -13025, -13066, -13107,
+ -13148, -13189, -13231, -13273, -13315, -13357, -13400, -13443, -13486, -13530,
+ -13573, -13617, -13662, -13706, -13751, -13797, -13842, -13888, -13934, -13981,
+ -14027, -14074, -14122, -14169, -14217, -14266, -14315, -14364, -14413, -14463,
+ -14513, -14563, -14614, -14665, -14716, -14768, -14820, -14873, -14926, -14979,
+ -15033, -15087, -15141, -15196, -15252, -15307, -15363, -15420, -15477, -15534,
+ -15592, -15650, -15709, -15768, -15827, -15887, -15947, -16008, -16070, -16131,
+ -16194, -16256, -16320, -16384, -16448, -16513, -16578, -16644, -16710, -16777,
+ -16844, -16912, -16980, -17050, -17119, -17189, -17260, -17331, -17403, -17476,
+ -17549, -17623, -17697, -17772, -17848, -17924, -18001, -18078, -18157, -18236,
+ -18315, -18396, -18477, -18558, -18641, -18724, -18808, -18893, -18978, -19065,
+ -19152, -19239, -19328, -19418, -19508, -19599, -19691, -19784, -19878, -19972,
+ -20068, -20164, -20262, -20360, -20460, -20560, -20661, -20763, -20867, -20971,
+ -21076, -21183, -21290, -21399, -21509, -21620, -21732, -21845, -21959, -22075,
+ -22192, -22310, -22429, -22550, -22671, -22795, -22919, -23045, -23172, -23301,
+ -23431, -23563, -23696, -23831, -23967, -24105, -24244, -24385, -24528, -24672,
+ -24818, -24966, -25115, -25266, -25420, -25575, -25731, -25890, -26051, -26214,
+ -26379, -26546, -26715, -26886, -27060, -27235, -27413, -27594, -27776, -27962,
+ -28149, -28339, -28532, -28728, -28926, -29127, -29330, -29537, -29746, -29959,
+ -30174, -30393, -30615, -30840, -31068, -31300, -31536, -31775, -32017, -32263,
+ -32513, -32768, -33026, -33288, -33554, -33825, -34100, -34379, -34663, -34952,
+ -35246, -35544, -35848, -36157, -36472, -36792, -37117, -37449, -37786, -38130,
+ -38479, -38836, -39199, -39568, -39945, -40329, -40721, -41120, -41527, -41943,
+ -42366, -42799, -43240, -43690, -44150, -44620, -45100, -45590, -46091, -46603,
+ -47127, -47662, -48210, -48770, -49344, -49932, -50533, -51150, -51781, -52428,
+ -53092, -53773, -54471, -55188, -55924, -56679, -57456, -58254, -59074, -59918,
+ -60787, -61680, -62601, -63550, -64527, -65536, -66576, -67650, -68759, -69905,
+ -71089, -72315, -73584, -74898, -76260, -77672, -79137, -80659, -82241, -83886,
+ -85598, -87381, -89240, -91180, -93206, -95325, -97541, -99864, -102300,
+ -104857, -107546, -110376, -113359, -116508, -119837, -123361, -127100, -131072,
+ -135300, -139810, -144631, -149796, -155344, -161319, -167772, -174762, -182361,
+ -190650, -199728, -209715, -220752, -233016, -246723, -262144, -279620, -299593,
+ -322638, -349525, -381300, -419430, -466033, -524288, -599186, -699050, -838860,
+ -1048576, -1398101, -2097152, -4194304, 0, 4194304, 2097152, 1398101, 1048576,
+ 838860, 699050, 599186, 524288, 466033, 419430, 381300, 349525, 322638, 299593,
+ 279620, 262144, 246723, 233016, 220752, 209715, 199728, 190650, 182361, 174762,
+ 167772, 161319, 155344, 149796, 144631, 139810, 135300, 131072, 127100, 123361,
+ 119837, 116508, 113359, 110376, 107546, 104857, 102300, 99864, 97541, 95325,
+ 93206, 91180, 89240, 87381, 85598, 83886, 82241, 80659, 79137, 77672, 76260,
+ 74898, 73584, 72315, 71089, 69905, 68759, 67650, 66576, 65536, 64527, 63550,
+ 62601, 61680, 60787, 59918, 59074, 58254, 57456, 56679, 55924, 55188, 54471,
+ 53773, 53092, 52428, 51781, 51150, 50533, 49932, 49344, 48770, 48210, 47662,
+ 47127, 46603, 46091, 45590, 45100, 44620, 44150, 43690, 43240, 42799, 42366,
+ 41943, 41527, 41120, 40721, 40329, 39945, 39568, 39199, 38836, 38479, 38130,
+ 37786, 37449, 37117, 36792, 36472, 36157, 35848, 35544, 35246, 34952, 34663,
+ 34379, 34100, 33825, 33554, 33288, 33026, 32768, 32513, 32263, 32017, 31775,
+ 31536, 31300, 31068, 30840, 30615, 30393, 30174, 29959, 29746, 29537, 29330,
+ 29127, 28926, 28728, 28532, 28339, 28149, 27962, 27776, 27594, 27413, 27235,
+ 27060, 26886, 26715, 26546, 26379, 26214, 26051, 25890, 25731, 25575, 25420,
+ 25266, 25115, 24966, 24818, 24672, 24528, 24385, 24244, 24105, 23967, 23831,
+ 23696, 23563, 23431, 23301, 23172, 23045, 22919, 22795, 22671, 22550, 22429,
+ 22310, 22192, 22075, 21959, 21845, 21732, 21620, 21509, 21399, 21290, 21183,
+ 21076, 20971, 20867, 20763, 20661, 20560, 20460, 20360, 20262, 20164, 20068,
+ 19972, 19878, 19784, 19691, 19599, 19508, 19418, 19328, 19239, 19152, 19065,
+ 18978, 18893, 18808, 18724, 18641, 18558, 18477, 18396, 18315, 18236, 18157,
+ 18078, 18001, 17924, 17848, 17772, 17697, 17623, 17549, 17476, 17403, 17331,
+ 17260, 17189, 17119, 17050, 16980, 16912, 16844, 16777, 16710, 16644, 16578,
+ 16513, 16448, 16384, 16320, 16256, 16194, 16131, 16070, 16008, 15947, 15887,
+ 15827, 15768, 15709, 15650, 15592, 15534, 15477, 15420, 15363, 15307, 15252,
+ 15196, 15141, 15087, 15033, 14979, 14926, 14873, 14820, 14768, 14716, 14665,
+ 14614, 14563, 14513, 14463, 14413, 14364, 14315, 14266, 14217, 14169, 14122,
+ 14074, 14027, 13981, 13934, 13888, 13842, 13797, 13751, 13706, 13662, 13617,
+ 13573, 13530, 13486, 13443, 13400, 13357, 13315, 13273, 13231, 13189, 13148,
+ 13107, 13066, 13025, 12985, 12945, 12905, 12865, 12826, 12787, 12748, 12710,
+ 12671, 12633, 12595, 12557, 12520, 12483, 12446, 12409, 12372, 12336, 12300,
+ 12264, 12228, 12192, 12157, 12122, 12087, 12052, 12018, 11983, 11949, 11915,
+ 11881, 11848, 11814, 11781, 11748, 11715, 11683, 11650, 11618, 11586, 11554,
+ 11522, 11491, 11459, 11428, 11397, 11366, 11335, 11305, 11275, 11244, 11214,
+ 11184, 11155, 11125, 11096, 11066, 11037, 11008, 10979, 10951, 10922, 10894,
+ 10866, 10837, 10810, 10782, 10754, 10727, 10699, 10672, 10645, 10618, 10591,
+ 10564, 10538, 10512, 10485, 10459, 10433, 10407, 10381, 10356, 10330, 10305,
+ 10280, 10255, 10230, 10205, 10180, 10155, 10131, 10106, 10082, 10058, 10034,
+ 10010, 9986, 9962, 9939, 9915, 9892, 9868, 9845, 9822, 9799, 9776, 9754, 9731,
+ 9709, 9686, 9664, 9642, 9619, 9597, 9576, 9554, 9532, 9510, 9489, 9467, 9446,
+ 9425, 9404, 9383, 9362, 9341, 9320, 9300, 9279, 9258, 9238, 9218, 9198, 9177,
+ 9157, 9137, 9118, 9098, 9078, 9058, 9039, 9020, 9000, 8981, 8962, 8943, 8924,
+ 8905, 8886, 8867, 8848, 8830, 8811, 8793, 8774, 8756, 8738, 8719, 8701, 8683,
+ 8665, 8648, 8630, 8612, 8594, 8577, 8559, 8542, 8525, 8507, 8490, 8473, 8456,
+ 8439, 8422, 8405, 8388, 8371, 8355, 8338, 8322, 8305, 8289, 8272, 8256, 8240,
+ 8224, 8208, 8192, 8176, 8160, 8144, 8128, 8112, 8097, 8081, 8065, 8050, 8035,
+ 8019, 8004, 7989, 7973, 7958, 7943, 7928, 7913, 7898, 7884, 7869, 7854, 7839,
+ 7825, 7810, 7796, 7781, 7767, 7752, 7738, 7724, 7710, 7695, 7681, 7667, 7653,
+ 7639, 7626, 7612, 7598, 7584, 7570, 7557, 7543, 7530, 7516, 7503, 7489, 7476,
+ 7463, 7449, 7436, 7423, 7410, 7397, 7384, 7371, 7358, 7345, 7332, 7319, 7307,
+ 7294, 7281, 7269, 7256, 7244, 7231, 7219, 7206, 7194, 7182, 7169, 7157, 7145,
+ 7133, 7121, 7108, 7096, 7084, 7073, 7061, 7049, 7037, 7025, 7013, 7002, 6990,
+ 6978, 6967, 6955, 6944, 6932, 6921, 6909, 6898, 6887, 6875, 6864, 6853, 6842,
+ 6831, 6820, 6808, 6797, 6786, 6775, 6765, 6754, 6743, 6732, 6721, 6710, 6700,
+ 6689, 6678, 6668, 6657, 6647, 6636, 6626, 6615, 6605, 6594, 6584, 6574, 6563,
+ 6553, 6543, 6533, 6523, 6512, 6502, 6492, 6482, 6472, 6462, 6452, 6442, 6432,
+ 6423, 6413, 6403, 6393, 6384, 6374, 6364, 6355, 6345, 6335, 6326, 6316, 6307,
+ 6297, 6288, 6278, 6269, 6260, 6250, 6241, 6232, 6223, 6213, 6204, 6195, 6186,
+ 6177, 6168, 6159, 6150, 6141, 6132, 6123, 6114, 6105, 6096, 6087, 6078, 6069,
+ 6061, 6052, 6043, 6034, 6026, 6017, 6009, 6000, 5991, 5983, 5974, 5966, 5957,
+ 5949, 5940, 5932, 5924, 5915, 5907, 5899, 5890, 5882, 5874, 5866, 5857, 5849,
+ 5841, 5833, 5825, 5817, 5809, 5801, 5793, 5785, 5777, 5769, 5761, 5753, 5745,
+ 5737, 5729, 5722, 5714, 5706, 5698, 5691, 5683, 5675, 5667, 5660, 5652, 5645,
+ 5637, 5629, 5622, 5614, 5607, 5599, 5592, 5584, 5577, 5570, 5562, 5555, 5548,
+ 5540, 5533, 5526, 5518, 5511, 5504, 5497, 5489, 5482, 5475, 5468, 5461, 5454,
+ 5447, 5440, 5433, 5426, 5418, 5412, 5405, 5398, 5391, 5384, 5377, 5370, 5363,
+ 5356, 5349, 5343, 5336, 5329, 5322, 5315, 5309, 5302, 5295, 5289, 5282, 5275,
+ 5269, 5262, 5256, 5249, 5242, 5236, 5229, 5223, 5216, 5210, 5203, 5197, 5190,
+ 5184, 5178, 5171, 5165, 5159, 5152, 5146, 5140, 5133, 5127, 5121, 5115, 5108,
+ 5102, 5096, 5090, 5084, 5077, 5071, 5065, 5059, 5053, 5047, 5041, 5035, 5029,
+ 5023, 5017, 5011, 5005, 4999, 4993, 4987, 4981, 4975, 4969, 4963, 4957, 4951,
+ 4946, 4940, 4934, 4928, 4922, 4917, 4911, 4905, 4899, 4894, 4888, 4882, 4877,
+ 4871, 4865, 4860, 4854, 4848, 4843, 4837, 4832, 4826, 4821, 4815, 4809, 4804,
+ 4798, 4793, 4788, 4782, 4777, 4771, 4766, 4760, 4755, 4750, 4744, 4739, 4733,
+ 4728, 4723, 4718, 4712, 4707, 4702, 4696, 4691, 4686, 4681, 4675, 4670, 4665,
+ 4660, 4655, 4650, 4644, 4639, 4634, 4629, 4624, 4619, 4614, 4609, 4604, 4599,
+ 4593, 4588, 4583, 4578, 4573, 4568, 4563, 4559, 4554, 4549, 4544, 4539, 4534,
+ 4529, 4524, 4519, 4514, 4510, 4505, 4500, 4495, 4490, 4485, 4481, 4476, 4471,
+ 4466, 4462, 4457, 4452, 4447, 4443, 4438, 4433, 4429, 4424, 4419, 4415, 4410,
+ 4405, 4401, 4396, 4391, 4387, 4382, 4378, 4373, 4369, 4364, 4359, 4355, 4350,
+ 4346, 4341, 4337, 4332, 4328, 4324, 4319, 4315, 4310, 4306, 4301, 4297, 4293,
+ 4288, 4284, 4279, 4275, 4271, 4266, 4262, 4258, 4253, 4249, 4245, 4240, 4236,
+ 4232, 4228, 4223, 4219, 4215, 4211, 4206, 4202, 4198, 4194, 4190, 4185, 4181,
+ 4177, 4173, 4169, 4165, 4161, 4156, 4152, 4148, 4144, 4140, 4136, 4132, 4128,
+ 4124, 4120, 4116, 4112, 4108, 4104, 4100
+ };
+ return table[kInverseTableSize + x];
+}
+
+static inline SkFixed quick_div(SkFDot6 a, SkFDot6 b) {
+ const int kMinBits = 3; // abs(b) should be at least (1 << kMinBits) for quick division
+ const int kMaxBits = 31; // Number of bits available in signed int
+    // Given abs(b) >= (1 << kMinBits), the inverse of abs(b) is at most 1 << (22 - kMinBits) in
+    // SkFixed format. Hence abs(a) should be less than kMaxAbsA to avoid overflow.
+ const int kMaxAbsA = 1 << (kMaxBits - (22 - kMinBits));
+ SkFDot6 abs_a = SkAbs32(a);
+ SkFDot6 abs_b = SkAbs32(b);
+ if (abs_b >= (1 << kMinBits) && abs_b < kInverseTableSize && abs_a < kMaxAbsA) {
+ SkASSERT((int64_t)a * quick_inverse(b) <= SK_MaxS32
+ && (int64_t)a * quick_inverse(b) >= SK_MinS32);
+ SkFixed ourAnswer = (a * quick_inverse(b)) >> 6;
+ SkASSERT(
+ (SkFDot6Div(a,b) == 0 && ourAnswer == 0) ||
+ SkFixedDiv(SkAbs32(SkFDot6Div(a,b) - ourAnswer), SkAbs32(SkFDot6Div(a,b))) <= 1 << 10
+ );
+ return ourAnswer;
+ }
+ return SkFDot6Div(a, b);
+}
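+
+// Worked example: 64 is 1.0 in SkFDot6 and table[kInverseTableSize + 64] is
+// 65536 (SK_Fixed1), so quick_inverse(64) == SK_Fixed1 and
+// quick_div(128, 64) == (128 * 65536) >> 6 == 2 << 16, i.e. 2.0 in SkFixed.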
+
+bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1) {
+ fRiteE = nullptr;
+
+ // We must set X/Y using the same way (e.g., times 4, to FDot6, then to Fixed) as Quads/Cubics.
+ // Otherwise the order of the edge might be wrong due to precision limit.
+ const int accuracy = kDefaultAccuracy;
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ SkFixed x0 = SkFDot6ToFixed(SkScalarRoundToFDot6(p0.fX, accuracy)) >> accuracy;
+ SkFixed y0 = SnapY(SkFDot6ToFixed(SkScalarRoundToFDot6(p0.fY, accuracy)) >> accuracy);
+ SkFixed x1 = SkFDot6ToFixed(SkScalarRoundToFDot6(p1.fX, accuracy)) >> accuracy;
+ SkFixed y1 = SnapY(SkFDot6ToFixed(SkScalarRoundToFDot6(p1.fY, accuracy)) >> accuracy);
+#else
+ const int multiplier = (1 << kDefaultAccuracy);
+ SkFixed x0 = SkFDot6ToFixed(SkScalarToFDot6(p0.fX * multiplier)) >> accuracy;
+ SkFixed y0 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p0.fY * multiplier)) >> accuracy);
+ SkFixed x1 = SkFDot6ToFixed(SkScalarToFDot6(p1.fX * multiplier)) >> accuracy;
+ SkFixed y1 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p1.fY * multiplier)) >> accuracy);
+#endif
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ winding = -1;
+ }
+
+ // are we a zero-height line?
+ SkFDot6 dy = SkFixedToFDot6(y1 - y0);
+ if (dy == 0) {
+ return false;
+ }
+ SkFDot6 dx = SkFixedToFDot6(x1 - x0);
+ SkFixed slope = quick_div(dx, dy);
+ SkFixed absSlope = SkAbs32(slope);
+
+ fX = x0;
+ fDX = slope;
+ fUpperX = x0;
+ fY = y0;
+ fUpperY = y0;
+ fLowerY = y1;
+    fDY = (dx == 0 || slope == 0)
+              ? SK_MaxS32
+              : absSlope < kInverseTableSize
+                    ? quick_inverse(absSlope)
+                    : SkAbs32(quick_div(dy, dx));
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+
+ return true;
+}
+
+// This would become a bottleneck when rendering small ovals if we called SkFixedDiv twice here.
+// Therefore, we let the outer function compute the slope once and send in the value.
+// Moreover, we compute fDY with a quick lookup into the inverse table (when possible).
+bool SkAnalyticEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1, SkFixed slope) {
+ // Since we send in the slope, we can no longer snap y inside this function.
+ // If we don't send in the slope, or we do some more sophisticated snapping, this function
+ // could be a performance bottleneck.
+ SkASSERT(fWinding == 1 || fWinding == -1);
+ SkASSERT(fCurveCount != 0);
+
+ // We don't chop at y extrema for cubics so the y is not guaranteed to be increasing for them.
+ // In that case, we have to swap x/y and negate the winding.
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ fWinding = -fWinding;
+ }
+
+ SkASSERT(y0 <= y1);
+
+ SkFDot6 dx = SkFixedToFDot6(x1 - x0);
+ SkFDot6 dy = SkFixedToFDot6(y1 - y0);
+
+ // are we a zero-height line?
+ if (dy == 0) {
+ return false;
+ }
+
+ SkASSERT(slope < SK_MaxS32);
+
+ SkFDot6 absSlope = SkAbs32(SkFixedToFDot6(slope));
+ fX = x0;
+ fDX = slope;
+ fUpperX = x0;
+ fY = y0;
+ fUpperY = y0;
+ fLowerY = y1;
+ fDY = (dx == 0 || slope == 0)
+ ? SK_MaxS32
+ : absSlope < kInverseTableSize
+ ? quick_inverse(absSlope)
+ : SkAbs32(quick_div(dy, dx));
+
+ return true;
+}
+
+bool SkAnalyticEdge::update(SkFixed last_y, bool sortY) {
+ SkASSERT(last_y >= fLowerY); // we shouldn't update edge if last_y < fLowerY
+ if (fCurveCount < 0) {
+ return static_cast<SkAnalyticCubicEdge*>(this)->updateCubic(sortY);
+ } else if (fCurveCount > 0) {
+ return static_cast<SkAnalyticQuadraticEdge*>(this)->updateQuadratic();
+ }
+ return false;
+}
+
+bool SkAnalyticQuadraticEdge::setQuadratic(const SkPoint pts[3]) {
+ fRiteE = nullptr;
+
+ if (!fQEdge.setQuadraticWithoutUpdate(pts, kDefaultAccuracy)) {
+ return false;
+ }
+ fQEdge.fQx >>= kDefaultAccuracy;
+ fQEdge.fQy >>= kDefaultAccuracy;
+ fQEdge.fQDx >>= kDefaultAccuracy;
+ fQEdge.fQDy >>= kDefaultAccuracy;
+ fQEdge.fQDDx >>= kDefaultAccuracy;
+ fQEdge.fQDDy >>= kDefaultAccuracy;
+ fQEdge.fQLastX >>= kDefaultAccuracy;
+ fQEdge.fQLastY >>= kDefaultAccuracy;
+ fQEdge.fQy = SnapY(fQEdge.fQy);
+ fQEdge.fQLastY = SnapY(fQEdge.fQLastY);
+
+ fWinding = fQEdge.fWinding;
+ fCurveCount = fQEdge.fCurveCount;
+ fCurveShift = fQEdge.fCurveShift;
+
+ fSnappedX = fQEdge.fQx;
+ fSnappedY = fQEdge.fQy;
+
+ return this->updateQuadratic();
+}
+
+bool SkAnalyticQuadraticEdge::updateQuadratic() {
+ int success = 0; // initialize to fail!
+ int count = fCurveCount;
+ SkFixed oldx = fQEdge.fQx;
+ SkFixed oldy = fQEdge.fQy;
+ SkFixed dx = fQEdge.fQDx;
+ SkFixed dy = fQEdge.fQDy;
+ SkFixed newx, newy, newSnappedX, newSnappedY;
+ int shift = fCurveShift;
+
+ SkASSERT(count > 0);
+
+ do {
+ SkFixed slope;
+        if (--count > 0) {
+ newx = oldx + (dx >> shift);
+ newy = oldy + (dy >> shift);
+ if (SkAbs32(dy >> shift) >= SK_Fixed1 * 2) { // only snap when dy is large enough
+ SkFDot6 diffY = SkFixedToFDot6(newy - fSnappedY);
+ slope = diffY ? quick_div(SkFixedToFDot6(newx - fSnappedX), diffY)
+ : SK_MaxS32;
+ newSnappedY = SkTMin<SkFixed>(fQEdge.fQLastY, SkFixedRoundToFixed(newy));
+ newSnappedX = newx - SkFixedMul(slope, newy - newSnappedY);
+ } else {
+ newSnappedY = SkTMin(fQEdge.fQLastY, SnapY(newy));
+ newSnappedX = newx;
+ SkFDot6 diffY = SkFixedToFDot6(newSnappedY - fSnappedY);
+ slope = diffY ? quick_div(SkFixedToFDot6(newx - fSnappedX), diffY)
+ : SK_MaxS32;
+ }
+ dx += fQEdge.fQDDx;
+ dy += fQEdge.fQDDy;
+        } else { // last segment
+ newx = fQEdge.fQLastX;
+ newy = fQEdge.fQLastY;
+ newSnappedY = newy;
+ newSnappedX = newx;
+ SkFDot6 diffY = (newy - fSnappedY) >> 10;
+ slope = diffY ? quick_div((newx - fSnappedX) >> 10, diffY) : SK_MaxS32;
+ }
+ if (slope < SK_MaxS32) {
+ success = this->updateLine(fSnappedX, fSnappedY, newSnappedX, newSnappedY, slope);
+ }
+ oldx = newx;
+ oldy = newy;
+ } while (count > 0 && !success);
+
+ SkASSERT(newSnappedY <= fQEdge.fQLastY);
+
+ fQEdge.fQx = newx;
+ fQEdge.fQy = newy;
+ fQEdge.fQDx = dx;
+ fQEdge.fQDy = dy;
+ fSnappedX = newSnappedX;
+ fSnappedY = newSnappedY;
+ fCurveCount = SkToS8(count);
+ return success;
+}
+
+bool SkAnalyticCubicEdge::setCubic(const SkPoint pts[4], bool sortY) {
+ fRiteE = nullptr;
+
+ if (!fCEdge.setCubicWithoutUpdate(pts, kDefaultAccuracy, sortY)) {
+ return false;
+ }
+
+ fCEdge.fCx >>= kDefaultAccuracy;
+ fCEdge.fCy >>= kDefaultAccuracy;
+ fCEdge.fCDx >>= kDefaultAccuracy;
+ fCEdge.fCDy >>= kDefaultAccuracy;
+ fCEdge.fCDDx >>= kDefaultAccuracy;
+ fCEdge.fCDDy >>= kDefaultAccuracy;
+ fCEdge.fCDDDx >>= kDefaultAccuracy;
+ fCEdge.fCDDDy >>= kDefaultAccuracy;
+ fCEdge.fCLastX >>= kDefaultAccuracy;
+ fCEdge.fCLastY >>= kDefaultAccuracy;
+ fCEdge.fCy = SnapY(fCEdge.fCy);
+ fCEdge.fCLastY = SnapY(fCEdge.fCLastY);
+
+ fWinding = fCEdge.fWinding;
+ fCurveCount = fCEdge.fCurveCount;
+ fCurveShift = fCEdge.fCurveShift;
+ fCubicDShift = fCEdge.fCubicDShift;
+
+ fSnappedY = fCEdge.fCy;
+
+ return this->updateCubic(sortY);
+}
+
+bool SkAnalyticCubicEdge::updateCubic(bool sortY) {
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fCEdge.fCx;
+ SkFixed oldy = fCEdge.fCy;
+ SkFixed newx, newy;
+ const int ddshift = fCurveShift;
+ const int dshift = fCubicDShift;
+
+ SkASSERT(count < 0);
+
+ do {
+ if (++count < 0) {
+ newx = oldx + (fCEdge.fCDx >> dshift);
+ fCEdge.fCDx += fCEdge.fCDDx >> ddshift;
+ fCEdge.fCDDx += fCEdge.fCDDDx;
+
+ newy = oldy + (fCEdge.fCDy >> dshift);
+ fCEdge.fCDy += fCEdge.fCDDy >> ddshift;
+ fCEdge.fCDDy += fCEdge.fCDDDy;
+        } else { // last segment
+ newx = fCEdge.fCLastX;
+ newy = fCEdge.fCLastY;
+ }
+
+ // we want to say SkASSERT(oldy <= newy), but our finite fixedpoint
+ // doesn't always achieve that, so we have to explicitly pin it here.
+ if (sortY && newy < oldy) {
+ newy = oldy;
+ }
+
+ SkFixed newSnappedY = SnapY(newy);
+ // we want to SkASSERT(snappedNewY <= fCEdge.fCLastY), but our finite fixedpoint
+ // doesn't always achieve that, so we have to explicitly pin it here.
+ if (sortY && fCEdge.fCLastY < newSnappedY) {
+ newSnappedY = fCEdge.fCLastY;
+ count = 0;
+ }
+
+ SkFixed slope = SkFixedToFDot6(newSnappedY - fSnappedY) == 0
+ ? SK_MaxS32
+ : SkFDot6Div(SkFixedToFDot6(newx - oldx),
+ SkFixedToFDot6(newSnappedY - fSnappedY));
+
+ success = this->updateLine(oldx, fSnappedY, newx, newSnappedY, slope);
+
+ oldx = newx;
+ oldy = newy;
+ fSnappedY = newSnappedY;
+ } while (count < 0 && !success);
+
+ fCEdge.fCx = newx;
+ fCEdge.fCy = newy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
diff --git a/gfx/skia/skia/src/core/SkAnalyticEdge.h b/gfx/skia/skia/src/core/SkAnalyticEdge.h
new file mode 100644
index 0000000000..85d012234a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnalyticEdge.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnalyticEdge_DEFINED
+#define SkAnalyticEdge_DEFINED
+
+#include "include/private/SkTo.h"
+#include "src/core/SkEdge.h"
+
+#include <utility>
+
+struct SkAnalyticEdge {
+ // Similar to SkEdge, the conic edges will be converted to quadratic edges
+ enum Type {
+ kLine_Type,
+ kQuad_Type,
+ kCubic_Type
+ };
+
+ SkAnalyticEdge* fNext;
+ SkAnalyticEdge* fPrev;
+
+ // During aaa_walk_edges, if this edge is a left edge,
+ // then fRiteE is its corresponding right edge. Otherwise it's nullptr.
+ SkAnalyticEdge* fRiteE;
+
+ SkFixed fX;
+ SkFixed fDX;
+ SkFixed fUpperX; // The x value when y = fUpperY
+ SkFixed fY; // The current y
+ SkFixed fUpperY; // The upper bound of y (our edge is from y = fUpperY to y = fLowerY)
+ SkFixed fLowerY; // The lower bound of y (our edge is from y = fUpperY to y = fLowerY)
+ SkFixed fDY; // abs(1/fDX); may be SK_MaxS32 when fDX is close to 0.
+ // fDY is only used for blitting trapezoids.
+
+ SkFixed fSavedX; // For deferred blitting
+ SkFixed fSavedY; // For deferred blitting
+ SkFixed fSavedDY; // For deferred blitting
+
+ int8_t fCurveCount; // only used by kQuad(+) and kCubic(-)
+    uint8_t fCurveShift;    // applied to all Dx/DDx/DDDx except for the fCubicDShift exception
+ uint8_t fCubicDShift; // applied to fCDx and fCDy only in cubic
+ int8_t fWinding; // 1 or -1
+
+ static const int kDefaultAccuracy = 2; // default accuracy for snapping
+
+ static inline SkFixed SnapY(SkFixed y) {
+ const int accuracy = kDefaultAccuracy;
+ // This approach is safer than left shift, round, then right shift
+ return ((unsigned)y + (SK_Fixed1 >> (accuracy + 1))) >> (16 - accuracy) << (16 - accuracy);
+ }
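+    // For example, with kDefaultAccuracy == 2 the snapped value is a multiple
+    // of SK_Fixed1 / 4: SnapY(SkFloatToFixed(1.2f)) == SkFloatToFixed(1.25f).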
+
+ // Update fX, fY of this edge so fY = y
+ inline void goY(SkFixed y) {
+ if (y == fY + SK_Fixed1) {
+ fX = fX + fDX;
+ fY = y;
+ } else if (y != fY) {
+ // Drop lower digits as our alpha only has 8 bits
+ // (fDX and y - fUpperY may be greater than SK_Fixed1)
+ fX = fUpperX + SkFixedMul(fDX, y - fUpperY);
+ fY = y;
+ }
+ }
+
+ inline void goY(SkFixed y, int yShift) {
+ SkASSERT(yShift >= 0 && yShift <= kDefaultAccuracy);
+ SkASSERT(fDX == 0 || y - fY == SK_Fixed1 >> yShift);
+ fY = y;
+ fX += fDX >> yShift;
+ }
+
+ inline void saveXY(SkFixed x, SkFixed y, SkFixed dY) {
+ fSavedX = x;
+ fSavedY = y;
+ fSavedDY = dY;
+ }
+
+ bool setLine(const SkPoint& p0, const SkPoint& p1);
+ bool updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by, SkFixed slope);
+
+ // return true if we're NOT done with this edge
+ bool update(SkFixed last_y, bool sortY = true);
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("edge: upperY:%d lowerY:%d y:%g x:%g dx:%g w:%d\n",
+ fUpperY, fLowerY, SkFixedToFloat(fY), SkFixedToFloat(fX),
+ SkFixedToFloat(fDX), fWinding);
+ }
+
+ void validate() const {
+ SkASSERT(fPrev && fNext);
+ SkASSERT(fPrev->fNext == this);
+ SkASSERT(fNext->fPrev == this);
+
+ SkASSERT(fUpperY < fLowerY);
+ SkASSERT(SkAbs32(fWinding) == 1);
+ }
+#endif
+};
+
+struct SkAnalyticQuadraticEdge : public SkAnalyticEdge {
+ SkQuadraticEdge fQEdge;
+
+ // snap y to integer points in the middle of the curve to accelerate AAA path filling
+ SkFixed fSnappedX, fSnappedY;
+
+ bool setQuadratic(const SkPoint pts[3]);
+ bool updateQuadratic();
+ inline void keepContinuous() {
+        // We use fX as the starting x to ensure continuity.
+ // Without it, we may break the sorted edge list.
+ SkASSERT(SkAbs32(fX - SkFixedMul(fY - fSnappedY, fDX) - fSnappedX) < SK_Fixed1);
+ SkASSERT(SkAbs32(fY - fSnappedY) < SK_Fixed1); // This may differ due to smooth jump
+ fSnappedX = fX;
+ fSnappedY = fY;
+ }
+};
+
+struct SkAnalyticCubicEdge : public SkAnalyticEdge {
+ SkCubicEdge fCEdge;
+
+ SkFixed fSnappedY; // to make sure that y is increasing with smooth jump and snapping
+
+ bool setCubic(const SkPoint pts[4], bool sortY = true);
+ bool updateCubic(bool sortY = true);
+ inline void keepContinuous() {
+ SkASSERT(SkAbs32(fX - SkFixedMul(fDX, fY - SnapY(fCEdge.fCy)) - fCEdge.fCx) < SK_Fixed1);
+ fCEdge.fCx = fX;
+ fSnappedY = fY;
+ }
+};
+
+struct SkBezier {
+ int fCount; // 2 line, 3 quad, 4 cubic
+ SkPoint fP0;
+ SkPoint fP1;
+
+    // See if the left shift, conversion to SkFDot6, and rounding produce the same top and bottom y.
+ // If so, the edge will be empty.
+ static inline bool IsEmpty(SkScalar y0, SkScalar y1, int shift = 2) {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ return SkScalarRoundToFDot6(y0, shift) == SkScalarRoundToFDot6(y1, shift);
+#else
+ SkScalar scale = (1 << (shift + 6));
+ return SkFDot6Round(int(y0 * scale)) == SkFDot6Round(int(y1 * scale));
+#endif
+ }
+};
+
+struct SkLine : public SkBezier {
+ bool set(const SkPoint pts[2]){
+ if (IsEmpty(pts[0].fY, pts[1].fY)) {
+ return false;
+ }
+ fCount = 2;
+ fP0 = pts[0];
+ fP1 = pts[1];
+ return true;
+ }
+};
+
+struct SkQuad : public SkBezier {
+ SkPoint fP2;
+
+ bool set(const SkPoint pts[3]){
+ if (IsEmpty(pts[0].fY, pts[2].fY)) {
+ return false;
+ }
+ fCount = 3;
+ fP0 = pts[0];
+ fP1 = pts[1];
+ fP2 = pts[2];
+ return true;
+ }
+};
+
+struct SkCubic : public SkBezier {
+ SkPoint fP2;
+ SkPoint fP3;
+
+ bool set(const SkPoint pts[4]){
+ // We do not chop at y extrema for cubics so pts[0], pts[1], pts[2], pts[3] may not be
+ // monotonic. Therefore, we have to check the emptiness for all three pairs, instead of just
+ // checking IsEmpty(pts[0].fY, pts[3].fY).
+ if (IsEmpty(pts[0].fY, pts[1].fY) && IsEmpty(pts[1].fY, pts[2].fY) &&
+ IsEmpty(pts[2].fY, pts[3].fY)) {
+ return false;
+ }
+ fCount = 4;
+ fP0 = pts[0];
+ fP1 = pts[1];
+ fP2 = pts[2];
+ fP3 = pts[3];
+ return true;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAnnotation.cpp b/gfx/skia/skia/src/core/SkAnnotation.cpp
new file mode 100644
index 0000000000..43b3170f2b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotation.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAnnotation.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkAnnotationKeys.h"
+
+const char* SkAnnotationKeys::URL_Key() {
+ return "SkAnnotationKey_URL";
+}
+
+const char* SkAnnotationKeys::Define_Named_Dest_Key() {
+ return "SkAnnotationKey_Define_Named_Dest";
+}
+
+const char* SkAnnotationKeys::Link_Named_Dest_Key() {
+ return "SkAnnotationKey_Link_Named_Dest";
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkAnnotateRectWithURL(SkCanvas* canvas, const SkRect& rect, SkData* value) {
+ if (nullptr == value) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::URL_Key(), value);
+}
+
+void SkAnnotateNamedDestination(SkCanvas* canvas, const SkPoint& point, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ const SkRect rect = SkRect::MakeXYWH(point.x(), point.y(), 0, 0);
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Define_Named_Dest_Key(), name);
+}
+
+void SkAnnotateLinkToDestination(SkCanvas* canvas, const SkRect& rect, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Link_Named_Dest_Key(), name);
+}
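+
+// A minimal usage sketch (illustrative, not part of the upstream sources),
+// assuming an SkCanvas* canvas from a backend that records annotations,
+// such as a PDF canvas:
+//
+//   sk_sp<SkData> url = SkData::MakeWithCString("https://example.com");
+//   SkAnnotateRectWithURL(canvas, SkRect::MakeXYWH(0, 0, 100, 20), url.get());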
diff --git a/gfx/skia/skia/src/core/SkAnnotationKeys.h b/gfx/skia/skia/src/core/SkAnnotationKeys.h
new file mode 100644
index 0000000000..90fdc6d30a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotationKeys.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotationKeys_DEFINED
+#define SkAnnotationKeys_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkAnnotationKeys {
+public:
+ /**
+ * Returns the canonical key whose payload is a URL
+ */
+ static const char* URL_Key();
+
+ /**
+ * Returns the canonical key whose payload is the name of a destination to
+ * be defined.
+ */
+ static const char* Define_Named_Dest_Key();
+
+ /**
+ * Returns the canonical key whose payload is the name of a destination to
+ * be linked to.
+ */
+ static const char* Link_Named_Dest_Key();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAntiRun.h b/gfx/skia/skia/src/core/SkAntiRun.h
new file mode 100644
index 0000000000..70ef751a96
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAntiRun.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAntiRun_DEFINED
+#define SkAntiRun_DEFINED
+
+#include "include/private/SkTo.h"
+#include "src/core/SkBlitter.h"
+
+/** Sparse array of run-length-encoded alpha (supersampling coverage) values.
+ Sparseness allows us to independently compose several paths into the
+ same SkAlphaRuns buffer.
+*/
+
+class SkAlphaRuns {
+public:
+ int16_t* fRuns;
+ uint8_t* fAlpha;
+
+ // Return 0-255 given 0-256
+ static inline SkAlpha CatchOverflow(int alpha) {
+ SkASSERT(alpha >= 0 && alpha <= 256);
+ return alpha - (alpha >> 8);
+ }
+
+ /// Returns true if the scanline contains only a single run,
+ /// of alpha value 0.
+ bool empty() const {
+ SkASSERT(fRuns[0] > 0);
+ return fAlpha[0] == 0 && fRuns[fRuns[0]] == 0;
+ }
+
+ /// Reinitialize for a new scanline.
+ void reset(int width);
+
+ /**
+ * Insert into the buffer a run starting at (x-offsetX):
+ * if startAlpha > 0
+ * one pixel with value += startAlpha,
+ * max 255
+ * if middleCount > 0
+ * middleCount pixels with value += maxValue
+ * if stopAlpha > 0
+ * one pixel with value += stopAlpha
+ * Returns the offsetX value that should be passed on the next call,
+ * assuming we're on the same scanline. If the caller is switching
+ * scanlines, then offsetX should be 0 when this is called.
+ */
+ SK_ALWAYS_INLINE int add(int x, U8CPU startAlpha, int middleCount, U8CPU stopAlpha,
+ U8CPU maxValue, int offsetX) {
+ SkASSERT(middleCount >= 0);
+ SkASSERT(x >= 0 && x + (startAlpha != 0) + middleCount + (stopAlpha != 0) <= fWidth);
+
+ SkASSERT(fRuns[offsetX] >= 0);
+
+ int16_t* runs = fRuns + offsetX;
+ uint8_t* alpha = fAlpha + offsetX;
+ uint8_t* lastAlpha = alpha;
+ x -= offsetX;
+
+ if (startAlpha) {
+ SkAlphaRuns::Break(runs, alpha, x, 1);
+ /* I should be able to just add alpha[x] + startAlpha.
+ However, if the trailing edge of the previous span and the leading
+ edge of the current span round to the same super-sampled x value,
+ I might overflow to 256 with this add, hence the funny subtract (crud).
+ */
+ unsigned tmp = alpha[x] + startAlpha;
+ SkASSERT(tmp <= 256);
+ alpha[x] = SkToU8(tmp - (tmp >> 8)); // was (tmp >> 7), but that seems wrong if we're trying to catch 256
+
+ runs += x + 1;
+ alpha += x + 1;
+ x = 0;
+ SkDEBUGCODE(this->validate();)
+ }
+
+ if (middleCount) {
+ SkAlphaRuns::Break(runs, alpha, x, middleCount);
+ alpha += x;
+ runs += x;
+ x = 0;
+ do {
+ alpha[0] = SkToU8(CatchOverflow(alpha[0] + maxValue));
+ int n = runs[0];
+ SkASSERT(n <= middleCount);
+ alpha += n;
+ runs += n;
+ middleCount -= n;
+ } while (middleCount > 0);
+ SkDEBUGCODE(this->validate();)
+ lastAlpha = alpha;
+ }
+
+ if (stopAlpha) {
+ SkAlphaRuns::Break(runs, alpha, x, 1);
+ alpha += x;
+ alpha[0] = SkToU8(alpha[0] + stopAlpha);
+ SkDEBUGCODE(this->validate();)
+ lastAlpha = alpha;
+ }
+
+ return SkToS32(lastAlpha - fAlpha); // new offsetX
+ }
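+
+    // A minimal illustration (not part of the upstream sources): starting from
+    // a fresh scanline, add(3, 128, 2, 64, 255, 0) bumps pixel 3 by 128, the
+    // run covering pixels 4..5 by 255 (maxValue), and pixel 6 by 64, then
+    // returns 6 as the offsetX to pass to the next add() on this scanline.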
+
+ SkDEBUGCODE(void assertValid(int y, int maxStep) const;)
+ SkDEBUGCODE(void dump() const;)
+
+ /**
+ * Break the runs in the buffer at offsets x and x+count, properly
+ * updating the runs to the right and left.
+ * i.e. from the state AAAABBBB, run-length encoded as A4B4,
+ * Break(..., 2, 5) would produce AAAABBBB rle as A2A2B3B1.
+ * Allows add() to sum another run to some of the new sub-runs.
+ * i.e. adding ..CCCCC. would produce AADDEEEB, rle as A2D2E3B1.
+ */
+ static void Break(int16_t runs[], uint8_t alpha[], int x, int count) {
+ SkASSERT(count > 0 && x >= 0);
+
+ // SkAlphaRuns::BreakAt(runs, alpha, x);
+ // SkAlphaRuns::BreakAt(&runs[x], &alpha[x], count);
+
+ int16_t* next_runs = runs + x;
+ uint8_t* next_alpha = alpha + x;
+
+ while (x > 0) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ runs += n;
+ alpha += n;
+ x -= n;
+ }
+
+ runs = next_runs;
+ alpha = next_alpha;
+ x = count;
+
+ for (;;) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ x -= n;
+ if (x <= 0) {
+ break;
+ }
+ runs += n;
+ alpha += n;
+ }
+ }
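+
+    // Worked example (illustrative, not part of the upstream sources): for the
+    // buffer AAAABBBB encoded as runs = {4,_,_,_,4,_,_,_,0} with
+    // alpha = {A,_,_,_,B,...}, Break(runs, alpha, 2, 5) splits the runs at
+    // offsets 2 and 7, leaving runs = {2,_,2,_,3,_,_,1,0} and
+    // alpha = {A,_,A,_,B,_,_,B}, i.e. A2A2B3B1. The alpha values are
+    // unchanged; only the run boundaries move.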
+
+ /**
+ * Cut (at offset x in the buffer) a run into two shorter runs with
+ * matching alpha values.
+ * Used by the RectClipBlitter to trim a RLE encoding to match the
+ * clipping rectangle.
+ */
+ static void BreakAt(int16_t runs[], uint8_t alpha[], int x) {
+ while (x > 0) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ runs += n;
+ alpha += n;
+ x -= n;
+ }
+ }
+
+private:
+ SkDEBUGCODE(int fWidth;)
+ SkDEBUGCODE(void validate() const;)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkArenaAlloc.cpp b/gfx/skia/skia/src/core/SkArenaAlloc.cpp
new file mode 100644
index 0000000000..fc67c2d58e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkArenaAlloc.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkArenaAlloc.h"
+#include <algorithm>
+#include <new>
+
+static char* end_chain(char*) { return nullptr; }
+
+static uint32_t first_allocated_block(uint32_t blockSize, uint32_t firstHeapAllocation) {
+ return firstHeapAllocation > 0 ? firstHeapAllocation :
+ blockSize > 0 ? blockSize : 1024;
+}
+
+SkArenaAlloc::SkArenaAlloc(char* block, size_t size, size_t firstHeapAllocation)
+ : fDtorCursor {block}
+ , fCursor {block}
+ , fEnd {block + ToU32(size)}
+ , fFirstBlock {block}
+ , fFirstSize {ToU32(size)}
+ , fFirstHeapAllocationSize {first_allocated_block(ToU32(size), ToU32(firstHeapAllocation))}
+{
+ if (size < sizeof(Footer)) {
+ fEnd = fCursor = fDtorCursor = nullptr;
+ }
+
+ if (fCursor != nullptr) {
+ this->installFooter(end_chain, 0);
+ }
+}
+
+SkArenaAlloc::~SkArenaAlloc() {
+ RunDtorsOnBlock(fDtorCursor);
+}
+
+void SkArenaAlloc::reset() {
+ this->~SkArenaAlloc();
+ new (this) SkArenaAlloc{fFirstBlock, fFirstSize, fFirstHeapAllocationSize};
+}
+
+void SkArenaAlloc::installFooter(FooterAction* action, uint32_t padding) {
+ assert(padding < 64);
+ int64_t actionInt = (int64_t)(intptr_t)action;
+
+ // The top 14 bits should be either all 0s or all 1s. Check this.
+ assert((actionInt << 6) >> 6 == actionInt);
+ Footer encodedFooter = (actionInt << 6) | padding;
+ memmove(fCursor, &encodedFooter, sizeof(Footer));
+ fCursor += sizeof(Footer);
+ fDtorCursor = fCursor;
+}
+
+void SkArenaAlloc::installPtrFooter(FooterAction* action, char* ptr, uint32_t padding) {
+ memmove(fCursor, &ptr, sizeof(char*));
+ fCursor += sizeof(char*);
+ this->installFooter(action, padding);
+}
+
+char* SkArenaAlloc::SkipPod(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(int32_t));
+ int32_t skip;
+ memmove(&skip, objEnd, sizeof(int32_t));
+ return objEnd - skip;
+}
+
+void SkArenaAlloc::RunDtorsOnBlock(char* footerEnd) {
+ while (footerEnd != nullptr) {
+ Footer footer;
+ memcpy(&footer, footerEnd - sizeof(Footer), sizeof(Footer));
+
+ FooterAction* action = (FooterAction*)(footer >> 6);
+ ptrdiff_t padding = footer & 63;
+
+ footerEnd = action(footerEnd) - padding;
+ }
+}
+
+char* SkArenaAlloc::NextBlock(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(char*));
+ char* next;
+ memmove(&next, objEnd, sizeof(char*));
+ RunDtorsOnBlock(next);
+ delete [] objEnd;
+ return nullptr;
+}
+
+void SkArenaAlloc::installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding) {
+ memmove(fCursor, &value, sizeof(uint32_t));
+ fCursor += sizeof(uint32_t);
+ this->installFooter(action, padding);
+}
+
+void SkArenaAlloc::ensureSpace(uint32_t size, uint32_t alignment) {
+ constexpr uint32_t headerSize = sizeof(Footer) + sizeof(ptrdiff_t);
+    // The Chrome C++ library we use does not define std::max_align_t.
+ // This must be conservative to add the right amount of extra memory to handle the alignment
+ // padding.
+ constexpr uint32_t alignof_max_align_t = 8;
+ constexpr uint32_t maxSize = std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t overhead = headerSize + sizeof(Footer);
+ AssertRelease(size <= maxSize - overhead);
+ uint32_t objSizeAndOverhead = size + overhead;
+ if (alignment > alignof_max_align_t) {
+ uint32_t alignmentOverhead = alignment - 1;
+ AssertRelease(objSizeAndOverhead <= maxSize - alignmentOverhead);
+ objSizeAndOverhead += alignmentOverhead;
+ }
+
+ uint32_t minAllocationSize;
+ if (fFirstHeapAllocationSize <= maxSize / fFib0) {
+ minAllocationSize = fFirstHeapAllocationSize * fFib0;
+ fFib0 += fFib1;
+ std::swap(fFib0, fFib1);
+ } else {
+ minAllocationSize = maxSize;
+ }
+ uint32_t allocationSize = std::max(objSizeAndOverhead, minAllocationSize);
+
+    // Round up to a nice size: if > 32K, align to a 4K boundary, else up to max_align_t.
+    // The > 32K heuristic comes from JEMalloc's behavior.
+ {
+ uint32_t mask = allocationSize > (1 << 15) ? (1 << 12) - 1 : 16 - 1;
+ AssertRelease(allocationSize <= maxSize - mask);
+ allocationSize = (allocationSize + mask) & ~mask;
+ }
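+    // For example (illustrative), a 40000-byte request exceeds 32K, so it is
+    // rounded up to 40960, the next multiple of 4K.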
+
+ char* newBlock = new char[allocationSize];
+
+ auto previousDtor = fDtorCursor;
+ fCursor = newBlock;
+ fDtorCursor = newBlock;
+ fEnd = fCursor + allocationSize;
+ this->installPtrFooter(NextBlock, previousDtor, 0);
+}
+
+char* SkArenaAlloc::allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+
+restart:
+ uint32_t skipOverhead = 0;
+ bool needsSkipFooter = fCursor != fDtorCursor;
+ if (needsSkipFooter) {
+ skipOverhead = sizeof(Footer) + sizeof(uint32_t);
+ }
+ char* objStart = (char*)((uintptr_t)(fCursor + skipOverhead + mask) & ~mask);
+ uint32_t totalSize = sizeIncludingFooter + skipOverhead;
+
+ if ((ptrdiff_t)totalSize > fEnd - objStart) {
+ this->ensureSpace(totalSize, alignment);
+ goto restart;
+ }
+
+ AssertRelease((ptrdiff_t)totalSize <= fEnd - objStart);
+
+ // Install a skip footer if needed, thus terminating a run of POD data. The calling code is
+ // responsible for installing the footer after the object.
+ if (needsSkipFooter) {
+ this->installUint32Footer(SkipPod, ToU32(fCursor - fDtorCursor), 0);
+ }
+
+ return objStart;
+}
diff --git a/gfx/skia/skia/src/core/SkArenaAlloc.h b/gfx/skia/skia/src/core/SkArenaAlloc.h
new file mode 100644
index 0000000000..bb8d05f98e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkArenaAlloc.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArenaAlloc_DEFINED
+#define SkArenaAlloc_DEFINED
+
+#include "include/private/SkTFitsIn.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// SkArenaAlloc allocates objects and destroys them when it is itself destroyed. It's designed
+// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
+// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
+// starting with an allocation of firstHeapAllocation bytes. If your data (plus a small overhead)
+// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
+// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
+// firstHeapAllocation, then blockSize is used; if that too is 0, 1024 is used.
+//
+// Examples:
+//
+// char block[mostCasesSize];
+// SkArenaAlloc arena(block, mostCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+// std::unique_ptr<char[]> block{new char[mostCasesSize]};
+// SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following pattern.
+//
+// SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+// class Foo {
+// char storage[mostCasesSize];
+// SkArenaAlloc arena (storage, mostCasesSize);
+// };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
+// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
+// is an additional overhead of typically 8 bytes when switching from POD data to non-POD data.
+//
+// If additional blocks are needed, their sizes increase exponentially. This strategy bounds the
+// recursion of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using the Fibonacci
+// sequence, which means that for 2^32 bytes of memory there are 48 allocations, and for 2^48
+// there are 71.
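+//
+// A minimal usage sketch (illustrative, not part of the upstream sources):
+//
+//   SkSTArenaAlloc<1024> arena;                   // 1024 bytes of inline storage
+//   SkPaint* paint = arena.make<SkPaint>();       // destroyed when the arena dies
+//   int* pods = arena.makeArrayDefault<int>(16);  // uninitialized POD array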
+class SkArenaAlloc {
+public:
+ SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);
+
+ explicit SkArenaAlloc(size_t firstHeapAllocation)
+ : SkArenaAlloc(nullptr, 0, firstHeapAllocation)
+ {}
+
+ ~SkArenaAlloc();
+
+ template <typename T, typename... Args>
+ T* make(Args&&... args) {
+ uint32_t size = ToU32(sizeof(T));
+ uint32_t alignment = ToU32(alignof(T));
+ char* objStart;
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(size, alignment);
+ fCursor = objStart + size;
+ } else {
+ objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = ToU32(objStart - fCursor);
+
+ // Advance to end of object to install footer.
+ fCursor = objStart + size;
+ FooterAction* releaser = [](char* objEnd) {
+ char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+ ((T*)objStart)->~T();
+ return objStart;
+ };
+ this->installFooter(releaser, padding);
+ }
+
+ // This must be last to make objects with nested use of this allocator work.
+ return new(objStart) T(std::forward<Args>(args)...);
+ }
+
+ template <typename T>
+ T* makeArrayDefault(size_t count) {
+ AssertRelease(SkTFitsIn<uint32_t>(count));
+ uint32_t safeCount = ToU32(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then no initialization takes place.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T;
+ }
+ return array;
+ }
+
+ template <typename T>
+ T* makeArray(size_t count) {
+ AssertRelease(SkTFitsIn<uint32_t>(count));
+ uint32_t safeCount = ToU32(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then the memory is initialized. For example, an array of chars will
+ // be zeroed.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T();
+ }
+ return array;
+ }
+
+    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
+ void* makeBytesAlignedTo(size_t size, size_t align) {
+ AssertRelease(SkTFitsIn<uint32_t>(size));
+ auto objStart = this->allocObject(ToU32(size), ToU32(align));
+ fCursor = objStart + size;
+ return objStart;
+ }
+
+ // Destroy all allocated objects, free any heap allocations.
+ void reset();
+
+private:
+ static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
+ static uint32_t ToU32(size_t v) {
+ assert(SkTFitsIn<uint32_t>(v));
+ return (uint32_t)v;
+ }
+
+ using Footer = int64_t;
+ using FooterAction = char* (char*);
+
+ static char* SkipPod(char* footerEnd);
+ static void RunDtorsOnBlock(char* footerEnd);
+ static char* NextBlock(char* footerEnd);
+
+ void installFooter(FooterAction* releaser, uint32_t padding);
+ void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
+ void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);
+
+ void ensureSpace(uint32_t size, uint32_t alignment);
+
+ char* allocObject(uint32_t size, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+ uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
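+        // (~p + 1) is -p modulo 2^n, so this computes (-fCursor) & mask:
+        // the distance from fCursor up to the next aligned address.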
+ uintptr_t totalSize = size + alignedOffset;
+ AssertRelease(totalSize >= size);
+ if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
+ this->ensureSpace(size, alignment);
+ alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ }
+ return fCursor + alignedOffset;
+ }
+
+ char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);
+
+ template <typename T>
+ char* commonArrayAlloc(uint32_t count) {
+ char* objStart;
+ AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
+ uint32_t arraySize = ToU32(count * sizeof(T));
+ uint32_t alignment = ToU32(alignof(T));
+
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(arraySize, alignment);
+ fCursor = objStart + arraySize;
+ } else {
+ constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
+ AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
+ uint32_t totalSize = arraySize + overhead;
+ objStart = this->allocObjectWithFooter(totalSize, alignment);
+
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = ToU32(objStart - fCursor);
+
+            // Advance to end of array to install footer.
+ fCursor = objStart + arraySize;
+ this->installUint32Footer(
+ [](char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+ uint32_t count;
+ memmove(&count, objEnd, sizeof(uint32_t));
+ char* objStart = objEnd - count * sizeof(T);
+ T* array = (T*) objStart;
+ for (uint32_t i = 0; i < count; i++) {
+ array[i].~T();
+ }
+ return objStart;
+ },
+ ToU32(count),
+ padding);
+ }
+
+ return objStart;
+ }
+
+ char* fDtorCursor;
+ char* fCursor;
+ char* fEnd;
+ char* const fFirstBlock;
+ const uint32_t fFirstSize;
+ const uint32_t fFirstHeapAllocationSize;
+
+ // Use the Fibonacci sequence as the growth factor for block size. The size of the block
+ // allocated is fFib0 * fFirstHeapAllocationSize. Using 2 ^ n * fFirstHeapAllocationSize
+ // had too much slop for Android.
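+    // For example, with a firstHeapAllocationSize of 1KB, successive heap
+    // blocks are sized 1KB, 1KB, 2KB, 3KB, 5KB, 8KB, ... (before rounding).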
+ uint32_t fFib0 {1}, fFib1 {1};
+};
+
+// Helper for defining allocators with inline/reserved storage.
+// For argument declarations, stick to the base type (SkArenaAlloc).
+template <size_t InlineStorageSize>
+class SkSTArenaAlloc : public SkArenaAlloc {
+public:
+ explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
+ : INHERITED(fInlineStorage, InlineStorageSize, firstHeapAllocation) {}
+
+private:
+ char fInlineStorage[InlineStorageSize];
+
+ using INHERITED = SkArenaAlloc;
+};
+
+#endif // SkArenaAlloc_DEFINED
diff --git a/gfx/skia/skia/src/core/SkArenaAllocList.h b/gfx/skia/skia/src/core/SkArenaAllocList.h
new file mode 100644
index 0000000000..c544b863d0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkArenaAllocList.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArenaAllocList_DEFINED
+#define SkArenaAllocList_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkArenaAlloc.h"
+
+/**
+ * A singly linked list of Ts stored in a SkArenaAlloc. The arena rather than the list owns
+ * the elements. This supports forward iteration and range based for loops.
+ */
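+//
+// A minimal usage sketch (illustrative, not part of the upstream sources):
+//
+//   SkArenaAlloc arena(nullptr, 0, 1024);
+//   SkArenaAllocList<int> list;
+//   list.append(&arena, 1);
+//   list.append(&arena, 2);
+//   for (int& v : list) { /* visits 1, then 2 */ }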
+template <typename T>
+class SkArenaAllocList {
+private:
+ struct Node;
+
+public:
+ SkArenaAllocList() = default;
+
+ void reset() { fHead = fTail = nullptr; }
+
+ template <typename... Args>
+ inline T& append(SkArenaAlloc* arena, Args... args);
+
+ class Iter {
+ public:
+ Iter() = default;
+ inline Iter& operator++();
+ T& operator*() const { return fCurr->fT; }
+ T* operator->() const { return &fCurr->fT; }
+ bool operator==(const Iter& that) const { return fCurr == that.fCurr; }
+ bool operator!=(const Iter& that) const { return !(*this == that); }
+
+ private:
+ friend class SkArenaAllocList;
+ explicit Iter(Node* node) : fCurr(node) {}
+ Node* fCurr = nullptr;
+ };
+
+ Iter begin() { return Iter(fHead); }
+ Iter end() { return Iter(); }
+ Iter tail() { return Iter(fTail); }
+
+private:
+ struct Node {
+ template <typename... Args>
+ Node(Args... args) : fT(std::forward<Args>(args)...) {}
+ T fT;
+ Node* fNext = nullptr;
+ };
+ Node* fHead = nullptr;
+ Node* fTail = nullptr;
+};
+
+template <typename T>
+template <typename... Args>
+T& SkArenaAllocList<T>::append(SkArenaAlloc* arena, Args... args) {
+ SkASSERT(!fHead == !fTail);
+ auto* n = arena->make<Node>(std::forward<Args>(args)...);
+ if (!fTail) {
+ fHead = fTail = n;
+ } else {
+ fTail = fTail->fNext = n;
+ }
+ return fTail->fT;
+}
+
+template <typename T>
+typename SkArenaAllocList<T>::Iter& SkArenaAllocList<T>::Iter::operator++() {
+ fCurr = fCurr->fNext;
+ return *this;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAutoBlitterChoose.h b/gfx/skia/skia/src/core/SkAutoBlitterChoose.h
new file mode 100644
index 0000000000..9098c7b2bd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoBlitterChoose.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoBlitterChoose_DEFINED
+#define SkAutoBlitterChoose_DEFINED
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDraw.h"
+
+class SkMatrix;
+class SkPaint;
+class SkPixmap;
+
+class SkAutoBlitterChoose : SkNoncopyable {
+public:
+ SkAutoBlitterChoose() {}
+ SkAutoBlitterChoose(const SkDraw& draw, const SkMatrix* matrix, const SkPaint& paint,
+ bool drawCoverage = false) {
+ this->choose(draw, matrix, paint, drawCoverage);
+ }
+
+ SkBlitter* operator->() { return fBlitter; }
+ SkBlitter* get() const { return fBlitter; }
+
+ SkBlitter* choose(const SkDraw& draw, const SkMatrix* matrix, const SkPaint& paint,
+ bool drawCoverage = false) {
+ SkASSERT(!fBlitter);
+ if (!matrix) {
+ matrix = draw.fMatrix;
+ }
+ fBlitter = SkBlitter::Choose(draw.fDst, *matrix, paint, &fAlloc, drawCoverage);
+
+ if (draw.fCoverage) {
+ // hmm, why can't choose ignore the paint if drawCoverage is true?
+ SkBlitter* coverageBlitter = SkBlitter::Choose(*draw.fCoverage, *matrix, SkPaint(),
+ &fAlloc, true);
+ fBlitter = fAlloc.make<SkPairBlitter>(fBlitter, coverageBlitter);
+ }
+ return fBlitter;
+ }
+
+private:
+ // Owned by fAlloc, which will handle the delete.
+ SkBlitter* fBlitter = nullptr;
+
+ SkSTArenaAlloc<kSkBlitterContextSize> fAlloc;
+};
+#define SkAutoBlitterChoose(...) SK_REQUIRE_LOCAL_VAR(SkAutoBlitterChoose)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAutoMalloc.h b/gfx/skia/skia/src/core/SkAutoMalloc.h
new file mode 100644
index 0000000000..4be8ddb03e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoMalloc.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoMalloc_DEFINED
+#define SkAutoMalloc_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkNoncopyable.h"
+
+#include <memory>
+
+/**
+ * Manage an allocated block of heap memory. This object is the sole manager of
+ * the lifetime of the block, so the caller must not call sk_free() or delete
+ * on the block, unless release() was called.
+ */
+class SkAutoMalloc : SkNoncopyable {
+public:
+ explicit SkAutoMalloc(size_t size = 0)
+ : fPtr(size ? sk_malloc_throw(size) : nullptr), fSize(size) {}
+
+ /**
+ * Passed to reset to specify what happens if the requested size is smaller
+ * than the current size (and the current block was dynamically allocated).
+ */
+ enum OnShrink {
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, free the old block and
+ * malloc a new block of the smaller size.
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size = 0, OnShrink shrink = kAlloc_OnShrink) {
+ if (size != fSize && (size > fSize || kReuse_OnShrink != shrink)) {
+ fPtr.reset(size ? sk_malloc_throw(size) : nullptr);
+ fSize = size;
+ }
+ return fPtr.get();
+ }
+
+ /**
+ * Return the allocated block.
+ */
+ void* get() { return fPtr.get(); }
+ const void* get() const { return fPtr.get(); }
+
+ /** Transfer ownership of the current ptr to the caller, setting the
+        internal reference to null. Note that the caller is responsible for calling
+ sk_free on the returned address.
+ */
+ void* release() {
+ fSize = 0;
+ return fPtr.release();
+ }
+
+private:
+ struct WrapFree {
+ void operator()(void* p) { sk_free(p); }
+ };
+ std::unique_ptr<void, WrapFree> fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+};
+#define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
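+
+// A minimal sketch of the shrink policy (illustrative, not part of the
+// upstream sources):
+//
+//   SkAutoMalloc buffer(256);
+//   buffer.reset(64, SkAutoMalloc::kReuse_OnShrink); // keeps the 256-byte block
+//   buffer.reset(64, SkAutoMalloc::kAlloc_OnShrink); // reallocates to 64 bytes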
+
+/**
+ * Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
+ * more), then the allocation will come from the stack rather than the heap. This object is the
+ * sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
+ * the block.
+ */
+template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
+public:
+ /**
+ * Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
+ * Must call reset(size) to return an allocated block.
+ */
+ SkAutoSMalloc() {
+ fPtr = fStorage;
+ fSize = kSize;
+ }
+
+ /**
+ * Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
+ * the allocation will come from the stack, otherwise it will be dynamically allocated.
+ */
+ explicit SkAutoSMalloc(size_t size) {
+ fPtr = fStorage;
+ fSize = kSize;
+ this->reset(size);
+ }
+
+ /**
+ * Free the allocated block (if any). If the block was small enough to have been allocated on
+ * the stack, then this does nothing.
+ */
+ ~SkAutoSMalloc() {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ /**
+ * Return the allocated block. May return non-null even if the block is of zero size. Since
+ * this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
+ * but must rely on SkAutoSMalloc to manage it.
+ */
+ void* get() const { return fPtr; }
+
+ /**
+ * Return a new block of the requested size, freeing (as necessary) any previously allocated
+ * block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
+ * block may be allocated locally, rather than from the heap.
+ */
+ void* reset(size_t size,
+ SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
+ bool* didChangeAlloc = nullptr) {
+ size = (size < kSize) ? kSize : size;
+ bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
+ if (didChangeAlloc) {
+ *didChangeAlloc = alloc;
+ }
+ if (alloc) {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+
+ if (size == kSize) {
+ SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
+ fPtr = fStorage;
+ } else {
+ fPtr = sk_malloc_throw(size);
+ }
+
+ fSize = size;
+ }
+ SkASSERT(fSize >= size && fSize >= kSize);
+ SkASSERT((fPtr == fStorage) || fSize > kSize);
+ return fPtr;
+ }
+
+private:
+ // Align up to 32 bits.
+ static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
+#else
+ static const size_t kSize = kSizeAlign4;
+#endif
+
+ void* fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+ uint32_t fStorage[kSize >> 2];
+};
+// Can't guard the constructor because it's a template class.
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
new file mode 100644
index 0000000000..0a63385e55
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "src/core/SkAutoPixmapStorage.h"
+
+SkAutoPixmapStorage::SkAutoPixmapStorage() : fStorage(nullptr) {}
+
+SkAutoPixmapStorage::~SkAutoPixmapStorage() {
+ this->freeStorage();
+}
+
+SkAutoPixmapStorage& SkAutoPixmapStorage::operator=(SkAutoPixmapStorage&& other) {
+ this->fStorage = other.fStorage;
+ this->INHERITED::reset(other.info(), this->fStorage, other.rowBytes());
+
+ other.fStorage = nullptr;
+ other.INHERITED::reset();
+
+ return *this;
+}
+
+size_t SkAutoPixmapStorage::AllocSize(const SkImageInfo& info, size_t* rowBytes) {
+ size_t rb = info.minRowBytes();
+ if (rowBytes) {
+ *rowBytes = rb;
+ }
+ return info.computeByteSize(rb);
+}
+
+bool SkAutoPixmapStorage::tryAlloc(const SkImageInfo& info) {
+ this->freeStorage();
+
+ size_t rb;
+ size_t size = AllocSize(info, &rb);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return false;
+ }
+ void* pixels = sk_malloc_canfail(size);
+ if (nullptr == pixels) {
+ return false;
+ }
+ this->reset(info, pixels, rb);
+ fStorage = pixels;
+ return true;
+}
+
+void SkAutoPixmapStorage::alloc(const SkImageInfo& info) {
+ SkASSERT_RELEASE(this->tryAlloc(info));
+}
+
+sk_sp<SkData> SkAutoPixmapStorage::detachPixelsAsData() {
+ if (!fStorage) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeFromMalloc(fStorage, this->computeByteSize());
+ fStorage = nullptr;
+ this->INHERITED::reset();
+
+ return data;
+}
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.h b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
new file mode 100644
index 0000000000..494e0a93f9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoPixmapStorage_DEFINED
+#define SkAutoPixmapStorage_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/private/SkMalloc.h"
+
+class SkAutoPixmapStorage : public SkPixmap {
+public:
+ SkAutoPixmapStorage();
+ ~SkAutoPixmapStorage();
+
+ /**
+ * Leave the moved-from object in a free-but-valid state.
+ */
+ SkAutoPixmapStorage& operator=(SkAutoPixmapStorage&& other);
+
+ /**
+ * Try to allocate memory for the pixels needed to match the specified Info. On success
+ * return true and fill out the pixmap to point to that memory. The storage will be freed
+ * when this object is destroyed, or if another call to tryAlloc() or alloc() is made.
+ *
+ * On failure, return false and reset() the pixmap to empty.
+ */
+ bool tryAlloc(const SkImageInfo&);
+
+ /**
+ * Allocate memory for the pixels needed to match the specified Info and fill out the pixmap
+ * to point to that memory. The storage will be freed when this object is destroyed,
+ * or if another call to tryAlloc() or alloc() is made.
+ *
+ * If the memory cannot be allocated, calls SK_ABORT().
+ */
+ void alloc(const SkImageInfo&);
+
+ /**
+ * Gets the size and optionally the rowBytes that would be allocated by SkAutoPixmapStorage if
+ * alloc/tryAlloc was called.
+ */
+ static size_t AllocSize(const SkImageInfo& info, size_t* rowBytes);
+
+ /**
+ * Returns an SkData object wrapping the allocated pixels memory, and resets the pixmap.
+ * If the storage hasn't been allocated, the result is NULL.
+ */
+ sk_sp<SkData> SK_WARN_UNUSED_RESULT detachPixelsAsData();
+
+ // We wrap these so we can clear our internal storage
+
+ void reset() {
+ this->freeStorage();
+ this->INHERITED::reset();
+ }
+ void reset(const SkImageInfo& info, const void* addr, size_t rb) {
+ this->freeStorage();
+ this->INHERITED::reset(info, addr, rb);
+ }
+
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask) {
+ this->freeStorage();
+ return this->INHERITED::reset(mask);
+ }
+
+private:
+ void* fStorage;
+
+ void freeStorage() {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ }
+
+ typedef SkPixmap INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBBHFactory.cpp b/gfx/skia/skia/src/core/SkBBHFactory.cpp
new file mode 100644
index 0000000000..b4b091852a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBBHFactory.cpp
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkRTree.h"
+
+SkBBoxHierarchy* SkRTreeFactory::operator()() const {
+ return new SkRTree;
+}
diff --git a/gfx/skia/skia/src/core/SkBBoxHierarchy.h b/gfx/skia/skia/src/core/SkBBoxHierarchy.h
new file mode 100644
index 0000000000..741f9d77b2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBBoxHierarchy.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBBoxHierarchy_DEFINED
+#define SkBBoxHierarchy_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTDArray.h"
+
+/**
+ * Interface for a spatial data structure that stores axis-aligned bounding
+ * boxes and allows efficient retrieval of intersections with query rectangles.
+ */
+class SkBBoxHierarchy : public SkRefCnt {
+public:
+ SkBBoxHierarchy() {}
+ virtual ~SkBBoxHierarchy() {}
+
+ /**
+ * Insert N bounding boxes into the hierarchy.
+ */
+ virtual void insert(const SkRect[], int N) = 0;
+
+ /**
+     * Populate results with the indices of the bounding boxes intersecting the query.
+ */
+ virtual void search(const SkRect& query, SkTDArray<int>* results) const = 0;
+
+ virtual size_t bytesUsed() const = 0;
+
+ // Get the root bound.
+ virtual SkRect getRootBound() const = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
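+
+// A minimal usage sketch (illustrative, not part of the upstream sources):
+//
+//   sk_sp<SkBBoxHierarchy> bbh(SkRTreeFactory()());
+//   SkRect rects[] = { SkRect::MakeWH(10, 10), SkRect::MakeXYWH(40, 40, 5, 5) };
+//   bbh->insert(rects, 2);
+//   SkTDArray<int> hits;
+//   bbh->search(SkRect::MakeWH(20, 20), &hits); // hits now holds index 0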
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBigPicture.cpp b/gfx/skia/skia/src/core/SkBigPicture.cpp
new file mode 100644
index 0000000000..3ade96bf6a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBBoxHierarchy.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkPictureCommon.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkTraceEvent.h"
+
+SkBigPicture::SkBigPicture(const SkRect& cull,
+ SkRecord* record,
+ SnapshotArray* drawablePicts,
+ SkBBoxHierarchy* bbh,
+ size_t approxBytesUsedBySubPictures)
+ : fCullRect(cull)
+ , fApproxBytesUsedBySubPictures(approxBytesUsedBySubPictures)
+ , fRecord(record) // Take ownership of caller's ref.
+ , fDrawablePicts(drawablePicts) // Take ownership.
+ , fBBH(bbh) // Take ownership of caller's ref.
+{}
+
+void SkBigPicture::playback(SkCanvas* canvas, AbortCallback* callback) const {
+ SkASSERT(canvas);
+
+ // If the query contains the whole picture, don't bother with the BBH.
+ const bool useBBH = !canvas->getLocalClipBounds().contains(this->cullRect());
+
+ SkRecordDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ nullptr,
+ this->drawableCount(),
+ useBBH ? fBBH.get() : nullptr,
+ callback);
+}
+
+void SkBigPicture::partialPlayback(SkCanvas* canvas,
+ int start,
+ int stop,
+ const SkMatrix& initialCTM) const {
+ SkASSERT(canvas);
+ SkRecordPartialDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ this->drawableCount(),
+ start,
+ stop,
+ initialCTM);
+}
+
+SkRect SkBigPicture::cullRect() const { return fCullRect; }
+int SkBigPicture::approximateOpCount() const { return fRecord->count(); }
+size_t SkBigPicture::approximateBytesUsed() const {
+ size_t bytes = sizeof(*this) + fRecord->bytesUsed() + fApproxBytesUsedBySubPictures;
+ if (fBBH) { bytes += fBBH->bytesUsed(); }
+ return bytes;
+}
+
+int SkBigPicture::drawableCount() const {
+ return fDrawablePicts ? fDrawablePicts->count() : 0;
+}
+
+SkPicture const* const* SkBigPicture::drawablePicts() const {
+ return fDrawablePicts ? fDrawablePicts->begin() : nullptr;
+}
+
diff --git a/gfx/skia/skia/src/core/SkBigPicture.h b/gfx/skia/skia/src/core/SkBigPicture.h
new file mode 100644
index 0000000000..a4d71aa463
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBigPicture_DEFINED
+#define SkBigPicture_DEFINED
+
+#include "include/core/SkPicture.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTemplates.h"
+
+class SkBBoxHierarchy;
+class SkMatrix;
+class SkRecord;
+
+// An implementation of SkPicture supporting an arbitrary number of drawing commands.
+class SkBigPicture final : public SkPicture {
+public:
+ // An array of refcounted const SkPicture pointers.
+ class SnapshotArray : ::SkNoncopyable {
+ public:
+ SnapshotArray(const SkPicture* pics[], int count) : fPics(pics), fCount(count) {}
+ ~SnapshotArray() { for (int i = 0; i < fCount; i++) { fPics[i]->unref(); } }
+
+ const SkPicture* const* begin() const { return fPics; }
+ int count() const { return fCount; }
+ private:
+ SkAutoTMalloc<const SkPicture*> fPics;
+ int fCount;
+ };
+
+ SkBigPicture(const SkRect& cull,
+ SkRecord*, // We take ownership of the caller's ref.
+ SnapshotArray*, // We take exclusive ownership.
+ SkBBoxHierarchy*, // We take ownership of the caller's ref.
+ size_t approxBytesUsedBySubPictures);
+
+
+// SkPicture overrides
+ void playback(SkCanvas*, AbortCallback*) const override;
+ SkRect cullRect() const override;
+ int approximateOpCount() const override;
+ size_t approximateBytesUsed() const override;
+ const SkBigPicture* asSkBigPicture() const override { return this; }
+
+// Used by GrLayerHoister
+ void partialPlayback(SkCanvas*,
+ int start,
+ int stop,
+ const SkMatrix& initialCTM) const;
+// Used by GrRecordReplaceDraw
+ const SkBBoxHierarchy* bbh() const { return fBBH.get(); }
+ const SkRecord* record() const { return fRecord.get(); }
+
+private:
+ int drawableCount() const;
+ SkPicture const* const* drawablePicts() const;
+
+ const SkRect fCullRect;
+ const size_t fApproxBytesUsedBySubPictures;
+ sk_sp<const SkRecord> fRecord;
+ std::unique_ptr<const SnapshotArray> fDrawablePicts;
+ sk_sp<const SkBBoxHierarchy> fBBH;
+};
+
+#endif // SkBigPicture_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBitmap.cpp b/gfx/skia/skia/src/core/SkBitmap.cpp
new file mode 100644
index 0000000000..f25991de25
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmap.cpp
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkMallocPixelRef.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPixelRefPriv.h"
+#include "src/core/SkPixmapPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkWritePixelsRec.h"
+
+#include <cstring>
+#include <utility>
+
+static bool reset_return_false(SkBitmap* bm) {
+ bm->reset();
+ return false;
+}
+
+SkBitmap::SkBitmap() : fFlags(0) {}
+
+SkBitmap::SkBitmap(const SkBitmap& src)
+ : fPixelRef (src.fPixelRef)
+ , fPixmap (src.fPixmap)
+ , fFlags (src.fFlags)
+{
+ SkDEBUGCODE(src.validate();)
+ SkDEBUGCODE(this->validate();)
+}
+
+SkBitmap::SkBitmap(SkBitmap&& other)
+ : fPixelRef (std::move(other.fPixelRef))
+ , fPixmap (std::move(other.fPixmap))
+ , fFlags (other.fFlags)
+{
+ SkASSERT(!other.fPixelRef);
+ other.fPixmap.reset();
+ other.fFlags = 0;
+}
+
+SkBitmap::~SkBitmap() {}
+
+SkBitmap& SkBitmap::operator=(const SkBitmap& src) {
+ if (this != &src) {
+ fPixelRef = src.fPixelRef;
+ fPixmap = src.fPixmap;
+ fFlags = src.fFlags;
+ }
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+SkBitmap& SkBitmap::operator=(SkBitmap&& other) {
+ if (this != &other) {
+ fPixelRef = std::move(other.fPixelRef);
+ fPixmap = std::move(other.fPixmap);
+ fFlags = other.fFlags;
+ SkASSERT(!other.fPixelRef);
+ other.fPixmap.reset();
+ other.fFlags = 0;
+ }
+ return *this;
+}
+
+void SkBitmap::swap(SkBitmap& other) {
+ using std::swap;
+ swap(*this, other);
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::reset() {
+ fPixelRef = nullptr; // Free pixels.
+ fPixmap.reset();
+ fFlags = 0;
+}
+
+void SkBitmap::getBounds(SkRect* bounds) const {
+ SkASSERT(bounds);
+ *bounds = SkRect::Make(this->dimensions());
+}
+
+void SkBitmap::getBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ *bounds = fPixmap.bounds();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::setInfo(const SkImageInfo& info, size_t rowBytes) {
+ SkAlphaType newAT = info.alphaType();
+ if (!SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAT)) {
+ return reset_return_false(this);
+ }
+ // don't look at info.alphaType(), since newAT is the real value...
+
+ // require that rowBytes fit in 31bits
+ int64_t mrb = info.minRowBytes64();
+ if (!SkTFitsIn<int32_t>(mrb)) {
+ return reset_return_false(this);
+ }
+ if (!SkTFitsIn<int32_t>(rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ if (info.width() < 0 || info.height() < 0) {
+ return reset_return_false(this);
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ rowBytes = 0;
+ } else if (0 == rowBytes) {
+ rowBytes = (size_t)mrb;
+ } else if (!info.validRowBytes(rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ fPixelRef = nullptr; // Free pixels.
+ fPixmap.reset(info.makeAlphaType(newAT), nullptr, SkToU32(rowBytes));
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+
+
+bool SkBitmap::setAlphaType(SkAlphaType newAlphaType) {
+ if (!SkColorTypeValidateAlphaType(this->colorType(), newAlphaType, &newAlphaType)) {
+ return false;
+ }
+ if (this->alphaType() != newAlphaType) {
+ auto newInfo = fPixmap.info().makeAlphaType(newAlphaType);
+ fPixmap.reset(std::move(newInfo), fPixmap.addr(), fPixmap.rowBytes());
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+SkIPoint SkBitmap::pixelRefOrigin() const {
+ const char* addr = (const char*)fPixmap.addr();
+ const char* pix = (const char*)(fPixelRef ? fPixelRef->pixels() : nullptr);
+ size_t rb = this->rowBytes();
+ if (!pix || 0 == rb) {
+ return {0, 0};
+ }
+ SkASSERT(this->bytesPerPixel() > 0);
+ SkASSERT(this->bytesPerPixel() == (1 << this->shiftPerPixel()));
+ SkASSERT(addr >= pix);
+ size_t off = addr - pix;
+ return {SkToS32((off % rb) >> this->shiftPerPixel()), SkToS32(off / rb)};
+}
+
+void SkBitmap::setPixelRef(sk_sp<SkPixelRef> pr, int dx, int dy) {
+#ifdef SK_DEBUG
+ if (pr) {
+ if (kUnknown_SkColorType != this->colorType()) {
+ SkASSERT(dx >= 0 && this->width() + dx <= pr->width());
+ SkASSERT(dy >= 0 && this->height() + dy <= pr->height());
+ }
+ }
+#endif
+ fPixelRef = kUnknown_SkColorType != this->colorType() ? std::move(pr) : nullptr;
+ void* p = nullptr;
+ size_t rowBytes = this->rowBytes();
+ // ignore dx,dy if there is no pixelref
+ if (fPixelRef) {
+ rowBytes = fPixelRef->rowBytes();
+ // TODO(reed): Enforce that PixelRefs must have non-null pixels.
+ p = fPixelRef->pixels();
+ if (p) {
+ p = (char*)p + dy * rowBytes + dx * this->bytesPerPixel();
+ }
+ }
+ SkPixmapPriv::ResetPixmapKeepInfo(&fPixmap, p, rowBytes);
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::setPixels(void* p) {
+ if (kUnknown_SkColorType == this->colorType()) {
+ p = nullptr;
+ }
+ size_t rb = this->rowBytes();
+ SkPixmapPriv::ResetPixmapKeepInfo(&fPixmap, p, rb);
+ fPixelRef = p ? sk_make_sp<SkPixelRef>(this->width(), this->height(), p, rb) : nullptr;
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkBitmap::tryAllocPixels(Allocator* allocator) {
+ HeapAllocator stdalloc;
+
+ if (nullptr == allocator) {
+ allocator = &stdalloc;
+ }
+ return allocator->allocPixelRef(this);
+}
+
+bool SkBitmap::tryAllocN32Pixels(int width, int height, bool isOpaque) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ return this->tryAllocPixels(info);
+}
+
+void SkBitmap::allocN32Pixels(int width, int height, bool isOpaque) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ this->allocPixels(info);
+}
+
+void SkBitmap::allocPixels() {
+ this->allocPixels((Allocator*)nullptr);
+}
+
+void SkBitmap::allocPixels(Allocator* allocator) {
+ SkASSERT_RELEASE(this->tryAllocPixels(allocator));
+}
+
+void SkBitmap::allocPixelsFlags(const SkImageInfo& info, uint32_t flags) {
+ SkASSERT_RELEASE(this->tryAllocPixelsFlags(info, flags));
+}
+
+void SkBitmap::allocPixels(const SkImageInfo& info, size_t rowBytes) {
+ SkASSERT_RELEASE(this->tryAllocPixels(info, rowBytes));
+}
+
+void SkBitmap::allocPixels(const SkImageInfo& info) {
+ this->allocPixels(info, info.minRowBytes());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::tryAllocPixels(const SkImageInfo& requestedInfo, size_t rowBytes) {
+ if (!this->setInfo(requestedInfo, rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+ if (kUnknown_SkColorType == correctedInfo.colorType()) {
+ return true;
+ }
+ // setInfo may have computed a valid rowbytes if 0 were passed in
+ rowBytes = this->rowBytes();
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(correctedInfo, rowBytes);
+ if (!pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(std::move(pr), 0, 0);
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+bool SkBitmap::tryAllocPixelsFlags(const SkImageInfo& requestedInfo, uint32_t allocFlags) {
+ if (!this->setInfo(requestedInfo)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(correctedInfo,
+ correctedInfo.minRowBytes());
+ if (!pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(std::move(pr), 0, 0);
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+static void invoke_release_proc(void (*proc)(void* pixels, void* ctx), void* pixels, void* ctx) {
+ if (proc) {
+ proc(pixels, ctx);
+ }
+}
+
+bool SkBitmap::installPixels(const SkImageInfo& requestedInfo, void* pixels, size_t rb,
+ void (*releaseProc)(void* addr, void* context), void* context) {
+ if (!this->setInfo(requestedInfo, rb)) {
+ invoke_release_proc(releaseProc, pixels, context);
+ this->reset();
+ return false;
+ }
+ if (nullptr == pixels) {
+ invoke_release_proc(releaseProc, pixels, context);
+ return true; // we behaved as if they called setInfo()
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+ this->setPixelRef(
+ SkMakePixelRefWithProc(correctedInfo.width(), correctedInfo.height(),
+ rb, pixels, releaseProc, context), 0, 0);
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+bool SkBitmap::installPixels(const SkPixmap& pixmap) {
+ return this->installPixels(pixmap.info(), pixmap.writable_addr(), pixmap.rowBytes(),
+ nullptr, nullptr);
+}
+
+bool SkBitmap::installMaskPixels(const SkMask& mask) {
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->reset();
+ return false;
+ }
+ return this->installPixels(SkImageInfo::MakeA8(mask.fBounds.width(),
+ mask.fBounds.height()),
+ mask.fImage, mask.fRowBytes);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+uint32_t SkBitmap::getGenerationID() const {
+ return fPixelRef ? fPixelRef->getGenerationID() : 0;
+}
+
+void SkBitmap::notifyPixelsChanged() const {
+ SkASSERT(!this->isImmutable());
+ if (fPixelRef) {
+ fPixelRef->notifyPixelsChanged();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+ */
+bool SkBitmap::HeapAllocator::allocPixelRef(SkBitmap* dst) {
+ const SkImageInfo info = dst->info();
+ if (kUnknown_SkColorType == info.colorType()) {
+// SkDebugf("unsupported config for info %d\n", dst->config());
+ return false;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, dst->rowBytes());
+ if (!pr) {
+ return false;
+ }
+
+ dst->setPixelRef(std::move(pr), 0, 0);
+ SkDEBUGCODE(dst->validate();)
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::isImmutable() const {
+ return fPixelRef ? fPixelRef->isImmutable() : false;
+}
+
+void SkBitmap::setImmutable() {
+ if (fPixelRef) {
+ fPixelRef->setImmutable();
+ }
+}
+
+bool SkBitmap::isVolatile() const {
+ return (fFlags & kImageIsVolatile_Flag) != 0;
+}
+
+void SkBitmap::setIsVolatile(bool isVolatile) {
+ if (isVolatile) {
+ fFlags |= kImageIsVolatile_Flag;
+ } else {
+ fFlags &= ~kImageIsVolatile_Flag;
+ }
+}
+
+void* SkBitmap::getAddr(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ char* base = (char*)this->getPixels();
+ if (base) {
+ base += (y * this->rowBytes()) + (x << this->shiftPerPixel());
+ }
+ return base;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmap::erase(SkColor c, const SkIRect& area) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (kUnknown_SkColorType == this->colorType()) {
+ // TODO: can we ASSERT that we never get here?
+ return; // can't erase. Should we bzero so the memory is not uninitialized?
+ }
+
+ SkPixmap result;
+ if (!this->peekPixels(&result)) {
+ return;
+ }
+
+ if (result.erase(c, area)) {
+ this->notifyPixelsChanged();
+ }
+}
+
+void SkBitmap::eraseColor(SkColor c) const {
+ this->erase(c, SkIRect::MakeWH(this->width(), this->height()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::extractSubset(SkBitmap* result, const SkIRect& subset) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (nullptr == result || !fPixelRef) {
+ return false; // no src pixels
+ }
+
+ SkIRect srcRect, r;
+ srcRect.setWH(this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+ // If the upper left of the rectangle was outside the bounds of this SkBitmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ SkBitmap dst;
+ dst.setInfo(this->info().makeDimensions(r.size()), this->rowBytes());
+ dst.setIsVolatile(this->isVolatile());
+
+ if (fPixelRef) {
+ SkIPoint origin = this->pixelRefOrigin();
+ // share the pixelref with a custom offset
+ dst.setPixelRef(fPixelRef, origin.x() + r.fLeft, origin.y() + r.fTop);
+ }
+ SkDEBUGCODE(dst.validate();)
+
+ // we know we're good, so commit to result
+ result->swap(dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::readPixels(const SkImageInfo& requestedDstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ SkPixmap src;
+ if (!this->peekPixels(&src)) {
+ return false;
+ }
+ return src.readPixels(requestedDstInfo, dstPixels, dstRB, x, y);
+}
+
+bool SkBitmap::readPixels(const SkPixmap& dst, int srcX, int srcY) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY);
+}
+
+bool SkBitmap::writePixels(const SkPixmap& src, int dstX, int dstY) {
+ if (!SkImageInfoValidConversion(this->info(), src.info())) {
+ return false;
+ }
+
+ SkWritePixelsRec rec(src.info(), src.addr(), src.rowBytes(), dstX, dstY);
+ if (!rec.trim(this->width(), this->height())) {
+ return false;
+ }
+
+ void* dstPixels = this->getAddr(rec.fX, rec.fY);
+ const SkImageInfo dstInfo = this->info().makeDimensions(rec.fInfo.dimensions());
+ SkConvertPixels(dstInfo, dstPixels, this->rowBytes(), rec.fInfo, rec.fPixels, rec.fRowBytes);
+ this->notifyPixelsChanged();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool GetBitmapAlpha(const SkBitmap& src, uint8_t* SK_RESTRICT alpha, int alphaRowBytes) {
+ SkASSERT(alpha != nullptr);
+ SkASSERT(alphaRowBytes >= src.width());
+
+ SkPixmap pmap;
+ if (!src.peekPixels(&pmap)) {
+ for (int y = 0; y < src.height(); ++y) {
+ memset(alpha, 0, src.width());
+ alpha += alphaRowBytes;
+ }
+ return false;
+ }
+ SkConvertPixels(SkImageInfo::MakeA8(pmap.width(), pmap.height()), alpha, alphaRowBytes,
+ pmap.info(), pmap.addr(), pmap.rowBytes());
+ return true;
+}
+
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+
+bool SkBitmap::extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ Allocator *allocator, SkIPoint* offset) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkBitmap tmpBitmap;
+ SkMatrix identity;
+ SkMask srcM, dstM;
+
+ if (this->width() == 0 || this->height() == 0) {
+ return false;
+ }
+ srcM.fBounds.setWH(this->width(), this->height());
+ srcM.fRowBytes = SkAlign4(this->width());
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkMaskFilter* filter = paint ? paint->getMaskFilter() : nullptr;
+
+ // compute our (larger?) dst bounds if we have a filter
+ if (filter) {
+ identity.reset();
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ dstM.fRowBytes = SkAlign4(dstM.fBounds.width());
+ } else {
+ NO_FILTER_CASE:
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(this->width(), this->height()), srcM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ GetBitmapAlpha(*this, tmpBitmap.getAddr8(0, 0), srcM.fRowBytes);
+ if (offset) {
+ offset->set(0, 0);
+ }
+ tmpBitmap.swap(*dst);
+ return true;
+ }
+ srcM.fImage = SkMask::AllocImage(srcM.computeImageSize());
+ SkAutoMaskFreeImage srcCleanup(srcM.fImage);
+
+ GetBitmapAlpha(*this, srcM.fImage, srcM.fRowBytes);
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ SkAutoMaskFreeImage dstCleanup(dstM.fImage);
+
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(dstM.fBounds.width(), dstM.fBounds.height()),
+ dstM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ memcpy(tmpBitmap.getPixels(), dstM.fImage, dstM.computeImageSize());
+ if (offset) {
+ offset->set(dstM.fBounds.fLeft, dstM.fBounds.fTop);
+ }
+ SkDEBUGCODE(tmpBitmap.validate();)
+
+ tmpBitmap.swap(*dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkBitmap::validate() const {
+ this->info().validate();
+
+ SkASSERT(this->info().validRowBytes(this->rowBytes()));
+ uint8_t allFlags = kImageIsVolatile_Flag;
+ SkASSERT((~allFlags & fFlags) == 0);
+
+ if (fPixelRef && fPixelRef->pixels()) {
+ SkASSERT(this->getPixels());
+ } else {
+ SkASSERT(!this->getPixels());
+ }
+
+ if (this->getPixels()) {
+ SkASSERT(fPixelRef);
+ SkASSERT(fPixelRef->rowBytes() == this->rowBytes());
+ SkIPoint origin = this->pixelRefOrigin();
+ SkASSERT(origin.fX >= 0);
+ SkASSERT(origin.fY >= 0);
+ SkASSERT(fPixelRef->width() >= (int)this->width() + origin.fX);
+ SkASSERT(fPixelRef->height() >= (int)this->height() + origin.fY);
+ SkASSERT(fPixelRef->rowBytes() >= this->info().minRowBytes());
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::peekPixels(SkPixmap* pmap) const {
+ if (this->getPixels()) {
+ if (pmap) {
+ *pmap = fPixmap;
+ }
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.cpp b/gfx/skia/skia/src/core/SkBitmapCache.cpp
new file mode 100644
index 0000000000..6767c59093
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkResourceCache.h"
+#include "src/image/SkImage_Base.h"
+
+/**
+ * Use this for bitmapcache and mipmapcache entries.
+ */
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID) {
+ uint64_t sharedID = SkSetFourByteTag('b', 'm', 'a', 'p');
+ return (sharedID << 32) | bitmapGenID;
+}
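+
+// [Editorial sketch -- not part of the upstream file.] The shared ID packs the
+// four-byte tag 'bmap' into the high 32 bits and the generation ID into the
+// low 32 bits, so a single purge message can target every cache entry derived
+// from one bitmap:
+#if 0
+static void example_shared_id_layout(uint32_t genID) {
+ uint64_t id = SkMakeResourceCacheSharedIDForBitmap(genID);
+ SkASSERT((id >> 32) == SkSetFourByteTag('b', 'm', 'a', 'p'));
+ SkASSERT((id & 0xffffffff) == genID);
+}
+#endif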
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID) {
+ SkResourceCache::PostPurgeSharedID(SkMakeResourceCacheSharedIDForBitmap(bitmapGenID));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(uint32_t imageID, const SkIRect& subset) {
+ SkASSERT(imageID);
+ SkASSERT(subset.width() > 0 && subset.height() > 0);
+ return { imageID, subset };
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkImage* image) {
+ SkIRect bounds = SkIRect::MakeWH(image->width(), image->height());
+ return Make(image->uniqueID(), bounds);
+}
+
+namespace {
+static unsigned gBitmapKeyNamespaceLabel;
+
+struct BitmapKey : public SkResourceCache::Key {
+public:
+ BitmapKey(const SkBitmapCacheDesc& desc) : fDesc(desc) {
+ this->init(&gBitmapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fDesc.fImageID),
+ sizeof(fDesc));
+ }
+
+ const SkBitmapCacheDesc fDesc;
+};
+} // anonymous ns
+
+//////////////////////
+#include "src/core/SkDiscardableMemory.h"
+#include "src/core/SkNextID.h"
+
+void SkBitmapCache_setImmutableWithID(SkPixelRef* pr, uint32_t id) {
+ pr->setImmutableWithID(id);
+}
+
+class SkBitmapCache::Rec : public SkResourceCache::Rec {
+public:
+ Rec(const SkBitmapCacheDesc& desc, const SkImageInfo& info, size_t rowBytes,
+ std::unique_ptr<SkDiscardableMemory> dm, void* block)
+ : fKey(desc)
+ , fDM(std::move(dm))
+ , fMalloc(block)
+ , fInfo(info)
+ , fRowBytes(rowBytes)
+ {
+ SkASSERT(!(fDM && fMalloc)); // can't have both
+
+ // We need an ID to return with the bitmap/pixelref. We can't necessarily use the key/desc
+ // ID - lazy images cache the same ID with multiple keys (in different color types).
+ fPrUniqueID = SkNextID::ImageID();
+ }
+
+ ~Rec() override {
+ SkASSERT(0 == fExternalCounter);
+ if (fDM && fDiscardableIsLocked) {
+ SkASSERT(fDM->data());
+ fDM->unlock();
+ }
+ sk_free(fMalloc); // may be null
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override {
+ return sizeof(fKey) + fInfo.computeByteSize(fRowBytes);
+ }
+ bool canBePurged() override {
+ SkAutoMutexExclusive ama(fMutex);
+ return fExternalCounter == 0;
+ }
+ void postAddInstall(void* payload) override {
+ SkAssertResult(this->install(static_cast<SkBitmap*>(payload)));
+ }
+
+ const char* getCategory() const override { return "bitmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fDM.get();
+ }
+
+ static void ReleaseProc(void* addr, void* ctx) {
+ Rec* rec = static_cast<Rec*>(ctx);
+ SkAutoMutexExclusive ama(rec->fMutex);
+
+ SkASSERT(rec->fExternalCounter > 0);
+ rec->fExternalCounter -= 1;
+ if (rec->fDM) {
+ SkASSERT(rec->fMalloc == nullptr);
+ if (rec->fExternalCounter == 0) {
+ rec->fDM->unlock();
+ rec->fDiscardableIsLocked = false;
+ }
+ } else {
+ SkASSERT(rec->fMalloc != nullptr);
+ }
+ }
+
+ bool install(SkBitmap* bitmap) {
+ SkAutoMutexExclusive ama(fMutex);
+
+ if (!fDM && !fMalloc) {
+ return false;
+ }
+
+ if (fDM) {
+ if (!fDiscardableIsLocked) {
+ SkASSERT(fExternalCounter == 0);
+ if (!fDM->lock()) {
+ fDM.reset(nullptr);
+ return false;
+ }
+ fDiscardableIsLocked = true;
+ }
+ SkASSERT(fDM->data());
+ }
+
+ bitmap->installPixels(fInfo, fDM ? fDM->data() : fMalloc, fRowBytes, ReleaseProc, this);
+ SkBitmapCache_setImmutableWithID(bitmap->pixelRef(), fPrUniqueID);
+ fExternalCounter++;
+
+ return true;
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextBitmap) {
+ Rec* rec = (Rec*)&baseRec;
+ SkBitmap* result = (SkBitmap*)contextBitmap;
+ return rec->install(result);
+ }
+
+private:
+ BitmapKey fKey;
+
+ SkMutex fMutex;
+
+ // either fDM or fMalloc can be non-null, but not both
+ std::unique_ptr<SkDiscardableMemory> fDM;
+ void* fMalloc;
+
+ SkImageInfo fInfo;
+ size_t fRowBytes;
+ uint32_t fPrUniqueID;
+
+ // This field counts the number of external pixelrefs we have created.
+ // They notify us when they are destroyed so we can decrement this.
+ int fExternalCounter = 0;
+ bool fDiscardableIsLocked = true;
+};
+
+void SkBitmapCache::PrivateDeleteRec(Rec* rec) { delete rec; }
+
+SkBitmapCache::RecPtr SkBitmapCache::Alloc(const SkBitmapCacheDesc& desc, const SkImageInfo& info,
+ SkPixmap* pmap) {
+ // Ensure that the info's dimensions match the subset (the allocated pixels cover exactly it)
+ SkASSERT(info.width() == desc.fSubset.width());
+ SkASSERT(info.height() == desc.fSubset.height());
+
+ const size_t rb = info.minRowBytes();
+ size_t size = info.computeByteSize(rb);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkDiscardableMemory> dm;
+ void* block = nullptr;
+
+ auto factory = SkResourceCache::GetDiscardableFactory();
+ if (factory) {
+ dm.reset(factory(size));
+ } else {
+ block = sk_malloc_canfail(size);
+ }
+ if (!dm && !block) {
+ return nullptr;
+ }
+ *pmap = SkPixmap(info, dm ? dm->data() : block, rb);
+ return RecPtr(new Rec(desc, info, rb, std::move(dm), block));
+}
+
+void SkBitmapCache::Add(RecPtr rec, SkBitmap* bitmap) {
+ SkResourceCache::Add(rec.release(), bitmap);
+}
+
+bool SkBitmapCache::Find(const SkBitmapCacheDesc& desc, SkBitmap* result) {
+ desc.validate();
+ return SkResourceCache::Find(BitmapKey(desc), SkBitmapCache::Rec::Finder, result);
+}
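+
+// [Editorial sketch -- not part of the upstream file.] The intended producer
+// flow: Alloc() reserves storage (discardable memory if a factory is
+// installed, heap otherwise) and returns a pixmap to fill, Add() publishes
+// the record, and a later Find() re-installs the pixels into the caller's
+// bitmap. The helper below is hypothetical:
+#if 0
+static bool example_find_or_fill(const SkBitmapCacheDesc& desc, const SkImageInfo& info,
+ SkBitmap* result) {
+ if (SkBitmapCache::Find(desc, result)) {
+ return true; // hit: pixels are already installed and locked
+ }
+ SkPixmap pmap;
+ SkBitmapCache::RecPtr rec = SkBitmapCache::Alloc(desc, info, &pmap);
+ if (!rec) {
+ return false; // allocation failed, or the byte size overflowed
+ }
+ // ... decode or render into pmap.writable_addr() here ...
+ SkBitmapCache::Add(std::move(rec), result);
+ return true;
+}
+#endif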
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+namespace {
+static unsigned gMipMapKeyNamespaceLabel;
+
+struct MipMapKey : public SkResourceCache::Key {
+public:
+ MipMapKey(const SkBitmapCacheDesc& desc) : fDesc(desc) {
+ this->init(&gMipMapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fDesc.fImageID),
+ sizeof(fDesc));
+ }
+
+ const SkBitmapCacheDesc fDesc;
+};
+
+struct MipMapRec : public SkResourceCache::Rec {
+ MipMapRec(const SkBitmapCacheDesc& desc, const SkMipMap* result)
+ : fKey(desc)
+ , fMipMap(result)
+ {
+ fMipMap->attachToCacheAndRef();
+ }
+
+ ~MipMapRec() override {
+ fMipMap->detachFromCacheAndUnref();
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(fKey) + fMipMap->size(); }
+ const char* getCategory() const override { return "mipmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fMipMap->diagnostic_only_getDiscardable();
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextMip) {
+ const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
+ const SkMipMap* mm = SkRef(rec.fMipMap);
+ // the call to ref() above triggers a "lock" in the case of discardable memory,
+ // which means we can now check for null (in case the lock failed).
+ if (nullptr == mm->data()) {
+ mm->unref(); // balance our call to ref()
+ return false;
+ }
+ // the caller must call unref() when they are done.
+ *(const SkMipMap**)contextMip = mm;
+ return true;
+ }
+
+private:
+ MipMapKey fKey;
+ const SkMipMap* fMipMap;
+};
+} // anonymous ns
+
+const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmapCacheDesc& desc,
+ SkResourceCache* localCache) {
+ MipMapKey key(desc);
+ const SkMipMap* result;
+
+ if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Finder, &result)) {
+ result = nullptr;
+ }
+ return result;
+}
+
+static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
+ return localCache ? localCache->GetDiscardableFactory()
+ : SkResourceCache::GetDiscardableFactory();
+}
+
+const SkMipMap* SkMipMapCache::AddAndRef(const SkImage_Base* image, SkResourceCache* localCache) {
+ SkBitmap src;
+ if (!image->getROPixels(&src)) {
+ return nullptr;
+ }
+
+ SkMipMap* mipmap = SkMipMap::Build(src, get_fact(localCache));
+ if (mipmap) {
+ MipMapRec* rec = new MipMapRec(SkBitmapCacheDesc::Make(image), mipmap);
+ CHECK_LOCAL(localCache, add, Add, rec);
+ image->notifyAddedToRasterCache();
+ }
+ return mipmap;
+}
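+
+// [Editorial sketch -- not part of the upstream file.] Both FindAndRef() and
+// AddAndRef() hand back the mipmap with a reference that the caller must
+// balance with unref(). A hypothetical lookup:
+#if 0
+static void example_mipmap_lookup(const SkImage_Base* image) {
+ const SkMipMap* mm = SkMipMapCache::FindAndRef(SkBitmapCacheDesc::Make(image));
+ if (!mm) {
+ mm = SkMipMapCache::AddAndRef(image); // build and cache on a miss
+ }
+ if (mm) {
+ // ... use mm->extractLevel(...) ...
+ mm->unref(); // balance the ref taken by *AndRef()
+ }
+}
+#endif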
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.h b/gfx/skia/skia/src/core/SkBitmapCache.h
new file mode 100644
index 0000000000..66021e4431
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapCache_DEFINED
+#define SkBitmapCache_DEFINED
+
+#include "include/core/SkRect.h"
+#include <memory>
+
+class SkBitmap;
+class SkImage;
+class SkImage_Base;
+struct SkImageInfo;
+class SkMipMap;
+class SkPixmap;
+class SkResourceCache;
+
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID);
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID);
+
+struct SkBitmapCacheDesc {
+ uint32_t fImageID; // != 0
+ SkIRect fSubset; // always set to a valid rect (entire or subset)
+
+ void validate() const {
+ SkASSERT(fImageID);
+ SkASSERT(fSubset.fLeft >= 0 && fSubset.fTop >= 0);
+ SkASSERT(fSubset.width() > 0 && fSubset.height() > 0);
+ }
+
+ static SkBitmapCacheDesc Make(const SkImage*);
+ static SkBitmapCacheDesc Make(uint32_t genID, const SkIRect& subset);
+};
+
+class SkBitmapCache {
+public:
+ /**
+ * Search based on the desc. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool Find(const SkBitmapCacheDesc&, SkBitmap* result);
+
+ class Rec;
+ struct RecDeleter { void operator()(Rec* r) { PrivateDeleteRec(r); } };
+ typedef std::unique_ptr<Rec, RecDeleter> RecPtr;
+
+ static RecPtr Alloc(const SkBitmapCacheDesc&, const SkImageInfo&, SkPixmap*);
+ static void Add(RecPtr, SkBitmap*);
+
+private:
+ static void PrivateDeleteRec(Rec*);
+};
+
+class SkMipMapCache {
+public:
+ static const SkMipMap* FindAndRef(const SkBitmapCacheDesc&,
+ SkResourceCache* localCache = nullptr);
+ static const SkMipMap* AddAndRef(const SkImage_Base*,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapController.cpp b/gfx/skia/skia/src/core/SkBitmapController.cpp
new file mode 100644
index 0000000000..35cd0410e3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapController.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkMatrix.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkBitmapController.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMipMap.h"
+#include "src/image/SkImage_Base.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBitmapController::State* SkBitmapController::RequestBitmap(const SkImage_Base* image,
+ const SkMatrix& inv,
+ SkFilterQuality quality,
+ SkArenaAlloc* alloc) {
+ auto* state = alloc->make<SkBitmapController::State>(image, inv, quality);
+
+ return state->pixmap().addr() ? state : nullptr;
+}
+
+bool SkBitmapController::State::processHighRequest(const SkImage_Base* image) {
+ if (fQuality != kHigh_SkFilterQuality) {
+ return false;
+ }
+
+ if (SkMatrixPriv::AdjustHighQualityFilterLevel(fInvMatrix, true) != kHigh_SkFilterQuality) {
+ fQuality = kMedium_SkFilterQuality;
+ return false;
+ }
+
+ (void)image->getROPixels(&fResultBitmap);
+ return true;
+}
+
+/*
+ * Modulo internal errors, this should always succeed *if* the matrix is downscaling
+ * (in this case, we have the inverse, so it succeeds if fInvMatrix is upscaling)
+ */
+bool SkBitmapController::State::processMediumRequest(const SkImage_Base* image) {
+ SkASSERT(fQuality <= kMedium_SkFilterQuality);
+ if (fQuality != kMedium_SkFilterQuality) {
+ return false;
+ }
+
+ // Our default return state is to downgrade the request to Low, w/ or w/o setting
+ // fResultBitmap to a valid bitmap.
+ fQuality = kLow_SkFilterQuality;
+
+ SkSize invScaleSize;
+ if (!fInvMatrix.decomposeScale(&invScaleSize, nullptr)) {
+ return false;
+ }
+
+ if (invScaleSize.width() > SK_Scalar1 || invScaleSize.height() > SK_Scalar1) {
+ fCurrMip.reset(SkMipMapCache::FindAndRef(SkBitmapCacheDesc::Make(image)));
+ if (nullptr == fCurrMip.get()) {
+ fCurrMip.reset(SkMipMapCache::AddAndRef(image));
+ if (nullptr == fCurrMip.get()) {
+ return false;
+ }
+ }
+ // diagnostic for a crasher...
+ SkASSERT_RELEASE(fCurrMip->data());
+
+ const SkSize scale = SkSize::Make(SkScalarInvert(invScaleSize.width()),
+ SkScalarInvert(invScaleSize.height()));
+ SkMipMap::Level level;
+ if (fCurrMip->extractLevel(scale, &level)) {
+ const SkSize& invScaleFixup = level.fScale;
+ fInvMatrix.postScale(invScaleFixup.width(), invScaleFixup.height());
+
+ // todo: if we could wrap the fCurrMip in a pixelref, then we could just install
+ // that here, and not need to explicitly track it ourselves.
+ return fResultBitmap.installPixels(level.fPixmap);
+ } else {
+ // failed to extract, so release the mipmap
+ fCurrMip.reset(nullptr);
+ }
+ }
+ return false;
+}
+
+SkBitmapController::State::State(const SkImage_Base* image,
+ const SkMatrix& inv,
+ SkFilterQuality qual) {
+ fInvMatrix = inv;
+ fQuality = qual;
+
+ if (this->processHighRequest(image) || this->processMediumRequest(image)) {
+ SkASSERT(fResultBitmap.getPixels());
+ } else {
+ (void)image->getROPixels(&fResultBitmap);
+ }
+
+ // fResultBitmap.getPixels() may be null, but our caller knows to check fPixmap.addr()
+ // and will destroy us if it is nullptr.
+ fPixmap.reset(fResultBitmap.info(), fResultBitmap.getPixels(), fResultBitmap.rowBytes());
+}
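+
+// [Editorial sketch -- not part of the upstream file.] A hypothetical call
+// site: RequestBitmap() arena-allocates a State, which is only returned if
+// its pixmap address is non-null; the arena owns the State's lifetime.
+#if 0
+static void example_request(const SkImage_Base* image, const SkMatrix& inverse,
+ SkArenaAlloc* alloc) {
+ auto* state = SkBitmapController::RequestBitmap(image, inverse,
+ kMedium_SkFilterQuality, alloc);
+ if (state) {
+ // invMatrix() may differ from `inverse` if a mip level was selected.
+ const SkPixmap& pm = state->pixmap();
+ const SkMatrix& inv = state->invMatrix();
+ // ... sample pm through inv at state->quality() ...
+ }
+ // No delete: the arena reclaims the State.
+}
+#endif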
diff --git a/gfx/skia/skia/src/core/SkBitmapController.h b/gfx/skia/skia/src/core/SkBitmapController.h
new file mode 100644
index 0000000000..6e08f94db5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapController.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapController_DEFINED
+#define SkBitmapController_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkMatrix.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkMipMap.h"
+
+class SkImage_Base;
+
+/**
+ * Handles request to scale, filter, and lock a bitmap to be rasterized.
+ */
+class SkBitmapController : ::SkNoncopyable {
+public:
+ class State : ::SkNoncopyable {
+ public:
+ State(const SkImage_Base*, const SkMatrix& inv, SkFilterQuality);
+
+ const SkPixmap& pixmap() const { return fPixmap; }
+ const SkMatrix& invMatrix() const { return fInvMatrix; }
+ SkFilterQuality quality() const { return fQuality; }
+
+ private:
+ bool processHighRequest(const SkImage_Base*);
+ bool processMediumRequest(const SkImage_Base*);
+
+ SkPixmap fPixmap;
+ SkMatrix fInvMatrix;
+ SkFilterQuality fQuality;
+
+ // Pixmap storage.
+ SkBitmap fResultBitmap;
+ sk_sp<const SkMipMap> fCurrMip;
+
+ };
+
+ static State* RequestBitmap(const SkImage_Base*, const SkMatrix& inverse, SkFilterQuality,
+ SkArenaAlloc*);
+
+private:
+ SkBitmapController() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapDevice.cpp b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
new file mode 100644
index 0000000000..9bf45a0fbe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
@@ -0,0 +1,820 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkVertices.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTLazy.h"
+
+struct Bounder {
+ SkRect fBounds;
+ bool fHasBounds;
+
+ Bounder(const SkRect& r, const SkPaint& paint) {
+ if ((fHasBounds = paint.canComputeFastBounds())) {
+ fBounds = paint.computeFastBounds(r, &fBounds);
+ }
+ }
+
+ bool hasBounds() const { return fHasBounds; }
+ const SkRect* bounds() const { return fHasBounds ? &fBounds : nullptr; }
+ operator const SkRect* () const { return this->bounds(); }
+};
+
+class SkDrawTiler {
+ enum {
+ // 8K is 1 too big, since 8K << supersample == 32768 which is too big for SkFixed
+ kMaxDim = 8192 - 1
+ };
+
+ SkBitmapDevice* fDevice;
+ SkPixmap fRootPixmap;
+ SkIRect fSrcBounds;
+
+ // Used for tiling and non-tiling
+ SkDraw fDraw;
+
+ // fCurr... are only used if fNeedTiling
+ SkMatrix fTileMatrix;
+ SkRasterClip fTileRC;
+ SkIPoint fOrigin;
+
+ bool fDone, fNeedsTiling;
+
+public:
+ static bool NeedsTiling(SkBitmapDevice* dev) {
+ return dev->width() > kMaxDim || dev->height() > kMaxDim;
+ }
+
+ SkDrawTiler(SkBitmapDevice* dev, const SkRect* bounds) : fDevice(dev) {
+ fDone = false;
+
+ // we need fDst to be set, and if we're actually drawing, to dirty the genID
+ if (!dev->accessPixels(&fRootPixmap)) {
+ // NoDrawDevice uses us (why?) so we have to catch this case w/ no pixels
+ fRootPixmap.reset(dev->imageInfo(), nullptr, 0);
+ }
+
+ // do a quick check, so we don't even have to process "bounds" if there is no need
+ const SkIRect clipR = dev->fRCStack.rc().getBounds();
+ fNeedsTiling = clipR.right() > kMaxDim || clipR.bottom() > kMaxDim;
+ if (fNeedsTiling) {
+ if (bounds) {
+ // Make sure we round first, and then intersect. We can't rely on promoting the
+ // clipR to floats (and then intersecting with devBounds) since promoting
+ // int --> float can make the float larger than the int.
+ // rounding(out) first runs the risk of clamping if the float is larger than INT_MAX,
+ // but our roundOut() is saturating, which is fine for this use case
+ //
+ // e.g. the older version of this code did this:
+ // devBounds = mapRect(bounds);
+ // if (devBounds.intersect(SkRect::Make(clipR))) {
+ // fSrcBounds = devBounds.roundOut();
+ // The problem being that the promotion of clipR to SkRect was unreliable
+ //
+ fSrcBounds = dev->ctm().mapRect(*bounds).roundOut();
+ if (fSrcBounds.intersect(clipR)) {
+ // Check again, now that we have computed srcbounds.
+ fNeedsTiling = fSrcBounds.right() > kMaxDim || fSrcBounds.bottom() > kMaxDim;
+ } else {
+ fNeedsTiling = false;
+ fDone = true;
+ }
+ } else {
+ fSrcBounds = clipR;
+ }
+ }
+
+ if (fNeedsTiling) {
+ // fDraw.fDst is reset each time in setupTileDraw()
+ fDraw.fMatrix = &fTileMatrix;
+ fDraw.fRC = &fTileRC;
+ // we'll step/increase it before using it
+ fOrigin.set(fSrcBounds.fLeft - kMaxDim, fSrcBounds.fTop);
+ } else {
+ // don't reference fSrcBounds, as it may not have been set
+ fDraw.fDst = fRootPixmap;
+ fDraw.fMatrix = &dev->ctm();
+ fDraw.fRC = &dev->fRCStack.rc();
+ fOrigin.set(0, 0);
+
+ fDraw.fCoverage = dev->accessCoverage();
+ }
+ }
+
+ bool needsTiling() const { return fNeedsTiling; }
+
+ const SkDraw* next() {
+ if (fDone) {
+ return nullptr;
+ }
+ if (fNeedsTiling) {
+ do {
+ this->stepAndSetupTileDraw(); // might set the clip to empty and fDone to true
+ } while (!fDone && fTileRC.isEmpty());
+ // if we exit the loop and we're still empty, we're (past) done
+ if (fTileRC.isEmpty()) {
+ SkASSERT(fDone);
+ return nullptr;
+ }
+ SkASSERT(!fTileRC.isEmpty());
+ } else {
+ fDone = true; // only draw untiled once
+ }
+ return &fDraw;
+ }
+
+private:
+ void stepAndSetupTileDraw() {
+ SkASSERT(!fDone);
+ SkASSERT(fNeedsTiling);
+
+ // We compare fOrigin.fX against fSrcBounds.fRight - kMaxDim, rather than
+ // fOrigin.fX + kMaxDim against fSrcBounds.fRight, to avoid integer overflow.
+ if (fOrigin.fX >= fSrcBounds.fRight - kMaxDim) { // too far
+ fOrigin.fX = fSrcBounds.fLeft;
+ fOrigin.fY += kMaxDim;
+ } else {
+ fOrigin.fX += kMaxDim;
+ }
+ // fDone = next origin will be invalid.
+ fDone = fOrigin.fX >= fSrcBounds.fRight - kMaxDim &&
+ fOrigin.fY >= fSrcBounds.fBottom - kMaxDim;
+
+ SkIRect bounds = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), kMaxDim, kMaxDim);
+ SkASSERT(!bounds.isEmpty());
+ bool success = fRootPixmap.extractSubset(&fDraw.fDst, bounds);
+ SkASSERT_RELEASE(success);
+ // now don't use bounds, since fDst has the clipped dimensions.
+
+ fTileMatrix = fDevice->ctm();
+ fTileMatrix.postTranslate(SkIntToScalar(-fOrigin.x()), SkIntToScalar(-fOrigin.y()));
+ fDevice->fRCStack.rc().translate(-fOrigin.x(), -fOrigin.y(), &fTileRC);
+ fTileRC.op(SkIRect::MakeWH(fDraw.fDst.width(), fDraw.fDst.height()),
+ SkRegion::kIntersect_Op);
+ }
+};
+
+// Passing a bounds allows the tiler to only visit the dst-tiles that might intersect the
+// drawing. If null is passed, the tiler has to visit everywhere. The bounds is expected to be
+// in local coordinates, as the tiler itself will transform that into device coordinates.
+//
+#define LOOP_TILER(code, boundsPtr) \
+ SkDrawTiler priv_tiler(this, boundsPtr); \
+ while (const SkDraw* priv_draw = priv_tiler.next()) { \
+ priv_draw->code; \
+ }
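+
+// [Editorial sketch -- not part of the upstream file.] LOOP_TILER(code, boundsPtr)
+// expands to roughly the loop below: each iteration yields an SkDraw whose
+// dst, matrix, and clip have been shifted into one kMaxDim x kMaxDim tile, so
+// the draw call itself never sees coordinates beyond SkFixed's range.
+#if 0
+static void example_draw_rect_tiled(SkBitmapDevice* dev, const SkRect& r, const SkPaint& paint) {
+ SkDrawTiler tiler(dev, Bounder(r, paint));
+ while (const SkDraw* draw = tiler.next()) {
+ // The geometry is unchanged; each tile's matrix translates it into tile space.
+ draw->drawRect(r, paint);
+ }
+}
+#endif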
+
+// Helper to create an SkDraw from a device
+class SkBitmapDevice::BDDraw : public SkDraw {
+public:
+ BDDraw(SkBitmapDevice* dev) {
+ // we need fDst to be set, and if we're actually drawing, to dirty the genID
+ if (!dev->accessPixels(&fDst)) {
+ // NoDrawDevice uses us (why?) so we have to catch this case w/ no pixels
+ fDst.reset(dev->imageInfo(), nullptr, 0);
+ }
+ fMatrix = &dev->ctm();
+ fRC = &dev->fRCStack.rc();
+ fCoverage = dev->accessCoverage();
+ }
+};
+
+static bool valid_for_bitmap_device(const SkImageInfo& info,
+ SkAlphaType* newAlphaType) {
+ if (info.width() < 0 || info.height() < 0 || kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+
+ if (newAlphaType) {
+ *newAlphaType = SkColorTypeIsAlwaysOpaque(info.colorType()) ? kOpaque_SkAlphaType
+ : info.alphaType();
+ }
+
+ return true;
+}
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap)
+ : INHERITED(bitmap.info(), SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType))
+ , fBitmap(bitmap)
+ , fRCStack(bitmap.width(), bitmap.height())
+ , fGlyphPainter(this->surfaceProps(),
+ bitmap.colorType(),
+ bitmap.colorSpace(),
+ SkStrikeCache::GlobalStrikeCache()) {
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& info) {
+ return Create(info, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType));
+}
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps,
+ SkRasterHandleAllocator::Handle hndl, const SkBitmap* coverage)
+ : INHERITED(bitmap.info(), surfaceProps)
+ , fBitmap(bitmap)
+ , fRasterHandle(hndl)
+ , fRCStack(bitmap.width(), bitmap.height())
+ , fGlyphPainter(this->surfaceProps(),
+ bitmap.colorType(),
+ bitmap.colorSpace(),
+ SkStrikeCache::GlobalStrikeCache()) {
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+
+ if (coverage) {
+ SkASSERT(coverage->width() == bitmap.width());
+ SkASSERT(coverage->height() == bitmap.height());
+ fCoverage = skstd::make_unique<SkBitmap>(*coverage);
+ }
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& origInfo,
+ const SkSurfaceProps& surfaceProps,
+ bool trackCoverage,
+ SkRasterHandleAllocator* allocator) {
+ SkAlphaType newAT = origInfo.alphaType();
+ if (!valid_for_bitmap_device(origInfo, &newAT)) {
+ return nullptr;
+ }
+
+ SkRasterHandleAllocator::Handle hndl = nullptr;
+ const SkImageInfo info = origInfo.makeAlphaType(newAT);
+ SkBitmap bitmap;
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ if (!bitmap.setInfo(info)) {
+ return nullptr;
+ }
+ } else if (allocator) {
+ hndl = allocator->allocBitmap(info, &bitmap);
+ if (!hndl) {
+ return nullptr;
+ }
+ } else if (info.isOpaque()) {
+ // If this bitmap is opaque, we don't have any sensible default color,
+ // so we just return uninitialized pixels.
+ if (!bitmap.tryAllocPixels(info)) {
+ return nullptr;
+ }
+ } else {
+ // This bitmap has transparency, so we'll zero the pixels (to transparent).
+ // We use the flag as a faster alloc-then-eraseColor(SK_ColorTRANSPARENT).
+ if (!bitmap.tryAllocPixelsFlags(info, SkBitmap::kZeroPixels_AllocFlag)) {
+ return nullptr;
+ }
+ }
+
+ SkBitmap coverage;
+ if (trackCoverage) {
+ SkImageInfo ci =
+ SkImageInfo::Make(info.dimensions(), kAlpha_8_SkColorType, kPremul_SkAlphaType);
+ if (!coverage.tryAllocPixelsFlags(ci, SkBitmap::kZeroPixels_AllocFlag)) {
+ return nullptr;
+ }
+ }
+
+ return new SkBitmapDevice(bitmap, surfaceProps, hndl, trackCoverage ? &coverage : nullptr);
+}
+
+void SkBitmapDevice::replaceBitmapBackendForRasterSurface(const SkBitmap& bm) {
+ SkASSERT(bm.width() == fBitmap.width());
+ SkASSERT(bm.height() == fBitmap.height());
+ fBitmap = bm; // intent is to use bm's pixelRef (and rowbytes/config)
+ this->privateResize(fBitmap.info().width(), fBitmap.info().height());
+}
+
+SkBaseDevice* SkBitmapDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint* layerPaint) {
+ const SkSurfaceProps surfaceProps(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+
+ // Need to force L32 for now if we have an image filter.
+ // If filters ever support other colortypes, e.g. F16, we can modify this check.
+ SkImageInfo info = cinfo.fInfo;
+ if (layerPaint && layerPaint->getImageFilter()) {
+ // TODO: can we query the imagefilter, to see if it can handle floats (so we don't always
+ // use N32 when the layer itself was float)?
+ info = info.makeColorType(kN32_SkColorType);
+ }
+
+ return SkBitmapDevice::Create(info, surfaceProps, cinfo.fTrackCoverage, cinfo.fAllocator);
+}
+
+bool SkBitmapDevice::onAccessPixels(SkPixmap* pmap) {
+ if (this->onPeekPixels(pmap)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onPeekPixels(SkPixmap* pmap) {
+ const SkImageInfo info = fBitmap.info();
+ if (fBitmap.getPixels() && (kUnknown_SkColorType != info.colorType())) {
+ pmap->reset(fBitmap.info(), fBitmap.getPixels(), fBitmap.rowBytes());
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onWritePixels(const SkPixmap& pm, int x, int y) {
+ // since we haven't stopped creating un-pixeled devices yet, check for no pixels here
+ if (nullptr == fBitmap.getPixels()) {
+ return false;
+ }
+
+ if (fBitmap.writePixels(pm, x, y)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onReadPixels(const SkPixmap& pm, int x, int y) {
+ return fBitmap.readPixels(pm, x, y);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::drawPaint(const SkPaint& paint) {
+ BDDraw(this).drawPaint(paint);
+}
+
+void SkBitmapDevice::drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ LOOP_TILER( drawPoints(mode, count, pts, paint, nullptr), nullptr)
+}
+
+void SkBitmapDevice::drawRect(const SkRect& r, const SkPaint& paint) {
+ LOOP_TILER( drawRect(r, paint), Bounder(r, paint))
+}
+
+void SkBitmapDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
+ SkPath path;
+ path.addOval(oval);
+ // call the VIRTUAL version, so any subclasses who do handle drawPath aren't
+ // required to override drawOval.
+ this->drawPath(path, paint, true);
+}
+
+void SkBitmapDevice::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+#ifdef SK_IGNORE_BLURRED_RRECT_OPT
+ SkPath path;
+
+ path.addRRect(rrect);
+ // call the VIRTUAL version, so any subclasses who do handle drawPath aren't
+ // required to override drawRRect.
+ this->drawPath(path, paint, true);
+#else
+ LOOP_TILER( drawRRect(rrect, paint), Bounder(rrect.getBounds(), paint))
+#endif
+}
+
+void SkBitmapDevice::drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable) {
+ const SkRect* bounds = nullptr;
+ if (SkDrawTiler::NeedsTiling(this) && !path.isInverseFillType()) {
+ bounds = &path.getBounds();
+ }
+ SkDrawTiler tiler(this, bounds ? Bounder(*bounds, paint).bounds() : nullptr);
+ if (tiler.needsTiling()) {
+ pathIsMutable = false;
+ }
+ while (const SkDraw* draw = tiler.next()) {
+ draw->drawPath(path, paint, nullptr, pathIsMutable);
+ }
+}
+
+void SkBitmapDevice::drawBitmap(const SkBitmap& bitmap, const SkMatrix& matrix,
+ const SkRect* dstOrNull, const SkPaint& paint) {
+ const SkRect* bounds = dstOrNull;
+ SkRect storage;
+ if (!bounds && SkDrawTiler::NeedsTiling(this)) {
+ matrix.mapRect(&storage, SkRect::MakeIWH(bitmap.width(), bitmap.height()));
+ Bounder b(storage, paint);
+ if (b.hasBounds()) {
+ storage = *b.bounds();
+ bounds = &storage;
+ }
+ }
+ LOOP_TILER(drawBitmap(bitmap, matrix, dstOrNull, paint), bounds)
+}
+
+static inline bool CanApplyDstMatrixAsCTM(const SkMatrix& m, const SkPaint& paint) {
+ if (!paint.getMaskFilter()) {
+ return true;
+ }
+
+ // Some mask filters parameters (sigma) depend on the CTM/scale.
+ return m.getType() <= SkMatrix::kTranslate_Mask;
+}
+
+void SkBitmapDevice::drawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ SkASSERT(dst.isFinite());
+ SkASSERT(dst.isSorted());
+
+ SkMatrix matrix;
+ SkRect bitmapBounds, tmpSrc, tmpDst;
+ SkBitmap tmpBitmap;
+
+ bitmapBounds.setIWH(bitmap.width(), bitmap.height());
+
+ // Compute matrix from the two rectangles
+ if (src) {
+ tmpSrc = *src;
+ } else {
+ tmpSrc = bitmapBounds;
+ }
+ matrix.setRectToRect(tmpSrc, dst, SkMatrix::kFill_ScaleToFit);
+
+ LogDrawScaleFactor(this->ctm(), matrix, paint.getFilterQuality());
+
+ const SkRect* dstPtr = &dst;
+ const SkBitmap* bitmapPtr = &bitmap;
+
+ // clip the tmpSrc to the bounds of the bitmap, and recompute dstRect if
+ // needed (if the src was clipped). No check needed if src==null.
+ if (src) {
+ if (!bitmapBounds.contains(*src)) {
+ if (!tmpSrc.intersect(bitmapBounds)) {
+ return; // nothing to draw
+ }
+ // recompute dst, based on the smaller tmpSrc
+ matrix.mapRect(&tmpDst, tmpSrc);
+ if (!tmpDst.isFinite()) {
+ return;
+ }
+ dstPtr = &tmpDst;
+ }
+ }
+
+ if (src && !src->contains(bitmapBounds) &&
+ SkCanvas::kFast_SrcRectConstraint == constraint &&
+ paint.getFilterQuality() != kNone_SkFilterQuality) {
+ // src is smaller than the bounds of the bitmap, and we are filtering, so we don't know
+ // how much more of the bitmap we need, so we can't use extractSubset or drawBitmap,
+ // but we must use a shader w/ dst bounds (which can access all of the bitmap needed).
+ goto USE_SHADER;
+ }
+
+ if (src) {
+ // since we may need to clamp to the borders of the src rect within
+ // the bitmap, we extract a subset.
+ const SkIRect srcIR = tmpSrc.roundOut();
+ if (!bitmap.extractSubset(&tmpBitmap, srcIR)) {
+ return;
+ }
+ bitmapPtr = &tmpBitmap;
+
+ // Since we did an extract, we need to adjust the matrix accordingly
+ SkScalar dx = 0, dy = 0;
+ if (srcIR.fLeft > 0) {
+ dx = SkIntToScalar(srcIR.fLeft);
+ }
+ if (srcIR.fTop > 0) {
+ dy = SkIntToScalar(srcIR.fTop);
+ }
+ if (dx || dy) {
+ matrix.preTranslate(dx, dy);
+ }
+
+#ifdef SK_DRAWBITMAPRECT_FAST_OFFSET
+ SkRect extractedBitmapBounds = SkRect::MakeXYWH(dx, dy,
+ SkIntToScalar(bitmapPtr->width()),
+ SkIntToScalar(bitmapPtr->height()));
+#else
+ SkRect extractedBitmapBounds;
+ extractedBitmapBounds.setIWH(bitmapPtr->width(), bitmapPtr->height());
+#endif
+ if (extractedBitmapBounds == tmpSrc) {
+ // no fractional part in src, we can just call drawBitmap
+ goto USE_DRAWBITMAP;
+ }
+ } else {
+ USE_DRAWBITMAP:
+ // We can go faster by just calling drawBitmap, which will concat the
+ // matrix with the CTM, and try to call drawSprite if it can. If not,
+ // it will make a shader and call drawRect, as we do below.
+ if (CanApplyDstMatrixAsCTM(matrix, paint)) {
+ this->drawBitmap(*bitmapPtr, matrix, dstPtr, paint);
+ return;
+ }
+ }
+
+ USE_SHADER:
+
+ // TODO(herb): Move this over to SkArenaAlloc when arena alloc has a facility to return sk_sps.
+ // Since the shader need only live for our stack-frame, pass in a custom allocator. This
+ // can save malloc calls, and signals to SkMakeBitmapShader to not try to copy the bitmap
+ // if it's mutable, since that precaution is not needed (given the short lifetime of the shader).
+
+ // construct a shader, so we can call drawRect with the dst
+ auto s = SkMakeBitmapShaderForPaint(paint, *bitmapPtr, SkTileMode::kClamp,
+ SkTileMode::kClamp, &matrix, kNever_SkCopyPixelsMode);
+ if (!s) {
+ return;
+ }
+
+ SkPaint paintWithShader(paint);
+ paintWithShader.setStyle(SkPaint::kFill_Style);
+ paintWithShader.setShader(std::move(s));
+
+ // Call ourself, in case the subclass wanted to share this setup code
+ // but handle the drawRect code themselves.
+ this->drawRect(*dstPtr, paintWithShader);
+}
+
+void SkBitmapDevice::drawSprite(const SkBitmap& bitmap, int x, int y, const SkPaint& paint) {
+ BDDraw(this).drawSprite(bitmap, x, y, paint);
+}
+
+void SkBitmapDevice::drawGlyphRunList(const SkGlyphRunList& glyphRunList) {
+ LOOP_TILER( drawGlyphRunList(glyphRunList, &fGlyphPainter), nullptr )
+}
+
+void SkBitmapDevice::drawVertices(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode bmode, const SkPaint& paint) {
+ BDDraw(this).drawVertices(vertices->mode(), vertices->vertexCount(), vertices->positions(),
+ vertices->texCoords(), vertices->colors(), vertices->boneIndices(),
+ vertices->boneWeights(), bmode, vertices->indices(),
+ vertices->indexCount(), paint, bones, boneCount);
+}
+
+void SkBitmapDevice::drawDevice(SkBaseDevice* device, int x, int y, const SkPaint& origPaint) {
+ SkASSERT(!origPaint.getImageFilter());
+
+ // todo: can we unify with similar adjustment in SkGpuDevice?
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+ if (paint->getMaskFilter()) {
+ paint.writable()->setMaskFilter(paint->getMaskFilter()->makeWithMatrix(this->ctm()));
+ }
+
+ // hack to test coverage
+ SkBitmapDevice* src = static_cast<SkBitmapDevice*>(device);
+ if (src->fCoverage) {
+ SkDraw draw;
+ draw.fDst = fBitmap.pixmap();
+ draw.fMatrix = &SkMatrix::I();
+ draw.fRC = &fRCStack.rc();
+ SkPaint paint(origPaint);
+ paint.setShader(src->fBitmap.makeShader());
+ draw.drawBitmap(*src->fCoverage.get(),
+ SkMatrix::MakeTrans(SkIntToScalar(x),SkIntToScalar(y)), nullptr, paint);
+ } else {
+ this->drawSprite(src->fBitmap, x, y, *paint);
+ }
+}
+
+void SkBitmapDevice::drawAtlas(const SkImage* atlas, const SkRSXform xform[],
+ const SkRect tex[], const SkColor colors[], int count,
+ SkBlendMode mode, const SkPaint& paint) {
+ // set this to true for performance comparisons with the old drawVertices way
+ if (false) {
+ this->INHERITED::drawAtlas(atlas, xform, tex, colors, count, mode, paint);
+ return;
+ }
+ BDDraw(this).drawAtlas(atlas, xform, tex, colors, count, mode, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+class SkAutoDeviceClipRestore {
+public:
+ SkAutoDeviceClipRestore(SkBaseDevice* device, const SkIRect& clip)
+ : fDevice(device)
+ , fPrevCTM(device->ctm()) {
+ fDevice->save();
+ fDevice->setCTM(SkMatrix::I());
+ fDevice->clipRect(SkRect::Make(clip), SkClipOp::kIntersect, false);
+ fDevice->setCTM(fPrevCTM);
+ }
+
+ ~SkAutoDeviceClipRestore() {
+ fDevice->restore(fPrevCTM);
+ }
+
+private:
+ SkBaseDevice* fDevice;
+ const SkMatrix fPrevCTM;
+};
+
+} // anonymous ns
+
+void SkBitmapDevice::drawSpecial(SkSpecialImage* src, int x, int y, const SkPaint& origPaint,
+ SkImage* clipImage, const SkMatrix& clipMatrix) {
+ SkASSERT(!src->isTextureBacked());
+
+ sk_sp<SkSpecialImage> filteredImage;
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ if (SkImageFilter* filter = paint->getImageFilter()) {
+ SkIPoint offset = SkIPoint::Make(0, 0);
+ const SkMatrix matrix = SkMatrix::Concat(
+ SkMatrix::MakeTrans(SkIntToScalar(-x), SkIntToScalar(-y)), this->ctm());
+ const SkIRect clipBounds = fRCStack.rc().getBounds().makeOffset(-x, -y);
+ sk_sp<SkImageFilterCache> cache(this->getImageFilterCache());
+ SkImageFilter_Base::Context ctx(matrix, clipBounds, cache.get(), fBitmap.colorType(),
+ fBitmap.colorSpace(), src);
+
+ filteredImage = as_IFB(filter)->filterImage(ctx).imageAndOffset(&offset);
+ if (!filteredImage) {
+ return;
+ }
+
+ src = filteredImage.get();
+ paint.writable()->setImageFilter(nullptr);
+ x += offset.x();
+ y += offset.y();
+ }
+
+ if (paint->getMaskFilter()) {
+ paint.writable()->setMaskFilter(paint->getMaskFilter()->makeWithMatrix(this->ctm()));
+ }
+
+ if (!clipImage) {
+ SkBitmap resultBM;
+ if (src->getROPixels(&resultBM)) {
+ this->drawSprite(resultBM, x, y, *paint);
+ }
+ return;
+ }
+
+ // Clip image case.
+ sk_sp<SkImage> srcImage(src->asImage());
+ if (!srcImage) {
+ return;
+ }
+
+ const SkMatrix totalMatrix = SkMatrix::Concat(this->ctm(), clipMatrix);
+ SkRect clipBounds;
+ totalMatrix.mapRect(&clipBounds, SkRect::Make(clipImage->bounds()));
+ const SkIRect srcBounds = srcImage->bounds().makeOffset(x, y);
+
+ SkIRect maskBounds = fRCStack.rc().getBounds();
+ if (!maskBounds.intersect(clipBounds.roundOut()) || !maskBounds.intersect(srcBounds)) {
+ return;
+ }
+
+ sk_sp<SkImage> mask;
+ SkMatrix maskMatrix, shaderMatrix;
+ SkTLazy<SkAutoDeviceClipRestore> autoClipRestore;
+
+ SkMatrix totalInverse;
+ if (clipImage->isAlphaOnly() && totalMatrix.invert(&totalInverse)) {
+ // If the mask is already in A8 format, we can draw it directly
+ // (while compensating in the shader matrix).
+ mask = sk_ref_sp(clipImage);
+ maskMatrix = totalMatrix;
+ shaderMatrix = SkMatrix::Concat(totalInverse, SkMatrix::MakeTrans(x, y));
+
+ // If the mask is not fully contained within the src layer, we must clip.
+ if (!srcBounds.contains(clipBounds)) {
+ autoClipRestore.init(this, srcBounds);
+ }
+
+ maskBounds.offsetTo(0, 0);
+ } else {
+ // Otherwise, we convert the mask to A8 explicitly.
+ sk_sp<SkSurface> surf = SkSurface::MakeRaster(SkImageInfo::MakeA8(maskBounds.width(),
+ maskBounds.height()));
+ SkCanvas* canvas = surf->getCanvas();
+ canvas->translate(-maskBounds.x(), -maskBounds.y());
+ canvas->concat(totalMatrix);
+ canvas->drawImage(clipImage, 0, 0);
+
+ mask = surf->makeImageSnapshot();
+ maskMatrix = SkMatrix::I();
+ shaderMatrix = SkMatrix::MakeTrans(x - maskBounds.x(), y - maskBounds.y());
+ }
+
+ SkAutoDeviceCTMRestore adctmr(this, maskMatrix);
+ paint.writable()->setShader(srcImage->makeShader(&shaderMatrix));
+ this->drawImageRect(mask.get(), nullptr,
+ SkRect::MakeXYWH(maskBounds.x(), maskBounds.y(),
+ mask->width(), mask->height()),
+ *paint, SkCanvas::kFast_SrcRectConstraint);
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap);
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(nullptr, SkIRect::MakeWH(image->width(), image->height()),
+ image->makeNonTextureImage());
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::snapSpecial(const SkIRect& bounds, bool forceCopy) {
+ if (forceCopy) {
+ return SkSpecialImage::CopyFromRaster(bounds, fBitmap, &this->surfaceProps());
+ } else {
+ return SkSpecialImage::MakeFromRaster(bounds, fBitmap);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkBitmapDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+}
+
+SkImageFilterCache* SkBitmapDevice::getImageFilterCache() {
+ SkImageFilterCache* cache = SkImageFilterCache::Get();
+ cache->ref();
+ return cache;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::onSave() {
+ fRCStack.save();
+}
+
+void SkBitmapDevice::onRestore() {
+ fRCStack.restore();
+}
+
+void SkBitmapDevice::onClipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ fRCStack.clipRect(this->ctm(), rect, op, aa);
+}
+
+void SkBitmapDevice::onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ fRCStack.clipRRect(this->ctm(), rrect, op, aa);
+}
+
+void SkBitmapDevice::onClipPath(const SkPath& path, SkClipOp op, bool aa) {
+ fRCStack.clipPath(this->ctm(), path, op, aa);
+}
+
+void SkBitmapDevice::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ SkIPoint origin = this->getOrigin();
+ SkRegion tmp;
+ const SkRegion* ptr = &rgn;
+ if (origin.fX | origin.fY) {
+ // translate from "global/canvas" coordinates to relative to this device
+ rgn.translate(-origin.fX, -origin.fY, &tmp);
+ ptr = &tmp;
+ }
+ fRCStack.clipRegion(*ptr, op);
+}
+
+void SkBitmapDevice::onSetDeviceClipRestriction(SkIRect* mutableClipRestriction) {
+ fRCStack.setDeviceClipRestriction(mutableClipRestriction);
+ if (!mutableClipRestriction->isEmpty()) {
+ SkRegion rgn(*mutableClipRestriction);
+ fRCStack.clipRegion(rgn, SkClipOp::kIntersect);
+ }
+}
+
+bool SkBitmapDevice::onClipIsAA() const {
+ const SkRasterClip& rc = fRCStack.rc();
+ return !rc.isEmpty() && rc.isAA();
+}
+
+void SkBitmapDevice::onAsRgnClip(SkRegion* rgn) const {
+ const SkRasterClip& rc = fRCStack.rc();
+ if (rc.isAA()) {
+ rgn->setRect(rc.getBounds());
+ } else {
+ *rgn = rc.bwRgn();
+ }
+}
+
+void SkBitmapDevice::validateDevBounds(const SkIRect& drawClipBounds) {
+#ifdef SK_DEBUG
+ const SkIRect& stackBounds = fRCStack.rc().getBounds();
+ SkASSERT(drawClipBounds == stackBounds);
+#endif
+}
+
+SkBaseDevice::ClipType SkBitmapDevice::onGetClipType() const {
+ const SkRasterClip& rc = fRCStack.rc();
+ if (rc.isEmpty()) {
+ return ClipType::kEmpty;
+ } else if (rc.isRect()) {
+ return ClipType::kRect;
+ } else {
+ return ClipType::kComplex;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapDevice.h b/gfx/skia/skia/src/core/SkBitmapDevice.h
new file mode 100644
index 0000000000..e0a777fc65
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapDevice.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapDevice_DEFINED
+#define SkBitmapDevice_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRasterClipStack.h"
+
+class SkImageFilterCache;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPixmap;
+class SkRasterHandleAllocator;
+class SkRRect;
+class SkSurface;
+struct SkPoint;
+
+///////////////////////////////////////////////////////////////////////////////
+class SkBitmapDevice : public SkBaseDevice {
+public:
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap);
+
+ /**
+ * Create a new device along with its requisite pixel memory using
+ * default SkSurfaceProps (i.e., kLegacyFontHost_InitType-style).
+ * Note: this entry point is slated for removal - no one should call it.
+ */
+ static SkBitmapDevice* Create(const SkImageInfo& info);
+
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps,
+ void* externalHandle, const SkBitmap* coverage);
+
+ static SkBitmapDevice* Create(const SkImageInfo&, const SkSurfaceProps&,
+ bool trackCoverage,
+ SkRasterHandleAllocator*);
+
+ static SkBitmapDevice* Create(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return Create(info, props, false, nullptr);
+ }
+
+ const SkPixmap* accessCoverage() const {
+ return fCoverage ? &fCoverage->pixmap() : nullptr;
+ }
+
+protected:
+ void* getRasterHandle() const override { return fRasterHandle; }
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint.
+ */
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr, const SkPaint& paint) override;
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ */
+ void drawPath(const SkPath&, const SkPaint&, bool pathIsMutable) override;
+ void drawSprite(const SkBitmap&, int x, int y, const SkPaint&) override;
+
+ /**
+ * The default impl. will create a bitmap-shader from the bitmap,
+ * and call drawRect with it.
+ */
+ void drawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&,
+ const SkPaint&, SkCanvas::SrcRectConstraint) override;
+
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override;
+ void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const SkPaint& paint) override;
+ void drawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int count,
+ SkBlendMode, const SkPaint&) override;
+ void drawDevice(SkBaseDevice*, int x, int y, const SkPaint&) override;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void drawSpecial(SkSpecialImage*, int x, int y, const SkPaint&,
+ SkImage*, const SkMatrix&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial(const SkIRect&, bool = false) override;
+ void setImmutable() override { fBitmap.setImmutable(); }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ bool onReadPixels(const SkPixmap&, int x, int y) override;
+ bool onWritePixels(const SkPixmap&, int, int) override;
+ bool onPeekPixels(SkPixmap*) override;
+ bool onAccessPixels(SkPixmap*) override;
+
+ void onSave() override;
+ void onRestore() override;
+ void onClipRect(const SkRect& rect, SkClipOp, bool aa) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) override;
+ void onClipPath(const SkPath& path, SkClipOp, bool aa) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+ void onSetDeviceClipRestriction(SkIRect* mutableClipRestriction) override;
+ bool onClipIsAA() const override;
+ void onAsRgnClip(SkRegion*) const override;
+ void validateDevBounds(const SkIRect& r) override;
+ ClipType onGetClipType() const override;
+
+ virtual void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkPaint&);
+
+private:
+ friend class SkCanvas;
+ friend struct DeviceCM; //for setMatrixClip
+ friend class SkDraw;
+ friend class SkDrawIter;
+ friend class SkDrawTiler;
+ friend class SkSurface_Raster;
+
+ class BDDraw;
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ void replaceBitmapBackendForRasterSurface(const SkBitmap&) override;
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ SkImageFilterCache* getImageFilterCache() override;
+
+ SkBitmap fBitmap;
+ void* fRasterHandle = nullptr;
+ SkRasterClipStack fRCStack;
+ std::unique_ptr<SkBitmap> fCoverage; // if non-null, will have the same dimensions as fBitmap
+ SkGlyphRunListPainter fGlyphPainter;
+
+
+ typedef SkBaseDevice INHERITED;
+};
+
+class SkBitmapDeviceFilteredSurfaceProps {
+public:
+ SkBitmapDeviceFilteredSurfaceProps(const SkBitmap& bitmap, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps)
+ : fSurfaceProps((kN32_SkColorType != bitmap.colorType() || !paint.isSrcOver())
+ ? fLazy.init(surfaceProps.flags(), kUnknown_SkPixelGeometry)
+ : &surfaceProps)
+ { }
+
+ SkBitmapDeviceFilteredSurfaceProps(const SkBitmapDeviceFilteredSurfaceProps&) = delete;
+ SkBitmapDeviceFilteredSurfaceProps& operator=(const SkBitmapDeviceFilteredSurfaceProps&) = delete;
+ SkBitmapDeviceFilteredSurfaceProps(SkBitmapDeviceFilteredSurfaceProps&&) = delete;
+ SkBitmapDeviceFilteredSurfaceProps& operator=(SkBitmapDeviceFilteredSurfaceProps&&) = delete;
+
+ const SkSurfaceProps& operator()() const { return *fSurfaceProps; }
+
+private:
+ SkTLazy<SkSurfaceProps> fLazy;
+ SkSurfaceProps const * const fSurfaceProps;
+};
+
+#endif // SkBitmapDevice_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBitmapFilter.h b/gfx/skia/skia/src/core/SkBitmapFilter.h
new file mode 100644
index 0000000000..f7f1f07f24
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapFilter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapFilter_DEFINED
+#define SkBitmapFilter_DEFINED
+
+#include "include/private/SkFixed.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkScalar.h"
+
+#include "include/private/SkNx.h"
+
+// Size of the precomputed bitmap-filter table used for high-quality filtering.
+// The table caches the shape of the filter kernel.
+// The table size was chosen experimentally: the point at which a larger table
+// no longer made a visible difference.
+
+#define SKBITMAP_FILTER_TABLE_SIZE 128
+
+class SkBitmapFilter {
+public:
+ SkBitmapFilter(float width) : fWidth(width), fInvWidth(1.f/width) {
+ fPrecomputed = false;
+ fLookupMultiplier = this->invWidth() * (SKBITMAP_FILTER_TABLE_SIZE-1);
+ }
+ virtual ~SkBitmapFilter() {}
+
+ SkScalar lookupScalar(float x) const {
+ if (!fPrecomputed) {
+ precomputeTable();
+ }
+ int filter_idx = int(sk_float_abs(x * fLookupMultiplier));
+ SkASSERT(filter_idx < SKBITMAP_FILTER_TABLE_SIZE);
+ return fFilterTableScalar[filter_idx];
+ }
+
+ float width() const { return fWidth; }
+ float invWidth() const { return fInvWidth; }
+ virtual float evaluate(float x) const = 0;
+
+ virtual float evaluate_n(float val, float diff, int count, float* output) const {
+ float sum = 0;
+ for (int index = 0; index < count; index++) {
+ float filterValue = evaluate(val);
+ *output++ = filterValue;
+ sum += filterValue;
+ val += diff;
+ }
+ return sum;
+ }
+
+protected:
+ float fWidth;
+ float fInvWidth;
+ float fLookupMultiplier;
+
+ mutable bool fPrecomputed;
+ mutable SkScalar fFilterTableScalar[SKBITMAP_FILTER_TABLE_SIZE];
+
+private:
+ void precomputeTable() const {
+ fPrecomputed = true;
+ SkScalar *ftpScalar = fFilterTableScalar;
+ for (int x = 0; x < SKBITMAP_FILTER_TABLE_SIZE; ++x) {
+ float fx = ((float)x + .5f) * this->width() / SKBITMAP_FILTER_TABLE_SIZE;
+ float filter_value = evaluate(fx);
+ *ftpScalar++ = filter_value;
+ }
+ }
+};
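+
+// [Editorial sketch -- not part of this header.] lookupScalar() trades
+// re-evaluating the kernel for one multiply plus a table load; the table maps
+// |x| in [0, width) onto SKBITMAP_FILTER_TABLE_SIZE precomputed samples. A
+// hypothetical per-tap helper:
+#if 0
+static SkScalar example_tap_weight(const SkBitmapFilter& filter, float dist) {
+ // Distances outside the filter's support contribute nothing; in-support
+ // distances hit the precomputed table.
+ return (sk_float_abs(dist) >= filter.width()) ? 0 : filter.lookupScalar(dist);
+}
+#endif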
+
+class SkMitchellFilter final : public SkBitmapFilter {
+public:
+ SkMitchellFilter()
+ : INHERITED(2)
+ , fB(1.f / 3.f)
+ , fC(1.f / 3.f)
+ , fA1(-fB - 6*fC)
+ , fB1(6*fB + 30*fC)
+ , fC1(-12*fB - 48*fC)
+ , fD1(8*fB + 24*fC)
+ , fA2(12 - 9*fB - 6*fC)
+ , fB2(-18 + 12*fB + 6*fC)
+ , fD2(6 - 2*fB)
+ {}
+
+ float evaluate(float x) const override {
+ x = fabsf(x);
+ if (x > 2.f) {
+ return 0;
+ } else if (x > 1.f) {
+ return (((fA1 * x + fB1) * x + fC1) * x + fD1) * (1.f/6.f);
+ } else {
+ return ((fA2 * x + fB2) * x*x + fD2) * (1.f/6.f);
+ }
+ }
+
+ Sk4f evalcore_n(const Sk4f& val) const {
+ Sk4f x = val.abs();
+ Sk4f over2 = x > Sk4f(2);
+ Sk4f over1 = x > Sk4f(1);
+ Sk4f poly1 = (((Sk4f(fA1) * x + Sk4f(fB1)) * x + Sk4f(fC1)) * x + Sk4f(fD1))
+ * Sk4f(1.f/6.f);
+ Sk4f poly0 = ((Sk4f(fA2) * x + Sk4f(fB2)) * x*x + Sk4f(fD2)) * Sk4f(1.f/6.f);
+ return over2.thenElse(Sk4f(0), over1.thenElse(poly1, poly0));
+ }
+
+ float evaluate_n(float val, float diff, int count, float* output) const override {
+ Sk4f sum(0);
+ while (count >= 4) {
+ float v0 = val;
+ float v1 = val += diff;
+ float v2 = val += diff;
+ float v3 = val += diff;
+ val += diff;
+ Sk4f filterValue = evalcore_n(Sk4f(v0, v1, v2, v3));
+ filterValue.store(output);
+ output += 4;
+ sum = sum + filterValue;
+ count -= 4;
+ }
+ float sums[4];
+ sum.store(sums);
+ float result = sums[0] + sums[1] + sums[2] + sums[3];
+ result += INHERITED::evaluate_n(val, diff, count, output);
+ return result;
+ }
+
+protected:
+ float fB, fC;
+ float fA1, fB1, fC1, fD1;
+ float fA2, fB2, fD2;
+private:
+ typedef SkBitmapFilter INHERITED;
+};
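+
+// [Editorial sketch -- not part of this header.] evaluate_n() fills a run of
+// kernel weights at evenly spaced distances and returns their sum, which a
+// caller can use to normalize the row. A hypothetical 4-tap example:
+#if 0
+static void example_mitchell_row() {
+ SkMitchellFilter mitchell; // support is [-2, 2]
+ float weights[4];
+ // Taps at -1.5, -0.5, 0.5, 1.5: start at -1.5 and step by 1.0.
+ float sum = mitchell.evaluate_n(-1.5f, 1.0f, 4, weights);
+ for (int i = 0; i < 4; ++i) {
+ weights[i] /= sum; // normalized row sums to 1
+ }
+}
+#endif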
+
+class SkGaussianFilter final : public SkBitmapFilter {
+ float fAlpha, fExpWidth;
+
+public:
+ SkGaussianFilter(float a, float width = 2)
+ : SkBitmapFilter(width)
+ , fAlpha(a)
+ , fExpWidth(expf(-a * width * width))
+ {}
+
+ float evaluate(float x) const override {
+ return SkTMax(0.f, float(expf(-fAlpha*x*x) - fExpWidth));
+ }
+};
+
+class SkTriangleFilter final : public SkBitmapFilter {
+public:
+ SkTriangleFilter(float width = 1) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ return SkTMax(0.f, fWidth - fabsf(x));
+ }
+};
+
+class SkBoxFilter final : public SkBitmapFilter {
+public:
+ SkBoxFilter(float width = 0.5f) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ return (x >= -fWidth && x < fWidth) ? 1.0f : 0.0f;
+ }
+};
+
+class SkHammingFilter final : public SkBitmapFilter {
+public:
+ SkHammingFilter(float width = 1) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ if (x <= -fWidth || x >= fWidth) {
+ return 0.0f; // Outside of the window.
+ }
+ if (x > -FLT_EPSILON && x < FLT_EPSILON) {
+ return 1.0f; // Special case the sinc discontinuity at the origin.
+ }
+ const float xpi = x * static_cast<float>(SK_ScalarPI);
+
+ return ((sk_float_sin(xpi) / xpi) * // sinc(x)
+ (0.54f + 0.46f * sk_float_cos(xpi / fWidth))); // hamming(x)
+ }
+};
+
+class SkLanczosFilter final : public SkBitmapFilter {
+public:
+ SkLanczosFilter(float width = 3.f) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ if (x <= -fWidth || x >= fWidth) {
+ return 0.0f; // Outside of the window.
+ }
+ if (x > -FLT_EPSILON && x < FLT_EPSILON) {
+ return 1.0f; // Special case the discontinuity at the origin.
+ }
+ float xpi = x * static_cast<float>(SK_ScalarPI);
+ return (sk_float_sin(xpi) / xpi) * // sinc(x)
+ sk_float_sin(xpi / fWidth) / (xpi / fWidth); // sinc(x/fWidth)
+ }
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.cpp b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
new file mode 100644
index 0000000000..064f5d19ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
@@ -0,0 +1,660 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkBitmapController.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkUtils.h"
+
+// One-stop-shop shader for:
+// - nearest-neighbor sampling (_nofilter_),
+// - clamp tiling in X and Y both (Clamp_),
+// - with at most a scale and translate matrix (_DX_),
+// - and no extra alpha applied (_opaque_),
+// - sampling from 8888 (_S32_) and drawing to 8888 (_S32_).
+static void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const void* sIn, int x, int y,
+ SkPMColor* dst, int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fAlphaScale == 256);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ int dstY;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ dstY = SkClampMax(mapper.intY(), maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ const SkPMColor* src = s.fPixmap.addr32(0, dstY);
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+    // Check if we're safely inside [0...maxX], so there's no need to clamp each computed index.
+ //
+ if ((uint64_t)SkFractionalIntToInt(fx) <= maxX &&
+ (uint64_t)SkFractionalIntToInt(fx + dx * (count - 1)) <= maxX)
+ {
+ int count4 = count >> 2;
+ for (int i = 0; i < count4; ++i) {
+ SkPMColor src0 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src1 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src2 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src3 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ dst[0] = src0;
+ dst[1] = src1;
+ dst[2] = src2;
+ dst[3] = src3;
+ dst += 4;
+ }
+ for (int i = (count4 << 2); i < count; ++i) {
+ unsigned index = SkFractionalIntToInt(fx);
+ SkASSERT(index <= maxX);
+ *dst++ = src[index];
+ fx += dx;
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)];
+ fx += dx;
+ }
+ }
+}
+
+static void S32_alpha_D32_nofilter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, SkPMColor* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+ SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ // xy is a 32-bit y-coordinate, followed by 16-bit x-coordinates.
+ unsigned y = *xy++;
+ SkASSERT(y < (unsigned)s.fPixmap.height());
+
+ auto row = (const SkPMColor*)( (const char*)s.fPixmap.addr() + y * s.fPixmap.rowBytes() );
+
+ if (1 == s.fPixmap.width()) {
+ sk_memset32(colors, SkAlphaMulQ(row[0], s.fAlphaScale), count);
+ return;
+ }
+
+ // Step 4 xs == 2 uint32_t at a time.
+ while (count >= 4) {
+ uint32_t x01 = *xy++,
+ x23 = *xy++;
+
+ SkPMColor p0 = row[UNPACK_PRIMARY_SHORT (x01)];
+ SkPMColor p1 = row[UNPACK_SECONDARY_SHORT(x01)];
+ SkPMColor p2 = row[UNPACK_PRIMARY_SHORT (x23)];
+ SkPMColor p3 = row[UNPACK_SECONDARY_SHORT(x23)];
+
+ *colors++ = SkAlphaMulQ(p0, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p1, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p2, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p3, s.fAlphaScale);
+
+ count -= 4;
+ }
+
+ // Step 1 x == 1 uint16_t at a time.
+ auto x = (const uint16_t*)xy;
+ while (count --> 0) {
+ *colors++ = SkAlphaMulQ(row[*x++], s.fAlphaScale);
+ }
+}
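+
+// A sketch of the index buffer consumed by S32_alpha_D32_nofilter_DX above:
+//   xy[0]   : one 32-bit y (row) index
+//   xy[1..] : 16-bit x (column) indices, packed two per 32-bit word via PACK_TWO_SHORTS,
+//             with a single 16-bit tail entry when count is odd.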
+
+SkBitmapProcInfo::SkBitmapProcInfo(const SkImage_Base* image, SkTileMode tmx, SkTileMode tmy)
+ : fImage(image)
+ , fTileModeX(tmx)
+ , fTileModeY(tmy)
+ , fBMState(nullptr)
+{}
+
+SkBitmapProcInfo::~SkBitmapProcInfo() {}
+
+
+// true iff the matrix has a scale and no more than an optional translate.
+static bool matrix_only_scale_translate(const SkMatrix& m) {
+ return (m.getType() & ~SkMatrix::kTranslate_Mask) == SkMatrix::kScale_Mask;
+}
+
+/**
+ * For the purposes of drawing bitmaps, if a matrix is "almost" translate
+ * go ahead and treat it as if it were, so that subsequent code can go fast.
+ */
+static bool just_trans_general(const SkMatrix& matrix) {
+ SkASSERT(matrix_only_scale_translate(matrix));
+
+ const SkScalar tol = SK_Scalar1 / 32768;
+
+ return SkScalarNearlyZero(matrix[SkMatrix::kMScaleX] - SK_Scalar1, tol)
+ && SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol);
+}
+
+/**
+ * Determine if the matrix can be treated as integral-only-translate,
+ * for the purpose of filtering.
+ */
+static bool just_trans_integral(const SkMatrix& m) {
+ static constexpr SkScalar tol = SK_Scalar1 / 256;
+
+ return m.getType() <= SkMatrix::kTranslate_Mask
+ && SkScalarNearlyEqual(m.getTranslateX(), SkScalarRoundToScalar(m.getTranslateX()), tol)
+ && SkScalarNearlyEqual(m.getTranslateY(), SkScalarRoundToScalar(m.getTranslateY()), tol);
+}
+
+static bool valid_for_filtering(unsigned dimension) {
+    // For filtering, width and height must fit in 14 bits, since we steal
+    // 2 bits from each to store our 4-bit subpixel data.
+ return (dimension & ~0x3FFF) == 0;
+}
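+
+// When filtering, each packed coordinate word is (x0 << 18) | (subpixel << 14) | x1,
+// i.e. two 14-bit indices plus 4 lerp bits -- hence the 14-bit (<= 16383) limit above.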
+
+bool SkBitmapProcInfo::init(const SkMatrix& inv, const SkPaint& paint) {
+ SkASSERT(inv.isScaleTranslate());
+
+ fPixmap.reset();
+ fInvMatrix = inv;
+ fFilterQuality = paint.getFilterQuality();
+
+ fBMState = SkBitmapController::RequestBitmap(fImage, inv, paint.getFilterQuality(), &fAlloc);
+
+    // Note: we allow the controller to return an empty (zero-dimension) result. Should we?
+ if (nullptr == fBMState || fBMState->pixmap().info().isEmpty()) {
+ return false;
+ }
+ fPixmap = fBMState->pixmap();
+ fInvMatrix = fBMState->invMatrix();
+ fRealInvMatrix = fBMState->invMatrix();
+ fPaintColor = paint.getColor();
+ fFilterQuality = fBMState->quality();
+ SkASSERT(fFilterQuality <= kLow_SkFilterQuality);
+ SkASSERT(fPixmap.addr());
+
+ bool integral_translate_only = just_trans_integral(fInvMatrix);
+ if (!integral_translate_only) {
+ // Most of the scanline procs deal with "unit" texture coordinates, as this
+ // makes it easy to perform tiling modes (repeat = (x & 0xFFFF)). To generate
+ // those, we divide the matrix by its dimensions here.
+ //
+ // We don't do this if we're either trivial (can ignore the matrix) or clamping
+ // in both X and Y since clamping to width,height is just as easy as to 0xFFFF.
+
+ if (fTileModeX != SkTileMode::kClamp || fTileModeY != SkTileMode::kClamp) {
+ fInvMatrix.postIDiv(fPixmap.width(), fPixmap.height());
+ }
+
+ // Now that all possible changes to the matrix have taken place, check
+ // to see if we're really close to a no-scale matrix. If so, explicitly
+ // set it to be so. Subsequent code may inspect this matrix to choose
+ // a faster path in this case.
+
+ // This code will only execute if the matrix has some scale component;
+ // if it's already pure translate then we won't do this inversion.
+
+ if (matrix_only_scale_translate(fInvMatrix)) {
+ SkMatrix forward;
+ if (fInvMatrix.invert(&forward) && just_trans_general(forward)) {
+ fInvMatrix.setTranslate(-forward.getTranslateX(), -forward.getTranslateY());
+ }
+ }
+
+ // Recompute the flag after matrix adjustments.
+ integral_translate_only = just_trans_integral(fInvMatrix);
+ }
+
+ fInvType = fInvMatrix.getType();
+
+ if (kLow_SkFilterQuality == fFilterQuality &&
+ (!valid_for_filtering(fPixmap.width() | fPixmap.height()) ||
+ integral_translate_only)) {
+ fFilterQuality = kNone_SkFilterQuality;
+ }
+
+ return true;
+}
+
+/*
+ * Analyze filter-quality and matrix, and decide how to implement that.
+ *
+ * In general, we cascade down the request level [ High ... None ]
+ * - for a given level, if we can fulfill it, fine, else
+ * - else we downgrade to the next lower level and try again.
+ * We can always fulfill requests for Low and None
+ * - sometimes we will "ignore" Low and give None, but this is likely a legacy perf hack
+ * and may be removed.
+ */
+bool SkBitmapProcState::chooseProcs() {
+ SkASSERT(fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ SkASSERT(fPixmap.colorType() == kN32_SkColorType);
+ SkASSERT(fPixmap.alphaType() == kPremul_SkAlphaType ||
+ fPixmap.alphaType() == kOpaque_SkAlphaType);
+ SkASSERT(fTileModeX == fTileModeY);
+ SkASSERT(fTileModeX != SkTileMode::kDecal);
+ SkASSERT(fFilterQuality < kHigh_SkFilterQuality);
+
+ fInvProc = SkMatrixPriv::GetMapXYProc(fInvMatrix);
+ fInvSx = SkScalarToFixed (fInvMatrix.getScaleX());
+ fInvSxFractionalInt = SkScalarToFractionalInt(fInvMatrix.getScaleX());
+ fInvKy = SkScalarToFixed (fInvMatrix.getSkewY());
+ fInvKyFractionalInt = SkScalarToFractionalInt(fInvMatrix.getSkewY());
+
+ fAlphaScale = SkAlpha255To256(SkColorGetA(fPaintColor));
+
+ bool translate_only = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
+ fMatrixProc = this->chooseMatrixProc(translate_only);
+ SkASSERT(fMatrixProc);
+
+ if (fFilterQuality > kNone_SkFilterQuality) {
+ fSampleProc32 = SkOpts::S32_alpha_D32_filter_DX;
+ } else {
+ fSampleProc32 = S32_alpha_D32_nofilter_DX;
+ }
+
+ fShaderProc32 = this->chooseShaderProc32();
+
+ // our special-case shaderprocs
+ // TODO: move this one into chooseShaderProc32() or pull all that in here.
+ if (nullptr == fShaderProc32
+ && fAlphaScale == 256
+ && fFilterQuality == kNone_SkFilterQuality
+ && SkTileMode::kClamp == fTileModeX) {
+ fShaderProc32 = Clamp_S32_opaque_D32_nofilter_DX_shaderproc;
+ }
+
+ return true;
+}
+
+static void Clamp_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+
+ const int maxX = s.fPixmap.width() - 1;
+ const int maxY = s.fPixmap.height() - 1;
+ int ix = s.fFilterOneX + x;
+ int iy = SkClampMax(s.fFilterOneY + y, maxY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ // clamp to the left
+ if (ix < 0) {
+ int n = SkMin32(-ix, count);
+ sk_memset32(colors, row[0], n);
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ SkASSERT(-ix == n);
+ ix = 0;
+ }
+ // copy the middle
+ if (ix <= maxX) {
+ int n = SkMin32(maxX - ix + 1, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ }
+ SkASSERT(count > 0);
+ // clamp to the right
+ sk_memset32(colors, row[maxX], count);
+}
+
+static inline int sk_int_mod(int x, int n) {
+ SkASSERT(n > 0);
+ if ((unsigned)x >= (unsigned)n) {
+ if (x < 0) {
+ x = n + ~(~x % n);
+ } else {
+ x = x % n;
+ }
+ }
+ return x;
+}
+
+static inline int sk_int_mirror(int x, int n) {
+ x = sk_int_mod(x, 2 * n);
+ if (x >= n) {
+ x = n + ~(x - n);
+ }
+ return x;
+}
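+
+// e.g. with n == 3, sk_int_mirror maps x == 0..5 to 0 1 2 2 1 0, repeating with period 2n.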
+
+static void Repeat_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+
+ const int stopX = s.fPixmap.width();
+ const int stopY = s.fPixmap.height();
+ int ix = s.fFilterOneX + x;
+ int iy = sk_int_mod(s.fFilterOneY + y, stopY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ ix = sk_int_mod(ix, stopX);
+ for (;;) {
+ int n = SkMin32(stopX - ix, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ ix = 0;
+ }
+}
+
+static inline void filter_32_alpha(unsigned t,
+ SkPMColor color0,
+ SkPMColor color1,
+ SkPMColor* dstColor,
+ unsigned alphaScale) {
+ SkASSERT((unsigned)t <= 0xF);
+ SkASSERT(alphaScale <= 256);
+
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*t;
+ uint32_t lo = (color0 & mask) * scale;
+ uint32_t hi = ((color0 >> 8) & mask) * scale;
+
+ scale = 16*t;
+ lo += (color1 & mask) * scale;
+ hi += ((color1 >> 8) & mask) * scale;
+
+ // TODO: if (alphaScale < 256) ...
+ lo = ((lo >> 8) & mask) * alphaScale;
+ hi = ((hi >> 8) & mask) * alphaScale;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
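+
+// filter_32_alpha() above is SIMD-within-a-register bilerp: the 0xFF00FF mask splits the
+// pixel's four bytes into two 16-bit lanes per word (bytes 0 and 2 in lo, bytes 1 and 3 in
+// hi), so one integer multiply scales two channels at once. t is a 4-bit lerp factor, so
+// the two scales (256 - 16*t) and (16*t) always sum to 256.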
+
+static void S32_D32_constX_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(1 == s.fPixmap.width());
+
+ int iY0;
+ int iY1 SK_INIT_TO_AVOID_WARNING;
+ int iSubY SK_INIT_TO_AVOID_WARNING;
+
+ if (kNone_SkFilterQuality != s.fFilterQuality) {
+ SkBitmapProcState::MatrixProc mproc = s.getMatrixProc();
+ uint32_t xy[2];
+
+ mproc(s, xy, 1, x, y);
+
+ iY0 = xy[0] >> 18;
+ iY1 = xy[0] & 0x3FFF;
+ iSubY = (xy[0] >> 14) & 0xF;
+ } else {
+ int yTemp;
+
+ if (s.fInvType > SkMatrix::kTranslate_Mask) {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ // When the matrix has a scale component the setup code in
+            // chooseProcs multiplies the inverse matrix by the inverse of the
+ // bitmap's width and height. Since this method is going to do
+ // its own tiling and sampling we need to undo that here.
+ if (SkTileMode::kClamp != s.fTileModeX || SkTileMode::kClamp != s.fTileModeY) {
+ yTemp = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ yTemp = mapper.intY();
+ }
+ } else {
+ yTemp = s.fFilterOneY + y;
+ }
+
+ const int stopY = s.fPixmap.height();
+ switch (s.fTileModeY) {
+ case SkTileMode::kClamp:
+ iY0 = SkClampMax(yTemp, stopY-1);
+ break;
+ case SkTileMode::kRepeat:
+ iY0 = sk_int_mod(yTemp, stopY);
+ break;
+ case SkTileMode::kMirror:
+ default:
+ iY0 = sk_int_mirror(yTemp, stopY);
+ break;
+ }
+
+#ifdef SK_DEBUG
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ int iY2;
+
+ if (s.fInvType > SkMatrix::kTranslate_Mask &&
+ (SkTileMode::kClamp != s.fTileModeX || SkTileMode::kClamp != s.fTileModeY)) {
+ iY2 = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ iY2 = mapper.intY();
+ }
+
+ switch (s.fTileModeY) {
+ case SkTileMode::kClamp:
+ iY2 = SkClampMax(iY2, stopY-1);
+ break;
+ case SkTileMode::kRepeat:
+ iY2 = sk_int_mod(iY2, stopY);
+ break;
+ case SkTileMode::kMirror:
+ default:
+ iY2 = sk_int_mirror(iY2, stopY);
+ break;
+ }
+
+ SkASSERT(iY0 == iY2);
+ }
+#endif
+ }
+
+ const SkPMColor* row0 = s.fPixmap.addr32(0, iY0);
+ SkPMColor color;
+
+ if (kNone_SkFilterQuality != s.fFilterQuality) {
+ const SkPMColor* row1 = s.fPixmap.addr32(0, iY1);
+ filter_32_alpha(iSubY, *row0, *row1, &color, s.fAlphaScale);
+ } else {
+ if (s.fAlphaScale < 256) {
+ color = SkAlphaMulQ(*row0, s.fAlphaScale);
+ } else {
+ color = *row0;
+ }
+ }
+
+ sk_memset32(colors, color, count);
+}
+
+static void DoNothing_shaderproc(const void*, int x, int y,
+ SkPMColor* colors, int count) {
+ // if we get called, the matrix is too tricky, so we just draw nothing
+ sk_memset32(colors, 0, count);
+}
+
+bool SkBitmapProcState::setupForTranslate() {
+ SkPoint pt;
+ const SkBitmapProcStateAutoMapper mapper(*this, 0, 0, &pt);
+
+ /*
+ * if the translate is larger than our ints, we can get random results, or
+ * worse, we might get 0x80000000, which wreaks havoc on us, since we can't
+ * negate it.
+ */
+ const SkScalar too_big = SkIntToScalar(1 << 30);
+ if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) {
+ return false;
+ }
+
+    // Since we know we're not filtered, we re-purpose these fields to allow
+    // us to go from device -> src coordinates with just an integer add,
+    // rather than running through the inverse matrix.
+ fFilterOneX = mapper.intX();
+ fFilterOneY = mapper.intY();
+
+ return true;
+}
+
+SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() {
+
+ if (kN32_SkColorType != fPixmap.colorType()) {
+ return nullptr;
+ }
+
+ static const unsigned kMask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
+
+ if (1 == fPixmap.width() && 0 == (fInvType & ~kMask)) {
+ if (kNone_SkFilterQuality == fFilterQuality &&
+ fInvType <= SkMatrix::kTranslate_Mask &&
+ !this->setupForTranslate()) {
+ return DoNothing_shaderproc;
+ }
+ return S32_D32_constX_shaderproc;
+ }
+
+ if (fAlphaScale < 256) {
+ return nullptr;
+ }
+ if (fInvType > SkMatrix::kTranslate_Mask) {
+ return nullptr;
+ }
+ if (kNone_SkFilterQuality != fFilterQuality) {
+ return nullptr;
+ }
+
+ SkTileMode tx = fTileModeX;
+ SkTileMode ty = fTileModeY;
+
+ if (SkTileMode::kClamp == tx && SkTileMode::kClamp == ty) {
+ if (this->setupForTranslate()) {
+ return Clamp_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ if (SkTileMode::kRepeat == tx && SkTileMode::kRepeat == ty) {
+ if (this->setupForTranslate()) {
+ return Repeat_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ return nullptr;
+}
+
+#ifdef SK_DEBUG
+
+static void check_scale_nofilter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ unsigned y = *bitmapXY++;
+ SkASSERT(y < my);
+
+ const uint16_t* xptr = reinterpret_cast<const uint16_t*>(bitmapXY);
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(xptr[i] < mx);
+ }
+}
+
+static void check_scale_filter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ uint32_t YY = *bitmapXY++;
+ unsigned y0 = YY >> 18;
+ unsigned y1 = YY & 0x3FFF;
+ SkASSERT(y0 < my);
+ SkASSERT(y1 < my);
+
+ for (int i = 0; i < count; ++i) {
+ uint32_t XX = bitmapXY[i];
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+ SkASSERT(x0 < mx);
+ SkASSERT(x1 < mx);
+ }
+}
+
+void SkBitmapProcState::DebugMatrixProc(const SkBitmapProcState& state,
+ uint32_t bitmapXY[], int count,
+ int x, int y) {
+ SkASSERT(bitmapXY);
+ SkASSERT(count > 0);
+
+ state.fMatrixProc(state, bitmapXY, count, x, y);
+
+ void (*proc)(uint32_t bitmapXY[], int count, unsigned mx, unsigned my);
+
+ // There are two formats possible:
+ // filter -vs- nofilter
+ SkASSERT(state.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ proc = state.fFilterQuality != kNone_SkFilterQuality ?
+ check_scale_filter : check_scale_nofilter;
+ proc(bitmapXY, count, state.fPixmap.width(), state.fPixmap.height());
+}
+
+SkBitmapProcState::MatrixProc SkBitmapProcState::getMatrixProc() const {
+ return DebugMatrixProc;
+}
+
+#endif
+
+/*
+ The storage requirements for the different matrix procs are as follows,
+ where each X or Y is 2 bytes, and N is the number of pixels/elements:
+
+ scale/translate nofilter Y(4bytes) + N * X
+ affine/perspective nofilter N * (X Y)
+ scale/translate filter Y Y + N * (X X)
+ affine filter N * (Y Y X X)
+ */
+int SkBitmapProcState::maxCountForBufferSize(size_t bufferSize) const {
+ int32_t size = static_cast<int32_t>(bufferSize);
+
+ size &= ~3; // only care about 4-byte aligned chunks
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ size -= 4; // the shared Y (or YY) coordinate
+ if (size < 0) {
+ size = 0;
+ }
+ size >>= 1;
+ } else {
+ size >>= 2;
+ }
+
+ if (fFilterQuality != kNone_SkFilterQuality) {
+ size >>= 1;
+ }
+
+ return size;
+}
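+
+// e.g. scale/translate with filtering and bufferSize == 64: the 4-byte-aligned size is 64,
+// minus 4 for the shared YY word leaves 60, >>1 for 16-bit units gives 30, and >>1 again
+// for filtering gives a maximum of 15 pixels.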
+
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.h b/gfx/skia/skia/src/core/SkBitmapProcState.h
new file mode 100644
index 0000000000..c1df487740
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_DEFINED
+#define SkBitmapProcState_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapController.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMipMap.h"
+
+typedef SkFixed3232 SkFractionalInt;
+#define SkScalarToFractionalInt(x) SkScalarToFixed3232(x)
+#define SkFractionalIntToFixed(x) SkFixed3232ToFixed(x)
+#define SkFixedToFractionalInt(x) SkFixedToFixed3232(x)
+#define SkFractionalIntToInt(x) SkFixed3232ToInt(x)
+
+class SkPaint;
+
+struct SkBitmapProcInfo {
+ SkBitmapProcInfo(const SkImage_Base*, SkTileMode tmx, SkTileMode tmy);
+ ~SkBitmapProcInfo();
+
+ const SkImage_Base* fImage;
+
+ SkPixmap fPixmap;
+ SkMatrix fInvMatrix; // This changes based on tile mode.
+ // TODO: combine fInvMatrix and fRealInvMatrix.
+ SkMatrix fRealInvMatrix; // The actual inverse matrix.
+ SkColor fPaintColor;
+ SkTileMode fTileModeX;
+ SkTileMode fTileModeY;
+ SkFilterQuality fFilterQuality;
+ SkMatrix::TypeMask fInvType;
+
+ bool init(const SkMatrix& inverse, const SkPaint&);
+
+private:
+ enum {
+ kBMStateSize = 136 // found by inspection. if too small, we will call new/delete
+ };
+ SkSTArenaAlloc<kBMStateSize> fAlloc;
+ SkBitmapController::State* fBMState;
+};
+
+struct SkBitmapProcState : public SkBitmapProcInfo {
+ SkBitmapProcState(const SkImage_Base* image, SkTileMode tmx, SkTileMode tmy)
+ : SkBitmapProcInfo(image, tmx, tmy) {}
+
+ bool setup(const SkMatrix& inv, const SkPaint& paint) {
+ return this->init(inv, paint) && this->chooseProcs();
+ }
+
+ typedef void (*ShaderProc32)(const void* ctx, int x, int y, SkPMColor[], int count);
+
+ typedef void (*MatrixProc)(const SkBitmapProcState&,
+ uint32_t bitmapXY[],
+ int count,
+ int x, int y);
+
+ typedef void (*SampleProc32)(const SkBitmapProcState&,
+ const uint32_t[],
+ int count,
+ SkPMColor colors[]);
+
+ SkMatrixPriv::MapXYProc fInvProc; // chooseProcs
+ SkFractionalInt fInvSxFractionalInt;
+ SkFractionalInt fInvKyFractionalInt;
+
+ SkFixed fFilterOneX;
+ SkFixed fFilterOneY;
+
+ SkFixed fInvSx; // chooseProcs
+ SkFixed fInvKy; // chooseProcs
+ SkPMColor fPaintPMColor; // chooseProcs - A8 config
+ uint16_t fAlphaScale; // chooseProcs
+
+ /** Given the byte size of the index buffer to be passed to the matrix proc,
+ return the maximum number of resulting pixels that can be computed
+ (i.e. the number of SkPMColor values to be written by the sample proc).
+ This routine takes into account that filtering and scale-vs-affine
+ affect the amount of buffer space needed.
+
+ Only valid to call after chooseProcs (setContext) has been called. It is
+ safe to call this inside the shader's shadeSpan() method.
+ */
+ int maxCountForBufferSize(size_t bufferSize) const;
+
+ // If a shader proc is present, then the corresponding matrix/sample procs
+ // are ignored
+ ShaderProc32 getShaderProc32() const { return fShaderProc32; }
+
+#ifdef SK_DEBUG
+ MatrixProc getMatrixProc() const;
+#else
+ MatrixProc getMatrixProc() const { return fMatrixProc; }
+#endif
+ SampleProc32 getSampleProc32() const { return fSampleProc32; }
+
+private:
+ ShaderProc32 fShaderProc32; // chooseProcs
+ // These are used if the shaderproc is nullptr
+ MatrixProc fMatrixProc; // chooseProcs
+ SampleProc32 fSampleProc32; // chooseProcs
+
+ MatrixProc chooseMatrixProc(bool trivial_matrix);
+ bool chooseProcs(); // caller must have called init() first (on our base-class)
+ ShaderProc32 chooseShaderProc32();
+
+ // Return false if we failed to setup for fast translate (e.g. overflow)
+ bool setupForTranslate();
+
+#ifdef SK_DEBUG
+ static void DebugMatrixProc(const SkBitmapProcState&,
+ uint32_t[], int count, int x, int y);
+#endif
+};
+
+/* Macros for packing and unpacking pairs of 16-bit values in a 32-bit uint.
+   Used to allow access to a stream of uint16_t either one at a time, or
+   two at a time by unpacking a uint32_t.
+ */
+#ifdef SK_CPU_BENDIAN
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) << 16 | (sec))
+ #define UNPACK_PRIMARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+ #define UNPACK_SECONDARY_SHORT(packed) ((packed) & 0xFFFF)
+#else
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) | ((sec) << 16))
+ #define UNPACK_PRIMARY_SHORT(packed) ((packed) & 0xFFFF)
+ #define UNPACK_SECONDARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+#endif
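+
+// e.g. on a little-endian build, PACK_TWO_SHORTS(3, 7) == 0x00070003, and
+// UNPACK_PRIMARY_SHORT / UNPACK_SECONDARY_SHORT recover 3 and 7 respectively.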
+
+#ifdef SK_DEBUG
+ static inline uint32_t pack_two_shorts(U16CPU pri, U16CPU sec) {
+ SkASSERT((uint16_t)pri == pri);
+ SkASSERT((uint16_t)sec == sec);
+ return PACK_TWO_SHORTS(pri, sec);
+ }
+#else
+ #define pack_two_shorts(pri, sec) PACK_TWO_SHORTS(pri, sec)
+#endif
+
+// Helper class for mapping the middle of pixel (x, y) into SkFractionalInt bitmap space.
+// Discussion:
+// Overall, this code takes a point in destination space, and uses the center of the pixel
+// at (x, y) to determine the sample point in source space. It then adjusts the pixel by different
+// amounts based in filtering and tiling.
+// This code can be broken into two main cases based on filtering:
+// * no filtering (nearest neighbor) - when using nearest neighbor filtering all tile modes reduce
+//   the sampled point by one ulp. If a sample point pt lies precisely on XXX.5 then it is forced
+//   down when positive, making 1/2 + 1/2 = .999999 instead of 1.0.
+// * filtering - in the filtering case, the code calculates the -1/2 shift for starting the
+// bilerp kernel. There is a twist; there is a big difference between clamp and the other tile
+//   modes. In repeat and mirror the matrix has been reduced by an additional 1/width and 1/height
+// factor. This maps from destination space to [0, 1) (instead of source space) to allow easy
+// modulo arithmetic. This means that the -1/2 needed by bilerp is actually 1/2 * 1/width for x
+// and 1/2 * 1/height for y. This is what happens when the poorly named fFilterOne{X|Y} is
+// divided by two.
+class SkBitmapProcStateAutoMapper {
+public:
+ SkBitmapProcStateAutoMapper(const SkBitmapProcState& s, int x, int y,
+ SkPoint* scalarPoint = nullptr) {
+ SkPoint pt;
+ s.fInvProc(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &pt);
+
+ SkFixed biasX, biasY;
+ if (s.fFilterQuality == kNone_SkFilterQuality) {
+ // SkFixed epsilon bias to ensure inverse-mapped bitmap coordinates are rounded
+ // consistently WRT geometry. Note that we only need the bias for positive scales:
+ // for negative scales, the rounding is intrinsically correct.
+ // We scale it to persist SkFractionalInt -> SkFixed conversions.
+ biasX = (s.fInvMatrix.getScaleX() > 0);
+ biasY = (s.fInvMatrix.getScaleY() > 0);
+ } else {
+ biasX = s.fFilterOneX >> 1;
+ biasY = s.fFilterOneY >> 1;
+ }
+
+ // punt to unsigned for defined underflow behavior
+ fX = (SkFractionalInt)((uint64_t)SkScalarToFractionalInt(pt.x()) -
+ (uint64_t)SkFixedToFractionalInt(biasX));
+ fY = (SkFractionalInt)((uint64_t)SkScalarToFractionalInt(pt.y()) -
+ (uint64_t)SkFixedToFractionalInt(biasY));
+
+ if (scalarPoint) {
+ scalarPoint->set(pt.x() - SkFixedToScalar(biasX),
+ pt.y() - SkFixedToScalar(biasY));
+ }
+ }
+
+ SkFractionalInt fractionalIntX() const { return fX; }
+ SkFractionalInt fractionalIntY() const { return fY; }
+
+ SkFixed fixedX() const { return SkFractionalIntToFixed(fX); }
+ SkFixed fixedY() const { return SkFractionalIntToFixed(fY); }
+
+ int intX() const { return SkFractionalIntToInt(fX); }
+ int intY() const { return SkFractionalIntToInt(fY); }
+
+private:
+ SkFractionalInt fX, fY;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
new file mode 100644
index 0000000000..4baa5d3a44
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
@@ -0,0 +1,830 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// The copyright below was added in 2009, but I see no record of moto contributions...?
+
+/* NEON optimized code (C) COPYRIGHT 2009 Motorola
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkUtils.h"
+
+/*
+ * The decal_ functions require that
+ * 1. dx > 0
+ * 2. [fx, fx+dx, fx+2dx, fx+3dx, ... fx+(count-1)dx] are all <= maxX
+ *
+ * In addition, we use SkFractionalInt to keep more fractional precision than
+ * just SkFixed, so we will abort the decal_ call if dx is very small, since
+ * the decal_ function just operates on SkFixed. If that were changed, we could
+ * skip the very_small test here.
+ */
+static inline bool can_truncate_to_fixed_for_decal(SkFixed fx,
+ SkFixed dx,
+ int count, unsigned max) {
+ SkASSERT(count > 0);
+
+ // if decal_ kept SkFractionalInt precision, this would just be dx <= 0
+    // The 1/256 is made up; we just don't want visible accumulated error
+    // if we truncate frDx and lose its low bits.
+ if (dx <= SK_Fixed1 / 256) {
+ return false;
+ }
+
+ // Note: it seems the test should be (fx <= max && lastFx <= max); but
+    // historically it's been a strict inequality check, and changing it produces
+ // unexpected diffs. Further investigation is needed.
+
+ // We cast to unsigned so we don't have to check for negative values, which
+ // will now appear as very large positive values, and thus fail our test!
+ if ((unsigned)SkFixedFloorToInt(fx) >= max) {
+ return false;
+ }
+
+ // Promote to 64bit (48.16) to avoid overflow.
+ const uint64_t lastFx = fx + sk_64_mul(dx, count - 1);
+
+ return SkTFitsIn<int32_t>(lastFx) && (unsigned)SkFixedFloorToInt(SkTo<int32_t>(lastFx)) < max;
+}
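+
+// e.g. fx == 0.5 and dx == 1.0 (as SkFixed), count == 4, max == 8: dx > SK_Fixed1/256,
+// floor(0.5) == 0 < 8, and lastFx == 3.5 both fits in 32 bits and floors to 3 < 8,
+// so the decal fast path is safe.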
+
+// When not filtering, we store 32-bit y, 16-bit x, 16-bit x, 16-bit x, ...
+// When filtering we write out 32-bit encodings, pairing 14.4 x0 with 14-bit x1.
+
+// The clamp routines may try to fall into one of these unclamped decal fast-paths.
+// (Only clamp works in the right coordinate space to check for decal.)
+static void decal_nofilter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ // can_truncate_to_fixed_for_decal() checked only that stepping fx+=dx count-1
+ // times doesn't overflow fx, so we take unusual care not to step count times.
+ for (; count > 2; count -= 2) {
+ *dst++ = pack_two_shorts( (fx + 0) >> 16,
+ (fx + dx) >> 16);
+ fx += dx+dx;
+ }
+
+ SkASSERT(count <= 2);
+ switch (count) {
+ case 2: ((uint16_t*)dst)[1] = SkToU16((fx + dx) >> 16);
+ case 1: ((uint16_t*)dst)[0] = SkToU16((fx + 0) >> 16);
+ }
+}
+
+// A generic implementation for unfiltered scale+translate, templated on tiling method.
+template <unsigned (*tile)(SkFixed, int), bool tryDecal>
+static void nofilter_scale(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+    // Write out our 32-bit y, and get our initial fx.
+ SkFractionalInt fx;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ *xy++ = tile(mapper.fixedY(), s.fPixmap.height() - 1);
+ fx = mapper.fractionalIntX();
+ }
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ if (0 == maxX) {
+ // If width == 1, all the x-values must refer to that pixel, and must be zero.
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+ if (tryDecal) {
+ const SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ const SkFixed fixedDx = SkFractionalIntToFixed(dx);
+
+ if (can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
+ decal_nofilter_scale(xy, fixedFx, fixedDx, count);
+ return;
+ }
+ }
+
+ // Remember, each x-coordinate is 16-bit.
+ for (; count >= 2; count -= 2) {
+ *xy++ = pack_two_shorts(tile(SkFractionalIntToFixed(fx ), maxX),
+ tile(SkFractionalIntToFixed(fx + dx), maxX));
+ fx += dx+dx;
+ }
+
+ auto xx = (uint16_t*)xy;
+ while (count --> 0) {
+ *xx++ = tile(SkFractionalIntToFixed(fx), maxX);
+ fx += dx;
+ }
+}
+
+// Extract the high four fractional bits from fx, the lerp parameter when filtering.
+static unsigned extract_low_bits_clamp(SkFixed fx, int /*max*/) {
+    // If fx is already scaled up by max, as in clamp/decal,
+ // just grab the high four fractional bits.
+ return (fx >> 12) & 0xf;
+}
+static unsigned extract_low_bits_repeat_mirror(SkFixed fx, int max) {
+ // In repeat or mirror fx is in [0,1], so scale up by max first.
+ // TODO: remove the +1 here and the -1 at the call sites...
+ return extract_low_bits_clamp((fx & 0xffff) * (max+1), max);
+}
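+
+// e.g. in clamp space, fx == 1.5f is 0x18000 as 16.16 fixed; (0x18000 >> 12) & 0xF == 8,
+// i.e. a lerp factor of 8/16 == 0.5 between source columns 1 and 2.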
+
+template <unsigned (*tile)(SkFixed, int), unsigned (*extract_low_bits)(SkFixed, int), bool tryDecal>
+static void filter_scale(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+
+ auto pack = [](SkFixed f, unsigned max, SkFixed one) {
+ unsigned i = tile(f, max);
+ i = (i << 4) | extract_low_bits(f, max);
+ return (i << 14) | (tile((f + one), max));
+ };
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt fx;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ *xy++ = pack(fy, maxY, s.fFilterOneY);
+ // now initialize fx
+ fx = mapper.fractionalIntX();
+ }
+
+ // For historical reasons we check both ends are < maxX rather than <= maxX.
+ // TODO: try changing this? See also can_truncate_to_fixed_for_decal().
+ if (tryDecal &&
+ (unsigned)SkFractionalIntToInt(fx ) < maxX &&
+ (unsigned)SkFractionalIntToInt(fx + dx*(count-1)) < maxX) {
+ while (count --> 0) {
+ SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ SkASSERT((fixedFx >> (16 + 14)) == 0);
+ *xy++ = (fixedFx >> 12 << 14) | ((fixedFx >> 16) + 1);
+ fx += dx;
+ }
+ return;
+ }
+
+ while (count --> 0) {
+ SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ *xy++ = pack(fixedFx, maxX, s.fFilterOneX);
+ fx += dx;
+ }
+}
+
+// Helper to ensure that when we shift down, we do it w/o sign-extension
+// so the caller doesn't have to manually mask off the top 16 bits.
+static inline unsigned SK_USHIFT16(unsigned x) {
+ return x >> 16;
+}
+
+static unsigned clamp(SkFixed fx, int max) {
+ return SkClampMax(fx >> 16, max);
+}
+static unsigned repeat(SkFixed fx, int max) {
+ SkASSERT(max < 65535);
+ return SK_USHIFT16((unsigned)(fx & 0xFFFF) * (max + 1));
+}
+static unsigned mirror(SkFixed fx, int max) {
+ SkASSERT(max < 65535);
+ // s is 0xFFFFFFFF if we're on an odd interval, or 0 if an even interval
+ SkFixed s = SkLeftShift(fx, 15) >> 31;
+
+ // This should be exactly the same as repeat(fx ^ s, max) from here on.
+ return SK_USHIFT16( ((fx ^ s) & 0xFFFF) * (max + 1) );
+}
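+
+// The sign trick in mirror(): SkLeftShift(fx, 15) >> 31 arithmetically smears fx's interval
+// parity bit (bit 16) across the word, so s == ~0 on odd intervals and 0 on even ones;
+// fx ^ s then reflects the fraction, e.g. fx == 1.25 samples near 0.75 of the image width.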
+
+// Mirror/Mirror's always just portable code.
+static const SkBitmapProcState::MatrixProc MirrorX_MirrorY_Procs[] = {
+ nofilter_scale<mirror, false>,
+ filter_scale<mirror, extract_low_bits_repeat_mirror, false>,
+};
+
+// Clamp/Clamp and Repeat/Repeat have NEON or portable implementations.
+#if defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+ // TODO: this is a fine drop-in for decal_nofilter_scale() generally.
+ static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ if (count >= 8) {
+ // SkFixed is 16.16 fixed point
+ SkFixed dx8 = dx * 8;
+ int32x4_t vdx8 = vdupq_n_s32(dx8);
+
+ // setup lbase and hbase
+ int32x4_t lbase, hbase;
+ lbase = vdupq_n_s32(fx);
+ lbase = vsetq_lane_s32(fx + dx, lbase, 1);
+ lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
+ lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
+ hbase = lbase + vdupq_n_s32(4 * dx);
+
+ do {
+ // store the upper 16 bits
+ vst1q_u32(dst, vreinterpretq_u32_s16(
+ vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
+ ));
+
+ // on to the next group of 8
+ lbase += vdx8;
+ hbase += vdx8;
+            dst += 4; // we consumed 8 elements, but each 32-bit word packs two 16-bit results
+ count -= 8;
+ fx += dx8;
+ } while (count >= 8);
+ }
+
+ uint16_t* xx = (uint16_t*)dst;
+ for (int i = count; i > 0; --i) {
+ *xx++ = SkToU16(fx >> 16); fx += dx;
+ }
+ }
+
+ static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ if (count >= 8) {
+ SkFixed dx8 = dx * 8;
+ int32x4_t vdx8 = vdupq_n_s32(dx8);
+
+ int32x4_t wide_fx, wide_fx2;
+ wide_fx = vdupq_n_s32(fx);
+ wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
+ wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
+ wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);
+
+ wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));
+
+ while (count >= 8) {
+ int32x4_t wide_out;
+ int32x4_t wide_out2;
+
+ wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
+ wide_out = wide_out | (vshrq_n_s32(wide_fx,16) + vdupq_n_s32(1));
+
+ wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
+ wide_out2 = wide_out2 | (vshrq_n_s32(wide_fx2,16) + vdupq_n_s32(1));
+
+ vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
+ vst1q_u32(dst+4, vreinterpretq_u32_s32(wide_out2));
+
+ dst += 8;
+ fx += dx8;
+ wide_fx += vdx8;
+ wide_fx2 += vdx8;
+ count -= 8;
+ }
+ }
+
+ if (count & 1)
+ {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+ while ((count -= 2) >= 0)
+ {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+ }
+
+ static inline int16x8_t clamp8(int32x4_t low, int32x4_t high, unsigned max) {
+ int16x8_t res;
+
+ // get the hi 16s of all those 32s
+ res = vuzpq_s16(vreinterpretq_s16_s32(low), vreinterpretq_s16_s32(high)).val[1];
+
+ // clamp
+ res = vmaxq_s16(res, vdupq_n_s16(0));
+ res = vminq_s16(res, vdupq_n_s16(max));
+
+ return res;
+ }
+
+ static inline int32x4_t clamp4(int32x4_t f, unsigned max) {
+ int32x4_t res;
+
+ // get the hi 16s of all those 32s
+ res = vshrq_n_s32(f, 16);
+
+ // clamp
+ res = vmaxq_s32(res, vdupq_n_s32(0));
+ res = vminq_s32(res, vdupq_n_s32(max));
+
+ return res;
+ }
+
+ static inline int32x4_t extract_low_bits_clamp4(int32x4_t fx, unsigned) {
+ int32x4_t ret;
+
+ ret = vshrq_n_s32(fx, 12);
+
+ /* We don't need the mask below because the caller will
+ * overwrite the non-masked bits
+ */
+ //ret = vandq_s32(ret, vdupq_n_s32(0xF));
+
+ return ret;
+ }
+
+ static inline int16x8_t repeat8(int32x4_t low, int32x4_t high, unsigned max) {
+ uint16x8_t res;
+ uint32x4_t tmpl, tmph;
+
+ // get the lower 16 bits
+ res = vuzpq_u16(vreinterpretq_u16_s32(low), vreinterpretq_u16_s32(high)).val[0];
+
+ // bare multiplication, not SkFixedMul
+ tmpl = vmull_u16(vget_low_u16(res), vdup_n_u16(max+1));
+ tmph = vmull_u16(vget_high_u16(res), vdup_n_u16(max+1));
+
+ // extraction of the 16 upper bits
+ res = vuzpq_u16(vreinterpretq_u16_u32(tmpl), vreinterpretq_u16_u32(tmph)).val[1];
+
+ return vreinterpretq_s16_u16(res);
+ }
+
+ static inline int32x4_t repeat4(int32x4_t f, unsigned max) {
+ uint16x4_t res;
+ uint32x4_t tmp;
+
+ // get the lower 16 bits
+ res = vmovn_u32(vreinterpretq_u32_s32(f));
+
+ // bare multiplication, not SkFixedMul
+ tmp = vmull_u16(res, vdup_n_u16(max+1));
+
+ // extraction of the 16 upper bits
+ tmp = vshrq_n_u32(tmp, 16);
+
+ return vreinterpretq_s32_u32(tmp);
+ }
+
+ static inline int32x4_t extract_low_bits_repeat_mirror4(int32x4_t fx, unsigned max) {
+ uint16x4_t res;
+ uint32x4_t tmp;
+ int32x4_t ret;
+
+ // get the lower 16 bits
+ res = vmovn_u32(vreinterpretq_u32_s32(fx));
+
+ // bare multiplication, not SkFixedMul
+ tmp = vmull_u16(res, vdup_n_u16(max + 1));
+
+ // shift and mask
+ ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);
+
+ /* We don't need the mask below because the caller will
+ * overwrite the non-masked bits
+ */
+ //ret = vandq_s32(ret, vdupq_n_s32(0xF));
+
+ return ret;
+ }
+
+ template <unsigned (*tile)(SkFixed, int),
+ int16x8_t (*tile8)(int32x4_t, int32x4_t, unsigned),
+ bool tryDecal>
+ static void nofilter_scale_neon(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+ // we store y, x, x, x, x, x
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ *xy++ = tile(mapper.fixedY(), maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ if (0 == maxX) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+ // test if we don't need to apply the tile proc
+ const SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ const SkFixed fixedDx = SkFractionalIntToFixed(dx);
+ if (tryDecal && can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
+ decal_nofilter_scale_neon(xy, fixedFx, fixedDx, count);
+ return;
+ }
+
+ if (count >= 8) {
+ SkFractionalInt dx2 = dx+dx;
+ SkFractionalInt dx4 = dx2+dx2;
+ SkFractionalInt dx8 = dx4+dx4;
+
+ // now build fx/fx+dx/fx+2dx/fx+3dx
+ SkFractionalInt fx1, fx2, fx3;
+ int32x4_t lbase, hbase;
+ int16_t *dst16 = (int16_t *)xy;
+
+ fx1 = fx+dx;
+ fx2 = fx1+dx;
+ fx3 = fx2+dx;
+
+ lbase = vdupq_n_s32(SkFractionalIntToFixed(fx));
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx1), lbase, 1);
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx2), lbase, 2);
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx3), lbase, 3);
+ hbase = vaddq_s32(lbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
+
+ // store & bump
+ while (count >= 8) {
+
+ int16x8_t fx8;
+
+ fx8 = tile8(lbase, hbase, maxX);
+
+ vst1q_s16(dst16, fx8);
+
+            // bump the bases and move on to the next group of 8
+ lbase = vaddq_s32 (lbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ hbase = vaddq_s32 (hbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ dst16 += 8;
+ count -= 8;
+ fx += dx8;
+ }
+ xy = (uint32_t *) dst16;
+ }
+
+ uint16_t* xx = (uint16_t*)xy;
+ for (int i = count; i > 0; --i) {
+ *xx++ = tile(SkFractionalIntToFixed(fx), maxX);
+ fx += dx;
+ }
+ }
+
+ template <unsigned (*tile )(SkFixed, int),
+ int32x4_t (*tile4)(int32x4_t, unsigned),
+ unsigned (*extract_low_bits )(SkFixed, int),
+ int32x4_t (*extract_low_bits4)(int32x4_t, unsigned),
+ bool tryDecal>
+ static void filter_scale_neon(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+
+ auto pack = [&](SkFixed f, unsigned max, SkFixed one) {
+ unsigned i = tile(f, max);
+ i = (i << 4) | extract_low_bits(f, max);
+ return (i << 14) | (tile((f + one), max));
+ };
+
+ auto pack4 = [&](int32x4_t f, unsigned max, SkFixed one) {
+ int32x4_t ret, res;
+
+ res = tile4(f, max);
+
+ ret = extract_low_bits4(f, max);
+ ret = vsliq_n_s32(ret, res, 4);
+
+ res = tile4(f + vdupq_n_s32(one), max);
+ ret = vorrq_s32(vshlq_n_s32(ret, 14), res);
+
+ return ret;
+ };
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFixed one = s.fFilterOneX;
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt fx;
+
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ *xy++ = pack(fy, maxY, s.fFilterOneY);
+ // now initialize fx
+ fx = mapper.fractionalIntX();
+ }
+
+ // test if we don't need to apply the tile proc
+ const SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ const SkFixed fixedDx = SkFractionalIntToFixed(dx);
+ if (tryDecal && can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
+ decal_filter_scale_neon(xy, fixedFx, fixedDx, count);
+ return;
+ }
+
+ if (count >= 4) {
+ int32x4_t wide_fx;
+
+ wide_fx = vdupq_n_s32(SkFractionalIntToFixed(fx));
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx), wide_fx, 1);
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx), wide_fx, 2);
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx+dx), wide_fx, 3);
+
+ while (count >= 4) {
+ int32x4_t res;
+
+ res = pack4(wide_fx, maxX, one);
+
+ vst1q_u32(xy, vreinterpretq_u32_s32(res));
+
+ wide_fx += vdupq_n_s32(SkFractionalIntToFixed(dx+dx+dx+dx));
+ fx += dx+dx+dx+dx;
+ xy += 4;
+ count -= 4;
+ }
+ }
+
+ while (--count >= 0) {
+ *xy++ = pack(SkFractionalIntToFixed(fx), maxX, one);
+ fx += dx;
+ }
+ }
+
+ static const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
+ nofilter_scale_neon<clamp, clamp8, true>,
+ filter_scale_neon<clamp,
+ clamp4,
+ extract_low_bits_clamp,
+ extract_low_bits_clamp4,
+ true>,
+ };
+
+ static const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
+ nofilter_scale_neon<repeat, repeat8, false>,
+ filter_scale_neon<repeat,
+ repeat4,
+ extract_low_bits_repeat_mirror,
+ extract_low_bits_repeat_mirror4,
+ false>,
+ };
+
+#else
+ static const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
+ nofilter_scale<clamp, true>,
+ filter_scale<clamp, extract_low_bits_clamp, true>,
+ };
+
+ static const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
+ nofilter_scale<repeat, false>,
+ filter_scale<repeat, extract_low_bits_repeat_mirror, false>,
+ };
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// This next chunk has some specializations for unfiltered translate-only matrices.
+
+static inline U16CPU int_clamp(int x, int n) {
+ if (x < 0) { x = 0; }
+ if (x >= n) { x = n - 1; }
+ return x;
+}
+
+/* returns 0...(n-1) given any x (positive or negative).
+
+ As an example, if n (which is always positive) is 5...
+
+ x: -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8
+ returns: 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3
+ */
+static inline int sk_int_mod(int x, int n) {
+ SkASSERT(n > 0);
+ if ((unsigned)x >= (unsigned)n) {
+ if (x < 0) {
+ x = n + ~(~x % n);
+ } else {
+ x = x % n;
+ }
+ }
+ return x;
+}
+
+static inline U16CPU int_repeat(int x, int n) {
+ return sk_int_mod(x, n);
+}
+
+static inline U16CPU int_mirror(int x, int n) {
+ x = sk_int_mod(x, 2 * n);
+ if (x >= n) {
+ x = n + ~(x - n);
+ }
+ return x;
+}
+
+static void fill_sequential(uint16_t xptr[], int pos, int count) {
+ while (count --> 0) {
+ *xptr++ = pos++;
+ }
+}
+
+static void fill_backwards(uint16_t xptr[], int pos, int count) {
+ while (count --> 0) {
+ SkASSERT(pos >= 0);
+ *xptr++ = pos--;
+ }
+}
+
+static void clampx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ *xy++ = int_clamp(mapper.intY(), s.fPixmap.height());
+ int xpos = mapper.intX();
+
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ int n;
+
+ // fill before 0 as needed
+ if (xpos < 0) {
+ n = -xpos;
+ if (n > count) {
+ n = count;
+ }
+ memset(xptr, 0, n * sizeof(uint16_t));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ xptr += n;
+ xpos = 0;
+ }
+
+ // fill in 0..width-1 if needed
+ if (xpos < width) {
+ n = width - xpos;
+ if (n > count) {
+ n = count;
+ }
+ fill_sequential(xptr, xpos, n);
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ xptr += n;
+ }
+
+ // fill the remaining with the max value
+ sk_memset16(xptr, width - 1, count);
+}
+
+static void repeatx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ *xy++ = int_repeat(mapper.intY(), s.fPixmap.height());
+ int xpos = mapper.intX();
+
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ int start = sk_int_mod(xpos, width);
+ int n = width - start;
+ if (n > count) {
+ n = count;
+ }
+ fill_sequential(xptr, start, n);
+ xptr += n;
+ count -= n;
+
+ while (count >= width) {
+ fill_sequential(xptr, 0, width);
+ xptr += width;
+ count -= width;
+ }
+
+ if (count > 0) {
+ fill_sequential(xptr, 0, count);
+ }
+}
+
+static void mirrorx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ *xy++ = int_mirror(mapper.intY(), s.fPixmap.height());
+ int xpos = mapper.intX();
+
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ // need to know our start, and our initial phase (forward or backward)
+ bool forward;
+ int n;
+ int start = sk_int_mod(xpos, 2 * width);
+ if (start >= width) {
+ start = width + ~(start - width);
+ forward = false;
+ n = start + 1; // [start .. 0]
+ } else {
+ forward = true;
+ n = width - start; // [start .. width)
+ }
+ if (n > count) {
+ n = count;
+ }
+ if (forward) {
+ fill_sequential(xptr, start, n);
+ } else {
+ fill_backwards(xptr, start, n);
+ }
+ forward = !forward;
+ xptr += n;
+ count -= n;
+
+ while (count >= width) {
+ if (forward) {
+ fill_sequential(xptr, 0, width);
+ } else {
+ fill_backwards(xptr, width - 1, width);
+ }
+ forward = !forward;
+ xptr += width;
+ count -= width;
+ }
+
+ if (count > 0) {
+ if (forward) {
+ fill_sequential(xptr, 0, count);
+ } else {
+ fill_backwards(xptr, width - 1, count);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// The main entry point to the file, choosing between everything above.
+
+SkBitmapProcState::MatrixProc SkBitmapProcState::chooseMatrixProc(bool translate_only_matrix) {
+ SkASSERT(fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ SkASSERT(fTileModeX == fTileModeY);
+ SkASSERT(fTileModeX != SkTileMode::kDecal);
+
+ // Check for our special case translate methods when there is no scale/affine/perspective.
+ if (translate_only_matrix && kNone_SkFilterQuality == fFilterQuality) {
+ switch (fTileModeX) {
+ default: SkASSERT(false);
+ case SkTileMode::kClamp: return clampx_nofilter_trans;
+ case SkTileMode::kRepeat: return repeatx_nofilter_trans;
+ case SkTileMode::kMirror: return mirrorx_nofilter_trans;
+ }
+ }
+
+ // The arrays are all [ nofilter, filter ].
+ int index = fFilterQuality > kNone_SkFilterQuality ? 1 : 0;
+
+ if (fTileModeX == SkTileMode::kClamp) {
+ // clamp gets special version of filterOne, working in non-normalized space (allowing decal)
+ fFilterOneX = SK_Fixed1;
+ fFilterOneY = SK_Fixed1;
+ return ClampX_ClampY_Procs[index];
+ }
+
+ // all remaining procs use this form for filterOne, putting them into normalized space.
+ fFilterOneX = SK_Fixed1 / fPixmap.width();
+ fFilterOneY = SK_Fixed1 / fPixmap.height();
+
+ if (fTileModeX == SkTileMode::kRepeat) {
+ return RepeatX_RepeatY_Procs[index];
+ }
+
+ return MirrorX_MirrorY_Procs[index];
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapScaler.cpp b/gfx/skia/skia/src/core/SkBitmapScaler.cpp
new file mode 100644
index 0000000000..c8f0340171
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapScaler.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBitmapScaler.h"
+#include "src/core/SkBitmapFilter.h"
+#include "src/core/SkConvolver.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkTArray.h"
+
+// SkResizeFilter ----------------------------------------------------------------
+
+// Encapsulates computation and storage of the filters required for one complete
+// resize operation.
+class SkResizeFilter {
+public:
+ SkResizeFilter(SkBitmapScaler::ResizeMethod method,
+ int srcFullWidth, int srcFullHeight,
+ float destWidth, float destHeight,
+ const SkRect& destSubset);
+ ~SkResizeFilter() { delete fBitmapFilter; }
+
+ // Returns the filled filter values.
+ const SkConvolutionFilter1D& xFilter() { return fXFilter; }
+ const SkConvolutionFilter1D& yFilter() { return fYFilter; }
+
+private:
+
+ SkBitmapFilter* fBitmapFilter;
+
+  // Computes one set of filters either horizontally or vertically. The caller
+  // specifies a "min" and "max" rather than left/right or top/bottom so that
+  // the same code can be re-used in each dimension.
+  //
+  // |srcSize| is the size of the source in the dimension being computed.
+  // |destSubsetLo| and |destSubsetSize| give the range of destination values
+  // to compute, and |scale| is the scale factor for the transform.
+
+ void computeFilters(int srcSize,
+ float destSubsetLo, float destSubsetSize,
+ float scale,
+ SkConvolutionFilter1D* output);
+
+ SkConvolutionFilter1D fXFilter;
+ SkConvolutionFilter1D fYFilter;
+};
+
+SkResizeFilter::SkResizeFilter(SkBitmapScaler::ResizeMethod method,
+ int srcFullWidth, int srcFullHeight,
+ float destWidth, float destHeight,
+ const SkRect& destSubset) {
+
+ SkASSERT(method >= SkBitmapScaler::RESIZE_FirstMethod &&
+ method <= SkBitmapScaler::RESIZE_LastMethod);
+
+ fBitmapFilter = nullptr;
+ switch(method) {
+ case SkBitmapScaler::RESIZE_BOX:
+ fBitmapFilter = new SkBoxFilter;
+ break;
+ case SkBitmapScaler::RESIZE_TRIANGLE:
+ fBitmapFilter = new SkTriangleFilter;
+ break;
+ case SkBitmapScaler::RESIZE_MITCHELL:
+ fBitmapFilter = new SkMitchellFilter;
+ break;
+ case SkBitmapScaler::RESIZE_HAMMING:
+ fBitmapFilter = new SkHammingFilter;
+ break;
+ case SkBitmapScaler::RESIZE_LANCZOS3:
+ fBitmapFilter = new SkLanczosFilter;
+ break;
+ }
+
+
+ float scaleX = destWidth / srcFullWidth;
+ float scaleY = destHeight / srcFullHeight;
+
+ this->computeFilters(srcFullWidth, destSubset.fLeft, destSubset.width(),
+ scaleX, &fXFilter);
+ if (srcFullWidth == srcFullHeight &&
+ destSubset.fLeft == destSubset.fTop &&
+      destSubset.width() == destSubset.height() &&
+ scaleX == scaleY) {
+ fYFilter = fXFilter;
+ } else {
+ this->computeFilters(srcFullHeight, destSubset.fTop, destSubset.height(),
+ scaleY, &fYFilter);
+ }
+}
+
+// TODO(egouriou): Take advantage of periods in the convolution.
+// Practical resizing filters are periodic outside of the border area.
+// For Lanczos, a scaling by a (reduced) factor of p/q (q pixels in the
+// source become p pixels in the destination) will have a period of p.
+// A nice consequence is a period of 1 when downscaling by an integral
+// factor. Downscaling from typical display resolutions is also bound
+// to produce interesting periods as those are chosen to have multiple
+// small factors.
+// Small periods reduce computational load and improve cache usage if
+// the coefficients can be shared. For periods of 1 we can consider
+// loading the factors only once outside the borders.
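+// For instance (illustrative): a downscale from 100 to 75 pixels reduces to a
+// ratio of 3/4, so the filter weights would repeat with period 3; an integral
+// downscale such as 100 to 50 (ratio 1/2) would have period 1.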
+void SkResizeFilter::computeFilters(int srcSize,
+ float destSubsetLo, float destSubsetSize,
+ float scale,
+ SkConvolutionFilter1D* output) {
+ float destSubsetHi = destSubsetLo + destSubsetSize; // [lo, hi)
+
+ // When we're doing a magnification, the scale will be larger than one. This
+ // means the destination pixels are much smaller than the source pixels, and
+ // that the range covered by the filter won't necessarily cover any source
+ // pixel boundaries. Therefore, we use these clamped values (max of 1) for
+ // some computations.
+ float clampedScale = SkTMin(1.0f, scale);
+
+ // This is how many source pixels from the center we need to count
+ // to support the filtering function.
+ float srcSupport = fBitmapFilter->width() / clampedScale;
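+ // e.g. with a filter width of 2 (the classic Mitchell support) and a 4x
+ // downscale (scale = 0.25, so clampedScale = 0.25), this gives
+ // srcSupport = 2 / 0.25 = 8 source pixels on either side of the center.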
+
+ float invScale = 1.0f / scale;
+
+ SkSTArray<64, float, true> filterValuesArray;
+ SkSTArray<64, SkConvolutionFilter1D::ConvolutionFixed, true> fixedFilterValuesArray;
+
+ // Loop over all pixels in the output range. We will generate one set of
+ // filter values for each one. Those values will tell us how to blend the
+ // source pixels to compute the destination pixel.
+
+ // This is the pixel in the source directly under the pixel in the dest.
+ // Note that we base computations on the "center" of the pixels. To see
+ // why, observe that the destination pixel at coordinates (0, 0) in a 5.0x
+ // downscale should "cover" the pixels around the pixel with *its center*
+ // at coordinates (2.5, 2.5) in the source, not those around (0, 0).
+ // Hence we need to scale coordinates (0.5, 0.5), not (0, 0).
+ destSubsetLo = SkScalarFloorToScalar(destSubsetLo);
+ destSubsetHi = SkScalarCeilToScalar(destSubsetHi);
+ float srcPixel = (destSubsetLo + 0.5f) * invScale;
+ int destLimit = SkScalarTruncToInt(destSubsetHi - destSubsetLo);
+ output->reserveAdditional(destLimit, SkScalarCeilToInt(destLimit * srcSupport * 2));
+ for (int destI = 0; destI < destLimit; srcPixel += invScale, destI++) {
+ // Compute the (inclusive) range of source pixels the filter covers.
+ float srcBegin = SkTMax(0.f, SkScalarFloorToScalar(srcPixel - srcSupport));
+ float srcEnd = SkTMin(srcSize - 1.f, SkScalarCeilToScalar(srcPixel + srcSupport));
+
+ // Compute the unnormalized filter value at each location of the source
+ // it covers.
+
+ // Sum of the filter values for normalizing.
+ // Distance from the center of the filter, this is the filter coordinate
+ // in source space. We also need to consider the center of the pixel
+ // when comparing distance against 'srcPixel'. In the 5x downscale
+ // example used above the distance from the center of the filter to
+ // the pixel with coordinates (2, 2) should be 0, because its center
+ // is at (2.5, 2.5).
+ float destFilterDist = (srcBegin + 0.5f - srcPixel) * clampedScale;
+ int filterCount = SkScalarTruncToInt(srcEnd - srcBegin) + 1;
+ if (filterCount <= 0) {
+ // true when srcSize is equal to srcPixel - srcSupport; this may be a bug
+ return;
+ }
+ filterValuesArray.reset(filterCount);
+ float filterSum = fBitmapFilter->evaluate_n(destFilterDist, clampedScale, filterCount,
+ filterValuesArray.begin());
+
+ // The filter must be normalized so that we don't affect the brightness of
+ // the image. Convert to normalized fixed point.
+ int fixedSum = 0;
+ fixedFilterValuesArray.reset(filterCount);
+ const float* filterValues = filterValuesArray.begin();
+ SkConvolutionFilter1D::ConvolutionFixed* fixedFilterValues = fixedFilterValuesArray.begin();
+ float invFilterSum = 1 / filterSum;
+ for (int fixedI = 0; fixedI < filterCount; fixedI++) {
+ int curFixed = SkConvolutionFilter1D::FloatToFixed(filterValues[fixedI] * invFilterSum);
+ fixedSum += curFixed;
+ fixedFilterValues[fixedI] = SkToS16(curFixed);
+ }
+ SkASSERT(fixedSum <= 0x7FFF);
+
+ // The conversion to fixed point will leave some rounding errors, which
+ // we add back in to avoid affecting the brightness of the image. We
+ // arbitrarily add this to the center of the filter array (this won't always
+ // be the center of the filter function since it could get clipped on the
+ // edges, but it doesn't matter enough to worry about that case).
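+ // For instance, if rounding left the fixed-point weights summing to one unit
+ // less than SkConvolutionFilter1D::FloatToFixed(1), a single unit of
+ // 'leftovers' is added to the middle tap so the weights again sum to one.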
+ int leftovers = SkConvolutionFilter1D::FloatToFixed(1) - fixedSum;
+ fixedFilterValues[filterCount / 2] += leftovers;
+
+ // Now it's ready to go.
+ output->AddFilter(SkScalarFloorToInt(srcBegin), fixedFilterValues, filterCount);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool valid_for_resize(const SkPixmap& source, int dstW, int dstH) {
+ // TODO: Seems like we shouldn't care about the swizzle of source, just that it's 8888
+ return source.addr() && source.colorType() == kN32_SkColorType &&
+ source.width() >= 1 && source.height() >= 1 && dstW >= 1 && dstH >= 1;
+}
+
+bool SkBitmapScaler::Resize(const SkPixmap& result, const SkPixmap& source, ResizeMethod method) {
+ if (!valid_for_resize(source, result.width(), result.height())) {
+ return false;
+ }
+ if (!result.addr() || result.colorType() != source.colorType()) {
+ return false;
+ }
+
+ SkRect destSubset = SkRect::MakeIWH(result.width(), result.height());
+
+ SkResizeFilter filter(method, source.width(), source.height(),
+ result.width(), result.height(), destSubset);
+
+ // Get a subset encompassing this touched area. We construct the
+ // offsets and row strides such that it looks like a new bitmap, while
+ // referring to the old data.
+ const uint8_t* sourceSubset = reinterpret_cast<const uint8_t*>(source.addr());
+
+ return BGRAConvolve2D(sourceSubset, static_cast<int>(source.rowBytes()),
+ !source.isOpaque(), filter.xFilter(), filter.yFilter(),
+ static_cast<int>(result.rowBytes()),
+ static_cast<unsigned char*>(result.writable_addr()));
+}
+
+bool SkBitmapScaler::Resize(SkBitmap* resultPtr, const SkPixmap& source, ResizeMethod method,
+ int destWidth, int destHeight, SkBitmap::Allocator* allocator) {
+ // Preflight some of the checks, to avoid allocating the result if we don't need it.
+ if (!valid_for_resize(source, destWidth, destHeight)) {
+ return false;
+ }
+
+ SkBitmap result;
+ // Note: pass along the profile information even though this is not the right answer because
+ // this could be scaling in sRGB.
+ result.setInfo(SkImageInfo::MakeN32(destWidth, destHeight, source.alphaType(),
+ sk_ref_sp(source.info().colorSpace())));
+ result.allocPixels(allocator);
+
+ SkPixmap resultPM;
+ if (!result.peekPixels(&resultPM) || !Resize(resultPM, source, method)) {
+ return false;
+ }
+
+ *resultPtr = result;
+ SkASSERT(resultPtr->getPixels());
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapScaler.h b/gfx/skia/skia/src/core/SkBitmapScaler.h
new file mode 100644
index 0000000000..b2c3ab77d1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapScaler.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapScaler_DEFINED
+#define SkBitmapScaler_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "src/core/SkConvolver.h"
+
+/** \class SkBitmapScaler
+
+ Provides the interface for high quality image resampling.
+ */
+
+class SK_API SkBitmapScaler {
+public:
+ enum ResizeMethod {
+ RESIZE_BOX,
+ RESIZE_TRIANGLE,
+ RESIZE_LANCZOS3,
+ RESIZE_HAMMING,
+ RESIZE_MITCHELL,
+
+ RESIZE_FirstMethod = RESIZE_BOX,
+ RESIZE_LastMethod = RESIZE_MITCHELL,
+ };
+
+ /**
+ * Given already-allocated src and dst pixmaps, this will scale the src pixels using the
+ * specified resize-method and write the results into the pixels pointed to by dst.
+ */
+ static bool Resize(const SkPixmap& dst, const SkPixmap& src, ResizeMethod method);
+
+ /**
+ * Helper function that manages allocating a bitmap to hold the dst pixels, and then calls
+ * the pixmap version of Resize.
+ */
+ static bool Resize(SkBitmap* result, const SkPixmap& src, ResizeMethod method,
+ int dest_width, int dest_height, SkBitmap::Allocator* = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlendMode.cpp b/gfx/skia/skia/src/core/SkBlendMode.cpp
new file mode 100644
index 0000000000..b19481c182
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendMode.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkCoverageModePriv.h"
+#include "src/core/SkRasterPipeline.h"
+
+bool SkBlendMode_ShouldPreScaleCoverage(SkBlendMode mode, bool rgb_coverage) {
+ // The most important things we do here are:
+ // 1) never pre-scale with rgb coverage if the blend mode involves a source-alpha term;
+ // 2) always pre-scale Plus.
+ //
+ // When we pre-scale with rgb coverage, we scale each of source r,g,b, with a distinct value,
+ // and source alpha with one of those three values. This process destructively updates the
+ // source-alpha term, so we can't evaluate blend modes that need its original value.
+ //
+ // Plus always requires pre-scaling as a specific quirk of its implementation in
+ // SkRasterPipeline. This lets us put the clamp inside the blend mode itself rather
+ // than as a separate stage that'd come after the lerp.
+ //
+ // This function is a finer-grained breakdown of SkBlendMode_SupportsCoverageAsAlpha().
+ switch (mode) {
+ case SkBlendMode::kDst: // d --> no sa term, ok!
+ case SkBlendMode::kDstOver: // d + s*inv(da) --> no sa term, ok!
+ case SkBlendMode::kPlus: // clamp(s+d) --> no sa term, ok!
+ return true;
+
+ case SkBlendMode::kDstOut: // d * inv(sa)
+ case SkBlendMode::kSrcATop: // s*da + d*inv(sa)
+ case SkBlendMode::kSrcOver: // s + d*inv(sa)
+ case SkBlendMode::kXor: // s*inv(da) + d*inv(sa)
+ return !rgb_coverage;
+
+ default: break;
+ }
+ return false;
+}
+
+// Users of this function may want to switch to the rgb-coverage aware version above.
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode mode) {
+ return SkBlendMode_ShouldPreScaleCoverage(mode, false);
+}
+
+struct CoeffRec {
+ SkBlendModeCoeff fSrc;
+ SkBlendModeCoeff fDst;
+};
+
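+// Indexed by (int)SkBlendMode; valid for the coefficient-based modes, i.e.
+// kClear through kScreen (SkBlendMode_AsCoeff below rejects anything later).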
+const CoeffRec gCoeffs[] = {
+ { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kZero },
+ { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kZero },
+ { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kOne },
+ { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kISA },
+ { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kOne },
+ { SkBlendModeCoeff::kDA, SkBlendModeCoeff::kZero },
+ { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kSA },
+ { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kZero },
+ { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kISA },
+ { SkBlendModeCoeff::kDA, SkBlendModeCoeff::kISA },
+ { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kSA },
+ { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kISA },
+
+ { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kOne },
+ { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kSC },
+ { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kISC }, // screen
+};
+
+bool SkBlendMode_AsCoeff(SkBlendMode mode, SkBlendModeCoeff* src, SkBlendModeCoeff* dst) {
+ if (mode > SkBlendMode::kScreen) {
+ return false;
+ }
+ if (src) {
+ *src = gCoeffs[static_cast<int>(mode)].fSrc;
+ }
+ if (dst) {
+ *dst = gCoeffs[static_cast<int>(mode)].fDst;
+ }
+ return true;
+}
+
+void SkBlendMode_AppendStages(SkBlendMode mode, SkRasterPipeline* p) {
+ auto stage = SkRasterPipeline::srcover;
+ switch (mode) {
+ case SkBlendMode::kClear: stage = SkRasterPipeline::clear; break;
+ case SkBlendMode::kSrc: return; // This stage is a no-op.
+ case SkBlendMode::kDst: stage = SkRasterPipeline::move_dst_src; break;
+ case SkBlendMode::kSrcOver: stage = SkRasterPipeline::srcover; break;
+ case SkBlendMode::kDstOver: stage = SkRasterPipeline::dstover; break;
+ case SkBlendMode::kSrcIn: stage = SkRasterPipeline::srcin; break;
+ case SkBlendMode::kDstIn: stage = SkRasterPipeline::dstin; break;
+ case SkBlendMode::kSrcOut: stage = SkRasterPipeline::srcout; break;
+ case SkBlendMode::kDstOut: stage = SkRasterPipeline::dstout; break;
+ case SkBlendMode::kSrcATop: stage = SkRasterPipeline::srcatop; break;
+ case SkBlendMode::kDstATop: stage = SkRasterPipeline::dstatop; break;
+ case SkBlendMode::kXor: stage = SkRasterPipeline::xor_; break;
+ case SkBlendMode::kPlus: stage = SkRasterPipeline::plus_; break;
+ case SkBlendMode::kModulate: stage = SkRasterPipeline::modulate; break;
+
+ case SkBlendMode::kScreen: stage = SkRasterPipeline::screen; break;
+ case SkBlendMode::kOverlay: stage = SkRasterPipeline::overlay; break;
+ case SkBlendMode::kDarken: stage = SkRasterPipeline::darken; break;
+ case SkBlendMode::kLighten: stage = SkRasterPipeline::lighten; break;
+ case SkBlendMode::kColorDodge: stage = SkRasterPipeline::colordodge; break;
+ case SkBlendMode::kColorBurn: stage = SkRasterPipeline::colorburn; break;
+ case SkBlendMode::kHardLight: stage = SkRasterPipeline::hardlight; break;
+ case SkBlendMode::kSoftLight: stage = SkRasterPipeline::softlight; break;
+ case SkBlendMode::kDifference: stage = SkRasterPipeline::difference; break;
+ case SkBlendMode::kExclusion: stage = SkRasterPipeline::exclusion; break;
+ case SkBlendMode::kMultiply: stage = SkRasterPipeline::multiply; break;
+
+ case SkBlendMode::kHue: stage = SkRasterPipeline::hue; break;
+ case SkBlendMode::kSaturation: stage = SkRasterPipeline::saturation; break;
+ case SkBlendMode::kColor: stage = SkRasterPipeline::color; break;
+ case SkBlendMode::kLuminosity: stage = SkRasterPipeline::luminosity; break;
+ }
+ p->append(stage);
+}
+
+SkPMColor4f SkBlendMode_Apply(SkBlendMode mode, const SkPMColor4f& src, const SkPMColor4f& dst) {
+ // special-case simple/common modes...
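+ // e.g. kSrcOver below computes src + dst*(1 - srcA): with premultiplied
+ // src = {0.5, 0, 0, 0.5} and dst = {0, 0.5, 0, 1}, the result is
+ // {0.5, 0.25, 0, 1}.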
+ switch (mode) {
+ case SkBlendMode::kClear: return SK_PMColor4fTRANSPARENT;
+ case SkBlendMode::kSrc: return src;
+ case SkBlendMode::kDst: return dst;
+ case SkBlendMode::kSrcOver: {
+ Sk4f r = Sk4f::Load(src.vec()) + Sk4f::Load(dst.vec()) * Sk4f(1 - src.fA);
+ return { r[0], r[1], r[2], r[3] };
+ }
+ default:
+ break;
+ }
+
+ SkRasterPipeline_<256> p;
+ SkPMColor4f src_storage = src,
+ dst_storage = dst,
+ res_storage;
+ SkRasterPipeline_MemoryCtx src_ctx = { &src_storage, 0 },
+ dst_ctx = { &dst_storage, 0 },
+ res_ctx = { &res_storage, 0 };
+
+ p.append(SkRasterPipeline::load_f32, &dst_ctx);
+ p.append(SkRasterPipeline::move_src_dst);
+ p.append(SkRasterPipeline::load_f32, &src_ctx);
+ SkBlendMode_AppendStages(mode, &p);
+ p.append(SkRasterPipeline::store_f32, &res_ctx);
+ p.run(0,0, 1,1);
+ return res_storage;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+const SkBlendMode gUncorrelatedCoverageToBlend[] = {
+ SkBlendMode::kSrcOver, // or DstOver
+ SkBlendMode::kSrcIn, // or kDstIn
+ SkBlendMode::kSrcOut,
+ SkBlendMode::kDstOut,
+ SkBlendMode::kXor,
+};
+
+SkBlendMode SkUncorrelatedCoverageModeToBlendMode(SkCoverageMode cm) {
+ unsigned index = static_cast<unsigned>(cm);
+ SkASSERT(index < SK_ARRAY_COUNT(gUncorrelatedCoverageToBlend));
+ return gUncorrelatedCoverageToBlend[index];
+}
diff --git a/gfx/skia/skia/src/core/SkBlendModePriv.h b/gfx/skia/skia/src/core/SkBlendModePriv.h
new file mode 100644
index 0000000000..c5e688adb6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendModePriv.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendModePriv_DEFINED
+#define SkBlendModePriv_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/private/SkColorData.h"
+
+class SkRasterPipeline;
+
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode);
+
+static inline bool SkBlendMode_CaresAboutRBOrder(SkBlendMode mode) {
+ return (mode > SkBlendMode::kLastSeparableMode);
+}
+
+bool SkBlendMode_ShouldPreScaleCoverage(SkBlendMode, bool rgb_coverage);
+void SkBlendMode_AppendStages(SkBlendMode, SkRasterPipeline*);
+
+enum class SkBlendModeCoeff {
+ kZero, /** 0 */
+ kOne, /** 1 */
+ kSC, /** src color */
+ kISC, /** inverse src color (i.e. 1 - sc) */
+ kDC, /** dst color */
+ kIDC, /** inverse dst color (i.e. 1 - dc) */
+ kSA, /** src alpha */
+ kISA, /** inverse src alpha (i.e. 1 - sa) */
+ kDA, /** dst alpha */
+ kIDA, /** inverse dst alpha (i.e. 1 - da) */
+
+ kCoeffCount
+};
+
+bool SkBlendMode_AsCoeff(SkBlendMode mode, SkBlendModeCoeff* src, SkBlendModeCoeff* dst);
+
+SkPMColor4f SkBlendMode_Apply(SkBlendMode, const SkPMColor4f& src, const SkPMColor4f& dst);
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrXferProcessor.h"
+const GrXPFactory* SkBlendMode_AsXPFactory(SkBlendMode);
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
new file mode 100644
index 0000000000..991e699b9d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkBitmap.h"
+#include "src/core/SkMask.h"
+
+#ifndef ClearLow3Bits_DEFINED
+#define ClearLow3Bits_DEFINED
+ #define ClearLow3Bits(x) ((unsigned)(x) >> 3 << 3)
+#endif
+
+/*
+ SK_BLITBWMASK_NAME name of function(const SkBitmap& bitmap, const SkMask& mask, const SkIRect& clip, SK_BLITBWMASK_ARGS)
+ SK_BLITBWMASK_ARGS list of additional arguments to SK_BLITBWMASK_NAME, beginning with a comma
+ SK_BLITBWMASK_BLIT8 name of function(U8CPU byteMask, SK_BLITBWMASK_DEVTYPE* dst, int x, int y)
+ SK_BLITBWMASK_GETADDR either writable_addr8, writable_addr16, or writable_addr32
+ SK_BLITBWMASK_DEVTYPE either U32 or U16 or U8
+*/
+
+static void SK_BLITBWMASK_NAME(const SkPixmap& dst, const SkMask& srcMask,
+ const SkIRect& clip SK_BLITBWMASK_ARGS) {
+ SkASSERT(clip.fRight <= srcMask.fBounds.fRight);
+
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = srcMask.fBounds.fLeft;
+ unsigned mask_rowBytes = srcMask.fRowBytes;
+ size_t bitmap_rowBytes = dst.rowBytes();
+ unsigned height = clip.height();
+
+ SkASSERT(mask_rowBytes != 0);
+ SkASSERT(bitmap_rowBytes != 0);
+ SkASSERT(height != 0);
+
+ const uint8_t* bits = srcMask.getAddr1(cx, cy);
+ SK_BLITBWMASK_DEVTYPE* device = dst.SK_BLITBWMASK_GETADDR(cx, cy);
+
+ if (cx == maskLeft && clip.fRight == srcMask.fBounds.fRight)
+ {
+ do {
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ unsigned rb = mask_rowBytes;
+ do {
+ U8CPU mask = *bits++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ } while (--rb != 0);
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ int left_edge = cx - maskLeft;
+ SkASSERT(left_edge >= 0);
+ int rite_edge = clip.fRight - maskLeft;
+ SkASSERT(rite_edge > left_edge);
+
+ int left_mask = 0xFF >> (left_edge & 7);
+ int rite_mask = 0xFF << (8 - (rite_edge & 7));
+ rite_mask &= 0xFF; // only want low-8 bits of mask
+ int full_runs = (rite_edge >> 3) - ((left_edge + 7) >> 3);
+
+ // check for empty right mask, so we don't read off the end (or go slower than we need to)
+ if (rite_mask == 0)
+ {
+ SkASSERT(full_runs >= 0);
+ full_runs -= 1;
+ rite_mask = 0xFF;
+ }
+ if (left_mask == 0xFF)
+ full_runs -= 1;
+
+ // back up manually so we can keep in sync with our byte-aligned src
+ // and not trigger an assert from the getAddr## function
+ device -= left_edge & 7;
+
+ if (full_runs < 0)
+ {
+ left_mask &= rite_mask;
+ SkASSERT(left_mask != 0);
+ do {
+ U8CPU mask = *bits & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, device);
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ do {
+ int runs = full_runs;
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ const uint8_t* b = bits;
+ U8CPU mask;
+
+ mask = *b++ & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+
+ while (--runs >= 0)
+ {
+ mask = *b++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ }
+
+ mask = *b & rite_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ }
+}
+
+#undef SK_BLITBWMASK_NAME
+#undef SK_BLITBWMASK_ARGS
+#undef SK_BLITBWMASK_BLIT8
+#undef SK_BLITBWMASK_GETADDR
+#undef SK_BLITBWMASK_DEVTYPE
+#undef SK_BLITBWMASK_DOROWSETUP
diff --git a/gfx/skia/skia/src/core/SkBlitRow.h b/gfx/skia/skia/src/core/SkBlitRow.h
new file mode 100644
index 0000000000..cc4ba86407
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_DEFINED
+#define SkBlitRow_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+
+class SkBlitRow {
+public:
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+ typedef void (*Proc32)(uint32_t dst[], const SkPMColor src[], int count, U8CPU alpha);
+
+ static Proc32 Factory32(unsigned flags32);
+
+ /** Blend a single color onto a row of S32 pixels, writing the result
+ into a row of D32 pixels. src and dst may be the same memory, but
+ if they are not, they may not overlap.
+ */
+ static void Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitRow_D32.cpp b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
new file mode 100644
index 0000000000..82b3cca2cb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkUtils.h"
+
+// Everyone agrees memcpy() is the best way to do this.
+static void blit_row_s32_opaque(SkPMColor* dst,
+ const SkPMColor* src,
+ int count,
+ U8CPU alpha) {
+ SkASSERT(255 == alpha);
+ memcpy(dst, src, count * sizeof(SkPMColor));
+}
+
+// We have SSE2, NEON, and portable implementations of
+// blit_row_s32_blend() and blit_row_s32a_blend().
+
+// TODO(mtklein): can we do better in NEON than 2 pixels at a time?
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+
+ static inline __m128i SkPMLerp_SSE2(const __m128i& src,
+ const __m128i& dst,
+ const unsigned src_scale) {
+ // Computes dst + (((src - dst)*src_scale)>>8)
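+ // (per 8-bit channel, with src_scale in [1, 256]; the R,B and A,G
+ // channel pairs are processed in parallel 16-bit lanes below)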
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Compute scaled differences.
+ __m128i diff_rb = _mm_sub_epi16(src_rb, dst_rb);
+ __m128i diff_ag = _mm_sub_epi16(src_ag, dst_ag);
+ __m128i s = _mm_set1_epi16(src_scale);
+ diff_rb = _mm_mullo_epi16(diff_rb, s);
+ diff_ag = _mm_mullo_epi16(diff_ag, s);
+
+ // Pack the differences back together.
+ diff_rb = _mm_srli_epi16(diff_rb, 8);
+ diff_ag = _mm_andnot_si128(mask, diff_ag);
+ __m128i diff = _mm_or_si128(diff_rb, diff_ag);
+
+ // Add difference to destination.
+ return _mm_add_epi8(dst, diff);
+ }
+
+
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ auto src4 = (const __m128i*)src;
+ auto dst4 = ( __m128i*)dst;
+
+ while (count >= 4) {
+ _mm_storeu_si128(dst4, SkPMLerp_SSE2(_mm_loadu_si128(src4),
+ _mm_loadu_si128(dst4),
+ SkAlpha255To256(alpha)));
+ src4++;
+ dst4++;
+ count -= 4;
+ }
+
+ src = (const SkPMColor*)src4;
+ dst = ( SkPMColor*)dst4;
+
+ while (count --> 0) {
+ *dst = SkPMLerp(*src, *dst, SkAlpha255To256(alpha));
+ src++;
+ dst++;
+ }
+ }
+
+ static inline __m128i SkBlendARGB32_SSE2(const __m128i& src,
+ const __m128i& dst,
+ const unsigned aa) {
+ unsigned alpha = SkAlpha255To256(aa);
+ __m128i src_scale = _mm_set1_epi16(alpha);
+ // SkAlphaMulInv256(SkGetPackedA32(src), src_scale)
+ __m128i dst_scale = _mm_srli_epi32(src, 24);
+ // High words in dst_scale are 0, so it's safe to multiply with 16-bit src_scale.
+ dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
+ dst_scale = _mm_sub_epi32(_mm_set1_epi32(0xFFFF), dst_scale);
+ dst_scale = _mm_add_epi32(dst_scale, _mm_srli_epi32(dst_scale, 8));
+ dst_scale = _mm_srli_epi32(dst_scale, 8);
+ // Duplicate scales into 2x16-bit pattern per pixel.
+ dst_scale = _mm_shufflelo_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+ dst_scale = _mm_shufflehi_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source/destination into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Scale them.
+ src_rb = _mm_mullo_epi16(src_rb, src_scale);
+ src_ag = _mm_mullo_epi16(src_ag, src_scale);
+ dst_rb = _mm_mullo_epi16(dst_rb, dst_scale);
+ dst_ag = _mm_mullo_epi16(dst_ag, dst_scale);
+
+ // Add the scaled source and destination.
+ dst_rb = _mm_add_epi16(src_rb, dst_rb);
+ dst_ag = _mm_add_epi16(src_ag, dst_ag);
+
+ // Unsplay the halves back together.
+ dst_rb = _mm_srli_epi16(dst_rb, 8);
+ dst_ag = _mm_andnot_si128(mask, dst_ag);
+ return _mm_or_si128(dst_rb, dst_ag);
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ auto src4 = (const __m128i*)src;
+ auto dst4 = ( __m128i*)dst;
+
+ while (count >= 4) {
+ _mm_storeu_si128(dst4, SkBlendARGB32_SSE2(_mm_loadu_si128(src4),
+ _mm_loadu_si128(dst4),
+ alpha));
+ src4++;
+ dst4++;
+ count -= 4;
+ }
+
+ src = (const SkPMColor*)src4;
+ dst = ( SkPMColor*)dst4;
+
+ while (count --> 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ uint16_t src_scale = SkAlpha255To256(alpha);
+ uint16_t dst_scale = 256 - src_scale;
+
+ while (count >= 2) {
+ uint8x8_t vsrc, vdst, vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+
+ if (count == 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ }
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha < 255);
+
+ unsigned alpha256 = SkAlpha255To256(alpha);
+
+ if (count & 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vdst_wide, vsrc_wide;
+ unsigned dst_scale;
+
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ dst_scale = vget_lane_u8(vsrc, 3);
+ dst_scale = SkAlphaMulInv256(dst_scale, alpha256);
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_n_u16(vsrc_wide, alpha256);
+
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide = vmulq_n_u16(vdst_wide, dst_scale);
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ dst++;
+ src++;
+ count--;
+ }
+
+ uint8x8_t alpha_mask;
+ static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
+ alpha_mask = vld1_u8(alpha_mask_setup);
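+ // vtbl1_u8 with this mask broadcasts byte 3 (the alpha byte of pixel 0)
+ // into lanes 0-3 and byte 7 (the alpha byte of pixel 1) into lanes 4-7,
+ // so each pixel's four channels see that pixel's source alpha.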
+
+ while (count) {
+
+ uint8x8_t vsrc, vdst, vres, vsrc_alphas;
+ uint16x8_t vdst_wide, vsrc_wide, vsrc_scale, vdst_scale;
+
+ __builtin_prefetch(src+32);
+ __builtin_prefetch(dst+32);
+
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ vsrc_scale = vdupq_n_u16(alpha256);
+
+ vsrc_alphas = vtbl1_u8(vsrc, alpha_mask);
+ vdst_scale = vmovl_u8(vsrc_alphas);
+ // Calculate SkAlphaMulInv256(vdst_scale, vsrc_scale).
+ // A 16-bit lane would overflow if we used 0xFFFF here,
+ // so use an approximation with 0xFF00 that is off by 1,
+ // and add back 1 after to get the correct value.
+ // This is valid if alpha256 <= 255.
+ vdst_scale = vmlsq_u16(vdupq_n_u16(0xFF00), vdst_scale, vsrc_scale);
+ vdst_scale = vsraq_n_u16(vdst_scale, vdst_scale, 8);
+ vdst_scale = vsraq_n_u16(vdupq_n_u16(1), vdst_scale, 8);
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide *= vsrc_scale;
+
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide *= vdst_scale;
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+ }
+
+#else
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ while (count --> 0) {
+ *dst = SkPMLerp(*src, *dst, SkAlpha255To256(alpha));
+ src++;
+ dst++;
+ }
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ while (count --> 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ }
+ }
+#endif
+
+SkBlitRow::Proc32 SkBlitRow::Factory32(unsigned flags) {
+ static const SkBlitRow::Proc32 kProcs[] = {
+ blit_row_s32_opaque,
+ blit_row_s32_blend,
+ nullptr, // blit_row_s32a_opaque is in SkOpts
+ blit_row_s32a_blend
+ };
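+ // Indexed by the Flags32 bits: 0 = opaque copy, 1 = global alpha only,
+ // 2 = per-pixel alpha only (dispatched to SkOpts below), 3 = both.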
+
+ SkASSERT(flags < SK_ARRAY_COUNT(kProcs));
+ flags &= SK_ARRAY_COUNT(kProcs) - 1; // just to be safe
+
+ return flags == 2 ? SkOpts::blit_row_s32a_opaque
+ : kProcs[flags];
+}
+
+void SkBlitRow::Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color) {
+ switch (SkGetPackedA32(color)) {
+ case 0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+ case 255: sk_memset32(dst, color, count); return;
+ }
+ return SkOpts::blit_row_color32(dst, src, count, color);
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter.cpp b/gfx/skia/skia/src/core/SkBlitter.cpp
new file mode 100644
index 0000000000..c8cf2bb8a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.cpp
@@ -0,0 +1,884 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlitter.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRegionPriv.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodeInterpretation.h"
+#include "src/shaders/SkShaderBase.h"
+
+SkBlitter::~SkBlitter() {}
+
+bool SkBlitter::isNullBlitter() const { return false; }
+
+const SkPixmap* SkBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+/*
+void SkBlitter::blitH(int x, int y, int width) {
+ SkDEBUGFAIL("unimplemented");
+}
+
+
+void SkBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkDEBUGFAIL("unimplemented");
+}
+ */
+
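+// Maps a [0, 1] coverage value to an 8-bit alpha, snapping nearly-full
+// coverage to exactly 0xFF and nearly-empty coverage to 0 (presumably so
+// fully-covered spans can take the opaque fast paths).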
+inline static SkAlpha ScalarToAlpha(SkScalar a) {
+ SkAlpha alpha = (SkAlpha)(a * 255);
+ return alpha > 247 ? 0xFF : alpha < 8 ? 0 : alpha;
+}
+
+void SkBlitter::blitFatAntiRect(const SkRect& rect) {
+ SkIRect bounds = rect.roundOut();
+ SkASSERT(bounds.width() >= 3);
+
+ // skbug.com/7813
+ // To ensure consistency of the threaded backend (a rect that's considered fat in the init-once
+ // phase must also be considered fat in the draw phase), we have to deal with rects with small
+ // heights because the horizontal tiling in the threaded backend may change the height.
+ //
+ // This also implies that we cannot do vertical tiling unless we can blit any rect (not just the
+ // fat one.)
+ if (bounds.height() == 0) {
+ return;
+ }
+
+ int runSize = bounds.width() + 1; // +1 so we can set runs[bounds.width()] = 0
+ void* storage = this->allocBlitMemory(runSize * (sizeof(int16_t) + sizeof(SkAlpha)));
+ int16_t* runs = reinterpret_cast<int16_t*>(storage);
+ SkAlpha* alphas = reinterpret_cast<SkAlpha*>(runs + runSize);
+
+ runs[0] = 1;
+ runs[1] = bounds.width() - 2;
+ runs[bounds.width() - 1] = 1;
+ runs[bounds.width()] = 0;
+
+ SkScalar partialL = bounds.fLeft + 1 - rect.fLeft;
+ SkScalar partialR = rect.fRight - (bounds.fRight - 1);
+ SkScalar partialT = bounds.fTop + 1 - rect.fTop;
+ SkScalar partialB = rect.fBottom - (bounds.fBottom - 1);
+
+ if (bounds.height() == 1) {
+ partialT = rect.fBottom - rect.fTop;
+ }
+
+ alphas[0] = ScalarToAlpha(partialL * partialT);
+ alphas[1] = ScalarToAlpha(partialT);
+ alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialT);
+ this->blitAntiH(bounds.fLeft, bounds.fTop, alphas, runs);
+
+ if (bounds.height() > 2) {
+ this->blitAntiRect(bounds.fLeft, bounds.fTop + 1, bounds.width() - 2, bounds.height() - 2,
+ ScalarToAlpha(partialL), ScalarToAlpha(partialR));
+ }
+
+ if (bounds.height() > 1) {
+ alphas[0] = ScalarToAlpha(partialL * partialB);
+ alphas[1] = ScalarToAlpha(partialB);
+ alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialB);
+ this->blitAntiH(bounds.fLeft, bounds.fBottom - 1, alphas, runs);
+ }
+}
+
+void SkBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 255) {
+ this->blitRect(x, y, 1, height);
+ } else {
+ int16_t runs[2];
+ runs[0] = 1;
+ runs[1] = 0;
+
+ while (--height >= 0) {
+ this->blitAntiH(x, y++, &alpha, runs);
+ }
+ }
+}
+
+void SkBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+/// Default implementation doesn't check for easy optimizations
+/// such as alpha == 255; also uses blitV(), which some subclasses
+/// may not support.
+void SkBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ if (leftAlpha > 0) { // we may send in x = -1 with leftAlpha = 0
+ this->blitV(x, y, height, leftAlpha);
+ }
+ x++;
+ if (width > 0) {
+ this->blitRect(x, y, width, height);
+ x += width;
+ }
+ if (rightAlpha > 0) {
+ this->blitV(x, y, height, rightAlpha);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
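+// Converts a row of 1-bit mask data into horizontal spans, forwarding each
+// run of set bits to blitter->blitH(); left_mask and right_mask trim the
+// partial bytes at either end of the row.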
+static inline void bits_to_runs(SkBlitter* blitter, int x, int y,
+ const uint8_t bits[],
+ uint8_t left_mask, ptrdiff_t rowBytes,
+ uint8_t right_mask) {
+ int inFill = 0;
+ int pos = 0;
+
+ while (--rowBytes >= 0) {
+ uint8_t b = *bits++ & left_mask;
+ if (rowBytes == 0) {
+ b &= right_mask;
+ }
+
+ for (uint8_t test = 0x80U; test != 0; test >>= 1) {
+ if (b & test) {
+ if (!inFill) {
+ pos = x;
+ inFill = true;
+ }
+ } else {
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ inFill = false;
+ }
+ }
+ x += 1;
+ }
+ left_mask = 0xFFU;
+ }
+
+ // final cleanup
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ }
+}
+
+// maskBitCount is the number of 1's to place in the mask. It must be in the range between 1 and 8.
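+// e.g. generate_right_mask(3) == 0xE0: the three leading (leftmost) bits set.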
+static uint8_t generate_right_mask(int maskBitCount) {
+ return static_cast<uint8_t>((0xFF00U >> maskBitCount) & 0xFF);
+}
+
+void SkBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (mask.fFormat == SkMask::kLCD16_Format) {
+ return; // needs to be handled by subclass
+ }
+
+ if (mask.fFormat == SkMask::kBW_Format) {
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = mask.fBounds.fLeft;
+ int maskRowBytes = mask.fRowBytes;
+ int height = clip.height();
+
+ const uint8_t* bits = mask.getAddr1(cx, cy);
+
+ SkDEBUGCODE(const uint8_t* endOfImage =
+ mask.fImage + (mask.fBounds.height() - 1) * maskRowBytes
+ + ((mask.fBounds.width() + 7) >> 3));
+
+ if (cx == maskLeft && clip.fRight == mask.fBounds.fRight) {
+ while (--height >= 0) {
+ int affectedRightBit = mask.fBounds.width() - 1;
+ ptrdiff_t rowBytes = (affectedRightBit >> 3) + 1;
+ SkASSERT(bits + rowBytes <= endOfImage);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+ bits_to_runs(this, cx, cy, bits, 0xFF, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ } else {
+ // Bits is calculated as the offset into the mask at the point {cx, cy}; therefore, all
+ // addressing into the bit mask is relative to that point. Since this is an address
+ // calculated from an arbitrary bit in that byte, calculate the leftmost bit.
+ int bitsLeft = cx - ((cx - maskLeft) & 7);
+
+ // Everything is relative to the bitsLeft.
+ int leftEdge = cx - bitsLeft;
+ SkASSERT(leftEdge >= 0);
+ int rightEdge = clip.fRight - bitsLeft;
+ SkASSERT(rightEdge > leftEdge);
+
+ // Calculate left byte and mask
+ const uint8_t* leftByte = bits;
+ U8CPU leftMask = 0xFFU >> (leftEdge & 7);
+
+ // Calculate right byte and mask
+ int affectedRightBit = rightEdge - 1;
+ const uint8_t* rightByte = bits + (affectedRightBit >> 3);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+
+ // leftByte and rightByte are byte locations; therefore, to get a count of bytes the
+ // code must add one.
+ ptrdiff_t rowBytes = rightByte - leftByte + 1;
+
+ while (--height >= 0) {
+ SkASSERT(bits + rowBytes <= endOfImage);
+ bits_to_runs(this, bitsLeft, cy, bits, leftMask, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ }
+ } else {
+ int width = clip.width();
+ SkAutoSTMalloc<64, int16_t> runStorage(width + 1);
+ int16_t* runs = runStorage.get();
+ const uint8_t* aa = mask.getAddr8(clip.fLeft, clip.fTop);
+
+ sk_memset16((uint16_t*)runs, 1, width);
+ runs[width] = 0;
+
+ int height = clip.height();
+ int y = clip.fTop;
+ while (--height >= 0) {
+ this->blitAntiH(clip.fLeft, y, aa, runs);
+ aa += mask.fRowBytes;
+ y += 1;
+ }
+ }
+}
+
+/////////////////////// these guys are not virtual, just helpers
+
+void SkBlitter::blitMaskRegion(const SkMask& mask, const SkRegion& clip) {
+ if (clip.quickReject(mask.fBounds)) {
+ return;
+ }
+
+ SkRegion::Cliperator clipper(clip, mask.fBounds);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitMask(mask, cr);
+ clipper.next();
+ }
+}
+
+void SkBlitter::blitRectRegion(const SkIRect& rect, const SkRegion& clip) {
+ SkRegion::Cliperator clipper(clip, rect);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitRect(cr.fLeft, cr.fTop, cr.width(), cr.height());
+ clipper.next();
+ }
+}
+
+void SkBlitter::blitRegion(const SkRegion& clip) {
+ SkRegionPriv::VisitSpans(clip, [this](const SkIRect& r) {
+ this->blitRect(r.left(), r.top(), r.width(), r.height());
+ });
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkNullBlitter::blitH(int x, int y, int width) {}
+
+void SkNullBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {}
+
+void SkNullBlitter::blitV(int x, int y, int height, SkAlpha alpha) {}
+
+void SkNullBlitter::blitRect(int x, int y, int width, int height) {}
+
+void SkNullBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {}
+
+const SkPixmap* SkNullBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+bool SkNullBlitter::isNullBlitter() const { return true; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int compute_anti_width(const int16_t runs[]) {
+ int width = 0;
+
+ for (;;) {
+ int count = runs[0];
+
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ break;
+ }
+ width += count;
+ runs += count;
+ }
+ return width;
+}
+
+static inline bool y_in_rect(int y, const SkIRect& rect) {
+ return (unsigned)(y - rect.fTop) < (unsigned)rect.height();
+}
+
+static inline bool x_in_rect(int x, const SkIRect& rect) {
+ return (unsigned)(x - rect.fLeft) < (unsigned)rect.width();
+}
+
+void SkRectClipBlitter::blitH(int left, int y, int width) {
+ SkASSERT(width > 0);
+
+ if (!y_in_rect(y, fClipRect)) {
+ return;
+ }
+
+ int right = left + width;
+
+ if (left < fClipRect.fLeft) {
+ left = fClipRect.fLeft;
+ }
+ if (right > fClipRect.fRight) {
+ right = fClipRect.fRight;
+ }
+
+ width = right - left;
+ if (width > 0) {
+ fBlitter->blitH(left, y, width);
+ }
+}
+
+void SkRectClipBlitter::blitAntiH(int left, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+ if (!y_in_rect(y, fClipRect) || left >= fClipRect.fRight) {
+ return;
+ }
+
+ int x0 = left;
+ int x1 = left + compute_anti_width(runs);
+
+ if (x1 <= fClipRect.fLeft) {
+ return;
+ }
+
+ SkASSERT(x0 < x1);
+ if (x0 < fClipRect.fLeft) {
+ int dx = fClipRect.fLeft - x0;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, dx);
+ runs += dx;
+ aa += dx;
+ x0 = fClipRect.fLeft;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ if (x1 > fClipRect.fRight) {
+ x1 = fClipRect.fRight;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, x1 - x0);
+ ((int16_t*)runs)[x1 - x0] = 0;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ SkASSERT(compute_anti_width(runs) == x1 - x0);
+
+ fBlitter->blitAntiH(x0, y, aa, runs);
+}
+
+void SkRectClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(height > 0);
+
+ if (!x_in_rect(x, fClipRect)) {
+ return;
+ }
+
+ int y0 = y;
+ int y1 = y + height;
+
+ if (y0 < fClipRect.fTop) {
+ y0 = fClipRect.fTop;
+ }
+ if (y1 > fClipRect.fBottom) {
+ y1 = fClipRect.fBottom;
+ }
+
+ if (y0 < y1) {
+ fBlitter->blitV(x, y0, y1 - y0, alpha);
+ }
+}
+
+void SkRectClipBlitter::blitRect(int left, int y, int width, int height) {
+ SkIRect r;
+
+ r.setLTRB(left, y, left + width, y + height);
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ }
+}
+
+void SkRectClipBlitter::blitAntiRect(int left, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ SkIRect r;
+
+ // The *true* width of the rectangle blitted is width+2:
+ r.setLTRB(left, y, left + width + 2, y + height);
+ if (r.intersect(fClipRect)) {
+ if (r.fLeft != left) {
+ SkASSERT(r.fLeft > left);
+ leftAlpha = 255;
+ }
+ if (r.fRight != left + width + 2) {
+ SkASSERT(r.fRight < left + width + 2);
+ rightAlpha = 255;
+ }
+ if (255 == leftAlpha && 255 == rightAlpha) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ } else if (1 == r.width()) {
+ if (r.fLeft == left) {
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), leftAlpha);
+ } else {
+ SkASSERT(r.fLeft == left + width + 1);
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), rightAlpha);
+ }
+ } else {
+ fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+ leftAlpha, rightAlpha);
+ }
+ }
+}
+
+void SkRectClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkIRect r = clip;
+
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitMask(mask, r);
+ }
+}
+
+const SkPixmap* SkRectClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRgnClipBlitter::blitH(int x, int y, int width) {
+ SkRegion::Spanerator span(*fRgn, y, x, x + width);
+ int left, right;
+
+ while (span.next(&left, &right)) {
+ SkASSERT(left < right);
+ fBlitter->blitH(left, y, right - left);
+ }
+}
+
+void SkRgnClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+ int width = compute_anti_width(runs);
+ SkRegion::Spanerator span(*fRgn, y, x, x + width);
+ int left, right;
+ SkDEBUGCODE(const SkIRect& bounds = fRgn->getBounds();)
+
+ int prevRite = x;
+ while (span.next(&left, &right)) {
+ SkASSERT(x <= left);
+ SkASSERT(left < right);
+ SkASSERT(left >= bounds.fLeft && right <= bounds.fRight);
+
+ SkAlphaRuns::Break((int16_t*)runs, (uint8_t*)aa, left - x, right - left);
+
+ // now zero before left
+ if (left > prevRite) {
+ int index = prevRite - x;
+ ((uint8_t*)aa)[index] = 0; // skip runs after right
+ ((int16_t*)runs)[index] = SkToS16(left - prevRite);
+ }
+
+ prevRite = right;
+ }
+
+ if (prevRite > x) {
+ ((int16_t*)runs)[prevRite - x] = 0;
+
+ if (x < 0) {
+ int skip = runs[0];
+ SkASSERT(skip >= -x);
+ aa += skip;
+ runs += skip;
+ x += skip;
+ }
+ fBlitter->blitAntiH(x, y, aa, runs);
+ }
+}
+
+void SkRgnClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkIRect bounds;
+ bounds.setXYWH(x, y, 1, height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ fBlitter->blitV(x, r.fTop, r.height(), alpha);
+ iter.next();
+ }
+}
+
+void SkRgnClipBlitter::blitRect(int x, int y, int width, int height) {
+ SkIRect bounds;
+ bounds.setXYWH(x, y, width, height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ iter.next();
+ }
+}
+
+void SkRgnClipBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ // The *true* width of the rectangle to blit is width + 2
+ SkIRect bounds;
+ bounds.setXYWH(x, y, width + 2, height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+ SkASSERT(r.fLeft >= x);
+ SkASSERT(r.fRight <= x + width + 2);
+
+ SkAlpha effectiveLeftAlpha = (r.fLeft == x) ? leftAlpha : 255;
+ SkAlpha effectiveRightAlpha = (r.fRight == x + width + 2) ?
+ rightAlpha : 255;
+
+ if (255 == effectiveLeftAlpha && 255 == effectiveRightAlpha) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ } else if (1 == r.width()) {
+ if (r.fLeft == x) {
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+ effectiveLeftAlpha);
+ } else {
+ SkASSERT(r.fLeft == x + width + 1);
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+ effectiveRightAlpha);
+ }
+ } else {
+ fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+ effectiveLeftAlpha, effectiveRightAlpha);
+ }
+ iter.next();
+ }
+}
+
+
+void SkRgnClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkRegion::Cliperator iter(*fRgn, clip);
+ const SkIRect& r = iter.rect();
+ SkBlitter* blitter = fBlitter;
+
+ while (!iter.done()) {
+ blitter->blitMask(mask, r);
+ iter.next();
+ }
+}
+
+const SkPixmap* SkRgnClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlitter* SkBlitterClipper::apply(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect* ir) {
+ if (clip) {
+ const SkIRect& clipR = clip->getBounds();
+
+ if (clip->isEmpty() || (ir && !SkIRect::Intersects(clipR, *ir))) {
+ blitter = &fNullBlitter;
+ } else if (clip->isRect()) {
+ if (ir == nullptr || !clipR.contains(*ir)) {
+ fRectBlitter.init(blitter, clipR);
+ blitter = &fRectBlitter;
+ }
+ } else {
+ fRgnBlitter.init(blitter, clip);
+ blitter = &fRgnBlitter;
+ }
+ }
+ return blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkCoreBlitters.h"
+
+// hack for testing, not to be exposed to clients
+bool gSkForceRasterPipelineBlitter;
+
+bool SkBlitter::UseRasterPipelineBlitter(const SkPixmap& device, const SkPaint& paint,
+ const SkMatrix& matrix) {
+ if (gSkForceRasterPipelineBlitter) {
+ return true;
+ }
+#if 0 || defined(SK_FORCE_RASTER_PIPELINE_BLITTER)
+ return true;
+#else
+
+ const SkMaskFilterBase* mf = as_MFB(paint.getMaskFilter());
+
+ // The legacy blitters cannot handle any of these complex features (anymore).
+ if (device.alphaType() == kUnpremul_SkAlphaType ||
+ matrix.hasPerspective() ||
+ paint.getColorFilter() ||
+ paint.getBlendMode() > SkBlendMode::kLastCoeffMode ||
+ paint.getFilterQuality() == kHigh_SkFilterQuality ||
+ (mf && mf->getFormat() == SkMask::k3D_Format)) {
+ return true;
+ }
+
+ // All the real legacy fast paths are for shaders and SrcOver.
+ // Choosing SkRasterPipelineBlitter will also let us hit its single-color memset path.
+ if (!paint.getShader() && paint.getBlendMode() != SkBlendMode::kSrcOver) {
+ return true;
+ }
+
+ auto cs = device.colorSpace();
+ // We check (indirectly via makeContext()) later on if the shader can handle the colorspace
+ // in legacy mode, so here we just focus on whether a single color needs raster-pipeline.
+ if (cs && !paint.getShader()) {
+ if (!paint.getColor4f().fitsInBytes() || !cs->isSRGB()) {
+ return true;
+ }
+ }
+
+ // Only kN32 and 565 are handled by legacy blitters now, 565 mostly just for Android.
+ return device.colorType() != kN32_SkColorType
+ && device.colorType() != kRGB_565_SkColorType;
+#endif
+}
+
+SkBlitter* SkBlitter::Choose(const SkPixmap& device,
+ const SkMatrix& matrix,
+ const SkPaint& origPaint,
+ SkArenaAlloc* alloc,
+ bool drawCoverage) {
+ SkASSERT(alloc);
+
+ if (kUnknown_SkColorType == device.colorType()) {
+ return alloc->make<SkNullBlitter>();
+ }
+
+ // We may tweak the original paint as we go.
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ // We have the most fast-paths for SrcOver, so see if we can act like SrcOver.
+ if (paint->getBlendMode() != SkBlendMode::kSrcOver) {
+ switch (SkInterpretXfermode(*paint, SkColorTypeIsAlwaysOpaque(device.colorType()))) {
+ case kSrcOver_SkXfermodeInterpretation:
+ paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
+ break;
+ case kSkipDrawing_SkXfermodeInterpretation:
+ return alloc->make<SkNullBlitter>();
+ default:
+ break;
+ }
+ }
+
+ // A Clear blend mode will ignore the entire color pipeline, as if it were Src mode with 0x00000000.
+ if (paint->getBlendMode() == SkBlendMode::kClear) {
+ SkPaint* p = paint.writable();
+ p->setShader(nullptr);
+ p->setColorFilter(nullptr);
+ p->setBlendMode(SkBlendMode::kSrc);
+ p->setColor(0x00000000);
+ }
+
+#ifndef SK_SUPPORT_LEGACY_COLORFILTER_NO_SHADER
+ if (paint->getColorFilter() && !paint->getShader()) {
+ // apply the filter to the paint's color, and then remove the filter
+ auto dstCS = device.colorSpace();
+ SkPaint* p = paint.writable();
+ p->setColor(p->getColorFilter()->filterColor4f(p->getColor4f(), sk_srgb_singleton(), dstCS),
+ dstCS);
+ p->setColorFilter(nullptr);
+ }
+#endif
+
+ if (drawCoverage) {
+ if (device.colorType() == kAlpha_8_SkColorType) {
+ SkASSERT(!paint->getShader());
+ SkASSERT(paint->isSrcOver());
+ return alloc->make<SkA8_Coverage_Blitter>(device, *paint);
+ }
+ return alloc->make<SkNullBlitter>();
+ }
+
+ if (paint->isDither() && !SkPaintPriv::ShouldDither(*paint, device.colorType())) {
+ paint.writable()->setDither(false);
+ }
+
+#if defined(SK_USE_SKVM_BLITTER)
+ if (auto blitter = SkCreateSkVMBlitter(device, *paint, matrix, alloc)) {
+ return blitter;
+ }
+#endif
+
+ // We'll end up here for many interesting cases: color spaces, color filters, most color types.
+ if (UseRasterPipelineBlitter(device, *paint, matrix)) {
+ auto blitter = SkCreateRasterPipelineBlitter(device, *paint, matrix, alloc);
+ SkASSERT(blitter);
+ return blitter;
+ }
+
+ // Everything but legacy kN32_SkColorType and kRGB_565_SkColorType should already be handled.
+ SkASSERT(device.colorType() == kN32_SkColorType ||
+ device.colorType() == kRGB_565_SkColorType);
+
+ // And we should either have a shader, be blending with SrcOver, or both.
+ SkASSERT(paint->getShader() || paint->getBlendMode() == SkBlendMode::kSrcOver);
+
+ // Legacy blitters keep their shader state on a shader context.
+ SkShaderBase::Context* shaderContext = nullptr;
+ if (paint->getShader()) {
+ shaderContext = as_SB(paint->getShader())->makeContext(
+ {*paint, matrix, nullptr, device.colorType(), device.colorSpace()},
+ alloc);
+
+ // Creating the context isn't always possible... we'll just fall back to raster pipeline.
+ if (!shaderContext) {
+ auto blitter = SkCreateRasterPipelineBlitter(device, *paint, matrix, alloc);
+ SkASSERT(blitter);
+ return blitter;
+ }
+ }
+
+ switch (device.colorType()) {
+ case kN32_SkColorType:
+ if (shaderContext) {
+ return alloc->make<SkARGB32_Shader_Blitter>(device, *paint, shaderContext);
+ } else if (paint->getColor() == SK_ColorBLACK) {
+ return alloc->make<SkARGB32_Black_Blitter>(device, *paint);
+ } else if (paint->getAlpha() == 0xFF) {
+ return alloc->make<SkARGB32_Opaque_Blitter>(device, *paint);
+ } else {
+ return alloc->make<SkARGB32_Blitter>(device, *paint);
+ }
+
+ case kRGB_565_SkColorType:
+ if (shaderContext && SkRGB565_Shader_Blitter::Supports(device, *paint)) {
+ return alloc->make<SkRGB565_Shader_Blitter>(device, *paint, shaderContext);
+ } else {
+ return SkCreateRasterPipelineBlitter(device, *paint, matrix, alloc);
+ }
+
+ default:
+ SkASSERT(false);
+ return alloc->make<SkNullBlitter>();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkShaderBlitter::SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+ SkShaderBase::Context* shaderContext)
+ : INHERITED(device)
+ , fShader(paint.getShader())
+ , fShaderContext(shaderContext) {
+ SkASSERT(fShader);
+ SkASSERT(fShaderContext);
+
+ fShader->ref();
+ fShaderFlags = fShaderContext->getFlags();
+ fConstInY = SkToBool(fShaderFlags & SkShaderBase::kConstInY32_Flag);
+}
+
+SkShaderBlitter::~SkShaderBlitter() {
+ fShader->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkRectClipCheckBlitter::blitH(int x, int y, int width) {
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, 1)));
+ fBlitter->blitH(x, y, width);
+}
+
+void SkRectClipCheckBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
+ const int16_t* iter = runs;
+ for (; *iter; iter += *iter)
+ ;
+ int width = iter - runs;
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, 1)));
+ fBlitter->blitAntiH(x, y, aa, runs);
+}
+
+void SkRectClipCheckBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 1, height)));
+ fBlitter->blitV(x, y, height, alpha);
+}
+
+void SkRectClipCheckBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, height)));
+ fBlitter->blitRect(x, y, width, height);
+}
+
+void SkRectClipCheckBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ bool skipLeft = !leftAlpha;
+ bool skipRight = !rightAlpha;
+#ifdef SK_DEBUG
+ SkIRect r = SkIRect::MakeXYWH(x + skipLeft, y, width + 2 - skipRight - skipLeft, height);
+ SkASSERT(r.isEmpty() || fClipRect.contains(r));
+#endif
+ fBlitter->blitAntiRect(x, y, width, height, leftAlpha, rightAlpha);
+}
+
+void SkRectClipCheckBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+ SkASSERT(fClipRect.contains(clip));
+ fBlitter->blitMask(mask, clip);
+}
+
+const SkPixmap* SkRectClipCheckBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+void SkRectClipCheckBlitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 2, 1)));
+ fBlitter->blitAntiH2(x, y, a0, a1);
+}
+
+void SkRectClipCheckBlitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 1, 2)));
+ fBlitter->blitAntiV2(x, y, a0, a1);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitter.h b/gfx/skia/skia/src/core/SkBlitter.h
new file mode 100644
index 0000000000..ef37a1fca8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitter_DEFINED
+#define SkBlitter_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkImagePriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkArenaAlloc;
+class SkMatrix;
+class SkPaint;
+class SkPixmap;
+struct SkMask;
+
+/** SkBlitter and its subclasses are responsible for actually writing pixels
+ into memory. Besides efficiency, they handle clipping and antialiasing.
+ A SkBlitter subclass contains all the context needed to generate pixels
+ for the destination and how src/generated pixels map to the destination.
+ The coordinates passed to the blitX calls are in destination pixel space.
+*/
+class SkBlitter {
+public:
+ virtual ~SkBlitter();
+
+ /// Blit a horizontal run of one or more pixels.
+ virtual void blitH(int x, int y, int width) = 0;
+
+ /// Blit a horizontal run of antialiased pixels; runs[] is a *sparse*
+ /// zero-terminated run-length encoding of spans of constant alpha values.
+    /// The runs[] and antialias[] arrays work together to represent long runs of pixels with the
+    /// same alpha: runs[] holds the number of pixels in a run, and antialias[] holds the coverage
+    /// value for that run. The encoding is sparse. The runs array is zero-terminated and has
+    /// enough entries for each pixel plus one, but in most cases many of the entries do not
+    /// contain valid data. An entry in the runs array gives the number of pixels (np) that share
+    /// the same alpha value; the next valid entry is found np entries away. For example, if
+    /// runs[0] == 7, the next valid entry will be at runs[7]. The runs and antialias arrays are
+    /// coupled by index, so if the np entry is runs[45] == 12, the alpha value is found at
+    /// antialias[45]; antialias[45] == 0x88 would mean: use an alpha of 0x88 for the next
+    /// 12 pixels starting at pixel 45.
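+    /// As an illustrative, non-upstream walk of the encoding: for an 8-pixel span whose
+    /// first 3 pixels have coverage 0x40 and whose last 5 are fully opaque,
+    ///     runs[]      = { 3, _, _, 5, _, _, _, _, 0 }
+    ///     antialias[] = { 0x40, _, _, 0xFF, _, _, _, _ }
+    /// where entries marked _ are never read. A consumer advances through it as:
+    ///     for (;;) { int n = runs[0]; if (!n) break; use(antialias[0], n);
+    ///                runs += n; antialias += n; }
+    /// with use() standing in for whatever per-run blending the blitter performs.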
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) = 0;
+
+ /// Blit a vertical run of pixels with a constant alpha value.
+ virtual void blitV(int x, int y, int height, SkAlpha alpha);
+
+ /// Blit a solid rectangle one or more pixels wide.
+ virtual void blitRect(int x, int y, int width, int height);
+
+    /** Blit a rectangle with one alpha-blended column on the left,
+        width (zero or more) opaque pixels in the middle, and one
+        alpha-blended column on the right.
+        The result will always be at least two pixels wide.
+    */
+ virtual void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha);
+
+    // Blit a rect in AA with size at least 3 x 3 (smaller rects have too many edge cases...)
+ void blitFatAntiRect(const SkRect& rect);
+
+ /// Blit a pattern of pixels defined by a rectangle-clipped mask;
+ /// typically used for text.
+ virtual void blitMask(const SkMask&, const SkIRect& clip);
+
+    /** If the blitter just sets a single value for each pixel, return the
+        bitmap it draws into and write that color to *value. If not, return
+        nullptr and ignore the value parameter.
+    */
+ virtual const SkPixmap* justAnOpaqueColor(uint32_t* value);
+
+ // (x, y), (x + 1, y)
+ virtual void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ int16_t runs[3];
+ uint8_t aa[2];
+
+ runs[0] = 1;
+ runs[1] = 1;
+ runs[2] = 0;
+ aa[0] = SkToU8(a0);
+ aa[1] = SkToU8(a1);
+ this->blitAntiH(x, y, aa, runs);
+ }
+
+ // (x, y), (x, y + 1)
+ virtual void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ int16_t runs[2];
+ uint8_t aa[1];
+
+ runs[0] = 1;
+ runs[1] = 0;
+ aa[0] = SkToU8(a0);
+ this->blitAntiH(x, y, aa, runs);
+ // reset in case the clipping blitter modified runs
+ runs[0] = 1;
+ runs[1] = 0;
+ aa[0] = SkToU8(a1);
+ this->blitAntiH(x, y + 1, aa, runs);
+ }
+
+ /**
+ * Special method just to identify the null blitter, which is returned
+ * from Choose() if the request cannot be fulfilled. Default impl
+ * returns false.
+ */
+ virtual bool isNullBlitter() const;
+
+    /**
+     *  Special method for blitters that can blit more than one row at a time.
+     *  It returns the number of rows that this blitter could optimally
+     *  process at a time. The blitter is still required to support blitting
+     *  one scanline at a time.
+     */
+ virtual int requestRowsPreserved() const { return 1; }
+
+ /**
+ * This function allocates memory for the blitter that the blitter then owns.
+ * The memory can be used by the calling function at will, but it will be
+ * released when the blitter's destructor is called. This function returns
+ * nullptr if no persistent memory is needed by the blitter.
+ */
+ virtual void* allocBlitMemory(size_t sz) {
+ return fBlitMemory.reset(sz, SkAutoMalloc::kReuse_OnShrink);
+ }
+
+ ///@name non-virtual helpers
+ void blitMaskRegion(const SkMask& mask, const SkRegion& clip);
+ void blitRectRegion(const SkIRect& rect, const SkRegion& clip);
+ void blitRegion(const SkRegion& clip);
+ ///@}
+
+ /** @name Factories
+ Return the correct blitter to use given the specified context.
+ */
+ static SkBlitter* Choose(const SkPixmap& dst,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkArenaAlloc*,
+ bool drawCoverage = false);
+
+ static SkBlitter* ChooseSprite(const SkPixmap& dst,
+ const SkPaint&,
+ const SkPixmap& src,
+ int left, int top,
+ SkArenaAlloc*);
+ ///@}
+
+ static bool UseRasterPipelineBlitter(const SkPixmap&, const SkPaint&, const SkMatrix&);
+
+protected:
+ SkAutoMalloc fBlitMemory;
+};
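+
+// A minimal usage sketch (illustrative, not part of the upstream sources): callers hand
+// Choose() an arena that owns the returned blitter, then drive it per span.
+//
+//     SkArenaAlloc alloc(1024);
+//     SkBlitter* blitter = SkBlitter::Choose(dst, matrix, paint, &alloc);
+//     blitter->blitRect(10, 10, 100, 50);   // coordinates in destination pixel space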
+
+/** This blitter silently never draws anything.
+*/
+class SkNullBlitter : public SkBlitter {
+public:
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+ bool isNullBlitter() const override;
+};
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+ called with coordinates that have been clipped by the specified clipRect.
+ This means the caller need not perform the clipping ahead of time.
+*/
+class SkRectClipBlitter : public SkBlitter {
+public:
+ void init(SkBlitter* blitter, const SkIRect& clipRect) {
+ SkASSERT(!clipRect.isEmpty());
+ fBlitter = blitter;
+ fClipRect = clipRect;
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+    void blitAntiRect(int x, int y, int width, int height,
+                      SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+ int requestRowsPreserved() const override {
+ return fBlitter->requestRowsPreserved();
+ }
+
+ void* allocBlitMemory(size_t sz) override {
+ return fBlitter->allocBlitMemory(sz);
+ }
+
+private:
+ SkBlitter* fBlitter;
+ SkIRect fClipRect;
+};
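+
+// Illustrative sketch (not upstream): the wrapper is initialized over the real blitter
+// and can then be driven with unclipped coordinates.
+//
+//     SkRectClipBlitter rectClip;
+//     rectClip.init(realBlitter, SkIRect::MakeWH(100, 100));
+//     rectClip.blitH(-5, 0, 200);   // only pixels with x in [0, 100) reach realBlitter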
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+ called with coordinates that have been clipped by the specified clipRgn.
+ This means the caller need not perform the clipping ahead of time.
+*/
+class SkRgnClipBlitter : public SkBlitter {
+public:
+ void init(SkBlitter* blitter, const SkRegion* clipRgn) {
+ SkASSERT(clipRgn && !clipRgn->isEmpty());
+ fBlitter = blitter;
+ fRgn = clipRgn;
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+ int requestRowsPreserved() const override {
+ return fBlitter->requestRowsPreserved();
+ }
+
+ void* allocBlitMemory(size_t sz) override {
+ return fBlitter->allocBlitMemory(sz);
+ }
+
+private:
+ SkBlitter* fBlitter;
+ const SkRegion* fRgn;
+};
+
+#ifdef SK_DEBUG
+class SkRectClipCheckBlitter : public SkBlitter {
+public:
+ void init(SkBlitter* blitter, const SkIRect& clipRect) {
+ SkASSERT(blitter);
+ SkASSERT(!clipRect.isEmpty());
+ fBlitter = blitter;
+ fClipRect = clipRect;
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+ int requestRowsPreserved() const override {
+ return fBlitter->requestRowsPreserved();
+ }
+
+ void* allocBlitMemory(size_t sz) override {
+ return fBlitter->allocBlitMemory(sz);
+ }
+
+private:
+ SkBlitter* fBlitter;
+ SkIRect fClipRect;
+};
+#endif
+
+/** Factory to set up the most efficient wrapper blitter needed to apply a clip.
+    Returns a pointer to a member, so lifetime must be managed carefully.
+*/
+class SkBlitterClipper {
+public:
+ SkBlitter* apply(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect* bounds = nullptr);
+
+private:
+ SkNullBlitter fNullBlitter;
+ SkRectClipBlitter fRectBlitter;
+ SkRgnClipBlitter fRgnBlitter;
+};
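+
+// Illustrative use (not upstream): apply() hands back the cheapest blitter that still
+// honors the clip -- the original blitter when no extra clipping is needed, one of the
+// wrapper members otherwise, or fNullBlitter when nothing can draw.
+//
+//     SkBlitterClipper clipper;
+//     SkBlitter* clipped = clipper.apply(blitter, &clipRegion, &drawBounds);
+//     clipped->blitRect(x, y, w, h);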
+
+#define SHARD(code) fA->code; fB->code;
+
+class SkPairBlitter : public SkBlitter {
+ SkBlitter* fA = nullptr;
+ SkBlitter* fB = nullptr;
+public:
+ SkPairBlitter(SkBlitter* a, SkBlitter* b) : fA(a), fB(b) {}
+
+ void blitH(int x, int y, int width) override { SHARD(blitH(x, y, width)) }
+ void blitAntiH(int x, int y, const SkAlpha alphas[], const int16_t runs[]) override {
+ SHARD(blitAntiH(x, y, alphas, runs))
+ }
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SHARD(blitV(x, y, height, alpha))
+ }
+ void blitRect(int x, int y, int width, int height) override {
+ SHARD(blitRect(x, y, width, height))
+ }
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override {
+ SHARD(blitAntiRect(x, y, width, height, leftAlpha, rightAlpha))
+ }
+ void blitMask(const SkMask& mask, const SkIRect& clip) override { SHARD(blitMask(mask, clip)) }
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override { return nullptr; }
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override { SHARD(blitAntiH2(x, y, a0, a1)) }
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override { SHARD(blitAntiV2(x, y, a0, a1)) }
+};
+#undef SHARD
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitter_A8.cpp b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
new file mode 100644
index 0000000000..7217c9e6b6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkXfermodePriv.h"
+
+SkA8_Coverage_Blitter::SkA8_Coverage_Blitter(const SkPixmap& device,
+ const SkPaint& paint) : SkRasterBlitter(device) {
+ SkASSERT(nullptr == paint.getShader());
+ SkASSERT(paint.isSrcOver());
+ SkASSERT(nullptr == paint.getColorFilter());
+}
+
+void SkA8_Coverage_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ SkDEBUGCODE(int totalCount = 0;)
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ return;
+ }
+ if (antialias[0]) {
+ memset(device, antialias[0], count);
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+
+ SkDEBUGCODE(totalCount += count;)
+ }
+ SkASSERT(fDevice.width() == totalCount);
+}
+
+void SkA8_Coverage_Blitter::blitH(int x, int y, int width) {
+ memset(fDevice.writable_addr8(x, y), 0xFF, width);
+}
+
+void SkA8_Coverage_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (0 == alpha) {
+ return;
+ }
+
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ while (--height >= 0) {
+ *dst = alpha;
+ dst += dstRB;
+ }
+}
+
+void SkA8_Coverage_Blitter::blitRect(int x, int y, int width, int height) {
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ while (--height >= 0) {
+ memset(dst, 0xFF, width);
+ dst += dstRB;
+ }
+}
+
+void SkA8_Coverage_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const uint8_t* src = mask.getAddr8(x, y);
+ const size_t srcRB = mask.fRowBytes;
+ const size_t dstRB = fDevice.rowBytes();
+
+ while (--height >= 0) {
+ memcpy(dst, src, width);
+ dst += dstRB;
+ src += srcRB;
+ }
+}
+
+const SkPixmap* SkA8_Coverage_Blitter::justAnOpaqueColor(uint32_t*) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
new file mode 100644
index 0000000000..a67d2e250c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
@@ -0,0 +1,1420 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkVx.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkUtils.h"
+#include "src/core/SkXfermodePriv.h"
+
+static inline int upscale_31_to_32(int value) {
+ SkASSERT((unsigned)value <= 31);
+ return value + (value >> 4);
+}
+
+static inline int blend_32(int src, int dst, int scale) {
+ SkASSERT((unsigned)src <= 0xFF);
+ SkASSERT((unsigned)dst <= 0xFF);
+ SkASSERT((unsigned)scale <= 32);
+ return dst + ((src - dst) * scale >> 5);
+}
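+
+// A quick worked check (illustrative) of the two helpers above: upscale_31_to_32 maps
+// 0 -> 0, 15 -> 15, 16 -> 17, and 31 -> 31 + (31 >> 4) = 32, so full 5-bit coverage
+// becomes a full 32/32 blend weight; blend_32(200, 100, 32) then returns
+// 100 + ((200 - 100) * 32 >> 5) = 200, i.e. full coverage selects the source exactly.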
+
+static inline SkPMColor blend_lcd16(int srcA, int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = upscale_31_to_32(maskR);
+ maskG = upscale_31_to_32(maskG);
+ maskB = upscale_31_to_32(maskB);
+
+ // srcA has been upscaled to 256 before passed into this function
+ maskR = maskR * srcA >> 8;
+ maskG = maskG * srcA >> 8;
+ maskB = maskB * srcA >> 8;
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ blend_32(srcR, dstR, maskR),
+ blend_32(srcG, dstG, maskG),
+ blend_32(srcB, dstB, maskB));
+}
+
+static inline SkPMColor blend_lcd16_opaque(int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask,
+ SkPMColor opaqueDst) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ if (0xFFFF == mask) {
+ return opaqueDst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = upscale_31_to_32(maskR);
+ maskG = upscale_31_to_32(maskG);
+ maskB = upscale_31_to_32(maskB);
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ blend_32(srcR, dstR, maskR),
+ blend_32(srcG, dstG, maskG),
+ blend_32(srcB, dstB, maskB));
+}
+
+
+// TODO: rewrite at least the SSE code here. It's miserable.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+
+ // The following (left) shifts cause the top 5 bits of the mask components to
+ // line up with the corresponding components in an SkPMColor.
+ // Note that the mask's RGB16 order may differ from the SkPMColor order.
+ #define SK_R16x5_R32x5_SHIFT (SK_R32_SHIFT - SK_R16_SHIFT - SK_R16_BITS + 5)
+ #define SK_G16x5_G32x5_SHIFT (SK_G32_SHIFT - SK_G16_SHIFT - SK_G16_BITS + 5)
+ #define SK_B16x5_B32x5_SHIFT (SK_B32_SHIFT - SK_B16_SHIFT - SK_B16_BITS + 5)
+
+ #if SK_R16x5_R32x5_SHIFT == 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (x)
+ #elif SK_R16x5_R32x5_SHIFT > 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_slli_epi32(x, SK_R16x5_R32x5_SHIFT))
+ #else
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_srli_epi32(x, -SK_R16x5_R32x5_SHIFT))
+ #endif
+
+ #if SK_G16x5_G32x5_SHIFT == 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (x)
+ #elif SK_G16x5_G32x5_SHIFT > 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_slli_epi32(x, SK_G16x5_G32x5_SHIFT))
+ #else
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_srli_epi32(x, -SK_G16x5_G32x5_SHIFT))
+ #endif
+
+ #if SK_B16x5_B32x5_SHIFT == 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (x)
+ #elif SK_B16x5_B32x5_SHIFT > 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_slli_epi32(x, SK_B16x5_B32x5_SHIFT))
+ #else
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_srli_epi32(x, -SK_B16x5_B32x5_SHIFT))
+ #endif
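+
+    // As a concrete check (illustrative; the actual values depend on the platform's
+    // SK_*32_SHIFT configuration): with SK_R32_SHIFT == 16, SK_R16_SHIFT == 11 and
+    // SK_R16_BITS == 5, SK_R16x5_R32x5_SHIFT == 16 - 11 - 5 + 5 == 5, so the 5-bit red
+    // mask (bits 11..15 of the 565 value) is left-shifted onto bits 16..20 -- exactly
+    // where the 0x1F << SK_R32_SHIFT mask below expects it.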
+
+ static __m128i blend_lcd16_sse2(__m128i &src, __m128i &dst, __m128i &mask, __m128i &srcA) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+ // example is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // srcA = (srcA, 0, srcA, 0, srcA, 0, srcA, 0,
+ // srcA, 0, srcA, 0, srcA, 0, srcA, 0)
+ // mask stores 16-bit values (compressed three channels) interleaved with zeros.
+ // Lo and Hi denote the low and high bytes of a 16-bit value, respectively.
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+        // Get the R,G,B of each 16-bit mask pixel; we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+        // (allows the division further down to be replaced by a shift)
+        // Right-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Multiply each component of maskLo and maskHi by srcA
+ maskLo = _mm_mullo_epi16(maskLo, srcA);
+ maskHi = _mm_mullo_epi16(maskHi, srcA);
+
+        // Right-shift mask components by 8 (divide by 256)
+ maskLo = _mm_srli_epi16(maskLo, 8);
+ maskHi = _mm_srli_epi16(maskHi, 8);
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+        // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+        // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary.
+ return _mm_packus_epi16(resultLo, resultHi);
+ }
+
+ static __m128i blend_lcd16_opaque_sse2(__m128i &src, __m128i &dst, __m128i &mask) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+ // example is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // mask stores 16-bit values (shown as high and low bytes) interleaved with
+ // zeros
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+        // Get the R,G,B of each 16-bit mask pixel; we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+        // (allows the division further down to be replaced by a shift)
+        // Right-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+        // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels and force opaque.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+        // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary. Set alpha components to 0xFF.
+ return _mm_or_si128(_mm_packus_epi16(resultLo, resultHi),
+ _mm_set1_epi32(SK_A32_MASK << SK_A32_SHIFT));
+ }
+
+ void blit_row_lcd16(SkPMColor dst[], const uint16_t mask[], SkColor src, int width, SkPMColor) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = blend_lcd16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+        // Interleave with zeros to get two sets of four 16-bit values.
+        // src_sse = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+        src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+        // Set srcA_sse to contain eight copies of srcA, padded with zeros.
+        __m128i srcA_sse = _mm_set1_epi16(srcA);
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+                // Compare the masks against zero and gather the highest bit of each byte
+                // of the result; if the masks are all zero, pack_cmp will be 0xFFFF.
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = blend_lcd16_sse2(src_sse, dst_sse, mask_sse, srcA_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = blend_lcd16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+ }
+
+ void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor opaqueDst) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = blend_lcd16_opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+        // Interleave with zeros to get two sets of four 16-bit values.
+        // src_sse = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+        src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+                // Compare the masks against zero and gather the highest bit of each byte
+                // of the result; if the masks are all zero, pack_cmp will be 0xFFFF.
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = blend_lcd16_opaque_sse2(src_sse, dst_sse, mask_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = blend_lcd16_opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+ #define NEON_A (SK_A32_SHIFT / 8)
+ #define NEON_R (SK_R32_SHIFT / 8)
+ #define NEON_G (SK_G32_SHIFT / 8)
+ #define NEON_B (SK_B32_SHIFT / 8)
+
+ static inline uint8x8_t blend_32_neon(uint8x8_t src, uint8x8_t dst, uint16x8_t scale) {
+ int16x8_t src_wide, dst_wide;
+
+ src_wide = vreinterpretq_s16_u16(vmovl_u8(src));
+ dst_wide = vreinterpretq_s16_u16(vmovl_u8(dst));
+
+ src_wide = (src_wide - dst_wide) * vreinterpretq_s16_u16(scale);
+
+ dst_wide += vshrq_n_s16(src_wide, 5);
+
+ return vmovn_u16(vreinterpretq_u16_s16(dst_wide));
+ }
+
+ void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width,
+ SkPMColor opaqueDst) {
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ uint8x8_t vcolR = vdup_n_u8(colR);
+ uint8x8_t vcolG = vdup_n_u8(colG);
+ uint8x8_t vcolB = vdup_n_u8(colB);
+ uint8x8_t vopqDstA = vdup_n_u8(SkGetPackedA32(opaqueDst));
+ uint8x8_t vopqDstR = vdup_n_u8(SkGetPackedR32(opaqueDst));
+ uint8x8_t vopqDstG = vdup_n_u8(SkGetPackedG32(opaqueDst));
+ uint8x8_t vopqDstB = vdup_n_u8(SkGetPackedB32(opaqueDst));
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+ uint8x8_t vsel_trans, vsel_opq;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+ // Prepare compare masks
+ vsel_trans = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0)));
+ vsel_opq = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0xFFFF)));
+
+            // Extract each color channel of the mask into 5 bits
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
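+            // For RGB565 this works out to (illustrative): R = mask >> 11 (top 5 bits),
+            // B = mask & 0x1F, and for G the left shift by SK_R16_BITS drops red while
+            // the right shift by SK_B16_BITS + SK_R16_BITS + 1 keeps the top 5 of
+            // green's 6 bits -- the "+ 1" discards green's least significant bit, just
+            // like the scalar path's ">> (SK_G16_BITS - 5)".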
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vdst.val[NEON_A] = vbsl_u8(vsel_trans, vdst.val[NEON_A], vdup_n_u8(0xFF));
+ vdst.val[NEON_A] = vbsl_u8(vsel_opq, vopqDstA, vdst.val[NEON_A]);
+
+ vdst.val[NEON_R] = blend_32_neon(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = blend_32_neon(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = blend_32_neon(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vdst.val[NEON_R] = vbsl_u8(vsel_opq, vopqDstR, vdst.val[NEON_R]);
+ vdst.val[NEON_G] = vbsl_u8(vsel_opq, vopqDstG, vdst.val[NEON_G]);
+ vdst.val[NEON_B] = vbsl_u8(vsel_opq, vopqDstB, vdst.val[NEON_B]);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ // Leftovers
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16_opaque(colR, colG, colB, dst[i], src[i], opaqueDst);
+ }
+ }
+
+ void blit_row_lcd16(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor) {
+ int colA = SkColorGetA(color);
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ colA = SkAlpha255To256(colA);
+
+ uint16x8_t vcolA = vdupq_n_u16(colA);
+ uint8x8_t vcolR = vdup_n_u8(colR);
+ uint8x8_t vcolG = vdup_n_u8(colG);
+ uint8x8_t vcolB = vdup_n_u8(colB);
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+            // Extract each color channel of the mask into 5 bits
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vmaskR = vshrq_n_u16(vmaskR * vcolA, 8);
+ vmaskG = vshrq_n_u16(vmaskG * vcolA, 8);
+ vmaskB = vshrq_n_u16(vmaskB * vcolA, 8);
+
+ vdst.val[NEON_A] = vdup_n_u8(0xFF);
+ vdst.val[NEON_R] = blend_32_neon(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = blend_32_neon(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = blend_32_neon(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16(colA, colR, colG, colB, dst[i], src[i]);
+ }
+ }
+
+#else
+
+ static inline void blit_row_lcd16(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor) {
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16(srcA, srcR, srcG, srcB, dst[i], mask[i]);
+ }
+ }
+
+ static inline void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width,
+ SkPMColor opaqueDst) {
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16_opaque(srcR, srcG, srcB, dst[i], mask[i], opaqueDst);
+ }
+ }
+
+#endif
+
+static bool blit_color(const SkPixmap& device,
+ const SkMask& mask,
+ const SkIRect& clip,
+ SkColor color) {
+ int x = clip.fLeft,
+ y = clip.fTop;
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kA8_Format) {
+ SkOpts::blit_mask_d32_a8(device.writable_addr32(x,y), device.rowBytes(),
+ (const SkAlpha*)mask.getAddr(x,y), mask.fRowBytes,
+ color, clip.width(), clip.height());
+ return true;
+ }
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kLCD16_Format) {
+ auto dstRow = device.writable_addr32(x,y);
+ auto maskRow = (const uint16_t*)mask.getAddr(x,y);
+
+ auto blit_row = blit_row_lcd16;
+ SkPMColor opaqueDst = 0; // ignored unless opaque
+
+ if (0xff == SkColorGetA(color)) {
+ blit_row = blit_row_lcd16_opaque;
+ opaqueDst = SkPreMultiplyColor(color);
+ }
+
+ for (int height = clip.height(); height --> 0; ) {
+ blit_row(dstRow, maskRow, color, clip.width(), opaqueDst);
+
+ dstRow = (SkPMColor*) (( char*) dstRow + device.rowBytes());
+ maskRow = (const uint16_t*)((const char*)maskRow + mask.fRowBytes);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void SkARGB32_Blit32(const SkPixmap& device, const SkMask& mask,
+ const SkIRect& clip, SkPMColor srcColor) {
+ U8CPU alpha = SkGetPackedA32(srcColor);
+ unsigned flags = SkBlitRow::kSrcPixelAlpha_Flag32;
+ if (alpha != 255) {
+ flags |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ SkBlitRow::Proc32 proc = SkBlitRow::Factory32(flags);
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+
+ SkPMColor* dstRow = device.writable_addr32(x, y);
+ const SkPMColor* srcRow = reinterpret_cast<const SkPMColor*>(mask.getAddr8(x, y));
+
+ do {
+ proc(dstRow, srcRow, width, alpha);
+ dstRow = (SkPMColor*)((char*)dstRow + device.rowBytes());
+ srcRow = (const SkPMColor*)((const char*)srcRow + mask.fRowBytes);
+ } while (--height != 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkARGB32_Blitter::SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device) {
+ SkColor color = paint.getColor();
+ fColor = color;
+
+ fSrcA = SkColorGetA(color);
+ unsigned scale = SkAlpha255To256(fSrcA);
+ fSrcR = SkAlphaMul(SkColorGetR(color), scale);
+ fSrcG = SkAlphaMul(SkColorGetG(color), scale);
+ fSrcB = SkAlphaMul(SkColorGetB(color), scale);
+
+ fPMColor = SkPackARGB32(fSrcA, fSrcR, fSrcG, fSrcB);
+}
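+
+// A worked example (illustrative) of the premultiplication above: for half-transparent
+// red, color == 0x80FF0000, so fSrcA == 0x80, scale == SkAlpha255To256(0x80) == 129,
+// and fSrcR == SkAlphaMul(255, 129) == (255 * 129) >> 8 == 128; fPMColor therefore
+// packs (A, R, G, B) == (0x80, 0x80, 0, 0).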
+
+const SkPixmap* SkARGB32_Blitter::justAnOpaqueColor(uint32_t* value) {
+ if (255 == fSrcA) {
+ *value = fPMColor;
+ return &fDevice;
+ }
+ return nullptr;
+}
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+void SkARGB32_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkBlitRow::Color32(device, device, width, fPMColor);
+}
+
+void SkARGB32_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t color = fPMColor;
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ unsigned opaqueMask = fSrcA; // if fSrcA is 0xFF, then we will catch the fast opaque case
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if ((opaqueMask & aa) == 255) {
+ sk_memset32(device, color, count);
+ } else {
+ uint32_t sc = SkAlphaMulQ(color, SkAlpha255To256(aa));
+ SkBlitRow::Color32(device, device, count, sc);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device[1] = SkBlendARGB32(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkBlendARGB32(fPMColor, device[0], a1);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#define solid_8_pixels(mask, dst, color) \
+ do { \
+ if (mask & 0x80) dst[0] = color; \
+ if (mask & 0x40) dst[1] = color; \
+ if (mask & 0x20) dst[2] = color; \
+ if (mask & 0x10) dst[3] = color; \
+ if (mask & 0x08) dst[4] = color; \
+ if (mask & 0x04) dst[5] = color; \
+ if (mask & 0x02) dst[6] = color; \
+ if (mask & 0x01) dst[7] = color; \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlitBW
+#define SK_BLITBWMASK_ARGS , SkPMColor color
+#define SK_BLITBWMASK_BLIT8(mask, dst) solid_8_pixels(mask, dst, color)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "src/core/SkBlitBWMaskTemplate.h"
+
+#define blend_8_pixels(mask, dst, sc, dst_scale) \
+ do { \
+ if (mask & 0x80) { dst[0] = sc + SkAlphaMulQ(dst[0], dst_scale); } \
+ if (mask & 0x40) { dst[1] = sc + SkAlphaMulQ(dst[1], dst_scale); } \
+ if (mask & 0x20) { dst[2] = sc + SkAlphaMulQ(dst[2], dst_scale); } \
+ if (mask & 0x10) { dst[3] = sc + SkAlphaMulQ(dst[3], dst_scale); } \
+ if (mask & 0x08) { dst[4] = sc + SkAlphaMulQ(dst[4], dst_scale); } \
+ if (mask & 0x04) { dst[5] = sc + SkAlphaMulQ(dst[5], dst_scale); } \
+ if (mask & 0x02) { dst[6] = sc + SkAlphaMulQ(dst[6], dst_scale); } \
+ if (mask & 0x01) { dst[7] = sc + SkAlphaMulQ(dst[7], dst_scale); } \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlendBW
+#define SK_BLITBWMASK_ARGS , uint32_t sc, unsigned dst_scale
+#define SK_BLITBWMASK_BLIT8(mask, dst) blend_8_pixels(mask, dst, sc, dst_scale)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "src/core/SkBlitBWMaskTemplate.h"
+
+void SkARGB32_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+ SkASSERT(fSrcA != 0xFF);
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ if (blit_color(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlendBW(fDevice, mask, clip, fPMColor, SkAlpha255To256(255 - fSrcA));
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SK_ABORT("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (blit_color(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlitBW(fDevice, mask, clip, fPMColor);
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SK_ABORT("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device[1] = SkFastFourByteInterp(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 0 || fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+
+ if (alpha != 255) {
+ color = SkAlphaMulQ(color, SkAlpha255To256(alpha));
+ }
+
+ unsigned dst_scale = SkAlpha255To256(255 - SkGetPackedA32(color));
+ size_t rowBytes = fDevice.rowBytes();
+ while (--height >= 0) {
+ device[0] = color + SkAlphaMulQ(device[0], dst_scale);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+}
+
+void SkARGB32_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+ size_t rowBytes = fDevice.rowBytes();
+
+ if (SkGetPackedA32(fPMColor) == 0xFF) {
+ SkOpts::rect_memset32(device, color, width, rowBytes, height);
+ } else {
+ while (height --> 0) {
+ SkBlitRow::Color32(device, device, width, color);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+ }
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+///////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Black_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkPMColor black = (SkPMColor)(SK_A32_MASK << SK_A32_SHIFT);
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if (aa == 255) {
+ sk_memset32(device, black, count);
+ } else {
+ SkPMColor src = aa << SK_A32_SHIFT;
+ unsigned dst_scale = 256 - aa;
+ int n = count;
+ do {
+ --n;
+ device[n] = src + SkAlphaMulQ(device[n], dst_scale);
+ } while (n > 0);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Black_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device[1] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[1], 256 - a1);
+}
+
+void SkARGB32_Black_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Special version of SkBlitRow::Factory32 that knows we're in kSrc_Mode,
+// instead of kSrcOver_Mode
+static void blend_srcmode(SkPMColor* SK_RESTRICT device,
+ const SkPMColor* SK_RESTRICT span,
+ int count, U8CPU aa) {
+ int aa256 = SkAlpha255To256(aa);
+ for (int i = 0; i < count; ++i) {
+ device[i] = SkFourByteInterp256(span[i], device[i], aa256);
+ }
+}
+
+SkARGB32_Shader_Blitter::SkARGB32_Shader_Blitter(const SkPixmap& device,
+ const SkPaint& paint, SkShaderBase::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ fBuffer = (SkPMColor*)sk_malloc_throw(device.width() * (sizeof(SkPMColor)));
+
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+
+ int flags = 0;
+ if (!(shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag)) {
+ flags |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+ // we call this on the output from the shader
+ fProc32 = SkBlitRow::Factory32(flags);
+ // we call this on the output from the shader + alpha from the aa buffer
+ fProc32Blend = SkBlitRow::Factory32(flags | SkBlitRow::kGlobalAlpha_Flag32);
+
+ fShadeDirectlyIntoDevice = false;
+ if (fXfermode == nullptr) {
+ if (shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag) {
+ fShadeDirectlyIntoDevice = true;
+ }
+ } else {
+ if (SkBlendMode::kSrc == paint.getBlendMode()) {
+ fShadeDirectlyIntoDevice = true;
+ fProc32Blend = blend_srcmode;
+ }
+ }
+
+ fConstInY = SkToBool(shaderContext->getFlags() & SkShaderBase::kConstInY32_Flag);
+}
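+
+// Summarizing the dispatch set up above (an illustrative restatement):
+//   no xfermode, opaque shader     -> shade straight into the device
+//   no xfermode, non-opaque shader -> shade into fBuffer, then fProc32 / fProc32Blend
+//   blend mode kSrc                -> shade into the device; AA edges use blend_srcmode
+//   any other xfermode             -> shade into fBuffer, then fXfermode->xfer32()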
+
+SkARGB32_Shader_Blitter::~SkARGB32_Shader_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkARGB32_Shader_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+
+ if (fShadeDirectlyIntoDevice) {
+ fShaderContext->shadeSpan(x, y, device, width);
+ } else {
+ SkPMColor* span = fBuffer;
+ fShaderContext->shadeSpan(x, y, span, width);
+ if (fXfermode) {
+ fXfermode->xfer32(device, span, width, nullptr);
+ } else {
+ fProc32(device, span, width, 255);
+ }
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 &&
+ x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+ auto* shaderContext = fShaderContext;
+ SkPMColor* span = fBuffer;
+
+ if (fConstInY) {
+ if (fShadeDirectlyIntoDevice) {
+ // shade the first row directly into the device
+ shaderContext->shadeSpan(x, y, device, width);
+ span = device;
+ while (--height > 0) {
+ device = (uint32_t*)((char*)device + deviceRB);
+ memcpy(device, span, width << 2);
+ }
+ } else {
+ shaderContext->shadeSpan(x, y, span, width);
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ xfer->xfer32(device, span, width, nullptr);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = fProc32;
+ do {
+ proc(device, span, width, 255);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+ return;
+ }
+
+ if (fShadeDirectlyIntoDevice) {
+ do {
+ shaderContext->shadeSpan(x, y, device, width);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ xfer->xfer32(device, span, width, nullptr);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = fProc32;
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ proc(device, span, width, 255);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkPMColor* span = fBuffer;
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ auto* shaderContext = fShaderContext;
+
+    if (fXfermode && !fShadeDirectlyIntoDevice) {
+        SkXfermode* xfer = fXfermode;
+        for (;;) {
+            int count = *runs;
+            if (count <= 0) {
+                break;
+            }
+ int aa = *antialias;
+ if (aa) {
+ shaderContext->shadeSpan(x, y, span, count);
+ if (aa == 255) {
+ xfer->xfer32(device, span, count, nullptr);
+ } else {
+ // count is almost always 1
+ for (int i = count - 1; i >= 0; --i) {
+ xfer->xfer32(&device[i], &span[i], 1, antialias);
+ }
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ } else if (fShadeDirectlyIntoDevice ||
+ (shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag)) {
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ if (aa == 255) {
+ // cool, have the shader draw right into the device
+ shaderContext->shadeSpan(x, y, device, count);
+ } else {
+ shaderContext->shadeSpan(x, y, span, count);
+ fProc32Blend(device, span, count, aa);
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ } else {
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ shaderContext->shadeSpan(x, y, span, count);
+ if (aa == 255) {
+ fProc32(device, span, count, 255);
+ } else {
+ fProc32Blend(device, span, count, aa);
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ }
+}
+
+using U32 = skvx::Vec< 4, uint32_t>;
+using U8x4 = skvx::Vec<16, uint8_t>;
+using U8 = skvx::Vec< 4, uint8_t>;
+
+static void drive(SkPMColor* dst, const SkPMColor* src, const uint8_t* cov, int n,
+ U8x4 (*kernel)(U8x4,U8x4,U8x4)) {
+
+ auto apply = [kernel](U32 dst, U32 src, U8 cov) -> U32 {
+ U8x4 cov_splat = skvx::shuffle<0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3>(cov);
+ return skvx::bit_pun<U32>(kernel(skvx::bit_pun<U8x4>(dst),
+ skvx::bit_pun<U8x4>(src),
+ cov_splat));
+ };
+ while (n >= 4) {
+ apply(U32::Load(dst), U32::Load(src), U8::Load(cov)).store(dst);
+ dst += 4;
+ src += 4;
+ cov += 4;
+ n -= 4;
+ }
+ while (n --> 0) {
+ *dst = apply(U32{*dst}, U32{*src}, U8{*cov})[0];
+ dst++;
+ src++;
+ cov++;
+ }
+}
+
+static void blend_row_A8(SkPMColor* dst, const void* mask, const SkPMColor* src, int n) {
+ auto cov = (const uint8_t*)mask;
+ drive(dst, src, cov, n, [](U8x4 d, U8x4 s, U8x4 c) {
+ U8x4 s_aa = skvx::approx_scale(s, c),
+ alpha = skvx::shuffle<3,3,3,3, 7,7,7,7, 11,11,11,11, 15,15,15,15>(s_aa);
+ return s_aa + skvx::approx_scale(d, 255 - alpha);
+ });
+}
+
+static void blend_row_A8_opaque(SkPMColor* dst, const void* mask, const SkPMColor* src, int n) {
+ auto cov = (const uint8_t*)mask;
+ drive(dst, src, cov, n, [](U8x4 d, U8x4 s, U8x4 c) {
+ return skvx::div255( skvx::cast<uint16_t>(s) * skvx::cast<uint16_t>( c )
+ + skvx::cast<uint16_t>(d) * skvx::cast<uint16_t>(255-c));
+ });
+}
+
+static void blend_row_lcd16(SkPMColor* dst, const void* vmask, const SkPMColor* src, int n) {
+ auto src_alpha_blend = [](int s, int d, int sa, int m) {
+ return d + SkAlphaMul(s - SkAlphaMul(sa, d), m);
+ };
+
+ auto upscale_31_to_255 = [](int v) {
+ return (v << 3) | (v >> 2);
+ };
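+
+    // Quick check (illustrative): upscale_31_to_255(31) == (31 << 3) | (31 >> 2)
+    // == 248 | 7 == 255 and upscale_31_to_255(0) == 0, so the 5-bit endpoints map
+    // exactly onto the 8-bit endpoints.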
+
+ auto mask = (const uint16_t*)vmask;
+ for (int i = 0; i < n; ++i) {
+ uint16_t m = mask[i];
+ if (0 == m) {
+ continue;
+ }
+
+ SkPMColor s = src[i];
+ SkPMColor d = dst[i];
+
+ int srcA = SkGetPackedA32(s);
+ int srcR = SkGetPackedR32(s);
+ int srcG = SkGetPackedG32(s);
+ int srcB = SkGetPackedB32(s);
+
+ srcA += srcA >> 7;
+
+ // We're ignoring the least significant bit of the green coverage channel here.
+ int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+ // Scale up to 8-bit coverage to work with SkAlphaMul() in src_alpha_blend().
+ maskR = upscale_31_to_255(maskR);
+ maskG = upscale_31_to_255(maskG);
+ maskB = upscale_31_to_255(maskB);
+
+ // This LCD blit routine only works if the destination is opaque.
+ dst[i] = SkPackARGB32(0xFF,
+ src_alpha_blend(srcR, SkGetPackedR32(d), srcA, maskR),
+ src_alpha_blend(srcG, SkGetPackedG32(d), srcA, maskG),
+ src_alpha_blend(srcB, SkGetPackedB32(d), srcA, maskB));
+ }
+}
+
+static void blend_row_LCD16_opaque(SkPMColor* dst, const void* vmask, const SkPMColor* src, int n) {
+ auto mask = (const uint16_t*)vmask;
+
+ for (int i = 0; i < n; ++i) {
+ uint16_t m = mask[i];
+ if (0 == m) {
+ continue;
+ }
+
+ SkPMColor s = src[i];
+ SkPMColor d = dst[i];
+
+ int srcR = SkGetPackedR32(s);
+ int srcG = SkGetPackedG32(s);
+ int srcB = SkGetPackedB32(s);
+
+ // We're ignoring the least significant bit of the green coverage channel here.
+ int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend_32.
+ maskR = upscale_31_to_32(maskR);
+ maskG = upscale_31_to_32(maskG);
+ maskB = upscale_31_to_32(maskB);
+
+ // This LCD blit routine only works if the destination is opaque.
+ dst[i] = SkPackARGB32(0xFF,
+ blend_32(srcR, SkGetPackedR32(d), maskR),
+ blend_32(srcG, SkGetPackedG32(d), maskG),
+ blend_32(srcB, SkGetPackedB32(d), maskB));
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ // we only handle kA8 with an xfermode
+ if (fXfermode && (SkMask::kA8_Format != mask.fFormat)) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ SkASSERT(mask.fBounds.contains(clip));
+
+ void (*blend_row)(SkPMColor*, const void* mask, const SkPMColor*, int) = nullptr;
+
+ if (!fXfermode) {
+ bool opaque = (fShaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag);
+
+ if (mask.fFormat == SkMask::kA8_Format && opaque) {
+ blend_row = blend_row_A8_opaque;
+ } else if (mask.fFormat == SkMask::kA8_Format) {
+ blend_row = blend_row_A8;
+ } else if (mask.fFormat == SkMask::kLCD16_Format && opaque) {
+ blend_row = blend_row_LCD16_opaque;
+ } else if (mask.fFormat == SkMask::kLCD16_Format) {
+ blend_row = blend_row_lcd16;
+ } else {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+ }
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ int y = clip.fTop;
+ int height = clip.height();
+
+ char* dstRow = (char*)fDevice.writable_addr32(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ const uint8_t* maskRow = (const uint8_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ SkPMColor* span = fBuffer;
+
+ if (fXfermode) {
+ SkASSERT(SkMask::kA8_Format == mask.fFormat);
+ SkXfermode* xfer = fXfermode;
+ do {
+ fShaderContext->shadeSpan(x, y, span, width);
+ xfer->xfer32(reinterpret_cast<SkPMColor*>(dstRow), span, width, maskRow);
+ dstRow += dstRB;
+ maskRow += maskRB;
+ y += 1;
+ } while (--height > 0);
+ } else {
+ SkASSERT(blend_row);
+ do {
+ fShaderContext->shadeSpan(x, y, span, width);
+ blend_row(reinterpret_cast<SkPMColor*>(dstRow), maskRow, span, width);
+ dstRow += dstRB;
+ maskRow += maskRB;
+ y += 1;
+ } while (--height > 0);
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(x >= 0 && y >= 0 && y + height <= fDevice.height());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ if (fConstInY) {
+ SkPMColor c;
+ fShaderContext->shadeSpan(x, y, &c, 1);
+
+ if (fShadeDirectlyIntoDevice) {
+ if (255 == alpha) {
+ do {
+ *device = c;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ *device = SkFourByteInterp(c, *device, alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ } else {
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ xfer->xfer32(device, &c, 1, &alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+ do {
+ proc(device, &c, 1, alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+ return;
+ }
+
+ if (fShadeDirectlyIntoDevice) {
+ if (255 == alpha) {
+ do {
+ fShaderContext->shadeSpan(x, y, device, 1);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ SkPMColor c;
+ fShaderContext->shadeSpan(x, y, &c, 1);
+ *device = SkFourByteInterp(c, *device, alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ } else {
+ SkPMColor* span = fBuffer;
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ fShaderContext->shadeSpan(x, y, span, 1);
+ xfer->xfer32(device, span, 1, &alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+ do {
+ fShaderContext->shadeSpan(x, y, span, 1);
+ proc(device, span, 1, alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_RGB565.cpp b/gfx/skia/skia/src/core/SkBlitter_RGB565.cpp
new file mode 100644
index 0000000000..3df8217f69
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_RGB565.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/utils/SkUTF.h"
+
+#include "include/private/SkNx.h"
+
+static void D16_S32X_src(uint16_t dst[], const SkPMColor src[], int count, uint8_t coverage) {
+ SkASSERT(coverage == 0xFF);
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkPixel32ToPixel16(src[i]);
+ }
+}
+
+static void D16_S32X_src_coverage(uint16_t dst[], const SkPMColor src[], int count,
+ uint8_t coverage) {
+ switch (coverage) {
+ case 0: break;
+ case 0xFF:
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkPixel32ToPixel16(src[i]);
+ }
+ break;
+ default:
+ unsigned scale = coverage + (coverage >> 7);
+ for (int i = 0; i < count; ++i) {
+                // src mode with partial coverage lerps between src and dst
+                dst[i] = SkPixel32ToPixel16(SkPMLerp(src[i], SkPixel16ToPixel32(dst[i]), scale));
+ }
+ break;
+ }
+}
+
+static void D16_S32A_srcover(uint16_t dst[], const SkPMColor src[], int count, uint8_t coverage) {
+ SkASSERT(coverage == 0xFF);
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkSrcOver32To16(src[i], dst[i]);
+ }
+}
+
+static void D16_S32A_srcover_coverage(uint16_t dst[], const SkPMColor src[], int count,
+ uint8_t coverage) {
+ switch (coverage) {
+ case 0: break;
+ case 0xFF:
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkSrcOver32To16(src[i], dst[i]);
+ }
+ break;
+ default:
+ unsigned scale = coverage + (coverage >> 7);
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkSrcOver32To16(SkAlphaMulQ(src[i], scale), dst[i]);
+ }
+ break;
+ }
+}
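+
+// Note (illustrative): "scale = coverage + (coverage >> 7)" widens 8-bit
+// coverage 0..255 to 0..256 so the per-channel multiply can use a shift by 8
+// instead of a divide by 255, e.g. 0xFF -> 0x100 (full) and 0x80 -> 0x81 (~half).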
+
+bool SkRGB565_Shader_Blitter::Supports(const SkPixmap& device, const SkPaint& paint) {
+ if (device.colorType() != kRGB_565_SkColorType) {
+ return false;
+ }
+ if (device.colorSpace()) {
+ return false;
+ }
+ if (paint.getBlendMode() != SkBlendMode::kSrcOver &&
+ paint.getBlendMode() != SkBlendMode::kSrc) {
+ return false;
+ }
+ if (paint.isDither()) {
+ return false;
+ }
+ return true;
+}
+
+SkRGB565_Shader_Blitter::SkRGB565_Shader_Blitter(const SkPixmap& device,
+ const SkPaint& paint, SkShaderBase::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ SkASSERT(shaderContext);
+ SkASSERT(Supports(device, paint));
+
+ fBuffer = (SkPMColor*)sk_malloc_throw(device.width() * (sizeof(SkPMColor)));
+
+ bool isOpaque = SkToBool(shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag);
+
+    if (paint.getBlendMode() == SkBlendMode::kSrc) {
+ fBlend = D16_S32X_src;
+ fBlendCoverage = D16_S32X_src_coverage;
+ } else { // srcover
+ fBlend = isOpaque ? D16_S32X_src : D16_S32A_srcover;
+ fBlendCoverage = isOpaque ? D16_S32X_src_coverage : D16_S32A_srcover_coverage;
+ }
+}
+
+SkRGB565_Shader_Blitter::~SkRGB565_Shader_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkRGB565_Shader_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint16_t* device = fDevice.writable_addr16(x, y);
+
+ SkPMColor* span = fBuffer;
+ fShaderContext->shadeSpan(x, y, span, width);
+ fBlend(device, span, width, 0xFF);
+}
+
+void SkRGB565_Shader_Blitter::blitAntiH(int x, int y, const SkAlpha coverage[],
+ const int16_t runs[]) {
+ SkPMColor* span = fBuffer;
+ uint16_t* device = fDevice.writable_addr16(x, y);
+ auto* shaderContext = fShaderContext;
+
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *coverage;
+ if (aa) {
+ shaderContext->shadeSpan(x, y, span, count);
+ fBlendCoverage(device, span, count, aa);
+ }
+ device += count;
+ runs += count;
+ coverage += count;
+ x += count;
+ }
+}
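+
+// Note (illustrative): runs[]/coverage[] are run-length encoded. For example
+//   runs:     { 3,    _, _, 2,    _, 0 }
+//   coverage: { 0x40, _, _, 0xFF, _, _ }
+// means "3 pixels at coverage 0x40, then 2 pixels at 0xFF, then stop"; both
+// arrays are indexed at the start of each run and advanced by its length, and
+// a non-positive run count terminates the scanline.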
diff --git a/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
new file mode 100644
index 0000000000..89b107b939
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkSpriteBlitter.h"
+
+SkSpriteBlitter::SkSpriteBlitter(const SkPixmap& source)
+ : fSource(source) {}
+
+void SkSpriteBlitter::setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) {
+ fDst = dst;
+ fLeft = left;
+ fTop = top;
+ fPaint = &paint;
+}
+
+void SkSpriteBlitter::blitH(int x, int y, int width) {
+ SkDEBUGFAIL("how did we get here?");
+
+    // Fall back to blitRect.
+ this->blitRect(x, y, width, 1);
+}
+
+void SkSpriteBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // No fallback strategy.
+}
+
+void SkSpriteBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // Fall back to superclass if the code gets here in release mode.
+ INHERITED::blitV(x, y, height, alpha);
+}
+
+void SkSpriteBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // Fall back to superclass if the code gets here in release mode.
+ INHERITED::blitMask(mask, clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpriteBlitter_Memcpy final : public SkSpriteBlitter {
+public:
+ static bool Supports(const SkPixmap& dst, const SkPixmap& src, const SkPaint& paint) {
+ // the caller has already inspected the colorspace on src and dst
+ SkASSERT(!SkColorSpaceXformSteps::Required(src.colorSpace(), dst.colorSpace()));
+
+ if (dst.colorType() != src.colorType()) {
+ return false;
+ }
+ if (paint.getMaskFilter() || paint.getColorFilter() || paint.getImageFilter()) {
+ return false;
+ }
+ if (0xFF != paint.getAlpha()) {
+ return false;
+ }
+ SkBlendMode mode = paint.getBlendMode();
+ return SkBlendMode::kSrc == mode || (SkBlendMode::kSrcOver == mode && src.isOpaque());
+ }
+
+ SkSpriteBlitter_Memcpy(const SkPixmap& src)
+ : INHERITED(src) {}
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(fDst.colorType() == fSource.colorType());
+ SkASSERT(width > 0 && height > 0);
+
+ char* dst = (char*)fDst.writable_addr(x, y);
+ const char* src = (const char*)fSource.addr(x - fLeft, y - fTop);
+ const size_t dstRB = fDst.rowBytes();
+ const size_t srcRB = fSource.rowBytes();
+ const size_t bytesToCopy = width << fSource.shiftPerPixel();
+
+ while (height --> 0) {
+ memcpy(dst, src, bytesToCopy);
+ dst += dstRB;
+ src += srcRB;
+ }
+ }
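+
+    // Note (illustrative): shiftPerPixel() is log2(bytes per pixel), so for a
+    // 4-byte-per-pixel source "width << 2" copies e.g. 400 bytes for a
+    // 100-pixel row, while dst/src each advance by their own rowBytes (which
+    // may include per-row padding).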
+
+private:
+ typedef SkSpriteBlitter INHERITED;
+};
+
+class SkRasterPipelineSpriteBlitter : public SkSpriteBlitter {
+public:
+ SkRasterPipelineSpriteBlitter(const SkPixmap& src, SkArenaAlloc* alloc)
+ : INHERITED(src)
+ , fAlloc(alloc)
+ , fBlitter(nullptr)
+ , fSrcPtr{nullptr, 0}
+ {}
+
+ void setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) override {
+ fDst = dst;
+ fLeft = left;
+ fTop = top;
+ fPaintColor = paint.getColor4f();
+
+ SkRasterPipeline p(fAlloc);
+ p.append_load(fSource.colorType(), &fSrcPtr);
+
+ if (fSource.colorType() == kAlpha_8_SkColorType) {
+ // The color for A8 images comes from the (sRGB) paint color.
+ p.append_set_rgb(fAlloc, fPaintColor);
+ p.append(SkRasterPipeline::premul);
+ }
+ if (auto dstCS = fDst.colorSpace()) {
+ auto srcCS = fSource.colorSpace();
+ if (!srcCS || fSource.colorType() == kAlpha_8_SkColorType) {
+ // We treat untagged images as sRGB.
+ // A8 images get their r,g,b from the paint color, so they're also sRGB.
+ srcCS = sk_srgb_singleton();
+ }
+ auto srcAT = fSource.isOpaque() ? kOpaque_SkAlphaType
+ : kPremul_SkAlphaType;
+ fAlloc->make<SkColorSpaceXformSteps>(srcCS, srcAT,
+ dstCS, kPremul_SkAlphaType)
+ ->apply(&p, fSource.colorType());
+ }
+ if (fPaintColor.fA != 1.0f) {
+ p.append(SkRasterPipeline::scale_1_float, &fPaintColor.fA);
+ }
+
+ bool is_opaque = fSource.isOpaque() && fPaintColor.fA == 1.0f;
+ fBlitter = SkCreateRasterPipelineBlitter(fDst, paint, p, is_opaque, fAlloc);
+ }
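+
+    // Note (illustrative): for an A8 source the pipeline assembled above runs,
+    // in order: load A8 -> set_rgb(paint color) -> premul -> color-space
+    // transform (effectively a no-op when src and dst spaces already match) ->
+    // scale_1_float(paint alpha, appended only when alpha < 1) -> blit.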
+
+ void blitRect(int x, int y, int width, int height) override {
+ fSrcPtr.stride = fSource.rowBytesAsPixels();
+
+ // We really want fSrcPtr.pixels = fSource.addr(-fLeft, -fTop) here, but that asserts.
+ // Instead we ask for addr(-fLeft+x, -fTop+y), then back up (x,y) manually.
+ // Representing bpp as a size_t keeps all this math in size_t instead of int,
+ // which could wrap around with large enough fSrcPtr.stride and y.
+ size_t bpp = fSource.info().bytesPerPixel();
+ fSrcPtr.pixels = (char*)fSource.addr(-fLeft+x, -fTop+y) - bpp * x
+ - bpp * y * fSrcPtr.stride;
+
+ fBlitter->blitRect(x,y,width,height);
+ }
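+
+    // Note (illustrative): the offsets cancel exactly. With, say, bpp = 4 and
+    // stride = 256, starting from addr(-fLeft + x, -fTop + y) and subtracting
+    // 4*x + 4*y*256 yields a base pointer such that when the pipeline later
+    // adds back bpp*(x + y*stride) for each shaded pixel, it reads source
+    // pixel (x - fLeft, y - fTop), i.e. the sprite stays pinned at (fLeft, fTop).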
+
+private:
+ SkArenaAlloc* fAlloc;
+ SkBlitter* fBlitter;
+ SkRasterPipeline_MemoryCtx fSrcPtr;
+ SkColor4f fPaintColor;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+// returning null means the caller will fall back to SkBlitter::Choose(),
+// wrapping the source bitmap in a shader
+SkBlitter* SkBlitter::ChooseSprite(const SkPixmap& dst, const SkPaint& paint,
+ const SkPixmap& source, int left, int top, SkArenaAlloc* allocator) {
+ /* We currently ignore antialiasing and filtertype, meaning we will take our
+ special blitters regardless of these settings. Ignoring filtertype seems fine
+ since by definition there is no scale in the matrix. Ignoring antialiasing is
+ a bit of a hack, since we "could" pass in the fractional left/top for the bitmap,
+ and respect that by blending the edges of the bitmap against the device. To support
+ this we could either add more special blitters here, or detect antialiasing in the
+ paint and return null if it is set, forcing the client to take the slow shader case
+ (which does respect soft edges).
+ */
+ SkASSERT(allocator != nullptr);
+
+ if (source.alphaType() == kUnpremul_SkAlphaType) {
+ return nullptr;
+ }
+
+ SkSpriteBlitter* blitter = nullptr;
+
+ if (!SkColorSpaceXformSteps::Required(source.colorSpace(), dst.colorSpace())) {
+ if (!blitter && SkSpriteBlitter_Memcpy::Supports(dst, source, paint)) {
+ blitter = allocator->make<SkSpriteBlitter_Memcpy>(source);
+ }
+ if (!blitter) {
+ switch (dst.colorType()) {
+ case kN32_SkColorType:
+ blitter = SkSpriteBlitter::ChooseL32(source, paint, allocator);
+ break;
+ case kRGB_565_SkColorType:
+ blitter = SkSpriteBlitter::ChooseL565(source, paint, allocator);
+ break;
+ case kAlpha_8_SkColorType:
+ blitter = SkSpriteBlitter::ChooseLA8(source, paint, allocator);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ if (!blitter && !paint.getMaskFilter()) {
+ blitter = allocator->make<SkRasterPipelineSpriteBlitter>(source, allocator);
+ }
+
+ if (blitter) {
+ blitter->setup(dst, left, top, paint);
+ }
+ return blitter;
+}
diff --git a/gfx/skia/skia/src/core/SkBlurMF.cpp b/gfx/skia/skia/src/core/SkBlurMF.cpp
new file mode 100644
index 0000000000..d385af8ae0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMF.cpp
@@ -0,0 +1,938 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkVertices.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkBlurPriv.h"
+#include "src/core/SkGpuBlurUtils.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrRRectBlurEffect.h"
+#include "src/gpu/effects/generated/GrRectBlurEffect.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#endif
+
+class SkBlurMaskFilterImpl : public SkMaskFilterBase {
+public:
+ SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, bool respectCTM);
+
+ // overrides from SkMaskFilter
+ SkMask::Format getFormat() const override;
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+#if SK_SUPPORT_GPU
+ bool canFilterMaskGPU(const GrShape& shape,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const override;
+ bool directFilterMaskGPU(GrRecordingContext*,
+ GrRenderTargetContext* renderTargetContext,
+ GrPaint&&,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape) const override;
+ sk_sp<GrTextureProxy> filterMaskGPU(GrRecordingContext*,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const override;
+#endif
+
+ void computeFastBounds(const SkRect&, SkRect*) const override;
+ bool asABlur(BlurRec*) const override;
+
+
+protected:
+ FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const override;
+
+ FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const override;
+
+ bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const;
+ bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const;
+
+ bool ignoreXform() const { return !fRespectCTM; }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkBlurMaskFilterImpl)
+ // To avoid unseemly allocation requests (esp. for finite platforms like
+    // handset) we limit the radius to something manageable (as opposed to
+    // a request like 10,000).
+ static const SkScalar kMAX_BLUR_SIGMA;
+
+ SkScalar fSigma;
+ SkBlurStyle fBlurStyle;
+ bool fRespectCTM;
+
+ SkBlurMaskFilterImpl(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+
+ SkScalar computeXformedSigma(const SkMatrix& ctm) const {
+ SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
+ return SkMinScalar(xformedSigma, kMAX_BLUR_SIGMA);
+ }
+
+ friend class SkBlurMaskFilter;
+
+ typedef SkMaskFilter INHERITED;
+ friend void sk_register_blur_maskfilter_createproc();
+};
+
+const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
+
+// linearly interpolate between y1 & y3 to match x2's position between x1 & x3
+static SkScalar interp(SkScalar x1, SkScalar x2, SkScalar x3, SkScalar y1, SkScalar y3) {
+ SkASSERT(x1 <= x2 && x2 <= x3);
+ SkASSERT(y1 <= y3);
+
+ SkScalar t = (x2 - x1) / (x3 - x1);
+ return y1 + t * (y3 - y1);
+}
+
+// Insert 'lower' and 'higher' into 'array1' and insert a new value at each matching insertion
+// point in 'array2' that linearly interpolates between the existing values.
+// Return a bit mask which contains a copy of 'inputMask' for all the cells between the two
+// insertion points.
+static uint32_t insert_into_arrays(SkScalar* array1, SkScalar* array2,
+ SkScalar lower, SkScalar higher,
+ int* num, uint32_t inputMask, int maskSize) {
+ SkASSERT(lower < higher);
+ SkASSERT(lower >= array1[0] && higher <= array1[*num-1]);
+
+ int32_t skipMask = 0x0;
+ int i;
+ for (i = 0; i < *num; ++i) {
+ if (lower >= array1[i] && lower < array1[i+1]) {
+ if (!SkScalarNearlyEqual(lower, array1[i])) {
+ memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
+ array1[i+1] = lower;
+ memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
+ array2[i+1] = interp(array1[i], lower, array1[i+2], array2[i], array2[i+2]);
+ i++;
+ (*num)++;
+ }
+ break;
+ }
+ }
+ for ( ; i < *num; ++i) {
+ skipMask |= inputMask << (i*maskSize);
+ if (higher > array1[i] && higher <= array1[i+1]) {
+ if (!SkScalarNearlyEqual(higher, array1[i+1])) {
+ memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
+ array1[i+1] = higher;
+ memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
+ array2[i+1] = interp(array1[i], higher, array1[i+2], array2[i], array2[i+2]);
+ (*num)++;
+ }
+ break;
+ }
+ }
+
+ return skipMask;
+}
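+
+// Worked example (illustrative): with array1 = {0, 10, 90, 100} and an
+// occluder spanning 30..60, the arrays grow to {0, 10, 30, 60, 90, 100}
+// (*num goes 4 -> 6) and, for inputMask = 0x1 / maskSize = 1, the returned
+// mask has bit 2 set: cell [30, 60) lies entirely under the occluder and can
+// be skipped when drawing.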
+
+bool SkComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
+ const SkRect& occluder,
+ SkScalar sigma, SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kSkBlurRRectMaxDivisions],
+ SkScalar rectYs[kSkBlurRRectMaxDivisions],
+ SkScalar texXs[kSkBlurRRectMaxDivisions],
+ SkScalar texYs[kSkBlurRRectMaxDivisions],
+ int* numXs, int* numYs, uint32_t* skipMask) {
+    unsigned int devBlurRadius = 3 * SkScalarCeilToInt(xformedSigma - 1/6.0f);
+ SkScalar srcBlurRadius = 3.0f * sigma;
+
+ const SkRect& devOrig = devRRect.getBounds();
+ const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const int devLeft = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
+ const int devTop = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
+ const int devRight = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
+ const int devBot = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));
+
+ // This is a conservative check for nine-patchability
+ if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
+ devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
+ return false;
+ }
+
+ const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const SkScalar srcLeft = SkTMax<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
+ const SkScalar srcTop = SkTMax<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
+ const SkScalar srcRight = SkTMax<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
+ const SkScalar srcBot = SkTMax<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);
+
+ int newRRWidth = 2*devBlurRadius + devLeft + devRight + 1;
+ int newRRHeight = 2*devBlurRadius + devTop + devBot + 1;
+ widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
+ widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;
+
+ const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);
+
+ rectXs[0] = srcProxyRect.fLeft;
+ rectXs[1] = srcProxyRect.fLeft + 2*srcBlurRadius + srcLeft;
+ rectXs[2] = srcProxyRect.fRight - 2*srcBlurRadius - srcRight;
+ rectXs[3] = srcProxyRect.fRight;
+
+ rectYs[0] = srcProxyRect.fTop;
+ rectYs[1] = srcProxyRect.fTop + 2*srcBlurRadius + srcTop;
+ rectYs[2] = srcProxyRect.fBottom - 2*srcBlurRadius - srcBot;
+ rectYs[3] = srcProxyRect.fBottom;
+
+ texXs[0] = 0.0f;
+ texXs[1] = 2.0f*devBlurRadius + devLeft;
+ texXs[2] = 2.0f*devBlurRadius + devLeft + 1;
+ texXs[3] = SkIntToScalar(widthHeight->fWidth);
+
+ texYs[0] = 0.0f;
+ texYs[1] = 2.0f*devBlurRadius + devTop;
+ texYs[2] = 2.0f*devBlurRadius + devTop + 1;
+ texYs[3] = SkIntToScalar(widthHeight->fHeight);
+
+ SkRect temp = occluder;
+
+ *numXs = 4;
+ *numYs = 4;
+ *skipMask = 0;
+ if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
+ *skipMask = insert_into_arrays(rectXs, texXs, temp.fLeft, temp.fRight, numXs, 0x1, 1);
+ *skipMask = insert_into_arrays(rectYs, texYs, temp.fTop, temp.fBottom,
+ numYs, *skipMask, *numXs-1);
+ }
+
+ const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
+ SkIntToScalar(devBlurRadius),
+ SkIntToScalar(newRRWidth),
+ SkIntToScalar(newRRHeight));
+ SkVector newRadii[4];
+ newRadii[0] = { SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY) };
+ newRadii[1] = { SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY) };
+ newRadii[2] = { SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY) };
+ newRadii[3] = { SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY) };
+
+ rrectToDraw->setRectRadii(newRect, newRadii);
+ return true;
+}
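+
+// Note (illustrative): the four rectXs/rectYs values carve the blurred round
+// rect into a 3x3 grid, and texXs/texYs map each cell into the small
+// rrectToDraw mask. All the curvature lives in the corner cells, and the
+// middle row/column spans a single texel (texXs[2] - texXs[1] == 1), so it
+// can be stretched to any size without distortion.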
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style, bool respectCTM)
+ : fSigma(sigma)
+ , fBlurStyle(style)
+ , fRespectCTM(respectCTM) {
+ SkASSERT(fSigma > 0);
+ SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
+}
+
+SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
+ return SkMask::kA8_Format;
+}
+
+bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
+ if (this->ignoreXform()) {
+ return false;
+ }
+
+ if (rec) {
+ rec->fSigma = fSigma;
+ rec->fStyle = fBlurStyle;
+ }
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix,
+ SkIPoint* margin) const {
+ SkScalar sigma = this->computeXformedSigma(matrix);
+ return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, margin);
+}
+
+bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
+ const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const {
+ SkScalar sigma = computeXformedSigma(matrix);
+
+ return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
+ const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const {
+ SkScalar sigma = computeXformedSigma(matrix);
+
+ return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+#include "include/core/SkCanvas.h"
+
+static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
+ SkASSERT(mask != nullptr);
+
+ mask->fBounds = bounds.roundOut();
+ mask->fRowBytes = SkAlign4(mask->fBounds.width());
+ mask->fFormat = SkMask::kA8_Format;
+ const size_t size = mask->computeImageSize();
+ mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
+ if (nullptr == mask->fImage) {
+ return false;
+ }
+ return true;
+}
+
+static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
+ if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
+ return false;
+ }
+
+ // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
+ // clean way to share more code?
+ SkBitmap bitmap;
+ bitmap.installMaskPixels(*mask);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(mask->fBounds.left()),
+ -SkIntToScalar(mask->fBounds.top()));
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ canvas.drawRRect(rrect, paint);
+ return true;
+}
+
+static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
+ if (!prepare_to_draw_into_mask(rects[0], mask)) {
+ return false;
+ }
+
+ SkBitmap bitmap;
+ bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
+ mask->fBounds.height(),
+ kAlpha_8_SkColorType,
+ kPremul_SkAlphaType),
+ mask->fImage, mask->fRowBytes);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(mask->fBounds.left()),
+ -SkIntToScalar(mask->fBounds.top()));
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+
+ if (1 == count) {
+ canvas.drawRect(rects[0], paint);
+ } else {
+ // todo: do I need a fast way to do this?
+ SkPath path;
+ path.addRect(rects[0]);
+ path.addRect(rects[1]);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+ canvas.drawPath(path, paint);
+ }
+ return true;
+}
+
+static bool rect_exceeds(const SkRect& r, SkScalar v) {
+ return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
+ r.width() > v || r.height() > v;
+}
+
+#include "src/core/SkMaskCache.h"
+
+static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
+ const size_t size = mask->computeTotalImageSize();
+ SkCachedData* data = SkResourceCache::NewCachedData(size);
+ if (data) {
+ memcpy(data->writable_data(), mask->fImage, size);
+ SkMask::FreeImage(mask->fImage);
+ mask->fImage = (uint8_t*)data->data();
+ }
+ return data;
+}
+
+static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect) {
+ return SkMaskCache::FindAndRef(sigma, style, rrect, mask);
+}
+
+static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect) {
+ SkCachedData* cache = copy_mask_to_cacheddata(mask);
+ if (cache) {
+ SkMaskCache::Add(sigma, style, rrect, *mask, cache);
+ }
+ return cache;
+}
+
+static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count) {
+ return SkMaskCache::FindAndRef(sigma, style, rects, count, mask);
+}
+
+static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count) {
+ SkCachedData* cache = copy_mask_to_cacheddata(mask);
+ if (cache) {
+ SkMaskCache::Add(sigma, style, rects, count, *mask, cache);
+ }
+ return cache;
+}
+
+static const bool c_analyticBlurRRect{true};
+
+SkMaskFilterBase::FilterReturn
+SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ NinePatch* patch) const {
+ SkASSERT(patch != nullptr);
+ switch (rrect.getType()) {
+ case SkRRect::kEmpty_Type:
+ // Nothing to draw.
+ return kFalse_FilterReturn;
+
+ case SkRRect::kRect_Type:
+ // We should have caught this earlier.
+ SkASSERT(false);
+ // Fall through.
+ case SkRRect::kOval_Type:
+ // The nine patch special case does not handle ovals, and we
+ // already have code for rectangles.
+ return kUnimplemented_FilterReturn;
+
+ // These three can take advantage of this fast path.
+ case SkRRect::kSimple_Type:
+ case SkRRect::kNinePatch_Type:
+ case SkRRect::kComplex_Type:
+ break;
+ }
+
+ // TODO: report correct metrics for innerstyle, where we do not grow the
+ // total bounds, but we do need an inset the size of our blur-radius
+ if (kInner_SkBlurStyle == fBlurStyle) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: take clipBounds into account to limit our coordinates up front
+ // for now, just skip too-large src rects (to take the old code path).
+ if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkIPoint margin;
+ SkMask srcM, dstM;
+ srcM.fBounds = rrect.rect().roundOut();
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fRowBytes = 0;
+
+ bool filterResult = false;
+ if (c_analyticBlurRRect) {
+ // special case for fast round rect blur
+ // don't actually do the blur the first time, just compute the correct size
+ filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
+ SkMask::kJustComputeBounds_CreateMode);
+ }
+
+ if (!filterResult) {
+ filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+ }
+
+ if (!filterResult) {
+ return kFalse_FilterReturn;
+ }
+
+ // Now figure out the appropriate width and height of the smaller round rectangle
+ // to stretch. It will take into account the larger radius per side as well as double
+ // the margin, to account for inner and outer blur.
+ const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);
+
+ const SkScalar leftUnstretched = SkTMax(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
+ const SkScalar rightUnstretched = SkTMax(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);
+
+ // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
+ // any fractional space on either side plus 1 for the part to stretch.
+ const SkScalar stretchSize = SkIntToScalar(3);
+
+ const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
+ if (totalSmallWidth >= rrect.rect().width()) {
+ // There is no valid piece to stretch.
+ return kUnimplemented_FilterReturn;
+ }
+
+ const SkScalar topUnstretched = SkTMax(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
+ const SkScalar bottomUnstretched = SkTMax(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);
+
+ const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
+ if (totalSmallHeight >= rrect.rect().height()) {
+ // There is no valid piece to stretch.
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);
+
+ SkRRect smallRR;
+ SkVector radii[4];
+ radii[SkRRect::kUpperLeft_Corner] = UL;
+ radii[SkRRect::kUpperRight_Corner] = UR;
+ radii[SkRRect::kLowerRight_Corner] = LR;
+ radii[SkRRect::kLowerLeft_Corner] = LL;
+ smallRR.setRectRadii(smallR, radii);
+
+ const SkScalar sigma = this->computeXformedSigma(matrix);
+ SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
+ if (!cache) {
+ bool analyticBlurWorked = false;
+ if (c_analyticBlurRRect) {
+ analyticBlurWorked =
+ this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+ }
+
+ if (!analyticBlurWorked) {
+ if (!draw_rrect_into_mask(smallRR, &srcM)) {
+ return kFalse_FilterReturn;
+ }
+
+ SkAutoMaskFreeImage amf(srcM.fImage);
+
+ if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+ return kFalse_FilterReturn;
+ }
+ }
+ cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
+ }
+
+ patch->fMask.fBounds.offsetTo(0, 0);
+ patch->fOuterRect = dstM.fBounds;
+ patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
+ patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
+ SkASSERT(nullptr == patch->fCache);
+ patch->fCache = cache; // transfer ownership to patch
+ return kTrue_FilterReturn;
+}
+
+// Use the faster analytic blur approach for ninepatch rects
+static const bool c_analyticBlurNinepatch{true};
+
+SkMaskFilterBase::FilterReturn
+SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
+ const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ NinePatch* patch) const {
+ if (count < 1 || count > 2) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: report correct metrics for innerstyle, where we do not grow the
+ // total bounds, but we do need an inset the size of our blur-radius
+ if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: take clipBounds into account to limit our coordinates up front
+ // for now, just skip too-large src rects (to take the old code path).
+ if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkIPoint margin;
+ SkMask srcM, dstM;
+ srcM.fBounds = rects[0].roundOut();
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fRowBytes = 0;
+
+ bool filterResult = false;
+ if (count == 1 && c_analyticBlurNinepatch) {
+ // special case for fast rect blur
+ // don't actually do the blur the first time, just compute the correct size
+ filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
+ SkMask::kJustComputeBounds_CreateMode);
+ } else {
+ filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+ }
+
+ if (!filterResult) {
+ return kFalse_FilterReturn;
+ }
+
+ /*
+ * smallR is the smallest version of 'rect' that will still guarantee that
+ * we get the same blur results on all edges, plus 1 center row/col that is
+ * representative of the extendible/stretchable edges of the ninepatch.
+ * Since our actual edge may be fractional we inset 1 more to be sure we
+ * don't miss any interior blur.
+ * x is an added pixel of blur, and { and } are the (fractional) edge
+ * pixels from the original rect.
+ *
+ * x x { x x .... x x } x x
+ *
+ * Thus, in this case, we inset by a total of 5 (on each side) beginning
+ * with our outer-rect (dstM.fBounds)
+ */
+ SkRect smallR[2];
+ SkIPoint center;
+
+    // +2 is from +1 for each edge (to account for possible fractional edges)
+ int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
+ int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
+ SkIRect innerIR;
+
+ if (1 == count) {
+ innerIR = srcM.fBounds;
+ center.set(smallW, smallH);
+ } else {
+ SkASSERT(2 == count);
+ rects[1].roundIn(&innerIR);
+ center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
+ smallH + (innerIR.top() - srcM.fBounds.top()));
+ }
+
+ // +1 so we get a clean, stretchable, center row/col
+ smallW += 1;
+ smallH += 1;
+
+ // we want the inset amounts to be integral, so we don't change any
+ // fractional phase on the fRight or fBottom of our smallR.
+ const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
+ const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
+ if (dx < 0 || dy < 0) {
+ // we're too small, relative to our blur, to break into nine-patch,
+ // so we ask to have our normal filterMask() be called.
+ return kUnimplemented_FilterReturn;
+ }
+
+ smallR[0].setLTRB(rects[0].left(), rects[0].top(),
+ rects[0].right() - dx, rects[0].bottom() - dy);
+ if (smallR[0].width() < 2 || smallR[0].height() < 2) {
+ return kUnimplemented_FilterReturn;
+ }
+ if (2 == count) {
+ smallR[1].setLTRB(rects[1].left(), rects[1].top(),
+ rects[1].right() - dx, rects[1].bottom() - dy);
+ SkASSERT(!smallR[1].isEmpty());
+ }
+
+ const SkScalar sigma = this->computeXformedSigma(matrix);
+ SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
+ if (!cache) {
+ if (count > 1 || !c_analyticBlurNinepatch) {
+ if (!draw_rects_into_mask(smallR, count, &srcM)) {
+ return kFalse_FilterReturn;
+ }
+
+ SkAutoMaskFreeImage amf(srcM.fImage);
+
+ if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+ return kFalse_FilterReturn;
+ }
+ } else {
+ if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+ return kFalse_FilterReturn;
+ }
+ }
+ cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
+ }
+ patch->fMask.fBounds.offsetTo(0, 0);
+ patch->fOuterRect = dstM.fBounds;
+ patch->fCenter = center;
+ SkASSERT(nullptr == patch->fCache);
+ patch->fCache = cache; // transfer ownership to patch
+ return kTrue_FilterReturn;
+}
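+
+// Worked example (illustrative): for a single 100-wide rect whose blur pads
+// 4 pixels per side, dstM is 108 wide, so smallW = 108 - 100 + 2 + 1 = 11:
+// the cached nine-patch mask is 11 pixels wide no matter how large the
+// original rect is, and its center column is stretched to cover the interior.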
+
+void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
+ SkRect* dst) const {
+ SkScalar pad = 3.0f * fSigma;
+
+ dst->setLTRB(src.fLeft - pad, src.fTop - pad,
+ src.fRight + pad, src.fBottom + pad);
+}
+
+sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar sigma = buffer.readScalar();
+ SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);
+
+ uint32_t flags = buffer.read32LE(0x3); // historically we only recorded 2 bits
+ bool respectCTM = !(flags & 1); // historically we stored ignoreCTM in low bit
+
+ if (buffer.isVersionLT(SkPicturePriv::kRemoveOccluderFromBlurMaskFilter)) {
+ SkRect unused;
+ buffer.readRect(&unused);
+ }
+
+ return SkMaskFilter::MakeBlur((SkBlurStyle)style, sigma, respectCTM);
+}
+
+void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fSigma);
+ buffer.writeUInt(fBlurStyle);
+ buffer.writeUInt(!fRespectCTM); // historically we recorded ignoreCTM
+}
+
+
+#if SK_SUPPORT_GPU
+
+bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape) const {
+ SkASSERT(renderTargetContext);
+
+ if (fBlurStyle != kNormal_SkBlurStyle) {
+ return false;
+ }
+
+ if (!viewMatrix.isScaleTranslate()) {
+ return false;
+ }
+
+ // TODO: we could handle blurred stroked circles
+ if (!shape.style().isSimpleFill()) {
+ return false;
+ }
+
+ SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
+ if (xformedSigma <= 0) {
+ return false;
+ }
+
+ SkRRect srcRRect;
+ bool inverted;
+ if (!shape.asRRect(&srcRRect, nullptr, nullptr, &inverted) || inverted) {
+ return false;
+ }
+
+ SkRRect devRRect;
+ if (!srcRRect.transform(viewMatrix, &devRRect)) {
+ return false;
+ }
+
+ if (!SkRRectPriv::AllCornersCircular(devRRect)) {
+ return false;
+ }
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ std::unique_ptr<GrFragmentProcessor> fp;
+
+ if (devRRect.isRect() || SkRRectPriv::IsCircle(devRRect)) {
+ if (devRRect.isRect()) {
+ fp = GrRectBlurEffect::Make(proxyProvider, *context->priv().caps()->shaderCaps(),
+ devRRect.rect(), xformedSigma);
+ } else {
+ fp = GrCircleBlurFragmentProcessor::Make(proxyProvider, devRRect.rect(), xformedSigma);
+ }
+
+ if (!fp) {
+ return false;
+ }
+ paint.addCoverageFragmentProcessor(std::move(fp));
+
+ SkRect srcProxyRect = srcRRect.rect();
+ SkScalar outsetX = 3.0f*fSigma;
+ SkScalar outsetY = 3.0f*fSigma;
+ if (this->ignoreXform()) {
+ // When we're ignoring the CTM the padding added to the source rect also needs to ignore
+ // the CTM. The matrix passed in here is guaranteed to be just scale and translate so we
+ // can just grab the X and Y scales off the matrix and pre-undo the scale.
+ outsetX /= SkScalarAbs(viewMatrix.getScaleX());
+ outsetY /= SkScalarAbs(viewMatrix.getScaleY());
+ }
+ srcProxyRect.outset(outsetX, outsetY);
+
+ renderTargetContext->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
+ return true;
+ }
+
+ fp = GrRRectBlurEffect::Make(context, fSigma, xformedSigma, srcRRect, devRRect);
+ if (!fp) {
+ return false;
+ }
+
+ if (!this->ignoreXform()) {
+ SkRect srcProxyRect = srcRRect.rect();
+ srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);
+
+ SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, 4, 6, 0);
+ srcProxyRect.toQuad(builder.positions());
+
+ static const uint16_t fullIndices[6] = { 0, 1, 2, 0, 2, 3 };
+ memcpy(builder.indices(), fullIndices, sizeof(fullIndices));
+ sk_sp<SkVertices> vertices = builder.detach();
+
+ paint.addCoverageFragmentProcessor(std::move(fp));
+ renderTargetContext->drawVertices(clip, std::move(paint), viewMatrix, std::move(vertices),
+ nullptr, 0);
+ } else {
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+
+        float extra = 3.f * SkScalarCeilToScalar(xformedSigma - 1/6.0f);
+ SkRect proxyRect = devRRect.rect();
+ proxyRect.outset(extra, extra);
+
+ paint.addCoverageFragmentProcessor(std::move(fp));
+ renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo,
+ SkMatrix::I(), proxyRect, inverse);
+ }
+
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::canFilterMaskGPU(const GrShape& shape,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const {
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+ if (xformedSigma <= 0) {
+ maskRect->setEmpty();
+ return false;
+ }
+
+ if (maskRect) {
+ float sigma3 = 3 * SkScalarToFloat(xformedSigma);
+
+ // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
+ SkIRect clipRect = clipBounds.makeOutset(sigma3, sigma3);
+ SkIRect srcRect = devSpaceShapeBounds.makeOutset(sigma3, sigma3);
+
+ if (!srcRect.intersect(clipRect)) {
+ srcRect.setEmpty();
+ }
+ *maskRect = srcRect;
+ }
+
+ // We prefer to blur paths with small blur radii on the CPU.
+ if (ctm.rectStaysRect()) {
+ static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
+ static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
+
+ if (devSpaceShapeBounds.width() <= kMIN_GPU_BLUR_SIZE &&
+ devSpaceShapeBounds.height() <= kMIN_GPU_BLUR_SIZE &&
+ xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+sk_sp<GrTextureProxy> SkBlurMaskFilterImpl::filterMaskGPU(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const {
+ // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
+ const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
+
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+ SkASSERT(xformedSigma > 0);
+
+ // If we're doing a normal blur, we can clobber the pathTexture in the
+ // gaussianBlur. Otherwise, we need to save it for later compositing.
+ bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
+ auto renderTargetContext = SkGpuBlurUtils::GaussianBlur(context,
+ srcProxy,
+ srcColorType,
+ srcAlphaType,
+ SkIPoint::Make(0, 0),
+ nullptr,
+ clipRect,
+ SkIRect::EmptyIRect(),
+ xformedSigma,
+ xformedSigma,
+ GrTextureDomain::kIgnore_Mode);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ if (!isNormalBlur) {
+ GrPaint paint;
+ // Blend pathTexture over blurTexture.
+ paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(std::move(srcProxy),
+ srcColorType,
+ SkMatrix::I()));
+ if (kInner_SkBlurStyle == fBlurStyle) {
+ // inner: dst = dst * src
+ paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
+ } else if (kSolid_SkBlurStyle == fBlurStyle) {
+ // solid: dst = src + dst - src * dst
+ // = src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
+ } else if (kOuter_SkBlurStyle == fBlurStyle) {
+ // outer: dst = dst * (1 - src)
+ // = 0 * src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
+ } else {
+ paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
+ }
+
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(clipRect));
+ }
+
+ return renderTargetContext->asTextureProxyRef();
+}
+
+#endif // SK_SUPPORT_GPU
+
+void sk_register_blur_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(SkBlurMaskFilterImpl); }
+
+sk_sp<SkMaskFilter> SkMaskFilter::MakeBlur(SkBlurStyle style, SkScalar sigma, bool respectCTM) {
+ if (SkScalarIsFinite(sigma) && sigma > 0) {
+ return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, respectCTM));
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkBlurMask.cpp b/gfx/skia/skia/src/core/SkBlurMask.cpp
new file mode 100644
index 0000000000..9963fffe37
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMask.cpp
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlurMask.h"
+
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkMath.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkMaskBlurFilter.h"
+#include "src/core/SkMathPriv.h"
+
+// This constant approximates the scaling done in the software path's
+// "high quality" mode, in SkBlurMask::Blur() (1 / sqrt(3)).
+// IMHO, it actually should be 1: we blur "less" than we should do
+// according to the CSS and canvas specs, simply because Safari does the same.
+// Firefox used to do the same too, until 4.0 where they fixed it. So at some
+// point we should probably get rid of these scaling constants and rebaseline
+// all the blur tests.
+static const SkScalar kBLUR_SIGMA_SCALE = 0.57735f;
+
+SkScalar SkBlurMask::ConvertRadiusToSigma(SkScalar radius) {
+ return radius > 0 ? kBLUR_SIGMA_SCALE * radius + 0.5f : 0.0f;
+}
+
+SkScalar SkBlurMask::ConvertSigmaToRadius(SkScalar sigma) {
+ return sigma > 0.5f ? (sigma - 0.5f) / kBLUR_SIGMA_SCALE : 0.0f;
+}
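+
+// Worked example (illustrative): the two conversions round-trip, e.g. radius
+// 10 gives sigma = 0.57735 * 10 + 0.5 = 6.2735, and converting back yields
+// (6.2735 - 0.5) / 0.57735 = 10.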
+
+
+template <typename AlphaIter>
+static void merge_src_with_blur(uint8_t dst[], int dstRB,
+ AlphaIter src, int srcRB,
+ const uint8_t blur[], int blurRB,
+ int sw, int sh) {
+ dstRB -= sw;
+ blurRB -= sw;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (int x = sw - 1; x >= 0; --x) {
+ *dst = SkToU8(SkAlphaMul(*blur, SkAlpha255To256(*rowSrc)));
+ ++dst;
+ ++rowSrc;
+ ++blur;
+ }
+ dst += dstRB;
+ src >>= srcRB;
+ blur += blurRB;
+ }
+}
+
+template <typename AlphaIter>
+static void clamp_solid_with_orig(uint8_t dst[], int dstRowBytes,
+ AlphaIter src, int srcRowBytes,
+ int sw, int sh) {
+ int x;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (x = sw - 1; x >= 0; --x) {
+ int s = *rowSrc;
+ int d = *dst;
+ *dst = SkToU8(s + d - SkMulDiv255Round(s, d));
+ ++dst;
+ ++rowSrc;
+ }
+ dst += dstRowBytes - sw;
+ src >>= srcRowBytes;
+ }
+}
+
+template <typename AlphaIter>
+static void clamp_outer_with_orig(uint8_t dst[], int dstRowBytes,
+ AlphaIter src, int srcRowBytes,
+ int sw, int sh) {
+ int x;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (x = sw - 1; x >= 0; --x) {
+ int srcValue = *rowSrc;
+ if (srcValue) {
+ *dst = SkToU8(SkAlphaMul(*dst, SkAlpha255To256(255 - srcValue)));
+ }
+ ++dst;
+ ++rowSrc;
+ }
+ dst += dstRowBytes - sw;
+ src >>= srcRowBytes;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+// we use a local function to wrap the class static method to work around
+// a bug in gcc98
+void SkMask_FreeImage(uint8_t* image);
+void SkMask_FreeImage(uint8_t* image) {
+ SkMask::FreeImage(image);
+}
+
+bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src, SkScalar sigma, SkBlurStyle style,
+ SkIPoint* margin) {
+ if (src.fFormat != SkMask::kBW_Format &&
+ src.fFormat != SkMask::kA8_Format &&
+ src.fFormat != SkMask::kARGB32_Format &&
+ src.fFormat != SkMask::kLCD16_Format)
+ {
+ return false;
+ }
+
+ SkMaskBlurFilter blurFilter{sigma, sigma};
+ if (blurFilter.hasNoBlur()) {
+ // If there is no effective blur most styles will just produce the original mask.
+ // However, kOuter_SkBlurStyle will produce an empty mask.
+ if (style == kOuter_SkBlurStyle) {
+ dst->fImage = nullptr;
+ dst->fBounds = SkIRect::MakeEmpty();
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ if (margin != nullptr) {
+ // This filter will disregard the src.fImage completely.
+ // The margin is actually {-(src.fBounds.width() / 2), -(src.fBounds.height() / 2)}
+ // but it is not clear if callers will fall over with negative margins.
+ *margin = SkIPoint{0,0};
+ }
+ return true;
+ }
+ return false;
+ }
+ const SkIPoint border = blurFilter.blur(src, dst);
+ // If src.fImage is null, then this call is only to calculate the border.
+ if (src.fImage != nullptr && dst->fImage == nullptr) {
+ return false;
+ }
+
+ if (margin != nullptr) {
+ *margin = border;
+ }
+
+ if (src.fImage == nullptr) {
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = dst->fBounds.width();
+ }
+ return true;
+ }
+
+ switch (style) {
+ case kNormal_SkBlurStyle:
+ break;
+ case kSolid_SkBlurStyle: {
+ auto dstStart = &dst->fImage[border.x() + border.y() * dst->fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ case kOuter_SkBlurStyle: {
+ auto dstStart = &dst->fImage[border.x() + border.y() * dst->fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ case kInner_SkBlurStyle: {
+ // now we allocate the "real" dst, mirror the size of src
+ SkMask blur = *dst;
+ SkAutoMaskFreeImage autoFreeBlurMask(blur.fImage);
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = dst->fBounds.width();
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(dstSize);
+ auto blurStart = &blur.fImage[border.x() + border.y() * blur.fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ }
+
+ return true;
+}
+
+/* Convolving a box with itself three times results in a piecewise
+ quadratic function:
+
+ 0 x <= -1.5
+ 9/8 + 3/2 x + 1/2 x^2 -1.5 < x <= -.5
+ 3/4 - x^2 -.5 < x <= .5
+ 9/8 - 3/2 x + 1/2 x^2 0.5 < x <= 1.5
+ 0 1.5 < x
+
+ Mathematica:
+
+ g[x_] := Piecewise [ {
+ {9/8 + 3/2 x + 1/2 x^2 , -1.5 < x <= -.5},
+ {3/4 - x^2 , -.5 < x <= .5},
+ {9/8 - 3/2 x + 1/2 x^2 , 0.5 < x <= 1.5}
+ }, 0]
+
+ To get the profile curve of the blurred step function at the rectangle
+ edge, we evaluate the indefinite integral, which is piecewise cubic:
+
+ 0 x <= -1.5
+ 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3 -1.5 < x <= -0.5
+ 1/2 + 3/4 x - 1/3 x^3 -.5 < x <= .5
+ 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3 .5 < x <= 1.5
+ 1 1.5 < x
+
+ in Mathematica code:
+
+ gi[x_] := Piecewise[ {
+ { 0 , x <= -1.5 },
+ { 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3, -1.5 < x <= -0.5 },
+ { 1/2 + 3/4 x - 1/3 x^3 , -.5 < x <= .5},
+ { 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3, .5 < x <= 1.5}
+ },1]
+*/
+
+static float gaussianIntegral(float x) {
+ if (x > 1.5f) {
+ return 0.0f;
+ }
+ if (x < -1.5f) {
+ return 1.0f;
+ }
+
+ float x2 = x*x;
+ float x3 = x2*x;
+
+ if ( x > 0.5f ) {
+ return 0.5625f - (x3 / 6.0f - 3.0f * x2 * 0.25f + 1.125f * x);
+ }
+ if ( x > -0.5f ) {
+ return 0.5f - (0.75f * x - x3 / 3.0f);
+ }
+ return 0.4375f + (-x3 / 6.0f - 3.0f * x2 * 0.25f - 1.125f * x);
+}
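+
+// Note (illustrative): despite its name, this returns the *complement* of the
+// integral gi[] above (1 for x <= -1.5, 0 for x >= 1.5), matching the
+// pre-inverted profile built below. Sanity checks: gaussianIntegral(0) = 0.5,
+// and the pieces agree at the seams, e.g. both the middle and right pieces
+// evaluate to 1/6 at x = 0.5.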
+
+/* ComputeBlurProfile fills in a caller-allocated array of 8-bit
+   values between 0 and 255 for the profile signature of
+ a blurred half-plane with the given blur radius. Since we're
+ going to be doing screened multiplications (i.e., 1 - (1-x)(1-y))
+ all the time, we actually fill in the profile pre-inverted
+ (already done 255-x).
+*/
+
+void SkBlurMask::ComputeBlurProfile(uint8_t* profile, int size, SkScalar sigma) {
+ SkASSERT(SkScalarCeilToInt(6*sigma) == size);
+
+ int center = size >> 1;
+
+ float invr = 1.f/(2*sigma);
+
+ profile[0] = 255;
+ for (int x = 1 ; x < size ; ++x) {
+ float scaled_x = (center - x - .5f) * invr;
+ float gi = gaussianIntegral(scaled_x);
+ profile[x] = 255 - (uint8_t) (255.f * gi);
+ }
+}
+
+// TODO MAYBE: Maintain a profile cache to avoid recomputing this for
+// commonly used radii. Consider baking some of the most common blur radii
+// directly in as static data?
+
+// Implementation adapted from Michael Herf's approach:
+// http://stereopsis.com/shadowrect/
+
+uint8_t SkBlurMask::ProfileLookup(const uint8_t *profile, int loc,
+ int blurredWidth, int sharpWidth) {
+ // how far are we from the original edge?
+ int dx = SkAbs32(((loc << 1) + 1) - blurredWidth) - sharpWidth;
+ int ox = dx >> 1;
+ if (ox < 0) {
+ ox = 0;
+ }
+
+ return profile[ox];
+}
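+
+// Worked example (illustrative): (loc << 1) + 1 works in half-pixel units so
+// pixel centers land on odd values. For blurredWidth = 20 and sharpWidth = 12,
+// pixel 0 gives dx = |1 - 20| - 12 = 7 and ox = 3 (read profile[3]), while a
+// center pixel such as loc = 9 gives a negative dx and clamps to profile[0],
+// i.e. fully inside the sharp rect.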
+
+void SkBlurMask::ComputeBlurredScanline(uint8_t *pixels, const uint8_t *profile,
+ unsigned int width, SkScalar sigma) {
+
+ unsigned int profile_size = SkScalarCeilToInt(6*sigma);
+ SkAutoTMalloc<uint8_t> horizontalScanline(width);
+
+ unsigned int sw = width - profile_size;
+ // nearest odd number less than the profile size represents the center
+ // of the (2x scaled) profile
+ int center = ( profile_size & ~1 ) - 1;
+
+ int w = sw - center;
+
+ for (unsigned int x = 0 ; x < width ; ++x) {
+ if (profile_size <= sw) {
+ pixels[x] = ProfileLookup(profile, x, width, w);
+ } else {
+ float span = float(sw)/(2*sigma);
+ float giX = 1.5f - (x+.5f)/(2*sigma);
+ pixels[x] = (uint8_t) (255 * (gaussianIntegral(giX) - gaussianIntegral(giX + span)));
+ }
+ }
+}
+
+bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst,
+ const SkRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+ int profileSize = SkScalarCeilToInt(6*sigma);
+ if (profileSize <= 0) {
+ return false; // no blur to compute
+ }
+
+ int pad = profileSize/2;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds.setLTRB(SkScalarRoundToInt(src.fLeft - pad),
+ SkScalarRoundToInt(src.fTop - pad),
+ SkScalarRoundToInt(src.fRight + pad),
+ SkScalarRoundToInt(src.fBottom + pad));
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ int sw = SkScalarFloorToInt(src.width());
+ int sh = SkScalarFloorToInt(src.height());
+
+ if (createMode == SkMask::kJustComputeBounds_CreateMode) {
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.round(); // restore trimmed bounds
+ dst->fRowBytes = sw;
+ }
+ return true;
+ }
+
+ SkAutoTMalloc<uint8_t> profile(profileSize);
+
+ ComputeBlurProfile(profile, profileSize, sigma);
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ uint8_t* dp = SkMask::AllocImage(dstSize);
+
+ dst->fImage = dp;
+
+ int dstHeight = dst->fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ uint8_t *outptr = dp;
+
+ SkAutoTMalloc<uint8_t> horizontalScanline(dstWidth);
+ SkAutoTMalloc<uint8_t> verticalScanline(dstHeight);
+
+ ComputeBlurredScanline(horizontalScanline, profile, dstWidth, sigma);
+ ComputeBlurredScanline(verticalScanline, profile, dstHeight, sigma);
+
+ for (int y = 0 ; y < dstHeight ; ++y) {
+ for (int x = 0 ; x < dstWidth ; x++) {
+ unsigned int maskval = SkMulDiv255Round(horizontalScanline[x], verticalScanline[y]);
+ *(outptr++) = maskval;
+ }
+ }
+
+ if (style == kInner_SkBlurStyle) {
+        // now we allocate the "real" dst, mirroring the size of src
+        size_t srcSize = (size_t)(src.width() * src.height());
+        if (0 == srcSize) {
+            return false; // the source rect is empty, so there is nothing to copy out
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ for (int y = 0 ; y < sh ; y++) {
+ uint8_t *blur_scanline = dp + (y+pad)*dstWidth + pad;
+ uint8_t *inner_scanline = dst->fImage + y*sw;
+ memcpy(inner_scanline, blur_scanline, sw);
+ }
+ SkMask::FreeImage(dp);
+
+ dst->fBounds = src.round(); // restore trimmed bounds
+ dst->fRowBytes = sw;
+
+ } else if (style == kOuter_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0, sw);
+ }
+ } else if (style == kSolid_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0xff, sw);
+ }
+ }
+    // the normal style needs no special handling here: the analytic blur above already
+    // produces a normal-style mask, while inner/outer/solid were post-processed above.
+
+ return true;
+}
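+// A minimal usage sketch (editor's addition, not part of Skia): blur a 20x10 rect
+// with sigma = 2 and read back the A8 mask. Error handling is elided.
+#if 0
+static void sketch_blur_rect() {
+    SkMask mask;
+    SkIPoint margin;
+    const SkRect rect = SkRect::MakeWH(20, 10);
+    if (SkBlurMask::BlurRect(2.0f, &mask, rect, kNormal_SkBlurStyle, &margin,
+                             SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+        // mask.fBounds is rect outset by margin (profileSize/2 on each side);
+        // mask.fImage holds width*height A8 coverage values and must be freed.
+        SkMask::FreeImage(mask.fImage);
+    }
+}
+#endif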
+
+bool SkBlurMask::BlurRRect(SkScalar sigma, SkMask *dst,
+ const SkRRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+    // Temporary for now -- always fail, which causes the caller to fall back to the
+    // old path. This plumbing exists just to land the API and parallelize the effort.
+
+ return false;
+}
+
+// The "simple" blur is a direct implementation of separable convolution with a discrete
+// gaussian kernel. It's "ground truth" in a sense; too slow to be used, but very
+// useful for correctness comparisons.
+
+bool SkBlurMask::BlurGroundTruth(SkScalar sigma, SkMask* dst, const SkMask& src,
+ SkBlurStyle style, SkIPoint* margin) {
+
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ float variance = sigma * sigma;
+
+ int windowSize = SkScalarCeilToInt(sigma*6);
+ // round window size up to nearest odd number
+ windowSize |= 1;
+
+ SkAutoTMalloc<float> gaussWindow(windowSize);
+
+ int halfWindow = windowSize >> 1;
+
+ gaussWindow[halfWindow] = 1;
+
+ float windowSum = 1;
+ for (int x = 1 ; x <= halfWindow ; ++x) {
+ float gaussian = expf(-x*x / (2*variance));
+ gaussWindow[halfWindow + x] = gaussWindow[halfWindow-x] = gaussian;
+ windowSum += 2*gaussian;
+ }
+
+    // leave the filter un-normalized for now; we will divide by the normalization
+    // sum later.
+
+ int pad = halfWindow;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fBounds.outset(pad, pad);
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ int srcWidth = src.fBounds.width();
+ int srcHeight = src.fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ const uint8_t* srcPixels = src.fImage;
+ uint8_t* dstPixels = SkMask::AllocImage(dstSize);
+ SkAutoMaskFreeImage autoFreeDstPixels(dstPixels);
+
+ // do the actual blur. First, make a padded copy of the source.
+ // use double pad so we never have to check if we're outside anything
+
+ int padWidth = srcWidth + 4*pad;
+ int padHeight = srcHeight;
+ int padSize = padWidth * padHeight;
+
+ SkAutoTMalloc<uint8_t> padPixels(padSize);
+ memset(padPixels, 0, padSize);
+
+ for (int y = 0 ; y < srcHeight; ++y) {
+ uint8_t* padptr = padPixels + y * padWidth + 2*pad;
+ const uint8_t* srcptr = srcPixels + y * srcWidth;
+ memcpy(padptr, srcptr, srcWidth);
+ }
+
+ // blur in X, transposing the result into a temporary floating point buffer.
+ // also double-pad the intermediate result so that the second blur doesn't
+ // have to do extra conditionals.
+
+ int tmpWidth = padHeight + 4*pad;
+ int tmpHeight = padWidth - 2*pad;
+ int tmpSize = tmpWidth * tmpHeight;
+
+ SkAutoTMalloc<float> tmpImage(tmpSize);
+ memset(tmpImage, 0, tmpSize*sizeof(tmpImage[0]));
+
+ for (int y = 0 ; y < padHeight ; ++y) {
+ uint8_t *srcScanline = padPixels + y*padWidth;
+ for (int x = pad ; x < padWidth - pad ; ++x) {
+ float *outPixel = tmpImage + (x-pad)*tmpWidth + y + 2*pad; // transposed output
+ uint8_t *windowCenter = srcScanline + x;
+ for (int i = -pad ; i <= pad ; ++i) {
+ *outPixel += gaussWindow[pad+i]*windowCenter[i];
+ }
+ *outPixel /= windowSum;
+ }
+ }
+
+ // blur in Y; now filling in the actual desired destination. We have to do
+ // the transpose again; these transposes guarantee that we read memory in
+ // linear order.
+
+ for (int y = 0 ; y < tmpHeight ; ++y) {
+ float *srcScanline = tmpImage + y*tmpWidth;
+ for (int x = pad ; x < tmpWidth - pad ; ++x) {
+ float *windowCenter = srcScanline + x;
+ float finalValue = 0;
+ for (int i = -pad ; i <= pad ; ++i) {
+ finalValue += gaussWindow[pad+i]*windowCenter[i];
+ }
+ finalValue /= windowSum;
+ uint8_t *outPixel = dstPixels + (x-pad)*dstWidth + y; // transposed output
+ int integerPixel = int(finalValue + 0.5f);
+ *outPixel = SkClampMax( SkClampPos(integerPixel), 255 );
+ }
+ }
+
+ dst->fImage = dstPixels;
+ switch (style) {
+ case kNormal_SkBlurStyle:
+ break;
+ case kSolid_SkBlurStyle: {
+ clamp_solid_with_orig(
+ dstPixels + pad*dst->fRowBytes + pad, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ srcWidth, srcHeight);
+ } break;
+ case kOuter_SkBlurStyle: {
+ clamp_outer_with_orig(
+ dstPixels + pad*dst->fRowBytes + pad, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ srcWidth, srcHeight);
+ } break;
+ case kInner_SkBlurStyle: {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = src.computeImageSize();
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ merge_src_with_blur(dst->fImage, src.fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ dstPixels + pad*dst->fRowBytes + pad,
+ dst->fRowBytes, srcWidth, srcHeight);
+ SkMask::FreeImage(dstPixels);
+ } break;
+ }
+ autoFreeDstPixels.release();
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = src.fRowBytes;
+ }
+
+ return true;
+}
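+// Editor's note: BlurGroundTruth relies on the separability of the gaussian:
+// G(x, y) = G(x) * G(y), so the 2D convolution is a horizontal 1D pass followed by a
+// vertical 1D pass (each transposing its output so reads stay linear in memory).
+// A minimal sketch of one such 1D pass over floats (illustrative only, with edge
+// clamping instead of the padding used above):
+#if 0
+#include <algorithm>
+
+static void blur1D(const float* src, float* dst, int n, int stride,
+                   const float* kernel, int halfWindow) {
+    for (int i = 0; i < n; ++i) {
+        float sum = 0.0f;
+        for (int k = -halfWindow; k <= halfWindow; ++k) {
+            int j = std::max(0, std::min(n - 1, i + k));   // clamp at the edges
+            sum += kernel[halfWindow + k] * src[j * stride];
+        }
+        dst[i * stride] = sum;
+    }
+}
+// Blur rows with stride 1, then columns with stride == rowWidth.
+#endif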
diff --git a/gfx/skia/skia/src/core/SkBlurMask.h b/gfx/skia/skia/src/core/SkBlurMask.h
new file mode 100644
index 0000000000..55c999e4e1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMask.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMask_DEFINED
+#define SkBlurMask_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkShader.h"
+#include "src/core/SkMask.h"
+
+class SkBlurMask {
+public:
+ static bool SK_WARN_UNUSED_RESULT BlurRect(SkScalar sigma, SkMask *dst, const SkRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+ static bool SK_WARN_UNUSED_RESULT BlurRRect(SkScalar sigma, SkMask *dst, const SkRRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+
+    // forceQuality will prevent BoxBlur from falling back to the low quality approach when sigma
+    // is very small -- this can be used to predict the margin bump ahead of time without
+    // completely replicating the internal logic. This permits not only simpler caching of blurred
+    // results, but also predicting precisely which pixels the blurred profile of e.g. a
+    // rectangle will cover.
+    //
+    // Calling details:
+    // * calculate margin - if src.fImage is null, then this call only calculates the margin.
+    // * failure - if src.fImage is not null, failure is signaled by dst->fImage being
+    //   null.
+
+ static bool SK_WARN_UNUSED_RESULT BoxBlur(SkMask* dst, const SkMask& src,
+ SkScalar sigma, SkBlurStyle style,
+ SkIPoint* margin = nullptr);
+
+ // the "ground truth" blur does a gaussian convolution; it's slow
+ // but useful for comparison purposes.
+ static bool SK_WARN_UNUSED_RESULT BlurGroundTruth(SkScalar sigma, SkMask* dst,
+ const SkMask& src,
+ SkBlurStyle, SkIPoint* margin = nullptr);
+
+ // If radius > 0, return the corresponding sigma, else return 0
+ static SkScalar SK_API ConvertRadiusToSigma(SkScalar radius);
+ // If sigma > 0.5, return the corresponding radius, else return 0
+ static SkScalar SK_API ConvertSigmaToRadius(SkScalar sigma);
+
+ /* Helper functions for analytic rectangle blurs */
+
+    /** Look up the intensity of the (one-dimensional) blurred half-plane.
+        @param profile The precomputed 1D blur profile; initialized by ComputeBlurProfile below.
+        @param loc The location to look up; the lookup will clamp invalid inputs, but
+                   meaningful data are available between 0 and blurredWidth
+        @param blurredWidth The width of the final, blurred rectangle
+        @param sharpWidth The width of the original, unblurred rectangle.
+    */
+ static uint8_t ProfileLookup(const uint8_t* profile, int loc, int blurredWidth, int sharpWidth);
+
+ /** Populate the profile of a 1D blurred halfplane.
+ @param profile The 1D table to fill in
+ @param size Should be 6*sigma bytes
+ @param sigma The standard deviation of the gaussian blur kernel
+ */
+ static void ComputeBlurProfile(uint8_t* profile, int size, SkScalar sigma);
+
+ /** Compute an entire scanline of a blurred step function. This is a 1D helper that
+ will produce both the horizontal and vertical profiles of the blurry rectangle.
+ @param pixels Location to store the resulting pixel data; allocated and managed by caller
+ @param profile Precomputed blur profile computed by ComputeBlurProfile above.
+ @param width Size of the pixels array.
+        @param sigma Standard deviation of the gaussian blur kernel used to compute the profile;
+                     this implicitly gives the size of the profile array.
+ */
+
+ static void ComputeBlurredScanline(uint8_t* pixels, const uint8_t* profile,
+ unsigned int width, SkScalar sigma);
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlurPriv.h b/gfx/skia/skia/src/core/SkBlurPriv.h
new file mode 100644
index 0000000000..2509d499b1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurPriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurPriv_DEFINED
+#define SkBlurPriv_DEFINED
+
+#include "include/core/SkRRect.h"
+#include "include/core/SkSize.h"
+
+static const int kSkBlurRRectMaxDivisions = 6;
+
+// This method computes all the parameters for drawing a partially occluded nine-patched
+// blurred rrect mask:
+// rrectToDraw - the integerized rrect to draw in the mask
+// widthHeight - how large to make the mask (rrectToDraw will be centered in this coord sys)
+// rectXs, rectYs - the x & y coordinates of the covering geometry lattice
+// texXs, texYs - the texture coordinate at each point in rectXs & rectYs
+// numXs, numYs - number of coordinates in the x & y directions
+// skipMask - bit mask that contains a 1-bit whenever one of the cells is occluded
+// It returns true if 'devRRect' is nine-patchable
+bool SkComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
+ const SkRect& occluder,
+ SkScalar sigma, SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kSkBlurRRectMaxDivisions],
+ SkScalar rectYs[kSkBlurRRectMaxDivisions],
+ SkScalar texXs[kSkBlurRRectMaxDivisions],
+ SkScalar texYs[kSkBlurRRectMaxDivisions],
+ int* numXs, int* numYs, uint32_t* skipMask);
+
+extern void sk_register_blur_maskfilter_createproc();
+
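+// A hedged usage sketch (editor's addition): how a caller might query the nine-patch
+// parameters. The input rrects/occluder/sigmas are assumed to come from the caller.
+#if 0
+static bool hypothetical_query(const SkRRect& srcRRect, const SkRRect& devRRect,
+                               const SkRect& occluder,
+                               SkScalar sigma, SkScalar xformedSigma) {
+    SkRRect rrectToDraw;
+    SkISize widthHeight;
+    SkScalar rectXs[kSkBlurRRectMaxDivisions], rectYs[kSkBlurRRectMaxDivisions];
+    SkScalar texXs[kSkBlurRRectMaxDivisions], texYs[kSkBlurRRectMaxDivisions];
+    int numXs, numYs;
+    uint32_t skipMask;
+    // returns true iff devRRect is nine-patchable; the lattice in rectXs/rectYs and
+    // texXs/texYs then describes how to stretch the small mask, skipping any cell
+    // whose bit is set in skipMask.
+    return SkComputeBlurredRRectParams(srcRRect, devRRect, occluder, sigma, xformedSigma,
+                                       &rrectToDraw, &widthHeight, rectXs, rectYs,
+                                       texXs, texYs, &numXs, &numYs, &skipMask);
+}
+#endif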
+#endif
diff --git a/gfx/skia/skia/src/core/SkBuffer.cpp b/gfx/skia/skia/src/core/SkBuffer.cpp
new file mode 100644
index 0000000000..3bb38bf73b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBuffer.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+#include "src/core/SkBuffer.h"
+#include <string.h>
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+const void* SkRBuffer::skip(size_t size) {
+ if (fValid && size <= this->available()) {
+ const void* pos = fPos;
+ fPos += size;
+ return pos;
+ }
+ fValid = false;
+ return nullptr;
+}
+
+bool SkRBuffer::read(void* buffer, size_t size) {
+ if (const void* src = this->skip(size)) {
+ sk_careful_memcpy(buffer, src, size);
+ return true;
+ }
+ return false;
+}
+
+bool SkRBuffer::skipToAlign4() {
+ intptr_t pos = reinterpret_cast<intptr_t>(fPos);
+ size_t n = SkAlign4(pos) - pos;
+ if (fValid && n <= this->available()) {
+ fPos += n;
+ return true;
+ } else {
+ fValid = false;
+ return false;
+ }
+}
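+// Worked example (editor's illustration): if fPos sits at address 0x1005, then
+// SkAlign4(0x1005) = 0x1008, so n = 3 and the cursor skips 3 bytes; if fPos is
+// already 4-byte aligned, n = 0 and the call is a no-op that still returns true.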
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void* SkWBuffer::skip(size_t size) {
+ void* result = fPos;
+ writeNoSizeCheck(nullptr, size);
+ return fData == nullptr ? nullptr : result;
+}
+
+void SkWBuffer::writeNoSizeCheck(const void* buffer, size_t size) {
+ SkASSERT(fData == nullptr || fStop == nullptr || fPos + size <= fStop);
+ if (fData && buffer) {
+ sk_careful_memcpy(fPos, buffer, size);
+ }
+ fPos += size;
+}
+
+size_t SkWBuffer::padToAlign4() {
+ size_t pos = this->pos();
+ size_t n = SkAlign4(pos) - pos;
+
+ if (n && fData)
+ {
+ char* p = fPos;
+ char* stop = p + n;
+ do {
+ *p++ = 0;
+ } while (p < stop);
+ }
+ fPos += n;
+ return n;
+}
+
+#if 0
+#ifdef SK_DEBUG
+ static void AssertBuffer32(const void* buffer)
+ {
+ SkASSERT(buffer);
+ SkASSERT(((size_t)buffer & 3) == 0);
+ }
+#else
+ #define AssertBuffer32(buffer)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBuffer.h b/gfx/skia/skia/src/core/SkBuffer.h
new file mode 100644
index 0000000000..3f5e45f0ad
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBuffer.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBuffer_DEFINED
+#define SkBuffer_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkSafeMath.h"
+
+#include <limits>
+
+/** \class SkRBuffer
+
+ Light weight class for reading data from a memory block.
+ The RBuffer is given the buffer to read from, with either a specified size
+    or no size (in which case no range checking is performed). It is illegal
+    to attempt to read a value from an empty RBuffer (data == null).
+*/
+class SkRBuffer : SkNoncopyable {
+public:
+ SkRBuffer() : fData(nullptr), fPos(nullptr), fStop(nullptr) {}
+
+    /** Initialize RBuffer with a data pointer and length.
+ */
+ SkRBuffer(const void* data, size_t size) {
+ SkASSERT(data != nullptr || size == 0);
+ fData = (const char*)data;
+ fPos = (const char*)data;
+ fStop = (const char*)data + size;
+ }
+
+ /** Return the number of bytes that have been read from the beginning
+ of the data pointer.
+ */
+ size_t pos() const { return fPos - fData; }
+ /** Return the total size of the data pointer. Only defined if the length was
+ specified in the constructor or in a call to reset().
+ */
+ size_t size() const { return fStop - fData; }
+ /** Return true if the buffer has read to the end of the data pointer.
+ Only defined if the length was specified in the constructor or in a call
+ to reset(). Always returns true if the length was not specified.
+ */
+ bool eof() const { return fPos >= fStop; }
+
+ size_t available() const { return fStop - fPos; }
+
+ bool isValid() const { return fValid; }
+
+ /** Read the specified number of bytes from the data pointer. If buffer is not
+ null, copy those bytes into buffer.
+ */
+ bool read(void* buffer, size_t size);
+ bool skipToAlign4();
+
+ bool readU8(uint8_t* x) { return this->read(x, 1); }
+ bool readS32(int32_t* x) { return this->read(x, 4); }
+ bool readU32(uint32_t* x) { return this->read(x, 4); }
+
+ // returns nullptr on failure
+ const void* skip(size_t bytes);
+ template <typename T> const T* skipCount(size_t count) {
+ return static_cast<const T*>(this->skip(SkSafeMath::Mul(count, sizeof(T))));
+ }
+
+private:
+ const char* fData;
+ const char* fPos;
+ const char* fStop;
+ bool fValid = true;
+};
+
+/** \class SkWBuffer
+
+ Light weight class for writing data to a memory block.
+ The WBuffer is given the buffer to write into, with either a specified size
+ or no size, in which case no range checking is performed. An empty WBuffer
+ is legal, in which case no data is ever written, but the relative pos()
+ is updated.
+*/
+class SkWBuffer : SkNoncopyable {
+public:
+ SkWBuffer() : fData(nullptr), fPos(nullptr), fStop(nullptr) {}
+ SkWBuffer(void* data) { reset(data); }
+ SkWBuffer(void* data, size_t size) { reset(data, size); }
+
+ void reset(void* data) {
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = nullptr; // no bounds checking
+ }
+
+ void reset(void* data, size_t size) {
+ SkASSERT(data != nullptr || size == 0);
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = (char*)data + size;
+ }
+
+ size_t pos() const { return fPos - fData; }
+ void* skip(size_t size); // return start of skipped data
+
+ void write(const void* buffer, size_t size) {
+ if (size) {
+ this->writeNoSizeCheck(buffer, size);
+ }
+ }
+
+ size_t padToAlign4();
+
+ void writePtr(const void* x) { this->writeNoSizeCheck(&x, sizeof(x)); }
+ void writeScalar(SkScalar x) { this->writeNoSizeCheck(&x, 4); }
+ void write32(int32_t x) { this->writeNoSizeCheck(&x, 4); }
+ void write16(int16_t x) { this->writeNoSizeCheck(&x, 2); }
+ void write8(int8_t x) { this->writeNoSizeCheck(&x, 1); }
+ void writeBool(bool x) { this->write8(x); }
+
+private:
+ void writeNoSizeCheck(const void* buffer, size_t size);
+
+ char* fData;
+ char* fPos;
+ char* fStop;
+};
+
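+// A minimal round-trip sketch (editor's addition, not part of Skia): write a few
+// values with SkWBuffer, then read them back with SkRBuffer.
+#if 0
+static void sketch_round_trip() {
+    char storage[64];
+    SkWBuffer writer(storage, sizeof(storage));
+    writer.write32(42);
+    writer.writeScalar(3.5f);   // SkScalar is a float, so this writes 4 bytes
+    writer.padToAlign4();
+
+    SkRBuffer reader(storage, writer.pos());
+    int32_t i;
+    SkScalar s;
+    if (reader.readS32(&i) && reader.read(&s, 4) && reader.isValid()) {
+        SkASSERT(i == 42 && s == 3.5f);
+    }
+}
+#endif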
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.cpp b/gfx/skia/skia/src/core/SkCachedData.cpp
new file mode 100644
index 0000000000..1a6f423190
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkDiscardableMemory.h"
+
+SkCachedData::SkCachedData(void* data, size_t size)
+ : fData(data)
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kMalloc_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fMalloc = data;
+}
+
+SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
+ : fData(dm->data())
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kDiscardableMemory_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fDM = dm;
+}
+
+SkCachedData::~SkCachedData() {
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ sk_free(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ delete fStorage.fDM;
+ break;
+ }
+}
+
+class SkCachedData::AutoMutexWritable {
+public:
+ AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
+ fCD->fMutex.acquire();
+ fCD->validate();
+ }
+ ~AutoMutexWritable() {
+ fCD->validate();
+ fCD->fMutex.release();
+ }
+
+ SkCachedData* get() { return fCD; }
+ SkCachedData* operator->() { return fCD; }
+
+private:
+ SkCachedData* fCD;
+};
+
+void SkCachedData::internalRef(bool fromCache) const {
+ AutoMutexWritable(this)->inMutexRef(fromCache);
+}
+
+void SkCachedData::internalUnref(bool fromCache) const {
+ if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
+ // can't delete inside doInternalUnref, since it is locking a mutex (which we own)
+ delete this;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkCachedData::inMutexRef(bool fromCache) {
+ if ((1 == fRefCnt) && fInCache) {
+ this->inMutexLock();
+ }
+
+ fRefCnt += 1;
+ if (fromCache) {
+ SkASSERT(!fInCache);
+ fInCache = true;
+ }
+}
+
+bool SkCachedData::inMutexUnref(bool fromCache) {
+ switch (--fRefCnt) {
+ case 0:
+ // we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
+ if (fIsLocked) {
+ this->inMutexUnlock();
+ }
+ break;
+ case 1:
+ if (fInCache && !fromCache) {
+                // If we're down to 1 owner, and that owner is the cache, then it is safe
+ // to unlock (and mutate fData) even if the cache is in a different thread,
+ // as the cache is NOT allowed to inspect or use fData.
+ this->inMutexUnlock();
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (fromCache) {
+ SkASSERT(fInCache);
+ fInCache = false;
+ }
+
+ // return true when we need to be deleted
+ return 0 == fRefCnt;
+}
+
+void SkCachedData::inMutexLock() {
+ fMutex.assertHeld();
+
+ SkASSERT(!fIsLocked);
+ fIsLocked = true;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ this->setData(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fStorage.fDM->lock()) {
+ void* ptr = fStorage.fDM->data();
+ SkASSERT(ptr);
+ this->setData(ptr);
+ } else {
+ this->setData(nullptr); // signal failure to lock, contents are gone
+ }
+ break;
+ }
+}
+
+void SkCachedData::inMutexUnlock() {
+ fMutex.assertHeld();
+
+ SkASSERT(fIsLocked);
+ fIsLocked = false;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ // nothing to do/check
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fData) { // did the previous lock succeed?
+ fStorage.fDM->unlock();
+ }
+ break;
+ }
+ this->setData(nullptr); // signal that we're in an unlocked state
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkCachedData::validate() const {
+ if (fIsLocked) {
+ SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ SkASSERT(fData == fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ // fData can be null or the actual value, depending if DM's lock succeeded
+ break;
+ }
+ } else {
+ SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
+ SkASSERT(nullptr == fData);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.h b/gfx/skia/skia/src/core/SkCachedData.h
new file mode 100644
index 0000000000..d7b49ddd38
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCachedData_DEFINED
+#define SkCachedData_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkDiscardableMemory;
+
+class SkCachedData : ::SkNoncopyable {
+public:
+ SkCachedData(void* mallocData, size_t size);
+ SkCachedData(size_t size, SkDiscardableMemory*);
+ virtual ~SkCachedData();
+
+ size_t size() const { return fSize; }
+ const void* data() const { return fData; }
+
+ void* writable_data() { return fData; }
+
+ void ref() const { this->internalRef(false); }
+ void unref() const { this->internalUnref(false); }
+
+ int testing_only_getRefCnt() const { return fRefCnt; }
+ bool testing_only_isLocked() const { return fIsLocked; }
+ bool testing_only_isInCache() const { return fInCache; }
+
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const {
+ return kDiscardableMemory_StorageType == fStorageType ? fStorage.fDM : nullptr;
+ }
+
+protected:
+ // called when fData changes. could be nullptr.
+ virtual void onDataChange(void* oldData, void* newData) {}
+
+private:
+ SkMutex fMutex; // could use a pool of these...
+
+ enum StorageType {
+ kDiscardableMemory_StorageType,
+ kMalloc_StorageType
+ };
+
+ union {
+ SkDiscardableMemory* fDM;
+ void* fMalloc;
+ } fStorage;
+ void* fData;
+ size_t fSize;
+ int fRefCnt; // low-bit means we're owned by the cache
+ StorageType fStorageType;
+ bool fInCache;
+ bool fIsLocked;
+
+ void internalRef(bool fromCache) const;
+ void internalUnref(bool fromCache) const;
+
+ void inMutexRef(bool fromCache);
+ bool inMutexUnref(bool fromCache); // returns true if we should delete "this"
+ void inMutexLock();
+ void inMutexUnlock();
+
+ // called whenever our fData might change (lock or unlock)
+ void setData(void* newData) {
+ if (newData != fData) {
+ // notify our subclasses of the change
+ this->onDataChange(fData, newData);
+ fData = newData;
+ }
+ }
+
+ class AutoMutexWritable;
+
+public:
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ /*
+     * Attaching data to an SkResourceCache (only one at a time) enables the data to be
+ * unlocked when the cache is the only owner, thus freeing it to be purged (assuming the
+ * data is backed by a SkDiscardableMemory).
+ *
+ * When attached, it also automatically attempts to "lock" the data when the first client
+ * ref's the data (typically from a find(key, visitor) call).
+ *
+ * Thus the data will always be "locked" when a non-cache has a ref on it (whether or not
+ * the lock succeeded to recover the memory -- check data() to see if it is nullptr).
+ */
+
+ /*
+ * Call when adding this instance to a SkResourceCache::Rec subclass
+ * (typically in the Rec's constructor).
+ */
+ void attachToCacheAndRef() const { this->internalRef(true); }
+
+ /*
+ * Call when removing this instance from a SkResourceCache::Rec subclass
+ * (typically in the Rec's destructor).
+ */
+ void detachFromCacheAndUnref() const { this->internalUnref(true); }
+};
+
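+// A hedged sketch (editor's addition) of the intended ownership dance, using a
+// hypothetical Rec type standing in for an SkResourceCache::Rec subclass:
+#if 0
+struct HypotheticalRec {
+    SkCachedData* fData;
+
+    HypotheticalRec(SkCachedData* data) : fData(data) {
+        fData->attachToCacheAndRef();     // the cache takes its own ref; the data may
+                                          // unlock later, once the cache is sole owner
+    }
+    ~HypotheticalRec() {
+        fData->detachFromCacheAndUnref(); // drop the cache's ref on eviction
+    }
+};
+// Clients that find() the rec use ref()/unref() as usual; the first client ref
+// re-locks the discardable memory, and data() returns nullptr if that lock failed.
+#endif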
+#endif
diff --git a/gfx/skia/skia/src/core/SkCanvas.cpp b/gfx/skia/skia/src/core/SkCanvas.cpp
new file mode 100644
index 0000000000..94b1835421
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvas.cpp
@@ -0,0 +1,3155 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTo.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMSAN.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTextFormatParams.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkSurface_Base.h"
+#include "src/utils/SkPatchUtils.h"
+
+#include <new>
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/SkGr.h"
+#endif
+
+#define RETURN_ON_NULL(ptr) do { if (nullptr == (ptr)) return; } while (0)
+#define RETURN_ON_FALSE(pred) do { if (!(pred)) return; } while (0)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Return true if drawing this rect would hit every pixel in the canvas.
+ *
+ * Returns false if
+ * - rect does not contain the canvas' bounds
+ * - paint is not fill
+ * - paint would blur or otherwise change the coverage of the rect
+ */
+bool SkCanvas::wouldOverwriteEntireSurface(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) const {
+ static_assert((int)SkPaintPriv::kNone_ShaderOverrideOpacity ==
+ (int)kNone_ShaderOverrideOpacity,
+ "need_matching_enums0");
+ static_assert((int)SkPaintPriv::kOpaque_ShaderOverrideOpacity ==
+ (int)kOpaque_ShaderOverrideOpacity,
+ "need_matching_enums1");
+ static_assert((int)SkPaintPriv::kNotOpaque_ShaderOverrideOpacity ==
+ (int)kNotOpaque_ShaderOverrideOpacity,
+ "need_matching_enums2");
+
+ const SkISize size = this->getBaseLayerSize();
+ const SkRect bounds = SkRect::MakeIWH(size.width(), size.height());
+
+ // if we're clipped at all, we can't overwrite the entire surface
+ {
+ SkBaseDevice* base = this->getDevice();
+ SkBaseDevice* top = this->getTopDevice();
+ if (base != top) {
+ return false; // we're in a saveLayer, so conservatively don't assume we'll overwrite
+ }
+ if (!base->clipIsWideOpen()) {
+ return false;
+ }
+ }
+
+ if (rect) {
+ if (!this->getTotalMatrix().isScaleTranslate()) {
+ return false; // conservative
+ }
+
+ SkRect devRect;
+ this->getTotalMatrix().mapRectScaleTranslate(&devRect, *rect);
+ if (!devRect.contains(bounds)) {
+ return false;
+ }
+ }
+
+ if (paint) {
+ SkPaint::Style paintStyle = paint->getStyle();
+ if (!(paintStyle == SkPaint::kFill_Style ||
+ paintStyle == SkPaint::kStrokeAndFill_Style)) {
+ return false;
+ }
+ if (paint->getMaskFilter() || paint->getPathEffect() || paint->getImageFilter()) {
+ return false; // conservative
+ }
+ }
+ return SkPaintPriv::Overwrites(paint, (SkPaintPriv::ShaderOverrideOpacity)overrideOpacity);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// experimental for faster tiled drawing...
+//#define SK_TRACE_SAVERESTORE
+
+#ifdef SK_TRACE_SAVERESTORE
+ static int gLayerCounter;
+ static void inc_layer() { ++gLayerCounter; printf("----- inc layer %d\n", gLayerCounter); }
+ static void dec_layer() { --gLayerCounter; printf("----- dec layer %d\n", gLayerCounter); }
+
+ static int gRecCounter;
+ static void inc_rec() { ++gRecCounter; printf("----- inc rec %d\n", gRecCounter); }
+ static void dec_rec() { --gRecCounter; printf("----- dec rec %d\n", gRecCounter); }
+
+ static int gCanvasCounter;
+ static void inc_canvas() { ++gCanvasCounter; printf("----- inc canvas %d\n", gCanvasCounter); }
+ static void dec_canvas() { --gCanvasCounter; printf("----- dec canvas %d\n", gCanvasCounter); }
+#else
+ #define inc_layer()
+ #define dec_layer()
+ #define inc_rec()
+ #define dec_rec()
+ #define inc_canvas()
+ #define dec_canvas()
+#endif
+
+typedef SkTLazy<SkPaint> SkLazyPaint;
+
+void SkCanvas::predrawNotify(bool willOverwriteEntireSurface) {
+    if (fSurfaceBase) {
+        fSurfaceBase->aboutToDraw(willOverwriteEntireSurface
+ ? SkSurface::kDiscard_ContentChangeMode
+ : SkSurface::kRetain_ContentChangeMode);
+ }
+}
+
+void SkCanvas::predrawNotify(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) {
+ if (fSurfaceBase) {
+ SkSurface::ContentChangeMode mode = SkSurface::kRetain_ContentChangeMode;
+        // Since wouldOverwriteEntireSurface() may not be completely free to call, we only do
+        // so if there is an outstanding snapshot, since w/o that, there will be no
+        // copy-on-write and therefore we don't care which mode we're in.
+ if (fSurfaceBase->outstandingImageSnapshot()) {
+ if (this->wouldOverwriteEntireSurface(rect, paint, overrideOpacity)) {
+ mode = SkSurface::kDiscard_ContentChangeMode;
+ }
+ }
+ fSurfaceBase->aboutToDraw(mode);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* This is the record we keep for each SkBaseDevice that the user installs.
+ The clip/matrix/proc are fields that reflect the top of the save/restore
+ stack. Whenever the canvas changes, it marks a dirty flag, and then before
+ these are used (assuming we're not on a layer) we rebuild these cache
+ values: they reflect the top of the save stack, but translated and clipped
+ by the device's XY offset and bitmap-bounds.
+*/
+struct DeviceCM {
+ DeviceCM* fNext;
+ sk_sp<SkBaseDevice> fDevice;
+ SkRasterClip fClip;
+ std::unique_ptr<const SkPaint> fPaint; // may be null (in the future)
+ SkMatrix fStashedMatrix; // original CTM; used by imagefilter in saveLayer
+ sk_sp<SkImage> fClipImage;
+ SkMatrix fClipMatrix;
+
+ DeviceCM(sk_sp<SkBaseDevice> device, const SkPaint* paint, const SkMatrix& stashed,
+ const SkImage* clipImage, const SkMatrix* clipMatrix)
+ : fNext(nullptr)
+ , fDevice(std::move(device))
+ , fPaint(paint ? skstd::make_unique<SkPaint>(*paint) : nullptr)
+ , fStashedMatrix(stashed)
+ , fClipImage(sk_ref_sp(const_cast<SkImage*>(clipImage)))
+ , fClipMatrix(clipMatrix ? *clipMatrix : SkMatrix::I())
+ {}
+
+ void reset(const SkIRect& bounds) {
+ SkASSERT(!fPaint);
+ SkASSERT(!fNext);
+ SkASSERT(fDevice);
+ fClip.setRect(bounds);
+ }
+};
+
+namespace {
+// Encapsulate state needed to restore from saveBehind()
+struct BackImage {
+ sk_sp<SkSpecialImage> fImage;
+ SkIPoint fLoc;
+};
+}
+
+/* This is the record we keep for each save/restore level in the stack.
+ Since a level optionally copies the matrix and/or stack, we have pointers
+ for these fields. If the value is copied for this level, the copy is
+ stored in the ...Storage field, and the pointer points to that. If the
+ value is not copied for this level, we ignore ...Storage, and just point
+ at the corresponding value in the previous level in the stack.
+*/
+class SkCanvas::MCRec {
+public:
+ DeviceCM* fLayer;
+    /* If there are any layers in the stack, this points to the top-most
+       one that is at or below this level in the stack (so we know what
+       bitmap/device to draw into from this level). This value is NOT
+       reference counted, since the real owner is either our fLayer field,
+       or a previous one in a lower level.
+    */
+ DeviceCM* fTopLayer;
+ std::unique_ptr<BackImage> fBackImage;
+ SkConservativeClip fRasterClip;
+ SkMatrix fMatrix;
+ int fDeferredSaveCount;
+
+ MCRec() {
+ fLayer = nullptr;
+ fTopLayer = nullptr;
+ fMatrix.reset();
+ fDeferredSaveCount = 0;
+
+ // don't bother initializing fNext
+ inc_rec();
+ }
+ MCRec(const MCRec& prev) : fRasterClip(prev.fRasterClip), fMatrix(prev.fMatrix) {
+ fLayer = nullptr;
+ fTopLayer = prev.fTopLayer;
+ fDeferredSaveCount = 0;
+
+ // don't bother initializing fNext
+ inc_rec();
+ }
+ ~MCRec() {
+ delete fLayer;
+ dec_rec();
+ }
+
+ void reset(const SkIRect& bounds) {
+ SkASSERT(fLayer);
+ SkASSERT(fDeferredSaveCount == 0);
+
+ fMatrix.reset();
+ fRasterClip.setRect(bounds);
+ fLayer->reset(bounds);
+ }
+};
+
+class SkDrawIter {
+public:
+ SkDrawIter(SkCanvas* canvas)
+ : fDevice(nullptr), fCurrLayer(canvas->fMCRec->fTopLayer), fPaint(nullptr)
+ {}
+
+ bool next() {
+ const DeviceCM* rec = fCurrLayer;
+ if (rec && rec->fDevice) {
+ fDevice = rec->fDevice.get();
+ fPaint = rec->fPaint.get();
+ fCurrLayer = rec->fNext;
+ // fCurrLayer may be nullptr now
+ return true;
+ }
+ return false;
+ }
+
+ int getX() const { return fDevice->getOrigin().x(); }
+ int getY() const { return fDevice->getOrigin().y(); }
+ const SkPaint* getPaint() const { return fPaint; }
+
+ SkBaseDevice* fDevice;
+
+private:
+ const DeviceCM* fCurrLayer;
+ const SkPaint* fPaint; // May be null.
+};
+
+#define FOR_EACH_TOP_DEVICE( code ) \
+ do { \
+ DeviceCM* layer = fMCRec->fTopLayer; \
+ while (layer) { \
+ SkBaseDevice* device = layer->fDevice.get(); \
+ if (device) { \
+ code; \
+ } \
+ layer = layer->fNext; \
+ } \
+ } while (0)
+
+/////////////////////////////////////////////////////////////////////////////
+
+/**
+ * If the paint has an imagefilter, but it can be simplified to just a colorfilter, return that
+ * colorfilter, else return nullptr.
+ */
+static sk_sp<SkColorFilter> image_to_color_filter(const SkPaint& paint) {
+ SkImageFilter* imgf = paint.getImageFilter();
+ if (!imgf) {
+ return nullptr;
+ }
+
+ SkColorFilter* imgCFPtr;
+ if (!imgf->asAColorFilter(&imgCFPtr)) {
+ return nullptr;
+ }
+ sk_sp<SkColorFilter> imgCF(imgCFPtr);
+
+ SkColorFilter* paintCF = paint.getColorFilter();
+ if (nullptr == paintCF) {
+ // there is no existing paint colorfilter, so we can just return the imagefilter's
+ return imgCF;
+ }
+
+ // The paint has both a colorfilter(paintCF) and an imagefilter-which-is-a-colorfilter(imgCF)
+ // and we need to combine them into a single colorfilter.
+ return imgCF->makeComposed(sk_ref_sp(paintCF));
+}
+
+/**
+ * There are many bounds in skia. A circle's bounds is just its center extended by its radius.
+ * However, if we stroke a circle, then the "bounds" of that is larger, since it will now draw
+ * outside of its raw-bounds by 1/2 the stroke width. SkPaint has lots of optional
+ * effects/attributes that can modify the effective bounds of a given primitive -- maskfilters,
+ * patheffects, stroking, etc. This function takes a raw bounds and a paint, and returns the
+ * conservative "effective" bounds based on the settings in the paint... with one exception. This
+ * function does *not* look at the imagefilter, which can also modify the effective bounds. It is
+ * deliberately ignored.
+ */
+static const SkRect& apply_paint_to_bounds_sans_imagefilter(const SkPaint& paint,
+ const SkRect& rawBounds,
+ SkRect* storage) {
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+ if (tmpUnfiltered.canComputeFastBounds()) {
+ return tmpUnfiltered.computeFastBounds(rawBounds, storage);
+ } else {
+ return rawBounds;
+ }
+}
+
+class AutoLayerForImageFilter {
+public:
+ // "rawBounds" is the original bounds of the primitive about to be drawn, unmodified by the
+ // paint. It's used to determine the size of the offscreen layer for filters.
+ // If null, the clip will be used instead.
+ AutoLayerForImageFilter(SkCanvas* canvas, const SkPaint& origPaint,
+ bool skipLayerForImageFilter = false,
+ const SkRect* rawBounds = nullptr) {
+ fCanvas = canvas;
+ fPaint = &origPaint;
+ fSaveCount = canvas->getSaveCount();
+ fTempLayerForImageFilter = false;
+
+ if (auto simplifiedCF = image_to_color_filter(origPaint)) {
+ SkASSERT(!fLazyPaint.isValid());
+ SkPaint* paint = fLazyPaint.set(origPaint);
+ paint->setColorFilter(std::move(simplifiedCF));
+ paint->setImageFilter(nullptr);
+ fPaint = paint;
+ }
+
+ if (!skipLayerForImageFilter && fPaint->getImageFilter()) {
+ /**
+ * We implement ImageFilters for a given draw by creating a layer, then applying the
+ * imagefilter to the pixels of that layer (its backing surface/image), and then
+ * we call restore() to xfer that layer to the main canvas.
+ *
+ * 1. SaveLayer (with a paint containing the current imagefilter and xfermode)
+ * 2. Generate the src pixels:
+             * Remove the imagefilter and the xfermode from the paint that we (this class)
+ * return (fPaint). We then draw the primitive (using srcover) into a cleared
+ * buffer/surface.
+ * 3. Restore the layer created in #1
+ * The imagefilter is passed the buffer/surface from the layer (now filled with the
+ * src pixels of the primitive). It returns a new "filtered" buffer, which we
+ * draw onto the previous layer using the xfermode from the original paint.
+ */
+
+ SkPaint restorePaint;
+ restorePaint.setImageFilter(fPaint->refImageFilter());
+ restorePaint.setBlendMode(fPaint->getBlendMode());
+
+ SkRect storage;
+ if (rawBounds) {
+ // Make rawBounds include all paint outsets except for those due to image filters.
+ rawBounds = &apply_paint_to_bounds_sans_imagefilter(*fPaint, *rawBounds, &storage);
+ }
+ (void)canvas->internalSaveLayer(SkCanvas::SaveLayerRec(rawBounds, &restorePaint),
+ SkCanvas::kFullLayer_SaveLayerStrategy);
+ fTempLayerForImageFilter = true;
+
+ // Remove the restorePaint fields from our "working" paint
+ SkASSERT(!fLazyPaint.isValid());
+ SkPaint* paint = fLazyPaint.set(origPaint);
+ paint->setImageFilter(nullptr);
+ paint->setBlendMode(SkBlendMode::kSrcOver);
+ fPaint = paint;
+ }
+ }
+
+ ~AutoLayerForImageFilter() {
+ if (fTempLayerForImageFilter) {
+ fCanvas->internalRestore();
+ }
+ SkASSERT(fCanvas->getSaveCount() == fSaveCount);
+ }
+
+ const SkPaint& paint() const {
+ SkASSERT(fPaint);
+ return *fPaint;
+ }
+
+private:
+ SkLazyPaint fLazyPaint; // base paint storage in case we need to modify it
+ SkCanvas* fCanvas;
+ const SkPaint* fPaint; // points to either the original paint, or lazy (if we needed it)
+ int fSaveCount;
+ bool fTempLayerForImageFilter;
+};
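+// Editor's note: for a paint carrying an image filter, the net effect of the class
+// above is roughly this hand-written sequence (illustrative sketch only):
+#if 0
+static void sketch_filtered_draw(SkCanvas* canvas, const SkPaint& paint, const SkRect& rect) {
+    SkPaint restorePaint;                               // carries the filter + blend mode
+    restorePaint.setImageFilter(paint.refImageFilter());
+    restorePaint.setBlendMode(paint.getBlendMode());
+    canvas->saveLayer(nullptr, &restorePaint);          // 1. make the offscreen layer
+
+    SkPaint drawPaint(paint);                           // 2. draw the src pixels with the
+    drawPaint.setImageFilter(nullptr);                  //    filter and xfermode removed
+    drawPaint.setBlendMode(SkBlendMode::kSrcOver);
+    canvas->drawRect(rect, drawPaint);                  //    (any primitive works here)
+
+    canvas->restore();                                  // 3. filter and composite the layer
+}
+#endif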
+
+////////// macros to place around the internal draw calls //////////////////
+
+#define DRAW_BEGIN_DRAWBITMAP(paint, skipLayerForFilter, bounds) \
+ this->predrawNotify(); \
+ AutoLayerForImageFilter draw(this, paint, skipLayerForFilter, bounds); \
+ { SkDrawIter iter(this);
+
+
+#define DRAW_BEGIN_DRAWDEVICE(paint) \
+ this->predrawNotify(); \
+ AutoLayerForImageFilter draw(this, paint, true); \
+ { SkDrawIter iter(this);
+
+#define DRAW_BEGIN(paint, bounds) \
+ this->predrawNotify(); \
+ AutoLayerForImageFilter draw(this, paint, false, bounds); \
+ { SkDrawIter iter(this);
+
+#define DRAW_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, bounds, auxOpaque) \
+ this->predrawNotify(bounds, &paint, auxOpaque); \
+ AutoLayerForImageFilter draw(this, paint, false, bounds); \
+ { SkDrawIter iter(this);
+
+#define DRAW_END }
+
+////////////////////////////////////////////////////////////////////////////
+
+static inline SkRect qr_clip_bounds(const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
+ return SkRect::MakeEmpty();
+ }
+
+ // Expand bounds out by 1 in case we are anti-aliasing. We store the
+ // bounds as floats to enable a faster quick reject implementation.
+ SkRect dst;
+ SkNx_cast<float>(Sk4i::Load(&bounds.fLeft) + Sk4i(-1,-1,1,1)).store(&dst.fLeft);
+ return dst;
+}
+
+void SkCanvas::resetForNextPicture(const SkIRect& bounds) {
+ this->restoreToCount(1);
+ fMCRec->reset(bounds);
+
+ // We're peering through a lot of structs here. Only at this scope do we
+ // know that the device is a SkNoPixelsDevice.
+ static_cast<SkNoPixelsDevice*>(fMCRec->fLayer->fDevice.get())->resetForNextPicture(bounds);
+ fDeviceClipBounds = qr_clip_bounds(bounds);
+ fIsScaleTranslate = true;
+}
+
+void SkCanvas::init(sk_sp<SkBaseDevice> device) {
+ fAllowSimplifyClip = false;
+ fSaveCount = 1;
+
+ fMCRec = (MCRec*)fMCStack.push_back();
+ new (fMCRec) MCRec;
+ fMCRec->fRasterClip.setDeviceClipRestriction(&fClipRestrictionRect);
+ fIsScaleTranslate = true;
+
+ SkASSERT(sizeof(DeviceCM) <= sizeof(fDeviceCMStorage));
+ fMCRec->fLayer = (DeviceCM*)fDeviceCMStorage;
+ new (fDeviceCMStorage) DeviceCM(device, nullptr, fMCRec->fMatrix, nullptr, nullptr);
+
+ fMCRec->fTopLayer = fMCRec->fLayer;
+
+ fSurfaceBase = nullptr;
+
+ if (device) {
+ // The root device and the canvas should always have the same pixel geometry
+ SkASSERT(fProps.pixelGeometry() == device->surfaceProps().pixelGeometry());
+ fMCRec->fRasterClip.setRect(device->getGlobalBounds());
+ fDeviceClipBounds = qr_clip_bounds(device->getGlobalBounds());
+
+ device->androidFramework_setDeviceClipRestriction(&fClipRestrictionRect);
+ }
+
+ fScratchGlyphRunBuilder = skstd::make_unique<SkGlyphRunBuilder>();
+}
+
+SkCanvas::SkCanvas()
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+{
+ inc_canvas();
+
+ this->init(nullptr);
+}
+
+SkCanvas::SkCanvas(int width, int height, const SkSurfaceProps* props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfacePropsCopyOrDefault(props))
+{
+ inc_canvas();
+ this->init(sk_make_sp<SkNoPixelsDevice>(
+ SkIRect::MakeWH(SkTMax(width, 0), SkTMax(height, 0)), fProps));
+}
+
+SkCanvas::SkCanvas(const SkIRect& bounds)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+{
+ inc_canvas();
+
+ SkIRect r = bounds.isEmpty() ? SkIRect::MakeEmpty() : bounds;
+ this->init(sk_make_sp<SkNoPixelsDevice>(r, fProps));
+}
+
+SkCanvas::SkCanvas(sk_sp<SkBaseDevice> device)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(device->surfaceProps())
+{
+ inc_canvas();
+
+ this->init(device);
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(props)
+{
+ inc_canvas();
+
+ sk_sp<SkBaseDevice> device(new SkBitmapDevice(bitmap, fProps, nullptr, nullptr));
+ this->init(device);
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap, std::unique_ptr<SkRasterHandleAllocator> alloc,
+ SkRasterHandleAllocator::Handle hndl)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fAllocator(std::move(alloc))
+{
+ inc_canvas();
+
+ sk_sp<SkBaseDevice> device(new SkBitmapDevice(bitmap, fProps, hndl, nullptr));
+ this->init(device);
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap) : SkCanvas(bitmap, nullptr, nullptr) {}
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+SkCanvas::SkCanvas(const SkBitmap& bitmap, ColorBehavior)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fAllocator(nullptr)
+{
+ inc_canvas();
+
+ SkBitmap tmp(bitmap);
+ *const_cast<SkImageInfo*>(&tmp.info()) = tmp.info().makeColorSpace(nullptr);
+ sk_sp<SkBaseDevice> device(new SkBitmapDevice(tmp, fProps, nullptr, nullptr));
+ this->init(device);
+}
+#endif
+
+SkCanvas::~SkCanvas() {
+ // free up the contents of our deque
+ this->restoreToCount(1); // restore everything but the last
+
+ this->internalRestore(); // restore the last, since we're going away
+
+ dec_canvas();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::flush() {
+ this->onFlush();
+}
+
+void SkCanvas::onFlush() {
+ SkBaseDevice* device = this->getDevice();
+ if (device) {
+ device->flush();
+ }
+}
+
+SkISize SkCanvas::getBaseLayerSize() const {
+ SkBaseDevice* d = this->getDevice();
+ return d ? SkISize::Make(d->width(), d->height()) : SkISize::Make(0, 0);
+}
+
+SkIRect SkCanvas::getTopLayerBounds() const {
+ SkBaseDevice* d = this->getTopDevice();
+ if (!d) {
+ return SkIRect::MakeEmpty();
+ }
+ return SkIRect::MakeXYWH(d->getOrigin().x(), d->getOrigin().y(), d->width(), d->height());
+}
+
+SkBaseDevice* SkCanvas::getDevice() const {
+ // return root device
+ MCRec* rec = (MCRec*) fMCStack.front();
+ SkASSERT(rec && rec->fLayer);
+ return rec->fLayer->fDevice.get();
+}
+
+SkBaseDevice* SkCanvas::getTopDevice() const {
+ return fMCRec->fTopLayer->fDevice.get();
+}
+
+bool SkCanvas::readPixels(const SkPixmap& pm, int x, int y) {
+ SkBaseDevice* device = this->getDevice();
+ return device && pm.addr() && device->readPixels(pm, x, y);
+}
+
+bool SkCanvas::readPixels(const SkImageInfo& dstInfo, void* dstP, size_t rowBytes, int x, int y) {
+ return this->readPixels({ dstInfo, dstP, rowBytes}, x, y);
+}
+
+bool SkCanvas::readPixels(const SkBitmap& bm, int x, int y) {
+ SkPixmap pm;
+ return bm.peekPixels(&pm) && this->readPixels(pm, x, y);
+}
+
+bool SkCanvas::writePixels(const SkBitmap& bitmap, int x, int y) {
+ SkPixmap pm;
+ if (bitmap.peekPixels(&pm)) {
+ return this->writePixels(pm.info(), pm.addr(), pm.rowBytes(), x, y);
+ }
+ return false;
+}
+
+bool SkCanvas::writePixels(const SkImageInfo& srcInfo, const void* pixels, size_t rowBytes,
+ int x, int y) {
+ SkBaseDevice* device = this->getDevice();
+ if (!device) {
+ return false;
+ }
+
+ // This check gives us an early out and prevents generation ID churn on the surface.
+ // This is purely optional: it is a subset of the checks performed by SkWritePixelsRec.
+ SkIRect srcRect = SkIRect::MakeXYWH(x, y, srcInfo.width(), srcInfo.height());
+ if (!srcRect.intersect({0, 0, device->width(), device->height()})) {
+ return false;
+ }
+
+ // Tell our owning surface to bump its generation ID.
+ const bool completeOverwrite =
+ srcRect.size() == SkISize::Make(device->width(), device->height());
+ this->predrawNotify(completeOverwrite);
+
+    // This can still fail, most notably in the case of an invalid color type or alpha type
+ // conversion. We could pull those checks into this function and avoid the unnecessary
+ // generation ID bump. But then we would be performing those checks twice, since they
+ // are also necessary at the bitmap/pixmap entry points.
+ return device->writePixels({srcInfo, pixels, rowBytes}, x, y);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::checkForDeferredSave() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ this->doSave();
+ }
+}
+
+int SkCanvas::getSaveCount() const {
+#ifdef SK_DEBUG
+ int count = 0;
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kFront_IterStart);
+ for (;;) {
+ const MCRec* rec = (const MCRec*)iter.next();
+ if (!rec) {
+ break;
+ }
+ count += 1 + rec->fDeferredSaveCount;
+ }
+ SkASSERT(count == fSaveCount);
+#endif
+ return fSaveCount;
+}
+
+int SkCanvas::save() {
+ fSaveCount += 1;
+ fMCRec->fDeferredSaveCount += 1;
+ return this->getSaveCount() - 1; // return our prev value
+}
+
+void SkCanvas::doSave() {
+ this->willSave();
+
+ SkASSERT(fMCRec->fDeferredSaveCount > 0);
+ fMCRec->fDeferredSaveCount -= 1;
+ this->internalSave();
+}
+
+void SkCanvas::restore() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ fMCRec->fDeferredSaveCount -= 1;
+ } else {
+ // check for underflow
+ if (fMCStack.count() > 1) {
+ this->willRestore();
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ this->internalRestore();
+ this->didRestore();
+ }
+ }
+}
+
+void SkCanvas::restoreToCount(int count) {
+ // sanity check
+ if (count < 1) {
+ count = 1;
+ }
+
+ int n = this->getSaveCount() - count;
+ for (int i = 0; i < n; ++i) {
+ this->restore();
+ }
+}
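+// Worked example (editor's illustration) of the save-count bookkeeping above:
+#if 0
+static void sketch_save_restore() {
+    SkCanvas canvas(100, 100);
+    SkASSERT(canvas.getSaveCount() == 1);   // base level
+    int before = canvas.save();             // returns the previous count: 1
+    canvas.save();
+    SkASSERT(canvas.getSaveCount() == 3);
+    canvas.restoreToCount(before);          // pops both saves in one call
+    SkASSERT(canvas.getSaveCount() == 1);
+}
+#endif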
+
+void SkCanvas::internalSave() {
+ MCRec* newTop = (MCRec*)fMCStack.push_back();
+ new (newTop) MCRec(*fMCRec); // balanced in restore()
+ fMCRec = newTop;
+
+ FOR_EACH_TOP_DEVICE(device->save());
+}
+
+bool SkCanvas::BoundsAffectsClip(SaveLayerFlags saveLayerFlags) {
+ return !(saveLayerFlags & SkCanvasPriv::kDontClipToLayer_SaveLayerFlag);
+}
+
+bool SkCanvas::clipRectBounds(const SkRect* bounds, SaveLayerFlags saveLayerFlags,
+ SkIRect* intersection, const SkImageFilter* imageFilter) {
+ // clipRectBounds() is called to determine the input layer size needed for a given image filter.
+ // The coordinate space of the rectangle passed to filterBounds(kReverse) is meant to be in the
+ // filtering layer space. Here, 'clipBounds' is always in the true device space. When an image
+ // filter does not require a decomposed CTM matrix, the filter space and device space are the
+ // same. When it has been decomposed, we want the original image filter node to process the
+ // bounds in the layer space represented by the decomposed scale matrix. 'imageFilter' is no
+    // longer the original filter, but has the remainder matrix baked into it, and passing in
+    // the true device clip bounds ensures that the matrix image filter provides a layer clip bounds
+ // to the original filter node (barring inflation from consecutive calls to mapRect). While
+ // initially counter-intuitive given the apparent inconsistency of coordinate spaces, always
+ // passing getDeviceClipBounds() to 'imageFilter' is correct.
+ // FIXME (michaelludwig) - When the remainder matrix is instead applied as a final draw, it will
+ // be important to more accurately calculate the clip bounds in the layer space for the original
+ // image filter (similar to how matrix image filter does it, but ideally without the inflation).
+ SkIRect clipBounds = this->getDeviceClipBounds();
+ if (clipBounds.isEmpty()) {
+ return false;
+ }
+
+ const SkMatrix& ctm = fMCRec->fMatrix; // this->getTotalMatrix()
+
+ if (imageFilter && bounds && !imageFilter->canComputeFastBounds()) {
+ // If the image filter DAG affects transparent black then we will need to render
+ // out to the clip bounds
+ bounds = nullptr;
+ }
+
+ SkIRect inputSaveLayerBounds;
+ if (bounds) {
+ SkRect r;
+ ctm.mapRect(&r, *bounds);
+ r.roundOut(&inputSaveLayerBounds);
+ } else { // no user bounds, so just use the clip
+ inputSaveLayerBounds = clipBounds;
+ }
+
+ if (imageFilter) {
+ // expand the clip bounds by the image filter DAG to include extra content that might
+ // be required by the image filters.
+ clipBounds = imageFilter->filterBounds(clipBounds, ctm,
+ SkImageFilter::kReverse_MapDirection,
+ &inputSaveLayerBounds);
+ }
+
+ SkIRect clippedSaveLayerBounds;
+ if (bounds) {
+ // For better or for worse, user bounds currently act as a hard clip on the layer's
+ // extent (i.e., they implement the CSS filter-effects 'filter region' feature).
+ clippedSaveLayerBounds = inputSaveLayerBounds;
+ } else {
+ // If there are no user bounds, we don't want to artificially restrict the resulting
+        // layer bounds, so allow the expanded clip bounds free rein.
+ clippedSaveLayerBounds = clipBounds;
+ }
+
+ // early exit if the layer's bounds are clipped out
+ if (!clippedSaveLayerBounds.intersect(clipBounds)) {
+ if (BoundsAffectsClip(saveLayerFlags)) {
+ fMCRec->fTopLayer->fDevice->clipRegion(SkRegion(), SkClipOp::kIntersect); // empty
+ fMCRec->fRasterClip.setEmpty();
+ fDeviceClipBounds.setEmpty();
+ }
+ return false;
+ }
+ SkASSERT(!clippedSaveLayerBounds.isEmpty());
+
+ if (BoundsAffectsClip(saveLayerFlags)) {
+ // Simplify the current clips since they will be applied properly during restore()
+ fMCRec->fRasterClip.setRect(clippedSaveLayerBounds);
+ fDeviceClipBounds = qr_clip_bounds(clippedSaveLayerBounds);
+ }
+
+ if (intersection) {
+ *intersection = clippedSaveLayerBounds;
+ }
+
+ return true;
+}
+
+int SkCanvas::saveLayer(const SkRect* bounds, const SkPaint* paint) {
+ return this->saveLayer(SaveLayerRec(bounds, paint, 0));
+}
+
+int SkCanvas::saveLayer(const SaveLayerRec& rec) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (rec.fPaint && rec.fPaint->nothingToDraw()) {
+        // no need for the layer (or any of the draws until the matching restore())
+ this->save();
+ this->clipRect({0,0,0,0});
+ } else {
+ SaveLayerStrategy strategy = this->getSaveLayerStrategy(rec);
+ fSaveCount += 1;
+ this->internalSaveLayer(rec, strategy);
+ }
+ return this->getSaveCount() - 1;
+}
+
+int SkCanvas::only_axis_aligned_saveBehind(const SkRect* bounds) {
+ if (bounds && !this->getLocalClipBounds().intersects(*bounds)) {
+        // Assuming clips never expand, if the requested bounds are outside of the current clip
+        // there is no need to copy/restore the area, so just devolve back to a regular save.
+ this->save();
+ } else {
+ bool doTheWork = this->onDoSaveBehind(bounds);
+ fSaveCount += 1;
+ this->internalSave();
+ if (doTheWork) {
+ this->internalSaveBehind(bounds);
+ }
+ }
+ return this->getSaveCount() - 1;
+}
+
+void SkCanvas::DrawDeviceWithFilter(SkBaseDevice* src, const SkImageFilter* filter,
+ SkBaseDevice* dst, const SkIPoint& dstOrigin,
+ const SkMatrix& ctm) {
+ // The local bounds of the src device; all the bounds passed to snapSpecial must be intersected
+ // with this rect.
+ const SkIRect srcDevRect = SkIRect::MakeWH(src->width(), src->height());
+
+ if (!filter) {
+ // All non-filtered devices are currently axis aligned, so they only differ by their origin.
+ // This means that we only have to copy a dst-sized block of pixels out of src and translate
+ // it to the matching position relative to dst's origin.
+ SkIRect snapBounds = SkIRect::MakeXYWH(dstOrigin.x() - src->getOrigin().x(),
+ dstOrigin.y() - src->getOrigin().y(),
+ dst->width(), dst->height());
+ if (!snapBounds.intersect(srcDevRect)) {
+ return;
+ }
+
+ auto special = src->snapSpecial(snapBounds);
+ if (special) {
+ // The image is drawn at 1-1 scale with integer translation, so no filtering is needed.
+ SkPaint p;
+ dst->drawSpecial(special.get(), 0, 0, p, nullptr, SkMatrix::I());
+ }
+ return;
+ }
+
+ // First decompose the ctm into a post-filter transform and a filter matrix that is supported
+ // by the backdrop filter.
+ SkMatrix toRoot, layerMatrix;
+ SkSize scale;
+ if (ctm.isScaleTranslate() || as_IFB(filter)->canHandleComplexCTM()) {
+ toRoot = SkMatrix::I();
+ layerMatrix = ctm;
+ } else if (ctm.decomposeScale(&scale, &toRoot)) {
+ layerMatrix = SkMatrix::MakeScale(scale.fWidth, scale.fHeight);
+ } else {
+ // Perspective: for now, do no scaling of the layer itself.
+ // TODO (michaelludwig) - perhaps it'd be better to explore a heuristic scale pulled from
+ // the matrix, e.g. based on the midpoint of the near/far planes?
+ toRoot = ctm;
+ layerMatrix = SkMatrix::I();
+ }
+
+ // We have to map the dst bounds from the root space into the layer space where filtering will
+ // occur. If we knew the input bounds of the content that defined the original dst bounds, we
+ // could map that forward by layerMatrix and have tighter bounds, but toRoot^-1 * dst bounds
+ // is a safe, conservative estimate.
+ SkMatrix fromRoot;
+ if (!toRoot.invert(&fromRoot)) {
+ return;
+ }
+
+ // This represents what the backdrop filter needs to produce in the layer space, and is sized
+ // such that drawing it into dst with the toRoot transform will cover the actual dst device.
+ SkIRect layerTargetBounds = fromRoot.mapRect(
+ SkRect::MakeXYWH(dstOrigin.x(), dstOrigin.y(), dst->width(), dst->height())).roundOut();
+ // While layerTargetBounds is what needs to be output by the filter, the filtering process may
+ // require some extra input pixels.
+ SkIRect layerInputBounds = filter->filterBounds(
+ layerTargetBounds, layerMatrix, SkImageFilter::kReverse_MapDirection,
+ &layerTargetBounds);
+
+ // Map the required input into the root space, then make relative to the src device. This will
+ // be the conservative contents required to fill a layerInputBounds-sized surface with the
+ // backdrop content (transformed back into the layer space using fromRoot).
+ SkIRect backdropBounds = toRoot.mapRect(SkRect::Make(layerInputBounds)).roundOut();
+ backdropBounds.offset(-src->getOrigin().x(), -src->getOrigin().y());
+ if (!backdropBounds.intersect(srcDevRect)) {
+ return;
+ }
+
+ auto special = src->snapSpecial(backdropBounds);
+ if (!special) {
+ return;
+ }
+
+ SkColorType colorType = src->imageInfo().colorType();
+ if (colorType == kUnknown_SkColorType) {
+ colorType = kRGBA_8888_SkColorType;
+ }
+ SkColorSpace* colorSpace = src->imageInfo().colorSpace();
+
+ SkPaint p;
+ if (!toRoot.isIdentity()) {
+ // Drawing the temporary and final filtered image requires a higher filter quality if the
+ // 'toRoot' transformation is not identity, in order to minimize the impact on already
+ // rendered edges/content.
+ // TODO (michaelludwig) - Explore reducing this quality, identify visual tradeoffs
+ p.setFilterQuality(kHigh_SkFilterQuality);
+
+ // The snapped backdrop content needs to be transformed by fromRoot into the layer space,
+ // and stored in a temporary surface, which is then used as the input to the actual filter.
+ auto tmpSurface = special->makeSurface(colorType, colorSpace, layerInputBounds.size());
+ if (!tmpSurface) {
+ return;
+ }
+
+ auto tmpCanvas = tmpSurface->getCanvas();
+ tmpCanvas->clear(SK_ColorTRANSPARENT);
+ // Reading in reverse, this takes the backdrop bounds from src device space into the root
+ // space, then maps from root space into the layer space, then maps it so the input layer's
+ // top left corner is (0, 0). This transformation automatically accounts for any cropping
+ // performed on backdropBounds.
+ tmpCanvas->translate(-layerInputBounds.fLeft, -layerInputBounds.fTop);
+ tmpCanvas->concat(fromRoot);
+ tmpCanvas->translate(src->getOrigin().x(), src->getOrigin().y());
+
+ tmpCanvas->drawImageRect(special->asImage(), special->subset(),
+ SkRect::Make(backdropBounds), &p, kStrict_SrcRectConstraint);
+ special = tmpSurface->makeImageSnapshot();
+ } else {
+ // Since there is no extra transform that was done, update the input bounds to reflect
+ // cropping of the snapped backdrop image. In this case toRoot = I, so layerInputBounds
+ // was equal to backdropBounds before it was made relative to the src device and cropped.
+ // When we use the original snapped image directly, just map the updated backdrop
+ // bounds back into the shared layer space.
+ layerInputBounds = backdropBounds;
+ layerInputBounds.offset(src->getOrigin().x(), src->getOrigin().y());
+
+ // Similar to the unfiltered case above, when toRoot is the identity, then the final
+ // draw will be 1-1 so there is no need to increase filter quality.
+ p.setFilterQuality(kNone_SkFilterQuality);
+ }
+
+ // Now evaluate the filter on 'special', which contains the backdrop content mapped back into
+ // layer space. This has to further offset everything so that filter evaluation thinks the
+ // source image's top left corner is (0, 0).
+ // TODO (michaelludwig) - Once image filters are robust to non-(0,0) image origins for inputs,
+ // this can be simplified.
+ layerTargetBounds.offset(-layerInputBounds.fLeft, -layerInputBounds.fTop);
+ SkMatrix filterCTM = layerMatrix;
+ filterCTM.postTranslate(-layerInputBounds.fLeft, -layerInputBounds.fTop);
+ skif::Context ctx(filterCTM, layerTargetBounds, nullptr, colorType, colorSpace, special.get());
+
+ SkIPoint offset;
+ special = as_IFB(filter)->filterImage(ctx).imageAndOffset(&offset);
+ if (special) {
+ // Draw the filtered backdrop content into the dst device. We add layerInputBounds origin
+ // to offset because the original value in 'offset' was relative to 'filterCTM'. 'filterCTM'
+ // had subtracted the layerInputBounds origin, so adding that back makes 'offset' relative
+ // to 'layerMatrix' (what we need it to be when drawing the image by 'toRoot').
+ offset += layerInputBounds.topLeft();
+
+ // Manually setting the device's CTM requires accounting for the device's origin.
+ // TODO (michaelludwig) - This could be simpler if the dst device had its origin configured
+ // before filtering the backdrop device, and if SkAutoDeviceCTMRestore had a way to accept
+ // a global CTM instead of a device CTM.
+ SkMatrix dstCTM = toRoot;
+ dstCTM.postTranslate(-dstOrigin.x(), -dstOrigin.y());
+ SkAutoDeviceCTMRestore acr(dst, dstCTM);
+
+ // And because devices don't have a special-image draw function that supports arbitrary
+ // matrices, we are abusing the asImage() functionality here...
+ SkRect specialSrc = SkRect::Make(special->subset());
+ auto looseImage = special->asImage();
+ dst->drawImageRect(
+ looseImage.get(), &specialSrc,
+ SkRect::MakeXYWH(offset.x(), offset.y(), special->width(), special->height()),
+ p, kStrict_SrcRectConstraint);
+ }
+}
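+
+// Decomposition sketch for the backdrop path above (illustrative): when the
+// filter can't handle the full CTM, it is factored as ctm = toRoot * layerMatrix,
+// so filtering runs under the simpler layerMatrix and toRoot is applied when the
+// result is drawn back:
+//   SkSize scale; SkMatrix toRoot;
+//   if (ctm.decomposeScale(&scale, &toRoot)) {
+//       // layerMatrix is scale-only; toRoot * layerMatrix reproduces ctm
+//   }
+// Only the perspective fallback gives up and filters under the identity matrix.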
+
+static SkImageInfo make_layer_info(const SkImageInfo& prev, int w, int h, const SkPaint* paint) {
+ SkColorType ct = prev.colorType();
+ if (prev.bytesPerPixel() <= 4 &&
+ prev.colorType() != kRGBA_8888_SkColorType &&
+ prev.colorType() != kBGRA_8888_SkColorType) {
+ // "Upgrade" A8, G8, 565, 4444, 1010102, 101010x, and 888x to 8888,
+ // ensuring plenty of alpha bits for the layer, perhaps losing some color bits in return.
+ ct = kN32_SkColorType;
+ }
+ return SkImageInfo::Make(w, h, ct, kPremul_SkAlphaType, prev.refColorSpace());
+}
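+
+// Examples of the rule above (illustrative): A8, 565, 4444, 1010102, 101010x,
+// and 888x inputs all become kN32 (8888 with full alpha) for the layer, while
+// 8888 inputs and wider types such as F16 (bytesPerPixel > 4) pass through
+// unchanged.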
+
+void SkCanvas::internalSaveLayer(const SaveLayerRec& rec, SaveLayerStrategy strategy) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ const SkRect* bounds = rec.fBounds;
+ const SkPaint* paint = rec.fPaint;
+ SaveLayerFlags saveLayerFlags = rec.fSaveLayerFlags;
+
+ // If we have a backdrop filter, then we must apply it to the entire layer (clip-bounds)
+ // regardless of any hint-rect from the caller. skbug.com/8783
+ if (rec.fBackdrop) {
+ bounds = nullptr;
+ }
+
+ SkLazyPaint lazyP;
+ SkImageFilter* imageFilter = paint ? paint->getImageFilter() : nullptr;
+ SkMatrix stashedMatrix = fMCRec->fMatrix;
+ MCRec* modifiedRec = nullptr;
+
+ /*
+ * Many ImageFilters (so far) do not (on their own) correctly handle matrices (CTM) that
+ * contain rotation/skew/etc. We rely on applyCTM to create a new image filter DAG as needed to
+ * accommodate this, but it requires updating the CTM we use when drawing into the layer.
+ *
+ * 1. Stash off the current CTM
+ * 2. Apply the CTM to the imagefilter, which decomposes it into simple and complex transforms
+ * if necessary.
+ * 3. Whack the CTM to be the remaining scale matrix and use the modified imagefilter, which
+ * is a MatrixImageFilter that contains the complex matrix.
+ * 4. Proceed as usual, allowing the client to draw into the layer (now with a scale-only CTM)
+ * 5. During restore, the MatrixImageFilter automatically applies the complex stage to the
+ * output of the original imagefilter, and we draw that (via drawSprite)
+ * 6. Unwhack the CTM to its original state (i.e. stashedMatrix)
+ *
+ * Perhaps in the future we could augment #5 to apply REMAINDER as part of the draw (no longer
+ * a sprite operation) to avoid the extra buffer/overhead of MatrixImageFilter.
+ */
+ if (imageFilter) {
+ SkMatrix modifiedCTM;
+ sk_sp<SkImageFilter> modifiedFilter = as_IFB(imageFilter)->applyCTM(stashedMatrix,
+ &modifiedCTM);
+ if (as_IFB(modifiedFilter)->uniqueID() != as_IFB(imageFilter)->uniqueID()) {
+ // The original filter couldn't support the CTM entirely
+ SkASSERT(modifiedCTM.isScaleTranslate() || as_IFB(imageFilter)->canHandleComplexCTM());
+ modifiedRec = fMCRec;
+ this->internalSetMatrix(modifiedCTM);
+ SkPaint* p = lazyP.set(*paint);
+ p->setImageFilter(std::move(modifiedFilter));
+ imageFilter = p->getImageFilter();
+ paint = p;
+ }
+ // Else the filter didn't change, so modifiedCTM == stashedMatrix and there's nothing
+ // left to do since the stack already has that as the CTM.
+ }
+
+ // Do this before we create the layer. We don't call the public save() since
+ // that would invoke a possibly overridden virtual.
+ this->internalSave();
+
+ SkIRect ir;
+ if (!this->clipRectBounds(bounds, saveLayerFlags, &ir, imageFilter)) {
+ if (modifiedRec) {
+ // In this case there will be no layer in which to stash the matrix so we need to
+ // revert the prior MCRec to its earlier state.
+ modifiedRec->fMatrix = stashedMatrix;
+ }
+ return;
+ }
+
+ // FIXME: do willSaveLayer() overriders returning kNoLayer_SaveLayerStrategy really care about
+ // the clipRectBounds() call above?
+ if (kNoLayer_SaveLayerStrategy == strategy) {
+ return;
+ }
+
+ SkPixelGeometry geo = fProps.pixelGeometry();
+ if (paint) {
+ // TODO: perhaps add a query to filters so we might preserve opaqueness...
+ if (paint->getImageFilter() || paint->getColorFilter()) {
+ geo = kUnknown_SkPixelGeometry;
+ }
+ }
+
+ SkBaseDevice* priorDevice = this->getTopDevice();
+ if (nullptr == priorDevice) { // Do we still need this check???
+ SkDebugf("Unable to find device for layer.");
+ return;
+ }
+
+ SkImageInfo info = make_layer_info(priorDevice->imageInfo(), ir.width(), ir.height(), paint);
+ if (rec.fSaveLayerFlags & kF16ColorType) {
+ info = info.makeColorType(kRGBA_F16_SkColorType);
+ }
+
+ sk_sp<SkBaseDevice> newDevice;
+ {
+ const bool preserveLCDText = kOpaque_SkAlphaType == info.alphaType() ||
+ (saveLayerFlags & kPreserveLCDText_SaveLayerFlag);
+ const SkBaseDevice::TileUsage usage =
+ preserveLCDText ? SkBaseDevice::kPossible_TileUsage : SkBaseDevice::kNever_TileUsage;
+ const bool trackCoverage =
+ SkToBool(saveLayerFlags & kMaskAgainstCoverage_EXPERIMENTAL_DONT_USE_SaveLayerFlag);
+ const SkBaseDevice::CreateInfo createInfo = SkBaseDevice::CreateInfo(info, usage, geo,
+ trackCoverage,
+ fAllocator.get());
+ newDevice.reset(priorDevice->onCreateDevice(createInfo, paint));
+ if (!newDevice) {
+ return;
+ }
+ }
+ DeviceCM* layer = new DeviceCM(newDevice, paint, stashedMatrix, rec.fClipMask, rec.fClipMatrix);
+
+ // only have a "next" if this new layer doesn't affect the clip (rare)
+ layer->fNext = BoundsAffectsClip(saveLayerFlags) ? nullptr : fMCRec->fTopLayer;
+ fMCRec->fLayer = layer;
+ fMCRec->fTopLayer = layer; // this field is NOT an owner of layer
+
+ if ((rec.fSaveLayerFlags & kInitWithPrevious_SaveLayerFlag) || rec.fBackdrop) {
+ DrawDeviceWithFilter(priorDevice, rec.fBackdrop, newDevice.get(), { ir.fLeft, ir.fTop },
+ fMCRec->fMatrix);
+ }
+
+ newDevice->setOrigin(fMCRec->fMatrix, ir.fLeft, ir.fTop);
+
+ newDevice->androidFramework_setDeviceClipRestriction(&fClipRestrictionRect);
+ if (layer->fNext) {
+ // need to punch a hole in the previous device, so we don't draw there, given that
+ // the new top-layer will allow drawing to happen "below" it.
+ SkRegion hole(ir);
+ do {
+ layer = layer->fNext;
+ layer->fDevice->clipRegion(hole, SkClipOp::kDifference);
+ } while (layer->fNext);
+ }
+}
+
+int SkCanvas::saveLayerAlpha(const SkRect* bounds, U8CPU alpha) {
+ if (0xFF == alpha) {
+ return this->saveLayer(bounds, nullptr);
+ } else {
+ SkPaint tmpPaint;
+ tmpPaint.setAlpha(alpha);
+ return this->saveLayer(bounds, &tmpPaint);
+ }
+}
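+
+// saveLayerAlpha is therefore shorthand for (illustrative):
+//   SkPaint p; p.setAlpha(alpha);
+//   canvas->saveLayer(bounds, alpha != 0xFF ? &p : nullptr);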
+
+void SkCanvas::internalSaveBehind(const SkRect* localBounds) {
+ SkIRect devBounds;
+ if (localBounds) {
+ SkRect tmp;
+ fMCRec->fMatrix.mapRect(&tmp, *localBounds);
+ if (!devBounds.intersect(tmp.round(), this->getDeviceClipBounds())) {
+ devBounds.setEmpty();
+ }
+ } else {
+ devBounds = this->getDeviceClipBounds();
+ }
+ if (devBounds.isEmpty()) {
+ return;
+ }
+
+ SkBaseDevice* device = this->getTopDevice();
+ if (nullptr == device) { // Do we still need this check???
+ return;
+ }
+
+ // need the bounds relative to the device itself
+ devBounds.offset(-device->fOrigin.fX, -device->fOrigin.fY);
+
+ // This is getting the special image from the current device, which is then drawn into (both by
+ // a client, and by the drawClippedToSaveBehind below). Since this is not saving a layer with
+ // its own device, we need to explicitly copy the back image contents so that its original
+ // content is available when we splat it back later during restore.
+ auto backImage = device->snapSpecial(devBounds, /* copy */ true);
+ if (!backImage) {
+ return;
+ }
+
+ // we really need the save, so we can whack the fMCRec
+ this->checkForDeferredSave();
+
+ fMCRec->fBackImage.reset(new BackImage{std::move(backImage), devBounds.topLeft()});
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kClear);
+ this->drawClippedToSaveBehind(paint);
+}
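+
+// Note (an observation on the code above): the kClear draw wipes the saved
+// region so new content renders onto transparency; internalRestore() later
+// re-composites the copied backImage underneath it using SkBlendMode::kDstOver.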
+
+void SkCanvas::internalRestore() {
+ SkASSERT(fMCStack.count() != 0);
+
+ // retrieve our layer (if any)
+ DeviceCM* layer = fMCRec->fLayer; // may be null
+ // now detach it from fMCRec so we can pop(). Gets freed after it's drawn
+ fMCRec->fLayer = nullptr;
+
+ // move this out before we do the actual restore
+ auto backImage = std::move(fMCRec->fBackImage);
+
+ // now do the normal restore()
+ fMCRec->~MCRec(); // balanced in save()
+ fMCStack.pop_back();
+ fMCRec = (MCRec*)fMCStack.back();
+
+ if (fMCRec) {
+ FOR_EACH_TOP_DEVICE(device->restore(fMCRec->fMatrix));
+ }
+
+ if (backImage) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kDstOver);
+ const int x = backImage->fLoc.x();
+ const int y = backImage->fLoc.y();
+ this->getTopDevice()->drawSpecial(backImage->fImage.get(), x, y, paint,
+ nullptr, SkMatrix::I());
+ }
+
+ /* Time to draw the layer's offscreen. We can't call the public drawSprite,
+ since if we're being recorded, we don't want to record this (the
+ recorder will have already recorded the restore).
+ */
+ if (layer) {
+ if (fMCRec) {
+ const SkIPoint& origin = layer->fDevice->getOrigin();
+ layer->fDevice->setImmutable();
+ this->internalDrawDevice(layer->fDevice.get(), origin.x(), origin.y(),
+ layer->fPaint.get(),
+ layer->fClipImage.get(), layer->fClipMatrix);
+ // restore what we smashed in internalSaveLayer
+ this->internalSetMatrix(layer->fStashedMatrix);
+ // reset this, since internalDrawDevice will have set it to true
+ delete layer;
+ } else {
+ // we're at the root
+ SkASSERT(layer == (void*)fDeviceCMStorage);
+ layer->~DeviceCM();
+ // no need to update fMCRec, 'cause we're killing the canvas
+ }
+ }
+
+ if (fMCRec) {
+ fIsScaleTranslate = fMCRec->fMatrix.isScaleTranslate();
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+ }
+}
+
+sk_sp<SkSurface> SkCanvas::makeSurface(const SkImageInfo& info, const SkSurfaceProps* props) {
+ if (nullptr == props) {
+ props = &fProps;
+ }
+ return this->onNewSurface(info, *props);
+}
+
+sk_sp<SkSurface> SkCanvas::onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ SkBaseDevice* dev = this->getDevice();
+ return dev ? dev->makeSurface(info, props) : nullptr;
+}
+
+SkImageInfo SkCanvas::imageInfo() const {
+ return this->onImageInfo();
+}
+
+SkImageInfo SkCanvas::onImageInfo() const {
+ SkBaseDevice* dev = this->getDevice();
+ if (dev) {
+ return dev->imageInfo();
+ } else {
+ return SkImageInfo::MakeUnknown(0, 0);
+ }
+}
+
+bool SkCanvas::getProps(SkSurfaceProps* props) const {
+ return this->onGetProps(props);
+}
+
+bool SkCanvas::onGetProps(SkSurfaceProps* props) const {
+ SkBaseDevice* dev = this->getDevice();
+ if (dev) {
+ if (props) {
+ *props = fProps;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool SkCanvas::peekPixels(SkPixmap* pmap) {
+ return this->onPeekPixels(pmap);
+}
+
+bool SkCanvas::onPeekPixels(SkPixmap* pmap) {
+ SkBaseDevice* dev = this->getDevice();
+ return dev && dev->peekPixels(pmap);
+}
+
+void* SkCanvas::accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin) {
+ SkPixmap pmap;
+ if (!this->onAccessTopLayerPixels(&pmap)) {
+ return nullptr;
+ }
+ if (info) {
+ *info = pmap.info();
+ }
+ if (rowBytes) {
+ *rowBytes = pmap.rowBytes();
+ }
+ if (origin) {
+ *origin = this->getTopDevice()->getOrigin();
+ }
+ return pmap.writable_addr();
+}
+
+bool SkCanvas::onAccessTopLayerPixels(SkPixmap* pmap) {
+ SkBaseDevice* dev = this->getTopDevice();
+ return dev && dev->accessPixels(pmap);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+// In our current design/features, we should never have a layer (src) in a different colorspace
+// than its parent (dst), so we assert that here. This is called out from other asserts, in case
+// we add some feature in the future to allow a given layer/imagefilter to operate in a specific
+// colorspace.
+static void check_drawdevice_colorspaces(SkColorSpace* src, SkColorSpace* dst) {
+ SkASSERT(src == dst);
+}
+
+void SkCanvas::internalDrawDevice(SkBaseDevice* srcDev, int x, int y, const SkPaint* paint,
+ SkImage* clipImage, const SkMatrix& clipMatrix) {
+ SkPaint tmp;
+ if (nullptr == paint) {
+ paint = &tmp;
+ }
+
+ DRAW_BEGIN_DRAWDEVICE(*paint)
+
+ while (iter.next()) {
+ SkBaseDevice* dstDev = iter.fDevice;
+ check_drawdevice_colorspaces(dstDev->imageInfo().colorSpace(),
+ srcDev->imageInfo().colorSpace());
+ paint = &draw.paint();
+ SkImageFilter* filter = paint->getImageFilter();
+ SkIPoint pos = { x - iter.getX(), y - iter.getY() };
+ if (filter || clipImage) {
+ sk_sp<SkSpecialImage> specialImage = srcDev->snapSpecial();
+ if (specialImage) {
+ check_drawdevice_colorspaces(dstDev->imageInfo().colorSpace(),
+ specialImage->getColorSpace());
+ dstDev->drawSpecial(specialImage.get(), pos.x(), pos.y(), *paint,
+ clipImage, clipMatrix);
+ }
+ } else {
+ dstDev->drawDevice(srcDev, pos.x(), pos.y(), *paint);
+ }
+ }
+
+ DRAW_END
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::translate(SkScalar dx, SkScalar dy) {
+ if (dx || dy) {
+ this->checkForDeferredSave();
+ fMCRec->fMatrix.preTranslate(dx,dy);
+
+ // Translate shouldn't affect the is-scale-translateness of the matrix.
+ SkASSERT(fIsScaleTranslate == fMCRec->fMatrix.isScaleTranslate());
+
+ FOR_EACH_TOP_DEVICE(device->setGlobalCTM(fMCRec->fMatrix));
+
+ this->didTranslate(dx,dy);
+ }
+}
+
+void SkCanvas::scale(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setScale(sx, sy);
+ this->concat(m);
+}
+
+void SkCanvas::rotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ this->concat(m);
+}
+
+void SkCanvas::rotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ this->concat(m);
+}
+
+void SkCanvas::skew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ this->concat(m);
+}
+
+void SkCanvas::concat(const SkMatrix& matrix) {
+ if (matrix.isIdentity()) {
+ return;
+ }
+
+ this->checkForDeferredSave();
+ fMCRec->fMatrix.preConcat(matrix);
+ fIsScaleTranslate = fMCRec->fMatrix.isScaleTranslate();
+
+ FOR_EACH_TOP_DEVICE(device->setGlobalCTM(fMCRec->fMatrix));
+
+ this->didConcat(matrix);
+}
+
+void SkCanvas::internalSetMatrix(const SkMatrix& matrix) {
+ fMCRec->fMatrix = matrix;
+ fIsScaleTranslate = matrix.isScaleTranslate();
+
+ FOR_EACH_TOP_DEVICE(device->setGlobalCTM(fMCRec->fMatrix));
+}
+
+void SkCanvas::setMatrix(const SkMatrix& matrix) {
+ this->checkForDeferredSave();
+ this->internalSetMatrix(matrix);
+ this->didSetMatrix(matrix);
+}
+
+void SkCanvas::resetMatrix() {
+ this->setMatrix(SkMatrix::I());
+}
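+
+// Ordering sketch (illustrative): scale/rotate/skew all route through concat(),
+// which pre-concatenates, so the most recent call applies to the geometry first:
+//   canvas->translate(10, 0);  // applied to drawn geometry second
+//   canvas->rotate(45);        // applied to drawn geometry first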
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::clipRect(const SkRect& rect, SkClipOp op, bool doAA) {
+ if (!rect.isFinite()) {
+ return;
+ }
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ this->onClipRect(rect, op, edgeStyle);
+}
+
+void SkCanvas::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ const bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ FOR_EACH_TOP_DEVICE(device->clipRect(rect, op, isAA));
+
+ AutoValidateClip avc(this);
+ fMCRec->fRasterClip.opRect(rect, fMCRec->fMatrix, this->getTopLayerBounds(), (SkRegion::Op)op,
+ isAA);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::androidFramework_setDeviceClipRestriction(const SkIRect& rect) {
+ fClipRestrictionRect = rect;
+ if (fClipRestrictionRect.isEmpty()) {
+ // We notify the device, but we *don't* resolve deferred saves (since we're just
+ // removing the restriction if the rect is empty). How I hate this API.
+ FOR_EACH_TOP_DEVICE(device->androidFramework_setDeviceClipRestriction(&fClipRestrictionRect));
+ } else {
+ this->checkForDeferredSave();
+ FOR_EACH_TOP_DEVICE(device->androidFramework_setDeviceClipRestriction(&fClipRestrictionRect));
+ AutoValidateClip avc(this);
+ fMCRec->fRasterClip.opIRect(fClipRestrictionRect, SkRegion::kIntersect_Op);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+ }
+}
+
+void SkCanvas::clipRRect(const SkRRect& rrect, SkClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ if (rrect.isRect()) {
+ this->onClipRect(rrect.getBounds(), op, edgeStyle);
+ } else {
+ this->onClipRRect(rrect, op, edgeStyle);
+ }
+}
+
+void SkCanvas::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ AutoValidateClip avc(this);
+
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ FOR_EACH_TOP_DEVICE(device->clipRRect(rrect, op, isAA));
+
+ fMCRec->fRasterClip.opRRect(rrect, fMCRec->fMatrix, this->getTopLayerBounds(), (SkRegion::Op)op,
+ isAA);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::clipPath(const SkPath& path, SkClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+
+ if (!path.isInverseFillType() && fMCRec->fMatrix.rectStaysRect()) {
+ SkRect r;
+ if (path.isRect(&r)) {
+ this->onClipRect(r, op, edgeStyle);
+ return;
+ }
+ SkRRect rrect;
+ if (path.isOval(&r)) {
+ rrect.setOval(r);
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ if (path.isRRect(&rrect)) {
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ }
+
+ this->onClipPath(path, op, edgeStyle);
+}
+
+void SkCanvas::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ AutoValidateClip avc(this);
+
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ FOR_EACH_TOP_DEVICE(device->clipPath(path, op, isAA));
+
+ const SkPath* rasterClipPath = &path;
+ const SkMatrix* matrix = &fMCRec->fMatrix;
+ fMCRec->fRasterClip.opPath(*rasterClipPath, *matrix, this->getTopLayerBounds(),
+ (SkRegion::Op)op, isAA);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::clipRegion(const SkRegion& rgn, SkClipOp op) {
+ this->checkForDeferredSave();
+ this->onClipRegion(rgn, op);
+}
+
+void SkCanvas::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ FOR_EACH_TOP_DEVICE(device->clipRegion(rgn, op));
+
+ AutoValidateClip avc(this);
+
+ fMCRec->fRasterClip.opRegion(rgn, (SkRegion::Op)op);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+#ifdef SK_DEBUG
+void SkCanvas::validateClip() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ // construct clipRgn from the clipstack
+ const SkBaseDevice* device = this->getDevice();
+ if (!device) {
+ SkASSERT(this->isClipEmpty());
+ return;
+ }
+#endif
+}
+#endif
+
+bool SkCanvas::androidFramework_isClipAA() const {
+ bool containsAA = false;
+
+ FOR_EACH_TOP_DEVICE(containsAA |= device->onClipIsAA());
+
+ return containsAA;
+}
+
+class RgnAccumulator {
+ SkRegion* fRgn;
+public:
+ RgnAccumulator(SkRegion* total) : fRgn(total) {}
+ void accumulate(SkBaseDevice* device, SkRegion* rgn) {
+ SkIPoint origin = device->getOrigin();
+ if (origin.x() | origin.y()) {
+ rgn->translate(origin.x(), origin.y());
+ }
+ fRgn->op(*rgn, SkRegion::kUnion_Op);
+ }
+};
+
+void SkCanvas::temporary_internal_getRgnClip(SkRegion* rgn) {
+ RgnAccumulator accum(rgn);
+ SkRegion tmp;
+
+ rgn->setEmpty();
+ FOR_EACH_TOP_DEVICE(device->onAsRgnClip(&tmp); accum.accumulate(device, &tmp));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvas::isClipEmpty() const {
+ return fMCRec->fRasterClip.isEmpty();
+
+ // TODO: should we only use the conservative answer in a recording canvas?
+#if 0
+ SkBaseDevice* dev = this->getTopDevice();
+ // if no device we return true
+ return !dev || dev->onGetClipType() == SkBaseDevice::kEmpty_ClipType;
+#endif
+}
+
+bool SkCanvas::isClipRect() const {
+ SkBaseDevice* dev = this->getTopDevice();
+ // if no device we return false
+ return dev && dev->onGetClipType() == SkBaseDevice::ClipType::kRect;
+}
+
+static inline bool is_nan_or_clipped(const Sk4f& devRect, const Sk4f& devClip) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ __m128 lLtT = _mm_unpacklo_ps(devRect.fVec, devClip.fVec);
+ __m128 RrBb = _mm_unpackhi_ps(devClip.fVec, devRect.fVec);
+ __m128 mask = _mm_cmplt_ps(lLtT, RrBb);
+ return 0xF != _mm_movemask_ps(mask);
+#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
+ float32x4_t lLtT = vzipq_f32(devRect.fVec, devClip.fVec).val[0];
+ float32x4_t RrBb = vzipq_f32(devClip.fVec, devRect.fVec).val[1];
+ uint32x4_t mask = vcltq_f32(lLtT, RrBb);
+ return 0xFFFFFFFFFFFFFFFF != (uint64_t) vmovn_u32(mask);
+#else
+ SkRect devRectAsRect;
+ SkRect devClipAsRect;
+ devRect.store(&devRectAsRect.fLeft);
+ devClip.store(&devClipAsRect.fLeft);
+ return !devRectAsRect.isFinite() || !devRectAsRect.intersect(devClipAsRect);
+#endif
+}
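+
+// Lane view of the SSE path above (illustrative):
+//   lLtT = (rect.L, clip.l, rect.T, clip.t)
+//   RrBb = (clip.r, rect.R, clip.b, rect.B)
+// The rects overlap, with no NaNs, exactly when every lane of lLtT compares
+// less-than RrBb (movemask == 0xF); any failed lane or NaN rejects the rect.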
+
+// It's important for this function to not be inlined. Otherwise the compiler will share code
+// between the fast path and the slow path, resulting in two slow paths.
+static SK_NEVER_INLINE bool quick_reject_slow_path(const SkRect& src, const SkRect& deviceClip,
+ const SkMatrix& matrix) {
+ SkRect deviceRect;
+ matrix.mapRect(&deviceRect, src);
+ return !deviceRect.isFinite() || !deviceRect.intersect(deviceClip);
+}
+
+bool SkCanvas::quickReject(const SkRect& src) const {
+#ifdef SK_DEBUG
+ // Verify that fDeviceClipBounds are set properly.
+ SkRect tmp = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+ if (fMCRec->fRasterClip.isEmpty()) {
+ SkASSERT(fDeviceClipBounds.isEmpty());
+ } else {
+ SkASSERT(tmp == fDeviceClipBounds);
+ }
+
+ // Verify that fIsScaleTranslate is set properly.
+ SkASSERT(fIsScaleTranslate == fMCRec->fMatrix.isScaleTranslate());
+#endif
+
+ if (!fIsScaleTranslate) {
+ return quick_reject_slow_path(src, fDeviceClipBounds, fMCRec->fMatrix);
+ }
+
+ // We inline the implementation of mapScaleTranslate() for the fast path.
+ float sx = fMCRec->fMatrix.getScaleX();
+ float sy = fMCRec->fMatrix.getScaleY();
+ float tx = fMCRec->fMatrix.getTranslateX();
+ float ty = fMCRec->fMatrix.getTranslateY();
+ Sk4f scale(sx, sy, sx, sy);
+ Sk4f trans(tx, ty, tx, ty);
+
+ // Apply matrix.
+ Sk4f ltrb = Sk4f::Load(&src.fLeft) * scale + trans;
+
+ // Make sure left < right, top < bottom.
+ Sk4f rblt(ltrb[2], ltrb[3], ltrb[0], ltrb[1]);
+ Sk4f min = Sk4f::Min(ltrb, rblt);
+ Sk4f max = Sk4f::Max(ltrb, rblt);
+ // We can extract either pair [0,1] or [2,3] from min and max and be correct, but on
+ // ARM this sequence generates the fastest code (a single instruction).
+ Sk4f devRect = Sk4f(min[2], min[3], max[0], max[1]);
+
+ // Check if the device rect is NaN or outside the clip.
+ return is_nan_or_clipped(devRect, Sk4f::Load(&fDeviceClipBounds.fLeft));
+}
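+
+// Worked example for the fast path above (illustrative): with a CTM of
+// scale(2,2) followed by translate(10,10) and src = {0,0,5,5}:
+//   ltrb    = (0,0,5,5) * (2,2,2,2) + (10,10,10,10) = (10,10,20,20)
+//   devRect = (10,10,20,20) (already sorted)
+// and the draw is rejected iff devRect is NaN or misses fDeviceClipBounds.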
+
+bool SkCanvas::quickReject(const SkPath& path) const {
+ return path.isEmpty() || this->quickReject(path.getBounds());
+}
+
+SkRect SkCanvas::getLocalClipBounds() const {
+ SkIRect ibounds = this->getDeviceClipBounds();
+ if (ibounds.isEmpty()) {
+ return SkRect::MakeEmpty();
+ }
+
+ SkMatrix inverse;
+ // if we can't invert the CTM, we can't return local clip bounds
+ if (!fMCRec->fMatrix.invert(&inverse)) {
+ return SkRect::MakeEmpty();
+ }
+
+ SkRect bounds;
+ // adjust it outwards in case we are antialiasing
+ const int margin = 1;
+
+ SkRect r = SkRect::Make(ibounds.makeOutset(margin, margin));
+ inverse.mapRect(&bounds, r);
+ return bounds;
+}
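+
+// Example (illustrative): with device clip {10,10,50,50} and a CTM of
+// scale(2,2), the 1px-outset rect {9,9,51,51} maps through the inverse to
+// {4.5,4.5,25.5,25.5} in local space; the margin keeps anti-aliased edges
+// from being excluded from the reported bounds.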
+
+SkIRect SkCanvas::getDeviceClipBounds() const {
+ return fMCRec->fRasterClip.getBounds();
+}
+
+const SkMatrix& SkCanvas::getTotalMatrix() const {
+ return fMCRec->fMatrix;
+}
+
+GrRenderTargetContext* SkCanvas::internal_private_accessTopLayerRenderTargetContext() {
+ SkBaseDevice* dev = this->getTopDevice();
+ return dev ? dev->accessRenderTargetContext() : nullptr;
+}
+
+GrContext* SkCanvas::getGrContext() {
+ SkBaseDevice* device = this->getTopDevice();
+ return device ? device->context() : nullptr;
+}
+
+void SkCanvas::drawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (outer.isEmpty()) {
+ return;
+ }
+ if (inner.isEmpty()) {
+ this->drawRRect(outer, paint);
+ return;
+ }
+
+ // We don't have this method (yet), but technically this is what we should
+ // be able to return ...
+ // if (!outer.contains(inner)) {
+ //
+ // For now, at least check for containment of the bounds
+ if (!outer.getBounds().contains(inner.getBounds())) {
+ return;
+ }
+
+ this->onDrawDRRect(outer, inner, paint);
+}
+
+void SkCanvas::drawPaint(const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPaint(paint);
+}
+
+void SkCanvas::drawRect(const SkRect& r, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // To avoid redundant logic in our culling code and various backends, we always sort rects
+ // before passing them along.
+ this->onDrawRect(r.makeSorted(), paint);
+}
+
+void SkCanvas::drawClippedToSaveBehind(const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawBehind(paint);
+}
+
+void SkCanvas::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (region.isEmpty()) {
+ return;
+ }
+
+ if (region.isRect()) {
+ return this->drawIRect(region.getBounds(), paint);
+ }
+
+ this->onDrawRegion(region, paint);
+}
+
+void SkCanvas::drawOval(const SkRect& r, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // To avoid redundant logic in our culling code and various backends, we always sort rects
+ // before passing them along.
+ this->onDrawOval(r.makeSorted(), paint);
+}
+
+void SkCanvas::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawRRect(rrect, paint);
+}
+
+void SkCanvas::drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPoints(mode, count, pts, paint);
+}
+
+void SkCanvas::drawVertices(const sk_sp<SkVertices>& vertices, SkBlendMode mode,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(vertices);
+ // We expect fans to be converted to triangles when building or deserializing SkVertices.
+ SkASSERT(vertices->mode() != SkVertices::kTriangleFan_VertexMode);
+ this->onDrawVerticesObject(vertices.get(), nullptr, 0, mode, paint);
+}
+
+void SkCanvas::drawVertices(const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(vertices);
+ this->onDrawVerticesObject(vertices, nullptr, 0, mode, paint);
+}
+
+void SkCanvas::drawVertices(const sk_sp<SkVertices>& vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode mode, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(vertices);
+ SkASSERT(boneCount <= 80);
+ this->onDrawVerticesObject(vertices.get(), bones, boneCount, mode, paint);
+}
+
+void SkCanvas::drawVertices(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode mode, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(vertices);
+ SkASSERT(boneCount <= 80);
+ this->onDrawVerticesObject(vertices, bones, boneCount, mode, paint);
+}
+
+void SkCanvas::drawPath(const SkPath& path, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPath(path, paint);
+}
+
+void SkCanvas::drawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ this->onDrawImage(image, x, y, paint);
+}
+
+// Returns true if the rect can be "filled": non-empty and finite
+static bool fillable(const SkRect& r) {
+ SkScalar w = r.width();
+ SkScalar h = r.height();
+ return SkScalarIsFinite(w) && w > 0 && SkScalarIsFinite(h) && h > 0;
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ if (!fillable(dst) || !fillable(src)) {
+ return;
+ }
+ this->onDrawImageRect(image, &src, dst, paint, constraint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ RETURN_ON_NULL(image);
+ this->drawImageRect(image, SkRect::Make(isrc), dst, paint, constraint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& dst, const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()), dst, paint,
+ kFast_SrcRectConstraint);
+}
+
+namespace {
+class LatticePaint : SkNoncopyable {
+public:
+ LatticePaint(const SkPaint* origPaint) : fPaint(origPaint) {
+ if (!origPaint) {
+ return;
+ }
+ if (origPaint->getFilterQuality() > kLow_SkFilterQuality) {
+ fPaint.writable()->setFilterQuality(kLow_SkFilterQuality);
+ }
+ if (origPaint->getMaskFilter()) {
+ fPaint.writable()->setMaskFilter(nullptr);
+ }
+ if (origPaint->isAntiAlias()) {
+ fPaint.writable()->setAntiAlias(false);
+ }
+ }
+
+ const SkPaint* get() const {
+ return fPaint;
+ }
+
+private:
+ SkTCopyOnFirstWrite<SkPaint> fPaint;
+};
+} // namespace
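+
+// Rationale (an observation on LatticePaint above): lattice/nine-patch draws
+// are axis-aligned stretches, so a mask filter, anti-aliasing, or
+// better-than-low filter quality adds cost without visual benefit; the wrapper
+// strips them before the per-patch draws below.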
+
+void SkCanvas::drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty()) {
+ return;
+ }
+ if (SkLatticeIter::Valid(image->width(), image->height(), center)) {
+ LatticePaint latticePaint(paint);
+ this->onDrawImageNine(image, center, dst, latticePaint.get());
+ } else {
+ this->drawImageRect(image, dst, paint);
+ }
+}
+
+void SkCanvas::drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty()) {
+ return;
+ }
+
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(image->width(), image->height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(image->width(), image->height(), latticePlusBounds)) {
+ LatticePaint latticePaint(paint);
+ this->onDrawImageLattice(image, latticePlusBounds, dst, latticePaint.get());
+ } else {
+ this->drawImageRect(image, dst, paint);
+ }
+}
+
+void SkCanvas::drawBitmap(const SkBitmap& bitmap, SkScalar dx, SkScalar dy, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (bitmap.drawsNothing()) {
+ return;
+ }
+ this->onDrawBitmap(bitmap, dx, dy, paint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (bitmap.drawsNothing() || dst.isEmpty() || src.isEmpty()) {
+ return;
+ }
+ this->onDrawBitmapRect(bitmap, &src, dst, paint, constraint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ this->drawBitmapRect(bitmap, SkRect::Make(isrc), dst, paint, constraint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ this->drawBitmapRect(bitmap, SkRect::MakeIWH(bitmap.width(), bitmap.height()), dst, paint,
+ constraint);
+}
+
+void SkCanvas::drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+ if (SkLatticeIter::Valid(bitmap.width(), bitmap.height(), center)) {
+ LatticePaint latticePaint(paint);
+ this->onDrawBitmapNine(bitmap, center, dst, latticePaint.get());
+ } else {
+ this->drawBitmapRect(bitmap, dst, paint);
+ }
+}
+
+void SkCanvas::drawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(bitmap.width(), bitmap.height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(bitmap.width(), bitmap.height(), latticePlusBounds)) {
+ LatticePaint latticePaint(paint);
+ this->onDrawBitmapLattice(bitmap, latticePlusBounds, dst, latticePaint.get());
+ } else {
+ this->drawBitmapRect(bitmap, dst, paint);
+ }
+}
+
+void SkCanvas::drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(atlas);
+ if (count <= 0) {
+ return;
+ }
+ SkASSERT(atlas);
+ SkASSERT(tex);
+ this->onDrawAtlas(atlas, xform, tex, colors, count, mode, cull, paint);
+}
+
+void SkCanvas::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (key) {
+ this->onDrawAnnotation(rect, key, value);
+ }
+}
+
+void SkCanvas::legacy_drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (src) {
+ this->drawImageRect(image, *src, dst, paint, constraint);
+ } else {
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()),
+ dst, paint, constraint);
+ }
+}
+void SkCanvas::legacy_drawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (src) {
+ this->drawBitmapRect(bitmap, *src, dst, paint, constraint);
+ } else {
+ this->drawBitmapRect(bitmap, SkRect::MakeIWH(bitmap.width(), bitmap.height()),
+ dst, paint, constraint);
+ }
+}
+
+void SkCanvas::private_draw_shadow_rec(const SkPath& path, const SkDrawShadowRec& rec) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawShadowRec(path, rec);
+}
+
+void SkCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ SkPaint paint;
+ const SkRect& pathBounds = path.getBounds();
+
+ DRAW_BEGIN(paint, &pathBounds)
+ while (iter.next()) {
+ iter.fDevice->drawShadow(path, rec);
+ }
+ DRAW_END
+}
+
+void SkCanvas::experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aaFlags, const SkColor4f& color,
+ SkBlendMode mode) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // Make sure the rect is sorted before passing it along
+ this->onDrawEdgeAAQuad(rect.makeSorted(), clip, aaFlags, color, mode);
+}
+
+void SkCanvas::experimental_DrawEdgeAAImageSet(const ImageSetEntry imageSet[], int cnt,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawEdgeAAImageSet(imageSet, cnt, dstClips, preViewMatrices, paint, constraint);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These are the virtual drawing methods
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::onDiscard() {
+ if (fSurfaceBase) {
+ fSurfaceBase->aboutToDraw(SkSurface::kDiscard_ContentChangeMode);
+ }
+}
+
+void SkCanvas::onDrawPaint(const SkPaint& paint) {
+ this->internalDrawPaint(paint);
+}
+
+void SkCanvas::internalDrawPaint(const SkPaint& paint) {
+ DRAW_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, nullptr, false)
+
+ while (iter.next()) {
+ iter.fDevice->drawPaint(draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ if ((long)count <= 0) {
+ return;
+ }
+
+ SkRect r;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ // special-case 2 points (common for drawing a single line)
+ if (2 == count) {
+ r.set(pts[0], pts[1]);
+ } else {
+ r.setBounds(pts, SkToInt(count));
+ }
+ if (!r.isFinite()) {
+ return;
+ }
+ SkRect storage;
+ if (this->quickReject(paint.computeFastStrokeBounds(r, &storage))) {
+ return;
+ }
+ bounds = &r;
+ }
+
+ SkASSERT(pts != nullptr);
+
+ DRAW_BEGIN(paint, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawPoints(mode, count, pts, draw.paint());
+ }
+
+ DRAW_END
+}
+
+static bool needs_autodrawlooper(SkCanvas* canvas, const SkPaint& paint) {
+ return paint.getImageFilter() != nullptr;
+}
+
+void SkCanvas::onDrawRect(const SkRect& r, const SkPaint& paint) {
+ SkASSERT(r.isSorted());
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(r, &storage))) {
+ return;
+ }
+ }
+
+ if (needs_autodrawlooper(this, paint)) {
+ DRAW_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, &r, false)
+
+ while (iter.next()) {
+ iter.fDevice->drawRect(r, draw.paint());
+ }
+
+ DRAW_END
+ } else if (!paint.nothingToDraw()) {
+ this->predrawNotify(&r, &paint, false);
+ SkDrawIter iter(this);
+ while (iter.next()) {
+ iter.fDevice->drawRect(r, paint);
+ }
+ }
+}
+
+void SkCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ SkRect regionRect = SkRect::Make(region.getBounds());
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(regionRect, &storage))) {
+ return;
+ }
+ }
+
+ DRAW_BEGIN(paint, &regionRect)
+
+ while (iter.next()) {
+ iter.fDevice->drawRegion(region, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawBehind(const SkPaint& paint) {
+ SkIRect bounds;
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kBack_IterStart);
+ for (;;) {
+ const MCRec* rec = (const MCRec*)iter.prev();
+ if (!rec) {
+ return; // no backimages, so nothing to draw
+ }
+ if (rec->fBackImage) {
+ bounds = SkIRect::MakeXYWH(rec->fBackImage->fLoc.fX, rec->fBackImage->fLoc.fY,
+ rec->fBackImage->fImage->width(),
+ rec->fBackImage->fImage->height());
+ break;
+ }
+ }
+
+ DRAW_BEGIN(paint, nullptr)
+
+ while (iter.next()) {
+ SkBaseDevice* dev = iter.fDevice;
+
+ dev->save();
+ // We use clipRegion because it is already defined to operate in dev-space
+ // (i.e. it ignores the ctm). However, it will first translate by -origin,
+ // which we don't want, so we undo that before calling in.
+ SkRegion rgn(bounds.makeOffset(dev->fOrigin));
+ dev->clipRegion(rgn, SkClipOp::kIntersect);
+ dev->drawPaint(draw.paint());
+ dev->restore(fMCRec->fMatrix);
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ SkASSERT(oval.isSorted());
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(oval, &storage))) {
+ return;
+ }
+ }
+
+ DRAW_BEGIN(paint, &oval)
+
+ while (iter.next()) {
+ iter.fDevice->drawOval(oval, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ SkASSERT(oval.isSorted());
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ // Note we're using the entire oval as the bounds.
+ if (this->quickReject(paint.computeFastBounds(oval, &storage))) {
+ return;
+ }
+ }
+
+ DRAW_BEGIN(paint, &oval)
+
+ while (iter.next()) {
+ iter.fDevice->drawArc(oval, startAngle, sweepAngle, useCenter, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(rrect.getBounds(), &storage))) {
+ return;
+ }
+ }
+
+ if (rrect.isRect()) {
+ // call the non-virtual version
+ this->SkCanvas::drawRect(rrect.getBounds(), paint);
+ return;
+ } else if (rrect.isOval()) {
+ // call the non-virtual version
+ this->SkCanvas::drawOval(rrect.getBounds(), paint);
+ return;
+ }
+
+ DRAW_BEGIN(paint, &rrect.getBounds())
+
+ while (iter.next()) {
+ iter.fDevice->drawRRect(rrect, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(outer.getBounds(), &storage))) {
+ return;
+ }
+ }
+
+ DRAW_BEGIN(paint, &outer.getBounds())
+
+ while (iter.next()) {
+ iter.fDevice->drawDRRect(outer, inner, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ if (!path.isFinite()) {
+ return;
+ }
+
+ const SkRect& pathBounds = path.getBounds();
+ if (!path.isInverseFillType() && paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint.computeFastBounds(pathBounds, &storage))) {
+ return;
+ }
+ }
+
+ if (pathBounds.width() <= 0 && pathBounds.height() <= 0) {
+ if (path.isInverseFillType()) {
+ this->internalDrawPaint(paint);
+ return;
+ }
+ }
+
+ DRAW_BEGIN(paint, &pathBounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawPath(path, draw.paint());
+ }
+
+ DRAW_END
+}
+
+bool SkCanvas::canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkPaint& paint) {
+ if (!paint.getImageFilter()) {
+ return false;
+ }
+
+ const SkMatrix& ctm = this->getTotalMatrix();
+ if (!SkTreatAsSprite(ctm, SkISize::Make(w, h), paint)) {
+ return false;
+ }
+
+ // Currently we can only use the filterSprite code if we are clipped to the bitmap's
+ // bounds. Once filters can return results larger than their input, we should be able
+ // to remove this constraint.
+ // skbug.com/4526
+ //
+ SkPoint pt;
+ ctm.mapXY(x, y, &pt);
+ SkIRect ir = SkIRect::MakeXYWH(SkScalarRoundToInt(pt.x()), SkScalarRoundToInt(pt.y()), w, h);
+ return ir.contains(fMCRec->fRasterClip.getBounds());
+}
+
+// Given storage for a real paint and an optional paint parameter, clean up the param (if
+// non-null) per the drawing semantics for drawImage/bitmap (skbug.com/7804), and return it,
+// or return the original null.
+static const SkPaint* init_image_paint(SkPaint* real, const SkPaint* paintParam) {
+ if (paintParam) {
+ *real = *paintParam;
+ real->setStyle(SkPaint::kFill_Style);
+ real->setPathEffect(nullptr);
+ paintParam = real;
+ }
+ return paintParam;
+}
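+
+// Effect of init_image_paint (illustrative): a stroked paint with a path
+// effect, when passed to a drawImage/drawBitmap call, behaves as if
+//   realPaint.setStyle(SkPaint::kFill_Style);
+//   realPaint.setPathEffect(nullptr);
+// had been applied, while a null paint stays null for the fast-bounds checks.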
+
+void SkCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ SkRect bounds = SkRect::MakeXYWH(x, y,
+ SkIntToScalar(image->width()), SkIntToScalar(image->height()));
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect tmp = bounds;
+ if (paint) {
+ paint->computeFastBounds(tmp, &tmp);
+ }
+ if (this->quickReject(tmp)) {
+ return;
+ }
+ }
+ // At this point we need a real paint object. If the caller passed null, then we should
+ // use realPaint (in its default state). If the caller did pass a paint, then we have copied
+ // (and modified) it in realPaint. Thus either way, "realPaint" is what we want to use.
+ paint = &realPaint;
+
+ sk_sp<SkSpecialImage> special;
+ bool drawAsSprite = this->canDrawBitmapAsSprite(x, y, image->width(), image->height(),
+ *paint);
+ if (drawAsSprite && paint->getImageFilter()) {
+ special = this->getDevice()->makeSpecial(image);
+ if (!special) {
+ drawAsSprite = false;
+ }
+ }
+
+ DRAW_BEGIN_DRAWBITMAP(*paint, drawAsSprite, &bounds)
+
+ while (iter.next()) {
+ const SkPaint& pnt = draw.paint();
+ if (special) {
+ SkPoint pt;
+ iter.fDevice->ctm().mapXY(x, y, &pt);
+ iter.fDevice->drawSpecial(special.get(),
+ SkScalarRoundToInt(pt.fX),
+ SkScalarRoundToInt(pt.fY), pnt,
+ nullptr, SkMatrix::I());
+ } else {
+ iter.fDevice->drawImageRect(
+ image, nullptr, SkRect::MakeXYWH(x, y, image->width(), image->height()), pnt,
+ kStrict_SrcRectConstraint);
+ }
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage = dst;
+ if (paint) {
+ paint->computeFastBounds(dst, &storage);
+ }
+ if (this->quickReject(storage)) {
+ return;
+ }
+ }
+ paint = &realPaint;
+
+ DRAW_BEGIN_CHECK_COMPLETE_OVERWRITE(*paint, &dst, image->isOpaque())
+
+ while (iter.next()) {
+ iter.fDevice->drawImageRect(image, src, dst, draw.paint(), constraint);
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y, const SkPaint* paint) {
+ SkDEBUGCODE(bitmap.validate();)
+
+ if (bitmap.drawsNothing()) {
+ return;
+ }
+
+ SkPaint realPaint;
+ init_image_paint(&realPaint, paint);
+ paint = &realPaint;
+
+ SkRect bounds;
+ bitmap.getBounds(&bounds);
+ bounds.offset(x, y);
+ bool canFastBounds = paint->canComputeFastBounds();
+ if (canFastBounds) {
+ SkRect storage;
+ if (this->quickReject(paint->computeFastBounds(bounds, &storage))) {
+ return;
+ }
+ }
+
+ sk_sp<SkSpecialImage> special;
+ bool drawAsSprite = canFastBounds && this->canDrawBitmapAsSprite(x, y, bitmap.width(),
+ bitmap.height(), *paint);
+ if (drawAsSprite && paint->getImageFilter()) {
+ special = this->getDevice()->makeSpecial(bitmap);
+ if (!special) {
+ drawAsSprite = false;
+ }
+ }
+
+ DRAW_BEGIN_DRAWBITMAP(*paint, drawAsSprite, &bounds)
+
+ while (iter.next()) {
+ const SkPaint& pnt = draw.paint();
+ if (special) {
+ SkPoint pt;
+ iter.fDevice->ctm().mapXY(x, y, &pt);
+ iter.fDevice->drawSpecial(special.get(),
+ SkScalarRoundToInt(pt.fX),
+ SkScalarRoundToInt(pt.fY), pnt,
+ nullptr, SkMatrix::I());
+ } else {
+ SkRect fullImage = SkRect::MakeWH(bitmap.width(), bitmap.height());
+ iter.fDevice->drawBitmapRect(bitmap, &fullImage, fullImage.makeOffset(x, y), pnt,
+ kStrict_SrcRectConstraint);
+ }
+ }
+
+ DRAW_END
+}
+
+// this one is non-virtual, so it can be called safely by other canvas apis
+void SkCanvas::internalDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ DRAW_BEGIN_CHECK_COMPLETE_OVERWRITE(*paint, &dst, bitmap.isOpaque())
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapRect(bitmap, src, dst, draw.paint(), constraint);
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ SkDEBUGCODE(bitmap.validate();)
+ this->internalDrawBitmapRect(bitmap, src, dst, paint, constraint);
+}
+
+void SkCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+ paint = &realPaint;
+
+ DRAW_BEGIN(*paint, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawImageNine(image, center, dst, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ SkDEBUGCODE(bitmap.validate();)
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+ paint = &realPaint;
+
+ DRAW_BEGIN(*paint, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapNine(bitmap, center, dst, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+ paint = &realPaint;
+
+ DRAW_BEGIN(*paint, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawImageLattice(image, lattice, dst, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ SkPaint realPaint;
+ paint = init_image_paint(&realPaint, paint);
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+ paint = &realPaint;
+
+ DRAW_BEGIN(*paint, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapLattice(bitmap, lattice, dst, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ storage = blob->bounds().makeOffset(x, y);
+ SkRect tmp;
+ if (this->quickReject(paint.computeFastBounds(storage, &tmp))) {
+ return;
+ }
+ bounds = &storage;
+ }
+
+ // We cannot filter in the looper as we normally do, because the paint is
+ // incomplete at this point (text-related attributes are embedded within blob run paints).
+ DRAW_BEGIN(paint, bounds)
+
+ while (iter.next()) {
+ fScratchGlyphRunBuilder->drawTextBlob(draw.paint(), *blob, {x, y}, iter.fDevice);
+ }
+
+ DRAW_END
+}
+
+// These call the (virtual) onDraw... method
+void SkCanvas::drawSimpleText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (byteLength) {
+ sk_msan_assert_initialized(text, SkTAddOffset<const void>(text, byteLength));
+ this->drawTextBlob(SkTextBlob::MakeFromText(text, byteLength, font, encoding), x, y, paint);
+ }
+}
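+
+// Illustrative usage sketch (not part of this file): drawSimpleText() wraps the text
+// in a temporary SkTextBlob, so for UTF-8 input the two calls below are equivalent:
+//
+//   canvas->drawSimpleText("hi", 2, SkTextEncoding::kUTF8, 10, 20, font, paint);
+//   canvas->drawTextBlob(SkTextBlob::MakeFromText("hi", 2, font, SkTextEncoding::kUTF8),
+//                        10, 20, paint);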
+
+void SkCanvas::drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(blob);
+ RETURN_ON_FALSE(blob->bounds().makeOffset(x, y).isFinite());
+ this->onDrawTextBlob(blob, x, y, paint);
+}
+
+void SkCanvas::onDrawVerticesObject(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode bmode, const SkPaint& paint) {
+ DRAW_BEGIN(paint, nullptr)
+
+ while (iter.next()) {
+ // In the common case of one iteration we could std::move vertices here.
+ iter.fDevice->drawVertices(vertices, bones, boneCount, bmode, draw.paint());
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (nullptr == cubics) {
+ return;
+ }
+
+ this->onDrawPatch(cubics, colors, texCoords, bmode, paint);
+}
+
+void SkCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ // Since a patch is always within the convex hull of the control points, we discard it when its
+ // bounding rectangle is completely outside the current clip.
+ SkRect bounds;
+ bounds.setBounds(cubics, SkPatchUtils::kNumCtrlPts);
+ if (this->quickReject(bounds)) {
+ return;
+ }
+
+ DRAW_BEGIN(paint, nullptr)
+
+ while (iter.next()) {
+ iter.fDevice->drawPatch(cubics, colors, texCoords, bmode, paint);
+ }
+
+ DRAW_END
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, SkScalar x, SkScalar y) {
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia", TRACE_FUNC);
+#endif
+ RETURN_ON_NULL(dr);
+ if (x || y) {
+ SkMatrix matrix = SkMatrix::MakeTrans(x, y);
+ this->onDrawDrawable(dr, &matrix);
+ } else {
+ this->onDrawDrawable(dr, nullptr);
+ }
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia", TRACE_FUNC);
+#endif
+ RETURN_ON_NULL(dr);
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ this->onDrawDrawable(dr, matrix);
+}
+
+void SkCanvas::onDrawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+ // drawable bounds are no longer reliable (e.g. android displaylist)
+ // so don't use them for quick-reject
+ this->getDevice()->drawDrawable(dr, matrix, this);
+}
+
+void SkCanvas::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode bmode,
+ const SkRect* cull, const SkPaint* paint) {
+ if (cull && this->quickReject(*cull)) {
+ return;
+ }
+
+ SkPaint pnt;
+ if (paint) {
+ pnt = *paint;
+ }
+
+ DRAW_BEGIN(pnt, nullptr)
+ while (iter.next()) {
+ iter.fDevice->drawAtlas(atlas, xform, tex, colors, count, bmode, pnt);
+ }
+ DRAW_END
+}
+
+void SkCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ SkASSERT(key);
+
+ SkPaint paint;
+ DRAW_BEGIN(paint, nullptr)
+ while (iter.next()) {
+ iter.fDevice->drawAnnotation(rect, key, value);
+ }
+ DRAW_END
+}
+
+void SkCanvas::onDrawEdgeAAQuad(const SkRect& r, const SkPoint clip[4], QuadAAFlags edgeAA,
+ const SkColor4f& color, SkBlendMode mode) {
+ SkASSERT(r.isSorted());
+
+ // If this used a paint, it would be a filled color with blend mode, which does not
+ // need to use an autodraw loop, so use SkDrawIter directly.
+ if (this->quickReject(r)) {
+ return;
+ }
+
+ this->predrawNotify(&r, nullptr, false);
+ SkDrawIter iter(this);
+ while(iter.next()) {
+ iter.fDevice->drawEdgeAAQuad(r, clip, edgeAA, color, mode);
+ }
+}
+
+void SkCanvas::onDrawEdgeAAImageSet(const ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (count <= 0) {
+ // Nothing to draw
+ return;
+ }
+
+ SkPaint realPaint;
+ init_image_paint(&realPaint, paint);
+
+ // We could calculate the set's dstRect union to always check quickReject(), but we can't reject
+ // individual entries and Chromium's occlusion culling already makes it likely that at least one
+ // entry will be visible. So, we only calculate the draw bounds when it's trivial (count == 1),
+ // or we need it for the autolooper (since it greatly improves image filter perf).
+ bool needsAutoLooper = needs_autodrawlooper(this, realPaint);
+ bool setBoundsValid = count == 1 || needsAutoLooper;
+ SkRect setBounds = imageSet[0].fDstRect;
+ if (imageSet[0].fMatrixIndex >= 0) {
+ // Account for the per-entry transform that is applied prior to the CTM when drawing
+ preViewMatrices[imageSet[0].fMatrixIndex].mapRect(&setBounds);
+ }
+ if (needsAutoLooper) {
+ for (int i = 1; i < count; ++i) {
+ SkRect entryBounds = imageSet[i].fDstRect;
+ if (imageSet[i].fMatrixIndex >= 0) {
+ preViewMatrices[imageSet[i].fMatrixIndex].mapRect(&entryBounds);
+ }
+ setBounds.joinPossiblyEmptyRect(entryBounds);
+ }
+ }
+
+ // If we happen to have the draw bounds, though, might as well check quickReject().
+ if (setBoundsValid && realPaint.canComputeFastBounds()) {
+ SkRect tmp;
+ if (this->quickReject(realPaint.computeFastBounds(setBounds, &tmp))) {
+ return;
+ }
+ }
+
+ if (needsAutoLooper) {
+ SkASSERT(setBoundsValid);
+ DRAW_BEGIN(realPaint, &setBounds)
+ while (iter.next()) {
+ iter.fDevice->drawEdgeAAImageSet(
+ imageSet, count, dstClips, preViewMatrices, draw.paint(), constraint);
+ }
+ DRAW_END
+ } else {
+ this->predrawNotify();
+ SkDrawIter iter(this);
+ while(iter.next()) {
+ iter.fDevice->drawEdgeAAImageSet(
+ imageSet, count, dstClips, preViewMatrices, realPaint, constraint);
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These methods are NOT virtual, and therefore must call back into virtual
+// methods, rather than actually drawing themselves.
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::drawColor(SkColor c, SkBlendMode mode) {
+ SkPaint paint;
+ paint.setColor(c);
+ paint.setBlendMode(mode);
+ this->drawPaint(paint);
+}
+
+void SkCanvas::drawPoint(SkScalar x, SkScalar y, const SkPaint& paint) {
+ const SkPoint pt = { x, y };
+ this->drawPoints(kPoints_PointMode, 1, &pt, paint);
+}
+
+void SkCanvas::drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1, const SkPaint& paint) {
+ SkPoint pts[2];
+ pts[0].set(x0, y0);
+ pts[1].set(x1, y1);
+ this->drawPoints(kLines_PointMode, 2, pts, paint);
+}
+
+void SkCanvas::drawCircle(SkScalar cx, SkScalar cy, SkScalar radius, const SkPaint& paint) {
+ if (radius < 0) {
+ radius = 0;
+ }
+
+ SkRect r;
+ r.setLTRB(cx - radius, cy - radius, cx + radius, cy + radius);
+ this->drawOval(r, paint);
+}
+
+void SkCanvas::drawRoundRect(const SkRect& r, SkScalar rx, SkScalar ry,
+ const SkPaint& paint) {
+ if (rx > 0 && ry > 0) {
+ SkRRect rrect;
+ rrect.setRectXY(r, rx, ry);
+ this->drawRRect(rrect, paint);
+ } else {
+ this->drawRect(r, paint);
+ }
+}
+
+void SkCanvas::drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (oval.isEmpty() || !sweepAngle) {
+ return;
+ }
+ this->onDrawArc(oval, startAngle, sweepAngle, useCenter, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DISABLE_SKPICTURE
+void SkCanvas::drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint) {}
+
+
+void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {}
+#else
+/**
+ * This constant is trying to balance the speed of ref'ing a subpicture into a parent picture
+ * against the playback cost of recursing into the subpicture to get at its actual ops.
+ *
+ * For now we pick a conservatively small value, though measurement (and other heuristics like
+ * the type of ops contained) may justify changing this value.
+ */
+#define kMaxPictureOpsToUnrollInsteadOfRef 1
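+
+// With the current value of 1, only single-op pictures (e.g. a picture recording just
+// one drawRect) are unrolled and played back inline; anything larger is handed to the
+// virtual onDrawPicture() below.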
+
+void SkCanvas::drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(picture);
+
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ if (picture->approximateOpCount() <= kMaxPictureOpsToUnrollInsteadOfRef) {
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+ } else {
+ this->onDrawPicture(picture, matrix, paint);
+ }
+}
+
+void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ if (!paint || paint->canComputeFastBounds()) {
+ SkRect bounds = picture->cullRect();
+ if (paint) {
+ paint->computeFastBounds(bounds, &bounds);
+ }
+ if (matrix) {
+ matrix->mapRect(&bounds);
+ }
+ if (this->quickReject(bounds)) {
+ return;
+ }
+ }
+
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkCanvas::LayerIter::LayerIter(SkCanvas* canvas) {
+ static_assert(sizeof(fStorage) >= sizeof(SkDrawIter), "fStorage_too_small");
+
+ SkASSERT(canvas);
+
+ fImpl = new (fStorage) SkDrawIter(canvas);
+ fDone = !fImpl->next();
+}
+
+SkCanvas::LayerIter::~LayerIter() {
+ fImpl->~SkDrawIter();
+}
+
+void SkCanvas::LayerIter::next() {
+ fDone = !fImpl->next();
+}
+
+SkBaseDevice* SkCanvas::LayerIter::device() const {
+ return fImpl->fDevice;
+}
+
+const SkMatrix& SkCanvas::LayerIter::matrix() const {
+ return fImpl->fDevice->ctm();
+}
+
+const SkPaint& SkCanvas::LayerIter::paint() const {
+ const SkPaint* paint = fImpl->getPaint();
+ if (nullptr == paint) {
+ paint = &fDefaultPaint;
+ }
+ return *paint;
+}
+
+SkIRect SkCanvas::LayerIter::clipBounds() const {
+ return fImpl->fDevice->getGlobalBounds();
+}
+
+int SkCanvas::LayerIter::x() const { return fImpl->getX(); }
+int SkCanvas::LayerIter::y() const { return fImpl->getY(); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCanvas::ImageSetEntry::ImageSetEntry() = default;
+SkCanvas::ImageSetEntry::~ImageSetEntry() = default;
+SkCanvas::ImageSetEntry::ImageSetEntry(const ImageSetEntry&) = default;
+SkCanvas::ImageSetEntry& SkCanvas::ImageSetEntry::operator=(const ImageSetEntry&) = default;
+
+SkCanvas::ImageSetEntry::ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, int matrixIndex, float alpha,
+ unsigned aaFlags, bool hasClip)
+ : fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fMatrixIndex(matrixIndex)
+ , fAlpha(alpha)
+ , fAAFlags(aaFlags)
+ , fHasClip(hasClip) {}
+
+SkCanvas::ImageSetEntry::ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, float alpha, unsigned aaFlags)
+ : fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fAlpha(alpha)
+ , fAAFlags(aaFlags) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkCanvas> SkCanvas::MakeRasterDirect(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const SkSurfaceProps* props) {
+ if (!SkSurfaceValidateRasterInfo(info, rowBytes)) {
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(info, pixels, rowBytes)) {
+ return nullptr;
+ }
+
+ return props ?
+ skstd::make_unique<SkCanvas>(bitmap, *props) :
+ skstd::make_unique<SkCanvas>(bitmap);
+}
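+
+// Illustrative usage sketch (not part of this file): drawing into caller-owned pixels,
+// assuming a 100x100 N32 premul buffer with tightly packed rows.
+//
+//   SkImageInfo info = SkImageInfo::MakeN32Premul(100, 100);
+//   std::vector<uint32_t> pixels(100 * 100);
+//   auto canvas = SkCanvas::MakeRasterDirect(info, pixels.data(), info.minRowBytes());
+//   if (canvas) { canvas->clear(SK_ColorWHITE); }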
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkNoDrawCanvas::SkNoDrawCanvas(int width, int height)
+ : INHERITED(SkIRect::MakeWH(width, height)) {}
+
+SkNoDrawCanvas::SkNoDrawCanvas(const SkIRect& bounds)
+ : INHERITED(bounds) {}
+
+SkNoDrawCanvas::SkNoDrawCanvas(sk_sp<SkBaseDevice> device)
+ : INHERITED(device) {}
+
+SkCanvas::SaveLayerStrategy SkNoDrawCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkNoDrawCanvas::onDoSaveBehind(const SkRect*) {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static_assert((int)SkRegion::kDifference_Op == (int)kDifference_SkClipOp, "");
+static_assert((int)SkRegion::kIntersect_Op == (int)kIntersect_SkClipOp, "");
+static_assert((int)SkRegion::kUnion_Op == (int)kUnion_SkClipOp, "");
+static_assert((int)SkRegion::kXOR_Op == (int)kXOR_SkClipOp, "");
+static_assert((int)SkRegion::kReverseDifference_Op == (int)kReverseDifference_SkClipOp, "");
+static_assert((int)SkRegion::kReplace_Op == (int)kReplace_SkClipOp, "");
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRasterHandleAllocator::Handle SkCanvas::accessTopRasterHandle() const {
+ if (fAllocator && fMCRec->fTopLayer->fDevice) {
+ const auto& dev = fMCRec->fTopLayer->fDevice;
+ SkRasterHandleAllocator::Handle handle = dev->getRasterHandle();
+ SkIPoint origin = dev->getOrigin();
+ SkMatrix ctm = this->getTotalMatrix();
+ ctm.preTranslate(SkIntToScalar(-origin.x()), SkIntToScalar(-origin.y()));
+
+ SkIRect clip = fMCRec->fRasterClip.getBounds();
+ clip.offset(-origin.x(), -origin.y());
+ if (!clip.intersect({0, 0, dev->width(), dev->height()})) {
+ clip.setEmpty();
+ }
+
+ fAllocator->updateHandle(handle, ctm, clip);
+ return handle;
+ }
+ return nullptr;
+}
+
+static bool install(SkBitmap* bm, const SkImageInfo& info,
+ const SkRasterHandleAllocator::Rec& rec) {
+ return bm->installPixels(info, rec.fPixels, rec.fRowBytes, rec.fReleaseProc, rec.fReleaseCtx);
+}
+
+SkRasterHandleAllocator::Handle SkRasterHandleAllocator::allocBitmap(const SkImageInfo& info,
+ SkBitmap* bm) {
+ SkRasterHandleAllocator::Rec rec;
+ if (!this->allocHandle(info, &rec) || !install(bm, info, rec)) {
+ return nullptr;
+ }
+ return rec.fHandle;
+}
+
+std::unique_ptr<SkCanvas>
+SkRasterHandleAllocator::MakeCanvas(std::unique_ptr<SkRasterHandleAllocator> alloc,
+ const SkImageInfo& info, const Rec* rec) {
+ if (!alloc || !SkSurfaceValidateRasterInfo(info, rec ? rec->fRowBytes : kIgnoreRowBytesValue)) {
+ return nullptr;
+ }
+
+ SkBitmap bm;
+ Handle hndl;
+
+ if (rec) {
+ hndl = install(&bm, info, *rec) ? rec->fHandle : nullptr;
+ } else {
+ hndl = alloc->allocBitmap(info, &bm);
+ }
+ return hndl ? std::unique_ptr<SkCanvas>(new SkCanvas(bm, std::move(alloc), hndl)) : nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkCanvasPriv.cpp b/gfx/skia/skia/src/core/SkCanvasPriv.cpp
new file mode 100644
index 0000000000..cf90419d9f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvasPriv.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriter32.h"
+
+SkAutoCanvasMatrixPaint::SkAutoCanvasMatrixPaint(SkCanvas* canvas, const SkMatrix* matrix,
+ const SkPaint* paint, const SkRect& bounds)
+: fCanvas(canvas)
+, fSaveCount(canvas->getSaveCount())
+{
+ if (paint) {
+ SkRect newBounds = bounds;
+ if (matrix) {
+ matrix->mapRect(&newBounds);
+ }
+ canvas->saveLayer(&newBounds, paint);
+ } else if (matrix) {
+ canvas->save();
+ }
+
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+}
+
+SkAutoCanvasMatrixPaint::~SkAutoCanvasMatrixPaint() {
+ fCanvas->restoreToCount(fSaveCount);
+}
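+
+// Scoped helper: the constructor pairs an optional saveLayer()/save() + concat() with
+// the restoreToCount() in the destructor. SkCanvas::drawPicture() and onDrawPicture()
+// (in SkCanvas.cpp above) use it to bracket picture playback.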
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvasPriv::ReadLattice(SkReadBuffer& buffer, SkCanvas::Lattice* lattice) {
+ lattice->fXCount = buffer.readInt();
+ lattice->fXDivs = buffer.skipT<int32_t>(lattice->fXCount);
+ lattice->fYCount = buffer.readInt();
+ lattice->fYDivs = buffer.skipT<int32_t>(lattice->fYCount);
+ int flagCount = buffer.readInt();
+ lattice->fRectTypes = nullptr;
+ lattice->fColors = nullptr;
+ if (flagCount) {
+ lattice->fRectTypes = buffer.skipT<SkCanvas::Lattice::RectType>(flagCount);
+ lattice->fColors = buffer.skipT<SkColor>(flagCount);
+ }
+ lattice->fBounds = buffer.skipT<SkIRect>();
+ return buffer.isValid();
+}
+
+size_t SkCanvasPriv::WriteLattice(void* buffer, const SkCanvas::Lattice& lattice) {
+ int flagCount = lattice.fRectTypes ? (lattice.fXCount + 1) * (lattice.fYCount + 1) : 0;
+
+ const size_t size = (1 + lattice.fXCount + 1 + lattice.fYCount + 1) * sizeof(int32_t) +
+ SkAlign4(flagCount * sizeof(SkCanvas::Lattice::RectType)) +
+ SkAlign4(flagCount * sizeof(SkColor)) +
+ sizeof(SkIRect);
+
+ if (buffer) {
+ SkWriter32 writer(buffer, size);
+ writer.write32(lattice.fXCount);
+ writer.write(lattice.fXDivs, lattice.fXCount * sizeof(uint32_t));
+ writer.write32(lattice.fYCount);
+ writer.write(lattice.fYDivs, lattice.fYCount * sizeof(uint32_t));
+ writer.write32(flagCount);
+ writer.writePad(lattice.fRectTypes, flagCount * sizeof(uint8_t));
+ writer.write(lattice.fColors, flagCount * sizeof(SkColor));
+ SkASSERT(lattice.fBounds);
+ writer.write(lattice.fBounds, sizeof(SkIRect));
+ SkASSERT(writer.bytesWritten() == size);
+ }
+ return size;
+}
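+
+// Worked example (illustrative): for fXCount == 2 and fYCount == 1 with rect types
+// present, flagCount = (2 + 1) * (1 + 1) = 6, so the returned size is
+// 6 * sizeof(int32_t) + SkAlign4(6 * 1) + SkAlign4(6 * 4) + sizeof(SkIRect)
+// = 24 + 8 + 24 + 16 = 72 bytes (RectType is one byte, as the writePad() above assumes).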
+
+void SkCanvasPriv::WriteLattice(SkWriteBuffer& buffer, const SkCanvas::Lattice& lattice) {
+ const size_t size = WriteLattice(nullptr, lattice);
+ SkAutoSMalloc<1024> storage(size);
+ WriteLattice(storage.get(), lattice);
+ buffer.writePad32(storage.get(), size);
+}
+
+void SkCanvasPriv::GetDstClipAndMatrixCounts(const SkCanvas::ImageSetEntry set[], int count,
+ int* totalDstClipCount, int* totalMatrixCount) {
+ int dstClipCount = 0;
+ int maxMatrixIndex = -1;
+ for (int i = 0; i < count; ++i) {
+ dstClipCount += 4 * set[i].fHasClip;
+ if (set[i].fMatrixIndex > maxMatrixIndex) {
+ maxMatrixIndex = set[i].fMatrixIndex;
+ }
+ }
+
+ *totalDstClipCount = dstClipCount;
+ *totalMatrixCount = maxMatrixIndex + 1;
+}
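+
+// Worked example (illustrative): for three entries where only entry 1 has a clip and
+// the matrix indices are {-1, 2, 0}, the minimum array lengths come out as
+// *totalDstClipCount = 4 (one quad's worth of points) and *totalMatrixCount = 2 + 1 = 3.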
diff --git a/gfx/skia/skia/src/core/SkCanvasPriv.h b/gfx/skia/skia/src/core/SkCanvasPriv.h
new file mode 100644
index 0000000000..5bef882900
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvasPriv.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasPriv_DEFINED
+#define SkCanvasPriv_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkAutoCanvasMatrixPaint : SkNoncopyable {
+public:
+ SkAutoCanvasMatrixPaint(SkCanvas*, const SkMatrix*, const SkPaint*, const SkRect& bounds);
+ ~SkAutoCanvasMatrixPaint();
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+};
+
+class SkCanvasPriv {
+public:
+ enum {
+ kDontClipToLayer_SaveLayerFlag = SkCanvas::kDontClipToLayer_PrivateSaveLayerFlag,
+ };
+
+ // The lattice has pointers directly into the readbuffer
+ static bool ReadLattice(SkReadBuffer&, SkCanvas::Lattice*);
+
+ static void WriteLattice(SkWriteBuffer&, const SkCanvas::Lattice&);
+
+ // return the byte-size of the lattice, even if the buffer is null
+ // storage must be 4-byte aligned
+ static size_t WriteLattice(void* storage, const SkCanvas::Lattice&);
+
+ static SkCanvas::SaveLayerFlags LegacySaveFlagsToSaveLayerFlags(uint32_t legacySaveFlags);
+
+ static int SaveBehind(SkCanvas* canvas, const SkRect* subset) {
+ return canvas->only_axis_aligned_saveBehind(subset);
+ }
+ static void DrawBehind(SkCanvas* canvas, const SkPaint& paint) {
+ canvas->drawClippedToSaveBehind(paint);
+ }
+
+ // The experimental_DrawEdgeAAImageSet API accepts separate dstClips and preViewMatrices arrays,
+ // where entries refer into them, but no explicit size is provided. Given a set of entries,
+    // computes the minimum length for these arrays that avoids out-of-bounds index access.
+ static void GetDstClipAndMatrixCounts(const SkCanvas::ImageSetEntry set[], int count,
+ int* totalDstClipCount, int* totalMatrixCount);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkClipOpPriv.h b/gfx/skia/skia/src/core/SkClipOpPriv.h
new file mode 100644
index 0000000000..02c8ffae5c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipOpPriv.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipOpPriv_DEFINED
+#define SkClipOpPriv_DEFINED
+
+#include "include/core/SkClipOp.h"
+
+const SkClipOp kDifference_SkClipOp = SkClipOp::kDifference;
+const SkClipOp kIntersect_SkClipOp = SkClipOp::kIntersect;
+
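+// The casts below assume the numeric layout verified by the static_asserts in
+// SkCanvas.cpp: each extended value lines up with the matching SkRegion::Op
+// (kUnion_Op == 2, kXOR_Op == 3, kReverseDifference_Op == 4, kReplace_Op == 5).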
+const SkClipOp kUnion_SkClipOp = (SkClipOp)2;
+const SkClipOp kXOR_SkClipOp = (SkClipOp)3;
+const SkClipOp kReverseDifference_SkClipOp = (SkClipOp)4;
+const SkClipOp kReplace_SkClipOp = (SkClipOp)5;
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkClipStack.cpp b/gfx/skia/skia/src/core/SkClipStack.cpp
new file mode 100644
index 0000000000..9219f67b04
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStack.cpp
@@ -0,0 +1,1100 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPath.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkClipStack.h"
+#include <atomic>
+#include <new>
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrProxyProvider.h"
+#endif
+
+SkClipStack::Element::Element(const Element& that) {
+ switch (that.getDeviceSpaceType()) {
+ case DeviceSpaceType::kEmpty:
+ fDeviceSpaceRRect.setEmpty();
+ fDeviceSpacePath.reset();
+ break;
+ case DeviceSpaceType::kRect: // Rect uses rrect
+ case DeviceSpaceType::kRRect:
+ fDeviceSpacePath.reset();
+ fDeviceSpaceRRect = that.fDeviceSpaceRRect;
+ break;
+ case DeviceSpaceType::kPath:
+ fDeviceSpacePath.set(that.getDeviceSpacePath());
+ break;
+ }
+
+ fSaveCount = that.fSaveCount;
+ fOp = that.fOp;
+ fDeviceSpaceType = that.fDeviceSpaceType;
+ fDoAA = that.fDoAA;
+ fFiniteBoundType = that.fFiniteBoundType;
+ fFiniteBound = that.fFiniteBound;
+ fIsIntersectionOfRects = that.fIsIntersectionOfRects;
+ fGenID = that.fGenID;
+}
+
+SkClipStack::Element::~Element() {
+#if SK_SUPPORT_GPU
+ for (int i = 0; i < fKeysToInvalidate.count(); ++i) {
+ fProxyProvider->processInvalidUniqueKey(fKeysToInvalidate[i], nullptr,
+ GrProxyProvider::InvalidateGPUResource::kYes);
+ }
+#endif
+}
+
+bool SkClipStack::Element::operator== (const Element& element) const {
+ if (this == &element) {
+ return true;
+ }
+ if (fOp != element.fOp || fDeviceSpaceType != element.fDeviceSpaceType ||
+ fDoAA != element.fDoAA || fSaveCount != element.fSaveCount) {
+ return false;
+ }
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kPath:
+ return this->getDeviceSpacePath() == element.getDeviceSpacePath();
+ case DeviceSpaceType::kRRect:
+ return fDeviceSpaceRRect == element.fDeviceSpaceRRect;
+ case DeviceSpaceType::kRect:
+ return this->getDeviceSpaceRect() == element.getDeviceSpaceRect();
+ case DeviceSpaceType::kEmpty:
+ return true;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+}
+
+const SkRect& SkClipStack::Element::getBounds() const {
+ static const SkRect kEmpty = {0, 0, 0, 0};
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kRect: // fallthrough
+ case DeviceSpaceType::kRRect:
+ return fDeviceSpaceRRect.getBounds();
+ case DeviceSpaceType::kPath:
+ return fDeviceSpacePath.get()->getBounds();
+ case DeviceSpaceType::kEmpty:
+ return kEmpty;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return kEmpty;
+ }
+}
+
+bool SkClipStack::Element::contains(const SkRect& rect) const {
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kRect:
+ return this->getDeviceSpaceRect().contains(rect);
+ case DeviceSpaceType::kRRect:
+ return fDeviceSpaceRRect.contains(rect);
+ case DeviceSpaceType::kPath:
+ return fDeviceSpacePath.get()->conservativelyContainsRect(rect);
+ case DeviceSpaceType::kEmpty:
+ return false;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+}
+
+bool SkClipStack::Element::contains(const SkRRect& rrect) const {
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kRect:
+ return this->getDeviceSpaceRect().contains(rrect.getBounds());
+ case DeviceSpaceType::kRRect:
+ // We don't currently have a generalized rrect-rrect containment.
+ return fDeviceSpaceRRect.contains(rrect.getBounds()) || rrect == fDeviceSpaceRRect;
+ case DeviceSpaceType::kPath:
+ return fDeviceSpacePath.get()->conservativelyContainsRect(rrect.getBounds());
+ case DeviceSpaceType::kEmpty:
+ return false;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+}
+
+void SkClipStack::Element::invertShapeFillType() {
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kRect:
+ fDeviceSpacePath.init();
+ fDeviceSpacePath.get()->addRect(this->getDeviceSpaceRect());
+ fDeviceSpacePath.get()->setFillType(SkPath::kInverseEvenOdd_FillType);
+ fDeviceSpaceType = DeviceSpaceType::kPath;
+ break;
+ case DeviceSpaceType::kRRect:
+ fDeviceSpacePath.init();
+ fDeviceSpacePath.get()->addRRect(fDeviceSpaceRRect);
+ fDeviceSpacePath.get()->setFillType(SkPath::kInverseEvenOdd_FillType);
+ fDeviceSpaceType = DeviceSpaceType::kPath;
+ break;
+ case DeviceSpaceType::kPath:
+ fDeviceSpacePath.get()->toggleInverseFillType();
+ break;
+ case DeviceSpaceType::kEmpty:
+            // Should this be set to an empty, inverse-filled path?
+ break;
+ }
+}
+
+void SkClipStack::Element::initCommon(int saveCount, SkClipOp op, bool doAA) {
+ fSaveCount = saveCount;
+ fOp = op;
+ fDoAA = doAA;
+ // A default of inside-out and empty bounds means the bounds are effectively void as it
+ // indicates that nothing is known to be outside the clip.
+ fFiniteBoundType = kInsideOut_BoundsType;
+ fFiniteBound.setEmpty();
+ fIsIntersectionOfRects = false;
+ fGenID = kInvalidGenID;
+}
+
+void SkClipStack::Element::initRect(int saveCount, const SkRect& rect, const SkMatrix& m,
+ SkClipOp op, bool doAA) {
+ if (m.rectStaysRect()) {
+ SkRect devRect;
+ m.mapRect(&devRect, rect);
+ fDeviceSpaceRRect.setRect(devRect);
+ fDeviceSpaceType = DeviceSpaceType::kRect;
+ this->initCommon(saveCount, op, doAA);
+ return;
+ }
+ SkPath path;
+ path.addRect(rect);
+ path.setIsVolatile(true);
+ this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+void SkClipStack::Element::initRRect(int saveCount, const SkRRect& rrect, const SkMatrix& m,
+ SkClipOp op, bool doAA) {
+ if (rrect.transform(m, &fDeviceSpaceRRect)) {
+ SkRRect::Type type = fDeviceSpaceRRect.getType();
+ if (SkRRect::kRect_Type == type || SkRRect::kEmpty_Type == type) {
+ fDeviceSpaceType = DeviceSpaceType::kRect;
+ } else {
+ fDeviceSpaceType = DeviceSpaceType::kRRect;
+ }
+ this->initCommon(saveCount, op, doAA);
+ return;
+ }
+ SkPath path;
+ path.addRRect(rrect);
+ path.setIsVolatile(true);
+ this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+void SkClipStack::Element::initPath(int saveCount, const SkPath& path, const SkMatrix& m,
+ SkClipOp op, bool doAA) {
+ if (!path.isInverseFillType()) {
+ SkRect r;
+ if (path.isRect(&r)) {
+ this->initRect(saveCount, r, m, op, doAA);
+ return;
+ }
+ SkRect ovalRect;
+ if (path.isOval(&ovalRect)) {
+ SkRRect rrect;
+ rrect.setOval(ovalRect);
+ this->initRRect(saveCount, rrect, m, op, doAA);
+ return;
+ }
+ }
+ this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+void SkClipStack::Element::initAsPath(int saveCount, const SkPath& path, const SkMatrix& m,
+ SkClipOp op, bool doAA) {
+ path.transform(m, fDeviceSpacePath.init());
+ fDeviceSpacePath.get()->setIsVolatile(true);
+ fDeviceSpaceType = DeviceSpaceType::kPath;
+ this->initCommon(saveCount, op, doAA);
+}
+
+void SkClipStack::Element::asDeviceSpacePath(SkPath* path) const {
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kEmpty:
+ path->reset();
+ break;
+ case DeviceSpaceType::kRect:
+ path->reset();
+ path->addRect(this->getDeviceSpaceRect());
+ break;
+ case DeviceSpaceType::kRRect:
+ path->reset();
+ path->addRRect(fDeviceSpaceRRect);
+ break;
+ case DeviceSpaceType::kPath:
+ *path = *fDeviceSpacePath.get();
+ break;
+ }
+ path->setIsVolatile(true);
+}
+
+void SkClipStack::Element::setEmpty() {
+ fDeviceSpaceType = DeviceSpaceType::kEmpty;
+ fFiniteBound.setEmpty();
+ fFiniteBoundType = kNormal_BoundsType;
+ fIsIntersectionOfRects = false;
+ fDeviceSpaceRRect.setEmpty();
+ fDeviceSpacePath.reset();
+ fGenID = kEmptyGenID;
+ SkDEBUGCODE(this->checkEmpty();)
+}
+
+void SkClipStack::Element::checkEmpty() const {
+ SkASSERT(fFiniteBound.isEmpty());
+ SkASSERT(kNormal_BoundsType == fFiniteBoundType);
+ SkASSERT(!fIsIntersectionOfRects);
+ SkASSERT(kEmptyGenID == fGenID);
+ SkASSERT(fDeviceSpaceRRect.isEmpty());
+ SkASSERT(!fDeviceSpacePath.isValid());
+}
+
+bool SkClipStack::Element::canBeIntersectedInPlace(int saveCount, SkClipOp op) const {
+ if (DeviceSpaceType::kEmpty == fDeviceSpaceType &&
+ (kDifference_SkClipOp == op || kIntersect_SkClipOp == op)) {
+ return true;
+ }
+ // Only clips within the same save/restore frame (as captured by
+ // the save count) can be merged
+ return fSaveCount == saveCount &&
+ kIntersect_SkClipOp == op &&
+ (kIntersect_SkClipOp == fOp || kReplace_SkClipOp == fOp);
+}
+
+bool SkClipStack::Element::rectRectIntersectAllowed(const SkRect& newR, bool newAA) const {
+ SkASSERT(DeviceSpaceType::kRect == fDeviceSpaceType);
+
+ if (fDoAA == newAA) {
+ // if the AA setting is the same there is no issue
+ return true;
+ }
+
+ if (!SkRect::Intersects(this->getDeviceSpaceRect(), newR)) {
+ // The calling code will correctly set the result to the empty clip
+ return true;
+ }
+
+ if (this->getDeviceSpaceRect().contains(newR)) {
+ // if the new rect carves out a portion of the old one there is no
+ // issue
+ return true;
+ }
+
+ // So either the two overlap in some complex manner or newR contains oldR.
+    // In the first case, the edges will require different AA. In the second,
+ // the AA setting that would be carried forward is incorrect (e.g., oldR
+ // is AA while newR is BW but since newR contains oldR, oldR will be
+ // drawn BW) since the new AA setting will predominate.
+ return false;
+}
+
+// a mirror of combineBoundsRevDiff
+void SkClipStack::Element::combineBoundsDiff(FillCombo combination, const SkRect& prevFinite) {
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // In this case the only pixels that can remain set
+ // are inside the current clip rect since the extensions
+ // to infinity of both clips cancel out and whatever
+ // is outside of the current clip is removed
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // In this case the current op is finite so the only pixels
+ // that aren't set are whatever isn't set in the previous
+ // clip and whatever this clip carves out
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_InvCur_FillCombo:
+ // In this case everything outside of this clip's bound
+ // is erased, so the only pixels that can remain set
+ // occur w/in the intersection of the two finite bounds
+ if (!fFiniteBound.intersect(prevFinite)) {
+ fFiniteBound.setEmpty();
+ fGenID = kEmptyGenID;
+ }
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ // The most conservative result bound is that of the
+ // prior clip. This could be wildly incorrect if the
+ // second clip either exactly matches the first clip
+ // (which should yield the empty set) or reduces the
+ // size of the prior bound (e.g., if the second clip
+ // exactly matched the bottom half of the prior clip).
+ // We ignore these two possibilities.
+ fFiniteBound = prevFinite;
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsDiff Invalid fill combination");
+ break;
+ }
+}
+
+void SkClipStack::Element::combineBoundsXOR(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_Cur_FillCombo: // fall through
+ case kPrev_InvCur_FillCombo:
+ // With only one of the clips inverted the result will always
+ // extend to infinity. The only pixels that may be un-writeable
+ // lie within the union of the two finite bounds
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that can survive are within the
+ // union of the two bounding boxes since the extensions
+ // to infinity of both clips cancel out
+ // fall through!
+ case kPrev_Cur_FillCombo:
+ // The most conservative bound for xor is the
+ // union of the two bounds. If the two clips exactly overlapped
+ // the xor could yield the empty set. Similarly the xor
+ // could reduce the size of the original clip's bound (e.g.,
+ // if the second clip exactly matched the bottom half of the
+ // first clip). We ignore these two cases.
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsXOR Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsIntersection
+void SkClipStack::Element::combineBoundsUnion(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ fFiniteBound.setEmpty();
+ fGenID = kWideOpenGenID;
+ }
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // The only pixels that won't be drawable are inside
+ // the prior clip's finite bound
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_InvCur_FillCombo:
+ // The only pixels that won't be drawable are inside
+ // this clip's finite bound
+ break;
+ case kPrev_Cur_FillCombo:
+ fFiniteBound.join(prevFinite);
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsUnion Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsUnion
+void SkClipStack::Element::combineBoundsIntersection(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that aren't writable in this case
+ // occur in the union of the two finite bounds
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // In this case the only pixels that will remain writeable
+ // are within the current clip
+ break;
+ case kPrev_InvCur_FillCombo:
+ // In this case the only pixels that will remain writeable
+ // are with the previous clip
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ this->setEmpty();
+ }
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsIntersection Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsDiff
+void SkClipStack::Element::combineBoundsRevDiff(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that can survive are in the
+ // previous bound since the extensions to infinity in
+ // both clips cancel out
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ this->setEmpty();
+ } else {
+ fFiniteBoundType = kNormal_BoundsType;
+ }
+ break;
+ case kPrev_InvCur_FillCombo:
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ // Fall through - as with the kDifference_Op case, the
+ // most conservative result bound is the bound of the
+ // current clip. The prior clip could reduce the size of this
+ // bound (as in the kDifference_Op case) but we are ignoring
+ // those cases.
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsRevDiff Invalid fill combination");
+ break;
+ }
+}
+
+void SkClipStack::Element::updateBoundAndGenID(const Element* prior) {
+ // We set this first here but we may overwrite it later if we determine that the clip is
+ // either wide-open or empty.
+ fGenID = GetNextGenID();
+
+ // First, optimistically update the current Element's bound information
+ // with the current clip's bound
+ fIsIntersectionOfRects = false;
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kRect:
+ fFiniteBound = this->getDeviceSpaceRect();
+ fFiniteBoundType = kNormal_BoundsType;
+
+ if (kReplace_SkClipOp == fOp || (kIntersect_SkClipOp == fOp && nullptr == prior) ||
+ (kIntersect_SkClipOp == fOp && prior->fIsIntersectionOfRects &&
+ prior->rectRectIntersectAllowed(this->getDeviceSpaceRect(), fDoAA))) {
+ fIsIntersectionOfRects = true;
+ }
+ break;
+ case DeviceSpaceType::kRRect:
+ fFiniteBound = fDeviceSpaceRRect.getBounds();
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case DeviceSpaceType::kPath:
+ fFiniteBound = fDeviceSpacePath.get()->getBounds();
+
+ if (fDeviceSpacePath.get()->isInverseFillType()) {
+ fFiniteBoundType = kInsideOut_BoundsType;
+ } else {
+ fFiniteBoundType = kNormal_BoundsType;
+ }
+ break;
+ case DeviceSpaceType::kEmpty:
+ SkDEBUGFAIL("We shouldn't get here with an empty element.");
+ break;
+ }
+
+ // Now determine the previous Element's bound information taking into
+ // account that there may be no previous clip
+ SkRect prevFinite;
+ SkClipStack::BoundsType prevType;
+
+ if (nullptr == prior) {
+ // no prior clip means the entire plane is writable
+ prevFinite.setEmpty(); // there are no pixels that cannot be drawn to
+ prevType = kInsideOut_BoundsType;
+ } else {
+ prevFinite = prior->fFiniteBound;
+ prevType = prior->fFiniteBoundType;
+ }
+
+ FillCombo combination = kPrev_Cur_FillCombo;
+ if (kInsideOut_BoundsType == fFiniteBoundType) {
+ combination = (FillCombo) (combination | 0x01);
+ }
+ if (kInsideOut_BoundsType == prevType) {
+ combination = (FillCombo) (combination | 0x02);
+ }
+
+ SkASSERT(kInvPrev_InvCur_FillCombo == combination ||
+ kInvPrev_Cur_FillCombo == combination ||
+ kPrev_InvCur_FillCombo == combination ||
+ kPrev_Cur_FillCombo == combination);
+
+    // Now integrate this clip with the prior clips
+ switch (fOp) {
+ case kDifference_SkClipOp:
+ this->combineBoundsDiff(combination, prevFinite);
+ break;
+ case kXOR_SkClipOp:
+ this->combineBoundsXOR(combination, prevFinite);
+ break;
+ case kUnion_SkClipOp:
+ this->combineBoundsUnion(combination, prevFinite);
+ break;
+ case kIntersect_SkClipOp:
+ this->combineBoundsIntersection(combination, prevFinite);
+ break;
+ case kReverseDifference_SkClipOp:
+ this->combineBoundsRevDiff(combination, prevFinite);
+ break;
+ case kReplace_SkClipOp:
+ // Replace just ignores everything prior
+ // The current clip's bound information is already filled in
+ // so nothing to do
+ break;
+ default:
+ SkDebugf("SkClipOp error\n");
+ SkASSERT(0);
+ break;
+ }
+}
+
+// This constant determines how many Element's are allocated together as a block in
+// the deque. As such it needs to balance allocating too much memory vs.
+// incurring allocation/deallocation thrashing. It should roughly correspond to
+// the deepest save/restore stack we expect to see.
+static const int kDefaultElementAllocCnt = 8;
+
+SkClipStack::SkClipStack()
+ : fDeque(sizeof(Element), kDefaultElementAllocCnt)
+ , fSaveCount(0) {
+}
+
+SkClipStack::SkClipStack(void* storage, size_t size)
+ : fDeque(sizeof(Element), storage, size, kDefaultElementAllocCnt)
+ , fSaveCount(0) {
+}
+
+SkClipStack::SkClipStack(const SkClipStack& b)
+ : fDeque(sizeof(Element), kDefaultElementAllocCnt) {
+ *this = b;
+}
+
+SkClipStack::~SkClipStack() {
+ reset();
+}
+
+SkClipStack& SkClipStack::operator=(const SkClipStack& b) {
+ if (this == &b) {
+ return *this;
+ }
+ reset();
+
+ fSaveCount = b.fSaveCount;
+ SkDeque::F2BIter recIter(b.fDeque);
+ for (const Element* element = (const Element*)recIter.next();
+ element != nullptr;
+ element = (const Element*)recIter.next()) {
+ new (fDeque.push_back()) Element(*element);
+ }
+
+ return *this;
+}
+
+bool SkClipStack::operator==(const SkClipStack& b) const {
+ if (this->getTopmostGenID() == b.getTopmostGenID()) {
+ return true;
+ }
+ if (fSaveCount != b.fSaveCount ||
+ fDeque.count() != b.fDeque.count()) {
+ return false;
+ }
+ SkDeque::F2BIter myIter(fDeque);
+ SkDeque::F2BIter bIter(b.fDeque);
+ const Element* myElement = (const Element*)myIter.next();
+ const Element* bElement = (const Element*)bIter.next();
+
+ while (myElement != nullptr && bElement != nullptr) {
+ if (*myElement != *bElement) {
+ return false;
+ }
+ myElement = (const Element*)myIter.next();
+ bElement = (const Element*)bIter.next();
+ }
+ return myElement == nullptr && bElement == nullptr;
+}
+
+void SkClipStack::reset() {
+ // We used a placement new for each object in fDeque, so we're responsible
+ // for calling the destructor on each of them as well.
+ while (!fDeque.empty()) {
+ Element* element = (Element*)fDeque.back();
+ element->~Element();
+ fDeque.pop_back();
+ }
+
+ fSaveCount = 0;
+}
+
+void SkClipStack::save() {
+ fSaveCount += 1;
+}
+
+void SkClipStack::restore() {
+ fSaveCount -= 1;
+ restoreTo(fSaveCount);
+}
+
+void SkClipStack::restoreTo(int saveCount) {
+ while (!fDeque.empty()) {
+ Element* element = (Element*)fDeque.back();
+ if (element->fSaveCount <= saveCount) {
+ break;
+ }
+ element->~Element();
+ fDeque.pop_back();
+ }
+}
+
+SkRect SkClipStack::bounds(const SkIRect& deviceBounds) const {
+ // TODO: optimize this.
+ SkRect r;
+ SkClipStack::BoundsType bounds;
+ this->getBounds(&r, &bounds);
+ if (bounds == SkClipStack::kInsideOut_BoundsType) {
+ return SkRect::Make(deviceBounds);
+ }
+ return r.intersect(SkRect::Make(deviceBounds)) ? r : SkRect::MakeEmpty();
+}
+
+// TODO: optimize this.
+bool SkClipStack::isEmpty(const SkIRect& r) const { return this->bounds(r).isEmpty(); }
+
+void SkClipStack::getBounds(SkRect* canvFiniteBound,
+ BoundsType* boundType,
+ bool* isIntersectionOfRects) const {
+ SkASSERT(canvFiniteBound && boundType);
+
+ Element* element = (Element*)fDeque.back();
+
+ if (nullptr == element) {
+ // the clip is wide open - the infinite plane w/ no pixels un-writeable
+ canvFiniteBound->setEmpty();
+ *boundType = kInsideOut_BoundsType;
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = false;
+ }
+ return;
+ }
+
+ *canvFiniteBound = element->fFiniteBound;
+ *boundType = element->fFiniteBoundType;
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = element->fIsIntersectionOfRects;
+ }
+}
+
+bool SkClipStack::internalQuickContains(const SkRect& rect) const {
+
+ Iter iter(*this, Iter::kTop_IterStart);
+ const Element* element = iter.prev();
+ while (element != nullptr) {
+ if (kIntersect_SkClipOp != element->getOp() && kReplace_SkClipOp != element->getOp())
+ return false;
+ if (element->isInverseFilled()) {
+ // Part of 'rect' could be trimmed off by the inverse-filled clip element
+ if (SkRect::Intersects(element->getBounds(), rect)) {
+ return false;
+ }
+ } else {
+ if (!element->contains(rect)) {
+ return false;
+ }
+ }
+ if (kReplace_SkClipOp == element->getOp()) {
+ break;
+ }
+ element = iter.prev();
+ }
+ return true;
+}
+
+bool SkClipStack::internalQuickContains(const SkRRect& rrect) const {
+
+ Iter iter(*this, Iter::kTop_IterStart);
+ const Element* element = iter.prev();
+ while (element != nullptr) {
+ if (kIntersect_SkClipOp != element->getOp() && kReplace_SkClipOp != element->getOp())
+ return false;
+ if (element->isInverseFilled()) {
+ // Part of 'rrect' could be trimmed off by the inverse-filled clip element
+ if (SkRect::Intersects(element->getBounds(), rrect.getBounds())) {
+ return false;
+ }
+ } else {
+ if (!element->contains(rrect)) {
+ return false;
+ }
+ }
+ if (kReplace_SkClipOp == element->getOp()) {
+ break;
+ }
+ element = iter.prev();
+ }
+ return true;
+}
+
+bool SkClipStack::asPath(SkPath *path) const {
+ bool isAA = false;
+
+ path->reset();
+ path->setFillType(SkPath::kInverseEvenOdd_FillType);
+
+ SkClipStack::Iter iter(*this, SkClipStack::Iter::kBottom_IterStart);
+ while (const SkClipStack::Element* element = iter.next()) {
+ SkPath operand;
+ if (element->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kEmpty) {
+ element->asDeviceSpacePath(&operand);
+ }
+
+ SkClipOp elementOp = element->getOp();
+ if (elementOp == kReplace_SkClipOp) {
+ *path = operand;
+ } else {
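+            // This cast assumes SkPathOp and the (extended) SkClipOp values line up
+            // numerically, mirroring the SkRegion::Op asserts in SkCanvas.cpp.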
+ Op(*path, operand, (SkPathOp)elementOp, path);
+ }
+
+        // If the previous and current clips disagree about AA vs. non-AA, favor the AA
+        // request. Perhaps we need an API change to avoid this sort of mixed signal
+        // about clipping.
+ isAA = (isAA || element->isAA());
+ }
+
+ return isAA;
+}
+
+void SkClipStack::pushElement(const Element& element) {
+    // Use a reverse iterator rather than back() because the rect case may also need the
+    // element before 'prior'
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ Element* prior = (Element*) iter.prev();
+
+ if (prior) {
+ if (prior->canBeIntersectedInPlace(fSaveCount, element.getOp())) {
+ switch (prior->fDeviceSpaceType) {
+ case Element::DeviceSpaceType::kEmpty:
+ SkDEBUGCODE(prior->checkEmpty();)
+ return;
+ case Element::DeviceSpaceType::kRect:
+ if (Element::DeviceSpaceType::kRect == element.getDeviceSpaceType()) {
+ if (prior->rectRectIntersectAllowed(element.getDeviceSpaceRect(),
+ element.isAA())) {
+ SkRect isectRect;
+ if (!isectRect.intersect(prior->getDeviceSpaceRect(),
+ element.getDeviceSpaceRect())) {
+ prior->setEmpty();
+ return;
+ }
+
+ prior->fDeviceSpaceRRect.setRect(isectRect);
+ prior->fDoAA = element.isAA();
+ Element* priorPrior = (Element*) iter.prev();
+ prior->updateBoundAndGenID(priorPrior);
+ return;
+ }
+ break;
+ }
+ // fallthrough
+ default:
+ if (!SkRect::Intersects(prior->getBounds(), element.getBounds())) {
+ prior->setEmpty();
+ return;
+ }
+ break;
+ }
+ } else if (kReplace_SkClipOp == element.getOp()) {
+ this->restoreTo(fSaveCount - 1);
+ prior = (Element*) fDeque.back();
+ }
+ }
+ Element* newElement = new (fDeque.push_back()) Element(element);
+ newElement->updateBoundAndGenID(prior);
+}
+
+void SkClipStack::clipRRect(const SkRRect& rrect, const SkMatrix& matrix, SkClipOp op,
+ bool doAA) {
+ Element element(fSaveCount, rrect, matrix, op, doAA);
+ this->pushElement(element);
+ if (this->hasClipRestriction(op)) {
+ Element restriction(fSaveCount, fClipRestrictionRect, SkMatrix::I(), kIntersect_SkClipOp,
+ false);
+ this->pushElement(restriction);
+ }
+}
+
+void SkClipStack::clipRect(const SkRect& rect, const SkMatrix& matrix, SkClipOp op,
+ bool doAA) {
+ Element element(fSaveCount, rect, matrix, op, doAA);
+ this->pushElement(element);
+ if (this->hasClipRestriction(op)) {
+ Element restriction(fSaveCount, fClipRestrictionRect, SkMatrix::I(), kIntersect_SkClipOp,
+ false);
+ this->pushElement(restriction);
+ }
+}
+
+void SkClipStack::clipPath(const SkPath& path, const SkMatrix& matrix, SkClipOp op,
+ bool doAA) {
+ Element element(fSaveCount, path, matrix, op, doAA);
+ this->pushElement(element);
+ if (this->hasClipRestriction(op)) {
+ Element restriction(fSaveCount, fClipRestrictionRect, SkMatrix::I(), kIntersect_SkClipOp,
+ false);
+ this->pushElement(restriction);
+ }
+}
+
+void SkClipStack::clipEmpty() {
+ Element* element = (Element*) fDeque.back();
+
+ if (element && element->canBeIntersectedInPlace(fSaveCount, kIntersect_SkClipOp)) {
+ element->setEmpty();
+ }
+ new (fDeque.push_back()) Element(fSaveCount);
+
+ ((Element*)fDeque.back())->fGenID = kEmptyGenID;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkClipStack::Iter::Iter() : fStack(nullptr) {
+}
+
+SkClipStack::Iter::Iter(const SkClipStack& stack, IterStart startLoc)
+ : fStack(&stack) {
+ this->reset(stack, startLoc);
+}
+
+const SkClipStack::Element* SkClipStack::Iter::next() {
+ return (const SkClipStack::Element*)fIter.next();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::prev() {
+ return (const SkClipStack::Element*)fIter.prev();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::skipToTopmost(SkClipOp op) {
+
+ if (nullptr == fStack) {
+ return nullptr;
+ }
+
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kBack_IterStart);
+
+ const SkClipStack::Element* element = nullptr;
+
+ for (element = (const SkClipStack::Element*) fIter.prev();
+ element;
+ element = (const SkClipStack::Element*) fIter.prev()) {
+
+ if (op == element->fOp) {
+ // The Deque's iterator is actually one pace ahead of the
+ // returned value. So while "element" is the element we want to
+ // return, the iterator is actually pointing at (and will
+ // return on the next "next" or "prev" call) the element
+ // in front of it in the deque. Bump the iterator forward a
+ // step so we get the expected result.
+ if (nullptr == fIter.next()) {
+ // The reverse iterator has run off the front of the deque
+ // (i.e., the "op" clip is the first clip) and can't
+ // recover. Reset the iterator to start at the front.
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+ break;
+ }
+ }
+
+ if (nullptr == element) {
+ // There were no "op" clips
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+
+ return this->next();
+}
+
+void SkClipStack::Iter::reset(const SkClipStack& stack, IterStart startLoc) {
+ fStack = &stack;
+ fIter.reset(stack.fDeque, static_cast<SkDeque::Iter::IterStart>(startLoc));
+}
+
+// helper method
+void SkClipStack::getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects) const {
+ SkASSERT(devBounds);
+
+ devBounds->setLTRB(0, 0,
+ SkIntToScalar(maxWidth), SkIntToScalar(maxHeight));
+
+ SkRect temp;
+ SkClipStack::BoundsType boundType;
+
+ // temp starts off in canvas space here
+ this->getBounds(&temp, &boundType, isIntersectionOfRects);
+ if (SkClipStack::kInsideOut_BoundsType == boundType) {
+ return;
+ }
+
+ // but is converted to device space here
+ temp.offset(SkIntToScalar(offsetX), SkIntToScalar(offsetY));
+
+ if (!devBounds->intersect(temp)) {
+ devBounds->setEmpty();
+ }
+}
+
+bool SkClipStack::isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const {
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (!back) {
+ // TODO: return bounds?
+ return false;
+ }
+ // First check if the entire stack is known to be a rect by the top element.
+ if (back->fIsIntersectionOfRects && back->fFiniteBoundType == BoundsType::kNormal_BoundsType) {
+ rrect->setRect(back->fFiniteBound);
+ *aa = back->isAA();
+ return true;
+ }
+
+ if (back->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kRect &&
+ back->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kRRect) {
+ return false;
+ }
+ if (back->getOp() == kReplace_SkClipOp) {
+ *rrect = back->asDeviceSpaceRRect();
+ *aa = back->isAA();
+ return true;
+ }
+
+ if (back->getOp() == kIntersect_SkClipOp) {
+ SkRect backBounds;
+ if (!backBounds.intersect(bounds, back->asDeviceSpaceRRect().rect())) {
+ return false;
+ }
+ // We limit to 17 elements. This means the back element will be bounds checked at most 16
+ // times if it is an rrect.
+ int cnt = fDeque.count();
+ if (cnt > 17) {
+ return false;
+ }
+ if (cnt > 1) {
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ SkAssertResult(static_cast<const Element*>(iter.prev()) == back);
+ while (const Element* prior = (const Element*)iter.prev()) {
+ if ((prior->getOp() != kIntersect_SkClipOp &&
+ prior->getOp() != kReplace_SkClipOp) ||
+ !prior->contains(backBounds)) {
+ return false;
+ }
+ if (prior->getOp() == kReplace_SkClipOp) {
+ break;
+ }
+ }
+ }
+ *rrect = back->asDeviceSpaceRRect();
+ *aa = back->isAA();
+ return true;
+ }
+ return false;
+}
+
+uint32_t SkClipStack::GetNextGenID() {
+ // 0-2 are reserved for invalid, empty & wide-open
+ static const uint32_t kFirstUnreservedGenID = 3;
+ static std::atomic<uint32_t> nextID{kFirstUnreservedGenID};
+
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id < kFirstUnreservedGenID);
+ return id;
+}
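+
+// Note: the do/while loop above guards against 32-bit wraparound; if nextID ever
+// overflows back into the reserved range (0-2), those values are skipped.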
+
+uint32_t SkClipStack::getTopmostGenID() const {
+ if (fDeque.empty()) {
+ return kWideOpenGenID;
+ }
+
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (kInsideOut_BoundsType == back->fFiniteBoundType && back->fFiniteBound.isEmpty()) {
+ return kWideOpenGenID;
+ }
+
+ return back->getGenID();
+}
+
+#ifdef SK_DEBUG
+void SkClipStack::Element::dump() const {
+ static const char* kTypeStrings[] = {
+ "empty",
+ "rect",
+ "rrect",
+ "path"
+ };
+ static_assert(0 == static_cast<int>(DeviceSpaceType::kEmpty), "enum mismatch");
+ static_assert(1 == static_cast<int>(DeviceSpaceType::kRect), "enum mismatch");
+ static_assert(2 == static_cast<int>(DeviceSpaceType::kRRect), "enum mismatch");
+ static_assert(3 == static_cast<int>(DeviceSpaceType::kPath), "enum mismatch");
+ static_assert(SK_ARRAY_COUNT(kTypeStrings) == kTypeCnt, "enum mismatch");
+
+ static const char* kOpStrings[] = {
+ "difference",
+ "intersect",
+ "union",
+ "xor",
+ "reverse-difference",
+ "replace",
+ };
+ static_assert(0 == static_cast<int>(kDifference_SkClipOp), "enum mismatch");
+ static_assert(1 == static_cast<int>(kIntersect_SkClipOp), "enum mismatch");
+ static_assert(2 == static_cast<int>(kUnion_SkClipOp), "enum mismatch");
+ static_assert(3 == static_cast<int>(kXOR_SkClipOp), "enum mismatch");
+ static_assert(4 == static_cast<int>(kReverseDifference_SkClipOp), "enum mismatch");
+ static_assert(5 == static_cast<int>(kReplace_SkClipOp), "enum mismatch");
+ static_assert(SK_ARRAY_COUNT(kOpStrings) == SkRegion::kOpCnt, "enum mismatch");
+
+ SkDebugf("Type: %s, Op: %s, AA: %s, Save Count: %d\n", kTypeStrings[(int)fDeviceSpaceType],
+ kOpStrings[static_cast<int>(fOp)], (fDoAA ? "yes" : "no"), fSaveCount);
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kEmpty:
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kRect:
+ this->getDeviceSpaceRect().dump();
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kRRect:
+ this->getDeviceSpaceRRect().dump();
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kPath:
+ this->getDeviceSpacePath().dump(nullptr, true, false);
+ break;
+ }
+}
+
+void SkClipStack::dump() const {
+ B2TIter iter(*this);
+ const Element* e;
+ while ((e = iter.next())) {
+ e->dump();
+ SkDebugf("\n");
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkClipStack.h b/gfx/skia/skia/src/core/SkClipStack.h
new file mode 100644
index 0000000000..5f502ee336
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStack.h
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipStack_DEFINED
+#define SkClipStack_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDeque.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkTLazy.h"
+
+#if SK_SUPPORT_GPU
+class GrProxyProvider;
+
+#include "include/private/GrResourceKey.h"
+#endif
+
+// Because a single save/restore state can have multiple clips, this class
+// stores the stack depth (fSaveCount) and clips (fDeque) separately.
+// Each clip in fDeque stores the stack state to which it belongs
+// (i.e., the fSaveCount in force when it was added). Restores are thus
+// implemented by removing clips from fDeque that have an fSaveCount larger
+// than the freshly decremented count.
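+//
+// Illustrative example: after save(); clipRect(A); save(); clipRect(B); the element
+// for B carries fSaveCount == 2, so a restore() that drops the count back to 1 pops
+// B but keeps A.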
+class SkClipStack {
+public:
+ enum BoundsType {
+ // The bounding box contains all the pixels that can be written to
+ kNormal_BoundsType,
+ // The bounding box contains all the pixels that cannot be written to.
+ // The real bound extends out to infinity and all the pixels outside
+ // of the bound can be written to. Note that some of the pixels inside
+ // the bound may also be writeable but all pixels that cannot be
+ // written to are guaranteed to be inside.
+ kInsideOut_BoundsType
+ };
+
+ /**
+     * An element of the clip stack. It represents a shape combined with the previous clip using a
+ * set operator. Each element can be antialiased or not.
+ */
+ class Element {
+ public:
+ /** This indicates the shape type of the clip element in device space. */
+ enum class DeviceSpaceType {
+ //!< This element makes the clip empty (regardless of previous elements).
+ kEmpty,
+ //!< This element combines a device space rect with the current clip.
+ kRect,
+ //!< This element combines a device space round-rect with the current clip.
+ kRRect,
+ //!< This element combines a device space path with the current clip.
+ kPath,
+
+ kLastType = kPath
+ };
+ static const int kTypeCnt = (int)DeviceSpaceType::kLastType + 1;
+
+ Element() {
+ this->initCommon(0, kReplace_SkClipOp, false);
+ this->setEmpty();
+ }
+
+ Element(const Element&);
+
+ Element(const SkRect& rect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRect(0, rect, m, op, doAA);
+ }
+
+ Element(const SkRRect& rrect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRRect(0, rrect, m, op, doAA);
+ }
+
+ Element(const SkPath& path, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initPath(0, path, m, op, doAA);
+ }
+
+ ~Element();
+
+ bool operator== (const Element& element) const;
+ bool operator!= (const Element& element) const { return !(*this == element); }
+
+ //!< Call to get the type of the clip element.
+ DeviceSpaceType getDeviceSpaceType() const { return fDeviceSpaceType; }
+
+ //!< Call to get the save count associated with this clip element.
+ int getSaveCount() const { return fSaveCount; }
+
+ //!< Call if getDeviceSpaceType() is kPath to get the path.
+ const SkPath& getDeviceSpacePath() const {
+ SkASSERT(DeviceSpaceType::kPath == fDeviceSpaceType);
+ return *fDeviceSpacePath.get();
+ }
+
+ //!< Call if getDeviceSpaceType() is kRRect to get the round-rect.
+ const SkRRect& getDeviceSpaceRRect() const {
+ SkASSERT(DeviceSpaceType::kRRect == fDeviceSpaceType);
+ return fDeviceSpaceRRect;
+ }
+
+ //!< Call if getDeviceSpaceType() is kRect to get the rect.
+ const SkRect& getDeviceSpaceRect() const {
+ SkASSERT(DeviceSpaceType::kRect == fDeviceSpaceType &&
+ (fDeviceSpaceRRect.isRect() || fDeviceSpaceRRect.isEmpty()));
+ return fDeviceSpaceRRect.getBounds();
+ }
+
+ //!< Call if getDeviceSpaceType() is not kEmpty to get the set operation used to combine
+ //!< this element.
+ SkClipOp getOp() const { return fOp; }
+
+ //!< Call to get the element as a path, regardless of its type.
+ void asDeviceSpacePath(SkPath* path) const;
+
+ //!< Call if getType() is not kPath to get the element as a round rect.
+ const SkRRect& asDeviceSpaceRRect() const {
+ SkASSERT(DeviceSpaceType::kPath != fDeviceSpaceType);
+ return fDeviceSpaceRRect;
+ }
+
+ /** If getType() is not kEmpty this indicates whether the clip shape should be anti-aliased
+ when it is rasterized. */
+ bool isAA() const { return fDoAA; }
+
+ //!< Inverts the fill of the clip shape. Note that a kEmpty element remains kEmpty.
+ void invertShapeFillType();
+
+ //!< Sets the set operation represented by the element.
+ void setOp(SkClipOp op) { fOp = op; }
+
+ /** The GenID can be used by clip stack clients to cache representations of the clip. The
+        ID corresponds to the set of clip elements up to and including this element within the
+        stack, not to the element itself. That is, the same clip path in different stacks will
+        have different IDs, since the elements produce different clip results in the context
+        of their stacks. */
+ uint32_t getGenID() const { SkASSERT(kInvalidGenID != fGenID); return fGenID; }
+
+ /**
+ * Gets the bounds of the clip element, either the rect or path bounds. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ const SkRect& getBounds() const;
+
+ /**
+ * Conservatively checks whether the clip shape contains the rect/rrect. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ bool contains(const SkRect& rect) const;
+ bool contains(const SkRRect& rrect) const;
+
+ /**
+ * Is the clip shape inverse filled.
+ */
+ bool isInverseFilled() const {
+ return DeviceSpaceType::kPath == fDeviceSpaceType &&
+ fDeviceSpacePath.get()->isInverseFillType();
+ }
+
+#ifdef SK_DEBUG
+ /**
+         * Dumps the element to SkDebugf. This is intended for Skia development debugging.
+ * Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+#if SK_SUPPORT_GPU
+ /**
+ * This is used to purge any GPU resource cache items that become unreachable when
+ * the element is destroyed because their key is based on this element's gen ID.
+ */
+ void addResourceInvalidationMessage(GrProxyProvider* proxyProvider,
+ const GrUniqueKey& key) const {
+ SkASSERT(proxyProvider);
+
+ if (!fProxyProvider) {
+ fProxyProvider = proxyProvider;
+ }
+ SkASSERT(fProxyProvider == proxyProvider);
+
+ fKeysToInvalidate.push_back(key);
+ }
+#endif
+
+ private:
+ friend class SkClipStack;
+
+ SkTLazy<SkPath> fDeviceSpacePath;
+ SkRRect fDeviceSpaceRRect;
+ int fSaveCount; // save count of stack when this element was added.
+ SkClipOp fOp;
+ DeviceSpaceType fDeviceSpaceType;
+ bool fDoAA;
+
+ /* fFiniteBoundType and fFiniteBound are used to incrementally update the clip stack's
+ bound. When fFiniteBoundType is kNormal_BoundsType, fFiniteBound represents the
+ conservative bounding box of the pixels that aren't clipped (i.e., any pixels that can be
+ drawn to are inside the bound). When fFiniteBoundType is kInsideOut_BoundsType (which
+ occurs when a clip is inverse filled), fFiniteBound represents the conservative bounding
+ box of the pixels that _are_ clipped (i.e., any pixels that cannot be drawn to are inside
+ the bound). When fFiniteBoundType is kInsideOut_BoundsType the actual bound is the
+ infinite plane. This behavior of fFiniteBoundType and fFiniteBound is required so that we
+ can capture the cancelling out of the extensions to infinity when two inverse filled
+ clips are Booleaned together. */
+ SkClipStack::BoundsType fFiniteBoundType;
+ SkRect fFiniteBound;
+
+        // When this element is applied to the previous elements in the stack, is the result known
+        // to be equivalent to a single rect intersection? In other words, is the clip effectively
+        // a rectangle?
+ bool fIsIntersectionOfRects;
+
+ uint32_t fGenID;
+#if SK_SUPPORT_GPU
+ mutable GrProxyProvider* fProxyProvider = nullptr;
+ mutable SkTArray<GrUniqueKey> fKeysToInvalidate;
+#endif
+ Element(int saveCount) {
+ this->initCommon(saveCount, kReplace_SkClipOp, false);
+ this->setEmpty();
+ }
+
+ Element(int saveCount, const SkRRect& rrect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRRect(saveCount, rrect, m, op, doAA);
+ }
+
+ Element(int saveCount, const SkRect& rect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRect(saveCount, rect, m, op, doAA);
+ }
+
+ Element(int saveCount, const SkPath& path, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initPath(saveCount, path, m, op, doAA);
+ }
+
+ void initCommon(int saveCount, SkClipOp op, bool doAA);
+ void initRect(int saveCount, const SkRect&, const SkMatrix&, SkClipOp, bool doAA);
+ void initRRect(int saveCount, const SkRRect&, const SkMatrix&, SkClipOp, bool doAA);
+ void initPath(int saveCount, const SkPath&, const SkMatrix&, SkClipOp, bool doAA);
+ void initAsPath(int saveCount, const SkPath&, const SkMatrix&, SkClipOp, bool doAA);
+
+ void setEmpty();
+
+ // All Element methods below are only used within SkClipStack.cpp
+ inline void checkEmpty() const;
+ inline bool canBeIntersectedInPlace(int saveCount, SkClipOp op) const;
+        /* This method checks to see if two rect clips can be safely merged into one. The issue
+           here is that, to be strictly correct, all the edges of the resulting rect must have
+           the same anti-aliasing. */
+ bool rectRectIntersectAllowed(const SkRect& newR, bool newAA) const;
+ /** Determines possible finite bounds for the Element given the previous element of the
+            stack. */
+ void updateBoundAndGenID(const Element* prior);
+ // The different combination of fill & inverse fill when combining bounding boxes
+ enum FillCombo {
+ kPrev_Cur_FillCombo,
+ kPrev_InvCur_FillCombo,
+ kInvPrev_Cur_FillCombo,
+ kInvPrev_InvCur_FillCombo
+ };
+ // per-set operation functions used by updateBoundAndGenID().
+ inline void combineBoundsDiff(FillCombo combination, const SkRect& prevFinite);
+ inline void combineBoundsXOR(int combination, const SkRect& prevFinite);
+ inline void combineBoundsUnion(int combination, const SkRect& prevFinite);
+ inline void combineBoundsIntersection(int combination, const SkRect& prevFinite);
+ inline void combineBoundsRevDiff(int combination, const SkRect& prevFinite);
+ };
+
+ SkClipStack();
+ SkClipStack(void* storage, size_t size);
+ SkClipStack(const SkClipStack& b);
+ ~SkClipStack();
+
+ SkClipStack& operator=(const SkClipStack& b);
+ bool operator==(const SkClipStack& b) const;
+ bool operator!=(const SkClipStack& b) const { return !(*this == b); }
+
+ void reset();
+
+ int getSaveCount() const { return fSaveCount; }
+ void save();
+ void restore();
+
+ class AutoRestore {
+ public:
+ AutoRestore(SkClipStack* cs, bool doSave)
+ : fCS(cs), fSaveCount(cs->getSaveCount())
+ {
+ if (doSave) {
+ fCS->save();
+ }
+ }
+ ~AutoRestore() {
+ SkASSERT(fCS->getSaveCount() >= fSaveCount); // no underflow
+ while (fCS->getSaveCount() > fSaveCount) {
+ fCS->restore();
+ }
+ }
+
+ private:
+ SkClipStack* fCS;
+ const int fSaveCount;
+ };
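+
+    // Illustrative sketch (editorial addition, not part of the upstream file):
+    // AutoRestore scopes a save() to a block and restores on destruction, even
+    // on early return. `cs` and `rect` are hypothetical.
+    //
+    //   void drawClipped(SkClipStack* cs, const SkRect& rect) {
+    //       SkClipStack::AutoRestore ar(cs, /*doSave=*/true);
+    //       cs->clipRect(rect, SkMatrix::I(), kIntersect_SkClipOp, true);
+    //       // ... draw ...
+    //   }   // ~AutoRestore() pops back to the prior save count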
+
+ /**
+ * getBounds places the current finite bound in its first parameter. In its
+ * second, it indicates which kind of bound is being returned. If
+ * 'canvFiniteBound' is a normal bounding box then it encloses all writeable
+ * pixels. If 'canvFiniteBound' is an inside out bounding box then it
+ * encloses all the un-writeable pixels and the true/normal bound is the
+ * infinite plane. isIntersectionOfRects is an optional parameter
+ * that is true if 'canvFiniteBound' resulted from an intersection of rects.
+ */
+ void getBounds(SkRect* canvFiniteBound,
+ BoundsType* boundType,
+ bool* isIntersectionOfRects = nullptr) const;
+
+ SkRect bounds(const SkIRect& deviceBounds) const;
+ bool isEmpty(const SkIRect& deviceBounds) const;
+
+ /**
+ * Returns true if the input (r)rect in device space is entirely contained
+ * by the clip. A return value of false does not guarantee that the (r)rect
+ * is not contained by the clip.
+ */
+ bool quickContains(const SkRect& devRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRect);
+ }
+
+ bool quickContains(const SkRRect& devRRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRRect);
+ }
+
+ /**
+ * Flattens the clip stack into a single SkPath. Returns true if any of
+ * the clip stack components requires anti-aliasing.
+ */
+ bool asPath(SkPath* path) const;
+
+ void clipDevRect(const SkIRect& ir, SkClipOp op) {
+ SkRect r;
+ r.set(ir);
+ this->clipRect(r, SkMatrix::I(), op, false);
+ }
+ void clipRect(const SkRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ void clipRRect(const SkRRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ void clipPath(const SkPath&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ // An optimized version of clipDevRect(emptyRect, kIntersect, ...)
+ void clipEmpty();
+ void setDeviceClipRestriction(const SkIRect& rect) {
+ fClipRestrictionRect = SkRect::Make(rect);
+ }
+
+ /**
+ * isWideOpen returns true if the clip state corresponds to the infinite
+ * plane (i.e., draws are not limited at all)
+ */
+ bool isWideOpen() const { return this->getTopmostGenID() == kWideOpenGenID; }
+
+ /**
+ * This method quickly and conservatively determines whether the entire stack is equivalent to
+ * intersection with a rrect given a bounds, where the rrect must not contain the entire bounds.
+ *
+     * @param bounds A bounds on what will be drawn through the clip. The clip only needs to
+     *               be equivalent to an intersection with a rrect for draws within the bounds. The
+ * returned rrect must intersect the bounds but need not be contained by the
+ * bounds.
+ * @param rrect If return is true rrect will contain the rrect equivalent to the stack.
+ * @param aa If return is true aa will indicate whether the equivalent rrect clip is
+ * antialiased.
+ * @return true if the stack is equivalent to a single rrect intersect clip, false otherwise.
+ */
+ bool isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const;
+
+ /**
+ * The generation ID has three reserved values to indicate special
+ * (potentially ignorable) cases
+ */
+ static const uint32_t kInvalidGenID = 0; //!< Invalid id that is never returned by
+ //!< SkClipStack. Useful when caching clips
+ //!< based on GenID.
+ static const uint32_t kEmptyGenID = 1; // no pixels writeable
+ static const uint32_t kWideOpenGenID = 2; // all pixels writeable
+
+ uint32_t getTopmostGenID() const;
+
+#ifdef SK_DEBUG
+ /**
+ * Dumps the contents of the clip stack to SkDebugf. This is intended for Skia development
+ * debugging. Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kBottom_IterStart = SkDeque::Iter::kFront_IterStart,
+ kTop_IterStart = SkDeque::Iter::kBack_IterStart
+ };
+
+ /**
+         * Creates an uninitialized iterator. Must be reset() before use.
+ */
+ Iter();
+
+ Iter(const SkClipStack& stack, IterStart startLoc);
+
+ /**
+ * Return the clip element for this iterator. If next()/prev() returns NULL, then the
+ * iterator is done.
+ */
+ const Element* next();
+ const Element* prev();
+
+ /**
+         * Moves the iterator to the topmost element with the specified SkClipOp and returns that
+ * element. If no clip element with that op is found, the first element is returned.
+ */
+ const Element* skipToTopmost(SkClipOp op);
+
+ /**
+ * Restarts the iterator on a clip stack.
+ */
+ void reset(const SkClipStack& stack, IterStart startLoc);
+
+ private:
+ const SkClipStack* fStack;
+ SkDeque::Iter fIter;
+ };
+
+ /**
+ * The B2TIter iterates from the bottom of the stack to the top.
+ * It inherits privately from Iter to prevent access to reverse iteration.
+ */
+ class B2TIter : private Iter {
+ public:
+ B2TIter() {}
+
+ /**
+         * Wrap Iter's 2-parameter ctor to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ B2TIter(const SkClipStack& stack)
+ : INHERITED(stack, kBottom_IterStart) {
+ }
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ void reset(const SkClipStack& stack) {
+ this->INHERITED::reset(stack, kBottom_IterStart);
+ }
+
+ private:
+
+ typedef Iter INHERITED;
+ };
+
+ /**
+ * GetConservativeBounds returns a conservative bound of the current clip.
+ * Since this could be the infinite plane (if inverse fills were involved) the
+ * maxWidth and maxHeight parameters can be used to limit the returned bound
+ * to the expected drawing area. Similarly, the offsetX and offsetY parameters
+ * allow the caller to offset the returned bound to account for translated
+ * drawing areas (i.e., those resulting from a saveLayer). For finite bounds,
+ * the translation (+offsetX, +offsetY) is applied before the clamp to the
+ * maximum rectangle: [0,maxWidth) x [0,maxHeight).
+ * isIntersectionOfRects is an optional parameter that is true when
+ * 'devBounds' is the result of an intersection of rects. In this case
+ * 'devBounds' is the exact answer/clip.
+ */
+ void getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects = nullptr) const;
+
+private:
+ friend class Iter;
+
+ SkDeque fDeque;
+ int fSaveCount;
+
+ SkRect fClipRestrictionRect = SkRect::MakeEmpty();
+
+ bool internalQuickContains(const SkRect& devRect) const;
+ bool internalQuickContains(const SkRRect& devRRect) const;
+
+ /**
+ * Helper for clipDevPath, etc.
+ */
+ void pushElement(const Element& element);
+
+ /**
+ * Restore the stack back to the specified save count.
+ */
+ void restoreTo(int saveCount);
+
+ inline bool hasClipRestriction(SkClipOp op) {
+ return op >= kUnion_SkClipOp && !fClipRestrictionRect.isEmpty();
+ }
+
+ /**
+ * Return the next unique generation ID.
+ */
+ static uint32_t GetNextGenID();
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkClipStackDevice.cpp b/gfx/skia/skia/src/core/SkClipStackDevice.cpp
new file mode 100644
index 0000000000..72bfd1bb6c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStackDevice.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkClipStackDevice.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterClip.h"
+
+SkIRect SkClipStackDevice::devClipBounds() const {
+ SkIRect r = fClipStack.bounds(this->imageInfo().bounds()).roundOut();
+ if (!r.isEmpty()) {
+ SkASSERT(this->imageInfo().bounds().contains(r));
+ }
+ return r;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkClipStackDevice::onSave() {
+ fClipStack.save();
+}
+
+void SkClipStackDevice::onRestore() {
+ fClipStack.restore();
+}
+
+void SkClipStackDevice::onClipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ fClipStack.clipRect(rect, this->ctm(), op, aa);
+}
+
+void SkClipStackDevice::onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ fClipStack.clipRRect(rrect, this->ctm(), op, aa);
+}
+
+void SkClipStackDevice::onClipPath(const SkPath& path, SkClipOp op, bool aa) {
+ fClipStack.clipPath(path, this->ctm(), op, aa);
+}
+
+void SkClipStackDevice::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ SkIPoint origin = this->getOrigin();
+ SkRegion tmp;
+ const SkRegion* ptr = &rgn;
+ if (origin.fX | origin.fY) {
+ // translate from "global/canvas" coordinates to relative to this device
+ rgn.translate(-origin.fX, -origin.fY, &tmp);
+ ptr = &tmp;
+ }
+ fClipStack.clipDevRect(ptr->getBounds(), op);
+}
+
+void SkClipStackDevice::onSetDeviceClipRestriction(SkIRect* clipRestriction) {
+ if (clipRestriction->isEmpty()) {
+ fClipStack.setDeviceClipRestriction(*clipRestriction);
+ } else {
+ SkIPoint origin = this->getOrigin();
+ SkIRect rect = clipRestriction->makeOffset(-origin);
+ fClipStack.setDeviceClipRestriction(rect);
+ fClipStack.clipDevRect(rect, SkClipOp::kIntersect);
+ }
+}
+
+bool SkClipStackDevice::onClipIsAA() const {
+ SkClipStack::B2TIter iter(fClipStack);
+ const SkClipStack::Element* element;
+
+ while ((element = iter.next()) != nullptr) {
+ if (element->isAA()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void SkClipStackDevice::onAsRgnClip(SkRegion* rgn) const {
+ SkClipStack::BoundsType boundType;
+ bool isIntersectionOfRects;
+ SkRect bounds;
+ fClipStack.getBounds(&bounds, &boundType, &isIntersectionOfRects);
+ if (isIntersectionOfRects && SkClipStack::kNormal_BoundsType == boundType) {
+ rgn->setRect(bounds.round());
+ } else {
+ SkPath path;
+ fClipStack.asPath(&path);
+ rgn->setPath(path, SkRegion(SkIRect::MakeWH(this->width(), this->height())));
+ }
+}
+
+SkBaseDevice::ClipType SkClipStackDevice::onGetClipType() const {
+ if (fClipStack.isWideOpen()) {
+ return ClipType::kRect;
+ }
+ if (fClipStack.isEmpty(SkIRect::MakeWH(this->width(), this->height()))) {
+ return ClipType::kEmpty;
+ } else {
+ SkClipStack::BoundsType boundType;
+ bool isIntersectionOfRects;
+ SkRect bounds;
+ fClipStack.getBounds(&bounds, &boundType, &isIntersectionOfRects);
+ if (isIntersectionOfRects && SkClipStack::kNormal_BoundsType == boundType) {
+ return ClipType::kRect;
+ } else {
+ return ClipType::kComplex;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkClipStackDevice.h b/gfx/skia/skia/src/core/SkClipStackDevice.h
new file mode 100644
index 0000000000..64b7021f87
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStackDevice.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipStackDevice_DEFINED
+#define SkClipStackDevice_DEFINED
+
+#include "src/core/SkClipStack.h"
+#include "src/core/SkDevice.h"
+
+class SkClipStackDevice : public SkBaseDevice {
+public:
+ SkClipStackDevice(const SkImageInfo& info, const SkSurfaceProps& props)
+ : SkBaseDevice(info, props)
+ , fClipStack(fStorage, sizeof(fStorage))
+ {}
+
+ SkClipStack& cs() { return fClipStack; }
+ const SkClipStack& cs() const { return fClipStack; }
+
+ SkIRect devClipBounds() const;
+
+protected:
+ void onSave() override;
+ void onRestore() override;
+ void onClipRect(const SkRect& rect, SkClipOp, bool aa) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) override;
+ void onClipPath(const SkPath& path, SkClipOp, bool aa) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+ void onSetDeviceClipRestriction(SkIRect* mutableClipRestriction) override;
+ bool onClipIsAA() const override;
+ void onAsRgnClip(SkRegion*) const override;
+ ClipType onGetClipType() const override;
+
+private:
+ enum {
+ kPreallocCount = 16 // empirically determined, adjust as needed to reduce mallocs
+ };
+ intptr_t fStorage[kPreallocCount * sizeof(SkClipStack::Element) / sizeof(intptr_t)];
+ SkClipStack fClipStack;
+
+ typedef SkBaseDevice INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColor.cpp b/gfx/skia/skia/src/core/SkColor.cpp
new file mode 100644
index 0000000000..38f619bdb1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColor.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkFixed.h"
+
+SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return SkPremultiplyARGBInline(a, r, g, b);
+}
+
+SkPMColor SkPreMultiplyColor(SkColor c) {
+ return SkPremultiplyARGBInline(SkColorGetA(c), SkColorGetR(c),
+ SkColorGetG(c), SkColorGetB(c));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkScalar ByteToScalar(U8CPU x) {
+ SkASSERT(x <= 255);
+ return SkIntToScalar(x) / 255;
+}
+
+static inline SkScalar ByteDivToScalar(int numer, U8CPU denom) {
+ // cast to keep the answer signed
+ return SkIntToScalar(numer) / (int)denom;
+}
+
+void SkRGBToHSV(U8CPU r, U8CPU g, U8CPU b, SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ unsigned min = SkMin32(r, SkMin32(g, b));
+ unsigned max = SkMax32(r, SkMax32(g, b));
+ unsigned delta = max - min;
+
+ SkScalar v = ByteToScalar(max);
+ SkASSERT(v >= 0 && v <= SK_Scalar1);
+
+ if (0 == delta) { // we're a shade of gray
+ hsv[0] = 0;
+ hsv[1] = 0;
+ hsv[2] = v;
+ return;
+ }
+
+ SkScalar s = ByteDivToScalar(delta, max);
+ SkASSERT(s >= 0 && s <= SK_Scalar1);
+
+ SkScalar h;
+ if (r == max) {
+ h = ByteDivToScalar(g - b, delta);
+ } else if (g == max) {
+ h = SkIntToScalar(2) + ByteDivToScalar(b - r, delta);
+ } else { // b == max
+ h = SkIntToScalar(4) + ByteDivToScalar(r - g, delta);
+ }
+
+ h *= 60;
+ if (h < 0) {
+ h += SkIntToScalar(360);
+ }
+ SkASSERT(h >= 0 && h < SkIntToScalar(360));
+
+ hsv[0] = h;
+ hsv[1] = s;
+ hsv[2] = v;
+}
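+
+// Editorial worked example (not part of the upstream file): pure red
+// (r, g, b) = (255, 0, 0) gives min = 0, max = 255, delta = 255, so v = 1,
+// s = 1, and since r == max, h = 60 * (g - b) / delta = 0: hsv = {0, 1, 1}.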
+
+SkColor SkHSVToColor(U8CPU a, const SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ SkScalar s = SkScalarPin(hsv[1], 0, 1);
+ SkScalar v = SkScalarPin(hsv[2], 0, 1);
+
+ U8CPU v_byte = SkScalarRoundToInt(v * 255);
+
+ if (SkScalarNearlyZero(s)) { // shade of gray
+ return SkColorSetARGB(a, v_byte, v_byte, v_byte);
+ }
+ SkScalar hx = (hsv[0] < 0 || hsv[0] >= SkIntToScalar(360)) ? 0 : hsv[0]/60;
+ SkScalar w = SkScalarFloorToScalar(hx);
+ SkScalar f = hx - w;
+
+ unsigned p = SkScalarRoundToInt((SK_Scalar1 - s) * v * 255);
+ unsigned q = SkScalarRoundToInt((SK_Scalar1 - (s * f)) * v * 255);
+ unsigned t = SkScalarRoundToInt((SK_Scalar1 - (s * (SK_Scalar1 - f))) * v * 255);
+
+ unsigned r, g, b;
+
+ SkASSERT((unsigned)(w) < 6);
+ switch ((unsigned)(w)) {
+ case 0: r = v_byte; g = t; b = p; break;
+ case 1: r = q; g = v_byte; b = p; break;
+ case 2: r = p; g = v_byte; b = t; break;
+ case 3: r = p; g = q; b = v_byte; break;
+ case 4: r = t; g = p; b = v_byte; break;
+ default: r = v_byte; g = p; b = q; break;
+ }
+ return SkColorSetARGB(a, r, g, b);
+}
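+
+// Editorial worked example (not part of the upstream file): hsv = {120, 1, 1}
+// with a = 255 gives hx = 2, w = 2, f = 0, so p = 0, q = 255, t = 0, and
+// case 2 selects (r, g, b) = (0, 255, 0): opaque green, as expected.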
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <>
+SkColor4f SkColor4f::FromColor(SkColor bgra) {
+ SkColor4f rgba;
+ Sk4f c4f = Sk4f_fromL32(bgra);
+#ifdef SK_CPU_BENDIAN
+ // ARGB -> RGBA
+ c4f = SkNx_shuffle<1, 2, 3, 0>(c4f);
+#else
+ // BGRA -> RGBA
+ c4f = swizzle_rb(c4f);
+#endif
+ c4f.store(rgba.vec());
+ return rgba;
+}
+
+template <>
+SkColor SkColor4f::toSkColor() const {
+ Sk4f c4f = Sk4f::Load(this->vec());
+#ifdef SK_CPU_BENDIAN
+ // RGBA -> ARGB
+ c4f = SkNx_shuffle<3, 0, 1, 2>(c4f);
+#else
+ // RGBA -> BGRA
+ c4f = swizzle_rb(c4f);
+#endif
+ return Sk4f_toL32(c4f);
+}
+
+template <>
+uint32_t SkColor4f::toBytes_RGBA() const {
+ return Sk4f_toL32(Sk4f::Load(this->vec()));
+}
+
+template <>
+SkColor4f SkColor4f::FromBytes_RGBA(uint32_t c) {
+ SkColor4f color;
+ Sk4f_fromL32(c).store(&color);
+ return color;
+}
+
+template <>
+SkPMColor4f SkPMColor4f::FromPMColor(SkPMColor c) {
+ SkPMColor4f color;
+ swizzle_rb_if_bgra(Sk4f_fromL32(c)).store(&color);
+ return color;
+}
+
+template <>
+uint32_t SkPMColor4f::toBytes_RGBA() const {
+ return Sk4f_toL32(Sk4f::Load(this->vec()));
+}
+
+template <>
+SkPMColor4f SkPMColor4f::FromBytes_RGBA(uint32_t c) {
+ SkPMColor4f color;
+ Sk4f_fromL32(c).store(&color);
+ return color;
+}
diff --git a/gfx/skia/skia/src/core/SkColorFilter.cpp b/gfx/skia/skia/src/core/SkColorFilter.cpp
new file mode 100644
index 0000000000..babab34a5e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter.cpp
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrMixerEffect.h"
+#endif
+
+bool SkColorFilter::onAsAColorMode(SkColor*, SkBlendMode*) const {
+ return false;
+}
+
+bool SkColorFilter::onAsAColorMatrix(float matrix[20]) const {
+ return false;
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkColorFilter::asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const {
+ return nullptr;
+}
+#endif
+
+bool SkColorFilter::appendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ return this->onAppendStages(rec, shaderIsOpaque);
+}
+
+SkColor SkColorFilter::filterColor(SkColor c) const {
+    // This is mostly meaningless. We should phase out this call entirely.
+ SkColorSpace* cs = nullptr;
+ return this->filterColor4f(SkColor4f::FromColor(c), cs, cs).toSkColor();
+}
+
+SkColor4f SkColorFilter::filterColor4f(const SkColor4f& origSrcColor, SkColorSpace* srcCS,
+ SkColorSpace* dstCS) const {
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_NO_SHADER
+ SkPMColor4f src = origSrcColor.premul();
+ SkColor4f color = *(SkColor4f*)&src;
+#else
+ SkColor4f color = origSrcColor;
+ SkColorSpaceXformSteps(srcCS, kUnpremul_SkAlphaType,
+ dstCS, kPremul_SkAlphaType).apply(color.vec());
+#endif
+
+ constexpr size_t kEnoughForCommonFilters = 512; // big enough for compose+colormatrix
+ SkSTArenaAlloc<kEnoughForCommonFilters> alloc;
+ SkRasterPipeline pipeline(&alloc);
+ pipeline.append_constant_color(&alloc, color.vec());
+ SkPaint dummyPaint;
+ SkStageRec rec = {
+ &pipeline, &alloc, kRGBA_F32_SkColorType, dstCS, dummyPaint, nullptr, SkMatrix::I()
+ };
+ this->onAppendStages(rec, color.fA == 1);
+
+ SkPMColor4f dst;
+ SkRasterPipeline_MemoryCtx dstPtr = { &dst, 0 };
+ pipeline.append(SkRasterPipeline::store_f32, &dstPtr);
+ pipeline.run(0,0, 1,1);
+ return dst.unpremul();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Since colorfilters may be used on the GPU backend, and in that case we may string together
+ * many GrFragmentProcessors, we might exceed some internal instruction/resource limit.
+ *
+ * Since we don't yet know *what* those limits might be when we construct the final shader,
+ * we just set an arbitrary limit during construction. If later we find smarter ways to know what
+ * the limits are, we can change this constant (or remove it).
+ */
+#define SK_MAX_COMPOSE_COLORFILTER_COUNT 4
+
+class SkComposeColorFilter : public SkColorFilter {
+public:
+ uint32_t getFlags() const override {
+        // Can only claim alpha-unchanged support if both of our proxies do.
+ return fOuter->getFlags() & fInner->getFlags();
+ }
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ bool innerIsOpaque = shaderIsOpaque;
+ if (!(fInner->getFlags() & kAlphaUnchanged_Flag)) {
+ innerIsOpaque = false;
+ }
+ return fInner->appendStages(rec, shaderIsOpaque) &&
+ fOuter->appendStages(rec, innerIsOpaque);
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+ GrRecordingContext* context, const GrColorInfo& dstColorInfo) const override {
+ auto innerFP = fInner->asFragmentProcessor(context, dstColorInfo);
+ auto outerFP = fOuter->asFragmentProcessor(context, dstColorInfo);
+ if (!innerFP || !outerFP) {
+ return nullptr;
+ }
+ std::unique_ptr<GrFragmentProcessor> series[] = { std::move(innerFP), std::move(outerFP) };
+ return GrFragmentProcessor::RunInSeries(series, 2);
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fOuter.get());
+ buffer.writeFlattenable(fInner.get());
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkComposeColorFilter)
+
+ SkComposeColorFilter(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner,
+ int composedFilterCount)
+ : fOuter(std::move(outer))
+ , fInner(std::move(inner))
+ , fComposedFilterCount(composedFilterCount)
+ {
+ SkASSERT(composedFilterCount >= 2);
+ SkASSERT(composedFilterCount <= SK_MAX_COMPOSE_COLORFILTER_COUNT);
+ }
+
+ int privateComposedFilterCount() const override {
+ return fComposedFilterCount;
+ }
+
+ sk_sp<SkColorFilter> fOuter;
+ sk_sp<SkColorFilter> fInner;
+ const int fComposedFilterCount;
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkComposeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorFilter> outer(buffer.readColorFilter());
+ sk_sp<SkColorFilter> inner(buffer.readColorFilter());
+ return outer ? outer->makeComposed(std::move(inner)) : inner;
+}
+
+
+sk_sp<SkColorFilter> SkColorFilter::makeComposed(sk_sp<SkColorFilter> inner) const {
+ if (!inner) {
+ return sk_ref_sp(this);
+ }
+
+ int count = inner->privateComposedFilterCount() + this->privateComposedFilterCount();
+ if (count > SK_MAX_COMPOSE_COLORFILTER_COUNT) {
+ return nullptr;
+ }
+ return sk_sp<SkColorFilter>(new SkComposeColorFilter(sk_ref_sp(this), std::move(inner), count));
+}
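+
+// Editorial usage sketch (not part of the upstream file): composition applies
+// the inner filter first, then the outer, i.e. result = outer(inner(color)).
+// `outer` and `inner` are hypothetical filters.
+//
+//   sk_sp<SkColorFilter> composed = outer->makeComposed(inner);
+//   // nullptr if the combined count exceeds SK_MAX_COMPOSE_COLORFILTER_COUNT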
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/effects/GrSRGBEffect.h"
+#endif
+
+class SkSRGBGammaColorFilter : public SkColorFilter {
+public:
+ enum class Direction {
+ kLinearToSRGB,
+ kSRGBToLinear,
+ };
+ SkSRGBGammaColorFilter(Direction dir) : fDir(dir), fSteps([&]{
+ // We handle premul/unpremul separately, so here just always upm->upm.
+ if (dir == Direction::kLinearToSRGB) {
+ return SkColorSpaceXformSteps{sk_srgb_linear_singleton(), kUnpremul_SkAlphaType,
+ sk_srgb_singleton(), kUnpremul_SkAlphaType};
+ } else {
+ return SkColorSpaceXformSteps{sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ sk_srgb_linear_singleton(), kUnpremul_SkAlphaType};
+ }
+ }()) {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override {
+ // wish our caller would let us know if our input was opaque...
+ GrSRGBEffect::Alpha alpha = GrSRGBEffect::Alpha::kPremul;
+ switch (fDir) {
+ case Direction::kLinearToSRGB:
+ return GrSRGBEffect::Make(GrSRGBEffect::Mode::kLinearToSRGB, alpha);
+ case Direction::kSRGBToLinear:
+ return GrSRGBEffect::Make(GrSRGBEffect::Mode::kSRGBToLinear, alpha);
+ }
+ return nullptr;
+ }
+#endif
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ if (!shaderIsOpaque) {
+ rec.fPipeline->append(SkRasterPipeline::unpremul);
+ }
+
+ // TODO: is it valuable to thread this through appendStages()?
+ bool shaderIsNormalized = false;
+ fSteps.apply(rec.fPipeline, shaderIsNormalized);
+
+ if (!shaderIsOpaque) {
+ rec.fPipeline->append(SkRasterPipeline::premul);
+ }
+ return true;
+ }
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.write32(static_cast<uint32_t>(fDir));
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkSRGBGammaColorFilter)
+
+ const Direction fDir;
+ SkColorSpaceXformSteps fSteps;
+
+ friend class SkColorFilter;
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkSRGBGammaColorFilter::CreateProc(SkReadBuffer& buffer) {
+ uint32_t dir = buffer.read32();
+ if (!buffer.validate(dir <= 1)) {
+ return nullptr;
+ }
+ return sk_sp<SkFlattenable>(new SkSRGBGammaColorFilter(static_cast<Direction>(dir)));
+}
+
+template <SkSRGBGammaColorFilter::Direction dir>
+sk_sp<SkColorFilter> MakeSRGBGammaCF() {
+ static SkColorFilter* gSingleton = new SkSRGBGammaColorFilter(dir);
+ return sk_ref_sp(gSingleton);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::LinearToSRGBGamma() {
+ return MakeSRGBGammaCF<SkSRGBGammaColorFilter::Direction::kLinearToSRGB>();
+}
+
+sk_sp<SkColorFilter> SkColorFilters::SRGBToLinearGamma() {
+ return MakeSRGBGammaCF<SkSRGBGammaColorFilter::Direction::kSRGBToLinear>();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkMixerColorFilter : public SkColorFilter {
+public:
+ SkMixerColorFilter(sk_sp<SkColorFilter> cf0, sk_sp<SkColorFilter> cf1, float weight)
+ : fCF0(std::move(cf0)), fCF1(std::move(cf1)), fWeight(weight)
+ {
+ SkASSERT(fCF0);
+ SkASSERT(fWeight >= 0 && fWeight <= 1);
+ }
+
+ uint32_t getFlags() const override {
+ uint32_t f0 = fCF0->getFlags();
+ uint32_t f1 = fCF1 ? fCF1->getFlags() : ~0U;
+ return f0 & f1;
+ }
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ // want cf0 * (1 - w) + cf1 * w == lerp(w)
+ // which means
+ // dr,dg,db,da <-- cf0
+ // r,g,b,a <-- cf1
+ struct State {
+ float orig_rgba[4 * SkRasterPipeline_kMaxStride];
+ float filtered_rgba[4 * SkRasterPipeline_kMaxStride];
+ };
+ auto state = rec.fAlloc->make<State>();
+ SkRasterPipeline* p = rec.fPipeline;
+
+ p->append(SkRasterPipeline::store_src, state->orig_rgba);
+ if (!fCF1) {
+ fCF0->appendStages(rec, shaderIsOpaque);
+ p->append(SkRasterPipeline::move_src_dst);
+ p->append(SkRasterPipeline::load_src, state->orig_rgba);
+ } else {
+ fCF0->appendStages(rec, shaderIsOpaque);
+ p->append(SkRasterPipeline::store_src, state->filtered_rgba);
+ p->append(SkRasterPipeline::load_src, state->orig_rgba);
+ fCF1->appendStages(rec, shaderIsOpaque);
+ p->append(SkRasterPipeline::load_dst, state->filtered_rgba);
+ }
+ float* storage = rec.fAlloc->make<float>(fWeight);
+ p->append(SkRasterPipeline::lerp_1_float, storage);
+ return true;
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+ GrRecordingContext* context, const GrColorInfo& dstColorInfo) const override {
+ return GrMixerEffect::Make(
+ fCF0->asFragmentProcessor(context, dstColorInfo),
+ fCF1 ? fCF1->asFragmentProcessor(context, dstColorInfo) : nullptr,
+ fWeight);
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fCF0.get());
+ buffer.writeFlattenable(fCF1.get());
+ buffer.writeScalar(fWeight);
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkMixerColorFilter)
+
+ sk_sp<SkColorFilter> fCF0;
+ sk_sp<SkColorFilter> fCF1;
+ const float fWeight;
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkMixerColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorFilter> cf0(buffer.readColorFilter());
+ sk_sp<SkColorFilter> cf1(buffer.readColorFilter());
+ const float weight = buffer.readScalar();
+ return SkColorFilters::Lerp(weight, std::move(cf0), std::move(cf1));
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Lerp(float weight, sk_sp<SkColorFilter> cf0,
+ sk_sp<SkColorFilter> cf1) {
+ if (!cf0 && !cf1) {
+ return nullptr;
+ }
+ if (SkScalarIsNaN(weight)) {
+ return nullptr;
+ }
+
+ if (cf0 == cf1) {
+ return cf0; // or cf1
+ }
+
+ if (weight <= 0) {
+ return cf0;
+ }
+ if (weight >= 1) {
+ return cf1;
+ }
+
+ return sk_sp<SkColorFilter>(cf0
+ ? new SkMixerColorFilter(std::move(cf0), std::move(cf1), weight)
+ : new SkMixerColorFilter(std::move(cf1), nullptr, 1 - weight));
+}
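+
+// Editorial usage sketch (not part of the upstream file): a null filter acts
+// as the identity, so this blends a hypothetical `grayscale` filter's output
+// 50/50 with the unfiltered color.
+//
+//   sk_sp<SkColorFilter> half = SkColorFilters::Lerp(0.5f, grayscale, nullptr);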
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/private/SkMutex.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/sksl/SkSLByteCode.h"
+
+class SkRuntimeColorFilter : public SkColorFilter {
+public:
+ SkRuntimeColorFilter(int index, SkString sksl, sk_sp<SkData> inputs,
+ void (*cpuFunction)(float[4], const void*))
+ : fIndex(index)
+ , fSkSL(std::move(sksl))
+ , fInputs(std::move(inputs))
+ , fCpuFunction(cpuFunction) {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext* context,
+ const GrColorInfo&) const override {
+ return GrSkSLFP::Make(context, fIndex, "Runtime Color Filter", fSkSL,
+ fInputs ? fInputs->data() : nullptr,
+ fInputs ? fInputs->size() : 0);
+ }
+#endif
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ if (fCpuFunction) {
+ struct CpuFuncCtx : public SkRasterPipeline_CallbackCtx {
+ SkRuntimeColorFilterFn cpuFn;
+ const void* inputs;
+ };
+ auto ctx = rec.fAlloc->make<CpuFuncCtx>();
+ ctx->inputs = fInputs->data();
+ ctx->cpuFn = fCpuFunction;
+ ctx->fn = [](SkRasterPipeline_CallbackCtx* arg, int active_pixels) {
+ auto ctx = (CpuFuncCtx*)arg;
+ for (int i = 0; i < active_pixels; i++) {
+ ctx->cpuFn(ctx->rgba + i * 4, ctx->inputs);
+ }
+ };
+ rec.fPipeline->append(SkRasterPipeline::callback, ctx);
+ } else {
+ auto ctx = rec.fAlloc->make<SkRasterPipeline_InterpreterCtx>();
+ // don't need to set ctx->paintColor
+ ctx->inputs = fInputs->data();
+ ctx->ninputs = fInputs->size() / 4;
+ ctx->shaderConvention = false;
+
+ SkAutoMutexExclusive ama(fByteCodeMutex);
+ if (!fByteCode) {
+ SkSL::Compiler c;
+ auto prog = c.convertProgram(SkSL::Program::kPipelineStage_Kind,
+ SkSL::String(fSkSL.c_str()),
+ SkSL::Program::Settings());
+ if (c.errorCount()) {
+ SkDebugf("%s\n", c.errorText().c_str());
+ return false;
+ }
+ fByteCode = c.toByteCode(*prog);
+ }
+ ctx->byteCode = fByteCode.get();
+ ctx->fn = ctx->byteCode->getFunction("main");
+ rec.fPipeline->append(SkRasterPipeline::interpreter, ctx);
+ }
+ return true;
+ }
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ // the client is responsible for ensuring that the indices match up between flattening and
+ // unflattening; we don't have a reasonable way to enforce that at the moment
+ buffer.writeInt(fIndex);
+ buffer.writeString(fSkSL.c_str());
+ if (fInputs) {
+ buffer.writeDataAsByteArray(fInputs.get());
+ } else {
+ buffer.writeByteArray(nullptr, 0);
+ }
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkRuntimeColorFilter)
+
+ int fIndex;
+ SkString fSkSL;
+ sk_sp<SkData> fInputs;
+ SkRuntimeColorFilterFn fCpuFunction;
+
+ mutable SkMutex fByteCodeMutex;
+ mutable std::unique_ptr<SkSL::ByteCode> fByteCode;
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkRuntimeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ int index = buffer.readInt();
+ SkString sksl;
+ buffer.readString(&sksl);
+ sk_sp<SkData> inputs = buffer.readByteArrayAsData();
+ return sk_sp<SkFlattenable>(new SkRuntimeColorFilter(index, std::move(sksl), std::move(inputs),
+ nullptr));
+}
+
+SkRuntimeColorFilterFactory::SkRuntimeColorFilterFactory(SkString sksl,
+ SkRuntimeColorFilterFn cpuFunc)
+ : fIndex(GrSkSLFP::NewIndex())
+ , fSkSL(std::move(sksl))
+ , fCpuFunc(cpuFunc) {}
+
+sk_sp<SkColorFilter> SkRuntimeColorFilterFactory::make(sk_sp<SkData> inputs) {
+ return sk_sp<SkColorFilter>(new SkRuntimeColorFilter(fIndex, fSkSL, std::move(inputs),
+ fCpuFunc));
+}
+
+#endif // SK_SUPPORT_GPU
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkModeColorFilter.h"
+
+void SkColorFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkComposeColorFilter);
+ SK_REGISTER_FLATTENABLE(SkModeColorFilter);
+ SK_REGISTER_FLATTENABLE(SkSRGBGammaColorFilter);
+ SK_REGISTER_FLATTENABLE(SkMixerColorFilter);
+#if SK_SUPPORT_GPU
+ SK_REGISTER_FLATTENABLE(SkRuntimeColorFilter);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkColorFilterPriv.h b/gfx/skia/skia/src/core/SkColorFilterPriv.h
new file mode 100644
index 0000000000..a386e6ccb2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilterPriv.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkColorFilterPriv_DEFINED
+#define SkColorFilterPriv_DEFINED
+
+#ifdef SK_SUPPORT_GPU
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkString.h"
+
+using SkRuntimeColorFilterFn = void(*)(float[4], const void*);
+
+class SK_API SkRuntimeColorFilterFactory {
+public:
+ /**
+ * Creates a factory which creates runtime color filters. The SkSL must define a 'main' function
+ * with the signature 'void main(inout half4 color)'. The SkSL will be used when rendering in
+ * GPU mode, with the 'color' parameter providing the current value on input and receiving the
+ * new color value on exit. In software mode, the cpuFunc will be called with the current color
+ * and a pointer to the 'inputs' bytes. cpuFunc may be left null, in which case only GPU
+ * rendering is supported.
+ */
+ SkRuntimeColorFilterFactory(SkString sksl, SkRuntimeColorFilterFn cpuFunc = nullptr);
+
+ /**
+ * Creates a color filter instance with the specified inputs. In GPU rendering, the inputs are
+ * used to populate the values of 'in' variables. For instance, given the color filter:
+ * in uniform float x;
+ * in uniform float y;
+ * void main(inout half4 color) {
+ * ...
+ * }
+ * The values of the x and y inputs come from the 'inputs' SkData, which are laid out as a
+ * struct with two float elements. If there are no inputs, the 'inputs' parameter may be null.
+ *
+ * In CPU rendering, a pointer to the input bytes is passed as the second parameter to
+ * 'cpuFunc'.
+ */
+ sk_sp<SkColorFilter> make(sk_sp<SkData> inputs);
+
+private:
+ int fIndex;
+ SkString fSkSL;
+ SkRuntimeColorFilterFn fCpuFunc;
+};
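+
+// Editorial usage sketch (not part of the upstream file); the SkSL string and
+// input layout here are hypothetical:
+//
+//   SkRuntimeColorFilterFactory factory(SkString(
+//       "in uniform float gain;"
+//       "void main(inout half4 color) { color.rgb *= half(gain); }"));
+//   float gain = 0.5f;
+//   sk_sp<SkColorFilter> cf =
+//       factory.make(SkData::MakeWithCopy(&gain, sizeof(gain)));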
+
+#endif // SK_SUPPORT_GPU
+
+#endif // SkColorFilterPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp b/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp
new file mode 100644
index 0000000000..cdc15dd018
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkColorMatrix.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkColorFilter_Matrix.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+static uint16_t ComputeFlags(const float matrix[20]) {
+ const float* srcA = matrix + 15;
+
+ return SkScalarNearlyZero (srcA[0])
+ && SkScalarNearlyZero (srcA[1])
+ && SkScalarNearlyZero (srcA[2])
+ && SkScalarNearlyEqual(srcA[3], 1)
+ && SkScalarNearlyZero (srcA[4])
+ ? SkColorFilter::kAlphaUnchanged_Flag : 0;
+}
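+
+// Editorial note (not part of the upstream file): the flag is set when the
+// alpha row (the last five entries) is nearly [0, 0, 0, 1, 0], i.e. dstA is
+// srcA. A plain grayscale matrix qualifies; anything that scales or offsets
+// alpha does not.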
+
+SkColorFilter_Matrix::SkColorFilter_Matrix(const float array[20], Domain domain)
+ : fFlags(ComputeFlags(array))
+ , fDomain(domain) {
+ memcpy(fMatrix, array, 20 * sizeof(float));
+}
+
+uint32_t SkColorFilter_Matrix::getFlags() const {
+ return this->INHERITED::getFlags() | fFlags;
+}
+
+void SkColorFilter_Matrix::flatten(SkWriteBuffer& buffer) const {
+ SkASSERT(sizeof(fMatrix)/sizeof(float) == 20);
+ buffer.writeScalarArray(fMatrix, 20);
+
+ // RGBA flag
+ buffer.writeBool(fDomain == Domain::kRGBA);
+}
+
+sk_sp<SkFlattenable> SkColorFilter_Matrix::CreateProc(SkReadBuffer& buffer) {
+ float matrix[20];
+ if (!buffer.readScalarArray(matrix, 20)) {
+ return nullptr;
+ }
+
+ auto is_rgba = buffer.isVersionLT(SkPicturePriv::kMatrixColorFilterDomain_Version) ||
+ buffer.readBool();
+ return is_rgba ? SkColorFilters::Matrix(matrix)
+ : SkColorFilters::HSLAMatrix(matrix);
+}
+
+bool SkColorFilter_Matrix::onAsAColorMatrix(float matrix[20]) const {
+ if (matrix) {
+ memcpy(matrix, fMatrix, 20 * sizeof(float));
+ }
+ return true;
+}
+
+bool SkColorFilter_Matrix::onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ const bool willStayOpaque = shaderIsOpaque && (fFlags & kAlphaUnchanged_Flag),
+ hsla = fDomain == Domain::kHSLA;
+
+ SkRasterPipeline* p = rec.fPipeline;
+ if (!shaderIsOpaque) { p->append(SkRasterPipeline::unpremul); }
+ if ( hsla) { p->append(SkRasterPipeline::rgb_to_hsl); }
+ if ( true) { p->append(SkRasterPipeline::matrix_4x5, fMatrix); }
+ if ( hsla) { p->append(SkRasterPipeline::hsl_to_rgb); }
+ if ( true) { p->append(SkRasterPipeline::clamp_0); }
+ if ( true) { p->append(SkRasterPipeline::clamp_1); }
+ if (!willStayOpaque) { p->append(SkRasterPipeline::premul); }
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h"
+std::unique_ptr<GrFragmentProcessor> SkColorFilter_Matrix::asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo&) const {
+ if (fDomain == Domain::kHSLA) {
+ // TODO
+ return nullptr;
+ }
+ return GrColorMatrixFragmentProcessor::Make(fMatrix,
+ /* premulInput = */ true,
+ /* clampRGBOutput = */ true,
+ /* premulOutput = */ true);
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkColorFilter> MakeMatrix(const float array[20],
+ SkColorFilter_Matrix::Domain domain) {
+ return sk_floats_are_finite(array, 20)
+ ? sk_make_sp<SkColorFilter_Matrix>(array, domain)
+ : nullptr;
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Matrix(const float array[20]) {
+ return MakeMatrix(array, SkColorFilter_Matrix::Domain::kRGBA);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Matrix(const SkColorMatrix& cm) {
+ return MakeMatrix(cm.fMat, SkColorFilter_Matrix::Domain::kRGBA);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::HSLAMatrix(const float array[20]) {
+ return MakeMatrix(array, SkColorFilter_Matrix::Domain::kHSLA);
+}
+
+void SkColorFilter_Matrix::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkColorFilter_Matrix);
+
+ // This subclass was removed 4/2019
+ SkFlattenable::Register("SkColorMatrixFilterRowMajor255",
+ [](SkReadBuffer& buffer) -> sk_sp<SkFlattenable> {
+ float matrix[20];
+ if (buffer.readScalarArray(matrix, 20)) {
+ matrix[ 4] *= (1.0f/255);
+ matrix[ 9] *= (1.0f/255);
+ matrix[14] *= (1.0f/255);
+ matrix[19] *= (1.0f/255);
+ return SkColorFilters::Matrix(matrix);
+ }
+ return nullptr;
+ });
+}
diff --git a/gfx/skia/skia/src/core/SkColorFilter_Matrix.h b/gfx/skia/skia/src/core/SkColorFilter_Matrix.h
new file mode 100644
index 0000000000..7f019882a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter_Matrix.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilter_Matrix_DEFINED
+#define SkColorFilter_Matrix_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+
+class SkColorFilter_Matrix : public SkColorFilter {
+public:
+ enum class Domain : uint8_t { kRGBA, kHSLA };
+
+ explicit SkColorFilter_Matrix(const float array[20], Domain);
+
+ uint32_t getFlags() const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+ static void RegisterFlattenables();
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onAsAColorMatrix(float matrix[20]) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkColorFilter_Matrix)
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+ float fMatrix[20];
+ uint16_t fFlags;
+ Domain fDomain;
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorSpace.cpp b/gfx/skia/skia/src/core/SkColorSpace.cpp
new file mode 100644
index 0000000000..4e77d7c3ec
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpace.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/third_party/skcms/skcms.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkOpts.h"
+
+bool SkColorSpacePrimaries::toXYZD50(skcms_Matrix3x3* toXYZ_D50) const {
+ return skcms_PrimariesToXYZD50(fRX, fRY, fGX, fGY, fBX, fBY, fWX, fWY, toXYZ_D50);
+}
+
+SkColorSpace::SkColorSpace(const float transferFn[7],
+ const skcms_Matrix3x3& toXYZD50) {
+ memcpy(fToXYZD50_3x3, &toXYZD50.vals[0][0], 9*sizeof(float));
+ fToXYZD50Hash = SkOpts::hash_fn(fToXYZD50_3x3, 9*sizeof(float), 0);
+
+ memcpy(fTransferFn, transferFn, 7*sizeof(float));
+ fTransferFnHash = SkOpts::hash_fn(fTransferFn, 7*sizeof(float), 0);
+}
+
+static bool xyz_almost_equal(const skcms_Matrix3x3& mA, const skcms_Matrix3x3& mB) {
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 3; ++c) {
+ if (!color_space_almost_equal(mA.vals[r][c], mB.vals[r][c])) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeRGB(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& toXYZ) {
+ if (classify_transfer_fn(transferFn) == Bad_TF) {
+ return nullptr;
+ }
+
+ const float* tf = &transferFn.g;
+
+ if (is_almost_srgb(transferFn)) {
+ if (xyz_almost_equal(toXYZ, SkNamedGamut::kSRGB)) {
+ return SkColorSpace::MakeSRGB();
+ }
+ tf = &SkNamedTransferFn::kSRGB.g;
+ } else if (is_almost_2dot2(transferFn)) {
+ tf = &SkNamedTransferFn::k2Dot2.g;
+ } else if (is_almost_linear(transferFn)) {
+ if (xyz_almost_equal(toXYZ, SkNamedGamut::kSRGB)) {
+ return SkColorSpace::MakeSRGBLinear();
+ }
+ tf = &SkNamedTransferFn::kLinear.g;
+ }
+
+ return sk_sp<SkColorSpace>(new SkColorSpace(tf, toXYZ));
+}
+
+class SkColorSpaceSingletonFactory {
+public:
+ static SkColorSpace* Make(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& to_xyz) {
+ return new SkColorSpace(&transferFn.g, to_xyz);
+ }
+};
+
+SkColorSpace* sk_srgb_singleton() {
+ static SkColorSpace* cs = SkColorSpaceSingletonFactory::Make(SkNamedTransferFn::kSRGB,
+ SkNamedGamut::kSRGB);
+ return cs;
+}
+
+SkColorSpace* sk_srgb_linear_singleton() {
+ static SkColorSpace* cs = SkColorSpaceSingletonFactory::Make(SkNamedTransferFn::kLinear,
+ SkNamedGamut::kSRGB);
+ return cs;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeSRGB() {
+ return sk_ref_sp(sk_srgb_singleton());
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeSRGBLinear() {
+ return sk_ref_sp(sk_srgb_linear_singleton());
+}
+
+void SkColorSpace::computeLazyDstFields() const {
+ fLazyDstFieldsOnce([this] {
+
+ // Invert 3x3 gamut, defaulting to sRGB if we can't.
+ {
+ skcms_Matrix3x3 fwd, inv;
+ memcpy(&fwd, fToXYZD50_3x3, 9*sizeof(float));
+ if (!skcms_Matrix3x3_invert(&fwd, &inv)) {
+ SkAssertResult(skcms_Matrix3x3_invert(&skcms_sRGB_profile()->toXYZD50, &inv));
+ }
+ memcpy(fFromXYZD50_3x3, &inv, 9*sizeof(float));
+ }
+
+ // Invert transfer function, defaulting to sRGB if we can't.
+ {
+ skcms_TransferFunction fwd, inv;
+ this->transferFn(&fwd.g);
+ if (!skcms_TransferFunction_invert(&fwd, &inv)) {
+ inv = *skcms_sRGB_Inverse_TransferFunction();
+ }
+ memcpy(fInvTransferFn, &inv, 7*sizeof(float));
+ }
+
+ });
+}
+
+bool SkColorSpace::isNumericalTransferFn(skcms_TransferFunction* coeffs) const {
+ // TODO: Change transferFn/invTransferFn to just operate on skcms_TransferFunction (all callers
+ // already pass pointers to an skcms struct). Then remove this function, and update the two
+ // remaining callers to do the right thing with transferFn and classify.
+ this->transferFn(&coeffs->g);
+ return classify_transfer_fn(*coeffs) == sRGBish_TF;
+}
+
+void SkColorSpace::transferFn(float gabcdef[7]) const {
+ memcpy(gabcdef, &fTransferFn, 7*sizeof(float));
+}
+
+void SkColorSpace::invTransferFn(float gabcdef[7]) const {
+ this->computeLazyDstFields();
+ memcpy(gabcdef, &fInvTransferFn, 7*sizeof(float));
+}
+
+bool SkColorSpace::toXYZD50(SkMatrix44* toXYZD50) const {
+ toXYZD50->set3x3RowMajorf(fToXYZD50_3x3);
+ return true;
+}
+
+bool SkColorSpace::toXYZD50(skcms_Matrix3x3* toXYZD50) const {
+ memcpy(toXYZD50, fToXYZD50_3x3, 9*sizeof(float));
+ return true;
+}
+
+void SkColorSpace::gamutTransformTo(const SkColorSpace* dst, float src_to_dst[9]) const {
+ dst->computeLazyDstFields();
+
+ skcms_Matrix3x3 toXYZD50,
+ fromXYZD50;
+
+ memcpy(& toXYZD50, this-> fToXYZD50_3x3, 9*sizeof(float));
+ memcpy(&fromXYZD50, dst ->fFromXYZD50_3x3, 9*sizeof(float));
+
+ skcms_Matrix3x3 srcToDst = skcms_Matrix3x3_concat(&fromXYZD50, &toXYZD50);
+ memcpy(src_to_dst, &srcToDst, 9*sizeof(float));
+}
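+
+// Editorial note (not part of the upstream file): the concat above reads
+// right-to-left, so src_to_dst = dst.fromXYZD50 * src.toXYZD50; a color is
+// mapped from the source gamut into XYZ D50, then from XYZ into dst's gamut.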
+
+bool SkColorSpace::isSRGB() const {
+ return sk_srgb_singleton() == this;
+}
+
+bool SkColorSpace::gammaCloseToSRGB() const {
+ // Nearly-equal transfer functions were snapped at construction time, so just do an exact test
+ return memcmp(fTransferFn, &SkNamedTransferFn::kSRGB.g, 7*sizeof(float)) == 0;
+}
+
+bool SkColorSpace::gammaIsLinear() const {
+ // Nearly-equal transfer functions were snapped at construction time, so just do an exact test
+ return memcmp(fTransferFn, &SkNamedTransferFn::kLinear.g, 7*sizeof(float)) == 0;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeLinearGamma() const {
+ if (this->gammaIsLinear()) {
+ return sk_ref_sp(const_cast<SkColorSpace*>(this));
+ }
+ skcms_Matrix3x3 gamut;
+ this->toXYZD50(&gamut);
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kLinear, gamut);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeSRGBGamma() const {
+ if (this->gammaCloseToSRGB()) {
+ return sk_ref_sp(const_cast<SkColorSpace*>(this));
+ }
+ skcms_Matrix3x3 gamut;
+ this->toXYZD50(&gamut);
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, gamut);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeColorSpin() const {
+ skcms_Matrix3x3 spin = {{
+ { 0, 0, 1 },
+ { 1, 0, 0 },
+ { 0, 1, 0 },
+ }};
+
+ skcms_Matrix3x3 toXYZ;
+ this->toXYZD50(&toXYZ);
+
+ skcms_Matrix3x3 spun = skcms_Matrix3x3_concat(&toXYZ, &spin);
+
+ return sk_sp<SkColorSpace>(new SkColorSpace(fTransferFn, spun));
+}
+
+void SkColorSpace::toProfile(skcms_ICCProfile* profile) const {
+ skcms_TransferFunction tf;
+ skcms_Matrix3x3 toXYZD50;
+
+ memcpy(&tf, fTransferFn, 7*sizeof(float));
+ memcpy(&toXYZD50, fToXYZD50_3x3, 9*sizeof(float));
+
+ skcms_Init (profile);
+ skcms_SetTransferFunction(profile, &tf);
+ skcms_SetXYZD50 (profile, &toXYZD50);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::Make(const skcms_ICCProfile& profile) {
+ // TODO: move below ≈sRGB test?
+ if (!profile.has_toXYZD50 || !profile.has_trc) {
+ return nullptr;
+ }
+
+ if (skcms_ApproximatelyEqualProfiles(&profile, skcms_sRGB_profile())) {
+ return SkColorSpace::MakeSRGB();
+ }
+
+ // TODO: can we save this work and skip lazily inverting the matrix later?
+ skcms_Matrix3x3 inv;
+ if (!skcms_Matrix3x3_invert(&profile.toXYZD50, &inv)) {
+ return nullptr;
+ }
+
+ // We can't work with tables or mismatched parametric curves,
+ // but if they all look close enough to sRGB, that's fine.
+ // TODO: should we maybe do this unconditionally to snap near-sRGB parametrics to sRGB?
+ const skcms_Curve* trc = profile.trc;
+ if (trc[0].table_entries != 0 ||
+ trc[1].table_entries != 0 ||
+ trc[2].table_entries != 0 ||
+ 0 != memcmp(&trc[0].parametric, &trc[1].parametric, sizeof(trc[0].parametric)) ||
+ 0 != memcmp(&trc[0].parametric, &trc[2].parametric, sizeof(trc[0].parametric)))
+ {
+ if (skcms_TRCs_AreApproximateInverse(&profile, skcms_sRGB_Inverse_TransferFunction())) {
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, profile.toXYZD50);
+ }
+ return nullptr;
+ }
+
+ return SkColorSpace::MakeRGB(profile.trc[0].parametric, profile.toXYZD50);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+enum Version {
+ k0_Version, // Initial version, header + flags for matrix and profile
+ k1_Version, // Simple header (version tag) + 16 floats
+
+ kCurrent_Version = k1_Version,
+};
+
+enum NamedColorSpace {
+ kSRGB_NamedColorSpace,
+ kAdobeRGB_NamedColorSpace,
+ kSRGBLinear_NamedColorSpace,
+};
+
+enum NamedGamma {
+ kLinear_NamedGamma,
+ kSRGB_NamedGamma,
+ k2Dot2_NamedGamma,
+};
+
+struct ColorSpaceHeader {
+ // Flag values, only used by old (k0_Version) serialization
+ static constexpr uint8_t kMatrix_Flag = 1 << 0;
+ static constexpr uint8_t kICC_Flag = 1 << 1;
+ static constexpr uint8_t kTransferFn_Flag = 1 << 3;
+
+ uint8_t fVersion = kCurrent_Version;
+
+ // Other fields are only used by k0_Version. Could be re-purposed in future versions.
+ uint8_t fNamed = 0;
+ uint8_t fGammaNamed = 0;
+ uint8_t fFlags = 0;
+};
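+
+// For reference, the k1_Version layout written below is (a sketch derived from
+// writeToMemory(), not a normative format description):
+//
+//     ColorSpaceHeader    4 bytes (fVersion, fNamed, fGammaNamed, fFlags)
+//     float[7]            transfer function, in skcms g,a,b,c,d,e,f order
+//     float[9]            row-major 3x3 toXYZD50 matrix
+//
+// for a total of sizeof(ColorSpaceHeader) + 16*sizeof(float) = 68 bytes.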
+
+size_t SkColorSpace::writeToMemory(void* memory) const {
+ if (memory) {
+ *((ColorSpaceHeader*) memory) = ColorSpaceHeader();
+ memory = SkTAddOffset<void>(memory, sizeof(ColorSpaceHeader));
+
+ memcpy(memory, fTransferFn, 7 * sizeof(float));
+ memory = SkTAddOffset<void>(memory, 7 * sizeof(float));
+
+ memcpy(memory, fToXYZD50_3x3, 9 * sizeof(float));
+ }
+
+ return sizeof(ColorSpaceHeader) + 16 * sizeof(float);
+}
+
+sk_sp<SkData> SkColorSpace::serialize() const {
+ sk_sp<SkData> data = SkData::MakeUninitialized(this->writeToMemory(nullptr));
+ this->writeToMemory(data->writable_data());
+ return data;
+}
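+
+// A serialize/deserialize round trip, as a sketch (`cs` is a hypothetical
+// sk_sp<SkColorSpace>; only functions defined in this file are used):
+//
+//     sk_sp<SkData> blob = cs->serialize();
+//     sk_sp<SkColorSpace> copy = SkColorSpace::Deserialize(blob->data(), blob->size());
+//     SkASSERT(SkColorSpace::Equals(cs.get(), copy.get()));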
+
+sk_sp<SkColorSpace> SkColorSpace::Deserialize(const void* data, size_t length) {
+ if (length < sizeof(ColorSpaceHeader)) {
+ return nullptr;
+ }
+
+ ColorSpaceHeader header = *((const ColorSpaceHeader*) data);
+ data = SkTAddOffset<const void>(data, sizeof(ColorSpaceHeader));
+ length -= sizeof(ColorSpaceHeader);
+ if (k1_Version == header.fVersion) {
+ if (length < 16 * sizeof(float)) {
+ return nullptr;
+ }
+
+ skcms_TransferFunction transferFn;
+ memcpy(&transferFn, data, 7 * sizeof(float));
+ data = SkTAddOffset<const void>(data, 7 * sizeof(float));
+
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ, data, 9 * sizeof(float));
+ return SkColorSpace::MakeRGB(transferFn, toXYZ);
+ } else if (k0_Version == header.fVersion) {
+ if (0 == header.fFlags) {
+ switch ((NamedColorSpace)header.fNamed) {
+ case kSRGB_NamedColorSpace:
+ return SkColorSpace::MakeSRGB();
+ case kSRGBLinear_NamedColorSpace:
+ return SkColorSpace::MakeSRGBLinear();
+ case kAdobeRGB_NamedColorSpace:
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::k2Dot2,
+ SkNamedGamut::kAdobeRGB);
+ }
+ }
+
+ auto make_named_tf = [=](const skcms_TransferFunction& tf) {
+ if (ColorSpaceHeader::kMatrix_Flag != header.fFlags || length < 12 * sizeof(float)) {
+ return sk_sp<SkColorSpace>(nullptr);
+ }
+
+ // Version 0 matrix is row-major 3x4
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ.vals[0][0], (const float*)data + 0, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[1][0], (const float*)data + 4, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[2][0], (const float*)data + 8, 3 * sizeof(float));
+ return SkColorSpace::MakeRGB(tf, toXYZ);
+ };
+
+ switch ((NamedGamma) header.fGammaNamed) {
+ case kSRGB_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::kSRGB);
+ case k2Dot2_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::k2Dot2);
+ case kLinear_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::kLinear);
+ default:
+ break;
+ }
+
+ switch (header.fFlags) {
+ case ColorSpaceHeader::kICC_Flag: {
+ // Deprecated and unsupported code path
+ return nullptr;
+ }
+ case ColorSpaceHeader::kTransferFn_Flag: {
+ if (length < 19 * sizeof(float)) {
+ return nullptr;
+ }
+
+ // Version 0 TF is in abcdefg order
+ skcms_TransferFunction transferFn;
+ transferFn.a = *(((const float*) data) + 0);
+ transferFn.b = *(((const float*) data) + 1);
+ transferFn.c = *(((const float*) data) + 2);
+ transferFn.d = *(((const float*) data) + 3);
+ transferFn.e = *(((const float*) data) + 4);
+ transferFn.f = *(((const float*) data) + 5);
+ transferFn.g = *(((const float*) data) + 6);
+ data = SkTAddOffset<const void>(data, 7 * sizeof(float));
+
+ // Version 0 matrix is row-major 3x4
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ.vals[0][0], (const float*)data + 0, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[1][0], (const float*)data + 4, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[2][0], (const float*)data + 8, 3 * sizeof(float));
+ return SkColorSpace::MakeRGB(transferFn, toXYZ);
+ }
+ default:
+ return nullptr;
+ }
+ } else {
+ return nullptr;
+ }
+}
+
+bool SkColorSpace::Equals(const SkColorSpace* x, const SkColorSpace* y) {
+ if (x == y) {
+ return true;
+ }
+
+ if (!x || !y) {
+ return false;
+ }
+
+ if (x->hash() == y->hash()) {
+ for (int i = 0; i < 7; i++) {
+            SkASSERT(x-> fTransferFn[i] == y-> fTransferFn[i] && "Hash collision");
+ }
+ for (int i = 0; i < 9; i++) {
+            SkASSERT(x->fToXYZD50_3x3[i] == y->fToXYZD50_3x3[i] && "Hash collision");
+ }
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpacePriv.h b/gfx/skia/skia/src/core/SkColorSpacePriv.h
new file mode 100644
index 0000000000..425357db7b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpacePriv.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkColorSpacePriv_DEFINED
+#define SkColorSpacePriv_DEFINED
+
+#include <math.h>
+
+#include "include/core/SkColorSpace.h"
+#include "include/private/SkFixed.h"
+
+#define SkColorSpacePrintf(...)
+
+// A gamut narrower than sRGB, useful for testing.
+static constexpr skcms_Matrix3x3 gNarrow_toXYZD50 = {{
+ { 0.190974f, 0.404865f, 0.368380f },
+ { 0.114746f, 0.582937f, 0.302318f },
+ { 0.032925f, 0.153615f, 0.638669f },
+}};
+
+static inline bool color_space_almost_equal(float a, float b) {
+ return SkTAbs(a - b) < 0.01f;
+}
+
+// Let's use a stricter version for transfer functions. Worst case, these are encoded
+// in ICC format, which offers 16 bits of fractional precision.
+static inline bool transfer_fn_almost_equal(float a, float b) {
+ return SkTAbs(a - b) < 0.001f;
+}
+
+// NOTE: All of this logic is copied from skcms.cc, and needs to be kept in sync.
+
+// Most transfer functions we work with are sRGBish.
+// For exotic HDR transfer functions, we encode them using a tf.g that makes no sense,
+// and repurpose the other fields to hold the parameters of the HDR functions.
+enum TFKind { Bad_TF, sRGBish_TF, PQish_TF, HLGish_TF, HLGinvish_TF };
+
+static inline TFKind classify_transfer_fn(const skcms_TransferFunction& tf) {
+ if (tf.g < 0 && (int)tf.g == tf.g) {
+ // TODO: sanity checks for PQ/HLG like we do for sRGBish.
+ switch (-(int)tf.g) {
+ case PQish_TF: return PQish_TF;
+ case HLGish_TF: return HLGish_TF;
+ case HLGinvish_TF: return HLGinvish_TF;
+ }
+ return Bad_TF;
+ }
+
+ // Basic sanity checks for sRGBish transfer functions.
+ if (sk_float_isfinite(tf.a + tf.b + tf.c + tf.d + tf.e + tf.f + tf.g)
+ // a,c,d,g should be non-negative to make any sense.
+ && tf.a >= 0
+ && tf.c >= 0
+ && tf.d >= 0
+ && tf.g >= 0
+        // Raising a negative value to a fractional tf.g produces complex numbers.
+ && tf.a * tf.d + tf.b >= 0) {
+ return sRGBish_TF;
+ }
+
+ return Bad_TF;
+}
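+
+// To illustrate the negative-g encoding (a sketch mirroring skcms; a..f stand
+// for the actual PQ parameters):
+//
+//     skcms_TransferFunction tf = { -(float)PQish_TF, a, b, c, d, e, f };
+//     SkASSERT(classify_transfer_fn(tf) == PQish_TF);
+//
+// Here tf.g carries only the tag, leaving a..f free for the curve's parameters.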
+
+static inline bool is_almost_srgb(const skcms_TransferFunction& coeffs) {
+ return transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.a, coeffs.a) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.b, coeffs.b) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.c, coeffs.c) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.d, coeffs.d) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.e, coeffs.e) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.f, coeffs.f) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.g, coeffs.g);
+}
+
+static inline bool is_almost_2dot2(const skcms_TransferFunction& coeffs) {
+ return transfer_fn_almost_equal(1.0f, coeffs.a) &&
+ transfer_fn_almost_equal(0.0f, coeffs.b) &&
+ transfer_fn_almost_equal(0.0f, coeffs.e) &&
+ transfer_fn_almost_equal(2.2f, coeffs.g) &&
+ coeffs.d <= 0.0f;
+}
+
+static inline bool is_almost_linear(const skcms_TransferFunction& coeffs) {
+ // OutputVal = InputVal ^ 1.0f
+ const bool linearExp =
+ transfer_fn_almost_equal(1.0f, coeffs.a) &&
+ transfer_fn_almost_equal(0.0f, coeffs.b) &&
+ transfer_fn_almost_equal(0.0f, coeffs.e) &&
+ transfer_fn_almost_equal(1.0f, coeffs.g) &&
+ coeffs.d <= 0.0f;
+
+ // OutputVal = 1.0f * InputVal
+ const bool linearFn =
+ transfer_fn_almost_equal(1.0f, coeffs.c) &&
+ transfer_fn_almost_equal(0.0f, coeffs.f) &&
+ coeffs.d >= 1.0f;
+
+ return linearExp || linearFn;
+}
+
+// Return raw pointers to commonly used SkColorSpaces.
+// No need to ref/unref these, but if you do, do it in pairs.
+SkColorSpace* sk_srgb_singleton();
+SkColorSpace* sk_srgb_linear_singleton();
+
+#endif // SkColorSpacePriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp
new file mode 100644
index 0000000000..fe03bca043
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/third_party/skcms/skcms.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+
+// TODO(mtklein): explain the logic of this file
+
+bool SkColorSpaceXformSteps::Required(SkColorSpace* src, SkColorSpace* dst) {
+ // Any SkAlphaType will work fine here as long as we use the same one.
+ SkAlphaType at = kPremul_SkAlphaType;
+ return 0 != SkColorSpaceXformSteps(src, at,
+ dst, at).flags.mask();
+ // TODO(mtklein): quicker impl. that doesn't construct an SkColorSpaceXformSteps?
+}
+
+SkColorSpaceXformSteps::SkColorSpaceXformSteps(SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst, SkAlphaType dstAT) {
+ // Opaque outputs are treated as the same alpha type as the source input.
+ // TODO: we'd really like to have a good way of explaining why we think this is useful.
+ if (dstAT == kOpaque_SkAlphaType) {
+ dstAT = srcAT;
+ }
+
+ // We have some options about what to do with null src or dst here.
+ // This pair seems to be the most consistent with legacy expectations.
+ if (!src) { src = sk_srgb_singleton(); }
+ if (!dst) { dst = src; }
+
+ if (src->hash() == dst->hash() && srcAT == dstAT) {
+ SkASSERT(SkColorSpace::Equals(src,dst));
+ return;
+ }
+
+ this->flags.unpremul = srcAT == kPremul_SkAlphaType;
+ this->flags.linearize = !src->gammaIsLinear();
+ this->flags.gamut_transform = src->toXYZD50Hash() != dst->toXYZD50Hash();
+ this->flags.encode = !dst->gammaIsLinear();
+ this->flags.premul = srcAT != kOpaque_SkAlphaType && dstAT == kPremul_SkAlphaType;
+
+ if (this->flags.gamut_transform) {
+ float row_major[9]; // TODO: switch src_to_dst_matrix to row-major
+ src->gamutTransformTo(dst, row_major);
+
+ this->src_to_dst_matrix[0] = row_major[0];
+ this->src_to_dst_matrix[1] = row_major[3];
+ this->src_to_dst_matrix[2] = row_major[6];
+
+ this->src_to_dst_matrix[3] = row_major[1];
+ this->src_to_dst_matrix[4] = row_major[4];
+ this->src_to_dst_matrix[5] = row_major[7];
+
+ this->src_to_dst_matrix[6] = row_major[2];
+ this->src_to_dst_matrix[7] = row_major[5];
+ this->src_to_dst_matrix[8] = row_major[8];
+ } else {
+ #ifdef SK_DEBUG
+ skcms_Matrix3x3 srcM, dstM;
+ src->toXYZD50(&srcM);
+ dst->toXYZD50(&dstM);
+ SkASSERT(0 == memcmp(&srcM, &dstM, 9*sizeof(float)) && "Hash collision");
+ #endif
+ }
+
+ // Fill out all the transfer functions we'll use.
+ src-> transferFn(&this->srcTF .g);
+ dst->invTransferFn(&this->dstTFInv.g);
+
+ this->srcTF_is_sRGB = src->gammaCloseToSRGB();
+ this->dstTF_is_sRGB = dst->gammaCloseToSRGB();
+
+ // If we linearize then immediately reencode with the same transfer function, skip both.
+ if ( this->flags.linearize &&
+ !this->flags.gamut_transform &&
+ this->flags.encode &&
+ src->transferFnHash() == dst->transferFnHash())
+ {
+ #ifdef SK_DEBUG
+ float dstTF[7];
+ dst->transferFn(dstTF);
+ for (int i = 0; i < 7; i++) {
+ SkASSERT( (&srcTF.g)[i] == dstTF[i] && "Hash collision" );
+ }
+ #endif
+ this->flags.linearize = false;
+ this->flags.encode = false;
+ }
+
+ // Skip unpremul...premul if there are no non-linear operations between.
+ if ( this->flags.unpremul &&
+ !this->flags.linearize &&
+ !this->flags.encode &&
+ this->flags.premul)
+ {
+ this->flags.unpremul = false;
+ this->flags.premul = false;
+ }
+}
+
+void SkColorSpaceXformSteps::apply(float* rgba) const {
+ if (flags.unpremul) {
+ // I don't know why isfinite(x) stopped working on the Chromecast bots...
+ auto is_finite = [](float x) { return x*0 == 0; };
+
+ float invA = is_finite(1.0f / rgba[3]) ? 1.0f / rgba[3] : 0;
+ rgba[0] *= invA;
+ rgba[1] *= invA;
+ rgba[2] *= invA;
+ }
+ if (flags.linearize) {
+ rgba[0] = skcms_TransferFunction_eval(&srcTF, rgba[0]);
+ rgba[1] = skcms_TransferFunction_eval(&srcTF, rgba[1]);
+ rgba[2] = skcms_TransferFunction_eval(&srcTF, rgba[2]);
+ }
+ if (flags.gamut_transform) {
+ float temp[3] = { rgba[0], rgba[1], rgba[2] };
+ for (int i = 0; i < 3; ++i) {
+ rgba[i] = src_to_dst_matrix[ i] * temp[0] +
+ src_to_dst_matrix[3 + i] * temp[1] +
+ src_to_dst_matrix[6 + i] * temp[2];
+ }
+ }
+ if (flags.encode) {
+ rgba[0] = skcms_TransferFunction_eval(&dstTFInv, rgba[0]);
+ rgba[1] = skcms_TransferFunction_eval(&dstTFInv, rgba[1]);
+ rgba[2] = skcms_TransferFunction_eval(&dstTFInv, rgba[2]);
+ }
+ if (flags.premul) {
+ rgba[0] *= rgba[3];
+ rgba[1] *= rgba[3];
+ rgba[2] *= rgba[3];
+ }
+}
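+
+// A sketch of the scalar path above in use (illustrative; the color spaces and
+// values are hypothetical):
+//
+//     SkColorSpaceXformSteps steps(srcCS.get(), kPremul_SkAlphaType,
+//                                  dstCS.get(), kPremul_SkAlphaType);
+//     float rgba[4] = {0.25f, 0.50f, 0.75f, 0.50f};  // premul, in src space
+//     steps.apply(rgba);                             // now premul, in dst space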
+
+void SkColorSpaceXformSteps::apply(SkRasterPipeline* p, bool src_is_normalized) const {
+#if defined(SK_LEGACY_SRGB_STAGE_CHOICE)
+ src_is_normalized = true;
+#endif
+ if (flags.unpremul) { p->append(SkRasterPipeline::unpremul); }
+ if (flags.linearize) {
+ if (src_is_normalized && srcTF_is_sRGB) {
+ p->append(SkRasterPipeline::from_srgb);
+ } else {
+ p->append_transfer_function(srcTF);
+ }
+ }
+ if (flags.gamut_transform) {
+ p->append(SkRasterPipeline::matrix_3x3, &src_to_dst_matrix);
+ }
+ if (flags.encode) {
+ if (src_is_normalized && dstTF_is_sRGB) {
+ p->append(SkRasterPipeline::to_srgb);
+ } else {
+ p->append_transfer_function(dstTFInv);
+ }
+ }
+ if (flags.premul) { p->append(SkRasterPipeline::premul); }
+}
+
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h
new file mode 100644
index 0000000000..1a15643839
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpaceXformSteps_DEFINED
+#define SkColorSpaceXformSteps_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+
+class SkRasterPipeline;
+
+struct SkColorSpaceXformSteps {
+ // Returns true if SkColorSpaceXformSteps must be applied
+ // to draw content in `src` into a destination in `dst`.
+ static bool Required(SkColorSpace* src, SkColorSpace* dst);
+
+ struct Flags {
+ bool unpremul = false;
+ bool linearize = false;
+ bool gamut_transform = false;
+ bool encode = false;
+ bool premul = false;
+
+ uint32_t mask() const {
+ return (unpremul ? 1 : 0)
+ | (linearize ? 2 : 0)
+ | (gamut_transform ? 4 : 0)
+ | (encode ? 8 : 0)
+ | (premul ? 16 : 0);
+ }
+ };
+
+ SkColorSpaceXformSteps(SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst, SkAlphaType dstAT);
+
+ void apply(float rgba[4]) const;
+ void apply(SkRasterPipeline*, bool src_is_normalized) const;
+
+ void apply(SkRasterPipeline* p, SkColorType srcCT) const {
+ #if 0
+ this->apply(p, srcCT < kRGBA_F16_SkColorType);
+ #else
+ // F16Norm is normalized, but to make diffing with F16 easier we
+ // intentionally take the slower, non-normalized path here.
+ this->apply(p, srcCT < kRGBA_F16Norm_SkColorType);
+ #endif
+ }
+
+ Flags flags;
+
+ bool srcTF_is_sRGB,
+ dstTF_is_sRGB;
+ skcms_TransferFunction srcTF, // Apply for linearize.
+ dstTFInv; // Apply for encode.
+ float src_to_dst_matrix[9]; // Apply this 3x3 column-major matrix for gamut_transform.
+};
+
+#endif//SkColorSpaceXformSteps_DEFINED
diff --git a/gfx/skia/skia/src/core/SkContourMeasure.cpp b/gfx/skia/skia/src/core/SkContourMeasure.cpp
new file mode 100644
index 0000000000..836b0a2e16
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkContourMeasure.cpp
@@ -0,0 +1,651 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPath.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathMeasurePriv.h"
+#include "src/core/SkTSearch.h"
+
+#define kMaxTValue 0x3FFFFFFF
+
+constexpr static inline SkScalar tValue2Scalar(int t) {
+ SkASSERT((unsigned)t <= kMaxTValue);
+ // 1/kMaxTValue can't be represented as a float, but it's close and the limits work fine.
+ const SkScalar kMaxTReciprocal = 1.0f / (SkScalar)kMaxTValue;
+ return t * kMaxTReciprocal;
+}
+
+static_assert(0.0f == tValue2Scalar( 0), "Lower limit should be exact.");
+static_assert(1.0f == tValue2Scalar(kMaxTValue), "Upper limit should be exact.");
+
+SkScalar SkContourMeasure::Segment::getScalarT() const {
+ return tValue2Scalar(fTValue);
+}
+
+void SkContourMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst) {
+ SkASSERT(startT >= 0 && startT <= SK_Scalar1);
+ SkASSERT(stopT >= 0 && stopT <= SK_Scalar1);
+ SkASSERT(startT <= stopT);
+
+ if (startT == stopT) {
+ if (!dst->isEmpty()) {
+            /* If the dash has a zero-length "on" segment, add a corresponding zero-length
+               line. The stroke code will add end caps to zero-length lines as appropriate. */
+ SkPoint lastPt;
+ SkAssertResult(dst->getLastPt(&lastPt));
+ dst->lineTo(lastPt);
+ }
+ return;
+ }
+
+ SkPoint tmp0[7], tmp1[7];
+
+ switch (segType) {
+ case kLine_SegType:
+ if (SK_Scalar1 == stopT) {
+ dst->lineTo(pts[1]);
+ } else {
+ dst->lineTo(SkScalarInterp(pts[0].fX, pts[1].fX, stopT),
+ SkScalarInterp(pts[0].fY, pts[1].fY, stopT));
+ }
+ break;
+ case kQuad_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(pts[1], pts[2]);
+ } else {
+ SkChopQuadAt(pts, tmp0, stopT);
+ dst->quadTo(tmp0[1], tmp0[2]);
+ }
+ } else {
+ SkChopQuadAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(tmp0[3], tmp0[4]);
+ } else {
+ SkChopQuadAt(&tmp0[2], tmp1, (stopT - startT) / (1 - startT));
+ dst->quadTo(tmp1[1], tmp1[2]);
+ }
+ }
+ break;
+ case kConic_SegType: {
+ SkConic conic(pts[0], pts[2], pts[3], pts[1].fX);
+
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->conicTo(conic.fPts[1], conic.fPts[2], conic.fW);
+ } else {
+ SkConic tmp[2];
+ if (conic.chopAt(stopT, tmp)) {
+ dst->conicTo(tmp[0].fPts[1], tmp[0].fPts[2], tmp[0].fW);
+ }
+ }
+ } else {
+ if (SK_Scalar1 == stopT) {
+ SkConic tmp1[2];
+ if (conic.chopAt(startT, tmp1)) {
+ dst->conicTo(tmp1[1].fPts[1], tmp1[1].fPts[2], tmp1[1].fW);
+ }
+ } else {
+ SkConic tmp;
+ conic.chopAt(startT, stopT, &tmp);
+ dst->conicTo(tmp.fPts[1], tmp.fPts[2], tmp.fW);
+ }
+ }
+ } break;
+ case kCubic_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ } else {
+ SkChopCubicAt(pts, tmp0, stopT);
+ dst->cubicTo(tmp0[1], tmp0[2], tmp0[3]);
+ }
+ } else {
+ SkChopCubicAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(tmp0[4], tmp0[5], tmp0[6]);
+ } else {
+ SkChopCubicAt(&tmp0[3], tmp1, (stopT - startT) / (1 - startT));
+ dst->cubicTo(tmp1[1], tmp1[2], tmp1[3]);
+ }
+ }
+ break;
+ default:
+ SK_ABORT("unknown segType");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline int tspan_big_enough(int tspan) {
+ SkASSERT((unsigned)tspan <= kMaxTValue);
+ return tspan >> 10;
+}
+
+// can't use tangents, since we need [0..1..................2] to be seen
+// as definitely not a line (it is when drawn, but not parametrically)
+// so we compare midpoints
+#define CHEAP_DIST_LIMIT (SK_Scalar1/2) // just made this value up
+
+static bool quad_too_curvy(const SkPoint pts[3], SkScalar tolerance) {
+ // diff = (a/4 + b/2 + c/4) - (a/2 + c/2)
+ // diff = -a/4 + b/2 - c/4
+ SkScalar dx = SkScalarHalf(pts[1].fX) -
+ SkScalarHalf(SkScalarHalf(pts[0].fX + pts[2].fX));
+ SkScalar dy = SkScalarHalf(pts[1].fY) -
+ SkScalarHalf(SkScalarHalf(pts[0].fY + pts[2].fY));
+
+ SkScalar dist = SkMaxScalar(SkScalarAbs(dx), SkScalarAbs(dy));
+ return dist > tolerance;
+}
+
+static bool conic_too_curvy(const SkPoint& firstPt, const SkPoint& midTPt,
+ const SkPoint& lastPt, SkScalar tolerance) {
+ SkPoint midEnds = firstPt + lastPt;
+ midEnds *= 0.5f;
+ SkVector dxy = midTPt - midEnds;
+ SkScalar dist = SkMaxScalar(SkScalarAbs(dxy.fX), SkScalarAbs(dxy.fY));
+ return dist > tolerance;
+}
+
+static bool cheap_dist_exceeds_limit(const SkPoint& pt, SkScalar x, SkScalar y,
+ SkScalar tolerance) {
+ SkScalar dist = SkMaxScalar(SkScalarAbs(x - pt.fX), SkScalarAbs(y - pt.fY));
+ // just made up the 1/2
+ return dist > tolerance;
+}
+
+static bool cubic_too_curvy(const SkPoint pts[4], SkScalar tolerance) {
+ return cheap_dist_exceeds_limit(pts[1],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1/3), tolerance)
+ ||
+ cheap_dist_exceeds_limit(pts[2],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1*2/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1*2/3), tolerance);
+}
+
+SkScalar SkContourMeasureIter::compute_quad_segs(const SkPoint pts[3], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex) {
+ if (tspan_big_enough(maxt - mint) && quad_too_curvy(pts, fTolerance)) {
+ SkPoint tmp[5];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopQuadAtHalf(pts, tmp);
+ distance = this->compute_quad_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_quad_segs(&tmp[2], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[2]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.count());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kQuad_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::compute_conic_segs(const SkConic& conic, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt,
+ unsigned ptIndex) {
+ int halft = (mint + maxt) >> 1;
+ SkPoint halfPt = conic.evalAt(tValue2Scalar(halft));
+ if (!halfPt.isFinite()) {
+ return distance;
+ }
+ if (tspan_big_enough(maxt - mint) && conic_too_curvy(minPt, halfPt, maxPt, fTolerance)) {
+ distance = this->compute_conic_segs(conic, distance, mint, minPt, halft, halfPt, ptIndex);
+ distance = this->compute_conic_segs(conic, distance, halft, halfPt, maxt, maxPt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(minPt, maxPt);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.count());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kConic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::compute_cubic_segs(const SkPoint pts[4], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex) {
+ if (tspan_big_enough(maxt - mint) && cubic_too_curvy(pts, fTolerance)) {
+ SkPoint tmp[7];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopCubicAtHalf(pts, tmp);
+ distance = this->compute_cubic_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_cubic_segs(&tmp[3], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[3]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.count());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kCubic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::compute_line_seg(SkPoint p0, SkPoint p1, SkScalar distance,
+ unsigned ptIndex) {
+ SkScalar d = SkPoint::Distance(p0, p1);
+ SkASSERT(d >= 0);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT((unsigned)ptIndex < (unsigned)fPts.count());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kLine_SegType;
+ seg->fTValue = kMaxTValue;
+ }
+ return distance;
+}
+
+SkContourMeasure* SkContourMeasureIter::buildSegments() {
+ SkPoint pts[4];
+ int ptIndex = -1;
+ SkScalar distance = 0;
+ bool haveSeenClose = fForceClosed;
+ bool haveSeenMoveTo = false;
+
+ /* Note:
+ * as we accumulate distance, we have to check that the result of +=
+ * actually made it larger, since a very small delta might be > 0, but
+ * still have no effect on distance (if distance >>> delta).
+ *
+     * We do this check below and in the compute_*_segs helpers.
+ */
+
+ fSegments.reset();
+ fPts.reset();
+
+ bool done = false;
+ do {
+ if (haveSeenMoveTo && fIter.peek() == SkPath::kMove_Verb) {
+ break;
+ }
+ switch (fIter.next(pts)) {
+ case SkPath::kMove_Verb:
+ ptIndex += 1;
+ fPts.append(1, pts);
+ SkASSERT(!haveSeenMoveTo);
+ haveSeenMoveTo = true;
+ break;
+
+ case SkPath::kLine_Verb: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_line_seg(pts[0], pts[1], distance, ptIndex);
+ if (distance > prevD) {
+ fPts.append(1, pts + 1);
+ ptIndex++;
+ }
+ } break;
+
+ case SkPath::kQuad_Verb: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_quad_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ if (distance > prevD) {
+ fPts.append(2, pts + 1);
+ ptIndex += 2;
+ }
+ } break;
+
+ case SkPath::kConic_Verb: {
+ SkASSERT(haveSeenMoveTo);
+ const SkConic conic(pts, fIter.conicWeight());
+ SkScalar prevD = distance;
+ distance = this->compute_conic_segs(conic, distance, 0, conic.fPts[0],
+ kMaxTValue, conic.fPts[2], ptIndex);
+ if (distance > prevD) {
+ // we store the conic weight in our next point, followed by the last 2 pts
+                    // thus to reconstitute a conic, you'd need to say
+ // SkConic(pts[0], pts[2], pts[3], weight = pts[1].fX)
+ fPts.append()->set(conic.fW, 0);
+ fPts.append(2, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPath::kCubic_Verb: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_cubic_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ if (distance > prevD) {
+ fPts.append(3, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPath::kClose_Verb:
+ haveSeenClose = true;
+ break;
+
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+
+ } while (!done);
+
+ if (!SkScalarIsFinite(distance)) {
+ return nullptr;
+ }
+ if (fSegments.count() == 0) {
+ return nullptr;
+ }
+
+ // Handle the close segment ourselves, since we're using RawIter
+ if (haveSeenClose) {
+ SkScalar prevD = distance;
+ SkPoint firstPt = fPts[0];
+ distance = this->compute_line_seg(fPts[ptIndex], firstPt, distance, ptIndex);
+ if (distance > prevD) {
+ *fPts.append() = firstPt;
+ }
+ }
+
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ {
+ const SkContourMeasure::Segment* seg = fSegments.begin();
+ const SkContourMeasure::Segment* stop = fSegments.end();
+ unsigned ptIndex = 0;
+ SkScalar distance = 0;
+ // limit the loop to a reasonable number; pathological cases can run for minutes
+ int maxChecks = 10000000; // set to INT_MAX to defeat the check
+ while (seg < stop) {
+ SkASSERT(seg->fDistance > distance);
+ SkASSERT(seg->fPtIndex >= ptIndex);
+ SkASSERT(seg->fTValue > 0);
+
+ const SkContourMeasure::Segment* s = seg;
+ while (s < stop - 1 && s[0].fPtIndex == s[1].fPtIndex && --maxChecks > 0) {
+ SkASSERT(s[0].fType == s[1].fType);
+ SkASSERT(s[0].fTValue < s[1].fTValue);
+ s += 1;
+ }
+
+ distance = seg->fDistance;
+ ptIndex = seg->fPtIndex;
+ seg += 1;
+ }
+ // SkDebugf("\n");
+ }
+#endif
+#endif
+
+ return new SkContourMeasure(std::move(fSegments), std::move(fPts), distance, haveSeenClose);
+}
+
+static void compute_pos_tan(const SkPoint pts[], unsigned segType,
+ SkScalar t, SkPoint* pos, SkVector* tangent) {
+ switch (segType) {
+ case kLine_SegType:
+ if (pos) {
+ pos->set(SkScalarInterp(pts[0].fX, pts[1].fX, t),
+ SkScalarInterp(pts[0].fY, pts[1].fY, t));
+ }
+ if (tangent) {
+ tangent->setNormalize(pts[1].fX - pts[0].fX, pts[1].fY - pts[0].fY);
+ }
+ break;
+ case kQuad_SegType:
+ SkEvalQuadAt(pts, t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ case kConic_SegType: {
+ SkConic(pts[0], pts[2], pts[3], pts[1].fX).evalAt(t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ } break;
+ case kCubic_SegType:
+ SkEvalCubicAt(pts, t, pos, tangent, nullptr);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown segType");
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+SkContourMeasureIter::SkContourMeasureIter() {
+ fTolerance = CHEAP_DIST_LIMIT;
+ fForceClosed = false;
+}
+
+SkContourMeasureIter::SkContourMeasureIter(const SkPath& path, bool forceClosed,
+ SkScalar resScale) {
+ fPath = path.isFinite() ? path : SkPath();
+ fTolerance = CHEAP_DIST_LIMIT * SkScalarInvert(resScale);
+ fForceClosed = forceClosed;
+
+ fIter.setPath(fPath);
+}
+
+SkContourMeasureIter::~SkContourMeasureIter() {}
+
+/** Assign a new path to iterate over.
+*/
+void SkContourMeasureIter::reset(const SkPath& path, bool forceClosed, SkScalar resScale) {
+ if (path.isFinite()) {
+ fPath = path;
+ } else {
+ fPath.reset();
+ }
+ fForceClosed = forceClosed;
+
+ fIter.setPath(fPath);
+ fSegments.reset();
+ fPts.reset();
+}
+
+sk_sp<SkContourMeasure> SkContourMeasureIter::next() {
+ while (fIter.peek() != SkPath::kDone_Verb) {
+ auto cm = this->buildSegments();
+ if (cm) {
+ return sk_sp<SkContourMeasure>(cm);
+ }
+ }
+ return nullptr;
+}
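+
+// Typical iteration, as a sketch (the path is hypothetical):
+//
+//     SkContourMeasureIter iter(path, /*forceClosed=*/false);
+//     while (sk_sp<SkContourMeasure> contour = iter.next()) {
+//         SkPoint pos; SkVector tan;
+//         if (contour->getPosTan(0.5f * contour->length(), &pos, &tan)) {
+//             // pos/tan describe the point halfway along this contour
+//         }
+//     }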
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkContourMeasure::SkContourMeasure(SkTDArray<Segment>&& segs, SkTDArray<SkPoint>&& pts, SkScalar length, bool isClosed)
+ : fSegments(std::move(segs))
+ , fPts(std::move(pts))
+ , fLength(length)
+ , fIsClosed(isClosed)
+ {}
+
+template <typename T, typename K>
+int SkTKSearch(const T base[], int count, const K& key) {
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return ~0;
+ }
+
+    SkASSERT(base != nullptr); // base may be nullptr only when count is zero, handled above
+
+ unsigned lo = 0;
+ unsigned hi = count - 1;
+
+ while (lo < hi) {
+ unsigned mid = (hi + lo) >> 1;
+ if (base[mid].fDistance < key) {
+ lo = mid + 1;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (base[hi].fDistance < key) {
+ hi += 1;
+ hi = ~hi;
+ } else if (key < base[hi].fDistance) {
+ hi = ~hi;
+ }
+ return hi;
+}
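+
+// The return convention matches SkTSearch: a non-negative result is the index of
+// an exact match, and a negative result is the bitwise-not of the insertion
+// point. A worked example with segment distances {10, 20, 30}:
+//
+//     SkTKSearch(segs, 3, 20.0f)  ->   1          // exact hit at index 1
+//     SkTKSearch(segs, 3, 25.0f)  ->  ~2 == -3    // would insert before index 2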
+
+const SkContourMeasure::Segment* SkContourMeasure::distanceToSegment( SkScalar distance,
+ SkScalar* t) const {
+ SkDEBUGCODE(SkScalar length = ) this->length();
+ SkASSERT(distance >= 0 && distance <= length);
+
+ const Segment* seg = fSegments.begin();
+ int count = fSegments.count();
+
+ int index = SkTKSearch<Segment, SkScalar>(seg, count, distance);
+ // don't care if we hit an exact match or not, so we xor index if it is negative
+ index ^= (index >> 31);
+ seg = &seg[index];
+
+ // now interpolate t-values with the prev segment (if possible)
+ SkScalar startT = 0, startD = 0;
+ // check if the prev segment is legal, and references the same set of points
+ if (index > 0) {
+ startD = seg[-1].fDistance;
+ if (seg[-1].fPtIndex == seg->fPtIndex) {
+ SkASSERT(seg[-1].fType == seg->fType);
+ startT = seg[-1].getScalarT();
+ }
+ }
+
+ SkASSERT(seg->getScalarT() > startT);
+ SkASSERT(distance >= startD);
+ SkASSERT(seg->fDistance > startD);
+
+ *t = startT + (seg->getScalarT() - startT) * (distance - startD) / (seg->fDistance - startD);
+ return seg;
+}
+
+bool SkContourMeasure::getPosTan(SkScalar distance, SkPoint* pos, SkVector* tangent) const {
+ if (SkScalarIsNaN(distance)) {
+ return false;
+ }
+
+ const SkScalar length = this->length();
+ SkASSERT(length > 0 && fSegments.count() > 0);
+
+ // pin the distance to a legal range
+ if (distance < 0) {
+ distance = 0;
+ } else if (distance > length) {
+ distance = length;
+ }
+
+ SkScalar t;
+ const Segment* seg = this->distanceToSegment(distance, &t);
+ if (SkScalarIsNaN(t)) {
+ return false;
+ }
+
+ SkASSERT((unsigned)seg->fPtIndex < (unsigned)fPts.count());
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, t, pos, tangent);
+ return true;
+}
+
+bool SkContourMeasure::getMatrix(SkScalar distance, SkMatrix* matrix, MatrixFlags flags) const {
+ SkPoint position;
+ SkVector tangent;
+
+ if (this->getPosTan(distance, &position, &tangent)) {
+ if (matrix) {
+ if (flags & kGetTangent_MatrixFlag) {
+ matrix->setSinCos(tangent.fY, tangent.fX, 0, 0);
+ } else {
+ matrix->reset();
+ }
+ if (flags & kGetPosition_MatrixFlag) {
+ matrix->postTranslate(position.fX, position.fY);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkContourMeasure::getSegment(SkScalar startD, SkScalar stopD, SkPath* dst,
+ bool startWithMoveTo) const {
+ SkASSERT(dst);
+
+ SkScalar length = this->length(); // ensure we have built our segments
+
+ if (startD < 0) {
+ startD = 0;
+ }
+ if (stopD > length) {
+ stopD = length;
+ }
+ if (!(startD <= stopD)) { // catch NaN values as well
+ return false;
+ }
+ if (!fSegments.count()) {
+ return false;
+ }
+
+ SkPoint p;
+ SkScalar startT, stopT;
+ const Segment* seg = this->distanceToSegment(startD, &startT);
+ if (!SkScalarIsFinite(startT)) {
+ return false;
+ }
+ const Segment* stopSeg = this->distanceToSegment(stopD, &stopT);
+ if (!SkScalarIsFinite(stopT)) {
+ return false;
+ }
+ SkASSERT(seg <= stopSeg);
+ if (startWithMoveTo) {
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, startT, &p, nullptr);
+ dst->moveTo(p);
+ }
+
+ if (seg->fPtIndex == stopSeg->fPtIndex) {
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, stopT, dst);
+ } else {
+ do {
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, SK_Scalar1, dst);
+ seg = SkContourMeasure::Segment::Next(seg);
+ startT = 0;
+ } while (seg->fPtIndex < stopSeg->fPtIndex);
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, 0, stopT, dst);
+ }
+
+ return true;
+}
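+
+// Example: extracting the middle half of a contour as its own path (a sketch;
+// `contour` is assumed to come from SkContourMeasureIter::next()):
+//
+//     SkPath half;
+//     SkScalar len = contour->length();
+//     contour->getSegment(0.25f * len, 0.75f * len, &half, /*startWithMoveTo=*/true);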
diff --git a/gfx/skia/skia/src/core/SkConvertPixels.cpp b/gfx/skia/skia/src/core/SkConvertPixels.cpp
new file mode 100644
index 0000000000..ca50a95784
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvertPixels.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+
+static bool rect_memcpy(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB,
+ const SkColorSpaceXformSteps& steps) {
+ // We can copy the pixels when no color type, alpha type, or color space changes.
+ if (dstInfo.colorType() != srcInfo.colorType()) {
+ return false;
+ }
+ if (dstInfo.colorType() != kAlpha_8_SkColorType
+ && steps.flags.mask() != 0b00000) {
+ return false;
+ }
+
+ SkRectMemcpy(dstPixels, dstRB,
+ srcPixels, srcRB, dstInfo.minRowBytes(), dstInfo.height());
+ return true;
+}
+
+static bool swizzle_or_premul(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB,
+ const SkColorSpaceXformSteps& steps) {
+ auto is_8888 = [](SkColorType ct) {
+ return ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType;
+ };
+ if (!is_8888(dstInfo.colorType()) ||
+ !is_8888(srcInfo.colorType()) ||
+ steps.flags.linearize ||
+ steps.flags.gamut_transform ||
+ steps.flags.unpremul ||
+ steps.flags.encode) {
+ return false;
+ }
+
+ const bool swapRB = dstInfo.colorType() != srcInfo.colorType();
+
+ void (*fn)(uint32_t*, const uint32_t*, int) = nullptr;
+
+ if (steps.flags.premul) {
+ fn = swapRB ? SkOpts::RGBA_to_bgrA
+ : SkOpts::RGBA_to_rgbA;
+ } else {
+ // If we're not swizzling, we ought to have used rect_memcpy().
+ SkASSERT(swapRB);
+ fn = SkOpts::RGBA_to_BGRA;
+ }
+
+ for (int y = 0; y < dstInfo.height(); y++) {
+ fn((uint32_t*)dstPixels, (const uint32_t*)srcPixels, dstInfo.width());
+ dstPixels = SkTAddOffset<void>(dstPixels, dstRB);
+ srcPixels = SkTAddOffset<const void>(srcPixels, srcRB);
+ }
+ return true;
+}
+
+static bool convert_to_alpha8(const SkImageInfo& dstInfo, void* vdst, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* src, size_t srcRB,
+ const SkColorSpaceXformSteps&) {
+ if (dstInfo.colorType() != kAlpha_8_SkColorType) {
+ return false;
+ }
+ auto dst = (uint8_t*)vdst;
+
+ switch (srcInfo.colorType()) {
+ case kUnknown_SkColorType:
+ case kAlpha_8_SkColorType: {
+ // Unknown should never happen.
+ // Alpha8 should have been handled by rect_memcpy().
+ SkASSERT(false);
+ return false;
+ }
+
+ case kA16_unorm_SkColorType: {
+ auto src16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = src16[x] >> 8;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src16 = SkTAddOffset<const uint16_t>(src16, srcRB);
+ }
+ return true;
+ }
+
+ case kGray_8_SkColorType:
+ case kRGB_565_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType: {
+ for (int y = 0; y < srcInfo.height(); ++y) {
+ memset(dst, 0xFF, srcInfo.width());
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+ return true;
+ }
+
+ case kARGB_4444_SkColorType: {
+ auto src16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = SkPacked4444ToA32(src16[x]);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src16 = SkTAddOffset<const uint16_t>(src16, srcRB);
+ }
+ return true;
+ }
+
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType: {
+ auto src32 = (const uint32_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = src32[x] >> 24;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src32 = SkTAddOffset<const uint32_t>(src32, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_1010102_SkColorType: {
+ auto src32 = (const uint32_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (src32[x] >> 30) * 0x55;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src32 = SkTAddOffset<const uint32_t>(src32, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ auto src64 = (const uint64_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48));
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src64 = SkTAddOffset<const uint64_t>(src64, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_F32_SkColorType: {
+ auto rgba = (const float*)src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t)(255.0f * rgba[4*x+3]);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ rgba = SkTAddOffset<const float>(rgba, srcRB);
+ }
+ return true;
+ }
+
+ case kA16_float_SkColorType: {
+ auto srcF16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t) (255.0f * SkHalfToFloat(srcF16[x]));
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ srcF16 = SkTAddOffset<const uint16_t>(srcF16, srcRB);
+ }
+ return true;
+ }
+
+ case kR16G16B16A16_unorm_SkColorType: {
+ auto src64 = (const uint64_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (src64[x] >> 48) >> 8;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src64 = SkTAddOffset<const uint64_t>(src64, srcRB);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+// Default: Use the pipeline.
+static void convert_with_pipeline(const SkImageInfo& dstInfo, void* dstRow, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcRow, size_t srcRB,
+ const SkColorSpaceXformSteps& steps) {
+
+ SkRasterPipeline_MemoryCtx src = { (void*)srcRow, (int)(srcRB / srcInfo.bytesPerPixel()) },
+ dst = { (void*)dstRow, (int)(dstRB / dstInfo.bytesPerPixel()) };
+
+ SkRasterPipeline_<256> pipeline;
+ pipeline.append_load(srcInfo.colorType(), &src);
+ steps.apply(&pipeline, srcInfo.colorType());
+
+ pipeline.append_gamut_clamp_if_normalized(dstInfo);
+
+ pipeline.append_store(dstInfo.colorType(), &dst);
+ pipeline.run(0,0, srcInfo.width(), srcInfo.height());
+}
+
+void SkConvertPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB) {
+ SkASSERT(dstInfo.dimensions() == srcInfo.dimensions());
+ SkASSERT(SkImageInfoValidConversion(dstInfo, srcInfo));
+
+ SkColorSpaceXformSteps steps{srcInfo.colorSpace(), srcInfo.alphaType(),
+ dstInfo.colorSpace(), dstInfo.alphaType()};
+
+ for (auto fn : {rect_memcpy, swizzle_or_premul, convert_to_alpha8}) {
+ if (fn(dstInfo, dstPixels, dstRB, srcInfo, srcPixels, srcRB, steps)) {
+ return;
+ }
+ }
+ convert_with_pipeline(dstInfo, dstPixels, dstRB, srcInfo, srcPixels, srcRB, steps);
+}
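+
+// A minimal conversion sketch (illustrative; the buffers and dimensions are
+// hypothetical):
+//
+//     SkImageInfo srcInfo = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType,
+//                                             kUnpremul_SkAlphaType);
+//     SkImageInfo dstInfo = srcInfo.makeColorType(kBGRA_8888_SkColorType)
+//                                  .makeAlphaType(kPremul_SkAlphaType);
+//     SkConvertPixels(dstInfo, dstPixels, dstInfo.minRowBytes(),
+//                     srcInfo, srcPixels, srcInfo.minRowBytes());
+//
+// With no color space change, this pairing hits the swizzle_or_premul() fast path.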
diff --git a/gfx/skia/skia/src/core/SkConvertPixels.h b/gfx/skia/skia/src/core/SkConvertPixels.h
new file mode 100644
index 0000000000..99d6a017c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvertPixels.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkConvertPixels_DEFINED
+#define SkConvertPixels_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/private/SkTemplates.h"
+
+class SkColorTable;
+
+void SkConvertPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRowBytes);
+
+static inline void SkRectMemcpy(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ size_t trimRowBytes, int rowCount) {
+ SkASSERT(trimRowBytes <= dstRB);
+ SkASSERT(trimRowBytes <= srcRB);
+ if (trimRowBytes == dstRB && trimRowBytes == srcRB) {
+ memcpy(dst, src, trimRowBytes * rowCount);
+ return;
+ }
+
+ for (int i = 0; i < rowCount; ++i) {
+ memcpy(dst, src, trimRowBytes);
+ dst = SkTAddOffset<void>(dst, dstRB);
+ src = SkTAddOffset<const void>(src, srcRB);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkConvolver.cpp b/gfx/skia/skia/src/core/SkConvolver.cpp
new file mode 100644
index 0000000000..23cc2b7744
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvolver.cpp
@@ -0,0 +1,272 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/core/SkConvolver.h"
+#include "src/core/SkOpts.h"
+#include "include/private/SkTArray.h"
+
+namespace {
+ // Stores a list of rows in a circular buffer. The usage is you write into it
+ // by calling AdvanceRow. It will keep track of which row in the buffer it
+ // should use next, and the total number of rows added.
+ class CircularRowBuffer {
+ public:
+    // The number of pixels in each row is given in |destRowPixelWidth|.
+ // The maximum number of rows needed in the buffer is |maxYFilterSize|
+ // (we only need to store enough rows for the biggest filter).
+ //
+    // We use |firstInputRow| to compute the coordinates of all of the
+    // following rows returned by advanceRow().
+ CircularRowBuffer(int destRowPixelWidth, int maxYFilterSize,
+ int firstInputRow)
+ : fRowByteWidth(destRowPixelWidth * 4),
+ fNumRows(maxYFilterSize),
+ fNextRow(0),
+ fNextRowCoordinate(firstInputRow) {
+ fBuffer.reset(fRowByteWidth * maxYFilterSize);
+ fRowAddresses.reset(fNumRows);
+ }
+
+ // Moves to the next row in the buffer, returning a pointer to the beginning
+ // of it.
+ unsigned char* advanceRow() {
+ unsigned char* row = &fBuffer[fNextRow * fRowByteWidth];
+ fNextRowCoordinate++;
+
+ // Set the pointer to the next row to use, wrapping around if necessary.
+ fNextRow++;
+ if (fNextRow == fNumRows) {
+ fNextRow = 0;
+ }
+ return row;
+ }
+
+ // Returns a pointer to an "unrolled" array of rows. These rows will start
+ // at the y coordinate placed into |*firstRowIndex| and will continue in
+ // order for the maximum number of rows in this circular buffer.
+ //
+        // The |firstRowIndex| may be negative. This means the circular buffer
+ // starts before the top of the image (it hasn't been filled yet).
+ unsigned char* const* GetRowAddresses(int* firstRowIndex) {
+ // Example for a 4-element circular buffer holding coords 6-9.
+ // Row 0 Coord 8
+ // Row 1 Coord 9
+ // Row 2 Coord 6 <- fNextRow = 2, fNextRowCoordinate = 10.
+ // Row 3 Coord 7
+ //
+ // The "next" row is also the first (lowest) coordinate. This computation
+ // may yield a negative value, but that's OK, the math will work out
+ // since the user of this buffer will compute the offset relative
+ // to the firstRowIndex and the negative rows will never be used.
+ *firstRowIndex = fNextRowCoordinate - fNumRows;
+
+ int curRow = fNextRow;
+ for (int i = 0; i < fNumRows; i++) {
+ fRowAddresses[i] = &fBuffer[curRow * fRowByteWidth];
+
+ // Advance to the next row, wrapping if necessary.
+ curRow++;
+ if (curRow == fNumRows) {
+ curRow = 0;
+ }
+ }
+ return &fRowAddresses[0];
+ }
+
+ private:
+ // The buffer storing the rows. They are packed, each one fRowByteWidth.
+ SkTArray<unsigned char> fBuffer;
+
+ // Number of bytes per row in the |buffer|.
+ int fRowByteWidth;
+
+ // The number of rows available in the buffer.
+ int fNumRows;
+
+ // The next row index we should write into. This wraps around as the
+ // circular buffer is used.
+ int fNextRow;
+
+ // The y coordinate of the |fNextRow|. This is incremented each time a
+ // new row is appended and does not wrap.
+ int fNextRowCoordinate;
+
+ // Buffer used by GetRowAddresses().
+ SkTArray<unsigned char*> fRowAddresses;
+ };
+
+} // namespace
+
+// SkConvolutionFilter1D ---------------------------------------------------------
+
+SkConvolutionFilter1D::SkConvolutionFilter1D()
+: fMaxFilter(0) {
+}
+
+SkConvolutionFilter1D::~SkConvolutionFilter1D() {
+}
+
+void SkConvolutionFilter1D::AddFilter(int filterOffset,
+ const ConvolutionFixed* filterValues,
+ int filterLength) {
+ // It is common for leading/trailing filter values to be zeros. In such
+ // cases it is beneficial to only store the central factors.
+ // For a scaling to 1/4th in each dimension using a Lanczos-2 filter on
+ // a 1080p image this optimization gives a ~10% speed improvement.
+ int filterSize = filterLength;
+ int firstNonZero = 0;
+ while (firstNonZero < filterLength && filterValues[firstNonZero] == 0) {
+ firstNonZero++;
+ }
+
+ if (firstNonZero < filterLength) {
+ // Here we have at least one non-zero factor.
+ int lastNonZero = filterLength - 1;
+ while (lastNonZero >= 0 && filterValues[lastNonZero] == 0) {
+ lastNonZero--;
+ }
+
+ filterOffset += firstNonZero;
+ filterLength = lastNonZero + 1 - firstNonZero;
+ SkASSERT(filterLength > 0);
+
+ fFilterValues.append(filterLength, &filterValues[firstNonZero]);
+ } else {
+ // Here all the factors were zeroes.
+ filterLength = 0;
+ }
+
+ FilterInstance instance;
+
+ // We pushed filterLength elements onto fFilterValues
+ instance.fDataLocation = (static_cast<int>(fFilterValues.count()) -
+ filterLength);
+ instance.fOffset = filterOffset;
+ instance.fTrimmedLength = filterLength;
+ instance.fLength = filterSize;
+ fFilters.push_back(instance);
+
+ fMaxFilter = SkTMax(fMaxFilter, filterLength);
+}
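+
+// A worked example of the trimming above (values are illustrative):
+// AddFilter(10, {0, 0, 3, 5, 0}, 5) stores only {3, 5} in fFilterValues and
+// records fOffset = 12, fTrimmedLength = 2, fLength = 5, so callers can still
+// recover the originally specified span.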
+
+const SkConvolutionFilter1D::ConvolutionFixed* SkConvolutionFilter1D::GetSingleFilter(
+ int* specifiedFilterlength,
+ int* filterOffset,
+ int* filterLength) const {
+ const FilterInstance& filter = fFilters[0];
+ *filterOffset = filter.fOffset;
+ *filterLength = filter.fTrimmedLength;
+ *specifiedFilterlength = filter.fLength;
+ if (filter.fTrimmedLength == 0) {
+ return nullptr;
+ }
+
+ return &fFilterValues[filter.fDataLocation];
+}
+
+bool BGRAConvolve2D(const unsigned char* sourceData,
+ int sourceByteRowStride,
+ bool sourceHasAlpha,
+ const SkConvolutionFilter1D& filterX,
+ const SkConvolutionFilter1D& filterY,
+ int outputByteRowStride,
+ unsigned char* output) {
+
+ int maxYFilterSize = filterY.maxFilter();
+
+ // The next row in the input that we will generate a horizontally
+ // convolved row for. If the filter doesn't start at the beginning of the
+ // image (this is the case when we are only resizing a subset), then we
+ // don't want to generate any output rows before that. Compute the starting
+ // row for convolution as the first pixel for the first vertical filter.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filterY.FilterForValue(0, &filterOffset, &filterLength);
+ int nextXRow = filterOffset;
+
+ // We loop over each row in the input doing a horizontal convolution. This
+ // will result in a horizontally convolved image. We write the results into
+ // a circular buffer of convolved rows and do vertical convolution as rows
+ // are available. This prevents us from having to store the entire
+ // intermediate image and helps cache coherency.
+    // We will need four extra rows so that horizontal convolution can be done on
+    // four rows simultaneously. We also pad each row in the row buffer up to a
+    // 32-byte alignment.
+ // TODO(jiesun): We do not use aligned load from row buffer in vertical
+ // convolution pass yet. Somehow Windows does not like it.
+ int rowBufferWidth = (filterX.numValues() + 31) & ~0x1F;
+ int rowBufferHeight = maxYFilterSize +
+ (SkOpts::convolve_4_rows_horizontally != nullptr ? 4 : 0);
+
+ // check for too-big allocation requests : crbug.com/528628
+ {
+ int64_t size = sk_64_mul(rowBufferWidth, rowBufferHeight);
+        // We need some limit, to avoid malloc appearing to succeed (over-committing)
+        // and then crashing when we try to actually use the memory.
+        // 100MB seems big enough to allow "normal" zoom factors and image sizes through
+        // while avoiding the crash seen by the bug (crbug.com/528628).
+ if (size > 100 * 1024 * 1024) {
+// SkDebugf("BGRAConvolve2D: tmp allocation [%lld] too big\n", size);
+ return false;
+ }
+ }
+
+ CircularRowBuffer rowBuffer(rowBufferWidth,
+ rowBufferHeight,
+ filterOffset);
+
+ // Loop over every possible output row, processing just enough horizontal
+ // convolutions to run each subsequent vertical convolution.
+ SkASSERT(outputByteRowStride >= filterX.numValues() * 4);
+ int numOutputRows = filterY.numValues();
+
+ // We need to check which is the last line to convolve before we advance 4
+ // lines in one iteration.
+ int lastFilterOffset, lastFilterLength;
+ filterY.FilterForValue(numOutputRows - 1, &lastFilterOffset,
+ &lastFilterLength);
+
+ for (int outY = 0; outY < numOutputRows; outY++) {
+ filterValues = filterY.FilterForValue(outY,
+ &filterOffset, &filterLength);
+
+ // Generate output rows until we have enough to run the current filter.
+ while (nextXRow < filterOffset + filterLength) {
+ if (SkOpts::convolve_4_rows_horizontally != nullptr &&
+ nextXRow + 3 < lastFilterOffset + lastFilterLength) {
+ const unsigned char* src[4];
+ unsigned char* outRow[4];
+ for (int i = 0; i < 4; ++i) {
+ src[i] = &sourceData[(uint64_t)(nextXRow + i) * sourceByteRowStride];
+ outRow[i] = rowBuffer.advanceRow();
+ }
+ SkOpts::convolve_4_rows_horizontally(src, filterX, outRow, 4*rowBufferWidth);
+ nextXRow += 4;
+ } else {
+ SkOpts::convolve_horizontally(
+ &sourceData[(uint64_t)nextXRow * sourceByteRowStride],
+ filterX, rowBuffer.advanceRow(), sourceHasAlpha);
+ nextXRow++;
+ }
+ }
+
+ // Compute where in the output image this row of final data will go.
+ unsigned char* curOutputRow = &output[(uint64_t)outY * outputByteRowStride];
+
+ // Get the list of rows that the circular buffer has, in order.
+ int firstRowInCircularBuffer;
+ unsigned char* const* rowsToConvolve =
+ rowBuffer.GetRowAddresses(&firstRowInCircularBuffer);
+
+ // Now compute the start of the subset of those rows that the filter needs.
+ unsigned char* const* firstRowForFilter =
+ &rowsToConvolve[filterOffset - firstRowInCircularBuffer];
+
+ SkOpts::convolve_vertically(filterValues, filterLength,
+ firstRowForFilter,
+ filterX.numValues(), curOutputRow,
+ sourceHasAlpha);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkConvolver.h b/gfx/skia/skia/src/core/SkConvolver.h
new file mode 100644
index 0000000000..f7d80c9f1d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvolver.h
@@ -0,0 +1,173 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SK_CONVOLVER_H
+#define SK_CONVOLVER_H
+
+#include "include/core/SkSize.h"
+#include "include/private/SkTDArray.h"
+
+// avoid confusion with Mac OS X's math library (Carbon)
+#if defined(__APPLE__)
+#undef FloatToConvolutionFixed
+#undef ConvolutionFixedToFloat
+#undef FloatToFixed
+#undef FixedToFloat
+#endif
+
+// Represents a filter in one dimension. Each output pixel has one entry in this
+// object for the filter values contributing to it. You build up the filter
+// list by calling AddFilter for each output pixel (in order).
+//
+// We do 2-dimensional convolution by first convolving each row by one
+// SkConvolutionFilter1D, then convolving each column by another one.
+//
+// Entries are stored in ConvolutionFixed point, shifted left by kShiftBits.
+class SkConvolutionFilter1D {
+public:
+ typedef short ConvolutionFixed;
+
+ // The number of bits that ConvolutionFixed point values are shifted by.
+ enum { kShiftBits = 14 };
+
+ SK_API SkConvolutionFilter1D();
+ SK_API ~SkConvolutionFilter1D();
+
+ // Convert between floating point and our ConvolutionFixed point representation.
+ static ConvolutionFixed FloatToFixed(float f) {
+ return static_cast<ConvolutionFixed>(f * (1 << kShiftBits));
+ }
+ static unsigned char FixedToChar(ConvolutionFixed x) {
+ return static_cast<unsigned char>(x >> kShiftBits);
+ }
+ static float FixedToFloat(ConvolutionFixed x) {
+ // The cast relies on ConvolutionFixed being a short, implying that on
+ // the platforms we care about all (16) bits will fit into
+ // the mantissa of a (32-bit) float.
+ static_assert(sizeof(ConvolutionFixed) == 2, "ConvolutionFixed_type_should_fit_in_float_mantissa");
+ float raw = static_cast<float>(x);
+ return ldexpf(raw, -kShiftBits);
+ }
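+
+    // Worked examples with kShiftBits == 14 (illustrative):
+    //     FloatToFixed(0.5f) == 8192     // 0.5 * (1 << 14)
+    //     FixedToFloat(8192) == 0.5f     // ldexpf(8192, -14)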
+
+ // Returns the maximum pixel span of a filter.
+ int maxFilter() const { return fMaxFilter; }
+
+    // Returns the number of filter instances added. This is the dimension of the
+    // output image along the axis being convolved.
+ int numValues() const { return static_cast<int>(fFilters.count()); }
+
+ void reserveAdditional(int filterCount, int filterValueCount) {
+ fFilters.setReserve(fFilters.count() + filterCount);
+ fFilterValues.setReserve(fFilterValues.count() + filterValueCount);
+ }
+
+ // Appends the given list of scaling values for generating a given output
+ // pixel. |filterOffset| is the distance from the edge of the image to where
+ // the scaling factors start. The scaling factors apply to the source pixels
+ // starting from this position, and going for the next |filterLength| pixels.
+ //
+ // You will probably want to make sure your input is normalized (that is,
+    // all entries in |filterValues| sum to one) to prevent affecting the overall
+    // brightness of the image.
+ //
+ // The filterLength must be > 0.
+ void AddFilter(int filterOffset,
+ const ConvolutionFixed* filterValues,
+ int filterLength);
+
+ // Retrieves a filter for the given |valueOffset|, a position in the output
+ // image in the direction we're convolving. The offset and length of the
+ // filter values are put into the corresponding out arguments (see AddFilter
+ // above for what these mean), and a pointer to the first scaling factor is
+ // returned. There will be |filterLength| values in this array.
+ inline const ConvolutionFixed* FilterForValue(int valueOffset,
+ int* filterOffset,
+ int* filterLength) const {
+ const FilterInstance& filter = fFilters[valueOffset];
+ *filterOffset = filter.fOffset;
+ *filterLength = filter.fTrimmedLength;
+ if (filter.fTrimmedLength == 0) {
+ return nullptr;
+ }
+ return &fFilterValues[filter.fDataLocation];
+ }
+
+ // Retrieves the filter for the offset 0, presumed to be the one and only.
+ // The offset and length of the filter values are put into the corresponding
+    // out arguments (see AddFilter). Note that |filterLength| and
+ // |specifiedFilterLength| may be different if leading/trailing zeros of the
+ // original floating point form were clipped.
+ // There will be |filterLength| values in the return array.
+ // Returns nullptr if the filter is 0-length (for instance when all floating
+ // point values passed to AddFilter were clipped to 0).
+ SK_API const ConvolutionFixed* GetSingleFilter(int* specifiedFilterLength,
+ int* filterOffset,
+ int* filterLength) const;
+
+ // Add another value to the fFilterValues array -- useful for
+ // SIMD padding which happens outside of this class.
+    void addFilterValue(ConvolutionFixed val) {
+        fFilterValues.push_back(val);
+    }
+private:
+ struct FilterInstance {
+ // Offset within filterValues for this instance of the filter.
+ int fDataLocation;
+
+ // Distance from the left of the filter to the center. IN PIXELS
+ int fOffset;
+
+ // Number of values in this filter instance.
+ int fTrimmedLength;
+
+ // Filter length as specified. Note that this may be different from
+ // 'trimmed_length' if leading/trailing zeros of the original floating
+ // point form were clipped differently on each tail.
+ int fLength;
+ };
+
+ // Stores the information for each filter added to this class.
+ SkTDArray<FilterInstance> fFilters;
+
+ // We store all the filter values in this flat list, indexed by
+ // |FilterInstance.data_location| to avoid the mallocs required for storing
+ // each one separately.
+ SkTDArray<ConvolutionFixed> fFilterValues;
+
+ // The maximum size of any filter we've added.
+ int fMaxFilter;
+};
+
+// Does a two-dimensional convolution on the given source image.
+//
+// It is assumed the source pixel offsets referenced in the input filters
+// reference only valid pixels, so the source image size is not required. Each
+// row of the source image starts |sourceByteRowStride| after the previous
+// one (this allows you to have rows with some padding at the end).
+//
+// The result will be put into the given output buffer. The destination image
+// size will be xfilter.numValues() * yfilter.numValues() pixels. It will be
+// in rows of exactly xfilter.numValues() * 4 bytes.
+//
+// |sourceHasAlpha| is a hint that allows us to avoid doing computations on
+// the alpha channel if the image is opaque. If you don't know, set this to
+// true and it will work properly, but setting this to false will be a few
+// percent faster if you know the image is opaque.
+//
+// The layout in memory is assumed to be 4-bytes per pixel in B-G-R-A order
+// (this is ARGB when loaded into 32-bit words on a little-endian machine).
+//
+// Returns false if it was unable to perform the convolution/rescale, in which
+// case the output buffer is assumed to be undefined.
+SK_API bool BGRAConvolve2D(const unsigned char* sourceData,
+ int sourceByteRowStride,
+ bool sourceHasAlpha,
+ const SkConvolutionFilter1D& xfilter,
+ const SkConvolutionFilter1D& yfilter,
+ int outputByteRowStride,
+ unsigned char* output);
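+
+// Example (sketch): rescaling into a tightly packed BGRA output buffer, given
+// an |xfilter| and |yfilter| built with one AddFilter call per output
+// column/row, and a source image |src| with |srcRowBytes| bytes per row:
+//
+//   SkTDArray<unsigned char> out;
+//   out.setCount(xfilter.numValues() * 4 * yfilter.numValues());
+//   bool ok = BGRAConvolve2D(src, srcRowBytes, true /* sourceHasAlpha */,
+//                            xfilter, yfilter,
+//                            xfilter.numValues() * 4, out.begin());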
+
+#endif // SK_CONVOLVER_H
diff --git a/gfx/skia/skia/src/core/SkCoreBlitters.h b/gfx/skia/skia/src/core/SkCoreBlitters.h
new file mode 100644
index 0000000000..600a9d1680
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCoreBlitters.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoreBlitters_DEFINED
+#define SkCoreBlitters_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkRasterBlitter : public SkBlitter {
+public:
+ SkRasterBlitter(const SkPixmap& device) : fDevice(device) {}
+
+protected:
+ const SkPixmap fDevice;
+
+private:
+ typedef SkBlitter INHERITED;
+};
+
+class SkShaderBlitter : public SkRasterBlitter {
+public:
+ /**
+ * The storage for shaderContext is owned by the caller, but the object itself is not.
+ * The blitter only ensures that the storage always holds a live object, but it may
+ * exchange that object.
+ */
+ SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+ SkShaderBase::Context* shaderContext);
+ virtual ~SkShaderBlitter();
+
+protected:
+ uint32_t fShaderFlags;
+ const SkShader* fShader;
+ SkShaderBase::Context* fShaderContext;
+ bool fConstInY;
+
+private:
+ // illegal
+ SkShaderBlitter& operator=(const SkShaderBlitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkA8_Coverage_Blitter : public SkRasterBlitter {
+public:
+ SkA8_Coverage_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+
+private:
+ typedef SkRasterBlitter INHERITED;
+};
+
+////////////////////////////////////////////////////////////////
+
+class SkARGB32_Blitter : public SkRasterBlitter {
+public:
+ SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+protected:
+ SkColor fColor;
+ SkPMColor fPMColor;
+
+private:
+ unsigned fSrcA, fSrcR, fSrcG, fSrcB;
+
+ // illegal
+ SkARGB32_Blitter& operator=(const SkARGB32_Blitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+class SkARGB32_Opaque_Blitter : public SkARGB32_Blitter {
+public:
+ SkARGB32_Opaque_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) { SkASSERT(paint.getAlpha() == 0xFF); }
+ void blitMask(const SkMask&, const SkIRect&) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ typedef SkARGB32_Blitter INHERITED;
+};
+
+class SkARGB32_Black_Blitter : public SkARGB32_Opaque_Blitter {
+public:
+ SkARGB32_Black_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) {}
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ typedef SkARGB32_Opaque_Blitter INHERITED;
+};
+
+class SkARGB32_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkARGB32_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShaderBase::Context* shaderContext);
+ ~SkARGB32_Shader_Blitter() override;
+ void blitH(int x, int y, int width) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+
+private:
+ SkXfermode* fXfermode;
+ SkPMColor* fBuffer;
+ SkBlitRow::Proc32 fProc32;
+ SkBlitRow::Proc32 fProc32Blend;
+ bool fShadeDirectlyIntoDevice;
+
+ // illegal
+ SkARGB32_Shader_Blitter& operator=(const SkARGB32_Shader_Blitter&);
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+typedef void (*SkS32D16BlendProc)(uint16_t*, const SkPMColor*, int, uint8_t);
+
+class SkRGB565_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkRGB565_Shader_Blitter(const SkPixmap& device, const SkPaint&, SkShaderBase::Context*);
+ ~SkRGB565_Shader_Blitter() override;
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
+
+ static bool Supports(const SkPixmap& device, const SkPaint&);
+
+private:
+ SkPMColor* fBuffer;
+ SkS32D16BlendProc fBlend;
+ SkS32D16BlendProc fBlendCoverage;
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Neither of these ever returns nullptr, but this first factory may return a SkNullBlitter.
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap&, const SkPaint&, const SkMatrix& ctm,
+ SkArenaAlloc*);
+// Use this if you've pre-baked a shader pipeline, including modulating with paint alpha.
+// This factory never returns an SkNullBlitter.
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap&, const SkPaint&,
+ const SkRasterPipeline& shaderPipeline,
+ bool shader_is_opaque,
+ SkArenaAlloc*);
+
+SkBlitter* SkCreateSkVMBlitter(const SkPixmap&, const SkPaint&, const SkMatrix& ctm, SkArenaAlloc*);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCoverageModePriv.h b/gfx/skia/skia/src/core/SkCoverageModePriv.h
new file mode 100644
index 0000000000..09bfb5bddb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCoverageModePriv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoverageModePriv_DEFINED
+#define SkCoverageModePriv_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCoverageMode.h"
+
+SkBlendMode SkUncorrelatedCoverageModeToBlendMode(SkCoverageMode);
+
+#if 0
+// Experimental idea to extend to overlap types
+
+Master calculation = X(S,D) + Y(S,D) + Z(S,D)
+
+enum class SkCoverageOverlap {
+ // X Y Z
+ kUncorrelated, // S*D S*(1-D) D*(1-S)
+ kConjoint, // min(S,D) max(S-D,0) max(D-S,0)
+ kDisjoint, // max(S+D-1,0) min(S,1-D) min(D,1-S)
+
+ kLast = kDisjoint
+};
+
+// The coverage modes each have a set of coefficients to be applied to the general equation (above)
+//
+// e.g.
+// kXor+conjoint = max(S-D,0) + max(D-S,0) ==> abs(D-S)
+//
+kUnion, // 1,1,1
+kIntersect, // 1,0,0
+kDifference, // 0,1,0
+kReverseDifference, // 0,0,1
+kXor, // 0,1,1
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCpu.cpp b/gfx/skia/skia/src/core/SkCpu.cpp
new file mode 100644
index 0000000000..bdae9d3a8a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkCpu.h"
+
+#if defined(SK_CPU_X86)
+ #if defined(SK_BUILD_FOR_WIN) && !defined(__MINGW32__)
+ #include <intrin.h>
+ static void cpuid (uint32_t abcd[4]) { __cpuid ((int*)abcd, 1); }
+ static void cpuid7(uint32_t abcd[4]) { __cpuidex((int*)abcd, 7, 0); }
+ static uint64_t xgetbv(uint32_t xcr) { return _xgetbv(xcr); }
+ #else
+ #include <cpuid.h>
+ #if !defined(__cpuid_count) // Old Mac Clang doesn't have this defined.
+ #define __cpuid_count(eax, ecx, a, b, c, d) \
+ __asm__("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eax), "2"(ecx))
+ #endif
+ static void cpuid (uint32_t abcd[4]) { __get_cpuid(1, abcd+0, abcd+1, abcd+2, abcd+3); }
+ static void cpuid7(uint32_t abcd[4]) {
+ __cpuid_count(7, 0, abcd[0], abcd[1], abcd[2], abcd[3]);
+ }
+ static uint64_t xgetbv(uint32_t xcr) {
+ uint32_t eax, edx;
+ __asm__ __volatile__ ( "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return (uint64_t)(edx) << 32 | eax;
+ }
+ #endif
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ uint32_t abcd[4] = {0,0,0,0};
+
+ // You might want to refer to http://www.sandpile.org/x86/cpuid.htm
+
+ cpuid(abcd);
+ if (abcd[3] & (1<<25)) { features |= SkCpu:: SSE1; }
+ if (abcd[3] & (1<<26)) { features |= SkCpu:: SSE2; }
+ if (abcd[2] & (1<< 0)) { features |= SkCpu:: SSE3; }
+ if (abcd[2] & (1<< 9)) { features |= SkCpu::SSSE3; }
+ if (abcd[2] & (1<<19)) { features |= SkCpu::SSE41; }
+ if (abcd[2] & (1<<20)) { features |= SkCpu::SSE42; }
+
+ if ((abcd[2] & (3<<26)) == (3<<26) // XSAVE + OSXSAVE
+ && (xgetbv(0) & (3<<1)) == (3<<1)) { // XMM and YMM state enabled.
+ if (abcd[2] & (1<<28)) { features |= SkCpu:: AVX; }
+ if (abcd[2] & (1<<29)) { features |= SkCpu::F16C; }
+ if (abcd[2] & (1<<12)) { features |= SkCpu:: FMA; }
+
+ cpuid7(abcd);
+ if (abcd[1] & (1<<5)) { features |= SkCpu::AVX2; }
+ if (abcd[1] & (1<<3)) { features |= SkCpu::BMI1; }
+ if (abcd[1] & (1<<8)) { features |= SkCpu::BMI2; }
+
+ if ((xgetbv(0) & (7<<5)) == (7<<5)) { // All ZMM state bits enabled too.
+ if (abcd[1] & (1<<16)) { features |= SkCpu::AVX512F; }
+ if (abcd[1] & (1<<17)) { features |= SkCpu::AVX512DQ; }
+ if (abcd[1] & (1<<21)) { features |= SkCpu::AVX512IFMA; }
+ if (abcd[1] & (1<<26)) { features |= SkCpu::AVX512PF; }
+ if (abcd[1] & (1<<27)) { features |= SkCpu::AVX512ER; }
+ if (abcd[1] & (1<<28)) { features |= SkCpu::AVX512CD; }
+ if (abcd[1] & (1<<30)) { features |= SkCpu::AVX512BW; }
+ if (abcd[1] & (1<<31)) { features |= SkCpu::AVX512VL; }
+ }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM64) && __has_include(<sys/auxv.h>)
+ #include <sys/auxv.h>
+
+ static uint32_t read_cpu_features() {
+ const uint32_t kHWCAP_CRC32 = (1<< 7),
+ kHWCAP_ASIMDHP = (1<<10);
+
+ uint32_t features = 0;
+ uint32_t hwcaps = getauxval(AT_HWCAP);
+ if (hwcaps & kHWCAP_CRC32 ) { features |= SkCpu::CRC32; }
+ if (hwcaps & kHWCAP_ASIMDHP) { features |= SkCpu::ASIMDHP; }
+
+ // The Samsung Mongoose 3 core sets the ASIMDHP bit but doesn't support it.
+ for (int core = 0; features & SkCpu::ASIMDHP; core++) {
+ // These /sys files contain the core's MIDR_EL1 register, the source of
+ // CPU {implementer, variant, part, revision} you'd see in /proc/cpuinfo.
+ SkString path =
+ SkStringPrintf("/sys/devices/system/cpu/cpu%d/regs/identification/midr_el1", core);
+
+ // Can't use SkData::MakeFromFileName() here, I think because /sys can't be mmap()'d.
+ SkFILEStream midr_el1(path.c_str());
+ if (!midr_el1.isValid()) {
+ // This is our ordinary exit path.
+ // If we ask for MIDR_EL1 from a core that doesn't exist, we've checked all cores.
+ if (core == 0) {
+ // On the other hand, if we can't read MIDR_EL1 from any core, assume the worst.
+ features &= ~(SkCpu::ASIMDHP);
+ }
+ break;
+ }
+
+ const char kMongoose3[] = "0x00000000531f0020"; // 53 == Samsung.
+ char buf[SK_ARRAY_COUNT(kMongoose3) - 1]; // No need for the terminating \0.
+
+ if (SK_ARRAY_COUNT(buf) != midr_el1.read(buf, SK_ARRAY_COUNT(buf))
+ || 0 == memcmp(kMongoose3, buf, SK_ARRAY_COUNT(buf))) {
+ features &= ~(SkCpu::ASIMDHP);
+ }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM32) && __has_include(<sys/auxv.h>) && \
+ (!defined(__ANDROID_API__) || __ANDROID_API__ >= 18)
+ // sys/auxv.h will always be present in the Android NDK due to unified
+    // headers, but getauxval is only defined for API >= 18.
+ #include <sys/auxv.h>
+
+ static uint32_t read_cpu_features() {
+ const uint32_t kHWCAP_NEON = (1<<12);
+ const uint32_t kHWCAP_VFPv4 = (1<<16);
+
+ uint32_t features = 0;
+ uint32_t hwcaps = getauxval(AT_HWCAP);
+ if (hwcaps & kHWCAP_NEON ) {
+ features |= SkCpu::NEON;
+ if (hwcaps & kHWCAP_VFPv4) { features |= SkCpu::NEON_FMA|SkCpu::VFP_FP16; }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM32) && __has_include(<cpu-features.h>)
+ #include <cpu-features.h>
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ uint64_t cpu_features = android_getCpuFeatures();
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON) { features |= SkCpu::NEON; }
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON_FMA) { features |= SkCpu::NEON_FMA; }
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) { features |= SkCpu::VFP_FP16; }
+ return features;
+ }
+
+#else
+ static uint32_t read_cpu_features() {
+ return 0;
+ }
+
+#endif
+
+uint32_t SkCpu::gCachedFeatures = 0;
+
+void SkCpu::CacheRuntimeFeatures() {
+ static SkOnce once;
+ once([] { gCachedFeatures = read_cpu_features(); });
+}
diff --git a/gfx/skia/skia/src/core/SkCpu.h b/gfx/skia/skia/src/core/SkCpu.h
new file mode 100644
index 0000000000..d4a9cdc359
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCpu_DEFINED
+#define SkCpu_DEFINED
+
+#include "include/core/SkTypes.h"
+
+struct SkCpu {
+ enum {
+ SSE1 = 1 << 0,
+ SSE2 = 1 << 1,
+ SSE3 = 1 << 2,
+ SSSE3 = 1 << 3,
+ SSE41 = 1 << 4,
+ SSE42 = 1 << 5,
+ AVX = 1 << 6,
+ F16C = 1 << 7,
+ FMA = 1 << 8,
+ AVX2 = 1 << 9,
+ BMI1 = 1 << 10,
+ BMI2 = 1 << 11,
+ // Handy alias for all the cool Haswell+ instructions.
+ HSW = AVX2 | BMI1 | BMI2 | F16C | FMA,
+
+ AVX512F = 1 << 12,
+ AVX512DQ = 1 << 13,
+ AVX512IFMA = 1 << 14,
+ AVX512PF = 1 << 15,
+ AVX512ER = 1 << 16,
+ AVX512CD = 1 << 17,
+ AVX512BW = 1 << 18,
+ AVX512VL = 1 << 19,
+
+ // Handy alias for all the cool Skylake Xeon+ instructions.
+ SKX = AVX512F | AVX512DQ | AVX512CD | AVX512BW | AVX512VL,
+ };
+ enum {
+ NEON = 1 << 0,
+ NEON_FMA = 1 << 1,
+ VFP_FP16 = 1 << 2,
+ CRC32 = 1 << 3,
+ ASIMDHP = 1 << 4,
+ };
+
+ static void CacheRuntimeFeatures();
+ static bool Supports(uint32_t);
+private:
+ static uint32_t gCachedFeatures;
+};
+
+inline bool SkCpu::Supports(uint32_t mask) {
+ uint32_t features = gCachedFeatures;
+
+ // If we mask in compile-time known lower limits, the compiler can
+ // often compile away this entire function.
+#if SK_CPU_X86
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ features |= SSE1;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ features |= SSE2;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ features |= SSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ features |= SSSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ features |= SSE41;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ features |= SSE42;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ features |= AVX;
+ #endif
+ // F16C goes here if we add SK_CPU_SSE_LEVEL_F16C
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ features |= AVX2;
+ #endif
+ // FMA doesn't fit neatly into this total ordering.
+ // It's available on Haswell+ just like AVX2, but it's technically a different bit.
+ // TODO: circle back on this if we find ourselves limited by lack of compile-time FMA
+
+ #if defined(SK_CPU_LIMIT_SSE41)
+ features &= (SkCpu::SSE1 | SkCpu::SSE2 | SkCpu::SSE3 | SkCpu::SSSE3 | SkCpu::SSE41);
+ #elif defined(SK_CPU_LIMIT_SSE2)
+ features &= (SkCpu::SSE1 | SkCpu::SSE2);
+ #endif
+
+#else
+ #if defined(SK_ARM_HAS_NEON)
+ features |= NEON;
+ #endif
+
+ #if defined(SK_CPU_ARM64)
+ features |= NEON|NEON_FMA|VFP_FP16;
+ #endif
+
+ #if defined(SK_ARM_HAS_CRC32)
+ features |= CRC32;
+ #endif
+
+#endif
+ return (features & mask) == mask;
+}
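+
+// Typical usage (a sketch): cache the feature bits once at startup, then
+// branch on them when choosing specialized routines:
+//
+//   SkCpu::CacheRuntimeFeatures();
+//   if (SkCpu::Supports(SkCpu::HSW)) {
+//       // an AVX2/BMI/F16C/FMA-specialized implementation is safe here
+//   }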
+
+#endif // SkCpu_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.cpp b/gfx/skia/skia/src/core/SkCubicClipper.cpp
new file mode 100644
index 0000000000..b774f15301
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/core/SkCubicClipper.h"
+#include "src/core/SkGeometry.h"
+
+#include <utility>
+
+SkCubicClipper::SkCubicClipper() {
+ fClip.setEmpty();
+}
+
+void SkCubicClipper::setClip(const SkIRect& clip) {
+    // convert to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+
+bool SkCubicClipper::ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t) {
+ SkScalar ycrv[4];
+ ycrv[0] = pts[0].fY - y;
+ ycrv[1] = pts[1].fY - y;
+ ycrv[2] = pts[2].fY - y;
+ ycrv[3] = pts[3].fY - y;
+
+#ifdef NEWTON_RAPHSON // Quadratic convergence, typically <= 3 iterations.
+ // Initial guess.
+ // TODO(turk): Check for zero denominator? Shouldn't happen unless the curve
+ // is not only monotonic but degenerate.
+ SkScalar t1 = ycrv[0] / (ycrv[0] - ycrv[3]);
+
+ // Newton's iterations.
+ const SkScalar tol = SK_Scalar1 / 16384; // This leaves 2 fixed noise bits.
+ SkScalar t0;
+ const int maxiters = 5;
+ int iters = 0;
+ bool converged;
+ do {
+ t0 = t1;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], t0);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], t0);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], t0);
+ SkScalar y012 = SkScalarInterp(y01, y12, t0);
+ SkScalar y123 = SkScalarInterp(y12, y23, t0);
+ SkScalar y0123 = SkScalarInterp(y012, y123, t0);
+ SkScalar yder = (y123 - y012) * 3;
+ // TODO(turk): check for yder==0: horizontal.
+ t1 -= y0123 / yder;
+ converged = SkScalarAbs(t1 - t0) <= tol; // NaN-safe
+ ++iters;
+ } while (!converged && (iters < maxiters));
+ *t = t1; // Return the result.
+
+ // The result might be valid, even if outside of the range [0, 1], but
+ // we never evaluate a Bezier outside this interval, so we return false.
+ if (t1 < 0 || t1 > SK_Scalar1)
+ return false; // This shouldn't happen, but check anyway.
+ return converged;
+
+#else // BISECTION // Linear convergence, typically 16 iterations.
+
+ // Check that the endpoints straddle zero.
+ SkScalar tNeg, tPos; // Negative and positive function parameters.
+ if (ycrv[0] < 0) {
+ if (ycrv[3] < 0)
+ return false;
+ tNeg = 0;
+ tPos = SK_Scalar1;
+ } else if (ycrv[0] > 0) {
+ if (ycrv[3] > 0)
+ return false;
+ tNeg = SK_Scalar1;
+ tPos = 0;
+ } else {
+ *t = 0;
+ return true;
+ }
+
+ const SkScalar tol = SK_Scalar1 / 65536; // 1 for fixed, 1e-5 for float.
+ int iters = 0;
+ do {
+ SkScalar tMid = (tPos + tNeg) / 2;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], tMid);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], tMid);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], tMid);
+ SkScalar y012 = SkScalarInterp(y01, y12, tMid);
+ SkScalar y123 = SkScalarInterp(y12, y23, tMid);
+ SkScalar y0123 = SkScalarInterp(y012, y123, tMid);
+ if (y0123 == 0) {
+ *t = tMid;
+ return true;
+ }
+ if (y0123 < 0) tNeg = tMid;
+ else tPos = tMid;
+ ++iters;
+    } while (!(SkScalarAbs(tPos - tNeg) <= tol)); // NaN-safe
+
+ *t = (tNeg + tPos) / 2;
+ return true;
+#endif // BISECTION
+}
+
+
+bool SkCubicClipper::clipCubic(const SkPoint srcPts[4], SkPoint dst[4]) {
+ bool reverse;
+
+ // we need the data to be monotonically descending in Y
+ if (srcPts[0].fY > srcPts[3].fY) {
+ dst[0] = srcPts[3];
+ dst[1] = srcPts[2];
+ dst[2] = srcPts[1];
+ dst[3] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 4 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[3].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[7]; // for SkChopCubicAt
+
+ // are we partially above
+ if (dst[0].fY < ctop && ChopMonoAtY(dst, ctop, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[0] = tmp[3];
+ dst[1] = tmp[4];
+ dst[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (dst[3].fY > cbot && ChopMonoAtY(dst, cbot, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ dst[3] = tmp[3];
+ }
+
+ if (reverse) {
+ using std::swap;
+ swap(dst[0], dst[3]);
+ swap(dst[1], dst[2]);
+ }
+ return true;
+}
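+
+// Example (sketch): clipping a Y-monotonic cubic |mono| against a device clip
+// |devClip| (an SkIRect) before scan conversion:
+//
+//   SkCubicClipper clipper;
+//   clipper.setClip(devClip);
+//   SkPoint clipped[4];
+//   if (clipper.clipCubic(mono, clipped)) {
+//       // |clipped| now lies within the vertical extent of |devClip|
+//   }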
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.h b/gfx/skia/skia/src/core/SkCubicClipper.h
new file mode 100644
index 0000000000..a92a08d398
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkCubicClipper_DEFINED
+#define SkCubicClipper_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+/** This class is initialized with a clip rectangle, and then can be fed cubics,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkCubicClipper {
+public:
+ SkCubicClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool SK_WARN_UNUSED_RESULT clipCubic(const SkPoint src[4], SkPoint dst[4]);
+
+ static bool SK_WARN_UNUSED_RESULT ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t);
+private:
+ SkRect fClip;
+};
+
+#endif // SkCubicClipper_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCubicMap.cpp b/gfx/skia/skia/src/core/SkCubicMap.cpp
new file mode 100644
index 0000000000..30cd3b4bed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicMap.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCubicMap.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkOpts.h"
+
+//#define CUBICMAP_TRACK_MAX_ERROR
+
+#ifdef CUBICMAP_TRACK_MAX_ERROR
+#include "src/pathops/SkPathOpsCubic.h"
+#endif
+
+static inline bool nearly_zero(SkScalar x) {
+ SkASSERT(x >= 0);
+ return x <= 0.0000000001f;
+}
+
+#ifdef CUBICMAP_TRACK_MAX_ERROR
+ static int max_iters;
+#endif
+
+#ifdef CUBICMAP_TRACK_MAX_ERROR
+static float compute_slow(float A, float B, float C, float x) {
+ double roots[3];
+ SkDEBUGCODE(int count =) SkDCubic::RootsValidT(A, B, C, -x, roots);
+ SkASSERT(count == 1);
+ return (float)roots[0];
+}
+
+static float max_err;
+#endif
+
+static float compute_t_from_x(float A, float B, float C, float x) {
+#ifdef CUBICMAP_TRACK_MAX_ERROR
+ float answer = compute_slow(A, B, C, x);
+#endif
+ float answer2 = SkOpts::cubic_solver(A, B, C, -x);
+
+#ifdef CUBICMAP_TRACK_MAX_ERROR
+ float err = sk_float_abs(answer - answer2);
+ if (err > max_err) {
+ max_err = err;
+ SkDebugf("max error %g\n", max_err);
+ }
+#endif
+ return answer2;
+}
+
+float SkCubicMap::computeYFromX(float x) const {
+ x = SkScalarPin(x, 0, 1);
+
+ if (nearly_zero(x) || nearly_zero(1 - x)) {
+ return x;
+ }
+ if (fType == kLine_Type) {
+ return x;
+ }
+ float t;
+ if (fType == kCubeRoot_Type) {
+ t = sk_float_pow(x / fCoeff[0].fX, 1.0f / 3);
+ } else {
+ t = compute_t_from_x(fCoeff[0].fX, fCoeff[1].fX, fCoeff[2].fX, x);
+ }
+ float a = fCoeff[0].fY;
+ float b = fCoeff[1].fY;
+ float c = fCoeff[2].fY;
+ float y = ((a * t + b) * t + c) * t;
+
+ return y;
+}
+
+static inline bool coeff_nearly_zero(float delta) {
+ return sk_float_abs(delta) <= 0.0000001f;
+}
+
+SkCubicMap::SkCubicMap(SkPoint p1, SkPoint p2) {
+ // Clamp X values only (we allow Ys outside [0..1]).
+ p1.fX = SkTMin(SkTMax(p1.fX, 0.0f), 1.0f);
+ p2.fX = SkTMin(SkTMax(p2.fX, 0.0f), 1.0f);
+
+ Sk2s s1 = Sk2s::Load(&p1) * 3;
+ Sk2s s2 = Sk2s::Load(&p2) * 3;
+
+ (Sk2s(1) + s1 - s2).store(&fCoeff[0]);
+ (s2 - s1 - s1).store(&fCoeff[1]);
+ s1.store(&fCoeff[2]);
+
+ fType = kSolver_Type;
+ if (SkScalarNearlyEqual(p1.fX, p1.fY) && SkScalarNearlyEqual(p2.fX, p2.fY)) {
+ fType = kLine_Type;
+ } else if (coeff_nearly_zero(fCoeff[1].fX) && coeff_nearly_zero(fCoeff[2].fX)) {
+ fType = kCubeRoot_Type;
+ }
+}
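+
+// For example, the CSS "ease-in-out" timing curve corresponds to control
+// points (0.42, 0) and (0.58, 1) (a sketch; any points with X in [0..1] work):
+//
+//   SkCubicMap map({0.42f, 0}, {0.58f, 1});
+//   float y = map.computeYFromX(0.5f);   // ~0.5f, since the curve is symmetric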
+
+SkPoint SkCubicMap::computeFromT(float t) const {
+ Sk2s a = Sk2s::Load(&fCoeff[0]);
+ Sk2s b = Sk2s::Load(&fCoeff[1]);
+ Sk2s c = Sk2s::Load(&fCoeff[2]);
+
+ SkPoint result;
+ (((a * t + b) * t + c) * t).store(&result);
+ return result;
+}
diff --git a/gfx/skia/skia/src/core/SkCubicSolver.h b/gfx/skia/skia/src/core/SkCubicSolver.h
new file mode 100644
index 0000000000..f5bd9fef62
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicSolver.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCubicSolver_DEFINED
+#define SkCubicSolver_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkFloatingPoint.h"
+
+//#define CUBICMAP_TRACK_MAX_ERROR
+
+namespace SK_OPTS_NS {
+
+ static float eval_poly(float t, float b) {
+ return b;
+ }
+
+ template <typename... Rest>
+ static float eval_poly(float t, float m, float b, Rest... rest) {
+ return eval_poly(t, sk_fmaf(m,t,b), rest...);
+ }
+
+ inline float cubic_solver(float A, float B, float C, float D) {
+ #ifdef CUBICMAP_TRACK_MAX_ERROR
+ static int max_iters = 0;
+ #endif
+
+ #ifdef SK_DEBUG
+ auto valid = [](float t) {
+ return t >= 0 && t <= 1;
+ };
+ #endif
+
+ auto guess_nice_cubic_root = [](float a, float b, float c, float d) {
+ return -d;
+ };
+ float t = guess_nice_cubic_root(A, B, C, D);
+
+ int iters = 0;
+ const int MAX_ITERS = 8;
+ for (; iters < MAX_ITERS; ++iters) {
+ SkASSERT(valid(t));
+ float f = eval_poly(t, A,B,C,D); // f = At^3 + Bt^2 + Ct + D
+ if (sk_float_abs(f) <= 0.00005f) {
+ break;
+ }
+ float fp = eval_poly(t, 3*A, 2*B, C); // f' = 3At^2 + 2Bt + C
+ float fpp = eval_poly(t, 3*A+3*A, 2*B); // f'' = 6At + 2B
+
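+            // One step of Halley's method:
+            //   t -= 2*f*f' / (2*f'*f' - f*f'')
+            // This converges faster than plain Newton's (t -= f/f'), at the
+            // cost of also evaluating f''.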
+ float numer = 2 * fp * f;
+ float denom = sk_fmaf(2*fp, fp, -(f*fpp));
+
+ t -= numer / denom;
+ }
+
+ #ifdef CUBICMAP_TRACK_MAX_ERROR
+ if (max_iters < iters) {
+ max_iters = iters;
+ SkDebugf("max_iters %d\n", max_iters);
+ }
+ #endif
+ SkASSERT(valid(t));
+ return t;
+ }
+
+} // namespace SK_OPTS_NS
+#endif
diff --git a/gfx/skia/skia/src/core/SkData.cpp b/gfx/skia/skia/src/core/SkData.cpp
new file mode 100644
index 0000000000..ef3f42e5ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkData.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include <new>
+
+SkData::SkData(const void* ptr, size_t size, ReleaseProc proc, void* context) {
+ fPtr = const_cast<void*>(ptr);
+ fSize = size;
+ fReleaseProc = proc;
+ fReleaseProcContext = context;
+}
+
+/** This constructor means we are inline with our fPtr's contents.
+ * Thus we set fPtr to point right after this.
+ */
+SkData::SkData(size_t size) {
+ fPtr = (char*)(this + 1); // contents are immediately after this
+ fSize = size;
+ fReleaseProc = nullptr;
+ fReleaseProcContext = nullptr;
+}
+
+SkData::~SkData() {
+ if (fReleaseProc) {
+ fReleaseProc(fPtr, fReleaseProcContext);
+ }
+}
+
+bool SkData::equals(const SkData* other) const {
+ if (nullptr == other) {
+ return false;
+ }
+
+ return fSize == other->fSize && !memcmp(fPtr, other->fPtr, fSize);
+}
+
+size_t SkData::copyRange(size_t offset, size_t length, void* buffer) const {
+ size_t available = fSize;
+ if (offset >= available || 0 == length) {
+ return 0;
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ memcpy(buffer, this->bytes() + offset, length);
+ return length;
+}
+
+void SkData::operator delete(void* p) {
+ ::operator delete(p);
+}
+
+sk_sp<SkData> SkData::PrivateNewWithCopy(const void* srcOrNull, size_t length) {
+ if (0 == length) {
+ return SkData::MakeEmpty();
+ }
+
+ const size_t actualLength = length + sizeof(SkData);
+ SkASSERT_RELEASE(length < actualLength); // Check for overflow.
+
+ void* storage = ::operator new (actualLength);
+ sk_sp<SkData> data(new (storage) SkData(length));
+ if (srcOrNull) {
+ memcpy(data->writable_data(), srcOrNull, length);
+ }
+ return data;
+}
+
+void SkData::DummyReleaseProc(const void*, void*) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeEmpty() {
+ static SkOnce once;
+ static SkData* empty;
+
+ once([]{ empty = new SkData(nullptr, 0, nullptr, nullptr); });
+ return sk_ref_sp(empty);
+}
+
+// assumes fPtr was allocated via sk_malloc
+static void sk_free_releaseproc(const void* ptr, void*) {
+ sk_free((void*)ptr);
+}
+
+sk_sp<SkData> SkData::MakeFromMalloc(const void* data, size_t length) {
+ return sk_sp<SkData>(new SkData(data, length, sk_free_releaseproc, nullptr));
+}
+
+sk_sp<SkData> SkData::MakeWithCopy(const void* src, size_t length) {
+ SkASSERT(src);
+ return PrivateNewWithCopy(src, length);
+}
+
+sk_sp<SkData> SkData::MakeUninitialized(size_t length) {
+ return PrivateNewWithCopy(nullptr, length);
+}
+
+sk_sp<SkData> SkData::MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx) {
+ return sk_sp<SkData>(new SkData(ptr, length, proc, ctx));
+}
+
+// assumes fPtr was allocated with sk_fmmap
+static void sk_mmap_releaseproc(const void* addr, void* ctx) {
+ size_t length = reinterpret_cast<size_t>(ctx);
+ sk_fmunmap(addr, length);
+}
+
+sk_sp<SkData> SkData::MakeFromFILE(FILE* f) {
+ size_t size;
+ void* addr = sk_fmmap(f, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
+}
+
+sk_sp<SkData> SkData::MakeFromFileName(const char path[]) {
+ FILE* f = path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr;
+ if (nullptr == f) {
+ return nullptr;
+ }
+ auto data = MakeFromFILE(f);
+ sk_fclose(f);
+ return data;
+}
+
+sk_sp<SkData> SkData::MakeFromFD(int fd) {
+ size_t size;
+ void* addr = sk_fdmmap(fd, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
+}
+
+// assumes context is a SkData
+static void sk_dataref_releaseproc(const void*, void* context) {
+ SkData* src = reinterpret_cast<SkData*>(context);
+ src->unref();
+}
+
+sk_sp<SkData> SkData::MakeSubset(const SkData* src, size_t offset, size_t length) {
+ /*
+ We could, if we wanted/need to, just make a deep copy of src's data,
+ rather than referencing it. This would duplicate the storage (of the
+ subset amount) but would possibly allow src to go out of scope sooner.
+ */
+
+ size_t available = src->size();
+ if (offset >= available || 0 == length) {
+ return SkData::MakeEmpty();
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ src->ref(); // this will be balanced in sk_dataref_releaseproc
+ return sk_sp<SkData>(new SkData(src->bytes() + offset, length, sk_dataref_releaseproc,
+ const_cast<SkData*>(src)));
+}
+
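+// Example (sketch): the factories compose, and a subset shares the original's
+// storage rather than copying it:
+//
+//   sk_sp<SkData> whole = SkData::MakeWithCopy(bytes, byteCount);
+//   sk_sp<SkData> tail  = SkData::MakeSubset(whole.get(), 16, byteCount - 16);
+//
+// |bytes| and |byteCount| are caller-supplied; |tail| keeps |whole| alive via
+// sk_dataref_releaseproc above.
+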
+sk_sp<SkData> SkData::MakeWithCString(const char cstr[]) {
+ size_t size;
+ if (nullptr == cstr) {
+ cstr = "";
+ size = 1;
+ } else {
+ size = strlen(cstr) + 1;
+ }
+ return MakeWithCopy(cstr, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeFromStream(SkStream* stream, size_t size) {
+ sk_sp<SkData> data(SkData::MakeUninitialized(size));
+ if (stream->read(data->writable_data(), size) != size) {
+ return nullptr;
+ }
+ return data;
+}
diff --git a/gfx/skia/skia/src/core/SkDataTable.cpp b/gfx/skia/skia/src/core/SkDataTable.cpp
new file mode 100644
index 0000000000..9a1d7bd6a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDataTable.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkDataTable.h"
+#include "include/private/SkOnce.h"
+
+static void malloc_freeproc(void* context) {
+ sk_free(context);
+}
+
+// Makes empty table
+SkDataTable::SkDataTable() {
+ fCount = 0;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = nullptr;
+ fFreeProc = nullptr;
+ fFreeProcContext = nullptr;
+}
+
+SkDataTable::SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = elemSize; // non-zero signals we use fElems instead of fDir
+ fU.fElems = (const char*)array;
+ fFreeProc = proc;
+ fFreeProcContext = context;
+}
+
+SkDataTable::SkDataTable(const Dir* dir, int count, FreeProc proc, void* ctx) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = dir;
+ fFreeProc = proc;
+ fFreeProcContext = ctx;
+}
+
+SkDataTable::~SkDataTable() {
+ if (fFreeProc) {
+ fFreeProc(fFreeProcContext);
+ }
+}
+
+size_t SkDataTable::atSize(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ return fElemSize;
+ } else {
+ return fU.fDir[index].fSize;
+ }
+}
+
+const void* SkDataTable::at(int index, size_t* size) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ if (size) {
+ *size = fElemSize;
+ }
+ return fU.fElems + index * fElemSize;
+ } else {
+ if (size) {
+ *size = fU.fDir[index].fSize;
+ }
+ return fU.fDir[index].fPtr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDataTable> SkDataTable::MakeEmpty() {
+ static SkDataTable* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkDataTable(); });
+ return sk_ref_sp(singleton);
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t dataSize = 0;
+ for (int i = 0; i < count; ++i) {
+ dataSize += sizes[i];
+ }
+
+ size_t bufferSize = count * sizeof(Dir) + dataSize;
+ void* buffer = sk_malloc_throw(bufferSize);
+
+ Dir* dir = (Dir*)buffer;
+ char* elem = (char*)(dir + count);
+ for (int i = 0; i < count; ++i) {
+ dir[i].fPtr = elem;
+ dir[i].fSize = sizes[i];
+ memcpy(elem, ptrs[i], sizes[i]);
+ elem += sizes[i];
+ }
+
+ return sk_sp<SkDataTable>(new SkDataTable(dir, count, malloc_freeproc, buffer));
+}
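+
+// The single buffer above is laid out as [Dir[0..count) | element bytes], so
+// one sk_free (via malloc_freeproc) releases both the directory and the copied
+// payloads. For example, two entries of 4 and 8 bytes yield Dir[0], Dir[1],
+// then 12 payload bytes.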
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArray(const void* array, size_t elemSize, int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t bufferSize = elemSize * count;
+ void* buffer = sk_malloc_throw(bufferSize);
+ memcpy(buffer, array, bufferSize);
+
+ return sk_sp<SkDataTable>(new SkDataTable(buffer, elemSize, count, malloc_freeproc, buffer));
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* ctx) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+ return sk_sp<SkDataTable>(new SkDataTable(array, elemSize, count, proc, ctx));
+}
diff --git a/gfx/skia/skia/src/core/SkDebug.cpp b/gfx/skia/skia/src/core/SkDebug.cpp
new file mode 100644
index 0000000000..b02ddf7fa9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDebug.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_GOOGLE3)
+void SkDebugfForDumpStackTrace(const char* data, void* unused) {
+ SkDebugf("%s", data);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp b/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp
new file mode 100644
index 0000000000..c89cd990bb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include <utility>
+class SkSurfaceCharacterization;
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrRenderTask.h"
+#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
+#endif
+
+SkDeferredDisplayList::SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
+ sk_sp<LazyProxyData> lazyProxyData)
+ : fCharacterization(characterization)
+ , fLazyProxyData(std::move(lazyProxyData)) {
+}
+
+SkDeferredDisplayList::~SkDeferredDisplayList() {
+}
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h b/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h
new file mode 100644
index 0000000000..4b6fccdbe2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayListPriv_DEFINED
+#define SkDeferredDisplayListPriv_DEFINED
+
+#include "include/private/SkDeferredDisplayList.h"
+
+/** Class that adds methods to SkDeferredDisplayList that are only intended for use internal to Skia.
+ This class is purely a privileged window into SkDeferredDisplayList. It should never have
+ additional data members or virtual methods. */
+class SkDeferredDisplayListPriv {
+public:
+ int numRenderTasks() const {
+#if SK_SUPPORT_GPU
+ return fDDL->fRenderTasks.count();
+#else
+ return 0;
+#endif
+ }
+
+ const SkDeferredDisplayList::LazyProxyData* lazyProxyData() const {
+#if SK_SUPPORT_GPU
+ return fDDL->fLazyProxyData.get();
+#else
+ return nullptr;
+#endif
+ }
+
+private:
+ explicit SkDeferredDisplayListPriv(SkDeferredDisplayList* ddl) : fDDL(ddl) {}
+ SkDeferredDisplayListPriv(const SkDeferredDisplayListPriv&); // unimpl
+ SkDeferredDisplayListPriv& operator=(const SkDeferredDisplayListPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const SkDeferredDisplayListPriv* operator&() const;
+ SkDeferredDisplayListPriv* operator&();
+
+ SkDeferredDisplayList* fDDL;
+
+ friend class SkDeferredDisplayList; // to construct/copy this type.
+};
+
+inline SkDeferredDisplayListPriv SkDeferredDisplayList::priv() {
+ return SkDeferredDisplayListPriv(this);
+}
+
+inline const SkDeferredDisplayListPriv SkDeferredDisplayList::priv() const {
+ return SkDeferredDisplayListPriv(const_cast<SkDeferredDisplayList*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp b/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp
new file mode 100644
index 0000000000..1149c15d55
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDeferredDisplayListRecorder.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include "src/core/SkMessageBus.h"
+
+#if !SK_SUPPORT_GPU
+SkDeferredDisplayListRecorder::SkDeferredDisplayListRecorder(const SkSurfaceCharacterization&) {}
+
+SkDeferredDisplayListRecorder::~SkDeferredDisplayListRecorder() {}
+
+bool SkDeferredDisplayListRecorder::init() { return false; }
+
+SkCanvas* SkDeferredDisplayListRecorder::getCanvas() { return nullptr; }
+
+std::unique_ptr<SkDeferredDisplayList> SkDeferredDisplayListRecorder::detach() { return nullptr; }
+
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makePromiseTexture(
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makeYUVAPromiseTexture(
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendFormat yuvaFormats[],
+ const SkISize yuvaSizes[],
+ const SkYUVAIndex yuvaIndices[4],
+ int imageWidth,
+ int imageHeight,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContexts[],
+ PromiseImageApiVersion) {
+ return nullptr;
+}
+
+#else
+
+#include "include/core/SkPromiseImageTexture.h"
+#include "include/core/SkYUVASizeInfo.h"
+#include "include/gpu/GrTexture.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/SkGr.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkImage_GpuYUVA.h"
+#include "src/image/SkSurface_Gpu.h"
+
+SkDeferredDisplayListRecorder::SkDeferredDisplayListRecorder(const SkSurfaceCharacterization& c)
+ : fCharacterization(c) {
+ if (fCharacterization.isValid()) {
+ fContext = GrContextPriv::MakeDDL(fCharacterization.refContextInfo());
+ }
+}
+
+SkDeferredDisplayListRecorder::~SkDeferredDisplayListRecorder() {
+ if (fContext) {
+ auto proxyProvider = fContext->priv().proxyProvider();
+
+ // This allows the uniquely keyed proxies to keep their keys but removes their back
+ // pointer to the about-to-be-deleted proxy provider. The proxies will use their
+ // unique key to reattach to cached versions of themselves or to appropriately tag new
+ // resources (if a cached version was not found). This system operates independent of
+ // the replaying context's proxy provider (i.e., these uniquely keyed proxies will not
+        // appear in the replaying proxy provider's uniquely keyed proxy map). This should be fine
+ // since no one else should be trying to reconnect to the orphaned proxies and orphaned
+ // proxies from different DDLs that share the same key should simply reconnect to the
+ // same cached resource.
+ proxyProvider->orphanAllUniqueKeys();
+ }
+}
+
+
+bool SkDeferredDisplayListRecorder::init() {
+ SkASSERT(fContext);
+ SkASSERT(!fLazyProxyData);
+ SkASSERT(!fSurface);
+
+ if (!fCharacterization.isValid()) {
+ return false;
+ }
+
+ fLazyProxyData = sk_sp<SkDeferredDisplayList::LazyProxyData>(
+ new SkDeferredDisplayList::LazyProxyData);
+
+ auto proxyProvider = fContext->priv().proxyProvider();
+ const GrCaps* caps = fContext->priv().caps();
+
+ bool usesGLFBO0 = fCharacterization.usesGLFBO0();
+ if (usesGLFBO0) {
+ if (GrBackendApi::kOpenGL != fContext->backend() ||
+ fCharacterization.isTextureable()) {
+ return false;
+ }
+ }
+
+ if (fCharacterization.vulkanSecondaryCBCompatible()) {
+ // Because of the restrictive API allowed for a GrVkSecondaryCBDrawContext, we know ahead
+        // of time that we won't be able to support certain parameter combinations. Specifically we
+ // fail on usesGLFBO0 since we can't mix GL and Vulkan. We can't have a texturable object.
+ // And finally the GrVkSecondaryCBDrawContext always assumes a top left origin.
+ if (usesGLFBO0 ||
+ fCharacterization.isTextureable() ||
+ fCharacterization.origin() == kBottomLeft_GrSurfaceOrigin) {
+ return false;
+ }
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(fCharacterization.colorType());
+
+ GrPixelConfig config = caps->getConfigFromBackendFormat(fCharacterization.backendFormat(),
+ grColorType);
+ if (config == kUnknown_GrPixelConfig) {
+ return false;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = fCharacterization.width();
+ desc.fHeight = fCharacterization.height();
+ desc.fConfig = config;
+
+ sk_sp<SkDeferredDisplayList::LazyProxyData> lazyProxyData = fLazyProxyData;
+
+ // What we're doing here is we're creating a lazy proxy to back the SkSurface. The lazy
+ // proxy, when instantiated, will use the GrRenderTarget that backs the SkSurface that the
+ // DDL is being replayed into.
+
+ GrInternalSurfaceFlags surfaceFlags = GrInternalSurfaceFlags::kNone;
+ if (usesGLFBO0) {
+ surfaceFlags |= GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ }
+ // FIXME: Why do we use GrMipMapped::kNo instead of SkSurfaceCharacterization::fIsMipMapped?
+ static constexpr GrProxyProvider::TextureInfo kTextureInfo{GrMipMapped::kNo,
+ GrTextureType::k2D};
+ const GrProxyProvider::TextureInfo* optionalTextureInfo = nullptr;
+ if (fCharacterization.isTextureable()) {
+ optionalTextureInfo = &kTextureInfo;
+ }
+
+ sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
+ [lazyProxyData](GrResourceProvider* resourceProvider) {
+ // The proxy backing the destination surface had better have been instantiated
+                // prior to the proxy backing the DDL's surface. Steal its GrRenderTarget.
+ SkASSERT(lazyProxyData->fReplayDest->peekSurface());
+ auto surface = sk_ref_sp<GrSurface>(lazyProxyData->fReplayDest->peekSurface());
+ return GrSurfaceProxy::LazyCallbackResult(std::move(surface));
+ },
+ fCharacterization.backendFormat(),
+ desc,
+ fCharacterization.sampleCount(),
+ fCharacterization.origin(),
+ surfaceFlags,
+ optionalTextureInfo,
+ GrMipMapsStatus::kNotAllocated,
+ SkBackingFit::kExact,
+ SkBudgeted::kYes,
+ fCharacterization.isProtected(),
+ fCharacterization.vulkanSecondaryCBCompatible(),
+ GrSurfaceProxy::UseAllocator::kYes);
+
+ if (!proxy) {
+ return false;
+ }
+
+ auto c = fContext->priv().makeWrappedSurfaceContext(std::move(proxy),
+ grColorType,
+ kPremul_SkAlphaType,
+ fCharacterization.refColorSpace(),
+ &fCharacterization.surfaceProps());
+ SkASSERT(c->asRenderTargetContext());
+ std::unique_ptr<GrRenderTargetContext> rtc(c.release()->asRenderTargetContext());
+ fSurface = SkSurface_Gpu::MakeWrappedRenderTarget(fContext.get(), std::move(rtc));
+ return SkToBool(fSurface.get());
+}
+
+SkCanvas* SkDeferredDisplayListRecorder::getCanvas() {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ if (!fSurface && !this->init()) {
+ return nullptr;
+ }
+
+ return fSurface->getCanvas();
+}
+
+std::unique_ptr<SkDeferredDisplayList> SkDeferredDisplayListRecorder::detach() {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ if (fSurface) {
+ SkCanvas* canvas = fSurface->getCanvas();
+
+ canvas->restoreToCount(0);
+ }
+
+ auto ddl = std::unique_ptr<SkDeferredDisplayList>(
+ new SkDeferredDisplayList(fCharacterization, std::move(fLazyProxyData)));
+
+ fContext->priv().moveRenderTasksToDDL(ddl.get());
+
+ // We want a new lazy proxy target for each recorded DDL so force the (lazy proxy-backed)
+ // SkSurface to be regenerated for each DDL.
+ fSurface = nullptr;
+ return ddl;
+}
+
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makePromiseTexture(
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion version) {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ return SkImage_Gpu::MakePromiseTexture(fContext.get(),
+ backendFormat,
+ width,
+ height,
+ mipMapped,
+ origin,
+ colorType,
+ alphaType,
+ std::move(colorSpace),
+ textureFulfillProc,
+ textureReleaseProc,
+ textureDoneProc,
+ textureContext,
+ version);
+}
+
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makeYUVAPromiseTexture(
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendFormat yuvaFormats[],
+ const SkISize yuvaSizes[],
+ const SkYUVAIndex yuvaIndices[4],
+ int imageWidth,
+ int imageHeight,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContexts[],
+ PromiseImageApiVersion version) {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ return SkImage_GpuYUVA::MakePromiseYUVATexture(fContext.get(),
+ yuvColorSpace,
+ yuvaFormats,
+ yuvaSizes,
+ yuvaIndices,
+ imageWidth,
+ imageHeight,
+ imageOrigin,
+ std::move(imageColorSpace),
+ textureFulfillProc,
+ textureReleaseProc,
+ textureDoneProc,
+ textureContexts,
+ version);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeque.cpp b/gfx/skia/skia/src/core/SkDeque.cpp
new file mode 100644
index 0000000000..6df4cc04be
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeque.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDeque.h"
+#include "include/private/SkMalloc.h"
+
+struct SkDeque::Block {
+ Block* fNext;
+ Block* fPrev;
+ char* fBegin; // start of used section in this chunk
+ char* fEnd; // end of used section in this chunk
+ char* fStop; // end of the allocated chunk
+
+ char* start() { return (char*)(this + 1); }
+ const char* start() const { return (const char*)(this + 1); }
+
+ void init(size_t size) {
+ fNext = fPrev = nullptr;
+ fBegin = fEnd = nullptr;
+ fStop = (char*)this + size;
+ }
+};
+
+SkDeque::SkDeque(size_t elemSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(nullptr)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(allocCount >= 1);
+ fFrontBlock = fBackBlock = nullptr;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(storage)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(storageSize == 0 || storage != nullptr);
+ SkASSERT(allocCount >= 1);
+
+ if (storageSize >= sizeof(Block) + elemSize) {
+ fFrontBlock = (Block*)storage;
+ fFrontBlock->init(storageSize);
+ } else {
+ fFrontBlock = nullptr;
+ }
+ fBackBlock = fFrontBlock;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::~SkDeque() {
+ Block* head = fFrontBlock;
+ Block* initialHead = (Block*)fInitialStorage;
+
+ while (head) {
+ Block* next = head->fNext;
+ if (head != initialHead) {
+ this->freeBlock(head);
+ }
+ head = next;
+ }
+}
+
+void* SkDeque::push_front() {
+ fCount += 1;
+
+ if (nullptr == fFrontBlock) {
+ fFrontBlock = this->allocateBlock(fAllocCount);
+ fBackBlock = fFrontBlock; // update our linklist
+ }
+
+ Block* first = fFrontBlock;
+ char* begin;
+
+ if (nullptr == first->fBegin) {
+ INIT_CHUNK:
+ first->fEnd = first->fStop;
+ begin = first->fStop - fElemSize;
+ } else {
+ begin = first->fBegin - fElemSize;
+ if (begin < first->start()) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ first = this->allocateBlock(fAllocCount);
+ first->fNext = fFrontBlock;
+ fFrontBlock->fPrev = first;
+ fFrontBlock = first;
+ goto INIT_CHUNK;
+ }
+ }
+
+ first->fBegin = begin;
+
+ if (nullptr == fFront) {
+ SkASSERT(nullptr == fBack);
+ fFront = fBack = begin;
+ } else {
+ SkASSERT(fBack);
+ fFront = begin;
+ }
+
+ return begin;
+}
+
+void* SkDeque::push_back() {
+ fCount += 1;
+
+ if (nullptr == fBackBlock) {
+ fBackBlock = this->allocateBlock(fAllocCount);
+ fFrontBlock = fBackBlock; // update our linklist
+ }
+
+ Block* last = fBackBlock;
+ char* end;
+
+ if (nullptr == last->fBegin) {
+ INIT_CHUNK:
+ last->fBegin = last->start();
+ end = last->fBegin + fElemSize;
+ } else {
+ end = last->fEnd + fElemSize;
+ if (end > last->fStop) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ last = this->allocateBlock(fAllocCount);
+ last->fPrev = fBackBlock;
+ fBackBlock->fNext = last;
+ fBackBlock = last;
+ goto INIT_CHUNK;
+ }
+ }
+
+ last->fEnd = end;
+ end -= fElemSize;
+
+ if (nullptr == fBack) {
+ SkASSERT(nullptr == fFront);
+ fFront = fBack = end;
+ } else {
+ SkASSERT(fFront);
+ fBack = end;
+ }
+
+ return end;
+}
+
+void SkDeque::pop_front() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* first = fFrontBlock;
+
+ SkASSERT(first != nullptr);
+
+ if (first->fBegin == nullptr) { // we were marked empty from before
+ first = first->fNext;
+ SkASSERT(first != nullptr); // else we popped too far
+ first->fPrev = nullptr;
+ this->freeBlock(fFrontBlock);
+ fFrontBlock = first;
+ }
+
+ char* begin = first->fBegin + fElemSize;
+ SkASSERT(begin <= first->fEnd);
+
+ if (begin < fFrontBlock->fEnd) {
+ first->fBegin = begin;
+ SkASSERT(first->fBegin);
+ fFront = first->fBegin;
+ } else {
+ first->fBegin = first->fEnd = nullptr; // mark as empty
+ if (nullptr == first->fNext) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(first->fNext->fBegin);
+ fFront = first->fNext->fBegin;
+ }
+ }
+}
+
+void SkDeque::pop_back() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* last = fBackBlock;
+
+ SkASSERT(last != nullptr);
+
+ if (last->fEnd == nullptr) { // we were marked empty from before
+ last = last->fPrev;
+ SkASSERT(last != nullptr); // else we popped too far
+ last->fNext = nullptr;
+ this->freeBlock(fBackBlock);
+ fBackBlock = last;
+ }
+
+ char* end = last->fEnd - fElemSize;
+ SkASSERT(end >= last->fBegin);
+
+ if (end > last->fBegin) {
+ last->fEnd = end;
+ SkASSERT(last->fEnd);
+ fBack = last->fEnd - fElemSize;
+ } else {
+ last->fBegin = last->fEnd = nullptr; // mark as empty
+ if (nullptr == last->fPrev) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(last->fPrev->fEnd);
+ fBack = last->fPrev->fEnd - fElemSize;
+ }
+ }
+}
+
+int SkDeque::numBlocksAllocated() const {
+ int numBlocks = 0;
+
+ for (const Block* temp = fFrontBlock; temp; temp = temp->fNext) {
+ ++numBlocks;
+ }
+
+ return numBlocks;
+}
+
+SkDeque::Block* SkDeque::allocateBlock(int allocCount) {
+ Block* newBlock = (Block*)sk_malloc_throw(sizeof(Block) + allocCount * fElemSize);
+ newBlock->init(sizeof(Block) + allocCount * fElemSize);
+ return newBlock;
+}
+
+void SkDeque::freeBlock(Block* block) {
+ sk_free(block);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDeque::Iter::Iter() : fCurBlock(nullptr), fPos(nullptr), fElemSize(0) {}
+
+SkDeque::Iter::Iter(const SkDeque& d, IterStart startLoc) {
+ this->reset(d, startLoc);
+}
+
+// Due to how reset and next work, next actually returns the current element
+// pointed to by fPos and then updates fPos to point to the next one.
+void* SkDeque::Iter::next() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the next setting
+ char* next = pos + fElemSize;
+ SkASSERT(next <= fCurBlock->fEnd);
+ if (next == fCurBlock->fEnd) { // exhausted this chunk, move to next
+ do {
+ fCurBlock = fCurBlock->fNext;
+ } while (fCurBlock != nullptr && fCurBlock->fBegin == nullptr);
+ next = fCurBlock ? fCurBlock->fBegin : nullptr;
+ }
+ fPos = next;
+ }
+ return pos;
+}
+
+// Like next, prev actually returns the current element pointed to by fPos and
+// then makes fPos point to the previous element.
+void* SkDeque::Iter::prev() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the prior setting
+ char* prev = pos - fElemSize;
+ SkASSERT(prev >= fCurBlock->fBegin - fElemSize);
+ if (prev < fCurBlock->fBegin) { // exhausted this chunk, move to prior
+ do {
+ fCurBlock = fCurBlock->fPrev;
+ } while (fCurBlock != nullptr && fCurBlock->fEnd == nullptr);
+ prev = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+ fPos = prev;
+ }
+ return pos;
+}
+
+// reset works by skipping through the spare blocks at the start (or end)
+// of the doubly linked list until a non-empty one is found. The fPos
+// member is then set to the first (or last) element in the block. If
+// there are no elements in the deque, both fCurBlock and fPos will be
+// nullptr when this routine returns.
+void SkDeque::Iter::reset(const SkDeque& d, IterStart startLoc) {
+ fElemSize = d.fElemSize;
+
+ if (kFront_IterStart == startLoc) {
+ // initialize the iterator to start at the front
+ fCurBlock = d.fFrontBlock;
+ while (fCurBlock && nullptr == fCurBlock->fBegin) {
+ fCurBlock = fCurBlock->fNext;
+ }
+ fPos = fCurBlock ? fCurBlock->fBegin : nullptr;
+ } else {
+ // initialize the iterator to start at the back
+ fCurBlock = d.fBackBlock;
+ while (fCurBlock && nullptr == fCurBlock->fEnd) {
+ fCurBlock = fCurBlock->fPrev;
+ }
+ fPos = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+}
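+
+// Illustrative sketch (not part of the upstream file): one way the deque and
+// iterator above are typically combined. It assumes the caller stores ints;
+// SkDeque itself only deals in fElemSize-byte slots.
+#if 0
+static int sum_all(const SkDeque& deque) {
+    SkDeque::Iter iter(deque, SkDeque::kFront_IterStart);
+    int sum = 0;
+    // next() returns the current element and then advances, so this loop
+    // visits every element exactly once, front to back.
+    while (void* elem = iter.next()) {
+        sum += *static_cast<int*>(elem);
+    }
+    return sum;
+}
+#endif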
diff --git a/gfx/skia/skia/src/core/SkDescriptor.cpp b/gfx/skia/skia/src/core/SkDescriptor.cpp
new file mode 100644
index 0000000000..28eb677105
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDescriptor.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDescriptor.h"
+
+#include <new>
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkOpts.h"
+
+std::unique_ptr<SkDescriptor> SkDescriptor::Alloc(size_t length) {
+ SkASSERT(SkAlign4(length) == length);
+ return std::unique_ptr<SkDescriptor>(static_cast<SkDescriptor*>(::operator new (length)));
+}
+
+void SkDescriptor::operator delete(void* p) { ::operator delete(p); }
+
+void* SkDescriptor::addEntry(uint32_t tag, size_t length, const void* data) {
+ SkASSERT(tag);
+ SkASSERT(SkAlign4(length) == length);
+ SkASSERT(this->findEntry(tag, nullptr) == nullptr);
+
+ Entry* entry = (Entry*)((char*)this + fLength);
+ entry->fTag = tag;
+ entry->fLen = SkToU32(length);
+ if (data) {
+ memcpy(entry + 1, data, length);
+ }
+
+ fCount += 1;
+ fLength = SkToU32(fLength + sizeof(Entry) + length);
+ return (entry + 1); // return its data
+}
+
+void SkDescriptor::computeChecksum() {
+ fChecksum = SkDescriptor::ComputeChecksum(this);
+}
+
+const void* SkDescriptor::findEntry(uint32_t tag, uint32_t* length) const {
+ const Entry* entry = (const Entry*)(this + 1);
+ int count = fCount;
+
+ while (--count >= 0) {
+ if (entry->fTag == tag) {
+ if (length) {
+ *length = entry->fLen;
+ }
+ return entry + 1;
+ }
+ entry = (const Entry*)((const char*)(entry + 1) + entry->fLen);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<SkDescriptor> SkDescriptor::copy() const {
+ std::unique_ptr<SkDescriptor> desc = SkDescriptor::Alloc(fLength);
+ memcpy(desc.get(), this, fLength);
+ return desc;
+}
+
+bool SkDescriptor::operator==(const SkDescriptor& other) const {
+
+ // the first value we should look at is the checksum, so this loop
+    // should terminate early if the descriptors are different.
+ // NOTE: if we wrote a sentinel value at the end of each, we could
+ // remove the aa < stop test in the loop...
+ const uint32_t* aa = (const uint32_t*)this;
+ const uint32_t* bb = (const uint32_t*)&other;
+ const uint32_t* stop = (const uint32_t*)((const char*)aa + fLength);
+ do {
+ if (*aa++ != *bb++)
+ return false;
+ } while (aa < stop);
+ return true;
+}
+
+uint32_t SkDescriptor::ComputeChecksum(const SkDescriptor* desc) {
+ const uint32_t* ptr = (const uint32_t*)desc + 1; // skip the checksum field
+ size_t len = desc->fLength - sizeof(uint32_t);
+ return SkOpts::hash(ptr, len);
+}
+
+bool SkDescriptor::isValid() const {
+ uint32_t count = 0;
+ size_t offset = sizeof(SkDescriptor);
+
+ while (offset < fLength) {
+ const Entry* entry = (const Entry*)(reinterpret_cast<const char*>(this) + offset);
+ // rec tags are always a known size.
+ if (entry->fTag == kRec_SkDescriptorTag && entry->fLen != sizeof(SkScalerContextRec)) {
+ return false;
+ }
+ offset += sizeof(Entry) + entry->fLen;
+ count++;
+ }
+ return offset <= fLength && count == fCount;
+}
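+
+// Illustrative sketch (not part of the upstream file) of the intended calling
+// sequence for the API above. The tag and payload here are made up; real
+// callers use tags such as kRec_SkDescriptorTag.
+#if 0
+static void build_and_query() {
+    constexpr uint32_t kFakeTag = SkSetFourByteTag('f', 'a', 'k', 'e');
+    uint32_t payload = 42;  // entry lengths must be 4-byte aligned
+
+    std::unique_ptr<SkDescriptor> desc =
+            SkDescriptor::Alloc(SkDescriptor::ComputeOverhead(1) + sizeof(payload));
+    desc->init();
+    desc->addEntry(kFakeTag, sizeof(payload), &payload);
+    desc->computeChecksum();  // must follow the last addEntry
+
+    uint32_t len;
+    const void* data = desc->findEntry(kFakeTag, &len);
+    SkASSERT(data && len == sizeof(payload));
+}
+#endif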
+
+SkAutoDescriptor::SkAutoDescriptor() = default;
+SkAutoDescriptor::SkAutoDescriptor(size_t size) { this->reset(size); }
+SkAutoDescriptor::SkAutoDescriptor(const SkDescriptor& desc) { this->reset(desc); }
+SkAutoDescriptor::SkAutoDescriptor(const SkAutoDescriptor& ad) {
+ this->reset(*ad.getDesc());
+}
+SkAutoDescriptor& SkAutoDescriptor::operator=(const SkAutoDescriptor& ad) {
+ this->reset(*ad.getDesc());
+ return *this;
+}
+
+SkAutoDescriptor::~SkAutoDescriptor() { this->free(); }
+
+void SkAutoDescriptor::reset(size_t size) {
+ this->free();
+ if (size <= sizeof(fStorage)) {
+ fDesc = reinterpret_cast<SkDescriptor*>(&fStorage);
+ } else {
+ fDesc = SkDescriptor::Alloc(size).release();
+ }
+}
+
+void SkAutoDescriptor::reset(const SkDescriptor& desc) {
+ size_t size = desc.getLength();
+ this->reset(size);
+ memcpy(fDesc, &desc, size);
+}
+
+void SkAutoDescriptor::free() {
+ if (fDesc != (SkDescriptor*)&fStorage) {
+ delete fDesc;
+ }
+}
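+
+// A note on the pair of reset() methods above: together they implement a
+// small-buffer optimization. A descriptor that fits in fStorage (sized in
+// SkDescriptor.h for the common rec + typeface entries) costs no heap
+// allocation; only oversized descriptors fall back to SkDescriptor::Alloc,
+// and free() deletes fDesc only in that heap-allocated case.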
+
+
diff --git a/gfx/skia/skia/src/core/SkDescriptor.h b/gfx/skia/skia/src/core/SkDescriptor.h
new file mode 100644
index 0000000000..88202c3eb3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDescriptor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDescriptor_DEFINED
+#define SkDescriptor_DEFINED
+
+#include <memory>
+
+#include "include/private/SkMacros.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkScalerContext.h"
+
+class SkDescriptor : SkNoncopyable {
+public:
+ static size_t ComputeOverhead(int entryCount) {
+ SkASSERT(entryCount >= 0);
+ return sizeof(SkDescriptor) + entryCount * sizeof(Entry);
+ }
+
+ static std::unique_ptr<SkDescriptor> Alloc(size_t length);
+
+ // Ensure the unsized delete is called.
+ void operator delete(void* p);
+ void init() {
+ fLength = sizeof(SkDescriptor);
+ fCount = 0;
+ }
+ uint32_t getLength() const { return fLength; }
+ void* addEntry(uint32_t tag, size_t length, const void* data = nullptr);
+ void computeChecksum();
+
+ // Assumes that getLength <= capacity of this SkDescriptor.
+ bool isValid() const;
+
+#ifdef SK_DEBUG
+ void assertChecksum() const {
+ SkASSERT(SkDescriptor::ComputeChecksum(this) == fChecksum);
+ }
+#endif
+
+ const void* findEntry(uint32_t tag, uint32_t* length) const;
+
+ std::unique_ptr<SkDescriptor> copy() const;
+
+ // This assumes that all memory added has a length that is a multiple of 4. This is checked
+ // by the assert in addEntry.
+ bool operator==(const SkDescriptor& other) const;
+ bool operator!=(const SkDescriptor& other) const { return !(*this == other); }
+
+ uint32_t getChecksum() const { return fChecksum; }
+
+ struct Entry {
+ uint32_t fTag;
+ uint32_t fLen;
+ };
+
+#ifdef SK_DEBUG
+ uint32_t getCount() const { return fCount; }
+#endif
+
+private:
+ // private so no one can create one except our factories
+ SkDescriptor() = default;
+ friend class SkDescriptorTestHelper;
+
+ static uint32_t ComputeChecksum(const SkDescriptor* desc);
+
+ uint32_t fChecksum; // must be first
+ uint32_t fLength; // must be second
+ uint32_t fCount;
+};
+
+class SkAutoDescriptor {
+public:
+ SkAutoDescriptor();
+ explicit SkAutoDescriptor(size_t size);
+ explicit SkAutoDescriptor(const SkDescriptor& desc);
+ SkAutoDescriptor(const SkAutoDescriptor& ad);
+ SkAutoDescriptor& operator= (const SkAutoDescriptor& ad);
+ SkAutoDescriptor(SkAutoDescriptor&&) = delete;
+ SkAutoDescriptor& operator= (SkAutoDescriptor&&) = delete;
+
+ ~SkAutoDescriptor();
+
+ void reset(size_t size);
+ void reset(const SkDescriptor& desc);
+ SkDescriptor* getDesc() const { SkASSERT(fDesc); return fDesc; }
+
+private:
+ void free();
+ static constexpr size_t kStorageSize
+ = sizeof(SkDescriptor)
+ + sizeof(SkDescriptor::Entry) + sizeof(SkScalerContextRec) // for rec
+ + sizeof(SkDescriptor::Entry) + sizeof(void*) // for typeface
+ + 32; // slop for occasional small extras
+
+ SkDescriptor* fDesc{nullptr};
+ std::aligned_storage<kStorageSize, alignof(uint32_t)>::type fStorage;
+};
+
+#endif //SkDescriptor_DEFINED
diff --git a/gfx/skia/skia/src/core/SkDevice.cpp b/gfx/skia/skia/src/core/SkDevice.cpp
new file mode 100644
index 0000000000..094f273e91
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDevice.cpp
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDevice.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkUtils.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#include "src/utils/SkPatchUtils.h"
+
+SkBaseDevice::SkBaseDevice(const SkImageInfo& info, const SkSurfaceProps& surfaceProps)
+ : fInfo(info)
+ , fSurfaceProps(surfaceProps)
+{
+ fOrigin = {0, 0};
+ fCTM.reset();
+}
+
+void SkBaseDevice::setOrigin(const SkMatrix& globalCTM, int x, int y) {
+ fOrigin.set(x, y);
+ fCTM = globalCTM;
+ fCTM.postTranslate(SkIntToScalar(-x), SkIntToScalar(-y));
+}
+
+void SkBaseDevice::setGlobalCTM(const SkMatrix& ctm) {
+ fCTM = ctm;
+ if (fOrigin.fX | fOrigin.fY) {
+ fCTM.postTranslate(-SkIntToScalar(fOrigin.fX), -SkIntToScalar(fOrigin.fY));
+ }
+}
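+
+// Restating the two methods above (illustrative): a device's local CTM is the
+// canvas' global CTM followed by a translate that moves the device origin to
+// (0,0):
+//
+//   deviceCTM = Translate(-origin.x, -origin.y) * globalCTM
+//
+// so a global point P lands at deviceCTM * P = globalCTM * P - origin.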
+
+bool SkBaseDevice::clipIsWideOpen() const {
+ if (ClipType::kRect == this->onGetClipType()) {
+ SkRegion rgn;
+ this->onAsRgnClip(&rgn);
+ SkASSERT(rgn.isRect());
+ return rgn.getBounds() == SkIRect::MakeWH(this->width(), this->height());
+ } else {
+ return false;
+ }
+}
+
+SkPixelGeometry SkBaseDevice::CreateInfo::AdjustGeometry(TileUsage tileUsage, SkPixelGeometry geo) {
+ switch (tileUsage) {
+ case kPossible_TileUsage:
+ // (we think) for compatibility with old clients, we assume this layer can support LCD
+ // even though they may not have marked it as opaque... seems like we should update
+ // our callers (reed/robertphilips).
+ break;
+ case kNever_TileUsage:
+ geo = kUnknown_SkPixelGeometry;
+ break;
+ }
+ return geo;
+}
+
+static inline bool is_int(float x) {
+ return x == (float) sk_float_round2int(x);
+}
+
+void SkBaseDevice::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ const SkMatrix& ctm = this->ctm();
+ bool isNonTranslate = ctm.getType() & ~(SkMatrix::kTranslate_Mask);
+ bool complexPaint = paint.getStyle() != SkPaint::kFill_Style || paint.getMaskFilter() ||
+ paint.getPathEffect();
+ bool antiAlias = paint.isAntiAlias() && (!is_int(ctm.getTranslateX()) ||
+ !is_int(ctm.getTranslateY()));
+ if (isNonTranslate || complexPaint || antiAlias) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ path.setIsVolatile(true);
+ return this->drawPath(path, paint, true);
+ }
+
+ SkRegion::Iterator it(region);
+ while (!it.done()) {
+ this->drawRect(SkRect::Make(it.rect()), paint);
+ it.next();
+ }
+}
+
+void SkBaseDevice::drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint) {
+ SkPath path;
+ bool isFillNoPathEffect = SkPaint::kFill_Style == paint.getStyle() && !paint.getPathEffect();
+ SkPathPriv::CreateDrawArcPath(&path, oval, startAngle, sweepAngle, useCenter,
+ isFillNoPathEffect);
+ this->drawPath(path, paint);
+}
+
+void SkBaseDevice::drawDRRect(const SkRRect& outer,
+ const SkRRect& inner, const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(outer);
+ path.addRRect(inner);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+ path.setIsVolatile(true);
+
+ this->drawPath(path, paint, true);
+}
+
+void SkBaseDevice::drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode, const SkPaint& paint) {
+ SkISize lod = SkPatchUtils::GetLevelOfDetail(cubics, &this->ctm());
+ auto vertices = SkPatchUtils::MakeVertices(cubics, colors, texCoords, lod.width(), lod.height(),
+ this->imageInfo().colorSpace());
+ if (vertices) {
+ this->drawVertices(vertices.get(), nullptr, 0, bmode, paint);
+ }
+}
+
+void SkBaseDevice::drawImageRect(const SkImage* image, const SkRect* src,
+ const SkRect& dst, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmapRect(bm, src, dst, paint, constraint);
+ }
+}
+
+void SkBaseDevice::drawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ SkLatticeIter iter(image->width(), image->height(), center, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawImageRect(image, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ SkLatticeIter iter(bitmap.width(), bitmap.height(), center, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(bitmap, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawImageLattice(const SkImage* image,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ SkLatticeIter iter(lattice, dst);
+
+ SkRect srcR, dstR;
+ SkColor c;
+ bool isFixedColor = false;
+ const SkImageInfo info = SkImageInfo::Make(1, 1, kBGRA_8888_SkColorType, kUnpremul_SkAlphaType);
+
+ while (iter.next(&srcR, &dstR, &isFixedColor, &c)) {
+ if (isFixedColor || (srcR.width() <= 1.0f && srcR.height() <= 1.0f &&
+ image->readPixels(info, &c, 4, srcR.fLeft, srcR.fTop))) {
+ // Fast draw with drawRect, if this is a patch containing a single color
+ // or if this is a patch containing a single pixel.
+ if (0 != c || !paint.isSrcOver()) {
+ SkPaint paintCopy(paint);
+ int alpha = SkAlphaMul(SkColorGetA(c), SkAlpha255To256(paint.getAlpha()));
+ paintCopy.setColor(SkColorSetA(c, alpha));
+ this->drawRect(dstR, paintCopy);
+ }
+ } else {
+ this->drawImageRect(image, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+ }
+}
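+
+// A note on the fast path above (illustrative arithmetic): the patch color c
+// is modulated by the paint's alpha in fixed point. E.g. a color with alpha
+// 128 drawn with paint alpha 128 yields
+//   SkAlphaMul(128, SkAlpha255To256(128)) = (128 * 129) >> 8 = 64,
+// matching the float product 0.5 * 0.5 to within rounding.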
+
+void SkBaseDevice::drawBitmapLattice(const SkBitmap& bitmap,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ SkLatticeIter iter(lattice, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(bitmap, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+static SkPoint* quad_to_tris(SkPoint tris[6], const SkPoint quad[4]) {
+ tris[0] = quad[0];
+ tris[1] = quad[1];
+ tris[2] = quad[2];
+
+ tris[3] = quad[0];
+ tris[4] = quad[2];
+ tris[5] = quad[3];
+
+ return tris + 6;
+}
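+
+// A note on the helper above: a quad [0,1,2,3] is fanned into the triangles
+// (0,1,2) and (0,2,3), i.e. six vertices per quad, which is why drawAtlas
+// below sizes its buffers as quadCount << 1 triangles.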
+
+void SkBaseDevice::drawAtlas(const SkImage* atlas, const SkRSXform xform[],
+ const SkRect tex[], const SkColor colors[], int quadCount,
+ SkBlendMode mode, const SkPaint& paint) {
+ const int triCount = quadCount << 1;
+ const int vertexCount = triCount * 3;
+ uint32_t flags = SkVertices::kHasTexCoords_BuilderFlag;
+ if (colors) {
+ flags |= SkVertices::kHasColors_BuilderFlag;
+ }
+ SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, vertexCount, 0, flags);
+
+ SkPoint* vPos = builder.positions();
+ SkPoint* vTex = builder.texCoords();
+ SkColor* vCol = builder.colors();
+ for (int i = 0; i < quadCount; ++i) {
+ SkPoint tmp[4];
+ xform[i].toQuad(tex[i].width(), tex[i].height(), tmp);
+ vPos = quad_to_tris(vPos, tmp);
+
+ tex[i].toQuad(tmp);
+ vTex = quad_to_tris(vTex, tmp);
+
+ if (colors) {
+ sk_memset32(vCol, colors[i], 6);
+ vCol += 6;
+ }
+ }
+ SkPaint p(paint);
+ p.setShader(atlas->makeShader());
+ this->drawVertices(builder.detach().get(), nullptr, 0, mode, p);
+}
+
+
+void SkBaseDevice::drawEdgeAAQuad(const SkRect& r, const SkPoint clip[4], SkCanvas::QuadAAFlags aa,
+ const SkColor4f& color, SkBlendMode mode) {
+ SkPaint paint;
+ paint.setColor4f(color);
+ paint.setBlendMode(mode);
+ paint.setAntiAlias(aa == SkCanvas::kAll_QuadAAFlags);
+
+ if (clip) {
+ // Draw the clip directly as a quad since it's a filled color with no local coords
+ SkPath clipPath;
+ clipPath.addPoly(clip, 4, true);
+ this->drawPath(clipPath, paint);
+ } else {
+ this->drawRect(r, paint);
+ }
+}
+
+void SkBaseDevice::drawEdgeAAImageSet(const SkCanvas::ImageSetEntry images[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkASSERT(paint.getStyle() == SkPaint::kFill_Style);
+ SkASSERT(!paint.getPathEffect());
+
+ SkPaint entryPaint = paint;
+ const SkMatrix baseCTM = this->ctm();
+ int clipIndex = 0;
+ for (int i = 0; i < count; ++i) {
+ // TODO: Handle per-edge AA. Right now this mirrors the SkiaRenderer component of Chrome
+ // which turns off antialiasing unless all four edges should be antialiased. This avoids
+ // seaming in tiled composited layers.
+ entryPaint.setAntiAlias(images[i].fAAFlags == SkCanvas::kAll_QuadAAFlags);
+ entryPaint.setAlphaf(paint.getAlphaf() * images[i].fAlpha);
+
+ bool needsRestore = false;
+ SkASSERT(images[i].fMatrixIndex < 0 || preViewMatrices);
+ if (images[i].fMatrixIndex >= 0) {
+ this->save();
+ this->setGlobalCTM(SkMatrix::Concat(
+ baseCTM, preViewMatrices[images[i].fMatrixIndex]));
+ needsRestore = true;
+ }
+
+ SkASSERT(!images[i].fHasClip || dstClips);
+ if (images[i].fHasClip) {
+ // Since drawImageRect requires a srcRect, the dst clip is implemented as a true clip
+ if (!needsRestore) {
+ this->save();
+ needsRestore = true;
+ }
+ SkPath clipPath;
+ clipPath.addPoly(dstClips + clipIndex, 4, true);
+ this->clipPath(clipPath, SkClipOp::kIntersect, entryPaint.isAntiAlias());
+ clipIndex += 4;
+ }
+ this->drawImageRect(images[i].fImage.get(), &images[i].fSrcRect, images[i].fDstRect,
+ entryPaint, constraint);
+ if (needsRestore) {
+ this->restore(baseCTM);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBaseDevice::drawDrawable(SkDrawable* drawable, const SkMatrix* matrix, SkCanvas* canvas) {
+ drawable->draw(canvas, matrix);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBaseDevice::drawSpecial(SkSpecialImage*, int x, int y, const SkPaint&,
+ SkImage*, const SkMatrix&) {}
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkBitmap&) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkImage*) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecial(const SkIRect&, bool) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecial() {
+ return this->snapSpecial(SkIRect::MakeWH(this->width(), this->height()));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBaseDevice::readPixels(const SkPixmap& pm, int x, int y) {
+ return this->onReadPixels(pm, x, y);
+}
+
+bool SkBaseDevice::writePixels(const SkPixmap& pm, int x, int y) {
+ return this->onWritePixels(pm, x, y);
+}
+
+bool SkBaseDevice::onWritePixels(const SkPixmap&, int, int) {
+ return false;
+}
+
+bool SkBaseDevice::onReadPixels(const SkPixmap&, int x, int y) {
+ return false;
+}
+
+bool SkBaseDevice::accessPixels(SkPixmap* pmap) {
+ SkPixmap tempStorage;
+ if (nullptr == pmap) {
+ pmap = &tempStorage;
+ }
+ return this->onAccessPixels(pmap);
+}
+
+bool SkBaseDevice::peekPixels(SkPixmap* pmap) {
+ SkPixmap tempStorage;
+ if (nullptr == pmap) {
+ pmap = &tempStorage;
+ }
+ return this->onPeekPixels(pmap);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkUtils.h"
+
+void SkBaseDevice::drawGlyphRunRSXform(const SkFont& font, const SkGlyphID glyphs[],
+ const SkRSXform xform[], int count, SkPoint origin,
+ const SkPaint& paint) {
+ const SkMatrix originalCTM = this->ctm();
+ if (!originalCTM.isFinite() || !SkScalarIsFinite(font.getSize()) ||
+ !SkScalarIsFinite(font.getScaleX()) ||
+ !SkScalarIsFinite(font.getSkewX())) {
+ return;
+ }
+
+ SkPoint sharedPos{0, 0}; // we're at the origin
+ SkGlyphID glyphID;
+ SkGlyphRun glyphRun{
+ font,
+ SkSpan<const SkPoint>{&sharedPos, 1},
+ SkSpan<const SkGlyphID>{&glyphID, 1},
+ SkSpan<const char>{},
+ SkSpan<const uint32_t>{}
+ };
+
+ for (int i = 0; i < count; i++) {
+ glyphID = glyphs[i];
+ // now "glyphRun" is pointing at the current glyphID
+
+ SkMatrix ctm;
+ ctm.setRSXform(xform[i]).postTranslate(origin.fX, origin.fY);
+
+ // We want to rotate each glyph by the rsxform, but we don't want to rotate "space"
+ // (i.e. the shader that cares about the ctm) so we have to undo our little ctm trick
+ // with a localmatrixshader so that the shader draws as if there was no change to the ctm.
+ SkPaint transformingPaint{paint};
+ auto shader = transformingPaint.getShader();
+ if (shader) {
+ SkMatrix inverse;
+ if (ctm.invert(&inverse)) {
+ transformingPaint.setShader(shader->makeWithLocalMatrix(inverse));
+ } else {
+ transformingPaint.setShader(nullptr); // can't handle this xform
+ }
+ }
+
+ ctm.setConcat(originalCTM, ctm);
+ this->setCTM(ctm);
+
+ this->drawGlyphRunList(SkGlyphRunList{glyphRun, transformingPaint});
+ }
+ this->setCTM(originalCTM);
+}
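+
+// A worked restatement of the shader trick above (illustrative): per glyph the
+// final ctm is originalCTM * R, where R is the rsxform-plus-origin matrix.
+// Giving the shader a local matrix of R^-1 makes its effective transform
+// (originalCTM * R) * R^-1 = originalCTM, so the shader paints as if the ctm
+// never changed while the glyph geometry still rotates.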
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkBaseDevice::makeSurface(SkImageInfo const&, SkSurfaceProps const&) {
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBaseDevice::LogDrawScaleFactor(const SkMatrix& view, const SkMatrix& srcToDst,
+ SkFilterQuality filterQuality) {
+#if SK_HISTOGRAMS_ENABLED
+ SkMatrix matrix = SkMatrix::Concat(view, srcToDst);
+ enum ScaleFactor {
+ kUpscale_ScaleFactor,
+ kNoScale_ScaleFactor,
+ kDownscale_ScaleFactor,
+ kLargeDownscale_ScaleFactor,
+
+ kLast_ScaleFactor = kLargeDownscale_ScaleFactor
+ };
+
+ float rawScaleFactor = matrix.getMinScale();
+
+ ScaleFactor scaleFactor;
+ if (rawScaleFactor < 0.5f) {
+ scaleFactor = kLargeDownscale_ScaleFactor;
+ } else if (rawScaleFactor < 1.0f) {
+ scaleFactor = kDownscale_ScaleFactor;
+ } else if (rawScaleFactor > 1.0f) {
+ scaleFactor = kUpscale_ScaleFactor;
+ } else {
+ scaleFactor = kNoScale_ScaleFactor;
+ }
+
+ switch (filterQuality) {
+ case kNone_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.NoneFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kLow_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.LowFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kMedium_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.MediumFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kHigh_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.HighFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ }
+
+ // Also log filter quality independent scale factor.
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.AnyFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+
+ // Also log an overall histogram of filter quality.
+ SK_HISTOGRAM_ENUMERATION("FilterQuality", filterQuality, kLast_SkFilterQuality + 1);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkDevice.h b/gfx/skia/skia/src/core/SkDevice.h
new file mode 100644
index 0000000000..ce2629fd80
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDevice.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDevice_DEFINED
+#define SkDevice_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkBitmap;
+struct SkDrawShadowRec;
+class SkGlyphRun;
+class SkGlyphRunList;
+class SkImageFilterCache;
+struct SkIRect;
+class SkMatrix;
+class SkRasterHandleAllocator;
+class SkSpecialImage;
+
+class SkBaseDevice : public SkRefCnt {
+public:
+ SkBaseDevice(const SkImageInfo&, const SkSurfaceProps&);
+
+ /**
+ * Return ImageInfo for this device. If the canvas is not backed by pixels
+ * (cpu or gpu), then the info's ColorType will be kUnknown_SkColorType.
+ */
+ const SkImageInfo& imageInfo() const { return fInfo; }
+
+ /**
+ * Return SurfaceProps for this device.
+ */
+ const SkSurfaceProps& surfaceProps() const {
+ return fSurfaceProps;
+ }
+
+ /**
+ * Return the bounds of the device in the coordinate space of the root
+ * canvas. The root device will have its top-left at 0,0, but other devices
+ * such as those associated with saveLayer may have a non-zero origin.
+ */
+ void getGlobalBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ const SkIPoint& origin = this->getOrigin();
+ bounds->setXYWH(origin.x(), origin.y(), this->width(), this->height());
+ }
+
+ SkIRect getGlobalBounds() const {
+ SkIRect bounds;
+ this->getGlobalBounds(&bounds);
+ return bounds;
+ }
+
+ int width() const {
+ return this->imageInfo().width();
+ }
+
+ int height() const {
+ return this->imageInfo().height();
+ }
+
+ bool isOpaque() const {
+ return this->imageInfo().isOpaque();
+ }
+
+ bool writePixels(const SkPixmap&, int x, int y);
+
+ /**
+ * Try to get write-access to the pixels behind the device. If successful, this returns true
+ * and fills-out the pixmap parameter. On success it also bumps the genID of the underlying
+ * bitmap.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool accessPixels(SkPixmap* pmap);
+
+ /**
+ * Try to get read-only-access to the pixels behind the device. If successful, this returns
+ * true and fills-out the pixmap parameter.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool peekPixels(SkPixmap*);
+
+ /**
+ * Return the device's origin: its offset in device coordinates from
+ * the default origin in its canvas' matrix/clip
+ */
+ const SkIPoint& getOrigin() const { return fOrigin; }
+
+ virtual void* getRasterHandle() const { return nullptr; }
+
+ void save() { this->onSave(); }
+ void restore(const SkMatrix& ctm) {
+ this->onRestore();
+ this->setGlobalCTM(ctm);
+ }
+ void clipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ this->onClipRect(rect, op, aa);
+ }
+ void clipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ this->onClipRRect(rrect, op, aa);
+ }
+ void clipPath(const SkPath& path, SkClipOp op, bool aa) {
+ this->onClipPath(path, op, aa);
+ }
+ void clipRegion(const SkRegion& region, SkClipOp op) {
+ this->onClipRegion(region, op);
+ }
+ void androidFramework_setDeviceClipRestriction(SkIRect* mutableClipRestriction) {
+ this->onSetDeviceClipRestriction(mutableClipRestriction);
+ }
+ bool clipIsWideOpen() const;
+
+ const SkMatrix& ctm() const { return fCTM; }
+ void setCTM(const SkMatrix& ctm) {
+ fCTM = ctm;
+ }
+ void setGlobalCTM(const SkMatrix& ctm);
+ virtual void validateDevBounds(const SkIRect&) {}
+
+protected:
+ enum TileUsage {
+ kPossible_TileUsage, //!< the created device may be drawn tiled
+ kNever_TileUsage, //!< the created device will never be drawn tiled
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ };
+
+ virtual void onSave() {}
+ virtual void onRestore() {}
+ virtual void onClipRect(const SkRect& rect, SkClipOp, bool aa) {}
+ virtual void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) {}
+ virtual void onClipPath(const SkPath& path, SkClipOp, bool aa) {}
+ virtual void onClipRegion(const SkRegion& deviceRgn, SkClipOp) {}
+ virtual void onSetDeviceClipRestriction(SkIRect* mutableClipRestriction) {}
+ virtual bool onClipIsAA() const = 0;
+ virtual void onAsRgnClip(SkRegion*) const = 0;
+ enum class ClipType {
+ kEmpty,
+ kRect,
+ kComplex
+ };
+ virtual ClipType onGetClipType() const = 0;
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint.
+ */
+ virtual void drawPaint(const SkPaint& paint) = 0;
+ virtual void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) = 0;
+ virtual void drawRect(const SkRect& r,
+ const SkPaint& paint) = 0;
+ virtual void drawRegion(const SkRegion& r,
+ const SkPaint& paint);
+ virtual void drawOval(const SkRect& oval,
+ const SkPaint& paint) = 0;
+ /** By the time this is called we know that abs(sweepAngle) is in the range [0, 360). */
+ virtual void drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint);
+ virtual void drawRRect(const SkRRect& rr,
+ const SkPaint& paint) = 0;
+
+ // Default impl calls drawPath()
+ virtual void drawDRRect(const SkRRect& outer,
+ const SkRRect& inner, const SkPaint&);
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ */
+ virtual void drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable = false) = 0;
+ virtual void drawSprite(const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) = 0;
+
+ /**
+ * The default impl. will create a bitmap-shader from the bitmap,
+ * and call drawRect with it.
+ */
+ virtual void drawBitmapRect(const SkBitmap&,
+ const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) = 0;
+ virtual void drawBitmapNine(const SkBitmap&, const SkIRect& center,
+ const SkRect& dst, const SkPaint&);
+ virtual void drawBitmapLattice(const SkBitmap&, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&);
+
+ virtual void drawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint&, SkCanvas::SrcRectConstraint);
+ virtual void drawImageNine(const SkImage*, const SkIRect& center,
+ const SkRect& dst, const SkPaint&);
+ virtual void drawImageLattice(const SkImage*, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&);
+
+ virtual void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) = 0;
+ virtual void drawShadow(const SkPath&, const SkDrawShadowRec&);
+
+ virtual void drawGlyphRunList(const SkGlyphRunList& glyphRunList) = 0;
+ // default implementation calls drawVertices
+ virtual void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode, const SkPaint& paint);
+
+ // default implementation calls drawPath
+ virtual void drawAtlas(const SkImage* atlas, const SkRSXform[], const SkRect[],
+ const SkColor[], int count, SkBlendMode, const SkPaint&);
+
+ virtual void drawAnnotation(const SkRect&, const char[], SkData*) {}
+
+ // Default impl always calls drawRect() with a solid-color paint, setting it to anti-aliased
+ // only when all edge flags are set. If there's a clip region, it draws that using drawPath,
+ // or uses clipPath().
+ virtual void drawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color,
+ SkBlendMode mode);
+ // Default impl uses drawImageRect per entry, being anti-aliased only when an entry's edge flags
+ // are all set. If there's a clip region, it will be applied using clipPath().
+ virtual void drawEdgeAAImageSet(const SkCanvas::ImageSetEntry[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint& paint, SkCanvas::SrcRectConstraint);
+
+ /** The SkDevice passed will be an SkDevice which was returned by a call to
+        onCreateDevice on this device with kNever_TileUsage.
+ */
+ virtual void drawDevice(SkBaseDevice*, int x, int y, const SkPaint&) = 0;
+
+ void drawGlyphRunRSXform(const SkFont&, const SkGlyphID[], const SkRSXform[], int count,
+ SkPoint origin, const SkPaint& paint);
+
+ virtual void drawDrawable(SkDrawable*, const SkMatrix*, SkCanvas*);
+
+ virtual void drawSpecial(SkSpecialImage*, int x, int y, const SkPaint&,
+ SkImage* clipImage, const SkMatrix& clipMatrix);
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&);
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkImage*);
+ // Get a view of the entire device's current contents as an image.
+ sk_sp<SkSpecialImage> snapSpecial();
+ // Snap the 'subset' contents from this device, possibly as a read-only view. If 'forceCopy'
+ // is true then the returned image's pixels must not be affected by subsequent draws into the
+ // device. When 'forceCopy' is false, the image can be a view into the device's pixels
+ // (avoiding a copy for performance, at the expense of safety). Default returns null.
+ virtual sk_sp<SkSpecialImage> snapSpecial(const SkIRect& subset, bool forceCopy = false);
+
+ virtual void setImmutable() {}
+
+ bool readPixels(const SkPixmap&, int x, int y);
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ virtual GrContext* context() const { return nullptr; }
+
+ virtual sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&);
+ virtual bool onPeekPixels(SkPixmap*) { return false; }
+
+ /**
+ * The caller is responsible for "pre-clipping" the dst. The impl can assume that the dst
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in readPixels(), the public way to call this.
+ */
+ virtual bool onReadPixels(const SkPixmap&, int x, int y);
+
+ /**
+ * The caller is responsible for "pre-clipping" the src. The impl can assume that the src
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in writePixelsDirect(), the public way to call this.
+ */
+ virtual bool onWritePixels(const SkPixmap&, int x, int y);
+
+ virtual bool onAccessPixels(SkPixmap*) { return false; }
+
+ struct CreateInfo {
+ static SkPixelGeometry AdjustGeometry(TileUsage, SkPixelGeometry);
+
+ // The constructor may change the pixel geometry based on other parameters.
+ CreateInfo(const SkImageInfo& info,
+ TileUsage tileUsage,
+ SkPixelGeometry geo,
+ bool trackCoverage,
+ SkRasterHandleAllocator* allocator)
+ : fInfo(info)
+ , fTileUsage(tileUsage)
+ , fPixelGeometry(AdjustGeometry(tileUsage, geo))
+ , fTrackCoverage(trackCoverage)
+ , fAllocator(allocator)
+ {}
+
+ const SkImageInfo fInfo;
+ const TileUsage fTileUsage;
+ const SkPixelGeometry fPixelGeometry;
+ const bool fTrackCoverage = false;
+ SkRasterHandleAllocator* fAllocator = nullptr;
+ };
+
+ /**
+ * Create a new device based on CreateInfo. If the paint is not null, then it represents a
+ * preview of how the new device will be composed with its creator device (this).
+ *
+ * The subclass may be handed this device in drawDevice(), so it must always return
+ * a device that it knows how to draw, and that it knows how to identify if it is not of the
+ * same subclass (since drawDevice is passed a SkBaseDevice*). If the subclass cannot fulfill
+ * that contract (e.g. PDF cannot support some settings on the paint) it should return NULL,
+ * and the caller may then decide to explicitly create a bitmapdevice, knowing that later
+ * it could not call drawDevice with it (but it could call drawSprite or drawBitmap).
+ */
+ virtual SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) {
+ return nullptr;
+ }
+
+ // A helper function used by derived classes to log the scale factor of a bitmap or image draw.
+ static void LogDrawScaleFactor(const SkMatrix& view, const SkMatrix& srcToDst, SkFilterQuality);
+
+private:
+ friend class SkAndroidFrameworkUtils;
+ friend class SkCanvas;
+ friend struct DeviceCM; //for setMatrixClip
+ friend class SkDraw;
+ friend class SkDrawIter;
+ friend class SkSurface_Raster;
+ friend class DeviceTestingAccess;
+
+ // Temporarily friend the SkGlyphRunBuilder until drawPosText is gone.
+ friend class SkGlyphRun;
+ friend class SkGlyphRunList;
+ friend class SkGlyphRunBuilder;
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ // TODO: move to SkBitmapDevice
+ virtual void replaceBitmapBackendForRasterSurface(const SkBitmap&) {}
+
+ virtual bool forceConservativeRasterClip() const { return false; }
+
+ /**
+ * Don't call this!
+ */
+ virtual GrRenderTargetContext* accessRenderTargetContext() { return nullptr; }
+
+ // just called by SkCanvas when built as a layer
+ void setOrigin(const SkMatrix& ctm, int x, int y);
+
+ /** Causes any deferred drawing to the device to be completed.
+ */
+ virtual void flush() {}
+
+ virtual SkImageFilterCache* getImageFilterCache() { return nullptr; }
+
+ friend class SkNoPixelsDevice;
+ friend class SkBitmapDevice;
+ void privateResize(int w, int h) {
+ *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeWH(w, h);
+ }
+
+ SkIPoint fOrigin;
+ const SkImageInfo fInfo;
+ const SkSurfaceProps fSurfaceProps;
+ SkMatrix fCTM;
+
+ typedef SkRefCnt INHERITED;
+};
+
+class SkNoPixelsDevice : public SkBaseDevice {
+public:
+ SkNoPixelsDevice(const SkIRect& bounds, const SkSurfaceProps& props,
+ sk_sp<SkColorSpace> colorSpace = nullptr)
+ : SkBaseDevice(SkImageInfo::Make(bounds.size(), kUnknown_SkColorType,
+ kUnknown_SkAlphaType, std::move(colorSpace)),
+ props) {
+ // this fails if we enable this assert: DiscardableImageMapTest.GetDiscardableImagesInRectMaxImage
+ //SkASSERT(bounds.width() >= 0 && bounds.height() >= 0);
+
+ this->setOrigin(SkMatrix::I(), bounds.left(), bounds.top());
+ }
+
+ void resetForNextPicture(const SkIRect& bounds) {
+ //SkASSERT(bounds.width() >= 0 && bounds.height() >= 0);
+ this->privateResize(bounds.width(), bounds.height());
+ this->setOrigin(SkMatrix::I(), bounds.left(), bounds.top());
+ }
+
+protected:
+ // We don't track the clip at all (for performance), but we have to respond to some queries.
+ // We pretend to be wide-open. We could pretend to always be empty, but that *seems* worse.
+ void onSave() override {}
+ void onRestore() override {}
+ void onClipRect(const SkRect& rect, SkClipOp, bool aa) override {}
+ void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) override {}
+ void onClipPath(const SkPath& path, SkClipOp, bool aa) override {}
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override {}
+ void onSetDeviceClipRestriction(SkIRect* mutableClipRestriction) override {}
+ bool onClipIsAA() const override { return false; }
+ void onAsRgnClip(SkRegion* rgn) const override {
+ rgn->setRect(SkIRect::MakeWH(this->width(), this->height()));
+ }
+ ClipType onGetClipType() const override {
+ return ClipType::kRect;
+ }
+
+ void drawPaint(const SkPaint& paint) override {}
+ void drawPoints(SkCanvas::PointMode, size_t, const SkPoint[], const SkPaint&) override {}
+ void drawRect(const SkRect&, const SkPaint&) override {}
+ void drawOval(const SkRect&, const SkPaint&) override {}
+ void drawRRect(const SkRRect&, const SkPaint&) override {}
+ void drawPath(const SkPath&, const SkPaint&, bool) override {}
+ void drawSprite(const SkBitmap&, int, int, const SkPaint&) override {}
+ void drawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&, const SkPaint&,
+ SkCanvas::SrcRectConstraint) override {}
+ void drawDevice(SkBaseDevice*, int, int, const SkPaint&) override {}
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override {}
+ void drawVertices(const SkVertices*, const SkVertices::Bone[], int, SkBlendMode,
+ const SkPaint&) override {}
+
+private:
+ typedef SkBaseDevice INHERITED;
+};
+
+class SkAutoDeviceCTMRestore : SkNoncopyable {
+public:
+ SkAutoDeviceCTMRestore(SkBaseDevice* device, const SkMatrix& ctm)
+ : fDevice(device)
+ , fPrevCTM(device->ctm())
+ {
+ fDevice->setCTM(ctm);
+ }
+ ~SkAutoDeviceCTMRestore() {
+ fDevice->setCTM(fPrevCTM);
+ }
+
+private:
+ SkBaseDevice* fDevice;
+ const SkMatrix fPrevCTM;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDiscardableMemory.h b/gfx/skia/skia/src/core/SkDiscardableMemory.h
new file mode 100644
index 0000000000..613dbd763f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDiscardableMemory.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemory_DEFINED
+#define SkDiscardableMemory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * Interface for discardable memory. Implementation is provided by the
+ * embedder.
+ */
+class SK_API SkDiscardableMemory {
+public:
+ /**
+ * Factory method that creates, initializes and locks an SkDiscardableMemory
+     * object. If any of these steps fails, nullptr is returned.
+ */
+ static SkDiscardableMemory* Create(size_t bytes);
+
+ /**
+ * Factory class that creates, initializes and locks an SkDiscardableMemory
+     * object. If any of these steps fails, nullptr is returned.
+ */
+ class Factory : public SkRefCnt {
+ public:
+ virtual SkDiscardableMemory* create(size_t bytes) = 0;
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ /** Must not be called while locked.
+ */
+ virtual ~SkDiscardableMemory() {}
+
+ /**
+     * Locks the memory, preventing it from being discarded. Once locked, you may
+ * obtain a pointer to that memory using the data() method.
+ *
+ * lock() may return false, indicating that the underlying memory was
+ * discarded and that the lock failed.
+ *
+ * Nested calls to lock are not allowed.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT lock() = 0;
+
+ /**
+ * Returns the current pointer for the discardable memory. This call is ONLY
+ * valid when the discardable memory object is locked.
+ */
+ virtual void* data() = 0;
+
+ /**
+ * Unlock the memory so that it can be purged by the system. Must be called
+ * after every successful lock call.
+ */
+ virtual void unlock() = 0;
+};
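+
+// Illustrative usage sketch (not part of the upstream header), assuming an
+// embedder-provided implementation behind Create():
+#if 0
+static bool touch_discardable(size_t bytes) {
+    SkDiscardableMemory* dm = SkDiscardableMemory::Create(bytes);
+    if (!dm) {
+        return false;              // creation or the initial lock failed
+    }
+    memset(dm->data(), 0, bytes);  // Create() returns the memory locked
+    dm->unlock();                  // from here on the system may purge it
+
+    bool stillThere = dm->lock();  // must re-lock before every later access
+    if (stillThere) {
+        dm->unlock();              // balance the successful lock
+    }
+    delete dm;                     // destructor requires the unlocked state
+    return stillThere;
+}
+#endif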
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
new file mode 100644
index 0000000000..9471e1cd38
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkPointPriv.h"
+
+#include <utility>
+
+struct DFData {
+ float fAlpha; // alpha value of source texel
+ float fDistSq; // distance squared to nearest (so far) edge texel
+ SkPoint fDistVector; // distance vector to nearest (so far) edge texel
+};
+
+enum NeighborFlags {
+ kLeft_NeighborFlag = 0x01,
+ kRight_NeighborFlag = 0x02,
+ kTopLeft_NeighborFlag = 0x04,
+ kTop_NeighborFlag = 0x08,
+ kTopRight_NeighborFlag = 0x10,
+ kBottomLeft_NeighborFlag = 0x20,
+ kBottom_NeighborFlag = 0x40,
+ kBottomRight_NeighborFlag = 0x80,
+ kAll_NeighborFlags = 0xff,
+
+ kNeighborFlagCount = 8
+};
+
+// We treat an "edge" as a place where we cross from >=128 to <128, or vice versa, or
+// where we have two non-zero pixels that are <128.
+// 'neighborFlags' is used to limit the directions in which we test to avoid indexing
+// outside of the image
+static bool found_edge(const unsigned char* imagePtr, int width, int neighborFlags) {
+ // the order of these should match the neighbor flags above
+ const int kNum8ConnectedNeighbors = 8;
+ const int offsets[8] = {-1, 1, -width-1, -width, -width+1, width-1, width, width+1 };
+ SkASSERT(kNum8ConnectedNeighbors == kNeighborFlagCount);
+
+ // search for an edge
+ unsigned char currVal = *imagePtr;
+ unsigned char currCheck = (currVal >> 7);
+ for (int i = 0; i < kNum8ConnectedNeighbors; ++i) {
+ unsigned char neighborVal;
+ if ((1 << i) & neighborFlags) {
+ const unsigned char* checkPtr = imagePtr + offsets[i];
+ neighborVal = *checkPtr;
+ } else {
+ neighborVal = 0;
+ }
+ unsigned char neighborCheck = (neighborVal >> 7);
+ SkASSERT(currCheck == 0 || currCheck == 1);
+ SkASSERT(neighborCheck == 0 || neighborCheck == 1);
+ // if sharp transition
+ if (currCheck != neighborCheck ||
+ // or both <128 and >0
+ (!currCheck && !neighborCheck && currVal && neighborVal)) {
+ return true;
+ }
+ }
+
+ return false;
+}
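+
+// Example for the rule above (illustrative): neighboring values 130 and 90
+// straddle the 128 threshold, so both texels are edge texels; 90 next to 60
+// does not straddle it, but both are non-zero and < 128, so that pair also
+// registers as an edge.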
+
+static void init_glyph_data(DFData* data, unsigned char* edges, const unsigned char* image,
+ int dataWidth, int dataHeight,
+ int imageWidth, int imageHeight,
+ int pad) {
+ data += pad*dataWidth;
+ data += pad;
+ edges += (pad*dataWidth + pad);
+
+ for (int j = 0; j < imageHeight; ++j) {
+ for (int i = 0; i < imageWidth; ++i) {
+ if (255 == *image) {
+ data->fAlpha = 1.0f;
+ } else {
+ data->fAlpha = (*image)*0.00392156862f; // 1/255
+ }
+ int checkMask = kAll_NeighborFlags;
+ if (i == 0) {
+ checkMask &= ~(kLeft_NeighborFlag|kTopLeft_NeighborFlag|kBottomLeft_NeighborFlag);
+ }
+ if (i == imageWidth-1) {
+ checkMask &= ~(kRight_NeighborFlag|kTopRight_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (j == 0) {
+ checkMask &= ~(kTopLeft_NeighborFlag|kTop_NeighborFlag|kTopRight_NeighborFlag);
+ }
+ if (j == imageHeight-1) {
+ checkMask &= ~(kBottomLeft_NeighborFlag|kBottom_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (found_edge(image, imageWidth, checkMask)) {
+ *edges = 255; // using 255 makes for convenient debug rendering
+ }
+ ++data;
+ ++image;
+ ++edges;
+ }
+ data += 2*pad;
+ edges += 2*pad;
+ }
+}
+
+// from Gustavson (2011)
+// computes the distance to an edge given an edge normal vector and a pixel's alpha value
+// assumes that direction has been pre-normalized
+static float edge_distance(const SkPoint& direction, float alpha) {
+ float dx = direction.fX;
+ float dy = direction.fY;
+ float distance;
+ if (SkScalarNearlyZero(dx) || SkScalarNearlyZero(dy)) {
+ distance = 0.5f - alpha;
+ } else {
+ // this is easier if we treat the direction as being in the first octant
+ // (other octants are symmetrical)
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ if (dx < dy) {
+ using std::swap;
+ swap(dx, dy);
+ }
+
+ // a1 = 0.5*dy/dx is the smaller fractional area chopped off by the edge
+ // to avoid the divide, we just consider the numerator
+ float a1num = 0.5f*dy;
+
+ // we now compute the approximate distance, depending where the alpha falls
+ // relative to the edge fractional area
+
+ // if 0 <= alpha < a1
+ if (alpha*dx < a1num) {
+ // TODO: find a way to do this without square roots?
+ distance = 0.5f*(dx + dy) - SkScalarSqrt(2.0f*dx*dy*alpha);
+ // if a1 <= alpha <= 1 - a1
+ } else if (alpha*dx < (dx - a1num)) {
+ distance = (0.5f - alpha)*dx;
+ // if 1 - a1 < alpha <= 1
+ } else {
+ // TODO: find a way to do this without square roots?
+ distance = -0.5f*(dx + dy) + SkScalarSqrt(2.0f*dx*dy*(1.0f - alpha));
+ }
+ }
+
+ return distance;
+}
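+
+// Worked example for the function above (illustrative): for an axis-aligned
+// normal such as (dx, dy) = (1, 0) the first branch fires and
+// distance = 0.5f - alpha, so a texel with 75% coverage (alpha = 0.75) yields
+// -0.25, a quarter pixel inside the shape -- negative distances are interior.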
+
+static void init_distances(DFData* data, unsigned char* edges, int width, int height) {
+ // skip one pixel border
+ DFData* currData = data;
+ DFData* prevData = data - width;
+ DFData* nextData = data + width;
+
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ if (*edges) {
+ // we should not be in the one-pixel outside band
+ SkASSERT(i > 0 && i < width-1 && j > 0 && j < height-1);
+ // gradient will point from low to high
+ // +y is down in this case
+ // i.e., if you're outside, gradient points towards edge
+ // if you're inside, gradient points away from edge
+ SkPoint currGrad;
+ currGrad.fX = (prevData+1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*(currData+1)->fAlpha
+ - SK_ScalarSqrt2*(currData-1)->fAlpha
+ + (nextData+1)->fAlpha - (nextData-1)->fAlpha;
+ currGrad.fY = (nextData-1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*nextData->fAlpha
+ - SK_ScalarSqrt2*prevData->fAlpha
+ + (nextData+1)->fAlpha - (prevData+1)->fAlpha;
+ SkPointPriv::SetLengthFast(&currGrad, 1.0f);
+
+ // init squared distance to edge and distance vector
+ float dist = edge_distance(currGrad, currData->fAlpha);
+ currGrad.scale(dist, &currData->fDistVector);
+ currData->fDistSq = dist*dist;
+ } else {
+ // init distance to "far away"
+ currData->fDistSq = 2000000.f;
+ currData->fDistVector.fX = 1000.f;
+ currData->fDistVector.fY = 1000.f;
+ }
+ ++currData;
+ ++prevData;
+ ++nextData;
+ ++edges;
+ }
+ }
+}
+
+// Danielsson's 8SSEDT
+
+// first stage forward pass
+// (forward in Y, forward in X)
+static void F1(DFData* curr, int width) {
+ // upper left
+ DFData* check = curr - width-1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq - 2.0f*(distVec.fX + distVec.fY - 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // up
+ check = curr - width;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fY + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // upper right
+ check = curr - width+1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*(distVec.fX - distVec.fY + 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // left
+ check = curr - 1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
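+
+// The incremental updates above (and in F2/B1/B2 below) come from expanding
+// |v + d|^2 for a one-texel step d in {(+-1, 0), (0, +-1), (+-1, +-1)}:
+//
+//   |v + d|^2 = |v|^2 + 2*(v . d) + |d|^2
+//
+// e.g. the "up" neighbor contributes d = (0, -1), giving
+// distSq - 2*distVec.fY + 1 -- exactly the expression used above.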
+
+// second stage forward pass
+// (forward in Y, backward in X)
+static void F2(DFData* curr, int width) {
+ // right
+ DFData* check = curr + 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// first stage backward pass
+// (backward in Y, forward in X)
+static void B1(DFData* curr, int width) {
+ // left
+ DFData* check = curr - 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// second stage backward pass
+// (backward in Y, backward in X)
+static void B2(DFData* curr, int width) {
+ // right
+ DFData* check = curr + 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom left
+ check = curr + width-1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*(distVec.fX - distVec.fY - 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom
+ check = curr + width;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*distVec.fY + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom right
+ check = curr + width+1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*(distVec.fX + distVec.fY + 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// enable this to output edge data rather than the distance field
+#define DUMP_EDGE 0
+
+#if !DUMP_EDGE
+template <int distanceMagnitude>
+static unsigned char pack_distance_field_val(float dist) {
+    // The distance field is constructed as unsigned char values, so that the zero value is
+    // at 128. Besides 128, we have 128 values in the range [0, 128) but only 127 values in
+    // the range (128, 255], so we multiply distanceMagnitude by 127/128 for the latter
+    // range to avoid overflow.
+ dist = SkScalarPin(-dist, -distanceMagnitude, distanceMagnitude * 127.0f / 128.0f);
+
+ // Scale into the positive range for unsigned distance.
+ dist += distanceMagnitude;
+
+ // Scale into unsigned char range.
+ // Round to place negative and positive values as equally as possible around 128
+ // (which represents zero).
+ return (unsigned char)SkScalarRoundToInt(dist / (2 * distanceMagnitude) * 256.0f);
+}
+#endif
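+
+// Worked example for the packing above (illustrative), with
+// distanceMagnitude = 4: an on-edge texel (dist = 0) packs to
+// round(4/8 * 256) = 128; a texel 4px inside (dist = -4) pins at 4*127/128
+// and packs to 255; a texel 4px outside (dist = +4) packs to 0. The field
+// thus runs 0 (far outside) through 128 (on edge) to 255 (far inside).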
+
+// assumes a padded 8-bit image and distance field
+// width and height are the original width and height of the image
+static bool generate_distance_field_from_image(unsigned char* distanceField,
+ const unsigned char* copyPtr,
+ int width, int height) {
+ SkASSERT(distanceField);
+ SkASSERT(copyPtr);
+
+    // we expand our temp data by one more texel on each side to simplify the
+    // scanning code -- the extra ring is always treated as infinitely far away
+ int pad = SK_DistanceFieldPad + 1;
+
+ // set params for distance field data
+ int dataWidth = width + 2*pad;
+ int dataHeight = height + 2*pad;
+
+ // create zeroed temp DFData+edge storage
+ SkAutoFree storage(sk_calloc_throw(dataWidth*dataHeight*(sizeof(DFData) + 1)));
+ DFData* dataPtr = (DFData*)storage.get();
+ unsigned char* edgePtr = (unsigned char*)storage.get() + dataWidth*dataHeight*sizeof(DFData);
+
+ // copy glyph into distance field storage
+ init_glyph_data(dataPtr, edgePtr, copyPtr,
+ dataWidth, dataHeight,
+ width+2, height+2, SK_DistanceFieldPad);
+
+ // create initial distance data, particularly at edges
+ init_distances(dataPtr, edgePtr, dataWidth, dataHeight);
+
+ // now perform Euclidean distance transform to propagate distances
+
+ // forwards in y
+ DFData* currData = dataPtr+dataWidth+1; // skip outer buffer
+ unsigned char* currEdge = edgePtr+dataWidth+1;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ // forwards in x
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ F1(currData, dataWidth);
+ }
+ ++currData;
+ ++currEdge;
+ }
+
+ // backwards in x
+ --currData; // reset to end
+ --currEdge;
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ F2(currData, dataWidth);
+ }
+ --currData;
+ --currEdge;
+ }
+
+ currData += dataWidth+1;
+ currEdge += dataWidth+1;
+ }
+
+ // backwards in y
+ currData = dataPtr+dataWidth*(dataHeight-2) - 1; // skip outer buffer
+ currEdge = edgePtr+dataWidth*(dataHeight-2) - 1;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ // forwards in x
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ B1(currData, dataWidth);
+ }
+ ++currData;
+ ++currEdge;
+ }
+
+ // backwards in x
+ --currData; // reset to end
+ --currEdge;
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ B2(currData, dataWidth);
+ }
+ --currData;
+ --currEdge;
+ }
+
+ currData -= dataWidth-1;
+ currEdge -= dataWidth-1;
+ }
+
+ // copy results to final distance field data
+ currData = dataPtr + dataWidth+1;
+ currEdge = edgePtr + dataWidth+1;
+ unsigned char *dfPtr = distanceField;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ for (int i = 1; i < dataWidth-1; ++i) {
+#if DUMP_EDGE
+ float alpha = currData->fAlpha;
+ float edge = 0.0f;
+ if (*currEdge) {
+ edge = 0.25f;
+ }
+ // blend with original image
+ float result = alpha + (1.0f-alpha)*edge;
+ unsigned char val = sk_float_round2int(255*result);
+ *dfPtr++ = val;
+#else
+ float dist;
+ if (currData->fAlpha > 0.5f) {
+ dist = -SkScalarSqrt(currData->fDistSq);
+ } else {
+ dist = SkScalarSqrt(currData->fDistSq);
+ }
+ *dfPtr++ = pack_distance_field_val<SK_DistanceFieldMagnitude>(dist);
+#endif
+ ++currData;
+ ++currEdge;
+ }
+ currData += 2;
+ currEdge += 2;
+ }
+
+ return true;
+}
+
+// assumes an 8-bit image and distance field
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+ const unsigned char* image,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(distanceField);
+ SkASSERT(image);
+
+ // create temp data
+ SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+ unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+ // we copy our source image into a padded copy to ensure we catch edge transitions
+ // around the outside
+ const unsigned char* currSrcScanLine = image;
+ sk_bzero(copyPtr, (width+2)*sizeof(char));
+ unsigned char* currDestPtr = copyPtr + width + 2;
+ for (int i = 0; i < height; ++i) {
+ *currDestPtr++ = 0;
+ memcpy(currDestPtr, currSrcScanLine, width);
+ currSrcScanLine += rowBytes;
+ currDestPtr += width;
+ *currDestPtr++ = 0;
+ }
+ sk_bzero(currDestPtr, (width+2)*sizeof(char));
+
+ return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
+
+// assumes a 16-bit lcd mask and 8-bit distance field
+bool SkGenerateDistanceFieldFromLCD16Mask(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes) {
+ SkASSERT(distanceField);
+ SkASSERT(image);
+
+ // create temp data
+ SkAutoSMalloc<1024> copyStorage((w+2)*(h+2)*sizeof(char));
+ unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+ // we copy our source image into a padded copy to ensure we catch edge transitions
+ // around the outside
+ const uint16_t* start = reinterpret_cast<const uint16_t*>(image);
+ auto currSrcScanline = SkMask::AlphaIter<SkMask::kLCD16_Format>(start);
+ auto endSrcScanline = SkMask::AlphaIter<SkMask::kLCD16_Format>(start + w);
+ sk_bzero(copyPtr, (w+2)*sizeof(char));
+ unsigned char* currDestPtr = copyPtr + w + 2;
+ for (int i = 0; i < h; ++i, currSrcScanline >>= rowBytes, endSrcScanline >>= rowBytes) {
+ *currDestPtr++ = 0;
+ for (auto src = currSrcScanline; src < endSrcScanline; ++src) {
+ *currDestPtr++ = *src;
+ }
+ *currDestPtr++ = 0;
+ }
+ sk_bzero(currDestPtr, (w+2)*sizeof(char));
+
+ return generate_distance_field_from_image(distanceField, copyPtr, w, h);
+}
+
+// assumes a 1-bit image and 8-bit distance field
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+ const unsigned char* image,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(distanceField);
+ SkASSERT(image);
+
+ // create temp data
+ SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+ unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+ // we copy our source image into a padded copy to ensure we catch edge transitions
+ // around the outside
+ const unsigned char* currSrcScanLine = image;
+ sk_bzero(copyPtr, (width+2)*sizeof(char));
+ unsigned char* currDestPtr = copyPtr + width + 2;
+ for (int i = 0; i < height; ++i) {
+ *currDestPtr++ = 0;
+
+ int rowWritesLeft = width;
+ const unsigned char *maskPtr = currSrcScanLine;
+ while (rowWritesLeft > 0) {
+ unsigned mask = *maskPtr++;
+ for (int i = 7; i >= 0 && rowWritesLeft; --i, --rowWritesLeft) {
+ *currDestPtr++ = (mask & (1 << i)) ? 0xff : 0;
+ }
+ }
+ currSrcScanLine += rowBytes;
+
+ *currDestPtr++ = 0;
+ }
+ sk_bzero(currDestPtr, (width+2)*sizeof(char));
+
+ return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
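+
+// e.g. in the expansion loop above, a BW mask byte of 0xB0 (binary 10110000)
+// with rowWritesLeft >= 8 expands MSB-first into the A8 bytes
+// ff 00 ff ff 00 00 00 00 in the padded copy.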
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.h b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
new file mode 100644
index 0000000000..64458db8de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkDistanceFieldGen_DEFINED
+#define SkDistanceFieldGen_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// the max magnitude for the distance field
+// distance values are limited to the range (-SK_DistanceFieldMagnitude, SK_DistanceFieldMagnitude]
+#define SK_DistanceFieldMagnitude 4
+// we need to pad around the original glyph to allow our maximum distance of
+// SK_DistanceFieldMagnitude texels away from any edge
+#define SK_DistanceFieldPad 4
+// the rect we render with is inset from the distance field glyph size to allow for bilerp
+#define SK_DistanceFieldInset 2
+
+// For the fragment shader:
+// The distance field is constructed as unsigned char values,
+// so that the zero value is at 128, and the supported range of distances is [-4 * 127/128, 4].
+// Hence our multiplier (width of the range) is 4 * 255/128 and zero threshold is 128/255.
+#define SK_DistanceFieldMultiplier "7.96875"
+#define SK_DistanceFieldThreshold "0.50196078431"
+
+/** Given 8-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 8-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given LCD16 mask data (not a 16-bit image), generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 16-bit LCD data we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromLCD16Mask(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given 1-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 1-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given width and height of original image, return size (in bytes) of distance field
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ */
+inline size_t SkComputeDistanceFieldSize(int w, int h) {
+ return (w + 2*SK_DistanceFieldPad) * (h + 2*SK_DistanceFieldPad) * sizeof(unsigned char);
+}
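+
+// A minimal usage sketch (hypothetical caller, not part of this header):
+// allocate the padded output with SkComputeDistanceFieldSize() and feed an
+// A8 mask straight through; a8Pixels, w, h and rowBytes are assumed inputs.
+//
+//   SkAutoTMalloc<unsigned char> df(SkComputeDistanceFieldSize(w, h));
+//   if (SkGenerateDistanceFieldFromA8Image(df.get(), a8Pixels, w, h, rowBytes)) {
+//       // df.get() now holds a (w + 2*SK_DistanceFieldPad) x
+//       // (h + 2*SK_DistanceFieldPad) field, with 128 at the zero crossing.
+//   }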
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDocument.cpp b/gfx/skia/skia/src/core/SkDocument.cpp
new file mode 100644
index 0000000000..30c94d8317
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDocument.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDocument.h"
+#include "include/core/SkStream.h"
+
+SkDocument::SkDocument(SkWStream* stream) : fStream(stream), fState(kBetweenPages_State) {}
+
+SkDocument::~SkDocument() {
+ this->close();
+}
+
+static SkCanvas* trim(SkCanvas* canvas, SkScalar width, SkScalar height,
+ const SkRect* content) {
+ if (content && canvas) {
+ SkRect inner = *content;
+ if (!inner.intersect({0, 0, width, height})) {
+ return nullptr;
+ }
+ canvas->clipRect(inner);
+ canvas->translate(inner.x(), inner.y());
+ }
+ return canvas;
+}
+
+SkCanvas* SkDocument::beginPage(SkScalar width, SkScalar height,
+ const SkRect* content) {
+ if (width <= 0 || height <= 0 || kClosed_State == fState) {
+ return nullptr;
+ }
+ if (kInPage_State == fState) {
+ this->endPage();
+ }
+ SkASSERT(kBetweenPages_State == fState);
+ fState = kInPage_State;
+ return trim(this->onBeginPage(width, height), width, height, content);
+}
+
+void SkDocument::endPage() {
+ if (kInPage_State == fState) {
+ fState = kBetweenPages_State;
+ this->onEndPage();
+ }
+}
+
+void SkDocument::close() {
+ for (;;) {
+ switch (fState) {
+ case kBetweenPages_State: {
+ fState = kClosed_State;
+ this->onClose(fStream);
+ // we don't own the stream, but we mark it nullptr since we can
+ // no longer write to it.
+ fStream = nullptr;
+ return;
+ }
+ case kInPage_State:
+ this->endPage();
+ break;
+ case kClosed_State:
+ return;
+ }
+ }
+}
+
+void SkDocument::abort() {
+ this->onAbort();
+
+ fState = kClosed_State;
+ // we don't own the stream, but we mark it nullptr since we can
+ // no longer write to it.
+ fStream = nullptr;
+}
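+
+// A hypothetical driver (not part of this file) for the state machine above;
+// SkPDF::MakeDocument is one concrete factory, and beginPage() implicitly
+// ends any page left open:
+//
+//   SkFILEWStream stream("doc.pdf");
+//   sk_sp<SkDocument> doc = SkPDF::MakeDocument(&stream);
+//   SkCanvas* page = doc->beginPage(612, 792);  // kBetweenPages -> kInPage
+//   page->drawColor(SK_ColorWHITE);
+//   doc->endPage();                             // back to kBetweenPages
+//   doc->close();                               // flush stream, kClosed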
diff --git a/gfx/skia/skia/src/core/SkDraw.cpp b/gfx/skia/skia/src/core/SkDraw.cpp
new file mode 100644
index 0000000000..5ef63948a1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw.cpp
@@ -0,0 +1,1307 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDraw.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkAutoBlitterChoose.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawProcs.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkStroke.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkUtils.h"
+
+#include <utility>
+
+static SkPaint make_paint_with_image(
+ const SkPaint& origPaint, const SkBitmap& bitmap, SkMatrix* matrix = nullptr) {
+ SkPaint paint(origPaint);
+ paint.setShader(SkMakeBitmapShaderForPaint(origPaint, bitmap, SkTileMode::kClamp,
+ SkTileMode::kClamp, matrix,
+ kNever_SkCopyPixelsMode));
+ return paint;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDraw::SkDraw() {}
+
+bool SkDraw::computeConservativeLocalClipBounds(SkRect* localBounds) const {
+ if (fRC->isEmpty()) {
+ return false;
+ }
+
+ SkMatrix inverse;
+ if (!fMatrix->invert(&inverse)) {
+ return false;
+ }
+
+ SkIRect devBounds = fRC->getBounds();
+ // outset to have slop for antialiasing and hairlines
+ devBounds.outset(1, 1);
+ inverse.mapRect(localBounds, SkRect::Make(devBounds));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkDraw::drawPaint(const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkIRect devRect;
+ devRect.setWH(fDst.width(), fDst.height());
+
+ SkAutoBlitterChoose blitter(*this, nullptr, paint);
+ SkScan::FillIRect(devRect, *fRC, blitter.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct PtProcRec {
+ SkCanvas::PointMode fMode;
+ const SkPaint* fPaint;
+ const SkRegion* fClip;
+ const SkRasterClip* fRC;
+
+ // computed values
+ SkRect fClipBounds;
+ SkScalar fRadius;
+
+ typedef void (*Proc)(const PtProcRec&, const SkPoint devPts[], int count,
+ SkBlitter*);
+
+ bool init(SkCanvas::PointMode, const SkPaint&, const SkMatrix* matrix,
+ const SkRasterClip*);
+ Proc chooseProc(SkBlitter** blitter);
+
+private:
+ SkAAClipBlitterWrapper fWrapper;
+};
+
+static void bw_pt_rect_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkASSERT(rec.fClip->isRect());
+ const SkIRect& r = rec.fClip->getBounds();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ blitter->blitH(x, y, 1);
+ }
+ }
+}
+
+static void bw_pt_rect_16_hair_proc(const PtProcRec& rec,
+ const SkPoint devPts[], int count,
+ SkBlitter* blitter) {
+ SkASSERT(rec.fRC->isRect());
+ const SkIRect& r = rec.fRC->getBounds();
+ uint32_t value;
+ const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+ SkASSERT(dst);
+
+ uint16_t* addr = dst->writable_addr16(0, 0);
+ size_t rb = dst->rowBytes();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ ((uint16_t*)((char*)addr + y * rb))[x] = SkToU16(value);
+ }
+ }
+}
+
+static void bw_pt_rect_32_hair_proc(const PtProcRec& rec,
+ const SkPoint devPts[], int count,
+ SkBlitter* blitter) {
+ SkASSERT(rec.fRC->isRect());
+ const SkIRect& r = rec.fRC->getBounds();
+ uint32_t value;
+ const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+ SkASSERT(dst);
+
+ SkPMColor* addr = dst->writable_addr32(0, 0);
+ size_t rb = dst->rowBytes();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ ((SkPMColor*)((char*)addr + y * rb))[x] = value;
+ }
+ }
+}
+
+static void bw_pt_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (rec.fClip->contains(x, y)) {
+ blitter->blitH(x, y, 1);
+ }
+ }
+}
+
+static void bw_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i += 2) {
+ SkScan::HairLine(&devPts[i], 2, *rec.fRC, blitter);
+ }
+}
+
+static void bw_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkScan::HairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// aa versions
+
+static void aa_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i += 2) {
+ SkScan::AntiHairLine(&devPts[i], 2, *rec.fRC, blitter);
+ }
+}
+
+static void aa_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkScan::AntiHairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// square procs (strokeWidth > 0 but matrix is square-scale (sx == sy))
+
+static SkRect make_square_rad(SkPoint center, SkScalar radius) {
+ return {
+ center.fX - radius, center.fY - radius,
+ center.fX + radius, center.fY + radius
+ };
+}
+
+static SkXRect make_xrect(const SkRect& r) {
+ SkASSERT(SkRectPriv::FitsInFixed(r));
+ return {
+ SkScalarToFixed(r.fLeft), SkScalarToFixed(r.fTop),
+ SkScalarToFixed(r.fRight), SkScalarToFixed(r.fBottom)
+ };
+}
+
+static void bw_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i++) {
+ SkRect r = make_square_rad(devPts[i], rec.fRadius);
+ if (r.intersect(rec.fClipBounds)) {
+ SkScan::FillXRect(make_xrect(r), *rec.fRC, blitter);
+ }
+ }
+}
+
+static void aa_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i++) {
+ SkRect r = make_square_rad(devPts[i], rec.fRadius);
+ if (r.intersect(rec.fClipBounds)) {
+ SkScan::AntiFillXRect(make_xrect(r), *rec.fRC, blitter);
+ }
+ }
+}
+
+// If this returns true, then chooseProc() must return a valid proc
+bool PtProcRec::init(SkCanvas::PointMode mode, const SkPaint& paint,
+ const SkMatrix* matrix, const SkRasterClip* rc) {
+ if ((unsigned)mode > (unsigned)SkCanvas::kPolygon_PointMode) {
+ return false;
+ }
+ if (paint.getPathEffect()) {
+ return false;
+ }
+ SkScalar width = paint.getStrokeWidth();
+ SkScalar radius = -1; // sentinel value, a "valid" value must be > 0
+
+ if (0 == width) {
+ radius = 0.5f;
+ } else if (paint.getStrokeCap() != SkPaint::kRound_Cap &&
+ matrix->isScaleTranslate() && SkCanvas::kPoints_PointMode == mode) {
+ SkScalar sx = matrix->get(SkMatrix::kMScaleX);
+ SkScalar sy = matrix->get(SkMatrix::kMScaleY);
+ if (SkScalarNearlyZero(sx - sy)) {
+ radius = SkScalarHalf(width * SkScalarAbs(sx));
+ }
+ }
+ if (radius > 0) {
+ SkRect clipBounds = SkRect::Make(rc->getBounds());
+ // if we return true, the caller may assume that the constructed shapes can be represented
+ // using SkFixed (after clipping), so we preflight that here.
+ if (!SkRectPriv::FitsInFixed(clipBounds)) {
+ return false;
+ }
+ fMode = mode;
+ fPaint = &paint;
+ fClip = nullptr;
+ fRC = rc;
+ fClipBounds = clipBounds;
+ fRadius = radius;
+ return true;
+ }
+ return false;
+}
+
+PtProcRec::Proc PtProcRec::chooseProc(SkBlitter** blitterPtr) {
+ Proc proc = nullptr;
+
+ SkBlitter* blitter = *blitterPtr;
+ if (fRC->isBW()) {
+ fClip = &fRC->bwRgn();
+ } else {
+ fWrapper.init(*fRC, blitter);
+ fClip = &fWrapper.getRgn();
+ blitter = fWrapper.getBlitter();
+ *blitterPtr = blitter;
+ }
+
+ // for our arrays
+ SkASSERT(0 == SkCanvas::kPoints_PointMode);
+ SkASSERT(1 == SkCanvas::kLines_PointMode);
+ SkASSERT(2 == SkCanvas::kPolygon_PointMode);
+ SkASSERT((unsigned)fMode <= (unsigned)SkCanvas::kPolygon_PointMode);
+
+ if (fPaint->isAntiAlias()) {
+ if (0 == fPaint->getStrokeWidth()) {
+ static const Proc gAAProcs[] = {
+ aa_square_proc, aa_line_hair_proc, aa_poly_hair_proc
+ };
+ proc = gAAProcs[fMode];
+ } else if (fPaint->getStrokeCap() != SkPaint::kRound_Cap) {
+ SkASSERT(SkCanvas::kPoints_PointMode == fMode);
+ proc = aa_square_proc;
+ }
+ } else { // BW
+ if (fRadius <= 0.5f) { // small radii and hairline
+ if (SkCanvas::kPoints_PointMode == fMode && fClip->isRect()) {
+ uint32_t value;
+ const SkPixmap* bm = blitter->justAnOpaqueColor(&value);
+ if (bm && kRGB_565_SkColorType == bm->colorType()) {
+ proc = bw_pt_rect_16_hair_proc;
+ } else if (bm && kN32_SkColorType == bm->colorType()) {
+ proc = bw_pt_rect_32_hair_proc;
+ } else {
+ proc = bw_pt_rect_hair_proc;
+ }
+ } else {
+ static Proc gBWProcs[] = {
+ bw_pt_hair_proc, bw_line_hair_proc, bw_poly_hair_proc
+ };
+ proc = gBWProcs[fMode];
+ }
+ } else {
+ proc = bw_square_proc;
+ }
+ }
+ return proc;
+}
+
+// each of these costs 8 bytes of stack space, so don't make it too large
+// must be even for lines/polygon to work
+#define MAX_DEV_PTS 32
+
+void SkDraw::drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint,
+ SkBaseDevice* device) const {
+ // if we're in lines mode, force count to be even
+ if (SkCanvas::kLines_PointMode == mode) {
+ count &= ~(size_t)1;
+ }
+
+ if ((long)count <= 0) {
+ return;
+ }
+
+ SkASSERT(pts != nullptr);
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ PtProcRec rec;
+ if (!device && rec.init(mode, paint, fMatrix, fRC)) {
+ SkAutoBlitterChoose blitter(*this, nullptr, paint);
+
+ SkPoint devPts[MAX_DEV_PTS];
+ const SkMatrix* matrix = fMatrix;
+ SkBlitter* bltr = blitter.get();
+ PtProcRec::Proc proc = rec.chooseProc(&bltr);
+ // we have to back up subsequent passes if we're in polygon mode
+ const size_t backup = (SkCanvas::kPolygon_PointMode == mode);
+
+ do {
+ int n = SkToInt(count);
+ if (n > MAX_DEV_PTS) {
+ n = MAX_DEV_PTS;
+ }
+ matrix->mapPoints(devPts, pts, n);
+ if (!SkScalarsAreFinite(&devPts[0].fX, n * 2)) {
+ return;
+ }
+ proc(rec, devPts, n, bltr);
+ pts += n - backup;
+ SkASSERT(SkToInt(count) >= n);
+ count -= n;
+ if (count > 0) {
+ count += backup;
+ }
+ } while (count != 0);
+ } else {
+ switch (mode) {
+ case SkCanvas::kPoints_PointMode: {
+ // temporarily mark the paint as filling.
+ SkPaint newPaint(paint);
+ newPaint.setStyle(SkPaint::kFill_Style);
+
+ SkScalar width = newPaint.getStrokeWidth();
+ SkScalar radius = SkScalarHalf(width);
+
+ if (newPaint.getStrokeCap() == SkPaint::kRound_Cap) {
+ if (device) {
+ for (size_t i = 0; i < count; ++i) {
+ SkRect r = SkRect::MakeLTRB(pts[i].fX - radius, pts[i].fY - radius,
+ pts[i].fX + radius, pts[i].fY + radius);
+ device->drawOval(r, newPaint);
+ }
+ } else {
+ SkPath path;
+ SkMatrix preMatrix;
+
+ path.addCircle(0, 0, radius);
+ for (size_t i = 0; i < count; i++) {
+ preMatrix.setTranslate(pts[i].fX, pts[i].fY);
+ // pass true for the last point, since we can modify
+ // the path after that
+ path.setIsVolatile((count-1) == i);
+ this->drawPath(path, newPaint, &preMatrix, (count-1) == i);
+ }
+ }
+ } else {
+ SkRect r;
+
+ for (size_t i = 0; i < count; i++) {
+ r.fLeft = pts[i].fX - radius;
+ r.fTop = pts[i].fY - radius;
+ r.fRight = r.fLeft + width;
+ r.fBottom = r.fTop + width;
+ if (device) {
+ device->drawRect(r, newPaint);
+ } else {
+ this->drawRect(r, newPaint);
+ }
+ }
+ }
+ break;
+ }
+ case SkCanvas::kLines_PointMode:
+ if (2 == count && paint.getPathEffect()) {
+ // most likely a dashed line - see if it is one of the ones
+ // we can accelerate
+ SkStrokeRec rec(paint);
+ SkPathEffect::PointData pointData;
+
+ SkPath path;
+ path.moveTo(pts[0]);
+ path.lineTo(pts[1]);
+
+ SkRect cullRect = SkRect::Make(fRC->getBounds());
+
+ if (paint.getPathEffect()->asPoints(&pointData, path, rec,
+ *fMatrix, &cullRect)) {
+ // 'asPoints' managed to find some fast path
+
+ SkPaint newP(paint);
+ newP.setPathEffect(nullptr);
+ newP.setStyle(SkPaint::kFill_Style);
+
+ if (!pointData.fFirst.isEmpty()) {
+ if (device) {
+ device->drawPath(pointData.fFirst, newP);
+ } else {
+ this->drawPath(pointData.fFirst, newP);
+ }
+ }
+
+ if (!pointData.fLast.isEmpty()) {
+ if (device) {
+ device->drawPath(pointData.fLast, newP);
+ } else {
+ this->drawPath(pointData.fLast, newP);
+ }
+ }
+
+ if (pointData.fSize.fX == pointData.fSize.fY) {
+ // The rest of the dashed line can just be drawn as points
+ SkASSERT(pointData.fSize.fX == SkScalarHalf(newP.getStrokeWidth()));
+
+ if (SkPathEffect::PointData::kCircles_PointFlag & pointData.fFlags) {
+ newP.setStrokeCap(SkPaint::kRound_Cap);
+ } else {
+ newP.setStrokeCap(SkPaint::kButt_Cap);
+ }
+
+ if (device) {
+ device->drawPoints(SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP);
+ } else {
+ this->drawPoints(SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP,
+ device);
+ }
+ break;
+ } else {
+ // The rest of the dashed line must be drawn as rects
+ SkASSERT(!(SkPathEffect::PointData::kCircles_PointFlag &
+ pointData.fFlags));
+
+ SkRect r;
+
+ for (int i = 0; i < pointData.fNumPoints; ++i) {
+ r.setLTRB(pointData.fPoints[i].fX - pointData.fSize.fX,
+ pointData.fPoints[i].fY - pointData.fSize.fY,
+ pointData.fPoints[i].fX + pointData.fSize.fX,
+ pointData.fPoints[i].fY + pointData.fSize.fY);
+ if (device) {
+ device->drawRect(r, newP);
+ } else {
+ this->drawRect(r, newP);
+ }
+ }
+ }
+
+ break;
+ }
+ }
+ // couldn't take fast path so fall through!
+ case SkCanvas::kPolygon_PointMode: {
+ count -= 1;
+ SkPath path;
+ SkPaint p(paint);
+ p.setStyle(SkPaint::kStroke_Style);
+ size_t inc = (SkCanvas::kLines_PointMode == mode) ? 2 : 1;
+ path.setIsVolatile(true);
+ for (size_t i = 0; i < count; i += inc) {
+ path.moveTo(pts[i]);
+ path.lineTo(pts[i+1]);
+ if (device) {
+ device->drawPath(path, p, true);
+ } else {
+ this->drawPath(path, p, nullptr, true);
+ }
+ path.rewind();
+ }
+ break;
+ }
+ }
+ }
+}
+
+static inline SkPoint compute_stroke_size(const SkPaint& paint, const SkMatrix& matrix) {
+ SkASSERT(matrix.rectStaysRect());
+ SkASSERT(SkPaint::kFill_Style != paint.getStyle());
+
+ SkVector size;
+ SkPoint pt = { paint.getStrokeWidth(), paint.getStrokeWidth() };
+ matrix.mapVectors(&size, &pt, 1);
+ return SkPoint::Make(SkScalarAbs(size.fX), SkScalarAbs(size.fY));
+}
+
+static bool easy_rect_join(const SkPaint& paint, const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ if (SkPaint::kMiter_Join != paint.getStrokeJoin() ||
+ paint.getStrokeMiter() < SK_ScalarSqrt2) {
+ return false;
+ }
+
+ *strokeSize = compute_stroke_size(paint, matrix);
+ return true;
+}
+
+SkDraw::RectType SkDraw::ComputeRectType(const SkPaint& paint,
+ const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ RectType rtype;
+ const SkScalar width = paint.getStrokeWidth();
+ const bool zeroWidth = (0 == width);
+ SkPaint::Style style = paint.getStyle();
+
+ if ((SkPaint::kStrokeAndFill_Style == style) && zeroWidth) {
+ style = SkPaint::kFill_Style;
+ }
+
+ if (paint.getPathEffect() || paint.getMaskFilter() ||
+ !matrix.rectStaysRect() || SkPaint::kStrokeAndFill_Style == style) {
+ rtype = kPath_RectType;
+ } else if (SkPaint::kFill_Style == style) {
+ rtype = kFill_RectType;
+ } else if (zeroWidth) {
+ rtype = kHair_RectType;
+ } else if (easy_rect_join(paint, matrix, strokeSize)) {
+ rtype = kStroke_RectType;
+ } else {
+ rtype = kPath_RectType;
+ }
+ return rtype;
+}
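+
+// e.g. a default fill-style paint classifies as kFill_RectType; a 0-width
+// stroke as kHair_RectType; a miter-joined stroke of width 2 under a pure
+// scale CTM as kStroke_RectType with strokeSize == {2*sx, 2*sy}; and any
+// paint with a path effect or mask filter, or a CTM that rotates, falls back
+// to kPath_RectType.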
+
+static const SkPoint* rect_points(const SkRect& r) {
+ return reinterpret_cast<const SkPoint*>(&r);
+}
+
+static SkPoint* rect_points(SkRect& r) {
+ return reinterpret_cast<SkPoint*>(&r);
+}
+
+static void draw_rect_as_path(const SkDraw& orig, const SkRect& prePaintRect,
+ const SkPaint& paint, const SkMatrix* matrix) {
+ SkDraw draw(orig);
+ draw.fMatrix = matrix;
+ SkPath tmp;
+ tmp.addRect(prePaintRect);
+ tmp.setFillType(SkPath::kWinding_FillType);
+ draw.drawPath(tmp, paint, nullptr, true);
+}
+
+void SkDraw::drawRect(const SkRect& prePaintRect, const SkPaint& paint,
+ const SkMatrix* paintMatrix, const SkRect* postPaintRect) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ const SkMatrix* matrix;
+ SkMatrix combinedMatrixStorage;
+ if (paintMatrix) {
+ SkASSERT(postPaintRect);
+ combinedMatrixStorage.setConcat(*fMatrix, *paintMatrix);
+ matrix = &combinedMatrixStorage;
+ } else {
+ SkASSERT(!postPaintRect);
+ matrix = fMatrix;
+ }
+
+ SkPoint strokeSize;
+ RectType rtype = ComputeRectType(paint, *fMatrix, &strokeSize);
+
+ if (kPath_RectType == rtype) {
+ draw_rect_as_path(*this, prePaintRect, paint, matrix);
+ return;
+ }
+
+ SkRect devRect;
+ const SkRect& paintRect = paintMatrix ? *postPaintRect : prePaintRect;
+ // skip the paintMatrix when transforming the rect by the CTM
+ fMatrix->mapPoints(rect_points(devRect), rect_points(paintRect), 2);
+ devRect.sort();
+
+ // look for the quick exit, before we build a blitter
+ SkRect bbox = devRect;
+ if (paint.getStyle() != SkPaint::kFill_Style) {
+ // extra space for hairlines
+ if (paint.getStrokeWidth() == 0) {
+ bbox.outset(1, 1);
+ } else {
+ // For kStroke_RectType, strokeSize is already computed.
+ const SkPoint& ssize = (kStroke_RectType == rtype)
+ ? strokeSize
+ : compute_stroke_size(paint, *fMatrix);
+ bbox.outset(SkScalarHalf(ssize.x()), SkScalarHalf(ssize.y()));
+ }
+ }
+ if (SkPathPriv::TooBigForMath(bbox)) {
+ return;
+ }
+
+ if (!SkRectPriv::FitsInFixed(bbox) && rtype != kHair_RectType) {
+ draw_rect_as_path(*this, prePaintRect, paint, matrix);
+ return;
+ }
+
+ SkIRect ir = bbox.roundOut();
+ if (fRC->quickReject(ir)) {
+ return;
+ }
+
+ SkAutoBlitterChoose blitterStorage(*this, matrix, paint);
+ const SkRasterClip& clip = *fRC;
+ SkBlitter* blitter = blitterStorage.get();
+
+ // we want to "fill" if we are kFill or kStrokeAndFill, since in the latter
+ // case we are also hairline (if we've gotten to here), which devolves to
+ // effectively just kFill
+ switch (rtype) {
+ case kFill_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFillRect(devRect, clip, blitter);
+ } else {
+ SkScan::FillRect(devRect, clip, blitter);
+ }
+ break;
+ case kStroke_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFrameRect(devRect, strokeSize, clip, blitter);
+ } else {
+ SkScan::FrameRect(devRect, strokeSize, clip, blitter);
+ }
+ break;
+ case kHair_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiHairRect(devRect, clip, blitter);
+ } else {
+ SkScan::HairRect(devRect, clip, blitter);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("bad rtype");
+ }
+}
+
+void SkDraw::drawDevMask(const SkMask& srcM, const SkPaint& paint) const {
+ if (srcM.fBounds.isEmpty()) {
+ return;
+ }
+
+ const SkMask* mask = &srcM;
+
+ SkMask dstM;
+ if (paint.getMaskFilter() &&
+ as_MFB(paint.getMaskFilter())->filterMask(&dstM, srcM, *fMatrix, nullptr)) {
+ mask = &dstM;
+ }
+ SkAutoMaskFreeImage ami(dstM.fImage);
+
+ SkAutoBlitterChoose blitterChooser(*this, nullptr, paint);
+ SkBlitter* blitter = blitterChooser.get();
+
+ SkAAClipBlitterWrapper wrapper;
+ const SkRegion* clipRgn;
+
+ if (fRC->isBW()) {
+ clipRgn = &fRC->bwRgn();
+ } else {
+ wrapper.init(*fRC, blitter);
+ clipRgn = &wrapper.getRgn();
+ blitter = wrapper.getBlitter();
+ }
+ blitter->blitMaskRegion(*mask, *clipRgn);
+}
+
+static SkScalar fast_len(const SkVector& vec) {
+ SkScalar x = SkScalarAbs(vec.fX);
+ SkScalar y = SkScalarAbs(vec.fY);
+ if (x < y) {
+ using std::swap;
+ swap(x, y);
+ }
+ return x + SkScalarHalf(y);
+}
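+
+// e.g. fast_len({3, 4}) == 4 + 3/2 == 5.5 versus the true length 5. The
+// max + min/2 approximation never underestimates and overestimates by at
+// most ~11.8% (when min == max/2), which is conservative enough for the
+// hairline test below.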
+
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix& matrix,
+ SkScalar* coverage) {
+ SkASSERT(strokeWidth > 0);
+ // We need to try to fake a thick-stroke with a modulated hairline.
+
+ if (matrix.hasPerspective()) {
+ return false;
+ }
+
+ SkVector src[2], dst[2];
+ src[0].set(strokeWidth, 0);
+ src[1].set(0, strokeWidth);
+ matrix.mapVectors(dst, src, 2);
+ SkScalar len0 = fast_len(dst[0]);
+ SkScalar len1 = fast_len(dst[1]);
+ if (len0 <= SK_Scalar1 && len1 <= SK_Scalar1) {
+ if (coverage) {
+ *coverage = SkScalarAve(len0, len1);
+ }
+ return true;
+ }
+ return false;
+}
+
+void SkDraw::drawRRect(const SkRRect& rrect, const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate());
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ {
+ // TODO: Investigate optimizing these options. They are in the same
+ // order as SkDraw::drawPath, which handles each case. It may be
+ // that there is no way to optimize for these using the SkRRect path.
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(paint, *fMatrix, &coverage)) {
+ goto DRAW_PATH;
+ }
+
+ if (paint.getPathEffect() || paint.getStyle() != SkPaint::kFill_Style) {
+ goto DRAW_PATH;
+ }
+ }
+
+ if (paint.getMaskFilter()) {
+ // Transform the rrect into device space.
+ SkRRect devRRect;
+ if (rrect.transform(*fMatrix, &devRRect)) {
+ SkAutoBlitterChoose blitter(*this, nullptr, paint);
+ if (as_MFB(paint.getMaskFilter())->filterRRect(devRRect, *fMatrix,
+ *fRC, blitter.get())) {
+ return; // filterRRect() called the blitter, so we're done
+ }
+ }
+ }
+
+DRAW_PATH:
+ // Now fall back to the default case of using a path.
+ SkPath path;
+ path.addRRect(rrect);
+ this->drawPath(path, paint, nullptr, true);
+}
+
+SkScalar SkDraw::ComputeResScaleForStroking(const SkMatrix& matrix) {
+ // Not sure how to handle perspective differently, so we just don't try (yet)
+ SkScalar sx = SkPoint::Length(matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewY]);
+ SkScalar sy = SkPoint::Length(matrix[SkMatrix::kMSkewX], matrix[SkMatrix::kMScaleY]);
+ if (SkScalarsAreFinite(sx, sy)) {
+ SkScalar scale = SkTMax(sx, sy);
+ if (scale > 0) {
+ static const SkScalar kMaxStrokeScale = 1e5f;
+ return SkTMin(scale, kMaxStrokeScale);
+ }
+ }
+ return 1;
+}
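+
+// e.g. a uniform scale-by-50 CTM gives sx == sy == 50, so strokes are
+// flattened at 50x resolution; non-finite matrices fall back to 1, and
+// anything above 1e5 is clamped to keep the downstream stroke math finite.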
+
+void SkDraw::drawDevPath(const SkPath& devPath, const SkPaint& paint, bool drawCoverage,
+ SkBlitter* customBlitter, bool doFill) const {
+ if (SkPathPriv::TooBigForMath(devPath)) {
+ return;
+ }
+ SkBlitter* blitter = nullptr;
+ SkAutoBlitterChoose blitterStorage;
+ if (nullptr == customBlitter) {
+ blitter = blitterStorage.choose(*this, nullptr, paint, drawCoverage);
+ } else {
+ blitter = customBlitter;
+ }
+
+ if (paint.getMaskFilter()) {
+ SkStrokeRec::InitStyle style = doFill ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ if (as_MFB(paint.getMaskFilter())->filterPath(devPath, *fMatrix, *fRC, blitter, style)) {
+ return; // filterPath() called the blitter, so we're done
+ }
+ }
+
+ void (*proc)(const SkPath&, const SkRasterClip&, SkBlitter*);
+ if (doFill) {
+ if (paint.isAntiAlias()) {
+ proc = SkScan::AntiFillPath;
+ } else {
+ proc = SkScan::FillPath;
+ }
+ } else { // hairline
+ if (paint.isAntiAlias()) {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::AntiHairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::AntiHairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::AntiHairRoundPath;
+ break;
+ default:
+ proc SK_INIT_TO_AVOID_WARNING;
+ SkDEBUGFAIL("unknown paint cap type");
+ }
+ } else {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::HairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::HairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::HairRoundPath;
+ break;
+ default:
+ proc SK_INIT_TO_AVOID_WARNING;
+ SkDEBUGFAIL("unknown paint cap type");
+ }
+ }
+ }
+
+ proc(devPath, *fRC, blitter);
+}
+
+void SkDraw::drawPath(const SkPath& origSrcPath, const SkPaint& origPaint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable,
+ bool drawCoverage, SkBlitter* customBlitter) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkPath* pathPtr = (SkPath*)&origSrcPath;
+ bool doFill = true;
+ SkPath tmpPathStorage;
+ SkPath* tmpPath = &tmpPathStorage;
+ SkMatrix tmpMatrix;
+ const SkMatrix* matrix = fMatrix;
+ tmpPath->setIsVolatile(true);
+
+ if (prePathMatrix) {
+ if (origPaint.getPathEffect() || origPaint.getStyle() != SkPaint::kFill_Style) {
+ SkPath* result = pathPtr;
+
+ if (!pathIsMutable) {
+ result = tmpPath;
+ pathIsMutable = true;
+ }
+ pathPtr->transform(*prePathMatrix, result);
+ pathPtr = result;
+ } else {
+ tmpMatrix.setConcat(*matrix, *prePathMatrix);
+ matrix = &tmpMatrix;
+ }
+ }
+ // at this point we're done with prePathMatrix
+ SkDEBUGCODE(prePathMatrix = (const SkMatrix*)0x50FF8001;)
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ {
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(origPaint, *matrix, &coverage)) {
+ if (SK_Scalar1 == coverage) {
+ paint.writable()->setStrokeWidth(0);
+ } else if (SkBlendMode_SupportsCoverageAsAlpha(origPaint.getBlendMode())) {
+ U8CPU newAlpha;
+#if 0
+ newAlpha = SkToU8(SkScalarRoundToInt(coverage *
+ origPaint.getAlpha()));
+#else
+ // this is the old technique, which we preserve for now so
+ // we don't change previous results (testing)
+ // the new way seems fine, it's just (a tiny bit) different
+ int scale = (int)(coverage * 256);
+ newAlpha = origPaint.getAlpha() * scale >> 8;
+#endif
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setStrokeWidth(0);
+ writablePaint->setAlpha(newAlpha);
+ }
+ }
+ }
+
+ if (paint->getPathEffect() || paint->getStyle() != SkPaint::kFill_Style) {
+ SkRect cullRect;
+ const SkRect* cullRectPtr = nullptr;
+ if (this->computeConservativeLocalClipBounds(&cullRect)) {
+ cullRectPtr = &cullRect;
+ }
+ doFill = paint->getFillPath(*pathPtr, tmpPath, cullRectPtr,
+ ComputeResScaleForStroking(*fMatrix));
+ pathPtr = tmpPath;
+ }
+
+ // avoid possibly allocating a new path in transform if we can
+ SkPath* devPathPtr = pathIsMutable ? pathPtr : tmpPath;
+
+ // transform the path into device space
+ pathPtr->transform(*matrix, devPathPtr);
+
+ this->drawDevPath(*devPathPtr, *paint, drawCoverage, customBlitter, doFill);
+}
+
+void SkDraw::drawBitmapAsMask(const SkBitmap& bitmap, const SkPaint& paint) const {
+ SkASSERT(bitmap.colorType() == kAlpha_8_SkColorType);
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ if (SkTreatAsSprite(*fMatrix, bitmap.dimensions(), paint)) {
+ int ix = SkScalarRoundToInt(fMatrix->getTranslateX());
+ int iy = SkScalarRoundToInt(fMatrix->getTranslateY());
+
+ SkPixmap pmap;
+ if (!bitmap.peekPixels(&pmap)) {
+ return;
+ }
+ SkMask mask;
+ mask.fBounds.setXYWH(ix, iy, pmap.width(), pmap.height());
+ mask.fFormat = SkMask::kA8_Format;
+ mask.fRowBytes = SkToU32(pmap.rowBytes());
+ // fImage is typed as writable, but in this case it is used read-only
+ mask.fImage = (uint8_t*)pmap.addr8(0, 0);
+
+ this->drawDevMask(mask, paint);
+ } else { // need to xform the bitmap first
+ SkRect r;
+ SkMask mask;
+
+ r.setIWH(bitmap.width(), bitmap.height());
+ fMatrix->mapRect(&r);
+ r.round(&mask.fBounds);
+
+ // set the mask's bounds to the transformed bitmap-bounds,
+ // clipped to the actual device and further limited by the clip bounds
+ {
+ SkASSERT(fDst.bounds().contains(fRC->getBounds()));
+ SkIRect devBounds = fDst.bounds();
+ devBounds.intersect(fRC->getBounds().makeOutset(1, 1));
+ // need intersect(l, t, r, b) on irect
+ if (!mask.fBounds.intersect(devBounds)) {
+ return;
+ }
+ }
+
+ mask.fFormat = SkMask::kA8_Format;
+ mask.fRowBytes = SkAlign4(mask.fBounds.width());
+ size_t size = mask.computeImageSize();
+ if (0 == size) {
+ // the mask is too big to allocate, so draw nothing
+ return;
+ }
+
+ // allocate (and clear) our temp buffer to hold the transformed bitmap
+ SkAutoTMalloc<uint8_t> storage(size);
+ mask.fImage = storage.get();
+ memset(mask.fImage, 0, size);
+
+ // now draw our bitmap(src) into mask(dst), transformed by the matrix
+ {
+ SkBitmap device;
+ device.installPixels(SkImageInfo::MakeA8(mask.fBounds.width(), mask.fBounds.height()),
+ mask.fImage, mask.fRowBytes);
+
+ SkCanvas c(device);
+ // need the unclipped top/left for the translate
+ c.translate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+ c.concat(*fMatrix);
+
+ // We can't call drawBitmap, or we'll infinitely recurse. Instead
+ // we manually build a shader and draw that into our new mask
+ SkPaint tmpPaint;
+ tmpPaint.setAntiAlias(paint.isAntiAlias());
+ tmpPaint.setDither(paint.isDither());
+ tmpPaint.setFilterQuality(paint.getFilterQuality());
+ SkPaint paintWithShader = make_paint_with_image(tmpPaint, bitmap);
+ SkRect rr;
+ rr.setIWH(bitmap.width(), bitmap.height());
+ c.drawRect(rr, paintWithShader);
+ }
+ this->drawDevMask(mask, paint);
+ }
+}
+
+static bool clipped_out(const SkMatrix& m, const SkRasterClip& c,
+ const SkRect& srcR) {
+ SkRect dstR;
+ m.mapRect(&dstR, srcR);
+ return c.quickReject(dstR.roundOut());
+}
+
+static bool clipped_out(const SkMatrix& matrix, const SkRasterClip& clip,
+ int width, int height) {
+ SkRect r;
+ r.setIWH(width, height);
+ return clipped_out(matrix, clip, r);
+}
+
+static bool clipHandlesSprite(const SkRasterClip& clip, int x, int y, const SkPixmap& pmap) {
+ return clip.isBW() || clip.quickContains(x, y, x + pmap.width(), y + pmap.height());
+}
+
+void SkDraw::drawBitmap(const SkBitmap& bitmap, const SkMatrix& prematrix,
+ const SkRect* dstBounds, const SkPaint& origPaint) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty() ||
+ bitmap.width() == 0 || bitmap.height() == 0 ||
+ bitmap.colorType() == kUnknown_SkColorType) {
+ return;
+ }
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+ if (origPaint.getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+
+ SkMatrix matrix;
+ matrix.setConcat(*fMatrix, prematrix);
+
+ if (clipped_out(matrix, *fRC, bitmap.width(), bitmap.height())) {
+ return;
+ }
+
+ if (bitmap.colorType() != kAlpha_8_SkColorType
+ && SkTreatAsSprite(matrix, bitmap.dimensions(), *paint)) {
+ //
+ // It is safe to access the pixels directly now, since we know the
+ // matrix is (more or less) identity.
+ //
+ SkPixmap pmap;
+ if (!bitmap.peekPixels(&pmap)) {
+ return;
+ }
+ int ix = SkScalarRoundToInt(matrix.getTranslateX());
+ int iy = SkScalarRoundToInt(matrix.getTranslateY());
+ if (clipHandlesSprite(*fRC, ix, iy, pmap)) {
+ SkSTArenaAlloc<kSkBlitterContextSize> allocator;
+ // blitter will be owned by the allocator.
+ SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, *paint, pmap, ix, iy, &allocator);
+ if (blitter) {
+ SkScan::FillIRect(SkIRect::MakeXYWH(ix, iy, pmap.width(), pmap.height()),
+ *fRC, blitter);
+ return;
+ }
+ // if !blitter, then we fall-through to the slower case
+ }
+ }
+
+ // now make a temp draw on the stack, and use it
+ //
+ SkDraw draw(*this);
+ draw.fMatrix = &matrix;
+
+ if (bitmap.colorType() == kAlpha_8_SkColorType && !paint->getColorFilter()) {
+ draw.drawBitmapAsMask(bitmap, *paint);
+ } else {
+ SkPaint paintWithShader = make_paint_with_image(*paint, bitmap);
+ const SkRect srcBounds = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+ if (dstBounds) {
+ this->drawRect(srcBounds, paintWithShader, &prematrix, dstBounds);
+ } else {
+ draw.drawRect(srcBounds, paintWithShader);
+ }
+ }
+}
+
+void SkDraw::drawSprite(const SkBitmap& bitmap, int x, int y, const SkPaint& origPaint) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty() ||
+ bitmap.width() == 0 || bitmap.height() == 0 ||
+ bitmap.colorType() == kUnknown_SkColorType) {
+ return;
+ }
+
+ const SkIRect bounds = SkIRect::MakeXYWH(x, y, bitmap.width(), bitmap.height());
+
+ if (fRC->quickReject(bounds)) {
+ return; // nothing to draw
+ }
+
+ SkPaint paint(origPaint);
+ paint.setStyle(SkPaint::kFill_Style);
+
+ SkPixmap pmap;
+ if (!bitmap.peekPixels(&pmap)) {
+ return;
+ }
+
+ if (nullptr == paint.getColorFilter() && clipHandlesSprite(*fRC, x, y, pmap)) {
+ // blitter will be owned by the allocator.
+ SkSTArenaAlloc<kSkBlitterContextSize> allocator;
+ SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, paint, pmap, x, y, &allocator);
+ if (blitter) {
+ SkScan::FillIRect(bounds, *fRC, blitter);
+ return;
+ }
+ }
+
+ SkMatrix matrix;
+ SkRect r;
+
+ // get a scalar version of our rect
+ r.set(bounds);
+
+ // create shader with offset
+ matrix.setTranslate(r.fLeft, r.fTop);
+ SkPaint paintWithShader = make_paint_with_image(paint, bitmap, &matrix);
+ SkDraw draw(*this);
+ matrix.reset();
+ draw.fMatrix = &matrix;
+ // call ourself with a rect
+ draw.drawRect(r, paintWithShader);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkDraw::validate() const {
+ SkASSERT(fMatrix != nullptr);
+ SkASSERT(fRC != nullptr);
+
+ const SkIRect& cr = fRC->getBounds();
+ SkIRect br;
+
+ br.setWH(fDst.width(), fDst.height());
+ SkASSERT(cr.isEmpty() || br.contains(cr));
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDraw.h"
+
+bool SkDraw::ComputeMaskBounds(const SkRect& devPathBounds, const SkIRect* clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkIRect* bounds) {
+ // init our bounds from the path
+ *bounds = devPathBounds.makeOutset(SK_ScalarHalf, SK_ScalarHalf).roundOut();
+
+ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, *filterMatrix, &margin)) {
+ return false;
+ }
+ }
+
+ // (possibly) trim the bounds to reflect the clip
+ // (plus whatever slop the filter needs)
+ if (clipBounds) {
+ // Ugh. Guard against gigantic margins from wacky filters. Without this
+ // check we can request arbitrary amounts of slop beyond our visible
+ // clip, and bring down the renderer (at least on finite RAM machines
+ // like handsets, etc.). Need to balance this invented value between
+ // quality of large filters like blurs, and the corresponding memory
+ // requests.
+ static const int MAX_MARGIN = 128;
+ if (!bounds->intersect(clipBounds->makeOutset(SkMin32(margin.fX, MAX_MARGIN),
+ SkMin32(margin.fY, MAX_MARGIN)))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void draw_into_mask(const SkMask& mask, const SkPath& devPath,
+ SkStrokeRec::InitStyle style) {
+ SkDraw draw;
+ if (!draw.fDst.reset(mask)) {
+ return;
+ }
+
+ SkRasterClip clip;
+ SkMatrix matrix;
+ SkPaint paint;
+
+ clip.setRect(SkIRect::MakeWH(mask.fBounds.width(), mask.fBounds.height()));
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ draw.fRC = &clip;
+ draw.fMatrix = &matrix;
+ paint.setAntiAlias(true);
+ switch (style) {
+ case SkStrokeRec::kHairline_InitStyle:
+ SkASSERT(!paint.getStrokeWidth());
+ paint.setStyle(SkPaint::kStroke_Style);
+ break;
+ case SkStrokeRec::kFill_InitStyle:
+ SkASSERT(paint.getStyle() == SkPaint::kFill_Style);
+ break;
+ }
+ draw.drawPath(devPath, paint);
+}
+
+bool SkDraw::DrawToMask(const SkPath& devPath, const SkIRect* clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style) {
+ if (devPath.isEmpty()) {
+ return false;
+ }
+
+ if (SkMask::kJustRenderImage_CreateMode != mode) {
+ if (!ComputeMaskBounds(devPath.getBounds(), clipBounds, filter,
+ filterMatrix, &mask->fBounds)) {
+ return false;
+ }
+ }
+
+ if (SkMask::kComputeBoundsAndRenderImage_CreateMode == mode) {
+ mask->fFormat = SkMask::kA8_Format;
+ mask->fRowBytes = mask->fBounds.width();
+ size_t size = mask->computeImageSize();
+ if (0 == size) {
+ // we're too big to allocate the mask, abort
+ return false;
+ }
+ mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
+ }
+
+ if (SkMask::kJustComputeBounds_CreateMode != mode) {
+ draw_into_mask(*mask, devPath, style);
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkDraw.h b/gfx/skia/skia/src/core/SkDraw.h
new file mode 100644
index 0000000000..69149f30da
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw.h
@@ -0,0 +1,170 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDraw_DEFINED
+#define SkDraw_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkVertices.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkMask.h"
+
+class SkBitmap;
+class SkClipStack;
+class SkBaseDevice;
+class SkBlitter;
+class SkMatrix;
+class SkPath;
+class SkRegion;
+class SkRasterClip;
+struct SkRect;
+class SkRRect;
+
+class SkDraw : public SkGlyphRunListPainter::BitmapDevicePainter {
+public:
+ SkDraw();
+
+ void drawPaint(const SkPaint&) const;
+ void drawPoints(SkCanvas::PointMode, size_t count, const SkPoint[],
+ const SkPaint&, SkBaseDevice*) const;
+ void drawRect(const SkRect& prePaintRect, const SkPaint&, const SkMatrix* paintMatrix,
+ const SkRect* postPaintRect) const;
+ void drawRect(const SkRect& rect, const SkPaint& paint) const {
+ this->drawRect(rect, paint, nullptr, nullptr);
+ }
+ void drawRRect(const SkRRect&, const SkPaint&) const;
+ /**
+ * To save on mallocs, we allow a flag that tells us that srcPath is
+ * mutable, so that we don't have to make copies of it as we transform it.
+ *
+ * If prePathMatrix is not null, it should logically be applied before any
+ * stroking or other effects. If there are no effects on the paint that
+ * affect the geometry/rasterization, then the pre matrix can just be
+ * pre-concatenated with the current matrix.
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint,
+ const SkMatrix* prePathMatrix = nullptr, bool pathIsMutable = false) const {
+ this->drawPath(path, paint, prePathMatrix, pathIsMutable, false);
+ }
+
+ /* If dstOrNull is null, computes a dst by mapping the bitmap's bounds through the matrix. */
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkPaint&) const;
+ void drawSprite(const SkBitmap&, int x, int y, const SkPaint&) const;
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList,
+ SkGlyphRunListPainter* glyphPainter) const;
+ void drawVertices(SkVertices::VertexMode mode, int vertexCount,
+ const SkPoint vertices[], const SkPoint textures[],
+ const SkColor colors[], const SkVertices::BoneIndices boneIndices[],
+ const SkVertices::BoneWeights boneWeights[], SkBlendMode bmode,
+ const uint16_t indices[], int ptCount,
+ const SkPaint& paint, const SkVertices::Bone bones[], int boneCount) const;
+ void drawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int count,
+ SkBlendMode, const SkPaint&);
+
+ /**
+ * Overwrite the target with the path's coverage (i.e. its mask).
+ * Will overwrite the entire device, so it need not be zero'd first.
+ *
+ * Only device A8 is supported right now.
+ */
+ void drawPathCoverage(const SkPath& src, const SkPaint& paint,
+ SkBlitter* customBlitter = nullptr) const {
+ bool isHairline = paint.getStyle() == SkPaint::kStroke_Style &&
+ paint.getStrokeWidth() > 0;
+ this->drawPath(src, paint, nullptr, false, !isHairline, customBlitter);
+ }
+
+ void paintPaths(SkDrawableGlyphBuffer* drawables,
+ SkScalar scale,
+ const SkPaint& paint) const override;
+
+ void paintMasks(SkDrawableGlyphBuffer* drawables, const SkPaint& paint) const override;
+
+ static bool ComputeMaskBounds(const SkRect& devPathBounds, const SkIRect* clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkIRect* bounds);
+
+ /** Helper function that creates a mask from a path and an optional maskfilter.
+ Note however, that the resulting mask will not have been actually filtered,
+ that must be done afterwards (by calling filterMask). The maskfilter is provided
+ solely to assist in computing the mask's bounds (if the mode requests that).
+ */
+ static bool DrawToMask(const SkPath& devPath, const SkIRect* clipBounds,
+ const SkMaskFilter*, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style);
+
+ void drawDevMask(const SkMask& mask, const SkPaint&) const;
+
+ enum RectType {
+ kHair_RectType,
+ kFill_RectType,
+ kStroke_RectType,
+ kPath_RectType
+ };
+
+ /**
+ * Based on the paint's style, strokeWidth, and the matrix, classify how
+ * to draw the rect. If no special-case is available, returns
+ * kPath_RectType.
+ *
+ * Iff RectType == kStroke_RectType, then strokeSize is set to the device
+ * width and height of the stroke.
+ */
+ static RectType ComputeRectType(const SkPaint&, const SkMatrix&,
+ SkPoint* strokeSize);
+
+ static SkScalar ComputeResScaleForStroking(const SkMatrix& matrix);
+private:
+ void drawBitmapAsMask(const SkBitmap&, const SkPaint&) const;
+
+ void drawPath(const SkPath&,
+ const SkPaint&,
+ const SkMatrix* preMatrix,
+ bool pathIsMutable,
+ bool drawCoverage,
+ SkBlitter* customBlitter = nullptr) const;
+
+ void drawLine(const SkPoint[2], const SkPaint&) const;
+
+ void drawDevPath(const SkPath& devPath,
+ const SkPaint& paint,
+ bool drawCoverage,
+ SkBlitter* customBlitter,
+ bool doFill) const;
+ /**
+ * Return the current clip bounds, in local coordinates, with slop to account
+ * for antialiasing or hairlines (i.e. device-bounds outset by 1, and then
+ * run through the inverse of the matrix).
+ *
+ * If the matrix cannot be inverted, or the current clip is empty, return
+ * false and ignore bounds parameter.
+ */
+ bool SK_WARN_UNUSED_RESULT computeConservativeLocalClipBounds(SkRect* bounds) const;
+
+public:
+ SkPixmap fDst;
+ const SkMatrix* fMatrix{nullptr}; // required
+ const SkRasterClip* fRC{nullptr}; // required
+
+ // optional, will be same dimensions as fDst if present
+ const SkPixmap* fCoverage{nullptr};
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawLooper.cpp b/gfx/skia/skia/src/core/SkDrawLooper.cpp
new file mode 100644
index 0000000000..9de4d43673
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawLooper.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDrawLooper.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkArenaAlloc.h"
+
+void SkDrawLooper::Context::Info::applyToCTM(SkMatrix* ctm) const {
+ if (fApplyPostCTM) {
+ ctm->postTranslate(fTranslate.fX, fTranslate.fY);
+ } else {
+ ctm->preTranslate(fTranslate.fX, fTranslate.fY);
+ }
+}
+
+void SkDrawLooper::Context::Info::applyToCanvas(SkCanvas* canvas) const {
+ if (fApplyPostCTM) {
+ SkMatrix ctm = canvas->getTotalMatrix();
+ ctm.postTranslate(fTranslate.fX, fTranslate.fY);
+ canvas->setMatrix(ctm);
+ } else {
+ canvas->translate(fTranslate.fX, fTranslate.fY);
+ }
+}
+
+bool SkDrawLooper::canComputeFastBounds(const SkPaint& paint) const {
+ SkSTArenaAlloc<48> alloc;
+
+ SkDrawLooper::Context* context = this->makeContext(&alloc);
+ for (;;) {
+ SkPaint p(paint);
+ SkDrawLooper::Context::Info info;
+ if (context->next(&info, &p)) {
+ if (!p.canComputeFastBounds()) {
+ return false;
+ }
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+void SkDrawLooper::computeFastBounds(const SkPaint& paint, const SkRect& s,
+ SkRect* dst) const {
+ // src and dst rects may alias and we need to keep the original src, so copy it.
+ const SkRect src = s;
+
+ SkSTArenaAlloc<48> alloc;
+
+ *dst = src; // catch case where there are no loops
+ SkDrawLooper::Context* context = this->makeContext(&alloc);
+
+ for (bool firstTime = true;; firstTime = false) {
+ SkPaint p(paint);
+ SkDrawLooper::Context::Info info;
+ if (context->next(&info, &p)) {
+ SkRect r(src);
+
+ p.computeFastBounds(r, &r);
+ r.offset(info.fTranslate.fX, info.fTranslate.fY);
+
+ if (firstTime) {
+ *dst = r;
+ } else {
+ dst->join(r);
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+bool SkDrawLooper::asABlurShadow(BlurShadowRec*) const {
+ return false;
+}
+
+void SkDrawLooper::apply(SkCanvas* canvas, const SkPaint& paint,
+ std::function<void(SkCanvas*, const SkPaint&)> proc) {
+ SkSTArenaAlloc<256> alloc;
+ Context* ctx = this->makeContext(&alloc);
+ if (ctx) {
+ Context::Info info;
+ for (;;) {
+ SkPaint p = paint;
+ if (!ctx->next(&info, &p)) {
+ break;
+ }
+ canvas->save();
+ if (info.fApplyPostCTM) {
+ SkMatrix ctm = canvas->getTotalMatrix();
+ ctm.postTranslate(info.fTranslate.fX, info.fTranslate.fY);
+ canvas->setMatrix(ctm);
+ } else {
+ canvas->translate(info.fTranslate.fX, info.fTranslate.fY);
+ }
+ proc(canvas, p);
+ canvas->restore();
+ }
+ }
+}
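+
+// A hypothetical usage sketch (looper construction elided; e.g. a
+// blur-shadow looper): apply() re-issues one draw per pass, with that pass's
+// paint and translate already applied.
+//
+//   looper->apply(canvas, paint, [&](SkCanvas* c, const SkPaint& p) {
+//       c->drawRect(rect, p);
+//   });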
diff --git a/gfx/skia/skia/src/core/SkDrawProcs.h b/gfx/skia/skia/src/core/SkDrawProcs.h
new file mode 100644
index 0000000000..c07e181e9c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawProcs.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawProcs_DEFINED
+#define SkDrawProcs_DEFINED
+
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyph.h"
+
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix&,
+ SkScalar* coverage);
+
+/**
+ * If the current paint is set to stroke and the stroke-width when applied to
+ * the matrix is <= 1.0, then this returns true, and sets coverage (simulating
+ * a stroke by drawing a hairline with partial coverage). If any of these
+ * conditions are false, then this returns false and coverage is ignored.
+ */
+inline bool SkDrawTreatAsHairline(const SkPaint& paint, const SkMatrix& matrix,
+ SkScalar* coverage) {
+ if (SkPaint::kStroke_Style != paint.getStyle()) {
+ return false;
+ }
+
+ SkScalar strokeWidth = paint.getStrokeWidth();
+ if (0 == strokeWidth) {
+ *coverage = SK_Scalar1;
+ return true;
+ }
+
+ if (!paint.isAntiAlias()) {
+ return false;
+ }
+
+ return SkDrawTreatAAStrokeAsHairline(strokeWidth, matrix, coverage);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp b/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp
new file mode 100644
index 0000000000..8912046f26
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/utils/SkPolyUtils.h"
+
+namespace SkDrawShadowMetrics {
+
+static SkScalar compute_z(SkScalar x, SkScalar y, const SkPoint3& params) {
+ return x*params.fX + y*params.fY + params.fZ;
+}
+
+bool GetSpotShadowTransform(const SkPoint3& lightPos, SkScalar lightRadius,
+ const SkMatrix& ctm, const SkPoint3& zPlaneParams,
+ const SkRect& pathBounds, SkMatrix* shadowTransform, SkScalar* radius) {
+ auto heightFunc = [zPlaneParams] (SkScalar x, SkScalar y) {
+ return zPlaneParams.fX*x + zPlaneParams.fY*y + zPlaneParams.fZ;
+ };
+ SkScalar occluderHeight = heightFunc(pathBounds.centerX(), pathBounds.centerY());
+
+ if (!ctm.hasPerspective()) {
+ SkScalar scale;
+ SkVector translate;
+ SkDrawShadowMetrics::GetSpotParams(occluderHeight, lightPos.fX, lightPos.fY, lightPos.fZ,
+ lightRadius, radius, &scale, &translate);
+ shadowTransform->setScaleTranslate(scale, scale, translate.fX, translate.fY);
+ shadowTransform->preConcat(ctm);
+ } else {
+ if (SkScalarNearlyZero(pathBounds.width()) || SkScalarNearlyZero(pathBounds.height())) {
+ return false;
+ }
+
+ // get rotated quad in 3D
+ SkPoint pts[4];
+ ctm.mapRectToQuad(pts, pathBounds);
+ // No shadows for bowties or other degenerate cases
+ if (!SkIsConvexPolygon(pts, 4)) {
+ return false;
+ }
+ SkPoint3 pts3D[4];
+ SkScalar z = heightFunc(pathBounds.fLeft, pathBounds.fTop);
+ pts3D[0].set(pts[0].fX, pts[0].fY, z);
+ z = heightFunc(pathBounds.fRight, pathBounds.fTop);
+ pts3D[1].set(pts[1].fX, pts[1].fY, z);
+ z = heightFunc(pathBounds.fRight, pathBounds.fBottom);
+ pts3D[2].set(pts[2].fX, pts[2].fY, z);
+ z = heightFunc(pathBounds.fLeft, pathBounds.fBottom);
+ pts3D[3].set(pts[3].fX, pts[3].fY, z);
+
+ // project from light through corners to z=0 plane
+ for (int i = 0; i < 4; ++i) {
+ SkScalar dz = lightPos.fZ - pts3D[i].fZ;
+ // light shouldn't be below or at a corner's z-location
+ if (dz <= SK_ScalarNearlyZero) {
+ return false;
+ }
+ SkScalar zRatio = pts3D[i].fZ / dz;
+ pts3D[i].fX -= (lightPos.fX - pts3D[i].fX)*zRatio;
+ pts3D[i].fY -= (lightPos.fY - pts3D[i].fY)*zRatio;
+ pts3D[i].fZ = SK_Scalar1;
+ }
+
+ // Generate matrix that projects from [-1,1]x[-1,1] square to projected quad
+ SkPoint3 h0, h1, h2;
+        // Compute homogeneous crossing point between top and bottom edges (gives new x-axis).
+        h0 = (pts3D[1].cross(pts3D[0])).cross(pts3D[2].cross(pts3D[3]));
+        // Compute homogeneous crossing point between left and right edges (gives new y-axis).
+        h1 = (pts3D[0].cross(pts3D[3])).cross(pts3D[1].cross(pts3D[2]));
+        // Compute homogeneous crossing point between diagonals (gives new origin).
+        h2 = (pts3D[0].cross(pts3D[2])).cross(pts3D[1].cross(pts3D[3]));
+ // If h2 is a vector (z=0 in 2D homogeneous space), that means that at least
+ // two of the quad corners are coincident and we don't have a realistic projection
+ if (SkScalarNearlyZero(h2.fZ)) {
+ return false;
+ }
+ // In some cases the crossing points are in the wrong direction
+ // to map (-1,-1) to pts3D[0], so we need to correct for that.
+ // Want h0 to be to the right of the left edge.
+ SkVector3 v = pts3D[3] - pts3D[0];
+ SkVector3 w = h0 - pts3D[0];
+ SkScalar perpDot = v.fX*w.fY - v.fY*w.fX;
+ if (perpDot > 0) {
+ h0 = -h0;
+ }
+        // Want h1 to be above the bottom edge.
+        v = pts3D[1] - pts3D[0];
+        w = h1 - pts3D[0];  // recompute w relative to h1; the earlier, h0-based w says nothing about h1
+        perpDot = v.fX*w.fY - v.fY*w.fX;
+ if (perpDot < 0) {
+ h1 = -h1;
+ }
+ shadowTransform->setAll(h0.fX / h2.fZ, h1.fX / h2.fZ, h2.fX / h2.fZ,
+ h0.fY / h2.fZ, h1.fY / h2.fZ, h2.fY / h2.fZ,
+ h0.fZ / h2.fZ, h1.fZ / h2.fZ, 1);
+ // generate matrix that transforms from bounds to [-1,1]x[-1,1] square
+ SkMatrix toHomogeneous;
+ SkScalar xScale = 2/(pathBounds.fRight - pathBounds.fLeft);
+ SkScalar yScale = 2/(pathBounds.fBottom - pathBounds.fTop);
+ toHomogeneous.setAll(xScale, 0, -xScale*pathBounds.fLeft - 1,
+ 0, yScale, -yScale*pathBounds.fTop - 1,
+ 0, 0, 1);
+ shadowTransform->preConcat(toHomogeneous);
+
+ *radius = SkDrawShadowMetrics::SpotBlurRadius(occluderHeight, lightPos.fZ, lightRadius);
+ }
+
+ return true;
+}
+
+void GetLocalBounds(const SkPath& path, const SkDrawShadowRec& rec, const SkMatrix& ctm,
+ SkRect* bounds) {
+ SkRect ambientBounds = path.getBounds();
+ SkScalar occluderZ;
+ if (SkScalarNearlyZero(rec.fZPlaneParams.fX) && SkScalarNearlyZero(rec.fZPlaneParams.fY)) {
+ occluderZ = rec.fZPlaneParams.fZ;
+ } else {
+ occluderZ = compute_z(ambientBounds.fLeft, ambientBounds.fTop, rec.fZPlaneParams);
+ occluderZ = SkTMax(occluderZ, compute_z(ambientBounds.fRight, ambientBounds.fTop,
+ rec.fZPlaneParams));
+ occluderZ = SkTMax(occluderZ, compute_z(ambientBounds.fLeft, ambientBounds.fBottom,
+ rec.fZPlaneParams));
+ occluderZ = SkTMax(occluderZ, compute_z(ambientBounds.fRight, ambientBounds.fBottom,
+ rec.fZPlaneParams));
+ }
+ SkScalar ambientBlur;
+ SkScalar spotBlur;
+ SkScalar spotScale;
+ SkPoint spotOffset;
+ if (ctm.hasPerspective()) {
+ // transform ambient and spot bounds into device space
+ ctm.mapRect(&ambientBounds);
+
+ // get ambient blur (in device space)
+ ambientBlur = SkDrawShadowMetrics::AmbientBlurRadius(occluderZ);
+
+ // get spot params (in device space)
+ SkPoint devLightPos = SkPoint::Make(rec.fLightPos.fX, rec.fLightPos.fY);
+ ctm.mapPoints(&devLightPos, 1);
+ SkDrawShadowMetrics::GetSpotParams(occluderZ, devLightPos.fX, devLightPos.fY,
+ rec.fLightPos.fZ, rec.fLightRadius,
+ &spotBlur, &spotScale, &spotOffset);
+ } else {
+ SkScalar devToSrcScale = SkScalarInvert(ctm.getMinScale());
+
+ // get ambient blur (in local space)
+ SkScalar devSpaceAmbientBlur = SkDrawShadowMetrics::AmbientBlurRadius(occluderZ);
+ ambientBlur = devSpaceAmbientBlur*devToSrcScale;
+
+ // get spot params (in local space)
+ SkDrawShadowMetrics::GetSpotParams(occluderZ, rec.fLightPos.fX, rec.fLightPos.fY,
+ rec.fLightPos.fZ, rec.fLightRadius,
+ &spotBlur, &spotScale, &spotOffset);
+
+ // convert spot blur to local space
+ spotBlur *= devToSrcScale;
+ }
+
+ // in both cases, adjust ambient and spot bounds
+ SkRect spotBounds = ambientBounds;
+ ambientBounds.outset(ambientBlur, ambientBlur);
+ spotBounds.fLeft *= spotScale;
+ spotBounds.fTop *= spotScale;
+ spotBounds.fRight *= spotScale;
+ spotBounds.fBottom *= spotScale;
+ spotBounds.offset(spotOffset.fX, spotOffset.fY);
+ spotBounds.outset(spotBlur, spotBlur);
+
+ // merge bounds
+ *bounds = ambientBounds;
+ bounds->join(spotBounds);
+ // outset a bit to account for floating point error
+ bounds->outset(1, 1);
+
+ // if perspective, transform back to src space
+ if (ctm.hasPerspective()) {
+ // TODO: create tighter mapping from dev rect back to src rect
+ SkMatrix inverse;
+ if (ctm.invert(&inverse)) {
+ inverse.mapRect(bounds);
+ }
+ }
+}
+
+
+}
+
diff --git a/gfx/skia/skia/src/core/SkDrawShadowInfo.h b/gfx/skia/skia/src/core/SkDrawShadowInfo.h
new file mode 100644
index 0000000000..c89c8f2046
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawShadowInfo.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawShadowInfo_DEFINED
+#define SkDrawShadowInfo_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+
+class SkMatrix;
+class SkPath;
+struct SkRect;
+
+struct SkDrawShadowRec {
+ SkPoint3 fZPlaneParams;
+ SkPoint3 fLightPos;
+ SkScalar fLightRadius;
+ SkColor fAmbientColor;
+ SkColor fSpotColor;
+ uint32_t fFlags;
+};
+
+namespace SkDrawShadowMetrics {
+
+static constexpr auto kAmbientHeightFactor = 1.0f / 128.0f;
+static constexpr auto kAmbientGeomFactor = 64.0f;
+// Assuming that we have a light height of 600 for the spot shadow,
+// the spot values will reach their maximum at a height of approximately 292.3077.
+// We'll round up to 300 to keep it simple.
+static constexpr auto kMaxAmbientRadius = 300*kAmbientHeightFactor*kAmbientGeomFactor;
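+// Net effect: AmbientBlurRadius() below works out to height/2 (since
+// kAmbientHeightFactor*kAmbientGeomFactor == 0.5), capped at
+// kMaxAmbientRadius == 150.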
+
+static inline float divide_and_pin(float numer, float denom, float min, float max) {
+ float result = SkTPin(sk_ieee_float_divide(numer, denom), min, max);
+ // ensure that SkTPin handled non-finites correctly
+ SkASSERT(result >= min && result <= max);
+ return result;
+}
+
+inline SkScalar AmbientBlurRadius(SkScalar height) {
+ return SkTMin(height*kAmbientHeightFactor*kAmbientGeomFactor, kMaxAmbientRadius);
+}
+
+inline SkScalar AmbientRecipAlpha(SkScalar height) {
+ return 1.0f + SkTMax(height*kAmbientHeightFactor, 0.0f);
+}
+
+inline SkScalar SpotBlurRadius(SkScalar occluderZ, SkScalar lightZ, SkScalar lightRadius) {
+ return lightRadius*divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+}
+
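+// The spot params fall out of similar triangles: projecting an occluder point
+// p at height occluderZ away from a point light at (lightX, lightY, lightZ)
+// onto the z=0 plane gives
+//   shadow = scale * p - zRatio * (lightX, lightY), where
+//   scale  = lightZ / (lightZ - occluderZ) and
+//   zRatio = occluderZ / (lightZ - occluderZ).
+// divide_and_pin() keeps both ratios finite when the light sits at or below
+// the occluder.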
+inline void GetSpotParams(SkScalar occluderZ, SkScalar lightX, SkScalar lightY, SkScalar lightZ,
+ SkScalar lightRadius,
+ SkScalar* blurRadius, SkScalar* scale, SkVector* translate) {
+ SkScalar zRatio = divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+ *blurRadius = lightRadius*zRatio;
+ *scale = divide_and_pin(lightZ, lightZ - occluderZ, 1.0f, 1.95f);
+ *translate = SkVector::Make(-zRatio * lightX, -zRatio * lightY);
+}
+
+// Create the transformation to apply to a path to get its base shadow outline, given the light
+// parameters and the path's 3D transformation (given by ctm and zPlaneParams).
+// Also computes the blur radius to apply to the transformed outline.
+bool GetSpotShadowTransform(const SkPoint3& lightPos, SkScalar lightRadius,
+ const SkMatrix& ctm, const SkPoint3& zPlaneParams,
+ const SkRect& pathBounds, SkMatrix* shadowTransform, SkScalar* radius);
+
+// get bounds prior to the ctm being applied
+void GetLocalBounds(const SkPath&, const SkDrawShadowRec&, const SkMatrix& ctm, SkRect* bounds);
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDraw_atlas.cpp b/gfx/skia/skia/src/core/SkDraw_atlas.cpp
new file mode 100644
index 0000000000..b3d86061be
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_atlas.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkRSXform.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkScan.h"
+#include "src/shaders/SkShaderBase.h"
+
+#include "include/core/SkMatrix.h"
+#include "src/core/SkScan.h"
+
+static void fill_rect(const SkMatrix& ctm, const SkRasterClip& rc,
+ const SkRect& r, SkBlitter* blitter, SkPath* scratchPath) {
+ if (ctm.rectStaysRect()) {
+ SkRect dr;
+ ctm.mapRect(&dr, r);
+ SkScan::FillRect(dr, rc, blitter);
+ } else {
+ SkPoint pts[4];
+ r.toQuad(pts);
+ ctm.mapPoints(pts, pts, 4);
+
+ scratchPath->rewind();
+ scratchPath->addPoly(pts, 4, true);
+ SkScan::FillPath(*scratchPath, rc, blitter);
+ }
+}
+
+static void load_color(SkRasterPipeline_UniformColorCtx* ctx, const float rgba[]) {
+    // only need one of these. can I query the pipeline to know if it's lowp or highp?
+ ctx->rgba[0] = SkScalarRoundToInt(rgba[0]*255); ctx->r = rgba[0];
+ ctx->rgba[1] = SkScalarRoundToInt(rgba[1]*255); ctx->g = rgba[1];
+ ctx->rgba[2] = SkScalarRoundToInt(rgba[2]*255); ctx->b = rgba[2];
+ ctx->rgba[3] = SkScalarRoundToInt(rgba[3]*255); ctx->a = rgba[3];
+}
+
+void SkDraw::drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect textures[],
+ const SkColor colors[], int count, SkBlendMode bmode, const SkPaint& paint) {
+ sk_sp<SkShader> atlasShader = atlas->makeShader();
+ if (!atlasShader) {
+ return;
+ }
+
+ SkPaint p(paint);
+    p.setAntiAlias(false);    // we never respect this for drawAtlas (or drawVertices)
+ p.setStyle(SkPaint::kFill_Style);
+ p.setShader(nullptr);
+ p.setMaskFilter(nullptr);
+
+ SkSTArenaAlloc<256> alloc;
+ SkRasterPipeline pipeline(&alloc);
+ SkStageRec rec = {
+ &pipeline, &alloc, fDst.colorType(), fDst.colorSpace(), p, nullptr, *fMatrix
+ };
+
+ SkStageUpdater* updator = as_SB(atlasShader.get())->appendUpdatableStages(rec);
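+    // Two paths from here: if the shader can't give us updatable stages, fall
+    // back to drawing each sprite as an ordinary rect draw with its own local
+    // matrix; otherwise build one raster pipeline up front and just update the
+    // matrix (and, if present, the per-sprite color) on each iteration.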
+ if (!updator) {
+ SkDraw draw(*this);
+
+ p.setShader(atlasShader);
+ for (int i = 0; i < count; ++i) {
+ if (colors) {
+ p.setShader(SkShaders::Blend(bmode, SkShaders::Color(colors[i]), atlasShader));
+ }
+ SkMatrix mx;
+ mx.setRSXform(xform[i]);
+ mx.preTranslate(-textures[i].fLeft, -textures[i].fTop);
+ mx.postConcat(*fMatrix);
+ draw.fMatrix = &mx;
+ draw.drawRect(textures[i], p);
+ }
+ return;
+ }
+
+ SkRasterPipeline_UniformColorCtx* uniformCtx = nullptr;
+ SkColorSpaceXformSteps steps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType);
+
+ if (colors) {
+ // we will late-bind the values in ctx, once for each color in the loop
+ uniformCtx = alloc.make<SkRasterPipeline_UniformColorCtx>();
+ rec.fPipeline->append(SkRasterPipeline::uniform_color_dst, uniformCtx);
+ SkBlendMode_AppendStages(bmode, rec.fPipeline);
+ }
+
+ bool isOpaque = !colors && atlasShader->isOpaque();
+ if (p.getAlphaf() != 1) {
+ rec.fPipeline->append(SkRasterPipeline::scale_1_float, alloc.make<float>(p.getAlphaf()));
+ isOpaque = false;
+ }
+
+ auto blitter = SkCreateRasterPipelineBlitter(fDst, p, pipeline, isOpaque, &alloc);
+ SkPath scratchPath;
+
+ for (int i = 0; i < count; ++i) {
+ if (colors) {
+ SkColor4f c4 = SkColor4f::FromColor(colors[i]);
+ steps.apply(c4.vec());
+ load_color(uniformCtx, c4.premul().vec());
+ }
+
+ SkMatrix mx;
+ mx.setRSXform(xform[i]);
+ mx.preTranslate(-textures[i].fLeft, -textures[i].fTop);
+ mx.postConcat(*fMatrix);
+
+ updator->update(mx, nullptr);
+ fill_rect(mx, *fRC, textures[i], blitter, &scratchPath);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkDraw_text.cpp b/gfx/skia/skia/src/core/SkDraw_text.cpp
new file mode 100644
index 0000000000..a3540818e4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_text.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkUtils.h"
+#include <climits>
+
+// disable warning : local variable used without having been initialized
+#if defined _WIN32
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool check_glyph_position(SkPoint position) {
+ // Prevent glyphs from being drawn outside of or straddling the edge of device space.
+ // Comparisons written a little weirdly so that NaN coordinates are treated safely.
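+    // The margins appear to track SkGlyph's layout (int16_t left/top plus
+    // uint16_t width/height): keeping positions inside these bounds keeps a
+    // mask's device-space bounds representable in 32-bit ints.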
+ auto gt = [](float a, int b) { return !(a <= (float)b); };
+ auto lt = [](float a, int b) { return !(a >= (float)b); };
+ return !(gt(position.fX, INT_MAX - (INT16_MAX + SkTo<int>(UINT16_MAX))) ||
+ lt(position.fX, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/)) ||
+ gt(position.fY, INT_MAX - (INT16_MAX + SkTo<int>(UINT16_MAX))) ||
+ lt(position.fY, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/)));
+}
+
+void SkDraw::paintMasks(SkDrawableGlyphBuffer* drawables, const SkPaint& paint) const {
+
+ // The size used for a typical blitter.
+ SkSTArenaAlloc<3308> alloc;
+ SkBlitter* blitter = SkBlitter::Choose(fDst, *fMatrix, paint, &alloc, false);
+ if (fCoverage) {
+ blitter = alloc.make<SkPairBlitter>(
+ blitter,
+ SkBlitter::Choose(*fCoverage, *fMatrix, SkPaint(), &alloc, true));
+ }
+
+ SkAAClipBlitterWrapper wrapper{*fRC, blitter};
+ blitter = wrapper.getBlitter();
+
+ bool useRegion = fRC->isBW() && !fRC->isRect();
+
+ if (useRegion) {
+ for (auto t : drawables->drawable()) {
+ SkGlyphVariant glyph; SkPoint pos;
+ std::tie(glyph, pos) = t;
+ if (check_glyph_position(pos)) {
+ SkMask mask = glyph.glyph()->mask(pos);
+
+ SkRegion::Cliperator clipper(fRC->bwRgn(), mask.fBounds);
+
+ if (!clipper.done()) {
+ if (SkMask::kARGB32_Format == mask.fFormat) {
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(mask.fBounds.size()),
+ mask.fImage,
+ mask.fRowBytes);
+ this->drawSprite(bm, mask.fBounds.x(), mask.fBounds.y(), paint);
+ } else {
+ const SkIRect& cr = clipper.rect();
+ do {
+ blitter->blitMask(mask, cr);
+ clipper.next();
+ } while (!clipper.done());
+ }
+ }
+ }
+ }
+ } else {
+ SkIRect clipBounds = fRC->isBW() ? fRC->bwRgn().getBounds()
+ : fRC->aaRgn().getBounds();
+ for (auto t : drawables->drawable()) {
+ SkGlyphVariant glyph; SkPoint pos;
+ std::tie(glyph, pos) = t;
+ if (check_glyph_position(pos)) {
+ SkMask mask = glyph.glyph()->mask(pos);
+ SkIRect storage;
+ const SkIRect* bounds = &mask.fBounds;
+
+ // this extra test is worth it, assuming that most of the time it succeeds
+ // since we can avoid writing to storage
+ if (!clipBounds.containsNoEmptyCheck(mask.fBounds)) {
+ if (!storage.intersect(mask.fBounds, clipBounds)) {
+ continue;
+ }
+ bounds = &storage;
+ }
+
+ if (SkMask::kARGB32_Format == mask.fFormat) {
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(mask.fBounds.size()),
+ mask.fImage,
+ mask.fRowBytes);
+ this->drawSprite(bm, mask.fBounds.x(), mask.fBounds.y(), paint);
+ } else {
+ blitter->blitMask(mask, *bounds);
+ }
+ }
+ }
+ }
+}
+
+void SkDraw::paintPaths(SkDrawableGlyphBuffer* drawables,
+ SkScalar scale,
+ const SkPaint& paint) const {
+ for (auto t : drawables->drawable()) {
+ SkGlyphVariant path; SkPoint pos;
+ std::tie(path, pos) = t;
+ SkMatrix m;
+ m.setScaleTranslate(scale, scale, pos.x(), pos.y());
+ this->drawPath(*path.path(), paint, &m, false);
+ }
+}
+
+void SkDraw::drawGlyphRunList(const SkGlyphRunList& glyphRunList,
+ SkGlyphRunListPainter* glyphPainter) const {
+
+ SkDEBUGCODE(this->validate();)
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ glyphPainter->drawForBitmapDevice(glyphRunList, *fMatrix, this);
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkDraw_vertices.cpp b/gfx/skia/skia/src/core/SkDraw_vertices.cpp
new file mode 100644
index 0000000000..bed4c2aa76
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_vertices.cpp
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkAutoBlitterChoose.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkVertState.h"
+#include "src/shaders/SkComposeShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+struct Matrix43 {
+ float fMat[12]; // column major
+
+ Sk4f map(float x, float y) const {
+ return Sk4f::Load(&fMat[0]) * x + Sk4f::Load(&fMat[4]) * y + Sk4f::Load(&fMat[8]);
+ }
+
+ void setConcat(const Matrix43& a, const SkMatrix& b) {
+ fMat[ 0] = a.dot(0, b.getScaleX(), b.getSkewY());
+ fMat[ 1] = a.dot(1, b.getScaleX(), b.getSkewY());
+ fMat[ 2] = a.dot(2, b.getScaleX(), b.getSkewY());
+ fMat[ 3] = a.dot(3, b.getScaleX(), b.getSkewY());
+
+ fMat[ 4] = a.dot(0, b.getSkewX(), b.getScaleY());
+ fMat[ 5] = a.dot(1, b.getSkewX(), b.getScaleY());
+ fMat[ 6] = a.dot(2, b.getSkewX(), b.getScaleY());
+ fMat[ 7] = a.dot(3, b.getSkewX(), b.getScaleY());
+
+ fMat[ 8] = a.dot(0, b.getTranslateX(), b.getTranslateY()) + a.fMat[ 8];
+ fMat[ 9] = a.dot(1, b.getTranslateX(), b.getTranslateY()) + a.fMat[ 9];
+ fMat[10] = a.dot(2, b.getTranslateX(), b.getTranslateY()) + a.fMat[10];
+ fMat[11] = a.dot(3, b.getTranslateX(), b.getTranslateY()) + a.fMat[11];
+ }
+
+private:
+ float dot(int index, float x, float y) const {
+ return fMat[index + 0] * x + fMat[index + 4] * y;
+ }
+};
+
+static SkScan::HairRCProc ChooseHairProc(bool doAntiAlias) {
+ return doAntiAlias ? SkScan::AntiHairLine : SkScan::HairLine;
+}
+
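+// Build the local matrix that carries a triangle's texture coordinates onto its
+// vertex positions, so the paint's shader can be evaluated in geometry space.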
+static bool SK_WARN_UNUSED_RESULT
+texture_to_matrix(const VertState& state, const SkPoint verts[], const SkPoint texs[],
+ SkMatrix* matrix) {
+ SkPoint src[3], dst[3];
+
+ src[0] = texs[state.f0];
+ src[1] = texs[state.f1];
+ src[2] = texs[state.f2];
+ dst[0] = verts[state.f0];
+ dst[1] = verts[state.f1];
+ dst[2] = verts[state.f2];
+ return matrix->setPolyToPoly(src, dst, 3);
+}
+
+class SkTriColorShader : public SkShaderBase {
+public:
+ SkTriColorShader(bool isOpaque) : fIsOpaque(isOpaque) {}
+
+ bool update(const SkMatrix& ctmInv, const SkPoint pts[], const SkPMColor4f colors[],
+ int index0, int index1, int index2);
+
+protected:
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec& rec, SkArenaAlloc* alloc) const override {
+ return nullptr;
+ }
+#endif
+ bool onAppendStages(const SkStageRec& rec) const override {
+ rec.fPipeline->append(SkRasterPipeline::seed_shader);
+ rec.fPipeline->append(SkRasterPipeline::matrix_4x3, &fM43);
+ return true;
+ }
+
+private:
+ bool isOpaque() const override { return fIsOpaque; }
+ // For serialization. This will never be called.
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return nullptr; }
+
+ Matrix43 fM43; // we overwrite this for each triangle
+ const bool fIsOpaque;
+
+ typedef SkShaderBase INHERITED;
+};
+
+bool SkTriColorShader::update(const SkMatrix& ctmInv, const SkPoint pts[],
+ const SkPMColor4f colors[], int index0, int index1, int index2) {
+ SkMatrix m, im;
+ m.reset();
+ m.set(0, pts[index1].fX - pts[index0].fX);
+ m.set(1, pts[index2].fX - pts[index0].fX);
+ m.set(2, pts[index0].fX);
+ m.set(3, pts[index1].fY - pts[index0].fY);
+ m.set(4, pts[index2].fY - pts[index0].fY);
+ m.set(5, pts[index0].fY);
+ if (!m.invert(&im)) {
+ return false;
+ }
+
+ SkMatrix dstToUnit;
+ dstToUnit.setConcat(im, ctmInv);
+
+ Sk4f c0 = Sk4f::Load(colors[index0].vec()),
+ c1 = Sk4f::Load(colors[index1].vec()),
+ c2 = Sk4f::Load(colors[index2].vec());
+
+ Matrix43 colorm;
+ (c1 - c0).store(&colorm.fMat[0]);
+ (c2 - c0).store(&colorm.fMat[4]);
+ c0.store(&colorm.fMat[8]);
+ fM43.setConcat(colorm, dstToUnit);
+ return true;
+}
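+// Putting the pieces together: m maps the unit triangle (0,0),(1,0),(0,1) onto
+// pts[index0..index2], so (im o ctmInv) takes a device pixel to coordinates
+// (u,v) with p = p0 + u*(p1-p0) + v*(p2-p0); colorm then produces
+// c0 + u*(c1-c0) + v*(c2-c0) per channel. fM43 composes the two, and the
+// matrix_4x3 stage appended in onAppendStages() evaluates it per pixel.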
+
+// Convert the SkColors into float colors. The conversion depends on some conditions:
+// - If the pixmap has a dst colorspace, we have to be "color-correct".
+// Do we map into dst-colorspace before or after we interpolate?
+// - We have to decide when to apply per-color alpha (before or after we interpolate)
+//
+// For now, we will take a simple approach, but recognize this is just a start:
+// - convert colors into dst colorspace before interpolation (matches gradients)
+// - apply per-color alpha before interpolation (matches old version of vertices)
+//
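+// Implementation note: the array is treated as a count x 1 bitmap so that a
+// single SkConvertPixels call performs both the sRGB -> dst-colorspace
+// conversion and the premultiplication.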
+static SkPMColor4f* convert_colors(const SkColor src[], int count, SkColorSpace* deviceCS,
+ SkArenaAlloc* alloc) {
+ SkPMColor4f* dst = alloc->makeArray<SkPMColor4f>(count);
+ SkImageInfo srcInfo = SkImageInfo::Make(count, 1, kBGRA_8888_SkColorType,
+ kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkImageInfo dstInfo = SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType,
+ kPremul_SkAlphaType, sk_ref_sp(deviceCS));
+ SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0);
+ return dst;
+}
+
+static bool compute_is_opaque(const SkColor colors[], int count) {
+ uint32_t c = ~0;
+ for (int i = 0; i < count; ++i) {
+ c &= colors[i];
+ }
+ return SkColorGetA(c) == 0xFF;
+}
+
+void SkDraw::drawVertices(SkVertices::VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint textures[],
+ const SkColor colors[], const SkVertices::BoneIndices boneIndices[],
+ const SkVertices::BoneWeights boneWeights[], SkBlendMode bmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint, const SkVertices::Bone bones[],
+ int boneCount) const {
+ SkASSERT(0 == vertexCount || vertices);
+
+ // abort early if there is nothing to draw
+ if (vertexCount < 3 || (indices && indexCount < 3) || fRC->isEmpty()) {
+ return;
+ }
+ SkMatrix ctmInv;
+ if (!fMatrix->invert(&ctmInv)) {
+ return;
+ }
+
+ // make textures and shader mutually consistent
+ SkShader* shader = paint.getShader();
+ if (!(shader && textures)) {
+ shader = nullptr;
+ textures = nullptr;
+ }
+
+ // We can simplify things for certain blendmodes. This is for speed, and SkComposeShader
+ // itself insists we don't pass kSrc or kDst to it.
+ //
+ if (colors && textures) {
+ switch (bmode) {
+ case SkBlendMode::kSrc:
+ colors = nullptr;
+ break;
+ case SkBlendMode::kDst:
+ textures = nullptr;
+ break;
+ default: break;
+ }
+ }
+
+ // we don't use the shader if there are no textures
+ if (!textures) {
+ shader = nullptr;
+ }
+
+ constexpr size_t kDefVertexCount = 16;
+ constexpr size_t kOuterSize = sizeof(SkTriColorShader) +
+ sizeof(SkShader_Blend) +
+ (2 * sizeof(SkPoint) + sizeof(SkColor4f)) * kDefVertexCount;
+ SkSTArenaAlloc<kOuterSize> outerAlloc;
+
+ // deform vertices using the skeleton if it is passed in
+ if (bones && boneCount) {
+ // allocate space for the deformed vertices
+ SkPoint* deformed = outerAlloc.makeArray<SkPoint>(vertexCount);
+
+ // deform the vertices
+ if (boneIndices && boneWeights) {
+ for (int i = 0; i < vertexCount; i ++) {
+ const SkVertices::BoneIndices& indices = boneIndices[i];
+ const SkVertices::BoneWeights& weights = boneWeights[i];
+
+ // apply the world transform
+ SkPoint worldPoint = bones[0].mapPoint(vertices[i]);
+
+ // apply bone deformations
+ deformed[i] = SkPoint::Make(0.0f, 0.0f);
+ for (uint32_t j = 0; j < 4; j ++) {
+ // get the attachment data
+ uint32_t index = indices[j];
+ float weight = weights[j];
+
+ // skip the bone if there is no weight
+ if (weight == 0.0f) {
+ continue;
+ }
+ SkASSERT(index != 0);
+
+ // deformed += M * v * w
+ deformed[i] += bones[index].mapPoint(worldPoint) * weight;
+ }
+ }
+ } else {
+ // no bones, so only apply world transform
+ SkMatrix worldTransform = SkMatrix::I();
+ worldTransform.setAffine(bones[0].values);
+ worldTransform.mapPoints(deformed, vertices, vertexCount);
+ }
+
+ // change the vertices to point to deformed
+ vertices = deformed;
+ }
+
+ SkPoint* devVerts = outerAlloc.makeArray<SkPoint>(vertexCount);
+ fMatrix->mapPoints(devVerts, vertices, vertexCount);
+
+ {
+ SkRect bounds;
+ // this also sets bounds to empty if we see a non-finite value
+ bounds.setBounds(devVerts, vertexCount);
+ if (bounds.isEmpty()) {
+ return;
+ }
+ }
+
+ VertState state(vertexCount, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(vmode);
+
+ if (!(colors || textures)) {
+ // no colors[] and no texture, stroke hairlines with paint's color.
+ SkPaint p;
+ p.setStyle(SkPaint::kStroke_Style);
+ SkAutoBlitterChoose blitter(*this, nullptr, p);
+ // Abort early if we failed to create a shader context.
+ if (blitter->isNullBlitter()) {
+ return;
+ }
+ SkScan::HairRCProc hairProc = ChooseHairProc(paint.isAntiAlias());
+ const SkRasterClip& clip = *fRC;
+ while (vertProc(&state)) {
+ SkPoint array[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2], devVerts[state.f0]
+ };
+ hairProc(array, 4, clip, blitter.get());
+ }
+ return;
+ }
+
+ SkTriColorShader* triShader = nullptr;
+ SkPMColor4f* dstColors = nullptr;
+
+ if (colors) {
+ dstColors = convert_colors(colors, vertexCount, fDst.colorSpace(), &outerAlloc);
+ triShader = outerAlloc.make<SkTriColorShader>(compute_is_opaque(colors, vertexCount));
+ if (shader) {
+ shader = outerAlloc.make<SkShader_Blend>(bmode,
+ sk_ref_sp(triShader), sk_ref_sp(shader),
+ nullptr);
+ } else {
+ shader = triShader;
+ }
+ }
+
+ SkPaint p(paint);
+ p.setShader(sk_ref_sp(shader));
+
+ if (!textures) { // only tricolor shader
+ auto blitter = SkCreateRasterPipelineBlitter(fDst, p, *fMatrix, &outerAlloc);
+ while (vertProc(&state)) {
+ if (!triShader->update(ctmInv, vertices, dstColors, state.f0, state.f1, state.f2)) {
+ continue;
+ }
+
+ SkPoint tmp[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2]
+ };
+ SkScan::FillTriangle(tmp, *fRC, blitter);
+ }
+ return;
+ }
+
+ SkRasterPipeline pipeline(&outerAlloc);
+ SkStageRec rec = {
+ &pipeline, &outerAlloc, fDst.colorType(), fDst.colorSpace(), p, nullptr, *fMatrix
+ };
+ if (auto updater = as_SB(shader)->appendUpdatableStages(rec)) {
+ bool isOpaque = shader->isOpaque();
+ if (triShader) {
+ isOpaque = false; // unless we want to walk all the colors, and see if they are
+                                  // all opaque (and the blendmode will keep them that way)
+ }
+
+ auto blitter = SkCreateRasterPipelineBlitter(fDst, p, pipeline, isOpaque, &outerAlloc);
+ while (vertProc(&state)) {
+ if (triShader && !triShader->update(ctmInv, vertices, dstColors,
+ state.f0, state.f1, state.f2)) {
+ continue;
+ }
+
+ SkMatrix localM;
+ if (!texture_to_matrix(state, vertices, textures, &localM) ||
+ !updater->update(*fMatrix, &localM)) {
+ continue;
+ }
+
+ SkPoint tmp[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2]
+ };
+ SkScan::FillTriangle(tmp, *fRC, blitter);
+ }
+ } else {
+ // must rebuild pipeline for each triangle, to pass in the computed ctm
+ while (vertProc(&state)) {
+ if (triShader && !triShader->update(ctmInv, vertices, dstColors,
+ state.f0, state.f1, state.f2)) {
+ continue;
+ }
+
+ SkSTArenaAlloc<2048> innerAlloc;
+
+ const SkMatrix* ctm = fMatrix;
+ SkMatrix tmpCtm;
+ if (textures) {
+ SkMatrix localM;
+ if (!texture_to_matrix(state, vertices, textures, &localM)) {
+ continue;
+ }
+ tmpCtm = SkMatrix::Concat(*fMatrix, localM);
+ ctm = &tmpCtm;
+ }
+
+ SkPoint tmp[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2]
+ };
+ auto blitter = SkCreateRasterPipelineBlitter(fDst, p, *ctm, &innerAlloc);
+ SkScan::FillTriangle(tmp, *fRC, blitter);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkDrawable.cpp b/gfx/skia/skia/src/core/SkDrawable.cpp
new file mode 100644
index 0000000000..62b0272972
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawable.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDrawable.h"
+#include <atomic>
+
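+// 0 is reserved to mean "no id assigned yet" (notifyDrawingChanged() resets
+// back to it), so the counter skips 0 when it wraps around.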
+static int32_t next_generation_id() {
+ static std::atomic<int32_t> nextID{1};
+
+ int32_t id;
+ do {
+ id = nextID++;
+ } while (id == 0);
+ return id;
+}
+
+SkDrawable::SkDrawable() : fGenerationID(0) {}
+
+static void draw_bbox(SkCanvas* canvas, const SkRect& r) {
+ SkPaint paint;
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setColor(0xFFFF7088);
+ canvas->drawRect(r, paint);
+ canvas->drawLine(r.left(), r.top(), r.right(), r.bottom(), paint);
+ canvas->drawLine(r.left(), r.bottom(), r.right(), r.top(), paint);
+}
+
+void SkDrawable::draw(SkCanvas* canvas, const SkMatrix* matrix) {
+ SkAutoCanvasRestore acr(canvas, true);
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+ this->onDraw(canvas);
+
+ if (false) {
+ draw_bbox(canvas, this->getBounds());
+ }
+}
+
+void SkDrawable::draw(SkCanvas* canvas, SkScalar x, SkScalar y) {
+ SkMatrix matrix = SkMatrix::MakeTrans(x, y);
+ this->draw(canvas, &matrix);
+}
+
+SkPicture* SkDrawable::newPictureSnapshot() {
+ return this->onNewPictureSnapshot();
+}
+
+uint32_t SkDrawable::getGenerationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = next_generation_id();
+ }
+ return fGenerationID;
+}
+
+SkRect SkDrawable::getBounds() {
+ return this->onGetBounds();
+}
+
+void SkDrawable::notifyDrawingChanged() {
+ fGenerationID = 0;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPictureRecorder.h"
+
+SkPicture* SkDrawable::onNewPictureSnapshot() {
+ SkPictureRecorder recorder;
+
+ const SkRect bounds = this->getBounds();
+ SkCanvas* canvas = recorder.beginRecording(bounds, nullptr, 0);
+ this->draw(canvas);
+ if (false) {
+ draw_bbox(canvas, bounds);
+ }
+ return recorder.finishRecordingAsPicture().release();
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.cpp b/gfx/skia/skia/src/core/SkEdge.cpp
new file mode 100644
index 0000000000..0aa3b618ad
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.cpp
@@ -0,0 +1,503 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkEdge.h"
+
+#include "include/private/SkTo.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkMathPriv.h"
+
+#include <utility>
+
+/*
+ In setLine, setQuadratic, setCubic, the first thing we do is to convert
+ the points into FDot6. This is modulated by the shift parameter, which
+ will either be 0, or something like 2 for antialiasing.
+
+ In the float case, we want to turn the float into .6 by saying pt * 64,
+ or pt * 256 for antialiasing. This is implemented as 1 << (shift + 6).
+
+ In the fixed case, we want to turn the fixed into .6 by saying pt >> 10,
+ or pt >> 8 for antialiasing. This is implemented as pt >> (10 - shift).
+*/
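+/* Example, shift == 0: pt = 2.5f becomes int(2.5 * 64) = 160 in FDot6,
+   i.e. 160/64 = 2.5. */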
+
+static inline SkFixed SkFDot6ToFixedDiv2(SkFDot6 value) {
+    // we want to return SkFDot6ToFixed(value >> 1), but we don't want to throw
+    // away the low bit of value, so we simply up-shift by one bit less instead
+ return SkLeftShift(value, 16 - 6 - 1);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip,
+ int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+ // are we completely above or below the clip?
+ if (clip && (top >= clip->fBottom || bot <= clip->fTop)) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+
+ if (clip) {
+ this->chopLineWithClip(*clip);
+ }
+ return 1;
+}
+
+// called from a curve subclass
+int SkEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1)
+{
+ SkASSERT(fWinding == 1 || fWinding == -1);
+ SkASSERT(fCurveCount != 0);
+// SkASSERT(fCurveShift != 0);
+
+ y0 >>= 10;
+ y1 >>= 10;
+
+ SkASSERT(y0 <= y1);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+// SkASSERT(top >= fFirstY);
+
+ // are we a zero-height line?
+ if (top == bot)
+ return 0;
+
+ x0 >>= 10;
+ x1 >>= 10;
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+
+ return 1;
+}
+
+void SkEdge::chopLineWithClip(const SkIRect& clip)
+{
+ int top = fFirstY;
+
+ SkASSERT(top < clip.fBottom);
+
+ // clip the line to the top
+ if (top < clip.fTop)
+ {
+ SkASSERT(fLastY >= clip.fTop);
+ fX += fDX * (clip.fTop - top);
+ fFirstY = clip.fTop;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* We store 1<<shift in a (signed) byte, so its maximum value is 1<<6 == 64.
+ Note that this limits the number of lines we use to approximate a curve.
+ If we need to increase this, we need to store fCurveCount in something
+ larger than int8_t.
+*/
+#define MAX_COEFF_SHIFT 6
+
+static inline SkFDot6 cheap_distance(SkFDot6 dx, SkFDot6 dy)
+{
+ dx = SkAbs32(dx);
+ dy = SkAbs32(dy);
+ // return max + min/2
+ if (dx > dy)
+ dx += dy >> 1;
+ else
+ dx = dy + (dx >> 1);
+ return dx;
+}
+
+static inline int diff_to_shift(SkFDot6 dx, SkFDot6 dy, int shiftAA = 2)
+{
+ // cheap calc of distance from center of p0-p2 to the center of the curve
+ SkFDot6 dist = cheap_distance(dx, dy);
+
+ // shift down dist (it is currently in dot6)
+ // down by 3 should give us 1/8 pixel accuracy (assuming our dist is accurate...)
+ // this is chosen by heuristic: make it as big as possible (to minimize segments)
+ // ... but small enough so that our curves still look smooth
+ // When shift > 0, we're using AA and everything is scaled up so we can
+ // lower the accuracy.
+ dist = (dist + (1 << 4)) >> (3 + shiftAA);
+
+ // each subdivision (shift value) cuts this dist (error) by 1/4
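+    // i.e. we want roughly log4(dist): 32 - SkCLZ(dist) is floor(log2(dist)) + 1,
+    // and halving that approximates the base-4 log.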
+ return (32 - SkCLZ(dist)) >> 1;
+}
+
+bool SkQuadraticEdge::setQuadraticWithoutUpdate(const SkPoint pts[3], int shift) {
+ SkFDot6 x0, y0, x1, y1, x2, y2;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (y0 > y2)
+ {
+ using std::swap;
+ swap(x0, x2);
+ swap(y0, y2);
+ winding = -1;
+ }
+ SkASSERT(y0 <= y1 && y1 <= y2);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y2);
+
+ // are we a zero-height quad (line)?
+ if (top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+ SkFDot6 dx = (SkLeftShift(x1, 1) - x0 - x2) >> 2;
+ SkFDot6 dy = (SkLeftShift(y1, 1) - y0 - y2) >> 2;
+ // This is a little confusing:
+ // before this line, shift is the scale up factor for AA;
+ // after this line, shift is the fCurveShift.
+ shift = diff_to_shift(dx, dy, shift);
+ SkASSERT(shift >= 0);
+ }
+ // need at least 1 subdivision for our bias trick
+ if (shift == 0) {
+ shift = 1;
+ } else if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+ fWinding = SkToS8(winding);
+ //fCubicDShift only set for cubics
+ fCurveCount = SkToS8(1 << shift);
+
+ /*
+ * We want to reformulate into polynomial form, to make it clear how we
+ * should forward-difference.
+ *
+     * p0 (1 - t)^2 + 2 p1 t(1 - t) + p2 t^2 ==> At^2 + Bt + C
+ *
+ * A = p0 - 2p1 + p2
+ * B = 2(p1 - p0)
+ * C = p0
+ *
+ * Our caller must have constrained our inputs (p0..p2) to all fit into
+ * 16.16. However, as seen above, we sometimes compute values that can be
+ * larger (e.g. B = 2*(p1 - p0)). To guard against overflow, we will store
+ * A and B at 1/2 of their actual value, and just apply a 2x scale during
+ * application in updateQuadratic(). Hence we store (shift - 1) in
+ * fCurveShift.
+ */
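+    /*
+     * Forward-differencing refresher: with n = 1 << shift steps of size
+     * h = 1/n, f(t + h) - f(t) = 2Aht + Ah^2 + Bh, so the first difference
+     * starts at Ah^2 + Bh and grows by a constant second difference 2Ah^2.
+     * fQDx/fQDy and fQDDx/fQDDy below hold these values scaled by n/2 (the
+     * "biased by shift" notes); updateQuadratic() unbiases the first
+     * difference with ">> fCurveShift" at each step.
+     */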
+
+ fCurveShift = SkToU8(shift - 1);
+
+ SkFixed A = SkFDot6ToFixedDiv2(x0 - x1 - x1 + x2); // 1/2 the real value
+ SkFixed B = SkFDot6ToFixed(x1 - x0); // 1/2 the real value
+
+ fQx = SkFDot6ToFixed(x0);
+ fQDx = B + (A >> shift); // biased by shift
+ fQDDx = A >> (shift - 1); // biased by shift
+
+ A = SkFDot6ToFixedDiv2(y0 - y1 - y1 + y2); // 1/2 the real value
+ B = SkFDot6ToFixed(y1 - y0); // 1/2 the real value
+
+ fQy = SkFDot6ToFixed(y0);
+ fQDy = B + (A >> shift); // biased by shift
+ fQDDy = A >> (shift - 1); // biased by shift
+
+ fQLastX = SkFDot6ToFixed(x2);
+ fQLastY = SkFDot6ToFixed(y2);
+
+ return true;
+}
+
+int SkQuadraticEdge::setQuadratic(const SkPoint pts[3], int shift) {
+ if (!setQuadraticWithoutUpdate(pts, shift)) {
+ return 0;
+ }
+ return this->updateQuadratic();
+}
+
+int SkQuadraticEdge::updateQuadratic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fQx;
+ SkFixed oldy = fQy;
+ SkFixed dx = fQDx;
+ SkFixed dy = fQDy;
+ SkFixed newx, newy;
+ int shift = fCurveShift;
+
+ SkASSERT(count > 0);
+
+ do {
+ if (--count > 0)
+ {
+ newx = oldx + (dx >> shift);
+ dx += fQDDx;
+ newy = oldy + (dy >> shift);
+ dy += fQDDy;
+ }
+ else // last segment
+ {
+ newx = fQLastX;
+ newy = fQLastY;
+ }
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count > 0 && !success);
+
+ fQx = newx;
+ fQy = newy;
+ fQDx = dx;
+ fQDy = dy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+static inline int SkFDot6UpShift(SkFDot6 x, int upShift) {
+ SkASSERT((SkLeftShift(x, upShift) >> upShift) == x);
+ return SkLeftShift(x, upShift);
+}
+
+/* f(1/3) = (8a + 12b + 6c + d) / 27
+ f(2/3) = (a + 6b + 12c + 8d) / 27
+
+ f(1/3)-b = (8a - 15b + 6c + d) / 27
+ f(2/3)-c = (a + 6b - 15c + 8d) / 27
+
+   use 19/512 (~0.0371) to approximate 1/27 (~0.0370); hence the *19 >> 9 below
+*/
+static SkFDot6 cubic_delta_from_line(SkFDot6 a, SkFDot6 b, SkFDot6 c, SkFDot6 d)
+{
+    // our parameters may be negative, so we multiply rather than shift left
+    // (ASAN flags left-shifting negative values)
+ SkFDot6 oneThird = (a*8 - b*15 + 6*c + d) * 19 >> 9;
+ SkFDot6 twoThird = (a + 6*b - c*15 + d*8) * 19 >> 9;
+
+ return SkMax32(SkAbs32(oneThird), SkAbs32(twoThird));
+}
+
+bool SkCubicEdge::setCubicWithoutUpdate(const SkPoint pts[4], int shift, bool sortY) {
+ SkFDot6 x0, y0, x1, y1, x2, y2, x3, y3;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+ x3 = SkScalarRoundToFDot6(pts[3].fX, shift);
+ y3 = SkScalarRoundToFDot6(pts[3].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+ x3 = int(pts[3].fX * scale);
+ y3 = int(pts[3].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (sortY && y0 > y3)
+ {
+ using std::swap;
+ swap(x0, x3);
+ swap(x1, x2);
+ swap(y0, y3);
+ swap(y1, y2);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y3);
+
+ // are we a zero-height cubic (line)?
+ if (sortY && top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+        // Can't use (center of curve - center of baseline), since center-of-curve
+        // need not be the max delta from the baseline (it could even be coincident),
+        // so we just look at the two off-curve points instead
+ SkFDot6 dx = cubic_delta_from_line(x0, x1, x2, x3);
+ SkFDot6 dy = cubic_delta_from_line(y0, y1, y2, y3);
+ // add 1 (by observation)
+ shift = diff_to_shift(dx, dy) + 1;
+ }
+ // need at least 1 subdivision for our bias trick
+ SkASSERT(shift > 0);
+ if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+    /* Our incoming data is initially shifted down by 10 (or 8 when
+       antialiasing), so the most we can shift up is 8. However, we compute
+       coefficients with a 3x multiply, so the safest upshift is really 6.
+    */
+ int upShift = 6; // largest safe value
+ int downShift = shift + upShift - 10;
+ if (downShift < 0) {
+ downShift = 0;
+ upShift = 10 - shift;
+ }
+
+ fWinding = SkToS8(winding);
+ fCurveCount = SkToS8(SkLeftShift(-1, shift));
+ fCurveShift = SkToU8(shift);
+ fCubicDShift = SkToU8(downShift);
+
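+    // Power-basis coefficients of the cubic x(t) = x0 + Bt + Ct^2 + Dt^3:
+    //   B = 3(x1 - x0), C = 3(x0 - 2x1 + x2), D = x3 - x0 + 3(x1 - x2),
+    // each up-shifted for precision before the forward differences are formed.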
+ SkFixed B = SkFDot6UpShift(3 * (x1 - x0), upShift);
+ SkFixed C = SkFDot6UpShift(3 * (x0 - x1 - x1 + x2), upShift);
+ SkFixed D = SkFDot6UpShift(x3 + 3 * (x1 - x2) - x0, upShift);
+
+ fCx = SkFDot6ToFixed(x0);
+ fCDx = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDx = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDx = 3*D >> (shift - 1); // biased by 2*shift
+
+ B = SkFDot6UpShift(3 * (y1 - y0), upShift);
+ C = SkFDot6UpShift(3 * (y0 - y1 - y1 + y2), upShift);
+ D = SkFDot6UpShift(y3 + 3 * (y1 - y2) - y0, upShift);
+
+ fCy = SkFDot6ToFixed(y0);
+ fCDy = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDy = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDy = 3*D >> (shift - 1); // biased by 2*shift
+
+ fCLastX = SkFDot6ToFixed(x3);
+ fCLastY = SkFDot6ToFixed(y3);
+
+ return true;
+}
+
+int SkCubicEdge::setCubic(const SkPoint pts[4], int shift) {
+ if (!this->setCubicWithoutUpdate(pts, shift)) {
+ return 0;
+ }
+ return this->updateCubic();
+}
+
+int SkCubicEdge::updateCubic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fCx;
+ SkFixed oldy = fCy;
+ SkFixed newx, newy;
+ const int ddshift = fCurveShift;
+ const int dshift = fCubicDShift;
+
+ SkASSERT(count < 0);
+
+ do {
+ if (++count < 0)
+ {
+ newx = oldx + (fCDx >> dshift);
+ fCDx += fCDDx >> ddshift;
+ fCDDx += fCDDDx;
+
+ newy = oldy + (fCDy >> dshift);
+ fCDy += fCDDy >> ddshift;
+ fCDDy += fCDDDy;
+ }
+ else // last segment
+ {
+ // SkDebugf("LastX err=%d, LastY err=%d\n", (oldx + (fCDx >> shift) - fLastX), (oldy + (fCDy >> shift) - fLastY));
+ newx = fCLastX;
+ newy = fCLastY;
+ }
+
+ // we want to say SkASSERT(oldy <= newy), but our finite fixedpoint
+ // doesn't always achieve that, so we have to explicitly pin it here.
+ if (newy < oldy) {
+ newy = oldy;
+ }
+
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count < 0 && !success);
+
+ fCx = newx;
+ fCy = newy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.h b/gfx/skia/skia/src/core/SkEdge.h
new file mode 100644
index 0000000000..c0c26ca0c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEdge_DEFINED
+#define SkEdge_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkFDot6.h"
+
+#include <utility>
+
+// This correctly favors the lower-pixel when y0 is on a 1/2 pixel boundary
+#define SkEdge_Compute_DY(top, y0) (SkLeftShift(top, 6) + 32 - (y0))
+
+struct SkEdge {
+ enum Type {
+ kLine_Type,
+ kQuad_Type,
+ kCubic_Type
+ };
+
+ SkEdge* fNext;
+ SkEdge* fPrev;
+
+ SkFixed fX;
+ SkFixed fDX;
+ int32_t fFirstY;
+ int32_t fLastY;
+ int8_t fCurveCount; // only used by kQuad(+) and kCubic(-)
+    uint8_t fCurveShift;    // applied to all Dx/DDx/DDDx except for the fCubicDShift exception
+ uint8_t fCubicDShift; // applied to fCDx and fCDy only in cubic
+ int8_t fWinding; // 1 or -1
+
+ int setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip, int shiftUp);
+ // call this version if you know you don't have a clip
+ inline int setLine(const SkPoint& p0, const SkPoint& p1, int shiftUp);
+ inline int updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by);
+ void chopLineWithClip(const SkIRect& clip);
+
+ inline bool intersectsClip(const SkIRect& clip) const {
+ SkASSERT(fFirstY < clip.fBottom);
+ return fLastY >= clip.fTop;
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("edge: firstY:%d lastY:%d x:%g dx:%g w:%d\n", fFirstY, fLastY, SkFixedToFloat(fX), SkFixedToFloat(fDX), fWinding);
+ }
+
+ void validate() const {
+ SkASSERT(fPrev && fNext);
+ SkASSERT(fPrev->fNext == this);
+ SkASSERT(fNext->fPrev == this);
+
+ SkASSERT(fFirstY <= fLastY);
+ SkASSERT(SkAbs32(fWinding) == 1);
+ }
+#endif
+};
+
+struct SkQuadraticEdge : public SkEdge {
+ SkFixed fQx, fQy;
+ SkFixed fQDx, fQDy;
+ SkFixed fQDDx, fQDDy;
+ SkFixed fQLastX, fQLastY;
+
+ bool setQuadraticWithoutUpdate(const SkPoint pts[3], int shiftUp);
+ int setQuadratic(const SkPoint pts[3], int shiftUp);
+ int updateQuadratic();
+};
+
+struct SkCubicEdge : public SkEdge {
+ SkFixed fCx, fCy;
+ SkFixed fCDx, fCDy;
+ SkFixed fCDDx, fCDDy;
+ SkFixed fCDDDx, fCDDDy;
+ SkFixed fCLastX, fCLastY;
+
+ bool setCubicWithoutUpdate(const SkPoint pts[4], int shiftUp, bool sortY = true);
+ int setCubic(const SkPoint pts[4], int shiftUp);
+ int updateCubic();
+};
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+ return 1;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.cpp b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
new file mode 100644
index 0000000000..721217d905
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeBuilder.h"
+#include "src/core/SkEdgeClipper.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkSafeMath.h"
+
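+// Merge bookkeeping for consecutive vertical edges that share an x coordinate:
+// with equal windings, abutting spans are unioned; with opposite windings the
+// overlap cancels, shrinking the previous edge or (kTotal_Combine) removing it.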
+SkEdgeBuilder::Combine SkBasicEdgeBuilder::combineVertical(const SkEdge* edge, SkEdge* last) {
+ if (last->fCurveCount || last->fDX || edge->fX != last->fX) {
+ return kNo_Combine;
+ }
+ if (edge->fWinding == last->fWinding) {
+ if (edge->fLastY + 1 == last->fFirstY) {
+ last->fFirstY = edge->fFirstY;
+ return kPartial_Combine;
+ }
+ if (edge->fFirstY == last->fLastY + 1) {
+ last->fLastY = edge->fLastY;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+ }
+ if (edge->fFirstY == last->fFirstY) {
+ if (edge->fLastY == last->fLastY) {
+ return kTotal_Combine;
+ }
+ if (edge->fLastY < last->fLastY) {
+ last->fFirstY = edge->fLastY + 1;
+ return kPartial_Combine;
+ }
+ last->fFirstY = last->fLastY + 1;
+ last->fLastY = edge->fLastY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ if (edge->fLastY == last->fLastY) {
+ if (edge->fFirstY > last->fFirstY) {
+ last->fLastY = edge->fFirstY - 1;
+ return kPartial_Combine;
+ }
+ last->fLastY = last->fFirstY - 1;
+ last->fFirstY = edge->fFirstY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+}
+
+SkEdgeBuilder::Combine SkAnalyticEdgeBuilder::combineVertical(const SkAnalyticEdge* edge,
+ SkAnalyticEdge* last) {
+ auto approximately_equal = [](SkFixed a, SkFixed b) {
+ return SkAbs32(a - b) < 0x100;
+ };
+
+ if (last->fCurveCount || last->fDX || edge->fX != last->fX) {
+ return kNo_Combine;
+ }
+ if (edge->fWinding == last->fWinding) {
+ if (edge->fLowerY == last->fUpperY) {
+ last->fUpperY = edge->fUpperY;
+ last->fY = last->fUpperY;
+ return kPartial_Combine;
+ }
+ if (approximately_equal(edge->fUpperY, last->fLowerY)) {
+ last->fLowerY = edge->fLowerY;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+ }
+ if (approximately_equal(edge->fUpperY, last->fUpperY)) {
+ if (approximately_equal(edge->fLowerY, last->fLowerY)) {
+ return kTotal_Combine;
+ }
+ if (edge->fLowerY < last->fLowerY) {
+ last->fUpperY = edge->fLowerY;
+ last->fY = last->fUpperY;
+ return kPartial_Combine;
+ }
+ last->fUpperY = last->fLowerY;
+ last->fY = last->fUpperY;
+ last->fLowerY = edge->fLowerY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ if (approximately_equal(edge->fLowerY, last->fLowerY)) {
+ if (edge->fUpperY > last->fUpperY) {
+ last->fLowerY = edge->fUpperY;
+ return kPartial_Combine;
+ }
+ last->fLowerY = last->fUpperY;
+ last->fUpperY = edge->fUpperY;
+ last->fY = last->fUpperY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+}
+
+template <typename Edge>
+static bool is_vertical(const Edge* edge) {
+ return edge->fDX == 0
+ && edge->fCurveCount == 0;
+}
+
+// TODO: we can deallocate the edge if edge->setFoo() fails
+// or when we don't use it (kPartial_Combine or kTotal_Combine).
+
+void SkBasicEdgeBuilder::addLine(const SkPoint pts[]) {
+ SkEdge* edge = fAlloc.make<SkEdge>();
+ if (edge->setLine(pts[0], pts[1], fClipShift)) {
+ Combine combine = is_vertical(edge) && !fList.empty()
+ ? this->combineVertical(edge, (SkEdge*)fList.top())
+ : kNo_Combine;
+
+ switch (combine) {
+ case kTotal_Combine: fList.pop(); break;
+ case kPartial_Combine: break;
+ case kNo_Combine: fList.push_back(edge); break;
+ }
+ }
+}
+void SkAnalyticEdgeBuilder::addLine(const SkPoint pts[]) {
+ SkAnalyticEdge* edge = fAlloc.make<SkAnalyticEdge>();
+ if (edge->setLine(pts[0], pts[1])) {
+
+ Combine combine = is_vertical(edge) && !fList.empty()
+ ? this->combineVertical(edge, (SkAnalyticEdge*)fList.top())
+ : kNo_Combine;
+
+ switch (combine) {
+ case kTotal_Combine: fList.pop(); break;
+ case kPartial_Combine: break;
+ case kNo_Combine: fList.push_back(edge); break;
+ }
+ }
+}
+void SkBasicEdgeBuilder::addQuad(const SkPoint pts[]) {
+ SkQuadraticEdge* edge = fAlloc.make<SkQuadraticEdge>();
+ if (edge->setQuadratic(pts, fClipShift)) {
+ fList.push_back(edge);
+ }
+}
+void SkAnalyticEdgeBuilder::addQuad(const SkPoint pts[]) {
+ SkAnalyticQuadraticEdge* edge = fAlloc.make<SkAnalyticQuadraticEdge>();
+ if (edge->setQuadratic(pts)) {
+ fList.push_back(edge);
+ }
+}
+
+void SkBasicEdgeBuilder::addCubic(const SkPoint pts[]) {
+ SkCubicEdge* edge = fAlloc.make<SkCubicEdge>();
+ if (edge->setCubic(pts, fClipShift)) {
+ fList.push_back(edge);
+ }
+}
+void SkAnalyticEdgeBuilder::addCubic(const SkPoint pts[]) {
+ SkAnalyticCubicEdge* edge = fAlloc.make<SkAnalyticCubicEdge>();
+ if (edge->setCubic(pts)) {
+ fList.push_back(edge);
+ }
+}
+
+// TODO: merge addLine() and addPolyLine()?
+
+SkEdgeBuilder::Combine SkBasicEdgeBuilder::addPolyLine(const SkPoint pts[],
+ char* arg_edge, char** arg_edgePtr) {
+ auto edge = (SkEdge*) arg_edge;
+ auto edgePtr = (SkEdge**)arg_edgePtr;
+
+ if (edge->setLine(pts[0], pts[1], fClipShift)) {
+ return is_vertical(edge) && edgePtr > (SkEdge**)fEdgeList
+ ? this->combineVertical(edge, edgePtr[-1])
+ : kNo_Combine;
+ }
+ return SkEdgeBuilder::kPartial_Combine; // A convenient lie. Same do-nothing behavior.
+}
+SkEdgeBuilder::Combine SkAnalyticEdgeBuilder::addPolyLine(const SkPoint pts[],
+ char* arg_edge, char** arg_edgePtr) {
+ auto edge = (SkAnalyticEdge*) arg_edge;
+ auto edgePtr = (SkAnalyticEdge**)arg_edgePtr;
+
+ if (edge->setLine(pts[0], pts[1])) {
+ return is_vertical(edge) && edgePtr > (SkAnalyticEdge**)fEdgeList
+ ? this->combineVertical(edge, edgePtr[-1])
+ : kNo_Combine;
+ }
+ return SkEdgeBuilder::kPartial_Combine; // As above.
+}
+
+SkRect SkBasicEdgeBuilder::recoverClip(const SkIRect& src) const {
+ return { SkIntToScalar(src.fLeft >> fClipShift),
+ SkIntToScalar(src.fTop >> fClipShift),
+ SkIntToScalar(src.fRight >> fClipShift),
+ SkIntToScalar(src.fBottom >> fClipShift), };
+}
+SkRect SkAnalyticEdgeBuilder::recoverClip(const SkIRect& src) const {
+ return SkRect::Make(src);
+}
+
+char* SkBasicEdgeBuilder::allocEdges(size_t n, size_t* size) {
+ *size = sizeof(SkEdge);
+ return (char*)fAlloc.makeArrayDefault<SkEdge>(n);
+}
+char* SkAnalyticEdgeBuilder::allocEdges(size_t n, size_t* size) {
+ *size = sizeof(SkAnalyticEdge);
+ return (char*)fAlloc.makeArrayDefault<SkAnalyticEdge>(n);
+}
+
+// TODO: maybe get rid of buildPoly() entirely?
+int SkEdgeBuilder::buildPoly(const SkPath& path, const SkIRect* iclip, bool canCullToTheRight) {
+ size_t maxEdgeCount = path.countPoints();
+ if (iclip) {
+ // clipping can turn 1 line into (up to) kMaxClippedLineSegments, since
+ // we turn portions that are clipped out on the left/right into vertical
+ // segments.
+ SkSafeMath safe;
+ maxEdgeCount = safe.mul(maxEdgeCount, SkLineClipper::kMaxClippedLineSegments);
+ if (!safe) {
+ return 0;
+ }
+ }
+
+ size_t edgeSize;
+ char* edge = this->allocEdges(maxEdgeCount, &edgeSize);
+
+ SkDEBUGCODE(char* edgeStart = edge);
+ char** edgePtr = fAlloc.makeArrayDefault<char*>(maxEdgeCount);
+ fEdgeList = (void**)edgePtr;
+
+ SkPathEdgeIter iter(path);
+ if (iclip) {
+ SkRect clip = this->recoverClip(*iclip);
+
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine: {
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ int lineCount = SkLineClipper::ClipLine(e.fPts, clip, lines, canCullToTheRight);
+ SkASSERT(lineCount <= SkLineClipper::kMaxClippedLineSegments);
+ for (int i = 0; i < lineCount; i++) {
+ switch( this->addPolyLine(lines + i, edge, edgePtr) ) {
+ case kTotal_Combine: edgePtr--; break;
+ case kPartial_Combine: break;
+ case kNo_Combine: *edgePtr++ = edge;
+ edge += edgeSize;
+ }
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ } else {
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine: {
+ switch( this->addPolyLine(e.fPts, edge, edgePtr) ) {
+ case kTotal_Combine: edgePtr--; break;
+ case kPartial_Combine: break;
+ case kNo_Combine: *edgePtr++ = edge;
+ edge += edgeSize;
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ }
+ SkASSERT((size_t)(edge - edgeStart) <= maxEdgeCount * edgeSize);
+ SkASSERT((size_t)(edgePtr - (char**)fEdgeList) <= maxEdgeCount);
+ return SkToInt(edgePtr - (char**)fEdgeList);
+}
+
+int SkEdgeBuilder::build(const SkPath& path, const SkIRect* iclip, bool canCullToTheRight) {
+ SkAutoConicToQuads quadder;
+ const SkScalar conicTol = SK_Scalar1 / 4;
+ bool is_finite = true;
+
+ SkPathEdgeIter iter(path);
+ if (iclip) {
+ SkRect clip = this->recoverClip(*iclip);
+ SkEdgeClipper clipper(canCullToTheRight);
+
+ auto apply_clipper = [this, &clipper, &is_finite] {
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = clipper.next(pts)) != SkPath::kDone_Verb) {
+ const int count = SkPathPriv::PtsInIter(verb);
+ if (!SkScalarsAreFinite(&pts[0].fX, count*2)) {
+ is_finite = false;
+ return;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb: this->addLine (pts); break;
+ case SkPath::kQuad_Verb: this->addQuad (pts); break;
+ case SkPath::kCubic_Verb: this->addCubic(pts); break;
+ default: break;
+ }
+ }
+ };
+
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine:
+ if (clipper.clipLine(e.fPts[0], e.fPts[1], clip)) {
+ apply_clipper();
+ }
+ break;
+ case SkPathEdgeIter::Edge::kQuad:
+ if (clipper.clipQuad(e.fPts, clip)) {
+ apply_clipper();
+ }
+ break;
+ case SkPathEdgeIter::Edge::kConic: {
+ const SkPoint* quadPts = quadder.computeQuads(
+ e.fPts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ if (clipper.clipQuad(quadPts, clip)) {
+ apply_clipper();
+ }
+ quadPts += 2;
+ }
+ } break;
+ case SkPathEdgeIter::Edge::kCubic:
+ if (clipper.clipCubic(e.fPts, clip)) {
+ apply_clipper();
+ }
+ break;
+ }
+ }
+ } else {
+ auto handle_quad = [this](const SkPoint pts[3]) {
+ SkPoint monoX[5];
+ int n = SkChopQuadAtYExtrema(pts, monoX);
+ for (int i = 0; i <= n; i++) {
+ this->addQuad(&monoX[i * 2]);
+ }
+ };
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine:
+ this->addLine(e.fPts);
+ break;
+ case SkPathEdgeIter::Edge::kQuad: {
+ handle_quad(e.fPts);
+ break;
+ }
+ case SkPathEdgeIter::Edge::kConic: {
+ const SkPoint* quadPts = quadder.computeQuads(
+ e.fPts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ handle_quad(quadPts);
+ quadPts += 2;
+ }
+ } break;
+ case SkPathEdgeIter::Edge::kCubic: {
+ SkPoint monoY[10];
+ int n = SkChopCubicAtYExtrema(e.fPts, monoY);
+ for (int i = 0; i <= n; i++) {
+ this->addCubic(&monoY[i * 3]);
+ }
+ break;
+ }
+ }
+ }
+ }
+ fEdgeList = fList.begin();
+ return is_finite ? fList.count() : 0;
+}
+
+int SkEdgeBuilder::buildEdges(const SkPath& path,
+ const SkIRect* shiftedClip) {
+ // If we're convex, then we need both edges, even if the right edge is past the clip.
+ const bool canCullToTheRight = !path.isConvex();
+
+ // We can use our buildPoly() optimization if all the segments are lines.
+ // (Edges are homogeneous and stored contiguously in memory, no need for indirection.)
+ const int count = SkPath::kLine_SegmentMask == path.getSegmentMasks()
+ ? this->buildPoly(path, shiftedClip, canCullToTheRight)
+ : this->build (path, shiftedClip, canCullToTheRight);
+
+ SkASSERT(count >= 0);
+
+ // If we can't cull to the right, we should have count > 1 (or 0).
+ if (!canCullToTheRight) {
+ SkASSERT(count != 1);
+ }
+ return count;
+}
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.h b/gfx/skia/skia/src/core/SkEdgeBuilder.h
new file mode 100644
index 0000000000..5fb7c62522
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEdgeBuilder_DEFINED
+#define SkEdgeBuilder_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkEdge.h"
+
+class SkPath;
+
+class SkEdgeBuilder {
+public:
+ int buildEdges(const SkPath& path,
+ const SkIRect* shiftedClip);
+
+protected:
+ SkEdgeBuilder() = default;
+ virtual ~SkEdgeBuilder() = default;
+
+ // In general mode we allocate pointers in fList and fEdgeList points to its head.
+ // In polygon mode we preallocate edges contiguously in fAlloc and fEdgeList points there.
+ void** fEdgeList = nullptr;
+ SkTDArray<void*> fList;
+ SkSTArenaAlloc<512> fAlloc;
+
+ enum Combine {
+ kNo_Combine,
+ kPartial_Combine,
+ kTotal_Combine
+ };
+
+private:
+ int build (const SkPath& path, const SkIRect* clip, bool clipToTheRight);
+ int buildPoly(const SkPath& path, const SkIRect* clip, bool clipToTheRight);
+
+ virtual char* allocEdges(size_t n, size_t* sizeof_edge) = 0;
+ virtual SkRect recoverClip(const SkIRect&) const = 0;
+
+ virtual void addLine (const SkPoint pts[]) = 0;
+ virtual void addQuad (const SkPoint pts[]) = 0;
+ virtual void addCubic(const SkPoint pts[]) = 0;
+ virtual Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) = 0;
+};
+
+class SkBasicEdgeBuilder final : public SkEdgeBuilder {
+public:
+ explicit SkBasicEdgeBuilder(int clipShift) : fClipShift(clipShift) {}
+
+ SkEdge** edgeList() { return (SkEdge**)fEdgeList; }
+
+private:
+ Combine combineVertical(const SkEdge* edge, SkEdge* last);
+
+ char* allocEdges(size_t, size_t*) override;
+ SkRect recoverClip(const SkIRect&) const override;
+
+ void addLine (const SkPoint pts[]) override;
+ void addQuad (const SkPoint pts[]) override;
+ void addCubic(const SkPoint pts[]) override;
+ Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) override;
+
+ const int fClipShift;
+};
+
+class SkAnalyticEdgeBuilder final : public SkEdgeBuilder {
+public:
+ SkAnalyticEdgeBuilder() {}
+
+ SkAnalyticEdge** analyticEdgeList() { return (SkAnalyticEdge**)fEdgeList; }
+
+private:
+ Combine combineVertical(const SkAnalyticEdge* edge, SkAnalyticEdge* last);
+
+ char* allocEdges(size_t, size_t*) override;
+ SkRect recoverClip(const SkIRect&) const override;
+
+ void addLine (const SkPoint pts[]) override;
+ void addQuad (const SkPoint pts[]) override;
+ void addCubic(const SkPoint pts[]) override;
+ Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) override;
+};
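+
+// A minimal usage sketch (hedged; 'path' is hypothetical): gather analytic edges
+// for a path with no clip.
+//   SkAnalyticEdgeBuilder builder;
+//   int count = builder.buildEdges(path, /*shiftedClip=*/nullptr);
+//   SkAnalyticEdge** edges = builder.analyticEdgeList();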
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.cpp b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
new file mode 100644
index 0000000000..fcadd3e986
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkEdgeClipper.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkLineClipper.h"
+
+#include <utility>
+
+static bool quick_reject(const SkRect& bounds, const SkRect& clip) {
+ return bounds.fTop >= clip.fBottom || bounds.fBottom <= clip.fTop;
+}
+
+static inline void clamp_le(SkScalar& value, SkScalar max) {
+ if (value > max) {
+ value = max;
+ }
+}
+
+static inline void clamp_ge(SkScalar& value, SkScalar min) {
+ if (value < min) {
+ value = min;
+ }
+}
+
+/* src[] must be monotonic in Y. This routine copies src into dst, and sorts
+ it to be increasing in Y. If it had to reverse the order of the points,
+ it returns true, otherwise it returns false
+ */
+static bool sort_increasing_Y(SkPoint dst[], const SkPoint src[], int count) {
+ // we need the data to be monotonically increasing in Y
+ if (src[0].fY > src[count - 1].fY) {
+ for (int i = 0; i < count; i++) {
+ dst[i] = src[count - i - 1];
+ }
+ return true;
+ } else {
+ memcpy(dst, src, count * sizeof(SkPoint));
+ return false;
+ }
+}
+
+bool SkEdgeClipper::clipLine(SkPoint p0, SkPoint p1, const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ const SkPoint pts[] = { p0, p1 };
+ int lineCount = SkLineClipper::ClipLine(pts, clip, lines, fCanCullToTheRight);
+ for (int i = 0; i < lineCount; i++) {
+ this->appendLine(lines[i], lines[i + 1]);
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+ * We solve for t using the quadratic formula, hence we have to rearrange
+ * our coefficients to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+static bool chopMonoQuadAtX(SkPoint pts[3], SkScalar x, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fX, pts[1].fX, pts[2].fX, x, t);
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_quad_in_Y(SkPoint pts[3], const SkRect& clip) {
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ if (chopMonoQuadAtY(pts, clip.fTop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fY = clip.fTop;
+ clamp_ge(tmp[3].fY, clip.fTop);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY < clip.fTop) {
+ pts[i].fY = clip.fTop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (pts[2].fY > clip.fBottom) {
+ if (chopMonoQuadAtY(pts, clip.fBottom, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fY, clip.fBottom);
+ tmp[2].fY = clip.fBottom;
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY > clip.fBottom) {
+ pts[i].fY = clip.fBottom;
+ }
+ }
+ }
+ }
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ SkPoint pts[3];
+ bool reverse = sort_increasing_Y(pts, srcPts, 3);
+
+ // are we completely above or below
+ if (pts[2].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_quad_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[2].fX) {
+ using std::swap;
+ swap(pts[0], pts[2]);
+ reverse = !reverse;
+ }
+ SkASSERT(pts[0].fX <= pts[1].fX);
+ SkASSERT(pts[1].fX <= pts[2].fX);
+
+ // Now chop in X as needed, and record the segments
+
+ if (pts[2].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[2].fY, reverse);
+ }
+ return;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ if (chopMonoQuadAtX(pts, clip.fLeft, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[2].fY, reverse);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fX = clip.fLeft;
+ clamp_ge(tmp[3].fX, clip.fLeft);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtX failed, then we may have hit inexact numerics
+ // so we just clamp against the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ }
+
+ // are we partially to the right
+ if (pts[2].fX > clip.fRight) {
+ if (chopMonoQuadAtX(pts, clip.fRight, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fX, clip.fRight);
+ tmp[2].fX = clip.fRight;
+
+ this->appendQuad(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[2].fY, tmp[4].fY, reverse);
+ } else {
+ // if chopMonoQuadAtX failed, then we may have hit inexact numerics
+ // so we just clamp against the right
+ this->appendVLine(clip.fRight, pts[0].fY, pts[2].fY, reverse);
+ }
+ } else { // wholly inside the clip
+ this->appendQuad(pts, reverse);
+ }
+}
+
+bool SkEdgeClipper::clipQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ SkRect bounds;
+ bounds.setBounds(srcPts, 3);
+
+ if (!quick_reject(bounds, clip)) {
+ SkPoint monoY[5];
+ int countY = SkChopQuadAtYExtrema(srcPts, monoY);
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[5];
+ int countX = SkChopQuadAtXExtrema(&monoY[y * 2], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoQuad(&monoX[x * 2], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
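+// Fallback for when the closed-form monotonic chop fails: walk t toward the target
+// coordinate in halving steps (a bisection-style search) and return the closest t found.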
+static SkScalar mono_cubic_closestT(const SkScalar src[], SkScalar x) {
+ SkScalar t = 0.5f;
+ SkScalar lastT;
+ SkScalar bestT SK_INIT_TO_AVOID_WARNING;
+ SkScalar step = 0.25f;
+ SkScalar D = src[0];
+ SkScalar A = src[6] + 3*(src[2] - src[4]) - D;
+ SkScalar B = 3*(src[4] - src[2] - src[2] + D);
+ SkScalar C = 3*(src[2] - D);
+ x -= D;
+ SkScalar closest = SK_ScalarMax;
+ do {
+ SkScalar loc = ((A * t + B) * t + C) * t;
+ SkScalar dist = SkScalarAbs(loc - x);
+ if (closest > dist) {
+ closest = dist;
+ bestT = t;
+ }
+ lastT = t;
+ t += loc < x ? step : -step;
+ step *= 0.5f;
+ } while (closest > 0.25f && lastT != t);
+ return bestT;
+}
+
+static void chop_mono_cubic_at_y(SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtY(src, y, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fY, y));
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_cubic_in_Y(SkPoint pts[4], const SkRect& clip) {
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fTop, tmp);
+
+ /*
+ * For a large range in the points, we can do a poor job of chopping, such that the t
+ * we computed resulted in the lower cubic still being partly above the clip.
+ *
+ * If just the first or first 2 Y values are above the fTop, we can just smash them
+ * down. If the first 3 Ys are above fTop, we can't smash all 3, as that can really
+ * distort the cubic. In this case, we take the first output (tmp[3..6]) and treat it as
+ * a guess, and re-chop against fTop. Then we fall through to checking if we need to
+ * smash the first 1 or 2 Y values.
+ */
+ if (tmp[3].fY < clip.fTop && tmp[4].fY < clip.fTop && tmp[5].fY < clip.fTop) {
+ SkPoint tmp2[4];
+ memcpy(tmp2, &tmp[3].fX, 4 * sizeof(SkPoint));
+ chop_mono_cubic_at_y(tmp2, clip.fTop, tmp);
+ }
+
+ // tmp[3].fY and tmp[4].fY should now be at or below clip.fTop.
+ // Since we can't trust the numerics of the chopper, we force those conditions now
+ tmp[3].fY = clip.fTop;
+ clamp_ge(tmp[4].fY, clip.fTop);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (pts[3].fY > clip.fBottom) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fBottom, tmp);
+ tmp[3].fY = clip.fBottom;
+ clamp_le(tmp[2].fY, clip.fBottom);
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ pts[3] = tmp[3];
+ }
+}
+
+static void chop_mono_cubic_at_x(SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtX(src, x, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fX, x));
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoCubic(const SkPoint src[4], const SkRect& clip) {
+ SkPoint pts[4];
+ bool reverse = sort_increasing_Y(pts, src, 4);
+
+ // are we completely above or below
+ if (pts[3].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_cubic_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[3].fX) {
+ using std::swap;
+ swap(pts[0], pts[3]);
+ swap(pts[1], pts[2]);
+ reverse = !reverse;
+ }
+
+ // Now chop in X as needed, and record the segments
+
+ if (pts[3].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[3].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[3].fY, reverse);
+ }
+ return;
+ }
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fLeft, tmp);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[3].fY, reverse);
+
+ // tmp[3].fX and tmp[4].fX should now be at or to the right of clip.fLeft.
+ // Since we can't trust the numerics of the chopper, we force those conditions now
+ tmp[3].fX = clip.fLeft;
+ clamp_ge(tmp[4].fX, clip.fLeft);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially to the right
+ if (pts[3].fX > clip.fRight) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fRight, tmp);
+ tmp[3].fX = clip.fRight;
+ clamp_le(tmp[2].fX, clip.fRight);
+
+ this->appendCubic(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[3].fY, tmp[6].fY, reverse);
+ } else { // wholly inside the clip
+ this->appendCubic(pts, reverse);
+ }
+}
+
+static SkRect compute_cubic_bounds(const SkPoint pts[4]) {
+ SkRect r;
+ r.setBounds(pts, 4);
+ return r;
+}
+
+static bool too_big_for_reliable_float_math(const SkRect& r) {
+ // The limit is the largest float value for which we can still reliably compute things like
+ // - chopping at X/Y extrema
+ // - chopping at Y or X values for clipping
+ //
+ // The current value was chosen by experiment; a larger value that still succeeds is better.
+ //
+ const SkScalar limit = 1 << 22;
+ return r.fLeft < -limit || r.fTop < -limit || r.fRight > limit || r.fBottom > limit;
+}
+
+bool SkEdgeClipper::clipCubic(const SkPoint srcPts[4], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ const SkRect bounds = compute_cubic_bounds(srcPts);
+ // check if we're clipped out vertically
+ if (bounds.fBottom > clip.fTop && bounds.fTop < clip.fBottom) {
+ if (too_big_for_reliable_float_math(bounds)) {
+ // can't safely clip the cubic, so we give up and draw a line (which we can safely clip)
+ //
+ // If we rewrote chopcubicat*extrema and chopmonocubic using doubles, we could very
+ // likely always handle the cubic safely, but (it seems) at a big loss in speed, so
+ // we'd only want to take that alternate impl if needed. Perhaps a TODO to try it.
+ //
+ return this->clipLine(srcPts[0], srcPts[3], clip);
+ } else {
+ SkPoint monoY[10];
+ int countY = SkChopCubicAtYExtrema(srcPts, monoY);
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[10];
+ int countX = SkChopCubicAtXExtrema(&monoY[y * 3], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoCubic(&monoX[x * 3], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEdgeClipper::appendLine(SkPoint p0, SkPoint p1) {
+ *fCurrVerb++ = SkPath::kLine_Verb;
+ fCurrPoint[0] = p0;
+ fCurrPoint[1] = p1;
+ fCurrPoint += 2;
+}
+
+void SkEdgeClipper::appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse) {
+ *fCurrVerb++ = SkPath::kLine_Verb;
+
+ if (reverse) {
+ using std::swap;
+ swap(y0, y1);
+ }
+ fCurrPoint[0].set(x, y0);
+ fCurrPoint[1].set(x, y1);
+ fCurrPoint += 2;
+}
+
+void SkEdgeClipper::appendQuad(const SkPoint pts[3], bool reverse) {
+ *fCurrVerb++ = SkPath::kQuad_Verb;
+
+ if (reverse) {
+ fCurrPoint[0] = pts[2];
+ fCurrPoint[2] = pts[0];
+ } else {
+ fCurrPoint[0] = pts[0];
+ fCurrPoint[2] = pts[2];
+ }
+ fCurrPoint[1] = pts[1];
+ fCurrPoint += 3;
+}
+
+void SkEdgeClipper::appendCubic(const SkPoint pts[4], bool reverse) {
+ *fCurrVerb++ = SkPath::kCubic_Verb;
+
+ if (reverse) {
+ for (int i = 0; i < 4; i++) {
+ fCurrPoint[i] = pts[3 - i];
+ }
+ } else {
+ memcpy(fCurrPoint, pts, 4 * sizeof(SkPoint));
+ }
+ fCurrPoint += 4;
+}
+
+SkPath::Verb SkEdgeClipper::next(SkPoint pts[]) {
+ SkPath::Verb verb = *fCurrVerb;
+
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ memcpy(pts, fCurrPoint, 2 * sizeof(SkPoint));
+ fCurrPoint += 2;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kQuad_Verb:
+ memcpy(pts, fCurrPoint, 3 * sizeof(SkPoint));
+ fCurrPoint += 3;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kCubic_Verb:
+ memcpy(pts, fCurrPoint, 4 * sizeof(SkPoint));
+ fCurrPoint += 4;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb in quadclippper2 iter");
+ break;
+ }
+ return verb;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static void assert_monotonic(const SkScalar coord[], int count) {
+ if (coord[0] > coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] >= coord[i * 2]);
+ }
+ } else if (coord[0] < coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] <= coord[i * 2]);
+ }
+ } else {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] == coord[i * 2]);
+ }
+ }
+}
+
+void sk_assert_monotonic_y(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fY, count);
+ }
+}
+
+void sk_assert_monotonic_x(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fX, count);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.h b/gfx/skia/skia/src/core/SkEdgeClipper.h
new file mode 100644
index 0000000000..2718ed774c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEdgeClipper_DEFINED
+#define SkEdgeClipper_DEFINED
+
+#include "include/core/SkPath.h"
+
+/** This is basically an iterator. It is initialized with an edge and a clip,
+ and then next() is called until it returns kDone_Verb.
+ */
+class SkEdgeClipper {
+public:
+ SkEdgeClipper(bool canCullToTheRight) : fCanCullToTheRight(canCullToTheRight) {}
+
+ bool clipLine(SkPoint p0, SkPoint p1, const SkRect& clip);
+ bool clipQuad(const SkPoint pts[3], const SkRect& clip);
+ bool clipCubic(const SkPoint pts[4], const SkRect& clip);
+
+ SkPath::Verb next(SkPoint pts[]);
+
+ bool canCullToTheRight() const { return fCanCullToTheRight; }
+
+private:
+ SkPoint* fCurrPoint;
+ SkPath::Verb* fCurrVerb;
+ const bool fCanCullToTheRight;
+
+ enum {
+ kMaxVerbs = 18, // max curvature in X and Y splits a cubic into 9 pieces; each may emit a line + a cubic
+ kMaxPoints = 54 // a line (2 pts) + a cubic (4 pts) is 6 points; times 9 pieces
+ };
+ SkPoint fPoints[kMaxPoints];
+ SkPath::Verb fVerbs[kMaxVerbs];
+
+ void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
+ void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
+ void appendLine(SkPoint p0, SkPoint p1);
+ void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
+ void appendQuad(const SkPoint pts[3], bool reverse);
+ void appendCubic(const SkPoint pts[4], bool reverse);
+};
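+
+// A minimal usage sketch (hedged; 'p0', 'p1', and 'clip' are hypothetical):
+//   SkEdgeClipper clipper(/*canCullToTheRight=*/true);
+//   if (clipper.clipLine(p0, p1, clip)) {
+//       SkPoint pts[4];
+//       SkPath::Verb verb;
+//       while ((verb = clipper.next(pts)) != SkPath::kDone_Verb) {
+//           // handle the clipped segment in pts[]
+//       }
+//   }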
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEffectPriv.h b/gfx/skia/skia/src/core/SkEffectPriv.h
new file mode 100644
index 0000000000..965435e330
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEffectPriv.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEffectPriv_DEFINED
+#define SkEffectPriv_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+class SkArenaAlloc;
+class SkColorSpace;
+class SkMatrix;
+class SkPaint;
+class SkRasterPipeline;
+
+// Passed to effects that will add stages to rasterpipeline
+struct SkStageRec {
+ SkRasterPipeline* fPipeline;
+ SkArenaAlloc* fAlloc;
+ SkColorType fDstColorType;
+ SkColorSpace* fDstCS; // may be nullptr
+ const SkPaint& fPaint;
+ const SkMatrix* fLocalM; // may be nullptr
+ const SkMatrix fCTM;
+};
+
+#endif // SkEffectPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkEndian.h b/gfx/skia/skia/src/core/SkEndian.h
new file mode 100644
index 0000000000..04b10d72a4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEndian.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEndian_DEFINED
+#define SkEndian_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/** \file SkEndian.h
+
+ Macros and helper functions for handling 16 and 32 bit values in
+ big and little endian formats.
+*/
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+ #error "can't have both LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+ #error "need either LENDIAN or BENDIAN defined"
+#endif
+
+/** Swap the two bytes in the low 16bits of the parameters.
+ e.g. 0x1234 -> 0x3412
+*/
+static inline uint16_t SkEndianSwap16(uint16_t value) {
+ return static_cast<uint16_t>((value >> 8) | ((value & 0xFF) << 8));
+}
+
+template<uint16_t N> struct SkTEndianSwap16 {
+ static const uint16_t value = static_cast<uint16_t>((N >> 8) | ((N & 0xFF) << 8));
+};
+
+/** Vector version of SkEndianSwap16(), which swaps the
+ low two bytes of each value in the array.
+*/
+static inline void SkEndianSwap16s(uint16_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap16(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 4 bytes in a 32bit value.
+ e.g. 0x12345678 -> 0x78563412
+*/
+static constexpr uint32_t SkEndianSwap32(uint32_t value) {
+ return ((value & 0xFF) << 24) |
+ ((value & 0xFF00) << 8) |
+ ((value & 0xFF0000) >> 8) |
+ (value >> 24);
+}
+
+template<uint32_t N> struct SkTEndianSwap32 {
+ static const uint32_t value = ((N & 0xFF) << 24) |
+ ((N & 0xFF00) << 8) |
+ ((N & 0xFF0000) >> 8) |
+ (N >> 24);
+};
+
+/** Vector version of SkEndianSwap32(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap32s(uint32_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap32(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 8 bytes in a 64bit value.
+ e.g. 0x1122334455667788 -> 0x8877665544332211
+*/
+static inline uint64_t SkEndianSwap64(uint64_t value) {
+ return (((value & 0x00000000000000FFULL) << (8*7)) |
+ ((value & 0x000000000000FF00ULL) << (8*5)) |
+ ((value & 0x0000000000FF0000ULL) << (8*3)) |
+ ((value & 0x00000000FF000000ULL) << (8*1)) |
+ ((value & 0x000000FF00000000ULL) >> (8*1)) |
+ ((value & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((value & 0x00FF000000000000ULL) >> (8*5)) |
+ ((value) >> (8*7)));
+}
+template<uint64_t N> struct SkTEndianSwap64 {
+ static const uint64_t value = (((N & 0x00000000000000FFULL) << (8*7)) |
+ ((N & 0x000000000000FF00ULL) << (8*5)) |
+ ((N & 0x0000000000FF0000ULL) << (8*3)) |
+ ((N & 0x00000000FF000000ULL) << (8*1)) |
+ ((N & 0x000000FF00000000ULL) >> (8*1)) |
+ ((N & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((N & 0x00FF000000000000ULL) >> (8*5)) |
+ ((N) >> (8*7)));
+};
+
+/** Vector version of SkEndianSwap64(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap64s(uint64_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap64(*array);
+ array += 1;
+ }
+}
+
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_SwapBE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapBE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapBE64(n) SkEndianSwap64(n)
+ #define SkEndian_SwapLE16(n) (n)
+ #define SkEndian_SwapLE32(n) (n)
+ #define SkEndian_SwapLE64(n) (n)
+
+ #define SkTEndian_SwapBE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapBE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapBE64(n) SkTEndianSwap64<n>::value
+ #define SkTEndian_SwapLE16(n) (n)
+ #define SkTEndian_SwapLE32(n) (n)
+ #define SkTEndian_SwapLE64(n) (n)
+#else // SK_CPU_BENDIAN
+ #define SkEndian_SwapBE16(n) (n)
+ #define SkEndian_SwapBE32(n) (n)
+ #define SkEndian_SwapBE64(n) (n)
+ #define SkEndian_SwapLE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapLE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapLE64(n) SkEndianSwap64(n)
+
+ #define SkTEndian_SwapBE16(n) (n)
+ #define SkTEndian_SwapBE32(n) (n)
+ #define SkTEndian_SwapBE64(n) (n)
+ #define SkTEndian_SwapLE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapLE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapLE64(n) SkTEndianSwap64<n>::value
+#endif
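+
+// For example (a hedged sketch; 'beValue' is hypothetical): big-endian data such as
+// font tables can be converted with the BE macros, which compile to no-ops on
+// big-endian CPUs:
+//   uint32_t beValue; // 4 bytes read straight from a big-endian stream
+//   uint32_t native = SkEndian_SwapBE32(beValue);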
+
+// When a byte stream is embedded in a 32-bit word, these shifts say how far to
+// shift the word so each byte can be extracted from the low 8 bits by ANDing with 0xff.
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_Byte0Shift 0
+ #define SkEndian_Byte1Shift 8
+ #define SkEndian_Byte2Shift 16
+ #define SkEndian_Byte3Shift 24
+#else // SK_CPU_BENDIAN
+ #define SkEndian_Byte0Shift 24
+ #define SkEndian_Byte1Shift 16
+ #define SkEndian_Byte2Shift 8
+ #define SkEndian_Byte3Shift 0
+#endif
+
+
+#if defined(SK_UINT8_BITFIELD_LENDIAN) && defined(SK_UINT8_BITFIELD_BENDIAN)
+ #error "can't have both bitfield LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_UINT8_BITFIELD_LENDIAN) && !defined(SK_UINT8_BITFIELD_BENDIAN)
+ #ifdef SK_CPU_LENDIAN
+ #define SK_UINT8_BITFIELD_LENDIAN
+ #else
+ #define SK_UINT8_BITFIELD_BENDIAN
+ #endif
+#endif
+
+#ifdef SK_UINT8_BITFIELD_LENDIAN
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f0 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f7 : 1;
+#else
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f7 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f0 : 1;
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEnumerate.h b/gfx/skia/skia/src/core/SkEnumerate.h
new file mode 100644
index 0000000000..801aa54946
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEnumerate.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIota_DEFINED
+#define SkIota_DEFINED
+
+#include <cstddef>
+#include <iterator>
+#include <tuple>
+
+#include "include/private/SkTLogic.h"
+
+// SkEnumerate returns a tuple with an index and the value returned by the iterator. The index always
+// starts at 0.
+template <typename Iter, typename C = skstd::monostate>
+class SkEnumerate {
+ using Result = std::tuple<size_t, decltype(*std::declval<Iter>())>;
+
+ class Iterator {
+ public:
+ using value_type = Result;
+ using difference_type = ptrdiff_t;
+ using pointer = value_type*;
+ using reference = value_type;
+ using iterator_category = std::input_iterator_tag;
+ constexpr Iterator(ptrdiff_t index, Iter it) : fIndex{index}, fIt{it} { }
+ constexpr Iterator(const Iterator&) = default;
+ constexpr Iterator operator++() { ++fIndex; ++fIt; return *this; }
+ constexpr Iterator operator++(int) { Iterator tmp(*this); operator++(); return tmp; }
+ constexpr bool operator==(const Iterator& rhs) const { return fIt == rhs.fIt; }
+ constexpr bool operator!=(const Iterator& rhs) const { return fIt != rhs.fIt; }
+ constexpr reference operator*() { return std::forward_as_tuple(fIndex, *fIt); }
+
+ private:
+ ptrdiff_t fIndex;
+ Iter fIt;
+ };
+
+public:
+ constexpr SkEnumerate(Iter begin, Iter end) : fBegin{begin}, fEnd{end} { }
+ explicit constexpr SkEnumerate(C&& c)
+ : fCollection{std::move(c)}
+ , fBegin{std::begin(fCollection)}
+ , fEnd{std::end(fCollection)} { }
+ constexpr SkEnumerate(const SkEnumerate& that) = default;
+ constexpr SkEnumerate& operator=(const SkEnumerate& that) {
+ fBegin = that.fBegin;
+ fEnd = that.fEnd; return *this;
+ }
+ constexpr Iterator begin() const { return Iterator{0, fBegin}; }
+ constexpr Iterator end() const { return Iterator{fEnd - fBegin, fEnd}; }
+
+private:
+ C fCollection;
+ Iter fBegin;
+ Iter fEnd;
+};
+
+template <typename C, typename Iter = decltype(std::begin(std::declval<C>()))>
+inline constexpr SkEnumerate<Iter> SkMakeEnumerate(C& c) {
+ return SkEnumerate<Iter>{std::begin(c), std::end(c)};
+}
+template <typename C, typename Iter = decltype(std::begin(std::declval<C>()))>
+inline constexpr SkEnumerate<Iter, C> SkMakeEnumerate(C&& c) {
+ return SkEnumerate<Iter, C>{std::forward<C>(c)};
+}
+
+template <class T, std::size_t N, typename Iter = decltype(std::begin(std::declval<T(&)[N]>()))>
+inline constexpr SkEnumerate<Iter> SkMakeEnumerate(T (&a)[N]) {
+ return SkEnumerate<Iter>{std::begin(a), std::end(a)};
+}
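+
+// A minimal usage sketch (hedged; the array is illustrative):
+//   int vals[] = {10, 20, 30};
+//   for (auto [i, v] : SkMakeEnumerate(vals)) {
+//       // visits (0,10), (1,20), (2,30); i is the size_t index
+//   }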
+#endif // SkIota_DEFINED
diff --git a/gfx/skia/skia/src/core/SkExchange.h b/gfx/skia/skia/src/core/SkExchange.h
new file mode 100644
index 0000000000..50e2fe983d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkExchange.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkExchange_DEFINED
+#define SkExchange_DEFINED
+
+#include <utility>
+
+namespace skstd {
+
+// std::exchange is in C++14
+template<typename T, typename U = T>
+inline static T exchange(T& obj, U&& new_val) {
+ T old_val = std::move(obj);
+ obj = std::forward<U>(new_val);
+ return old_val;
+}
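+
+// A minimal usage sketch (hedged; 'fPtr' is hypothetical): hand off a value while
+// resetting the source, e.g. in a move constructor:
+//   fPtr = skstd::exchange(other.fPtr, nullptr);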
+
+}
+
+#endif // SkExchange_DEFINED
diff --git a/gfx/skia/skia/src/core/SkExecutor.cpp b/gfx/skia/skia/src/core/SkExecutor.cpp
new file mode 100644
index 0000000000..ce2ded28c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkExecutor.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkExecutor.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkSemaphore.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkMakeUnique.h"
+#include <deque>
+#include <thread>
+
+#if defined(SK_BUILD_FOR_WIN)
+ #include "src/core/SkLeanWindows.h"
+ static int num_cores() {
+ SYSTEM_INFO sysinfo;
+ GetNativeSystemInfo(&sysinfo);
+ return (int)sysinfo.dwNumberOfProcessors;
+ }
+#else
+ #include <unistd.h>
+ static int num_cores() {
+ return (int)sysconf(_SC_NPROCESSORS_ONLN);
+ }
+#endif
+
+SkExecutor::~SkExecutor() {}
+
+// Unless overridden, the default SkExecutor is an SkTrivialExecutor, which just runs the work right away.
+class SkTrivialExecutor final : public SkExecutor {
+ void add(std::function<void(void)> work) override {
+ work();
+ }
+};
+
+static SkExecutor* gDefaultExecutor = nullptr;
+
+void SetDefaultTrivialExecutor() {
+ static SkTrivialExecutor *gTrivial = new SkTrivialExecutor();
+ gDefaultExecutor = gTrivial;
+}
+SkExecutor& SkExecutor::GetDefault() {
+ if (!gDefaultExecutor) {
+ SetDefaultTrivialExecutor();
+ }
+ return *gDefaultExecutor;
+}
+void SkExecutor::SetDefault(SkExecutor* executor) {
+ if (executor) {
+ gDefaultExecutor = executor;
+ } else {
+ SetDefaultTrivialExecutor();
+ }
+}
+
+// We'll always push_back() new work, but pop from the front of deques or the back of SkTArray.
+static inline std::function<void(void)> pop(std::deque<std::function<void(void)>>* list) {
+ std::function<void(void)> fn = std::move(list->front());
+ list->pop_front();
+ return fn;
+}
+static inline std::function<void(void)> pop(SkTArray<std::function<void(void)>>* list) {
+ std::function<void(void)> fn = std::move(list->back());
+ list->pop_back();
+ return fn;
+}
+
+// An SkThreadPool is an executor that runs work on a fixed pool of OS threads.
+template <typename WorkList>
+class SkThreadPool final : public SkExecutor {
+public:
+ explicit SkThreadPool(int threads) {
+ for (int i = 0; i < threads; i++) {
+ fThreads.emplace_back(&Loop, this);
+ }
+ }
+
+ ~SkThreadPool() override {
+ // Signal each thread that it's time to shut down.
+ for (int i = 0; i < fThreads.count(); i++) {
+ this->add(nullptr);
+ }
+ // Wait for each thread to shut down.
+ for (int i = 0; i < fThreads.count(); i++) {
+ fThreads[i].join();
+ }
+ }
+
+ void add(std::function<void(void)> work) override {
+ // Add some work to our pile of work to do.
+ {
+ SkAutoMutexExclusive lock(fWorkLock);
+ fWork.emplace_back(std::move(work));
+ }
+ // Tell the Loop() threads to pick it up.
+ fWorkAvailable.signal(1);
+ }
+
+ void borrow() override {
+ // If there is work waiting, do it.
+ if (fWorkAvailable.try_wait()) {
+ SkAssertResult(this->do_work());
+ }
+ }
+
+private:
+ // This method should be called only when fWorkAvailable indicates there's work to do.
+ bool do_work() {
+ std::function<void(void)> work;
+ {
+ SkAutoMutexExclusive lock(fWorkLock);
+ SkASSERT(!fWork.empty()); // TODO: if (fWork.empty()) { return true; } ?
+ work = pop(&fWork);
+ }
+
+ if (!work) {
+ return false; // This is Loop()'s signal to shut down.
+ }
+
+ work();
+ return true;
+ }
+
+ static void Loop(void* ctx) {
+ auto pool = (SkThreadPool*)ctx;
+ do {
+ pool->fWorkAvailable.wait();
+ } while (pool->do_work());
+ }
+
+ // Both SkMutex and SkSpinlock can work here.
+ using Lock = SkMutex;
+
+ SkTArray<std::thread> fThreads;
+ WorkList fWork;
+ Lock fWorkLock;
+ SkSemaphore fWorkAvailable;
+};
+
+std::unique_ptr<SkExecutor> SkExecutor::MakeFIFOThreadPool(int threads) {
+ using WorkList = std::deque<std::function<void(void)>>;
+ return skstd::make_unique<SkThreadPool<WorkList>>(threads > 0 ? threads : num_cores());
+}
+std::unique_ptr<SkExecutor> SkExecutor::MakeLIFOThreadPool(int threads) {
+ using WorkList = SkTArray<std::function<void(void)>>;
+ return skstd::make_unique<SkThreadPool<WorkList>>(threads > 0 ? threads : num_cores());
+}
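+
+// A minimal usage sketch (hedged): passing 0 sizes the pool to num_cores().
+//   std::unique_ptr<SkExecutor> pool = SkExecutor::MakeFIFOThreadPool(0);
+//   pool->add([] { /* some independent task */ });
+//   // ~SkThreadPool() joins the worker threads when 'pool' is destroyed.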
diff --git a/gfx/skia/skia/src/core/SkFDot6.h b/gfx/skia/skia/src/core/SkFDot6.h
new file mode 100644
index 0000000000..037a434fd3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFDot6.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFDot6_DEFINED
+#define SkFDot6_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkTo.h"
+
+typedef int32_t SkFDot6;
+
+/* This uses the magic number approach suggested here:
+ * http://stereopsis.com/sree/fpu2006.html and used in
+ * _cairo_fixed_from_double. It does banker's rounding
+ * (i.e. round to nearest even)
+ */
+inline SkFDot6 SkScalarRoundToFDot6(SkScalar x, int shift = 0)
+{
+ union {
+ double fDouble;
+ int32_t fBits[2];
+ } tmp;
+ int fractionalBits = 6 + shift;
+ double magic = (1LL << (52 - (fractionalBits))) * 1.5;
+
+ tmp.fDouble = SkScalarToDouble(x) + magic;
+#ifdef SK_CPU_BENDIAN
+ return tmp.fBits[1];
+#else
+ return tmp.fBits[0];
+#endif
+}
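+
+// A worked example (not from the source): with shift == 0 there are 6 fractional
+// bits, so SkScalarRoundToFDot6(1.25f) == 80, since 1.25 * 64 == 80.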
+
+#define SK_FDot6One (64)
+#define SK_FDot6Half (32)
+
+#ifdef SK_DEBUG
+ inline SkFDot6 SkIntToFDot6(int x) {
+ SkASSERT(SkToS16(x) == x);
+ return x << 6;
+ }
+#else
+ #define SkIntToFDot6(x) ((x) << 6)
+#endif
+
+#define SkFDot6Floor(x) ((x) >> 6)
+#define SkFDot6Ceil(x) (((x) + 63) >> 6)
+#define SkFDot6Round(x) (((x) + 32) >> 6)
+
+#define SkFixedToFDot6(x) ((x) >> 10)
+
+inline SkFixed SkFDot6ToFixed(SkFDot6 x) {
+ SkASSERT((SkLeftShift(x, 10) >> 10) == x);
+
+ return SkLeftShift(x, 10);
+}
+
+#define SkScalarToFDot6(x) (SkFDot6)((x) * 64)
+#define SkFDot6ToScalar(x) ((SkScalar)(x) * 0.015625f)
+#define SkFDot6ToFloat SkFDot6ToScalar
+
+inline SkFixed SkFDot6Div(SkFDot6 a, SkFDot6 b) {
+ SkASSERT(b != 0);
+
+ if (SkTFitsIn<int16_t>(a)) {
+ return SkLeftShift(a, 16) / b;
+ } else {
+ return SkFixedDiv(a, b);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFixed15.h b/gfx/skia/skia/src/core/SkFixed15.h
new file mode 100644
index 0000000000..d50337f243
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFixed15.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFixed15_DEFINED
+#define SkFixed15_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// SkFixed15 is a fixed point value that represents values in [0,1] as [0x0000, 0x8000].
+// This mapping allows us to implement most operations in tightly packed 16-bit SIMD,
+// most notably multiplying using Q15 multiplication instructions (and a little fixup).
+
+class SkFixed15 {
+public:
+ SkFixed15() = default;
+
+ SkFixed15(float val) : fVal(val * 32768) { SkASSERT(0.0f <= val && val <= 1.0f); }
+ explicit operator float() const { return fVal * (1/32768.0f); }
+
+ static SkFixed15 Load(uint16_t val) {
+ SkASSERT(val <= 32768);
+ return val;
+ }
+ uint16_t store() const { return fVal; }
+
+ static SkFixed15 FromU8(uint8_t val) {
+ return val*128 + (val>>1) // 32768/255 == 128.50196..., which is very close to 128 + 0.5.
+ + ((val+1)>>8); // Every val but 255 is already correct; +1 when val == 255 to reach 32768.
+ }
+
+ uint8_t to_u8() const {
+ // FromU8() and to_u8() roundtrip all bytes.
+ // There is still much room to tweak this towards the ideal, a rounding scale by 255/32768.
+ return (fVal - (fVal>>8))>>7;
+ }
+
+ SkFixed15 operator +(SkFixed15 o) const { return fVal + o.fVal; }
+ SkFixed15 operator -(SkFixed15 o) const { return fVal - o.fVal; }
+ SkFixed15 operator *(SkFixed15 o) const { return (fVal * o.fVal + (1<<14)) >> 15; }
+ SkFixed15 operator<<(int bits) const { return fVal << bits; }
+ SkFixed15 operator>>(int bits) const { return fVal >> bits; }
+
+ SkFixed15& operator +=(SkFixed15 o) { return (*this = *this + o); }
+ SkFixed15& operator -=(SkFixed15 o) { return (*this = *this - o); }
+ SkFixed15& operator *=(SkFixed15 o) { return (*this = *this * o); }
+ SkFixed15& operator<<=(int bits) { return (*this = *this << bits); }
+ SkFixed15& operator>>=(int bits) { return (*this = *this >> bits); }
+
+ bool operator==(SkFixed15 o) const { return fVal == o.fVal; }
+ bool operator!=(SkFixed15 o) const { return fVal != o.fVal; }
+ bool operator<=(SkFixed15 o) const { return fVal <= o.fVal; }
+ bool operator>=(SkFixed15 o) const { return fVal >= o.fVal; }
+ bool operator< (SkFixed15 o) const { return fVal < o.fVal; }
+ bool operator> (SkFixed15 o) const { return fVal > o.fVal; }
+
+private:
+ SkFixed15(int val) : fVal(val) {}
+
+ uint16_t fVal;
+};
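+
+// A worked example (hedged, not from the source): 0.5f is stored as 16384 (0x4000),
+// so 0.5 * 0.5 computes (16384*16384 + (1<<14)) >> 15 == 8192 == 0x2000, i.e. 0.25.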
+
+// Notes
+// - SSSE3+ multiply is _mm_abs_epi16(_mm_mulhrs_epi16(x, y));
+// - NEON multiply is vsraq_n_u16(vabsq_s16(vqrdmulhq_s16(x,y)),
+// vandq_s16(x,y), 15);
+// - Conversion to and from float can be done manually with bit masks and float add/subtract,
+// rather than the naive version here involving int<->float conversion and float multiply.
+// - On x86, conversion to float is _mm_sub_ps(_mm_unpacklo_epi16(x, _mm_set1_epi16(0x4380)),
+// _mm_set1_ps(256.0f)). // 0x43800000
+// - On ARM, we can use the vcvtq_n_f32_u32(vmovl_u16(x), 15) to convert to float,
+// and vcvtq_n_u32_f32(..., 15) for the other way around.
+
+#endif//SkFixed15_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFlattenable.cpp b/gfx/skia/skia/src/core/SkFlattenable.cpp
new file mode 100644
index 0000000000..9cff53af08
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFlattenable.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+
+#include <algorithm>
+
+SkNamedFactorySet::SkNamedFactorySet() : fNextAddedFactory(0) {}
+
+uint32_t SkNamedFactorySet::find(SkFlattenable::Factory factory) {
+ uint32_t index = fFactorySet.find(factory);
+ if (index > 0) {
+ return index;
+ }
+ const char* name = SkFlattenable::FactoryToName(factory);
+ if (nullptr == name) {
+ return 0;
+ }
+ *fNames.append() = name;
+ return fFactorySet.add(factory);
+}
+
+const char* SkNamedFactorySet::getNextAddedFactoryName() {
+ if (fNextAddedFactory < fNames.count()) {
+ return fNames[fNextAddedFactory++];
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRefCntSet::~SkRefCntSet() {
+ // call this now, while our decPtr() is still in scope
+ this->reset();
+}
+
+void SkRefCntSet::incPtr(void* ptr) {
+ ((SkRefCnt*)ptr)->ref();
+}
+
+void SkRefCntSet::decPtr(void* ptr) {
+ ((SkRefCnt*)ptr)->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+struct Entry {
+ const char* fName;
+ SkFlattenable::Factory fFactory;
+};
+
+struct EntryComparator {
+ bool operator()(const Entry& a, const Entry& b) const {
+ return strcmp(a.fName, b.fName) < 0;
+ }
+ bool operator()(const Entry& a, const char* b) const {
+ return strcmp(a.fName, b) < 0;
+ }
+ bool operator()(const char* a, const Entry& b) const {
+ return strcmp(a, b.fName) < 0;
+ }
+};
+
+int gCount = 0;
+Entry gEntries[128];
+
+} // namespace
+
+void SkFlattenable::Finalize() {
+ std::sort(gEntries, gEntries + gCount, EntryComparator());
+}
+
+void SkFlattenable::Register(const char name[], Factory factory) {
+ SkASSERT(name);
+ SkASSERT(factory);
+ SkASSERT(gCount < (int)SK_ARRAY_COUNT(gEntries));
+
+ gEntries[gCount].fName = name;
+ gEntries[gCount].fFactory = factory;
+ gCount += 1;
+}
+
+SkFlattenable::Factory SkFlattenable::NameToFactory(const char name[]) {
+ RegisterFlattenablesIfNeeded();
+
+ SkASSERT(std::is_sorted(gEntries, gEntries + gCount, EntryComparator()));
+ auto pair = std::equal_range(gEntries, gEntries + gCount, name, EntryComparator());
+ if (pair.first == pair.second) {
+ return nullptr;
+ }
+ return pair.first->fFactory;
+}
+
+const char* SkFlattenable::FactoryToName(Factory fact) {
+ RegisterFlattenablesIfNeeded();
+
+ const Entry* entries = gEntries;
+ for (int i = gCount - 1; i >= 0; --i) {
+ if (entries[i].fFactory == fact) {
+ return entries[i].fName;
+ }
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkFlattenable::serialize(const SkSerialProcs* procs) const {
+ SkBinaryWriteBuffer writer;
+ if (procs) {
+ writer.setSerialProcs(*procs);
+ }
+ writer.writeFlattenable(this);
+ size_t size = writer.bytesWritten();
+ auto data = SkData::MakeUninitialized(size);
+ writer.writeToMemory(data->writable_data());
+ return data;
+}
+
+size_t SkFlattenable::serialize(void* memory, size_t memory_size,
+ const SkSerialProcs* procs) const {
+ SkBinaryWriteBuffer writer(memory, memory_size);
+ if (procs) {
+ writer.setSerialProcs(*procs);
+ }
+ writer.writeFlattenable(this);
+ return writer.usingInitialStorage() ? writer.bytesWritten() : 0u;
+}
+
+sk_sp<SkFlattenable> SkFlattenable::Deserialize(SkFlattenable::Type type, const void* data,
+ size_t size, const SkDeserialProcs* procs) {
+ SkReadBuffer buffer(data, size);
+ if (procs) {
+ buffer.setDeserialProcs(*procs);
+ }
+ return sk_sp<SkFlattenable>(buffer.readFlattenable(type));
+}
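+
+// A minimal round-trip sketch (hedged; 'fl' and its concrete type are hypothetical,
+// and getFlattenableType() is assumed from the SkFlattenable interface):
+//   sk_sp<SkData> data = fl->serialize(nullptr);
+//   sk_sp<SkFlattenable> copy = SkFlattenable::Deserialize(
+//           fl->getFlattenableType(), data->data(), data->size());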
diff --git a/gfx/skia/skia/src/core/SkFont.cpp b/gfx/skia/skia/src/core/SkFont.cpp
new file mode 100644
index 0000000000..fdfbf66a96
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFont.cpp
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkPaintDefaults.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkUtils.h"
+#include "src/utils/SkUTF.h"
+
+#define kDefault_Size SkPaintDefaults_TextSize
+#define kDefault_Flags SkFont::kBaselineSnap_PrivFlag
+#define kDefault_Edging SkFont::Edging::kAntiAlias
+#define kDefault_Hinting SkPaintDefaults_Hinting
+
+static inline SkScalar valid_size(SkScalar size) {
+ return SkTMax<SkScalar>(0, size);
+}
+
+SkFont::SkFont(sk_sp<SkTypeface> face, SkScalar size, SkScalar scaleX, SkScalar skewX)
+ : fTypeface(std::move(face))
+ , fSize(valid_size(size))
+ , fScaleX(scaleX)
+ , fSkewX(skewX)
+ , fFlags(kDefault_Flags)
+ , fEdging(static_cast<unsigned>(kDefault_Edging))
+ , fHinting(static_cast<unsigned>(kDefault_Hinting))
+{}
+
+SkFont::SkFont(sk_sp<SkTypeface> face, SkScalar size) : SkFont(std::move(face), size, 1, 0) {}
+
+SkFont::SkFont(sk_sp<SkTypeface> face) : SkFont(std::move(face), kDefault_Size, 1, 0) {}
+
+SkFont::SkFont() : SkFont(nullptr, kDefault_Size) {}
+
+bool SkFont::operator==(const SkFont& b) const {
+ return fTypeface.get() == b.fTypeface.get() &&
+ fSize == b.fSize &&
+ fScaleX == b.fScaleX &&
+ fSkewX == b.fSkewX &&
+ fFlags == b.fFlags &&
+ fEdging == b.fEdging &&
+ fHinting == b.fHinting;
+}
+
+void SkFont::dump() const {
+ SkDebugf("typeface %p\n", fTypeface.get());
+ SkDebugf("size %g\n", fSize);
+ SkDebugf("skewx %g\n", fSkewX);
+ SkDebugf("scalex %g\n", fScaleX);
+ SkDebugf("flags 0x%X\n", fFlags);
+ SkDebugf("edging %d\n", (unsigned)fEdging);
+ SkDebugf("hinting %d\n", (unsigned)fHinting);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline uint32_t set_clear_mask(uint32_t bits, bool cond, uint32_t mask) {
+ return cond ? bits | mask : bits & ~mask;
+}
+
+void SkFont::setForceAutoHinting(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kForceAutoHinting_PrivFlag);
+}
+void SkFont::setEmbeddedBitmaps(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kEmbeddedBitmaps_PrivFlag);
+}
+void SkFont::setSubpixel(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kSubpixel_PrivFlag);
+}
+void SkFont::setLinearMetrics(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kLinearMetrics_PrivFlag);
+}
+void SkFont::setEmbolden(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kEmbolden_PrivFlag);
+}
+void SkFont::setBaselineSnap(bool predicate) {
+ fFlags = set_clear_mask(fFlags, predicate, kBaselineSnap_PrivFlag);
+}
+void SkFont::setEdging(Edging e) {
+ fEdging = SkToU8(e);
+}
+
+void SkFont::setHinting(SkFontHinting h) {
+ fHinting = SkToU8(h);
+}
+
+void SkFont::setSize(SkScalar size) {
+ fSize = valid_size(size);
+}
+void SkFont::setScaleX(SkScalar scale) {
+ fScaleX = scale;
+}
+void SkFont::setSkewX(SkScalar skew) {
+ fSkewX = skew;
+}
+
+SkFont SkFont::makeWithSize(SkScalar newSize) const {
+ SkFont font = *this;
+ font.setSize(newSize);
+ return font;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkFont::setupForAsPaths(SkPaint* paint) {
+ constexpr uint32_t flagsToIgnore = kEmbeddedBitmaps_PrivFlag |
+ kForceAutoHinting_PrivFlag;
+
+ fFlags = (fFlags & ~flagsToIgnore) | kSubpixel_PrivFlag;
+ this->setHinting(SkFontHinting::kNone);
+
+ if (this->getEdging() == Edging::kSubpixelAntiAlias) {
+ this->setEdging(Edging::kAntiAlias);
+ }
+
+ if (paint) {
+ paint->setStyle(SkPaint::kFill_Style);
+ paint->setPathEffect(nullptr);
+ }
+ SkScalar textSize = fSize;
+ this->setSize(SkIntToScalar(SkFontPriv::kCanonicalTextSizeForPaths));
+ return textSize / SkFontPriv::kCanonicalTextSizeForPaths;
+}
+
+bool SkFont::hasSomeAntiAliasing() const {
+ Edging edging = this->getEdging();
+ return edging == SkFont::Edging::kAntiAlias
+ || edging == SkFont::Edging::kSubpixelAntiAlias;
+}
+
+SkGlyphID SkFont::unicharToGlyph(SkUnichar uni) const {
+ return this->getTypefaceOrDefault()->unicharToGlyph(uni);
+}
+
+void SkFont::unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
+ this->getTypefaceOrDefault()->unicharsToGlyphs(uni, count, glyphs);
+}
+
+class SkConvertToUTF32 {
+public:
+ SkConvertToUTF32() {}
+
+ const SkUnichar* convert(const void* text, size_t byteLength, SkTextEncoding encoding) {
+ const SkUnichar* uni;
+ switch (encoding) {
+ case SkTextEncoding::kUTF8: {
+ uni = fStorage.reset(byteLength);
+ const char* ptr = (const char*)text;
+ const char* end = ptr + byteLength;
+ for (int i = 0; ptr < end; ++i) {
+ fStorage[i] = SkUTF::NextUTF8(&ptr, end);
+ }
+ } break;
+ case SkTextEncoding::kUTF16: {
+ uni = fStorage.reset(byteLength);
+ const uint16_t* ptr = (const uint16_t*)text;
+ const uint16_t* end = ptr + (byteLength >> 1);
+ for (int i = 0; ptr < end; ++i) {
+ fStorage[i] = SkUTF::NextUTF16(&ptr, end);
+ }
+ } break;
+ case SkTextEncoding::kUTF32:
+ uni = (const SkUnichar*)text;
+ break;
+ default:
+ SK_ABORT("unexpected enum");
+ }
+ return uni;
+ }
+
+private:
+ SkAutoSTMalloc<256, SkUnichar> fStorage;
+};
+
+int SkFont::textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const {
+ if (0 == byteLength) {
+ return 0;
+ }
+
+ SkASSERT(text);
+
+ int count = SkFontPriv::CountTextElements(text, byteLength, encoding);
+ if (!glyphs || count > maxGlyphCount) {
+ return count;
+ }
+
+ if (encoding == SkTextEncoding::kGlyphID) {
+ memcpy(glyphs, text, count << 1);
+ return count;
+ }
+
+ SkConvertToUTF32 storage;
+ const SkUnichar* uni = storage.convert(text, byteLength, encoding);
+
+ this->getTypefaceOrDefault()->unicharsToGlyphs(uni, count, glyphs);
+ return count;
+}
+
+SkScalar SkFont::measureText(const void* text, size_t length, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const {
+
+ SkAutoToGlyphs atg(*this, text, length, encoding);
+ const int glyphCount = atg.count();
+ if (glyphCount == 0) {
+ if (bounds) {
+ bounds->setEmpty();
+ }
+ return 0;
+ }
+ const SkGlyphID* glyphIDs = atg.glyphs();
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeCanonicalized(*this, paint);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkMakeSpan(glyphIDs, glyphCount));
+
+ SkScalar width = 0;
+ if (bounds) {
+ *bounds = glyphs[0]->rect();
+ width = glyphs[0]->advanceX();
+ for (int i = 1; i < glyphCount; ++i) {
+ SkRect r = glyphs[i]->rect();
+ r.offset(width, 0);
+ bounds->join(r);
+ width += glyphs[i]->advanceX();
+ }
+ } else {
+ for (auto glyph : glyphs) {
+ width += glyph->advanceX();
+ }
+ }
+
+ const SkScalar scale = strikeSpec.strikeToSourceRatio();
+ if (scale != 1) {
+ width *= scale;
+ if (bounds) {
+ bounds->fLeft *= scale;
+ bounds->fTop *= scale;
+ bounds->fRight *= scale;
+ bounds->fBottom *= scale;
+ }
+ }
+
+ return width;
+}
+
+void SkFont::getWidthsBounds(const SkGlyphID glyphIDs[],
+ int count,
+ SkScalar widths[],
+ SkRect bounds[],
+ const SkPaint* paint) const {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeCanonicalized(*this, paint);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkMakeSpan(glyphIDs, count));
+
+ SkScalar scale = strikeSpec.strikeToSourceRatio();
+
+ if (bounds) {
+ SkMatrix scaleMat = SkMatrix::MakeScale(scale);
+ SkRect* cursor = bounds;
+ for (auto glyph : glyphs) {
+ scaleMat.mapRectScaleTranslate(cursor++, glyph->rect());
+ }
+ }
+
+ if (widths) {
+ SkScalar* cursor = widths;
+ for (auto glyph : glyphs) {
+ *cursor++ = glyph->advanceX() * scale;
+ }
+ }
+}
+
+void SkFont::getPos(const SkGlyphID glyphIDs[], int count, SkPoint pos[], SkPoint origin) const {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeCanonicalized(*this);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkMakeSpan(glyphIDs, count));
+
+ SkPoint sum = origin;
+ for (auto glyph : glyphs) {
+ *pos++ = sum;
+ sum += glyph->advanceVector() * strikeSpec.strikeToSourceRatio();
+ }
+}
+
+void SkFont::getXPos(
+ const SkGlyphID glyphIDs[], int count, SkScalar xpos[], SkScalar origin) const {
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeCanonicalized(*this);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkMakeSpan(glyphIDs, count));
+
+ SkScalar loc = origin;
+ SkScalar* cursor = xpos;
+ for (auto glyph : glyphs) {
+ *cursor++ = loc;
+ loc += glyph->advanceX() * strikeSpec.strikeToSourceRatio();
+ }
+}
+
+void SkFont::getPaths(const SkGlyphID glyphIDs[], int count,
+ void (*proc)(const SkPath*, const SkMatrix&, void*), void* ctx) const {
+ SkFont font(*this);
+ SkScalar scale = font.setupForAsPaths(nullptr);
+ const SkMatrix mx = SkMatrix::MakeScale(scale);
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(font);
+ SkBulkGlyphMetricsAndPaths paths{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = paths.glyphs(SkMakeSpan(glyphIDs, count));
+
+ for (auto glyph : glyphs) {
+ proc(glyph->path(), mx, ctx);
+ }
+}
+
+bool SkFont::getPath(SkGlyphID glyphID, SkPath* path) const {
+ struct Pair {
+ SkPath* fPath;
+ bool fWasSet;
+ } pair = { path, false };
+
+ this->getPaths(&glyphID, 1, [](const SkPath* orig, const SkMatrix& mx, void* ctx) {
+ Pair* pair = static_cast<Pair*>(ctx);
+ if (orig) {
+ orig->transform(mx, pair->fPath);
+ pair->fWasSet = true;
+ }
+ }, &pair);
+ return pair.fWasSet;
+}
+
+SkScalar SkFont::getMetrics(SkFontMetrics* metrics) const {
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeCanonicalized(*this, nullptr);
+
+ SkFontMetrics storage;
+ if (nullptr == metrics) {
+ metrics = &storage;
+ }
+
+ auto cache = strikeSpec.findOrCreateExclusiveStrike();
+ *metrics = cache->getFontMetrics();
+
+ if (strikeSpec.strikeToSourceRatio() != 1) {
+ SkFontPriv::ScaleFontMetrics(metrics, strikeSpec.strikeToSourceRatio());
+ }
+ return metrics->fDescent - metrics->fAscent + metrics->fLeading;
+}
+
+SkTypeface* SkFont::getTypefaceOrDefault() const {
+ return fTypeface ? fTypeface.get() : SkTypeface::GetDefaultTypeface();
+}
+
+sk_sp<SkTypeface> SkFont::refTypefaceOrDefault() const {
+ return fTypeface ? fTypeface : SkTypeface::MakeDefault();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkFontPriv::ScaleFontMetrics(SkFontMetrics* metrics, SkScalar scale) {
+ metrics->fTop *= scale;
+ metrics->fAscent *= scale;
+ metrics->fDescent *= scale;
+ metrics->fBottom *= scale;
+ metrics->fLeading *= scale;
+ metrics->fAvgCharWidth *= scale;
+ metrics->fMaxCharWidth *= scale;
+ metrics->fXMin *= scale;
+ metrics->fXMax *= scale;
+ metrics->fXHeight *= scale;
+ metrics->fCapHeight *= scale;
+ metrics->fUnderlineThickness *= scale;
+ metrics->fUnderlinePosition *= scale;
+ metrics->fStrikeoutThickness *= scale;
+ metrics->fStrikeoutPosition *= scale;
+}
+
+SkRect SkFontPriv::GetFontBounds(const SkFont& font) {
+ SkMatrix m;
+ m.setScale(font.getSize() * font.getScaleX(), font.getSize());
+ m.postSkew(font.getSkewX(), 0);
+
+ SkTypeface* typeface = font.getTypefaceOrDefault();
+
+ SkRect bounds;
+ m.mapRect(&bounds, typeface->getBounds());
+ return bounds;
+}
+
+int SkFontPriv::CountTextElements(const void* text, size_t byteLength, SkTextEncoding encoding) {
+ switch (encoding) {
+ case SkTextEncoding::kUTF8:
+ return SkUTF::CountUTF8(reinterpret_cast<const char*>(text), byteLength);
+ case SkTextEncoding::kUTF16:
+ return SkUTF::CountUTF16(reinterpret_cast<const uint16_t*>(text), byteLength);
+ case SkTextEncoding::kUTF32:
+ return byteLength >> 2;
+ case SkTextEncoding::kGlyphID:
+ return byteLength >> 1;
+ }
+ SkASSERT(false);
+ return 0;
+}
+
+void SkFontPriv::GlyphsToUnichars(const SkFont& font, const SkGlyphID glyphs[], int count,
+ SkUnichar text[]) {
+ if (count <= 0) {
+ return;
+ }
+
+ auto typeface = font.getTypefaceOrDefault();
+ const unsigned numGlyphsInTypeface = typeface->countGlyphs();
+ SkAutoTArray<SkUnichar> unichars(numGlyphsInTypeface);
+ typeface->getGlyphToUnicodeMap(unichars.get());
+
+ for (int i = 0; i < count; ++i) {
+ unsigned id = glyphs[i];
+ text[i] = (id < numGlyphsInTypeface) ? unichars[id] : 0xFFFD;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+// packed int at the beginning of the serialized font:
+//
+// control_bits:8 size_as_byte:8 flags:12 edging:2 hinting:2
+
+enum {
+ kSize_Is_Byte_Bit = 1 << 31,
+ kHas_ScaleX_Bit = 1 << 30,
+ kHas_SkewX_Bit = 1 << 29,
+ kHas_Typeface_Bit = 1 << 28,
+
+ kShift_for_Size = 16,
+ kMask_For_Size = 0xFF,
+
+ kShift_For_Flags = 4,
+ kMask_For_Flags = 0xFFF,
+
+ kShift_For_Edging = 2,
+ kMask_For_Edging = 0x3,
+
+ kShift_For_Hinting = 0,
+ kMask_For_Hinting = 0x3
+};
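+
+// Illustrative sketch (not upstream code): packing a font whose size is the
+// integral byte value 12, with flags 0x001, Edging::kAntiAlias (1), and
+// SkFontHinting::kNormal (2):
+//
+// packed = kSize_Is_Byte_Bit // 0x80000000
+// | (12 << kShift_for_Size) // 0x800C0000
+// | (0x001 << kShift_For_Flags) // 0x800C0010
+// | (1 << kShift_For_Edging) // 0x800C0014
+// | (2 << kShift_For_Hinting); // 0x800C0016
+//
+// Because kSize_Is_Byte_Bit is set, Flatten() writes no separate size scalar
+// after this word.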
+
+static bool scalar_is_byte(SkScalar x) {
+ int ix = (int)x;
+ return ix == x && ix >= 0 && ix <= kMask_For_Size;
+}
+
+void SkFontPriv::Flatten(const SkFont& font, SkWriteBuffer& buffer) {
+ SkASSERT(font.fFlags <= SkFont::kAllFlags);
+ SkASSERT((font.fFlags & ~kMask_For_Flags) == 0);
+ SkASSERT((font.fEdging & ~kMask_For_Edging) == 0);
+ SkASSERT((font.fHinting & ~kMask_For_Hinting) == 0);
+
+ uint32_t packed = 0;
+ packed |= font.fFlags << kShift_For_Flags;
+ packed |= font.fEdging << kShift_For_Edging;
+ packed |= font.fHinting << kShift_For_Hinting;
+
+ if (scalar_is_byte(font.fSize)) {
+ packed |= kSize_Is_Byte_Bit;
+ packed |= (int)font.fSize << kShift_for_Size;
+ }
+ if (font.fScaleX != 1) {
+ packed |= kHas_ScaleX_Bit;
+ }
+ if (font.fSkewX != 0) {
+ packed |= kHas_SkewX_Bit;
+ }
+ if (font.fTypeface) {
+ packed |= kHas_Typeface_Bit;
+ }
+
+ buffer.write32(packed);
+ if (!(packed & kSize_Is_Byte_Bit)) {
+ buffer.writeScalar(font.fSize);
+ }
+ if (packed & kHas_ScaleX_Bit) {
+ buffer.writeScalar(font.fScaleX);
+ }
+ if (packed & kHas_SkewX_Bit) {
+ buffer.writeScalar(font.fSkewX);
+ }
+ if (packed & kHas_Typeface_Bit) {
+ buffer.writeTypeface(font.fTypeface.get());
+ }
+}
+
+bool SkFontPriv::Unflatten(SkFont* font, SkReadBuffer& buffer) {
+ const uint32_t packed = buffer.read32();
+
+ if (packed & kSize_Is_Byte_Bit) {
+ font->fSize = (packed >> kShift_for_Size) & kMask_For_Size;
+ } else {
+ font->fSize = buffer.readScalar();
+ }
+ if (packed & kHas_ScaleX_Bit) {
+ font->fScaleX = buffer.readScalar();
+ }
+ if (packed & kHas_SkewX_Bit) {
+ font->fSkewX = buffer.readScalar();
+ }
+ if (packed & kHas_Typeface_Bit) {
+ font->fTypeface = buffer.readTypeface();
+ }
+
+ SkASSERT(SkFont::kAllFlags <= kMask_For_Flags);
+ // we & with kAllFlags, to clear out any unknown flag bits
+ font->fFlags = SkToU8((packed >> kShift_For_Flags) & SkFont::kAllFlags);
+
+ unsigned edging = (packed >> kShift_For_Edging) & kMask_For_Edging;
+ if (edging > (unsigned)SkFont::Edging::kSubpixelAntiAlias) {
+ edging = 0;
+ }
+ font->fEdging = SkToU8(edging);
+
+ unsigned hinting = (packed >> kShift_For_Hinting) & kMask_For_Hinting;
+ if (hinting > (unsigned)SkFontHinting::kFull) {
+ hinting = 0;
+ }
+ font->fHinting = SkToU8(hinting);
+
+ return buffer.isValid();
+}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.cpp b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
new file mode 100644
index 0000000000..c47781e450
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+
+enum {
+ kInvalid = 0x00,
+
+ // these must match the sfnt 'name' enums
+ kFontFamilyName = 0x01,
+ kFullName = 0x04,
+ kPostscriptName = 0x06,
+
+ // These count backwards from 0xFF, so as not to collide with the SFNT
+ // defines for names in its 'name' table.
+ kFontAxes = 0xFB,
+ kFontAxes_bad = 0xFC, // Broken negative axes, remove when MIN_PICTURE_VERSION > 62.
+ kFontIndex = 0xFD,
+ kSentinel = 0xFF,
+};
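+
+// Illustrative sketch (not upstream code) of the wire format written by
+// serialize() and consumed by Deserialize() below: a packed style word, then
+// (id, payload) records terminated by kSentinel, then the font data length
+// (0 when no font data is embedded):
+//
+// packedUInt styleBits
+// packedUInt kFontFamilyName, packedUInt length, <length bytes>
+// packedUInt kFontIndex, packedUInt index (only written when non-zero)
+// packedUInt kSentinel
+// packedUInt fontDataLength, <fontDataLength bytes>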
+
+SkFontDescriptor::SkFontDescriptor() { }
+
+static bool SK_WARN_UNUSED_RESULT read_string(SkStream* stream, SkString* string) {
+ size_t length;
+ if (!stream->readPackedUInt(&length)) { return false; }
+ if (length > 0) {
+ string->resize(length);
+ if (stream->read(string->writable_str(), length) != length) { return false; }
+ }
+ return true;
+}
+
+static bool write_string(SkWStream* stream, const SkString& string, uint32_t id) {
+ if (string.isEmpty()) { return true; }
+ return stream->writePackedUInt(id) &&
+ stream->writePackedUInt(string.size()) &&
+ stream->write(string.c_str(), string.size());
+}
+
+static bool write_uint(SkWStream* stream, size_t n, uint32_t id) {
+ return stream->writePackedUInt(id) &&
+ stream->writePackedUInt(n);
+}
+
+static size_t SK_WARN_UNUSED_RESULT read_id(SkStream* stream) {
+ size_t i;
+ if (!stream->readPackedUInt(&i)) { return kInvalid; }
+ return i;
+}
+
+bool SkFontDescriptor::Deserialize(SkStream* stream, SkFontDescriptor* result) {
+ size_t styleBits;
+ if (!stream->readPackedUInt(&styleBits)) { return false; }
+ result->fStyle = SkFontStyle((styleBits >> 16) & 0xFFFF,
+ (styleBits >> 8 ) & 0xFF,
+ static_cast<SkFontStyle::Slant>(styleBits & 0xFF));
+
+ SkAutoSTMalloc<4, SkFixed> axis;
+ size_t axisCount = 0;
+ size_t index = 0;
+ for (size_t id; (id = read_id(stream)) != kSentinel;) {
+ switch (id) {
+ case kFontFamilyName:
+ if (!read_string(stream, &result->fFamilyName)) { return false; }
+ break;
+ case kFullName:
+ if (!read_string(stream, &result->fFullName)) { return false; }
+ break;
+ case kPostscriptName:
+ if (!read_string(stream, &result->fPostscriptName)) { return false; }
+ break;
+ case kFontAxes:
+ if (!stream->readPackedUInt(&axisCount)) { return false; }
+ axis.reset(axisCount);
+ for (size_t i = 0; i < axisCount; ++i) {
+ if (!stream->readS32(&axis[i])) { return false; }
+ }
+ break;
+ case kFontAxes_bad:
+ if (!stream->readPackedUInt(&axisCount)) { return false; }
+ axis.reset(axisCount);
+ for (size_t i = 0; i < axisCount; ++i) {
+ size_t packedAxis;
+ if (!stream->readPackedUInt(&packedAxis)) { return false; }
+ axis[i] = packedAxis;
+ }
+ break;
+ case kFontIndex:
+ if (!stream->readPackedUInt(&index)) { return false; }
+ break;
+ default:
+ SkDEBUGFAIL("Unknown id used by a font descriptor");
+ return false;
+ }
+ }
+
+ size_t length;
+ if (!stream->readPackedUInt(&length)) { return false; }
+ if (length > 0) {
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+ if (stream->read(data->writable_data(), length) != length) {
+ SkDEBUGFAIL("Could not read font data");
+ return false;
+ }
+ result->fFontData = skstd::make_unique<SkFontData>(
+ SkMemoryStream::Make(std::move(data)), index, axis, axisCount);
+ }
+ return true;
+}
+
+void SkFontDescriptor::serialize(SkWStream* stream) const {
+ uint32_t styleBits = (fStyle.weight() << 16) | (fStyle.width() << 8) | (fStyle.slant());
+ stream->writePackedUInt(styleBits);
+
+ write_string(stream, fFamilyName, kFontFamilyName);
+ write_string(stream, fFullName, kFullName);
+ write_string(stream, fPostscriptName, kPostscriptName);
+ if (fFontData.get()) {
+ if (fFontData->getIndex()) {
+ write_uint(stream, fFontData->getIndex(), kFontIndex);
+ }
+ if (fFontData->getAxisCount()) {
+ write_uint(stream, fFontData->getAxisCount(), kFontAxes);
+ for (int i = 0; i < fFontData->getAxisCount(); ++i) {
+ stream->write32(fFontData->getAxis()[i]);
+ }
+ }
+ }
+
+ stream->writePackedUInt(kSentinel);
+
+ if (fFontData.get() && fFontData->hasStream()) {
+ std::unique_ptr<SkStreamAsset> fontStream = fFontData->detachStream();
+ size_t length = fontStream->getLength();
+ stream->writePackedUInt(length);
+ stream->writeStream(fontStream.get(), length);
+ } else {
+ stream->writePackedUInt(0);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.h b/gfx/skia/skia/src/core/SkFontDescriptor.h
new file mode 100644
index 0000000000..8f16996aad
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontDescriptor_DEFINED
+#define SkFontDescriptor_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkFontData {
+public:
+ /** Makes a copy of the data in 'axis'. */
+ SkFontData(std::unique_ptr<SkStreamAsset> stream, int index, const SkFixed axis[], int axisCount)
+ : fStream(std::move(stream)), fIndex(index), fAxisCount(axisCount), fAxis(axisCount)
+ {
+ for (int i = 0; i < axisCount; ++i) {
+ fAxis[i] = axis[i];
+ }
+ }
+ SkFontData(const SkFontData& that)
+ : fStream(that.fStream->duplicate())
+ , fIndex(that.fIndex)
+ , fAxisCount(that.fAxisCount)
+ , fAxis(fAxisCount)
+ {
+ for (int i = 0; i < fAxisCount; ++i) {
+ fAxis[i] = that.fAxis[i];
+ }
+ }
+ bool hasStream() const { return fStream.get() != nullptr; }
+ std::unique_ptr<SkStreamAsset> detachStream() { return std::move(fStream); }
+ SkStreamAsset* getStream() { return fStream.get(); }
+ SkStreamAsset const* getStream() const { return fStream.get(); }
+ int getIndex() const { return fIndex; }
+ int getAxisCount() const { return fAxisCount; }
+ const SkFixed* getAxis() const { return fAxis.get(); }
+
+private:
+ std::unique_ptr<SkStreamAsset> fStream;
+ int fIndex;
+ int fAxisCount;
+ SkAutoSTMalloc<4, SkFixed> fAxis;
+};
+
+class SkFontDescriptor : SkNoncopyable {
+public:
+ SkFontDescriptor();
+ // Does not affect ownership of SkStream.
+ static bool Deserialize(SkStream*, SkFontDescriptor* result);
+
+ void serialize(SkWStream*) const;
+
+ SkFontStyle getStyle() const { return fStyle; }
+ void setStyle(SkFontStyle style) { fStyle = style; }
+
+ const char* getFamilyName() const { return fFamilyName.c_str(); }
+ const char* getFullName() const { return fFullName.c_str(); }
+ const char* getPostscriptName() const { return fPostscriptName.c_str(); }
+ bool hasFontData() const { return fFontData.get() != nullptr; }
+ std::unique_ptr<SkFontData> detachFontData() { return std::move(fFontData); }
+
+ void setFamilyName(const char* name) { fFamilyName.set(name); }
+ void setFullName(const char* name) { fFullName.set(name); }
+ void setPostscriptName(const char* name) { fPostscriptName.set(name); }
+ /** Set the font data only if it is necessary for serialization. */
+ void setFontData(std::unique_ptr<SkFontData> data) { fFontData = std::move(data); }
+
+private:
+ SkString fFamilyName;
+ SkString fFullName;
+ SkString fPostscriptName;
+ std::unique_ptr<SkFontData> fFontData;
+
+ SkFontStyle fStyle;
+};
+
+#endif // SkFontDescriptor_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontLCDConfig.cpp b/gfx/skia/skia/src/core/SkFontLCDConfig.cpp
new file mode 100644
index 0000000000..f902e2c7ae
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontLCDConfig.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontLCDConfig.h"
+
+static SkFontLCDConfig::LCDOrientation gLCDOrientation = SkFontLCDConfig::kHorizontal_LCDOrientation;
+static SkFontLCDConfig::LCDOrder gLCDOrder = SkFontLCDConfig::kRGB_LCDOrder;
+
+SkFontLCDConfig::LCDOrientation SkFontLCDConfig::GetSubpixelOrientation() {
+ return gLCDOrientation;
+}
+
+void SkFontLCDConfig::SetSubpixelOrientation(LCDOrientation orientation) {
+ gLCDOrientation = orientation;
+}
+
+SkFontLCDConfig::LCDOrder SkFontLCDConfig::GetSubpixelOrder() {
+ return gLCDOrder;
+}
+
+void SkFontLCDConfig::SetSubpixelOrder(LCDOrder order) {
+ gLCDOrder = order;
+}
diff --git a/gfx/skia/skia/src/core/SkFontMgr.cpp b/gfx/skia/skia/src/core/SkFontMgr.cpp
new file mode 100644
index 0000000000..d42b07d384
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMgr.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkFontDescriptor.h"
+
+class SkFontStyle;
+class SkTypeface;
+
+class SkEmptyFontStyleSet : public SkFontStyleSet {
+public:
+ int count() override { return 0; }
+ void getStyle(int, SkFontStyle*, SkString*) override {
+ SkDEBUGFAIL("SkFontStyleSet::getStyle called on empty set");
+ }
+ SkTypeface* createTypeface(int index) override {
+ SkDEBUGFAIL("SkFontStyleSet::createTypeface called on empty set");
+ return nullptr;
+ }
+ SkTypeface* matchStyle(const SkFontStyle&) override {
+ return nullptr;
+ }
+};
+
+SkFontStyleSet* SkFontStyleSet::CreateEmpty() { return new SkEmptyFontStyleSet; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkEmptyFontMgr : public SkFontMgr {
+protected:
+ int onCountFamilies() const override {
+ return 0;
+ }
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkDEBUGFAIL("onGetFamilyName called with bad index");
+ }
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SkDEBUGFAIL("onCreateStyleSet called with bad index");
+ return nullptr;
+ }
+ SkFontStyleSet* onMatchFamily(const char[]) const override {
+ return SkFontStyleSet::CreateEmpty();
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char[], const SkFontStyle&) const override {
+ return nullptr;
+ }
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+ SkTypeface* onMatchFaceStyle(const SkTypeface*, const SkFontStyle&) const override {
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int) const override {
+ return nullptr;
+ }
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int) const override {
+ return nullptr;
+ }
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const override {
+ return nullptr;
+ }
+ sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData>) const override {
+ return nullptr;
+ }
+ sk_sp<SkTypeface> onMakeFromFile(const char[], int) const override {
+ return nullptr;
+ }
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char [], SkFontStyle) const override {
+ return nullptr;
+ }
+};
+
+static SkFontStyleSet* emptyOnNull(SkFontStyleSet* fsset) {
+ if (nullptr == fsset) {
+ fsset = SkFontStyleSet::CreateEmpty();
+ }
+ return fsset;
+}
+
+int SkFontMgr::countFamilies() const {
+ return this->onCountFamilies();
+}
+
+void SkFontMgr::getFamilyName(int index, SkString* familyName) const {
+ this->onGetFamilyName(index, familyName);
+}
+
+SkFontStyleSet* SkFontMgr::createStyleSet(int index) const {
+ return emptyOnNull(this->onCreateStyleSet(index));
+}
+
+SkFontStyleSet* SkFontMgr::matchFamily(const char familyName[]) const {
+ return emptyOnNull(this->onMatchFamily(familyName));
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyle(const char familyName[],
+ const SkFontStyle& fs) const {
+ return this->onMatchFamilyStyle(familyName, fs);
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyleCharacter(const char familyName[], const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const {
+ return this->onMatchFamilyStyleCharacter(familyName, style, bcp47, bcp47Count, character);
+}
+
+SkTypeface* SkFontMgr::matchFaceStyle(const SkTypeface* face,
+ const SkFontStyle& fs) const {
+ return this->onMatchFaceStyle(face, fs);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ if (nullptr == data) {
+ return nullptr;
+ }
+ return this->onMakeFromData(std::move(data), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ if (nullptr == stream) {
+ return nullptr;
+ }
+ return this->onMakeFromStreamIndex(std::move(stream), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ if (nullptr == stream) {
+ return nullptr;
+ }
+ return this->onMakeFromStreamArgs(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromFontData(std::unique_ptr<SkFontData> data) const {
+ if (nullptr == data) {
+ return nullptr;
+ }
+ return this->onMakeFromFontData(std::move(data));
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromFile(const char path[], int ttcIndex) const {
+ if (nullptr == path) {
+ return nullptr;
+ }
+ return this->onMakeFromFile(path, ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::legacyMakeTypeface(const char familyName[], SkFontStyle style) const {
+ return this->onLegacyMakeTypeface(familyName, style);
+}
+
+sk_sp<SkTypeface> SkFontMgr::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ return this->makeFromStream(std::move(stream), args.getCollectionIndex());
+}
+sk_sp<SkTypeface> SkFontMgr::onMakeFromFontData(std::unique_ptr<SkFontData> data) const {
+ return this->makeFromStream(data->detachStream(), data->getIndex());
+}
+
+// A global function pointer that's not declared, but can be overridden at startup by test tools.
+sk_sp<SkFontMgr> (*gSkFontMgr_DefaultFactory)() = nullptr;
+
+sk_sp<SkFontMgr> SkFontMgr::RefDefault() {
+ static SkOnce once;
+ static sk_sp<SkFontMgr> singleton;
+
+ once([]{
+ sk_sp<SkFontMgr> fm = gSkFontMgr_DefaultFactory ? gSkFontMgr_DefaultFactory()
+ : SkFontMgr::Factory();
+ singleton = fm ? std::move(fm) : sk_make_sp<SkEmptyFontMgr>();
+ });
+ return singleton;
+}
+
+/**
+* Width has the greatest priority.
+* If the value of pattern.width is 5 (normal) or less,
+* narrower width values are checked first, then wider values.
+* If the value of pattern.width is greater than 5 (normal),
+* wider values are checked first, followed by narrower values.
+*
+* Italic/Oblique has the next highest priority.
+* If italic requested and there is some italic font, use it.
+* If oblique requested and there is some oblique font, use it.
+* If italic requested and there is some oblique font, use it.
+* If oblique requested and there is some italic font, use it.
+*
+* Exact match.
+* If pattern.weight < 400, weights below pattern.weight are checked
+* in descending order followed by weights above pattern.weight
+* in ascending order until a match is found.
+* If pattern.weight > 500, weights above pattern.weight are checked
+* in ascending order followed by weights below pattern.weight
+* in descending order until a match is found.
+* If pattern.weight is 400, 500 is checked first
+* and then the rule for pattern.weight < 400 is used.
+* If pattern.weight is 500, 400 is checked first
+* and then the rule for pattern.weight < 400 is used.
+*/
+SkTypeface* SkFontStyleSet::matchStyleCSS3(const SkFontStyle& pattern) {
+ int count = this->count();
+ if (0 == count) {
+ return nullptr;
+ }
+
+ struct Score {
+ int score;
+ int index;
+ Score& operator +=(int rhs) { this->score += rhs; return *this; }
+ Score& operator <<=(int rhs) { this->score <<= rhs; return *this; }
+ bool operator <(const Score& that) { return this->score < that.score; }
+ };
+
+ Score maxScore = { 0, 0 };
+ for (int i = 0; i < count; ++i) {
+ SkFontStyle current;
+ this->getStyle(i, &current, nullptr);
+ Score currentScore = { 0, i };
+
+ // CSS stretch / SkFontStyle::Width
+ // Takes priority over everything else.
+ if (pattern.width() <= SkFontStyle::kNormal_Width) {
+ if (current.width() <= pattern.width()) {
+ currentScore += 10 - pattern.width() + current.width();
+ } else {
+ currentScore += 10 - current.width();
+ }
+ } else {
+ if (current.width() > pattern.width()) {
+ currentScore += 10 + pattern.width() - current.width();
+ } else {
+ currentScore += current.width();
+ }
+ }
+ currentScore <<= 8;
+
+ // CSS style (normal, italic, oblique) / SkFontStyle::Slant (upright, italic, oblique)
+ // Takes priority over all valid weights.
+ static_assert(SkFontStyle::kUpright_Slant == 0 &&
+ SkFontStyle::kItalic_Slant == 1 &&
+ SkFontStyle::kOblique_Slant == 2,
+ "SkFontStyle::Slant values not as required.");
+ SkASSERT(0 <= pattern.slant() && pattern.slant() <= 2 &&
+ 0 <= current.slant() && current.slant() <= 2);
+ static const int score[3][3] = {
+ /* Upright Italic Oblique [current]*/
+ /* Upright */ { 3 , 1 , 2 },
+ /* Italic */ { 1 , 3 , 2 },
+ /* Oblique */ { 1 , 2 , 3 },
+ /* [pattern] */
+ };
+ currentScore += score[pattern.slant()][current.slant()];
+ currentScore <<= 8;
+
+ // Synthetics (weight, style) [no stretch synthetic?]
+
+ // CSS weight / SkFontStyle::Weight
+ // The 'closer' to the target weight, the higher the score.
+ // 1000 is the 'heaviest' recognized weight
+ if (pattern.weight() == current.weight()) {
+ currentScore += 1000;
+ // less than 400 prefer lighter weights
+ } else if (pattern.weight() < 400) {
+ if (current.weight() <= pattern.weight()) {
+ currentScore += 1000 - pattern.weight() + current.weight();
+ } else {
+ currentScore += 1000 - current.weight();
+ }
+ // between 400 and 500 prefer heavier up to 500, then lighter weights
+ } else if (pattern.weight() <= 500) {
+ if (current.weight() >= pattern.weight() && current.weight() <= 500) {
+ currentScore += 1000 + pattern.weight() - current.weight();
+ } else if (current.weight() <= pattern.weight()) {
+ currentScore += 500 + current.weight();
+ } else {
+ currentScore += 1000 - current.weight();
+ }
+ // greater than 500 prefer heavier weights
+ } else if (pattern.weight() > 500) {
+ if (current.weight() > pattern.weight()) {
+ currentScore += 1000 + pattern.weight() - current.weight();
+ } else {
+ currentScore += current.weight();
+ }
+ }
+
+ if (maxScore < currentScore) {
+ maxScore = currentScore;
+ }
+ }
+
+ return this->createTypeface(maxScore.index);
+}
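+
+// Illustrative sketch (not upstream code): for a pattern of {width 5, upright,
+// weight 400} and two candidates differing only in weight, the scoring above
+// picks weight 500 over weight 300, matching the CSS rule that 400 checks 500
+// first:
+//
+// weight 500: 1000 + 400 - 500 = 900 (heavier, but still <= 500)
+// weight 300: 500 + 300 = 800 (lighter fallback)
+//
+// Both candidates tie on the higher-priority width and slant terms, so the
+// weight term decides.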
diff --git a/gfx/skia/skia/src/core/SkFontMgrPriv.h b/gfx/skia/skia/src/core/SkFontMgrPriv.h
new file mode 100644
index 0000000000..40cf264037
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMgrPriv.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkFontMgrPriv_DEFINED
+#define SkFontMgrPriv_DEFINED
+
+#include "include/core/SkFontMgr.h"
+
+extern sk_sp<SkFontMgr> (*gSkFontMgr_DefaultFactory)();
+
+#endif // SkFontMgrPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontPriv.h b/gfx/skia/skia/src/core/SkFontPriv.h
new file mode 100644
index 0000000000..42fdc29688
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontPriv.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontPriv_DEFINED
+#define SkFontPriv_DEFINED
+
+#include "include/core/SkFont.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkTypeface.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkFontPriv {
+public:
+ /* This is the size we use when we ask for a glyph's path. We then
+ * post-transform it as we draw to match the request.
+ * This is done to try to re-use cache entries for the path.
+ *
+ * This value is somewhat arbitrary. In theory, it could be 1, since
+ * we store paths as floats. However, we get the path from the font
+ * scaler, and it may represent its paths as fixed-point (or 26.6),
+ * so we shouldn't ask for something too big (might overflow 16.16)
+ * or too small (underflow 26.6).
+ *
+ * This value could track kMaxSizeForGlyphCache, assuming the above
+ * constraints, but since we ask for unhinted paths, the two values
+ * need not match per-se.
+ */
+ static constexpr int kCanonicalTextSizeForPaths = 64;
+
+ /**
+ * Return a matrix that applies the paint's text values: size, scale, skew
+ */
+ static SkMatrix MakeTextMatrix(SkScalar size, SkScalar scaleX, SkScalar skewX) {
+ SkMatrix m = SkMatrix::MakeScale(size * scaleX, size);
+ if (skewX) {
+ m.postSkew(skewX, 0);
+ }
+ return m;
+ }
+
+ static SkMatrix MakeTextMatrix(const SkFont& font) {
+ return MakeTextMatrix(font.getSize(), font.getScaleX(), font.getSkewX());
+ }
+
+ static void ScaleFontMetrics(SkFontMetrics*, SkScalar);
+
+ /**
+ Returns the union of bounds of all glyphs.
+ Returned dimensions are computed by font manager from font data,
+ ignoring SkPaint::Hinting. Includes font metrics, but not fake bold or SkPathEffect.
+
+ If text size is large, text scale is one, and text skew is zero,
+ returns the bounds as:
+ { SkFontMetrics::fXMin, SkFontMetrics::fTop, SkFontMetrics::fXMax, SkFontMetrics::fBottom }.
+
+ @return union of bounds of all glyphs
+ */
+ static SkRect GetFontBounds(const SkFont&);
+
+ static bool IsFinite(const SkFont& font) {
+ return SkScalarIsFinite(font.getSize()) &&
+ SkScalarIsFinite(font.getScaleX()) &&
+ SkScalarIsFinite(font.getSkewX());
+ }
+
+ // Returns the number of elements (characters or glyphs) in the array.
+ static int CountTextElements(const void* text, size_t byteLength, SkTextEncoding);
+
+ static void GlyphsToUnichars(const SkFont&, const uint16_t glyphs[], int count, SkUnichar[]);
+
+ static void Flatten(const SkFont&, SkWriteBuffer& buffer);
+ static bool Unflatten(SkFont*, SkReadBuffer& buffer);
+};
+
+class SkAutoToGlyphs {
+public:
+ SkAutoToGlyphs(const SkFont& font, const void* text, size_t length, SkTextEncoding encoding) {
+ if (encoding == SkTextEncoding::kGlyphID || length == 0) {
+ fGlyphs = reinterpret_cast<const uint16_t*>(text);
+ fCount = length >> 1;
+ } else {
+ fCount = font.countText(text, length, encoding);
+ fStorage.reset(fCount);
+ font.textToGlyphs(text, length, encoding, fStorage.get(), fCount);
+ fGlyphs = fStorage.get();
+ }
+ }
+
+ int count() const { return fCount; }
+ const uint16_t* glyphs() const { return fGlyphs; }
+
+private:
+ SkAutoSTArray<32, uint16_t> fStorage;
+ const uint16_t* fGlyphs;
+ int fCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFontStream.cpp b/gfx/skia/skia/src/core/SkFontStream.cpp
new file mode 100644
index 0000000000..54c8cabf2f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkFontStream.h"
+
+struct SkSFNTHeader {
+ uint32_t fVersion;
+ uint16_t fNumTables;
+ uint16_t fSearchRange;
+ uint16_t fEntrySelector;
+ uint16_t fRangeShift;
+};
+
+struct SkTTCFHeader {
+ uint32_t fTag;
+ uint32_t fVersion;
+ uint32_t fNumOffsets;
+ uint32_t fOffset0; // the first of N (fNumOffsets)
+};
+
+union SkSharedTTHeader {
+ SkSFNTHeader fSingle;
+ SkTTCFHeader fCollection;
+};
+
+struct SkSFNTDirEntry {
+ uint32_t fTag;
+ uint32_t fChecksum;
+ uint32_t fOffset;
+ uint32_t fLength;
+};
+
+static bool read(SkStream* stream, void* buffer, size_t amount) {
+ return stream->read(buffer, amount) == amount;
+}
+
+static bool skip(SkStream* stream, size_t amount) {
+ return stream->skip(amount) == amount;
+}
+
+/** Return the number of tables, or if this is a TTC (collection), return the
+ number of tables in the first element of the collection. In either case,
+ if offsetToDir is not-null, set it to the offset to the beginning of the
+ table headers (SkSFNTDirEntry), relative to the start of the stream.
+
+ On an error, return 0 for number of tables, and ignore offsetToDir
+ */
+static int count_tables(SkStream* stream, int ttcIndex, size_t* offsetToDir) {
+ SkASSERT(ttcIndex >= 0);
+
+ SkAutoSMalloc<1024> storage(sizeof(SkSharedTTHeader));
+ SkSharedTTHeader* header = (SkSharedTTHeader*)storage.get();
+
+ if (!read(stream, header, sizeof(SkSharedTTHeader))) {
+ return 0;
+ }
+
+ // by default, SkSFNTHeader is at the start of the stream
+ size_t offset = 0;
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(header->fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ unsigned count = SkEndian_SwapBE32(header->fCollection.fNumOffsets);
+ if ((unsigned)ttcIndex >= count) {
+ return 0;
+ }
+
+ if (ttcIndex > 0) { // need to read more of the shared header
+ stream->rewind();
+ size_t amount = sizeof(SkSharedTTHeader) + ttcIndex * sizeof(uint32_t);
+ header = (SkSharedTTHeader*)storage.reset(amount);
+ if (!read(stream, header, amount)) {
+ return 0;
+ }
+ }
+ // this is the offset to the local SkSFNTHeader
+ offset = SkEndian_SwapBE32((&header->fCollection.fOffset0)[ttcIndex]);
+ stream->rewind();
+ if (!skip(stream, offset)) {
+ return 0;
+ }
+ if (!read(stream, header, sizeof(SkSFNTHeader))) {
+ return 0;
+ }
+ }
+
+ if (offsetToDir) {
+ // add the size of the header, so we will point to the DirEntries
+ *offsetToDir = offset + sizeof(SkSFNTHeader);
+ }
+ return SkEndian_SwapBE16(header->fSingle.fNumTables);
+}
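+
+// Illustrative sketch (not upstream code) of the two layouts handled above,
+// with all fields big-endian: a plain sfnt begins directly with an
+// SkSFNTHeader followed by fNumTables SkSFNTDirEntry records, while a
+// collection begins with the 'ttcf' tag, a version, fNumOffsets, and then
+// fNumOffsets 32-bit offsets, each pointing at an embedded SkSFNTHeader and
+// its directory.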
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SfntHeader {
+ SfntHeader() : fCount(0), fDir(nullptr) {}
+ ~SfntHeader() { sk_free(fDir); }
+
+ /** If it returns true, then fCount and fDir are properly initialized.
+ Note: fDir will point to the raw array of SkSFNTDirEntry values,
+ meaning they will still be in the file's native endianness (BE).
+
+ fDir will be automatically freed when this object is destroyed
+ */
+ bool init(SkStream* stream, int ttcIndex) {
+ stream->rewind();
+
+ size_t offsetToDir;
+ fCount = count_tables(stream, ttcIndex, &offsetToDir);
+ if (0 == fCount) {
+ return false;
+ }
+
+ stream->rewind();
+ if (!skip(stream, offsetToDir)) {
+ return false;
+ }
+
+ size_t size = fCount * sizeof(SkSFNTDirEntry);
+ fDir = reinterpret_cast<SkSFNTDirEntry*>(sk_malloc_throw(size));
+ return read(stream, fDir, size);
+ }
+
+ int fCount;
+ SkSFNTDirEntry* fDir;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStream::CountTTCEntries(SkStream* stream) {
+ stream->rewind();
+
+ SkSharedTTHeader shared;
+ if (!read(stream, &shared, sizeof(shared))) {
+ return 0;
+ }
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(shared.fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ return SkEndian_SwapBE32(shared.fCollection.fNumOffsets);
+ } else {
+ return 1; // normal 'sfnt' has 1 dir entry
+ }
+}
+
+int SkFontStream::GetTableTags(SkStream* stream, int ttcIndex,
+ SkFontTableTag tags[]) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ if (tags) {
+ for (int i = 0; i < header.fCount; i++) {
+ tags[i] = SkEndian_SwapBE32(header.fDir[i].fTag);
+ }
+ }
+ return header.fCount;
+}
+
+size_t SkFontStream::GetTableData(SkStream* stream, int ttcIndex,
+ SkFontTableTag tag,
+ size_t offset, size_t length, void* data) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ for (int i = 0; i < header.fCount; i++) {
+ if (SkEndian_SwapBE32(header.fDir[i].fTag) == tag) {
+ size_t realOffset = SkEndian_SwapBE32(header.fDir[i].fOffset);
+ size_t realLength = SkEndian_SwapBE32(header.fDir[i].fLength);
+ // now sanity check the caller's offset/length
+ if (offset >= realLength) {
+ return 0;
+ }
+ // if the caller is trusting the length from the file, then a
+ // hostile file might choose a value which would overflow offset +
+ // length.
+ if (offset + length < offset) {
+ return 0;
+ }
+ if (length > realLength - offset) {
+ length = realLength - offset;
+ }
+ if (data) {
+ // skip the stream to the part of the table we want to copy from
+ stream->rewind();
+ size_t bytesToSkip = realOffset + offset;
+ if (!skip(stream, bytesToSkip)) {
+ return 0;
+ }
+ if (!read(stream, data, length)) {
+ return 0;
+ }
+ }
+ return length;
+ }
+ }
+ return 0;
+}
diff --git a/gfx/skia/skia/src/core/SkFontStream.h b/gfx/skia/skia/src/core/SkFontStream.h
new file mode 100644
index 0000000000..57f0e85137
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStream_DEFINED
+#define SkFontStream_DEFINED
+
+class SkStream;
+
+#include "include/core/SkTypeface.h"
+
+class SkFontStream {
+public:
+ /**
+ * Return the number of shared directories inside a TTC sfnt, or return 1
+ * if the stream is a normal sfnt (ttf). If there is an error or
+ * no directory is found, return 0.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int CountTTCEntries(SkStream*);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int GetTableTags(SkStream*, int ttcIndex, SkFontTableTag tags[]);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static size_t GetTableData(SkStream*, int ttcIndex, SkFontTableTag tag,
+ size_t offset, size_t length, void* data);
+
+ static size_t GetTableSize(SkStream* stream, int ttcIndex, SkFontTableTag tag) {
+ return GetTableData(stream, ttcIndex, tag, 0, ~0U, nullptr);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp b/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp
new file mode 100644
index 0000000000..829d0d3fe6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This file is intentionally empty. We add it to the dependencies of skia_lib
+// so that GYP detects that libskia is a C++ library (implicitly depending on
+// the standard library, -lm, etc.) from its file extension.
+//
+// If we didn't do this, GYP would link libskia.so as a C library and we'd get
+// link-time failures for simple binaries that don't themselves depend on the
+// C++ standard library.
+//
+// Even if we try hard not to depend on the standard library, say, never
+// calling new or delete, the compiler can still insert calls on our behalf
+// that make us depend on it anyway: a handler when we call a pure
+// virtual, thread-safety guards around statics, probably other similar
+// language constructs.
diff --git a/gfx/skia/skia/src/core/SkFuzzLogging.h b/gfx/skia/skia/src/core/SkFuzzLogging.h
new file mode 100644
index 0000000000..8e546e3a06
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFuzzLogging.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFuzzLogging_DEFINED
+#define SkFuzzLogging_DEFINED
+
+// Utilities for Skia's fuzzer
+
+// When SK_FUZZ_LOGGING is defined SkDebugfs relevant to image filter fuzzing
+// will be enabled. This allows the filter fuzzing code to whitelist fuzzer
+// failures based on the output logs.
+// Define this flag in your SkUserConfig.h or in your Make/Build system.
+#ifdef SK_FUZZ_LOGGING
+ #define SkFUZZF(args) SkDebugf("SkFUZZ: "); SkDebugf args
+#else
+ #define SkFUZZF(args)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGaussFilter.cpp b/gfx/skia/skia/src/core/SkGaussFilter.cpp
new file mode 100644
index 0000000000..e80092e6e1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGaussFilter.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkFloatingPoint.h"
+#include "src/core/SkGaussFilter.h"
+#include <cmath>
+
+// The cutoff at which we can stop expanding the filter. The spec implies that 3% is
+// acceptable, but we just use 1%.
+static constexpr double kGoodEnough = 1.0 / 100.0;
+
+// Normalize the values of gauss so that they sum to one.
+// NB if n == 1, then this will force gauss[0] == 1.
+static void normalize(int n, double* gauss) {
+ // Carefully add from smallest to largest to calculate the normalizing sum.
+ double sum = 0;
+ for (int i = n-1; i >= 1; i--) {
+ sum += 2 * gauss[i];
+ }
+ sum += gauss[0];
+
+ // Normalize gauss.
+ for (int i = 0; i < n; i++) {
+ gauss[i] /= sum;
+ }
+
+ // The factors should sum to 1. Take any remaining slop, and add it to gauss[0]. Add the
+ // values in such a way to maintain the most accuracy.
+ sum = 0;
+ for (int i = n - 1; i >= 1; i--) {
+ sum += 2 * gauss[i];
+ }
+
+ gauss[0] = 1 - sum;
+}
+
+static int calculate_bessel_factors(double sigma, double *gauss) {
+ auto var = sigma * sigma;
+
+ // The two functions below come from the equations in "Handbook of Mathematical Functions"
+ // by Abramowitz and Stegun. Specifically, equation 9.6.10 on page 375. Bessel0 is given
+ // explicitly as 9.6.12
+ // BesselI_0 for 0 <= sigma < 2.
+ // NB the k = 0 factor is just sum = 1.0.
+ auto besselI_0 = [](double t) -> double {
+ auto tSquaredOver4 = t * t / 4.0;
+ auto sum = 1.0;
+ auto factor = 1.0;
+ auto k = 1;
+ // Use a variable number of loops. When sigma is small, this only requires 3-4 loops, but
+ // when sigma is near 2, it could require 10 loops. The same holds for BesselI_1.
+ while (factor > 1.0/1000000.0) {
+ factor *= tSquaredOver4 / (k * k);
+ sum += factor;
+ k += 1;
+ }
+ return sum;
+ };
+ // BesselI_1 for 0 <= sigma < 2.
+ auto besselI_1 = [](double t) -> double {
+ auto tSquaredOver4 = t * t / 4.0;
+ auto sum = t / 2.0;
+ auto factor = sum;
+ auto k = 1;
+ while (factor > 1.0/1000000.0) {
+ factor *= tSquaredOver4 / (k * (k + 1));
+ sum += factor;
+ k += 1;
+ }
+ return sum;
+ };
+
+ // The following formula for calculating the Gaussian kernel is from
+ // "Scale-Space for Discrete Signals" by Tony Lindeberg.
+ // gauss(n; var) = besselI_n(var) / (e^var)
+ auto d = std::exp(var);
+ double b[SkGaussFilter::kGaussArrayMax] = {besselI_0(var), besselI_1(var)};
+ gauss[0] = b[0]/d;
+ gauss[1] = b[1]/d;
+
+ // The code below is tricky, and written to mirror the recursive equations from the book.
+ // The maximum spread for sigma == 2 is gauss[4], but gauss[5] must be calculated in
+ // order to know when to stop. At that point n == 5, meaning that gauss[0..4] are the
+ // factors, but a 6th element was used to calculate them.
+ int n = 1;
+ // The recurrence relation below is from "Numerical Recipes" 3rd Edition.
+ // Equation 6.5.16 p.282
+ while (gauss[n] > kGoodEnough) {
+ b[n+1] = -(2*n/var) * b[n] + b[n-1];
+ gauss[n+1] = b[n+1] / d;
+ n += 1;
+ }
+
+ normalize(n, gauss);
+
+ return n;
+}
+
+SkGaussFilter::SkGaussFilter(double sigma) {
+ SkASSERT(0 <= sigma && sigma < 2);
+
+ fN = calculate_bessel_factors(sigma, fBasis);
+}
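+
+// Illustrative sketch (not upstream code): applying the factors to one row of
+// samples. fBasis[0] weights the center sample and each later factor weights
+// the matching pair of neighbors, so the weights sum to one overall. 'src' and
+// 'x' are hypothetical, and src must be padded by radius() on both sides:
+//
+// SkGaussFilter filter{1.0};
+// const double* w = filter.begin();
+// double blurred = w[0] * src[x];
+// for (int i = 1; i < (int)filter.size(); ++i) {
+// blurred += w[i] * (src[x - i] + src[x + i]);
+// }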
diff --git a/gfx/skia/skia/src/core/SkGaussFilter.h b/gfx/skia/skia/src/core/SkGaussFilter.h
new file mode 100644
index 0000000000..1cf6eee1bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGaussFilter.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGaussFilter_DEFINED
+#define SkGaussFilter_DEFINED
+
+#include <cstddef>
+
+// Defines Gaussian filters for values of sigma < 2, producing values good to 1 part in
+// 1,000,000, as defined in "Scale-Space for Discrete Signals" by Tony Lindeberg.
+class SkGaussFilter {
+public:
+ static constexpr int kGaussArrayMax = 6;
+
+ explicit SkGaussFilter(double sigma);
+
+ size_t size() const { return fN; }
+ int radius() const { return fN - 1; }
+ int width() const { return 2 * this->radius() + 1; }
+
+ // Allow a filter to be used in a C++ ranged-for loop.
+ const double* begin() const { return &fBasis[0]; }
+ const double* end() const { return &fBasis[fN]; }
+
+private:
+ double fBasis[kGaussArrayMax];
+ int fN;
+};
+
+#endif // SkGaussFilter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGeometry.cpp b/gfx/skia/skia/src/core/SkGeometry.cpp
new file mode 100644
index 0000000000..daed95579f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.cpp
@@ -0,0 +1,1491 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint3.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+
+#include <utility>
+
+static SkVector to_vector(const Sk2s& x) {
+ SkVector vector;
+ x.store(&vector);
+ return vector;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+static int is_not_monotonic(SkScalar a, SkScalar b, SkScalar c) {
+ SkScalar ab = a - b;
+ SkScalar bc = b - c;
+ if (ab < 0) {
+ bc = -bc;
+ }
+ return ab == 0 || bc < 0;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+static int valid_unit_divide(SkScalar numer, SkScalar denom, SkScalar* ratio) {
+ SkASSERT(ratio);
+
+ if (numer < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+
+ if (denom == 0 || numer == 0 || numer >= denom) {
+ return 0;
+ }
+
+ SkScalar r = numer / denom;
+ if (SkScalarIsNaN(r)) {
+ return 0;
+ }
+ SkASSERTF(r >= 0 && r < SK_Scalar1, "numer %f, denom %f, r %f", numer, denom, r);
+ if (r == 0) { // catch underflow if numer <<<< denom
+ return 0;
+ }
+ *ratio = r;
+ return 1;
+}
+
+// Just returns its argument, but makes it easy to set a break-point to know when
+// SkFindUnitQuadRoots is going to return 0 (an error).
+static int return_check_zero(int value) {
+ if (value == 0) {
+ return 0;
+ }
+ return value;
+}
+
+/** From Numerical Recipes in C.
+
+ Q = -1/2 (B + sign(B) sqrt[B*B - 4*A*C])
+ x1 = Q / A
+ x2 = C / Q
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]) {
+ SkASSERT(roots);
+
+ if (A == 0) {
+ return return_check_zero(valid_unit_divide(-C, B, roots));
+ }
+
+ SkScalar* r = roots;
+
+ // use doubles so we don't overflow temporarily trying to compute R
+ double dr = (double)B * B - 4 * (double)A * C;
+ if (dr < 0) {
+ return return_check_zero(0);
+ }
+ dr = sqrt(dr);
+ SkScalar R = SkDoubleToScalar(dr);
+ if (!SkScalarIsFinite(R)) {
+ return return_check_zero(0);
+ }
+
+ SkScalar Q = (B < 0) ? -(B-R)/2 : -(B+R)/2;
+ r += valid_unit_divide(Q, A, r);
+ r += valid_unit_divide(C, Q, r);
+ if (r - roots == 2) {
+ if (roots[0] > roots[1]) {
+ using std::swap;
+ swap(roots[0], roots[1]);
+ } else if (roots[0] == roots[1]) { // nearly-equal?
+ r -= 1; // skip the double root
+ }
+ }
+ return return_check_zero((int)(r - roots));
+}
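+
+// Illustrative sketch (not upstream code): for t^2 - t + 0.21, i.e. A = 1,
+// B = -1, C = 0.21:
+//
+// R = sqrt(B*B - 4*A*C) = sqrt(0.16) = 0.4
+// Q = -(B - R)/2 = 0.7 (since B < 0)
+// x1 = Q/A = 0.7, x2 = C/Q = 0.3
+//
+// After the sort above, roots[] = { 0.3, 0.7 } and 2 is returned.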
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = SkEvalQuadAt(src, t);
+ }
+ if (tangent) {
+ *tangent = SkEvalQuadTangentAt(src, t);
+ }
+}
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t) {
+ return to_point(SkQuadCoeff(src).eval(t));
+}
+
+SkVector SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t) {
+ // The derivative equation is 2(b - a +(a - 2b +c)t). This returns a
+ // zero tangent vector when t is 0 or 1, and the control point is equal
+ // to the end point. In this case, use the quad end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[1] == src[2])) {
+ return src[2] - src[0];
+ }
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+
+ Sk2s B = P1 - P0;
+ Sk2s A = P2 - P1 - B;
+ Sk2s T = A * Sk2s(t) + B;
+
+ return to_vector(T + T);
+}
+
+static inline Sk2s interp(const Sk2s& v0, const Sk2s& v1, const Sk2s& t) {
+ return v0 + (v1 - v0) * t;
+}
+
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t) {
+ SkASSERT(t > 0 && t < SK_Scalar1);
+
+ Sk2s p0 = from_point(src[0]);
+ Sk2s p1 = from_point(src[1]);
+ Sk2s p2 = from_point(src[2]);
+ Sk2s tt(t);
+
+ Sk2s p01 = interp(p0, p1, tt);
+ Sk2s p12 = interp(p1, p2, tt);
+
+ dst[0] = to_point(p0);
+ dst[1] = to_point(p01);
+ dst[2] = to_point(interp(p01, p12, tt));
+ dst[3] = to_point(p12);
+ dst[4] = to_point(p2);
+}
+
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]) {
+ SkChopQuadAt(src, dst, 0.5f);
+}
+
+/** Quad'(t) = At + B, where
+ A = 2(a - 2b + c)
+ B = 2(b - a)
+ Solve for t, only if it fits between 0 < t < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValue[1]) {
+ /* At + B == 0
+ t = -B / A
+ */
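+ // The factor of 2 common to A and B cancels, leaving t = (a - b) / (a - 2b + c).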
+ return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
+
+static inline void flatten_double_quad_extrema(SkScalar coords[14]) {
+ coords[2] = coords[6] = coords[4];
+}
+
+/* Returns 0 for 1 quad, and 1 for two quads, either way the answer is
+ stored in dst[]. Guarantees that the 1/2 quads will be monotonic.
+ */
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fY;
+ SkScalar b = src[1].fY;
+ SkScalar c = src[2].fY;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fY);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(src[0].fX, a);
+ dst[1].set(src[1].fX, b);
+ dst[2].set(src[2].fX, c);
+ return 0;
+}
+
+/* Returns 0 for 1 quad, and 1 for two quads, either way the answer is
+ stored in dst[]. Guarantees that the 1/2 quads will be monotonic.
+ */
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fX;
+ SkScalar b = src[1].fX;
+ SkScalar c = src[2].fX;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fX);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(a, src[0].fY);
+ dst[1].set(b, src[1].fY);
+ dst[2].set(c, src[2].fY);
+ return 0;
+}
+
+// F(t) = a (1 - t) ^ 2 + 2 b t (1 - t) + c t ^ 2
+// F'(t) = 2 (b - a) + 2 (a - 2b + c) t
+// F''(t) = 2 (a - 2b + c)
+//
+// A = 2 (b - a)
+// B = 2 (a - 2b + c)
+//
+// Maximum curvature for a quadratic means solving
+// Fx' Fx'' + Fy' Fy'' = 0
+//
+// t = - (Ax Bx + Ay By) / (Bx ^ 2 + By ^ 2)
+//
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[0].fX - src[1].fX - src[1].fX + src[2].fX;
+ SkScalar By = src[0].fY - src[1].fY - src[1].fY + src[2].fY;
+
+ SkScalar numer = -(Ax * Bx + Ay * By);
+ SkScalar denom = Bx * Bx + By * By;
+ if (denom < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+ if (numer <= 0) {
+ return 0;
+ }
+ if (numer >= denom) { // Also catches denom=0.
+ return 1;
+ }
+ SkScalar t = numer / denom;
+ SkASSERT((0 <= t && t < 1) || SkScalarIsNaN(t));
+ return t;
+}
+
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]) {
+ SkScalar t = SkFindQuadMaxCurvature(src);
+ if (t == 0 || t == 1) {
+ memcpy(dst, src, 3 * sizeof(SkPoint));
+ return 1;
+ } else {
+ SkChopQuadAt(src, dst, t);
+ return 2;
+ }
+}
+
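+// Classic degree elevation: the quadratic {q0, q1, q2} is exactly the cubic
+// {q0, q0 + 2/3 (q1 - q0), q2 + 2/3 (q1 - q2), q2}, which is what the lerps
+// below compute.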
+void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]) {
+ Sk2s scale(SkDoubleToScalar(2.0 / 3.0));
+ Sk2s s0 = from_point(src[0]);
+ Sk2s s1 = from_point(src[1]);
+ Sk2s s2 = from_point(src[2]);
+
+ dst[0] = to_point(s0);
+ dst[1] = to_point(s0 + (s1 - s0) * scale);
+ dst[2] = to_point(s2 + (s1 - s2) * scale);
+ dst[3] = to_point(s2);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+///// CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS /////
+//////////////////////////////////////////////////////////////////////////////
+
+static SkVector eval_cubic_derivative(const SkPoint src[4], SkScalar t) {
+ SkQuadCoeff coeff;
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+
+ coeff.fA = P3 + Sk2s(3) * (P1 - P2) - P0;
+ coeff.fB = times_2(P2 - times_2(P1) + P0);
+ coeff.fC = P1 - P0;
+ return to_vector(coeff.eval(t));
+}
+
+static SkVector eval_cubic_2ndDerivative(const SkPoint src[4], SkScalar t) {
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+ Sk2s A = P3 + Sk2s(3) * (P1 - P2) - P0;
+ Sk2s B = P2 - times_2(P1) + P0;
+
+ return to_vector(A * Sk2s(t) + B);
+}
+
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* loc,
+ SkVector* tangent, SkVector* curvature) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (loc) {
+ *loc = to_point(SkCubicCoeff(src).eval(t));
+ }
+ if (tangent) {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1, and the
+ // adjacent control point is equal to the end point. In this case, use the
+ // next control point or the end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[2] == src[3])) {
+ if (t == 0) {
+ *tangent = src[2] - src[0];
+ } else {
+ *tangent = src[3] - src[1];
+ }
+ if (!tangent->fX && !tangent->fY) {
+ *tangent = src[3] - src[0];
+ }
+ } else {
+ *tangent = eval_cubic_derivative(src, t);
+ }
+ }
+ if (curvature) {
+ *curvature = eval_cubic_2ndDerivative(src, t);
+ }
+}
+
+/** Cubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+ Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ SkScalar A = d - a + 3*(b - c);
+ SkScalar B = 2*(a - b - b + c);
+ SkScalar C = b - a;
+
+ return SkFindUnitQuadRoots(A, B, C, tValues);
+}
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t) {
+ SkASSERT(t > 0 && t < SK_Scalar1);
+
+ Sk2s p0 = from_point(src[0]);
+ Sk2s p1 = from_point(src[1]);
+ Sk2s p2 = from_point(src[2]);
+ Sk2s p3 = from_point(src[3]);
+ Sk2s tt(t);
+
+ Sk2s ab = interp(p0, p1, tt);
+ Sk2s bc = interp(p1, p2, tt);
+ Sk2s cd = interp(p2, p3, tt);
+ Sk2s abc = interp(ab, bc, tt);
+ Sk2s bcd = interp(bc, cd, tt);
+ Sk2s abcd = interp(abc, bcd, tt);
+
+ dst[0] = to_point(p0);
+ dst[1] = to_point(ab);
+ dst[2] = to_point(abc);
+ dst[3] = to_point(abcd);
+ dst[4] = to_point(bcd);
+ dst[5] = to_point(cd);
+ dst[6] = to_point(p3);
+}
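+
+// The interp() cascade above is one step of de Casteljau subdivision: each
+// level lerps adjacent points at t, and abcd is the on-curve point where the
+// two halves meet. A scalar sketch of the same computation (hypothetical
+// helper, one coordinate only):
+//
+//   SkScalar lerp(SkScalar a, SkScalar b, SkScalar t) { return a + (b - a) * t; }
+//   // ab = lerp(p0, p1, t);   bc = lerp(p1, p2, t);   cd = lerp(p2, p3, t);
+//   // abc = lerp(ab, bc, t);  bcd = lerp(bc, cd, t);  abcd = lerp(abc, bcd, t);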
+
+/* http://code.google.com/p/skia/issues/detail?id=32
+
+ This test code would fail when we didn't check the return result of
+ valid_unit_divide in SkChopCubicAt(... tValues[], int roots). The reason is
+ that after the first chop, the parameters to valid_unit_divide are equal
+ (thanks to finite float precision and rounding in the subtracts). Thus
+ even though the 2nd tValue looks < 1.0, after we renormalize it, we end
+ up with 1.0, hence the need to check and just return the last cubic as
+   a degenerate clump of 4 points in the same place.
+
+ static void test_cubic() {
+ SkPoint src[4] = {
+ { 556.25000, 523.03003 },
+ { 556.23999, 522.96002 },
+ { 556.21997, 522.89001 },
+ { 556.21997, 522.82001 }
+ };
+ SkPoint dst[10];
+ SkScalar tval[] = { 0.33333334f, 0.99999994f };
+ SkChopCubicAt(src, dst, tval, 2);
+ }
+ */
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[],
+ const SkScalar tValues[], int roots) {
+#ifdef SK_DEBUG
+ {
+ for (int i = 0; i < roots - 1; i++)
+ {
+ SkASSERT(0 < tValues[i] && tValues[i] < 1);
+ SkASSERT(0 < tValues[i+1] && tValues[i+1] < 1);
+ SkASSERT(tValues[i] < tValues[i+1]);
+ }
+ }
+#endif
+
+ if (dst) {
+ if (roots == 0) { // nothing to chop
+ memcpy(dst, src, 4*sizeof(SkPoint));
+ } else {
+ SkScalar t = tValues[0];
+ SkPoint tmp[4];
+
+ for (int i = 0; i < roots; i++) {
+ SkChopCubicAt(src, dst, t);
+ if (i == roots - 1) {
+ break;
+ }
+
+ dst += 3;
+ // have src point to the remaining cubic (after the chop)
+ memcpy(tmp, dst, 4 * sizeof(SkPoint));
+ src = tmp;
+
+ // watch out in case the renormalized t isn't in range
+ if (!valid_unit_divide(tValues[i+1] - tValues[i],
+ SK_Scalar1 - tValues[i], &t)) {
+ // if we can't, just create a degenerate cubic
+ dst[4] = dst[5] = dst[6] = src[3];
+ break;
+ }
+ }
+ }
+ }
+}
+
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]) {
+ SkChopCubicAt(src, dst, 0.5f);
+}
+
+static void flatten_double_cubic_extrema(SkScalar coords[14]) {
+ coords[4] = coords[8] = coords[6];
+}
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan
+ converter. Depending on what is returned, dst[] is treated as follows:
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fY, src[1].fY, src[2].fY,
+ src[3].fY, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+ // we do some cleanup to ensure our Y extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fY);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fY);
+ }
+ }
+ return roots;
+}
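+
+// Usage sketch (illustrative): note the return value counts extrema found,
+// not pieces produced, so the number of cubics is roots + 1.
+//
+//   SkPoint dst[10];
+//   int roots = SkChopCubicAtYExtrema(src, dst);  // 0, 1, or 2
+//   for (int i = 0; i <= roots; ++i) {
+//       // piece i occupies dst[3*i .. 3*i+3]; adjacent pieces share a point
+//   }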
+
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fX, src[1].fX, src[2].fX,
+ src[3].fX, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+        // we do some cleanup to ensure our X extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fX);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fX);
+ }
+ }
+ return roots;
+}
+
+/** http://www.faculty.idc.ac.il/arik/quality/appendixA.html
+
+ Inflection means that curvature is zero.
+    Curvature is [F' x F''] / |F'|^3
+    So we solve F'x * F''y - F'y * F''x == 0
+ After some canceling of the cubic term, we get
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+ (BxCy - ByCx)t^2 + (AxCy - AyCx)t + AxBy - AyBx == 0
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[2].fX - 2 * src[1].fX + src[0].fX;
+ SkScalar By = src[2].fY - 2 * src[1].fY + src[0].fY;
+ SkScalar Cx = src[3].fX + 3 * (src[1].fX - src[2].fX) - src[0].fX;
+ SkScalar Cy = src[3].fY + 3 * (src[1].fY - src[2].fY) - src[0].fY;
+
+ return SkFindUnitQuadRoots(Bx*Cy - By*Cx,
+ Ax*Cy - Ay*Cx,
+ Ax*By - Ay*Bx,
+ tValues);
+}
+
+int SkChopCubicAtInflections(const SkPoint src[], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int count = SkFindCubicInflections(src, tValues);
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+// Assumes the third component of points is 1.
+// Calcs p0 . (p1 x p2)
+static double calc_dot_cross_cubic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ const double xComp = (double) p0.fX * ((double) p1.fY - (double) p2.fY);
+ const double yComp = (double) p0.fY * ((double) p2.fX - (double) p1.fX);
+ const double wComp = (double) p1.fX * (double) p2.fY - (double) p1.fY * (double) p2.fX;
+ return (xComp + yComp + wComp);
+}
+
+// Returns a positive power of 2 that, when multiplied by n, and excepting the two edge cases listed
+// below, shifts the exponent of n to yield a magnitude somewhere inside [1..2).
+// Returns 2^1023 if abs(n) < 2^-1022 (including 0).
+// Returns NaN if n is Inf or NaN.
+inline static double previous_inverse_pow2(double n) {
+ uint64_t bits;
+ memcpy(&bits, &n, sizeof(double));
+ bits = ((1023llu*2 << 52) + ((1llu << 52) - 1)) - bits; // exp=-exp
+ bits &= (0x7ffllu) << 52; // mantissa=1.0, sign=0
+ memcpy(&n, &bits, sizeof(double));
+ return n;
+}
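+
+// Worked example of the bit trick above (illustrative): n = 48.0 is
+// 1.5 * 2^5, so its biased exponent field holds 1023 + 5. The subtraction
+// flips that to 1023 - 5 (the all-ones mantissa term absorbs any borrow
+// before it reaches the exponent bits), and the mask keeps sign = 0 and
+// mantissa = 1.0. The function returns 2^-5, and 48.0 * 2^-5 = 1.5, which
+// lands in [1..2) as promised.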
+
+inline static void write_cubic_inflection_roots(double t0, double s0, double t1, double s1,
+ double* t, double* s) {
+ t[0] = t0;
+ s[0] = s0;
+
+ // This copysign/abs business orients the implicit function so positive values are always on the
+ // "left" side of the curve.
+ t[1] = -copysign(t1, t1 * s1);
+ s[1] = -fabs(s1);
+
+ // Ensure t[0]/s[0] <= t[1]/s[1] (s[1] is negative from above).
+ if (copysign(s[1], s[0]) * t[0] > -fabs(s[0]) * t[1]) {
+ using std::swap;
+ swap(t[0], t[1]);
+ swap(s[0], s[1]);
+ }
+}
+
+SkCubicType SkClassifyCubic(const SkPoint P[4], double t[2], double s[2], double d[4]) {
+ // Find the cubic's inflection function, I = [T^3 -3T^2 3T -1] dot D. (D0 will always be 0
+ // for integral cubics.)
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.2 Curve Categorization:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ double A1 = calc_dot_cross_cubic(P[0], P[3], P[2]);
+ double A2 = calc_dot_cross_cubic(P[1], P[0], P[3]);
+ double A3 = calc_dot_cross_cubic(P[2], P[1], P[0]);
+
+ double D3 = 3 * A3;
+ double D2 = D3 - A2;
+ double D1 = D2 - A2 + A1;
+
+ // Shift the exponents in D so the largest magnitude falls somewhere in 1..2. This protects us
+ // from overflow down the road while solving for roots and KLM functionals.
+ double Dmax = std::max(std::max(fabs(D1), fabs(D2)), fabs(D3));
+ double norm = previous_inverse_pow2(Dmax);
+ D1 *= norm;
+ D2 *= norm;
+ D3 *= norm;
+
+ if (d) {
+ d[3] = D3;
+ d[2] = D2;
+ d[1] = D1;
+ d[0] = 0;
+ }
+
+ // Now use the inflection function to classify the cubic.
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.4 Integral Cubics:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ if (0 != D1) {
+ double discr = 3*D2*D2 - 4*D1*D3;
+ if (discr > 0) { // Serpentine.
+ if (t && s) {
+ double q = 3*D2 + copysign(sqrt(3*discr), D2);
+ write_cubic_inflection_roots(q, 6*D1, 2*D3, q, t, s);
+ }
+ return SkCubicType::kSerpentine;
+ } else if (discr < 0) { // Loop.
+ if (t && s) {
+ double q = D2 + copysign(sqrt(-discr), D2);
+ write_cubic_inflection_roots(q, 2*D1, 2*(D2*D2 - D3*D1), D1*q, t, s);
+ }
+ return SkCubicType::kLoop;
+ } else { // Cusp.
+ if (t && s) {
+ write_cubic_inflection_roots(D2, 2*D1, D2, 2*D1, t, s);
+ }
+ return SkCubicType::kLocalCusp;
+ }
+ } else {
+ if (0 != D2) { // Cusp at T=infinity.
+ if (t && s) {
+ write_cubic_inflection_roots(D3, 3*D2, 1, 0, t, s); // T1=infinity.
+ }
+ return SkCubicType::kCuspAtInfinity;
+ } else { // Degenerate.
+ if (t && s) {
+ write_cubic_inflection_roots(1, 0, 1, 0, t, s); // T0=T1=infinity.
+ }
+ return 0 != D3 ? SkCubicType::kQuadratic : SkCubicType::kLineOrPoint;
+ }
+ }
+}
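+
+// Usage sketch (illustrative): classify a cubic and branch on the result.
+//
+//   double t[2], s[2];
+//   switch (SkClassifyCubic(pts, t, s)) {
+//       case SkCubicType::kSerpentine: /* t[i]/s[i] are the inflections */ break;
+//       case SkCubicType::kLoop:       /* t[i]/s[i] locate the double point */ break;
+//       default:                       /* cusp or degenerate cases */ break;
+//   }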
+
+template <typename T> void bubble_sort(T array[], int count) {
+ for (int i = count - 1; i > 0; --i)
+ for (int j = i; j > 0; --j)
+ if (array[j] < array[j-1])
+ {
+ T tmp(array[j]);
+ array[j] = array[j-1];
+ array[j-1] = tmp;
+ }
+}
+
+/**
+ * Given an array and count, remove all pair-wise duplicates from the array,
+ * keeping the existing sorting, and return the new count
+ */
+static int collaps_duplicates(SkScalar array[], int count) {
+ for (int n = count; n > 1; --n) {
+ if (array[0] == array[1]) {
+ for (int i = 1; i < n; ++i) {
+ array[i - 1] = array[i];
+ }
+ count -= 1;
+ } else {
+ array += 1;
+ }
+ }
+ return count;
+}
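+
+// For example (illustrative): {0, 0, 1} collapses in place to {0, 1} and
+// returns 2, while {0, 1, 2} comes back unchanged with count 3. The debug
+// test below exercises exactly these cases.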
+
+#ifdef SK_DEBUG
+
+#define TEST_COLLAPS_ENTRY(array) array, SK_ARRAY_COUNT(array)
+
+static void test_collaps_duplicates() {
+ static bool gOnce;
+ if (gOnce) { return; }
+ gOnce = true;
+ const SkScalar src0[] = { 0 };
+ const SkScalar src1[] = { 0, 0 };
+ const SkScalar src2[] = { 0, 1 };
+ const SkScalar src3[] = { 0, 0, 0 };
+ const SkScalar src4[] = { 0, 0, 1 };
+ const SkScalar src5[] = { 0, 1, 1 };
+ const SkScalar src6[] = { 0, 1, 2 };
+ const struct {
+ const SkScalar* fData;
+ int fCount;
+ int fCollapsedCount;
+ } data[] = {
+ { TEST_COLLAPS_ENTRY(src0), 1 },
+ { TEST_COLLAPS_ENTRY(src1), 1 },
+ { TEST_COLLAPS_ENTRY(src2), 2 },
+ { TEST_COLLAPS_ENTRY(src3), 1 },
+ { TEST_COLLAPS_ENTRY(src4), 2 },
+ { TEST_COLLAPS_ENTRY(src5), 2 },
+ { TEST_COLLAPS_ENTRY(src6), 3 },
+ };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(data); ++i) {
+ SkScalar dst[3];
+ memcpy(dst, data[i].fData, data[i].fCount * sizeof(dst[0]));
+ int count = collaps_duplicates(dst, data[i].fCount);
+ SkASSERT(data[i].fCollapsedCount == count);
+ for (int j = 1; j < count; ++j) {
+ SkASSERT(dst[j-1] < dst[j]);
+ }
+ }
+}
+#endif
+
+static SkScalar SkScalarCubeRoot(SkScalar x) {
+ return SkScalarPow(x, 0.3333333f);
+}
+
+/* Solve coeff(t) == 0, returning the number of roots that
+   lie within 0 < t < 1.
+ coeff[0]t^3 + coeff[1]t^2 + coeff[2]t + coeff[3]
+
+   Eliminates repeated roots (so that all tValues are distinct, and are always
+   in increasing order).
+*/
+static int solve_cubic_poly(const SkScalar coeff[4], SkScalar tValues[3]) {
+ if (SkScalarNearlyZero(coeff[0])) { // we're just a quadratic
+ return SkFindUnitQuadRoots(coeff[1], coeff[2], coeff[3], tValues);
+ }
+
+ SkScalar a, b, c, Q, R;
+
+ {
+ SkASSERT(coeff[0] != 0);
+
+ SkScalar inva = SkScalarInvert(coeff[0]);
+ a = coeff[1] * inva;
+ b = coeff[2] * inva;
+ c = coeff[3] * inva;
+ }
+ Q = (a*a - b*3) / 9;
+ R = (2*a*a*a - 9*a*b + 27*c) / 54;
+
+ SkScalar Q3 = Q * Q * Q;
+ SkScalar R2MinusQ3 = R * R - Q3;
+ SkScalar adiv3 = a / 3;
+
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ SkScalar theta = SkScalarACos(SkScalarPin(R / SkScalarSqrt(Q3), -1, 1));
+ SkScalar neg2RootQ = -2 * SkScalarSqrt(Q);
+
+ tValues[0] = SkScalarPin(neg2RootQ * SkScalarCos(theta/3) - adiv3, 0, 1);
+ tValues[1] = SkScalarPin(neg2RootQ * SkScalarCos((theta + 2*SK_ScalarPI)/3) - adiv3, 0, 1);
+ tValues[2] = SkScalarPin(neg2RootQ * SkScalarCos((theta - 2*SK_ScalarPI)/3) - adiv3, 0, 1);
+ SkDEBUGCODE(test_collaps_duplicates();)
+
+ // now sort the roots
+ bubble_sort(tValues, 3);
+ return collaps_duplicates(tValues, 3);
+ } else { // we have 1 real root
+ SkScalar A = SkScalarAbs(R) + SkScalarSqrt(R2MinusQ3);
+ A = SkScalarCubeRoot(A);
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ tValues[0] = SkScalarPin(A - adiv3, 0, 1);
+ return 1;
+ }
+}
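+
+// A note on the branch above (sketch of the math): after normalizing to
+// t^3 + a*t^2 + b*t + c, Q and R are the standard Cardano invariants.
+// R^2 - Q^3 < 0 means three real roots, recovered trigonometrically via
+// acos; otherwise there is a single real root A + Q/A - a/3. Also note that
+// out-of-range roots are pinned into [0..1] by SkScalarPin rather than
+// discarded, so callers see clamped values, not fewer roots.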
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+static void formulate_F1DotF2(const SkScalar src[], SkScalar coeff[4]) {
+ SkScalar a = src[2] - src[0];
+ SkScalar b = src[4] - 2 * src[2] + src[0];
+ SkScalar c = src[6] + 3 * (src[2] - src[4]) - src[0];
+
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]) {
+ SkScalar coeffX[4], coeffY[4];
+ int i;
+
+ formulate_F1DotF2(&src[0].fX, coeffX);
+ formulate_F1DotF2(&src[0].fY, coeffY);
+
+ for (i = 0; i < 4; i++) {
+ coeffX[i] += coeffY[i];
+ }
+
+ int numRoots = solve_cubic_poly(coeffX, tValues);
+ // now remove extrema where the curvature is zero (mins)
+ // !!!! need a test for this !!!!
+ return numRoots;
+}
+
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3]) {
+ SkScalar t_storage[3];
+
+ if (tValues == nullptr) {
+ tValues = t_storage;
+ }
+
+ SkScalar roots[3];
+ int rootCount = SkFindCubicMaxCurvature(src, roots);
+
+ // Throw out values not inside 0..1.
+ int count = 0;
+ for (int i = 0; i < rootCount; ++i) {
+ if (0 < roots[i] && roots[i] < 1) {
+ tValues[count++] = roots[i];
+ }
+ }
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+// Returns a constant proportional to the dimensions of the cubic.
+// Constant found through experimentation -- maybe there's a better way....
+static SkScalar calc_cubic_precision(const SkPoint src[4]) {
+ return (SkPointPriv::DistanceToSqd(src[1], src[0]) + SkPointPriv::DistanceToSqd(src[2], src[1])
+ + SkPointPriv::DistanceToSqd(src[3], src[2])) * 1e-8f;
+}
+
+// Returns true if both points src[testIndex], src[testIndex+1] are in the same half plane defined
+// by the line segment src[lineIndex], src[lineIndex+1].
+static bool on_same_side(const SkPoint src[4], int testIndex, int lineIndex) {
+ SkPoint origin = src[lineIndex];
+ SkVector line = src[lineIndex + 1] - origin;
+ SkScalar crosses[2];
+ for (int index = 0; index < 2; ++index) {
+ SkVector testLine = src[testIndex + index] - origin;
+ crosses[index] = line.cross(testLine);
+ }
+ return crosses[0] * crosses[1] >= 0;
+}
+
+// Return location (in t) of cubic cusp, if there is one.
+// Note that the cubic classification code does not reliably return all cusped cubics, so
+// it is not called here.
+SkScalar SkFindCubicCusp(const SkPoint src[4]) {
+ // When the adjacent control point matches the end point, it behaves as if
+ // the cubic has a cusp: there's a point of max curvature where the derivative
+ // goes to zero. Ideally, this would be where t is zero or one, but math
+    // error means it may not be exactly so. It is not uncommon to create cubics this way; skip them.
+ if (src[0] == src[1]) {
+ return -1;
+ }
+ if (src[2] == src[3]) {
+ return -1;
+ }
+ // Cubics only have a cusp if the line segments formed by the control and end points cross.
+ // Detect crossing if line ends are on opposite sides of plane formed by the other line.
+ if (on_same_side(src, 0, 2) || on_same_side(src, 2, 0)) {
+ return -1;
+ }
+ // Cubics may have multiple points of maximum curvature, although at most only
+ // one is a cusp.
+ SkScalar maxCurvature[3];
+ int roots = SkFindCubicMaxCurvature(src, maxCurvature);
+ for (int index = 0; index < roots; ++index) {
+ SkScalar testT = maxCurvature[index];
+ if (0 >= testT || testT >= 1) { // no need to consider max curvature on the end
+ continue;
+ }
+ // A cusp is at the max curvature, and also has a derivative close to zero.
+ // Choose the 'close to zero' meaning by comparing the derivative length
+ // with the overall cubic size.
+ SkVector dPt = eval_cubic_derivative(src, testT);
+ SkScalar dPtMagnitude = SkPointPriv::LengthSqd(dPt);
+ SkScalar precision = calc_cubic_precision(src);
+ if (dPtMagnitude < precision) {
+ // All three max curvature t values may be close to the cusp;
+ // return the first one.
+ return testT;
+ }
+ }
+ return -1;
+}
+
+#include "src/pathops/SkPathOpsCubic.h"
+
+typedef int (SkDCubic::*InterceptProc)(double intercept, double roots[3]) const;
+
+static bool cubic_dchop_at_intercept(const SkPoint src[4], SkScalar intercept, SkPoint dst[7],
+ InterceptProc method) {
+ SkDCubic cubic;
+ double roots[3];
+ int count = (cubic.set(src).*method)(intercept, roots);
+ if (count > 0) {
+ SkDCubicPair pair = cubic.chopAt(roots[0]);
+ for (int i = 0; i < 7; ++i) {
+ dst[i] = pair.pts[i].asSkPoint();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkChopMonoCubicAtY(SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ return cubic_dchop_at_intercept(src, y, dst, &SkDCubic::horizontalIntersect);
+}
+
+bool SkChopMonoCubicAtX(SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ return cubic_dchop_at_intercept(src, x, dst, &SkDCubic::verticalIntersect);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// NURB representation for conics. Helpful explanations at:
+//
+// http://citeseerx.ist.psu.edu/viewdoc/
+// download?doi=10.1.1.44.5740&rep=rep1&type=ps
+// and
+// http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html
+//
+// F = (A (1 - t)^2 + C t^2 + 2 B (1 - t) t w)
+// ------------------------------------------
+// ((1 - t)^2 + t^2 + 2 (1 - t) t w)
+//
+// = {t^2 (P0 + P2 - 2 P1 w), t (-2 P0 + 2 P1 w), P0}
+// ------------------------------------------------
+// {t^2 (2 - 2 w), t (-2 + 2 w), 1}
+//
+
+// F' = 2 (C t (1 + t (-1 + w)) - A (-1 + t) (t (-1 + w) - w) + B (1 - 2 t) w)
+//
+// t^2 : (2 P0 - 2 P2 - 2 P0 w + 2 P2 w)
+// t^1 : (-2 P0 + 2 P2 + 4 P0 w - 4 P1 w)
+// t^0 : -2 P0 w + 2 P1 w
+//
+// We disregard magnitude, so we can freely ignore the denominator of F', and
+// divide the numerator by 2
+//
+// coeff[0] for t^2
+// coeff[1] for t^1
+// coeff[2] for t^0
+//
+static void conic_deriv_coeff(const SkScalar src[],
+ SkScalar w,
+ SkScalar coeff[3]) {
+ const SkScalar P20 = src[4] - src[0];
+ const SkScalar P10 = src[2] - src[0];
+ const SkScalar wP10 = w * P10;
+ coeff[0] = w * P20 - P20;
+ coeff[1] = P20 - 2 * wP10;
+ coeff[2] = wP10;
+}
+
+static bool conic_find_extrema(const SkScalar src[], SkScalar w, SkScalar* t) {
+ SkScalar coeff[3];
+ conic_deriv_coeff(src, w, coeff);
+
+ SkScalar tValues[2];
+ int roots = SkFindUnitQuadRoots(coeff[0], coeff[1], coeff[2], tValues);
+ SkASSERT(0 == roots || 1 == roots);
+
+ if (1 == roots) {
+ *t = tValues[0];
+ return true;
+ }
+ return false;
+}
+
+// We only interpolate one dimension at a time (the first, at +0, +3, +6).
+static void p3d_interp(const SkScalar src[7], SkScalar dst[7], SkScalar t) {
+ SkScalar ab = SkScalarInterp(src[0], src[3], t);
+ SkScalar bc = SkScalarInterp(src[3], src[6], t);
+ dst[0] = ab;
+ dst[3] = SkScalarInterp(ab, bc, t);
+ dst[6] = bc;
+}
+
+static void ratquad_mapTo3D(const SkPoint src[3], SkScalar w, SkPoint3 dst[3]) {
+ dst[0].set(src[0].fX * 1, src[0].fY * 1, 1);
+ dst[1].set(src[1].fX * w, src[1].fY * w, w);
+ dst[2].set(src[2].fX * 1, src[2].fY * 1, 1);
+}
+
+static SkPoint project_down(const SkPoint3& src) {
+ return {src.fX / src.fZ, src.fY / src.fZ};
+}
+
+// return false if infinity or NaN is generated; caller must check
+bool SkConic::chopAt(SkScalar t, SkConic dst[2]) const {
+ SkPoint3 tmp[3], tmp2[3];
+
+ ratquad_mapTo3D(fPts, fW, tmp);
+
+ p3d_interp(&tmp[0].fX, &tmp2[0].fX, t);
+ p3d_interp(&tmp[0].fY, &tmp2[0].fY, t);
+ p3d_interp(&tmp[0].fZ, &tmp2[0].fZ, t);
+
+ dst[0].fPts[0] = fPts[0];
+ dst[0].fPts[1] = project_down(tmp2[0]);
+ dst[0].fPts[2] = project_down(tmp2[1]); dst[1].fPts[0] = dst[0].fPts[2];
+ dst[1].fPts[1] = project_down(tmp2[2]);
+ dst[1].fPts[2] = fPts[2];
+
+ // to put in "standard form", where w0 and w2 are both 1, we compute the
+    // new w1 as sqrt(w1*w1/(w0*w2))
+ // or
+ // w1 /= sqrt(w0*w2)
+ //
+ // However, in our case, we know that for dst[0]:
+ // w0 == 1, and for dst[1], w2 == 1
+ //
+ SkScalar root = SkScalarSqrt(tmp2[1].fZ);
+ dst[0].fW = tmp2[0].fZ / root;
+ dst[1].fW = tmp2[2].fZ / root;
+ SkASSERT(sizeof(dst[0]) == sizeof(SkScalar) * 7);
+ SkASSERT(0 == offsetof(SkConic, fPts[0].fX));
+ return SkScalarsAreFinite(&dst[0].fPts[0].fX, 7 * 2);
+}
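+
+// Usage sketch (illustrative): chopAt can produce non-finite values for
+// extreme inputs, so the result must be checked.
+//
+//   SkConic halves[2];
+//   if (conic.chopAt(0.25f, halves)) {
+//       // halves[0] covers t in [0, 0.25], halves[1] covers [0.25, 1]
+//   }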
+
+void SkConic::chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const {
+ if (0 == t1 || 1 == t2) {
+ if (0 == t1 && 1 == t2) {
+ *dst = *this;
+ return;
+ } else {
+ SkConic pair[2];
+ if (this->chopAt(t1 ? t1 : t2, pair)) {
+ *dst = pair[SkToBool(t1)];
+ return;
+ }
+ }
+ }
+ SkConicCoeff coeff(*this);
+ Sk2s tt1(t1);
+ Sk2s aXY = coeff.fNumer.eval(tt1);
+ Sk2s aZZ = coeff.fDenom.eval(tt1);
+ Sk2s midTT((t1 + t2) / 2);
+ Sk2s dXY = coeff.fNumer.eval(midTT);
+ Sk2s dZZ = coeff.fDenom.eval(midTT);
+ Sk2s tt2(t2);
+ Sk2s cXY = coeff.fNumer.eval(tt2);
+ Sk2s cZZ = coeff.fDenom.eval(tt2);
+ Sk2s bXY = times_2(dXY) - (aXY + cXY) * Sk2s(0.5f);
+ Sk2s bZZ = times_2(dZZ) - (aZZ + cZZ) * Sk2s(0.5f);
+ dst->fPts[0] = to_point(aXY / aZZ);
+ dst->fPts[1] = to_point(bXY / bZZ);
+ dst->fPts[2] = to_point(cXY / cZZ);
+ Sk2s ww = bZZ / (aZZ * cZZ).sqrt();
+ dst->fW = ww[0];
+}
+
+SkPoint SkConic::evalAt(SkScalar t) const {
+ return to_point(SkConicCoeff(*this).eval(t));
+}
+
+SkVector SkConic::evalTangentAt(SkScalar t) const {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1,
+ // and the control point is equal to the end point.
+ // In this case, use the conic endpoints to compute the tangent.
+ if ((t == 0 && fPts[0] == fPts[1]) || (t == 1 && fPts[1] == fPts[2])) {
+ return fPts[2] - fPts[0];
+ }
+ Sk2s p0 = from_point(fPts[0]);
+ Sk2s p1 = from_point(fPts[1]);
+ Sk2s p2 = from_point(fPts[2]);
+ Sk2s ww(fW);
+
+ Sk2s p20 = p2 - p0;
+ Sk2s p10 = p1 - p0;
+
+ Sk2s C = ww * p10;
+ Sk2s A = ww * p20 - p20;
+ Sk2s B = p20 - C - C;
+
+ return to_vector(SkQuadCoeff(A, B, C).eval(t));
+}
+
+void SkConic::evalAt(SkScalar t, SkPoint* pt, SkVector* tangent) const {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = this->evalAt(t);
+ }
+ if (tangent) {
+ *tangent = this->evalTangentAt(t);
+ }
+}
+
+static SkScalar subdivide_w_value(SkScalar w) {
+ return SkScalarSqrt(SK_ScalarHalf + w * SK_ScalarHalf);
+}
+
+void SkConic::chop(SkConic * SK_RESTRICT dst) const {
+ Sk2s scale = Sk2s(SkScalarInvert(SK_Scalar1 + fW));
+ SkScalar newW = subdivide_w_value(fW);
+
+ Sk2s p0 = from_point(fPts[0]);
+ Sk2s p1 = from_point(fPts[1]);
+ Sk2s p2 = from_point(fPts[2]);
+ Sk2s ww(fW);
+
+ Sk2s wp1 = ww * p1;
+ Sk2s m = (p0 + times_2(wp1) + p2) * scale * Sk2s(0.5f);
+ SkPoint mPt = to_point(m);
+ if (!mPt.isFinite()) {
+ double w_d = fW;
+ double w_2 = w_d * 2;
+ double scale_half = 1 / (1 + w_d) * 0.5;
+ mPt.fX = SkDoubleToScalar((fPts[0].fX + w_2 * fPts[1].fX + fPts[2].fX) * scale_half);
+ mPt.fY = SkDoubleToScalar((fPts[0].fY + w_2 * fPts[1].fY + fPts[2].fY) * scale_half);
+ }
+ dst[0].fPts[0] = fPts[0];
+ dst[0].fPts[1] = to_point((p0 + wp1) * scale);
+ dst[0].fPts[2] = dst[1].fPts[0] = mPt;
+ dst[1].fPts[1] = to_point((wp1 + p2) * scale);
+ dst[1].fPts[2] = fPts[2];
+
+ dst[0].fW = dst[1].fW = newW;
+}
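+
+// One way to read the new weight above (sketch): treating the weight as the
+// cosine of the half-arc angle, w = cos(theta), each half of the chop spans
+// half the angle and so gets weight cos(theta/2) = sqrt((1 + w) / 2), which
+// is exactly what subdivide_w_value() computes.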
+
+/*
+ * "High order approximation of conic sections by quadratic splines"
+ * by Michael Floater, 1993
+ */
+#define AS_QUAD_ERROR_SETUP \
+ SkScalar a = fW - 1; \
+ SkScalar k = a / (4 * (2 + a)); \
+ SkScalar x = k * (fPts[0].fX - 2 * fPts[1].fX + fPts[2].fX); \
+ SkScalar y = k * (fPts[0].fY - 2 * fPts[1].fY + fPts[2].fY);
+
+void SkConic::computeAsQuadError(SkVector* err) const {
+ AS_QUAD_ERROR_SETUP
+ err->set(x, y);
+}
+
+bool SkConic::asQuadTol(SkScalar tol) const {
+ AS_QUAD_ERROR_SETUP
+ return (x * x + y * y) <= tol * tol;
+}
+
+// Limit the number of suggested quads to approximate a conic
+#define kMaxConicToQuadPOW2 5
+
+int SkConic::computeQuadPOW2(SkScalar tol) const {
+ if (tol < 0 || !SkScalarIsFinite(tol) || !SkPointPriv::AreFinite(fPts, 3)) {
+ return 0;
+ }
+
+ AS_QUAD_ERROR_SETUP
+
+ SkScalar error = SkScalarSqrt(x * x + y * y);
+ int pow2;
+ for (pow2 = 0; pow2 < kMaxConicToQuadPOW2; ++pow2) {
+ if (error <= tol) {
+ break;
+ }
+ error *= 0.25f;
+ }
+ // float version -- using ceil gives the same results as the above.
+ if (false) {
+ SkScalar err = SkScalarSqrt(x * x + y * y);
+ if (err <= tol) {
+ return 0;
+ }
+ SkScalar tol2 = tol * tol;
+ if (tol2 == 0) {
+ return kMaxConicToQuadPOW2;
+ }
+ SkScalar fpow2 = SkScalarLog2((x * x + y * y) / tol2) * 0.25f;
+ int altPow2 = SkScalarCeilToInt(fpow2);
+ if (altPow2 != pow2) {
+ SkDebugf("pow2 %d altPow2 %d fbits %g err %g tol %g\n", pow2, altPow2, fpow2, err, tol);
+ }
+ pow2 = altPow2;
+ }
+ return pow2;
+}
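+
+// The loop above relies on each subdivision cutting the quad-approximation
+// error by roughly 4x, so pow2 is the smallest k with error / 4^k <= tol,
+// i.e. k = ceil(0.5 * log2(error / tol)), subject to the
+// kMaxConicToQuadPOW2 cap. The disabled block computes the same k in closed
+// form, since 0.25 * log2(err^2 / tol^2) equals 0.5 * log2(err / tol).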
+
+// This was originally developed and tested for pathops: see SkOpTypes.h
+// returns true if (a <= b <= c) || (a >= b >= c)
+static bool between(SkScalar a, SkScalar b, SkScalar c) {
+ return (a - b) * (c - b) <= 0;
+}
+
+static SkPoint* subdivide(const SkConic& src, SkPoint pts[], int level) {
+ SkASSERT(level >= 0);
+
+ if (0 == level) {
+ memcpy(pts, &src.fPts[1], 2 * sizeof(SkPoint));
+ return pts + 2;
+ } else {
+ SkConic dst[2];
+ src.chop(dst);
+ const SkScalar startY = src.fPts[0].fY;
+ SkScalar endY = src.fPts[2].fY;
+ if (between(startY, src.fPts[1].fY, endY)) {
+ // If the input is monotonic and the output is not, the scan converter hangs.
+ // Ensure that the chopped conics maintain their y-order.
+ SkScalar midY = dst[0].fPts[2].fY;
+ if (!between(startY, midY, endY)) {
+ // If the computed midpoint is outside the ends, move it to the closer one.
+ SkScalar closerY = SkTAbs(midY - startY) < SkTAbs(midY - endY) ? startY : endY;
+ dst[0].fPts[2].fY = dst[1].fPts[0].fY = closerY;
+ }
+ if (!between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY)) {
+ // If the 1st control is not between the start and end, put it at the start.
+ // This also reduces the quad to a line.
+ dst[0].fPts[1].fY = startY;
+ }
+ if (!between(dst[1].fPts[0].fY, dst[1].fPts[1].fY, endY)) {
+ // If the 2nd control is not between the start and end, put it at the end.
+ // This also reduces the quad to a line.
+ dst[1].fPts[1].fY = endY;
+ }
+ // Verify that all five points are in order.
+ SkASSERT(between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY));
+ SkASSERT(between(dst[0].fPts[1].fY, dst[0].fPts[2].fY, dst[1].fPts[1].fY));
+ SkASSERT(between(dst[0].fPts[2].fY, dst[1].fPts[1].fY, endY));
+ }
+ --level;
+ pts = subdivide(dst[0], pts, level);
+ return subdivide(dst[1], pts, level);
+ }
+}
+
+int SkConic::chopIntoQuadsPOW2(SkPoint pts[], int pow2) const {
+ SkASSERT(pow2 >= 0);
+ *pts = fPts[0];
+ SkDEBUGCODE(SkPoint* endPts);
+ if (pow2 == kMaxConicToQuadPOW2) { // If an extreme weight generates many quads ...
+ SkConic dst[2];
+ this->chop(dst);
+ // check to see if the first chop generates a pair of lines
+ if (SkPointPriv::EqualsWithinTolerance(dst[0].fPts[1], dst[0].fPts[2]) &&
+ SkPointPriv::EqualsWithinTolerance(dst[1].fPts[0], dst[1].fPts[1])) {
+ pts[1] = pts[2] = pts[3] = dst[0].fPts[1]; // set ctrl == end to make lines
+ pts[4] = dst[1].fPts[2];
+ pow2 = 1;
+ SkDEBUGCODE(endPts = &pts[5]);
+ goto commonFinitePtCheck;
+ }
+ }
+ SkDEBUGCODE(endPts = ) subdivide(*this, pts + 1, pow2);
+commonFinitePtCheck:
+ const int quadCount = 1 << pow2;
+ const int ptCount = 2 * quadCount + 1;
+ SkASSERT(endPts - pts == ptCount);
+ if (!SkPointPriv::AreFinite(pts, ptCount)) {
+ // if we generated a non-finite, pin ourselves to the middle of the hull,
+ // as our first and last are already on the first/last pts of the hull.
+ for (int i = 1; i < ptCount - 1; ++i) {
+ pts[i] = fPts[1];
+ }
+ }
+ return 1 << pow2;
+}
+
+bool SkConic::findXExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fX, fW, t);
+}
+
+bool SkConic::findYExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fY, fW, t);
+}
+
+bool SkConic::chopAtXExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+ // an X-extrema
+ SkScalar value = dst[0].fPts[2].fX;
+ dst[0].fPts[1].fX = value;
+ dst[1].fPts[0].fX = value;
+ dst[1].fPts[1].fX = value;
+ return true;
+ }
+ return false;
+}
+
+bool SkConic::chopAtYExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findYExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+        // a Y-extrema
+ SkScalar value = dst[0].fPts[2].fY;
+ dst[0].fPts[1].fY = value;
+ dst[1].fPts[0].fY = value;
+ dst[1].fPts[1].fY = value;
+ return true;
+ }
+ return false;
+}
+
+void SkConic::computeTightBounds(SkRect* bounds) const {
+ SkPoint pts[4];
+ pts[0] = fPts[0];
+ pts[1] = fPts[2];
+ int count = 2;
+
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ if (this->findYExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ bounds->setBounds(pts, count);
+}
+
+void SkConic::computeFastBounds(SkRect* bounds) const {
+ bounds->setBounds(fPts, 3);
+}
+
+#if 0 // unimplemented
+bool SkConic::findMaxCurvature(SkScalar* t) const {
+ // TODO: Implement me
+ return false;
+}
+#endif
+
+SkScalar SkConic::TransformW(const SkPoint pts[], SkScalar w, const SkMatrix& matrix) {
+ if (!matrix.hasPerspective()) {
+ return w;
+ }
+
+ SkPoint3 src[3], dst[3];
+
+ ratquad_mapTo3D(pts, w, src);
+
+ matrix.mapHomogeneousPoints(dst, src, 3);
+
+    // w' = sqrt(w1*w1/(w0*w2))
+ // use doubles temporarily, to handle small numer/denom
+ double w0 = dst[0].fZ;
+ double w1 = dst[1].fZ;
+ double w2 = dst[2].fZ;
+ return sk_double_to_float(sqrt(sk_ieee_double_divide(w1 * w1, w0 * w2)));
+}
+
+int SkConic::BuildUnitArc(const SkVector& uStart, const SkVector& uStop, SkRotationDirection dir,
+ const SkMatrix* userMatrix, SkConic dst[kMaxConicsForArc]) {
+ // rotate by x,y so that uStart is (1.0)
+ SkScalar x = SkPoint::DotProduct(uStart, uStop);
+ SkScalar y = SkPoint::CrossProduct(uStart, uStop);
+
+ SkScalar absY = SkScalarAbs(y);
+
+ // check for (effectively) coincident vectors
+ // this can happen if our angle is nearly 0 or nearly 180 (y == 0)
+ // ... we use the dot-prod to distinguish between 0 and 180 (x > 0)
+ if (absY <= SK_ScalarNearlyZero && x > 0 && ((y >= 0 && kCW_SkRotationDirection == dir) ||
+ (y <= 0 && kCCW_SkRotationDirection == dir))) {
+ return 0;
+ }
+
+ if (dir == kCCW_SkRotationDirection) {
+ y = -y;
+ }
+
+ // We decide to use 1-conic per quadrant of a circle. What quadrant does [xy] lie in?
+ // 0 == [0 .. 90)
+ // 1 == [90 ..180)
+ // 2 == [180..270)
+ // 3 == [270..360)
+ //
+ int quadrant = 0;
+ if (0 == y) {
+ quadrant = 2; // 180
+ SkASSERT(SkScalarAbs(x + SK_Scalar1) <= SK_ScalarNearlyZero);
+ } else if (0 == x) {
+ SkASSERT(absY - SK_Scalar1 <= SK_ScalarNearlyZero);
+ quadrant = y > 0 ? 1 : 3; // 90 : 270
+ } else {
+ if (y < 0) {
+ quadrant += 2;
+ }
+ if ((x < 0) != (y < 0)) {
+ quadrant += 1;
+ }
+ }
+
+ const SkPoint quadrantPts[] = {
+ { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, { -1, 0 }, { -1, -1 }, { 0, -1 }, { 1, -1 }
+ };
+ const SkScalar quadrantWeight = SK_ScalarRoot2Over2;
+
+ int conicCount = quadrant;
+ for (int i = 0; i < conicCount; ++i) {
+ dst[i].set(&quadrantPts[i * 2], quadrantWeight);
+ }
+
+    // Now compute any remaining (sub-90-degree) arc for the last conic
+ const SkPoint finalP = { x, y };
+ const SkPoint& lastQ = quadrantPts[quadrant * 2]; // will already be a unit-vector
+ const SkScalar dot = SkVector::DotProduct(lastQ, finalP);
+ if (!SkScalarIsFinite(dot)) {
+ return 0;
+ }
+ SkASSERT(0 <= dot && dot <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot < 1) {
+ SkVector offCurve = { lastQ.x() + x, lastQ.y() + y };
+ // compute the bisector vector, and then rescale to be the off-curve point.
+        // we compute its length from cos(theta/2) = length / 1; using the half-angle identity we get
+        // length = sqrt(2 / (1 + cos(theta))). We already have cos(theta) from computing the dot.
+ // This is nice, since our computed weight is cos(theta/2) as well!
+ //
+ const SkScalar cosThetaOver2 = SkScalarSqrt((1 + dot) / 2);
+ offCurve.setLength(SkScalarInvert(cosThetaOver2));
+ if (!SkPointPriv::EqualsWithinTolerance(lastQ, offCurve)) {
+ dst[conicCount].set(lastQ, offCurve, finalP, cosThetaOver2);
+ conicCount += 1;
+ }
+ }
+
+ // now handle counter-clockwise and the initial unitStart rotation
+ SkMatrix matrix;
+ matrix.setSinCos(uStart.fY, uStart.fX);
+ if (dir == kCCW_SkRotationDirection) {
+ matrix.preScale(SK_Scalar1, -SK_Scalar1);
+ }
+ if (userMatrix) {
+ matrix.postConcat(*userMatrix);
+ }
+ for (int i = 0; i < conicCount; ++i) {
+ matrix.mapPoints(dst[i].fPts, 3);
+ }
+ return conicCount;
+}
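+
+// Usage sketch (illustrative): build the conics for a 135-degree CCW arc
+// starting on the +x axis, e.g. before appending them to a path.
+//
+//   SkVector start = {1, 0};
+//   SkVector stop = {-SK_ScalarRoot2Over2, SK_ScalarRoot2Over2};
+//   SkConic conics[SkConic::kMaxConicsForArc];
+//   int n = SkConic::BuildUnitArc(start, stop, kCCW_SkRotationDirection,
+//                                 nullptr, conics);
+//   // n <= kMaxConicsForArc, and each conics[i] spans at most 90 degrees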
diff --git a/gfx/skia/skia/src/core/SkGeometry.h b/gfx/skia/skia/src/core/SkGeometry.h
new file mode 100644
index 0000000000..23bdbc1c42
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGeometry_DEFINED
+#define SkGeometry_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/private/SkNx.h"
+
+static inline Sk2s from_point(const SkPoint& point) {
+ return Sk2s::Load(&point);
+}
+
+static inline SkPoint to_point(const Sk2s& x) {
+ SkPoint point;
+ x.store(&point);
+ return point;
+}
+
+static Sk2s times_2(const Sk2s& value) {
+ return value + value;
+}
+
+/** Given a quadratic equation Ax^2 + Bx + C = 0, return 0, 1, 2 roots for the
+ equation.
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t);
+SkPoint SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t);
+
+/** Set pt to the point on the src quadratic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent = nullptr);
+
+/** Given a src quadratic bezier, chop it at the specified t value,
+ where 0 < t < 1, and return the two new quadratics in dst:
+ dst[0..2] and dst[2..4]
+*/
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t);
+
+/** Given a src quadratic bezier, chop it at the specified t == 1/2.
+ The new quads are returned in dst[0..2] and dst[2..4]
+*/
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given the 3 coefficients for a quadratic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the quadratic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValues[1]);
+
+/** Given 3 points on a quadratic bezier, chop it into 1, 2 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..2] is the original quad
+ 1 dst[0..2] and dst[2..4] are the two new quads
+*/
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]);
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, if the point of maximum
+ curvature exists on the segment, returns the t value for this
+    point along the curve. Otherwise it returns 0 or 1 (the closer end).
+*/
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]);
+
+/** Given 3 points on a quadratic bezier, divide it into 2 quadratics
+ if the point of maximum curvature exists on the quad segment.
+ Depending on what is returned, dst[] is treated as follows
+ 1 dst[0..2] is the original quad
+ 2 dst[0..2] and dst[2..4] are the two new quads
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, use degree elevation to
+ convert it into the cubic fitting the same curve. The new cubic
+ curve is returned in dst[0..3].
+*/
+SK_API void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Set pt to the point on the src cubic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* locOrNull,
+ SkVector* tangentOrNull, SkVector* curvatureOrNull);
+
+/** Given a src cubic bezier, chop it at the specified t value,
+ where 0 < t < 1, and return the two new cubics in dst:
+ dst[0..3] and dst[3..6]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t);
+
+/** Given a src cubic bezier, chop it at the specified t values,
+ where 0 < t < 1, and return the new cubics in dst:
+ dst[0..3],dst[3..6],...,dst[3*t_count..3*(t_count+1)]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[], const SkScalar t[],
+ int t_count);
+
+/** Given a src cubic bezier, chop it at the specified t == 1/2.
+ The new cubics are returned in dst[0..3] and dst[3..6]
+*/
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]);
+
+/** Given the 4 coefficients for a cubic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the cubic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+ 2 0 < tValues[0] < tValues[1] < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]);
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]);
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]);
+
+/** Given a cubic bezier, return 0, 1, or 2 t-values that represent the
+ inflection points.
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[2]);
+
+/** Return 1 for no chop, 2 for having chopped the cubic at a single
+ inflection point, 3 for having chopped at 2 inflection points.
+ dst will hold the resulting 1, 2, or 3 cubics.
+*/
+int SkChopCubicAtInflections(const SkPoint src[4], SkPoint dst[10]);
+
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]);
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3] = nullptr);
+/** Returns t value of cusp if cubic has one; returns -1 otherwise.
+ */
+SkScalar SkFindCubicCusp(const SkPoint src[4]);
+
+bool SkChopMonoCubicAtX(SkPoint src[4], SkScalar x, SkPoint dst[7]);
+bool SkChopMonoCubicAtY(SkPoint src[4], SkScalar y, SkPoint dst[7]);
+
+enum class SkCubicType {
+ kSerpentine,
+ kLoop,
+ kLocalCusp, // Cusp at a non-infinite parameter value with an inflection at t=infinity.
+ kCuspAtInfinity, // Cusp with a cusp at t=infinity and a local inflection.
+ kQuadratic,
+ kLineOrPoint
+};
+
+static inline bool SkCubicIsDegenerate(SkCubicType type) {
+ switch (type) {
+ case SkCubicType::kSerpentine:
+ case SkCubicType::kLoop:
+ case SkCubicType::kLocalCusp:
+ case SkCubicType::kCuspAtInfinity:
+ return false;
+ case SkCubicType::kQuadratic:
+ case SkCubicType::kLineOrPoint:
+ return true;
+ }
+ SK_ABORT("Invalid SkCubicType");
+}
+
+static inline const char* SkCubicTypeName(SkCubicType type) {
+ switch (type) {
+ case SkCubicType::kSerpentine: return "kSerpentine";
+ case SkCubicType::kLoop: return "kLoop";
+ case SkCubicType::kLocalCusp: return "kLocalCusp";
+ case SkCubicType::kCuspAtInfinity: return "kCuspAtInfinity";
+ case SkCubicType::kQuadratic: return "kQuadratic";
+ case SkCubicType::kLineOrPoint: return "kLineOrPoint";
+ }
+ SK_ABORT("Invalid SkCubicType");
+}
+
+/** Returns the cubic classification.
+
+ t[],s[] are set to the two homogeneous parameter values at which points the lines L & M
+ intersect with K, sorted from smallest to largest and oriented so positive values of the
+ implicit are on the "left" side. For a serpentine curve they are the inflection points. For a
+ loop they are the double point. For a local cusp, they are both equal and denote the cusp point.
+ For a cusp at an infinite parameter value, one will be the local inflection point and the other
+ +inf (t,s = 1,0). If the curve is degenerate (i.e. quadratic or linear) they are both set to a
+ parameter value of +inf (t,s = 1,0).
+
+ d[] is filled with the cubic inflection function coefficients. See "Resolution Independent
+ Curve Rendering using Programmable Graphics Hardware", 4.2 Curve Categorization:
+
+ If the input points contain infinities or NaN, the return values are undefined.
+
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+*/
+SkCubicType SkClassifyCubic(const SkPoint p[4], double t[2] = nullptr, double s[2] = nullptr,
+ double d[4] = nullptr);
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum SkRotationDirection {
+ kCW_SkRotationDirection,
+ kCCW_SkRotationDirection
+};
+
+struct SkConic {
+ SkConic() {}
+ SkConic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+ SkConic(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, sizeof(fPts));
+ fW = w;
+ }
+
+ SkPoint fPts[3];
+ SkScalar fW;
+
+ void set(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, 3 * sizeof(SkPoint));
+ fW = w;
+ }
+
+ void set(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+
+ /**
+ * Given a t-value [0...1] return its position and/or tangent.
+ * If pos is not null, return its position at the t-value.
+ * If tangent is not null, return its tangent at the t-value. NOTE the
+ * tangent value's length is arbitrary, and only its direction should
+ * be used.
+ */
+ void evalAt(SkScalar t, SkPoint* pos, SkVector* tangent = nullptr) const;
+ bool SK_WARN_UNUSED_RESULT chopAt(SkScalar t, SkConic dst[2]) const;
+ void chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const;
+ void chop(SkConic dst[2]) const;
+
+ SkPoint evalAt(SkScalar t) const;
+ SkVector evalTangentAt(SkScalar t) const;
+
+ void computeAsQuadError(SkVector* err) const;
+ bool asQuadTol(SkScalar tol) const;
+
+ /**
+ * return the power-of-2 number of quads needed to approximate this conic
+ * with a sequence of quads. Will be >= 0.
+ */
+ int SK_API computeQuadPOW2(SkScalar tol) const;
+
+ /**
+     * Chop this conic into N quads, stored contiguously in pts[], where
+ * N = 1 << pow2. The amount of storage needed is (1 + 2 * N)
+ */
+ int SK_API SK_WARN_UNUSED_RESULT chopIntoQuadsPOW2(SkPoint pts[], int pow2) const;
+
+ bool findXExtrema(SkScalar* t) const;
+ bool findYExtrema(SkScalar* t) const;
+ bool chopAtXExtrema(SkConic dst[2]) const;
+ bool chopAtYExtrema(SkConic dst[2]) const;
+
+ void computeTightBounds(SkRect* bounds) const;
+ void computeFastBounds(SkRect* bounds) const;
+
+ /** Find the parameter value where the conic takes on its maximum curvature.
+ *
+ * @param t output scalar for max curvature. Will be unchanged if
+ * max curvature outside 0..1 range.
+ *
+ * @return true if max curvature found inside 0..1 range, false otherwise
+ */
+// bool findMaxCurvature(SkScalar* t) const; // unimplemented
+
+ static SkScalar TransformW(const SkPoint[3], SkScalar w, const SkMatrix&);
+
+ enum {
+ kMaxConicsForArc = 5
+ };
+ static int BuildUnitArc(const SkVector& start, const SkVector& stop, SkRotationDirection,
+ const SkMatrix*, SkConic conics[kMaxConicsForArc]);
+};
+
+// inline helpers are contained in a namespace to avoid external leakage to fragile SkNx members
+namespace { // NOLINT(google-build-namespaces)
+
+/**
+ * use for : eval(t) == A * t^2 + B * t + C
+ */
+struct SkQuadCoeff {
+ SkQuadCoeff() {}
+
+ SkQuadCoeff(const Sk2s& A, const Sk2s& B, const Sk2s& C)
+ : fA(A)
+ , fB(B)
+ , fC(C)
+ {
+ }
+
+ SkQuadCoeff(const SkPoint src[3]) {
+ fC = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ fB = times_2(P1 - fC);
+ fA = P2 - times_2(P1) + fC;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ return eval(tt);
+ }
+
+ Sk2s eval(const Sk2s& tt) {
+ return (fA * tt + fB) * tt + fC;
+ }
+
+ Sk2s fA;
+ Sk2s fB;
+ Sk2s fC;
+};
+
+struct SkConicCoeff {
+ SkConicCoeff(const SkConic& conic) {
+ Sk2s p0 = from_point(conic.fPts[0]);
+ Sk2s p1 = from_point(conic.fPts[1]);
+ Sk2s p2 = from_point(conic.fPts[2]);
+ Sk2s ww(conic.fW);
+
+ Sk2s p1w = p1 * ww;
+ fNumer.fC = p0;
+ fNumer.fA = p2 - times_2(p1w) + p0;
+ fNumer.fB = times_2(p1w - p0);
+
+ fDenom.fC = Sk2s(1);
+ fDenom.fB = times_2(ww - fDenom.fC);
+ fDenom.fA = Sk2s(0) - fDenom.fB;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ Sk2s numer = fNumer.eval(tt);
+ Sk2s denom = fDenom.eval(tt);
+ return numer / denom;
+ }
+
+ SkQuadCoeff fNumer;
+ SkQuadCoeff fDenom;
+};
+
+struct SkCubicCoeff {
+ SkCubicCoeff(const SkPoint src[4]) {
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+ Sk2s three(3);
+ fA = P3 + three * (P1 - P2) - P0;
+ fB = three * (P2 - times_2(P1) + P0);
+ fC = three * (P1 - P0);
+ fD = P0;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ return eval(tt);
+ }
+
+ Sk2s eval(const Sk2s& t) {
+ return ((fA * t + fB) * t + fC) * t + fD;
+ }
+
+ Sk2s fA;
+ Sk2s fB;
+ Sk2s fC;
+ Sk2s fD;
+};
+
+}
+
+#include "include/private/SkTemplates.h"
+
+/**
+ * Helper class to allocate storage for approximating a conic with N quads.
+ */
+class SkAutoConicToQuads {
+public:
+ SkAutoConicToQuads() : fQuadCount(0) {}
+
+ /**
+ * Given a conic and a tolerance, return the array of points for the
+ * approximating quad(s). Call countQuads() to know the number of quads
+ * represented in these points.
+ *
+ * The quads are allocated to share end-points. e.g. if there are 4 quads,
+ * there will be 9 points allocated as follows
+ * quad[0] == pts[0..2]
+ * quad[1] == pts[2..4]
+ * quad[2] == pts[4..6]
+ * quad[3] == pts[6..8]
+ */
+ const SkPoint* computeQuads(const SkConic& conic, SkScalar tol) {
+ int pow2 = conic.computeQuadPOW2(tol);
+ fQuadCount = 1 << pow2;
+ SkPoint* pts = fStorage.reset(1 + 2 * fQuadCount);
+ fQuadCount = conic.chopIntoQuadsPOW2(pts, pow2);
+ return pts;
+ }
+
+ const SkPoint* computeQuads(const SkPoint pts[3], SkScalar weight,
+ SkScalar tol) {
+ SkConic conic;
+ conic.set(pts, weight);
+ return computeQuads(conic, tol);
+ }
+
+ int countQuads() const { return fQuadCount; }
+
+private:
+ enum {
+ kQuadCount = 8, // should handle most conics
+ kPointCount = 1 + 2 * kQuadCount,
+ };
+ SkAutoSTMalloc<kPointCount, SkPoint> fStorage;
+ int fQuadCount; // #quads for current usage
+};
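+
+// Usage sketch (illustrative): approximate a conic with quads at a chosen
+// tolerance; the tolerance value here is an arbitrary example.
+//
+//   SkAutoConicToQuads quadder;
+//   const SkPoint* pts = quadder.computeQuads(conicPts, weight, 0.25f);
+//   for (int i = 0; i < quadder.countQuads(); ++i) {
+//       const SkPoint* quad = pts + 2 * i;  // quad[0..2]; ends are shared
+//   }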
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
new file mode 100644
index 0000000000..0c60c91169
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "include/private/SkOnce.h"
+
+void SkFlattenable::RegisterFlattenablesIfNeeded() {
+ static SkOnce once;
+ once([]{
+ SkFlattenable::PrivateInitializer::InitEffects();
+ SkFlattenable::PrivateInitializer::InitImageFilters();
+ SkFlattenable::Finalize();
+ });
+}
diff --git a/gfx/skia/skia/src/core/SkGlyph.cpp b/gfx/skia/skia/src/core/SkGlyph.cpp
new file mode 100644
index 0000000000..35218f4e5a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyph.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyph.h"
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkScalerContext.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+SkMask SkGlyph::mask() const {
+ // getMetrics had to be called.
+ SkASSERT(fMaskFormat != MASK_FORMAT_UNKNOWN);
+
+ SkMask mask;
+ mask.fImage = (uint8_t*)fImage;
+ mask.fBounds.setXYWH(fLeft, fTop, fWidth, fHeight);
+ mask.fRowBytes = this->rowBytes();
+ mask.fFormat = static_cast<SkMask::Format>(fMaskFormat);
+ return mask;
+}
+
+SkMask SkGlyph::mask(SkPoint position) const {
+ SkMask answer = this->mask();
+ answer.fBounds.offset(SkScalarFloorToInt(position.x()), SkScalarFloorToInt(position.y()));
+ return answer;
+}
+
+void SkGlyph::zeroMetrics() {
+ fAdvanceX = 0;
+ fAdvanceY = 0;
+ fWidth = 0;
+ fHeight = 0;
+ fTop = 0;
+ fLeft = 0;
+}
+
+static size_t bits_to_bytes(size_t bits) {
+ return (bits + 7) >> 3;
+}
+
+static size_t format_alignment(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kA8_Format:
+ case SkMask::k3D_Format:
+ case SkMask::kSDF_Format:
+ return alignof(uint8_t);
+ case SkMask::kARGB32_Format:
+ return alignof(uint32_t);
+ case SkMask::kLCD16_Format:
+ return alignof(uint16_t);
+ default:
+ SK_ABORT("Unknown mask format.");
+ break;
+ }
+ return 0;
+}
+
+static size_t format_rowbytes(int width, SkMask::Format format) {
+ return format == SkMask::kBW_Format ? bits_to_bytes(width)
+ : width * format_alignment(format);
+}
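+
+// For example (illustrative): a 10-pixel-wide glyph needs 2 bytes per row in
+// kBW_Format (10 bits rounded up to whole bytes), 10 bytes in kA8_Format,
+// 20 in kLCD16_Format, and 40 in kARGB32_Format.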
+
+SkGlyph::SkGlyph(const SkGlyphPrototype& p)
+ : fWidth{p.width}
+ , fHeight{p.height}
+ , fTop{p.top}
+ , fLeft{p.left}
+ , fAdvanceX{p.advanceX}
+ , fAdvanceY{p.advanceY}
+ , fMaskFormat{(uint8_t)p.maskFormat}
+ , fForceBW{p.forceBW}
+ , fID{p.id}
+ {}
+
+size_t SkGlyph::formatAlignment() const {
+ return format_alignment(this->maskFormat());
+}
+
+size_t SkGlyph::allocImage(SkArenaAlloc* alloc) {
+ SkASSERT(!this->isEmpty());
+ auto size = this->imageSize();
+ fImage = alloc->makeBytesAlignedTo(size, this->formatAlignment());
+
+ return size;
+}
+
+bool SkGlyph::setImage(SkArenaAlloc* alloc, SkScalerContext* scalerContext) {
+ if (!this->setImageHasBeenCalled()) {
+ // It used to be that getImage() could change the fMaskFormat. Extra checking to make
+ // sure there are no regressions.
+ SkDEBUGCODE(SkMask::Format oldFormat = this->maskFormat());
+ this->allocImage(alloc);
+ scalerContext->getImage(*this);
+ SkASSERT(oldFormat == this->maskFormat());
+ return true;
+ }
+ return false;
+}
+
+bool SkGlyph::setImage(SkArenaAlloc* alloc, const void* image) {
+ if (!this->setImageHasBeenCalled()) {
+ this->allocImage(alloc);
+ memcpy(fImage, image, this->imageSize());
+ return true;
+ }
+ return false;
+}
+
+bool SkGlyph::setMetricsAndImage(SkArenaAlloc* alloc, const SkGlyph& from) {
+ if (fImage == nullptr) {
+ fAdvanceX = from.fAdvanceX;
+ fAdvanceY = from.fAdvanceY;
+ fWidth = from.fWidth;
+ fHeight = from.fHeight;
+ fTop = from.fTop;
+ fLeft = from.fLeft;
+ fForceBW = from.fForceBW;
+ fMaskFormat = from.fMaskFormat;
+ return this->setImage(alloc, from.image());
+ }
+ return false;
+}
+
+size_t SkGlyph::rowBytes() const {
+ return format_rowbytes(fWidth, (SkMask::Format)fMaskFormat);
+}
+
+size_t SkGlyph::rowBytesUsingFormat(SkMask::Format format) const {
+ return format_rowbytes(fWidth, format);
+}
+
+size_t SkGlyph::imageSize() const {
+ if (this->isEmpty() || this->imageTooLarge()) { return 0; }
+
+ size_t size = this->rowBytes() * fHeight;
+
+ if (fMaskFormat == SkMask::k3D_Format) {
+ size *= 3;
+ }
+
+ return size;
+}
+
+void SkGlyph::installPath(SkArenaAlloc* alloc, const SkPath* path) {
+ SkASSERT(fPathData == nullptr);
+ SkASSERT(!this->setPathHasBeenCalled());
+ fPathData = alloc->make<SkGlyph::PathData>();
+ if (path != nullptr) {
+ fPathData->fPath = *path;
+ fPathData->fPath.updateBoundsCache();
+ fPathData->fPath.getGenerationID();
+ fPathData->fHasPath = true;
+ }
+}
+
+bool SkGlyph::setPath(SkArenaAlloc* alloc, SkScalerContext* scalerContext) {
+ if (!this->setPathHasBeenCalled()) {
+ SkPath path;
+ if (scalerContext->getPath(this->getPackedID(), &path)) {
+ this->installPath(alloc, &path);
+ } else {
+ this->installPath(alloc, nullptr);
+ }
+ return this->path() != nullptr;
+ }
+
+ return false;
+}
+
+bool SkGlyph::setPath(SkArenaAlloc* alloc, const SkPath* path) {
+ if (!this->setPathHasBeenCalled()) {
+ this->installPath(alloc, path);
+ return this->path() != nullptr;
+ }
+ return false;
+}
+
+const SkPath* SkGlyph::path() const {
+ // setPath must have been called previously.
+ SkASSERT(this->setPathHasBeenCalled());
+ if (fPathData->fHasPath) {
+ return &fPathData->fPath;
+ }
+ return nullptr;
+}
+
+static std::tuple<SkScalar, SkScalar> calculate_path_gap(
+ SkScalar topOffset, SkScalar bottomOffset, const SkPath& path) {
+
+ // Left and Right of an ever expanding gap around the path.
+ SkScalar left = SK_ScalarMax,
+ right = SK_ScalarMin;
+ auto expandGap = [&left, &right](SkScalar v) {
+ left = SkTMin(left, v);
+ right = SkTMax(right, v);
+ };
+
+ // Handle all the different verbs for the path.
+ SkPoint pts[4];
+ auto addLine = [&expandGap, &pts](SkScalar offset) {
+ SkScalar t = sk_ieee_float_divide(offset - pts[0].fY, pts[1].fY - pts[0].fY);
+ if (0 <= t && t < 1) { // this handles divide by zero above
+ expandGap(pts[0].fX + t * (pts[1].fX - pts[0].fX));
+ }
+ };
+
+ auto addQuad = [&expandGap, &pts](SkScalar offset) {
+ SkDQuad quad;
+ quad.set(pts);
+ double roots[2];
+ int count = quad.horizontalIntersect(offset, roots);
+ while (--count >= 0) {
+ expandGap(quad.ptAtT(roots[count]).asSkPoint().fX);
+ }
+ };
+
+ auto addCubic = [&expandGap, &pts](SkScalar offset) {
+ SkDCubic cubic;
+ cubic.set(pts);
+ double roots[3];
+ int count = cubic.horizontalIntersect(offset, roots);
+ while (--count >= 0) {
+ expandGap(cubic.ptAtT(roots[count]).asSkPoint().fX);
+ }
+ };
+
+ // Handle when a verb's points are in the gap between top and bottom.
+ auto addPts = [&expandGap, &pts, topOffset, bottomOffset](int ptCount) {
+ for (int i = 0; i < ptCount; ++i) {
+ if (topOffset < pts[i].fY && pts[i].fY < bottomOffset) {
+ expandGap(pts[i].fX);
+ }
+ }
+ };
+
+ SkPath::Iter iter(path, false);
+ SkPath::Verb verb;
+ while (SkPath::kDone_Verb != (verb = iter.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb: {
+ break;
+ }
+ case SkPath::kLine_Verb: {
+ addLine(topOffset);
+ addLine(bottomOffset);
+ addPts(2);
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ SkScalar quadTop = SkTMin(SkTMin(pts[0].fY, pts[1].fY), pts[2].fY);
+ if (bottomOffset < quadTop) { break; }
+ SkScalar quadBottom = SkTMax(SkTMax(pts[0].fY, pts[1].fY), pts[2].fY);
+ if (topOffset > quadBottom) { break; }
+ addQuad(topOffset);
+ addQuad(bottomOffset);
+ addPts(3);
+ break;
+ }
+ case SkPath::kConic_Verb: {
+ SkASSERT(0); // no support for text composed of conics
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+                SkScalar cubicTop =
+                        SkTMin(SkTMin(SkTMin(pts[0].fY, pts[1].fY), pts[2].fY), pts[3].fY);
+                if (bottomOffset < cubicTop) { break; }
+                SkScalar cubicBottom =
+                        SkTMax(SkTMax(SkTMax(pts[0].fY, pts[1].fY), pts[2].fY), pts[3].fY);
+                if (topOffset > cubicBottom) { break; }
+ addCubic(topOffset);
+ addCubic(bottomOffset);
+ addPts(4);
+ break;
+ }
+ case SkPath::kClose_Verb: {
+ break;
+ }
+ default: {
+ SkASSERT(0);
+ break;
+ }
+ }
+ }
+
+ return std::tie(left, right);
+}
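+
+// In other words (sketch): the lambdas above intersect every path segment
+// with the two horizontal lines y = topOffset and y = bottomOffset, and also
+// take any points lying strictly between them; [left, right] grows to cover
+// every such x-coordinate. A path with nothing in that band leaves the
+// interval inverted (SK_ScalarMax, SK_ScalarMin), which the caller below
+// treats as "no intercepts".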
+
+void SkGlyph::ensureIntercepts(const SkScalar* bounds, SkScalar scale, SkScalar xPos,
+ SkScalar* array, int* count, SkArenaAlloc* alloc) {
+
+ auto offsetResults = [scale, xPos](
+            const SkGlyph::Intercept* intercept, SkScalar* array, int* count) {
+ if (array) {
+ array += *count;
+ for (int index = 0; index < 2; index++) {
+ *array++ = intercept->fInterval[index] * scale + xPos;
+ }
+ }
+ *count += 2;
+ };
+
+ const SkGlyph::Intercept* match =
+ [this](const SkScalar bounds[2]) -> const SkGlyph::Intercept* {
+ if (!fPathData) {
+ return nullptr;
+ }
+ const SkGlyph::Intercept* intercept = fPathData->fIntercept;
+ while (intercept) {
+ if (bounds[0] == intercept->fBounds[0] && bounds[1] == intercept->fBounds[1]) {
+ return intercept;
+ }
+ intercept = intercept->fNext;
+ }
+ return nullptr;
+ }(bounds);
+
+ if (match) {
+ if (match->fInterval[0] < match->fInterval[1]) {
+ offsetResults(match, array, count);
+ }
+ return;
+ }
+
+ SkGlyph::Intercept* intercept = alloc->make<SkGlyph::Intercept>();
+ intercept->fNext = fPathData->fIntercept;
+ intercept->fBounds[0] = bounds[0];
+ intercept->fBounds[1] = bounds[1];
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ fPathData->fIntercept = intercept;
+ const SkPath* path = &(fPathData->fPath);
+ const SkRect& pathBounds = path->getBounds();
+ if (pathBounds.fBottom < bounds[0] || bounds[1] < pathBounds.fTop) {
+ return;
+ }
+
+ std::tie(intercept->fInterval[0], intercept->fInterval[1])
+ = calculate_path_gap(bounds[0], bounds[1], *path);
+
+ if (intercept->fInterval[0] >= intercept->fInterval[1]) {
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ return;
+ }
+ offsetResults(intercept, array, count);
+}
diff --git a/gfx/skia/skia/src/core/SkGlyph.h b/gfx/skia/skia/src/core/SkGlyph.h
new file mode 100644
index 0000000000..46ba710897
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyph.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyph_DEFINED
+#define SkGlyph_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMask.h"
+
+class SkArenaAlloc;
+class SkStrike;
+class SkScalerContext;
+
+// needs to be != to any valid SkMask::Format
+#define MASK_FORMAT_UNKNOWN (0xFF)
+#define MASK_FORMAT_JUST_ADVANCE MASK_FORMAT_UNKNOWN
+
+// A combination of SkGlyphID and sub-pixel position information.
+struct SkPackedGlyphID {
+ static constexpr uint32_t kImpossibleID = ~0u;
+ enum {
+ // Lengths
+ kGlyphIDLen = 16u,
+ kSubPixelPosLen = 2u,
+
+ // Bit positions
+ kGlyphID = 0u,
+ kSubPixelY = kGlyphIDLen,
+ kSubPixelX = kGlyphIDLen + kSubPixelPosLen,
+ kEndData = kGlyphIDLen + 2 * kSubPixelPosLen,
+
+ // Masks
+ kGlyphIDMask = (1u << kGlyphIDLen) - 1,
+ kSubPixelPosMask = (1u << kSubPixelPosLen) - 1,
+ kMaskAll = (1u << kEndData) - 1,
+
+        // Location of the sub-pixel info in a fixed-point number.
+ kFixedPointBinaryPointPos = 16u,
+ kFixedPointSubPixelPosBits = kFixedPointBinaryPointPos - kSubPixelPosLen,
+ };
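+    // The resulting 20-bit layout, high to low, is [subX:2][subY:2][glyphID:16].
+    // For example, glyph 7 with subX = 1 and subY = 0 packs to (1 << 18) | 7 = 0x40007.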
+
+ constexpr explicit SkPackedGlyphID(SkGlyphID glyphID)
+ : fID{glyphID} { }
+
+ constexpr SkPackedGlyphID(SkGlyphID glyphID, SkFixed x, SkFixed y)
+ : fID {PackIDXY(glyphID, x, y)} {
+ SkASSERT(fID != kImpossibleID);
+ }
+
+ constexpr SkPackedGlyphID(SkGlyphID code, SkIPoint pt)
+ : SkPackedGlyphID(code, pt.fX, pt.fY) { }
+
+ constexpr SkPackedGlyphID() : fID{kImpossibleID} {}
+
+ bool operator==(const SkPackedGlyphID& that) const {
+ return fID == that.fID;
+ }
+ bool operator!=(const SkPackedGlyphID& that) const {
+ return !(*this == that);
+ }
+ bool operator<(SkPackedGlyphID that) const {
+ return this->fID < that.fID;
+ }
+
+ uint32_t code() const {
+ return fID & kGlyphIDMask;
+ }
+
+ uint32_t value() const {
+ return fID;
+ }
+
+ SkFixed getSubXFixed() const {
+ return this->subToFixed(kSubPixelX);
+ }
+
+ SkFixed getSubYFixed() const {
+ return this->subToFixed(kSubPixelY);
+ }
+
+ uint32_t hash() const {
+ return SkChecksum::CheapMix(fID);
+ }
+
+ SkString dump() const {
+ SkString str;
+        str.appendf("code: %d, x: %d, y: %d", code(), getSubXFixed(), getSubYFixed());
+ return str;
+ }
+
+private:
+ static constexpr uint32_t PackIDXY(SkGlyphID glyphID, SkFixed x, SkFixed y) {
+ return (FixedToSub(x) << kSubPixelX)
+ | (FixedToSub(y) << kSubPixelY)
+ | glyphID;
+ }
+
+ static constexpr uint32_t FixedToSub(SkFixed n) {
+ return ((uint32_t)n >> kFixedPointSubPixelPosBits) & kSubPixelPosMask;
+ }
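+    // FixedToSub keeps only the top two fractional bits of a 16.16 fixed-point value, so the
+    // sub-pixel position is quantized to quarter pixels: e.g. 0.75 (0xC000) >> 14 == 3.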
+
+ constexpr SkFixed subToFixed(uint32_t subPixelPosBit) const {
+ uint32_t subPixelPosition = (fID >> subPixelPosBit) & kSubPixelPosMask;
+ return subPixelPosition << kFixedPointSubPixelPosBits;
+ }
+
+ uint32_t fID;
+};
+
+struct SkGlyphPrototype;
+
+class SkGlyph {
+public:
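+    // kSubpixelRound is SK_FixedHalf >> 2 == 0x2000, i.e. 1/8 pixel in 16.16 fixed point: half
+    // the distance between adjacent quarter-pixel buckets, the usual round-to-nearest bias.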
+ static constexpr SkFixed kSubpixelRound = SK_FixedHalf >> SkPackedGlyphID::kSubPixelPosLen;
+
+ // SkGlyph() is used for testing.
+ constexpr SkGlyph() : fID{SkPackedGlyphID()} { }
+ constexpr explicit SkGlyph(SkPackedGlyphID id) : fID{id} { }
+ explicit SkGlyph(const SkGlyphPrototype& p);
+
+ SkVector advanceVector() const { return SkVector{fAdvanceX, fAdvanceY}; }
+ SkScalar advanceX() const { return fAdvanceX; }
+ SkScalar advanceY() const { return fAdvanceY; }
+
+ SkGlyphID getGlyphID() const { return fID.code(); }
+ SkPackedGlyphID getPackedID() const { return fID; }
+ SkFixed getSubXFixed() const { return fID.getSubXFixed(); }
+ SkFixed getSubYFixed() const { return fID.getSubYFixed(); }
+
+ size_t rowBytes() const;
+ size_t rowBytesUsingFormat(SkMask::Format format) const;
+
+    // Call this to set all of the metrics fields to 0 (e.g. if the scaler
+    // encounters an error measuring a glyph). Note: this does not alter the
+    // fImage, fPathData, fID, or fMaskFormat fields.
+ void zeroMetrics();
+
+ SkMask mask() const;
+
+ SkMask mask(SkPoint position) const;
+
+ // Image
+ // If we haven't already tried to associate an image with this glyph
+ // (i.e. setImageHasBeenCalled() returns false), then use the
+ // SkScalerContext or const void* argument to set the image.
+ bool setImage(SkArenaAlloc* alloc, SkScalerContext* scalerContext);
+ bool setImage(SkArenaAlloc* alloc, const void* image);
+
+    // Merge the 'from' glyph into this glyph using alloc to allocate image data. Return true if
+ // image data was allocated. If the image for this glyph has not been initialized, then copy
+ // the width, height, top, left, format, and image into this glyph making a copy of the image
+ // using the alloc.
+ bool setMetricsAndImage(SkArenaAlloc* alloc, const SkGlyph& from);
+
+    // Returns true if the glyph's image has been set, or if the glyph needs no image.
+ bool setImageHasBeenCalled() const {
+ return fImage != nullptr || this->isEmpty() || this->imageTooLarge();
+ }
+
+    // Return a pointer to the image if it exists, otherwise return nullptr.
+ const void* image() const { SkASSERT(this->setImageHasBeenCalled()); return fImage; }
+
+ // Return the size of the image.
+ size_t imageSize() const;
+
+ // Path
+ // If we haven't already tried to associate a path to this glyph
+ // (i.e. setPathHasBeenCalled() returns false), then use the
+ // SkScalerContext or SkPath argument to try to do so. N.B. this
+ // may still result in no path being associated with this glyph,
+ // e.g. if you pass a null SkPath or the typeface is bitmap-only.
+ //
+ // This setPath() call is sticky... once you call it, the glyph
+ // stays in its state permanently, ignoring any future calls.
+ //
+ // Returns true if this is the first time you called setPath()
+ // and there actually is a path; call path() to get it.
+ bool setPath(SkArenaAlloc* alloc, SkScalerContext* scalerContext);
+ bool setPath(SkArenaAlloc* alloc, const SkPath* path);
+
+    // Returns true once setPath() has been called, even if no path was produced.
+ bool setPathHasBeenCalled() const { return fPathData != nullptr; }
+
+ // Return a pointer to the path if it exists, otherwise return nullptr. Only works if the
+ // path was previously set.
+ const SkPath* path() const;
+
+ // Format
+ bool isColor() const { return fMaskFormat == SkMask::kARGB32_Format; }
+ SkMask::Format maskFormat() const { return static_cast<SkMask::Format>(fMaskFormat); }
+ size_t formatAlignment() const;
+
+ // Bounds
+ int maxDimension() const { return std::max(fWidth, fHeight); }
+ SkIRect iRect() const { return SkIRect::MakeXYWH(fLeft, fTop, fWidth, fHeight); }
+ SkRect rect() const { return SkRect::MakeXYWH(fLeft, fTop, fWidth, fHeight); }
+ int left() const { return fLeft; }
+ int top() const { return fTop; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ bool isEmpty() const {
+ // fHeight == 0 -> fWidth == 0;
+ SkASSERT(fHeight != 0 || fWidth == 0);
+ return fWidth == 0;
+ }
+ bool imageTooLarge() const { return fWidth >= kMaxGlyphWidth; }
+
+    // Make sure that the intercept information for the given bounds is computed and cached on
+    // the glyph, then report it through array and count.
+ // * bounds - either end of the gap for the character.
+ // * scale, xPos - information about how wide the gap is.
+ // * array - accumulated gaps for many characters if not null.
+ // * count - the number of gaps.
+ void ensureIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkScalar* array, int* count, SkArenaAlloc* alloc);
+
+private:
+ // There are two sides to an SkGlyph, the scaler side (things that create glyph data) have
+ // access to all the fields. Scalers are assumed to maintain all the SkGlyph invariants. The
+ // consumer side has a tighter interface.
+ friend class RandomScalerContext;
+ friend class SkScalerContext;
+ friend class SkScalerContextProxy;
+ friend class SkScalerContext_Empty;
+ friend class SkScalerContext_FreeType;
+ friend class SkScalerContext_FreeType_Base;
+ friend class SkScalerContext_CairoFT;
+ friend class SkScalerContext_DW;
+ friend class SkScalerContext_GDI;
+ friend class SkScalerContext_Mac;
+ friend class SkStrikeClient;
+ friend class SkStrikeServer;
+ friend class SkTestScalerContext;
+ friend class SkTestSVGScalerContext;
+ friend class TestSVGTypeface;
+ friend class TestTypeface;
+
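+    // 8192 pixels; imageTooLarge() treats any glyph at least this wide as having no image.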
+ static constexpr uint16_t kMaxGlyphWidth = 1u << 13u;
+
+ // Support horizontal and vertical skipping strike-through / underlines.
+ // The caller walks the linked list looking for a match. For a horizontal underline,
+ // the fBounds contains the top and bottom of the underline. The fInterval pair contains the
+    // beginning and end of the intersection of the bounds and the glyph's path.
+ // If interval[0] >= interval[1], no intersection was found.
+ struct Intercept {
+ Intercept* fNext;
+ SkScalar fBounds[2]; // for horz underlines, the boundaries in Y
+ SkScalar fInterval[2]; // the outside intersections of the axis and the glyph
+ };
+
+ struct PathData {
+ Intercept* fIntercept{nullptr};
+ SkPath fPath;
+ bool fHasPath{false};
+ };
+
+ size_t allocImage(SkArenaAlloc* alloc);
+
+ // path == nullptr indicates that there is no path.
+ void installPath(SkArenaAlloc* alloc, const SkPath* path);
+
+ // The width and height of the glyph mask.
+ uint16_t fWidth = 0,
+ fHeight = 0;
+
+    // The offset from the glyph's origin on the baseline to the top left of the glyph mask.
+ int16_t fTop = 0,
+ fLeft = 0;
+
+ // fImage must remain null if the glyph is empty or if width > kMaxGlyphWidth.
+ void* fImage = nullptr;
+
+ // Path data has tricky state. If the glyph isEmpty, then fPathData should always be nullptr,
+ // else if fPathData is not null, then a path has been requested. The fPath field of fPathData
+ // may still be null after the request meaning that there is no path for this glyph.
+ PathData* fPathData = nullptr;
+
+ // The advance for this glyph.
+ float fAdvanceX = 0,
+ fAdvanceY = 0;
+
+ // This is a combination of SkMask::Format and SkGlyph state. The SkGlyph can be in one of two
+ // states, just the advances have been calculated, and all the metrics are available. The
+ // illegal mask format is used to signal that only the advances are available.
+ uint8_t fMaskFormat = MASK_FORMAT_UNKNOWN;
+
+ // Used by the DirectWrite scaler to track state.
+ int8_t fForceBW = 0;
+
+ const SkPackedGlyphID fID;
+};
+
+struct SkGlyphPrototype {
+ SkPackedGlyphID id;
+
+ float advanceX = 0,
+ advanceY = 0;
+
+ // The width and height of the glyph mask.
+ uint16_t width = 0,
+ height = 0;
+
+    // The offset from the glyph's origin on the baseline to the top left of the glyph mask.
+ int16_t left = 0,
+ top = 0;
+
+ SkMask::Format maskFormat = SkMask::kBW_Format;
+
+ bool forceBW = false;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlyphBuffer.cpp b/gfx/skia/skia/src/core/SkGlyphBuffer.cpp
new file mode 100644
index 0000000000..a3ef7fb804
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphBuffer.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyphBuffer.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkStrikeForGPU.h"
+
+void SkDrawableGlyphBuffer::ensureSize(size_t size) {
+ if (size > fMaxSize) {
+ fMultiBuffer.reset(size);
+ fPositions.reset(size);
+ fMaxSize = size;
+ }
+
+ fInputSize = 0;
+ fDrawableSize = 0;
+}
+
+void SkDrawableGlyphBuffer::startSource(
+ const SkZip<const SkGlyphID, const SkPoint>& source, SkPoint origin) {
+ fInputSize = source.size();
+ fDrawableSize = 0;
+
+ // Map all the positions.
+ auto positions = source.get<1>();
+ SkMatrix::MakeTrans(origin.x(), origin.y()).mapPoints(
+ fPositions, positions.data(), positions.size());
+
+ // Convert from SkGlyphIDs to SkPackedGlyphIDs.
+ SkGlyphVariant* packedIDCursor = fMultiBuffer;
+ for (auto t : source) {
+ *packedIDCursor++ = SkPackedGlyphID{std::get<0>(t)};
+ }
+ SkDEBUGCODE(fPhase = kInput);
+}
+
+void SkDrawableGlyphBuffer::startDevice(
+ const SkZip<const SkGlyphID, const SkPoint>& source,
+ SkPoint origin, const SkMatrix& viewMatrix,
+ const SkGlyphPositionRoundingSpec& roundingSpec) {
+ fInputSize = source.size();
+ fDrawableSize = 0;
+
+ // Map the positions including subpixel position.
+ auto positions = source.get<1>();
+ SkMatrix matrix = viewMatrix;
+ matrix.preTranslate(origin.x(), origin.y());
+ SkPoint halfSampleFreq = roundingSpec.halfAxisSampleFreq;
+ matrix.postTranslate(halfSampleFreq.x(), halfSampleFreq.y());
+ matrix.mapPoints(fPositions, positions.data(), positions.size());
+
+ // Mask for controlling axis alignment.
+ SkIPoint mask = roundingSpec.ignorePositionMask;
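+    // The half-sample-frequency bias above plus this mask implement round-to-nearest for each
+    // coordinate's sub-pixel bucket. Assuming quarter-pixel positioning, the bias is 1/8, so
+    // x = 0.30 becomes 0.425, which masks down to the 0.25 bucket.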
+
+ // Convert glyph ids and positions to packed glyph ids.
+ SkZip<const SkGlyphID, const SkPoint> withMappedPos =
+ SkMakeZip(source.get<0>(), fPositions.get());
+ SkGlyphVariant* packedIDCursor = fMultiBuffer;
+ for (auto t : withMappedPos) {
+ SkGlyphID glyphID; SkPoint pos;
+ std::tie(glyphID, pos) = t;
+ SkFixed subX = SkScalarToFixed(pos.x()) & mask.x(),
+ subY = SkScalarToFixed(pos.y()) & mask.y();
+ *packedIDCursor++ = SkPackedGlyphID{glyphID, subX, subY};
+ }
+ SkDEBUGCODE(fPhase = kInput);
+}
+
+void SkDrawableGlyphBuffer::reset() {
+ SkDEBUGCODE(fPhase = kReset);
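+    // Keep modest buffers around for reuse, but release unusually large ones so a single big
+    // draw does not pin that memory for the lifetime of this object.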
+ if (fMaxSize > 200) {
+ fMultiBuffer.reset();
+ fPositions.reset();
+ fMaxSize = 0;
+ }
+ fInputSize = 0;
+ fDrawableSize = 0;
+}
+
diff --git a/gfx/skia/skia/src/core/SkGlyphBuffer.h b/gfx/skia/skia/src/core/SkGlyphBuffer.h
new file mode 100644
index 0000000000..ca7541a623
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphBuffer.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphBuffer_DEFINED
+#define SkGlyphBuffer_DEFINED
+
+#include "src/core/SkGlyph.h"
+#include "src/core/SkZip.h"
+
+class SkStrikeForGPU;
+struct SkGlyphPositionRoundingSpec;
+
+// A memory format that allows an SkPackedGlyphID, SkGlyph*, and SkPath* to occupy the same
+// memory. This allows SkPackedGlyphIDs as input, and SkGlyph*/SkPath* as output using the same
+// memory.
+class SkGlyphVariant {
+public:
+ SkGlyphVariant() : fV{nullptr} { }
+ SkGlyphVariant& operator= (SkPackedGlyphID packedID) {
+ fV.packedID = packedID;
+ SkDEBUGCODE(fTag = kPackedID);
+ return *this;
+ }
+ SkGlyphVariant& operator= (SkGlyph* glyph) {
+ fV.glyph = glyph;
+ SkDEBUGCODE(fTag = kGlyph);
+        return *this;
+    }
+ SkGlyphVariant& operator= (const SkPath* path) {
+ fV.path = path;
+ SkDEBUGCODE(fTag = kPath);
+ return *this;
+ }
+
+ SkGlyph* glyph() const {
+ SkASSERT(fTag == kGlyph);
+ return fV.glyph;
+ }
+ const SkPath* path() const {
+ SkASSERT(fTag == kPath);
+ return fV.path;
+ }
+ SkPackedGlyphID packedID() const {
+ SkASSERT(fTag == kPackedID);
+ return fV.packedID;
+ }
+
+ operator SkPackedGlyphID() const { return this->packedID(); }
+ operator SkGlyph*() const { return this->glyph(); }
+ operator const SkPath*() const { return this->path(); }
+
+private:
+ union {
+ SkGlyph* glyph;
+ const SkPath* path;
+ SkPackedGlyphID packedID;
+ } fV;
+
+#ifdef SK_DEBUG
+ enum {
+ kEmpty,
+ kPackedID,
+ kGlyph,
+ kPath
+ } fTag{kEmpty};
+#endif
+};
+
+// A buffer for converting SkPackedGlyph to SkGlyph* or SkPath*. Initially the buffer contains
+// SkPackedGlyphIDs, but those are used to lookup SkGlyph*/SkPath* which are then copied over the
+// SkPackedGlyphIDs.
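+// The debug-only fPhase tag enforces the intended lifecycle: startSource()/startDevice()
+// [kInput] -> input() [kProcess] -> push_back()... -> drawable() [kDraw] -> reset() [kReset].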
+class SkDrawableGlyphBuffer {
+public:
+ void ensureSize(size_t size);
+
+ // Load the buffer with SkPackedGlyphIDs and positions in source space.
+ void startSource(const SkZip<const SkGlyphID, const SkPoint>& source, SkPoint origin);
+
+ // Load the buffer with SkPackedGlyphIDs and positions using the device transform.
+ void startDevice(
+ const SkZip<const SkGlyphID, const SkPoint>& source,
+ SkPoint origin, const SkMatrix& viewMatrix,
+ const SkGlyphPositionRoundingSpec& roundingSpec);
+
+ // The input of SkPackedGlyphIDs
+ SkZip<SkGlyphVariant, SkPoint> input() {
+ SkASSERT(fPhase == kInput);
+ SkDEBUGCODE(fPhase = kProcess);
+ return SkZip<SkGlyphVariant, SkPoint>{fInputSize, fMultiBuffer, fPositions};
+ }
+
+ // Store the glyph in the next drawable slot, using the position information located at index
+ // from.
+ void push_back(SkGlyph* glyph, size_t from) {
+ SkASSERT(fPhase == kProcess);
+ SkASSERT(fDrawableSize <= from);
+ fPositions[fDrawableSize] = fPositions[from];
+ fMultiBuffer[fDrawableSize] = glyph;
+ fDrawableSize++;
+ }
+
+ // Store the path in the next drawable slot, using the position information located at index
+ // from.
+ void push_back(const SkPath* path, size_t from) {
+ SkASSERT(fPhase == kProcess);
+ SkASSERT(fDrawableSize <= from);
+ fPositions[fDrawableSize] = fPositions[from];
+ fMultiBuffer[fDrawableSize] = path;
+ fDrawableSize++;
+ }
+
+ // The result after a series of push_backs of drawable SkGlyph* or SkPath*.
+ SkZip<SkGlyphVariant, SkPoint> drawable() {
+ SkASSERT(fPhase == kProcess);
+ SkDEBUGCODE(fPhase = kDraw);
+ return SkZip<SkGlyphVariant, SkPoint>{fDrawableSize, fMultiBuffer, fPositions};
+ }
+
+ void reset();
+
+private:
+ size_t fMaxSize{0};
+ size_t fInputSize{0};
+ size_t fDrawableSize{0};
+ SkAutoTMalloc<SkGlyphVariant> fMultiBuffer;
+ SkAutoTMalloc<SkPoint> fPositions;
+
+#ifdef SK_DEBUG
+ enum {
+ kReset,
+ kInput,
+ kProcess,
+ kDraw
+ } fPhase{kReset};
+#endif
+};
+#endif // SkGlyphBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGlyphRun.cpp b/gfx/skia/skia/src/core/SkGlyphRun.cpp
new file mode 100644
index 0000000000..e85fb3742c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRun.cpp
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyphRun.h"
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkUtils.h"
+
+// -- SkGlyphRun -----------------------------------------------------------------------------------
+SkGlyphRun::SkGlyphRun(const SkFont& font,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters)
+ : fSource{SkMakeZip(glyphIDs, positions)}
+ , fText{text}
+ , fClusters{clusters}
+ , fFont{font} {}
+
+SkGlyphRun::SkGlyphRun(const SkGlyphRun& that, const SkFont& font)
+ : fSource{that.fSource}
+ , fText{that.fText}
+ , fClusters{that.fClusters}
+ , fFont{font} {}
+
+// -- SkGlyphRunList -------------------------------------------------------------------------------
+SkGlyphRunList::SkGlyphRunList() = default;
+SkGlyphRunList::SkGlyphRunList(
+ const SkPaint& paint,
+ const SkTextBlob* blob,
+ SkPoint origin,
+ SkSpan<const SkGlyphRun> glyphRunList)
+ : fGlyphRuns{glyphRunList}
+ , fOriginalPaint{&paint}
+ , fOriginalTextBlob{blob}
+ , fOrigin{origin} { }
+
+SkGlyphRunList::SkGlyphRunList(const SkGlyphRun& glyphRun, const SkPaint& paint)
+ : fGlyphRuns{SkSpan<const SkGlyphRun>{&glyphRun, 1}}
+ , fOriginalPaint{&paint}
+ , fOriginalTextBlob{nullptr}
+ , fOrigin{SkPoint::Make(0, 0)} {}
+
+uint64_t SkGlyphRunList::uniqueID() const {
+ return fOriginalTextBlob != nullptr ? fOriginalTextBlob->uniqueID()
+ : SK_InvalidUniqueID;
+}
+
+bool SkGlyphRunList::anyRunsLCD() const {
+ for (const auto& r : fGlyphRuns) {
+ if (r.font().getEdging() == SkFont::Edging::kSubpixelAntiAlias) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkGlyphRunList::anyRunsSubpixelPositioned() const {
+ for (const auto& r : fGlyphRuns) {
+ if (r.font().isSubpixel()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkGlyphRunList::allFontsFinite() const {
+ for (const auto& r : fGlyphRuns) {
+ if (!SkFontPriv::IsFinite(r.font())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkGlyphRunList::temporaryShuntBlobNotifyAddedToCache(uint32_t cacheID) const {
+ SkASSERT(fOriginalTextBlob != nullptr);
+ fOriginalTextBlob->notifyAddedToCache(cacheID);
+}
+
+// -- SkGlyphIDSet ---------------------------------------------------------------------------------
+// A faster set implementation that does not need any initialization; reading back the set
+// items takes time proportional to the number of items inserted, not the size of the universe.
+// This implementation is based on the paper by Briggs and Torczon, "An Efficient Representation
+// for Sparse Sets"
+//
+// This implementation assumes that the unique glyphs added are appended to a vector that may
+// already have unique glyphs from a previous computation. This allows the packing of multiple
+// UniqueID sequences in a single vector.
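+//
+// For example, uniquifying {5, 9, 5} appends {5, 9} to uniqueGlyphIDs and writes denseIndices
+// {0, 1, 0}; the check uniqueGlyphIDs[uniqueIndex] != glyphID below is what makes it safe to
+// reuse fUniverseToUnique without clearing it between calls.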
+SkSpan<const SkGlyphID> SkGlyphIDSet::uniquifyGlyphIDs(
+ uint32_t universeSize,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkGlyphID* uniqueGlyphIDs,
+ uint16_t* denseIndices) {
+ static constexpr SkGlyphID kUndefGlyph{0};
+
+ if (universeSize > fUniverseToUniqueSize) {
+ fUniverseToUnique.reset(universeSize);
+ fUniverseToUniqueSize = universeSize;
+ // If the following bzero becomes a performance problem, the memory can be marked as
+ // initialized for valgrind and msan.
+ // valgrind = VALGRIND_MAKE_MEM_DEFINED(fUniverseToUnique, universeSize * sizeof(SkGlyphID))
+ // msan = sk_msan_mark_initialized(fUniverseToUnique, universeSize * sizeof(SkGlyphID))
+ sk_bzero(fUniverseToUnique, universeSize * sizeof(SkGlyphID));
+ }
+
+ // No need to clear fUniverseToUnique here... the set insertion algorithm is designed to work
+ // correctly even when the fUniverseToUnique buffer is uninitialized!
+
+ size_t uniqueSize = 0;
+ size_t denseIndicesCursor = 0;
+ for (auto glyphID : glyphIDs) {
+
+ // If the glyphID is not in range then it is the undefined glyph.
+ if (glyphID >= universeSize) {
+ glyphID = kUndefGlyph;
+ }
+
+ // The index into the unique ID vector.
+ auto uniqueIndex = fUniverseToUnique[glyphID];
+
+ if (uniqueIndex >= uniqueSize || uniqueGlyphIDs[uniqueIndex] != glyphID) {
+ uniqueIndex = SkTo<uint16_t>(uniqueSize);
+ uniqueGlyphIDs[uniqueSize] = glyphID;
+ fUniverseToUnique[glyphID] = uniqueIndex;
+ uniqueSize += 1;
+ }
+
+ denseIndices[denseIndicesCursor++] = uniqueIndex;
+ }
+
+ // If we're hanging onto these arrays for a long time, we don't want their size to drift
+ // endlessly upwards. It's unusual to see a typeface with more than 4096 possible glyphs.
+ if (fUniverseToUniqueSize > 4096) {
+ fUniverseToUnique.reset(4096);
+ sk_bzero(fUniverseToUnique, 4096 * sizeof(SkGlyphID));
+ fUniverseToUniqueSize = 4096;
+ }
+
+ return SkSpan<const SkGlyphID>(uniqueGlyphIDs, uniqueSize);
+}
+
+// -- SkGlyphRunBuilder ----------------------------------------------------------------------------
+void SkGlyphRunBuilder::drawTextUTF8(const SkPaint& paint, const SkFont& font, const void* bytes,
+ size_t byteLength, SkPoint origin) {
+ auto glyphIDs = textToGlyphIDs(font, bytes, byteLength, SkTextEncoding::kUTF8);
+ if (!glyphIDs.empty()) {
+ this->initialize(glyphIDs.size());
+ this->simplifyDrawText(font, glyphIDs, origin, fPositions);
+ }
+
+ this->makeGlyphRunList(paint, nullptr, SkPoint::Make(0, 0));
+}
+
+void SkGlyphRunBuilder::drawTextBlob(const SkPaint& paint, const SkTextBlob& blob, SkPoint origin,
+ SkBaseDevice* device) {
+ // Figure out all the storage needed to pre-size everything below.
+ size_t totalGlyphs = 0;
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ totalGlyphs += it.glyphCount();
+ }
+
+ // Pre-size all the buffers so they don't move during processing.
+ this->initialize(totalGlyphs);
+
+ SkPoint* positions = fPositions;
+
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ if (it.positioning() != SkTextBlobRunIterator::kRSXform_Positioning) {
+ simplifyTextBlobIgnoringRSXForm(it, positions);
+ } else {
+ // Handle kRSXform_Positioning
+ if (!this->empty()) {
+ this->makeGlyphRunList(paint, &blob, origin);
+ device->drawGlyphRunList(this->useGlyphRunList());
+ }
+
+ device->drawGlyphRunRSXform(it.font(), it.glyphs(), (const SkRSXform*)it.pos(),
+ it.glyphCount(), origin, paint);
+
+ // re-init in case we keep looping and need the builder again
+ this->initialize(totalGlyphs);
+ }
+ positions += it.glyphCount();
+ }
+
+ if (!this->empty()) {
+ this->makeGlyphRunList(paint, &blob, origin);
+ device->drawGlyphRunList(this->useGlyphRunList());
+ }
+}
+
+void SkGlyphRunBuilder::textBlobToGlyphRunListIgnoringRSXForm(
+ const SkPaint& paint, const SkTextBlob& blob, SkPoint origin) {
+ // Figure out all the storage needed to pre-size everything below.
+ size_t totalGlyphs = 0;
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ totalGlyphs += it.glyphCount();
+ }
+
+ // Pre-size all the buffers so they don't move during processing.
+ this->initialize(totalGlyphs);
+
+ SkPoint* positions = fPositions;
+
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ simplifyTextBlobIgnoringRSXForm(it, positions);
+ positions += it.glyphCount();
+ }
+
+ if (!this->empty()) {
+ this->makeGlyphRunList(paint, &blob, origin);
+ }
+}
+
+void SkGlyphRunBuilder::simplifyTextBlobIgnoringRSXForm(const SkTextBlobRunIterator& it,
+ SkPoint* positions) {
+ size_t runSize = it.glyphCount();
+
+ auto text = SkSpan<const char>(it.text(), it.textSize());
+ auto clusters = SkSpan<const uint32_t>(it.clusters(), runSize);
+ const SkPoint& offset = it.offset();
+ auto glyphIDs = SkSpan<const SkGlyphID>{it.glyphs(), runSize};
+
+ switch (it.positioning()) {
+ case SkTextBlobRunIterator::kDefault_Positioning: {
+ this->simplifyDrawText(
+ it.font(), glyphIDs, offset, positions, text, clusters);
+ break;
+ }
+ case SkTextBlobRunIterator::kHorizontal_Positioning: {
+ auto constY = offset.y();
+ this->simplifyDrawPosTextH(
+ it.font(), glyphIDs, it.pos(), constY, positions, text, clusters);
+ break;
+ }
+ case SkTextBlobRunIterator::kFull_Positioning: {
+ this->simplifyDrawPosText(
+ it.font(), glyphIDs, (const SkPoint*) it.pos(), text, clusters);
+ break;
+ }
+ case SkTextBlobRunIterator::kRSXform_Positioning: break;
+ }
+}
+
+void SkGlyphRunBuilder::drawGlyphsWithPositions(const SkPaint& paint, const SkFont& font,
+ SkSpan<const SkGlyphID> glyphIDs, const SkPoint* pos) {
+ if (!glyphIDs.empty()) {
+ this->initialize(glyphIDs.size());
+ this->simplifyDrawPosText(font, glyphIDs, pos);
+ this->makeGlyphRunList(paint, nullptr, SkPoint::Make(0, 0));
+ }
+}
+
+const SkGlyphRunList& SkGlyphRunBuilder::useGlyphRunList() {
+ return fGlyphRunList;
+}
+
+void SkGlyphRunBuilder::initialize(size_t totalRunSize) {
+
+ if (totalRunSize > fMaxTotalRunSize) {
+ fMaxTotalRunSize = totalRunSize;
+ fPositions.reset(fMaxTotalRunSize);
+ }
+
+ fGlyphRunListStorage.clear();
+}
+
+SkSpan<const SkGlyphID> SkGlyphRunBuilder::textToGlyphIDs(
+ const SkFont& font, const void* bytes, size_t byteLength, SkTextEncoding encoding) {
+ if (encoding != SkTextEncoding::kGlyphID) {
+ int count = font.countText(bytes, byteLength, encoding);
+ if (count > 0) {
+ fScratchGlyphIDs.resize(count);
+ font.textToGlyphs(bytes, byteLength, encoding, fScratchGlyphIDs.data(), count);
+ return SkMakeSpan(fScratchGlyphIDs);
+ } else {
+ return SkSpan<const SkGlyphID>();
+ }
+ } else {
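+        // Glyph IDs are 16-bit, so a glyph-encoded buffer holds byteLength / 2 of them.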
+ return SkSpan<const SkGlyphID>((const SkGlyphID*)bytes, byteLength / 2);
+ }
+}
+
+void SkGlyphRunBuilder::makeGlyphRun(
+ const SkFont& font,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters) {
+
+ // Ignore empty runs.
+ if (!glyphIDs.empty()) {
+ fGlyphRunListStorage.emplace_back(
+ font,
+ positions,
+ glyphIDs,
+ text,
+ clusters);
+ }
+}
+
+void SkGlyphRunBuilder::makeGlyphRunList(
+ const SkPaint& paint, const SkTextBlob* blob, SkPoint origin) {
+
+ fGlyphRunList.~SkGlyphRunList();
+ new (&fGlyphRunList) SkGlyphRunList{
+ paint, blob, origin, SkMakeSpan(fGlyphRunListStorage)};
+}
+
+void SkGlyphRunBuilder::simplifyDrawText(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ SkPoint origin, SkPoint* positions,
+ SkSpan<const char> text, SkSpan<const uint32_t> clusters) {
+ SkASSERT(!glyphIDs.empty());
+
+ auto runSize = glyphIDs.size();
+
+ if (!glyphIDs.empty()) {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(font);
+ SkBulkGlyphMetrics storage{strikeSpec};
+ auto glyphs = storage.glyphs(glyphIDs);
+
+ SkPoint endOfLastGlyph = origin;
+ SkPoint* cursor = positions;
+ for (auto glyph : glyphs) {
+ *cursor++ = endOfLastGlyph;
+ endOfLastGlyph += glyph->advanceVector();
+ }
+
+ this->makeGlyphRun(
+ font,
+ glyphIDs,
+ SkSpan<const SkPoint>{positions, runSize},
+ text,
+ clusters);
+ }
+}
+
+void SkGlyphRunBuilder::simplifyDrawPosTextH(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ const SkScalar* xpos, SkScalar constY, SkPoint* positions,
+ SkSpan<const char> text, SkSpan<const uint32_t> clusters) {
+
+ auto posCursor = positions;
+ for (auto x : SkSpan<const SkScalar>{xpos, glyphIDs.size()}) {
+ *posCursor++ = SkPoint::Make(x, constY);
+ }
+
+ simplifyDrawPosText(font, glyphIDs, positions, text, clusters);
+}
+
+void SkGlyphRunBuilder::simplifyDrawPosText(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ const SkPoint* pos,
+ SkSpan<const char> text, SkSpan<const uint32_t> clusters) {
+ auto runSize = glyphIDs.size();
+
+ this->makeGlyphRun(
+ font,
+ glyphIDs,
+ SkSpan<const SkPoint>{pos, runSize},
+ text,
+ clusters);
+}
diff --git a/gfx/skia/skia/src/core/SkGlyphRun.h b/gfx/skia/skia/src/core/SkGlyphRun.h
new file mode 100644
index 0000000000..3d9c918d40
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRun.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphRun_DEFINED
+#define SkGlyphRun_DEFINED
+
+#include <functional>
+#include <vector>
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkSpan.h"
+#include "src/core/SkZip.h"
+
+class SkBaseDevice;
+class SkGlyph;
+class SkTextBlob;
+class SkTextBlobRunIterator;
+
+class SkGlyphRun {
+public:
+ SkGlyphRun() = default;
+ SkGlyphRun(const SkFont& font,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters);
+ SkGlyphRun(const SkGlyphRun& glyphRun, const SkFont& font);
+
+ size_t runSize() const { return fSource.size(); }
+ SkSpan<const SkPoint> positions() const { return fSource.get<1>(); }
+ SkSpan<const SkGlyphID> glyphsIDs() const { return fSource.get<0>(); }
+ SkZip<const SkGlyphID, const SkPoint> source() const { return fSource; }
+ const SkFont& font() const { return fFont; }
+ SkSpan<const uint32_t> clusters() const { return fClusters; }
+ SkSpan<const char> text() const { return fText; }
+
+private:
+ // GlyphIDs and positions.
+ const SkZip<const SkGlyphID, const SkPoint> fSource;
+    // Original text from SkTextBlob if present. Will be empty if not present.
+ const SkSpan<const char> fText;
+ // Original clusters from SkTextBlob if present. Will be empty if not present.
+ const SkSpan<const uint32_t> fClusters;
+    // Font for this run, modified to have glyph encoding and left alignment.
+ SkFont fFont;
+};
+
+class SkGlyphRunList {
+ SkSpan<const SkGlyphRun> fGlyphRuns;
+
+public:
+ SkGlyphRunList();
+    // Blob may be null.
+ SkGlyphRunList(
+ const SkPaint& paint,
+ const SkTextBlob* blob,
+ SkPoint origin,
+ SkSpan<const SkGlyphRun> glyphRunList);
+
+ SkGlyphRunList(const SkGlyphRun& glyphRun, const SkPaint& paint);
+
+ uint64_t uniqueID() const;
+ bool anyRunsLCD() const;
+ bool anyRunsSubpixelPositioned() const;
+ void temporaryShuntBlobNotifyAddedToCache(uint32_t cacheID) const;
+
+ bool canCache() const { return fOriginalTextBlob != nullptr; }
+ size_t runCount() const { return fGlyphRuns.size(); }
+ size_t totalGlyphCount() const {
+ size_t glyphCount = 0;
+ for(const auto& run : fGlyphRuns) {
+ glyphCount += run.runSize();
+ }
+ return glyphCount;
+ }
+ bool allFontsFinite() const;
+
+ SkPoint origin() const { return fOrigin; }
+ const SkPaint& paint() const { return *fOriginalPaint; }
+ const SkTextBlob* blob() const { return fOriginalTextBlob; }
+
+ auto begin() -> decltype(fGlyphRuns.begin()) { return fGlyphRuns.begin(); }
+ auto end() -> decltype(fGlyphRuns.end()) { return fGlyphRuns.end(); }
+ auto begin() const -> decltype(fGlyphRuns.cbegin()) { return fGlyphRuns.cbegin(); }
+ auto end() const -> decltype(fGlyphRuns.cend()) { return fGlyphRuns.cend(); }
+ auto size() const -> decltype(fGlyphRuns.size()) { return fGlyphRuns.size(); }
+ auto empty() const -> decltype(fGlyphRuns.empty()) { return fGlyphRuns.empty(); }
+ auto operator [] (size_t i) const -> decltype(fGlyphRuns[i]) { return fGlyphRuns[i]; }
+
+private:
+ const SkPaint* fOriginalPaint{nullptr}; // This should be deleted soon.
+    // The text blob is needed to hook up the callback that the SkTextBlob destructor calls. It
+    // should be used for nothing else.
+ const SkTextBlob* fOriginalTextBlob{nullptr};
+ SkPoint fOrigin = {0, 0};
+};
+
+class SkGlyphIDSet {
+public:
+ SkSpan<const SkGlyphID> uniquifyGlyphIDs(
+ uint32_t universeSize, SkSpan<const SkGlyphID> glyphIDs,
+            SkGlyphID* uniqueGlyphIDs, uint16_t* denseIndices);
+private:
+ size_t fUniverseToUniqueSize{0};
+ SkAutoTMalloc<uint16_t> fUniverseToUnique;
+};
+
+class SkGlyphRunBuilder {
+public:
+ void drawTextUTF8(
+ const SkPaint& paint, const SkFont&, const void* bytes, size_t byteLength, SkPoint origin);
+ void drawGlyphsWithPositions(
+ const SkPaint&, const SkFont&, SkSpan<const SkGlyphID> glyphIDs, const SkPoint* pos);
+ void drawTextBlob(const SkPaint& paint, const SkTextBlob& blob, SkPoint origin, SkBaseDevice*);
+
+ void textBlobToGlyphRunListIgnoringRSXForm(
+ const SkPaint& paint, const SkTextBlob& blob, SkPoint origin);
+
+ const SkGlyphRunList& useGlyphRunList();
+
+ bool empty() const { return fGlyphRunListStorage.empty(); }
+
+private:
+ void initialize(size_t totalRunSize);
+ SkSpan<const SkGlyphID> textToGlyphIDs(
+ const SkFont& font, const void* bytes, size_t byteLength, SkTextEncoding);
+
+ void makeGlyphRun(
+ const SkFont& font,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters);
+
+ void makeGlyphRunList(const SkPaint& paint, const SkTextBlob* blob, SkPoint origin);
+
+ void simplifyDrawText(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ SkPoint origin, SkPoint* positions,
+ SkSpan<const char> text = SkSpan<const char>{},
+ SkSpan<const uint32_t> clusters = SkSpan<const uint32_t>{});
+ void simplifyDrawPosTextH(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ const SkScalar* xpos, SkScalar constY, SkPoint* positions,
+ SkSpan<const char> text = SkSpan<const char>{},
+ SkSpan<const uint32_t> clusters = SkSpan<const uint32_t>{});
+ void simplifyDrawPosText(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs,
+ const SkPoint* pos,
+ SkSpan<const char> text = SkSpan<const char>{},
+ SkSpan<const uint32_t> clusters = SkSpan<const uint32_t>{});
+ void simplifyTextBlobIgnoringRSXForm(
+ const SkTextBlobRunIterator& it,
+ SkPoint* positions);
+
+ size_t fMaxTotalRunSize{0};
+ SkAutoTMalloc<SkPoint> fPositions;
+
+ std::vector<SkGlyphRun> fGlyphRunListStorage;
+ SkGlyphRunList fGlyphRunList;
+
+    // Used as a temporary when converting utfN text to glyph IDs. Only one run of glyph IDs
+    // will ever be needed because text blobs are already glyph based.
+ std::vector<SkGlyphID> fScratchGlyphIDs;
+};
+
+#endif // SkGlyphRun_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp b/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp
new file mode 100644
index 0000000000..cb63a0f916
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp
@@ -0,0 +1,932 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyphRunPainter.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/text/GrTextBlobCache.h"
+#include "src/gpu/text/GrTextContext.h"
+#endif
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeForGPU.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTraceEvent.h"
+
+#include <climits>
+
+// -- SkGlyphRunListPainter ------------------------------------------------------------------------
+SkGlyphRunListPainter::SkGlyphRunListPainter(const SkSurfaceProps& props,
+ SkColorType colorType,
+ SkScalerContextFlags flags,
+ SkStrikeForGPUCacheInterface* strikeCache)
+ : fDeviceProps{props}
+ , fBitmapFallbackProps{SkSurfaceProps{props.flags(), kUnknown_SkPixelGeometry}}
+ , fColorType{colorType}, fScalerContextFlags{flags}
+ , fStrikeCache{strikeCache} {}
+
+// TODO: unify with code in GrTextContext.cpp
+static SkScalerContextFlags compute_scaler_context_flags(const SkColorSpace* cs) {
+ // If we're doing linear blending, then we can disable the gamma hacks.
+ // Otherwise, leave them on. In either case, we still want the contrast boost:
+ // TODO: Can we be even smarter about mask gamma based on the dest transfer function?
+ if (cs && cs->gammaIsLinear()) {
+ return SkScalerContextFlags::kBoostContrast;
+ } else {
+ return SkScalerContextFlags::kFakeGammaAndBoostContrast;
+ }
+}
+
+SkGlyphRunListPainter::SkGlyphRunListPainter(const SkSurfaceProps& props,
+ SkColorType colorType,
+ SkColorSpace* cs,
+ SkStrikeForGPUCacheInterface* strikeCache)
+ : SkGlyphRunListPainter(props, colorType, compute_scaler_context_flags(cs), strikeCache) {}
+
+#if SK_SUPPORT_GPU
+SkGlyphRunListPainter::SkGlyphRunListPainter(const SkSurfaceProps& props, const GrColorInfo& csi)
+ : SkGlyphRunListPainter(props,
+ kUnknown_SkColorType,
+ compute_scaler_context_flags(csi.colorSpace()),
+ SkStrikeCache::GlobalStrikeCache()) {}
+
+SkGlyphRunListPainter::SkGlyphRunListPainter(const GrRenderTargetContext& rtc)
+ : SkGlyphRunListPainter{rtc.surfaceProps(), rtc.colorInfo()} {}
+
+#endif
+
+SkSpan<const SkPackedGlyphID> SkGlyphRunListPainter::DeviceSpacePackedGlyphIDs(
+ const SkGlyphPositionRoundingSpec& roundingSpec,
+ const SkMatrix& viewMatrix,
+ const SkPoint& origin,
+ int n,
+ const SkGlyphID* glyphIDs,
+ const SkPoint* positions,
+ SkPoint* mappedPositions,
+ SkPackedGlyphID* results) {
+ // Add rounding and origin.
+ SkMatrix matrix = viewMatrix;
+ matrix.preTranslate(origin.x(), origin.y());
+ SkPoint rounding = roundingSpec.halfAxisSampleFreq;
+ matrix.postTranslate(rounding.x(), rounding.y());
+ matrix.mapPoints(mappedPositions, positions, n);
+
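+    // As in SkDrawableGlyphBuffer::startDevice, the half-sample-frequency bias applied above
+    // plus the mask below round each mapped position to its nearest sub-pixel bucket.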
+ SkIPoint mask = roundingSpec.ignorePositionMask;
+
+ for (int i = 0; i < n; i++) {
+ SkFixed subX = SkScalarToFixed(mappedPositions[i].x()) & mask.x(),
+ subY = SkScalarToFixed(mappedPositions[i].y()) & mask.y();
+ results[i] = SkPackedGlyphID{glyphIDs[i], subX, subY};
+ }
+
+ return SkSpan<const SkPackedGlyphID>{results, SkTo<size_t>(n)};
+}
+
+SkSpan<const SkPackedGlyphID> SkGlyphRunListPainter::SourceSpacePackedGlyphIDs(
+ const SkPoint& origin,
+ int n,
+ const SkGlyphID* glyphIDs,
+ const SkPoint* positions,
+ SkPoint* mappedPositions,
+ SkPackedGlyphID* results) {
+
+ SkMatrix::MakeTrans(origin.x(), origin.y()).mapPoints(
+ mappedPositions, positions, n);
+
+ SkPackedGlyphID* cursor = results;
+ for (int i = 0; i < n; i++) {
+ *cursor++ = SkPackedGlyphID{glyphIDs[i]};
+ }
+
+ return SkSpan<const SkPackedGlyphID>{results, SkTo<size_t>(n)};
+}
+
+void SkGlyphRunListPainter::drawForBitmapDevice(
+ const SkGlyphRunList& glyphRunList, const SkMatrix& deviceMatrix,
+ const BitmapDevicePainter* bitmapDevice) {
+ ScopedBuffers _ = this->ensureBuffers(glyphRunList);
+
+ const SkPaint& runPaint = glyphRunList.paint();
+ // The bitmap blitters can only draw lcd text to a N32 bitmap in srcOver. Otherwise,
+ // convert the lcd text into A8 text. The props communicates this to the scaler.
+ auto& props = (kN32_SkColorType == fColorType && runPaint.isSrcOver())
+ ? fDeviceProps
+ : fBitmapFallbackProps;
+
+ SkPoint origin = glyphRunList.origin();
+ for (auto& glyphRun : glyphRunList) {
+ const SkFont& runFont = glyphRun.font();
+
+ if (SkStrikeSpec::ShouldDrawAsPath(runPaint, runFont, deviceMatrix)) {
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePath(
+ runFont, runPaint, props, fScalerContextFlags);
+
+ auto strike = strikeSpec.findOrCreateExclusiveStrike();
+
+ fDrawable.startSource(glyphRun.source(), origin);
+ strike->prepareForDrawingPathsCPU(&fDrawable);
+
+ // The paint we draw paths with must have the same anti-aliasing state as the runFont
+ // allowing the paths to have the same edging as the glyph masks.
+ SkPaint pathPaint = runPaint;
+ pathPaint.setAntiAlias(runFont.hasSomeAntiAliasing());
+
+ bitmapDevice->paintPaths(&fDrawable, strikeSpec.strikeToSourceRatio(), pathPaint);
+ } else {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeMask(
+ runFont, runPaint, props, fScalerContextFlags, deviceMatrix);
+
+ auto strike = strikeSpec.findOrCreateExclusiveStrike();
+
+ fDrawable.startDevice(glyphRun.source(), origin, deviceMatrix, strike->roundingSpec());
+ strike->prepareForDrawingMasksCPU(&fDrawable);
+ bitmapDevice->paintMasks(&fDrawable, runPaint);
+ }
+ }
+}
+
+// Getting glyphs to the screen in a fallback situation can be complex. Here is the set of
+// transformations that have to happen. Normally, they would all be accommodated by the font
+// scaler, but the atlas has an upper limit to the glyphs it can handle. So the GPU is used to
+// make up the difference from the smaller atlas size to the larger size needed by the final
+// transform. Here are the transformations that are applied.
+//
+// final transform = [view matrix] * [text scale] * [text size]
+//
+// There are three cases:
+// * Go Fast - view matrix is scale and translate, and all the glyphs are small enough
+// Just scale the positions, and have the glyph cache handle the view matrix transformation.
+// The text scale is 1.
+// * It's complicated - view matrix is not scale and translate, and the glyphs are small enough
+// The glyph cache does not handle the view matrix, but stores the glyphs at the text size
+// specified by the run paint. The GPU handles the rotation, etc. specified by the view matrix.
+// The text scale is 1.
+// * Too big - The glyphs are too big to fit in the atlas
+// Reduce the text size so the glyphs will fit in the atlas, but don't apply any
+// transformations from the view matrix. Calculate a text scale based on that reduction. This
+// scale factor is used to increase the size of the destination rectangles. The destination
+// rectangles are then scaled, rotated, etc. by the GPU using the view matrix.
+void SkGlyphRunListPainter::processARGBFallback(SkScalar maxSourceGlyphDimension,
+ const SkPaint& runPaint,
+ const SkFont& runFont,
+ const SkMatrix& viewMatrix,
+ SkGlyphRunPainterInterface* process) {
+ SkASSERT(!fARGBGlyphsIDs.empty());
+
+    // If maxSourceGlyphDimension is zero, then no pixels will change, so there is nothing to draw.
+ if (maxSourceGlyphDimension == 0) { return; }
+
+ SkScalar maxScale = viewMatrix.getMaxScale();
+
+ // This is a linear estimate of the longest dimension among all the glyph widths and heights.
+ SkScalar conservativeMaxGlyphDimension = maxSourceGlyphDimension * maxScale;
+
+    // If the matrix is simple and all the glyphs are small enough, go fast!
+    // N.B. If the matrix has scale, that will be reflected in the strike through the viewMatrix
+    // in the useDeviceCache case.
+ bool useDeviceCache =
+ viewMatrix.isScaleTranslate()
+ && conservativeMaxGlyphDimension <= SkStrikeCommon::kSkSideTooBigForAtlas;
+
+ // A scaled and translated transform is the common case, and is handled directly in fallback.
+ // Even if the transform is scale and translate, fallback must be careful to use glyphs that
+ // fit in the atlas. If a glyph will not fit in the atlas, then the general transform case is
+ // used to render the glyphs.
+ if (useDeviceCache) {
+ // Translate the positions to device space.
+ // TODO: this code is dubious
+ viewMatrix.mapPoints(fARGBPositions.data(), fARGBPositions.size());
+ for (SkPoint& point : fARGBPositions) {
+ point.fX = SkScalarFloorToScalar(point.fX);
+ point.fY = SkScalarFloorToScalar(point.fY);
+ }
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeMask(
+ runFont, runPaint, fDeviceProps, fScalerContextFlags, viewMatrix);
+
+ SkScopedStrikeForGPU strike = strikeSpec.findOrCreateScopedStrike(fStrikeCache);
+
+ SkPackedGlyphID* cursor = fPackedGlyphIDs;
+ for (auto glyphID : fARGBGlyphsIDs) {
+ *cursor++ = SkPackedGlyphID{glyphID};
+ }
+
+ SkSpan<const SkGlyphPos> glyphPosSpan = strike->prepareForDrawingRemoveEmpty(
+ fPackedGlyphIDs,
+ fARGBPositions.data(),
+ fARGBGlyphsIDs.size(),
+ SkStrikeCommon::kSkSideTooBigForAtlas,
+ fGlyphPos);
+
+ if (process) {
+ process->processDeviceFallback(glyphPosSpan, strikeSpec);
+ }
+
+ } else {
+ // If the matrix is complicated or if scaling is used to fit the glyphs in the cache,
+ // then this case is used.
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeSourceFallback(
+ runFont, runPaint, fDeviceProps, fScalerContextFlags, maxSourceGlyphDimension);
+
+ SkScopedStrikeForGPU strike = strikeSpec.findOrCreateScopedStrike(fStrikeCache);
+
+ SkPackedGlyphID* cursor = fPackedGlyphIDs;
+ for (auto glyphID : fARGBGlyphsIDs) {
+ *cursor++ = SkPackedGlyphID{glyphID};
+ }
+
+ auto glyphPosSpan = strike->prepareForDrawingRemoveEmpty(
+ fPackedGlyphIDs,
+ fARGBPositions.data(),
+ fARGBGlyphsIDs.size(),
+ SkStrikeCommon::kSkSideTooBigForAtlas,
+ fGlyphPos);
+
+ if (process) {
+ process->processSourceFallback(
+ glyphPosSpan,
+ strikeSpec,
+ viewMatrix.hasPerspective());
+ }
+ }
+}
+
+#if SK_SUPPORT_GPU
+void SkGlyphRunListPainter::processGlyphRunList(const SkGlyphRunList& glyphRunList,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ bool contextSupportsDistanceFieldText,
+ const GrTextContext::Options& options,
+ SkGlyphRunPainterInterface* process) {
+
+ SkPoint origin = glyphRunList.origin();
+ const SkPaint& runPaint = glyphRunList.paint();
+
+ for (const auto& glyphRun : glyphRunList) {
+ SkScalar maxFallbackDimension{-SK_ScalarInfinity};
+ ScopedBuffers _ = this->ensureBuffers(glyphRun);
+
+ auto addFallback = [this, &maxFallbackDimension]
+ (const SkGlyph& glyph, SkPoint sourcePosition) {
+ maxFallbackDimension = std::max(maxFallbackDimension,
+ SkIntToScalar(glyph.maxDimension()));
+ fARGBGlyphsIDs.push_back(glyph.getGlyphID());
+ fARGBPositions.push_back(sourcePosition);
+ };
+
+ const SkFont& runFont = glyphRun.font();
+
+ bool useSDFT = GrTextContext::CanDrawAsDistanceFields(
+ runPaint, runFont, viewMatrix, props, contextSupportsDistanceFieldText, options);
+ if (process) {
+ process->startRun(glyphRun, useSDFT);
+ }
+
+ if (useSDFT) {
+ SkScalar minScale, maxScale;
+ SkStrikeSpec strikeSpec;
+ std::tie(strikeSpec, minScale, maxScale) =
+ SkStrikeSpec::MakeSDFT(
+                            runFont, runPaint, fDeviceProps, viewMatrix, options);
+
+ SkScopedStrikeForGPU strike = strikeSpec.findOrCreateScopedStrike(fStrikeCache);
+
+ auto packedGlyphIDs = SourceSpacePackedGlyphIDs(
+ origin,
+ glyphRun.runSize(),
+ glyphRun.glyphsIDs().data(),
+ glyphRun.positions().data(),
+ fPositions,
+ fPackedGlyphIDs);
+
+ SkSpan<const SkGlyphPos> glyphPosSpan = strike->prepareForDrawingRemoveEmpty(
+ packedGlyphIDs.data(),
+ fPositions,
+ glyphRun.runSize(),
+ SkStrikeCommon::kSkSideTooBigForAtlas,
+ fGlyphPos);
+
+ size_t glyphsWithMaskCount = 0;
+ for (const SkGlyphPos& glyphPos : glyphPosSpan) {
+ const SkGlyph& glyph = *glyphPos.glyph;
+ SkPoint position = glyphPos.position;
+
+ // The SDF scaler context system ensures that a glyph is empty, kSDF_Format, or
+ // kARGB32_Format. The following if statements use this assumption.
+ SkASSERT(glyph.maskFormat() == SkMask::kSDF_Format || glyph.isColor());
+
+ if (SkStrikeForGPU::CanDrawAsSDFT(glyph)) {
+ // SDF mask will work.
+ fGlyphPos[glyphsWithMaskCount++] = glyphPos;
+ } else if (SkStrikeForGPU::CanDrawAsPath(glyph)) {
+ // If not color but too big, use a path.
+ fPaths.push_back(glyphPos);
+ } else {
+ // If no path, or it is color, then fallback.
+ addFallback(glyph, position);
+ }
+ }
+
+ if (process) {
+ bool hasWCoord =
+ viewMatrix.hasPerspective() || options.fDistanceFieldVerticesAlwaysHaveW;
+
+ // processSourceSDFT must be called even if there are no glyphs to make sure runs
+ // are set correctly.
+ process->processSourceSDFT(
+ SkSpan<const SkGlyphPos>{fGlyphPos, glyphsWithMaskCount},
+ strikeSpec,
+ runFont,
+ minScale,
+ maxScale,
+ hasWCoord);
+
+ if (!fPaths.empty()) {
+ process->processSourcePaths(
+ SkMakeSpan(fPaths),
+ strikeSpec);
+ }
+ }
+
+ // fGlyphPos will be reused here.
+ if (!fARGBGlyphsIDs.empty()) {
+ this->processARGBFallback(maxFallbackDimension * strikeSpec.strikeToSourceRatio(),
+ runPaint, runFont, viewMatrix, process);
+ }
+ } else if (SkStrikeSpec::ShouldDrawAsPath(runPaint, runFont, viewMatrix)) {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePath(
+ runFont, runPaint, fDeviceProps, fScalerContextFlags);
+
+ SkScopedStrikeForGPU strike = strikeSpec.findOrCreateScopedStrike(fStrikeCache);
+
+ auto packedGlyphIDs = SourceSpacePackedGlyphIDs(
+ origin,
+ glyphRun.runSize(),
+ glyphRun.glyphsIDs().data(),
+ glyphRun.positions().data(),
+ fPositions,
+ fPackedGlyphIDs);
+
+ SkSpan<const SkGlyphPos> glyphPosSpan = strike->prepareForDrawingRemoveEmpty(
+ packedGlyphIDs.data(),
+ fPositions,
+ glyphRun.runSize(),
+ 0,
+ fGlyphPos);
+
+ // As opposed to SDF and mask, path handling puts paths in fGlyphPos instead of fPaths.
+ size_t glyphsWithPathCount = 0;
+ for (const SkGlyphPos& glyphPos : glyphPosSpan) {
+ const SkGlyph& glyph = *glyphPos.glyph;
+ SkPoint position = glyphPos.position;
+ if (SkStrikeForGPU::CanDrawAsPath(glyph)) {
+ // Place paths in fGlyphPos
+ fGlyphPos[glyphsWithPathCount++] = glyphPos;
+ } else {
+ addFallback(glyph, position);
+ }
+ }
+
+ if (process) {
+ // processSourcePaths must be called even if there are no glyphs to make sure runs
+ // are set correctly.
+ process->processSourcePaths(
+ SkSpan<const SkGlyphPos>{fGlyphPos, glyphsWithPathCount},
+ strikeSpec);
+ }
+
+ // fGlyphPos will be reused here.
+ if (!fARGBGlyphsIDs.empty()) {
+ this->processARGBFallback(maxFallbackDimension * strikeSpec.strikeToSourceRatio(),
+ runPaint, runFont, viewMatrix, process);
+ }
+ } else {
+ SkStrikeSpec strikeSpec =
+ SkStrikeSpec::MakeMask(runFont, runPaint,
+ fDeviceProps, fScalerContextFlags, viewMatrix);
+
+ SkScopedStrikeForGPU strike = strikeSpec.findOrCreateScopedStrike(fStrikeCache);
+
+ auto packedGlyphIDs = DeviceSpacePackedGlyphIDs(
+ strike->roundingSpec(),
+ viewMatrix,
+ origin,
+ glyphRun.runSize(),
+ glyphRun.glyphsIDs().data(),
+ glyphRun.positions().data(),
+ fPositions,
+ fPackedGlyphIDs);
+
+ // Lookup all the glyphs from the cache. Strip empty glyphs.
+ SkSpan<const SkGlyphPos> glyphPosSpan = strike->prepareForDrawingRemoveEmpty(
+ packedGlyphIDs.data(),
+ fPositions,
+ glyphRun.runSize(),
+ SkStrikeCommon::kSkSideTooBigForAtlas,
+ fGlyphPos);
+
+ // Sort glyphs into the three bins: mask (fGlyphPos), path (fPaths), and fallback.
+ size_t glyphsWithMaskCount = 0;
+ for (const SkGlyphPos& glyphPos : glyphPosSpan) {
+ const SkGlyph& glyph = *glyphPos.glyph;
+ const SkPoint position = glyphPos.position;
+
+                // A glyph with a non-finite position cannot be drawn, so skip it.
+ if (!SkScalarsAreFinite(position.x(), position.y())) {
+ // Do nothing;
+ } else if (SkStrikeForGPU::CanDrawAsMask(glyph)) {
+ fGlyphPos[glyphsWithMaskCount++] = glyphPos;
+ } else if (SkStrikeForGPU::CanDrawAsPath(glyph)) {
+ fPaths.push_back(glyphPos);
+ } else {
+ addFallback(glyph, origin + glyphRun.positions()[glyphPos.index]);
+ }
+ }
+
+ if (process) {
+ // processDeviceMasks must be called even if there are no glyphs to make sure runs
+ // are set correctly.
+ process->processDeviceMasks(
+ SkSpan<const SkGlyphPos>{fGlyphPos, glyphsWithMaskCount}, strikeSpec);
+ if (!fPaths.empty()) {
+ process->processDevicePaths(SkMakeSpan(fPaths));
+ }
+ }
+
+ // fGlyphPos will be reused here.
+ if (!fARGBGlyphsIDs.empty()) {
+ this->processARGBFallback(maxFallbackDimension / viewMatrix.getMaxScale(),
+ runPaint, runFont, viewMatrix, process);
+ }
+ } // Mask case
+ } // For all glyph runs
+}
+#endif // SK_SUPPORT_GPU
+
+auto SkGlyphRunListPainter::ensureBuffers(const SkGlyphRunList& glyphRunList) -> ScopedBuffers {
+ size_t size = 0;
+ for (const SkGlyphRun& run : glyphRunList) {
+ size = std::max(run.runSize(), size);
+ }
+ return ScopedBuffers(this, size);
+}
+
+SkGlyphRunListPainter::ScopedBuffers
+SkGlyphRunListPainter::ensureBuffers(const SkGlyphRun& glyphRun) {
+ return ScopedBuffers(this, glyphRun.runSize());
+}
+
+#if SK_SUPPORT_GPU
+// -- GrTextContext --------------------------------------------------------------------------------
+SkPMColor4f generate_filtered_color(const SkPaint& paint, const GrColorInfo& colorInfo) {
+ SkColor4f filteredColor = paint.getColor4f();
+ if (auto* xform = colorInfo.colorSpaceXformFromSRGB()) {
+ filteredColor = xform->apply(filteredColor);
+ }
+ if (paint.getColorFilter() != nullptr) {
+ filteredColor = paint.getColorFilter()->filterColor4f(filteredColor, colorInfo.colorSpace(),
+ colorInfo.colorSpace());
+ }
+ return filteredColor.premul();
+}
+
+void GrTextContext::drawGlyphRunList(
+ GrRecordingContext* context, GrTextTarget* target, const GrClip& clip,
+ const SkMatrix& viewMatrix, const SkSurfaceProps& props,
+ const SkGlyphRunList& glyphRunList) {
+ SkPoint origin = glyphRunList.origin();
+
+ // Get the first paint to use as the key paint.
+ const SkPaint& listPaint = glyphRunList.paint();
+
+ SkPMColor4f filteredColor = generate_filtered_color(listPaint, target->colorInfo());
+    GrColor color = filteredColor.toBytes_RGBA();
+
+ // If we have been abandoned, then don't draw
+ if (context->priv().abandoned()) {
+ return;
+ }
+
+ SkMaskFilterBase::BlurRec blurRec;
+    // It might be worth caching these things, but it's not clear at this time
+ // TODO for animated mask filters, this will fill up our cache. We need a safeguard here
+ const SkMaskFilter* mf = listPaint.getMaskFilter();
+ bool canCache = glyphRunList.canCache() && !(listPaint.getPathEffect() ||
+ (mf && !as_MFB(mf)->asABlur(&blurRec)));
+ SkScalerContextFlags scalerContextFlags = ComputeScalerContextFlags(target->colorInfo());
+
+ auto grStrikeCache = context->priv().getGrStrikeCache();
+ GrTextBlobCache* textBlobCache = context->priv().getTextBlobCache();
+
+ sk_sp<GrTextBlob> cacheBlob;
+ GrTextBlob::Key key;
+ if (canCache) {
+ bool hasLCD = glyphRunList.anyRunsLCD();
+
+ // We canonicalize all non-lcd draws to use kUnknown_SkPixelGeometry
+ SkPixelGeometry pixelGeometry = hasLCD ? props.pixelGeometry() :
+ kUnknown_SkPixelGeometry;
+
+        // TODO: we want to figure out a way to use the canonical color on LCD text; see the
+        // note on ComputeCanonicalColor above. We pick a dummy value for LCD text to ensure
+        // we always match the same key.
+ GrColor canonicalColor = hasLCD ? SK_ColorTRANSPARENT :
+ ComputeCanonicalColor(listPaint, hasLCD);
+
+ key.fPixelGeometry = pixelGeometry;
+ key.fUniqueID = glyphRunList.uniqueID();
+ key.fStyle = listPaint.getStyle();
+ key.fHasBlur = SkToBool(mf);
+ key.fCanonicalColor = canonicalColor;
+ key.fScalerContextFlags = scalerContextFlags;
+ cacheBlob = textBlobCache->find(key);
+ }
+
+ if (cacheBlob) {
+        if (cacheBlob->mustRegenerate(listPaint, glyphRunList.anyRunsSubpixelPositioned(),
+                                      blurRec, viewMatrix, origin.x(), origin.y())) {
+            // We have to remake the blob because changes may invalidate our masks.
+            // TODO: we could probably get away with reusing the blob most of the time if the
+            // pointer is unique, but we'd have to clear the subrun information.
+ textBlobCache->remove(cacheBlob.get());
+ cacheBlob = textBlobCache->makeCachedBlob(
+ glyphRunList, key, blurRec, listPaint, color, grStrikeCache);
+ cacheBlob->generateFromGlyphRunList(
+ *context->priv().caps()->shaderCaps(), fOptions,
+ listPaint, scalerContextFlags, viewMatrix, props,
+ glyphRunList, target->glyphPainter());
+ } else {
+ textBlobCache->makeMRU(cacheBlob.get());
+
+ if (CACHE_SANITY_CHECK) {
+ sk_sp<GrTextBlob> sanityBlob(textBlobCache->makeBlob(
+ glyphRunList, color, grStrikeCache));
+ sanityBlob->setupKey(key, blurRec, listPaint);
+ cacheBlob->generateFromGlyphRunList(
+ *context->priv().caps()->shaderCaps(), fOptions,
+ listPaint, scalerContextFlags, viewMatrix, props, glyphRunList,
+ target->glyphPainter());
+ GrTextBlob::AssertEqual(*sanityBlob, *cacheBlob);
+ }
+ }
+ } else {
+ if (canCache) {
+ cacheBlob = textBlobCache->makeCachedBlob(
+ glyphRunList, key, blurRec, listPaint, color, grStrikeCache);
+ } else {
+ cacheBlob = textBlobCache->makeBlob(glyphRunList, color, grStrikeCache);
+ }
+ cacheBlob->generateFromGlyphRunList(
+ *context->priv().caps()->shaderCaps(), fOptions, listPaint,
+ scalerContextFlags, viewMatrix, props, glyphRunList,
+ target->glyphPainter());
+ }
+
+ cacheBlob->flush(target, props, fDistanceAdjustTable.get(), listPaint, filteredColor,
+ clip, viewMatrix, origin.x(), origin.y());
+}
+
+void GrTextBlob::SubRun::appendGlyph(GrGlyph* glyph, SkRect dstRect) {
+
+ this->joinGlyphBounds(dstRect);
+
+ GrTextBlob* blob = fRun->fBlob;
+
+ bool hasW = this->hasWCoord();
+    // Glyphs drawn in perspective must always have a w coordinate.
+ SkASSERT(hasW || !blob->fInitialViewMatrix.hasPerspective());
+ auto maskFormat = this->maskFormat();
+ size_t vertexStride = GetVertexStride(maskFormat, hasW);
+
+ intptr_t vertex = reinterpret_cast<intptr_t>(blob->fVertices + fVertexEndIndex);
+
+ // We always write the third position component used by SDFs. If it is unused it gets
+ // overwritten. Similarly, we always write the color and the blob will later overwrite it
+ // with texture coords if it is unused.
+ size_t colorOffset = hasW ? sizeof(SkPoint3) : sizeof(SkPoint);
+ // V0
+ *reinterpret_cast<SkPoint3*>(vertex) = {dstRect.fLeft, dstRect.fTop, 1.f};
+ *reinterpret_cast<GrColor*>(vertex + colorOffset) = fColor;
+ vertex += vertexStride;
+
+ // V1
+ *reinterpret_cast<SkPoint3*>(vertex) = {dstRect.fLeft, dstRect.fBottom, 1.f};
+ *reinterpret_cast<GrColor*>(vertex + colorOffset) = fColor;
+ vertex += vertexStride;
+
+ // V2
+ *reinterpret_cast<SkPoint3*>(vertex) = {dstRect.fRight, dstRect.fTop, 1.f};
+ *reinterpret_cast<GrColor*>(vertex + colorOffset) = fColor;
+ vertex += vertexStride;
+
+ // V3
+ *reinterpret_cast<SkPoint3*>(vertex) = {dstRect.fRight, dstRect.fBottom, 1.f};
+ *reinterpret_cast<GrColor*>(vertex + colorOffset) = fColor;
+
+ fVertexEndIndex += vertexStride * kVerticesPerGlyph;
+ blob->fGlyphs[fGlyphEndIndex++] = glyph;
+}
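+
+// Illustrative note (not part of the upstream sources): each quad above occupies
+// kVerticesPerGlyph * vertexStride bytes. For non-perspective glyphs an SkPoint3 is still
+// written per vertex, and the GrColor stored at colorOffset == sizeof(SkPoint) then
+// overwrites the unused z component.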
+
+void GrTextBlob::Run::switchSubRunIfNeededAndAppendGlyph(GrGlyph* glyph,
+ const sk_sp<GrTextStrike>& strike,
+ const SkRect& destRect,
+ bool needsTransform) {
+ GrMaskFormat format = glyph->fMaskFormat;
+
+ SubRun* subRun = &fSubRunInfo.back();
+ if (fInitialized && subRun->maskFormat() != format) {
+ subRun = pushBackSubRun(fStrikeSpec, fColor);
+ subRun->setStrike(strike);
+ } else if (!fInitialized) {
+ subRun->setStrike(strike);
+ }
+
+ fInitialized = true;
+ subRun->setMaskFormat(format);
+ subRun->setNeedsTransform(needsTransform);
+ subRun->appendGlyph(glyph, destRect);
+}
+
+void GrTextBlob::Run::appendDeviceSpaceGlyph(const sk_sp<GrTextStrike>& strike,
+ const SkGlyph& skGlyph, SkPoint origin) {
+ if (GrGlyph* glyph = strike->getGlyph(skGlyph)) {
+
+ SkRect glyphRect = glyph->destRect(origin);
+
+ if (!glyphRect.isEmpty()) {
+ this->switchSubRunIfNeededAndAppendGlyph(glyph, strike, glyphRect, false);
+ }
+ }
+}
+
+void GrTextBlob::Run::appendSourceSpaceGlyph(const sk_sp<GrTextStrike>& strike,
+ const SkGlyph& skGlyph,
+ SkPoint origin,
+ SkScalar textScale) {
+ if (GrGlyph* glyph = strike->getGlyph(skGlyph)) {
+
+ SkRect glyphRect = glyph->destRect(origin, textScale);
+
+ if (!glyphRect.isEmpty()) {
+ this->switchSubRunIfNeededAndAppendGlyph(glyph, strike, glyphRect, true);
+ }
+ }
+}
+
+void GrTextBlob::generateFromGlyphRunList(const GrShaderCaps& shaderCaps,
+ const GrTextContext::Options& options,
+ const SkPaint& paint,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const SkGlyphRunList& glyphRunList,
+ SkGlyphRunListPainter* glyphPainter) {
+ SkPoint origin = glyphRunList.origin();
+ const SkPaint& runPaint = glyphRunList.paint();
+ this->initReusableBlob(SkPaintPriv::ComputeLuminanceColor(runPaint), viewMatrix,
+ origin.x(), origin.y());
+
+ glyphPainter->processGlyphRunList(glyphRunList,
+ viewMatrix,
+ props,
+ shaderCaps.supportsDistanceFieldText(),
+ options,
+ this);
+}
+
+GrTextBlob::Run* GrTextBlob::currentRun() {
+ return &fRuns[fRunCount - 1];
+}
+
+void GrTextBlob::startRun(const SkGlyphRun& glyphRun, bool useSDFT) {
+ if (useSDFT) {
+ this->setHasDistanceField();
+ }
+ Run* run = this->pushBackRun();
+ run->setRunFontAntiAlias(glyphRun.font().hasSomeAntiAliasing());
+}
+
+void GrTextBlob::processDeviceMasks(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) {
+ Run* run = this->currentRun();
+ this->setHasBitmap();
+ run->setupFont(strikeSpec);
+ sk_sp<GrTextStrike> currStrike = strikeSpec.findOrCreateGrStrike(fStrikeCache);
+ for (const auto& mask : masks) {
+ SkPoint pt{SkScalarFloorToScalar(mask.position.fX),
+ SkScalarFloorToScalar(mask.position.fY)};
+ run->appendDeviceSpaceGlyph(currStrike, *mask.glyph, pt);
+ }
+}
+
+void GrTextBlob::processSourcePaths(SkSpan<const SkGlyphPos> paths,
+ const SkStrikeSpec& strikeSpec) {
+ Run* run = this->currentRun();
+ this->setHasBitmap();
+ run->setupFont(strikeSpec);
+ for (const auto& path : paths) {
+ if (const SkPath* glyphPath = path.glyph->path()) {
+ run->appendPathGlyph(*glyphPath, path.position, strikeSpec.strikeToSourceRatio(),
+ false);
+ }
+ }
+}
+
+void GrTextBlob::processDevicePaths(SkSpan<const SkGlyphPos> paths) {
+ Run* run = this->currentRun();
+ this->setHasBitmap();
+ for (const auto& path : paths) {
+ SkPoint pt{SkScalarFloorToScalar(path.position.fX),
+ SkScalarFloorToScalar(path.position.fY)};
+ // TODO: path should always be set. Remove when proven.
+ if (const SkPath* glyphPath = path.glyph->path()) {
+ run->appendPathGlyph(*glyphPath, pt, SK_Scalar1, true);
+ }
+ }
+}
+
+void GrTextBlob::processSourceSDFT(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ const SkFont& runFont,
+ SkScalar minScale,
+ SkScalar maxScale,
+ bool hasWCoord) {
+
+ Run* run = this->currentRun();
+ run->setSubRunHasDistanceFields(
+ runFont.getEdging() == SkFont::Edging::kSubpixelAntiAlias,
+ runFont.hasSomeAntiAliasing(),
+ hasWCoord);
+ this->setMinAndMaxScale(minScale, maxScale);
+ run->setupFont(strikeSpec);
+ sk_sp<GrTextStrike> currStrike = strikeSpec.findOrCreateGrStrike(fStrikeCache);
+ for (const auto& mask : masks) {
+ run->appendSourceSpaceGlyph(
+ currStrike, *mask.glyph, mask.position, strikeSpec.strikeToSourceRatio());
+ }
+}
+
+void GrTextBlob::processSourceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ bool hasW) {
+ Run* run = this->currentRun();
+
+ auto subRun = run->initARGBFallback();
+ sk_sp<GrTextStrike> grStrike = strikeSpec.findOrCreateGrStrike(fStrikeCache);
+ subRun->setStrike(grStrike);
+ subRun->setHasWCoord(hasW);
+
+ this->setHasBitmap();
+ run->setupFont(strikeSpec);
+ for (const auto& mask : masks) {
+        run->appendSourceSpaceGlyph(
+                grStrike, *mask.glyph, mask.position, strikeSpec.strikeToSourceRatio());
+ }
+}
+
+void GrTextBlob::processDeviceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) {
+ Run* run = this->currentRun();
+ this->setHasBitmap();
+ sk_sp<GrTextStrike> grStrike = strikeSpec.findOrCreateGrStrike(fStrikeCache);
+ auto subRun = run->initARGBFallback();
+ run->setupFont(strikeSpec);
+ subRun->setStrike(grStrike);
+ for (const auto& mask : masks) {
+ run->appendDeviceSpaceGlyph(grStrike, *mask.glyph, mask.position);
+ }
+}
+
+#if GR_TEST_UTILS
+
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+std::unique_ptr<GrDrawOp> GrTextContext::createOp_TestingOnly(GrRecordingContext* context,
+ GrTextContext* textContext,
+ GrRenderTargetContext* rtc,
+ const SkPaint& skPaint,
+ const SkFont& font,
+ const SkMatrix& viewMatrix,
+ const char* text,
+ int x,
+ int y) {
+ auto direct = context->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ auto strikeCache = direct->priv().getGrStrikeCache();
+
+ static SkSurfaceProps surfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
+
+    size_t textLen = strlen(text);
+
+ SkPMColor4f filteredColor = generate_filtered_color(skPaint, rtc->colorInfo());
+ GrColor color = filteredColor.toBytes_RGBA();
+
+ auto origin = SkPoint::Make(x, y);
+ SkGlyphRunBuilder builder;
+ builder.drawTextUTF8(skPaint, font, text, textLen, origin);
+
+ auto glyphRunList = builder.useGlyphRunList();
+ sk_sp<GrTextBlob> blob;
+ if (!glyphRunList.empty()) {
+ blob = direct->priv().getTextBlobCache()->makeBlob(glyphRunList, color, strikeCache);
+ // Use the text and textLen below, because we don't want to mess with the paint.
+ SkScalerContextFlags scalerContextFlags = ComputeScalerContextFlags(rtc->colorInfo());
+ blob->generateFromGlyphRunList(
+ *context->priv().caps()->shaderCaps(), textContext->fOptions,
+ skPaint, scalerContextFlags, viewMatrix, surfaceProps,
+ glyphRunList, rtc->textTarget()->glyphPainter());
+ }
+
+ return blob->test_makeOp(textLen, 0, 0, viewMatrix, x, y, skPaint, filteredColor, surfaceProps,
+ textContext->dfAdjustTable(), rtc->textTarget());
+}
+
+#endif // GR_TEST_UTILS
+#endif // SK_SUPPORT_GPU
+
+SkGlyphRunListPainter::ScopedBuffers::ScopedBuffers(SkGlyphRunListPainter* painter, size_t size)
+ : fPainter{painter} {
+ fPainter->fDrawable.ensureSize(size);
+ if (fPainter->fMaxRunSize < size) {
+ fPainter->fMaxRunSize = size;
+
+ fPainter->fPositions.reset(size);
+ fPainter->fPackedGlyphIDs.reset(size);
+ fPainter->fGlyphPos.reset(size);
+ }
+}
+
+SkGlyphRunListPainter::ScopedBuffers::~ScopedBuffers() {
+ fPainter->fDrawable.reset();
+ fPainter->fPaths.clear();
+ fPainter->fARGBGlyphsIDs.clear();
+ fPainter->fARGBPositions.clear();
+
+ if (fPainter->fMaxRunSize > 200) {
+ fPainter->fMaxRunSize = 0;
+ fPainter->fPositions.reset();
+ fPainter->fPackedGlyphIDs.reset();
+ fPainter->fGlyphPos.reset();
+ fPainter->fPaths.shrink_to_fit();
+ fPainter->fARGBGlyphsIDs.shrink_to_fit();
+ fPainter->fARGBPositions.shrink_to_fit();
+ }
+}
+
+SkVector SkGlyphPositionRoundingSpec::HalfAxisSampleFreq(
+        bool isSubpixel, SkAxisAlignment axisAlignment) {
+ if (!isSubpixel) {
+ return {SK_ScalarHalf, SK_ScalarHalf};
+ } else {
+ static constexpr SkScalar kSubpixelRounding = SkFixedToScalar(SkGlyph::kSubpixelRound);
+ switch (axisAlignment) {
+ case kX_SkAxisAlignment:
+ return {kSubpixelRounding, SK_ScalarHalf};
+ case kY_SkAxisAlignment:
+ return {SK_ScalarHalf, kSubpixelRounding};
+ case kNone_SkAxisAlignment:
+ return {kSubpixelRounding, kSubpixelRounding};
+ }
+ }
+
+    // Some compilers need a return here even though the switch above is exhaustive.
+ return {0, 0};
+}
+
+SkIPoint SkGlyphPositionRoundingSpec::IgnorePositionMask(
+ bool isSubpixel, SkAxisAlignment axisAlignment) {
+ return SkIPoint::Make((!isSubpixel || axisAlignment == kY_SkAxisAlignment) ? 0 : ~0,
+ (!isSubpixel || axisAlignment == kX_SkAxisAlignment) ? 0 : ~0);
+}
+
+SkGlyphPositionRoundingSpec::SkGlyphPositionRoundingSpec(bool isSubpixel,
+ SkAxisAlignment axisAlignment)
+ : halfAxisSampleFreq{HalfAxisSampleFreq(isSubpixel, axisAlignment)}
+ , ignorePositionMask{IgnorePositionMask(isSubpixel, axisAlignment)} {
+}
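+
+// A minimal usage sketch (illustrative only, not part of the upstream sources):
+//
+//   SkGlyphPositionRoundingSpec spec{/*isSubpixel=*/true, kX_SkAxisAlignment};
+//   // spec.halfAxisSampleFreq == {1/8, 1/2}: subpixel rounding in x, half-pixel in y.
+//   // spec.ignorePositionMask == {~0, 0}: keep x fraction bits, ignore y's.
+//   SkPoint rounded = position + spec.halfAxisSampleFreq;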
diff --git a/gfx/skia/skia/src/core/SkGlyphRunPainter.h b/gfx/skia/skia/src/core/SkGlyphRunPainter.h
new file mode 100644
index 0000000000..a5aff15cd5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRunPainter.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphRunPainter_DEFINED
+#define SkGlyphRunPainter_DEFINED
+
+#include "include/core/SkSurfaceProps.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkGlyphBuffer.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkTextBlobPriv.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/text/GrTextContext.h"
+class GrColorInfo;
+class GrRenderTargetContext;
+#endif
+
+class SkGlyphRunPainterInterface;
+class SkStrikeSpec;
+
+// halfAxisSampleFreq and ignorePositionMask are used to calculate the subpixel position of
+// a glyph. The per-component (x or y) calculation is:
+//
+//       subpixelOffset = (floor((viewportPosition + rounding) & mask) >> 14) & 3
+//
+// where mask is either 0 or ~0, and rounding is either
+// 1/2 for non-subpixel or 1/8 for subpixel.
+struct SkGlyphPositionRoundingSpec {
+ SkGlyphPositionRoundingSpec(bool isSubpixel, SkAxisAlignment axisAlignment);
+ const SkVector halfAxisSampleFreq;
+ const SkIPoint ignorePositionMask;
+
+private:
+ static SkVector HalfAxisSampleFreq(bool isSubpixel, SkAxisAlignment axisAlignment);
+ static SkIPoint IgnorePositionMask(bool isSubpixel, SkAxisAlignment axisAlignment);
+};
+
+class SkStrikeCommon {
+public:
+ // An atlas consists of plots, and plots hold glyphs. The minimum a plot can be is 256x256.
+ // This means that the maximum size a glyph can be is 256x256.
+ static constexpr uint16_t kSkSideTooBigForAtlas = 256;
+};
+
+class SkGlyphRunListPainter {
+public:
+    // Constructor for SkBitmapDevice.
+ SkGlyphRunListPainter(const SkSurfaceProps& props,
+ SkColorType colorType,
+ SkColorSpace* cs,
+ SkStrikeForGPUCacheInterface* strikeCache);
+
+#if SK_SUPPORT_GPU
+ // The following two ctors are used exclusively by the GPU, and will always use the global
+ // strike cache.
+ SkGlyphRunListPainter(const SkSurfaceProps&, const GrColorInfo&);
+ explicit SkGlyphRunListPainter(const GrRenderTargetContext& renderTargetContext);
+#endif // SK_SUPPORT_GPU
+
+ class BitmapDevicePainter {
+ public:
+ virtual ~BitmapDevicePainter() = default;
+
+ virtual void paintPaths(
+ SkDrawableGlyphBuffer* drawables, SkScalar scale, const SkPaint& paint) const = 0;
+
+ virtual void paintMasks(SkDrawableGlyphBuffer* drawables, const SkPaint& paint) const = 0;
+ };
+
+ void drawForBitmapDevice(
+ const SkGlyphRunList& glyphRunList, const SkMatrix& deviceMatrix,
+ const BitmapDevicePainter* bitmapDevice);
+
+#if SK_SUPPORT_GPU
+ // A nullptr for process means that the calls to the cache will be performed, but none of the
+ // callbacks will be called.
+ void processGlyphRunList(const SkGlyphRunList& glyphRunList,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ bool contextSupportsDistanceFieldText,
+ const GrTextContext::Options& options,
+ SkGlyphRunPainterInterface* process);
+#endif // SK_SUPPORT_GPU
+
+private:
+ SkGlyphRunListPainter(const SkSurfaceProps& props, SkColorType colorType,
+ SkScalerContextFlags flags, SkStrikeForGPUCacheInterface* strikeCache);
+
+ struct ScopedBuffers {
+ ScopedBuffers(SkGlyphRunListPainter* painter, size_t size);
+ ~ScopedBuffers();
+ SkGlyphRunListPainter* fPainter;
+ };
+
+ ScopedBuffers SK_WARN_UNUSED_RESULT ensureBuffers(const SkGlyphRunList& glyphRunList);
+
+    // TODO: Remove once ensureBuffers can be hoisted above the loop over the run list in all
+    // cases.
+ ScopedBuffers SK_WARN_UNUSED_RESULT ensureBuffers(const SkGlyphRun& glyphRun);
+
+ /**
+ * @param fARGBPositions in source space
+ * @param fARGBGlyphsIDs the glyphs to process
+ * @param fGlyphPos used as scratch space
+ * @param maxSourceGlyphDimension the longest dimension of any glyph as if all fARGBGlyphsIDs
+ * were drawn in source space (as if viewMatrix were identity)
+ */
+ void processARGBFallback(SkScalar maxSourceGlyphDimension,
+ const SkPaint& runPaint,
+ const SkFont& runFont,
+ const SkMatrix& viewMatrix,
+ SkGlyphRunPainterInterface* process);
+
+ static SkSpan<const SkPackedGlyphID> DeviceSpacePackedGlyphIDs(
+ const SkGlyphPositionRoundingSpec& roundingSpec,
+ const SkMatrix& viewMatrix,
+ const SkPoint& origin,
+ int n,
+ const SkGlyphID* glyphIDs,
+ const SkPoint* positions,
+ SkPoint* mappedPositions,
+ SkPackedGlyphID* results);
+
+ static SkSpan<const SkPackedGlyphID> SourceSpacePackedGlyphIDs(
+ const SkPoint& origin,
+ int n,
+ const SkGlyphID* glyphIDs,
+ const SkPoint* positions,
+ SkPoint* mappedPositions,
+ SkPackedGlyphID* results);
+
+ // The props as on the actual device.
+ const SkSurfaceProps fDeviceProps;
+ // The props for when the bitmap device can't draw LCD text.
+ const SkSurfaceProps fBitmapFallbackProps;
+ const SkColorType fColorType;
+ const SkScalerContextFlags fScalerContextFlags;
+
+ SkStrikeForGPUCacheInterface* const fStrikeCache;
+
+ SkDrawableGlyphBuffer fDrawable;
+
+ size_t fMaxRunSize{0};
+ SkAutoTMalloc<SkPoint> fPositions;
+ SkAutoTMalloc<SkPackedGlyphID> fPackedGlyphIDs;
+ SkAutoTMalloc<SkGlyphPos> fGlyphPos;
+
+ std::vector<SkGlyphPos> fPaths;
+
+ // Vectors for tracking ARGB fallback information.
+ std::vector<SkGlyphID> fARGBGlyphsIDs;
+ std::vector<SkPoint> fARGBPositions;
+};
+
+// SkGlyphRunPainterInterface are all the ways that Ganesh generates glyphs. The first
+// distinction is between Device and Source.
+// * Device - the data in the cache is scaled to the device. There is no transformation from the
+// cache to the screen.
+// * Source - the data in the cache needs to be scaled from the cache to source space using the
+// factor cacheToSourceScale. When drawn the system must combine cacheToSourceScale and the
+// deviceView matrix to transform the cache data onto the screen. This allows zooming and
+// simple animation to reuse the same glyph data by just changing the transform.
+//
+// In addition to the transformation type above, Masks, Paths, SDFT, and Fallback (the
+// rendering method of last resort) are the different formats of data used from the cache.
+class SkGlyphRunPainterInterface {
+public:
+ virtual ~SkGlyphRunPainterInterface() = default;
+
+ virtual void startRun(const SkGlyphRun& glyphRun, bool useSDFT) = 0;
+
+ virtual void processDeviceMasks(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) = 0;
+
+ virtual void processSourcePaths(SkSpan<const SkGlyphPos> paths,
+ const SkStrikeSpec& strikeSpec) = 0;
+
+ virtual void processDevicePaths(SkSpan<const SkGlyphPos> paths) = 0;
+
+ virtual void processSourceSDFT(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ const SkFont& runFont,
+ SkScalar minScale,
+ SkScalar maxScale,
+ bool hasWCoord) = 0;
+
+ virtual void processSourceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ bool hasW) = 0;
+
+ virtual void processDeviceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) = 0;
+
+};
+
+#endif // SkGlyphRunPainter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
new file mode 100644
index 0000000000..c863908aa3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGpuBlurUtils.h"
+
+#include "include/core/SkRect.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h"
+#include "src/gpu/effects/GrMatrixConvolutionEffect.h"
+
+#include "src/gpu/SkGr.h"
+
+#define MAX_BLUR_SIGMA 4.0f
+
+using Direction = GrGaussianConvolutionFragmentProcessor::Direction;
+
+static void scale_irect_roundout(SkIRect* rect, float xScale, float yScale) {
+ rect->fLeft = SkScalarFloorToInt(rect->fLeft * xScale);
+ rect->fTop = SkScalarFloorToInt(rect->fTop * yScale);
+ rect->fRight = SkScalarCeilToInt(rect->fRight * xScale);
+ rect->fBottom = SkScalarCeilToInt(rect->fBottom * yScale);
+}
+
+static void scale_irect(SkIRect* rect, int xScale, int yScale) {
+ rect->fLeft *= xScale;
+ rect->fTop *= yScale;
+ rect->fRight *= xScale;
+ rect->fBottom *= yScale;
+}
+
+#ifdef SK_DEBUG
+static inline int is_even(int x) { return !(x & 1); }
+#endif
+
+static void shrink_irect_by_2(SkIRect* rect, bool xAxis, bool yAxis) {
+ if (xAxis) {
+ SkASSERT(is_even(rect->fLeft) && is_even(rect->fRight));
+ rect->fLeft /= 2;
+ rect->fRight /= 2;
+ }
+ if (yAxis) {
+ SkASSERT(is_even(rect->fTop) && is_even(rect->fBottom));
+ rect->fTop /= 2;
+ rect->fBottom /= 2;
+ }
+}
+
+static float adjust_sigma(float sigma, int maxTextureSize, int* scaleFactor, int* radius) {
+ *scaleFactor = 1;
+ while (sigma > MAX_BLUR_SIGMA) {
+ *scaleFactor *= 2;
+ sigma *= 0.5f;
+ if (*scaleFactor > maxTextureSize) {
+ *scaleFactor = maxTextureSize;
+ sigma = MAX_BLUR_SIGMA;
+ }
+ }
+ *radius = static_cast<int>(ceilf(sigma * 3.0f));
+ SkASSERT(*radius <= GrGaussianConvolutionFragmentProcessor::kMaxKernelRadius);
+ return sigma;
+}
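+
+// Worked example (illustrative, not part of the upstream sources): sigma == 10 halves to
+// 5 and then to 2.5, so adjust_sigma() returns 2.5 with *scaleFactor == 4 and
+// *radius == ceil(2.5 * 3) == 8.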
+
+static void convolve_gaussian_1d(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const SkIRect& dstRect,
+ const SkIPoint& srcOffset,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ Direction direction,
+ int radius,
+ float sigma,
+ GrTextureDomain::Mode mode,
+ int bounds[2]) {
+ GrPaint paint;
+ std::unique_ptr<GrFragmentProcessor> conv(GrGaussianConvolutionFragmentProcessor::Make(
+ std::move(proxy), srcColorType, direction, radius, sigma, mode, bounds));
+ paint.addColorFragmentProcessor(std::move(conv));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ SkMatrix localMatrix = SkMatrix::MakeTrans(-SkIntToScalar(srcOffset.x()),
+ -SkIntToScalar(srcOffset.y()));
+ renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(dstRect), localMatrix);
+}
+
+static std::unique_ptr<GrRenderTargetContext> convolve_gaussian_2d(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ const SkIRect& srcBounds,
+ const SkIPoint& srcOffset,
+ int radiusX,
+ int radiusY,
+ SkScalar sigmaX,
+ SkScalar sigmaY,
+ GrTextureDomain::Mode mode,
+ int finalW,
+ int finalH,
+ sk_sp<SkColorSpace> finalCS,
+ SkBackingFit dstFit) {
+
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ dstFit,
+ finalW,
+ finalH,
+ srcColorType,
+ std::move(finalCS),
+ 1,
+ GrMipMapped::kNo,
+ srcProxy->origin(),
+ nullptr,
+ SkBudgeted::kYes,
+ srcProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ SkMatrix localMatrix = SkMatrix::MakeTrans(-SkIntToScalar(srcOffset.x()),
+ -SkIntToScalar(srcOffset.y()));
+ SkISize size = SkISize::Make(2 * radiusX + 1, 2 * radiusY + 1);
+ SkIPoint kernelOffset = SkIPoint::Make(radiusX, radiusY);
+ GrPaint paint;
+ auto conv = GrMatrixConvolutionEffect::MakeGaussian(std::move(srcProxy), srcBounds, size,
+ 1.0, 0.0, kernelOffset, mode, true,
+ sigmaX, sigmaY);
+ paint.addColorFragmentProcessor(std::move(conv));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ GrFixedClip clip(SkIRect::MakeWH(finalW, finalH));
+
+ renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeWH(finalW, finalH), localMatrix);
+
+ return renderTargetContext;
+}
+
+// NOTE: Both convolve_gaussian and decimate accept a proxyOffset. This is separate from the
+// srcBounds and srcOffset, which are relative to the content rect of the image, whereas proxyOffset
+// maps from the content rect to the proxy's coordinate space. Due to how the destination bounds are
+// calculated, it is more convenient to have the proxy offset kept separate from the logical bounds
+// (which do impact destination decisions). Both functions incorporate the proxy offset into the
+// geometry they submit or before calling convolve_gaussian_1d.
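+//
+// For example (illustrative): a content-space point p samples the proxy at
+// p + proxyOffset, while the destination rects below are computed purely from the
+// unoffset content-space bounds.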
+
+static std::unique_ptr<GrRenderTargetContext> convolve_gaussian(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ const SkIPoint& proxyOffset,
+ const SkIRect& srcRect,
+ const SkIPoint& srcOffset,
+ Direction direction,
+ int radius,
+ float sigma,
+ SkIRect* contentRect,
+ GrTextureDomain::Mode mode,
+ int finalW,
+ int finalH,
+ sk_sp<SkColorSpace> finalCS,
+ SkBackingFit fit) {
+ SkASSERT(srcRect.width() <= finalW && srcRect.height() <= finalH);
+
+ auto dstRenderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ fit,
+ srcRect.width(),
+ srcRect.height(),
+ srcColorType,
+ std::move(finalCS),
+ 1,
+ GrMipMapped::kNo,
+ srcProxy->origin(),
+ nullptr,
+ SkBudgeted::kYes,
+ srcProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!dstRenderTargetContext) {
+ return nullptr;
+ }
+
+ GrFixedClip clip(SkIRect::MakeWH(finalW, finalH));
+
+ int bounds[2] = { 0, 0 };
+ SkIRect dstRect = SkIRect::MakeWH(srcRect.width(), srcRect.height());
+ SkIPoint netOffset = srcOffset - proxyOffset;
+ if (GrTextureDomain::kIgnore_Mode == mode) {
+ *contentRect = dstRect;
+ convolve_gaussian_1d(dstRenderTargetContext.get(), clip, dstRect, netOffset,
+ std::move(srcProxy), srcColorType, direction, radius, sigma,
+ GrTextureDomain::kIgnore_Mode, bounds);
+ return dstRenderTargetContext;
+ }
+ // These destination rects need to be adjusted by srcOffset, but should *not* be adjusted by
+ // the proxyOffset, which is why keeping them separate is convenient.
+ SkIRect midRect = *contentRect, leftRect, rightRect;
+ midRect.offset(srcOffset);
+ SkIRect topRect, bottomRect;
+ if (Direction::kX == direction) {
+ bounds[0] = contentRect->left() + proxyOffset.x();
+ bounds[1] = contentRect->right() + proxyOffset.x();
+ topRect = SkIRect::MakeLTRB(0, 0, dstRect.right(), midRect.top());
+ bottomRect = SkIRect::MakeLTRB(0, midRect.bottom(), dstRect.right(), dstRect.bottom());
+ midRect.inset(radius, 0);
+ leftRect = SkIRect::MakeLTRB(0, midRect.top(), midRect.left(), midRect.bottom());
+ rightRect =
+ SkIRect::MakeLTRB(midRect.right(), midRect.top(), dstRect.width(), midRect.bottom());
+ dstRect.fTop = midRect.top();
+ dstRect.fBottom = midRect.bottom();
+
+ contentRect->fLeft = dstRect.fLeft;
+ contentRect->fTop = midRect.fTop;
+ contentRect->fRight = dstRect.fRight;
+ contentRect->fBottom = midRect.fBottom;
+ } else {
+ bounds[0] = contentRect->top() + proxyOffset.y();
+ bounds[1] = contentRect->bottom() + proxyOffset.y();
+ topRect = SkIRect::MakeLTRB(0, 0, midRect.left(), dstRect.bottom());
+ bottomRect = SkIRect::MakeLTRB(midRect.right(), 0, dstRect.right(), dstRect.bottom());
+ midRect.inset(0, radius);
+ leftRect = SkIRect::MakeLTRB(midRect.left(), 0, midRect.right(), midRect.top());
+ rightRect =
+ SkIRect::MakeLTRB(midRect.left(), midRect.bottom(), midRect.right(), dstRect.height());
+ dstRect.fLeft = midRect.left();
+ dstRect.fRight = midRect.right();
+
+ contentRect->fLeft = midRect.fLeft;
+ contentRect->fTop = dstRect.fTop;
+ contentRect->fRight = midRect.fRight;
+ contentRect->fBottom = dstRect.fBottom;
+ }
+ if (!topRect.isEmpty()) {
+ dstRenderTargetContext->clear(&topRect, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+ }
+
+ if (!bottomRect.isEmpty()) {
+ dstRenderTargetContext->clear(&bottomRect, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+ }
+
+ if (midRect.isEmpty()) {
+ // Blur radius covers srcBounds; use bounds over entire draw
+ convolve_gaussian_1d(dstRenderTargetContext.get(), clip, dstRect, netOffset,
+ std::move(srcProxy), srcColorType, direction, radius, sigma, mode,
+ bounds);
+ } else {
+ // Draw right and left margins with bounds; middle without.
+ convolve_gaussian_1d(dstRenderTargetContext.get(), clip, leftRect, netOffset,
+ srcProxy, srcColorType, direction, radius, sigma, mode, bounds);
+ convolve_gaussian_1d(dstRenderTargetContext.get(), clip, rightRect, netOffset,
+ srcProxy, srcColorType, direction, radius, sigma, mode, bounds);
+ convolve_gaussian_1d(dstRenderTargetContext.get(), clip, midRect, netOffset,
+ std::move(srcProxy), srcColorType, direction, radius, sigma,
+ GrTextureDomain::kIgnore_Mode, bounds);
+ }
+
+ return dstRenderTargetContext;
+}
+
+// Returns a high quality scaled-down version of src. This is used to create an intermediate,
+// shrunken version of the source image in the event that the requested blur sigma exceeds
+// MAX_BLUR_SIGMA.
+static sk_sp<GrTextureProxy> decimate(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ const SkIPoint& proxyOffset,
+ SkIPoint* srcOffset,
+ SkIRect* contentRect,
+ int scaleFactorX, int scaleFactorY,
+ int radiusX, int radiusY,
+ GrTextureDomain::Mode mode,
+ int finalW,
+ int finalH,
+ sk_sp<SkColorSpace> finalCS) {
+ SkASSERT(SkIsPow2(scaleFactorX) && SkIsPow2(scaleFactorY));
+ SkASSERT(scaleFactorX > 1 || scaleFactorY > 1);
+
+ SkIRect srcRect;
+ if (GrTextureDomain::kIgnore_Mode == mode) {
+ srcRect = SkIRect::MakeWH(finalW, finalH);
+ } else {
+ srcRect = *contentRect;
+ srcRect.offset(*srcOffset);
+ }
+
+ scale_irect_roundout(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
+ scale_irect(&srcRect, scaleFactorX, scaleFactorY);
+
+ SkIRect dstRect(srcRect);
+
+ // Map the src rect into proxy space, this only has to happen once since subsequent loops
+ // to decimate will have created a new proxy that has its origin at (0, 0).
+ srcRect.offset(proxyOffset.x(), proxyOffset.y());
+ std::unique_ptr<GrRenderTargetContext> dstRenderTargetContext;
+
+ for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
+ shrink_irect_by_2(&dstRect, i < scaleFactorX, i < scaleFactorY);
+
+ // We know this will not be the final draw so we are free to make it an approx match.
+ dstRenderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox,
+ dstRect.fRight,
+ dstRect.fBottom,
+ srcColorType,
+ finalCS,
+ 1,
+ GrMipMapped::kNo,
+ srcProxy->origin(),
+ nullptr,
+ SkBudgeted::kYes,
+ srcProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!dstRenderTargetContext) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ if (GrTextureDomain::kIgnore_Mode != mode && i == 1) {
+ // GrTextureDomainEffect does not support kRepeat_Mode with GrSamplerState::Filter.
+ GrTextureDomain::Mode modeForScaling = GrTextureDomain::kRepeat_Mode == mode
+ ? GrTextureDomain::kDecal_Mode
+ : mode;
+
+ SkRect domain = SkRect::Make(*contentRect);
+ domain.inset((i < scaleFactorX) ? SK_ScalarHalf + SK_ScalarNearlyZero : 0.0f,
+ (i < scaleFactorY) ? SK_ScalarHalf + SK_ScalarNearlyZero : 0.0f);
+ // Ensure that the insetting doesn't invert the domain rectangle.
+ if (domain.fRight < domain.fLeft) {
+ domain.fLeft = domain.fRight = SkScalarAve(domain.fLeft, domain.fRight);
+ }
+ if (domain.fBottom < domain.fTop) {
+ domain.fTop = domain.fBottom = SkScalarAve(domain.fTop, domain.fBottom);
+ }
+ domain.offset(proxyOffset.x(), proxyOffset.y());
+ auto fp = GrTextureDomainEffect::Make(std::move(srcProxy),
+ srcColorType,
+ SkMatrix::I(),
+ domain,
+ modeForScaling,
+ GrSamplerState::Filter::kBilerp);
+ paint.addColorFragmentProcessor(std::move(fp));
+ srcRect.offset(-(*srcOffset));
+ // TODO: consume the srcOffset in both first draws and always set it to zero
+ // back in GaussianBlur
+ srcOffset->set(0, 0);
+ } else {
+ paint.addColorTextureProcessor(std::move(srcProxy), srcColorType, SkMatrix::I(),
+ GrSamplerState::ClampBilerp());
+ }
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ dstRenderTargetContext->fillRectToRect(GrFixedClip::Disabled(), std::move(paint), GrAA::kNo,
+ SkMatrix::I(), SkRect::Make(dstRect),
+ SkRect::Make(srcRect));
+
+ srcProxy = dstRenderTargetContext->asTextureProxyRef();
+ if (!srcProxy) {
+ return nullptr;
+ }
+ srcRect = dstRect;
+ }
+
+ *contentRect = dstRect;
+
+ SkASSERT(dstRenderTargetContext);
+
+ return dstRenderTargetContext->asTextureProxyRef();
+}
+
+// Expand the contents of 'srcRenderTargetContext' to fit in 'dstII'. At this point, we are
+// expanding an intermediate image, so there's no need to account for a proxy offset from the
+// original input.
+static std::unique_ptr<GrRenderTargetContext> reexpand(
+ GrRecordingContext* context,
+ std::unique_ptr<GrRenderTargetContext> srcRenderTargetContext,
+ const SkIRect& localSrcBounds,
+ int scaleFactorX, int scaleFactorY,
+ int finalW,
+ int finalH,
+ sk_sp<SkColorSpace> finalCS,
+ SkBackingFit fit) {
+ const SkIRect srcRect = SkIRect::MakeWH(srcRenderTargetContext->width(),
+ srcRenderTargetContext->height());
+
+ sk_sp<GrTextureProxy> srcProxy = srcRenderTargetContext->asTextureProxyRef();
+ if (!srcProxy) {
+ return nullptr;
+ }
+
+ GrColorType srcColorType = srcRenderTargetContext->colorInfo().colorType();
+
+ srcRenderTargetContext = nullptr; // no longer needed
+
+ auto dstRenderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ fit, finalW, finalH, srcColorType, std::move(finalCS), 1, GrMipMapped::kNo,
+ srcProxy->origin());
+ if (!dstRenderTargetContext) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ SkRect domain = GrTextureDomain::MakeTexelDomain(localSrcBounds, GrTextureDomain::kClamp_Mode,
+ GrTextureDomain::kClamp_Mode);
+ auto fp = GrTextureDomainEffect::Make(std::move(srcProxy), srcColorType, SkMatrix::I(), domain,
+ GrTextureDomain::kClamp_Mode,
+ GrSamplerState::Filter::kBilerp);
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ GrFixedClip clip(SkIRect::MakeWH(finalW, finalH));
+
+ // TODO: using dstII as dstRect results in some image diffs - why?
+ SkIRect dstRect(srcRect);
+ scale_irect(&dstRect, scaleFactorX, scaleFactorY);
+
+ dstRenderTargetContext->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(dstRect), SkRect::Make(srcRect));
+
+ return dstRenderTargetContext;
+}
+
+namespace SkGpuBlurUtils {
+
+std::unique_ptr<GrRenderTargetContext> GaussianBlur(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAT,
+ const SkIPoint& proxyOffset,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkIRect& dstBounds,
+ const SkIRect& srcBounds,
+ float sigmaX,
+ float sigmaY,
+ GrTextureDomain::Mode mode,
+ SkBackingFit fit) {
+ SkASSERT(context);
+
+ TRACE_EVENT2("skia.gpu", "GaussianBlur", "sigmaX", sigmaX, "sigmaY", sigmaY);
+
+ int finalW = dstBounds.width();
+ int finalH = dstBounds.height();
+
+ int scaleFactorX, radiusX;
+ int scaleFactorY, radiusY;
+ int maxTextureSize = context->priv().caps()->maxTextureSize();
+ sigmaX = adjust_sigma(sigmaX, maxTextureSize, &scaleFactorX, &radiusX);
+ sigmaY = adjust_sigma(sigmaY, maxTextureSize, &scaleFactorY, &radiusY);
+ SkASSERT(sigmaX || sigmaY);
+
+ SkIPoint srcOffset = SkIPoint::Make(-dstBounds.x(), -dstBounds.y());
+ SkIRect localSrcBounds = srcBounds;
+ SkIPoint localProxyOffset = proxyOffset;
+
+    // For really small blurs (certainly no wider than 5x5 on desktop GPUs) it is faster to
+    // launch a single non-separable kernel than two separable passes.
+ if (sigmaX > 0.0f && sigmaY > 0.0f &&
+ (2 * radiusX + 1) * (2 * radiusY + 1) <= MAX_KERNEL_SIZE) {
+ // We shouldn't be scaling because this is a small size blur
+ SkASSERT((1 == scaleFactorX) && (1 == scaleFactorY));
+ // Apply the proxy offset to src bounds and offset directly
+ srcOffset -= proxyOffset;
+ localSrcBounds.offset(proxyOffset);
+ return convolve_gaussian_2d(context, std::move(srcProxy), srcColorType, localSrcBounds,
+ srcOffset, radiusX, radiusY, sigmaX, sigmaY, mode,
+ finalW, finalH, colorSpace, fit);
+ }
+
+ // Only the last rendered renderTargetContext needs to match the supplied 'fit'
+ SkBackingFit xFit = fit, yFit = fit;
+ if (scaleFactorX > 1 || scaleFactorY > 1) {
+ xFit = yFit = SkBackingFit::kApprox; // reexpand will be last
+ } else if (sigmaY > 0.0f) {
+ xFit = SkBackingFit::kApprox; // the y-pass will be last
+ }
+
+ GrTextureDomain::Mode currDomainMode = mode;
+ if (scaleFactorX > 1 || scaleFactorY > 1) {
+ srcProxy = decimate(context, std::move(srcProxy), srcColorType, localProxyOffset,
+ &srcOffset, &localSrcBounds, scaleFactorX, scaleFactorY, radiusX,
+ radiusY, currDomainMode, finalW, finalH, colorSpace);
+ if (!srcProxy) {
+ return nullptr;
+ }
+ localProxyOffset.set(0, 0);
+ if (GrTextureDomain::kIgnore_Mode == currDomainMode) {
+ // decimate() always returns an approx texture, possibly with garbage after the image.
+ // We can't ignore the domain anymore.
+ currDomainMode = GrTextureDomain::kClamp_Mode;
+ }
+ }
+
+ std::unique_ptr<GrRenderTargetContext> dstRenderTargetContext;
+
+ auto srcRect = SkIRect::MakeWH(finalW, finalH);
+ scale_irect_roundout(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
+ if (sigmaX > 0.0f) {
+ dstRenderTargetContext = convolve_gaussian(
+ context, std::move(srcProxy), srcColorType, localProxyOffset, srcRect, srcOffset,
+ Direction::kX, radiusX, sigmaX, &localSrcBounds, currDomainMode, finalW, finalH,
+ colorSpace, xFit);
+ if (!dstRenderTargetContext) {
+ return nullptr;
+ }
+
+ srcProxy = dstRenderTargetContext->asTextureProxyRef();
+ if (!srcProxy) {
+ return nullptr;
+ }
+
+ srcRect.offsetTo(0, 0);
+ srcOffset.set(0, 0);
+ localProxyOffset.set(0, 0);
+ if (SkBackingFit::kApprox == xFit && GrTextureDomain::kIgnore_Mode == currDomainMode) {
+ // srcProxy is now an approx texture, possibly with garbage after the image. We can't
+ // ignore the domain anymore.
+ currDomainMode = GrTextureDomain::kClamp_Mode;
+ }
+ }
+
+ if (sigmaY > 0.0f) {
+ dstRenderTargetContext = convolve_gaussian(
+ context, std::move(srcProxy), srcColorType, localProxyOffset, srcRect, srcOffset,
+ Direction::kY, radiusY, sigmaY, &localSrcBounds, currDomainMode, finalW, finalH,
+ colorSpace, yFit);
+ if (!dstRenderTargetContext) {
+ return nullptr;
+ }
+
+ srcProxy = dstRenderTargetContext->asTextureProxyRef();
+ if (!srcProxy) {
+ return nullptr;
+ }
+
+ srcRect.offsetTo(0, 0);
+ srcOffset.set(0, 0);
+ localProxyOffset.set(0, 0);
+ }
+
+ SkASSERT(dstRenderTargetContext);
+ SkASSERT(srcProxy.get() == dstRenderTargetContext->asTextureProxy());
+ SkASSERT(localProxyOffset.x() == 0 && localProxyOffset.y() == 0);
+
+ if (scaleFactorX > 1 || scaleFactorY > 1) {
+ dstRenderTargetContext =
+ reexpand(context, std::move(dstRenderTargetContext), localSrcBounds, scaleFactorX,
+ scaleFactorY, finalW, finalH, colorSpace, fit);
+ }
+
+ SkASSERT(!dstRenderTargetContext || dstRenderTargetContext->origin() == srcProxy->origin());
+ return dstRenderTargetContext;
+}
+}  // namespace SkGpuBlurUtils
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.h b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
new file mode 100644
index 0000000000..9143518cd1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGpuBlurUtils_DEFINED
+#define SkGpuBlurUtils_DEFINED
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+
+class GrContext;
+class GrTexture;
+
+struct SkRect;
+
+namespace SkGpuBlurUtils {
+ /**
+ * Applies a 2D Gaussian blur to a given texture. The blurred result is returned
+ * as a renderTargetContext in case the caller wishes to draw into the result.
+ *
+ * The 'proxyOffset' is kept separate from 'srcBounds' because they exist in different
+ * coordinate spaces. 'srcBounds' exists in the content space of the special image, and
+ * 'proxyOffset' maps from the content space to the proxy's space.
+ *
+ * Note: one of sigmaX and sigmaY should be non-zero!
+ * @param context The GPU context
+ * @param srcProxy The source to be blurred.
+ * @param srcColorType The colorType of srcProxy
+ * @param srcAlphaType The alphaType of srcProxy
+ * @param proxyOffset The offset from the top-left corner to valid texels in 'srcProxy',
+ which should come from the subset of the owning SkSpecialImage.
+ * @param colorSpace Color space of the source (used for the renderTargetContext result,
+ * too).
+ * @param dstBounds The destination bounds, relative to the source texture.
+ * @param srcBounds The source bounds, relative to the source texture's offset. No pixels
+ * will be sampled outside of this rectangle.
+ * @param sigmaX The blur's standard deviation in X.
+ * @param sigmaY The blur's standard deviation in Y.
+ * @param mode The mode to handle samples outside bounds.
+ * @param fit backing fit for the returned render target context
+ * @return The renderTargetContext containing the blurred result.
+ */
+std::unique_ptr<GrRenderTargetContext> GaussianBlur(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkIPoint& proxyOffset,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkIRect& dstBounds,
+ const SkIRect& srcBounds,
+ float sigmaX,
+ float sigmaY,
+ GrTextureDomain::Mode mode,
+ SkBackingFit fit = SkBackingFit::kApprox);
+}  // namespace SkGpuBlurUtils
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/core/SkGraphics.cpp b/gfx/skia/skia/src/core/SkGraphics.cpp
new file mode 100644
index 0000000000..5db7daa1fe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGraphics.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkGraphics.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTime.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/utils/SkUTF.h"
+
+#include <stdlib.h>
+
+void SkGraphics::Init() {
+ // SkGraphics::Init() must be thread-safe and idempotent.
+ SkCpu::CacheRuntimeFeatures();
+ SkOpts::Init();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGraphics::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ SkResourceCache::DumpMemoryStatistics(dump);
+ SkStrikeCache::DumpMemoryStatistics(dump);
+}
+
+void SkGraphics::PurgeAllCaches() {
+ SkGraphics::PurgeFontCache();
+ SkGraphics::PurgeResourceCache();
+ SkImageFilter_Base::PurgeCache();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char kFontCacheLimitStr[] = "font-cache-limit";
+static const size_t kFontCacheLimitLen = sizeof(kFontCacheLimitStr) - 1;
+
+static const struct {
+ const char* fStr;
+ size_t fLen;
+ size_t (*fFunc)(size_t);
+} gFlags[] = {
+ { kFontCacheLimitStr, kFontCacheLimitLen, SkGraphics::SetFontCacheLimit }
+};
+
+/* flags are of the form param; or param=value; */
+void SkGraphics::SetFlags(const char* flags) {
+ if (!flags) {
+ return;
+ }
+ const char* nextSemi;
+ do {
+ size_t len = strlen(flags);
+ const char* paramEnd = flags + len;
+ const char* nextEqual = strchr(flags, '=');
+ if (nextEqual && paramEnd > nextEqual) {
+ paramEnd = nextEqual;
+ }
+ nextSemi = strchr(flags, ';');
+ if (nextSemi && paramEnd > nextSemi) {
+ paramEnd = nextSemi;
+ }
+ size_t paramLen = paramEnd - flags;
+ for (int i = 0; i < (int)SK_ARRAY_COUNT(gFlags); ++i) {
+ if (paramLen != gFlags[i].fLen) {
+ continue;
+ }
+ if (strncmp(flags, gFlags[i].fStr, paramLen) == 0) {
+ size_t val = 0;
+ if (nextEqual) {
+ val = (size_t) atoi(nextEqual + 1);
+ }
+ (gFlags[i].fFunc)(val);
+ break;
+ }
+ }
+ flags = nextSemi + 1;
+ } while (nextSemi);
+}
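+
+// Usage sketch (illustrative, not part of the upstream sources):
+//
+//   SkGraphics::SetFlags("font-cache-limit=4194304;");
+//
+// matches kFontCacheLimitStr in gFlags above and calls
+// SkGraphics::SetFontCacheLimit(4194304).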
+
+size_t SkGraphics::GetFontCacheLimit() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheSizeLimit();
+}
+
+size_t SkGraphics::SetFontCacheLimit(size_t bytes) {
+ return SkStrikeCache::GlobalStrikeCache()->setCacheSizeLimit(bytes);
+}
+
+size_t SkGraphics::GetFontCacheUsed() {
+ return SkStrikeCache::GlobalStrikeCache()->getTotalMemoryUsed();
+}
+
+int SkGraphics::GetFontCacheCountLimit() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheCountLimit();
+}
+
+int SkGraphics::SetFontCacheCountLimit(int count) {
+ return SkStrikeCache::GlobalStrikeCache()->setCacheCountLimit(count);
+}
+
+int SkGraphics::GetFontCacheCountUsed() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheCountUsed();
+}
+
+int SkGraphics::GetFontCachePointSizeLimit() {
+ return SkStrikeCache::GlobalStrikeCache()->getCachePointSizeLimit();
+}
+
+int SkGraphics::SetFontCachePointSizeLimit(int limit) {
+ return SkStrikeCache::GlobalStrikeCache()->setCachePointSizeLimit(limit);
+}
+
+void SkGraphics::PurgeFontCache() {
+ SkStrikeCache::GlobalStrikeCache()->purgeAll();
+ SkTypefaceCache::PurgeAll();
+}
diff --git a/gfx/skia/skia/src/core/SkHalf.cpp b/gfx/skia/skia/src/core/SkHalf.cpp
new file mode 100644
index 0000000000..49ce7efbe2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkHalf.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkHalf.h"
+
+uint16_t halfMantissa(SkHalf h) {
+ return h & 0x03ff;
+}
+
+uint16_t halfExponent(SkHalf h) {
+ return (h >> 10) & 0x001f;
+}
+
+uint16_t halfSign(SkHalf h) {
+ return h >> 15;
+}
+
+union FloatUIntUnion {
+ uint32_t fUInt; // this must come first for the initializations below to work
+ float fFloat;
+};
+
+// based on Fabien Giesen's float_to_half_fast3()
+// see https://gist.github.com/rygorous/2156668
+SkHalf SkFloatToHalf(float f) {
+ static const uint32_t f32infty = { 255 << 23 };
+ static const uint32_t f16infty = { 31 << 23 };
+ static const FloatUIntUnion magic = { 15 << 23 };
+ static const uint32_t sign_mask = 0x80000000u;
+ static const uint32_t round_mask = ~0xfffu;
+ SkHalf o = 0;
+
+ FloatUIntUnion floatUnion;
+ floatUnion.fFloat = f;
+
+ uint32_t sign = floatUnion.fUInt & sign_mask;
+ floatUnion.fUInt ^= sign;
+
+ // NOTE all the integer compares in this function can be safely
+ // compiled into signed compares since all operands are below
+ // 0x80000000. Important if you want fast straight SSE2 code
+ // (since there's no unsigned PCMPGTD).
+
+    if (floatUnion.fUInt >= f32infty) {
+        // Inf or NaN (all exponent bits set): NaN->qNaN and Inf->Inf
+        o = (floatUnion.fUInt > f32infty) ? 0x7e00 : 0x7c00;
+    } else {
+        // (De)normalized number or zero
+ floatUnion.fUInt &= round_mask;
+ floatUnion.fFloat *= magic.fFloat;
+ floatUnion.fUInt -= round_mask;
+ // Clamp to signed infinity if overflowed
+ if (floatUnion.fUInt > f16infty) {
+ floatUnion.fUInt = f16infty;
+ }
+
+ o = floatUnion.fUInt >> 13; // Take the bits!
+ }
+
+ o |= sign >> 16;
+ return o;
+}
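+
+// Sanity values (illustrative, not part of the upstream sources):
+// SkFloatToHalf(1.0f) == 0x3c00, SkFloatToHalf(-2.0f) == 0xc000, and any float above the
+// largest finite half (65504) clamps to the infinity encoding 0x7c00.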
+
+// based on Fabien Giesen's half_to_float_fast2()
+// see https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+float SkHalfToFloat(SkHalf h) {
+ static const FloatUIntUnion magic = { 126 << 23 };
+ FloatUIntUnion o;
+
+    if (halfExponent(h) == 0) {
+        // Zero / Denormal
+        o.fUInt = magic.fUInt + halfMantissa(h);
+        o.fFloat -= magic.fFloat;
+    } else {
+        // Set mantissa
+        o.fUInt = halfMantissa(h) << 13;
+        // Set exponent
+        if (halfExponent(h) == 0x1f) {
+            // Inf/NaN
+            o.fUInt |= (255 << 23);
+        } else {
+            o.fUInt |= ((127 - 15 + halfExponent(h)) << 23);
+        }
+    }
+
+ // Set sign
+ o.fUInt |= (halfSign(h) << 31);
+ return o.fFloat;
+}
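+
+// Illustrative check (not part of the upstream sources): the smallest denormal half,
+// 0x0001, takes the magic-number path above and decodes to 2^-24 (~5.96e-8), and
+// SkHalfToFloat(SkFloatToHalf(x)) is exact for any x representable as a half.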
diff --git a/gfx/skia/skia/src/core/SkICC.cpp b/gfx/skia/skia/src/core/SkICC.cpp
new file mode 100644
index 0000000000..78e4a90f06
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkICC.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkICC.h"
+#include "include/private/SkFixed.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkICCPriv.h"
+#include "src/core/SkMD5.h"
+#include "src/core/SkUtils.h"
+
+static constexpr char kDescriptionTagBodyPrefix[12] =
+ { 'G', 'o', 'o', 'g', 'l', 'e', '/', 'S', 'k', 'i', 'a' , '/'};
+
+static constexpr size_t kICCDescriptionTagSize = 44;
+
+static_assert(kICCDescriptionTagSize ==
+ sizeof(kDescriptionTagBodyPrefix) + 2 * sizeof(SkMD5::Digest), "");
+static constexpr size_t kDescriptionTagBodySize = kICCDescriptionTagSize * 2; // ascii->utf16be
+
+static_assert(SkIsAlign4(kDescriptionTagBodySize), "Description must be aligned to 4-bytes.");
+static constexpr uint32_t kDescriptionTagHeader[7] {
+ SkEndian_SwapBE32(kTAG_TextType), // Type signature
+ 0, // Reserved
+ SkEndian_SwapBE32(1), // Number of records
+ SkEndian_SwapBE32(12), // Record size (must be 12)
+ SkEndian_SwapBE32(SkSetFourByteTag('e', 'n', 'U', 'S')), // English USA
+ SkEndian_SwapBE32(kDescriptionTagBodySize), // Length of string
+ SkEndian_SwapBE32(28), // Offset of string
+};
+
+static constexpr uint32_t kWhitePointTag[5] {
+ SkEndian_SwapBE32(kXYZ_PCSSpace),
+ 0,
+ SkEndian_SwapBE32(0x0000f6d6), // X = 0.96420 (D50)
+ SkEndian_SwapBE32(0x00010000), // Y = 1.00000 (D50)
+ SkEndian_SwapBE32(0x0000d32d), // Z = 0.82491 (D50)
+};
+
+// Google Inc. 2016 (UTF-16)
+static constexpr uint8_t kCopyrightTagBody[] = {
+ 0x00, 0x47, 0x00, 0x6f,
+ 0x00, 0x6f, 0x00, 0x67,
+ 0x00, 0x6c, 0x00, 0x65,
+ 0x00, 0x20, 0x00, 0x49,
+ 0x00, 0x6e, 0x00, 0x63,
+ 0x00, 0x2e, 0x00, 0x20,
+ 0x00, 0x32, 0x00, 0x30,
+ 0x00, 0x31, 0x00, 0x36,
+};
+static_assert(SkIsAlign4(sizeof(kCopyrightTagBody)), "Copyright must be aligned to 4-bytes.");
+static constexpr uint32_t kCopyrightTagHeader[7] {
+ SkEndian_SwapBE32(kTAG_TextType), // Type signature
+ 0, // Reserved
+ SkEndian_SwapBE32(1), // Number of records
+ SkEndian_SwapBE32(12), // Record size (must be 12)
+ SkEndian_SwapBE32(SkSetFourByteTag('e', 'n', 'U', 'S')), // English USA
+ SkEndian_SwapBE32(sizeof(kCopyrightTagBody)), // Length of string
+ SkEndian_SwapBE32(28), // Offset of string
+};
+
+// We will write a profile with the minimum nine required tags.
+static constexpr uint32_t kICCNumEntries = 9;
+
+static constexpr uint32_t kTAG_desc = SkSetFourByteTag('d', 'e', 's', 'c');
+static constexpr uint32_t kTAG_desc_Bytes = sizeof(kDescriptionTagHeader) +
+ kDescriptionTagBodySize;
+static constexpr uint32_t kTAG_desc_Offset = kICCHeaderSize +
+ kICCNumEntries * kICCTagTableEntrySize;
+
+static constexpr uint32_t kTAG_XYZ_Bytes = 20;
+static constexpr uint32_t kTAG_rXYZ_Offset = kTAG_desc_Offset + kTAG_desc_Bytes;
+static constexpr uint32_t kTAG_gXYZ_Offset = kTAG_rXYZ_Offset + kTAG_XYZ_Bytes;
+static constexpr uint32_t kTAG_bXYZ_Offset = kTAG_gXYZ_Offset + kTAG_XYZ_Bytes;
+
+static constexpr uint32_t kTAG_TRC_Bytes = 40;
+static constexpr uint32_t kTAG_rTRC_Offset = kTAG_bXYZ_Offset + kTAG_XYZ_Bytes;
+static constexpr uint32_t kTAG_gTRC_Offset = kTAG_rTRC_Offset;
+static constexpr uint32_t kTAG_bTRC_Offset = kTAG_rTRC_Offset;
+
+static constexpr uint32_t kTAG_wtpt = SkSetFourByteTag('w', 't', 'p', 't');
+static constexpr uint32_t kTAG_wtpt_Offset = kTAG_bTRC_Offset + kTAG_TRC_Bytes;
+
+static constexpr uint32_t kTAG_cprt = SkSetFourByteTag('c', 'p', 'r', 't');
+static constexpr uint32_t kTAG_cprt_Bytes = sizeof(kCopyrightTagHeader) +
+ sizeof(kCopyrightTagBody);
+static constexpr uint32_t kTAG_cprt_Offset = kTAG_wtpt_Offset + kTAG_XYZ_Bytes;
+
+static constexpr uint32_t kICCProfileSize = kTAG_cprt_Offset + kTAG_cprt_Bytes;
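+
+// Illustrative check (not part of the upstream sources): the tag table directly follows
+// the header, so kTAG_desc_Offset == kICCHeaderSize + kICCNumEntries * kICCTagTableEntrySize
+// (nine 12-byte entries), and every later offset chains off the previous tag's offset plus
+// its size.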
+
+static constexpr uint32_t kICCHeader[kICCHeaderSize / 4] {
+ SkEndian_SwapBE32(kICCProfileSize), // Size of the profile
+ 0, // Preferred CMM type (ignored)
+ SkEndian_SwapBE32(0x02100000), // Version 2.1
+ SkEndian_SwapBE32(kDisplay_Profile), // Display device profile
+ SkEndian_SwapBE32(kRGB_ColorSpace), // RGB input color space
+ SkEndian_SwapBE32(kXYZ_PCSSpace), // XYZ profile connection space
+ 0, 0, 0, // Date and time (ignored)
+ SkEndian_SwapBE32(kACSP_Signature), // Profile signature
+ 0, // Platform target (ignored)
+ 0x00000000, // Flags: not embedded, can be used independently
+ 0, // Device manufacturer (ignored)
+ 0, // Device model (ignored)
+ 0, 0, // Device attributes (ignored)
+ SkEndian_SwapBE32(1), // Relative colorimetric rendering intent
+ SkEndian_SwapBE32(0x0000f6d6), // D50 standard illuminant (X)
+ SkEndian_SwapBE32(0x00010000), // D50 standard illuminant (Y)
+ SkEndian_SwapBE32(0x0000d32d), // D50 standard illuminant (Z)
+ 0, // Profile creator (ignored)
+ 0, 0, 0, 0, // Profile id checksum (ignored)
+ 0, 0, 0, 0, 0, 0, 0, // Reserved (ignored)
+ SkEndian_SwapBE32(kICCNumEntries), // Number of tags
+};
+
+static constexpr uint32_t kICCTagTable[3 * kICCNumEntries] {
+ // Profile description
+ SkEndian_SwapBE32(kTAG_desc),
+ SkEndian_SwapBE32(kTAG_desc_Offset),
+ SkEndian_SwapBE32(kTAG_desc_Bytes),
+
+ // rXYZ
+ SkEndian_SwapBE32(kTAG_rXYZ),
+ SkEndian_SwapBE32(kTAG_rXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // gXYZ
+ SkEndian_SwapBE32(kTAG_gXYZ),
+ SkEndian_SwapBE32(kTAG_gXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // bXYZ
+ SkEndian_SwapBE32(kTAG_bXYZ),
+ SkEndian_SwapBE32(kTAG_bXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // rTRC
+ SkEndian_SwapBE32(kTAG_rTRC),
+ SkEndian_SwapBE32(kTAG_rTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // gTRC
+ SkEndian_SwapBE32(kTAG_gTRC),
+ SkEndian_SwapBE32(kTAG_gTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // bTRC
+ SkEndian_SwapBE32(kTAG_bTRC),
+ SkEndian_SwapBE32(kTAG_bTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // White point
+ SkEndian_SwapBE32(kTAG_wtpt),
+ SkEndian_SwapBE32(kTAG_wtpt_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // Copyright
+ SkEndian_SwapBE32(kTAG_cprt),
+ SkEndian_SwapBE32(kTAG_cprt_Offset),
+ SkEndian_SwapBE32(kTAG_cprt_Bytes),
+};
+
+// This is like SkFloatToFixed, but rounds to nearest, preserving as much accuracy as possible
+// when going float -> fixed -> float (it has the same accuracy when going fixed -> float -> fixed).
+// The use of double is necessary to accommodate the full potential 32-bit mantissa of the 16.16
+// SkFixed value, thereby avoiding rounding problems with float. Also, see the comment in SkFixed.h.
+static SkFixed float_round_to_fixed(float x) {
+ return sk_float_saturate2int((float)floor((double)x * SK_Fixed1 + 0.5));
+}
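+
+// For example (illustrative, not part of the upstream sources):
+// float_round_to_fixed(0.5f) == 0x8000 (0.5 in 16.16) and
+// float_round_to_fixed(1.0f) == SK_Fixed1 == 0x10000.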
+
+static void write_xyz_tag(uint32_t* ptr, const skcms_Matrix3x3& toXYZD50, int col) {
+ ptr[0] = SkEndian_SwapBE32(kXYZ_PCSSpace);
+ ptr[1] = 0;
+ ptr[2] = SkEndian_SwapBE32(float_round_to_fixed(toXYZD50.vals[0][col]));
+ ptr[3] = SkEndian_SwapBE32(float_round_to_fixed(toXYZD50.vals[1][col]));
+ ptr[4] = SkEndian_SwapBE32(float_round_to_fixed(toXYZD50.vals[2][col]));
+}
+
+static void write_trc_tag(uint32_t* ptr, const skcms_TransferFunction& fn) {
+ ptr[0] = SkEndian_SwapBE32(kTAG_ParaCurveType);
+ ptr[1] = 0;
+ ptr[2] = (uint32_t) (SkEndian_SwapBE16(kGABCDEF_ParaCurveType));
+ ptr[3] = SkEndian_SwapBE32(float_round_to_fixed(fn.g));
+ ptr[4] = SkEndian_SwapBE32(float_round_to_fixed(fn.a));
+ ptr[5] = SkEndian_SwapBE32(float_round_to_fixed(fn.b));
+ ptr[6] = SkEndian_SwapBE32(float_round_to_fixed(fn.c));
+ ptr[7] = SkEndian_SwapBE32(float_round_to_fixed(fn.d));
+ ptr[8] = SkEndian_SwapBE32(float_round_to_fixed(fn.e));
+ ptr[9] = SkEndian_SwapBE32(float_round_to_fixed(fn.f));
+}
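+
+// The ten words written above presumably account for kTAG_TRC_Bytes: a 'para' type signature,
+// a reserved word, a word whose high 16 bits hold the curve type, and seven 16.16 fixed-point
+// parameters (g, a, b, c, d, e, f) for the kGABCDEF parametric curve.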
+
+static bool nearly_equal(float x, float y) {
+    // A note on why I chose this tolerance: transfer_fn_almost_equal() uses a
+    // tolerance of 0.001f, which doesn't seem to be enough to distinguish
+    // between similar transfer functions, for example: gamma2.2 and sRGB.
+    //
+    // If the tolerance is 0.0f, then we can't distinguish between two
+    // different encodings of what is clearly the same colorspace. Some
+    // experimentation with example files led to this number:
+ static constexpr float kTolerance = 1.0f / (1 << 11);
+ return ::fabsf(x - y) <= kTolerance;
+}
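+
+// e.g. kTolerance is ~0.000488, so a gamma of 2.2 vs. the sRGB exponent of 2.4 (a difference
+// of 0.2) is easily distinguished, while tiny differences from re-encoding the same curve
+// still compare as equal.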
+
+static bool nearly_equal(const skcms_TransferFunction& u,
+ const skcms_TransferFunction& v) {
+ return nearly_equal(u.g, v.g)
+ && nearly_equal(u.a, v.a)
+ && nearly_equal(u.b, v.b)
+ && nearly_equal(u.c, v.c)
+ && nearly_equal(u.d, v.d)
+ && nearly_equal(u.e, v.e)
+ && nearly_equal(u.f, v.f);
+}
+
+static bool nearly_equal(const skcms_Matrix3x3& u, const skcms_Matrix3x3& v) {
+ for (int r = 0; r < 3; r++) {
+ for (int c = 0; c < 3; c++) {
+ if (!nearly_equal(u.vals[r][c], v.vals[r][c])) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Returns nullptr if the color profile doesn't have a special name.
+const char* get_color_profile_description(const skcms_TransferFunction& fn,
+ const skcms_Matrix3x3& toXYZD50) {
+ bool srgb_xfer = nearly_equal(fn, SkNamedTransferFn::kSRGB);
+ bool srgb_gamut = nearly_equal(toXYZD50, SkNamedGamut::kSRGB);
+ if (srgb_xfer && srgb_gamut) {
+ return "sRGB";
+ }
+ bool line_xfer = nearly_equal(fn, SkNamedTransferFn::kLinear);
+ if (line_xfer && srgb_gamut) {
+ return "Linear Transfer with sRGB Gamut";
+ }
+ bool twoDotTwo = nearly_equal(fn, SkNamedTransferFn::k2Dot2);
+ if (twoDotTwo && srgb_gamut) {
+ return "2.2 Transfer with sRGB Gamut";
+ }
+ if (twoDotTwo && nearly_equal(toXYZD50, SkNamedGamut::kAdobeRGB)) {
+ return "AdobeRGB";
+ }
+ bool dcip3_gamut = nearly_equal(toXYZD50, SkNamedGamut::kDCIP3);
+ if (srgb_xfer || line_xfer) {
+ if (srgb_xfer && dcip3_gamut) {
+ return "sRGB Transfer with DCI-P3 Gamut";
+ }
+ if (line_xfer && dcip3_gamut) {
+ return "Linear Transfer with DCI-P3 Gamut";
+ }
+ bool rec2020 = nearly_equal(toXYZD50, SkNamedGamut::kRec2020);
+ if (srgb_xfer && rec2020) {
+ return "sRGB Transfer with Rec-BT-2020 Gamut";
+ }
+ if (line_xfer && rec2020) {
+ return "Linear Transfer with Rec-BT-2020 Gamut";
+ }
+ }
+ return nullptr;
+}
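+
+// e.g. get_color_profile_description(SkNamedTransferFn::kSRGB, SkNamedGamut::kSRGB) returns
+// "sRGB"; any unrecognized pairing falls through to nullptr and receives the MD5-based
+// description generated below.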
+
+static void get_color_profile_tag(char dst[kICCDescriptionTagSize],
+ const skcms_TransferFunction& fn,
+ const skcms_Matrix3x3& toXYZD50) {
+ SkASSERT(dst);
+ if (const char* description = get_color_profile_description(fn, toXYZD50)) {
+ SkASSERT(strlen(description) < kICCDescriptionTagSize);
+ strncpy(dst, description, kICCDescriptionTagSize);
+ // "If the length of src is less than n, strncpy() writes additional
+ // null bytes to dest to ensure that a total of n bytes are written."
+ } else {
+ strncpy(dst, kDescriptionTagBodyPrefix, sizeof(kDescriptionTagBodyPrefix));
+ SkMD5 md5;
+ md5.write(&toXYZD50, sizeof(toXYZD50));
+ static_assert(sizeof(fn) == sizeof(float) * 7, "packed");
+ md5.write(&fn, sizeof(fn));
+ SkMD5::Digest digest = md5.finish();
+ char* ptr = dst + sizeof(kDescriptionTagBodyPrefix);
+ for (unsigned i = 0; i < sizeof(SkMD5::Digest); ++i) {
+ uint8_t byte = digest.data[i];
+ *ptr++ = SkHexadecimalDigits::gUpper[byte >> 4];
+ *ptr++ = SkHexadecimalDigits::gUpper[byte & 0xF];
+ }
+ SkASSERT(ptr == dst + kICCDescriptionTagSize);
+ }
+}
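+
+// In the fallback branch the tag is kDescriptionTagBodyPrefix followed by the 16-byte MD5
+// digest of the gamut matrix and transfer function, written as 32 uppercase hex digits, which
+// presumably totals exactly kICCDescriptionTagSize characters per the final assert.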
+
+sk_sp<SkData> SkWriteICCProfile(const skcms_TransferFunction& fn,
+ const skcms_Matrix3x3& toXYZD50) {
+ // We can't encode HDR transfer functions in ICC
+ if (classify_transfer_fn(fn) != sRGBish_TF) {
+ return nullptr;
+ }
+
+ SkAutoMalloc profile(kICCProfileSize);
+ uint8_t* ptr = (uint8_t*) profile.get();
+
+ // Write profile header
+ memcpy(ptr, kICCHeader, sizeof(kICCHeader));
+ ptr += sizeof(kICCHeader);
+
+ // Write tag table
+ memcpy(ptr, kICCTagTable, sizeof(kICCTagTable));
+ ptr += sizeof(kICCTagTable);
+
+ // Write profile description tag
+ memcpy(ptr, kDescriptionTagHeader, sizeof(kDescriptionTagHeader));
+ ptr += sizeof(kDescriptionTagHeader);
+ {
+ char colorProfileTag[kICCDescriptionTagSize];
+ get_color_profile_tag(colorProfileTag, fn, toXYZD50);
+
+ // ASCII --> big-endian UTF-16.
+ for (size_t i = 0; i < kICCDescriptionTagSize; i++) {
+ *ptr++ = 0;
+ *ptr++ = colorProfileTag[i];
+ }
+ }
+
+ // Write XYZ tags
+ write_xyz_tag((uint32_t*) ptr, toXYZD50, 0);
+ ptr += kTAG_XYZ_Bytes;
+ write_xyz_tag((uint32_t*) ptr, toXYZD50, 1);
+ ptr += kTAG_XYZ_Bytes;
+ write_xyz_tag((uint32_t*) ptr, toXYZD50, 2);
+ ptr += kTAG_XYZ_Bytes;
+
+ // Write TRC tag
+ write_trc_tag((uint32_t*) ptr, fn);
+ ptr += kTAG_TRC_Bytes;
+
+ // Write white point tag (must be D50)
+ memcpy(ptr, kWhitePointTag, sizeof(kWhitePointTag));
+ ptr += sizeof(kWhitePointTag);
+
+ // Write copyright tag
+ memcpy(ptr, kCopyrightTagHeader, sizeof(kCopyrightTagHeader));
+ ptr += sizeof(kCopyrightTagHeader);
+ memcpy(ptr, kCopyrightTagBody, sizeof(kCopyrightTagBody));
+ ptr += sizeof(kCopyrightTagBody);
+
+ SkASSERT(kICCProfileSize == ptr - (uint8_t*) profile.get());
+ return SkData::MakeFromMalloc(profile.release(), kICCProfileSize);
+}
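+
+// A minimal usage sketch (illustrative only, using names defined in this file):
+//   sk_sp<SkData> icc = SkWriteICCProfile(SkNamedTransferFn::kSRGB, SkNamedGamut::kSRGB);
+//   // 'icc' holds a kICCProfileSize-byte display profile, or is nullptr for non-sRGBish
+//   // (e.g. HDR) transfer functions.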
diff --git a/gfx/skia/skia/src/core/SkICCPriv.h b/gfx/skia/skia/src/core/SkICCPriv.h
new file mode 100644
index 0000000000..1ed2ca2dbc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkICCPriv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkICCPriv_DEFINED
+#define SkICCPriv_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkTypes.h"
+
+// This is equal to the header size according to the ICC specification (128)
+// plus the size of the tag count (4). We include the tag count since we
+// always require it to be present anyway.
+static constexpr size_t kICCHeaderSize = 132;
+
+// Contains a signature (4), offset (4), and size (4).
+static constexpr size_t kICCTagTableEntrySize = 12;
+
+static constexpr uint32_t kRGB_ColorSpace = SkSetFourByteTag('R', 'G', 'B', ' ');
+static constexpr uint32_t kCMYK_ColorSpace = SkSetFourByteTag('C', 'M', 'Y', 'K');
+static constexpr uint32_t kGray_ColorSpace = SkSetFourByteTag('G', 'R', 'A', 'Y');
+static constexpr uint32_t kDisplay_Profile = SkSetFourByteTag('m', 'n', 't', 'r');
+static constexpr uint32_t kInput_Profile = SkSetFourByteTag('s', 'c', 'n', 'r');
+static constexpr uint32_t kOutput_Profile = SkSetFourByteTag('p', 'r', 't', 'r');
+static constexpr uint32_t kColorSpace_Profile = SkSetFourByteTag('s', 'p', 'a', 'c');
+static constexpr uint32_t kXYZ_PCSSpace = SkSetFourByteTag('X', 'Y', 'Z', ' ');
+static constexpr uint32_t kLAB_PCSSpace = SkSetFourByteTag('L', 'a', 'b', ' ');
+static constexpr uint32_t kACSP_Signature = SkSetFourByteTag('a', 'c', 's', 'p');
+
+static constexpr uint32_t kTAG_rXYZ = SkSetFourByteTag('r', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_gXYZ = SkSetFourByteTag('g', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_bXYZ = SkSetFourByteTag('b', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_rTRC = SkSetFourByteTag('r', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_gTRC = SkSetFourByteTag('g', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_bTRC = SkSetFourByteTag('b', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_kTRC = SkSetFourByteTag('k', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_A2B0 = SkSetFourByteTag('A', '2', 'B', '0');
+
+static constexpr uint32_t kTAG_CurveType = SkSetFourByteTag('c', 'u', 'r', 'v');
+static constexpr uint32_t kTAG_ParaCurveType = SkSetFourByteTag('p', 'a', 'r', 'a');
+static constexpr uint32_t kTAG_TextType = SkSetFourByteTag('m', 'l', 'u', 'c');
+
+enum ParaCurveType {
+ kExponential_ParaCurveType = 0,
+ kGAB_ParaCurveType = 1,
+ kGABC_ParaCurveType = 2,
+ kGABDE_ParaCurveType = 3,
+ kGABCDEF_ParaCurveType = 4,
+};
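+
+// Per the ICC parametricCurveType definition (domain clamping elided), these evaluate roughly as:
+//   type 0: Y = X^g                      type 1: Y = (aX+b)^g
+//   type 2: Y = (aX+b)^g + c             type 3: Y = (aX+b)^g for X >= d, else Y = cX
+//   type 4: Y = (aX+b)^g + e for X >= d, else Y = cX + f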
+
+#endif // SkICCPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkIPoint16.h b/gfx/skia/skia/src/core/SkIPoint16.h
new file mode 100644
index 0000000000..c66465f720
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkIPoint16.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIPoint16_DEFINED
+#define SkIPoint16_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+
+/** \struct SkIPoint16
+    SkIPoint16 holds two 16-bit integer coordinates.
+ */
+struct SkIPoint16 {
+ int16_t fX; //!< x-axis value used by SkIPoint16
+
+ int16_t fY; //!< y-axis value used by SkIPoint16
+
+ /** Sets fX to x, fY to y. If SK_DEBUG is defined, asserts
+ if x or y does not fit in 16 bits.
+
+        @param x  integer x-axis value of constructed SkIPoint16
+        @param y  integer y-axis value of constructed SkIPoint16
+ @return SkIPoint16 (x, y)
+ */
+ static constexpr SkIPoint16 Make(int x, int y) {
+ return {SkToS16(x), SkToS16(y)};
+ }
+
+ /** Returns x-axis value of SkIPoint16.
+
+ @return fX
+ */
+ int16_t x() const { return fX; }
+
+    /** Returns y-axis value of SkIPoint16.
+
+ @return fY
+ */
+ int16_t y() const { return fY; }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(int x, int y) {
+ fX = SkToS16(x);
+ fY = SkToS16(y);
+ }
+};
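+
+// e.g. SkIPoint16 p = SkIPoint16::Make(3, 4); p.set(p.x() + 1, p.y());
+// Make() and set() both route through SkToS16, which asserts in debug builds if a value does
+// not fit in 16 bits.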
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkImageFilter.cpp b/gfx/skia/skia/src/core/SkImageFilter.cpp
new file mode 100644
index 0000000000..107566459b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilter.cpp
@@ -0,0 +1,713 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRect.h"
+#include "include/effects/SkComposeImageFilter.h"
+#include "include/private/SkSafe32.h"
+#include "src/core/SkFuzzLogging.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkLocalMatrixImageFilter.h"
+#include "src/core/SkMatrixImageFilter.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#endif
+#include <atomic>
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// SkImageFilter - A number of the public APIs on SkImageFilter downcast to SkImageFilter_Base
+// in order to perform their actual work.
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Returns the number of inputs this filter will accept (some inputs can
+ * be NULL).
+ */
+int SkImageFilter::countInputs() const { return as_IFB(this)->fInputs.count(); }
+
+/**
+ * Returns the input filter at a given index, or NULL if no input is
+ * connected. The indices used are filter-specific.
+ */
+const SkImageFilter* SkImageFilter::getInput(int i) const {
+ SkASSERT(i < this->countInputs());
+ return as_IFB(this)->fInputs[i].get();
+}
+
+bool SkImageFilter::isColorFilterNode(SkColorFilter** filterPtr) const {
+ return as_IFB(this)->onIsColorFilterNode(filterPtr);
+}
+
+SkIRect SkImageFilter::filterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction, const SkIRect* inputRect) const {
+ // The old filterBounds() function uses SkIRects that are defined in layer space so, while
+ // we still are supporting it, bypass SkIF_B's new public filter bounds functions and go right
+ // to the internal layer-space calculations.
+ skif::Mapping mapping(SkMatrix::I(), ctm);
+ if (kReverse_MapDirection == direction) {
+ skif::LayerSpace<SkIRect> targetOutput(src);
+ skif::LayerSpace<SkIRect> content(inputRect ? *inputRect : src);
+ return SkIRect(as_IFB(this)->onGetInputLayerBounds(mapping, targetOutput, content));
+ } else {
+ SkASSERT(!inputRect);
+ skif::LayerSpace<SkIRect> content(src);
+ skif::LayerSpace<SkIRect> output = as_IFB(this)->onGetOutputLayerBounds(mapping, content);
+ // Manually apply the crop rect for now, until cropping is performed by a dedicated SkIF.
+ SkIRect dst;
+ as_IFB(this)->getCropRect().applyTo(
+ SkIRect(output), ctm, as_IFB(this)->affectsTransparentBlack(), &dst);
+ return dst;
+ }
+}
+
+SkRect SkImageFilter::computeFastBounds(const SkRect& src) const {
+ if (0 == this->countInputs()) {
+ return src;
+ }
+ SkRect combinedBounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ for (int i = 1; i < this->countInputs(); i++) {
+ const SkImageFilter* input = this->getInput(i);
+ if (input) {
+ combinedBounds.join(input->computeFastBounds(src));
+ } else {
+ combinedBounds.join(src);
+ }
+ }
+ return combinedBounds;
+}
+
+bool SkImageFilter::canComputeFastBounds() const {
+ if (as_IFB(this)->affectsTransparentBlack()) {
+ return false;
+ }
+ for (int i = 0; i < this->countInputs(); i++) {
+ const SkImageFilter* input = this->getInput(i);
+ if (input && !input->canComputeFastBounds()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkImageFilter::asAColorFilter(SkColorFilter** filterPtr) const {
+ SkASSERT(nullptr != filterPtr);
+ if (!this->isColorFilterNode(filterPtr)) {
+ return false;
+ }
+ if (nullptr != this->getInput(0) || (*filterPtr)->affectsTransparentBlack()) {
+ (*filterPtr)->unref();
+ return false;
+ }
+ return true;
+}
+
+sk_sp<SkImageFilter> SkImageFilter::MakeMatrixFilter(const SkMatrix& matrix,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input) {
+ return SkMatrixImageFilter::Make(matrix, filterQuality, std::move(input));
+}
+
+sk_sp<SkImageFilter> SkImageFilter::makeWithLocalMatrix(const SkMatrix& matrix) const {
+ return SkLocalMatrixImageFilter::Make(matrix, this->refMe());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// SkImageFilter_Base
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SK_USE_FLUENT_IMAGE_FILTER_TYPES
+
+static int32_t next_image_filter_unique_id() {
+ static std::atomic<int32_t> nextID{1};
+
+ int32_t id;
+ do {
+ id = nextID++;
+ } while (id == 0);
+ return id;
+}
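+
+// The do/while guards against wrap-around of the 32-bit counter: 0 is skipped, presumably
+// because it is reserved to mean an invalid or unset id.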
+
+SkImageFilter_Base::SkImageFilter_Base(sk_sp<SkImageFilter> const* inputs,
+ int inputCount, const CropRect* cropRect)
+ : fUsesSrcInput(false)
+ , fUniqueID(next_image_filter_unique_id()) {
+ fCropRect = cropRect ? *cropRect : CropRect(SkRect(), 0x0);
+
+ fInputs.reset(inputCount);
+
+ for (int i = 0; i < inputCount; ++i) {
+ if (!inputs[i] || as_IFB(inputs[i])->fUsesSrcInput) {
+ fUsesSrcInput = true;
+ }
+ fInputs[i] = inputs[i];
+ }
+}
+
+SkImageFilter_Base::~SkImageFilter_Base() {
+ SkImageFilterCache::Get()->purgeByImageFilter(this);
+}
+
+bool SkImageFilter_Base::Common::unflatten(SkReadBuffer& buffer, int expectedCount) {
+ const int count = buffer.readInt();
+ if (!buffer.validate(count >= 0)) {
+ return false;
+ }
+ if (!buffer.validate(expectedCount < 0 || count == expectedCount)) {
+ return false;
+ }
+
+ SkASSERT(fInputs.empty());
+ for (int i = 0; i < count; i++) {
+ fInputs.push_back(buffer.readBool() ? buffer.readImageFilter() : nullptr);
+ if (!buffer.isValid()) {
+ return false;
+ }
+ }
+ SkRect rect;
+ buffer.readRect(&rect);
+ if (!buffer.isValid() || !buffer.validate(SkIsValidRect(rect))) {
+ return false;
+ }
+
+ uint32_t flags = buffer.readUInt();
+ fCropRect = CropRect(rect, flags);
+ return buffer.isValid();
+}
+
+void SkImageFilter_Base::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fInputs.count());
+ for (int i = 0; i < fInputs.count(); i++) {
+ const SkImageFilter* input = this->getInput(i);
+ buffer.writeBool(input != nullptr);
+ if (input != nullptr) {
+ buffer.writeFlattenable(input);
+ }
+ }
+ buffer.writeRect(fCropRect.rect());
+ buffer.writeUInt(fCropRect.flags());
+}
+
+skif::FilterResult<For::kOutput> SkImageFilter_Base::filterImage(const skif::Context& context) const {
+ // TODO (michaelludwig) - Old filters have an implicit assumption that the source image
+ // (originally passed separately) has an origin of (0, 0). SkComposeImageFilter makes an effort
+    // to ensure that remains the case. Once everyone uses the new type systems for bounds,
+    // non-(0, 0) source origins will be easy to support.
+ SkASSERT(context.source().layerOrigin().x() == 0 && context.source().layerOrigin().y() == 0);
+
+ skif::FilterResult<For::kOutput> result;
+ if (!context.isValid()) {
+ return result;
+ }
+
+ uint32_t srcGenID = fUsesSrcInput ? context.sourceImage()->uniqueID() : 0;
+ const SkIRect srcSubset = fUsesSrcInput ? context.sourceImage()->subset()
+ : SkIRect::MakeWH(0, 0);
+
+ SkImageFilterCacheKey key(fUniqueID, context.mapping().layerMatrix(), context.clipBounds(),
+ srcGenID, srcSubset);
+ if (context.cache() && context.cache()->get(key, &result)) {
+ return result;
+ }
+
+ result = this->onFilterImage(context);
+
+#if SK_SUPPORT_GPU
+ if (context.gpuBacked() && result.image() && !result.image()->isTextureBacked()) {
+ // Keep the result on the GPU - this is still required for some
+ // image filters that don't support GPU in all cases
+ auto asTexture = result.image()->makeTextureImage(context.getContext());
+ result = skif::FilterResult<For::kOutput>(std::move(asTexture), result.layerOrigin());
+ }
+#endif
+
+ if (context.cache()) {
+ context.cache()->set(key, this, result);
+ }
+
+ return result;
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::getInputBounds(
+ const skif::Mapping& mapping, const skif::DeviceSpace<SkRect>& desiredOutput,
+ const skif::ParameterSpace<SkRect>* knownContentBounds) const {
+ // Map both the device-space desired coverage area and the known content bounds to layer space
+ skif::LayerSpace<SkIRect> desiredBounds = mapping.deviceToLayer(desiredOutput).roundOut();
+    // If we have no known content bounds, use the desired coverage area, because that is the
+    // most conservative possibility.
+ skif::LayerSpace<SkIRect> contentBounds =
+ knownContentBounds ? mapping.paramToLayer(*knownContentBounds).roundOut()
+ : desiredBounds;
+
+ // Process the layer-space desired output with the filter DAG to determine required input
+ skif::LayerSpace<SkIRect> requiredInput = this->onGetInputLayerBounds(
+ mapping, desiredBounds, contentBounds);
+ // If we know what's actually going to be drawn into the layer, and we don't change transparent
+ // black, then we can further restrict the layer to what the known content is
+ if (knownContentBounds && !this->affectsTransparentBlack()) {
+ if (!requiredInput.intersect(contentBounds)) {
+ // Nothing would be output by the filter, so return empty rect
+ return skif::LayerSpace<SkIRect>(SkIRect::MakeEmpty());
+ }
+ }
+ return requiredInput;
+}
+
+skif::DeviceSpace<SkIRect> SkImageFilter_Base::getOutputBounds(
+ const skif::Mapping& mapping, const skif::ParameterSpace<SkRect>& contentBounds) const {
+ // Map the input content into the layer space where filtering will occur
+ skif::LayerSpace<SkRect> layerContent = mapping.paramToLayer(contentBounds);
+    // Determine the filter DAG's output bounds in layer space
+ skif::LayerSpace<SkIRect> filterOutput = this->onGetOutputLayerBounds(
+ mapping, layerContent.roundOut());
+ // FIXME (michaelludwig) - To be removed once cropping is isolated, but remain consistent with
+ // old filterBounds(kForward) behavior.
+ SkIRect dst;
+ as_IFB(this)->getCropRect().applyTo(
+ SkIRect(filterOutput), mapping.layerMatrix(),
+ as_IFB(this)->affectsTransparentBlack(), &dst);
+
+ // Map all the way to device space
+ return mapping.layerToDevice(skif::LayerSpace<SkIRect>(dst));
+}
+
+// TODO (michaelludwig) - Default to using the old onFilterImage, as filters are updated one by one.
+// Once the old function is gone, this onFilterImage() will be made pure virtual.
+skif::FilterResult<For::kOutput> SkImageFilter_Base::onFilterImage(const skif::Context& context) const {
+ SkIPoint origin;
+ auto image = this->onFilterImage(context, &origin);
+ return skif::FilterResult<For::kOutput>(std::move(image), skif::LayerSpace<SkIPoint>(origin));
+}
+
+bool SkImageFilter_Base::canHandleComplexCTM() const {
+ // CropRects need to apply in the source coordinate system, but are not aware of complex CTMs
+ // when performing clipping. For a simple fix, any filter with a crop rect set cannot support
+ // complex CTMs until that's updated.
+ if (this->cropRectIsSet() || !this->onCanHandleComplexCTM()) {
+ return false;
+ }
+ const int count = this->countInputs();
+ for (int i = 0; i < count; ++i) {
+ const SkImageFilter_Base* input = as_IFB(this->getInput(i));
+ if (input && !input->canHandleComplexCTM()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkImageFilter::CropRect::applyTo(const SkIRect& imageBounds, const SkMatrix& ctm,
+ bool embiggen, SkIRect* cropped) const {
+ *cropped = imageBounds;
+ if (fFlags) {
+ SkRect devCropR;
+ ctm.mapRect(&devCropR, fRect);
+ SkIRect devICropR = devCropR.roundOut();
+
+ // Compute the left/top first, in case we need to modify the right/bottom for a missing edge
+ if (fFlags & kHasLeft_CropEdge) {
+ if (embiggen || devICropR.fLeft > cropped->fLeft) {
+ cropped->fLeft = devICropR.fLeft;
+ }
+ } else {
+ devICropR.fRight = Sk32_sat_add(cropped->fLeft, devICropR.width());
+ }
+ if (fFlags & kHasTop_CropEdge) {
+ if (embiggen || devICropR.fTop > cropped->fTop) {
+ cropped->fTop = devICropR.fTop;
+ }
+ } else {
+ devICropR.fBottom = Sk32_sat_add(cropped->fTop, devICropR.height());
+ }
+ if (fFlags & kHasWidth_CropEdge) {
+ if (embiggen || devICropR.fRight < cropped->fRight) {
+ cropped->fRight = devICropR.fRight;
+ }
+ }
+ if (fFlags & kHasHeight_CropEdge) {
+ if (embiggen || devICropR.fBottom < cropped->fBottom) {
+ cropped->fBottom = devICropR.fBottom;
+ }
+ }
+ }
+}
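+
+// A worked example (identity CTM, embiggen == false): with imageBounds = (0,0,100,100) and
+// fRect = (10,10,50,50) where only kHasLeft and kHasTop are set, 'cropped' becomes
+// (10,10,100,100); the unset width/height edges leave the image's right/bottom untouched.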
+
+bool SkImageFilter_Base::applyCropRect(const Context& ctx, const SkIRect& srcBounds,
+ SkIRect* dstBounds) const {
+ SkIRect tmpDst = this->onFilterNodeBounds(srcBounds, ctx.ctm(), kForward_MapDirection, nullptr);
+ fCropRect.applyTo(tmpDst, ctx.ctm(), this->affectsTransparentBlack(), dstBounds);
+ // Intersect against the clip bounds, in case the crop rect has
+ // grown the bounds beyond the original clip. This can happen for
+ // example in tiling, where the clip is much smaller than the filtered
+ // primitive. If we didn't do this, we would be processing the filter
+ // at the full crop rect size in every tile.
+ return dstBounds->intersect(ctx.clipBounds());
+}
+
+// Return a larger (newWidth x newHeight) copy of 'src' with black padding
+// around it.
+static sk_sp<SkSpecialImage> pad_image(SkSpecialImage* src, const SkImageFilter_Base::Context& ctx,
+ int newWidth, int newHeight, int offX, int offY) {
+    // We would like to operate in the source's color space (so that we return an "identical"
+    // image, other than the padding). To achieve that, we'd create a new context using
+    // src->getColorSpace() to replace ctx.colorSpace().
+
+ // That fails in at least two ways. For formats that are texturable but not renderable (like
+ // F16 on some ES implementations), we can't create a surface to do the work. For sRGB, images
+ // may be tagged with an sRGB color space (which leads to an sRGB config in makeSurface). But
+ // the actual config of that sRGB image on a device with no sRGB support is non-sRGB.
+ //
+ // Rather than try to special case these situations, we execute the image padding in the
+ // destination color space. This should not affect the output of the DAG in (almost) any case,
+ // because the result of this call is going to be used as an input, where it would have been
+ // switched to the destination space anyway. The one exception would be a filter that expected
+ // to consume unclamped F16 data, but the padded version of the image is pre-clamped to 8888.
+ // We can revisit this logic if that ever becomes an actual problem.
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(SkISize::Make(newWidth, newHeight)));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ src->draw(canvas, offX, offY, nullptr);
+
+ return surf->makeImageSnapshot();
+}
+
+sk_sp<SkSpecialImage> SkImageFilter_Base::applyCropRectAndPad(const Context& ctx,
+ SkSpecialImage* src,
+ SkIPoint* srcOffset,
+ SkIRect* bounds) const {
+ const SkIRect srcBounds = SkIRect::MakeXYWH(srcOffset->x(), srcOffset->y(),
+ src->width(), src->height());
+
+ if (!this->applyCropRect(ctx, srcBounds, bounds)) {
+ return nullptr;
+ }
+
+ if (srcBounds.contains(*bounds)) {
+ return sk_sp<SkSpecialImage>(SkRef(src));
+ } else {
+ sk_sp<SkSpecialImage> img(pad_image(src, ctx, bounds->width(), bounds->height(),
+ Sk32_sat_sub(srcOffset->x(), bounds->x()),
+ Sk32_sat_sub(srcOffset->y(), bounds->y())));
+ *srcOffset = SkIPoint::Make(bounds->x(), bounds->y());
+ return img;
+ }
+}
+
+// NOTE: The new onGetOutputLayerBounds() and onGetInputLayerBounds() default to calling into the
+// deprecated onFilterBounds and onFilterNodeBounds. While these functions are not tagged, they do
+// match the documented default behavior for the new bounds functions.
+SkIRect SkImageFilter_Base::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ if (this->countInputs() < 1) {
+ return src;
+ }
+
+ SkIRect totalBounds;
+ for (int i = 0; i < this->countInputs(); ++i) {
+ const SkImageFilter* filter = this->getInput(i);
+ SkIRect rect = filter ? filter->filterBounds(src, ctm, dir, inputRect) : src;
+ if (0 == i) {
+ totalBounds = rect;
+ } else {
+ totalBounds.join(rect);
+ }
+ }
+
+ return totalBounds;
+}
+
+SkIRect SkImageFilter_Base::onFilterNodeBounds(const SkIRect& src, const SkMatrix&,
+ MapDirection, const SkIRect*) const {
+ return src;
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::visitInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds) const {
+ if (this->countInputs() < 1) {
+ // TODO (michaelludwig) - if a filter doesn't have any inputs, it doesn't need any
+        // implicit source image, so arguably we could return an empty rect here. Returning
+        // 'desiredOutput' is consistent with the original behavior; empty bounds may have
+        // unintended side effects, so that option should be explored later.
+ return desiredOutput;
+ }
+
+ skif::LayerSpace<SkIRect> netInput;
+ for (int i = 0; i < this->countInputs(); ++i) {
+ const SkImageFilter* filter = this->getInput(i);
+        // The required input for this input filter, or 'desiredOutput' if the filter is null and
+        // the source image is used (so must be sized to cover 'desiredOutput').
+ skif::LayerSpace<SkIRect> requiredInput =
+ filter ? as_IFB(filter)->onGetInputLayerBounds(mapping, desiredOutput,
+ contentBounds)
+ : desiredOutput;
+ // Accumulate with all other filters
+ if (i == 0) {
+ netInput = requiredInput;
+ } else {
+ netInput.join(requiredInput);
+ }
+ }
+ return netInput;
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::visitOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const {
+ if (this->countInputs() < 1) {
+ // TODO (michaelludwig) - if a filter doesn't have any inputs, it presumably is determining
+ // its output size from something other than the implicit source contentBounds, in which
+ // case it shouldn't be calling this helper function, so explore adding an unreachable test
+ return contentBounds;
+ }
+
+ skif::LayerSpace<SkIRect> netOutput;
+ for (int i = 0; i < this->countInputs(); ++i) {
+ const SkImageFilter* filter = this->getInput(i);
+ // The output for just this input filter, or 'contentBounds' if the filter is null and
+ // the source image is used (i.e. the identity filter applied to the source).
+ skif::LayerSpace<SkIRect> output =
+ filter ? as_IFB(filter)->onGetOutputLayerBounds(mapping, contentBounds)
+ : contentBounds;
+ // Accumulate with all other filters
+ if (i == 0) {
+ netOutput = output;
+ } else {
+ netOutput.join(output);
+ }
+ }
+ return netOutput;
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::onGetInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds, VisitChildren recurse) const {
+ // Call old functions for now since they may have been overridden by a subclass that's not been
+ // updated yet; normally this would just default to visitInputLayerBounds()
+ SkIRect content = SkIRect(contentBounds);
+ SkIRect input = this->onFilterNodeBounds(SkIRect(desiredOutput), mapping.layerMatrix(),
+ kReverse_MapDirection, &content);
+ if (recurse == VisitChildren::kYes) {
+ SkIRect aggregate = this->onFilterBounds(input, mapping.layerMatrix(),
+ kReverse_MapDirection, &input);
+ return skif::LayerSpace<SkIRect>(aggregate);
+ } else {
+ return skif::LayerSpace<SkIRect>(input);
+ }
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::onGetOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const {
+ // Call old functions for now; normally this would default to visitOutputLayerBounds()
+ SkIRect aggregate = this->onFilterBounds(SkIRect(contentBounds), mapping.layerMatrix(),
+ kForward_MapDirection, nullptr);
+ SkIRect output = this->onFilterNodeBounds(aggregate, mapping.layerMatrix(),
+ kForward_MapDirection, nullptr);
+ return skif::LayerSpace<SkIRect>(output);
+}
+
+template<skif::Usage kU>
+skif::FilterResult<kU> SkImageFilter_Base::filterInput(int index, const skif::Context& ctx) const {
+ // Sanity checks for the index-specific input usages
+ SkASSERT(kU != skif::Usage::kInput0 || index == 0);
+ SkASSERT(kU != skif::Usage::kInput1 || index == 1);
+
+ const SkImageFilter* input = this->getInput(index);
+ if (!input) {
+ // Convert from the generic kInput of the source image to kU
+ return static_cast<skif::FilterResult<kU>>(ctx.source());
+ }
+
+ skif::FilterResult<For::kOutput> result = as_IFB(input)->filterImage(this->mapContext(ctx));
+ SkASSERT(!result.image() || ctx.gpuBacked() == result.image()->isTextureBacked());
+
+ // Map the output result of the input image filter to the input usage requested for this filter
+ return static_cast<skif::FilterResult<kU>>(std::move(result));
+}
+// Instantiate filterInput() for kInput, kInput0, and kInput1. This does not provide a definition
+// for kOutput, which should never be used anyway; this way the linker will fail for us if it is.
+template skif::FilterResult<For::kInput> SkImageFilter_Base::filterInput(int, const skif::Context&) const;
+template skif::FilterResult<For::kInput0> SkImageFilter_Base::filterInput(int, const skif::Context&) const;
+template skif::FilterResult<For::kInput1> SkImageFilter_Base::filterInput(int, const skif::Context&) const;
+
+SkImageFilter_Base::Context SkImageFilter_Base::mapContext(const Context& ctx) const {
+ // We don't recurse through the child input filters because that happens automatically
+ // as part of the filterImage() evaluation. In this case, we want the bounds for the
+ // edge from this node to its children, without the effects of the child filters.
+ skif::LayerSpace<SkIRect> childOutput = this->onGetInputLayerBounds(
+ ctx.mapping(), ctx.desiredOutput(), ctx.desiredOutput(), VisitChildren::kNo);
+ return ctx.withNewDesiredOutput(childOutput);
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<SkSpecialImage> SkImageFilter_Base::DrawWithFP(GrRecordingContext* context,
+ std::unique_ptr<GrFragmentProcessor> fp,
+ const SkIRect& bounds,
+ SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ GrProtected isProtected) {
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ auto renderTargetContext =
+ context->priv().makeDeferredRenderTargetContext(SkBackingFit::kApprox,
+ bounds.width(),
+ bounds.height(),
+ SkColorTypeToGrColorType(colorType),
+ sk_ref_sp(colorSpace),
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ isProtected);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ SkIRect dstIRect = SkIRect::MakeWH(bounds.width(), bounds.height());
+ SkRect srcRect = SkRect::Make(bounds);
+ SkRect dstRect = SkRect::MakeWH(srcRect.width(), srcRect.height());
+ GrFixedClip clip(dstIRect);
+ renderTargetContext->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(), dstRect,
+ srcRect);
+
+ return SkSpecialImage::MakeDeferredFromGpu(
+ context, dstIRect, kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(), renderTargetContext->colorInfo().colorType(),
+ renderTargetContext->colorInfo().refColorSpace());
+}
+
+sk_sp<SkSpecialImage> SkImageFilter_Base::ImageToColorSpace(SkSpecialImage* src,
+ SkColorType colorType,
+ SkColorSpace* colorSpace) {
+ // There are several conditions that determine if we actually need to convert the source to the
+ // destination's color space. Rather than duplicate that logic here, just try to make an xform
+ // object. If that produces something, then both are tagged, and the source is in a different
+ // gamut than the dest. There is some overhead to making the xform, but those are cached, and
+ // if we get one back, that means we're about to use it during the conversion anyway.
+ auto colorSpaceXform = GrColorSpaceXform::Make(src->getColorSpace(), src->alphaType(),
+ colorSpace, kPremul_SkAlphaType);
+
+ if (!colorSpaceXform) {
+ // No xform needed, just return the original image
+ return sk_ref_sp(src);
+ }
+
+ sk_sp<SkSpecialSurface> surf(src->makeSurface(colorType, colorSpace,
+ SkISize::Make(src->width(), src->height())));
+ if (!surf) {
+ return sk_ref_sp(src);
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+ SkPaint p;
+ p.setBlendMode(SkBlendMode::kSrc);
+ src->draw(canvas, 0, 0, &p);
+ return surf->makeImageSnapshot();
+}
+#endif
+
+// In repeat mode, when we are going to sample off one edge of the srcBounds, we require that the
+// opposite side be preserved.
+SkIRect SkImageFilter_Base::DetermineRepeatedSrcBound(const SkIRect& srcBounds,
+ const SkIVector& filterOffset,
+ const SkISize& filterSize,
+ const SkIRect& originalSrcBounds) {
+ SkIRect tmp = srcBounds;
+ tmp.adjust(-filterOffset.fX, -filterOffset.fY,
+ filterSize.fWidth - filterOffset.fX, filterSize.fHeight - filterOffset.fY);
+
+ if (tmp.fLeft < originalSrcBounds.fLeft || tmp.fRight > originalSrcBounds.fRight) {
+ tmp.fLeft = originalSrcBounds.fLeft;
+ tmp.fRight = originalSrcBounds.fRight;
+ }
+ if (tmp.fTop < originalSrcBounds.fTop || tmp.fBottom > originalSrcBounds.fBottom) {
+ tmp.fTop = originalSrcBounds.fTop;
+ tmp.fBottom = originalSrcBounds.fBottom;
+ }
+
+ return tmp;
+}
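+
+// e.g. srcBounds = (10,10,20,20) with filterOffset = (1,1) and filterSize = (3,3) expands to
+// (9,9,22,22); had that spilled past originalSrcBounds on either axis, that whole axis would
+// snap back to originalSrcBounds so repeat sampling keeps both opposite edges available.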
+
+void SkImageFilter_Base::PurgeCache() {
+ SkImageFilterCache::Get()->purge();
+}
+
+static sk_sp<SkImageFilter> apply_ctm_to_filter(sk_sp<SkImageFilter> input, const SkMatrix& ctm,
+ SkMatrix* remainder) {
+ if (ctm.isScaleTranslate() || as_IFB(input)->canHandleComplexCTM()) {
+ // The filter supports the CTM, so leave it as-is and 'remainder' stores the whole CTM
+ *remainder = ctm;
+ return input;
+ }
+
+ // We have a complex CTM and a filter that can't support them, so it needs to use the matrix
+ // transform filter that resamples the image contents. Decompose the simple portion of the ctm
+ // into 'remainder'
+ SkMatrix ctmToEmbed;
+ SkSize scale;
+ if (ctm.decomposeScale(&scale, &ctmToEmbed)) {
+        // decomposeScale splits ctm into ctmToEmbed * scale, so bake ctmToEmbed into the DAG
+        // with a matrix filter and return scale as the remaining matrix for the real CTM.
+ remainder->setScale(scale.fWidth, scale.fHeight);
+
+ // ctmToEmbed is passed to SkMatrixImageFilter, which performs its transforms as if it were
+ // a pre-transformation before applying the image-filter context's CTM. In this case, we
+ // need ctmToEmbed to be a post-transformation (i.e. after the scale matrix since
+ // decomposeScale produces ctm = ctmToEmbed * scale). Giving scale^-1 * ctmToEmbed * scale
+ // to the matrix filter achieves this effect.
+ // TODO (michaelludwig) - When the original root node of a filter can be drawn directly to a
+ // device using ctmToEmbed, this abuse of SkMatrixImageFilter can go away.
+ ctmToEmbed.preScale(scale.fWidth, scale.fHeight);
+ ctmToEmbed.postScale(1.f / scale.fWidth, 1.f / scale.fHeight);
+ } else {
+ // Unable to decompose
+ // FIXME Ideally we'd embed the entire CTM as part of the matrix image filter, but
+ // the device <-> src bounds calculations for filters are very brittle under perspective,
+ // and can easily run into precision issues (wrong bounds that clip), or performance issues
+ // (producing large source-space images where 80% of the image is compressed into a few
+        // device pixels). A longer term solution for perspective-space image filtering is needed;
+        // see skbug.com/9074.
+ if (ctm.hasPerspective()) {
+ *remainder = ctm;
+ return input;
+ }
+
+ ctmToEmbed = ctm;
+ remainder->setIdentity();
+ }
+
+ return SkMatrixImageFilter::Make(ctmToEmbed, kLow_SkFilterQuality, input);
+}
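+
+// Sanity check of the decomposition above: decomposeScale gives ctm = ctmToEmbed * scale, and
+// the embedded matrix becomes scale^-1 * ctmToEmbed * scale. Since SkMatrixImageFilter applies
+// its matrix before the context's CTM (here the returned remainder, i.e. scale), the net
+// transform is scale * (scale^-1 * ctmToEmbed * scale) == ctmToEmbed * scale == ctm.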
+
+sk_sp<SkImageFilter> SkImageFilter_Base::applyCTM(const SkMatrix& ctm, SkMatrix* remainder) const {
+ return apply_ctm_to_filter(this->refMe(), ctm, remainder);
+}
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.cpp b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
new file mode 100644
index 0000000000..c92338b457
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkImageFilterCache.h"
+
+#include <vector>
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkTInternalLList.h"
+
+#ifdef SK_BUILD_FOR_IOS
+ enum { kDefaultCacheSize = 2 * 1024 * 1024 };
+#else
+ enum { kDefaultCacheSize = 128 * 1024 * 1024 };
+#endif
+
+namespace {
+
+class CacheImpl : public SkImageFilterCache {
+public:
+ typedef SkImageFilterCacheKey Key;
+ CacheImpl(size_t maxBytes) : fMaxBytes(maxBytes), fCurrentBytes(0) { }
+ ~CacheImpl() override {
+ SkTDynamicHash<Value, Key>::Iter iter(&fLookup);
+
+ while (!iter.done()) {
+ Value* v = &*iter;
+ ++iter;
+ delete v;
+ }
+ }
+ struct Value {
+ Value(const Key& key, const skif::FilterResult<For::kOutput>& image,
+ const SkImageFilter* filter)
+ : fKey(key), fImage(image), fFilter(filter) {}
+
+ Key fKey;
+ skif::FilterResult<For::kOutput> fImage;
+ const SkImageFilter* fFilter;
+ static const Key& GetKey(const Value& v) {
+ return v.fKey;
+ }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Value);
+ };
+
+ bool get(const Key& key, skif::FilterResult<For::kOutput>* result) const override {
+ SkASSERT(result);
+
+ SkAutoMutexExclusive mutex(fMutex);
+ if (Value* v = fLookup.find(key)) {
+ if (v != fLRU.head()) {
+ fLRU.remove(v);
+ fLRU.addToHead(v);
+ }
+
+ *result = v->fImage;
+ return true;
+ }
+ return false;
+ }
+
+ void set(const Key& key, const SkImageFilter* filter,
+ const skif::FilterResult<For::kOutput>& result) override {
+ SkAutoMutexExclusive mutex(fMutex);
+ if (Value* v = fLookup.find(key)) {
+ this->removeInternal(v);
+ }
+ Value* v = new Value(key, result, filter);
+ fLookup.add(v);
+ fLRU.addToHead(v);
+ fCurrentBytes += result.image() ? result.image()->getSize() : 0;
+ if (auto* values = fImageFilterValues.find(filter)) {
+ values->push_back(v);
+ } else {
+ fImageFilterValues.set(filter, {v});
+ }
+
+ while (fCurrentBytes > fMaxBytes) {
+ Value* tail = fLRU.tail();
+ SkASSERT(tail);
+ if (tail == v) {
+ break;
+ }
+ this->removeInternal(tail);
+ }
+ }
+
+ void purge() override {
+ SkAutoMutexExclusive mutex(fMutex);
+ while (fCurrentBytes > 0) {
+ Value* tail = fLRU.tail();
+ SkASSERT(tail);
+ this->removeInternal(tail);
+ }
+ }
+
+ void purgeByImageFilter(const SkImageFilter* filter) override {
+ SkAutoMutexExclusive mutex(fMutex);
+ auto* values = fImageFilterValues.find(filter);
+ if (!values) {
+ return;
+ }
+ for (Value* v : *values) {
+ // We set the filter to be null so that removeInternal() won't delete from values while
+ // we're iterating over it.
+ v->fFilter = nullptr;
+ this->removeInternal(v);
+ }
+ fImageFilterValues.remove(filter);
+ }
+
+ SkDEBUGCODE(int count() const override { return fLookup.count(); })
+private:
+ void removeInternal(Value* v) {
+ if (v->fFilter) {
+ if (auto* values = fImageFilterValues.find(v->fFilter)) {
+ if (values->size() == 1 && (*values)[0] == v) {
+ fImageFilterValues.remove(v->fFilter);
+ } else {
+ for (auto it = values->begin(); it != values->end(); ++it) {
+ if (*it == v) {
+ values->erase(it);
+ break;
+ }
+ }
+ }
+ }
+ }
+ fCurrentBytes -= v->fImage.image() ? v->fImage.image()->getSize() : 0;
+ fLRU.remove(v);
+ fLookup.remove(v->fKey);
+ delete v;
+ }
+private:
+ SkTDynamicHash<Value, Key> fLookup;
+ mutable SkTInternalLList<Value> fLRU;
+ // Value* always points to an item in fLookup.
+ SkTHashMap<const SkImageFilter*, std::vector<Value*>> fImageFilterValues;
+ size_t fMaxBytes;
+ size_t fCurrentBytes;
+ mutable SkMutex fMutex;
+};
+
+} // namespace
+
+SkImageFilterCache* SkImageFilterCache::Create(size_t maxBytes) {
+ return new CacheImpl(maxBytes);
+}
+
+SkImageFilterCache* SkImageFilterCache::Get() {
+ static SkOnce once;
+ static SkImageFilterCache* cache;
+
+ once([]{ cache = SkImageFilterCache::Create(kDefaultCacheSize); });
+ return cache;
+}
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.h b/gfx/skia/skia/src/core/SkImageFilterCache.h
new file mode 100644
index 0000000000..b82f4daf29
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilterCache_DEFINED
+#define SkImageFilterCache_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkImageFilterTypes.h"
+
+struct SkIPoint;
+class SkImageFilter;
+
+struct SkImageFilterCacheKey {
+ SkImageFilterCacheKey(const uint32_t uniqueID, const SkMatrix& matrix,
+ const SkIRect& clipBounds, uint32_t srcGenID, const SkIRect& srcSubset)
+ : fUniqueID(uniqueID)
+ , fMatrix(matrix)
+ , fClipBounds(clipBounds)
+ , fSrcGenID(srcGenID)
+ , fSrcSubset(srcSubset) {
+ // Assert that Key is tightly-packed, since it is hashed.
+ static_assert(sizeof(SkImageFilterCacheKey) == sizeof(uint32_t) + sizeof(SkMatrix) +
+ sizeof(SkIRect) + sizeof(uint32_t) + 4 * sizeof(int32_t),
+ "image_filter_key_tight_packing");
+ fMatrix.getType(); // force initialization of type, so hashes match
+ SkASSERT(fMatrix.isFinite()); // otherwise we can't rely on == self when comparing keys
+ }
+
+ uint32_t fUniqueID;
+ SkMatrix fMatrix;
+ SkIRect fClipBounds;
+ uint32_t fSrcGenID;
+ SkIRect fSrcSubset;
+
+ bool operator==(const SkImageFilterCacheKey& other) const {
+ return fUniqueID == other.fUniqueID &&
+ fMatrix == other.fMatrix &&
+ fClipBounds == other.fClipBounds &&
+ fSrcGenID == other.fSrcGenID &&
+ fSrcSubset == other.fSrcSubset;
+ }
+};
+
+// This cache maps from (filter's unique ID + CTM + clipBounds + src bitmap generation ID) to result
+// NOTE: this is the _specific_ unique ID of the image filter, so refiltering the same image with a
+// copy of the image filter (with exactly the same parameters) will not yield a cache hit.
+class SkImageFilterCache : public SkRefCnt {
+public:
+ SK_USE_FLUENT_IMAGE_FILTER_TYPES_IN_CLASS
+
+ enum { kDefaultTransientSize = 32 * 1024 * 1024 };
+
+ virtual ~SkImageFilterCache() {}
+ static SkImageFilterCache* Create(size_t maxBytes);
+ static SkImageFilterCache* Get();
+
+ // Returns true on cache hit and updates 'result' to be the cached result. Returns false when
+ // not in the cache, in which case 'result' is not modified.
+ virtual bool get(const SkImageFilterCacheKey& key,
+ skif::FilterResult<For::kOutput>* result) const = 0;
+ // 'filter' is included in the caching to allow the purging of all of an image filter's cached
+ // results when it is destroyed.
+ virtual void set(const SkImageFilterCacheKey& key, const SkImageFilter* filter,
+ const skif::FilterResult<For::kOutput>& result) = 0;
+ virtual void purge() = 0;
+ virtual void purgeByImageFilter(const SkImageFilter*) = 0;
+ SkDEBUGCODE(virtual int count() const = 0;)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkImageFilterTypes.cpp b/gfx/skia/skia/src/core/SkImageFilterTypes.cpp
new file mode 100644
index 0000000000..d372fbf0b6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterTypes.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+
+// Both [I]Vectors and Sk[I]Sizes are transformed as non-positioned values, i.e. go through
+// mapVectors() not mapPoints().
+static SkIVector map_as_vector(int32_t x, int32_t y, const SkMatrix& matrix) {
+ SkVector v = SkVector::Make(SkIntToScalar(x), SkIntToScalar(y));
+ matrix.mapVectors(&v, 1);
+ return SkIVector::Make(SkScalarRoundToInt(v.fX), SkScalarRoundToInt(v.fY));
+}
+
+static SkVector map_as_vector(SkScalar x, SkScalar y, const SkMatrix& matrix) {
+ SkVector v = SkVector::Make(x, y);
+ matrix.mapVectors(&v, 1);
+ return v;
+}
+
+namespace skif {
+
+Mapping Mapping::Make(const SkMatrix& ctm, const SkImageFilter* filter) {
+ SkMatrix remainder, layer;
+ SkSize scale;
+ if (ctm.isScaleTranslate() || as_IFB(filter)->canHandleComplexCTM()) {
+ // It doesn't matter what type of matrix ctm is, we can have layer space be equivalent to
+ // device space.
+ remainder = SkMatrix::I();
+ layer = ctm;
+ } else if (ctm.decomposeScale(&scale, &remainder)) {
+ // TODO (michaelludwig) - Should maybe strip out any fractional part of the translation in
+ // 'ctm' so that can be incorporated during regular drawing, instead of by resampling the
+ // filtered image.
+ layer = SkMatrix::MakeScale(scale.fWidth, scale.fHeight);
+ } else {
+ // Perspective
+ // TODO (michaelludwig) - Should investigate choosing a scale factor for the layer matrix
+ // that minimizes the aliasing in the final draw.
+ remainder = ctm;
+ layer = SkMatrix::I();
+ }
+ return Mapping(remainder, layer);
+}
+
+// Instantiate map specializations for the 6 geometric types used during filtering
+template<>
+SkIRect Mapping::map<SkIRect>(const SkIRect& geom, const SkMatrix& matrix) {
+ return matrix.mapRect(SkRect::Make(geom)).roundOut();
+}
+
+template<>
+SkRect Mapping::map<SkRect>(const SkRect& geom, const SkMatrix& matrix) {
+ return matrix.mapRect(geom);
+}
+
+template<>
+SkIPoint Mapping::map<SkIPoint>(const SkIPoint& geom, const SkMatrix& matrix) {
+ SkPoint p = SkPoint::Make(SkIntToScalar(geom.fX), SkIntToScalar(geom.fY));
+ matrix.mapPoints(&p, 1);
+ return SkIPoint::Make(SkScalarRoundToInt(p.fX), SkScalarRoundToInt(p.fY));
+}
+
+template<>
+SkPoint Mapping::map<SkPoint>(const SkPoint& geom, const SkMatrix& matrix) {
+ SkPoint p;
+ matrix.mapPoints(&p, &geom, 1);
+ return p;
+}
+
+template<>
+IVector Mapping::map<IVector>(const IVector& geom, const SkMatrix& matrix) {
+ return IVector(map_as_vector(geom.fX, geom.fY, matrix));
+}
+
+template<>
+Vector Mapping::map<Vector>(const Vector& geom, const SkMatrix& matrix) {
+ return Vector(map_as_vector(geom.fX, geom.fY, matrix));
+}
+
+template<>
+SkISize Mapping::map<SkISize>(const SkISize& geom, const SkMatrix& matrix) {
+ SkIVector v = map_as_vector(geom.fWidth, geom.fHeight, matrix);
+ return SkISize::Make(v.fX, v.fY);
+}
+
+template<>
+SkSize Mapping::map<SkSize>(const SkSize& geom, const SkMatrix& matrix) {
+ SkVector v = map_as_vector(geom.fWidth, geom.fHeight, matrix);
+ return SkSize::Make(v.fX, v.fY);
+}
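+
+// A practical consequence of mapping sizes and vectors with mapVectors(): translation drops
+// out. e.g. under SkMatrix::MakeTrans(10, 10), map<SkPoint> takes (0,0) to (10,10) while
+// map<SkSize> leaves (5,5) unchanged.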
+
+} // end namespace skif
diff --git a/gfx/skia/skia/src/core/SkImageFilterTypes.h b/gfx/skia/skia/src/core/SkImageFilterTypes.h
new file mode 100644
index 0000000000..918ec7f54b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterTypes.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilterTypes_DEFINED
+#define SkImageFilterTypes_DEFINED
+
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+
+class GrRecordingContext;
+class SkImageFilter;
+class SkImageFilterCache;
+class SkSpecialSurface;
+class SkSurfaceProps;
+
+// The skif (SKI[mage]F[ilter]) namespace contains types that are used for filter implementations.
+// The defined types come in two groups: users of internal Skia types, and templates to help with
+// readability. Image filters cannot be implemented without access to key internal types, such as
+// SkSpecialImage. It is possible to avoid the use of the readability templates, although they are
+// strongly encouraged.
+namespace skif {
+
+// skif::IVector and skif::Vector represent plain-old-data types for storing direction vectors, so
+// that the coordinate-space templating system defined below can have a separate type id for
+// directions vs. points, and specialize appropriately. As such, all operations with direction
+// vectors are defined on the LayerSpace specialization, since that is the intended point of use.
+struct IVector {
+ int32_t fX;
+ int32_t fY;
+
+ IVector() = default;
+ IVector(int32_t x, int32_t y) : fX(x), fY(y) {}
+ explicit IVector(const SkIVector& v) : fX(v.fX), fY(v.fY) {}
+};
+
+struct Vector {
+ SkScalar fX;
+ SkScalar fY;
+
+ Vector() = default;
+ Vector(SkScalar x, SkScalar y) : fX(x), fY(y) {}
+ explicit Vector(const SkVector& v) : fX(v.fX), fY(v.fY) {}
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Coordinate Space Tagging
+// - In order to enforce correct coordinate spaces in image filter implementations and use,
+// geometry is wrapped by templated structs to declare in the type system what coordinate space
+// the coordinates are defined in.
+// - Currently there is ParameterSpace and DeviceSpace that are data-only wrappers around
+// coordinates, and the primary LayerSpace that provides all operative functionality for image
+// filters. It is intended that all logic about image bounds and access be conducted in the shared
+// layer space.
+// - The LayerSpace struct has type-safe specializations for SkIRect, SkRect, SkIPoint, SkPoint,
+// skif::IVector (to distinguish SkIVector from SkIPoint), skif::Vector, SkISize, and SkSize.
+// - A Mapping object provides type safe coordinate conversions between these spaces, and
+// automatically does the "right thing" for each geometric type.
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// ParameterSpace is a data-only wrapper around Skia's geometric types such as SkIPoint, and SkRect.
+// Parameter space is the same as the local coordinate space of an SkShader, or the coordinates
+// passed into SkCanvas::drawX calls, but "local" is avoided due to the alliteration with layer
+// space. SkImageFilters are defined in terms of ParameterSpace<T> geometry and must use the Mapping
+// on Context to transform the parameters into LayerSpace to evaluate the filter in the shared
+// coordinate space of the entire filter DAG.
+//
+// A value of ParameterSpace<SkIRect> implies that its wrapped SkIRect is defined in the local
+// parameter space.
+template<typename T>
+class ParameterSpace {
+public:
+ explicit ParameterSpace(const T& data) : fData(data) {}
+ explicit ParameterSpace(T&& data) : fData(std::move(data)) {}
+
+ explicit operator const T&() const { return fData; }
+
+private:
+ T fData;
+};
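+
+// Usage is deliberately explicit at both ends, e.g.:
+//   ParameterSpace<SkRect> local(SkRect::MakeWH(10, 10));
+//   const SkRect& raw = static_cast<const SkRect&>(local); // opt back out of the tagging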
+
+// DeviceSpace is a data-only wrapper around Skia's geometric types. It is similar to
+// 'ParameterSpace' except that it is used to represent geometry that has been transformed or
+// defined in the root device space (i.e. the final pixels of drawn content). Much of what SkCanvas
+// tracks, such as its clip bounds, is defined in this space, and DeviceSpace provides a
+// type-enforced mechanism for the canvas to pass that information into the image filtering system,
+// using the Mapping of the filtering context.
+template<typename T>
+class DeviceSpace {
+public:
+ explicit DeviceSpace(const T& data) : fData(data) {}
+ explicit DeviceSpace(T&& data) : fData(std::move(data)) {}
+
+ explicit operator const T&() const { return fData; }
+
+private:
+ T fData;
+};
+
+// LayerSpace is a geometric wrapper that specifies the geometry is defined in the shared layer
+// space where image filters are evaluated. For a given Context (and its Mapping), the image filter
+// DAG operates in the same coordinate space. This space may be different from the local coordinate
+// space that defined the image filter parameters (such as blur sigma), and it may be different
+// from the total CTM of the SkCanvas.
+//
+// To encourage correct filter use and implementation, the bulk of filter logic should be performed
+// in layer space (e.g. determining what portion of an input image to read, or what the output
+// region is). LayerSpace specializations for the six common Skia math types (Sk[I]Rect, Sk[I]Point,
+// and Sk[I]Size), and skif::[I]Vector (to allow vectors to be specialized separately from points)
+// are provided that mimic their APIs but preserve the coordinate space and enforce type semantics.
+template<typename T>
+class LayerSpace {};
+
+// Layer-space specialization for integerized direction vectors.
+template<>
+class LayerSpace<IVector> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const IVector& geometry) : fData(geometry) {}
+ explicit LayerSpace(IVector&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const IVector&() const { return fData; }
+
+ explicit operator SkIVector() const { return SkIVector::Make(fData.fX, fData.fY); }
+
+ int32_t x() const { return fData.fX; }
+ int32_t y() const { return fData.fY; }
+
+ LayerSpace<IVector> operator-() const { return LayerSpace<IVector>({-fData.fX, -fData.fY}); }
+
+ LayerSpace<IVector> operator+(const LayerSpace<IVector>& v) const {
+ LayerSpace<IVector> sum = *this;
+ sum += v;
+ return sum;
+ }
+ LayerSpace<IVector> operator-(const LayerSpace<IVector>& v) const {
+ LayerSpace<IVector> diff = *this;
+ diff -= v;
+ return diff;
+ }
+
+ void operator+=(const LayerSpace<IVector>& v) {
+ fData.fX += v.fData.fX;
+ fData.fY += v.fData.fY;
+ }
+ void operator-=(const LayerSpace<IVector>& v) {
+ fData.fX -= v.fData.fX;
+ fData.fY -= v.fData.fY;
+ }
+
+private:
+ IVector fData;
+};
+
+// Layer-space specialization for floating point direction vectors.
+template<>
+class LayerSpace<Vector> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const Vector& geometry) : fData(geometry) {}
+ explicit LayerSpace(Vector&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const Vector&() const { return fData; }
+
+ explicit operator SkVector() const { return SkVector::Make(fData.fX, fData.fY); }
+
+ SkScalar x() const { return fData.fX; }
+ SkScalar y() const { return fData.fY; }
+
+ SkScalar length() const { return SkVector::Length(fData.fX, fData.fY); }
+
+ LayerSpace<Vector> operator-() const { return LayerSpace<Vector>({-fData.fX, -fData.fY}); }
+
+ LayerSpace<Vector> operator*(SkScalar s) const {
+ LayerSpace<Vector> scaled = *this;
+ scaled *= s;
+ return scaled;
+ }
+
+ LayerSpace<Vector> operator+(const LayerSpace<Vector>& v) const {
+ LayerSpace<Vector> sum = *this;
+ sum += v;
+ return sum;
+ }
+ LayerSpace<Vector> operator-(const LayerSpace<Vector>& v) const {
+ LayerSpace<Vector> diff = *this;
+ diff -= v;
+ return diff;
+ }
+
+ void operator*=(SkScalar s) {
+ fData.fX *= s;
+ fData.fY *= s;
+ }
+ void operator+=(const LayerSpace<Vector>& v) {
+ fData.fX += v.fData.fX;
+ fData.fY += v.fData.fY;
+ }
+ void operator-=(const LayerSpace<Vector>& v) {
+ fData.fX -= v.fData.fX;
+ fData.fY -= v.fData.fY;
+ }
+
+ friend LayerSpace<Vector> operator*(SkScalar s, const LayerSpace<Vector>& b) {
+ return b * s;
+ }
+
+private:
+ Vector fData;
+};
+
+// Layer-space specialization for integer 2D coordinates (treated as positions, not directions).
+template<>
+class LayerSpace<SkIPoint> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkIPoint& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkIPoint&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkIPoint&() const { return fData; }
+
+ // Parrot the SkIPoint API while preserving coordinate space.
+ int32_t x() const { return fData.fX; }
+ int32_t y() const { return fData.fY; }
+
+ // Offsetting by direction vectors produces new points
+ LayerSpace<SkIPoint> operator+(const LayerSpace<IVector>& v) {
+ return LayerSpace<SkIPoint>(fData + SkIVector(v));
+ }
+ LayerSpace<SkIPoint> operator-(const LayerSpace<IVector>& v) {
+ return LayerSpace<SkIPoint>(fData - SkIVector(v));
+ }
+
+ void operator+=(const LayerSpace<IVector>& v) {
+ fData += SkIVector(v);
+ }
+ void operator-=(const LayerSpace<IVector>& v) {
+ fData -= SkIVector(v);
+ }
+
+ // Subtracting another point yields the direction vector between them
+ LayerSpace<IVector> operator-(const LayerSpace<SkIPoint>& p) {
+ return LayerSpace<IVector>(IVector(fData - p.fData));
+ }
+
+private:
+ SkIPoint fData;
+};
+
+// Layer-space specialization for floating point 2D coordinates (treated as positions)
+template<>
+class LayerSpace<SkPoint> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkPoint& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkPoint&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkPoint&() const { return fData; }
+
+ // Parrot the SkPoint API while preserving coordinate space.
+ SkScalar x() const { return fData.fX; }
+ SkScalar y() const { return fData.fY; }
+
+ SkScalar distanceToOrigin() const { return fData.distanceToOrigin(); }
+
+ // Offsetting by direction vectors produces new points
+ LayerSpace<SkPoint> operator+(const LayerSpace<Vector>& v) {
+ return LayerSpace<SkPoint>(fData + SkVector(v));
+ }
+ LayerSpace<SkPoint> operator-(const LayerSpace<Vector>& v) {
+ return LayerSpace<SkPoint>(fData - SkVector(v));
+ }
+
+ void operator+=(const LayerSpace<Vector>& v) {
+ fData += SkVector(v);
+ }
+ void operator-=(const LayerSpace<Vector>& v) {
+ fData -= SkVector(v);
+ }
+
+ // Subtracting another point yields the direction vector between them
+ LayerSpace<Vector> operator-(const LayerSpace<SkPoint>& p) {
+ return LayerSpace<Vector>(Vector(fData - p.fData));
+ }
+
+private:
+ SkPoint fData;
+};
+
+// Layer-space specialization for integer dimensions
+template<>
+class LayerSpace<SkISize> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkISize& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkISize&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkISize&() const { return fData; }
+
+ int32_t width() const { return fData.width(); }
+ int32_t height() const { return fData.height(); }
+
+ bool isEmpty() const { return fData.isEmpty(); }
+
+private:
+ SkISize fData;
+};
+
+// Layer-space specialization for floating point dimensions
+template<>
+class LayerSpace<SkSize> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkSize& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkSize&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkSize&() const { return fData; }
+
+ SkScalar width() const { return fData.width(); }
+ SkScalar height() const { return fData.height(); }
+
+ bool isEmpty() const { return fData.isEmpty(); }
+ bool isZero() const { return fData.isZero(); }
+
+ LayerSpace<SkISize> round() const { return LayerSpace<SkISize>(fData.toRound()); }
+ LayerSpace<SkISize> ceil() const { return LayerSpace<SkISize>(fData.toCeil()); }
+ LayerSpace<SkISize> floor() const { return LayerSpace<SkISize>(fData.toFloor()); }
+
+private:
+ SkSize fData;
+};
+
+// Layer-space specialization for axis-aligned integer bounding boxes.
+template<>
+class LayerSpace<SkIRect> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkIRect& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkIRect&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkIRect&() const { return fData; }
+
+ // Parrot the SkIRect API while preserving coord space
+ int32_t left() const { return fData.fLeft; }
+ int32_t top() const { return fData.fTop; }
+ int32_t right() const { return fData.fRight; }
+ int32_t bottom() const { return fData.fBottom; }
+
+ int32_t width() const { return fData.width(); }
+ int32_t height() const { return fData.height(); }
+
+ LayerSpace<SkIPoint> topLeft() const { return LayerSpace<SkIPoint>(fData.topLeft()); }
+ LayerSpace<SkISize> size() const { return LayerSpace<SkISize>(fData.size()); }
+
+ bool intersect(const LayerSpace<SkIRect>& r) { return fData.intersect(r.fData); }
+ void join(const LayerSpace<SkIRect>& r) { fData.join(r.fData); }
+ void offset(const LayerSpace<IVector>& v) { fData.offset(SkIVector(v)); }
+ void outset(const LayerSpace<SkISize>& delta) { fData.outset(delta.width(), delta.height()); }
+
+private:
+ SkIRect fData;
+};
+
+// Layer-space specialization for axis-aligned float bounding boxes.
+template<>
+class LayerSpace<SkRect> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkRect& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkRect&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkRect&() const { return fData; }
+
+ // Parrot the SkRect API while preserving coord space and usage
+ SkScalar left() const { return fData.fLeft; }
+ SkScalar top() const { return fData.fTop; }
+ SkScalar right() const { return fData.fRight; }
+ SkScalar bottom() const { return fData.fBottom; }
+
+ SkScalar width() const { return fData.width(); }
+ SkScalar height() const { return fData.height(); }
+
+ LayerSpace<SkPoint> topLeft() const {
+ return LayerSpace<SkPoint>(SkPoint::Make(fData.fLeft, fData.fTop));
+ }
+ LayerSpace<SkSize> size() const {
+ return LayerSpace<SkSize>(SkSize::Make(fData.width(), fData.height()));
+ }
+ LayerSpace<SkIRect> roundOut() const { return LayerSpace<SkIRect>(fData.roundOut()); }
+
+ bool intersect(const LayerSpace<SkRect>& r) { return fData.intersect(r.fData); }
+ void join(const LayerSpace<SkRect>& r) { fData.join(r.fData); }
+ void offset(const LayerSpace<Vector>& v) { fData.offset(SkVector(v)); }
+ void outset(const LayerSpace<SkSize>& delta) { fData.outset(delta.width(), delta.height()); }
+
+private:
+ SkRect fData;
+};
+
+// Mapping is the primary definition of the shared layer space used when evaluating an image filter
+// DAG. It encapsulates any needed decomposition of the total CTM into the parameter-to-layer matrix
+// (that filters use to map their parameters to the layer space), and the layer-to-device matrix
+// (that canvas uses to map the output layer-space image into its root device space). Mapping
+// defines functions to transform ParameterSpace and DeviceSpace types to and from their LayerSpace
+// variants, which can then be used and reasoned about by SkImageFilter implementations.
+class Mapping {
+public:
+ // This constructor allows the decomposition to be explicitly provided
+ Mapping(const SkMatrix& layerToDev, const SkMatrix& paramToLayer)
+ : fLayerToDevMatrix(layerToDev)
+ , fParamToLayerMatrix(paramToLayer) {}
+
+ // Make the default decomposition Mapping, given the total CTM and the root image filter.
+ static Mapping Make(const SkMatrix& ctm, const SkImageFilter* filter);
+
+ // Return a new Mapping object whose parameter-to-layer matrix is equal to this->layerMatrix() *
+ // local, but both share the same layer-to-device matrix.
+ Mapping concatLocal(const SkMatrix& local) const {
+ return Mapping(fLayerToDevMatrix, SkMatrix::Concat(fParamToLayerMatrix, local));
+ }
+
+ const SkMatrix& deviceMatrix() const { return fLayerToDevMatrix; }
+ const SkMatrix& layerMatrix() const { return fParamToLayerMatrix; }
+ SkMatrix totalMatrix() const {
+ return SkMatrix::Concat(fLayerToDevMatrix, fParamToLayerMatrix);
+ }
+
+ template<typename T>
+ LayerSpace<T> paramToLayer(const ParameterSpace<T>& paramGeometry) const {
+ return LayerSpace<T>(map(static_cast<const T&>(paramGeometry), fParamToLayerMatrix));
+ }
+
+ template<typename T>
+ LayerSpace<T> deviceToLayer(const DeviceSpace<T>& devGeometry) const {
+ // The mapping from device space to layer space is defined by the inverse of the
+ // layer-to-device matrix
+ SkMatrix devToLayerMatrix;
+ if (!fLayerToDevMatrix.invert(&devToLayerMatrix)) {
+ // Punt and just pass through the geometry unmodified...
+ return LayerSpace<T>(static_cast<const T&>(devGeometry));
+ } else {
+ return LayerSpace<T>(map(static_cast<const T&>(devGeometry), devToLayerMatrix));
+ }
+ }
+
+ template<typename T>
+ DeviceSpace<T> layerToDevice(const LayerSpace<T>& layerGeometry) const {
+ return DeviceSpace<T>(map(static_cast<const T&>(layerGeometry), fLayerToDevMatrix));
+ }
+
+private:
+ // The image filter process decomposes the total CTM into layerToDev * paramToLayer and uses the
+ // param-to-layer matrix to define the layer-space coordinate system. Depending on how it's
+ // decomposed, either the layer matrix or the device matrix could be the identity matrix (but
+ // sometimes neither).
+ SkMatrix fLayerToDevMatrix;
+ SkMatrix fParamToLayerMatrix;
+
+ // Actual geometric mapping operations that work on coordinates and matrices w/o the type
+ // safety of the coordinate space wrappers (hence these are private).
+ template<typename T>
+ static T map(const T& geom, const SkMatrix& matrix);
+};
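+// A minimal usage sketch (illustrative; 'ctm', 'filter', and 'localBounds' are assumed inputs):
+//     skif::Mapping mapping = skif::Mapping::Make(ctm, filter);
+//     skif::LayerSpace<SkRect> layerBounds =
+//             mapping.paramToLayer(skif::ParameterSpace<SkRect>(localBounds));
+//     skif::DeviceSpace<SkRect> devBounds = mapping.layerToDevice(layerBounds);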
+
+// Usage is a template tag to improve the readability of filter implementations. It is attached to
+// images and geometry to group a collection of related variables and ensure that moving from one
+// use case to another is explicit.
+// NOTE: This can be aliased as 'For' when using the fluent type names.
+// TODO (michaelludwig) - If the primary motivation for Usage--enforcing layer to image space
+// transformations safely when multiple images are involved--can be handled entirely by helper
+// functions on FilterResult, then Usage can go away and FilterResult will not need to be templated
+enum class Usage {
+ // Designates the semantic purpose of the bounds, coordinate, or image as being an input
+ // to the image filter calculations. When used, it denotes a generic input, such as the
+ // current input in a dynamic loop, or some aggregate of all inputs. Because most image
+ // filters consume only 1 or 2 inputs, the related kInput0 and kInput1 are
+ // also defined.
+ kInput,
+ // A more specific version of kInput, this marks the tagged variable as attached to the
+ // image filter of SkImageFilter_Base::getInput(0).
+ kInput0,
+ // A more specific version of kInput, this marks the tagged variable as attached to the
+ // image filter of SkImageFilter_Base::getInput(1).
+ kInput1,
+ // Designates the purpose of the bounds, coordinate, or image as being the output of the
+ // current image filter calculation. There is only ever one output for an image filter.
+ kOutput,
+};
+
+// Convenience macros to add 'using' declarations that rename the above enums to provide a more
+// fluent and readable API. This should only be used in a private or local scope to prevent leakage
+// of the names. Use the IN_CLASS variant at the start of a class declaration in those scenarios.
+// These macros enable the following simpler type names:
+// skif::FilterResult<skif::Usage::kInput> -> FilterResult<For::kInput>
+#define SK_USE_FLUENT_IMAGE_FILTER_TYPES \
+ using For = skif::Usage;
+
+#define SK_USE_FLUENT_IMAGE_FILTER_TYPES_IN_CLASS \
+ protected: SK_USE_FLUENT_IMAGE_FILTER_TYPES public:
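+
+// For instance, inside a filter implementation (sketch; 'image' and 'origin' are assumed to be
+// in scope):
+//     SK_USE_FLUENT_IMAGE_FILTER_TYPES
+//     skif::FilterResult<For::kOutput> result(std::move(image), origin);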
+
+// Wraps an SkSpecialImage and tags it with a corresponding usage, either as generic input (e.g. the
+// source image), or a specific input image from a filter's connected inputs. It also includes the
+// origin of the image in the layer space. This origin is used to draw the image in the correct
+// location. The 'layerBounds' rectangle of the filtered image is the layer-space bounding box of
+// the image. It has its top left corner at 'origin' and has the same dimensions as the underlying
+// special image subset. Transforming 'layerBounds' by the Context's layer matrix and painting it
+// with the subset rectangle will display the filtered results in the appropriate device-space
+// region.
+//
+// When filter implementations are processing intermediate FilterResults, it can be assumed that
+// every FilterResult's layerBounds is in the same layer coordinate space defined by the shared
+// skif::Context.
+//
+// NOTE: This is named FilterResult since most instances will represent the output of an image
+// filter (even if that is then cast to be the input to the next filter). The main exception is the
+// source input used when an input filter is null, but from a data-standpoint it is the same since
+// it is equivalent to the result of an identity filter.
+template<Usage kU>
+class FilterResult {
+public:
+ FilterResult() : fImage(nullptr), fOrigin(SkIPoint::Make(0, 0)) {}
+
+ FilterResult(sk_sp<SkSpecialImage> image, const LayerSpace<SkIPoint>& origin)
+ : fImage(std::move(image))
+ , fOrigin(origin) {}
+
+ // Allow explicit moves/copies in order to cast from one use type to another, except kInput0
+ // and kInput1 can only be cast to kOutput (e.g. as part of a noop image filter).
+ template<Usage kI>
+ explicit FilterResult(FilterResult<kI>&& image)
+ : fImage(std::move(image.fImage))
+ , fOrigin(image.fOrigin) {
+ static_assert((kU != Usage::kInput) || (kI != Usage::kInput0 && kI != Usage::kInput1),
+ "kInput0 and kInput1 cannot be moved to more generic kInput usage.");
+ static_assert((kU != Usage::kInput0 && kU != Usage::kInput1) ||
+ (kI == kU || kI == Usage::kInput || kI == Usage::kOutput),
+ "Can only move to specific input from the generic kInput or kOutput usage.");
+ }
+
+ template<Usage kI>
+ explicit FilterResult(const FilterResult<kI>& image)
+ : fImage(image.fImage)
+ , fOrigin(image.fOrigin) {
+ static_assert((kU != Usage::kInput) || (kI != Usage::kInput0 && kI != Usage::kInput1),
+ "kInput0 and kInput1 cannot be copied to more generic kInput usage.");
+ static_assert((kU != Usage::kInput0 && kU != Usage::kInput1) ||
+ (kI == kU || kI == Usage::kInput || kI == Usage::kOutput),
+ "Can only copy to specific input from the generic kInput usage.");
+ }
+
+ const SkSpecialImage* image() const { return fImage.get(); }
+ sk_sp<SkSpecialImage> refImage() const { return fImage; }
+
+ // Get the layer-space bounds of the result. This will have the same dimensions as the
+ // image and its top left corner will be 'origin()'.
+ LayerSpace<SkIRect> layerBounds() const {
+ return LayerSpace<SkIRect>(SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(),
+ fImage->width(), fImage->height()));
+ }
+
+ // Get the layer-space coordinate of this image's top left pixel.
+ const LayerSpace<SkIPoint>& layerOrigin() const { return fOrigin; }
+
+ // Extract image and origin, safely when the image is null.
+ // TODO (michaelludwig) - This is intended for convenience until all call sites of
+ // SkImageFilter_Base::filterImage() have been updated to work in the new type system
+ // (which comes later as SkDevice, SkCanvas, etc. need to be modified, and coordinate space
+ // tagging needs to be added).
+ sk_sp<SkSpecialImage> imageAndOffset(SkIPoint* offset) const {
+ if (fImage) {
+ *offset = SkIPoint(fOrigin);
+ return fImage;
+ } else {
+ *offset = {0, 0};
+ return nullptr;
+ }
+ }
+
+private:
+ // Allow all FilterResult templates access to each others members
+ template<Usage kO>
+ friend class FilterResult;
+
+ sk_sp<SkSpecialImage> fImage;
+ LayerSpace<SkIPoint> fOrigin;
+};
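+// Usage sketch: a no-op filter can forward its input as its output through the explicit
+// usage-cast constructor (assuming the fluent 'For' alias is in scope and 'ctx' is the Context):
+//     skif::FilterResult<For::kInput> in = ctx.source();
+//     skif::FilterResult<For::kOutput> out(std::move(in));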
+
+// The context contains all necessary information to describe how the image filter should be
+// computed (i.e. the current layer matrix and clip), and the color information of the output of a
+// filter DAG. For now, this is just the color space (of the original requesting device). This is
+// used when constructing intermediate rendering surfaces, so that we ensure we land in a surface
+// that's similar/compatible to the final consumer of the DAG's output.
+class Context {
+public:
+ SK_USE_FLUENT_IMAGE_FILTER_TYPES_IN_CLASS
+
+ // Creates a context with the given layer matrix and destination clip, reading from 'source'
+ // with an origin of (0,0).
+ Context(const SkMatrix& layerMatrix, const SkIRect& clipBounds, SkImageFilterCache* cache,
+ SkColorType colorType, SkColorSpace* colorSpace, const SkSpecialImage* source)
+ : fMapping(SkMatrix::I(), layerMatrix)
+ , fDesiredOutput(clipBounds)
+ , fCache(cache)
+ , fColorType(colorType)
+ , fColorSpace(colorSpace)
+ , fSource(sk_ref_sp(source), LayerSpace<SkIPoint>({0, 0})) {}
+
+ Context(const Mapping& mapping, const LayerSpace<SkIRect>& desiredOutput,
+ SkImageFilterCache* cache, SkColorType colorType, SkColorSpace* colorSpace,
+ const FilterResult<For::kInput>& source)
+ : fMapping(mapping)
+ , fDesiredOutput(desiredOutput)
+ , fCache(cache)
+ , fColorType(colorType)
+ , fColorSpace(colorSpace)
+ , fSource(source) {}
+
+ // The mapping that defines the transformation from local parameter space of the filters to the
+ // layer space where the image filters are evaluated, as well as the remaining transformation
+ // from the layer space to the final device space. The layer space defined by the returned
+ // Mapping may be the same as the root device space, or be an intermediate space that is
+ // supported by the image filter DAG (depending on what it returns from canHandleComplexCTM()).
+ // If a node returns false from canHandleComplexCTM(), the layer matrix of the mapping will be
+ // at most a scale + translate, and the remaining matrix will be appropriately set to transform
+ // the layer space to the final device space (applied by the SkCanvas when filtering is
+ // finished).
+ const Mapping& mapping() const { return fMapping; }
+ // DEPRECATED: Use mapping() and its coordinate-space types instead
+ const SkMatrix& ctm() const { return fMapping.layerMatrix(); }
+ // The bounds, in the layer space, that the filtered image will be clipped to. The output
+ // from filterImage() must cover these clip bounds, except in areas where it will just be
+ // transparent black, in which case a smaller output image can be returned.
+ const LayerSpace<SkIRect>& desiredOutput() const { return fDesiredOutput; }
+ // DEPRECATED: Use desiredOutput() instead
+ const SkIRect& clipBounds() const { return static_cast<const SkIRect&>(fDesiredOutput); }
+ // The cache to use when recursing through the filter DAG, in order to avoid repeated
+ // calculations of the same image.
+ SkImageFilterCache* cache() const { return fCache; }
+ // The output device's color type, which can be used for intermediate images to be
+ // compatible with the eventual target of the filtered result.
+ SkColorType colorType() const { return fColorType; }
+#if SK_SUPPORT_GPU
+ GrColorType grColorType() const { return SkColorTypeToGrColorType(fColorType); }
+#endif
+ // The output device's color space, so intermediate images can match, and so filtering can
+ // be performed in the destination color space.
+ SkColorSpace* colorSpace() const { return fColorSpace; }
+ sk_sp<SkColorSpace> refColorSpace() const { return sk_ref_sp(fColorSpace); }
+ // The default surface properties to use when making transient surfaces during filtering.
+ const SkSurfaceProps* surfaceProps() const { return &fSource.image()->props(); }
+
+ // This is the image to use whenever an expected input filter has been set to null. In the
+ // majority of cases, this is the original source image for the image filter DAG so it comes
+ // from the SkDevice that holds either the saveLayer or the temporary rendered result. The
+ // exception is composing two image filters (via SkImageFilters::Compose), which must use
+ // the output of the inner DAG as the "source" for the outer DAG.
+ const FilterResult<For::kInput>& source() const { return fSource; }
+ // DEPRECATED: Use source() instead to get both the image and its origin.
+ const SkSpecialImage* sourceImage() const { return fSource.image(); }
+
+ // True if image filtering should occur on the GPU if possible.
+ bool gpuBacked() const { return fSource.image()->isTextureBacked(); }
+ // The recording context to use when computing the filter with the GPU.
+ GrRecordingContext* getContext() const { return fSource.image()->getContext(); }
+
+ /**
+ * Since a context can be built directly, its constructor has no chance to "return null" if
+ * it's given invalid or unsupported inputs. Call this to know if the context can be
+ * used.
+ *
+ * The SkImageFilterCache Key, for example, requires a finite ctm (no infinities or NaN),
+ * so that test is part of isValid.
+ */
+ bool isValid() const { return fSource.image() != nullptr && fMapping.layerMatrix().isFinite(); }
+
+ // Create a surface of the given size, that matches the context's color type and color space
+ // as closely as possible, and uses the same backend of the device that produced the source
+ // image.
+ sk_sp<SkSpecialSurface> makeSurface(const SkISize& size,
+ const SkSurfaceProps* props = nullptr) const {
+ return fSource.image()->makeSurface(fColorType, fColorSpace, size,
+ kPremul_SkAlphaType, props);
+ }
+
+ // Create a new context that matches this context, but with an overridden layer space.
+ Context withNewMapping(const Mapping& mapping) const {
+ return Context(mapping, fDesiredOutput, fCache, fColorType, fColorSpace, fSource);
+ }
+ // Create a new context that matches this context, but with an overridden desired output rect.
+ Context withNewDesiredOutput(const LayerSpace<SkIRect>& desiredOutput) const {
+ return Context(fMapping, desiredOutput, fCache, fColorType, fColorSpace, fSource);
+ }
+
+private:
+ Mapping fMapping;
+ LayerSpace<SkIRect> fDesiredOutput;
+ SkImageFilterCache* fCache;
+ SkColorType fColorType;
+ // The pointed-to object is owned by the device controlling the filter process, and our lifetime
+ // is bounded by the device, so this can be a bare pointer.
+ SkColorSpace* fColorSpace;
+ FilterResult<For::kInput> fSource;
+};
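+
+// Sketch: deriving a child context whose desired output includes a filter's required margin
+// ('ctx' is the current Context and 'margin' is an assumed LayerSpace<SkISize>):
+//     skif::LayerSpace<SkIRect> required = ctx.desiredOutput();
+//     required.outset(margin);
+//     skif::Context childCtx = ctx.withNewDesiredOutput(required);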
+
+} // end namespace skif
+
+#endif // SkImageFilterTypes_DEFINED
diff --git a/gfx/skia/skia/src/core/SkImageFilter_Base.h b/gfx/skia/skia/src/core/SkImageFilter_Base.h
new file mode 100644
index 0000000000..5837e5e9f3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilter_Base.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilter_Base_DEFINED
+#define SkImageFilter_Base_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/SkTArray.h"
+
+#include "src/core/SkImageFilterTypes.h"
+
+class GrFragmentProcessor;
+class GrRecordingContext;
+
+// True base class that all SkImageFilter implementations need to extend from. This provides the
+// actual API surface that Skia will use to compute the filtered images.
+class SkImageFilter_Base : public SkImageFilter {
+public:
+ SK_USE_FLUENT_IMAGE_FILTER_TYPES_IN_CLASS
+
+ // DEPRECATED - Use skif::Context directly.
+ using Context = skif::Context;
+
+ /**
+ * Request a new filtered image to be created from the src image. The returned skif::Image
+ * provides both the pixel data and the origin point that it should be drawn at, relative to
+ * the layer space defined by the provided context.
+ *
+ * If the result image cannot be created, or the result would be transparent black, returns
+ * a skif::Image that has a null special image, in which case its origin should be ignored.
+ *
+ * TODO: Right now the image filters sometimes return empty result bitmaps/special images.
+ * That doesn't seem quite right.
+ */
+ skif::FilterResult<For::kOutput> filterImage(const skif::Context& context) const;
+
+ /**
+ * Calculate the smallest-possible required layer bounds that would provide sufficient
+ * information to correctly compute the image filter for every pixel in the desired output
+ * bounds. The 'desiredOutput' is intended to represent either the root render target bounds,
+ * or the device-space bounds of the current clip. If the bounds of the content that will be
+ * drawn into the layer is known, 'knownContentBounds' should be provided, since it can be
+ * used to restrict the size of the layer if the image filter DAG does not affect transparent
+ * black.
+ *
+ * The returned rect is in the layer space defined by 'mapping', so it directly represents
+ * the size and location of the SkDevice created to rasterize the content prior to invoking the
+ * image filter (assuming its CTM and basis matrix are configured to match 'mapping').
+ *
+ * While this operation transforms a device-space output bounds to a layer-space input bounds,
+ * it is not necessarily the inverse of getOutputBounds(). For instance, a blur needs to have
+ * an outset margin when reading pixels at the edge (to satisfy its kernel), thus it expands
+ * its required input rect to include every pixel that contributes to the desired output rect.
+ *
+ * @param mapping The coordinate space mapping that defines both the transformation
+ * between local and layer, and layer to root device space, that will be
+ * used when the filter is later invoked.
+ * @param desiredOutput The desired output boundary that needs to be covered by the filter's
+ * output (assuming that the filter is then invoked with a suitable input)
+ * @param knownContentBounds
+ * Optional, the known layer-space bounds of the non-transparent content
+ * that would be rasterized in the source input image.
+ *
+ * @return The layer-space bounding box to use for an SkDevice when drawing the source image.
+ */
+ skif::LayerSpace<SkIRect> getInputBounds(
+ const skif::Mapping& mapping, const skif::DeviceSpace<SkRect>& desiredOutput,
+ const skif::ParameterSpace<SkRect>* knownContentBounds) const;
+
+ /**
+ * Calculate the device-space bounds of the output of this filter DAG, if it were to process
+ * an image layer covering the 'contentBounds'. The 'mapping' defines how the content will be
+ * transformed to layer space when it is drawn, and how the output filter image is then
+ * transformed to the final device space (i.e. it specifies the mapping between the root device
+ * space and the parameter space of the initially provided content).
+ *
+ * While this operation transforms a parameter-space input bounds to a device-space output
+ * bounds, it is not necessarily the inverse of getInputBounds(). For instance, a blur needs to
+ * have an outset margin when reading pixels at the edge (to satisfy its kernel), so it will
+ * generate a result larger than its input (so that the blur is visible) and, thus, expands its
+ * output to include every pixel that it will touch.
+ *
+ * @param mapping The coordinate space mapping that defines both the transformation
+ * between local and layer, and layer to root device space, that will be
+ * used when the filter is later invoked.
+ * @param contentBounds The local-space bounds of the non-transparent content that would be
+ * drawn into the source image prior to filtering with this DAG, i.e.
+ * the same as 'knownContentBounds' in getInputBounds().
+ *
+ * @return The root device-space bounding box of the filtered image, were it applied to
+ * content contained by 'contentBounds' and then drawn with 'mapping' to the root
+ * device (w/o any additional clipping).
+ */
+ skif::DeviceSpace<SkIRect> getOutputBounds(
+ const skif::Mapping& mapping, const skif::ParameterSpace<SkRect>& contentBounds) const;
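+
+    // Sketch relating the two queries (they are not exact inverses; e.g. a blur outsets both).
+    // 'filter' is an assumed SkImageFilter_Base*; 'mapping', 'clipRect', and 'contentRect' are
+    // assumed inputs:
+    //     skif::LayerSpace<SkIRect> layerInput = filter->getInputBounds(
+    //             mapping, skif::DeviceSpace<SkRect>(clipRect), nullptr);
+    //     skif::DeviceSpace<SkIRect> devOutput = filter->getOutputBounds(
+    //             mapping, skif::ParameterSpace<SkRect>(contentRect));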
+
+ /**
+ * Returns whether any edges of the crop rect have been set. The crop
+ * rect is set at construction time, and determines which pixels from the
+ * input image will be processed, and which pixels in the output image will be allowed.
+ * The size of the crop rect should be
+ * used as the size of the destination image. The origin of this rect
+ * should be used to offset access to the input images, and should also
+ * be added to the "offset" parameter in onFilterImage.
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter
+ */
+ bool cropRectIsSet() const { return fCropRect.flags() != 0x0; }
+
+ // DEPRECATED - Remove once cropping is handled by a separate filter
+ CropRect getCropRect() const { return fCropRect; }
+
+ // Expose isolated node bounds behavior for SampleImageFilterDAG and debugging
+ SkIRect filterNodeBounds(const SkIRect& srcRect, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ return this->onFilterNodeBounds(srcRect, ctm, dir, inputRect);
+ }
+
+ /**
+ * ImageFilters can natively handle scaling and translate components in the CTM. Only some of
+ * them can handle affine (or more complex) matrices. This call returns true iff the filter
+ * and all of its (non-null) inputs can handle these more complex matrices.
+ */
+ bool canHandleComplexCTM() const;
+
+ /**
+ * Return an image filter representing this filter applied with the given ctm. This will modify
+ * the DAG as needed if this filter does not support complex CTMs and 'ctm' is not simple. The
+ * ctm matrix will be decomposed such that ctm = A*B; B will be incorporated directly into the
+ * DAG and A must be the ctm set on the context passed to filterImage(). 'remainder' will be set
+ * to A.
+ *
+ * If this filter supports complex ctms, or 'ctm' is not complex, then A = ctm and B = I. When
+ * the filter does not support complex ctms, and the ctm is complex, then A represents the
+ * extracted simple portion of the ctm, and the complex portion is baked into a new DAG using a
+ * matrix filter.
+ *
+ * This will never return null.
+ *
+ * DEPRECATED - Should draw the results of filterImage() directly with the remainder matrix.
+ */
+ sk_sp<SkImageFilter> applyCTM(const SkMatrix& ctm, SkMatrix* remainder) const;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+protected:
+ class Common {
+ public:
+ /**
+ * Attempt to unflatten the cropRect and the expected number of input filters.
+ * If any number of input filters is valid, pass -1.
+ * If this fails (i.e. corrupt buffer or contents) then return false and common will
+ * be left uninitialized.
+ * If this returns true, then inputCount() is the number of found input filters, each
+ * of which may be NULL or a valid imagefilter.
+ */
+ bool unflatten(SkReadBuffer&, int expectedInputs);
+
+ const CropRect& cropRect() const { return fCropRect; }
+ int inputCount() const { return fInputs.count(); }
+ sk_sp<SkImageFilter>* inputs() { return fInputs.begin(); }
+
+ sk_sp<SkImageFilter> getInput(int index) { return fInputs[index]; }
+
+ private:
+ CropRect fCropRect;
+ // most filters accept at most 2 input-filters
+ SkSTArray<2, sk_sp<SkImageFilter>, true> fInputs;
+ };
+
+ // Whether or not to recurse to child input filters for certain operations that walk the DAG.
+ enum class VisitChildren : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ SkImageFilter_Base(sk_sp<SkImageFilter> const* inputs, int inputCount,
+ const CropRect* cropRect);
+
+ ~SkImageFilter_Base() override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+ // DEPRECATED - Use the private context-only variant
+ virtual sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const = 0;
+
+ // DEPRECATED - Override onGetOutputLayerBounds and onGetInputLayerBounds instead. The
+ // node-specific and aggregation functions are no longer separated in the current API. A helper
+ // function is provided to do the default recursion for the common filter case.
+ virtual SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const;
+ virtual SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const;
+
+ // DEPRECATED - Call the Context-only getInputFilteredImage()
+ sk_sp<SkSpecialImage> filterInput(int index, const Context& ctx, SkIPoint* offset) const {
+ return this->getInputFilteredImage(index, ctx).imageAndOffset(offset);
+ }
+
+ // Helper function to visit each of this filter's child filters and call their
+ // onGetInputLayerBounds with the provided 'desiredOutput' and 'contentBounds'. Automatically
+ // handles null input filters. Returns the union of all of the children's input bounds.
+ skif::LayerSpace<SkIRect> visitInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds) const;
+ // Helper function to visit each of this filter's child filters and call their
+ // onGetOutputLayerBounds with the provided 'contentBounds'. Automatically handles null input
+ // filters.
+ skif::LayerSpace<SkIRect> visitOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const;
+
+ // Helper function to help with recursing through the filter DAG. It invokes filter processing
+ // on the input filter at 'index'; if that input is set to null, it returns the dynamic source
+ // image on the Context instead.
+ //
+ // Implementations must handle cases when the input filter was unable to compute an image and
+ // the returned skif::Image has a null SkSpecialImage. If the filter affects transparent black,
+ // it should explicitly handle nullptr results and press on. In the error case this behavior will
+ // produce a better result than nothing and is necessary for the clipped out case.
+ skif::FilterResult<For::kInput> getInputFilteredImage(int index,
+ const skif::Context& context) const {
+ return this->filterInput<For::kInput>(index, context);
+ }
+ // Convenience that calls filterInput with index = 0 and the most specific usage.
+ skif::FilterResult<For::kInput0> getInputFilteredImage0(const skif::Context& context) const {
+ return this->filterInput<For::kInput0>(0, context);
+ }
+ // Convenience that calls filterInput with index = 1 and the most specific usage.
+ skif::FilterResult<For::kInput1> getInputFilteredImage1(const skif::Context& context) const {
+ return this->filterInput<For::kInput1>(1, context);
+ }
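+    // For example, a two-input filter's onFilterImage() typically begins with (sketch):
+    //     skif::FilterResult<For::kInput0> background = this->getInputFilteredImage0(context);
+    //     skif::FilterResult<For::kInput1> foreground = this->getInputFilteredImage1(context);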
+
+ // DEPRECATED - Remove once cropping is handled by a separate filter
+ const CropRect* getCropRectIfSet() const {
+ return this->cropRectIsSet() ? &fCropRect : nullptr;
+ }
+
+ /** Given a "srcBounds" rect, computes destination bounds for this filter.
+ * "dstBounds" are computed by transforming the crop rect by the context's
+ * CTM, applying it to the initial bounds, and intersecting the result with
+ * the context's clip bounds. "srcBounds" (if non-null) are computed by
+ * intersecting the initial bounds with "dstBounds", to ensure that we never
+ * sample outside of the crop rect (this restriction may be relaxed in the
+ * future).
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter, although it may be
+ * necessary to provide a similar convenience function to compute the output bounds given the
+ * images returned by filterInput().
+ */
+ bool applyCropRect(const Context&, const SkIRect& srcBounds, SkIRect* dstBounds) const;
+
+ /** A variant of the above call which takes the original source bitmap and
+ * source offset. If the resulting crop rect is not entirely contained by
+ * the source bitmap's bounds, it creates a new bitmap in "result" and
+ * pads the edges with transparent black. In that case, the srcOffset is
+ * modified to be the same as the bounds, since no further adjustment is
+ * needed by the caller. This version should only be used by filters
+ * which are not capable of processing a smaller source bitmap into a
+ * larger destination.
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter.
+ */
+ sk_sp<SkSpecialImage> applyCropRectAndPad(const Context&, SkSpecialImage* src,
+ SkIPoint* srcOffset, SkIRect* bounds) const;
+
+ /**
+ * Creates a modified Context for use when recursing up the image filter DAG.
+ * The clip bounds are adjusted to accommodate any margins that this
+ * filter requires by calling this node's
+ * onFilterNodeBounds(..., kReverse_MapDirection).
+ */
+ // TODO (michaelludwig) - I don't think this is necessary to keep as protected. Other than the
+ // real use case in recursing through the DAG for filterInput(), it feels wrong for blur and
+ // other filters to need to call it.
+ Context mapContext(const Context& ctx) const;
+
+#if SK_SUPPORT_GPU
+ static sk_sp<SkSpecialImage> DrawWithFP(GrRecordingContext* context,
+ std::unique_ptr<GrFragmentProcessor> fp,
+ const SkIRect& bounds,
+ SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /**
+ * Returns a version of the passed-in image (possibly the original), that is in a colorspace
+ * with the same gamut as the one from the OutputProperties. This allows filters that do many
+ * texture samples to guarantee that any color space conversion has happened before running.
+ */
+ static sk_sp<SkSpecialImage> ImageToColorSpace(SkSpecialImage* src,
+ SkColorType colorType,
+ SkColorSpace* colorSpace);
+#endif
+
+ // If 'srcBounds' will sample outside the border of 'originalSrcBounds' (i.e., the sample
+ // will wrap around to the other side) we must preserve the far side of the src along that
+ // axis (e.g., if we will sample beyond the left edge of the src, the right side must be
+ // preserved for the repeat sampling to work).
+ // DEPRECATED - Remove once cropping is handled by a separate filter, that can also handle all
+ // tile modes (including repeat) properly
+ static SkIRect DetermineRepeatedSrcBound(const SkIRect& srcBounds,
+ const SkIVector& filterOffset,
+ const SkISize& filterSize,
+ const SkIRect& originalSrcBounds);
+
+private:
+ friend class SkImageFilter;
+ // For PurgeCache()
+ friend class SkGraphics;
+
+ static void PurgeCache();
+
+ void init(sk_sp<SkImageFilter> const* inputs, int inputCount, const CropRect* cropRect);
+
+ // Configuration points for the filter implementation, marked private since they should not
+ // need to be invoked by the subclasses. These refer to the node's specific behavior and are
+ // not responsible for aggregating the behavior of the entire filter DAG.
+
+ /**
+ * Return true (and returns a ref'd colorfilter) if this node in the DAG is just a colorfilter
+ * w/o CropRect constraints.
+ */
+ virtual bool onIsColorFilterNode(SkColorFilter** /*filterPtr*/) const { return false; }
+
+ /**
+ * Return true if this filter can map from its parameter space to a layer space described by an
+ * arbitrary transformation matrix. If this returns false, the filter only needs to worry about
+ * mapping from parameter to layer using a scale+translate matrix.
+ */
+ virtual bool onCanHandleComplexCTM() const { return false; }
+
+ /**
+ * Return true if this filter would transform transparent black pixels to a color other than
+ * transparent black. When false, optimizations can be taken to discard regions known to be
+ * transparent black and thus process fewer pixels.
+ */
+ virtual bool affectsTransparentBlack() const { return false; }
+
+ /**
+ * This is the virtual which should be overridden by the derived class to perform image
+ * filtering. Subclasses are responsible for recursing to their input filters, although the
+ * getInputFilteredImage() helpers are provided to handle all necessary details of this. If the
+ * filter has a fixed number of inputs, the getInputFilteredImage0() and getInputFilteredImage1()
+ * functions ensure the returned filtered Images have the most specific input usage.
+ *
+ * If the image cannot be created (either because of an error or if the result would be empty
+ * because it was clipped out), this should return a filtered Image with a null SkSpecialImage.
+ * In these situations, callers that do not affect transparent black can end early, since the
+ * "transparent" implicit image would be unchanged. Callers that affect transparent black need
+ * to safely handle these null and empty images and return an image filling the context's clip
+ * bounds as if its input filtered image were transparent black.
+ */
+ virtual skif::FilterResult<For::kOutput> onFilterImage(const skif::Context& context) const;
+
+ /**
+ * Calculates the necessary input layer size in order for the final output of the filter to
+ * cover the desired output bounds. The provided 'desiredOutput' represents the requested
+ * input bounds for this node's parent filter node, i.e. this function answers "what does this
+ * node require for input in order to satisfy (as its own output), the input needs of its
+ * parent?".
+ *
+ * If 'recurse' is true, this function is responsible for recursing to its child image filters
+ * and accounting for what they require to meet this filter's input requirements. It is up to
+ * the filter to determine how to aggregate these inputs, but a helper function is provided for
+ * the common case where the final required layer size is the union of the child filters'
+ * required inputs, evaluated on what this filter requires for itself. 'recurse' is kNo
+ * when mapping Contexts while actually filtering images, since the child recursion is
+ * happening at a higher level.
+ *
+ * Unlike the public getInputBounds(), all internal bounds calculations are done in the shared
+ * layer space defined by 'mapping'.
+ *
+ * The default implementation assumes that the current filter requires an input equal to
+ * 'desiredOutput', passes this down to its child filters, and returns the union of their
+ * required inputs.
+ */
+ virtual skif::LayerSpace<SkIRect> onGetInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse = VisitChildren::kYes) const;
+
+ /**
+ * Calculates the output bounds that this filter node would touch when processing an input
+ * sized to 'contentBounds'. This function is responsible for recursing to its child image
+ * filters and accounting for what they output. It is up to the filter to determine how to
+ * aggregate the outputs of its children, but a helper function is provided for the common
+ * case where the filter output is the union of its child outputs.
+ *
+ * Unlike the public getOutputBounds(), all internal bounds calculations are done in the
+ * shared layer space defined by 'mapping'.
+ *
+ * The default implementation assumes that the output of this filter is equal to the union of
+ * the outputs of its child filters evaluated with 'contentBounds'.
+ */
+ // TODO (michaelludwig) - When layerMatrix = I, this function could be used to implement
+ // onComputeFastBounds() instead of making filters implement essentially the same calculations twice
+ virtual skif::LayerSpace<SkIRect> onGetOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const;
+
+ // The actual implementation behind the protected getInputFilteredImageX() helpers, but don't
+ // expose the flexible templating to subclasses so it can't be abused.
+ template<skif::Usage kU>
+ skif::FilterResult<kU> filterInput(int index, const skif::Context& ctx) const;
+
+ SkAutoSTArray<2, sk_sp<SkImageFilter>> fInputs;
+
+ bool fUsesSrcInput;
+ CropRect fCropRect;
+ uint32_t fUniqueID; // Globally unique
+
+ typedef SkImageFilter INHERITED;
+};
+
+static inline SkImageFilter_Base* as_IFB(SkImageFilter* filter) {
+ return static_cast<SkImageFilter_Base*>(filter);
+}
+
+static inline SkImageFilter_Base* as_IFB(const sk_sp<SkImageFilter>& filter) {
+ return static_cast<SkImageFilter_Base*>(filter.get());
+}
+
+static inline const SkImageFilter_Base* as_IFB(const SkImageFilter* filter) {
+ return static_cast<const SkImageFilter_Base*>(filter);
+}
+
+/**
+ * Helper to unflatten the common data, and return nullptr if we fail.
+ */
+#define SK_IMAGEFILTER_UNFLATTEN_COMMON(localVar, expectedCount) \
+ Common localVar; \
+ do { \
+ if (!localVar.unflatten(buffer, expectedCount)) { \
+ return nullptr; \
+ } \
+ } while (0)
+
+#endif // SkImageFilter_Base_DEFINED
diff --git a/gfx/skia/skia/src/core/SkImageGenerator.cpp b/gfx/skia/skia/src/core/SkImageGenerator.cpp
new file mode 100644
index 0000000000..c51c1b0c56
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageGenerator.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkYUVAIndex.h"
+#include "src/core/SkNextID.h"
+
+SkImageGenerator::SkImageGenerator(const SkImageInfo& info, uint32_t uniqueID)
+ : fInfo(info)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID)
+{}
+
+bool SkImageGenerator::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+ if (nullptr == pixels) {
+ return false;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return false;
+ }
+
+ Options defaultOpts;
+ return this->onGetPixels(info, pixels, rowBytes, defaultOpts);
+}
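+
+// For example, a caller is expected to satisfy these same constraints (sketch; 'gen' is an
+// assumed SkImageGenerator*):
+//     SkImageInfo info = gen->getInfo();
+//     SkBitmap bm;
+//     if (bm.tryAllocPixels(info) && gen->getPixels(info, bm.getPixels(), bm.rowBytes())) {
+//         // 'bm' now holds the generated pixels
+//     }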
+
+bool SkImageGenerator::queryYUVA8(SkYUVASizeInfo* sizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace) const {
+ SkASSERT(sizeInfo);
+
+ return this->onQueryYUVA8(sizeInfo, yuvaIndices, colorSpace);
+}
+
+bool SkImageGenerator::getYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+ const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ void* planes[SkYUVASizeInfo::kMaxCount]) {
+
+ for (int i = 0; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ SkASSERT(sizeInfo.fSizes[i].fWidth >= 0);
+ SkASSERT(sizeInfo.fSizes[i].fHeight >= 0);
+ SkASSERT(sizeInfo.fWidthBytes[i] >= (size_t) sizeInfo.fSizes[i].fWidth);
+ }
+
+ int numPlanes = 0;
+ SkASSERT(SkYUVAIndex::AreValidIndices(yuvaIndices, &numPlanes));
+ SkASSERT(planes);
+ for (int i = 0; i < numPlanes; ++i) {
+ SkASSERT(planes[i]);
+ }
+
+ return this->onGetYUVA8Planes(sizeInfo, yuvaIndices, planes);
+}
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrTextureProxy.h"
+
+sk_sp<GrTextureProxy> SkImageGenerator::generateTexture(GrRecordingContext* ctx,
+ const SkImageInfo& info,
+ const SkIPoint& origin,
+ bool willNeedMipMaps) {
+ SkIRect srcRect = SkIRect::MakeXYWH(origin.x(), origin.y(), info.width(), info.height());
+ if (!SkIRect::MakeWH(fInfo.width(), fInfo.height()).contains(srcRect)) {
+ return nullptr;
+ }
+ return this->onGenerateTexture(ctx, info, origin, willNeedMipMaps);
+}
+
+sk_sp<GrTextureProxy> SkImageGenerator::onGenerateTexture(GrRecordingContext*,
+ const SkImageInfo&,
+ const SkIPoint&,
+ bool willNeedMipMaps) {
+ return nullptr;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkBitmap.h"
+#include "src/codec/SkColorTable.h"
+
+#include "include/core/SkGraphics.h"
+
+static SkGraphics::ImageGeneratorFromEncodedDataFactory gFactory;
+
+SkGraphics::ImageGeneratorFromEncodedDataFactory
+SkGraphics::SetImageGeneratorFromEncodedDataFactory(ImageGeneratorFromEncodedDataFactory factory)
+{
+ ImageGeneratorFromEncodedDataFactory prev = gFactory;
+ gFactory = factory;
+ return prev;
+}
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncoded(sk_sp<SkData> data) {
+ if (!data) {
+ return nullptr;
+ }
+ if (gFactory) {
+ if (std::unique_ptr<SkImageGenerator> generator = gFactory(data)) {
+ return generator;
+ }
+ }
+ return SkImageGenerator::MakeFromEncodedImpl(std::move(data));
+}
diff --git a/gfx/skia/skia/src/core/SkImageInfo.cpp b/gfx/skia/skia/src/core/SkImageInfo.cpp
new file mode 100644
index 0000000000..e6e12bccd9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageInfo.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkWriteBuffer.h"
+
+int SkColorTypeBytesPerPixel(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return 1;
+ case kRGB_565_SkColorType: return 2;
+ case kARGB_4444_SkColorType: return 2;
+ case kRGBA_8888_SkColorType: return 4;
+ case kBGRA_8888_SkColorType: return 4;
+ case kRGB_888x_SkColorType: return 4;
+ case kRGBA_1010102_SkColorType: return 4;
+ case kRGB_101010x_SkColorType: return 4;
+ case kGray_8_SkColorType: return 1;
+ case kRGBA_F16Norm_SkColorType: return 8;
+ case kRGBA_F16_SkColorType: return 8;
+ case kRGBA_F32_SkColorType: return 16;
+ case kR8G8_unorm_SkColorType: return 2;
+ case kA16_unorm_SkColorType: return 2;
+ case kR16G16_unorm_SkColorType: return 4;
+ case kA16_float_SkColorType: return 2;
+ case kR16G16_float_SkColorType: return 4;
+ case kR16G16B16A16_unorm_SkColorType: return 8;
+ }
+ SkUNREACHABLE;
+}
+
+bool SkColorTypeIsAlwaysOpaque(SkColorType ct) {
+ return !(kAlpha_SkColorTypeComponentFlag & SkColorTypeComponentFlags(ct));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+int SkColorInfo::bytesPerPixel() const { return SkColorTypeBytesPerPixel(fColorType); }
+
+int SkColorInfo::shiftPerPixel() const { return SkColorTypeShiftPerPixel(fColorType); }
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkImageInfo::computeOffset(int x, int y, size_t rowBytes) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+ return SkColorTypeComputeOffset(this->colorType(), x, y, rowBytes);
+}
+
+size_t SkImageInfo::computeByteSize(size_t rowBytes) const {
+ if (0 == this->height()) {
+ return 0;
+ }
+ SkSafeMath safe;
+ size_t bytes = safe.add(safe.mul(safe.addInt(this->height(), -1), rowBytes),
+ safe.mul(this->width(), this->bytesPerPixel()));
+ return safe.ok() ? bytes : SIZE_MAX;
+}
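+
+// Worked example of the formula above, bytes = (height - 1) * rowBytes + width * bytesPerPixel:
+// a 100x50 kRGBA_8888_SkColorType image with rowBytes = 512 needs
+//     49 * 512 + 100 * 4 = 25088 + 400 = 25488 bytes,
+// since the final row only requires its tightly-packed width rather than the full stride.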
+
+SkImageInfo SkImageInfo::MakeS32(int width, int height, SkAlphaType at) {
+ return SkImageInfo({width, height}, {kN32_SkColorType, at, SkColorSpace::MakeSRGB()});
+}
+
+#ifdef SK_DEBUG
+void SkImageInfo::validate() const {
+ SkASSERT(fDimensions.width() >= 0);
+ SkASSERT(fDimensions.height() >= 0);
+ SkASSERT(SkColorTypeIsValid(this->colorType()));
+ SkASSERT(SkAlphaTypeIsValid(this->alphaType()));
+}
+#endif
+
+bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical) {
+ switch (colorType) {
+ case kUnknown_SkColorType:
+ alphaType = kUnknown_SkAlphaType;
+ break;
+ case kAlpha_8_SkColorType: // fall-through
+ case kA16_unorm_SkColorType: // fall-through
+ case kA16_float_SkColorType:
+ if (kUnpremul_SkAlphaType == alphaType) {
+ alphaType = kPremul_SkAlphaType;
+ }
+ // fall-through
+ case kARGB_4444_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kRGBA_1010102_SkColorType:
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F32_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ if (kUnknown_SkAlphaType == alphaType) {
+ return false;
+ }
+ break;
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ alphaType = kOpaque_SkAlphaType;
+ break;
+ default:
+ return false;
+ }
+ if (canonical) {
+ *canonical = alphaType;
+ }
+ return true;
+}
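+
+// For example (illustrative sketch): opaque-only color types canonicalize any alpha type to
+// opaque.
+//     SkAlphaType canonical;
+//     SkAssertResult(SkColorTypeValidateAlphaType(kRGB_565_SkColorType,
+//                                                 kPremul_SkAlphaType, &canonical));
+//     SkASSERT(kOpaque_SkAlphaType == canonical);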
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/image/SkReadPixelsRec.h"
+
+bool SkReadPixelsRec::trim(int srcWidth, int srcHeight) {
+ if (nullptr == fPixels || fRowBytes < fInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 >= fInfo.width() || 0 >= fInfo.height()) {
+ return false;
+ }
+
+ int x = fX;
+ int y = fY;
+ SkIRect srcR = SkIRect::MakeXYWH(x, y, fInfo.width(), fInfo.height());
+ if (!srcR.intersect({0, 0, srcWidth, srcHeight})) {
+ return false;
+ }
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ // we negate and add them so UBSAN (pointer-overflow) doesn't get confused.
+ fPixels = ((char*)fPixels + -y*fRowBytes + -x*fInfo.bytesPerPixel());
+ // the intersect may have shrunk info's logical size
+ fInfo = fInfo.makeDimensions(srcR.size());
+ fX = srcR.x();
+ fY = srcR.y();
+
+ return true;
+}
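+
+// For example, requesting a read at (x, y) = (-3, -2) from a large enough source clips the rect
+// to start at (0, 0), and fPixels is advanced by 2*fRowBytes + 3*bytesPerPixel so it addresses
+// the first pixel that survived the clip.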
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkWritePixelsRec.h"
+
+bool SkWritePixelsRec::trim(int dstWidth, int dstHeight) {
+ if (nullptr == fPixels || fRowBytes < fInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 >= fInfo.width() || 0 >= fInfo.height()) {
+ return false;
+ }
+
+ int x = fX;
+ int y = fY;
+ SkIRect dstR = SkIRect::MakeXYWH(x, y, fInfo.width(), fInfo.height());
+ if (!dstR.intersect({0, 0, dstWidth, dstHeight})) {
+ return false;
+ }
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ // we negate and add them so UBSAN (pointer-overflow) doesn't get confused.
+ fPixels = ((const char*)fPixels + -y*fRowBytes + -x*fInfo.bytesPerPixel());
+ // the intersect may have shrunk info's logical size
+ fInfo = fInfo.makeDimensions(dstR.size());
+ fX = dstR.x();
+ fY = dstR.y();
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkImagePriv.h b/gfx/skia/skia/src/core/SkImagePriv.h
new file mode 100644
index 0000000000..0fd44338df
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImagePriv.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImagePriv_DEFINED
+#define SkImagePriv_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTileMode.h"
+
+enum SkCopyPixelsMode {
+ kIfMutable_SkCopyPixelsMode, //!< only copy src pixels if they are marked mutable
+ kAlways_SkCopyPixelsMode, //!< always copy src pixels (even if they are marked immutable)
+ kNever_SkCopyPixelsMode, //!< never copy src pixels (even if they are marked mutable)
+};
+
+// A good size for creating shader contexts on the stack.
+enum {kSkBlitterContextSize = 3332};
+
+// If alloc is non-nullptr, it will be used to allocate the returned SkShader, and MUST outlive
+// the SkShader.
+sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkTileMode, SkTileMode,
+ const SkMatrix* localMatrix, SkCopyPixelsMode);
+
+// Convenience function to return a shader that implements the shader+image behavior defined for
+// drawImage/Bitmap where the paint's shader is ignored when the bitmap is a color image, but
+// properly compose them together when it is an alpha image. This allows the returned paint to
+// be assigned to a paint clone without discarding the original behavior.
+sk_sp<SkShader> SkMakeBitmapShaderForPaint(const SkPaint& paint, const SkBitmap& src,
+ SkTileMode, SkTileMode,
+ const SkMatrix* localMatrix, SkCopyPixelsMode);
+
+/**
+ * Examines the bitmap to decide if it can share the existing pixelRef, or
+ * if it needs to make a deep-copy of the pixels.
+ *
+ * The bitmap's pixelref will be shared if either the bitmap is marked as
+ * immutable, or CopyPixelsMode allows it. Shared pixel refs are also
+ * locked when kLocked_SharedPixelRefMode is specified.
+ *
+ * Passing kLocked_SharedPixelRefMode allows the image's peekPixels() method
+ * to succeed, but it will force any lazy decodes/generators to execute if
+ * they exist on the pixelref.
+ *
+ * It is illegal to call this with a texture-backed bitmap.
+ *
+ * If the bitmap's colortype cannot be converted into a corresponding
+ * SkImageInfo, or the bitmap's pixels cannot be accessed, this will return
+ * nullptr.
+ */
+extern SK_API sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap&, SkCopyPixelsMode);
+
+// Given an image created from SkMakeImageFromRasterBitmap, return its pixelref. This
+// may be called to see if the surface and the image share the same pixelref,
+// in which case the surface may need to perform a copy-on-write.
+extern const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* rasterImage);
+
+/**
+ * Will attempt to upload and lock the contents of the image as a texture, so that subsequent
+ * draws to a gpu-target will come from that texture (and not by looking at the original image
+ * src). In particular this is intended to use the texture even if the image's original content
+ * changes subsequent to this call (i.e. the src is mutable!).
+ *
+ * All successful calls must be balanced by an equal number of calls to SkImage_unpinAsTexture().
+ *
+ * Once in this "pinned" state, the image has all of the same thread restrictions that exist
+ * for a natively created gpu image (e.g. SkImage::MakeFromTexture)
+ * - all drawing, pinning, unpinning must happen in the same thread as the GrContext.
+ *
+ * @return true if the image was successfully uploaded and locked into a texture
+ */
+bool SkImage_pinAsTexture(const SkImage*, GrContext*);
+
+/**
+ * The balancing call to a successful invocation of SkImage_pinAsTexture. When a balanced number of
+ * calls have been made, then the "pinned" texture is free to be purged, etc. This also means that a
+ * subsequent "pin" call will look at the original content again, and if its uniqueID/generationID
+ * has changed, then a newer texture will be uploaded/pinned.
+ *
+ * The context passed to unpin must match the one passed to pin.
+ */
+void SkImage_unpinAsTexture(const SkImage*, GrContext*);
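+//
+// Balanced usage sketch (illustrative; `image` and `ctx` are assumed to be a
+// raster-backed SkImage* and the GrContext* of the target thread):
+//
+//   if (SkImage_pinAsTexture(image, ctx)) {
+//       // ... gpu draws now sample the pinned texture ...
+//       SkImage_unpinAsTexture(image, ctx);   // must balance the successful pin
+//   }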
+
+/**
+ * Returns the bounds of the image relative to its encoded buffer. For all non-lazy images,
+ * this returns (0,0,width,height). For a lazy-image, it may return a subset of that rect.
+ */
+SkIRect SkImage_getSubset(const SkImage*);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLRUCache.h b/gfx/skia/skia/src/core/SkLRUCache.h
new file mode 100644
index 0000000000..9c5e671684
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLRUCache.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLRUCache_DEFINED
+#define SkLRUCache_DEFINED
+
+#include "include/private/SkChecksum.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkTInternalLList.h"
+
+/**
+ * A generic LRU cache.
+ */
+template <typename K, typename V, typename HashK = SkGoodHash>
+class SkLRUCache : public SkNoncopyable {
+private:
+ struct Entry {
+ Entry(const K& key, V&& value)
+ : fKey(key)
+ , fValue(std::move(value)) {}
+
+ K fKey;
+ V fValue;
+
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);
+ };
+
+public:
+ explicit SkLRUCache(int maxCount)
+ : fMaxCount(maxCount) {}
+
+ ~SkLRUCache() {
+ Entry* node = fLRU.head();
+ while (node) {
+ fLRU.remove(node);
+ delete node;
+ node = fLRU.head();
+ }
+ }
+
+ V* find(const K& key) {
+ Entry** value = fMap.find(key);
+ if (!value) {
+ return nullptr;
+ }
+ Entry* entry = *value;
+ if (entry != fLRU.head()) {
+ fLRU.remove(entry);
+ fLRU.addToHead(entry);
+ } // else it's already at head position, don't need to do anything
+ return &entry->fValue;
+ }
+
+ V* insert(const K& key, V value) {
+ Entry* entry = new Entry(key, std::move(value));
+ fMap.set(entry);
+ fLRU.addToHead(entry);
+ while (fMap.count() > fMaxCount) {
+ this->remove(fLRU.tail()->fKey);
+ }
+ return &entry->fValue;
+ }
+
+ int count() {
+ return fMap.count();
+ }
+
+ template <typename Fn> // f(V*)
+ void foreach(Fn&& fn) {
+ typename SkTInternalLList<Entry>::Iter iter;
+ for (Entry* e = iter.init(fLRU, SkTInternalLList<Entry>::Iter::kHead_IterStart); e;
+ e = iter.next()) {
+ fn(&e->fValue);
+ }
+ }
+
+ void reset() {
+ fMap.reset();
+ for (Entry* e = fLRU.head(); e; e = fLRU.head()) {
+ fLRU.remove(e);
+ delete e;
+ }
+ }
+
+private:
+ struct Traits {
+ static const K& GetKey(Entry* e) {
+ return e->fKey;
+ }
+
+ static uint32_t Hash(const K& k) {
+ return HashK()(k);
+ }
+ };
+
+ void remove(const K& key) {
+ Entry** value = fMap.find(key);
+ SkASSERT(value);
+ Entry* entry = *value;
+ SkASSERT(key == entry->fKey);
+ fMap.remove(key);
+ fLRU.remove(entry);
+ delete entry;
+ }
+
+ int fMaxCount;
+ SkTHashTable<Entry*, K, Traits> fMap;
+ SkTInternalLList<Entry> fLRU;
+};
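+
+// Minimal usage sketch (illustrative; int keys/values work with the default
+// SkGoodHash):
+//
+//   SkLRUCache<int, int> cache(2);
+//   cache.insert(1, 100);
+//   cache.insert(2, 200);
+//   cache.insert(3, 300);               // evicts key 1, the least recently used
+//   SkASSERT(nullptr == cache.find(1));
+//   SkASSERT(300 == *cache.find(3));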
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.cpp b/gfx/skia/skia/src/core/SkLatticeIter.cpp
new file mode 100644
index 0000000000..0195031506
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRect.h"
+#include "src/core/SkLatticeIter.h"
+
+/**
+ * Divs must be in increasing order with no duplicates.
+ */
+static bool valid_divs(const int* divs, int count, int start, int end) {
+ int prev = start - 1;
+ for (int i = 0; i < count; i++) {
+ if (prev >= divs[i] || divs[i] >= end) {
+ return false;
+ }
+ prev = divs[i];
+ }
+
+ return true;
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkCanvas::Lattice& lattice) {
+ SkIRect totalBounds = SkIRect::MakeWH(width, height);
+ SkASSERT(lattice.fBounds);
+ const SkIRect latticeBounds = *lattice.fBounds;
+ if (!totalBounds.contains(latticeBounds)) {
+ return false;
+ }
+
+ bool zeroXDivs = lattice.fXCount <= 0 || (1 == lattice.fXCount &&
+ latticeBounds.fLeft == lattice.fXDivs[0]);
+ bool zeroYDivs = lattice.fYCount <= 0 || (1 == lattice.fYCount &&
+ latticeBounds.fTop == lattice.fYDivs[0]);
+ if (zeroXDivs && zeroYDivs) {
+ return false;
+ }
+
+ return valid_divs(lattice.fXDivs, lattice.fXCount, latticeBounds.fLeft, latticeBounds.fRight)
+ && valid_divs(lattice.fYDivs, lattice.fYCount, latticeBounds.fTop, latticeBounds.fBottom);
+}
+
+/**
+ * Count the number of pixels that are in "scalable" patches.
+ */
+static int count_scalable_pixels(const int32_t* divs, int numDivs, bool firstIsScalable,
+ int start, int end) {
+ if (0 == numDivs) {
+ return firstIsScalable ? end - start : 0;
+ }
+
+ int i;
+ int count;
+ if (firstIsScalable) {
+ count = divs[0] - start;
+ i = 1;
+ } else {
+ count = 0;
+ i = 0;
+ }
+
+ for (; i < numDivs; i += 2) {
+ // Alternatively, we could use |top| and |bottom| as variable names, instead of
+ // |left| and |right|.
+ int left = divs[i];
+ int right = (i + 1 < numDivs) ? divs[i + 1] : end;
+ count += right - left;
+ }
+
+ return count;
+}
+
+/**
+ * Set points for the src and dst rects on subsequent draw calls.
+ */
+static void set_points(float* dst, int* src, const int* divs, int divCount, int srcFixed,
+ int srcScalable, int srcStart, int srcEnd, float dstStart, float dstEnd,
+ bool isScalable) {
+ float dstLen = dstEnd - dstStart;
+ float scale;
+ if (srcFixed <= dstLen) {
+ // This is the "normal" case, where we scale the "scalable" patches and leave
+ // the other patches fixed.
+ scale = (dstLen - ((float) srcFixed)) / ((float) srcScalable);
+ } else {
+ // In this case, we eliminate the "scalable" patches and scale the "fixed" patches.
+ scale = dstLen / ((float) srcFixed);
+ }
+
+ src[0] = srcStart;
+ dst[0] = dstStart;
+ for (int i = 0; i < divCount; i++) {
+ src[i + 1] = divs[i];
+ int srcDelta = src[i + 1] - src[i];
+ float dstDelta;
+ if (srcFixed <= dstLen) {
+ dstDelta = isScalable ? scale * srcDelta : srcDelta;
+ } else {
+ dstDelta = isScalable ? 0.0f : scale * srcDelta;
+ }
+ dst[i + 1] = dst[i] + dstDelta;
+
+ // Alternate between "scalable" and "fixed" patches.
+ isScalable = !isScalable;
+ }
+
+ src[divCount + 1] = srcEnd;
+ dst[divCount + 1] = dstEnd;
+}
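+
+// Worked example of the scale computation above (illustrative numbers): with
+// srcFixed = 20, srcScalable = 30, and dstLen = 80 we take the "normal" branch,
+// so scale = (80 - 20) / 30 = 2: every "scalable" patch doubles in width while
+// the "fixed" patches keep their 20 pixels in total.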
+
+SkLatticeIter::SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst) {
+ const int* xDivs = lattice.fXDivs;
+ const int origXCount = lattice.fXCount;
+ const int* yDivs = lattice.fYDivs;
+ const int origYCount = lattice.fYCount;
+ SkASSERT(lattice.fBounds);
+ const SkIRect src = *lattice.fBounds;
+
+ // In the x-dimension, the first rectangle always starts at x = 0 and is "scalable".
+ // If xDiv[0] is 0, it indicates that the first rectangle is degenerate, so the
+    // first real rectangle is "scalable" in the x-direction.
+ //
+ // The same interpretation applies to the y-dimension.
+ //
+ // As we move left to right across the image, alternating patches will be "fixed" or
+ // "scalable" in the x-direction. Similarly, as move top to bottom, alternating
+ // patches will be "fixed" or "scalable" in the y-direction.
+ int xCount = origXCount;
+ int yCount = origYCount;
+ bool xIsScalable = (xCount > 0 && src.fLeft == xDivs[0]);
+ if (xIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // xDiv. It is always implied that we start at the edge of the bounds.
+ xDivs++;
+ xCount--;
+ }
+ bool yIsScalable = (yCount > 0 && src.fTop == yDivs[0]);
+ if (yIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // yDiv. It is always implied that we start at the edge of the bounds.
+ yDivs++;
+ yCount--;
+ }
+
+ // Count "scalable" and "fixed" pixels in each dimension.
+ int xCountScalable = count_scalable_pixels(xDivs, xCount, xIsScalable, src.fLeft, src.fRight);
+ int xCountFixed = src.width() - xCountScalable;
+ int yCountScalable = count_scalable_pixels(yDivs, yCount, yIsScalable, src.fTop, src.fBottom);
+ int yCountFixed = src.height() - yCountScalable;
+
+ fSrcX.reset(xCount + 2);
+ fDstX.reset(xCount + 2);
+ set_points(fDstX.begin(), fSrcX.begin(), xDivs, xCount, xCountFixed, xCountScalable,
+ src.fLeft, src.fRight, dst.fLeft, dst.fRight, xIsScalable);
+
+ fSrcY.reset(yCount + 2);
+ fDstY.reset(yCount + 2);
+ set_points(fDstY.begin(), fSrcY.begin(), yDivs, yCount, yCountFixed, yCountScalable,
+ src.fTop, src.fBottom, dst.fTop, dst.fBottom, yIsScalable);
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = (xCount + 1) * (yCount + 1);
+ fNumRectsToDraw = fNumRectsInLattice;
+
+ if (lattice.fRectTypes) {
+ fRectTypes.push_back_n(fNumRectsInLattice);
+ fColors.push_back_n(fNumRectsInLattice);
+
+ const SkCanvas::Lattice::RectType* flags = lattice.fRectTypes;
+ const SkColor* colors = lattice.fColors;
+
+ bool hasPadRow = (yCount != origYCount);
+ bool hasPadCol = (xCount != origXCount);
+ if (hasPadRow) {
+ // The first row of rects are all empty, skip the first row of flags.
+ flags += origXCount + 1;
+ colors += origXCount + 1;
+ }
+
+ int i = 0;
+ for (int y = 0; y < yCount + 1; y++) {
+ for (int x = 0; x < origXCount + 1; x++) {
+ if (0 == x && hasPadCol) {
+ // The first column of rects are all empty. Skip a rect.
+ flags++;
+ colors++;
+ continue;
+ }
+
+ fRectTypes[i] = *flags;
+ fColors[i] = SkCanvas::Lattice::kFixedColor == *flags ? *colors : 0;
+ flags++;
+ colors++;
+ i++;
+ }
+ }
+
+ for (int j = 0; j < fRectTypes.count(); j++) {
+ if (SkCanvas::Lattice::kTransparent == fRectTypes[j]) {
+ fNumRectsToDraw--;
+ }
+ }
+ }
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkIRect& center) {
+ return !center.isEmpty() && SkIRect::MakeWH(width, height).contains(center);
+}
+
+SkLatticeIter::SkLatticeIter(int w, int h, const SkIRect& c, const SkRect& dst) {
+ SkASSERT(SkIRect::MakeWH(w, h).contains(c));
+
+ fSrcX.reset(4);
+ fSrcY.reset(4);
+ fDstX.reset(4);
+ fDstY.reset(4);
+
+ fSrcX[0] = 0;
+ fSrcX[1] = SkIntToScalar(c.fLeft);
+ fSrcX[2] = SkIntToScalar(c.fRight);
+ fSrcX[3] = SkIntToScalar(w);
+
+ fSrcY[0] = 0;
+ fSrcY[1] = SkIntToScalar(c.fTop);
+ fSrcY[2] = SkIntToScalar(c.fBottom);
+ fSrcY[3] = SkIntToScalar(h);
+
+ fDstX[0] = dst.fLeft;
+ fDstX[1] = dst.fLeft + SkIntToScalar(c.fLeft);
+ fDstX[2] = dst.fRight - SkIntToScalar(w - c.fRight);
+ fDstX[3] = dst.fRight;
+
+ fDstY[0] = dst.fTop;
+ fDstY[1] = dst.fTop + SkIntToScalar(c.fTop);
+ fDstY[2] = dst.fBottom - SkIntToScalar(h - c.fBottom);
+ fDstY[3] = dst.fBottom;
+
+ if (fDstX[1] > fDstX[2]) {
+ fDstX[1] = fDstX[0] + (fDstX[3] - fDstX[0]) * c.fLeft / (w - c.width());
+ fDstX[2] = fDstX[1];
+ }
+
+ if (fDstY[1] > fDstY[2]) {
+ fDstY[1] = fDstY[0] + (fDstY[3] - fDstY[0]) * c.fTop / (h - c.height());
+ fDstY[2] = fDstY[1];
+ }
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = 9;
+ fNumRectsToDraw = 9;
+}
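+
+// Worked example (illustrative): for w = h = 100, center c = {25, 25, 75, 75},
+// and dst = {0, 0, 200, 200}, fDstX becomes {0, 25, 175, 200}: both 25px fixed
+// borders keep their width and the 50px center column stretches to 150px.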
+
+bool SkLatticeIter::next(SkIRect* src, SkRect* dst, bool* isFixedColor, SkColor* fixedColor) {
+ int currRect = fCurrX + fCurrY * (fSrcX.count() - 1);
+ if (currRect == fNumRectsInLattice) {
+ return false;
+ }
+
+ const int x = fCurrX;
+ const int y = fCurrY;
+ SkASSERT(x >= 0 && x < fSrcX.count() - 1);
+ SkASSERT(y >= 0 && y < fSrcY.count() - 1);
+
+ if (fSrcX.count() - 1 == ++fCurrX) {
+ fCurrX = 0;
+ fCurrY += 1;
+ }
+
+ if (fRectTypes.count() > 0
+ && SkToBool(SkCanvas::Lattice::kTransparent == fRectTypes[currRect])) {
+ return this->next(src, dst, isFixedColor, fixedColor);
+ }
+
+ src->setLTRB(fSrcX[x], fSrcY[y], fSrcX[x + 1], fSrcY[y + 1]);
+ dst->setLTRB(fDstX[x], fDstY[y], fDstX[x + 1], fDstY[y + 1]);
+ if (isFixedColor && fixedColor) {
+ *isFixedColor = fRectTypes.count() > 0
+ && SkToBool(SkCanvas::Lattice::kFixedColor == fRectTypes[currRect]);
+ if (*isFixedColor) {
+ *fixedColor = fColors[currRect];
+ }
+ }
+ return true;
+}
+
+void SkLatticeIter::mapDstScaleTranslate(const SkMatrix& matrix) {
+ SkASSERT(matrix.isScaleTranslate());
+ SkScalar tx = matrix.getTranslateX();
+ SkScalar sx = matrix.getScaleX();
+ for (int i = 0; i < fDstX.count(); i++) {
+ fDstX[i] = fDstX[i] * sx + tx;
+ }
+
+ SkScalar ty = matrix.getTranslateY();
+ SkScalar sy = matrix.getScaleY();
+ for (int i = 0; i < fDstY.count(); i++) {
+ fDstY[i] = fDstY[i] * sy + ty;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.h b/gfx/skia/skia/src/core/SkLatticeIter.h
new file mode 100644
index 0000000000..16eef21b7a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLatticeIter_DEFINED
+#define SkLatticeIter_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkTArray.h"
+
+struct SkIRect;
+struct SkRect;
+
+/**
+ * Dissect a lattice request into a sequence of src-rect / dst-rect pairs
+ */
+class SK_API SkLatticeIter {
+public:
+
+ static bool Valid(int imageWidth, int imageHeight, const SkCanvas::Lattice& lattice);
+
+ SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst);
+
+ static bool Valid(int imageWidth, int imageHeight, const SkIRect& center);
+
+ SkLatticeIter(int imageWidth, int imageHeight, const SkIRect& center, const SkRect& dst);
+
+ /**
+ * While it returns true, use src/dst to draw the image/bitmap. Optional parameters
+ * isFixedColor and fixedColor specify if the rectangle is filled with a fixed color.
+ * If (*isFixedColor) is true, then (*fixedColor) contains the rectangle color.
+ */
+ bool next(SkIRect* src, SkRect* dst, bool* isFixedColor = nullptr,
+ SkColor* fixedColor = nullptr);
+
+ /** Version of above that converts the integer src rect to a scalar rect. */
+ bool next(SkRect* src, SkRect* dst, bool* isFixedColor = nullptr,
+ SkColor* fixedColor = nullptr) {
+ SkIRect isrcR;
+ if (this->next(&isrcR, dst, isFixedColor, fixedColor)) {
+ *src = SkRect::Make(isrcR);
+ return true;
+ }
+ return false;
+ }
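+
+    /**
+     *  Typical draw loop (illustrative sketch; `canvas`, `image`, `paint`,
+     *  `lattice`, and `dstRect` are assumed caller-provided):
+     *
+     *      SkLatticeIter iter(lattice, dstRect);
+     *      SkIRect srcR;
+     *      SkRect dstR;
+     *      while (iter.next(&srcR, &dstR)) {
+     *          canvas->drawImageRect(image, SkRect::Make(srcR), dstR, &paint);
+     *      }
+     */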
+
+ /**
+ * Apply a matrix to the dst points.
+ */
+ void mapDstScaleTranslate(const SkMatrix& matrix);
+
+ /**
+ * Returns the number of rects that will actually be drawn.
+ */
+ int numRectsToDraw() const {
+ return fNumRectsToDraw;
+ }
+
+private:
+ SkTArray<int> fSrcX;
+ SkTArray<int> fSrcY;
+ SkTArray<SkScalar> fDstX;
+ SkTArray<SkScalar> fDstY;
+ SkTArray<SkCanvas::Lattice::RectType> fRectTypes;
+ SkTArray<SkColor> fColors;
+
+ int fCurrX;
+ int fCurrY;
+ int fNumRectsInLattice;
+ int fNumRectsToDraw;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLeanWindows.h b/gfx/skia/skia/src/core/SkLeanWindows.h
new file mode 100644
index 0000000000..ec8dd58cdc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLeanWindows.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLeanWindows_DEFINED
+#define SkLeanWindows_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# define WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX
+# define NOMINMAX_WAS_LOCALLY_DEFINED
+# endif
+#
+# include <windows.h>
+#
+# ifdef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_LEAN_AND_MEAN
+# endif
+# ifdef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX
+# endif
+#endif
+
+#endif // SkLeanWindows_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLineClipper.cpp b/gfx/skia/skia/src/core/SkLineClipper.cpp
new file mode 100644
index 0000000000..6a71a03028
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.cpp
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTo.h"
+#include "src/core/SkLineClipper.h"
+
+#include <utility>
+
+template <typename T> T pin_unsorted(T value, T limit0, T limit1) {
+ if (limit1 < limit0) {
+ using std::swap;
+ swap(limit0, limit1);
+ }
+ // now the limits are sorted
+ SkASSERT(limit0 <= limit1);
+
+ if (value < limit0) {
+ value = limit0;
+ } else if (value > limit1) {
+ value = limit1;
+ }
+ return value;
+}
+
+// return X coordinate of intersection with horizontal line at Y
+static SkScalar sect_with_horizontal(const SkPoint src[2], SkScalar Y) {
+ SkScalar dy = src[1].fY - src[0].fY;
+ if (SkScalarNearlyZero(dy)) {
+ return SkScalarAve(src[0].fX, src[1].fX);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = X0 + ((double)Y - Y0) * (X1 - X0) / (Y1 - Y0);
+
+        // The computed X value might still exceed [X0..X1] due to floating-point
+        // rounding when the doubles were added and subtracted, so we have to pin
+        // the answer :(
+ return (float)pin_unsorted(result, X0, X1);
+ }
+}
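+
+// Worked example (illustrative): for src = {(0, 0), (10, 20)} and Y = 5, the
+// intersection is X = 0 + (5 - 0) * (10 - 0) / (20 - 0) = 2.5.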
+
+// return Y coordinate of intersection with vertical line at X
+static SkScalar sect_with_vertical(const SkPoint src[2], SkScalar X) {
+ SkScalar dx = src[1].fX - src[0].fX;
+ if (SkScalarNearlyZero(dx)) {
+ return SkScalarAve(src[0].fY, src[1].fY);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = Y0 + ((double)X - X0) * (Y1 - Y0) / (X1 - X0);
+ return (float)result;
+ }
+}
+
+static SkScalar sect_clamp_with_vertical(const SkPoint src[2], SkScalar x) {
+ SkScalar y = sect_with_vertical(src, x);
+ // Our caller expects y to be between src[0].fY and src[1].fY (unsorted), but due to the
+ // numerics of floats/doubles, we might have computed a value slightly outside of that,
+ // so we have to manually clamp afterwards.
+ // See skbug.com/7491
+ return pin_unsorted(y, src[0].fY, src[1].fY);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool nestedLT(SkScalar a, SkScalar b, SkScalar dim) {
+ return a <= b && (a < b || dim > 0);
+}
+
+// returns true if outer contains inner, even if inner is empty.
+// note: outer.contains(inner) always returns false if inner is empty.
+static inline bool containsNoEmptyCheck(const SkRect& outer,
+ const SkRect& inner) {
+ return outer.fLeft <= inner.fLeft && outer.fTop <= inner.fTop &&
+ outer.fRight >= inner.fRight && outer.fBottom >= inner.fBottom;
+}
+
+bool SkLineClipper::IntersectLine(const SkPoint src[2], const SkRect& clip,
+ SkPoint dst[2]) {
+ SkRect bounds;
+
+ bounds.set(src[0], src[1]);
+ if (containsNoEmptyCheck(clip, bounds)) {
+ if (src != dst) {
+ memcpy(dst, src, 2 * sizeof(SkPoint));
+ }
+ return true;
+ }
+ // check for no overlap, and only permit coincident edges if the line
+    // and the edge are collinear
+ if (nestedLT(bounds.fRight, clip.fLeft, bounds.width()) ||
+ nestedLT(clip.fRight, bounds.fLeft, bounds.width()) ||
+ nestedLT(bounds.fBottom, clip.fTop, bounds.height()) ||
+ nestedLT(clip.fBottom, bounds.fTop, bounds.height())) {
+ return false;
+ }
+
+ int index0, index1;
+
+ if (src[0].fY < src[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ SkPoint tmp[2];
+ memcpy(tmp, src, sizeof(tmp));
+
+ // now compute Y intersections
+ if (tmp[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(src, clip.fTop), clip.fTop);
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(src, clip.fBottom), clip.fBottom);
+ }
+
+ if (tmp[0].fX < tmp[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ // check for quick-reject in X again, now that we may have been chopped
+ if ((tmp[index1].fX <= clip.fLeft || tmp[index0].fX >= clip.fRight)) {
+ // usually we will return false, but we don't if the line is vertical and coincident
+ // with the clip.
+ if (tmp[0].fX != tmp[1].fX || tmp[0].fX < clip.fLeft || tmp[0].fX > clip.fRight) {
+ return false;
+ }
+ }
+
+ if (tmp[index0].fX < clip.fLeft) {
+ tmp[index0].set(clip.fLeft, sect_with_vertical(src, clip.fLeft));
+ }
+ if (tmp[index1].fX > clip.fRight) {
+ tmp[index1].set(clip.fRight, sect_with_vertical(src, clip.fRight));
+ }
+#ifdef SK_DEBUG
+ bounds.set(tmp[0], tmp[1]);
+ SkASSERT(containsNoEmptyCheck(clip, bounds));
+#endif
+ memcpy(dst, tmp, sizeof(tmp));
+ return true;
+}
+
+#ifdef SK_DEBUG
+// return value between the two limits, where the limits are either ascending
+// or descending.
+static bool is_between_unsorted(SkScalar value,
+ SkScalar limit0, SkScalar limit1) {
+ if (limit0 < limit1) {
+ return limit0 <= value && value <= limit1;
+ } else {
+ return limit1 <= value && value <= limit0;
+ }
+}
+#endif
+
+int SkLineClipper::ClipLine(const SkPoint pts[], const SkRect& clip, SkPoint lines[],
+ bool canCullToTheRight) {
+ int index0, index1;
+
+ if (pts[0].fY < pts[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+    // Check if we're completely clipped out in Y (above or below).
+
+ if (pts[index1].fY <= clip.fTop) { // we're above the clip
+ return 0;
+ }
+ if (pts[index0].fY >= clip.fBottom) { // we're below the clip
+ return 0;
+ }
+
+ // Chop in Y to produce a single segment, stored in tmp[0..1]
+
+ SkPoint tmp[2];
+ memcpy(tmp, pts, sizeof(tmp));
+
+ // now compute intersections
+ if (pts[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(pts, clip.fTop), clip.fTop);
+ SkASSERT(is_between_unsorted(tmp[index0].fX, pts[0].fX, pts[1].fX));
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(pts, clip.fBottom), clip.fBottom);
+ SkASSERT(is_between_unsorted(tmp[index1].fX, pts[0].fX, pts[1].fX));
+ }
+
+ // Chop it into 1..3 segments that are wholly within the clip in X.
+
+ // temp storage for up to 3 segments
+ SkPoint resultStorage[kMaxPoints];
+ SkPoint* result; // points to our results, either tmp or resultStorage
+ int lineCount = 1;
+ bool reverse;
+
+ if (pts[0].fX < pts[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ reverse = false;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ reverse = true;
+ }
+
+ if (tmp[index1].fX <= clip.fLeft) { // wholly to the left
+ tmp[0].fX = tmp[1].fX = clip.fLeft;
+ result = tmp;
+ reverse = false;
+ } else if (tmp[index0].fX >= clip.fRight) { // wholly to the right
+ if (canCullToTheRight) {
+ return 0;
+ }
+ tmp[0].fX = tmp[1].fX = clip.fRight;
+ result = tmp;
+ reverse = false;
+ } else {
+ result = resultStorage;
+ SkPoint* r = result;
+
+ if (tmp[index0].fX < clip.fLeft) {
+ r->set(clip.fLeft, tmp[index0].fY);
+ r += 1;
+ r->set(clip.fLeft, sect_clamp_with_vertical(tmp, clip.fLeft));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ } else {
+ *r = tmp[index0];
+ }
+ r += 1;
+
+ if (tmp[index1].fX > clip.fRight) {
+ r->set(clip.fRight, sect_clamp_with_vertical(tmp, clip.fRight));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ r += 1;
+ r->set(clip.fRight, tmp[index1].fY);
+ } else {
+ *r = tmp[index1];
+ }
+
+ lineCount = SkToInt(r - result);
+ }
+
+ // Now copy the results into the caller's lines[] parameter
+ if (reverse) {
+ // copy the pts in reverse order to maintain winding order
+ for (int i = 0; i <= lineCount; i++) {
+ lines[lineCount - i] = result[i];
+ }
+ } else {
+ memcpy(lines, result, (lineCount + 1) * sizeof(SkPoint));
+ }
+ return lineCount;
+}
diff --git a/gfx/skia/skia/src/core/SkLineClipper.h b/gfx/skia/skia/src/core/SkLineClipper.h
new file mode 100644
index 0000000000..5c2a7dc376
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLineClipper_DEFINED
+#define SkLineClipper_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkLineClipper {
+public:
+ enum {
+ kMaxPoints = 4,
+ kMaxClippedLineSegments = kMaxPoints - 1
+ };
+
+ /* Clip the line pts[0]...pts[1] against clip, ignoring segments that
+ lie completely above or below the clip. For portions to the left or
+ right, turn those into vertical line segments that are aligned to the
+ edge of the clip.
+
+ Return the number of line segments that result, and store the end-points
+ of those segments sequentially in lines as follows:
+ 1st segment: lines[0]..lines[1]
+ 2nd segment: lines[1]..lines[2]
+ 3rd segment: lines[2]..lines[3]
+ */
+ static int ClipLine(const SkPoint pts[2], const SkRect& clip,
+ SkPoint lines[kMaxPoints], bool canCullToTheRight);
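+
+    /*  Usage sketch (illustrative; `pts` and `clip` assumed caller-provided):
+
+            SkPoint lines[SkLineClipper::kMaxPoints];
+            int count = SkLineClipper::ClipLine(pts, clip, lines, false);
+            for (int i = 0; i < count; ++i) {
+                // segment i runs from lines[i] to lines[i + 1]
+            }
+    */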
+
+ /* Intersect the line segment against the rect. If there is a non-empty
+ resulting segment, return true and set dst[] to that segment. If not,
+ return false and ignore dst[].
+
+ ClipLine is specialized for scan-conversion, as it adds vertical
+ segments on the sides to show where the line extended beyond the
+ left or right sides. IntersectLine does not.
+ */
+ static bool IntersectLine(const SkPoint src[2], const SkRect& clip, SkPoint dst[2]);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
new file mode 100644
index 0000000000..bb6dc3f13f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "src/core/SkLocalMatrixImageFilter.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+
+sk_sp<SkImageFilter> SkLocalMatrixImageFilter::Make(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input) {
+ if (!input) {
+ return nullptr;
+ }
+ if (localM.isIdentity()) {
+ return input;
+ }
+ if (!as_IFB(input)->canHandleComplexCTM() && !localM.isScaleTranslate()) {
+ // Nothing we can do at this point
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkLocalMatrixImageFilter(localM, input));
+}
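+
+// Usage sketch (illustrative; assumes an existing blur filter):
+//
+//   sk_sp<SkImageFilter> blur = SkImageFilters::Blur(3.0f, 3.0f, nullptr);
+//   sk_sp<SkImageFilter> shifted =
+//           SkLocalMatrixImageFilter::Make(SkMatrix::MakeTrans(10, 0), blur);
+//   // `shifted` evaluates the blur as if the local translation were
+//   // pre-concatenated onto the CTM; an identity matrix returns `blur` as-is.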
+
+SkLocalMatrixImageFilter::SkLocalMatrixImageFilter(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fLocalM(localM) {
+}
+
+sk_sp<SkFlattenable> SkLocalMatrixImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ return SkLocalMatrixImageFilter::Make(lm, common.getInput(0));
+}
+
+void SkLocalMatrixImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fLocalM);
+}
+
+sk_sp<SkSpecialImage> SkLocalMatrixImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ Context localCtx = ctx.withNewMapping(ctx.mapping().concatLocal(fLocalM));
+ return this->filterInput(0, localCtx, offset);
+}
+
+SkIRect SkLocalMatrixImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ return this->getInput(0)->filterBounds(src, SkMatrix::Concat(ctm, fLocalM), dir, inputRect);
+}
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
new file mode 100644
index 0000000000..f20773ca0a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixImageFilter_DEFINED
+#define SkLocalMatrixImageFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "src/core/SkImageFilter_Base.h"
+
+/**
+ * Wraps another imagefilter + matrix, such that using this filter will give the same result
+ * as using the wrapped filter with the matrix applied to its context.
+ */
+class SkLocalMatrixImageFilter : public SkImageFilter_Base {
+public:
+ static sk_sp<SkImageFilter> Make(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+ bool onCanHandleComplexCTM() const override { return true; }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLocalMatrixImageFilter)
+
+ SkLocalMatrixImageFilter(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+ SkMatrix fLocalM;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMD5.cpp b/gfx/skia/skia/src/core/SkMD5.cpp
new file mode 100644
index 0000000000..158bb63b74
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * The following code is based on the description in RFC 1321.
+ * http://www.ietf.org/rfc/rfc1321.txt
+ */
+
+//The following macros can be defined to affect the MD5 code generated.
+//SK_MD5_CLEAR_DATA causes all intermediate state to be overwritten with 0's.
+//SK_CPU_LENDIAN allows 32 bit <=> 8 bit conversions without copies (if alligned).
+//SK_CPU_FAST_UNALIGNED_ACCESS allows 32 bit <=> 8 bit conversions without copies if SK_CPU_LENDIAN.
+
+#include "src/core/SkMD5.h"
+#include <string.h>
+
+/** MD5 basic transformation. Transforms state based on block. */
+static void transform(uint32_t state[4], const uint8_t block[64]);
+
+/** Encodes input into output (4 little endian 32 bit values). */
+static void encode(uint8_t output[16], const uint32_t input[4]);
+
+/** Encodes input into output (little endian 64 bit value). */
+static void encode(uint8_t output[8], const uint64_t input);
+
+/** Decodes input (4 little endian 32 bit values) into storage, if required. */
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]);
+
+SkMD5::SkMD5() : byteCount(0) {
+ // These are magic numbers from the specification.
+ this->state[0] = 0x67452301;
+ this->state[1] = 0xefcdab89;
+ this->state[2] = 0x98badcfe;
+ this->state[3] = 0x10325476;
+}
+
+bool SkMD5::write(const void* buf, size_t inputLength) {
+ const uint8_t* input = reinterpret_cast<const uint8_t*>(buf);
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int bufferAvailable = 64 - bufferIndex;
+
+ unsigned int inputIndex;
+ if (inputLength >= bufferAvailable) {
+ if (bufferIndex) {
+ memcpy(&this->buffer[bufferIndex], input, bufferAvailable);
+ transform(this->state, this->buffer);
+ inputIndex = bufferAvailable;
+ } else {
+ inputIndex = 0;
+ }
+
+ for (; inputIndex + 63 < inputLength; inputIndex += 64) {
+ transform(this->state, &input[inputIndex]);
+ }
+
+ bufferIndex = 0;
+ } else {
+ inputIndex = 0;
+ }
+
+ memcpy(&this->buffer[bufferIndex], &input[inputIndex], inputLength - inputIndex);
+
+ this->byteCount += inputLength;
+ return true;
+}
+
+SkMD5::Digest SkMD5::finish() {
+ SkMD5::Digest digest;
+ // Get the number of bits before padding.
+ uint8_t bits[8];
+ encode(bits, this->byteCount << 3);
+
+ // Pad out to 56 mod 64.
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int paddingLength = (bufferIndex < 56) ? (56 - bufferIndex) : (120 - bufferIndex);
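+    // e.g. a bufferIndex of 60 pads 120 - 60 = 60 bytes; together with the
+    // 8 length bytes appended below, the total lands on a 64-byte boundary.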
+ static uint8_t PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ (void)this->write(PADDING, paddingLength);
+
+ // Append length (length before padding, will cause final update).
+ (void)this->write(bits, 8);
+
+ // Write out digest.
+ encode(digest.data, this->state);
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear state.
+ memset(this, 0, sizeof(*this));
+#endif
+ return digest;
+}
+
+struct F { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ //return (x & y) | ((~x) & z);
+    return ((y ^ z) & x) ^ z; //equivalent but faster
+}};
+
+struct G { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return (x & z) | (y & (~z));
+    //return ((x ^ y) & z) ^ y; //equivalent but slower
+}};
+
+struct H { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return x ^ y ^ z;
+}};
+
+struct I { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return y ^ (x | (~z));
+}};
+
+/** Rotates x left n bits. */
+static inline uint32_t rotate_left(uint32_t x, uint8_t n) {
+ return (x << n) | (x >> (32 - n));
+}
+
+template <typename T>
+static inline void operation(T operation, uint32_t& a, uint32_t b, uint32_t c, uint32_t d,
+ uint32_t x, uint8_t s, uint32_t t) {
+ a = b + rotate_left(a + operation(b, c, d) + x + t, s);
+}
+
+static void transform(uint32_t state[4], const uint8_t block[64]) {
+ uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
+
+ uint32_t storage[16];
+ const uint32_t* X = decode(storage, block);
+
+ // Round 1
+ operation(F(), a, b, c, d, X[ 0], 7, 0xd76aa478); // 1
+ operation(F(), d, a, b, c, X[ 1], 12, 0xe8c7b756); // 2
+ operation(F(), c, d, a, b, X[ 2], 17, 0x242070db); // 3
+ operation(F(), b, c, d, a, X[ 3], 22, 0xc1bdceee); // 4
+ operation(F(), a, b, c, d, X[ 4], 7, 0xf57c0faf); // 5
+ operation(F(), d, a, b, c, X[ 5], 12, 0x4787c62a); // 6
+ operation(F(), c, d, a, b, X[ 6], 17, 0xa8304613); // 7
+ operation(F(), b, c, d, a, X[ 7], 22, 0xfd469501); // 8
+ operation(F(), a, b, c, d, X[ 8], 7, 0x698098d8); // 9
+ operation(F(), d, a, b, c, X[ 9], 12, 0x8b44f7af); // 10
+ operation(F(), c, d, a, b, X[10], 17, 0xffff5bb1); // 11
+ operation(F(), b, c, d, a, X[11], 22, 0x895cd7be); // 12
+ operation(F(), a, b, c, d, X[12], 7, 0x6b901122); // 13
+ operation(F(), d, a, b, c, X[13], 12, 0xfd987193); // 14
+ operation(F(), c, d, a, b, X[14], 17, 0xa679438e); // 15
+ operation(F(), b, c, d, a, X[15], 22, 0x49b40821); // 16
+
+ // Round 2
+ operation(G(), a, b, c, d, X[ 1], 5, 0xf61e2562); // 17
+ operation(G(), d, a, b, c, X[ 6], 9, 0xc040b340); // 18
+ operation(G(), c, d, a, b, X[11], 14, 0x265e5a51); // 19
+ operation(G(), b, c, d, a, X[ 0], 20, 0xe9b6c7aa); // 20
+ operation(G(), a, b, c, d, X[ 5], 5, 0xd62f105d); // 21
+ operation(G(), d, a, b, c, X[10], 9, 0x2441453); // 22
+ operation(G(), c, d, a, b, X[15], 14, 0xd8a1e681); // 23
+ operation(G(), b, c, d, a, X[ 4], 20, 0xe7d3fbc8); // 24
+ operation(G(), a, b, c, d, X[ 9], 5, 0x21e1cde6); // 25
+ operation(G(), d, a, b, c, X[14], 9, 0xc33707d6); // 26
+ operation(G(), c, d, a, b, X[ 3], 14, 0xf4d50d87); // 27
+ operation(G(), b, c, d, a, X[ 8], 20, 0x455a14ed); // 28
+ operation(G(), a, b, c, d, X[13], 5, 0xa9e3e905); // 29
+ operation(G(), d, a, b, c, X[ 2], 9, 0xfcefa3f8); // 30
+ operation(G(), c, d, a, b, X[ 7], 14, 0x676f02d9); // 31
+ operation(G(), b, c, d, a, X[12], 20, 0x8d2a4c8a); // 32
+
+ // Round 3
+ operation(H(), a, b, c, d, X[ 5], 4, 0xfffa3942); // 33
+ operation(H(), d, a, b, c, X[ 8], 11, 0x8771f681); // 34
+ operation(H(), c, d, a, b, X[11], 16, 0x6d9d6122); // 35
+ operation(H(), b, c, d, a, X[14], 23, 0xfde5380c); // 36
+ operation(H(), a, b, c, d, X[ 1], 4, 0xa4beea44); // 37
+ operation(H(), d, a, b, c, X[ 4], 11, 0x4bdecfa9); // 38
+ operation(H(), c, d, a, b, X[ 7], 16, 0xf6bb4b60); // 39
+ operation(H(), b, c, d, a, X[10], 23, 0xbebfbc70); // 40
+ operation(H(), a, b, c, d, X[13], 4, 0x289b7ec6); // 41
+ operation(H(), d, a, b, c, X[ 0], 11, 0xeaa127fa); // 42
+ operation(H(), c, d, a, b, X[ 3], 16, 0xd4ef3085); // 43
+ operation(H(), b, c, d, a, X[ 6], 23, 0x4881d05); // 44
+ operation(H(), a, b, c, d, X[ 9], 4, 0xd9d4d039); // 45
+ operation(H(), d, a, b, c, X[12], 11, 0xe6db99e5); // 46
+ operation(H(), c, d, a, b, X[15], 16, 0x1fa27cf8); // 47
+ operation(H(), b, c, d, a, X[ 2], 23, 0xc4ac5665); // 48
+
+ // Round 4
+ operation(I(), a, b, c, d, X[ 0], 6, 0xf4292244); // 49
+ operation(I(), d, a, b, c, X[ 7], 10, 0x432aff97); // 50
+ operation(I(), c, d, a, b, X[14], 15, 0xab9423a7); // 51
+ operation(I(), b, c, d, a, X[ 5], 21, 0xfc93a039); // 52
+ operation(I(), a, b, c, d, X[12], 6, 0x655b59c3); // 53
+ operation(I(), d, a, b, c, X[ 3], 10, 0x8f0ccc92); // 54
+ operation(I(), c, d, a, b, X[10], 15, 0xffeff47d); // 55
+ operation(I(), b, c, d, a, X[ 1], 21, 0x85845dd1); // 56
+ operation(I(), a, b, c, d, X[ 8], 6, 0x6fa87e4f); // 57
+ operation(I(), d, a, b, c, X[15], 10, 0xfe2ce6e0); // 58
+ operation(I(), c, d, a, b, X[ 6], 15, 0xa3014314); // 59
+ operation(I(), b, c, d, a, X[13], 21, 0x4e0811a1); // 60
+ operation(I(), a, b, c, d, X[ 4], 6, 0xf7537e82); // 61
+ operation(I(), d, a, b, c, X[11], 10, 0xbd3af235); // 62
+ operation(I(), c, d, a, b, X[ 2], 15, 0x2ad7d2bb); // 63
+ operation(I(), b, c, d, a, X[ 9], 21, 0xeb86d391); // 64
+
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear sensitive information.
+ if (X == &storage) {
+ memset(storage, 0, sizeof(storage));
+ }
+#endif
+}
+
+static void encode(uint8_t output[16], const uint32_t input[4]) {
+ for (size_t i = 0, j = 0; i < 4; i++, j += 4) {
+ output[j ] = (uint8_t) (input[i] & 0xff);
+ output[j+1] = (uint8_t)((input[i] >> 8) & 0xff);
+ output[j+2] = (uint8_t)((input[i] >> 16) & 0xff);
+ output[j+3] = (uint8_t)((input[i] >> 24) & 0xff);
+ }
+}
+
+static void encode(uint8_t output[8], const uint64_t input) {
+ output[0] = (uint8_t) (input & 0xff);
+ output[1] = (uint8_t)((input >> 8) & 0xff);
+ output[2] = (uint8_t)((input >> 16) & 0xff);
+ output[3] = (uint8_t)((input >> 24) & 0xff);
+ output[4] = (uint8_t)((input >> 32) & 0xff);
+ output[5] = (uint8_t)((input >> 40) & 0xff);
+ output[6] = (uint8_t)((input >> 48) & 0xff);
+ output[7] = (uint8_t)((input >> 56) & 0xff);
+}
+
+static inline bool is_aligned(const void *pointer, size_t byte_count) {
+ return reinterpret_cast<uintptr_t>(pointer) % byte_count == 0;
+}
+
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]) {
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_FAST_UNALIGNED_ACCESS)
+ return reinterpret_cast<const uint32_t*>(input);
+#else
+#if defined(SK_CPU_LENDIAN)
+ if (is_aligned(input, 4)) {
+ return reinterpret_cast<const uint32_t*>(input);
+ }
+#endif
+ for (size_t i = 0, j = 0; j < 64; i++, j += 4) {
+ storage[i] = ((uint32_t)input[j ]) |
+ (((uint32_t)input[j+1]) << 8) |
+ (((uint32_t)input[j+2]) << 16) |
+ (((uint32_t)input[j+3]) << 24);
+ }
+ return storage;
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkMD5.h b/gfx/skia/skia/src/core/SkMD5.h
new file mode 100644
index 0000000000..2d04eae578
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMD5_DEFINED
+#define SkMD5_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/private/SkTo.h"
+
+/* Calculate a 128-bit MD5 message-digest of the bytes sent to this stream. */
+class SkMD5 : public SkWStream {
+public:
+ SkMD5();
+
+ /** Processes input, adding it to the digest.
+ Calling this after finish is undefined. */
+ bool write(const void* buffer, size_t size) final;
+
+ size_t bytesWritten() const final { return SkToSizeT(this->byteCount); }
+
+ struct Digest {
+ uint8_t data[16];
+ bool operator ==(Digest const& other) const {
+ return 0 == memcmp(data, other.data, sizeof(data));
+ }
+ bool operator !=(Digest const& other) const { return !(*this == other); }
+ };
+
+ /** Computes and returns the digest. */
+ Digest finish();
+
+private:
+ uint64_t byteCount; // number of bytes, modulo 2^64
+ uint32_t state[4]; // state (ABCD)
+ uint8_t buffer[64]; // input buffer
+};
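+
+// Usage sketch (illustrative):
+//
+//   SkMD5 md5;
+//   md5.write("abc", 3);
+//   SkMD5::Digest digest = md5.finish();
+//   // digest.data now holds the RFC 1321 test vector for "abc":
+//   // 900150983cd24fb0d6963f7d28e17f72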
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMSAN.h b/gfx/skia/skia/src/core/SkMSAN.h
new file mode 100644
index 0000000000..acde144016
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMSAN.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMSAN_DEFINED
+#define SkMSAN_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <string.h>
+
+// Typically declared in LLVM's msan_interface.h. Easier for us to just re-declare.
+extern "C" {
+ void __msan_check_mem_is_initialized(const volatile void*, size_t);
+ void __msan_unpoison (const volatile void*, size_t);
+}
+
+// Code that requires initialized inputs can call this to make it clear that
+// the blame for use of uninitialized data belongs further up the call stack.
+static inline void sk_msan_assert_initialized(const void* begin, const void* end) {
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_check_mem_is_initialized(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
+
+// Lie to MSAN that this range of memory is initialized.
+// This can hide serious problems if overused. Every use of this should refer to a bug.
+static inline void sk_msan_mark_initialized(const void* begin, const void* end, const char* skbug) {
+ SkASSERT(skbug && 0 != strcmp(skbug, ""));
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_unpoison(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
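+
+// Usage sketch (illustrative): a routine that reads a caller-provided buffer
+// can shift MSAN blame for uninitialized bytes to its caller:
+//
+//   void consume(const uint8_t* buf, size_t len) {
+//       sk_msan_assert_initialized(buf, buf + len);
+//       // ... safe to read buf[0 .. len) ...
+//   }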
+
+#endif  // SkMSAN_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMakeUnique.h b/gfx/skia/skia/src/core/SkMakeUnique.h
new file mode 100644
index 0000000000..860ea2e8a7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMakeUnique.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMakeUnique_DEFINED
+#define SkMakeUnique_DEFINED
+
+#include <memory>
+
+namespace skstd {
+
+// std::make_unique is in C++14
+template<typename T, typename... Args>
+std::unique_ptr<T> make_unique(Args&&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+template<typename T>
+std::unique_ptr<T> make_unique_default(size_t n) {
+ return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]);
+}
+
+}
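+
+// Usage sketch (illustrative):
+//
+//   auto paint = skstd::make_unique<SkPaint>();              // new SkPaint()
+//   auto bytes = skstd::make_unique_default<uint8_t[]>(64);  // default-initialized
+//   // (unlike std::make_unique<T[]>(n), which value-initializes the array)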
+
+#endif // SkMakeUnique_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMallocPixelRef.cpp b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
new file mode 100644
index 0000000000..a9e14780cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMallocPixelRef.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/SkMalloc.h"
+#include "src/core/SkSafeMath.h"
+
+void* sk_calloc_throw(size_t count, size_t elemSize) {
+ return sk_calloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_throw(size_t count, size_t elemSize) {
+ return sk_malloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize) {
+ return sk_realloc_throw(buffer, SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_canfail(size_t count, size_t elemSize) {
+ return sk_malloc_canfail(SkSafeMath::Mul(count, elemSize));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool is_valid(const SkImageInfo& info) {
+ if (info.width() < 0 || info.height() < 0 ||
+ (unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType ||
+ (unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType)
+ {
+ return false;
+ }
+ return true;
+}
+
+sk_sp<SkPixelRef> SkMallocPixelRef::MakeAllocate(const SkImageInfo& info, size_t rowBytes) {
+ if (rowBytes == 0) {
+ rowBytes = info.minRowBytes();
+ // rowBytes can still be zero, if it overflowed (width * bytesPerPixel > size_t)
+ // or if colortype is unknown
+ }
+ if (!is_valid(info) || !info.validRowBytes(rowBytes)) {
+ return nullptr;
+ }
+ size_t size = 0;
+ if (!info.isEmpty() && rowBytes) {
+ size = info.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+ }
+ void* addr = sk_calloc_canfail(size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ struct PixelRef final : public SkPixelRef {
+ PixelRef(int w, int h, void* s, size_t r) : SkPixelRef(w, h, s, r) {}
+ ~PixelRef() override { sk_free(this->pixels()); }
+ };
+ return sk_sp<SkPixelRef>(new PixelRef(info.width(), info.height(), addr, rowBytes));
+}
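+
+// Usage sketch (illustrative): zero-initialized N32 pixels at the minimum
+// row stride (passing rowBytes == 0 requests info.minRowBytes()):
+//
+//   SkImageInfo info = SkImageInfo::MakeN32Premul(32, 32);
+//   sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, 0);
+//   // pr is nullptr on invalid info or byte-size overflow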
+
+sk_sp<SkPixelRef> SkMallocPixelRef::MakeWithData(const SkImageInfo& info,
+ size_t rowBytes,
+ sk_sp<SkData> data) {
+ SkASSERT(data != nullptr);
+ if (!is_valid(info)) {
+ return nullptr;
+ }
+ // TODO: what should we return if computeByteSize returns 0?
+ // - the info was empty?
+ // - we overflowed computing the size?
+ if ((rowBytes < info.minRowBytes()) || (data->size() < info.computeByteSize(rowBytes))) {
+ return nullptr;
+ }
+ struct PixelRef final : public SkPixelRef {
+ sk_sp<SkData> fData;
+ PixelRef(int w, int h, void* s, size_t r, sk_sp<SkData> d)
+ : SkPixelRef(w, h, s, r), fData(std::move(d)) {}
+ };
+ void* pixels = const_cast<void*>(data->data());
+ sk_sp<SkPixelRef> pr(new PixelRef(info.width(), info.height(), pixels, rowBytes,
+ std::move(data)));
+ pr->setImmutable(); // since we were created with (immutable) data
+ return pr;
+}
diff --git a/gfx/skia/skia/src/core/SkMask.cpp b/gfx/skia/skia/src/core/SkMask.cpp
new file mode 100644
index 0000000000..76f093f982
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMask.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMask.h"
+
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkSafeMath.h"
+
+/** returns the product if it is positive and fits in 31 bits. Otherwise this
+ returns 0.
+ */
+static int32_t safeMul32(int32_t a, int32_t b) {
+ int64_t size = sk_64_mul(a, b);
+ if (size > 0 && SkTFitsIn<int32_t>(size)) {
+ return size;
+ }
+ return 0;
+}
+
+size_t SkMask::computeImageSize() const {
+ return safeMul32(fBounds.height(), fRowBytes);
+}
+
+size_t SkMask::computeTotalImageSize() const {
+ size_t size = this->computeImageSize();
+ if (fFormat == SkMask::k3D_Format) {
+ size = safeMul32(SkToS32(size), 3);
+ }
+ return size;
+}
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other.
+*/
+uint8_t* SkMask::AllocImage(size_t size, AllocType at) {
+ size_t aligned_size = SkSafeMath::Align4(size);
+ unsigned flags = SK_MALLOC_THROW;
+ if (at == kZeroInit_Alloc) {
+ flags |= SK_MALLOC_ZERO_INITIALIZE;
+ }
+ return static_cast<uint8_t*>(sk_malloc_flags(aligned_size, flags));
+}
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other.
+*/
+void SkMask::FreeImage(void* image) {
+ sk_free(image);
+}
+
+SkMask SkMask::PrepareDestination(int radiusX, int radiusY, const SkMask& src) {
+ SkSafeMath safe;
+
+ SkMask dst;
+ // dstW = srcW + 2 * radiusX;
+ size_t dstW = safe.add(src.fBounds.width(), safe.add(radiusX, radiusX));
+ // dstH = srcH + 2 * radiusY;
+ size_t dstH = safe.add(src.fBounds.height(), safe.add(radiusY, radiusY));
+
+ if (!SkTFitsIn<int>(dstW) || !SkTFitsIn<int>(dstH)) {
+ dst.fBounds.setEmpty();
+ dst.fRowBytes = 0;
+ } else {
+ dst.fBounds.setWH(SkTo<int>(dstW), SkTo<int>(dstH));
+ dst.fBounds.offset(src.fBounds.x(), src.fBounds.y());
+ dst.fBounds.offset(-radiusX, -radiusY);
+ dst.fRowBytes = SkTo<uint32_t>(dstW);
+ }
+
+ dst.fImage = nullptr;
+ dst.fFormat = SkMask::kA8_Format;
+
+ size_t toAlloc = safe.mul(dstW, dstH);
+
+ if (safe && src.fImage != nullptr) {
+ dst.fImage = SkMask::AllocImage(toAlloc);
+ }
+
+ return dst;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const int gMaskFormatToShift[] = {
+ ~0, // BW -- not supported
+ 0, // A8
+ 0, // 3D
+ 2, // ARGB32
+ 1, // LCD16
+ 0, // SDF
+};
+
+static int maskFormatToShift(SkMask::Format format) {
+ SkASSERT((unsigned)format < SK_ARRAY_COUNT(gMaskFormatToShift));
+ SkASSERT(SkMask::kBW_Format != format);
+ return gMaskFormatToShift[format];
+}
+
+void* SkMask::getAddr(int x, int y) const {
+ SkASSERT(kBW_Format != fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage);
+
+ char* addr = (char*)fImage;
+ addr += (y - fBounds.fTop) * fRowBytes;
+ addr += (x - fBounds.fLeft) << maskFormatToShift(fFormat);
+ return addr;
+}
diff --git a/gfx/skia/skia/src/core/SkMask.h b/gfx/skia/skia/src/core/SkMask.h
new file mode 100644
index 0000000000..29ae48e6ed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMask.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMask_DEFINED
+#define SkMask_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTemplates.h"
+
+#include <memory>
+
+/** \class SkMask
+ SkMask is used to describe alpha bitmaps, either 1bit, 8bit, or
+ the 3-channel 3D format. These are passed to SkMaskFilter objects.
+*/
+struct SkMask {
+ SkMask() : fImage(nullptr) {}
+
+ enum Format {
+ kBW_Format, //!< 1bit per pixel mask (e.g. monochrome)
+ kA8_Format, //!< 8bits per pixel mask (e.g. antialiasing)
+        k3D_Format,     //!< 3 8bit per pixel planes: alpha, mul, add
+ kARGB32_Format, //!< SkPMColor
+ kLCD16_Format, //!< 565 alpha for r/g/b
+ kSDF_Format, //!< 8bits representing signed distance field
+ };
+
+ enum {
+ kCountMaskFormats = kSDF_Format + 1
+ };
+
+ uint8_t* fImage;
+ SkIRect fBounds;
+ uint32_t fRowBytes;
+ Format fFormat;
+
+ static bool IsValidFormat(uint8_t format) { return format < kCountMaskFormats; }
+
+ /** Returns true if the mask is empty: i.e. it has an empty bounds.
+ */
+ bool isEmpty() const { return fBounds.isEmpty(); }
+
+ /** Return the byte size of the mask, assuming only 1 plane.
+ Does not account for k3D_Format. For that, use computeTotalImageSize().
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeImageSize() const;
+
+ /** Return the byte size of the mask, taking into account
+ any extra planes (e.g. k3D_Format).
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeTotalImageSize() const;
+
+ /** Returns the address of the byte that holds the specified bit.
+ Asserts that the mask is kBW_Format, and that x,y are in range.
+        x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr1(int x, int y) const {
+ SkASSERT(kBW_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ return fImage + ((x - fBounds.fLeft) >> 3) + (y - fBounds.fTop) * fRowBytes;
+ }
+
+ /** Returns the address of the specified byte.
+ Asserts that the mask is kA8_Format, and that x,y are in range.
+        x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr8(int x, int y) const {
+ SkASSERT(kA8_Format == fFormat || kSDF_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ return fImage + x - fBounds.fLeft + (y - fBounds.fTop) * fRowBytes;
+ }
+
+ /**
+ * Return the address of the specified 16bit mask. In the debug build,
+ * this asserts that the mask's format is kLCD16_Format, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint16_t* getAddrLCD16(int x, int y) const {
+ SkASSERT(kLCD16_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ uint16_t* row = (uint16_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Return the address of the specified 32bit mask. In the debug build,
+ * this asserts that the mask's format is 32bits, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint32_t* getAddr32(int x, int y) const {
+ SkASSERT(kARGB32_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ uint32_t* row = (uint32_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Returns the address of the specified pixel, computing the pixel-size
+ * at runtime based on the mask format. This will be slightly slower than
+ * using one of the routines where the format is implied by the name
+ * e.g. getAddr8 or getAddr32.
+ *
+ * x,y must be contained by the mask's bounds (this is asserted in the
+ * debug build, but not checked in the release build.)
+ *
+ * This should not be called with kBW_Format, as it will give unspecified
+ * results (and assert in the debug build).
+ */
+ void* getAddr(int x, int y) const;
+
+ enum AllocType {
+ kUninit_Alloc,
+ kZeroInit_Alloc,
+ };
+ static uint8_t* AllocImage(size_t bytes, AllocType = kUninit_Alloc);
+ static void FreeImage(void* image);
+
+ enum CreateMode {
+ kJustComputeBounds_CreateMode, //!< compute bounds and return
+        kJustRenderImage_CreateMode,           //!< render into preallocated mask
+ kComputeBoundsAndRenderImage_CreateMode //!< compute bounds, alloc image and render into it
+ };
+
+ /** Iterates over the coverage values along a scanline in a given SkMask::Format. Provides
+ * constructor, copy constructor for creating
+ * operator++, operator-- for iterating over the coverage values on a scanline
+ * operator>>= to add row bytes
+ * operator* to get the coverage value at the current location
+ * operator< to compare two iterators
+ */
+ template <Format F> struct AlphaIter;
+
+ /**
+ * Returns initial destination mask data padded by radiusX and radiusY
+ */
+ static SkMask PrepareDestination(int radiusX, int radiusY, const SkMask& src);
+};
+
+template <> struct SkMask::AlphaIter<SkMask::kBW_Format> {
+ AlphaIter(const uint8_t* ptr, int offset) : fPtr(ptr), fOffset(7 - offset) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr), fOffset(that.fOffset) {}
+ AlphaIter& operator++() {
+ if (0 < fOffset ) {
+ --fOffset;
+ } else {
+ ++fPtr;
+ fOffset = 7;
+ }
+ return *this;
+ }
+ AlphaIter& operator--() {
+ if (fOffset < 7) {
+ ++fOffset;
+ } else {
+ --fPtr;
+ fOffset = 0;
+ }
+ return *this;
+ }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint8_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return ((*fPtr) >> fOffset) & 1 ? 0xFF : 0; }
+ bool operator<(const AlphaIter& that) const {
+ return fPtr < that.fPtr || (fPtr == that.fPtr && fOffset > that.fOffset);
+ }
+ const uint8_t* fPtr;
+ int fOffset;
+};
+
+template <> struct SkMask::AlphaIter<SkMask::kA8_Format> {
+ AlphaIter(const uint8_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint8_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return *fPtr; }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint8_t* fPtr;
+};
+
+template <> struct SkMask::AlphaIter<SkMask::kARGB32_Format> {
+ AlphaIter(const uint32_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint32_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return SkGetPackedA32(*fPtr); }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint32_t* fPtr;
+};
+
+template <> struct SkMask::AlphaIter<SkMask::kLCD16_Format> {
+ AlphaIter(const uint16_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint16_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const {
+ unsigned packed = *fPtr;
+ unsigned r = SkPacked16ToR32(packed);
+ unsigned g = SkPacked16ToG32(packed);
+ unsigned b = SkPacked16ToB32(packed);
+ return (r + g + b) / 3;
+ }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint16_t* fPtr;
+};
+
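+// A minimal usage sketch, assuming `m` is an already-populated kA8_Format SkMask
+// (the variable names here are illustrative only):
+//
+//   const uint8_t* row = m.fImage;
+//   for (int y = m.fBounds.top(); y < m.fBounds.bottom(); ++y) {
+//       SkMask::AlphaIter<SkMask::kA8_Format> iter(row);
+//       SkMask::AlphaIter<SkMask::kA8_Format> stop(row + m.fBounds.width());
+//       for (; iter < stop; ++iter) {
+//           uint8_t coverage = *iter;  // always 0x00..0xFF, whatever the mask format
+//       }
+//       row += m.fRowBytes;            // operator>>= steps an iterator the same way
+//   }
+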
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * \using SkAutoMaskFreeImage
+ *
+ * Stack class used to manage the fImage buffer in a SkMask.
+ * When this object goes out of scope, the buffer is freed with SkMask::FreeImage().
+ */
+using SkAutoMaskFreeImage = std::unique_ptr<uint8_t, SkFunctionWrapper<decltype(SkMask::FreeImage), SkMask::FreeImage>>;
+#define SkAutoMaskFreeImage(...) SK_REQUIRE_LOCAL_VAR(SkAutoMaskFreeImage)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp b/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp
new file mode 100644
index 0000000000..66cc1147db
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp
@@ -0,0 +1,1051 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskBlurFilter.h"
+
+#include "include/core/SkColorPriv.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkGaussFilter.h"
+
+#include <cmath>
+#include <climits>
+
+namespace {
+static const double kPi = 3.14159265358979323846264338327950288;
+
+class PlanGauss final {
+public:
+ explicit PlanGauss(double sigma) {
+ auto possibleWindow = static_cast<int>(floor(sigma * 3 * sqrt(2 * kPi) / 4 + 0.5));
+ auto window = std::max(1, possibleWindow);
+
+ fPass0Size = window - 1;
+ fPass1Size = window - 1;
+ fPass2Size = (window & 1) == 1 ? window - 1 : window;
+
+ // Calculating the border is tricky. I will go through the odd case, which is simpler, and
+ // then through the even case. Given a stack of filters seven wide for the odd case of
+ // three passes:
+ //
+ //    S
+ // aaaAaaa
+ // bbbBbbb
+ // cccCccc
+ //    D
+ //
+ // The furthest changed pixel is when the filters are in the following configuration.
+ //
+ //             S
+ //       aaaAaaa
+ //    bbbBbbb
+ // cccCccc
+ //    D
+ //
+ // The A pixel is calculated using the value S, the B uses A, and the C uses B, and
+ // finally D is C. So, with a window size of seven the border is nine. In general, the
+ // border is 3*((window - 1)/2).
+ //
+ // For even cases the filter stack is more complicated. The spec specifies two passes
+ // of even filters and a final pass of odd filters. A stack for a width of six looks like
+ // this.
+ //
+ //    S
+ // aaaAaa
+ //  bbBbbb
+ // cccCccc
+ //    D
+ //
+ // The furthest pixel looks like this.
+ //
+ //            S
+ //       aaaAaa
+ //     bbBbbb
+ // cccCccc
+ //    D
+ //
+ // For a window of size six, the border value is eight. In general, the border is
+ // 3 * (window/2) - 1.
+ fBorder = (window & 1) == 1 ? 3 * ((window - 1) / 2) : 3 * (window / 2) - 1;
+ fSlidingWindow = 2 * fBorder + 1;
+
+ // If the window is odd then the divisor is just window ^ 3; otherwise,
+ // it is window * window * (window + 1) = window ^ 3 + window ^ 2.
+ auto window2 = window * window;
+ auto window3 = window2 * window;
+ auto divisor = (window & 1) == 1 ? window3 : window3 + window2;
+
+ fWeight = static_cast<uint64_t>(round(1.0 / divisor * (1ull << 32)));
+ }
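+
+ // A worked instance of the arithmetic above, assuming sigma = 2.0:
+ //   possibleWindow = floor(2.0 * 3 * sqrt(2 * kPi) / 4 + 0.5) = floor(4.26) = 4;
+ //   window = 4 (even), so fPass0Size = fPass1Size = 3 and fPass2Size = 4;
+ //   fBorder = 3 * (4 / 2) - 1 = 5, fSlidingWindow = 11;
+ //   divisor = 4^3 + 4^2 = 80, and fWeight = round(2^32 / 80).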
+
+ size_t bufferSize() const { return fPass0Size + fPass1Size + fPass2Size; }
+
+ int border() const { return fBorder; }
+
+public:
+ class Scan {
+ public:
+ Scan(uint64_t weight, int noChangeCount,
+ uint32_t* buffer0, uint32_t* buffer0End,
+ uint32_t* buffer1, uint32_t* buffer1End,
+ uint32_t* buffer2, uint32_t* buffer2End)
+ : fWeight{weight}
+ , fNoChangeCount{noChangeCount}
+ , fBuffer0{buffer0}
+ , fBuffer0End{buffer0End}
+ , fBuffer1{buffer1}
+ , fBuffer1End{buffer1End}
+ , fBuffer2{buffer2}
+ , fBuffer2End{buffer2End}
+ { }
+
+ template <typename AlphaIter> void blur(const AlphaIter srcBegin, const AlphaIter srcEnd,
+ uint8_t* dst, int dstStride, uint8_t* dstEnd) const {
+ auto buffer0Cursor = fBuffer0;
+ auto buffer1Cursor = fBuffer1;
+ auto buffer2Cursor = fBuffer2;
+
+ memset(fBuffer0, 0x00, (fBuffer2End - fBuffer0) * sizeof(*fBuffer0));
+
+ uint32_t sum0 = 0;
+ uint32_t sum1 = 0;
+ uint32_t sum2 = 0;
+
+ // Consume the source generating pixels.
+ for (AlphaIter src = srcBegin; src < srcEnd; ++src, dst += dstStride) {
+ uint32_t leadingEdge = *src;
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dst = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+ }
+
+ // The leading edge is off the right side of the mask.
+ for (int i = 0; i < fNoChangeCount; i++) {
+ uint32_t leadingEdge = 0;
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dst = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+
+ dst += dstStride;
+ }
+
+ // Starting from the right, fill in the rest of the buffer.
+ memset(fBuffer0, 0, (fBuffer2End - fBuffer0) * sizeof(*fBuffer0));
+
+ sum0 = sum1 = sum2 = 0;
+
+ uint8_t* dstCursor = dstEnd;
+ AlphaIter src = srcEnd;
+ while (dstCursor > dst) {
+ dstCursor -= dstStride;
+ uint32_t leadingEdge = *(--src);
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dstCursor = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+ }
+ }
+
+ private:
+ static constexpr uint64_t kHalf = static_cast<uint64_t>(1) << 31;
+
+ uint8_t finalScale(uint32_t sum) const {
+ return SkTo<uint8_t>((fWeight * sum + kHalf) >> 32);
+ }
+
+ uint64_t fWeight;
+ int fNoChangeCount;
+ uint32_t* fBuffer0;
+ uint32_t* fBuffer0End;
+ uint32_t* fBuffer1;
+ uint32_t* fBuffer1End;
+ uint32_t* fBuffer2;
+ uint32_t* fBuffer2End;
+ };
+
+ Scan makeBlurScan(int width, uint32_t* buffer) const {
+ uint32_t* buffer0, *buffer0End, *buffer1, *buffer1End, *buffer2, *buffer2End;
+ buffer0 = buffer;
+ buffer0End = buffer1 = buffer0 + fPass0Size;
+ buffer1End = buffer2 = buffer1 + fPass1Size;
+ buffer2End = buffer2 + fPass2Size;
+ int noChangeCount = fSlidingWindow > width ? fSlidingWindow - width : 0;
+
+ return Scan(
+ fWeight, noChangeCount,
+ buffer0, buffer0End,
+ buffer1, buffer1End,
+ buffer2, buffer2End);
+ }
+
+ uint64_t fWeight;
+ int fBorder;
+ int fSlidingWindow;
+ int fPass0Size;
+ int fPass1Size;
+ int fPass2Size;
+};
+
+} // namespace
+
+// NB 135 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+// using the Gauss filter. It also limits the size of the buffers used to hold intermediate values. The
+// additional + 1 added to window represents adding one more leading element before subtracting the
+// trailing element.
+// Explanation of maximums:
+// sum0 = (window + 1) * 255
+// sum1 = (window + 1) * sum0 -> (window + 1) * (window + 1) * 255
+// sum2 = (window + 1) * sum1 -> (window + 1) * (window + 1) * (window + 1) * 255 -> (window + 1)^3 * 255
+//
+// The value (window + 1)^3 * 255 must fit in a uint32_t. So,
+// (window + 1)^3 * 255 < 2^32, which holds for window <= 255.
+//
+// window = floor(sigma * 3 * sqrt(2 * kPi) / 4)
+// For window <= 255, the largest value for sigma is 135.
+SkMaskBlurFilter::SkMaskBlurFilter(double sigmaW, double sigmaH)
+ : fSigmaW{SkTPin(sigmaW, 0.0, 135.0)}
+ , fSigmaH{SkTPin(sigmaH, 0.0, 135.0)}
+{
+ SkASSERT(sigmaW >= 0);
+ SkASSERT(sigmaH >= 0);
+}
+
+bool SkMaskBlurFilter::hasNoBlur() const {
+ return (3 * fSigmaW <= 1) && (3 * fSigmaH <= 1);
+}
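+
+// For example, sigmaW = sigmaH = 0.3 gives 3 * sigma = 0.9 <= 1, so such a filter
+// is an identity and callers can skip the blur entirely.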
+
+// We favor A8 masks, and if we need to work with another format, we'll convert to A8 first.
+// Each of these converts width (up to 8) mask values to A8.
+static void bw_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+
+ uint8_t masks = *from;
+ for (int i = 0; i < width; ++i) {
+ a8[i] = (masks >> (7 - i)) & 1 ? 0xFF
+ : 0x00;
+ }
+}
+static void lcd_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+
+ for (int i = 0; i < width; ++i) {
+ unsigned rgb = reinterpret_cast<const uint16_t*>(from)[i],
+ r = SkPacked16ToR32(rgb),
+ g = SkPacked16ToG32(rgb),
+ b = SkPacked16ToB32(rgb);
+ a8[i] = (r + g + b) / 3;
+ }
+}
+static void argb32_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+ for (int i = 0; i < width; ++i) {
+ uint32_t rgba = reinterpret_cast<const uint32_t*>(from)[i];
+ a8[i] = SkGetPackedA32(rgba);
+ }
+}
+using ToA8 = decltype(bw_to_a8);
+
+static Sk8h load(const uint8_t* from, int width, ToA8* toA8) {
+ // Our fast path is a full 8-byte load of A8.
+ // So we'll conditionally handle the two slow paths using tmp:
+ // - if we have a function to convert another mask to A8, use it;
+ // - if not but we have less than 8 bytes to load, load them one at a time.
+ uint8_t tmp[8] = {0,0,0,0, 0,0,0,0};
+ if (toA8) {
+ toA8(tmp, from, width);
+ from = tmp;
+ } else if (width < 8) {
+ for (int i = 0; i < width; ++i) {
+ tmp[i] = from[i];
+ }
+ from = tmp;
+ }
+
+ // Load A8 and convert to 8.8 fixed-point.
+ return SkNx_cast<uint16_t>(Sk8b::Load(from)) << 8;
+}
+
+static void store(uint8_t* to, const Sk8h& v, int width) {
+ Sk8b b = SkNx_cast<uint8_t>(v >> 8);
+ if (width == 8) {
+ b.store(to);
+ } else {
+ uint8_t buffer[8];
+ b.store(buffer);
+ for (int i = 0; i < width; i++) {
+ to[i] = buffer[i];
+ }
+ }
+}
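+
+// As a quick sanity check of the 8.8 fixed-point round trip above: an A8 value of
+// 0x80 loads as 0x8000, and store() shifts it back down, writing 0x80 out again.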
+
+static constexpr uint16_t _____ = 0u;
+static constexpr uint16_t kHalf = 0x80u;
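+
+// _____ is a named zero lane so the register shuffles below read like the derivation
+// that follows; kHalf (0x80 in 8.8 format) is the rounding term added before truncation.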
+
+// In all the blur_x_radius_N and blur_y_radius_N functions the gaussian values are encoded
+// in 0.16 format, and none of the values is greater than one. The incoming mask values are in
+// 8.8 format. The resulting multiply has an 8.24 format, but the mulHi truncates the lower
+// 16 bits, resulting in an 8.8 format.
+//
+// The blur_x_radius_N functions below blur along a row of pixels using a kernel with radius N.
+// The system is set up to minimize the number of multiplies needed.
+//
+// Explanation:
+// Blurring a specific mask value is given by the following equation where D_n is the resulting
+// mask value and S_n is the source value. The example below is for a filter with a radius of 1
+// and a width of 3 (radius == (width-1)/2). The indexes for the source and destination are
+// aligned. The filter is given by G_n where n is the symmetric filter value.
+//
+// D[n] = S[n-1]*G[1] + S[n]*G[0] + S[n+1]*G[1].
+//
+// We can start the source index at an offset relative to the destination separated by the
+// radius. This results in a non-traditional restating of the above filter.
+//
+// D[n] = S[n]*G[1] + S[n+1]*G[0] + S[n+2]*G[1]
+//
+// If we look at three specific consecutive destinations the following equations result:
+//
+// D[5] = S[5]*G[1] + S[6]*G[0] + S[7]*G[1]
+// D[6] = S[6]*G[1] + S[7]*G[0] + S[8]*G[1]
+// D[7] = S[7]*G[1] + S[8]*G[0] + S[9]*G[1].
+//
+// In the above equations, notice that S[7] is used in all three. In particular, two values are
+// used: S[7]*G[0] and S[7]*G[1]. So, S[7] is only multiplied twice, but used in D[5], D[6] and
+// D[7].
+//
+// From the point of view of a source value we end up with the following three equations.
+//
+// Given S[7]:
+// D[5] += S[7]*G[1]
+// D[6] += S[7]*G[0]
+// D[7] += S[7]*G[1]
+//
+// In General:
+// D[n] += S[n]*G[1]
+// D[n+1] += S[n]*G[0]
+// D[n+2] += S[n]*G[1]
+//
+// Now these equations can be ganged using SIMD to form:
+// D[n..n+7] += S[n..n+7]*G[1]
+// D[n+1..n+8] += S[n..n+7]*G[0]
+// D[n+2..n+9] += S[n..n+7]*G[1]
+// The next set of values becomes.
+// D[n+8..n+15] += S[n+8..n+15]*G[1]
+// D[n+9..n+16] += S[n+8..n+15]*G[0]
+// D[n+10..n+17] += S[n+8..n+15]*G[1]
+// You can see that the D[n+8] and D[n+9] values overlap the two sets, using parts of both
+// S[n..n+7] and S[n+8..n+15].
+//
+// Just one more transformation allows the code to maintain all working values in
+// registers. I introduce the notation {0, S[n..n+7] * G[k]} to mean the array of values with 0
+// prepended, forming {0, S[n] * G[k], ..., S[n+7]*G[k]}.
+//
+// D[n..n+7] += S[n..n+7] * G[1]
+// D[n..n+8] += {0, S[n..n+7] * G[0]}
+// D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+//
+// Now we can encode D[n..n+7] in a single Sk8h register called d0, and D[n+8..n+15] in a
+// register d8. In addition, S[n..n+7] becomes s0.
+//
+// The {0, S[n..n+7] * G[k]} notation is translated in the following way below.
+//
+// Sk8h v0 = s0*G[0]
+// Sk8h v1 = s0*G[1]
+// /* D[n..n+7] += S[n..n+7] * G[1] */
+// d0 += v1;
+// /* D[n..n+8] += {0, S[n..n+7] * G[0]} */
+// d0 += {_____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5], v0[6]}
+// d8 += {v0[7], _____, _____, _____, _____, _____, _____, _____}
+// /* D[n..n+9] += {0, 0, S[n..n+7] * G[1]} */
+// d0 += {_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]}
+// d8 += {v1[6], v1[7], _____, _____, _____, _____, _____, _____}
+// Where we rely on the compiler to generate efficient code for the {____, n, ....} notation.
+
+static void blur_x_radius_1(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h&, const Sk8h&, const Sk8h&,
+ Sk8h* d0, Sk8h* d8) {
+
+ auto v1 = s0.mulHi(g1);
+ auto v0 = s0.mulHi(g0);
+
+ // D[n..n+7] += S[n..n+7] * G[1]
+ *d0 += v1;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[0]}
+ *d0 += Sk8h{_____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5], v0[6]};
+ *d8 += Sk8h{v0[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]};
+ *d8 += Sk8h{v1[6], v1[7], _____, _____, _____, _____, _____, _____};
+
+}
+
+static void blur_x_radius_2(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h&, const Sk8h&,
+ Sk8h* d0, Sk8h* d8) {
+ auto v0 = s0.mulHi(g0);
+ auto v1 = s0.mulHi(g1);
+ auto v2 = s0.mulHi(g2);
+
+ // D[n..n+7] += S[n..n+7] * G[2]
+ *d0 += v2;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5], v1[6]};
+ *d8 += Sk8h{v1[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[0]}
+ *d0 += Sk8h{_____, _____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5]};
+ *d8 += Sk8h{v0[6], v0[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, _____, v1[0], v1[1], v1[2], v1[3], v1[4]};
+ *d8 += Sk8h{v1[5], v1[6], v1[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += Sk8h{_____, _____, _____, _____, v2[0], v2[1], v2[2], v2[3]};
+ *d8 += Sk8h{v2[4], v2[5], v2[6], v2[7], _____, _____, _____, _____};
+}
+
+static void blur_x_radius_3(
+ const Sk8h& s0,
+ const Sk8h& gauss0, const Sk8h& gauss1, const Sk8h& gauss2, const Sk8h& gauss3, const Sk8h&,
+ Sk8h* d0, Sk8h* d8) {
+ auto v0 = s0.mulHi(gauss0);
+ auto v1 = s0.mulHi(gauss1);
+ auto v2 = s0.mulHi(gauss2);
+ auto v3 = s0.mulHi(gauss3);
+
+ // D[n..n+7] += S[n..n+7] * G[3]
+ *d0 += v3;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[2]}
+ *d0 += Sk8h{_____, v2[0], v2[1], v2[2], v2[3], v2[4], v2[5], v2[6]};
+ *d8 += Sk8h{v2[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]};
+ *d8 += Sk8h{v1[6], v1[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[0]}
+ *d0 += Sk8h{_____, _____, _____, v0[0], v0[1], v0[2], v0[3], v0[4]};
+ *d8 += Sk8h{v0[5], v0[6], v0[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, _____, _____, v1[0], v1[1], v1[2], v1[3]};
+ *d8 += Sk8h{v1[4], v1[5], v1[6], v1[7], _____, _____, _____, _____};
+
+ // D[n..n+12] += {0, 0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += Sk8h{_____, _____, _____, _____, _____, v2[0], v2[1], v2[2]};
+ *d8 += Sk8h{v2[3], v2[4], v2[5], v2[6], v2[7], _____, _____, _____};
+
+ // D[n..n+13] += {0, 0, 0, 0, 0, 0, S[n..n+7] * G[3]}
+ *d0 += Sk8h{_____, _____, _____, _____, _____, _____, v3[0], v3[1]};
+ *d8 += Sk8h{v3[2], v3[3], v3[4], v3[5], v3[6], v3[7], _____, _____};
+}
+
+static void blur_x_radius_4(
+ const Sk8h& s0,
+ const Sk8h& gauss0,
+ const Sk8h& gauss1,
+ const Sk8h& gauss2,
+ const Sk8h& gauss3,
+ const Sk8h& gauss4,
+ Sk8h* d0, Sk8h* d8) {
+ auto v0 = s0.mulHi(gauss0);
+ auto v1 = s0.mulHi(gauss1);
+ auto v2 = s0.mulHi(gauss2);
+ auto v3 = s0.mulHi(gauss3);
+ auto v4 = s0.mulHi(gauss4);
+
+ // D[n..n+7] += S[n..n+7] * G[4]
+ *d0 += v4;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[3]}
+ *d0 += Sk8h{_____, v3[0], v3[1], v3[2], v3[3], v3[4], v3[5], v3[6]};
+ *d8 += Sk8h{v3[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[2]}
+ *d0 += Sk8h{_____, _____, v2[0], v2[1], v2[2], v2[3], v2[4], v2[5]};
+ *d8 += Sk8h{v2[6], v2[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, _____, v1[0], v1[1], v1[2], v1[3], v1[4]};
+ *d8 += Sk8h{v1[5], v1[6], v1[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[0]}
+ *d0 += Sk8h{_____, _____, _____, _____, v0[0], v0[1], v0[2], v0[3]};
+ *d8 += Sk8h{v0[4], v0[5], v0[6], v0[7], _____, _____, _____, _____};
+
+ // D[n..n+12] += {0, 0, 0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += Sk8h{_____, _____, _____, _____, _____, v1[0], v1[1], v1[2]};
+ *d8 += Sk8h{v1[3], v1[4], v1[5], v1[6], v1[7], _____, _____, _____};
+
+ // D[n..n+13] += {0, 0, 0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += Sk8h{_____, _____, _____, _____, _____, _____, v2[0], v2[1]};
+ *d8 += Sk8h{v2[2], v2[3], v2[4], v2[5], v2[6], v2[7], _____, _____};
+
+ // D[n..n+14] += {0, 0, 0, 0, 0, 0, 0, S[n..n+7] * G[3]}
+ *d0 += Sk8h{_____, _____, _____, _____, _____, _____, _____, v3[0]};
+ *d8 += Sk8h{v3[1], v3[2], v3[3], v3[4], v3[5], v3[6], v3[7], _____};
+
+ // D[n..n+15] += {0, 0, 0, 0, 0, 0, 0, 0, S[n..n+7] * G[4]}
+ *d8 += v4;
+}
+
+using BlurX = decltype(blur_x_radius_1);
+
+// BlurX will only be one of the functions blur_x_radius_(1|2|3|4).
+static void blur_row(
+ BlurX blur,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h& g3, const Sk8h& g4,
+ const uint8_t* src, int srcW,
+ uint8_t* dst, int dstW) {
+ // Clear the buffer to handle summing wider than source.
+ Sk8h d0{kHalf}, d8{kHalf};
+
+ // Go by multiples of 8 in src.
+ int x = 0;
+ for (; x <= srcW - 8; x += 8) {
+ blur(load(src, 8, nullptr), g0, g1, g2, g3, g4, &d0, &d8);
+
+ store(dst, d0, 8);
+
+ d0 = d8;
+ d8 = Sk8h{kHalf};
+
+ src += 8;
+ dst += 8;
+ }
+
+ // There are src values left, but the remainder is not a multiple of 8.
+ int srcTail = srcW - x;
+ if (srcTail > 0) {
+
+ blur(load(src, srcTail, nullptr), g0, g1, g2, g3, g4, &d0, &d8);
+
+ int dstTail = std::min(8, dstW - x);
+ store(dst, d0, dstTail);
+
+ d0 = d8;
+ dst += dstTail;
+ x += dstTail;
+ }
+
+ // There are dst mask values to complete.
+ int dstTail = dstW - x;
+ if (dstTail > 0) {
+ store(dst, d0, dstTail);
+ }
+}
+
+// BlurX will only be one of the functions blur_x_radius_(1|2|3|4).
+static void blur_x_rect(BlurX blur,
+ uint16_t* gauss,
+ const uint8_t* src, size_t srcStride, int srcW,
+ uint8_t* dst, size_t dstStride, int dstW, int dstH) {
+
+ Sk8h g0{gauss[0]},
+ g1{gauss[1]},
+ g2{gauss[2]},
+ g3{gauss[3]},
+ g4{gauss[4]};
+
+ // Blur *ALL* the rows.
+ for (int y = 0; y < dstH; y++) {
+ blur_row(blur, g0, g1, g2, g3, g4, src, srcW, dst, dstW);
+ src += srcStride;
+ dst += dstStride;
+ }
+}
+
+static void direct_blur_x(int radius, uint16_t* gauss,
+ const uint8_t* src, size_t srcStride, int srcW,
+ uint8_t* dst, size_t dstStride, int dstW, int dstH) {
+
+ switch (radius) {
+ case 1:
+ blur_x_rect(blur_x_radius_1, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 2:
+ blur_x_rect(blur_x_radius_2, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 3:
+ blur_x_rect(blur_x_radius_3, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 4:
+ blur_x_rect(blur_x_radius_4, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ default:
+ SkASSERTF(false, "The radius %d is not handled\n", radius);
+ }
+}
+
+// The operations of the blur_y_radius_N functions work on a theme similar to the blur_x_radius_N
+// functions, but end up being simpler because there is no complicated shift of registers. We
+// start with the non-traditional form of the gaussian filter. In the following, r is the stride
+// that, when added to an index, generates the next value down the column.
+//
+// D[n+0r] = S[n+0r]*G[1]
+// + S[n+1r]*G[0]
+// + S[n+2r]*G[1]
+//
+// Expanding out in a way similar to blur_x_radius_N for specific values of n.
+//
+// D[n+0r] = S[n-2r]*G[1] + S[n-1r]*G[0] + S[n+0r]*G[1]
+// D[n+1r] = S[n-1r]*G[1] + S[n+0r]*G[0] + S[n+1r]*G[1]
+// D[n+2r] = S[n+0r]*G[1] + S[n+1r]*G[0] + S[n+2r]*G[1]
+//
+// We can see that S[n+0r] is in all three D[] equations, but is only multiplied twice. Now we
+// can look at the calculation from the point of view of a source value.
+//
+// Given S[n+0r]:
+// D[n+0r] += S[n+0r]*G[1];
+// /* D[n+0r] is done and can be stored now. */
+// D[n+1r] += S[n+0r]*G[0];
+// D[n+2r] = S[n+0r]*G[1];
+//
+// Remember, by induction, that D[n+0r] == S[n-2r]*G[1] + S[n-1r]*G[0] before adding in
+// S[n+0r]*G[1]. So, after the addition D[n+0r] has finished calculation and can be stored. Also,
+// notice that D[n+2r] receives its first value from S[n+0r]*G[1] by assignment, not addition. Notice
+// how values flow in the following two iterations in source.
+//
+// D[n+0r] += S[n+0r]*G[1]
+// D[n+1r] += S[n+0r]*G[0]
+// D[n+2r] = S[n+0r]*G[1]
+// /* ------- */
+// D[n+1r] += S[n+1r]*G[1]
+// D[n+2r] += S[n+1r]*G[0]
+// D[n+3r] = S[n+1r]*G[1]
+//
+// Instead of using memory we can introduce temporaries d01 and d12. The update step changes
+// to the following.
+//
+// answer = d01 + S[n+0r]*G[1]
+// d01 = d12 + S[n+0r]*G[0]
+// d12 = S[n+0r]*G[1]
+// return answer
+//
+// Finally, this can be ganged into SIMD style.
+// answer[0..7] = d01[0..7] + S[n+0r..n+0r+7]*G[1]
+// d01[0..7] = d12[0..7] + S[n+0r..n+0r+7]*G[0]
+// d12[0..7] = S[n+0r..n+0r+7]*G[1]
+// return answer[0..7]
+static Sk8h blur_y_radius_1(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h&, const Sk8h&, const Sk8h&,
+ Sk8h* d01, Sk8h* d12, Sk8h*, Sk8h*, Sk8h*, Sk8h*, Sk8h*, Sk8h*) {
+ auto v0 = s0.mulHi(g0);
+ auto v1 = s0.mulHi(g1);
+
+ Sk8h answer = *d01 + v1;
+ *d01 = *d12 + v0;
+ *d12 = v1 + kHalf;
+
+ return answer;
+}
+
+static Sk8h blur_y_radius_2(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h&, const Sk8h&,
+ Sk8h* d01, Sk8h* d12, Sk8h* d23, Sk8h* d34, Sk8h*, Sk8h*, Sk8h*, Sk8h*) {
+ auto v0 = s0.mulHi(g0);
+ auto v1 = s0.mulHi(g1);
+ auto v2 = s0.mulHi(g2);
+
+ Sk8h answer = *d01 + v2;
+ *d01 = *d12 + v1;
+ *d12 = *d23 + v0;
+ *d23 = *d34 + v1;
+ *d34 = v2 + kHalf;
+
+ return answer;
+}
+
+static Sk8h blur_y_radius_3(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h& g3, const Sk8h&,
+ Sk8h* d01, Sk8h* d12, Sk8h* d23, Sk8h* d34, Sk8h* d45, Sk8h* d56, Sk8h*, Sk8h*) {
+ auto v0 = s0.mulHi(g0);
+ auto v1 = s0.mulHi(g1);
+ auto v2 = s0.mulHi(g2);
+ auto v3 = s0.mulHi(g3);
+
+ Sk8h answer = *d01 + v3;
+ *d01 = *d12 + v2;
+ *d12 = *d23 + v1;
+ *d23 = *d34 + v0;
+ *d34 = *d45 + v1;
+ *d45 = *d56 + v2;
+ *d56 = v3 + kHalf;
+
+ return answer;
+}
+
+static Sk8h blur_y_radius_4(
+ const Sk8h& s0,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h& g3, const Sk8h& g4,
+ Sk8h* d01, Sk8h* d12, Sk8h* d23, Sk8h* d34, Sk8h* d45, Sk8h* d56, Sk8h* d67, Sk8h* d78) {
+ auto v0 = s0.mulHi(g0);
+ auto v1 = s0.mulHi(g1);
+ auto v2 = s0.mulHi(g2);
+ auto v3 = s0.mulHi(g3);
+ auto v4 = s0.mulHi(g4);
+
+ Sk8h answer = *d01 + v4;
+ *d01 = *d12 + v3;
+ *d12 = *d23 + v2;
+ *d23 = *d34 + v1;
+ *d34 = *d45 + v0;
+ *d45 = *d56 + v1;
+ *d56 = *d67 + v2;
+ *d67 = *d78 + v3;
+ *d78 = v4 + kHalf;
+
+ return answer;
+}
+
+using BlurY = decltype(blur_y_radius_1);
+
+// BlurY will be one of blur_y_radius_(1|2|3|4).
+static void blur_column(
+ ToA8 toA8,
+ BlurY blur, int radius, int width,
+ const Sk8h& g0, const Sk8h& g1, const Sk8h& g2, const Sk8h& g3, const Sk8h& g4,
+ const uint8_t* src, size_t srcRB, int srcH,
+ uint8_t* dst, size_t dstRB) {
+ Sk8h d01{kHalf}, d12{kHalf}, d23{kHalf}, d34{kHalf},
+ d45{kHalf}, d56{kHalf}, d67{kHalf}, d78{kHalf};
+
+ auto flush = [&](uint8_t* to, const Sk8h& v0, const Sk8h& v1) {
+ store(to, v0, width);
+ to += dstRB;
+ store(to, v1, width);
+ return to + dstRB;
+ };
+
+ for (int y = 0; y < srcH; y += 1) {
+ auto s = load(src, width, toA8);
+ auto b = blur(s,
+ g0, g1, g2, g3, g4,
+ &d01, &d12, &d23, &d34, &d45, &d56, &d67, &d78);
+ store(dst, b, width);
+ src += srcRB;
+ dst += dstRB;
+ }
+
+ if (radius >= 1) {
+ dst = flush(dst, d01, d12);
+ }
+ if (radius >= 2) {
+ dst = flush(dst, d23, d34);
+ }
+ if (radius >= 3) {
+ dst = flush(dst, d45, d56);
+ }
+ if (radius >= 4) {
+ flush(dst, d67, d78);
+ }
+}
+
+// BlurY will be one of blur_y_radius_(1|2|3|4).
+static void blur_y_rect(ToA8 toA8, const int strideOf8,
+ BlurY blur, int radius, uint16_t* gauss,
+ const uint8_t* src, size_t srcRB, int srcW, int srcH,
+ uint8_t* dst, size_t dstRB) {
+
+ Sk8h g0{gauss[0]},
+ g1{gauss[1]},
+ g2{gauss[2]},
+ g3{gauss[3]},
+ g4{gauss[4]};
+
+ int x = 0;
+ for (; x <= srcW - 8; x += 8) {
+ blur_column(toA8, blur, radius, 8,
+ g0, g1, g2, g3, g4,
+ src, srcRB, srcH,
+ dst, dstRB);
+ src += strideOf8;
+ dst += 8;
+ }
+
+ int xTail = srcW - x;
+ if (xTail > 0) {
+ blur_column(toA8, blur, radius, xTail,
+ g0, g1, g2, g3, g4,
+ src, srcRB, srcH,
+ dst, dstRB);
+ }
+}
+
+static void direct_blur_y(ToA8 toA8, const int strideOf8,
+ int radius, uint16_t* gauss,
+ const uint8_t* src, size_t srcRB, int srcW, int srcH,
+ uint8_t* dst, size_t dstRB) {
+
+ switch (radius) {
+ case 1:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_1, 1, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 2:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_2, 2, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 3:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_3, 3, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 4:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_4, 4, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ default:
+ SkASSERTF(false, "The radius %d is not handled\n", radius);
+ }
+}
+
+static SkIPoint small_blur(double sigmaX, double sigmaY, const SkMask& src, SkMask* dst) {
+ SkASSERT(sigmaX == sigmaY); // TODO
+ SkASSERT(0.01 <= sigmaX && sigmaX < 2);
+ SkASSERT(0.01 <= sigmaY && sigmaY < 2);
+
+ SkGaussFilter filterX{sigmaX},
+ filterY{sigmaY};
+
+ int radiusX = filterX.radius(),
+ radiusY = filterY.radius();
+
+ SkASSERT(radiusX <= 4 && radiusY <= 4);
+
+ auto prepareGauss = [](const SkGaussFilter& filter, uint16_t* factors) {
+ int i = 0;
+ for (double d : filter) {
+ factors[i++] = static_cast<uint16_t>(round(d * (1 << 16)));
+ }
+ };
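+
+ // For instance, a gaussian tap of 0.399 becomes round(0.399 * 65536) = 26149,
+ // the 0.16 fixed-point encoding that the blur_x/blur_y kernels expect.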
+
+ uint16_t gaussFactorsX[SkGaussFilter::kGaussArrayMax],
+ gaussFactorsY[SkGaussFilter::kGaussArrayMax];
+
+ prepareGauss(filterX, gaussFactorsX);
+ prepareGauss(filterY, gaussFactorsY);
+
+ *dst = SkMask::PrepareDestination(radiusX, radiusY, src);
+ if (src.fImage == nullptr) {
+ return {SkTo<int32_t>(radiusX), SkTo<int32_t>(radiusY)};
+ }
+ if (dst->fImage == nullptr) {
+ dst->fBounds.setEmpty();
+ return {0, 0};
+ }
+
+ int srcW = src.fBounds.width(),
+ srcH = src.fBounds.height();
+
+ int dstW = dst->fBounds.width(),
+ dstH = dst->fBounds.height();
+
+ size_t srcRB = src.fRowBytes,
+ dstRB = dst->fRowBytes;
+
+ // TODO: handle blurring in only one direction.
+
+ // Blur vertically and copy to destination.
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ direct_blur_y(bw_to_a8, 1,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kA8_Format:
+ direct_blur_y(nullptr, 8,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kARGB32_Format:
+ direct_blur_y(argb32_to_a8, 32,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kLCD16_Format:
+ direct_blur_y(lcd_to_a8, 16, radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+
+ // Blur horizontally in place.
+ direct_blur_x(radiusX, gaussFactorsX,
+ dst->fImage + radiusX, dstRB, srcW,
+ dst->fImage, dstRB, dstW, dstH);
+
+ return {radiusX, radiusY};
+}
+
+// TODO: assuming sigmaW = sigmaH. Allow different sigmas. Right now the
+// API forces the sigmas to be the same.
+SkIPoint SkMaskBlurFilter::blur(const SkMask& src, SkMask* dst) const {
+
+ if (fSigmaW < 2.0 && fSigmaH < 2.0) {
+ return small_blur(fSigmaW, fSigmaH, src, dst);
+ }
+
+ // 1024 is a placeholder guess until more analysis can be done.
+ SkSTArenaAlloc<1024> alloc;
+
+ PlanGauss planW(fSigmaW);
+ PlanGauss planH(fSigmaH);
+
+ int borderW = planW.border(),
+ borderH = planH.border();
+ SkASSERT(borderH >= 0 && borderW >= 0);
+
+ *dst = SkMask::PrepareDestination(borderW, borderH, src);
+ if (src.fImage == nullptr) {
+ return {SkTo<int32_t>(borderW), SkTo<int32_t>(borderH)};
+ }
+ if (dst->fImage == nullptr) {
+ dst->fBounds.setEmpty();
+ return {0, 0};
+ }
+
+ int srcW = src.fBounds.width(),
+ srcH = src.fBounds.height(),
+ dstW = dst->fBounds.width(),
+ dstH = dst->fBounds.height();
+ SkASSERT(srcW >= 0 && srcH >= 0 && dstW >= 0 && dstH >= 0);
+
+ auto bufferSize = std::max(planW.bufferSize(), planH.bufferSize());
+ auto buffer = alloc.makeArrayDefault<uint32_t>(bufferSize);
+
+ // Blur both directions.
+ int tmpW = srcH,
+ tmpH = dstW;
+
+ auto tmp = alloc.makeArrayDefault<uint8_t>(tmpW * tmpH);
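+
+ // tmp holds the transposed intermediate: each horizontally blurred source row is
+ // written as a column of tmp (note the &tmp[y] starts and the tmpW stride below),
+ // so the vertical pass can then scan in memory order. Hence tmpW = srcH and
+ // tmpH = dstW.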
+
+ // Blur horizontally, and transpose.
+ const PlanGauss::Scan& scanW = planW.makeBlurScan(srcW, buffer);
+ switch (src.fFormat) {
+ case SkMask::kBW_Format: {
+ const uint8_t* bwStart = src.fImage;
+ auto start = SkMask::AlphaIter<SkMask::kBW_Format>(bwStart, 0);
+ auto end = SkMask::AlphaIter<SkMask::kBW_Format>(bwStart + (srcW / 8), srcW % 8);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kA8_Format: {
+ const uint8_t* a8Start = src.fImage;
+ auto start = SkMask::AlphaIter<SkMask::kA8_Format>(a8Start);
+ auto end = SkMask::AlphaIter<SkMask::kA8_Format>(a8Start + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kARGB32_Format: {
+ const uint32_t* argbStart = reinterpret_cast<const uint32_t*>(src.fImage);
+ auto start = SkMask::AlphaIter<SkMask::kARGB32_Format>(argbStart);
+ auto end = SkMask::AlphaIter<SkMask::kARGB32_Format>(argbStart + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kLCD16_Format: {
+ const uint16_t* lcdStart = reinterpret_cast<const uint16_t*>(src.fImage);
+ auto start = SkMask::AlphaIter<SkMask::kLCD16_Format>(lcdStart);
+ auto end = SkMask::AlphaIter<SkMask::kLCD16_Format>(lcdStart + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+
+ // Blur vertically (scan in memory order because of the transposition),
+ // and transpose back to the original orientation.
+ const PlanGauss::Scan& scanH = planH.makeBlurScan(tmpW, buffer);
+ for (int y = 0; y < tmpH; y++) {
+ auto tmpStart = &tmp[y * tmpW];
+ auto dstStart = &dst->fImage[y];
+
+ scanH.blur(tmpStart, tmpStart + tmpW,
+ dstStart, dst->fRowBytes, dstStart + dst->fRowBytes * dstH);
+ }
+
+ return {SkTo<int32_t>(borderW), SkTo<int32_t>(borderH)};
+}
diff --git a/gfx/skia/skia/src/core/SkMaskBlurFilter.h b/gfx/skia/skia/src/core/SkMaskBlurFilter.h
new file mode 100644
index 0000000000..fe10cf4abb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskBlurFilter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskBlurFilter_DEFINED
+#define SkMaskBlurFilter_DEFINED
+
+#include <algorithm>
+#include <memory>
+#include <tuple>
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkMask.h"
+
+// Implement a single channel Gaussian blur. The specifics for implementation are taken from:
+// https://drafts.fxtf.org/filters/#feGaussianBlurElement
+class SkMaskBlurFilter {
+public:
+ // Create an object suitable for filtering an SkMask using a filter with width sigmaW and
+ // height sigmaH.
+ SkMaskBlurFilter(double sigmaW, double sigmaH);
+
+ // returns true iff the sigmas will result in an identity mask (no blurring)
+ bool hasNoBlur() const;
+
+ // Given a src SkMask, generate dst SkMask returning the border width and height.
+ SkIPoint blur(const SkMask& src, SkMask* dst) const;
+
+private:
+ const double fSigmaW;
+ const double fSigmaH;
+};
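+
+// A minimal usage sketch, assuming `srcMask` is an already-rendered SkMask (the
+// names here are illustrative, not part of the API):
+//
+//   SkMaskBlurFilter filter{/*sigmaW=*/2.0, /*sigmaH=*/2.0};
+//   if (!filter.hasNoBlur()) {
+//       SkMask blurred;
+//       SkIPoint border = filter.blur(srcMask, &blurred);
+//       // ... consume blurred.fImage, offsetting by `border` ...
+//       SkMask::FreeImage(blurred.fImage);
+//   }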
+
+#endif // SkMaskBlurFilter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMaskCache.cpp b/gfx/skia/skia/src/core/SkMaskCache.cpp
new file mode 100644
index 0000000000..f08f4d7ee0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
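+
+// For example, CHECK_LOCAL(localCache, find, Find, key, visitor, &ctx) expands to:
+//   (localCache) ? localCache->find(key, visitor, &ctx)
+//                : SkResourceCache::Find(key, visitor, &ctx)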
+
+struct MaskValue {
+ SkMask fMask;
+ SkCachedData* fData;
+};
+
+namespace {
+static unsigned gRRectBlurKeyNamespaceLabel;
+
+struct RRectBlurKey : public SkResourceCache::Key {
+public:
+ RRectBlurKey(SkScalar sigma, const SkRRect& rrect, SkBlurStyle style)
+ : fSigma(sigma)
+ , fStyle(style)
+ , fRRect(rrect)
+ {
+ this->init(&gRRectBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fRRect));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ SkRRect fRRect;
+};
+
+struct RRectBlurRec : public SkResourceCache::Rec {
+ RRectBlurRec(RRectBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RRectBlurRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RRectBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rrect-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RRectBlurRec& rec = static_cast<const RRectBlurRec&>(baseRec);
+ MaskValue* result = (MaskValue*)contextData;
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, SkMask* mask, SkResourceCache* localCache) {
+ MaskValue result;
+ RRectBlurKey key(sigma, rrect, style);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RRectBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RRectBlurKey key(sigma, rrect, style);
+ return CHECK_LOCAL(localCache, add, Add, new RRectBlurRec(key, mask, data));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+static unsigned gRectsBlurKeyNamespaceLabel;
+
+struct RectsBlurKey : public SkResourceCache::Key {
+public:
+ RectsBlurKey(SkScalar sigma, SkBlurStyle style, const SkRect rects[], int count)
+ : fSigma(sigma)
+ , fStyle(style)
+ {
+ SkASSERT(1 == count || 2 == count);
+ SkIRect ir;
+ rects[0].roundOut(&ir);
+ fSizes[0] = SkSize{rects[0].width(), rects[0].height()};
+ if (2 == count) {
+ fSizes[1] = SkSize{rects[1].width(), rects[1].height()};
+ fSizes[2] = SkSize{rects[0].x() - rects[1].x(), rects[0].y() - rects[1].y()};
+ } else {
+ fSizes[1] = SkSize{0, 0};
+ fSizes[2] = SkSize{0, 0};
+ }
+ fSizes[3] = SkSize{rects[0].x() - ir.x(), rects[0].y() - ir.y()};
+
+ this->init(&gRectsBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fSizes));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ SkSize fSizes[4];
+};
+
+struct RectsBlurRec : public SkResourceCache::Rec {
+ RectsBlurRec(RectsBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RectsBlurRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RectsBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rects-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RectsBlurRec& rec = static_cast<const RectsBlurRec&>(baseRec);
+ MaskValue* result = static_cast<MaskValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache) {
+ MaskValue result;
+ RectsBlurKey key(sigma, style, rects, count);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RectsBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RectsBlurKey key(sigma, style, rects, count);
+ return CHECK_LOCAL(localCache, add, Add, new RectsBlurRec(key, mask, data));
+}
diff --git a/gfx/skia/skia/src/core/SkMaskCache.h b/gfx/skia/skia/src/core/SkMaskCache.h
new file mode 100644
index 0000000000..d22a5d1be0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskCache_DEFINED
+#define SkMaskCache_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkResourceCache.h"
+
+class SkMaskCache {
+public:
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixels, and set mask
+ * to point at that memory.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a mask and its pixel-data to the cache.
+ */
+ static void Add(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+ static void Add(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+};
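+
+// An illustrative lookup pattern (variable names assumed, not part of this header):
+//
+//   SkMask mask;
+//   if (SkCachedData* data = SkMaskCache::FindAndRef(sigma, style, rrect, &mask)) {
+//       // Hit: mask.fImage aliases data->data(); unref data when finished with it.
+//       data->unref();
+//   } else {
+//       // Miss: compute the blurred mask, copy the pixels into an SkCachedData,
+//       // then publish it with SkMaskCache::Add(sigma, style, rrect, mask, cachedData).
+//   }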
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskFilter.cpp b/gfx/skia/skia/src/core/SkMaskFilter.cpp
new file mode 100644
index 0000000000..573fe3cabb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskFilter.cpp
@@ -0,0 +1,720 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskFilterBase.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkBlurPriv.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkCoverageModePriv.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#include "src/gpu/text/GrSDFMaskFilter.h"
+#endif
+
+SkMaskFilterBase::NinePatch::~NinePatch() {
+ if (fCache) {
+ SkASSERT((const void*)fMask.fImage == fCache->data());
+ fCache->unref();
+ } else {
+ SkMask::FreeImage(fMask.fImage);
+ }
+}
+
+bool SkMaskFilterBase::asABlur(BlurRec*) const {
+ return false;
+}
+
+static void extractMaskSubset(const SkMask& src, SkMask* dst) {
+ SkASSERT(src.fBounds.contains(dst->fBounds));
+
+ const int dx = dst->fBounds.left() - src.fBounds.left();
+ const int dy = dst->fBounds.top() - src.fBounds.top();
+ dst->fImage = src.fImage + dy * src.fRowBytes + dx;
+ dst->fRowBytes = src.fRowBytes;
+ dst->fFormat = src.fFormat;
+}
+
+static void blitClippedMask(SkBlitter* blitter, const SkMask& mask,
+ const SkIRect& bounds, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(bounds, clipR)) {
+ blitter->blitMask(mask, r);
+ }
+}
+
+static void blitClippedRect(SkBlitter* blitter, const SkIRect& rect, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(rect, clipR)) {
+ blitter->blitRect(r.left(), r.top(), r.width(), r.height());
+ }
+}
+
+#if 0
+static void dump(const SkMask& mask) {
+ for (int y = mask.fBounds.top(); y < mask.fBounds.bottom(); ++y) {
+ for (int x = mask.fBounds.left(); x < mask.fBounds.right(); ++x) {
+ SkDebugf("%02X", *mask.getAddr8(x, y));
+ }
+ SkDebugf("\n");
+ }
+ SkDebugf("\n");
+}
+#endif
+
+static void draw_nine_clipped(const SkMask& mask, const SkIRect& outerR,
+ const SkIPoint& center, bool fillCenter,
+ const SkIRect& clipR, SkBlitter* blitter) {
+ int cx = center.x();
+ int cy = center.y();
+ SkMask m;
+
+ // top-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // top-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(),
+ outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ SkIRect innerR;
+ innerR.setLTRB(outerR.left() + cx - mask.fBounds.left(),
+ outerR.top() + cy - mask.fBounds.top(),
+ outerR.right() + (cx + 1 - mask.fBounds.right()),
+ outerR.bottom() + (cy + 1 - mask.fBounds.bottom()));
+ if (fillCenter) {
+ blitClippedRect(blitter, innerR, clipR);
+ }
+
+ const int innerW = innerR.width();
+ size_t storageSize = (innerW + 1) * (sizeof(int16_t) + sizeof(uint8_t));
+ SkAutoSMalloc<4*1024> storage(storageSize);
+ int16_t* runs = (int16_t*)storage.get();
+ uint8_t* alpha = (uint8_t*)(runs + innerW + 1);
+
+ SkIRect r;
+ // top
+ r.setLTRB(innerR.left(), outerR.top(), innerR.right(), innerR.top());
+ if (r.intersect(clipR)) {
+ int startY = SkMax32(0, r.top() - outerR.top());
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.top() + y);
+ blitter->blitAntiH(r.left(), outerR.top() + y, alpha, runs);
+ }
+ }
+ // bottom
+ r.setLTRB(innerR.left(), innerR.bottom(), innerR.right(), outerR.bottom());
+ if (r.intersect(clipR)) {
+ int startY = outerR.bottom() - r.bottom();
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.bottom() - y - 1);
+ blitter->blitAntiH(r.left(), outerR.bottom() - y - 1, alpha, runs);
+ }
+ }
+ // left
+ r.setLTRB(outerR.left(), innerR.top(), innerR.left(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ SkMask m;
+ m.fImage = mask.getAddr8(mask.fBounds.left() + r.left() - outerR.left(),
+ mask.fBounds.top() + cy);
+ m.fBounds = r;
+ m.fRowBytes = 0; // so we repeat the scanline for our height
+ m.fFormat = SkMask::kA8_Format;
+ blitter->blitMask(m, r);
+ }
+ // right
+ r.setLTRB(innerR.right(), innerR.top(), outerR.right(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ SkMask m;
+ m.fImage = mask.getAddr8(mask.fBounds.right() - outerR.right() + r.left(),
+ mask.fBounds.top() + cy);
+ m.fBounds = r;
+ m.fRowBytes = 0; // so we repeat the scanline for our height
+ m.fFormat = SkMask::kA8_Format;
+ blitter->blitMask(m, r);
+ }
+}
+
+static void draw_nine(const SkMask& mask, const SkIRect& outerR, const SkIPoint& center,
+ bool fillCenter, const SkRasterClip& clip, SkBlitter* blitter) {
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), outerR);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ draw_nine_clipped(mask, outerR, center, fillCenter, cr, blitter);
+ clipper.next();
+ } while (!clipper.done());
+ }
+}
+
+static int countNestedRects(const SkPath& path, SkRect rects[2]) {
+ if (SkPathPriv::IsNestedFillRects(path, rects)) {
+ return 2;
+ }
+ return path.isRect(&rects[0]);
+}
+
+bool SkMaskFilterBase::filterRRect(const SkRRect& devRRect, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter) const {
+ // Attempt to speed up drawing by creating a nine patch. If a nine patch
+ // cannot be used, return false to allow our caller to recover and perform
+ // the drawing another way.
+ NinePatch patch;
+ patch.fMask.fImage = nullptr;
+ if (kTrue_FilterReturn != this->filterRRectToNine(devRRect, matrix,
+ clip.getBounds(),
+ &patch)) {
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+ }
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, true, clip, blitter);
+ return true;
+}
+
+bool SkMaskFilterBase::filterPath(const SkPath& devPath, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter,
+ SkStrokeRec::InitStyle style) const {
+ SkRect rects[2];
+ int rectCount = 0;
+ if (SkStrokeRec::kFill_InitStyle == style) {
+ rectCount = countNestedRects(devPath, rects);
+ }
+ if (rectCount > 0) {
+ NinePatch patch;
+
+ switch (this->filterRectsToNine(rects, rectCount, matrix, clip.getBounds(), &patch)) {
+ case kFalse_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+
+ case kTrue_FilterReturn:
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, 1 == rectCount, clip,
+ blitter);
+ return true;
+
+ case kUnimplemented_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ // fall out of the switch and use the general path below
+ break;
+ }
+ }
+
+ SkMask srcM, dstM;
+
+ if (!SkDraw::DrawToMask(devPath, &clip.getBounds(), this, &matrix, &srcM,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ style)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoSrc(srcM.fImage);
+
+ if (!this->filterMask(&dstM, srcM, matrix, nullptr)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoDst(dstM.fImage);
+
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), dstM.fBounds);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ blitter->blitMask(dstM, cr);
+ clipper.next();
+ } while (!clipper.done());
+ }
+
+ return true;
+}
+
+SkMaskFilterBase::FilterReturn
+SkMaskFilterBase::filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+SkMaskFilterBase::FilterReturn
+SkMaskFilterBase::filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor>
+SkMaskFilterBase::asFragmentProcessor(const GrFPArgs& args) const {
+ auto fp = this->onAsFragmentProcessor(args);
+ if (fp) {
+ SkASSERT(this->hasFragmentProcessor());
+ } else {
+ SkASSERT(!this->hasFragmentProcessor());
+ }
+ return fp;
+}
+bool SkMaskFilterBase::hasFragmentProcessor() const {
+ return this->onHasFragmentProcessor();
+}
+
+std::unique_ptr<GrFragmentProcessor>
+SkMaskFilterBase::onAsFragmentProcessor(const GrFPArgs&) const {
+ return nullptr;
+}
+bool SkMaskFilterBase::onHasFragmentProcessor() const { return false; }
+
+bool SkMaskFilterBase::canFilterMaskGPU(const GrShape& shape,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const {
+ return false;
+}
+
+bool SkMaskFilterBase::directFilterMaskGPU(GrRecordingContext*,
+ GrRenderTargetContext*,
+ GrPaint&&,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape&) const {
+ return false;
+}
+
+sk_sp<GrTextureProxy> SkMaskFilterBase::filterMaskGPU(GrRecordingContext*,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const {
+ return nullptr;
+}
+#endif
+
+void SkMaskFilterBase::computeFastBounds(const SkRect& src, SkRect* dst) const {
+ SkMask srcM, dstM;
+
+ srcM.fBounds = src.roundOut();
+ srcM.fRowBytes = 0;
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkIPoint margin; // ignored
+ if (this->filterMask(&dstM, srcM, SkMatrix::I(), &margin)) {
+ dst->set(dstM.fBounds);
+ } else {
+ dst->set(srcM.fBounds);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename T> static inline T join(const T& a, const T& b) {
+ T r = a;
+ r.join(b);
+ return r;
+}
+template <typename T> static inline T sect(const T& a, const T& b) {
+ T r = a;
+ return r.intersect(b) ? r : T::MakeEmpty();
+}
+
+class SkComposeMF : public SkMaskFilterBase {
+public:
+ SkComposeMF(sk_sp<SkMaskFilter> outer, sk_sp<SkMaskFilter> inner)
+ : fOuter(std::move(outer))
+ , fInner(std::move(inner))
+ {
+ SkASSERT(as_MFB(fOuter)->getFormat() == SkMask::kA8_Format);
+ SkASSERT(as_MFB(fInner)->getFormat() == SkMask::kA8_Format);
+ }
+
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&, SkIPoint*) const override;
+
+ void computeFastBounds(const SkRect& src, SkRect* dst) const override {
+ SkRect tmp;
+ as_MFB(fInner)->computeFastBounds(src, &tmp);
+ as_MFB(fOuter)->computeFastBounds(tmp, dst);
+ }
+
+ SkMask::Format getFormat() const override { return SkMask::kA8_Format; }
+
+protected:
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs& args) const override {
+ std::unique_ptr<GrFragmentProcessor> array[2] = {
+ as_MFB(fInner)->asFragmentProcessor(args),
+ as_MFB(fOuter)->asFragmentProcessor(args),
+ };
+ if (!array[0] || !array[1]) {
+ return nullptr;
+ }
+ return GrFragmentProcessor::RunInSeries(array, 2);
+ }
+
+ bool onHasFragmentProcessor() const override {
+ return as_MFB(fInner)->hasFragmentProcessor() && as_MFB(fOuter)->hasFragmentProcessor();
+ }
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkComposeMF)
+
+ sk_sp<SkMaskFilter> fOuter;
+ sk_sp<SkMaskFilter> fInner;
+
+ void flatten(SkWriteBuffer&) const override;
+
+ friend class SkMaskFilter;
+
+ typedef SkMaskFilterBase INHERITED;
+};
+
+bool SkComposeMF::filterMask(SkMask* dst, const SkMask& src, const SkMatrix& ctm,
+ SkIPoint* margin) const {
+ SkIPoint innerMargin;
+ SkMask innerMask;
+
+ if (!as_MFB(fInner)->filterMask(&innerMask, src, ctm, &innerMargin)) {
+ return false;
+ }
+ if (!as_MFB(fOuter)->filterMask(dst, innerMask, ctm, margin)) {
+ return false;
+ }
+ if (margin) {
+ margin->fX += innerMargin.fX;
+ margin->fY += innerMargin.fY;
+ }
+ sk_free(innerMask.fImage);
+ return true;
+}
+
+void SkComposeMF::flatten(SkWriteBuffer & buffer) const {
+ buffer.writeFlattenable(fOuter.get());
+ buffer.writeFlattenable(fInner.get());
+}
+
+sk_sp<SkFlattenable> SkComposeMF::CreateProc(SkReadBuffer& buffer) {
+ auto outer = buffer.readMaskFilter();
+ auto inner = buffer.readMaskFilter();
+ if (!buffer.validate(outer && inner)) {
+ return nullptr;
+ }
+ return SkMaskFilter::MakeCompose(std::move(outer), std::move(inner));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkCombineMF : public SkMaskFilterBase {
+public:
+ SkCombineMF(sk_sp<SkMaskFilter> dst, sk_sp<SkMaskFilter> src, SkCoverageMode mode)
+ : fDst(std::move(dst))
+ , fSrc(std::move(src))
+ , fMode(mode)
+ {
+ SkASSERT(as_MFB(fSrc)->getFormat() == SkMask::kA8_Format);
+ SkASSERT(as_MFB(fDst)->getFormat() == SkMask::kA8_Format);
+ }
+
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&, SkIPoint*) const override;
+
+ void computeFastBounds(const SkRect& src, SkRect* dst) const override {
+ SkRect srcR, dstR;
+ as_MFB(fSrc)->computeFastBounds(src, &srcR);
+ as_MFB(fDst)->computeFastBounds(src, &dstR);
+ *dst = join(srcR, dstR);
+ }
+
+ SkMask::Format getFormat() const override { return SkMask::kA8_Format; }
+
+ SK_FLATTENABLE_HOOKS(SkCombineMF)
+
+protected:
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs& args) const override {
+ auto src = as_MFB(fSrc)->asFragmentProcessor(args);
+ auto dst = as_MFB(fDst)->asFragmentProcessor(args);
+ if (!src || !dst) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromTwoProcessors(std::move(src), std::move(dst),
+ SkUncorrelatedCoverageModeToBlendMode(fMode));
+ }
+
+ bool onHasFragmentProcessor() const override {
+ return as_MFB(fSrc)->hasFragmentProcessor() && as_MFB(fDst)->hasFragmentProcessor();
+ }
+#endif
+
+private:
+ sk_sp<SkMaskFilter> fDst;
+ sk_sp<SkMaskFilter> fSrc;
+ SkCoverageMode fMode;
+
+ void flatten(SkWriteBuffer&) const override;
+
+ friend class SkMaskFilter;
+
+ typedef SkMaskFilterBase INHERITED;
+};
+
+#include "src/core/SkSafeMath.h"
+
+class DrawIntoMask : public SkDraw {
+public:
+ // we ignore the offset of the mask->fBounds
+ DrawIntoMask(SkMask* mask) {
+ int w = mask->fBounds.width();
+ int h = mask->fBounds.height();
+ size_t size = SkSafeMath::Mul(w, h);
+ mask->fFormat = SkMask::kA8_Format;
+ mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
+ mask->fRowBytes = w;
+
+ SkAssertResult(fDst.reset(*mask));
+
+ fMatrixStorage.reset();
+ fMatrix = &fMatrixStorage;
+
+ fRCStorage.setRect({ 0, 0, w, h });
+ fRC = &fRCStorage;
+ }
+
+ void drawAsBitmap(const SkMask& m, const SkPaint& p) {
+ SkBitmap b;
+ b.installMaskPixels(m);
+ this->drawSprite(b, m.fBounds.fLeft, m.fBounds.fTop, p);
+ }
+
+private:
+ SkMatrix fMatrixStorage;
+ SkRasterClip fRCStorage;
+};
+
+static SkIRect join(const SkIRect& src, const SkIRect& dst, SkCoverageMode mode) {
+ switch (mode) {
+ case SkCoverageMode::kUnion: return join(src, dst);
+ case SkCoverageMode::kIntersect: return sect(src, dst);
+ case SkCoverageMode::kDifference: return src;
+ case SkCoverageMode::kReverseDifference: return dst;
+ case SkCoverageMode::kXor: return join(src, dst);
+ }
+ // not reached
+ return { 0, 0, 0, 0 };
+}
+
+bool SkCombineMF::filterMask(SkMask* dst, const SkMask& src, const SkMatrix& ctm,
+ SkIPoint* margin) const {
+ SkIPoint srcP, dstP;
+ SkMask srcM, dstM;
+
+ if (!as_MFB(fSrc)->filterMask(&srcM, src, ctm, &srcP)) {
+ return false;
+ }
+    if (!as_MFB(fDst)->filterMask(&dstM, src, ctm, &dstP)) {
+        sk_free(srcM.fImage);  // don't leak the already-filtered src mask
+        return false;
+    }
+
+ dst->fBounds = join(srcM.fBounds, dstM.fBounds, fMode);
+ dst->fFormat = SkMask::kA8_Format;
+ if (src.fImage == nullptr) {
+ dst->fImage = nullptr;
+ return true;
+ }
+
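+    // Rasterize the combine on the CPU: blit the dst mask with kSrc to seed
+    // the coverage, then blit the src mask with the blend mode that matches
+    // fMode. Both masks are first offset into dst's coordinate space.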
+ DrawIntoMask md(dst);
+ SkPaint p;
+
+ p.setBlendMode(SkBlendMode::kSrc);
+ dstM.fBounds.offset(-dst->fBounds.fLeft, -dst->fBounds.fTop);
+ md.drawAsBitmap(dstM, p);
+ p.setBlendMode(SkUncorrelatedCoverageModeToBlendMode(fMode));
+ srcM.fBounds.offset(-dst->fBounds.fLeft, -dst->fBounds.fTop);
+ md.drawAsBitmap(srcM, p);
+
+ sk_free(srcM.fImage);
+ sk_free(dstM.fImage);
+ return true;
+}
+
+void SkCombineMF::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fDst.get());
+ buffer.writeFlattenable(fSrc.get());
+ buffer.write32(static_cast<uint32_t>(fMode));
+}
+
+sk_sp<SkFlattenable> SkCombineMF::CreateProc(SkReadBuffer& buffer) {
+ auto dst = buffer.readMaskFilter();
+ auto src = buffer.readMaskFilter();
+ SkCoverageMode mode = buffer.read32LE(SkCoverageMode::kLast);
+ if (!buffer.validate(dst && src)) {
+ return nullptr;
+ }
+ return SkMaskFilter::MakeCombine(std::move(dst), std::move(src), mode);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkMatrixMF : public SkMaskFilterBase {
+public:
+ SkMatrixMF(sk_sp<SkMaskFilter> filter, const SkMatrix& lm)
+ : fFilter(std::move(filter))
+ , fLM(lm)
+ {}
+
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix& ctm,
+ SkIPoint* margin) const override {
+ return as_MFB(fFilter)->filterMask(dst, src, SkMatrix::Concat(ctm, fLM), margin);
+ }
+
+ void computeFastBounds(const SkRect& src, SkRect* dst) const override {
+ *dst = src;
+ SkRect tmp;
+ fLM.mapRect(&tmp, src);
+ as_MFB(fFilter)->computeFastBounds(tmp, dst);
+ }
+
+ SkMask::Format getFormat() const override { return as_MFB(fFilter)->getFormat(); }
+
+ SK_FLATTENABLE_HOOKS(SkMatrixMF)
+
+protected:
+#if SK_SUPPORT_GPU
+    std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs& args) const override {
+ return as_MFB(fFilter)->asFragmentProcessor(GrFPArgs::WithPostLocalMatrix(args, fLM));
+ }
+
+ bool onHasFragmentProcessor() const override {
+ return as_MFB(fFilter)->hasFragmentProcessor();
+ }
+#endif
+
+private:
+ sk_sp<SkMaskFilter> fFilter;
+ const SkMatrix fLM;
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeMatrix(fLM);
+ buffer.writeFlattenable(fFilter.get());
+ }
+
+ friend class SkMaskFilter;
+ typedef SkMaskFilterBase INHERITED;
+};
+
+sk_sp<SkFlattenable> SkMatrixMF::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix m;
+ buffer.readMatrix(&m);
+ auto filter = buffer.readMaskFilter();
+ return filter ? filter->makeWithMatrix(m) : nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkMaskFilter> SkMaskFilter::MakeCompose(sk_sp<SkMaskFilter> outer,
+ sk_sp<SkMaskFilter> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ if (as_MFB(inner)->getFormat() != SkMask::kA8_Format ||
+ as_MFB(outer)->getFormat() != SkMask::kA8_Format) {
+ return nullptr;
+ }
+ return sk_sp<SkMaskFilter>(new SkComposeMF(std::move(outer), std::move(inner)));
+}
+
+sk_sp<SkMaskFilter> SkMaskFilter::MakeCombine(sk_sp<SkMaskFilter> dst, sk_sp<SkMaskFilter> src,
+ SkCoverageMode mode) {
+ if (!dst) {
+ return src;
+ }
+ if (!src) {
+ return dst;
+ }
+
+ if (as_MFB(dst)->getFormat() != SkMask::kA8_Format ||
+ as_MFB(src)->getFormat() != SkMask::kA8_Format) {
+ return nullptr;
+ }
+ return sk_sp<SkMaskFilter>(new SkCombineMF(std::move(dst), std::move(src), mode));
+}
+
+sk_sp<SkMaskFilter> SkMaskFilter::makeWithMatrix(const SkMatrix& lm) const {
+ sk_sp<SkMaskFilter> me = sk_ref_sp(const_cast<SkMaskFilter*>(this));
+ if (lm.isIdentity()) {
+ return me;
+ }
+ return sk_sp<SkMaskFilter>(new SkMatrixMF(std::move(me), lm));
+}
+
+void SkMaskFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMatrixMF);
+ SK_REGISTER_FLATTENABLE(SkComposeMF);
+ SK_REGISTER_FLATTENABLE(SkCombineMF);
+ sk_register_blur_maskfilter_createproc();
+#if SK_SUPPORT_GPU
+ gr_register_sdf_maskfilter_createproc();
+#endif
+}
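+
+// A minimal usage sketch (editorial, not part of upstream Skia; the sigma
+// values and local names are illustrative):
+//
+//   sk_sp<SkMaskFilter> inner = SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 2.0f);
+//   sk_sp<SkMaskFilter> outer = SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 4.0f);
+//   // Filters with inner first, then feeds the filtered mask to outer.
+//   sk_sp<SkMaskFilter> composed = SkMaskFilter::MakeCompose(outer, inner);
+//   // Unions the coverage of the two independently filtered masks.
+//   sk_sp<SkMaskFilter> combined =
+//           SkMaskFilter::MakeCombine(outer, inner, SkCoverageMode::kUnion);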
diff --git a/gfx/skia/skia/src/core/SkMaskFilterBase.h b/gfx/skia/skia/src/core/SkMaskFilterBase.h
new file mode 100644
index 0000000000..3c06f1bd34
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskFilterBase.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskFilterBase_DEFINED
+#define SkMaskFilterBase_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkMask.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrTypesPriv.h"
+#endif
+
+class GrClip;
+struct GrFPArgs;
+class GrFragmentProcessor;
+class GrPaint;
+class GrRecordingContext;
+class GrRenderTarget;
+class GrRenderTargetContext;
+class GrResourceProvider;
+class GrShape;
+class GrTexture;
+class GrTextureProxy;
+
+class SkBitmap;
+class SkBlitter;
+class SkCachedData;
+class SkMatrix;
+class SkPath;
+class SkRasterClip;
+class SkRRect;
+
+class SkMaskFilterBase : public SkMaskFilter {
+public:
+ /** Returns the format of the resulting mask that this subclass will return
+ when its filterMask() method is called.
+ */
+ virtual SkMask::Format getFormat() const = 0;
+
+    /** Create a new mask by filtering the src mask.
+        If src.fImage == null, then do not allocate or create the dst image
+        but do fill out the other fields in dst.
+ If you do allocate a dst image, use SkMask::AllocImage()
+ If this returns false, dst mask is ignored.
+ @param dst the result of the filter. If src.fImage == null, dst should not allocate its image
+ @param src the original image to be filtered.
+ @param matrix the CTM
+     @param margin if not null, return the buffer dx/dy needed when calculating the effect. Used when
+ drawing a clipped object to know how much larger to allocate the src before
+ applying the filter. If returning false, ignore this parameter.
+ @return true if the dst mask was correctly created.
+ */
+ virtual bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const = 0;
+
+#if SK_SUPPORT_GPU
+ /**
+     * Returns a processor if the filter can be expressed as a single-pass GrProcessor without
+     * requiring an explicit input mask. Per-pixel, the effect receives the incoming mask's
+     * coverage as the input color and outputs the filtered coverage value. This means that each
+ * pixel's filtered coverage must only depend on the unfiltered mask value for that pixel and
+ * not on surrounding values.
+ */
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs& args) const;
+
+ /**
+ * Returns true iff asFragmentProcessor() will return a processor
+ */
+ bool hasFragmentProcessor() const;
+
+ /**
+     * If asFragmentProcessor() fails, the filter may be implemented on the GPU by a subclass
+     * overriding filterMaskGPU (declared below). That code path requires constructing a
+     * src mask as input. Since that is a potentially expensive operation, the subclass must also
+     * override this function to indicate whether filterMaskGPU would succeed if the mask
+ * were to be created.
+ *
+ * 'maskRect' returns the device space portion of the mask that the filter needs. The mask
+ * passed into 'filterMaskGPU' should have the same extent as 'maskRect' but be
+ * translated to the upper-left corner of the mask (i.e., (maskRect.fLeft, maskRect.fTop)
+ * appears at (0, 0) in the mask).
+ *
+ * Logically, how this works is:
+ * canFilterMaskGPU is called
+ * if (it returns true)
+ * the returned mask rect is used for quick rejecting
+ * the mask rect is used to generate the mask
+ * filterMaskGPU is called to filter the mask
+ *
+ * TODO: this should work as:
+ * if (canFilterMaskGPU(devShape, ...)) // rect, rrect, drrect, path
+ * filterMaskGPU(devShape, ...)
+ * this would hide the RRect special case and the mask generation
+ */
+ virtual bool canFilterMaskGPU(const GrShape&,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const;
+
+ /**
+ * Try to directly render the mask filter into the target. Returns true if drawing was
+ * successful. If false is returned then paint is unmodified.
+ */
+ virtual bool directFilterMaskGPU(GrRecordingContext*,
+ GrRenderTargetContext*,
+ GrPaint&& paint,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape) const;
+
+ /**
+ * This function is used to implement filters that require an explicit src mask. It should only
+ * be called if canFilterMaskGPU returned true and the maskRect param should be the output from
+ * that call.
+ * Implementations are free to get the GrContext from the src texture in order to create
+ * additional textures and perform multiple passes.
+ */
+ virtual sk_sp<GrTextureProxy> filterMaskGPU(GrRecordingContext*,
+ sk_sp<GrTextureProxy> srcProxy,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const;
+#endif
+
+ /**
+ * The fast bounds function is used to enable the paint to be culled early
+ * in the drawing pipeline. This function accepts the current bounds of the
+     * paint as its src param and the filter adjusts those bounds using its
+     * current mask and returns the result using the dest param. Callers are
+     * allowed to provide the same struct for both src and dest, so each
+     * implementation must accommodate that behavior.
+ *
+ * The default impl calls filterMask with the src mask having no image,
+ * but subclasses may override this if they can compute the rect faster.
+ */
+ virtual void computeFastBounds(const SkRect& src, SkRect* dest) const;
+
+ struct BlurRec {
+ SkScalar fSigma;
+ SkBlurStyle fStyle;
+ };
+ /**
+ * If this filter can be represented by a BlurRec, return true and (if not null) fill in the
+ * provided BlurRec parameter. If this effect cannot be represented as a BlurRec, return false
+ * and ignore the BlurRec parameter.
+ */
+ virtual bool asABlur(BlurRec*) const;
+
+protected:
+ SkMaskFilterBase() {}
+
+#if SK_SUPPORT_GPU
+ virtual std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs&) const;
+ virtual bool onHasFragmentProcessor() const;
+#endif
+
+ enum FilterReturn {
+ kFalse_FilterReturn,
+ kTrue_FilterReturn,
+ kUnimplemented_FilterReturn
+ };
+
+ class NinePatch : ::SkNoncopyable {
+ public:
+ NinePatch() : fCache(nullptr) { }
+ ~NinePatch();
+
+ SkMask fMask; // fBounds must have [0,0] in its top-left
+ SkIRect fOuterRect; // width/height must be >= fMask.fBounds'
+ SkIPoint fCenter; // identifies center row/col for stretching
+ SkCachedData* fCache;
+ };
+
+ /**
+ * Override if your subclass can filter a rect, and return the answer as
+ * a ninepatch mask to be stretched over the returned outerRect. On success
+ * return kTrue_FilterReturn. On failure (e.g. out of memory) return
+ * kFalse_FilterReturn. If the normal filterMask() entry-point should be
+ * called (the default) return kUnimplemented_FilterReturn.
+ *
+     * By convention, the caller will take the center row/col from the returned
+ * mask as the slice it can replicate horizontally and vertically as we
+ * stretch the mask to fit inside outerRect. It is an error for outerRect
+ * to be smaller than the mask's bounds. This would imply that the width
+ * and height of the mask should be odd. This is not required, just that
+ * the caller will call mask.fBounds.centerX() and centerY() to find the
+ * strips that will be replicated.
+ */
+ virtual FilterReturn filterRectsToNine(const SkRect[], int count,
+ const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+ /**
+ * Similar to filterRectsToNine, except it performs the work on a round rect.
+ */
+ virtual FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+
+private:
+ friend class SkDraw;
+
+ /** Helper method that, given a path in device space, will rasterize it into a kA8_Format mask
+ and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ This method is not exported to java.
+ */
+ bool filterPath(const SkPath& devPath, const SkMatrix& ctm, const SkRasterClip&, SkBlitter*,
+ SkStrokeRec::InitStyle) const;
+
+ /** Helper method that, given a roundRect in device space, will rasterize it into a kA8_Format
+ mask and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ */
+ bool filterRRect(const SkRRect& devRRect, const SkMatrix& ctm, const SkRasterClip&,
+ SkBlitter*) const;
+
+ typedef SkFlattenable INHERITED;
+};
+
+inline SkMaskFilterBase* as_MFB(SkMaskFilter* mf) {
+ return static_cast<SkMaskFilterBase*>(mf);
+}
+
+inline const SkMaskFilterBase* as_MFB(const SkMaskFilter* mf) {
+ return static_cast<const SkMaskFilterBase*>(mf);
+}
+
+inline const SkMaskFilterBase* as_MFB(const sk_sp<SkMaskFilter>& mf) {
+ return static_cast<SkMaskFilterBase*>(mf.get());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.cpp b/gfx/skia/skia/src/core/SkMaskGamma.cpp
new file mode 100644
index 0000000000..b5b51a6286
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskGamma.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkFloatingPoint.h"
+#include "include/private/SkTo.h"
+
+class SkLinearColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luminance;
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luma;
+ }
+};
+
+class SkGammaColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar gamma, SkScalar luminance) const override {
+ return SkScalarPow(luminance, gamma);
+ }
+ SkScalar fromLuma(SkScalar gamma, SkScalar luma) const override {
+ return SkScalarPow(luma, SkScalarInvert(gamma));
+ }
+};
+
+class SkSRGBColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luminance <= 0.04045f) {
+ return luminance / 12.92f;
+ }
+ return SkScalarPow((luminance + 0.055f) / 1.055f,
+ 2.4f);
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luma <= 0.0031308f) {
+ return luma * 12.92f;
+ }
+ return 1.055f * SkScalarPow(luma, SkScalarInvert(2.4f))
+ - 0.055f;
+ }
+};
+
+/*static*/ const SkColorSpaceLuminance& SkColorSpaceLuminance::Fetch(SkScalar gamma) {
+ static SkLinearColorSpaceLuminance gSkLinearColorSpaceLuminance;
+ static SkGammaColorSpaceLuminance gSkGammaColorSpaceLuminance;
+ static SkSRGBColorSpaceLuminance gSkSRGBColorSpaceLuminance;
+
+ if (0 == gamma) {
+ return gSkSRGBColorSpaceLuminance;
+ } else if (SK_Scalar1 == gamma) {
+ return gSkLinearColorSpaceLuminance;
+ } else {
+ return gSkGammaColorSpaceLuminance;
+ }
+}
+
+static float apply_contrast(float srca, float contrast) {
+ return srca + ((1.0f - srca) * contrast * srca);
+}
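+// e.g. apply_contrast(0.5f, 0.5f) = 0.5 + (1 - 0.5) * 0.5 * 0.5 = 0.625:
+// mid-range alphas are pushed up in proportion to the requested contrast.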
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma) {
+ const float src = (float)srcI / 255.0f;
+ const float linSrc = srcConvert.toLuma(srcGamma, src);
+ //Guess at the dst. The perceptual inverse provides smaller visual
+ //discontinuities when slight changes to desaturated colors cause a channel
+ //to map to a different correcting lut with neighboring srcI.
+ //See https://code.google.com/p/chromium/issues/detail?id=141425#c59 .
+ const float dst = 1.0f - src;
+ const float linDst = dstConvert.toLuma(dstGamma, dst);
+
+ //Contrast value tapers off to 0 as the src luminance becomes white
+ const float adjustedContrast = SkScalarToFloat(contrast) * linDst;
+
+ //Remove discontinuity and instability when src is close to dst.
+ //The value 1/256 is arbitrary and appears to contain the instability.
+ if (fabs(src - dst) < (1.0f / 256.0f)) {
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ table[i] = SkToU8(sk_float_round2int(255.0f * srca));
+ }
+ } else {
+ // Avoid slow int to float conversion.
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ // 'rawSrca += 1.0f / 255.0f' and even
+ // 'rawSrca = i * (1.0f / 255.0f)' can add up to more than 1.0f.
+ // When this happens the table[255] == 0x0 instead of 0xff.
+ // See http://code.google.com/p/chromium/issues/detail?id=146466
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ SkASSERT(srca <= 1.0f);
+ float dsta = 1.0f - srca;
+
+ //Calculate the output we want.
+ float linOut = (linSrc * srca + dsta * linDst);
+ SkASSERT(linOut <= 1.0f);
+ float out = dstConvert.fromLuma(dstGamma, linOut);
+
+ //Undo what the blit blend will do.
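+            //The blitter computes src * srca' + dst * (1 - srca'); setting that
+            //equal to the desired 'out' and solving gives
+            //srca' = (out - dst) / (src - dst), which is what gets tabled.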
+ float result = (out - dst) / (src - dst);
+ SkASSERT(sk_float_round2int(255.0f * result) <= 255);
+
+ table[i] = SkToU8(sk_float_round2int(255.0f * result));
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.h b/gfx/skia/skia/src/core/SkMaskGamma.h
new file mode 100644
index 0000000000..643deedd7e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskGamma_DEFINED
+#define SkMaskGamma_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkNoncopyable.h"
+
+/**
+ * SkColorSpaceLuminance is used to convert luminances to and from linear and
+ * perceptual color spaces.
+ *
+ * Luma is used to specify a linear luminance value [0.0, 1.0].
+ * Luminance is used to specify a luminance value in an arbitrary color space [0.0, 1.0].
+ */
+class SkColorSpaceLuminance : SkNoncopyable {
+public:
+ virtual ~SkColorSpaceLuminance() { }
+
+ /** Converts a color component luminance in the color space to a linear luma. */
+ virtual SkScalar toLuma(SkScalar gamma, SkScalar luminance) const = 0;
+ /** Converts a linear luma to a color component luminance in the color space. */
+ virtual SkScalar fromLuma(SkScalar gamma, SkScalar luma) const = 0;
+
+ /** Converts a color to a luminance value. */
+ static U8CPU computeLuminance(SkScalar gamma, SkColor c) {
+ const SkColorSpaceLuminance& luminance = Fetch(gamma);
+ SkScalar r = luminance.toLuma(gamma, SkIntToScalar(SkColorGetR(c)) / 255);
+ SkScalar g = luminance.toLuma(gamma, SkIntToScalar(SkColorGetG(c)) / 255);
+ SkScalar b = luminance.toLuma(gamma, SkIntToScalar(SkColorGetB(c)) / 255);
+ SkScalar luma = r * SK_LUM_COEFF_R +
+ g * SK_LUM_COEFF_G +
+ b * SK_LUM_COEFF_B;
+ SkASSERT(luma <= SK_Scalar1);
+ return SkScalarRoundToInt(luminance.fromLuma(gamma, luma) * 255);
+ }
+
+ /** Retrieves the SkColorSpaceLuminance for the given gamma. */
+ static const SkColorSpaceLuminance& Fetch(SkScalar gamma);
+};
+
+///@{
+/**
+ * Scales base <= 2^N-1 to 2^8-1
+ * @param N [1, 8] the number of bits used by base.
+ * @param base the number to be scaled to [0, 255].
+ */
+template<U8CPU N> static inline U8CPU sk_t_scale255(U8CPU base) {
+ base <<= (8 - N);
+ U8CPU lum = base;
+ for (unsigned int i = N; i < 8; i += N) {
+ lum |= base >> i;
+ }
+ return lum;
+}
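+// e.g. sk_t_scale255<3>(0b101) shifts to 0b10100000 and ORs in copies at
+// bits 3 and 6, giving 0b10110110 (0xB6 = 182, i.e. 5/7 of 255 rounded).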
+template<> /*static*/ inline U8CPU sk_t_scale255<1>(U8CPU base) {
+ return base * 0xFF;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<2>(U8CPU base) {
+ return base * 0x55;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<4>(U8CPU base) {
+ return base * 0x11;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<8>(U8CPU base) {
+ return base;
+}
+///@}
+
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend;
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma);
+
+/**
+ * A regular mask contains linear alpha values. A gamma correcting mask
+ * contains non-linear alpha values in an attempt to create gamma correct blits
+ * in the presence of a gamma incorrect (linear) blend in the blitter.
+ *
+ * SkMaskGamma creates and maintains tables which convert linear alpha values
+ * to gamma correcting alpha values.
+ * @param R The number of luminance bits to use [1, 8] from the red channel.
+ * @param G The number of luminance bits to use [1, 8] from the green channel.
+ * @param B The number of luminance bits to use [1, 8] from the blue channel.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskGamma : public SkRefCnt {
+
+public:
+
+ /** Creates a linear SkTMaskGamma. */
+ SkTMaskGamma() : fIsLinear(true) { }
+
+ /**
+ * Creates tables to convert linear alpha values to gamma correcting alpha
+ * values.
+ *
+ * @param contrast A value in the range [0.0, 1.0] which indicates the
+ * amount of artificial contrast to add.
+     * @param paintGamma The gamma of the color space in which the paint color was chosen.
+     * @param deviceGamma The gamma of the target device's color space.
+ */
+ SkTMaskGamma(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma) : fIsLinear(false) {
+ const SkColorSpaceLuminance& paintConvert = SkColorSpaceLuminance::Fetch(paintGamma);
+ const SkColorSpaceLuminance& deviceConvert = SkColorSpaceLuminance::Fetch(deviceGamma);
+ for (U8CPU i = 0; i < (1 << MAX_LUM_BITS); ++i) {
+ U8CPU lum = sk_t_scale255<MAX_LUM_BITS>(i);
+ SkTMaskGamma_build_correcting_lut(fGammaTables[i], lum, contrast,
+ paintConvert, paintGamma,
+ deviceConvert, deviceGamma);
+ }
+ }
+
+ /** Given a color, returns the closest canonical color. */
+ static SkColor CanonicalColor(SkColor color) {
+ return SkColorSetRGB(
+ sk_t_scale255<R_LUM_BITS>(SkColorGetR(color) >> (8 - R_LUM_BITS)),
+ sk_t_scale255<G_LUM_BITS>(SkColorGetG(color) >> (8 - G_LUM_BITS)),
+ sk_t_scale255<B_LUM_BITS>(SkColorGetB(color) >> (8 - B_LUM_BITS)));
+ }
+
+ /** The type of the mask pre-blend which will be returned from preBlend(SkColor). */
+ typedef SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS> PreBlend;
+
+ /**
+ * Provides access to the tables appropriate for converting linear alpha
+ * values into gamma correcting alpha values when drawing the given color
+ * through the mask. The destination color will be approximated.
+ */
+ PreBlend preBlend(SkColor color) const;
+
+ /**
+ * Get dimensions for the full table set, so it can be allocated as a block.
+ */
+ void getGammaTableDimensions(int* tableWidth, int* numTables) const {
+ *tableWidth = 256;
+ *numTables = (1 << MAX_LUM_BITS);
+ }
+
+ /**
+ * Provides direct access to the full table set, so it can be uploaded
+ * into a texture or analyzed in other ways.
+ * Returns nullptr if fGammaTables hasn't been initialized.
+ */
+ const uint8_t* getGammaTables() const {
+ return fIsLinear ? nullptr : (const uint8_t*) fGammaTables;
+ }
+
+private:
+ static const int MAX_LUM_BITS =
+ B_LUM_BITS > (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS)
+ ? B_LUM_BITS : (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS);
+ uint8_t fGammaTables[1 << MAX_LUM_BITS][256];
+ bool fIsLinear;
+
+ typedef SkRefCnt INHERITED;
+};
+
+
+/**
+ * SkTMaskPreBlend is a tear-off of SkTMaskGamma. It provides the tables to
+ * convert a linear alpha value for a given channel to a gamma correcting alpha
+ * value for that channel. This class is immutable.
+ *
+ * If fR, fG, or fB is nullptr, all of them will be. This indicates that no mask
+ * pre blend should be applied. SkTMaskPreBlend::isApplicable() is provided as
+ * a convenience function to test for the absence of this case.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend {
+private:
+ SkTMaskPreBlend(sk_sp<const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>> parent,
+ const uint8_t* r, const uint8_t* g, const uint8_t* b)
+ : fParent(std::move(parent)), fR(r), fG(g), fB(b) { }
+
+ sk_sp<const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>> fParent;
+ friend class SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>;
+public:
+ /** Creates a non applicable SkTMaskPreBlend. */
+ SkTMaskPreBlend() : fParent(), fR(nullptr), fG(nullptr), fB(nullptr) { }
+
+ /**
+     * This copy constructor exists for correctness, but should never be called
+ * when return value optimization is enabled.
+ */
+ SkTMaskPreBlend(const SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>& that)
+ : fParent(that.fParent), fR(that.fR), fG(that.fG), fB(that.fB) { }
+
+ ~SkTMaskPreBlend() { }
+
+ /** True if this PreBlend should be applied. When false, fR, fG, and fB are nullptr. */
+ bool isApplicable() const { return SkToBool(this->fG); }
+
+ const uint8_t* fR;
+ const uint8_t* fG;
+ const uint8_t* fB;
+};
+
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS>
+SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>
+SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>::preBlend(SkColor color) const {
+ return fIsLinear ? SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>()
+ : SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>(sk_ref_sp(this),
+ fGammaTables[SkColorGetR(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetG(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetB(color) >> (8 - MAX_LUM_BITS)]);
+}
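+
+// A minimal usage sketch (editorial; 'maskGamma', 'paintColor', and 'coverage'
+// are hypothetical locals): a blitter fetches the per-channel tables once per
+// color, then remaps each linear A8 coverage value through them.
+//
+//   auto blend = maskGamma.preBlend(paintColor);
+//   if (blend.isApplicable()) {
+//       U8CPU r = sk_apply_lut_if<true>(coverage, blend.fR);
+//       // ...and likewise blend.fG / blend.fB for the other channels.
+//   }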
+
+///@{
+/**
+ * If APPLY_LUT is false, returns component unchanged.
+ * If APPLY_LUT is true, returns lut[component].
+ * @param APPLY_LUT whether or not the look-up table should be applied to component.
+ * @param component the initial component.
+ * @param lut a look-up table which transforms the component.
+ */
+template<bool APPLY_LUT> static inline U8CPU sk_apply_lut_if(U8CPU component, const uint8_t*) {
+ return component;
+}
+template<> /*static*/ inline U8CPU sk_apply_lut_if<true>(U8CPU component, const uint8_t* lut) {
+ return lut[component];
+}
+///@}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMath.cpp b/gfx/skia/skia/src/core/SkMath.cpp
new file mode 100644
index 0000000000..8b2df0e640
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMath.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkScalar.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkFloatingPoint.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkSafeMath.h"
+
+#define sub_shift(zeros, x, n) \
+ zeros -= n; \
+ x >>= n
+
+int SkCLZ_portable(uint32_t x) {
+ if (x == 0) {
+ return 32;
+ }
+
+ int zeros = 31;
+ if (x & 0xFFFF0000) {
+ sub_shift(zeros, x, 16);
+ }
+ if (x & 0xFF00) {
+ sub_shift(zeros, x, 8);
+ }
+ if (x & 0xF0) {
+ sub_shift(zeros, x, 4);
+ }
+ if (x & 0xC) {
+ sub_shift(zeros, x, 2);
+ }
+ if (x & 0x2) {
+ sub_shift(zeros, x, 1);
+ }
+
+ return zeros;
+}
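+// e.g. SkCLZ_portable(0x00010000): only the 16-bit probe fires, leaving
+// zeros = 31 - 16 = 15, the count of leading zero bits.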
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* www.worldserver.com/turk/computergraphics/FixedSqrt.pdf
+*/
+int32_t SkSqrtBits(int32_t x, int count) {
+ SkASSERT(x >= 0 && count > 0 && (unsigned)count <= 30);
+
+ uint32_t root = 0;
+ uint32_t remHi = 0;
+ uint32_t remLo = x;
+
+ do {
+ root <<= 1;
+
+ remHi = (remHi<<2) | (remLo>>30);
+ remLo <<= 2;
+
+ uint32_t testDiv = (root << 1) + 1;
+ if (remHi >= testDiv) {
+ remHi -= testDiv;
+ root++;
+ }
+ } while (--count >= 0);
+
+ return root;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkSafeMath::Add(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t sum = tmp.add(x, y);
+ return tmp.ok() ? sum : SIZE_MAX;
+}
+
+size_t SkSafeMath::Mul(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t prod = tmp.mul(x, y);
+ return tmp.ok() ? prod : SIZE_MAX;
+}
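+
+// Both helpers saturate rather than wrap: e.g. SkSafeMath::Mul(SIZE_MAX, 2)
+// overflows internally and therefore returns SIZE_MAX.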
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool sk_floats_are_unit(const float array[], size_t count) {
+ bool is_unit = true;
+ for (size_t i = 0; i < count; ++i) {
+ is_unit &= (array[i] >= 0) & (array[i] <= 1);
+ }
+ return is_unit;
+}
diff --git a/gfx/skia/skia/src/core/SkMathPriv.h b/gfx/skia/skia/src/core/SkMathPriv.h
new file mode 100644
index 0000000000..d0ce3b2be8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMathPriv.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMathPriv_DEFINED
+#define SkMathPriv_DEFINED
+
+#include "include/core/SkMath.h"
+
+/**
+ * Return the integer square root of value, with a bias of bitBias
+ */
+int32_t SkSqrtBits(int32_t value, int bitBias);
+
+/** Return the integer square root of n, treated as a SkFixed (16.16)
+ */
+static inline int32_t SkSqrt32(int32_t n) { return SkSqrtBits(n, 15); }
+
+/**
+ * Returns (value < 0 ? 0 : value) efficiently (i.e. no compares or branches)
+ */
+static inline int SkClampPos(int value) {
+ return value & ~(value >> 31);
+}
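+// e.g. value = -5: value >> 31 is -1 (arithmetic shift), ~(-1) is 0, and the
+// AND clamps to 0; non-negative values pass through since ~0 is all ones.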
+
+/**
+ * Stores numer/denom and numer%denom into div and mod respectively.
+ */
+template <typename In, typename Out>
+inline void SkTDivMod(In numer, In denom, Out* div, Out* mod) {
+#ifdef SK_CPU_ARM32
+ // If we wrote this as in the else branch, GCC won't fuse the two into one
+ // divmod call, but rather a div call followed by a divmod. Silly! This
+ // version is just as fast as calling __aeabi_[u]idivmod manually, but with
+ // prettier code.
+ //
+ // This benches as around 2x faster than the code in the else branch.
+ const In d = numer/denom;
+ *div = static_cast<Out>(d);
+ *mod = static_cast<Out>(numer-d*denom);
+#else
+ // On x86 this will just be a single idiv.
+ *div = static_cast<Out>(numer/denom);
+ *mod = static_cast<Out>(numer%denom);
+#endif
+}
+
+/** Returns -1 if n < 0, else returns 0
+ */
+#define SkExtractSign(n) ((int32_t)(n) >> 31)
+
+/** If sign == -1, returns -n, else sign must be 0, and returns n.
+ Typically used in conjunction with SkExtractSign().
+ */
+static inline int32_t SkApplySign(int32_t n, int32_t sign) {
+ SkASSERT(sign == 0 || sign == -1);
+ return (n ^ sign) - sign;
+}
+
+/** Return x with the sign of y */
+static inline int32_t SkCopySign32(int32_t x, int32_t y) {
+ return SkApplySign(x, SkExtractSign(x ^ y));
+}
+
+/** Given a positive value and a positive max, return the value
+ pinned against max.
+ Note: only works as long as max - value doesn't wrap around
+ @return max if value >= max, else value
+ */
+static inline unsigned SkClampUMax(unsigned value, unsigned max) {
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+// If a signed int holds min_int (e.g. 0x80000000) it is undefined what happens when
+// we negate it (even though we *know* we're 2's complement and we'll get the same
+// value back). So we create this helper function that casts to size_t (unsigned) first,
+// to avoid the complaint.
+static inline size_t sk_negate_to_size_t(int32_t value) {
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4146) // Thanks MSVC, we know what we're negating an unsigned
+#endif
+ return -static_cast<size_t>(value);
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Return a*b/255, truncating away any fractional bits. Only valid if both
+ a and b are 0..255
+ */
+static inline U8CPU SkMulDiv255Trunc(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 1;
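+    // Classic reciprocal trick: (prod + (prod >> 8)) >> 8 evaluates close to
+    // prod * 257 / 65536, i.e. prod / 255; the +1 bias above makes it the
+    // exact truncating quotient for a, b in 0..255.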
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Return (a*b)/255, taking the ceiling of any fractional bits. Only valid if
+ both a and b are 0..255. The expected result equals (a * b + 254) / 255.
+ */
+static inline U8CPU SkMulDiv255Ceiling(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 255;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Just the rounding step in SkDiv255Round: round(value / 255)
+ */
+static inline unsigned SkDiv255Round(unsigned prod) {
+ prod += 128;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+static inline float SkPinToUnitFloat(float x) {
+ return SkTMin(SkTMax(x, 0.0f), 1.0f);
+}
+
+/**
+ * Swap byte order of a 4-byte value, e.g. 0xaarrggbb -> 0xbbggrraa.
+ */
+#if defined(_MSC_VER)
+ #include <stdlib.h>
+ static inline uint32_t SkBSwap32(uint32_t v) { return _byteswap_ulong(v); }
+#else
+ static inline uint32_t SkBSwap32(uint32_t v) { return __builtin_bswap32(v); }
+#endif
+
+//! Returns the number of leading zero bits (0...32)
+int SkCLZ_portable(uint32_t);
+
+#ifndef SkCLZ
+ #if defined(SK_BUILD_FOR_WIN)
+ #include <intrin.h>
+
+ static inline int SkCLZ(uint32_t mask) {
+ if (mask) {
+ unsigned long index;
+ _BitScanReverse(&index, mask);
+ // Suppress this bogus /analyze warning. The check for non-zero
+ // guarantees that _BitScanReverse will succeed.
+#pragma warning(suppress : 6102) // Using 'index' from failed function call
+ return index ^ 0x1F;
+ } else {
+ return 32;
+ }
+ }
+ #elif defined(SK_CPU_ARM32) || defined(__GNUC__) || defined(__clang__)
+ static inline int SkCLZ(uint32_t mask) {
+ // __builtin_clz(0) is undefined, so we have to detect that case.
+ return mask ? __builtin_clz(mask) : 32;
+ }
+ #else
+ #define SkCLZ(x) SkCLZ_portable(x)
+ #endif
+#endif
+
+/**
+ * Returns the smallest power-of-2 that is >= the specified value. If value
+ * is already a power of 2, then it is returned unchanged. It is undefined
+ * if value is <= 0.
+ */
+static inline int SkNextPow2(int value) {
+ SkASSERT(value > 0);
+ return 1 << (32 - SkCLZ(value - 1));
+}
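+// e.g. SkNextPow2(17): SkCLZ(16) == 27, so the result is 1 << (32 - 27) == 32.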
+
+/**
+* Returns the largest power-of-2 that is <= the specified value. If value
+* is already a power of 2, then it is returned unchanged. It is undefined
+* if value is <= 0.
+*/
+static inline int SkPrevPow2(int value) {
+ SkASSERT(value > 0);
+ return 1 << (32 - SkCLZ(value >> 1));
+}
+
+/**
+ * Returns the log2 of the specified value, were that value to be rounded up
+ * to the next power of 2. It is undefined to pass 0. Examples:
+ * SkNextLog2(1) -> 0
+ * SkNextLog2(2) -> 1
+ * SkNextLog2(3) -> 2
+ * SkNextLog2(4) -> 2
+ * SkNextLog2(5) -> 3
+ */
+static inline int SkNextLog2(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ(value - 1);
+}
+
+/**
+* Returns the log2 of the specified value, were that value to be rounded down
+* to the previous power of 2. It is undefined to pass 0. Examples:
+* SkPrevLog2(1) -> 0
+* SkPrevLog2(2) -> 1
+* SkPrevLog2(3) -> 1
+* SkPrevLog2(4) -> 2
+* SkPrevLog2(5) -> 2
+*/
+static inline int SkPrevLog2(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ(value >> 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return the smallest power-of-2 >= n.
+ */
+static inline uint32_t GrNextPow2(uint32_t n) {
+ return n ? (1 << (32 - SkCLZ(n - 1))) : 1;
+}
+
+/**
+ * Returns the next power of 2 >= n or n if the next power of 2 can't be represented by size_t.
+ */
+static inline size_t GrNextSizePow2(size_t n) {
+ constexpr int kNumSizeTBits = 8 * sizeof(size_t);
+ constexpr size_t kHighBitSet = size_t(1) << (kNumSizeTBits - 1);
+
+ if (!n) {
+ return 1;
+ } else if (n >= kHighBitSet) {
+ return n;
+ }
+
+ n--;
+ uint32_t shift = 1;
+ while (shift < kNumSizeTBits) {
+ n |= n >> shift;
+ shift <<= 1;
+ }
+ return n + 1;
+}
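+// The shift loop smears the high bit across all lower bits: e.g. n = 72
+// becomes 71 = 0b1000111 after n--, smears to 127 = 0b1111111, and returns 128.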
+
+// Conservative check: will return false for very large values that "could" fit.
+template <typename T> static inline bool SkFitsInFixed(T x) {
+ return SkTAbs(x) <= 32767.0f;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrix.cpp b/gfx/skia/skia/src/core/SkMatrix.cpp
new file mode 100644
index 0000000000..268308ffd4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrix.cpp
@@ -0,0 +1,1833 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkString.h"
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMatrixPriv.h"
+
+#include <cstddef>
+#include <utility>
+
+static void normalize_perspective(SkScalar mat[9]) {
+ // If it was interesting to never store the last element, we could divide all 8 other
+ // elements here by the 9th, making it 1.0...
+ //
+ // When SkScalar was SkFixed, we would sometimes rescale the entire matrix to keep its
+ // component values from getting too large. This is not a concern when using floats/doubles,
+ // so we do nothing now.
+
+ // Disable this for now, but it could be enabled.
+#if 0
+ if (0 == mat[SkMatrix::kMPersp0] && 0 == mat[SkMatrix::kMPersp1]) {
+ SkScalar p2 = mat[SkMatrix::kMPersp2];
+ if (p2 != 0 && p2 != 1) {
+ double inv = 1.0 / p2;
+ for (int i = 0; i < 6; ++i) {
+ mat[i] = SkDoubleToScalar(mat[i] * inv);
+ }
+ mat[SkMatrix::kMPersp2] = 1;
+ }
+ }
+#endif
+}
+
+// In a few places, we performed the following
+// a * b + c * d + e
+// as
+// a * b + (c * d + e)
+//
+// sdot and scross are intended to capture these compound operations into a
+// function, with an eye toward considering upscaling the intermediates to
+// doubles for more precision (as we do in concat and invert).
+//
+// However, the few lines that performed the last add before the "dot" cause
+// tiny image differences, so we guard that change until we see the impact on
+// chrome's layout tests.
+//
+#define SK_LEGACY_MATRIX_MATH_ORDER
+
+/* [scale-x skew-x trans-x] [X] [X']
+ [skew-y scale-y trans-y] * [Y] = [Y']
+ [persp-0 persp-1 persp-2] [1] [1 ]
+*/
+
+SkMatrix& SkMatrix::reset() { *this = SkMatrix(); return *this; }
+
+SkMatrix& SkMatrix::set9(const SkScalar buffer[]) {
+ memcpy(fMat, buffer, 9 * sizeof(SkScalar));
+ normalize_perspective(fMat);
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setAffine(const SkScalar buffer[]) {
+ fMat[kMScaleX] = buffer[kAScaleX];
+ fMat[kMSkewX] = buffer[kASkewX];
+ fMat[kMTransX] = buffer[kATransX];
+ fMat[kMSkewY] = buffer[kASkewY];
+ fMat[kMScaleY] = buffer[kAScaleY];
+ fMat[kMTransY] = buffer[kATransY];
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+}
+
+// this guy aligns with the masks, so we can compute a mask from a variable 0/1
+enum {
+ kTranslate_Shift,
+ kScale_Shift,
+ kAffine_Shift,
+ kPerspective_Shift,
+ kRectStaysRect_Shift
+};
+
+static const int32_t kScalar1Int = 0x3f800000;
+
+uint8_t SkMatrix::computePerspectiveTypeMask() const {
+ // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment
+ // is a win, but replacing those below is not. We don't yet understand
+ // that result.
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+ // If this is a perspective transform, we return true for all other
+ // transform flags - this does not disable any optimizations, respects
+ // the rule that the type mask must be conservative, and speeds up
+ // type mask computation.
+ return SkToU8(kORableMasks);
+ }
+
+ return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask);
+}
+
+uint8_t SkMatrix::computeTypeMask() const {
+ unsigned mask = 0;
+
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+    // Once it is determined that this is a perspective transform,
+ // all other flags are moot as far as optimizations are concerned.
+ return SkToU8(kORableMasks);
+ }
+
+ if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) {
+ mask |= kTranslate_Mask;
+ }
+
+ int m00 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleX]);
+ int m01 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewX]);
+ int m10 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewY]);
+ int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]);
+
+ if (m01 | m10) {
+ // The skew components may be scale-inducing, unless we are dealing
+ // with a pure rotation. Testing for a pure rotation is expensive,
+    // so we opt for being conservative by always setting the scale bit
+    // along with affine.
+ // By doing this, we are also ensuring that matrices have the same
+ // type masks as their inverses.
+ mask |= kAffine_Mask | kScale_Mask;
+
+ // For rectStaysRect, in the affine case, we only need check that
+ // the primary diagonal is all zeros and that the secondary diagonal
+ // is all non-zero.
+
+ // map non-zero to 1
+ m01 = m01 != 0;
+ m10 = m10 != 0;
+
+ int dp0 = 0 == (m00 | m11) ; // true if both are 0
+ int ds1 = m01 & m10; // true if both are 1
+
+ mask |= (dp0 & ds1) << kRectStaysRect_Shift;
+ } else {
+ // Only test for scale explicitly if not affine, since affine sets the
+ // scale bit.
+ if ((m00 ^ kScalar1Int) | (m11 ^ kScalar1Int)) {
+ mask |= kScale_Mask;
+ }
+
+ // Not affine, therefore we already know secondary diagonal is
+ // all zeros, so we just need to check that primary diagonal is
+ // all non-zero.
+
+ // map non-zero to 1
+ m00 = m00 != 0;
+ m11 = m11 != 0;
+
+ // record if the (p)rimary diagonal is all non-zero
+ mask |= (m00 & m11) << kRectStaysRect_Shift;
+ }
+
+ return SkToU8(mask);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool operator==(const SkMatrix& a, const SkMatrix& b) {
+ const SkScalar* SK_RESTRICT ma = a.fMat;
+ const SkScalar* SK_RESTRICT mb = b.fMat;
+
+ return ma[0] == mb[0] && ma[1] == mb[1] && ma[2] == mb[2] &&
+ ma[3] == mb[3] && ma[4] == mb[4] && ma[5] == mb[5] &&
+ ma[6] == mb[6] && ma[7] == mb[7] && ma[8] == mb[8];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// helper function to determine if upper-left 2x2 of matrix is degenerate
+static inline bool is_degenerate_2x2(SkScalar scaleX, SkScalar skewX,
+ SkScalar skewY, SkScalar scaleY) {
+ SkScalar perp_dot = scaleX*scaleY - skewX*skewY;
+ return SkScalarNearlyZero(perp_dot, SK_ScalarNearlyZero*SK_ScalarNearlyZero);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::isSimilarity(SkScalar tol) const {
+ // if identity or translate matrix
+ TypeMask mask = this->getType();
+ if (mask <= kTranslate_Mask) {
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ // if no skew, can just compare scale factors
+ if (!(mask & kAffine_Mask)) {
+ return !SkScalarNearlyZero(mx) && SkScalarNearlyEqual(SkScalarAbs(mx), SkScalarAbs(my));
+ }
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is rotation/reflection + uniform scale if basis vectors
+ // are 90 degree rotations of each other
+ return (SkScalarNearlyEqual(mx, my, tol) && SkScalarNearlyEqual(sx, -sy, tol))
+ || (SkScalarNearlyEqual(mx, -my, tol) && SkScalarNearlyEqual(sx, sy, tol));
+}
+
+bool SkMatrix::preservesRightAngles(SkScalar tol) const {
+ TypeMask mask = this->getType();
+
+ if (mask <= kTranslate_Mask) {
+ // identity, translate and/or scale
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkASSERT(mask & (kAffine_Mask | kScale_Mask));
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is scale + rotation/reflection if basis vectors are orthogonal
+ SkVector vec[2];
+ vec[0].set(mx, sy);
+ vec[1].set(sx, my);
+
+ return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b + c * d;
+}
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar e, SkScalar f) {
+ return a * b + c * d + e * f;
+}
+
+static inline SkScalar scross(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b - c * d;
+}
+
+SkMatrix& SkMatrix::setTranslate(SkScalar dx, SkScalar dy) {
+ *this = SkMatrix(1, 0, dx,
+ 0, 1, dy,
+ 0, 0, 1,
+ (dx != 0 || dy != 0) ? kTranslate_Mask | kRectStaysRect_Mask
+ : kIdentity_Mask | kRectStaysRect_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preTranslate(SkScalar dx, SkScalar dy) {
+ const unsigned mask = this->getType();
+
+ if (mask <= kTranslate_Mask) {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ } else if (mask & kPerspective_Mask) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ return this->preConcat(m);
+ } else {
+ fMat[kMTransX] += sdot(fMat[kMScaleX], dx, fMat[kMSkewX], dy);
+ fMat[kMTransY] += sdot(fMat[kMSkewY], dx, fMat[kMScaleY], dy);
+ }
+ this->updateTranslateMask();
+ return *this;
+}
+
+SkMatrix& SkMatrix::postTranslate(SkScalar dx, SkScalar dy) {
+ if (this->hasPerspective()) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ this->postConcat(m);
+ } else {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ this->updateTranslateMask();
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ this->reset();
+ } else {
+ this->setScaleTranslate(sx, sy, px - sx * px, py - sy * py);
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::setScale(SkScalar sx, SkScalar sy) {
+ *this = SkMatrix(sx, 0, 0,
+ 0, sy, 0,
+ 0, 0, 1,
+ (sx == 1 && sy == 1) ? kIdentity_Mask | kRectStaysRect_Mask
+ : kScale_Mask | kRectStaysRect_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+
+ // the assumption is that these multiplies are very cheap, and that
+ // a full concat and/or just computing the matrix type is more expensive.
+ // Also, the fixed-point case checks for overflow, but the float doesn't,
+ // so we can get away with these blind multiplies.
+
+ fMat[kMScaleX] *= sx;
+ fMat[kMSkewY] *= sx;
+ fMat[kMPersp0] *= sx;
+
+ fMat[kMSkewX] *= sy;
+ fMat[kMScaleY] *= sy;
+ fMat[kMPersp1] *= sy;
+
+ // Attempt to simplify our type when applying an inverse scale.
+ // TODO: The persp/affine preconditions are in place to keep the mask consistent with
+ // what computeTypeMask() would produce (persp/skew always implies kScale).
+ // We should investigate whether these flag dependencies are truly needed.
+ if (fMat[kMScaleX] == 1 && fMat[kMScaleY] == 1
+ && !(fTypeMask & (kPerspective_Mask | kAffine_Mask))) {
+ this->clearTypeMask(kScale_Mask);
+ } else {
+ this->orTypeMask(kScale_Mask);
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy);
+ return this->postConcat(m);
+}
+
+// this guy perhaps can go away, if we have a fract/high-precision way to
+// scale matrices
+bool SkMatrix::postIDiv(int divx, int divy) {
+ if (divx == 0 || divy == 0) {
+ return false;
+ }
+
+ const float invX = 1.f / divx;
+ const float invY = 1.f / divy;
+
+ fMat[kMScaleX] *= invX;
+ fMat[kMSkewX] *= invX;
+ fMat[kMTransX] *= invX;
+
+ fMat[kMScaleY] *= invY;
+ fMat[kMSkewY] *= invY;
+ fMat[kMTransY] *= invY;
+
+ this->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV, SkScalar px, SkScalar py) {
+ const SkScalar oneMinusCosV = 1 - cosV;
+
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = sdot(sinV, py, oneMinusCosV, px);
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = sdot(-sinV, px, oneMinusCosV, py);
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setRSXform(const SkRSXform& xform) {
+ fMat[kMScaleX] = xform.fSCos;
+ fMat[kMSkewX] = -xform.fSSin;
+ fMat[kMTransX] = xform.fTx;
+
+ fMat[kMSkewY] = xform.fSSin;
+ fMat[kMScaleY] = xform.fSCos;
+ fMat[kMTransY] = xform.fTy;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV) {
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkScalar rad = SkDegreesToRadians(degrees);
+ return this->setSinCos(SkScalarSinSnapToZero(rad), SkScalarCosSnapToZero(rad), px, py);
+}
+
+SkMatrix& SkMatrix::setRotate(SkScalar degrees) {
+ SkScalar rad = SkDegreesToRadians(degrees);
+ return this->setSinCos(SkScalarSinSnapToZero(rad), SkScalarCosSnapToZero(rad));
+}
+
+SkMatrix& SkMatrix::preRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::postRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ return this->postConcat(m);
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ *this = SkMatrix(1, sx, -sx * py,
+ sy, 1, -sy * px,
+ 0, 0, 1,
+ kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setSkew(SkScalar sx, SkScalar sy) {
+ fMat[kMScaleX] = 1;
+ fMat[kMSkewX] = sx;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sy;
+ fMat[kMScaleY] = 1;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::postSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ return this->postConcat(m);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit align) {
+ if (src.isEmpty()) {
+ this->reset();
+ return false;
+ }
+
+ if (dst.isEmpty()) {
+ sk_bzero(fMat, 8 * sizeof(SkScalar));
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kScale_Mask | kRectStaysRect_Mask);
+ } else {
+ SkScalar tx, sx = dst.width() / src.width();
+ SkScalar ty, sy = dst.height() / src.height();
+ bool xLarger = false;
+
+ if (align != kFill_ScaleToFit) {
+ if (sx > sy) {
+ xLarger = true;
+ sx = sy;
+ } else {
+ sy = sx;
+ }
+ }
+
+ tx = dst.fLeft - src.fLeft * sx;
+ ty = dst.fTop - src.fTop * sy;
+ if (align == kCenter_ScaleToFit || align == kEnd_ScaleToFit) {
+ SkScalar diff;
+
+ if (xLarger) {
+ diff = dst.width() - src.width() * sy;
+ } else {
+ diff = dst.height() - src.height() * sy;
+ }
+
+ if (align == kCenter_ScaleToFit) {
+ diff = SkScalarHalf(diff);
+ }
+
+ if (xLarger) {
+ tx += diff;
+ } else {
+ ty += diff;
+ }
+ }
+
+ this->setScaleTranslate(sx, sy, tx, ty);
+ }
+ return true;
+}
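+
+// Worked example (editorial): src = {0,0,100,50} into dst = {0,0,200,200} with
+// kCenter_ScaleToFit picks the smaller scale (sx = sy = 2), then centers the
+// leftover 100 units of height, yielding scale (2,2) and translate (0,50).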
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline float muladdmul(float a, float b, float c, float d) {
+ return sk_double_to_float((double)a * b + (double)c * d);
+}
+
+static inline float rowcol3(const float row[], const float col[]) {
+ return row[0] * col[0] + row[1] * col[3] + row[2] * col[6];
+}
+
+static bool only_scale_and_translate(unsigned mask) {
+ return 0 == (mask & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask));
+}
+
+SkMatrix& SkMatrix::setConcat(const SkMatrix& a, const SkMatrix& b) {
+ TypeMask aType = a.getType();
+ TypeMask bType = b.getType();
+
+ if (a.isTriviallyIdentity()) {
+ *this = b;
+ } else if (b.isTriviallyIdentity()) {
+ *this = a;
+ } else if (only_scale_and_translate(aType | bType)) {
+ this->setScaleTranslate(a.fMat[kMScaleX] * b.fMat[kMScaleX],
+ a.fMat[kMScaleY] * b.fMat[kMScaleY],
+ a.fMat[kMScaleX] * b.fMat[kMTransX] + a.fMat[kMTransX],
+ a.fMat[kMScaleY] * b.fMat[kMTransY] + a.fMat[kMTransY]);
+ } else {
+ SkMatrix tmp;
+
+ if ((aType | bType) & kPerspective_Mask) {
+ tmp.fMat[kMScaleX] = rowcol3(&a.fMat[0], &b.fMat[0]);
+ tmp.fMat[kMSkewX] = rowcol3(&a.fMat[0], &b.fMat[1]);
+ tmp.fMat[kMTransX] = rowcol3(&a.fMat[0], &b.fMat[2]);
+ tmp.fMat[kMSkewY] = rowcol3(&a.fMat[3], &b.fMat[0]);
+ tmp.fMat[kMScaleY] = rowcol3(&a.fMat[3], &b.fMat[1]);
+ tmp.fMat[kMTransY] = rowcol3(&a.fMat[3], &b.fMat[2]);
+ tmp.fMat[kMPersp0] = rowcol3(&a.fMat[6], &b.fMat[0]);
+ tmp.fMat[kMPersp1] = rowcol3(&a.fMat[6], &b.fMat[1]);
+ tmp.fMat[kMPersp2] = rowcol3(&a.fMat[6], &b.fMat[2]);
+
+ normalize_perspective(tmp.fMat);
+ tmp.setTypeMask(kUnknown_Mask);
+ } else {
+ tmp.fMat[kMScaleX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMScaleX],
+ a.fMat[kMSkewX],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMSkewX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMSkewX],
+ a.fMat[kMSkewX],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMTransX],
+ a.fMat[kMSkewX],
+ b.fMat[kMTransY]) + a.fMat[kMTransX];
+
+ tmp.fMat[kMSkewY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMScaleX],
+ a.fMat[kMScaleY],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMScaleY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMSkewX],
+ a.fMat[kMScaleY],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMTransX],
+ a.fMat[kMScaleY],
+ b.fMat[kMTransY]) + a.fMat[kMTransY];
+
+ tmp.fMat[kMPersp0] = 0;
+ tmp.fMat[kMPersp1] = 0;
+ tmp.fMat[kMPersp2] = 1;
+ //SkDebugf("Concat mat non-persp type: %d\n", tmp.getType());
+ //SkASSERT(!(tmp.getType() & kPerspective_Mask));
+ tmp.setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ }
+ *this = tmp;
+ }
+ return *this;
+}
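+
+// Ordering note (illustrative): setConcat(a, b) computes the product a * b,
+// so when the result maps a point, b is applied first and a second:
+//
+//    SkMatrix s, t, m;
+//    s.setScale(2, 2);
+//    t.setTranslate(10, 0);
+//    m.setConcat(t, s);    // m maps p -> t(s(p)) = 2*p + (10, 0)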
+
+SkMatrix& SkMatrix::preConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+    if (!mat.isIdentity()) {
+ this->setConcat(*this, mat);
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::postConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+ if (!mat.isIdentity()) {
+ this->setConcat(mat, *this);
+ }
+ return *this;
+}
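+
+// In other words, preConcat(m) is setConcat(*this, m), applying m to points
+// before this matrix, while postConcat(m) is setConcat(m, *this), applying
+// m after it.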
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Matrix inversion is very expensive, but it is also where keeping
+   precision matters most (both here and in matrix concatenation). Hence,
+   to avoid bitmap-blitting artifacts when walking the inverse, we use
+   doubles for the intermediate math, even though we know that is more
+   expensive.
+ */
+
+static inline SkScalar scross_dscale(SkScalar a, SkScalar b,
+ SkScalar c, SkScalar d, double scale) {
+ return SkDoubleToScalar(scross(a, b, c, d) * scale);
+}
+
+static inline double dcross(double a, double b, double c, double d) {
+ return a * b - c * d;
+}
+
+static inline SkScalar dcross_dscale(double a, double b,
+ double c, double d, double scale) {
+ return SkDoubleToScalar(dcross(a, b, c, d) * scale);
+}
+
+static double sk_inv_determinant(const float mat[9], int isPerspective) {
+ double det;
+
+ if (isPerspective) {
+ det = mat[SkMatrix::kMScaleX] *
+ dcross(mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp2],
+ mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp1])
+ +
+ mat[SkMatrix::kMSkewX] *
+ dcross(mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp0],
+ mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp2])
+ +
+ mat[SkMatrix::kMTransX] *
+ dcross(mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp1],
+ mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp0]);
+ } else {
+ det = dcross(mat[SkMatrix::kMScaleX], mat[SkMatrix::kMScaleY],
+ mat[SkMatrix::kMSkewX], mat[SkMatrix::kMSkewY]);
+ }
+
+ // Since the determinant is on the order of the cube of the matrix members,
+ // compare to the cube of the default nearly-zero constant (although an
+ // estimate of the condition number would be better if it wasn't so expensive).
+ if (SkScalarNearlyZero(sk_double_to_float(det),
+ SK_ScalarNearlyZero * SK_ScalarNearlyZero * SK_ScalarNearlyZero)) {
+ return 0;
+ }
+ return 1.0 / det;
+}
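+
+// For scale reference: SK_ScalarNearlyZero is 1/(1 << 12) (~2.4e-4), so the
+// cubed tolerance above is roughly 1.5e-11; determinants smaller than that
+// are treated as zero and the matrix is reported as non-invertible.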
+
+void SkMatrix::SetAffineIdentity(SkScalar affine[6]) {
+ affine[kAScaleX] = 1;
+ affine[kASkewY] = 0;
+ affine[kASkewX] = 0;
+ affine[kAScaleY] = 1;
+ affine[kATransX] = 0;
+ affine[kATransY] = 0;
+}
+
+bool SkMatrix::asAffine(SkScalar affine[6]) const {
+ if (this->hasPerspective()) {
+ return false;
+ }
+ if (affine) {
+ affine[kAScaleX] = this->fMat[kMScaleX];
+ affine[kASkewY] = this->fMat[kMSkewY];
+ affine[kASkewX] = this->fMat[kMSkewX];
+ affine[kAScaleY] = this->fMat[kMScaleY];
+ affine[kATransX] = this->fMat[kMTransX];
+ affine[kATransY] = this->fMat[kMTransY];
+ }
+ return true;
+}
+
+void SkMatrix::mapPoints(SkPoint dst[], const SkPoint src[], int count) const {
+ SkASSERT((dst && src && count > 0) || 0 == count);
+ // no partial overlap
+ SkASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]);
+ this->getMapPtsProc()(*this, dst, src, count);
+}
+
+void SkMatrix::mapXY(SkScalar x, SkScalar y, SkPoint* result) const {
+ SkASSERT(result);
+ this->getMapXYProc()(*this, x, y, result);
+}
+
+void SkMatrix::ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp) {
+ SkASSERT(src != dst);
+ SkASSERT(src && dst);
+
+ if (isPersp) {
+ dst[kMScaleX] = scross_dscale(src[kMScaleY], src[kMPersp2], src[kMTransY], src[kMPersp1], invDet);
+ dst[kMSkewX] = scross_dscale(src[kMTransX], src[kMPersp1], src[kMSkewX], src[kMPersp2], invDet);
+ dst[kMTransX] = scross_dscale(src[kMSkewX], src[kMTransY], src[kMTransX], src[kMScaleY], invDet);
+
+ dst[kMSkewY] = scross_dscale(src[kMTransY], src[kMPersp0], src[kMSkewY], src[kMPersp2], invDet);
+ dst[kMScaleY] = scross_dscale(src[kMScaleX], src[kMPersp2], src[kMTransX], src[kMPersp0], invDet);
+ dst[kMTransY] = scross_dscale(src[kMTransX], src[kMSkewY], src[kMScaleX], src[kMTransY], invDet);
+
+ dst[kMPersp0] = scross_dscale(src[kMSkewY], src[kMPersp1], src[kMScaleY], src[kMPersp0], invDet);
+ dst[kMPersp1] = scross_dscale(src[kMSkewX], src[kMPersp0], src[kMScaleX], src[kMPersp1], invDet);
+ dst[kMPersp2] = scross_dscale(src[kMScaleX], src[kMScaleY], src[kMSkewX], src[kMSkewY], invDet);
+ } else { // not perspective
+ dst[kMScaleX] = SkDoubleToScalar(src[kMScaleY] * invDet);
+ dst[kMSkewX] = SkDoubleToScalar(-src[kMSkewX] * invDet);
+ dst[kMTransX] = dcross_dscale(src[kMSkewX], src[kMTransY], src[kMScaleY], src[kMTransX], invDet);
+
+ dst[kMSkewY] = SkDoubleToScalar(-src[kMSkewY] * invDet);
+ dst[kMScaleY] = SkDoubleToScalar(src[kMScaleX] * invDet);
+ dst[kMTransY] = dcross_dscale(src[kMSkewY], src[kMTransX], src[kMScaleX], src[kMTransY], invDet);
+
+ dst[kMPersp0] = 0;
+ dst[kMPersp1] = 0;
+ dst[kMPersp2] = 1;
+ }
+}
+
+bool SkMatrix::invertNonIdentity(SkMatrix* inv) const {
+ SkASSERT(!this->isIdentity());
+
+ TypeMask mask = this->getType();
+
+ if (0 == (mask & ~(kScale_Mask | kTranslate_Mask))) {
+ bool invertible = true;
+ if (inv) {
+ if (mask & kScale_Mask) {
+ SkScalar invX = fMat[kMScaleX];
+ SkScalar invY = fMat[kMScaleY];
+ if (0 == invX || 0 == invY) {
+ return false;
+ }
+ invX = SkScalarInvert(invX);
+ invY = SkScalarInvert(invY);
+
+ // Must be careful when writing to inv, since it may be the
+ // same memory as this.
+
+ inv->fMat[kMSkewX] = inv->fMat[kMSkewY] =
+ inv->fMat[kMPersp0] = inv->fMat[kMPersp1] = 0;
+
+ inv->fMat[kMScaleX] = invX;
+ inv->fMat[kMScaleY] = invY;
+ inv->fMat[kMPersp2] = 1;
+ inv->fMat[kMTransX] = -fMat[kMTransX] * invX;
+ inv->fMat[kMTransY] = -fMat[kMTransY] * invY;
+
+ inv->setTypeMask(mask | kRectStaysRect_Mask);
+ } else {
+ // translate only
+ inv->setTranslate(-fMat[kMTransX], -fMat[kMTransY]);
+ }
+ } else { // inv is nullptr, just check if we're invertible
+ if (!fMat[kMScaleX] || !fMat[kMScaleY]) {
+ invertible = false;
+ }
+ }
+ return invertible;
+ }
+
+ int isPersp = mask & kPerspective_Mask;
+ double invDet = sk_inv_determinant(fMat, isPersp);
+
+ if (invDet == 0) { // underflow
+ return false;
+ }
+
+ bool applyingInPlace = (inv == this);
+
+ SkMatrix* tmp = inv;
+
+ SkMatrix storage;
+ if (applyingInPlace || nullptr == tmp) {
+ tmp = &storage; // we either need to avoid trampling memory or have no memory
+ }
+
+ ComputeInv(tmp->fMat, fMat, invDet, isPersp);
+ if (!tmp->isFinite()) {
+ return false;
+ }
+
+ tmp->setTypeMask(fTypeMask);
+
+ if (applyingInPlace) {
+ *inv = storage; // need to copy answer back
+ }
+
+ return true;
+}
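+
+// Illustrative usage (via the public invert(), which forwards here for
+// non-identity matrices); in-place inversion is supported by the storage
+// dance above:
+//
+//    SkMatrix m;
+//    m.setScale(2, 4);
+//    SkMatrix inv;
+//    if (m.invert(&inv)) {
+//        // inv is scale(0.5, 0.25)
+//    }
+//    m.invert(&m);    // also legal: inverts m in place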
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Identity_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() == 0);
+
+ if (dst != src && count > 0) {
+ memcpy(dst, src, count * sizeof(SkPoint));
+ }
+}
+
+void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() <= SkMatrix::kTranslate_Mask);
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ if (count & 1) {
+ dst->fX = src->fX + tx;
+ dst->fY = src->fY + ty;
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ count >>= 1;
+ if (count & 1) {
+ (Sk4s::Load(src) + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ (Sk4s::Load(src+0) + trans4).store(dst+0);
+ (Sk4s::Load(src+2) + trans4).store(dst+2);
+ src += 4;
+ dst += 4;
+ }
+ }
+}
+
+void SkMatrix::Scale_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask));
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ SkScalar sx = m.getScaleX();
+ SkScalar sy = m.getScaleY();
+ if (count & 1) {
+ dst->fX = src->fX * sx + tx;
+ dst->fY = src->fY * sy + ty;
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
+ count >>= 1;
+ if (count & 1) {
+ (Sk4s::Load(src) * scale4 + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ (Sk4s::Load(src+0) * scale4 + trans4).store(dst+0);
+ (Sk4s::Load(src+2) * scale4 + trans4).store(dst+2);
+ src += 4;
+ dst += 4;
+ }
+ }
+}
+
+void SkMatrix::Persp_pts(const SkMatrix& m, SkPoint dst[],
+ const SkPoint src[], int count) {
+ SkASSERT(m.hasPerspective());
+
+ if (count > 0) {
+ do {
+ SkScalar sy = src->fY;
+ SkScalar sx = src->fX;
+ src += 1;
+
+ SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ SkScalar z = sx * m.fMat[kMPersp0] + (sy * m.fMat[kMPersp1] + m.fMat[kMPersp2]);
+#else
+ SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2];
+#endif
+ if (z) {
+ z = 1 / z;
+ }
+
+ dst->fY = y * z;
+ dst->fX = x * z;
+ dst += 1;
+ } while (--count);
+ }
+}
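+
+// The perspective mapping above computes (x, y, z) = M * (sx, sy, 1) and
+// then performs the homogeneous divide dst = (x/z, y/z). Example: with
+// kMPersp0 = 0.01 and the rest identity, the point (100, 0) has
+// z = 0.01*100 + 1 = 2 and maps to (50, 0).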
+
+void SkMatrix::Affine_vpts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() != SkMatrix::kPerspective_Mask);
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ SkScalar sx = m.getScaleX();
+ SkScalar sy = m.getScaleY();
+ SkScalar kx = m.getSkewX();
+ SkScalar ky = m.getSkewY();
+ if (count & 1) {
+ dst->set(src->fX * sx + src->fY * kx + tx,
+ src->fX * ky + src->fY * sy + ty);
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
+ Sk4s skew4(kx, ky, kx, ky); // applied to swizzle of src4
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ Sk4s src4 = Sk4s::Load(src);
+ Sk4s swz4 = SkNx_shuffle<1,0,3,2>(src4); // y0 x0, y1 x1
+ (src4 * scale4 + swz4 * skew4 + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ }
+}
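+
+// Lane layout note: each Sk4s packs two points as (x0, y0, x1, y1). The
+// <1,0,3,2> shuffle produces (y0, x0, y1, x1), so multiplying it by
+// (kx, ky, kx, ky) lands the skew terms in the correct lanes:
+//    dst.x = x*sx + y*kx + tx,    dst.y = y*sy + x*ky + ty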
+
+const SkMatrix::MapPtsProc SkMatrix::gMapPtsProcs[] = {
+ SkMatrix::Identity_pts, SkMatrix::Trans_pts,
+ SkMatrix::Scale_pts, SkMatrix::Scale_pts,
+ SkMatrix::Affine_vpts, SkMatrix::Affine_vpts,
+ SkMatrix::Affine_vpts, SkMatrix::Affine_vpts,
+ // repeat the persp proc 8 times
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts
+};
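+
+// gMapPtsProcs is indexed by the low four type-mask bits (translate = 1,
+// scale = 2, affine = 4, perspective = 8); every index with the perspective
+// bit set resolves to Persp_pts, hence the eight repeats.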
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrixPriv::MapHomogeneousPointsWithStride(const SkMatrix& mx, SkPoint3 dst[],
+ size_t dstStride, const SkPoint3 src[],
+ size_t srcStride, int count) {
+ SkASSERT((dst && src && count > 0) || 0 == count);
+ // no partial overlap
+ SkASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]);
+
+ if (count > 0) {
+ if (mx.isIdentity()) {
+ if (src != dst) {
+ if (srcStride == sizeof(SkPoint3) && dstStride == sizeof(SkPoint3)) {
+ memcpy(dst, src, count * sizeof(SkPoint3));
+ } else {
+ for (int i = 0; i < count; ++i) {
+ *dst = *src;
+ dst = reinterpret_cast<SkPoint3*>(reinterpret_cast<char*>(dst) + dstStride);
+ src = reinterpret_cast<const SkPoint3*>(reinterpret_cast<const char*>(src) +
+ srcStride);
+ }
+ }
+ }
+ return;
+ }
+ do {
+ SkScalar sx = src->fX;
+ SkScalar sy = src->fY;
+ SkScalar sw = src->fZ;
+ src = reinterpret_cast<const SkPoint3*>(reinterpret_cast<const char*>(src) + srcStride);
+ const SkScalar* mat = mx.fMat;
+ typedef SkMatrix M;
+ SkScalar x = sdot(sx, mat[M::kMScaleX], sy, mat[M::kMSkewX], sw, mat[M::kMTransX]);
+ SkScalar y = sdot(sx, mat[M::kMSkewY], sy, mat[M::kMScaleY], sw, mat[M::kMTransY]);
+ SkScalar w = sdot(sx, mat[M::kMPersp0], sy, mat[M::kMPersp1], sw, mat[M::kMPersp2]);
+
+ dst->set(x, y, w);
+ dst = reinterpret_cast<SkPoint3*>(reinterpret_cast<char*>(dst) + dstStride);
+ } while (--count);
+ }
+}
+
+void SkMatrix::mapHomogeneousPoints(SkPoint3 dst[], const SkPoint3 src[], int count) const {
+ SkMatrixPriv::MapHomogeneousPointsWithStride(*this, dst, sizeof(SkPoint3), src,
+ sizeof(SkPoint3), count);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::mapVectors(SkPoint dst[], const SkPoint src[], int count) const {
+ if (this->hasPerspective()) {
+ SkPoint origin;
+
+ MapXYProc proc = this->getMapXYProc();
+ proc(*this, 0, 0, &origin);
+
+ for (int i = count - 1; i >= 0; --i) {
+ SkPoint tmp;
+
+ proc(*this, src[i].fX, src[i].fY, &tmp);
+ dst[i].set(tmp.fX - origin.fX, tmp.fY - origin.fY);
+ }
+ } else {
+ SkMatrix tmp = *this;
+
+ tmp.fMat[kMTransX] = tmp.fMat[kMTransY] = 0;
+ tmp.clearTypeMask(kTranslate_Mask);
+ tmp.mapPoints(dst, src, count);
+ }
+}
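+
+// Under perspective, a vector cannot be mapped by simply dropping the
+// translation: the homogeneous divide makes the mapping non-linear. So we
+// map the vector's endpoint and the origin separately and subtract.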
+
+static Sk4f sort_as_rect(const Sk4f& ltrb) {
+ Sk4f rblt(ltrb[2], ltrb[3], ltrb[0], ltrb[1]);
+ Sk4f min = Sk4f::Min(ltrb, rblt);
+ Sk4f max = Sk4f::Max(ltrb, rblt);
+ // We can extract either pair [0,1] or [2,3] from min and max and be correct, but on
+ // ARM this sequence generates the fastest (a single instruction).
+ return Sk4f(min[2], min[3], max[0], max[1]);
+}
+
+void SkMatrix::mapRectScaleTranslate(SkRect* dst, const SkRect& src) const {
+ SkASSERT(dst);
+ SkASSERT(this->isScaleTranslate());
+
+ SkScalar sx = fMat[kMScaleX];
+ SkScalar sy = fMat[kMScaleY];
+ SkScalar tx = fMat[kMTransX];
+ SkScalar ty = fMat[kMTransY];
+ Sk4f scale(sx, sy, sx, sy);
+ Sk4f trans(tx, ty, tx, ty);
+ sort_as_rect(Sk4f::Load(&src.fLeft) * scale + trans).store(&dst->fLeft);
+}
+
+bool SkMatrix::mapRect(SkRect* dst, const SkRect& src) const {
+ SkASSERT(dst);
+
+ if (this->getType() <= kTranslate_Mask) {
+ SkScalar tx = fMat[kMTransX];
+ SkScalar ty = fMat[kMTransY];
+ Sk4f trans(tx, ty, tx, ty);
+ sort_as_rect(Sk4f::Load(&src.fLeft) + trans).store(&dst->fLeft);
+ return true;
+ }
+ if (this->isScaleTranslate()) {
+ this->mapRectScaleTranslate(dst, src);
+ return true;
+ } else {
+ SkPoint quad[4];
+
+ src.toQuad(quad);
+ this->mapPoints(quad, quad, 4);
+ dst->setBoundsNoCheck(quad, 4);
+ return this->rectStaysRect(); // might still return true if rotated by 90, etc.
+ }
+}
+
+SkScalar SkMatrix::mapRadius(SkScalar radius) const {
+ SkVector vec[2];
+
+ vec[0].set(radius, 0);
+ vec[1].set(0, radius);
+ this->mapVectors(vec, 2);
+
+ SkScalar d0 = vec[0].length();
+ SkScalar d1 = vec[1].length();
+
+ // return geometric mean
+ return SkScalarSqrt(d0 * d1);
+}
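+
+// Illustrative example: under scale(2, 8) the unit circle maps to an
+// ellipse with radii 2 and 8, and mapRadius(1) returns sqrt(2 * 8) = 4,
+// the radius of the circle with the same area as that ellipse.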
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Persp_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(m.hasPerspective());
+
+ SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+ SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2];
+ if (z) {
+ z = 1 / z;
+ }
+ pt->fX = x * z;
+ pt->fY = y * z;
+}
+
+void SkMatrix::RotTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask)) == kAffine_Mask);
+
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ pt->fX = sx * m.fMat[kMScaleX] + (sy * m.fMat[kMSkewX] + m.fMat[kMTransX]);
+ pt->fY = sx * m.fMat[kMSkewY] + (sy * m.fMat[kMScaleY] + m.fMat[kMTransY]);
+#else
+ pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#endif
+}
+
+void SkMatrix::Rot_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask))== kAffine_Mask);
+ SkASSERT(0 == m.fMat[kMTransX]);
+ SkASSERT(0 == m.fMat[kMTransY]);
+
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ pt->fX = sx * m.fMat[kMScaleX] + (sy * m.fMat[kMSkewX] + m.fMat[kMTransX]);
+ pt->fY = sx * m.fMat[kMSkewY] + (sy * m.fMat[kMScaleY] + m.fMat[kMTransY]);
+#else
+ pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#endif
+}
+
+void SkMatrix::ScaleTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+ == kScale_Mask);
+
+ pt->fX = sx * m.fMat[kMScaleX] + m.fMat[kMTransX];
+ pt->fY = sy * m.fMat[kMScaleY] + m.fMat[kMTransY];
+}
+
+void SkMatrix::Scale_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+ == kScale_Mask);
+ SkASSERT(0 == m.fMat[kMTransX]);
+ SkASSERT(0 == m.fMat[kMTransY]);
+
+ pt->fX = sx * m.fMat[kMScaleX];
+ pt->fY = sy * m.fMat[kMScaleY];
+}
+
+void SkMatrix::Trans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(m.getType() == kTranslate_Mask);
+
+ pt->fX = sx + m.fMat[kMTransX];
+ pt->fY = sy + m.fMat[kMTransY];
+}
+
+void SkMatrix::Identity_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(0 == m.getType());
+
+ pt->fX = sx;
+ pt->fY = sy;
+}
+
+const SkMatrix::MapXYProc SkMatrix::gMapXYProcs[] = {
+ SkMatrix::Identity_xy, SkMatrix::Trans_xy,
+ SkMatrix::Scale_xy, SkMatrix::ScaleTrans_xy,
+ SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+ SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+ // repeat the persp proc 8 times
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// if it's nearly zero (the 26 is just made up; perhaps it should be bigger or smaller)
+#define PerspNearlyZero(x) SkScalarNearlyZero(x, (1.0f / (1 << 26)))
+
+bool SkMatrix::isFixedStepInX() const {
+ return PerspNearlyZero(fMat[kMPersp0]);
+}
+
+SkVector SkMatrix::fixedStepInX(SkScalar y) const {
+ SkASSERT(PerspNearlyZero(fMat[kMPersp0]));
+ if (PerspNearlyZero(fMat[kMPersp1]) &&
+ PerspNearlyZero(fMat[kMPersp2] - 1)) {
+ return SkVector::Make(fMat[kMScaleX], fMat[kMSkewY]);
+ } else {
+ SkScalar z = y * fMat[kMPersp1] + fMat[kMPersp2];
+ return SkVector::Make(fMat[kMScaleX] / z, fMat[kMSkewY] / z);
+ }
+}
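+
+// fixedStepInX() reports the constant device-space step produced by moving
+// one unit in src x at a fixed y. This is only valid when kMPersp0 is
+// (nearly) zero, so that z does not vary with x along a scanline.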
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool checkForZero(float x) {
+ return x*x == 0;
+}
+
+bool SkMatrix::Poly2Proc(const SkPoint srcPt[], SkMatrix* dst) {
+ dst->fMat[kMScaleX] = srcPt[1].fY - srcPt[0].fY;
+ dst->fMat[kMSkewY] = srcPt[0].fX - srcPt[1].fX;
+ dst->fMat[kMPersp0] = 0;
+
+ dst->fMat[kMSkewX] = srcPt[1].fX - srcPt[0].fX;
+ dst->fMat[kMScaleY] = srcPt[1].fY - srcPt[0].fY;
+ dst->fMat[kMPersp1] = 0;
+
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+bool SkMatrix::Poly3Proc(const SkPoint srcPt[], SkMatrix* dst) {
+ dst->fMat[kMScaleX] = srcPt[2].fX - srcPt[0].fX;
+ dst->fMat[kMSkewY] = srcPt[2].fY - srcPt[0].fY;
+ dst->fMat[kMPersp0] = 0;
+
+ dst->fMat[kMSkewX] = srcPt[1].fX - srcPt[0].fX;
+ dst->fMat[kMScaleY] = srcPt[1].fY - srcPt[0].fY;
+ dst->fMat[kMPersp1] = 0;
+
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+bool SkMatrix::Poly4Proc(const SkPoint srcPt[], SkMatrix* dst) {
+ float a1, a2;
+ float x0, y0, x1, y1, x2, y2;
+
+ x0 = srcPt[2].fX - srcPt[0].fX;
+ y0 = srcPt[2].fY - srcPt[0].fY;
+ x1 = srcPt[2].fX - srcPt[1].fX;
+ y1 = srcPt[2].fY - srcPt[1].fY;
+ x2 = srcPt[2].fX - srcPt[3].fX;
+ y2 = srcPt[2].fY - srcPt[3].fY;
+
+ /* check if abs(x2) > abs(y2) */
+    if (x2 > 0 ? y2 > 0 ? x2 > y2 : x2 > -y2 : y2 > 0 ? -x2 > y2 : x2 < y2) {
+ float denom = sk_ieee_float_divide(x1 * y2, x2) - y1;
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a1 = (((x0 - x1) * y2 / x2) - y0 + y1) / denom;
+ } else {
+ float denom = x1 - sk_ieee_float_divide(y1 * x2, y2);
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a1 = (x0 - x1 - sk_ieee_float_divide((y0 - y1) * x2, y2)) / denom;
+ }
+
+ /* check if abs(x1) > abs(y1) */
+    if (x1 > 0 ? y1 > 0 ? x1 > y1 : x1 > -y1 : y1 > 0 ? -x1 > y1 : x1 < y1) {
+ float denom = y2 - sk_ieee_float_divide(x2 * y1, x1);
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a2 = (y0 - y2 - sk_ieee_float_divide((x0 - x2) * y1, x1)) / denom;
+ } else {
+ float denom = sk_ieee_float_divide(y2 * x1, y1) - x2;
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a2 = (sk_ieee_float_divide((y0 - y2) * x1, y1) - x0 + x2) / denom;
+ }
+
+ dst->fMat[kMScaleX] = a2 * srcPt[3].fX + srcPt[3].fX - srcPt[0].fX;
+ dst->fMat[kMSkewY] = a2 * srcPt[3].fY + srcPt[3].fY - srcPt[0].fY;
+ dst->fMat[kMPersp0] = a2;
+
+ dst->fMat[kMSkewX] = a1 * srcPt[1].fX + srcPt[1].fX - srcPt[0].fX;
+ dst->fMat[kMScaleY] = a1 * srcPt[1].fY + srcPt[1].fY - srcPt[0].fY;
+ dst->fMat[kMPersp1] = a1;
+
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+typedef bool (*PolyMapProc)(const SkPoint[], SkMatrix*);
+
+/* Adapted from Rob Johnson's original sample code in QuickDraw GX. */
+bool SkMatrix::setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count) {
+ if ((unsigned)count > 4) {
+ SkDebugf("--- SkMatrix::setPolyToPoly count out of range %d\n", count);
+ return false;
+ }
+
+ if (0 == count) {
+ this->reset();
+ return true;
+ }
+ if (1 == count) {
+ this->setTranslate(dst[0].fX - src[0].fX, dst[0].fY - src[0].fY);
+ return true;
+ }
+
+ const PolyMapProc gPolyMapProcs[] = {
+ SkMatrix::Poly2Proc, SkMatrix::Poly3Proc, SkMatrix::Poly4Proc
+ };
+ PolyMapProc proc = gPolyMapProcs[count - 2];
+
+ SkMatrix tempMap, result;
+
+ if (!proc(src, &tempMap)) {
+ return false;
+ }
+ if (!tempMap.invert(&result)) {
+ return false;
+ }
+ if (!proc(dst, &tempMap)) {
+ return false;
+ }
+ this->setConcat(tempMap, result);
+ return true;
+}
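+
+// Scheme: each PolyNProc builds the matrix taking a canonical (unit) frame
+// onto the given points. With P_src and P_dst built that way, the final
+// setConcat(tempMap, result) sets *this = P_dst * P_src^-1, which carries
+// each src[i] onto dst[i].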
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum MinMaxOrBoth {
+ kMin_MinMaxOrBoth,
+ kMax_MinMaxOrBoth,
+ kBoth_MinMaxOrBoth
+};
+
+template <MinMaxOrBoth MIN_MAX_OR_BOTH> bool get_scale_factor(SkMatrix::TypeMask typeMask,
+ const SkScalar m[9],
+ SkScalar results[/*1 or 2*/]) {
+ if (typeMask & SkMatrix::kPerspective_Mask) {
+ return false;
+ }
+ if (SkMatrix::kIdentity_Mask == typeMask) {
+ results[0] = SK_Scalar1;
+ if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[1] = SK_Scalar1;
+ }
+ return true;
+ }
+ if (!(typeMask & SkMatrix::kAffine_Mask)) {
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMinScalar(SkScalarAbs(m[SkMatrix::kMScaleX]),
+ SkScalarAbs(m[SkMatrix::kMScaleY]));
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMaxScalar(SkScalarAbs(m[SkMatrix::kMScaleX]),
+ SkScalarAbs(m[SkMatrix::kMScaleY]));
+ } else {
+ results[0] = SkScalarAbs(m[SkMatrix::kMScaleX]);
+ results[1] = SkScalarAbs(m[SkMatrix::kMScaleY]);
+ if (results[0] > results[1]) {
+ using std::swap;
+ swap(results[0], results[1]);
+ }
+ }
+ return true;
+ }
+    // Ignore the translation part of the matrix; just look at the 2x2 portion.
+    // Compute the singular values and take the largest or smallest abs value.
+ // [a b; b c] = A^T*A
+ SkScalar a = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMScaleX],
+ m[SkMatrix::kMSkewY], m[SkMatrix::kMSkewY]);
+ SkScalar b = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMSkewX],
+ m[SkMatrix::kMScaleY], m[SkMatrix::kMSkewY]);
+ SkScalar c = sdot(m[SkMatrix::kMSkewX], m[SkMatrix::kMSkewX],
+ m[SkMatrix::kMScaleY], m[SkMatrix::kMScaleY]);
+    // The eigenvalues of A^T*A are the squared singular values of A.
+    // The characteristic equation is det((A^T*A) - l*I) = 0, i.e.
+    //     l^2 - (a + c)l + (ac - b^2) = 0.
+    // Solve it with the quadratic formula (the divisor is non-zero since the
+    // l^2 coefficient is 1, and the roots are guaranteed to be positive and
+    // real).
+ SkScalar bSqd = b * b;
+ // if upper left 2x2 is orthogonal save some math
+ if (bSqd <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMinScalar(a, c);
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMaxScalar(a, c);
+ } else {
+ results[0] = a;
+ results[1] = c;
+ if (results[0] > results[1]) {
+ using std::swap;
+ swap(results[0], results[1]);
+ }
+ }
+ } else {
+ SkScalar aminusc = a - c;
+ SkScalar apluscdiv2 = SkScalarHalf(a + c);
+ SkScalar x = SkScalarHalf(SkScalarSqrt(aminusc * aminusc + 4 * bSqd));
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = apluscdiv2 - x;
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = apluscdiv2 + x;
+ } else {
+ results[0] = apluscdiv2 - x;
+ results[1] = apluscdiv2 + x;
+ }
+ }
+ if (!SkScalarIsFinite(results[0])) {
+ return false;
+ }
+    // Due to floating-point inaccuracy, there may be error in a, b, and c as
+    // calculated by sdot, deepened further by the subsequent arithmetic on
+    // them. Therefore, we allow nearly-zero negative values and clamp them to
+    // zero below.
+ SkASSERT(results[0] >= -SK_ScalarNearlyZero);
+ if (results[0] < 0) {
+ results[0] = 0;
+ }
+ results[0] = SkScalarSqrt(results[0]);
+ if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ if (!SkScalarIsFinite(results[1])) {
+ return false;
+ }
+ SkASSERT(results[1] >= -SK_ScalarNearlyZero);
+ if (results[1] < 0) {
+ results[1] = 0;
+ }
+ results[1] = SkScalarSqrt(results[1]);
+ }
+ return true;
+}
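+
+// Worked example (illustrative): for the shear [1 1; 0 1] we get
+// A^T*A = [1 1; 1 2] with eigenvalues (3 +/- sqrt(5)) / 2, so the min/max
+// scale factors are their square roots, ~0.618 and ~1.618.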
+
+SkScalar SkMatrix::getMinScale() const {
+ SkScalar factor;
+ if (get_scale_factor<kMin_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+ return factor;
+ } else {
+ return -1;
+ }
+}
+
+SkScalar SkMatrix::getMaxScale() const {
+ SkScalar factor;
+ if (get_scale_factor<kMax_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+ return factor;
+ } else {
+ return -1;
+ }
+}
+
+bool SkMatrix::getMinMaxScales(SkScalar scaleFactors[2]) const {
+ return get_scale_factor<kBoth_MinMaxOrBoth>(this->getType(), fMat, scaleFactors);
+}
+
+const SkMatrix& SkMatrix::I() {
+ static constexpr SkMatrix identity;
+ SkASSERT(identity.isIdentity());
+ return identity;
+}
+
+const SkMatrix& SkMatrix::InvalidMatrix() {
+ static constexpr SkMatrix invalid(SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ kTranslate_Mask | kScale_Mask |
+ kAffine_Mask | kPerspective_Mask);
+ return invalid;
+}
+
+bool SkMatrix::decomposeScale(SkSize* scale, SkMatrix* remaining) const {
+ if (this->hasPerspective()) {
+ return false;
+ }
+
+ const SkScalar sx = SkVector::Length(this->getScaleX(), this->getSkewY());
+ const SkScalar sy = SkVector::Length(this->getSkewX(), this->getScaleY());
+ if (!SkScalarIsFinite(sx) || !SkScalarIsFinite(sy) ||
+ SkScalarNearlyZero(sx) || SkScalarNearlyZero(sy)) {
+ return false;
+ }
+
+ if (scale) {
+ scale->set(sx, sy);
+ }
+ if (remaining) {
+ *remaining = *this;
+ remaining->preScale(SkScalarInvert(sx), SkScalarInvert(sy));
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkMatrix::writeToMemory(void* buffer) const {
+ // TODO write less for simple matrices
+ static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+ if (buffer) {
+ memcpy(buffer, fMat, sizeInMemory);
+ }
+ return sizeInMemory;
+}
+
+size_t SkMatrix::readFromMemory(const void* buffer, size_t length) {
+ static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+ if (length < sizeInMemory) {
+ return 0;
+ }
+ memcpy(fMat, buffer, sizeInMemory);
+ this->setTypeMask(kUnknown_Mask);
+ return sizeInMemory;
+}
+
+void SkMatrix::dump() const {
+ SkString str;
+ str.appendf("[%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f]",
+ fMat[0], fMat[1], fMat[2], fMat[3], fMat[4], fMat[5],
+ fMat[6], fMat[7], fMat[8]);
+ SkDebugf("%s\n", str.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkMatrixUtils.h"
+
+bool SkTreatAsSprite(const SkMatrix& mat, const SkISize& size, const SkPaint& paint) {
+    // Our path AA is 2 bits, and our rect AA is 8, so we could use 8,
+    // but in practice 4 seems enough (still looks smooth) and allows
+    // more slightly-fractional cases to fall into the fast (sprite) case.
+ static const unsigned kAntiAliasSubpixelBits = 4;
+
+ const unsigned subpixelBits = paint.isAntiAlias() ? kAntiAliasSubpixelBits : 0;
+
+ // quick reject on affine or perspective
+ if (mat.getType() & ~(SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ return false;
+ }
+
+ // quick success check
+ if (!subpixelBits && !(mat.getType() & ~SkMatrix::kTranslate_Mask)) {
+ return true;
+ }
+
+ // mapRect supports negative scales, so we eliminate those first
+ if (mat.getScaleX() < 0 || mat.getScaleY() < 0) {
+ return false;
+ }
+
+ SkRect dst;
+ SkIRect isrc = SkIRect::MakeSize(size);
+
+ {
+ SkRect src;
+ src.set(isrc);
+ mat.mapRect(&dst, src);
+ }
+
+ // just apply the translate to isrc
+ isrc.offset(SkScalarRoundToInt(mat.getTranslateX()),
+ SkScalarRoundToInt(mat.getTranslateY()));
+
+ if (subpixelBits) {
+ isrc.fLeft = SkLeftShift(isrc.fLeft, subpixelBits);
+ isrc.fTop = SkLeftShift(isrc.fTop, subpixelBits);
+ isrc.fRight = SkLeftShift(isrc.fRight, subpixelBits);
+ isrc.fBottom = SkLeftShift(isrc.fBottom, subpixelBits);
+
+ const float scale = 1 << subpixelBits;
+ dst.fLeft *= scale;
+ dst.fTop *= scale;
+ dst.fRight *= scale;
+ dst.fBottom *= scale;
+ }
+
+ SkIRect idst;
+ dst.round(&idst);
+ return isrc == idst;
+}
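+
+// In effect, a matrix qualifies as a sprite transform when it maps the
+// integer source rect onto an equal-sized, integer-aligned rect: exactly
+// when not anti-aliasing, and to within 1/16 of a pixel (4 subpixel bits)
+// when anti-aliasing.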
+
+// A square matrix M can be decomposed (via polar decomposition) into two matrices --
+// an orthogonal matrix Q and a symmetric matrix S. In turn we can decompose S into U*W*U^T,
+// where U is another orthogonal matrix and W is a scale matrix. These can be recombined
+// to give M = (Q*U)*W*U^T, i.e., the product of two orthogonal matrices and a scale matrix.
+//
+// The one wrinkle is that traditionally Q may contain a reflection -- the
+// calculation has been rejiggered to put that reflection into W.
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+ SkPoint* rotation1,
+ SkPoint* scale,
+ SkPoint* rotation2) {
+
+ SkScalar A = matrix[SkMatrix::kMScaleX];
+ SkScalar B = matrix[SkMatrix::kMSkewX];
+ SkScalar C = matrix[SkMatrix::kMSkewY];
+ SkScalar D = matrix[SkMatrix::kMScaleY];
+
+ if (is_degenerate_2x2(A, B, C, D)) {
+ return false;
+ }
+
+ double w1, w2;
+ SkScalar cos1, sin1;
+ SkScalar cos2, sin2;
+
+ // do polar decomposition (M = Q*S)
+ SkScalar cosQ, sinQ;
+ double Sa, Sb, Sd;
+ // if M is already symmetric (i.e., M = I*S)
+ if (SkScalarNearlyEqual(B, C)) {
+ cosQ = 1;
+ sinQ = 0;
+
+ Sa = A;
+ Sb = B;
+ Sd = D;
+ } else {
+ cosQ = A + D;
+ sinQ = C - B;
+ SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cosQ*cosQ + sinQ*sinQ));
+ cosQ *= reciplen;
+ sinQ *= reciplen;
+
+ // S = Q^-1*M
+ // we don't calc Sc since it's symmetric
+ Sa = A*cosQ + C*sinQ;
+ Sb = B*cosQ + D*sinQ;
+ Sd = -B*sinQ + D*cosQ;
+ }
+
+    // Now we need to compute the eigenvalues of S (our scale factors)
+    // and its eigenvectors (the bases for our rotation).
+    // From these, we should be able to reconstruct S as U*W*U^T.
+ if (SkScalarNearlyZero(SkDoubleToScalar(Sb))) {
+ // already diagonalized
+ cos1 = 1;
+ sin1 = 0;
+ w1 = Sa;
+ w2 = Sd;
+ cos2 = cosQ;
+ sin2 = sinQ;
+ } else {
+ double diff = Sa - Sd;
+ double discriminant = sqrt(diff*diff + 4.0*Sb*Sb);
+ double trace = Sa + Sd;
+ if (diff > 0) {
+ w1 = 0.5*(trace + discriminant);
+ w2 = 0.5*(trace - discriminant);
+ } else {
+ w1 = 0.5*(trace - discriminant);
+ w2 = 0.5*(trace + discriminant);
+ }
+
+ cos1 = SkDoubleToScalar(Sb); sin1 = SkDoubleToScalar(w1 - Sa);
+ SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cos1*cos1 + sin1*sin1));
+ cos1 *= reciplen;
+ sin1 *= reciplen;
+
+ // rotation 2 is composition of Q and U
+ cos2 = cos1*cosQ - sin1*sinQ;
+ sin2 = sin1*cosQ + cos1*sinQ;
+
+ // rotation 1 is U^T
+ sin1 = -sin1;
+ }
+
+ if (scale) {
+ scale->fX = SkDoubleToScalar(w1);
+ scale->fY = SkDoubleToScalar(w2);
+ }
+ if (rotation1) {
+ rotation1->fX = cos1;
+ rotation1->fY = sin1;
+ }
+ if (rotation2) {
+ rotation2->fX = cos2;
+ rotation2->fY = sin2;
+ }
+
+ return true;
+}
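+
+// Sanity sketch (illustrative): a pure rotation by theta decomposes to
+// rotation1 = (1, 0), scale = (1, 1), rotation2 = (cos(theta), sin(theta)),
+// since such a matrix is already orthogonal and S comes out as the
+// identity.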
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRSXform::toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const {
+#if 0
+ // This is the slow way, but it documents what we're doing
+ quad[0].set(0, 0);
+ quad[1].set(width, 0);
+ quad[2].set(width, height);
+ quad[3].set(0, height);
+ SkMatrix m;
+ m.setRSXform(*this).mapPoints(quad, quad, 4);
+#else
+ const SkScalar m00 = fSCos;
+ const SkScalar m01 = -fSSin;
+ const SkScalar m02 = fTx;
+ const SkScalar m10 = -m01;
+ const SkScalar m11 = m00;
+ const SkScalar m12 = fTy;
+
+ quad[0].set(m02, m12);
+ quad[1].set(m00 * width + m02, m10 * width + m12);
+ quad[2].set(m00 * width + m01 * height + m02, m10 * width + m11 * height + m12);
+ quad[3].set(m01 * height + m02, m11 * height + m12);
+#endif
+}
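+
+// An SkRSXform encodes the 2x3 matrix [ fSCos  -fSSin  fTx ]
+//                                     [ fSSin   fSCos  fTy ],
+// a rotation plus uniform scale plus translate; toQuad() simply maps the
+// four corners of the width x height rect through it by hand.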
+
+void SkRSXform::toTriStrip(SkScalar width, SkScalar height, SkPoint strip[4]) const {
+ const SkScalar m00 = fSCos;
+ const SkScalar m01 = -fSSin;
+ const SkScalar m02 = fTx;
+ const SkScalar m10 = -m01;
+ const SkScalar m11 = m00;
+ const SkScalar m12 = fTy;
+
+ strip[0].set(m02, m12);
+ strip[1].set(m01 * height + m02, m11 * height + m12);
+ strip[2].set(m00 * width + m02, m10 * width + m12);
+ strip[3].set(m00 * width + m01 * height + m02, m10 * width + m11 * height + m12);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkFilterQuality SkMatrixPriv::AdjustHighQualityFilterLevel(const SkMatrix& matrix,
+ bool matrixIsInverse) {
+ if (matrix.isIdentity()) {
+ return kNone_SkFilterQuality;
+ }
+
+ auto is_minimizing = [&](SkScalar scale) {
+ return matrixIsInverse ? scale > 1 : scale < 1;
+ };
+
+ SkScalar scales[2];
+ if (!matrix.getMinMaxScales(scales) || is_minimizing(scales[0])) {
+        // Bicubic doesn't handle arbitrary minimization well, as src texels
+        // can be skipped entirely.
+        return kMedium_SkFilterQuality;
+ }
+
+ // At this point if scales[1] == SK_Scalar1 then the matrix doesn't do any scaling.
+ if (scales[1] == SK_Scalar1) {
+ if (matrix.rectStaysRect() && SkScalarIsInt(matrix.getTranslateX()) &&
+ SkScalarIsInt(matrix.getTranslateY())) {
+ return kNone_SkFilterQuality;
+ } else {
+ // Use bilerp to handle rotation or fractional translation.
+ return kLow_SkFilterQuality;
+ }
+ }
+
+ return kHigh_SkFilterQuality;
+}
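+
+// Summary of the policy above: identity keeps no filtering; any minimizing
+// scale drops to medium; a non-scaling matrix uses none for integer
+// translates and low otherwise; only genuine upscaling keeps high.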
diff --git a/gfx/skia/skia/src/core/SkMatrix44.cpp b/gfx/skia/skia/src/core/SkMatrix44.cpp
new file mode 100644
index 0000000000..67e710bf0e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrix44.cpp
@@ -0,0 +1,1032 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix44.h"
+#include <type_traits>
+#include <utility>
+
+// Copying SkMatrix44 byte-wise is performance-critical to Blink. This class is
+// contained in several Transform classes, which are copied multiple times
+// during the rendering life cycle. See crbug.com/938563 for reference.
+#if defined(SK_BUILD_FOR_WIN) || defined(SK_BUILD_FOR_MAC)
+// std::is_trivially_copyable is not supported for some older clang versions,
+// which (at least as of this patch) are in use for Chromecast.
+static_assert(std::is_trivially_copyable<SkMatrix44>::value,
+ "SkMatrix44 must be trivially copyable");
+#endif
+
+static inline bool eq4(const SkMScalar* SK_RESTRICT a,
+ const SkMScalar* SK_RESTRICT b) {
+ return (a[0] == b[0]) & (a[1] == b[1]) & (a[2] == b[2]) & (a[3] == b[3]);
+}
+
+bool SkMatrix44::operator==(const SkMatrix44& other) const {
+ if (this == &other) {
+ return true;
+ }
+
+ if (this->isIdentity() && other.isIdentity()) {
+ return true;
+ }
+
+ const SkMScalar* SK_RESTRICT a = &fMat[0][0];
+ const SkMScalar* SK_RESTRICT b = &other.fMat[0][0];
+
+#if 0
+ for (int i = 0; i < 16; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+#else
+ // to reduce branch instructions, we compare 4 at a time.
+ // see bench/Matrix44Bench.cpp for test.
+ if (!eq4(&a[0], &b[0])) {
+ return false;
+ }
+ if (!eq4(&a[4], &b[4])) {
+ return false;
+ }
+ if (!eq4(&a[8], &b[8])) {
+ return false;
+ }
+ return eq4(&a[12], &b[12]);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+void SkMatrix44::recomputeTypeMask() {
+ if (0 != perspX() || 0 != perspY() || 0 != perspZ() || 1 != fMat[3][3]) {
+ fTypeMask = kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask;
+ return;
+ }
+
+ TypeMask mask = kIdentity_Mask;
+ if (0 != transX() || 0 != transY() || 0 != transZ()) {
+ mask |= kTranslate_Mask;
+ }
+
+ if (1 != scaleX() || 1 != scaleY() || 1 != scaleZ()) {
+ mask |= kScale_Mask;
+ }
+
+ if (0 != fMat[1][0] || 0 != fMat[0][1] || 0 != fMat[0][2] ||
+ 0 != fMat[2][0] || 0 != fMat[1][2] || 0 != fMat[2][1]) {
+ mask |= kAffine_Mask;
+ }
+ fTypeMask = mask;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::asColMajorf(float dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToFloat(src[i]);
+ }
+#elif defined SK_MSCALAR_IS_FLOAT
+ memcpy(dst, src, 16 * sizeof(float));
+#endif
+}
+
+void SkMatrix44::as3x4RowMajorf(float dst[]) const {
+ dst[0] = fMat[0][0]; dst[1] = fMat[1][0]; dst[2] = fMat[2][0]; dst[3] = fMat[3][0];
+ dst[4] = fMat[0][1]; dst[5] = fMat[1][1]; dst[6] = fMat[2][1]; dst[7] = fMat[3][1];
+ dst[8] = fMat[0][2]; dst[9] = fMat[1][2]; dst[10] = fMat[2][2]; dst[11] = fMat[3][2];
+}
+
+void SkMatrix44::asColMajord(double dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ memcpy(dst, src, 16 * sizeof(double));
+#elif defined SK_MSCALAR_IS_FLOAT
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToDouble(src[i]);
+ }
+#endif
+}
+
+void SkMatrix44::asRowMajorf(float dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToFloat(src[0]);
+ dst[4] = SkMScalarToFloat(src[1]);
+ dst[8] = SkMScalarToFloat(src[2]);
+ dst[12] = SkMScalarToFloat(src[3]);
+ src += 4;
+ dst += 1;
+ }
+}
+
+void SkMatrix44::asRowMajord(double dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToDouble(src[0]);
+ dst[4] = SkMScalarToDouble(src[1]);
+ dst[8] = SkMScalarToDouble(src[2]);
+ dst[12] = SkMScalarToDouble(src[3]);
+ src += 4;
+ dst += 1;
+ }
+}
+
+void SkMatrix44::setColMajorf(const float src[]) {
+ SkMScalar* dst = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToFloat(src[i]);
+ }
+#elif defined SK_MSCALAR_IS_FLOAT
+ memcpy(dst, src, 16 * sizeof(float));
+#endif
+
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::setColMajord(const double src[]) {
+ SkMScalar* dst = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ memcpy(dst, src, 16 * sizeof(double));
+#elif defined SK_MSCALAR_IS_FLOAT
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkDoubleToMScalar(src[i]);
+ }
+#endif
+
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::setRowMajorf(const float src[]) {
+ SkMScalar* dst = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToFloat(src[0]);
+ dst[4] = SkMScalarToFloat(src[1]);
+ dst[8] = SkMScalarToFloat(src[2]);
+ dst[12] = SkMScalarToFloat(src[3]);
+ src += 4;
+ dst += 1;
+ }
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::setRowMajord(const double src[]) {
+ SkMScalar* dst = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkDoubleToMScalar(src[0]);
+ dst[4] = SkDoubleToMScalar(src[1]);
+ dst[8] = SkDoubleToMScalar(src[2]);
+ dst[12] = SkDoubleToMScalar(src[3]);
+ src += 4;
+ dst += 1;
+ }
+ this->recomputeTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkMatrix44& SkMatrix44::I() {
+ static constexpr SkMatrix44 gIdentity44(kIdentity_Constructor);
+ return gIdentity44;
+}
+
+void SkMatrix44::setIdentity() {
+ fMat[0][0] = 1;
+ fMat[0][1] = 0;
+ fMat[0][2] = 0;
+ fMat[0][3] = 0;
+ fMat[1][0] = 0;
+ fMat[1][1] = 1;
+ fMat[1][2] = 0;
+ fMat[1][3] = 0;
+ fMat[2][0] = 0;
+ fMat[2][1] = 0;
+ fMat[2][2] = 1;
+ fMat[2][3] = 0;
+ fMat[3][0] = 0;
+ fMat[3][1] = 0;
+ fMat[3][2] = 0;
+ fMat[3][3] = 1;
+ this->setTypeMask(kIdentity_Mask);
+}
+
+void SkMatrix44::set3x3(SkMScalar m_00, SkMScalar m_10, SkMScalar m_20,
+ SkMScalar m_01, SkMScalar m_11, SkMScalar m_21,
+ SkMScalar m_02, SkMScalar m_12, SkMScalar m_22) {
+ fMat[0][0] = m_00; fMat[0][1] = m_10; fMat[0][2] = m_20; fMat[0][3] = 0;
+ fMat[1][0] = m_01; fMat[1][1] = m_11; fMat[1][2] = m_21; fMat[1][3] = 0;
+ fMat[2][0] = m_02; fMat[2][1] = m_12; fMat[2][2] = m_22; fMat[2][3] = 0;
+ fMat[3][0] = 0; fMat[3][1] = 0; fMat[3][2] = 0; fMat[3][3] = 1;
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::set3x3RowMajorf(const float src[]) {
+ fMat[0][0] = src[0]; fMat[0][1] = src[3]; fMat[0][2] = src[6]; fMat[0][3] = 0;
+ fMat[1][0] = src[1]; fMat[1][1] = src[4]; fMat[1][2] = src[7]; fMat[1][3] = 0;
+ fMat[2][0] = src[2]; fMat[2][1] = src[5]; fMat[2][2] = src[8]; fMat[2][3] = 0;
+ fMat[3][0] = 0; fMat[3][1] = 0; fMat[3][2] = 0; fMat[3][3] = 1;
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::set3x4RowMajorf(const float src[]) {
+ fMat[0][0] = src[0]; fMat[1][0] = src[1]; fMat[2][0] = src[2]; fMat[3][0] = src[3];
+ fMat[0][1] = src[4]; fMat[1][1] = src[5]; fMat[2][1] = src[6]; fMat[3][1] = src[7];
+ fMat[0][2] = src[8]; fMat[1][2] = src[9]; fMat[2][2] = src[10]; fMat[3][2] = src[11];
+ fMat[0][3] = 0; fMat[1][3] = 0; fMat[2][3] = 0; fMat[3][3] = 1;
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::set4x4(SkMScalar m_00, SkMScalar m_10, SkMScalar m_20, SkMScalar m_30,
+ SkMScalar m_01, SkMScalar m_11, SkMScalar m_21, SkMScalar m_31,
+ SkMScalar m_02, SkMScalar m_12, SkMScalar m_22, SkMScalar m_32,
+ SkMScalar m_03, SkMScalar m_13, SkMScalar m_23, SkMScalar m_33) {
+ fMat[0][0] = m_00; fMat[0][1] = m_10; fMat[0][2] = m_20; fMat[0][3] = m_30;
+ fMat[1][0] = m_01; fMat[1][1] = m_11; fMat[1][2] = m_21; fMat[1][3] = m_31;
+ fMat[2][0] = m_02; fMat[2][1] = m_12; fMat[2][2] = m_22; fMat[2][3] = m_32;
+ fMat[3][0] = m_03; fMat[3][1] = m_13; fMat[3][2] = m_23; fMat[3][3] = m_33;
+ this->recomputeTypeMask();
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ this->setIdentity();
+
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ fMat[3][0] = dx;
+ fMat[3][1] = dy;
+ fMat[3][2] = dz;
+ this->setTypeMask(kTranslate_Mask);
+}
+
+void SkMatrix44::preTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ fMat[3][i] = fMat[0][i] * dx + fMat[1][i] * dy + fMat[2][i] * dz + fMat[3][i];
+ }
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::postTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ if (this->getType() & kPerspective_Mask) {
+ for (int i = 0; i < 4; ++i) {
+ fMat[i][0] += fMat[i][3] * dx;
+ fMat[i][1] += fMat[i][3] * dy;
+ fMat[i][2] += fMat[i][3] * dz;
+ }
+ } else {
+ fMat[3][0] += dx;
+ fMat[3][1] += dy;
+ fMat[3][2] += dz;
+ this->recomputeTypeMask();
+ }
+}
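+
+// postTranslate composes T * M (translation applied last). Without
+// perspective the bottom row of M is (0, 0, 0, 1), so only the translation
+// column moves; with perspective the w outputs feed the translation, hence
+// the full per-column update above.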
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ this->setIdentity();
+
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+ fMat[0][0] = sx;
+ fMat[1][1] = sy;
+ fMat[2][2] = sz;
+ this->setTypeMask(kScale_Mask);
+}
+
+void SkMatrix44::preScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+    // The product matrix * pureScale can be computed as a shortcut, since
+    // the pureScale components effectively scale the columns of the
+    // original matrix.
+ for (int i = 0; i < 4; i++) {
+ fMat[0][i] *= sx;
+ fMat[1][i] *= sy;
+ fMat[2][i] *= sz;
+ }
+ this->recomputeTypeMask();
+}
+
+void SkMatrix44::postScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ fMat[i][0] *= sx;
+ fMat[i][1] *= sy;
+ fMat[i][2] *= sz;
+ }
+ this->recomputeTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setRotateAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians) {
+ double len2 = (double)x * x + (double)y * y + (double)z * z;
+ if (1 != len2) {
+ if (0 == len2) {
+ this->setIdentity();
+ return;
+ }
+ double scale = 1 / sqrt(len2);
+ x = SkDoubleToMScalar(x * scale);
+ y = SkDoubleToMScalar(y * scale);
+ z = SkDoubleToMScalar(z * scale);
+ }
+ this->setRotateAboutUnit(x, y, z, radians);
+}
+
+void SkMatrix44::setRotateAboutUnit(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians) {
+ double c = cos(radians);
+ double s = sin(radians);
+ double C = 1 - c;
+ double xs = x * s;
+ double ys = y * s;
+ double zs = z * s;
+ double xC = x * C;
+ double yC = y * C;
+ double zC = z * C;
+ double xyC = x * yC;
+ double yzC = y * zC;
+ double zxC = z * xC;
+
+    // If you're looking at Wikipedia, remember that we're column-major.
+ this->set3x3(SkDoubleToMScalar(x * xC + c), // scale x
+ SkDoubleToMScalar(xyC + zs), // skew x
+ SkDoubleToMScalar(zxC - ys), // trans x
+
+ SkDoubleToMScalar(xyC - zs), // skew y
+ SkDoubleToMScalar(y * yC + c), // scale y
+ SkDoubleToMScalar(yzC + xs), // trans y
+
+ SkDoubleToMScalar(zxC + ys), // persp x
+ SkDoubleToMScalar(yzC - xs), // persp y
+ SkDoubleToMScalar(z * zC + c)); // persp 2
+}
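+
+// Illustrative example: setRotateAboutUnit(0, 0, 1, radians = pi/2) is a
+// quarter turn about z; the x axis maps to (0, 1, 0) and the y axis maps
+// to (-1, 0, 0), matching the column-major layout noted above.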
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool bits_isonly(int value, int mask) {
+ return 0 == (value & ~mask);
+}
+
+void SkMatrix44::setConcat(const SkMatrix44& a, const SkMatrix44& b) {
+ const SkMatrix44::TypeMask a_mask = a.getType();
+ const SkMatrix44::TypeMask b_mask = b.getType();
+
+ if (kIdentity_Mask == a_mask) {
+ *this = b;
+ return;
+ }
+ if (kIdentity_Mask == b_mask) {
+ *this = a;
+ return;
+ }
+
+ bool useStorage = (this == &a || this == &b);
+ SkMScalar storage[16];
+ SkMScalar* result = useStorage ? storage : &fMat[0][0];
+
+ // Both matrices are at most scale+translate
+ if (bits_isonly(a_mask | b_mask, kScale_Mask | kTranslate_Mask)) {
+ result[0] = a.fMat[0][0] * b.fMat[0][0];
+ result[1] = result[2] = result[3] = result[4] = 0;
+ result[5] = a.fMat[1][1] * b.fMat[1][1];
+ result[6] = result[7] = result[8] = result[9] = 0;
+ result[10] = a.fMat[2][2] * b.fMat[2][2];
+ result[11] = 0;
+ result[12] = a.fMat[0][0] * b.fMat[3][0] + a.fMat[3][0];
+ result[13] = a.fMat[1][1] * b.fMat[3][1] + a.fMat[3][1];
+ result[14] = a.fMat[2][2] * b.fMat[3][2] + a.fMat[3][2];
+ result[15] = 1;
+ } else {
+ for (int j = 0; j < 4; j++) {
+ for (int i = 0; i < 4; i++) {
+ double value = 0;
+ for (int k = 0; k < 4; k++) {
+ value += SkMScalarToDouble(a.fMat[k][i]) * b.fMat[j][k];
+ }
+ *result++ = SkDoubleToMScalar(value);
+ }
+ }
+ }
+
+ if (useStorage) {
+ memcpy(fMat, storage, sizeof(storage));
+ }
+ this->recomputeTypeMask();
+}
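+
+// Storage note: fMat is column-major (fMat[col][row]), so the inner loop
+// above pairs a.fMat[k][i] (row i of a) with b.fMat[j][k] (column j of b),
+// i.e. the standard row-times-column dot product, accumulated in doubles.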
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We always perform the calculation in doubles, to avoid prematurely losing
+ precision along the way. This relies on the compiler automatically
+ promoting our SkMScalar values to double (if needed).
+ */
+double SkMatrix44::determinant() const {
+ if (this->isIdentity()) {
+ return 1;
+ }
+ if (this->isScaleTranslate()) {
+ return fMat[0][0] * fMat[1][1] * fMat[2][2] * fMat[3][3];
+ }
+
+ double a00 = fMat[0][0];
+ double a01 = fMat[0][1];
+ double a02 = fMat[0][2];
+ double a03 = fMat[0][3];
+ double a10 = fMat[1][0];
+ double a11 = fMat[1][1];
+ double a12 = fMat[1][2];
+ double a13 = fMat[1][3];
+ double a20 = fMat[2][0];
+ double a21 = fMat[2][1];
+ double a22 = fMat[2][2];
+ double a23 = fMat[2][3];
+ double a30 = fMat[3][0];
+ double a31 = fMat[3][1];
+ double a32 = fMat[3][2];
+ double a33 = fMat[3][3];
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b02 = a00 * a13 - a03 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b04 = a01 * a13 - a03 * a11;
+ double b05 = a02 * a13 - a03 * a12;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20 * a33 - a23 * a30;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21 * a33 - a23 * a31;
+ double b11 = a22 * a33 - a23 * a32;
+
+ // Calculate the determinant
+ return b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
+}
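+
+// The b00..b11 terms are 2x2 minors of the top two rows (b00..b05) and the
+// bottom two rows (b06..b11); pairing complementary minors expands the 4x4
+// determinant with only six products in the final combination.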
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool is_matrix_finite(const SkMatrix44& matrix) {
+ SkMScalar accumulator = 0;
+ for (int row = 0; row < 4; ++row) {
+ for (int col = 0; col < 4; ++col) {
+ accumulator *= matrix.get(row, col);
+ }
+ }
+ return accumulator == 0;
+}
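+
+// The accumulator trick relies on IEEE arithmetic: 0 * finite == 0, while
+// 0 * infinity and 0 * NaN are NaN, and NaN == 0 compares false, so any
+// non-finite entry makes the function return false.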
+
+bool SkMatrix44::invert(SkMatrix44* storage) const {
+ if (this->isIdentity()) {
+ if (storage) {
+ storage->setIdentity();
+ }
+ return true;
+ }
+
+ if (this->isTranslate()) {
+ if (storage) {
+ storage->setTranslate(-fMat[3][0], -fMat[3][1], -fMat[3][2]);
+ }
+ return true;
+ }
+
+ SkMatrix44 tmp;
+ // Use storage if it's available and distinct from this matrix.
+ SkMatrix44* inverse = (storage && storage != this) ? storage : &tmp;
+ if (this->isScaleTranslate()) {
+ if (0 == fMat[0][0] * fMat[1][1] * fMat[2][2]) {
+ return false;
+ }
+
+ double invXScale = 1 / fMat[0][0];
+ double invYScale = 1 / fMat[1][1];
+ double invZScale = 1 / fMat[2][2];
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(invXScale);
+ inverse->fMat[0][1] = 0;
+ inverse->fMat[0][2] = 0;
+ inverse->fMat[0][3] = 0;
+
+ inverse->fMat[1][0] = 0;
+ inverse->fMat[1][1] = SkDoubleToMScalar(invYScale);
+ inverse->fMat[1][2] = 0;
+ inverse->fMat[1][3] = 0;
+
+ inverse->fMat[2][0] = 0;
+ inverse->fMat[2][1] = 0;
+ inverse->fMat[2][2] = SkDoubleToMScalar(invZScale);
+ inverse->fMat[2][3] = 0;
+
+ inverse->fMat[3][0] = SkDoubleToMScalar(-fMat[3][0] * invXScale);
+ inverse->fMat[3][1] = SkDoubleToMScalar(-fMat[3][1] * invYScale);
+ inverse->fMat[3][2] = SkDoubleToMScalar(-fMat[3][2] * invZScale);
+ inverse->fMat[3][3] = 1;
+
+ inverse->setTypeMask(this->getType());
+
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+ }
+
+ double a00 = fMat[0][0];
+ double a01 = fMat[0][1];
+ double a02 = fMat[0][2];
+ double a03 = fMat[0][3];
+ double a10 = fMat[1][0];
+ double a11 = fMat[1][1];
+ double a12 = fMat[1][2];
+ double a13 = fMat[1][3];
+ double a20 = fMat[2][0];
+ double a21 = fMat[2][1];
+ double a22 = fMat[2][2];
+ double a23 = fMat[2][3];
+ double a30 = fMat[3][0];
+ double a31 = fMat[3][1];
+ double a32 = fMat[3][2];
+ double a33 = fMat[3][3];
+
+ if (!(this->getType() & kPerspective_Mask)) {
+ // If we know the matrix has no perspective, then the perspective
+ // component is (0, 0, 0, 1). We can use this information to save a lot
+ // of arithmetic that would otherwise be spent to compute the inverse
+ // of a general matrix.
+
+ SkASSERT(a03 == 0);
+ SkASSERT(a13 == 0);
+ SkASSERT(a23 == 0);
+ SkASSERT(a33 == 1);
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21;
+ double b11 = a22;
+
+ // Calculate the determinant
+ double det = b00 * b11 - b01 * b10 + b03 * b08;
+
+ double invdet = sk_ieee_double_divide(1.0, det);
+ // If det is zero, we want to return false. However, we also want to return false
+ // if 1/det overflows to infinity (i.e. det is denormalized). Both of these are
+ // handled by checking that 1/det is finite.
+ if (!sk_float_isfinite(sk_double_to_float(invdet))) {
+ return false;
+ }
+
+ b00 *= invdet;
+ b01 *= invdet;
+ b03 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(a11 * b11 - a12 * b10);
+ inverse->fMat[0][1] = SkDoubleToMScalar(a02 * b10 - a01 * b11);
+ inverse->fMat[0][2] = SkDoubleToMScalar(b03);
+ inverse->fMat[0][3] = 0;
+ inverse->fMat[1][0] = SkDoubleToMScalar(a12 * b08 - a10 * b11);
+ inverse->fMat[1][1] = SkDoubleToMScalar(a00 * b11 - a02 * b08);
+ inverse->fMat[1][2] = SkDoubleToMScalar(-b01);
+ inverse->fMat[1][3] = 0;
+ inverse->fMat[2][0] = SkDoubleToMScalar(a10 * b10 - a11 * b08);
+ inverse->fMat[2][1] = SkDoubleToMScalar(a01 * b08 - a00 * b10);
+ inverse->fMat[2][2] = SkDoubleToMScalar(b00);
+ inverse->fMat[2][3] = 0;
+ inverse->fMat[3][0] = SkDoubleToMScalar(a11 * b07 - a10 * b09 - a12 * b06);
+ inverse->fMat[3][1] = SkDoubleToMScalar(a00 * b09 - a01 * b07 + a02 * b06);
+ inverse->fMat[3][2] = SkDoubleToMScalar(a31 * b01 - a30 * b03 - a32 * b00);
+ inverse->fMat[3][3] = 1;
+
+ inverse->setTypeMask(this->getType());
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+ }
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b02 = a00 * a13 - a03 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b04 = a01 * a13 - a03 * a11;
+ double b05 = a02 * a13 - a03 * a12;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20 * a33 - a23 * a30;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21 * a33 - a23 * a31;
+ double b11 = a22 * a33 - a23 * a32;
+
+ // Calculate the determinant
+ double det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
+
+ double invdet = sk_ieee_double_divide(1.0, det);
+ // If det is zero, we want to return false. However, we also want to return false
+ // if 1/det overflows to infinity (i.e. det is denormalized). Both of these are
+ // handled by checking that 1/det is finite.
+ if (!sk_float_isfinite(sk_double_to_float(invdet))) {
+ return false;
+ }
+
+ b00 *= invdet;
+ b01 *= invdet;
+ b02 *= invdet;
+ b03 *= invdet;
+ b04 *= invdet;
+ b05 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(a11 * b11 - a12 * b10 + a13 * b09);
+ inverse->fMat[0][1] = SkDoubleToMScalar(a02 * b10 - a01 * b11 - a03 * b09);
+ inverse->fMat[0][2] = SkDoubleToMScalar(a31 * b05 - a32 * b04 + a33 * b03);
+ inverse->fMat[0][3] = SkDoubleToMScalar(a22 * b04 - a21 * b05 - a23 * b03);
+ inverse->fMat[1][0] = SkDoubleToMScalar(a12 * b08 - a10 * b11 - a13 * b07);
+ inverse->fMat[1][1] = SkDoubleToMScalar(a00 * b11 - a02 * b08 + a03 * b07);
+ inverse->fMat[1][2] = SkDoubleToMScalar(a32 * b02 - a30 * b05 - a33 * b01);
+ inverse->fMat[1][3] = SkDoubleToMScalar(a20 * b05 - a22 * b02 + a23 * b01);
+ inverse->fMat[2][0] = SkDoubleToMScalar(a10 * b10 - a11 * b08 + a13 * b06);
+ inverse->fMat[2][1] = SkDoubleToMScalar(a01 * b08 - a00 * b10 - a03 * b06);
+ inverse->fMat[2][2] = SkDoubleToMScalar(a30 * b04 - a31 * b02 + a33 * b00);
+ inverse->fMat[2][3] = SkDoubleToMScalar(a21 * b02 - a20 * b04 - a23 * b00);
+ inverse->fMat[3][0] = SkDoubleToMScalar(a11 * b07 - a10 * b09 - a12 * b06);
+ inverse->fMat[3][1] = SkDoubleToMScalar(a00 * b09 - a01 * b07 + a02 * b06);
+ inverse->fMat[3][2] = SkDoubleToMScalar(a31 * b01 - a30 * b03 - a32 * b00);
+ inverse->fMat[3][3] = SkDoubleToMScalar(a20 * b03 - a21 * b01 + a22 * b00);
+ inverse->setTypeMask(this->getType());
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+}
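+
+// Note on the guard above: requiring that 1/det is finite (as a float) rejects
+// both det == 0 and near-denormal determinants. For example, det ~= 1e-320
+// gives sk_ieee_double_divide(1.0, det) == +inf, the float cast is non-finite,
+// and invert() reports failure instead of producing a garbage inverse.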
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::transpose() {
+ if (!this->isIdentity()) {
+ using std::swap;
+ swap(fMat[0][1], fMat[1][0]);
+ swap(fMat[0][2], fMat[2][0]);
+ swap(fMat[0][3], fMat[3][0]);
+ swap(fMat[1][2], fMat[2][1]);
+ swap(fMat[1][3], fMat[3][1]);
+ swap(fMat[2][3], fMat[3][2]);
+ this->recomputeTypeMask();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::mapScalars(const SkScalar src[4], SkScalar dst[4]) const {
+ SkScalar storage[4];
+ SkScalar* result = (src == dst) ? storage : dst;
+
+ for (int i = 0; i < 4; i++) {
+ SkMScalar value = 0;
+ for (int j = 0; j < 4; j++) {
+ value += fMat[j][i] * src[j];
+ }
+ result[i] = SkMScalarToScalar(value);
+ }
+
+ if (storage == result) {
+ memcpy(dst, storage, sizeof(storage));
+ }
+}
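+
+// The local `storage` buffer above is what makes in-place use (src == dst) safe:
+// all four outputs are computed from the original inputs before anything is
+// written back.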
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+
+void SkMatrix44::mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const {
+ SkMScalar storage[4];
+ SkMScalar* result = (src == dst) ? storage : dst;
+
+ for (int i = 0; i < 4; i++) {
+ SkMScalar value = 0;
+ for (int j = 0; j < 4; j++) {
+ value += fMat[j][i] * src[j];
+ }
+ result[i] = value;
+ }
+
+ if (storage == result) {
+ memcpy(dst, storage, sizeof(storage));
+ }
+}
+
+#endif
+
+typedef void (*Map2Procf)(const SkMScalar mat[][4], const float src2[], int count, float dst4[]);
+typedef void (*Map2Procd)(const SkMScalar mat[][4], const double src2[], int count, double dst4[]);
+
+static void map2_if(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ for (int i = 0; i < count; ++i) {
+ dst4[0] = src2[0];
+ dst4[1] = src2[1];
+ dst4[2] = 0;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_id(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int i = 0; i < count; ++i) {
+ dst4[0] = src2[0];
+ dst4[1] = src2[1];
+ dst4[2] = 0;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_tf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ const float mat30 = SkMScalarToFloat(mat[3][0]);
+ const float mat31 = SkMScalarToFloat(mat[3][1]);
+ const float mat32 = SkMScalarToFloat(mat[3][2]);
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = src2[0] + mat30;
+ dst4[1] = src2[1] + mat31;
+ dst4[2] = mat32;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_td(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = src2[0] + mat[3][0];
+ dst4[1] = src2[1] + mat[3][1];
+ dst4[2] = mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_sf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ const float mat32 = SkMScalarToFloat(mat[3][2]);
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = SkMScalarToFloat(mat[0][0] * src2[0] + mat[3][0]);
+ dst4[1] = SkMScalarToFloat(mat[1][1] * src2[1] + mat[3][1]);
+ dst4[2] = mat32;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_sd(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = mat[0][0] * src2[0] + mat[3][0];
+ dst4[1] = mat[1][1] * src2[1] + mat[3][1];
+ dst4[2] = mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_af(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ SkMScalar r;
+ for (int n = 0; n < count; ++n) {
+ SkMScalar sx = SkFloatToMScalar(src2[0]);
+ SkMScalar sy = SkFloatToMScalar(src2[1]);
+ r = mat[0][0] * sx + mat[1][0] * sy + mat[3][0];
+ dst4[0] = SkMScalarToFloat(r);
+ r = mat[0][1] * sx + mat[1][1] * sy + mat[3][1];
+ dst4[1] = SkMScalarToFloat(r);
+ r = mat[0][2] * sx + mat[1][2] * sy + mat[3][2];
+ dst4[2] = SkMScalarToFloat(r);
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_ad(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ double sx = src2[0];
+ double sy = src2[1];
+ dst4[0] = mat[0][0] * sx + mat[1][0] * sy + mat[3][0];
+ dst4[1] = mat[0][1] * sx + mat[1][1] * sy + mat[3][1];
+ dst4[2] = mat[0][2] * sx + mat[1][2] * sy + mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_pf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ SkMScalar r;
+ for (int n = 0; n < count; ++n) {
+ SkMScalar sx = SkFloatToMScalar(src2[0]);
+ SkMScalar sy = SkFloatToMScalar(src2[1]);
+ for (int i = 0; i < 4; i++) {
+ r = mat[0][i] * sx + mat[1][i] * sy + mat[3][i];
+ dst4[i] = SkMScalarToFloat(r);
+ }
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_pd(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ double sx = src2[0];
+ double sy = src2[1];
+ for (int i = 0; i < 4; i++) {
+ dst4[i] = mat[0][i] * sx + mat[1][i] * sy + mat[3][i];
+ }
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+void SkMatrix44::map2(const float src2[], int count, float dst4[]) const {
+ static const Map2Procf gProc[] = {
+ map2_if, map2_tf, map2_sf, map2_sf, map2_af, map2_af, map2_af, map2_af
+ };
+
+ TypeMask mask = this->getType();
+ Map2Procf proc = (mask & kPerspective_Mask) ? map2_pf : gProc[mask];
+ proc(fMat, src2, count, dst4);
+}
+
+void SkMatrix44::map2(const double src2[], int count, double dst4[]) const {
+ static const Map2Procd gProc[] = {
+ map2_id, map2_td, map2_sd, map2_sd, map2_ad, map2_ad, map2_ad, map2_ad
+ };
+
+ TypeMask mask = this->getType();
+ Map2Procd proc = (mask & kPerspective_Mask) ? map2_pd : gProc[mask];
+ proc(fMat, src2, count, dst4);
+}
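+
+// Both gProc tables are indexed directly by the low TypeMask bits
+// (translate = 1, scale = 2, affine = 4): entries 0-1 handle identity and
+// translate, 2-3 the scale (+translate) cases, and 4-7 anything with the
+// affine bit set. Perspective is tested separately because its bit would
+// index past the 8-entry table.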
+
+bool SkMatrix44::preserves2dAxisAlignment(SkMScalar epsilon) const {
+
+ // Can't check (mask & kPerspective_Mask) because Z isn't relevant here.
+ if (0 != perspX() || 0 != perspY()) return false;
+
+ // A matrix with two non-zeroish values in any of the upper right
+ // rows or columns will skew. If only one value in each row or
+ // column is non-zeroish, we get a scale plus perhaps a 90-degree
+ // rotation.
+ int col0 = 0;
+ int col1 = 0;
+ int row0 = 0;
+ int row1 = 0;
+
+ // Must test against epsilon, not 0, because we can get values
+ // around 6e-17 in the matrix that "should" be 0.
+
+ if (SkMScalarAbs(fMat[0][0]) > epsilon) {
+ col0++;
+ row0++;
+ }
+ if (SkMScalarAbs(fMat[0][1]) > epsilon) {
+ col1++;
+ row0++;
+ }
+ if (SkMScalarAbs(fMat[1][0]) > epsilon) {
+ col0++;
+ row1++;
+ }
+ if (SkMScalarAbs(fMat[1][1]) > epsilon) {
+ col1++;
+ row1++;
+ }
+ if (col0 > 1 || col1 > 1 || row0 > 1 || row1 > 1) {
+ return false;
+ }
+
+ return true;
+}
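+
+// Example: a 90-degree rotation has exactly one entry above epsilon in each
+// tested row and column, so it preserves axis alignment; a 45-degree rotation
+// makes all four entries ~0.707, the row/column counters reach 2, and the
+// function returns false.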
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::dump() const {
+ static const char* format = "|%g %g %g %g|\n"
+ "|%g %g %g %g|\n"
+ "|%g %g %g %g|\n"
+ "|%g %g %g %g|\n";
+ SkDebugf(format,
+ fMat[0][0], fMat[1][0], fMat[2][0], fMat[3][0],
+ fMat[0][1], fMat[1][1], fMat[2][1], fMat[3][1],
+ fMat[0][2], fMat[1][2], fMat[2][2], fMat[3][2],
+ fMat[0][3], fMat[1][3], fMat[2][3], fMat[3][3]);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void initFromMatrix(SkMScalar dst[4][4], const SkMatrix& src) {
+ dst[0][0] = SkScalarToMScalar(src[SkMatrix::kMScaleX]);
+ dst[1][0] = SkScalarToMScalar(src[SkMatrix::kMSkewX]);
+ dst[2][0] = 0;
+ dst[3][0] = SkScalarToMScalar(src[SkMatrix::kMTransX]);
+ dst[0][1] = SkScalarToMScalar(src[SkMatrix::kMSkewY]);
+ dst[1][1] = SkScalarToMScalar(src[SkMatrix::kMScaleY]);
+ dst[2][1] = 0;
+ dst[3][1] = SkScalarToMScalar(src[SkMatrix::kMTransY]);
+ dst[0][2] = 0;
+ dst[1][2] = 0;
+ dst[2][2] = 1;
+ dst[3][2] = 0;
+ dst[0][3] = SkScalarToMScalar(src[SkMatrix::kMPersp0]);
+ dst[1][3] = SkScalarToMScalar(src[SkMatrix::kMPersp1]);
+ dst[2][3] = 0;
+ dst[3][3] = SkScalarToMScalar(src[SkMatrix::kMPersp2]);
+}
+
+SkMatrix44::SkMatrix44(const SkMatrix& src) {
+ this->operator=(src);
+}
+
+SkMatrix44& SkMatrix44::operator=(const SkMatrix& src) {
+ initFromMatrix(fMat, src);
+
+ if (src.isIdentity()) {
+ this->setTypeMask(kIdentity_Mask);
+ } else {
+ this->recomputeTypeMask();
+ }
+ return *this;
+}
+
+SkMatrix44::operator SkMatrix() const {
+ SkMatrix dst;
+
+ dst[SkMatrix::kMScaleX] = SkMScalarToScalar(fMat[0][0]);
+ dst[SkMatrix::kMSkewX] = SkMScalarToScalar(fMat[1][0]);
+ dst[SkMatrix::kMTransX] = SkMScalarToScalar(fMat[3][0]);
+
+ dst[SkMatrix::kMSkewY] = SkMScalarToScalar(fMat[0][1]);
+ dst[SkMatrix::kMScaleY] = SkMScalarToScalar(fMat[1][1]);
+ dst[SkMatrix::kMTransY] = SkMScalarToScalar(fMat[3][1]);
+
+ dst[SkMatrix::kMPersp0] = SkMScalarToScalar(fMat[0][3]);
+ dst[SkMatrix::kMPersp1] = SkMScalarToScalar(fMat[1][3]);
+ dst[SkMatrix::kMPersp2] = SkMScalarToScalar(fMat[3][3]);
+
+ return dst;
+}
diff --git a/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp b/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp
new file mode 100644
index 0000000000..9fb877851f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMatrixImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+SkMatrixImageFilter::SkMatrixImageFilter(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fTransform(transform)
+ , fFilterQuality(filterQuality) {
+}
+
+sk_sp<SkImageFilter> SkMatrixImageFilter::Make(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input) {
+ return sk_sp<SkImageFilter>(new SkMatrixImageFilter(transform,
+ filterQuality,
+ std::move(input)));
+}
+
+sk_sp<SkFlattenable> SkMatrixImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+
+ return Make(matrix, buffer.read32LE(kLast_SkFilterQuality), common.getInput(0));
+}
+
+void SkMatrixImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fTransform);
+ buffer.writeInt(fFilterQuality);
+}
+
+sk_sp<SkSpecialImage> SkMatrixImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkMatrix matrix;
+ if (!ctx.ctm().invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(fTransform);
+ matrix.postConcat(ctx.ctm());
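+ // postConcat computes M' = arg * M, so matrix is now ctm * fTransform * ctm^-1:
+ // fTransform is applied in the filter's local (pre-CTM) space, then the result
+ // is mapped back into device space.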
+
+ const SkIRect srcBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ const SkRect srcRect = SkRect::Make(srcBounds);
+
+ SkRect dstRect;
+ matrix.mapRect(&dstRect, srcRect);
+ SkIRect dstBounds;
+ dstRect.roundOut(&dstBounds);
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(dstBounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ canvas->translate(-SkIntToScalar(dstBounds.x()), -SkIntToScalar(dstBounds.y()));
+ canvas->concat(matrix);
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setFilterQuality(fFilterQuality);
+
+ input->draw(canvas, srcRect.x(), srcRect.y(), &paint);
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkMatrixImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ SkRect dst;
+ fTransform.mapRect(&dst, bounds);
+ return dst;
+}
+
+SkIRect SkMatrixImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ SkMatrix matrix;
+ if (!ctm.invert(&matrix)) {
+ return src;
+ }
+ if (kForward_MapDirection == dir) {
+ matrix.postConcat(fTransform);
+ } else {
+ SkMatrix transformInverse;
+ if (!fTransform.invert(&transformInverse)) {
+ return src;
+ }
+ matrix.postConcat(transformInverse);
+ }
+ matrix.postConcat(ctm);
+ SkRect floatBounds;
+ matrix.mapRect(&floatBounds, SkRect::Make(src));
+ SkIRect result = floatBounds.roundOut();
+
+ if (kReverse_MapDirection == dir && kNone_SkFilterQuality != fFilterQuality) {
+ // When filtering we might need some pixels in the source that might be otherwise
+ // clipped off.
+ result.outset(1, 1);
+ }
+
+ return result;
+}
diff --git a/gfx/skia/skia/src/core/SkMatrixImageFilter.h b/gfx/skia/skia/src/core/SkMatrixImageFilter.h
new file mode 100644
index 0000000000..12ed13ed41
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixImageFilter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixImageFilter_DEFINED
+#define SkMatrixImageFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "src/core/SkImageFilter_Base.h"
+
+/*! \class SkMatrixImageFilter
+ Matrix transformation image filter. This filter draws its source
+ input transformed by the given matrix.
+ */
+
+class SkMatrixImageFilter : public SkImageFilter_Base {
+public:
+ /** Construct a 2D transformation image filter.
+ * @param transform The matrix to apply when drawing the src bitmap
+ * @param filterQuality The quality of filtering to apply when scaling.
+ * @param input The input image filter. If nullptr, the src bitmap
+ * passed to filterImage() is used instead.
+ */
+
+ static sk_sp<SkImageFilter> Make(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input);
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ SkMatrixImageFilter(const SkMatrix& transform,
+ SkFilterQuality,
+ sk_sp<SkImageFilter> input);
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkMatrixImageFilter)
+
+ SkMatrix fTransform;
+ SkFilterQuality fFilterQuality;
+ typedef SkImageFilter_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixPriv.h b/gfx/skia/skia/src/core/SkMatrixPriv.h
new file mode 100644
index 0000000000..3ac856b0e1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixPriv.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixPriv_DEFINED
+#define SkMatrixPriv_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkMatrix.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkPointPriv.h"
+
+class SkMatrixPriv {
+public:
+ enum {
+ // writeTo/readFromMemory will never return a value larger than this
+ kMaxFlattenSize = 9 * sizeof(SkScalar) + sizeof(uint32_t),
+ };
+
+ static size_t WriteToMemory(const SkMatrix& matrix, void* buffer) {
+ return matrix.writeToMemory(buffer);
+ }
+
+ static size_t ReadFromMemory(SkMatrix* matrix, const void* buffer, size_t length) {
+ return matrix->readFromMemory(buffer, length);
+ }
+
+ typedef SkMatrix::MapXYProc MapXYProc;
+ typedef SkMatrix::MapPtsProc MapPtsProc;
+
+
+ static MapPtsProc GetMapPtsProc(const SkMatrix& matrix) {
+ return SkMatrix::GetMapPtsProc(matrix.getType());
+ }
+
+ static MapXYProc GetMapXYProc(const SkMatrix& matrix) {
+ return SkMatrix::GetMapXYProc(matrix.getType());
+ }
+
+ /**
+ * Attempt to map the rect through the inverse of the matrix. If it is not invertible,
+ * then this returns false and dst is unchanged.
+ */
+ static bool SK_WARN_UNUSED_RESULT InverseMapRect(const SkMatrix& mx,
+ SkRect* dst, const SkRect& src) {
+ if (mx.getType() <= SkMatrix::kTranslate_Mask) {
+ SkScalar tx = mx.getTranslateX();
+ SkScalar ty = mx.getTranslateY();
+ Sk4f trans(tx, ty, tx, ty);
+ (Sk4f::Load(&src.fLeft) - trans).store(&dst->fLeft);
+ return true;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix inverse;
+ if (mx.invert(&inverse)) {
+ inverse.mapRect(dst, src);
+ return true;
+ }
+ return false;
+ }
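+
+ // In the translate-only fast path above, the inverse map is a plain
+ // subtraction; loading (L, T, R, B) as one Sk4f offsets all four rect edges
+ // by (tx, ty, tx, ty) in a single SIMD operation.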
+
+ /** Maps count pts, skipping stride bytes to advance from one SkPoint to the next.
+ Points are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ each resulting pts SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param mx matrix used to map the points
+ @param pts storage for mapped points
+ @param stride size of record starting with SkPoint, in bytes
+ @param count number of points to transform
+ */
+ static void MapPointsWithStride(const SkMatrix& mx, SkPoint pts[], size_t stride, int count) {
+ SkASSERT(stride >= sizeof(SkPoint));
+ SkASSERT(0 == stride % sizeof(SkScalar));
+
+ SkMatrix::TypeMask tm = mx.getType();
+
+ if (SkMatrix::kIdentity_Mask == tm) {
+ return;
+ }
+ if (SkMatrix::kTranslate_Mask == tm) {
+ const SkScalar tx = mx.getTranslateX();
+ const SkScalar ty = mx.getTranslateY();
+ Sk2s trans(tx, ty);
+ for (int i = 0; i < count; ++i) {
+ (Sk2s::Load(&pts->fX) + trans).store(&pts->fX);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ return;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix::MapXYProc proc = mx.getMapXYProc();
+ for (int i = 0; i < count; ++i) {
+ proc(mx, pts->fX, pts->fY, pts);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ }
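+
+ // A minimal usage sketch (the Vertex layout is illustrative, not part of this
+ // header): mapping the position field of an interleaved vertex array in place:
+ //
+ //   struct Vertex { SkPoint fPos; uint32_t fColor; };
+ //   Vertex verts[100];
+ //   SkMatrixPriv::MapPointsWithStride(mx, &verts[0].fPos, sizeof(Vertex), 100);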
+
+ /** Maps src SkPoint array of length count to dst SkPoint array, skipping stride bytes
+ to advance from one SkPoint to the next.
+ Points are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, src = | y |
+ | G H I | | 1 |
+
+ each resulting dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param mx matrix used to map the points
+ @param dst storage for mapped points
+ @param src points to transform
+ @param stride size of record starting with SkPoint, in bytes
+ @param count number of points to transform
+ */
+ static void MapPointsWithStride(const SkMatrix& mx, SkPoint dst[], size_t dstStride,
+ const SkPoint src[], size_t srcStride, int count) {
+ SkASSERT(srcStride >= sizeof(SkPoint));
+ SkASSERT(dstStride >= sizeof(SkPoint));
+ SkASSERT(0 == srcStride % sizeof(SkScalar));
+ SkASSERT(0 == dstStride % sizeof(SkScalar));
+ for (int i = 0; i < count; ++i) {
+ mx.mapPoints(dst, src, 1);
+ src = (SkPoint*)((intptr_t)src + srcStride);
+ dst = (SkPoint*)((intptr_t)dst + dstStride);
+ }
+ }
+
+ static void MapHomogeneousPointsWithStride(const SkMatrix& mx, SkPoint3 dst[], size_t dstStride,
+ const SkPoint3 src[], size_t srcStride, int count);
+
+ // Returns the recommended filterquality, assuming the caller originally wanted kHigh (bicubic)
+ static SkFilterQuality AdjustHighQualityFilterLevel(const SkMatrix&,
+ bool matrixIsInverse = false);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixUtils.h b/gfx/skia/skia/src/core/SkMatrixUtils.h
new file mode 100644
index 0000000000..2a17ec2fbb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixUtils.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixUtils_DEFINED
+#define SkMatrixUtils_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkSize.h"
+
+class SkMatrix;
+class SkPaint;
+
+/**
+ * Given a matrix, size and paint, return true if the computed dst-rect would
+ * align such that there is a 1-to-1 correspondence between src and dst pixels.
+ * This can be called by drawing code to see if drawBitmap can be turned into
+ * drawSprite (which is faster).
+ *
+ * The src-rect is defined to be { 0, 0, size.width(), size.height() }
+ */
+bool SkTreatAsSprite(const SkMatrix&, const SkISize& size, const SkPaint& paint);
+
+/** Decomposes the upper-left 2x2 of the matrix into a rotation (represented by
+ the cosine and sine of the rotation angle), followed by a non-uniform scale,
+ followed by another rotation. If there is a reflection, one of the scale
+ factors will be negative.
+ Returns true if successful. Returns false if the matrix is degenerate.
+ */
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+ SkPoint* rotation1,
+ SkPoint* scale,
+ SkPoint* rotation2);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMessageBus.h b/gfx/skia/skia/src/core/SkMessageBus.h
new file mode 100644
index 0000000000..2a40b28777
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMessageBus.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMessageBus_DEFINED
+#define SkMessageBus_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+
+/**
+ * The following method must have a specialization for type 'Message':
+ *
+ * bool SkShouldPostMessageToBus(const Message&, uint32_t msgBusUniqueID)
+ *
+ * We may want to provide a default template implementation that posts to all
+ * inboxes when no specialization exists for type 'Message', which would remove
+ * this requirement.
+ */
+template <typename Message>
+class SkMessageBus : SkNoncopyable {
+public:
+ // Post a message to be received by Inboxes for this Message type. Checks
+ // SkShouldPostMessageToBus() for each inbox. Threadsafe.
+ static void Post(const Message& m);
+
+ class Inbox {
+ public:
+ Inbox(uint32_t uniqueID = SK_InvalidUniqueID);
+ ~Inbox();
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ // Overwrite out with all the messages we've received since the last call. Threadsafe.
+ void poll(SkTArray<Message>* out);
+
+ private:
+ SkTArray<Message> fMessages;
+ SkMutex fMessagesMutex;
+ uint32_t fUniqueID;
+
+ friend class SkMessageBus;
+ void receive(const Message& m); // SkMessageBus is a friend only to call this.
+ };
+
+private:
+ SkMessageBus();
+ static SkMessageBus* Get();
+
+ SkTDArray<Inbox*> fInboxes;
+ SkMutex fInboxesMutex;
+};
+
+// This must go in a single .cpp file, not some .h, or we risk creating more than one global
+// SkMessageBus per type when using shared libraries. NOTE: at most one per file will compile.
+#define DECLARE_SKMESSAGEBUS_MESSAGE(Message) \
+ template <> \
+ SkMessageBus<Message>* SkMessageBus<Message>::Get() { \
+ static SkOnce once; \
+ static SkMessageBus<Message>* bus; \
+ once([] { bus = new SkMessageBus<Message>(); }); \
+ return bus; \
+ }
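+
+// A minimal usage sketch (the message type and IDs below are illustrative):
+//
+//   struct PurgeMsg { uint32_t fBusID; };
+//   static inline bool SkShouldPostMessageToBus(const PurgeMsg& m, uint32_t busID) {
+//       return m.fBusID == busID;
+//   }
+//   DECLARE_SKMESSAGEBUS_MESSAGE(PurgeMsg)   // in exactly one .cpp file
+//
+//   SkMessageBus<PurgeMsg>::Inbox inbox(kMyID);
+//   SkMessageBus<PurgeMsg>::Post(PurgeMsg{kMyID});
+//   SkTArray<PurgeMsg> msgs;
+//   inbox.poll(&msgs);   // msgs now holds everything posted since the last poll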
+
+// ----------------------- Implementation of SkMessageBus::Inbox -----------------------
+
+template<typename Message>
+SkMessageBus<Message>::Inbox::Inbox(uint32_t uniqueID) : fUniqueID(uniqueID) {
+ // Register ourselves with the corresponding message bus.
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexExclusive lock(bus->fInboxesMutex);
+ bus->fInboxes.push_back(this);
+}
+
+template<typename Message>
+SkMessageBus<Message>::Inbox::~Inbox() {
+ // Remove ourselves from the corresponding message bus.
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexExclusive lock(bus->fInboxesMutex);
+ // This is a cheaper fInboxes.remove(fInboxes.find(this)) when order doesn't matter.
+ for (int i = 0; i < bus->fInboxes.count(); i++) {
+ if (this == bus->fInboxes[i]) {
+ bus->fInboxes.removeShuffle(i);
+ break;
+ }
+ }
+}
+
+template<typename Message>
+void SkMessageBus<Message>::Inbox::receive(const Message& m) {
+ SkAutoMutexExclusive lock(fMessagesMutex);
+ fMessages.push_back(m);
+}
+
+template<typename Message>
+void SkMessageBus<Message>::Inbox::poll(SkTArray<Message>* messages) {
+ SkASSERT(messages);
+ messages->reset();
+ SkAutoMutexExclusive lock(fMessagesMutex);
+ fMessages.swap(*messages);
+}
+
+// ----------------------- Implementation of SkMessageBus -----------------------
+
+template <typename Message>
+SkMessageBus<Message>::SkMessageBus() {}
+
+template <typename Message>
+/*static*/ void SkMessageBus<Message>::Post(const Message& m) {
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexExclusive lock(bus->fInboxesMutex);
+ for (int i = 0; i < bus->fInboxes.count(); i++) {
+ if (SkShouldPostMessageToBus(m, bus->fInboxes[i]->fUniqueID)) {
+ bus->fInboxes[i]->receive(m);
+ }
+ }
+}
+
+#endif // SkMessageBus_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMiniRecorder.cpp b/gfx/skia/skia/src/core/SkMiniRecorder.cpp
new file mode 100644
index 0000000000..e75ecd04c3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMiniRecorder.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkMiniRecorder.h"
+#include "src/core/SkPictureCommon.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkTLazy.h"
+#include <new>
+
+using namespace SkRecords;
+
+class SkEmptyPicture final : public SkPicture {
+public:
+ void playback(SkCanvas*, AbortCallback*) const override { }
+
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ int approximateOpCount() const override { return 0; }
+ SkRect cullRect() const override { return SkRect::MakeEmpty(); }
+};
+
+// Calculate conservative bounds for each type of draw op that can be its own mini picture.
+// These are fairly easy because we know they can't be affected by any matrix or saveLayers.
+static SkRect adjust_for_paint(SkRect bounds, const SkPaint& paint) {
+ return paint.canComputeFastBounds() ? paint.computeFastBounds(bounds, &bounds)
+ : SkRectPriv::MakeLargest();
+}
+static SkRect bounds(const DrawRect& op) {
+ return adjust_for_paint(op.rect, op.paint);
+}
+static SkRect bounds(const DrawPath& op) {
+ return op.path.isInverseFillType() ? SkRectPriv::MakeLargest()
+ : adjust_for_paint(op.path.getBounds(), op.paint);
+}
+static SkRect bounds(const DrawTextBlob& op) {
+ return adjust_for_paint(op.blob->bounds().makeOffset(op.x, op.y), op.paint);
+}
+
+template <typename T>
+class SkMiniPicture final : public SkPicture {
+public:
+ SkMiniPicture(const SkRect* cull, T* op) : fCull(cull ? *cull : bounds(*op)) {
+ memcpy(&fOp, op, sizeof(fOp)); // We take ownership of op's guts.
+ }
+
+ void playback(SkCanvas* c, AbortCallback*) const override {
+ SkRecords::Draw(c, nullptr, nullptr, 0, nullptr)(fOp);
+ }
+
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ int approximateOpCount() const override { return 1; }
+ SkRect cullRect() const override { return fCull; }
+
+private:
+ SkRect fCull;
+ T fOp;
+};
+
+
+SkMiniRecorder::SkMiniRecorder() : fState(State::kEmpty) {}
+SkMiniRecorder::~SkMiniRecorder() {
+ if (fState != State::kEmpty) {
+ // We have internal state pending.
+ // Detaching then deleting a picture is an easy way to clean up.
+ (void)this->detachAsPicture(nullptr);
+ }
+ SkASSERT(fState == State::kEmpty);
+}
+
+#define TRY_TO_STORE(Type, ...) \
+ if (fState != State::kEmpty) { return false; } \
+ fState = State::k##Type; \
+ new (fBuffer.get()) Type{__VA_ARGS__}; \
+ return true
+
+bool SkMiniRecorder::drawRect(const SkRect& rect, const SkPaint& paint) {
+ TRY_TO_STORE(DrawRect, paint, rect);
+}
+
+bool SkMiniRecorder::drawPath(const SkPath& path, const SkPaint& paint) {
+ TRY_TO_STORE(DrawPath, paint, path);
+}
+
+bool SkMiniRecorder::drawTextBlob(const SkTextBlob* b, SkScalar x, SkScalar y, const SkPaint& p) {
+ TRY_TO_STORE(DrawTextBlob, p, sk_ref_sp(b), x, y);
+}
+#undef TRY_TO_STORE
+
+
+sk_sp<SkPicture> SkMiniRecorder::detachAsPicture(const SkRect* cull) {
+#define CASE(Type) \
+ case State::k##Type: \
+ fState = State::kEmpty; \
+ return sk_make_sp<SkMiniPicture<Type>>(cull, reinterpret_cast<Type*>(fBuffer.get()))
+
+ static SkOnce once;
+ static SkPicture* empty;
+
+ switch (fState) {
+ case State::kEmpty:
+ once([]{ empty = new SkEmptyPicture; });
+ return sk_ref_sp(empty);
+ CASE(DrawPath);
+ CASE(DrawRect);
+ CASE(DrawTextBlob);
+ }
+ SkASSERT(false);
+ return nullptr;
+#undef CASE
+}
+
+void SkMiniRecorder::flushAndReset(SkCanvas* canvas) {
+#define CASE(Type) \
+ case State::k##Type: { \
+ fState = State::kEmpty; \
+ Type* op = reinterpret_cast<Type*>(fBuffer.get()); \
+ SkRecords::Draw(canvas, nullptr, nullptr, 0, nullptr)(*op); \
+ op->~Type(); \
+ } return
+
+ switch (fState) {
+ case State::kEmpty: return;
+ CASE(DrawPath);
+ CASE(DrawRect);
+ CASE(DrawTextBlob);
+ }
+ SkASSERT(false);
+#undef CASE
+}
diff --git a/gfx/skia/skia/src/core/SkMiniRecorder.h b/gfx/skia/skia/src/core/SkMiniRecorder.h
new file mode 100644
index 0000000000..0e5c143624
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMiniRecorder.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMiniRecorder_DEFINED
+#define SkMiniRecorder_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkRecords.h"
+class SkCanvas;
+
+// Records small pictures, supporting only a limited subset of the canvas API;
+// attempts to record anything else fail.
+class SkMiniRecorder : SkNoncopyable {
+public:
+ SkMiniRecorder();
+ ~SkMiniRecorder();
+
+ // Try to record an op. Returns false on failure.
+ bool drawPath(const SkPath&, const SkPaint&);
+ bool drawRect(const SkRect&, const SkPaint&);
+ bool drawTextBlob(const SkTextBlob*, SkScalar x, SkScalar y, const SkPaint&);
+
+ // Detach anything we've recorded as a picture, resetting this SkMiniRecorder.
+ // If cull is nullptr we'll calculate it.
+ sk_sp<SkPicture> detachAsPicture(const SkRect* cull);
+
+ // Flush anything we've recorded to the canvas, resetting this SkMiniRecorder.
+ // This is logically the same as but rather more efficient than:
+ // sk_sp<SkPicture> pic(this->detachAsPicture(nullptr));
+ // pic->playback(canvas);
+ void flushAndReset(SkCanvas*);
+
+private:
+ enum class State {
+ kEmpty,
+ kDrawPath,
+ kDrawRect,
+ kDrawTextBlob,
+ };
+
+ State fState;
+
+ template <size_t A, size_t B>
+ struct Max { static const size_t val = A > B ? A : B; };
+
+ static const size_t kInlineStorage =
+ Max<sizeof(SkRecords::DrawPath),
+ Max<sizeof(SkRecords::DrawRect),
+ sizeof(SkRecords::DrawTextBlob)>::val>::val;
+ SkAlignedSStorage<kInlineStorage> fBuffer;
+};
+
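+// A minimal usage sketch (caller-side names are illustrative):
+//
+//   SkMiniRecorder mini;
+//   if (!mini.drawRect(rect, paint)) {
+//       // op not supported, or one already stored; fall back to a full recorder
+//   }
+//   sk_sp<SkPicture> pic = mini.detachAsPicture(nullptr);  // cull computed for us
+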
+#endif // SkMiniRecorder_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMipMap.cpp b/gfx/skia/skia/src/core/SkMipMap.cpp
new file mode 100644
index 0000000000..748b7eaf70
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipMap.cpp
@@ -0,0 +1,778 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMipMap.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTo.h"
+#include "include/private/SkVx.h"
+#include "src/core/SkMathPriv.h"
+#include <new>
+
+//
+// ColorTypeFilter is the "Type" we pass to some downsample template functions.
+// It controls how we expand a pixel into a large type, with space between each component,
+// so we can then perform our simple filter (either box or triangle) and store the intermediates
+// in the expanded type.
+//
+
+struct ColorTypeFilter_8888 {
+ typedef uint32_t Type;
+ static Sk4h Expand(uint32_t x) {
+ return SkNx_cast<uint16_t>(Sk4b::Load(&x));
+ }
+ static uint32_t Compact(const Sk4h& x) {
+ uint32_t r;
+ SkNx_cast<uint8_t>(x).store(&r);
+ return r;
+ }
+};
+
+struct ColorTypeFilter_565 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return (x & ~SK_G16_MASK_IN_PLACE) | ((x & SK_G16_MASK_IN_PLACE) << 16);
+ }
+ static uint16_t Compact(uint32_t x) {
+ return ((x & ~SK_G16_MASK_IN_PLACE) & 0xFFFF) | ((x >> 16) & SK_G16_MASK_IN_PLACE);
+ }
+};
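+
+// For example, Expand() above lifts the 6 green bits 16 bits up so that every
+// channel has empty headroom above it; summing four expanded pixels (the 2x2
+// box filter) can then never carry into a neighboring channel, and Compact()
+// packs the averaged value back into 16 bits.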
+
+struct ColorTypeFilter_4444 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return (x & 0xF0F) | ((x & ~0xF0F) << 12);
+ }
+ static uint16_t Compact(uint32_t x) {
+ return (x & 0xF0F) | ((x >> 12) & ~0xF0F);
+ }
+};
+
+struct ColorTypeFilter_8 {
+ typedef uint8_t Type;
+ static unsigned Expand(unsigned x) {
+ return x;
+ }
+ static uint8_t Compact(unsigned x) {
+ return (uint8_t)x;
+ }
+};
+
+struct ColorTypeFilter_Alpha_F16 {
+ typedef uint16_t Type;
+ static Sk4f Expand(uint16_t x) {
+ return SkHalfToFloat_finite_ftz((uint64_t) x); // expand out to four lanes
+ }
+ static uint16_t Compact(const Sk4f& x) {
+ uint64_t r;
+ SkFloatToHalf_finite_ftz(x).store(&r);
+ return r & 0xFFFF; // but ignore the extra 3 lanes here
+ }
+};
+
+struct ColorTypeFilter_RGBA_F16 {
+ typedef uint64_t Type; // SkHalf x4
+ static Sk4f Expand(uint64_t x) {
+ return SkHalfToFloat_finite_ftz(x);
+ }
+ static uint64_t Compact(const Sk4f& x) {
+ uint64_t r;
+ SkFloatToHalf_finite_ftz(x).store(&r);
+ return r;
+ }
+};
+
+struct ColorTypeFilter_88 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return (x & 0xFF) | ((x & ~0xFF) << 8);
+ }
+ static uint16_t Compact(uint32_t x) {
+ return (x & 0xFF) | ((x >> 8) & ~0xFF);
+ }
+};
+
+struct ColorTypeFilter_1616 {
+ typedef uint32_t Type;
+ static uint64_t Expand(uint32_t x) {
+ return (x & 0xFFFF) | ((x & ~0xFFFF) << 16);
+ }
+ static uint32_t Compact(uint64_t x) { // full 32-bit Type; a uint16_t return would truncate the high channel
+ return (x & 0xFFFF) | ((x >> 16) & ~0xFFFF);
+ }
+};
+
+struct ColorTypeFilter_F16F16 {
+ typedef uint32_t Type;
+ static Sk4f Expand(uint32_t x) {
+ return SkHalfToFloat_finite_ftz((uint64_t) x); // expand out to four lanes
+ }
+ static uint32_t Compact(const Sk4f& x) {
+ uint64_t r;
+ SkFloatToHalf_finite_ftz(x).store(&r);
+ return (uint32_t) (r & 0xFFFFFFFF); // but ignore the extra 2 lanes here
+ }
+};
+
+struct ColorTypeFilter_16161616 {
+ typedef uint64_t Type;
+ static skvx::Vec<4, uint32_t> Expand(uint64_t x) {
+ return skvx::cast<uint32_t>(skvx::Vec<4, uint16_t>::Load(&x));
+ }
+ static uint64_t Compact(const skvx::Vec<4, uint32_t>& x) {
+ uint64_t r;
+ skvx::cast<uint16_t>(x).store(&r);
+ return r;
+ }
+};
+
+struct ColorTypeFilter_16 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return x;
+ }
+ static uint16_t Compact(uint32_t x) {
+ return (uint16_t) x;
+ }
+};
+
+struct ColorTypeFilter_1010102 {
+ typedef uint32_t Type;
+ static uint64_t Expand(uint64_t x) {
+ return (((x ) & 0x3ff) ) |
+ (((x >> 10) & 0x3ff) << 20) |
+ (((x >> 20) & 0x3ff) << 40) |
+ (((x >> 30) & 0x3 ) << 60);
+ }
+ static uint32_t Compact(uint64_t x) {
+ return (((x ) & 0x3ff) ) |
+ (((x >> 20) & 0x3ff) << 10) |
+ (((x >> 40) & 0x3ff) << 20) |
+ (((x >> 60) & 0x3 ) << 30);
+ }
+};
+
+template <typename T> T add_121(const T& a, const T& b, const T& c) {
+ return a + b + b + c;
+}
+
+template <typename T> T shift_right(const T& x, int bits) {
+ return x >> bits;
+}
+
+Sk4f shift_right(const Sk4f& x, int bits) {
+ return x * (1.0f / (1 << bits));
+}
+
+template <typename T> T shift_left(const T& x, int bits) {
+ return x << bits;
+}
+
+Sk4f shift_left(const Sk4f& x, int bits) {
+ return x * (1 << bits);
+}
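+
+// Sk4f lanes are floats with no integer shift, so the two specializations above
+// emulate >> and << with exact power-of-two multiplies.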
+
+//
+// To produce each mip level, we need to filter down by 1/2 (e.g. 100x100 -> 50,50)
+// If the starting dimension is odd, we floor the size of the lower level (e.g. 101 -> 50)
+// In those (odd) cases, we use a triangle filter, with 1-pixel overlap between samplings,
+// else for even cases, we just use a 2x box filter.
+//
+// This produces 4 possible isotropic filters: 2x2 2x3 3x2 3x3 where WxH indicates the number of
+// src pixels we need to sample in each dimension to produce 1 dst pixel.
+//
+// OpenGL expects a full mipmap stack to contain anisotropic space as well.
+// This means a 100x1 image would continue down to a 50x1 image, 25x1 image...
+// Because of this, we need 4 more anisotropic filters: 1x2, 1x3, 2x1, 3x1.
+
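+// Worked example: with the triangle filter, a 3-pixel source span {a, b, c}
+// reduces to (a + 2*b + c) / 4 (the 1-2-1 weights computed by add_121), while
+// an even 2-pixel span {a, b} reduces to the box average (a + b) / 2.
+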
+template <typename F> void downsample_1_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c10 = F::Expand(p1[0]);
+
+ auto c = c00 + c10;
+ d[i] = F::Compact(shift_right(c, 1));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_1_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c10 = F::Expand(p1[0]);
+ auto c20 = F::Expand(p2[0]);
+
+ auto c = add_121(c00, c10, c20);
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+template <typename F> void downsample_2_1(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+
+ auto c = c00 + c01;
+ d[i] = F::Compact(shift_right(c, 1));
+ p0 += 2;
+ }
+}
+
+template <typename F> void downsample_2_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+ auto c10 = F::Expand(p1[0]);
+ auto c11 = F::Expand(p1[1]);
+
+ auto c = c00 + c10 + c01 + c11;
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_2_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+ auto c10 = F::Expand(p1[0]);
+ auto c11 = F::Expand(p1[1]);
+ auto c20 = F::Expand(p2[0]);
+ auto c21 = F::Expand(p2[1]);
+
+ auto c = add_121(c00, c10, c20) + add_121(c01, c11, c21);
+ d[i] = F::Compact(shift_right(c, 3));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+template <typename F> void downsample_3_1(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ auto c02 = F::Expand(p0[0]);
+ for (int i = 0; i < count; ++i) {
+ auto c00 = c02;
+ auto c01 = F::Expand(p0[1]);
+ c02 = F::Expand(p0[2]);
+
+ auto c = add_121(c00, c01, c02);
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ }
+}
+
+template <typename F> void downsample_3_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ // Given pixels:
+ // a0 b0 c0 d0 e0 ...
+ // a1 b1 c1 d1 e1 ...
+ // We want:
+ // (a0 + 2*b0 + c0 + a1 + 2*b1 + c1) / 8
+ // (c0 + 2*d0 + e0 + c1 + 2*d1 + e1) / 8
+ // ...
+
+ auto c0 = F::Expand(p0[0]);
+ auto c1 = F::Expand(p1[0]);
+ auto c = c0 + c1;
+ for (int i = 0; i < count; ++i) {
+ auto a = c;
+
+ auto b0 = F::Expand(p0[1]);
+ auto b1 = F::Expand(p1[1]);
+ auto b = b0 + b0 + b1 + b1;
+
+ c0 = F::Expand(p0[2]);
+ c1 = F::Expand(p1[2]);
+ c = c0 + c1;
+
+ auto sum = a + b + c;
+ d[i] = F::Compact(shift_right(sum, 3));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_3_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ // Given pixels:
+ // a0 b0 c0 d0 e0 ...
+ // a1 b1 c1 d1 e1 ...
+ // a2 b2 c2 d2 e2 ...
+ // We want:
+ // (a0 + 2*b0 + c0 + 2*a1 + 4*b1 + 2*c1 + a2 + 2*b2 + c2) / 16
+ // (c0 + 2*d0 + e0 + 2*c1 + 4*d1 + 2*e1 + c2 + 2*d2 + e2) / 16
+ // ...
+
+ auto c0 = F::Expand(p0[0]);
+ auto c1 = F::Expand(p1[0]);
+ auto c2 = F::Expand(p2[0]);
+ auto c = add_121(c0, c1, c2);
+ for (int i = 0; i < count; ++i) {
+ auto a = c;
+
+ auto b0 = F::Expand(p0[1]);
+ auto b1 = F::Expand(p1[1]);
+ auto b2 = F::Expand(p2[1]);
+ auto b = shift_left(add_121(b0, b1, b2), 1);
+
+ c0 = F::Expand(p0[2]);
+ c1 = F::Expand(p1[2]);
+ c2 = F::Expand(p2[2]);
+ c = add_121(c0, c1, c2);
+
+ auto sum = a + b + c;
+ d[i] = F::Compact(shift_right(sum, 4));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkMipMap::AllocLevelsSize(int levelCount, size_t pixelSize) {
+ if (levelCount < 0) {
+ return 0;
+ }
+ int64_t size = sk_64_mul(levelCount + 1, sizeof(Level)) + pixelSize;
+ if (!SkTFitsIn<int32_t>(size)) {
+ return 0;
+ }
+ return SkTo<int32_t>(size);
+}
+
+SkMipMap* SkMipMap::Build(const SkPixmap& src, SkDiscardableFactoryProc fact) {
+ typedef void FilterProc(void*, const void* srcPtr, size_t srcRB, int count);
+
+ FilterProc* proc_1_2 = nullptr;
+ FilterProc* proc_1_3 = nullptr;
+ FilterProc* proc_2_1 = nullptr;
+ FilterProc* proc_2_2 = nullptr;
+ FilterProc* proc_2_3 = nullptr;
+ FilterProc* proc_3_1 = nullptr;
+ FilterProc* proc_3_2 = nullptr;
+ FilterProc* proc_3_3 = nullptr;
+
+ const SkColorType ct = src.colorType();
+ const SkAlphaType at = src.alphaType();
+
+ switch (ct) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_8888>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_8888>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_8888>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_8888>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_8888>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_8888>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_8888>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_8888>;
+ break;
+ case kRGB_565_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_565>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_565>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_565>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_565>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_565>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_565>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_565>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_565>;
+ break;
+ case kARGB_4444_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_4444>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_4444>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_4444>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_4444>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_4444>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_4444>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_4444>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_4444>;
+ break;
+ case kAlpha_8_SkColorType:
+ case kGray_8_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_8>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_8>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_8>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_8>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_8>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_8>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_8>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_8>;
+ break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_RGBA_F16>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_RGBA_F16>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_RGBA_F16>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_RGBA_F16>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_RGBA_F16>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_RGBA_F16>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_RGBA_F16>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_RGBA_F16>;
+ break;
+ case kR8G8_unorm_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_88>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_88>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_88>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_88>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_88>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_88>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_88>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_88>;
+ break;
+ case kR16G16_unorm_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_1616>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_1616>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_1616>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_1616>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_1616>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_1616>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_1616>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_1616>;
+ break;
+ case kA16_unorm_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_16>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_16>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_16>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_16>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_16>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_16>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_16>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_16>;
+ break;
+ case kRGBA_1010102_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_1010102>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_1010102>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_1010102>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_1010102>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_1010102>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_1010102>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_1010102>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_1010102>;
+ break;
+ case kA16_float_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_Alpha_F16>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_Alpha_F16>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_Alpha_F16>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_Alpha_F16>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_Alpha_F16>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_Alpha_F16>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_Alpha_F16>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_Alpha_F16>;
+ break;
+ case kR16G16_float_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_F16F16>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_F16F16>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_F16F16>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_F16F16>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_F16F16>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_F16F16>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_F16F16>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_F16F16>;
+ break;
+ case kR16G16B16A16_unorm_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_16161616>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_16161616>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_16161616>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_16161616>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_16161616>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_16161616>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_16161616>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_16161616>;
+ break;
+ default:
+ return nullptr;
+ }
+
+ if (src.width() <= 1 && src.height() <= 1) {
+ return nullptr;
+ }
+ // whip through our loop to compute the exact size needed
+ size_t size = 0;
+ int countLevels = ComputeLevelCount(src.width(), src.height());
+ for (int currentMipLevel = countLevels; currentMipLevel >= 0; currentMipLevel--) {
+ SkISize mipSize = ComputeLevelSize(src.width(), src.height(), currentMipLevel);
+ size += SkColorTypeMinRowBytes(ct, mipSize.fWidth) * mipSize.fHeight;
+ }
+
+ size_t storageSize = SkMipMap::AllocLevelsSize(countLevels, size);
+ if (0 == storageSize) {
+ return nullptr;
+ }
+
+ SkMipMap* mipmap;
+ if (fact) {
+ SkDiscardableMemory* dm = fact(storageSize);
+ if (nullptr == dm) {
+ return nullptr;
+ }
+ mipmap = new SkMipMap(storageSize, dm);
+ } else {
+ mipmap = new SkMipMap(sk_malloc_throw(storageSize), storageSize);
+ }
+
+ // init
+ mipmap->fCS = sk_ref_sp(src.info().colorSpace());
+ mipmap->fCount = countLevels;
+ mipmap->fLevels = (Level*)mipmap->writable_data();
+ SkASSERT(mipmap->fLevels);
+
+ Level* levels = mipmap->fLevels;
+ uint8_t* baseAddr = (uint8_t*)&levels[countLevels];
+ uint8_t* addr = baseAddr;
+ int width = src.width();
+ int height = src.height();
+ uint32_t rowBytes;
+ SkPixmap srcPM(src);
+
+ // Depending on architecture and other factors, the pixel data alignment may need to be as
+ // large as 8 (for F16 pixels). See the comment on SkMipMap::Level.
+ SkASSERT(SkIsAlign8((uintptr_t)addr));
+
+ for (int i = 0; i < countLevels; ++i) {
+ FilterProc* proc;
+ if (height & 1) {
+ if (height == 1) { // src-height is 1
+ if (width & 1) { // src-width is 3
+ proc = proc_3_1;
+ } else { // src-width is 2
+ proc = proc_2_1;
+ }
+ } else { // src-height is 3
+ if (width & 1) {
+ if (width == 1) { // src-width is 1
+ proc = proc_1_3;
+ } else { // src-width is 3
+ proc = proc_3_3;
+ }
+ } else { // src-width is 2
+ proc = proc_2_3;
+ }
+ }
+ } else { // src-height is 2
+ if (width & 1) {
+ if (width == 1) { // src-width is 1
+ proc = proc_1_2;
+ } else { // src-width is 3
+ proc = proc_3_2;
+ }
+ } else { // src-width is 2
+ proc = proc_2_2;
+ }
+ }
+ width = SkTMax(1, width >> 1);
+ height = SkTMax(1, height >> 1);
+ rowBytes = SkToU32(SkColorTypeMinRowBytes(ct, width));
+
+ // We make the Info w/o any colorspace, since that storage is not under our control, and
+ // will not be deleted in a controlled fashion. When the caller is given the pixmap for
+ // a given level, we augment this pixmap with fCS (which we do manage).
+ new (&levels[i].fPixmap) SkPixmap(SkImageInfo::Make(width, height, ct, at), addr, rowBytes);
+ levels[i].fScale = SkSize::Make(SkIntToScalar(width) / src.width(),
+ SkIntToScalar(height) / src.height());
+
+ const SkPixmap& dstPM = levels[i].fPixmap;
+ const void* srcBasePtr = srcPM.addr();
+ void* dstBasePtr = dstPM.writable_addr();
+
+ const size_t srcRB = srcPM.rowBytes();
+ for (int y = 0; y < height; y++) {
+ proc(dstBasePtr, srcBasePtr, srcRB, width);
+ srcBasePtr = (char*)srcBasePtr + srcRB * 2; // jump two rows
+ dstBasePtr = (char*)dstBasePtr + dstPM.rowBytes();
+ }
+ srcPM = dstPM;
+ addr += height * rowBytes;
+ }
+ SkASSERT(addr == baseAddr + size);
+
+ SkASSERT(mipmap->fLevels);
+ return mipmap;
+}
+
+int SkMipMap::ComputeLevelCount(int baseWidth, int baseHeight) {
+ if (baseWidth < 1 || baseHeight < 1) {
+ return 0;
+ }
+
+ // OpenGL's spec requires that each mipmap level have height/width equal to
+ // max(1, floor(original_height / 2^i)
+ // (or original_width) where i is the mipmap level.
+ // Continue scaling down until both axes are size 1.
+
+ const int largestAxis = SkTMax(baseWidth, baseHeight);
+ if (largestAxis < 2) {
+ // SkMipMap::Build requires a minimum size of 2.
+ return 0;
+ }
+ const int leadingZeros = SkCLZ(static_cast<uint32_t>(largestAxis));
+ // If the value 00011010 has 3 leading 0s then it has 5 significant bits
+ // (the bits which are not leading zeros)
+ const int significantBits = (sizeof(uint32_t) * 8) - leadingZeros;
+ // This assumes a byte is 8 bits wide, so that sizeof(uint32_t) * 8 == 32.
+ int mipLevelCount = significantBits;
+
+ // SkMipMap does not include the base mip level.
+ // For example, it contains levels 1-x instead of 0-x.
+ // This is because the image used to create SkMipMap is the base level.
+ // So subtract 1 from the mip level count.
+ if (mipLevelCount > 0) {
+ --mipLevelCount;
+ }
+
+ return mipLevelCount;
+}
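+
+// Worked example: a 100x80 base image has largestAxis = 100 = 0b1100100, which
+// has 7 significant bits, so ComputeLevelCount returns 6; the generated levels
+// are 50x40, 25x20, 12x10, 6x5, 3x2, and 1x1.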
+
+SkISize SkMipMap::ComputeLevelSize(int baseWidth, int baseHeight, int level) {
+ if (baseWidth < 1 || baseHeight < 1) {
+ return SkISize::Make(0, 0);
+ }
+
+ int maxLevelCount = ComputeLevelCount(baseWidth, baseHeight);
+ if (level >= maxLevelCount || level < 0) {
+ return SkISize::Make(0, 0);
+ }
+ // OpenGL's spec requires that each mipmap level have height/width equal to
+ // max(1, floor(original_height / 2^i)
+ // (or original_width) where i is the mipmap level.
+
+ // SkMipMap does not include the base mip level.
+ // For example, it contains levels 1-x instead of 0-x.
+ // This is because the image used to create SkMipMap is the base level.
+ // So subtract 1 from the mip level to get the index stored by SkMipMap.
+ int width = SkTMax(1, baseWidth >> (level + 1));
+ int height = SkTMax(1, baseHeight >> (level + 1));
+
+ return SkISize::Make(width, height);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMipMap::extractLevel(const SkSize& scaleSize, Level* levelPtr) const {
+ if (nullptr == fLevels) {
+ return false;
+ }
+
+ SkASSERT(scaleSize.width() >= 0 && scaleSize.height() >= 0);
+
+#ifndef SK_SUPPORT_LEGACY_ANISOTROPIC_MIPMAP_SCALE
+ // Use the smallest scale to match the GPU impl.
+ const SkScalar scale = SkTMin(scaleSize.width(), scaleSize.height());
+#else
+ // Ideally we'd pick the smaller scale, to match Ganesh. But ignoring one of the
+ // scales can produce some atrocious results, so for now we use the geometric mean.
+ // (https://bugs.chromium.org/p/skia/issues/detail?id=4863)
+ const SkScalar scale = SkScalarSqrt(scaleSize.width() * scaleSize.height());
+#endif
+
+ if (scale >= SK_Scalar1 || scale <= 0 || !SkScalarIsFinite(scale)) {
+ return false;
+ }
+
+ SkScalar L = -SkScalarLog2(scale);
+ if (!SkScalarIsFinite(L)) {
+ return false;
+ }
+ SkASSERT(L >= 0);
+ int level = SkScalarFloorToInt(L);
+
+ SkASSERT(level >= 0);
+ if (level <= 0) {
+ return false;
+ }
+
+ if (level > fCount) {
+ level = fCount;
+ }
+ if (levelPtr) {
+ *levelPtr = fLevels[level - 1];
+ // need to augment with our colorspace
+ levelPtr->fPixmap.setColorSpace(fCS);
+ }
+ return true;
+}
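+
+// Worked example: a requested scale of 0.3 gives L = -log2(0.3) ~= 1.74, so
+// level = 1 and fLevels[0] (the half-resolution level) is returned. Any scale
+// in [0.5, 1) floors to level 0, extractLevel() returns false, and the caller
+// draws from the base image instead.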
+
+// Helper which extracts a pixmap from the src bitmap
+//
+SkMipMap* SkMipMap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
+ SkPixmap srcPixmap;
+ if (!src.peekPixels(&srcPixmap)) {
+ return nullptr;
+ }
+ return Build(srcPixmap, fact);
+}
+
+int SkMipMap::countLevels() const {
+ return fCount;
+}
+
+bool SkMipMap::getLevel(int index, Level* levelPtr) const {
+ if (nullptr == fLevels) {
+ return false;
+ }
+ if (index < 0) {
+ return false;
+ }
+ if (index > fCount - 1) {
+ return false;
+ }
+ if (levelPtr) {
+ *levelPtr = fLevels[index];
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkMipMap.h b/gfx/skia/skia/src/core/SkMipMap.h
new file mode 100644
index 0000000000..9aa43d265e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipMap.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMipMap_DEFINED
+#define SkMipMap_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkCachedData.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkBitmap;
+class SkDiscardableMemory;
+
+typedef SkDiscardableMemory* (*SkDiscardableFactoryProc)(size_t bytes);
+
+/*
+ * SkMipMap will generate mipmap levels when given a base mipmap level image.
+ *
+ * Any function which deals with mipmap level indices will start with index 0
+ * being the first mipmap level which was generated. Said another way, it does
+ * not include the base level in its range.
+ */
+class SkMipMap : public SkCachedData {
+public:
+ static SkMipMap* Build(const SkPixmap& src, SkDiscardableFactoryProc);
+ static SkMipMap* Build(const SkBitmap& src, SkDiscardableFactoryProc);
+
+ // Determines how many levels a SkMipMap will have without creating that mipmap.
+ // This does not include the base mipmap level that the user provided when
+ // creating the SkMipMap.
+ static int ComputeLevelCount(int baseWidth, int baseHeight);
+
+ // Determines the size of a given mipmap level.
+ // |level| is an index into the generated mipmap levels. It does not include
+ // the base level. So index 0 represents mipmap level 1.
+ static SkISize ComputeLevelSize(int baseWidth, int baseHeight, int level);
+
+ // We use a block of (possibly discardable) memory to hold an array of Level structs, followed
+ // by the pixel data for each level. On 32-bit platforms, Level would naturally be 4 byte
+ // aligned, so the pixel data could end up with 4 byte alignment. If the pixel data is F16,
+ // it must be 8 byte aligned. To ensure this, keep the Level struct 8 byte aligned as well.
+ struct alignas(8) Level {
+ SkPixmap fPixmap;
+ SkSize fScale; // < 1.0
+ };
+
+ bool extractLevel(const SkSize& scale, Level*) const;
+
+ // countLevels returns the number of mipmap levels generated (which does not
+ // include the base mipmap level).
+ int countLevels() const;
+
+ // |index| is an index into the generated mipmap levels. It does not include
+ // the base level. So index 0 represents mipmap level 1.
+ bool getLevel(int index, Level*) const;
+
+protected:
+ void onDataChange(void* oldData, void* newData) override {
+ fLevels = (Level*)newData; // could be nullptr
+ }
+
+private:
+ sk_sp<SkColorSpace> fCS;
+ Level* fLevels; // managed by the baseclass, may be null due to onDataChanged.
+ int fCount;
+
+ SkMipMap(void* malloc, size_t size) : INHERITED(malloc, size) {}
+ SkMipMap(size_t size, SkDiscardableMemory* dm) : INHERITED(size, dm) {}
+
+ static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
+
+ typedef SkCachedData INHERITED;
+};
+
+#endif
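Caller-side, the API reads like this; a hedged sketch assuming a valid SkBitmap bm (not code from this patch):

    SkMipMap* mm = SkMipMap::Build(bm, /*fact=*/nullptr);  // nullptr: plain heap allocation
    if (mm) {
        for (int i = 0; i < mm->countLevels(); ++i) {
            SkMipMap::Level level;
            if (mm->getLevel(i, &level)) {
                // level.fPixmap is the (i+1)-th mip image; the base image
                // itself is never stored in the chain.
            }
        }
        mm->unref();  // SkCachedData, the base class, is ref-counted
    }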
diff --git a/gfx/skia/skia/src/core/SkModeColorFilter.cpp b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
new file mode 100644
index 0000000000..bad042be1c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/utils/SkRandom.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkModeColorFilter.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/utils/SkUTF.h"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkModeColorFilter::SkModeColorFilter(SkColor color, SkBlendMode mode) {
+ fColor = color;
+ fMode = mode;
+}
+
+bool SkModeColorFilter::onAsAColorMode(SkColor* color, SkBlendMode* mode) const {
+ if (color) {
+ *color = fColor;
+ }
+ if (mode) {
+ *mode = fMode;
+ }
+ return true;
+}
+
+uint32_t SkModeColorFilter::getFlags() const {
+ uint32_t flags = 0;
+ switch (fMode) {
+ case SkBlendMode::kDst: //!< [Da, Dc]
+ case SkBlendMode::kSrcATop: //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ flags |= kAlphaUnchanged_Flag;
+ default:
+ break;
+ }
+ return flags;
+}
+
+void SkModeColorFilter::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fColor);
+ buffer.writeUInt((int)fMode);
+}
+
+sk_sp<SkFlattenable> SkModeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ SkColor color = buffer.readColor();
+ SkBlendMode mode = (SkBlendMode)buffer.readUInt();
+ return SkColorFilters::Blend(color, mode);
+}
+
+bool SkModeColorFilter::onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ rec.fPipeline->append(SkRasterPipeline::move_src_dst);
+ SkColor4f color = SkColor4f::FromColor(fColor);
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType).apply(color.vec());
+ rec.fPipeline->append_constant_color(rec.fAlloc, color.premul().vec());
+ SkBlendMode_AppendStages(fMode, rec.fPipeline);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+
+std::unique_ptr<GrFragmentProcessor> SkModeColorFilter::asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo& dstColorInfo) const {
+ if (SkBlendMode::kDst == fMode) {
+ return nullptr;
+ }
+
+ auto constFP = GrConstColorProcessor::Make(SkColorToPMColor4f(fColor, dstColorInfo),
+ GrConstColorProcessor::InputMode::kIgnore);
+ auto fp = GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(constFP), fMode);
+ if (!fp) {
+ return nullptr;
+ }
+#ifdef SK_DEBUG
+ // With a solid color input this should always be able to compute the blended color
+ // (at least for coeff modes)
+ if ((unsigned)fMode <= (unsigned)SkBlendMode::kLastCoeffMode) {
+ SkASSERT(fp->hasConstantOutputForConstantInput());
+ }
+#endif
+ return fp;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkColorFilters::Blend(SkColor color, SkBlendMode mode) {
+ if (!SkIsValidMode(mode)) {
+ return nullptr;
+ }
+
+ unsigned alpha = SkColorGetA(color);
+
+ // first collapse some modes if possible
+
+ if (SkBlendMode::kClear == mode) {
+ color = 0;
+ mode = SkBlendMode::kSrc;
+ } else if (SkBlendMode::kSrcOver == mode) {
+ if (0 == alpha) {
+ mode = SkBlendMode::kDst;
+ } else if (255 == alpha) {
+ mode = SkBlendMode::kSrc;
+ }
+ // else just stay srcover
+ }
+
+ // weed out combinations that are noops, and just return null
+ if (SkBlendMode::kDst == mode ||
+ (0 == alpha && (SkBlendMode::kSrcOver == mode ||
+ SkBlendMode::kDstOver == mode ||
+ SkBlendMode::kDstOut == mode ||
+ SkBlendMode::kSrcATop == mode ||
+ SkBlendMode::kXor == mode ||
+ SkBlendMode::kDarken == mode)) ||
+ (0xFF == alpha && SkBlendMode::kDstIn == mode)) {
+ return nullptr;
+ }
+
+ return SkModeColorFilter::Make(color, mode);
+}
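Two quick consequences of the collapsing rules above, as a hedged sketch (the colors are illustrative):

    // An opaque color under kSrcOver behaves exactly like kSrc, so the
    // factory rewrites the mode before constructing the filter.
    sk_sp<SkColorFilter> f1 = SkColorFilters::Blend(SK_ColorRED, SkBlendMode::kSrcOver);

    // A fully transparent color under kSrcOver collapses to kDst, which is
    // a no-op, so the caller gets nullptr and can skip the filter entirely.
    sk_sp<SkColorFilter> f2 = SkColorFilters::Blend(SK_ColorTRANSPARENT, SkBlendMode::kSrcOver);
    SkASSERT(f2 == nullptr);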
diff --git a/gfx/skia/skia/src/core/SkModeColorFilter.h b/gfx/skia/skia/src/core/SkModeColorFilter.h
new file mode 100644
index 0000000000..ba107590e6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkModeColorFilter.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkModeColorFilter_DEFINED
+#define SkModeColorFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+
+class SkModeColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make(SkColor color, SkBlendMode mode) {
+ return sk_sp<SkColorFilter>(new SkModeColorFilter(color, mode));
+ }
+
+ uint32_t getFlags() const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+protected:
+ SkModeColorFilter(SkColor color, SkBlendMode mode);
+
+ void flatten(SkWriteBuffer&) const override;
+ bool onAsAColorMode(SkColor*, SkBlendMode*) const override;
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkModeColorFilter)
+
+ SkColor fColor;
+ SkBlendMode fMode;
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp b/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp
new file mode 100644
index 0000000000..837aa600b9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMultiPictureDraw.h"
+#include "include/core/SkPicture.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkTaskGroup.h"
+
+void SkMultiPictureDraw::DrawData::draw() {
+ fCanvas->drawPicture(fPicture, &fMatrix, fPaint);
+}
+
+void SkMultiPictureDraw::DrawData::init(SkCanvas* canvas, const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ fPicture = SkRef(picture);
+ fCanvas = canvas;
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.setIdentity();
+ }
+ if (paint) {
+ fPaint = new SkPaint(*paint);
+ } else {
+ fPaint = nullptr;
+ }
+}
+
+void SkMultiPictureDraw::DrawData::Reset(SkTDArray<DrawData>& data) {
+ for (int i = 0; i < data.count(); ++i) {
+ data[i].fPicture->unref();
+ delete data[i].fPaint;
+ }
+ data.rewind();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkMultiPictureDraw::SkMultiPictureDraw(int reserve) {
+ if (reserve > 0) {
+ fGPUDrawData.setReserve(reserve);
+ fThreadSafeDrawData.setReserve(reserve);
+ }
+}
+
+void SkMultiPictureDraw::reset() {
+ DrawData::Reset(fGPUDrawData);
+ DrawData::Reset(fThreadSafeDrawData);
+}
+
+void SkMultiPictureDraw::add(SkCanvas* canvas,
+ const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint) {
+ if (nullptr == canvas || nullptr == picture) {
+ SkDEBUGFAIL("parameters to SkMultiPictureDraw::add should be non-nullptr");
+ return;
+ }
+
+ SkTDArray<DrawData>& array = canvas->getGrContext() ? fGPUDrawData : fThreadSafeDrawData;
+ array.append()->init(canvas, picture, matrix, paint);
+}
+
+class AutoMPDReset : SkNoncopyable {
+ SkMultiPictureDraw* fMPD;
+public:
+ AutoMPDReset(SkMultiPictureDraw* mpd) : fMPD(mpd) {}
+ ~AutoMPDReset() { fMPD->reset(); }
+};
+
+//#define FORCE_SINGLE_THREAD_DRAWING_FOR_TESTING
+
+void SkMultiPictureDraw::draw(bool flush) {
+ AutoMPDReset mpdreset(this);
+
+#ifdef FORCE_SINGLE_THREAD_DRAWING_FOR_TESTING
+ for (int i = 0; i < fThreadSafeDrawData.count(); ++i) {
+ fThreadSafeDrawData[i].draw();
+ }
+#else
+ SkTaskGroup().batch(fThreadSafeDrawData.count(), [&](int i) {
+ fThreadSafeDrawData[i].draw();
+ });
+#endif
+
+ // N.B. we could get going on any GPU work from this main thread while the CPU work runs.
+ // But in practice, we've either got GPU work or CPU work, not both.
+
+ const int count = fGPUDrawData.count();
+ if (0 == count) {
+ return;
+ }
+
+ for (int i = 0; i < count; ++i) {
+ const DrawData& data = fGPUDrawData[i];
+ SkCanvas* canvas = data.fCanvas;
+ const SkPicture* picture = data.fPicture;
+
+ canvas->drawPicture(picture, &data.fMatrix, data.fPaint);
+ if (flush) {
+ canvas->flush();
+ }
+ }
+}
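The intended call pattern, as a minimal sketch (the canvases and pictures are assumed to exist; the matrix and paint arguments default to null in the header):

    SkMultiPictureDraw mpd(/*reserve=*/2);
    mpd.add(rasterCanvas, picture0);           // queued on fThreadSafeDrawData
    mpd.add(gpuCanvas, picture1, &viewMatrix); // queued on fGPUDrawData
    // Raster-bound pictures replay in parallel via SkTaskGroup; GPU-bound
    // ones replay serially afterwards. AutoMPDReset empties both queues.
    mpd.draw(/*flush=*/false);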
diff --git a/gfx/skia/skia/src/core/SkNextID.h b/gfx/skia/skia/src/core/SkNextID.h
new file mode 100644
index 0000000000..395c9a27a6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNextID.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNextID_DEFINED
+#define SkNextID_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkNextID {
+public:
+ /**
+ * Shared between SkPixelRef's generationID and SkImage's uniqueID
+ */
+ static uint32_t ImageID();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkNormalFlatSource.cpp b/gfx/skia/skia/src/core/SkNormalFlatSource.cpp
new file mode 100644
index 0000000000..13fa2af488
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalFlatSource.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkNormalFlatSource.h"
+
+#include "include/core/SkPoint3.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkNormalSource.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+class NormalFlatFP : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make() {
+ return std::unique_ptr<GrFragmentProcessor>(new NormalFlatFP());
+ }
+
+ const char* name() const override { return "NormalFlatFP"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override { return Make(); }
+
+private:
+ class GLSLNormalFlatFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLNormalFlatFP() {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("%s = half4(0, 0, 1, 0);", args.fOutputColor);
+ }
+
+ private:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override {}
+ };
+
+ NormalFlatFP()
+ : INHERITED(kFlatNormalsFP_ClassID, kConstantOutputForConstantInput_OptimizationFlag) {
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {}
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f&) const override {
+ return { 0, 0, 1, 0 };
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLNormalFlatFP; }
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+std::unique_ptr<GrFragmentProcessor> SkNormalFlatSourceImpl::asFragmentProcessor(
+ const GrFPArgs&) const {
+ return NormalFlatFP::Make();
+}
+
+#endif // SK_SUPPORT_GPU
+
+////////////////////////////////////////////////////////////////////////////
+
+SkNormalFlatSourceImpl::Provider::Provider() {}
+
+SkNormalFlatSourceImpl::Provider::~Provider() {}
+
+SkNormalSource::Provider* SkNormalFlatSourceImpl::asProvider(const SkShaderBase::ContextRec &rec,
+ SkArenaAlloc *alloc) const {
+ return alloc->make<Provider>();
+}
+
+void SkNormalFlatSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[],
+ int count) const {
+ for (int i = 0; i < count; i++) {
+ output[i] = {0.0f, 0.0f, 1.0f};
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkNormalFlatSourceImpl::CreateProc(SkReadBuffer& buf) {
+ return sk_make_sp<SkNormalFlatSourceImpl>();
+}
+
+void SkNormalFlatSourceImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkNormalSource> SkNormalSource::MakeFlat() {
+ return sk_make_sp<SkNormalFlatSourceImpl>();
+}
diff --git a/gfx/skia/skia/src/core/SkNormalFlatSource.h b/gfx/skia/skia/src/core/SkNormalFlatSource.h
new file mode 100644
index 0000000000..a7960b0ac6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalFlatSource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalFlatSource_DEFINED
+#define SkNormalFlatSource_DEFINED
+
+#include "src/core/SkNormalSource.h"
+
+class SK_API SkNormalFlatSourceImpl : public SkNormalSource {
+public:
+ SkNormalFlatSourceImpl(){}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs& args) const override;
+#endif
+
+ SkNormalSource::Provider* asProvider(const SkShaderBase::ContextRec& rec,
+ SkArenaAlloc* alloc) const override;
+
+protected:
+ void flatten(SkWriteBuffer& buf) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkNormalFlatSourceImpl)
+
+ class Provider : public SkNormalSource::Provider {
+ public:
+ Provider();
+
+ ~Provider() override;
+
+ void fillScanLine(int x, int y, SkPoint3 output[], int count) const override;
+
+ private:
+ typedef SkNormalSource::Provider INHERITED;
+ };
+
+ friend class SkNormalSource;
+
+ typedef SkNormalSource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkNormalMapSource.cpp b/gfx/skia/skia/src/core/SkNormalMapSource.cpp
new file mode 100644
index 0000000000..b84e726934
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalMapSource.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkNormalMapSource.h"
+
+#include "include/core/SkMatrix.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkNormalSource.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLightingShader.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+class NormalMapFP : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> mapFP,
+ const SkMatrix& invCTM) {
+ return std::unique_ptr<GrFragmentProcessor>(new NormalMapFP(std::move(mapFP), invCTM));
+ }
+
+ const char* name() const override { return "NormalMapFP"; }
+
+ const SkMatrix& invCTM() const { return fInvCTM; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return Make(this->childProcessor(0).clone(), fInvCTM);
+ }
+
+private:
+ class GLSLNormalMapFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLNormalMapFP() : fColumnMajorInvCTM22{0.0f} {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // add uniform
+ const char* xformUniName = nullptr;
+ fXformUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat2x2_GrSLType,
+ "Xform", &xformUniName);
+
+ SkString dstNormalColorName("dstNormalColor");
+ this->invokeChild(0, &dstNormalColorName, args);
+ fragBuilder->codeAppendf("float3 normal = normalize(%s.rgb - float3(0.5));",
+ dstNormalColorName.c_str());
+
+ // If there are no x or y components, return (0, 0, +/- 1) instead to avoid division by 0
+ fragBuilder->codeAppend( "if (abs(normal.z) > 0.999) {");
+ fragBuilder->codeAppendf(" %s = normalize(half4(0.0, 0.0, half(normal.z), 0.0));",
+ args.fOutputColor);
+ // Otherwise, normalize the transformed X and Y while keeping both Z and the
+ // vector's angle in the XY plane constant. This maintains the "slope" of the
+ // surface while appropriately rotating the normal regardless of any anisotropic
+ // scaling that occurs. Here, the 'scaling factor' is the number that must divide
+ // the transformed X and Y so that the normal's length remains equal to 1.
+ fragBuilder->codeAppend( "} else {");
+ fragBuilder->codeAppendf(" float2 transformed = %s * normal.xy;",
+ xformUniName);
+ fragBuilder->codeAppend( " float scalingFactorSquared = "
+ "( (transformed.x * transformed.x) "
+ "+ (transformed.y * transformed.y) )"
+ "/(1.0 - (normal.z * normal.z));");
+ fragBuilder->codeAppendf(" %s = half4(half2(transformed * "
+ "inversesqrt(scalingFactorSquared)),"
+ "half(normal.z), 0.0);",
+ args.fOutputColor);
+ fragBuilder->codeAppend( "}");
+ }
+
+ static void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(0x0);
+ }
+
+ private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) override {
+ const NormalMapFP& normalMapFP = proc.cast<NormalMapFP>();
+
+ const SkMatrix& invCTM = normalMapFP.invCTM();
+ fColumnMajorInvCTM22[0] = invCTM.get(SkMatrix::kMScaleX);
+ fColumnMajorInvCTM22[1] = invCTM.get(SkMatrix::kMSkewY);
+ fColumnMajorInvCTM22[2] = invCTM.get(SkMatrix::kMSkewX);
+ fColumnMajorInvCTM22[3] = invCTM.get(SkMatrix::kMScaleY);
+ pdman.setMatrix2f(fXformUni, fColumnMajorInvCTM22);
+ }
+
+ private:
+ // Upper-right 2x2 corner of the inverse of the CTM in column-major form
+ float fColumnMajorInvCTM22[4];
+ GrGLSLProgramDataManager::UniformHandle fXformUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLNormalMapFP::GenKey(*this, caps, b);
+ }
+ NormalMapFP(std::unique_ptr<GrFragmentProcessor> mapFP, const SkMatrix& invCTM)
+ : INHERITED(kMappedNormalsFP_ClassID, kNone_OptimizationFlags)
+ , fInvCTM(invCTM) {
+ this->registerChildProcessor(std::move(mapFP));
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLNormalMapFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const NormalMapFP& normalMapFP = proc.cast<NormalMapFP>();
+ return fInvCTM == normalMapFP.fInvCTM;
+ }
+
+ SkMatrix fInvCTM;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+std::unique_ptr<GrFragmentProcessor> SkNormalMapSourceImpl::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ std::unique_ptr<GrFragmentProcessor> mapFP = as_SB(fMapShader)->asFragmentProcessor(args);
+ if (!mapFP) {
+ return nullptr;
+ }
+
+ return NormalMapFP::Make(std::move(mapFP), fInvCTM);
+}
+
+#endif // SK_SUPPORT_GPU
+
+////////////////////////////////////////////////////////////////////////////
+
+SkNormalMapSourceImpl::Provider::Provider(const SkNormalMapSourceImpl& source,
+ SkShaderBase::Context* mapContext)
+ : fSource(source)
+ , fMapContext(mapContext) {}
+
+SkNormalSource::Provider* SkNormalMapSourceImpl::asProvider(const SkShaderBase::ContextRec &rec,
+ SkArenaAlloc* alloc) const {
+ SkMatrix normTotalInv;
+ if (!this->computeNormTotalInverse(rec, &normTotalInv)) {
+ return nullptr;
+ }
+
+ // Normals really aren't colors, so to ensure we can always make the context, we ignore
+ // the rec's colorspace
+ SkColorSpace* dstColorSpace = nullptr;
+
+ // Overriding paint's alpha because we need the normal map's RGB channels to be unpremul'd
+ SkPaint overridePaint {*(rec.fPaint)};
+ overridePaint.setAlpha(0xFF);
+ SkShaderBase::ContextRec overrideRec(overridePaint, *(rec.fMatrix), rec.fLocalMatrix,
+ rec.fDstColorType, dstColorSpace);
+
+ auto* context = as_SB(fMapShader)->makeContext(overrideRec, alloc);
+ if (!context) {
+ return nullptr;
+ }
+
+ return alloc->make<Provider>(*this, context);
+}
+
+bool SkNormalMapSourceImpl::computeNormTotalInverse(const SkShaderBase::ContextRec& rec,
+ SkMatrix* normTotalInverse) const {
+ SkMatrix total = SkMatrix::Concat(*rec.fMatrix, as_SB(fMapShader)->getLocalMatrix());
+ if (rec.fLocalMatrix) {
+ total.preConcat(*rec.fLocalMatrix);
+ }
+
+ return total.invert(normTotalInverse);
+}
+
+#define BUFFER_MAX 16
+void SkNormalMapSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[],
+ int count) const {
+ SkPMColor tmpNormalColors[BUFFER_MAX];
+
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ fMapContext->shadeSpan(x, y, tmpNormalColors, n);
+
+ for (int i = 0; i < n; i++) {
+ SkPoint3 tempNorm;
+
+ tempNorm.set(SkIntToScalar(SkGetPackedR32(tmpNormalColors[i])) - 127.0f,
+ SkIntToScalar(SkGetPackedG32(tmpNormalColors[i])) - 127.0f,
+ SkIntToScalar(SkGetPackedB32(tmpNormalColors[i])) - 127.0f);
+
+ tempNorm.normalize();
+
+ if (!SkScalarNearlyEqual(SkScalarAbs(tempNorm.fZ), 1.0f)) {
+ SkVector transformed = fSource.fInvCTM.mapVector(tempNorm.fX, tempNorm.fY);
+
+ // Normalize the transformed X and Y while keeping both Z and the vector's
+ // angle in the XY plane constant. This maintains the "slope" of the surface
+ // while appropriately rotating the normal for any anisotropic scaling that
+ // occurs. Here, the scaling factor is the number that must divide the
+ // transformed X and Y so that the normal's length remains equal to 1.
+ SkScalar scalingFactorSquared =
+ (SkScalarSquare(transformed.fX) + SkScalarSquare(transformed.fY))
+ / (1.0f - SkScalarSquare(tempNorm.fZ));
+ SkScalar invScalingFactor = SkScalarInvert(SkScalarSqrt(scalingFactorSquared));
+
+ output[i].fX = transformed.fX * invScalingFactor;
+ output[i].fY = transformed.fY * invScalingFactor;
+ output[i].fZ = tempNorm.fZ;
+ } else {
+ output[i] = {0.0f, 0.0f, tempNorm.fZ};
+ output[i].normalize();
+ }
+
+ SkASSERT(SkScalarNearlyEqual(output[i].length(), 1.0f));
+ }
+
+ output += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkNormalMapSourceImpl::CreateProc(SkReadBuffer& buf) {
+
+ sk_sp<SkShader> mapShader = buf.readFlattenable<SkShaderBase>();
+
+ SkMatrix invCTM;
+ buf.readMatrix(&invCTM);
+
+ return sk_make_sp<SkNormalMapSourceImpl>(std::move(mapShader), invCTM);
+}
+
+void SkNormalMapSourceImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ buf.writeFlattenable(fMapShader.get());
+ buf.writeMatrix(fInvCTM);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkNormalSource> SkNormalSource::MakeFromNormalMap(sk_sp<SkShader> map, const SkMatrix& ctm) {
+ SkMatrix invCTM;
+
+ if (!ctm.invert(&invCTM) || !map) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkNormalMapSourceImpl>(std::move(map), invCTM);
+}
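The "scaling factor" used in both the GPU and CPU paths falls out of one line of algebra. With the decoded unit normal N = (Nx, Ny, Nz) and the transformed tangential part T = invCTM * (Nx, Ny), the output (Tx/s, Ty/s, Nz) must again have unit length:

    (Tx/s)^2 + (Ty/s)^2 + Nz^2 = 1
        =>  s^2 = (Tx^2 + Ty^2) / (1 - Nz^2)

which is exactly scalingFactorSquared above; the |Nz| ~ 1 branch exists because the denominator vanishes for a straight-up (or straight-down) normal.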
diff --git a/gfx/skia/skia/src/core/SkNormalMapSource.h b/gfx/skia/skia/src/core/SkNormalMapSource.h
new file mode 100644
index 0000000000..435cb0312a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalMapSource.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalMapSource_DEFINED
+#define SkNormalMapSource_DEFINED
+
+#include "src/core/SkNormalSource.h"
+
+class SkNormalMapSourceImpl : public SkNormalSource {
+public:
+ SkNormalMapSourceImpl(sk_sp<SkShader> mapShader, const SkMatrix& invCTM)
+ : fMapShader(std::move(mapShader))
+ , fInvCTM(invCTM) {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs& args) const override;
+#endif
+
+ SkNormalSource::Provider* asProvider(const SkShaderBase::ContextRec& rec,
+ SkArenaAlloc* alloc) const override;
+
+protected:
+ void flatten(SkWriteBuffer& buf) const override;
+
+ bool computeNormTotalInverse(const SkShaderBase::ContextRec& rec,
+ SkMatrix* normTotalInverse) const;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkNormalMapSourceImpl)
+
+ class Provider : public SkNormalSource::Provider {
+ public:
+ Provider(const SkNormalMapSourceImpl& source, SkShaderBase::Context* mapContext);
+
+ void fillScanLine(int x, int y, SkPoint3 output[], int count) const override;
+
+ private:
+ const SkNormalMapSourceImpl& fSource;
+ SkShaderBase::Context* fMapContext;
+
+ typedef SkNormalSource::Provider INHERITED;
+ };
+
+ sk_sp<SkShader> fMapShader;
+ SkMatrix fInvCTM; // Inverse of the canvas total matrix, used for rotating normals.
+
+ friend class SkNormalSource;
+
+ typedef SkNormalSource INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkNormalSource.cpp b/gfx/skia/skia/src/core/SkNormalSource.cpp
new file mode 100644
index 0000000000..9bcab1e7dc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalSource.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkNormalFlatSource.h"
+#include "src/core/SkNormalMapSource.h"
+#include "src/core/SkNormalSource.h"
+
+// Generating vtable
+SkNormalSource::~SkNormalSource() {}
+
+void SkNormalSource::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkNormalMapSourceImpl);
+ SK_REGISTER_FLATTENABLE(SkNormalFlatSourceImpl);
+}
+
diff --git a/gfx/skia/skia/src/core/SkNormalSource.h b/gfx/skia/skia/src/core/SkNormalSource.h
new file mode 100644
index 0000000000..49a5257779
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalSource.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalSource_DEFINED
+#define SkNormalSource_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkMatrix;
+struct SkPoint3;
+
+#if SK_SUPPORT_GPU
+class GrFragmentProcessor;
+#endif
+
+/** Abstract class that generates or reads in normals for use by SkLightingShader.
+*/
+class SK_API SkNormalSource : public SkFlattenable {
+public:
+ virtual ~SkNormalSource() override;
+
+#if SK_SUPPORT_GPU
+ /** Returns a fragment processor that takes no input and outputs a normal (already rotated)
+ as its output color. To be used as a child fragment processor.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const = 0;
+#endif
+
+ class Provider {
+ public:
+ virtual ~Provider() {}
+
+ /** Called for each span of the object being drawn on the CPU. Your subclass should set
+ the appropriate normals that correspond to the specified device coordinates.
+ */
+ virtual void fillScanLine(int x, int y, SkPoint3 output[], int count) const = 0;
+ };
+
+ /** Returns an instance of 'Provider' that provides normals for the CPU pipeline. Any
+ necessary data will be allocated from 'alloc'.
+ */
+ virtual Provider* asProvider(const SkShaderBase::ContextRec&, SkArenaAlloc*) const = 0;
+
+ /** Returns a normal source that provides normals sourced from the normal map argument.
+
+ @param map a shader that outputs the normal map
+ @param ctm the current canvas' total matrix, used to rotate normals when necessary.
+
+ nullptr will be returned if 'map' is null
+
+ The normal map is currently assumed to be an 8888 image where the normal at a texel
+ is retrieved by:
+ N.x = R-127;
+ N.y = G-127;
+ N.z = B-127;
+ N.normalize();
+ The +Z axis is thus encoded in RGB as (127, 127, 255) while the -Z axis is
+ (127, 127, 0).
+ */
+ static sk_sp<SkNormalSource> MakeFromNormalMap(sk_sp<SkShader> map, const SkMatrix& ctm);
+
+ /** Returns a normal source that provides only straight-up normals, <0, 0, 1>.
+ */
+ static sk_sp<SkNormalSource> MakeFlat();
+
+ static Type GetFlattenableType() { return kSkNormalSource_Type; }
+ Type getFlattenableType() const override { return GetFlattenableType(); }
+
+ static sk_sp<SkNormalSource> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkNormalSource>(static_cast<SkNormalSource*>(
+ SkFlattenable::Deserialize(GetFlattenableType(), data, size, procs).release()));
+ }
+ static void RegisterFlattenables();
+};
+
+#endif
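Decoding a texel by the convention documented above, as a hedged sketch (the texel value is illustrative):

    // (R, G, B) = (218, 218, 255) decodes to (91, 91, 128), which normalizes
    // to roughly (0.5, 0.5, 0.7) -- a normal tilted equally toward +X and +Y.
    SkPoint3 n = SkPoint3::Make(218 - 127, 218 - 127, 255 - 127);
    n.normalize();  // returns false only for a zero vector, e.g. texel (127, 127, 127)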
diff --git a/gfx/skia/skia/src/core/SkOSFile.h b/gfx/skia/skia/src/core/SkOSFile.h
new file mode 100644
index 0000000000..330b77bff4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOSFile.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+// TODO: add unittests for all these operations
+
+#ifndef SkOSFile_DEFINED
+#define SkOSFile_DEFINED
+
+#include <stdio.h>
+
+#include "include/core/SkString.h"
+
+enum SkFILE_Flags {
+ kRead_SkFILE_Flag = 0x01,
+ kWrite_SkFILE_Flag = 0x02
+};
+
+FILE* sk_fopen(const char path[], SkFILE_Flags);
+void sk_fclose(FILE*);
+
+size_t sk_fgetsize(FILE*);
+
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE*);
+
+void sk_fflush(FILE*);
+void sk_fsync(FILE*);
+
+size_t sk_ftell(FILE*);
+
+/** Maps a file into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fmmap(FILE* f, size_t* length);
+
+/** Maps a file descriptor into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fdmmap(int fd, size_t* length);
+
+/** Unmaps a file previously mapped by sk_fmmap or sk_fdmmap.
+ * The length parameter must be the same as returned from sk_fmmap.
+ */
+void sk_fmunmap(const void* addr, size_t length);
+
+/** Returns true if the two FILEs point at the exact same filesystem object. */
+bool sk_fidentical(FILE* a, FILE* b);
+
+/** Returns the underlying file descriptor for the given file.
+ * The return value will be < 0 on failure.
+ */
+int sk_fileno(FILE* f);
+
+/** Returns true if something (file, directory, ???) exists at this path,
+ * and has the specified access flags.
+ */
+bool sk_exists(const char *path, SkFILE_Flags = (SkFILE_Flags)0);
+
+// Returns true if a directory exists at this path.
+bool sk_isdir(const char *path);
+
+// Like pread, but may affect the file position marker.
+// Returns the number of bytes read or SIZE_MAX if failed.
+size_t sk_qread(FILE*, void* buffer, size_t count, size_t offset);
+
+
+// Create a new directory at this path; returns true if successful.
+// If the directory already existed, this will return true.
+// Description of the error, if any, will be written to stderr.
+bool sk_mkdir(const char* path);
+
+class SkOSFile {
+public:
+ class Iter {
+ public:
+ Iter();
+ Iter(const char path[], const char suffix[] = nullptr);
+ ~Iter();
+
+ void reset(const char path[], const char suffix[] = nullptr);
+ /** If getDir is true, only returns directories.
+ Results are undefined if true and false calls are
+ interleaved on a single iterator.
+ */
+ bool next(SkString* name, bool getDir = false);
+
+ static const size_t kStorageSize = 40;
+ private:
+ SkAlignedSStorage<kStorageSize> fSelf;
+ };
+};
+
+#endif
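A minimal sketch of the mapping helpers (the path is illustrative; error handling is reduced to early-outs):

    FILE* f = sk_fopen("/tmp/example.bin", kRead_SkFILE_Flag);
    if (f) {
        size_t length = 0;
        if (void* addr = sk_fmmap(f, &length)) {
            // ... read-only access to the file contents at 'addr' ...
            sk_fmunmap(addr, length);  // must pass the length sk_fmmap returned
        }
        sk_fclose(f);
    }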
diff --git a/gfx/skia/skia/src/core/SkOpts.cpp b/gfx/skia/skia/src/core/SkOpts.cpp
new file mode 100644
index 0000000000..7228c30712
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkHalf.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkOpts.h"
+
+#if defined(SK_ARM_HAS_NEON)
+ #if defined(SK_ARM_HAS_CRC32)
+ #define SK_OPTS_NS neon_and_crc32
+ #else
+ #define SK_OPTS_NS neon
+ #endif
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #define SK_OPTS_NS avx2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ #define SK_OPTS_NS avx
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #define SK_OPTS_NS sse42
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define SK_OPTS_NS sse41
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #define SK_OPTS_NS ssse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ #define SK_OPTS_NS sse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define SK_OPTS_NS sse2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #define SK_OPTS_NS sse
+#else
+ #define SK_OPTS_NS portable
+#endif
+
+#include "src/opts/SkBitmapFilter_opts.h"
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitMask_opts.h"
+#include "src/opts/SkBlitRow_opts.h"
+#include "src/opts/SkChecksum_opts.h"
+#include "src/opts/SkRasterPipeline_opts.h"
+#include "src/opts/SkSwizzler_opts.h"
+#include "src/opts/SkUtils_opts.h"
+#include "src/opts/SkXfermode_opts.h"
+
+#include "src/core/SkCubicSolver.h"
+
+namespace SkOpts {
+ // Define default function pointer values here...
+ // If our global compile options are set high enough, these defaults might even be
+ // CPU-specialized, e.g. a typical x86-64 machine might start with SSE2 defaults.
+ // They'll still get a chance to be replaced with even better ones, e.g. using SSE4.1.
+#define DEFINE_DEFAULT(name) decltype(name) name = SK_OPTS_NS::name
+ DEFINE_DEFAULT(create_xfermode);
+
+ DEFINE_DEFAULT(blit_mask_d32_a8);
+
+ DEFINE_DEFAULT(blit_row_color32);
+ DEFINE_DEFAULT(blit_row_s32a_opaque);
+
+ DEFINE_DEFAULT(RGBA_to_BGRA);
+ DEFINE_DEFAULT(RGBA_to_rgbA);
+ DEFINE_DEFAULT(RGBA_to_bgrA);
+ DEFINE_DEFAULT(RGB_to_RGB1);
+ DEFINE_DEFAULT(RGB_to_BGR1);
+ DEFINE_DEFAULT(gray_to_RGB1);
+ DEFINE_DEFAULT(grayA_to_RGBA);
+ DEFINE_DEFAULT(grayA_to_rgbA);
+ DEFINE_DEFAULT(inverted_CMYK_to_RGB1);
+ DEFINE_DEFAULT(inverted_CMYK_to_BGR1);
+
+ DEFINE_DEFAULT(memset16);
+ DEFINE_DEFAULT(memset32);
+ DEFINE_DEFAULT(memset64);
+
+ DEFINE_DEFAULT(rect_memset16);
+ DEFINE_DEFAULT(rect_memset32);
+ DEFINE_DEFAULT(rect_memset64);
+
+ DEFINE_DEFAULT(cubic_solver);
+
+ DEFINE_DEFAULT(hash_fn);
+
+ DEFINE_DEFAULT(S32_alpha_D32_filter_DX);
+
+ DEFINE_DEFAULT(convolve_vertically);
+ DEFINE_DEFAULT(convolve_horizontally);
+ DEFINE_DEFAULT(convolve_4_rows_horizontally);
+
+#undef DEFINE_DEFAULT
+
+#define M(st) (StageFn)SK_OPTS_NS::st,
+ StageFn stages_highp[] = { SK_RASTER_PIPELINE_STAGES(M) };
+ StageFn just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+ void (*start_pipeline_highp)(size_t,size_t,size_t,size_t,void**)
+ = SK_OPTS_NS::start_pipeline;
+#undef M
+
+#define M(st) (StageFn)SK_OPTS_NS::lowp::st,
+ StageFn stages_lowp[] = { SK_RASTER_PIPELINE_STAGES(M) };
+ StageFn just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+ void (*start_pipeline_lowp)(size_t,size_t,size_t,size_t,void**)
+ = SK_OPTS_NS::lowp::start_pipeline;
+#undef M
+
+ // Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
+ void Init_ssse3();
+ void Init_sse41();
+ void Init_sse42();
+ void Init_avx();
+ void Init_hsw();
+ void Init_crc32();
+
+ static void init() {
+#if !defined(SK_BUILD_NO_OPTS)
+ #if defined(SK_CPU_X86)
+ #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSSE3
+ if (SkCpu::Supports(SkCpu::SSSE3)) { Init_ssse3(); }
+ #endif
+
+ #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE41
+ if (SkCpu::Supports(SkCpu::SSE41)) { Init_sse41(); }
+ #endif
+
+ #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE42
+ if (SkCpu::Supports(SkCpu::SSE42)) { Init_sse42(); }
+ #endif
+
+ #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_AVX
+ if (SkCpu::Supports(SkCpu::AVX)) { Init_avx(); }
+ if (SkCpu::Supports(SkCpu::HSW)) { Init_hsw(); }
+ #endif
+
+ if (SkCpu::Supports(SkCpu::HSW )) { Init_hsw(); }
+
+ #elif defined(SK_CPU_ARM64)
+ if (SkCpu::Supports(SkCpu::CRC32)) { Init_crc32(); }
+
+ #endif
+#endif
+ }
+
+ void Init() {
+ static SkOnce once;
+ once(init);
+ }
+} // namespace SkOpts
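Reduced to a single function pointer, the dispatch pattern this file implements looks like the following hedged, self-contained sketch (the identifiers are illustrative, not Skia's):

    #include <cstdint>

    namespace portable {
        inline void fill32(uint32_t* p, uint32_t v, int n) {
            for (int i = 0; i < n; i++) { p[i] = v; }
        }
    }

    // Default binding: the best version the compile flags allow. Init(),
    // guarded by SkOnce in the real code, probes the CPU once and may rebind
    // this pointer to a specialized version; every later call is a plain
    // indirect call with no per-call feature checks.
    void (*fill32)(uint32_t*, uint32_t, int) = portable::fill32;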
diff --git a/gfx/skia/skia/src/core/SkOpts.h b/gfx/skia/skia/src/core/SkOpts.h
new file mode 100644
index 0000000000..2a717c0bf8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpts_DEFINED
+#define SkOpts_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkConvolver.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkXfermodePriv.h"
+
+struct SkBitmapProcState;
+
+namespace SkOpts {
+ // Call to replace pointers to portable functions with pointers to CPU-specific functions.
+ // Thread-safe and idempotent.
+ // Called by SkGraphics::Init().
+ void Init();
+
+ // Declare function pointers here...
+
+ // May return nullptr if we haven't specialized the given Mode.
+ extern SkXfermode* (*create_xfermode)(SkBlendMode);
+
+ extern void (*blit_mask_d32_a8)(SkPMColor*, size_t, const SkAlpha*, size_t, SkColor, int, int);
+ extern void (*blit_row_color32)(SkPMColor*, const SkPMColor*, int, SkPMColor);
+ extern void (*blit_row_s32a_opaque)(SkPMColor*, const SkPMColor*, int, U8CPU);
+
+ // Swizzle input into some sort of 8888 pixel, {premul,unpremul} x {rgba,bgra}.
+ typedef void (*Swizzle_8888_u32)(uint32_t*, const uint32_t*, int);
+ extern Swizzle_8888_u32 RGBA_to_BGRA, // i.e. just swap RB
+ RGBA_to_rgbA, // i.e. just premultiply
+ RGBA_to_bgrA, // i.e. swap RB and premultiply
+ inverted_CMYK_to_RGB1, // i.e. convert color space
+ inverted_CMYK_to_BGR1; // i.e. convert color space
+
+ typedef void (*Swizzle_8888_u8)(uint32_t*, const uint8_t*, int);
+ extern Swizzle_8888_u8 RGB_to_RGB1, // i.e. insert an opaque alpha
+ RGB_to_BGR1, // i.e. swap RB and insert an opaque alpha
+ gray_to_RGB1, // i.e. expand to color channels + an opaque alpha
+ grayA_to_RGBA, // i.e. expand to color channels
+ grayA_to_rgbA; // i.e. expand to color channels and premultiply
+
+ extern void (*memset16)(uint16_t[], uint16_t, int);
+ extern void SK_API (*memset32)(uint32_t[], uint32_t, int);
+ extern void (*memset64)(uint64_t[], uint64_t, int);
+
+ extern void (*rect_memset16)(uint16_t[], uint16_t, int, size_t, int);
+ extern void (*rect_memset32)(uint32_t[], uint32_t, int, size_t, int);
+ extern void (*rect_memset64)(uint64_t[], uint64_t, int, size_t, int);
+
+ extern float (*cubic_solver)(float, float, float, float);
+
+ // The fastest high quality 32-bit hash we can provide on this platform.
+ extern uint32_t (*hash_fn)(const void*, size_t, uint32_t seed);
+ static inline uint32_t hash(const void* data, size_t bytes, uint32_t seed=0) {
+ return hash_fn(data, bytes, seed);
+ }
+
+ // SkBitmapProcState optimized Shader, Sample, or Matrix procs.
+ // This is the only one that can use anything past SSE2/NEON.
+ extern void (*S32_alpha_D32_filter_DX)(const SkBitmapProcState&,
+ const uint32_t* xy, int count, SkPMColor*);
+
+#define M(st) +1
+ // We can't necessarily express the type of SkJumper stage functions here,
+ // so we just use this void(*)(void) as a stand-in.
+ using StageFn = void(*)(void);
+ extern StageFn stages_highp[SK_RASTER_PIPELINE_STAGES(M)], just_return_highp;
+ extern StageFn stages_lowp [SK_RASTER_PIPELINE_STAGES(M)], just_return_lowp;
+
+ extern void (*start_pipeline_highp)(size_t,size_t,size_t,size_t, void**);
+ extern void (*start_pipeline_lowp )(size_t,size_t,size_t,size_t, void**);
+#undef M
+
+ extern void (*convolve_vertically)(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
+ int filter_length, unsigned char* const* source_data_rows,
+ int pixel_width, unsigned char* out_row, bool has_alpha);
+ extern void (*convolve_4_rows_horizontally)(const unsigned char* src_data[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* out_row[4], size_t out_row_bytes);
+ extern void (*convolve_horizontally)(const unsigned char* src_data, const SkConvolutionFilter1D& filter,
+ unsigned char* out_row, bool has_alpha);
+}
+
+#endif//SkOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkOrderedReadBuffer.h b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
new file mode 100644
index 0000000000..239d8b68c2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
@@ -0,0 +1,9 @@
+// Temporary shim to keep a couple dependencies working in Chromium.
+#ifndef SkOrderedReadBuffer_DEFINED
+#define SkOrderedReadBuffer_DEFINED
+
+#include "src/core/SkReadBuffer.h"
+
+typedef SkReadBuffer SkOrderedReadBuffer;
+
+#endif//SkOrderedReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp b/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp
new file mode 100644
index 0000000000..3a7aaeb4e7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkOverdrawCanvas.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/utils/SkPatchUtils.h"
+
+SkOverdrawCanvas::SkOverdrawCanvas(SkCanvas* canvas)
+ : INHERITED(canvas->onImageInfo().width(), canvas->onImageInfo().height())
+{
+ // Non-drawing calls that SkOverdrawCanvas does not override (translate, save, etc.)
+ // will pass through to the input canvas.
+ this->addCanvas(canvas);
+
+ static constexpr float kIncrementAlpha[] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f/255,
+ };
+
+ fPaint.setAntiAlias(false);
+ fPaint.setBlendMode(SkBlendMode::kPlus);
+ fPaint.setColorFilter(SkColorFilters::Matrix(kIncrementAlpha));
+}
+
+namespace {
+class TextDevice : public SkNoPixelsDevice, public SkGlyphRunListPainter::BitmapDevicePainter {
+public:
+ TextDevice(SkCanvas* overdrawCanvas, const SkSurfaceProps& props)
+ : SkNoPixelsDevice{SkIRect::MakeWH(32767, 32767), props},
+ fOverdrawCanvas{overdrawCanvas},
+ fPainter{props, kN32_SkColorType, nullptr, SkStrikeCache::GlobalStrikeCache()} {}
+
+ void paintPaths(SkDrawableGlyphBuffer*, SkScalar scale, const SkPaint& paint) const override {}
+
+ void paintMasks(SkDrawableGlyphBuffer* drawables, const SkPaint& paint) const override {
+ for (auto t : drawables->drawable()) {
+ SkGlyphVariant glyph; SkPoint pos;
+ std::tie(glyph, pos) = t;
+ SkMask mask = glyph.glyph()->mask(pos);
+ fOverdrawCanvas->drawRect(SkRect::Make(mask.fBounds), SkPaint());
+ }
+ }
+
+protected:
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override {
+ fPainter.drawForBitmapDevice(glyphRunList, fOverdrawCanvas->getTotalMatrix(), this);
+ }
+
+private:
+ SkCanvas* const fOverdrawCanvas;
+ SkGlyphRunListPainter fPainter;
+};
+} // namespace
+
+void SkOverdrawCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkGlyphRunBuilder b;
+ SkSurfaceProps props{0, kUnknown_SkPixelGeometry};
+ this->getProps(&props);
+ TextDevice device{this, props};
+
+ b.drawTextBlob(paint, *blob, {x, y}, &device);
+}
+
+void SkOverdrawCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode blendMode,
+ const SkPaint&) {
+ fList[0]->onDrawPatch(cubics, colors, texCoords, blendMode, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawPaint(const SkPaint& paint) {
+ if (0 == paint.getColor() && !paint.getColorFilter() && !paint.getShader()) {
+ // This is a clear, ignore it.
+ } else {
+ fList[0]->onDrawPaint(this->overdrawPaint(paint));
+ }
+}
+
+void SkOverdrawCanvas::onDrawBehind(const SkPaint& paint) {
+ fList[0]->onDrawBehind(this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ fList[0]->onDrawRect(rect, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ fList[0]->onDrawRegion(region, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ fList[0]->onDrawOval(oval, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawArc(const SkRect& arc, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ fList[0]->onDrawArc(arc, startAngle, sweepAngle, useCenter, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ fList[0]->onDrawDRRect(outer, inner, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRRect(const SkRRect& rect, const SkPaint& paint) {
+ fList[0]->onDrawRRect(rect, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint points[],
+ const SkPaint& paint) {
+ fList[0]->onDrawPoints(mode, count, points, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawVerticesObject(const SkVertices* vertices,
+ const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode blendMode, const SkPaint& paint) {
+ fList[0]->onDrawVerticesObject(vertices,
+ bones,
+ boneCount,
+ blendMode,
+ this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawAtlas(const SkImage* image, const SkRSXform xform[],
+ const SkRect texs[], const SkColor colors[], int count,
+ SkBlendMode mode, const SkRect* cull, const SkPaint* paint) {
+ SkPaint* paintPtr = &fPaint;
+ SkPaint storage;
+ if (paint) {
+ storage = this->overdrawPaint(*paint);
+ paintPtr = &storage;
+ }
+
+ fList[0]->onDrawAtlas(image, xform, texs, colors, count, mode, cull, paintPtr);
+}
+
+void SkOverdrawCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ fList[0]->onDrawPath(path, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint*) {
+ fList[0]->onDrawRect(SkRect::MakeXYWH(x, y, image->width(), image->height()), fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) {
+ fList[0]->onDrawRect(dst, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImageNine(const SkImage*, const SkIRect&, const SkRect& dst,
+ const SkPaint*) {
+ fList[0]->onDrawRect(dst, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint*) {
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(image->width(), image->height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(image->width(), image->height(), latticePlusBounds)) {
+ SkLatticeIter iter(latticePlusBounds, dst);
+
+ SkRect dummy, iterDst;
+ while (iter.next(&dummy, &iterDst)) {
+ fList[0]->onDrawRect(iterDst, fPaint);
+ }
+ } else {
+ fList[0]->onDrawRect(dst, fPaint);
+ }
+}
+
+void SkOverdrawCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint*) {
+ fList[0]->onDrawRect(SkRect::MakeXYWH(x, y, bitmap.width(), bitmap.height()), fPaint);
+}
+
+void SkOverdrawCanvas::onDrawBitmapRect(const SkBitmap&, const SkRect*, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) {
+ fList[0]->onDrawRect(dst, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawBitmapNine(const SkBitmap&, const SkIRect&, const SkRect& dst,
+ const SkPaint*) {
+ fList[0]->onDrawRect(dst, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ sk_sp<SkImage> image = SkMakeImageFromRasterBitmap(bitmap, kNever_SkCopyPixelsMode);
+ this->onDrawImageLattice(image.get(), lattice, dst, paint);
+}
+
+void SkOverdrawCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ drawable->draw(this, matrix);
+}
+
+void SkOverdrawCanvas::onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) {
+ SkASSERT(false);
+ return;
+}
+
+void SkOverdrawCanvas::onDrawAnnotation(const SkRect&, const char[], SkData*) {}
+
+void SkOverdrawCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ SkRect bounds;
+ SkDrawShadowMetrics::GetLocalBounds(path, rec, this->getTotalMatrix(), &bounds);
+ fList[0]->onDrawRect(bounds, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ if (clip) {
+ SkPath path;
+ path.addPoly(clip, 4, true);
+ fList[0]->onDrawPath(path, fPaint);
+ } else {
+ fList[0]->onDrawRect(rect, fPaint);
+ }
+}
+
+void SkOverdrawCanvas::onDrawEdgeAAImageSet(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[], const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ int clipIndex = 0;
+ for (int i = 0; i < count; ++i) {
+ if (set[i].fMatrixIndex >= 0) {
+ fList[0]->save();
+ fList[0]->concat(preViewMatrices[set[i].fMatrixIndex]);
+ }
+ if (set[i].fHasClip) {
+ SkPath path;
+ path.addPoly(dstClips + clipIndex, 4, true);
+ clipIndex += 4;
+ fList[0]->onDrawPath(path, fPaint);
+ } else {
+ fList[0]->onDrawRect(set[i].fDstRect, fPaint);
+ }
+ if (set[i].fMatrixIndex >= 0) {
+ fList[0]->restore();
+ }
+ }
+}
+
+inline SkPaint SkOverdrawCanvas::overdrawPaint(const SkPaint& paint) {
+ SkPaint newPaint = fPaint;
+ newPaint.setStyle(paint.getStyle());
+ newPaint.setStrokeWidth(paint.getStrokeWidth());
+ return newPaint;
+}
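How the counts get consumed, as a hedged sketch (the surface size and pixel coordinates are illustrative): every draw adds exactly 1/255 of alpha in kPlus mode, so after replaying a scene the alpha channel holds per-pixel draw counts.

    const int w = 256, h = 256;
    SkBitmap counts;
    counts.allocPixels(SkImageInfo::MakeA8(w, h));
    counts.eraseColor(SK_ColorTRANSPARENT);

    SkCanvas base(counts);
    SkOverdrawCanvas overdraw(&base);
    // ... replay the scene into 'overdraw' ...

    uint8_t hits = *counts.getAddr8(10, 10);  // e.g. 3 means 3x overdraw here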
diff --git a/gfx/skia/skia/src/core/SkPaint.cpp b/gfx/skia/skia/src/core/SkPaint.cpp
new file mode 100644
index 0000000000..f294dbe4a0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaint.cpp
@@ -0,0 +1,552 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkPaintDefaults.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeRange.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkStroke.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+// define this to get a printf for out-of-range parameter in setters
+// e.g. setTextSize(-1)
+//#define SK_REPORT_API_RANGE_CHECK
+
+
+SkPaint::SkPaint()
+ : fColor4f{0, 0, 0, 1} // opaque black
+ , fWidth{0}
+ , fMiterLimit{SkPaintDefaults_MiterLimit}
+ , fBitfields{(unsigned)false, // fAntiAlias
+ (unsigned)false, // fDither
+ (unsigned)SkPaint::kDefault_Cap, // fCapType
+ (unsigned)SkPaint::kDefault_Join, // fJoinType
+ (unsigned)SkPaint::kFill_Style, // fStyle
+ (unsigned)kNone_SkFilterQuality, // fFilterQuality
+ (unsigned)SkBlendMode::kSrcOver, // fBlendMode
+ 0} // fPadding
+{
+ static_assert(sizeof(fBitfields) == sizeof(fBitfieldsUInt), "");
+}
+
+SkPaint::SkPaint(const SkColor4f& color, SkColorSpace* colorSpace) : SkPaint() {
+ this->setColor(color, colorSpace);
+}
+
+SkPaint::SkPaint(const SkPaint& src) = default;
+
+SkPaint::SkPaint(SkPaint&& src) = default;
+
+SkPaint::~SkPaint() = default;
+
+SkPaint& SkPaint::operator=(const SkPaint& src) = default;
+
+SkPaint& SkPaint::operator=(SkPaint&& src) = default;
+
+bool operator==(const SkPaint& a, const SkPaint& b) {
+#define EQUAL(field) (a.field == b.field)
+ return EQUAL(fPathEffect)
+ && EQUAL(fShader)
+ && EQUAL(fMaskFilter)
+ && EQUAL(fColorFilter)
+ && EQUAL(fImageFilter)
+ && EQUAL(fColor4f)
+ && EQUAL(fWidth)
+ && EQUAL(fMiterLimit)
+ && EQUAL(fBitfieldsUInt)
+ ;
+#undef EQUAL
+}
+
+#define DEFINE_REF_FOO(type) sk_sp<Sk##type> SkPaint::ref##type() const { return f##type; }
+DEFINE_REF_FOO(ColorFilter)
+DEFINE_REF_FOO(ImageFilter)
+DEFINE_REF_FOO(MaskFilter)
+DEFINE_REF_FOO(PathEffect)
+DEFINE_REF_FOO(Shader)
+#undef DEFINE_REF_FOO
+
+void SkPaint::reset() { *this = SkPaint(); }
+
+void SkPaint::setFilterQuality(SkFilterQuality quality) {
+ fBitfields.fFilterQuality = quality;
+}
+
+void SkPaint::setStyle(Style style) {
+ if ((unsigned)style < kStyleCount) {
+ fBitfields.fStyle = style;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStyle(%d) out of range\n", style);
+#endif
+ }
+}
+
+void SkPaint::setColor(SkColor color) {
+ fColor4f = SkColor4f::FromColor(color);
+}
+
+void SkPaint::setColor(const SkColor4f& color, SkColorSpace* colorSpace) {
+ SkASSERT(fColor4f.fA >= 0 && fColor4f.fA <= 1.0f);
+
+ SkColorSpaceXformSteps steps{colorSpace, kUnpremul_SkAlphaType,
+ sk_srgb_singleton(), kUnpremul_SkAlphaType};
+ fColor4f = color;
+ steps.apply(fColor4f.vec());
+}
+
+void SkPaint::setAlphaf(float a) {
+ SkASSERT(a >= 0 && a <= 1.0f);
+ fColor4f.fA = a;
+}
+
+void SkPaint::setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ this->setColor(SkColorSetARGB(a, r, g, b));
+}
+
+void SkPaint::setStrokeWidth(SkScalar width) {
+ if (width >= 0) {
+ fWidth = width;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeWidth() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeMiter(SkScalar limit) {
+ if (limit >= 0) {
+ fMiterLimit = limit;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeMiter() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeCap(Cap ct) {
+ if ((unsigned)ct < kCapCount) {
+ fBitfields.fCapType = SkToU8(ct);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeCap(%d) out of range\n", ct);
+#endif
+ }
+}
+
+void SkPaint::setStrokeJoin(Join jt) {
+ if ((unsigned)jt < kJoinCount) {
+ fBitfields.fJoinType = SkToU8(jt);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeJoin(%d) out of range\n", jt);
+#endif
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define MOVE_FIELD(Field) void SkPaint::set##Field(sk_sp<Sk##Field> f) { f##Field = std::move(f); }
+MOVE_FIELD(ImageFilter)
+MOVE_FIELD(Shader)
+MOVE_FIELD(ColorFilter)
+MOVE_FIELD(PathEffect)
+MOVE_FIELD(MaskFilter)
+#undef MOVE_FIELD
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkStream.h"
+
+#ifdef SK_DEBUG
+ static void ASSERT_FITS_IN(uint32_t value, int bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ uint32_t mask = ~0U;
+ mask >>= (32 - bitCount);
+ SkASSERT(0 == (value & ~mask));
+ }
+#else
+ #define ASSERT_FITS_IN(value, bitcount)
+#endif
+
+enum FlatFlags {
+ kHasTypeface_FlatFlag = 0x1,
+ kHasEffects_FlatFlag = 0x2,
+
+ kFlatFlagMask = 0x3,
+};
+
+enum BitsPerField {
+ kFlags_BPF = 16,
+ kHint_BPF = 2,
+ kFilter_BPF = 2,
+ kFlatFlags_BPF = 3,
+};
+
+static inline int BPF_Mask(int bits) {
+ return (1 << bits) - 1;
+}
+
+// SkPaint originally defined flags, some of which now apply to SkFont. These are renames
+// of those flags, split into categories depending on which objects they (now) apply to.
+
+enum PaintFlagsForPaint {
+ kAA_PaintFlagForPaint = 0x01,
+ kDither_PaintFlagForPaint = 0x04,
+};
+
+enum PaintFlagsForFont {
+ kFakeBold_PaintFlagForFont = 0x20,
+ kLinear_PaintFlagForFont = 0x40,
+ kSubpixel_PaintFlagForFont = 0x80,
+ kLCD_PaintFlagForFont = 0x200,
+ kEmbeddedBitmap_PaintFlagForFont = 0x400,
+ kAutoHinting_PaintFlagForFont = 0x800,
+};
+
+static FlatFlags unpack_paint_flags(SkPaint* paint, uint32_t packed, SkFont* font) {
+ uint32_t f = packed >> 16;
+ paint->setAntiAlias((f & kAA_PaintFlagForPaint) != 0);
+ paint->setDither((f & kDither_PaintFlagForPaint) != 0);
+ if (font) {
+ font->setEmbolden((f & kFakeBold_PaintFlagForFont) != 0);
+ font->setLinearMetrics((f & kLinear_PaintFlagForFont) != 0);
+ font->setSubpixel((f & kSubpixel_PaintFlagForFont) != 0);
+ font->setEmbeddedBitmaps((f & kEmbeddedBitmap_PaintFlagForFont) != 0);
+ font->setForceAutoHinting((f & kAutoHinting_PaintFlagForFont) != 0);
+
+ font->setHinting((SkFontHinting)((packed >> 14) & BPF_Mask(kHint_BPF)));
+
+ if (f & kAA_PaintFlagForPaint) {
+ if (f & kLCD_PaintFlagForFont) {
+ font->setEdging(SkFont::Edging::kSubpixelAntiAlias);
+ } else {
+ font->setEdging(SkFont::Edging::kAntiAlias);
+ }
+ } else {
+ font->setEdging(SkFont::Edging::kAlias);
+ }
+ }
+
+ paint->setFilterQuality((SkFilterQuality)((packed >> 10) & BPF_Mask(kFilter_BPF)));
+ return (FlatFlags)(packed & kFlatFlagMask);
+}
+
+template <typename T> uint32_t shift_bits(T value, unsigned shift, unsigned bits) {
+ SkASSERT(shift + bits <= 32);
+ uint32_t v = static_cast<uint32_t>(value);
+ ASSERT_FITS_IN(v, bits);
+ return v << shift;
+}
+
+/* Packing the paint
+ flags : 8 // 2...
+ blend : 8 // 30+
+ cap : 2 // 3
+ join : 2 // 3
+ style : 2 // 3
+ filter: 2 // 4
+ flat : 8 // 1...
+ total : 32
+ */
+static uint32_t pack_v68(const SkPaint& paint, unsigned flatFlags) {
+ uint32_t packed = 0;
+ packed |= shift_bits(((unsigned)paint.isDither() << 1) |
+ (unsigned)paint.isAntiAlias(), 0, 8);
+ packed |= shift_bits(paint.getBlendMode(), 8, 8);
+ packed |= shift_bits(paint.getStrokeCap(), 16, 2);
+ packed |= shift_bits(paint.getStrokeJoin(), 18, 2);
+ packed |= shift_bits(paint.getStyle(), 20, 2);
+ packed |= shift_bits(paint.getFilterQuality(), 22, 2);
+ packed |= shift_bits(flatFlags, 24, 8);
+ return packed;
+}
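+
+// Worked example (a sketch, derived directly from the shifts above): an
+// antialiased, stroked paint with round caps and joins, the default blend
+// mode, and no flat flags packs as
+//   bits  0- 7 : 0x01 (fAntiAlias = 1, fDither = 0)
+//   bits  8-15 : (unsigned)SkBlendMode::kSrcOver
+//   bits 16-17 : (unsigned)SkPaint::kRound_Cap
+//   bits 18-19 : (unsigned)SkPaint::kRound_Join
+//   bits 20-21 : (unsigned)SkPaint::kStroke_Style
+//   bits 22-23 : (unsigned)kNone_SkFilterQuality
+//   bits 24-31 : 0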
+
+static uint32_t unpack_v68(SkPaint* paint, uint32_t packed, SkSafeRange& safe) {
+ paint->setAntiAlias((packed & 1) != 0);
+ paint->setDither((packed & 2) != 0);
+ packed >>= 8;
+ paint->setBlendMode(safe.checkLE(packed & 0xFF, SkBlendMode::kLastMode));
+ packed >>= 8;
+ paint->setStrokeCap(safe.checkLE(packed & 0x3, SkPaint::kLast_Cap));
+ packed >>= 2;
+ paint->setStrokeJoin(safe.checkLE(packed & 0x3, SkPaint::kLast_Join));
+ packed >>= 2;
+ paint->setStyle(safe.checkLE(packed & 0x3, SkPaint::kStrokeAndFill_Style));
+ packed >>= 2;
+ paint->setFilterQuality(safe.checkLE(packed & 0x3, kLast_SkFilterQuality));
+ packed >>= 2;
+ return packed;
+}
+
+/* To save space/time, we analyze the paint, and write a truncated version of
+ it if there are no tricky elements like shaders, etc.
+ */
+void SkPaintPriv::Flatten(const SkPaint& paint, SkWriteBuffer& buffer) {
+ uint8_t flatFlags = 0;
+
+ if (paint.getPathEffect() ||
+ paint.getShader() ||
+ paint.getMaskFilter() ||
+ paint.getColorFilter() ||
+ paint.getImageFilter()) {
+ flatFlags |= kHasEffects_FlatFlag;
+ }
+
+ buffer.writeScalar(paint.getStrokeWidth());
+ buffer.writeScalar(paint.getStrokeMiter());
+ buffer.writeColor4f(paint.getColor4f());
+
+ buffer.write32(pack_v68(paint, flatFlags));
+
+ if (flatFlags & kHasEffects_FlatFlag) {
+ buffer.writeFlattenable(paint.getPathEffect());
+ buffer.writeFlattenable(paint.getShader());
+ buffer.writeFlattenable(paint.getMaskFilter());
+ buffer.writeFlattenable(paint.getColorFilter());
+ buffer.write32(0); // legacy, was drawlooper
+ buffer.writeFlattenable(paint.getImageFilter());
+ }
+}
+
+SkReadPaintResult SkPaintPriv::Unflatten_PreV68(SkPaint* paint, SkReadBuffer& buffer, SkFont* font) {
+ SkSafeRange safe;
+
+ {
+ SkScalar sz = buffer.readScalar();
+ SkScalar sx = buffer.readScalar();
+ SkScalar kx = buffer.readScalar();
+ if (font) {
+ font->setSize(sz);
+ font->setScaleX(sx);
+ font->setSkewX(kx);
+ }
+ }
+
+ paint->setStrokeWidth(buffer.readScalar());
+ paint->setStrokeMiter(buffer.readScalar());
+ if (buffer.isVersionLT(SkPicturePriv::kFloat4PaintColor_Version)) {
+ paint->setColor(buffer.readColor());
+ } else {
+ SkColor4f color;
+ buffer.readColor4f(&color);
+ paint->setColor(color, sk_srgb_singleton());
+ }
+
+ unsigned flatFlags = unpack_paint_flags(paint, buffer.readUInt(), font);
+
+ uint32_t tmp = buffer.readUInt();
+ paint->setStrokeCap(safe.checkLE((tmp >> 24) & 0xFF, SkPaint::kLast_Cap));
+ paint->setStrokeJoin(safe.checkLE((tmp >> 16) & 0xFF, SkPaint::kLast_Join));
+ paint->setStyle(safe.checkLE((tmp >> 12) & 0xF, SkPaint::kStrokeAndFill_Style));
+ paint->setBlendMode(safe.checkLE(tmp & 0xFF, SkBlendMode::kLastMode));
+
+ sk_sp<SkTypeface> tf;
+ if (flatFlags & kHasTypeface_FlatFlag) {
+ tf = buffer.readTypeface();
+ }
+ if (font) {
+ font->setTypeface(tf);
+ }
+
+ if (flatFlags & kHasEffects_FlatFlag) {
+ paint->setPathEffect(buffer.readPathEffect());
+ paint->setShader(buffer.readShader());
+ paint->setMaskFilter(buffer.readMaskFilter());
+ paint->setColorFilter(buffer.readColorFilter());
+ (void)buffer.read32(); // used to be SkRasterizer
+ (void)buffer.read32(); // used to be drawlooper
+ paint->setImageFilter(buffer.readImageFilter());
+ } else {
+ paint->setPathEffect(nullptr);
+ paint->setShader(nullptr);
+ paint->setMaskFilter(nullptr);
+ paint->setColorFilter(nullptr);
+ paint->setImageFilter(nullptr);
+ }
+
+ if (!buffer.validate(safe)) {
+ paint->reset();
+ return kFailed_ReadPaint;
+ }
+ return kSuccess_PaintAndFont;
+}
+
+SkReadPaintResult SkPaintPriv::Unflatten(SkPaint* paint, SkReadBuffer& buffer, SkFont* font) {
+ if (buffer.isVersionLT(SkPicturePriv::kPaintDoesntSerializeFonts_Version)) {
+ return Unflatten_PreV68(paint, buffer, font);
+ }
+
+ SkSafeRange safe;
+
+ paint->setStrokeWidth(buffer.readScalar());
+ paint->setStrokeMiter(buffer.readScalar());
+ {
+ SkColor4f color;
+ buffer.readColor4f(&color);
+ paint->setColor(color, sk_srgb_singleton());
+ }
+
+ unsigned flatFlags = unpack_v68(paint, buffer.readUInt(), safe);
+
+ if (flatFlags & kHasEffects_FlatFlag) {
+ paint->setPathEffect(buffer.readPathEffect());
+ paint->setShader(buffer.readShader());
+ paint->setMaskFilter(buffer.readMaskFilter());
+ paint->setColorFilter(buffer.readColorFilter());
+ (void)buffer.readDrawLooper();
+ paint->setImageFilter(buffer.readImageFilter());
+ } else {
+ paint->setPathEffect(nullptr);
+ paint->setShader(nullptr);
+ paint->setMaskFilter(nullptr);
+ paint->setColorFilter(nullptr);
+ paint->setImageFilter(nullptr);
+ }
+
+ if (!buffer.validate(safe)) {
+ paint->reset();
+ return kFailed_ReadPaint;
+ }
+ return kSuccess_JustPaint;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPaint::getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect,
+ SkScalar resScale) const {
+ if (!src.isFinite()) {
+ dst->reset();
+ return false;
+ }
+
+ SkStrokeRec rec(*this, resScale);
+
+ const SkPath* srcPtr = &src;
+ SkPath tmpPath;
+
+ if (fPathEffect && fPathEffect->filterPath(&tmpPath, src, &rec, cullRect)) {
+ srcPtr = &tmpPath;
+ }
+
+ if (!rec.applyToPath(dst, *srcPtr)) {
+ if (srcPtr == &tmpPath) {
+ // If paths were copy-on-write, this trick would not be needed.
+ // As it is, we want to save making a deep-copy from tmpPath -> dst
+ // since we know we're just going to delete tmpPath when we return,
+ // so the swap saves that copy.
+ dst->swap(tmpPath);
+ } else {
+ *dst = *srcPtr;
+ }
+ }
+
+ if (!dst->isFinite()) {
+ dst->reset();
+ return false;
+ }
+ return !rec.isHairlineStyle();
+}
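+
+// Typical use (an illustrative sketch, not upstream code; 'src' is an
+// arbitrary pre-built path):
+//   SkPaint stroke;
+//   stroke.setStyle(SkPaint::kStroke_Style);
+//   stroke.setStrokeWidth(4);
+//   SkPath outline;
+//   if (stroke.getFillPath(src, &outline, nullptr, 1.0f)) {
+//       // filling 'outline' now matches stroking 'src' with 'stroke'
+//   }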
+
+bool SkPaint::canComputeFastBounds() const {
+ if (this->getImageFilter() && !this->getImageFilter()->canComputeFastBounds()) {
+ return false;
+ }
+ return true;
+}
+
+const SkRect& SkPaint::doComputeFastBounds(const SkRect& origSrc,
+ SkRect* storage,
+ Style style) const {
+ SkASSERT(storage);
+
+ const SkRect* src = &origSrc;
+
+ SkRect tmpSrc;
+ if (this->getPathEffect()) {
+ this->getPathEffect()->computeFastBounds(&tmpSrc, origSrc);
+ src = &tmpSrc;
+ }
+
+ SkScalar radius = SkStrokeRec::GetInflationRadius(*this, style);
+ *storage = src->makeOutset(radius, radius);
+
+ if (this->getMaskFilter()) {
+ as_MFB(this->getMaskFilter())->computeFastBounds(*storage, storage);
+ }
+
+ if (this->getImageFilter()) {
+ *storage = this->getImageFilter()->computeFastBounds(*storage);
+ }
+
+ return *storage;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkColorFilter* cf) {
+ return cf && !(cf->getFlags() & SkColorFilter::kAlphaUnchanged_Flag);
+}
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkImageFilter* imf) {
+ // TODO: check if we should allow imagefilters to broadcast that they don't affect alpha
+ // ala colorfilters
+ return imf != nullptr;
+}
+
+bool SkPaint::nothingToDraw() const {
+ switch (this->getBlendMode()) {
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kPlus:
+ if (0 == this->getAlpha()) {
+ return !affects_alpha(fColorFilter.get()) && !affects_alpha(fImageFilter.get());
+ }
+ break;
+ case SkBlendMode::kDst:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
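+
+// e.g. (sketch): a kSrcOver paint with alpha == 0 and no color- or image-
+// filter reports nothingToDraw() == true, letting callers skip the draw
+// entirely.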
+
+uint32_t SkPaint::getHash() const {
+ // We're going to hash 5 pointers and 6 floats, finishing up with fBitfields,
+ // so fBitfields should be 5 pointers and 6 floats from the start.
+ static_assert(offsetof(SkPaint, fBitfieldsUInt) == 5 * sizeof(void*) + 6 * sizeof(float),
+ "SkPaint_notPackedTightly");
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(this),
+ offsetof(SkPaint, fBitfieldsUInt) + sizeof(fBitfieldsUInt));
+}
diff --git a/gfx/skia/skia/src/core/SkPaintDefaults.h b/gfx/skia/skia/src/core/SkPaintDefaults.h
new file mode 100644
index 0000000000..ce90fd1803
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintDefaults.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintDefaults_DEFINED
+#define SkPaintDefaults_DEFINED
+
+#include "include/core/SkFontTypes.h"
+
+/**
+ * Any of these can be specified by the build system (or SkUserConfig.h)
+ * to change the default values for a SkPaint. This file should not be
+ * edited directly.
+ */
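+
+// For example (a sketch), a port could add
+//   #define SkPaintDefaults_MiterLimit SkIntToScalar(10)
+// to its SkUserConfig.h to raise the default miter limit everywhere.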
+
+#ifndef SkPaintDefaults_TextSize
+ #define SkPaintDefaults_TextSize SkIntToScalar(12)
+#endif
+
+#ifndef SkPaintDefaults_Hinting
+ #define SkPaintDefaults_Hinting SkFontHinting::kNormal
+#endif
+
+#ifndef SkPaintDefaults_MiterLimit
+ #define SkPaintDefaults_MiterLimit SkIntToScalar(4)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.cpp b/gfx/skia/skia/src/core/SkPaintPriv.cpp
new file mode 100644
index 0000000000..cce22aad79
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+static bool changes_alpha(const SkPaint& paint) {
+ SkColorFilter* cf = paint.getColorFilter();
+ return cf && !(cf->getFlags() & SkColorFilter::kAlphaUnchanged_Flag);
+}
+
+bool SkPaintPriv::Overwrites(const SkPaint* paint, ShaderOverrideOpacity overrideOpacity) {
+ if (!paint) {
+ // No paint means we default to SRC_OVER, so we overwrite iff our shader-override
+ // is opaque, or we don't have one.
+ return overrideOpacity != kNotOpaque_ShaderOverrideOpacity;
+ }
+
+ SkXfermode::SrcColorOpacity opacityType = SkXfermode::kUnknown_SrcColorOpacity;
+
+ if (!changes_alpha(*paint)) {
+ const unsigned paintAlpha = paint->getAlpha();
+ if (0xff == paintAlpha && overrideOpacity != kNotOpaque_ShaderOverrideOpacity &&
+ (!paint->getShader() || paint->getShader()->isOpaque()))
+ {
+ opacityType = SkXfermode::kOpaque_SrcColorOpacity;
+ } else if (0 == paintAlpha) {
+ if (overrideOpacity == kNone_ShaderOverrideOpacity && !paint->getShader()) {
+ opacityType = SkXfermode::kTransparentBlack_SrcColorOpacity;
+ } else {
+ opacityType = SkXfermode::kTransparentAlpha_SrcColorOpacity;
+ }
+ }
+ }
+
+ return SkXfermode::IsOpaque(paint->getBlendMode(), opacityType);
+}
+
+bool SkPaintPriv::ShouldDither(const SkPaint& p, SkColorType dstCT) {
+ // The paint dither flag can veto.
+ if (!p.isDither()) {
+ return false;
+ }
+
+ // We always dither 565 or 4444 when requested.
+ if (dstCT == kRGB_565_SkColorType || dstCT == kARGB_4444_SkColorType) {
+ return true;
+ }
+
+ // Otherwise, dither is only needed for non-const paints.
+ return p.getImageFilter() || p.getMaskFilter()
+ || !p.getShader() || !as_SB(p.getShader())->isConstant();
+}
+
+// return true if the paint is just a single color (i.e. not a shader). If it's
+// a shader, then we can't compute a const luminance for it :(
+static bool just_a_color(const SkPaint& paint, SkColor* color) {
+ SkColor c = paint.getColor();
+
+ const auto* shader = as_SB(paint.getShader());
+ if (shader && !shader->asLuminanceColor(&c)) {
+ return false;
+ }
+ if (paint.getColorFilter()) {
+ c = paint.getColorFilter()->filterColor(c);
+ }
+ if (color) {
+ *color = c;
+ }
+ return true;
+}
+
+SkColor SkPaintPriv::ComputeLuminanceColor(const SkPaint& paint) {
+ SkColor c;
+ if (!just_a_color(paint, &c)) {
+ c = SkColorSetRGB(0x7F, 0x80, 0x7F);
+ }
+ return c;
+}
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.h b/gfx/skia/skia/src/core/SkPaintPriv.h
new file mode 100644
index 0000000000..1befb699d1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintPriv_DEFINED
+#define SkPaintPriv_DEFINED
+
+#include "include/core/SkPaint.h"
+
+class SkFont;
+class SkReadBuffer;
+class SkWriteBuffer;
+
+enum SkReadPaintResult {
+ kFailed_ReadPaint,
+ kSuccess_JustPaint,
+ kSuccess_PaintAndFont,
+};
+
+class SkPaintPriv {
+public:
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ /**
+ * Returns true if drawing with this paint (or nullptr) will overwrite all affected pixels.
+ *
+ * Note: the answer is conservative, meaning it may return false even though the paint might
+ * in fact overwrite its pixels.
+ */
+ static bool Overwrites(const SkPaint* paint, ShaderOverrideOpacity);
+
+ static bool ShouldDither(const SkPaint&, SkColorType);
+
+ /*
+ * The luminance color is used to determine which Gamma Canonical color to map to. This is
+ * really only used by backends which want to cache glyph masks, and need some way to know if
+ * they need to generate new masks based on a given color.
+ */
+ static SkColor ComputeLuminanceColor(const SkPaint&);
+
+ /** Serializes SkPaint into a buffer. A companion unflatten() call
+ can reconstitute the paint at a later time.
+
+ @param buffer SkWriteBuffer receiving the flattened SkPaint data
+ */
+ static void Flatten(const SkPaint& paint, SkWriteBuffer& buffer);
+
+ /** Populates SkPaint, typically from a serialized stream, created by calling
+ flatten() at an earlier time.
+
+ SkReadBuffer class is not public, so unflatten() cannot be meaningfully called
+ by the client.
+
+ Older formats also stored font info in the serialized data. On success, this
+ returns whether it deserialized just a paint, or both a font and a paint. The
+ font param is optional.
+
+ @param buffer serialized data describing SkPaint content
+ @return kFailed_ReadPaint if the buffer contains invalid data
+ */
+ static SkReadPaintResult Unflatten(SkPaint* paint, SkReadBuffer& buffer, SkFont* font);
+
+private:
+ static SkReadPaintResult Unflatten_PreV68(SkPaint* paint, SkReadBuffer& buffer, SkFont*);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPath.cpp b/gfx/skia/skia/src/core/SkPath.cpp
new file mode 100644
index 0000000000..43ce47a979
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPath.cpp
@@ -0,0 +1,3739 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkRRect.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBuffer.h"
+#include "src/core/SkCubicClipper.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPathMakers.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkTLazy.h"
+// need SkDVector
+#include "src/pathops/SkPathOpsPoint.h"
+
+#include <cmath>
+#include <utility>
+
+struct SkPath_Storage_Equivalent {
+ void* fPtr;
+ int32_t fIndex;
+ uint32_t fFlags;
+};
+
+static_assert(sizeof(SkPath) == sizeof(SkPath_Storage_Equivalent),
+ "Please keep an eye on SkPath packing.");
+
+static float poly_eval(float A, float B, float C, float t) {
+ return (A * t + B) * t + C;
+}
+
+static float poly_eval(float A, float B, float C, float D, float t) {
+ return ((A * t + B) * t + C) * t + D;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Path.bounds is defined to be the bounds of all the control points.
+ * If we called bounds.join(r) we would skip r if r was empty, which breaks
+ * our promise. Hence we have a custom joiner that doesn't look at emptiness
+ */
+static void joinNoEmptyChecks(SkRect* dst, const SkRect& src) {
+ dst->fLeft = SkMinScalar(dst->fLeft, src.fLeft);
+ dst->fTop = SkMinScalar(dst->fTop, src.fTop);
+ dst->fRight = SkMaxScalar(dst->fRight, src.fRight);
+ dst->fBottom = SkMaxScalar(dst->fBottom, src.fBottom);
+}
+
+static bool is_degenerate(const SkPath& path) {
+ return path.countVerbs() <= 1;
+}
+
+class SkAutoDisableDirectionCheck {
+public:
+ SkAutoDisableDirectionCheck(SkPath* path) : fPath(path) {
+ fSaved = static_cast<SkPathPriv::FirstDirection>(fPath->getFirstDirection());
+ }
+
+ ~SkAutoDisableDirectionCheck() {
+ fPath->setFirstDirection(fSaved);
+ }
+
+private:
+ SkPath* fPath;
+ SkPathPriv::FirstDirection fSaved;
+};
+#define SkAutoDisableDirectionCheck(...) SK_REQUIRE_LOCAL_VAR(SkAutoDisableDirectionCheck)
+
+/* This guy's constructor/destructor bracket a path editing operation. It is
+ used when we know the bounds of the amount we are going to add to the path
+ (usually a new contour, but not required).
+
+ It captures some state about the path up front (i.e. if it already has a
+ cached bounds), and then if it can, it updates the cache bounds explicitly,
+ avoiding the need to revisit all of the points in getBounds().
+
+ It also notes if the path was originally degenerate, and if so, sets
+ isConvex to true. Thus it can only be used if the contour being added is
+ convex.
+ */
+class SkAutoPathBoundsUpdate {
+public:
+ SkAutoPathBoundsUpdate(SkPath* path, const SkRect& r) : fPath(path), fRect(r) {
+ // Cannot use fRect for our bounds unless we know it is sorted
+ fRect.sort();
+ // Mark the path's bounds as dirty if (1) they are, or (2) the path
+ // is non-finite, and therefore its bounds are not meaningful
+ fHasValidBounds = path->hasComputedBounds() && path->isFinite();
+ fEmpty = path->isEmpty();
+ if (fHasValidBounds && !fEmpty) {
+ joinNoEmptyChecks(&fRect, fPath->getBounds());
+ }
+ fDegenerate = is_degenerate(*path);
+ }
+
+ ~SkAutoPathBoundsUpdate() {
+ fPath->setConvexity(fDegenerate ? SkPath::kConvex_Convexity
+ : SkPath::kUnknown_Convexity);
+ if ((fEmpty || fHasValidBounds) && fRect.isFinite()) {
+ fPath->setBounds(fRect);
+ }
+ }
+
+private:
+ SkPath* fPath;
+ SkRect fRect;
+ bool fHasValidBounds;
+ bool fDegenerate;
+ bool fEmpty;
+};
+#define SkAutoPathBoundsUpdate(...) SK_REQUIRE_LOCAL_VAR(SkAutoPathBoundsUpdate)
+
+////////////////////////////////////////////////////////////////////////////
+
+/*
+ Stores the verbs and points as they are given to us, with exceptions:
+ - we only record "Close" if it was immediately preceded by Move | Line | Quad | Cubic
+ - we insert a Move(0,0) if Line | Quad | Cubic is our first command
+
+ The iterator does more cleanup, especially if forceClose == true
+ 1. If we encounter degenerate segments, remove them
+ 2. if we encounter Close, return a cons'd up Line() first (if the curr-pt != start-pt)
+ 3. if we encounter Move without a preceding Close, and forceClose is true, goto #2
+ 4. if we encounter Line | Quad | Cubic after Close, cons up a Move
+*/
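+
+// e.g. (sketch of the second rule): lineTo(10, 10) on a fresh path records
+// Move(0,0) followed by Line(10,10); see injectMoveToIfNeeded() below.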
+
+////////////////////////////////////////////////////////////////////////////
+
+// flag to require a moveTo if we begin with something else, like lineTo etc.
+#define INITIAL_LASTMOVETOINDEX_VALUE ~0
+
+SkPath::SkPath()
+ : fPathRef(SkPathRef::CreateEmpty()) {
+ this->resetFields();
+ fIsVolatile = false;
+}
+
+void SkPath::resetFields() {
+ //fPathRef is assumed to have been emptied by the caller.
+ fLastMoveToIndex = INITIAL_LASTMOVETOINDEX_VALUE;
+ fFillType = kWinding_FillType;
+ this->setConvexity(kUnknown_Convexity);
+ this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+
+ // We don't touch Android's fSourcePath. It's used to track texture garbage collection, so we
+ // don't want to muck with it if it's been set to something non-nullptr.
+}
+
+SkPath::SkPath(const SkPath& that)
+ : fPathRef(SkRef(that.fPathRef.get())) {
+ this->copyFields(that);
+ SkDEBUGCODE(that.validate();)
+}
+
+SkPath::~SkPath() {
+ SkDEBUGCODE(this->validate();)
+}
+
+SkPath& SkPath::operator=(const SkPath& that) {
+ SkDEBUGCODE(that.validate();)
+
+ if (this != &that) {
+ fPathRef.reset(SkRef(that.fPathRef.get()));
+ this->copyFields(that);
+ }
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+void SkPath::copyFields(const SkPath& that) {
+ //fPathRef is assumed to have been set by the caller.
+ fLastMoveToIndex = that.fLastMoveToIndex;
+ fFillType = that.fFillType;
+ fIsVolatile = that.fIsVolatile;
+
+ // Non-atomic assignment of atomic values.
+ this->setConvexity(that.getConvexityOrUnknown());
+ this->setFirstDirection(that.getFirstDirection());
+}
+
+bool operator==(const SkPath& a, const SkPath& b) {
+ // note: don't need to look at isConvex or bounds, since just comparing the
+ // raw data is sufficient.
+ return &a == &b ||
+ (a.fFillType == b.fFillType && *a.fPathRef.get() == *b.fPathRef.get());
+}
+
+void SkPath::swap(SkPath& that) {
+ if (this != &that) {
+ fPathRef.swap(that.fPathRef);
+ std::swap(fLastMoveToIndex, that.fLastMoveToIndex);
+
+ const auto ft = fFillType;
+ fFillType = that.fFillType;
+ that.fFillType = ft;
+
+ const auto iv = fIsVolatile;
+ fIsVolatile = that.fIsVolatile;
+ that.fIsVolatile = iv;
+
+ // Non-atomic swaps of atomic values.
+ Convexity c = this->getConvexityOrUnknown();
+ this->setConvexity(that.getConvexityOrUnknown());
+ that.setConvexity(c);
+
+ uint8_t fd = this->getFirstDirection();
+ this->setFirstDirection(that.getFirstDirection());
+ that.setFirstDirection(fd);
+ }
+}
+
+bool SkPath::isInterpolatable(const SkPath& compare) const {
+ // need the same structure (verbs, conicweights) and same point-count
+ return fPathRef->fPoints.count() == compare.fPathRef->fPoints.count() &&
+ fPathRef->fVerbs == compare.fPathRef->fVerbs &&
+ fPathRef->fConicWeights == compare.fPathRef->fConicWeights;
+}
+
+bool SkPath::interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const {
+ int pointCount = fPathRef->countPoints();
+ if (pointCount != ending.fPathRef->countPoints()) {
+ return false;
+ }
+ if (!pointCount) {
+ return true;
+ }
+ out->reset();
+ out->addPath(*this);
+ fPathRef->interpolate(*ending.fPathRef, weight, out->fPathRef.get());
+ return true;
+}
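+
+// Usage sketch (illustrative): when a.isInterpolatable(b) holds,
+// a.interpolate(b, w, &out) fills 'out' with a point-wise blend of the two
+// paths' control points -- the basis for path-morphing animations.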
+
+static inline bool check_edge_against_rect(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkRect& rect,
+ SkPathPriv::FirstDirection dir) {
+ const SkPoint* edgeBegin;
+ SkVector v;
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ v = p1 - p0;
+ edgeBegin = &p0;
+ } else {
+ v = p0 - p1;
+ edgeBegin = &p1;
+ }
+ if (v.fX || v.fY) {
+ // check the cross product of v with the vec from edgeBegin to each rect corner
+ SkScalar yL = v.fY * (rect.fLeft - edgeBegin->fX);
+ SkScalar xT = v.fX * (rect.fTop - edgeBegin->fY);
+ SkScalar yR = v.fY * (rect.fRight - edgeBegin->fX);
+ SkScalar xB = v.fX * (rect.fBottom - edgeBegin->fY);
+ if ((xT < yL) || (xT < yR) || (xB < yL) || (xB < yR)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkPath::conservativelyContainsRect(const SkRect& rect) const {
+ // This only handles non-degenerate convex paths currently.
+ if (kConvex_Convexity != this->getConvexity()) {
+ return false;
+ }
+
+ SkPathPriv::FirstDirection direction;
+ if (!SkPathPriv::CheapComputeFirstDirection(*this, &direction)) {
+ return false;
+ }
+
+ SkPoint firstPt;
+ SkPoint prevPt;
+ SkPath::Iter iter(*this, true);
+ SkPath::Verb verb;
+ SkPoint pts[4];
+ int segmentCount = 0;
+ SkDEBUGCODE(int moveCnt = 0;)
+ SkDEBUGCODE(int closeCount = 0;)
+
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ int nextPt = -1;
+ switch (verb) {
+ case kMove_Verb:
+ SkASSERT(!segmentCount && !closeCount);
+ SkDEBUGCODE(++moveCnt);
+ firstPt = prevPt = pts[0];
+ break;
+ case kLine_Verb:
+ if (!SkPathPriv::AllPointsEq(pts, 2)) {
+ nextPt = 1;
+ SkASSERT(moveCnt && !closeCount);
+ ++segmentCount;
+ }
+ break;
+ case kQuad_Verb:
+ case kConic_Verb:
+ if (!SkPathPriv::AllPointsEq(pts, 3)) {
+ SkASSERT(moveCnt && !closeCount);
+ ++segmentCount;
+ nextPt = 2;
+ }
+ break;
+ case kCubic_Verb:
+ if (!SkPathPriv::AllPointsEq(pts, 4)) {
+ SkASSERT(moveCnt && !closeCount);
+ ++segmentCount;
+ nextPt = 3;
+ }
+ break;
+ case kClose_Verb:
+ SkDEBUGCODE(++closeCount;)
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ if (-1 != nextPt) {
+ if (SkPath::kConic_Verb == verb) {
+ SkConic orig;
+ orig.set(pts, iter.conicWeight());
+ SkPoint quadPts[5];
+ int count = orig.chopIntoQuadsPOW2(quadPts, 1);
+ SkASSERT_RELEASE(2 == count);
+
+ if (!check_edge_against_rect(quadPts[0], quadPts[2], rect, direction)) {
+ return false;
+ }
+ if (!check_edge_against_rect(quadPts[2], quadPts[4], rect, direction)) {
+ return false;
+ }
+ } else {
+ if (!check_edge_against_rect(prevPt, pts[nextPt], rect, direction)) {
+ return false;
+ }
+ }
+ prevPt = pts[nextPt];
+ }
+ }
+
+ if (segmentCount) {
+ return check_edge_against_rect(prevPt, firstPt, rect, direction);
+ }
+ return false;
+}
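+
+// Note: the test above is conservative -- a true result guarantees that
+// 'rect' lies inside the path, while false proves nothing, so callers may
+// only use it to elide work (e.g. clipping), never to prove non-containment.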
+
+uint32_t SkPath::getGenerationID() const {
+ uint32_t genID = fPathRef->genID();
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkASSERT((unsigned)fFillType < (1 << (32 - SkPathPriv::kPathRefGenIDBitCnt)));
+ genID |= static_cast<uint32_t>(fFillType) << SkPathPriv::kPathRefGenIDBitCnt;
+#endif
+ return genID;
+}
+
+SkPath& SkPath::reset() {
+ SkDEBUGCODE(this->validate();)
+
+ fPathRef.reset(SkPathRef::CreateEmpty());
+ this->resetFields();
+ return *this;
+}
+
+SkPath& SkPath::rewind() {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Rewind(&fPathRef);
+ this->resetFields();
+ return *this;
+}
+
+bool SkPath::isLastContourClosed() const {
+ int verbCount = fPathRef->countVerbs();
+ if (0 == verbCount) {
+ return false;
+ }
+ return kClose_Verb == fPathRef->atVerb(verbCount - 1);
+}
+
+bool SkPath::isLine(SkPoint line[2]) const {
+ int verbCount = fPathRef->countVerbs();
+
+ if (2 == verbCount) {
+ SkASSERT(kMove_Verb == fPathRef->atVerb(0));
+ if (kLine_Verb == fPathRef->atVerb(1)) {
+ SkASSERT(2 == fPathRef->countPoints());
+ if (line) {
+ const SkPoint* pts = fPathRef->points();
+ line[0] = pts[0];
+ line[1] = pts[1];
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ Determines if path is a rect by keeping track of changes in direction
+ and looking for a loop either clockwise or counterclockwise.
+
+ The direction is computed such that:
+ 0: vertical up
+ 1: horizontal left
+ 2: vertical down
+ 3: horizontal right
+
+A rectangle cycles up/right/down/left or up/left/down/right.
+
+The test fails if:
+ The path is closed, and followed by a line.
+ A second move creates a new endpoint.
+ A diagonal line is parsed.
+ There's more than four changes of direction.
+ There's a discontinuity on the line (e.g., a move in the middle).
+ The line reverses direction.
+ The path contains a quadratic or cubic.
+ The path contains fewer than four points.
+ *The rectangle doesn't complete a cycle.
+ *The final point isn't equal to the first point.
+
+ *These last two conditions we relax if we have a 3-edge path that would
+ form a rectangle if it were closed (as we do when we fill a path)
+
+It's OK if the path has:
+ Several collinear line segments composing a rectangle side.
+ Single points on the rectangle side.
+
+The direction takes advantage of the corners found since opposite sides
+must travel in opposite directions.
+
+FIXME: Allow collinear quads and cubics to be treated like lines.
+FIXME: If the API passes fill-only, return true if the filled stroke
+ is a rectangle, though the caller failed to close the path.
+
+ directions values:
+ 0x1 is set if the segment is horizontal
+ 0x2 is set if the segment is moving to the right or down
+ thus:
+ two directions are opposites iff (dirA ^ dirB) == 0x2
+ two directions are perpendicular iff (dirA ^ dirB) == 0x1
+
+ */
+static int rect_make_dir(SkScalar dx, SkScalar dy) {
+ return ((0 != dx) << 0) | ((dx > 0 || dy > 0) << 1);
+}
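+
+// Worked example (a sketch): a clockwise square traversal yields
+//   right (dx > 0) -> 0x3, down (dy > 0) -> 0x2,
+//   left (dx < 0) -> 0x1, up (dy < 0) -> 0x0,
+// so opposite sides (right/left, up/down) differ exactly in the 0x2 bit.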
+
+bool SkPath::isRect(SkRect* rect, bool* isClosed, Direction* direction) const {
+ SkDEBUGCODE(this->validate();)
+ int currVerb = 0;
+ const SkPoint* pts = fPathRef->points();
+ return SkPathPriv::IsRectContour(*this, false, &currVerb, &pts, isClosed, direction, rect);
+}
+
+bool SkPath::isOval(SkRect* bounds) const {
+ return SkPathPriv::IsOval(*this, bounds, nullptr, nullptr);
+}
+
+bool SkPath::isRRect(SkRRect* rrect) const {
+ return SkPathPriv::IsRRect(*this, rrect, nullptr, nullptr);
+}
+
+int SkPath::countPoints() const {
+ return fPathRef->countPoints();
+}
+
+int SkPath::getPoints(SkPoint dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = SkMin32(max, fPathRef->countPoints());
+ sk_careful_memcpy(dst, fPathRef->points(), count * sizeof(SkPoint));
+ return fPathRef->countPoints();
+}
+
+SkPoint SkPath::getPoint(int index) const {
+ if ((unsigned)index < (unsigned)fPathRef->countPoints()) {
+ return fPathRef->atPoint(index);
+ }
+ return SkPoint::Make(0, 0);
+}
+
+int SkPath::countVerbs() const {
+ return fPathRef->countVerbs();
+}
+
+int SkPath::getVerbs(uint8_t dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = SkMin32(max, fPathRef->countVerbs());
+ if (count) {
+ memcpy(dst, fPathRef->verbsBegin(), count);
+ }
+ return fPathRef->countVerbs();
+}
+
+size_t SkPath::approximateBytesUsed() const {
+ size_t size = sizeof (SkPath);
+ if (fPathRef != nullptr) {
+ size += fPathRef->countPoints() * sizeof(SkPoint)
+ + fPathRef->countVerbs()
+ + fPathRef->countWeights() * sizeof(SkScalar);
+ }
+
+ return size;
+}
+
+bool SkPath::getLastPt(SkPoint* lastPt) const {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count > 0) {
+ if (lastPt) {
+ *lastPt = fPathRef->atPoint(count - 1);
+ }
+ return true;
+ }
+ if (lastPt) {
+ lastPt->set(0, 0);
+ }
+ return false;
+}
+
+void SkPath::setPt(int index, SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count <= index) {
+ return;
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(index)->set(x, y);
+ }
+}
+
+void SkPath::setLastPt(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count == 0) {
+ this->moveTo(x, y);
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(count-1)->set(x, y);
+ }
+}
+
+// This is the public-facing non-const setConvexity().
+void SkPath::setConvexity(Convexity c) {
+ fConvexity.store(c, std::memory_order_relaxed);
+}
+
+// Const hooks for working with fConvexity and fFirstDirection from const methods.
+void SkPath::setConvexity(Convexity c) const {
+ fConvexity.store(c, std::memory_order_relaxed);
+}
+void SkPath::setFirstDirection(uint8_t d) const {
+ fFirstDirection.store(d, std::memory_order_relaxed);
+}
+uint8_t SkPath::getFirstDirection() const {
+ return fFirstDirection.load(std::memory_order_relaxed);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Construction methods
+
+#define DIRTY_AFTER_EDIT \
+ do { \
+ this->setConvexity(kUnknown_Convexity); \
+ this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection); \
+ } while (0)
+
+void SkPath::incReserve(int inc) {
+ SkDEBUGCODE(this->validate();)
+ if (inc > 0) {
+ SkPathRef::Editor(&fPathRef, inc, inc);
+ }
+ SkDEBUGCODE(this->validate();)
+}
+
+SkPath& SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ // remember our index
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ ed.growForVerb(kMove_Verb)->set(x, y);
+
+ DIRTY_AFTER_EDIT;
+ return *this;
+}
+
+SkPath& SkPath::rMoveTo(SkScalar x, SkScalar y) {
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->moveTo(pt.fX + x, pt.fY + y);
+}
+
+void SkPath::injectMoveToIfNeeded() {
+ if (fLastMoveToIndex < 0) {
+ SkScalar x, y;
+ if (fPathRef->countVerbs() == 0) {
+ x = y = 0;
+ } else {
+ const SkPoint& pt = fPathRef->atPoint(~fLastMoveToIndex);
+ x = pt.fX;
+ y = pt.fY;
+ }
+ this->moveTo(x, y);
+ }
+}
+
+SkPath& SkPath::lineTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kLine_Verb)->set(x, y);
+
+ DIRTY_AFTER_EDIT;
+ return *this;
+}
+
+SkPath& SkPath::rLineTo(SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->lineTo(pt.fX + x, pt.fY + y);
+}
+
+SkPath& SkPath::quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kQuad_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ DIRTY_AFTER_EDIT;
+ return *this;
+}
+
+SkPath& SkPath::rQuadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->quadTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2);
+}
+
+SkPath& SkPath::conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w) {
+ // check for <= 0 or NaN with this test
+ if (!(w > 0)) {
+ this->lineTo(x2, y2);
+ } else if (!SkScalarIsFinite(w)) {
+ this->lineTo(x1, y1);
+ this->lineTo(x2, y2);
+ } else if (SK_Scalar1 == w) {
+ this->quadTo(x1, y1, x2, y2);
+ } else {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kConic_Verb, w);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ DIRTY_AFTER_EDIT;
+ }
+ return *this;
+}
+
+SkPath& SkPath::rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->conicTo(pt.fX + dx1, pt.fY + dy1, pt.fX + dx2, pt.fY + dy2, w);
+}
+
+SkPath& SkPath::cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kCubic_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+ pts[2].set(x3, y3);
+
+ DIRTY_AFTER_EDIT;
+ return *this;
+}
+
+SkPath& SkPath::rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->cubicTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2,
+ pt.fX + x3, pt.fY + y3);
+}
+
+SkPath& SkPath::close() {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countVerbs();
+ if (count > 0) {
+ switch (fPathRef->atVerb(count - 1)) {
+ case kLine_Verb:
+ case kQuad_Verb:
+ case kConic_Verb:
+ case kCubic_Verb:
+ case kMove_Verb: {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kClose_Verb);
+ break;
+ }
+ case kClose_Verb:
+ // don't add a close if it's the first verb or a repeat
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+
+ // signal that we need a moveTo to follow us (unless we're done)
+#if 0
+ if (fLastMoveToIndex >= 0) {
+ fLastMoveToIndex = ~fLastMoveToIndex;
+ }
+#else
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+#endif
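+ // Branch-free form of the #if 0 code above: when fLastMoveToIndex is
+ // non-negative, ~fLastMoveToIndex is negative and the arithmetic shift
+ // produces all ones, so the xor yields ~fLastMoveToIndex; when it is
+ // already negative, the shift produces zero and the xor is a no-op.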
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void assert_known_direction(int dir) {
+ SkASSERT(SkPath::kCW_Direction == dir || SkPath::kCCW_Direction == dir);
+}
+
+SkPath& SkPath::addRect(const SkRect& rect, Direction dir) {
+ return this->addRect(rect, dir, 0);
+}
+
+SkPath& SkPath::addRect(SkScalar left, SkScalar top, SkScalar right,
+ SkScalar bottom, Direction dir) {
+ return this->addRect(SkRect::MakeLTRB(left, top, right, bottom), dir, 0);
+}
+
+SkPath& SkPath::addRect(const SkRect &rect, Direction dir, unsigned startIndex) {
+ assert_known_direction(dir);
+ this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathPriv::FirstDirection)dir
+ : SkPathPriv::kUnknown_FirstDirection);
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, rect);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+
+ const int kVerbs = 5; // moveTo + 3x lineTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_RectPointIterator iter(rect, dir, startIndex);
+
+ this->moveTo(iter.current());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+ return *this;
+}
+
+SkPath& SkPath::addPoly(const SkPoint pts[], int count, bool close) {
+ SkDEBUGCODE(this->validate();)
+ if (count <= 0) {
+ return *this;
+ }
+
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ // +close makes room for the extra kClose_Verb
+ SkPathRef::Editor ed(&fPathRef, count+close, count);
+
+ ed.growForVerb(kMove_Verb)->set(pts[0].fX, pts[0].fY);
+ if (count > 1) {
+ SkPoint* p = ed.growForRepeatedVerb(kLine_Verb, count - 1);
+ memcpy(p, &pts[1], (count-1) * sizeof(SkPoint));
+ }
+
+ if (close) {
+ ed.growForVerb(kClose_Verb);
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+ }
+
+ DIRTY_AFTER_EDIT;
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+#include "src/core/SkGeometry.h"
+
+static bool arc_is_lone_point(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ SkPoint* pt) {
+ if (0 == sweepAngle && (0 == startAngle || SkIntToScalar(360) == startAngle)) {
+ // Chrome uses this path to move into and out of ovals. If not
+ // treated as a special case the moves can distort the oval's
+ // bounding box (and break the circle special case).
+ pt->set(oval.fRight, oval.centerY());
+ return true;
+ } else if (0 == oval.width() && 0 == oval.height()) {
+ // Chrome will sometimes create 0 radius round rects. Having degenerate
+ // quad segments in the path prevents the path from being recognized as
+ // a rect.
+ // TODO: optimizing the case where only one of width or height is zero
+ // should also be considered. This case, however, doesn't seem to be
+ // as common as the single point case.
+ pt->set(oval.fRight, oval.fTop);
+ return true;
+ }
+ return false;
+}
+
+// Return the unit vectors pointing at the start/stop points for the given start/sweep angles
+//
+static void angles_to_unit_vectors(SkScalar startAngle, SkScalar sweepAngle,
+ SkVector* startV, SkVector* stopV, SkRotationDirection* dir) {
+ SkScalar startRad = SkDegreesToRadians(startAngle),
+ stopRad = SkDegreesToRadians(startAngle + sweepAngle);
+
+ startV->fY = SkScalarSinSnapToZero(startRad);
+ startV->fX = SkScalarCosSnapToZero(startRad);
+ stopV->fY = SkScalarSinSnapToZero(stopRad);
+ stopV->fX = SkScalarCosSnapToZero(stopRad);
+
+ /* If the sweep angle is nearly (but less than) 360, then due to precision
+ loss in radians-conversion and/or sin/cos, we may end up with coincident
+ vectors, which will fool SkBuildQuadArc into doing nothing (bad) instead
+ of drawing a nearly complete circle (good).
+ e.g. canvas.drawArc(0, 359.99, ...)
+ -vs- canvas.drawArc(0, 359.9, ...)
+ We try to detect this edge case, and tweak the stop vector
+ */
+ if (*startV == *stopV) {
+ SkScalar sw = SkScalarAbs(sweepAngle);
+ if (sw < SkIntToScalar(360) && sw > SkIntToScalar(359)) {
+ // make a guess at a tiny angle (in radians) to tweak by
+ SkScalar deltaRad = SkScalarCopySign(SK_Scalar1/512, sweepAngle);
+ // not sure how much will be enough, so we use a loop
+ do {
+ stopRad -= deltaRad;
+ stopV->fY = SkScalarSinSnapToZero(stopRad);
+ stopV->fX = SkScalarCosSnapToZero(stopRad);
+ } while (*startV == *stopV);
+ }
+ }
+ *dir = sweepAngle > 0 ? kCW_SkRotationDirection : kCCW_SkRotationDirection;
+}
+
+/**
+ * If this returns 0, then the caller should just line-to the singlePt, else it should
+ * ignore singlePt and append the specified number of conics.
+ */
+static int build_arc_conics(const SkRect& oval, const SkVector& start, const SkVector& stop,
+ SkRotationDirection dir, SkConic conics[SkConic::kMaxConicsForArc],
+ SkPoint* singlePt) {
+ SkMatrix matrix;
+
+ matrix.setScale(SkScalarHalf(oval.width()), SkScalarHalf(oval.height()));
+ matrix.postTranslate(oval.centerX(), oval.centerY());
+
+ int count = SkConic::BuildUnitArc(start, stop, dir, &matrix, conics);
+ if (0 == count) {
+ matrix.mapXY(stop.x(), stop.y(), singlePt);
+ }
+ return count;
+}
+
+SkPath& SkPath::addRoundRect(const SkRect& rect, const SkScalar radii[],
+ Direction dir) {
+ SkRRect rrect;
+ rrect.setRectRadii(rect, (const SkVector*) radii);
+ return this->addRRect(rrect, dir);
+}
+
+SkPath& SkPath::addRRect(const SkRRect& rrect, Direction dir) {
+ // legacy start indices: 6 (CW) and 7(CCW)
+ return this->addRRect(rrect, dir, dir == kCW_Direction ? 6 : 7);
+}
+
+SkPath& SkPath::addRRect(const SkRRect &rrect, Direction dir, unsigned startIndex) {
+ assert_known_direction(dir);
+
+ bool isRRect = hasOnlyMoveTos();
+ const SkRect& bounds = rrect.getBounds();
+
+ if (rrect.isRect() || rrect.isEmpty()) {
+ // degenerate(rect) => radii points are collapsing
+ this->addRect(bounds, dir, (startIndex + 1) / 2);
+ } else if (rrect.isOval()) {
+ // degenerate(oval) => line points are collapsing
+ this->addOval(bounds, dir, startIndex / 2);
+ } else {
+ this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathPriv::FirstDirection)dir
+ : SkPathPriv::kUnknown_FirstDirection);
+
+ SkAutoPathBoundsUpdate apbu(this, bounds);
+ SkAutoDisableDirectionCheck addc(this);
+
+ // we start with a conic on odd indices when moving CW vs. even indices when moving CCW
+ const bool startsWithConic = ((startIndex & 1) == (dir == kCW_Direction));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = startsWithConic
+ ? 9 // moveTo + 4x conicTo + 3x lineTo + close
+ : 10; // moveTo + 4x lineTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_RRectPointIterator rrectIter(rrect, dir, startIndex);
+ // Corner iterator indices follow the collapsed radii model,
+ // adjusted such that the start pt is "behind" the radii start pt.
+ const unsigned rectStartIndex = startIndex / 2 + (dir == kCW_Direction ? 0 : 1);
+ SkPath_RectPointIterator rectIter(bounds, dir, rectStartIndex);
+
+ this->moveTo(rrectIter.current());
+ if (startsWithConic) {
+ for (unsigned i = 0; i < 3; ++i) {
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ this->lineTo(rrectIter.next());
+ }
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ // final lineTo handled by close().
+ } else {
+ for (unsigned i = 0; i < 4; ++i) {
+ this->lineTo(rrectIter.next());
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ }
+ }
+ this->close();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.setIsRRect(isRRect, dir, startIndex % 8);
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+ }
+
+ SkDEBUGCODE(fPathRef->validate();)
+ return *this;
+}
+
+bool SkPath::hasOnlyMoveTos() const {
+ int count = fPathRef->countVerbs();
+ const uint8_t* verbs = fPathRef->verbsBegin();
+ for (int i = 0; i < count; ++i) {
+ if (*verbs == kLine_Verb ||
+ *verbs == kQuad_Verb ||
+ *verbs == kConic_Verb ||
+ *verbs == kCubic_Verb) {
+ return false;
+ }
+ ++verbs;
+ }
+ return true;
+}
+
+bool SkPath::isZeroLengthSincePoint(int startPtIndex) const {
+ int count = fPathRef->countPoints() - startPtIndex;
+ if (count < 2) {
+ return true;
+ }
+ const SkPoint* pts = fPathRef.get()->points() + startPtIndex;
+ const SkPoint& first = *pts;
+ for (int index = 1; index < count; ++index) {
+ if (first != pts[index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+SkPath& SkPath::addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ Direction dir) {
+ assert_known_direction(dir);
+
+ if (rx < 0 || ry < 0) {
+ return *this;
+ }
+
+ SkRRect rrect;
+ rrect.setRectXY(rect, rx, ry);
+ return this->addRRect(rrect, dir);
+}
+
+SkPath& SkPath::addOval(const SkRect& oval, Direction dir) {
+ // legacy start index: 1
+ return this->addOval(oval, dir, 1);
+}
+
+SkPath& SkPath::addOval(const SkRect &oval, Direction dir, unsigned startPointIndex) {
+ assert_known_direction(dir);
+
+ /* If addOval() is called after previous moveTo(),
+ this path is still marked as an oval. This is used to
+ fit into WebKit's calling sequences.
+ We can't simply check isEmpty() in this case, as an additional
+ moveTo() would mark the path non-empty.
+ */
+ bool isOval = hasOnlyMoveTos();
+ if (isOval) {
+ this->setFirstDirection((SkPathPriv::FirstDirection)dir);
+ } else {
+ this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+ }
+
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, oval);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = 6; // moveTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_OvalPointIterator ovalIter(oval, dir, startPointIndex);
+ // The corner iterator pts are tracking "behind" the oval/radii pts.
+ SkPath_RectPointIterator rectIter(oval, dir, startPointIndex + (dir == kCW_Direction ? 0 : 1));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ this->moveTo(ovalIter.current());
+ for (unsigned i = 0; i < 4; ++i) {
+ this->conicTo(rectIter.next(), ovalIter.next(), weight);
+ }
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ ed.setIsOval(isOval, kCCW_Direction == dir, startPointIndex % 4);
+ return *this;
+}
+
+SkPath& SkPath::addCircle(SkScalar x, SkScalar y, SkScalar r, Direction dir) {
+ if (r > 0) {
+ this->addOval(SkRect::MakeLTRB(x - r, y - r, x + r, y + r), dir);
+ }
+ return *this;
+}
+
+SkPath& SkPath::arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool forceMoveTo) {
+ if (oval.width() < 0 || oval.height() < 0) {
+ return *this;
+ }
+
+ if (fPathRef->countVerbs() == 0) {
+ forceMoveTo = true;
+ }
+
+ SkPoint lonePt;
+ if (arc_is_lone_point(oval, startAngle, sweepAngle, &lonePt)) {
+ return forceMoveTo ? this->moveTo(lonePt) : this->lineTo(lonePt);
+ }
+
+ SkVector startV, stopV;
+ SkRotationDirection dir;
+ angles_to_unit_vectors(startAngle, sweepAngle, &startV, &stopV, &dir);
+
+ SkPoint singlePt;
+
+ // Adds a move-to to 'pt' if forceMoveTo is true. Otherwise a lineTo unless we're sufficiently
+ // close to 'pt' currently. This prevents spurious lineTos when adding a series of contiguous
+ // arcs from the same oval.
+ auto addPt = [&forceMoveTo, this](const SkPoint& pt) {
+ SkPoint lastPt;
+ if (forceMoveTo) {
+ this->moveTo(pt);
+ } else if (!this->getLastPt(&lastPt) ||
+ !SkScalarNearlyEqual(lastPt.fX, pt.fX) ||
+ !SkScalarNearlyEqual(lastPt.fY, pt.fY)) {
+ this->lineTo(pt);
+ }
+ };
+
+ // At this point, we know that the arc is not a lone point, but startV == stopV
+ // indicates that the sweepAngle is too small such that angles_to_unit_vectors
+ // cannot handle it.
+ if (startV == stopV) {
+ SkScalar endAngle = SkDegreesToRadians(startAngle + sweepAngle);
+ SkScalar radiusX = oval.width() / 2;
+ SkScalar radiusY = oval.height() / 2;
+ // We do not use SkScalar[Sin|Cos]SnapToZero here. When sin(startAngle) is 0 and sweepAngle
+ // is very small and radius is huge, the expected behavior here is to draw a line. But
+ // calling SkScalarSinSnapToZero will make sin(endAngle) be 0 which will then draw a dot.
+ singlePt.set(oval.centerX() + radiusX * SkScalarCos(endAngle),
+ oval.centerY() + radiusY * SkScalarSin(endAngle));
+ addPt(singlePt);
+ return *this;
+ }
+
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = build_arc_conics(oval, startV, stopV, dir, conics, &singlePt);
+ if (count) {
+ this->incReserve(count * 2 + 1);
+ const SkPoint& pt = conics[0].fPts[0];
+ addPt(pt);
+ for (int i = 0; i < count; ++i) {
+ this->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ } else {
+ addPt(singlePt);
+ }
+ return *this;
+}
+
+// This converts the SVG arc to conics.
+// Partly adapted from Niko's code in kdelibs/kdecore/svgicons.
+// Then transcribed from webkit/chrome's SVGPathNormalizer::decomposeArcToCubic()
+// See also SVG implementation notes:
+// http://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
+// Note that arcSweep bool value is flipped from the original implementation.
+SkPath& SkPath::arcTo(SkScalar rx, SkScalar ry, SkScalar angle, SkPath::ArcSize arcLarge,
+ SkPath::Direction arcSweep, SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded();
+ SkPoint srcPts[2];
+ this->getLastPt(&srcPts[0]);
+ // If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a "lineto")
+ // joining the endpoints.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
+ if (!rx || !ry) {
+ return this->lineTo(x, y);
+ }
+ // If the current point and target point for the arc are identical, it should be treated as a
+ // zero length path. This ensures continuity in animations.
+ srcPts[1].set(x, y);
+ if (srcPts[0] == srcPts[1]) {
+ return this->lineTo(x, y);
+ }
+ rx = SkScalarAbs(rx);
+ ry = SkScalarAbs(ry);
+ SkVector midPointDistance = srcPts[0] - srcPts[1];
+ midPointDistance *= 0.5f;
+
+ SkMatrix pointTransform;
+ pointTransform.setRotate(-angle);
+
+ SkPoint transformedMidPoint;
+ pointTransform.mapPoints(&transformedMidPoint, &midPointDistance, 1);
+ SkScalar squareRx = rx * rx;
+ SkScalar squareRy = ry * ry;
+ SkScalar squareX = transformedMidPoint.fX * transformedMidPoint.fX;
+ SkScalar squareY = transformedMidPoint.fY * transformedMidPoint.fY;
+
+ // Check if the radii are big enough to draw the arc, scale radii if not.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
+ SkScalar radiiScale = squareX / squareRx + squareY / squareRy;
+ if (radiiScale > 1) {
+ radiiScale = SkScalarSqrt(radiiScale);
+ rx *= radiiScale;
+ ry *= radiiScale;
+ }
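+
+ // e.g. (sketch): endpoints 100 apart along x with rx = ry = 25 give
+ // radiiScale = 2500/625 = 4 -> sqrt -> 2, doubling both radii to 50: the
+ // smallest ellipse that can actually reach both endpoints.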
+
+ pointTransform.setScale(1 / rx, 1 / ry);
+ pointTransform.preRotate(-angle);
+
+ SkPoint unitPts[2];
+ pointTransform.mapPoints(unitPts, srcPts, (int) SK_ARRAY_COUNT(unitPts));
+ SkVector delta = unitPts[1] - unitPts[0];
+
+ SkScalar d = delta.fX * delta.fX + delta.fY * delta.fY;
+ SkScalar scaleFactorSquared = SkTMax(1 / d - 0.25f, 0.f);
+
+ SkScalar scaleFactor = SkScalarSqrt(scaleFactorSquared);
+ if (SkToBool(arcSweep) != SkToBool(arcLarge)) { // flipped from the original implementation
+ scaleFactor = -scaleFactor;
+ }
+ delta.scale(scaleFactor);
+ SkPoint centerPoint = unitPts[0] + unitPts[1];
+ centerPoint *= 0.5f;
+ centerPoint.offset(-delta.fY, delta.fX);
+ unitPts[0] -= centerPoint;
+ unitPts[1] -= centerPoint;
+ SkScalar theta1 = SkScalarATan2(unitPts[0].fY, unitPts[0].fX);
+ SkScalar theta2 = SkScalarATan2(unitPts[1].fY, unitPts[1].fX);
+ SkScalar thetaArc = theta2 - theta1;
+ if (thetaArc < 0 && !arcSweep) { // arcSweep flipped from the original implementation
+ thetaArc += SK_ScalarPI * 2;
+ } else if (thetaArc > 0 && arcSweep) { // arcSweep flipped from the original implementation
+ thetaArc -= SK_ScalarPI * 2;
+ }
+
+ // Very tiny angles cause our subsequent math to go wonky (skbug.com/9272)
+ // so we do a quick check here. The precise tolerance amount is just made up.
+ // PI/million happens to fix the bug in 9272, but a larger value is probably
+ // ok too.
+ if (SkScalarAbs(thetaArc) < (SK_ScalarPI / (1000 * 1000))) {
+ return this->lineTo(x, y);
+ }
+
+ pointTransform.setRotate(angle);
+ pointTransform.preScale(rx, ry);
+
+ // the arc may be slightly bigger than 1/4 circle, so allow up to 1/3rd
+ int segments = SkScalarCeilToInt(SkScalarAbs(thetaArc / (2 * SK_ScalarPI / 3)));
+ SkScalar thetaWidth = thetaArc / segments;
+ SkScalar t = SkScalarTan(0.5f * thetaWidth);
+ if (!SkScalarIsFinite(t)) {
+ return *this;
+ }
+ SkScalar startTheta = theta1;
+ SkScalar w = SkScalarSqrt(SK_ScalarHalf + SkScalarCos(thetaWidth) * SK_ScalarHalf);
+ auto scalar_is_integer = [](SkScalar scalar) -> bool {
+ return scalar == SkScalarFloorToScalar(scalar);
+ };
+ bool expectIntegers = SkScalarNearlyZero(SK_ScalarPI/2 - SkScalarAbs(thetaWidth)) &&
+ scalar_is_integer(rx) && scalar_is_integer(ry) &&
+ scalar_is_integer(x) && scalar_is_integer(y);
+
+ for (int i = 0; i < segments; ++i) {
+ SkScalar endTheta = startTheta + thetaWidth,
+ sinEndTheta = SkScalarSinSnapToZero(endTheta),
+ cosEndTheta = SkScalarCosSnapToZero(endTheta);
+
+ unitPts[1].set(cosEndTheta, sinEndTheta);
+ unitPts[1] += centerPoint;
+ unitPts[0] = unitPts[1];
+ unitPts[0].offset(t * sinEndTheta, -t * cosEndTheta);
+ SkPoint mapped[2];
+ pointTransform.mapPoints(mapped, unitPts, (int) SK_ARRAY_COUNT(unitPts));
+ /*
+ Computing the arc width introduces rounding errors that cause arcs to start
+ outside their marks. A round rect may lose convexity as a result. If the input
+ values are on integers, place the conic on integers as well.
+ */
+ if (expectIntegers) {
+ for (SkPoint& point : mapped) {
+ point.fX = SkScalarRoundToScalar(point.fX);
+ point.fY = SkScalarRoundToScalar(point.fY);
+ }
+ }
+ this->conicTo(mapped[0], mapped[1], w);
+ startTheta = endTheta;
+ }
+ return *this;
+}
+
+SkPath& SkPath::rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, SkPath::ArcSize largeArc,
+ SkPath::Direction sweep, SkScalar dx, SkScalar dy) {
+ SkPoint currentPoint;
+ this->getLastPt(&currentPoint);
+ return this->arcTo(rx, ry, xAxisRotate, largeArc, sweep,
+ currentPoint.fX + dx, currentPoint.fY + dy);
+}
+
+SkPath& SkPath::addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle) {
+ if (oval.isEmpty() || 0 == sweepAngle) {
+ return *this;
+ }
+
+ const SkScalar kFullCircleAngle = SkIntToScalar(360);
+
+ if (sweepAngle >= kFullCircleAngle || sweepAngle <= -kFullCircleAngle) {
+ // We can treat the arc as an oval if it begins at one of our legal starting positions.
+ // See SkPath::addOval() docs.
+ SkScalar startOver90 = startAngle / 90.f;
+ SkScalar startOver90I = SkScalarRoundToScalar(startOver90);
+ SkScalar error = startOver90 - startOver90I;
+ if (SkScalarNearlyEqual(error, 0)) {
+ // Index 1 is at startAngle == 0.
+ SkScalar startIndex = std::fmod(startOver90I + 1.f, 4.f);
+ startIndex = startIndex < 0 ? startIndex + 4.f : startIndex;
+ return this->addOval(oval, sweepAngle > 0 ? kCW_Direction : kCCW_Direction,
+ (unsigned) startIndex);
+ }
+ }
+ return this->arcTo(oval, startAngle, sweepAngle, true);
+}
+
+/*
+ Need to handle the case when the angle is sharp, and our computed end-points
+  for the arc go behind pt1 and/or pt2...
+*/
+SkPath& SkPath::arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius) {
+ if (radius == 0) {
+ return this->lineTo(x1, y1);
+ }
+
+ // need to know our prev pt so we can construct tangent vectors
+ SkPoint start;
+ this->getLastPt(&start);
+
+ // need double precision for these calcs.
+ SkDVector befored, afterd;
+ befored.set({x1 - start.fX, y1 - start.fY}).normalize();
+ afterd.set({x2 - x1, y2 - y1}).normalize();
+ double cosh = befored.dot(afterd);
+ double sinh = befored.cross(afterd);
+
+ if (!befored.isFinite() || !afterd.isFinite() || SkScalarNearlyZero(SkDoubleToScalar(sinh))) {
+ return this->lineTo(x1, y1);
+ }
+
+ // safe to convert back to floats now
+ SkVector before = befored.asSkVector();
+ SkVector after = afterd.asSkVector();
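+    // cosh/sinh are the cosine/sine of the turn angle theta between the incoming
+    // and outgoing tangents. (1 - cos)/sin == tan(theta/2), so dist is the
+    // classic fillet tangent length r * tan(theta/2) from the corner to each
+    // tangency point, and the conic weight below is cos(theta/2) for an exact arc.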
+ SkScalar dist = SkScalarAbs(SkDoubleToScalar(radius * (1 - cosh) / sinh));
+ SkScalar xx = x1 - dist * before.fX;
+ SkScalar yy = y1 - dist * before.fY;
+ after.setLength(dist);
+ this->lineTo(xx, yy);
+ SkScalar weight = SkScalarSqrt(SkDoubleToScalar(SK_ScalarHalf + cosh * 0.5));
+ return this->conicTo(x1, y1, x1 + after.fX, y1 + after.fY, weight);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath& SkPath::addPath(const SkPath& path, SkScalar dx, SkScalar dy, AddPathMode mode) {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ return this->addPath(path, matrix, mode);
+}
+
+SkPath& SkPath::addPath(const SkPath& srcPath, const SkMatrix& matrix, AddPathMode mode) {
+    // Detect if we're trying to add ourselves
+ const SkPath* src = &srcPath;
+ SkTLazy<SkPath> tmp;
+ if (this == src) {
+ src = tmp.set(srcPath);
+ }
+
+ SkPathRef::Editor(&fPathRef, src->countVerbs(), src->countPoints());
+
+ RawIter iter(*src);
+ SkPoint pts[4];
+ Verb verb;
+
+ SkMatrixPriv::MapPtsProc proc = SkMatrixPriv::GetMapPtsProc(matrix);
+ bool firstVerb = true;
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ proc(matrix, &pts[0], &pts[0], 1);
+ if (firstVerb && mode == kExtend_AddPathMode && !isEmpty()) {
+ injectMoveToIfNeeded(); // In case last contour is closed
+ SkPoint lastPt;
+ // don't add lineTo if it is degenerate
+ if (fLastMoveToIndex < 0 || !this->getLastPt(&lastPt) || lastPt != pts[0]) {
+ this->lineTo(pts[0]);
+ }
+ } else {
+ this->moveTo(pts[0]);
+ }
+ break;
+ case kLine_Verb:
+ proc(matrix, &pts[1], &pts[1], 1);
+ this->lineTo(pts[1]);
+ break;
+ case kQuad_Verb:
+ proc(matrix, &pts[1], &pts[1], 2);
+ this->quadTo(pts[1], pts[2]);
+ break;
+ case kConic_Verb:
+ proc(matrix, &pts[1], &pts[1], 2);
+ this->conicTo(pts[1], pts[2], iter.conicWeight());
+ break;
+ case kCubic_Verb:
+ proc(matrix, &pts[1], &pts[1], 3);
+ this->cubicTo(pts[1], pts[2], pts[3]);
+ break;
+ case kClose_Verb:
+ this->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ firstVerb = false;
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int pts_in_verb(unsigned verb) {
+ static const uint8_t gPtsInVerb[] = {
+ 1, // kMove
+ 1, // kLine
+ 2, // kQuad
+ 2, // kConic
+ 3, // kCubic
+ 0, // kClose
+ 0 // kDone
+ };
+
+ SkASSERT(verb < SK_ARRAY_COUNT(gPtsInVerb));
+ return gPtsInVerb[verb];
+}
+
+// ignore the last point of the 1st contour
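+// We replay path's verbs backwards, emitting each segment from its start point,
+// so the contour's final point (typically the caller's current point) is never
+// re-added; hitting a kMove stops the walk.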
+SkPath& SkPath::reversePathTo(const SkPath& path) {
+ if (path.fPathRef->fVerbs.count() == 0) {
+ return *this;
+ }
+
+ const uint8_t* verbs = path.fPathRef->verbsEnd();
+ const uint8_t* verbsBegin = path.fPathRef->verbsBegin();
+ SkASSERT(verbsBegin[0] == kMove_Verb);
+ const SkPoint* pts = path.fPathRef->pointsEnd() - 1;
+ const SkScalar* conicWeights = path.fPathRef->conicWeightsEnd();
+
+ while (verbs > verbsBegin) {
+ uint8_t v = *--verbs;
+ pts -= pts_in_verb(v);
+ switch (v) {
+ case kMove_Verb:
+ // if the path has multiple contours, stop after reversing the last
+ return *this;
+ case kLine_Verb:
+ this->lineTo(pts[0]);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case kClose_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ break;
+ }
+ }
+ return *this;
+}
+
+SkPath& SkPath::reverseAddPath(const SkPath& srcPath) {
+    // Detect if we're trying to add ourselves
+ const SkPath* src = &srcPath;
+ SkTLazy<SkPath> tmp;
+ if (this == src) {
+ src = tmp.set(srcPath);
+ }
+
+ SkPathRef::Editor ed(&fPathRef, src->countVerbs(), src->countPoints());
+
+ const uint8_t* verbsBegin = src->fPathRef->verbsBegin();
+ const uint8_t* verbs = src->fPathRef->verbsEnd();
+ const SkPoint* pts = src->fPathRef->pointsEnd();
+ const SkScalar* conicWeights = src->fPathRef->conicWeightsEnd();
+
+ bool needMove = true;
+ bool needClose = false;
+ while (verbs > verbsBegin) {
+ uint8_t v = *--verbs;
+ int n = pts_in_verb(v);
+
+ if (needMove) {
+ --pts;
+ this->moveTo(pts->fX, pts->fY);
+ needMove = false;
+ }
+ pts -= n;
+ switch (v) {
+ case kMove_Verb:
+ if (needClose) {
+ this->close();
+ needClose = false;
+ }
+ needMove = true;
+ pts += 1; // so we see the point in "if (needMove)" above
+ break;
+ case kLine_Verb:
+ this->lineTo(pts[0]);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case kClose_Verb:
+ needClose = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ }
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPath::offset(SkScalar dx, SkScalar dy, SkPath* dst) const {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ this->transform(matrix, dst);
+}
+
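+// Perspective does not map cubic control points exactly, so before transforming
+// we recursively halve the cubic 'level' times (the default 2 gives four pieces)
+// to keep the approximation error small.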
+static void subdivide_cubic_to(SkPath* path, const SkPoint pts[4],
+ int level = 2) {
+ if (--level >= 0) {
+ SkPoint tmp[7];
+
+ SkChopCubicAtHalf(pts, tmp);
+ subdivide_cubic_to(path, &tmp[0], level);
+ subdivide_cubic_to(path, &tmp[3], level);
+ } else {
+ path->cubicTo(pts[1], pts[2], pts[3]);
+ }
+}
+
+void SkPath::transform(const SkMatrix& matrix, SkPath* dst) const {
+ if (matrix.isIdentity()) {
+ if (dst != nullptr && dst != this) {
+ *dst = *this;
+ }
+ return;
+ }
+
+ SkDEBUGCODE(this->validate();)
+ if (dst == nullptr) {
+ dst = (SkPath*)this;
+ }
+
+ if (matrix.hasPerspective()) {
+ SkPath tmp;
+ tmp.fFillType = fFillType;
+
+ SkPath::Iter iter(*this, false);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ tmp.moveTo(pts[0]);
+ break;
+ case kLine_Verb:
+ tmp.lineTo(pts[1]);
+ break;
+ case kQuad_Verb:
+ // promote the quad to a conic
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, SK_Scalar1, matrix));
+ break;
+ case kConic_Verb:
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, iter.conicWeight(), matrix));
+ break;
+ case kCubic_Verb:
+ subdivide_cubic_to(&tmp, pts);
+ break;
+ case kClose_Verb:
+ tmp.close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+
+ dst->swap(tmp);
+ SkPathRef::Editor ed(&dst->fPathRef);
+ matrix.mapPoints(ed.writablePoints(), ed.pathRef()->countPoints());
+ dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+ } else {
+ Convexity convexity = this->getConvexityOrUnknown();
+
+ SkPathRef::CreateTransformedCopy(&dst->fPathRef, *fPathRef.get(), matrix);
+
+ if (this != dst) {
+ dst->fLastMoveToIndex = fLastMoveToIndex;
+ dst->fFillType = fFillType;
+ dst->fIsVolatile = fIsVolatile;
+ }
+
+ // Due to finite/fragile float numerics, we can't assume that a convex path remains
+ // convex after a transformation, so mark it as unknown here.
+ // However, some transformations are thought to be safe:
+ // axis-aligned values under scale/translate.
+ //
+ // See skbug.com/8606
+ // If we can land a robust convex scan-converter, we may be able to relax/remove this
+ // check, and keep convex paths marked as such after a general transform...
+ //
+ if (matrix.isScaleTranslate() && SkPathPriv::IsAxisAligned(*this)) {
+ dst->setConvexity(convexity);
+ } else {
+ dst->setConvexity(kUnknown_Convexity);
+ }
+
+ if (this->getFirstDirection() == SkPathPriv::kUnknown_FirstDirection) {
+ dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+ } else {
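+            // The sign of the 2x2 linear part's determinant tells whether the
+            // transform preserves or mirrors orientation; a mirror flips the
+            // contour's first direction.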
+ SkScalar det2x2 =
+ matrix.get(SkMatrix::kMScaleX) * matrix.get(SkMatrix::kMScaleY) -
+ matrix.get(SkMatrix::kMSkewX) * matrix.get(SkMatrix::kMSkewY);
+ if (det2x2 < 0) {
+ dst->setFirstDirection(
+ SkPathPriv::OppositeFirstDirection(
+ (SkPathPriv::FirstDirection)this->getFirstDirection()));
+ } else if (det2x2 > 0) {
+ dst->setFirstDirection(this->getFirstDirection());
+ } else {
+ dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+ }
+ }
+
+ SkDEBUGCODE(dst->validate();)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath::Iter::Iter() {
+#ifdef SK_DEBUG
+ fPts = nullptr;
+ fConicWeights = nullptr;
+ fMoveTo.fX = fMoveTo.fY = fLastPt.fX = fLastPt.fY = 0;
+ fForceClose = fCloseLine = false;
+ fSegmentState = kEmptyContour_SegmentState;
+#endif
+ // need to init enough to make next() harmlessly return kDone_Verb
+ fVerbs = nullptr;
+ fVerbStop = nullptr;
+ fNeedClose = false;
+}
+
+SkPath::Iter::Iter(const SkPath& path, bool forceClose) {
+ this->setPath(path, forceClose);
+}
+
+void SkPath::Iter::setPath(const SkPath& path, bool forceClose) {
+ fPts = path.fPathRef->points();
+ fVerbs = path.fPathRef->verbsBegin();
+ fVerbStop = path.fPathRef->verbsEnd();
+ fConicWeights = path.fPathRef->conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+ fLastPt.fX = fLastPt.fY = 0;
+ fMoveTo.fX = fMoveTo.fY = 0;
+ fForceClose = SkToU8(forceClose);
+ fNeedClose = false;
+ fSegmentState = kEmptyContour_SegmentState;
+}
+
+bool SkPath::Iter::isClosedContour() const {
+ if (fVerbs == nullptr || fVerbs == fVerbStop) {
+ return false;
+ }
+ if (fForceClose) {
+ return true;
+ }
+
+ const uint8_t* verbs = fVerbs;
+ const uint8_t* stop = fVerbStop;
+
+ if (kMove_Verb == *verbs) {
+ verbs += 1; // skip the initial moveto
+ }
+
+ while (verbs < stop) {
+        // post-increment: read the current verb, then advance past it.
+ unsigned v = *verbs++;
+ if (kMove_Verb == v) {
+ break;
+ }
+ if (kClose_Verb == v) {
+ return true;
+ }
+ }
+ return false;
+}
+
+SkPath::Verb SkPath::Iter::autoClose(SkPoint pts[2]) {
+ SkASSERT(pts);
+ if (fLastPt != fMoveTo) {
+        // A special case: if both points are NaN, SkPoint::operator== returns
+        // false, but the iterator expects that they are treated as the same.
+        // (SkPoint is a two-dimensional float point.)
+ if (SkScalarIsNaN(fLastPt.fX) || SkScalarIsNaN(fLastPt.fY) ||
+ SkScalarIsNaN(fMoveTo.fX) || SkScalarIsNaN(fMoveTo.fY)) {
+ return kClose_Verb;
+ }
+
+ pts[0] = fLastPt;
+ pts[1] = fMoveTo;
+ fLastPt = fMoveTo;
+ fCloseLine = true;
+ return kLine_Verb;
+ } else {
+ pts[0] = fMoveTo;
+ return kClose_Verb;
+ }
+}
+
+const SkPoint& SkPath::Iter::cons_moveTo() {
+ if (fSegmentState == kAfterMove_SegmentState) {
+ // Set the first return pt to the move pt
+ fSegmentState = kAfterPrimitive_SegmentState;
+ return fMoveTo;
+ }
+
+ SkASSERT(fSegmentState == kAfterPrimitive_SegmentState);
+ // Set the first return pt to the last pt of the previous primitive.
+ return fPts[-1];
+}
+
+SkPath::Verb SkPath::Iter::next(SkPoint ptsParam[4]) {
+ SkASSERT(ptsParam);
+
+ if (fVerbs == fVerbStop) {
+ // Close the curve if requested and if there is some curve to close
+ if (fNeedClose && fSegmentState == kAfterPrimitive_SegmentState) {
+ if (kLine_Verb == this->autoClose(ptsParam)) {
+ return kLine_Verb;
+ }
+ fNeedClose = false;
+ return kClose_Verb;
+ }
+ return kDone_Verb;
+ }
+
+ unsigned verb = *fVerbs++;
+ const SkPoint* SK_RESTRICT srcPts = fPts;
+ SkPoint* SK_RESTRICT pts = ptsParam;
+
+ switch (verb) {
+ case kMove_Verb:
+ if (fNeedClose) {
+ fVerbs--; // move back one verb
+ verb = this->autoClose(pts);
+ if (verb == kClose_Verb) {
+ fNeedClose = false;
+ }
+ return (Verb)verb;
+ }
+ if (fVerbs == fVerbStop) { // might be a trailing moveto
+ return kDone_Verb;
+ }
+ fMoveTo = *srcPts;
+ pts[0] = *srcPts;
+ srcPts += 1;
+ fSegmentState = kAfterMove_SegmentState;
+ fLastPt = fMoveTo;
+ fNeedClose = fForceClose;
+ break;
+ case kLine_Verb:
+ pts[0] = this->cons_moveTo();
+ pts[1] = srcPts[0];
+ fLastPt = srcPts[0];
+ fCloseLine = false;
+ srcPts += 1;
+ break;
+ case kConic_Verb:
+ fConicWeights += 1;
+ // fall-through
+ case kQuad_Verb:
+ pts[0] = this->cons_moveTo();
+ memcpy(&pts[1], srcPts, 2 * sizeof(SkPoint));
+ fLastPt = srcPts[1];
+ srcPts += 2;
+ break;
+ case kCubic_Verb:
+ pts[0] = this->cons_moveTo();
+ memcpy(&pts[1], srcPts, 3 * sizeof(SkPoint));
+ fLastPt = srcPts[2];
+ srcPts += 3;
+ break;
+ case kClose_Verb:
+ verb = this->autoClose(pts);
+ if (verb == kLine_Verb) {
+ fVerbs--; // move back one verb
+ } else {
+ fNeedClose = false;
+ fSegmentState = kEmptyContour_SegmentState;
+ }
+ fLastPt = fMoveTo;
+ break;
+ }
+ fPts = srcPts;
+ return (Verb)verb;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "src/core/SkStringUtils.h"
+
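+// Appends one verb's points as Skia API source text. The conicWeight default of
+// -12345 is a sentinel meaning "no conic weight to print".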
+static void append_params(SkString* str, const char label[], const SkPoint pts[],
+ int count, SkScalarAsStringType strType, SkScalar conicWeight = -12345) {
+ str->append(label);
+ str->append("(");
+
+ const SkScalar* values = &pts[0].fX;
+ count *= 2;
+
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalar(str, values[i], strType);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight != -12345) {
+ str->append(", ");
+ SkAppendScalar(str, conicWeight, strType);
+ }
+ str->append(");");
+ if (kHex_SkScalarAsStringType == strType) {
+ str->append(" // ");
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalarDec(str, values[i]);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight >= 0) {
+ str->append(", ");
+ SkAppendScalarDec(str, conicWeight);
+ }
+ }
+ str->append("\n");
+}
+
+void SkPath::dump(SkWStream* wStream, bool forceClose, bool dumpAsHex) const {
+ SkScalarAsStringType asType = dumpAsHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+ Iter iter(*this, forceClose);
+ SkPoint pts[4];
+ Verb verb;
+
+ SkString builder;
+ char const * const gFillTypeStrs[] = {
+ "Winding",
+ "EvenOdd",
+ "InverseWinding",
+ "InverseEvenOdd",
+ };
+ builder.printf("path.setFillType(SkPath::k%s_FillType);\n",
+ gFillTypeStrs[(int) this->getFillType()]);
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ append_params(&builder, "path.moveTo", &pts[0], 1, asType);
+ break;
+ case kLine_Verb:
+ append_params(&builder, "path.lineTo", &pts[1], 1, asType);
+ break;
+ case kQuad_Verb:
+ append_params(&builder, "path.quadTo", &pts[1], 2, asType);
+ break;
+ case kConic_Verb:
+ append_params(&builder, "path.conicTo", &pts[1], 2, asType, iter.conicWeight());
+ break;
+ case kCubic_Verb:
+ append_params(&builder, "path.cubicTo", &pts[1], 3, asType);
+ break;
+ case kClose_Verb:
+ builder.append("path.close();\n");
+ break;
+ default:
+ SkDebugf(" path: UNKNOWN VERB %d, aborting dump...\n", verb);
+ verb = kDone_Verb; // stop the loop
+ break;
+ }
+ if (!wStream && builder.size()) {
+ SkDebugf("%s", builder.c_str());
+ builder.reset();
+ }
+ }
+ if (wStream) {
+ wStream->writeText(builder.c_str());
+ }
+}
+
+void SkPath::dump() const {
+ this->dump(nullptr, false, false);
+}
+
+void SkPath::dumpHex() const {
+ this->dump(nullptr, false, true);
+}
+
+
+bool SkPath::isValidImpl() const {
+ if ((fFillType & ~3) != 0) {
+ return false;
+ }
+
+#ifdef SK_DEBUG_PATH
+ if (!fBoundsIsDirty) {
+ SkRect bounds;
+
+ bool isFinite = compute_pt_bounds(&bounds, *fPathRef.get());
+ if (SkToBool(fIsFinite) != isFinite) {
+ return false;
+ }
+
+ if (fPathRef->countPoints() <= 1) {
+            // if we're empty, fBounds may be empty but translated, so we can't
+            // necessarily compare to bounds directly; e.g. path.addOval(2, 2, 2, 2)
+            // is empty, but its bounds will be [2, 2, 2, 2].
+ if (!bounds.isEmpty() || !fBounds.isEmpty()) {
+ return false;
+ }
+ } else {
+ if (bounds.isEmpty()) {
+ if (!fBounds.isEmpty()) {
+ return false;
+ }
+ } else {
+ if (!fBounds.isEmpty()) {
+ if (!fBounds.contains(bounds)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+#endif // SK_DEBUG_PATH
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_LEGACY_PATH_CONVEXITY // for rebaselining Chrome
+
+static int sign(SkScalar x) { return x < 0; }
+#define kValueNeverReturnedBySign 2
+
+enum DirChange {
+ kLeft_DirChange,
+ kRight_DirChange,
+ kStraight_DirChange,
+ kBackwards_DirChange,
+
+ kInvalid_DirChange
+};
+
+
+static bool almost_equal(SkScalar compA, SkScalar compB) {
+    // The error epsilon was empirically derived; worst-case round rects
+    // with a midpoint outset by 2x float epsilon in tests had an error
+ // of 12.
+ const int epsilon = 16;
+ if (!SkScalarIsFinite(compA) || !SkScalarIsFinite(compB)) {
+ return false;
+ }
+ // no need to check for small numbers because SkPath::Iter has removed degenerate values
+ int aBits = SkFloatAs2sCompliment(compA);
+ int bBits = SkFloatAs2sCompliment(compB);
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+// only valid for a single contour
+struct Convexicator {
+ Convexicator()
+ : fPtCount(0)
+ , fConvexity(SkPath::kConvex_Convexity)
+ , fFirstDirection(SkPathPriv::kUnknown_FirstDirection)
+ , fIsFinite(true)
+ , fIsCurve(false)
+ , fBackwards(false) {
+ fExpectedDir = kInvalid_DirChange;
+ // warnings
+ fPriorPt.set(0,0);
+ fLastPt.set(0, 0);
+ fCurrPt.set(0, 0);
+ fLastVec.set(0, 0);
+ fFirstVec.set(0, 0);
+
+ fDx = fDy = 0;
+ fSx = fSy = kValueNeverReturnedBySign;
+ }
+
+ SkPath::Convexity getConvexity() const { return fConvexity; }
+
+ /** The direction returned is only valid if the path is determined convex */
+ SkPathPriv::FirstDirection getFirstDirection() const { return fFirstDirection; }
+
+ void addPt(const SkPoint& pt) {
+ if (SkPath::kConcave_Convexity == fConvexity || !fIsFinite) {
+ return;
+ }
+
+ if (0 == fPtCount) {
+ fCurrPt = pt;
+ ++fPtCount;
+ } else {
+ SkVector vec = pt - fCurrPt;
+ SkScalar lengthSqd = SkPointPriv::LengthSqd(vec);
+ if (!SkScalarIsFinite(lengthSqd)) {
+ fIsFinite = false;
+ } else if (lengthSqd) {
+ fPriorPt = fLastPt;
+ fLastPt = fCurrPt;
+ fCurrPt = pt;
+ if (++fPtCount == 2) {
+ fFirstVec = fLastVec = vec;
+ } else {
+ SkASSERT(fPtCount > 2);
+ this->addVec(vec);
+ }
+
+ int sx = sign(vec.fX);
+ int sy = sign(vec.fY);
+ fDx += (sx != fSx);
+ fDy += (sy != fSy);
+ fSx = sx;
+ fSy = sy;
+
+ if (fDx > 3 || fDy > 3) {
+ fConvexity = SkPath::kConcave_Convexity;
+ }
+ }
+ }
+ }
+
+ void close() {
+ if (fPtCount > 2) {
+ this->addVec(fFirstVec);
+ }
+ }
+
+ DirChange directionChange(const SkVector& curVec) {
+ SkScalar cross = SkPoint::CrossProduct(fLastVec, curVec);
+
+ SkScalar smallest = SkTMin(fCurrPt.fX, SkTMin(fCurrPt.fY, SkTMin(fLastPt.fX, fLastPt.fY)));
+ SkScalar largest = SkTMax(fCurrPt.fX, SkTMax(fCurrPt.fY, SkTMax(fLastPt.fX, fLastPt.fY)));
+ largest = SkTMax(largest, -smallest);
+
+ if (!almost_equal(largest, largest + cross)) {
+ int sign = SkScalarSignAsInt(cross);
+ if (sign) {
+ return (1 == sign) ? kRight_DirChange : kLeft_DirChange;
+ }
+ }
+
+ if (cross) {
+ double dLastVecX = SkScalarToDouble(fLastPt.fX) - SkScalarToDouble(fPriorPt.fX);
+ double dLastVecY = SkScalarToDouble(fLastPt.fY) - SkScalarToDouble(fPriorPt.fY);
+ double dCurrVecX = SkScalarToDouble(fCurrPt.fX) - SkScalarToDouble(fLastPt.fX);
+ double dCurrVecY = SkScalarToDouble(fCurrPt.fY) - SkScalarToDouble(fLastPt.fY);
+ double dCross = dLastVecX * dCurrVecY - dLastVecY * dCurrVecX;
+ if (!approximately_zero_when_compared_to(dCross, SkScalarToDouble(largest))) {
+ int sign = SkScalarSignAsInt(SkDoubleToScalar(dCross));
+ if (sign) {
+ return (1 == sign) ? kRight_DirChange : kLeft_DirChange;
+ }
+ }
+ }
+
+ if (!SkScalarNearlyZero(SkPointPriv::LengthSqd(fLastVec),
+ SK_ScalarNearlyZero*SK_ScalarNearlyZero) &&
+ !SkScalarNearlyZero(SkPointPriv::LengthSqd(curVec),
+ SK_ScalarNearlyZero*SK_ScalarNearlyZero) &&
+ fLastVec.dot(curVec) < 0.0f) {
+ return kBackwards_DirChange;
+ }
+
+ return kStraight_DirChange;
+ }
+
+ bool hasBackwards() const {
+ return fBackwards;
+ }
+
+ bool isFinite() const {
+ return fIsFinite;
+ }
+
+ void setCurve(bool isCurve) {
+ fIsCurve = isCurve;
+ }
+
+private:
+ void addVec(const SkVector& vec) {
+ SkASSERT(vec.fX || vec.fY);
+ DirChange dir = this->directionChange(vec);
+ switch (dir) {
+ case kLeft_DirChange: // fall through
+ case kRight_DirChange:
+ if (kInvalid_DirChange == fExpectedDir) {
+ fExpectedDir = dir;
+ fFirstDirection = (kRight_DirChange == dir) ? SkPathPriv::kCW_FirstDirection
+ : SkPathPriv::kCCW_FirstDirection;
+ } else if (dir != fExpectedDir) {
+ fConvexity = SkPath::kConcave_Convexity;
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ }
+ fLastVec = vec;
+ break;
+ case kStraight_DirChange:
+ break;
+ case kBackwards_DirChange:
+ if (fIsCurve) {
+                    // If any subsequent direction change is non-backward, the
+                    // path is concave. Otherwise, it's still convex.
+ fExpectedDir = dir;
+ }
+ fLastVec = vec;
+ fBackwards = true;
+ break;
+ case kInvalid_DirChange:
+ SK_ABORT("Use of invalid direction change flag");
+ break;
+ }
+ }
+
+ SkPoint fPriorPt;
+ SkPoint fLastPt;
+ SkPoint fCurrPt;
+ // fLastVec does not necessarily start at fLastPt. We only advance it when the cross product
+ // value with the current vec is deemed to be of a significant value.
+ SkVector fLastVec, fFirstVec;
+ int fPtCount; // non-degenerate points
+ DirChange fExpectedDir;
+ SkPath::Convexity fConvexity;
+ SkPathPriv::FirstDirection fFirstDirection;
+ int fDx, fDy, fSx, fSy;
+ bool fIsFinite;
+ bool fIsCurve;
+ bool fBackwards;
+};
+
+SkPath::Convexity SkPath::internalGetConvexity() const {
+ // Sometimes we think we need to calculate convexity but another thread already did.
+ auto c = this->getConvexityOrUnknown();
+ if (c != kUnknown_Convexity) {
+ return c;
+ }
+
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ SkPath::Iter iter(*this, true);
+
+ int contourCount = 0;
+ int count;
+ Convexicator state;
+
+ if (!isFinite()) {
+ return kUnknown_Convexity;
+ }
+ while ((verb = iter.next(pts, false, false)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ if (++contourCount > 1) {
+ this->setConvexity(kConcave_Convexity);
+ return kConcave_Convexity;
+ }
+ pts[1] = pts[0];
+ // fall through
+ case kLine_Verb:
+ count = 1;
+ state.setCurve(false);
+ break;
+ case kQuad_Verb:
+ // fall through
+ case kConic_Verb:
+ // fall through
+ case kCubic_Verb:
+ count = 2 + (kCubic_Verb == verb);
+ // As an additional enhancement, this could set curve true only
+ // if the curve is nonlinear
+ state.setCurve(true);
+ break;
+ case kClose_Verb:
+ state.setCurve(false);
+ state.close();
+ count = 0;
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ this->setConvexity(kConcave_Convexity);
+ return kConcave_Convexity;
+ }
+
+ for (int i = 1; i <= count; i++) {
+ state.addPt(pts[i]);
+ }
+ // early exit
+ if (!state.isFinite()) {
+ return kUnknown_Convexity;
+ }
+ if (kConcave_Convexity == state.getConvexity()) {
+ this->setConvexity(kConcave_Convexity);
+ return kConcave_Convexity;
+ }
+ }
+ this->setConvexity(state.getConvexity());
+
+ if (this->getConvexityOrUnknown() == kConvex_Convexity &&
+ this->getFirstDirection() == SkPathPriv::kUnknown_FirstDirection) {
+
+ if (state.getFirstDirection() == SkPathPriv::kUnknown_FirstDirection
+ && !this->getBounds().isEmpty()
+ && !state.hasBackwards()) {
+ this->setConvexity(Convexity::kConcave_Convexity);
+ } else {
+ this->setFirstDirection(state.getFirstDirection());
+ }
+ }
+ return this->getConvexityOrUnknown();
+}
+
+#else
+
+static int sign(SkScalar x) { return x < 0; }
+#define kValueNeverReturnedBySign 2
+
+enum DirChange {
+ kUnknown_DirChange,
+ kLeft_DirChange,
+ kRight_DirChange,
+ kStraight_DirChange,
+ kBackwards_DirChange, // if double back, allow simple lines to be convex
+ kInvalid_DirChange
+};
+
+
+static bool almost_equal(SkScalar compA, SkScalar compB) {
+    // The error epsilon was empirically derived; worst-case round rects
+    // with a midpoint outset by 2x float epsilon in tests had an error
+ // of 12.
+ const int epsilon = 16;
+ if (!SkScalarIsFinite(compA) || !SkScalarIsFinite(compB)) {
+ return false;
+ }
+ // no need to check for small numbers because SkPath::Iter has removed degenerate values
+ int aBits = SkFloatAs2sCompliment(compA);
+ int bBits = SkFloatAs2sCompliment(compB);
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+// only valid for a single contour
+struct Convexicator {
+
+ /** The direction returned is only valid if the path is determined convex */
+ SkPathPriv::FirstDirection getFirstDirection() const { return fFirstDirection; }
+
+ void setMovePt(const SkPoint& pt) {
+ fPriorPt = fLastPt = fCurrPt = pt;
+ }
+
+ bool addPt(const SkPoint& pt) {
+ if (fCurrPt == pt) {
+ return true;
+ }
+ fCurrPt = pt;
+ if (fPriorPt == fLastPt) { // should only be true for first non-zero vector
+ fLastVec = fCurrPt - fLastPt;
+ fFirstPt = pt;
+ } else if (!this->addVec(fCurrPt - fLastPt)) {
+ return false;
+ }
+ fPriorPt = fLastPt;
+ fLastPt = fCurrPt;
+ return true;
+ }
+
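+    // Quick rejection: walk the points (wrapping once past the end) and count
+    // sign changes of the x- and y-deltas. More than three sign changes in
+    // either axis cannot happen for a convex contour, so that proves concavity;
+    // otherwise convexity is still unknown.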
+ static SkPath::Convexity BySign(const SkPoint points[], int count) {
+ const SkPoint* last = points + count;
+ SkPoint currPt = *points++;
+ SkPoint firstPt = currPt;
+ int dxes = 0;
+ int dyes = 0;
+ int lastSx = kValueNeverReturnedBySign;
+ int lastSy = kValueNeverReturnedBySign;
+ for (int outerLoop = 0; outerLoop < 2; ++outerLoop ) {
+ while (points != last) {
+ SkVector vec = *points - currPt;
+ if (!vec.isZero()) {
+ // give up if vector construction failed
+ if (!vec.isFinite()) {
+ return SkPath::kUnknown_Convexity;
+ }
+ int sx = sign(vec.fX);
+ int sy = sign(vec.fY);
+ dxes += (sx != lastSx);
+ dyes += (sy != lastSy);
+ if (dxes > 3 || dyes > 3) {
+ return SkPath::kConcave_Convexity;
+ }
+ lastSx = sx;
+ lastSy = sy;
+ }
+ currPt = *points++;
+ if (outerLoop) {
+ break;
+ }
+ }
+ points = &firstPt;
+ }
+ return SkPath::kConvex_Convexity; // that is, it may be convex, don't know yet
+ }
+
+ bool close() {
+ return this->addPt(fFirstPt);
+ }
+
+ bool isFinite() const {
+ return fIsFinite;
+ }
+
+ int reversals() const {
+ return fReversals;
+ }
+
+private:
+ DirChange directionChange(const SkVector& curVec) {
+ SkScalar cross = SkPoint::CrossProduct(fLastVec, curVec);
+ if (!SkScalarIsFinite(cross)) {
+ return kUnknown_DirChange;
+ }
+ SkScalar smallest = SkTMin(fCurrPt.fX, SkTMin(fCurrPt.fY, SkTMin(fLastPt.fX, fLastPt.fY)));
+ SkScalar largest = SkTMax(fCurrPt.fX, SkTMax(fCurrPt.fY, SkTMax(fLastPt.fX, fLastPt.fY)));
+ largest = SkTMax(largest, -smallest);
+
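+        // If adding the cross product to the largest coordinate magnitude does
+        // not change it (within a few ULPs), the turn is too small to trust as
+        // left/right, so classify it as straight or backwards instead.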
+ if (almost_equal(largest, largest + cross)) {
+ constexpr SkScalar nearlyZeroSqd = SK_ScalarNearlyZero * SK_ScalarNearlyZero;
+ if (SkScalarNearlyZero(SkPointPriv::LengthSqd(fLastVec), nearlyZeroSqd) ||
+ SkScalarNearlyZero(SkPointPriv::LengthSqd(curVec), nearlyZeroSqd)) {
+ return kUnknown_DirChange;
+ }
+ return fLastVec.dot(curVec) < 0 ? kBackwards_DirChange : kStraight_DirChange;
+ }
+ return 1 == SkScalarSignAsInt(cross) ? kRight_DirChange : kLeft_DirChange;
+ }
+
+ bool addVec(const SkVector& curVec) {
+ DirChange dir = this->directionChange(curVec);
+ switch (dir) {
+ case kLeft_DirChange: // fall through
+ case kRight_DirChange:
+ if (kInvalid_DirChange == fExpectedDir) {
+ fExpectedDir = dir;
+ fFirstDirection = (kRight_DirChange == dir) ? SkPathPriv::kCW_FirstDirection
+ : SkPathPriv::kCCW_FirstDirection;
+ } else if (dir != fExpectedDir) {
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ return false;
+ }
+ fLastVec = curVec;
+ break;
+ case kStraight_DirChange:
+ break;
+ case kBackwards_DirChange:
+ // allow path to reverse direction twice
+ // Given path.moveTo(0, 0); path.lineTo(1, 1);
+ // - 1st reversal: direction change formed by line (0,0 1,1), line (1,1 0,0)
+ // - 2nd reversal: direction change formed by line (1,1 0,0), line (0,0 1,1)
+ fLastVec = curVec;
+ return ++fReversals < 3;
+ case kUnknown_DirChange:
+ return (fIsFinite = false);
+ case kInvalid_DirChange:
+ SK_ABORT("Use of invalid direction change flag");
+ break;
+ }
+ return true;
+ }
+
+ SkPoint fFirstPt {0, 0};
+ SkPoint fPriorPt {0, 0};
+ SkPoint fLastPt {0, 0};
+ SkPoint fCurrPt {0, 0};
+ SkVector fLastVec {0, 0};
+ DirChange fExpectedDir { kInvalid_DirChange };
+ SkPathPriv::FirstDirection fFirstDirection { SkPathPriv::kUnknown_FirstDirection };
+ int fReversals { 0 };
+ bool fIsFinite { true };
+};
+
+SkPath::Convexity SkPath::internalGetConvexity() const {
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ SkPath::Iter iter(*this, true);
+ auto setComputedConvexity = [=](Convexity convexity){
+ SkASSERT(kUnknown_Convexity != convexity);
+ this->setConvexity(convexity);
+ return convexity;
+ };
+
+    // Check to see if the path changes direction more than three times as a
+    // quick concave test.
+ int pointCount = this->countPoints();
+ // last moveTo index may exceed point count if data comes from fuzzer (via SkImageFilter)
+ if (0 < fLastMoveToIndex && fLastMoveToIndex < pointCount) {
+ pointCount = fLastMoveToIndex;
+ }
+ if (pointCount > 3) {
+ const SkPoint* points = fPathRef->points();
+ const SkPoint* last = &points[pointCount];
+ // only consider the last of the initial move tos
+ while (SkPath::kMove_Verb == iter.next(pts)) {
+ ++points;
+ }
+ --points;
+ SkPath::Convexity convexity = Convexicator::BySign(points, (int) (last - points));
+ if (SkPath::kConcave_Convexity == convexity) {
+ return setComputedConvexity(SkPath::kConcave_Convexity);
+ } else if (SkPath::kUnknown_Convexity == convexity) {
+ return SkPath::kUnknown_Convexity;
+ }
+ iter.setPath(*this, true);
+ } else if (!this->isFinite()) {
+ return kUnknown_Convexity;
+ }
+
+ int contourCount = 0;
+ int count;
+ Convexicator state;
+ auto setFail = [=](){
+ if (!state.isFinite()) {
+ return SkPath::kUnknown_Convexity;
+ }
+ return setComputedConvexity(SkPath::kConcave_Convexity);
+ };
+
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ if (++contourCount > 1) {
+ return setComputedConvexity(kConcave_Convexity);
+ }
+ state.setMovePt(pts[0]);
+ count = 0;
+ break;
+ case kLine_Verb:
+ count = 1;
+ break;
+ case kQuad_Verb:
+ // fall through
+ case kConic_Verb:
+ count = 2;
+ break;
+ case kCubic_Verb:
+ count = 3;
+ break;
+ case kClose_Verb:
+ if (!state.close()) {
+ return setFail();
+ }
+ count = 0;
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return setComputedConvexity(kConcave_Convexity);
+ }
+ for (int i = 1; i <= count; i++) {
+ if (!state.addPt(pts[i])) {
+ return setFail();
+ }
+ }
+ }
+
+ if (this->getFirstDirection() == SkPathPriv::kUnknown_FirstDirection) {
+ if (state.getFirstDirection() == SkPathPriv::kUnknown_FirstDirection
+ && !this->getBounds().isEmpty()) {
+ return setComputedConvexity(state.reversals() < 3 ?
+ kConvex_Convexity : kConcave_Convexity);
+ }
+ this->setFirstDirection(state.getFirstDirection());
+ }
+ return setComputedConvexity(kConvex_Convexity);
+}
+
+bool SkPathPriv::IsConvex(const SkPoint points[], int count) {
+ SkPath::Convexity convexity = Convexicator::BySign(points, count);
+ if (SkPath::kConvex_Convexity != convexity) {
+ return false;
+ }
+ Convexicator state;
+ state.setMovePt(points[0]);
+ for (int i = 1; i < count; i++) {
+ if (!state.addPt(points[i])) {
+ return false;
+ }
+ }
+ if (!state.addPt(points[0])) {
+ return false;
+ }
+ if (!state.close()) {
+ return false;
+ }
+ return state.getFirstDirection() != SkPathPriv::kUnknown_FirstDirection
+ || state.reversals() < 3;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class ContourIter {
+public:
+ ContourIter(const SkPathRef& pathRef);
+
+ bool done() const { return fDone; }
+ // if !done() then these may be called
+ int count() const { return fCurrPtCount; }
+ const SkPoint* pts() const { return fCurrPt; }
+ void next();
+
+private:
+ int fCurrPtCount;
+ const SkPoint* fCurrPt;
+ const uint8_t* fCurrVerb;
+ const uint8_t* fStopVerbs;
+ const SkScalar* fCurrConicWeight;
+ bool fDone;
+ SkDEBUGCODE(int fContourCounter;)
+};
+
+ContourIter::ContourIter(const SkPathRef& pathRef) {
+ fStopVerbs = pathRef.verbsEnd();
+ fDone = false;
+ fCurrPt = pathRef.points();
+ fCurrVerb = pathRef.verbsBegin();
+ fCurrConicWeight = pathRef.conicWeights();
+ fCurrPtCount = 0;
+ SkDEBUGCODE(fContourCounter = 0;)
+ this->next();
+}
+
+void ContourIter::next() {
+ if (fCurrVerb >= fStopVerbs) {
+ fDone = true;
+ }
+ if (fDone) {
+ return;
+ }
+
+ // skip pts of prev contour
+ fCurrPt += fCurrPtCount;
+
+ SkASSERT(SkPath::kMove_Verb == fCurrVerb[0]);
+ int ptCount = 1; // moveTo
+ const uint8_t* verbs = fCurrVerb;
+
+ for (verbs++; verbs < fStopVerbs; verbs++) {
+ switch (*verbs) {
+ case SkPath::kMove_Verb:
+ goto CONTOUR_END;
+ case SkPath::kLine_Verb:
+ ptCount += 1;
+ break;
+ case SkPath::kConic_Verb:
+ fCurrConicWeight += 1;
+ // fall-through
+ case SkPath::kQuad_Verb:
+ ptCount += 2;
+ break;
+ case SkPath::kCubic_Verb:
+ ptCount += 3;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+CONTOUR_END:
+ fCurrPtCount = ptCount;
+ fCurrVerb = verbs;
+ SkDEBUGCODE(++fContourCounter;)
+}
+
+// returns cross product of (p1 - p0) and (p2 - p0)
+static SkScalar cross_prod(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ SkScalar cross = SkPoint::CrossProduct(p1 - p0, p2 - p0);
+    // We may get 0 when the subtractions above underflow. We expect this to be
+ // very rare and lazily promote to double.
+ if (0 == cross) {
+ double p0x = SkScalarToDouble(p0.fX);
+ double p0y = SkScalarToDouble(p0.fY);
+
+ double p1x = SkScalarToDouble(p1.fX);
+ double p1y = SkScalarToDouble(p1.fY);
+
+ double p2x = SkScalarToDouble(p2.fX);
+ double p2y = SkScalarToDouble(p2.fY);
+
+ cross = SkDoubleToScalar((p1x - p0x) * (p2y - p0y) -
+ (p1y - p0y) * (p2x - p0x));
+
+ }
+ return cross;
+}
+
+// Returns the first pt with the maximum Y coordinate
+static int find_max_y(const SkPoint pts[], int count) {
+ SkASSERT(count > 0);
+ SkScalar max = pts[0].fY;
+ int firstIndex = 0;
+ for (int i = 1; i < count; ++i) {
+ SkScalar y = pts[i].fY;
+ if (y > max) {
+ max = y;
+ firstIndex = i;
+ }
+ }
+ return firstIndex;
+}
+
+static int find_diff_pt(const SkPoint pts[], int index, int n, int inc) {
+ int i = index;
+ for (;;) {
+ i = (i + inc) % n;
+ if (i == index) { // we wrapped around, so abort
+ break;
+ }
+ if (pts[index] != pts[i]) { // found a different point, success!
+ break;
+ }
+ }
+ return i;
+}
+
+/**
+ * Starting at index, and moving forward (incrementing), find the xmin and
+ * xmax of the contiguous points that have the same Y.
+ */
+static int find_min_max_x_at_y(const SkPoint pts[], int index, int n,
+ int* maxIndexPtr) {
+ const SkScalar y = pts[index].fY;
+ SkScalar min = pts[index].fX;
+ SkScalar max = min;
+ int minIndex = index;
+ int maxIndex = index;
+ for (int i = index + 1; i < n; ++i) {
+ if (pts[i].fY != y) {
+ break;
+ }
+ SkScalar x = pts[i].fX;
+ if (x < min) {
+ min = x;
+ minIndex = i;
+ } else if (x > max) {
+ max = x;
+ maxIndex = i;
+ }
+ }
+ *maxIndexPtr = maxIndex;
+ return minIndex;
+}
+
+static void crossToDir(SkScalar cross, SkPathPriv::FirstDirection* dir) {
+ *dir = cross > 0 ? SkPathPriv::kCW_FirstDirection : SkPathPriv::kCCW_FirstDirection;
+}
+
+/*
+ * We loop through all contours, and keep the computed cross-product of the
+ * contour that contained the global y-max. If we just look at the first
+ * contour, we may find one that is wound the opposite way (correctly) since
+ * it is the interior of a hole (e.g. 'o'). Thus we must find the contour
+ * that is outermost (or at least has the global y-max) before we can consider
+ * its cross product.
+ */
+bool SkPathPriv::CheapComputeFirstDirection(const SkPath& path, FirstDirection* dir) {
+ auto d = path.getFirstDirection();
+ if (d != kUnknown_FirstDirection) {
+ *dir = static_cast<FirstDirection>(d);
+ return true;
+ }
+
+ // We don't want to pay the cost for computing convexity if it is unknown,
+ // so we call getConvexityOrUnknown() instead of isConvex().
+ if (path.getConvexityOrUnknown() == SkPath::kConvex_Convexity) {
+ SkASSERT(path.getFirstDirection() == kUnknown_FirstDirection);
+ *dir = static_cast<FirstDirection>(path.getFirstDirection());
+ return false;
+ }
+
+ ContourIter iter(*path.fPathRef.get());
+
+ // initialize with our logical y-min
+ SkScalar ymax = path.getBounds().fTop;
+ SkScalar ymaxCross = 0;
+
+ for (; !iter.done(); iter.next()) {
+ int n = iter.count();
+ if (n < 3) {
+ continue;
+ }
+
+ const SkPoint* pts = iter.pts();
+ SkScalar cross = 0;
+ int index = find_max_y(pts, n);
+ if (pts[index].fY < ymax) {
+ continue;
+ }
+
+ // If there is more than 1 distinct point at the y-max, we take the
+ // x-min and x-max of them and just subtract to compute the dir.
+ if (pts[(index + 1) % n].fY == pts[index].fY) {
+ int maxIndex;
+ int minIndex = find_min_max_x_at_y(pts, index, n, &maxIndex);
+ if (minIndex == maxIndex) {
+ goto TRY_CROSSPROD;
+ }
+ SkASSERT(pts[minIndex].fY == pts[index].fY);
+ SkASSERT(pts[maxIndex].fY == pts[index].fY);
+ SkASSERT(pts[minIndex].fX <= pts[maxIndex].fX);
+ // we just subtract the indices, and let that auto-convert to
+ // SkScalar, since we just want - or + to signal the direction.
+ cross = minIndex - maxIndex;
+ } else {
+ TRY_CROSSPROD:
+ // Find a next and prev index to use for the cross-product test,
+ // but we try to find pts that form non-zero vectors from pts[index]
+ //
+            // It's possible that we can't find two non-degenerate vectors, so
+ // we have to guard our search (e.g. all the pts could be in the
+ // same place).
+
+ // we pass n - 1 instead of -1 so we don't foul up % operator by
+ // passing it a negative LH argument.
+ int prev = find_diff_pt(pts, index, n, n - 1);
+ if (prev == index) {
+ // completely degenerate, skip to next contour
+ continue;
+ }
+ int next = find_diff_pt(pts, index, n, 1);
+ SkASSERT(next != index);
+ cross = cross_prod(pts[prev], pts[index], pts[next]);
+ // if we get a zero and the points are horizontal, then we look at the spread in
+ // x-direction. We really should continue to walk away from the degeneracy until
+ // there is a divergence.
+ if (0 == cross && pts[prev].fY == pts[index].fY && pts[next].fY == pts[index].fY) {
+ // construct the subtract so we get the correct Direction below
+ cross = pts[index].fX - pts[next].fX;
+ }
+ }
+
+ if (cross) {
+ // record our best guess so far
+ ymax = pts[index].fY;
+ ymaxCross = cross;
+ }
+ }
+ if (ymaxCross) {
+ crossToDir(ymaxCross, dir);
+ path.setFirstDirection(*dir);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool between(SkScalar a, SkScalar b, SkScalar c) {
+ SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
+ || (SkScalarNearlyZero(a) && SkScalarNearlyZero(b) && SkScalarNearlyZero(c)));
+ return (a - b) * (c - b) <= 0;
+}
+
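+// Evaluates one coordinate of a cubic Bezier with control values c0..c3 at t by
+// first converting to the power basis A*t^3 + B*t^2 + C*t + D.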
+static SkScalar eval_cubic_pts(SkScalar c0, SkScalar c1, SkScalar c2, SkScalar c3,
+ SkScalar t) {
+ SkScalar A = c3 + 3*(c1 - c2) - c0;
+ SkScalar B = 3*(c2 - c1 - c1 + c0);
+ SkScalar C = 3*(c1 - c0);
+ SkScalar D = c0;
+ return poly_eval(A, B, C, D, t);
+}
+
+template <size_t N> static void find_minmax(const SkPoint pts[],
+ SkScalar* minPtr, SkScalar* maxPtr) {
+ SkScalar min, max;
+ min = max = pts[0].fX;
+ for (size_t i = 1; i < N; ++i) {
+ min = SkMinScalar(min, pts[i].fX);
+ max = SkMaxScalar(max, pts[i].fX);
+ }
+ *minPtr = min;
+ *maxPtr = max;
+}
+
+static bool checkOnCurve(SkScalar x, SkScalar y, const SkPoint& start, const SkPoint& end) {
+ if (start.fY == end.fY) {
+ return between(start.fX, x, end.fX) && x != end.fX;
+ } else {
+ return x == start.fX && y == start.fY;
+ }
+}
+
+static int winding_mono_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y3 = pts[3].fY;
+
+ int dir = 1;
+ if (y0 > y3) {
+ using std::swap;
+ swap(y0, y3);
+ dir = -1;
+ }
+ if (y < y0 || y > y3) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[3])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y3) {
+ return 0;
+ }
+
+ // quickreject or quickaccept
+ SkScalar min, max;
+ find_minmax<4>(pts, &min, &max);
+ if (x < min) {
+ return 0;
+ }
+ if (x > max) {
+ return dir;
+ }
+
+ // compute the actual x(t) value
+ SkScalar t;
+ if (!SkCubicClipper::ChopMonoAtY(pts, y, &t)) {
+ return 0;
+ }
+ SkScalar xt = eval_cubic_pts(pts[0].fX, pts[1].fX, pts[2].fX, pts[3].fX, t);
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[3].fX || y != pts[3].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static int winding_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[10];
+ int n = SkChopCubicAtYExtrema(pts, dst);
+ int w = 0;
+ for (int i = 0; i <= n; ++i) {
+ w += winding_mono_cubic(&dst[i * 3], x, y, onCurveCount);
+ }
+ return w;
+}
+
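+// A conic is a rational quadratic Bezier:
+//   X(t) = ((1-t)^2*x0 + 2t(1-t)*w*x1 + t^2*x2) / ((1-t)^2 + 2t(1-t)*w + t^2)
+// The helpers below evaluate the numerator and denominator in power-basis form
+// (src is an SkPoint array viewed as scalars with stride 2, so src[2] and
+// src[4] are the same coordinate of pts[1] and pts[2]).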
+static double conic_eval_numerator(const SkScalar src[], SkScalar w, SkScalar t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= 1);
+ SkScalar src2w = src[2] * w;
+ SkScalar C = src[0];
+ SkScalar A = src[4] - 2 * src2w + C;
+ SkScalar B = 2 * (src2w - C);
+ return poly_eval(A, B, C, t);
+}
+
+
+static double conic_eval_denominator(SkScalar w, SkScalar t) {
+ SkScalar B = 2 * (w - 1);
+ SkScalar C = 1;
+ SkScalar A = -B;
+ return poly_eval(A, B, C, t);
+}
+
+static int winding_mono_conic(const SkConic& conic, SkScalar x, SkScalar y, int* onCurveCount) {
+ const SkPoint* pts = conic.fPts;
+ SkScalar y0 = pts[0].fY;
+ SkScalar y2 = pts[2].fY;
+
+ int dir = 1;
+ if (y0 > y2) {
+ using std::swap;
+ swap(y0, y2);
+ dir = -1;
+ }
+ if (y < y0 || y > y2) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[2])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y2) {
+ return 0;
+ }
+
+ SkScalar roots[2];
+ SkScalar A = pts[2].fY;
+ SkScalar B = pts[1].fY * conic.fW - y * conic.fW + y;
+ SkScalar C = pts[0].fY;
+ A += C - 2 * B; // A = a + c - 2*(b*w - yCept*w + yCept)
+ B -= C; // B = b*w - w * yCept + yCept - a
+ C -= y;
+ int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
+ SkASSERT(n <= 1);
+ SkScalar xt;
+ if (0 == n) {
+ // zero roots are returned only when y0 == y
+ // Need [0] if dir == 1
+ // and [2] if dir == -1
+ xt = pts[1 - dir].fX;
+ } else {
+ SkScalar t = roots[0];
+ xt = conic_eval_numerator(&pts[0].fX, conic.fW, t) / conic_eval_denominator(conic.fW, t);
+ }
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[2].fX || y != pts[2].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static bool is_mono_quad(SkScalar y0, SkScalar y1, SkScalar y2) {
+ // return SkScalarSignAsInt(y0 - y1) + SkScalarSignAsInt(y1 - y2) != 0;
+ if (y0 == y1) {
+ return true;
+ }
+ if (y0 < y1) {
+ return y1 <= y2;
+ } else {
+ return y1 >= y2;
+ }
+}
+
+static int winding_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar weight,
+ int* onCurveCount) {
+ SkConic conic(pts, weight);
+ SkConic chopped[2];
+ // If the data points are very large, the conic may not be monotonic but may also
+    // fail to chop. In that case, the chopper does not split the original conic in two.
+ bool isMono = is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY) || !conic.chopAtYExtrema(chopped);
+ int w = winding_mono_conic(isMono ? conic : chopped[0], x, y, onCurveCount);
+ if (!isMono) {
+ w += winding_mono_conic(chopped[1], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static int winding_mono_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y2 = pts[2].fY;
+
+ int dir = 1;
+ if (y0 > y2) {
+ using std::swap;
+ swap(y0, y2);
+ dir = -1;
+ }
+ if (y < y0 || y > y2) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[2])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y2) {
+ return 0;
+ }
+ // bounds check on X (not required. is it faster?)
+#if 0
+ if (pts[0].fX > x && pts[1].fX > x && pts[2].fX > x) {
+ return 0;
+ }
+#endif
+
+ SkScalar roots[2];
+ int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
+ 2 * (pts[1].fY - pts[0].fY),
+ pts[0].fY - y,
+ roots);
+ SkASSERT(n <= 1);
+ SkScalar xt;
+ if (0 == n) {
+ // zero roots are returned only when y0 == y
+ // Need [0] if dir == 1
+ // and [2] if dir == -1
+ xt = pts[1 - dir].fX;
+ } else {
+ SkScalar t = roots[0];
+ SkScalar C = pts[0].fX;
+ SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
+ SkScalar B = 2 * (pts[1].fX - C);
+ xt = poly_eval(A, B, C, t);
+ }
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[2].fX || y != pts[2].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static int winding_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[5];
+ int n = 0;
+
+ if (!is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY)) {
+ n = SkChopQuadAtYExtrema(pts, dst);
+ pts = dst;
+ }
+ int w = winding_mono_quad(pts, x, y, onCurveCount);
+ if (n > 0) {
+ w += winding_mono_quad(&pts[2], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static int winding_line(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar x0 = pts[0].fX;
+ SkScalar y0 = pts[0].fY;
+ SkScalar x1 = pts[1].fX;
+ SkScalar y1 = pts[1].fY;
+
+ SkScalar dy = y1 - y0;
+
+ int dir = 1;
+ if (y0 > y1) {
+ using std::swap;
+ swap(y0, y1);
+ dir = -1;
+ }
+ if (y < y0 || y > y1) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[1])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y1) {
+ return 0;
+ }
+ SkScalar cross = (x1 - x0) * (y - pts[0].fY) - dy * (x - x0);
+
+ if (!cross) {
+ // zero cross means the point is on the line, and since the case where
+ // y of the query point is at the end point is handled above, we can be
+ // sure that we're on the line (excluding the end point) here
+ if (x != x1 || y != pts[1].fY) {
+ *onCurveCount += 1;
+ }
+ dir = 0;
+ } else if (SkScalarSignAsInt(cross) == dir) {
+ dir = 0;
+ }
+ return dir;
+}
+
+static void tangent_cubic(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)
+ && !between(pts[2].fY, y, pts[3].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)
+ && !between(pts[2].fX, x, pts[3].fX)) {
+ return;
+ }
+ SkPoint dst[10];
+ int n = SkChopCubicAtYExtrema(pts, dst);
+ for (int i = 0; i <= n; ++i) {
+ SkPoint* c = &dst[i * 3];
+ SkScalar t;
+ if (!SkCubicClipper::ChopMonoAtY(c, y, &t)) {
+ continue;
+ }
+ SkScalar xt = eval_cubic_pts(c[0].fX, c[1].fX, c[2].fX, c[3].fX, t);
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ SkVector tangent;
+ SkEvalCubicAt(c, t, nullptr, &tangent, nullptr);
+ tangents->push_back(tangent);
+ }
+}
+
+static void tangent_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar w,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
+ return;
+ }
+ SkScalar roots[2];
+ SkScalar A = pts[2].fY;
+ SkScalar B = pts[1].fY * w - y * w + y;
+ SkScalar C = pts[0].fY;
+ A += C - 2 * B; // A = a + c - 2*(b*w - yCept*w + yCept)
+ B -= C; // B = b*w - w * yCept + yCept - a
+ C -= y;
+ int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
+ for (int index = 0; index < n; ++index) {
+ SkScalar t = roots[index];
+ SkScalar xt = conic_eval_numerator(&pts[0].fX, w, t) / conic_eval_denominator(w, t);
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ SkConic conic(pts, w);
+ tangents->push_back(conic.evalTangentAt(t));
+ }
+}
+
+static void tangent_quad(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
+ return;
+ }
+ SkScalar roots[2];
+ int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
+ 2 * (pts[1].fY - pts[0].fY),
+ pts[0].fY - y,
+ roots);
+ for (int index = 0; index < n; ++index) {
+ SkScalar t = roots[index];
+ SkScalar C = pts[0].fX;
+ SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
+ SkScalar B = 2 * (pts[1].fX - C);
+ SkScalar xt = poly_eval(A, B, C, t);
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ tangents->push_back(SkEvalQuadTangentAt(pts, t));
+ }
+}
+
+static void tangent_line(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y1 = pts[1].fY;
+ if (!between(y0, y, y1)) {
+ return;
+ }
+ SkScalar x0 = pts[0].fX;
+ SkScalar x1 = pts[1].fX;
+ if (!between(x0, x, x1)) {
+ return;
+ }
+ SkScalar dx = x1 - x0;
+ SkScalar dy = y1 - y0;
+ if (!SkScalarNearlyEqual((x - x0) * dy, dx * (y - y0))) {
+ return;
+ }
+ SkVector v;
+ v.set(dx, dy);
+ tangents->push_back(v);
+}
+
+static bool contains_inclusive(const SkRect& r, SkScalar x, SkScalar y) {
+ return r.fLeft <= x && x <= r.fRight && r.fTop <= y && y <= r.fBottom;
+}
+
+bool SkPath::contains(SkScalar x, SkScalar y) const {
+ bool isInverse = this->isInverseFillType();
+ if (this->isEmpty()) {
+ return isInverse;
+ }
+
+ if (!contains_inclusive(this->getBounds(), x, y)) {
+ return isInverse;
+ }
+
+ SkPath::Iter iter(*this, true);
+ bool done = false;
+ int w = 0;
+ int onCurveCount = 0;
+ do {
+ SkPoint pts[4];
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ w += winding_line(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kQuad_Verb:
+ w += winding_quad(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kConic_Verb:
+ w += winding_conic(pts, x, y, iter.conicWeight(), &onCurveCount);
+ break;
+ case SkPath::kCubic_Verb:
+ w += winding_cubic(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+ } while (!done);
+ bool evenOddFill = SkPath::kEvenOdd_FillType == this->getFillType()
+ || SkPath::kInverseEvenOdd_FillType == this->getFillType();
+ if (evenOddFill) {
+ w &= 1;
+ }
+ if (w) {
+ return !isInverse;
+ }
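+    // Zero winding: the point is outside unless it lies on the path itself.
+    // Touching exactly one curve means it is on the boundary, which counts as
+    // contained (inverted for inverse fills).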
+ if (onCurveCount <= 1) {
+ return SkToBool(onCurveCount) ^ isInverse;
+ }
+ if ((onCurveCount & 1) || evenOddFill) {
+ return SkToBool(onCurveCount & 1) ^ isInverse;
+ }
+ // If the point touches an even number of curves, and the fill is winding, check for
+ // coincidence. Count coincidence as places where the on curve points have identical tangents.
+ iter.setPath(*this, true);
+ done = false;
+ SkTDArray<SkVector> tangents;
+ do {
+ SkPoint pts[4];
+ int oldCount = tangents.count();
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ tangent_line(pts, x, y, &tangents);
+ break;
+ case SkPath::kQuad_Verb:
+ tangent_quad(pts, x, y, &tangents);
+ break;
+ case SkPath::kConic_Verb:
+ tangent_conic(pts, x, y, iter.conicWeight(), &tangents);
+ break;
+ case SkPath::kCubic_Verb:
+ tangent_cubic(pts, x, y, &tangents);
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+ if (tangents.count() > oldCount) {
+ int last = tangents.count() - 1;
+ const SkVector& tangent = tangents[last];
+ if (SkScalarNearlyZero(SkPointPriv::LengthSqd(tangent))) {
+ tangents.remove(last);
+ } else {
+ for (int index = 0; index < last; ++index) {
+ const SkVector& test = tangents[index];
+ if (SkScalarNearlyZero(test.cross(tangent))
+ && SkScalarSignAsInt(tangent.fX * test.fX) <= 0
+ && SkScalarSignAsInt(tangent.fY * test.fY) <= 0) {
+ tangents.remove(last);
+ tangents.removeShuffle(index);
+ break;
+ }
+ }
+ }
+ }
+ } while (!done);
+ return SkToBool(tangents.count()) ^ isInverse;
+}
+
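+// Approximates the conic with quads; per the SkPath header, pts must have room
+// for 1 + 2 * (1 << pow2) points, and the return value is the number of quads
+// written.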
+int SkPath::ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkScalar w, SkPoint pts[], int pow2) {
+ const SkConic conic(p0, p1, p2, w);
+ return conic.chopIntoQuadsPOW2(pts, pow2);
+}
+
+bool SkPathPriv::IsSimpleClosedRect(const SkPath& path, SkRect* rect, SkPath::Direction* direction,
+ unsigned* start) {
+ if (path.getSegmentMasks() != SkPath::kLine_SegmentMask) {
+ return false;
+ }
+ SkPath::RawIter iter(path);
+ SkPoint verbPts[4];
+ SkPath::Verb v;
+ SkPoint rectPts[5];
+ int rectPtCnt = 0;
+ while ((v = iter.next(verbPts)) != SkPath::kDone_Verb) {
+ switch (v) {
+ case SkPath::kMove_Verb:
+ if (0 != rectPtCnt) {
+ return false;
+ }
+ rectPts[0] = verbPts[0];
+ ++rectPtCnt;
+ break;
+ case SkPath::kLine_Verb:
+ if (5 == rectPtCnt) {
+ return false;
+ }
+ rectPts[rectPtCnt] = verbPts[1];
+ ++rectPtCnt;
+ break;
+ case SkPath::kClose_Verb:
+ if (4 == rectPtCnt) {
+ rectPts[4] = rectPts[0];
+ rectPtCnt = 5;
+ }
+ break;
+ default:
+ return false;
+ }
+ }
+ if (rectPtCnt < 5) {
+ return false;
+ }
+ if (rectPts[0] != rectPts[4]) {
+ return false;
+ }
+    // Check for two cases of rectangles: pts 0 and 3 form a vertical edge or a
+    // horizontal edge (and pts 1 and 2 the opposite vertical or horizontal edge).
+ bool vec03IsVertical;
+ if (rectPts[0].fX == rectPts[3].fX && rectPts[1].fX == rectPts[2].fX &&
+ rectPts[0].fY == rectPts[1].fY && rectPts[3].fY == rectPts[2].fY) {
+ // Make sure it has non-zero width and height
+ if (rectPts[0].fX == rectPts[1].fX || rectPts[0].fY == rectPts[3].fY) {
+ return false;
+ }
+ vec03IsVertical = true;
+ } else if (rectPts[0].fY == rectPts[3].fY && rectPts[1].fY == rectPts[2].fY &&
+ rectPts[0].fX == rectPts[1].fX && rectPts[3].fX == rectPts[2].fX) {
+ // Make sure it has non-zero width and height
+ if (rectPts[0].fY == rectPts[1].fY || rectPts[0].fX == rectPts[3].fX) {
+ return false;
+ }
+ vec03IsVertical = false;
+ } else {
+ return false;
+ }
+    // Set sortFlags so that it has the low bit set if pt index 0 is on the right
+    // edge and the second bit set if it is on the bottom edge.
+ unsigned sortFlags =
+ ((rectPts[0].fX < rectPts[2].fX) ? 0b00 : 0b01) |
+ ((rectPts[0].fY < rectPts[2].fY) ? 0b00 : 0b10);
+ switch (sortFlags) {
+ case 0b00:
+ rect->setLTRB(rectPts[0].fX, rectPts[0].fY, rectPts[2].fX, rectPts[2].fY);
+ *direction = vec03IsVertical ? SkPath::kCW_Direction : SkPath::kCCW_Direction;
+ *start = 0;
+ break;
+ case 0b01:
+ rect->setLTRB(rectPts[2].fX, rectPts[0].fY, rectPts[0].fX, rectPts[2].fY);
+ *direction = vec03IsVertical ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ *start = 1;
+ break;
+ case 0b10:
+ rect->setLTRB(rectPts[0].fX, rectPts[2].fY, rectPts[2].fX, rectPts[0].fY);
+ *direction = vec03IsVertical ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ *start = 3;
+ break;
+ case 0b11:
+ rect->setLTRB(rectPts[2].fX, rectPts[2].fY, rectPts[0].fX, rectPts[0].fY);
+ *direction = vec03IsVertical ? SkPath::kCW_Direction : SkPath::kCCW_Direction;
+ *start = 2;
+ break;
+ }
+ return true;
+}
+
+bool SkPathPriv::DrawArcIsConvex(SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect) {
+ if (isFillNoPathEffect && SkScalarAbs(sweepAngle) >= 360.f) {
+ // This gets converted to an oval.
+ return true;
+ }
+ if (useCenter) {
+ // This is a pie wedge. It's convex if the angle is <= 180.
+ return SkScalarAbs(sweepAngle) <= 180.f;
+ }
+ // When the angle exceeds 360 this wraps back on top of itself. Otherwise it is a circle clipped
+ // to a secant, i.e. convex.
+ return SkScalarAbs(sweepAngle) <= 360.f;
+}
+
+void SkPathPriv::CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect) {
+ SkASSERT(!oval.isEmpty());
+ SkASSERT(sweepAngle);
+
+ path->reset();
+ path->setIsVolatile(true);
+ path->setFillType(SkPath::kWinding_FillType);
+ if (isFillNoPathEffect && SkScalarAbs(sweepAngle) >= 360.f) {
+ path->addOval(oval);
+ SkASSERT(path->isConvex() && DrawArcIsConvex(sweepAngle, false, isFillNoPathEffect));
+ return;
+ }
+ if (useCenter) {
+ path->moveTo(oval.centerX(), oval.centerY());
+ }
+ auto firstDir =
+ sweepAngle > 0 ? SkPathPriv::kCW_FirstDirection : SkPathPriv::kCCW_FirstDirection;
+ bool convex = DrawArcIsConvex(sweepAngle, useCenter, isFillNoPathEffect);
+    // arcTo() mods (wraps) sweeps at 360, but drawArc() is not supposed to.
+ bool forceMoveTo = !useCenter;
+ while (sweepAngle <= -360.f) {
+ path->arcTo(oval, startAngle, -180.f, forceMoveTo);
+ startAngle -= 180.f;
+ path->arcTo(oval, startAngle, -180.f, false);
+ startAngle -= 180.f;
+ forceMoveTo = false;
+ sweepAngle += 360.f;
+ }
+ while (sweepAngle >= 360.f) {
+ path->arcTo(oval, startAngle, 180.f, forceMoveTo);
+ startAngle += 180.f;
+ path->arcTo(oval, startAngle, 180.f, false);
+ startAngle += 180.f;
+ forceMoveTo = false;
+ sweepAngle -= 360.f;
+ }
+ path->arcTo(oval, startAngle, sweepAngle, forceMoveTo);
+ if (useCenter) {
+ path->close();
+ }
+ path->setConvexity(convex ? SkPath::kConvex_Convexity : SkPath::kConcave_Convexity);
+ path->setFirstDirection(firstDir);
+}
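+
+// Example (illustrative; oval is a placeholder and assumed non-empty): builds
+// the path SkCanvas::drawArc would use for a 90-degree pie wedge.
+//
+//   SkPath wedge;
+//   SkPathPriv::CreateDrawArcPath(&wedge, oval, 0.0f, 90.0f,
+//                                 /*useCenter=*/true, /*isFillNoPathEffect=*/false);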
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "include/private/SkNx.h"
+
+static int compute_quad_extremas(const SkPoint src[3], SkPoint extremas[3]) {
+ SkScalar ts[2];
+ int n = SkFindQuadExtrema(src[0].fX, src[1].fX, src[2].fX, ts);
+ n += SkFindQuadExtrema(src[0].fY, src[1].fY, src[2].fY, &ts[n]);
+ SkASSERT(n >= 0 && n <= 2);
+ for (int i = 0; i < n; ++i) {
+ extremas[i] = SkEvalQuadAt(src, ts[i]);
+ }
+ extremas[n] = src[2];
+ return n + 1;
+}
+
+static int compute_conic_extremas(const SkPoint src[3], SkScalar w, SkPoint extremas[3]) {
+ SkConic conic(src[0], src[1], src[2], w);
+ SkScalar ts[2];
+ int n = conic.findXExtrema(ts);
+ n += conic.findYExtrema(&ts[n]);
+ SkASSERT(n >= 0 && n <= 2);
+ for (int i = 0; i < n; ++i) {
+ extremas[i] = conic.evalAt(ts[i]);
+ }
+ extremas[n] = src[2];
+ return n + 1;
+}
+
+static int compute_cubic_extremas(const SkPoint src[4], SkPoint extremas[5]) {
+ SkScalar ts[4];
+ int n = SkFindCubicExtrema(src[0].fX, src[1].fX, src[2].fX, src[3].fX, ts);
+ n += SkFindCubicExtrema(src[0].fY, src[1].fY, src[2].fY, src[3].fY, &ts[n]);
+ SkASSERT(n >= 0 && n <= 4);
+ for (int i = 0; i < n; ++i) {
+ SkEvalCubicAt(src, ts[i], &extremas[i], nullptr, nullptr);
+ }
+ extremas[n] = src[3];
+ return n + 1;
+}
+
+SkRect SkPath::computeTightBounds() const {
+ if (0 == this->countVerbs()) {
+ return SkRect::MakeEmpty();
+ }
+
+ if (this->getSegmentMasks() == SkPath::kLine_SegmentMask) {
+ return this->getBounds();
+ }
+
+ SkPoint extremas[5]; // big enough to hold worst-case curve type (cubic) extremas + 1
+ SkPoint pts[4];
+ SkPath::RawIter iter(*this);
+
+    // initialize with the first MoveTo, so we don't have to check inside the switch
+ Sk2s min, max;
+ min = max = from_point(this->getPoint(0));
+ for (;;) {
+ int count = 0;
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ extremas[0] = pts[0];
+ count = 1;
+ break;
+ case SkPath::kLine_Verb:
+ extremas[0] = pts[1];
+ count = 1;
+ break;
+ case SkPath::kQuad_Verb:
+ count = compute_quad_extremas(pts, extremas);
+ break;
+ case SkPath::kConic_Verb:
+ count = compute_conic_extremas(pts, iter.conicWeight(), extremas);
+ break;
+ case SkPath::kCubic_Verb:
+ count = compute_cubic_extremas(pts, extremas);
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ goto DONE;
+ }
+ for (int i = 0; i < count; ++i) {
+ Sk2s tmp = from_point(extremas[i]);
+ min = Sk2s::Min(min, tmp);
+ max = Sk2s::Max(max, tmp);
+ }
+ }
+DONE:
+ SkRect bounds;
+ min.store((SkPoint*)&bounds.fLeft);
+ max.store((SkPoint*)&bounds.fRight);
+ return bounds;
+}
+
+bool SkPath::IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact) {
+ return exact ? p1 == p2 : SkPointPriv::EqualsWithinTolerance(p1, p2);
+}
+
+bool SkPath::IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, bool exact) {
+ return exact ? p1 == p2 && p2 == p3 : SkPointPriv::EqualsWithinTolerance(p1, p2) &&
+ SkPointPriv::EqualsWithinTolerance(p2, p3);
+}
+
+bool SkPath::IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, const SkPoint& p4, bool exact) {
+ return exact ? p1 == p2 && p2 == p3 && p3 == p4 :
+ SkPointPriv::EqualsWithinTolerance(p1, p2) &&
+ SkPointPriv::EqualsWithinTolerance(p2, p3) &&
+ SkPointPriv::EqualsWithinTolerance(p3, p4);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkPathPriv::IsRectContour(const SkPath& path, bool allowPartial, int* currVerb,
+ const SkPoint** ptsPtr, bool* isClosed, SkPath::Direction* direction,
+ SkRect* rect) {
+ int corners = 0;
+ SkPoint closeXY; // used to determine if final line falls on a diagonal
+ SkPoint lineStart; // used to construct line from previous point
+ const SkPoint* firstPt = nullptr; // first point in the rect (last of first moves)
+ const SkPoint* lastPt = nullptr; // last point in the rect (last of lines or first if closed)
+ SkPoint firstCorner;
+ SkPoint thirdCorner;
+ const SkPoint* pts = *ptsPtr;
+ const SkPoint* savePts = nullptr; // used to allow caller to iterate through a pair of rects
+ lineStart.set(0, 0);
+ signed char directions[] = {-1, -1, -1, -1, -1}; // -1 to 3; -1 is uninitialized
+ bool closedOrMoved = false;
+ bool autoClose = false;
+ bool insertClose = false;
+ int verbCnt = path.fPathRef->countVerbs();
+ while (*currVerb < verbCnt && (!allowPartial || !autoClose)) {
+ uint8_t verb = insertClose ? (uint8_t) SkPath::kClose_Verb : path.fPathRef->atVerb(*currVerb);
+ switch (verb) {
+ case SkPath::kClose_Verb:
+ savePts = pts;
+ autoClose = true;
+ insertClose = false;
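+                // fall through: the close is handled as a line back to the first point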
+ case SkPath::kLine_Verb: {
+ if (SkPath::kClose_Verb != verb) {
+ lastPt = pts;
+ }
+ SkPoint lineEnd = SkPath::kClose_Verb == verb ? *firstPt : *pts++;
+ SkVector lineDelta = lineEnd - lineStart;
+ if (lineDelta.fX && lineDelta.fY) {
+ return false; // diagonal
+ }
+ if (!lineDelta.isFinite()) {
+ return false; // path contains infinity or NaN
+ }
+ if (lineStart == lineEnd) {
+ break; // single point on side OK
+ }
+ int nextDirection = rect_make_dir(lineDelta.fX, lineDelta.fY); // 0 to 3
+ if (0 == corners) {
+ directions[0] = nextDirection;
+ corners = 1;
+ closedOrMoved = false;
+ lineStart = lineEnd;
+ break;
+ }
+ if (closedOrMoved) {
+ return false; // closed followed by a line
+ }
+ if (autoClose && nextDirection == directions[0]) {
+                    break; // collinear with first
+ }
+ closedOrMoved = autoClose;
+ if (directions[corners - 1] == nextDirection) {
+ if (3 == corners && SkPath::kLine_Verb == verb) {
+ thirdCorner = lineEnd;
+ }
+ lineStart = lineEnd;
+                    break; // collinear segment
+ }
+ directions[corners++] = nextDirection;
+ // opposite lines must point in opposite directions; xoring them should equal 2
+ switch (corners) {
+ case 2:
+ firstCorner = lineStart;
+ break;
+ case 3:
+ if ((directions[0] ^ directions[2]) != 2) {
+ return false;
+ }
+ thirdCorner = lineEnd;
+ break;
+ case 4:
+ if ((directions[1] ^ directions[3]) != 2) {
+ return false;
+ }
+ break;
+ default:
+ return false; // too many direction changes
+ }
+ lineStart = lineEnd;
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ case SkPath::kCubic_Verb:
+ return false; // quadratic, cubic not allowed
+ case SkPath::kMove_Verb:
+ if (allowPartial && !autoClose && directions[0] >= 0) {
+ insertClose = true;
+ *currVerb -= 1; // try move again afterwards
+ goto addMissingClose;
+ }
+ if (pts != *ptsPtr) {
+ return false;
+ }
+ if (!corners) {
+ firstPt = pts;
+ } else {
+ closeXY = *firstPt - *lastPt;
+ if (closeXY.fX && closeXY.fY) {
+ return false; // we're diagonal, abort
+ }
+ }
+ lineStart = *pts++;
+ closedOrMoved = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ *currVerb += 1;
+ addMissingClose:
+ ;
+ }
+    // Success requires 3 or 4 corners, with the first point equal to the last
+ if (corners < 3 || corners > 4) {
+ return false;
+ }
+ if (savePts) {
+ *ptsPtr = savePts;
+ }
+ // check if close generates diagonal
+ closeXY = *firstPt - *lastPt;
+ if (closeXY.fX && closeXY.fY) {
+ return false;
+ }
+ if (rect) {
+ rect->set(firstCorner, thirdCorner);
+ }
+ if (isClosed) {
+ *isClosed = autoClose;
+ }
+ if (direction) {
+ *direction = directions[0] == ((directions[1] + 1) & 3) ?
+ SkPath::kCW_Direction : SkPath::kCCW_Direction;
+ }
+ return true;
+}
+
+
+bool SkPathPriv::IsNestedFillRects(const SkPath& path, SkRect rects[2], SkPath::Direction dirs[2]) {
+ SkDEBUGCODE(path.validate();)
+ int currVerb = 0;
+ const SkPoint* pts = path.fPathRef->points();
+ SkPath::Direction testDirs[2];
+ SkRect testRects[2];
+ if (!IsRectContour(path, true, &currVerb, &pts, nullptr, &testDirs[0], &testRects[0])) {
+ return false;
+ }
+ if (IsRectContour(path, false, &currVerb, &pts, nullptr, &testDirs[1], &testRects[1])) {
+ if (testRects[0].contains(testRects[1])) {
+ if (rects) {
+ rects[0] = testRects[0];
+ rects[1] = testRects[1];
+ }
+ if (dirs) {
+ dirs[0] = testDirs[0];
+ dirs[1] = testDirs[1];
+ }
+ return true;
+ }
+ if (testRects[1].contains(testRects[0])) {
+ if (rects) {
+ rects[0] = testRects[1];
+ rects[1] = testRects[0];
+ }
+ if (dirs) {
+ dirs[0] = testDirs[1];
+ dirs[1] = testDirs[0];
+ }
+ return true;
+ }
+ }
+ return false;
+}
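+
+// Example (illustrative; framePath is a placeholder): detecting a fill that
+// reduces to a rectangular frame.
+//
+//   SkRect rects[2];
+//   SkPath::Direction dirs[2];
+//   if (SkPathPriv::IsNestedFillRects(framePath, rects, dirs)) {
+//       // rects[0] is the outer rect and rects[1] the inner rect.
+//   }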
diff --git a/gfx/skia/skia/src/core/SkPathEffect.cpp b/gfx/skia/skia/src/core/SkPathEffect.cpp
new file mode 100644
index 0000000000..f5cb185e23
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathEffect.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPathEffect::filterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* bounds) const {
+ SkPath tmp, *tmpDst = dst;
+ if (dst == &src) {
+ tmpDst = &tmp;
+ }
+ if (this->onFilterPath(tmpDst, src, rec, bounds)) {
+ if (dst == &src) {
+ *dst = tmp;
+ }
+ return true;
+ }
+ return false;
+}
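+
+// Example (illustrative sketch; effect and src are placeholders):
+//
+//   SkPath dst;
+//   SkStrokeRec rec(SkStrokeRec::kHairline_InitStyle);
+//   if (effect->filterPath(&dst, src, &rec, nullptr)) {
+//       // dst holds the filtered geometry; rec may now describe a new stroke.
+//   }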
+
+void SkPathEffect::computeFastBounds(SkRect* dst, const SkRect& src) const {
+ *dst = this->onComputeFastBounds(src);
+}
+
+bool SkPathEffect::asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec& rec, const SkMatrix& mx, const SkRect* rect) const {
+ return this->onAsPoints(results, src, rec, mx, rect);
+}
+
+SkPathEffect::DashType SkPathEffect::asADash(DashInfo* info) const {
+ return this->onAsADash(info);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPairPathEffect
+
+    Common base class for Compose and Sum. This subclass manages two pathEffects,
+ including flattening them. It does nothing in filterPath, and is only useful
+ for managing the lifetimes of its two arguments.
+ */
+class SkPairPathEffect : public SkPathEffect {
+protected:
+ SkPairPathEffect(sk_sp<SkPathEffect> pe0, sk_sp<SkPathEffect> pe1)
+ : fPE0(std::move(pe0)), fPE1(std::move(pe1))
+ {
+ SkASSERT(fPE0.get());
+ SkASSERT(fPE1.get());
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fPE0.get());
+ buffer.writeFlattenable(fPE1.get());
+ }
+
+ // these are visible to our subclasses
+ sk_sp<SkPathEffect> fPE0;
+ sk_sp<SkPathEffect> fPE1;
+
+private:
+ typedef SkPathEffect INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkComposePathEffect
+
+ This subclass of SkPathEffect composes its two arguments, to create
+ a compound pathEffect.
+ */
+class SkComposePathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply first the inner pathEffect
+        and then the outer pathEffect (e.g. outer(inner(path))).
+ The reference counts for outer and inner are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ return sk_sp<SkPathEffect>(new SkComposePathEffect(outer, inner));
+ }
+
+protected:
+ SkComposePathEffect(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner)
+ : INHERITED(outer, inner) {}
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const override {
+ SkPath tmp;
+ const SkPath* ptr = &src;
+
+ if (fPE1->filterPath(&tmp, src, rec, cullRect)) {
+ ptr = &tmp;
+ }
+ return fPE0->filterPath(dst, *ptr, rec, cullRect);
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkComposePathEffect)
+
+ // illegal
+ SkComposePathEffect(const SkComposePathEffect&);
+ SkComposePathEffect& operator=(const SkComposePathEffect&);
+ friend class SkPathEffect;
+
+ typedef SkPairPathEffect INHERITED;
+};
+
+sk_sp<SkFlattenable> SkComposePathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkComposePathEffect::Make(std::move(pe0), std::move(pe1));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** \class SkSumPathEffect
+
+ This subclass of SkPathEffect applies two pathEffects, one after the other.
+ Its filterPath() returns true if either of the effects succeeded.
+ */
+class SkSumPathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply two effects, in sequence.
+ (e.g. first(path) + second(path))
+ The reference counts for first and second are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second) {
+ if (!first) {
+ return second;
+ }
+ if (!second) {
+ return first;
+ }
+ return sk_sp<SkPathEffect>(new SkSumPathEffect(first, second));
+ }
+
+ SK_FLATTENABLE_HOOKS(SkSumPathEffect)
+
+protected:
+ SkSumPathEffect(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second)
+ : INHERITED(first, second) {}
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const override {
+ // use bit-or so that we always call both, even if the first one succeeds
+ return fPE0->filterPath(dst, src, rec, cullRect) |
+ fPE1->filterPath(dst, src, rec, cullRect);
+ }
+
+private:
+ // illegal
+ SkSumPathEffect(const SkSumPathEffect&);
+ SkSumPathEffect& operator=(const SkSumPathEffect&);
+ friend class SkPathEffect;
+
+ typedef SkPairPathEffect INHERITED;
+};
+
+sk_sp<SkFlattenable> SkSumPathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkSumPathEffect::Make(pe0, pe1);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkPathEffect::MakeSum(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second) {
+ return SkSumPathEffect::Make(std::move(first), std::move(second));
+}
+
+sk_sp<SkPathEffect> SkPathEffect::MakeCompose(sk_sp<SkPathEffect> outer,
+ sk_sp<SkPathEffect> inner) {
+ return SkComposePathEffect::Make(std::move(outer), std::move(inner));
+}
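+
+// Example (illustrative): composing two effects from include/effects (assumed
+// available here); the dash is applied to the corner-rounded path, i.e.
+// dash(corner(path)).
+//
+//   const SkScalar intervals[] = { 10, 5 };
+//   sk_sp<SkPathEffect> dash = SkDashPathEffect::Make(intervals, 2, 0);
+//   sk_sp<SkPathEffect> corner = SkCornerPathEffect::Make(4);
+//   sk_sp<SkPathEffect> both = SkPathEffect::MakeCompose(dash, corner);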
+
+void SkPathEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkComposePathEffect);
+ SK_REGISTER_FLATTENABLE(SkSumPathEffect);
+}
diff --git a/gfx/skia/skia/src/core/SkPathMakers.h b/gfx/skia/skia/src/core/SkPathMakers.h
new file mode 100644
index 0000000000..a4ef7e1215
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMakers.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMakers_DEFINED
+#define SkPathMakers_DEFINED
+
+#include "include/core/SkPath.h" // just for direction
+#include "include/core/SkPoint.h"
+#include "include/core/SkRRect.h"
+
+template <unsigned N> class SkPath_PointIterator {
+public:
+ SkPath_PointIterator(SkPath::Direction dir, unsigned startIndex)
+ : fCurrent(startIndex % N)
+ , fAdvance(dir == SkPath::kCW_Direction ? 1 : N - 1) { }
+
+ const SkPoint& current() const {
+ SkASSERT(fCurrent < N);
+ return fPts[fCurrent];
+ }
+
+ const SkPoint& next() {
+ fCurrent = (fCurrent + fAdvance) % N;
+ return this->current();
+ }
+
+protected:
+    SkPoint fPts[N];
+
+private:
+    unsigned fCurrent;
+    unsigned fAdvance;
+};
+
+class SkPath_RectPointIterator : public SkPath_PointIterator<4> {
+public:
+ SkPath_RectPointIterator(const SkRect& rect, SkPath::Direction dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ fPts[0] = SkPoint::Make(rect.fLeft, rect.fTop);
+ fPts[1] = SkPoint::Make(rect.fRight, rect.fTop);
+ fPts[2] = SkPoint::Make(rect.fRight, rect.fBottom);
+ fPts[3] = SkPoint::Make(rect.fLeft, rect.fBottom);
+ }
+};
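+
+// Example (illustrative; rect is a placeholder): corners are stored in the
+// order TL, TR, BR, BL, so starting at index 2 walks BR -> BL when clockwise.
+//
+//   SkPath_RectPointIterator it(rect, SkPath::kCW_Direction, 2);
+//   SkPoint bottomRight = it.current();
+//   SkPoint bottomLeft  = it.next();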
+
+class SkPath_OvalPointIterator : public SkPath_PointIterator<4> {
+public:
+ SkPath_OvalPointIterator(const SkRect& oval, SkPath::Direction dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ const SkScalar cx = oval.centerX();
+ const SkScalar cy = oval.centerY();
+
+ fPts[0] = SkPoint::Make(cx, oval.fTop);
+ fPts[1] = SkPoint::Make(oval.fRight, cy);
+ fPts[2] = SkPoint::Make(cx, oval.fBottom);
+ fPts[3] = SkPoint::Make(oval.fLeft, cy);
+ }
+};
+
+class SkPath_RRectPointIterator : public SkPath_PointIterator<8> {
+public:
+ SkPath_RRectPointIterator(const SkRRect& rrect, SkPath::Direction dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ const SkRect& bounds = rrect.getBounds();
+ const SkScalar L = bounds.fLeft;
+ const SkScalar T = bounds.fTop;
+ const SkScalar R = bounds.fRight;
+ const SkScalar B = bounds.fBottom;
+
+ fPts[0] = SkPoint::Make(L + rrect.radii(SkRRect::kUpperLeft_Corner).fX, T);
+ fPts[1] = SkPoint::Make(R - rrect.radii(SkRRect::kUpperRight_Corner).fX, T);
+ fPts[2] = SkPoint::Make(R, T + rrect.radii(SkRRect::kUpperRight_Corner).fY);
+ fPts[3] = SkPoint::Make(R, B - rrect.radii(SkRRect::kLowerRight_Corner).fY);
+ fPts[4] = SkPoint::Make(R - rrect.radii(SkRRect::kLowerRight_Corner).fX, B);
+ fPts[5] = SkPoint::Make(L + rrect.radii(SkRRect::kLowerLeft_Corner).fX, B);
+ fPts[6] = SkPoint::Make(L, B - rrect.radii(SkRRect::kLowerLeft_Corner).fY);
+ fPts[7] = SkPoint::Make(L, T + rrect.radii(SkRRect::kUpperLeft_Corner).fY);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMeasure.cpp b/gfx/skia/skia/src/core/SkPathMeasure.cpp
new file mode 100644
index 0000000000..445e6a12ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasure.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPathMeasure.h"
+
+SkPathMeasure::SkPathMeasure() {}
+
+SkPathMeasure::SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale)
+ : fIter(path, forceClosed, resScale)
+{
+ fContour = fIter.next();
+}
+
+SkPathMeasure::~SkPathMeasure() {}
+
+void SkPathMeasure::setPath(const SkPath* path, bool forceClosed) {
+ fIter.reset(path ? *path : SkPath(), forceClosed);
+ fContour = fIter.next();
+}
+
+SkScalar SkPathMeasure::getLength() {
+ return fContour ? fContour->length() : 0;
+}
+
+bool SkPathMeasure::getPosTan(SkScalar distance, SkPoint* position, SkVector* tangent) {
+ return fContour && fContour->getPosTan(distance, position, tangent);
+}
+
+bool SkPathMeasure::getMatrix(SkScalar distance, SkMatrix* matrix, MatrixFlags flags) {
+ return fContour && fContour->getMatrix(distance, matrix, (SkContourMeasure::MatrixFlags)flags);
+}
+
+bool SkPathMeasure::getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo) {
+ return fContour && fContour->getSegment(startD, stopD, dst, startWithMoveTo);
+}
+
+bool SkPathMeasure::isClosed() {
+ return fContour && fContour->isClosed();
+}
+
+bool SkPathMeasure::nextContour() {
+ fContour = fIter.next();
+ return !!fContour;
+}
+
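+// Example (illustrative; path is a placeholder): sample the midpoint of every
+// contour.
+//
+//   SkPathMeasure measure(path, /*forceClosed=*/false);
+//   do {
+//       SkPoint mid;
+//       if (measure.getPosTan(measure.getLength() * 0.5f, &mid, nullptr)) {
+//           // use mid
+//       }
+//   } while (measure.nextContour());
+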
+#ifdef SK_DEBUG
+void SkPathMeasure::dump() {}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMeasurePriv.h b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
new file mode 100644
index 0000000000..dbad22b622
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasurePriv_DEFINED
+#define SkPathMeasurePriv_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "src/core/SkGeometry.h"
+
+// Used in the Segment struct defined in SkPathMeasure.h
+// It is used as a 2-bit field so if you add to this
+// you must increase the size of the bitfield there.
+enum SkSegType {
+ kLine_SegType,
+ kQuad_SegType,
+ kCubic_SegType,
+ kConic_SegType,
+};
+
+
+void SkPathMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst);
+
+#endif // SkPathMeasurePriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPathPriv.h b/gfx/skia/skia/src/core/SkPathPriv.h
new file mode 100644
index 0000000000..52d4685ba6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathPriv.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathPriv_DEFINED
+#define SkPathPriv_DEFINED
+
+#include "include/core/SkPath.h"
+
+class SkPathPriv {
+public:
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ static const int kPathRefGenIDBitCnt = 30; // leave room for the fill type (skbug.com/1762)
+#else
+ static const int kPathRefGenIDBitCnt = 32;
+#endif
+
+ enum FirstDirection : int {
+ kCW_FirstDirection, // == SkPath::kCW_Direction
+ kCCW_FirstDirection, // == SkPath::kCCW_Direction
+ kUnknown_FirstDirection,
+ };
+
+ static FirstDirection AsFirstDirection(SkPath::Direction dir) {
+ // since we agree numerically for the values in Direction, we can just cast.
+ return (FirstDirection)dir;
+ }
+
+ /**
+ * Return the opposite of the specified direction. kUnknown is its own
+ * opposite.
+ */
+ static FirstDirection OppositeFirstDirection(FirstDirection dir) {
+ static const FirstDirection gOppositeDir[] = {
+ kCCW_FirstDirection, kCW_FirstDirection, kUnknown_FirstDirection,
+ };
+ return gOppositeDir[dir];
+ }
+
+ /**
+ * Tries to quickly compute the direction of the first non-degenerate
+ * contour. If it can be computed, return true and set dir to that
+ * direction. If it cannot be (quickly) determined, return false and ignore
+ * the dir parameter. If the direction was determined, it is cached to make
+ * subsequent calls return quickly.
+ */
+ static bool CheapComputeFirstDirection(const SkPath&, FirstDirection* dir);
+
+ /**
+ * Returns true if the path's direction can be computed via
+     * CheapComputeFirstDirection() and if that computed direction matches the
+ * specified direction. If dir is kUnknown, returns true if the direction
+ * cannot be computed.
+ */
+ static bool CheapIsFirstDirection(const SkPath& path, FirstDirection dir) {
+ FirstDirection computedDir = kUnknown_FirstDirection;
+ (void)CheapComputeFirstDirection(path, &computedDir);
+ return computedDir == dir;
+ }
+
+ static bool IsClosedSingleContour(const SkPath& path) {
+ int verbCount = path.countVerbs();
+ if (verbCount == 0)
+ return false;
+ int moveCount = 0;
+ auto verbs = path.fPathRef->verbsBegin();
+ for (int i = 0; i < verbCount; i++) {
+ switch (verbs[i]) {
+ case SkPath::Verb::kMove_Verb:
+ moveCount += 1;
+ if (moveCount > 1) {
+ return false;
+ }
+ break;
+ case SkPath::Verb::kClose_Verb:
+ if (i == verbCount - 1) {
+ return true;
+ }
+ return false;
+ default: break;
+ }
+ }
+ return false;
+ }
+
+ static void AddGenIDChangeListener(const SkPath& path,
+ sk_sp<SkPathRef::GenIDChangeListener> listener) {
+ path.fPathRef->addGenIDChangeListener(std::move(listener));
+ }
+
+ /**
+ * This returns true for a rect that begins and ends at the same corner and has either a move
+ * followed by four lines or a move followed by 3 lines and a close. None of the parameters are
+ * optional. This does not permit degenerate line or point rectangles.
+ */
+ static bool IsSimpleClosedRect(const SkPath& path, SkRect* rect, SkPath::Direction* direction,
+ unsigned* start);
+
+ /**
+ * Creates a path from arc params using the semantics of SkCanvas::drawArc. This function
+ * assumes empty ovals and zero sweeps have already been filtered out.
+ */
+ static void CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect);
+
+ /**
+ * Determines whether an arc produced by CreateDrawArcPath will be convex. Assumes a non-empty
+ * oval.
+ */
+ static bool DrawArcIsConvex(SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect);
+
+ /**
+ * Returns a C++11-iterable object that traverses a path's verbs in order. e.g:
+ *
+ * for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
+ * ...
+ * }
+ */
+ struct Verbs {
+ public:
+ Verbs(const SkPath& path) : fPathRef(path.fPathRef.get()) {}
+ struct Iter {
+ void operator++() { fVerb++; }
+ bool operator!=(const Iter& b) { return fVerb != b.fVerb; }
+ SkPath::Verb operator*() { return static_cast<SkPath::Verb>(*fVerb); }
+ const uint8_t* fVerb;
+ };
+ Iter begin() { return Iter{fPathRef->verbsBegin()}; }
+ Iter end() { return Iter{fPathRef->verbsEnd()}; }
+ private:
+ Verbs(const Verbs&) = delete;
+ Verbs& operator=(const Verbs&) = delete;
+ SkPathRef* fPathRef;
+ };
+
+ /**
+ * Returns a pointer to the verb data.
+ */
+ static const uint8_t* VerbData(const SkPath& path) {
+ return path.fPathRef->verbsBegin();
+ }
+
+ /** Returns a raw pointer to the path points */
+ static const SkPoint* PointData(const SkPath& path) {
+ return path.fPathRef->points();
+ }
+
+ /** Returns the number of conic weights in the path */
+ static int ConicWeightCnt(const SkPath& path) {
+ return path.fPathRef->countWeights();
+ }
+
+ /** Returns a raw pointer to the path conic weights. */
+ static const SkScalar* ConicWeightData(const SkPath& path) {
+ return path.fPathRef->conicWeights();
+ }
+
+#ifndef SK_LEGACY_PATH_CONVEXITY
+ /** Returns true if path formed by pts is convex.
+
+ @param pts SkPoint array of path
+ @param count number of entries in array
+
+ @return true if pts represent a convex geometry
+ */
+ static bool IsConvex(const SkPoint pts[], int count);
+#endif
+
+ /** Returns true if the underlying SkPathRef has one single owner. */
+ static bool TestingOnly_unique(const SkPath& path) {
+ return path.fPathRef->unique();
+ }
+
+ /** Returns true if constructed by addCircle(), addOval(); and in some cases,
+ addRoundRect(), addRRect(). SkPath constructed with conicTo() or rConicTo() will not
+        return true even though the SkPath draws an oval.
+
+ rect receives bounds of oval.
+ dir receives SkPath::Direction of oval: kCW_Direction if clockwise, kCCW_Direction if
+ counterclockwise.
+ start receives start of oval: 0 for top, 1 for right, 2 for bottom, 3 for left.
+
+ rect, dir, and start are unmodified if oval is not found.
+
+ Triggers performance optimizations on some GPU surface implementations.
+
+ @param rect storage for bounding SkRect of oval; may be nullptr
+ @param dir storage for SkPath::Direction; may be nullptr
+ @param start storage for start of oval; may be nullptr
+ @return true if SkPath was constructed by method that reduces to oval
+ */
+ static bool IsOval(const SkPath& path, SkRect* rect, SkPath::Direction* dir, unsigned* start) {
+ bool isCCW = false;
+ bool result = path.fPathRef->isOval(rect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ }
+ return result;
+ }
+
+ /** Returns true if constructed by addRoundRect(), addRRect(); and if construction
+ is not empty, not SkRect, and not oval. SkPath constructed with other calls
+ will not return true though SkPath draws SkRRect.
+
+ rrect receives bounds of SkRRect.
+ dir receives SkPath::Direction of oval: kCW_Direction if clockwise, kCCW_Direction if
+ counterclockwise.
+ start receives start of SkRRect: 0 for top, 1 for right, 2 for bottom, 3 for left.
+
+ rrect, dir, and start are unmodified if SkRRect is not found.
+
+ Triggers performance optimizations on some GPU surface implementations.
+
+ @param rrect storage for bounding SkRect of SkRRect; may be nullptr
+ @param dir storage for SkPath::Direction; may be nullptr
+ @param start storage for start of SkRRect; may be nullptr
+ @return true if SkPath contains only SkRRect
+ */
+ static bool IsRRect(const SkPath& path, SkRRect* rrect, SkPath::Direction* dir,
+ unsigned* start) {
+ bool isCCW = false;
+ bool result = path.fPathRef->isRRect(rrect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ }
+ return result;
+ }
+
+ /**
+ * Sometimes in the drawing pipeline, we have to perform math on path coordinates, even after
+ * the path is in device-coordinates. Tessellation and clipping are two examples. Usually this
+ * is pretty modest, but it can involve subtracting/adding coordinates, or multiplying by
+     * small constants (e.g. 2,3,4). To try to preflight issues where these operations could turn
+ * finite path values into infinities (or NaNs), we allow the upper drawing code to reject
+ * the path if its bounds (in device coordinates) is too close to max float.
+ */
+ static bool TooBigForMath(const SkRect& bounds) {
+ // This value is just a guess. smaller is safer, but we don't want to reject largish paths
+ // that we don't have to.
+ constexpr SkScalar scale_down_to_allow_for_small_multiplies = 0.25f;
+ constexpr SkScalar max = SK_ScalarMax * scale_down_to_allow_for_small_multiplies;
+
+ // use ! expression so we return true if bounds contains NaN
+ return !(bounds.fLeft >= -max && bounds.fTop >= -max &&
+ bounds.fRight <= max && bounds.fBottom <= max);
+ }
+ static bool TooBigForMath(const SkPath& path) {
+ return TooBigForMath(path.getBounds());
+ }
+
+ // Returns number of valid points for each SkPath::Iter verb
+ static int PtsInIter(unsigned verb) {
+ static const uint8_t gPtsInVerb[] = {
+ 1, // kMove pts[0]
+ 2, // kLine pts[0..1]
+ 3, // kQuad pts[0..2]
+ 3, // kConic pts[0..2]
+ 4, // kCubic pts[0..3]
+ 0, // kClose
+ 0 // kDone
+ };
+
+ SkASSERT(verb < SK_ARRAY_COUNT(gPtsInVerb));
+ return gPtsInVerb[verb];
+ }
+
+ static bool IsAxisAligned(const SkPath& path) {
+ SkRect tmp;
+ return (path.fPathRef->fIsRRect | path.fPathRef->fIsOval) || path.isRect(&tmp);
+ }
+
+ static bool AllPointsEq(const SkPoint pts[], int count) {
+ for (int i = 1; i < count; ++i) {
+ if (pts[0] != pts[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static bool IsRectContour(const SkPath&, bool allowPartial, int* currVerb,
+ const SkPoint** ptsPtr, bool* isClosed, SkPath::Direction* direction,
+ SkRect* rect);
+
+ /** Returns true if SkPath is equivalent to nested SkRect pair when filled.
+ If false, rect and dirs are unchanged.
+ If true, rect and dirs are written to if not nullptr:
+ setting rect[0] to outer SkRect, and rect[1] to inner SkRect;
+ setting dirs[0] to SkPath::Direction of outer SkRect, and dirs[1] to SkPath::Direction of
+ inner SkRect.
+
+ @param rect storage for SkRect pair; may be nullptr
+ @param dirs storage for SkPath::Direction pair; may be nullptr
+ @return true if SkPath contains nested SkRect pair
+ */
+ static bool IsNestedFillRects(const SkPath&, SkRect rect[2],
+ SkPath::Direction dirs[2] = nullptr);
+};
+
+// Lightweight variant of SkPath::Iter that only returns segments (e.g. lines/conics);
+// it never returns kMove or kClose and always "auto-closes" each contour.
+// Roughly equivalent to SkPath::Iter(path, true) without the moves and closes.
+//
+class SkPathEdgeIter {
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbsStop;
+ const SkPoint* fPts;
+ const SkPoint* fMoveToPtr;
+ const SkScalar* fConicWeights;
+ SkPoint fScratch[2]; // for auto-close lines
+ bool fNeedsCloseLine;
+ SkDEBUGCODE(bool fIsConic);
+
+ enum {
+ kIllegalEdgeValue = 99
+ };
+
+public:
+ SkPathEdgeIter(const SkPath& path);
+
+ SkScalar conicWeight() const {
+ SkASSERT(fIsConic);
+ return *fConicWeights;
+ }
+
+ enum class Edge {
+ kLine = SkPath::kLine_Verb,
+ kQuad = SkPath::kQuad_Verb,
+ kConic = SkPath::kConic_Verb,
+ kCubic = SkPath::kCubic_Verb,
+ };
+
+ static SkPath::Verb EdgeToVerb(Edge e) {
+ return SkPath::Verb(e);
+ }
+
+ struct Result {
+ const SkPoint* fPts; // points for the segment, or null if done
+ Edge fEdge;
+
+ // Returns true when it holds an Edge, false when the path is done.
+ MOZ_IMPLICIT operator bool() { return fPts != nullptr; }
+ };
+
+ Result next() {
+ auto closeline = [&]() {
+ fScratch[0] = fPts[-1];
+ fScratch[1] = *fMoveToPtr;
+ fNeedsCloseLine = false;
+ return Result{ fScratch, Edge::kLine };
+ };
+
+ for (;;) {
+ SkASSERT(fVerbs <= fVerbsStop);
+ if (fVerbs == fVerbsStop) {
+ return fNeedsCloseLine
+ ? closeline()
+ : Result{ nullptr, Edge(kIllegalEdgeValue) };
+ }
+
+ SkDEBUGCODE(fIsConic = false;)
+
+ const auto v = *fVerbs++;
+ switch (v) {
+ case SkPath::kMove_Verb: {
+ if (fNeedsCloseLine) {
+ auto res = closeline();
+ fMoveToPtr = fPts++;
+ return res;
+ }
+ fMoveToPtr = fPts++;
+ } break;
+ case SkPath::kClose_Verb:
+ if (fNeedsCloseLine) return closeline();
+ break;
+ default: {
+ // Actual edge.
+ const int pts_count = (v+2) / 2,
+ cws_count = (v & (v-1)) / 2;
+ SkASSERT(pts_count == SkPathPriv::PtsInIter(v) - 1);
+
+ fNeedsCloseLine = true;
+ fPts += pts_count;
+ fConicWeights += cws_count;
+
+ SkDEBUGCODE(fIsConic = (v == SkPath::kConic_Verb);)
+ SkASSERT(fIsConic == (cws_count > 0));
+
+ return { &fPts[-(pts_count + 1)], Edge(v) };
+ }
+ }
+ }
+ }
+};
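+
+// Example (illustrative; path is a placeholder): visit every edge, including
+// the implicit closing lines, with no move/close handling needed. fPts spans
+// 2/3/3/4 points for line/quad/conic/cubic edges.
+//
+//   SkPathEdgeIter it(path);
+//   while (auto e = it.next()) {
+//       if (e.fEdge == SkPathEdgeIter::Edge::kConic) {
+//           SkScalar w = it.conicWeight();
+//       }
+//   }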
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathRef.cpp b/gfx/skia/skia/src/core/SkPathRef.cpp
new file mode 100644
index 0000000000..561fed7961
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathRef.cpp
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkPathRef.h"
+
+#include "include/core/SkPath.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBuffer.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkSafeMath.h"
+
+//////////////////////////////////////////////////////////////////////////////
+SkPathRef::Editor::Editor(sk_sp<SkPathRef>* pathRef,
+ int incReserveVerbs,
+ int incReservePoints)
+{
+ SkASSERT(incReserveVerbs >= 0);
+ SkASSERT(incReservePoints >= 0);
+
+ if ((*pathRef)->unique()) {
+ (*pathRef)->incReserve(incReserveVerbs, incReservePoints);
+ } else {
+ SkPathRef* copy = new SkPathRef;
+ copy->copy(**pathRef, incReserveVerbs, incReservePoints);
+ pathRef->reset(copy);
+ }
+ fPathRef = pathRef->get();
+ fPathRef->callGenIDChangeListeners();
+ fPathRef->fGenerationID = 0;
+ fPathRef->fBoundsIsDirty = true;
+ SkDEBUGCODE(fPathRef->fEditorsAttached++;)
+}
+
+// Sort of like makeSpace(0) but with the additional requirement that we actively shrink the
+// allocations to just fit the current needs. makeSpace() will only grow, but never shrinks.
+//
+void SkPath::shrinkToFit() {
+ fPathRef->fPoints.shrinkToFit();
+ fPathRef->fVerbs.shrinkToFit();
+ fPathRef->fConicWeights.shrinkToFit();
+ SkDEBUGCODE(fPathRef->validate();)
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkPathRef::~SkPathRef() {
+ // Deliberately don't validate() this path ref, otherwise there's no way
+ // to read one that's not valid and then free its memory without asserting.
+ this->callGenIDChangeListeners();
+ SkASSERT(fGenIDChangeListeners.empty()); // These are raw ptrs.
+ SkDEBUGCODE(fGenerationID = 0xEEEEEEEE;)
+ SkDEBUGCODE(fEditorsAttached.store(0x7777777);)
+}
+
+static SkPathRef* gEmpty = nullptr;
+
+SkPathRef* SkPathRef::CreateEmpty() {
+ static SkOnce once;
+ once([]{
+ gEmpty = new SkPathRef;
+ gEmpty->computeBounds(); // Avoids races later to be the first to do this.
+ });
+ return SkRef(gEmpty);
+}
+
+static void transform_dir_and_start(const SkMatrix& matrix, bool isRRect, bool* isCCW,
+ unsigned* start) {
+ int inStart = *start;
+ int rm = 0;
+ if (isRRect) {
+ // Degenerate rrect indices to oval indices and remember the remainder.
+ // Ovals have one index per side whereas rrects have two.
+ rm = inStart & 0b1;
+ inStart /= 2;
+ }
+ // Is the antidiagonal non-zero (otherwise the diagonal is zero)
+ int antiDiag;
+ // Is the non-zero value in the top row (either kMScaleX or kMSkewX) negative
+ int topNeg;
+ // Are the two non-zero diagonal or antidiagonal values the same sign.
+ int sameSign;
+ if (matrix.get(SkMatrix::kMScaleX) != 0) {
+ antiDiag = 0b00;
+ if (matrix.get(SkMatrix::kMScaleX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b00 : 0b01;
+ }
+ } else {
+ antiDiag = 0b01;
+ if (matrix.get(SkMatrix::kMSkewX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b00 : 0b01;
+ }
+ }
+ if (sameSign != antiDiag) {
+ // This is a rotation (and maybe scale). The direction is unchanged.
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (inStart + 4 - (topNeg | antiDiag)) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + rm;
+ }
+ } else {
+ // This is a mirror (and maybe scale). The direction is reversed.
+ *isCCW = !*isCCW;
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (6 + (topNeg | antiDiag) - inStart) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + (rm ? 0 : 1);
+ }
+ }
+}
+
+void SkPathRef::CreateTransformedCopy(sk_sp<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix) {
+ SkDEBUGCODE(src.validate();)
+ if (matrix.isIdentity()) {
+ if (dst->get() != &src) {
+ src.ref();
+ dst->reset(const_cast<SkPathRef*>(&src));
+ SkDEBUGCODE((*dst)->validate();)
+ }
+ return;
+ }
+
+ if (!(*dst)->unique()) {
+ dst->reset(new SkPathRef);
+ }
+
+ if (dst->get() != &src) {
+ (*dst)->fPoints = src.fPoints;
+ (*dst)->fVerbs = src.fVerbs;
+ (*dst)->fConicWeights = src.fConicWeights;
+ (*dst)->callGenIDChangeListeners();
+ (*dst)->fGenerationID = 0; // mark as dirty
+ }
+
+ // Need to check this here in case (&src == dst)
+ bool canXformBounds = !src.fBoundsIsDirty && matrix.rectStaysRect() && src.countPoints() > 1;
+
+ matrix.mapPoints((*dst)->fPoints.begin(), src.fPoints.begin(), src.fPoints.count());
+
+ /*
+ * Here we optimize the bounds computation, by noting if the bounds are
+ * already known, and if so, we just transform those as well and mark
+ * them as "known", rather than force the transformed path to have to
+ * recompute them.
+ *
+ * Special gotchas if the path is effectively empty (<= 1 point) or
+ * if it is non-finite. In those cases bounds need to stay empty,
+ * regardless of the matrix.
+ */
+ if (canXformBounds) {
+ (*dst)->fBoundsIsDirty = false;
+ if (src.fIsFinite) {
+ matrix.mapRect(&(*dst)->fBounds, src.fBounds);
+ if (!((*dst)->fIsFinite = (*dst)->fBounds.isFinite())) {
+ (*dst)->fBounds.setEmpty();
+ } else if (src.countPoints() & 1) {
+ /* Matrix optimizations may cause the first point to use slightly different
+ * math for its transform, which can lead to it being outside the transformed
+ * bounds. Include it in the bounds just in case.
+ */
+ SkPoint p = (*dst)->fPoints[0];
+ SkRect& r = (*dst)->fBounds;
+ r.fLeft = SkMinScalar(r.fLeft, p.fX);
+ r.fTop = SkMinScalar(r.fTop, p.fY);
+ r.fRight = SkMaxScalar(r.fRight, p.fX);
+ r.fBottom = SkMaxScalar(r.fBottom, p.fY);
+ }
+ } else {
+ (*dst)->fIsFinite = false;
+ (*dst)->fBounds.setEmpty();
+ }
+ } else {
+ (*dst)->fBoundsIsDirty = true;
+ }
+
+ (*dst)->fSegmentMask = src.fSegmentMask;
+
+ // It's an oval only if it stays a rect.
+ bool rectStaysRect = matrix.rectStaysRect();
+ (*dst)->fIsOval = src.fIsOval && rectStaysRect;
+ (*dst)->fIsRRect = src.fIsRRect && rectStaysRect;
+ if ((*dst)->fIsOval || (*dst)->fIsRRect) {
+ unsigned start = src.fRRectOrOvalStartIdx;
+ bool isCCW = SkToBool(src.fRRectOrOvalIsCCW);
+ transform_dir_and_start(matrix, (*dst)->fIsRRect, &isCCW, &start);
+ (*dst)->fRRectOrOvalIsCCW = isCCW;
+ (*dst)->fRRectOrOvalStartIdx = start;
+ }
+
+ if (dst->get() == &src) {
+ (*dst)->callGenIDChangeListeners();
+ (*dst)->fGenerationID = 0;
+ }
+
+ SkDEBUGCODE((*dst)->validate();)
+}
+
+void SkPathRef::Rewind(sk_sp<SkPathRef>* pathRef) {
+ if ((*pathRef)->unique()) {
+ SkDEBUGCODE((*pathRef)->validate();)
+ (*pathRef)->callGenIDChangeListeners();
+ (*pathRef)->fBoundsIsDirty = true; // this also invalidates fIsFinite
+ (*pathRef)->fGenerationID = 0;
+ (*pathRef)->fPoints.rewind();
+ (*pathRef)->fVerbs.rewind();
+ (*pathRef)->fConicWeights.rewind();
+ (*pathRef)->fSegmentMask = 0;
+ (*pathRef)->fIsOval = false;
+ (*pathRef)->fIsRRect = false;
+ SkDEBUGCODE((*pathRef)->validate();)
+ } else {
+ int oldVCnt = (*pathRef)->countVerbs();
+ int oldPCnt = (*pathRef)->countPoints();
+ pathRef->reset(new SkPathRef);
+ (*pathRef)->resetToSize(0, 0, 0, oldVCnt, oldPCnt);
+ }
+}
+
+bool SkPathRef::operator== (const SkPathRef& ref) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(ref.validate();)
+
+ // We explicitly check fSegmentMask as a quick-reject. We could skip it,
+    // since it is only a cache of info in the fVerbs, but it's a fast way to
+ // notice a difference
+ if (fSegmentMask != ref.fSegmentMask) {
+ return false;
+ }
+
+ bool genIDMatch = fGenerationID && fGenerationID == ref.fGenerationID;
+#ifdef SK_RELEASE
+ if (genIDMatch) {
+ return true;
+ }
+#endif
+ if (fPoints != ref.fPoints || fConicWeights != ref.fConicWeights || fVerbs != ref.fVerbs) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ if (ref.fVerbs.count() == 0) {
+ SkASSERT(ref.fPoints.count() == 0);
+ }
+ return true;
+}
+
+void SkPathRef::writeToBuffer(SkWBuffer* buffer) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(size_t beforePos = buffer->pos();)
+
+ // Call getBounds() to ensure (as a side-effect) that fBounds
+ // and fIsFinite are computed.
+ const SkRect& bounds = this->getBounds();
+
+ // We store fSegmentMask for older readers, but current readers can't trust it, so they
+ // don't read it.
+ int32_t packed = ((fIsFinite & 1) << kIsFinite_SerializationShift) |
+ (fSegmentMask << kSegmentMask_SerializationShift);
+ buffer->write32(packed);
+
+ // TODO: write gen ID here. Problem: We don't know if we're cross process or not from
+ // SkWBuffer. Until this is fixed we write 0.
+ buffer->write32(0);
+ buffer->write32(fVerbs.count());
+ buffer->write32(fPoints.count());
+ buffer->write32(fConicWeights.count());
+ buffer->write(fVerbs.begin(), fVerbs.bytes());
+    buffer->write(fPoints.begin(), fPoints.bytes());
+ buffer->write(fConicWeights.begin(), fConicWeights.bytes());
+ buffer->write(&bounds, sizeof(bounds));
+
+ SkASSERT(buffer->pos() - beforePos == (size_t) this->writeSize());
+}
+
+uint32_t SkPathRef::writeSize() const {
+ return uint32_t(5 * sizeof(uint32_t) +
+ fVerbs.bytes() + fPoints.bytes() + fConicWeights.bytes() +
+ sizeof(SkRect));
+}
+
+void SkPathRef::copy(const SkPathRef& ref,
+ int additionalReserveVerbs,
+ int additionalReservePoints) {
+ SkDEBUGCODE(this->validate();)
+ this->resetToSize(ref.fVerbs.count(), ref.fPoints.count(), ref.fConicWeights.count(),
+ additionalReserveVerbs, additionalReservePoints);
+ fVerbs = ref.fVerbs;
+ fPoints = ref.fPoints;
+ fConicWeights = ref.fConicWeights;
+ fBoundsIsDirty = ref.fBoundsIsDirty;
+ if (!fBoundsIsDirty) {
+ fBounds = ref.fBounds;
+ fIsFinite = ref.fIsFinite;
+ }
+ fSegmentMask = ref.fSegmentMask;
+ fIsOval = ref.fIsOval;
+ fIsRRect = ref.fIsRRect;
+ fRRectOrOvalIsCCW = ref.fRRectOrOvalIsCCW;
+ fRRectOrOvalStartIdx = ref.fRRectOrOvalStartIdx;
+ SkDEBUGCODE(this->validate();)
+}
+
+unsigned SkPathRef::computeSegmentMask() const {
+ const uint8_t* verbs = fVerbs.begin();
+ unsigned mask = 0;
+ for (int i = 0; i < fVerbs.count(); ++i) {
+ switch (verbs[i]) {
+ case SkPath::kLine_Verb: mask |= SkPath::kLine_SegmentMask; break;
+ case SkPath::kQuad_Verb: mask |= SkPath::kQuad_SegmentMask; break;
+ case SkPath::kConic_Verb: mask |= SkPath::kConic_SegmentMask; break;
+ case SkPath::kCubic_Verb: mask |= SkPath::kCubic_SegmentMask; break;
+ default: break;
+ }
+ }
+ return mask;
+}
+
+void SkPathRef::interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const {
+ const SkScalar* inValues = &ending.getPoints()->fX;
+ SkScalar* outValues = &out->getWritablePoints()->fX;
+ int count = out->countPoints() * 2;
+ for (int index = 0; index < count; ++index) {
+ outValues[index] = outValues[index] * weight + inValues[index] * (1 - weight);
+ }
+ out->fBoundsIsDirty = true;
+ out->fIsOval = false;
+ out->fIsRRect = false;
+}
+
+SkPoint* SkPathRef::growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights) {
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = numVbs;
+ break;
+ case SkPath::kLine_Verb:
+ fSegmentMask |= SkPath::kLine_SegmentMask;
+ pCnt = numVbs;
+ break;
+ case SkPath::kQuad_Verb:
+ fSegmentMask |= SkPath::kQuad_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kConic_Verb:
+ fSegmentMask |= SkPath::kConic_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kCubic_Verb:
+ fSegmentMask |= SkPath::kCubic_SegmentMask;
+ pCnt = 3 * numVbs;
+ break;
+ case SkPath::kClose_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kClose_Verb");
+ pCnt = 0;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kDone");
+ // fall through
+ default:
+ SkDEBUGFAIL("default should not be reached");
+ pCnt = 0;
+ }
+
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fIsOval = false;
+ fIsRRect = false;
+
+ memset(fVerbs.append(numVbs), verb, numVbs);
+ if (SkPath::kConic_Verb == verb) {
+ SkASSERT(weights);
+ *weights = fConicWeights.append(numVbs);
+ }
+ SkPoint* pts = fPoints.append(pCnt);
+
+ SkDEBUGCODE(this->validate();)
+ return pts;
+}
+
+SkPoint* SkPathRef::growForVerb(int /* SkPath::Verb*/ verb, SkScalar weight) {
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ unsigned mask = 0;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = 1;
+ break;
+ case SkPath::kLine_Verb:
+ mask = SkPath::kLine_SegmentMask;
+ pCnt = 1;
+ break;
+ case SkPath::kQuad_Verb:
+ mask = SkPath::kQuad_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kConic_Verb:
+ mask = SkPath::kConic_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kCubic_Verb:
+ mask = SkPath::kCubic_SegmentMask;
+ pCnt = 3;
+ break;
+ case SkPath::kClose_Verb:
+ pCnt = 0;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForVerb called for kDone");
+ // fall through
+ default:
+ SkDEBUGFAIL("default is not reached");
+ pCnt = 0;
+ }
+
+ fSegmentMask |= mask;
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fIsOval = false;
+ fIsRRect = false;
+
+ *fVerbs.append() = verb;
+ if (SkPath::kConic_Verb == verb) {
+ *fConicWeights.append() = weight;
+ }
+ SkPoint* pts = fPoints.append(pCnt);
+
+ SkDEBUGCODE(this->validate();)
+ return pts;
+}
+
+uint32_t SkPathRef::genID() const {
+ SkASSERT(fEditorsAttached.load() == 0);
+ static const uint32_t kMask = (static_cast<int64_t>(1) << SkPathPriv::kPathRefGenIDBitCnt) - 1;
+
+ if (fGenerationID == 0) {
+ if (fPoints.count() == 0 && fVerbs.count() == 0) {
+ fGenerationID = kEmptyGenID;
+ } else {
+ static std::atomic<uint32_t> nextID{kEmptyGenID + 1};
+ do {
+ fGenerationID = nextID.fetch_add(1, std::memory_order_relaxed) & kMask;
+ } while (fGenerationID == 0 || fGenerationID == kEmptyGenID);
+ }
+ }
+ return fGenerationID;
+}
+
+void SkPathRef::addGenIDChangeListener(sk_sp<GenIDChangeListener> listener) {
+ if (nullptr == listener || this == gEmpty) {
+ return;
+ }
+
+ SkAutoMutexExclusive lock(fGenIDChangeListenersMutex);
+
+ // Clean out any stale listeners before we append the new one.
+ for (int i = 0; i < fGenIDChangeListeners.count(); ++i) {
+ if (fGenIDChangeListeners[i]->shouldUnregisterFromPath()) {
+ fGenIDChangeListeners[i]->unref();
+ fGenIDChangeListeners.removeShuffle(i--); // No need to preserve the order after i.
+ }
+ }
+
+ SkASSERT(!listener->shouldUnregisterFromPath());
+ *fGenIDChangeListeners.append() = listener.release();
+}
+
+// we need to be called *before* the genID gets changed or zeroed
+void SkPathRef::callGenIDChangeListeners() {
+ auto visit = [this]() {
+ for (GenIDChangeListener* listener : fGenIDChangeListeners) {
+ if (!listener->shouldUnregisterFromPath()) {
+ listener->onChange();
+ }
+ // Listeners get at most one shot, so whether these triggered or not, blow them away.
+ listener->unref();
+ }
+ fGenIDChangeListeners.reset();
+ };
+
+ // Acquiring the mutex is relatively expensive, compared to operations like moveTo, etc.
+ // Thus we want to skip it if we're unique. This is safe because the only purpose of the
+ // mutex is to keep the listener-list intact while we iterate/edit it, and if we're unique,
+ // no one else can modify fGenIDChangeListeners.
+
+ if (this->unique()) {
+ visit();
+ } else {
+ SkAutoMutexExclusive lock(fGenIDChangeListenersMutex);
+ visit();
+ }
+}
+
+SkRRect SkPathRef::getRRect() const {
+ const SkRect& bounds = this->getBounds();
+ SkVector radii[4] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};
+ Iter iter(*this);
+ SkPoint pts[4];
+ uint8_t verb = iter.next(pts);
+ SkASSERT(SkPath::kMove_Verb == verb);
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (SkPath::kConic_Verb == verb) {
+ SkVector v1_0 = pts[1] - pts[0];
+ SkVector v2_1 = pts[2] - pts[1];
+ SkVector dxdy;
+ if (v1_0.fX) {
+ SkASSERT(!v2_1.fX && !v1_0.fY);
+ dxdy.set(SkScalarAbs(v1_0.fX), SkScalarAbs(v2_1.fY));
+ } else if (!v1_0.fY) {
+ SkASSERT(!v2_1.fX || !v2_1.fY);
+ dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v2_1.fY));
+ } else {
+ SkASSERT(!v2_1.fY);
+ dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v1_0.fY));
+ }
+ SkRRect::Corner corner =
+ pts[1].fX == bounds.fLeft ?
+ pts[1].fY == bounds.fTop ?
+ SkRRect::kUpperLeft_Corner : SkRRect::kLowerLeft_Corner :
+ pts[1].fY == bounds.fTop ?
+ SkRRect::kUpperRight_Corner : SkRRect::kLowerRight_Corner;
+ SkASSERT(!radii[corner].fX && !radii[corner].fY);
+ radii[corner] = dxdy;
+ } else {
+ SkASSERT((verb == SkPath::kLine_Verb
+ && (!(pts[1].fX - pts[0].fX) || !(pts[1].fY - pts[0].fY)))
+ || verb == SkPath::kClose_Verb);
+ }
+ }
+ SkRRect rrect;
+ rrect.setRectRadii(bounds, radii);
+ return rrect;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPathRef::Iter::Iter() {
+#ifdef SK_DEBUG
+ fPts = nullptr;
+ fConicWeights = nullptr;
+#endif
+ // need to init enough to make next() harmlessly return kDone_Verb
+ fVerbs = nullptr;
+ fVerbStop = nullptr;
+}
+
+SkPathRef::Iter::Iter(const SkPathRef& path) {
+ this->setPathRef(path);
+}
+
+void SkPathRef::Iter::setPathRef(const SkPathRef& path) {
+ fPts = path.points();
+ fVerbs = path.verbsBegin();
+ fVerbStop = path.verbsEnd();
+ fConicWeights = path.conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+
+ // Don't allow iteration through non-finite points.
+ if (!path.isFinite()) {
+ fVerbStop = fVerbs;
+ }
+}
+
+uint8_t SkPathRef::Iter::next(SkPoint pts[4]) {
+ SkASSERT(pts);
+
+ SkDEBUGCODE(unsigned peekResult = this->peek();)
+
+ if (fVerbs == fVerbStop) {
+ SkASSERT(peekResult == SkPath::kDone_Verb);
+ return (uint8_t) SkPath::kDone_Verb;
+ }
+
+    // fVerbs points at the next verb; read it, then advance.
+ unsigned verb = *fVerbs++;
+ const SkPoint* srcPts = fPts;
+
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pts[0] = srcPts[0];
+ srcPts += 1;
+ break;
+ case SkPath::kLine_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ srcPts += 1;
+ break;
+ case SkPath::kConic_Verb:
+ fConicWeights += 1;
+ // fall-through
+ case SkPath::kQuad_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ pts[2] = srcPts[1];
+ srcPts += 2;
+ break;
+ case SkPath::kCubic_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ pts[2] = srcPts[1];
+ pts[3] = srcPts[2];
+ srcPts += 3;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ SkASSERT(fVerbs == fVerbStop);
+ break;
+ }
+ fPts = srcPts;
+ SkASSERT(peekResult == verb);
+ return (uint8_t) verb;
+}
+
+uint8_t SkPathRef::Iter::peek() const {
+ return fVerbs < fVerbStop ? *fVerbs : (uint8_t) SkPath::kDone_Verb;
+}
+
+
+bool SkPathRef::isValid() const {
+ if (fIsOval || fIsRRect) {
+        // Currently we don't allow both of these to be set, even though ovals are round rects.
+ if (fIsOval == fIsRRect) {
+ return false;
+ }
+ if (fIsOval) {
+ if (fRRectOrOvalStartIdx >= 4) {
+ return false;
+ }
+ } else {
+ if (fRRectOrOvalStartIdx >= 8) {
+ return false;
+ }
+ }
+ }
+
+ if (!fBoundsIsDirty && !fBounds.isEmpty()) {
+ bool isFinite = true;
+ Sk2s leftTop = Sk2s(fBounds.fLeft, fBounds.fTop);
+ Sk2s rightBot = Sk2s(fBounds.fRight, fBounds.fBottom);
+ for (int i = 0; i < fPoints.count(); ++i) {
+ Sk2s point = Sk2s(fPoints[i].fX, fPoints[i].fY);
+#ifdef SK_DEBUG
+ if (fPoints[i].isFinite() &&
+ ((point < leftTop).anyTrue() || (point > rightBot).anyTrue())) {
+ SkDebugf("bad SkPathRef bounds: %g %g %g %g\n",
+ fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ for (int j = 0; j < fPoints.count(); ++j) {
+ if (i == j) {
+ SkDebugf("*** bounds do not contain: ");
+ }
+ SkDebugf("%g %g\n", fPoints[j].fX, fPoints[j].fY);
+ }
+ return false;
+ }
+#endif
+
+            if (fPoints[i].isFinite() && ((point < leftTop).anyTrue() || (point > rightBot).anyTrue()))
+ return false;
+ if (!fPoints[i].isFinite()) {
+ isFinite = false;
+ }
+ }
+ if (SkToBool(fIsFinite) != isFinite) {
+ return false;
+ }
+ }
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPathEdgeIter::SkPathEdgeIter(const SkPath& path) {
+ fMoveToPtr = fPts = path.fPathRef->points();
+ fVerbs = path.fPathRef->verbsBegin();
+ fVerbsStop = path.fPathRef->verbsEnd();
+ fConicWeights = path.fPathRef->conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+
+ fNeedsCloseLine = false;
+ SkDEBUGCODE(fIsConic = false;)
+}
diff --git a/gfx/skia/skia/src/core/SkPath_serial.cpp b/gfx/skia/skia/src/core/SkPath_serial.cpp
new file mode 100644
index 0000000000..44fd146bac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPath_serial.cpp
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkMath.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBuffer.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkSafeMath.h"
+
+#include <cmath>
+
+enum SerializationOffsets {
+ kType_SerializationShift = 28, // requires 4 bits
+ kDirection_SerializationShift = 26, // requires 2 bits
+ kFillType_SerializationShift = 8, // requires 8 bits
+ // low-8-bits are version
+ kVersion_SerializationMask = 0xFF,
+};
+
+enum SerializationVersions {
+ // kPathPrivFirstDirection_Version = 1,
+ // kPathPrivLastMoveToIndex_Version = 2,
+ // kPathPrivTypeEnumVersion = 3,
+ kJustPublicData_Version = 4, // introduced Feb/2018
+ kVerbsAreStoredForward_Version = 5, // introduced Sept/2019
+
+ kMin_Version = kJustPublicData_Version,
+ kCurrent_Version = kVerbsAreStoredForward_Version
+};
+
+enum SerializationType {
+ kGeneral = 0,
+ kRRect = 1
+};
+
+static unsigned extract_version(uint32_t packed) {
+ return packed & kVersion_SerializationMask;
+}
+
+static SkPath::FillType extract_filltype(uint32_t packed) {
+ return static_cast<SkPath::FillType>((packed >> kFillType_SerializationShift) & 0x3);
+}
+
+static SerializationType extract_serializationtype(uint32_t packed) {
+ return static_cast<SerializationType>((packed >> kType_SerializationShift) & 0xF);
+}
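+
+// Illustrative sanity checks (a minimal sketch using only the constants
+// defined above; kExamplePackedHeader is not an upstream name): a kRRect
+// header at the current version keeps the serialization type in the top
+// nibble and the version in the low byte, and the extractors' shifts and
+// masks recover both fields.
+static constexpr uint32_t kExamplePackedHeader =
+        ((uint32_t)kRRect << kType_SerializationShift) | kCurrent_Version;
+static_assert((kExamplePackedHeader & kVersion_SerializationMask) == kCurrent_Version, "");
+static_assert(((kExamplePackedHeader >> kType_SerializationShift) & 0xF) == kRRect, "");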
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkPath::writeToMemoryAsRRect(void* storage) const {
+ SkRect oval;
+ SkRRect rrect;
+ bool isCCW;
+ unsigned start;
+ if (fPathRef->isOval(&oval, &isCCW, &start)) {
+ rrect.setOval(oval);
+ // Convert to rrect start indices.
+ start *= 2;
+ } else if (!fPathRef->isRRect(&rrect, &isCCW, &start)) {
+ return 0;
+ }
+
+ // packed header, rrect, start index.
+ const size_t sizeNeeded = sizeof(int32_t) + SkRRect::kSizeInMemory + sizeof(int32_t);
+ if (!storage) {
+ return sizeNeeded;
+ }
+
+ int firstDir = isCCW ? SkPathPriv::kCCW_FirstDirection : SkPathPriv::kCW_FirstDirection;
+ int32_t packed = (fFillType << kFillType_SerializationShift) |
+ (firstDir << kDirection_SerializationShift) |
+ (SerializationType::kRRect << kType_SerializationShift) |
+ kCurrent_Version;
+
+ SkWBuffer buffer(storage);
+ buffer.write32(packed);
+ SkRRectPriv::WriteToBuffer(rrect, &buffer);
+ buffer.write32(SkToS32(start));
+ buffer.padToAlign4();
+ SkASSERT(sizeNeeded == buffer.pos());
+ return buffer.pos();
+}
+
+size_t SkPath::writeToMemory(void* storage) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (size_t bytes = this->writeToMemoryAsRRect(storage)) {
+ return bytes;
+ }
+
+ int32_t packed = (fFillType << kFillType_SerializationShift) |
+ (SerializationType::kGeneral << kType_SerializationShift) |
+ kCurrent_Version;
+
+ int32_t pts = fPathRef->countPoints();
+ int32_t cnx = fPathRef->countWeights();
+ int32_t vbs = fPathRef->countVerbs();
+
+ SkSafeMath safe;
+ size_t size = 4 * sizeof(int32_t);
+ size = safe.add(size, safe.mul(pts, sizeof(SkPoint)));
+ size = safe.add(size, safe.mul(cnx, sizeof(SkScalar)));
+ size = safe.add(size, safe.mul(vbs, sizeof(uint8_t)));
+ size = safe.alignUp(size, 4);
+ if (!safe) {
+ return 0;
+ }
+ if (!storage) {
+ return size;
+ }
+
+ SkWBuffer buffer(storage);
+ buffer.write32(packed);
+ buffer.write32(pts);
+ buffer.write32(cnx);
+ buffer.write32(vbs);
+ buffer.write(fPathRef->points(), pts * sizeof(SkPoint));
+ buffer.write(fPathRef->conicWeights(), cnx * sizeof(SkScalar));
+ buffer.write(fPathRef->verbsBegin(), vbs * sizeof(uint8_t));
+ buffer.padToAlign4();
+
+ SkASSERT(buffer.pos() == size);
+ return size;
+}
+
+sk_sp<SkData> SkPath::serialize() const {
+ size_t size = this->writeToMemory(nullptr);
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ this->writeToMemory(data->writable_data());
+ return data;
+}
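+
+// A minimal usage sketch (illustrative only, not compiled): writeToMemory(nullptr)
+// sizes, serialize() allocates and writes, and readFromMemory() returns the
+// number of bytes it consumed (0 on failure), so a round trip can be
+// verified end to end.
+#if 0
+static bool example_roundtrip_path(const SkPath& src) {
+    sk_sp<SkData> data = src.serialize();
+    SkPath dst;
+    return dst.readFromMemory(data->data(), data->size()) == data->size();
+}
+#endif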
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// reading
+
+size_t SkPath::readFromMemory(const void* storage, size_t length) {
+ SkRBuffer buffer(storage, length);
+ uint32_t packed;
+ if (!buffer.readU32(&packed)) {
+ return 0;
+ }
+ unsigned version = extract_version(packed);
+ if (version < kMin_Version || version > kCurrent_Version) {
+ return 0;
+ }
+
+ if (version == kJustPublicData_Version || version == kVerbsAreStoredForward_Version) {
+ return this->readFromMemory_EQ4Or5(storage, length);
+ }
+ return 0;
+}
+
+size_t SkPath::readAsRRect(const void* storage, size_t length) {
+ SkRBuffer buffer(storage, length);
+ uint32_t packed;
+ if (!buffer.readU32(&packed)) {
+ return 0;
+ }
+
+ SkASSERT(extract_serializationtype(packed) == SerializationType::kRRect);
+
+ uint8_t dir = (packed >> kDirection_SerializationShift) & 0x3;
+ FillType fillType = extract_filltype(packed);
+
+ Direction rrectDir;
+ SkRRect rrect;
+ int32_t start;
+ switch (dir) {
+ case SkPathPriv::kCW_FirstDirection:
+ rrectDir = kCW_Direction;
+ break;
+ case SkPathPriv::kCCW_FirstDirection:
+ rrectDir = kCCW_Direction;
+ break;
+ default:
+ return 0;
+ }
+ if (!SkRRectPriv::ReadFromBuffer(&buffer, &rrect)) {
+ return 0;
+ }
+ if (!buffer.readS32(&start) || start != SkTPin(start, 0, 7)) {
+ return 0;
+ }
+ this->reset();
+ this->addRRect(rrect, rrectDir, SkToUInt(start));
+ this->setFillType(fillType);
+ buffer.skipToAlign4();
+ return buffer.pos();
+}
+
+size_t SkPath::readFromMemory_EQ4Or5(const void* storage, size_t length) {
+ SkRBuffer buffer(storage, length);
+ uint32_t packed;
+ if (!buffer.readU32(&packed)) {
+ return 0;
+ }
+
+ bool verbsAreReversed = true;
+ if (extract_version(packed) == kVerbsAreStoredForward_Version) {
+ verbsAreReversed = false;
+ }
+
+ switch (extract_serializationtype(packed)) {
+ case SerializationType::kRRect:
+ return this->readAsRRect(storage, length);
+ case SerializationType::kGeneral:
+ break; // fall through
+ default:
+ return 0;
+ }
+
+ int32_t pts, cnx, vbs;
+ if (!buffer.readS32(&pts) || !buffer.readS32(&cnx) || !buffer.readS32(&vbs)) {
+ return 0;
+ }
+
+ const SkPoint* points = buffer.skipCount<SkPoint>(pts);
+ const SkScalar* conics = buffer.skipCount<SkScalar>(cnx);
+ const uint8_t* verbs = buffer.skipCount<uint8_t>(vbs);
+ buffer.skipToAlign4();
+ if (!buffer.isValid()) {
+ return 0;
+ }
+ SkASSERT(buffer.pos() <= length);
+
+#define CHECK_POINTS_CONICS(p, c) \
+ do { \
+ if (p && ((pts -= p) < 0)) { \
+ return 0; \
+ } \
+ if (c && ((cnx -= c) < 0)) { \
+ return 0; \
+ } \
+ } while (0)
+
+ int verbsStep = 1;
+ if (verbsAreReversed) {
+ verbs += vbs - 1;
+ verbsStep = -1;
+ }
+
+ SkPath tmp;
+ tmp.setFillType(extract_filltype(packed));
+ tmp.incReserve(pts);
+ for (int i = 0; i < vbs; ++i) {
+ switch (*verbs) {
+ case kMove_Verb:
+ CHECK_POINTS_CONICS(1, 0);
+ tmp.moveTo(*points++);
+ break;
+ case kLine_Verb:
+ CHECK_POINTS_CONICS(1, 0);
+ tmp.lineTo(*points++);
+ break;
+ case kQuad_Verb:
+ CHECK_POINTS_CONICS(2, 0);
+ tmp.quadTo(points[0], points[1]);
+ points += 2;
+ break;
+ case kConic_Verb:
+ CHECK_POINTS_CONICS(2, 1);
+ tmp.conicTo(points[0], points[1], *conics++);
+ points += 2;
+ break;
+ case kCubic_Verb:
+ CHECK_POINTS_CONICS(3, 0);
+ tmp.cubicTo(points[0], points[1], points[2]);
+ points += 3;
+ break;
+ case kClose_Verb:
+ tmp.close();
+ break;
+ default:
+ return 0; // bad verb
+ }
+ verbs += verbsStep;
+ }
+#undef CHECK_POINTS_CONICS
+ if (pts || cnx) {
+ return 0; // leftover points and/or conics
+ }
+
+ *this = std::move(tmp);
+ return buffer.pos();
+}
diff --git a/gfx/skia/skia/src/core/SkPicture.cpp b/gfx/skia/skia/src/core/SkPicture.cpp
new file mode 100644
index 0000000000..e041ba64f0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicture.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPicture.h"
+
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkPictureCommon.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPicturePlayback.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkPictureRecord.h"
+#include <atomic>
+
+// When we read/write the SkPictInfo via a stream, we have a sentinel byte right after the info.
+// Note: in the read/write buffer versions, we have a slightly different convention:
+// We have a sentinel int32_t:
+// 0 : failure
+// 1 : PictureData
+// <0 : -size of the custom data
+enum {
+ kFailure_TrailingStreamByteAfterPictInfo = 0, // nothing follows
+ kPictureData_TrailingStreamByteAfterPictInfo = 1, // SkPictureData follows
+ kCustom_TrailingStreamByteAfterPictInfo = 2, // -size32 follows
+};
+
+/* SkPicture impl. This handles generic responsibilities like unique IDs and serialization. */
+
+SkPicture::SkPicture() {
+ static std::atomic<uint32_t> nextID{1};
+ do {
+ fUniqueID = nextID.fetch_add(+1, std::memory_order_relaxed);
+ } while (fUniqueID == 0);
+}
+
+static const char kMagic[] = { 's', 'k', 'i', 'a', 'p', 'i', 'c', 't' };
+
+SkPictInfo SkPicture::createHeader() const {
+ SkPictInfo info;
+ // Copy magic bytes at the beginning of the header
+ static_assert(sizeof(kMagic) == 8, "");
+ static_assert(sizeof(kMagic) == sizeof(info.fMagic), "");
+ memcpy(info.fMagic, kMagic, sizeof(kMagic));
+
+ // Set picture info after magic bytes in the header
+ info.setVersion(SkPicturePriv::kCurrent_Version);
+ info.fCullRect = this->cullRect();
+ return info;
+}
+
+bool SkPicture::IsValidPictInfo(const SkPictInfo& info) {
+ if (0 != memcmp(info.fMagic, kMagic, sizeof(kMagic))) {
+ return false;
+ }
+ if (info.getVersion() < SkPicturePriv::kMin_Version ||
+ info.getVersion() > SkPicturePriv::kCurrent_Version) {
+ return false;
+ }
+ return true;
+}
+
+bool SkPicture::StreamIsSKP(SkStream* stream, SkPictInfo* pInfo) {
+ if (!stream) {
+ return false;
+ }
+
+ SkPictInfo info;
+ SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
+ if (stream->read(&info.fMagic, sizeof(kMagic)) != sizeof(kMagic)) {
+ return false;
+ }
+
+ uint32_t version;
+ if (!stream->readU32(&version)) { return false; }
+ info.setVersion(version);
+ if (!stream->readScalar(&info.fCullRect.fLeft )) { return false; }
+ if (!stream->readScalar(&info.fCullRect.fTop )) { return false; }
+ if (!stream->readScalar(&info.fCullRect.fRight )) { return false; }
+ if (!stream->readScalar(&info.fCullRect.fBottom)) { return false; }
+ if (info.getVersion() < SkPicturePriv::kRemoveHeaderFlags_Version) {
+ if (!stream->readU32(nullptr)) { return false; }
+ }
+
+ if (!IsValidPictInfo(info)) { return false; }
+
+ if (pInfo) { *pInfo = info; }
+ return true;
+}
+
+bool SkPicture_StreamIsSKP(SkStream* stream, SkPictInfo* pInfo) {
+ return SkPicture::StreamIsSKP(stream, pInfo);
+}
+
+bool SkPicture::BufferIsSKP(SkReadBuffer* buffer, SkPictInfo* pInfo) {
+ SkPictInfo info;
+ SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
+ if (!buffer->readByteArray(&info.fMagic, sizeof(kMagic))) {
+ return false;
+ }
+
+ info.setVersion(buffer->readUInt());
+ buffer->readRect(&info.fCullRect);
+ if (info.getVersion() < SkPicturePriv::kRemoveHeaderFlags_Version) {
+ (void)buffer->readUInt(); // used to be flags
+ }
+
+ if (IsValidPictInfo(info)) {
+ if (pInfo) { *pInfo = info; }
+ return true;
+ }
+ return false;
+}
+
+sk_sp<SkPicture> SkPicture::Forwardport(const SkPictInfo& info,
+ const SkPictureData* data,
+ SkReadBuffer* buffer) {
+ if (!data) {
+ return nullptr;
+ }
+ if (!data->opData()) {
+ return nullptr;
+ }
+ SkPicturePlayback playback(data);
+ SkPictureRecorder r;
+ playback.draw(r.beginRecording(info.fCullRect), nullptr/*no callback*/, buffer);
+ return r.finishRecordingAsPicture();
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, const SkDeserialProcs* procs) {
+ return MakeFromStream(stream, procs, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromData(const void* data, size_t size,
+ const SkDeserialProcs* procs) {
+ if (!data) {
+ return nullptr;
+ }
+ SkMemoryStream stream(data, size);
+ return MakeFromStream(&stream, procs, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromData(const SkData* data, const SkDeserialProcs* procs) {
+ if (!data) {
+ return nullptr;
+ }
+ SkMemoryStream stream(data->data(), data->size());
+ return MakeFromStream(&stream, procs, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, const SkDeserialProcs* procsPtr,
+ SkTypefacePlayback* typefaces) {
+ SkPictInfo info;
+ if (!StreamIsSKP(stream, &info)) {
+ return nullptr;
+ }
+
+ SkDeserialProcs procs;
+ if (procsPtr) {
+ procs = *procsPtr;
+ }
+
+ uint8_t trailingStreamByteAfterPictInfo;
+ if (!stream->readU8(&trailingStreamByteAfterPictInfo)) { return nullptr; }
+ switch (trailingStreamByteAfterPictInfo) {
+ case kPictureData_TrailingStreamByteAfterPictInfo: {
+ std::unique_ptr<SkPictureData> data(
+ SkPictureData::CreateFromStream(stream, info, procs, typefaces));
+ return Forwardport(info, data.get(), nullptr);
+ }
+ case kCustom_TrailingStreamByteAfterPictInfo: {
+ int32_t ssize;
+ if (!stream->readS32(&ssize) || ssize >= 0 || !procs.fPictureProc) {
+ return nullptr;
+ }
+ size_t size = sk_negate_to_size_t(ssize);
+ auto data = SkData::MakeUninitialized(size);
+ if (stream->read(data->writable_data(), size) != size) {
+ return nullptr;
+ }
+ return procs.fPictureProc(data->data(), size, procs.fPictureCtx);
+ }
+ default: // fall through to error return
+ break;
+ }
+ return nullptr;
+}
+
+sk_sp<SkPicture> SkPicturePriv::MakeFromBuffer(SkReadBuffer& buffer) {
+ SkPictInfo info;
+ if (!SkPicture::BufferIsSKP(&buffer, &info)) {
+ return nullptr;
+ }
+ // size should be 0, 1, or negative
+ int32_t ssize = buffer.read32();
+ if (ssize < 0) {
+ const SkDeserialProcs& procs = buffer.getDeserialProcs();
+ if (!procs.fPictureProc) {
+ return nullptr;
+ }
+ size_t size = sk_negate_to_size_t(ssize);
+ return procs.fPictureProc(buffer.skip(size), size, procs.fPictureCtx);
+ }
+ if (ssize != 1) {
+ // 1 is the magic 'size' that means SkPictureData follows
+ return nullptr;
+ }
+ std::unique_ptr<SkPictureData> data(SkPictureData::CreateFromBuffer(buffer, info));
+ return SkPicture::Forwardport(info, data.get(), &buffer);
+}
+
+SkPictureData* SkPicture::backport() const {
+ SkPictInfo info = this->createHeader();
+ SkPictureRecord rec(info.fCullRect.roundOut(), 0/*flags*/);
+ rec.beginRecording();
+ this->playback(&rec);
+ rec.endRecording();
+ return new SkPictureData(rec, info);
+}
+
+void SkPicture::serialize(SkWStream* stream, const SkSerialProcs* procs) const {
+ this->serialize(stream, procs, nullptr);
+}
+
+sk_sp<SkData> SkPicture::serialize(const SkSerialProcs* procs) const {
+ SkDynamicMemoryWStream stream;
+ this->serialize(&stream, procs, nullptr);
+ return stream.detachAsData();
+}
+
+static sk_sp<SkData> custom_serialize(const SkPicture* picture, const SkSerialProcs& procs) {
+ if (procs.fPictureProc) {
+ auto data = procs.fPictureProc(const_cast<SkPicture*>(picture), procs.fPictureCtx);
+ if (data) {
+ size_t size = data->size();
+ if (!SkTFitsIn<int32_t>(size) || size <= 1) {
+ return SkData::MakeEmpty();
+ }
+ return data;
+ }
+ }
+ return nullptr;
+}
+
+static bool write_pad32(SkWStream* stream, const void* data, size_t size) {
+ if (!stream->write(data, size)) {
+ return false;
+ }
+ if (size & 3) {
+ uint32_t zero = 0;
+ return stream->write(&zero, 4 - (size & 3));
+ }
+ return true;
+}
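+
+// For example: write_pad32() with size == 5 writes the five payload bytes
+// followed by three zero bytes, so the stream stays 4-byte aligned for the
+// next field.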
+
+// Private serialize.
+// SkPictureData::serialize makes a first pass on all subpictures, indicated by textBlobsOnly=true,
+// to fill typefaceSet.
+void SkPicture::serialize(SkWStream* stream, const SkSerialProcs* procsPtr,
+ SkRefCntSet* typefaceSet, bool textBlobsOnly) const {
+ SkSerialProcs procs;
+ if (procsPtr) {
+ procs = *procsPtr;
+ }
+
+ SkPictInfo info = this->createHeader();
+ stream->write(&info, sizeof(info));
+
+ if (auto custom = custom_serialize(this, procs)) {
+ int32_t size = SkToS32(custom->size());
+ if (size == 0) {
+ stream->write8(kFailure_TrailingStreamByteAfterPictInfo);
+ return;
+ }
+ stream->write8(kCustom_TrailingStreamByteAfterPictInfo);
+ stream->write32(-size); // negative for custom format
+ write_pad32(stream, custom->data(), size);
+ return;
+ }
+
+ std::unique_ptr<SkPictureData> data(this->backport());
+ if (data) {
+ stream->write8(kPictureData_TrailingStreamByteAfterPictInfo);
+ data->serialize(stream, procs, typefaceSet, textBlobsOnly);
+ } else {
+ stream->write8(kFailure_TrailingStreamByteAfterPictInfo);
+ }
+}
+
+void SkPicturePriv::Flatten(const sk_sp<const SkPicture> picture, SkWriteBuffer& buffer) {
+ SkPictInfo info = picture->createHeader();
+ std::unique_ptr<SkPictureData> data(picture->backport());
+
+ buffer.writeByteArray(&info.fMagic, sizeof(info.fMagic));
+ buffer.writeUInt(info.getVersion());
+ buffer.writeRect(info.fCullRect);
+
+ if (auto custom = custom_serialize(picture.get(), buffer.fProcs)) {
+ int32_t size = SkToS32(custom->size());
+ buffer.write32(-size); // negative for custom format
+ buffer.writePad32(custom->data(), size);
+ return;
+ }
+
+ if (data) {
+ buffer.write32(1); // special size meaning SkPictureData
+ data->flatten(buffer);
+ } else {
+ buffer.write32(0); // signal no content
+ }
+}
+
+sk_sp<SkPicture> SkPicture::MakePlaceholder(SkRect cull) {
+ struct Placeholder : public SkPicture {
+ explicit Placeholder(SkRect cull) : fCull(cull) {}
+
+ void playback(SkCanvas*, AbortCallback*) const override { }
+
+ // approximateOpCount() needs to be greater than kMaxPictureOpsToUnrollInsteadOfRef
+ // in SkCanvas.cpp to avoid that unrolling. SK_MaxS32 should be big enough!
+ int approximateOpCount() const override { return SK_MaxS32; }
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ SkRect cullRect() const override { return fCull; }
+
+ SkRect fCull;
+ };
+ return sk_make_sp<Placeholder>(cull);
+}
diff --git a/gfx/skia/skia/src/core/SkPictureCommon.h b/gfx/skia/skia/src/core/SkPictureCommon.h
new file mode 100644
index 0000000000..2a10699ca6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureCommon.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureCommon_DEFINED
+#define SkPictureCommon_DEFINED
+
+// Some shared code used by both SkBigPicture and SkMiniPicture.
+// SkPathCounter -- SkRecord visitor that counts paths that draw slowly on the GPU.
+
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkTLogic.h"
+#include "src/core/SkRecords.h"
+
+// TODO: might be nicer to have operator() return an int (the number of slow paths)?
+struct SkPathCounter {
+ // Some ops have a paint, some have an optional paint. Either way, get back a pointer.
+ static const SkPaint* AsPtr(const SkPaint& p) { return &p; }
+ static const SkPaint* AsPtr(const SkRecords::Optional<SkPaint>& p) { return p; }
+
+ SkPathCounter() : fNumSlowPathsAndDashEffects(0) {}
+
+ void checkPaint(const SkPaint* paint) {
+ if (paint && paint->getPathEffect()) {
+ // Initially assume it's slow.
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+
+ void operator()(const SkRecords::DrawPoints& op) {
+ this->checkPaint(&op.paint);
+ const SkPathEffect* effect = op.paint.getPathEffect();
+ if (effect) {
+ SkPathEffect::DashInfo info;
+ SkPathEffect::DashType dashType = effect->asADash(&info);
+ if (2 == op.count && SkPaint::kRound_Cap != op.paint.getStrokeCap() &&
+ SkPathEffect::kDash_DashType == dashType && 2 == info.fCount) {
+ fNumSlowPathsAndDashEffects--;
+ }
+ }
+ }
+
+ void operator()(const SkRecords::DrawPath& op) {
+ this->checkPaint(&op.paint);
+ if (op.paint.isAntiAlias() && !op.path.isConvex()) {
+ SkPaint::Style paintStyle = op.paint.getStyle();
+ const SkRect& pathBounds = op.path.getBounds();
+ if (SkPaint::kStroke_Style == paintStyle &&
+ 0 == op.paint.getStrokeWidth()) {
+ // AA hairline concave path is not slow.
+ } else if (SkPaint::kFill_Style == paintStyle && pathBounds.width() < 64.f &&
+ pathBounds.height() < 64.f && !op.path.isVolatile()) {
+ // AADF eligible concave path is not slow.
+ } else {
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+ }
+
+ void operator()(const SkRecords::ClipPath& op) {
+ // TODO: does the SkRegion op matter?
+ if (op.opAA.aa() && !op.path.isConvex()) {
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+
+ void operator()(const SkRecords::SaveLayer& op) {
+ this->checkPaint(AsPtr(op.paint));
+ }
+
+ template <typename T>
+ SK_WHEN(T::kTags & SkRecords::kHasPaint_Tag, void) operator()(const T& op) {
+ this->checkPaint(AsPtr(op.paint));
+ }
+
+ template <typename T>
+ SK_WHEN(!(T::kTags & SkRecords::kHasPaint_Tag), void)
+ operator()(const T& op) { /* do nothing */ }
+
+ int fNumSlowPathsAndDashEffects;
+};
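+
+// Typical use (a sketch, assuming SkRecord's visit(int, F&&) interface):
+//
+//   SkPathCounter counter;
+//   for (int i = 0; i < record.count(); i++) {
+//       record.visit(i, counter);
+//   }
+//   int slowPaths = counter.fNumSlowPathsAndDashEffects;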
+
+sk_sp<SkImage> ImageDeserializer_SkDeserialImageProc(const void*, size_t, void* imagedeserializer);
+
+bool SkPicture_StreamIsSKP(SkStream*, SkPictInfo*);
+
+#endif // SkPictureCommon_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPictureData.cpp b/gfx/skia/skia/src/core/SkPictureData.cpp
new file mode 100644
index 0000000000..9fd2768d6e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.cpp
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPictureData.h"
+
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <new>
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#endif
+
+template <typename T> int SafeCount(const T* obj) {
+ return obj ? obj->count() : 0;
+}
+
+SkPictureData::SkPictureData(const SkPictInfo& info)
+ : fInfo(info) {}
+
+void SkPictureData::initForPlayback() const {
+ // ensure that the paths bounds are pre-computed
+ for (int i = 0; i < fPaths.count(); i++) {
+ fPaths[i].updateBoundsCache();
+ }
+}
+
+SkPictureData::SkPictureData(const SkPictureRecord& record,
+ const SkPictInfo& info)
+ : fPictures(record.getPictures())
+ , fDrawables(record.getDrawables())
+ , fTextBlobs(record.getTextBlobs())
+ , fVertices(record.getVertices())
+ , fImages(record.getImages())
+ , fInfo(info) {
+
+ fOpData = record.opData();
+
+ fPaints = record.fPaints;
+
+ fPaths.reset(record.fPaths.count());
+ record.fPaths.foreach([this](const SkPath& path, int n) {
+ // These indices are logically 1-based, but we need to serialize them
+ // 0-based to keep the deserializing SkPictureData::getPath() working.
+ fPaths[n-1] = path;
+ });
+
+ this->initForPlayback();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkStream.h"
+
+static size_t compute_chunk_size(SkFlattenable::Factory* array, int count) {
+ size_t size = 4; // for 'count'
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ size += SkWStream::SizeOfPackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ size += SkWStream::SizeOfPackedUInt(len);
+ size += len;
+ }
+ }
+
+ return size;
+}
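+
+// Note: this sizing pass must mirror the write loop in WriteFactories()
+// below byte for byte; the SkASSERT there verifies that the two stay in
+// sync.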
+
+static void write_tag_size(SkWriteBuffer& buffer, uint32_t tag, size_t size) {
+ buffer.writeUInt(tag);
+ buffer.writeUInt(SkToU32(size));
+}
+
+static void write_tag_size(SkWStream* stream, uint32_t tag, size_t size) {
+ stream->write32(tag);
+ stream->write32(SkToU32(size));
+}
+
+void SkPictureData::WriteFactories(SkWStream* stream, const SkFactorySet& rec) {
+ int count = rec.count();
+
+ SkAutoSTMalloc<16, SkFlattenable::Factory> storage(count);
+ SkFlattenable::Factory* array = (SkFlattenable::Factory*)storage.get();
+ rec.copyToArray(array);
+
+ size_t size = compute_chunk_size(array, count);
+
+ // TODO: write_tag_size should really take a size_t
+ write_tag_size(stream, SK_PICT_FACTORY_TAG, (uint32_t) size);
+ SkDEBUGCODE(size_t start = stream->bytesWritten());
+ stream->write32(count);
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ stream->writePackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ stream->writePackedUInt(len);
+ stream->write(name, len);
+ }
+ }
+
+ SkASSERT(size == (stream->bytesWritten() - start));
+}
+
+void SkPictureData::WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec,
+ const SkSerialProcs& procs) {
+ int count = rec.count();
+
+ write_tag_size(stream, SK_PICT_TYPEFACE_TAG, count);
+
+ SkAutoSTMalloc<16, SkTypeface*> storage(count);
+ SkTypeface** array = (SkTypeface**)storage.get();
+ rec.copyToArray((SkRefCnt**)array);
+
+ for (int i = 0; i < count; i++) {
+ SkTypeface* tf = array[i];
+ if (procs.fTypefaceProc) {
+ auto data = procs.fTypefaceProc(tf, procs.fTypefaceCtx);
+ if (data) {
+ stream->write(data->data(), data->size());
+ continue;
+ }
+ }
+ array[i]->serialize(stream);
+ }
+}
+
+void SkPictureData::flattenToBuffer(SkWriteBuffer& buffer, bool textBlobsOnly) const {
+ int i, n;
+
+ if (!textBlobsOnly) {
+ if ((n = fPaints.count()) > 0) {
+ write_tag_size(buffer, SK_PICT_PAINT_BUFFER_TAG, n);
+ for (i = 0; i < n; i++) {
+ buffer.writePaint(fPaints[i]);
+ }
+ }
+
+ if ((n = fPaths.count()) > 0) {
+ write_tag_size(buffer, SK_PICT_PATH_BUFFER_TAG, n);
+ buffer.writeInt(n);
+ for (int i = 0; i < n; i++) {
+ buffer.writePath(fPaths[i]);
+ }
+ }
+ }
+
+ if (!fTextBlobs.empty()) {
+ write_tag_size(buffer, SK_PICT_TEXTBLOB_BUFFER_TAG, fTextBlobs.count());
+ for (const auto& blob : fTextBlobs) {
+ SkTextBlobPriv::Flatten(*blob, buffer);
+ }
+ }
+
+ if (!textBlobsOnly) {
+ if (!fVertices.empty()) {
+ write_tag_size(buffer, SK_PICT_VERTICES_BUFFER_TAG, fVertices.count());
+ for (const auto& vert : fVertices) {
+ buffer.writeDataAsByteArray(vert->encode().get());
+ }
+ }
+
+ if (!fImages.empty()) {
+ write_tag_size(buffer, SK_PICT_IMAGE_BUFFER_TAG, fImages.count());
+ for (const auto& img : fImages) {
+ buffer.writeImage(img.get());
+ }
+ }
+ }
+}
+
+// SkPictureData::serialize() will write out paints, and then write out an array of typefaces
+// (unique set). However, paint's serializer will respect SerialProcs, which can cause us to
+// call that custom typefaceproc on *every* typeface, not just on the unique ones. To avoid this,
+// we ignore the custom proc (here) when we serialize the paints, and then do respect it when
+// we serialize the typefaces.
+static SkSerialProcs skip_typeface_proc(const SkSerialProcs& procs) {
+ SkSerialProcs newProcs = procs;
+ newProcs.fTypefaceProc = nullptr;
+ newProcs.fTypefaceCtx = nullptr;
+ return newProcs;
+}
+
+// topLevelTypeFaceSet is null only on the top-level call.
+// This method is called recursively on every subpicture in two passes.
+// textBlobsOnly indicates that we are on the first pass, where we skip as much work as
+// possible that is not relevant to collecting text blobs into topLevelTypeFaceSet.
+// TODO(nifong): dedupe typefaces and all other shared resources in a faster and more readable way.
+void SkPictureData::serialize(SkWStream* stream, const SkSerialProcs& procs,
+ SkRefCntSet* topLevelTypeFaceSet, bool textBlobsOnly) const {
+ // This can happen at pretty much any time, so might as well do it first.
+ write_tag_size(stream, SK_PICT_READER_TAG, fOpData->size());
+ stream->write(fOpData->bytes(), fOpData->size());
+
+ // We serialize all typefaces into the typeface section of the top-level picture.
+ SkRefCntSet localTypefaceSet;
+ SkRefCntSet* typefaceSet = topLevelTypeFaceSet ? topLevelTypeFaceSet : &localTypefaceSet;
+
+ // We delay serializing the bulk of our data until after we've serialized
+ // factories and typefaces by first serializing to an in-memory write buffer.
+ SkFactorySet factSet; // buffer refs factSet, so factSet must come first.
+ SkBinaryWriteBuffer buffer;
+ buffer.setFactoryRecorder(sk_ref_sp(&factSet));
+ buffer.setSerialProcs(skip_typeface_proc(procs));
+ buffer.setTypefaceRecorder(sk_ref_sp(typefaceSet));
+ this->flattenToBuffer(buffer, textBlobsOnly);
+
+ // Dummy serialize our sub-pictures for the side effect of filling typefaceSet
+ // with typefaces from sub-pictures.
+ struct DevNull: public SkWStream {
+ DevNull() : fBytesWritten(0) {}
+ size_t fBytesWritten;
+ bool write(const void*, size_t size) override { fBytesWritten += size; return true; }
+ size_t bytesWritten() const override { return fBytesWritten; }
+ } devnull;
+ for (const auto& pic : fPictures) {
+ pic->serialize(&devnull, nullptr, typefaceSet, /*textBlobsOnly=*/ true);
+ }
+ if (textBlobsOnly) { return; } // return early from dummy serialize
+
+ // We need to write factories before we write the buffer.
+ // We need to write typefaces before we write the buffer or any sub-picture.
+ WriteFactories(stream, factSet);
+ // Pass the original typefaceproc (if any) now that we're ready to actually serialize the
+ // typefaces. We skipped this proc before, when we were serializing paints, so that the
+ // paints would just write indices into our typeface set.
+ WriteTypefaces(stream, *typefaceSet, procs);
+
+ // Write the buffer.
+ write_tag_size(stream, SK_PICT_BUFFER_SIZE_TAG, buffer.bytesWritten());
+ buffer.writeToStream(stream);
+
+ // Write sub-pictures by calling serialize again.
+ if (!fPictures.empty()) {
+ write_tag_size(stream, SK_PICT_PICTURE_TAG, fPictures.count());
+ for (const auto& pic : fPictures) {
+ pic->serialize(stream, &procs, typefaceSet, /*textBlobsOnly=*/ false);
+ }
+ }
+
+ stream->write32(SK_PICT_EOF_TAG);
+}
+
+void SkPictureData::flatten(SkWriteBuffer& buffer) const {
+ write_tag_size(buffer, SK_PICT_READER_TAG, fOpData->size());
+ buffer.writeByteArray(fOpData->bytes(), fOpData->size());
+
+ if (!fPictures.empty()) {
+ write_tag_size(buffer, SK_PICT_PICTURE_TAG, fPictures.count());
+ for (const auto& pic : fPictures) {
+ SkPicturePriv::Flatten(pic, buffer);
+ }
+ }
+
+ if (!fDrawables.empty()) {
+ write_tag_size(buffer, SK_PICT_DRAWABLE_TAG, fDrawables.count());
+ for (const auto& draw : fDrawables) {
+ buffer.writeFlattenable(draw.get());
+ }
+ }
+
+ // Write this picture playback's data into a writebuffer
+ this->flattenToBuffer(buffer, false);
+ buffer.write32(SK_PICT_EOF_TAG);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPictureData::parseStreamTag(SkStream* stream,
+ uint32_t tag,
+ uint32_t size,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ switch (tag) {
+ case SK_PICT_READER_TAG:
+ SkASSERT(nullptr == fOpData);
+ fOpData = SkData::MakeFromStream(stream, size);
+ if (!fOpData) {
+ return false;
+ }
+ break;
+ case SK_PICT_FACTORY_TAG: {
+ if (!stream->readU32(&size)) { return false; }
+ fFactoryPlayback = skstd::make_unique<SkFactoryPlayback>(size);
+ for (size_t i = 0; i < size; i++) {
+ SkString str;
+ size_t len;
+ if (!stream->readPackedUInt(&len)) { return false; }
+ str.resize(len);
+ if (stream->read(str.writable_str(), len) != len) {
+ return false;
+ }
+ fFactoryPlayback->base()[i] = SkFlattenable::NameToFactory(str.c_str());
+ }
+ } break;
+ case SK_PICT_TYPEFACE_TAG: {
+ fTFPlayback.setCount(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ sk_sp<SkTypeface> tf(SkTypeface::MakeDeserialize(stream));
+ if (!tf.get()) { // failed to deserialize
+ // fTFPlayback asserts it never has a null, so we plop in
+ // the default here.
+ tf = SkTypeface::MakeDefault();
+ }
+ fTFPlayback[i] = std::move(tf);
+ }
+ } break;
+ case SK_PICT_PICTURE_TAG: {
+ SkASSERT(fPictures.empty());
+ fPictures.reserve(SkToInt(size));
+
+ for (uint32_t i = 0; i < size; i++) {
+ auto pic = SkPicture::MakeFromStream(stream, &procs, topLevelTFPlayback);
+ if (!pic) {
+ return false;
+ }
+ fPictures.push_back(std::move(pic));
+ }
+ } break;
+ case SK_PICT_BUFFER_SIZE_TAG: {
+ SkAutoMalloc storage(size);
+ if (stream->read(storage.get(), size) != size) {
+ return false;
+ }
+
+ SkReadBuffer buffer(storage.get(), size);
+ buffer.setVersion(fInfo.getVersion());
+
+ if (!fFactoryPlayback) {
+ return false;
+ }
+ fFactoryPlayback->setupBuffer(buffer);
+ buffer.setDeserialProcs(procs);
+
+ if (fTFPlayback.count() > 0) {
+ // .skp files <= v43 have typefaces serialized with each sub picture.
+ fTFPlayback.setupBuffer(buffer);
+ } else {
+ // Newer .skp files serialize all typefaces with the top picture.
+ topLevelTFPlayback->setupBuffer(buffer);
+ }
+
+ while (!buffer.eof() && buffer.isValid()) {
+ tag = buffer.readUInt();
+ size = buffer.readUInt();
+ this->parseBufferTag(buffer, tag, size);
+ }
+ if (!buffer.isValid()) {
+ return false;
+ }
+ } break;
+ }
+ return true; // success
+}
+
+static sk_sp<SkImage> create_image_from_buffer(SkReadBuffer& buffer) {
+ return buffer.readImage();
+}
+static sk_sp<SkVertices> create_vertices_from_buffer(SkReadBuffer& buffer) {
+ auto data = buffer.readByteArrayAsData();
+ return data ? SkVertices::Decode(data->data(), data->size()) : nullptr;
+}
+
+static sk_sp<SkDrawable> create_drawable_from_buffer(SkReadBuffer& buffer) {
+ return sk_sp<SkDrawable>((SkDrawable*)buffer.readFlattenable(SkFlattenable::kSkDrawable_Type));
+}
+
+// We need two types 'cause SkDrawable is const-variant.
+template <typename T, typename U>
+bool new_array_from_buffer(SkReadBuffer& buffer, uint32_t inCount,
+ SkTArray<sk_sp<T>>& array, sk_sp<U> (*factory)(SkReadBuffer&)) {
+ if (!buffer.validate(array.empty() && SkTFitsIn<int>(inCount))) {
+ return false;
+ }
+ if (0 == inCount) {
+ return true;
+ }
+
+ for (uint32_t i = 0; i < inCount; ++i) {
+ auto obj = factory(buffer);
+
+ if (!buffer.validate(obj != nullptr)) {
+ array.reset();
+ return false;
+ }
+
+ array.push_back(std::move(obj));
+ }
+
+ return true;
+}
+
+void SkPictureData::parseBufferTag(SkReadBuffer& buffer, uint32_t tag, uint32_t size) {
+ switch (tag) {
+ case SK_PICT_PAINT_BUFFER_TAG: {
+ if (!buffer.validate(SkTFitsIn<int>(size))) {
+ return;
+ }
+ const int count = SkToInt(size);
+
+ for (int i = 0; i < count; ++i) {
+ // Do we need to keep an array of fFonts for legacy draws?
+ if (!buffer.readPaint(&fPaints.push_back(), nullptr)) {
+ return;
+ }
+ }
+ } break;
+ case SK_PICT_PATH_BUFFER_TAG:
+ if (size > 0) {
+ const int count = buffer.readInt();
+ if (!buffer.validate(count >= 0)) {
+ return;
+ }
+ for (int i = 0; i < count; i++) {
+ buffer.readPath(&fPaths.push_back());
+ if (!buffer.isValid()) {
+ return;
+ }
+ }
+ } break;
+ case SK_PICT_TEXTBLOB_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fTextBlobs, SkTextBlobPriv::MakeFromBuffer);
+ break;
+ case SK_PICT_VERTICES_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fVertices, create_vertices_from_buffer);
+ break;
+ case SK_PICT_IMAGE_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fImages, create_image_from_buffer);
+ break;
+ case SK_PICT_READER_TAG: {
+ // Preflight check that we can initialize all data from the buffer
+ // before allocating it.
+ if (!buffer.validateCanReadN<uint8_t>(size)) {
+ return;
+ }
+ auto data(SkData::MakeUninitialized(size));
+ if (!buffer.readByteArray(data->writable_data(), size) ||
+ !buffer.validate(nullptr == fOpData)) {
+ return;
+ }
+ SkASSERT(nullptr == fOpData);
+ fOpData = std::move(data);
+ } break;
+ case SK_PICT_PICTURE_TAG:
+ new_array_from_buffer(buffer, size, fPictures, SkPicturePriv::MakeFromBuffer);
+ break;
+ case SK_PICT_DRAWABLE_TAG:
+ new_array_from_buffer(buffer, size, fDrawables, create_drawable_from_buffer);
+ break;
+ default:
+ buffer.validate(false); // The tag was invalid.
+ break;
+ }
+}
+
+SkPictureData* SkPictureData::CreateFromStream(SkStream* stream,
+ const SkPictInfo& info,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ std::unique_ptr<SkPictureData> data(new SkPictureData(info));
+ if (!topLevelTFPlayback) {
+ topLevelTFPlayback = &data->fTFPlayback;
+ }
+
+ if (!data->parseStream(stream, procs, topLevelTFPlayback)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+SkPictureData* SkPictureData::CreateFromBuffer(SkReadBuffer& buffer,
+ const SkPictInfo& info) {
+ std::unique_ptr<SkPictureData> data(new SkPictureData(info));
+ buffer.setVersion(info.getVersion());
+
+ if (!data->parseBuffer(buffer)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+bool SkPictureData::parseStream(SkStream* stream,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ for (;;) {
+ uint32_t tag;
+ if (!stream->readU32(&tag)) { return false; }
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+
+ uint32_t size;
+ if (!stream->readU32(&size)) { return false; }
+ if (!this->parseStreamTag(stream, tag, size, procs, topLevelTFPlayback)) {
+ return false; // we're invalid
+ }
+ }
+ return true;
+}
+
+bool SkPictureData::parseBuffer(SkReadBuffer& buffer) {
+ while (buffer.isValid()) {
+ uint32_t tag = buffer.readUInt();
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+ this->parseBufferTag(buffer, tag, buffer.readUInt());
+ }
+
+ // Check that we encountered required tags
+ if (!buffer.validate(this->opData() != nullptr)) {
+ // If we didn't build any opData, we are invalid. Even an EmptyPicture allocates the
+ // SkData for the ops (though its length may be zero).
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPictureData.h b/gfx/skia/skia/src/core/SkPictureData.h
new file mode 100644
index 0000000000..25c9a7acc4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureData_DEFINED
+#define SkPictureData_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPicture.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkPictureFlat.h"
+
+#include <memory>
+
+class SkData;
+class SkPictureRecord;
+class SkReader32;
+struct SkSerialProcs;
+class SkStream;
+class SkWStream;
+class SkBBoxHierarchy;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkReadBuffer;
+class SkTextBlob;
+
+struct SkPictInfo {
+ SkPictInfo() : fVersion(~0U) {}
+
+ uint32_t getVersion() const {
+ SkASSERT(fVersion != ~0U);
+ return fVersion;
+ }
+
+ void setVersion(uint32_t version) {
+ SkASSERT(version != ~0U);
+ fVersion = version;
+ }
+
+public:
+ char fMagic[8];
+private:
+ uint32_t fVersion;
+public:
+ SkRect fCullRect;
+};
+
+#define SK_PICT_READER_TAG SkSetFourByteTag('r', 'e', 'a', 'd')
+#define SK_PICT_FACTORY_TAG SkSetFourByteTag('f', 'a', 'c', 't')
+#define SK_PICT_TYPEFACE_TAG SkSetFourByteTag('t', 'p', 'f', 'c')
+#define SK_PICT_PICTURE_TAG SkSetFourByteTag('p', 'c', 't', 'r')
+#define SK_PICT_DRAWABLE_TAG SkSetFourByteTag('d', 'r', 'a', 'w')
+
+// This tag specifies the size of the ReadBuffer, needed for the following tags
+#define SK_PICT_BUFFER_SIZE_TAG SkSetFourByteTag('a', 'r', 'a', 'y')
+// these are all inside the ARRAYS tag
+#define SK_PICT_PAINT_BUFFER_TAG SkSetFourByteTag('p', 'n', 't', ' ')
+#define SK_PICT_PATH_BUFFER_TAG SkSetFourByteTag('p', 't', 'h', ' ')
+#define SK_PICT_TEXTBLOB_BUFFER_TAG SkSetFourByteTag('b', 'l', 'o', 'b')
+#define SK_PICT_VERTICES_BUFFER_TAG SkSetFourByteTag('v', 'e', 'r', 't')
+#define SK_PICT_IMAGE_BUFFER_TAG SkSetFourByteTag('i', 'm', 'a', 'g')
+
+// Always write this guy last (with no length field afterwards)
+#define SK_PICT_EOF_TAG SkSetFourByteTag('e', 'o', 'f', ' ')
+
+template <typename T>
+T* read_index_base_1_or_null(SkReadBuffer* reader, const SkTArray<sk_sp<T>>& array) {
+ int index = reader->readInt();
+ return reader->validate(index > 0 && index <= array.count()) ? array[index - 1].get() : nullptr;
+}
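+
+// For example: a recorded index of 1 refers to array[0]. An index of 0 is
+// deliberately rejected here because writers reserve 0 to mean "none" (see
+// getPaint() below, where a 0 index means no paint was recorded).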
+
+class SkPictureData {
+public:
+ SkPictureData(const SkPictureRecord& record, const SkPictInfo&);
+ // Does not affect ownership of SkStream.
+ static SkPictureData* CreateFromStream(SkStream*,
+ const SkPictInfo&,
+ const SkDeserialProcs&,
+ SkTypefacePlayback*);
+ static SkPictureData* CreateFromBuffer(SkReadBuffer&, const SkPictInfo&);
+
+ void serialize(SkWStream*, const SkSerialProcs&, SkRefCntSet*, bool textBlobsOnly=false) const;
+ void flatten(SkWriteBuffer&) const;
+
+ const sk_sp<SkData>& opData() const { return fOpData; }
+
+protected:
+ explicit SkPictureData(const SkPictInfo& info);
+
+ // Does not affect ownership of SkStream.
+ bool parseStream(SkStream*, const SkDeserialProcs&, SkTypefacePlayback*);
+ bool parseBuffer(SkReadBuffer& buffer);
+
+public:
+ const SkImage* getImage(SkReadBuffer* reader) const {
+ // images are written base-0, unlike paths, pictures, drawables, etc.
+ const int index = reader->readInt();
+ return reader->validateIndex(index, fImages.count()) ? fImages[index].get() : nullptr;
+ }
+
+ const SkPath& getPath(SkReadBuffer* reader) const {
+ int index = reader->readInt();
+ return reader->validate(index > 0 && index <= fPaths.count()) ?
+ fPaths[index - 1] : fEmptyPath;
+ }
+
+ const SkPicture* getPicture(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fPictures);
+ }
+
+ SkDrawable* getDrawable(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fDrawables);
+ }
+
+ const SkPaint* getPaint(SkReadBuffer* reader) const {
+ int index = reader->readInt();
+ if (index == 0) {
+ return nullptr; // recorder wrote a zero for no paint (likely drawimage)
+ }
+ return reader->validate(index > 0 && index <= fPaints.count()) ?
+ &fPaints[index - 1] : nullptr;
+ }
+
+ const SkTextBlob* getTextBlob(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fTextBlobs);
+ }
+
+ const SkVertices* getVertices(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fVertices);
+ }
+
+private:
+ // these help us with reading/writing
+ // Does not affect ownership of SkStream.
+ bool parseStreamTag(SkStream*, uint32_t tag, uint32_t size,
+ const SkDeserialProcs&, SkTypefacePlayback*);
+ void parseBufferTag(SkReadBuffer&, uint32_t tag, uint32_t size);
+ void flattenToBuffer(SkWriteBuffer&, bool textBlobsOnly) const;
+
+ SkTArray<SkPaint> fPaints;
+ SkTArray<SkPath> fPaths;
+
+ sk_sp<SkData> fOpData; // opcodes and parameters
+
+ const SkPath fEmptyPath;
+ const SkBitmap fEmptyBitmap;
+
+ SkTArray<sk_sp<const SkPicture>> fPictures;
+ SkTArray<sk_sp<SkDrawable>> fDrawables;
+ SkTArray<sk_sp<const SkTextBlob>> fTextBlobs;
+ SkTArray<sk_sp<const SkVertices>> fVertices;
+ SkTArray<sk_sp<const SkImage>> fImages;
+
+ SkTypefacePlayback fTFPlayback;
+ std::unique_ptr<SkFactoryPlayback> fFactoryPlayback;
+
+ const SkPictInfo fInfo;
+
+ static void WriteFactories(SkWStream* stream, const SkFactorySet& rec);
+ static void WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec, const SkSerialProcs&);
+
+ void initForPlayback() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.cpp b/gfx/skia/skia/src/core/SkPictureFlat.cpp
new file mode 100644
index 0000000000..a15f2e703c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkChecksum.h"
+#include "src/core/SkPictureFlat.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkTypefacePlayback::setCount(size_t count) {
+ fCount = count;
+ fArray.reset(new sk_sp<SkTypeface>[count]);
+}
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.h b/gfx/skia/skia/src/core/SkPictureFlat.h
new file mode 100644
index 0000000000..715d131f97
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPictureFlat_DEFINED
+#define SkPictureFlat_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/private/SkChecksum.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkWriteBuffer.h"
+
+/*
+ * Note: when adding new DrawTypes, it is necessary to add them to the end of this list
+ * and update LAST_DRAWTYPE_ENUM so that the code does not misread older skps
+ * (which can cause segfaults).
+ *
+ * Reordering can be done during version updates.
+ */
+enum DrawType {
+ UNUSED,
+ CLIP_PATH,
+ CLIP_REGION,
+ CLIP_RECT,
+ CLIP_RRECT,
+ CONCAT,
+ DRAW_BITMAP_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_MATRIX_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_NINE_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_RECT_RETIRED_2016_REMOVED_2018,
+ DRAW_CLEAR,
+ DRAW_DATA,
+ DRAW_OVAL,
+ DRAW_PAINT,
+ DRAW_PATH,
+ DRAW_PICTURE,
+ DRAW_POINTS,
+ DRAW_POS_TEXT_REMOVED_1_2019,
+ DRAW_POS_TEXT_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_POS_TEXT_H_REMOVED_1_2019,
+ DRAW_POS_TEXT_H_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_RECT,
+ DRAW_RRECT,
+ DRAW_SPRITE_RETIRED_2015_REMOVED_2018,
+ DRAW_TEXT_REMOVED_1_2019,
+ DRAW_TEXT_ON_PATH_RETIRED_08_2018_REMOVED_10_2018,
+ DRAW_TEXT_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_VERTICES_RETIRED_03_2017_REMOVED_01_2018,
+ RESTORE,
+ ROTATE,
+ SAVE,
+ SAVE_LAYER_SAVEFLAGS_DEPRECATED,
+ SCALE,
+ SET_MATRIX,
+ SKEW,
+ TRANSLATE,
+ NOOP,
+ BEGIN_COMMENT_GROUP_obsolete,
+ COMMENT_obsolete,
+ END_COMMENT_GROUP_obsolete,
+
+ // new ops -- feel free to re-alphabetize on next version bump
+ DRAW_DRRECT,
+ PUSH_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+ POP_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+
+ DRAW_PATCH, // could not add in alphabetical order
+ DRAW_PICTURE_MATRIX_PAINT,
+ DRAW_TEXT_BLOB,
+ DRAW_IMAGE,
+ DRAW_IMAGE_RECT_STRICT_obsolete,
+ DRAW_ATLAS,
+ DRAW_IMAGE_NINE,
+ DRAW_IMAGE_RECT,
+
+ SAVE_LAYER_SAVELAYERFLAGS_DEPRECATED_JAN_2016_REMOVED_01_2018,
+ SAVE_LAYER_SAVELAYERREC,
+
+ DRAW_ANNOTATION,
+ DRAW_DRAWABLE,
+ DRAW_DRAWABLE_MATRIX,
+ DRAW_TEXT_RSXFORM_DEPRECATED_DEC_2018,
+
+ TRANSLATE_Z, // deprecated (M60)
+
+ DRAW_SHADOW_REC,
+ DRAW_IMAGE_LATTICE,
+ DRAW_ARC,
+ DRAW_REGION,
+ DRAW_VERTICES_OBJECT,
+
+ FLUSH,
+
+ DRAW_EDGEAA_IMAGE_SET,
+
+ SAVE_BEHIND,
+
+ DRAW_EDGEAA_QUAD,
+
+ DRAW_BEHIND_PAINT,
+
+ LAST_DRAWTYPE_ENUM = DRAW_BEHIND_PAINT,
+};
+
+enum DrawVertexFlags {
+ DRAW_VERTICES_HAS_TEXS = 0x01,
+ DRAW_VERTICES_HAS_COLORS = 0x02,
+ DRAW_VERTICES_HAS_INDICES = 0x04,
+ DRAW_VERTICES_HAS_XFER = 0x08,
+};
+
+enum DrawAtlasFlags {
+ DRAW_ATLAS_HAS_COLORS = 1 << 0,
+ DRAW_ATLAS_HAS_CULL = 1 << 1,
+};
+
+enum DrawTextRSXformFlags {
+ DRAW_TEXT_RSXFORM_HAS_CULL = 1 << 0,
+};
+
+enum SaveLayerRecFlatFlags {
+ SAVELAYERREC_HAS_BOUNDS = 1 << 0,
+ SAVELAYERREC_HAS_PAINT = 1 << 1,
+ SAVELAYERREC_HAS_BACKDROP = 1 << 2,
+ SAVELAYERREC_HAS_FLAGS = 1 << 3,
+ SAVELAYERREC_HAS_CLIPMASK = 1 << 4,
+ SAVELAYERREC_HAS_CLIPMATRIX = 1 << 5,
+};
+
+enum SaveBehindFlatFlags {
+ SAVEBEHIND_HAS_SUBSET = 1 << 0,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// clipparams are packed in 5 bits
+// doAA:1 | clipOp:4
+
+static inline uint32_t ClipParams_pack(SkClipOp op, bool doAA) {
+ unsigned doAABit = doAA ? 1 : 0;
+ return (doAABit << 4) | static_cast<int>(op);
+}
+
+template <typename T> T asValidEnum(SkReadBuffer* buffer, uint32_t candidate) {
+ if (buffer->validate(candidate <= static_cast<uint32_t>(T::kMax_EnumValue))) {
+ return static_cast<T>(candidate);
+ }
+
+ return T::kMax_EnumValue;
+}
+
+static inline SkClipOp ClipParams_unpackRegionOp(SkReadBuffer* buffer, uint32_t packed) {
+ return asValidEnum<SkClipOp>(buffer, packed & 0xF);
+}
+
+static inline bool ClipParams_unpackDoAA(uint32_t packed) {
+ return SkToBool((packed >> 4) & 1);
+}
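+
+// For example (a worked sketch, assuming SkClipOp::kIntersect == 1): an
+// anti-aliased intersect clip packs as (1 << 4) | 1 == 0x11, from which
+// ClipParams_unpackRegionOp() recovers kIntersect and
+// ClipParams_unpackDoAA() recovers true.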
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkTypefacePlayback {
+public:
+ SkTypefacePlayback() : fCount(0), fArray(nullptr) {}
+ ~SkTypefacePlayback() = default;
+
+ void setCount(size_t count);
+
+ size_t count() const { return fCount; }
+
+ sk_sp<SkTypeface>& operator[](size_t index) {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setTypefaceArray(fArray.get(), fCount);
+ }
+
+protected:
+ size_t fCount;
+ std::unique_ptr<sk_sp<SkTypeface>[]> fArray;
+};
+
+class SkFactoryPlayback {
+public:
+ SkFactoryPlayback(int count) : fCount(count) { fArray = new SkFlattenable::Factory[count]; }
+
+ ~SkFactoryPlayback() { delete[] fArray; }
+
+ SkFlattenable::Factory* base() const { return fArray; }
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setFactoryPlayback(fArray, fCount);
+ }
+
+private:
+ int fCount;
+ SkFlattenable::Factory* fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
new file mode 100644
index 0000000000..21987f10df
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkSurface.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTLazy.h"
+#include "src/image/SkImage_Base.h"
+
+class SkPictureImageGenerator : public SkImageGenerator {
+public:
+ SkPictureImageGenerator(const SkImageInfo& info, sk_sp<SkPicture>, const SkMatrix*,
+ const SkPaint*);
+
+protected:
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options& opts)
+ override;
+
+#if SK_SUPPORT_GPU
+ TexGenType onCanGenerateTexture() const override { return TexGenType::kExpensive; }
+ sk_sp<GrTextureProxy> onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
+ const SkIPoint&, bool willNeedMipMaps) override;
+#endif
+
+private:
+ sk_sp<SkPicture> fPicture;
+ SkMatrix fMatrix;
+ SkTLazy<SkPaint> fPaint;
+
+ typedef SkImageGenerator INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkImageGenerator>
+SkImageGenerator::MakeFromPicture(const SkISize& size, sk_sp<SkPicture> picture,
+ const SkMatrix* matrix, const SkPaint* paint,
+ SkImage::BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace) {
+ if (!picture || !colorSpace || size.isEmpty()) {
+ return nullptr;
+ }
+
+ SkColorType colorType = kN32_SkColorType;
+ if (SkImage::BitDepth::kF16 == bitDepth) {
+ colorType = kRGBA_F16_SkColorType;
+ }
+
+ SkImageInfo info =
+ SkImageInfo::Make(size, colorType, kPremul_SkAlphaType, std::move(colorSpace));
+ return std::unique_ptr<SkImageGenerator>(
+ new SkPictureImageGenerator(info, std::move(picture), matrix, paint));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPictureImageGenerator::SkPictureImageGenerator(const SkImageInfo& info, sk_sp<SkPicture> picture,
+ const SkMatrix* matrix, const SkPaint* paint)
+ : INHERITED(info)
+ , fPicture(std::move(picture)) {
+
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.reset();
+ }
+
+ if (paint) {
+ fPaint.set(*paint);
+ }
+}
+
+bool SkPictureImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options& opts) {
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ std::unique_ptr<SkCanvas> canvas = SkCanvas::MakeRasterDirect(info, pixels, rowBytes, &props);
+ if (!canvas) {
+ return false;
+ }
+ canvas->clear(0);
+ canvas->drawPicture(fPicture, &fMatrix, fPaint.getMaybeNull());
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+sk_sp<GrTextureProxy> SkPictureImageGenerator::onGenerateTexture(
+ GrRecordingContext* ctx, const SkImageInfo& info,
+ const SkIPoint& origin, bool willNeedMipMaps) {
+ SkASSERT(ctx);
+
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+
+ // CONTEXT TODO: remove this use of 'backdoor' to create an SkSurface
+ sk_sp<SkSurface> surface(SkSurface::MakeRenderTarget(ctx->priv().backdoor(),
+ SkBudgeted::kYes, info, 0,
+ kTopLeft_GrSurfaceOrigin, &props,
+ willNeedMipMaps));
+ if (!surface) {
+ return nullptr;
+ }
+
+ SkMatrix matrix = fMatrix;
+ matrix.postTranslate(-origin.x(), -origin.y());
+ surface->getCanvas()->clear(0);
+ surface->getCanvas()->drawPicture(fPicture.get(), &matrix, fPaint.getMaybeNull());
+ sk_sp<SkImage> image(surface->makeImageSnapshot());
+ if (!image) {
+ return nullptr;
+ }
+ sk_sp<GrTextureProxy> proxy = as_IB(image)->asTextureProxyRef(ctx);
+ SkASSERT(!willNeedMipMaps || GrMipMapped::kYes == proxy->mipMapped());
+ return proxy;
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.cpp b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
new file mode 100644
index 0000000000..da793f0837
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPicturePlayback.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeMath.h"
+#include "src/utils/SkPatchUtils.h"
+
+// matches old SkCanvas::SaveFlags
+enum LegacySaveFlags {
+ kClipToLayer_LegacySaveFlags = 0x10,
+};
+
+SkCanvas::SaveLayerFlags SkCanvasPriv::LegacySaveFlagsToSaveLayerFlags(uint32_t flags) {
+ uint32_t layerFlags = 0;
+
+ if (0 == (flags & kClipToLayer_LegacySaveFlags)) {
+ layerFlags |= kDontClipToLayer_SaveLayerFlag;
+ }
+ return layerFlags;
+}
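+
+// For example: a legacy picture that recorded save flags without the
+// kClipToLayer_LegacySaveFlags bit maps to kDontClipToLayer_SaveLayerFlag;
+// one that set the bit maps to 0, i.e. the default clip-to-layer behavior.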
+
+/*
+ * Read the next op code and chunk size from 'reader'. The returned size
+ * is the entire size of the chunk (including the opcode). Thus, the
+ * offset just prior to calling ReadOpAndSize + 'size' is the offset
+ * to the next chunk's op code. This also means that the size of a chunk
+ * with no arguments (just an opcode) will be 4.
+ */
+DrawType SkPicturePlayback::ReadOpAndSize(SkReadBuffer* reader, uint32_t* size) {
+ uint32_t temp = reader->readInt();
+ uint32_t op;
+ if ((temp & 0xFF) == temp) {
+ // old skp file - no size information
+ op = temp;
+ *size = 0;
+ } else {
+ UNPACK_8_24(temp, op, *size);
+ if (MASK_24 == *size) {
+ *size = reader->readInt();
+ }
+ }
+ return (DrawType)op;
+}
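+
+// A worked sketch of the encoding (assuming the usual PACK_8_24 layout of
+// op in the high 8 bits and size in the low 24): an 8-byte DRAW_CLEAR chunk
+// arrives as (DRAW_CLEAR << 24) | 8. When a chunk is too large for 24 bits,
+// the size field saturates at MASK_24 and the true size follows in the next
+// word -- the reader->readInt() fallback above.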
+
+
+static const SkRect* get_rect_ptr(SkReadBuffer* reader, SkRect* storage) {
+ if (reader->readBool()) {
+ reader->readRect(storage);
+ return storage;
+ } else {
+ return nullptr;
+ }
+}
+
+void SkPicturePlayback::draw(SkCanvas* canvas,
+ SkPicture::AbortCallback* callback,
+ SkReadBuffer* buffer) {
+ AutoResetOpID aroi(this);
+ SkASSERT(0 == fCurOffset);
+
+ SkReadBuffer reader(fPictureData->opData()->bytes(),
+ fPictureData->opData()->size());
+
+ // Record this, so we can concat w/ it if we encounter a setMatrix()
+ SkMatrix initialMatrix = canvas->getTotalMatrix();
+
+ SkAutoCanvasRestore acr(canvas, false);
+
+ while (!reader.eof()) {
+ if (callback && callback->abort()) {
+ return;
+ }
+
+ fCurOffset = reader.offset();
+ uint32_t size;
+ DrawType op = ReadOpAndSize(&reader, &size);
+ if (!reader.validate(op > UNUSED && op <= LAST_DRAWTYPE_ENUM)) {
+ return;
+ }
+
+ this->handleOp(&reader, op, size, canvas, initialMatrix);
+ }
+
+ // need to propagate invalid state to the parent reader
+ if (buffer) {
+ buffer->validate(reader.isValid());
+ }
+}
+
+static void validate_offsetToRestore(SkReadBuffer* reader, size_t offsetToRestore) {
+ if (offsetToRestore) {
+ reader->validate(SkIsAlign4(offsetToRestore) && offsetToRestore >= reader->offset());
+ }
+}
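+
+// When present, the offset must be 4-byte aligned and at or ahead of the
+// current read position; the clip ops below use it to skip straight to the
+// balancing restore once the clip becomes empty.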
+
+void SkPicturePlayback::handleOp(SkReadBuffer* reader,
+ DrawType op,
+ uint32_t size,
+ SkCanvas* canvas,
+ const SkMatrix& initialMatrix) {
+#define BREAK_ON_READ_ERROR(r) if (!r->isValid()) break
+
+ switch (op) {
+ case NOOP: {
+ SkASSERT(size >= 4);
+ reader->skip(size - 4);
+ } break;
+ case FLUSH:
+ canvas->flush();
+ break;
+ case CLIP_PATH: {
+ const SkPath& path = fPictureData->getPath(reader);
+ uint32_t packed = reader->readInt();
+ SkClipOp clipOp = ClipParams_unpackRegionOp(reader, packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ validate_offsetToRestore(reader, offsetToRestore);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->clipPath(path, clipOp, doAA);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_REGION: {
+ SkRegion region;
+ reader->readRegion(&region);
+ uint32_t packed = reader->readInt();
+ SkClipOp clipOp = ClipParams_unpackRegionOp(reader, packed);
+ size_t offsetToRestore = reader->readInt();
+ validate_offsetToRestore(reader, offsetToRestore);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->clipRegion(region, clipOp);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_RECT: {
+ SkRect rect;
+ reader->readRect(&rect);
+ uint32_t packed = reader->readInt();
+ SkClipOp clipOp = ClipParams_unpackRegionOp(reader, packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ validate_offsetToRestore(reader, offsetToRestore);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->clipRect(rect, clipOp, doAA);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_RRECT: {
+ SkRRect rrect;
+ reader->readRRect(&rrect);
+ uint32_t packed = reader->readInt();
+ SkClipOp clipOp = ClipParams_unpackRegionOp(reader, packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ validate_offsetToRestore(reader, offsetToRestore);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->clipRRect(rrect, clipOp, doAA);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case PUSH_CULL: break; // Deprecated, safe to ignore both push and pop.
+ case POP_CULL: break;
+ case CONCAT: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->concat(matrix);
+ break;
+ }
+ case DRAW_ANNOTATION: {
+ SkRect rect;
+ reader->readRect(&rect);
+ SkString key;
+ reader->readString(&key);
+ sk_sp<SkData> data = reader->readByteArrayAsData();
+ BREAK_ON_READ_ERROR(reader);
+ SkASSERT(data);
+
+ canvas->drawAnnotation(rect, key.c_str(), data.get());
+ } break;
+ case DRAW_ARC: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ SkScalar startAngle = reader->readScalar();
+ SkScalar sweepAngle = reader->readScalar();
+ int useCenter = reader->readInt();
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawArc(rect, startAngle, sweepAngle, SkToBool(useCenter), *paint);
+ }
+ } break;
+ case DRAW_ATLAS: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* atlas = fPictureData->getImage(reader);
+ const uint32_t flags = reader->readUInt();
+ const int count = reader->readUInt();
+ const SkRSXform* xform = (const SkRSXform*)reader->skip(count, sizeof(SkRSXform));
+ const SkRect* tex = (const SkRect*)reader->skip(count, sizeof(SkRect));
+ const SkColor* colors = nullptr;
+ SkBlendMode mode = SkBlendMode::kDst;
+ if (flags & DRAW_ATLAS_HAS_COLORS) {
+ colors = (const SkColor*)reader->skip(count, sizeof(SkColor));
+ mode = (SkBlendMode)reader->readUInt();
+ }
+ const SkRect* cull = nullptr;
+ if (flags & DRAW_ATLAS_HAS_CULL) {
+ cull = (const SkRect*)reader->skip(sizeof(SkRect));
+ }
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawAtlas(atlas, xform, tex, colors, count, mode, cull, paint);
+ } break;
+ case DRAW_CLEAR: {
+ auto c = reader->readInt();
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->clear(c);
+ } break;
+ case DRAW_DATA: {
+            // This opcode is now dead; we just need to skip it for backward compatibility
+ size_t length = reader->readInt();
+ (void)reader->skip(length);
+ // skip handles padding the read out to a multiple of 4
+ } break;
+ case DRAW_DRAWABLE: {
+ auto* d = fPictureData->getDrawable(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawDrawable(d);
+ } break;
+ case DRAW_DRAWABLE_MATRIX: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ SkDrawable* drawable = fPictureData->getDrawable(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawDrawable(drawable, &matrix);
+ } break;
+ case DRAW_DRRECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRRect outer, inner;
+ reader->readRRect(&outer);
+ reader->readRRect(&inner);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawDRRect(outer, inner, *paint);
+ }
+ } break;
+ case DRAW_EDGEAA_QUAD: {
+ SkRect rect;
+ reader->readRect(&rect);
+ SkCanvas::QuadAAFlags aaFlags = static_cast<SkCanvas::QuadAAFlags>(reader->read32());
+ SkColor4f color;
+ if (reader->isVersionLT(SkPicturePriv::kEdgeAAQuadColor4f_Version)) {
+ // Old version stored color as 8888
+ color = SkColor4f::FromColor(reader->read32());
+ } else {
+ reader->readColor4f(&color);
+ }
+ SkBlendMode blend = static_cast<SkBlendMode>(reader->read32());
+ bool hasClip = reader->readInt();
+ SkPoint* clip = nullptr;
+ if (hasClip) {
+ clip = (SkPoint*) reader->skip(4, sizeof(SkPoint));
+ }
+ BREAK_ON_READ_ERROR(reader);
+ canvas->experimental_DrawEdgeAAQuad(rect, clip, aaFlags, color, blend);
+ } break;
+ case DRAW_EDGEAA_IMAGE_SET: {
+ static const size_t kEntryReadSize =
+ 4 * sizeof(uint32_t) + 2 * sizeof(SkRect) + sizeof(SkScalar);
+ static const size_t kMatrixSize = 9 * sizeof(SkScalar); // != sizeof(SkMatrix)
+
+ int cnt = reader->readInt();
+ if (!reader->validate(cnt >= 0)) {
+ break;
+ }
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkCanvas::SrcRectConstraint constraint =
+ static_cast<SkCanvas::SrcRectConstraint>(reader->readInt());
+
+ if (!reader->validate(SkSafeMath::Mul(cnt, kEntryReadSize) <= reader->available())) {
+ break;
+ }
+
+            // Track the minimum number of clip points and matrices that must be provided to
+            // satisfy the entries.
+ int expectedClips = 0;
+ int maxMatrixIndex = -1;
+ SkAutoTArray<SkCanvas::ImageSetEntry> set(cnt);
+ for (int i = 0; i < cnt && reader->isValid(); ++i) {
+ set[i].fImage = sk_ref_sp(fPictureData->getImage(reader));
+ reader->readRect(&set[i].fSrcRect);
+ reader->readRect(&set[i].fDstRect);
+ set[i].fMatrixIndex = reader->readInt();
+ set[i].fAlpha = reader->readScalar();
+ set[i].fAAFlags = reader->readUInt();
+ set[i].fHasClip = reader->readInt();
+
+ expectedClips += set[i].fHasClip ? 1 : 0;
+ if (set[i].fMatrixIndex > maxMatrixIndex) {
+ maxMatrixIndex = set[i].fMatrixIndex;
+ }
+ }
+
+ int dstClipCount = reader->readInt();
+ SkPoint* dstClips = nullptr;
+ if (!reader->validate(expectedClips <= dstClipCount)) {
+ // Entries request more dstClip points than are provided in the buffer
+ break;
+ } else if (dstClipCount > 0) {
+ dstClips = (SkPoint*) reader->skip(dstClipCount, sizeof(SkPoint));
+ if (dstClips == nullptr) {
+ // Not enough bytes remaining so the reader has been invalidated
+ break;
+ }
+ }
+ int matrixCount = reader->readInt();
+ if (!reader->validate((maxMatrixIndex + 1) <= matrixCount) ||
+ !reader->validate(
+ SkSafeMath::Mul(matrixCount, kMatrixSize) <= reader->available())) {
+                // Entries reference matrix indices beyond the provided matrices, or there
+                // aren't enough bytes remaining to supply that many matrices
+ break;
+ }
+ SkTArray<SkMatrix> matrices(matrixCount);
+ for (int i = 0; i < matrixCount && reader->isValid(); ++i) {
+ reader->readMatrix(&matrices.push_back());
+ }
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->experimental_DrawEdgeAAImageSet(set.get(), cnt, dstClips, matrices.begin(),
+ paint, constraint);
+ } break;
+ case DRAW_IMAGE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkPoint loc;
+ reader->readPoint(&loc);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawImage(image, loc.fX, loc.fY, paint);
+ } break;
+ case DRAW_IMAGE_LATTICE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkCanvas::Lattice lattice;
+ (void)SkCanvasPriv::ReadLattice(*reader, &lattice);
+ const SkRect* dst = reader->skipT<SkRect>();
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawImageLattice(image, lattice, *dst, paint);
+ } break;
+ case DRAW_IMAGE_NINE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkIRect center;
+ reader->readIRect(&center);
+ SkRect dst;
+ reader->readRect(&dst);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawImageNine(image, center, dst, paint);
+ } break;
+        case DRAW_IMAGE_RECT_STRICT:
+        case DRAW_IMAGE_RECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkRect storage;
+ const SkRect* src = get_rect_ptr(reader, &storage); // may be null
+ SkRect dst;
+ reader->readRect(&dst); // required
+ // DRAW_IMAGE_RECT_STRICT assumes this constraint, and doesn't store it
+ SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
+ if (DRAW_IMAGE_RECT == op) {
+ // newer op-code stores the constraint explicitly
+ constraint = (SkCanvas::SrcRectConstraint)reader->readInt();
+ }
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->legacy_drawImageRect(image, src, dst, paint, constraint);
+ } break;
+ case DRAW_OVAL: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawOval(rect, *paint);
+ }
+ } break;
+ case DRAW_PAINT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawPaint(*paint);
+ }
+ } break;
+ case DRAW_BEHIND_PAINT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ SkCanvasPriv::DrawBehind(canvas, *paint);
+ }
+ } break;
+ case DRAW_PATCH: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+
+ const SkPoint* cubics = (const SkPoint*)reader->skip(SkPatchUtils::kNumCtrlPts,
+ sizeof(SkPoint));
+ uint32_t flag = reader->readInt();
+ const SkColor* colors = nullptr;
+ if (flag & DRAW_VERTICES_HAS_COLORS) {
+ colors = (const SkColor*)reader->skip(SkPatchUtils::kNumCorners, sizeof(SkColor));
+ }
+ const SkPoint* texCoords = nullptr;
+ if (flag & DRAW_VERTICES_HAS_TEXS) {
+ texCoords = (const SkPoint*)reader->skip(SkPatchUtils::kNumCorners,
+ sizeof(SkPoint));
+ }
+ SkBlendMode bmode = SkBlendMode::kModulate;
+ if (flag & DRAW_VERTICES_HAS_XFER) {
+ unsigned mode = reader->readInt();
+ if (mode <= (unsigned)SkBlendMode::kLastMode) {
+ bmode = (SkBlendMode)mode;
+ }
+ }
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawPatch(cubics, colors, texCoords, bmode, *paint);
+ }
+ } break;
+ case DRAW_PATH: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const auto& path = fPictureData->getPath(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawPath(path, *paint);
+ }
+ } break;
+ case DRAW_PICTURE: {
+ const auto* pic = fPictureData->getPicture(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawPicture(pic);
+ } break;
+ case DRAW_PICTURE_MATRIX_PAINT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ const SkPicture* pic = fPictureData->getPicture(reader);
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->drawPicture(pic, &matrix, paint);
+ } break;
+ case DRAW_POINTS: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkCanvas::PointMode mode = (SkCanvas::PointMode)reader->readInt();
+ size_t count = reader->readInt();
+ const SkPoint* pts = (const SkPoint*)reader->skip(count, sizeof(SkPoint));
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawPoints(mode, count, pts, *paint);
+ }
+ } break;
+ case DRAW_RECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawRect(rect, *paint);
+ }
+ } break;
+ case DRAW_REGION: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRegion region;
+ reader->readRegion(&region);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawRegion(region, *paint);
+ }
+ } break;
+ case DRAW_RRECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRRect rrect;
+ reader->readRRect(&rrect);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawRRect(rrect, *paint);
+ }
+ } break;
+ case DRAW_SHADOW_REC: {
+ const auto& path = fPictureData->getPath(reader);
+ SkDrawShadowRec rec;
+ reader->readPoint3(&rec.fZPlaneParams);
+ reader->readPoint3(&rec.fLightPos);
+ rec.fLightRadius = reader->readScalar();
+ if (reader->isVersionLT(SkPicturePriv::kTwoColorDrawShadow_Version)) {
+ SkScalar ambientAlpha = reader->readScalar();
+ SkScalar spotAlpha = reader->readScalar();
+ SkColor color = reader->read32();
+ rec.fAmbientColor = SkColorSetA(color, SkColorGetA(color)*ambientAlpha);
+ rec.fSpotColor = SkColorSetA(color, SkColorGetA(color)*spotAlpha);
+ } else {
+ rec.fAmbientColor = reader->read32();
+ rec.fSpotColor = reader->read32();
+ }
+ rec.fFlags = reader->read32();
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->private_draw_shadow_rec(path, rec);
+ } break;
+ case DRAW_TEXT_BLOB: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkTextBlob* blob = fPictureData->getTextBlob(reader);
+ SkScalar x = reader->readScalar();
+ SkScalar y = reader->readScalar();
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint) {
+ canvas->drawTextBlob(blob, x, y, *paint);
+ }
+ } break;
+ case DRAW_VERTICES_OBJECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkVertices* vertices = fPictureData->getVertices(reader);
+ const int boneCount = reader->readInt();
+ const SkVertices::Bone* bones = boneCount ?
+ (const SkVertices::Bone*) reader->skip(boneCount, sizeof(SkVertices::Bone)) :
+ nullptr;
+ SkBlendMode bmode = reader->read32LE(SkBlendMode::kLastMode);
+ BREAK_ON_READ_ERROR(reader);
+
+ if (paint && vertices) {
+ canvas->drawVertices(vertices, bones, boneCount, bmode, *paint);
+ }
+ } break;
+ case RESTORE:
+ canvas->restore();
+ break;
+ case ROTATE: {
+ auto deg = reader->readScalar();
+ canvas->rotate(deg);
+ } break;
+ case SAVE:
+ canvas->save();
+ break;
+ case SAVE_BEHIND: {
+ uint32_t flags = reader->readInt();
+ const SkRect* subset = nullptr;
+ SkRect storage;
+ if (flags & SAVEBEHIND_HAS_SUBSET) {
+ reader->readRect(&storage);
+ subset = &storage;
+ }
+ SkCanvasPriv::SaveBehind(canvas, subset);
+ } break;
+ case SAVE_LAYER_SAVEFLAGS_DEPRECATED: {
+ SkRect storage;
+ const SkRect* boundsPtr = get_rect_ptr(reader, &storage);
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ auto flags = SkCanvasPriv::LegacySaveFlagsToSaveLayerFlags(reader->readInt());
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->saveLayer(SkCanvas::SaveLayerRec(boundsPtr, paint, flags));
+ } break;
+ case SAVE_LAYER_SAVELAYERREC: {
+ SkCanvas::SaveLayerRec rec(nullptr, nullptr, nullptr, nullptr, nullptr, 0);
+ SkMatrix clipMatrix;
+ const uint32_t flatFlags = reader->readInt();
+ SkRect bounds;
+ if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
+ reader->readRect(&bounds);
+ rec.fBounds = &bounds;
+ }
+ if (flatFlags & SAVELAYERREC_HAS_PAINT) {
+ rec.fPaint = fPictureData->getPaint(reader);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
+ if (const auto* paint = fPictureData->getPaint(reader)) {
+ rec.fBackdrop = paint->getImageFilter();
+ }
+ }
+ if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
+ rec.fSaveLayerFlags = reader->readInt();
+ }
+ if (flatFlags & SAVELAYERREC_HAS_CLIPMASK) {
+ rec.fClipMask = fPictureData->getImage(reader);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_CLIPMATRIX) {
+ reader->readMatrix(&clipMatrix);
+ rec.fClipMatrix = &clipMatrix;
+ }
+ BREAK_ON_READ_ERROR(reader);
+
+ canvas->saveLayer(rec);
+ } break;
+ case SCALE: {
+ SkScalar sx = reader->readScalar();
+ SkScalar sy = reader->readScalar();
+ canvas->scale(sx, sy);
+ } break;
+ case SET_MATRIX: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ matrix.postConcat(initialMatrix);
+ canvas->setMatrix(matrix);
+ } break;
+ case SKEW: {
+ SkScalar sx = reader->readScalar();
+ SkScalar sy = reader->readScalar();
+ canvas->skew(sx, sy);
+ } break;
+ case TRANSLATE: {
+ SkScalar dx = reader->readScalar();
+ SkScalar dy = reader->readScalar();
+ canvas->translate(dx, dy);
+ } break;
+ default:
+ reader->validate(false); // unknown op
+ break;
+ }
+
+#undef BREAK_ON_READ_ERROR
+}
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.h b/gfx/skia/skia/src/core/SkPicturePlayback.h
new file mode 100644
index 0000000000..76f7cad6b9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicturePlayback_DEFINED
+#define SkPicturePlayback_DEFINED
+
+#include "src/core/SkPictureFlat.h"
+
+class SkBitmap;
+class SkCanvas;
+class SkPaint;
+class SkPictureData;
+
+// The basic picture playback class replays the provided picture into a canvas.
+class SkPicturePlayback final : SkNoncopyable {
+public:
+ SkPicturePlayback(const SkPictureData* data)
+ : fPictureData(data)
+ , fCurOffset(0) {
+ }
+
+ void draw(SkCanvas* canvas, SkPicture::AbortCallback*, SkReadBuffer* buffer);
+
+ // TODO: remove the curOp calls after cleaning up GrGatherDevice
+ // Return the ID of the operation currently being executed when playing
+ // back. 0 indicates no call is active.
+ size_t curOpID() const { return fCurOffset; }
+ void resetOpID() { fCurOffset = 0; }
+
+protected:
+ const SkPictureData* fPictureData;
+
+ // The offset of the current operation when within the draw method
+ size_t fCurOffset;
+
+ void handleOp(SkReadBuffer* reader,
+ DrawType op,
+ uint32_t size,
+ SkCanvas* canvas,
+ const SkMatrix& initialMatrix);
+
+ static DrawType ReadOpAndSize(SkReadBuffer* reader, uint32_t* size);
+
+ class AutoResetOpID {
+ public:
+ AutoResetOpID(SkPicturePlayback* playback) : fPlayback(playback) { }
+ ~AutoResetOpID() {
+ if (fPlayback) {
+ fPlayback->resetOpID();
+ }
+ }
+
+ private:
+ SkPicturePlayback* fPlayback;
+ };
+
+private:
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPicturePriv.h b/gfx/skia/skia/src/core/SkPicturePriv.h
new file mode 100644
index 0000000000..7c95675ffb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePriv.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicturePriv_DEFINED
+#define SkPicturePriv_DEFINED
+
+#include "include/core/SkPicture.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkPicturePriv {
+public:
+ /**
+ * Recreate a picture that was serialized into a buffer. If the creation requires bitmap
+ * decoding, the decoder must be set on the SkReadBuffer parameter by calling
+     * SkReadBuffer::setBitmapDecoder() before calling MakeFromBuffer().
+ * @param buffer Serialized picture data.
+ * @return A new SkPicture representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkPicture> MakeFromBuffer(SkReadBuffer& buffer);
+
+ /**
+ * Serialize to a buffer.
+ */
+    static void Flatten(const sk_sp<const SkPicture>, SkWriteBuffer& buffer);
+
+ // Returns NULL if this is not an SkBigPicture.
+ static const SkBigPicture* AsSkBigPicture(const sk_sp<const SkPicture> picture) {
+ return picture->asSkBigPicture();
+ }
+
+    // V35: Store SkRect (rather than width & height) in header
+ // V36: Remove (obsolete) alphatype from SkColorTable
+ // V37: Added shadow only option to SkDropShadowImageFilter (last version to record CLEAR)
+ // V38: Added PictureResolution option to SkPictureImageFilter
+ // V39: Added FilterLevel option to SkPictureImageFilter
+ // V40: Remove UniqueID serialization from SkImageFilter.
+ // V41: Added serialization of SkBitmapSource's filterQuality parameter
+ // V42: Added a bool to SkPictureShader serialization to indicate did-we-serialize-a-picture?
+    // V43: Added DRAW_IMAGE and DRAW_IMAGE_RECT op codes to serialized data
+ // V44: Move annotations from paint to drawAnnotation
+ // V45: Add invNormRotation to SkLightingShader.
+ // V46: Add drawTextRSXform
+ // V47: Add occluder rect to SkBlurMaskFilter
+ // V48: Read and write extended SkTextBlobs.
+ // V49: Gradients serialized as SkColor4f + SkColorSpace
+ // V50: SkXfermode -> SkBlendMode
+ // V51: more SkXfermode -> SkBlendMode
+ // V52: Remove SkTextBlob::fRunCount
+ // V53: SaveLayerRec clip mask
+ // V54: ComposeShader can use a Mode or a Lerp
+ // V55: Drop blendmode[] from MergeImageFilter
+ // V56: Add TileMode in SkBlurImageFilter.
+ // V57: Sweep tiling info.
+ // V58: No more 2pt conical flipping.
+ // V59: No more LocalSpace option on PictureImageFilter
+ // V60: Remove flags in picture header
+    // V61: Change SkDrawShadowRec to take two colors rather than two alphas
+ // V62: Don't negate size of custom encoded images (don't write origin x,y either)
+ // V63: Store image bounds (including origin) instead of just width/height to support subsets
+ // V64: Remove occluder feature from blur maskFilter
+ // V65: Float4 paint color
+ // V66: Add saveBehind
+ // V67: Blobs serialize fonts instead of paints
+ // V68: Paint doesn't serialize font-related stuff
+ // V69: Clean up duplicated and redundant SkImageFilter related enums
+ // V70: Image filters definitions hidden, registered names updated to include "Impl"
+ // V71: Unify erode and dilate image filters
+ // V72: SkColorFilter_Matrix domain (rgba vs. hsla)
+ // V73: Use SkColor4f in per-edge AA quad API
+
+ enum Version {
+ kTileModeInBlurImageFilter_Version = 56,
+ kTileInfoInSweepGradient_Version = 57,
+ k2PtConicalNoFlip_Version = 58,
+ kRemovePictureImageFilterLocalSpace = 59,
+ kRemoveHeaderFlags_Version = 60,
+ kTwoColorDrawShadow_Version = 61,
+ kDontNegateImageSize_Version = 62,
+ kStoreImageBounds_Version = 63,
+ kRemoveOccluderFromBlurMaskFilter = 64,
+ kFloat4PaintColor_Version = 65,
+ kSaveBehind_Version = 66,
+ kSerializeFonts_Version = 67,
+ kPaintDoesntSerializeFonts_Version = 68,
+ kCleanupImageFilterEnums_Version = 69,
+ kHideImageFilterImpls_Version = 70,
+ kUnifyErodeDilateImpls_Version = 71,
+ kMatrixColorFilterDomain_Version = 72,
+ kEdgeAAQuadColor4f_Version = 73,
+
+ // Only SKPs within the min/current picture version range (inclusive) can be read.
+ kMin_Version = kTileModeInBlurImageFilter_Version,
+ kCurrent_Version = kEdgeAAQuadColor4f_Version
+ };
+
+ static_assert(kMin_Version <= 62, "Remove kFontAxes_bad from SkFontDescriptor.cpp");
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.cpp b/gfx/skia/skia/src/core/SkPictureRecord.cpp
new file mode 100644
index 0000000000..46ba4a42d2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.cpp
@@ -0,0 +1,932 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPictureRecord.h"
+
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkTSearch.h"
+#include "src/image/SkImage_Base.h"
+#include "src/utils/SkPatchUtils.h"
+
+#define HEAP_BLOCK_SIZE 4096
+
+enum {
+ // just need a value that save or getSaveCount would never return
+ kNoInitialSave = -1,
+};
+
+// A lot of basic types get stored as a uint32_t: bools, ints, paint indices, etc.
+static int const kUInt32Size = 4;
+
+SkPictureRecord::SkPictureRecord(const SkIRect& dimensions, uint32_t flags)
+ : INHERITED(dimensions)
+ , fRecordFlags(flags)
+ , fInitialSaveCount(kNoInitialSave) {
+}
+
+SkPictureRecord::SkPictureRecord(const SkISize& dimensions, uint32_t flags)
+ : SkPictureRecord(SkIRect::MakeSize(dimensions), flags) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPictureRecord::onFlush() {
+    size_t size = kUInt32Size; // op only
+ size_t initialOffset = this->addDraw(FLUSH, &size);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::willSave() {
+    // record the offset of this op, made non-positive to distinguish a save
+    // from a clip entry.
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+ this->recordSave();
+
+ this->INHERITED::willSave();
+}
+
+void SkPictureRecord::recordSave() {
+ // op only
+    size_t size = kUInt32Size;
+ size_t initialOffset = this->addDraw(SAVE, &size);
+
+ this->validate(initialOffset, size);
+}
+
+SkCanvas::SaveLayerStrategy SkPictureRecord::getSaveLayerStrategy(const SaveLayerRec& rec) {
+    // record the offset of this op, made non-positive to distinguish a save
+    // from a clip entry.
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+ this->recordSaveLayer(rec);
+
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+    /* Don't allocate the (potentially very big) layer now: we don't actually
+       need it during recording (and may not be able to afford it, since during
+       record our clip starts out the size of the picture, which is often much
+       larger than the size of the actual device we'll use during playback).
+    */
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkPictureRecord::onDoSaveBehind(const SkRect* subset) {
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+
+    size_t size = kUInt32Size + sizeof(uint32_t); // op + flags
+ uint32_t flags = 0;
+ if (subset) {
+ flags |= SAVEBEHIND_HAS_SUBSET;
+ size += sizeof(*subset);
+ }
+
+ size_t initialOffset = this->addDraw(SAVE_BEHIND, &size);
+ this->addInt(flags);
+ if (subset) {
+ this->addRect(*subset);
+ }
+
+ this->validate(initialOffset, size);
+ return false;
+}
+
+void SkPictureRecord::recordSaveLayer(const SaveLayerRec& rec) {
+ // op + flatflags
+ size_t size = 2 * kUInt32Size;
+ uint32_t flatFlags = 0;
+
+ if (rec.fBounds) {
+ flatFlags |= SAVELAYERREC_HAS_BOUNDS;
+ size += sizeof(*rec.fBounds);
+ }
+ if (rec.fPaint) {
+ flatFlags |= SAVELAYERREC_HAS_PAINT;
+ size += sizeof(uint32_t); // index
+ }
+ if (rec.fBackdrop) {
+ flatFlags |= SAVELAYERREC_HAS_BACKDROP;
+ size += sizeof(uint32_t); // (paint) index
+ }
+ if (rec.fSaveLayerFlags) {
+ flatFlags |= SAVELAYERREC_HAS_FLAGS;
+ size += sizeof(uint32_t);
+ }
+ if (rec.fClipMask) {
+ flatFlags |= SAVELAYERREC_HAS_CLIPMASK;
+ size += sizeof(uint32_t); // clip image index
+ }
+ if (rec.fClipMatrix) {
+ flatFlags |= SAVELAYERREC_HAS_CLIPMATRIX;
+ size += SkMatrixPriv::WriteToMemory(*rec.fClipMatrix, nullptr);
+ }
+
+ const size_t initialOffset = this->addDraw(SAVE_LAYER_SAVELAYERREC, &size);
+ this->addInt(flatFlags);
+ if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
+ this->addRect(*rec.fBounds);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_PAINT) {
+ this->addPaintPtr(rec.fPaint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
+        // Overkill, but we don't have a way to track single flattenables, so
+        // we wrap the backdrop filter in a paint.
+ SkPaint paint;
+ paint.setImageFilter(sk_ref_sp(const_cast<SkImageFilter*>(rec.fBackdrop)));
+ this->addPaint(paint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
+ this->addInt(rec.fSaveLayerFlags);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_CLIPMASK) {
+ this->addImage(rec.fClipMask);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_CLIPMATRIX) {
+ this->addMatrix(*rec.fClipMatrix);
+ }
+ this->validate(initialOffset, size);
+}
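+
+// Note: the field order written by recordSaveLayer must match the
+// flag-by-flag reads in the SAVE_LAYER_SAVELAYERREC case of
+// SkPicturePlayback::handleOp: bounds, paint, backdrop, flags, clip mask,
+// clip matrix.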
+
+#ifdef SK_DEBUG
+/*
+ * Read the op code from 'offset' in 'writer' and extract the size too.
+ */
+static DrawType peek_op_and_size(SkWriter32* writer, size_t offset, uint32_t* size) {
+ uint32_t peek = writer->readTAt<uint32_t>(offset);
+
+ uint32_t op;
+ UNPACK_8_24(peek, op, *size);
+ if (MASK_24 == *size) {
+ // size required its own slot right after the op code
+ *size = writer->readTAt<uint32_t>(offset + kUInt32Size);
+ }
+ return (DrawType) op;
+}
+#endif // SK_DEBUG
+
+void SkPictureRecord::willRestore() {
+#if 0
+ SkASSERT(fRestoreOffsetStack.count() > 1);
+#endif
+
+ // check for underflow
+ if (fRestoreOffsetStack.count() == 0) {
+ return;
+ }
+
+ this->recordRestore();
+
+ fRestoreOffsetStack.pop();
+
+ this->INHERITED::willRestore();
+}
+
+void SkPictureRecord::recordRestore(bool fillInSkips) {
+ if (fillInSkips) {
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel((uint32_t)fWriter.bytesWritten());
+ }
+ size_t size = 1 * kUInt32Size; // RESTORE consists solely of 1 op code
+ size_t initialOffset = this->addDraw(RESTORE, &size);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordTranslate(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kTranslate_Mask == m.getType());
+
+ // op + dx + dy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(TRANSLATE, &size);
+ this->addScalar(m.getTranslateX());
+ this->addScalar(m.getTranslateY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordScale(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kScale_Mask == m.getType());
+
+ // op + sx + sy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(SCALE, &size);
+ this->addScalar(m.getScaleX());
+ this->addScalar(m.getScaleY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::didConcat(const SkMatrix& matrix) {
+ switch (matrix.getType()) {
+ case SkMatrix::kTranslate_Mask:
+ this->recordTranslate(matrix);
+ break;
+ case SkMatrix::kScale_Mask:
+ this->recordScale(matrix);
+ break;
+ default:
+ this->recordConcat(matrix);
+ break;
+ }
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkPictureRecord::recordConcat(const SkMatrix& matrix) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + SkMatrixPriv::WriteToMemory(matrix, nullptr);
+ size_t initialOffset = this->addDraw(CONCAT, &size);
+ this->addMatrix(matrix);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::didSetMatrix(const SkMatrix& matrix) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + SkMatrixPriv::WriteToMemory(matrix, nullptr);
+ size_t initialOffset = this->addDraw(SET_MATRIX, &size);
+ this->addMatrix(matrix);
+ this->validate(initialOffset, size);
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+static bool clipOpExpands(SkClipOp op) {
+ switch (op) {
+ case kUnion_SkClipOp:
+ case kXOR_SkClipOp:
+ case kReverseDifference_SkClipOp:
+ case kReplace_SkClipOp:
+ return true;
+ case kIntersect_SkClipOp:
+ case kDifference_SkClipOp:
+ return false;
+ default:
+ SkDEBUGFAIL("unknown clipop");
+ return false;
+ }
+}
+
+void SkPictureRecord::fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset) {
+ int32_t offset = fRestoreOffsetStack.top();
+ while (offset > 0) {
+ uint32_t peek = fWriter.readTAt<uint32_t>(offset);
+ fWriter.overwriteTAt(offset, restoreOffset);
+ offset = peek;
+ }
+
+#ifdef SK_DEBUG
+    // an offset of 0 has been disabled, so we skip it; a live chain ends at
+    // the negated offset of the enclosing save op
+    if (offset < 0) {
+ // assert that the final offset value points to a save verb
+ uint32_t opSize;
+ DrawType drawOp = peek_op_and_size(&fWriter, -offset, &opSize);
+ SkASSERT(SAVE == drawOp || SAVE_LAYER_SAVELAYERREC == drawOp);
+ }
+#endif
+}
+
+void SkPictureRecord::beginRecording() {
+    // we have to call this *after* our constructor, to ensure that it gets
+    // recorded. This is balanced by the restoreToCount() call from
+    // endRecording(), which in turn calls our overridden restore(), so those
+    // get recorded too.
+ fInitialSaveCount = this->save();
+}
+
+void SkPictureRecord::endRecording() {
+ SkASSERT(kNoInitialSave != fInitialSaveCount);
+ this->restoreToCount(fInitialSaveCount);
+}
+
+size_t SkPictureRecord::recordRestoreOffsetPlaceholder(SkClipOp op) {
+ if (fRestoreOffsetStack.isEmpty()) {
+ return -1;
+ }
+
+ // The RestoreOffset field is initially filled with a placeholder
+ // value that points to the offset of the previous RestoreOffset
+ // in the current stack level, thus forming a linked list so that
+ // the restore offsets can be filled in when the corresponding
+ // restore command is recorded.
+ int32_t prevOffset = fRestoreOffsetStack.top();
+
+ if (clipOpExpands(op)) {
+        // Run back through any previous clip ops and set their offsets to 0,
+        // disabling their ability to trigger a jump-to-restore; otherwise
+        // they could hide this clip's ability to expand the clip (i.e. to go
+        // from empty to non-empty).
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel(0);
+
+ // Reset the pointer back to the previous clip so that subsequent
+ // restores don't overwrite the offsets we just cleared.
+ prevOffset = 0;
+ }
+
+ size_t offset = fWriter.bytesWritten();
+ this->addInt(prevOffset);
+ fRestoreOffsetStack.top() = SkToU32(offset);
+ return offset;
+}
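+
+// For illustration (offsets are arbitrary): with the enclosing save recorded
+// at offset 12, willSave pushes -12. Two clip ops then write their restore
+// offset slots at, say, offsets 40 and 80: slot 40 holds -12, slot 80 holds
+// 40, and the stack top holds 80. When the matching restore is recorded at
+// offset 160, fillRestoreOffsetPlaceholdersForCurrentStackLevel(160) walks
+// 80 -> 40, overwriting both slots with 160 and stopping at the non-positive
+// -12.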
+
+void SkPictureRecord::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRect(const SkRect& rect, SkClipOp op, bool doAA) {
+    // op + rect + clip params
+ size_t size = 1 * kUInt32Size + sizeof(rect) + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RECT, &size);
+ this->addRect(rect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = this->recordRestoreOffsetPlaceholder(op);
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRRect(const SkRRect& rrect, SkClipOp op, bool doAA) {
+ // op + rrect + clip params
+ size_t size = 1 * kUInt32Size + SkRRect::kSizeInMemory + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RRECT, &size);
+ this->addRRect(rrect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder(op);
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ int pathID = this->addPathToHeap(path);
+ this->recordClipPath(pathID, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipPath(int pathID, SkClipOp op, bool doAA) {
+ // op + path index + clip params
+ size_t size = 3 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_PATH, &size);
+ this->addInt(pathID);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder(op);
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipRegion(const SkRegion& region, SkClipOp op) {
+ this->recordClipRegion(region, op);
+ this->INHERITED::onClipRegion(region, op);
+}
+
+size_t SkPictureRecord::recordClipRegion(const SkRegion& region, SkClipOp op) {
+    // op + region + clip params
+ size_t size = 2 * kUInt32Size + region.writeToMemory(nullptr);
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_REGION, &size);
+ this->addRegion(region);
+ this->addInt(ClipParams_pack(op, false));
+ size_t offset = this->recordRestoreOffsetPlaceholder(op);
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onDrawPaint(const SkPaint& paint) {
+ // op + paint index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PAINT, &size);
+ this->addPaint(paint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawBehind(const SkPaint& paint) {
+    // logically the same as drawPaint, but with a different enum
+ // op + paint index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_BEHIND_PAINT, &size);
+ this->addPaint(paint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ // op + paint index + mode + count + point data
+ size_t size = 4 * kUInt32Size + count * sizeof(SkPoint);
+ size_t initialOffset = this->addDraw(DRAW_POINTS, &size);
+ this->addPaint(paint);
+
+ this->addInt(mode);
+ this->addInt(SkToInt(count));
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(oval);
+ size_t initialOffset = this->addDraw(DRAW_OVAL, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ // op + paint index + rect + start + sweep + bool (as int)
+ size_t size = 2 * kUInt32Size + sizeof(oval) + sizeof(startAngle) + sizeof(sweepAngle) +
+ sizeof(int);
+ size_t initialOffset = this->addDraw(DRAW_ARC, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->addScalar(startAngle);
+ this->addScalar(sweepAngle);
+ this->addInt(useCenter);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(rect);
+ size_t initialOffset = this->addDraw(DRAW_RECT, &size);
+ this->addPaint(paint);
+ this->addRect(rect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ // op + paint index + region
+ size_t regionBytes = region.writeToMemory(nullptr);
+ size_t size = 2 * kUInt32Size + regionBytes;
+ size_t initialOffset = this->addDraw(DRAW_REGION, &size);
+ this->addPaint(paint);
+ fWriter.writeRegion(region);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ // op + paint index + rrect
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory;
+ size_t initialOffset = this->addDraw(DRAW_RRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(rrect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ // op + paint index + rrects
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory * 2;
+ size_t initialOffset = this->addDraw(DRAW_DRRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(outer);
+ this->addRRect(inner);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ // op + paint index + path index
+ size_t size = 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PATH, &size);
+ this->addPaint(paint);
+ this->addPath(path);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImage(const SkImage* image, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ // op + paint_index + image_index + x + y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(DRAW_IMAGE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addScalar(x);
+ this->addScalar(y);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+    // op + paint_index + image_index + bool_for_src + constraint
+ size_t size = 5 * kUInt32Size;
+ if (src) {
+ size += sizeof(*src); // + rect
+ }
+ size += sizeof(dst); // + rect
+
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_RECT, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addRectPtr(src); // may be null
+ this->addRect(dst);
+ this->addInt(constraint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageNine(const SkImage* img, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+    // op + paint_index + image_index + center + dst
+ size_t size = 3 * kUInt32Size + sizeof(SkIRect) + sizeof(SkRect);
+
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_NINE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(img);
+ this->addIRect(center);
+ this->addRect(dst);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ size_t latticeSize = SkCanvasPriv::WriteLattice(nullptr, lattice);
+ // op + paint index + image index + lattice + dst rect
+ size_t size = 3 * kUInt32Size + latticeSize + sizeof(dst);
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_LATTICE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ (void)SkCanvasPriv::WriteLattice(fWriter.reservePad(latticeSize), lattice);
+ this->addRect(dst);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+
+ // op + paint index + blob index + x/y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(DRAW_TEXT_BLOB, &size);
+
+ this->addPaint(paint);
+ this->addTextBlob(blob);
+ this->addScalar(x);
+ this->addScalar(y);
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ // op + picture index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix && nullptr == paint) {
+ initialOffset = this->addDraw(DRAW_PICTURE, &size);
+ this->addPicture(picture);
+ } else {
+ const SkMatrix& m = matrix ? *matrix : SkMatrix::I();
+ size += SkMatrixPriv::WriteToMemory(m, nullptr) + kUInt32Size; // matrix + paint
+ initialOffset = this->addDraw(DRAW_PICTURE_MATRIX_PAINT, &size);
+ this->addPaintPtr(paint);
+ this->addMatrix(m);
+ this->addPicture(picture);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // op + drawable index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix) {
+ initialOffset = this->addDraw(DRAW_DRAWABLE, &size);
+ this->addDrawable(drawable);
+ } else {
+ size += SkMatrixPriv::WriteToMemory(*matrix, nullptr); // matrix
+ initialOffset = this->addDraw(DRAW_DRAWABLE_MATRIX, &size);
+ this->addMatrix(*matrix);
+ this->addDrawable(drawable);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawVerticesObject(const SkVertices* vertices,
+ const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode mode, const SkPaint& paint) {
+ // op + paint index + vertices index + number of bones + bone matrices + mode
+ size_t size = 5 * kUInt32Size + boneCount * sizeof(SkVertices::Bone);
+ size_t initialOffset = this->addDraw(DRAW_VERTICES_OBJECT, &size);
+
+ this->addPaint(paint);
+ this->addVertices(vertices);
+ this->addInt(boneCount);
+ fWriter.write(bones, boneCount * sizeof(SkVertices::Bone));
+ this->addInt(static_cast<uint32_t>(mode));
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+    // op + paint index + 12 patch control points + flag, plus optional
+    // 4 patch colors, 4 texture coordinates, and blend mode
+ size_t size = 2 * kUInt32Size + SkPatchUtils::kNumCtrlPts * sizeof(SkPoint) + kUInt32Size;
+ uint32_t flag = 0;
+ if (colors) {
+ flag |= DRAW_VERTICES_HAS_COLORS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkColor);
+ }
+ if (texCoords) {
+ flag |= DRAW_VERTICES_HAS_TEXS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkPoint);
+ }
+ if (SkBlendMode::kModulate != bmode) {
+ flag |= DRAW_VERTICES_HAS_XFER;
+ size += kUInt32Size;
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_PATCH, &size);
+ this->addPaint(paint);
+ this->addPatch(cubics);
+ this->addInt(flag);
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, SkPatchUtils::kNumCorners * sizeof(SkColor));
+ }
+ if (texCoords) {
+ fWriter.write(texCoords, SkPatchUtils::kNumCorners * sizeof(SkPoint));
+ }
+ if (flag & DRAW_VERTICES_HAS_XFER) {
+ this->addInt((int)bmode);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ // [op + paint-index + atlas-index + flags + count] + [xform] + [tex] + [*colors + mode] + cull
+ size_t size = 5 * kUInt32Size + count * sizeof(SkRSXform) + count * sizeof(SkRect);
+ uint32_t flags = 0;
+ if (colors) {
+ flags |= DRAW_ATLAS_HAS_COLORS;
+ size += count * sizeof(SkColor);
+ size += sizeof(uint32_t); // xfermode::mode
+ }
+ if (cull) {
+ flags |= DRAW_ATLAS_HAS_CULL;
+ size += sizeof(SkRect);
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_ATLAS, &size);
+ this->addPaintPtr(paint);
+ this->addImage(atlas);
+ this->addInt(flags);
+ this->addInt(count);
+ fWriter.write(xform, count * sizeof(SkRSXform));
+ fWriter.write(tex, count * sizeof(SkRect));
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, count * sizeof(SkColor));
+ this->addInt((int)mode);
+ }
+ if (cull) {
+ fWriter.write(cull, sizeof(SkRect));
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+    // op + path index + zParams + lightPos + lightRadius + ambient color + spot color + flags
+ size_t size = 2 * kUInt32Size + 2 * sizeof(SkPoint3) + 1 * sizeof(SkScalar) + 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_SHADOW_REC, &size);
+
+ this->addPath(path);
+
+ fWriter.writePoint3(rec.fZPlaneParams);
+ fWriter.writePoint3(rec.fLightPos);
+ fWriter.writeScalar(rec.fLightRadius);
+ fWriter.write32(rec.fAmbientColor);
+ fWriter.write32(rec.fSpotColor);
+ fWriter.write32(rec.fFlags);
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ size_t keyLen = fWriter.WriteStringSize(key);
+ size_t valueLen = fWriter.WriteDataSize(value);
+ size_t size = 4 + sizeof(SkRect) + keyLen + valueLen;
+
+ size_t initialOffset = this->addDraw(DRAW_ANNOTATION, &size);
+ this->addRect(rect);
+ fWriter.writeString(key);
+ fWriter.writeData(value);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aa, const SkColor4f& color,
+ SkBlendMode mode) {
+
+ // op + rect + aa flags + color + mode + hasClip(as int) + clipCount*points
+ size_t size = 4 * kUInt32Size + sizeof(SkColor4f) + sizeof(rect) +
+ (clip ? 4 : 0) * sizeof(SkPoint);
+ size_t initialOffset = this->addDraw(DRAW_EDGEAA_QUAD, &size);
+ this->addRect(rect);
+ this->addInt((int) aa);
+ fWriter.write(&color, sizeof(SkColor4f));
+ this->addInt((int) mode);
+ this->addInt(clip != nullptr);
+ if (clip) {
+ this->addPoints(clip, 4);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawEdgeAAImageSet(const SkCanvas::ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ static constexpr size_t kMatrixSize = 9 * sizeof(SkScalar); // *not* sizeof(SkMatrix)
+ // op + count + paint + constraint + (image index, src rect, dst rect, alpha, aa flags,
+ // hasClip(int), matrixIndex) * cnt + totalClipCount + dstClips + totalMatrixCount + matrices
+ int totalDstClipCount, totalMatrixCount;
+ SkCanvasPriv::GetDstClipAndMatrixCounts(set, count, &totalDstClipCount, &totalMatrixCount);
+
+ size_t size = 6 * kUInt32Size + sizeof(SkPoint) * totalDstClipCount +
+ kMatrixSize * totalMatrixCount +
+ (4 * kUInt32Size + 2 * sizeof(SkRect) + sizeof(SkScalar)) * count;
+ size_t initialOffset = this->addDraw(DRAW_EDGEAA_IMAGE_SET, &size);
+ this->addInt(count);
+ this->addPaintPtr(paint);
+ this->addInt((int) constraint);
+ for (int i = 0; i < count; ++i) {
+ this->addImage(set[i].fImage.get());
+ this->addRect(set[i].fSrcRect);
+ this->addRect(set[i].fDstRect);
+ this->addInt(set[i].fMatrixIndex);
+ this->addScalar(set[i].fAlpha);
+ this->addInt((int)set[i].fAAFlags);
+ this->addInt(set[i].fHasClip);
+ }
+ this->addInt(totalDstClipCount);
+ this->addPoints(dstClips, totalDstClipCount);
+ this->addInt(totalMatrixCount);
+ for (int i = 0; i < totalMatrixCount; ++i) {
+ this->addMatrix(preViewMatrices[i]);
+ }
+ this->validate(initialOffset, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// De-duping helper.
+
+template <typename T>
+static bool equals(T* a, T* b) { return a->uniqueID() == b->uniqueID(); }
+
+template <>
+bool equals(SkDrawable* a, SkDrawable* b) {
+ // SkDrawable's generationID is not a stable unique identifier.
+ return a == b;
+}
+
+template <typename T>
+static int find_or_append(SkTArray<sk_sp<T>>& array, T* obj) {
+ for (int i = 0; i < array.count(); i++) {
+ if (equals(array[i].get(), obj)) {
+ return i;
+ }
+ }
+
+ array.push_back(sk_ref_sp(obj));
+
+ return array.count() - 1;
+}
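+
+// For illustration: find_or_append(fImages, img) returns the same slot every
+// time the same image is recorded, so repeated draws serialize as one image
+// plus small indices. Note the convention split below: images are written
+// 0-based, while pictures, drawables, text blobs, and vertices are written
+// 1-based, reserving 0 for null.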
+
+sk_sp<SkSurface> SkPictureRecord::onNewSurface(const SkImageInfo& info, const SkSurfaceProps&) {
+ return nullptr;
+}
+
+void SkPictureRecord::addImage(const SkImage* image) {
+ // convention for images is 0-based index
+ this->addInt(find_or_append(fImages, image));
+}
+
+void SkPictureRecord::addMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkPictureRecord::addPaintPtr(const SkPaint* paint) {
+ if (paint) {
+ fPaints.push_back(*paint);
+ this->addInt(fPaints.count());
+ } else {
+ this->addInt(0);
+ }
+}
+
+int SkPictureRecord::addPathToHeap(const SkPath& path) {
+ if (int* n = fPaths.find(path)) {
+ return *n;
+ }
+ int n = fPaths.count() + 1; // 0 is reserved for null / error.
+ fPaths.set(path, n);
+ return n;
+}
+
+void SkPictureRecord::addPath(const SkPath& path) {
+ this->addInt(this->addPathToHeap(path));
+}
+
+void SkPictureRecord::addPatch(const SkPoint cubics[12]) {
+ fWriter.write(cubics, SkPatchUtils::kNumCtrlPts * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addPicture(const SkPicture* picture) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fPictures, picture) + 1);
+}
+
+void SkPictureRecord::addDrawable(SkDrawable* drawable) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fDrawables, drawable) + 1);
+}
+
+void SkPictureRecord::addPoint(const SkPoint& point) {
+ fWriter.writePoint(point);
+}
+
+void SkPictureRecord::addPoints(const SkPoint pts[], int count) {
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addNoOp() {
+ size_t size = kUInt32Size; // op
+ this->addDraw(NOOP, &size);
+}
+
+void SkPictureRecord::addRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkPictureRecord::addRectPtr(const SkRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ fWriter.writeRect(*rect);
+ }
+}
+
+void SkPictureRecord::addIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(rect));
+}
+
+void SkPictureRecord::addIRectPtr(const SkIRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ *(SkIRect*)fWriter.reserve(sizeof(SkIRect)) = *rect;
+ }
+}
+
+void SkPictureRecord::addRRect(const SkRRect& rrect) {
+ fWriter.writeRRect(rrect);
+}
+
+void SkPictureRecord::addRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkPictureRecord::addText(const void* text, size_t byteLength) {
+ addInt(SkToInt(byteLength));
+ fWriter.writePad(text, byteLength);
+}
+
+void SkPictureRecord::addTextBlob(const SkTextBlob* blob) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fTextBlobs, blob) + 1);
+}
+
+void SkPictureRecord::addVertices(const SkVertices* vertices) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fVertices, vertices) + 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.h b/gfx/skia/skia/src/core/SkPictureRecord.h
new file mode 100644
index 0000000000..b83eec2893
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecord_DEFINED
+#define SkPictureRecord_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTHash.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkWriter32.h"
+
+// These macros help with packing and unpacking a single-byte value and
+// a 3-byte value into/out of a uint32_t.
+#define MASK_24 0x00FFFFFF
+#define UNPACK_8_24(combined, small, large) \
+ small = (combined >> 24) & 0xFF; \
+ large = combined & MASK_24
+#define PACK_8_24(small, large) ((small << 24) | large)
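+
+// For illustration: PACK_8_24(0x11, 40) yields 0x11000028, and
+// UNPACK_8_24(0x11000028, op, size) recovers op == 0x11 and size == 40
+// (the op value here is arbitrary).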
+
+
+class SkPictureRecord : public SkCanvasVirtualEnforcer<SkCanvas> {
+public:
+ SkPictureRecord(const SkISize& dimensions, uint32_t recordFlags);
+
+ SkPictureRecord(const SkIRect& dimensions, uint32_t recordFlags);
+
+ const SkTArray<sk_sp<const SkPicture>>& getPictures() const {
+ return fPictures;
+ }
+
+ const SkTArray<sk_sp<SkDrawable>>& getDrawables() const {
+ return fDrawables;
+ }
+
+ const SkTArray<sk_sp<const SkTextBlob>>& getTextBlobs() const {
+ return fTextBlobs;
+ }
+
+ const SkTArray<sk_sp<const SkVertices>>& getVertices() const {
+ return fVertices;
+ }
+
+ const SkTArray<sk_sp<const SkImage>>& getImages() const {
+ return fImages;
+ }
+
+ sk_sp<SkData> opData() const {
+ this->validate(fWriter.bytesWritten(), 0);
+
+ if (fWriter.bytesWritten() == 0) {
+ return SkData::MakeEmpty();
+ }
+ return fWriter.snapshotAsData();
+ }
+
+ void setFlags(uint32_t recordFlags) {
+ fRecordFlags = recordFlags;
+ }
+
+ const SkWriter32& writeStream() const {
+ return fWriter;
+ }
+
+ void beginRecording();
+ void endRecording();
+
+protected:
+ void addNoOp();
+
+private:
+ void handleOptimization(int opt);
+ size_t recordRestoreOffsetPlaceholder(SkClipOp);
+ void fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset);
+
+ SkTDArray<int32_t> fRestoreOffsetStack;
+
+ SkTDArray<uint32_t> fCullOffsetStack;
+
+ /*
+ * Write the 'drawType' operation and chunk size to the skp. 'size'
+ * can potentially be increased if the chunk size needs its own storage
+ * location (i.e., it overflows 24 bits).
+ * Returns the start offset of the chunk. This is the location at which
+ * the opcode & size are stored.
+ * TODO: since we are handing the size into here we could call reserve
+ * and then return a pointer to the memory storage. This could decrease
+ * allocation overhead but could lead to more wasted space (the tail
+ * end of blocks could go unused). Possibly add a second addDraw that
+ * operates in this manner.
+ */
+ size_t addDraw(DrawType drawType, size_t* size) {
+ size_t offset = fWriter.bytesWritten();
+
+ this->predrawNotify();
+
+ SkASSERT(0 != *size);
+ SkASSERT(((uint8_t) drawType) == drawType);
+
+ if (0 != (*size & ~MASK_24) || *size == MASK_24) {
+ fWriter.writeInt(PACK_8_24(drawType, MASK_24));
+ *size += 1;
+ fWriter.writeInt(SkToU32(*size));
+ } else {
+ fWriter.writeInt(PACK_8_24(drawType, SkToU32(*size)));
+ }
+
+ return offset;
+ }
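+
+    // For illustration: a DRAW_RECT chunk is 24 bytes (op word + paint index
+    // + SkRect), so addDraw writes the single word PACK_8_24(DRAW_RECT, 24).
+    // A chunk whose size does not fit below MASK_24 is instead written as
+    // PACK_8_24(op, MASK_24) followed by the full size in its own uint32_t,
+    // mirroring ReadOpAndSize on the playback side.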
+
+ void addInt(int value) {
+ fWriter.writeInt(value);
+ }
+ void addScalar(SkScalar scalar) {
+ fWriter.writeScalar(scalar);
+ }
+
+ void addImage(const SkImage*);
+ void addMatrix(const SkMatrix& matrix);
+ void addPaint(const SkPaint& paint) { this->addPaintPtr(&paint); }
+ void addPaintPtr(const SkPaint* paint);
+ void addPatch(const SkPoint cubics[12]);
+ void addPath(const SkPath& path);
+ void addPicture(const SkPicture* picture);
+ void addDrawable(SkDrawable* picture);
+ void addPoint(const SkPoint& point);
+ void addPoints(const SkPoint pts[], int count);
+ void addRect(const SkRect& rect);
+ void addRectPtr(const SkRect* rect);
+ void addIRect(const SkIRect& rect);
+ void addIRectPtr(const SkIRect* rect);
+ void addRRect(const SkRRect&);
+ void addRegion(const SkRegion& region);
+ void addText(const void* text, size_t byteLength);
+ void addTextBlob(const SkTextBlob* blob);
+ void addVertices(const SkVertices*);
+
+ int find(const SkBitmap& bitmap);
+
+protected:
+ void validate(size_t initialOffset, size_t size) const {
+ SkASSERT(fWriter.bytesWritten() == initialOffset + size);
+ }
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ bool onPeekPixels(SkPixmap*) override { return false; }
+
+ void onFlush() override;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode, const SkPaint& paint) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkRect*, const SkPaint*) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkPaint*, SrcRectConstraint) override;
+
+ int addPathToHeap(const SkPath& path); // does not write to ops stream
+
+ // These entry points allow the writing of matrices, clips, saves &
+ // restores to be deferred (e.g., if the MC state is being collapsed and
+ // only written out as needed).
+ void recordConcat(const SkMatrix& matrix);
+ void recordTranslate(const SkMatrix& matrix);
+ void recordScale(const SkMatrix& matrix);
+ size_t recordClipRect(const SkRect& rect, SkClipOp op, bool doAA);
+ size_t recordClipRRect(const SkRRect& rrect, SkClipOp op, bool doAA);
+ size_t recordClipPath(int pathID, SkClipOp op, bool doAA);
+ size_t recordClipRegion(const SkRegion& region, SkClipOp op);
+ void recordSave();
+ void recordSaveLayer(const SaveLayerRec&);
+ void recordRestore(bool fillInSkips = true);
+
+ // SHOULD NEVER BE CALLED
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override {
+ SK_ABORT("not reached");
+ }
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override {
+ SK_ABORT("not reached");
+ }
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override {
+ SK_ABORT("not reached");
+ }
+ void onDrawBitmapLattice(const SkBitmap&, const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override {
+ SK_ABORT("not reached");
+ }
+
+private:
+ SkTArray<SkPaint> fPaints;
+
+ struct PathHash {
+ uint32_t operator()(const SkPath& p) { return p.getGenerationID(); }
+ };
+ SkTHashMap<SkPath, int, PathHash> fPaths;
+
+ SkWriter32 fWriter;
+
+ SkTArray<sk_sp<const SkImage>> fImages;
+ SkTArray<sk_sp<const SkPicture>> fPictures;
+ SkTArray<sk_sp<SkDrawable>> fDrawables;
+ SkTArray<sk_sp<const SkTextBlob>> fTextBlobs;
+ SkTArray<sk_sp<const SkVertices>> fVertices;
+
+ uint32_t fRecordFlags;
+ int fInitialSaveCount;
+
+ friend class SkPictureData; // for SkPictureData's SkPictureRecord-based constructor
+
+ typedef SkCanvasVirtualEnforcer<SkCanvas> INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureRecorder.cpp b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
new file mode 100644
index 0000000000..20cc7d8e32
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkMiniRecorder.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkRecordOpts.h"
+#include "src/core/SkRecordedDrawable.h"
+#include "src/core/SkRecorder.h"
+
+SkPictureRecorder::SkPictureRecorder() {
+ fActivelyRecording = false;
+ fMiniRecorder.reset(new SkMiniRecorder);
+ fRecorder.reset(new SkRecorder(nullptr, SkRect::MakeEmpty(), fMiniRecorder.get()));
+}
+
+SkPictureRecorder::~SkPictureRecorder() {}
+
+SkCanvas* SkPictureRecorder::beginRecording(const SkRect& userCullRect,
+ SkBBHFactory* bbhFactory /* = nullptr */,
+ uint32_t recordFlags /* = 0 */) {
+ const SkRect cullRect = userCullRect.isEmpty() ? SkRect::MakeEmpty() : userCullRect;
+
+ fCullRect = cullRect;
+ fFlags = recordFlags;
+
+ if (bbhFactory) {
+ fBBH.reset((*bbhFactory)());
+ SkASSERT(fBBH.get());
+ }
+
+ if (!fRecord) {
+ fRecord.reset(new SkRecord);
+ }
+ SkRecorder::DrawPictureMode dpm = (recordFlags & kPlaybackDrawPicture_RecordFlag)
+ ? SkRecorder::Playback_DrawPictureMode
+ : SkRecorder::Record_DrawPictureMode;
+ fRecorder->reset(fRecord.get(), cullRect, dpm, fMiniRecorder.get());
+ fActivelyRecording = true;
+ return this->getRecordingCanvas();
+}
+
+SkCanvas* SkPictureRecorder::getRecordingCanvas() {
+ return fActivelyRecording ? fRecorder.get() : nullptr;
+}
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPicture(uint32_t finishFlags) {
+ fActivelyRecording = false;
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ if (fRecord->count() == 0) {
+ auto pic = fMiniRecorder->detachAsPicture(fBBH ? nullptr : &fCullRect);
+ fBBH.reset(nullptr);
+ return pic;
+ }
+
+ // TODO: delay as much of this work until just before first playback?
+ SkRecordOptimize(fRecord.get());
+
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ SkBigPicture::SnapshotArray* pictList =
+ drawableList ? drawableList->newDrawableSnapshot() : nullptr;
+
+ if (fBBH.get()) {
+ SkAutoTMalloc<SkRect> bounds(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds);
+ fBBH->insert(bounds, fRecord->count());
+
+ // Now that we've calculated content bounds, we can update fCullRect, often trimming it.
+ // TODO: get updated fCullRect from bounds instead of forcing the BBH to return it?
+ SkRect bbhBound = fBBH->getRootBound();
+ SkASSERT((bbhBound.isEmpty() || fCullRect.contains(bbhBound))
+ || (bbhBound.isEmpty() && fCullRect.isEmpty()));
+ fCullRect = bbhBound;
+ }
+
+ size_t subPictureBytes = fRecorder->approxBytesUsedBySubPictures();
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += pictList->begin()[i]->approximateBytesUsed();
+ }
+ return sk_make_sp<SkBigPicture>(fCullRect, fRecord.release(), pictList, fBBH.release(),
+ subPictureBytes);
+}
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPictureWithCull(const SkRect& cullRect,
+ uint32_t finishFlags) {
+ fCullRect = cullRect;
+ return this->finishRecordingAsPicture(finishFlags);
+}
+
+
+void SkPictureRecorder::partialReplay(SkCanvas* canvas) const {
+ if (nullptr == canvas) {
+ return;
+ }
+
+ int drawableCount = 0;
+ SkDrawable* const* drawables = nullptr;
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ if (drawableList) {
+ drawableCount = drawableList->count();
+ drawables = drawableList->begin();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, nullptr/*bbh*/, nullptr/*callback*/);
+}
+
+sk_sp<SkDrawable> SkPictureRecorder::finishRecordingAsDrawable(uint32_t finishFlags) {
+ fActivelyRecording = false;
+ fRecorder->flushMiniRecorder();
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ SkRecordOptimize(fRecord.get());
+
+ if (fBBH.get()) {
+ SkAutoTMalloc<SkRect> bounds(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds);
+ fBBH->insert(bounds, fRecord->count());
+ }
+
+ sk_sp<SkDrawable> drawable =
+ sk_make_sp<SkRecordedDrawable>(std::move(fRecord), std::move(fBBH),
+ fRecorder->detachDrawableList(), fCullRect);
+
+ return drawable;
+}
diff --git a/gfx/skia/skia/src/core/SkPixelRef.cpp b/gfx/skia/skia/src/core/SkPixelRef.cpp
new file mode 100644
index 0000000000..ac37f725a0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixelRef.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixelRef.h"
+#include "include/private/SkMutex.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkNextID.h"
+#include "src/core/SkPixelRefPriv.h"
+#include "src/core/SkTraceEvent.h"
+
+#include <atomic>
+
+uint32_t SkNextID::ImageID() {
+ // We never set the low bit.... see SkPixelRef::genIDIsUnique().
+ static std::atomic<uint32_t> nextID{2};
+
+ uint32_t id;
+ do {
+ id = nextID.fetch_add(2);
+ } while (id == 0);
+ return id;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPixelRef::SkPixelRef(int width, int height, void* pixels, size_t rowBytes)
+ : fWidth(width)
+ , fHeight(height)
+ , fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fAddedToCache(false)
+{
+ this->needsNewGenID();
+ fMutability = kMutable;
+}
+
+SkPixelRef::~SkPixelRef() {
+ this->callGenIDChangeListeners();
+}
+
+// This is undefined if there are clients in-flight trying to use us
+void SkPixelRef::android_only_reset(int width, int height, size_t rowBytes) {
+ fWidth = width;
+ fHeight = height;
+ fRowBytes = rowBytes;
+ // note: we do not change fPixels
+
+    // conservative, since it's possible the "new" settings are the same as the old.
+ this->notifyPixelsChanged();
+}
+
+void SkPixelRef::needsNewGenID() {
+ fTaggedGenID.store(0);
+ SkASSERT(!this->genIDIsUnique()); // This method isn't threadsafe, so the assert should be fine.
+}
+
+uint32_t SkPixelRef::getGenerationID() const {
+ uint32_t id = fTaggedGenID.load();
+ if (0 == id) {
+ uint32_t next = SkNextID::ImageID() | 1u;
+ if (fTaggedGenID.compare_exchange_strong(id, next)) {
+ id = next; // There was no race or we won the race. fTaggedGenID is next now.
+ } else {
+ // We lost a race to set fTaggedGenID. compare_exchange() filled id with the winner.
+ }
+ // We can't quite SkASSERT(this->genIDIsUnique()). It could be non-unique
+ // if we got here via the else path (pretty unlikely, but possible).
+ }
+ return id & ~1u; // Mask off bottom unique bit.
+}
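+// Sketch of the tagging scheme above (illustrative): SkNextID::ImageID() hands
+// out even IDs (2, 4, 6, ...), getGenerationID() stores id | 1 so the low bit
+// marks the ID as unique to this pixel ref, and callers always see id & ~1.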
+
+void SkPixelRef::addGenIDChangeListener(GenIDChangeListener* listener) {
+ if (nullptr == listener || !this->genIDIsUnique()) {
+ // No point in tracking this if we're not going to call it.
+ delete listener;
+ return;
+ }
+ SkAutoMutexExclusive lock(fGenIDChangeListenersMutex);
+ *fGenIDChangeListeners.append() = listener;
+}
+
+// We need to be called *before* the genID gets changed or zeroed.
+void SkPixelRef::callGenIDChangeListeners() {
+ SkAutoMutexExclusive lock(fGenIDChangeListenersMutex);
+ // We don't invalidate ourselves if we think another SkPixelRef is sharing our genID.
+ if (this->genIDIsUnique()) {
+ for (int i = 0; i < fGenIDChangeListeners.count(); i++) {
+ fGenIDChangeListeners[i]->onChange();
+ }
+
+ if (fAddedToCache.exchange(false)) {
+ SkNotifyBitmapGenIDIsStale(this->getGenerationID());
+ }
+ }
+ // Listeners get at most one shot, so whether these triggered or not, blow them away.
+ fGenIDChangeListeners.deleteAll();
+}
+
+void SkPixelRef::notifyPixelsChanged() {
+#ifdef SK_DEBUG
+ if (this->isImmutable()) {
+ SkDebugf("========== notifyPixelsChanged called on immutable pixelref");
+ }
+#endif
+ this->callGenIDChangeListeners();
+ this->needsNewGenID();
+}
+
+void SkPixelRef::setImmutable() {
+ fMutability = kImmutable;
+}
+
+void SkPixelRef::setImmutableWithID(uint32_t genID) {
+ /*
+ * We are forcing the genID to match an external value. The caller must ensure that this
+ * value does not conflict with other content.
+ *
+ * One use is to force this pixelref's id to match an SkImage's id
+ */
+ fMutability = kImmutable;
+ fTaggedGenID.store(genID);
+}
+
+void SkPixelRef::setTemporarilyImmutable() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kTemporarilyImmutable;
+}
+
+void SkPixelRef::restoreMutability() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kMutable;
+}
+
+sk_sp<SkPixelRef> SkMakePixelRefWithProc(int width, int height, size_t rowBytes, void* addr,
+ void (*releaseProc)(void* addr, void* ctx), void* ctx) {
+ SkASSERT(width >= 0 && height >= 0);
+ if (nullptr == releaseProc) {
+ return sk_make_sp<SkPixelRef>(width, height, addr, rowBytes);
+ }
+ struct PixelRef final : public SkPixelRef {
+ void (*fReleaseProc)(void*, void*);
+ void* fReleaseProcContext;
+ PixelRef(int w, int h, void* s, size_t r, void (*proc)(void*, void*), void* ctx)
+ : SkPixelRef(w, h, s, r), fReleaseProc(proc), fReleaseProcContext(ctx) {}
+ ~PixelRef() override { fReleaseProc(this->pixels(), fReleaseProcContext); }
+ };
+ return sk_sp<SkPixelRef>(new PixelRef(width, height, addr, rowBytes, releaseProc, ctx));
+}
diff --git a/gfx/skia/skia/src/core/SkPixelRefPriv.h b/gfx/skia/skia/src/core/SkPixelRefPriv.h
new file mode 100644
index 0000000000..a227b06daa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixelRefPriv.h
@@ -0,0 +1,20 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkPixelRefPriv_DEFINED
+#define SkPixelRefPriv_DEFINED
+/**
+ *  Return a new SkPixelRef with the provided pixel storage and
+ * rowBytes. On destruction, ReleaseProc will be called.
+ *
+ * If ReleaseProc is NULL, the pixels will never be released. This
+ * can be useful if the pixels were stack allocated. However, such an
+ *  SkPixelRef must not live beyond its pixels (e.g. by copying
+ * an SkBitmap pointing to it, or drawing to an SkPicture).
+ *
+ * Returns NULL on failure.
+ */
+sk_sp<SkPixelRef> SkMakePixelRefWithProc(int w, int h, size_t rowBytes, void* addr,
+ void (*releaseProc)(void* addr, void* ctx), void* ctx);
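+
+// Illustrative use (a sketch, assuming an SkImageInfo 'info' is in scope):
+//
+//     void* addr = sk_malloc_throw(info.computeMinByteSize());
+//     sk_sp<SkPixelRef> pr = SkMakePixelRefWithProc(
+//             info.width(), info.height(), info.minRowBytes(), addr,
+//             [](void* p, void*) { sk_free(p); }, nullptr);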
+
+#endif // SkPixelRefPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPixmap.cpp b/gfx/skia/skia/src/core/SkPixmap.cpp
new file mode 100644
index 0000000000..c8a09ff7c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixmap.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixmap.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkPixmapPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkUtils.h"
+#include "src/image/SkReadPixelsRec.h"
+#include "src/shaders/SkImageShader.h"
+
+#include <utility>
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkPixmap::reset() {
+ fPixels = nullptr;
+ fRowBytes = 0;
+ fInfo = SkImageInfo::MakeUnknown();
+}
+
+void SkPixmap::reset(const SkImageInfo& info, const void* addr, size_t rowBytes) {
+ if (addr) {
+ SkASSERT(info.validRowBytes(rowBytes));
+ }
+ fPixels = addr;
+ fRowBytes = rowBytes;
+ fInfo = info;
+}
+
+bool SkPixmap::reset(const SkMask& src) {
+ if (SkMask::kA8_Format == src.fFormat) {
+ this->reset(SkImageInfo::MakeA8(src.fBounds.width(), src.fBounds.height()),
+ src.fImage, src.fRowBytes);
+ return true;
+ }
+ this->reset();
+ return false;
+}
+
+void SkPixmap::setColorSpace(sk_sp<SkColorSpace> cs) {
+ fInfo = fInfo.makeColorSpace(std::move(cs));
+}
+
+bool SkPixmap::extractSubset(SkPixmap* result, const SkIRect& subset) const {
+ SkIRect srcRect, r;
+ srcRect.setWH(this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+    // If the upper left of the rectangle was outside the bounds of this pixmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ const void* pixels = nullptr;
+ if (fPixels) {
+ const size_t bpp = fInfo.bytesPerPixel();
+ pixels = (const uint8_t*)fPixels + r.fTop * fRowBytes + r.fLeft * bpp;
+ }
+ result->reset(fInfo.makeDimensions(r.size()), pixels, fRowBytes);
+ return true;
+}
+
+// This is the same as SkPixmap::addr(x,y), but this version gets inlined, while the public
+// method does not. Perhaps we could bloat it so it can be inlined, but that would grow code-size
+// everywhere, instead of just here (on behalf of getAlphaf()).
+static const void* fast_getaddr(const SkPixmap& pm, int x, int y) {
+ x <<= SkColorTypeShiftPerPixel(pm.colorType());
+ return static_cast<const char*>(pm.addr()) + y * pm.rowBytes() + x;
+}
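+// E.g. for kRGBA_8888_SkColorType the shift is 2, so this computes
+// addr() + y * rowBytes() + (x << 2), exactly what the public addr(x,y) returns.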
+
+float SkPixmap::getAlphaf(int x, int y) const {
+ SkASSERT(this->addr());
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ float value = 0;
+ const void* srcPtr = fast_getaddr(*this, x, y);
+
+ switch (this->colorType()) {
+ case kUnknown_SkColorType:
+ return 0;
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ return 1;
+ case kAlpha_8_SkColorType:
+ value = static_cast<const uint8_t*>(srcPtr)[0] * (1.0f/255);
+ break;
+ case kA16_unorm_SkColorType:
+ value = static_cast<const uint16_t*>(srcPtr)[0] * (1.0f/65535);
+ break;
+ case kA16_float_SkColorType: {
+ SkHalf half = static_cast<const SkHalf*>(srcPtr)[0];
+ value = SkHalfToFloat(half);
+ break;
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t u16 = static_cast<const uint16_t*>(srcPtr)[0];
+ value = SkGetPackedA4444(u16) * (1.0f/15);
+ break;
+ }
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ value = static_cast<const uint8_t*>(srcPtr)[3] * (1.0f/255);
+ break;
+ case kRGBA_1010102_SkColorType: {
+ uint32_t u32 = static_cast<const uint32_t*>(srcPtr)[0];
+ value = (u32 >> 30) * (1.0f/3);
+ break;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint64_t u64 = static_cast<const uint64_t*>(srcPtr)[0];
+ value = (u64 >> 48) * (1.0f/65535);
+ break;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ uint64_t px;
+ memcpy(&px, srcPtr, sizeof(px));
+ value = SkHalfToFloat_finite_ftz(px)[3];
+ break;
+ }
+ case kRGBA_F32_SkColorType:
+ value = static_cast<const float*>(srcPtr)[3];
+ break;
+ }
+ return value;
+}
+
+bool SkPixmap::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ if (!SkImageInfoValidConversion(dstInfo, fInfo)) {
+ return false;
+ }
+
+ SkReadPixelsRec rec(dstInfo, dstPixels, dstRB, x, y);
+ if (!rec.trim(fInfo.width(), fInfo.height())) {
+ return false;
+ }
+
+ const void* srcPixels = this->addr(rec.fX, rec.fY);
+ const SkImageInfo srcInfo = fInfo.makeDimensions(rec.fInfo.dimensions());
+ SkConvertPixels(rec.fInfo, rec.fPixels, rec.fRowBytes, srcInfo, srcPixels, this->rowBytes());
+ return true;
+}
+
+bool SkPixmap::erase(SkColor color, const SkIRect& subset) const {
+ return this->erase(SkColor4f::FromColor(color), &subset);
+}
+
+bool SkPixmap::erase(const SkColor4f& color, const SkIRect* subset) const {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setColor4f(color, this->colorSpace());
+
+ SkIRect clip = this->bounds();
+ if (subset && !clip.intersect(*subset)) {
+ return false;
+ }
+ SkRasterClip rc{clip};
+
+ SkDraw draw;
+ draw.fDst = *this;
+ draw.fMatrix = &SkMatrix::I();
+ draw.fRC = &rc;
+
+ draw.drawPaint(paint);
+ return true;
+}
+
+bool SkPixmap::scalePixels(const SkPixmap& actualDst, SkFilterQuality quality) const {
+ // We may need to tweak how we interpret these just a little below, so we make copies.
+ SkPixmap src = *this,
+ dst = actualDst;
+
+    // Can't do anything with an empty src or dst
+ if (src.width() <= 0 || src.height() <= 0 ||
+ dst.width() <= 0 || dst.height() <= 0) {
+ return false;
+ }
+
+ // no scaling involved?
+ if (src.width() == dst.width() && src.height() == dst.height()) {
+ return src.readPixels(dst);
+ }
+
+ // If src and dst are both unpremul, we'll fake the source out to appear as if premul,
+ // and mark the destination as opaque. This odd combination allows us to scale unpremul
+ // pixels without ever premultiplying them (perhaps losing information in the color channels).
+ // This is an idiosyncratic feature of scalePixels(), and is tested by scalepixels_unpremul GM.
+ bool clampAsIfUnpremul = false;
+ if (src.alphaType() == kUnpremul_SkAlphaType &&
+ dst.alphaType() == kUnpremul_SkAlphaType) {
+ src.reset(src.info().makeAlphaType(kPremul_SkAlphaType), src.addr(), src.rowBytes());
+ dst.reset(dst.info().makeAlphaType(kOpaque_SkAlphaType), dst.addr(), dst.rowBytes());
+
+ // We'll need to tell the image shader to clamp to [0,1] instead of the
+ // usual [0,a] when using a bicubic scaling (kHigh_SkFilterQuality).
+ clampAsIfUnpremul = true;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(src)) {
+ return false;
+ }
+ bitmap.setImmutable(); // Don't copy when we create an image.
+ bitmap.setIsVolatile(true); // Disable any caching.
+
+ SkMatrix scale = SkMatrix::MakeRectToRect(SkRect::Make(src.bounds()),
+ SkRect::Make(dst.bounds()),
+ SkMatrix::kFill_ScaleToFit);
+
+ // We'll create a shader to do this draw so we have control over the bicubic clamp.
+ sk_sp<SkShader> shader = SkImageShader::Make(SkImage::MakeFromBitmap(bitmap),
+ SkTileMode::kClamp,
+ SkTileMode::kClamp,
+ &scale,
+ clampAsIfUnpremul);
+
+ sk_sp<SkSurface> surface = SkSurface::MakeRasterDirect(dst.info(),
+ dst.writable_addr(),
+ dst.rowBytes());
+ if (!shader || !surface) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setFilterQuality(quality);
+ paint.setShader(std::move(shader));
+ surface->getCanvas()->drawPaint(paint);
+ return true;
+}
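+// Illustrative use of scalePixels() (a sketch; 'src' is an assumed, valid
+// SkPixmap):
+//
+//     SkBitmap half;
+//     half.allocPixels(src.info().makeWH(src.width() / 2, src.height() / 2));
+//     SkPixmap dst;
+//     half.peekPixels(&dst);
+//     src.scalePixels(dst, kMedium_SkFilterQuality);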
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkColor SkPixmap::getColor(int x, int y) const {
+ SkASSERT(this->addr());
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ const bool needsUnpremul = (kPremul_SkAlphaType == fInfo.alphaType());
+ auto toColor = [needsUnpremul](uint32_t maybePremulColor) {
+ return needsUnpremul ? SkUnPreMultiply::PMColorToColor(maybePremulColor)
+ : SkSwizzle_BGRA_to_PMColor(maybePremulColor);
+ };
+
+ switch (this->colorType()) {
+ case kGray_8_SkColorType: {
+ uint8_t value = *this->addr8(x, y);
+ return SkColorSetRGB(value, value, value);
+ }
+ case kAlpha_8_SkColorType: {
+ return SkColorSetA(0, *this->addr8(x, y));
+ }
+ case kA16_unorm_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ return SkColorSetA(0, value * (255 / 65535.0f));
+ }
+ case kA16_float_SkColorType: {
+ SkHalf value = *this->addr16(x, y);
+ return SkColorSetA(0, 255 * SkHalfToFloat(value));
+ }
+ case kRGB_565_SkColorType: {
+ return SkPixel16ToColor(*this->addr16(x, y));
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ SkPMColor c = SkPixel4444ToPixel32(value);
+ return toColor(c);
+ }
+ case kR8G8_unorm_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ return (uint32_t)( ((value >> 0) & 0xff) ) << 16
+ | (uint32_t)( ((value >> 8) & 0xff) ) << 8
+ | 0xff000000;
+ }
+ case kR16G16_unorm_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ return (uint32_t)( ((value >> 0) & 0xffff) * (255/65535.0f) ) << 16
+ | (uint32_t)( ((value >> 16) & 0xffff) * (255/65535.0f) ) << 8
+ | 0xff000000;
+ }
+ case kR16G16_float_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ uint32_t r = 255 * SkHalfToFloat((value >> 0) & 0xffff);
+ uint32_t g = 255 * SkHalfToFloat((value >> 16) & 0xffff);
+ return (r << 16) | (g << 8) | 0xff000000;
+ }
+ case kRGB_888x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ return SkSwizzle_RB(value | 0xff000000);
+ }
+ case kBGRA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_BGRA_to_PMColor(value);
+ return toColor(c);
+ }
+ case kRGBA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_RGBA_to_PMColor(value);
+ return toColor(c);
+ }
+ case kRGB_101010x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ // Convert 10-bit rgb to 8-bit bgr, and mask in 0xff alpha at the top.
+ return (uint32_t)( ((value >> 0) & 0x3ff) * (255/1023.0f) ) << 16
+ | (uint32_t)( ((value >> 10) & 0x3ff) * (255/1023.0f) ) << 8
+ | (uint32_t)( ((value >> 20) & 0x3ff) * (255/1023.0f) ) << 0
+ | 0xff000000;
+ }
+ case kRGBA_1010102_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+
+ float r = ((value >> 0) & 0x3ff) * (1/1023.0f),
+ g = ((value >> 10) & 0x3ff) * (1/1023.0f),
+ b = ((value >> 20) & 0x3ff) * (1/1023.0f),
+ a = ((value >> 30) & 0x3 ) * (1/ 3.0f);
+ if (a != 0 && needsUnpremul) {
+ r = SkTPin(r/a, 0.0f, 1.0f);
+ g = SkTPin(g/a, 0.0f, 1.0f);
+ b = SkTPin(b/a, 0.0f, 1.0f);
+ }
+ return (uint32_t)( r * 255.0f ) << 16
+ | (uint32_t)( g * 255.0f ) << 8
+ | (uint32_t)( b * 255.0f ) << 0
+ | (uint32_t)( a * 255.0f ) << 24;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint64_t value = *this->addr64(x, y);
+
+ float r = ((value ) & 0xffff) * (1/65535.0f),
+ g = ((value >> 16) & 0xffff) * (1/65535.0f),
+ b = ((value >> 32) & 0xffff) * (1/65535.0f),
+ a = ((value >> 48) & 0xffff) * (1/65535.0f);
+ if (a != 0 && needsUnpremul) {
+ r *= (1.0f/a);
+ g *= (1.0f/a);
+ b *= (1.0f/a);
+ }
+ return (uint32_t)( r * 255.0f ) << 16
+ | (uint32_t)( g * 255.0f ) << 8
+ | (uint32_t)( b * 255.0f ) << 0
+ | (uint32_t)( a * 255.0f ) << 24;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ const uint64_t* addr =
+ (const uint64_t*)fPixels + y * (fRowBytes >> 3) + x;
+ Sk4f p4 = SkHalfToFloat_finite_ftz(*addr);
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * Sk4f(inva, inva, inva, 1);
+ }
+ SkColor c;
+ SkNx_cast<uint8_t>(p4 * Sk4f(255) + Sk4f(0.5f)).store(&c);
+ // p4 is RGBA, but we want BGRA, so we need to swap next
+ return SkSwizzle_RB(c);
+ }
+ case kRGBA_F32_SkColorType: {
+ const float* rgba =
+ (const float*)fPixels + 4*y*(fRowBytes >> 4) + 4*x;
+ Sk4f p4 = Sk4f::Load(rgba);
+ // From here on, just like F16:
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * Sk4f(inva, inva, inva, 1);
+ }
+ SkColor c;
+ SkNx_cast<uint8_t>(p4 * Sk4f(255) + Sk4f(0.5f)).store(&c);
+ // p4 is RGBA, but we want BGRA, so we need to swap next
+ return SkSwizzle_RB(c);
+ }
+ default:
+ SkDEBUGFAIL("");
+ return SkColorSetARGB(0, 0, 0, 0);
+ }
+}
+
+bool SkPixmap::computeIsOpaque() const {
+ const int height = this->height();
+ const int width = this->width();
+
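+    // Most cases below AND every alpha value into a running mask; the image is
+    // opaque only if the mask stays all-ones. E.g. 0xFF & 0xFF & 0x7F == 0x7F,
+    // so a single translucent pixel fails the check.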
+ switch (this->colorType()) {
+ case kAlpha_8_SkColorType: {
+ unsigned a = 0xFF;
+ for (int y = 0; y < height; ++y) {
+ const uint8_t* row = this->addr8(0, y);
+ for (int x = 0; x < width; ++x) {
+ a &= row[x];
+ }
+ if (0xFF != a) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kA16_unorm_SkColorType: {
+ unsigned a = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const uint16_t* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ a &= row[x];
+ }
+ if (0xFFFF != a) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kA16_float_SkColorType: {
+ for (int y = 0; y < height; ++y) {
+ const SkHalf* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ if (row[x] < SK_Half1) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ case kRGB_565_SkColorType:
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+            return true;
+ case kARGB_4444_SkColorType: {
+ unsigned c = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor16* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xF != SkGetPackedA4444(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType: {
+ SkPMColor c = (SkPMColor)~0;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor* row = this->addr32(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xFF != SkGetPackedA32(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ const SkHalf* row = (const SkHalf*)this->addr();
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (row[4 * x + 3] < SK_Half1) {
+ return false;
+ }
+ }
+ row += this->rowBytes() >> 1;
+ }
+ return true;
+ }
+ case kRGBA_F32_SkColorType: {
+ const float* row = (const float*)this->addr();
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (row[4 * x + 3] < 1.0f) {
+ return false;
+ }
+ }
+ row += this->rowBytes() >> 2;
+ }
+ return true;
+ }
+ case kRGBA_1010102_SkColorType: {
+ uint32_t c = ~0;
+ for (int y = 0; y < height; ++y) {
+ const uint32_t* row = this->addr32(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0b11 != c >> 30) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint16_t acc = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const uint64_t* row = this->addr64(0, y);
+ for (int x = 0; x < width; ++x) {
+ acc &= (row[x] >> 48);
+ }
+ if (0xFFFF != acc) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kUnknown_SkColorType:
+ SkDEBUGFAIL("");
+ break;
+ }
+ return false;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool draw_orientation(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin origin) {
+ auto surf = SkSurface::MakeRasterDirect(dst.info(), dst.writable_addr(), dst.rowBytes());
+ if (!surf) {
+ return false;
+ }
+
+ SkBitmap bm;
+ bm.installPixels(src);
+
+ SkMatrix m = SkEncodedOriginToMatrix(origin, src.width(), src.height());
+
+ SkPaint p;
+ p.setBlendMode(SkBlendMode::kSrc);
+ surf->getCanvas()->concat(m);
+ surf->getCanvas()->drawBitmap(bm, 0, 0, &p);
+ return true;
+}
+
+bool SkPixmapPriv::Orient(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin origin) {
+ if (src.colorType() != dst.colorType()) {
+ return false;
+ }
+ // note: we just ignore alphaType and colorSpace for this transformation
+
+ int w = src.width();
+ int h = src.height();
+ if (ShouldSwapWidthHeight(origin)) {
+ using std::swap;
+ swap(w, h);
+ }
+ if (dst.width() != w || dst.height() != h) {
+ return false;
+ }
+ if (w == 0 || h == 0) {
+ return true;
+ }
+
+ // check for aliasing to self
+ if (src.addr() == dst.addr()) {
+ return kTopLeft_SkEncodedOrigin == origin;
+ }
+ return draw_orientation(dst, src, origin);
+}
+
+bool SkPixmapPriv::ShouldSwapWidthHeight(SkEncodedOrigin origin) {
+ // The last four SkEncodedOrigin values involve 90 degree rotations
+ return origin >= kLeftTop_SkEncodedOrigin;
+}
+
+SkImageInfo SkPixmapPriv::SwapWidthHeight(const SkImageInfo& info) {
+ return info.makeWH(info.height(), info.width());
+}
+
diff --git a/gfx/skia/skia/src/core/SkPixmapPriv.h b/gfx/skia/skia/src/core/SkPixmapPriv.h
new file mode 100644
index 0000000000..f3d643bfa4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixmapPriv.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixmapPriv_DEFINED
+#define SkPixmapPriv_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkPixmap.h"
+#include "src/core/SkAutoPixmapStorage.h"
+
+class SkPixmapPriv {
+public:
+ /**
+ * Copy the pixels in this pixmap into dst, applying the orientation transformations specified
+ * by the flags. If the inputs are invalid, this returns false and no copy is made.
+ */
+ static bool Orient(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin);
+
+ static bool ShouldSwapWidthHeight(SkEncodedOrigin o);
+ static SkImageInfo SwapWidthHeight(const SkImageInfo& info);
+
+ /**
+ * Decode an image and then copy into dst, applying origin.
+ *
+ * @param dst SkPixmap to write the final image, after
+ * applying the origin.
+ * @param origin SkEncodedOrigin to apply to the raw pixels.
+ * @param decode Function for decoding into a pixmap without
+ * applying the origin.
+ */
+
+ template <typename Fn>
+ static bool Orient(const SkPixmap& dst, SkEncodedOrigin origin, Fn&& decode) {
+ SkAutoPixmapStorage storage;
+ const SkPixmap* tmp = &dst;
+ if (origin != kTopLeft_SkEncodedOrigin) {
+ auto info = dst.info();
+ if (ShouldSwapWidthHeight(origin)) {
+ info = SwapWidthHeight(info);
+ }
+ if (!storage.tryAlloc(info)) {
+ return false;
+ }
+ tmp = &storage;
+ }
+ if (!decode(*tmp)) {
+ return false;
+ }
+ if (tmp != &dst) {
+ return Orient(dst, *tmp, origin);
+ }
+ return true;
+ }
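+
+    // Illustrative call (a sketch; 'codec' is an assumed SkCodec*):
+    //
+    //     SkPixmapPriv::Orient(dst, origin, [&](const SkPixmap& pm) {
+    //         return SkCodec::kSuccess ==
+    //                codec->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes());
+    //     });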
+
+ static void ResetPixmapKeepInfo(SkPixmap* pm, const void* address, size_t rowBytes) {
+ pm->fRowBytes = rowBytes;
+ pm->fPixels = address;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPoint.cpp b/gfx/skia/skia/src/core/SkPoint.cpp
new file mode 100644
index 0000000000..ee902be772
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkPointPriv.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPoint::scale(SkScalar scale, SkPoint* dst) const {
+ SkASSERT(dst);
+ dst->set(fX * scale, fY * scale);
+}
+
+bool SkPoint::normalize() {
+ return this->setLength(fX, fY, SK_Scalar1);
+}
+
+bool SkPoint::setNormalize(SkScalar x, SkScalar y) {
+ return this->setLength(x, y, SK_Scalar1);
+}
+
+bool SkPoint::setLength(SkScalar length) {
+ return this->setLength(fX, fY, length);
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of mag2 (compared against nearlyzero^2)
+ * 2. overflow of mag2 (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+template <bool use_rsqrt> bool set_point_length(SkPoint* pt, float x, float y, float length,
+ float* orig_length = nullptr) {
+ SkASSERT(!use_rsqrt || (orig_length == nullptr));
+
+    // Compute the magnitude with doubles. This is slower, but does not
+    // overflow to infinity when x or y is very large; otherwise we would
+    // divide by inf. and return the (0,0) vector.
+ double xx = x;
+ double yy = y;
+ double dmag = sqrt(xx * xx + yy * yy);
+ double dscale = sk_ieee_double_divide(length, dmag);
+ x *= dscale;
+ y *= dscale;
+ // check if we're not finite, or we're zero-length
+ if (!sk_float_isfinite(x) || !sk_float_isfinite(y) || (x == 0 && y == 0)) {
+ pt->set(0, 0);
+ return false;
+ }
+ float mag = 0;
+ if (orig_length) {
+ mag = sk_double_to_float(dmag);
+ }
+ pt->set(x, y);
+ if (orig_length) {
+ *orig_length = mag;
+ }
+ return true;
+}
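+// E.g. (illustrative) set_point_length<false>(&pt, 3e38f, 0, 1): 3e38 * 3e38
+// would overflow a float, but the double path computes dmag == 3e38 and sets
+// pt to (1, 0). With x == y == 0, dscale is inf, x becomes NaN, and the
+// function sets pt to (0, 0) and returns false.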
+
+SkScalar SkPoint::Normalize(SkPoint* pt) {
+ float mag;
+ if (set_point_length<false>(pt, pt->fX, pt->fY, 1.0f, &mag)) {
+ return mag;
+ }
+ return 0;
+}
+
+SkScalar SkPoint::Length(SkScalar dx, SkScalar dy) {
+ float mag2 = dx * dx + dy * dy;
+ if (SkScalarIsFinite(mag2)) {
+ return sk_float_sqrt(mag2);
+ } else {
+ double xx = dx;
+ double yy = dy;
+ return sk_double_to_float(sqrt(xx * xx + yy * yy));
+ }
+}
+
+bool SkPoint::setLength(float x, float y, float length) {
+ return set_point_length<false>(this, x, y, length);
+}
+
+bool SkPointPriv::SetLengthFast(SkPoint* pt, float length) {
+ return set_point_length<true>(pt, pt->fX, pt->fY, length);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkPointPriv::DistanceToLineBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b,
+ Side* side) {
+
+ SkVector u = b - a;
+ SkVector v = pt - a;
+
+ SkScalar uLengthSqd = LengthSqd(u);
+ SkScalar det = u.cross(v);
+ if (side) {
+ SkASSERT(-1 == kLeft_Side &&
+ 0 == kOn_Side &&
+ 1 == kRight_Side);
+ *side = (Side) SkScalarSignAsInt(det);
+ }
+ SkScalar temp = sk_ieee_float_divide(det, uLengthSqd);
+ temp *= det;
+ // It's possible we have a degenerate line vector, or we're so far away it looks degenerate
+ // In this case, return squared distance to point A.
+ if (!SkScalarIsFinite(temp)) {
+ return LengthSqd(v);
+ }
+ return temp;
+}
+
+SkScalar SkPointPriv::DistanceToLineSegmentBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b) {
+    // See comments to DistanceToLineBetweenSqd. If the projection of c onto
+ // u is between a and b then this returns the same result as that
+ // function. Otherwise, it returns the distance to the closer of a and
+ // b. Let the projection of v onto u be v'. There are three cases:
+ // 1. v' points opposite to u. c is not between a and b and is closer
+ // to a than b.
+    //    2. v' points along u and has magnitude less than |u|. c is between
+ // a and b and the distance to the segment is the same as distance
+ // to the line ab.
+    //    3. v' points along u and has magnitude greater than |u|. c is not
+    //       between a and b and is closer to b than a.
+ // v' = (u dot v) * u / |u|. So if (u dot v)/|u| is less than zero we're
+ // in case 1. If (u dot v)/|u| is > |u| we are in case 3. Otherwise
+ // we're in case 2. We actually compare (u dot v) to 0 and |u|^2 to
+ // avoid a sqrt to compute |u|.
+
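+    // Worked example (illustrative): a = (0,0), b = (10,0), pt = (3,4) gives
+    // u = (10,0), v = (3,4), uDotV = 30, uLengthSqd = 100 (case 2), and the
+    // result is det^2 / uLengthSqd = 40^2 / 100 = 16, i.e. distance 4.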
+ SkVector u = b - a;
+ SkVector v = pt - a;
+
+ SkScalar uLengthSqd = LengthSqd(u);
+ SkScalar uDotV = SkPoint::DotProduct(u, v);
+
+ // closest point is point A
+ if (uDotV <= 0) {
+ return LengthSqd(v);
+ // closest point is point B
+ } else if (uDotV > uLengthSqd) {
+ return DistanceToSqd(b, pt);
+ // closest point is inside segment
+ } else {
+ SkScalar det = u.cross(v);
+ SkScalar temp = sk_ieee_float_divide(det, uLengthSqd);
+ temp *= det;
+ // It's possible we have a degenerate segment, or we're so far away it looks degenerate
+ // In this case, return squared distance to point A.
+ if (!SkScalarIsFinite(temp)) {
+ return LengthSqd(v);
+ }
+ return temp;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPoint3.cpp b/gfx/skia/skia/src/core/SkPoint3.cpp
new file mode 100644
index 0000000000..901e90ee6f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint3.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPoint3.h"
+
+// Returns the square of the Euclidean distance to (x,y,z).
+static inline float get_length_squared(float x, float y, float z) {
+ return x * x + y * y + z * z;
+}
+
+// Calculates the square of the Euclidean distance to (x,y,z) and stores it in
+// *lengthSquared. Returns true if the distance is judged to be "nearly zero".
+//
+// This logic is encapsulated in a helper method to make it explicit that we
+// always perform this check in the same manner, to avoid inconsistencies
+// (see http://code.google.com/p/skia/issues/detail?id=560 ).
+static inline bool is_length_nearly_zero(float x, float y, float z, float *lengthSquared) {
+ *lengthSquared = get_length_squared(x, y, z);
+ return *lengthSquared <= (SK_ScalarNearlyZero * SK_ScalarNearlyZero);
+}
+
+SkScalar SkPoint3::Length(SkScalar x, SkScalar y, SkScalar z) {
+ float magSq = get_length_squared(x, y, z);
+ if (SkScalarIsFinite(magSq)) {
+ return sk_float_sqrt(magSq);
+ } else {
+ double xx = x;
+ double yy = y;
+ double zz = z;
+ return (float)sqrt(xx * xx + yy * yy + zz * zz);
+ }
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of magSq (compared against nearlyzero^2)
+ * 2. overflow of magSq (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+bool SkPoint3::normalize() {
+ float magSq;
+ if (is_length_nearly_zero(fX, fY, fZ, &magSq)) {
+ this->set(0, 0, 0);
+ return false;
+ }
+ // sqrtf does not provide enough precision; since sqrt takes a double,
+ // there's no additional penalty to storing invScale in a double
+ double invScale;
+ if (sk_float_isfinite(magSq)) {
+ invScale = magSq;
+ } else {
+ // our magSq step overflowed to infinity, so use doubles instead.
+ // much slower, but needed when x, y or z is very large, otherwise we
+ // divide by inf. and return (0,0,0) vector.
+ double xx = fX;
+ double yy = fY;
+ double zz = fZ;
+ invScale = xx * xx + yy * yy + zz * zz;
+ }
+ // using a float instead of a double for scale loses too much precision
+ double scale = 1 / sqrt(invScale);
+ fX *= scale;
+ fY *= scale;
+ fZ *= scale;
+ if (!sk_float_isfinite(fX) || !sk_float_isfinite(fY) || !sk_float_isfinite(fZ)) {
+ this->set(0, 0, 0);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPointPriv.h b/gfx/skia/skia/src/core/SkPointPriv.h
new file mode 100644
index 0000000000..c8a6d520e0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPointPriv.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPointPriv_DEFINED
+#define SkPointPriv_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkPointPriv {
+public:
+ enum Side {
+ kLeft_Side = -1,
+ kOn_Side = 0,
+ kRight_Side = 1,
+ };
+
+ static bool AreFinite(const SkPoint array[], int count) {
+ return SkScalarsAreFinite(&array[0].fX, count << 1);
+ }
+
+ static const SkScalar* AsScalars(const SkPoint& pt) { return &pt.fX; }
+
+ static bool CanNormalize(SkScalar dx, SkScalar dy) {
+ return SkScalarsAreFinite(dx, dy) && (dx || dy);
+ }
+
+ static SkScalar DistanceToLineBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b, Side* side = nullptr);
+
+ static SkScalar DistanceToLineBetween(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b, Side* side = nullptr) {
+ return SkScalarSqrt(DistanceToLineBetweenSqd(pt, a, b, side));
+ }
+
+ static SkScalar DistanceToLineSegmentBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b);
+
+ static SkScalar DistanceToLineSegmentBetween(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b) {
+ return SkScalarSqrt(DistanceToLineSegmentBetweenSqd(pt, a, b));
+ }
+
+ static SkScalar DistanceToSqd(const SkPoint& pt, const SkPoint& a) {
+ SkScalar dx = pt.fX - a.fX;
+ SkScalar dy = pt.fY - a.fY;
+ return dx * dx + dy * dy;
+ }
+
+ static bool EqualsWithinTolerance(const SkPoint& p1, const SkPoint& p2) {
+ return !CanNormalize(p1.fX - p2.fX, p1.fY - p2.fY);
+ }
+
+ static bool EqualsWithinTolerance(const SkPoint& pt, const SkPoint& p, SkScalar tol) {
+ return SkScalarNearlyZero(pt.fX - p.fX, tol)
+ && SkScalarNearlyZero(pt.fY - p.fY, tol);
+ }
+
+ static SkScalar LengthSqd(const SkPoint& pt) {
+ return SkPoint::DotProduct(pt, pt);
+ }
+
+ static void Negate(SkIPoint& pt) {
+ pt.fX = -pt.fX;
+ pt.fY = -pt.fY;
+ }
+
+ static void RotateCCW(const SkPoint& src, SkPoint* dst) {
+ // use a tmp in case src == dst
+ SkScalar tmp = src.fX;
+ dst->fX = src.fY;
+ dst->fY = -tmp;
+ }
+
+ static void RotateCCW(SkPoint* pt) {
+ RotateCCW(*pt, pt);
+ }
+
+ static void RotateCW(const SkPoint& src, SkPoint* dst) {
+ // use a tmp in case src == dst
+ SkScalar tmp = src.fX;
+ dst->fX = -src.fY;
+ dst->fY = tmp;
+ }
+
+ static void RotateCW(SkPoint* pt) {
+ RotateCW(*pt, pt);
+ }
+
+ static bool SetLengthFast(SkPoint* pt, float length);
+
+ static SkPoint MakeOrthog(const SkPoint& vec, Side side = kLeft_Side) {
+ SkASSERT(side == kRight_Side || side == kLeft_Side);
+ return (side == kRight_Side) ? SkPoint{-vec.fY, vec.fX} : SkPoint{vec.fY, -vec.fX};
+ }
+
+ // counter-clockwise fan
+ static void SetRectFan(SkPoint v[], SkScalar l, SkScalar t, SkScalar r, SkScalar b,
+ size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)v + 0 * stride))->set(l, t);
+ ((SkPoint*)((intptr_t)v + 1 * stride))->set(l, b);
+ ((SkPoint*)((intptr_t)v + 2 * stride))->set(r, b);
+ ((SkPoint*)((intptr_t)v + 3 * stride))->set(r, t);
+ }
+
+ // tri strip with two counter-clockwise triangles
+ static void SetRectTriStrip(SkPoint v[], SkScalar l, SkScalar t, SkScalar r, SkScalar b,
+ size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)v + 0 * stride))->set(l, t);
+ ((SkPoint*)((intptr_t)v + 1 * stride))->set(l, b);
+ ((SkPoint*)((intptr_t)v + 2 * stride))->set(r, t);
+ ((SkPoint*)((intptr_t)v + 3 * stride))->set(r, b);
+ }
+ static void SetRectTriStrip(SkPoint v[], const SkRect& rect, size_t stride) {
+ SetRectTriStrip(v, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, stride);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp b/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp
new file mode 100644
index 0000000000..d8d8dfe4e2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPromiseImageTexture.h"
+#include "src/core/SkMessageBus.h"
+
+#if SK_SUPPORT_GPU
+
+std::atomic<uint32_t> SkPromiseImageTexture::gUniqueID{1};
+
+SkPromiseImageTexture::SkPromiseImageTexture(const GrBackendTexture& backendTexture) {
+ SkASSERT(backendTexture.isValid());
+ fBackendTexture = backendTexture;
+ fUniqueID = gUniqueID++;
+}
+
+SkPromiseImageTexture::~SkPromiseImageTexture() {
+ for (const auto& msg : fMessages) {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(msg);
+ }
+}
+
+void SkPromiseImageTexture::addKeyToInvalidate(uint32_t contextID, const GrUniqueKey& key) {
+ SkASSERT(contextID != SK_InvalidUniqueID);
+ SkASSERT(key.isValid());
+ for (const auto& msg : fMessages) {
+ if (msg.contextID() == contextID && msg.key() == key) {
+ return;
+ }
+ }
+ fMessages.emplace_back(key, contextID);
+}
+
+#if GR_TEST_UTILS
+SkTArray<GrUniqueKey> SkPromiseImageTexture::testingOnly_uniqueKeysToInvalidate() const {
+ SkTArray<GrUniqueKey> results;
+ for (const auto& msg : fMessages) {
+ results.push_back(msg.key());
+ }
+ return results;
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.cpp b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
new file mode 100644
index 0000000000..7c34879fc3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkTSearch.h"
+
+void SkPtrSet::reset() {
+ Pair* p = fList.begin();
+ Pair* stop = fList.end();
+ while (p < stop) {
+ this->decPtr(p->fPtr);
+ p += 1;
+ }
+ fList.reset();
+}
+
+bool SkPtrSet::Less(const Pair& a, const Pair& b) {
+ return (char*)a.fPtr < (char*)b.fPtr;
+}
+
+uint32_t SkPtrSet::find(void* ptr) const {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.count();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ return 0;
+ }
+ return fList[index].fIndex;
+}
+
+uint32_t SkPtrSet::add(void* ptr) {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.count();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ index = ~index; // turn it back into an index for insertion
+ this->incPtr(ptr);
+ pair.fIndex = count + 1;
+ *fList.insert(index) = pair;
+ return count + 1;
+ } else {
+ return fList[index].fIndex;
+ }
+}
+
+void SkPtrSet::copyToArray(void* array[]) const {
+ int count = fList.count();
+ if (count > 0) {
+ SkASSERT(array);
+ const Pair* p = fList.begin();
+ // p->fIndex is base-1, so we need to subtract to find its slot
+ for (int i = 0; i < count; i++) {
+ int index = p[i].fIndex - 1;
+ SkASSERT((unsigned)index < (unsigned)count);
+ array[index] = p[i].fPtr;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.h b/gfx/skia/skia/src/core/SkPtrRecorder.h
new file mode 100644
index 0000000000..2ea550f6ad
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPtrSet_DEFINED
+#define SkPtrSet_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTDArray.h"
+
+/**
+ * Maintains a set of ptrs, assigning each a unique ID [1...N]. Duplicate ptrs
+ *  return the same ID (since it's a set). Subclasses can override incPtr()
+ *  and decPtr(). incPtr() is called each time a unique ptr is added to the
+ * set. decPtr() is called on each ptr when the set is destroyed or reset.
+ */
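+// For example (illustrative): the first add(p) for a new ptr p returns 1, a
+// second add(p) returns 1 again, and copyToArray() stores p at array[0], i.e.
+// at index ID - 1.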
+class SkPtrSet : public SkRefCnt {
+public:
+
+
+ /**
+ * Search for the specified ptr in the set. If it is found, return its
+ * 32bit ID [1..N], or if not found, return 0. Always returns 0 for nullptr.
+ */
+ uint32_t find(void*) const;
+
+ /**
+ * Add the specified ptr to the set, returning a unique 32bit ID for it
+ * [1...N]. Duplicate ptrs will return the same ID.
+ *
+ * If the ptr is nullptr, it is not added, and 0 is returned.
+ */
+ uint32_t add(void*);
+
+ /**
+ * Return the number of (non-null) ptrs in the set.
+ */
+ int count() const { return fList.count(); }
+
+ /**
+ * Copy the ptrs in the set into the specified array (allocated by the
+     *  caller). The ptrs are assigned to the array based on their corresponding
+ * ID. e.g. array[ptr.ID - 1] = ptr.
+ *
+ * incPtr() and decPtr() are not called during this operation.
+ */
+ void copyToArray(void* array[]) const;
+
+ /**
+     *  Call decPtr() on each ptr in the set, and then reset the size of the set
+ * to 0.
+ */
+ void reset();
+
+ /**
+ * Set iterator.
+ */
+ class Iter {
+ public:
+ Iter(const SkPtrSet& set)
+ : fSet(set)
+ , fIndex(0) {}
+
+ /**
+ * Return the next ptr in the set or null if the end was reached.
+ */
+ void* next() {
+ return fIndex < fSet.fList.count() ? fSet.fList[fIndex++].fPtr : nullptr;
+ }
+
+ private:
+ const SkPtrSet& fSet;
+ int fIndex;
+ };
+
+protected:
+ virtual void incPtr(void*) {}
+ virtual void decPtr(void*) {}
+
+private:
+ struct Pair {
+ void* fPtr; // never nullptr
+ uint32_t fIndex; // 1...N
+ };
+
+    // We store the ptrs in sorted order (using Less) so that we can efficiently
+ // detect duplicates when add() is called. Hence we need to store the
+ // ptr and its ID/fIndex explicitly, since the ptr's position in the array
+ // is not related to its "index".
+ SkTDArray<Pair> fList;
+
+ static bool Less(const Pair& a, const Pair& b);
+
+ typedef SkRefCnt INHERITED;
+};
+
+/**
+ * Templated wrapper for SkPtrSet, just meant to automate typecasting
+ * parameters to and from void* (which the base class expects).
+ */
+template <typename T> class SkTPtrSet : public SkPtrSet {
+public:
+ uint32_t find(T ptr) {
+ return this->INHERITED::find((void*)ptr);
+ }
+ uint32_t add(T ptr) {
+ return this->INHERITED::add((void*)ptr);
+ }
+
+ void copyToArray(T* array) const {
+ this->INHERITED::copyToArray((void**)array);
+ }
+
+private:
+ typedef SkPtrSet INHERITED;
+};
+
+/**
+ *  Subclass of SkTPtrSet specialized to call ref() and unref() when the
+ * base class's incPtr() and decPtr() are called. This makes it a valid owner
+ * of each ptr, which is released when the set is reset or destroyed.
+ */
+class SkRefCntSet : public SkTPtrSet<SkRefCnt*> {
+public:
+ virtual ~SkRefCntSet();
+
+protected:
+ // overrides
+ virtual void incPtr(void*);
+ virtual void decPtr(void*);
+};
+
+class SkFactorySet : public SkTPtrSet<SkFlattenable::Factory> {};
+
+/**
+ * Similar to SkFactorySet, but only allows Factorys that have registered names.
+ * Also has a function to return the next added Factory's name.
+ */
+class SkNamedFactorySet : public SkRefCnt {
+public:
+
+
+ SkNamedFactorySet();
+
+ /**
+ * Find the specified Factory in the set. If it is not already in the set,
+ * and has registered its name, add it to the set, and return its index.
+ * If the Factory has no registered name, return 0.
+ */
+ uint32_t find(SkFlattenable::Factory);
+
+ /**
+ * If new Factorys have been added to the set, return the name of the first
+ * Factory added after the Factory name returned by the last call to this
+ * function.
+ */
+ const char* getNextAddedFactoryName();
+private:
+ int fNextAddedFactory;
+ SkFactorySet fFactorySet;
+ SkTDArray<const char*> fNames;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.cpp b/gfx/skia/skia/src/core/SkQuadClipper.cpp
new file mode 100644
index 0000000000..d265635b48
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+
+#include <utility>
+
+SkQuadClipper::SkQuadClipper() {
+ fClip.setEmpty();
+}
+
+void SkQuadClipper::setClip(const SkIRect& clip) {
+    // convert to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+     * We solve for t using the quadratic formula, hence we have to rearrange
+     * our coefficients to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
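+// Worked example (illustrative): c0 = 0, c1 = 1, c2 = 4, target = 1 gives
+// A = 2, B = 2, C = -1; SkFindUnitQuadRoots solves 2t^2 + 2t - 1 = 0 for
+// t ~= 0.366, and indeed F(0.366) = 2*0.366*0.634 + 4*0.366^2 ~= 1.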
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* If we somehow returned the fact that we had to flip the pts in Y, we could
+ communicate that to setQuadratic, and then avoid having to flip it back
+ here (only to have setQuadratic do the flip again)
+ */
+bool SkQuadClipper::clipQuad(const SkPoint srcPts[3], SkPoint dst[3]) {
+ bool reverse;
+
+ // we need the data to be monotonically increasing in Y
+ if (srcPts[0].fY > srcPts[2].fY) {
+ dst[0] = srcPts[2];
+ dst[1] = srcPts[1];
+ dst[2] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 3 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[2].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (dst[0].fY < ctop) {
+ if (chopMonoQuadAtY(dst, ctop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(dst, tmp, t);
+ dst[0] = tmp[2];
+ dst[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY < ctop) {
+ dst[i].fY = ctop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (dst[2].fY > cbot) {
+ if (chopMonoQuadAtY(dst, cbot, &t)) {
+ SkChopQuadAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY > cbot) {
+ dst[i].fY = cbot;
+ }
+ }
+ }
+ }
+
+ if (reverse) {
+ using std::swap;
+ swap(dst[0], dst[2]);
+ }
+ return true;
+}
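+
+/*
+ * Usage sketch (illustrative, not part of this patch):
+ *
+ *   SkQuadClipper clipper;
+ *   clipper.setClip(SkIRect::MakeWH(100, 100));
+ *   SkPoint quad[3] = {{10, -20}, {50, 40}, {90, 130}};  // monotonic in Y
+ *   SkPoint clipped[3];
+ *   if (clipper.clipQuad(quad, clipped)) {
+ *       // clipped[] holds the portion of the quad inside the clip's Y span
+ *   }
+ */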
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.h b/gfx/skia/skia/src/core/SkQuadClipper.h
new file mode 100644
index 0000000000..c3f5d63c8a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkQuadClipper_DEFINED
+#define SkQuadClipper_DEFINED
+
+#include "include/core/SkPath.h"
+
+/** This class is initialized with a clip rectangle, and then can be fed quads,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkQuadClipper {
+public:
+ SkQuadClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool clipQuad(const SkPoint src[3], SkPoint dst[3]);
+
+private:
+ SkRect fClip;
+};
+
+/** Iterator that returns the clipped segments of a quad clipped to a rect.
+ The segments will be either lines or quads (based on SkPath::Verb), and
+ will all be monotonic in Y.
+ */
+class SkQuadClipper2 {
+public:
+ bool clipQuad(const SkPoint pts[3], const SkRect& clip);
+ bool clipCubic(const SkPoint pts[4], const SkRect& clip);
+
+ SkPath::Verb next(SkPoint pts[]);
+
+private:
+ SkPoint* fCurrPoint;
+ SkPath::Verb* fCurrVerb;
+
+ enum {
+ kMaxVerbs = 13,
+ kMaxPoints = 32
+ };
+ SkPoint fPoints[kMaxPoints];
+ SkPath::Verb fVerbs[kMaxVerbs];
+
+ void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
+ void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
+ void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
+ void appendQuad(const SkPoint pts[3], bool reverse);
+ void appendCubic(const SkPoint pts[4], bool reverse);
+};
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRRect.cpp b/gfx/skia/skia/src/core/SkRRect.cpp
new file mode 100644
index 0000000000..df5b2d4908
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRRect.cpp
@@ -0,0 +1,688 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/private/SkMalloc.h"
+#include "src/core/SkBuffer.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkScaleToSides.h"
+
+#include <cmath>
+#include <utility>
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ if (!SkScalarsAreFinite(xRad, yRad)) {
+ xRad = yRad = 0; // devolve into a simple rect
+ }
+
+ if (fRect.width() < xRad+xRad || fRect.height() < yRad+yRad) {
+ // At most one of these two divides will be by zero, and neither numerator is zero.
+ SkScalar scale = SkMinScalar(sk_ieee_float_divide(fRect.width(), xRad + xRad),
+ sk_ieee_float_divide(fRect.height(), yRad + yRad));
+ SkASSERT(scale < SK_Scalar1);
+ xRad *= scale;
+ yRad *= scale;
+ }
+
+ if (xRad <= 0 || yRad <= 0) {
+ // all corners are square in this case
+ this->setRect(rect);
+ return;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kSimple_Type;
+ if (xRad >= SkScalarHalf(fRect.width()) && yRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ // TODO: assert that all the x&y radii are already W/2 & H/2
+ }
+
+ SkASSERT(this->isValid());
+}
+
+void SkRRect::setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ const SkScalar array[4] = { leftRad, topRad, rightRad, bottomRad };
+ if (!SkScalarsAreFinite(array, 4)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ leftRad = SkMaxScalar(leftRad, 0);
+ topRad = SkMaxScalar(topRad, 0);
+ rightRad = SkMaxScalar(rightRad, 0);
+ bottomRad = SkMaxScalar(bottomRad, 0);
+
+ SkScalar scale = SK_Scalar1;
+ if (leftRad + rightRad > fRect.width()) {
+ scale = fRect.width() / (leftRad + rightRad);
+ }
+ if (topRad + bottomRad > fRect.height()) {
+ scale = SkMinScalar(scale, fRect.height() / (topRad + bottomRad));
+ }
+
+ if (scale < SK_Scalar1) {
+ leftRad *= scale;
+ topRad *= scale;
+ rightRad *= scale;
+ bottomRad *= scale;
+ }
+
+ if (leftRad == rightRad && topRad == bottomRad) {
+ if (leftRad >= SkScalarHalf(fRect.width()) && topRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else if (0 == leftRad || 0 == topRad) {
+ // If the left and (by equality check above) right radii are zero then it is a rect.
+ // Same goes for top/bottom.
+ fType = kRect_Type;
+ leftRad = 0;
+ topRad = 0;
+ rightRad = 0;
+ bottomRad = 0;
+ } else {
+ fType = kSimple_Type;
+ }
+ } else {
+ fType = kNinePatch_Type;
+ }
+
+ fRadii[kUpperLeft_Corner].set(leftRad, topRad);
+ fRadii[kUpperRight_Corner].set(rightRad, topRad);
+ fRadii[kLowerRight_Corner].set(rightRad, bottomRad);
+ fRadii[kLowerLeft_Corner].set(leftRad, bottomRad);
+
+ SkASSERT(this->isValid());
+}
+
+// These parameters are intentionally double. Apropos crbug.com/463920, if one of the
+// radii is huge while the other is small, single precision math can completely
+// miss the fact that a scale is required.
+static double compute_min_scale(double rad1, double rad2, double limit, double curMin) {
+ if ((rad1 + rad2) > limit) {
+ return SkTMin(curMin, limit / (rad1 + rad2));
+ }
+ return curMin;
+}
+
+static bool clamp_to_zero(SkVector radii[4]) {
+ bool allCornersSquare = true;
+
+ // Clamp negative radii to zero
+ for (int i = 0; i < 4; ++i) {
+ if (radii[i].fX <= 0 || radii[i].fY <= 0) {
+ // In this case we are being a little fast & loose. Since one of
+ // the radii is 0 the corner is square. However, the other radii
+ // could still be non-zero and play in the global scale factor
+ // computation.
+ radii[i].fX = 0;
+ radii[i].fY = 0;
+ } else {
+ allCornersSquare = false;
+ }
+ }
+
+ return allCornersSquare;
+}
+
+void SkRRect::setRectRadii(const SkRect& rect, const SkVector radii[4]) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ if (!SkScalarsAreFinite(&radii[0].fX, 8)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ memcpy(fRadii, radii, sizeof(fRadii));
+
+ if (clamp_to_zero(fRadii)) {
+ this->setRect(rect);
+ return;
+ }
+
+ this->scaleRadii(rect);
+}
+
+bool SkRRect::initializeRect(const SkRect& rect) {
+ // Check this before sorting because sorting can hide nans.
+ if (!rect.isFinite()) {
+ *this = SkRRect();
+ return false;
+ }
+ fRect = rect.makeSorted();
+ if (fRect.isEmpty()) {
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kEmpty_Type;
+ return false;
+ }
+ return true;
+}
+
+// If we can't distinguish one of the radii relative to the other, force it to zero so it
+// doesn't confuse us later. See crbug.com/850350
+//
+static void flush_to_zero(SkScalar& a, SkScalar& b) {
+ SkASSERT(a >= 0);
+ SkASSERT(b >= 0);
+ if (a + b == a) {
+ b = 0;
+ } else if (a + b == b) {
+ a = 0;
+ }
+}
+
+void SkRRect::scaleRadii(const SkRect& rect) {
+ // Proportionally scale down all radii to fit. Find the minimum ratio
+ // of a side and the radii on that side (for all four sides) and use
+ // that to scale down _all_ the radii. This algorithm is from the
+ // W3 spec (http://www.w3.org/TR/css3-background/) section 5.5 - Overlapping
+ // Curves:
+ // "Let f = min(Li/Si), where i is one of { top, right, bottom, left },
+ // Si is the sum of the two corresponding radii of the corners on side i,
+ // and Ltop = Lbottom = the width of the box,
+ // and Lleft = Lright = the height of the box.
+ // If f < 1, then all corner radii are reduced by multiplying them by f."
+ double scale = 1.0;
+
+ // The sides of the rectangle may be larger than a float.
+ double width = (double)fRect.fRight - (double)fRect.fLeft;
+ double height = (double)fRect.fBottom - (double)fRect.fTop;
+ scale = compute_min_scale(fRadii[0].fX, fRadii[1].fX, width, scale);
+ scale = compute_min_scale(fRadii[1].fY, fRadii[2].fY, height, scale);
+ scale = compute_min_scale(fRadii[2].fX, fRadii[3].fX, width, scale);
+ scale = compute_min_scale(fRadii[3].fY, fRadii[0].fY, height, scale);
+
+ flush_to_zero(fRadii[0].fX, fRadii[1].fX);
+ flush_to_zero(fRadii[1].fY, fRadii[2].fY);
+ flush_to_zero(fRadii[2].fX, fRadii[3].fX);
+ flush_to_zero(fRadii[3].fY, fRadii[0].fY);
+
+ if (scale < 1.0) {
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[0].fX, &fRadii[1].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[1].fY, &fRadii[2].fY);
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[2].fX, &fRadii[3].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[3].fY, &fRadii[0].fY);
+ }
+
+ // adjust radii may set x or y to zero; set companion to zero as well
+ if (clamp_to_zero(fRadii)) {
+ this->setRect(rect);
+ return;
+ }
+
+ // At this point we're either oval, simple, or complex (not empty or rect).
+ this->computeType();
+
+ SkASSERT(this->isValid());
+}
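+
+// Worked example (illustrative): for a 100x100 rect whose upper-left and
+// upper-right x-radii are 80 and 40, the top side yields
+// f = 100 / (80 + 40) = 5/6, so every radius is multiplied by 5/6 and the
+// two top corners meet exactly at the edge instead of overlapping.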
+
+// This method determines if a point known to be inside the RRect's bounds is
+// inside all the corners.
+bool SkRRect::checkCornerContainment(SkScalar x, SkScalar y) const {
+ SkPoint canonicalPt; // (x,y) translated to one of the quadrants
+ int index;
+
+ if (kOval_Type == this->type()) {
+ canonicalPt.set(x - fRect.centerX(), y - fRect.centerY());
+ index = kUpperLeft_Corner; // any corner will do in this case
+ } else {
+ if (x < fRect.fLeft + fRadii[kUpperLeft_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperLeft_Corner].fY) {
+ // UL corner
+ index = kUpperLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kUpperLeft_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY < 0);
+ } else if (x < fRect.fLeft + fRadii[kLowerLeft_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerLeft_Corner].fY) {
+ // LL corner
+ index = kLowerLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kLowerLeft_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY > 0);
+ } else if (x > fRect.fRight - fRadii[kUpperRight_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperRight_Corner].fY) {
+ // UR corner
+ index = kUpperRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kUpperRight_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY < 0);
+ } else if (x > fRect.fRight - fRadii[kLowerRight_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerRight_Corner].fY) {
+ // LR corner
+ index = kLowerRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kLowerRight_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY > 0);
+ } else {
+ // not in any of the corners
+ return true;
+ }
+ }
+
+ // A point is in an ellipse (in standard position) if:
+ // x^2 y^2
+ // ----- + ----- <= 1
+ // a^2 b^2
+ // or :
+ // b^2*x^2 + a^2*y^2 <= (ab)^2
+ SkScalar dist = SkScalarSquare(canonicalPt.fX) * SkScalarSquare(fRadii[index].fY) +
+ SkScalarSquare(canonicalPt.fY) * SkScalarSquare(fRadii[index].fX);
+ return dist <= SkScalarSquare(fRadii[index].fX * fRadii[index].fY);
+}
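+
+// Numeric check (illustrative): with corner radii a = b = 5 and canonical
+// point (3, 4): b^2*x^2 + a^2*y^2 = 25*9 + 25*16 = 625 = (5*5)^2, so the
+// point lies exactly on the corner ellipse and is counted as contained.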
+
+bool SkRRectPriv::AllCornersCircular(const SkRRect& rr, SkScalar tolerance) {
+ return SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[1].fX, rr.fRadii[1].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[2].fX, rr.fRadii[2].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[3].fX, rr.fRadii[3].fY, tolerance);
+}
+
+bool SkRRect::contains(const SkRect& rect) const {
+ if (!this->getBounds().contains(rect)) {
+ // If 'rect' isn't contained by the RR's bounds then the
+ // RR definitely doesn't contain it
+ return false;
+ }
+
+ if (this->isRect()) {
+ // the prior test was sufficient
+ return true;
+ }
+
+ // At this point we know all four corners of 'rect' are inside the
+ // bounds of this RR. Check to make sure all the corners are inside
+ // all the curves
+ return this->checkCornerContainment(rect.fLeft, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fBottom) &&
+ this->checkCornerContainment(rect.fLeft, rect.fBottom);
+}
+
+static bool radii_are_nine_patch(const SkVector radii[4]) {
+ return radii[SkRRect::kUpperLeft_Corner].fX == radii[SkRRect::kLowerLeft_Corner].fX &&
+ radii[SkRRect::kUpperLeft_Corner].fY == radii[SkRRect::kUpperRight_Corner].fY &&
+ radii[SkRRect::kUpperRight_Corner].fX == radii[SkRRect::kLowerRight_Corner].fX &&
+ radii[SkRRect::kLowerLeft_Corner].fY == radii[SkRRect::kLowerRight_Corner].fY;
+}
+
+// There is a simplified version of this method in setRectXY
+void SkRRect::computeType() {
+ if (fRect.isEmpty()) {
+ SkASSERT(fRect.isSorted());
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fRadii); ++i) {
+ SkASSERT((fRadii[i] == SkVector{0, 0}));
+ }
+ fType = kEmpty_Type;
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ bool allRadiiEqual = true; // are all x radii equal and all y radii equal?
+ bool allCornersSquare = 0 == fRadii[0].fX || 0 == fRadii[0].fY;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ // if either radius is zero the corner is square so both have to
+ // be non-zero to have a rounded corner
+ allCornersSquare = false;
+ }
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiEqual = false;
+ }
+ }
+
+ if (allCornersSquare) {
+ fType = kRect_Type;
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ if (allRadiiEqual) {
+ if (fRadii[0].fX >= SkScalarHalf(fRect.width()) &&
+ fRadii[0].fY >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else {
+ fType = kSimple_Type;
+ }
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ if (radii_are_nine_patch(fRadii)) {
+ fType = kNinePatch_Type;
+ } else {
+ fType = kComplex_Type;
+ }
+ SkASSERT(this->isValid());
+}
+
+bool SkRRect::transform(const SkMatrix& matrix, SkRRect* dst) const {
+ if (nullptr == dst) {
+ return false;
+ }
+
+ // Assert that the caller is not trying to do this in place, which
+ // would violate const-ness. Do not return false though, so that
+ // if they know what they're doing and want to violate it they can.
+ SkASSERT(dst != this);
+
+ if (matrix.isIdentity()) {
+ *dst = *this;
+ return true;
+ }
+
+ if (!matrix.preservesAxisAlignment()) {
+ return false;
+ }
+
+ SkRect newRect;
+ if (!matrix.mapRect(&newRect, fRect)) {
+ return false;
+ }
+
+ // The matrix may have scaled us to zero (or, due to float madness, we may now have
+ // collapsed some dimension of the rect), so we need to check for that. Note that matrix must be
+ // scale and translate and mapRect() produces a sorted rect. So an empty rect indicates
+ // loss of precision.
+ if (!newRect.isFinite() || newRect.isEmpty()) {
+ return false;
+ }
+
+ // At this point, this is guaranteed to succeed, so we can modify dst.
+ dst->fRect = newRect;
+
+ // Since the only transforms that were allowed are axis aligned, the type
+ // remains unchanged.
+ dst->fType = fType;
+
+ if (kRect_Type == fType) {
+ SkASSERT(dst->isValid());
+ return true;
+ }
+ if (kOval_Type == fType) {
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = SkScalarHalf(newRect.width());
+ dst->fRadii[i].fY = SkScalarHalf(newRect.height());
+ }
+ SkASSERT(dst->isValid());
+ return true;
+ }
+
+ // Now scale each corner
+ SkScalar xScale = matrix.getScaleX();
+ SkScalar yScale = matrix.getScaleY();
+
+ // There is a rotation of 90 degrees (clockwise) or 270 degrees (counterclockwise).
+ // 180-degree rotations are simply a flipX with a flipY and would come under
+ // a scale transform.
+ if (!matrix.isScaleTranslate()) {
+ const bool isClockwise = matrix.getSkewX() < 0;
+
+ // The matrix location for scale changes if there is a rotation.
+ xScale = matrix.getSkewY() * (isClockwise ? 1 : -1);
+ yScale = matrix.getSkewX() * (isClockwise ? -1 : 1);
+
+ const int dir = isClockwise ? 3 : 1;
+ for (int i = 0; i < 4; ++i) {
+ const int src = (i + dir) >= 4 ? (i + dir) % 4 : (i + dir);
+ // Swap X and Y axis for the radii.
+ dst->fRadii[i].fX = fRadii[src].fY;
+ dst->fRadii[i].fY = fRadii[src].fX;
+ }
+ } else {
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = fRadii[i].fX;
+ dst->fRadii[i].fY = fRadii[i].fY;
+ }
+ }
+
+ const bool flipX = xScale < 0;
+ if (flipX) {
+ xScale = -xScale;
+ }
+
+ const bool flipY = yScale < 0;
+ if (flipY) {
+ yScale = -yScale;
+ }
+
+ // Scale the radii without respecting the flip.
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX *= xScale;
+ dst->fRadii[i].fY *= yScale;
+ }
+
+ // Now swap as necessary.
+ using std::swap;
+ if (flipX) {
+ if (flipY) {
+ // Swap with opposite corners
+ swap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerRight_Corner]);
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ } else {
+ // Only swap in x
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kUpperLeft_Corner]);
+ swap(dst->fRadii[kLowerRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ }
+ } else if (flipY) {
+ // Only swap in y
+ swap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerLeft_Corner]);
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerRight_Corner]);
+ }
+
+ if (!AreRectAndRadiiValid(dst->fRect, dst->fRadii)) {
+ return false;
+ }
+
+ dst->scaleRadii(dst->fRect);
+ SkASSERT(dst->isValid());
+
+ return true;
+}
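+
+// Example (illustrative): under a clockwise 90-degree rotation dir == 3, so
+// the new upper-right corner (i == 1) takes its radii from source corner
+// (1 + 3) % 4 == 0, the old upper-left, with x and y swapped; that is exactly
+// where the old upper-left corner lands after the rotation.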
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::inset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ SkRect r = fRect.makeInset(dx, dy);
+ bool degenerate = false;
+ if (r.fRight <= r.fLeft) {
+ degenerate = true;
+ r.fLeft = r.fRight = SkScalarAve(r.fLeft, r.fRight);
+ }
+ if (r.fBottom <= r.fTop) {
+ degenerate = true;
+ r.fTop = r.fBottom = SkScalarAve(r.fTop, r.fBottom);
+ }
+ if (degenerate) {
+ dst->fRect = r;
+ memset(dst->fRadii, 0, sizeof(dst->fRadii));
+ dst->fType = kEmpty_Type;
+ return;
+ }
+ if (!r.isFinite()) {
+ *dst = SkRRect();
+ return;
+ }
+
+ SkVector radii[4];
+ memcpy(radii, fRadii, sizeof(radii));
+ for (int i = 0; i < 4; ++i) {
+ if (radii[i].fX) {
+ radii[i].fX -= dx;
+ }
+ if (radii[i].fY) {
+ radii[i].fY -= dy;
+ }
+ }
+ dst->setRectRadii(r, radii);
+}
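+
+// Example (illustrative): insetting a round-rect with 10x10 corner radii by
+// dx = dy = 4 leaves 6x6 radii on the smaller rect; radii that were already
+// zero are left at zero, so square corners stay square.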
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkRRect::writeToMemory(void* buffer) const {
+ // Serialize only the rect and corners, but not the derived type tag.
+ memcpy(buffer, this, kSizeInMemory);
+ return kSizeInMemory;
+}
+
+void SkRRectPriv::WriteToBuffer(const SkRRect& rr, SkWBuffer* buffer) {
+ // Serialize only the rect and corners, but not the derived type tag.
+ buffer->write(&rr, SkRRect::kSizeInMemory);
+}
+
+size_t SkRRect::readFromMemory(const void* buffer, size_t length) {
+ if (length < kSizeInMemory) {
+ return 0;
+ }
+
+ SkRRect raw;
+ memcpy(&raw, buffer, kSizeInMemory);
+ this->setRectRadii(raw.fRect, raw.fRadii);
+ return kSizeInMemory;
+}
+
+bool SkRRectPriv::ReadFromBuffer(SkRBuffer* buffer, SkRRect* rr) {
+ if (buffer->available() < SkRRect::kSizeInMemory) {
+ return false;
+ }
+ SkRRect storage;
+ return buffer->read(&storage, SkRRect::kSizeInMemory) &&
+ (rr->readFromMemory(&storage, SkRRect::kSizeInMemory) == SkRRect::kSizeInMemory);
+}
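+
+// Size note (an assumption based on the fields serialized here):
+// kSizeInMemory covers only the SkRect (4 scalars) and the four corner radii
+// (8 scalars), i.e. 48 bytes with 4-byte scalars; the type tag is
+// deliberately recomputed on read via setRectRadii().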
+
+#include "include/core/SkString.h"
+#include "src/core/SkStringUtils.h"
+
+void SkRRect::dump(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ fRect.dump(asHex);
+ SkString line("const SkPoint corners[] = {\n");
+ for (int i = 0; i < 4; ++i) {
+ SkString strX, strY;
+ SkAppendScalar(&strX, fRadii[i].x(), asType);
+ SkAppendScalar(&strY, fRadii[i].y(), asType);
+ line.appendf(" { %s, %s },", strX.c_str(), strY.c_str());
+ if (asHex) {
+ line.appendf(" /* %f %f */", fRadii[i].x(), fRadii[i].y());
+ }
+ line.append("\n");
+ }
+ line.append("};");
+ SkDebugf("%s\n", line.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need all combinations of predicates to be true to have a "safe" radius value.
+ */
+static bool are_radius_check_predicates_valid(SkScalar rad, SkScalar min, SkScalar max) {
+ return (min <= max) && (rad <= max - min) && (min + rad <= max) && (max - rad >= min) &&
+ rad >= 0;
+}
+
+bool SkRRect::isValid() const {
+ if (!AreRectAndRadiiValid(fRect, fRadii)) {
+ return false;
+ }
+
+ bool allRadiiZero = (0 == fRadii[0].fX && 0 == fRadii[0].fY);
+ bool allCornersSquare = (0 == fRadii[0].fX || 0 == fRadii[0].fY);
+ bool allRadiiSame = true;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX || 0 != fRadii[i].fY) {
+ allRadiiZero = false;
+ }
+
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiSame = false;
+ }
+
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ allCornersSquare = false;
+ }
+ }
+ bool patchesOfNine = radii_are_nine_patch(fRadii);
+
+ if (fType < 0 || fType > kLastType) {
+ return false;
+ }
+
+ switch (fType) {
+ case kEmpty_Type:
+ if (!fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kRect_Type:
+ if (fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kOval_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ if (!SkScalarNearlyEqual(fRadii[i].fX, SkScalarHalf(fRect.width())) ||
+ !SkScalarNearlyEqual(fRadii[i].fY, SkScalarHalf(fRect.height()))) {
+ return false;
+ }
+ }
+ break;
+ case kSimple_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+ break;
+ case kNinePatch_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ !patchesOfNine) {
+ return false;
+ }
+ break;
+ case kComplex_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ patchesOfNine) {
+ return false;
+ }
+ break;
+ }
+
+ return true;
+}
+
+bool SkRRect::AreRectAndRadiiValid(const SkRect& rect, const SkVector radii[4]) {
+ if (!rect.isFinite() || !rect.isSorted()) {
+ return false;
+ }
+ for (int i = 0; i < 4; ++i) {
+ if (!are_radius_check_predicates_valid(radii[i].fX, rect.fLeft, rect.fRight) ||
+ !are_radius_check_predicates_valid(radii[i].fY, rect.fTop, rect.fBottom)) {
+ return false;
+ }
+ }
+ return true;
+}
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkRRectPriv.h b/gfx/skia/skia/src/core/SkRRectPriv.h
new file mode 100644
index 0000000000..4cf529569b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRRectPriv.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRectPriv_DEFINED
+#define SkRRectPriv_DEFINED
+
+#include "include/core/SkRRect.h"
+
+class SkRBuffer;
+class SkWBuffer;
+
+class SkRRectPriv {
+public:
+ static bool IsCircle(const SkRRect& rr) {
+ return rr.isOval() && SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY);
+ }
+
+ static SkVector GetSimpleRadii(const SkRRect& rr) {
+ SkASSERT(!rr.isComplex());
+ return rr.fRadii[0];
+ }
+
+ static bool IsSimpleCircular(const SkRRect& rr) {
+ return rr.isSimple() && SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY);
+ }
+
+ static bool EqualRadii(const SkRRect& rr) {
+ return rr.isRect() || SkRRectPriv::IsCircle(rr) || SkRRectPriv::IsSimpleCircular(rr);
+ }
+
+ static const SkVector* GetRadiiArray(const SkRRect& rr) { return rr.fRadii; }
+
+ static bool AllCornersCircular(const SkRRect& rr, SkScalar tolerance = SK_ScalarNearlyZero);
+
+ static bool ReadFromBuffer(SkRBuffer* buffer, SkRRect* rr);
+
+ static void WriteToBuffer(const SkRRect& rr, SkWBuffer* buffer);
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkRTree.cpp b/gfx/skia/skia/src/core/SkRTree.cpp
new file mode 100644
index 0000000000..5688350bbc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRTree.h"
+
+SkRTree::SkRTree() : fCount(0) {}
+
+SkRect SkRTree::getRootBound() const {
+ if (fCount) {
+ return fRoot.fBounds;
+ } else {
+ return SkRect::MakeEmpty();
+ }
+}
+
+void SkRTree::insert(const SkRect boundsArray[], int N) {
+ SkASSERT(0 == fCount);
+
+ SkTDArray<Branch> branches;
+ branches.setReserve(N);
+
+ for (int i = 0; i < N; i++) {
+ const SkRect& bounds = boundsArray[i];
+ if (bounds.isEmpty()) {
+ continue;
+ }
+
+ Branch* b = branches.push();
+ b->fBounds = bounds;
+ b->fOpIndex = i;
+ }
+
+ fCount = branches.count();
+ if (fCount) {
+ if (1 == fCount) {
+ fNodes.setReserve(1);
+ Node* n = this->allocateNodeAtLevel(0);
+ n->fNumChildren = 1;
+ n->fChildren[0] = branches[0];
+ fRoot.fSubtree = n;
+ fRoot.fBounds = branches[0].fBounds;
+ } else {
+ fNodes.setReserve(CountNodes(fCount));
+ fRoot = this->bulkLoad(&branches);
+ }
+ }
+}
+
+SkRTree::Node* SkRTree::allocateNodeAtLevel(uint16_t level) {
+ SkDEBUGCODE(Node* p = fNodes.begin());
+ Node* out = fNodes.push();
+ SkASSERT(fNodes.begin() == p); // If this fails, we didn't setReserve() enough.
+ out->fNumChildren = 0;
+ out->fLevel = level;
+ return out;
+}
+
+// This function parallels bulkLoad, but just counts how many nodes bulkLoad would allocate.
+int SkRTree::CountNodes(int branches) {
+ if (branches == 1) {
+ return 1;
+ }
+ int numBranches = branches / kMaxChildren;
+ int remainder = branches % kMaxChildren;
+ if (remainder > 0) {
+ numBranches++;
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+ int currentBranch = 0;
+ int nodes = 0;
+ while (currentBranch < branches) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ nodes++;
+ currentBranch++;
+ for (int k = 1; k < incrementBy && currentBranch < branches; ++k) {
+ currentBranch++;
+ }
+ }
+ return nodes + CountNodes(nodes);
+}
+
+SkRTree::Branch SkRTree::bulkLoad(SkTDArray<Branch>* branches, int level) {
+ if (branches->count() == 1) { // Only one branch. It will be the root.
+ return (*branches)[0];
+ }
+
+ // We might sort our branches here, but we expect Blink to give us a reasonable x,y order.
+ // Skipping a call to sort (in Y) here resulted in a 17% win for recording with negligible
+ // difference in playback speed.
+ int numBranches = branches->count() / kMaxChildren;
+ int remainder = branches->count() % kMaxChildren;
+ int newBranches = 0;
+
+ if (remainder > 0) {
+ ++numBranches;
+ // If the remainder isn't enough to fill a node, we'll add fewer nodes to other branches.
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+
+ int currentBranch = 0;
+ while (currentBranch < branches->count()) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ // if need be, omit some nodes to make up for remainder
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ Node* n = allocateNodeAtLevel(level);
+ n->fNumChildren = 1;
+ n->fChildren[0] = (*branches)[currentBranch];
+ Branch b;
+ b.fBounds = (*branches)[currentBranch].fBounds;
+ b.fSubtree = n;
+ ++currentBranch;
+ for (int k = 1; k < incrementBy && currentBranch < branches->count(); ++k) {
+ b.fBounds.join((*branches)[currentBranch].fBounds);
+ n->fChildren[k] = (*branches)[currentBranch];
+ ++n->fNumChildren;
+ ++currentBranch;
+ }
+ (*branches)[newBranches] = b;
+ ++newBranches;
+ }
+ branches->setCount(newBranches);
+ return this->bulkLoad(branches, level + 1);
+}
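+
+// Worked example (illustrative): with 13 branches, kMaxChildren == 11 and
+// kMinChildren == 6, the remainder is 13 % 11 == 2, too few for a node, so
+// remainder becomes 6 - 2 == 4 and the first node is shortened to
+// 11 - 4 == 7 children; the second node then takes the remaining 6, so both
+// level-0 nodes satisfy the minimum fan-out before the next level is built.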
+
+void SkRTree::search(const SkRect& query, SkTDArray<int>* results) const {
+ if (fCount > 0 && SkRect::Intersects(fRoot.fBounds, query)) {
+ this->search(fRoot.fSubtree, query, results);
+ }
+}
+
+void SkRTree::search(Node* node, const SkRect& query, SkTDArray<int>* results) const {
+ for (int i = 0; i < node->fNumChildren; ++i) {
+ if (SkRect::Intersects(node->fChildren[i].fBounds, query)) {
+ if (0 == node->fLevel) {
+ results->push_back(node->fChildren[i].fOpIndex);
+ } else {
+ this->search(node->fChildren[i].fSubtree, query, results);
+ }
+ }
+ }
+}
+
+size_t SkRTree::bytesUsed() const {
+ size_t byteCount = sizeof(SkRTree);
+
+ byteCount += fNodes.reserved() * sizeof(Node);
+
+ return byteCount;
+}
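+
+/*
+ * Usage sketch (illustrative, not part of this patch):
+ *
+ *   SkRTree tree;
+ *   SkRect rects[] = { SkRect::MakeWH(10, 10), SkRect::MakeXYWH(50, 50, 10, 10) };
+ *   tree.insert(rects, 2);
+ *   SkTDArray<int> hits;
+ *   tree.search(SkRect::MakeXYWH(45, 45, 20, 20), &hits);  // hits holds {1}
+ */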
diff --git a/gfx/skia/skia/src/core/SkRTree.h b/gfx/skia/skia/src/core/SkRTree.h
new file mode 100644
index 0000000000..021a58b450
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRTree_DEFINED
+#define SkRTree_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkBBoxHierarchy.h"
+
+/**
+ * An R-Tree implementation. In short, it is a balanced n-ary tree containing a hierarchy of
+ * bounding rectangles.
+ *
+ * It only supports bulk-loading, i.e. creation from a batch of bounding rectangles.
+ * This performs a bottom-up bulk load using the STR (sort-tile-recursive) algorithm.
+ *
+ * TODO: Experiment with other bulk-load algorithms (in particular the Hilbert pack variant,
+ * which groups rects by position on the Hilbert curve, is probably worth a look). There also
+ * exist top-down bulk load variants (VAMSplit, TopDownGreedy, etc).
+ *
+ * For more details see:
+ *
+ * Beckmann, N.; Kriegel, H. P.; Schneider, R.; Seeger, B. (1990). "The R*-tree:
+ * an efficient and robust access method for points and rectangles"
+ */
+class SkRTree : public SkBBoxHierarchy {
+public:
+ SkRTree();
+ ~SkRTree() override {}
+
+ void insert(const SkRect[], int N) override;
+ void search(const SkRect& query, SkTDArray<int>* results) const override;
+ size_t bytesUsed() const override;
+
+ // Methods and constants below here are only public for tests.
+
+ // Return the depth of the tree structure.
+ int getDepth() const { return fCount ? fRoot.fSubtree->fLevel + 1 : 0; }
+ // Insertion count (not overall node count, which may be greater).
+ int getCount() const { return fCount; }
+
+ // Get the root bound.
+ SkRect getRootBound() const override;
+
+ // These values were empirically determined to produce reasonable performance in most cases.
+ static const int kMinChildren = 6,
+ kMaxChildren = 11;
+
+private:
+ struct Node;
+
+ struct Branch {
+ union {
+ Node* fSubtree;
+ int fOpIndex;
+ };
+ SkRect fBounds;
+ };
+
+ struct Node {
+ uint16_t fNumChildren;
+ uint16_t fLevel;
+ Branch fChildren[kMaxChildren];
+ };
+
+ void search(Node* root, const SkRect& query, SkTDArray<int>* results) const;
+
+ // Consumes the input array.
+ Branch bulkLoad(SkTDArray<Branch>* branches, int level = 0);
+
+ // How many times will bulkLoad() call allocateNodeAtLevel()?
+ static int CountNodes(int branches);
+
+ Node* allocateNodeAtLevel(uint16_t level);
+
+ // This is the count of data elements (rather than total nodes in the tree)
+ int fCount;
+ Branch fRoot;
+ SkTDArray<Node> fNodes;
+
+ typedef SkBBoxHierarchy INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRWBuffer.cpp b/gfx/skia/skia/src/core/SkRWBuffer.cpp
new file mode 100644
index 0000000000..84e56e4748
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRWBuffer.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRWBuffer.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+
+#include <atomic>
+#include <new>
+
+// Force small chunks to be a page's worth
+static const size_t kMinAllocSize = 4096;
+
+struct SkBufferBlock {
+ SkBufferBlock* fNext; // updated by the writer
+ size_t fUsed; // updated by the writer
+ const size_t fCapacity;
+
+ SkBufferBlock(size_t capacity) : fNext(nullptr), fUsed(0), fCapacity(capacity) {}
+
+ const void* startData() const { return this + 1; }
+
+ size_t avail() const { return fCapacity - fUsed; }
+ void* availData() { return (char*)this->startData() + fUsed; }
+
+ static SkBufferBlock* Alloc(size_t length) {
+ size_t capacity = LengthToCapacity(length);
+ void* buffer = sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
+ return new (buffer) SkBufferBlock(capacity);
+ }
+
+ // Return number of bytes actually appended. It is important that we always completely fill
+ // before spilling into the next, since the reader uses fCapacity to know how many it can read.
+ //
+ size_t append(const void* src, size_t length) {
+ this->validate();
+ size_t amount = SkTMin(this->avail(), length);
+ memcpy(this->availData(), src, amount);
+ fUsed += amount;
+ this->validate();
+ return amount;
+ }
+
+ // Do not call in the reader thread, since the writer may be updating fUsed.
+ // (The assertion is still true, but TSAN still may complain about its raciness.)
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fCapacity > 0);
+ SkASSERT(fUsed <= fCapacity);
+#endif
+ }
+
+private:
+ static size_t LengthToCapacity(size_t length) {
+ const size_t minSize = kMinAllocSize - sizeof(SkBufferBlock);
+ return SkTMax(length, minSize);
+ }
+};
+
+struct SkBufferHead {
+ mutable std::atomic<int32_t> fRefCnt;
+ SkBufferBlock fBlock;
+
+ SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {}
+
+ static size_t LengthToCapacity(size_t length) {
+ const size_t minSize = kMinAllocSize - sizeof(SkBufferHead);
+ return SkTMax(length, minSize);
+ }
+
+ static SkBufferHead* Alloc(size_t length) {
+ size_t capacity = LengthToCapacity(length);
+ size_t size = sizeof(SkBufferHead) + capacity;
+ void* buffer = sk_malloc_throw(size);
+ return new (buffer) SkBufferHead(capacity);
+ }
+
+ void ref() const {
+ SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed));
+ }
+
+ void unref() const {
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+ SkASSERT(oldRefCnt);
+ if (1 == oldRefCnt) {
+ // Like unique(), the acquire is only needed on success.
+ SkBufferBlock* block = fBlock.fNext;
+ sk_free((void*)this);
+ while (block) {
+ SkBufferBlock* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+ }
+
+ void validate(size_t minUsed, const SkBufferBlock* tail = nullptr) const {
+#ifdef SK_DEBUG
+ SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
+ size_t totalUsed = 0;
+ const SkBufferBlock* block = &fBlock;
+ const SkBufferBlock* lastBlock = block;
+ while (block) {
+ block->validate();
+ totalUsed += block->fUsed;
+ lastBlock = block;
+ block = block->fNext;
+ }
+ SkASSERT(minUsed <= totalUsed);
+ if (tail) {
+ SkASSERT(tail == lastBlock);
+ }
+#endif
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// The reader can only access block.fCapacity (which never changes), and cannot access
+// block.fUsed, which may be updated by the writer.
+//
+SkROBuffer::SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* tail)
+ : fHead(head), fAvailable(available), fTail(tail)
+{
+ if (head) {
+ fHead->ref();
+ SkASSERT(available > 0);
+ head->validate(available, tail);
+ } else {
+ SkASSERT(0 == available);
+ SkASSERT(!tail);
+ }
+}
+
+SkROBuffer::~SkROBuffer() {
+ if (fHead) {
+ fHead->unref();
+ }
+}
+
+SkROBuffer::Iter::Iter(const SkROBuffer* buffer) {
+ this->reset(buffer);
+}
+
+SkROBuffer::Iter::Iter(const sk_sp<SkROBuffer>& buffer) {
+ this->reset(buffer.get());
+}
+
+void SkROBuffer::Iter::reset(const SkROBuffer* buffer) {
+ fBuffer = buffer;
+ if (buffer && buffer->fHead) {
+ fBlock = &buffer->fHead->fBlock;
+ fRemaining = buffer->fAvailable;
+ } else {
+ fBlock = nullptr;
+ fRemaining = 0;
+ }
+}
+
+const void* SkROBuffer::Iter::data() const {
+ return fRemaining ? fBlock->startData() : nullptr;
+}
+
+size_t SkROBuffer::Iter::size() const {
+ if (!fBlock) {
+ return 0;
+ }
+ return SkTMin(fBlock->fCapacity, fRemaining);
+}
+
+bool SkROBuffer::Iter::next() {
+ if (fRemaining) {
+ fRemaining -= this->size();
+ if (fBuffer->fTail == fBlock) {
+ // There are more blocks, but fBuffer does not know about them.
+ SkASSERT(0 == fRemaining);
+ fBlock = nullptr;
+ } else {
+ fBlock = fBlock->fNext;
+ }
+ }
+ return fRemaining != 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(nullptr), fTail(nullptr), fTotalUsed(0) {
+ if (initialCapacity) {
+ fHead = SkBufferHead::Alloc(initialCapacity);
+ fTail = &fHead->fBlock;
+ }
+}
+
+SkRWBuffer::~SkRWBuffer() {
+ this->validate();
+ if (fHead) {
+ fHead->unref();
+ }
+}
+
+// It is important that we always completely fill the current block before spilling over to the
+// next, since our reader will be using fCapacity (min'd against its total available) to know how
+// many bytes to read from a given block.
+//
+void SkRWBuffer::append(const void* src, size_t length, size_t reserve) {
+ this->validate();
+ if (0 == length) {
+ return;
+ }
+
+ fTotalUsed += length;
+
+ if (nullptr == fHead) {
+ fHead = SkBufferHead::Alloc(length + reserve);
+ fTail = &fHead->fBlock;
+ }
+
+ size_t written = fTail->append(src, length);
+ SkASSERT(written <= length);
+ src = (const char*)src + written;
+ length -= written;
+
+ if (length) {
+ SkBufferBlock* block = SkBufferBlock::Alloc(length + reserve);
+ fTail->fNext = block;
+ fTail = block;
+ written = fTail->append(src, length);
+ SkASSERT(written == length);
+ }
+ this->validate();
+}
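+
+/*
+ * Usage sketch (illustrative, not part of this patch; assumes the writer and
+ * any readers run on separate threads, as described above):
+ *
+ *   SkRWBuffer buffer;
+ *   buffer.append("hello", 5, 0);
+ *   sk_sp<SkROBuffer> snapshot = buffer.makeROBufferSnapshot();
+ *   for (SkROBuffer::Iter iter(snapshot); iter.data(); iter.next()) {
+ *       // iter.data()/iter.size() expose each block captured by the snapshot
+ *   }
+ */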
+
+#ifdef SK_DEBUG
+void SkRWBuffer::validate() const {
+ if (fHead) {
+ fHead->validate(fTotalUsed, fTail);
+ } else {
+ SkASSERT(nullptr == fTail);
+ SkASSERT(0 == fTotalUsed);
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkROBufferStreamAsset : public SkStreamAsset {
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fGlobalOffset <= fBuffer->size());
+ SkASSERT(fLocalOffset <= fIter.size());
+ SkASSERT(fLocalOffset <= fGlobalOffset);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ class AutoValidate {
+ SkROBufferStreamAsset* fStream;
+ public:
+ AutoValidate(SkROBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
+ ~AutoValidate() { fStream->validate(); }
+ };
+ #define AUTO_VALIDATE AutoValidate av(this);
+#else
+ #define AUTO_VALIDATE
+#endif
+
+public:
+ SkROBufferStreamAsset(sk_sp<SkROBuffer> buffer) : fBuffer(std::move(buffer)), fIter(fBuffer) {
+ fGlobalOffset = fLocalOffset = 0;
+ }
+
+ size_t getLength() const override { return fBuffer->size(); }
+
+ bool rewind() override {
+ AUTO_VALIDATE
+ fIter.reset(fBuffer.get());
+ fGlobalOffset = fLocalOffset = 0;
+ return true;
+ }
+
+ size_t read(void* dst, size_t request) override {
+ AUTO_VALIDATE
+ size_t bytesRead = 0;
+ for (;;) {
+ size_t size = fIter.size();
+ SkASSERT(fLocalOffset <= size);
+ size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
+ if (dst) {
+ memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
+ dst = (char*)dst + avail;
+ }
+ bytesRead += avail;
+ fLocalOffset += avail;
+ SkASSERT(bytesRead <= request);
+ if (bytesRead == request) {
+ break;
+ }
+ // If we get here, we've exhausted the current iter
+ SkASSERT(fLocalOffset == size);
+ fLocalOffset = 0;
+ if (!fIter.next()) {
+ break; // ran out of data
+ }
+ }
+ fGlobalOffset += bytesRead;
+ SkASSERT(fGlobalOffset <= fBuffer->size());
+ return bytesRead;
+ }
+
+ bool isAtEnd() const override {
+ return fBuffer->size() == fGlobalOffset;
+ }
+
+ size_t getPosition() const override {
+ return fGlobalOffset;
+ }
+
+ bool seek(size_t position) override {
+ AUTO_VALIDATE
+ if (position < fGlobalOffset) {
+ this->rewind();
+ }
+ (void)this->skip(position - fGlobalOffset);
+ return true;
+ }
+
+ bool move(long offset) override {
+ AUTO_VALIDATE
+ offset += fGlobalOffset;
+ if (offset <= 0) {
+ this->rewind();
+ } else {
+ (void)this->seek(SkToSizeT(offset));
+ }
+ return true;
+ }
+
+private:
+ SkStreamAsset* onDuplicate() const override {
+ return new SkROBufferStreamAsset(fBuffer);
+ }
+
+ SkStreamAsset* onFork() const override {
+ auto clone = this->duplicate();
+ clone->seek(this->getPosition());
+ return clone.release();
+ }
+
+ sk_sp<SkROBuffer> fBuffer;
+ SkROBuffer::Iter fIter;
+ size_t fLocalOffset;
+ size_t fGlobalOffset;
+};
+
+std::unique_ptr<SkStreamAsset> SkRWBuffer::makeStreamSnapshot() const {
+ return skstd::make_unique<SkROBufferStreamAsset>(this->makeROBufferSnapshot());
+}
diff --git a/gfx/skia/skia/src/core/SkRasterClip.cpp b/gfx/skia/skia/src/core/SkRasterClip.cpp
new file mode 100644
index 0000000000..a2f796f057
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRegionPriv.h"
+
+enum MutateResult {
+ kDoNothing_MutateResult,
+ kReplaceClippedAgainstGlobalBounds_MutateResult,
+ kContinue_MutateResult,
+};
+
+static MutateResult mutate_conservative_op(SkRegion::Op* op, bool inverseFilled) {
+ if (inverseFilled) {
+ switch (*op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kDifference_Op:
+ // These ops can only shrink the current clip. So leaving
+ // the clip unchanged conservatively respects the contract.
+ return kDoNothing_MutateResult;
+ case SkRegion::kUnion_Op:
+ case SkRegion::kReplace_Op:
+ case SkRegion::kReverseDifference_Op:
+ case SkRegion::kXOR_Op: {
+ // These ops can grow the current clip up to the extents of
+ // the input clip, which is inverse filled, so we just set
+ // the current clip to the device bounds.
+ *op = SkRegion::kReplace_Op;
+ return kReplaceClippedAgainstGlobalBounds_MutateResult;
+ }
+ }
+ } else {
+ // Not inverse filled
+ switch (*op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kUnion_Op:
+ case SkRegion::kReplace_Op:
+ return kContinue_MutateResult;
+ case SkRegion::kDifference_Op:
+ // Difference can only shrink the current clip.
+ // Leaving clip unchanged conservatively fulfills the contract.
+ return kDoNothing_MutateResult;
+ case SkRegion::kReverseDifference_Op:
+ // To reverse, we swap in the bounds with a replace op.
+ // As with difference, leave it unchanged.
+ *op = SkRegion::kReplace_Op;
+ return kContinue_MutateResult;
+ case SkRegion::kXOR_Op:
+ // Be conservative, based on (A XOR B) always included in (A union B),
+ // which is always included in (bounds(A) union bounds(B))
+ *op = SkRegion::kUnion_Op;
+ return kContinue_MutateResult;
+ }
+ }
+ SkASSERT(false); // unknown op
+ return kDoNothing_MutateResult;
+}
+
+void SkConservativeClip::opRect(const SkRect& localRect, const SkMatrix& ctm,
+ const SkIRect& devBounds, SkRegion::Op op, bool doAA) {
+ SkIRect ir;
+ switch (mutate_conservative_op(&op, false)) {
+ case kDoNothing_MutateResult:
+ return;
+ case kReplaceClippedAgainstGlobalBounds_MutateResult:
+ ir = devBounds;
+ break;
+ case kContinue_MutateResult: {
+ SkRect devRect;
+ ctm.mapRect(&devRect, localRect);
+ ir = doAA ? devRect.roundOut() : devRect.round();
+ } break;
+ }
+ this->opIRect(ir, op);
+}
+
+void SkConservativeClip::opRRect(const SkRRect& rrect, const SkMatrix& ctm,
+ const SkIRect& devBounds, SkRegion::Op op, bool doAA) {
+ this->opRect(rrect.getBounds(), ctm, devBounds, op, doAA);
+}
+
+void SkConservativeClip::opPath(const SkPath& path, const SkMatrix& ctm, const SkIRect& devBounds,
+ SkRegion::Op op, bool doAA) {
+ SkIRect ir;
+ switch (mutate_conservative_op(&op, path.isInverseFillType())) {
+ case kDoNothing_MutateResult:
+ return;
+ case kReplaceClippedAgainstGlobalBounds_MutateResult:
+ ir = devBounds;
+ break;
+ case kContinue_MutateResult: {
+ SkRect bounds = path.getBounds();
+ ctm.mapRect(&bounds);
+ ir = bounds.roundOut();
+ break;
+ }
+ }
+ return this->opIRect(ir, op);
+}
+
+void SkConservativeClip::opRegion(const SkRegion& rgn, SkRegion::Op op) {
+ this->opIRect(rgn.getBounds(), op);
+}
+
+void SkConservativeClip::opIRect(const SkIRect& devRect, SkRegion::Op op) {
+ if (SkRegion::kIntersect_Op == op) {
+ if (!fBounds.intersect(devRect)) {
+ fBounds.setEmpty();
+ }
+ return;
+ }
+
+ // This may still create a complex region (from which we would then take the bounds).
+ // Perhaps we should inline the op-logic directly to never create the rgn...
+ SkRegion result;
+ result.op(SkRegion(fBounds), SkRegion(devRect), op);
+ fBounds = result.getBounds();
+ this->applyClipRestriction(op, &fBounds);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRasterClip::SkRasterClip(const SkRasterClip& src) {
+ AUTO_RASTERCLIP_VALIDATE(src);
+
+ fIsBW = src.fIsBW;
+ if (fIsBW) {
+ fBW = src.fBW;
+ } else {
+ fAA = src.fAA;
+ }
+
+ fIsEmpty = src.isEmpty();
+ fIsRect = src.isRect();
+ fClipRestrictionRect = src.fClipRestrictionRect;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkRegion& rgn) : fBW(rgn) {
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkIRect& bounds) : fBW(bounds) {
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip() {
+ fIsBW = true;
+ fIsEmpty = true;
+ fIsRect = false;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::~SkRasterClip() {
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkRasterClip::operator==(const SkRasterClip& other) const {
+ if (fIsBW != other.fIsBW) {
+ return false;
+ }
+ bool isEqual = fIsBW ? fBW == other.fBW : fAA == other.fAA;
+#ifdef SK_DEBUG
+ if (isEqual) {
+ SkASSERT(fIsEmpty == other.fIsEmpty);
+ SkASSERT(fIsRect == other.fIsRect);
+ }
+#endif
+ return isEqual;
+}
+
+bool SkRasterClip::isComplex() const {
+ return fIsBW ? fBW.isComplex() : !fAA.isEmpty();
+}
+
+const SkIRect& SkRasterClip::getBounds() const {
+ return fIsBW ? fBW.getBounds() : fAA.getBounds();
+}
+
+bool SkRasterClip::setEmpty() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fBW.setEmpty();
+ fAA.setEmpty();
+ fIsEmpty = true;
+ fIsRect = false;
+ return false;
+}
+
+bool SkRasterClip::setRect(const SkIRect& rect) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fAA.setEmpty();
+ fIsRect = fBW.setRect(rect);
+ fIsEmpty = !fIsRect;
+ return fIsRect;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+bool SkRasterClip::setConservativeRect(const SkRect& r, const SkIRect& clipR, bool isInverse) {
+ SkRegion::Op op;
+ if (isInverse) {
+ op = SkRegion::kDifference_Op;
+ } else {
+ op = SkRegion::kIntersect_Op;
+ }
+ fBW.setRect(clipR);
+ fBW.op(r.roundOut(), op);
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+bool SkRasterClip::setPath(const SkPath& path, const SkRegion& clip, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (this->isBW() && !doAA) {
+ (void)fBW.setPath(path, clip);
+ } else {
+ // TODO: since we are going to over-write fAA completely (aren't we?)
+ // we should just clear our BW data (if any) and set fIsAA=true
+ if (this->isBW()) {
+ this->convertToAA();
+ }
+ (void)fAA.setPath(path, &clip, doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRRect& rrect, const SkMatrix& matrix, const SkIRect& devBounds,
+ SkRegion::Op op, bool doAA) {
+ SkIRect bounds(devBounds);
+ this->applyClipRestriction(op, &bounds);
+
+ SkPath path;
+ path.addRRect(rrect);
+
+ return this->op(path, matrix, bounds, op, doAA);
+}
+
+bool SkRasterClip::op(const SkPath& path, const SkMatrix& matrix, const SkIRect& devBounds,
+ SkRegion::Op op, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ SkIRect bounds(devBounds);
+ this->applyClipRestriction(op, &bounds);
+
+ // base is used to limit the size (and therefore memory allocation) of the
+ // region that results from scan converting devPath.
+ SkRegion base;
+
+ SkPath devPath;
+ if (matrix.isIdentity()) {
+ devPath = path;
+ } else {
+ path.transform(matrix, &devPath);
+ devPath.setIsVolatile(true);
+ }
+ if (SkRegion::kIntersect_Op == op) {
+ // since we are intersect, we can do better (tighter) with currRgn's
+ // bounds, than just using the device. However, if currRgn is complex,
+ // our region blitter may hork, so we do that case in two steps.
+ if (this->isRect()) {
+ // FIXME: we should also be able to do this when this->isBW(),
+ // but relaxing the test above triggers GM asserts in
+ // SkRgnBuilder::blitH(). We need to investigate what's going on.
+ return this->setPath(devPath, this->bwRgn(), doAA);
+ } else {
+ base.setRect(this->getBounds());
+ SkRasterClip clip;
+ clip.setPath(devPath, base, doAA);
+ return this->op(clip, op);
+ }
+ } else {
+ base.setRect(bounds);
+
+ if (SkRegion::kReplace_Op == op) {
+ return this->setPath(devPath, base, doAA);
+ } else {
+ SkRasterClip clip;
+ clip.setPath(devPath, base, doAA);
+ return this->op(clip, op);
+ }
+ }
+}
+
+bool SkRasterClip::setPath(const SkPath& path, const SkIRect& clip, bool doAA) {
+ SkRegion tmp;
+ tmp.setRect(clip);
+ return this->setPath(path, tmp, doAA);
+}
+
+bool SkRasterClip::op(const SkIRect& rect, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW ? fBW.op(rect, op) : fAA.op(rect, op);
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRegion& rgn, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fIsBW) {
+ (void)fBW.op(rgn, op);
+ } else {
+ SkAAClip tmp;
+ tmp.setRegion(rgn);
+ (void)fAA.op(tmp, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRasterClip& clip, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ clip.validate();
+
+ if (this->isBW() && clip.isBW()) {
+ (void)fBW.op(clip.fBW, op);
+ } else {
+ SkAAClip tmp;
+ const SkAAClip* other;
+
+ if (this->isBW()) {
+ this->convertToAA();
+ }
+ if (clip.isBW()) {
+ tmp.setRegion(clip.bwRgn());
+ other = &tmp;
+ } else {
+ other = &clip.aaRgn();
+ }
+ (void)fAA.op(*other, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+/**
+ * Our antialiasing currently has a granularity of 1/4 of a pixel along each
+ * axis. Thus we can treat an axis coordinate as an integer if it differs
+ * from its nearest int by < half of that value (1/8 in this case).
+ */
+static bool nearly_integral(SkScalar x) {
+ static const SkScalar domain = SK_Scalar1 / 4;
+ static const SkScalar halfDomain = domain / 2;
+
+ x += halfDomain;
+ return x - SkScalarFloorToScalar(x) < domain;
+}
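+
+// Example (illustrative): halfDomain is 1/8, so x = 3.10 (off from 3 by
+// 0.10 < 1/8) gives 3.225 - floor(3.225) = 0.225 < 0.25 and is treated as
+// integral, while x = 3.20 (off by 0.20) is not.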
+
+bool SkRasterClip::op(const SkRect& localRect, const SkMatrix& matrix, const SkIRect& devBounds,
+ SkRegion::Op op, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ SkRect devRect;
+
+ const bool isScaleTrans = matrix.isScaleTranslate();
+ if (!isScaleTrans) {
+ SkPath path;
+ path.addRect(localRect);
+ path.setIsVolatile(true);
+ return this->op(path, matrix, devBounds, op, doAA);
+ }
+
+ matrix.mapRect(&devRect, localRect);
+
+ if (fIsBW && doAA) {
+ // check that the rect really needs aa, or is it close enough to
+ // integer boundaries that we can just treat it as a BW rect?
+ if (nearly_integral(devRect.fLeft) && nearly_integral(devRect.fTop) &&
+ nearly_integral(devRect.fRight) && nearly_integral(devRect.fBottom)) {
+ doAA = false;
+ }
+ }
+
+ if (fIsBW && !doAA) {
+ SkIRect ir;
+ devRect.round(&ir);
+ this->applyClipRestriction(op, &ir);
+ (void)fBW.op(ir, op);
+ } else {
+ if (fIsBW) {
+ this->convertToAA();
+ }
+ this->applyClipRestriction(op, &devRect);
+ (void)fAA.op(devRect, op, doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+void SkRasterClip::translate(int dx, int dy, SkRasterClip* dst) const {
+ if (nullptr == dst) {
+ return;
+ }
+
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+ if (0 == (dx | dy)) {
+ *dst = *this;
+ return;
+ }
+
+ dst->fIsBW = fIsBW;
+ if (fIsBW) {
+ fBW.translate(dx, dy, &dst->fBW);
+ dst->fAA.setEmpty();
+ } else {
+ fAA.translate(dx, dy, &dst->fAA);
+ dst->fBW.setEmpty();
+ }
+ dst->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::quickContains(const SkIRect& ir) const {
+ return fIsBW ? fBW.quickContains(ir) : fAA.quickContains(ir);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkRegion& SkRasterClip::forceGetBW() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (!fIsBW) {
+ fBW.setRect(fAA.getBounds());
+ }
+ return fBW;
+}
+
+void SkRasterClip::convertToAA() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ SkASSERT(fIsBW);
+ fAA.setRegion(fBW);
+ fIsBW = false;
+
+ // since we are being explicitly asked to convert-to-aa, we pass false so we don't "optimize"
+ // ourselves back to BW.
+ (void)this->updateCacheAndReturnNonEmpty(false);
+}
+
+#ifdef SK_DEBUG
+void SkRasterClip::validate() const {
+ // can't ever assert that fBW is empty, since we may have called forceGetBW
+ if (fIsBW) {
+ SkASSERT(fAA.isEmpty());
+ }
+
+ SkRegionPriv::Validate(fBW);
+ fAA.validate();
+
+ SkASSERT(this->computeIsEmpty() == fIsEmpty);
+ SkASSERT(this->computeIsRect() == fIsRect);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper() {
+ SkDEBUGCODE(fClipRgn = nullptr;)
+ SkDEBUGCODE(fBlitter = nullptr;)
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ this->init(clip, blitter);
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkAAClip* aaclip,
+ SkBlitter* blitter) {
+ SkASSERT(blitter);
+ SkASSERT(aaclip);
+ fBWRgn.setRect(aaclip->getBounds());
+ fAABlitter.init(blitter, aaclip);
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+}
+
+void SkAAClipBlitterWrapper::init(const SkRasterClip& clip, SkBlitter* blitter) {
+ SkASSERT(blitter);
+ if (clip.isBW()) {
+ fClipRgn = &clip.bwRgn();
+ fBlitter = blitter;
+ } else {
+ const SkAAClip& aaclip = clip.aaRgn();
+ fBWRgn.setRect(aaclip.getBounds());
+ fAABlitter.init(blitter, &aaclip);
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRasterClip.h b/gfx/skia/skia/src/core/SkRasterClip.h
new file mode 100644
index 0000000000..994efd6273
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterClip_DEFINED
+#define SkRasterClip_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "include/private/SkMacros.h"
+#include "src/core/SkAAClip.h"
+
+class SkRRect;
+
+class SkConservativeClip {
+ SkIRect fBounds;
+ const SkIRect* fClipRestrictionRect;
+
+ inline void applyClipRestriction(SkRegion::Op op, SkIRect* bounds) {
+ if (op >= SkRegion::kUnion_Op && fClipRestrictionRect
+ && !fClipRestrictionRect->isEmpty()) {
+ if (!bounds->intersect(*fClipRestrictionRect)) {
+ bounds->setEmpty();
+ }
+ }
+ }
+
+public:
+ SkConservativeClip() : fBounds(SkIRect::MakeEmpty()), fClipRestrictionRect(nullptr) {}
+
+ bool isEmpty() const { return fBounds.isEmpty(); }
+ bool isRect() const { return true; }
+ const SkIRect& getBounds() const { return fBounds; }
+
+ void setEmpty() { fBounds.setEmpty(); }
+ void setRect(const SkIRect& r) { fBounds = r; }
+ void setDeviceClipRestriction(const SkIRect* rect) {
+ fClipRestrictionRect = rect;
+ }
+
+ void opRect(const SkRect&, const SkMatrix&, const SkIRect& limit, SkRegion::Op, bool isAA);
+ void opRRect(const SkRRect&, const SkMatrix&, const SkIRect& limit, SkRegion::Op, bool isAA);
+ void opPath(const SkPath&, const SkMatrix&, const SkIRect& limit, SkRegion::Op, bool isAA);
+ void opRegion(const SkRegion&, SkRegion::Op);
+ void opIRect(const SkIRect&, SkRegion::Op);
+};
+
+/**
+ * Wraps a SkRegion and SkAAClip, so we have a single object that can represent either our
+ * BW or antialiased clips.
+ *
+ * This class is optimized for the raster backend of canvas, but can be expensive to keep up to date,
+ * so it supports a runtime option (force-conservative-rects) to turn it into a super-fast
+ * rect-only tracker. The gpu backend uses this since it does not need the result (it uses
+ * SkClipStack instead).
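+ *
+ * A minimal usage sketch (illustrative only; the 100x100 device bounds are
+ * hypothetical): intersecting with an anti-aliased rect whose edges are not
+ * integral should leave the clip in AA mode.
+ *
+ *     SkRasterClip rc(SkIRect::MakeWH(100, 100));
+ *     rc.op(SkRect::MakeLTRB(10.5f, 10.5f, 50.0f, 50.0f), SkMatrix::I(),
+ *           SkIRect::MakeWH(100, 100), SkRegion::kIntersect_Op, true);  // doAA
+ *     SkASSERT(!rc.isEmpty() && rc.isAA());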
+ */
+class SkRasterClip {
+public:
+ SkRasterClip();
+ SkRasterClip(const SkIRect&);
+ SkRasterClip(const SkRegion&);
+ SkRasterClip(const SkRasterClip&);
+ ~SkRasterClip();
+
+ // Only compares the current state. Does not compare isForceConservativeRects(), so that field
+ // could be different but this could still return true.
+ bool operator==(const SkRasterClip&) const;
+ bool operator!=(const SkRasterClip& other) const {
+ return !(*this == other);
+ }
+
+ bool isBW() const { return fIsBW; }
+ bool isAA() const { return !fIsBW; }
+ const SkRegion& bwRgn() const { SkASSERT(fIsBW); return fBW; }
+ const SkAAClip& aaRgn() const { SkASSERT(!fIsBW); return fAA; }
+
+ bool isEmpty() const {
+ SkASSERT(this->computeIsEmpty() == fIsEmpty);
+ return fIsEmpty;
+ }
+
+ bool isRect() const {
+ SkASSERT(this->computeIsRect() == fIsRect);
+ return fIsRect;
+ }
+
+ bool isComplex() const;
+ const SkIRect& getBounds() const;
+
+ bool setEmpty();
+ bool setRect(const SkIRect&);
+
+ bool op(const SkIRect&, SkRegion::Op);
+ bool op(const SkRegion&, SkRegion::Op);
+ bool op(const SkRect&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+ bool op(const SkRRect&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+ bool op(const SkPath&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+
+ void translate(int dx, int dy, SkRasterClip* dst) const;
+ void translate(int dx, int dy) {
+ this->translate(dx, dy, this);
+ }
+
+ bool quickContains(const SkIRect& rect) const;
+ bool quickContains(int left, int top, int right, int bottom) const {
+ return quickContains(SkIRect::MakeLTRB(left, top, right, bottom));
+ }
+
+ /**
+ * Return true if this region is empty, or if the specified rectangle does
+ * not intersect the region. Returning false is not a guarantee that they
+ * intersect, but returning true is a guarantee that they do not.
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return !SkIRect::Intersects(this->getBounds(), rect);
+ }
+
+ // hack for SkCanvas::getTotalClip
+ const SkRegion& forceGetBW();
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ void setDeviceClipRestriction(const SkIRect* rect) {
+ fClipRestrictionRect = rect;
+ }
+
+private:
+ SkRegion fBW;
+ SkAAClip fAA;
+ bool fIsBW;
+ // these two are caches, computed by querying whichever object fIsBW selects
+ bool fIsEmpty;
+ bool fIsRect;
+ const SkIRect* fClipRestrictionRect = nullptr;
+
+ bool computeIsEmpty() const {
+ return fIsBW ? fBW.isEmpty() : fAA.isEmpty();
+ }
+
+ bool computeIsRect() const {
+ return fIsBW ? fBW.isRect() : fAA.isRect();
+ }
+
+ bool updateCacheAndReturnNonEmpty(bool detectAARect = true) {
+ fIsEmpty = this->computeIsEmpty();
+
+ // detect that our computed AA is really just a (hard-edged) rect
+ if (detectAARect && !fIsEmpty && !fIsBW && fAA.isRect()) {
+ fBW.setRect(fAA.getBounds());
+ fAA.setEmpty(); // don't need this guy anymore
+ fIsBW = true;
+ }
+
+ fIsRect = this->computeIsRect();
+ return !fIsEmpty;
+ }
+
+ void convertToAA();
+
+ bool setPath(const SkPath& path, const SkRegion& clip, bool doAA);
+ bool setPath(const SkPath& path, const SkIRect& clip, bool doAA);
+ bool op(const SkRasterClip&, SkRegion::Op);
+ bool setConservativeRect(const SkRect& r, const SkIRect& clipR, bool isInverse);
+
+ inline void applyClipRestriction(SkRegion::Op op, SkIRect* bounds) {
+ if (op >= SkRegion::kUnion_Op && fClipRestrictionRect
+ && !fClipRestrictionRect->isEmpty()) {
+ if (!bounds->intersect(*fClipRestrictionRect)) {
+ bounds->setEmpty();
+ }
+ }
+ }
+
+ inline void applyClipRestriction(SkRegion::Op op, SkRect* bounds) {
+ if (op >= SkRegion::kUnion_Op && fClipRestrictionRect
+ && !fClipRestrictionRect->isEmpty()) {
+ if (!bounds->intersect(SkRect::Make(*fClipRestrictionRect))) {
+ bounds->setEmpty();
+ }
+ }
+ }
+};
+
+class SkAutoRasterClipValidate : SkNoncopyable {
+public:
+ SkAutoRasterClipValidate(const SkRasterClip& rc) : fRC(rc) {
+ fRC.validate();
+ }
+ ~SkAutoRasterClipValidate() {
+ fRC.validate();
+ }
+private:
+ const SkRasterClip& fRC;
+};
+#define SkAutoRasterClipValidate(...) SK_REQUIRE_LOCAL_VAR(SkAutoRasterClipValidate)
+
+#ifdef SK_DEBUG
+ #define AUTO_RASTERCLIP_VALIDATE(rc) SkAutoRasterClipValidate arcv(rc)
+#else
+ #define AUTO_RASTERCLIP_VALIDATE(rc)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Encapsulates the logic of deciding if we need to change/wrap the blitter
+ * for aaclipping. If so, getRgn and getBlitter return modified values. If
+ * not, they return the raw blitter and (bw) clip region.
+ *
+ * We need to keep the constructor/destructor cost as small as possible, so we
+ * can freely put this guy on the stack, and not pay too much for the case when
+ * we're really BW anyway.
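+ *
+ * Typical use is as a short-lived stack object wrapping a caller-supplied
+ * clip and blitter (both hypothetical names here):
+ *
+ *     SkAAClipBlitterWrapper wrapper(rasterClip, blitter);
+ *     const SkRegion& rgn = wrapper.getRgn();      // always a BW region
+ *     SkBlitter*      b   = wrapper.getBlitter();  // wrapped only if the clip is AA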
+ */
+class SkAAClipBlitterWrapper {
+public:
+ SkAAClipBlitterWrapper();
+ SkAAClipBlitterWrapper(const SkRasterClip&, SkBlitter*);
+ SkAAClipBlitterWrapper(const SkAAClip*, SkBlitter*);
+
+ void init(const SkRasterClip&, SkBlitter*);
+
+ const SkIRect& getBounds() const {
+ SkASSERT(fClipRgn);
+ return fClipRgn->getBounds();
+ }
+ const SkRegion& getRgn() const {
+ SkASSERT(fClipRgn);
+ return *fClipRgn;
+ }
+ SkBlitter* getBlitter() {
+ SkASSERT(fBlitter);
+ return fBlitter;
+ }
+
+private:
+ SkRegion fBWRgn;
+ SkAAClipBlitter fAABlitter;
+ // what we return
+ const SkRegion* fClipRgn;
+ SkBlitter* fBlitter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterClipStack.h b/gfx/skia/skia/src/core/SkRasterClipStack.h
new file mode 100644
index 0000000000..3698e256c0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClipStack.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterClipStack_DEFINED
+#define SkRasterClipStack_DEFINED
+
+#include "include/core/SkClipOp.h"
+#include "include/core/SkDeque.h"
+#include "src/core/SkRasterClip.h"
+#include <new>
+
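+// A tiny stack built on SkDeque that can begin life in caller-provided
+// storage, so the common shallow-clip-stack case never touches the heap.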
+template <typename T> class SkTStack {
+public:
+ SkTStack(void* storage, size_t size) : fDeque(sizeof(T), storage, size), fTop(nullptr) {}
+ ~SkTStack() {
+ while (!fDeque.empty()) {
+ ((T*)fDeque.back())->~T();
+ fDeque.pop_back();
+ }
+ }
+
+ bool empty() const { return fDeque.empty(); }
+
+ int count() const { return fDeque.count(); }
+
+ const T& top() const {
+ SkASSERT(fTop);
+ return *fTop;
+ }
+
+ T& top() {
+ SkASSERT(fTop);
+ return *fTop;
+ }
+
+ T* push_raw() { return (T*)fDeque.push_back(); }
+ T& push() {
+ fTop = this->push_raw();
+ new (fTop) T();
+ return *fTop;
+ }
+ T& push(const T& src) {
+ fTop = this->push_raw();
+ new (fTop) T(src);
+ return *fTop;
+ }
+
+ void pop() {
+ fTop->~T();
+ fDeque.pop_back();
+ fTop = fDeque.empty() ? nullptr : (T*)fDeque.back();
+ }
+
+private:
+ SkDeque fDeque;
+ T* fTop;
+};
+
+class SkRasterClipStack : SkNoncopyable {
+ int fCounter = 0;
+public:
+ SkRasterClipStack(int width, int height)
+ : fStack(fStorage, sizeof(fStorage))
+ , fRootBounds(SkIRect::MakeWH(width, height))
+ {
+ Rec& rec = fStack.push();
+ rec.fRC.setRect(fRootBounds);
+ rec.fDeferredCount = 0;
+ SkASSERT(fStack.count() == 1);
+ }
+
+ void setNewSize(int w, int h) {
+ fRootBounds.setXYWH(0, 0, w, h);
+
+ SkASSERT(fStack.count() == 1);
+ Rec& rec = fStack.top();
+ SkASSERT(rec.fDeferredCount == 0);
+ rec.fRC.setRect(fRootBounds);
+ }
+
+ const SkRasterClip& rc() const { return fStack.top().fRC; }
+
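+ // save() does not copy the top clip eagerly; it only bumps the top entry's
+ // deferred count. The copy happens lazily in writable_rc(), and only if a
+ // clip op actually mutates the state before the matching restore(). Balanced
+ // save()/restore() pairs with no clipping in between therefore cost just a
+ // counter increment and decrement.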
+ void save() {
+ fCounter += 1;
+ SkASSERT(fStack.top().fDeferredCount >= 0);
+ fStack.top().fDeferredCount += 1;
+ }
+
+ void restore() {
+ fCounter -= 1; SkASSERT(fCounter >= 0);
+ if (--fStack.top().fDeferredCount < 0) {
+ SkASSERT(fStack.top().fDeferredCount == -1);
+ SkASSERT(fStack.count() > 1);
+ fStack.pop();
+ }
+ }
+
+ void clipRect(const SkMatrix& ctm, const SkRect& rect, SkClipOp op, bool aa) {
+ this->writable_rc().op(rect, ctm, fRootBounds, (SkRegion::Op)op, aa);
+ this->trimIfExpanding(op);
+ this->validate();
+ }
+
+ void clipRRect(const SkMatrix& ctm, const SkRRect& rrect, SkClipOp op, bool aa) {
+ this->writable_rc().op(rrect, ctm, fRootBounds, (SkRegion::Op)op, aa);
+ this->trimIfExpanding(op);
+ this->validate();
+ }
+
+ void clipPath(const SkMatrix& ctm, const SkPath& path, SkClipOp op, bool aa) {
+ this->writable_rc().op(path, ctm, fRootBounds, (SkRegion::Op)op, aa);
+ this->trimIfExpanding(op);
+ this->validate();
+ }
+
+ void clipRegion(const SkRegion& rgn, SkClipOp op) {
+ this->writable_rc().op(rgn, (SkRegion::Op)op);
+ this->trimIfExpanding(op);
+ this->validate();
+ }
+
+ void setDeviceClipRestriction(SkIRect* mutableClipRestriction) {
+ this->writable_rc().setDeviceClipRestriction(mutableClipRestriction);
+ }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ const SkRasterClip& clip = this->rc();
+ if (fRootBounds.isEmpty()) {
+ SkASSERT(clip.isEmpty());
+ } else if (!clip.isEmpty()) {
+ SkASSERT(fRootBounds.contains(clip.getBounds()));
+ }
+#endif
+ }
+
+private:
+ struct Rec {
+ SkRasterClip fRC;
+ int fDeferredCount; // 0 for a "normal" entry
+ };
+
+ enum {
+ ELEM_COUNT = 16,
+ PTR_COUNT = ELEM_COUNT * sizeof(Rec) / sizeof(void*)
+ };
+ void* fStorage[PTR_COUNT];
+ SkTStack<Rec> fStack;
+ SkIRect fRootBounds;
+
+ SkRasterClip& writable_rc() {
+ SkASSERT(fStack.top().fDeferredCount >= 0);
+ if (fStack.top().fDeferredCount > 0) {
+ fStack.top().fDeferredCount -= 1;
+ fStack.push(fStack.top());
+ fStack.top().fDeferredCount = 0;
+ }
+ return fStack.top().fRC;
+ }
+
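+ // Every op ordered after kIntersect_Op in the SkRegion::Op enum (union,
+ // XOR, reverse-difference, replace) can grow the clip, so after applying
+ // one we re-intersect with the root device bounds.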
+ void trimIfExpanding(SkClipOp op) {
+ if ((int)op > (int)SkClipOp::kIntersect) {
+ Rec& rec = fStack.top();
+ SkASSERT(rec.fDeferredCount == 0);
+ rec.fRC.op(fRootBounds, SkRegion::kIntersect_Op);
+ }
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.cpp b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
new file mode 100644
index 0000000000..4f73643552
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include <algorithm>
+
+SkRasterPipeline::SkRasterPipeline(SkArenaAlloc* alloc) : fAlloc(alloc) {
+ this->reset();
+}
+void SkRasterPipeline::reset() {
+ fStages = nullptr;
+ fNumStages = 0;
+ fSlotsNeeded = 1; // We always need one extra slot for just_return().
+}
+
+void SkRasterPipeline::append(StockStage stage, void* ctx) {
+ SkASSERT(stage != uniform_color); // Please use append_constant_color().
+ SkASSERT(stage != unbounded_uniform_color); // Please use append_constant_color().
+ SkASSERT(stage != set_rgb); // Please use append_set_rgb().
+ SkASSERT(stage != unbounded_set_rgb); // Please use append_set_rgb().
+ SkASSERT(stage != clamp_gamut); // Please use append_gamut_clamp_if_normalized().
+ SkASSERT(stage != parametric); // Please use append_transfer_function().
+ SkASSERT(stage != gamma_); // Please use append_transfer_function().
+ SkASSERT(stage != PQish); // Please use append_transfer_function().
+ SkASSERT(stage != HLGish); // Please use append_transfer_function().
+ SkASSERT(stage != HLGinvish); // Please use append_transfer_function().
+ this->unchecked_append(stage, ctx);
+}
+void SkRasterPipeline::unchecked_append(StockStage stage, void* ctx) {
+ fStages = fAlloc->make<StageList>( StageList{fStages, stage, ctx} );
+ fNumStages += 1;
+ fSlotsNeeded += ctx ? 2 : 1;
+}
+void SkRasterPipeline::append(StockStage stage, uintptr_t ctx) {
+ void* ptrCtx;
+ memcpy(&ptrCtx, &ctx, sizeof(ctx));
+ this->append(stage, ptrCtx);
+}
+
+void SkRasterPipeline::extend(const SkRasterPipeline& src) {
+ if (src.empty()) {
+ return;
+ }
+ auto stages = fAlloc->makeArrayDefault<StageList>(src.fNumStages);
+
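+ // fStages is a singly-linked list stored newest-first, so walk src's list
+ // back to front, rebuilding the prev pointers inside our own allocation.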
+ int n = src.fNumStages;
+ const StageList* st = src.fStages;
+ while (n --> 1) {
+ stages[n] = *st;
+ stages[n].prev = &stages[n-1];
+ st = st->prev;
+ }
+ stages[0] = *st;
+ stages[0].prev = fStages;
+
+ fStages = &stages[src.fNumStages - 1];
+ fNumStages += src.fNumStages;
+ fSlotsNeeded += src.fSlotsNeeded - 1; // Don't double count just_returns().
+}
+
+void SkRasterPipeline::dump() const {
+ SkDebugf("SkRasterPipeline, %d stages\n", fNumStages);
+ std::vector<const char*> stages;
+ for (auto st = fStages; st; st = st->prev) {
+ const char* name = "";
+ switch (st->stage) {
+ #define M(x) case x: name = #x; break;
+ SK_RASTER_PIPELINE_STAGES(M)
+ #undef M
+ }
+ stages.push_back(name);
+ }
+ std::reverse(stages.begin(), stages.end());
+ for (const char* name : stages) {
+ SkDebugf("\t%s\n", name);
+ }
+ SkDebugf("\n");
+}
+
+void SkRasterPipeline::append_set_rgb(SkArenaAlloc* alloc, const float rgb[3]) {
+ auto arg = alloc->makeArrayDefault<float>(3);
+ arg[0] = rgb[0];
+ arg[1] = rgb[1];
+ arg[2] = rgb[2];
+
+ auto stage = unbounded_set_rgb;
+ if (0 <= rgb[0] && rgb[0] <= 1 &&
+ 0 <= rgb[1] && rgb[1] <= 1 &&
+ 0 <= rgb[2] && rgb[2] <= 1)
+ {
+ stage = set_rgb;
+ }
+
+ this->unchecked_append(stage, arg);
+}
+
+void SkRasterPipeline::append_constant_color(SkArenaAlloc* alloc, const float rgba[4]) {
+ // r,g,b might be outside [0,1], but alpha should probably always be in [0,1].
+ SkASSERT(0 <= rgba[3] && rgba[3] <= 1);
+
+ if (rgba[0] == 0 && rgba[1] == 0 && rgba[2] == 0 && rgba[3] == 1) {
+ this->append(black_color);
+ } else if (rgba[0] == 1 && rgba[1] == 1 && rgba[2] == 1 && rgba[3] == 1) {
+ this->append(white_color);
+ } else {
+ auto ctx = alloc->make<SkRasterPipeline_UniformColorCtx>();
+ Sk4f color = Sk4f::Load(rgba);
+ color.store(&ctx->r);
+
+ // uniform_color requires colors in range and can go lowp,
+ // while unbounded_uniform_color supports out-of-range colors too but not lowp.
+ if (0 <= rgba[0] && rgba[0] <= rgba[3] &&
+ 0 <= rgba[1] && rgba[1] <= rgba[3] &&
+ 0 <= rgba[2] && rgba[2] <= rgba[3]) {
+ // To make loads more direct, we store 8-bit values in 16-bit slots.
+ color = color * 255.0f + 0.5f;
+ ctx->rgba[0] = (uint16_t)color[0];
+ ctx->rgba[1] = (uint16_t)color[1];
+ ctx->rgba[2] = (uint16_t)color[2];
+ ctx->rgba[3] = (uint16_t)color[3];
+ this->unchecked_append(uniform_color, ctx);
+ } else {
+ this->unchecked_append(unbounded_uniform_color, ctx);
+ }
+ }
+}
+
+void SkRasterPipeline::append_matrix(SkArenaAlloc* alloc, const SkMatrix& matrix) {
+ SkMatrix::TypeMask mt = matrix.getType();
+
+ if (mt == SkMatrix::kIdentity_Mask) {
+ return;
+ }
+ if (mt == SkMatrix::kTranslate_Mask) {
+ float* trans = alloc->makeArrayDefault<float>(2);
+ trans[0] = matrix.getTranslateX();
+ trans[1] = matrix.getTranslateY();
+ this->append(SkRasterPipeline::matrix_translate, trans);
+ } else if ((mt | (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) ==
+ (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ float* scaleTrans = alloc->makeArrayDefault<float>(4);
+ scaleTrans[0] = matrix.getScaleX();
+ scaleTrans[1] = matrix.getScaleY();
+ scaleTrans[2] = matrix.getTranslateX();
+ scaleTrans[3] = matrix.getTranslateY();
+ this->append(SkRasterPipeline::matrix_scale_translate, scaleTrans);
+ } else {
+ float* storage = alloc->makeArrayDefault<float>(9);
+ if (matrix.asAffine(storage)) {
+ // note: asAffine and the 2x3 stage really only need 6 entries
+ this->append(SkRasterPipeline::matrix_2x3, storage);
+ } else {
+ matrix.get9(storage);
+ this->append(SkRasterPipeline::matrix_perspective, storage);
+ }
+ }
+}
+
+void SkRasterPipeline::append_load(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(load_a8, ctx); break;
+ case kA16_unorm_SkColorType: this->append(load_a16, ctx); break;
+ case kA16_float_SkColorType: this->append(load_af16, ctx); break;
+ case kRGB_565_SkColorType: this->append(load_565, ctx); break;
+ case kARGB_4444_SkColorType: this->append(load_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(load_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(load_rg1616, ctx); break;
+ case kR16G16_float_SkColorType: this->append(load_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(load_8888, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(load_1010102, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType:this->append(load_16161616,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(load_f16, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(load_f32, ctx); break;
+
+ case kGray_8_SkColorType: this->append(load_a8, ctx);
+ this->append(alpha_to_gray);
+ break;
+
+ case kRGB_888x_SkColorType: this->append(load_8888, ctx);
+ this->append(force_opaque);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(load_1010102, ctx);
+ this->append(force_opaque);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(load_8888, ctx);
+ this->append(swap_rb);
+ break;
+ }
+}
+
+void SkRasterPipeline::append_load_dst(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(load_a8_dst, ctx); break;
+ case kA16_unorm_SkColorType: this->append(load_a16_dst, ctx); break;
+ case kA16_float_SkColorType: this->append(load_af16_dst, ctx); break;
+ case kRGB_565_SkColorType: this->append(load_565_dst, ctx); break;
+ case kARGB_4444_SkColorType: this->append(load_4444_dst, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(load_rg88_dst, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(load_rg1616_dst, ctx); break;
+ case kR16G16_float_SkColorType: this->append(load_rgf16_dst, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(load_8888_dst, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(load_1010102_dst, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType: this->append(load_16161616_dst,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(load_f16_dst, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(load_f32_dst, ctx); break;
+
+ case kGray_8_SkColorType: this->append(load_a8_dst, ctx);
+ this->append(alpha_to_gray_dst);
+ break;
+
+ case kRGB_888x_SkColorType: this->append(load_8888_dst, ctx);
+ this->append(force_opaque_dst);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(load_1010102_dst, ctx);
+ this->append(force_opaque_dst);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(load_8888_dst, ctx);
+ this->append(swap_rb_dst);
+ break;
+ }
+}
+
+void SkRasterPipeline::append_store(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(store_a8, ctx); break;
+ case kA16_unorm_SkColorType: this->append(store_a16, ctx); break;
+ case kA16_float_SkColorType: this->append(store_af16, ctx); break;
+ case kRGB_565_SkColorType: this->append(store_565, ctx); break;
+ case kARGB_4444_SkColorType: this->append(store_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(store_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(store_rg1616, ctx); break;
+ case kR16G16_float_SkColorType: this->append(store_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(store_8888, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(store_1010102, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType: this->append(store_16161616,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(store_f16, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(store_f32, ctx); break;
+
+ case kRGB_888x_SkColorType: this->append(force_opaque);
+ this->append(store_8888, ctx);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(force_opaque);
+ this->append(store_1010102, ctx);
+ break;
+
+ case kGray_8_SkColorType: this->append(bt709_luminance_or_luma_to_alpha);
+ this->append(store_a8, ctx);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(swap_rb);
+ this->append(store_8888, ctx);
+ break;
+ }
+}
+
+void SkRasterPipeline::append_transfer_function(const skcms_TransferFunction& tf) {
+ void* ctx = const_cast<void*>(static_cast<const void*>(&tf));
+ switch (classify_transfer_fn(tf)) {
+ case Bad_TF: SkASSERT(false); break;
+
+ case TFKind::sRGBish_TF:
+ if (tf.a == 1 && tf.b == 0 && tf.c == 0 && tf.d == 0 && tf.e == 0 && tf.f == 0) {
+ this->unchecked_append(gamma_, ctx);
+ } else {
+ this->unchecked_append(parametric, ctx);
+ }
+ break;
+ case PQish_TF: this->unchecked_append(PQish, ctx); break;
+ case HLGish_TF: this->unchecked_append(HLGish, ctx); break;
+ case HLGinvish_TF: this->unchecked_append(HLGinvish, ctx); break;
+ }
+}
+
+void SkRasterPipeline::append_gamut_clamp_if_normalized(const SkImageInfo& dstInfo) {
+ // N.B. we _do_ clamp for kRGBA_F16Norm_SkColorType... because it's normalized.
+ if (dstInfo.colorType() != kRGBA_F16_SkColorType &&
+ dstInfo.colorType() != kRGBA_F32_SkColorType &&
+ dstInfo.alphaType() == kPremul_SkAlphaType)
+ {
+ this->unchecked_append(SkRasterPipeline::clamp_gamut, nullptr);
+ }
+}
+
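+// The finished program is a flat array of {stage function, optional context}
+// slots terminated by a just_return stage. It is filled in from the end toward
+// the front, which is why callers pass a pointer one past the last slot.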
+SkRasterPipeline::StartPipelineFn SkRasterPipeline::build_pipeline(void** ip) const {
+ // We'll try to build a lowp pipeline, but if that fails, fall back to a highp float pipeline.
+ void** reset_point = ip;
+
+ // Stages are stored backwards in fStages, so we reverse here, back to front.
+ *--ip = (void*)SkOpts::just_return_lowp;
+ for (const StageList* st = fStages; st; st = st->prev) {
+ if (auto fn = SkOpts::stages_lowp[st->stage]) {
+ if (st->ctx) {
+ *--ip = st->ctx;
+ }
+ *--ip = (void*)fn;
+ } else {
+ ip = reset_point;
+ break;
+ }
+ }
+ if (ip != reset_point) {
+ return SkOpts::start_pipeline_lowp;
+ }
+
+ *--ip = (void*)SkOpts::just_return_highp;
+ for (const StageList* st = fStages; st; st = st->prev) {
+ if (st->ctx) {
+ *--ip = st->ctx;
+ }
+ *--ip = (void*)SkOpts::stages_highp[st->stage];
+ }
+ return SkOpts::start_pipeline_highp;
+}
+
+void SkRasterPipeline::run(size_t x, size_t y, size_t w, size_t h) const {
+ if (this->empty()) {
+ return;
+ }
+
+ // Best to not use fAlloc here... we can't bound how often run() will be called.
+ SkAutoSTMalloc<64, void*> program(fSlotsNeeded);
+
+ auto start_pipeline = this->build_pipeline(program.get() + fSlotsNeeded);
+ start_pipeline(x,y,x+w,y+h, program.get());
+}
+
+std::function<void(size_t, size_t, size_t, size_t)> SkRasterPipeline::compile() const {
+ if (this->empty()) {
+ return [](size_t, size_t, size_t, size_t) {};
+ }
+
+ void** program = fAlloc->makeArray<void*>(fSlotsNeeded);
+
+ auto start_pipeline = this->build_pipeline(program + fSlotsNeeded);
+ return [=](size_t x, size_t y, size_t w, size_t h) {
+ start_pipeline(x,y,x+w,y+h, program);
+ };
+}
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.h b/gfx/skia/skia/src/core/SkRasterPipeline.h
new file mode 100644
index 0000000000..d13a8f0a60
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_DEFINED
+#define SkRasterPipeline_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkArenaAlloc.h"
+#include <functional>
+#include <vector> // TODO: unused
+
+/**
+ * SkRasterPipeline provides a cheap way to chain together a pixel processing pipeline.
+ *
+ * It's particularly designed for situations where the potential pipeline is extremely
+ * combinatoric: {N dst formats} x {M source formats} x {K mask formats} x {C transfer modes} ...
+ * No one wants to write specialized routines for all those combinations, and if we did, we'd
+ * end up bloating our code size dramatically. SkRasterPipeline stages can be chained together
+ * at runtime, so we can scale this problem linearly rather than combinatorially.
+ *
+ * Each stage is represented by a function conforming to a common interface and by an
+ * arbitrary context pointer. The stage function arguments and calling convention are
+ * designed to maximize the amount of data we can pass along the pipeline cheaply, and
+ * vary depending on CPU feature detection.
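+ *
+ * A minimal usage sketch (illustrative only; srcPixels, dstPixels, and the
+ * pixel count n are hypothetical): premultiply one row of RGBA pixels.
+ *
+ *     SkSTArenaAlloc<256> alloc;
+ *     SkRasterPipeline p(&alloc);
+ *     SkRasterPipeline_MemoryCtx src = { srcPixels, 0 },
+ *                                dst = { dstPixels, 0 };
+ *     p.append_load (kRGBA_8888_SkColorType, &src);
+ *     p.append(SkRasterPipeline::premul);
+ *     p.append_store(kRGBA_8888_SkColorType, &dst);
+ *     p.run(0, 0, n, 1);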
+ */
+
+#define SK_RASTER_PIPELINE_STAGES(M) \
+ M(callback) M(interpreter) \
+ M(move_src_dst) M(move_dst_src) \
+ M(clamp_0) M(clamp_1) M(clamp_a) M(clamp_gamut) \
+ M(unpremul) M(premul) M(premul_dst) \
+ M(force_opaque) M(force_opaque_dst) \
+ M(set_rgb) M(unbounded_set_rgb) M(swap_rb) M(swap_rb_dst) \
+ M(from_srgb) M(to_srgb) \
+ M(black_color) M(white_color) \
+ M(uniform_color) M(unbounded_uniform_color) M(uniform_color_dst) \
+ M(seed_shader) M(dither) \
+ M(load_a8) M(load_a8_dst) M(store_a8) M(gather_a8) \
+ M(load_565) M(load_565_dst) M(store_565) M(gather_565) \
+ M(load_4444) M(load_4444_dst) M(store_4444) M(gather_4444) \
+ M(load_f16) M(load_f16_dst) M(store_f16) M(gather_f16) \
+ M(load_af16) M(load_af16_dst) M(store_af16) M(gather_af16) \
+ M(load_rgf16) M(load_rgf16_dst) M(store_rgf16) M(gather_rgf16) \
+ M(load_f32) M(load_f32_dst) M(store_f32) M(gather_f32) \
+ M(load_rgf32) M(store_rgf32) \
+ M(load_8888) M(load_8888_dst) M(store_8888) M(gather_8888) \
+ M(load_rg88) M(load_rg88_dst) M(store_rg88) M(gather_rg88) \
+ M(load_a16) M(load_a16_dst) M(store_a16) M(gather_a16) \
+ M(load_rg1616) M(load_rg1616_dst) M(store_rg1616) M(gather_rg1616) \
+ M(load_16161616) M(load_16161616_dst) M(store_16161616) M(gather_16161616) \
+ M(load_1010102) M(load_1010102_dst) M(store_1010102) M(gather_1010102) \
+ M(alpha_to_gray) M(alpha_to_gray_dst) M(bt709_luminance_or_luma_to_alpha) \
+ M(bilerp_clamp_8888) M(bicubic_clamp_8888) \
+ M(store_u16_be) \
+ M(load_src) M(store_src) M(load_dst) M(store_dst) \
+ M(scale_u8) M(scale_565) M(scale_1_float) \
+ M( lerp_u8) M( lerp_565) M( lerp_1_float) M(lerp_native) \
+ M(dstatop) M(dstin) M(dstout) M(dstover) \
+ M(srcatop) M(srcin) M(srcout) M(srcover) \
+ M(clear) M(modulate) M(multiply) M(plus_) M(screen) M(xor_) \
+ M(colorburn) M(colordodge) M(darken) M(difference) \
+ M(exclusion) M(hardlight) M(lighten) M(overlay) M(softlight) \
+ M(hue) M(saturation) M(color) M(luminosity) \
+ M(srcover_rgba_8888) \
+ M(matrix_translate) M(matrix_scale_translate) \
+ M(matrix_2x3) M(matrix_3x3) M(matrix_3x4) M(matrix_4x5) M(matrix_4x3) \
+ M(matrix_perspective) \
+ M(parametric) M(gamma_) M(PQish) M(HLGish) M(HLGinvish) \
+ M(mirror_x) M(repeat_x) \
+ M(mirror_y) M(repeat_y) \
+ M(decal_x) M(decal_y) M(decal_x_and_y) \
+ M(check_decal_mask) \
+ M(negate_x) \
+ M(bilinear) M(bicubic) \
+ M(bilinear_nx) M(bilinear_px) M(bilinear_ny) M(bilinear_py) \
+ M(bicubic_n3x) M(bicubic_n1x) M(bicubic_p1x) M(bicubic_p3x) \
+ M(bicubic_n3y) M(bicubic_n1y) M(bicubic_p1y) M(bicubic_p3y) \
+ M(save_xy) M(accumulate) \
+ M(clamp_x_1) M(mirror_x_1) M(repeat_x_1) \
+ M(evenly_spaced_gradient) \
+ M(gradient) \
+ M(evenly_spaced_2_stop_gradient) \
+ M(xy_to_unit_angle) \
+ M(xy_to_radius) \
+ M(xy_to_2pt_conical_strip) \
+ M(xy_to_2pt_conical_focal_on_circle) \
+ M(xy_to_2pt_conical_well_behaved) \
+ M(xy_to_2pt_conical_smaller) \
+ M(xy_to_2pt_conical_greater) \
+ M(alter_2pt_conical_compensate_focal) \
+ M(alter_2pt_conical_unswap) \
+ M(mask_2pt_conical_nan) \
+ M(mask_2pt_conical_degenerates) M(apply_vector_mask) \
+ M(byte_tables) \
+ M(rgb_to_hsl) M(hsl_to_rgb) \
+ M(gauss_a_to_rgba) \
+ M(emboss) \
+ M(swizzle)
+
+// The largest number of pixels we handle at a time.
+static const int SkRasterPipeline_kMaxStride = 16;
+
+// Structs representing the arguments to some common stages.
+
+struct SkRasterPipeline_MemoryCtx {
+ void* pixels;
+ int stride;
+};
+
+struct SkRasterPipeline_GatherCtx {
+ const void* pixels;
+ int stride;
+ float width;
+ float height;
+};
+
+// State shared by save_xy, accumulate, and bilinear_* / bicubic_*.
+struct SkRasterPipeline_SamplerCtx {
+ float x[SkRasterPipeline_kMaxStride];
+ float y[SkRasterPipeline_kMaxStride];
+ float fx[SkRasterPipeline_kMaxStride];
+ float fy[SkRasterPipeline_kMaxStride];
+ float scalex[SkRasterPipeline_kMaxStride];
+ float scaley[SkRasterPipeline_kMaxStride];
+};
+
+struct SkRasterPipeline_TileCtx {
+ float scale;
+ float invScale; // cache of 1/scale
+};
+
+struct SkRasterPipeline_DecalTileCtx {
+ uint32_t mask[SkRasterPipeline_kMaxStride];
+ float limit_x;
+ float limit_y;
+};
+
+struct SkRasterPipeline_SamplerCtx2 : public SkRasterPipeline_GatherCtx {
+ SkColorType ct;
+ SkTileMode tileX, tileY;
+ float invWidth, invHeight;
+};
+
+struct SkRasterPipeline_CallbackCtx {
+ void (*fn)(SkRasterPipeline_CallbackCtx* self, int active_pixels/*<= SkRasterPipeline_kMaxStride*/);
+
+ // When called, fn() will have our active pixels available in rgba.
+ // When fn() returns, the pipeline will read back those active pixels from read_from.
+ float rgba[4*SkRasterPipeline_kMaxStride];
+ float* read_from = rgba;
+};
+
+namespace SkSL {
+class ByteCode;
+class ByteCodeFunction;
+}
+
+struct SkRasterPipeline_InterpreterCtx {
+ const SkSL::ByteCode* byteCode;
+ const SkSL::ByteCodeFunction* fn;
+
+ SkColor4f paintColor;
+ const void* inputs;
+ int ninputs;
+ bool shaderConvention; // if false, we're a colorfilter
+};
+
+struct SkRasterPipeline_GradientCtx {
+ size_t stopCount;
+ float* fs[4];
+ float* bs[4];
+ float* ts;
+ bool interpolatedInPremul;
+};
+
+struct SkRasterPipeline_EvenlySpaced2StopGradientCtx {
+ float f[4];
+ float b[4];
+ bool interpolatedInPremul;
+};
+
+struct SkRasterPipeline_2PtConicalCtx {
+ uint32_t fMask[SkRasterPipeline_kMaxStride];
+ float fP0,
+ fP1;
+};
+
+struct SkRasterPipeline_UniformColorCtx {
+ float r,g,b,a;
+ uint16_t rgba[4]; // [0,255] in a 16-bit lane.
+};
+
+struct SkRasterPipeline_EmbossCtx {
+ SkRasterPipeline_MemoryCtx mul,
+ add;
+};
+
+class SkRasterPipeline {
+public:
+ explicit SkRasterPipeline(SkArenaAlloc*);
+
+ SkRasterPipeline(const SkRasterPipeline&) = delete;
+ SkRasterPipeline(SkRasterPipeline&&) = default;
+
+ SkRasterPipeline& operator=(const SkRasterPipeline&) = delete;
+ SkRasterPipeline& operator=(SkRasterPipeline&&) = default;
+
+ void reset();
+
+ enum StockStage {
+ #define M(stage) stage,
+ SK_RASTER_PIPELINE_STAGES(M)
+ #undef M
+ };
+ void append(StockStage, void* = nullptr);
+ void append(StockStage stage, const void* ctx) { this->append(stage, const_cast<void*>(ctx)); }
+ void append(StockStage, uintptr_t ctx);
+
+ // Append all stages to this pipeline.
+ void extend(const SkRasterPipeline&);
+
+ // Runs the pipeline in 2d from (x,y) inclusive to (x+w,y+h) exclusive.
+ void run(size_t x, size_t y, size_t w, size_t h) const;
+
+ // Allocates a thunk which amortizes run() setup cost in alloc.
+ std::function<void(size_t, size_t, size_t, size_t)> compile() const;
+
+ void dump() const;
+
+ // Appends a stage for the specified matrix.
+ // Tries to optimize the stage by analyzing the type of matrix.
+ void append_matrix(SkArenaAlloc*, const SkMatrix&);
+
+ // Appends a stage for a constant uniform color.
+ // Tries to optimize the stage based on the color.
+ void append_constant_color(SkArenaAlloc*, const float rgba[4]);
+
+ void append_constant_color(SkArenaAlloc* alloc, const SkColor4f& color) {
+ this->append_constant_color(alloc, color.vec());
+ }
+
+ // Like append_constant_color() but only affecting r,g,b, ignoring the alpha channel.
+ void append_set_rgb(SkArenaAlloc*, const float rgb[3]);
+
+ void append_set_rgb(SkArenaAlloc* alloc, const SkColor4f& color) {
+ this->append_set_rgb(alloc, color.vec());
+ }
+
+ void append_load (SkColorType, const SkRasterPipeline_MemoryCtx*);
+ void append_load_dst(SkColorType, const SkRasterPipeline_MemoryCtx*);
+ void append_store (SkColorType, const SkRasterPipeline_MemoryCtx*);
+
+ void append_gamut_clamp_if_normalized(const SkImageInfo&);
+
+ void append_transfer_function(const skcms_TransferFunction&);
+
+ bool empty() const { return fStages == nullptr; }
+
+private:
+ struct StageList {
+ StageList* prev;
+ StockStage stage;
+ void* ctx;
+ };
+
+ using StartPipelineFn = void(*)(size_t,size_t,size_t,size_t, void** program);
+ StartPipelineFn build_pipeline(void**) const;
+
+ void unchecked_append(StockStage, void*);
+
+ // Used by old single-program void** style execution.
+ SkArenaAlloc* fAlloc;
+ StageList* fStages;
+ int fNumStages;
+ int fSlotsNeeded;
+};
+
+template <size_t bytes>
+class SkRasterPipeline_ : public SkRasterPipeline {
+public:
+ SkRasterPipeline_()
+ : SkRasterPipeline(&fBuiltinAlloc) {}
+
+private:
+ SkSTArenaAlloc<bytes> fBuiltinAlloc;
+};
+
+
+#endif // SkRasterPipeline_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
new file mode 100644
index 0000000000..b983d3b70d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkUtils.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkRasterPipelineBlitter final : public SkBlitter {
+public:
+ // This is our common entrypoint for creating the blitter once we've sorted out shaders.
+ static SkBlitter* Create(const SkPixmap&, const SkPaint&, SkArenaAlloc*,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque, bool is_constant);
+
+ SkRasterPipelineBlitter(SkPixmap dst,
+ SkBlendMode blend,
+ SkArenaAlloc* alloc)
+ : fDst(dst)
+ , fBlend(blend)
+ , fAlloc(alloc)
+ , fColorPipeline(alloc)
+ {}
+
+ void blitH (int x, int y, int w) override;
+ void blitAntiH (int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitMask (const SkMask&, const SkIRect& clip) override;
+ void blitRect (int x, int y, int width, int height) override;
+ void blitV (int x, int y, int height, SkAlpha alpha) override;
+
+private:
+ void append_load_dst (SkRasterPipeline*) const;
+ void append_store (SkRasterPipeline*) const;
+
+ SkPixmap fDst;
+ SkBlendMode fBlend;
+ SkArenaAlloc* fAlloc;
+ SkRasterPipeline fColorPipeline;
+
+ SkRasterPipeline_MemoryCtx
+ fDstPtr = {nullptr,0}, // Always points to the top-left of fDst.
+ fMaskPtr = {nullptr,0}; // Updated each call to blitMask().
+ SkRasterPipeline_EmbossCtx fEmbossCtx; // Used only for k3D_Format masks.
+
+ // We may be able to specialize blitH() or blitRect() into a memset.
+ void (*fMemset2D)(SkPixmap*, int x,int y, int w,int h, uint64_t color) = nullptr;
+ uint64_t fMemsetColor = 0; // Big enough for largest memsettable dst format, F16.
+
+ // Built lazily on first use.
+ std::function<void(size_t, size_t, size_t, size_t)> fBlitRect,
+ fBlitAntiH,
+ fBlitMaskA8,
+ fBlitMaskLCD16,
+ fBlitMask3D;
+
+ // These values are pointed to by the blit pipelines above,
+ // which allows us to adjust them from call to call.
+ float fCurrentCoverage = 0.0f;
+ float fDitherRate = 0.0f;
+
+ typedef SkBlitter INHERITED;
+};
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkMatrix& ctm,
+ SkArenaAlloc* alloc) {
+ // For legacy code to keep working, we sometimes still need to distinguish a null dstCS from sRGB.
+#if 0
+ SkColorSpace* dstCS = dst.colorSpace() ? dst.colorSpace()
+ : sk_srgb_singleton();
+#else
+ SkColorSpace* dstCS = dst.colorSpace();
+#endif
+ SkColorType dstCT = dst.colorType();
+ SkColor4f paintColor = paint.getColor4f();
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ dstCS, kUnpremul_SkAlphaType).apply(paintColor.vec());
+
+ auto shader = as_SB(paint.getShader());
+
+ SkRasterPipeline_<256> shaderPipeline;
+ if (!shader) {
+ // Having no shader makes things nice and easy... just use the paint color.
+ shaderPipeline.append_constant_color(alloc, paintColor.premul().vec());
+ bool is_opaque = paintColor.fA == 1.0f,
+ is_constant = true;
+ return SkRasterPipelineBlitter::Create(dst, paint, alloc,
+ shaderPipeline, is_opaque, is_constant);
+ }
+
+ bool is_opaque = shader->isOpaque() && paintColor.fA == 1.0f;
+ bool is_constant = shader->isConstant();
+
+ if (shader->appendStages({&shaderPipeline, alloc, dstCT, dstCS, paint, nullptr, ctm})) {
+ if (paintColor.fA != 1.0f) {
+ shaderPipeline.append(SkRasterPipeline::scale_1_float,
+ alloc->make<float>(paintColor.fA));
+ }
+ return SkRasterPipelineBlitter::Create(dst, paint, alloc,
+ shaderPipeline, is_opaque, is_constant);
+ }
+
+ // The shader has opted out of drawing anything.
+ return alloc->make<SkNullBlitter>();
+}
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque,
+ SkArenaAlloc* alloc) {
+ bool is_constant = false; // If this were the case, it'd be better to just set a paint color.
+ return SkRasterPipelineBlitter::Create(dst, paint, alloc,
+ shaderPipeline, is_opaque, is_constant);
+}
+
+SkBlitter* SkRasterPipelineBlitter::Create(const SkPixmap& dst,
+ const SkPaint& paint,
+ SkArenaAlloc* alloc,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque,
+ bool is_constant) {
+ auto blitter = alloc->make<SkRasterPipelineBlitter>(dst,
+ paint.getBlendMode(),
+ alloc);
+
+ // Our job in this factory is to fill out the blitter's color pipeline.
+ // This is the common front of the full blit pipelines, each constructed lazily on first use.
+ // The full blit pipelines handle reading and writing the dst, blending, coverage, dithering.
+ auto colorPipeline = &blitter->fColorPipeline;
+
+ // Let's get the shader in first.
+ colorPipeline->extend(shaderPipeline);
+
+ // If there's a color filter it comes next.
+ if (auto colorFilter = paint.getColorFilter()) {
+ SkStageRec rec = {
+ colorPipeline, alloc, dst.colorType(), dst.colorSpace(), paint, nullptr, SkMatrix::I()
+ };
+ colorFilter->appendStages(rec, is_opaque);
+ is_opaque = is_opaque && (colorFilter->getFlags() & SkColorFilter::kAlphaUnchanged_Flag);
+ }
+
+ // Not all formats make sense to dither (think F16). We set their dither rate
+ // to zero. We need to decide if we're going to dither now to keep is_constant accurate.
+ if (paint.isDither()) {
+ switch (dst.info().colorType()) {
+ default: blitter->fDitherRate = 0.0f; break;
+ case kARGB_4444_SkColorType: blitter->fDitherRate = 1/15.0f; break;
+ case kRGB_565_SkColorType: blitter->fDitherRate = 1/63.0f; break;
+ case kGray_8_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: blitter->fDitherRate = 1/255.0f; break;
+ case kRGB_101010x_SkColorType:
+ case kRGBA_1010102_SkColorType: blitter->fDitherRate = 1/1023.0f; break;
+ }
+ // TODO: for constant colors, we could try to measure the effect of dithering, and if
+ //       it has no value (i.e. all variations result in the same 32-bit color), then we
+ //       could disable it (for speed, by not adding the stage).
+ }
+ is_constant = is_constant && (blitter->fDitherRate == 0.0f);
+
+ // We're logically done here. The code between here and return blitter is all optimization.
+
+ // A pipeline that's still constant here can collapse back into a constant color.
+ if (is_constant) {
+ SkColor4f constantColor;
+ SkRasterPipeline_MemoryCtx constantColorPtr = { &constantColor, 0 };
+ colorPipeline->append_gamut_clamp_if_normalized(dst.info());
+ colorPipeline->append(SkRasterPipeline::store_f32, &constantColorPtr);
+ colorPipeline->run(0,0,1,1);
+ colorPipeline->reset();
+ colorPipeline->append_constant_color(alloc, constantColor);
+
+ is_opaque = constantColor.fA == 1.0f;
+ }
+
+ // We can strength-reduce SrcOver into Src when opaque.
+ if (is_opaque && blitter->fBlend == SkBlendMode::kSrcOver) {
+ blitter->fBlend = SkBlendMode::kSrc;
+ }
+
+ // When we're drawing a constant color in Src mode, we can sometimes just memset.
+ // (The previous two optimizations help find more opportunities for this one.)
+ if (is_constant && blitter->fBlend == SkBlendMode::kSrc) {
+ // Run our color pipeline all the way through to produce what we'd memset when we can.
+ // Not all blits can memset, so we need to keep colorPipeline too.
+ SkRasterPipeline_<256> p;
+ p.extend(*colorPipeline);
+ p.append_gamut_clamp_if_normalized(dst.info());
+ blitter->fDstPtr = SkRasterPipeline_MemoryCtx{&blitter->fMemsetColor, 0};
+ blitter->append_store(&p);
+ p.run(0,0,1,1);
+
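+ // shiftPerPixel() is log2(bytes per pixel): 0 -> 8-bit dsts (e.g. A8),
+ // 1 -> 16-bit (565/4444), 2 -> 32-bit (8888/1010102), 3 -> 64-bit (F16).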
+ switch (blitter->fDst.shiftPerPixel()) {
+ case 0: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ void* p = dst->writable_addr(x,y);
+ while (h --> 0) {
+ memset(p, c, w);
+ p = SkTAddOffset<void>(p, dst->rowBytes());
+ }
+ }; break;
+
+ case 1: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset16(dst->writable_addr16(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ case 2: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset32(dst->writable_addr32(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ case 3: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset64(dst->writable_addr64(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ // TODO(F32)?
+ }
+ }
+
+ blitter->fDstPtr = SkRasterPipeline_MemoryCtx{
+ blitter->fDst.writable_addr(),
+ blitter->fDst.rowBytesAsPixels(),
+ };
+
+ return blitter;
+}
+
+void SkRasterPipelineBlitter::append_load_dst(SkRasterPipeline* p) const {
+ p->append_load_dst(fDst.info().colorType(), &fDstPtr);
+ if (fDst.info().alphaType() == kUnpremul_SkAlphaType) {
+ p->append(SkRasterPipeline::premul_dst);
+ }
+}
+
+void SkRasterPipelineBlitter::append_store(SkRasterPipeline* p) const {
+ if (fDst.info().alphaType() == kUnpremul_SkAlphaType) {
+ p->append(SkRasterPipeline::unpremul);
+ }
+ if (fDitherRate > 0.0f) {
+ p->append(SkRasterPipeline::dither, &fDitherRate);
+ }
+
+ p->append_store(fDst.info().colorType(), &fDstPtr);
+}
+
+void SkRasterPipelineBlitter::blitH(int x, int y, int w) {
+ this->blitRect(x,y,w,1);
+}
+
+void SkRasterPipelineBlitter::blitRect(int x, int y, int w, int h) {
+ if (fMemset2D) {
+ fMemset2D(&fDst, x,y, w,h, fMemsetColor);
+ return;
+ }
+
+ if (!fBlitRect) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_gamut_clamp_if_normalized(fDst.info());
+ if (fBlend == SkBlendMode::kSrcOver
+ && (fDst.info().colorType() == kRGBA_8888_SkColorType ||
+ fDst.info().colorType() == kBGRA_8888_SkColorType)
+ && !fDst.colorSpace()
+ && fDst.info().alphaType() != kUnpremul_SkAlphaType
+ && fDitherRate == 0.0f) {
+ if (fDst.info().colorType() == kBGRA_8888_SkColorType) {
+ p.append(SkRasterPipeline::swap_rb);
+ }
+ p.append(SkRasterPipeline::srcover_rgba_8888, &fDstPtr);
+ } else {
+ if (fBlend != SkBlendMode::kSrc) {
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ }
+ this->append_store(&p);
+ }
+ fBlitRect = p.compile();
+ }
+
+ fBlitRect(x,y,w,h);
+}
+
+void SkRasterPipelineBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
+ if (!fBlitAntiH) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_gamut_clamp_if_normalized(fDst.info());
+ if (SkBlendMode_ShouldPreScaleCoverage(fBlend, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipeline::scale_1_float, &fCurrentCoverage);
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ } else {
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ p.append(SkRasterPipeline::lerp_1_float, &fCurrentCoverage);
+ }
+
+ this->append_store(&p);
+ fBlitAntiH = p.compile();
+ }
+
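+ // runs[] is a run-length encoding: runs[i] pixels in a row share coverage
+ // aa[i]. Fully transparent runs are skipped outright, and fully opaque runs
+ // take the cheaper blitH() path.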
+ for (int16_t run = *runs; run > 0; run = *runs) {
+ switch (*aa) {
+ case 0x00: break;
+ case 0xff: this->blitH(x,y,run); break;
+ default:
+ fCurrentCoverage = *aa * (1/255.0f);
+ fBlitAntiH(x,y,run,1);
+ }
+ x += run;
+ runs += run;
+ aa += run;
+ }
+}
+
+void SkRasterPipelineBlitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkIRect clip = {x,y, x+2,y+1};
+ uint8_t coverage[] = { (uint8_t)a0, (uint8_t)a1 };
+
+ SkMask mask;
+ mask.fImage = coverage;
+ mask.fBounds = clip;
+ mask.fRowBytes = 2;
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkIRect clip = {x,y, x+1,y+2};
+ uint8_t coverage[] = { (uint8_t)a0, (uint8_t)a1 };
+
+ SkMask mask;
+ mask.fImage = coverage;
+ mask.fBounds = clip;
+ mask.fRowBytes = 1;
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkIRect clip = {x,y, x+1,y+height};
+
+ SkMask mask;
+ mask.fImage = &alpha;
+ mask.fBounds = clip;
+ mask.fRowBytes = 0; // so we reuse the one "row" for the whole height
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ // TODO: native BW masks?
+ return INHERITED::blitMask(mask, clip);
+ }
+
+ // ARGB and SDF masks shouldn't make it here.
+ SkASSERT(mask.fFormat == SkMask::kA8_Format
+ || mask.fFormat == SkMask::kLCD16_Format
+ || mask.fFormat == SkMask::k3D_Format);
+
+ auto extract_mask_plane = [&mask](int plane, SkRasterPipeline_MemoryCtx* ctx) {
+ // LCD is 16-bit per pixel; A8 and 3D are 8-bit per pixel.
+ size_t bpp = mask.fFormat == SkMask::kLCD16_Format ? 2 : 1;
+
+ // Select the right mask plane. Usually plane == 0 and this is just mask.fImage.
+ auto ptr = (uintptr_t)mask.fImage
+ + plane * mask.computeImageSize();
+
+ // Update ctx to point "into" this current mask, but lined up with fDstPtr at (0,0).
+ // This sort of trickery upsets UBSAN (pointer-overflow) so our ptr must be a uintptr_t.
+ // mask.fRowBytes is a uint32_t, which would break our addressing math on 64-bit builds.
+ size_t rowBytes = mask.fRowBytes;
+ ctx->stride = rowBytes / bpp;
+ ctx->pixels = (void*)(ptr - mask.fBounds.left() * bpp
+ - mask.fBounds.top() * rowBytes);
+ };
+
+ extract_mask_plane(0, &fMaskPtr);
+ if (mask.fFormat == SkMask::k3D_Format) {
+ extract_mask_plane(1, &fEmbossCtx.mul);
+ extract_mask_plane(2, &fEmbossCtx.add);
+ }
+
+ // Lazily build whichever pipeline we need, specialized for each mask format.
+ if (mask.fFormat == SkMask::kA8_Format && !fBlitMaskA8) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_gamut_clamp_if_normalized(fDst.info());
+ if (SkBlendMode_ShouldPreScaleCoverage(fBlend, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipeline::scale_u8, &fMaskPtr);
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ } else {
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ p.append(SkRasterPipeline::lerp_u8, &fMaskPtr);
+ }
+ this->append_store(&p);
+ fBlitMaskA8 = p.compile();
+ }
+ if (mask.fFormat == SkMask::kLCD16_Format && !fBlitMaskLCD16) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_gamut_clamp_if_normalized(fDst.info());
+ if (SkBlendMode_ShouldPreScaleCoverage(fBlend, /*rgb_coverage=*/true)) {
+ // Somewhat unusually, scale_565 needs dst loaded first.
+ this->append_load_dst(&p);
+ p.append(SkRasterPipeline::scale_565, &fMaskPtr);
+ SkBlendMode_AppendStages(fBlend, &p);
+ } else {
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ p.append(SkRasterPipeline::lerp_565, &fMaskPtr);
+ }
+ this->append_store(&p);
+ fBlitMaskLCD16 = p.compile();
+ }
+ if (mask.fFormat == SkMask::k3D_Format && !fBlitMask3D) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ // This bit is where we differ from kA8_Format:
+ p.append(SkRasterPipeline::emboss, &fEmbossCtx);
+ // Now onward just as kA8.
+ p.append_gamut_clamp_if_normalized(fDst.info());
+ if (SkBlendMode_ShouldPreScaleCoverage(fBlend, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipeline::scale_u8, &fMaskPtr);
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ } else {
+ this->append_load_dst(&p);
+ SkBlendMode_AppendStages(fBlend, &p);
+ p.append(SkRasterPipeline::lerp_u8, &fMaskPtr);
+ }
+ this->append_store(&p);
+ fBlitMask3D = p.compile();
+ }
+
+ std::function<void(size_t,size_t,size_t,size_t)>* blitter = nullptr;
+ switch (mask.fFormat) {
+ case SkMask::kA8_Format: blitter = &fBlitMaskA8; break;
+ case SkMask::kLCD16_Format: blitter = &fBlitMaskLCD16; break;
+ case SkMask::k3D_Format: blitter = &fBlitMask3D; break;
+ default:
+ SkASSERT(false);
+ return;
+ }
+
+ SkASSERT(blitter);
+ (*blitter)(clip.left(),clip.top(), clip.width(),clip.height());
+}
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.cpp b/gfx/skia/skia/src/core/SkReadBuffer.cpp
new file mode 100644
index 0000000000..ff78b74a34
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.cpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeMath.h"
+
+#ifndef SK_DISABLE_READBUFFER
+
+namespace {
+ // This generator is intentionally built to fail on all attempts to get its pixels,
+ // simulating a bad or empty codec stream.
+ class EmptyImageGenerator final : public SkImageGenerator {
+ public:
+ EmptyImageGenerator(const SkImageInfo& info) : INHERITED(info) { }
+
+ private:
+ typedef SkImageGenerator INHERITED;
+ };
+
+ static sk_sp<SkImage> MakeEmptyImage(int width, int height) {
+ return SkImage::MakeFromGenerator(
+ skstd::make_unique<EmptyImageGenerator>(SkImageInfo::MakeN32Premul(width, height)));
+ }
+
+} // anonymous namespace
+
+
+SkReadBuffer::SkReadBuffer() {
+ fVersion = 0;
+
+ fTFArray = nullptr;
+ fTFCount = 0;
+
+ fFactoryArray = nullptr;
+ fFactoryCount = 0;
+}
+
+SkReadBuffer::SkReadBuffer(const void* data, size_t size) {
+ fVersion = 0;
+ this->setMemory(data, size);
+
+ fTFArray = nullptr;
+ fTFCount = 0;
+
+ fFactoryArray = nullptr;
+ fFactoryCount = 0;
+}
+
+void SkReadBuffer::setMemory(const void* data, size_t size) {
+ this->validate(IsPtrAlign4(data) && (SkAlign4(size) == size));
+ if (!fError) {
+ fReader.setMemory(data, size);
+ }
+}
+void SkReadBuffer::setInvalid() {
+ if (!fError) {
+ // When an error is found, send the read cursor to the end of the stream
+ fReader.skip(fReader.available());
+ fError = true;
+ }
+}
+
+const void* SkReadBuffer::skip(size_t size) {
+ size_t inc = SkAlign4(size);
+ this->validate(inc >= size);
+ const void* addr = fReader.peek();
+ this->validate(IsPtrAlign4(addr) && fReader.isAvailable(inc));
+ if (fError) {
+ return nullptr;
+ }
+
+ fReader.skip(size);
+ return addr;
+}
+
+const void* SkReadBuffer::skip(size_t count, size_t size) {
+ return this->skip(SkSafeMath::Mul(count, size));
+}
+
+void SkReadBuffer::setDeserialProcs(const SkDeserialProcs& procs) {
+ fProcs = procs;
+}
+
+bool SkReadBuffer::readBool() {
+ uint32_t value = this->readUInt();
+ // Boolean value should be either 0 or 1
+ this->validate(!(value & ~1));
+ return value != 0;
+}
+
+SkColor SkReadBuffer::readColor() {
+ return this->readUInt();
+}
+
+int32_t SkReadBuffer::readInt() {
+ const size_t inc = sizeof(int32_t);
+ this->validate(IsPtrAlign4(fReader.peek()) && fReader.isAvailable(inc));
+ return fError ? 0 : fReader.readInt();
+}
+
+SkScalar SkReadBuffer::readScalar() {
+ const size_t inc = sizeof(SkScalar);
+ this->validate(IsPtrAlign4(fReader.peek()) && fReader.isAvailable(inc));
+ return fError ? 0 : fReader.readScalar();
+}
+
+uint32_t SkReadBuffer::readUInt() {
+ return this->readInt();
+}
+
+int32_t SkReadBuffer::read32() {
+ return this->readInt();
+}
+
+uint8_t SkReadBuffer::peekByte() {
+ if (fReader.available() <= 0) {
+ fError = true;
+ return 0;
+ }
+ return *((uint8_t*) fReader.peek());
+}
+
+bool SkReadBuffer::readPad32(void* buffer, size_t bytes) {
+ if (const void* src = this->skip(bytes)) {
+ memcpy(buffer, src, bytes);
+ return true;
+ }
+ return false;
+}
+
+const char* SkReadBuffer::readString(size_t* len) {
+ *len = this->readUInt();
+
+ // The string is len characters and a terminating \0.
+ const char* c_str = this->skipT<char>(*len+1);
+
+ if (this->validate(c_str && c_str[*len] == '\0')) {
+ return c_str;
+ }
+ return nullptr;
+}
+
+void SkReadBuffer::readString(SkString* string) {
+ size_t len;
+ if (const char* c_str = this->readString(&len)) {
+ string->set(c_str, len);
+ return;
+ }
+ string->reset();
+}
+
+void SkReadBuffer::readColor4f(SkColor4f* color) {
+ if (!this->readPad32(color, sizeof(SkColor4f))) {
+ *color = {0, 0, 0, 0};
+ }
+}
+
+void SkReadBuffer::readPoint(SkPoint* point) {
+ point->fX = this->readScalar();
+ point->fY = this->readScalar();
+}
+
+void SkReadBuffer::readPoint3(SkPoint3* point) {
+ this->readPad32(point, sizeof(SkPoint3));
+}
+
+void SkReadBuffer::readMatrix(SkMatrix* matrix) {
+ size_t size = 0;
+ if (this->isValid()) {
+ size = SkMatrixPriv::ReadFromMemory(matrix, fReader.peek(), fReader.available());
+ (void)this->validate((SkAlign4(size) == size) && (0 != size));
+ }
+ if (!this->isValid()) {
+ matrix->reset();
+ }
+ (void)this->skip(size);
+}
+
+void SkReadBuffer::readIRect(SkIRect* rect) {
+ if (!this->readPad32(rect, sizeof(SkIRect))) {
+ rect->setEmpty();
+ }
+}
+
+void SkReadBuffer::readRect(SkRect* rect) {
+ if (!this->readPad32(rect, sizeof(SkRect))) {
+ rect->setEmpty();
+ }
+}
+
+void SkReadBuffer::readRRect(SkRRect* rrect) {
+ if (!this->validate(fReader.readRRect(rrect))) {
+ rrect->setEmpty();
+ }
+}
+
+void SkReadBuffer::readRegion(SkRegion* region) {
+ size_t size = 0;
+ if (!fError) {
+ size = region->readFromMemory(fReader.peek(), fReader.available());
+ if (!this->validate((SkAlign4(size) == size) && (0 != size))) {
+ region->setEmpty();
+ }
+ }
+ (void)this->skip(size);
+}
+
+void SkReadBuffer::readPath(SkPath* path) {
+ size_t size = 0;
+ if (!fError) {
+ size = path->readFromMemory(fReader.peek(), fReader.available());
+ if (!this->validate((SkAlign4(size) == size) && (0 != size))) {
+ path->reset();
+ }
+ }
+ (void)this->skip(size);
+}
+
+bool SkReadBuffer::readArray(void* value, size_t size, size_t elementSize) {
+ const uint32_t count = this->readUInt();
+ return this->validate(size == count) &&
+ this->readPad32(value, SkSafeMath::Mul(size, elementSize));
+}
+
+bool SkReadBuffer::readByteArray(void* value, size_t size) {
+ return this->readArray(value, size, sizeof(uint8_t));
+}
+
+bool SkReadBuffer::readColorArray(SkColor* colors, size_t size) {
+ return this->readArray(colors, size, sizeof(SkColor));
+}
+
+bool SkReadBuffer::readColor4fArray(SkColor4f* colors, size_t size) {
+ return this->readArray(colors, size, sizeof(SkColor4f));
+}
+
+bool SkReadBuffer::readIntArray(int32_t* values, size_t size) {
+ return this->readArray(values, size, sizeof(int32_t));
+}
+
+bool SkReadBuffer::readPointArray(SkPoint* points, size_t size) {
+ return this->readArray(points, size, sizeof(SkPoint));
+}
+
+bool SkReadBuffer::readScalarArray(SkScalar* values, size_t size) {
+ return this->readArray(values, size, sizeof(SkScalar));
+}
+
+sk_sp<SkData> SkReadBuffer::readByteArrayAsData() {
+ size_t numBytes = this->getArrayCount();
+ if (!this->validate(fReader.isAvailable(numBytes))) {
+ return nullptr;
+ }
+
+ SkAutoMalloc buffer(numBytes);
+ if (!this->readByteArray(buffer.get(), numBytes)) {
+ return nullptr;
+ }
+
+ return SkData::MakeFromMalloc(buffer.release(), numBytes);
+}
+
+uint32_t SkReadBuffer::getArrayCount() {
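+    // Note: this peeks at the count without advancing the reader; a following
+    // read*Array() call consumes and re-validates the same count.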
+ const size_t inc = sizeof(uint32_t);
+ fError = fError || !IsPtrAlign4(fReader.peek()) || !fReader.isAvailable(inc);
+ return fError ? 0 : *(uint32_t*)fReader.peek();
+}
+
+/* Format:
+ * (subset) width, height
+ * (subset) origin x, y
+ * size (31bits)
+ * data [ encoded, with raw width/height ]
+ */
+sk_sp<SkImage> SkReadBuffer::readImage() {
+ SkIRect bounds;
+ if (this->isVersionLT(SkPicturePriv::kStoreImageBounds_Version)) {
+ bounds.fLeft = bounds.fTop = 0;
+ bounds.fRight = this->read32();
+ bounds.fBottom = this->read32();
+ } else {
+ this->readIRect(&bounds);
+ }
+ const int width = bounds.width();
+ const int height = bounds.height();
+    if (width <= 0 || height <= 0) {  // SkImage dimensions must be positive
+ this->validate(false);
+ return nullptr;
+ }
+
+ int32_t size = this->read32();
+ if (size == SK_NaN32) {
+ // 0x80000000 is never valid, since it cannot be passed to abs().
+ this->validate(false);
+ return nullptr;
+ }
+ if (size == 0) {
+ // The image could not be encoded at serialization time - return an empty placeholder.
+ return MakeEmptyImage(width, height);
+ }
+
+ // we used to negate the size for "custom" encoded images -- ignore that signal (Dec-2017)
+ size = SkAbs32(size);
+ if (size == 1) {
+ // legacy check (we stopped writing this for "raw" images Nov-2017)
+ this->validate(false);
+ return nullptr;
+ }
+
+ // Preflight check to make sure there's enough stuff in the buffer before
+ // we allocate the memory. This helps the fuzzer avoid OOM when it creates
+ // bad/corrupt input.
+ if (!this->validateCanReadN<uint8_t>(size)) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ if (!this->readPad32(data->writable_data(), size)) {
+ this->validate(false);
+ return nullptr;
+ }
+ if (this->isVersionLT(SkPicturePriv::kDontNegateImageSize_Version)) {
+ (void)this->read32(); // originX
+ (void)this->read32(); // originY
+ }
+
+ sk_sp<SkImage> image;
+ if (fProcs.fImageProc) {
+ image = fProcs.fImageProc(data->data(), data->size(), fProcs.fImageCtx);
+ }
+ if (!image) {
+ image = SkImage::MakeFromEncoded(std::move(data));
+ }
+ if (image) {
+ if (bounds.x() || bounds.y() || width < image->width() || height < image->height()) {
+ image = image->makeSubset(bounds);
+ }
+ }
+ // Question: are we correct to return an "empty" image instead of nullptr, if the decoder
+ // failed for some reason?
+ return image ? image : MakeEmptyImage(width, height);
+}
+
+sk_sp<SkTypeface> SkReadBuffer::readTypeface() {
+ // Read 32 bits (signed)
+ // 0 -- return null (default font)
+ // >0 -- index
+ // <0 -- custom (serial procs) : negative size in bytes
+
+ int32_t index = this->read32();
+ if (index == 0) {
+ return nullptr;
+ } else if (index > 0) {
+ if (!this->validate(index <= fTFCount)) {
+ return nullptr;
+ }
+ return fTFArray[index - 1];
+ } else { // custom
+ size_t size = sk_negate_to_size_t(index);
+ const void* data = this->skip(size);
+ if (!this->validate(data != nullptr && fProcs.fTypefaceProc)) {
+ return nullptr;
+ }
+ return fProcs.fTypefaceProc(data, size, fProcs.fTypefaceCtx);
+ }
+}
+
+SkFlattenable* SkReadBuffer::readFlattenable(SkFlattenable::Type ft) {
+ SkFlattenable::Factory factory = nullptr;
+
+ if (fFactoryCount > 0) {
+ int32_t index = this->read32();
+ if (0 == index || !this->isValid()) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+ index -= 1; // we stored the index-base-1
+ if ((unsigned)index >= (unsigned)fFactoryCount) {
+ this->validate(false);
+ return nullptr;
+ }
+ factory = fFactoryArray[index];
+ } else {
+ if (this->peekByte() != 0) {
+ // If the first byte is non-zero, the flattenable is specified by a string.
+ size_t ignored_length;
+ if (const char* name = this->readString(&ignored_length)) {
+ factory = SkFlattenable::NameToFactory(name);
+ fFlattenableDict.set(fFlattenableDict.count() + 1, factory);
+ }
+ } else {
+ // Read the index. We are guaranteed that the first byte
+ // is zeroed, so we must shift down a byte.
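+            // (Sketch, assuming the writer stored index << 8 little-endian: an index
+            // of 5 is stored as 0x0500, so the low byte is 0 and the shift recovers 5.)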
+ uint32_t index = this->readUInt() >> 8;
+ if (index == 0) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+
+ if (SkFlattenable::Factory* found = fFlattenableDict.find(index)) {
+ factory = *found;
+ }
+ }
+
+ if (!this->validate(factory != nullptr)) {
+ return nullptr;
+ }
+ }
+
+    // if we get here, factory may still be null, but if that is the case, the
+    // failure was ours, not the writer's.
+ sk_sp<SkFlattenable> obj;
+ uint32_t sizeRecorded = this->read32();
+ if (factory) {
+ size_t offset = fReader.offset();
+ obj = (*factory)(*this);
+ // check that we read the amount we expected
+ size_t sizeRead = fReader.offset() - offset;
+ if (sizeRecorded != sizeRead) {
+ this->validate(false);
+ return nullptr;
+ }
+ if (obj && obj->getFlattenableType() != ft) {
+ this->validate(false);
+ return nullptr;
+ }
+ } else {
+ // we must skip the remaining data
+ fReader.skip(sizeRecorded);
+ }
+ if (!this->isValid()) {
+ return nullptr;
+ }
+ return obj.release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+int32_t SkReadBuffer::checkInt(int32_t min, int32_t max) {
+ SkASSERT(min <= max);
+ int32_t value = this->read32();
+ if (value < min || value > max) {
+ this->validate(false);
+ value = min;
+ }
+ return value;
+}
+
+SkFilterQuality SkReadBuffer::checkFilterQuality() {
+ return this->checkRange<SkFilterQuality>(kNone_SkFilterQuality, kLast_SkFilterQuality);
+}
+
+#endif // #ifndef SK_DISABLE_READBUFFER
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.h b/gfx/skia/skia/src/core/SkReadBuffer.h
new file mode 100644
index 0000000000..0bc91a09e0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadBuffer_DEFINED
+#define SkReadBuffer_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkDrawLooper.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSerialProcs.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReader32.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkData;
+class SkImage;
+
+#ifndef SK_DISABLE_READBUFFER
+
+class SkReadBuffer {
+public:
+ SkReadBuffer();
+ SkReadBuffer(const void* data, size_t size);
+
+ /**
+ * Returns true IFF the version is older than the specified version.
+ */
+ bool isVersionLT(SkPicturePriv::Version targetVersion) const {
+ SkASSERT(targetVersion > 0);
+ return fVersion > 0 && fVersion < targetVersion;
+ }
+
+ uint32_t getVersion() const { return fVersion; }
+
+ /** This may be called at most once; most clients of SkReadBuffer should not mess with it. */
+ void setVersion(int version) {
+ SkASSERT(0 == fVersion || version == fVersion);
+ fVersion = version;
+ }
+
+ size_t size() const { return fReader.size(); }
+ size_t offset() const { return fReader.offset(); }
+ bool eof() { return fReader.eof(); }
+ const void* skip(size_t size);
+ const void* skip(size_t count, size_t size); // does safe multiply
+ size_t available() const { return fReader.available(); }
+
+ template <typename T> const T* skipT() {
+ return static_cast<const T*>(this->skip(sizeof(T)));
+ }
+ template <typename T> const T* skipT(size_t count) {
+ return static_cast<const T*>(this->skip(count, sizeof(T)));
+ }
+
+ // primitives
+ bool readBool();
+ SkColor readColor();
+ int32_t readInt();
+ SkScalar readScalar();
+ uint32_t readUInt();
+ int32_t read32();
+
+ template <typename T> T read32LE(T max) {
+ uint32_t value = this->readUInt();
+ if (!this->validate(value <= static_cast<uint32_t>(max))) {
+ value = 0;
+ }
+ return static_cast<T>(value);
+ }
+
+ // peek
+ uint8_t peekByte();
+
+ void readString(SkString* string);
+
+ // common data structures
+ void readColor4f(SkColor4f* color);
+ void readPoint(SkPoint* point);
+ SkPoint readPoint() { SkPoint p; this->readPoint(&p); return p; }
+ void readPoint3(SkPoint3* point);
+ void readMatrix(SkMatrix* matrix);
+ void readIRect(SkIRect* rect);
+ void readRect(SkRect* rect);
+ void readRRect(SkRRect* rrect);
+ void readRegion(SkRegion* region);
+
+ void readPath(SkPath* path);
+
+ SkReadPaintResult readPaint(SkPaint* paint, SkFont* font) {
+ return SkPaintPriv::Unflatten(paint, *this, font);
+ }
+
+ SkFlattenable* readFlattenable(SkFlattenable::Type);
+ template <typename T> sk_sp<T> readFlattenable() {
+ return sk_sp<T>((T*)this->readFlattenable(T::GetFlattenableType()));
+ }
+ sk_sp<SkColorFilter> readColorFilter() { return this->readFlattenable<SkColorFilter>(); }
+ sk_sp<SkDrawLooper> readDrawLooper() { return this->readFlattenable<SkDrawLooper>(); }
+ sk_sp<SkImageFilter> readImageFilter() { return this->readFlattenable<SkImageFilter>(); }
+ sk_sp<SkMaskFilter> readMaskFilter() { return this->readFlattenable<SkMaskFilterBase>(); }
+ sk_sp<SkPathEffect> readPathEffect() { return this->readFlattenable<SkPathEffect>(); }
+ sk_sp<SkShader> readShader() { return this->readFlattenable<SkShaderBase>(); }
+
+ // Reads SkAlign4(bytes), but will only copy bytes into the buffer.
+ bool readPad32(void* buffer, size_t bytes);
+
+ // binary data and arrays
+ bool readByteArray(void* value, size_t size);
+ bool readColorArray(SkColor* colors, size_t size);
+ bool readColor4fArray(SkColor4f* colors, size_t size);
+ bool readIntArray(int32_t* values, size_t size);
+ bool readPointArray(SkPoint* points, size_t size);
+ bool readScalarArray(SkScalar* values, size_t size);
+
+ sk_sp<SkData> readByteArrayAsData();
+
+ // helpers to get info about arrays and binary data
+ uint32_t getArrayCount();
+
+ // If there is a real error (e.g. data is corrupted) this returns null. If the image cannot
+ // be created (e.g. it was not originally encoded) then this returns an image that doesn't
+ // draw.
+ sk_sp<SkImage> readImage();
+ sk_sp<SkTypeface> readTypeface();
+
+ void setTypefaceArray(sk_sp<SkTypeface> array[], int count) {
+ fTFArray = array;
+ fTFCount = count;
+ }
+
+ /**
+ * Call this with a pre-loaded array of Factories, in the same order as
+ * were created/written by the writer. SkPicture uses this.
+ */
+ void setFactoryPlayback(SkFlattenable::Factory array[], int count) {
+ fFactoryArray = array;
+ fFactoryCount = count;
+ }
+
+ void setDeserialProcs(const SkDeserialProcs& procs);
+ const SkDeserialProcs& getDeserialProcs() const { return fProcs; }
+
+ /**
+ * If isValid is false, sets the buffer to be "invalid". Returns true if the buffer
+ * is still valid.
+ */
+ bool validate(bool isValid) {
+ if (!isValid) {
+ this->setInvalid();
+ }
+ return !fError;
+ }
+
+ /**
+ * Helper function to do a preflight check before a large allocation or read.
+     *  Returns true if there are enough bytes in the buffer to read n elements of T.
+ * If not, the buffer will be "invalid" and false will be returned.
+ */
+ template <typename T>
+ bool validateCanReadN(size_t n) {
+ return this->validate(n <= (fReader.available() / sizeof(T)));
+ }
+
+ bool isValid() const { return !fError; }
+ bool validateIndex(int index, int count) {
+ return this->validate(index >= 0 && index < count);
+ }
+
+ // Utilities that mark the buffer invalid if the requested value is out-of-range
+
+ // If the read value is outside of the range, validate(false) is called, and min
+ // is returned, else the value is returned.
+    int32_t checkInt(int32_t min, int32_t max);
+
+ template <typename T> T checkRange(T min, T max) {
+ return static_cast<T>(this->checkInt(static_cast<int32_t>(min),
+ static_cast<int32_t>(max)));
+ }
+
+ SkFilterQuality checkFilterQuality();
+
+private:
+ const char* readString(size_t* length);
+
+ void setInvalid();
+ bool readArray(void* value, size_t size, size_t elementSize);
+ void setMemory(const void*, size_t);
+
+ SkReader32 fReader;
+
+ // Only used if we do not have an fFactoryArray.
+ SkTHashMap<uint32_t, SkFlattenable::Factory> fFlattenableDict;
+
+ int fVersion;
+
+ sk_sp<SkTypeface>* fTFArray;
+ int fTFCount;
+
+ SkFlattenable::Factory* fFactoryArray;
+ int fFactoryCount;
+
+ SkDeserialProcs fProcs;
+
+ static bool IsPtrAlign4(const void* ptr) {
+ return SkIsAlign4((uintptr_t)ptr);
+ }
+
+ bool fError = false;
+};
+
+#else // #ifndef SK_DISABLE_READBUFFER
+
+class SkReadBuffer {
+public:
+ SkReadBuffer() {}
+ SkReadBuffer(const void*, size_t) {}
+
+ bool isVersionLT(SkPicturePriv::Version) const { return false; }
+ uint32_t getVersion() const { return 0xffffffff; }
+ void setVersion(int) {}
+
+ size_t size() const { return 0; }
+ size_t offset() const { return 0; }
+ bool eof() { return true; }
+ size_t available() const { return 0; }
+
+ const void* skip(size_t) { return nullptr; }
+ const void* skip(size_t, size_t) { return nullptr; }
+ template <typename T> const T* skipT() { return nullptr; }
+ template <typename T> const T* skipT(size_t) { return nullptr; }
+
+    bool readBool() { return false; }
+ SkColor readColor() { return 0; }
+ int32_t readInt() { return 0; }
+ SkScalar readScalar() { return 0; }
+ uint32_t readUInt() { return 0; }
+ int32_t read32() { return 0; }
+
+ template <typename T> T read32LE(T max) { return max; }
+
+ uint8_t peekByte() { return 0; }
+
+ void readColor4f(SkColor4f* out) { *out = SkColor4f{0,0,0,0}; }
+ void readPoint (SkPoint* out) { *out = SkPoint{0,0}; }
+ void readPoint3 (SkPoint3* out) { *out = SkPoint3{0,0,0}; }
+ void readMatrix (SkMatrix* out) { *out = SkMatrix::I(); }
+ void readIRect (SkIRect* out) { *out = SkIRect{0,0,0,0}; }
+ void readRect (SkRect* out) { *out = SkRect{0,0,0,0}; }
+ void readRRect (SkRRect* out) { *out = SkRRect(); }
+ void readRegion (SkRegion* out) { *out = SkRegion(); }
+ void readString (SkString* out) { *out = SkString(); }
+ void readPath (SkPath* out) { *out = SkPath(); }
+ SkReadPaintResult readPaint (SkPaint* out, SkFont* font) {
+ *out = SkPaint();
+ if (font) {
+ *font = SkFont();
+ }
+ return kFailed_ReadPaint;
+ }
+
+ SkPoint readPoint() { return {0,0}; }
+
+ SkFlattenable* readFlattenable(SkFlattenable::Type) { return nullptr; }
+
+ template <typename T> sk_sp<T> readFlattenable() { return nullptr; }
+ sk_sp<SkColorFilter> readColorFilter() { return nullptr; }
+ sk_sp<SkDrawLooper> readDrawLooper() { return nullptr; }
+ sk_sp<SkImageFilter> readImageFilter() { return nullptr; }
+ sk_sp<SkMaskFilter> readMaskFilter() { return nullptr; }
+ sk_sp<SkPathEffect> readPathEffect() { return nullptr; }
+ sk_sp<SkShader> readShader() { return nullptr; }
+
+ bool readPad32 (void*, size_t) { return false; }
+ bool readByteArray (void*, size_t) { return false; }
+ bool readColorArray (SkColor*, size_t) { return false; }
+ bool readColor4fArray(SkColor4f*, size_t) { return false; }
+ bool readIntArray (int32_t*, size_t) { return false; }
+ bool readPointArray (SkPoint*, size_t) { return false; }
+ bool readScalarArray (SkScalar*, size_t) { return false; }
+
+ sk_sp<SkData> readByteArrayAsData() { return nullptr; }
+ uint32_t getArrayCount() { return 0; }
+
+ sk_sp<SkImage> readImage() { return nullptr; }
+ sk_sp<SkTypeface> readTypeface() { return nullptr; }
+
+ bool validate(bool) { return false; }
+ template <typename T> bool validateCanReadN(size_t) { return false; }
+ bool isValid() const { return false; }
+ bool validateIndex(int, int) { return false; }
+
+ int32_t checkInt(int min, int) { return min; }
+ template <typename T> T checkRange(T min, T) { return min; }
+
+ SkFilterQuality checkFilterQuality() { return SkFilterQuality::kNone_SkFilterQuality; }
+
+ void setTypefaceArray(sk_sp<SkTypeface>[], int) {}
+ void setFactoryPlayback(SkFlattenable::Factory[], int) {}
+ void setDeserialProcs(const SkDeserialProcs&) {}
+
+ const SkDeserialProcs& getDeserialProcs() const {
+ static const SkDeserialProcs procs;
+ return procs;
+ }
+};
+
+#endif // #ifndef SK_DISABLE_READBUFFER
+
+#endif // SkReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkReader32.h b/gfx/skia/skia/src/core/SkReader32.h
new file mode 100644
index 0000000000..ff2b820de6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReader32.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkReader32_DEFINED
+#define SkReader32_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/private/SkNoncopyable.h"
+
+class SkString;
+
+class SkReader32 : SkNoncopyable {
+public:
+ SkReader32() : fCurr(nullptr), fStop(nullptr), fBase(nullptr) {}
+ SkReader32(const void* data, size_t size) {
+ this->setMemory(data, size);
+ }
+
+ void setMemory(const void* data, size_t size) {
+ SkASSERT(ptr_align_4(data));
+ SkASSERT(SkAlign4(size) == size);
+
+ fBase = fCurr = (const char*)data;
+ fStop = (const char*)data + size;
+ }
+
+ size_t size() const { return fStop - fBase; }
+ size_t offset() const { return fCurr - fBase; }
+ bool eof() const { return fCurr >= fStop; }
+ const void* base() const { return fBase; }
+ const void* peek() const { return fCurr; }
+
+ size_t available() const { return fStop - fCurr; }
+ bool isAvailable(size_t size) const { return size <= this->available(); }
+
+ void rewind() { fCurr = fBase; }
+
+ void setOffset(size_t offset) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset <= this->size());
+ fCurr = fBase + offset;
+ }
+
+ bool readBool() { return this->readInt() != 0; }
+
+ int32_t readInt() {
+ SkASSERT(ptr_align_4(fCurr));
+ int32_t value = *(const int32_t*)fCurr;
+ fCurr += sizeof(value);
+ SkASSERT(fCurr <= fStop);
+ return value;
+ }
+
+ void* readPtr() {
+ void* ptr;
+ // we presume this "if" is resolved at compile-time
+ if (4 == sizeof(void*)) {
+ ptr = *(void**)fCurr;
+ } else {
+ memcpy(&ptr, fCurr, sizeof(void*));
+ }
+ fCurr += sizeof(void*);
+ return ptr;
+ }
+
+ SkScalar readScalar() {
+ SkASSERT(ptr_align_4(fCurr));
+ SkScalar value = *(const SkScalar*)fCurr;
+ fCurr += sizeof(value);
+ SkASSERT(fCurr <= fStop);
+ return value;
+ }
+
+ const void* skip(size_t size) {
+ SkASSERT(ptr_align_4(fCurr));
+ const void* addr = fCurr;
+ fCurr += SkAlign4(size);
+ SkASSERT(fCurr <= fStop);
+ return addr;
+ }
+
+ template <typename T> const T& skipT() {
+ SkASSERT(SkAlign4(sizeof(T)) == sizeof(T));
+ return *(const T*)this->skip(sizeof(T));
+ }
+
+ void read(void* dst, size_t size) {
+ SkASSERT(0 == size || dst != nullptr);
+ SkASSERT(ptr_align_4(fCurr));
+ sk_careful_memcpy(dst, fCurr, size);
+ fCurr += SkAlign4(size);
+ SkASSERT(fCurr <= fStop);
+ }
+
+ uint8_t readU8() { return (uint8_t)this->readInt(); }
+ uint16_t readU16() { return (uint16_t)this->readInt(); }
+ int32_t readS32() { return this->readInt(); }
+ uint32_t readU32() { return this->readInt(); }
+
+ bool readPath(SkPath* path) {
+ return this->readObjectFromMemory(path);
+ }
+
+ bool readMatrix(SkMatrix* matrix) {
+ return this->readObjectFromMemory(matrix);
+ }
+
+ bool readRRect(SkRRect* rrect) {
+ return this->readObjectFromMemory(rrect);
+ }
+
+ bool readRegion(SkRegion* rgn) {
+ return this->readObjectFromMemory(rgn);
+ }
+
+ /**
+     *  Read the length of a string (written by SkWriter32::writeString) into
+     *  len (if len is not nullptr) and return the address of the null-terminated
+     *  string within the reader's buffer.
+ */
+ const char* readString(size_t* len = nullptr);
+
+ /**
+ * Read the string (written by SkWriter32::writeString) and return it in
+ * copy (if copy is not null). Return the length of the string.
+ */
+ size_t readIntoString(SkString* copy);
+
+ sk_sp<SkData> readData() {
+ uint32_t byteLength = this->readU32();
+ if (0 == byteLength) {
+ return SkData::MakeEmpty();
+ }
+ return SkData::MakeWithCopy(this->skip(byteLength), byteLength);
+ }
+
+private:
+ template <typename T> bool readObjectFromMemory(T* obj) {
+ size_t size = obj->readFromMemory(this->peek(), this->available());
+ // If readFromMemory() fails (which means that available() was too small), it returns 0
+ bool success = (size > 0) && (size <= this->available()) && (SkAlign4(size) == size);
+ // In case of failure, we want to skip to the end
+ (void)this->skip(success ? size : this->available());
+ return success;
+ }
+
+ // these are always 4-byte aligned
+ const char* fCurr; // current position within buffer
+ const char* fStop; // end of buffer
+ const char* fBase; // beginning of buffer
+
+#ifdef SK_DEBUG
+ static bool ptr_align_4(const void* ptr) {
+        return ((uintptr_t)ptr & 3) == 0;
+ }
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRecord.cpp b/gfx/skia/skia/src/core/SkRecord.cpp
new file mode 100644
index 0000000000..6f93944b36
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "src/core/SkRecord.h"
+#include <algorithm>
+
+SkRecord::~SkRecord() {
+ Destroyer destroyer;
+ for (int i = 0; i < this->count(); i++) {
+ this->mutate(i, destroyer);
+ }
+}
+
+void SkRecord::grow() {
+ SkASSERT(fCount == fReserved);
+ fReserved = fReserved ? fReserved * 2 : 4;
+ fRecords.realloc(fReserved);
+}
+
+size_t SkRecord::bytesUsed() const {
+ size_t bytes = fApproxBytesAllocated + sizeof(SkRecord);
+ return bytes;
+}
+
+void SkRecord::defrag() {
+ // Remove all the NoOps, preserving the order of other ops, e.g.
+ // Save, ClipRect, NoOp, DrawRect, NoOp, NoOp, Restore
+ // -> Save, ClipRect, DrawRect, Restore
+ Record* noops = std::remove_if(fRecords.get(), fRecords.get() + fCount,
+ [](Record op) { return op.type() == SkRecords::NoOp_Type; });
+ fCount = noops - fRecords.get();
+}
diff --git a/gfx/skia/skia/src/core/SkRecord.h b/gfx/skia/skia/src/core/SkRecord.h
new file mode 100644
index 0000000000..68c63492ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecord_DEFINED
+#define SkRecord_DEFINED
+
+#include "include/private/SkTLogic.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkRecords.h"
+
+// SkRecord represents a sequence of SkCanvas calls, saved for future use.
+// These future uses may include: replay, optimization, serialization, or combinations of those.
+//
+// Though an enterprising user may find calling alloc(), append(), visit(), and mutate() enough to
+// work with SkRecord, you probably want to look at SkRecorder which presents an SkCanvas interface
+// for creating an SkRecord, and SkRecordDraw which plays an SkRecord back into another SkCanvas.
+//
+// SkRecord often looks like it's compatible with any type T, but really it's compatible with any
+// type T which has a static const SkRecords::Type kType. That is to say, SkRecord is compatible
+// only with SkRecords::* structs defined in SkRecords.h. Your compiler will helpfully yell if you
+// get this wrong.
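+//
+// A minimal usage sketch (illustrative only; "visitor" stands for any functor providing
+// operator() overloads for all the SkRecords::* types):
+//
+//     SkRecord record;
+//     SkRecords::NoOp* op = record.append<SkRecords::NoOp>();
+//     new (op) SkRecords::NoOp;   // placement-new the command payload
+//     record.visit(0, visitor);   // dispatches to visitor(const SkRecords::NoOp&)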
+
+class SkRecord : public SkRefCnt {
+public:
+ SkRecord() = default;
+ ~SkRecord();
+
+ // Returns the number of canvas commands in this SkRecord.
+ int count() const { return fCount; }
+
+ // Visit the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(const T& record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto visit(int i, F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ return fRecords[i].visit(f);
+ }
+
+ // Mutate the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(T* record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto mutate(int i, F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ return fRecords[i].mutate(f);
+ }
+
+ // Allocate contiguous space for count Ts, to be freed when the SkRecord is destroyed.
+ // Here T can be any class, not just those from SkRecords. Throws on failure.
+ template <typename T>
+ T* alloc(size_t count = 1) {
+ struct RawBytes {
+ alignas(T) char data[sizeof(T)];
+ };
+ fApproxBytesAllocated += count * sizeof(T) + alignof(T);
+ return (T*)fAlloc.makeArrayDefault<RawBytes>(count);
+ }
+
+ // Add a new command of type T to the end of this SkRecord.
+ // You are expected to placement new an object of type T onto this pointer.
+ template <typename T>
+ T* append() {
+ if (fCount == fReserved) {
+ this->grow();
+ }
+ return fRecords[fCount++].set(this->allocCommand<T>());
+ }
+
+ // Replace the i-th command with a new command of type T.
+ // You are expected to placement new an object of type T onto this pointer.
+ // References to the original command are invalidated.
+ template <typename T>
+ T* replace(int i) {
+ SkASSERT(i < this->count());
+
+ Destroyer destroyer;
+ this->mutate(i, destroyer);
+
+ return fRecords[i].set(this->allocCommand<T>());
+ }
+
+ // Replace the i-th command with a new command of type T.
+ // You are expected to placement new an object of type T onto this pointer.
+ // You must show proof that you've already adopted the existing command.
+ template <typename T, typename Existing>
+ T* replace(int i, const SkRecords::Adopted<Existing>& proofOfAdoption) {
+ SkASSERT(i < this->count());
+
+ SkASSERT(Existing::kType == fRecords[i].type());
+ SkASSERT(proofOfAdoption == fRecords[i].ptr());
+
+ return fRecords[i].set(this->allocCommand<T>());
+ }
+
+    // Does not include the bytes behind any pointers embedded in the Records; callers
+    // need to iterate with a visitor to measure those they care for.
+ size_t bytesUsed() const;
+
+ // Rearrange and resize this record to eliminate any NoOps.
+ // May change count() and the indices of ops, but preserves their order.
+ void defrag();
+
+private:
+ // An SkRecord is structured as an array of pointers into a big chunk of memory where
+ // records representing each canvas draw call are stored:
+ //
+ // fRecords: [*][*][*]...
+    if (fReader.available() == 0) {
+ // | | |
+ // | | +---------------------------------------+
+ // | +-----------------+ |
+ // | | |
+ // v v v
+ // fAlloc: [SkRecords::DrawRect][SkRecords::DrawPosTextH][SkRecords::DrawRect]...
+ //
+ // We store the types of each of the pointers alongside the pointer.
+ // The cost to append a T to this structure is 8 + sizeof(T) bytes.
+
+ // A mutator that can be used with replace to destroy canvas commands.
+ struct Destroyer {
+ template <typename T>
+ void operator()(T* record) { record->~T(); }
+ };
+
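+    // Empty command types carry no data, so every append of one shares a single
+    // static instance instead of consuming arena space.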
+ template <typename T>
+ SK_WHEN(std::is_empty<T>::value, T*) allocCommand() {
+ static T singleton = {};
+ return &singleton;
+ }
+
+ template <typename T>
+ SK_WHEN(!std::is_empty<T>::value, T*) allocCommand() { return this->alloc<T>(); }
+
+ void grow();
+
+ // A typed pointer to some bytes in fAlloc. visit() and mutate() allow polymorphic dispatch.
+ struct Record {
+ SkRecords::Type fType;
+ void* fPtr;
+
+ // Point this record to its data in fAlloc. Returns ptr for convenience.
+ template <typename T>
+ T* set(T* ptr) {
+ fType = T::kType;
+ fPtr = ptr;
+ SkASSERT(this->ptr() == ptr && this->type() == T::kType);
+ return ptr;
+ }
+
+ SkRecords::Type type() const { return fType; }
+ void* ptr() const { return fPtr; }
+
+ // Visit this record with functor F (see public API above).
+ template <typename F>
+ auto visit(F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ #define CASE(T) case SkRecords::T##_Type: return f(*(const SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ static const SkRecords::NoOp noop{};
+ return f(noop);
+ }
+
+ // Mutate this record with functor F (see public API above).
+ template <typename F>
+ auto mutate(F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ #define CASE(T) case SkRecords::T##_Type: return f((SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ static const SkRecords::NoOp noop{};
+ return f(const_cast<SkRecords::NoOp*>(&noop));
+ }
+ };
+
+    // fRecords needs to be a data structure that can append fixed length data, and needs to
+    // support efficient random access and forward iteration. (It doesn't need to be contiguous.)
+ int fCount{0},
+ fReserved{0};
+ SkAutoTMalloc<Record> fRecords;
+
+ // fAlloc needs to be a data structure which can append variable length data in contiguous
+ // chunks, returning a stable handle to that data for later retrieval.
+ SkArenaAlloc fAlloc{256};
+ size_t fApproxBytesAllocated{0};
+};
+
+#endif//SkRecord_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.cpp b/gfx/skia/skia/src/core/SkRecordDraw.cpp
new file mode 100644
index 0000000000..16827f07a4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/utils/SkPatchUtils.h"
+
+void SkRecordDraw(const SkRecord& record,
+ SkCanvas* canvas,
+ SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[],
+ int drawableCount,
+ const SkBBoxHierarchy* bbh,
+ SkPicture::AbortCallback* callback) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ if (bbh) {
+ // Draw only ops that affect pixels in the canvas's current clip.
+ // The SkRecord and BBH were recorded in identity space. This canvas
+ // is not necessarily in that same space. getLocalClipBounds() returns us
+ // this canvas' clip bounds transformed back into identity space, which
+ // lets us query the BBH.
+ SkRect query = canvas->getLocalClipBounds();
+
+ SkTDArray<int> ops;
+ bbh->search(query, &ops);
+
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < ops.count(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(ops[i], draw);
+ }
+ } else {
+ // Draw all ops.
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < record.count(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(i, draw);
+ }
+ }
+}
+
+void SkRecordPartialDraw(const SkRecord& record, SkCanvas* canvas,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop,
+ const SkMatrix& initialCTM) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ stop = SkTMin(stop, record.count());
+ SkRecords::Draw draw(canvas, drawablePicts, nullptr, drawableCount, &initialCTM);
+ for (int i = start; i < stop; i++) {
+ record.visit(i, draw);
+ }
+}
+
+namespace SkRecords {
+
+// NoOps draw nothing.
+template <> void Draw::draw(const NoOp&) {}
+
+#define DRAW(T, call) template <> void Draw::draw(const T& r) { fCanvas->call; }
+DRAW(Flush, flush());
+DRAW(Restore, restore());
+DRAW(Save, save());
+DRAW(SaveLayer, saveLayer(SkCanvas::SaveLayerRec(r.bounds,
+ r.paint,
+ r.backdrop.get(),
+ r.clipMask.get(),
+ r.clipMatrix,
+ r.saveLayerFlags)));
+
+template <> void Draw::draw(const SaveBehind& r) {
+ SkCanvasPriv::SaveBehind(fCanvas, r.subset);
+}
+
+template <> void Draw::draw(const DrawBehind& r) {
+ SkCanvasPriv::DrawBehind(fCanvas, r.paint);
+}
+
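+// SetMatrix ops were recorded relative to an identity CTM, so on replay they are
+// re-rooted by concatenating with the initial replay CTM.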
+DRAW(SetMatrix, setMatrix(SkMatrix::Concat(fInitialCTM, r.matrix)));
+DRAW(Concat, concat(r.matrix));
+DRAW(Translate, translate(r.dx, r.dy));
+
+DRAW(ClipPath, clipPath(r.path, r.opAA.op(), r.opAA.aa()));
+DRAW(ClipRRect, clipRRect(r.rrect, r.opAA.op(), r.opAA.aa()));
+DRAW(ClipRect, clipRect(r.rect, r.opAA.op(), r.opAA.aa()));
+DRAW(ClipRegion, clipRegion(r.region, r.op));
+
+DRAW(DrawArc, drawArc(r.oval, r.startAngle, r.sweepAngle, r.useCenter, r.paint));
+DRAW(DrawDRRect, drawDRRect(r.outer, r.inner, r.paint));
+DRAW(DrawImage, drawImage(r.image.get(), r.left, r.top, r.paint));
+
+template <> void Draw::draw(const DrawImageLattice& r) {
+ SkCanvas::Lattice lattice;
+ lattice.fXCount = r.xCount;
+ lattice.fXDivs = r.xDivs;
+ lattice.fYCount = r.yCount;
+ lattice.fYDivs = r.yDivs;
+ lattice.fRectTypes = (0 == r.flagCount) ? nullptr : r.flags;
+ lattice.fColors = (0 == r.flagCount) ? nullptr : r.colors;
+ lattice.fBounds = &r.src;
+ fCanvas->drawImageLattice(r.image.get(), lattice, r.dst, r.paint);
+}
+
+DRAW(DrawImageRect, legacy_drawImageRect(r.image.get(), r.src, r.dst, r.paint, r.constraint));
+DRAW(DrawImageNine, drawImageNine(r.image.get(), r.center, r.dst, r.paint));
+DRAW(DrawOval, drawOval(r.oval, r.paint));
+DRAW(DrawPaint, drawPaint(r.paint));
+DRAW(DrawPath, drawPath(r.path, r.paint));
+DRAW(DrawPatch, drawPatch(r.cubics, r.colors, r.texCoords, r.bmode, r.paint));
+DRAW(DrawPicture, drawPicture(r.picture.get(), &r.matrix, r.paint));
+DRAW(DrawPoints, drawPoints(r.mode, r.count, r.pts, r.paint));
+DRAW(DrawRRect, drawRRect(r.rrect, r.paint));
+DRAW(DrawRect, drawRect(r.rect, r.paint));
+DRAW(DrawRegion, drawRegion(r.region, r.paint));
+DRAW(DrawTextBlob, drawTextBlob(r.blob.get(), r.x, r.y, r.paint));
+DRAW(DrawAtlas, drawAtlas(r.atlas.get(),
+ r.xforms, r.texs, r.colors, r.count, r.mode, r.cull, r.paint));
+DRAW(DrawVertices, drawVertices(r.vertices, r.bones, r.boneCount, r.bmode, r.paint));
+DRAW(DrawShadowRec, private_draw_shadow_rec(r.path, r.rec));
+DRAW(DrawAnnotation, drawAnnotation(r.rect, r.key.c_str(), r.value.get()));
+
+DRAW(DrawEdgeAAQuad, experimental_DrawEdgeAAQuad(
+ r.rect, r.clip, r.aa, r.color, r.mode));
+DRAW(DrawEdgeAAImageSet, experimental_DrawEdgeAAImageSet(
+ r.set.get(), r.count, r.dstClips, r.preViewMatrices, r.paint, r.constraint));
+
+#undef DRAW
+
+template <> void Draw::draw(const DrawDrawable& r) {
+ SkASSERT(r.index >= 0);
+ SkASSERT(r.index < fDrawableCount);
+ if (fDrawables) {
+ SkASSERT(nullptr == fDrawablePicts);
+ fCanvas->drawDrawable(fDrawables[r.index], r.matrix);
+ } else {
+ fCanvas->drawPicture(fDrawablePicts[r.index], r.matrix, nullptr);
+ }
+}
+
+// This is an SkRecord visitor that fills an SkBBoxHierarchy.
+//
+// The interesting part here is how to calculate bounds for ops which don't
+// have intrinsic bounds. What is the bounds of a Save or a Translate?
+//
+// We answer this by thinking about a particular definition of bounds: if I
+// don't execute this op, pixels in this rectangle might draw incorrectly. So
+// the bounds of a Save, a Translate, a Restore, etc. are the union of the
+// bounds of Draw* ops that they might have an effect on. For any given
+// Save/Restore block, the bounds of the Save, the Restore, and any other
+// non-drawing ("control") ops inside are exactly the union of the bounds of
+// the drawing ops inside that block.
+//
+// To implement this, we keep a stack of active Save blocks. As we consume ops
+// inside the Save/Restore block, drawing ops are unioned with the bounds of
+// the block, and control ops are stashed away for later. When we finish the
+// block with a Restore, our bounds are complete, and we go back and fill them
+// in for all the control ops we stashed away.
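+//
+// A worked sketch: for the op sequence [Save, DrawRect(R), Restore], the Save is
+// stashed as a control op, DrawRect's adjusted-and-mapped bounds R are unioned into
+// the open block, and the Restore closes the block, leaving all three ops with bounds R.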
+class FillBounds : SkNoncopyable {
+public:
+ FillBounds(const SkRect& cullRect, const SkRecord& record, SkRect bounds[])
+ : fCullRect(cullRect)
+ , fBounds(bounds) {
+ fCTM = SkMatrix::I();
+
+ // We push an extra save block to track the bounds of any top-level control operations.
+ fSaveStack.push_back({ 0, Bounds::MakeEmpty(), nullptr, fCTM });
+ }
+
+ ~FillBounds() {
+ // If we have any lingering unpaired Saves, simulate restores to make
+ // sure all ops in those Save blocks have their bounds calculated.
+ while (!fSaveStack.isEmpty()) {
+ this->popSaveBlock();
+ }
+
+ // Any control ops not part of any Save/Restore block draw everywhere.
+ while (!fControlIndices.isEmpty()) {
+ this->popControl(fCullRect);
+ }
+ }
+
+ void setCurrentOp(int currentOp) { fCurrentOp = currentOp; }
+
+
+ template <typename T> void operator()(const T& op) {
+ this->updateCTM(op);
+ this->trackBounds(op);
+ }
+
+    // In this file, SkRects are in local coordinates; Bounds are translated back to identity space.
+ typedef SkRect Bounds;
+
+ // Adjust rect for all paints that may affect its geometry, then map it to identity space.
+ Bounds adjustAndMap(SkRect rect, const SkPaint* paint) const {
+ // Inverted rectangles really confuse our BBHs.
+ rect.sort();
+
+ // Adjust the rect for its own paint.
+ if (!AdjustForPaint(paint, &rect)) {
+ // The paint could do anything to our bounds. The only safe answer is the cull.
+ return fCullRect;
+ }
+
+ // Adjust rect for all the paints from the SaveLayers we're inside.
+ if (!this->adjustForSaveLayerPaints(&rect)) {
+ // Same deal as above.
+ return fCullRect;
+ }
+
+ // Map the rect back to identity space.
+ fCTM.mapRect(&rect);
+
+ // Nothing can draw outside the cull rect.
+ if (!rect.intersect(fCullRect)) {
+ return Bounds::MakeEmpty();
+ }
+
+ return rect;
+ }
+
+private:
+ struct SaveBounds {
+ int controlOps; // Number of control ops in this Save block, including the Save.
+ Bounds bounds; // Bounds of everything in the block.
+ const SkPaint* paint; // Unowned. If set, adjusts the bounds of all ops in this block.
+ SkMatrix ctm;
+ };
+
+ // Only Restore, SetMatrix, Concat, and Translate change the CTM.
+ template <typename T> void updateCTM(const T&) {}
+ void updateCTM(const Restore& op) { fCTM = op.matrix; }
+ void updateCTM(const SetMatrix& op) { fCTM = op.matrix; }
+ void updateCTM(const Concat& op) { fCTM.preConcat(op.matrix); }
+ void updateCTM(const Translate& op) { fCTM.preTranslate(op.dx, op.dy); }
+
+ // The bounds of these ops must be calculated when we hit the Restore
+ // from the bounds of the ops in the same Save block.
+ void trackBounds(const Save&) { this->pushSaveBlock(nullptr); }
+ void trackBounds(const SaveLayer& op) { this->pushSaveBlock(op.paint); }
+ void trackBounds(const SaveBehind&) { this->pushSaveBlock(nullptr); }
+ void trackBounds(const Restore&) { fBounds[fCurrentOp] = this->popSaveBlock(); }
+
+ void trackBounds(const SetMatrix&) { this->pushControl(); }
+ void trackBounds(const Concat&) { this->pushControl(); }
+ void trackBounds(const Translate&) { this->pushControl(); }
+ void trackBounds(const ClipRect&) { this->pushControl(); }
+ void trackBounds(const ClipRRect&) { this->pushControl(); }
+ void trackBounds(const ClipPath&) { this->pushControl(); }
+ void trackBounds(const ClipRegion&) { this->pushControl(); }
+
+
+ // For all other ops, we can calculate and store the bounds directly now.
+ template <typename T> void trackBounds(const T& op) {
+ fBounds[fCurrentOp] = this->bounds(op);
+ this->updateSaveBounds(fBounds[fCurrentOp]);
+ }
+
+ void pushSaveBlock(const SkPaint* paint) {
+ // Starting a new Save block. Push a new entry to represent that.
+ SaveBounds sb;
+ sb.controlOps = 0;
+ // If the paint affects transparent black,
+        // the bounds shouldn't be smaller than the cull.
+ sb.bounds =
+ PaintMayAffectTransparentBlack(paint) ? fCullRect : Bounds::MakeEmpty();
+ sb.paint = paint;
+ sb.ctm = this->fCTM;
+
+ fSaveStack.push_back(sb);
+ this->pushControl();
+ }
+
+ static bool PaintMayAffectTransparentBlack(const SkPaint* paint) {
+ if (paint) {
+ // FIXME: this is very conservative
+ if (paint->getImageFilter() || paint->getColorFilter()) {
+ return true;
+ }
+
+            // Unusual blend modes require us to process a saved layer
+            // even with operations outside the clip.
+ // For example, DstIn is used by masking layers.
+ // https://code.google.com/p/skia/issues/detail?id=1291
+ // https://crbug.com/401593
+ switch (paint->getBlendMode()) {
+ // For each of the following transfer modes, if the source
+ // alpha is zero (our transparent black), the resulting
+ // blended alpha is not necessarily equal to the original
+ // destination alpha.
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+ }
+
+ Bounds popSaveBlock() {
+        // We're done with the Save block. Apply the block's bounds to all control ops inside it.
+ SaveBounds sb;
+ fSaveStack.pop(&sb);
+
+ while (sb.controlOps --> 0) {
+ this->popControl(sb.bounds);
+ }
+
+        // This whole Save block may be part of another Save block.
+ this->updateSaveBounds(sb.bounds);
+
+ // If called from a real Restore (not a phony one for balance), it'll need the bounds.
+ return sb.bounds;
+ }
+
+ void pushControl() {
+ fControlIndices.push_back(fCurrentOp);
+ if (!fSaveStack.isEmpty()) {
+ fSaveStack.top().controlOps++;
+ }
+ }
+
+ void popControl(const Bounds& bounds) {
+ fBounds[fControlIndices.top()] = bounds;
+ fControlIndices.pop();
+ }
+
+ void updateSaveBounds(const Bounds& bounds) {
+ // If we're in a Save block, expand its bounds to cover these bounds too.
+ if (!fSaveStack.isEmpty()) {
+ fSaveStack.top().bounds.join(bounds);
+ }
+ }
+
+ Bounds bounds(const Flush&) const { return fCullRect; }
+
+ Bounds bounds(const DrawPaint&) const { return fCullRect; }
+ Bounds bounds(const DrawBehind&) const { return fCullRect; }
+ Bounds bounds(const NoOp&) const { return Bounds::MakeEmpty(); } // NoOps don't draw.
+
+ Bounds bounds(const DrawRect& op) const { return this->adjustAndMap(op.rect, &op.paint); }
+ Bounds bounds(const DrawRegion& op) const {
+ SkRect rect = SkRect::Make(op.region.getBounds());
+ return this->adjustAndMap(rect, &op.paint);
+ }
+ Bounds bounds(const DrawOval& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ // Tighter arc bounds?
+ Bounds bounds(const DrawArc& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ Bounds bounds(const DrawRRect& op) const {
+ return this->adjustAndMap(op.rrect.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawDRRect& op) const {
+ return this->adjustAndMap(op.outer.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawImage& op) const {
+ const SkImage* image = op.image.get();
+ SkRect rect = SkRect::MakeXYWH(op.left, op.top, image->width(), image->height());
+
+ return this->adjustAndMap(rect, op.paint);
+ }
+ Bounds bounds(const DrawImageLattice& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawImageRect& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawImageNine& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawPath& op) const {
+ return op.path.isInverseFillType() ? fCullRect
+ : this->adjustAndMap(op.path.getBounds(), &op.paint);
+ }
+ Bounds bounds(const DrawPoints& op) const {
+ SkRect dst;
+ dst.setBounds(op.pts, op.count);
+
+ // Pad the bounding box a little to make sure hairline points' bounds aren't empty.
+ SkScalar stroke = SkMaxScalar(op.paint.getStrokeWidth(), 0.01f);
+ dst.outset(stroke/2, stroke/2);
+
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawPatch& op) const {
+ SkRect dst;
+ dst.setBounds(op.cubics, SkPatchUtils::kNumCtrlPts);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawVertices& op) const {
+ return this->adjustAndMap(op.vertices->bounds(), &op.paint);
+ }
+
+ Bounds bounds(const DrawAtlas& op) const {
+ if (op.cull) {
+ // TODO: <reed> can we pass nullptr for the paint? Isn't cull already "correct"
+ // for the paint (by the caller)?
+ return this->adjustAndMap(*op.cull, op.paint);
+ } else {
+ return fCullRect;
+ }
+ }
+
+ Bounds bounds(const DrawShadowRec& op) const {
+ SkRect bounds;
+ SkDrawShadowMetrics::GetLocalBounds(op.path, op.rec, fCTM, &bounds);
+ return this->adjustAndMap(bounds, nullptr);
+ }
+
+ Bounds bounds(const DrawPicture& op) const {
+ SkRect dst = op.picture->cullRect();
+ op.matrix.mapRect(&dst);
+ return this->adjustAndMap(dst, op.paint);
+ }
+
+ Bounds bounds(const DrawTextBlob& op) const {
+ SkRect dst = op.blob->bounds();
+ dst.offset(op.x, op.y);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+
+ Bounds bounds(const DrawDrawable& op) const {
+ return this->adjustAndMap(op.worstCaseBounds, nullptr);
+ }
+
+ Bounds bounds(const DrawAnnotation& op) const {
+ return this->adjustAndMap(op.rect, nullptr);
+ }
+ Bounds bounds(const DrawEdgeAAQuad& op) const {
+ SkRect bounds = op.rect;
+ if (op.clip) {
+ bounds.setBounds(op.clip, 4);
+ }
+ return this->adjustAndMap(bounds, nullptr);
+ }
+ Bounds bounds(const DrawEdgeAAImageSet& op) const {
+ SkRect rect = SkRect::MakeEmpty();
+ int clipIndex = 0;
+ for (int i = 0; i < op.count; ++i) {
+ SkRect entryBounds = op.set[i].fDstRect;
+ if (op.set[i].fHasClip) {
+ entryBounds.setBounds(op.dstClips + clipIndex, 4);
+ clipIndex += 4;
+ }
+ if (op.set[i].fMatrixIndex >= 0) {
+ op.preViewMatrices[op.set[i].fMatrixIndex].mapRect(&entryBounds);
+ }
+ rect.join(this->adjustAndMap(entryBounds, nullptr));
+ }
+ return rect;
+ }
+
+ // Returns true if rect was meaningfully adjusted for the effects of paint,
+ // false if the paint could affect the rect in unknown ways.
+ static bool AdjustForPaint(const SkPaint* paint, SkRect* rect) {
+ if (paint) {
+ if (paint->canComputeFastBounds()) {
+ *rect = paint->computeFastBounds(*rect, rect);
+ return true;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ bool adjustForSaveLayerPaints(SkRect* rect, int savesToIgnore = 0) const {
+ for (int i = fSaveStack.count() - 1 - savesToIgnore; i >= 0; i--) {
+ SkMatrix inverse;
+ if (!fSaveStack[i].ctm.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(rect);
+ if (!AdjustForPaint(fSaveStack[i].paint, rect)) {
+ return false;
+ }
+ fSaveStack[i].ctm.mapRect(rect);
+ }
+ return true;
+ }
+
+ // We do not guarantee anything for operations outside of the cull rect
+ const SkRect fCullRect;
+
+ // Conservative identity-space bounds for each op in the SkRecord.
+ Bounds* fBounds;
+
+ // We walk fCurrentOp through the SkRecord,
+ // as we go using updateCTM() to maintain the exact CTM (fCTM).
+ int fCurrentOp;
+ SkMatrix fCTM;
+
+ // Used to track the bounds of Save/Restore blocks and the control ops inside them.
+ SkTDArray<SaveBounds> fSaveStack;
+ SkTDArray<int> fControlIndices;
+};
+
+} // namespace SkRecords
+
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord& record, SkRect bounds[]) {
+ {
+ SkRecords::FillBounds visitor(cullRect, record, bounds);
+ for (int i = 0; i < record.count(); i++) {
+ visitor.setCurrentOp(i);
+ record.visit(i, visitor);
+ }
+ }
+}
+
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.h b/gfx/skia/skia/src/core/SkRecordDraw.h
new file mode 100644
index 0000000000..baffab5572
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordDraw_DEFINED
+#define SkRecordDraw_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "src/core/SkBBoxHierarchy.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkRecord.h"
+
+class SkDrawable;
+class SkLayerInfo;
+
+// Calculate conservative identity space bounds for each op in the record.
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord&, SkRect bounds[]);
+
+// Performs the same bounds computation as SkRecordFillBounds(), and additionally gathers
+// information about saveLayers, storing it for later use (e.g., layer hoisting). The gathered
+// information is sufficient to determine where each saveLayer will land and which ops in the
+// picture it represents.
+void SkRecordComputeLayers(const SkRect& cullRect, const SkRecord&, SkRect bounds[],
+ const SkBigPicture::SnapshotArray*, SkLayerInfo* data);
+
+// Draw an SkRecord into an SkCanvas. A convenience wrapper around SkRecords::Draw.
+void SkRecordDraw(const SkRecord&, SkCanvas*, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkBBoxHierarchy*, SkPicture::AbortCallback*);
+
+// Draw a portion of an SkRecord into an SkCanvas.
+// When drawing a portion of an SkRecord the CTM on the passed in canvas must be
+// the composition of the replay matrix with the record-time CTM (for the portion
+// of the record that is being replayed). For setMatrix calls to behave correctly,
+// the initialCTM parameter must be set to just the replay matrix.
+void SkRecordPartialDraw(const SkRecord&, SkCanvas*,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop, const SkMatrix& initialCTM);
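+//
+// Sketch (replayMatrix and recordTimeCTM are placeholder names for the caller's
+// matrices, not APIs defined here):
+//     canvas->setMatrix(SkMatrix::Concat(replayMatrix, recordTimeCTM));
+//     SkRecordPartialDraw(record, canvas, nullptr, 0, start, stop, replayMatrix);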
+
+namespace SkRecords {
+
+// This is an SkRecord visitor that will draw that SkRecord to an SkCanvas.
+class Draw : SkNoncopyable {
+public:
+ explicit Draw(SkCanvas* canvas, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkMatrix* initialCTM = nullptr)
+ : fInitialCTM(initialCTM ? *initialCTM : canvas->getTotalMatrix())
+ , fCanvas(canvas)
+ , fDrawablePicts(drawablePicts)
+ , fDrawables(drawables)
+ , fDrawableCount(drawableCount)
+ {}
+
+ // This operator calls methods on the |canvas|. The various draw() wrapper
+ // methods around SkCanvas are defined by the DRAW() macro in
+ // SkRecordDraw.cpp.
+ template <typename T> void operator()(const T& r) {
+ this->draw(r);
+ }
+
+protected:
+ SkPicture const* const* drawablePicts() const { return fDrawablePicts; }
+ int drawableCount() const { return fDrawableCount; }
+
+private:
+    // No base case, so the compiler will check that we implement all possibilities.
+ template <typename T> void draw(const T&);
+
+ const SkMatrix fInitialCTM;
+ SkCanvas* fCanvas;
+ SkPicture const* const* fDrawablePicts;
+ SkDrawable* const* fDrawables;
+ int fDrawableCount;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordDraw_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.cpp b/gfx/skia/skia/src/core/SkRecordOpts.cpp
new file mode 100644
index 0000000000..127a9a861e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.cpp
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRecordOpts.h"
+
+#include "include/private/SkTDArray.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkRecordPattern.h"
+#include "src/core/SkRecords.h"
+
+using namespace SkRecords;
+
+// Most of the optimizations in this file are pattern-based. These are all defined as structs with:
+// - a Match typedef
+// - a bool onMatch(SkRecord*, Match*, int begin, int end) method,
+// which returns true if it made changes and false if not.
+
+// Run a pattern-based optimization once across the SkRecord, returning true if it made any changes.
+// It looks for spans which match Pass::Match, and when found calls onMatch() with that pattern,
+// record, and [begin,end) span of the commands that matched.
+template <typename Pass>
+static bool apply(Pass* pass, SkRecord* record) {
+ typename Pass::Match match;
+ bool changed = false;
+ int begin, end = 0;
+
+ while (match.search(record, &begin, &end)) {
+ changed |= pass->onMatch(record, &match, begin, end);
+ }
+ return changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void multiple_set_matrices(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Match;
+
+ bool onMatch(SkRecord* record, Match* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ while (apply(&pass, record));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // experimental, but needs knowledge of previous matrix to operate correctly
+static void apply_matrix_to_draw_params(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Pattern;
+
+ bool onMatch(SkRecord* record, Pattern* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ // No need to loop, as we never "open up" opportunities for more of this type of optimization.
+ apply(&pass, record);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Turns the logical NoOp Save and Restore in Save-Draw*-Restore patterns into actual NoOps.
+struct SaveOnlyDrawsRestoreNooper {
+ typedef Pattern<Is<Save>,
+ Greedy<Or<Is<NoOp>, IsDraw>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ record->replace<NoOp>(begin); // Save
+ record->replace<NoOp>(end-1); // Restore
+ return true;
+ }
+};
+
+static bool fold_opacity_layer_color_to_paint(const SkPaint* layerPaint,
+ bool isSaveLayer,
+ SkPaint* paint) {
+ // We assume layerPaint is always from a saveLayer. If isSaveLayer is
+ // true, we assume paint is too.
+
+    // The alpha folding can proceed if the filter layer paint does not have properties which cause
+    // the resulting filter layer to be "blended" in complex ways with the parent layer.
+ // TODO: most likely only some xfer modes are the hard constraints
+ if (!paint->isSrcOver()) {
+ return false;
+ }
+
+ if (!isSaveLayer && paint->getImageFilter()) {
+        // For normal draws, the paint color is used as one input for the color of the draw. The
+        // image filter will operate on the result, and thus we cannot change the input.
+ // For layer saves, the image filter is applied to the layer contents. The layer is then
+ // modulated with the paint color, so it's fine to proceed with the fold for saveLayer
+ // paints with image filters.
+ return false;
+ }
+
+ if (paint->getColorFilter()) {
+ // Filter input depends on the paint color.
+
+ // Here we could filter the color if we knew the draw is going to be uniform color. This
+ // should be detectable as drawPath/drawRect/.. without a shader being uniform, while
+ // drawBitmap/drawSprite or a shader being non-uniform. However, current matchers don't
+ // give the type out easily, so just do not optimize that at the moment.
+ return false;
+ }
+
+ if (layerPaint) {
+ const uint32_t layerColor = layerPaint->getColor();
+ // The layer paint color must have only alpha component.
+ if (SK_ColorTRANSPARENT != SkColorSetA(layerColor, SK_AlphaTRANSPARENT)) {
+ return false;
+ }
+
+        // The layer paint cannot have any effects.
+ if (layerPaint->getPathEffect() ||
+ layerPaint->getShader() ||
+ !layerPaint->isSrcOver() ||
+ layerPaint->getMaskFilter() ||
+ layerPaint->getColorFilter() ||
+ layerPaint->getImageFilter()) {
+ return false;
+ }
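+        // Fold the layer's alpha into the draw paint; e.g. a layer alpha of 0x80 over
+        // a paint alpha of 0xFF folds to SkMulDiv255Round(0xFF, 0x80) == 0x80.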
+ paint->setAlpha(SkMulDiv255Round(paint->getAlpha(), SkColorGetA(layerColor)));
+ }
+
+ return true;
+}
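+
+// For example (illustrative arithmetic only): given fold_opacity_layer_color_to_paint() above, a
+// layer paint that is a plain 0x80 alpha and a draw paint with alpha 0xC0 fold to a draw alpha of
+// SkMulDiv255Round(0xC0, 0x80) == 0x60, after which the layer itself can be no-op'd.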
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+struct SaveNoDrawsRestoreNooper {
+ // Greedy matches greedily, so we also have to exclude Save and Restore.
+ // Nested SaveLayers need to be excluded, or we'll match their Restore!
+ typedef Pattern<Is<Save>,
+ Greedy<Not<Or<Is<Save>,
+ Is<SaveLayer>,
+ Is<Restore>,
+ IsDraw>>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ // The entire span between Save and Restore (inclusively) does nothing.
+ for (int i = begin; i < end; i++) {
+ record->replace<NoOp>(i);
+ }
+ return true;
+ }
+};
+void SkRecordNoopSaveRestores(SkRecord* record) {
+ SaveOnlyDrawsRestoreNooper onlyDraws;
+ SaveNoDrawsRestoreNooper noDraws;
+
+ // Run until they stop changing things.
+ while (apply(&onlyDraws, record) || apply(&noDraws, record));
+}
+
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+static bool effectively_srcover(const SkPaint* paint) {
+ if (!paint || paint->isSrcOver()) {
+ return true;
+ }
+    // kSrc mode with an opaque color and no effects (which might change opaqueness) is OK too.
+ return !paint->getShader() && !paint->getColorFilter() && !paint->getImageFilter() &&
+ 0xFF == paint->getAlpha() && paint->getBlendMode() == SkBlendMode::kSrc;
+}
+
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+struct SaveLayerDrawRestoreNooper {
+ typedef Pattern<Is<SaveLayer>, IsDraw, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop || match->first<SaveLayer>()->clipMask) {
+ // can't throw away the layer if we have a backdrop or clip mask
+ return false;
+ }
+
+ if (match->first<SaveLayer>()->saveLayerFlags &
+ SkCanvasPriv::kDontClipToLayer_SaveLayerFlag) {
+ // can't throw away the layer if set
+ return false;
+ }
+
+ // A SaveLayer's bounds field is just a hint, so we should be free to ignore it.
+ SkPaint* layerPaint = match->first<SaveLayer>()->paint;
+ SkPaint* drawPaint = match->second<SkPaint>();
+
+ if (nullptr == layerPaint && effectively_srcover(drawPaint)) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ if (drawPaint == nullptr) {
+ // We can just give the draw the SaveLayer's paint.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(layerPaint, false /*isSaveLayer*/, drawPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex+2); // Restore
+ return true;
+ }
+};
+void SkRecordNoopSaveLayerDrawRestores(SkRecord* record) {
+ SaveLayerDrawRestoreNooper pass;
+ apply(&pass, record);
+}
+#endif
+
+/* For SVG generated:
+ SaveLayer (non-opaque, typically for CSS opacity)
+ Save
+ ClipRect
+ SaveLayer (typically for SVG filter)
+ Restore
+ Restore
+ Restore
+*/
+struct SvgOpacityAndFilterLayerMergePass {
+ typedef Pattern<Is<SaveLayer>, Is<Save>, Is<ClipRect>, Is<SaveLayer>,
+ Is<Restore>, Is<Restore>, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop) {
+ // can't throw away the layer if we have a backdrop
+ return false;
+ }
+
+ SkPaint* opacityPaint = match->first<SaveLayer>()->paint;
+ if (nullptr == opacityPaint) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+        // This layer typically contains a filter, but this should work for layers used for
+        // other purposes too.
+ SkPaint* filterLayerPaint = match->fourth<SaveLayer>()->paint;
+ if (filterLayerPaint == nullptr) {
+ // We can just give the inner SaveLayer the paint of the outer SaveLayer.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(opacityPaint, true /*isSaveLayer*/,
+ filterLayerPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex + 6); // Restore
+ return true;
+ }
+};
+
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord* record) {
+ SvgOpacityAndFilterLayerMergePass pass;
+ apply(&pass, record);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRecordOptimize(SkRecord* record) {
+ // This might be useful as a first pass in the future if we want to weed
+ // out junk for other optimization passes. Right now, nothing needs it,
+ // and the bounding box hierarchy will do the work of skipping no-op
+ // Save-NoDraw-Restore sequences better than we can here.
+    // Because there is a known problem with this peephole and drawAnnotation, it is disabled.
+ // If we want to enable this we must first fix this bug:
+ // https://bugs.chromium.org/p/skia/issues/detail?id=5548
+// SkRecordNoopSaveRestores(record);
+
+ // Turn off this optimization completely for Android framework
+ // because it makes the following Android CTS test fail:
+ // android.uirendering.cts.testclasses.LayerTests#testSaveLayerClippedWithAlpha
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkRecordNoopSaveLayerDrawRestores(record);
+#endif
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
+
+void SkRecordOptimize2(SkRecord* record) {
+ multiple_set_matrices(record);
+ SkRecordNoopSaveRestores(record);
+ // See why we turn this off in SkRecordOptimize above.
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkRecordNoopSaveLayerDrawRestores(record);
+#endif
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.h b/gfx/skia/skia/src/core/SkRecordOpts.h
new file mode 100644
index 0000000000..a1e3c245a0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordOpts_DEFINED
+#define SkRecordOpts_DEFINED
+
+#include "src/core/SkRecord.h"
+
+// Run all optimizations in recommended order.
+void SkRecordOptimize(SkRecord*);
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+void SkRecordNoopSaveRestores(SkRecord*);
+
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+void SkRecordNoopSaveLayerDrawRestores(SkRecord*);
+#endif
+
+// For SVG-generated SaveLayer-Save-ClipRect-SaveLayer-3xRestore patterns, merge the
+// alpha of the first SaveLayer into the second SaveLayer.
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord*);
+
+// Experimental optimizers
+void SkRecordOptimize2(SkRecord*);
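+
+// A typical flow (illustrative sketch): fill an SkRecord via SkRecorder, then run the
+// recommended passes before playback:
+//   SkRecord record;
+//   // ... record canvas calls through an SkRecorder bound to &record ...
+//   SkRecordOptimize(&record);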
+
+#endif//SkRecordOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordPattern.h b/gfx/skia/skia/src/core/SkRecordPattern.h
new file mode 100644
index 0000000000..7e96592c70
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordPattern.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordPattern_DEFINED
+#define SkRecordPattern_DEFINED
+
+#include "include/private/SkTLogic.h"
+#include "src/core/SkRecord.h"
+
+namespace SkRecords {
+
+// First, some matchers. These match a single command in the SkRecord,
+// and may hang onto some data from it. If so, you can get the data by calling .get().
+
+// Matches a command of type T, and stores that command.
+template <typename T>
+class Is {
+public:
+ Is() : fPtr(nullptr) {}
+
+ typedef T type;
+ type* get() { return fPtr; }
+
+ bool operator()(T* ptr) {
+ fPtr = ptr;
+ return true;
+ }
+
+ template <typename U>
+ bool operator()(U*) {
+ fPtr = nullptr;
+ return false;
+ }
+
+private:
+ type* fPtr;
+};
+
+// Matches any command that draws, and stores its paint.
+class IsDraw {
+public:
+ IsDraw() : fPaint(nullptr) {}
+
+ typedef SkPaint type;
+ type* get() { return fPaint; }
+
+ template <typename T>
+ SK_WHEN((T::kTags & kDrawWithPaint_Tag) == kDrawWithPaint_Tag, bool) operator()(T* draw) {
+ fPaint = AsPtr(draw->paint);
+ return true;
+ }
+
+ template <typename T>
+ SK_WHEN((T::kTags & kDrawWithPaint_Tag) == kDraw_Tag, bool) operator()(T* draw) {
+ fPaint = nullptr;
+ return true;
+ }
+
+ template <typename T>
+ SK_WHEN(!(T::kTags & kDraw_Tag), bool) operator()(T* draw) {
+ fPaint = nullptr;
+ return false;
+ }
+
+private:
+ // Abstracts away whether the paint is always part of the command or optional.
+ template <typename T> static T* AsPtr(SkRecords::Optional<T>& x) { return x; }
+ template <typename T> static T* AsPtr(T& x) { return &x; }
+
+ type* fPaint;
+};
+
+// Matches if Matcher doesn't. Stores nothing.
+template <typename Matcher>
+struct Not {
+ template <typename T>
+ bool operator()(T* ptr) { return !Matcher()(ptr); }
+};
+
+// Matches if any of First or Rest... does. Stores nothing.
+template <typename First, typename... Rest>
+struct Or {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr) || Or<Rest...>()(ptr); }
+};
+template <typename First>
+struct Or<First> {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr); }
+};
+
+
+// Greedy is a special matcher that greedily matches Matcher 0 or more times. Stores nothing.
+template <typename Matcher>
+struct Greedy {
+ template <typename T>
+ bool operator()(T* ptr) { return Matcher()(ptr); }
+};
+
+// Pattern matches each of its matchers in order.
+//
+// This is the main entry point to pattern matching, and so provides a couple of extra API bits:
+// - search scans through the record to look for matches;
+// - first, second, third, ... return the data stored by their respective matchers in the pattern.
+
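+// A small usage sketch (illustrative; see the passes in SkRecordOpts.cpp for real uses):
+//   Pattern<Is<Save>, IsDraw, Is<Restore>> match;
+//   int begin, end = 0;
+//   while (match.search(record, &begin, &end)) {
+//       // match.first<Save>() and match.second<SkPaint>() expose the stored data.
+//   }
+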
+template <typename... Matchers> class Pattern;
+
+template <> class Pattern<> {
+public:
+    // Bottoms out the recursion. Just return whatever index the front matchers decided on.
+ int match(SkRecord*, int i) { return i; }
+};
+
+template <typename First, typename... Rest>
+class Pattern<First, Rest...> {
+public:
+ // If this pattern matches the SkRecord starting from i,
+ // return the index just past the end of the pattern, otherwise return 0.
+ SK_ALWAYS_INLINE int match(SkRecord* record, int i) {
+ i = this->matchFirst(&fFirst, record, i);
+ return i > 0 ? fRest.match(record, i) : 0;
+ }
+
+ // Starting from *end, walk through the SkRecord to find the first span matching this pattern.
+ // If there is no such span, return false. If there is, return true and set [*begin, *end).
+ SK_ALWAYS_INLINE bool search(SkRecord* record, int* begin, int* end) {
+ for (*begin = *end; *begin < record->count(); ++(*begin)) {
+ *end = this->match(record, *begin);
+ if (*end != 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // TODO: some sort of smart get<i>()
+ template <typename T> T* first() { return fFirst.get(); }
+ template <typename T> T* second() { return fRest.template first<T>(); }
+ template <typename T> T* third() { return fRest.template second<T>(); }
+ template <typename T> T* fourth() { return fRest.template third<T>(); }
+
+private:
+ // If first isn't a Greedy, try to match at i once.
+ template <typename T>
+ int matchFirst(T* first, SkRecord* record, int i) {
+ if (i < record->count()) {
+ if (record->mutate(i, *first)) {
+ return i+1;
+ }
+ }
+ return 0;
+ }
+
+ // If first is a Greedy, walk i until it doesn't match.
+ template <typename T>
+ int matchFirst(Greedy<T>* first, SkRecord* record, int i) {
+ while (i < record->count()) {
+ if (!record->mutate(i, *first)) {
+ return i;
+ }
+ i++;
+ }
+ return 0;
+ }
+
+ First fFirst;
+ Pattern<Rest...> fRest;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordPattern_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.cpp b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
new file mode 100644
index 0000000000..c9af8a2c08
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPictureRecorder.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPicturePlayback.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkRecordedDrawable.h"
+
+void SkRecordedDrawable::onDraw(SkCanvas* canvas) {
+ SkDrawable* const* drawables = nullptr;
+ int drawableCount = 0;
+ if (fDrawableList) {
+ drawables = fDrawableList->begin();
+ drawableCount = fDrawableList->count();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, fBBH.get(), nullptr);
+}
+
+SkPicture* SkRecordedDrawable::onNewPictureSnapshot() {
+ SkBigPicture::SnapshotArray* pictList = nullptr;
+ if (fDrawableList) {
+ // TODO: should we plumb-down the BBHFactory and recordFlags from our host
+ // PictureRecorder?
+ pictList = fDrawableList->newDrawableSnapshot();
+ }
+
+ size_t subPictureBytes = 0;
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += pictList->begin()[i]->approximateBytesUsed();
+ }
+ // SkBigPicture will take ownership of a ref on both fRecord and fBBH.
+ // We're not willing to give up our ownership, so we must ref them for SkPicture.
+ return new SkBigPicture(fBounds, SkRef(fRecord.get()), pictList, SkSafeRef(fBBH.get()),
+ subPictureBytes);
+}
+
+void SkRecordedDrawable::flatten(SkWriteBuffer& buffer) const {
+ // Write the bounds.
+ buffer.writeRect(fBounds);
+
+ // Create an SkPictureRecord to record the draw commands.
+ SkPictInfo info;
+ SkPictureRecord pictureRecord(SkISize::Make(fBounds.width(), fBounds.height()), 0);
+
+ // If the query contains the whole picture, don't bother with the bounding box hierarchy.
+ SkBBoxHierarchy* bbh;
+ if (pictureRecord.getLocalClipBounds().contains(fBounds)) {
+ bbh = nullptr;
+ } else {
+ bbh = fBBH.get();
+ }
+
+ // Record the draw commands.
+ pictureRecord.beginRecording();
+ SkRecordDraw(*fRecord, &pictureRecord, nullptr, fDrawableList->begin(), fDrawableList->count(),
+ bbh, nullptr);
+ pictureRecord.endRecording();
+
+ // Flatten the recorded commands and drawables.
+ SkPictureData pictureData(pictureRecord, info);
+ pictureData.flatten(buffer);
+}
+
+sk_sp<SkFlattenable> SkRecordedDrawable::CreateProc(SkReadBuffer& buffer) {
+ // Read the bounds.
+ SkRect bounds;
+ buffer.readRect(&bounds);
+
+ // Unflatten into a SkPictureData.
+ SkPictInfo info;
+ info.setVersion(buffer.getVersion());
+ info.fCullRect = bounds;
+ std::unique_ptr<SkPictureData> pictureData(SkPictureData::CreateFromBuffer(buffer, info));
+ if (!pictureData) {
+ return nullptr;
+ }
+
+ // Create a drawable.
+ SkPicturePlayback playback(pictureData.get());
+ SkPictureRecorder recorder;
+ playback.draw(recorder.beginRecording(bounds), nullptr, &buffer);
+ return recorder.finishRecordingAsDrawable();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.h b/gfx/skia/skia/src/core/SkRecordedDrawable.h
new file mode 100644
index 0000000000..75502cd862
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkRecordedDrawable_DEFINED
+#define SkRecordedDrawable_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "src/core/SkBBoxHierarchy.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecorder.h"
+
+class SkRecordedDrawable : public SkDrawable {
+public:
+ SkRecordedDrawable(sk_sp<SkRecord> record, sk_sp<SkBBoxHierarchy> bbh,
+ std::unique_ptr<SkDrawableList> drawableList, const SkRect& bounds)
+ : fRecord(std::move(record))
+ , fBBH(std::move(bbh))
+ , fDrawableList(std::move(drawableList))
+ , fBounds(bounds)
+ {}
+
+ void flatten(SkWriteBuffer& buffer) const override;
+
+protected:
+ SkRect onGetBounds() override { return fBounds; }
+
+ void onDraw(SkCanvas* canvas) override;
+
+ SkPicture* onNewPictureSnapshot() override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkRecordedDrawable)
+
+ sk_sp<SkRecord> fRecord;
+ sk_sp<SkBBoxHierarchy> fBBH;
+ std::unique_ptr<SkDrawableList> fDrawableList;
+ const SkRect fBounds;
+};
+#endif // SkRecordedDrawable_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecorder.cpp b/gfx/skia/skia/src/core/SkRecorder.cpp
new file mode 100644
index 0000000000..df584499bb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.cpp
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRecorder.h"
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkSurface.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/utils/SkPatchUtils.h"
+
+#include <new>
+
+SkDrawableList::~SkDrawableList() {
+ fArray.unrefAll();
+}
+
+SkBigPicture::SnapshotArray* SkDrawableList::newDrawableSnapshot() {
+ const int count = fArray.count();
+ if (0 == count) {
+ return nullptr;
+ }
+ SkAutoTMalloc<const SkPicture*> pics(count);
+ for (int i = 0; i < count; ++i) {
+ pics[i] = fArray[i]->newPictureSnapshot();
+ }
+ return new SkBigPicture::SnapshotArray(pics.release(), count);
+}
+
+void SkDrawableList::append(SkDrawable* drawable) {
+ *fArray.append() = SkRef(drawable);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRecorder::SkRecorder(SkRecord* record, int width, int height, SkMiniRecorder* mr)
+ : SkCanvasVirtualEnforcer<SkNoDrawCanvas>(width, height)
+ , fDrawPictureMode(Record_DrawPictureMode)
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record)
+ , fMiniRecorder(mr) {}
+
+SkRecorder::SkRecorder(SkRecord* record, const SkRect& bounds, SkMiniRecorder* mr)
+ : SkCanvasVirtualEnforcer<SkNoDrawCanvas>(bounds.roundOut())
+ , fDrawPictureMode(Record_DrawPictureMode)
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record)
+ , fMiniRecorder(mr) {}
+
+void SkRecorder::reset(SkRecord* record, const SkRect& bounds,
+ DrawPictureMode dpm, SkMiniRecorder* mr) {
+ this->forgetRecord();
+ fDrawPictureMode = dpm;
+ fRecord = record;
+ this->resetCanvas(bounds.roundOut());
+ fMiniRecorder = mr;
+}
+
+void SkRecorder::forgetRecord() {
+ fDrawableList.reset(nullptr);
+ fApproxBytesUsedBySubPictures = 0;
+ fRecord = nullptr;
+}
+
+// To make appending to fRecord a little less verbose.
+template<typename T, typename... Args>
+void SkRecorder::append(Args&&... args) {
+ if (fMiniRecorder) {
+ this->flushMiniRecorder();
+ }
+ new (fRecord->append<T>()) T{std::forward<Args>(args)...};
+}
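+
+// For example, this->append<SkRecords::DrawRect>(paint, rect) placement-news a DrawRect
+// {paint, rect} into the slot handed back by fRecord->append<SkRecords::DrawRect>().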
+
+#define TRY_MINIRECORDER(method, ...) \
+ if (fMiniRecorder && fMiniRecorder->method(__VA_ARGS__)) return
+
+// For methods which must call back into SkNoDrawCanvas.
+#define INHERITED(method, ...) this->SkNoDrawCanvas::method(__VA_ARGS__)
+
+// Use copy() only for optional arguments, to be copied if present or skipped if not.
+// (For most types we just pass by value and let copy constructors do their thing.)
+template <typename T>
+T* SkRecorder::copy(const T* src) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ return new (fRecord->alloc<T>()) T(*src);
+}
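+
+// For example (see onDrawImage below), this->copy(paint) stores a copy of an optional
+// SkPaint only when paint is non-null; a null paint stays null in the record.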
+
+// This copy() is for arrays.
+// It will work with POD or non-POD, though currently we only use it for POD.
+template <typename T>
+T* SkRecorder::copy(const T src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ T* dst = fRecord->alloc<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ new (dst + i) T(src[i]);
+ }
+ return dst;
+}
+
+// Specialization for copying strings, using memcpy.
+// This measured around 2x faster for copying code points,
+// but I found no corresponding speedup for other arrays.
+template <>
+char* SkRecorder::copy(const char src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ char* dst = fRecord->alloc<char>(count);
+ memcpy(dst, src, count);
+ return dst;
+}
+
+// As above, assuming and copying a terminating \0.
+template <>
+char* SkRecorder::copy(const char* src) {
+ return this->copy(src, strlen(src)+1);
+}
+
+void SkRecorder::flushMiniRecorder() {
+ if (fMiniRecorder) {
+ SkMiniRecorder* mr = fMiniRecorder;
+ fMiniRecorder = nullptr; // Needs to happen before flushAndReset() or we recurse forever.
+ mr->flushAndReset(this);
+ }
+}
+
+void SkRecorder::onDrawPaint(const SkPaint& paint) {
+ this->append<SkRecords::DrawPaint>(paint);
+}
+
+void SkRecorder::onDrawBehind(const SkPaint& paint) {
+ this->append<SkRecords::DrawBehind>(paint);
+}
+
+void SkRecorder::onDrawPoints(PointMode mode,
+ size_t count,
+ const SkPoint pts[],
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawPoints>(paint, mode, SkToUInt(count), this->copy(pts, count));
+}
+
+void SkRecorder::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ TRY_MINIRECORDER(drawRect, rect, paint);
+ this->append<SkRecords::DrawRect>(paint, rect);
+}
+
+void SkRecorder::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ this->append<SkRecords::DrawRegion>(paint, region);
+}
+
+void SkRecorder::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ this->append<SkRecords::DrawOval>(paint, oval);
+}
+
+void SkRecorder::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ this->append<SkRecords::DrawArc>(paint, oval, startAngle, sweepAngle, useCenter);
+}
+
+void SkRecorder::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ this->append<SkRecords::DrawRRect>(paint, rrect);
+}
+
+void SkRecorder::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ this->append<SkRecords::DrawDRRect>(paint, outer, inner);
+}
+
+void SkRecorder::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ if (fDrawPictureMode == Record_DrawPictureMode) {
+ if (!fDrawableList) {
+ fDrawableList.reset(new SkDrawableList);
+ }
+ fDrawableList->append(drawable);
+ this->append<SkRecords::DrawDrawable>(this->copy(matrix), drawable->getBounds(), fDrawableList->count() - 1);
+ } else {
+ SkASSERT(fDrawPictureMode == Playback_DrawPictureMode);
+ drawable->draw(this, matrix);
+ }
+}
+
+void SkRecorder::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ TRY_MINIRECORDER(drawPath, path, paint);
+ this->append<SkRecords::DrawPath>(paint, path);
+}
+
+void SkRecorder::onDrawBitmap(const SkBitmap& bitmap,
+ SkScalar left,
+ SkScalar top,
+ const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImage(image.get(), left, top, paint);
+ }
+}
+
+void SkRecorder::onDrawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImageRect(image.get(), src, dst, paint, constraint);
+ }
+}
+
+void SkRecorder::onDrawBitmapNine(const SkBitmap& bitmap,
+ const SkIRect& center,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImageNine(image.get(), center, dst, paint);
+ }
+}
+
+void SkRecorder::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ this->onDrawImageLattice(image.get(), lattice, dst, paint);
+}
+
+void SkRecorder::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ this->append<SkRecords::DrawImage>(this->copy(paint), sk_ref_sp(image), left, top);
+}
+
+void SkRecorder::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ this->append<SkRecords::DrawImageRect>(this->copy(paint), sk_ref_sp(image), this->copy(src), dst, constraint);
+}
+
+void SkRecorder::onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ this->append<SkRecords::DrawImageNine>(this->copy(paint), sk_ref_sp(image), center, dst);
+}
+
+void SkRecorder::onDrawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ int flagCount = lattice.fRectTypes ? (lattice.fXCount + 1) * (lattice.fYCount + 1) : 0;
+ SkASSERT(lattice.fBounds);
+ this->append<SkRecords::DrawImageLattice>(this->copy(paint), sk_ref_sp(image),
+ lattice.fXCount, this->copy(lattice.fXDivs, lattice.fXCount),
+ lattice.fYCount, this->copy(lattice.fYDivs, lattice.fYCount),
+ flagCount, this->copy(lattice.fRectTypes, flagCount),
+ this->copy(lattice.fColors, flagCount), *lattice.fBounds, dst);
+}
+
+void SkRecorder::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ TRY_MINIRECORDER(drawTextBlob, blob, x, y, paint);
+ this->append<SkRecords::DrawTextBlob>(paint, sk_ref_sp(blob), x, y);
+}
+
+void SkRecorder::onDrawPicture(const SkPicture* pic, const SkMatrix* matrix, const SkPaint* paint) {
+ if (fDrawPictureMode == Record_DrawPictureMode) {
+ fApproxBytesUsedBySubPictures += pic->approximateBytesUsed();
+ this->append<SkRecords::DrawPicture>(this->copy(paint), sk_ref_sp(pic), matrix ? *matrix : SkMatrix::I());
+ } else if (fDrawPictureMode == PlaybackTop_DrawPictureMode) {
+ // temporarily change the mode of this recorder to Record,
+ fDrawPictureMode = Record_DrawPictureMode;
+ // play back the top level picture
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, pic->cullRect());
+ pic->playback(this);
+ // restore the mode
+ fDrawPictureMode = PlaybackTop_DrawPictureMode;
+ } else {
+ SkASSERT(fDrawPictureMode == Playback_DrawPictureMode);
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, pic->cullRect());
+ pic->playback(this);
+ }
+}
+
+void SkRecorder::onDrawVerticesObject(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode bmode, const SkPaint& paint) {
+ this->append<SkRecords::DrawVertices>(paint,
+ sk_ref_sp(const_cast<SkVertices*>(vertices)),
+ this->copy(bones, boneCount),
+ boneCount,
+ bmode);
+}
+
+void SkRecorder::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawPatch>(paint,
+ cubics ? this->copy(cubics, SkPatchUtils::kNumCtrlPts) : nullptr,
+ colors ? this->copy(colors, SkPatchUtils::kNumCorners) : nullptr,
+ texCoords ? this->copy(texCoords, SkPatchUtils::kNumCorners) : nullptr,
+ bmode);
+}
+
+void SkRecorder::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ this->append<SkRecords::DrawAtlas>(this->copy(paint),
+ sk_ref_sp(atlas),
+ this->copy(xform, count),
+ this->copy(tex, count),
+ this->copy(colors, count),
+ count,
+ mode,
+ this->copy(cull));
+}
+
+void SkRecorder::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ this->append<SkRecords::DrawShadowRec>(path, rec);
+}
+
+void SkRecorder::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ this->append<SkRecords::DrawAnnotation>(rect, SkString(key), sk_ref_sp(value));
+}
+
+void SkRecorder::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ this->append<SkRecords::DrawEdgeAAQuad>(
+ rect, this->copy(clip, 4), aa, color, mode);
+}
+
+void SkRecorder::onDrawEdgeAAImageSet(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ int totalDstClipCount, totalMatrixCount;
+ SkCanvasPriv::GetDstClipAndMatrixCounts(set, count, &totalDstClipCount, &totalMatrixCount);
+
+ SkAutoTArray<ImageSetEntry> setCopy(count);
+ for (int i = 0; i < count; ++i) {
+ setCopy[i] = set[i];
+ }
+
+ this->append<SkRecords::DrawEdgeAAImageSet>(this->copy(paint), std::move(setCopy), count,
+ this->copy(dstClips, totalDstClipCount),
+ this->copy(preViewMatrices, totalMatrixCount), constraint);
+}
+
+void SkRecorder::onFlush() {
+ this->append<SkRecords::Flush>();
+}
+
+void SkRecorder::willSave() {
+ this->append<SkRecords::Save>();
+}
+
+SkCanvas::SaveLayerStrategy SkRecorder::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ this->append<SkRecords::SaveLayer>(this->copy(rec.fBounds)
+ , this->copy(rec.fPaint)
+ , sk_ref_sp(rec.fBackdrop)
+ , sk_ref_sp(rec.fClipMask)
+ , this->copy(rec.fClipMatrix)
+ , rec.fSaveLayerFlags);
+ return SkCanvas::kNoLayer_SaveLayerStrategy;
+}
+
+bool SkRecorder::onDoSaveBehind(const SkRect* subset) {
+ this->append<SkRecords::SaveBehind>(this->copy(subset));
+ return false;
+}
+
+void SkRecorder::didRestore() {
+ this->append<SkRecords::Restore>(this->getTotalMatrix());
+}
+
+void SkRecorder::didConcat(const SkMatrix& matrix) {
+ this->append<SkRecords::Concat>(matrix);
+}
+
+void SkRecorder::didSetMatrix(const SkMatrix& matrix) {
+ this->append<SkRecords::SetMatrix>(matrix);
+}
+
+void SkRecorder::didTranslate(SkScalar dx, SkScalar dy) {
+ this->append<SkRecords::Translate>(dx, dy);
+}
+
+void SkRecorder::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRect, rect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipRect>(rect, opAA);
+}
+
+void SkRecorder::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRRect, rrect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipRRect>(rrect, opAA);
+}
+
+void SkRecorder::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipPath, path, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipPath>(path, opAA);
+}
+
+void SkRecorder::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ INHERITED(onClipRegion, deviceRgn, op);
+ this->append<SkRecords::ClipRegion>(deviceRgn, op);
+}
+
+sk_sp<SkSurface> SkRecorder::onNewSurface(const SkImageInfo&, const SkSurfaceProps&) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkRecorder.h b/gfx/skia/skia/src/core/SkRecorder.h
new file mode 100644
index 0000000000..c20f84cf8c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecorder_DEFINED
+#define SkRecorder_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/private/SkTDArray.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkMiniRecorder.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecords.h"
+
+class SkBBHFactory;
+
+class SkDrawableList : SkNoncopyable {
+public:
+ SkDrawableList() {}
+ ~SkDrawableList();
+
+ int count() const { return fArray.count(); }
+ SkDrawable* const* begin() const { return fArray.begin(); }
+
+ void append(SkDrawable* drawable);
+
+ // Return a new or ref'd array of pictures that were snapped from our drawables.
+ SkBigPicture::SnapshotArray* newDrawableSnapshot();
+
+private:
+ SkTDArray<SkDrawable*> fArray;
+};
+
+// SkRecorder provides an SkCanvas interface for recording into an SkRecord.
+
+class SkRecorder final : public SkCanvasVirtualEnforcer<SkNoDrawCanvas> {
+public:
+ // Does not take ownership of the SkRecord.
+ SkRecorder(SkRecord*, int width, int height, SkMiniRecorder* = nullptr); // TODO: remove
+ SkRecorder(SkRecord*, const SkRect& bounds, SkMiniRecorder* = nullptr);
+
+ enum DrawPictureMode {
+ Record_DrawPictureMode,
+ Playback_DrawPictureMode,
+ // Plays back top level drawPicture calls only, but records pictures within those.
+ PlaybackTop_DrawPictureMode,
+ };
+ void reset(SkRecord*, const SkRect& bounds, DrawPictureMode, SkMiniRecorder* = nullptr);
+
+ size_t approxBytesUsedBySubPictures() const { return fApproxBytesUsedBySubPictures; }
+
+ SkDrawableList* getDrawableList() const { return fDrawableList.get(); }
+ std::unique_ptr<SkDrawableList> detachDrawableList() { return std::move(fDrawableList); }
+
+ // Make SkRecorder forget entirely about its SkRecord*; all calls to SkRecorder will fail.
+ void forgetRecord();
+
+ void onFlush() override;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override {}
+ void didRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawTextBlob(const SkTextBlob* blob,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVerticesObject(const SkVertices*, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode, const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int count, SkBlendMode, const SkRect* cull, const SkPaint*) override;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onClipRect(const SkRect& rect, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath& path, SkClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkPaint*, SrcRectConstraint) override;
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ void flushMiniRecorder();
+
+private:
+ template <typename T>
+ T* copy(const T*);
+
+ template <typename T>
+ T* copy(const T[], size_t count);
+
+ template<typename T, typename... Args>
+ void append(Args&&...);
+
+ DrawPictureMode fDrawPictureMode;
+ size_t fApproxBytesUsedBySubPictures;
+ SkRecord* fRecord;
+ std::unique_ptr<SkDrawableList> fDrawableList;
+
+ SkMiniRecorder* fMiniRecorder;
+};
+
+#endif//SkRecorder_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecords.cpp b/gfx/skia/skia/src/core/SkRecords.cpp
new file mode 100644
index 0000000000..1b27660b78
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecords.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRecords.h"
+
+namespace SkRecords {
+ PreCachedPath::PreCachedPath(const SkPath& path) : SkPath(path) {
+ this->updateBoundsCache();
+ (void)this->getGenerationID();
+#if 0 // Disabled to see if we ever really race on this. It costs time, chromium:496982.
+ SkPathPriv::FirstDirection junk;
+ (void)SkPathPriv::CheapComputeFirstDirection(*this, &junk);
+#endif
+ }
+
+ TypedMatrix::TypedMatrix(const SkMatrix& matrix) : SkMatrix(matrix) {
+ (void)this->getType();
+ }
+} // namespace SkRecords
diff --git a/gfx/skia/skia/src/core/SkRecords.h b/gfx/skia/skia/src/core/SkRecords.h
new file mode 100644
index 0000000000..2c246961bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecords.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecords_DEFINED
+#define SkRecords_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkVertices.h"
+#include "src/core/SkDrawShadowInfo.h"
+
+namespace SkRecords {
+
+// A list of all the types of canvas calls we can record.
+// Each of these is reified into a struct below.
+//
+// (We're using the macro-of-macro trick here to do several different things with the same list.)
+//
+// We leave this SK_RECORD_TYPES macro defined for use by code that wants to operate on SkRecords
+// types polymorphically. (See SkRecord::Record::{visit,mutate} for an example.)
+//
+// Order doesn't technically matter here, but the compiler can generally generate better code if
+// you keep them semantically grouped, especially the Draws. It's also nice to leave NoOp at 0.
+#define SK_RECORD_TYPES(M) \
+ M(NoOp) \
+ M(Flush) \
+ M(Restore) \
+ M(Save) \
+ M(SaveLayer) \
+ M(SaveBehind) \
+ M(SetMatrix) \
+ M(Translate) \
+ M(Concat) \
+ M(ClipPath) \
+ M(ClipRRect) \
+ M(ClipRect) \
+ M(ClipRegion) \
+ M(DrawArc) \
+ M(DrawDrawable) \
+ M(DrawImage) \
+ M(DrawImageLattice) \
+ M(DrawImageRect) \
+ M(DrawImageNine) \
+ M(DrawDRRect) \
+ M(DrawOval) \
+ M(DrawBehind) \
+ M(DrawPaint) \
+ M(DrawPath) \
+ M(DrawPatch) \
+ M(DrawPicture) \
+ M(DrawPoints) \
+ M(DrawRRect) \
+ M(DrawRect) \
+ M(DrawRegion) \
+ M(DrawTextBlob) \
+ M(DrawAtlas) \
+ M(DrawVertices) \
+ M(DrawShadowRec) \
+ M(DrawAnnotation) \
+ M(DrawEdgeAAQuad) \
+ M(DrawEdgeAAImageSet)
+
+
+// Defines SkRecords::Type, an enum of all record types.
+#define ENUM(T) T##_Type,
+enum Type { SK_RECORD_TYPES(ENUM) };
+#undef ENUM
+
+#define ACT_AS_PTR(ptr) \
+ operator T*() const { return ptr; } \
+ T* operator->() const { return ptr; }
+
+// An Optional doesn't own the pointer's memory, but may need to destroy non-POD data.
+template <typename T>
+class Optional {
+public:
+ Optional() : fPtr(nullptr) {}
+ Optional(T* ptr) : fPtr(ptr) {}
+ Optional(Optional&& o) : fPtr(o.fPtr) {
+ o.fPtr = nullptr;
+ }
+ ~Optional() { if (fPtr) fPtr->~T(); }
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+ Optional(const Optional&) = delete;
+ Optional& operator=(const Optional&) = delete;
+};
+
+// Like Optional, but ptr must not be NULL.
+template <typename T>
+class Adopted {
+public:
+ Adopted(T* ptr) : fPtr(ptr) { SkASSERT(fPtr); }
+ Adopted(Adopted* source) {
+ // Transfer ownership from source to this.
+ fPtr = source->fPtr;
+        source->fPtr = nullptr;
+ }
+ ~Adopted() { if (fPtr) fPtr->~T(); }
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+ Adopted(const Adopted&) = delete;
+ Adopted& operator=(const Adopted&) = delete;
+};
+
+// PODArray doesn't own the pointer's memory, and we assume the data is POD.
+template <typename T>
+class PODArray {
+public:
+ PODArray() {}
+ PODArray(T* ptr) : fPtr(ptr) {}
+ // Default copy and assign.
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+};
+
+#undef ACT_AS_PTR
+
+// SkPath::getBounds() isn't thread safe unless we precache the bounds in a single-threaded context.
+// SkPath::cheapComputeDirection() is similar.
+// Recording is a convenient time to cache these, or we can delay it to between record and playback.
+struct PreCachedPath : public SkPath {
+ PreCachedPath() {}
+ PreCachedPath(const SkPath& path);
+};
+
+// Like SkPath::getBounds(), SkMatrix::getType() isn't thread safe unless we precache it.
+// This may not cover all SkMatrices used by the picture (e.g. some could be hiding in a shader).
+struct TypedMatrix : public SkMatrix {
+ TypedMatrix() {}
+ TypedMatrix(const SkMatrix& matrix);
+};
+
+enum Tags {
+ kDraw_Tag = 1, // May draw something (usually named DrawFoo).
+ kHasImage_Tag = 2, // Contains an SkImage or SkBitmap.
+ kHasText_Tag = 4, // Contains text.
+ kHasPaint_Tag = 8, // May have an SkPaint field, at least optionally.
+
+ kDrawWithPaint_Tag = kDraw_Tag | kHasPaint_Tag,
+};
+
+// A macro to make it a little easier to define a struct that can be stored in SkRecord.
+#define RECORD(T, tags, ...) \
+struct T { \
+ static const Type kType = T##_Type; \
+ static const int kTags = tags; \
+ __VA_ARGS__; \
+};
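+
+// For example, RECORD(Save, 0) expands to roughly:
+//   struct Save {
+//       static const Type kType = Save_Type;
+//       static const int  kTags = 0;
+//   };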
+
+RECORD(NoOp, 0);
+RECORD(Flush, 0);
+RECORD(Restore, 0,
+ TypedMatrix matrix);
+RECORD(Save, 0);
+
+RECORD(SaveLayer, kHasPaint_Tag,
+ Optional<SkRect> bounds;
+ Optional<SkPaint> paint;
+ sk_sp<const SkImageFilter> backdrop;
+ sk_sp<const SkImage> clipMask;
+ Optional<SkMatrix> clipMatrix;
+ SkCanvas::SaveLayerFlags saveLayerFlags);
+
+RECORD(SaveBehind, 0,
+ Optional<SkRect> subset);
+
+RECORD(SetMatrix, 0,
+ TypedMatrix matrix);
+RECORD(Concat, 0,
+ TypedMatrix matrix);
+
+RECORD(Translate, 0,
+ SkScalar dx;
+ SkScalar dy);
+
+struct ClipOpAndAA {
+ ClipOpAndAA() {}
+ ClipOpAndAA(SkClipOp op, bool aa) : fOp(static_cast<unsigned>(op)), fAA(aa) {}
+
+ SkClipOp op() const { return static_cast<SkClipOp>(fOp); }
+ bool aa() const { return fAA != 0; }
+
+private:
+    unsigned fOp : 31; // This really only needs 3 bits, but there's no win today in shrinking it.
+    unsigned fAA : 1;  // MSVC won't pack an enum with a bool, so we call this an unsigned.
+};
+static_assert(sizeof(ClipOpAndAA) == 4, "ClipOpAndAASize");
+
+RECORD(ClipPath, 0,
+ PreCachedPath path;
+ ClipOpAndAA opAA);
+RECORD(ClipRRect, 0,
+ SkRRect rrect;
+ ClipOpAndAA opAA);
+RECORD(ClipRect, 0,
+ SkRect rect;
+ ClipOpAndAA opAA);
+RECORD(ClipRegion, 0,
+ SkRegion region;
+ SkClipOp op);
+
+// While not strictly required, if you have an SkPaint, it's fastest to put it first.
+RECORD(DrawArc, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval;
+ SkScalar startAngle;
+ SkScalar sweepAngle;
+ unsigned useCenter);
+RECORD(DrawDRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect outer;
+ SkRRect inner);
+RECORD(DrawDrawable, kDraw_Tag,
+ Optional<SkMatrix> matrix;
+ SkRect worstCaseBounds;
+ int32_t index);
+RECORD(DrawImage, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkScalar left;
+ SkScalar top);
+RECORD(DrawImageLattice, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ int xCount;
+ PODArray<int> xDivs;
+ int yCount;
+ PODArray<int> yDivs;
+ int flagCount;
+ PODArray<SkCanvas::Lattice::RectType> flags;
+ PODArray<SkColor> colors;
+ SkIRect src;
+ SkRect dst);
+RECORD(DrawImageRect, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ Optional<SkRect> src;
+ SkRect dst;
+ SkCanvas::SrcRectConstraint constraint);
+RECORD(DrawImageNine, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkIRect center;
+ SkRect dst);
+RECORD(DrawOval, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval);
+RECORD(DrawPaint, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint);
+RECORD(DrawBehind, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint);
+RECORD(DrawPath, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PreCachedPath path);
+RECORD(DrawPicture, kDraw_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkPicture> picture;
+ TypedMatrix matrix);
+RECORD(DrawPoints, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkCanvas::PointMode mode;
+ unsigned count;
+ SkPoint* pts);
+RECORD(DrawRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect rrect);
+RECORD(DrawRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect rect);
+RECORD(DrawRegion, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRegion region);
+RECORD(DrawTextBlob, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ sk_sp<const SkTextBlob> blob;
+ SkScalar x;
+ SkScalar y);
+RECORD(DrawPatch, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<SkPoint> cubics;
+ PODArray<SkColor> colors;
+ PODArray<SkPoint> texCoords;
+ SkBlendMode bmode);
+RECORD(DrawAtlas, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> atlas;
+ PODArray<SkRSXform> xforms;
+ PODArray<SkRect> texs;
+ PODArray<SkColor> colors;
+ int count;
+ SkBlendMode mode;
+ Optional<SkRect> cull);
+RECORD(DrawVertices, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ sk_sp<SkVertices> vertices;
+ PODArray<SkVertices::Bone> bones;
+ int boneCount;
+ SkBlendMode bmode);
+RECORD(DrawShadowRec, kDraw_Tag,
+ PreCachedPath path;
+ SkDrawShadowRec rec);
+RECORD(DrawAnnotation, 0, // TODO: kDraw_Tag, skia:5548
+ SkRect rect;
+ SkString key;
+ sk_sp<SkData> value);
+RECORD(DrawEdgeAAQuad, kDraw_Tag,
+ SkRect rect;
+ PODArray<SkPoint> clip;
+ SkCanvas::QuadAAFlags aa;
+ SkColor4f color;
+ SkBlendMode mode);
+RECORD(DrawEdgeAAImageSet, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ SkAutoTArray<SkCanvas::ImageSetEntry> set;
+ int count;
+ PODArray<SkPoint> dstClips;
+ PODArray<SkMatrix> preViewMatrices;
+ SkCanvas::SrcRectConstraint constraint);
+#undef RECORD
+
+} // namespace SkRecords
+
+#endif//SkRecords_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRect.cpp b/gfx/skia/skia/src/core/SkRect.cpp
new file mode 100644
index 0000000000..37f054a053
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRect.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRect.h"
+
+#include "include/private/SkMalloc.h"
+
+bool SkIRect::intersect(const SkIRect& a, const SkIRect& b) {
+ SkIRect tmp = {
+ SkMax32(a.fLeft, b.fLeft),
+ SkMax32(a.fTop, b.fTop),
+ SkMin32(a.fRight, b.fRight),
+ SkMin32(a.fBottom, b.fBottom)
+ };
+ if (tmp.isEmpty()) {
+ return false;
+ }
+ *this = tmp;
+ return true;
+}
+
+void SkIRect::join(const SkIRect& r) {
+ // do nothing if the params are empty
+ if (r.fLeft >= r.fRight || r.fTop >= r.fBottom) {
+ return;
+ }
+
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ *this = r;
+ } else {
+ if (r.fLeft < fLeft) fLeft = r.fLeft;
+ if (r.fTop < fTop) fTop = r.fTop;
+ if (r.fRight > fRight) fRight = r.fRight;
+ if (r.fBottom > fBottom) fBottom = r.fBottom;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkRect::toQuad(SkPoint quad[4]) const {
+ SkASSERT(quad);
+
+ quad[0].set(fLeft, fTop);
+ quad[1].set(fRight, fTop);
+ quad[2].set(fRight, fBottom);
+ quad[3].set(fLeft, fBottom);
+}
+
+#include "include/private/SkNx.h"
+
+bool SkRect::setBoundsCheck(const SkPoint pts[], int count) {
+ SkASSERT((pts && count > 0) || count == 0);
+
+ if (count <= 0) {
+ this->setEmpty();
+ return true;
+ }
+
+ Sk4s min, max;
+ if (count & 1) {
+ min = max = Sk4s(pts->fX, pts->fY,
+ pts->fX, pts->fY);
+ pts += 1;
+ count -= 1;
+ } else {
+ min = max = Sk4s::Load(pts);
+ pts += 2;
+ count -= 2;
+ }
+
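+    // accum stays 0 as long as every coordinate is finite: 0 * finite == 0, while 0 * inf and
+    // 0 * NaN are NaN, and NaN propagates, poisoning the all_finite check below.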
+ Sk4s accum = min * 0;
+ while (count) {
+ Sk4s xy = Sk4s::Load(pts);
+ accum = accum * xy;
+ min = Sk4s::Min(min, xy);
+ max = Sk4s::Max(max, xy);
+ pts += 2;
+ count -= 2;
+ }
+
+ bool all_finite = (accum * 0 == 0).allTrue();
+ if (all_finite) {
+ this->setLTRB(SkTMin(min[0], min[2]), SkTMin(min[1], min[3]),
+ SkTMax(max[0], max[2]), SkTMax(max[1], max[3]));
+ } else {
+ this->setEmpty();
+ }
+ return all_finite;
+}
+
+void SkRect::setBoundsNoCheck(const SkPoint pts[], int count) {
+ if (!this->setBoundsCheck(pts, count)) {
+ this->setLTRB(SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN);
+ }
+}
+
+#define CHECK_INTERSECT(al, at, ar, ab, bl, bt, br, bb) \
+ SkScalar L = SkMaxScalar(al, bl); \
+ SkScalar R = SkMinScalar(ar, br); \
+ SkScalar T = SkMaxScalar(at, bt); \
+ SkScalar B = SkMinScalar(ab, bb); \
+ do { if (!(L < R && T < B)) return false; } while (0)
+ // do the !(opposite) check so we return false if either arg is NaN
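+    // (any comparison with NaN is false, so "L < R && T < B" fails and !(...) triggers the return)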
+
+bool SkRect::intersect(const SkRect& r) {
+ CHECK_INTERSECT(r.fLeft, r.fTop, r.fRight, r.fBottom, fLeft, fTop, fRight, fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+bool SkRect::intersect(const SkRect& a, const SkRect& b) {
+ CHECK_INTERSECT(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+void SkRect::join(const SkRect& r) {
+ if (r.isEmpty()) {
+ return;
+ }
+
+ if (this->isEmpty()) {
+ *this = r;
+ } else {
+ fLeft = SkMinScalar(fLeft, r.fLeft);
+ fTop = SkMinScalar(fTop, r.fTop);
+ fRight = SkMaxScalar(fRight, r.fRight);
+ fBottom = SkMaxScalar(fBottom, r.fBottom);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkString.h"
+#include "src/core/SkStringUtils.h"
+
+static const char* set_scalar(SkString* storage, SkScalar value, SkScalarAsStringType asType) {
+ storage->reset();
+ SkAppendScalar(storage, value, asType);
+ return storage->c_str();
+}
+
+void SkRect::dump(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ SkString line;
+ if (asHex) {
+ SkString tmp;
+ line.printf( "SkRect::MakeLTRB(%s, /* %f */\n", set_scalar(&tmp, fLeft, asType), fLeft);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fTop, asType), fTop);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fRight, asType), fRight);
+ line.appendf(" %s /* %f */);", set_scalar(&tmp, fBottom, asType), fBottom);
+ } else {
+ SkString strL, strT, strR, strB;
+ SkAppendScalarDec(&strL, fLeft);
+ SkAppendScalarDec(&strT, fTop);
+ SkAppendScalarDec(&strR, fRight);
+ SkAppendScalarDec(&strB, fBottom);
+ line.printf("SkRect::MakeLTRB(%s, %s, %s, %s);",
+ strL.c_str(), strT.c_str(), strR.c_str(), strB.c_str());
+ }
+ SkDebugf("%s\n", line.c_str());
+}
diff --git a/gfx/skia/skia/src/core/SkRectPriv.h b/gfx/skia/skia/src/core/SkRectPriv.h
new file mode 100644
index 0000000000..e812f5ef36
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRectPriv.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRectPriv_DEFINED
+#define SkRectPriv_DEFINED
+
+#include "include/core/SkRect.h"
+#include "src/core/SkMathPriv.h"
+
+class SkRectPriv {
+public:
+    // Returns an SkIRect that is very large and can safely round-trip through SkRect while still
+    // being considered non-empty (i.e. width/height > 0), even if we round out the SkRect.
+ static SkIRect MakeILarge() {
+ // SK_MaxS32 >> 1 seemed better, but it did not survive round-trip with SkRect and rounding.
+ // Also, 1 << 29 can be perfectly represented in float, while SK_MaxS32 >> 1 cannot.
+ const int32_t large = 1 << 29;
+ return { -large, -large, large, large };
+ }
+
+ static SkIRect MakeILargestInverted() {
+ return { SK_MaxS32, SK_MaxS32, SK_MinS32, SK_MinS32 };
+ }
+
+ static SkRect MakeLargeS32() {
+ SkRect r;
+ r.set(MakeILarge());
+ return r;
+ }
+
+ static SkRect MakeLargest() {
+ return { SK_ScalarMin, SK_ScalarMin, SK_ScalarMax, SK_ScalarMax };
+ }
+
+ static constexpr SkRect MakeLargestInverted() {
+ return { SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin };
+ }
+
+ static void GrowToInclude(SkRect* r, const SkPoint& pt) {
+ r->fLeft = SkMinScalar(pt.fX, r->fLeft);
+ r->fRight = SkMaxScalar(pt.fX, r->fRight);
+ r->fTop = SkMinScalar(pt.fY, r->fTop);
+ r->fBottom = SkMaxScalar(pt.fY, r->fBottom);
+ }
+
+    // Conservative check for whether r can be expressed in fixed-point.
+    // May return false for very large values that would actually have fit.
+ static bool FitsInFixed(const SkRect& r) {
+ return SkFitsInFixed(r.fLeft) && SkFitsInFixed(r.fTop) &&
+ SkFitsInFixed(r.fRight) && SkFitsInFixed(r.fBottom);
+ }
+
+ static bool Is16Bit(const SkIRect& r) {
+ return SkTFitsIn<int16_t>(r.fLeft) && SkTFitsIn<int16_t>(r.fTop) &&
+ SkTFitsIn<int16_t>(r.fRight) && SkTFitsIn<int16_t>(r.fBottom);
+ }
+};
+
+
+#endif//SkRectPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRegion.cpp b/gfx/skia/skia/src/core/SkRegion.cpp
new file mode 100644
index 0000000000..efa50bc539
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion.cpp
@@ -0,0 +1,1582 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRegion.h"
+
+#include "include/private/SkMacros.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkRegionPriv.h"
+#include "src/core/SkSafeMath.h"
+#include "src/utils/SkUTF.h"
+
+#include <utility>
+
+/* Region Layout
+ *
+ * TOP
+ *
+ * [ Bottom, X-Intervals, [Left, Right]..., X-Sentinel ]
+ * ...
+ *
+ * Y-Sentinel
+ */
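+//
+// For example (illustrative), the single rect {L=10, T=20, R=30, B=40} is encoded as
+//   { 20, 40, 1, 10, 30, Sentinel, Sentinel }
+// i.e. top, bottom, interval count, left, right, X-sentinel, Y-sentinel -- the seven-run
+// shape that RunsAreARect() below checks for.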
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define SkRegion_gEmptyRunHeadPtr ((SkRegionPriv::RunHead*)-1)
+#define SkRegion_gRectRunHeadPtr nullptr
+
+constexpr int kRunArrayStackCount = 256;
+
+// This is a simple data structure which is like a SkSTArray<N,T,true>, except that:
+// - It does not initialize memory.
+// - It does not distinguish between reserved space and initialized space.
+// - resizeToAtLeast() instead of resize()
+// - Uses sk_realloc_throw()
+// - Can never be made smaller.
+// Measurement: for the `region_union_16` benchmark, this is 6% faster.
+class RunArray {
+public:
+ RunArray() { fPtr = fStack; }
+ #ifdef SK_DEBUG
+ int count() const { return fCount; }
+ #endif
+ SkRegionPriv::RunType& operator[](int i) {
+ SkASSERT((unsigned)i < (unsigned)fCount);
+ return fPtr[i];
+ }
+ /** Resize the array to a size greater-than-or-equal-to count. */
+ void resizeToAtLeast(int count) {
+ if (count > fCount) {
+ // leave at least 50% extra space for future growth.
+ count += count >> 1;
+ fMalloc.realloc(count);
+ if (fPtr == fStack) {
+ memcpy(fMalloc.get(), fStack, fCount * sizeof(SkRegionPriv::RunType));
+ }
+ fPtr = fMalloc.get();
+ fCount = count;
+ }
+ }
+private:
+ SkRegionPriv::RunType fStack[kRunArrayStackCount];
+ SkAutoTMalloc<SkRegionPriv::RunType> fMalloc;
+ int fCount = kRunArrayStackCount;
+ SkRegionPriv::RunType* fPtr; // non-owning pointer
+};
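+
+// A minimal usage sketch (illustrative only): runs live in fStack until they outgrow
+// kRunArrayStackCount, then spill to the heap, copying the stack contents once:
+//   RunArray runs;
+//   runs.resizeToAtLeast(2 * kRunArrayStackCount); // reallocates; copies fStack into fMalloc
+//   runs[0] = SkRegion_kRunTypeSentinel;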
+
+/* Pass in the beginning of the intervals.
+ * We back up 1 to read the interval count.
+ * Returns the beginning of the next scanline (i.e. the next Y-value).
+ */
+static SkRegionPriv::RunType* skip_intervals(const SkRegionPriv::RunType runs[]) {
+ int intervals = runs[-1];
+#ifdef SK_DEBUG
+ if (intervals > 0) {
+ SkASSERT(runs[0] < runs[1]);
+ SkASSERT(runs[1] < SkRegion_kRunTypeSentinel);
+ } else {
+ SkASSERT(0 == intervals);
+ SkASSERT(SkRegion_kRunTypeSentinel == runs[0]);
+ }
+#endif
+ runs += intervals * 2 + 1;
+ return const_cast<SkRegionPriv::RunType*>(runs);
+}
+
+bool SkRegion::RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds) {
+ assert_sentinel(runs[0], false); // top
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (count == kRectRegionRuns) {
+ assert_sentinel(runs[1], false); // bottom
+ SkASSERT(1 == runs[2]);
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ assert_sentinel(runs[5], true);
+ assert_sentinel(runs[6], true);
+
+ SkASSERT(runs[0] < runs[1]); // valid height
+ SkASSERT(runs[3] < runs[4]); // valid width
+
+ bounds->setLTRB(runs[3], runs[0], runs[4], runs[1]);
+ return true;
+ }
+ return false;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+SkRegion::SkRegion() {
+ fBounds.setEmpty();
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+}
+
+SkRegion::SkRegion(const SkRegion& src) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRegion(src);
+}
+
+SkRegion::SkRegion(const SkIRect& rect) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRect(rect);
+}
+
+SkRegion::~SkRegion() {
+ this->freeRuns();
+}
+
+void SkRegion::freeRuns() {
+ if (this->isComplex()) {
+ SkASSERT(fRunHead->fRefCnt >= 1);
+ if (--fRunHead->fRefCnt == 0) {
+ sk_free(fRunHead);
+ }
+ }
+}
+
+void SkRegion::allocateRuns(int count, int ySpanCount, int intervalCount) {
+ fRunHead = RunHead::Alloc(count, ySpanCount, intervalCount);
+}
+
+void SkRegion::allocateRuns(int count) {
+ fRunHead = RunHead::Alloc(count);
+}
+
+void SkRegion::allocateRuns(const RunHead& head) {
+ fRunHead = RunHead::Alloc(head.fRunCount,
+ head.getYSpanCount(),
+ head.getIntervalCount());
+}
+
+SkRegion& SkRegion::operator=(const SkRegion& src) {
+ (void)this->setRegion(src);
+ return *this;
+}
+
+void SkRegion::swap(SkRegion& other) {
+ using std::swap;
+ swap(fBounds, other.fBounds);
+ swap(fRunHead, other.fRunHead);
+}
+
+int SkRegion::computeRegionComplexity() const {
+ if (this->isEmpty()) {
+ return 0;
+ } else if (this->isRect()) {
+ return 1;
+ }
+ return fRunHead->getIntervalCount();
+}
+
+bool SkRegion::setEmpty() {
+ this->freeRuns();
+ fBounds.setEmpty();
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+ return false;
+}
+
+bool SkRegion::setRect(const SkIRect& r) {
+ if (r.isEmpty() ||
+ SkRegion_kRunTypeSentinel == r.right() ||
+ SkRegion_kRunTypeSentinel == r.bottom()) {
+ return this->setEmpty();
+ }
+ this->freeRuns();
+ fBounds = r;
+ fRunHead = SkRegion_gRectRunHeadPtr;
+ return true;
+}
+
+bool SkRegion::setRegion(const SkRegion& src) {
+ if (this != &src) {
+ this->freeRuns();
+
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (this->isComplex()) {
+ fRunHead->fRefCnt++;
+ }
+ }
+ return fRunHead != SkRegion_gEmptyRunHeadPtr;
+}
+
+bool SkRegion::op(const SkIRect& rect, const SkRegion& rgn, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(tmp, rgn, op);
+}
+
+bool SkRegion::op(const SkRegion& rgn, const SkIRect& rect, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(rgn, tmp, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+#include <stdio.h>
+char* SkRegion::toString() {
+ Iterator iter(*this);
+ int count = 0;
+ while (!iter.done()) {
+ count++;
+ iter.next();
+ }
+ // 4 ints, up to 10 digits each plus sign, 3 commas, '(', ')', SkRegion() and '\0'
+ const int max = (count*((11*4)+5))+11+1;
+ char* result = (char*)sk_malloc_throw(max);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ count = snprintf(result, max, "SkRegion(");
+ iter.reset(*this);
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ count += snprintf(result+count, max - count,
+ "(%d,%d,%d,%d)", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ iter.next();
+ }
+ count += snprintf(result+count, max - count, ")");
+ return result;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkRegion::count_runtype_values(int* itop, int* ibot) const {
+ int maxT;
+
+ if (this->isRect()) {
+ maxT = 2;
+ } else {
+ SkASSERT(this->isComplex());
+ maxT = fRunHead->getIntervalCount() * 2;
+ }
+ *itop = fBounds.fTop;
+ *ibot = fBounds.fBottom;
+ return maxT;
+}
+
+static bool isRunCountEmpty(int count) {
+ return count <= 2;
+}
+
+bool SkRegion::setRuns(RunType runs[], int count) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkASSERT(count > 0);
+
+ if (isRunCountEmpty(count)) {
+ // SkDEBUGF("setRuns: empty\n");
+ assert_sentinel(runs[count-1], true);
+ return this->setEmpty();
+ }
+
+ // Trim off any empty spans from the top and bottom.
+ // It is odd that this is needed; perhaps op() could be smarter...
+ if (count > kRectRegionRuns) {
+ RunType* stop = runs + count;
+ assert_sentinel(runs[0], false); // top
+ assert_sentinel(runs[1], false); // bottom
+ // runs[2] is uncomputed intervalCount
+
+ if (runs[3] == SkRegion_kRunTypeSentinel) { // should be first left...
+ runs += 3; // skip empty initial span
+ runs[0] = runs[-2]; // set new top to prev bottom
+ assert_sentinel(runs[1], false); // bot: a sentinel would mean two in a row
+ assert_sentinel(runs[2], false); // intervalcount
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ }
+
+ assert_sentinel(stop[-1], true);
+ assert_sentinel(stop[-2], true);
+
+ // now check for a trailing empty span
+ if (stop[-5] == SkRegion_kRunTypeSentinel) { // eek, stop[-4] was a bottom with no x-runs
+ stop[-4] = SkRegion_kRunTypeSentinel; // kill empty last span
+ stop -= 3;
+ assert_sentinel(stop[-1], true); // last y-sentinel
+ assert_sentinel(stop[-2], true); // last x-sentinel
+ assert_sentinel(stop[-3], false); // last right
+ assert_sentinel(stop[-4], false); // last left
+ assert_sentinel(stop[-5], false); // last interval-count
+ assert_sentinel(stop[-6], false); // last bottom
+ }
+ count = (int)(stop - runs);
+ }
+
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (SkRegion::RunsAreARect(runs, count, &fBounds)) {
+ return this->setRect(fBounds);
+ }
+
+ // if we get here, we need to become a complex region
+
+ if (!this->isComplex() || fRunHead->fRunCount != count) {
+ this->freeRuns();
+ this->allocateRuns(count);
+ SkASSERT(this->isComplex());
+ }
+
+ // must call this before we can write directly into runs()
+ // in case we are sharing the buffer with another region (copy on write)
+ fRunHead = fRunHead->ensureWritable();
+ memcpy(fRunHead->writable_runs(), runs, count * sizeof(RunType));
+ fRunHead->computeRunBounds(&fBounds);
+
+ // Our computed bounds might be too large, so we have to check here.
+ if (fBounds.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ return true;
+}
+
+void SkRegion::BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]) {
+ runs[0] = bounds.fTop;
+ runs[1] = bounds.fBottom;
+ runs[2] = 1; // 1 interval for this scanline
+ runs[3] = bounds.fLeft;
+ runs[4] = bounds.fRight;
+ runs[5] = SkRegion_kRunTypeSentinel;
+ runs[6] = SkRegion_kRunTypeSentinel;
+}
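+
+// For example, bounds {L=1, T=2, R=3, B=4} yields the 7-value
+// (kRectRegionRuns) array [2, 4, 1, 1, 3, S, S], where
+// S == SkRegion_kRunTypeSentinel.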
+
+bool SkRegion::contains(int32_t x, int32_t y) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (!fBounds.contains(x, y)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* runs = fRunHead->findScanline(y);
+
+ // Skip the Bottom and IntervalCount
+ runs += 2;
+
+ // Just walk this scanline, checking each interval. The X-sentinel will
+ // appear as a left-interval (runs[0]) and should abort the search.
+ //
+ // We could do a bsearch, using interval-count (runs[1]), but need to time
+ // when that would be worthwhile.
+ //
+ for (;;) {
+ if (x < runs[0]) {
+ break;
+ }
+ if (x < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+static SkRegionPriv::RunType scanline_bottom(const SkRegionPriv::RunType runs[]) {
+ return runs[0];
+}
+
+static const SkRegionPriv::RunType* scanline_next(const SkRegionPriv::RunType runs[]) {
+ // skip [B N [L R]... S]
+ return runs + 2 + runs[1] * 2 + 1;
+}
+
+static bool scanline_contains(const SkRegionPriv::RunType runs[],
+ SkRegionPriv::RunType L, SkRegionPriv::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (L < runs[0]) {
+ break;
+ }
+ if (R <= runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+bool SkRegion::contains(const SkIRect& r) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (!fBounds.contains(r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(r.fTop);
+ for (;;) {
+ if (!scanline_contains(scanline, r.fLeft, r.fRight)) {
+ return false;
+ }
+ if (r.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return true;
+}
+
+bool SkRegion::contains(const SkRegion& rgn) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkDEBUGCODE(SkRegionPriv::Validate(rgn));
+
+ if (this->isEmpty() || rgn.isEmpty() || !fBounds.contains(rgn.fBounds)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ if (rgn.isRect()) {
+ return this->contains(rgn.getBounds());
+ }
+
+ /*
+ * A contains B is equivalent to
+ * B - A == 0
+ */
+ return !Oper(rgn, *this, kDifference_Op, nullptr);
+}
+
+const SkRegion::RunType* SkRegion::getRuns(RunType tmpStorage[],
+ int* intervals) const {
+ SkASSERT(tmpStorage && intervals);
+ const RunType* runs = tmpStorage;
+
+ if (this->isEmpty()) {
+ tmpStorage[0] = SkRegion_kRunTypeSentinel;
+ *intervals = 0;
+ } else if (this->isRect()) {
+ BuildRectRuns(fBounds, tmpStorage);
+ *intervals = 1;
+ } else {
+ runs = fRunHead->readonly_runs();
+ *intervals = fRunHead->getIntervalCount();
+ }
+ return runs;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool scanline_intersects(const SkRegionPriv::RunType runs[],
+ SkRegionPriv::RunType L, SkRegionPriv::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (R <= runs[0]) {
+ break;
+ }
+ if (L < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
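+
+// Note the contrast with scanline_contains() above: intersects() succeeds on
+// any overlap (L is left of an interval's right edge while R is right of its
+// left edge), whereas contains() requires a single interval to cover all of
+// [L, R].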
+
+bool SkRegion::intersects(const SkIRect& r) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (this->isEmpty() || r.isEmpty()) {
+ return false;
+ }
+
+ SkIRect sect;
+ if (!sect.intersect(fBounds, r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(sect.fTop);
+ for (;;) {
+ if (scanline_intersects(scanline, sect.fLeft, sect.fRight)) {
+ return true;
+ }
+ if (sect.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return false;
+}
+
+bool SkRegion::intersects(const SkRegion& rgn) const {
+ if (this->isEmpty() || rgn.isEmpty()) {
+ return false;
+ }
+
+ if (!SkIRect::Intersects(fBounds, rgn.fBounds)) {
+ return false;
+ }
+
+ bool weAreARect = this->isRect();
+ bool theyAreARect = rgn.isRect();
+
+ if (weAreARect && theyAreARect) {
+ return true;
+ }
+ if (weAreARect) {
+ return rgn.intersects(this->getBounds());
+ }
+ if (theyAreARect) {
+ return this->intersects(rgn.getBounds());
+ }
+
+ // both of us are complex
+ return Oper(*this, rgn, kIntersect_Op, nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::operator==(const SkRegion& b) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkDEBUGCODE(SkRegionPriv::Validate(b));
+
+ if (this == &b) {
+ return true;
+ }
+ if (fBounds != b.fBounds) {
+ return false;
+ }
+
+ const SkRegion::RunHead* ah = fRunHead;
+ const SkRegion::RunHead* bh = b.fRunHead;
+
+ // this catches empties and rects being equal
+ if (ah == bh) {
+ return true;
+ }
+ // now we insist that both are complex (but different ptrs)
+ if (!this->isComplex() || !b.isComplex()) {
+ return false;
+ }
+ return ah->fRunCount == bh->fRunCount &&
+ !memcmp(ah->readonly_runs(), bh->readonly_runs(),
+ ah->fRunCount * sizeof(SkRegion::RunType));
+}
+
+// Return a (new) offset such that when applied (+=) to min and max, we don't overflow/underflow
+static int32_t pin_offset_s32(int32_t min, int32_t max, int32_t offset) {
+ SkASSERT(min <= max);
+ const int32_t lo = -SK_MaxS32-1,
+ hi = +SK_MaxS32;
+ if ((int64_t)min + offset < lo) { offset = lo - min; }
+ if ((int64_t)max + offset > hi) { offset = hi - max; }
+ return offset;
+}
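+
+// For example, with max == SK_MaxS32 - 5 and offset == 10, the returned
+// offset is clamped to 5, so max + offset lands exactly on SK_MaxS32.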
+
+void SkRegion::translate(int dx, int dy, SkRegion* dst) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (nullptr == dst) {
+ return;
+ }
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+ // pin dx and dy so we don't overflow our existing bounds
+ dx = pin_offset_s32(fBounds.fLeft, fBounds.fRight, dx);
+ dy = pin_offset_s32(fBounds.fTop, fBounds.fBottom, dy);
+
+ if (this->isRect()) {
+ dst->setRect(fBounds.makeOffset(dx, dy));
+ } else {
+ if (this == dst) {
+ dst->fRunHead = dst->fRunHead->ensureWritable();
+ } else {
+ SkRegion tmp;
+ tmp.allocateRuns(*fRunHead);
+ SkASSERT(tmp.isComplex());
+ tmp.fBounds = fBounds;
+ dst->swap(tmp);
+ }
+
+ dst->fBounds.offset(dx, dy);
+
+ const RunType* sruns = fRunHead->readonly_runs();
+ RunType* druns = dst->fRunHead->writable_runs();
+
+ *druns++ = (SkRegion::RunType)(*sruns++ + dy); // top
+ for (;;) {
+ int bottom = *sruns++;
+ if (bottom == SkRegion_kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(bottom + dy); // bottom;
+ *druns++ = *sruns++; // copy intervalCount;
+ for (;;) {
+ int x = *sruns++;
+ if (x == SkRegion_kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(x + dx);
+ *druns++ = (SkRegion::RunType)(*sruns++ + dx);
+ }
+ *druns++ = SkRegion_kRunTypeSentinel; // x sentinel
+ }
+ *druns++ = SkRegion_kRunTypeSentinel; // y sentinel
+
+ SkASSERT(sruns - fRunHead->readonly_runs() == fRunHead->fRunCount);
+ SkASSERT(druns - dst->fRunHead->readonly_runs() == dst->fRunHead->fRunCount);
+ }
+
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::setRects(const SkIRect rects[], int count) {
+ if (0 == count) {
+ this->setEmpty();
+ } else {
+ this->setRect(rects[0]);
+ for (int i = 1; i < count; i++) {
+ this->op(rects[i], kUnion_Op);
+ }
+ }
+ return !this->isEmpty();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+#ifdef SK_DEBUG
+static void assert_valid_pair(int left, int rite)
+{
+ SkASSERT(left == SkRegion_kRunTypeSentinel || left < rite);
+}
+#else
+ #define assert_valid_pair(left, rite)
+#endif
+
+struct spanRec {
+ const SkRegionPriv::RunType* fA_runs;
+ const SkRegionPriv::RunType* fB_runs;
+ int fA_left, fA_rite, fB_left, fB_rite;
+ int fLeft, fRite, fInside;
+
+ void init(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[]) {
+ fA_left = *a_runs++;
+ fA_rite = *a_runs++;
+ fB_left = *b_runs++;
+ fB_rite = *b_runs++;
+
+ fA_runs = a_runs;
+ fB_runs = b_runs;
+ }
+
+ bool done() const {
+ SkASSERT(fA_left <= SkRegion_kRunTypeSentinel);
+ SkASSERT(fB_left <= SkRegion_kRunTypeSentinel);
+ return fA_left == SkRegion_kRunTypeSentinel &&
+ fB_left == SkRegion_kRunTypeSentinel;
+ }
+
+ void next() {
+ assert_valid_pair(fA_left, fA_rite);
+ assert_valid_pair(fB_left, fB_rite);
+
+ int inside, left, rite SK_INIT_TO_AVOID_WARNING;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ int a_left = fA_left;
+ int a_rite = fA_rite;
+ int b_left = fB_left;
+ int b_rite = fB_rite;
+
+ if (a_left < b_left) {
+ inside = 1;
+ left = a_left;
+ if (a_rite <= b_left) { // [...] <...>
+ rite = a_rite;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = a_left = b_left;
+ }
+ } else if (b_left < a_left) {
+ inside = 2;
+ left = b_left;
+ if (b_rite <= a_left) { // [...] <...>
+ rite = b_rite;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = b_left = a_left;
+ }
+ } else { // a_left == b_left
+ inside = 3;
+ left = a_left; // or b_left
+ if (a_rite <= b_rite) {
+ rite = b_left = a_rite;
+ a_flush = true;
+ }
+ if (b_rite <= a_rite) {
+ rite = a_left = b_rite;
+ b_flush = true;
+ }
+ }
+
+ if (a_flush) {
+ a_left = *fA_runs++;
+ a_rite = *fA_runs++;
+ }
+ if (b_flush) {
+ b_left = *fB_runs++;
+ b_rite = *fB_runs++;
+ }
+
+ SkASSERT(left <= rite);
+
+ // now update our state
+ fA_left = a_left;
+ fA_rite = a_rite;
+ fB_left = b_left;
+ fB_rite = b_rite;
+
+ fLeft = left;
+ fRite = rite;
+ fInside = inside;
+ }
+};
+
+static int distance_to_sentinel(const SkRegionPriv::RunType* runs) {
+ const SkRegionPriv::RunType* ptr = runs;
+ while (*ptr != SkRegion_kRunTypeSentinel) { ptr += 2; }
+ return ptr - runs;
+}
+
+static int operate_on_span(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[],
+ RunArray* array, int dstOffset,
+ int min, int max) {
+ // This is the worst case for this span, plus two for the TWO terminating sentinels.
+ array->resizeToAtLeast(
+ dstOffset + distance_to_sentinel(a_runs) + distance_to_sentinel(b_runs) + 2);
+ SkRegionPriv::RunType* dst = &(*array)[dstOffset]; // get pointer AFTER resizing.
+
+ spanRec rec;
+ bool firstInterval = true;
+
+ rec.init(a_runs, b_runs);
+
+ while (!rec.done()) {
+ rec.next();
+
+ int left = rec.fLeft;
+ int rite = rec.fRite;
+
+ // add left,rite to our dst buffer (checking for coincidence)
+ if ((unsigned)(rec.fInside - min) <= (unsigned)(max - min) &&
+ left < rite) { // skip if equal
+ if (firstInterval || *(dst - 1) < left) {
+ *dst++ = (SkRegionPriv::RunType)(left);
+ *dst++ = (SkRegionPriv::RunType)(rite);
+ firstInterval = false;
+ } else {
+ // update the right edge
+ *(dst - 1) = (SkRegionPriv::RunType)(rite);
+ }
+ }
+ }
+ SkASSERT(dst < &(*array)[array->count() - 1]);
+ *dst++ = SkRegion_kRunTypeSentinel;
+ return dst - &(*array)[0];
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static const struct {
+ uint8_t fMin;
+ uint8_t fMax;
+} gOpMinMax[] = {
+ { 1, 1 }, // Difference
+ { 3, 3 }, // Intersection
+ { 1, 3 }, // Union
+ { 1, 2 } // XOR
+};
+// need to ensure that the op enum lines up with our minmax array
+static_assert(0 == SkRegion::kDifference_Op, "");
+static_assert(1 == SkRegion::kIntersect_Op, "");
+static_assert(2 == SkRegion::kUnion_Op, "");
+static_assert(3 == SkRegion::kXOR_Op, "");
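+
+// The fMin/fMax pairs filter spanRec::next()'s fInside value, which is a
+// bitmask: 1 == inside A only, 2 == inside B only, 3 == inside both. A span
+// is emitted when fMin <= fInside <= fMax, so e.g. intersection keeps only 3,
+// union keeps 1..3, and XOR keeps 1..2.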
+
+class RgnOper {
+public:
+ RgnOper(int top, RunArray* array, SkRegion::Op op)
+ : fMin(gOpMinMax[op].fMin)
+ , fMax(gOpMinMax[op].fMax)
+ , fArray(array)
+ , fTop((SkRegionPriv::RunType)top) // just a first guess, we might update this
+ { SkASSERT((unsigned)op <= 3); }
+
+ void addSpan(int bottom, const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[]) {
+ // skip X values and slots for the next Y+intervalCount
+ int start = fPrevDst + fPrevLen + 2;
+ // start points to beginning of dst interval
+ int stop = operate_on_span(a_runs, b_runs, fArray, start, fMin, fMax);
+ size_t len = SkToSizeT(stop - start);
+ SkASSERT(len >= 1 && (len & 1) == 1);
+ SkASSERT(SkRegion_kRunTypeSentinel == (*fArray)[stop - 1]);
+
+ // Assert memcmp won't exceed fArray->count().
+ SkASSERT(fArray->count() >= SkToInt(start + len - 1));
+ if (fPrevLen == len &&
+ (1 == len || !memcmp(&(*fArray)[fPrevDst],
+ &(*fArray)[start],
+ (len - 1) * sizeof(SkRegionPriv::RunType)))) {
+ // update Y value
+ (*fArray)[fPrevDst - 2] = (SkRegionPriv::RunType)bottom;
+ } else { // accept the new span
+ if (len == 1 && fPrevLen == 0) {
+ fTop = (SkRegionPriv::RunType)bottom; // just update our bottom
+ } else {
+ (*fArray)[start - 2] = (SkRegionPriv::RunType)bottom;
+ (*fArray)[start - 1] = SkToS32(len >> 1);
+ fPrevDst = start;
+ fPrevLen = len;
+ }
+ }
+ }
+
+ int flush() {
+ (*fArray)[fStartDst] = fTop;
+ // Previously reserved enough for TWO sentinels.
+ SkASSERT(fArray->count() > SkToInt(fPrevDst + fPrevLen));
+ (*fArray)[fPrevDst + fPrevLen] = SkRegion_kRunTypeSentinel;
+ return (int)(fPrevDst - fStartDst + fPrevLen + 1);
+ }
+
+ bool isEmpty() const { return 0 == fPrevLen; }
+
+ uint8_t fMin, fMax;
+
+private:
+ RunArray* fArray;
+ int fStartDst = 0;
+ int fPrevDst = 1;
+ size_t fPrevLen = 0; // will never match a length from operate_on_span
+ SkRegionPriv::RunType fTop;
+};
+
+// want a unique value to signal that we exited due to quickExit
+#define QUICK_EXIT_TRUE_COUNT (-1)
+
+static int operate(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[],
+ RunArray* dst,
+ SkRegion::Op op,
+ bool quickExit) {
+ const SkRegionPriv::RunType gEmptyScanline[] = {
+ 0, // dummy bottom value
+ 0, // zero intervals
+ SkRegion_kRunTypeSentinel,
+ // just need a 2nd value, since spanRec.init() reads 2 values, even
+ // though if the first value is the sentinel, it ignores the 2nd value.
+ // Without the 2nd value here, we might read uninitialized memory.
+ // This happens when we are using gSentinel, which points at
+ // our sentinel value.
+ 0
+ };
+ const SkRegionPriv::RunType* const gSentinel = &gEmptyScanline[2];
+
+ int a_top = *a_runs++;
+ int a_bot = *a_runs++;
+ int b_top = *b_runs++;
+ int b_bot = *b_runs++;
+
+ a_runs += 1; // skip the intervalCount;
+ b_runs += 1; // skip the intervalCount;
+
+ // Now a_runs and b_runs point to their intervals (or sentinel).
+
+ assert_sentinel(a_top, false);
+ assert_sentinel(a_bot, false);
+ assert_sentinel(b_top, false);
+ assert_sentinel(b_bot, false);
+
+ RgnOper oper(SkMin32(a_top, b_top), dst, op);
+
+ int prevBot = SkRegion_kRunTypeSentinel; // so we fail the first test
+
+ while (a_bot < SkRegion_kRunTypeSentinel ||
+ b_bot < SkRegion_kRunTypeSentinel) {
+ int top, bot SK_INIT_TO_AVOID_WARNING;
+ const SkRegionPriv::RunType* run0 = gSentinel;
+ const SkRegionPriv::RunType* run1 = gSentinel;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ if (a_top < b_top) {
+ top = a_top;
+ run0 = a_runs;
+ if (a_bot <= b_top) { // [...] <...>
+ bot = a_bot;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = a_top = b_top;
+ }
+ } else if (b_top < a_top) {
+ top = b_top;
+ run1 = b_runs;
+ if (b_bot <= a_top) { // [...] <...>
+ bot = b_bot;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = b_top = a_top;
+ }
+ } else { // a_top == b_top
+ top = a_top; // or b_top
+ run0 = a_runs;
+ run1 = b_runs;
+ if (a_bot <= b_bot) {
+ bot = b_top = a_bot;
+ a_flush = true;
+ }
+ if (b_bot <= a_bot) {
+ bot = a_top = b_bot;
+ b_flush = true;
+ }
+ }
+
+ if (top > prevBot) {
+ oper.addSpan(top, gSentinel, gSentinel);
+ }
+ oper.addSpan(bot, run0, run1);
+
+ if (quickExit && !oper.isEmpty()) {
+ return QUICK_EXIT_TRUE_COUNT;
+ }
+
+ if (a_flush) {
+ a_runs = skip_intervals(a_runs);
+ a_top = a_bot;
+ a_bot = *a_runs++;
+ a_runs += 1; // skip uninitialized intervalCount
+ if (a_bot == SkRegion_kRunTypeSentinel) {
+ a_top = a_bot;
+ }
+ }
+ if (b_flush) {
+ b_runs = skip_intervals(b_runs);
+ b_top = b_bot;
+ b_bot = *b_runs++;
+ b_runs += 1; // skip uninitialized intervalCount
+ if (b_bot == SkRegion_kRunTypeSentinel) {
+ b_top = b_bot;
+ }
+ }
+
+ prevBot = bot;
+ }
+ return oper.flush();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that it represents (i.e. the number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the constant overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+#if 0 // UNUSED
+static int count_to_intervals(int count) {
+ SkASSERT(count >= 6); // a single rect is 6 values
+ return (count - 4) >> 1;
+}
+#endif
+
+static bool setEmptyCheck(SkRegion* result) {
+ return result ? result->setEmpty() : false;
+}
+
+static bool setRectCheck(SkRegion* result, const SkIRect& rect) {
+ return result ? result->setRect(rect) : !rect.isEmpty();
+}
+
+static bool setRegionCheck(SkRegion* result, const SkRegion& rgn) {
+ return result ? result->setRegion(rgn) : !rgn.isEmpty();
+}
+
+bool SkRegion::Oper(const SkRegion& rgnaOrig, const SkRegion& rgnbOrig, Op op,
+ SkRegion* result) {
+ SkASSERT((unsigned)op < kOpCount);
+
+ if (kReplace_Op == op) {
+ return setRegionCheck(result, rgnbOrig);
+ }
+
+ // switch to using pointers, so we can swap them as needed
+ const SkRegion* rgna = &rgnaOrig;
+ const SkRegion* rgnb = &rgnbOrig;
+ // after this point, do not refer to rgnaOrig or rgnbOrig!!!
+
+ // collapse difference and reverse-difference into just difference
+ if (kReverseDifference_Op == op) {
+ using std::swap;
+ swap(rgna, rgnb);
+ op = kDifference_Op;
+ }
+
+ SkIRect bounds;
+ bool a_empty = rgna->isEmpty();
+ bool b_empty = rgnb->isEmpty();
+ bool a_rect = rgna->isRect();
+ bool b_rect = rgnb->isRect();
+
+ switch (op) {
+ case kDifference_Op:
+ if (a_empty) {
+ return setEmptyCheck(result);
+ }
+ if (b_empty || !SkIRect::Intersects(rgna->fBounds, rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.containsNoEmptyCheck(rgna->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ break;
+
+ case kIntersect_Op:
+ if ((a_empty | b_empty)
+ || !bounds.intersect(rgna->fBounds, rgnb->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ if (a_rect & b_rect) {
+ return setRectCheck(result, bounds);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+
+ case kUnion_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ break;
+
+ case kXOR_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return false;
+ }
+
+ RunType tmpA[kRectRegionRuns];
+ RunType tmpB[kRectRegionRuns];
+
+ int a_intervals, b_intervals;
+ const RunType* a_runs = rgna->getRuns(tmpA, &a_intervals);
+ const RunType* b_runs = rgnb->getRuns(tmpB, &b_intervals);
+
+ RunArray array;
+ int count = operate(a_runs, b_runs, &array, op, nullptr == result);
+ SkASSERT(count <= array.count());
+
+ if (result) {
+ SkASSERT(count >= 0);
+ return result->setRuns(&array[0], count);
+ } else {
+ return (QUICK_EXIT_TRUE_COUNT == count) || !isRunCountEmpty(count);
+ }
+}
+
+bool SkRegion::op(const SkRegion& rgna, const SkRegion& rgnb, Op op) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ return SkRegion::Oper(rgna, rgnb, op, this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkBuffer.h"
+
+size_t SkRegion::writeToMemory(void* storage) const {
+ if (nullptr == storage) {
+ size_t size = sizeof(int32_t); // -1 (empty), 0 (rect), runCount
+ if (!this->isEmpty()) {
+ size += sizeof(fBounds);
+ if (this->isComplex()) {
+ size += 2 * sizeof(int32_t); // ySpanCount + intervalCount
+ size += fRunHead->fRunCount * sizeof(RunType);
+ }
+ }
+ return size;
+ }
+
+ SkWBuffer buffer(storage);
+
+ if (this->isEmpty()) {
+ buffer.write32(-1);
+ } else {
+ bool isRect = this->isRect();
+
+ buffer.write32(isRect ? 0 : fRunHead->fRunCount);
+ buffer.write(&fBounds, sizeof(fBounds));
+
+ if (!isRect) {
+ buffer.write32(fRunHead->getYSpanCount());
+ buffer.write32(fRunHead->getIntervalCount());
+ buffer.write(fRunHead->readonly_runs(),
+ fRunHead->fRunCount * sizeof(RunType));
+ }
+ }
+ return buffer.pos();
+}
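+
+// From the logic above: an empty region serializes to 4 bytes (the -1 tag),
+// a rect to 4 + 16 == 20 bytes, and a complex region to 4 + 16 + 8 plus
+// fRunCount * sizeof(RunType) bytes.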
+
+static bool validate_run_count(int ySpanCount, int intervalCount, int runCount) {
+ // The run count must equal 2 + 3 * ySpanCount + 2 * intervalCount.
+ if (ySpanCount < 1 || intervalCount < 2) {
+ return false;
+ }
+ SkSafeMath safeMath;
+ int sum = 2;
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, intervalCount);
+ sum = safeMath.addInt(sum, intervalCount);
+ return safeMath && sum == runCount;
+}
+
+// Validate that a memory sequence is a valid region: try to check all
+// possible errors, and never read beyond &runs[runCount-1].
+static bool validate_run(const int32_t* runs,
+ int runCount,
+ const SkIRect& givenBounds,
+ int32_t ySpanCount,
+ int32_t intervalCount) {
+ // Region Layout:
+ // Top ( Bottom Span_Interval_Count ( Left Right )* Sentinel )+ Sentinel
+ if (!validate_run_count(SkToInt(ySpanCount), SkToInt(intervalCount), runCount)) {
+ return false;
+ }
+ SkASSERT(runCount >= 7); // 7==SkRegion::kRectRegionRuns
+ // quick sanity check:
+ if (runs[runCount - 1] != SkRegion_kRunTypeSentinel ||
+ runs[runCount - 2] != SkRegion_kRunTypeSentinel) {
+ return false;
+ }
+ const int32_t* const end = runs + runCount;
+ SkIRect bounds = {0, 0, 0, 0}; // calculated bounds
+ SkIRect rect = {0, 0, 0, 0}; // current rect
+ rect.fTop = *runs++;
+ if (rect.fTop == SkRegion_kRunTypeSentinel) {
+ return false; // no rect can contain SkRegion_kRunTypeSentinel
+ }
+ if (rect.fTop != givenBounds.fTop) {
+ return false; // Must not begin with empty span that does not contribute to bounds.
+ }
+ do {
+ --ySpanCount;
+ if (ySpanCount < 0) {
+ return false; // too many yspans
+ }
+ rect.fBottom = *runs++;
+ if (rect.fBottom == SkRegion_kRunTypeSentinel) {
+ return false;
+ }
+ if (rect.fBottom > givenBounds.fBottom) {
+ return false; // Must not end with empty span that does not contribute to bounds.
+ }
+ if (rect.fBottom <= rect.fTop) {
+ return false; // y-intervals must be ordered; rects must be non-empty.
+ }
+
+ int32_t xIntervals = *runs++;
+ SkASSERT(runs < end);
+ if (xIntervals < 0 || xIntervals > intervalCount || runs + 1 + 2 * xIntervals > end) {
+ return false;
+ }
+ intervalCount -= xIntervals;
+ bool firstInterval = true;
+ int32_t lastRight = 0; // check that x-intervals are distinct and ordered.
+ while (xIntervals-- > 0) {
+ rect.fLeft = *runs++;
+ rect.fRight = *runs++;
+ if (rect.fLeft == SkRegion_kRunTypeSentinel ||
+ rect.fRight == SkRegion_kRunTypeSentinel ||
+ rect.fLeft >= rect.fRight || // check non-empty rect
+ (!firstInterval && rect.fLeft <= lastRight)) {
+ return false;
+ }
+ lastRight = rect.fRight;
+ firstInterval = false;
+ bounds.join(rect);
+ }
+ if (*runs++ != SkRegion_kRunTypeSentinel) {
+ return false; // required sentinel is missing.
+ }
+ rect.fTop = rect.fBottom;
+ SkASSERT(runs < end);
+ } while (*runs != SkRegion_kRunTypeSentinel);
+ ++runs;
+ if (ySpanCount != 0 || intervalCount != 0 || givenBounds != bounds) {
+ return false;
+ }
+ SkASSERT(runs == end); // if ySpanCount and intervalCount are right, the length must be correct.
+ return true;
+}
+
+size_t SkRegion::readFromMemory(const void* storage, size_t length) {
+ SkRBuffer buffer(storage, length);
+ SkRegion tmp;
+ int32_t count;
+
+ // Serialized Region Format:
+ // Empty:
+ // -1
+ // Simple Rect:
+ // 0 LEFT TOP RIGHT BOTTOM
+ // Complex Region:
+ // COUNT LEFT TOP RIGHT BOTTOM Y_SPAN_COUNT TOTAL_INTERVAL_COUNT [RUNS....]
+ if (!buffer.readS32(&count) || count < -1) {
+ return 0;
+ }
+ if (count >= 0) {
+ if (!buffer.read(&tmp.fBounds, sizeof(tmp.fBounds)) || tmp.fBounds.isEmpty()) {
+ return 0; // Short buffer or bad bounds for non-empty region; report failure.
+ }
+ if (count == 0) {
+ tmp.fRunHead = SkRegion_gRectRunHeadPtr;
+ } else {
+ int32_t ySpanCount, intervalCount;
+ if (!buffer.readS32(&ySpanCount) ||
+ !buffer.readS32(&intervalCount) ||
+ buffer.available() < count * sizeof(int32_t)) {
+ return 0;
+ }
+ if (!validate_run((const int32_t*)((const char*)storage + buffer.pos()), count,
+ tmp.fBounds, ySpanCount, intervalCount)) {
+ return 0; // invalid runs, don't even allocate
+ }
+ tmp.allocateRuns(count, ySpanCount, intervalCount);
+ SkASSERT(tmp.isComplex());
+ SkAssertResult(buffer.read(tmp.fRunHead->writable_runs(), count * sizeof(int32_t)));
+ }
+ }
+ SkASSERT(tmp.isValid());
+ SkASSERT(buffer.isValid());
+ this->swap(tmp);
+ return buffer.pos();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::isValid() const {
+ if (this->isEmpty()) {
+ return fBounds == SkIRect{0, 0, 0, 0};
+ }
+ if (fBounds.isEmpty()) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ return fRunHead && fRunHead->fRefCnt > 0 &&
+ validate_run(fRunHead->readonly_runs(), fRunHead->fRunCount, fBounds,
+ fRunHead->getYSpanCount(), fRunHead->getIntervalCount());
+}
+
+#ifdef SK_DEBUG
+void SkRegionPriv::Validate(const SkRegion& rgn) { SkASSERT(rgn.isValid()); }
+
+void SkRegion::dump() const {
+ if (this->isEmpty()) {
+ SkDebugf(" rgn: empty\n");
+ } else {
+ SkDebugf(" rgn: [%d %d %d %d]", fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ if (this->isComplex()) {
+ const RunType* runs = fRunHead->readonly_runs();
+ for (int i = 0; i < fRunHead->fRunCount; i++)
+ SkDebugf(" %d", runs[i]);
+ }
+ SkDebugf("\n");
+ }
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Iterator::Iterator(const SkRegion& rgn) {
+ this->reset(rgn);
+}
+
+bool SkRegion::Iterator::rewind() {
+ if (fRgn) {
+ this->reset(*fRgn);
+ return true;
+ }
+ return false;
+}
+
+void SkRegion::Iterator::reset(const SkRegion& rgn) {
+ fRgn = &rgn;
+ if (rgn.isEmpty()) {
+ fDone = true;
+ } else {
+ fDone = false;
+ if (rgn.isRect()) {
+ fRect = rgn.fBounds;
+ fRuns = nullptr;
+ } else {
+ fRuns = rgn.fRunHead->readonly_runs();
+ fRect.setLTRB(fRuns[3], fRuns[0], fRuns[4], fRuns[1]);
+ fRuns += 5;
+ // Now fRuns points to the 2nd interval (or x-sentinel)
+ }
+ }
+}
+
+void SkRegion::Iterator::next() {
+ if (fDone) {
+ return;
+ }
+
+ if (fRuns == nullptr) { // rect case
+ fDone = true;
+ return;
+ }
+
+ const RunType* runs = fRuns;
+
+ if (runs[0] < SkRegion_kRunTypeSentinel) { // valid X value
+ fRect.fLeft = runs[0];
+ fRect.fRight = runs[1];
+ runs += 2;
+ } else { // we're at the end of a line
+ runs += 1;
+ if (runs[0] < SkRegion_kRunTypeSentinel) { // valid Y value
+ int intervals = runs[1];
+ if (0 == intervals) { // empty line
+ fRect.fTop = runs[0];
+ runs += 3;
+ } else {
+ fRect.fTop = fRect.fBottom;
+ }
+
+ fRect.fBottom = runs[0];
+ assert_sentinel(runs[2], false);
+ assert_sentinel(runs[3], false);
+ fRect.fLeft = runs[2];
+ fRect.fRight = runs[3];
+ runs += 4;
+ } else { // end of rgn
+ fDone = true;
+ }
+ }
+ fRuns = runs;
+}
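+
+// A minimal usage sketch (illustrative only): enumerate a region's rects in
+// Y -> X order.
+//     for (SkRegion::Iterator it(rgn); !it.done(); it.next()) {
+//         handleRect(it.rect());  // handleRect is a placeholder for caller code
+//     }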
+
+SkRegion::Cliperator::Cliperator(const SkRegion& rgn, const SkIRect& clip)
+ : fIter(rgn), fClip(clip), fDone(true) {
+ const SkIRect& r = fIter.rect();
+
+ while (!fIter.done()) {
+ if (r.fTop >= clip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(clip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+void SkRegion::Cliperator::next() {
+ if (fDone) {
+ return;
+ }
+
+ const SkIRect& r = fIter.rect();
+
+ fDone = true;
+ fIter.next();
+ while (!fIter.done()) {
+ if (r.fTop >= fClip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(fClip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Spanerator::Spanerator(const SkRegion& rgn, int y, int left,
+ int right) {
+ SkDEBUGCODE(SkRegionPriv::Validate(rgn));
+
+ const SkIRect& r = rgn.getBounds();
+
+ fDone = true;
+ if (!rgn.isEmpty() && y >= r.fTop && y < r.fBottom &&
+ right > r.fLeft && left < r.fRight) {
+ if (rgn.isRect()) {
+ if (left < r.fLeft) {
+ left = r.fLeft;
+ }
+ if (right > r.fRight) {
+ right = r.fRight;
+ }
+ fLeft = left;
+ fRight = right;
+ fRuns = nullptr; // means we're a rect, not a rgn
+ fDone = false;
+ } else {
+ const SkRegion::RunType* runs = rgn.fRunHead->findScanline(y);
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ // runs[0..1] is to the right of the span, so we're done
+ if (runs[0] >= right) {
+ break;
+ }
+ // runs[0..1] is to the left of the span, so continue
+ if (runs[1] <= left) {
+ runs += 2;
+ continue;
+ }
+ // runs[0..1] intersects the span
+ fRuns = runs;
+ fLeft = left;
+ fRight = right;
+ fDone = false;
+ break;
+ }
+ }
+ }
+}
+
+bool SkRegion::Spanerator::next(int* left, int* right) {
+ if (fDone) {
+ return false;
+ }
+
+ if (fRuns == nullptr) { // we're a rect
+ fDone = true; // ok, now we're done
+ if (left) {
+ *left = fLeft;
+ }
+ if (right) {
+ *right = fRight;
+ }
+ return true; // this interval is legal
+ }
+
+ const SkRegion::RunType* runs = fRuns;
+
+ if (runs[0] >= fRight) {
+ fDone = true;
+ return false;
+ }
+
+ SkASSERT(runs[1] > fLeft);
+
+ if (left) {
+ *left = SkMax32(fLeft, runs[0]);
+ }
+ if (right) {
+ *right = SkMin32(fRight, runs[1]);
+ }
+ fRuns = runs + 2;
+ return true;
+}
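+
+// A minimal usage sketch (illustrative only): walk the spans of one scanline,
+// clipped to [left, right).
+//     SkRegion::Spanerator span(rgn, y, left, right);
+//     int l, r;
+//     while (span.next(&l, &r)) { /* blit [l, r) at row y */ }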
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void visit_pairs(int pairCount, int y, const int32_t pairs[],
+ const std::function<void(const SkIRect&)>& visitor) {
+ for (int i = 0; i < pairCount; ++i) {
+ visitor({ pairs[0], y, pairs[1], y + 1 });
+ pairs += 2;
+ }
+}
+
+void SkRegionPriv::VisitSpans(const SkRegion& rgn,
+ const std::function<void(const SkIRect&)>& visitor) {
+ if (rgn.isEmpty()) {
+ return;
+ }
+ if (rgn.isRect()) {
+ visitor(rgn.getBounds());
+ } else {
+ const int32_t* p = rgn.fRunHead->readonly_runs();
+ int32_t top = *p++;
+ int32_t bot = *p++;
+ do {
+ int pairCount = *p++;
+ if (pairCount == 1) {
+ visitor({ p[0], top, p[1], bot });
+ p += 2;
+ } else if (pairCount > 1) {
+ // we have to loop repeatedly in Y, sending each interval in Y -> X order
+ for (int y = top; y < bot; ++y) {
+ visit_pairs(pairCount, y, p, visitor);
+ }
+ p += pairCount * 2;
+ }
+ assert_sentinel(*p, true);
+ p += 1; // skip sentinel
+
+ // read next bottom or sentinel
+ top = bot;
+ bot = *p++;
+ } while (!SkRegionValueIsSentinel(bot));
+ }
+}
+
diff --git a/gfx/skia/skia/src/core/SkRegionPriv.h b/gfx/skia/skia/src/core/SkRegionPriv.h
new file mode 100644
index 0000000000..2d0559ca0d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegionPriv.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRegionPriv_DEFINED
+#define SkRegionPriv_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+#include <atomic>
+#include <functional>
+
+class SkRegionPriv {
+public:
+ static constexpr int kRunTypeSentinel = 0x7FFFFFFF;
+ typedef SkRegion::RunType RunType;
+ typedef SkRegion::RunHead RunHead;
+
+ // Call the function with each span, in Y -> X ascending order.
+ // We pass a rect, but we still guarantee the span Y->X ordering, so the
+ // height of the rect will often be 1. It should never be empty.
+ static void VisitSpans(const SkRegion& rgn, const std::function<void(const SkIRect&)>&);
+
+#ifdef SK_DEBUG
+ static void Validate(const SkRegion& rgn);
+#endif
+};
+
+static constexpr int SkRegion_kRunTypeSentinel = 0x7FFFFFFF;
+
+inline bool SkRegionValueIsSentinel(int32_t value) {
+ return value == (int32_t)SkRegion_kRunTypeSentinel;
+}
+
+#define assert_sentinel(value, isSentinel) \
+ SkASSERT(SkRegionValueIsSentinel(value) == isSentinel)
+
+#ifdef SK_DEBUG
+// Given the first interval (just past the interval-count), compute the
+// interval count, by searching for the x-sentinel
+//
+static int compute_intervalcount(const SkRegionPriv::RunType runs[]) {
+ const SkRegionPriv::RunType* curr = runs;
+ while (*curr < SkRegion_kRunTypeSentinel) {
+ SkASSERT(curr[0] < curr[1]);
+ SkASSERT(curr[1] < SkRegion_kRunTypeSentinel);
+ curr += 2;
+ }
+ return SkToInt((curr - runs) >> 1);
+}
+#endif
+
+struct SkRegion::RunHead {
+public:
+ std::atomic<int32_t> fRefCnt;
+ int32_t fRunCount;
+
+ /**
+ * Number of spans with different Y values. This does not count the initial
+ * Top value, nor does it count the final Y-Sentinel value. In the logical
+ * case of a rectangle, this would return 1, and an empty region would
+ * return 0.
+ */
+ int getYSpanCount() const {
+ return fYSpanCount;
+ }
+
+ /**
+ * Number of intervals in the entire region. This equals the number of
+ * rects that would be returned by the Iterator. In the logical case of
+ * a rect, this would return 1, and an empty region would return 0.
+ */
+ int getIntervalCount() const {
+ return fIntervalCount;
+ }
+
+ static RunHead* Alloc(int count) {
+ if (count < SkRegion::kRectRegionRuns) {
+ return nullptr;
+ }
+
+ const int64_t size = sk_64_mul(count, sizeof(RunType)) + sizeof(RunHead);
+ if (count < 0 || !SkTFitsIn<int32_t>(size)) { SK_ABORT("Invalid Size"); }
+
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt = 1;
+ head->fRunCount = count;
+ // these must be filled in later, otherwise we will be invalid
+ head->fYSpanCount = 0;
+ head->fIntervalCount = 0;
+ return head;
+ }
+
+ static RunHead* Alloc(int count, int yspancount, int intervalCount) {
+ if (yspancount <= 0 || intervalCount <= 1) {
+ return nullptr;
+ }
+
+ RunHead* head = Alloc(count);
+ if (!head) {
+ return nullptr;
+ }
+ head->fYSpanCount = yspancount;
+ head->fIntervalCount = intervalCount;
+ return head;
+ }
+
+ SkRegion::RunType* writable_runs() {
+ SkASSERT(fRefCnt == 1);
+ return (SkRegion::RunType*)(this + 1);
+ }
+
+ const SkRegion::RunType* readonly_runs() const {
+ return (const SkRegion::RunType*)(this + 1);
+ }
+
+ RunHead* ensureWritable() {
+ RunHead* writable = this;
+ if (fRefCnt > 1) {
+ // We need to alloc & copy the current region before decreasing
+ // the refcount, because it could be freed in the meantime.
+ writable = Alloc(fRunCount, fYSpanCount, fIntervalCount);
+ memcpy(writable->writable_runs(), this->readonly_runs(),
+ fRunCount * sizeof(RunType));
+
+ // fRefCnt might have changed since we last checked.
+ // If we own the last reference at this point, we need to
+ // free the memory.
+ if (--fRefCnt == 0) {
+ sk_free(this);
+ }
+ }
+ return writable;
+ }
+
+ /**
+ * Given a scanline (including its Bottom value at runs[0]), return the next
+ * scanline. Asserts that there is one (i.e. runs[0] < Sentinel)
+ */
+ static SkRegion::RunType* SkipEntireScanline(const SkRegion::RunType runs[]) {
+ // we are not the Y Sentinel
+ SkASSERT(runs[0] < SkRegion_kRunTypeSentinel);
+
+ const int intervals = runs[1];
+ SkASSERT(runs[2 + intervals * 2] == SkRegion_kRunTypeSentinel);
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(&runs[2]);
+ SkASSERT(n == intervals);
+ }
+#endif
+
+ // skip the entire line [B N [L R] S]
+ runs += 1 + 1 + intervals * 2 + 1;
+ return const_cast<SkRegion::RunType*>(runs);
+ }
+
+ /**
+ * Return the scanline that contains the Y value. This requires that the Y
+ * value is already known to be contained within the bounds of the region,
+ * and so this routine never returns nullptr.
+ *
+ * It returns the beginning of the scanline, starting with its Bottom value.
+ */
+ SkRegion::RunType* findScanline(int y) const {
+ const RunType* runs = this->readonly_runs();
+
+ // if the top-check fails, we didn't do a quick check on the bounds
+ SkASSERT(y >= runs[0]);
+
+ runs += 1; // skip top-Y
+ for (;;) {
+ int bottom = runs[0];
+ // If we hit this, we've walked off the region, and our bounds check
+ // failed.
+ SkASSERT(bottom < SkRegion_kRunTypeSentinel);
+ if (y < bottom) {
+ break;
+ }
+ runs = SkipEntireScanline(runs);
+ }
+ return const_cast<SkRegion::RunType*>(runs);
+ }
+
+ // Copy src runs into us, computing interval counts and bounds along the way
+ void computeRunBounds(SkIRect* bounds) {
+ RunType* runs = this->writable_runs();
+ bounds->fTop = *runs++;
+
+ int bot;
+ int ySpanCount = 0;
+ int intervalCount = 0;
+ int left = SK_MaxS32;
+ int rite = SK_MinS32;
+
+ do {
+ bot = *runs++;
+ SkASSERT(bot < SkRegion_kRunTypeSentinel);
+ ySpanCount += 1;
+
+ const int intervals = *runs++;
+ SkASSERT(intervals >= 0);
+ SkASSERT(intervals < SkRegion_kRunTypeSentinel);
+
+ if (intervals > 0) {
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(runs);
+ SkASSERT(n == intervals);
+ }
+#endif
+ RunType L = runs[0];
+ SkASSERT(L < SkRegion_kRunTypeSentinel);
+ if (left > L) {
+ left = L;
+ }
+
+ runs += intervals * 2;
+ RunType R = runs[-1];
+ SkASSERT(R < SkRegion_kRunTypeSentinel);
+ if (rite < R) {
+ rite = R;
+ }
+
+ intervalCount += intervals;
+ }
+ SkASSERT(SkRegion_kRunTypeSentinel == *runs);
+ runs += 1; // skip x-sentinel
+
+ // test Y-sentinel
+ } while (SkRegion_kRunTypeSentinel > *runs);
+
+#ifdef SK_DEBUG
+ // +1 to skip the last Y-sentinel
+ int runCount = SkToInt(runs - this->writable_runs() + 1);
+ SkASSERT(runCount == fRunCount);
+#endif
+
+ fYSpanCount = ySpanCount;
+ fIntervalCount = intervalCount;
+
+ bounds->fLeft = left;
+ bounds->fRight = rite;
+ bounds->fBottom = bot;
+ }
+
+private:
+ int32_t fYSpanCount;
+ int32_t fIntervalCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegion_path.cpp b/gfx/skia/skia/src/core/SkRegion_path.cpp
new file mode 100644
index 0000000000..ff07c466c5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion_path.cpp
@@ -0,0 +1,549 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRegionPriv.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkTSort.h"
+
+// The rgnbuilder caller *seems* to pass short counts, and possibly often sees early failure, so
+// we may not want to promote this to a "std" routine just yet.
+static bool sk_memeq32(const int32_t* SK_RESTRICT a, const int32_t* SK_RESTRICT b, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+class SkRgnBuilder : public SkBlitter {
+public:
+ SkRgnBuilder();
+ ~SkRgnBuilder() override;
+
+ // returns true if it could allocate the working storage needed
+ bool init(int maxHeight, int maxTransitions, bool pathIsInverse);
+
+ void done() {
+ if (fCurrScanline != nullptr) {
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+ if (!this->collapsWithPrev()) { // flush the last line
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ }
+ }
+
+ int computeRunCount() const;
+ void copyToRect(SkIRect*) const;
+ void copyToRgn(SkRegion::RunType runs[]) const;
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH not implemented");
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("SkRgnBuilder: Top = %d\n", fTop);
+ const Scanline* line = (Scanline*)fStorage;
+ while (line < fCurrScanline) {
+ SkDebugf("SkRgnBuilder::Scanline: LastY=%d, fXCount=%d", line->fLastY, line->fXCount);
+ for (int i = 0; i < line->fXCount; i++) {
+ SkDebugf(" %d", line->firstX()[i]);
+ }
+ SkDebugf("\n");
+
+ line = line->nextScanline();
+ }
+ }
+#endif
+private:
+ /*
+ * Scanline mimics a row in the region, nearly. A row in a region is:
+ * [Bottom IntervalCount [L R]... Sentinel]
+ * while a Scanline is
+ * [LastY XCount [L R]... uninitialized]
+ * The two are the same length (which is good), but we have to transmute
+ * the scanline a little when we convert it to a region-row.
+ *
+ * Potentially we could recode this to exactly match the row format, in
+ * which case copyToRgn() could be a single memcpy. Not sure that is worth
+ * the effort.
+ */
+ struct Scanline {
+ SkRegion::RunType fLastY;
+ SkRegion::RunType fXCount;
+
+ SkRegion::RunType* firstX() const { return (SkRegion::RunType*)(this + 1); }
+ Scanline* nextScanline() const {
+ // add final +1 for the x-sentinel
+ return (Scanline*)((SkRegion::RunType*)(this + 1) + fXCount + 1);
+ }
+ };
+ SkRegion::RunType* fStorage;
+ Scanline* fCurrScanline;
+ Scanline* fPrevScanline;
+ // points at the next available x[] in fCurrScanline
+ SkRegion::RunType* fCurrXPtr;
+ SkRegion::RunType fTop; // first Y value
+
+ int fStorageCount;
+
+ bool collapsWithPrev() {
+ if (fPrevScanline != nullptr &&
+ fPrevScanline->fLastY + 1 == fCurrScanline->fLastY &&
+ fPrevScanline->fXCount == fCurrScanline->fXCount &&
+ sk_memeq32(fPrevScanline->firstX(), fCurrScanline->firstX(), fCurrScanline->fXCount))
+ {
+ // update the height of fPrevScanline
+ fPrevScanline->fLastY = fCurrScanline->fLastY;
+ return true;
+ }
+ return false;
+ }
+};
+
+SkRgnBuilder::SkRgnBuilder()
+ : fStorage(nullptr) {
+}
+
+SkRgnBuilder::~SkRgnBuilder() {
+ sk_free(fStorage);
+}
+
+bool SkRgnBuilder::init(int maxHeight, int maxTransitions, bool pathIsInverse) {
+ if ((maxHeight | maxTransitions) < 0) {
+ return false;
+ }
+
+ SkSafeMath safe;
+
+ if (pathIsInverse) {
+ // allow for additional X transitions to "invert" each scanline
+ // [ L' ... normal transitions ... R' ]
+ //
+ maxTransitions = safe.addInt(maxTransitions, 2);
+ }
+
+ // compute the count with +1 and +3 slop for the working buffer
+ size_t count = safe.mul(safe.addInt(maxHeight, 1), safe.addInt(3, maxTransitions));
+
+ if (pathIsInverse) {
+ // allow for two "empty" rows for the top and bottom
+ // [ Y, 1, L, R, S] == 5 (*2 for top and bottom)
+ count = safe.add(count, 10);
+ }
+
+ if (!safe || !SkTFitsIn<int32_t>(count)) {
+ return false;
+ }
+ fStorageCount = SkToS32(count);
+
+ fStorage = (SkRegion::RunType*)sk_malloc_canfail(fStorageCount, sizeof(SkRegion::RunType));
+ if (nullptr == fStorage) {
+ return false;
+ }
+
+ fCurrScanline = nullptr; // signal empty collection
+ fPrevScanline = nullptr; // signal first scanline
+ return true;
+}
+
+void SkRgnBuilder::blitH(int x, int y, int width) {
+ if (fCurrScanline == nullptr) { // first time
+ fTop = (SkRegion::RunType)(y);
+ fCurrScanline = (Scanline*)fStorage;
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ } else {
+ SkASSERT(y >= fCurrScanline->fLastY);
+
+ if (y > fCurrScanline->fLastY) {
+ // if we get here, we're done with fCurrScanline
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+
+ int prevLastY = fCurrScanline->fLastY;
+ if (!this->collapsWithPrev()) {
+ fPrevScanline = fCurrScanline;
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ if (y - 1 > prevLastY) { // insert empty run
+ fCurrScanline->fLastY = (SkRegion::RunType)(y - 1);
+ fCurrScanline->fXCount = 0;
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ // setup for the new curr line
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ }
+ }
+ // check if we should extend the current run, or add a new one
+ if (fCurrXPtr > fCurrScanline->firstX() && fCurrXPtr[-1] == x) {
+ fCurrXPtr[-1] = (SkRegion::RunType)(x + width);
+ } else {
+ fCurrXPtr[0] = (SkRegion::RunType)(x);
+ fCurrXPtr[1] = (SkRegion::RunType)(x + width);
+ fCurrXPtr += 2;
+ }
+ SkASSERT(fCurrXPtr - fStorage < fStorageCount);
+}
+
+int SkRgnBuilder::computeRunCount() const {
+ if (fCurrScanline == nullptr) {
+ return 0;
+ }
+
+ const SkRegion::RunType* line = fStorage;
+ const SkRegion::RunType* stop = (const SkRegion::RunType*)fCurrScanline;
+
+ return 2 + (int)(stop - line);
+}
+
+void SkRgnBuilder::copyToRect(SkIRect* r) const {
+ SkASSERT(fCurrScanline != nullptr);
+ // A rect's scanline is [bottom intervals left right sentinel] == 5
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage == 5);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ SkASSERT(line->fXCount == 2);
+
+ r->setLTRB(line->firstX()[0], fTop, line->firstX()[1], line->fLastY + 1);
+}
+
+void SkRgnBuilder::copyToRgn(SkRegion::RunType runs[]) const {
+ SkASSERT(fCurrScanline != nullptr);
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage > 4);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ const Scanline* stop = fCurrScanline;
+
+ *runs++ = fTop;
+ do {
+ *runs++ = (SkRegion::RunType)(line->fLastY + 1);
+ int count = line->fXCount;
+ *runs++ = count >> 1; // intervalCount
+ if (count) {
+ memcpy(runs, line->firstX(), count * sizeof(SkRegion::RunType));
+ runs += count;
+ }
+ *runs++ = SkRegion_kRunTypeSentinel;
+ line = line->nextScanline();
+ } while (line < stop);
+ SkASSERT(line == stop);
+ *runs = SkRegion_kRunTypeSentinel;
+}
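+
+// For example, a Scanline {fLastY = 9, fXCount = 2, X = [3, 7]} is emitted as
+// the region row [10, 1, 3, 7, S]: Bottom == fLastY + 1 and
+// IntervalCount == fXCount >> 1.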
+
+static unsigned verb_to_initial_last_index(unsigned verb) {
+ static const uint8_t gPathVerbToInitialLastIndex[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_Verb
+ 2, // kConic_Verb
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < SK_ARRAY_COUNT(gPathVerbToInitialLastIndex));
+ return gPathVerbToInitialLastIndex[verb];
+}
+
+static unsigned verb_to_max_edges(unsigned verb) {
+ static const uint8_t gPathVerbToMaxEdges[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_Verb
+ 2, // kConic_Verb
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < SK_ARRAY_COUNT(gPathVerbToMaxEdges));
+ return gPathVerbToMaxEdges[verb];
+}
+
+// If returns 0, ignore itop and ibot
+static int count_path_runtype_values(const SkPath& path, int* itop, int* ibot) {
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ int maxEdges = 0;
+ SkScalar top = SkIntToScalar(SK_MaxS16);
+ SkScalar bot = SkIntToScalar(SK_MinS16);
+
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ maxEdges += verb_to_max_edges(verb);
+
+ int lastIndex = verb_to_initial_last_index(verb);
+ if (lastIndex > 0) {
+ for (int i = 1; i <= lastIndex; i++) {
+ if (top > pts[i].fY) {
+ top = pts[i].fY;
+ } else if (bot < pts[i].fY) {
+ bot = pts[i].fY;
+ }
+ }
+ } else if (SkPath::kMove_Verb == verb) {
+ if (top > pts[0].fY) {
+ top = pts[0].fY;
+ } else if (bot < pts[0].fY) {
+ bot = pts[0].fY;
+ }
+ }
+ }
+ if (0 == maxEdges) {
+ return 0; // we have only moves+closes
+ }
+
+ SkASSERT(top <= bot);
+ *itop = SkScalarRoundToInt(top);
+ *ibot = SkScalarRoundToInt(bot);
+ return maxEdges;
+}
+
+static bool check_inverse_on_empty_return(SkRegion* dst, const SkPath& path, const SkRegion& clip) {
+ if (path.isInverseFillType()) {
+ return dst->set(clip);
+ } else {
+ return dst->setEmpty();
+ }
+}
+
+bool SkRegion::setPath(const SkPath& path, const SkRegion& clip) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (clip.isEmpty() || !path.isFinite()) {
+ return this->setEmpty();
+ }
+
+ if (path.isEmpty()) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ // Our builder is very fragile, and can't be called with spans/rects out of Y->X order.
+ // To ensure this, we only "fill" clipped to a rect (the clip's bounds), and if the
+ // clip is more complex than that, we just post-intersect the result with the clip.
+ if (clip.isComplex()) {
+ if (!this->setPath(path, SkRegion(clip.getBounds()))) {
+ return false;
+ }
+ return this->op(clip, kIntersect_Op);
+ }
+
+ // compute worst-case rgn-size for the path
+ int pathTop, pathBot;
+ int pathTransitions = count_path_runtype_values(path, &pathTop, &pathBot);
+ if (0 == pathTransitions) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ int clipTop, clipBot;
+ int clipTransitions = clip.count_runtype_values(&clipTop, &clipBot);
+
+ int top = SkMax32(pathTop, clipTop);
+ int bot = SkMin32(pathBot, clipBot);
+ if (top >= bot) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ SkRgnBuilder builder;
+
+ if (!builder.init(bot - top,
+ SkMax32(pathTransitions, clipTransitions),
+ path.isInverseFillType())) {
+ // can't allocate working space, so return false
+ return this->setEmpty();
+ }
+
+ SkScan::FillPath(path, clip, &builder);
+ builder.done();
+
+ int count = builder.computeRunCount();
+ if (count == 0) {
+ return this->setEmpty();
+ } else if (count == kRectRegionRuns) {
+ builder.copyToRect(&fBounds);
+ this->setRect(fBounds);
+ } else {
+ SkRegion tmp;
+
+ tmp.fRunHead = RunHead::Alloc(count);
+ builder.copyToRgn(tmp.fRunHead->writable_runs());
+ tmp.fRunHead->computeRunBounds(&tmp.fBounds);
+ this->swap(tmp);
+ }
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct Edge {
+ enum {
+ kY0Link = 0x01,
+ kY1Link = 0x02,
+
+ kCompleteLink = (kY0Link | kY1Link)
+ };
+
+ SkRegionPriv::RunType fX;
+ SkRegionPriv::RunType fY0, fY1;
+ uint8_t fFlags;
+ Edge* fNext;
+
+ void set(int x, int y0, int y1) {
+ SkASSERT(y0 != y1);
+
+ fX = (SkRegionPriv::RunType)(x);
+ fY0 = (SkRegionPriv::RunType)(y0);
+ fY1 = (SkRegionPriv::RunType)(y1);
+ fFlags = 0;
+ SkDEBUGCODE(fNext = nullptr;)
+ }
+
+ int top() const {
+ return SkMin32(fY0, fY1);
+ }
+};
+
+static void find_link(Edge* base, Edge* stop) {
+ SkASSERT(base < stop);
+
+ if (base->fFlags == Edge::kCompleteLink) {
+ SkASSERT(base->fNext);
+ return;
+ }
+
+ SkASSERT(base + 1 < stop);
+
+ int y0 = base->fY0;
+ int y1 = base->fY1;
+
+ Edge* e = base;
+ if ((base->fFlags & Edge::kY0Link) == 0) {
+ for (;;) {
+ e += 1;
+ if ((e->fFlags & Edge::kY1Link) == 0 && y0 == e->fY1) {
+ SkASSERT(nullptr == e->fNext);
+ e->fNext = base;
+ e->fFlags = SkToU8(e->fFlags | Edge::kY1Link);
+ break;
+ }
+ }
+ }
+
+ e = base;
+ if ((base->fFlags & Edge::kY1Link) == 0) {
+ for (;;) {
+ e += 1;
+ if ((e->fFlags & Edge::kY0Link) == 0 && y1 == e->fY0) {
+ SkASSERT(nullptr == base->fNext);
+ base->fNext = e;
+ e->fFlags = SkToU8(e->fFlags | Edge::kY0Link);
+ break;
+ }
+ }
+ }
+
+ base->fFlags = Edge::kCompleteLink;
+}
+
+static int extract_path(Edge* edge, Edge* stop, SkPath* path) {
+ while (0 == edge->fFlags) {
+ edge++; // skip over "used" edges
+ }
+
+ SkASSERT(edge < stop);
+
+ Edge* base = edge;
+ Edge* prev = edge;
+ edge = edge->fNext;
+ SkASSERT(edge != base);
+
+ int count = 1;
+ path->moveTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY0));
+ prev->fFlags = 0;
+ do {
+ if (prev->fX != edge->fX || prev->fY1 != edge->fY0) { // skip collinear
+ path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
+ path->lineTo(SkIntToScalar(edge->fX), SkIntToScalar(edge->fY0)); // H
+ }
+ prev = edge;
+ edge = edge->fNext;
+ count += 1;
+ prev->fFlags = 0;
+ } while (edge != base);
+ path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
+ path->close();
+ return count;
+}
+
+struct EdgeLT {
+ bool operator()(const Edge& a, const Edge& b) const {
+ return (a.fX == b.fX) ? a.top() < b.top() : a.fX < b.fX;
+ }
+};
+
+bool SkRegion::getBoundaryPath(SkPath* path) const {
+ // path could safely be nullptr if we're empty, but the caller shouldn't
+ // *know* that
+ SkASSERT(path);
+
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const SkIRect& bounds = this->getBounds();
+
+ if (this->isRect()) {
+ SkRect r;
+ r.set(bounds); // this converts the ints to scalars
+ path->addRect(r);
+ return true;
+ }
+
+ SkRegion::Iterator iter(*this);
+ SkTDArray<Edge> edges;
+
+ for (const SkIRect& r = iter.rect(); !iter.done(); iter.next()) {
+ Edge* edge = edges.append(2);
+ edge[0].set(r.fLeft, r.fBottom, r.fTop);
+ edge[1].set(r.fRight, r.fTop, r.fBottom);
+ }
+
+ int count = edges.count();
+ Edge* start = edges.begin();
+ Edge* stop = start + count;
+ SkTQSort<Edge>(start, stop - 1, EdgeLT());
+
+ Edge* e;
+ for (e = start; e != stop; e++) {
+ find_link(e, stop);
+ }
+
+#ifdef SK_DEBUG
+ for (e = start; e != stop; e++) {
+ SkASSERT(e->fNext != nullptr);
+ SkASSERT(e->fFlags == Edge::kCompleteLink);
+ }
+#endif
+
+ path->incReserve(count << 1);
+ do {
+ SkASSERT(count > 1);
+ count -= extract_path(start, stop, path);
+ } while (count > 0);
+
+ return true;
+}
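+
+// How the pieces above fit together: every region rect contributes a left
+// edge (bottom-to-top) and a right edge (top-to-bottom). After sorting by x
+// and then top, find_link() stitches edges whose y-endpoints meet into closed
+// loops, and extract_path() walks each loop emitting alternating vertical and
+// horizontal segments, skipping collinear joins.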
diff --git a/gfx/skia/skia/src/core/SkRemoteGlyphCache.cpp b/gfx/skia/skia/src/core/SkRemoteGlyphCache.cpp
new file mode 100644
index 0000000000..aea005506c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRemoteGlyphCache.cpp
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRemoteGlyphCache.h"
+
+#include <iterator>
+#include <memory>
+#include <new>
+#include <string>
+#include <tuple>
+
+#include "src/core/SkDevice.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkTypeface_remote.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/text/GrTextContext.h"
+#endif
+
+static SkDescriptor* auto_descriptor_from_desc(const SkDescriptor* source_desc,
+ SkFontID font_id,
+ SkAutoDescriptor* ad) {
+ ad->reset(source_desc->getLength());
+ auto* desc = ad->getDesc();
+ desc->init();
+
+ // Rec.
+ {
+ uint32_t size;
+ auto ptr = source_desc->findEntry(kRec_SkDescriptorTag, &size);
+ SkScalerContextRec rec;
+ memcpy(&rec, ptr, size);
+ rec.fFontID = font_id;
+ desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+ }
+
+ // Effects.
+ {
+ uint32_t size;
+ auto ptr = source_desc->findEntry(kEffects_SkDescriptorTag, &size);
+ if (ptr) { desc->addEntry(kEffects_SkDescriptorTag, size, ptr); }
+ }
+
+ desc->computeChecksum();
+ return desc;
+}
+
+static const SkDescriptor* create_descriptor(
+ const SkPaint& paint, const SkFont& font, const SkMatrix& m,
+ const SkSurfaceProps& props, SkScalerContextFlags flags,
+ SkAutoDescriptor* ad, SkScalerContextEffects* effects) {
+ SkScalerContextRec rec;
+ SkScalerContext::MakeRecAndEffects(font, paint, props, flags, m, &rec, effects);
+ return SkScalerContext::AutoDescriptorGivenRecAndEffects(rec, *effects, ad);
+}
+
+// -- Serializer -----------------------------------------------------------------------------------
+size_t pad(size_t size, size_t alignment) { return (size + (alignment - 1)) & ~(alignment - 1); }
+
+// Alignment of some types differs between x86 and x64; in particular, int64_t
+// and double are 4-byte aligned on x86 but 8-byte aligned on x64.
+// Be consistent even when writing and reading across different architectures.
+template<typename T>
+size_t serialization_alignment() {
+ return sizeof(T) == 8 ? 8 : alignof(T);
+}
+
+class Serializer {
+public:
+ Serializer(std::vector<uint8_t>* buffer) : fBuffer{buffer} { }
+
+ template <typename T, typename... Args>
+ T* emplace(Args&&... args) {
+ auto result = allocate(sizeof(T), serialization_alignment<T>());
+ return new (result) T{std::forward<Args>(args)...};
+ }
+
+ template <typename T>
+ void write(const T& data) {
+ T* result = (T*)allocate(sizeof(T), serialization_alignment<T>());
+ memcpy(result, &data, sizeof(T));
+ }
+
+ template <typename T>
+ T* allocate() {
+ T* result = (T*)allocate(sizeof(T), serialization_alignment<T>());
+ return result;
+ }
+
+ void writeDescriptor(const SkDescriptor& desc) {
+ write(desc.getLength());
+ auto result = allocate(desc.getLength(), alignof(SkDescriptor));
+ memcpy(result, &desc, desc.getLength());
+ }
+
+ void* allocate(size_t size, size_t alignment) {
+ size_t aligned = pad(fBuffer->size(), alignment);
+ fBuffer->resize(aligned + size);
+ return &(*fBuffer)[aligned];
+ }
+
+private:
+ std::vector<uint8_t>* fBuffer;
+};
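+
+// A minimal usage sketch of the Serializer above (illustrative only; the
+// values are hypothetical):
+//
+//   std::vector<uint8_t> buffer;
+//   Serializer serializer(&buffer);
+//   serializer.emplace<uint64_t>(2u);        // a count, 8-byte aligned
+//   serializer.write<uint32_t>(0xCAFEu);     // POD copied into the buffer
+//   void* raw = serializer.allocate(16, 4);  // 16 raw bytes, 4-byte aligned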
+
+// -- Deserializer -------------------------------------------------------------------------------
+// Note that the Deserializer reads untrusted data, so we must guard against invalid data.
+class Deserializer {
+public:
+ Deserializer(const volatile char* memory, size_t memorySize)
+ : fMemory(memory), fMemorySize(memorySize) {}
+
+ template <typename T>
+ bool read(T* val) {
+ auto* result = this->ensureAtLeast(sizeof(T), serialization_alignment<T>());
+ if (!result) return false;
+
+ memcpy(val, const_cast<const char*>(result), sizeof(T));
+ return true;
+ }
+
+ bool readDescriptor(SkAutoDescriptor* ad) {
+ uint32_t descLength = 0u;
+ if (!read<uint32_t>(&descLength)) return false;
+ if (descLength < sizeof(SkDescriptor)) return false;
+ if (descLength != SkAlign4(descLength)) return false;
+
+ auto* result = this->ensureAtLeast(descLength, alignof(SkDescriptor));
+ if (!result) return false;
+
+ ad->reset(descLength);
+ memcpy(ad->getDesc(), const_cast<const char*>(result), descLength);
+
+ if (ad->getDesc()->getLength() > descLength) return false;
+ return ad->getDesc()->isValid();
+ }
+
+ const volatile void* read(size_t size, size_t alignment) {
+ return this->ensureAtLeast(size, alignment);
+ }
+
+ size_t bytesRead() const { return fBytesRead; }
+
+private:
+ const volatile char* ensureAtLeast(size_t size, size_t alignment) {
+ size_t padded = pad(fBytesRead, alignment);
+
+ // Not enough data.
+ if (padded > fMemorySize) return nullptr;
+ if (size > fMemorySize - padded) return nullptr;
+
+ auto* result = fMemory + padded;
+ fBytesRead = padded + size;
+ return result;
+ }
+
+ // Note that we read each piece of memory only once to guard against TOCTOU violations.
+ const volatile char* fMemory;
+ size_t fMemorySize;
+ size_t fBytesRead = 0u;
+};
+
+// Paths use an SkWriter32, which requires 4-byte alignment.
+static const size_t kPathAlignment = 4u;
+
+// -- StrikeSpec -----------------------------------------------------------------------------------
+struct StrikeSpec {
+ StrikeSpec() = default;
+ StrikeSpec(SkFontID typefaceID_, SkDiscardableHandleId discardableHandleId_)
+ : typefaceID{typefaceID_}, discardableHandleId(discardableHandleId_) {}
+ SkFontID typefaceID = 0u;
+ SkDiscardableHandleId discardableHandleId = 0u;
+ /* desc */
+ /* n X (glyphs ids) */
+};
+
+// -- RemoteStrike ----------------------------------------------------------------------------
+class SkStrikeServer::RemoteStrike : public SkStrikeForGPU {
+public:
+ // N.B. RemoteStrike is not valid until ensureScalerContext is called.
+ RemoteStrike(const SkDescriptor& descriptor,
+ std::unique_ptr<SkScalerContext> context,
+ SkDiscardableHandleId discardableHandleId);
+ ~RemoteStrike() override;
+
+ void addGlyph(SkPackedGlyphID, bool asPath);
+ void writePendingGlyphs(Serializer* serializer);
+ SkDiscardableHandleId discardableHandleId() const { return fDiscardableHandleId; }
+
+ const SkDescriptor& getDescriptor() const override {
+ return *fDescriptor.getDesc();
+ }
+
+ void setTypefaceAndEffects(const SkTypeface* typeface, SkScalerContextEffects effects);
+
+ const SkGlyphPositionRoundingSpec& roundingSpec() const override {
+ return fRoundingSpec;
+ }
+
+ SkSpan<const SkGlyphPos>
+ prepareForDrawingRemoveEmpty(
+ const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[], size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) override;
+
+ void onAboutToExitScope() override {}
+
+ bool hasPendingGlyphs() const {
+ return !fPendingGlyphImages.empty() || !fPendingGlyphPaths.empty();
+ }
+
+ void resetScalerContext();
+
+private:
+ void writeGlyphPath(const SkPackedGlyphID& glyphID, Serializer* serializer) const;
+
+ void ensureScalerContext();
+
+ // The set of glyphs cached on the remote client.
+ SkTHashSet<SkPackedGlyphID> fCachedGlyphImages;
+ SkTHashSet<SkPackedGlyphID> fCachedGlyphPaths;
+
+    // The set of glyphs that have not yet been serialized and sent to the
+    // remote client.
+ std::vector<SkPackedGlyphID> fPendingGlyphImages;
+ std::vector<SkPackedGlyphID> fPendingGlyphPaths;
+
+ const SkAutoDescriptor fDescriptor;
+
+ const SkDiscardableHandleId fDiscardableHandleId;
+
+ const SkGlyphPositionRoundingSpec fRoundingSpec;
+
+ // The context built using fDescriptor
+ std::unique_ptr<SkScalerContext> fContext;
+
+    // These fields are set every time getOrCreateCache is called. This allows the code to
+    // keep fContext as lazy as possible.
+ const SkTypeface* fTypeface{nullptr};
+ SkScalerContextEffects fEffects;
+
+ bool fHaveSentFontMetrics{false};
+
+ class GlyphMapHashTraits {
+ public:
+ static SkPackedGlyphID GetKey(const SkGlyph* glyph) {
+ return glyph->getPackedID();
+ }
+ static uint32_t Hash(SkPackedGlyphID glyphId) {
+ return glyphId.hash();
+ }
+ };
+
+ // FallbackTextHelper cases require glyph metrics when analyzing a glyph run, in which case
+ // we cache them here.
+ SkTHashTable<SkGlyph*, SkPackedGlyphID, GlyphMapHashTraits> fGlyphMap;
+
+ SkArenaAlloc fAlloc{256};
+};
+
+SkStrikeServer::RemoteStrike::RemoteStrike(
+ const SkDescriptor& descriptor,
+ std::unique_ptr<SkScalerContext> context,
+ uint32_t discardableHandleId)
+ : fDescriptor{descriptor}
+ , fDiscardableHandleId(discardableHandleId)
+ , fRoundingSpec{context->isSubpixel(), context->computeAxisAlignmentForHText()}
+ // N.B. context must come last because it is used above.
+ , fContext{std::move(context)} {
+ SkASSERT(fDescriptor.getDesc() != nullptr);
+ SkASSERT(fContext != nullptr);
+}
+
+SkStrikeServer::RemoteStrike::~RemoteStrike() = default;
+
+void SkStrikeServer::RemoteStrike::addGlyph(SkPackedGlyphID glyph, bool asPath) {
+ auto* cache = asPath ? &fCachedGlyphPaths : &fCachedGlyphImages;
+ auto* pending = asPath ? &fPendingGlyphPaths : &fPendingGlyphImages;
+
+ // Already cached.
+ if (cache->contains(glyph)) {
+ return;
+ }
+
+ // A glyph is going to be sent. Make sure we have a scaler context to send it.
+ this->ensureScalerContext();
+
+    // Serialize and cache. The scaler context ensured above will be used when
+    // serializing this glyph.
+ cache->add(glyph);
+ pending->push_back(glyph);
+}
+
+size_t SkStrikeServer::MapOps::operator()(const SkDescriptor* key) const {
+ return key->getChecksum();
+}
+
+bool SkStrikeServer::MapOps::operator()(const SkDescriptor* lhs, const SkDescriptor* rhs) const {
+ return *lhs == *rhs;
+}
+
+// -- TrackLayerDevice -----------------------------------------------------------------------------
+class SkTextBlobCacheDiffCanvas::TrackLayerDevice final : public SkNoPixelsDevice {
+public:
+ TrackLayerDevice(
+ const SkIRect& bounds, const SkSurfaceProps& props, SkStrikeServer* server,
+ sk_sp<SkColorSpace> colorSpace, bool DFTSupport)
+ : SkNoPixelsDevice(bounds, props, std::move(colorSpace))
+ , fStrikeServer(server)
+ , fDFTSupport(DFTSupport)
+ , fPainter{props, kUnknown_SkColorType, imageInfo().colorSpace(), fStrikeServer} {
+ SkASSERT(fStrikeServer != nullptr);
+ }
+
+ SkBaseDevice* onCreateDevice(const CreateInfo& cinfo, const SkPaint*) override {
+ const SkSurfaceProps surfaceProps(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+ return new TrackLayerDevice(this->getGlobalBounds(), surfaceProps, fStrikeServer,
+ cinfo.fInfo.refColorSpace(), fDFTSupport);
+ }
+
+protected:
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override {
+#if SK_SUPPORT_GPU
+ GrTextContext::Options options;
+ GrTextContext::SanitizeOptions(&options);
+
+ fPainter.processGlyphRunList(glyphRunList,
+ this->ctm(),
+ this->surfaceProps(),
+ fDFTSupport,
+ options,
+ nullptr);
+#endif // SK_SUPPORT_GPU
+ }
+
+private:
+ SkStrikeServer* const fStrikeServer;
+ const bool fDFTSupport{false};
+ SkGlyphRunListPainter fPainter;
+};
+
+// -- SkTextBlobCacheDiffCanvas -------------------------------------------------------------------
+SkTextBlobCacheDiffCanvas::SkTextBlobCacheDiffCanvas(int width, int height,
+ const SkSurfaceProps& props,
+ SkStrikeServer* strikeServer,
+ bool DFTSupport)
+ : SkTextBlobCacheDiffCanvas{width, height, props, strikeServer, nullptr, DFTSupport} { }
+
+SkTextBlobCacheDiffCanvas::SkTextBlobCacheDiffCanvas(int width, int height,
+ const SkSurfaceProps& props,
+ SkStrikeServer* strikeServer,
+ sk_sp<SkColorSpace> colorSpace,
+ bool DFTSupport)
+ : SkNoDrawCanvas{sk_make_sp<TrackLayerDevice>(SkIRect::MakeWH(width, height),
+ props,
+ strikeServer,
+ std::move(colorSpace),
+ DFTSupport)} { }
+
+SkTextBlobCacheDiffCanvas::~SkTextBlobCacheDiffCanvas() = default;
+
+SkCanvas::SaveLayerStrategy SkTextBlobCacheDiffCanvas::getSaveLayerStrategy(
+ const SaveLayerRec& rec) {
+ return kFullLayer_SaveLayerStrategy;
+}
+
+bool SkTextBlobCacheDiffCanvas::onDoSaveBehind(const SkRect*) {
+ return false;
+}
+
+void SkTextBlobCacheDiffCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkCanvas::onDrawTextBlob(blob, x, y, paint);
+}
+
+// -- WireTypeface ---------------------------------------------------------------------------------
+struct WireTypeface {
+ WireTypeface() = default;
+ WireTypeface(SkFontID typeface_id, int glyph_count, SkFontStyle style, bool is_fixed)
+ : typefaceID(typeface_id), glyphCount(glyph_count), style(style), isFixed(is_fixed) {}
+
+ SkFontID typefaceID{0};
+ int glyphCount{0};
+ SkFontStyle style;
+ bool isFixed{false};
+};
+
+// SkStrikeServer ----------------------------------------------------------------------------------
+SkStrikeServer::SkStrikeServer(DiscardableHandleManager* discardableHandleManager)
+ : fDiscardableHandleManager(discardableHandleManager) {
+ SkASSERT(fDiscardableHandleManager);
+}
+
+SkStrikeServer::~SkStrikeServer() = default;
+
+sk_sp<SkData> SkStrikeServer::serializeTypeface(SkTypeface* tf) {
+ auto* data = fSerializedTypefaces.find(SkTypeface::UniqueID(tf));
+ if (data) {
+ return *data;
+ }
+
+ WireTypeface wire(SkTypeface::UniqueID(tf), tf->countGlyphs(), tf->fontStyle(),
+ tf->isFixedPitch());
+ data = fSerializedTypefaces.set(SkTypeface::UniqueID(tf),
+ SkData::MakeWithCopy(&wire, sizeof(wire)));
+ return *data;
+}
+
+void SkStrikeServer::writeStrikeData(std::vector<uint8_t>* memory) {
+ size_t strikesToSend = 0;
+ fRemoteStrikesToSend.foreach ([&strikesToSend](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strikesToSend++;
+ } else {
+ strike->resetScalerContext();
+ }
+ });
+
+ if (strikesToSend == 0 && fTypefacesToSend.empty()) {
+ fRemoteStrikesToSend.reset();
+ return;
+ }
+
+ Serializer serializer(memory);
+ serializer.emplace<uint64_t>(fTypefacesToSend.size());
+ for (const auto& tf : fTypefacesToSend) {
+ serializer.write<WireTypeface>(tf);
+ }
+ fTypefacesToSend.clear();
+
+ serializer.emplace<uint64_t>(SkTo<uint64_t>(strikesToSend));
+ fRemoteStrikesToSend.foreach (
+#ifdef SK_DEBUG
+ [&serializer, this](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strike->writePendingGlyphs(&serializer);
+ strike->resetScalerContext();
+ }
+ auto it = fDescToRemoteStrike.find(&strike->getDescriptor());
+ SkASSERT(it != fDescToRemoteStrike.end());
+ SkASSERT(it->second.get() == strike);
+ }
+
+#else
+ [&serializer](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strike->writePendingGlyphs(&serializer);
+ strike->resetScalerContext();
+ }
+ }
+#endif
+ );
+ fRemoteStrikesToSend.reset();
+}
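+
+// Wire layout produced by writeStrikeData() and consumed by
+// SkStrikeClient::readStrikeData():
+//   uint64_t      typeface count
+//   WireTypeface  x typeface count
+//   uint64_t      strike count
+//   per strike:   see RemoteStrike::writePendingGlyphs() below.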
+
+SkStrikeServer::RemoteStrike* SkStrikeServer::getOrCreateCache(
+ const SkPaint& paint,
+ const SkFont& font,
+ const SkSurfaceProps& props,
+ const SkMatrix& matrix,
+ SkScalerContextFlags flags,
+ SkScalerContextEffects* effects) {
+ SkAutoDescriptor descStorage;
+ auto desc = create_descriptor(paint, font, matrix, props, flags, &descStorage, effects);
+
+ return this->getOrCreateCache(*desc, *font.getTypefaceOrDefault(), *effects);
+}
+
+SkScopedStrikeForGPU SkStrikeServer::findOrCreateScopedStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) {
+ return SkScopedStrikeForGPU{this->getOrCreateCache(desc, typeface, effects)};
+}
+
+void SkStrikeServer::AddGlyphForTesting(
+ RemoteStrike* cache, SkPackedGlyphID glyphID, bool asPath) {
+ cache->addGlyph(glyphID, asPath);
+}
+
+void SkStrikeServer::checkForDeletedEntries() {
+ auto it = fDescToRemoteStrike.begin();
+ while (fDescToRemoteStrike.size() > fMaxEntriesInDescriptorMap &&
+ it != fDescToRemoteStrike.end()) {
+ RemoteStrike* strike = it->second.get();
+ if (fDiscardableHandleManager->isHandleDeleted(strike->discardableHandleId())) {
+ // If we are removing the strike, we better not be trying to send it at the same time.
+ SkASSERT(!fRemoteStrikesToSend.contains(strike));
+ it = fDescToRemoteStrike.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+SkStrikeServer::RemoteStrike* SkStrikeServer::getOrCreateCache(
+ const SkDescriptor& desc, const SkTypeface& typeface, SkScalerContextEffects effects) {
+
+    // In cases where tracing is turned off, make sure we don't get an unused-function
+    // warning: lambdaize the function.
+ TRACE_EVENT1("skia", "RecForDesc", "rec",
+ TRACE_STR_COPY(
+ [&desc](){
+ auto ptr = desc.findEntry(kRec_SkDescriptorTag, nullptr);
+ SkScalerContextRec rec;
+ std::memcpy(&rec, ptr, sizeof(rec));
+ return rec.dump();
+ }().c_str()
+ )
+ );
+
+ auto it = fDescToRemoteStrike.find(&desc);
+ if (it != fDescToRemoteStrike.end()) {
+ // We have processed the RemoteStrike before. Reuse it.
+ RemoteStrike* strike = it->second.get();
+ strike->setTypefaceAndEffects(&typeface, effects);
+ if (fRemoteStrikesToSend.contains(strike)) {
+ // Already tracking
+ return strike;
+ }
+
+ // Strike is in unknown state on GPU. Start tracking strike on GPU by locking it.
+ bool locked = fDiscardableHandleManager->lockHandle(it->second->discardableHandleId());
+ if (locked) {
+ fRemoteStrikesToSend.add(strike);
+ return strike;
+ }
+
+ fDescToRemoteStrike.erase(it);
+ }
+
+ // Create a new RemoteStrike. Start by processing the typeface.
+ const SkFontID typefaceId = typeface.uniqueID();
+ if (!fCachedTypefaces.contains(typefaceId)) {
+ fCachedTypefaces.add(typefaceId);
+ fTypefacesToSend.emplace_back(typefaceId, typeface.countGlyphs(),
+ typeface.fontStyle(),
+ typeface.isFixedPitch());
+ }
+
+ auto context = typeface.createScalerContext(effects, &desc);
+ auto newHandle = fDiscardableHandleManager->createHandle(); // Locked on creation
+ auto remoteStrike = skstd::make_unique<RemoteStrike>(desc, std::move(context), newHandle);
+ remoteStrike->setTypefaceAndEffects(&typeface, effects);
+ auto remoteStrikePtr = remoteStrike.get();
+ fRemoteStrikesToSend.add(remoteStrikePtr);
+ auto d = &remoteStrike->getDescriptor();
+ fDescToRemoteStrike[d] = std::move(remoteStrike);
+
+ checkForDeletedEntries();
+
+ // Be sure we can build glyphs with this RemoteStrike.
+ remoteStrikePtr->setTypefaceAndEffects(&typeface, effects);
+ return remoteStrikePtr;
+}
+
+// No need to write fForceBW because it is a flag private to SkScalerContext_DW, which will never
+// be called on the GPU side.
+static void writeGlyph(SkGlyph* glyph, Serializer* serializer) {
+ serializer->write<SkPackedGlyphID>(glyph->getPackedID());
+ serializer->write<float>(glyph->advanceX());
+ serializer->write<float>(glyph->advanceY());
+ serializer->write<uint16_t>(glyph->width());
+ serializer->write<uint16_t>(glyph->height());
+ serializer->write<int16_t>(glyph->top());
+ serializer->write<int16_t>(glyph->left());
+ serializer->write<uint8_t>(glyph->maskFormat());
+}
+
+void SkStrikeServer::RemoteStrike::writePendingGlyphs(Serializer* serializer) {
+ SkASSERT(this->hasPendingGlyphs());
+
+ // Write the desc.
+ serializer->emplace<StrikeSpec>(fContext->getTypeface()->uniqueID(), fDiscardableHandleId);
+ serializer->writeDescriptor(*fDescriptor.getDesc());
+
+ serializer->emplace<bool>(fHaveSentFontMetrics);
+ if (!fHaveSentFontMetrics) {
+ // Write FontMetrics if not sent before.
+ SkFontMetrics fontMetrics;
+ fContext->getFontMetrics(&fontMetrics);
+ serializer->write<SkFontMetrics>(fontMetrics);
+ fHaveSentFontMetrics = true;
+ }
+
+ // Write glyphs images.
+ serializer->emplace<uint64_t>(fPendingGlyphImages.size());
+ for (const auto& glyphID : fPendingGlyphImages) {
+ SkGlyph glyph{glyphID};
+ fContext->getMetrics(&glyph);
+ SkASSERT(SkMask::IsValidFormat(glyph.fMaskFormat));
+
+ writeGlyph(&glyph, serializer);
+ auto imageSize = glyph.imageSize();
+ if (imageSize == 0u) continue;
+
+ glyph.fImage = serializer->allocate(imageSize, glyph.formatAlignment());
+ fContext->getImage(glyph);
+        // TODO: Generating the image can change the mask format; do we need to update it
+        // in the serialized glyph?
+ }
+ fPendingGlyphImages.clear();
+
+ // Write glyphs paths.
+ serializer->emplace<uint64_t>(fPendingGlyphPaths.size());
+ for (const auto& glyphID : fPendingGlyphPaths) {
+ SkGlyph glyph{glyphID};
+ fContext->getMetrics(&glyph);
+ SkASSERT(SkMask::IsValidFormat(glyph.fMaskFormat));
+
+ writeGlyph(&glyph, serializer);
+ writeGlyphPath(glyphID, serializer);
+ }
+ fPendingGlyphPaths.clear();
+}
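+
+// Per-strike wire layout written above:
+//   StrikeSpec { typefaceID, discardableHandleId }
+//   uint32_t descriptor length, followed by the descriptor bytes
+//   bool "metrics already sent" flag, followed by SkFontMetrics when false
+//   uint64_t image-glyph count; per glyph: metrics, then image bytes (if any)
+//   uint64_t path-glyph count; per glyph: metrics, then uint64_t path size
+//   and path bytes (a size of 0 means no path)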
+
+void SkStrikeServer::RemoteStrike::ensureScalerContext() {
+ if (fContext == nullptr) {
+ fContext = fTypeface->createScalerContext(fEffects, fDescriptor.getDesc());
+ }
+}
+
+void SkStrikeServer::RemoteStrike::resetScalerContext() {
+ fContext.reset();
+ fTypeface = nullptr;
+}
+
+void SkStrikeServer::RemoteStrike::setTypefaceAndEffects(
+ const SkTypeface* typeface, SkScalerContextEffects effects) {
+ fTypeface = typeface;
+ fEffects = effects;
+}
+
+void SkStrikeServer::RemoteStrike::writeGlyphPath(const SkPackedGlyphID& glyphID,
+ Serializer* serializer) const {
+ SkPath path;
+ if (!fContext->getPath(glyphID, &path)) {
+ serializer->write<uint64_t>(0u);
+ return;
+ }
+
+ size_t pathSize = path.writeToMemory(nullptr);
+ serializer->write<uint64_t>(pathSize);
+ path.writeToMemory(serializer->allocate(pathSize, kPathAlignment));
+}
+
+// Be sure to read and understand the comment for prepareForDrawingRemoveEmpty in
+// SkStrikeForGPU.h before working on this code.
+SkSpan<const SkGlyphPos>
+SkStrikeServer::RemoteStrike::prepareForDrawingRemoveEmpty(
+ const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[], size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) {
+ size_t drawableGlyphCount = 0;
+ for (size_t i = 0; i < n; i++) {
+ SkPoint glyphPos = positions[i];
+
+ // Check the cache for the glyph.
+ SkGlyph* glyphPtr = fGlyphMap.findOrNull(packedGlyphIDs[i]);
+
+ // Has this glyph ever been seen before?
+ if (glyphPtr == nullptr) {
+
+ // Never seen before. Make a new glyph.
+ glyphPtr = fAlloc.make<SkGlyph>(packedGlyphIDs[i]);
+ fGlyphMap.set(glyphPtr);
+ this->ensureScalerContext();
+ fContext->getMetrics(glyphPtr);
+
+ if (glyphPtr->maxDimension() <= maxDimension) {
+ // do nothing
+ } else if (!glyphPtr->isColor()) {
+ // The glyph is too big for the atlas, but it is not color, so it is handled
+ // with a path.
+ if (glyphPtr->setPath(&fAlloc, fContext.get())) {
+                    // Always send the path data, even if it's not available, to make sure empty
+                    // paths are not incorrectly assumed to be cache misses.
+ fCachedGlyphPaths.add(glyphPtr->getPackedID());
+ fPendingGlyphPaths.push_back(glyphPtr->getPackedID());
+ }
+ } else {
+ // This will be handled by the fallback strike.
+ SkASSERT(glyphPtr->maxDimension() > maxDimension && glyphPtr->isColor());
+ }
+
+ // Make sure to send the glyph to the GPU because we always send the image for a glyph.
+ fCachedGlyphImages.add(packedGlyphIDs[i]);
+ fPendingGlyphImages.push_back(packedGlyphIDs[i]);
+ }
+
+ // Each non-empty glyph needs to be added as per the contract for
+ // prepareForDrawingRemoveEmpty.
+ // TODO(herb): Change the code to only send the glyphs for fallback?
+ if (!glyphPtr->isEmpty()) {
+ results[drawableGlyphCount++] = {i, glyphPtr, glyphPos};
+ }
+ }
+ return SkMakeSpan(results, drawableGlyphCount);
+}
+
+// SkStrikeClient ----------------------------------------------------------------------------------
+class SkStrikeClient::DiscardableStrikePinner : public SkStrikePinner {
+public:
+ DiscardableStrikePinner(SkDiscardableHandleId discardableHandleId,
+ sk_sp<DiscardableHandleManager> manager)
+ : fDiscardableHandleId(discardableHandleId), fManager(std::move(manager)) {}
+
+ ~DiscardableStrikePinner() override = default;
+ bool canDelete() override { return fManager->deleteHandle(fDiscardableHandleId); }
+
+private:
+ const SkDiscardableHandleId fDiscardableHandleId;
+ sk_sp<DiscardableHandleManager> fManager;
+};
+
+SkStrikeClient::SkStrikeClient(sk_sp<DiscardableHandleManager> discardableManager,
+ bool isLogging,
+ SkStrikeCache* strikeCache)
+ : fDiscardableHandleManager(std::move(discardableManager))
+ , fStrikeCache{strikeCache ? strikeCache : SkStrikeCache::GlobalStrikeCache()}
+ , fIsLogging{isLogging} {}
+
+SkStrikeClient::~SkStrikeClient() = default;
+
+#define READ_FAILURE \
+ { \
+ SkDebugf("Bad font data serialization line: %d", __LINE__); \
+ DiscardableHandleManager::ReadFailureData data = { \
+ memorySize, deserializer.bytesRead(), typefaceSize, \
+ strikeCount, glyphImagesCount, glyphPathsCount}; \
+ fDiscardableHandleManager->notifyReadFailure(data); \
+ return false; \
+ }
+
+// No need to read fForceBW because it is a flag private to SkScalerContext_DW, which will never
+// be called on the GPU side.
+bool SkStrikeClient::ReadGlyph(SkTLazy<SkGlyph>& glyph, Deserializer* deserializer) {
+ SkPackedGlyphID glyphID;
+ if (!deserializer->read<SkPackedGlyphID>(&glyphID)) return false;
+ glyph.init(glyphID);
+ if (!deserializer->read<float>(&glyph->fAdvanceX)) return false;
+ if (!deserializer->read<float>(&glyph->fAdvanceY)) return false;
+ if (!deserializer->read<uint16_t>(&glyph->fWidth)) return false;
+ if (!deserializer->read<uint16_t>(&glyph->fHeight)) return false;
+ if (!deserializer->read<int16_t>(&glyph->fTop)) return false;
+ if (!deserializer->read<int16_t>(&glyph->fLeft)) return false;
+ if (!deserializer->read<uint8_t>(&glyph->fMaskFormat)) return false;
+ if (!SkMask::IsValidFormat(glyph->fMaskFormat)) return false;
+
+ return true;
+}
+
+bool SkStrikeClient::readStrikeData(const volatile void* memory, size_t memorySize) {
+ SkASSERT(memorySize != 0u);
+ Deserializer deserializer(static_cast<const volatile char*>(memory), memorySize);
+
+ uint64_t typefaceSize = 0u;
+ uint64_t strikeCount = 0u;
+ uint64_t glyphImagesCount = 0u;
+ uint64_t glyphPathsCount = 0u;
+
+ if (!deserializer.read<uint64_t>(&typefaceSize)) READ_FAILURE
+
+ for (size_t i = 0; i < typefaceSize; ++i) {
+ WireTypeface wire;
+ if (!deserializer.read<WireTypeface>(&wire)) READ_FAILURE
+
+ // TODO(khushalsagar): The typeface no longer needs a reference to the
+ // SkStrikeClient, since all needed glyphs must have been pushed before
+ // raster.
+ addTypeface(wire);
+ }
+
+ if (!deserializer.read<uint64_t>(&strikeCount)) READ_FAILURE
+
+ for (size_t i = 0; i < strikeCount; ++i) {
+ StrikeSpec spec;
+ if (!deserializer.read<StrikeSpec>(&spec)) READ_FAILURE
+
+ SkAutoDescriptor sourceAd;
+ if (!deserializer.readDescriptor(&sourceAd)) READ_FAILURE
+
+ bool fontMetricsInitialized;
+ if (!deserializer.read(&fontMetricsInitialized)) READ_FAILURE
+
+ SkFontMetrics fontMetrics{};
+ if (!fontMetricsInitialized) {
+ if (!deserializer.read<SkFontMetrics>(&fontMetrics)) READ_FAILURE
+ }
+
+ // Get the local typeface from remote fontID.
+ auto* tfPtr = fRemoteFontIdToTypeface.find(spec.typefaceID);
+ // Received strikes for a typeface which doesn't exist.
+ if (!tfPtr) READ_FAILURE
+ auto* tf = tfPtr->get();
+
+        // Replace the ContextRec in the desc from the server to create the client-side
+        // descriptor.
+        // TODO: Can we do this in-place and re-compute the checksum, instead of making a
+        // complete copy?
+ SkAutoDescriptor ad;
+ auto* client_desc = auto_descriptor_from_desc(sourceAd.getDesc(), tf->uniqueID(), &ad);
+
+ auto strike = fStrikeCache->findStrikeExclusive(*client_desc);
+ // Metrics are only sent the first time. If the metrics are not initialized, there must
+ // be an existing strike.
+ if (fontMetricsInitialized && strike == nullptr) READ_FAILURE
+ if (strike == nullptr) {
+ // Note that we don't need to deserialize the effects since we won't be generating any
+ // glyphs here anyway, and the desc is still correct since it includes the serialized
+ // effects.
+ SkScalerContextEffects effects;
+ auto scaler = SkStrikeCache::CreateScalerContext(*client_desc, effects, *tf);
+ strike = fStrikeCache->createStrikeExclusive(
+ *client_desc, std::move(scaler), &fontMetrics,
+ skstd::make_unique<DiscardableStrikePinner>(spec.discardableHandleId,
+ fDiscardableHandleManager));
+ auto proxyContext = static_cast<SkScalerContextProxy*>(strike->getScalerContext());
+ proxyContext->initCache(strike.get(), fStrikeCache);
+ }
+
+ if (!deserializer.read<uint64_t>(&glyphImagesCount)) READ_FAILURE
+ for (size_t j = 0; j < glyphImagesCount; j++) {
+ SkTLazy<SkGlyph> glyph;
+ if (!ReadGlyph(glyph, &deserializer)) READ_FAILURE
+
+ if (!glyph->isEmpty()) {
+ const volatile void* image =
+ deserializer.read(glyph->imageSize(), glyph->formatAlignment());
+ if (!image) READ_FAILURE
+ glyph->fImage = (void*)image;
+ }
+
+ strike->mergeGlyphAndImage(glyph->getPackedID(), *glyph);
+ }
+
+ if (!deserializer.read<uint64_t>(&glyphPathsCount)) READ_FAILURE
+ for (size_t j = 0; j < glyphPathsCount; j++) {
+ SkTLazy<SkGlyph> glyph;
+ if (!ReadGlyph(glyph, &deserializer)) READ_FAILURE
+
+ SkGlyph* allocatedGlyph = strike->mergeGlyphAndImage(glyph->getPackedID(), *glyph);
+
+ SkPath* pathPtr = nullptr;
+ SkPath path;
+ uint64_t pathSize = 0u;
+ if (!deserializer.read<uint64_t>(&pathSize)) READ_FAILURE
+
+ if (pathSize > 0) {
+ auto* pathData = deserializer.read(pathSize, kPathAlignment);
+ if (!pathData) READ_FAILURE
+ if (!path.readFromMemory(const_cast<const void*>(pathData), pathSize)) READ_FAILURE
+ pathPtr = &path;
+ }
+
+ strike->preparePath(allocatedGlyph, pathPtr);
+ }
+ }
+
+ return true;
+}
+
+sk_sp<SkTypeface> SkStrikeClient::deserializeTypeface(const void* buf, size_t len) {
+ WireTypeface wire;
+ if (len != sizeof(wire)) return nullptr;
+ memcpy(&wire, buf, sizeof(wire));
+ return this->addTypeface(wire);
+}
+
+sk_sp<SkTypeface> SkStrikeClient::addTypeface(const WireTypeface& wire) {
+ auto* typeface = fRemoteFontIdToTypeface.find(wire.typefaceID);
+ if (typeface) return *typeface;
+
+ auto newTypeface = sk_make_sp<SkTypefaceProxy>(
+ wire.typefaceID, wire.glyphCount, wire.style, wire.isFixed,
+ fDiscardableHandleManager, fIsLogging);
+ fRemoteFontIdToTypeface.set(wire.typefaceID, newTypeface);
+ return newTypeface;
+}
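+
+// A sketch of the intended round trip (illustrative only; the handle-manager
+// objects and the blob/typeface/paint variables are hypothetical embedder
+// state):
+//
+//   // Server side: record text, then ship the accumulated diff.
+//   SkStrikeServer server(&serverHandleManager);
+//   sk_sp<SkData> tfData = server.serializeTypeface(typeface.get());
+//   SkTextBlobCacheDiffCanvas canvas(width, height, props, &server);
+//   canvas.drawTextBlob(blob.get(), x, y, paint);
+//   std::vector<uint8_t> strikeData;
+//   server.writeStrikeData(&strikeData);
+//
+//   // Client side: replay the diff before rasterizing the real draw ops.
+//   SkStrikeClient client(clientHandleManager);
+//   sk_sp<SkTypeface> tf = client.deserializeTypeface(tfData->data(), tfData->size());
+//   bool ok = client.readStrikeData(strikeData.data(), strikeData.size());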
diff --git a/gfx/skia/skia/src/core/SkRemoteGlyphCache.h b/gfx/skia/skia/src/core/SkRemoteGlyphCache.h
new file mode 100644
index 0000000000..d81413f55c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRemoteGlyphCache.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemoteGlyphCache_DEFINED
+#define SkRemoteGlyphCache_DEFINED
+
+#include <memory>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTHash.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkStrikeForGPU.h"
+#include "src/core/SkTLazy.h"
+
+class Deserializer;
+class Serializer;
+enum SkAxisAlignment : uint32_t;
+class SkDescriptor;
+class SkStrike;
+struct SkPackedGlyphID;
+enum SkScalerContextFlags : uint32_t;
+class SkStrikeCache;
+class SkTypefaceProxy;
+struct WireTypeface;
+
+class SkStrikeServer;
+
+// A SkTextBlobCacheDiffCanvas is used to populate the SkStrikeServer with ops
+// which will be serialized and rendered using the SkStrikeClient.
+class SkTextBlobCacheDiffCanvas : public SkNoDrawCanvas {
+public:
+
+ // For testing use only
+ SkTextBlobCacheDiffCanvas(int width, int height, const SkSurfaceProps& props,
+ SkStrikeServer* strikeServer, bool DFTSupport = true);
+
+ SK_API SkTextBlobCacheDiffCanvas(int width, int height, const SkSurfaceProps& props,
+ SkStrikeServer* strikeServer, sk_sp<SkColorSpace> colorSpace,
+ bool DFTSupport);
+
+ SK_API ~SkTextBlobCacheDiffCanvas() override;
+
+protected:
+ SkCanvas::SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& rec) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+private:
+ class TrackLayerDevice;
+};
+
+using SkDiscardableHandleId = uint32_t;
+
+// This class is not thread-safe.
+class SkStrikeServer final : public SkStrikeForGPUCacheInterface {
+public:
+ // An interface used by the server to create handles for pinning SkStrike
+ // entries on the remote client.
+ class DiscardableHandleManager {
+ public:
+ SK_API virtual ~DiscardableHandleManager() = default;
+
+ // Creates a new *locked* handle and returns a unique ID that can be used to identify
+ // it on the remote client.
+ SK_API virtual SkDiscardableHandleId createHandle() = 0;
+
+ // Returns true if the handle could be successfully locked. The server can
+ // assume it will remain locked until the next set of serialized entries is
+ // pulled from the SkStrikeServer.
+ // If returns false, the cache entry mapped to the handle has been deleted
+ // on the client. Any subsequent attempts to lock the same handle are not
+ // allowed.
+ SK_API virtual bool lockHandle(SkDiscardableHandleId) = 0;
+
+ // Returns true if a handle has been deleted on the remote client. It is
+ // invalid to use a handle id again with this manager once this returns true.
+ // TODO(khushalsagar): Make pure virtual once chrome implementation lands.
+ SK_API virtual bool isHandleDeleted(SkDiscardableHandleId) { return false; }
+ };
+
+ SK_API explicit SkStrikeServer(DiscardableHandleManager* discardableHandleManager);
+ SK_API ~SkStrikeServer() override;
+
+ // Serializes the typeface to be transmitted using this server.
+ SK_API sk_sp<SkData> serializeTypeface(SkTypeface*);
+
+ // Serializes the strike data captured using a SkTextBlobCacheDiffCanvas. Any
+ // handles locked using the DiscardableHandleManager will be assumed to be
+ // unlocked after this call.
+ SK_API void writeStrikeData(std::vector<uint8_t>* memory);
+
+ // Methods used internally in Skia ------------------------------------------
+ class RemoteStrike;
+
+ RemoteStrike* getOrCreateCache(const SkPaint&,
+ const SkFont& font,
+ const SkSurfaceProps&,
+ const SkMatrix&,
+ SkScalerContextFlags flags,
+ SkScalerContextEffects* effects);
+
+ SkScopedStrikeForGPU findOrCreateScopedStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) override;
+
+ static void AddGlyphForTesting(
+ RemoteStrike* cache, SkPackedGlyphID glyphID, bool asPath);
+
+ void setMaxEntriesInDescriptorMapForTesting(size_t count) {
+ fMaxEntriesInDescriptorMap = count;
+ }
+ size_t remoteStrikeMapSizeForTesting() const { return fDescToRemoteStrike.size(); }
+
+private:
+ static constexpr size_t kMaxEntriesInDescriptorMap = 2000u;
+
+ void checkForDeletedEntries();
+
+ RemoteStrike* getOrCreateCache(const SkDescriptor& desc,
+ const SkTypeface& typeface,
+ SkScalerContextEffects effects);
+
+ struct MapOps {
+ size_t operator()(const SkDescriptor* key) const;
+ bool operator()(const SkDescriptor* lhs, const SkDescriptor* rhs) const;
+ };
+ using DescToRemoteStrike =
+ std::unordered_map<const SkDescriptor*, std::unique_ptr<RemoteStrike>, MapOps, MapOps>;
+ DescToRemoteStrike fDescToRemoteStrike;
+
+ DiscardableHandleManager* const fDiscardableHandleManager;
+ SkTHashSet<SkFontID> fCachedTypefaces;
+ size_t fMaxEntriesInDescriptorMap = kMaxEntriesInDescriptorMap;
+
+ // Cached serialized typefaces.
+ SkTHashMap<SkFontID, sk_sp<SkData>> fSerializedTypefaces;
+
+ // State cached until the next serialization.
+ SkTHashSet<RemoteStrike*> fRemoteStrikesToSend;
+ std::vector<WireTypeface> fTypefacesToSend;
+};
+
+class SkStrikeClient {
+public:
+ // This enum is used in histogram reporting in chromium. Please don't re-order the list of
+ // entries, and consider it to be append-only.
+ enum CacheMissType : uint32_t {
+ // Hard failures where no fallback could be found.
+ kFontMetrics = 0,
+ kGlyphMetrics = 1,
+ kGlyphImage = 2,
+ kGlyphPath = 3,
+
+ // The original glyph could not be found and a fallback was used.
+ kGlyphMetricsFallback = 4,
+ kGlyphPathFallback = 5,
+
+ kLast = kGlyphPathFallback
+ };
+
+ // An interface to delete handles that may be pinned by the remote server.
+ class DiscardableHandleManager : public SkRefCnt {
+ public:
+ ~DiscardableHandleManager() override = default;
+
+ // Returns true if the handle was unlocked and can be safely deleted. Once
+ // successful, subsequent attempts to delete the same handle are invalid.
+ virtual bool deleteHandle(SkDiscardableHandleId) = 0;
+
+ virtual void notifyCacheMiss(CacheMissType) {}
+
+ struct ReadFailureData {
+ size_t memorySize;
+ size_t bytesRead;
+ uint64_t typefaceSize;
+ uint64_t strikeCount;
+ uint64_t glyphImagesCount;
+ uint64_t glyphPathsCount;
+ };
+ virtual void notifyReadFailure(const ReadFailureData& data) {}
+ };
+
+ SK_API explicit SkStrikeClient(sk_sp<DiscardableHandleManager>,
+ bool isLogging = true,
+ SkStrikeCache* strikeCache = nullptr);
+ SK_API ~SkStrikeClient();
+
+ // Deserializes the typeface previously serialized using the SkStrikeServer. Returns null if the
+ // data is invalid.
+ SK_API sk_sp<SkTypeface> deserializeTypeface(const void* data, size_t length);
+
+ // Deserializes the strike data from a SkStrikeServer. All messages generated
+ // from a server when serializing the ops must be deserialized before the op
+ // is rasterized.
+ // Returns false if the data is invalid.
+ SK_API bool readStrikeData(const volatile void* memory, size_t memorySize);
+
+private:
+ class DiscardableStrikePinner;
+
+ static bool ReadGlyph(SkTLazy<SkGlyph>& glyph, Deserializer* deserializer);
+ sk_sp<SkTypeface> addTypeface(const WireTypeface& wire);
+
+ SkTHashMap<SkFontID, sk_sp<SkTypeface>> fRemoteFontIdToTypeface;
+ sk_sp<DiscardableHandleManager> fDiscardableHandleManager;
+ SkStrikeCache* const fStrikeCache;
+ const bool fIsLogging;
+};
+
+#endif // SkRemoteGlyphCache_DEFINED
diff --git a/gfx/skia/skia/src/core/SkResourceCache.cpp b/gfx/skia/skia/src/core/SkResourceCache.cpp
new file mode 100644
index 0000000000..61197f85cc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.cpp
@@ -0,0 +1,605 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkResourceCache.h"
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDiscardableMemory.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkOpts.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+DECLARE_SKMESSAGEBUS_MESSAGE(SkResourceCache::PurgeSharedIDMessage)
+
+static inline bool SkShouldPostMessageToBus(
+ const SkResourceCache::PurgeSharedIDMessage&, uint32_t) {
+    // SkResourceCache is typically used as a singleton, and we don't label inboxes, so all
+    // messages go to all inboxes.
+ return true;
+}
+
+// This can be defined by the caller's build system
+//#define SK_USE_DISCARDABLE_SCALEDIMAGECACHE
+
+#ifndef SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT
+# define SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT 1024
+#endif
+
+#ifndef SK_DEFAULT_IMAGE_CACHE_LIMIT
+ #define SK_DEFAULT_IMAGE_CACHE_LIMIT (32 * 1024 * 1024)
+#endif
+
+void SkResourceCache::Key::init(void* nameSpace, uint64_t sharedID, size_t dataSize) {
+ SkASSERT(SkAlign4(dataSize) == dataSize);
+
+ // fCount32 and fHash are not hashed
+    static const int kUnhashedLocal32s = 2; // fCount32 + fHash
+ static const int kSharedIDLocal32s = 2; // fSharedID_lo + fSharedID_hi
+ static const int kHashedLocal32s = kSharedIDLocal32s + (sizeof(fNamespace) >> 2);
+ static const int kLocal32s = kUnhashedLocal32s + kHashedLocal32s;
+
+ static_assert(sizeof(Key) == (kLocal32s << 2), "unaccounted_key_locals");
+ static_assert(sizeof(Key) == offsetof(Key, fNamespace) + sizeof(fNamespace),
+ "namespace_field_must_be_last");
+
+ fCount32 = SkToS32(kLocal32s + (dataSize >> 2));
+ fSharedID_lo = (uint32_t)(sharedID & 0xFFFFFFFF);
+ fSharedID_hi = (uint32_t)(sharedID >> 32);
+ fNamespace = nameSpace;
+ // skip unhashed fields when computing the hash
+ fHash = SkOpts::hash(this->as32() + kUnhashedLocal32s,
+ (fCount32 - kUnhashedLocal32s) << 2);
+}
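+
+// A minimal Key subclass following the contract above (illustrative only; the
+// names are hypothetical):
+//
+//   struct MyKey : public SkResourceCache::Key {
+//       uint32_t fValue;  // payload: tightly packed, multiple of 4 bytes
+//       explicit MyKey(uint32_t value) : fValue(value) {
+//           static uint32_t gNamespaceTag;  // unique address per subclass
+//           this->init(&gNamespaceTag, 0 /* no shared ID */, sizeof(fValue));
+//       }
+//   };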
+
+#include "include/private/SkTHash.h"
+
+namespace {
+ struct HashTraits {
+ static uint32_t Hash(const SkResourceCache::Key& key) { return key.hash(); }
+ static const SkResourceCache::Key& GetKey(const SkResourceCache::Rec* rec) {
+ return rec->getKey();
+ }
+ };
+}
+
+class SkResourceCache::Hash :
+ public SkTHashTable<SkResourceCache::Rec*, SkResourceCache::Key, HashTraits> {};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkResourceCache::init() {
+ fHead = nullptr;
+ fTail = nullptr;
+ fHash = new Hash;
+ fTotalBytesUsed = 0;
+ fCount = 0;
+ fSingleAllocationByteLimit = 0;
+
+    // One of these should be explicitly set by the caller after we return.
+ fTotalByteLimit = 0;
+ fDiscardableFactory = nullptr;
+}
+
+SkResourceCache::SkResourceCache(DiscardableFactory factory) {
+ this->init();
+ fDiscardableFactory = factory;
+}
+
+SkResourceCache::SkResourceCache(size_t byteLimit) {
+ this->init();
+ fTotalByteLimit = byteLimit;
+}
+
+SkResourceCache::~SkResourceCache() {
+ Rec* rec = fHead;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+ delete fHash;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkResourceCache::find(const Key& key, FindVisitor visitor, void* context) {
+ this->checkMessages();
+
+ if (auto found = fHash->find(key)) {
+ Rec* rec = *found;
+ if (visitor(*rec, context)) {
+ this->moveToHead(rec); // for our LRU
+ return true;
+ } else {
+ this->remove(rec); // stale
+ return false;
+ }
+ }
+ return false;
+}
+
+static void make_size_str(size_t size, SkString* str) {
+ const char suffix[] = { 'b', 'k', 'm', 'g', 't', 0 };
+ int i = 0;
+ while (suffix[i] && (size > 1024)) {
+ i += 1;
+ size >>= 10;
+ }
+ str->printf("%zu%c", size, suffix[i]);
+}
+
+static bool gDumpCacheTransactions;
+
+void SkResourceCache::add(Rec* rec, void* payload) {
+ this->checkMessages();
+
+ SkASSERT(rec);
+ // See if we already have this key (racy inserts, etc.)
+ if (Rec** preexisting = fHash->find(rec->getKey())) {
+ Rec* prev = *preexisting;
+ if (prev->canBePurged()) {
+ // if it can be purged, the install may fail, so we have to remove it
+ this->remove(prev);
+ } else {
+ // if it cannot be purged, we reuse it and delete the new one
+ prev->postAddInstall(payload);
+ delete rec;
+ return;
+ }
+ }
+
+ this->addToHead(rec);
+ fHash->set(rec);
+ rec->postAddInstall(payload);
+
+ if (gDumpCacheTransactions) {
+ SkString bytesStr, totalStr;
+ make_size_str(rec->bytesUsed(), &bytesStr);
+ make_size_str(fTotalBytesUsed, &totalStr);
+ SkDebugf("RC: add %5s %12p key %08x -- total %5s, count %d\n",
+ bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+ }
+
+ // since the new rec may push us over-budget, we perform a purge check now
+ this->purgeAsNeeded();
+}
+
+void SkResourceCache::remove(Rec* rec) {
+ SkASSERT(rec->canBePurged());
+ size_t used = rec->bytesUsed();
+ SkASSERT(used <= fTotalBytesUsed);
+
+ this->release(rec);
+ fHash->remove(rec->getKey());
+
+ fTotalBytesUsed -= used;
+ fCount -= 1;
+
+ //SkDebugf("-RC count [%3d] bytes %d\n", fCount, fTotalBytesUsed);
+
+ if (gDumpCacheTransactions) {
+ SkString bytesStr, totalStr;
+ make_size_str(used, &bytesStr);
+ make_size_str(fTotalBytesUsed, &totalStr);
+ SkDebugf("RC: remove %5s %12p key %08x -- total %5s, count %d\n",
+ bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+ }
+
+ delete rec;
+}
+
+void SkResourceCache::purgeAsNeeded(bool forcePurge) {
+ size_t byteLimit;
+ int countLimit;
+
+ if (fDiscardableFactory) {
+ countLimit = SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT;
+ byteLimit = UINT32_MAX; // no limit based on bytes
+ } else {
+ countLimit = SK_MaxS32; // no limit based on count
+ byteLimit = fTotalByteLimit;
+ }
+
+ Rec* rec = fTail;
+ while (rec) {
+ if (!forcePurge && fTotalBytesUsed < byteLimit && fCount < countLimit) {
+ break;
+ }
+
+ Rec* prev = rec->fPrev;
+ if (rec->canBePurged()) {
+ this->remove(rec);
+ }
+ rec = prev;
+ }
+}
+
+//#define SK_TRACK_PURGE_SHAREDID_HITRATE
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+static int gPurgeCallCounter;
+static int gPurgeHitCounter;
+#endif
+
+void SkResourceCache::purgeSharedID(uint64_t sharedID) {
+ if (0 == sharedID) {
+ return;
+ }
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ gPurgeCallCounter += 1;
+ bool found = false;
+#endif
+ // go backwards, just like purgeAsNeeded, just to make the code similar.
+ // could iterate either direction and still be correct.
+ Rec* rec = fTail;
+ while (rec) {
+ Rec* prev = rec->fPrev;
+ if (rec->getKey().getSharedID() == sharedID) {
+ // even though the "src" is now dead, caches could still be in-flight, so
+ // we have to check if it can be removed.
+ if (rec->canBePurged()) {
+ this->remove(rec);
+ }
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ found = true;
+#endif
+ }
+ rec = prev;
+ }
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ if (found) {
+ gPurgeHitCounter += 1;
+ }
+
+ SkDebugf("PurgeShared calls=%d hits=%d rate=%g\n", gPurgeCallCounter, gPurgeHitCounter,
+ gPurgeHitCounter * 100.0 / gPurgeCallCounter);
+#endif
+}
+
+void SkResourceCache::visitAll(Visitor visitor, void* context) {
+ // go backwards, just like purgeAsNeeded, just to make the code similar.
+ // could iterate either direction and still be correct.
+ Rec* rec = fTail;
+ while (rec) {
+ visitor(*rec, context);
+ rec = rec->fPrev;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkResourceCache::setTotalByteLimit(size_t newLimit) {
+ size_t prevLimit = fTotalByteLimit;
+ fTotalByteLimit = newLimit;
+ if (newLimit < prevLimit) {
+ this->purgeAsNeeded();
+ }
+ return prevLimit;
+}
+
+SkCachedData* SkResourceCache::newCachedData(size_t bytes) {
+ this->checkMessages();
+
+ if (fDiscardableFactory) {
+ SkDiscardableMemory* dm = fDiscardableFactory(bytes);
+ return dm ? new SkCachedData(bytes, dm) : nullptr;
+ } else {
+ return new SkCachedData(sk_malloc_throw(bytes), bytes);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkResourceCache::release(Rec* rec) {
+ Rec* prev = rec->fPrev;
+ Rec* next = rec->fNext;
+
+ if (!prev) {
+ SkASSERT(fHead == rec);
+ fHead = next;
+ } else {
+ prev->fNext = next;
+ }
+
+ if (!next) {
+ fTail = prev;
+ } else {
+ next->fPrev = prev;
+ }
+
+ rec->fNext = rec->fPrev = nullptr;
+}
+
+void SkResourceCache::moveToHead(Rec* rec) {
+ if (fHead == rec) {
+ return;
+ }
+
+ SkASSERT(fHead);
+ SkASSERT(fTail);
+
+ this->validate();
+
+ this->release(rec);
+
+ fHead->fPrev = rec;
+ rec->fNext = fHead;
+ fHead = rec;
+
+ this->validate();
+}
+
+void SkResourceCache::addToHead(Rec* rec) {
+ this->validate();
+
+ rec->fPrev = nullptr;
+ rec->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = rec;
+ }
+ fHead = rec;
+ if (!fTail) {
+ fTail = rec;
+ }
+ fTotalBytesUsed += rec->bytesUsed();
+ fCount += 1;
+
+ this->validate();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkResourceCache::validate() const {
+ if (nullptr == fHead) {
+ SkASSERT(nullptr == fTail);
+ SkASSERT(0 == fTotalBytesUsed);
+ return;
+ }
+
+ if (fHead == fTail) {
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(nullptr == fHead->fNext);
+ SkASSERT(fHead->bytesUsed() == fTotalBytesUsed);
+ return;
+ }
+
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(fHead->fNext);
+ SkASSERT(nullptr == fTail->fNext);
+ SkASSERT(fTail->fPrev);
+
+ size_t used = 0;
+ int count = 0;
+ const Rec* rec = fHead;
+ while (rec) {
+ count += 1;
+ used += rec->bytesUsed();
+ SkASSERT(used <= fTotalBytesUsed);
+ rec = rec->fNext;
+ }
+ SkASSERT(fCount == count);
+
+ rec = fTail;
+ while (rec) {
+ SkASSERT(count > 0);
+ count -= 1;
+ SkASSERT(used >= rec->bytesUsed());
+ used -= rec->bytesUsed();
+ rec = rec->fPrev;
+ }
+
+ SkASSERT(0 == count);
+ SkASSERT(0 == used);
+}
+#endif
+
+void SkResourceCache::dump() const {
+ this->validate();
+
+ SkDebugf("SkResourceCache: count=%d bytes=%d %s\n",
+ fCount, fTotalBytesUsed, fDiscardableFactory ? "discardable" : "malloc");
+}
+
+size_t SkResourceCache::setSingleAllocationByteLimit(size_t newLimit) {
+ size_t oldLimit = fSingleAllocationByteLimit;
+ fSingleAllocationByteLimit = newLimit;
+ return oldLimit;
+}
+
+size_t SkResourceCache::getSingleAllocationByteLimit() const {
+ return fSingleAllocationByteLimit;
+}
+
+size_t SkResourceCache::getEffectiveSingleAllocationByteLimit() const {
+ // fSingleAllocationByteLimit == 0 means the caller is asking for our default
+ size_t limit = fSingleAllocationByteLimit;
+
+ // if we're not discardable (i.e. we are fixed-budget) then cap the single-limit
+ // to our budget.
+ if (nullptr == fDiscardableFactory) {
+ if (0 == limit) {
+ limit = fTotalByteLimit;
+ } else {
+ limit = SkTMin(limit, fTotalByteLimit);
+ }
+ }
+ return limit;
+}
+
+void SkResourceCache::checkMessages() {
+ SkTArray<PurgeSharedIDMessage> msgs;
+ fPurgeSharedIDInbox.poll(&msgs);
+ for (int i = 0; i < msgs.count(); ++i) {
+ this->purgeSharedID(msgs[i].fSharedID);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SkResourceCache* gResourceCache = nullptr;
+static SkMutex& resource_cache_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+/** Must hold resource_cache_mutex() when calling. */
+static SkResourceCache* get_cache() {
+ // resource_cache_mutex() is always held when this is called, so we don't need to be fancy in here.
+ resource_cache_mutex().assertHeld();
+ if (nullptr == gResourceCache) {
+#ifdef SK_USE_DISCARDABLE_SCALEDIMAGECACHE
+ gResourceCache = new SkResourceCache(SkDiscardableMemory::Create);
+#else
+ gResourceCache = new SkResourceCache(SK_DEFAULT_IMAGE_CACHE_LIMIT);
+#endif
+ }
+ return gResourceCache;
+}
+
+size_t SkResourceCache::GetTotalBytesUsed() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->getTotalBytesUsed();
+}
+
+size_t SkResourceCache::GetTotalByteLimit() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->getTotalByteLimit();
+}
+
+size_t SkResourceCache::SetTotalByteLimit(size_t newLimit) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->setTotalByteLimit(newLimit);
+}
+
+SkResourceCache::DiscardableFactory SkResourceCache::GetDiscardableFactory() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->discardableFactory();
+}
+
+SkCachedData* SkResourceCache::NewCachedData(size_t bytes) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->newCachedData(bytes);
+}
+
+void SkResourceCache::Dump() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ get_cache()->dump();
+}
+
+size_t SkResourceCache::SetSingleAllocationByteLimit(size_t size) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->setSingleAllocationByteLimit(size);
+}
+
+size_t SkResourceCache::GetSingleAllocationByteLimit() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->getSingleAllocationByteLimit();
+}
+
+size_t SkResourceCache::GetEffectiveSingleAllocationByteLimit() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->getEffectiveSingleAllocationByteLimit();
+}
+
+void SkResourceCache::PurgeAll() {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->purgeAll();
+}
+
+bool SkResourceCache::Find(const Key& key, FindVisitor visitor, void* context) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ return get_cache()->find(key, visitor, context);
+}
+
+void SkResourceCache::Add(Rec* rec, void* payload) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ get_cache()->add(rec, payload);
+}
+
+void SkResourceCache::VisitAll(Visitor visitor, void* context) {
+ SkAutoMutexExclusive am(resource_cache_mutex());
+ get_cache()->visitAll(visitor, context);
+}
+
+void SkResourceCache::PostPurgeSharedID(uint64_t sharedID) {
+ if (sharedID) {
+ SkMessageBus<PurgeSharedIDMessage>::Post(PurgeSharedIDMessage(sharedID));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkImageFilter.h"
+
+size_t SkGraphics::GetResourceCacheTotalBytesUsed() {
+ return SkResourceCache::GetTotalBytesUsed();
+}
+
+size_t SkGraphics::GetResourceCacheTotalByteLimit() {
+ return SkResourceCache::GetTotalByteLimit();
+}
+
+size_t SkGraphics::SetResourceCacheTotalByteLimit(size_t newLimit) {
+ return SkResourceCache::SetTotalByteLimit(newLimit);
+}
+
+size_t SkGraphics::GetResourceCacheSingleAllocationByteLimit() {
+ return SkResourceCache::GetSingleAllocationByteLimit();
+}
+
+size_t SkGraphics::SetResourceCacheSingleAllocationByteLimit(size_t newLimit) {
+ return SkResourceCache::SetSingleAllocationByteLimit(newLimit);
+}
+
+void SkGraphics::PurgeResourceCache() {
+ SkImageFilter_Base::PurgeCache();
+ return SkResourceCache::PurgeAll();
+}
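+
+// Typical cache tuning through the SkGraphics facade above (the values are
+// illustrative):
+//
+//   SkGraphics::SetResourceCacheTotalByteLimit(64 * 1024 * 1024);
+//   SkGraphics::SetResourceCacheSingleAllocationByteLimit(8 * 1024 * 1024);
+//   SkGraphics::PurgeResourceCache();  // also purges the image-filter cache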
+
+/////////////
+
+static void dump_visitor(const SkResourceCache::Rec& rec, void*) {
+ SkDebugf("RC: %12s bytes %9lu discardable %p\n",
+ rec.getCategory(), rec.bytesUsed(), rec.diagnostic_only_getDiscardable());
+}
+
+void SkResourceCache::TestDumpMemoryStatistics() {
+ VisitAll(dump_visitor, nullptr);
+}
+
+static void sk_trace_dump_visitor(const SkResourceCache::Rec& rec, void* context) {
+ SkTraceMemoryDump* dump = static_cast<SkTraceMemoryDump*>(context);
+ SkString dumpName = SkStringPrintf("skia/sk_resource_cache/%s_%p", rec.getCategory(), &rec);
+ SkDiscardableMemory* discardable = rec.diagnostic_only_getDiscardable();
+ if (discardable) {
+ dump->setDiscardableMemoryBacking(dumpName.c_str(), *discardable);
+
+        // The discardable memory size will be calculated by the dumper, but we also dump what
+        // we think the size of the object in memory is, irrespective of whether it is live or dead.
+ dump->dumpNumericValue(dumpName.c_str(), "discardable_size", "bytes", rec.bytesUsed());
+ } else {
+ dump->dumpNumericValue(dumpName.c_str(), "size", "bytes", rec.bytesUsed());
+ dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
+ }
+}
+
+void SkResourceCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+    // Since a resource could be backed by malloc or discardable memory, the cache always dumps
+    // detailed stats to be accurate.
+ VisitAll(sk_trace_dump_visitor, dump);
+}
diff --git a/gfx/skia/skia/src/core/SkResourceCache.h b/gfx/skia/skia/src/core/SkResourceCache.h
new file mode 100644
index 0000000000..4805b28d87
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkResourceCache_DEFINED
+#define SkResourceCache_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkMessageBus.h"
+
+class SkCachedData;
+class SkDiscardableMemory;
+class SkTraceMemoryDump;
+
+/**
+ * Cache object for bitmaps (with a possible scale in X and Y as part of the key).
+ *
+ * Multiple caches can be instantiated, but each instance is not implicitly
+ * thread-safe, so if a given instance is to be shared across threads, the
+ * caller must manage the access itself (e.g. via a mutex).
+ *
+ * As a convenience, a global instance is also defined, which can be safely
+ * accessed across threads via the static methods (e.g. Find, Add, etc.).
+ */
+class SkResourceCache {
+public:
+ struct Key {
+ /** Key subclasses must call this after their own fields and data are initialized.
+ * All fields and data must be tightly packed.
+ * @param nameSpace must be unique per Key subclass.
+         * @param sharedID == 0 means this field is ignored and the entry does not support group purging.
+ * @param dataSize is size of fields and data of the subclass, must be a multiple of 4.
+ */
+ void init(void* nameSpace, uint64_t sharedID, size_t dataSize);
+
+ /** Returns the size of this key. */
+ size_t size() const {
+ return fCount32 << 2;
+ }
+
+ void* getNamespace() const { return fNamespace; }
+ uint64_t getSharedID() const { return ((uint64_t)fSharedID_hi << 32) | fSharedID_lo; }
+
+ // This is only valid after having called init().
+ uint32_t hash() const { return fHash; }
+
+ bool operator==(const Key& other) const {
+ const uint32_t* a = this->as32();
+ const uint32_t* b = other.as32();
+            for (int i = 0; i < fCount32; ++i) {  // (This checks fCount32 == other.fCount32 first.)
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ int32_t fCount32; // local + user contents count32
+ uint32_t fHash;
+        // split uint64_t into hi and lo so we don't force ourselves to pad on 32-bit machines.
+ uint32_t fSharedID_lo;
+ uint32_t fSharedID_hi;
+ void* fNamespace; // A unique namespace tag. This is hashed.
+ /* uint32_t fContents32[] */
+
+ const uint32_t* as32() const { return (const uint32_t*)this; }
+ };
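+
+    // A minimal sketch of a client-defined Key subclass; "MyKey" and its fields
+    // are hypothetical, not part of this header. All fields must be tightly
+    // packed after the base Key, and init() must be called last so the hash
+    // covers them:
+    //
+    //   struct MyKey : public SkResourceCache::Key {
+    //       uint32_t fGenID;
+    //       uint32_t fScaleBits;
+    //       MyKey(uint32_t genID, uint32_t scaleBits)
+    //           : fGenID(genID), fScaleBits(scaleBits) {
+    //           static bool gUniqueNamespace;  // address is the namespace tag
+    //           this->init(&gUniqueNamespace, 0 /* no shared ID */,
+    //                      sizeof(fGenID) + sizeof(fScaleBits));
+    //       }
+    //   };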
+
+ struct Rec {
+ typedef SkResourceCache::Key Key;
+
+ Rec() {}
+ virtual ~Rec() {}
+
+ uint32_t getHash() const { return this->getKey().hash(); }
+
+ virtual const Key& getKey() const = 0;
+ virtual size_t bytesUsed() const = 0;
+
+ // Called if the cache needs to purge/remove/delete the Rec. Default returns true.
+ // Subclass may return false if there are outstanding references to it (e.g. bitmaps).
+ // Will only be deleted/removed-from-the-cache when this returns true.
+ virtual bool canBePurged() { return true; }
+
+ // A rec is first created/initialized, and then added to the cache. As part of the add(),
+ // the cache will callback into the rec with postAddInstall, passing in whatever payload
+ // was passed to add/Add.
+ //
+ // This late-install callback exists because the process of add-ing might end up deleting
+ // the new rec (if an existing rec in the cache has the same key and cannot be purged).
+ // If the new rec will be deleted during add, the pre-existing one (with the same key)
+ // will have postAddInstall() called on it instead, so that either way an "install" will
+ // happen during the add.
+ virtual void postAddInstall(void*) {}
+
+ // for memory usage diagnostics
+ virtual const char* getCategory() const = 0;
+ virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; }
+
+ private:
+ Rec* fNext;
+ Rec* fPrev;
+
+ friend class SkResourceCache;
+ };
+
+ // Used with SkMessageBus
+ struct PurgeSharedIDMessage {
+ PurgeSharedIDMessage(uint64_t sharedID) : fSharedID(sharedID) {}
+ uint64_t fSharedID;
+ };
+
+ typedef const Rec* ID;
+
+ /**
+ * Callback function for find(). If called, the cache will have found a match for the
+ * specified Key, and will pass in the corresponding Rec, along with a caller-specified
+ * context. The function can read the data in Rec, and copy whatever it likes into context
+ * (casting context to whatever it really is).
+ *
+ * The return value determines what the cache will do with the Rec. If the function returns
+ * true, then the Rec is considered "valid". If false is returned, the Rec will be considered
+ * "stale" and will be purged from the cache.
+ */
+ typedef bool (*FindVisitor)(const Rec&, void* context);
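+
+    // A sketch of how a FindVisitor is typically written; MyRec and MyResult
+    // are hypothetical. The visitor copies what it needs out of the Rec (which
+    // must not be retained) and returns true to keep the entry alive:
+    //
+    //   static bool my_visitor(const SkResourceCache::Rec& baseRec, void* ctx) {
+    //       const MyRec& rec = static_cast<const MyRec&>(baseRec);
+    //       static_cast<MyResult*>(ctx)->fValue = rec.fValue;   // copy out
+    //       return true;   // false would mark the Rec stale and purge it
+    //   }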
+
+ /**
+ * Returns a locked/pinned SkDiscardableMemory instance for the specified
+ * number of bytes, or nullptr on failure.
+ */
+ typedef SkDiscardableMemory* (*DiscardableFactory)(size_t bytes);
+
+ /*
+ * The following static methods are thread-safe wrappers around a global
+ * instance of this cache.
+ */
+
+ /**
+ * Returns true if the visitor was called on a matching Key, and the visitor returned true.
+ *
+ * Find() will search the cache for the specified Key. If no match is found, return false and
+ * do not call the FindVisitor. If a match is found, return whatever the visitor returns.
+ * Its return value is interpreted to mean:
+ * true : Rec is valid
+ * false : Rec is "stale" -- the cache will purge it.
+ */
+ static bool Find(const Key& key, FindVisitor, void* context);
+ static void Add(Rec*, void* payload = nullptr);
+
+ typedef void (*Visitor)(const Rec&, void* context);
+ // Call the visitor for every Rec in the cache.
+ static void VisitAll(Visitor, void* context);
+
+ static size_t GetTotalBytesUsed();
+ static size_t GetTotalByteLimit();
+ static size_t SetTotalByteLimit(size_t newLimit);
+
+ static size_t SetSingleAllocationByteLimit(size_t);
+ static size_t GetSingleAllocationByteLimit();
+ static size_t GetEffectiveSingleAllocationByteLimit();
+
+ static void PurgeAll();
+
+ static void TestDumpMemoryStatistics();
+
+ /** Dump memory usage statistics of every Rec in the cache using the
+ SkTraceMemoryDump interface.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ /**
+ * Returns the DiscardableFactory used by the global cache, or nullptr.
+ */
+ static DiscardableFactory GetDiscardableFactory();
+
+ static SkCachedData* NewCachedData(size_t bytes);
+
+ static void PostPurgeSharedID(uint64_t sharedID);
+
+ /**
+ * Call SkDebugf() with diagnostic information about the state of the cache
+ */
+ static void Dump();
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Construct the cache to call DiscardableFactory when it
+ * allocates memory for the pixels. In this mode, the cache has
+     * no explicit budget, and so methods like getTotalBytesUsed()
+ * and getTotalByteLimit() will return 0, and setTotalByteLimit
+ * will ignore its argument and return 0.
+ */
+ SkResourceCache(DiscardableFactory);
+
+ /**
+ * Construct the cache, allocating memory with malloc, and respect the
+ * byteLimit, purging automatically when a new image is added to the cache
+ * that pushes the total bytesUsed over the limit. Note: The limit can be
+ * changed at runtime with setTotalByteLimit.
+ */
+ explicit SkResourceCache(size_t byteLimit);
+ ~SkResourceCache();
+
+ /**
+ * Returns true if the visitor was called on a matching Key, and the visitor returned true.
+ *
+ * find() will search the cache for the specified Key. If no match is found, return false and
+ * do not call the FindVisitor. If a match is found, return whatever the visitor returns.
+ * Its return value is interpreted to mean:
+ * true : Rec is valid
+ * false : Rec is "stale" -- the cache will purge it.
+ */
+ bool find(const Key&, FindVisitor, void* context);
+ void add(Rec*, void* payload = nullptr);
+ void visitAll(Visitor, void* context);
+
+ size_t getTotalBytesUsed() const { return fTotalBytesUsed; }
+ size_t getTotalByteLimit() const { return fTotalByteLimit; }
+
+ /**
+ * This is respected by SkBitmapProcState::possiblyScaleImage.
+ * 0 is no maximum at all; this is the default.
+ * setSingleAllocationByteLimit() returns the previous value.
+ */
+ size_t setSingleAllocationByteLimit(size_t maximumAllocationSize);
+ size_t getSingleAllocationByteLimit() const;
+    // Returns the logical single allocation size (pinning against the budget when the cache
+    // is not backed by discardable memory).
+ size_t getEffectiveSingleAllocationByteLimit() const;
+
+ /**
+ * Set the maximum number of bytes available to this cache. If the current
+ * cache exceeds this new value, it will be purged to try to fit within
+ * this new limit.
+ */
+ size_t setTotalByteLimit(size_t newLimit);
+
+ void purgeSharedID(uint64_t sharedID);
+
+ void purgeAll() {
+ this->purgeAsNeeded(true);
+ }
+
+ DiscardableFactory discardableFactory() const { return fDiscardableFactory; }
+
+ SkCachedData* newCachedData(size_t bytes);
+
+ /**
+ * Call SkDebugf() with diagnostic information about the state of the cache
+ */
+ void dump() const;
+
+private:
+ Rec* fHead;
+ Rec* fTail;
+
+ class Hash;
+ Hash* fHash;
+
+ DiscardableFactory fDiscardableFactory;
+
+ size_t fTotalBytesUsed;
+ size_t fTotalByteLimit;
+ size_t fSingleAllocationByteLimit;
+ int fCount;
+
+ SkMessageBus<PurgeSharedIDMessage>::Inbox fPurgeSharedIDInbox;
+
+ void checkMessages();
+ void purgeAsNeeded(bool forcePurge = false);
+
+ // linklist management
+ void moveToHead(Rec*);
+ void addToHead(Rec*);
+ void release(Rec*);
+ void remove(Rec*);
+
+ void init(); // called by constructors
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+#endif
diff --git a/gfx/skia/skia/src/core/SkSafeMath.h b/gfx/skia/skia/src/core/SkSafeMath.h
new file mode 100644
index 0000000000..ba64f12bf8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSafeMath.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafeMath_DEFINED
+#define SkSafeMath_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTFitsIn.h"
+#include <limits>
+
+// SkSafeMath always checks that a series of operations does not overflow.
+// This must be correct for all platforms, because this is a check for safety at runtime.
+
+class SkSafeMath {
+public:
+ SkSafeMath() = default;
+
+ bool ok() const { return fOK; }
+ explicit operator bool() const { return fOK; }
+
+ size_t mul(size_t x, size_t y) {
+ return sizeof(size_t) == sizeof(uint64_t) ? mul64(x, y) : mul32(x, y);
+ }
+
+ size_t add(size_t x, size_t y) {
+ size_t result = x + y;
+ fOK &= result >= x;
+ return result;
+ }
+
+ /**
+ * Return a + b, unless this result is an overflow/underflow. In those cases, fOK will
+ * be set to false, and it is undefined what this returns.
+ */
+ int addInt(int a, int b) {
+ if (b < 0 && a < std::numeric_limits<int>::min() - b) {
+ fOK = false;
+ return a;
+ } else if (b > 0 && a > std::numeric_limits<int>::max() - b) {
+ fOK = false;
+ return a;
+ }
+ return a + b;
+ }
+
+ size_t alignUp(size_t x, size_t alignment) {
+ SkASSERT(alignment && !(alignment & (alignment - 1)));
+ return add(x, alignment - 1) & ~(alignment - 1);
+ }
+
+ template <typename T> T castTo(size_t value) {
+ if (!SkTFitsIn<T>(value)) {
+ fOK = false;
+ }
+ return static_cast<T>(value);
+ }
+
+ // These saturate to their results
+ static size_t Add(size_t x, size_t y);
+ static size_t Mul(size_t x, size_t y);
+ static size_t Align4(size_t x) {
+ SkSafeMath safe;
+ return safe.alignUp(x, 4);
+ }
+
+private:
+ uint32_t mul32(uint32_t x, uint32_t y) {
+ uint64_t bx = x;
+ uint64_t by = y;
+ uint64_t result = bx * by;
+ fOK &= result >> 32 == 0;
+ return result;
+ }
+
+ uint64_t mul64(uint64_t x, uint64_t y) {
+ if (x <= std::numeric_limits<uint64_t>::max() >> 32
+ && y <= std::numeric_limits<uint64_t>::max() >> 32) {
+ return x * y;
+ } else {
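+            // Schoolbook multiply from 32-bit halves:
+            //   x*y = (hx*hy << 64) + ((hx*ly + lx*hy) << 32) + lx*ly
+            // The product fits in 64 bits only if hx*hy == 0 and the high
+            // halves of the two middle terms are zero, which is what the
+            // fOK &= ... check below verifies (the adds catch any carries).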
+ auto hi = [](uint64_t x) { return x >> 32; };
+ auto lo = [](uint64_t x) { return x & 0xFFFFFFFF; };
+
+ uint64_t lx_ly = lo(x) * lo(y);
+ uint64_t hx_ly = hi(x) * lo(y);
+ uint64_t lx_hy = lo(x) * hi(y);
+ uint64_t hx_hy = hi(x) * hi(y);
+ uint64_t result = 0;
+ result = this->add(lx_ly, (hx_ly << 32));
+ result = this->add(result, (lx_hy << 32));
+ fOK &= (hx_hy + (hx_ly >> 32) + (lx_hy >> 32)) == 0;
+
+ #if defined(SK_DEBUG) && defined(__clang__) && defined(__x86_64__)
+ auto double_check = (unsigned __int128)x * y;
+ SkASSERT(result == (double_check & 0xFFFFFFFFFFFFFFFF));
+ SkASSERT(!fOK || (double_check >> 64 == 0));
+ #endif
+
+ return result;
+ }
+ }
+ bool fOK = true;
+};
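+
+// A minimal usage sketch (illustrative, not part of this header): computing a
+// pixel-buffer size where any intermediate overflow poisons the final result.
+//
+//   size_t safe_image_bytes(size_t width, size_t height, size_t bpp) {
+//       SkSafeMath safe;
+//       size_t rowBytes = safe.mul(width, bpp);        // clears fOK on overflow
+//       size_t total    = safe.mul(rowBytes, height);  // sticky across calls
+//       return safe.ok() ? total : 0;
+//   }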
+
+#endif//SkSafeMath_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSafeRange.h b/gfx/skia/skia/src/core/SkSafeRange.h
new file mode 100644
index 0000000000..b207cff8b5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSafeRange.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafeRange_DEFINED
+#define SkSafeRange_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+// SkSafeRange always checks that a series of operations is in range.
+// This check is sticky, so that if any one operation fails, the object will remember that and
+// return false from ok().
+
+class SkSafeRange {
+public:
+ MOZ_IMPLICIT operator bool() const { return fOK; }
+
+ bool ok() const { return fOK; }
+
+ // checks 0 <= value <= max.
+ // On success, returns value
+ // On failure, returns 0 and sets ok() to false
+ template <typename T> T checkLE(uint64_t value, T max) {
+ SkASSERT(static_cast<int64_t>(max) >= 0);
+ if (value > static_cast<uint64_t>(max)) {
+ fOK = false;
+ value = 0;
+ }
+ return static_cast<T>(value);
+ }
+
+ int checkGE(int value, int min) {
+ if (value < min) {
+ fOK = false;
+ value = min;
+ }
+ return value;
+ }
+
+private:
+ bool fOK = true;
+};
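+
+// Illustrative usage sketch (readUInt/readInt are hypothetical helpers):
+// validating untrusted values during deserialization. The failure bit is
+// sticky, so ok() only needs to be tested once at the end.
+//
+//   SkSafeRange safe;
+//   SkPaint::Cap cap = safe.checkLE(readUInt(), SkPaint::kLast_Cap);
+//   int count = safe.checkGE(readInt(), 0);
+//   if (!safe.ok()) { return false; }  // some earlier check failed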
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScalar.cpp b/gfx/skia/skia/src/core/SkScalar.cpp
new file mode 100644
index 0000000000..66f875e8a3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalar.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkMath.h"
+#include "include/core/SkScalar.h"
+
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length) {
+ SkASSERT(length > 0);
+ SkASSERT(keys != nullptr);
+ SkASSERT(values != nullptr);
+#ifdef SK_DEBUG
+ for (int i = 1; i < length; i++)
+ SkASSERT(keys[i] >= keys[i-1]);
+#endif
+ int right = 0;
+ while (right < length && searchKey > keys[right])
+ right++;
+ // Could use sentinel values to eliminate conditionals, but since the
+ // tables are taken as input, a simpler format is better.
+ if (length == right)
+ return values[length-1];
+ if (0 == right)
+ return values[0];
+ // Otherwise, interpolate between right - 1 and right.
+ SkScalar rightKey = keys[right];
+ SkScalar leftKey = keys[right-1];
+ SkScalar fract = (searchKey - leftKey) / (rightKey - leftKey);
+ return SkScalarInterp(values[right-1], values[right], fract);
+}
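+
+// Worked example of the search-then-interpolate behavior above: with
+// keys = {0, 10, 20} and values = {1, 2, 4}, a searchKey of 15 lands between
+// keys[1] and keys[2], so fract = (15 - 10) / (20 - 10) = 0.5 and the result
+// is SkScalarInterp(2, 4, 0.5) = 3. Out-of-range keys clamp to values[0] or
+// values[length - 1].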
diff --git a/gfx/skia/skia/src/core/SkScaleToSides.h b/gfx/skia/skia/src/core/SkScaleToSides.h
new file mode 100644
index 0000000000..e48d542e2e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScaleToSides.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScaleToSides_DEFINED
+#define SkScaleToSides_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cmath>
+#include <utility>
+
+class SkScaleToSides {
+public:
+ // This code assumes that a and b fit in a float, and therefore the resulting smaller value
+    // of a and b will fit in a float. The side of the rectangle may be larger than a float can represent.
+ // Scale must be less than or equal to the ratio limit / (*a + *b).
+ // This code assumes that NaN and Inf are never passed in.
+ static void AdjustRadii(double limit, double scale, SkScalar* a, SkScalar* b) {
+ SkASSERTF(scale < 1.0 && scale > 0.0, "scale: %g", scale);
+
+ *a = (float)((double)*a * scale);
+ *b = (float)((double)*b * scale);
+
+ if (*a + *b > limit) {
+ float* minRadius = a;
+ float* maxRadius = b;
+
+ // Force minRadius to be the smaller of the two.
+ if (*minRadius > *maxRadius) {
+ using std::swap;
+ swap(minRadius, maxRadius);
+ }
+
+ // newMinRadius must be float in order to give the actual value of the radius.
+ // The newMinRadius will always be smaller than limit. The largest that minRadius can be
+ // is 1/2 the ratio of minRadius : (minRadius + maxRadius), therefore in the resulting
+ // division, minRadius can be no larger than 1/2 limit + ULP. The newMinRadius can be
+ // 1/2 a ULP off at this point.
+ float newMinRadius = *minRadius;
+
+ // Because newMaxRadius is the result of a double to float conversion, it can be larger
+ // than limit, but only by one ULP.
+ float newMaxRadius = (float)(limit - newMinRadius);
+
+            // The total sum of newMinRadius and newMaxRadius can be up to 1.5 ULPs off. If the
+ // sum is greater than the limit then newMaxRadius may have to be reduced twice.
+ // Note: nextafterf is a c99 call and should be std::nextafter, but this is not
+ // implemented in the GCC ARM compiler.
+ if (newMaxRadius + newMinRadius > limit) {
+ newMaxRadius = nextafterf(newMaxRadius, 0.0f);
+ if (newMaxRadius + newMinRadius > limit) {
+ newMaxRadius = nextafterf(newMaxRadius, 0.0f);
+ }
+ }
+ *maxRadius = newMaxRadius;
+ }
+
+ SkASSERTF(*a >= 0.0f && *b >= 0.0f, "a: %g, b: %g, limit: %g, scale: %g", *a, *b, limit,
+ scale);
+
+ SkASSERTF(*a + *b <= limit,
+ "\nlimit: %.17f, sum: %.17f, a: %.10f, b: %.10f, scale: %.20f",
+ limit, *a + *b, *a, *b, scale);
+ }
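+
+    // Worked example: if one side of the rect has length (limit) 10 but the
+    // radii are a = 12 and b = 8 (sum 20), the caller passes
+    // scale = 10 / 20 = 0.5; the radii become a = 6 and b = 4, and the larger
+    // radius is then nudged down by ULPs only if float rounding pushed the
+    // sum back over the limit.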
+};
+#endif // SkScaleToSides_DEFINED
diff --git a/gfx/skia/skia/src/core/SkScalerContext.cpp b/gfx/skia/skia/src/core/SkScalerContext.cpp
new file mode 100644
index 0000000000..fb783c965f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.cpp
@@ -0,0 +1,1205 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkScalerContext.h"
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkStroke.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTextFormatParams.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/utils/SkMatrix22.h"
+#include <new>
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+ #define DUMP_RECx
+#endif
+
+SkScalerContextRec SkScalerContext::PreprocessRec(const SkTypeface& typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor& desc) {
+ SkScalerContextRec rec =
+ *static_cast<const SkScalerContextRec*>(desc.findEntry(kRec_SkDescriptorTag, nullptr));
+
+ // Allow the typeface to adjust the rec.
+ typeface.onFilterRec(&rec);
+
+ if (effects.fMaskFilter) {
+ // Pre-blend is not currently applied to filtered text.
+ // The primary filter is blur, for which contrast makes no sense,
+ // and for which the destination guess error is more visible.
+ // Also, all existing users of blur have calibrated for linear.
+ rec.ignorePreBlend();
+ }
+
+ SkColor lumColor = rec.getLuminanceColor();
+
+ if (rec.fMaskFormat == SkMask::kA8_Format) {
+ U8CPU lum = SkComputeLuminance(SkColorGetR(lumColor),
+ SkColorGetG(lumColor),
+ SkColorGetB(lumColor));
+ lumColor = SkColorSetRGB(lum, lum, lum);
+ }
+
+    // TODO: remove CanonicalColor when we fix up Chrome layout tests.
+ rec.setLuminanceColor(lumColor);
+
+ return rec;
+}
+
+SkScalerContext::SkScalerContext(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : fRec(PreprocessRec(*typeface, effects, *desc))
+ , fTypeface(std::move(typeface))
+ , fPathEffect(sk_ref_sp(effects.fPathEffect))
+ , fMaskFilter(sk_ref_sp(effects.fMaskFilter))
+ // Initialize based on our settings. Subclasses can also force this.
+ , fGenerateImageFromPath(fRec.fFrameWidth > 0 || fPathEffect != nullptr)
+
+ , fPreBlend(fMaskFilter ? SkMaskGamma::PreBlend() : SkScalerContext::GetMaskPreBlend(fRec))
+{
+#ifdef DUMP_REC
+ SkDebugf("SkScalerContext checksum %x count %d length %d\n",
+ desc->getChecksum(), desc->getCount(), desc->getLength());
+ SkDebugf("%s", fRec.dump().c_str());
+ SkDebugf(" effects %x\n", desc->findEntry(kEffects_SkDescriptorTag, nullptr));
+#endif
+}
+
+SkScalerContext::~SkScalerContext() {}
+
+/**
+ * In order to call cachedDeviceLuminance, cachedPaintLuminance, or
+ * cachedMaskGamma the caller must hold the mask_gamma_cache_mutex and continue
+ * to hold it until the returned pointer is refed or forgotten.
+ */
+static SkMutex& mask_gamma_cache_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+static SkMaskGamma* gLinearMaskGamma = nullptr;
+static SkMaskGamma* gMaskGamma = nullptr;
+static SkScalar gContrast = SK_ScalarMin;
+static SkScalar gPaintGamma = SK_ScalarMin;
+static SkScalar gDeviceGamma = SK_ScalarMin;
+
+/**
+ * The caller must hold the mask_gamma_cache_mutex() and continue to hold it until
+ * the returned SkMaskGamma pointer is refed or forgotten.
+ */
+static const SkMaskGamma& cached_mask_gamma(SkScalar contrast, SkScalar paintGamma,
+ SkScalar deviceGamma) {
+ mask_gamma_cache_mutex().assertHeld();
+ if (0 == contrast && SK_Scalar1 == paintGamma && SK_Scalar1 == deviceGamma) {
+ if (nullptr == gLinearMaskGamma) {
+ gLinearMaskGamma = new SkMaskGamma;
+ }
+ return *gLinearMaskGamma;
+ }
+ if (gContrast != contrast || gPaintGamma != paintGamma || gDeviceGamma != deviceGamma) {
+ SkSafeUnref(gMaskGamma);
+ gMaskGamma = new SkMaskGamma(contrast, paintGamma, deviceGamma);
+ gContrast = contrast;
+ gPaintGamma = paintGamma;
+ gDeviceGamma = deviceGamma;
+ }
+ return *gMaskGamma;
+}
+
+/**
+ * Expands fDeviceGamma, fPaintGamma, fContrast, and fLumBits into a mask pre-blend.
+ */
+SkMaskGamma::PreBlend SkScalerContext::GetMaskPreBlend(const SkScalerContextRec& rec) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+
+ const SkMaskGamma& maskGamma = cached_mask_gamma(rec.getContrast(),
+ rec.getPaintGamma(),
+ rec.getDeviceGamma());
+
+    // TODO: remove CanonicalColor when we fix up Chrome layout tests.
+ return maskGamma.preBlend(rec.getLuminanceColor());
+}
+
+size_t SkScalerContext::GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma,
+ SkScalar deviceGamma, int* width, int* height) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+ const SkMaskGamma& maskGamma = cached_mask_gamma(contrast,
+ paintGamma,
+ deviceGamma);
+
+ maskGamma.getGammaTableDimensions(width, height);
+ size_t size = (*width)*(*height)*sizeof(uint8_t);
+
+ return size;
+}
+
+bool SkScalerContext::GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ uint8_t* data) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+ const SkMaskGamma& maskGamma = cached_mask_gamma(contrast,
+ paintGamma,
+ deviceGamma);
+ const uint8_t* gammaTables = maskGamma.getGammaTables();
+ if (!gammaTables) {
+ return false;
+ }
+
+ int width, height;
+ maskGamma.getGammaTableDimensions(&width, &height);
+ size_t size = width*height * sizeof(uint8_t);
+ memcpy(data, gammaTables, size);
+ return true;
+}
+
+void SkScalerContext::getAdvance(SkGlyph* glyph) {
+ if (generateAdvance(glyph)) {
+ glyph->fMaskFormat = MASK_FORMAT_JUST_ADVANCE;
+ } else {
+ this->getMetrics(glyph);
+ SkASSERT(glyph->fMaskFormat != MASK_FORMAT_UNKNOWN);
+ }
+}
+
+void SkScalerContext::getMetrics(SkGlyph* glyph) {
+ bool generatingImageFromPath = fGenerateImageFromPath;
+ if (!generatingImageFromPath) {
+ generateMetrics(glyph);
+ SkASSERT(glyph->fMaskFormat != MASK_FORMAT_UNKNOWN);
+ } else {
+ SkPath devPath;
+ generatingImageFromPath = this->internalGetPath(glyph->getPackedID(), &devPath);
+ if (!generatingImageFromPath) {
+ generateMetrics(glyph);
+ SkASSERT(glyph->fMaskFormat != MASK_FORMAT_UNKNOWN);
+ } else {
+ uint8_t originMaskFormat = glyph->fMaskFormat;
+ if (!generateAdvance(glyph)) {
+ generateMetrics(glyph);
+ }
+
+ if (originMaskFormat != MASK_FORMAT_UNKNOWN) {
+ glyph->fMaskFormat = originMaskFormat;
+ } else {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ }
+
+ // If we are going to create the mask, then we cannot keep the color
+ if (SkMask::kARGB32_Format == glyph->fMaskFormat) {
+ glyph->fMaskFormat = SkMask::kA8_Format;
+ }
+
+ const SkIRect ir = devPath.getBounds().roundOut();
+ if (ir.isEmpty() || !SkRectPriv::Is16Bit(ir)) {
+ goto SK_ERROR;
+ }
+ glyph->fLeft = ir.fLeft;
+ glyph->fTop = ir.fTop;
+ glyph->fWidth = SkToU16(ir.width());
+ glyph->fHeight = SkToU16(ir.height());
+
+ if (glyph->fWidth > 0) {
+ switch (glyph->fMaskFormat) {
+ case SkMask::kLCD16_Format:
+ if (fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ glyph->fHeight += 2;
+ glyph->fTop -= 1;
+ } else {
+ glyph->fWidth += 2;
+ glyph->fLeft -= 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ // if either dimension is empty, zap the image bounds of the glyph
+ if (0 == glyph->fWidth || 0 == glyph->fHeight) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ glyph->fTop = 0;
+ glyph->fLeft = 0;
+ glyph->fMaskFormat = 0;
+ return;
+ }
+
+ if (fMaskFilter) {
+ SkMask src = glyph->mask(),
+ dst;
+ SkMatrix matrix;
+
+ fRec.getMatrixFrom2x2(&matrix);
+
+ src.fImage = nullptr; // only want the bounds from the filter
+ if (as_MFB(fMaskFilter)->filterMask(&dst, src, matrix, nullptr)) {
+ if (dst.fBounds.isEmpty() || !SkRectPriv::Is16Bit(dst.fBounds)) {
+ goto SK_ERROR;
+ }
+ SkASSERT(dst.fImage == nullptr);
+ glyph->fLeft = dst.fBounds.fLeft;
+ glyph->fTop = dst.fBounds.fTop;
+ glyph->fWidth = SkToU16(dst.fBounds.width());
+ glyph->fHeight = SkToU16(dst.fBounds.height());
+ glyph->fMaskFormat = dst.fFormat;
+ }
+ }
+ return;
+
+SK_ERROR:
+ // draw nothing 'cause we failed
+ glyph->fLeft = 0;
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ // put a valid value here, in case it was earlier set to
+ // MASK_FORMAT_JUST_ADVANCE
+ glyph->fMaskFormat = fRec.fMaskFormat;
+}
+
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
+static void applyLUTToA8Mask(const SkMask& mask, const uint8_t* lut) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*)mask.fImage;
+ unsigned rowBytes = mask.fRowBytes;
+
+ for (int y = mask.fBounds.height() - 1; y >= 0; --y) {
+ for (int x = mask.fBounds.width() - 1; x >= 0; --x) {
+ dst[x] = lut[dst[x]];
+ }
+ dst += rowBytes;
+ }
+}
+
+static void pack4xHToLCD16(const SkPixmap& src, const SkMask& dst,
+ const SkMaskGamma::PreBlend& maskPreBlend,
+ const bool doBGR, const bool doVert) {
+#define SAMPLES_PER_PIXEL 4
+#define LCD_PER_PIXEL 3
+ SkASSERT(kAlpha_8_SkColorType == src.colorType());
+ SkASSERT(SkMask::kLCD16_Format == dst.fFormat);
+
+ // doVert in this function means swap x and y when writing to dst.
+ if (doVert) {
+ SkASSERT(src.width() == (dst.fBounds.height() - 2) * 4);
+ SkASSERT(src.height() == dst.fBounds.width());
+ } else {
+ SkASSERT(src.width() == (dst.fBounds.width() - 2) * 4);
+ SkASSERT(src.height() == dst.fBounds.height());
+ }
+
+ const int sample_width = src.width();
+ const int height = src.height();
+
+ uint16_t* dstImage = (uint16_t*)dst.fImage;
+ size_t dstRB = dst.fRowBytes;
+ // An N tap FIR is defined by
+ // out[n] = coeff[0]*x[n] + coeff[1]*x[n-1] + ... + coeff[N]*x[n-N]
+ // or
+ // out[n] = sum(i, 0, N, coeff[i]*x[n-i])
+
+ // The strategy is to use one FIR (different coefficients) for each of r, g, and b.
+ // This means using every 4th FIR output value of each FIR and discarding the rest.
+ // The FIRs are aligned, and the coefficients reach 5 samples to each side of their 'center'.
+ // (For r and b this is technically incorrect, but the coeffs outside round to zero anyway.)
+
+    // These are in some fixed-point representation.
+ // Adding up to more than one simulates ink spread.
+ // For implementation reasons, these should never add up to more than two.
+
+    // Coefficients determined by a Gaussian where 5 samples = 3 std deviations (0x110 'contrast').
+ // Calculated using tools/generate_fir_coeff.py
+ // With this one almost no fringing is ever seen, but it is imperceptibly blurry.
+ // The lcd smoothed text is almost imperceptibly different from gray,
+ // but is still sharper on small stems and small rounded corners than gray.
+ // This also seems to be about as wide as one can get and only have a three pixel kernel.
+ // TODO: calculate these at runtime so parameters can be adjusted (esp contrast).
+ static const unsigned int coefficients[LCD_PER_PIXEL][SAMPLES_PER_PIXEL*3] = {
+ //The red subpixel is centered inside the first sample (at 1/6 pixel), and is shifted.
+ { 0x03, 0x0b, 0x1c, 0x33, 0x40, 0x39, 0x24, 0x10, 0x05, 0x01, 0x00, 0x00, },
+        //The green subpixel is centered between two samples (at 1/2 pixel), so is symmetric.
+ { 0x00, 0x02, 0x08, 0x16, 0x2b, 0x3d, 0x3d, 0x2b, 0x16, 0x08, 0x02, 0x00, },
+ //The blue subpixel is centered inside the last sample (at 5/6 pixel), and is shifted.
+ { 0x00, 0x00, 0x01, 0x05, 0x10, 0x24, 0x39, 0x40, 0x33, 0x1c, 0x0b, 0x03, },
+ };
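+
+    // Each coefficient row sums to 0x110 (272), i.e. 272/256 = 1.0625 in this
+    // fixed-point scheme; the slight over-unity gain is the deliberate ink
+    // spread noted above, and the "/= 0x100" plus 255-clamp below folds the
+    // result back into [0, 255].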
+
+ for (int y = 0; y < height; ++y) {
+ uint16_t* dstP;
+ size_t dstPDelta;
+ if (doVert) {
+ dstP = dstImage + y;
+ dstPDelta = dstRB;
+ } else {
+ dstP = SkTAddOffset<uint16_t>(dstImage, dstRB * y);
+ dstPDelta = sizeof(uint16_t);
+ }
+
+ const uint8_t* srcP = src.addr8(0, y);
+
+        // TODO: this FIR filter implementation is straightforward, but slow.
+ // It should be possible to make it much faster.
+ for (int sample_x = -4; sample_x < sample_width + 4; sample_x += 4) {
+ int fir[LCD_PER_PIXEL] = { 0 };
+ for (int sample_index = SkMax32(0, sample_x - 4), coeff_index = sample_index - (sample_x - 4)
+ ; sample_index < SkMin32(sample_x + 8, sample_width)
+ ; ++sample_index, ++coeff_index)
+ {
+ int sample_value = srcP[sample_index];
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] += coefficients[subpxl_index][coeff_index] * sample_value;
+ }
+ }
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] /= 0x100;
+ fir[subpxl_index] = SkMin32(fir[subpxl_index], 255);
+ }
+
+ U8CPU r, g, b;
+ if (doBGR) {
+ r = fir[2];
+ g = fir[1];
+ b = fir[0];
+ } else {
+ r = fir[0];
+ g = fir[1];
+ b = fir[2];
+ }
+ if (maskPreBlend.isApplicable()) {
+ r = maskPreBlend.fR[r];
+ g = maskPreBlend.fG[g];
+ b = maskPreBlend.fB[b];
+ }
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkMax32(r, 10); g = SkMax32(g, 10); b = SkMax32(b, 10);
+#endif
+ *dstP = SkPack888ToRGB16(r, g, b);
+ dstP = SkTAddOffset<uint16_t>(dstP, dstPDelta);
+ }
+ }
+}
+
+static inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ return byte >> 7;
+}
+
+static uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
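+
+// Worked example: alpha bytes {0xFF, 0x00, 0x80, 0x7F, 0xFF, 0x00, 0x00, 0x01}
+// map through convert_8_to_1 (values >= 0x80 become 1) to the bits
+// 1,0,1,0,1,0,0,0, which pack MSB-first into 0xA8.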
+
+static void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ SkASSERT(width >= 0);
+ SkASSERT(srcRB >= (size_t)width);
+ const size_t srcPad = srcRB - width;
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+static void generateMask(const SkMask& mask, const SkPath& path,
+ const SkMaskGamma::PreBlend& maskPreBlend,
+ bool doBGR, bool doVert) {
+ SkPaint paint;
+
+ int srcW = mask.fBounds.width();
+ int srcH = mask.fBounds.height();
+ int dstW = srcW;
+ int dstH = srcH;
+ int dstRB = mask.fRowBytes;
+
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ paint.setAntiAlias(SkMask::kBW_Format != mask.fFormat);
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ dstRB = 0; // signals we need a copy
+ break;
+ case SkMask::kA8_Format:
+ break;
+ case SkMask::kLCD16_Format:
+ if (doVert) {
+ dstW = 4*dstH - 8;
+ dstH = srcW;
+ matrix.setAll(0, 4, -SkIntToScalar(mask.fBounds.fTop + 1) * 4,
+ 1, 0, -SkIntToScalar(mask.fBounds.fLeft),
+ 0, 0, 1);
+ } else {
+ dstW = 4*dstW - 8;
+ matrix.setAll(4, 0, -SkIntToScalar(mask.fBounds.fLeft + 1) * 4,
+ 0, 1, -SkIntToScalar(mask.fBounds.fTop),
+ 0, 0, 1);
+ }
+ dstRB = 0; // signals we need a copy
+ break;
+ default:
+ SkDEBUGFAIL("unexpected mask format");
+ }
+
+ SkRasterClip clip;
+ clip.setRect(SkIRect::MakeWH(dstW, dstH));
+
+ const SkImageInfo info = SkImageInfo::MakeA8(dstW, dstH);
+ SkAutoPixmapStorage dst;
+
+ if (0 == dstRB) {
+ if (!dst.tryAlloc(info)) {
+ // can't allocate offscreen, so empty the mask and return
+ sk_bzero(mask.fImage, mask.computeImageSize());
+ return;
+ }
+ } else {
+ dst.reset(info, mask.fImage, dstRB);
+ }
+ sk_bzero(dst.writable_addr(), dst.computeByteSize());
+
+ SkDraw draw;
+ draw.fDst = dst;
+ draw.fRC = &clip;
+ draw.fMatrix = &matrix;
+ draw.drawPath(path, paint);
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ packA8ToA1(mask, dst.addr8(0, 0), dst.rowBytes());
+ break;
+ case SkMask::kA8_Format:
+ if (maskPreBlend.isApplicable()) {
+ applyLUTToA8Mask(mask, maskPreBlend.fG);
+ }
+ break;
+ case SkMask::kLCD16_Format:
+ pack4xHToLCD16(dst, mask, maskPreBlend, doBGR, doVert);
+ break;
+ default:
+ break;
+ }
+}
+
+void SkScalerContext::getImage(const SkGlyph& origGlyph) {
+ const SkGlyph* glyph = &origGlyph;
+ SkGlyph tmpGlyph{origGlyph.getPackedID()};
+
+ // in case we need to call generateImage on a mask-format that is different
+ // (i.e. larger) than what our caller allocated by looking at origGlyph.
+ SkAutoMalloc tmpGlyphImageStorage;
+
+ if (fMaskFilter) { // restore the prefilter bounds
+
+ // need the original bounds, sans our maskfilter
+ sk_sp<SkMaskFilter> mf = std::move(fMaskFilter);
+ this->getMetrics(&tmpGlyph);
+ fMaskFilter = std::move(mf);
+
+ // we need the prefilter bounds to be <= filter bounds
+ SkASSERT(tmpGlyph.fWidth <= origGlyph.fWidth);
+ SkASSERT(tmpGlyph.fHeight <= origGlyph.fHeight);
+
+ if (tmpGlyph.fMaskFormat == origGlyph.fMaskFormat) {
+ tmpGlyph.fImage = origGlyph.fImage;
+ } else {
+ tmpGlyphImageStorage.reset(tmpGlyph.imageSize());
+ tmpGlyph.fImage = tmpGlyphImageStorage.get();
+ }
+ glyph = &tmpGlyph;
+ }
+
+ if (!fGenerateImageFromPath) {
+ generateImage(*glyph);
+ } else {
+ SkPath devPath;
+ SkMask mask = glyph->mask();
+
+ if (!this->internalGetPath(glyph->getPackedID(), &devPath)) {
+ generateImage(*glyph);
+ } else {
+ SkASSERT(SkMask::kARGB32_Format != origGlyph.fMaskFormat);
+ SkASSERT(SkMask::kARGB32_Format != mask.fFormat);
+ const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+ generateMask(mask, devPath, fPreBlend, doBGR, doVert);
+ }
+ }
+
+ if (fMaskFilter) {
+ // the src glyph image shouldn't be 3D
+ SkASSERT(SkMask::k3D_Format != glyph->fMaskFormat);
+
+ SkMask srcM = glyph->mask(),
+ dstM;
+ SkMatrix matrix;
+
+ fRec.getMatrixFrom2x2(&matrix);
+
+ if (as_MFB(fMaskFilter)->filterMask(&dstM, srcM, matrix, nullptr)) {
+ int width = SkMin32(origGlyph.fWidth, dstM.fBounds.width());
+ int height = SkMin32(origGlyph.fHeight, dstM.fBounds.height());
+ int dstRB = origGlyph.rowBytes();
+ int srcRB = dstM.fRowBytes;
+
+ const uint8_t* src = (const uint8_t*)dstM.fImage;
+ uint8_t* dst = (uint8_t*)origGlyph.fImage;
+
+ if (SkMask::k3D_Format == dstM.fFormat) {
+ // we have to copy 3 times as much
+ height *= 3;
+ }
+
+ // clean out our glyph, since it may be larger than dstM
+ //sk_bzero(dst, height * dstRB);
+
+ while (--height >= 0) {
+ memcpy(dst, src, width);
+ src += srcRB;
+ dst += dstRB;
+ }
+ SkMask::FreeImage(dstM.fImage);
+ }
+ }
+}
+
+bool SkScalerContext::getPath(SkPackedGlyphID glyphID, SkPath* path) {
+ return this->internalGetPath(glyphID, path);
+}
+
+void SkScalerContext::getFontMetrics(SkFontMetrics* fm) {
+ SkASSERT(fm);
+ this->generateFontMetrics(fm);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkScalerContext::internalGetPath(SkPackedGlyphID glyphID, SkPath* devPath) {
+ SkPath path;
+ if (!generatePath(glyphID.code(), &path)) {
+ return false;
+ }
+
+ if (fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag) {
+ SkFixed dx = glyphID.getSubXFixed();
+ SkFixed dy = glyphID.getSubYFixed();
+ if (dx | dy) {
+ path.offset(SkFixedToScalar(dx), SkFixedToScalar(dy));
+ }
+ }
+
+ if (fRec.fFrameWidth > 0 || fPathEffect != nullptr) {
+ // need the path in user-space, with only the point-size applied
+ // so that our stroking and effects will operate the same way they
+        // would if the user had extracted the path themselves, and then
+ // called drawPath
+ SkPath localPath;
+ SkMatrix matrix, inverse;
+
+ fRec.getMatrixFrom2x2(&matrix);
+ if (!matrix.invert(&inverse)) {
+ // assume devPath is already empty.
+ return true;
+ }
+ path.transform(inverse, &localPath);
+ // now localPath is only affected by the paint settings, and not the canvas matrix
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+
+ if (fRec.fFrameWidth > 0) {
+ rec.setStrokeStyle(fRec.fFrameWidth,
+ SkToBool(fRec.fFlags & kFrameAndFill_Flag));
+ // glyphs are always closed contours, so cap type is ignored,
+ // so we just pass something.
+ rec.setStrokeParams((SkPaint::Cap)fRec.fStrokeCap,
+ (SkPaint::Join)fRec.fStrokeJoin,
+ fRec.fMiterLimit);
+ }
+
+ if (fPathEffect) {
+ SkPath effectPath;
+ if (fPathEffect->filterPath(&effectPath, localPath, &rec, nullptr)) {
+ localPath.swap(effectPath);
+ }
+ }
+
+ if (rec.needToApply()) {
+ SkPath strokePath;
+ if (rec.applyToPath(&strokePath, localPath)) {
+ localPath.swap(strokePath);
+ }
+ }
+
+ // now return stuff to the caller
+ if (devPath) {
+ localPath.transform(matrix, devPath);
+ }
+ } else { // nothing tricky to do
+ if (devPath) {
+ devPath->swap(path);
+ }
+ }
+
+ if (devPath) {
+ devPath->updateBoundsCache();
+ }
+ return true;
+}
+
+
+void SkScalerContextRec::getMatrixFrom2x2(SkMatrix* dst) const {
+ dst->setAll(fPost2x2[0][0], fPost2x2[0][1], 0,
+ fPost2x2[1][0], fPost2x2[1][1], 0,
+ 0, 0, 1);
+}
+
+void SkScalerContextRec::getLocalMatrix(SkMatrix* m) const {
+ *m = SkFontPriv::MakeTextMatrix(fTextSize, fPreScaleX, fPreSkewX);
+}
+
+void SkScalerContextRec::getSingleMatrix(SkMatrix* m) const {
+ this->getLocalMatrix(m);
+
+ // now concat the device matrix
+ SkMatrix deviceMatrix;
+ this->getMatrixFrom2x2(&deviceMatrix);
+ m->postConcat(deviceMatrix);
+}
+
+bool SkScalerContextRec::computeMatrices(PreMatrixScale preMatrixScale, SkVector* s, SkMatrix* sA,
+ SkMatrix* GsA, SkMatrix* G_inv, SkMatrix* A_out)
+{
+ // A is the 'total' matrix.
+ SkMatrix A;
+ this->getSingleMatrix(&A);
+
+ // The caller may find the 'total' matrix useful when dealing directly with EM sizes.
+ if (A_out) {
+ *A_out = A;
+ }
+
+ // GA is the matrix A with rotation removed.
+ SkMatrix GA;
+ bool skewedOrFlipped = A.getSkewX() || A.getSkewY() || A.getScaleX() < 0 || A.getScaleY() < 0;
+ if (skewedOrFlipped) {
+ // QR by Givens rotations. G is Q^T and GA is R. G is rotational (no reflections).
+ // h is where A maps the horizontal baseline.
+ SkPoint h = SkPoint::Make(SK_Scalar1, 0);
+ A.mapPoints(&h, 1);
+
+ // G is the Givens Matrix for A (rotational matrix where GA[0][1] == 0).
+ SkMatrix G;
+ SkComputeGivensRotation(h, &G);
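+        // For example, if A maps (1,0) to h = (3,4), G is the rotation by
+        // -atan2(4,3); G*h = (5,0), so the mapped baseline is horizontal again.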
+
+ GA = G;
+ GA.preConcat(A);
+
+ // The 'remainingRotation' is G inverse, which is fairly simple since G is 2x2 rotational.
+ if (G_inv) {
+ G_inv->setAll(
+ G.get(SkMatrix::kMScaleX), -G.get(SkMatrix::kMSkewX), G.get(SkMatrix::kMTransX),
+ -G.get(SkMatrix::kMSkewY), G.get(SkMatrix::kMScaleY), G.get(SkMatrix::kMTransY),
+ G.get(SkMatrix::kMPersp0), G.get(SkMatrix::kMPersp1), G.get(SkMatrix::kMPersp2));
+ }
+ } else {
+ GA = A;
+ if (G_inv) {
+ G_inv->reset();
+ }
+ }
+
+ // If the 'total' matrix is singular, set the 'scale' to something finite and zero the matrices.
+    // All underlying ports have issues with zero text size, so zero the matrices instead.
+ // If one of the scale factors is less than 1/256 then an EM filling square will
+ // never affect any pixels.
+ // If there are any nonfinite numbers in the matrix, bail out and set the matrices to zero.
+ if (SkScalarAbs(GA.get(SkMatrix::kMScaleX)) <= SK_ScalarNearlyZero ||
+ SkScalarAbs(GA.get(SkMatrix::kMScaleY)) <= SK_ScalarNearlyZero ||
+ !GA.isFinite())
+ {
+ s->fX = SK_Scalar1;
+ s->fY = SK_Scalar1;
+ sA->setScale(0, 0);
+ if (GsA) {
+ GsA->setScale(0, 0);
+ }
+ if (G_inv) {
+ G_inv->reset();
+ }
+ return false;
+ }
+
+ // At this point, given GA, create s.
+ switch (preMatrixScale) {
+ case kFull_PreMatrixScale:
+ s->fX = SkScalarAbs(GA.get(SkMatrix::kMScaleX));
+ s->fY = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ break;
+ case kVertical_PreMatrixScale: {
+ SkScalar yScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ s->fX = yScale;
+ s->fY = yScale;
+ break;
+ }
+ case kVerticalInteger_PreMatrixScale: {
+ SkScalar realYScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ SkScalar intYScale = SkScalarRoundToScalar(realYScale);
+ if (intYScale == 0) {
+ intYScale = SK_Scalar1;
+ }
+ s->fX = intYScale;
+ s->fY = intYScale;
+ break;
+ }
+ }
+
+ // The 'remaining' matrix sA is the total matrix A without the scale.
+ if (!skewedOrFlipped && (
+ (kFull_PreMatrixScale == preMatrixScale) ||
+ (kVertical_PreMatrixScale == preMatrixScale && A.getScaleX() == A.getScaleY())))
+ {
+ // If GA == A and kFull_PreMatrixScale, sA is identity.
+ // If GA == A and kVertical_PreMatrixScale and A.scaleX == A.scaleY, sA is identity.
+ sA->reset();
+ } else if (!skewedOrFlipped && kVertical_PreMatrixScale == preMatrixScale) {
+ // If GA == A and kVertical_PreMatrixScale, sA.scaleY is SK_Scalar1.
+ sA->reset();
+ sA->setScaleX(A.getScaleX() / s->fY);
+ } else {
+ // TODO: like kVertical_PreMatrixScale, kVerticalInteger_PreMatrixScale with int scales.
+ *sA = A;
+ sA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ // The 'remainingWithoutRotation' matrix GsA is the non-rotational part of A without the scale.
+ if (GsA) {
+ *GsA = GA;
+ // G is rotational so reorders with the scale.
+ GsA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ return true;
+}
+
+SkAxisAlignment SkScalerContext::computeAxisAlignmentForHText() const {
+ return fRec.computeAxisAlignmentForHText();
+}
+
+SkAxisAlignment SkScalerContextRec::computeAxisAlignmentForHText() const {
+ // Why fPost2x2 can be used here.
+ // getSingleMatrix multiplies in getLocalMatrix, which consists of
+ // * fTextSize (a scale, which has no effect)
+ // * fPreScaleX (a scale in x, which has no effect)
+ // * fPreSkewX (has no effect, but would on vertical text alignment).
+ // In other words, making the text bigger, stretching it along the
+ // horizontal axis, or fake italicizing it does not move the baseline.
+ if (!SkToBool(fFlags & SkScalerContext::kBaselineSnap_Flag)) {
+ return kNone_SkAxisAlignment;
+ }
+
+ if (0 == fPost2x2[1][0]) {
+ // The x axis is mapped onto the x axis.
+ return kX_SkAxisAlignment;
+ }
+ if (0 == fPost2x2[0][0]) {
+ // The x axis is mapped onto the y axis.
+ return kY_SkAxisAlignment;
+ }
+ return kNone_SkAxisAlignment;
+}
+
+void SkScalerContextRec::setLuminanceColor(SkColor c) {
+ fLumBits = SkMaskGamma::CanonicalColor(
+ SkColorSetRGB(SkColorGetR(c), SkColorGetG(c), SkColorGetB(c)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkScalerContext_Empty : public SkScalerContext {
+public:
+ SkScalerContext_Empty(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(typeface), effects, desc) {}
+
+protected:
+ unsigned generateGlyphCount() override {
+ return 0;
+ }
+ bool generateAdvance(SkGlyph* glyph) override {
+ glyph->zeroMetrics();
+ return true;
+ }
+ void generateMetrics(SkGlyph* glyph) override {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ glyph->zeroMetrics();
+ }
+ void generateImage(const SkGlyph& glyph) override {}
+ bool generatePath(SkGlyphID glyph, SkPath* path) override {
+ path->reset();
+ return false;
+ }
+ void generateFontMetrics(SkFontMetrics* metrics) override {
+ if (metrics) {
+ sk_bzero(metrics, sizeof(*metrics));
+ }
+ }
+};
+
+extern SkScalerContext* SkCreateColorScalerContext(const SkDescriptor* desc);
+
+std::unique_ptr<SkScalerContext> SkTypeface::createScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc, bool allowFailure) const
+{
+ std::unique_ptr<SkScalerContext> c(this->onCreateScalerContext(effects, desc));
+ if (!c && !allowFailure) {
+ c = skstd::make_unique<SkScalerContext_Empty>(sk_ref_sp(const_cast<SkTypeface*>(this)),
+ effects, desc);
+ }
+
+ // !allowFailure implies c != nullptr
+ SkASSERT(c || allowFailure);
+
+ return c;
+}
+
+/*
+ * Return the scalar with only limited fractional precision. Used to consolidate matrices
+ * that vary only slightly when we create our key into the font cache, since the font scaler
+ * typically returns the same-looking results for tiny changes in the matrix.
+ */
+static SkScalar sk_relax(SkScalar x) {
+ SkScalar n = SkScalarRoundToScalar(x * 1024);
+ return n / 1024.0f;
+}
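+
+// For example, sk_relax(0.12345f) computes round(0.12345 * 1024) = 126 and
+// returns 126 / 1024 = 0.123046875; matrix entries that differ by less than
+// half a 1/1024 step therefore collapse to the same cache key.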
+
+static SkMask::Format compute_mask_format(const SkFont& font) {
+ switch (font.getEdging()) {
+ case SkFont::Edging::kAlias:
+ return SkMask::kBW_Format;
+ case SkFont::Edging::kAntiAlias:
+ return SkMask::kA8_Format;
+ case SkFont::Edging::kSubpixelAntiAlias:
+ return SkMask::kLCD16_Format;
+ }
+ SkASSERT(false);
+ return SkMask::kA8_Format;
+}
+
+// Beyond this size, LCD doesn't appreciably improve quality, but it always
+// costs more RAM and draws slower, so we set a cap.
+#ifndef SK_MAX_SIZE_FOR_LCDTEXT
+ #define SK_MAX_SIZE_FOR_LCDTEXT 48
+#endif
+
+const SkScalar gMaxSize2ForLCDText = SK_MAX_SIZE_FOR_LCDTEXT * SK_MAX_SIZE_FOR_LCDTEXT;
+
+static bool too_big_for_lcd(const SkScalerContextRec& rec, bool checkPost2x2) {
+ if (checkPost2x2) {
+ SkScalar area = rec.fPost2x2[0][0] * rec.fPost2x2[1][1] -
+ rec.fPost2x2[1][0] * rec.fPost2x2[0][1];
+ area *= rec.fTextSize * rec.fTextSize;
+ return area > gMaxSize2ForLCDText;
+ } else {
+ return rec.fTextSize > SK_MAX_SIZE_FOR_LCDTEXT;
+ }
+}
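+
+// For example, a uniform 2x post-transform ([[2,0],[0,2]], determinant 4) at
+// text size 30 gives area = 4 * 30 * 30 = 3600 > 48^2 = 2304, so the text is
+// too big for LCD and falls back to A8.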
+
+// The only reason this is not file static is because it needs the context of SkScalerContext to
+// access SkPaintPriv::ComputeLuminanceColor.
+void SkScalerContext::MakeRecAndEffects(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects) {
+ SkASSERT(!deviceMatrix.hasPerspective());
+
+ sk_bzero(rec, sizeof(SkScalerContextRec));
+
+ SkTypeface* typeface = font.getTypefaceOrDefault();
+
+ rec->fFontID = typeface->uniqueID();
+ rec->fTextSize = font.getSize();
+ rec->fPreScaleX = font.getScaleX();
+ rec->fPreSkewX = font.getSkewX();
+
+ bool checkPost2x2 = false;
+
+ const SkMatrix::TypeMask mask = deviceMatrix.getType();
+ if (mask & SkMatrix::kScale_Mask) {
+ rec->fPost2x2[0][0] = sk_relax(deviceMatrix.getScaleX());
+ rec->fPost2x2[1][1] = sk_relax(deviceMatrix.getScaleY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][0] = rec->fPost2x2[1][1] = SK_Scalar1;
+ }
+ if (mask & SkMatrix::kAffine_Mask) {
+ rec->fPost2x2[0][1] = sk_relax(deviceMatrix.getSkewX());
+ rec->fPost2x2[1][0] = sk_relax(deviceMatrix.getSkewY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][1] = rec->fPost2x2[1][0] = 0;
+ }
+
+ SkPaint::Style style = paint.getStyle();
+ SkScalar strokeWidth = paint.getStrokeWidth();
+
+ unsigned flags = 0;
+
+ if (font.isEmbolden()) {
+#ifdef SK_USE_FREETYPE_EMBOLDEN
+ flags |= SkScalerContext::kEmbolden_Flag;
+#else
+ SkScalar fakeBoldScale = SkScalarInterpFunc(font.getSize(),
+ kStdFakeBoldInterpKeys,
+ kStdFakeBoldInterpValues,
+ kStdFakeBoldInterpLength);
+ SkScalar extra = font.getSize() * fakeBoldScale;
+
+ if (style == SkPaint::kFill_Style) {
+ style = SkPaint::kStrokeAndFill_Style;
+ strokeWidth = extra; // ignore paint's strokeWidth if it was "fill"
+ } else {
+ strokeWidth += extra;
+ }
+#endif
+ }
+
+ if (style != SkPaint::kFill_Style && strokeWidth > 0) {
+ rec->fFrameWidth = strokeWidth;
+ rec->fMiterLimit = paint.getStrokeMiter();
+ rec->fStrokeJoin = SkToU8(paint.getStrokeJoin());
+ rec->fStrokeCap = SkToU8(paint.getStrokeCap());
+
+ if (style == SkPaint::kStrokeAndFill_Style) {
+ flags |= SkScalerContext::kFrameAndFill_Flag;
+ }
+ } else {
+ rec->fFrameWidth = 0;
+ rec->fMiterLimit = 0;
+ rec->fStrokeJoin = 0;
+ rec->fStrokeCap = 0;
+ }
+
+ rec->fMaskFormat = SkToU8(compute_mask_format(font));
+
+ if (SkMask::kLCD16_Format == rec->fMaskFormat) {
+ if (too_big_for_lcd(*rec, checkPost2x2)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ } else {
+ SkPixelGeometry geometry = surfaceProps.pixelGeometry();
+
+ switch (geometry) {
+ case kUnknown_SkPixelGeometry:
+ // eeek, can't support LCD
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ break;
+ case kRGB_H_SkPixelGeometry:
+ // our default, do nothing.
+ break;
+ case kBGR_H_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ case kRGB_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ break;
+ case kBGR_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ }
+ }
+ }
+
+ if (font.isEmbeddedBitmaps()) {
+ flags |= SkScalerContext::kEmbeddedBitmapText_Flag;
+ }
+ if (font.isSubpixel()) {
+ flags |= SkScalerContext::kSubpixelPositioning_Flag;
+ }
+ if (font.isForceAutoHinting()) {
+ flags |= SkScalerContext::kForceAutohinting_Flag;
+ }
+ if (font.isLinearMetrics()) {
+ flags |= SkScalerContext::kLinearMetrics_Flag;
+ }
+ if (font.isBaselineSnap()) {
+ flags |= SkScalerContext::kBaselineSnap_Flag;
+ }
+ rec->fFlags = SkToU16(flags);
+
+ // these modify fFlags, so do them after assigning fFlags
+ rec->setHinting(font.getHinting());
+ rec->setLuminanceColor(SkPaintPriv::ComputeLuminanceColor(paint));
+
+ // For now always set the paint gamma equal to the device gamma.
+ // The math in SkMaskGamma can handle them being different,
+    // but it requires superluminous masks when, for example,
+    // deviceGamma(x) < paintGamma(x) and x is sufficiently large.
+ rec->setDeviceGamma(SK_GAMMA_EXPONENT);
+ rec->setPaintGamma(SK_GAMMA_EXPONENT);
+
+#ifdef SK_GAMMA_CONTRAST
+ rec->setContrast(SK_GAMMA_CONTRAST);
+#else
+ // A value of 0.5 for SK_GAMMA_CONTRAST appears to be a good compromise.
+ // With lower values small text appears washed out (though correctly so).
+ // With higher values lcd fringing is worse and the smoothing effect of
+ // partial coverage is diminished.
+ rec->setContrast(0.5f);
+#endif
+
+ if (!SkToBool(scalerContextFlags & SkScalerContextFlags::kFakeGamma)) {
+ rec->ignoreGamma();
+ }
+ if (!SkToBool(scalerContextFlags & SkScalerContextFlags::kBoostContrast)) {
+ rec->setContrast(0);
+ }
+
+ new (effects) SkScalerContextEffects{paint};
+}
+
+SkDescriptor* SkScalerContext::MakeDescriptorForPaths(SkFontID typefaceID,
+ SkAutoDescriptor* ad) {
+ SkScalerContextRec rec;
+ memset(&rec, 0, sizeof(rec));
+ rec.fFontID = typefaceID;
+ rec.fTextSize = SkFontPriv::kCanonicalTextSizeForPaths;
+ rec.fPreScaleX = rec.fPost2x2[0][0] = rec.fPost2x2[1][1] = SK_Scalar1;
+ return AutoDescriptorGivenRecAndEffects(rec, SkScalerContextEffects(), ad);
+}
+
+SkDescriptor* SkScalerContext::CreateDescriptorAndEffectsUsingPaint(
+ const SkFont& font, const SkPaint& paint, const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags, const SkMatrix& deviceMatrix, SkAutoDescriptor* ad,
+ SkScalerContextEffects* effects)
+{
+ SkScalerContextRec rec;
+ MakeRecAndEffects(font, paint, surfaceProps, scalerContextFlags, deviceMatrix, &rec, effects);
+ return AutoDescriptorGivenRecAndEffects(rec, *effects, ad);
+}
+
+static size_t calculate_size_and_flatten(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkBinaryWriteBuffer* effectBuffer) {
+ size_t descSize = sizeof(rec);
+ int entryCount = 1;
+
+ if (effects.fPathEffect || effects.fMaskFilter) {
+ if (effects.fPathEffect) { effectBuffer->writeFlattenable(effects.fPathEffect); }
+ if (effects.fMaskFilter) { effectBuffer->writeFlattenable(effects.fMaskFilter); }
+ entryCount += 1;
+ descSize += effectBuffer->bytesWritten();
+ }
+
+ descSize += SkDescriptor::ComputeOverhead(entryCount);
+ return descSize;
+}
+
+static void generate_descriptor(const SkScalerContextRec& rec,
+ const SkBinaryWriteBuffer& effectBuffer,
+ SkDescriptor* desc) {
+ desc->init();
+ desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+
+ if (effectBuffer.bytesWritten() > 0) {
+ effectBuffer.writeToMemory(desc->addEntry(kEffects_SkDescriptorTag,
+ effectBuffer.bytesWritten(),
+ nullptr));
+ }
+
+ desc->computeChecksum();
+}
+
+SkDescriptor* SkScalerContext::AutoDescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkAutoDescriptor* ad)
+{
+ SkBinaryWriteBuffer buf;
+
+ ad->reset(calculate_size_and_flatten(rec, effects, &buf));
+ generate_descriptor(rec, buf, ad->getDesc());
+
+ return ad->getDesc();
+}
+
+std::unique_ptr<SkDescriptor> SkScalerContext::DescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects)
+{
+ SkBinaryWriteBuffer buf;
+
+ auto desc = SkDescriptor::Alloc(calculate_size_and_flatten(rec, effects, &buf));
+ generate_descriptor(rec, buf, desc.get());
+
+ return desc;
+}
+
+void SkScalerContext::DescriptorBufferGiveRec(const SkScalerContextRec& rec, void* buffer) {
+ generate_descriptor(rec, SkBinaryWriteBuffer{}, (SkDescriptor*)buffer);
+}
+
+bool SkScalerContext::CheckBufferSizeForRec(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ size_t size) {
+ SkBinaryWriteBuffer buf;
+ return size >= calculate_size_and_flatten(rec, effects, &buf);
+}
+
+
+
+
diff --git a/gfx/skia/skia/src/core/SkScalerContext.h b/gfx/skia/skia/src/core/SkScalerContext.h
new file mode 100644
index 0000000000..72b45de166
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalerContext_DEFINED
+#define SkScalerContext_DEFINED
+
+#include <memory>
+
+#include "include/core/SkFont.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkMacros.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkStrikeForGPU.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+class SkAutoDescriptor;
+class SkDescriptor;
+class SkMaskFilter;
+class SkPathEffect;
+class SkScalerContext;
+class SkScalerContext_DW;
+
+enum SkScalerContextFlags : uint32_t {
+ kNone = 0,
+ kFakeGamma = 1 << 0,
+ kBoostContrast = 1 << 1,
+ kFakeGammaAndBoostContrast = kFakeGamma | kBoostContrast,
+};
+
+enum SkAxisAlignment : uint32_t {
+ kNone_SkAxisAlignment,
+ kX_SkAxisAlignment,
+ kY_SkAxisAlignment
+};
+
+/*
+ * To allow this to be forward-declared, it must be its own typename, rather
+ * than a nested struct inside SkScalerContext (where it started).
+ *
+ * SkScalerContextRec must be dense, and all bytes must be set to a known quantity because this
+ * structure is used to calculate a checksum.
+ */
+SK_BEGIN_REQUIRE_DENSE
+struct SkScalerContextRec {
+ uint32_t fFontID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+
+private:
+ //These describe the parameters to create (uniquely identify) the pre-blend.
+ uint32_t fLumBits;
+ uint8_t fDeviceGamma; //2.6, (0.0, 4.0) gamma, 0.0 for sRGB
+ uint8_t fPaintGamma; //2.6, (0.0, 4.0) gamma, 0.0 for sRGB
+ uint8_t fContrast; //0.8+1, [0.0, 1.0] artificial contrast
+ const uint8_t fReservedAlign{0};
+
+public:
+
+ SkScalar getDeviceGamma() const {
+ return SkIntToScalar(fDeviceGamma) / (1 << 6);
+ }
+ void setDeviceGamma(SkScalar dg) {
+ SkASSERT(0 <= dg && dg < SkIntToScalar(4));
+ fDeviceGamma = SkScalarFloorToInt(dg * (1 << 6));
+ }
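+    // Worked example of the 2.6 fixed-point encoding: a requested device gamma
+    // of 2.2 stores SkScalarFloorToInt(2.2 * 64) = 140, which decodes back to
+    // 140 / 64 = 2.1875.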
+
+ SkScalar getPaintGamma() const {
+ return SkIntToScalar(fPaintGamma) / (1 << 6);
+ }
+ void setPaintGamma(SkScalar pg) {
+ SkASSERT(0 <= pg && pg < SkIntToScalar(4));
+ fPaintGamma = SkScalarFloorToInt(pg * (1 << 6));
+ }
+
+ SkScalar getContrast() const {
+ sk_ignore_unused_variable(fReservedAlign);
+ return SkIntToScalar(fContrast) / ((1 << 8) - 1);
+ }
+ void setContrast(SkScalar c) {
+ SkASSERT(0 <= c && c <= SK_Scalar1);
+ fContrast = SkScalarRoundToInt(c * ((1 << 8) - 1));
+ }
+
+ /**
+ * Causes the luminance color to be ignored, and the paint and device
+ * gamma to be effectively 1.0
+ */
+ void ignoreGamma() {
+ setLuminanceColor(SK_ColorTRANSPARENT);
+ setPaintGamma(SK_Scalar1);
+ setDeviceGamma(SK_Scalar1);
+ }
+
+ /**
+ * Causes the luminance color and contrast to be ignored, and the
+ * paint and device gamma to be effectively 1.0.
+ */
+ void ignorePreBlend() {
+ ignoreGamma();
+ setContrast(0);
+ }
+
+ uint8_t fMaskFormat;
+private:
+ uint8_t fStrokeJoin : 4;
+ uint8_t fStrokeCap : 4;
+
+public:
+ uint16_t fFlags;
+
+ // Warning: when adding members note that the size of this structure
+ // must be a multiple of 4. SkDescriptor requires that its arguments be
+ // multiples of four and this structure is put in an SkDescriptor in
+ // SkPaint::MakeRecAndEffects.
+
+ SkString dump() const {
+ SkString msg;
+ msg.appendf("Rec\n");
+ msg.appendf(" textsize %g prescale %g preskew %g post [%g %g %g %g]\n",
+ fTextSize, fPreScaleX, fPreSkewX, fPost2x2[0][0],
+ fPost2x2[0][1], fPost2x2[1][0], fPost2x2[1][1]);
+ msg.appendf(" frame %g miter %g format %d join %d cap %d flags %#hx\n",
+ fFrameWidth, fMiterLimit, fMaskFormat, fStrokeJoin, fStrokeCap, fFlags);
+ msg.appendf(" lum bits %x, device gamma %d, paint gamma %d contrast %d\n", fLumBits,
+ fDeviceGamma, fPaintGamma, fContrast);
+ return msg;
+ }
+
+ void getMatrixFrom2x2(SkMatrix*) const;
+ void getLocalMatrix(SkMatrix*) const;
+ void getSingleMatrix(SkMatrix*) const;
+
+ /** The kind of scale which will be applied by the underlying port (pre-matrix). */
+ enum PreMatrixScale {
+ kFull_PreMatrixScale, // The underlying port can apply both x and y scale.
+ kVertical_PreMatrixScale, // The underlying port can only apply a y scale.
+ kVerticalInteger_PreMatrixScale // The underlying port can only apply an integer y scale.
+ };
+ /**
+ * Compute useful matrices for use with sizing in underlying libraries.
+ *
+ * There are two kinds of text size, a 'requested/logical size' which is like asking for size
+ * '12' and a 'real' size which is the size after the matrix is applied. The matrices produced
+ * by this method are based on the 'real' size. This method effectively finds the total device
+ * matrix and decomposes it in various ways.
+ *
+ * The most useful decomposition is into 'scale' and 'remaining'. The 'scale' is applied first
+ * and then the 'remaining' to fully apply the total matrix. This decomposition is useful when
+ * the text size ('scale') may have meaning apart from the total matrix. This is true when
+ * hinting, and sometimes true for other properties as well.
+ *
+ * The second (optional) decomposition is of 'remaining' into a non-rotational part
+ * 'remainingWithoutRotation' and a rotational part 'remainingRotation'. The 'scale' is applied
+ * first, then 'remainingWithoutRotation', then 'remainingRotation' to fully apply the total
+ * matrix. This decomposition is helpful when only horizontal metrics can be trusted, so the
+ * 'scale' and 'remainingWithoutRotation' will be handled by the underlying library, but
+ * the final rotation 'remainingRotation' will be handled manually.
+ *
+ * The 'total' matrix is also (optionally) available. This is useful in cases where the
+ * underlying library will not be used, often when working directly with font data.
+ *
+ * The parameters 'scale' and 'remaining' are required, the other pointers may be nullptr.
+ *
+ * @param preMatrixScale the kind of scale to extract from the total matrix.
+ * @param scale the scale extracted from the total matrix (both values positive).
+ * @param remaining apply after scale to apply the total matrix.
+ * @param remainingWithoutRotation apply after scale to apply the total matrix sans rotation.
+ * @param remainingRotation apply after remainingWithoutRotation to apply the total matrix.
+ * @param total the total matrix.
+ * @return false if the matrix was singular. The output will be valid but not invertible.
+ */
+ bool computeMatrices(PreMatrixScale preMatrixScale,
+ SkVector* scale, SkMatrix* remaining,
+ SkMatrix* remainingWithoutRotation = nullptr,
+ SkMatrix* remainingRotation = nullptr,
+ SkMatrix* total = nullptr);
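+    // Worked example (illustrative): if the total matrix is a uniform scale of
+    // 2 followed by a 90-degree rotation, kFull_PreMatrixScale yields
+    // scale = (2, 2) with 'remaining' the pure rotation; applying 'scale' and
+    // then 'remaining' reproduces the total matrix.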
+
+ SkAxisAlignment computeAxisAlignmentForHText() const;
+
+ inline SkFontHinting getHinting() const;
+ inline void setHinting(SkFontHinting);
+
+ SkMask::Format getFormat() const {
+ return static_cast<SkMask::Format>(fMaskFormat);
+ }
+
+ SkColor getLuminanceColor() const {
+ return fLumBits;
+ }
+
+ // setLuminanceColor forces the alpha to be 0xFF because the blitter that draws the glyph
+ // will apply the alpha from the paint. Don't apply the alpha twice.
+ void setLuminanceColor(SkColor c);
+
+private:
+ // TODO: remove
+ friend class SkScalerContext;
+};
+SK_END_REQUIRE_DENSE
+
+// TODO: rename SkScalerContextEffects -> SkStrikeEffects
+struct SkScalerContextEffects {
+ SkScalerContextEffects() : fPathEffect(nullptr), fMaskFilter(nullptr) {}
+ SkScalerContextEffects(SkPathEffect* pe, SkMaskFilter* mf)
+ : fPathEffect(pe), fMaskFilter(mf) {}
+ explicit SkScalerContextEffects(const SkPaint& paint)
+ : fPathEffect(paint.getPathEffect())
+ , fMaskFilter(paint.getMaskFilter()) {}
+
+ SkPathEffect* fPathEffect;
+ SkMaskFilter* fMaskFilter;
+};
+
+//The following typedef hides from the rest of the implementation the number of
+//most significant bits to consider when creating mask gamma tables. Two bits
+//per channel was chosen as a balance between fidelity (more bits) and cache
+//sizes (fewer bits). Three bits per channel was chosen when the color #303942
+//(used by the Chrome UI) turned out too green.
+typedef SkTMaskGamma<3, 3, 3> SkMaskGamma;
+
+class SkScalerContext {
+public:
+ enum Flags {
+ kFrameAndFill_Flag = 0x0001,
+ kUnused = 0x0002,
+ kEmbeddedBitmapText_Flag = 0x0004,
+ kEmbolden_Flag = 0x0008,
+ kSubpixelPositioning_Flag = 0x0010,
+        kForceAutohinting_Flag    = 0x0020, // Use auto instead of bytecode hinting if hinting.
+
+        // Together, these two flags result in a two-bit value which matches
+        // up with the SkPaint::Hinting enum.
+ kHinting_Shift = 7, // to shift into the other flags above
+ kHintingBit1_Flag = 0x0080,
+ kHintingBit2_Flag = 0x0100,
+
+ // Pixel geometry information.
+ // only meaningful if fMaskFormat is kLCD16
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // Generate A8 from LCD source (for GDI and CoreGraphics).
+ // only meaningful if fMaskFormat is kA8
+ kGenA8FromLCD_Flag = 0x0800, // could be 0x200 (bit meaning dependent on fMaskFormat)
+ kLinearMetrics_Flag = 0x1000,
+ kBaselineSnap_Flag = 0x2000,
+
+ kLightOnDark_Flag = 0x8000, // Moz + Mac only, used to distinguish different mask dilations
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ };
+
+ SkScalerContext(sk_sp<SkTypeface>, const SkScalerContextEffects&, const SkDescriptor*);
+ virtual ~SkScalerContext();
+
+ SkTypeface* getTypeface() const { return fTypeface.get(); }
+
+ SkMask::Format getMaskFormat() const {
+ return (SkMask::Format)fRec.fMaskFormat;
+ }
+
+ bool isSubpixel() const {
+ return SkToBool(fRec.fFlags & kSubpixelPositioning_Flag);
+ }
+
+ bool isLinearMetrics() const {
+ return SkToBool(fRec.fFlags & kLinearMetrics_Flag);
+ }
+
+ // DEPRECATED
+ bool isVertical() const { return false; }
+
+ unsigned getGlyphCount() { return this->generateGlyphCount(); }
+ void getAdvance(SkGlyph*);
+ void getMetrics(SkGlyph*);
+ void getImage(const SkGlyph&);
+ bool SK_WARN_UNUSED_RESULT getPath(SkPackedGlyphID, SkPath*);
+ void getFontMetrics(SkFontMetrics*);
+
+ /** Return the size in bytes of the associated gamma lookup table
+ */
+ static size_t GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ int* width, int* height);
+
+ /** Get the associated gamma lookup table. The 'data' pointer must point to pre-allocated
+ * memory, with size in bytes greater than or equal to the return value of getGammaLUTSize().
+ *
+ * If the lookup table hasn't been initialized (e.g., it's linear), this will return false.
+ */
+ static bool GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ uint8_t* data);
+
+ static void MakeRecAndEffects(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects);
+
+ // If we are creating rec and effects from a font only, then there is no device around either.
+ static void MakeRecAndEffectsFromFont(const SkFont& font,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects) {
+ SkPaint paint;
+ return MakeRecAndEffects(
+ font, paint, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType),
+ SkScalerContextFlags::kNone, SkMatrix::I(), rec, effects);
+ }
+
+ static SkDescriptor* MakeDescriptorForPaths(SkFontID fontID,
+ SkAutoDescriptor* ad);
+
+ static SkDescriptor* AutoDescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkAutoDescriptor* ad);
+
+ static std::unique_ptr<SkDescriptor> DescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects);
+
+ static void DescriptorBufferGiveRec(const SkScalerContextRec& rec, void* buffer);
+ static bool CheckBufferSizeForRec(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ size_t size);
+
+ static SkMaskGamma::PreBlend GetMaskPreBlend(const SkScalerContextRec& rec);
+
+ const SkScalerContextRec& getRec() const { return fRec; }
+
+ SkScalerContextEffects getEffects() const {
+ return { fPathEffect.get(), fMaskFilter.get() };
+ }
+
+ /**
+ * Return the axis (if any) that the baseline for horizontal text should land on.
+ * As an example, the identity matrix will return kX_SkAxisAlignment
+ */
+ SkAxisAlignment computeAxisAlignmentForHText() const;
+
+ static SkDescriptor* CreateDescriptorAndEffectsUsingPaint(
+ const SkFont&, const SkPaint&, const SkSurfaceProps&,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix, SkAutoDescriptor* ad,
+ SkScalerContextEffects* effects);
+
+protected:
+ SkScalerContextRec fRec;
+
+ /** Generates the contents of glyph.fAdvanceX and glyph.fAdvanceY if it can do so quickly.
+ * Returns true if it could, false otherwise.
+ */
+ virtual bool generateAdvance(SkGlyph* glyph) = 0;
+
+ /** Generates the contents of glyph.fWidth, fHeight, fTop, fLeft,
+ * as well as fAdvanceX and fAdvanceY if not already set.
+ *
+ * TODO: fMaskFormat is set by getMetrics later; cannot be set here.
+ */
+ virtual void generateMetrics(SkGlyph* glyph) = 0;
+
+ /** Generates the contents of glyph.fImage.
+ * When called, glyph.fImage will be pointing to a pre-allocated,
+ * uninitialized region of memory of size glyph.imageSize().
+ * This method may change glyph.fMaskFormat if the new image size is
+ * less than or equal to the old image size.
+ *
+ * Because glyph.imageSize() will determine the size of fImage,
+ * generateMetrics will be called before generateImage.
+ */
+ virtual void generateImage(const SkGlyph& glyph) = 0;
+
+ /** Sets the passed path to the glyph outline.
+     *  If this cannot be done, the path is set to empty.
+ * @return false if this glyph does not have any path.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT generatePath(SkGlyphID glyphId, SkPath* path) = 0;
+
+ /** Retrieves font metrics. */
+ virtual void generateFontMetrics(SkFontMetrics*) = 0;
+
+ /** Returns the number of glyphs in the font. */
+ virtual unsigned generateGlyphCount() = 0;
+
+ void forceGenerateImageFromPath() { fGenerateImageFromPath = true; }
+ void forceOffGenerateImageFromPath() { fGenerateImageFromPath = false; }
+
+private:
+ friend class RandomScalerContext; // For debug purposes
+
+ static SkScalerContextRec PreprocessRec(const SkTypeface& typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor& desc);
+
+ // never null
+ sk_sp<SkTypeface> fTypeface;
+
+ // optional objects, which may be null
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkMaskFilter> fMaskFilter;
+
+ // if this is set, we draw the image from a path, rather than
+ // calling generateImage.
+ bool fGenerateImageFromPath;
+
+ /** Returns false if the glyph has no path at all. */
+ bool internalGetPath(SkPackedGlyphID id, SkPath* devPath);
+
+ // SkMaskGamma::PreBlend converts linear masks to gamma correcting masks.
+protected:
+ // Visible to subclasses so that generateImage can apply the pre-blend directly.
+ const SkMaskGamma::PreBlend fPreBlend;
+};
+
+#define kRec_SkDescriptorTag SkSetFourByteTag('s', 'r', 'e', 'c')
+#define kEffects_SkDescriptorTag SkSetFourByteTag('e', 'f', 'c', 't')
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontHinting SkScalerContextRec::getHinting() const {
+ unsigned hint = (fFlags & SkScalerContext::kHinting_Mask) >>
+ SkScalerContext::kHinting_Shift;
+ return static_cast<SkFontHinting>(hint);
+}
+
+void SkScalerContextRec::setHinting(SkFontHinting hinting) {
+ fFlags = (fFlags & ~SkScalerContext::kHinting_Mask) |
+ (static_cast<unsigned>(hinting) << SkScalerContext::kHinting_Shift);
+}
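+// Worked example of the flag packing above: SkFontHinting::kFull has value 3,
+// so setHinting stores 3 << kHinting_Shift = 0x180, i.e. kHintingBit1_Flag
+// (0x0080) and kHintingBit2_Flag (0x0100) both set; getHinting recovers 3.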
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan.cpp b/gfx/skia/skia/src/core/SkScan.cpp
new file mode 100644
index 0000000000..b93f5f4f07
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+
+std::atomic<bool> gSkUseAnalyticAA{true};
+std::atomic<bool> gSkForceAnalyticAA{false};
+
+static inline void blitrect(SkBlitter* blitter, const SkIRect& r) {
+ blitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+}
+
+void SkScan::FillIRect(const SkIRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (!r.isEmpty()) {
+ if (clip) {
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(r)) {
+ blitrect(blitter, r);
+ } else {
+ SkIRect rr = r;
+ if (rr.intersect(clipBounds)) {
+ blitrect(blitter, rr);
+ }
+ }
+ } else {
+ SkRegion::Cliperator cliper(*clip, r);
+ const SkIRect& rr = cliper.rect();
+
+ while (!cliper.done()) {
+ blitrect(blitter, rr);
+ cliper.next();
+ }
+ }
+ } else {
+ blitrect(blitter, r);
+ }
+ }
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect r;
+
+ XRect_round(xr, &r);
+ SkScan::FillIRect(r, clip, blitter);
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect ir;
+
+ r.round(&ir);
+ SkScan::FillIRect(ir, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FillIRect(const SkIRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillIRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillIRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || xr.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillXRect(xr, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
diff --git a/gfx/skia/skia/src/core/SkScan.h b/gfx/skia/skia/src/core/SkScan.h
new file mode 100644
index 0000000000..a1db6e5e6d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkScan_DEFINED
+#define SkScan_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkFixed.h"
+#include <atomic>
+
+class SkRasterClip;
+class SkRegion;
+class SkBlitter;
+class SkPath;
+
+/** Defines a fixed-point rectangle, identical to the integer SkIRect, but its
+ coordinates are treated as SkFixed rather than int32_t.
+*/
+typedef SkIRect SkXRect;
+
+extern std::atomic<bool> gSkUseAnalyticAA;
+extern std::atomic<bool> gSkForceAnalyticAA;
+
+class AdditiveBlitter;
+
+class SkScan {
+public:
+ /*
+ * Draws count-1 line segments, one at a time:
+ * line(pts[0], pts[1])
+ * line(pts[1], pts[2])
+ * line(......, pts[count - 1])
+ */
+ typedef void (*HairRgnProc)(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ typedef void (*HairRCProc)(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+
+ static void FillPath(const SkPath&, const SkIRect&, SkBlitter*);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // rasterclip
+
+ static void FillIRect(const SkIRect&, const SkRasterClip&, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void FrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void FillTriangle(const SkPoint pts[], const SkRasterClip&, SkBlitter*);
+ static void HairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void AntiHairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void HairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void HairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+
+ // Needed by do_fill_path in SkScanPriv.h
+ static void FillPath(const SkPath&, const SkRegion& clip, SkBlitter*);
+
+private:
+ friend class SkAAClip;
+ friend class SkRegion;
+
+ static void FillIRect(const SkIRect&, const SkRegion* clip, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRegion* clip, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRegion*, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRegion& clip, SkBlitter*, bool forceRLE);
+ static void FillTriangle(const SkPoint pts[], const SkRegion*, SkBlitter*);
+
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRegion*, SkBlitter*);
+ static void HairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ static void AntiHairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ static void AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
+ const SkIRect& clipBounds, bool forceRLE);
+ static void SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
+ const SkIRect& clipBounds, bool forceRLE);
+};
+
+/** Assign an SkXRect from a SkIRect, by promoting the src rect's coordinates
+ from int to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K
+*/
+static inline void XRect_set(SkXRect* xr, const SkIRect& src) {
+ xr->fLeft = SkIntToFixed(src.fLeft);
+ xr->fTop = SkIntToFixed(src.fTop);
+ xr->fRight = SkIntToFixed(src.fRight);
+ xr->fBottom = SkIntToFixed(src.fBottom);
+}
+
+/** Assign an SkXRect from a SkRect, by promoting the src rect's coordinates
+ from SkScalar to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K
+*/
+static inline void XRect_set(SkXRect* xr, const SkRect& src) {
+ xr->fLeft = SkScalarToFixed(src.fLeft);
+ xr->fTop = SkScalarToFixed(src.fTop);
+ xr->fRight = SkScalarToFixed(src.fRight);
+ xr->fBottom = SkScalarToFixed(src.fBottom);
+}
+
+/** Round the SkXRect coordinates, and store the result in the SkIRect.
+*/
+static inline void XRect_round(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedRoundToInt(xr.fLeft);
+ dst->fTop = SkFixedRoundToInt(xr.fTop);
+ dst->fRight = SkFixedRoundToInt(xr.fRight);
+ dst->fBottom = SkFixedRoundToInt(xr.fBottom);
+}
+
+/** Round the SkXRect coordinates out (i.e. use floor for left/top, and ceiling
+ for right/bottom), and store the result in the SkIRect.
+*/
+static inline void XRect_roundOut(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedFloorToInt(xr.fLeft);
+ dst->fTop = SkFixedFloorToInt(xr.fTop);
+ dst->fRight = SkFixedCeilToInt(xr.fRight);
+ dst->fBottom = SkFixedCeilToInt(xr.fBottom);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScanPriv.h b/gfx/skia/skia/src/core/SkScanPriv.h
new file mode 100644
index 0000000000..2bec85f7b8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScanPriv.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScanPriv_DEFINED
+#define SkScanPriv_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkScan.h"
+
+// controls how much we super-sample (when we use that scan conversion)
+#define SK_SUPERSAMPLE_SHIFT 2
+
+class SkScanClipper {
+public:
+ SkScanClipper(SkBlitter* blitter, const SkRegion* clip, const SkIRect& bounds,
+ bool skipRejectTest = false, bool boundsPreClipped = false);
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+ const SkIRect* getClipRect() const { return fClipRect; }
+
+private:
+ SkRectClipBlitter fRectBlitter;
+ SkRgnClipBlitter fRgnBlitter;
+#ifdef SK_DEBUG
+ SkRectClipCheckBlitter fRectClipCheckBlitter;
+#endif
+ SkBlitter* fBlitter;
+ const SkIRect* fClipRect;
+};
+
+void sk_fill_path(const SkPath& path, const SkIRect& clipRect,
+ SkBlitter* blitter, int start_y, int stop_y, int shiftEdgesUp,
+ bool pathContainedInClip);
+
+// blit the rects above and below avoid, clipped to clip
+void sk_blit_above(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+void sk_blit_below(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+
+template<class EdgeType>
+static inline void remove_edge(EdgeType* edge) {
+ edge->fPrev->fNext = edge->fNext;
+ edge->fNext->fPrev = edge->fPrev;
+}
+
+template<class EdgeType>
+static inline void insert_edge_after(EdgeType* edge, EdgeType* afterMe) {
+ edge->fPrev = afterMe;
+ edge->fNext = afterMe->fNext;
+ afterMe->fNext->fPrev = edge;
+ afterMe->fNext = edge;
+}
+
+template<class EdgeType>
+static void backward_insert_edge_based_on_x(EdgeType* edge) {
+ SkFixed x = edge->fX;
+ EdgeType* prev = edge->fPrev;
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ if (prev->fNext != edge) {
+ remove_edge(edge);
+ insert_edge_after(edge, prev);
+ }
+}
+
+// Start from the right side, searching backwards for the point to begin the new edge list
+// insertion, marching forwards from here. The implementation could have started from the left
+// of the prior insertion, and search to the right, or with some additional caching, binary
+// search the starting point. More work could be done to determine optimal new edge insertion.
+template<class EdgeType>
+static EdgeType* backward_insert_start(EdgeType* prev, SkFixed x) {
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ return prev;
+}
+
+// Check if the path is a rect and fat enough after clipping; if so, blit it.
+static inline bool TryBlitFatAntiRect(SkBlitter* blitter, const SkPath& path, const SkIRect& clip) {
+ SkRect rect;
+ if (!path.isRect(&rect)) {
+ return false; // not rect
+ }
+ if (!rect.intersect(SkRect::Make(clip))) {
+ return true; // The intersection is empty. Hence consider it done.
+ }
+ SkIRect bounds = rect.roundOut();
+ if (bounds.width() < 3) {
+ return false; // not fat
+ }
+ blitter->blitFatAntiRect(rect);
+ return true;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan_AAAPath.cpp b/gfx/skia/skia/src/core/SkScan_AAAPath.cpp
new file mode 100644
index 0000000000..a9c0af4173
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_AAAPath.cpp
@@ -0,0 +1,2011 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeBuilder.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkScanPriv.h"
+#include "src/core/SkTSort.h"
+#include "src/utils/SkUTF.h"
+
+#include <utility>
+
+#if defined(SK_DISABLE_AAA)
+void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
+ SkDEBUGFAIL("AAA Disabled");
+ return;
+}
+#else
+
+/*
+
+The following is a high-level overview of our analytic anti-aliasing
+algorithm. We consider a path as a collection of line segments, as
+quadratic/cubic curves are converted to small line segments. Without loss of
+generality, let's assume that the draw region is [0, W] x [0, H].
+
+Our algorithm is based on horizontal scan lines (y = c_i) as the previous
+sampling-based algorithm did. However, our algorithm uses non-equal-spaced
+scan lines, while the previous method always uses equal-spaced scan lines,
+such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
+and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
+16-supersampling AA algorithm.
+
+Our algorithm contains scan lines y = c_i for c_i that is either:
+
+1. an integer between [0, H]
+
+2. the y value of a line segment endpoint
+
+3. the y value of an intersection of two line segments
+
+For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
+the coverage of this horizontal strip of our path on each pixel. This can be
+done very efficiently because the strip of our path now only consists of
+trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
+rectangles and triangles as special cases).
+
+We now describe how the coverage of a single pixel is computed against such a
+trapezoid. That coverage is essentially the intersection area of a rectangle
+(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
+could be complicated, as shown in the example region A below:
+
++-----------\----+
+| \ C|
+| \ |
+\ \ |
+|\ A \|
+| \ \
+| \ |
+| B \ |
++----\-----------+
+
+However, we don't have to compute the area of A directly. Instead, we can
+compute the excluded areas, B and C, quite easily, because they're
+just triangles. In fact, we can prove that an excluded region (take B as an
+example) is either itself a simple trapezoid (including rectangles, triangles,
+and empty regions), or its opposite (the opposite of B is A + C) is a simple
+trapezoid. In any case, we can compute its area efficiently.
+
+In summary, our algorithm has a higher quality because it generates ground-
+truth coverages analytically. It is also faster because it has far fewer
+unnecessary horizontal scan lines. For example, given a triangle path, the
+number of scan lines in our algorithm is only about 3 + H while the
+16-supersampling algorithm has about 4H scan lines.
+
+*/
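+// A minimal sketch of the strip identity described above (illustrative only:
+// strip_area is a hypothetical helper and plain doubles stand in for the
+// fixed-point types used below). Between consecutive scan lines the path
+// boundary consists of two line segments, so each strip is a trapezoid and
+// its total area has a closed form.
+#if 0
+static double strip_area(double l0, double r0,  // left/right x at y = c0
+                         double l1, double r1,  // left/right x at y = c1
+                         double c0, double c1) {
+    // Mean of the two parallel horizontal edge lengths, times the height.
+    return (c1 - c0) * ((r0 - l0) + (r1 - l1)) / 2;
+}
+#endif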
+
+static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
+ SkASSERT(*alpha + delta <= 256);
+ *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
+}
+
+static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
+ *alpha = SkTMin(0xFF, *alpha + delta);
+}
+
+class AdditiveBlitter : public SkBlitter {
+public:
+ ~AdditiveBlitter() override {}
+
+ virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;
+
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
+ virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
+ virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
+ }
+
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("Please call real blitter's blitV instead.");
+ }
+
+ void blitH(int x, int y, int width) override {
+ SkDEBUGFAIL("Please call real blitter's blitH instead.");
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkDEBUGFAIL("Please call real blitter's blitRect instead.");
+ }
+
+ void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
+ override {
+ SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
+ }
+
+ virtual int getWidth() = 0;
+
+    // Flush the additive alpha cache if floor(y) and floor(nextY) are different
+ // (i.e., we'll start working on a new pixel row).
+ virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
+};
+
+// We need this mask blitter because it significantly accelerates small path filling.
+class MaskAdditiveBlitter : public AdditiveBlitter {
+public:
+ MaskAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse);
+ ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }
+
+ // Most of the time, we still consider this mask blitter as the real blitter
+ // so we can accelerate blitRect and others. But sometimes we want to return
+ // the absolute real blitter (e.g., when we fall back to the old code path).
+ SkBlitter* getRealBlitter(bool forceRealBlitter) override {
+ return forceRealBlitter ? fRealBlitter : this;
+ }
+
+    // Virtual functions are slow, so don't use this. Directly add alpha to the mask instead.
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+
+    // The following methods are used to blit rectangles during aaa_walk_convex_edges.
+ // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
+ override;
+
+ // The flush is only needed for RLE (RunBasedAdditiveBlitter)
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
+
+ int getWidth() override { return fClipRect.width(); }
+
+ static bool CanHandleRect(const SkIRect& bounds) {
+ int width = bounds.width();
+ if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
+ return false;
+ }
+ int64_t rb = SkAlign4(width);
+ // use 64bits to detect overflow
+ int64_t storage = rb * bounds.height();
+
+ return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
+ (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
+ }
+
+    // Return a pointer where pointer[x] corresponds to the alpha of (x, y)
+ uint8_t* getRow(int y) {
+ if (y != fY) {
+ fY = y;
+ fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
+ }
+ return fRow;
+ }
+
+private:
+ // so we don't try to do very wide things, where the RLE blitter would be faster
+ static const int kMAX_WIDTH = 32;
+ static const int kMAX_STORAGE = 1024;
+
+ SkBlitter* fRealBlitter;
+ SkMask fMask;
+ SkIRect fClipRect;
+ // we add 2 because we can write 1 extra byte at either end due to precision error
+ uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
+
+ uint8_t* fRow;
+ int fY;
+};
+
+MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse) {
+ SkASSERT(CanHandleRect(ir));
+ SkASSERT(!isInverse);
+
+ fRealBlitter = realBlitter;
+
+ fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage
+ fMask.fBounds = ir;
+ fMask.fRowBytes = ir.width();
+ fMask.fFormat = SkMask::kA8_Format;
+
+ fY = ir.fTop - 1;
+ fRow = nullptr;
+
+ fClipRect = ir;
+ if (!fClipRect.intersect(clipBounds)) {
+ SkASSERT(0);
+ fClipRect.setEmpty();
+ }
+
+ memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ SK_ABORT("Don't use this; directly add alphas to the mask.");
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ add_alpha(&this->getRow(y)[x], alpha);
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < width; ++i) {
+ add_alpha(&row[x + i], alpha);
+ }
+}
+
+void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 0) {
+ return;
+ }
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ // This must be called as if this is a real blitter.
+ // So we directly set alpha rather than adding it.
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < height; ++i) {
+ row[x] = alpha;
+ row += fMask.fRowBytes;
+ }
+}
+
+void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ // This must be called as if this is a real blitter.
+ // So we directly set alpha rather than adding it.
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < height; ++i) {
+ memset(row + x, 0xFF, width);
+ row += fMask.fRowBytes;
+ }
+}
+
+void MaskAdditiveBlitter::blitAntiRect(int x,
+ int y,
+ int width,
+ int height,
+ SkAlpha leftAlpha,
+ SkAlpha rightAlpha) {
+ blitV(x, y, height, leftAlpha);
+ blitV(x + 1 + width, y, height, rightAlpha);
+ blitRect(x + 1, y, width, height);
+}
+
+class RunBasedAdditiveBlitter : public AdditiveBlitter {
+public:
+ RunBasedAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse);
+
+ ~RunBasedAdditiveBlitter() override { this->flush(); }
+
+ SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+
+ int getWidth() override { return fWidth; }
+
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
+ if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
+ this->flush();
+ }
+ }
+
+protected:
+ SkBlitter* fRealBlitter;
+
+ int fCurrY; // Current y coordinate.
+ int fWidth; // Widest row of region to be blitted
+ int fLeft; // Leftmost x coordinate in any row
+ int fTop; // Initial y coordinate (top of bounds)
+
+ // The next three variables are used to track a circular buffer that
+ // contains the values used in SkAlphaRuns. These variables should only
+ // ever be updated in advanceRuns(), and fRuns should always point to
+ // a valid SkAlphaRuns...
+ int fRunsToBuffer;
+ void* fRunsBuffer;
+ int fCurrentRun;
+ SkAlphaRuns fRuns;
+
+ int fOffsetX;
+
+ bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }
+
+ // extra one to store the zero at the end
+ int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }
+
+ // This function updates the fRuns variable to point to the next buffer space
+ // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
+ // and resets fRuns to point to an empty scanline.
+ void advanceRuns() {
+ const size_t kRunsSz = this->getRunsSz();
+ fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
+ fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
+ fCurrentRun * kRunsSz);
+ fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
+ fRuns.reset(fWidth);
+ }
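+    // Note: fRunsToBuffer comes from the real blitter's requestRowsPreserved()
+    // (see the constructor below), so the circular buffer holds exactly as many
+    // scanline-sized SkAlphaRuns slots as the blitter promises to preserve.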
+
+ // Blitting 0xFF and 0 is much faster so we snap alphas close to them
+ SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }
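+    // e.g. snapAlpha(250) == 0xFF, snapAlpha(7) == 0x00, snapAlpha(128) == 128.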
+
+ void flush() {
+ if (fCurrY >= fTop) {
+ SkASSERT(fCurrentRun < fRunsToBuffer);
+ for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
+ // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
+ fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
+ }
+ if (!fRuns.empty()) {
+ // SkDEBUGCODE(fRuns.dump();)
+ fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
+ this->advanceRuns();
+ fOffsetX = 0;
+ }
+ fCurrY = fTop - 1;
+ }
+ }
+
+ void checkY(int y) {
+ if (y != fCurrY) {
+ this->flush();
+ fCurrY = y;
+ }
+ }
+};
+
+RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse) {
+ fRealBlitter = realBlitter;
+
+ SkIRect sectBounds;
+ if (isInverse) {
+ // We use the clip bounds instead of the ir, since we may be asked to
+        // draw outside of the rect when we're an inverse filltype.
+ sectBounds = clipBounds;
+ } else {
+ if (!sectBounds.intersect(ir, clipBounds)) {
+ sectBounds.setEmpty();
+ }
+ }
+
+ const int left = sectBounds.left();
+ const int right = sectBounds.right();
+
+ fLeft = left;
+ fWidth = right - left;
+ fTop = sectBounds.top();
+ fCurrY = fTop - 1;
+
+ fRunsToBuffer = realBlitter->requestRowsPreserved();
+ fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
+ fCurrentRun = -1;
+
+ this->advanceRuns();
+
+ fOffsetX = 0;
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < 0) {
+ len += x;
+ antialias -= x;
+ x = 0;
+ }
+ len = SkTMin(len, fWidth - x);
+ SkASSERT(check(x, len));
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
+ for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
+ for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
+ fRuns.fRuns[x + i + j] = 1;
+ fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
+ }
+ fRuns.fRuns[x + i] = 1;
+ }
+ for (int i = 0; i < len; ++i) {
+ add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
+ }
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (this->check(x, 1)) {
+ fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
+ }
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (this->check(x, width)) {
+ fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
+ }
+}
+
+// This exists specifically for concave path filling.
+// In those cases, we can easily accumulate alpha greater than 0xFF.
+class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
+public:
+ SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse)
+ : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+};
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < 0) {
+ len += x;
+ antialias -= x;
+ x = 0;
+ }
+ len = SkTMin(len, fWidth - x);
+ SkASSERT(check(x, len));
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
+ for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
+ for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
+ fRuns.fRuns[x + i + j] = 1;
+ fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
+ }
+ fRuns.fRuns[x + i] = 1;
+ }
+ for (int i = 0; i < len; ++i) {
+ safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, 1)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
+ safely_add_alpha(&fRuns.fAlpha[x], alpha);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, width)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
+ for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
+ safely_add_alpha(&fRuns.fAlpha[i], alpha);
+ }
+ }
+}
+
+// Return the alpha of a trapezoid whose height is 1
+static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
+ SkASSERT(l1 >= 0 && l2 >= 0);
+ SkFixed area = (l1 + l2) / 2;
+ return SkTo<SkAlpha>(area >> 8);
+}
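+// e.g. l1 = 0 and l2 = SK_Fixed1 / 2 describe a triangle covering a quarter of
+// the pixel: area = SK_Fixed1 / 4 = 16384, and 16384 >> 8 = 64 ~= 0.25 * 255.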
+
+// The alpha of a right triangle with legs a and a*b
+static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
+ SkASSERT(a <= SK_Fixed1);
+#if 0
+ // TODO(mtklein): skia:8877
+ SkASSERT(b <= SK_Fixed1);
+#endif
+
+ // Approximating...
+ // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
+ SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);
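+    // Note: (a >> 11) * (a >> 11) * (b >> 11) == a*a*b / 2^33 (up to
+    // truncation), which is exactly the 16.16 value of a*a*b/2 computed by the
+    // SkFixedMul expression above, without needing 64-bit multiplies.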
+
+#if 0
+ // TODO(mtklein): skia:8877
+ return SkTo<SkAlpha>(area >> 8);
+#else
+ return SkTo<SkAlpha>((area >> 8) & 0xFF);
+#endif
+}
+
+static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
+ return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
+}
+
+static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
+ return (alpha * fullAlpha) >> 8;
+}
+
+// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
+// For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
+// This is rarely a problem, so we only use this for blitting rectangles.
+static SkAlpha fixed_to_alpha(SkFixed f) {
+ SkASSERT(f <= SK_Fixed1);
+ return get_partial_alpha(0xFF, f);
+}
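+// e.g. fixed_to_alpha(SK_Fixed1) = SkFixedRoundToInt(0xFF * SK_Fixed1) = 255,
+// whereas a plain (SK_Fixed1 >> 8) would yield 256 and overflow SkAlpha.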
+
+// Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
+// approximate (very coarsely) the x coordinate of the intersection.
+static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
+ if (l1 > r1) {
+ std::swap(l1, r1);
+ }
+ if (l2 > r2) {
+ std::swap(l2, r2);
+ }
+ return (SkTMax(l1, l2) + SkTMin(r1, r2)) / 2;
+}
+
+// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
+static void compute_alpha_above_line(SkAlpha* alphas,
+ SkFixed l,
+ SkFixed r,
+ SkFixed dY,
+ SkAlpha fullAlpha) {
+ SkASSERT(l <= r);
+ SkASSERT(l >> 16 == 0);
+ int R = SkFixedCeilToInt(r);
+ if (R == 0) {
+ return;
+ } else if (R == 1) {
+ alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
+ } else {
+ SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
+ SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
+ SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
+ alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
+ SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
+ for (int i = 1; i < R - 1; ++i) {
+ alphas[i] = alpha16 >> 8;
+ alpha16 += dY;
+ }
+ alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
+ }
+}
+
+// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
+static void compute_alpha_below_line(SkAlpha* alphas,
+ SkFixed l,
+ SkFixed r,
+ SkFixed dY,
+ SkAlpha fullAlpha) {
+ SkASSERT(l <= r);
+ SkASSERT(l >> 16 == 0);
+ int R = SkFixedCeilToInt(r);
+ if (R == 0) {
+ return;
+ } else if (R == 1) {
+ alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
+ } else {
+ SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
+ SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
+ SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
+ alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
+ SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
+ for (int i = R - 2; i > 0; i--) {
+ alphas[i] = (alpha16 >> 8) & 0xFF;
+ alpha16 += dY;
+ }
+ alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
+ }
+}
+
+// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
+static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ SkAlpha alpha,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
+ maskRow[x] = alpha;
+ } else if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
+ } else {
+ add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitV(x, y, 1, alpha);
+ } else {
+ blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
+ }
+ }
+}
+
+static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ SkAlpha a1,
+ SkAlpha a2,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x], a1);
+ safely_add_alpha(&maskRow[x + 1], a2);
+ } else {
+ add_alpha(&maskRow[x], a1);
+ add_alpha(&maskRow[x + 1], a2);
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
+ } else {
+ blitter->blitAntiH(x, y, a1);
+ blitter->blitAntiH(x + 1, y, a2);
+ }
+ }
+}
+
+static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ int len,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ for (int i = 0; i < len; ++i) {
+ if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x + i], fullAlpha);
+ } else {
+ add_alpha(&maskRow[x + i], fullAlpha);
+ }
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitH(x, y, len);
+ } else {
+ blitter->blitAntiH(x, y, len, fullAlpha);
+ }
+ }
+}
+
+static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
+ int y,
+ SkFixed ul,
+ SkFixed ur,
+ SkFixed ll,
+ SkFixed lr,
+ SkFixed lDY,
+ SkFixed rDY,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
+ int len = R - L;
+
+ if (len == 1) {
+ SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
+ blit_single_alpha(blitter,
+ y,
+ L,
+ alpha,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ return;
+ }
+
+ const int kQuickLen = 31;
+ char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
+ SkAlpha* alphas;
+
+ if (len <= kQuickLen) {
+ alphas = (SkAlpha*)quickMemory;
+ } else {
+ alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
+ }
+
+ SkAlpha* tempAlphas = alphas + len + 1;
+ int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
+
+ for (int i = 0; i < len; ++i) {
+ runs[i] = 1;
+ alphas[i] = fullAlpha;
+ }
+ runs[len] = 0;
+
+ int uL = SkFixedFloorToInt(ul);
+ int lL = SkFixedCeilToInt(ll);
+ if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case
+ SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
+ SkFixed second = ll - ul - first;
+ SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
+ SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
+ alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
+ alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
+ } else {
+ compute_alpha_below_line(
+ tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
+ for (int i = uL; i < lL; ++i) {
+ if (alphas[i - L] > tempAlphas[i - L]) {
+ alphas[i - L] -= tempAlphas[i - L];
+ } else {
+ alphas[i - L] = 0;
+ }
+ }
+ }
+
+ int uR = SkFixedFloorToInt(ur);
+ int lR = SkFixedCeilToInt(lr);
+ if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case
+ SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
+ SkFixed second = lr - ur - first;
+ SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
+ SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
+ alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
+ alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
+ } else {
+ compute_alpha_above_line(
+ tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
+ for (int i = uR; i < lR; ++i) {
+ if (alphas[i - L] > tempAlphas[i - L]) {
+ alphas[i - L] -= tempAlphas[i - L];
+ } else {
+ alphas[i - L] = 0;
+ }
+ }
+ }
+
+ if (isUsingMask) {
+ for (int i = 0; i < len; ++i) {
+ if (needSafeCheck) {
+ safely_add_alpha(&maskRow[L + i], alphas[i]);
+ } else {
+ add_alpha(&maskRow[L + i], alphas[i]);
+ }
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ // Real blitter is faster than RunBasedAdditiveBlitter
+ blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
+ } else {
+ blitter->blitAntiH(L, y, alphas, len);
+ }
+ }
+
+ if (len > kQuickLen) {
+ delete[] alphas;
+ }
+}
+
+static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter,
+ int y,
+ SkFixed ul,
+ SkFixed ur,
+ SkFixed ll,
+ SkFixed lr,
+ SkFixed lDY,
+ SkFixed rDY,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter = false,
+ bool needSafeCheck = false) {
+    SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute values
+
+ if (ul > ur) {
+ return;
+ }
+
+    // Edges cross. Approximate the intersection. This should only happen due to precision
+    // limits, so the approximation could be very coarse.
+ if (ll > lr) {
+ ll = lr = approximate_intersection(ul, ll, ur, lr);
+ }
+
+ if (ul == ur && ll == lr) {
+        return; // empty trapezoid
+ }
+
+ // We're going to use the left line ul-ll and the rite line ur-lr
+ // to exclude the area that's not covered by the path.
+ // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
+ // so we'll do that for simplicity.
+ if (ul > ll) {
+ std::swap(ul, ll);
+ }
+ if (ur > lr) {
+ std::swap(ur, lr);
+ }
+
+ SkFixed joinLeft = SkFixedCeilToFixed(ll);
+ SkFixed joinRite = SkFixedFloorToFixed(ur);
+ if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit
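+        // e.g. with ll = 1.25 and ur = 4.75 (as 16.16 values), joinLeft = 2
+        // and joinRite = 4: columns [2, 4) lie strictly between the two edges
+        // and get full coverage; the partial columns on either side are
+        // handled below.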
+ if (ul < joinLeft) {
+ int len = SkFixedCeilToInt(joinLeft - ul);
+ if (len == 1) {
+ SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
+ blit_single_alpha(blitter,
+ y,
+ ul >> 16,
+ alpha,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ } else if (len == 2) {
+ SkFixed first = joinLeft - SK_Fixed1 - ul;
+ SkFixed second = ll - ul - first;
+ SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
+ SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
+ blit_two_alphas(blitter,
+ y,
+ ul >> 16,
+ a1,
+ a2,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ } else {
+ blit_aaa_trapezoid_row(blitter,
+ y,
+ ul,
+ joinLeft,
+ ll,
+ joinLeft,
+ lDY,
+ SK_MaxS32,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ }
+ }
+ // SkAAClip requires that we blit from left to right.
+ // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
+ if (joinLeft < joinRite) {
+ blit_full_alpha(blitter,
+ y,
+ SkFixedFloorToInt(joinLeft),
+ SkFixedFloorToInt(joinRite - joinLeft),
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ }
+ if (lr > joinRite) {
+ int len = SkFixedCeilToInt(lr - joinRite);
+ if (len == 1) {
+ SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
+ blit_single_alpha(blitter,
+ y,
+ joinRite >> 16,
+ alpha,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ } else if (len == 2) {
+ SkFixed first = joinRite + SK_Fixed1 - ur;
+ SkFixed second = lr - ur - first;
+ SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
+ SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
+ blit_two_alphas(blitter,
+ y,
+ joinRite >> 16,
+ a1,
+ a2,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ } else {
+ blit_aaa_trapezoid_row(blitter,
+ y,
+ joinRite,
+ ur,
+ joinRite,
+ lr,
+ SK_MaxS32,
+ rDY,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ }
+ }
+ } else {
+ blit_aaa_trapezoid_row(blitter,
+ y,
+ ul,
+ ur,
+ ll,
+ lr,
+ lDY,
+ rDY,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ needSafeCheck);
+ }
+}
+
+static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
+ int valuea = a.fUpperY;
+ int valueb = b.fUpperY;
+
+ if (valuea == valueb) {
+ valuea = a.fX;
+ valueb = b.fX;
+ }
+
+ if (valuea == valueb) {
+ valuea = a.fDX;
+ valueb = b.fDX;
+ }
+
+ return valuea < valueb;
+}
+
+static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
+ SkTQSort(list, list + count - 1);
+
+ // now make the edges linked in sorted order
+ for (int i = 1; i < count; ++i) {
+ list[i - 1]->fNext = list[i];
+ list[i]->fPrev = list[i - 1];
+ }
+
+ *last = list[count - 1];
+ return list[0];
+}
+
+static void validate_sort(const SkAnalyticEdge* edge) {
+#ifdef SK_DEBUG
+ SkFixed y = SkIntToFixed(-32768);
+
+ while (edge->fUpperY != SK_MaxS32) {
+ edge->validate();
+ SkASSERT(y <= edge->fUpperY);
+
+ y = edge->fUpperY;
+ edge = (SkAnalyticEdge*)edge->fNext;
+ }
+#endif
+}
+
+// For an edge, we consider it smooth if its Dx doesn't change much and its Dy is large enough.
+// For curves that are updating, Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
+// relatively large compared to fQDDx/fCDDx and fQDDy/fCDDy.
+static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
+ if (thisEdge->fCurveCount < 0) {
+ const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
+ int ddshift = cEdge.fCurveShift;
+ return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
+ SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
+ // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
+ (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
+ } else if (thisEdge->fCurveCount > 0) {
+ const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
+ return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
+ SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
+ // current Dy is (fQDy - fQDDy) >> shift
+ (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
+ }
+ return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small
+ nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1; // Dy should be large
+}
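+
+// A minimal sketch of the line-edge branch above (example only, with
+// invented values; never compiled):
+#if 0
+static bool is_smooth_enough_line_example() {
+ SkFixed thisDX = SK_Fixed1 / 2; // this edge drifts 0.5 px per scan line
+ SkFixed nextDX = SK_Fixed1 / 4; // next edge drifts 0.25 px per scan line
+ SkFixed nextExtent = 3 * SK_Fixed1; // next edge spans 3 scan lines
+ return SkAbs32(nextDX - thisDX) <= SK_Fixed1 && // DDx is small
+ nextExtent >= SK_Fixed1; // Dy is large
+}
+#endif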
+
+// Check if the leftE and riteE are changing smoothly in terms of fDX.
+// If yes, we can later skip the fractional y and directly jump to integer y.
+static bool is_smooth_enough(SkAnalyticEdge* leftE,
+ SkAnalyticEdge* riteE,
+ SkAnalyticEdge* currE,
+ int stop_y) {
+ if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
+ return false; // We're at the end so we won't skip anything
+ }
+ if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
+ return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
+ } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
+ return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
+ }
+
+ // Now both edges are changing, find the second next edge
+ SkAnalyticEdge* nextCurrE = currE->fNext;
+ if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
+ return false;
+ }
+ // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
+ if (nextCurrE->fUpperX < currE->fUpperX) {
+ std::swap(currE, nextCurrE);
+ }
+ return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
+}
+
+static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
+ AdditiveBlitter* blitter,
+ int start_y,
+ int stop_y,
+ SkFixed leftBound,
+ SkFixed riteBound,
+ bool isUsingMask) {
+ validate_sort((SkAnalyticEdge*)prevHead->fNext);
+
+ SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
+ SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
+ SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;
+
+ SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY);
+
+ for (;;) {
+ // We have to check fLowerY first because some edges might be alone (e.g., there's only
+ // a left edge but no right edge in a given y scan line) due to the precision limit.
+ while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
+ if (!leftE->update(y)) {
+ if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
+ goto END_WALK;
+ }
+ leftE = currE;
+ currE = (SkAnalyticEdge*)currE->fNext;
+ }
+ }
+ while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
+ if (!riteE->update(y)) {
+ if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
+ goto END_WALK;
+ }
+ riteE = currE;
+ currE = (SkAnalyticEdge*)currE->fNext;
+ }
+ }
+
+ SkASSERT(leftE);
+ SkASSERT(riteE);
+
+ // check our bottom clip
+ if (SkFixedFloorToInt(y) >= stop_y) {
+ break;
+ }
+
+ SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
+ SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);
+
+ leftE->goY(y);
+ riteE->goY(y);
+
+ if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
+ std::swap(leftE, riteE);
+ }
+
+ SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
+ if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
+ local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
+ }
+ local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));
+
+ SkFixed left = SkTMax(leftBound, leftE->fX);
+ SkFixed dLeft = leftE->fDX;
+ SkFixed rite = SkTMin(riteBound, riteE->fX);
+ SkFixed dRite = riteE->fDX;
+ if (0 == (dLeft | dRite)) {
+ int fullLeft = SkFixedCeilToInt(left);
+ int fullRite = SkFixedFloorToInt(rite);
+ SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
+ SkFixed partialRite = rite - SkIntToFixed(fullRite);
+ int fullTop = SkFixedCeilToInt(y);
+ int fullBot = SkFixedFloorToInt(local_bot_fixed);
+ SkFixed partialTop = SkIntToFixed(fullTop) - y;
+ SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
+ if (fullTop > fullBot) { // The rectangle is less than one pixel tall...
+ partialTop -= (SK_Fixed1 - partialBot);
+ partialBot = 0;
+ }
+
+ if (fullRite >= fullLeft) {
+ if (partialTop > 0) { // blit first partial row
+ if (partialLeft > 0) {
+ blitter->blitAntiH(fullLeft - 1,
+ fullTop - 1,
+ fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
+ }
+ blitter->blitAntiH(
+ fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
+ if (partialRite > 0) {
+ blitter->blitAntiH(fullRite,
+ fullTop - 1,
+ fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
+ }
+ blitter->flush_if_y_changed(y, y + partialTop);
+ }
+
+ // Blit all full-height rows from fullTop to fullBot
+ if (fullBot > fullTop &&
+ // SkAAClip cannot handle an empty rect, so check for non-emptiness here
+ // (bug chromium:662800)
+ (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
+ fixed_to_alpha(partialRite) > 0)) {
+ blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
+ fullTop,
+ fullRite - fullLeft,
+ fullBot - fullTop,
+ fixed_to_alpha(partialLeft),
+ fixed_to_alpha(partialRite));
+ }
+
+ if (partialBot > 0) { // blit last partial row
+ if (partialLeft > 0) {
+ blitter->blitAntiH(fullLeft - 1,
+ fullBot,
+ fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
+ }
+ blitter->blitAntiH(
+ fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
+ if (partialRite > 0) {
+ blitter->blitAntiH(fullRite,
+ fullBot,
+ fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
+ }
+ }
+ } else { // left and rite are within the same pixel
+ if (partialTop > 0) {
+ blitter->blitAntiH(fullLeft - 1,
+ fullTop - 1,
+ 1,
+ fixed_to_alpha(SkFixedMul(partialTop, rite - left)));
+ blitter->flush_if_y_changed(y, y + partialTop);
+ }
+ if (fullBot > fullTop) {
+ blitter->getRealBlitter()->blitV(
+ fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(rite - left));
+ }
+ if (partialBot > 0) {
+ blitter->blitAntiH(fullLeft - 1,
+ fullBot,
+ 1,
+ fixed_to_alpha(SkFixedMul(partialBot, rite - left)));
+ }
+ }
+
+ y = local_bot_fixed;
+ } else {
+ // The following constants are used to snap X.
+ // We snap X mainly for speed (no tiny triangles) and
+ // to avoid edge cases caused by precision errors.
+ const SkFixed kSnapDigit = SK_Fixed1 >> 4;
+ const SkFixed kSnapHalf = kSnapDigit >> 1;
+ const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
+ left += kSnapHalf;
+ rite += kSnapHalf; // For fast rounding
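+ // Illustrative snapping arithmetic (hypothetical values, not from the
+ // original source): kSnapDigit == 0x1000 and kSnapMask == 0xFFFFF000 in
+ // 16.16 fixed point. A raw left of 0x00012ABC becomes 0x000132BC after
+ // adding kSnapHalf (0x800), and (left & kSnapMask) == 0x00013000, i.e.
+ // left rounded to the nearest 1/16 of a pixel before each
+ // blit_trapezoid_row call below.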
+
+ // Number of blit_trapezoid_row calls we'll have
+ int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);
+
+ // If we're using the mask blitter, we advance the mask row in this function
+ // to save some "if" condition checks.
+ SkAlpha* maskRow = nullptr;
+ if (isUsingMask) {
+ maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+ }
+
+ // Instead of writing one loop that handles both the partial-row and full-row
+ // blit_trapezoid_row calls together, we use the following 3-stage flow to
+ // handle partial-row and full-row blits separately. This saves a lot of time
+ // otherwise spent updating y, left, and rite.
+ if (count > 1) {
+ if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top
+ count--;
+ SkFixed nextY = SkFixedCeilToFixed(y + 1);
+ SkFixed dY = nextY - y;
+ SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
+ SkFixed nextRite = rite + SkFixedMul(dRite, dY);
+ SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+ (nextLeft & kSnapMask) >= leftBound &&
+ (nextRite & kSnapMask) <= riteBound);
+ blit_trapezoid_row(blitter,
+ y >> 16,
+ left & kSnapMask,
+ rite & kSnapMask,
+ nextLeft & kSnapMask,
+ nextRite & kSnapMask,
+ leftE->fDY,
+ riteE->fDY,
+ get_partial_alpha(0xFF, dY),
+ maskRow,
+ isUsingMask);
+ blitter->flush_if_y_changed(y, nextY);
+ left = nextLeft;
+ rite = nextRite;
+ y = nextY;
+ }
+
+ while (count > 1) { // Full rows in the middle
+ count--;
+ if (isUsingMask) {
+ maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+ }
+ SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
+ SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+ (nextLeft & kSnapMask) >= leftBound &&
+ (nextRite & kSnapMask) <= riteBound);
+ blit_trapezoid_row(blitter,
+ y >> 16,
+ left & kSnapMask,
+ rite & kSnapMask,
+ nextLeft & kSnapMask,
+ nextRite & kSnapMask,
+ leftE->fDY,
+ riteE->fDY,
+ 0xFF,
+ maskRow,
+ isUsingMask);
+ blitter->flush_if_y_changed(y, nextY);
+ left = nextLeft;
+ rite = nextRite;
+ y = nextY;
+ }
+ }
+
+ if (isUsingMask) {
+ maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+ }
+
+ SkFixed dY = local_bot_fixed - y; // partial-row on the bottom
+ SkASSERT(dY <= SK_Fixed1);
+ // Smooth jumping to an integer y may push the last nextLeft/nextRite out of bounds.
+ // Bring them back into bounds here.
+ // Note that we subtract kSnapHalf later, so we have to add it to leftBound/riteBound.
+ SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
+ SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
+ SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+ (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
+ blit_trapezoid_row(blitter,
+ y >> 16,
+ left & kSnapMask,
+ rite & kSnapMask,
+ nextLeft & kSnapMask,
+ nextRite & kSnapMask,
+ leftE->fDY,
+ riteE->fDY,
+ get_partial_alpha(0xFF, dY),
+ maskRow,
+ isUsingMask);
+ blitter->flush_if_y_changed(y, local_bot_fixed);
+ left = nextLeft;
+ rite = nextRite;
+ y = local_bot_fixed;
+ left -= kSnapHalf;
+ rite -= kSnapHalf;
+ }
+
+ leftE->fX = left;
+ riteE->fX = rite;
+ leftE->fY = riteE->fY = y;
+ }
+
+END_WALK:;
+}
+
+static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
+ *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
+}
+
+static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
+ if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
+ *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
+ }
+}
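+
+// Hedged worked example for check_intersection (values invented): if after
+// one more scan line the previous edge would sit to the right of this one
+// (fPrev->fX + fPrev->fDX > fX + fDX), the two edges must cross within the
+// next row, so an extra fractional stop is scheduled one quarter pixel
+// below nextY (assuming kDefaultAccuracy == 2, so SK_Fixed1 >> 2 == 0.25).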
+
+static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
+ if (newEdge->fUpperY > y) {
+ update_next_next_y(newEdge->fUpperY, y, nextNextY);
+ return;
+ }
+ SkAnalyticEdge* prev = newEdge->fPrev;
+ if (prev->fX <= newEdge->fX) {
+ while (newEdge->fUpperY <= y) {
+ check_intersection(newEdge, y, nextNextY);
+ update_next_next_y(newEdge->fLowerY, y, nextNextY);
+ newEdge = newEdge->fNext;
+ }
+ update_next_next_y(newEdge->fUpperY, y, nextNextY);
+ return;
+ }
+ // find first x pos to insert
+ SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
+ // insert the lot, fixing up the links as we go
+ do {
+ SkAnalyticEdge* next = newEdge->fNext;
+ do {
+ if (start->fNext == newEdge) {
+ goto nextEdge;
+ }
+ SkAnalyticEdge* after = start->fNext;
+ if (after->fX >= newEdge->fX) {
+ break;
+ }
+ SkASSERT(start != after);
+ start = after;
+ } while (true);
+ remove_edge(newEdge);
+ insert_edge_after(newEdge, start);
+ nextEdge:
+ check_intersection(newEdge, y, nextNextY);
+ update_next_next_y(newEdge->fLowerY, y, nextNextY);
+ start = newEdge;
+ newEdge = next;
+ } while (newEdge->fUpperY <= y);
+ update_next_next_y(newEdge->fUpperY, y, nextNextY);
+}
+
+static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
+#ifdef SK_DEBUG
+ while (edge->fUpperY <= y) {
+ SkASSERT(edge->fPrev && edge->fNext);
+ SkASSERT(edge->fPrev->fNext == edge);
+ SkASSERT(edge->fNext->fPrev == edge);
+ SkASSERT(edge->fUpperY <= edge->fLowerY);
+ SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
+ edge = edge->fNext;
+ }
+#endif
+}
+
+// Return true if prev->fX, next->fX are too close in the current pixel row.
+static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
+ // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
+ // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
+ // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
+ // edges prev and next are within one pixel.
+ constexpr SkFixed SLACK = SK_Fixed1;
+
+ // Note that even if the following test fails, the edges might still be very close to each
+ // other at some point within the current pixel row because of prev->fDX and next->fDX.
+ // However, handling that case would sacrifice more performance.
+ // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg),
+ // so I'll ignore fDX as a performance tradeoff.
+ return next && prev && next->fUpperY < lowerY &&
+ prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
+ // The following is more accurate but also slower.
+ // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
+ // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
+}
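+
+// Worked example for the SLACK above (hypothetical values): prev->fX == 0.1
+// and next->fX == 0.9 (in pixels) with next->fDX == 0. Without SLACK the
+// test reads 0.1 >= 0.9, which fails even though both edges share a pixel;
+// with SLACK == SK_Fixed1 it reads 1.1 >= 0.9, which passes.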
+
+// This function exists for the case where the previous rite edge is removed because
+// its fLowerY <= nextY
+static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
+ return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
+}
+
+static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
+ SkFixed lowerY,
+ SkFixed lowerLeft,
+ SkFixed lowerRite,
+ AdditiveBlitter* blitter,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ SkFixed leftClip,
+ SkFixed rightClip) {
+ SkAnalyticEdge* riteE = leftE->fRiteE;
+ SkASSERT(riteE);
+ SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
+ SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
+ int y = SkFixedFloorToInt(leftE->fSavedY);
+ // Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
+ // to eliminate cumulative error: if there are many fractional y scan lines within the
+ // same row, the former may accumulate rounding error while the latter won't.
+ SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
+ fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
+ // We need fSavedDY because the (quad or cubic) edge might be updated
+ blit_trapezoid_row(
+ blitter,
+ y,
+ SkTMax(leftE->fSavedX, leftClip),
+ SkTMin(riteE->fSavedX, rightClip),
+ SkTMax(lowerLeft, leftClip),
+ SkTMin(lowerRite, rightClip),
+ leftE->fSavedDY,
+ riteE->fSavedDY,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
+ edges_too_close(riteE, riteE->fNext, lowerY))),
+ true);
+ leftE->fRiteE = nullptr;
+}
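+
+// Hedged numeric sketch of the fullAlpha computation above (values
+// invented): three fractional trapezoids at y == 7.0..7.25, 7.25..7.7, and
+// 7.7..8.0 inside row 7 would each round independently if we summed
+// fixed_to_alpha(height) per piece; the subtraction form instead telescopes
+// (each piece's top term cancels the previous piece's bottom term), so the
+// row total is exact no matter how many fractional scan lines land in it.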
+
+static void deferred_blit(SkAnalyticEdge* leftE,
+ SkAnalyticEdge* riteE,
+ SkFixed left,
+ SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
+ SkFixed y,
+ SkFixed nextY,
+ bool isIntegralNextY,
+ bool leftEnds,
+ bool riteEnds,
+ AdditiveBlitter* blitter,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ SkFixed leftClip,
+ SkFixed rightClip,
+ int yShift) {
+ if (leftE->fRiteE && leftE->fRiteE != riteE) {
+ // leftE's right edge changed. Blit the saved trapezoid.
+ SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
+ blit_saved_trapezoid(leftE,
+ y,
+ left,
+ leftE->fRiteE->fX,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip);
+ }
+ if (!leftE->fRiteE) {
+ // Save and defer blitting the trapezoid
+ SkASSERT(riteE->fRiteE == nullptr);
+ SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+ SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
+ leftE->saveXY(left, y, leftDY);
+ riteE->saveXY(riteE->fX, y, riteE->fDY);
+ leftE->fRiteE = riteE;
+ }
+ SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+ riteE->goY(nextY, yShift);
+ // Always blit when edges end or nextY is integral
+ if (isIntegralNextY || leftEnds || riteEnds) {
+ blit_saved_trapezoid(leftE,
+ nextY,
+ leftE->fX,
+ riteE->fX,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip);
+ }
+}
+
+static void aaa_walk_edges(SkAnalyticEdge* prevHead,
+ SkAnalyticEdge* nextTail,
+ SkPath::FillType fillType,
+ AdditiveBlitter* blitter,
+ int start_y,
+ int stop_y,
+ SkFixed leftClip,
+ SkFixed rightClip,
+ bool isUsingMask,
+ bool forceRLE,
+ bool useDeferred,
+ bool skipIntersect) {
+ prevHead->fX = prevHead->fUpperX = leftClip;
+ nextTail->fX = nextTail->fUpperX = rightClip;
+ SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
+ SkFixed nextNextY = SK_MaxS32;
+
+ {
+ SkAnalyticEdge* edge;
+ for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
+ edge->goY(y);
+ update_next_next_y(edge->fLowerY, y, &nextNextY);
+ }
+ update_next_next_y(edge->fUpperY, y, &nextNextY);
+ }
+
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ int windingMask = (fillType & 1) ? 1 : -1;
+
+ bool isInverse = SkPath::IsInverseFillType(fillType);
+
+ if (isInverse && SkIntToFixed(start_y) != y) {
+ int width = SkFixedFloorToInt(rightClip - leftClip);
+ if (SkFixedFloorToInt(y) != start_y) {
+ blitter->getRealBlitter()->blitRect(
+ SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
+ start_y = SkFixedFloorToInt(y);
+ }
+ SkAlpha* maskRow =
+ isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
+ blit_full_alpha(blitter,
+ start_y,
+ SkFixedFloorToInt(leftClip),
+ width,
+ fixed_to_alpha(y - SkIntToFixed(start_y)),
+ maskRow,
+ isUsingMask,
+ false,
+ false);
+ }
+
+ while (true) {
+ int w = 0;
+ bool in_interval = isInverse;
+ SkFixed prevX = prevHead->fX;
+ SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
+ bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
+ SkAnalyticEdge* currE = prevHead->fNext;
+ SkAnalyticEdge* leftE = prevHead;
+ SkFixed left = leftClip;
+ SkFixed leftDY = 0;
+ bool leftEnds = false;
+ int prevRite = SkFixedFloorToInt(leftClip);
+
+ nextNextY = SK_MaxS32;
+
+ SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
+ int yShift = 0;
+ if ((nextY - y) & (SK_Fixed1 >> 2)) {
+ yShift = 2;
+ nextY = y + (SK_Fixed1 >> 2);
+ } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
+ yShift = 1;
+ SkASSERT(nextY == y + (SK_Fixed1 >> 1));
+ }
+
+ SkAlpha fullAlpha = fixed_to_alpha(nextY - y);
+
+ // If we're using the mask blitter, we advance the mask row in this function
+ // to save some "if" condition checks.
+ SkAlpha* maskRow = nullptr;
+ if (isUsingMask) {
+ maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
+ }
+
+ SkASSERT(currE->fPrev == prevHead);
+ validate_edges_for_y(currE, y);
+
+ // Even if nextY - y == SK_Fixed1, we can still break the left-to-right order requirement
+ // of the SkAAClip: |\| (two trapezoids with overlapping middle wedges)
+ bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);
+
+ while (currE->fUpperY <= y) {
+ SkASSERT(currE->fLowerY >= nextY);
+ SkASSERT(currE->fY == y);
+
+ w += currE->fWinding;
+ bool prev_in_interval = in_interval;
+ in_interval = !(w & windingMask) == isInverse;
+
+ bool isLeft = in_interval && !prev_in_interval;
+ bool isRite = !in_interval && prev_in_interval;
+ bool currEnds = currE->fLowerY == nextY;
+
+ if (useDeferred) {
+ if (currE->fRiteE && !isLeft) {
+ // currE is a left edge previously, but now it's not.
+ // Blit the trapezoid between fSavedY and y.
+ SkASSERT(currE->fRiteE->fY == y);
+ blit_saved_trapezoid(currE,
+ y,
+ currE->fX,
+ currE->fRiteE->fX,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip);
+ }
+ if (leftE->fRiteE == currE && !isRite) {
+ // currE is a right edge previously, but now it's not.
+ // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
+ // in the previous if clause). Hence we blit the trapezoid.
+ blit_saved_trapezoid(leftE,
+ y,
+ left,
+ currE->fX,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip);
+ }
+ }
+
+ if (isRite) {
+ if (useDeferred) {
+ deferred_blit(leftE,
+ currE,
+ left,
+ leftDY,
+ y,
+ nextY,
+ isIntegralNextY,
+ leftEnds,
+ currEnds,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip,
+ yShift);
+ } else {
+ SkFixed rite = currE->fX;
+ currE->goY(nextY, yShift);
+ SkFixed nextLeft = SkTMax(leftClip, leftE->fX);
+ rite = SkTMin(rightClip, rite);
+ SkFixed nextRite = SkTMin(rightClip, currE->fX);
+ blit_trapezoid_row(
+ blitter,
+ y >> 16,
+ left,
+ rite,
+ nextLeft,
+ nextRite,
+ leftDY,
+ currE->fDY,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter || (fullAlpha == 0xFF &&
+ (edges_too_close(prevRite, left, leftE->fX) ||
+ edges_too_close(currE, currE->fNext, nextY))),
+ true);
+ prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
+ }
+ } else {
+ if (isLeft) {
+ left = SkTMax(currE->fX, leftClip);
+ leftDY = currE->fDY;
+ leftE = currE;
+ leftEnds = leftE->fLowerY == nextY;
+ }
+ currE->goY(nextY, yShift);
+ }
+
+ SkAnalyticEdge* next = currE->fNext;
+ SkFixed newX;
+
+ while (currE->fLowerY <= nextY) {
+ if (currE->fCurveCount < 0) {
+ SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
+ cubicEdge->keepContinuous();
+ if (!cubicEdge->updateCubic()) {
+ break;
+ }
+ } else if (currE->fCurveCount > 0) {
+ SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
+ quadEdge->keepContinuous();
+ if (!quadEdge->updateQuadratic()) {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ SkASSERT(currE->fY == nextY);
+
+ if (currE->fLowerY <= nextY) {
+ remove_edge(currE);
+ } else {
+ update_next_next_y(currE->fLowerY, nextY, &nextNextY);
+ newX = currE->fX;
+ SkASSERT(currE->fLowerY > nextY);
+ if (newX < prevX) { // ripple currE backwards until it is x-sorted
+ // If the crossing edge is a right edge, blit the saved trapezoid.
+ if (leftE->fRiteE == currE && useDeferred) {
+ SkASSERT(leftE->fY == nextY && currE->fY == nextY);
+ blit_saved_trapezoid(leftE,
+ nextY,
+ leftE->fX,
+ currE->fX,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip);
+ }
+ backward_insert_edge_based_on_x(currE);
+ } else {
+ prevX = newX;
+ }
+ if (!skipIntersect) {
+ check_intersection(currE, nextY, &nextNextY);
+ }
+ }
+
+ currE = next;
+ SkASSERT(currE);
+ }
+
+ // was our right-edge culled away?
+ if (in_interval) {
+ if (useDeferred) {
+ deferred_blit(leftE,
+ nextTail,
+ left,
+ leftDY,
+ y,
+ nextY,
+ isIntegralNextY,
+ leftEnds,
+ false,
+ blitter,
+ maskRow,
+ isUsingMask,
+ noRealBlitter,
+ leftClip,
+ rightClip,
+ yShift);
+ } else {
+ blit_trapezoid_row(blitter,
+ y >> 16,
+ left,
+ rightClip,
+ SkTMax(leftClip, leftE->fX),
+ rightClip,
+ leftDY,
+ 0,
+ fullAlpha,
+ maskRow,
+ isUsingMask,
+ noRealBlitter || (fullAlpha == 0xFF &&
+ edges_too_close(leftE->fPrev, leftE, nextY)),
+ true);
+ }
+ }
+
+ if (forceRLE) {
+ ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
+ }
+
+ y = nextY;
+ if (y >= SkIntToFixed(stop_y)) {
+ break;
+ }
+
+ // now currE points to the first edge with a fUpperY larger than the previous y
+ insert_new_edges(currE, y, &nextNextY);
+ }
+}
+
+static SK_ALWAYS_INLINE void aaa_fill_path(
+ const SkPath& path,
+ const SkIRect& clipRect,
+ AdditiveBlitter* blitter,
+ int start_y,
+ int stop_y,
+ bool pathContainedInClip,
+ bool isUsingMask,
+ bool forceRLE) { // forceRLE implies that SkAAClip is calling us
+ SkASSERT(blitter);
+
+ SkAnalyticEdgeBuilder builder;
+ int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
+ SkAnalyticEdge** list = builder.analyticEdgeList();
+
+ SkIRect rect = clipRect;
+ if (0 == count) {
+ if (path.isInverseFillType()) {
+ /*
+ * Since we are in inverse-fill, our caller has already drawn above
+ * our top (start_y) and will draw below our bottom (stop_y). Thus
+ * we need to restrict our drawing to the intersection of the clip
+ * and those two limits.
+ */
+ if (rect.fTop < start_y) {
+ rect.fTop = start_y;
+ }
+ if (rect.fBottom > stop_y) {
+ rect.fBottom = stop_y;
+ }
+ if (!rect.isEmpty()) {
+ blitter->getRealBlitter()->blitRect(
+ rect.fLeft, rect.fTop, rect.width(), rect.height());
+ }
+ }
+ return;
+ }
+
+ SkAnalyticEdge headEdge, tailEdge, *last;
+ // this returns the first and last edge after they're sorted into a doubly linked list
+ SkAnalyticEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fRiteE = nullptr;
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
+ headEdge.fX = SK_MinS32;
+ headEdge.fDX = 0;
+ headEdge.fDY = SK_MaxS32;
+ headEdge.fUpperX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fRiteE = nullptr;
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
+ tailEdge.fX = SK_MaxS32;
+ tailEdge.fDX = 0;
+ tailEdge.fDY = SK_MaxS32;
+ tailEdge.fUpperX = SK_MaxS32;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linked list
+
+ if (!pathContainedInClip && start_y < clipRect.fTop) {
+ start_y = clipRect.fTop;
+ }
+ if (!pathContainedInClip && stop_y > clipRect.fBottom) {
+ stop_y = clipRect.fBottom;
+ }
+
+ SkFixed leftBound = SkIntToFixed(rect.fLeft);
+ SkFixed rightBound = SkIntToFixed(rect.fRight);
+ if (isUsingMask) {
+ // If we're using a mask, then we have to limit the bounds to within the path bounds.
+ // Otherwise, edge drift may access an invalid address inside the mask.
+ SkIRect ir;
+ path.getBounds().roundOut(&ir);
+ leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
+ rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
+ }
+
+ if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
+ aaa_walk_convex_edges(
+ &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
+ } else {
+ // Only use deferred blitting if there are many edges.
+ bool useDeferred =
+ count >
+ (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
+
+ // We skip intersection computation if there are many points which probably already
+ // give us enough fractional scan lines.
+ bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
+
+ aaa_walk_edges(&headEdge,
+ &tailEdge,
+ path.getFillType(),
+ blitter,
+ start_y,
+ stop_y,
+ leftBound,
+ rightBound,
+ isUsingMask,
+ forceRLE,
+ useDeferred,
+ skipIntersect);
+ }
+}
+
+void SkScan::AAAFillPath(const SkPath& path,
+ SkBlitter* blitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool forceRLE) {
+ bool containedInClip = clipBounds.contains(ir);
+ bool isInverse = path.isInverseFillType();
+
+ // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
+ // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
+ // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
+ // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
+ // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
+ // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
+ // blitFatAntiRect to avoid the mask and its overhead.
+ if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
+ // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
+ // Hence we only call TryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
+ if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
+ MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ true,
+ forceRLE);
+ }
+ } else if (!isInverse && path.isConvex()) {
+ // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
+ // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
+ // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
+ // RunBasedAdditiveBlitter would suffice.
+ RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ false,
+ forceRLE);
+ } else {
+ // If the filling area might not be convex, the more involved aaa_walk_edges will
+ // be called and we have to clamp the alpha down to 255. The SafeRLEAdditiveBlitter
+ // does that at a cost in performance.
+ SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ false,
+ forceRLE);
+ }
+}
+#endif // defined(SK_DISABLE_AAA)
diff --git a/gfx/skia/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
new file mode 100644
index 0000000000..9b21e4b43b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkScanPriv.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkPathPriv.h"
+
+#define SHIFT SK_SUPERSAMPLE_SHIFT
+#define SCALE (1 << SHIFT)
+#define MASK (SCALE - 1)
+
+/** @file
+ We have two techniques for capturing the output of the supersampler:
+ - SUPERMASK, which records a large mask-bitmap
+ this is often faster for small, complex objects
+ - RLE, which records a rle-encoded scanline
+ this is often faster for large objects with big spans
+
+ These blitters use two coordinate systems:
+ - destination coordinates, scale equal to the output - often
+ abbreviated with 'i' or 'I' in variable names
+ - supersampled coordinates, scale equal to the output * SCALE
+ */
+
+//#define FORCE_SUPERMASK
+//#define FORCE_RLE
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Base class for a single-pass supersampled blitter.
+class BaseSuperBlitter : public SkBlitter {
+public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse);
+
+ /// Must be explicitly defined on subclasses.
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+ /// May not be called on BaseSuperBlitter because it blits out of order.
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+
+protected:
+ SkBlitter* fRealBlitter;
+ /// Current y coordinate, in destination coordinates.
+ int fCurrIY;
+ /// Widest row of region to be blitted, in destination coordinates.
+ int fWidth;
+ /// Leftmost x coordinate in any row, in destination coordinates.
+ int fLeft;
+ /// Leftmost x coordinate in any row, in supersampled coordinates.
+ int fSuperLeft;
+
+ SkDEBUGCODE(int fCurrX;)
+ /// Current y coordinate in supersampled coordinates.
+ int fCurrY;
+ /// Initial y coordinate (top of bounds).
+ int fTop;
+
+ SkIRect fSectBounds;
+};
+
+BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse) {
+ fRealBlitter = realBlit;
+
+ SkIRect sectBounds;
+ if (isInverse) {
+ // We use the clip bounds instead of the ir, since we may be asked to
+ // draw outside of the rect when we're an inverse filltype
+ sectBounds = clipBounds;
+ } else {
+ if (!sectBounds.intersect(ir, clipBounds)) {
+ sectBounds.setEmpty();
+ }
+ }
+
+ const int left = sectBounds.left();
+ const int right = sectBounds.right();
+
+ fLeft = left;
+ fSuperLeft = SkLeftShift(left, SHIFT);
+ fWidth = right - left;
+ fTop = sectBounds.top();
+ fCurrIY = fTop - 1;
+ fCurrY = SkLeftShift(fTop, SHIFT) - 1;
+
+ SkDEBUGCODE(fCurrX = -1;)
+}
+
+/// Run-length-encoded supersampling antialiased blitter.
+class SuperBlitter : public BaseSuperBlitter {
+public:
+ SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
+ bool isInverse);
+
+ ~SuperBlitter() override {
+ this->flush();
+ }
+
+ /// Once fRuns contains a complete supersampled row, flush() blits
+ /// it out through the wrapped blitter.
+ void flush();
+
+ /// Blits a row of pixels, with location and width specified
+ /// in supersampled coordinates.
+ void blitH(int x, int y, int width) override;
+ /// Blits a rectangle of pixels, with location and size specified
+ /// in supersampled coordinates.
+ void blitRect(int x, int y, int width, int height) override;
+
+private:
+ // The next three variables are used to track a circular buffer that
+ // contains the values used in SkAlphaRuns. These variables should only
+ // ever be updated in advanceRuns(), and fRuns should always point to
+ // a valid SkAlphaRuns...
+ int fRunsToBuffer;
+ void* fRunsBuffer;
+ int fCurrentRun;
+ SkAlphaRuns fRuns;
+
+ // extra one to store the zero at the end
+ int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
+
+ // This function updates the fRuns variable to point to the next buffer space
+ // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
+ // and resets fRuns to point to an empty scanline.
+ void advanceRuns() {
+ const size_t kRunsSz = this->getRunsSz();
+ fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
+ fRuns.fRuns = reinterpret_cast<int16_t*>(
+ reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
+ fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
+ fRuns.reset(fWidth);
+ }
+
+ int fOffsetX;
+};
+
+SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
+ bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
+{
+ fRunsToBuffer = realBlitter->requestRowsPreserved();
+ fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
+ fCurrentRun = -1;
+
+ this->advanceRuns();
+
+ fOffsetX = 0;
+}
+
+void SuperBlitter::flush() {
+ if (fCurrIY >= fTop) {
+
+ SkASSERT(fCurrentRun < fRunsToBuffer);
+ if (!fRuns.empty()) {
+ // SkDEBUGCODE(fRuns.dump();)
+ fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
+ this->advanceRuns();
+ fOffsetX = 0;
+ }
+
+ fCurrIY = fTop - 1;
+ SkDEBUGCODE(fCurrX = -1;)
+ }
+}
+
+/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
+ *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
+ to produce a final value in [0, 255] and handles clamping 256->255
+ itself, with the same (alpha - (alpha >> 8)) correction as
+ coverage_to_exact_alpha().
+*/
+static inline int coverage_to_partial_alpha(int aa) {
+ aa <<= 8 - 2*SHIFT;
+ return aa;
+}
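+
+// Illustration (assuming SHIFT == 2, SCALE == 4): coverage_to_partial_alpha
+// maps aa in [0, SCALE] to aa << 4, so a fully covered pixel contributes
+// 4 << 4 == 64 per supersampled row, and SCALE rows accumulate 256, which
+// SkAlphaRuns clamps to 255 as described above.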
+
+/** coverage_to_exact_alpha() is being used by our blitter, which wants
+ a final value in [0, 255].
+*/
+static inline int coverage_to_exact_alpha(int aa) {
+ int alpha = (256 >> SHIFT) * aa;
+ // clamp 256->255
+ return alpha - (alpha >> 8);
+}
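+
+// Illustration (assuming SHIFT == 2): coverage_to_exact_alpha(SCALE) ==
+// (256 >> 2) * 4 == 256, and the clamp yields 256 - (256 >> 8) == 255, so
+// full coverage lands exactly on opaque without a branch.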
+
+void SuperBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+
+ int iy = y >> SHIFT;
+ SkASSERT(iy >= fCurrIY);
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+#ifdef SK_DEBUG
+ SkASSERT(y != fCurrY || x >= fCurrX);
+#endif
+ SkASSERT(y >= fCurrY);
+ if (fCurrY != y) {
+ fOffsetX = 0;
+ fCurrY = y;
+ }
+
+ if (iy != fCurrIY) { // new scanline
+ this->flush();
+ fCurrIY = iy;
+ }
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ // integer-pixel-aligned ends of blit, rounded out
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+ if (n < 0) {
+ fb = fe - fb;
+ n = 0;
+ fe = 0;
+ } else {
+ if (fb == 0) {
+ n += 1;
+ } else {
+ fb = SCALE - fb;
+ }
+ }
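+
+ // Worked example (hypothetical span, assuming SHIFT == 2, SCALE == 4):
+ // x == 5 and width == 9 cover supersampled columns [5, 14). Then
+ // fb == 5 & MASK == 1, flipped to SCALE - 1 == 3 leading subpixels,
+ // fe == 14 & MASK == 2 trailing subpixels, and n == 1 full pixel in
+ // between; 3 + 4 + 2 == 9 accounts for the whole span.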
+
+ fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
+ fOffsetX);
+
+#ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+#endif
+}
+
+#if 0 // UNUSED
+static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+ int n, U8CPU riteA) {
+ SkASSERT(leftA <= 0xFF);
+ SkASSERT(riteA <= 0xFF);
+
+ int16_t* run = runs.fRuns;
+ uint8_t* aa = runs.fAlpha;
+
+ if (ileft > 0) {
+ run[0] = ileft;
+ aa[0] = 0;
+ run += ileft;
+ aa += ileft;
+ }
+
+ SkASSERT(leftA < 0xFF);
+ if (leftA > 0) {
+ *run++ = 1;
+ *aa++ = leftA;
+ }
+
+ if (n > 0) {
+ run[0] = n;
+ aa[0] = 0xFF;
+ run += n;
+ aa += n;
+ }
+
+ SkASSERT(riteA < 0xFF);
+ if (riteA > 0) {
+ *run++ = 1;
+ *aa++ = riteA;
+ }
+ run[0] = 0;
+}
+#endif
+
+void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+ return;
+ }
+ }
+ SkASSERT(height > 0);
+
+ // Since this is a rect, instead of blitting supersampled rows one at a
+ // time and then resolving to the destination canvas, we can blit
+ // directly to the destination canvas one row per SCALE supersampled rows.
+ int start_y = y >> SHIFT;
+ int stop_y = (y + height) >> SHIFT;
+ int count = stop_y - start_y;
+ if (count > 0) {
+ y += count << SHIFT;
+ height -= count << SHIFT;
+
+ // save original X for our tail blitH() loop at the bottom
+ int origX = x;
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ // There is always a left column, a middle, and a right column.
+ // ileft is the destination x of the first pixel of the entire rect.
+ // xleft is (SCALE - # of covered supersampled pixels) in that
+ // destination pixel.
+ int ileft = x >> SHIFT;
+ int xleft = x & MASK;
+ // irite is the destination x of the last pixel of the OPAQUE section.
+ // xrite is the number of supersampled pixels extending beyond irite;
+ // xrite/SCALE should give us alpha.
+ int irite = (x + width) >> SHIFT;
+ int xrite = (x + width) & MASK;
+ if (!xrite) {
+ xrite = SCALE;
+ irite--;
+ }
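+
+ // Worked example (hypothetical rect, assuming SHIFT == 2): x == 5 and
+ // width == 12 cover supersampled columns [5, 17), so ileft == 1 with
+ // xleft == 1 uncovered leading subpixel (flipped to SCALE - 1 == 3
+ // covered ones below), irite == 4 with xrite == 1 covered trailing
+ // subpixel, and n == 2 fully opaque columns in between.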
+
+ // Need to call flush() to clean up pending draws before we
+ // even consider blitV(), since otherwise it can look nonmonotonic.
+ SkASSERT(start_y > fCurrIY);
+ this->flush();
+
+ int n = irite - ileft - 1;
+ if (n < 0) {
+ // If n < 0, we'll only have a single partially-transparent column
+ // of pixels to render.
+ xleft = xrite - xleft;
+ SkASSERT(xleft <= SCALE);
+ SkASSERT(xleft > 0);
+ fRealBlitter->blitV(ileft + fLeft, start_y, count,
+ coverage_to_exact_alpha(xleft));
+ } else {
+ // With n = 0, we have two possibly-transparent columns of pixels
+ // to render; with n > 0, we have opaque columns between them.
+
+ xleft = SCALE - xleft;
+
+ // Using coverage_to_exact_alpha is not consistent with blitH()
+ const int coverageL = coverage_to_exact_alpha(xleft);
+ const int coverageR = coverage_to_exact_alpha(xrite);
+
+ SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
+ SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);
+
+ fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
+ coverageL, coverageR);
+ }
+
+ // preamble for our next call to blitH()
+ fCurrIY = stop_y - 1;
+ fOffsetX = 0;
+ fCurrY = y - 1;
+ fRuns.reset(fWidth);
+ x = origX;
+ }
+
+ // catch any remaining few rows
+ SkASSERT(height <= MASK);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Masked supersampling antialiased blitter.
+class MaskSuperBlitter : public BaseSuperBlitter {
+public:
+ MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
+ ~MaskSuperBlitter() override {
+ fRealBlitter->blitMask(fMask, fClipRect);
+ }
+
+ void blitH(int x, int y, int width) override;
+
+ static bool CanHandleRect(const SkIRect& bounds) {
+#ifdef FORCE_RLE
+ return false;
+#endif
+ int width = bounds.width();
+ int64_t rb = SkAlign4(width);
+ // use 64bits to detect overflow
+ int64_t storage = rb * bounds.height();
+
+ return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
+ (storage <= MaskSuperBlitter::kMAX_STORAGE);
+ }
+
+private:
+ enum {
+#ifdef FORCE_SUPERMASK
+ kMAX_WIDTH = 2048,
+ kMAX_STORAGE = 1024 * 1024 * 2
+#else
+ kMAX_WIDTH = 32, // so we don't try to do very wide things, where the RLE blitter would be faster
+ kMAX_STORAGE = 1024
+#endif
+ };
+
+ SkMask fMask;
+ SkIRect fClipRect;
+ // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
+ // perform a test to see if stopAlpha != 0
+ uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
+};
+
+MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
+{
+ SkASSERT(CanHandleRect(ir));
+ SkASSERT(!isInverse);
+
+ fMask.fImage = (uint8_t*)fStorage;
+ fMask.fBounds = ir;
+ fMask.fRowBytes = ir.width();
+ fMask.fFormat = SkMask::kA8_Format;
+
+ fClipRect = ir;
+ if (!fClipRect.intersect(clipBounds)) {
+ SkASSERT(0);
+ fClipRect.setEmpty();
+ }
+
+ // For valgrind, write 1 extra byte at the end so we don't read
+ // uninitialized memory. See comment in add_aa_span and fStorage[].
+ memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
+}
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
+ /* I should be able to just add alpha[x] + startAlpha.
+ However, if the trailing edge of the previous span and the leading
+ edge of the current span round to the same super-sampled x value,
+ I might overflow to 256 with this add, hence the funny subtract.
+ */
+ unsigned tmp = *alpha + startAlpha;
+ SkASSERT(tmp <= 256);
+ *alpha = SkToU8(tmp - (tmp >> 8));
+}
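+
+// Worked example of the funny subtract: *alpha == 192 plus startAlpha == 64
+// gives tmp == 256, and 256 - (256 >> 8) == 255, so the value saturates to
+// opaque instead of wrapping to 0 in the uint8_t store.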
+
+static inline uint32_t quadplicate_byte(U8CPU value) {
+ uint32_t pair = (value << 8) | value;
+ return (pair << 16) | pair;
+}
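+
+// Illustration: quadplicate_byte(0x40) == 0x40404040, which lets the quad
+// loop in add_aa_span below add maxValue to four alpha bytes with a single
+// 32-bit store.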
+
+// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
+// only ever call us with at most enough to hit 256 (never larger), so it is
+// enough to just subtract the high-bit. Actually clamping with a branch would
+// be slower (e.g. if (tmp > 255) tmp = 255;)
+//
+static inline void saturated_add(uint8_t* ptr, U8CPU add) {
+ unsigned tmp = *ptr + add;
+ SkASSERT(tmp <= 256);
+ *ptr = SkToU8(tmp - (tmp >> 8));
+}
+
+// minimum count before we want to setup an inner loop, adding 4-at-a-time
+#define MIN_COUNT_FOR_QUAD_LOOP 16
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
+ U8CPU stopAlpha, U8CPU maxValue) {
+ SkASSERT(middleCount >= 0);
+
+ saturated_add(alpha, startAlpha);
+ alpha += 1;
+
+ if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
+ // loop until we're quad-byte aligned
+ while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ middleCount -= 1;
+ }
+
+ int bigCount = middleCount >> 2;
+ uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
+ uint32_t qval = quadplicate_byte(maxValue);
+ do {
+ *qptr++ += qval;
+ } while (--bigCount > 0);
+
+ middleCount &= 3;
+ alpha = reinterpret_cast<uint8_t*> (qptr);
+ // fall through to the following while-loop
+ }
+
+ while (--middleCount >= 0) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ }
+
+ // potentially this can be off the end of our "legal" alpha values, but that
+ // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
+ // every time (slow), we just do it, and ensure that we've allocated extra space
+ // (see the + 1 comment in fStorage[])
+ saturated_add(alpha, stopAlpha);
+}
+
+void MaskSuperBlitter::blitH(int x, int y, int width) {
+ int iy = (y >> SHIFT);
+
+ SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
+ iy -= fMask.fBounds.fTop; // make it relative to 0
+
+ // This should never happen, but it does. Until the true cause is
+ // discovered, let's skip this span instead of crashing.
+ // See http://crbug.com/17569.
+ if (iy < 0) {
+ return;
+ }
+
+#ifdef SK_DEBUG
+ {
+ int ix = x >> SHIFT;
+ SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
+ }
+#endif
+
+ x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);
+
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+
+ if (n < 0) {
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fe - fb));
+ } else {
+ fb = SCALE - fb;
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
+ }
+
+#ifdef SK_DEBUG
+ fCurrX = x + width;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SkIRect safeRoundOut(const SkRect& src) {
+ // roundOut will pin huge floats to max/min int
+ SkIRect dst = src.roundOut();
+
+ // intersect with a smaller huge rect, so the rect will not be considered empty for being
+ // too large. E.g., { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
+ // exceeds a signed 32-bit int.
+ const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
+ (void)dst.intersect({ -limit, -limit, limit, limit});
+
+ return dst;
+}
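+
+// Illustration (hypothetical input): bounds like { -1e30f, 0, 1e30f, 10 }
+// round out to the INT32 extremes, then intersect down to
+// +/- (SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT), so the rect stays non-empty and
+// remains safe to left-shift by SHIFT later.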
+
+constexpr int kSampleSize = 8;
+#if !defined(SK_DISABLE_AAA)
+ constexpr SkScalar kComplexityThreshold = 0.25;
+#endif
+
+static void compute_complexity(const SkPath& path, SkScalar& avgLength, SkScalar& complexity) {
+ int n = path.countPoints();
+ if (n < kSampleSize || path.getBounds().isEmpty()) {
+ // set to an invalid value to indicate that we failed to compute
+ avgLength = complexity = -1;
+ return;
+ }
+
+ SkScalar sumLength = 0;
+ SkPoint lastPoint = path.getPoint(0);
+ for(int i = 1; i < kSampleSize; ++i) {
+ SkPoint point = path.getPoint(i);
+ sumLength += SkPoint::Distance(lastPoint, point);
+ lastPoint = point;
+ }
+ avgLength = sumLength / (kSampleSize - 1);
+
+ auto sqr = [](SkScalar x) { return x*x; };
+
+ SkScalar diagonalSqr = sqr(path.getBounds().width()) + sqr(path.getBounds().height());
+
+ // If the path consists of random line segments, the number of intersections should be
+ // proportional to this.
+ SkScalar intersections = sk_ieee_float_divide(sqr(n) * sqr(avgLength), diagonalSqr);
+
+ // The number of intersections per scanline should be proportional to this number.
+ complexity = sk_ieee_float_divide(intersections, path.getBounds().height());
+
+ if (sk_float_isnan(complexity)) { // it may be possible to have 0.0 / 0.0; inf is fine for us.
+ complexity = -1;
+ }
+}
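+
+// Hedged numeric sketch (invented path): n == 100 points with
+// avgLength == 10 over 100x100 bounds gives diagonalSqr == 20000,
+// intersections ~= (100^2 * 10^2) / 20000 == 50, and
+// complexity == 50 / 100 == 0.5, which exceeds kComplexityThreshold (0.25),
+// so ShouldUseAAA below would decline analytic AA.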
+
+static bool ShouldUseAAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
+#if defined(SK_DISABLE_AAA)
+ return false;
+#else
+ if (gSkForceAnalyticAA) {
+ return true;
+ }
+ if (!gSkUseAnalyticAA) {
+ return false;
+ }
+ if (path.isRect(nullptr)) {
+ return true;
+ }
+
+ #ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
+ const SkRect& bounds = path.getBounds();
+ // When the path has so many points compared to the size of its
+ // bounds/resolution, it indicates that the path is not quite smooth at
+ // the current resolution: the expected number of turning points in
+ // every pixel row/column is significantly greater than zero. Hence
+ // Analytic AA is not likely to produce visible quality improvements,
+ // and Analytic AA might be slower than supersampling.
+ return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
+ #else
+ if (path.countPoints() >= path.getBounds().height()) {
+ // SAA is faster than AAA in this case even if there are no
+ // intersections because AAA will have too many scan lines. See
+ // skbug.com/8272
+ return false;
+ }
+ // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
+ return complexity < kComplexityThreshold;
+ #endif
+#endif
+}
+
+void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool forceRLE) {
+ bool containedInClip = clipBounds.contains(ir);
+ bool isInverse = path.isInverseFillType();
+
+ // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
+ // if we're an inverse filltype
+ if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
+ MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
+ SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+ sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
+ } else {
+ SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
+ sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
+ }
+}
+
+static int overflows_short_shift(int value, int shift) {
+ const int s = 16 + shift;
+ return (SkLeftShift(value, s) >> s) - value;
+}
+
+/**
+ Would any of the coordinates of this rectangle not fit in a short,
+ when left-shifted by shift?
+*/
+static int rect_overflows_short_shift(SkIRect rect, int shift) {
+ SkASSERT(!overflows_short_shift(8191, shift));
+ SkASSERT(overflows_short_shift(8192, shift));
+ SkASSERT(!overflows_short_shift(32767, 0));
+ SkASSERT(overflows_short_shift(32768, 0));
+
+ // Since we expect these to succeed, we bit-or together
+ // for a tiny extra bit of speed.
+ return overflows_short_shift(rect.fLeft, shift) |
+ overflows_short_shift(rect.fRight, shift) |
+ overflows_short_shift(rect.fTop, shift) |
+ overflows_short_shift(rect.fBottom, shift);
+}
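+
+// Hedged example: with shift == 2 (s == 18), value == 8191 round-trips
+// through (8191 << 18) >> 18 unchanged, but value == 8192 overflows into
+// the sign bit and comes back as -8192, so the function reports a nonzero
+// difference, matching the asserts above.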
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter, bool forceRLE) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ const bool isInverse = path.isInverseFillType();
+ SkIRect ir = safeRoundOut(path.getBounds());
+ if (ir.isEmpty()) {
+ if (isInverse) {
+ blitter->blitRegion(origClip);
+ }
+ return;
+ }
+
+ // If the intersection of the path bounds and the clip bounds
+ // will overflow 32767 when << by SHIFT, we can't supersample,
+ // so draw without antialiasing.
+ SkIRect clippedIR;
+ if (isInverse) {
+ // If the path is an inverse fill, it's going to fill the entire
+ // clip, and we care whether the entire clip exceeds our limits.
+ clippedIR = origClip.getBounds();
+ } else {
+ if (!clippedIR.intersect(ir, origClip.getBounds())) {
+ return;
+ }
+ }
+ if (rect_overflows_short_shift(clippedIR, SHIFT)) {
+ SkScan::FillPath(path, origClip, blitter);
+ return;
+ }
+
+ // Our antialiasing can't handle a clip larger than 32767, so we restrict
+ // the clip to that limit here. (the runs[] uses int16_t for its index).
+ //
+ // A more general solution (one that could also eliminate the need to
+ // disable aa based on ir bounds (see overflows_short_shift) would be
+ // to tile the clip/target...
+ SkRegion tmpClipStorage;
+ const SkRegion* clipRgn = &origClip;
+ {
+ static const int32_t kMaxClipCoord = 32767;
+ const SkIRect& bounds = origClip.getBounds();
+ if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
+ SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
+ tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
+ clipRgn = &tmpClipStorage;
+ }
+ }
+ // for here down, use clipRgn, not origClip
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+
+ if (clipper.getBlitter() == nullptr) { // clipped out
+ if (isInverse) {
+ blitter->blitRegion(*clipRgn);
+ }
+ return;
+ }
+
+ SkASSERT(clipper.getClipRect() == nullptr ||
+ *clipper.getClipRect() == clipRgn->getBounds());
+
+ // now use the (possibly wrapped) blitter
+ blitter = clipper.getBlitter();
+
+ if (isInverse) {
+ sk_blit_above(blitter, ir, *clipRgn);
+ }
+
+ SkScalar avgLength, complexity;
+ compute_complexity(path, avgLength, complexity);
+
+ if (ShouldUseAAA(path, avgLength, complexity)) {
+ // Do not use AAA if path is too complicated:
+ // there won't be any speedup or significant visual improvement.
+ SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
+ } else {
+ SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
+ }
+
+ if (isInverse) {
+ sk_blit_below(blitter, ir, *clipRgn);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkRasterClip.h"
+
+void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isEmpty() || !path.isFinite()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillPath(path, clip.bwRgn(), blitter);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ SkScan::FillPath(path, tmp, &aaBlitter);
+ }
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isEmpty() || !path.isFinite()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ AntiFillPath(path, clip.bwRgn(), blitter, false);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ AntiFillPath(path, tmp, &aaBlitter, true); // SkAAClipBlitter can blitMask, why forceRLE?
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Antihair.cpp b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
new file mode 100644
index 0000000000..feb96f2470
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
@@ -0,0 +1,1009 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkScan.h"
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkRasterClip.h"
+
+#include <utility>
+
+/* Our attempt to compute the worst case "bounds" for the horizontal and
+ vertical cases has some numerical bug in it, and we sometimes undervalue
+ our extents. The bug is that when this happens, we will set the clip to
+ nullptr (for speed), and thus draw outside of the clip by a pixel, which might
+ merely look bad, but might also access memory outside of the valid range
+ allocated for the device bitmap.
+
+ This define enables our fix to outset our "bounds" by 1, thus avoiding the
+ chance of the bug, but at the cost of sometimes taking the rectblitter
+ case (i.e. not setting the clip to nullptr) when we might not actually need
+ to. If we can improve/fix the actual calculations, then we can remove this
+ step.
+ */
+#define OUTSET_BEFORE_CLIP_TEST true
+
+#define HLINE_STACK_BUFFER 100
+
+static inline int SmallDot6Scale(int value, int dot6) {
+ SkASSERT((int16_t)value == value);
+ SkASSERT((unsigned)dot6 <= 64);
+ return (value * dot6) >> 6;
+}
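+
+// Illustration: SmallDot6Scale(255, 32) == (255 * 32) >> 6 == 127, i.e. a
+// half-covered (32/64) hairline cap scales full alpha down to roughly 50%.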
+
+//#define TEST_GAMMA
+
+#ifdef TEST_GAMMA
+ static uint8_t gGammaTable[256];
+ #define ApplyGamma(table, alpha) (table)[alpha]
+
+ static void build_gamma_table() {
+ static bool gInit = false;
+
+ if (gInit == false) {
+ for (int i = 0; i < 256; i++) {
+ SkFixed n = i * 257;
+ n += n >> 15;
+ SkASSERT(n >= 0 && n <= SK_Fixed1);
+ n = SkFixedSqrt(n);
+ n = n * 255 >> 16;
+ // SkDebugf("morph %d -> %d\n", i, n);
+ gGammaTable[i] = SkToU8(n);
+ }
+ gInit = true;
+ }
+ }
+#else
+ #define ApplyGamma(table, alpha) SkToU8(alpha)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void call_hline_blitter(SkBlitter* blitter, int x, int y, int count,
+ U8CPU alpha) {
+ SkASSERT(count > 0);
+
+ int16_t runs[HLINE_STACK_BUFFER + 1];
+ uint8_t aa[HLINE_STACK_BUFFER];
+
+ aa[0] = ApplyGamma(gGammaTable, alpha);
+ do {
+ int n = count;
+ if (n > HLINE_STACK_BUFFER) {
+ n = HLINE_STACK_BUFFER;
+ }
+ runs[0] = SkToS16(n);
+ runs[n] = 0;
+ blitter->blitAntiH(x, y, aa, runs);
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
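+
+// The loop above caps each blitAntiH() call at HLINE_STACK_BUFFER pixels, so a
+// 250-pixel run, for instance, is issued as spans of 100, 100, and 50 pixels,
+// all sharing the single alpha value stored in aa[0].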
+
+class SkAntiHairBlitter {
+public:
+ SkAntiHairBlitter() : fBlitter(nullptr) {}
+ virtual ~SkAntiHairBlitter() {}
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+
+ void setup(SkBlitter* blitter) {
+ fBlitter = blitter;
+ }
+
+ virtual SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) = 0;
+ virtual SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed slope) = 0;
+
+private:
+ SkBlitter* fBlitter;
+};
+
+class HLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+
+ // lower line
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y, 1, ma);
+ }
+
+ // upper line
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, 1, ma);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+
+    SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed slope) override {
+ SkASSERT(x < stopx);
+ int count = stopx - x;
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+
+ // lower line
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y, count, a);
+ }
+
+ // upper line
+ a = 255 - a;
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, count, a);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class Horish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed dy, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+ unsigned a0 = SmallDot6Scale(255 - a, mod64);
+ unsigned a1 = SmallDot6Scale(a, mod64);
+ this->getBlitter()->blitAntiV2(x, lower_y - 1, a0, a1);
+
+ return fy + dy - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed dy) override {
+ SkASSERT(x < stopx);
+
+ fy += SK_Fixed1/2;
+ SkBlitter* blitter = this->getBlitter();
+ do {
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+ blitter->blitAntiV2(x, lower_y - 1, 255 - a, a);
+ fy += dy;
+ } while (++x < stopx);
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class VLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)((fx >> 8) & 0xFF);
+
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x, y, 1, ma);
+ }
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x - 1, y, 1, ma);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)((fx >> 8) & 0xFF);
+
+ if (a) {
+ this->getBlitter()->blitV(x, y, stopy - y, a);
+ }
+ a = 255 - a;
+ if (a) {
+ this->getBlitter()->blitV(x - 1, y, stopy - y, a);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
+class Vertish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)((fx >> 8) & 0xFF);
+ this->getBlitter()->blitAntiH2(x - 1, y,
+ SmallDot6Scale(255 - a, mod64), SmallDot6Scale(a, mod64));
+
+ return fx + dx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ fx += SK_Fixed1/2;
+ do {
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)((fx >> 8) & 0xFF);
+ this->getBlitter()->blitAntiH2(x - 1, y, 255 - a, a);
+ fx += dx;
+ } while (++y < stopy);
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
+static inline SkFixed fastfixdiv(SkFDot6 a, SkFDot6 b) {
+ SkASSERT((SkLeftShift(a, 16) >> 16) == a);
+ SkASSERT(b != 0);
+ return SkLeftShift(a, 16) / b;
+}
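+
+// e.g. fastfixdiv(SkIntToFDot6(1), SkIntToFDot6(2)) == (64 << 16) / 128
+// == 0x8000 == SK_Fixed1/2: dividing two 26.6 values yields a 16.16 quotient.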
+
+#define SkBITCOUNT(x) (sizeof(x) << 3)
+
+#if 1
+// returns high-bit set iff x==0x8000...
+static inline int bad_int(int x) {
+ return x & -x;
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return (bad_int(a) | bad_int(b) | bad_int(c) | bad_int(d)) >> (SkBITCOUNT(int) - 1);
+}
+#else
+static inline int good_int(int x) {
+ return x ^ (1 << (SkBITCOUNT(x) - 1));
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return !(good_int(a) & good_int(b) & good_int(c) & good_int(d));
+}
+#endif
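+
+// e.g. bad_int(0x80000000) == 0x80000000 (sign bit set), while bad_int(6) == 2
+// and bad_int(0) == 0, so any_bad_ints() is nonzero only when some argument is
+// SK_MinS32, the one value that cannot be negated.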
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+/*
+ * We want the fractional part of ordinate, but we want multiples of 64 to
+ * return 64, not 0, so we can't just say (ordinate & 63).
+ * We basically want to compute those bits, and if they're 0, return 64.
+ * We can do that w/o a branch with an extra sub and add.
+ */
+static int contribution_64(SkFDot6 ordinate) {
+#if 0
+ int result = ordinate & 63;
+ if (0 == result) {
+ result = 64;
+ }
+#else
+ int result = ((ordinate - 1) & 63) + 1;
+#endif
+ SkASSERT(result > 0 && result <= 64);
+ return result;
+}
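+
+// e.g. contribution_64(64) == 64 and contribution_64(128) == 64 (multiples of
+// 64 contribute a full pixel), while contribution_64(65) == 1.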
+
+static void do_anti_hairline(SkFDot6 x0, SkFDot6 y0, SkFDot6 x1, SkFDot6 y1,
+ const SkIRect* clip, SkBlitter* blitter) {
+ // check for integer NaN (0x80000000) which we can't handle (can't negate it)
+ // It appears typically from a huge float (inf or nan) being converted to int.
+ // If we see it, just don't draw.
+ if (any_bad_ints(x0, y0, x1, y1)) {
+ return;
+ }
+
+ // The caller must clip the line to [-32767.0 ... 32767.0] ahead of time
+ // (in dot6 format)
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (SkAbs32(x1 - x0) > SkIntToFDot6(511) || SkAbs32(y1 - y0) > SkIntToFDot6(511)) {
+ /* instead of (x0 + x1) >> 1, we shift each separately. This is less
+ precise, but avoids overflowing the intermediate result if the
+ values are huge. A better fix might be to clip the original pts
+ directly (i.e. do the divide), so we don't spend time subdividing
+ huge lines at all.
+ */
+ int hx = (x0 >> 1) + (x1 >> 1);
+ int hy = (y0 >> 1) + (y1 >> 1);
+ do_anti_hairline(x0, y0, hx, hy, clip, blitter);
+ do_anti_hairline(hx, hy, x1, y1, clip, blitter);
+ return;
+ }
+
+ int scaleStart, scaleStop;
+ int istart, istop;
+ SkFixed fstart, slope;
+
+ HLine_SkAntiHairBlitter hline_blitter;
+ Horish_SkAntiHairBlitter horish_blitter;
+ VLine_SkAntiHairBlitter vline_blitter;
+ Vertish_SkAntiHairBlitter vertish_blitter;
+ SkAntiHairBlitter* hairBlitter = nullptr;
+
+ if (SkAbs32(x1 - x0) > SkAbs32(y1 - y0)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+
+ istart = SkFDot6Floor(x0);
+ istop = SkFDot6Ceil(x1);
+ fstart = SkFDot6ToFixed(y0);
+ if (y0 == y1) { // completely horizontal, take fast case
+ slope = 0;
+ hairBlitter = &hline_blitter;
+ } else {
+ slope = fastfixdiv(y1 - y0, x1 - x0);
+ SkASSERT(slope >= -SK_Fixed1 && slope <= SK_Fixed1);
+ fstart += (slope * (32 - (x0 & 63)) + 32) >> 6;
+ hairBlitter = &horish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = x1 - x0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (x0 & 63);
+ scaleStop = x1 & 63;
+ }
+
+        if (clip) {
+ if (istart >= clip->fRight || istop <= clip->fLeft) {
+ return;
+ }
+ if (istart < clip->fLeft) {
+ fstart += slope * (clip->fLeft - istart);
+ istart = clip->fLeft;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(x1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fRight) {
+ istop = clip->fRight;
+ scaleStop = 0; // so we don't draw this last column
+ }
+
+ SkASSERT(istart <= istop);
+ if (istart == istop) {
+ return;
+ }
+ // now test if our Y values are completely inside the clip
+ int top, bottom;
+ if (slope >= 0) { // T2B
+ top = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ bottom = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // B2T
+ bottom = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ top = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ top -= 1;
+ bottom += 1;
+#endif
+ if (top >= clip->fBottom || bottom <= clip->fTop) {
+ return;
+ }
+ if (clip->fTop <= top && clip->fBottom >= bottom) {
+ clip = nullptr;
+ }
+ }
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+
+ istart = SkFDot6Floor(y0);
+ istop = SkFDot6Ceil(y1);
+ fstart = SkFDot6ToFixed(x0);
+ if (x0 == x1) {
+ if (y0 == y1) { // are we zero length?
+ return; // nothing to do
+ }
+ slope = 0;
+ hairBlitter = &vline_blitter;
+ } else {
+ slope = fastfixdiv(x1 - x0, y1 - y0);
+ SkASSERT(slope <= SK_Fixed1 && slope >= -SK_Fixed1);
+ fstart += (slope * (32 - (y0 & 63)) + 32) >> 6;
+ hairBlitter = &vertish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = y1 - y0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (y0 & 63);
+ scaleStop = y1 & 63;
+ }
+
+ if (clip) {
+ if (istart >= clip->fBottom || istop <= clip->fTop) {
+ return;
+ }
+ if (istart < clip->fTop) {
+ fstart += slope * (clip->fTop - istart);
+ istart = clip->fTop;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(y1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fBottom) {
+ istop = clip->fBottom;
+ scaleStop = 0; // so we don't draw this last row
+ }
+
+ SkASSERT(istart <= istop);
+            if (istart == istop) {
+                return;
+            }
+
+ // now test if our X values are completely inside the clip
+ int left, right;
+ if (slope >= 0) { // L2R
+ left = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ right = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // R2L
+ right = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ left = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ left -= 1;
+ right += 1;
+#endif
+ if (left >= clip->fRight || right <= clip->fLeft) {
+ return;
+ }
+ if (clip->fLeft <= left && clip->fRight >= right) {
+ clip = nullptr;
+ }
+ }
+ }
+
+ SkRectClipBlitter rectClipper;
+ if (clip) {
+ rectClipper.init(blitter, *clip);
+ blitter = &rectClipper;
+ }
+
+ SkASSERT(hairBlitter);
+ hairBlitter->setup(blitter);
+
+#ifdef SK_DEBUG
+ if (scaleStart > 0 && scaleStop > 0) {
+ // be sure we don't draw twice in the same pixel
+ SkASSERT(istart < istop - 1);
+ }
+#endif
+
+ fstart = hairBlitter->drawCap(istart, fstart, slope, scaleStart);
+ istart += 1;
+ int fullSpans = istop - istart - (scaleStop > 0);
+ if (fullSpans > 0) {
+ fstart = hairBlitter->drawLine(istart, istart + fullSpans, fstart, slope);
+ }
+ if (scaleStop > 0) {
+ hairBlitter->drawCap(istop - 1, fstart, slope, scaleStop);
+ }
+}
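+
+// To summarize the emission order above: a partial first column/row via
+// drawCap(scaleStart), then any full-coverage interior spans via drawLine(),
+// and finally, if the line doesn't end on a pixel boundary (scaleStop > 0),
+// a partial last column/row via drawCap(scaleStop).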
+
+void SkScan::AntiHairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip && clip->isEmpty()) {
+ return;
+ }
+
+ SkASSERT(clip == nullptr || !clip->getBounds().isEmpty());
+
+#ifdef TEST_GAMMA
+ build_gamma_table();
+#endif
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ /* We perform integral clipping later on, but we do a scalar clip first
+ to ensure that our coordinates are expressible in fixed/integers.
+
+           Antialiased hairlines can draw up to 1/2 of a pixel outside of
+           their bounds, so we need to outset the clip before calling the
+           clipper. To make the numerics safer, we outset by a whole pixel:
+           since the 1/2 pixel boundary is important to the antihair blitter,
+           we don't want to risk numerical fate by chopping on that edge.
+ */
+ clipBounds.outset(SK_Scalar1, SK_Scalar1);
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ if (clip) {
+ SkFDot6 left = SkMin32(x0, x1);
+ SkFDot6 top = SkMin32(y0, y1);
+ SkFDot6 right = SkMax32(x0, x1);
+ SkFDot6 bottom = SkMax32(y0, y1);
+ SkIRect ir;
+
+ ir.setLTRB(SkFDot6Floor(left) - 1,
+ SkFDot6Floor(top) - 1,
+ SkFDot6Ceil(right) + 1,
+ SkFDot6Ceil(bottom) + 1);
+
+ if (clip->quickReject(ir)) {
+ continue;
+ }
+ if (!clip->quickContains(ir)) {
+ SkRegion::Cliperator iter(*clip, ir);
+ const SkIRect* r = &iter.rect();
+
+ while (!iter.done()) {
+ do_anti_hairline(x0, y0, x1, y1, r, blitter);
+ iter.next();
+ }
+ continue;
+ }
+ // fall through to no-clip case
+ }
+ do_anti_hairline(x0, y0, x1, y1, nullptr, blitter);
+ }
+}
+
+void SkScan::AntiHairRect(const SkRect& rect, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ SkPoint pts[5];
+
+ pts[0].set(rect.fLeft, rect.fTop);
+ pts[1].set(rect.fRight, rect.fTop);
+ pts[2].set(rect.fRight, rect.fBottom);
+ pts[3].set(rect.fLeft, rect.fBottom);
+ pts[4] = pts[0];
+ SkScan::AntiHairLine(pts, 5, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int FDot8; // 24.8 integer fixed point
+
+static inline FDot8 SkFixedToFDot8(SkFixed x) {
+ return (x + 0x80) >> 8;
+}
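+
+// e.g. SkFixedToFDot8(SK_FixedHalf) == (0x8000 + 0x80) >> 8 == 0x80, i.e. the
+// 16.16 value 0.5 rounded (not truncated) into 24.8.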
+
+static void do_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ blitter->blitV(L >> 8, top, 1, SkAlphaMul(alpha, R - L));
+ return;
+ }
+
+ int left = L >> 8;
+
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, SkAlphaMul(alpha, 256 - (L & 0xFF)));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, SkAlphaMul(alpha, R & 0xFF));
+ }
+}
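+
+// For instance, with L == 0x120 (x = 1.125) and R == 0x3C0 (x = 3.75): pixel 1
+// gets 224/256 of alpha via blitV, pixel 2 is filled at full alpha through
+// call_hline_blitter, and pixel 3 gets 192/256 of alpha via blitV.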
+
+static void antifilldot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B, SkBlitter* blitter,
+ bool fillInner) {
+ // check for empty now that we're in our reduced precision space
+ if (L >= R || T >= B) {
+ return;
+ }
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ do_scanline(L, top, R, B - T - 1, blitter);
+ return;
+ }
+
+ if (T & 0xFF) {
+ do_scanline(L, top, R, 256 - (T & 0xFF), blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ int left = L >> 8;
+ if (left == ((R - 1) >> 8)) { // just 1-pixel wide
+ blitter->blitV(left, top, height, R - L - 1);
+ } else {
+ if (L & 0xFF) {
+ blitter->blitV(left, top, height, 256 - (L & 0xFF));
+ left += 1;
+ }
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0 && fillInner) {
+ blitter->blitRect(left, top, width, height);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, height, R & 0xFF);
+ }
+ }
+ }
+
+ if (B & 0xFF) {
+ do_scanline(L, bot, R, B & 0xFF, blitter);
+ }
+}
+
+static void antifillrect(const SkXRect& xr, SkBlitter* blitter) {
+ antifilldot8(SkFixedToFDot8(xr.fLeft), SkFixedToFDot8(xr.fTop),
+ SkFixedToFDot8(xr.fRight), SkFixedToFDot8(xr.fBottom),
+ blitter, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (nullptr == clip) {
+ antifillrect(xr, blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(outerBounds)) {
+ antifillrect(xr, blitter);
+ } else {
+ SkXRect tmpR;
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, clipBounds);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ }
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ const SkIRect& rr = clipper.rect();
+
+ while (!clipper.done()) {
+ SkXRect tmpR;
+
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, rr);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ }
+}
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillXRect(xr, &clip.bwRgn(), blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip.quickContains(outerBounds)) {
+ AntiFillXRect(xr, nullptr, blitter);
+ } else {
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ AntiFillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+ }
+ }
+}
+
+/* This guy takes a float-rect, but with the key improvement that it has
+ already been clipped, so we know that it is safe to convert it into a
+ XRect (fixedpoint), as it won't overflow.
+*/
+static void antifillrect(const SkRect& r, SkBlitter* blitter) {
+ SkXRect xr;
+
+ XRect_set(&xr, r);
+ antifillrect(xr, blitter);
+}
+
+/* We repeat the clipping logic of AntiFillXRect because the float rect might
+   overflow if we blindly converted it to an XRect. It is unfortunate that we
+   have to repeat the clipping logic, but we don't see a way to share it.
+
+ We clip r (as needed) into one or more (smaller) float rects, and then pass
+ those to our version of antifillrect, which converts it into an XRect and
+ then calls the blit.
+*/
+void SkScan::AntiFillRect(const SkRect& origR, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip) {
+ SkRect newR;
+ newR.set(clip->getBounds());
+ if (!newR.intersect(origR)) {
+ return;
+ }
+
+ const SkIRect outerBounds = newR.roundOut();
+
+ if (clip->isRect()) {
+ antifillrect(newR, blitter);
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ while (!clipper.done()) {
+ newR.set(clipper.rect());
+ if (newR.intersect(origR)) {
+ antifillrect(newR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ } else {
+ antifillrect(origR, blitter);
+ }
+}
+
+void SkScan::AntiFillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillRect(r, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFillRect(r, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkAlphaMulRound(a, b) SkMulDiv255Round(a, b)
+
+// calls blitRect() if the rectangle is non-empty
+static void fillcheckrect(int L, int T, int R, int B, SkBlitter* blitter) {
+ if (L < R && T < B) {
+ blitter->blitRect(L, T, R - L, B - T);
+ }
+}
+
+static inline FDot8 SkScalarToFDot8(SkScalar x) {
+ return (int)(x * 256);
+}
+
+static inline int FDot8Floor(FDot8 x) {
+ return x >> 8;
+}
+
+static inline int FDot8Ceil(FDot8 x) {
+ return (x + 0xFF) >> 8;
+}
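+
+// e.g. SkScalarToFDot8(1.5f) == 0x180; FDot8Floor(0x180) == 1 while
+// FDot8Ceil(0x180) == 2, and both return 1 for an exact boundary like 0x100.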
+
+// 1 - (1 - a)*(1 - b)
+static inline U8CPU InvAlphaMul(U8CPU a, U8CPU b) {
+ // need precise rounding (not just SkAlphaMul) so that values like
+ // a=228, b=252 don't overflow the result
+ return SkToU8(a + b - SkAlphaMulRound(a, b));
+}
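+
+// e.g. InvAlphaMul(228, 252) == 228 + 252 - SkAlphaMulRound(228, 252)
+// == 480 - 225 == 255, which still fits in a uint8_t; a truncating product
+// (224) would give 256 and overflow.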
+
+static void inner_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ FDot8 widClamp = R - L;
+ // border case clamp 256 to 255 instead of going through call_hline_blitter
+ // see skbug/4406
+ widClamp = widClamp - (widClamp >> 8);
+ blitter->blitV(L >> 8, top, 1, InvAlphaMul(alpha, widClamp));
+ return;
+ }
+
+ int left = L >> 8;
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, InvAlphaMul(alpha, L & 0xFF));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, InvAlphaMul(alpha, ~R & 0xFF));
+ }
+}
+
+static void innerstrokedot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B,
+ SkBlitter* blitter) {
+ SkASSERT(L < R && T < B);
+
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ // We want the inverse of B-T, since we're the inner-stroke
+ int alpha = 256 - (B - T);
+ if (alpha) {
+ inner_scanline(L, top, R, alpha, blitter);
+ }
+ return;
+ }
+
+ if (T & 0xFF) {
+ inner_scanline(L, top, R, T & 0xFF, blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ if (L & 0xFF) {
+ blitter->blitV(L >> 8, top, height, L & 0xFF);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(R >> 8, top, height, ~R & 0xFF);
+ }
+ }
+
+ if (B & 0xFF) {
+ inner_scanline(L, bot, R, ~B & 0xFF, blitter);
+ }
+}
+
+static inline void align_thin_stroke(FDot8& edge1, FDot8& edge2) {
+ SkASSERT(edge1 <= edge2);
+
+ if (FDot8Floor(edge1) == FDot8Floor(edge2)) {
+ edge2 -= (edge1 & 0xFF);
+ edge1 &= ~0xFF;
+ }
+}
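+
+// e.g. with edge1 == 0x172 and edge2 == 0x1C5 (both inside pixel 1), the edges
+// become 0x100 and 0x153: the span keeps its 0x53 width but is slid so that it
+// starts exactly on the pixel boundary.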
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRegion* clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ SkScalar rx = SkScalarHalf(strokeSize.fX);
+ SkScalar ry = SkScalarHalf(strokeSize.fY);
+
+ // outset by the radius
+ FDot8 outerL = SkScalarToFDot8(r.fLeft - rx);
+ FDot8 outerT = SkScalarToFDot8(r.fTop - ry);
+ FDot8 outerR = SkScalarToFDot8(r.fRight + rx);
+ FDot8 outerB = SkScalarToFDot8(r.fBottom + ry);
+
+ SkIRect outer;
+ // set outer to the outer rect of the outer section
+ outer.setLTRB(FDot8Floor(outerL), FDot8Floor(outerT), FDot8Ceil(outerR), FDot8Ceil(outerB));
+
+ SkBlitterClipper clipper;
+ if (clip) {
+ if (clip->quickReject(outer)) {
+ return;
+ }
+ if (!clip->contains(outer)) {
+ blitter = clipper.apply(blitter, clip, &outer);
+ }
+ // now we can ignore clip for the rest of the function
+ }
+
+ // in case we lost a bit with diameter/2
+ rx = strokeSize.fX - rx;
+ ry = strokeSize.fY - ry;
+
+ // inset by the radius
+ FDot8 innerL = SkScalarToFDot8(r.fLeft + rx);
+ FDot8 innerT = SkScalarToFDot8(r.fTop + ry);
+ FDot8 innerR = SkScalarToFDot8(r.fRight - rx);
+ FDot8 innerB = SkScalarToFDot8(r.fBottom - ry);
+
+ // For sub-unit strokes, tweak the hulls such that one of the edges coincides with the pixel
+ // edge. This ensures that the general rect stroking logic below
+ // a) doesn't blit the same scanline twice
+ // b) computes the correct coverage when both edges fall within the same pixel
+ if (strokeSize.fX < 1 || strokeSize.fY < 1) {
+ align_thin_stroke(outerL, innerL);
+ align_thin_stroke(outerT, innerT);
+ align_thin_stroke(innerR, outerR);
+ align_thin_stroke(innerB, outerB);
+ }
+
+ // stroke the outer hull
+ antifilldot8(outerL, outerT, outerR, outerB, blitter, false);
+
+ // set outer to the outer rect of the middle section
+ outer.setLTRB(FDot8Ceil(outerL), FDot8Ceil(outerT), FDot8Floor(outerR), FDot8Floor(outerB));
+
+ if (innerL >= innerR || innerT >= innerB) {
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, outer.fBottom,
+ blitter);
+ } else {
+ SkIRect inner;
+ // set inner to the inner rect of the middle section
+ inner.setLTRB(FDot8Floor(innerL), FDot8Floor(innerT), FDot8Ceil(innerR), FDot8Ceil(innerB));
+
+ // draw the frame in 4 pieces
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, inner.fTop,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fTop, inner.fLeft, inner.fBottom,
+ blitter);
+ fillcheckrect(inner.fRight, inner.fTop, outer.fRight, inner.fBottom,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fBottom, outer.fRight, outer.fBottom,
+ blitter);
+
+ // now stroke the inner rect, which is similar to antifilldot8() except that
+        // it treats the fractional coordinates with the inverse bias (since it's
+        // the inner edge).
+ innerstrokedot8(innerL, innerT, innerR, innerB, blitter);
+ }
+}
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFrameRect(r, strokeSize, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFrameRect(r, strokeSize, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Hairline.cpp b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
new file mode 100644
index 0000000000..f9365cecb2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
@@ -0,0 +1,732 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+
+#include <utility>
+
+static void horiline(int x, int stopx, SkFixed fy, SkFixed dy,
+ SkBlitter* blitter) {
+ SkASSERT(x < stopx);
+
+ do {
+ blitter->blitH(x, fy >> 16, 1);
+ fy += dy;
+ } while (++x < stopx);
+}
+
+static void vertline(int y, int stopy, SkFixed fx, SkFixed dx,
+ SkBlitter* blitter) {
+ SkASSERT(y < stopy);
+
+ do {
+ blitter->blitH(fx >> 16, y, 1);
+ fx += dx;
+ } while (++y < stopy);
+}
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+void SkScan::HairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* origBlitter) {
+ SkBlitterClipper clipper;
+ SkIRect clipR, ptsR;
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkBlitter* blitter = origBlitter;
+
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ // Perform a clip in scalar space, so we catch huge values which might
+ // be missed after we convert to SkFDot6 (overflow)
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (clip) {
+            // Now perform clipping again, as the rounding to dot6 can wiggle us.
+            // Our rects are really dot6 rects, but since we've already used the
+            // lineclipper, we know they will fit in 32 bits (26.6).
+ const SkIRect& bounds = clip->getBounds();
+
+ clipR.setLTRB(SkIntToFDot6(bounds.fLeft), SkIntToFDot6(bounds.fTop),
+ SkIntToFDot6(bounds.fRight), SkIntToFDot6(bounds.fBottom));
+ ptsR.setLTRB(x0, y0, x1, y1);
+ ptsR.sort();
+
+ // outset the right and bottom, to account for how hairlines are
+ // actually drawn, which may hit the pixel to the right or below of
+ // the coordinate
+ ptsR.fRight += SK_FDot6One;
+ ptsR.fBottom += SK_FDot6One;
+
+ if (!SkIRect::Intersects(ptsR, clipR)) {
+ continue;
+ }
+ if (!clip->isRect() || !clipR.contains(ptsR)) {
+ blitter = clipper.apply(origBlitter, clip);
+ }
+ }
+
+ SkFDot6 dx = x1 - x0;
+ SkFDot6 dy = y1 - y0;
+
+ if (SkAbs32(dx) > SkAbs32(dy)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+ int ix0 = SkFDot6Round(x0);
+ int ix1 = SkFDot6Round(x1);
+            if (ix0 == ix1) { // too short to draw
+ continue;
+ }
+
+ SkFixed slope = SkFixedDiv(dy, dx);
+ SkFixed startY = SkFDot6ToFixed(y0) + (slope * ((32 - x0) & 63) >> 6);
+
+ horiline(ix0, ix1, startY, slope, blitter);
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+ int iy0 = SkFDot6Round(y0);
+ int iy1 = SkFDot6Round(y1);
+ if (iy0 == iy1) { // too short to draw
+ continue;
+ }
+
+ SkFixed slope = SkFixedDiv(dx, dy);
+ SkFixed startX = SkFDot6ToFixed(x0) + (slope * ((32 - y0) & 63) >> 6);
+
+ vertline(iy0, iy1, startX, slope, blitter);
+ }
+ }
+}
+
+// we don't just draw 4 lines, 'cause that can leave a gap in the bottom-right
+// and double-hit the top-left.
+void SkScan::HairRect(const SkRect& rect, const SkRasterClip& clip, SkBlitter* blitter) {
+ SkAAClipBlitterWrapper wrapper;
+ SkBlitterClipper clipper;
+ // Create the enclosing bounds of the hairrect. i.e. we will stroke the interior of r.
+ SkIRect r = SkIRect::MakeLTRB(SkScalarFloorToInt(rect.fLeft),
+ SkScalarFloorToInt(rect.fTop),
+ SkScalarFloorToInt(rect.fRight + 1),
+ SkScalarFloorToInt(rect.fBottom + 1));
+
+ // Note: r might be crazy big, if rect was huge, possibly getting pinned to max/min s32.
+ // We need to trim it back to something reasonable before we can query its width etc.
+ // since r.fRight - r.fLeft might wrap around to negative even if fRight > fLeft.
+ //
+ // We outset the clip bounds by 1 before intersecting, since r is being stroked and not filled
+ // so we don't want to pin an edge of it to the clip. The intersect's job is mostly to just
+ // get the actual edge values into a reasonable range (e.g. so width() can't overflow).
+ if (!r.intersect(clip.getBounds().makeOutset(1, 1))) {
+ return;
+ }
+
+ if (clip.quickReject(r)) {
+ return;
+ }
+ if (!clip.quickContains(r)) {
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrapper.init(clip, blitter);
+ clipRgn = &wrapper.getRgn();
+ blitter = wrapper.getBlitter();
+ }
+ blitter = clipper.apply(blitter, clipRgn);
+ }
+
+ int width = r.width();
+ int height = r.height();
+
+ if ((width | height) == 0) {
+ return;
+ }
+ if (width <= 2 || height <= 2) {
+ blitter->blitRect(r.fLeft, r.fTop, width, height);
+ return;
+ }
+ // if we get here, we know we have 4 segments to draw
+ blitter->blitH(r.fLeft, r.fTop, width); // top
+ blitter->blitRect(r.fLeft, r.fTop + 1, 1, height - 2); // left
+ blitter->blitRect(r.fRight - 1, r.fTop + 1, 1, height - 2); // right
+ blitter->blitH(r.fLeft, r.fBottom - 1, width); // bottom
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPath.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkGeometry.h"
+
+#define kMaxCubicSubdivideLevel 9
+#define kMaxQuadSubdivideLevel 5
+
+static uint32_t compute_int_quad_dist(const SkPoint pts[3]) {
+ // compute the vector between the control point ([1]) and the middle of the
+ // line connecting the start and end ([0] and [2])
+ SkScalar dx = SkScalarHalf(pts[0].fX + pts[2].fX) - pts[1].fX;
+ SkScalar dy = SkScalarHalf(pts[0].fY + pts[2].fY) - pts[1].fY;
+ // we want everyone to be positive
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ // convert to whole pixel values (use ceiling to be conservative).
+ // assign to unsigned so we can safely add 1/2 of the smaller and still fit in
+ // uint32_t, since SkScalarCeilToInt() returns 31 bits at most.
+ uint32_t idx = SkScalarCeilToInt(dx);
+ uint32_t idy = SkScalarCeilToInt(dy);
+ // use the cheap approx for distance
+ if (idx > idy) {
+ return idx + (idy >> 1);
+ } else {
+ return idy + (idx >> 1);
+ }
+}
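+
+// e.g. for pts {(0,0), (4,8), (8,0)} the chord midpoint is (4,0), so dx == 0
+// and dy == 8, giving a conservative distance estimate of 8 + (0 >> 1) == 8.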
+
+static void hair_quad(const SkPoint pts[3], const SkRegion* clip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ SkASSERT(level <= kMaxQuadSubdivideLevel);
+
+ SkQuadCoeff coeff(pts);
+
+ const int lines = 1 << level;
+ Sk2s t(0);
+ Sk2s dt(SK_Scalar1 / lines);
+
+ SkPoint tmp[(1 << kMaxQuadSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < SK_ARRAY_COUNT(tmp));
+
+ tmp[0] = pts[0];
+ Sk2s A = coeff.fA;
+ Sk2s B = coeff.fB;
+ Sk2s C = coeff.fC;
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ ((A * t + B) * t + C).store(&tmp[i]);
+ }
+ tmp[lines] = pts[2];
+ lineproc(tmp, lines + 1, clip, blitter);
+}
+
+static SkRect compute_nocheck_quad_bounds(const SkPoint pts[3]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 6));
+
+ Sk2s min = Sk2s::Load(pts);
+ Sk2s max = min;
+ for (int i = 1; i < 3; ++i) {
+ Sk2s pair = Sk2s::Load(pts+i);
+ min = Sk2s::Min(min, pair);
+ max = Sk2s::Max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static bool is_inverted(const SkRect& r) {
+ return r.fLeft > r.fRight || r.fTop > r.fBottom;
+}
+
+// Can't call SkRect::intersects, since it cares about empty, and we don't (we're
+// tracking something to be stroked, so an empty rect can still draw something,
+// e.g. a horizontal line).
+static bool geometric_overlap(const SkRect& a, const SkRect& b) {
+ SkASSERT(!is_inverted(a) && !is_inverted(b));
+ return a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom;
+}
+
+// Can't call SkRect::contains, since it cares about empty, and we don't (we're
+// tracking something to be stroked, so an empty rect can still draw something,
+// e.g. a horizontal line).
+static bool geometric_contains(const SkRect& outer, const SkRect& inner) {
+ SkASSERT(!is_inverted(outer) && !is_inverted(inner));
+ return inner.fRight <= outer.fRight && inner.fLeft >= outer.fLeft &&
+ inner.fBottom <= outer.fBottom && inner.fTop >= outer.fTop;
+}
+
+static inline void hairquad(const SkPoint pts[3], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_quad_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ hair_quad(pts, clip, blitter, level, lineproc);
+}
+
+static inline Sk2s abs(const Sk2s& value) {
+ return Sk2s::Max(value, Sk2s(0)-value);
+}
+
+static inline SkScalar max_component(const Sk2s& value) {
+ SkScalar components[2];
+ value.store(components);
+ return SkTMax(components[0], components[1]);
+}
+
+static inline int compute_cubic_segs(const SkPoint pts[4]) {
+ Sk2s p0 = from_point(pts[0]);
+ Sk2s p1 = from_point(pts[1]);
+ Sk2s p2 = from_point(pts[2]);
+ Sk2s p3 = from_point(pts[3]);
+
+ const Sk2s oneThird(1.0f / 3.0f);
+ const Sk2s twoThird(2.0f / 3.0f);
+
+ Sk2s p13 = oneThird * p3 + twoThird * p0;
+ Sk2s p23 = oneThird * p0 + twoThird * p3;
+
+ SkScalar diff = max_component(Sk2s::Max(abs(p1 - p13), abs(p2 - p23)));
+ SkScalar tol = SK_Scalar1 / 8;
+
+ for (int i = 0; i < kMaxCubicSubdivideLevel; ++i) {
+ if (diff < tol) {
+ return 1 << i;
+ }
+ tol *= 4;
+ }
+ return 1 << kMaxCubicSubdivideLevel;
+}
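+
+// In other words: diff measures how far the interior control points sit from
+// the 1/3 and 2/3 points of the chord. Each doubling of the segment count
+// shrinks a cubic's deviation roughly 4x, which the loop encodes by scaling
+// tol up by 4 per level; e.g. diff == 1 first passes at i == 2 (tol == 2),
+// giving 1 << 2 == 4 segments, and the count is capped at 1 << 9 == 512.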
+
+static bool lt_90(SkPoint p0, SkPoint pivot, SkPoint p2) {
+ return SkVector::DotProduct(p0 - pivot, p2 - pivot) >= 0;
+}
+
+// The off-curve points are "inside" the limits of the on-curve pts
+static bool quick_cubic_niceness_check(const SkPoint pts[4]) {
+ return lt_90(pts[1], pts[0], pts[3]) &&
+ lt_90(pts[2], pts[0], pts[3]) &&
+ lt_90(pts[1], pts[3], pts[0]) &&
+ lt_90(pts[2], pts[3], pts[0]);
+}
+
+typedef SkNx<2, uint32_t> Sk2x32;
+
+static inline Sk2x32 sk2s_is_finite(const Sk2s& x) {
+ const Sk2x32 exp_mask = Sk2x32(0xFF << 23);
+ return (Sk2x32::Load(&x) & exp_mask) != exp_mask;
+}
+
+static void hair_cubic(const SkPoint pts[4], const SkRegion* clip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ const int lines = compute_cubic_segs(pts);
+ SkASSERT(lines > 0);
+ if (1 == lines) {
+ SkPoint tmp[2] = { pts[0], pts[3] };
+ lineproc(tmp, 2, clip, blitter);
+ return;
+ }
+
+ SkCubicCoeff coeff(pts);
+
+ const Sk2s dt(SK_Scalar1 / lines);
+ Sk2s t(0);
+
+ SkPoint tmp[(1 << kMaxCubicSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < SK_ARRAY_COUNT(tmp));
+
+ tmp[0] = pts[0];
+ Sk2s A = coeff.fA;
+ Sk2s B = coeff.fB;
+ Sk2s C = coeff.fC;
+ Sk2s D = coeff.fD;
+ Sk2x32 is_finite(~0); // start out as true
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ Sk2s p = ((A * t + B) * t + C) * t + D;
+ is_finite &= sk2s_is_finite(p);
+ p.store(&tmp[i]);
+ }
+ if (is_finite.allTrue()) {
+ tmp[lines] = pts[3];
+ lineproc(tmp, lines + 1, clip, blitter);
+ } // else some point(s) are non-finite, so don't draw
+}
+
+static SkRect compute_nocheck_cubic_bounds(const SkPoint pts[4]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 8));
+
+ Sk2s min = Sk2s::Load(pts);
+ Sk2s max = min;
+ for (int i = 1; i < 4; ++i) {
+ Sk2s pair = Sk2s::Load(pts+i);
+ min = Sk2s::Min(min, pair);
+ max = Sk2s::Max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static inline void haircubic(const SkPoint pts[4], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_cubic_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ if (quick_cubic_niceness_check(pts)) {
+ hair_cubic(pts, clip, blitter, lineproc);
+ } else {
+ SkPoint tmp[13];
+ SkScalar tValues[3];
+
+ int count = SkChopCubicAtMaxCurvature(pts, tmp, tValues);
+ for (int i = 0; i < count; i++) {
+ hair_cubic(&tmp[i * 3], clip, blitter, lineproc);
+ }
+ }
+}
+
+static int compute_quad_level(const SkPoint pts[3]) {
+ uint32_t d = compute_int_quad_dist(pts);
+ /* quadratics approach the line connecting their start and end points
+ 4x closer with each subdivision, so we compute the number of
+ subdivisions to be the minimum need to get that distance to be less
+ than a pixel.
+ */
+ int level = (33 - SkCLZ(d)) >> 1;
+ // sanity check on level (from the previous version)
+ if (level > kMaxQuadSubdivideLevel) {
+ level = kMaxQuadSubdivideLevel;
+ }
+ return level;
+}
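+
+// e.g. a control-point distance of d == 8 has bit length 4, so
+// level == (33 - 28) >> 1 == 2 and hairquad() approximates the quad with
+// 1 << 2 == 4 chords, while d == 0 gives level 0, i.e. a single chord.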
+
+/* Extend the points in the direction of the starting or ending tangent by 1/2 unit to
+ account for a round or square cap. If there's no distance between the end point and
+ the control point, use the next control point to create a tangent. If the curve
+ is degenerate, move the cap out 1/2 unit horizontally. */
+template <SkPaint::Cap capStyle>
+void extend_pts(SkPath::Verb prevVerb, SkPath::Verb nextVerb, SkPoint* pts, int ptCount) {
+ SkASSERT(SkPaint::kSquare_Cap == capStyle || SkPaint::kRound_Cap == capStyle);
+    // The area of a circle is PI*R*R. For a circle of unit diameter, R = 1/2, so the
+    // full disk has area PI/4; a round cap covers half of that, i.e. PI/8.
+ const SkScalar capOutset = SkPaint::kSquare_Cap == capStyle ? 0.5f : SK_ScalarPI / 8;
+ if (SkPath::kMove_Verb == prevVerb) {
+ SkPoint* first = pts;
+ SkPoint* ctrl = first;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *first - *++ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(1, 0);
+ controls = ptCount - 1; // If all points are equal, move all but one
+ } else {
+ tangent.normalize();
+ }
+ do { // If the end point and control points are equal, loop to move them in tandem.
+ first->fX += tangent.fX * capOutset;
+ first->fY += tangent.fY * capOutset;
+ ++first;
+ } while (++controls < ptCount);
+ }
+ if (SkPath::kMove_Verb == nextVerb || SkPath::kDone_Verb == nextVerb
+ || SkPath::kClose_Verb == nextVerb) {
+ SkPoint* last = &pts[ptCount - 1];
+ SkPoint* ctrl = last;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *last - *--ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(-1, 0);
+ controls = ptCount - 1;
+ } else {
+ tangent.normalize();
+ }
+ do {
+ last->fX += tangent.fX * capOutset;
+ last->fY += tangent.fY * capOutset;
+ --last;
+ } while (++controls < ptCount);
+ }
+}
+
+template <SkPaint::Cap capStyle>
+void hair_path(const SkPath& path, const SkRasterClip& rclip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ if (path.isEmpty()) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clip = nullptr;
+ SkRect insetStorage, outsetStorage;
+ const SkRect* insetClip = nullptr;
+ const SkRect* outsetClip = nullptr;
+
+ {
+ const int capOut = SkPaint::kButt_Cap == capStyle ? 1 : 2;
+ const SkIRect ibounds = path.getBounds().roundOut().makeOutset(capOut, capOut);
+ if (rclip.quickReject(ibounds)) {
+ return;
+ }
+ if (!rclip.quickContains(ibounds)) {
+ if (rclip.isBW()) {
+ clip = &rclip.bwRgn();
+ } else {
+ wrap.init(rclip, blitter);
+ blitter = wrap.getBlitter();
+ clip = &wrap.getRgn();
+ }
+
+ /*
+ * We now cache two scalar rects, to use for culling per-segment (e.g. cubic).
+             * Since we're hairlining, the "bounds" of the control points isn't necessarily the
+             * limit of where a segment can draw (it might draw up to 1 pixel beyond in aa-hairs).
+             *
+             * Computing the pt-bounds per segment is easy, so we do that, and then inversely adjust
+             * the culling bounds so we can just do a straight compare per segment.
+             *
+             * insetClip is used for quick-accept (i.e. the segment is not clipped), so we inset
+ * it from the clip-bounds (since segment bounds can be off by 1).
+ *
+ * outsetClip is used for quick-reject (i.e. the segment is entirely outside), so we
+ * outset it from the clip-bounds.
+ */
+ insetStorage.set(clip->getBounds());
+ outsetStorage = insetStorage.makeOutset(1, 1);
+ insetStorage.inset(1, 1);
+ if (is_inverted(insetStorage)) {
+ /*
+ * our bounds checks assume the rects are never inverted. If insetting has
+ * created that, we assume that the area is too small to safely perform a
+ * quick-accept, so we just mark the rect as empty (so the quick-accept check
+                 * will always fail).
+ */
+ insetStorage.setEmpty(); // just so we don't pass an inverted rect
+ }
+ if (rclip.isRect()) {
+ insetClip = &insetStorage;
+ }
+ outsetClip = &outsetStorage;
+ }
+ }
+
+ SkPath::RawIter iter(path);
+ SkPoint pts[4], firstPt, lastPt;
+ SkPath::Verb verb, prevVerb;
+ SkAutoConicToQuads converter;
+
+ if (SkPaint::kButt_Cap != capStyle) {
+ prevVerb = SkPath::kDone_Verb;
+ }
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ firstPt = lastPt = pts[0];
+ break;
+ case SkPath::kLine_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ lastPt = pts[1];
+ break;
+ case SkPath::kQuad_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 3);
+ }
+ hairquad(pts, clip, insetClip, outsetClip, blitter, compute_quad_level(pts), lineproc);
+ lastPt = pts[2];
+ break;
+ case SkPath::kConic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 3);
+ }
+ // how close should the quads be to the original conic?
+ const SkScalar tol = SK_Scalar1 / 4;
+ const SkPoint* quadPts = converter.computeQuads(pts,
+ iter.conicWeight(), tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ int level = compute_quad_level(quadPts);
+ hairquad(quadPts, clip, insetClip, outsetClip, blitter, level, lineproc);
+ quadPts += 2;
+ }
+ lastPt = pts[2];
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 4);
+ }
+ haircubic(pts, clip, insetClip, outsetClip, blitter, kMaxCubicSubdivideLevel, lineproc);
+ lastPt = pts[3];
+ } break;
+ case SkPath::kClose_Verb:
+ pts[0] = lastPt;
+ pts[1] = firstPt;
+ if (SkPaint::kButt_Cap != capStyle && prevVerb == SkPath::kMove_Verb) {
+ // cap moveTo/close to match svg expectations for degenerate segments
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ }
+ if (SkPaint::kButt_Cap != capStyle) {
+ if (prevVerb == SkPath::kMove_Verb &&
+ verb >= SkPath::kLine_Verb && verb <= SkPath::kCubic_Verb) {
+ firstPt = pts[0]; // the curve moved the initial point, so close to it instead
+ }
+ prevVerb = verb;
+ }
+ }
+}
+
+void SkScan::HairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ if (strokeSize.fX < 0 || strokeSize.fY < 0) {
+ return;
+ }
+
+ const SkScalar dx = strokeSize.fX;
+ const SkScalar dy = strokeSize.fY;
+ SkScalar rx = SkScalarHalf(dx);
+ SkScalar ry = SkScalarHalf(dy);
+ SkRect outer, tmp;
+
+ outer.setLTRB(r.fLeft - rx, r.fTop - ry, r.fRight + rx, r.fBottom + ry);
+
+ if (r.width() <= dx || r.height() <= dy) {
+ SkScan::FillRect(outer, clip, blitter);
+ return;
+ }
+
+ tmp.setLTRB(outer.fLeft, outer.fTop, outer.fRight, outer.fTop + dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fTop = outer.fBottom - dy;
+ tmp.fBottom = outer.fBottom;
+ SkScan::FillRect(tmp, clip, blitter);
+
+ tmp.setLTRB(outer.fLeft, outer.fTop + dy, outer.fLeft + dx, outer.fBottom - dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fLeft = outer.fRight - dx;
+ tmp.fRight = outer.fRight;
+ SkScan::FillRect(tmp, clip, blitter);
+}
+
+void SkScan::HairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ HairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.setBounds(pts, count);
+ r.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut())) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ HairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
+
+void SkScan::AntiHairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiHairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.setBounds(pts, count);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut().makeOutset(1, 1))) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ AntiHairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Path.cpp b/gfx/skia/skia/src/core/SkScan_Path.cpp
new file mode 100644
index 0000000000..4271f4e4b4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Path.cpp
@@ -0,0 +1,780 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkSafe32.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeBuilder.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScanPriv.h"
+#include "src/core/SkTSort.h"
+
+#include <utility>
+
+#define kEDGE_HEAD_Y SK_MinS32
+#define kEDGE_TAIL_Y SK_MaxS32
+
+#ifdef SK_DEBUG
+ static void validate_sort(const SkEdge* edge) {
+ int y = kEDGE_HEAD_Y;
+
+ while (edge->fFirstY != SK_MaxS32) {
+ edge->validate();
+ SkASSERT(y <= edge->fFirstY);
+
+ y = edge->fFirstY;
+ edge = edge->fNext;
+ }
+ }
+#else
+ #define validate_sort(edge)
+#endif
+
+static void insert_new_edges(SkEdge* newEdge, int curr_y) {
+ if (newEdge->fFirstY != curr_y) {
+ return;
+ }
+ SkEdge* prev = newEdge->fPrev;
+ if (prev->fX <= newEdge->fX) {
+ return;
+ }
+ // find first x pos to insert
+ SkEdge* start = backward_insert_start(prev, newEdge->fX);
+ // insert the lot, fixing up the links as we go
+ do {
+ SkEdge* next = newEdge->fNext;
+ do {
+ if (start->fNext == newEdge) {
+ goto nextEdge;
+ }
+ SkEdge* after = start->fNext;
+ if (after->fX >= newEdge->fX) {
+ break;
+ }
+ start = after;
+ } while (true);
+ remove_edge(newEdge);
+ insert_edge_after(newEdge, start);
+nextEdge:
+ start = newEdge;
+ newEdge = next;
+ } while (newEdge->fFirstY == curr_y);
+}
+
+#ifdef SK_DEBUG
+static void validate_edges_for_y(const SkEdge* edge, int curr_y) {
+ while (edge->fFirstY <= curr_y) {
+ SkASSERT(edge->fPrev && edge->fNext);
+ SkASSERT(edge->fPrev->fNext == edge);
+ SkASSERT(edge->fNext->fPrev == edge);
+ SkASSERT(edge->fFirstY <= edge->fLastY);
+
+ SkASSERT(edge->fPrev->fX <= edge->fX);
+ edge = edge->fNext;
+ }
+}
+#else
+ #define validate_edges_for_y(edge, curr_y)
+#endif
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+typedef void (*PrePostProc)(SkBlitter* blitter, int y, bool isStartOfScanline);
+#define PREPOST_START true
+#define PREPOST_END false
+
+static void walk_edges(SkEdge* prevHead, SkPath::FillType fillType,
+ SkBlitter* blitter, int start_y, int stop_y,
+ PrePostProc proc, int rightClip) {
+ validate_sort(prevHead->fNext);
+
+ int curr_y = start_y;
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ int windingMask = (fillType & 1) ? 1 : -1;
+
+ for (;;) {
+ int w = 0;
+ int left SK_INIT_TO_AVOID_WARNING;
+ SkEdge* currE = prevHead->fNext;
+ SkFixed prevX = prevHead->fX;
+
+ validate_edges_for_y(currE, curr_y);
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_START); // pre-proc
+ }
+
+ while (currE->fFirstY <= curr_y) {
+ SkASSERT(currE->fLastY >= curr_y);
+
+ int x = SkFixedRoundToInt(currE->fX);
+
+ if ((w & windingMask) == 0) { // we're starting interval
+ left = x;
+ }
+
+ w += currE->fWinding;
+
+ if ((w & windingMask) == 0) { // we finished an interval
+ int width = x - left;
+ SkASSERT(width >= 0);
+ if (width > 0) {
+ blitter->blitH(left, curr_y, width);
+ }
+ }
+
+ SkEdge* next = currE->fNext;
+ SkFixed newX;
+
+ if (currE->fLastY == curr_y) { // are we done with this edge?
+ if (currE->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)currE)->updateQuadratic()) {
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ } else if (currE->fCurveCount < 0) {
+ if (((SkCubicEdge*)currE)->updateCubic()) {
+ SkASSERT(currE->fFirstY == curr_y + 1);
+
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ }
+ remove_edge(currE);
+ } else {
+ SkASSERT(currE->fLastY > curr_y);
+ newX = currE->fX + currE->fDX;
+ currE->fX = newX;
+ NEXT_X:
+ if (newX < prevX) { // ripple currE backwards until it is x-sorted
+ backward_insert_edge_based_on_x(currE);
+ } else {
+ prevX = newX;
+ }
+ }
+ currE = next;
+ SkASSERT(currE);
+ }
+
+ if ((w & windingMask) != 0) { // was our right-edge culled away?
+ int width = rightClip - left;
+ if (width > 0) {
+ blitter->blitH(left, curr_y, width);
+ }
+ }
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_END); // post-proc
+ }
+
+ curr_y += 1;
+ if (curr_y >= stop_y) {
+ break;
+ }
+        // now currE points to the first edge with a first Y (fFirstY) larger than curr_y
+ insert_new_edges(currE, curr_y);
+ }
+}
+
+// return true if we're NOT done with this edge
+static bool update_edge(SkEdge* edge, int last_y) {
+ SkASSERT(edge->fLastY >= last_y);
+ if (last_y == edge->fLastY) {
+ if (edge->fCurveCount < 0) {
+ if (((SkCubicEdge*)edge)->updateCubic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return true;
+ }
+ } else if (edge->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)edge)->updateQuadratic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return true;
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+// Unexpected conditions for which we need to return
+#define ASSERT_RETURN(cond) \
+ do { \
+ if (!(cond)) { \
+ SkASSERT(false); \
+ return; \
+ } \
+ } while (0)
+
+// Needs Y to only change once (looser than convex in X)
+static void walk_simple_edges(SkEdge* prevHead, SkBlitter* blitter, int start_y, int stop_y) {
+ validate_sort(prevHead->fNext);
+
+ SkEdge* leftE = prevHead->fNext;
+ SkEdge* riteE = leftE->fNext;
+ SkEdge* currE = riteE->fNext;
+
+ // our edge choppers for curves can result in the initial edges
+ // not lining up, so we take the max.
+ int local_top = SkMax32(leftE->fFirstY, riteE->fFirstY);
+ ASSERT_RETURN(local_top >= start_y);
+
+ while (local_top < stop_y) {
+ SkASSERT(leftE->fFirstY <= stop_y);
+ SkASSERT(riteE->fFirstY <= stop_y);
+
+ int local_bot = SkMin32(leftE->fLastY, riteE->fLastY);
+ local_bot = SkMin32(local_bot, stop_y - 1);
+ ASSERT_RETURN(local_top <= local_bot);
+
+ SkFixed left = leftE->fX;
+ SkFixed dLeft = leftE->fDX;
+ SkFixed rite = riteE->fX;
+ SkFixed dRite = riteE->fDX;
+ int count = local_bot - local_top;
+ ASSERT_RETURN(count >= 0);
+
+ if (0 == (dLeft | dRite)) {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L > R) {
+ std::swap(L, R);
+ }
+ if (L < R) {
+ count += 1;
+ blitter->blitRect(L, local_top, R - L, count);
+ }
+ local_top = local_bot + 1;
+ } else {
+ do {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L > R) {
+ std::swap(L, R);
+ }
+ if (L < R) {
+ blitter->blitH(L, local_top, R - L);
+ }
+ // Either/both of these might overflow, since we perform this step even if
+ // (later) we determine that we are done with the edge, and so the computed
+ // left or rite edge will not be used (see update_edge). Use this helper to
+ // silence UBSAN when we perform the add.
+ left = Sk32_can_overflow_add(left, dLeft);
+ rite = Sk32_can_overflow_add(rite, dRite);
+ local_top += 1;
+ } while (--count >= 0);
+ }
+
+ leftE->fX = left;
+ riteE->fX = rite;
+
+ if (!update_edge(leftE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ return; // we're done
+ }
+ leftE = currE;
+ currE = currE->fNext;
+ ASSERT_RETURN(leftE->fFirstY == local_top);
+ }
+ if (!update_edge(riteE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ return; // we're done
+ }
+ riteE = currE;
+ currE = currE->fNext;
+ ASSERT_RETURN(riteE->fFirstY == local_top);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// this guy overrides blitH, and will call its proxy blitter with the inverse
+// of the spans it is given (clipped to the left/right of the cliprect)
+//
+// used to implement inverse filltypes on paths
+//
+class InverseBlitter : public SkBlitter {
+public:
+ void setBlitter(SkBlitter* blitter, const SkIRect& clip, int shift) {
+ fBlitter = blitter;
+ fFirstX = clip.fLeft << shift;
+ fLastX = clip.fRight << shift;
+ }
+ void prepost(int y, bool isStart) {
+ if (isStart) {
+ fPrevX = fFirstX;
+ } else {
+ int invWidth = fLastX - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ }
+ }
+
+ // overrides
+ void blitH(int x, int y, int width) override {
+ int invWidth = x - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ fPrevX = x + width;
+ }
+
+ // we do not expect to get called with these entrypoints
+ void blitAntiH(int, int, const SkAlpha[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH unexpected");
+ }
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("blitV unexpected");
+ }
+ void blitRect(int x, int y, int width, int height) override {
+ SkDEBUGFAIL("blitRect unexpected");
+ }
+ void blitMask(const SkMask&, const SkIRect& clip) override {
+ SkDEBUGFAIL("blitMask unexpected");
+ }
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override {
+ SkDEBUGFAIL("justAnOpaqueColor unexpected");
+ return nullptr;
+ }
+
+private:
+ SkBlitter* fBlitter;
+ int fFirstX, fLastX, fPrevX;
+};
+
+static void PrePostInverseBlitterProc(SkBlitter* blitter, int y, bool isStart) {
+ ((InverseBlitter*)blitter)->prepost(y, isStart);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static bool operator<(const SkEdge& a, const SkEdge& b) {
+ int valuea = a.fFirstY;
+ int valueb = b.fFirstY;
+
+ if (valuea == valueb) {
+ valuea = a.fX;
+ valueb = b.fX;
+ }
+
+ return valuea < valueb;
+}
+
+static SkEdge* sort_edges(SkEdge* list[], int count, SkEdge** last) {
+ SkTQSort(list, list + count - 1);
+
+ // now make the edges linked in sorted order
+ for (int i = 1; i < count; i++) {
+ list[i - 1]->fNext = list[i];
+ list[i]->fPrev = list[i - 1];
+ }
+
+ *last = list[count - 1];
+ return list[0];
+}
+
+// clipRect has not been shifted up
+void sk_fill_path(const SkPath& path, const SkIRect& clipRect, SkBlitter* blitter,
+ int start_y, int stop_y, int shiftEdgesUp, bool pathContainedInClip) {
+ SkASSERT(blitter);
+
+ SkIRect shiftedClip = clipRect;
+ shiftedClip.fLeft = SkLeftShift(shiftedClip.fLeft, shiftEdgesUp);
+ shiftedClip.fRight = SkLeftShift(shiftedClip.fRight, shiftEdgesUp);
+ shiftedClip.fTop = SkLeftShift(shiftedClip.fTop, shiftEdgesUp);
+ shiftedClip.fBottom = SkLeftShift(shiftedClip.fBottom, shiftEdgesUp);
+
+ SkBasicEdgeBuilder builder(shiftEdgesUp);
+ int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &shiftedClip);
+ SkEdge** list = builder.edgeList();
+
+ if (0 == count) {
+ if (path.isInverseFillType()) {
+ /*
+ * Since we are in inverse-fill, our caller has already drawn above
+ * our top (start_y) and will draw below our bottom (stop_y). Thus
+ * we need to restrict our drawing to the intersection of the clip
+ * and those two limits.
+ */
+ SkIRect rect = clipRect;
+ if (rect.fTop < start_y) {
+ rect.fTop = start_y;
+ }
+ if (rect.fBottom > stop_y) {
+ rect.fBottom = stop_y;
+ }
+ if (!rect.isEmpty()) {
+ blitter->blitRect(rect.fLeft << shiftEdgesUp,
+ rect.fTop << shiftEdgesUp,
+ rect.width() << shiftEdgesUp,
+ rect.height() << shiftEdgesUp);
+ }
+ }
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+ // this returns the first and last edge after they're sorted into a dlink list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linklist
+
+ start_y = SkLeftShift(start_y, shiftEdgesUp);
+ stop_y = SkLeftShift(stop_y, shiftEdgesUp);
+ if (!pathContainedInClip && start_y < shiftedClip.fTop) {
+ start_y = shiftedClip.fTop;
+ }
+ if (!pathContainedInClip && stop_y > shiftedClip.fBottom) {
+ stop_y = shiftedClip.fBottom;
+ }
+
+ InverseBlitter ib;
+ PrePostProc proc = nullptr;
+
+ if (path.isInverseFillType()) {
+ ib.setBlitter(blitter, clipRect, shiftEdgesUp);
+ blitter = &ib;
+ proc = PrePostInverseBlitterProc;
+ }
+
+ // count >= 2 is required as the convex walker does not handle missing right edges
+ if (path.isConvex() && (nullptr == proc) && count >= 2) {
+ walk_simple_edges(&headEdge, blitter, start_y, stop_y);
+ } else {
+ walk_edges(&headEdge, path.getFillType(), blitter, start_y, stop_y, proc,
+ shiftedClip.right());
+ }
+}
+
+void sk_blit_above(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = cr.fTop;
+ tmp.fBottom = ir.fTop;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+void sk_blit_below(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = ir.fBottom;
+ tmp.fBottom = cr.fBottom;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * If the caller is drawing an inverse-fill path, then it passes true for
+ * skipRejectTest, so we don't abort drawing just because the src bounds (ir)
+ * are outside of the clip.
+ */
+SkScanClipper::SkScanClipper(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect& ir, bool skipRejectTest, bool irPreClipped) {
+ fBlitter = nullptr; // null means blit nothing
+ fClipRect = nullptr;
+
+ if (clip) {
+ fClipRect = &clip->getBounds();
+ if (!skipRejectTest && !SkIRect::Intersects(*fClipRect, ir)) { // completely clipped out
+ return;
+ }
+
+ if (clip->isRect()) {
+ if (!irPreClipped && fClipRect->contains(ir)) {
+#ifdef SK_DEBUG
+ fRectClipCheckBlitter.init(blitter, *fClipRect);
+ blitter = &fRectClipCheckBlitter;
+#endif
+ fClipRect = nullptr;
+ } else {
+ // only need a wrapper blitter if we're horizontally clipped
+ if (irPreClipped ||
+ fClipRect->fLeft > ir.fLeft || fClipRect->fRight < ir.fRight) {
+ fRectBlitter.init(blitter, *fClipRect);
+ blitter = &fRectBlitter;
+ } else {
+#ifdef SK_DEBUG
+ fRectClipCheckBlitter.init(blitter, *fClipRect);
+ blitter = &fRectClipCheckBlitter;
+#endif
+ }
+ }
+ } else {
+ fRgnBlitter.init(blitter, clip);
+ blitter = &fRgnBlitter;
+ }
+ }
+ fBlitter = blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool clip_to_limit(const SkRegion& orig, SkRegion* reduced) {
+ // need to limit coordinates such that the width/height of our rect can be represented
+ // in SkFixed (16.16). See skbug.com/7998
+ const int32_t limit = 32767 >> 1;
+
+ SkIRect limitR;
+ limitR.setLTRB(-limit, -limit, limit, limit);
+ if (limitR.contains(orig.getBounds())) {
+ return false;
+ }
+ reduced->op(orig, limitR, SkRegion::kIntersect_Op);
+ return true;
+}
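+
+// For example (illustrative): limit == 32767 >> 1 == 16383, so any rect contained in limitR
+// has width and height of at most 32766, which still fits in SkFixed's 16 integer bits.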
+
+// Bias used for conservative rounding of float rects to int rects, to nudge the irects a little
+// larger, so we don't "think" a path's bounds are inside a clip, when (due to numeric drift in
+// the scan-converter) we might walk beyond the predicted limits.
+//
+// This value has been determined by trial and error: pick the smallest value (after the 0.5)
+// that fixes any problematic cases (e.g. crbug.com/844457).
+// NOTE: cubics appear to be the main reason for needing this slop. If we could (perhaps) have a
+// more accurate walker for cubics, we may be able to reduce this fudge factor.
+static const double kConservativeRoundBias = 0.5 + 1.5 / SK_FDot6One;
+
+/**
+ * Round the value down. This is used to round the top and left of a rectangle,
+ * and corresponds to the way the scan converter treats the top and left edges.
+ * It has a slight bias to make the "rounded" int smaller than a normal round, to create a more
+ * conservative int-bounds (larger) from a float rect.
+ */
+static inline int round_down_to_int(SkScalar x) {
+ double xx = x;
+ xx -= kConservativeRoundBias;
+ return sk_double_saturate2int(ceil(xx));
+}
+
+/**
+ * Round the value up. This is used to round the right and bottom of a rectangle.
+ * It has a slight bias to make the "rounded" int larger than a normal round, to create a more
+ * conservative int-bounds (larger) from a float rect.
+ */
+static inline int round_up_to_int(SkScalar x) {
+ double xx = x;
+ xx += kConservativeRoundBias;
+ return sk_double_saturate2int(floor(xx));
+}
+
+/*
+ * Conservative rounding function, which effectively nudges the int-rect to be slightly larger
+ * than SkRect::round() might have produced. This is a safety-net for the scan-converter, which
+ * inspects the returned int-rect, and may disable clipping (for speed) if it thinks all of the
+ * edges will fit inside the clip's bounds. The scan-converter introduces slight numeric errors
+ * due to accumulated += of the slope, so this function is used to return a conservatively large
+ * int-bounds, and thus we will only disable clipping if we're sure the edges will stay in-bounds.
+ */
+static SkIRect conservative_round_to_int(const SkRect& src) {
+ return {
+ round_down_to_int(src.fLeft),
+ round_down_to_int(src.fTop),
+ round_up_to_int(src.fRight),
+ round_up_to_int(src.fBottom),
+ };
+}
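+
+// For example (an illustrative sketch, assuming SK_FDot6One == 64): the bias is then
+// 0.5 + 1.5/64, about 0.5234. A left edge of 0.52 rounds down to ceil(0.52 - 0.5234) == 0
+// (a plain round would give 1), and a right edge of 9.48 rounds up to
+// floor(9.48 + 0.5234) == 10 (a plain round would give 9), so the conservative int-rect
+// strictly contains the plainly-rounded one.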
+
+void SkScan::FillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ // Our edges are fixed-point, and don't like the bounds of the clip to
+ // exceed that. Here we trim the clip just so we don't overflow later on
+ const SkRegion* clipPtr = &origClip;
+ SkRegion finiteClip;
+ if (clip_to_limit(origClip, &finiteClip)) {
+ if (finiteClip.isEmpty()) {
+ return;
+ }
+ clipPtr = &finiteClip;
+ }
+ // don't reference "origClip" any more, just use clipPtr
+
+
+ SkRect bounds = path.getBounds();
+ bool irPreClipped = false;
+ if (!SkRectPriv::MakeLargeS32().contains(bounds)) {
+ if (!bounds.intersect(SkRectPriv::MakeLargeS32())) {
+ bounds.setEmpty();
+ }
+ irPreClipped = true;
+ }
+
+ SkIRect ir = conservative_round_to_int(bounds);
+ if (ir.isEmpty()) {
+ if (path.isInverseFillType()) {
+ blitter->blitRegion(*clipPtr);
+ }
+ return;
+ }
+
+ SkScanClipper clipper(blitter, clipPtr, ir, path.isInverseFillType(), irPreClipped);
+
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ // we have to keep our calls to blitter in sorted order, so we
+ // must blit the above section first, then the middle, then the bottom.
+ if (path.isInverseFillType()) {
+ sk_blit_above(blitter, ir, *clipPtr);
+ }
+ SkASSERT(clipper.getClipRect() == nullptr ||
+ *clipper.getClipRect() == clipPtr->getBounds());
+ sk_fill_path(path, clipPtr->getBounds(), blitter, ir.fTop, ir.fBottom,
+ 0, clipper.getClipRect() == nullptr);
+ if (path.isInverseFillType()) {
+ sk_blit_below(blitter, ir, *clipPtr);
+ }
+ } else {
+ // what does it mean to not have a blitter if path.isInverseFillType???
+ }
+}
+
+void SkScan::FillPath(const SkPath& path, const SkIRect& ir,
+ SkBlitter* blitter) {
+ SkRegion rgn(ir);
+ FillPath(path, rgn, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int build_tri_edges(SkEdge edge[], const SkPoint pts[],
+ const SkIRect* clipRect, SkEdge* list[]) {
+ SkEdge** start = list;
+
+ if (edge->setLine(pts[0], pts[1], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[1], pts[2], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[2], pts[0], clipRect, 0)) {
+ *list++ = edge;
+ }
+ return (int)(list - start);
+}
+
+
+static void sk_fill_triangle(const SkPoint pts[], const SkIRect* clipRect,
+ SkBlitter* blitter, const SkIRect& ir) {
+ SkASSERT(pts && blitter);
+
+ SkEdge edgeStorage[3];
+ SkEdge* list[3];
+
+ int count = build_tri_edges(edgeStorage, pts, clipRect, list);
+ if (count < 2) {
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+
+ // this returns the first and last edge after they're sorted into a dlink list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linklist
+ int stop_y = ir.fBottom;
+ if (clipRect && stop_y > clipRect->fBottom) {
+ stop_y = clipRect->fBottom;
+ }
+ int start_y = ir.fTop;
+ if (clipRect && start_y < clipRect->fTop) {
+ start_y = clipRect->fTop;
+ }
+ walk_simple_edges(&headEdge, blitter, start_y, stop_y);
+}
+
+void SkScan::FillTriangle(const SkPoint pts[], const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty()) {
+ return;
+ }
+
+ SkRect r;
+ r.setBounds(pts, 3);
+ // If r is too large (larger than can easily fit in SkFixed) then we need perform geometric
+ // clipping. This is a bit of work, so we just call the general FillPath() to handle it.
+ // Use FixedMax/2 as the limit so we can subtract two edges and still store that in Fixed.
+ const SkScalar limit = SK_MaxS16 >> 1;
+ if (!SkRect::MakeLTRB(-limit, -limit, limit, limit).contains(r)) {
+ SkPath path;
+ path.addPoly(pts, 3, false);
+ FillPath(path, clip, blitter);
+ return;
+ }
+
+ SkIRect ir = conservative_round_to_int(r);
+ if (ir.isEmpty() || !SkIRect::Intersects(ir, clip.getBounds())) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrap.init(clip, blitter);
+ clipRgn = &wrap.getRgn();
+ blitter = wrap.getBlitter();
+ }
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ sk_fill_triangle(pts, clipper.getClipRect(), blitter, ir);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScopeExit.h b/gfx/skia/skia/src/core/SkScopeExit.h
new file mode 100644
index 0000000000..d4705abc5d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScopeExit.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScopeExit_DEFINED
+#define SkScopeExit_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMacros.h"
+
+#include <functional>
+
+/** SkScopeExit calls a std::function<void()> in its destructor. */
+class SkScopeExit {
+public:
+ SkScopeExit() = default;
+ SkScopeExit(std::function<void()> f) : fFn(std::move(f)) {}
+ SkScopeExit(SkScopeExit&& that) : fFn(std::move(that.fFn)) {}
+
+ ~SkScopeExit() {
+ if (fFn) {
+ fFn();
+ }
+ }
+
+ void clear() { fFn = {}; }
+
+ SkScopeExit& operator=(SkScopeExit&& that) {
+ fFn = std::move(that.fFn);
+ return *this;
+ }
+
+private:
+ std::function<void()> fFn;
+
+ SkScopeExit( const SkScopeExit& ) = delete;
+ SkScopeExit& operator=(const SkScopeExit& ) = delete;
+};
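+
+/**
+ *  A minimal usage sketch (the file handle here is purely illustrative):
+ *
+ *      if (FILE* f = fopen("cache.bin", "rb")) {
+ *          SkScopeExit closer([f] { fclose(f); });
+ *          // ... use f; it is closed on every path out of this scope.
+ *      }
+ */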
+
+/**
+ * SK_AT_SCOPE_EXIT(stmt) evaluates stmt when the current scope ends.
+ *
+ * E.g.
+ * {
+ * int x = 5;
+ * {
+ * SK_AT_SCOPE_EXIT(x--);
+ * SkASSERT(x == 5);
+ * }
+ * SkASSERT(x == 4);
+ * }
+ */
+#define SK_AT_SCOPE_EXIT(stmt) \
+ SkScopeExit SK_MACRO_APPEND_LINE(at_scope_exit_)([&]() { stmt; })
+
+#endif // SkScopeExit_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSemaphore.cpp b/gfx/skia/skia/src/core/SkSemaphore.cpp
new file mode 100644
index 0000000000..692cbfc15c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSemaphore.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSemaphore.h"
+#include "src/core/SkLeanWindows.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ #include <dispatch/dispatch.h>
+
+ struct SkSemaphore::OSSemaphore {
+ dispatch_semaphore_t fSemaphore;
+
+ OSSemaphore() { fSemaphore = dispatch_semaphore_create(0/*initial count*/); }
+ ~OSSemaphore() { dispatch_release(fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { dispatch_semaphore_signal(fSemaphore); } }
+ void wait() { dispatch_semaphore_wait(fSemaphore, DISPATCH_TIME_FOREVER); }
+ };
+#elif defined(SK_BUILD_FOR_WIN)
+ struct SkSemaphore::OSSemaphore {
+ HANDLE fSemaphore;
+
+ OSSemaphore() {
+ fSemaphore = CreateSemaphore(nullptr /*security attributes, optional*/,
+ 0 /*initial count*/,
+ MAXLONG /*max count*/,
+ nullptr /*name, optional*/);
+ }
+ ~OSSemaphore() { CloseHandle(fSemaphore); }
+
+ void signal(int n) {
+ ReleaseSemaphore(fSemaphore, n, nullptr/*returns previous count, optional*/);
+ }
+ void wait() { WaitForSingleObject(fSemaphore, INFINITE/*timeout in ms*/); }
+ };
+#else
+ // It's important we test for Mach before this. This code will compile but not work there.
+ #include <errno.h>
+ #include <semaphore.h>
+ struct SkSemaphore::OSSemaphore {
+ sem_t fSemaphore;
+
+ OSSemaphore() { sem_init(&fSemaphore, 0/*cross process?*/, 0/*initial count*/); }
+ ~OSSemaphore() { sem_destroy(&fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { sem_post(&fSemaphore); } }
+ void wait() {
+ // Try until we're not interrupted.
+ while(sem_wait(&fSemaphore) == -1 && errno == EINTR);
+ }
+ };
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSemaphore::~SkSemaphore() {
+ delete fOSSemaphore;
+}
+
+void SkSemaphore::osSignal(int n) {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->signal(n);
+}
+
+void SkSemaphore::osWait() {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->wait();
+}
+
+bool SkSemaphore::try_wait() {
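+    // Fast path: atomically claim one unit of the count if any are available, without ever
+    // blocking. Note that compare_exchange_weak may fail spuriously; in that case we simply
+    // report failure rather than retrying.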
+ int count = fCount.load(std::memory_order_relaxed);
+ if (count > 0) {
+ return fCount.compare_exchange_weak(count, count-1, std::memory_order_acquire);
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.cpp b/gfx/skia/skia/src/core/SkSharedMutex.cpp
new file mode 100644
index 0000000000..22de1a1357
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkSharedMutex.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSemaphore.h"
+
+#if !defined(__has_feature)
+ #define __has_feature(x) 0
+#endif
+
+#if __has_feature(thread_sanitizer)
+
+ /* Report that a lock has been created at address "lock". */
+ #define ANNOTATE_RWLOCK_CREATE(lock) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" is about to be destroyed. */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" has been acquired.
+ is_w=1 for writer lock, is_w=0 for reader lock. */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+ /* Report that the lock at address "lock" is about to be released. */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+ #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
+ #if defined(__GNUC__)
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+ #else
+ /* TODO(glider): for Windows support we may want to change this macro in order
+ to prepend __declspec(selectany) to the annotations' declarations. */
+ #error weak annotations are not supported for your compiler
+ #endif
+ #else
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+ #endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+
+ extern "C" {
+ void AnnotateRWLockCreate(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockDestroy(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockAcquired(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockReleased(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ }
+
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#else
+
+ #define ANNOTATE_RWLOCK_CREATE(lock)
+ #define ANNOTATE_RWLOCK_DESTROY(lock)
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
+
+#endif
+
+#ifdef SK_DEBUG
+
+ #include "include/private/SkTDArray.h"
+ #include "include/private/SkThreadID.h"
+
+ class SkSharedMutex::ThreadIDSet {
+ public:
+ // Returns true if threadID is in the set.
+ bool find(SkThreadID threadID) const {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return true;
+ }
+ return false;
+ }
+
+        // Returns true if threadID did not already exist (and adds it to the set).
+ bool tryAdd(SkThreadID threadID) {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return false;
+ }
+ fThreadIDs.append(1, &threadID);
+ return true;
+ }
+        // Returns true if threadID was in the set (and removes it).
+ bool tryRemove(SkThreadID threadID) {
+ for (int i = 0; i < fThreadIDs.count(); ++i) {
+ if (fThreadIDs[i] == threadID) {
+ fThreadIDs.remove(i);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void swap(ThreadIDSet& other) {
+ fThreadIDs.swap(other.fThreadIDs);
+ }
+
+ int count() const {
+ return fThreadIDs.count();
+ }
+
+ private:
+ SkTDArray<SkThreadID> fThreadIDs;
+ };
+
+ SkSharedMutex::SkSharedMutex()
+ : fCurrentShared(new ThreadIDSet)
+ , fWaitingExclusive(new ThreadIDSet)
+        , fWaitingShared(new ThreadIDSet) {
+ ANNOTATE_RWLOCK_CREATE(this);
+ }
+
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+
+ void SkSharedMutex::acquire() {
+ SkThreadID threadID(SkGetThreadID());
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexExclusive l(fMu);
+
+ SkASSERTF(!fCurrentShared->find(threadID),
+ "Thread %lx already has an shared lock\n", threadID);
+
+ if (!fWaitingExclusive->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
+ }
+
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
+ fExclusiveQueue.wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ // Implementation Detail:
+    // The shared threads need two separate queues to keep the threads that were added after the
+ // exclusive lock separate from the threads added before.
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+ SkThreadID threadID(SkGetThreadID());
+ int sharedWaitingCount;
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ if (!fWaitingExclusive->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
+ }
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ sharedWaitingCount = fWaitingShared->count();
+ fWaitingShared.swap(fCurrentShared);
+ sharedQueueSelect = fSharedQueueSelect;
+ if (sharedWaitingCount > 0) {
+ fSharedQueueSelect = 1 - fSharedQueueSelect;
+ }
+ }
+
+ if (sharedWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
+ } else if (exclusiveWaitingCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeld() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ SkASSERT(fWaitingExclusive->find(threadID));
+ }
+
+ void SkSharedMutex::acquireShared() {
+ SkThreadID threadID(SkGetThreadID());
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexExclusive l(fMu);
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ if (exclusiveWaitingCount > 0) {
+ if (!fWaitingShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
+ }
+ } else {
+ if (!fCurrentShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
+ }
+ }
+ sharedQueueSelect = fSharedQueueSelect;
+ }
+
+ if (exclusiveWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+ SkThreadID threadID(SkGetThreadID());
+
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexExclusive l(fMu);
+ if (!fCurrentShared->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
+ }
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (0 == currentSharedCount && waitingExclusiveCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeldShared() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(fCurrentShared->find(threadID));
+ }
+
+#else
+
+    // The fQueueCounts field holds three counts packed into a single int32_t so that they can
+    // be managed atomically. The three counts must be the same size, so each gets 10 bits
+    // (kLogThreadCount, i.e. 2^10 == 1024 possible values).
+ //
+ // The three counts held in fQueueCounts are:
+ // * Shared - the number of shared lock holders currently running.
+ // * WaitingExclusive - the number of threads waiting for an exclusive lock.
+ // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
+ // to finish.
+ static const int kLogThreadCount = 10;
+
+ enum {
+ kSharedOffset = (0 * kLogThreadCount),
+ kWaitingExlusiveOffset = (1 * kLogThreadCount),
+ kWaitingSharedOffset = (2 * kLogThreadCount),
+ kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
+ kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
+ kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
+ };
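+
+    // For example (illustrative): fQueueCounts == (1 << kWaitingSharedOffset) |
+    // (2 << kWaitingExlusiveOffset) | (3 << kSharedOffset) encodes one thread waiting for a
+    // shared lock, two threads waiting for an exclusive lock, and three shared holders
+    // currently running.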
+
+ SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+ void SkSharedMutex::acquire() {
+ // Increment the count of exclusive queue waiters.
+ int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
+ std::memory_order_acquire);
+
+        // If there are no other exclusive waiters and no shared threads are running, then
+        // proceed; otherwise wait.
+ if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
+ fExclusiveQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+
+ int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
+ int32_t waitingShared;
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+
+ // Decrement exclusive waiters.
+ newQueueCounts -= 1 << kWaitingExlusiveOffset;
+
+ // The number of threads waiting to acquire a shared lock.
+ waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
+
+            // If there are any, move the count of shared waiters over to the running-shared
+            // count; they are going to run next.
+ if (waitingShared > 0) {
+
+ // Set waiting shared to zero.
+ newQueueCounts &= ~kWaitingSharedMask;
+
+                // Because this is the exclusive release, there are zero readers. So, the bits
+ // for shared locks should be zero. Since those bits are zero, we can just |= in the
+ // waitingShared count instead of clearing with an &= and then |= the count.
+ newQueueCounts |= waitingShared << kSharedOffset;
+ }
+
+ } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+
+ if (waitingShared > 0) {
+ // Run all the shared.
+ fSharedQueue.signal(waitingShared);
+ } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ // Run a single exclusive waiter.
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::acquireShared() {
+ int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+            // If there are waiting exclusives, then this shared lock waits; otherwise it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ newQueueCounts += 1 << kWaitingSharedOffset;
+ } else {
+ newQueueCounts += 1 << kSharedOffset;
+ }
+ } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
+ std::memory_order_acquire,
+ std::memory_order_relaxed));
+
+        // If there are waiting exclusives, then this shared waiter blocks until they have run.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ fSharedQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+
+ // Decrement the shared count.
+ int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
+ std::memory_order_release);
+
+ // If shared count is going to zero (because the old count == 1) and there are exclusive
+ // waiters, then run a single exclusive waiter.
+ if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
+ && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.h b/gfx/skia/skia/src/core/SkSharedMutex.h
new file mode 100644
index 0000000000..2544ee605a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSharedLock_DEFINED
+#define SkSharedLock_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkSemaphore.h"
+#include "include/private/SkThreadAnnotations.h"
+#include <atomic>
+
+#ifdef SK_DEBUG
+ #include "include/private/SkMutex.h"
+ #include <memory>
+#endif // SK_DEBUG
+
+// There are two shared lock implementations: one for debug builds and one for high
+// performance. Both implement an interface similar to pthread's rwlocks. The high-performance
+// implementation is cribbed from Preshing's article:
+// http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+//
+// This lock does not obey strict queue ordering. It will always alternate between readers and
+// a single writer.
+class SK_CAPABILITY("mutex") SkSharedMutex {
+public:
+ SkSharedMutex();
+ ~SkSharedMutex();
+ // Acquire lock for exclusive use.
+ void acquire() SK_ACQUIRE();
+
+ // Release lock for exclusive use.
+ void release() SK_RELEASE_CAPABILITY();
+
+ // Fail if exclusive is not held.
+ void assertHeld() const SK_ASSERT_CAPABILITY(this);
+
+ // Acquire lock for shared use.
+ void acquireShared() SK_ACQUIRE_SHARED();
+
+ // Release lock for shared use.
+ void releaseShared() SK_RELEASE_SHARED_CAPABILITY();
+
+ // Fail if shared lock not held.
+ void assertHeldShared() const SK_ASSERT_SHARED_CAPABILITY(this);
+
+private:
+#ifdef SK_DEBUG
+ class ThreadIDSet;
+ std::unique_ptr<ThreadIDSet> fCurrentShared;
+ std::unique_ptr<ThreadIDSet> fWaitingExclusive;
+ std::unique_ptr<ThreadIDSet> fWaitingShared;
+ int fSharedQueueSelect{0};
+ mutable SkMutex fMu;
+ SkSemaphore fSharedQueue[2];
+ SkSemaphore fExclusiveQueue;
+#else
+ std::atomic<int32_t> fQueueCounts;
+ SkSemaphore fSharedQueue;
+ SkSemaphore fExclusiveQueue;
+#endif // SK_DEBUG
+};
+
+#ifndef SK_DEBUG
+inline void SkSharedMutex::assertHeld() const {};
+inline void SkSharedMutex::assertHeldShared() const {};
+#endif // SK_DEBUG
+
+class SK_SCOPED_CAPABILITY SkAutoSharedMutexExclusive {
+public:
+ explicit SkAutoSharedMutexExclusive(SkSharedMutex& lock) SK_ACQUIRE(lock)
+ : fLock(lock) {
+ lock.acquire();
+ }
+ ~SkAutoSharedMutexExclusive() SK_RELEASE_CAPABILITY() { fLock.release(); }
+
+private:
+ SkSharedMutex& fLock;
+};
+
+#define SkAutoSharedMutexExclusive(...) SK_REQUIRE_LOCAL_VAR(SkAutoSharedMutexExclusive)
+
+class SK_SCOPED_CAPABILITY SkAutoSharedMutexShared {
+public:
+ explicit SkAutoSharedMutexShared(SkSharedMutex& lock) SK_ACQUIRE_SHARED(lock)
+ : fLock(lock) {
+ lock.acquireShared();
+ }
+
+ // You would think this should be SK_RELEASE_SHARED_CAPABILITY, but SK_SCOPED_CAPABILITY
+ // doesn't fully understand the difference between shared and exclusive.
+ // Please review https://reviews.llvm.org/D52578 for more information.
+ ~SkAutoSharedMutexShared() SK_RELEASE_CAPABILITY() { fLock.releaseShared(); }
+
+private:
+ SkSharedMutex& fLock;
+};
+
+#define SkAutoSharedMutexShared(...) SK_REQUIRE_LOCAL_VAR(SkAutoSharedMutexShared)
+
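+// A minimal usage sketch (gCacheMutex and gCache are hypothetical, shown for illustration):
+//
+//     SkSharedMutex gCacheMutex;
+//
+//     int lookup(int key) {
+//         SkAutoSharedMutexShared lock(gCacheMutex);    // many readers may run concurrently
+//         return gCache[key];
+//     }
+//     void update(int key, int value) {
+//         SkAutoSharedMutexExclusive lock(gCacheMutex); // a writer runs alone
+//         gCache[key] = value;
+//     }
+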
+#endif // SkSharedLock_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSpan.h b/gfx/skia/skia/src/core/SkSpan.h
new file mode 100644
index 0000000000..8db1866c79
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpan.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpan_DEFINED
+#define SkSpan_DEFINED
+
+#include <cstddef>
+#include "include/private/SkTo.h"
+
+template <typename T>
+class SkSpan {
+public:
+ constexpr SkSpan() : fPtr{nullptr}, fSize{0} {}
+ constexpr SkSpan(T* ptr, size_t size) : fPtr{ptr}, fSize{size} {}
+ template <typename U, typename = typename std::enable_if<std::is_same<const U, T>::value>::type>
+ constexpr SkSpan(const SkSpan<U>& that) : fPtr(that.data()), fSize{that.size()} {}
+ constexpr SkSpan(const SkSpan& o) = default;
+ constexpr SkSpan& operator=(const SkSpan& that) {
+ fPtr = that.fPtr;
+ fSize = that.fSize;
+ return *this;
+ }
+ constexpr T& operator [] (size_t i) const { return fPtr[i]; }
+ constexpr T& front() const { return fPtr[0]; }
+ constexpr T& back() const { return fPtr[fSize - 1]; }
+ constexpr T* begin() const { return fPtr; }
+ constexpr T* end() const { return fPtr + fSize; }
+ constexpr const T* cbegin() const { return fPtr; }
+ constexpr const T* cend() const { return fPtr + fSize; }
+ constexpr T* data() const { return fPtr; }
+ constexpr size_t size() const { return fSize; }
+ constexpr bool empty() const { return fSize == 0; }
+ constexpr size_t size_bytes() const { return fSize * sizeof(T); }
+ constexpr SkSpan<T> first(size_t prefixLen) { return SkSpan<T>{fPtr, prefixLen}; }
+
+private:
+ T* fPtr;
+ size_t fSize;
+};
+
+template <typename T, typename S>
+inline constexpr SkSpan<T> SkMakeSpan(T* p, S s) { return SkSpan<T>{p, SkTo<size_t>(s)}; }
+
+template <size_t N, typename T>
+inline constexpr SkSpan<T> SkMakeSpan(T(&a)[N]) { return SkSpan<T>{a, N}; }
+
+template <typename Container>
+inline auto SkMakeSpan(Container& c)
+ -> SkSpan<typename std::remove_reference<decltype(*(c.data()))>::type> {
+ return {c.data(), c.size()};
+}
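+
+// A minimal usage sketch (illustrative):
+//
+//     int storage[4] = {1, 2, 3, 4};
+//     SkSpan<int> all = SkMakeSpan(storage);   // the array overload deduces N == 4
+//     SkSpan<int> head = all.first(2);         // views {1, 2} without copying
+//     size_t bytes = head.size_bytes();        // 2 * sizeof(int)
+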
+#endif // SkSpan_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.cpp b/gfx/skia/skia/src/core/SkSpecialImage.cpp
new file mode 100644
index 0000000000..3c72e616bb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.cpp
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#include "src/core/SkSpecialImage.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/image/SkImage_Base.h"
+#include <atomic>
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/image/SkImage_Gpu.h"
+#endif
+
+// Currently the raster image filters can only handle certain SkImageInfos. Call this to know
+// whether a given info is supported.
+static bool valid_for_imagefilters(const SkImageInfo& info) {
+ // no support for other swizzles/depths yet
+ return info.colorType() == kN32_SkColorType;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+class SkSpecialImage_Base : public SkSpecialImage {
+public:
+ SkSpecialImage_Base(const SkIRect& subset, uint32_t uniqueID, const SkSurfaceProps* props)
+ : INHERITED(subset, uniqueID, props) {
+ }
+ ~SkSpecialImage_Base() override { }
+
+ virtual void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) const = 0;
+
+ virtual bool onGetROPixels(SkBitmap*) const = 0;
+
+ virtual GrRecordingContext* onGetContext() const { return nullptr; }
+
+ virtual SkColorSpace* onGetColorSpace() const = 0;
+
+#if SK_SUPPORT_GPU
+ virtual sk_sp<GrTextureProxy> onAsTextureProxyRef(GrRecordingContext* context) const = 0;
+#endif
+
+ // This subset is relative to the backing store's coordinate frame, it has already been mapped
+ // from the content rect by the non-virtual makeSubset().
+ virtual sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const = 0;
+
+ virtual sk_sp<SkSpecialSurface> onMakeSurface(
+ SkColorType colorType, const SkColorSpace* colorSpace, const SkISize& size,
+ SkAlphaType at, const SkSurfaceProps* = nullptr) const = 0;
+
+ // This subset (when not null) is relative to the backing store's coordinate frame, it has
+ // already been mapped from the content rect by the non-virtual asImage().
+ virtual sk_sp<SkImage> onAsImage(const SkIRect* subset) const = 0;
+
+ virtual sk_sp<SkSurface> onMakeTightSurface(
+ SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at) const = 0;
+
+private:
+ typedef SkSpecialImage INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+static inline const SkSpecialImage_Base* as_SIB(const SkSpecialImage* image) {
+ return static_cast<const SkSpecialImage_Base*>(image);
+}
+
+SkSpecialImage::SkSpecialImage(const SkIRect& subset,
+ uint32_t uniqueID,
+ const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props))
+ , fSubset(subset)
+ , fUniqueID(kNeedNewImageUniqueID_SpecialImage == uniqueID ? SkNextID::ImageID() : uniqueID) {
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::makeTextureImage(GrRecordingContext* context) const {
+#if SK_SUPPORT_GPU
+ if (!context) {
+ return nullptr;
+ }
+ if (GrRecordingContext* curContext = as_SIB(this)->onGetContext()) {
+ return curContext->priv().matches(context) ? sk_ref_sp(this) : nullptr;
+ }
+
+ auto proxyProvider = context->priv().proxyProvider();
+ SkBitmap bmp;
+ // At this point, we are definitely not texture-backed, so we must be raster or generator
+ // backed. If we remove the special-wrapping-an-image subclass, we may be able to assert that
+ // we are strictly raster-backed (i.e. generator images become raster when they are specialized)
+ // in which case getROPixels could turn into peekPixels...
+ if (!this->getROPixels(&bmp)) {
+ return nullptr;
+ }
+
+ if (bmp.empty()) {
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeEmpty(), bmp, &this->props());
+ }
+
+ // TODO: this is a tight copy of 'bmp' but it doesn't have to be (given SkSpecialImage's
+ // semantics). Since this is cached though we would have to bake the fit into the cache key.
+ sk_sp<GrTextureProxy> proxy = GrMakeCachedBitmapProxy(proxyProvider, bmp);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ const SkIRect rect = SkIRect::MakeWH(proxy->width(), proxy->height());
+
+ // GrMakeCachedBitmapProxy has uploaded only the specified subset of 'bmp' so we need not
+ // bother with SkBitmap::getSubset
+ return SkSpecialImage::MakeDeferredFromGpu(context,
+ rect,
+ this->uniqueID(),
+ std::move(proxy),
+ SkColorTypeToGrColorType(bmp.colorType()),
+ sk_ref_sp(this->getColorSpace()),
+ &this->props(),
+ this->alphaType());
+#else
+ return nullptr;
+#endif
+}
+
+void SkSpecialImage::draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const {
+ return as_SIB(this)->onDraw(canvas, x, y, paint);
+}
+
+bool SkSpecialImage::getROPixels(SkBitmap* bm) const {
+ return as_SIB(this)->onGetROPixels(bm);
+}
+
+bool SkSpecialImage::isTextureBacked() const {
+ return SkToBool(as_SIB(this)->onGetContext());
+}
+
+GrRecordingContext* SkSpecialImage::getContext() const {
+ return as_SIB(this)->onGetContext();
+}
+
+SkColorSpace* SkSpecialImage::getColorSpace() const {
+ return as_SIB(this)->onGetColorSpace();
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTextureProxy> SkSpecialImage::asTextureProxyRef(GrRecordingContext* context) const {
+ return as_SIB(this)->onAsTextureProxyRef(context);
+}
+#endif
+
+sk_sp<SkSpecialSurface> SkSpecialImage::makeSurface(
+ SkColorType colorType, const SkColorSpace* colorSpace, const SkISize& size,
+ SkAlphaType at, const SkSurfaceProps* props) const {
+ return as_SIB(this)->onMakeSurface(colorType, colorSpace, size, at, props);
+}
+
+sk_sp<SkSurface> SkSpecialImage::makeTightSurface(
+ SkColorType colorType, const SkColorSpace* colorSpace, const SkISize& size,
+ SkAlphaType at) const {
+ return as_SIB(this)->onMakeTightSurface(colorType, colorSpace, size, at);
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::makeSubset(const SkIRect& subset) const {
+ SkIRect absolute = subset.makeOffset(this->subset().topLeft());
+ return as_SIB(this)->onMakeSubset(absolute);
+}
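+
+// For example (illustrative): for a special image whose subset() is {10, 10, 60, 60}, a
+// caller-relative request of {0, 0, 25, 25} maps to {10, 10, 35, 35} in the backing store.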
+
+sk_sp<SkImage> SkSpecialImage::asImage(const SkIRect* subset) const {
+ if (subset) {
+ SkIRect absolute = subset->makeOffset(this->subset().topLeft());
+ return as_SIB(this)->onAsImage(&absolute);
+ } else {
+ return as_SIB(this)->onAsImage(nullptr);
+ }
+}
+
+#if defined(SK_DEBUG) || SK_SUPPORT_GPU
+static bool rect_fits(const SkIRect& rect, int width, int height) {
+ if (0 == width && 0 == height) {
+ SkASSERT(0 == rect.fLeft && 0 == rect.fRight && 0 == rect.fTop && 0 == rect.fBottom);
+ return true;
+ }
+
+ return rect.fLeft >= 0 && rect.fLeft < width && rect.fLeft < rect.fRight &&
+ rect.fRight >= 0 && rect.fRight <= width &&
+ rect.fTop >= 0 && rect.fTop < height && rect.fTop < rect.fBottom &&
+ rect.fBottom >= 0 && rect.fBottom <= height;
+}
+#endif
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromImage(GrRecordingContext* context,
+ const SkIRect& subset,
+ sk_sp<SkImage> image,
+ const SkSurfaceProps* props) {
+ SkASSERT(rect_fits(subset, image->width(), image->height()));
+
+#if SK_SUPPORT_GPU
+ if (sk_sp<GrTextureProxy> proxy = as_IB(image)->asTextureProxyRef(context)) {
+ if (!as_IB(image)->context()->priv().matches(context)) {
+ return nullptr;
+ }
+
+ return MakeDeferredFromGpu(context, subset, image->uniqueID(), std::move(proxy),
+ SkColorTypeToGrColorType(image->colorType()),
+ image->refColorSpace(), props);
+ } else
+#endif
+ {
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(&bm)) {
+ return MakeFromRaster(subset, bm, props);
+ }
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpecialImage_Raster : public SkSpecialImage_Base {
+public:
+ SkSpecialImage_Raster(const SkIRect& subset, const SkBitmap& bm, const SkSurfaceProps* props)
+ : INHERITED(subset, bm.getGenerationID(), props)
+ , fBitmap(bm)
+ {
+ SkASSERT(bm.pixelRef());
+ SkASSERT(fBitmap.getPixels());
+ }
+
+ SkAlphaType alphaType() const override { return fBitmap.alphaType(); }
+
+ SkColorType colorType() const override { return fBitmap.colorType(); }
+
+ size_t getSize() const override { return fBitmap.computeByteSize(); }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ canvas->drawBitmapRect(fBitmap, this->subset(),
+ dst, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ bool onGetROPixels(SkBitmap* bm) const override {
+ return fBitmap.extractSubset(bm, this->subset());
+ }
+
+ SkColorSpace* onGetColorSpace() const override {
+ return fBitmap.colorSpace();
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> onAsTextureProxyRef(GrRecordingContext* context) const override {
+ if (context) {
+ return GrMakeCachedBitmapProxy(context->priv().proxyProvider(), fBitmap);
+ }
+
+ return nullptr;
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> onMakeSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at,
+ const SkSurfaceProps* props) const override {
+ // Ignore the requested color type, the raster backend currently only supports N32
+ colorType = kN32_SkColorType; // TODO: find ways to allow f16
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ return SkSpecialSurface::MakeRaster(info, props);
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ // No need to extract subset, onGetROPixels handles that when needed
+ return SkSpecialImage::MakeFromRaster(subset, fBitmap, &this->props());
+ }
+
+ sk_sp<SkImage> onAsImage(const SkIRect* subset) const override {
+ if (subset) {
+ SkBitmap subsetBM;
+
+ if (!fBitmap.extractSubset(&subsetBM, *subset)) {
+ return nullptr;
+ }
+
+ return SkImage::MakeFromBitmap(subsetBM);
+ }
+
+ return SkImage::MakeFromBitmap(fBitmap);
+ }
+
+sk_sp<SkSurface> onMakeTightSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at) const override {
+ // Ignore the requested color type, the raster backend currently only supports N32
+ colorType = kN32_SkColorType; // TODO: find ways to allow f16
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ return SkSurface::MakeRaster(info);
+ }
+
+private:
+ SkBitmap fBitmap;
+
+ typedef SkSpecialImage_Base INHERITED;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromRaster(const SkIRect& subset,
+ const SkBitmap& bm,
+ const SkSurfaceProps* props) {
+ SkASSERT(rect_fits(subset, bm.width(), bm.height()));
+
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ const SkBitmap* srcBM = &bm;
+ SkBitmap tmp;
+ // ImageFilters only handle N32 at the moment, so force our src to be that
+ if (!valid_for_imagefilters(bm.info())) {
+ if (!tmp.tryAllocPixels(bm.info().makeColorType(kN32_SkColorType)) ||
+ !bm.readPixels(tmp.info(), tmp.getPixels(), tmp.rowBytes(), 0, 0))
+ {
+ return nullptr;
+ }
+ srcBM = &tmp;
+ }
+ return sk_make_sp<SkSpecialImage_Raster>(subset, *srcBM, props);
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::CopyFromRaster(const SkIRect& subset,
+ const SkBitmap& bm,
+ const SkSurfaceProps* props) {
+ SkASSERT(rect_fits(subset, bm.width(), bm.height()));
+
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ SkBitmap tmp;
+ SkImageInfo info = bm.info().makeDimensions(subset.size());
+ // As in MakeFromRaster, must force src to N32 for ImageFilters
+ if (!valid_for_imagefilters(bm.info())) {
+ info = info.makeColorType(kN32_SkColorType);
+ }
+ if (!tmp.tryAllocPixels(info)) {
+ return nullptr;
+ }
+ if (!bm.readPixels(tmp.info(), tmp.getPixels(), tmp.rowBytes(), subset.x(), subset.y())) {
+ return nullptr;
+ }
+
+ // Since we're making a copy of the raster, the resulting special image is the exact size
+ // of the requested subset of the original and no longer needs to be offset by subset's left
+ // and top, since those were relative to the original's buffer.
+ return sk_make_sp<SkSpecialImage_Raster>(
+ SkIRect::MakeWH(subset.width(), subset.height()), tmp, props);
+}
+
+#if SK_SUPPORT_GPU
+///////////////////////////////////////////////////////////////////////////////
+static sk_sp<SkImage> wrap_proxy_in_image(GrRecordingContext* context, sk_sp<GrTextureProxy> proxy,
+ SkAlphaType alphaType, sk_sp<SkColorSpace> colorSpace) {
+ // CONTEXT TODO: remove this use of 'backdoor' to create an SkImage
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(context->priv().backdoor()),
+ kNeedNewImageUniqueID, alphaType,
+ std::move(proxy), std::move(colorSpace));
+}
+
+class SkSpecialImage_Gpu : public SkSpecialImage_Base {
+public:
+ SkSpecialImage_Gpu(GrRecordingContext* context, const SkIRect& subset,
+ uint32_t uniqueID, sk_sp<GrTextureProxy> proxy, GrColorType ct,
+ SkAlphaType at, sk_sp<SkColorSpace> colorSpace, const SkSurfaceProps* props)
+ : INHERITED(subset, uniqueID, props)
+ , fContext(context)
+ , fTextureProxy(std::move(proxy))
+ , fColorType(ct)
+ , fAlphaType(at)
+ , fColorSpace(std::move(colorSpace))
+ , fAddedRasterVersionToCache(false) {
+ }
+
+ ~SkSpecialImage_Gpu() override {
+ if (fAddedRasterVersionToCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+ }
+
+ SkAlphaType alphaType() const override { return fAlphaType; }
+
+ SkColorType colorType() const override { return GrColorTypeToSkColorType(fColorType); }
+
+ size_t getSize() const override {
+ return fTextureProxy->gpuMemorySize(*fContext->priv().caps());
+ }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ // TODO: In this instance we know we're going to draw a sub-portion of the backing
+ // texture into the canvas so it is okay to wrap it in an SkImage. This poses
+ // some problems for full deferral however in that when the deferred SkImage_Gpu
+ // instantiates itself it is going to have to either be okay with having a larger
+ // than expected backing texture (unlikely) or the 'fit' of the SurfaceProxy needs
+ // to be tightened (if it is deferred).
+ sk_sp<SkImage> img =
+ sk_sp<SkImage>(new SkImage_Gpu(sk_ref_sp(canvas->getGrContext()), this->uniqueID(),
+ fAlphaType, fTextureProxy, fColorSpace));
+
+ canvas->drawImageRect(img, this->subset(),
+ dst, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ GrRecordingContext* onGetContext() const override { return fContext; }
+
+ sk_sp<GrTextureProxy> onAsTextureProxyRef(GrRecordingContext*) const override {
+ return fTextureProxy;
+ }
+
+ bool onGetROPixels(SkBitmap* dst) const override {
+ const auto desc = SkBitmapCacheDesc::Make(this->uniqueID(), this->subset());
+ if (SkBitmapCache::Find(desc, dst)) {
+ SkASSERT(dst->getGenerationID() == this->uniqueID());
+ SkASSERT(dst->isImmutable());
+ SkASSERT(dst->getPixels());
+ return true;
+ }
+
+ SkPixmap pmap;
+ SkImageInfo info = SkImageInfo::MakeN32(this->width(), this->height(),
+ this->alphaType(), fColorSpace);
+ auto rec = SkBitmapCache::Alloc(desc, info, &pmap);
+ if (!rec) {
+ return false;
+ }
+ auto sContext = fContext->priv().makeWrappedSurfaceContext(fTextureProxy, fColorType,
+ this->alphaType(), fColorSpace);
+ if (!sContext) {
+ return false;
+ }
+
+ if (!sContext->readPixels(info, pmap.writable_addr(), pmap.rowBytes(),
+ {this->subset().left(), this->subset().top()})) {
+ return false;
+ }
+
+ SkBitmapCache::Add(std::move(rec), dst);
+ fAddedRasterVersionToCache.store(true);
+ return true;
+ }
+
+ SkColorSpace* onGetColorSpace() const override {
+ return fColorSpace.get();
+ }
+
+ sk_sp<SkSpecialSurface> onMakeSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at,
+ const SkSurfaceProps* props) const override {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ return SkSpecialSurface::MakeRenderTarget(fContext, size.width(), size.height(),
+ SkColorTypeToGrColorType(colorType),
+ sk_ref_sp(colorSpace), props);
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ return SkSpecialImage::MakeDeferredFromGpu(fContext,
+ subset,
+ this->uniqueID(),
+ fTextureProxy,
+ fColorType,
+ fColorSpace,
+ &this->props(),
+ fAlphaType);
+ }
+
+ // TODO: move all the logic here into the subset-flavor GrSurfaceProxy::copy?
+ sk_sp<SkImage> onAsImage(const SkIRect* subset) const override {
+ if (subset) {
+            // TODO: if this becomes a bottleneck we could base this logic on what the size
+ // will be when it is finally instantiated - but that is more fraught.
+ if (GrProxyProvider::IsFunctionallyExact(fTextureProxy.get()) &&
+ 0 == subset->fLeft && 0 == subset->fTop &&
+ fTextureProxy->width() == subset->width() &&
+ fTextureProxy->height() == subset->height()) {
+ fTextureProxy->priv().exactify(false);
+ // The existing GrTexture is already tight so reuse it in the SkImage
+ return wrap_proxy_in_image(fContext, fTextureProxy, fAlphaType, fColorSpace);
+ }
+
+ sk_sp<GrTextureProxy> subsetProxy(
+ GrSurfaceProxy::Copy(fContext, fTextureProxy.get(), fColorType,
+ GrMipMapped::kNo, *subset, SkBackingFit::kExact,
+ SkBudgeted::kYes));
+ if (!subsetProxy) {
+ return nullptr;
+ }
+
+ SkASSERT(subsetProxy->priv().isExact());
+ // MDB: this is acceptable (wrapping subsetProxy in an SkImage) bc Copy will
+ // return a kExact-backed proxy
+ return wrap_proxy_in_image(fContext, std::move(subsetProxy), fAlphaType, fColorSpace);
+ }
+
+ fTextureProxy->priv().exactify(true);
+
+ return wrap_proxy_in_image(fContext, fTextureProxy, fAlphaType, fColorSpace);
+ }
+
+ sk_sp<SkSurface> onMakeTightSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at) const override {
+ // TODO (michaelludwig): Why does this ignore colorType but onMakeSurface doesn't ignore it?
+ // Once makeTightSurface() goes away, should this type overriding behavior be moved into
+ // onMakeSurface() or is this unnecessary?
+ colorType = colorSpace && colorSpace->gammaIsLinear()
+ ? kRGBA_F16_SkColorType : kRGBA_8888_SkColorType;
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ // CONTEXT TODO: remove this use of 'backdoor' to create an SkSurface
+ return SkSurface::MakeRenderTarget(fContext->priv().backdoor(), SkBudgeted::kYes, info);
+ }
+
+private:
+ GrRecordingContext* fContext;
+ sk_sp<GrTextureProxy> fTextureProxy;
+ const GrColorType fColorType;
+ const SkAlphaType fAlphaType;
+ sk_sp<SkColorSpace> fColorSpace;
+ mutable std::atomic<bool> fAddedRasterVersionToCache;
+
+ typedef SkSpecialImage_Base INHERITED;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeDeferredFromGpu(GrRecordingContext* context,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkAlphaType at) {
+ if (!context || context->priv().abandoned() || !proxy) {
+ return nullptr;
+ }
+ SkASSERT_RELEASE(rect_fits(subset, proxy->width(), proxy->height()));
+ return sk_make_sp<SkSpecialImage_Gpu>(context, subset, uniqueID, std::move(proxy), colorType,
+ at, std::move(colorSpace), props);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.h b/gfx/skia/skia/src/core/SkSpecialImage.h
new file mode 100644
index 0000000000..d68b966c31
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#ifndef SkSpecialImage_DEFINED
+#define SkSpecialImage_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/core/SkNextID.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrTypesPriv.h"
+#endif
+
+class GrRecordingContext;
+class GrTextureProxy;
+class SkBitmap;
+class SkCanvas;
+class SkImage;
+struct SkImageInfo;
+class SkPaint;
+class SkPixmap;
+class SkSpecialSurface;
+class SkSurface;
+
+enum {
+ kNeedNewImageUniqueID_SpecialImage = 0
+};
+
+/**
+ * This is a restricted form of SkImage solely intended for internal use. It
+ * differs from SkImage in that:
+ * - it can only be backed by raster or gpu (no generators)
+ * - it can be backed by a GrTextureProxy larger than its nominal bounds
+ * - it can't be drawn tiled
+ * - it can't be drawn with MIPMAPs
+ * It is similar to SkImage in that it abstracts how the pixels are stored/represented.
+ *
+ * Note: the contents of the backing storage outside of the subset rect are undefined.
+ */
+class SkSpecialImage : public SkRefCnt {
+public:
+ typedef void* ReleaseContext;
+ typedef void(*RasterReleaseProc)(void* pixels, ReleaseContext);
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+ const SkIRect& subset() const { return fSubset; }
+ SkColorSpace* getColorSpace() const;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+ virtual SkAlphaType alphaType() const = 0;
+ virtual SkColorType colorType() const = 0;
+ virtual size_t getSize() const = 0;
+
+ /**
+ * Ensures that a special image is backed by a texture (when GrRecordingContext is non-null).
+ * If no transformation is required, the returned image may be the same as this special image.
+ * If this special image is from a different GrRecordingContext, this will fail.
+ */
+ sk_sp<SkSpecialImage> makeTextureImage(GrRecordingContext*) const;
+
+ /**
+ * Draw this SpecialImage into the canvas, automatically taking into account the image's subset
+ */
+ void draw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) const;
+
+ static sk_sp<SkSpecialImage> MakeFromImage(GrRecordingContext*,
+ const SkIRect& subset,
+ sk_sp<SkImage>,
+ const SkSurfaceProps* = nullptr);
+ static sk_sp<SkSpecialImage> MakeFromRaster(const SkIRect& subset,
+ const SkBitmap&,
+ const SkSurfaceProps* = nullptr);
+ static sk_sp<SkSpecialImage> CopyFromRaster(const SkIRect& subset,
+ const SkBitmap&,
+ const SkSurfaceProps* = nullptr);
+#if SK_SUPPORT_GPU
+ static sk_sp<SkSpecialImage> MakeDeferredFromGpu(GrRecordingContext*,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ sk_sp<GrTextureProxy>,
+ GrColorType,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps* = nullptr,
+ SkAlphaType at = kPremul_SkAlphaType);
+#endif
+
+ /**
+ * Create a new special surface with a backend that is compatible with this special image.
+ */
+ sk_sp<SkSpecialSurface> makeSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at = kPremul_SkAlphaType,
+ const SkSurfaceProps* props = nullptr) const;
+
+ /**
+ * Create a new surface with a backend that is compatible with this special image.
+ * TODO: switch this to makeSurface once we resolved the naming issue
+ * TODO (michaelludwig) - This is only used by SkTileImageFilter, which appears should be
+ * updated to work correctly with subsets and then makeTightSurface() can go away entirely.
+ */
+ sk_sp<SkSurface> makeTightSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at = kPremul_SkAlphaType) const;
+
+ /**
+ * Extract a subset of this special image and return it as a special image.
+ * It may or may not point to the same backing memory. The input 'subset' is relative to the
+ * special image's content rect.
+ */
+ sk_sp<SkSpecialImage> makeSubset(const SkIRect& subset) const;
+
+ /**
+ * Create an SkImage from the contents of this special image optionally extracting a subset.
+ * It may or may not point to the same backing memory.
+ * Note: when no 'subset' parameter is specified the entire SkSpecialImage will be
+ * returned - including whatever extra padding may have resulted from a loose fit!
+ * When the 'subset' parameter is specified the returned image will be tight even if that
+ * entails a copy! The 'subset' is relative to this special image's content rect.
+ */
+ sk_sp<SkImage> asImage(const SkIRect* subset = nullptr) const;
+
+ /**
+ * If the SpecialImage is backed by a gpu texture, return true.
+ */
+ bool isTextureBacked() const;
+
+ /**
+ * Return the GrRecordingContext if the SkSpecialImage is GrTexture-backed
+ */
+ GrRecordingContext* getContext() const;
+
+#if SK_SUPPORT_GPU
+ /**
+ * Regardless of how the underlying backing data is stored, returns the contents as a
+ * GrTextureProxy. The returned proxy represents the entire backing image, so texture
+ * coordinates must be mapped from the content rect (e.g. relative to 'subset()') to the proxy's
+ * space (offset by subset().topLeft()).
+ */
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*) const;
+#endif
+
+ /**
+ * Regardless of the underlying backing store, return the contents as an SkBitmap.
+ * The returned bitmap represents the subset accessed by this image, thus (0,0) refers to the
+ * top-left corner of 'subset'.
+ */
+ bool getROPixels(SkBitmap*) const;
+
+protected:
+ SkSpecialImage(const SkIRect& subset, uint32_t uniqueID, const SkSurfaceProps*);
+
+private:
+ const SkSurfaceProps fProps;
+ const SkIRect fSubset;
+ const uint32_t fUniqueID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.cpp b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
new file mode 100644
index 0000000000..2f34fa03d0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#include "include/core/SkCanvas.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkSurfacePriv.h"
+
+///////////////////////////////////////////////////////////////////////////////
+class SkSpecialSurface_Base : public SkSpecialSurface {
+public:
+ SkSpecialSurface_Base(const SkIRect& subset, const SkSurfaceProps* props)
+ : INHERITED(subset, props)
+ , fCanvas(nullptr) {
+ }
+
+ virtual ~SkSpecialSurface_Base() { }
+
+ // reset is called after an SkSpecialImage has been snapped
+ void reset() { fCanvas.reset(); }
+
+ // This can return nullptr if reset has already been called or something went wrong in the ctor
+ SkCanvas* onGetCanvas() { return fCanvas.get(); }
+
+ virtual sk_sp<SkSpecialImage> onMakeImageSnapshot() = 0;
+
+protected:
+ std::unique_ptr<SkCanvas> fCanvas; // initialized by derived classes in ctors
+
+private:
+ typedef SkSpecialSurface INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+static SkSpecialSurface_Base* as_SB(SkSpecialSurface* surface) {
+ return static_cast<SkSpecialSurface_Base*>(surface);
+}
+
+SkSpecialSurface::SkSpecialSurface(const SkIRect& subset,
+ const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props).flags(), kUnknown_SkPixelGeometry)
+ , fSubset(subset) {
+ SkASSERT(fSubset.width() > 0);
+ SkASSERT(fSubset.height() > 0);
+}
+
+SkCanvas* SkSpecialSurface::getCanvas() {
+ return as_SB(this)->onGetCanvas();
+}
+
+sk_sp<SkSpecialImage> SkSpecialSurface::makeImageSnapshot() {
+ sk_sp<SkSpecialImage> image(as_SB(this)->onMakeImageSnapshot());
+ as_SB(this)->reset();
+ return image; // the caller gets the creation ref
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#include "include/core/SkMallocPixelRef.h"
+
+class SkSpecialSurface_Raster : public SkSpecialSurface_Base {
+public:
+ SkSpecialSurface_Raster(const SkImageInfo& info,
+ sk_sp<SkPixelRef> pr,
+ const SkIRect& subset,
+ const SkSurfaceProps* props)
+ : INHERITED(subset, props) {
+ SkASSERT(info.width() == pr->width() && info.height() == pr->height());
+ fBitmap.setInfo(info, info.minRowBytes());
+ fBitmap.setPixelRef(std::move(pr), 0, 0);
+
+ fCanvas.reset(new SkCanvas(fBitmap, this->props()));
+ fCanvas->clipRect(SkRect::Make(subset));
+#ifdef SK_IS_BOT
+ fCanvas->clear(SK_ColorRED); // catch any imageFilter sloppiness
+#endif
+ }
+
+ ~SkSpecialSurface_Raster() override { }
+
+ sk_sp<SkSpecialImage> onMakeImageSnapshot() override {
+ return SkSpecialImage::MakeFromRaster(this->subset(), fBitmap, &this->props());
+ }
+
+private:
+ SkBitmap fBitmap;
+
+ typedef SkSpecialSurface_Base INHERITED;
+};
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeFromBitmap(const SkIRect& subset, SkBitmap& bm,
+ const SkSurfaceProps* props) {
+ if (subset.isEmpty() || !SkSurfaceValidateRasterInfo(bm.info(), bm.rowBytes())) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSpecialSurface_Raster>(bm.info(), sk_ref_sp(bm.pixelRef()), subset, props);
+}
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRaster(const SkImageInfo& info,
+ const SkSurfaceProps* props) {
+ if (!SkSurfaceValidateRasterInfo(info)) {
+ return nullptr;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, 0);
+ if (!pr) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeWH(info.width(), info.height());
+
+ return sk_make_sp<SkSpecialSurface_Raster>(info, std::move(pr), subset, props);
+}
+
+#if SK_SUPPORT_GPU
+///////////////////////////////////////////////////////////////////////////////
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGpuDevice.h"
+
+class SkSpecialSurface_Gpu : public SkSpecialSurface_Base {
+public:
+ SkSpecialSurface_Gpu(GrRecordingContext* context,
+ std::unique_ptr<GrRenderTargetContext> renderTargetContext,
+ int width, int height, const SkIRect& subset)
+ : INHERITED(subset, &renderTargetContext->surfaceProps())
+ , fProxy(renderTargetContext->asTextureProxyRef()) {
+ // CONTEXT TODO: remove this use of 'backdoor' to create an SkGpuDevice
+ auto device = SkGpuDevice::Make(context->priv().backdoor(), std::move(renderTargetContext),
+ SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return;
+ }
+
+ fCanvas.reset(new SkCanvas(std::move(device)));
+ fCanvas->clipRect(SkRect::Make(subset));
+#ifdef SK_IS_BOT
+ fCanvas->clear(SK_ColorRED); // catch any imageFilter sloppiness
+#endif
+ }
+
+ sk_sp<SkSpecialImage> onMakeImageSnapshot() override {
+ if (!fProxy) {
+ return nullptr;
+ }
+ GrColorType ct = SkColorTypeToGrColorType(fCanvas->imageInfo().colorType());
+
+ return SkSpecialImage::MakeDeferredFromGpu(fCanvas->getGrContext(),
+ this->subset(),
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(fProxy), ct,
+ fCanvas->imageInfo().refColorSpace(),
+ &this->props());
+ }
+
+private:
+ sk_sp<GrTextureProxy> fProxy;
+ typedef SkSpecialSurface_Base INHERITED;
+};
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRenderTarget(GrRecordingContext* context,
+ int width, int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ if (!context) {
+ return nullptr;
+ }
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox, width, height, colorType, std::move(colorSpace), 1,
+ GrMipMapped::kNo, kBottomLeft_GrSurfaceOrigin, props);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeWH(width, height);
+
+ return sk_make_sp<SkSpecialSurface_Gpu>(context, std::move(renderTargetContext),
+ width, height, subset);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.h b/gfx/skia/skia/src/core/SkSpecialSurface.h
new file mode 100644
index 0000000000..f890e1e0ac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#ifndef SkSpecialSurface_DEFINED
+#define SkSpecialSurface_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrTypesPriv.h"
+#endif
+
+class GrBackendFormat;
+class GrContext;
+class GrRecordingContext;
+class SkBitmap;
+class SkCanvas;
+class SkSpecialImage;
+
+/**
+ * SkSpecialSurface is a restricted form of SkSurface solely for internal use. It differs
+ * from SkSurface in that:
+ * - it can be backed by GrTextures larger than [ fWidth, fHeight ]
+ * - it can't be used for tiling
+ * - it becomes inactive once a snapshot of it is taken (i.e., no copy-on-write)
+ * - it has no generation ID
+ */
+class SkSpecialSurface : public SkRefCnt {
+public:
+ const SkSurfaceProps& props() const { return fProps; }
+
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+
+ /**
+ * Return a canvas that will draw into this surface. This will always
+ * return the same canvas for a given surface, and is managed/owned by the
+ * surface.
+ *
+ * The canvas will be invalid after 'makeImageSnapshot' is called.
+ */
+ SkCanvas* getCanvas();
+
+ /**
+ * Returns an image of the current state of the surface pixels up to this
+ * point. The canvas returned by 'getCanvas' becomes invalidated by this
+ * call and no more drawing to this surface is allowed.
+ *
+ * Note: the caller inherits a ref from this call that must be balanced
+ */
+ sk_sp<SkSpecialImage> makeImageSnapshot();
+
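+ /**
+ * A typical lifecycle, as a sketch ('info' and 'paint' are hypothetical;
+ * raster-backed for illustration):
+ *
+ * sk_sp<SkSpecialSurface> surf = SkSpecialSurface::MakeRaster(info);
+ * surf->getCanvas()->drawPaint(paint); // draw through the canvas
+ * sk_sp<SkSpecialImage> snap = surf->makeImageSnapshot();
+ * // 'surf' is now inactive; getCanvas() returns nullptr from here on.
+ */
+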
+#if SK_SUPPORT_GPU
+ /**
+ * Allocate a new GPU-backed SkSpecialSurface. If the requested surface cannot
+ * be created, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRenderTarget(GrRecordingContext*, int width, int height,
+ GrColorType, sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr);
+#endif
+
+ /**
+ * Use an existing SkBitmap as the backing store.
+ */
+ static sk_sp<SkSpecialSurface> MakeFromBitmap(const SkIRect& subset, SkBitmap& bm,
+ const SkSurfaceProps* = nullptr);
+
+ /**
+ * Return a new CPU-backed surface, with the memory for the pixels automatically
+ * allocated.
+ *
+ * If the requested surface cannot be created, or the request is not a
+ * supported configuration, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRaster(const SkImageInfo&,
+ const SkSurfaceProps* = nullptr);
+
+protected:
+ SkSpecialSurface(const SkIRect& subset, const SkSurfaceProps*);
+
+ // For testing only
+ friend class TestingSpecialSurfaceAccess;
+ const SkIRect& subset() const { return fSubset; }
+
+private:
+ const SkSurfaceProps fProps;
+ const SkIRect fSubset;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpinlock.cpp b/gfx/skia/skia/src/core/SkSpinlock.cpp
new file mode 100644
index 0000000000..59ea2b7cb9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpinlock.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSpinlock.h"
+
+#if 0
+ #include "include/private/SkMutex.h"
+ #include <execinfo.h>
+ #include <stdio.h>
+
+ static void debug_trace() {
+ void* stack[64];
+ int len = backtrace(stack, SK_ARRAY_COUNT(stack));
+
+ // As you might imagine, we can't use an SkSpinlock here...
+ static SkMutex lock;
+ {
+ SkAutoMutexExclusive locked(lock);
+ fprintf(stderr, "\n");
+ backtrace_symbols_fd(stack, len, 2/*stderr*/);
+ fprintf(stderr, "\n");
+ }
+ }
+#else
+ static void debug_trace() {}
+#endif
+
+// Renamed from "pause" to avoid conflict with function defined in unistd.h
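+// _mm_pause() hints to the CPU that this is a spin-wait loop, which reduces
+// power use and yields execution resources to a sibling hyperthread.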
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+ static void do_pause() { _mm_pause(); }
+#else
+ static void do_pause() { /*spin*/ }
+#endif
+
+void SkSpinlock::contendedAcquire() {
+ debug_trace();
+
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ while (fLocked.exchange(true, std::memory_order_acquire)) {
+ do_pause();
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter.h b/gfx/skia/skia/src/core/SkSpriteBlitter.h
new file mode 100644
index 0000000000..48a873a5ff
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpriteBlitter_DEFINED
+#define SkSpriteBlitter_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkShader.h"
+#include "src/core/SkBlitter.h"
+
+class SkPaint;
+
+// SkSpriteBlitter specializes SkBlitter for moving large rectangles of pixels around.
+// Because of this use, the main primitive shifts from per-scanline blitH calls to the more
+// efficient blitRect.
+class SkSpriteBlitter : public SkBlitter {
+public:
+ SkSpriteBlitter(const SkPixmap& source);
+
+ virtual void setup(const SkPixmap& dst, int left, int top, const SkPaint&);
+
+ // blitH, blitAntiH, blitV and blitMask should not be called on an SkSpriteBlitter.
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+
+ // A SkSpriteBlitter must implement blitRect.
+ void blitRect(int x, int y, int width, int height) override = 0;
+
+ static SkSpriteBlitter* ChooseL32(const SkPixmap& source, const SkPaint&, SkArenaAlloc*);
+ static SkSpriteBlitter* ChooseL565(const SkPixmap& source, const SkPaint&, SkArenaAlloc*);
+ static SkSpriteBlitter* ChooseLA8(const SkPixmap& source, const SkPaint&, SkArenaAlloc*);
+
+protected:
+ SkPixmap fDst;
+ const SkPixmap fSource;
+ int fLeft, fTop;
+ const SkPaint* fPaint;
+
+private:
+ typedef SkBlitter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
new file mode 100644
index 0000000000..457929dc32
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkSpriteBlitter.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/utils/SkUTF.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32 : public SkSpriteBlitter {
+public:
+ Sprite_D32_S32(const SkPixmap& src, U8CPU alpha) : INHERITED(src) {
+ SkASSERT(src.colorType() == kN32_SkColorType);
+
+ unsigned flags32 = 0;
+ if (255 != alpha) {
+ flags32 |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ if (!src.isOpaque()) {
+ flags32 |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+
+ fProc32 = SkBlitRow::Factory32(flags32);
+ fAlpha = alpha;
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkBlitRow::Proc32 proc = fProc32;
+ U8CPU alpha = fAlpha;
+
+ do {
+ proc(dst, src, width, alpha);
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ SkBlitRow::Proc32 fProc32;
+ U8CPU fAlpha;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32A_Xfer: public SkSpriteBlitter {
+public:
+ Sprite_D32_S32A_Xfer(const SkPixmap& source, const SkPaint& paint) : SkSpriteBlitter(source) {
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+ SkASSERT(fXfermode);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkXfermode* xfermode = fXfermode;
+
+ do {
+ xfermode->xfer32(dst, src, width, nullptr);
+
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+protected:
+ SkXfermode* fXfermode;
+
+private:
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseL32(const SkPixmap& source, const SkPaint& paint,
+ SkArenaAlloc* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getColorFilter() != nullptr) {
+ return nullptr;
+ }
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ U8CPU alpha = paint.getAlpha();
+
+ if (source.colorType() == kN32_SkColorType) {
+ if (paint.isSrcOver()) {
+ // this can handle alpha, but not xfermode
+ return allocator->make<Sprite_D32_S32>(source, alpha);
+ }
+ if (255 == alpha) {
+ // this can handle an xfermode, but not alpha
+ return allocator->make<Sprite_D32_S32A_Xfer>(source, paint);
+ }
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter_RGB565.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter_RGB565.cpp
new file mode 100644
index 0000000000..d0d5a99c97
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter_RGB565.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkSpriteBlitter.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/utils/SkUTF.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void S32_src(uint16_t dst[], const SkPMColor src[], int count) {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkPixel32ToPixel16(src[i]);
+ }
+}
+
+static void S32_srcover(uint16_t dst[], const SkPMColor src[], int count) {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkSrcOver32To16(src[i], dst[i]);
+ }
+}
+
+class Sprite_D16_S32 : public SkSpriteBlitter {
+public:
+ Sprite_D16_S32(const SkPixmap& src, SkBlendMode mode) : INHERITED(src) {
+ SkASSERT(src.colorType() == kN32_SkColorType);
+ SkASSERT(mode == SkBlendMode::kSrc || mode == SkBlendMode::kSrcOver);
+
+ fUseSrcOver = (mode == SkBlendMode::kSrcOver) && !src.isOpaque();
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint16_t* SK_RESTRICT dst = fDst.writable_addr16(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ do {
+ if (fUseSrcOver) {
+ S32_srcover(dst, src, width);
+ } else {
+ S32_src(dst, src, width);
+ }
+
+ dst = (uint16_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ bool fUseSrcOver;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseL565(const SkPixmap& source, const SkPaint& paint,
+ SkArenaAlloc* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getColorFilter() != nullptr) {
+ return nullptr;
+ }
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ U8CPU alpha = paint.getAlpha();
+ if (alpha != 0xFF) {
+ return nullptr;
+ }
+
+ if (source.colorType() == kN32_SkColorType) {
+ switch (paint.getBlendMode()) {
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcOver:
+ return allocator->make<Sprite_D16_S32>(source, paint.getBlendMode());
+ default:
+ break;
+ }
+ }
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
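+// Approximates (a * b) / 255 with rounding: 257 / 65536 is nearly 1 / 255, so
+// the product is scaled by 257 and shifted down 16 bits, with +127 as the
+// rounding bias. For 8-bit inputs the result is within one of the exact value.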
+static unsigned div255(unsigned a, unsigned b) {
+ return (a * b * 257 + 127) >> 16;
+}
+
+static void S32_src_da8(uint8_t dst[], const SkPMColor src[], int count) {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = SkGetPackedA32(src[i]);
+ }
+}
+
+static void S32_srcover_da8(uint8_t dst[], const SkPMColor src[], int count) {
+ for (int i = 0; i < count; ++i) {
+ SkPMColor c = src[i];
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+ if (a == 0xFF) {
+ dst[i] = 0xFF;
+ } else {
+ dst[i] = a + div255(255 - a, dst[i]);
+ }
+ }
+ }
+}
+
+class Sprite_D8_S32 : public SkSpriteBlitter {
+public:
+ Sprite_D8_S32(const SkPixmap& src, SkBlendMode mode) : INHERITED(src) {
+ SkASSERT(src.colorType() == kN32_SkColorType);
+ SkASSERT(mode == SkBlendMode::kSrc || mode == SkBlendMode::kSrcOver);
+
+ fUseSrcOver = (mode == SkBlendMode::kSrcOver) && !src.isOpaque();
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint8_t* SK_RESTRICT dst = fDst.writable_addr8(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ do {
+ if (fUseSrcOver) {
+ S32_srcover_da8(dst, src, width);
+ } else {
+ S32_src_da8(dst, src, width);
+ }
+
+ dst = (uint8_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ bool fUseSrcOver;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseLA8(const SkPixmap& source, const SkPaint& paint,
+ SkArenaAlloc* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getColorFilter() != nullptr) {
+ return nullptr;
+ }
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ U8CPU alpha = paint.getAlpha();
+ if (alpha != 0xFF) {
+ return nullptr;
+ }
+
+ if (source.colorType() == kN32_SkColorType) {
+ switch (paint.getBlendMode()) {
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcOver:
+ return allocator->make<Sprite_D8_S32>(source, paint.getBlendMode());
+ default:
+ break;
+ }
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkStream.cpp b/gfx/skia/skia/src/core/SkStream.cpp
new file mode 100644
index 0000000000..2bdd47feb4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStream.cpp
@@ -0,0 +1,948 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkTFitsIn.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkStreamPriv.h"
+
+#include <limits>
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkStream::readS8(int8_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readS16(int16_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readS32(int32_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readScalar(SkScalar* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+#define SK_MAX_BYTE_FOR_U8 0xFD
+#define SK_BYTE_SENTINEL_FOR_U16 0xFE
+#define SK_BYTE_SENTINEL_FOR_U32 0xFF
+
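+// Wire-format sketch: a packed size_t occupies a single byte when the value
+// is at most 0xFD; otherwise a sentinel byte (0xFE or 0xFF) comes first,
+// followed by the value as a host-endian uint16_t or uint32_t respectively.
+// E.g. 10 -> [0x0A], 300 -> [0xFE, u16(300)], 100000 -> [0xFF, u32(100000)].
+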
+bool SkStream::readPackedUInt(size_t* i) {
+ uint8_t byte;
+ if (!this->read(&byte, 1)) {
+ return false;
+ }
+ if (SK_BYTE_SENTINEL_FOR_U16 == byte) {
+ uint16_t i16;
+ if (!this->readU16(&i16)) { return false; }
+ *i = i16;
+ } else if (SK_BYTE_SENTINEL_FOR_U32 == byte) {
+ uint32_t i32;
+ if (!this->readU32(&i32)) { return false; }
+ *i = i32;
+ } else {
+ *i = byte;
+ }
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkWStream::~SkWStream()
+{
+}
+
+void SkWStream::flush()
+{
+}
+
+bool SkWStream::writeDecAsText(int32_t dec)
+{
+ char buffer[SkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeBigDecAsText(int64_t dec, int minDigits)
+{
+ char buffer[SkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeHexAsText(uint32_t hex, int digits)
+{
+ SkString tmp;
+ tmp.appendHex(hex, digits);
+ return this->write(tmp.c_str(), tmp.size());
+}
+
+bool SkWStream::writeScalarAsText(SkScalar value)
+{
+ char buffer[SkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeScalar(SkScalar value) {
+ return this->write(&value, sizeof(value));
+}
+
+int SkWStream::SizeOfPackedUInt(size_t value) {
+ if (value <= SK_MAX_BYTE_FOR_U8) {
+ return 1;
+ } else if (value <= 0xFFFF) {
+ return 3;
+ }
+ return 5;
+}
+
+bool SkWStream::writePackedUInt(size_t value) {
+ uint8_t data[5];
+ size_t len = 1;
+ if (value <= SK_MAX_BYTE_FOR_U8) {
+ data[0] = value;
+ len = 1;
+ } else if (value <= 0xFFFF) {
+ uint16_t value16 = value;
+ data[0] = SK_BYTE_SENTINEL_FOR_U16;
+ memcpy(&data[1], &value16, 2);
+ len = 3;
+ } else {
+ uint32_t value32 = SkToU32(value);
+ data[0] = SK_BYTE_SENTINEL_FOR_U32;
+ memcpy(&data[1], &value32, 4);
+ len = 5;
+ }
+ return this->write(data, len);
+}
+
+bool SkWStream::writeStream(SkStream* stream, size_t length) {
+ char scratch[1024];
+ const size_t MAX = sizeof(scratch);
+
+ while (length != 0) {
+ size_t n = length;
+ if (n > MAX) {
+ n = MAX;
+ }
+ if (stream->read(scratch, n) != n) {
+ return false;
+ }
+ if (!this->write(scratch, n)) {
+ return false;
+ }
+ length -= n;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFILEStream::SkFILEStream(std::shared_ptr<FILE> file, size_t size,
+ size_t offset, size_t originalOffset)
+ : fFILE(std::move(file))
+ , fSize(size)
+ , fOffset(SkTMin(offset, fSize))
+ , fOriginalOffset(SkTMin(originalOffset, fSize))
+{ }
+
+SkFILEStream::SkFILEStream(std::shared_ptr<FILE> file, size_t size, size_t offset)
+ : SkFILEStream(std::move(file), size, offset, offset)
+{ }
+
+SkFILEStream::SkFILEStream(FILE* file)
+ : SkFILEStream(std::shared_ptr<FILE>(file, sk_fclose),
+ file ? sk_fgetsize(file) : 0,
+ file ? sk_ftell(file) : 0)
+{ }
+
+
+SkFILEStream::SkFILEStream(const char path[])
+ : SkFILEStream(path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr)
+{ }
+
+SkFILEStream::~SkFILEStream() {
+ this->close();
+}
+
+void SkFILEStream::close() {
+ fFILE.reset();
+ fSize = 0;
+ fOffset = 0;
+}
+
+size_t SkFILEStream::read(void* buffer, size_t size) {
+ if (size > fSize - fOffset) {
+ size = fSize - fOffset;
+ }
+ size_t bytesRead = size;
+ if (buffer) {
+ bytesRead = sk_qread(fFILE.get(), buffer, size, fOffset);
+ }
+ if (bytesRead == SIZE_MAX) {
+ return 0;
+ }
+ fOffset += bytesRead;
+ return bytesRead;
+}
+
+bool SkFILEStream::isAtEnd() const {
+ if (fOffset == fSize) {
+ return true;
+ }
+ return fOffset >= sk_fgetsize(fFILE.get());
+}
+
+bool SkFILEStream::rewind() {
+ fOffset = fOriginalOffset;
+ return true;
+}
+
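+// onDuplicate() starts the copy at the original offset; onFork() (further
+// below) also preserves the current read position.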
+SkStreamAsset* SkFILEStream::onDuplicate() const {
+ return new SkFILEStream(fFILE, fSize, fOriginalOffset, fOriginalOffset);
+}
+
+size_t SkFILEStream::getPosition() const {
+ SkASSERT(fOffset >= fOriginalOffset);
+ return fOffset - fOriginalOffset;
+}
+
+bool SkFILEStream::seek(size_t position) {
+ fOffset = SkTMin(SkSafeMath::Add(position, fOriginalOffset), fSize);
+ return true;
+}
+
+bool SkFILEStream::move(long offset) {
+ if (offset < 0) {
+ if (offset == std::numeric_limits<long>::min()
+ || !SkTFitsIn<size_t>(-offset)
+ || (size_t) (-offset) >= this->getPosition()) {
+ fOffset = fOriginalOffset;
+ } else {
+ fOffset += offset;
+ }
+ } else if (!SkTFitsIn<size_t>(offset)) {
+ fOffset = fSize;
+ } else {
+ fOffset = SkTMin(SkSafeMath::Add(fOffset, (size_t) offset), fSize);
+ }
+
+ SkASSERT(fOffset >= fOriginalOffset && fOffset <= fSize);
+ return true;
+}
+
+SkStreamAsset* SkFILEStream::onFork() const {
+ return new SkFILEStream(fFILE, fSize, fOffset, fOriginalOffset);
+}
+
+size_t SkFILEStream::getLength() const {
+ return fSize - fOriginalOffset;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkData> newFromParams(const void* src, size_t size, bool copyData) {
+ if (copyData) {
+ return SkData::MakeWithCopy(src, size);
+ } else {
+ return SkData::MakeWithoutCopy(src, size);
+ }
+}
+
+SkMemoryStream::SkMemoryStream() {
+ fData = SkData::MakeEmpty();
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(size_t size) {
+ fData = SkData::MakeUninitialized(size);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(sk_sp<SkData> data) : fData(std::move(data)) {
+ if (nullptr == fData) {
+ fData = SkData::MakeEmpty();
+ }
+ fOffset = 0;
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::MakeCopy(const void* data, size_t length) {
+ return skstd::make_unique<SkMemoryStream>(data, length, true);
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::MakeDirect(const void* data, size_t length) {
+ return skstd::make_unique<SkMemoryStream>(data, length, false);
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::Make(sk_sp<SkData> data) {
+ return skstd::make_unique<SkMemoryStream>(std::move(data));
+}
+
+void SkMemoryStream::setMemoryOwned(const void* src, size_t size) {
+ fData = SkData::MakeFromMalloc(src, size);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setMemory(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setData(sk_sp<SkData> data) {
+ if (nullptr == data) {
+ fData = SkData::MakeEmpty();
+ } else {
+ fData = data;
+ }
+ fOffset = 0;
+}
+
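+// -(int)fOffset & 0x03 computes the number of bytes needed to round fOffset
+// up to the next multiple of 4 (e.g. 5 -> 3, 8 -> 0): in two's complement the
+// low two bits of -x equal (4 - (x & 3)) & 3.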
+void SkMemoryStream::skipToAlign4() {
+ // cast to remove unary-minus warning
+ fOffset += -(int)fOffset & 0x03;
+}
+
+size_t SkMemoryStream::read(void* buffer, size_t size) {
+ size_t dataSize = fData->size();
+
+ if (size > dataSize - fOffset) {
+ size = dataSize - fOffset;
+ }
+ if (buffer) {
+ memcpy(buffer, fData->bytes() + fOffset, size);
+ }
+ fOffset += size;
+ return size;
+}
+
+size_t SkMemoryStream::peek(void* buffer, size_t size) const {
+ SkASSERT(buffer != nullptr);
+
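+ // read() advances fOffset, so emulate a non-mutating peek by saving the
+ // offset, reading, and restoring it before returning.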
+ const size_t currentOffset = fOffset;
+ SkMemoryStream* nonConstThis = const_cast<SkMemoryStream*>(this);
+ const size_t bytesRead = nonConstThis->read(buffer, size);
+ nonConstThis->fOffset = currentOffset;
+ return bytesRead;
+}
+
+bool SkMemoryStream::isAtEnd() const {
+ return fOffset == fData->size();
+}
+
+bool SkMemoryStream::rewind() {
+ fOffset = 0;
+ return true;
+}
+
+SkMemoryStream* SkMemoryStream::onDuplicate() const {
+ return new SkMemoryStream(fData);
+}
+
+size_t SkMemoryStream::getPosition() const {
+ return fOffset;
+}
+
+bool SkMemoryStream::seek(size_t position) {
+ fOffset = position > fData->size()
+ ? fData->size()
+ : position;
+ return true;
+}
+
+bool SkMemoryStream::move(long offset) {
+ return this->seek(fOffset + offset);
+}
+
+SkMemoryStream* SkMemoryStream::onFork() const {
+ std::unique_ptr<SkMemoryStream> that(this->duplicate());
+ that->seek(fOffset);
+ return that.release();
+}
+
+size_t SkMemoryStream::getLength() const {
+ return fData->size();
+}
+
+const void* SkMemoryStream::getMemoryBase() {
+ return fData->data();
+}
+
+const void* SkMemoryStream::getAtPos() {
+ return fData->bytes() + fOffset;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkFILEWStream::SkFILEWStream(const char path[])
+{
+ fFILE = sk_fopen(path, kWrite_SkFILE_Flag);
+}
+
+SkFILEWStream::~SkFILEWStream()
+{
+ if (fFILE) {
+ sk_fclose(fFILE);
+ }
+}
+
+size_t SkFILEWStream::bytesWritten() const {
+ return sk_ftell(fFILE);
+}
+
+bool SkFILEWStream::write(const void* buffer, size_t size)
+{
+ if (fFILE == nullptr) {
+ return false;
+ }
+
+ if (sk_fwrite(buffer, size, fFILE) != size)
+ {
+ SkDEBUGCODE(SkDebugf("SkFILEWStream failed writing %zu bytes\n", size);)
+ sk_fclose(fFILE);
+ fFILE = nullptr;
+ return false;
+ }
+ return true;
+}
+
+void SkFILEWStream::flush()
+{
+ if (fFILE) {
+ sk_fflush(fFILE);
+ }
+}
+
+void SkFILEWStream::fsync()
+{
+ flush();
+ if (fFILE) {
+ sk_fsync(fFILE);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+
+static inline void sk_memcpy_4bytes(void* dst, const void* src, size_t size) {
+ if (size == 4) {
+ memcpy(dst, src, 4);
+ } else {
+ memcpy(dst, src, size);
+ }
+}
+
+#define SkDynamicMemoryWStream_MinBlockSize 4096
+
+struct SkDynamicMemoryWStream::Block {
+ Block* fNext;
+ char* fCurr;
+ char* fStop;
+
+ const char* start() const { return (const char*)(this + 1); }
+ char* start() { return (char*)(this + 1); }
+ size_t avail() const { return fStop - fCurr; }
+ size_t written() const { return fCurr - this->start(); }
+
+ void init(size_t size) {
+ fNext = nullptr;
+ fCurr = this->start();
+ fStop = this->start() + size;
+ }
+
+ const void* append(const void* data, size_t size) {
+ SkASSERT((size_t)(fStop - fCurr) >= size);
+ sk_memcpy_4bytes(fCurr, data, size);
+ fCurr += size;
+ return (const void*)((const char*)data + size);
+ }
+};
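+
+// Layout sketch: each Block is one heap allocation, header followed directly
+// by its payload (start() == (char*)(this + 1)). Blocks form a singly linked
+// list; write() only opens a new block once the tail is exhausted, and
+// fBytesWrittenBeforeTail caches the total written into the non-tail blocks
+// (see validate()).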
+
+SkDynamicMemoryWStream::SkDynamicMemoryWStream(SkDynamicMemoryWStream&& other)
+ : fHead(other.fHead)
+ , fTail(other.fTail)
+ , fBytesWrittenBeforeTail(other.fBytesWrittenBeforeTail)
+{
+ other.fHead = nullptr;
+ other.fTail = nullptr;
+ other.fBytesWrittenBeforeTail = 0;
+}
+
+SkDynamicMemoryWStream& SkDynamicMemoryWStream::operator=(SkDynamicMemoryWStream&& other) {
+ if (this != &other) {
+ this->~SkDynamicMemoryWStream();
+ new (this) SkDynamicMemoryWStream(std::move(other));
+ }
+ return *this;
+}
+
+SkDynamicMemoryWStream::~SkDynamicMemoryWStream() {
+ this->reset();
+}
+
+void SkDynamicMemoryWStream::reset() {
+ Block* block = fHead;
+ while (block != nullptr) {
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+}
+
+size_t SkDynamicMemoryWStream::bytesWritten() const {
+ this->validate();
+
+ if (fTail) {
+ return fBytesWrittenBeforeTail + fTail->written();
+ }
+ return 0;
+}
+
+bool SkDynamicMemoryWStream::write(const void* buffer, size_t count) {
+ if (count > 0) {
+ SkASSERT(buffer);
+ size_t size;
+
+ if (fTail) {
+ if (fTail->avail() > 0) {
+ size = SkTMin(fTail->avail(), count);
+ buffer = fTail->append(buffer, size);
+ SkASSERT(count >= size);
+ count -= size;
+ if (count == 0) {
+ return true;
+ }
+ }
+ // If we get here, we've just exhausted fTail, so update our tracker
+ fBytesWrittenBeforeTail += fTail->written();
+ }
+
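+ // New blocks hold at least (SkDynamicMemoryWStream_MinBlockSize -
+ // sizeof(Block)) bytes and are always sized for the entire remaining
+ // payload, so a single write() never spans two freshly allocated blocks.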
+ size = SkTMax<size_t>(count, SkDynamicMemoryWStream_MinBlockSize - sizeof(Block));
+ size = SkAlign4(size); // ensure we're always a multiple of 4 (see padToAlign4())
+
+ Block* block = (Block*)sk_malloc_throw(sizeof(Block) + size);
+ block->init(size);
+ block->append(buffer, count);
+
+ if (fTail != nullptr) {
+ fTail->fNext = block;
+ } else {
+ fHead = fTail = block;
+ }
+ fTail = block;
+ this->validate();
+ }
+ return true;
+}
+
+bool SkDynamicMemoryWStream::writeToAndReset(SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(dst != this);
+ if (0 == this->bytesWritten()) {
+ return true;
+ }
+ if (0 == dst->bytesWritten()) {
+ *dst = std::move(*this);
+ return true;
+ }
+ dst->fTail->fNext = fHead;
+ dst->fBytesWrittenBeforeTail += fBytesWrittenBeforeTail + dst->fTail->written();
+ dst->fTail = fTail;
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return true;
+}
+
+void SkDynamicMemoryWStream::prependToAndReset(SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(dst != this);
+ if (0 == this->bytesWritten()) {
+ return;
+ }
+ if (0 == dst->bytesWritten()) {
+ *dst = std::move(*this);
+ return;
+ }
+ fTail->fNext = dst->fHead;
+ dst->fHead = fHead;
+ dst->fBytesWrittenBeforeTail += fBytesWrittenBeforeTail + fTail->written();
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return;
+}
+
+
+bool SkDynamicMemoryWStream::read(void* buffer, size_t offset, size_t count) {
+ if (offset + count > this->bytesWritten()) {
+ return false; // test does not partially modify
+ }
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t size = block->written();
+ if (offset < size) {
+ size_t part = offset + count > size ? size - offset : count;
+ memcpy(buffer, block->start() + offset, part);
+ if (count <= part) {
+ return true;
+ }
+ count -= part;
+ buffer = (void*) ((char* ) buffer + part);
+ }
+ offset = offset > size ? offset - size : 0;
+ block = block->fNext;
+ }
+ return false;
+}
+
+void SkDynamicMemoryWStream::copyTo(void* dst) const {
+ SkASSERT(dst);
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t size = block->written();
+ memcpy(dst, block->start(), size);
+ dst = (void*)((char*)dst + size);
+ block = block->fNext;
+ }
+}
+
+bool SkDynamicMemoryWStream::writeToStream(SkWStream* dst) const {
+ SkASSERT(dst);
+ for (Block* block = fHead; block != nullptr; block = block->fNext) {
+ if (!dst->write(block->start(), block->written())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkDynamicMemoryWStream::padToAlign4() {
+ // The contract is to write zeros until the entire stream has written a multiple of 4 bytes.
+ // Our Blocks are guaranteed always be (a) full (except the tail) and (b) a multiple of 4
+ // so it is sufficient to just examine the tail (if present).
+
+ if (fTail) {
+ // cast to remove unary-minus warning
+ int padBytes = -(int)fTail->written() & 0x03;
+ if (padBytes) {
+ int zero = 0;
+ fTail->append(&zero, padBytes);
+ }
+ }
+}
+
+
+void SkDynamicMemoryWStream::copyToAndReset(void* ptr) {
+ if (!ptr) {
+ this->reset();
+ return;
+ }
+ // By looping through the source and freeing as we copy, we
+ // can reduce real memory use with large streams.
+ char* dst = reinterpret_cast<char*>(ptr);
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t len = block->written();
+ memcpy(dst, block->start(), len);
+ dst += len;
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+}
+
+bool SkDynamicMemoryWStream::writeToAndReset(SkWStream* dst) {
+ SkASSERT(dst);
+ // By looping through the source and freeing as we copy, we
+ // can reduce real memory use with large streams.
+ bool dstStreamGood = true;
+ for (Block* block = fHead; block != nullptr; ) {
+ if (dstStreamGood && !dst->write(block->start(), block->written())) {
+ dstStreamGood = false;
+ }
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return dstStreamGood;
+}
+
+sk_sp<SkData> SkDynamicMemoryWStream::detachAsData() {
+ const size_t size = this->bytesWritten();
+ if (0 == size) {
+ return SkData::MakeEmpty();
+ }
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ this->copyToAndReset(data->writable_data());
+ return data;
+}
+
+#ifdef SK_DEBUG
+void SkDynamicMemoryWStream::validate() const {
+ if (!fHead) {
+ SkASSERT(!fTail);
+ SkASSERT(fBytesWrittenBeforeTail == 0);
+ return;
+ }
+ SkASSERT(fTail);
+
+ size_t bytes = 0;
+ const Block* block = fHead;
+ while (block) {
+ if (block->fNext) {
+ bytes += block->written();
+ }
+ block = block->fNext;
+ }
+ SkASSERT(bytes == fBytesWrittenBeforeTail);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkBlockMemoryRefCnt : public SkRefCnt {
+public:
+ explicit SkBlockMemoryRefCnt(SkDynamicMemoryWStream::Block* head) : fHead(head) { }
+
+ virtual ~SkBlockMemoryRefCnt() {
+ SkDynamicMemoryWStream::Block* block = fHead;
+ while (block != nullptr) {
+ SkDynamicMemoryWStream::Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+
+ SkDynamicMemoryWStream::Block* const fHead;
+};
+
+class SkBlockMemoryStream : public SkStreamAsset {
+public:
+ SkBlockMemoryStream(sk_sp<SkBlockMemoryRefCnt> headRef, size_t size)
+ : fBlockMemory(std::move(headRef)), fCurrent(fBlockMemory->fHead)
+ , fSize(size) , fOffset(0), fCurrentOffset(0) { }
+
+ size_t read(void* buffer, size_t rawCount) override {
+ size_t count = rawCount;
+ if (fOffset + count > fSize) {
+ count = fSize - fOffset;
+ }
+ size_t bytesLeftToRead = count;
+ while (fCurrent != nullptr) {
+ size_t bytesLeftInCurrent = fCurrent->written() - fCurrentOffset;
+ size_t bytesFromCurrent = SkTMin(bytesLeftToRead, bytesLeftInCurrent);
+ if (buffer) {
+ memcpy(buffer, fCurrent->start() + fCurrentOffset, bytesFromCurrent);
+ buffer = SkTAddOffset<void>(buffer, bytesFromCurrent);
+ }
+ if (bytesLeftToRead <= bytesFromCurrent) {
+ fCurrentOffset += bytesFromCurrent;
+ fOffset += count;
+ return count;
+ }
+ bytesLeftToRead -= bytesFromCurrent;
+ fCurrent = fCurrent->fNext;
+ fCurrentOffset = 0;
+ }
+ SkASSERT(false);
+ return 0;
+ }
+
+ bool isAtEnd() const override {
+ return fOffset == fSize;
+ }
+
+ size_t peek(void* buff, size_t bytesToPeek) const override {
+ SkASSERT(buff != nullptr);
+
+ bytesToPeek = SkTMin(bytesToPeek, fSize - fOffset);
+
+ size_t bytesLeftToPeek = bytesToPeek;
+ char* buffer = static_cast<char*>(buff);
+ const SkDynamicMemoryWStream::Block* current = fCurrent;
+ size_t currentOffset = fCurrentOffset;
+ while (bytesLeftToPeek) {
+ SkASSERT(current);
+ size_t bytesFromCurrent = SkTMin(current->written() - currentOffset, bytesLeftToPeek);
+ memcpy(buffer, current->start() + currentOffset, bytesFromCurrent);
+ bytesLeftToPeek -= bytesFromCurrent;
+ buffer += bytesFromCurrent;
+ current = current->fNext;
+ currentOffset = 0;
+ }
+ return bytesToPeek;
+ }
+
+ bool rewind() override {
+ fCurrent = fBlockMemory->fHead;
+ fOffset = 0;
+ fCurrentOffset = 0;
+ return true;
+ }
+
+ SkBlockMemoryStream* onDuplicate() const override {
+ return new SkBlockMemoryStream(fBlockMemory, fSize);
+ }
+
+ size_t getPosition() const override {
+ return fOffset;
+ }
+
+ bool seek(size_t position) override {
+ // If possible, skip forward.
+ if (position >= fOffset) {
+ size_t skipAmount = position - fOffset;
+ return this->skip(skipAmount) == skipAmount;
+ }
+ // If possible, move backward within the current block.
+ size_t moveBackAmount = fOffset - position;
+ if (moveBackAmount <= fCurrentOffset) {
+ fCurrentOffset -= moveBackAmount;
+ fOffset -= moveBackAmount;
+ return true;
+ }
+ // Otherwise rewind and move forward.
+ return this->rewind() && this->skip(position) == position;
+ }
+
+ bool move(long offset) override {
+ return seek(fOffset + offset);
+ }
+
+ SkBlockMemoryStream* onFork() const override {
+ SkBlockMemoryStream* that = this->onDuplicate();
+ that->fCurrent = this->fCurrent;
+ that->fOffset = this->fOffset;
+ that->fCurrentOffset = this->fCurrentOffset;
+ return that;
+ }
+
+ size_t getLength() const override {
+ return fSize;
+ }
+
+ const void* getMemoryBase() override {
+ if (fBlockMemory->fHead && !fBlockMemory->fHead->fNext) {
+ return fBlockMemory->fHead->start();
+ }
+ return nullptr;
+ }
+
+private:
+ sk_sp<SkBlockMemoryRefCnt> const fBlockMemory;
+ SkDynamicMemoryWStream::Block const * fCurrent;
+ size_t const fSize;
+ size_t fOffset;
+ size_t fCurrentOffset;
+};
+
+std::unique_ptr<SkStreamAsset> SkDynamicMemoryWStream::detachAsStream() {
+ if (nullptr == fHead) {
+ // no need to reset.
+ return SkMemoryStream::Make(nullptr);
+ }
+ if (fHead == fTail) { // one block, may be worth shrinking.
+ ptrdiff_t used = fTail->fCurr - (char*)fTail;
+ fHead = fTail = (SkDynamicMemoryWStream::Block*)sk_realloc_throw(fTail, SkToSizeT(used));
+ fTail->fStop = fTail->fCurr = (char*)fTail + used; // Update pointers.
+ SkASSERT(nullptr == fTail->fNext);
+ SkASSERT(0 == fBytesWrittenBeforeTail);
+ }
+ std::unique_ptr<SkStreamAsset> stream
+ = skstd::make_unique<SkBlockMemoryStream>(sk_make_sp<SkBlockMemoryRefCnt>(fHead),
+ this->bytesWritten());
+ fHead = nullptr; // signal reset() to not free anything
+ this->reset();
+ return stream;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkData> mmap_filename(const char path[]) {
+ FILE* file = sk_fopen(path, kRead_SkFILE_Flag);
+ if (nullptr == file) {
+ return nullptr;
+ }
+
+ auto data = SkData::MakeFromFILE(file);
+ sk_fclose(file);
+ return data;
+}
+
+std::unique_ptr<SkStreamAsset> SkStream::MakeFromFile(const char path[]) {
+ auto data(mmap_filename(path));
+ if (data) {
+ return skstd::make_unique<SkMemoryStream>(std::move(data));
+ }
+
+ // If we get here, then our attempt at using mmap failed, so try normal file access.
+ auto stream = skstd::make_unique<SkFILEStream>(path);
+ if (!stream->isValid()) {
+ return nullptr;
+ }
+ return stream;
+}
+
+// Declared in SkStreamPriv.h:
+sk_sp<SkData> SkCopyStreamToData(SkStream* stream) {
+ SkASSERT(stream != nullptr);
+
+ if (stream->hasLength()) {
+ return SkData::MakeFromStream(stream, stream->getLength());
+ }
+
+ SkDynamicMemoryWStream tempStream;
+ const size_t bufferSize = 4096;
+ char buffer[bufferSize];
+ do {
+ size_t bytesRead = stream->read(buffer, bufferSize);
+ tempStream.write(buffer, bytesRead);
+ } while (!stream->isAtEnd());
+ return tempStream.detachAsData();
+}
+
+bool SkStreamCopy(SkWStream* out, SkStream* input) {
+ const char* base = static_cast<const char*>(input->getMemoryBase());
+ if (base && input->hasPosition() && input->hasLength()) {
+ // Shortcut that avoids the while loop.
+ size_t position = input->getPosition();
+ size_t length = input->getLength();
+ SkASSERT(length >= position);
+ return out->write(&base[position], length - position);
+ }
+ char scratch[4096];
+ size_t count;
+ while (true) {
+ count = input->read(scratch, sizeof(scratch));
+ if (0 == count) {
+ return true;
+ }
+ if (!out->write(scratch, count)) {
+ return false;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStreamPriv.h b/gfx/skia/skia/src/core/SkStreamPriv.h
new file mode 100644
index 0000000000..d9b6cc6b4f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStreamPriv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStreamPriv_DEFINED
+#define SkStreamPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkData;
+class SkStream;
+class SkWStream;
+
+/**
+ * Copy the provided stream to an SkData variable.
+ *
+ * Note: Assumes the stream is at the beginning. If it has a length,
+ * but is not at the beginning, this call will fail (return nullptr).
+ *
+ * @param stream SkStream to be copied into data.
+ * @return The resulting SkData after the copy, nullptr on failure.
+ */
+sk_sp<SkData> SkCopyStreamToData(SkStream* stream);
+
+/**
+ * Copies the input stream from the current position to the end.
+ * Does not rewind the input stream.
+ */
+bool SkStreamCopy(SkWStream* out, SkStream* input);
+
+#endif // SkStreamPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrike.cpp b/gfx/skia/skia/src/core/SkStrike.cpp
new file mode 100644
index 0000000000..1c444c8d04
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrike.cpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrike.h"
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkMakeUnique.h"
+#include <cctype>
+
+SkStrike::SkStrike(
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ const SkFontMetrics& fontMetrics)
+ : fDesc{desc}
+ , fScalerContext{std::move(scaler)}
+ , fFontMetrics{fontMetrics}
+ , fRoundingSpec{fScalerContext->isSubpixel(),
+ fScalerContext->computeAxisAlignmentForHText()} {
+ SkASSERT(fScalerContext != nullptr);
+ fMemoryUsed = sizeof(*this);
+}
+
+#ifdef SK_DEBUG
+#define VALIDATE() AutoValidate av(this)
+#else
+#define VALIDATE()
+#endif
+
+// -- glyph creation -------------------------------------------------------------------------------
+SkGlyph* SkStrike::makeGlyph(SkPackedGlyphID packedGlyphID) {
+ fMemoryUsed += sizeof(SkGlyph);
+ SkGlyph* glyph = fAlloc.make<SkGlyph>(packedGlyphID);
+ fGlyphMap.set(glyph);
+ return glyph;
+}
+
+SkGlyph* SkStrike::glyph(SkPackedGlyphID packedGlyphID) {
+ VALIDATE();
+ SkGlyph* glyph = fGlyphMap.findOrNull(packedGlyphID);
+ if (glyph == nullptr) {
+ glyph = this->makeGlyph(packedGlyphID);
+ fScalerContext->getMetrics(glyph);
+ }
+ return glyph;
+}
+
+SkGlyph* SkStrike::glyph(SkGlyphID glyphID) {
+ return this->glyph(SkPackedGlyphID{glyphID});
+}
+
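+// Quantize the requested position to the strike's subpixel resolution:
+// ignorePositionMask keeps only the fixed-point fraction bits this strike
+// distinguishes (none when subpixel positioning is off).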
+SkGlyph* SkStrike::glyph(SkGlyphID glyphID, SkPoint position) {
+ SkIPoint mask = fRoundingSpec.ignorePositionMask;
+ SkFixed subX = SkScalarToFixed(position.x()) & mask.x(),
+ subY = SkScalarToFixed(position.y()) & mask.y();
+ return this->glyph(SkPackedGlyphID{glyphID, subX, subY});
+}
+
+SkGlyph* SkStrike::glyphFromPrototype(const SkGlyphPrototype& p, void* image) {
+ SkGlyph* glyph = fGlyphMap.findOrNull(p.id);
+ if (glyph == nullptr) {
+ fMemoryUsed += sizeof(SkGlyph);
+ glyph = fAlloc.make<SkGlyph>(p);
+ fGlyphMap.set(glyph);
+ }
+ if (glyph->setImage(&fAlloc, image)) {
+ fMemoryUsed += glyph->imageSize();
+ }
+ return glyph;
+}
+
+SkGlyph* SkStrike::glyphOrNull(SkPackedGlyphID id) const {
+ return fGlyphMap.findOrNull(id);
+}
+
+const SkPath* SkStrike::preparePath(SkGlyph* glyph) {
+ if (glyph->setPath(&fAlloc, fScalerContext.get())) {
+ fMemoryUsed += glyph->path()->approximateBytesUsed();
+ }
+ return glyph->path();
+}
+
+const SkPath* SkStrike::preparePath(SkGlyph* glyph, const SkPath* path) {
+ if (glyph->setPath(&fAlloc, path)) {
+ fMemoryUsed += glyph->path()->approximateBytesUsed();
+ }
+ return glyph->path();
+}
+
+const SkDescriptor& SkStrike::getDescriptor() const {
+ return *fDesc.getDesc();
+}
+
+unsigned SkStrike::getGlyphCount() const {
+ return fScalerContext->getGlyphCount();
+}
+
+int SkStrike::countCachedGlyphs() const {
+ return fGlyphMap.count();
+}
+
+SkSpan<const SkGlyph*> SkStrike::internalPrepare(
+ SkSpan<const SkGlyphID> glyphIDs, PathDetail pathDetail, const SkGlyph** results) {
+ const SkGlyph** cursor = results;
+ for (auto glyphID : glyphIDs) {
+ SkGlyph* glyphPtr = this->glyph(glyphID);
+ if (pathDetail == kMetricsAndPath) {
+ this->preparePath(glyphPtr);
+ }
+ *cursor++ = glyphPtr;
+ }
+
+ return {results, glyphIDs.size()};
+}
+
+const void* SkStrike::prepareImage(SkGlyph* glyph) {
+ if (glyph->setImage(&fAlloc, fScalerContext.get())) {
+ fMemoryUsed += glyph->imageSize();
+ }
+ return glyph->image();
+}
+
+SkGlyph* SkStrike::mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& from) {
+ SkGlyph* glyph = fGlyphMap.findOrNull(toID);
+ if (glyph == nullptr) {
+ glyph = this->makeGlyph(toID);
+ }
+ if (glyph->setMetricsAndImage(&fAlloc, from)) {
+ fMemoryUsed += glyph->imageSize();
+ }
+ return glyph;
+}
+
+bool SkStrike::belongsToCache(const SkGlyph* glyph) const {
+ return glyph && fGlyphMap.findOrNull(glyph->getPackedID()) == glyph;
+}
+
+const SkGlyph* SkStrike::getCachedGlyphAnySubPix(SkGlyphID glyphID,
+ SkPackedGlyphID vetoID) const {
+ for (SkFixed subY = 0; subY < SK_Fixed1; subY += SK_FixedQuarter) {
+ for (SkFixed subX = 0; subX < SK_Fixed1; subX += SK_FixedQuarter) {
+ SkPackedGlyphID packedGlyphID{glyphID, subX, subY};
+ if (packedGlyphID == vetoID) continue;
+ if (SkGlyph* glyphPtr = fGlyphMap.findOrNull(packedGlyphID)) {
+ return glyphPtr;
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+SkSpan<const SkGlyph*> SkStrike::metrics(SkSpan<const SkGlyphID> glyphIDs,
+ const SkGlyph* results[]) {
+ return this->internalPrepare(glyphIDs, kMetricsOnly, results);
+}
+
+SkSpan<const SkGlyph*> SkStrike::preparePaths(SkSpan<const SkGlyphID> glyphIDs,
+ const SkGlyph* results[]) {
+ return this->internalPrepare(glyphIDs, kMetricsAndPath, results);
+}
+
+SkSpan<const SkGlyph*>
+SkStrike::prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs, const SkGlyph* results[]) {
+ const SkGlyph** cursor = results;
+ for (auto glyphID : glyphIDs) {
+ SkGlyph* glyphPtr = this->glyph(glyphID);
+ (void)this->prepareImage(glyphPtr);
+ *cursor++ = glyphPtr;
+ }
+
+ return {results, glyphIDs.size()};
+}
+
+void SkStrike::prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) {
+ for (auto t : SkMakeEnumerate(drawables->input())) {
+ size_t i; SkGlyphVariant packedID;
+ std::forward_as_tuple(i, std::tie(packedID, std::ignore)) = t;
+ SkGlyph* glyph = this->glyph(packedID);
+ if (!glyph->isEmpty()) {
+ const void* image = this->prepareImage(glyph);
+ // If the glyph is too large, then no image is created.
+ if (image != nullptr) {
+ drawables->push_back(glyph, i);
+ }
+ }
+ }
+}
+
+void SkStrike::prepareForDrawingPathsCPU(SkDrawableGlyphBuffer* drawables) {
+ for (auto t : SkMakeEnumerate(drawables->input())) {
+ size_t i; SkGlyphVariant packedID;
+ std::forward_as_tuple(i, std::tie(packedID, std::ignore)) = t;
+ SkGlyph* glyph = this->glyph(packedID);
+ if (!glyph->isEmpty()) {
+ const SkPath* path = this->preparePath(glyph);
+ // The glyph may not have a path.
+ if (path != nullptr) {
+ drawables->push_back(path, i);
+ }
+ }
+ }
+}
+
+// N.B. This call culls all the glyphs which will not display, either because their position is
+// non-finite or because they have no mask pixels.
+SkSpan<const SkGlyphPos>
+SkStrike::prepareForDrawingRemoveEmpty(const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[],
+ size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) {
+ size_t drawableGlyphCount = 0;
+ for (size_t i = 0; i < n; i++) {
+ SkPoint pos = positions[i];
+ if (SkScalarsAreFinite(pos.x(), pos.y())) {
+ SkGlyph* glyphPtr = this->glyph(packedGlyphIDs[i]);
+ if (!glyphPtr->isEmpty()) {
+ results[drawableGlyphCount++] = {i, glyphPtr, pos};
+ if (glyphPtr->maxDimension() <= maxDimension) {
+ // The glyph fits. Prepare image later.
+ } else if (!glyphPtr->isColor()) {
+ // The out of atlas glyph is not color so we can draw it using paths.
+ this->preparePath(glyphPtr);
+ } else {
+ // This will be handled by the fallback strike.
+ SkASSERT(glyphPtr->maxDimension() > maxDimension && glyphPtr->isColor());
+ }
+ }
+ }
+ }
+
+ return SkSpan<const SkGlyphPos>{results, drawableGlyphCount};
+}
+
+void SkStrike::findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkGlyph* glyph, SkScalar* array, int* count) {
+ glyph->ensureIntercepts(bounds, scale, xPos, array, count, &fAlloc);
+}
+
+void SkStrike::dump() const {
+ const SkTypeface* face = fScalerContext->getTypeface();
+ const SkScalerContextRec& rec = fScalerContext->getRec();
+ SkMatrix matrix;
+ rec.getSingleMatrix(&matrix);
+ matrix.preScale(SkScalarInvert(rec.fTextSize), SkScalarInvert(rec.fTextSize));
+ SkString name;
+ face->getFamilyName(&name);
+
+ SkString msg;
+ SkFontStyle style = face->fontStyle();
+ msg.printf("cache typeface:%x %25s:(%d,%d,%d)\n %s glyphs:%3d",
+ face->uniqueID(), name.c_str(), style.weight(), style.width(), style.slant(),
+ rec.dump().c_str(), fGlyphMap.count());
+ SkDebugf("%s\n", msg.c_str());
+}
+
+void SkStrike::onAboutToExitScope() { }
+
+#ifdef SK_DEBUG
+void SkStrike::forceValidate() const {
+ size_t memoryUsed = sizeof(*this);
+ fGlyphMap.foreach ([&memoryUsed](const SkGlyph* glyphPtr) {
+ memoryUsed += sizeof(SkGlyph);
+ if (glyphPtr->setImageHasBeenCalled()) {
+ memoryUsed += glyphPtr->imageSize();
+ }
+ if (glyphPtr->setPathHasBeenCalled() && glyphPtr->path() != nullptr) {
+ memoryUsed += glyphPtr->path()->approximateBytesUsed();
+ }
+ });
+ SkASSERT(fMemoryUsed == memoryUsed);
+}
+
+void SkStrike::validate() const {
+#ifdef SK_DEBUG_GLYPH_CACHE
+ forceValidate();
+#endif
+}
+#endif // SK_DEBUG
+
+
diff --git a/gfx/skia/skia/src/core/SkStrike.h b/gfx/skia/skia/src/core/SkStrike.h
new file mode 100644
index 0000000000..f893835cc6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrike.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+ */
+
+#ifndef SkStrike_DEFINED
+#define SkStrike_DEFINED
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkPaint.h"
+#include "include/private/SkTHash.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrikeForGPU.h"
+#include <memory>
+
+/** \class SkGlyphCache
+
+ This class represents a strike: a specific combination of typeface, size, matrix, etc., and
+ holds the glyphs for that strike. Calling any of the getGlyphID... methods will
+ return the requested glyph, either instantly if it is already cached, or by first generating
+ it and then adding it to the strike.
+
+ The strikes are held in a global list, available to all threads. To interact with one, call
+ one of the Find{OrCreate}Exclusive() methods.
+
+ The Find*Exclusive() methods return an SkExclusiveStrikePtr, which releases exclusive
+ ownership when it goes out of scope.
+*/
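+//
+// A minimal usage sketch (illustrative only; building the SkDescriptor,
+// SkScalerContextEffects, and SkTypeface is elided):
+//
+//     auto strike = SkStrikeCache::GlobalStrikeCache()->findOrCreateStrikeExclusive(
+//             desc, effects, typeface);
+//     SkGlyph* glyph = strike->glyph(SkPackedGlyphID{glyphID});
+//     strike->prepareImage(glyph);  // rasterize the mask if not already cached
+//     // ownership returns to the global cache when `strike` goes out of scope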
+class SkStrike final : public SkStrikeForGPU {
+public:
+ SkStrike(const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ const SkFontMetrics&);
+
+ // Return a glyph. Create it if it doesn't exist, and initialize the glyph with metrics and
+ // advances using a scaler.
+ SkGlyph* glyph(SkPackedGlyphID packedID);
+ SkGlyph* glyph(SkGlyphID glyphID);
+ SkGlyph* glyph(SkGlyphID, SkPoint);
+
+ // Return a glyph. Create it if it doesn't exist, and initialize with the prototype.
+ SkGlyph* glyphFromPrototype(const SkGlyphPrototype& p, void* image = nullptr);
+
+    // Return a glyph or nullptr if it does not exist in the strike.
+ SkGlyph* glyphOrNull(SkPackedGlyphID id) const;
+
+ const void* prepareImage(SkGlyph* glyph);
+
+    // Look up (or create if needed) the toGlyph using toID. If that glyph is not initialized
+    // with an image, then use the information in from to initialize the width, height, top,
+    // left, format, and image of the toGlyph. This is mainly used for preserving a glyph that
+    // was found by a desperation search.
+ SkGlyph* mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& from);
+
+    // If the path has never been set, then use the scaler context to add a path to the glyph.
+ const SkPath* preparePath(SkGlyph*);
+
+    // If the path has never been set, then add the given path to the glyph.
+ const SkPath* preparePath(SkGlyph* glyph, const SkPath* path);
+
+ /** Returns the number of glyphs for this strike.
+ */
+ unsigned getGlyphCount() const;
+
+ /** Return the number of glyphs currently cached. */
+ int countCachedGlyphs() const;
+
+ /** If the advance axis intersects the glyph's path, append the positions scaled and offset
+ to the array (if non-null), and set the count to the updated array length.
+ */
+ void findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkGlyph* , SkScalar* array, int* count);
+
+ /** Fallback glyphs used during font remoting if the original glyph can't be found.
+ */
+ bool belongsToCache(const SkGlyph* glyph) const;
+ /** Find any glyph in this cache with the given ID, regardless of subpixel positioning.
+ * If set and present, skip over the glyph with vetoID.
+ */
+ const SkGlyph* getCachedGlyphAnySubPix(SkGlyphID,
+ SkPackedGlyphID vetoID = SkPackedGlyphID()) const;
+
+ /** Return the vertical metrics for this strike.
+ */
+ const SkFontMetrics& getFontMetrics() const {
+ return fFontMetrics;
+ }
+
+ SkMask::Format getMaskFormat() const {
+ return fScalerContext->getMaskFormat();
+ }
+
+ const SkGlyphPositionRoundingSpec& roundingSpec() const override {
+ return fRoundingSpec;
+ }
+
+ const SkDescriptor& getDescriptor() const override;
+
+ SkSpan<const SkGlyph*> metrics(SkSpan<const SkGlyphID> glyphIDs,
+ const SkGlyph* results[]);
+
+ SkSpan<const SkGlyph*> preparePaths(SkSpan<const SkGlyphID> glyphIDs,
+ const SkGlyph* results[]);
+
+ SkSpan<const SkGlyph*> prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs,
+ const SkGlyph* results[]);
+
+ void prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables);
+
+ void prepareForDrawingPathsCPU(SkDrawableGlyphBuffer* drawables);
+ SkSpan<const SkGlyphPos> prepareForDrawingRemoveEmpty(const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[],
+ size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) override;
+
+ void onAboutToExitScope() override;
+
+ /** Return the approx RAM usage for this cache. */
+ size_t getMemoryUsed() const { return fMemoryUsed; }
+
+ void dump() const;
+
+ SkScalerContext* getScalerContext() const { return fScalerContext.get(); }
+
+#ifdef SK_DEBUG
+ void forceValidate() const;
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate : SkNoncopyable {
+ public:
+ AutoValidate(const SkStrike* cache) : fCache(cache) {
+ if (fCache) {
+ fCache->validate();
+ }
+ }
+ ~AutoValidate() {
+ if (fCache) {
+ fCache->validate();
+ }
+ }
+ void forget() {
+ fCache = nullptr;
+ }
+ private:
+ const SkStrike* fCache;
+ };
+
+private:
+ class GlyphMapHashTraits {
+ public:
+ static SkPackedGlyphID GetKey(const SkGlyph* glyph) {
+ return glyph->getPackedID();
+ }
+ static uint32_t Hash(SkPackedGlyphID glyphId) {
+ return glyphId.hash();
+ }
+ };
+
+ SkGlyph* makeGlyph(SkPackedGlyphID);
+
+ enum PathDetail {
+ kMetricsOnly,
+ kMetricsAndPath
+ };
+
+ // internalPrepare will only be called with a mutex already held.
+ SkSpan<const SkGlyph*> internalPrepare(
+ SkSpan<const SkGlyphID> glyphIDs,
+ PathDetail pathDetail,
+ const SkGlyph** results);
+
+ const SkAutoDescriptor fDesc;
+ const std::unique_ptr<SkScalerContext> fScalerContext;
+ SkFontMetrics fFontMetrics;
+
+ // Map from a combined GlyphID and sub-pixel position to a SkGlyph*.
+ // The actual glyph is stored in the fAlloc. This structure provides an
+ // unchanging pointer as long as the strike is alive.
+ SkTHashTable<SkGlyph*, SkPackedGlyphID, GlyphMapHashTraits> fGlyphMap;
+
+ // so we don't grow our arrays a lot
+ static constexpr size_t kMinGlyphCount = 8;
+ static constexpr size_t kMinGlyphImageSize = 16 /* height */ * 8 /* width */;
+ static constexpr size_t kMinAllocAmount = kMinGlyphImageSize * kMinGlyphCount;
+
+ SkArenaAlloc fAlloc {kMinAllocAmount};
+
+ // Tracks (approx) how much ram is tied-up in this strike.
+ size_t fMemoryUsed;
+
+ const SkGlyphPositionRoundingSpec fRoundingSpec;
+};
+
+#endif // SkStrike_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrikeCache.cpp b/gfx/skia/skia/src/core/SkStrikeCache.cpp
new file mode 100644
index 0000000000..bdef140897
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeCache.cpp
@@ -0,0 +1,581 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrikeCache.h"
+
+#include <cctype>
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkStrike.h"
+
+class SkStrikeCache::Node final : public SkStrikeForGPU {
+public:
+ Node(SkStrikeCache* strikeCache,
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ const SkFontMetrics& metrics,
+ std::unique_ptr<SkStrikePinner> pinner)
+ : fStrikeCache{strikeCache}
+ , fStrike{desc, std::move(scaler), metrics}
+ , fPinner{std::move(pinner)} {}
+
+ const SkGlyphPositionRoundingSpec& roundingSpec() const override {
+ return fStrike.roundingSpec();
+ }
+
+ SkSpan<const SkGlyphPos>
+ prepareForDrawingRemoveEmpty(const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[],
+ size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) override {
+ return fStrike.prepareForDrawingRemoveEmpty(packedGlyphIDs,
+ positions,
+ n,
+ maxDimension,
+ results);
+ }
+
+ const SkDescriptor& getDescriptor() const override {
+ return fStrike.getDescriptor();
+ }
+
+ void onAboutToExitScope() override {
+ fStrikeCache->attachNode(this);
+ }
+
+ SkStrikeCache* const fStrikeCache;
+ Node* fNext{nullptr};
+ Node* fPrev{nullptr};
+ SkStrike fStrike;
+ std::unique_ptr<SkStrikePinner> fPinner;
+};
+
+bool gSkUseThreadLocalStrikeCaches_IAcknowledgeThisIsIncrediblyExperimental = false;
+
+SkStrikeCache* SkStrikeCache::GlobalStrikeCache() {
+#if !defined(SK_BUILD_FOR_IOS)
+ if (gSkUseThreadLocalStrikeCaches_IAcknowledgeThisIsIncrediblyExperimental) {
+ static thread_local auto* cache = new SkStrikeCache;
+ return cache;
+ }
+#endif
+ static auto* cache = new SkStrikeCache;
+ return cache;
+}
+
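+// ExclusiveStrikePtr is a move-only RAII handle: when it is destroyed or assigned
+// over, it hands its Node back to the owning cache via attachNode(), so a strike
+// is only ever mutated by a single owner at a time.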
+SkStrikeCache::ExclusiveStrikePtr::ExclusiveStrikePtr(SkStrikeCache::Node* node)
+ : fNode{node} {}
+
+SkStrikeCache::ExclusiveStrikePtr::ExclusiveStrikePtr()
+ : fNode{nullptr} {}
+
+SkStrikeCache::ExclusiveStrikePtr::ExclusiveStrikePtr(ExclusiveStrikePtr&& o)
+ : fNode{o.fNode} {
+ o.fNode = nullptr;
+}
+
+SkStrikeCache::ExclusiveStrikePtr&
+SkStrikeCache::ExclusiveStrikePtr::operator = (ExclusiveStrikePtr&& o) {
+ if (fNode != nullptr) {
+ fNode->fStrikeCache->attachNode(fNode);
+ }
+ fNode = o.fNode;
+ o.fNode = nullptr;
+ return *this;
+}
+
+SkStrikeCache::ExclusiveStrikePtr::~ExclusiveStrikePtr() {
+ if (fNode != nullptr) {
+ fNode->fStrikeCache->attachNode(fNode);
+ }
+}
+
+SkStrike* SkStrikeCache::ExclusiveStrikePtr::get() const {
+ return &fNode->fStrike;
+}
+
+SkStrike* SkStrikeCache::ExclusiveStrikePtr::operator -> () const {
+ return this->get();
+}
+
+SkStrike& SkStrikeCache::ExclusiveStrikePtr::operator * () const {
+ return *this->get();
+}
+
+bool operator == (const SkStrikeCache::ExclusiveStrikePtr& lhs,
+ const SkStrikeCache::ExclusiveStrikePtr& rhs) {
+ return lhs.fNode == rhs.fNode;
+}
+
+bool operator == (const SkStrikeCache::ExclusiveStrikePtr& lhs, decltype(nullptr)) {
+ return lhs.fNode == nullptr;
+}
+
+bool operator == (decltype(nullptr), const SkStrikeCache::ExclusiveStrikePtr& rhs) {
+ return nullptr == rhs.fNode;
+}
+
+SkStrikeCache::~SkStrikeCache() {
+ Node* node = fHead;
+ while (node) {
+ Node* next = node->fNext;
+ delete node;
+ node = next;
+ }
+}
+
+std::unique_ptr<SkScalerContext> SkStrikeCache::CreateScalerContext(
+ const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) {
+ auto scaler = typeface.createScalerContext(effects, &desc, true /* can fail */);
+
+    // Check if we can create a scaler context before creating the glyph cache.
+    // If not, we may have exhausted OS/font resources, so purge the cache once
+    // and try again. The first attempt passes true ("can fail") so we notice a
+    // scaler-context failure; the retry passes false ("must succeed").
+ if (scaler == nullptr) {
+ PurgeAll();
+ scaler = typeface.createScalerContext(effects, &desc, false /* must succeed */);
+ }
+ return scaler;
+}
+
+SkExclusiveStrikePtr SkStrikeCache::findOrCreateStrikeExclusive(
+ const SkDescriptor& desc, const SkScalerContextEffects& effects, const SkTypeface& typeface)
+{
+ return SkExclusiveStrikePtr(this->findOrCreateStrike(desc, effects, typeface));
+}
+
+auto SkStrikeCache::findOrCreateStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) -> Node* {
+ Node* node = this->findAndDetachStrike(desc);
+ if (node == nullptr) {
+ auto scaler = CreateScalerContext(desc, effects, typeface);
+ node = this->createStrike(desc, std::move(scaler));
+ }
+ return node;
+}
+
+SkScopedStrikeForGPU SkStrikeCache::findOrCreateScopedStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) {
+ return SkScopedStrikeForGPU{this->findOrCreateStrike(desc, effects, typeface)};
+}
+
+void SkStrikeCache::PurgeAll() {
+ GlobalStrikeCache()->purgeAll();
+}
+
+void SkStrikeCache::Dump() {
+ SkDebugf("GlyphCache [ used budget ]\n");
+ SkDebugf(" bytes [ %8zu %8zu ]\n",
+ SkGraphics::GetFontCacheUsed(), SkGraphics::GetFontCacheLimit());
+ SkDebugf(" count [ %8zu %8zu ]\n",
+ SkGraphics::GetFontCacheCountUsed(), SkGraphics::GetFontCacheCountLimit());
+
+ int counter = 0;
+
+ auto visitor = [&counter](const SkStrike& cache) {
+ const SkScalerContextRec& rec = cache.getScalerContext()->getRec();
+
+ SkDebugf("index %d\n", counter);
+ SkDebugf("%s", rec.dump().c_str());
+ counter += 1;
+ };
+
+ GlobalStrikeCache()->forEachStrike(visitor);
+}
+
+namespace {
+ const char gGlyphCacheDumpName[] = "skia/sk_glyph_cache";
+} // namespace
+
+void SkStrikeCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ dump->dumpNumericValue(gGlyphCacheDumpName, "size", "bytes", SkGraphics::GetFontCacheUsed());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "budget_size", "bytes",
+ SkGraphics::GetFontCacheLimit());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "glyph_count", "objects",
+ SkGraphics::GetFontCacheCountUsed());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "budget_glyph_count", "objects",
+ SkGraphics::GetFontCacheCountLimit());
+
+ if (dump->getRequestedDetails() == SkTraceMemoryDump::kLight_LevelOfDetail) {
+ dump->setMemoryBacking(gGlyphCacheDumpName, "malloc", nullptr);
+ return;
+ }
+
+ auto visitor = [&dump](const SkStrike& cache) {
+ const SkTypeface* face = cache.getScalerContext()->getTypeface();
+ const SkScalerContextRec& rec = cache.getScalerContext()->getRec();
+
+ SkString fontName;
+ face->getFamilyName(&fontName);
+ // Replace all special characters with '_'.
+ for (size_t index = 0; index < fontName.size(); ++index) {
+ if (!std::isalnum(fontName[index])) {
+ fontName[index] = '_';
+ }
+ }
+
+ SkString dumpName = SkStringPrintf(
+ "%s/%s_%d/%p", gGlyphCacheDumpName, fontName.c_str(), rec.fFontID, &cache);
+
+ dump->dumpNumericValue(dumpName.c_str(),
+ "size", "bytes", cache.getMemoryUsed());
+ dump->dumpNumericValue(dumpName.c_str(),
+ "glyph_count", "objects", cache.countCachedGlyphs());
+ dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
+ };
+
+ GlobalStrikeCache()->forEachStrike(visitor);
+}
+
+
+void SkStrikeCache::attachNode(Node* node) {
+ if (node == nullptr) {
+ return;
+ }
+ SkAutoSpinlock ac(fLock);
+
+ this->validate();
+ node->fStrike.validate();
+
+ this->internalAttachToHead(node);
+ this->internalPurge();
+}
+
+SkExclusiveStrikePtr SkStrikeCache::findStrikeExclusive(const SkDescriptor& desc) {
+ return SkExclusiveStrikePtr(this->findAndDetachStrike(desc));
+}
+
+auto SkStrikeCache::findAndDetachStrike(const SkDescriptor& desc) -> Node* {
+ SkAutoSpinlock ac(fLock);
+
+ for (Node* node = internalGetHead(); node != nullptr; node = node->fNext) {
+ if (node->fStrike.getDescriptor() == desc) {
+ this->internalDetachCache(node);
+ return node;
+ }
+ }
+
+ return nullptr;
+}
+
+
+static bool loose_compare(const SkDescriptor& lhs, const SkDescriptor& rhs) {
+ uint32_t size;
+ auto ptr = lhs.findEntry(kRec_SkDescriptorTag, &size);
+ SkScalerContextRec lhsRec;
+ memcpy(&lhsRec, ptr, size);
+
+ ptr = rhs.findEntry(kRec_SkDescriptorTag, &size);
+ SkScalerContextRec rhsRec;
+ memcpy(&rhsRec, ptr, size);
+
+ // If these don't match, there's no way we can use these strikes interchangeably.
+ // Note that a typeface from each renderer maps to a unique proxy typeface on the GPU,
+ // keyed in the glyph cache using fontID in the SkDescriptor. By limiting this search
+ // to descriptors with the same fontID, we ensure that a renderer never uses glyphs
+ // generated by a different renderer.
+ return
+ lhsRec.fFontID == rhsRec.fFontID &&
+ lhsRec.fTextSize == rhsRec.fTextSize &&
+ lhsRec.fPreScaleX == rhsRec.fPreScaleX &&
+ lhsRec.fPreSkewX == rhsRec.fPreSkewX &&
+ lhsRec.fPost2x2[0][0] == rhsRec.fPost2x2[0][0] &&
+ lhsRec.fPost2x2[0][1] == rhsRec.fPost2x2[0][1] &&
+ lhsRec.fPost2x2[1][0] == rhsRec.fPost2x2[1][0] &&
+ lhsRec.fPost2x2[1][1] == rhsRec.fPost2x2[1][1];
+}
+
+bool SkStrikeCache::desperationSearchForImage(const SkDescriptor& desc, SkGlyph* glyph,
+ SkStrike* targetCache) {
+ SkAutoSpinlock ac(fLock);
+
+ SkGlyphID glyphID = glyph->getGlyphID();
+ for (Node* node = internalGetHead(); node != nullptr; node = node->fNext) {
+ if (loose_compare(node->fStrike.getDescriptor(), desc)) {
+ if (SkGlyph *fallback = node->fStrike.glyphOrNull(glyph->getPackedID())) {
+ // This desperate-match node may disappear as soon as we drop fLock, so we
+ // need to copy the glyph from node into this strike, including a
+ // deep copy of the mask.
+ targetCache->mergeGlyphAndImage(glyph->getPackedID(), *fallback);
+ return true;
+ }
+
+ // Look for any sub-pixel pos for this glyph, in case there is a pos mismatch.
+ if (const auto* fallback = node->fStrike.getCachedGlyphAnySubPix(glyphID)) {
+ targetCache->mergeGlyphAndImage(glyph->getPackedID(), *fallback);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool SkStrikeCache::desperationSearchForPath(
+ const SkDescriptor& desc, SkGlyphID glyphID, SkPath* path) {
+ SkAutoSpinlock ac(fLock);
+
+    // The following is wrong: there is subpixel positioning with paths...
+    // Paths are only ever at sub-pixel position (0,0), so we can just try that directly rather
+    // than trying our packed position first and then searching all others on failure, as for
+    // masks.
+ //
+ // This will have to search the sub-pixel positions too.
+ // There is also a problem with accounting for cache size with shared path data.
+ for (Node* node = internalGetHead(); node != nullptr; node = node->fNext) {
+ if (loose_compare(node->fStrike.getDescriptor(), desc)) {
+ if (SkGlyph *from = node->fStrike.glyphOrNull(SkPackedGlyphID{glyphID})) {
+ if (from->setPathHasBeenCalled() && from->path() != nullptr) {
+ // We can just copy the path out by value here, so no need to worry
+ // about the lifetime of this desperate-match node.
+ *path = *from->path();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+SkExclusiveStrikePtr SkStrikeCache::createStrikeExclusive(
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ SkFontMetrics* maybeMetrics,
+ std::unique_ptr<SkStrikePinner> pinner)
+{
+ return SkExclusiveStrikePtr(
+ this->createStrike(desc, std::move(scaler), maybeMetrics, std::move(pinner)));
+}
+
+auto SkStrikeCache::createStrike(
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ SkFontMetrics* maybeMetrics,
+ std::unique_ptr<SkStrikePinner> pinner) -> Node* {
+ SkFontMetrics fontMetrics;
+ if (maybeMetrics != nullptr) {
+ fontMetrics = *maybeMetrics;
+ } else {
+ scaler->getFontMetrics(&fontMetrics);
+ }
+
+ return new Node{this, desc, std::move(scaler), fontMetrics, std::move(pinner)};
+}
+
+void SkStrikeCache::purgeAll() {
+ SkAutoSpinlock ac(fLock);
+ this->internalPurge(fTotalMemoryUsed);
+}
+
+size_t SkStrikeCache::getTotalMemoryUsed() const {
+ SkAutoSpinlock ac(fLock);
+ return fTotalMemoryUsed;
+}
+
+int SkStrikeCache::getCacheCountUsed() const {
+ SkAutoSpinlock ac(fLock);
+ return fCacheCount;
+}
+
+int SkStrikeCache::getCacheCountLimit() const {
+ SkAutoSpinlock ac(fLock);
+ return fCacheCountLimit;
+}
+
+size_t SkStrikeCache::setCacheSizeLimit(size_t newLimit) {
+ static const size_t minLimit = 256 * 1024;
+ if (newLimit < minLimit) {
+ newLimit = minLimit;
+ }
+
+ SkAutoSpinlock ac(fLock);
+
+ size_t prevLimit = fCacheSizeLimit;
+ fCacheSizeLimit = newLimit;
+ this->internalPurge();
+ return prevLimit;
+}
+
+size_t SkStrikeCache::getCacheSizeLimit() const {
+ SkAutoSpinlock ac(fLock);
+ return fCacheSizeLimit;
+}
+
+int SkStrikeCache::setCacheCountLimit(int newCount) {
+ if (newCount < 0) {
+ newCount = 0;
+ }
+
+ SkAutoSpinlock ac(fLock);
+
+ int prevCount = fCacheCountLimit;
+ fCacheCountLimit = newCount;
+ this->internalPurge();
+ return prevCount;
+}
+
+int SkStrikeCache::getCachePointSizeLimit() const {
+ SkAutoSpinlock ac(fLock);
+ return fPointSizeLimit;
+}
+
+int SkStrikeCache::setCachePointSizeLimit(int newLimit) {
+ if (newLimit < 0) {
+ newLimit = 0;
+ }
+
+ SkAutoSpinlock ac(fLock);
+
+ int prevLimit = fPointSizeLimit;
+ fPointSizeLimit = newLimit;
+ return prevLimit;
+}
+
+void SkStrikeCache::forEachStrike(std::function<void(const SkStrike&)> visitor) const {
+ SkAutoSpinlock ac(fLock);
+
+ this->validate();
+
+ for (Node* node = this->internalGetHead(); node != nullptr; node = node->fNext) {
+ visitor(node->fStrike);
+ }
+}
+
+size_t SkStrikeCache::internalPurge(size_t minBytesNeeded) {
+ this->validate();
+
+ size_t bytesNeeded = 0;
+ if (fTotalMemoryUsed > fCacheSizeLimit) {
+ bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit;
+ }
+ bytesNeeded = SkTMax(bytesNeeded, minBytesNeeded);
+ if (bytesNeeded) {
+ // no small purges!
+ bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed >> 2);
+ }
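+    // For example, with a 2MB size limit and 3MB in use, bytesNeeded starts at
+    // 1MB and stays at max(1MB, 3MB / 4) = 1MB. (Numbers illustrative only.)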
+
+ int countNeeded = 0;
+ if (fCacheCount > fCacheCountLimit) {
+ countNeeded = fCacheCount - fCacheCountLimit;
+ // no small purges!
+ countNeeded = SkMax32(countNeeded, fCacheCount >> 2);
+ }
+
+ // early exit
+ if (!countNeeded && !bytesNeeded) {
+ return 0;
+ }
+
+ size_t bytesFreed = 0;
+ int countFreed = 0;
+
+ // Start at the tail and proceed backwards deleting; the list is in LRU
+ // order, with unimportant entries at the tail.
+ Node* node = this->internalGetTail();
+ while (node != nullptr && (bytesFreed < bytesNeeded || countFreed < countNeeded)) {
+ Node* prev = node->fPrev;
+
+ // Only delete if the strike is not pinned.
+ if (node->fPinner == nullptr || node->fPinner->canDelete()) {
+ bytesFreed += node->fStrike.getMemoryUsed();
+ countFreed += 1;
+ this->internalDetachCache(node);
+ delete node;
+ }
+ node = prev;
+ }
+
+ this->validate();
+
+#ifdef SPEW_PURGE_STATUS
+ if (countFreed) {
+ SkDebugf("purging %dK from font cache [%d entries]\n",
+ (int)(bytesFreed >> 10), countFreed);
+ }
+#endif
+
+ return bytesFreed;
+}
+
+void SkStrikeCache::internalAttachToHead(Node* node) {
+ SkASSERT(nullptr == node->fPrev && nullptr == node->fNext);
+ if (fHead) {
+ fHead->fPrev = node;
+ node->fNext = fHead;
+ }
+ fHead = node;
+
+ if (fTail == nullptr) {
+ fTail = node;
+ }
+
+ fCacheCount += 1;
+ fTotalMemoryUsed += node->fStrike.getMemoryUsed();
+}
+
+void SkStrikeCache::internalDetachCache(Node* node) {
+ SkASSERT(fCacheCount > 0);
+ fCacheCount -= 1;
+ fTotalMemoryUsed -= node->fStrike.getMemoryUsed();
+
+ if (node->fPrev) {
+ node->fPrev->fNext = node->fNext;
+ } else {
+ fHead = node->fNext;
+ }
+ if (node->fNext) {
+ node->fNext->fPrev = node->fPrev;
+ } else {
+ fTail = node->fPrev;
+ }
+ node->fPrev = node->fNext = nullptr;
+}
+
+void SkStrikeCache::ValidateGlyphCacheDataSize() {
+#ifdef SK_DEBUG
+ GlobalStrikeCache()->validateGlyphCacheDataSize();
+#endif
+}
+
+#ifdef SK_DEBUG
+void SkStrikeCache::validateGlyphCacheDataSize() const {
+ this->forEachStrike(
+ [](const SkStrike& cache) { cache.forceValidate();
+ });
+}
+#endif
+
+#ifdef SK_DEBUG
+void SkStrikeCache::validate() const {
+ size_t computedBytes = 0;
+ int computedCount = 0;
+
+ const Node* node = fHead;
+ while (node != nullptr) {
+ computedBytes += node->fStrike.getMemoryUsed();
+ computedCount += 1;
+ node = node->fNext;
+ }
+
+ SkASSERTF(fCacheCount == computedCount, "fCacheCount: %d, computedCount: %d", fCacheCount,
+ computedCount);
+    SkASSERTF(fTotalMemoryUsed == computedBytes, "fTotalMemoryUsed: %zu, computedBytes: %zu",
+              fTotalMemoryUsed, computedBytes);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkStrikeCache.h b/gfx/skia/skia/src/core/SkStrikeCache.h
new file mode 100644
index 0000000000..189da86506
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeCache.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrikeCache_DEFINED
+#define SkStrikeCache_DEFINED
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "include/private/SkSpinlock.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkStrike.h"
+
+class SkTraceMemoryDump;
+
+#ifndef SK_DEFAULT_FONT_CACHE_COUNT_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_COUNT_LIMIT 2048
+#endif
+
+#ifndef SK_DEFAULT_FONT_CACHE_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_LIMIT (2 * 1024 * 1024)
+#endif
+
+#ifndef SK_DEFAULT_FONT_CACHE_POINT_SIZE_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_POINT_SIZE_LIMIT 256
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkStrikePinner {
+public:
+ virtual ~SkStrikePinner() = default;
+ virtual bool canDelete() = 0;
+};
+
+class SkStrikeCache final : public SkStrikeForGPUCacheInterface {
+ class Node;
+
+public:
+ SkStrikeCache() = default;
+ ~SkStrikeCache() override;
+
+ class ExclusiveStrikePtr {
+ public:
+ explicit ExclusiveStrikePtr(Node*);
+ ExclusiveStrikePtr();
+ ExclusiveStrikePtr(const ExclusiveStrikePtr&) = delete;
+ ExclusiveStrikePtr& operator = (const ExclusiveStrikePtr&) = delete;
+ ExclusiveStrikePtr(ExclusiveStrikePtr&&);
+ ExclusiveStrikePtr& operator = (ExclusiveStrikePtr&&);
+ ~ExclusiveStrikePtr();
+
+ SkStrike* get() const;
+ SkStrike* operator -> () const;
+ SkStrike& operator * () const;
+ explicit operator bool () const { return fNode != nullptr; }
+ friend bool operator == (const ExclusiveStrikePtr&, const ExclusiveStrikePtr&);
+ friend bool operator == (const ExclusiveStrikePtr&, decltype(nullptr));
+ friend bool operator == (decltype(nullptr), const ExclusiveStrikePtr&);
+
+ private:
+ Node* fNode;
+ };
+
+ static SkStrikeCache* GlobalStrikeCache();
+
+ ExclusiveStrikePtr findStrikeExclusive(const SkDescriptor&);
+
+ ExclusiveStrikePtr createStrikeExclusive(
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ SkFontMetrics* maybeMetrics = nullptr,
+ std::unique_ptr<SkStrikePinner> = nullptr);
+
+ ExclusiveStrikePtr findOrCreateStrikeExclusive(
+ const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface);
+
+    // Routines to find usable data when working in a remote cache situation. These can
+    // substitute for similar calls in SkScalerContext.
+ bool desperationSearchForImage(const SkDescriptor& desc,
+ SkGlyph* glyph,
+ SkStrike* targetCache);
+ bool desperationSearchForPath(const SkDescriptor& desc, SkGlyphID glyphID, SkPath* path);
+
+ SkScopedStrikeForGPU findOrCreateScopedStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) override;
+
+ static std::unique_ptr<SkScalerContext> CreateScalerContext(
+ const SkDescriptor&, const SkScalerContextEffects&, const SkTypeface&);
+
+ static void PurgeAll();
+ static void ValidateGlyphCacheDataSize();
+ static void Dump();
+
+    // Dump memory usage statistics of all the attached caches in the process using the
+ // SkTraceMemoryDump interface.
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ void purgeAll(); // does not change budget
+
+ int getCacheCountLimit() const;
+ int setCacheCountLimit(int limit);
+ int getCacheCountUsed() const;
+
+ size_t getCacheSizeLimit() const;
+ size_t setCacheSizeLimit(size_t limit);
+ size_t getTotalMemoryUsed() const;
+
+ int getCachePointSizeLimit() const;
+ int setCachePointSizeLimit(int limit);
+
+#ifdef SK_DEBUG
+ // A simple accounting of what each glyph cache reports and the strike cache total.
+ void validate() const SK_REQUIRES(fLock);
+ // Make sure that each glyph cache's memory tracking and actual memory used are in sync.
+ void validateGlyphCacheDataSize() const;
+#else
+ void validate() const {}
+ void validateGlyphCacheDataSize() const {}
+#endif
+
+private:
+ Node* findAndDetachStrike(const SkDescriptor&);
+ Node* createStrike(
+ const SkDescriptor& desc,
+ std::unique_ptr<SkScalerContext> scaler,
+ SkFontMetrics* maybeMetrics = nullptr,
+ std::unique_ptr<SkStrikePinner> = nullptr);
+ Node* findOrCreateStrike(
+ const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface);
+ void attachNode(Node* node);
+
+ // The following methods can only be called when mutex is already held.
+ Node* internalGetHead() const SK_REQUIRES(fLock) { return fHead; }
+ Node* internalGetTail() const SK_REQUIRES(fLock) { return fTail; }
+ void internalDetachCache(Node*) SK_REQUIRES(fLock);
+ void internalAttachToHead(Node*) SK_REQUIRES(fLock);
+
+    // Check the budgets, modulated by the specified min-bytes-needed-to-purge,
+ // and attempt to purge caches to match.
+ // Returns number of bytes freed.
+ size_t internalPurge(size_t minBytesNeeded = 0) SK_REQUIRES(fLock);
+
+ void forEachStrike(std::function<void(const SkStrike&)> visitor) const;
+
+ mutable SkSpinlock fLock;
+ Node* fHead SK_GUARDED_BY(fLock) {nullptr};
+ Node* fTail SK_GUARDED_BY(fLock) {nullptr};
+ size_t fTotalMemoryUsed{0};
+ size_t fCacheSizeLimit{SK_DEFAULT_FONT_CACHE_LIMIT};
+ int32_t fCacheCountLimit{SK_DEFAULT_FONT_CACHE_COUNT_LIMIT};
+ int32_t fCacheCount{0};
+ int32_t fPointSizeLimit{SK_DEFAULT_FONT_CACHE_POINT_SIZE_LIMIT};
+};
+
+using SkExclusiveStrikePtr = SkStrikeCache::ExclusiveStrikePtr;
+
+#endif // SkStrikeCache_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrikeForGPU.cpp b/gfx/skia/skia/src/core/SkStrikeForGPU.cpp
new file mode 100644
index 0000000000..1c2da44b41
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeForGPU.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrikeForGPU.h"
+
+#include "src/core/SkGlyphRunPainter.h"
+
+bool SkStrikeForGPU::CanDrawAsMask(const SkGlyph& glyph) {
+ return glyph.maxDimension() <= SkStrikeCommon::kSkSideTooBigForAtlas;
+}
+
+bool SkStrikeForGPU::CanDrawAsSDFT(const SkGlyph& glyph) {
+ return glyph.maxDimension() <= SkStrikeCommon::kSkSideTooBigForAtlas
+ && glyph.maskFormat() == SkMask::kSDF_Format;
+}
+
+bool SkStrikeForGPU::CanDrawAsPath(const SkGlyph& glyph) {
+ SkASSERT(glyph.isColor() || glyph.setPathHasBeenCalled());
+ return !glyph.isColor() && glyph.path() != nullptr;
+}
+
diff --git a/gfx/skia/skia/src/core/SkStrikeForGPU.h b/gfx/skia/skia/src/core/SkStrikeForGPU.h
new file mode 100644
index 0000000000..a72ea355e5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeForGPU.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrikeInterface_DEFINED
+#define SkStrikeInterface_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkSpan.h"
+
+#include <memory>
+
+class SkDescriptor;
+class SkGlyph;
+class SkMaskFilter;
+class SkPathEffect;
+class SkTypeface;
+struct SkGlyphPositionRoundingSpec;
+struct SkScalerContextEffects;
+
+struct SkGlyphPos {
+ size_t index;
+ const SkGlyph* glyph;
+ SkPoint position;
+};
+
+struct SkPathPos {
+ const SkPath* path;
+ SkPoint position;
+};
+
+class SkStrikeForGPU {
+public:
+ virtual ~SkStrikeForGPU() = default;
+ virtual const SkDescriptor& getDescriptor() const = 0;
+
+    // prepareForDrawingRemoveEmpty takes glyph IDs and positions, and returns a list of SkGlyphs
+    // and positions for which all the data needed to draw the glyph has been created. The
+    // maxDimension parameter determines whether the mask/SDF version will be created or an
+    // alternate drawing format should be used. For path-only drawing set maxDimension to 0, and
+    // for bitmap-device drawing (where there is no upper limit on the glyph size in the cache)
+    // use INT_MAX. prepareForDrawingRemoveEmpty should remove all empty glyphs from the
+    // returned span.
+ virtual SkSpan<const SkGlyphPos>
+ prepareForDrawingRemoveEmpty(const SkPackedGlyphID packedGlyphIDs[],
+ const SkPoint positions[],
+ size_t n,
+ int maxDimension,
+ SkGlyphPos results[]) = 0;
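+    // A sketch of a call (names illustrative; ids and positions are caller-managed
+    // parallel arrays of length n):
+    //     SkAutoSTArray<64, SkGlyphPos> out{SkToInt(n)};
+    //     auto drawable = strike->prepareForDrawingRemoveEmpty(
+    //             ids, positions, n, /*maxDimension=*/INT_MAX, out.get());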
+
+ virtual const SkGlyphPositionRoundingSpec& roundingSpec() const = 0;
+
+ // Used with SkScopedStrikeForGPU to take action at the end of a scope.
+ virtual void onAboutToExitScope() = 0;
+
+ // Common categories for glyph types used by GPU.
+ static bool CanDrawAsMask(const SkGlyph& glyph);
+ static bool CanDrawAsSDFT(const SkGlyph& glyph);
+ static bool CanDrawAsPath(const SkGlyph& glyph);
+
+
+ struct Deleter {
+ void operator()(SkStrikeForGPU* ptr) const {
+ ptr->onAboutToExitScope();
+ }
+ };
+};
+
+using SkScopedStrikeForGPU = std::unique_ptr<SkStrikeForGPU, SkStrikeForGPU::Deleter>;
+
+class SkStrikeForGPUCacheInterface {
+public:
+ virtual ~SkStrikeForGPUCacheInterface() = default;
+ virtual SkScopedStrikeForGPU findOrCreateScopedStrike(const SkDescriptor& desc,
+ const SkScalerContextEffects& effects,
+ const SkTypeface& typeface) = 0;
+};
+#endif //SkStrikeInterface_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrikeSpec.cpp b/gfx/skia/skia/src/core/SkStrikeSpec.cpp
new file mode 100644
index 0000000000..731c259dc2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeSpec.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrikeSpec.h"
+
+#include "include/core/SkGraphics.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTLazy.h"
+
+SkStrikeSpec SkStrikeSpec::MakeMask(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix) {
+ SkStrikeSpec storage;
+
+ storage.commonSetup(font, paint, surfaceProps, scalerContextFlags, deviceMatrix);
+
+ return storage;
+}
+
+SkStrikeSpec SkStrikeSpec::MakePath(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags) {
+ SkStrikeSpec storage;
+
+    // Set up our standard runPaint, in hopes of getting hits in the cache.
+ SkPaint pathPaint{paint};
+ SkFont pathFont{font};
+
+ // The factor to get from the size stored in the strike to the size needed for
+ // the source.
+ storage.fStrikeToSourceRatio = pathFont.setupForAsPaths(&pathPaint);
+
+    // Sub-pixel positioning will be handled when transforming to the screen, so disable it here.
+ pathFont.setSubpixel(false);
+
+ storage.commonSetup(pathFont, pathPaint, surfaceProps, scalerContextFlags, SkMatrix::I());
+
+ return storage;
+}
+
+SkStrikeSpec SkStrikeSpec::MakeSourceFallback(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ SkScalar maxSourceGlyphDimension) {
+ SkStrikeSpec storage;
+
+ // Subtract 2 to account for the bilerp pad around the glyph
+ SkScalar maxAtlasDimension = SkStrikeCommon::kSkSideTooBigForAtlas - 2;
+
+ SkScalar runFontTextSize = font.getSize();
+
+ // Scale the text size down so the long side of all the glyphs will fit in the atlas.
+ SkScalar fallbackTextSize = SkScalarFloorToScalar(
+ (maxAtlasDimension / maxSourceGlyphDimension) * runFontTextSize);
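+    // For instance, if the widest source glyph spans 600px and the atlas budget is
+    // 254px, a 40pt run falls back to floor(254/600 * 40) = 16pt, and the ratio
+    // computed below becomes 40/16 = 2.5. (Numbers illustrative only.)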
+
+ SkFont fallbackFont{font};
+ fallbackFont.setSize(fallbackTextSize);
+
+ // No sub-pixel needed. The transform to the screen will take care of sub-pixel positioning.
+ fallbackFont.setSubpixel(false);
+
+ // The scale factor to go from strike size to the source size for glyphs.
+ storage.fStrikeToSourceRatio = runFontTextSize / fallbackTextSize;
+
+ storage.commonSetup(fallbackFont, paint, surfaceProps, scalerContextFlags, SkMatrix::I());
+
+ return storage;
+}
+
+SkStrikeSpec SkStrikeSpec::MakeCanonicalized(const SkFont& font, const SkPaint* paint) {
+ SkStrikeSpec storage;
+
+ SkPaint canonicalizedPaint;
+ if (paint != nullptr) {
+ canonicalizedPaint = *paint;
+ }
+
+ const SkFont* canonicalizedFont = &font;
+ SkTLazy<SkFont> pathFont;
+ if (ShouldDrawAsPath(canonicalizedPaint, font, SkMatrix::I())) {
+ canonicalizedFont = pathFont.set(font);
+ storage.fStrikeToSourceRatio = pathFont->setupForAsPaths(nullptr);
+ canonicalizedPaint.reset();
+ }
+
+ storage.commonSetup(*canonicalizedFont,
+ canonicalizedPaint,
+ SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType),
+ kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+ return storage;
+}
+
+SkStrikeSpec SkStrikeSpec::MakeWithNoDevice(const SkFont& font, const SkPaint* paint) {
+ SkStrikeSpec storage;
+
+ SkPaint setupPaint;
+ if (paint != nullptr) {
+ setupPaint = *paint;
+ }
+
+ storage.commonSetup(font,
+ setupPaint,
+ SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType),
+ kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+
+    return storage;
+}
+
+SkStrikeSpec SkStrikeSpec::MakeDefault() {
+ SkFont defaultFont;
+ return MakeCanonicalized(defaultFont);
+}
+
+bool SkStrikeSpec::ShouldDrawAsPath(
+ const SkPaint& paint, const SkFont& font, const SkMatrix& viewMatrix) {
+
+ // hairline glyphs are fast enough so we don't need to cache them
+ if (SkPaint::kStroke_Style == paint.getStyle() && 0 == paint.getStrokeWidth()) {
+ return true;
+ }
+
+ // we don't cache perspective
+ if (viewMatrix.hasPerspective()) {
+ return true;
+ }
+
+ // Glyphs like Emojis can't be rendered as a path.
+ if (font.getTypeface() && font.getTypeface()->hasColorGlyphs()) {
+ return false;
+ }
+
+ SkMatrix textMatrix = SkFontPriv::MakeTextMatrix(font);
+ textMatrix.postConcat(viewMatrix);
+
+ // we have a self-imposed maximum, just for memory-usage sanity
+ SkScalar limit = SkMinScalar(SkGraphics::GetFontCachePointSizeLimit(), 1024);
+ SkScalar maxSizeSquared = limit * limit;
+
+ auto distance = [&textMatrix](int XIndex, int YIndex) {
+ return textMatrix[XIndex] * textMatrix[XIndex] + textMatrix[YIndex] * textMatrix[YIndex];
+ };
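+    // distance() is the squared length of one column of the text matrix, i.e. the
+    // squared device-space scale along that axis; comparing against limit*limit
+    // avoids taking a square root.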
+
+ return distance(SkMatrix::kMScaleX, SkMatrix::kMSkewY ) > maxSizeSquared
+ || distance(SkMatrix::kMSkewX, SkMatrix::kMScaleY) > maxSizeSquared;
+}
+
+SkStrikeSpec SkStrikeSpec::MakePDFVector(const SkTypeface& typeface, int* size) {
+ SkFont font;
+ font.setHinting(SkFontHinting::kNone);
+ font.setEdging(SkFont::Edging::kAlias);
+ font.setTypeface(sk_ref_sp(&typeface));
+ int unitsPerEm = typeface.getUnitsPerEm();
+ if (unitsPerEm <= 0) {
+ unitsPerEm = 1024;
+ }
+ if (size) {
+ *size = unitsPerEm;
+ }
+ font.setSize((SkScalar)unitsPerEm);
+
+ SkStrikeSpec storage;
+ storage.commonSetup(font,
+ SkPaint(),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry),
+ kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+
+ return storage;
+}
+
+#if SK_SUPPORT_GPU
+std::tuple<SkStrikeSpec, SkScalar, SkScalar>
+SkStrikeSpec::MakeSDFT(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps, const SkMatrix& deviceMatrix,
+ const GrTextContext::Options& options) {
+ SkStrikeSpec storage;
+
+ SkPaint dfPaint = GrTextContext::InitDistanceFieldPaint(paint);
+ SkFont dfFont = GrTextContext::InitDistanceFieldFont(
+ font, deviceMatrix, options, &storage.fStrikeToSourceRatio);
+
+ // Fake-gamma and subpixel antialiasing are applied in the shader, so we ignore the
+    // passed-in scaler context flags. (They're only used when we fall back to bitmap text.)
+ SkScalerContextFlags flags = SkScalerContextFlags::kNone;
+
+ SkScalar minScale, maxScale;
+ std::tie(minScale, maxScale) = GrTextContext::InitDistanceFieldMinMaxScale(
+ font.getSize(), deviceMatrix, options);
+
+ storage.commonSetup(dfFont, dfPaint, surfaceProps, flags, SkMatrix::I());
+
+ return std::tie(storage, minScale, maxScale);
+}
+
+sk_sp<GrTextStrike> SkStrikeSpec::findOrCreateGrStrike(GrStrikeCache* cache) const {
+ return cache->getStrike(*fAutoDescriptor.getDesc());
+}
+#endif
+
+void SkStrikeSpec::commonSetup(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix) {
+ SkScalerContextEffects effects;
+
+ SkScalerContext::CreateDescriptorAndEffectsUsingPaint(
+ font, paint, surfaceProps, scalerContextFlags, deviceMatrix,
+ &fAutoDescriptor, &effects);
+
+ fMaskFilter = sk_ref_sp(effects.fMaskFilter);
+ fPathEffect = sk_ref_sp(effects.fPathEffect);
+ fTypeface = font.refTypefaceOrDefault();
+}
+
+SkScopedStrikeForGPU SkStrikeSpec::findOrCreateScopedStrike(SkStrikeForGPUCacheInterface* cache) const {
+ SkScalerContextEffects effects{fPathEffect.get(), fMaskFilter.get()};
+ return cache->findOrCreateScopedStrike(*fAutoDescriptor.getDesc(), effects, *fTypeface);
+}
+
+SkExclusiveStrikePtr SkStrikeSpec::findOrCreateExclusiveStrike(SkStrikeCache* cache) const {
+ SkScalerContextEffects effects{fPathEffect.get(), fMaskFilter.get()};
+ return cache->findOrCreateStrikeExclusive(*fAutoDescriptor.getDesc(), effects, *fTypeface);
+}
+
+SkBulkGlyphMetrics::SkBulkGlyphMetrics(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateExclusiveStrike()} { }
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetrics::glyphs(SkSpan<const SkGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->metrics(glyphIDs, fGlyphs.get());
+}
+
+SkBulkGlyphMetricsAndPaths::SkBulkGlyphMetricsAndPaths(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateExclusiveStrike()} { }
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetricsAndPaths::glyphs(SkSpan<const SkGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->preparePaths(glyphIDs, fGlyphs.get());
+}
+
+SkBulkGlyphMetricsAndImages::SkBulkGlyphMetricsAndImages(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateExclusiveStrike()} { }
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetricsAndImages::glyphs(SkSpan<const SkPackedGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->prepareImages(glyphIDs, fGlyphs.get());
+}
diff --git a/gfx/skia/skia/src/core/SkStrikeSpec.h b/gfx/skia/skia/src/core/SkStrikeSpec.h
new file mode 100644
index 0000000000..d34e711661
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeSpec.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrikeSpec_DEFINED
+#define SkStrikeSpec_DEFINED
+
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeForGPU.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/text/GrStrikeCache.h"
+#include "src/gpu/text/GrTextContext.h"
+#endif
+
+class SkFont;
+class SkPaint;
+class SkStrikeCache;
+class SkSurfaceProps;
+
+class SkStrikeSpec {
+public:
+ // Create a strike spec for mask style cache entries.
+ static SkStrikeSpec MakeMask(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix);
+
+ // Create a strike spec for path style cache entries.
+ static SkStrikeSpec MakePath(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags);
+
+ static SkStrikeSpec MakeSourceFallback(const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ SkScalar maxSourceGlyphDimension);
+
+ // Create a canonical strike spec for device-less measurements.
+ static SkStrikeSpec MakeCanonicalized(
+ const SkFont& font, const SkPaint* paint = nullptr);
+
+    // Create a strike spec without a device; it does not switch over to paths for large sizes.
+ // This means that strikeToSourceRatio() is always 1.
+ static SkStrikeSpec MakeWithNoDevice(const SkFont& font, const SkPaint* paint = nullptr);
+
+ // Make a canonical strike spec for device-less measurements using default typeface and size.
+ static SkStrikeSpec MakeDefault();
+
+ // Make a strike spec for PDF Vector strikes
+ static SkStrikeSpec MakePDFVector(const SkTypeface& typeface, int* size);
+
+#if SK_SUPPORT_GPU
+ // Create a strike spec for scaled distance field text.
+ static std::tuple<SkStrikeSpec, SkScalar, SkScalar> MakeSDFT(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ const SkMatrix& deviceMatrix,
+ const GrTextContext::Options& options);
+
+ sk_sp<GrTextStrike> findOrCreateGrStrike(GrStrikeCache* cache) const;
+#endif
+
+ SkScopedStrikeForGPU findOrCreateScopedStrike(SkStrikeForGPUCacheInterface* cache) const;
+
+ SkExclusiveStrikePtr findOrCreateExclusiveStrike(
+ SkStrikeCache* cache = SkStrikeCache::GlobalStrikeCache()) const;
+
+ SkScalar strikeToSourceRatio() const { return fStrikeToSourceRatio; }
+ const SkDescriptor& descriptor() const { return *fAutoDescriptor.getDesc(); }
+ static bool ShouldDrawAsPath(const SkPaint& paint, const SkFont& font, const SkMatrix& matrix);
+
+private:
+ void commonSetup(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix);
+
+ SkAutoDescriptor fAutoDescriptor;
+ sk_sp<SkMaskFilter> fMaskFilter;
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fStrikeToSourceRatio{1.0f};
+};
+
+class SkBulkGlyphMetrics {
+public:
+ explicit SkBulkGlyphMetrics(const SkStrikeSpec& spec);
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkGlyphID> glyphIDs);
+
+private:
+ static constexpr int kTypicalGlyphCount = 20;
+ SkAutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ SkExclusiveStrikePtr fStrike;
+};
+
+class SkBulkGlyphMetricsAndPaths {
+public:
+ explicit SkBulkGlyphMetricsAndPaths(const SkStrikeSpec& spec);
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkGlyphID> glyphIDs);
+
+private:
+ static constexpr int kTypicalGlyphCount = 20;
+ SkAutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ SkExclusiveStrikePtr fStrike;
+};
+
+class SkBulkGlyphMetricsAndImages {
+public:
+ explicit SkBulkGlyphMetricsAndImages(const SkStrikeSpec& spec);
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkPackedGlyphID> glyphIDs);
+
+private:
+ static constexpr int kTypicalGlyphCount = 64;
+ SkAutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ SkExclusiveStrikePtr fStrike;
+};
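+
+// A minimal bulk-metrics sketch (assumes a valid SkFont `font` and a caller-owned
+// array `ids` of `count` glyph IDs; names are illustrative):
+//
+//     SkStrikeSpec spec = SkStrikeSpec::MakeCanonicalized(font);
+//     SkBulkGlyphMetrics metrics{spec};
+//     SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkSpan<const SkGlyphID>{ids, count});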
+
+#endif // SkStrikeSpec_DEFINED
diff --git a/gfx/skia/skia/src/core/SkString.cpp b/gfx/skia/skia/src/core/SkString.cpp
new file mode 100644
index 0000000000..d42ee6f07e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkString.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkUtils.h"
+#include "src/utils/SkUTF.h"
+
+#include <cstdio>
+#include <new>
+#include <utility>
+#include <vector>
+
+// number of bytes (on the stack) to receive the printf result
+static const size_t kBufferSize = 1024;
+
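+// Formats into the caller's stack buffer when the result fits; otherwise resizes
+// the heap-backed SkString and formats again using the saved va_list copy.
+// Returns whichever buffer actually holds the formatted text.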
+static const char* apply_format_string(const char* format, va_list args, char* stackBuffer,
+ size_t stackBufferSize, int* length, SkString* heapBuffer) {
+ va_list argsCopy;
+ va_copy(argsCopy, args);
+ *length = std::vsnprintf(stackBuffer, stackBufferSize, format, args);
+ if (*length < 0) {
+ SkDebugf("SkString: vsnprintf reported error.");
+ va_end(argsCopy);
+ *length = 0;
+ return stackBuffer;
+ }
+ if (*length < SkToInt(stackBufferSize)) {
+ va_end(argsCopy);
+ return stackBuffer;
+ }
+ heapBuffer->resize(*length);
+ SkDEBUGCODE(int check =)
+ std::vsnprintf(heapBuffer->writable_str(), *length + 1, format, argsCopy);
+ SkASSERT(check == *length);
+ va_end(argsCopy);
+ return heapBuffer->c_str();
+}
+
+#define ARGS_TO_BUFFER(format, buffer, size, written, result) \
+ SkString overflow; \
+ do { \
+ va_list args; \
+ va_start(args, format); \
+ result = apply_format_string(format, args, buffer, size, &written, &overflow); \
+ va_end(args); \
+ } while (0)
+
+#define V_SKSTRING_PRINTF(output, format) \
+ do { \
+ char buffer[kBufferSize]; \
+ va_list args; \
+ va_start(args, format); \
+ int length; \
+ auto result = apply_format_string(format, args, buffer, kBufferSize, &length, &output); \
+ SkASSERT(result == output.c_str() || result == buffer); \
+ if (result == buffer) { \
+ output.set(buffer, length); \
+ } \
+ } while (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]) {
+ SkASSERT(string);
+ SkASSERT(suffixStr);
+ size_t strLen = strlen(string);
+ size_t suffixLen = strlen(suffixStr);
+ return strLen >= suffixLen &&
+ !strncmp(string + strLen - suffixLen, suffixStr, suffixLen);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixChar) {
+ SkASSERT(string);
+ size_t strLen = strlen(string);
+ if (0 == strLen) {
+ return false;
+ } else {
+ return (suffixChar == string[strLen-1]);
+ }
+}
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]) {
+ int index = 0;
+ do {
+ const char* limit = strchr(prefixes, '\0');
+ if (!strncmp(string, prefixes, limit - prefixes)) {
+ return index;
+ }
+ prefixes = limit + 1;
+ index++;
+ } while (prefixes[0]);
+ return -1;
+}
+
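+// Writes the decimal digits of dec back-to-front into a scratch buffer, then
+// copies them forward into string; the caller must provide at least
+// SkStrAppendU32_MaxSize bytes of room.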
+char* SkStrAppendU32(char string[], uint32_t dec) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[SkStrAppendU32_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + dec % 10);
+ dec /= 10;
+ } while (dec != 0);
+
+ SkASSERT(p >= buffer);
+ char* stop = buffer + sizeof(buffer);
+ while (p < stop) {
+ *string++ = *p++;
+ }
+ SkASSERT(string - start <= SkStrAppendU32_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS32(char string[], int32_t dec) {
+ uint32_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU32(string, udec);
+}
+
+char* SkStrAppendU64(char string[], uint64_t dec, int minDigits) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[SkStrAppendU64_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + (int32_t) (dec % 10));
+ dec /= 10;
+ minDigits--;
+ } while (dec != 0);
+
+ while (minDigits > 0) {
+ *--p = '0';
+ minDigits--;
+ }
+
+ SkASSERT(p >= buffer);
+ size_t cp_len = buffer + sizeof(buffer) - p;
+ memcpy(string, p, cp_len);
+ string += cp_len;
+
+ SkASSERT(string - start <= SkStrAppendU64_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS64(char string[], int64_t dec, int minDigits) {
+ uint64_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU64(string, udec, minDigits);
+}
+
+char* SkStrAppendFloat(char string[], float value) {
+ // since floats have at most 8 significant digits, we limit our %g to that.
+ static const char gFormat[] = "%.8g";
+ // make it 1 larger for the terminating 0
+ char buffer[SkStrAppendScalar_MaxSize + 1];
+ int len = snprintf(buffer, sizeof(buffer), gFormat, value);
+ memcpy(string, buffer, len);
+ SkASSERT(len <= SkStrAppendScalar_MaxSize);
+ return string + len;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkString::Rec SkString::gEmptyRec(0, 0);
+
+#define SizeOfRec() (gEmptyRec.data() - (const char*)&gEmptyRec)
+
+static uint32_t trim_size_t_to_u32(size_t value) {
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (value > UINT32_MAX) {
+ value = UINT32_MAX;
+ }
+ }
+ return (uint32_t)value;
+}
+
+static size_t check_add32(size_t base, size_t extra) {
+ SkASSERT(base <= UINT32_MAX);
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (base + extra > UINT32_MAX) {
+ extra = UINT32_MAX - base;
+ }
+ }
+ return extra;
+}
+
+sk_sp<SkString::Rec> SkString::Rec::Make(const char text[], size_t len) {
+ if (0 == len) {
+ return sk_sp<SkString::Rec>(const_cast<Rec*>(&gEmptyRec));
+ }
+
+ SkSafeMath safe;
+ // We store a 32bit version of the length
+ uint32_t stringLen = safe.castTo<uint32_t>(len);
+ // Add SizeOfRec() for our overhead and 1 for null-termination
+ size_t allocationSize = safe.add(len, SizeOfRec() + sizeof(char));
+ // Align up to a multiple of 4
+ allocationSize = safe.alignUp(allocationSize, 4);
+
+ SkASSERT_RELEASE(safe.ok());
+
+ void* storage = ::operator new (allocationSize);
+ sk_sp<Rec> rec(new (storage) Rec(stringLen, 1));
+ if (text) {
+ memcpy(rec->data(), text, len);
+ }
+ rec->data()[len] = 0;
+ return rec;
+}
+
+void SkString::Rec::ref() const {
+ if (this == &SkString::gEmptyRec) {
+ return;
+ }
+ SkAssertResult(this->fRefCnt.fetch_add(+1, std::memory_order_relaxed));
+}
+
+void SkString::Rec::unref() const {
+ if (this == &SkString::gEmptyRec) {
+ return;
+ }
+ int32_t oldRefCnt = this->fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+ SkASSERT(oldRefCnt);
+ if (1 == oldRefCnt) {
+ delete this;
+ }
+}
+
+bool SkString::Rec::unique() const {
+ return fRefCnt.load(std::memory_order_acquire) == 1;
+}
+
+#ifdef SK_DEBUG
+const SkString& SkString::validate() const {
+    // make sure no one has written over our global
+ SkASSERT(0 == gEmptyRec.fLength);
+ SkASSERT(0 == gEmptyRec.fRefCnt.load(std::memory_order_relaxed));
+ SkASSERT(0 == gEmptyRec.data()[0]);
+
+ if (fRec.get() != &gEmptyRec) {
+ SkASSERT(fRec->fLength > 0);
+ SkASSERT(fRec->fRefCnt.load(std::memory_order_relaxed) > 0);
+ SkASSERT(0 == fRec->data()[fRec->fLength]);
+ }
+ return *this;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString::SkString() : fRec(const_cast<Rec*>(&gEmptyRec)) {
+}
+
+SkString::SkString(size_t len) {
+ fRec = Rec::Make(nullptr, len);
+}
+
+SkString::SkString(const char text[]) {
+ size_t len = text ? strlen(text) : 0;
+
+ fRec = Rec::Make(text, len);
+}
+
+SkString::SkString(const char text[], size_t len) {
+ fRec = Rec::Make(text, len);
+}
+
+SkString::SkString(const SkString& src) : fRec(src.validate().fRec) {}
+
+SkString::SkString(SkString&& src) : fRec(std::move(src.validate().fRec)) {
+ src.fRec.reset(const_cast<Rec*>(&gEmptyRec));
+}
+
+SkString::~SkString() {
+ this->validate();
+}
+
+bool SkString::equals(const SkString& src) const {
+ return fRec == src.fRec || this->equals(src.c_str(), src.size());
+}
+
+bool SkString::equals(const char text[]) const {
+ return this->equals(text, text ? strlen(text) : 0);
+}
+
+bool SkString::equals(const char text[], size_t len) const {
+ SkASSERT(len == 0 || text != nullptr);
+
+ return fRec->fLength == len && !memcmp(fRec->data(), text, len);
+}
+
+SkString& SkString::operator=(const SkString& src) {
+ this->validate();
+    fRec = src.fRec; // sk_sp<Rec>::operator=(const sk_sp<Rec>&) checks for self-assignment.
+ return *this;
+}
+
+SkString& SkString::operator=(SkString&& src) {
+ this->validate();
+
+ if (fRec != src.fRec) {
+ this->swap(src);
+ }
+ return *this;
+}
+
+SkString& SkString::operator=(const char text[]) {
+ this->validate();
+ return *this = SkString(text);
+}
+
+void SkString::reset() {
+ this->validate();
+ fRec.reset(const_cast<Rec*>(&gEmptyRec));
+}
+
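+// Copy-on-write: if our Rec is shared with another SkString, clone it before
+// exposing a mutable pointer, so the other owners are unaffected.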
+char* SkString::writable_str() {
+ this->validate();
+
+ if (fRec->fLength) {
+ if (!fRec->unique()) {
+ fRec = Rec::Make(fRec->data(), fRec->fLength);
+ }
+ }
+ return fRec->data();
+}
+
+void SkString::set(const char text[]) {
+ this->set(text, text ? strlen(text) : 0);
+}
+
+void SkString::set(const char text[], size_t len) {
+ len = trim_size_t_to_u32(len);
+ bool unique = fRec->unique();
+ if (0 == len) {
+ this->reset();
+ } else if (unique && len <= fRec->fLength) {
+        // should we resize if len is much smaller than fLength, to save RAM? (e.g. len < (fLength>>1))
+        // for now, just use less of the buffer without allocating a smaller one
+ char* p = this->writable_str();
+ if (text) {
+ memcpy(p, text, len);
+ }
+ p[len] = 0;
+ fRec->fLength = SkToU32(len);
+ } else if (unique && (fRec->fLength >> 2) == (len >> 2)) {
+ // we have spare room in the current allocation, so don't alloc a larger one
+ char* p = this->writable_str();
+ if (text) {
+ memcpy(p, text, len);
+ }
+ p[len] = 0;
+ fRec->fLength = SkToU32(len);
+ } else {
+ SkString tmp(text, len);
+ this->swap(tmp);
+ }
+}
+
+void SkString::insert(size_t offset, const char text[]) {
+ this->insert(offset, text, text ? strlen(text) : 0);
+}
+
+void SkString::insert(size_t offset, const char text[], size_t len) {
+ if (len) {
+ size_t length = fRec->fLength;
+ if (offset > length) {
+ offset = length;
+ }
+
+ // If length + len would overflow 32 bits, check_add32 trims len (possibly to 0)
+ len = check_add32(length, len);
+ if (0 == len) {
+ return;
+ }
+
+ /* If we're the only owner, and we have room in our allocation for the insert,
+ do it in place, rather than allocating a new buffer.
+
+ To know we have room, compare the allocated sizes
+ beforeAlloc = SkAlign4(length + 1)
+ afterAlloc = SkAlign4(length + 1 + len)
+ but SkAlign4(x) is (x + 3) >> 2 << 2
+ which is equivalent for testing to (length + 1 + 3) >> 2 == (length + 1 + 3 + len) >> 2
+ and we can then eliminate the +1+3 since that doesn't affect the answer
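+
+ For example, with length == 5 and len == 2, (5 >> 2) == 1 == (7 >> 2), and
+ indeed SkAlign4(6) == 8 == SkAlign4(8), so the insert fits in place; with
+ len == 4, (5 >> 2) == 1 != (9 >> 2) == 2, so a larger buffer is required.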
+ */
+ if (fRec->unique() && (length >> 2) == ((length + len) >> 2)) {
+ char* dst = this->writable_str();
+
+ if (offset < length) {
+ memmove(dst + offset + len, dst + offset, length - offset);
+ }
+ memcpy(dst + offset, text, len);
+
+ dst[length + len] = 0;
+ fRec->fLength = SkToU32(length + len);
+ } else {
+ /* Seems we should use realloc here, since that is safe if it fails
+ (we have the original data), and might be faster than alloc/copy/free.
+ */
+ SkString tmp(fRec->fLength + len);
+ char* dst = tmp.writable_str();
+
+ if (offset > 0) {
+ memcpy(dst, fRec->data(), offset);
+ }
+ memcpy(dst + offset, text, len);
+ if (offset < fRec->fLength) {
+ memcpy(dst + offset + len, fRec->data() + offset,
+ fRec->fLength - offset);
+ }
+
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::insertUnichar(size_t offset, SkUnichar uni) {
+ char buffer[SkUTF::kMaxBytesInUTF8Sequence];
+ size_t len = SkUTF::ToUTF8(uni, buffer);
+
+ if (len) {
+ this->insert(offset, buffer, len);
+ }
+}
+
+void SkString::insertS32(size_t offset, int32_t dec) {
+ char buffer[SkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertS64(size_t offset, int64_t dec, int minDigits) {
+ char buffer[SkStrAppendS64_MaxSize];
+ char* stop = SkStrAppendS64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU32(size_t offset, uint32_t dec) {
+ char buffer[SkStrAppendU32_MaxSize];
+ char* stop = SkStrAppendU32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU64(size_t offset, uint64_t dec, int minDigits) {
+ char buffer[SkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
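+// e.g. insertHex(offset, 0x1A, 4) inserts "001A": hex digits are emitted
+// back-to-front into the buffer, then left-padded with '0' up to minDigits
+// (which the function pins to [0, 8]).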
+void SkString::insertHex(size_t offset, uint32_t hex, int minDigits) {
+ minDigits = SkTPin(minDigits, 0, 8);
+
+ char buffer[8];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkHexadecimalDigits::gUpper[hex & 0xF];
+ hex >>= 4;
+ minDigits -= 1;
+ } while (hex != 0);
+
+ while (--minDigits >= 0) {
+ *--p = '0';
+ }
+
+ SkASSERT(p >= buffer);
+ this->insert(offset, p, buffer + sizeof(buffer) - p);
+}
+
+void SkString::insertScalar(size_t offset, SkScalar value) {
+ char buffer[SkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::printf(const char format[], ...) {
+ V_SKSTRING_PRINTF((*this), format);
+}
+
+void SkString::appendf(const char format[], ...) {
+ char buffer[kBufferSize];
+ int length;
+ const char* result;
+ ARGS_TO_BUFFER(format, buffer, kBufferSize, length, result);
+
+ this->append(result, length);
+}
+
+void SkString::appendVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ int length = vsnprintf(buffer, kBufferSize, format, args);
+ SkASSERT(length >= 0 && length < SkToInt(kBufferSize));
+
+ this->append(buffer, length);
+}
+
+void SkString::prependf(const char format[], ...) {
+ char buffer[kBufferSize];
+ int length;
+ const char* result;
+ ARGS_TO_BUFFER(format, buffer, kBufferSize, length, result);
+
+ this->prepend(result, length);
+}
+
+void SkString::prependVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ int length = vsnprintf(buffer, kBufferSize, format, args);
+ SkASSERT(length >= 0 && length < SkToInt(kBufferSize));
+
+ this->prepend(buffer, length);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkString::remove(size_t offset, size_t length) {
+ size_t size = this->size();
+
+ if (offset < size) {
+ if (length > size - offset) {
+ length = size - offset;
+ }
+ SkASSERT(length <= size);
+ SkASSERT(offset <= size - length);
+ if (length > 0) {
+ SkString tmp(size - length);
+ char* dst = tmp.writable_str();
+ const char* src = this->c_str();
+
+ if (offset) {
+ memcpy(dst, src, offset);
+ }
+ size_t tail = size - (offset + length);
+ if (tail) {
+ memcpy(dst + offset, src + (offset + length), tail);
+ }
+ SkASSERT(dst[tmp.size()] == 0);
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::swap(SkString& other) {
+ this->validate();
+ other.validate();
+
+ using std::swap;
+ swap(fRec, other.fRec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString SkStringPrintf(const char* format, ...) {
+ SkString formattedOutput;
+ V_SKSTRING_PRINTF(formattedOutput, format);
+ return formattedOutput;
+}
+
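+// For illustration, SkStrSplit("a,,b", ",", kStrict_SkStrSplitMode, &out) appends
+// {"a", "", "b"}, while kCoalesce_SkStrSplitMode appends {"a", "b"}: strict mode
+// keeps the empty token between adjacent delimiters.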
+void SkStrSplit(const char* str, const char* delimiters, SkStrSplitMode splitMode,
+ SkTArray<SkString>* out) {
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ }
+ if (!*str) {
+ return;
+ }
+
+ while (true) {
+ // Find a token.
+ const size_t len = strcspn(str, delimiters);
+ if (splitMode == kStrict_SkStrSplitMode || len > 0) {
+ out->push_back().set(str, len);
+ str += len;
+ }
+
+ if (!*str) {
+ return;
+ }
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ } else {
+ // Skip one delimiter.
+ str += 1;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStringUtils.cpp b/gfx/skia/skia/src/core/SkStringUtils.cpp
new file mode 100644
index 0000000000..efb9d13096
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "src/core/SkStringUtils.h"
+#include "src/utils/SkUTF.h"
+
+void SkAppendScalar(SkString* str, SkScalar value, SkScalarAsStringType asType) {
+ switch (asType) {
+ case kHex_SkScalarAsStringType:
+ str->appendf("SkBits2Float(0x%08x)", SkFloat2Bits(value));
+ break;
+ case kDec_SkScalarAsStringType: {
+ SkString tmp;
+ tmp.printf("%g", value);
+ if (tmp.contains('.')) {
+ tmp.appendUnichar('f');
+ }
+ str->append(tmp);
+ break;
+ }
+ }
+}
+
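+// e.g. SkTabString(SkString("a\nb\n\nc"), 1) returns "\ta\n\tb\n\n\tc": each
+// non-empty line gains the leading tab(s), and empty lines are left untouched.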
+SkString SkTabString(const SkString& string, int tabCnt) {
+ if (tabCnt <= 0) {
+ return string;
+ }
+ SkString tabs;
+ for (int i = 0; i < tabCnt; ++i) {
+ tabs.append("\t");
+ }
+ SkString result;
+ static const char newline[] = "\n";
+ const char* input = string.c_str();
+ int nextNL = SkStrFind(input, newline);
+ while (nextNL >= 0) {
+ if (nextNL > 0) {
+ result.append(tabs);
+ }
+ result.append(input, nextNL + 1);
+ input += nextNL + 1;
+ nextNL = SkStrFind(input, newline);
+ }
+ if (*input != '\0') {
+ result.append(tabs);
+ result.append(input);
+ }
+ return result;
+}
+
+SkString SkStringFromUTF16(const uint16_t* src, size_t count) {
+ SkString ret;
+ const uint16_t* stop = src + count;
+ if (count > 0) {
+ SkASSERT(src);
+ size_t n = 0;
+ const uint16_t* end = src + count;
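+ // First pass: measure how many UTF-8 bytes the conversion needs, truncating
+ // the input if the total would overflow SkString's 32-bit length field.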
+ for (const uint16_t* ptr = src; ptr < end;) {
+ const uint16_t* last = ptr;
+ SkUnichar u = SkUTF::NextUTF16(&ptr, stop);
+ size_t s = SkUTF::ToUTF8(u);
+ if (n > UINT32_MAX - s) {
+ end = last; // truncate input string
+ break;
+ }
+ n += s;
+ }
+ ret = SkString(n);
+ char* out = ret.writable_str();
+ for (const uint16_t* ptr = src; ptr < end;) {
+ out += SkUTF::ToUTF8(SkUTF::NextUTF16(&ptr, stop), out);
+ }
+ SkASSERT(out == ret.writable_str() + n);
+ }
+ return ret;
+}
diff --git a/gfx/skia/skia/src/core/SkStringUtils.h b/gfx/skia/skia/src/core/SkStringUtils.h
new file mode 100644
index 0000000000..184ca6e2d5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStringUtils_DEFINED
+#define SkStringUtils_DEFINED
+
+#include "include/core/SkScalar.h"
+
+class SkString;
+
+enum SkScalarAsStringType {
+ kDec_SkScalarAsStringType,
+ kHex_SkScalarAsStringType,
+};
+
+void SkAppendScalar(SkString*, SkScalar, SkScalarAsStringType);
+
+static inline void SkAppendScalarDec(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kDec_SkScalarAsStringType);
+}
+
+static inline void SkAppendScalarHex(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kHex_SkScalarAsStringType);
+}
+
+/** Indents every non-empty line of the string by tabCnt tabs */
+SkString SkTabString(const SkString& string, int tabCnt);
+
+SkString SkStringFromUTF16(const uint16_t* src, size_t count);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStroke.cpp b/gfx/skia/skia/src/core/SkStroke.cpp
new file mode 100644
index 0000000000..ecfefdf686
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.cpp
@@ -0,0 +1,1579 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrokerPriv.h"
+
+#include "include/private/SkMacros.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+
+#include <utility>
+
+enum {
+ kTangent_RecursiveLimit,
+ kCubic_RecursiveLimit,
+ kConic_RecursiveLimit,
+ kQuad_RecursiveLimit
+};
+
+// quads with extreme widths (e.g. (0,1) (1,6) (0,3) width=5e7) recurse to point of failure
+// largest seen for normal cubics : 5, 26
+// largest seen for normal quads : 11
+static const int kRecursiveLimits[] = { 5*3, 26*3, 11*3, 11*3 }; // 3x limits seen in practice
+
+static_assert(0 == kTangent_RecursiveLimit, "cubic_stroke_relies_on_tangent_equalling_zero");
+static_assert(1 == kCubic_RecursiveLimit, "cubic_stroke_relies_on_cubic_equalling_one");
+static_assert(SK_ARRAY_COUNT(kRecursiveLimits) == kQuad_RecursiveLimit + 1,
+ "recursive_limits_mismatch");
+
+#if defined SK_DEBUG && QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+ int gMaxRecursion[SK_ARRAY_COUNT(kRecursiveLimits)] = { 0 };
+#endif
+#ifndef DEBUG_QUAD_STROKER
+ #define DEBUG_QUAD_STROKER 0
+#endif
+
+#if DEBUG_QUAD_STROKER
+ /* Enable to show the decisions made in subdividing the curve -- helpful when the resulting
+ stroke has more than the optimal number of quadratics and lines */
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ SkDebugf("[%d] %s " format "\n", depth, __FUNCTION__, __VA_ARGS__), \
+ SkDebugf(" " #resultType " t=(%g,%g)\n", quadPts->fStartT, quadPts->fEndT), \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...) , __VA_ARGS__
+#else
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...)
+#endif
+
+static inline bool degenerate_vector(const SkVector& v) {
+ return !SkPointPriv::CanNormalize(v.fX, v.fY);
+}
+
+static bool set_normal_unitnormal(const SkPoint& before, const SkPoint& after, SkScalar scale,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize((after.fX - before.fX) * scale,
+ (after.fY - before.fY) * scale)) {
+ return false;
+ }
+ SkPointPriv::RotateCCW(unitNormal);
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+static bool set_normal_unitnormal(const SkVector& vec,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize(vec.fX, vec.fY)) {
+ return false;
+ }
+ SkPointPriv::RotateCCW(unitNormal);
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkQuadConstruct { // The state of the quad stroke under construction.
+ SkPoint fQuad[3]; // the stroked quad parallel to the original curve
+ SkPoint fTangentStart; // a point tangent to fQuad[0]
+ SkPoint fTangentEnd; // a point tangent to fQuad[2]
+ SkScalar fStartT; // a segment of the original curve
+ SkScalar fMidT; // "
+ SkScalar fEndT; // "
+ bool fStartSet; // state to share common points across structs
+ bool fEndSet; // "
+ bool fOppositeTangents; // set if coincident tangents have opposite directions
+
+ // return false if start and end are too close to have a unique middle
+ bool init(SkScalar start, SkScalar end) {
+ fStartT = start;
+ fMidT = (start + end) * SK_ScalarHalf;
+ fEndT = end;
+ fStartSet = fEndSet = false;
+ return fStartT < fMidT && fMidT < fEndT;
+ }
+
+ bool initWithStart(SkQuadConstruct* parent) {
+ if (!init(parent->fStartT, parent->fMidT)) {
+ return false;
+ }
+ fQuad[0] = parent->fQuad[0];
+ fTangentStart = parent->fTangentStart;
+ fStartSet = true;
+ return true;
+ }
+
+ bool initWithEnd(SkQuadConstruct* parent) {
+ if (!init(parent->fMidT, parent->fEndT)) {
+ return false;
+ }
+ fQuad[2] = parent->fQuad[2];
+ fTangentEnd = parent->fTangentEnd;
+ fEndSet = true;
+ return true;
+ }
+};
+
+class SkPathStroker {
+public:
+ SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit, SkPaint::Cap,
+ SkPaint::Join, SkScalar resScale,
+ bool canIgnoreCenter);
+
+ bool hasOnlyMoveTo() const { return 0 == fSegmentCount; }
+ SkPoint moveToPt() const { return fFirstPt; }
+
+ void moveTo(const SkPoint&);
+ void lineTo(const SkPoint&, const SkPath::Iter* iter = nullptr);
+ void quadTo(const SkPoint&, const SkPoint&);
+ void conicTo(const SkPoint&, const SkPoint&, SkScalar weight);
+ void cubicTo(const SkPoint&, const SkPoint&, const SkPoint&);
+ void close(bool isLine) { this->finishContour(true, isLine); }
+
+ void done(SkPath* dst, bool isLine) {
+ this->finishContour(false, isLine);
+ dst->swap(fOuter);
+ }
+
+ SkScalar getResScale() const { return fResScale; }
+
+ bool isCurrentContourEmpty() const {
+ return fInner.isZeroLengthSincePoint(0) &&
+ fOuter.isZeroLengthSincePoint(fFirstOuterPtIndexInContour);
+ }
+
+private:
+ SkScalar fRadius;
+ SkScalar fInvMiterLimit;
+ SkScalar fResScale;
+ SkScalar fInvResScale;
+ SkScalar fInvResScaleSquared;
+
+ SkVector fFirstNormal, fPrevNormal, fFirstUnitNormal, fPrevUnitNormal;
+ SkPoint fFirstPt, fPrevPt; // on original path
+ SkPoint fFirstOuterPt;
+ int fFirstOuterPtIndexInContour;
+ int fSegmentCount;
+ bool fPrevIsLine;
+ bool fCanIgnoreCenter;
+
+ SkStrokerPriv::CapProc fCapper;
+ SkStrokerPriv::JoinProc fJoiner;
+
+ SkPath fInner, fOuter, fCusper; // outer is our working answer, inner is temp
+
+ enum StrokeType {
+ kOuter_StrokeType = 1, // use sign-opposite values later to flip perpendicular axis
+ kInner_StrokeType = -1
+ } fStrokeType;
+
+ enum ResultType {
+ kSplit_ResultType, // the caller should split the quad stroke in two
+ kDegenerate_ResultType, // the caller should add a line
+ kQuad_ResultType, // the caller should (continue to try to) add a quad stroke
+ };
+
+ enum ReductionType {
+ kPoint_ReductionType, // all curve points are practically identical
+ kLine_ReductionType, // the control point is on the line between the ends
+ kQuad_ReductionType, // the control point is outside the line between the ends
+ kDegenerate_ReductionType, // the control point is on the line but outside the ends
+ kDegenerate2_ReductionType, // two control points are on the line but outside ends (cubic)
+ kDegenerate3_ReductionType, // three areas of max curvature found (for cubic)
+ };
+
+ enum IntersectRayType {
+ kCtrlPt_RayType,
+ kResultType_RayType,
+ };
+
+ int fRecursionDepth; // track stack depth to abort if numerics run amok
+ bool fFoundTangents; // do less work until tangents meet (cubic)
+ bool fJoinCompleted; // previous join was not degenerate
+
+ void addDegenerateLine(const SkQuadConstruct* );
+ static ReductionType CheckConicLinear(const SkConic& , SkPoint* reduction);
+ static ReductionType CheckCubicLinear(const SkPoint cubic[4], SkPoint reduction[3],
+ const SkPoint** tanPtPtr);
+ static ReductionType CheckQuadLinear(const SkPoint quad[3], SkPoint* reduction);
+ ResultType compareQuadConic(const SkConic& , SkQuadConstruct* ) const;
+ ResultType compareQuadCubic(const SkPoint cubic[4], SkQuadConstruct* );
+ ResultType compareQuadQuad(const SkPoint quad[3], SkQuadConstruct* );
+ void conicPerpRay(const SkConic& , SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void conicQuadEnds(const SkConic& , SkQuadConstruct* ) const;
+ bool conicStroke(const SkConic& , SkQuadConstruct* );
+ bool cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* ) const;
+ void cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* );
+ void cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* , SkPoint* mid) const;
+ bool cubicStroke(const SkPoint cubic[4], SkQuadConstruct* );
+ void init(StrokeType strokeType, SkQuadConstruct* , SkScalar tStart, SkScalar tEnd);
+ ResultType intersectRay(SkQuadConstruct* , IntersectRayType STROKER_DEBUG_PARAMS(int) ) const;
+ bool ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const;
+ void quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ bool quadStroke(const SkPoint quad[3], SkQuadConstruct* );
+ void setConicEndNormal(const SkConic& ,
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setCubicEndNormal(const SkPoint cubic[4],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalCD, SkVector* unitNormalCD);
+ void setQuadEndNormal(const SkPoint quad[3],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt, SkPoint* tangent) const;
+ static bool SlightAngle(SkQuadConstruct* );
+ ResultType strokeCloseEnough(const SkPoint stroke[3], const SkPoint ray[2],
+ SkQuadConstruct* STROKER_DEBUG_PARAMS(int depth) ) const;
+ ResultType tangentsMeet(const SkPoint cubic[4], SkQuadConstruct* );
+
+ void finishContour(bool close, bool isLine);
+ bool preJoinTo(const SkPoint&, SkVector* normal, SkVector* unitNormal,
+ bool isLine);
+ void postJoinTo(const SkPoint&, const SkVector& normal,
+ const SkVector& unitNormal);
+
+ void line_to(const SkPoint& currPt, const SkVector& normal);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPathStroker::preJoinTo(const SkPoint& currPt, SkVector* normal,
+ SkVector* unitNormal, bool currIsLine) {
+ SkASSERT(fSegmentCount >= 0);
+
+ SkScalar prevX = fPrevPt.fX;
+ SkScalar prevY = fPrevPt.fY;
+
+ if (!set_normal_unitnormal(fPrevPt, currPt, fResScale, fRadius, normal, unitNormal)) {
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper) {
+ return false;
+ }
+ /* Square caps and round caps draw even if the segment length is zero.
+ Since the zero length segment has no direction, set the orientation
+ to upright as the default orientation */
+ normal->set(fRadius, 0);
+ unitNormal->set(1, 0);
+ }
+
+ if (fSegmentCount == 0) {
+ fFirstNormal = *normal;
+ fFirstUnitNormal = *unitNormal;
+ fFirstOuterPt.set(prevX + normal->fX, prevY + normal->fY);
+
+ fOuter.moveTo(fFirstOuterPt.fX, fFirstOuterPt.fY);
+ fInner.moveTo(prevX - normal->fX, prevY - normal->fY);
+ } else { // we have a previous segment
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt, *unitNormal,
+ fRadius, fInvMiterLimit, fPrevIsLine, currIsLine);
+ }
+ fPrevIsLine = currIsLine;
+ return true;
+}
+
+void SkPathStroker::postJoinTo(const SkPoint& currPt, const SkVector& normal,
+ const SkVector& unitNormal) {
+ fJoinCompleted = true;
+ fPrevPt = currPt;
+ fPrevUnitNormal = unitNormal;
+ fPrevNormal = normal;
+ fSegmentCount += 1;
+}
+
+void SkPathStroker::finishContour(bool close, bool currIsLine) {
+ if (fSegmentCount > 0) {
+ SkPoint pt;
+
+ if (close) {
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt,
+ fFirstUnitNormal, fRadius, fInvMiterLimit,
+ fPrevIsLine, currIsLine);
+ fOuter.close();
+
+ if (fCanIgnoreCenter) {
+ // If we can ignore the center, just make sure the larger of the two paths
+ // is preserved and don't add the smaller one.
+ if (fInner.getBounds().contains(fOuter.getBounds())) {
+ fInner.swap(fOuter);
+ }
+ } else {
+ // now add fInner as its own contour
+ fInner.getLastPt(&pt);
+ fOuter.moveTo(pt.fX, pt.fY);
+ fOuter.reversePathTo(fInner);
+ fOuter.close();
+ }
+ } else { // add caps to start and end
+ // cap the end
+ fInner.getLastPt(&pt);
+ fCapper(&fOuter, fPrevPt, fPrevNormal, pt,
+ currIsLine ? &fInner : nullptr);
+ fOuter.reversePathTo(fInner);
+ // cap the start
+ fCapper(&fOuter, fFirstPt, -fFirstNormal, fFirstOuterPt,
+ fPrevIsLine ? &fInner : nullptr);
+ fOuter.close();
+ }
+ if (!fCusper.isEmpty()) {
+ fOuter.addPath(fCusper);
+ fCusper.rewind();
+ }
+ }
+ // since we may re-use fInner, we rewind instead of reset, to save on
+ // reallocating its internal storage.
+ fInner.rewind();
+ fSegmentCount = -1;
+ fFirstOuterPtIndexInContour = fOuter.countPoints();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPathStroker::SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit,
+ SkPaint::Cap cap, SkPaint::Join join, SkScalar resScale,
+ bool canIgnoreCenter)
+ : fRadius(radius)
+ , fResScale(resScale)
+ , fCanIgnoreCenter(canIgnoreCenter) {
+
+ /* This is only used when join is miter_join, but we initialize it here
+ so that it is always defined, to fix valgrind warnings.
+ */
+ fInvMiterLimit = 0;
+
+ if (join == SkPaint::kMiter_Join) {
+ if (miterLimit <= SK_Scalar1) {
+ join = SkPaint::kBevel_Join;
+ } else {
+ fInvMiterLimit = SkScalarInvert(miterLimit);
+ }
+ }
+ fCapper = SkStrokerPriv::CapFactory(cap);
+ fJoiner = SkStrokerPriv::JoinFactory(join);
+ fSegmentCount = -1;
+ fFirstOuterPtIndexInContour = 0;
+ fPrevIsLine = false;
+
+ // Need some estimate of how large our final result (fOuter)
+ // and our per-contour temp (fInner) will be, so we don't spend
+ // extra time repeatedly growing these arrays.
+ //
+ // 3x for result == inner + outer + join (swag)
+ // 1x for inner == 'wag' (worst contour length would be better guess)
+ fOuter.incReserve(src.countPoints() * 3);
+ fOuter.setIsVolatile(true);
+ fInner.incReserve(src.countPoints());
+ fInner.setIsVolatile(true);
+ // TODO : write a common error function used by stroking and filling
+ // The '4' below matches the fill scan converter's error term
+ fInvResScale = SkScalarInvert(resScale * 4);
+ fInvResScaleSquared = fInvResScale * fInvResScale;
+ fRecursionDepth = 0;
+}
+
+void SkPathStroker::moveTo(const SkPoint& pt) {
+ if (fSegmentCount > 0) {
+ this->finishContour(false, false);
+ }
+ fSegmentCount = 0;
+ fFirstPt = fPrevPt = pt;
+ fJoinCompleted = false;
+}
+
+void SkPathStroker::line_to(const SkPoint& currPt, const SkVector& normal) {
+ fOuter.lineTo(currPt.fX + normal.fX, currPt.fY + normal.fY);
+ fInner.lineTo(currPt.fX - normal.fX, currPt.fY - normal.fY);
+}
+
+static bool has_valid_tangent(const SkPath::Iter* iter) {
+ SkPath::Iter copy = *iter;
+ SkPath::Verb verb;
+ SkPoint pts[4];
+ while ((verb = copy.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ return false;
+ case SkPath::kLine_Verb:
+ if (pts[0] == pts[1]) {
+ continue;
+ }
+ return true;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2]) {
+ continue;
+ }
+ return true;
+ case SkPath::kCubic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2] && pts[0] == pts[3]) {
+ continue;
+ }
+ return true;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ return false;
+ }
+ }
+ return false;
+}
+
+void SkPathStroker::lineTo(const SkPoint& currPt, const SkPath::Iter* iter) {
+ bool teenyLine = SkPointPriv::EqualsWithinTolerance(fPrevPt, currPt,
+ SK_ScalarNearlyZero * fInvResScale);
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper && teenyLine) {
+ return;
+ }
+ if (teenyLine && (fJoinCompleted || (iter && has_valid_tangent(iter)))) {
+ return;
+ }
+ SkVector normal, unitNormal;
+
+ if (!this->preJoinTo(currPt, &normal, &unitNormal, true)) {
+ return;
+ }
+ this->line_to(currPt, normal);
+ this->postJoinTo(currPt, normal, unitNormal);
+}
+
+void SkPathStroker::setQuadEndNormal(const SkPoint quad[3], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ if (!set_normal_unitnormal(quad[1], quad[2], fResScale, fRadius, normalBC, unitNormalBC)) {
+ *normalBC = normalAB;
+ *unitNormalBC = unitNormalAB;
+ }
+}
+
+void SkPathStroker::setConicEndNormal(const SkConic& conic, const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ setQuadEndNormal(conic.fPts, normalAB, unitNormalAB, normalBC, unitNormalBC);
+}
+
+void SkPathStroker::setCubicEndNormal(const SkPoint cubic[4], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalCD, SkVector* unitNormalCD) {
+ SkVector ab = cubic[1] - cubic[0];
+ SkVector cd = cubic[3] - cubic[2];
+
+ bool degenerateAB = degenerate_vector(ab);
+ bool degenerateCD = degenerate_vector(cd);
+
+ if (degenerateAB && degenerateCD) {
+ goto DEGENERATE_NORMAL;
+ }
+
+ if (degenerateAB) {
+ ab = cubic[2] - cubic[0];
+ degenerateAB = degenerate_vector(ab);
+ }
+ if (degenerateCD) {
+ cd = cubic[3] - cubic[1];
+ degenerateCD = degenerate_vector(cd);
+ }
+ if (degenerateAB || degenerateCD) {
+DEGENERATE_NORMAL:
+ *normalCD = normalAB;
+ *unitNormalCD = unitNormalAB;
+ return;
+ }
+ SkAssertResult(set_normal_unitnormal(cd, fRadius, normalCD, unitNormalCD));
+}
+
+void SkPathStroker::init(StrokeType strokeType, SkQuadConstruct* quadPts, SkScalar tStart,
+ SkScalar tEnd) {
+ fStrokeType = strokeType;
+ fFoundTangents = false;
+ quadPts->init(tStart, tEnd);
+}
+
+ // returns the distance squared from the point to the line; if the point's
+ // projection falls outside the segment, the distance squared to lineStart is used
+static SkScalar pt_to_line(const SkPoint& pt, const SkPoint& lineStart, const SkPoint& lineEnd) {
+ SkVector dxy = lineEnd - lineStart;
+ SkVector ab0 = pt - lineStart;
+ SkScalar numer = dxy.dot(ab0);
+ SkScalar denom = dxy.dot(dxy);
+ SkScalar t = sk_ieee_float_divide(numer, denom);
+ if (t >= 0 && t <= 1) {
+ SkPoint hit;
+ hit.fX = lineStart.fX * (1 - t) + lineEnd.fX * t;
+ hit.fY = lineStart.fY * (1 - t) + lineEnd.fY * t;
+ return SkPointPriv::DistanceToSqd(hit, pt);
+ } else {
+ return SkPointPriv::DistanceToSqd(pt, lineStart);
+ }
+}
+
+/* Given a cubic, determine if all four points are in a line.
+ Return true if the inner points are close to a line connecting the outermost points.
+
+ Find the outermost point by looking for the largest difference in X or Y.
+ Given the indices of the outermost points, and that outer_1 is greater than outer_2,
+ this table shows the index of the smaller of the remaining points:
+
+ outer_2
+ 0 1 2 3
+ outer_1 ----------------
+ 0 | - 2 1 1
+ 1 | - - 0 0
+ 2 | - - - 0
+ 3 | - - - -
+
+ If outer_1 == 0 and outer_2 == 1, the smaller of the remaining indices (2 and 3) is 2.
+
+ This table can be collapsed to: (1 + (2 >> outer_2)) >> outer_1
+
+ Given three indices (outer_1 outer_2 mid_1) from 0..3, the remaining index is:
+
+ mid_2 == (outer_1 ^ outer_2 ^ mid_1)
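+
+ For example, if outer_1 == 1 and outer_2 == 3, then
+ mid_1 == (1 + (2 >> 3)) >> 1 == (1 + 0) >> 1 == 0 and mid_2 == 1 ^ 3 ^ 0 == 2,
+ so the points tested against the line are cubic[0] and cubic[2].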
+ */
+static bool cubic_in_line(const SkPoint cubic[4]) {
+ SkScalar ptMax = -1;
+ int outer1 SK_INIT_TO_AVOID_WARNING;
+ int outer2 SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < 3; ++index) {
+ for (int inner = index + 1; inner < 4; ++inner) {
+ SkVector testDiff = cubic[inner] - cubic[index];
+ SkScalar testMax = SkTMax(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+ if (ptMax < testMax) {
+ outer1 = index;
+ outer2 = inner;
+ ptMax = testMax;
+ }
+ }
+ }
+ SkASSERT(outer1 >= 0 && outer1 <= 2);
+ SkASSERT(outer2 >= 1 && outer2 <= 3);
+ SkASSERT(outer1 < outer2);
+ int mid1 = (1 + (2 >> outer2)) >> outer1;
+ SkASSERT(mid1 >= 0 && mid1 <= 2);
+ SkASSERT(outer1 != mid1 && outer2 != mid1);
+ int mid2 = outer1 ^ outer2 ^ mid1;
+ SkASSERT(mid2 >= 1 && mid2 <= 3);
+ SkASSERT(mid2 != outer1 && mid2 != outer2 && mid2 != mid1);
+ SkASSERT(((1 << outer1) | (1 << outer2) | (1 << mid1) | (1 << mid2)) == 0x0f);
+ SkScalar lineSlop = ptMax * ptMax * 0.00001f; // this multiplier is pulled out of the air
+ return pt_to_line(cubic[mid1], cubic[outer1], cubic[outer2]) <= lineSlop
+ && pt_to_line(cubic[mid2], cubic[outer1], cubic[outer2]) <= lineSlop;
+}
+
+/* Given a quad, see if all three points are in a line.
+ Return true if the inside point is close to a line connecting the outermost points.
+
+ Find the outermost point by looking for the largest difference in X or Y.
+ Since the XOR of the indices is 3 (0 ^ 1 ^ 2)
+ the missing index equals: outer_1 ^ outer_2 ^ 3
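+ (For example, outer_1 == 0 and outer_2 == 2 leave mid == 0 ^ 2 ^ 3 == 1.)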
+ */
+static bool quad_in_line(const SkPoint quad[3]) {
+ SkScalar ptMax = -1;
+ int outer1 SK_INIT_TO_AVOID_WARNING;
+ int outer2 SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < 2; ++index) {
+ for (int inner = index + 1; inner < 3; ++inner) {
+ SkVector testDiff = quad[inner] - quad[index];
+ SkScalar testMax = SkTMax(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+ if (ptMax < testMax) {
+ outer1 = index;
+ outer2 = inner;
+ ptMax = testMax;
+ }
+ }
+ }
+ SkASSERT(outer1 >= 0 && outer1 <= 1);
+ SkASSERT(outer2 >= 1 && outer2 <= 2);
+ SkASSERT(outer1 < outer2);
+ int mid = outer1 ^ outer2 ^ 3;
+ const float kCurvatureSlop = 0.000005f; // this multiplier is pulled out of the air
+ SkScalar lineSlop = ptMax * ptMax * kCurvatureSlop;
+ return pt_to_line(quad[mid], quad[outer1], quad[outer2]) <= lineSlop;
+}
+
+static bool conic_in_line(const SkConic& conic) {
+ return quad_in_line(conic.fPts);
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckCubicLinear(const SkPoint cubic[4],
+ SkPoint reduction[3], const SkPoint** tangentPtPtr) {
+ bool degenerateAB = degenerate_vector(cubic[1] - cubic[0]);
+ bool degenerateBC = degenerate_vector(cubic[2] - cubic[1]);
+ bool degenerateCD = degenerate_vector(cubic[3] - cubic[2]);
+ if (degenerateAB & degenerateBC & degenerateCD) {
+ return kPoint_ReductionType;
+ }
+ if (degenerateAB + degenerateBC + degenerateCD == 2) {
+ return kLine_ReductionType;
+ }
+ if (!cubic_in_line(cubic)) {
+ *tangentPtPtr = degenerateAB ? &cubic[2] : &cubic[1];
+ return kQuad_ReductionType;
+ }
+ SkScalar tValues[3];
+ int count = SkFindCubicMaxCurvature(cubic, tValues);
+ int rCount = 0;
+ // Now loop over the t-values, and reject any that evaluate to either end-point
+ for (int index = 0; index < count; ++index) {
+ SkScalar t = tValues[index];
+ if (0 >= t || t >= 1) {
+ continue;
+ }
+ SkEvalCubicAt(cubic, t, &reduction[rCount], nullptr, nullptr);
+ if (reduction[rCount] != cubic[0] && reduction[rCount] != cubic[3]) {
+ ++rCount;
+ }
+ }
+ if (rCount == 0) {
+ return kLine_ReductionType;
+ }
+ static_assert(kQuad_ReductionType + 1 == kDegenerate_ReductionType, "enum_out_of_whack");
+ static_assert(kQuad_ReductionType + 2 == kDegenerate2_ReductionType, "enum_out_of_whack");
+ static_assert(kQuad_ReductionType + 3 == kDegenerate3_ReductionType, "enum_out_of_whack");
+
+ return (ReductionType) (kQuad_ReductionType + rCount);
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckConicLinear(const SkConic& conic,
+ SkPoint* reduction) {
+ bool degenerateAB = degenerate_vector(conic.fPts[1] - conic.fPts[0]);
+ bool degenerateBC = degenerate_vector(conic.fPts[2] - conic.fPts[1]);
+ if (degenerateAB & degenerateBC) {
+ return kPoint_ReductionType;
+ }
+ if (degenerateAB | degenerateBC) {
+ return kLine_ReductionType;
+ }
+ if (!conic_in_line(conic)) {
+ return kQuad_ReductionType;
+ }
+ // SkFindConicMaxCurvature would be a better solution, once we know how to
+ // implement it. Quad curvature is a reasonable substitute
+ SkScalar t = SkFindQuadMaxCurvature(conic.fPts);
+ if (0 == t) {
+ return kLine_ReductionType;
+ }
+ conic.evalAt(t, reduction, nullptr);
+ return kDegenerate_ReductionType;
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckQuadLinear(const SkPoint quad[3],
+ SkPoint* reduction) {
+ bool degenerateAB = degenerate_vector(quad[1] - quad[0]);
+ bool degenerateBC = degenerate_vector(quad[2] - quad[1]);
+ if (degenerateAB & degenerateBC) {
+ return kPoint_ReductionType;
+ }
+ if (degenerateAB | degenerateBC) {
+ return kLine_ReductionType;
+ }
+ if (!quad_in_line(quad)) {
+ return kQuad_ReductionType;
+ }
+ SkScalar t = SkFindQuadMaxCurvature(quad);
+ if (0 == t || 1 == t) {
+ return kLine_ReductionType;
+ }
+ *reduction = SkEvalQuadAt(quad, t);
+ return kDegenerate_ReductionType;
+}
+
+void SkPathStroker::conicTo(const SkPoint& pt1, const SkPoint& pt2, SkScalar weight) {
+ const SkConic conic(fPrevPt, pt1, pt2, weight);
+ SkPoint reduction;
+ ReductionType reductionType = CheckConicLinear(conic, &reduction);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt2);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt2);
+ return;
+ }
+ if (kDegenerate_ReductionType == reductionType) {
+ this->lineTo(reduction);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ this->lineTo(pt2);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalBC, unitBC;
+ if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+ this->lineTo(pt2);
+ return;
+ }
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, 0, 1);
+ (void) this->conicStroke(conic, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, 0, 1);
+ (void) this->conicStroke(conic, &quadPts);
+ this->setConicEndNormal(conic, normalAB, unitAB, &normalBC, &unitBC);
+ this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+void SkPathStroker::quadTo(const SkPoint& pt1, const SkPoint& pt2) {
+ const SkPoint quad[3] = { fPrevPt, pt1, pt2 };
+ SkPoint reduction;
+ ReductionType reductionType = CheckQuadLinear(quad, &reduction);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt2);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt2);
+ return;
+ }
+ if (kDegenerate_ReductionType == reductionType) {
+ this->lineTo(reduction);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ this->lineTo(pt2);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalBC, unitBC;
+ if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+ this->lineTo(pt2);
+ return;
+ }
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, 0, 1);
+ (void) this->quadStroke(quad, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, 0, 1);
+ (void) this->quadStroke(quad, &quadPts);
+ this->setQuadEndNormal(quad, normalAB, unitAB, &normalBC, &unitBC);
+
+ this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+// Given a point on the curve and its derivative, scale the derivative by the radius, and
+// compute the perpendicular point and its tangent.
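+// The (fY, -fX) swizzle below rotates the radius-length tangent dxy by 90 degrees,
+// so onPt lands perpendicular to the curve on the side selected by fStrokeType.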
+void SkPathStroker::setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt,
+ SkPoint* tangent) const {
+ if (!dxy->setLength(fRadius)) {
+ dxy->set(fRadius, 0);
+ }
+ SkScalar axisFlip = SkIntToScalar(fStrokeType); // go opposite ways for outer, inner
+ onPt->fX = tPt.fX + axisFlip * dxy->fY;
+ onPt->fY = tPt.fY - axisFlip * dxy->fX;
+ if (tangent) {
+ tangent->fX = onPt->fX + dxy->fX;
+ tangent->fY = onPt->fY + dxy->fY;
+ }
+}
+
+// Given a conic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+// If the derivative collapses to zero at t, the conic's chord (fPts[2] - fPts[0]) supplies the direction.
+void SkPathStroker::conicPerpRay(const SkConic& conic, SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ conic.evalAt(t, tPt, &dxy);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = conic.fPts[2] - conic.fPts[0];
+ }
+ this->setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a conic and a t range, find the start and end if they haven't been found already.
+void SkPathStroker::conicQuadEnds(const SkConic& conic, SkQuadConstruct* quadPts) const {
+ if (!quadPts->fStartSet) {
+ SkPoint conicStartPt;
+ this->conicPerpRay(conic, quadPts->fStartT, &conicStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint conicEndPt;
+ this->conicPerpRay(conic, quadPts->fEndT, &conicEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+}
+
+
+// Given a cubic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+void SkPathStroker::cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ SkPoint chopped[7];
+ SkEvalCubicAt(cubic, t, tPt, &dxy, nullptr);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ const SkPoint* cPts = cubic;
+ if (SkScalarNearlyZero(t)) {
+ dxy = cubic[2] - cubic[0];
+ } else if (SkScalarNearlyZero(1 - t)) {
+ dxy = cubic[3] - cubic[1];
+ } else {
+ // If the cubic inflection falls on the cusp, subdivide the cubic
+ // to find the tangent at that point.
+ SkChopCubicAt(cubic, chopped, t);
+ dxy = chopped[3] - chopped[2];
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = chopped[3] - chopped[1];
+ cPts = chopped;
+ }
+ }
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = cPts[3] - cPts[0];
+ }
+ }
+ setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a cubic and a t range, find the start and end if they haven't been found already.
+void SkPathStroker::cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+ if (!quadPts->fStartSet) {
+ SkPoint cubicStartPt;
+ this->cubicPerpRay(cubic, quadPts->fStartT, &cubicStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint cubicEndPt;
+ this->cubicPerpRay(cubic, quadPts->fEndT, &cubicEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+}
+
+void SkPathStroker::cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* quadPts,
+ SkPoint* mid) const {
+ SkPoint cubicMidPt;
+ this->cubicPerpRay(cubic, quadPts->fMidT, &cubicMidPt, mid, nullptr);
+}
+
+// Given a quad and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+void SkPathStroker::quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ SkEvalQuadAt(quad, t, tPt, &dxy);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = quad[2] - quad[0];
+ }
+ setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Find the intersection of the stroke tangents to construct a stroke quad.
+// Return whether the stroke is a degenerate (a line), a quad, or must be split.
+// Optionally compute the quad's control point.
+SkPathStroker::ResultType SkPathStroker::intersectRay(SkQuadConstruct* quadPts,
+ IntersectRayType intersectRayType STROKER_DEBUG_PARAMS(int depth)) const {
+ const SkPoint& start = quadPts->fQuad[0];
+ const SkPoint& end = quadPts->fQuad[2];
+ SkVector aLen = quadPts->fTangentStart - start;
+ SkVector bLen = quadPts->fTangentEnd - end;
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen ( == denom )
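+
+ SkPoint::cross computes exactly this:
+ aLen.cross(bLen) == aLen.fX * bLen.fY - aLen.fY * bLen.fX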
+ */
+ SkScalar denom = aLen.cross(bLen);
+ if (denom == 0 || !SkScalarIsFinite(denom)) {
+ quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts, "denom == 0");
+ }
+ quadPts->fOppositeTangents = false;
+ SkVector ab0 = start - end;
+ SkScalar numerA = bLen.cross(ab0);
+ SkScalar numerB = aLen.cross(ab0);
+ if ((numerA >= 0) == (numerB >= 0)) { // if the control point is outside the quad ends
+ // if the perpendicular distances from the quad points to the opposite tangent line
+ // are small, a straight line is good enough
+ SkScalar dist1 = pt_to_line(start, end, quadPts->fTangentEnd);
+ SkScalar dist2 = pt_to_line(end, start, quadPts->fTangentStart);
+ if (SkTMax(dist1, dist2) <= fInvResScaleSquared) {
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+ "SkTMax(dist1=%g, dist2=%g) <= fInvResScaleSquared", dist1, dist2);
+ }
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "(numerA=%g >= 0) == (numerB=%g >= 0)", numerA, numerB);
+ }
+ // check to see if the denominator is teeny relative to the numerator
+ // if the offset by one will be lost, the ratio is too large
+ numerA /= denom;
+ bool validDivide = numerA > numerA - 1;
+ if (validDivide) {
+ if (kCtrlPt_RayType == intersectRayType) {
+ SkPoint* ctrlPt = &quadPts->fQuad[1];
+ // the intersection of the tangents need not be on the tangent segment
+ // so 0 <= numerA <= 1 is not necessarily true
+ ctrlPt->fX = start.fX * (1 - numerA) + quadPts->fTangentStart.fX * numerA;
+ ctrlPt->fY = start.fY * (1 - numerA) + quadPts->fTangentStart.fY * numerA;
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "(numerA=%g >= 0) != (numerB=%g >= 0)", numerA, numerB);
+ }
+ quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+ // if the lines are parallel, straight line is good enough
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+ "SkScalarNearlyZero(denom=%g)", denom);
+}
+
+// Given a cubic and a t-range, determine if the stroke can be described by a quadratic.
+SkPathStroker::ResultType SkPathStroker::tangentsMeet(const SkPoint cubic[4],
+ SkQuadConstruct* quadPts) {
+ this->cubicQuadEnds(cubic, quadPts);
+ return this->intersectRay(quadPts, kResultType_RayType STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Intersect the line with the quad and return the t values on the quad where the line crosses.
+static int intersect_quad_ray(const SkPoint line[2], const SkPoint quad[3], SkScalar roots[2]) {
+ SkVector vec = line[1] - line[0];
+ SkScalar r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (quad[n].fY - line[0].fY) * vec.fX - (quad[n].fX - line[0].fX) * vec.fY;
+ }
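+ // r[n] is the signed, |vec|-scaled distance from quad[n] to the line, so the
+ // crossing t-values are the roots of (1-t)^2*r0 + 2t(1-t)*r1 + t^2*r2 == 0;
+ // the power-basis coefficients of that quadratic are assembled below.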
+ SkScalar A = r[2];
+ SkScalar B = r[1];
+ SkScalar C = r[0];
+ A += C - 2 * B; // A = a - 2*b + c
+ B -= C; // B = b - c
+ return SkFindUnitQuadRoots(A, 2 * B, C, roots);
+}
+
+// Return true if the point is close to the bounds of the quad. This is used as a quick reject.
+bool SkPathStroker::ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const {
+ SkScalar xMin = SkTMin(SkTMin(quad[0].fX, quad[1].fX), quad[2].fX);
+ if (pt.fX + fInvResScale < xMin) {
+ return false;
+ }
+ SkScalar xMax = SkTMax(SkTMax(quad[0].fX, quad[1].fX), quad[2].fX);
+ if (pt.fX - fInvResScale > xMax) {
+ return false;
+ }
+ SkScalar yMin = SkTMin(SkTMin(quad[0].fY, quad[1].fY), quad[2].fY);
+ if (pt.fY + fInvResScale < yMin) {
+ return false;
+ }
+ SkScalar yMax = SkTMax(SkTMax(quad[0].fY, quad[1].fY), quad[2].fY);
+ if (pt.fY - fInvResScale > yMax) {
+ return false;
+ }
+ return true;
+}
+
+static bool points_within_dist(const SkPoint& nearPt, const SkPoint& farPt, SkScalar limit) {
+ return SkPointPriv::DistanceToSqd(nearPt, farPt) <= limit * limit;
+}
+
+static bool sharp_angle(const SkPoint quad[3]) {
+ SkVector smaller = quad[1] - quad[0];
+ SkVector larger = quad[1] - quad[2];
+ SkScalar smallerLen = SkPointPriv::LengthSqd(smaller);
+ SkScalar largerLen = SkPointPriv::LengthSqd(larger);
+ if (smallerLen > largerLen) {
+ using std::swap;
+ swap(smaller, larger);
+ largerLen = smallerLen;
+ }
+ if (!smaller.setLength(largerLen)) {
+ return false;
+ }
+ SkScalar dot = smaller.dot(larger);
+ return dot > 0;
+}
+
+SkPathStroker::ResultType SkPathStroker::strokeCloseEnough(const SkPoint stroke[3],
+ const SkPoint ray[2], SkQuadConstruct* quadPts STROKER_DEBUG_PARAMS(int depth)) const {
+ SkPoint strokeMid = SkEvalQuadAt(stroke, SK_ScalarHalf);
+ // measure the distance from the curve to the quad-stroke midpoint, compare to radius
+ if (points_within_dist(ray[0], strokeMid, fInvResScale)) { // if the difference is small
+ if (sharp_angle(quadPts->fQuad)) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "sharp_angle (1) =%g,%g, %g,%g, %g,%g",
+ quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+ quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+ quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "points_within_dist(ray[0]=%g,%g, strokeMid=%g,%g, fInvResScale=%g)",
+ ray[0].fX, ray[0].fY, strokeMid.fX, strokeMid.fY, fInvResScale);
+ }
+ // measure the distance to quad's bounds (quick reject)
+ // an alternative: look for point in triangle
+ if (!ptInQuadBounds(stroke, ray[0])) { // if far, subdivide
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "!pt_in_quad_bounds(stroke=(%g,%g %g,%g %g,%g), ray[0]=%g,%g)",
+ stroke[0].fX, stroke[0].fY, stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY,
+ ray[0].fX, ray[0].fY);
+ }
+ // measure the curve ray distance to the quad-stroke
+ SkScalar roots[2];
+ int rootCount = intersect_quad_ray(ray, stroke, roots);
+ if (rootCount != 1) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "rootCount=%d != 1", rootCount);
+ }
+ SkPoint quadPt = SkEvalQuadAt(stroke, roots[0]);
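+ // the tolerance tapers linearly from fInvResScale at the quad's midpoint
+ // (roots[0] == 0.5) down to zero at either end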
+ SkScalar error = fInvResScale * (SK_Scalar1 - SkScalarAbs(roots[0] - 0.5f) * 2);
+ if (points_within_dist(ray[0], quadPt, error)) { // if the difference is small, we're done
+ if (sharp_angle(quadPts->fQuad)) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "sharp_angle (2) =%g,%g, %g,%g, %g,%g",
+ quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+ quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+ quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "points_within_dist(ray[0]=%g,%g, quadPt=%g,%g, error=%g)",
+ ray[0].fX, ray[0].fY, quadPt.fX, quadPt.fY, error);
+ }
+ // otherwise, subdivide
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts, "%s", "fall through");
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadCubic(const SkPoint cubic[4],
+ SkQuadConstruct* quadPts) {
+ // get the quadratic approximation of the stroke
+ this->cubicQuadEnds(cubic, quadPts);
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth) );
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2]; // points near midpoint on quad, midpoint on cubic
+ this->cubicPerpRay(cubic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadConic(const SkConic& conic,
+ SkQuadConstruct* quadPts) const {
+ // get the quadratic approximation of the stroke
+ this->conicQuadEnds(conic, quadPts);
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth) );
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2]; // points near midpoint on quad, midpoint on conic
+ this->conicPerpRay(conic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadQuad(const SkPoint quad[3],
+ SkQuadConstruct* quadPts) {
+ // get the quadratic approximation of the stroke
+ if (!quadPts->fStartSet) {
+ SkPoint quadStartPt;
+ this->quadPerpRay(quad, quadPts->fStartT, &quadStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint quadEndPt;
+ this->quadPerpRay(quad, quadPts->fEndT, &quadEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2];
+ this->quadPerpRay(quad, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+void SkPathStroker::addDegenerateLine(const SkQuadConstruct* quadPts) {
+ const SkPoint* quad = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->lineTo(quad[2].fX, quad[2].fY);
+}
+
+bool SkPathStroker::cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* quadPts) const {
+ SkPoint strokeMid;
+ this->cubicQuadMid(cubic, quadPts, &strokeMid);
+ SkScalar dist = pt_to_line(strokeMid, quadPts->fQuad[0], quadPts->fQuad[2]);
+ return dist < fInvResScaleSquared;
+}
+
+bool SkPathStroker::cubicStroke(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+ if (!fFoundTangents) {
+ ResultType resultType = this->tangentsMeet(cubic, quadPts);
+ if (kQuad_ResultType != resultType) {
+ if ((kDegenerate_ResultType == resultType
+ || points_within_dist(quadPts->fQuad[0], quadPts->fQuad[2],
+ fInvResScale)) && cubicMidOnLine(cubic, quadPts)) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ } else {
+ fFoundTangents = true;
+ }
+ }
+ if (fFoundTangents) {
+ ResultType resultType = this->compareQuadCubic(cubic, quadPts);
+ if (kQuad_ResultType == resultType) {
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ const SkPoint* stroke = quadPts->fQuad;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ if (!quadPts->fOppositeTangents) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ }
+ }
+ if (!SkScalarIsFinite(quadPts->fQuad[2].fX) || !SkScalarIsFinite(quadPts->fQuad[2].fY)) {
+ return false; // just abort if projected quad isn't representable
+ }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+ SkDEBUGCODE(gMaxRecursion[fFoundTangents] = SkTMax(gMaxRecursion[fFoundTangents],
+ fRecursionDepth + 1));
+#endif
+ if (++fRecursionDepth > kRecursiveLimits[fFoundTangents]) {
+ return false; // just abort if we hit the recursion limit
+ }
+ SkQuadConstruct half;
+ if (!half.initWithStart(quadPts)) {
+ addDegenerateLine(quadPts);
+ --fRecursionDepth;
+ return true;
+ }
+ if (!this->cubicStroke(cubic, &half)) {
+ return false;
+ }
+ if (!half.initWithEnd(quadPts)) {
+ addDegenerateLine(quadPts);
+ --fRecursionDepth;
+ return true;
+ }
+ if (!this->cubicStroke(cubic, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+bool SkPathStroker::conicStroke(const SkConic& conic, SkQuadConstruct* quadPts) {
+ ResultType resultType = this->compareQuadConic(conic, quadPts);
+ if (kQuad_ResultType == resultType) {
+ const SkPoint* stroke = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+ SkDEBUGCODE(gMaxRecursion[kConic_RecursiveLimit] = SkTMax(gMaxRecursion[kConic_RecursiveLimit],
+ fRecursionDepth + 1));
+#endif
+ if (++fRecursionDepth > kRecursiveLimits[kConic_RecursiveLimit]) {
+ return false; // just abort if we hit the recursion limit
+ }
+ SkQuadConstruct half;
+ (void) half.initWithStart(quadPts);
+ if (!this->conicStroke(conic, &half)) {
+ return false;
+ }
+ (void) half.initWithEnd(quadPts);
+ if (!this->conicStroke(conic, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+bool SkPathStroker::quadStroke(const SkPoint quad[3], SkQuadConstruct* quadPts) {
+ ResultType resultType = this->compareQuadQuad(quad, quadPts);
+ if (kQuad_ResultType == resultType) {
+ const SkPoint* stroke = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+ SkDEBUGCODE(gMaxRecursion[kQuad_RecursiveLimit] = SkTMax(gMaxRecursion[kQuad_RecursiveLimit],
+ fRecursionDepth + 1));
+#endif
+ if (++fRecursionDepth > kRecursiveLimits[kQuad_RecursiveLimit]) {
+ return false; // just abort if we hit the recursion limit
+ }
+ SkQuadConstruct half;
+ (void) half.initWithStart(quadPts);
+ if (!this->quadStroke(quad, &half)) {
+ return false;
+ }
+ (void) half.initWithEnd(quadPts);
+ if (!this->quadStroke(quad, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+void SkPathStroker::cubicTo(const SkPoint& pt1, const SkPoint& pt2,
+ const SkPoint& pt3) {
+ const SkPoint cubic[4] = { fPrevPt, pt1, pt2, pt3 };
+ SkPoint reduction[3];
+ const SkPoint* tangentPt;
+ ReductionType reductionType = CheckCubicLinear(cubic, reduction, &tangentPt);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt3);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt3);
+ return;
+ }
+ if (kDegenerate_ReductionType <= reductionType && kDegenerate3_ReductionType >= reductionType) {
+ this->lineTo(reduction[0]);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ if (kDegenerate2_ReductionType <= reductionType) {
+ this->lineTo(reduction[1]);
+ }
+ if (kDegenerate3_ReductionType == reductionType) {
+ this->lineTo(reduction[2]);
+ }
+ this->lineTo(pt3);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalCD, unitCD;
+ if (!this->preJoinTo(*tangentPt, &normalAB, &unitAB, false)) {
+ this->lineTo(pt3);
+ return;
+ }
+ SkScalar tValues[2];
+ int count = SkFindCubicInflections(cubic, tValues);
+ SkScalar lastT = 0;
+ for (int index = 0; index <= count; ++index) {
+ SkScalar nextT = index < count ? tValues[index] : 1;
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, lastT, nextT);
+ (void) this->cubicStroke(cubic, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, lastT, nextT);
+ (void) this->cubicStroke(cubic, &quadPts);
+ lastT = nextT;
+ }
+ SkScalar cusp = SkFindCubicCusp(cubic);
+ if (cusp > 0) {
+ SkPoint cuspLoc;
+ SkEvalCubicAt(cubic, cusp, &cuspLoc, nullptr, nullptr);
+ fCusper.addCircle(cuspLoc.fX, cuspLoc.fY, fRadius);
+ }
+ // emit the join even if one stroke succeeded but the last one failed
+ // this avoids reversing an inner stroke with a partial path followed by another moveto
+ this->setCubicEndNormal(cubic, normalAB, unitAB, &normalCD, &unitCD);
+
+ this->postJoinTo(pt3, normalCD, unitCD);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkPaintDefaults.h"
+
+SkStroke::SkStroke() {
+ fWidth = SK_Scalar1;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fResScale = 1;
+ fCap = SkPaint::kDefault_Cap;
+ fJoin = SkPaint::kDefault_Join;
+ fDoFill = false;
+}
+
+SkStroke::SkStroke(const SkPaint& p) {
+ fWidth = p.getStrokeWidth();
+ fMiterLimit = p.getStrokeMiter();
+ fResScale = 1;
+ fCap = (uint8_t)p.getStrokeCap();
+ fJoin = (uint8_t)p.getStrokeJoin();
+ fDoFill = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+SkStroke::SkStroke(const SkPaint& p, SkScalar width) {
+ fWidth = width;
+ fMiterLimit = p.getStrokeMiter();
+ fResScale = 1;
+ fCap = (uint8_t)p.getStrokeCap();
+ fJoin = (uint8_t)p.getStrokeJoin();
+ fDoFill = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+void SkStroke::setWidth(SkScalar width) {
+ SkASSERT(width >= 0);
+ fWidth = width;
+}
+
+void SkStroke::setMiterLimit(SkScalar miterLimit) {
+ SkASSERT(miterLimit >= 0);
+ fMiterLimit = miterLimit;
+}
+
+void SkStroke::setCap(SkPaint::Cap cap) {
+ SkASSERT((unsigned)cap < SkPaint::kCapCount);
+ fCap = SkToU8(cap);
+}
+
+void SkStroke::setJoin(SkPaint::Join join) {
+ SkASSERT((unsigned)join < SkPaint::kJoinCount);
+ fJoin = SkToU8(join);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// If src==dst, then we use a tmp path to record the stroke, and then swap
+// its contents with src when we're done.
+class AutoTmpPath {
+public:
+ AutoTmpPath(const SkPath& src, SkPath** dst) : fSrc(src) {
+ if (&src == *dst) {
+ *dst = &fTmpDst;
+ fSwapWithSrc = true;
+ } else {
+ (*dst)->reset();
+ fSwapWithSrc = false;
+ }
+ }
+
+ ~AutoTmpPath() {
+ if (fSwapWithSrc) {
+ fTmpDst.swap(*const_cast<SkPath*>(&fSrc));
+ }
+ }
+
+private:
+ SkPath fTmpDst;
+ const SkPath& fSrc;
+ bool fSwapWithSrc;
+};
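+
+// Editor's sketch (not part of the original source): AutoTmpPath is what makes
+// in-place stroking safe. A hypothetical caller may pass the same path as
+// source and destination:
+//
+// SkPath path = ...;
+// stroke.strokePath(path, &path); // the stroke is recorded into a temp path
+// // and swapped back into 'path' on scope exit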
+
+void SkStroke::strokePath(const SkPath& src, SkPath* dst) const {
+ SkASSERT(dst);
+
+ SkScalar radius = SkScalarHalf(fWidth);
+
+ AutoTmpPath tmp(src, &dst);
+
+ if (radius <= 0) {
+ return;
+ }
+
+ // If src is really a rect, call our specialty strokeRect() method
+ {
+ SkRect rect;
+ bool isClosed;
+ SkPath::Direction dir;
+ if (src.isRect(&rect, &isClosed, &dir) && isClosed) {
+ this->strokeRect(rect, dst, dir);
+ // our answer should preserve the inverseness of the src
+ if (src.isInverseFillType()) {
+ SkASSERT(!dst->isInverseFillType());
+ dst->toggleInverseFillType();
+ }
+ return;
+ }
+ }
+
+ // We can always ignore centers for stroke and fill convex line-only paths
+ // TODO: remove the line-only restriction
+ bool ignoreCenter = fDoFill && (src.getSegmentMasks() == SkPath::kLine_SegmentMask) &&
+ src.isLastContourClosed() && src.isConvex();
+
+ SkPathStroker stroker(src, radius, fMiterLimit, this->getCap(), this->getJoin(),
+ fResScale, ignoreCenter);
+ SkPath::Iter iter(src, false);
+ SkPath::Verb lastSegment = SkPath::kMove_Verb;
+
+ for (;;) {
+ SkPoint pts[4];
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ stroker.moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ stroker.lineTo(pts[1], &iter);
+ lastSegment = SkPath::kLine_Verb;
+ break;
+ case SkPath::kQuad_Verb:
+ stroker.quadTo(pts[1], pts[2]);
+ lastSegment = SkPath::kQuad_Verb;
+ break;
+ case SkPath::kConic_Verb: {
+ stroker.conicTo(pts[1], pts[2], iter.conicWeight());
+ lastSegment = SkPath::kConic_Verb;
+ } break;
+ case SkPath::kCubic_Verb:
+ stroker.cubicTo(pts[1], pts[2], pts[3]);
+ lastSegment = SkPath::kCubic_Verb;
+ break;
+ case SkPath::kClose_Verb:
+ if (SkPaint::kButt_Cap != this->getCap()) {
+ /* If the stroke consists of a moveTo followed by a close, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ if (stroker.hasOnlyMoveTo()) {
+ stroker.lineTo(stroker.moveToPt());
+ goto ZERO_LENGTH;
+ }
+ /* If the stroke consists of a moveTo followed by one or more zero-length
+ verbs, then followed by a close, treat is as if it were followed by a
+ zero-length line. Lines without length can have square & round end caps. */
+ if (stroker.isCurrentContourEmpty()) {
+ ZERO_LENGTH:
+ lastSegment = SkPath::kLine_Verb;
+ break;
+ }
+ }
+ stroker.close(lastSegment == SkPath::kLine_Verb);
+ break;
+ case SkPath::kDone_Verb:
+ goto DONE;
+ }
+ }
+DONE:
+ stroker.done(dst, lastSegment == SkPath::kLine_Verb);
+
+ if (fDoFill && !ignoreCenter) {
+ if (SkPathPriv::CheapIsFirstDirection(src, SkPathPriv::kCCW_FirstDirection)) {
+ dst->reverseAddPath(src);
+ } else {
+ dst->addPath(src);
+ }
+ } else {
+ // Seems like we can assume that a 2-point src would always result in
+ // a convex stroke, but testing has proved otherwise.
+ // TODO: fix the stroker to make this assumption true (without making
+ // it slower than the work that will be done in computeConvexity())
+#if 0
+ // this test results in a non-convex stroke :(
+ static void test(SkCanvas* canvas) {
+ SkPoint pts[] = { 146.333328, 192.333328, 300.333344, 293.333344 };
+ SkPaint paint;
+ paint.setStrokeWidth(7);
+ paint.setStrokeCap(SkPaint::kRound_Cap);
+ canvas->drawLine(pts[0].fX, pts[0].fY, pts[1].fX, pts[1].fY, paint);
+ }
+#endif
+#if 0
+ if (2 == src.countPoints()) {
+ dst->setIsConvex(true);
+ }
+#endif
+ }
+
+ // our answer should preserve the inverseness of the src
+ if (src.isInverseFillType()) {
+ SkASSERT(!dst->isInverseFillType());
+ dst->toggleInverseFillType();
+ }
+}
+
+static SkPath::Direction reverse_direction(SkPath::Direction dir) {
+ static const SkPath::Direction gOpposite[] = { SkPath::kCCW_Direction, SkPath::kCW_Direction };
+ return gOpposite[dir];
+}
+
+static void addBevel(SkPath* path, const SkRect& r, const SkRect& outer, SkPath::Direction dir) {
+ SkPoint pts[8];
+
+ if (SkPath::kCW_Direction == dir) {
+ pts[0].set(r.fLeft, outer.fTop);
+ pts[1].set(r.fRight, outer.fTop);
+ pts[2].set(outer.fRight, r.fTop);
+ pts[3].set(outer.fRight, r.fBottom);
+ pts[4].set(r.fRight, outer.fBottom);
+ pts[5].set(r.fLeft, outer.fBottom);
+ pts[6].set(outer.fLeft, r.fBottom);
+ pts[7].set(outer.fLeft, r.fTop);
+ } else {
+ pts[7].set(r.fLeft, outer.fTop);
+ pts[6].set(r.fRight, outer.fTop);
+ pts[5].set(outer.fRight, r.fTop);
+ pts[4].set(outer.fRight, r.fBottom);
+ pts[3].set(r.fRight, outer.fBottom);
+ pts[2].set(r.fLeft, outer.fBottom);
+ pts[1].set(outer.fLeft, r.fBottom);
+ pts[0].set(outer.fLeft, r.fTop);
+ }
+ path->addPoly(pts, 8, true);
+}
+
+void SkStroke::strokeRect(const SkRect& origRect, SkPath* dst,
+ SkPath::Direction dir) const {
+ SkASSERT(dst != nullptr);
+ dst->reset();
+
+ SkScalar radius = SkScalarHalf(fWidth);
+ if (radius <= 0) {
+ return;
+ }
+
+ SkScalar rw = origRect.width();
+ SkScalar rh = origRect.height();
+ if ((rw < 0) ^ (rh < 0)) {
+ dir = reverse_direction(dir);
+ }
+ SkRect rect(origRect);
+ rect.sort();
+ // reassign these, now that we know they'll be >= 0
+ rw = rect.width();
+ rh = rect.height();
+
+ SkRect r(rect);
+ r.outset(radius, radius);
+
+ SkPaint::Join join = (SkPaint::Join)fJoin;
+ if (SkPaint::kMiter_Join == join && fMiterLimit < SK_ScalarSqrt2) {
+ join = SkPaint::kBevel_Join;
+ }
+
+ switch (join) {
+ case SkPaint::kMiter_Join:
+ dst->addRect(r, dir);
+ break;
+ case SkPaint::kBevel_Join:
+ addBevel(dst, rect, r, dir);
+ break;
+ case SkPaint::kRound_Join:
+ dst->addRoundRect(r, radius, radius, dir);
+ break;
+ default:
+ break;
+ }
+
+ if (fWidth < SkMinScalar(rw, rh) && !fDoFill) {
+ r = rect;
+ r.inset(radius, radius);
+ dst->addRect(r, reverse_direction(dir));
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStroke.h b/gfx/skia/skia/src/core/SkStroke.h
new file mode 100644
index 0000000000..66edf3d92f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStroke_DEFINED
+#define SkStroke_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkStrokerPriv.h"
+
+#ifdef SK_DEBUG
+extern bool gDebugStrokerErrorSet;
+extern SkScalar gDebugStrokerError;
+extern int gMaxRecursion[];
+#endif
+
+/** \class SkStroke
+ SkStroke is the utility class that constructs paths by stroking
+ geometries (lines, rects, ovals, roundrects, paths). This is
+ invoked when a geometry or text is drawn in a canvas with the
+ kStroke_Mask bit set in the paint.
+*/
+class SkStroke {
+public:
+ SkStroke();
+ SkStroke(const SkPaint&);
+ SkStroke(const SkPaint&, SkScalar width); // width overrides paint.getStrokeWidth()
+
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ void setCap(SkPaint::Cap);
+
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+ void setJoin(SkPaint::Join);
+
+ void setMiterLimit(SkScalar);
+ void setWidth(SkScalar);
+
+ bool getDoFill() const { return SkToBool(fDoFill); }
+ void setDoFill(bool doFill) { fDoFill = SkToU8(doFill); }
+
+ /**
+ * ResScale is the "intended" resolution for the output.
+ * Default is 1.0.
+ * Larger values (res > 1) indicate that the result should be more precise, since it will
+ * be zoomed up, and small errors will be magnified.
+ * Smaller values (0 < res < 1) indicate that the result can be less precise, since it will
+ * be zoomed down, and small errors may be invisible.
+ */
+ SkScalar getResScale() const { return fResScale; }
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
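+
+ /** Editor's sketch (not part of the original source): a caller rendering at
+ 4x zoom might ask the stroker for correspondingly tighter error bounds.
+ The names 'paint', 'src', and 'dst' below are hypothetical.
+
+ SkStroke stroke(paint);
+ stroke.setResScale(4.0f); // output viewed at 4x, so be 4x as precise
+ stroke.strokePath(src, &dst);
+ */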
+
+ /**
+ * Stroke the specified rect, winding it in the specified direction.
+ */
+ void strokeRect(const SkRect& rect, SkPath* result,
+ SkPath::Direction = SkPath::kCW_Direction) const;
+ void strokePath(const SkPath& path, SkPath*) const;
+
+ ////////////////////////////////////////////////////////////////
+
+private:
+ SkScalar fWidth, fMiterLimit;
+ SkScalar fResScale;
+ uint8_t fCap, fJoin;
+ bool fDoFill;
+
+ friend class SkPaint;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStrokeRec.cpp b/gfx/skia/skia/src/core/SkStrokeRec.cpp
new file mode 100644
index 0000000000..a668dab2d0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokeRec.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkPaintDefaults.h"
+
+// must be < 0, since ==0 means hairline, and >0 means normal stroke
+#define kStrokeRec_FillStyleWidth (-SK_Scalar1)
+
+SkStrokeRec::SkStrokeRec(InitStyle s) {
+ fResScale = 1;
+ fWidth = (kFill_InitStyle == s) ? kStrokeRec_FillStyleWidth : 0;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fCap = SkPaint::kDefault_Cap;
+ fJoin = SkPaint::kDefault_Join;
+ fStrokeAndFill = false;
+}
+
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkScalar resScale) {
+ this->init(paint, paint.getStyle(), resScale);
+}
+
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkPaint::Style styleOverride, SkScalar resScale) {
+ this->init(paint, styleOverride, resScale);
+}
+
+void SkStrokeRec::init(const SkPaint& paint, SkPaint::Style style, SkScalar resScale) {
+ fResScale = resScale;
+
+ switch (style) {
+ case SkPaint::kFill_Style:
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStroke_Style:
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStrokeAndFill_Style:
+ if (0 == paint.getStrokeWidth()) {
+ // hairline+fill == fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ } else {
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = true;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown paint style");
+ // fall back on just fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ }
+
+ // copy these from the paint, regardless of our "style"
+ fMiterLimit = paint.getStrokeMiter();
+ fCap = paint.getStrokeCap();
+ fJoin = paint.getStrokeJoin();
+}
+
+SkStrokeRec::Style SkStrokeRec::getStyle() const {
+ if (fWidth < 0) {
+ return kFill_Style;
+ } else if (0 == fWidth) {
+ return kHairline_Style;
+ } else {
+ return fStrokeAndFill ? kStrokeAndFill_Style : kStroke_Style;
+ }
+}
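+
+// Editor's note (not part of the original source): the width encoding decoded
+// above, summarized:
+// fWidth < 0 -> kFill_Style (kStrokeRec_FillStyleWidth)
+// fWidth == 0 -> kHairline_Style
+// fWidth > 0 -> kStroke_Style, or kStrokeAndFill_Style when fStrokeAndFill is set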
+
+void SkStrokeRec::setFillStyle() {
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+}
+
+void SkStrokeRec::setHairlineStyle() {
+ fWidth = 0;
+ fStrokeAndFill = false;
+}
+
+void SkStrokeRec::setStrokeStyle(SkScalar width, bool strokeAndFill) {
+ if (strokeAndFill && (0 == width)) {
+ // hairline+fill == fill
+ this->setFillStyle();
+ } else {
+ fWidth = width;
+ fStrokeAndFill = strokeAndFill;
+ }
+}
+
+#include "src/core/SkStroke.h"
+
+#ifdef SK_DEBUG
+ // enables tweaking these values at runtime from Viewer
+ bool gDebugStrokerErrorSet = false;
+ SkScalar gDebugStrokerError;
+#endif
+
+bool SkStrokeRec::applyToPath(SkPath* dst, const SkPath& src) const {
+ if (fWidth <= 0) { // hairline or fill
+ return false;
+ }
+
+ SkStroke stroker;
+ stroker.setCap((SkPaint::Cap)fCap);
+ stroker.setJoin((SkPaint::Join)fJoin);
+ stroker.setMiterLimit(fMiterLimit);
+ stroker.setWidth(fWidth);
+ stroker.setDoFill(fStrokeAndFill);
+#ifdef SK_DEBUG
+ stroker.setResScale(gDebugStrokerErrorSet ? gDebugStrokerError : fResScale);
+#else
+ stroker.setResScale(fResScale);
+#endif
+ stroker.strokePath(src, dst);
+ return true;
+}
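+
+// Editor's sketch (not part of the original source): typical use of
+// applyToPath() to turn a stroked paint into a fillable outline. The names
+// 'paint' and 'strokedPath' are hypothetical.
+//
+// SkStrokeRec rec(paint); // capture the paint's stroke parameters
+// SkPath fillPath;
+// if (rec.applyToPath(&fillPath, strokedPath)) {
+// // fillPath now outlines the stroke; draw it with fill
+// } else {
+// // fill or hairline style; no geometric stroke was produced
+// }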
+
+void SkStrokeRec::applyToPaint(SkPaint* paint) const {
+ if (fWidth < 0) { // fill
+ paint->setStyle(SkPaint::kFill_Style);
+ return;
+ }
+
+ paint->setStyle(fStrokeAndFill ? SkPaint::kStrokeAndFill_Style : SkPaint::kStroke_Style);
+ paint->setStrokeWidth(fWidth);
+ paint->setStrokeMiter(fMiterLimit);
+ paint->setStrokeCap((SkPaint::Cap)fCap);
+ paint->setStrokeJoin((SkPaint::Join)fJoin);
+}
+
+SkScalar SkStrokeRec::getInflationRadius() const {
+ return GetInflationRadius((SkPaint::Join)fJoin, fMiterLimit, (SkPaint::Cap)fCap, fWidth);
+}
+
+SkScalar SkStrokeRec::GetInflationRadius(const SkPaint& paint, SkPaint::Style style) {
+ SkScalar width = SkPaint::kFill_Style == style ? -SK_Scalar1 : paint.getStrokeWidth();
+ return GetInflationRadius(paint.getStrokeJoin(), paint.getStrokeMiter(), paint.getStrokeCap(),
+ width);
+}
+
+SkScalar SkStrokeRec::GetInflationRadius(SkPaint::Join join, SkScalar miterLimit, SkPaint::Cap cap,
+ SkScalar strokeWidth) {
+ if (strokeWidth < 0) { // fill
+ return 0;
+ } else if (0 == strokeWidth) {
+ // FIXME: We need a "matrixScale" parameter here in order to properly handle hairlines.
+ // Their width is determined in device space, unlike other strokes.
+ // http://skbug.com/8157
+ return SK_Scalar1;
+ }
+
+ // since we're stroked, outset the rect by the radius (and join type, caps)
+ SkScalar multiplier = SK_Scalar1;
+ if (SkPaint::kMiter_Join == join) {
+ multiplier = SkTMax(multiplier, miterLimit);
+ }
+ if (SkPaint::kSquare_Cap == cap) {
+ multiplier = SkTMax(multiplier, SK_ScalarSqrt2);
+ }
+ return strokeWidth/2 * multiplier;
+}
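+
+// Editor's worked example (not part of the original source): a miter join with
+// miterLimit 4 and strokeWidth 10 gives multiplier = max(1, 4) = 4, so the
+// inflation radius is 10/2 * 4 = 20. A square cap alone would give
+// 10/2 * sqrt(2), roughly 7.07.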
+
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.cpp b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
new file mode 100644
index 0000000000..32cf9ecb4e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkStrokerPriv.h"
+
+#include <utility>
+
+static void ButtCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath*) {
+ path->lineTo(stop.fX, stop.fY);
+}
+
+static void RoundCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath*) {
+ SkVector parallel;
+ SkPointPriv::RotateCW(normal, &parallel);
+
+ SkPoint projectedCenter = pivot + parallel;
+
+ path->conicTo(projectedCenter + normal, projectedCenter, SK_ScalarRoot2Over2);
+ path->conicTo(projectedCenter - normal, stop, SK_ScalarRoot2Over2);
+}
+
+static void SquareCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath* otherPath) {
+ SkVector parallel;
+ SkPointPriv::RotateCW(normal, &parallel);
+
+ if (otherPath) {
+ path->setLastPt(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ } else {
+ path->lineTo(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ path->lineTo(stop.fX, stop.fY);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+static bool is_clockwise(const SkVector& before, const SkVector& after) {
+ return before.fX * after.fY > before.fY * after.fX;
+}
+
+enum AngleType {
+ kNearly180_AngleType,
+ kSharp_AngleType,
+ kShallow_AngleType,
+ kNearlyLine_AngleType
+};
+
+static AngleType Dot2AngleType(SkScalar dot) {
+// need more precise fixed normalization
+// SkASSERT(SkScalarAbs(dot) <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot >= 0) { // shallow or line
+ return SkScalarNearlyZero(SK_Scalar1 - dot) ? kNearlyLine_AngleType : kShallow_AngleType;
+ } else { // sharp or 180
+ return SkScalarNearlyZero(SK_Scalar1 + dot) ? kNearly180_AngleType : kSharp_AngleType;
+ }
+}
+
+static void HandleInnerJoin(SkPath* inner, const SkPoint& pivot, const SkVector& after) {
+#if 1
+ /* In the degenerate case that the stroke radius is larger than our segments
+ just connecting the two inner segments may "show through" as a funny
+ diagonal. To pseudo-fix this, we go through the pivot point. This adds
+ an extra point/edge, but I can't see a cheap way to know when this is
+ not needed :(
+ */
+ inner->lineTo(pivot.fX, pivot.fY);
+#endif
+
+ inner->lineTo(pivot.fX - after.fX, pivot.fY - after.fY);
+}
+
+static void BluntJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool) {
+ SkVector after;
+ afterUnitNormal.scale(radius, &after);
+
+ if (!is_clockwise(beforeUnitNormal, afterUnitNormal)) {
+ using std::swap;
+ swap(outer, inner);
+ after.negate();
+ }
+
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ HandleInnerJoin(inner, pivot, after);
+}
+
+static void RoundJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool) {
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+
+ if (angleType == kNearlyLine_AngleType)
+ return;
+
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkRotationDirection dir = kCW_SkRotationDirection;
+
+ if (!is_clockwise(before, after)) {
+ using std::swap;
+ swap(outer, inner);
+ before.negate();
+ after.negate();
+ dir = kCCW_SkRotationDirection;
+ }
+
+ SkMatrix matrix;
+ matrix.setScale(radius, radius);
+ matrix.postTranslate(pivot.fX, pivot.fY);
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = SkConic::BuildUnitArc(before, after, dir, &matrix, conics);
+ if (count > 0) {
+ for (int i = 0; i < count; ++i) {
+ outer->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ after.scale(radius);
+ HandleInnerJoin(inner, pivot, after);
+ }
+}
+
+#define kOneOverSqrt2 (0.707106781f)
+
+static void MiterJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine) {
+ // negate the dot since we're using normals instead of tangents
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkVector mid;
+ SkScalar sinHalfAngle;
+ bool ccw;
+
+ if (angleType == kNearlyLine_AngleType) {
+ return;
+ }
+ if (angleType == kNearly180_AngleType) {
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ ccw = !is_clockwise(before, after);
+ if (ccw) {
+ using std::swap;
+ swap(outer, inner);
+ before.negate();
+ after.negate();
+ }
+
+ /* Before we enter the world of square-roots and divides,
+ check if we're trying to join an upright right angle
+ (common case for stroking rectangles). If so, special case
+ that (for speed and accuracy).
+ Note: we only need to check one normal if dot==0
+ */
+ if (0 == dotProd && invMiterLimit <= kOneOverSqrt2) {
+ mid = (before + after) * radius;
+ goto DO_MITER;
+ }
+
+ /* midLength = radius / sinHalfAngle
+ if (midLength > miterLimit * radius) abort
+ if (radius / sinHalf > miterLimit * radius) abort
+ if (1 / sinHalf > miterLimit) abort
+ if (1 / miterLimit > sinHalf) abort
+ My dotProd is opposite sign, since it is built from normals and not tangents
+ hence 1 + dot instead of 1 - dot in the formula
+ */
+ sinHalfAngle = SkScalarSqrt(SkScalarHalf(SK_Scalar1 + dotProd));
+ if (sinHalfAngle < invMiterLimit) {
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ // choose the most accurate way to form the initial mid-vector
+ if (angleType == kSharp_AngleType) {
+ mid.set(after.fY - before.fY, before.fX - after.fX);
+ if (ccw) {
+ mid.negate();
+ }
+ } else {
+ mid.set(before.fX + after.fX, before.fY + after.fY);
+ }
+
+ mid.setLength(radius / sinHalfAngle);
+DO_MITER:
+ if (prevIsLine) {
+ outer->setLastPt(pivot.fX + mid.fX, pivot.fY + mid.fY);
+ } else {
+ outer->lineTo(pivot.fX + mid.fX, pivot.fY + mid.fY);
+ }
+
+DO_BLUNT:
+ after.scale(radius);
+ if (!currIsLine) {
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ }
+ HandleInnerJoin(inner, pivot, after);
+}
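+
+// Editor's worked example (not part of the original source): with the default
+// miterLimit of 4, invMiterLimit = 0.25, so the miter is kept whenever
+// sinHalfAngle >= 0.25 -- that is, when the angle between the two segments is
+// at least roughly 29 degrees; sharper corners fall through to DO_BLUNT.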
+
+/////////////////////////////////////////////////////////////////////////////
+
+SkStrokerPriv::CapProc SkStrokerPriv::CapFactory(SkPaint::Cap cap) {
+ const SkStrokerPriv::CapProc gCappers[] = {
+ ButtCapper, RoundCapper, SquareCapper
+ };
+
+ SkASSERT((unsigned)cap < SkPaint::kCapCount);
+ return gCappers[cap];
+}
+
+SkStrokerPriv::JoinProc SkStrokerPriv::JoinFactory(SkPaint::Join join) {
+ const SkStrokerPriv::JoinProc gJoiners[] = {
+ MiterJoiner, RoundJoiner, BluntJoiner
+ };
+
+ SkASSERT((unsigned)join < SkPaint::kJoinCount);
+ return gJoiners[join];
+}
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.h b/gfx/skia/skia/src/core/SkStrokerPriv.h
new file mode 100644
index 0000000000..a7294f7e27
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkStrokerPriv_DEFINED
+#define SkStrokerPriv_DEFINED
+
+#include "src/core/SkStroke.h"
+
+#define CWX(x, y) (-y)
+#define CWY(x, y) (x)
+#define CCWX(x, y) (y)
+#define CCWY(x, y) (-x)
+
+#define CUBIC_ARC_FACTOR ((SK_ScalarSqrt2 - SK_Scalar1) * 4 / 3)
+
+// this enables a global which is not thread-safe; doing so triggers a TSAN error in Chrome tests.
+#define QUAD_STROKE_APPROX_EXTENDED_DEBUGGING 0 // set to 1 to enable debugging in StrokerTest.cpp
+
+class SkStrokerPriv {
+public:
+ typedef void (*CapProc)(SkPath* path,
+ const SkPoint& pivot,
+ const SkVector& normal,
+ const SkPoint& stop,
+ SkPath* otherPath);
+
+ typedef void (*JoinProc)(SkPath* outer, SkPath* inner,
+ const SkVector& beforeUnitNormal,
+ const SkPoint& pivot,
+ const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine);
+
+ static CapProc CapFactory(SkPaint::Cap);
+ static JoinProc JoinFactory(SkPaint::Join);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp b/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp
new file mode 100644
index 0000000000..0d9b5e8266
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSurfaceCharacterization.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+
+#ifdef SK_DEBUG
+void SkSurfaceCharacterization::validate() const {
+ const GrCaps* caps = fContextInfo->priv().caps();
+
+ GrColorType grCT = SkColorTypeToGrColorType(this->colorType());
+ SkASSERT(fSampleCnt && caps->isFormatAsColorTypeRenderable(grCT, fBackendFormat, fSampleCnt));
+
+ SkASSERT(caps->areColorTypeAndFormatCompatible(grCT, fBackendFormat));
+}
+#endif
+
+
+bool SkSurfaceCharacterization::operator==(const SkSurfaceCharacterization& other) const {
+ if (!this->isValid() || !other.isValid()) {
+ return false;
+ }
+
+ if (fContextInfo != other.fContextInfo) {
+ return false;
+ }
+
+ return fCacheMaxResourceBytes == other.fCacheMaxResourceBytes &&
+ fOrigin == other.fOrigin &&
+ fImageInfo == other.fImageInfo &&
+ fBackendFormat == other.fBackendFormat &&
+ fSampleCnt == other.fSampleCnt &&
+ fIsTextureable == other.fIsTextureable &&
+ fIsMipMapped == other.fIsMipMapped &&
+ fUsesGLFBO0 == other.fUsesGLFBO0 &&
+ fVulkanSecondaryCBCompatible == other.fVulkanSecondaryCBCompatible &&
+ fIsProtected == other.fIsProtected &&
+ fSurfaceProps == other.fSurfaceProps;
+}
+
+SkSurfaceCharacterization SkSurfaceCharacterization::createResized(int width, int height) const {
+ const GrCaps* caps = fContextInfo->priv().caps();
+ if (!caps) {
+ return SkSurfaceCharacterization();
+ }
+
+ if (width <= 0 || height <= 0 || width > caps->maxRenderTargetSize() ||
+ height > caps->maxRenderTargetSize()) {
+ return SkSurfaceCharacterization();
+ }
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes,
+ fImageInfo.makeWH(width, height), fBackendFormat, fOrigin,
+ fSampleCnt, fIsTextureable, fIsMipMapped, fUsesGLFBO0,
+ fVulkanSecondaryCBCompatible, fIsProtected, fSurfaceProps);
+}
+
+SkSurfaceCharacterization SkSurfaceCharacterization::createColorSpace(
+ sk_sp<SkColorSpace> cs) const {
+ if (!this->isValid()) {
+ return SkSurfaceCharacterization();
+ }
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes,
+ fImageInfo.makeColorSpace(std::move(cs)), fBackendFormat,
+ fOrigin, fSampleCnt, fIsTextureable, fIsMipMapped, fUsesGLFBO0,
+ fVulkanSecondaryCBCompatible, fIsProtected, fSurfaceProps);
+}
+
+
+bool SkSurfaceCharacterization::isCompatible(const GrBackendTexture& backendTex) const {
+ if (!this->isValid() || !backendTex.isValid()) {
+ return false;
+ }
+
+ if (fBackendFormat != backendTex.getBackendFormat()) {
+ return false;
+ }
+
+ if (this->usesGLFBO0()) {
+ // It is a backend texture so can't be wrapping FBO0
+ return false;
+ }
+
+ if (this->vulkanSecondaryCBCompatible()) {
+ return false;
+ }
+
+ if (this->isMipMapped() && !backendTex.hasMipMaps()) {
+ // backend texture is allowed to have mipmaps even if the characterization doesn't require
+ // them.
+ return false;
+ }
+
+ if (this->width() != backendTex.width() || this->height() != backendTex.height()) {
+ return false;
+ }
+
+ if (this->isProtected() != GrProtected(backendTex.isProtected())) {
+ return false;
+ }
+
+ return true;
+}
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSurfacePriv.h b/gfx/skia/skia/src/core/SkSurfacePriv.h
new file mode 100644
index 0000000000..6e5ff0962f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSurfacePriv.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfacePriv_DEFINED
+#define SkSurfacePriv_DEFINED
+
+#include "include/core/SkSurfaceProps.h"
+
+struct SkImageInfo;
+
+static inline SkSurfaceProps SkSurfacePropsCopyOrDefault(const SkSurfaceProps* props) {
+ if (props) {
+ return *props;
+ } else {
+ return SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
+ }
+}
+
+constexpr size_t kIgnoreRowBytesValue = static_cast<size_t>(~0);
+
+bool SkSurfaceValidateRasterInfo(const SkImageInfo&, size_t rb = kIgnoreRowBytesValue);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSwizzle.cpp b/gfx/skia/skia/src/core/SkSwizzle.cpp
new file mode 100644
index 0000000000..301b0184f1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSwizzle.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSwizzle.h"
+
+#include "src/core/SkOpts.h"
+
+void SkSwapRB(uint32_t* dest, const uint32_t* src, int count) {
+ SkOpts::RGBA_to_BGRA(dest, src, count);
+}
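+
+// Editor's sketch (not part of the original source): swapping channels in an
+// RGBA buffer. The swizzle is per-pixel, so calling it in place is assumed
+// here to be safe.
+//
+// uint32_t pixels[64] = { /* ... RGBA words ... */ };
+// SkSwapRB(pixels, pixels, 64); // pixels are now BGRA (R and B exchanged)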
diff --git a/gfx/skia/skia/src/core/SkTDPQueue.h b/gfx/skia/skia/src/core/SkTDPQueue.h
new file mode 100644
index 0000000000..569bbcbb45
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTDPQueue.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDPQueue_DEFINED
+#define SkTDPQueue_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/core/SkTSort.h"
+
+#include <utility>
+
+/**
+ * This class implements a priority queue. T is the type of the elements in the queue. LESS is a
+ * function that compares two Ts and returns true if the first is higher priority than the second.
+ *
+ * Optionally objects may know their index into the priority queue. The queue will update the index
+ * as the objects move through the queue. This is enabled by using a non-nullptr function for INDEX.
+ * When an INDEX function is provided, random deletes from the queue are allowed using remove().
+ * Additionally, the priority is allowed to change as long as priorityDidChange() is called
+ * afterwards. In debug builds the index will be set to -1 before an element is removed from the
+ * queue.
+ */
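+
+// Editor's sketch (not part of the original source): a queue of hypothetical
+// Job pointers, ordered by deadline, with index tracking enabled so remove()
+// and priorityDidChange() may be used.
+//
+// struct Job {
+// int fDeadline;
+// int fPQIndex; // maintained by the queue
+// };
+// static bool JobLess(Job* const& a, Job* const& b) {
+// return a->fDeadline < b->fDeadline;
+// }
+// static int* JobIndex(Job* const& job) { return &job->fPQIndex; }
+//
+// SkTDPQueue<Job*, JobLess, JobIndex> queue;
+// queue.insert(&someJob);
+// someJob.fDeadline = 5; // priority changed...
+// queue.priorityDidChange(&someJob); // ...so notify the queue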
+template <typename T,
+ bool (*LESS)(const T&, const T&),
+ int* (*INDEX)(const T&) = (int* (*)(const T&))nullptr>
+class SkTDPQueue {
+public:
+ SkTDPQueue() {}
+ SkTDPQueue(int reserve) { fArray.setReserve(reserve); }
+
+ SkTDPQueue(SkTDPQueue&&) = default;
+ SkTDPQueue& operator =(SkTDPQueue&&) = default;
+
+ SkTDPQueue(const SkTDPQueue&) = delete;
+ SkTDPQueue& operator=(const SkTDPQueue&) = delete;
+
+ /** Number of items in the queue. */
+ int count() const { return fArray.count(); }
+
+ /** Gets the next item in the queue without popping it. */
+ const T& peek() const { return fArray[0]; }
+ T& peek() { return fArray[0]; }
+
+ /** Removes the next item. */
+ void pop() {
+ this->validate();
+ SkDEBUGCODE(if (SkToBool(INDEX)) { *INDEX(fArray[0]) = -1; })
+ if (1 == fArray.count()) {
+ fArray.pop();
+ return;
+ }
+
+ fArray[0] = fArray[fArray.count() - 1];
+ this->setIndex(0);
+ fArray.pop();
+ this->percolateDownIfNecessary(0);
+
+ this->validate();
+ }
+
+ /** Inserts a new item in the queue based on its priority. */
+ void insert(T entry) {
+ this->validate();
+ int index = fArray.count();
+ *fArray.append() = entry;
+ this->setIndex(fArray.count() - 1);
+ this->percolateUpIfNecessary(index);
+ this->validate();
+ }
+
+ /** Random access removal. This requires that the INDEX function is non-nullptr. */
+ void remove(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.count());
+ this->validate();
+ SkDEBUGCODE(*INDEX(fArray[index]) = -1;)
+ if (index == fArray.count() - 1) {
+ fArray.pop();
+ return;
+ }
+ fArray[index] = fArray[fArray.count() - 1];
+ fArray.pop();
+ this->setIndex(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Notification that the priority of an entry has changed. This must be called after an
+ item's priority is changed to maintain correct ordering. Changing the priority is only
+ allowed if an INDEX function is provided. */
+ void priorityDidChange(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.count());
+ this->validate(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Gets the item at index i in the priority queue (for i < this->count()). at(0) is equivalent
+ to peek(). Otherwise, there is no guarantee about ordering of elements in the queue. */
+ T at(int i) const { return fArray[i]; }
+
+ /** Sorts the queue into priority order. The queue is only guaranteed to remain in sorted order
+ * until any other operation, other than at(), is performed.
+ */
+ void sort() {
+ if (fArray.count() > 1) {
+ SkTQSort<T>(fArray.begin(), fArray.end() - 1, LESS);
+ for (int i = 0; i < fArray.count(); i++) {
+ this->setIndex(i);
+ }
+ this->validate();
+ }
+ }
+
+private:
+ static int LeftOf(int x) { SkASSERT(x >= 0); return 2 * x + 1; }
+ static int ParentOf(int x) { SkASSERT(x > 0); return (x - 1) >> 1; }
+
+ void percolateUpOrDown(int index) {
+ SkASSERT(index >= 0);
+ if (!percolateUpIfNecessary(index)) {
+ this->validate(index);
+ this->percolateDownIfNecessary(index);
+ }
+ }
+
+ bool percolateUpIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ bool percolated = false;
+ do {
+ if (0 == index) {
+ this->setIndex(index);
+ return percolated;
+ }
+ int p = ParentOf(index);
+ if (LESS(fArray[index], fArray[p])) {
+ using std::swap;
+ swap(fArray[index], fArray[p]);
+ this->setIndex(index);
+ index = p;
+ percolated = true;
+ } else {
+ this->setIndex(index);
+ return percolated;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void percolateDownIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ do {
+ int child = LeftOf(index);
+
+ if (child >= fArray.count()) {
+ // We're a leaf.
+ this->setIndex(index);
+ return;
+ }
+
+ if (child + 1 >= fArray.count()) {
+ // We only have a left child.
+ if (LESS(fArray[child], fArray[index])) {
+ using std::swap;
+ swap(fArray[child], fArray[index]);
+ this->setIndex(child);
+ this->setIndex(index);
+ return;
+ }
+ } else if (LESS(fArray[child + 1], fArray[child])) {
+ // The right child is the one we should swap with, if we swap.
+ child++;
+ }
+
+ // Check if we need to swap.
+ if (LESS(fArray[child], fArray[index])) {
+ using std::swap;
+ swap(fArray[child], fArray[index]);
+ this->setIndex(index);
+ index = child;
+ } else {
+ // We're less than both our children.
+ this->setIndex(index);
+ return;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void setIndex(int index) {
+ SkASSERT(index < fArray.count());
+ if (SkToBool(INDEX)) {
+ *INDEX(fArray[index]) = index;
+ }
+ }
+
+ void validate(int excludedIndex = -1) const {
+#ifdef SK_DEBUG
+ for (int i = 1; i < fArray.count(); ++i) {
+ int p = ParentOf(i);
+ if (excludedIndex != p && excludedIndex != i) {
+ SkASSERT(!(LESS(fArray[i], fArray[p])));
+ SkASSERT(!SkToBool(INDEX) || *INDEX(fArray[i]) == i);
+ }
+ }
+#endif
+ }
+
+ SkTDArray<T> fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTDynamicHash.h b/gfx/skia/skia/src/core/SkTDynamicHash.h
new file mode 100644
index 0000000000..381d2129a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTDynamicHash.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDynamicHash_DEFINED
+#define SkTDynamicHash_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTemplates.h"
+
+// Traits requires:
+// static const Key& GetKey(const T&) { ... }
+// static uint32_t Hash(const Key&) { ... }
+// We'll look on T for these by default, or you can pass a custom Traits type.
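+//
+// Editor's sketch (not part of the original source): a type satisfying the
+// default Traits = T contract.
+//
+// struct Entry {
+// uint32_t fID;
+// static const uint32_t& GetKey(const Entry& e) { return e.fID; }
+// static uint32_t Hash(const uint32_t& key) { return key * 2654435761u; }
+// };
+// SkTDynamicHash<Entry, uint32_t> hash;
+// hash.add(&entry); // the hash stores pointers; 'entry' must outlive it
+// Entry* found = hash.find(entry.fID);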
+template <typename T,
+ typename Key,
+ typename Traits = T,
+ int kGrowPercent = 75> // Larger -> more memory efficient, but slower.
+class SkTDynamicHash {
+public:
+ SkTDynamicHash() : fCount(0), fDeleted(0), fCapacity(0), fArray(nullptr) {
+ SkASSERT(this->validate());
+ }
+
+ ~SkTDynamicHash() {
+ sk_free(fArray);
+ }
+
+ class Iter {
+ public:
+ explicit Iter(SkTDynamicHash* hash) : fHash(hash), fCurrentIndex(-1) {
+ SkASSERT(hash);
+ ++(*this);
+ }
+ bool done() const {
+ SkASSERT(fCurrentIndex <= fHash->fCapacity);
+ return fCurrentIndex == fHash->fCapacity;
+ }
+ T& operator*() const {
+ SkASSERT(!this->done());
+ return *this->current();
+ }
+ void operator++() {
+ do {
+ fCurrentIndex++;
+ } while (!this->done() && (this->current() == Empty() || this->current() == Deleted()));
+ }
+
+ private:
+ T* current() const { return fHash->fArray[fCurrentIndex]; }
+
+ SkTDynamicHash* fHash;
+ int fCurrentIndex;
+ };
+
+ class ConstIter {
+ public:
+ explicit ConstIter(const SkTDynamicHash* hash) : fHash(hash), fCurrentIndex(-1) {
+ SkASSERT(hash);
+ ++(*this);
+ }
+ bool done() const {
+ SkASSERT(fCurrentIndex <= fHash->fCapacity);
+ return fCurrentIndex == fHash->fCapacity;
+ }
+ const T& operator*() const {
+ SkASSERT(!this->done());
+ return *this->current();
+ }
+ void operator++() {
+ do {
+ fCurrentIndex++;
+ } while (!this->done() && (this->current() == Empty() || this->current() == Deleted()));
+ }
+
+ private:
+ const T* current() const { return fHash->fArray[fCurrentIndex]; }
+
+ const SkTDynamicHash* fHash;
+ int fCurrentIndex;
+ };
+
+ int count() const { return fCount; }
+
+ // Return the entry with this key if we have it, otherwise nullptr.
+ T* find(const Key& key) const {
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ T* candidate = fArray[index];
+ if (Empty() == candidate) {
+ return nullptr;
+ }
+ if (Deleted() != candidate && GetKey(*candidate) == key) {
+ return candidate;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ return nullptr;
+ }
+
+ // Add an entry with this key. We require that no entry with newEntry's key is already present.
+ void add(T* newEntry) {
+ SkASSERT(nullptr == this->find(GetKey(*newEntry)));
+ this->maybeGrow();
+ this->innerAdd(newEntry);
+ SkASSERT(this->validate());
+ }
+
+ // Remove the entry with this key. We require that an entry with this key is present.
+ void remove(const Key& key) {
+ SkASSERT(this->find(key));
+ this->innerRemove(key);
+ SkASSERT(this->validate());
+ }
+
+ void rewind() {
+ if (fArray) {
+ sk_bzero(fArray, sizeof(T*)* fCapacity);
+ }
+ fCount = 0;
+ fDeleted = 0;
+ }
+
+ void reset() {
+ fCount = 0;
+ fDeleted = 0;
+ fCapacity = 0;
+ sk_free(fArray);
+ fArray = nullptr;
+ }
+
+protected:
+ // These methods are used by tests only.
+
+ int capacity() const { return fCapacity; }
+
+ // How many collisions do we go through before finding where this entry should be inserted?
+ int countCollisions(const Key& key) const {
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Empty() == candidate || Deleted() == candidate || GetKey(*candidate) == key) {
+ return round;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ return 0;
+ }
+
+private:
+ // We have two special values to indicate an empty or deleted entry.
+ static T* Empty() { return reinterpret_cast<T*>(0); } // i.e. nullptr
+ static T* Deleted() { return reinterpret_cast<T*>(1); } // Also an invalid pointer.
+
+ bool validate() const {
+ #define SKTDYNAMICHASH_CHECK(x) SkASSERT(x); if (!(x)) return false
+ static const int kLarge = 50; // Arbitrary, tweak to suit your patience.
+
+ // O(1) checks, always done.
+ // Is capacity sane?
+ SKTDYNAMICHASH_CHECK(SkIsPow2(fCapacity));
+
+ // O(N) checks, skipped when very large.
+ if (fCount < kLarge * kLarge) {
+ // Are fCount and fDeleted correct, and are all elements findable?
+ int count = 0, deleted = 0;
+ for (int i = 0; i < fCapacity; i++) {
+ if (Deleted() == fArray[i]) {
+ deleted++;
+ } else if (Empty() != fArray[i]) {
+ count++;
+ SKTDYNAMICHASH_CHECK(this->find(GetKey(*fArray[i])));
+ }
+ }
+ SKTDYNAMICHASH_CHECK(count == fCount);
+ SKTDYNAMICHASH_CHECK(deleted == fDeleted);
+ }
+
+ // O(N^2) checks, skipped when large.
+ if (fCount < kLarge) {
+ // Are all entries unique?
+ for (int i = 0; i < fCapacity; i++) {
+ if (Empty() == fArray[i] || Deleted() == fArray[i]) {
+ continue;
+ }
+ for (int j = i+1; j < fCapacity; j++) {
+ if (Empty() == fArray[j] || Deleted() == fArray[j]) {
+ continue;
+ }
+ SKTDYNAMICHASH_CHECK(fArray[i] != fArray[j]);
+ SKTDYNAMICHASH_CHECK(!(GetKey(*fArray[i]) == GetKey(*fArray[j])));
+ }
+ }
+ }
+ #undef SKTDYNAMICHASH_CHECK
+ return true;
+ }
+
+ void innerAdd(T* newEntry) {
+ const Key& key = GetKey(*newEntry);
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Empty() == candidate || Deleted() == candidate) {
+ if (Deleted() == candidate) {
+ fDeleted--;
+ }
+ fCount++;
+ fArray[index] = newEntry;
+ return;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ }
+
+ void innerRemove(const Key& key) {
+ const int firstIndex = this->firstIndex(key);
+ int index = firstIndex;
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Deleted() != candidate && GetKey(*candidate) == key) {
+ fDeleted++;
+ fCount--;
+ fArray[index] = Deleted();
+ return;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ }
+
+ void maybeGrow() {
+ if (100 * (int64_t(fCount + fDeleted) + 1) > int64_t(fCapacity) * kGrowPercent) {
+ auto newCapacity = fCapacity > 0 ? fCapacity : 4;
+
+ // Only grow the storage when most non-empty entries are
+ // in active use. Otherwise, just purge the tombstones.
+ if (fCount > fDeleted) {
+ SkASSERT_RELEASE(newCapacity <= std::numeric_limits<int>::max() / 2);
+ newCapacity *= 2;
+ }
+ SkASSERT(newCapacity > fCount + 1);
+ this->resize(newCapacity);
+ }
+ }
+
+ void resize(int newCapacity) {
+ SkDEBUGCODE(int oldCount = fCount;)
+ int oldCapacity = fCapacity;
+ SkAutoTMalloc<T*> oldArray(fArray);
+
+ fCount = fDeleted = 0;
+ fCapacity = newCapacity;
+ fArray = (T**)sk_calloc_throw(fCapacity, sizeof(T*));
+
+ for (int i = 0; i < oldCapacity; i++) {
+ T* entry = oldArray[i];
+ if (Empty() != entry && Deleted() != entry) {
+ this->innerAdd(entry);
+ }
+ }
+ SkASSERT(oldCount == fCount);
+ }
+
+ // fCapacity is always a power of 2, so this masks the correct low bits to index into our hash.
+ uint32_t hashMask() const { return fCapacity - 1; }
+
+ int firstIndex(const Key& key) const {
+ return Hash(key) & this->hashMask();
+ }
+
+ // Given index at round N, what is the index to check at N+1? round should start at 0.
+ int nextIndex(int index, int round) const {
+ // This will search a power-of-two array fully without repeating an index.
+ return (index + round + 1) & this->hashMask();
+ }
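+
+ // Editor's example (not part of the original source): with fCapacity == 8 and
+ // firstIndex == 3, the probe sequence is 3, 4, 6, 1, 5, 2, 0, 7. The offsets
+ // are the triangular numbers 0, 1, 3, 6, 10, 15, 21, 28 (mod 8), which visit
+ // every slot of a power-of-two table exactly once.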
+
+ static const Key& GetKey(const T& t) { return Traits::GetKey(t); }
+ static uint32_t Hash(const Key& key) { return Traits::Hash(key); }
+
+ int fCount; // Number of non Empty(), non Deleted() entries in fArray.
+ int fDeleted; // Number of Deleted() entries in fArray.
+ int fCapacity; // Number of entries in fArray. Always a power of 2.
+ T** fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTInternalLList.h b/gfx/skia/skia/src/core/SkTInternalLList.h
new file mode 100644
index 0000000000..f435c0c4f0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTInternalLList.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTInternalLList_DEFINED
+#define SkTInternalLList_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * This macro creates the member variables required by the SkTInternalLList class. It should be
+ * placed in the private section of any class that will be stored in a double linked list.
+ */
+#define SK_DECLARE_INTERNAL_LLIST_INTERFACE(ClassName) \
+ friend class SkTInternalLList<ClassName>; \
+ /* back pointer to the owning list - for debugging */ \
+ SkDEBUGCODE(SkTInternalLList<ClassName>* fList = nullptr;) \
+ ClassName* fPrev = nullptr; \
+ ClassName* fNext = nullptr
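+
+// Editor's sketch (not part of the original source): a hypothetical class that
+// can live in an SkTInternalLList.
+//
+// class Widget {
+// private:
+// SK_DECLARE_INTERNAL_LLIST_INTERFACE(Widget);
+// };
+//
+// SkTInternalLList<Widget> list;
+// list.addToTail(widget); // 'widget' is a Widget*; its links live in Widget itself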
+
+/**
+ * This class implements a templated internal doubly linked list data structure.
+ */
+template <class T> class SkTInternalLList {
+public:
+ SkTInternalLList() {}
+
+ void reset() {
+ fHead = nullptr;
+ fTail = nullptr;
+ }
+
+ void remove(T* entry) {
+ SkASSERT(fHead && fTail);
+ SkASSERT(this->isInList(entry));
+
+ T* prev = entry->fPrev;
+ T* next = entry->fNext;
+
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fHead = next;
+ }
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ fTail = prev;
+ }
+
+ entry->fPrev = nullptr;
+ entry->fNext = nullptr;
+
+#ifdef SK_DEBUG
+ entry->fList = nullptr;
+#endif
+ }
+
+ void addToHead(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = nullptr;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ }
+ fHead = entry;
+ if (nullptr == fTail) {
+ fTail = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ void addToTail(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = fTail;
+ entry->fNext = nullptr;
+ if (fTail) {
+ fTail->fNext = entry;
+ }
+ fTail = entry;
+ if (nullptr == fHead) {
+ fHead = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry before an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the tail.
+ */
+ void addBefore(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToTail(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fNext = existingEntry;
+ T* prev = existingEntry->fPrev;
+ existingEntry->fPrev = newEntry;
+ newEntry->fPrev = prev;
+ if (nullptr == prev) {
+ SkASSERT(fHead == existingEntry);
+ fHead = newEntry;
+ } else {
+ prev->fNext = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry after an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the head.
+ */
+ void addAfter(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToHead(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fPrev = existingEntry;
+ T* next = existingEntry->fNext;
+ existingEntry->fNext = newEntry;
+ newEntry->fNext = next;
+ if (nullptr == next) {
+ SkASSERT(fTail == existingEntry);
+ fTail = newEntry;
+ } else {
+ next->fPrev = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ void concat(SkTInternalLList&& list) {
+ if (list.isEmpty()) {
+ return;
+ }
+
+ list.fHead->fPrev = fTail;
+ if (!fHead) {
+ SkASSERT(!list.fHead->fPrev);
+ fHead = list.fHead;
+ } else {
+ SkASSERT(fTail);
+ fTail->fNext = list.fHead;
+ }
+ fTail = list.fTail;
+
+#ifdef SK_DEBUG
+ for (T* node = list.fHead; node; node = node->fNext) {
+ SkASSERT(node->fList == &list);
+ node->fList = this;
+ }
+#endif
+
+ list.fHead = list.fTail = nullptr;
+ }
+
+ bool isEmpty() const {
+ SkASSERT(SkToBool(fHead) == SkToBool(fTail));
+ return !fHead;
+ }
+
+ T* head() { return fHead; }
+ T* tail() { return fTail; }
+
+ class Iter {
+ public:
+ enum IterStart {
+ kHead_IterStart,
+ kTail_IterStart
+ };
+
+ Iter() : fCurr(nullptr) {}
+ Iter(const Iter& iter) : fCurr(iter.fCurr) {}
+ Iter& operator= (const Iter& iter) { fCurr = iter.fCurr; return *this; }
+
+ T* init(const SkTInternalLList& list, IterStart startLoc) {
+ if (kHead_IterStart == startLoc) {
+ fCurr = list.fHead;
+ } else {
+ SkASSERT(kTail_IterStart == startLoc);
+ fCurr = list.fTail;
+ }
+
+ return fCurr;
+ }
+
+ T* get() { return fCurr; }
+
+ /**
+ * Return the next/previous element in the list or NULL if at the end.
+ */
+ T* next() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fNext;
+ return fCurr;
+ }
+
+ T* prev() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fPrev;
+ return fCurr;
+ }
+
+ /**
+ * C++11 range-for interface.
+ */
+ bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
+ T* operator*() { return this->get(); }
+ void operator++() { this->next(); }
+
+ private:
+ T* fCurr;
+ };
+
+ Iter begin() const {
+ Iter iter;
+ iter.init(*this, Iter::kHead_IterStart);
+ return iter;
+ }
+
+ Iter end() const { return Iter(); }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(!fHead == !fTail);
+ Iter iter;
+ for (T* item = iter.init(*this, Iter::kHead_IterStart); item; item = iter.next()) {
+ SkASSERT(this->isInList(item));
+ if (nullptr == item->fPrev) {
+ SkASSERT(fHead == item);
+ } else {
+ SkASSERT(item->fPrev->fNext == item);
+ }
+ if (nullptr == item->fNext) {
+ SkASSERT(fTail == item);
+ } else {
+ SkASSERT(item->fNext->fPrev == item);
+ }
+ }
+ }
+
+ /**
+ * Debugging-only method that uses the list back pointer to check if 'entry' is indeed in 'this'
+ * list.
+ */
+ bool isInList(const T* entry) const {
+ return entry->fList == this;
+ }
+
+ /**
+ * Debugging-only method that laboriously counts the list entries.
+ */
+ int countEntries() const {
+ int count = 0;
+ for (T* entry = fHead; entry; entry = entry->fNext) {
+ ++count;
+ }
+ return count;
+ }
+#endif // SK_DEBUG
+
+private:
+ T* fHead = nullptr;
+ T* fTail = nullptr;
+
+ SkTInternalLList(const SkTInternalLList&) = delete;
+ SkTInternalLList& operator=(const SkTInternalLList&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTLList.h b/gfx/skia/skia/src/core/SkTLList.h
new file mode 100644
index 0000000000..496ee9e9de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLList.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLList_DEFINED
+#define SkTLList_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkTInternalLList.h"
+#include <new>
+#include <utility>
+
+/** Doubly-linked list of objects. The objects' lifetimes are controlled by the list. I.e.
+ the list creates the objects and they are deleted upon removal. This class block-allocates
+ space for entries based on the template parameter N.
+
+ Elements of the list can be constructed in place using the following macros:
+ SkNEW_INSERT_IN_LLIST_BEFORE(list, location, type_name, args)
+ SkNEW_INSERT_IN_LLIST_AFTER(list, location, type_name, args)
+ where list is a SkTLList<type_name>*, location is an iterator, and args is the paren-surrounded
+ constructor arguments for type_name. These macros behave like addBefore() and addAfter().
+
+ N is the number of objects to allocate as a group. In the worst case of fragmentation,
+ each object uses the space required for N unfragmented objects.
+*/
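+//
+// Editor's sketch (not part of the original source): elements are constructed
+// in place and owned by the list.
+//
+// SkTLList<SkString, 16> list; // block-allocates nodes 16 at a time
+// list.addToTail("hello"); // forwards to SkString's constructor
+// list.addToHead("world");
+// for (SkTLList<SkString, 16>::Iter iter(list); iter.get(); iter.next()) {
+// // visit *iter.get()
+// }
+// list.reset(); // destroys all elements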
+template <typename T, unsigned int N> class SkTLList {
+private:
+ struct Block;
+ struct Node {
+ SkAlignedSTStorage<1, T> fObj;
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Node);
+ Block* fBlock; // owning block.
+ };
+ typedef SkTInternalLList<Node> NodeList;
+
+public:
+ class Iter;
+
+ // Having fCount initialized to -1 indicates that the first time we attempt to grab a free node
+ // all the nodes in the pre-allocated first block need to be inserted into the free list. This
+ // allows us to skip that loop in instances when the list is never populated.
+ SkTLList() : fCount(-1) {}
+
+ ~SkTLList() {
+ this->validate();
+ typename NodeList::Iter iter;
+ Node* node = iter.init(fList, Iter::kHead_IterStart);
+ while (node) {
+ reinterpret_cast<T*>(node->fObj.get())->~T();
+ Block* block = node->fBlock;
+ node = iter.next();
+ if (0 == --block->fNodesInUse) {
+ for (unsigned int i = 0; i < N; ++i) {
+ block->fNodes[i].~Node();
+ }
+ if (block != &fFirstBlock) {
+ sk_free(block);
+ }
+ }
+ }
+ }
+
+ /** Adds a new element to the list at the head. */
+ template <typename... Args> T* addToHead(Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addToHead(node);
+ this->validate();
+ return new (node->fObj.get()) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list at the tail. */
+ template <typename... Args> T* addToTail(Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addToTail(node);
+ this->validate();
+ return new (node->fObj.get()) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list before the location indicated by the iterator. If the
+ iterator refers to a nullptr location then the new element is added at the tail */
+ template <typename... Args> T* addBefore(Iter location, Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addBefore(node, location.getNode());
+ this->validate();
+ return new (node->fObj.get()) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list after the location indicated by the iterator. If the
+ iterator refers to a nullptr location then the new element is added at the head */
+ template <typename... Args> T* addAfter(Iter location, Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addAfter(node, location.getNode());
+ this->validate();
+ return new (node->fObj.get()) T(std::forward<Args>(args)...);
+ }
+
+ /** Convenience methods for getting an iterator initialized to the head/tail of the list. */
+ Iter headIter() const { return Iter(*this, Iter::kHead_IterStart); }
+ Iter tailIter() const { return Iter(*this, Iter::kTail_IterStart); }
+
+ T* head() { return Iter(*this, Iter::kHead_IterStart).get(); }
+ T* tail() { return Iter(*this, Iter::kTail_IterStart).get(); }
+ const T* head() const { return Iter(*this, Iter::kHead_IterStart).get(); }
+ const T* tail() const { return Iter(*this, Iter::kTail_IterStart).get(); }
+
+ void popHead() {
+ this->validate();
+ Node* node = fList.head();
+ if (node) {
+ this->removeNode(node);
+ }
+ this->validate();
+ }
+
+ void popTail() {
+ this->validate();
+ Node* node = fList.tail();
+ if (node) {
+ this->removeNode(node);
+ }
+ this->validate();
+ }
+
+ void remove(T* t) {
+ this->validate();
+ Node* node = reinterpret_cast<Node*>(t);
+ SkASSERT(reinterpret_cast<T*>(node->fObj.get()) == t);
+ this->removeNode(node);
+ this->validate();
+ }
+
+ void reset() {
+ this->validate();
+ Iter iter(*this, Iter::kHead_IterStart);
+ while (iter.get()) {
+ Iter next = iter;
+ next.next();
+ this->remove(iter.get());
+ iter = next;
+ }
+ SkASSERT(0 == fCount || -1 == fCount);
+ this->validate();
+ }
+
+ int count() const { return SkTMax(fCount, 0); }
+ bool isEmpty() const { this->validate(); return 0 == fCount || -1 == fCount; }
+
+ bool operator== (const SkTLList& list) const {
+ if (this == &list) {
+ return true;
+ }
+ // Call count() rather than use fCount because an empty list may have fCount = 0 or -1.
+ if (this->count() != list.count()) {
+ return false;
+ }
+ for (Iter a(*this, Iter::kHead_IterStart), b(list, Iter::kHead_IterStart);
+ a.get();
+ a.next(), b.next()) {
+ SkASSERT(b.get()); // already checked that counts match.
+ if (!(*a.get() == *b.get())) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool operator!= (const SkTLList& list) const { return !(*this == list); }
+
+ /** The iterator becomes invalid if the element it refers to is removed from the list. */
+ class Iter : private NodeList::Iter {
+ private:
+ typedef typename NodeList::Iter INHERITED;
+
+ public:
+ typedef typename INHERITED::IterStart IterStart;
+ //!< Start the iterator at the head of the list.
+ static const IterStart kHead_IterStart = INHERITED::kHead_IterStart;
+ //!< Start the iterator at the tail of the list.
+ static const IterStart kTail_IterStart = INHERITED::kTail_IterStart;
+
+ Iter() {}
+
+ Iter(const SkTLList& list, IterStart start = kHead_IterStart) {
+ INHERITED::init(list.fList, start);
+ }
+
+ T* init(const SkTLList& list, IterStart start = kHead_IterStart) {
+ return this->nodeToObj(INHERITED::init(list.fList, start));
+ }
+
+ T* get() { return this->nodeToObj(INHERITED::get()); }
+
+ T* next() { return this->nodeToObj(INHERITED::next()); }
+
+ T* prev() { return this->nodeToObj(INHERITED::prev()); }
+
+ Iter& operator= (const Iter& iter) { INHERITED::operator=(iter); return *this; }
+
+ private:
+ friend class SkTLList;
+ Node* getNode() { return INHERITED::get(); }
+
+ T* nodeToObj(Node* node) {
+ if (node) {
+ return reinterpret_cast<T*>(node->fObj.get());
+ } else {
+ return nullptr;
+ }
+ }
+ };
+
+private:
+ struct Block {
+ int fNodesInUse;
+ Node fNodes[N];
+ };
+
+ void delayedInit() {
+ SkASSERT(-1 == fCount);
+ fFirstBlock.fNodesInUse = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ fFreeList.addToHead(fFirstBlock.fNodes + i);
+ fFirstBlock.fNodes[i].fBlock = &fFirstBlock;
+ }
+ fCount = 0;
+ this->validate();
+ }
+
+ Node* createNode() {
+ if (-1 == fCount) {
+ this->delayedInit();
+ }
+ Node* node = fFreeList.head();
+ if (node) {
+ fFreeList.remove(node);
+ ++node->fBlock->fNodesInUse;
+ } else {
+ // Should not get here when count == 0 because we always have the preallocated first
+ // block.
+ SkASSERT(fCount > 0);
+ Block* block = reinterpret_cast<Block*>(sk_malloc_throw(sizeof(Block)));
+ node = &block->fNodes[0];
+ new (node) Node;
+ node->fBlock = block;
+ block->fNodesInUse = 1;
+ for (unsigned int i = 1; i < N; ++i) {
+ new (block->fNodes + i) Node;
+ fFreeList.addToHead(block->fNodes + i);
+ block->fNodes[i].fBlock = block;
+ }
+ }
+ ++fCount;
+ return node;
+ }
+
+ void removeNode(Node* node) {
+ SkASSERT(node);
+ fList.remove(node);
+ reinterpret_cast<T*>(node->fObj.get())->~T();
+ Block* block = node->fBlock;
+        // Never release the first block; just add its nodes back to the free list.
+ if (0 == --block->fNodesInUse && block != &fFirstBlock) {
+ for (unsigned int i = 0; i < N; ++i) {
+ if (block->fNodes + i != node) {
+ fFreeList.remove(block->fNodes + i);
+ }
+ block->fNodes[i].~Node();
+ }
+ sk_free(block);
+ } else {
+ fFreeList.addToHead(node);
+ }
+ --fCount;
+ this->validate();
+ }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ bool isEmpty = false;
+ if (-1 == fCount) {
+ // We should not yet have initialized the free list.
+ SkASSERT(fFreeList.isEmpty());
+ isEmpty = true;
+ } else if (0 == fCount) {
+ // Should only have the nodes from the first block in the free list.
+ SkASSERT(fFreeList.countEntries() == N);
+ isEmpty = true;
+ }
+ SkASSERT(isEmpty == fList.isEmpty());
+ fList.validate();
+ fFreeList.validate();
+ typename NodeList::Iter iter;
+ Node* freeNode = iter.init(fFreeList, Iter::kHead_IterStart);
+ while (freeNode) {
+ SkASSERT(fFreeList.isInList(freeNode));
+ Block* block = freeNode->fBlock;
+ // Only the first block is allowed to have all its nodes in the free list.
+ SkASSERT(block->fNodesInUse > 0 || block == &fFirstBlock);
+ SkASSERT((unsigned)block->fNodesInUse < N);
+ int activeCnt = 0;
+ int freeCnt = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ bool free = fFreeList.isInList(block->fNodes + i);
+ bool active = fList.isInList(block->fNodes + i);
+ SkASSERT(free != active);
+ activeCnt += active;
+ freeCnt += free;
+ }
+ SkASSERT(activeCnt == block->fNodesInUse);
+ freeNode = iter.next();
+ }
+
+ int count = 0;
+ Node* activeNode = iter.init(fList, Iter::kHead_IterStart);
+ while (activeNode) {
+ ++count;
+ SkASSERT(fList.isInList(activeNode));
+ Block* block = activeNode->fBlock;
+ SkASSERT(block->fNodesInUse > 0 && (unsigned)block->fNodesInUse <= N);
+
+ int activeCnt = 0;
+ int freeCnt = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ bool free = fFreeList.isInList(block->fNodes + i);
+ bool active = fList.isInList(block->fNodes + i);
+ SkASSERT(free != active);
+ activeCnt += active;
+ freeCnt += free;
+ }
+ SkASSERT(activeCnt == block->fNodesInUse);
+ activeNode = iter.next();
+ }
+ SkASSERT(count == fCount || (0 == count && -1 == fCount));
+#endif
+ }
+
+ NodeList fList;
+ NodeList fFreeList;
+ Block fFirstBlock;
+ int fCount;
+
+ SkTLList(const SkTLList&) = delete;
+ SkTLList& operator=(const SkTLList&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTLS.cpp b/gfx/skia/skia/src/core/SkTLS.cpp
new file mode 100644
index 0000000000..e781a539b1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLS.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkTLS.h"
+
+struct SkTLSRec {
+ SkTLSRec* fNext;
+ void* fData;
+ SkTLS::CreateProc fCreateProc;
+ SkTLS::DeleteProc fDeleteProc;
+
+ ~SkTLSRec() {
+ if (fDeleteProc) {
+ fDeleteProc(fData);
+ }
+ // else we leak fData, or it will be managed by the caller
+ }
+};
+
+void SkTLS::Destructor(void* ptr) {
+ SkTLSRec* rec = (SkTLSRec*)ptr;
+ do {
+ SkTLSRec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ } while (rec);
+}
+
+void* SkTLS::Get(CreateProc createProc, DeleteProc deleteProc) {
+ if (nullptr == createProc) {
+ return nullptr;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(true);
+
+ if (ptr) {
+ const SkTLSRec* rec = (const SkTLSRec*)ptr;
+ do {
+ if (rec->fCreateProc == createProc) {
+ SkASSERT(rec->fDeleteProc == deleteProc);
+ return rec->fData;
+ }
+ } while ((rec = rec->fNext) != nullptr);
+ // not found, so create a new one
+ }
+
+    // add a new head to our chain
+ SkTLSRec* rec = new SkTLSRec;
+ rec->fNext = (SkTLSRec*)ptr;
+
+ SkTLS::PlatformSetSpecific(rec);
+
+ rec->fData = createProc();
+ rec->fCreateProc = createProc;
+ rec->fDeleteProc = deleteProc;
+ return rec->fData;
+}
+
+void* SkTLS::Find(CreateProc createProc) {
+ if (nullptr == createProc) {
+ return nullptr;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(false);
+
+ if (ptr) {
+ const SkTLSRec* rec = (const SkTLSRec*)ptr;
+ do {
+ if (rec->fCreateProc == createProc) {
+ return rec->fData;
+ }
+ } while ((rec = rec->fNext) != nullptr);
+ }
+ return nullptr;
+}
+
+void SkTLS::Delete(CreateProc createProc) {
+ if (nullptr == createProc) {
+ return;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(false);
+
+ SkTLSRec* curr = (SkTLSRec*)ptr;
+ SkTLSRec* prev = nullptr;
+ while (curr) {
+ SkTLSRec* next = curr->fNext;
+ if (curr->fCreateProc == createProc) {
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ // we have a new head of our chain
+ SkTLS::PlatformSetSpecific(next);
+ }
+ delete curr;
+ break;
+ }
+ prev = curr;
+ curr = next;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkTLS.h b/gfx/skia/skia/src/core/SkTLS.h
new file mode 100644
index 0000000000..3d86a26433
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLS.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLS_DEFINED
+#define SkTLS_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Maintains a per-thread cache, using a CreateProc as the key into that cache.
+ */
+class SkTLS {
+public:
+ typedef void* (*CreateProc)();
+ typedef void (*DeleteProc)(void*);
+
+ /**
+ * If Get() has previously been called with this CreateProc, then this
+     * returns its cached data; otherwise it returns nullptr. The CreateProc is
+     * never invoked in Find; it is only used as a key for searching the
+ * cache.
+ */
+ static void* Find(CreateProc);
+
+ /**
+     * Return the cached data that was returned by the CreateProc. The proc
+     * is only invoked the first time Get is called; thereafter its result is
+     * cached (per-thread), using the CreateProc as a key to look it up.
+     *
+     * When this thread exits, or Delete is called, the cached data is removed, and
+ * if a DeleteProc was specified, it is passed the pointer to the cached
+ * data.
+ */
+ static void* Get(CreateProc, DeleteProc);
+
+ /**
+     * Remove (optionally calling the DeleteProc if it was specified in Get)
+ * the cached data associated with this CreateProc. If no associated cached
+ * data is found, do nothing.
+ */
+ static void Delete(CreateProc);
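+
+    /*
+     * Illustrative usage sketch (not part of the Skia API; create_counter and
+     * delete_counter are hypothetical):
+     *
+     *   static void* create_counter() { return new int(0); }
+     *   static void delete_counter(void* p) { delete static_cast<int*>(p); }
+     *
+     *   int& counter = *static_cast<int*>(
+     *           SkTLS::Get(create_counter, delete_counter));
+     *   ++counter;  // per-thread count, freed via delete_counter at thread exit
+     */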
+
+private:
+ // Our implementation requires only 1 TLS slot, as we manage multiple values
+ // ourselves in a list, with the platform specific value as our head.
+
+ /**
+ * Implemented by the platform, to return the value of our (one) slot per-thread
+ *
+ * If forceCreateTheSlot is true, then we must have created the "slot" for
+ * our TLS, even though we know that the return value will be nullptr in that
+ * case (i.e. no-slot and first-time-slot both return nullptr). This ensures
+ * that after calling GetSpecific, we know that we can legally call
+ * SetSpecific.
+ *
+ * If forceCreateTheSlot is false, then the impl can either create the
+ * slot or not.
+ */
+ static void* PlatformGetSpecific(bool forceCreateTheSlot);
+
+ /**
+ * Implemented by the platform, to set the value for our (one) slot per-thread
+ *
+ * The implementation can rely on GetSpecific(true) having been previously
+ * called before SetSpecific is called.
+ */
+ static void PlatformSetSpecific(void*);
+
+public:
+ /**
+ * Will delete our internal list. To be called by the platform if/when its
+ * TLS slot is deleted (often at thread shutdown).
+ *
+ * Public *only* for the platform's use, not to be called by a client.
+ */
+ static void Destructor(void* ptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTLazy.h b/gfx/skia/skia/src/core/SkTLazy.h
new file mode 100644
index 0000000000..a6533cbcc2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLazy.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLazy_DEFINED
+#define SkTLazy_DEFINED
+
+#include "include/core/SkTypes.h"
+#include <new>
+#include <type_traits>
+#include <utility>
+
+/**
+ * Efficient way to defer allocating/initializing a class until it is needed
+ * (if ever).
+ */
+template <typename T> class SkTLazy {
+public:
+ SkTLazy() = default;
+ explicit SkTLazy(const T* src) : fPtr(src ? new (&fStorage) T(*src) : nullptr) {}
+ SkTLazy(const SkTLazy& that) : fPtr(that.fPtr ? new (&fStorage) T(*that.fPtr) : nullptr) {}
+ SkTLazy(SkTLazy&& that) : fPtr(that.fPtr ? new (&fStorage) T(std::move(*that.fPtr)) : nullptr){}
+
+ ~SkTLazy() { this->reset(); }
+
+ SkTLazy& operator=(const SkTLazy& that) {
+ if (that.isValid()) {
+ this->set(*that);
+ } else {
+ this->reset();
+ }
+ return *this;
+ }
+
+ SkTLazy& operator=(SkTLazy&& that) {
+ if (that.isValid()) {
+ this->set(std::move(*that));
+ } else {
+ this->reset();
+ }
+ return *this;
+ }
+
+ /**
+ * Return a pointer to an instance of the class initialized with 'args'.
+ * If a previous instance had been initialized (either from init() or
+ * set()) it will first be destroyed, so that a freshly initialized
+ * instance is always returned.
+ */
+ template <typename... Args> T* init(Args&&... args) {
+ this->reset();
+ fPtr = new (&fStorage) T(std::forward<Args>(args)...);
+ return fPtr;
+ }
+
+ /**
+ * Copy src into this, and return a pointer to a copy of it. Note this
+ * will always return the same pointer, so if it is called on a lazy that
+ * has already been initialized, then this will copy over the previous
+ * contents.
+ */
+ T* set(const T& src) {
+ if (this->isValid()) {
+ *fPtr = src;
+ } else {
+ fPtr = new (&fStorage) T(src);
+ }
+ return fPtr;
+ }
+
+ T* set(T&& src) {
+ if (this->isValid()) {
+ *fPtr = std::move(src);
+ } else {
+ fPtr = new (&fStorage) T(std::move(src));
+ }
+ return fPtr;
+ }
+
+ /**
+ * Destroy the lazy object (if it was created via init() or set())
+ */
+ void reset() {
+ if (this->isValid()) {
+ fPtr->~T();
+ fPtr = nullptr;
+ }
+ }
+
+ /**
+ * Returns true if a valid object has been initialized in the SkTLazy,
+ * false otherwise.
+ */
+ bool isValid() const { return SkToBool(fPtr); }
+
+ /**
+ * Returns the object. This version should only be called when the caller
+ * knows that the object has been initialized.
+ */
+ T* get() const { SkASSERT(this->isValid()); return fPtr; }
+ T* operator->() const { return this->get(); }
+ T& operator*() const { return *this->get(); }
+
+ /**
+ * Like above but doesn't assert if object isn't initialized (in which case
+ * nullptr is returned).
+ */
+ T* getMaybeNull() const { return fPtr; }
+
+private:
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type fStorage;
+ T* fPtr{nullptr}; // nullptr or fStorage
+};
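+
+/*
+ * Usage sketch (illustrative only; HeavyTable, needTable() and key are
+ * hypothetical):
+ *
+ *   SkTLazy<HeavyTable> lazy;
+ *   if (needTable()) {
+ *       lazy.init(1024);       // constructed only on this path
+ *   }
+ *   if (lazy.isValid()) {
+ *       lazy->lookup(key);     // operator-> asserts the object exists
+ *   }
+ */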
+
+/**
+ * A helper built on top of SkTLazy to do copy-on-first-write. The object is initialized
+ * with a const pointer but provides a non-const pointer accessor. The first time the
+ * accessor is called (if ever) the object is cloned.
+ *
+ * In the following example at most one copy of constThing is made:
+ *
+ * SkTCopyOnFirstWrite<Thing> thing(&constThing);
+ * ...
+ * function_that_takes_a_const_thing_ptr(thing); // constThing is passed
+ * ...
+ * if (need_to_modify_thing()) {
+ * thing.writable()->modifyMe(); // makes a copy of constThing
+ * }
+ * ...
+ * x = thing->readSomething();
+ * ...
+ * if (need_to_modify_thing_now()) {
+ * thing.writable()->changeMe(); // makes a copy of constThing if we didn't call modifyMe()
+ * }
+ *
+ * consume_a_thing(thing); // could be constThing or a modified copy.
+ */
+template <typename T>
+class SkTCopyOnFirstWrite {
+public:
+ explicit SkTCopyOnFirstWrite(const T& initial) : fObj(&initial) {}
+
+ explicit SkTCopyOnFirstWrite(const T* initial) : fObj(initial) {}
+
+ // Constructor for delayed initialization.
+ SkTCopyOnFirstWrite() : fObj(nullptr) {}
+
+ SkTCopyOnFirstWrite(const SkTCopyOnFirstWrite& that) { *this = that; }
+ SkTCopyOnFirstWrite( SkTCopyOnFirstWrite&& that) { *this = std::move(that); }
+
+ SkTCopyOnFirstWrite& operator=(const SkTCopyOnFirstWrite& that) {
+ fLazy = that.fLazy;
+ fObj = fLazy.isValid() ? fLazy.get() : that.fObj;
+ return *this;
+ }
+
+ SkTCopyOnFirstWrite& operator=(SkTCopyOnFirstWrite&& that) {
+ fLazy = std::move(that.fLazy);
+ fObj = fLazy.isValid() ? fLazy.get() : that.fObj;
+ return *this;
+ }
+
+ // Should only be called once, and only if the default constructor was used.
+ void init(const T& initial) {
+ SkASSERT(nullptr == fObj);
+ SkASSERT(!fLazy.isValid());
+ fObj = &initial;
+ }
+
+ /**
+ * Returns a writable T*. The first time this is called the initial object is cloned.
+ */
+ T* writable() {
+ SkASSERT(fObj);
+ if (!fLazy.isValid()) {
+ fLazy.set(*fObj);
+ fObj = fLazy.get();
+ }
+ return const_cast<T*>(fObj);
+ }
+
+ const T* get() const { return fObj; }
+
+ /**
+ * Operators for treating this as though it were a const pointer.
+ */
+
+    const T* operator->() const { return fObj; }
+
+    operator const T*() const { return fObj; }
+
+    const T& operator*() const { return *fObj; }
+
+private:
+ const T* fObj;
+ SkTLazy<T> fLazy;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTMultiMap.h b/gfx/skia/skia/src/core/SkTMultiMap.h
new file mode 100644
index 0000000000..d14aa97b0f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTMultiMap.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTMultiMap_DEFINED
+#define SkTMultiMap_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "src/core/SkTDynamicHash.h"
+
+/** A set that contains pointers to instances of T. Instances can be looked up with key Key.
+ * Multiple (possibly same) values can have the same key.
+ */
+template <typename T,
+ typename Key,
+ typename HashTraits=T>
+class SkTMultiMap {
+ struct ValueList {
+ explicit ValueList(T* value) : fValue(value), fNext(nullptr) {}
+
+ static const Key& GetKey(const ValueList& e) { return HashTraits::GetKey(*e.fValue); }
+ static uint32_t Hash(const Key& key) { return HashTraits::Hash(key); }
+ T* fValue;
+ ValueList* fNext;
+ };
+public:
+ SkTMultiMap() : fCount(0) {}
+
+ ~SkTMultiMap() {
+ typename SkTDynamicHash<ValueList, Key>::Iter iter(&fHash);
+ for ( ; !iter.done(); ++iter) {
+ ValueList* next;
+ for (ValueList* cur = &(*iter); cur; cur = next) {
+ HashTraits::OnFree(cur->fValue);
+ next = cur->fNext;
+ delete cur;
+ }
+ }
+ }
+
+ void insert(const Key& key, T* value) {
+ ValueList* list = fHash.find(key);
+ if (list) {
+ // The new ValueList entry is inserted as the second element in the
+ // linked list, and it will contain the value of the first element.
+ ValueList* newEntry = new ValueList(list->fValue);
+ newEntry->fNext = list->fNext;
+ // The existing first ValueList entry is updated to contain the
+ // inserted value.
+ list->fNext = newEntry;
+ list->fValue = value;
+ } else {
+ fHash.add(new ValueList(value));
+ }
+
+ ++fCount;
+ }
+
+ void remove(const Key& key, const T* value) {
+ ValueList* list = fHash.find(key);
+        // Temporarily making this safe for removing entries not in the map because of
+ // crbug.com/877915.
+#if 0
+ // Since we expect the caller to be fully aware of what is stored, just
+ // assert that the caller removes an existing value.
+ SkASSERT(list);
+ ValueList* prev = nullptr;
+ while (list->fValue != value) {
+ prev = list;
+ list = list->fNext;
+ }
+ this->internalRemove(prev, list, key);
+#else
+ ValueList* prev = nullptr;
+ while (list && list->fValue != value) {
+ prev = list;
+ list = list->fNext;
+ }
+ // Crash in Debug since it'd be great to detect a repro of 877915.
+ SkASSERT(list);
+ if (list) {
+ this->internalRemove(prev, list, key);
+ }
+#endif
+ }
+
+ T* find(const Key& key) const {
+ ValueList* list = fHash.find(key);
+ if (list) {
+ return list->fValue;
+ }
+ return nullptr;
+ }
+
+ template<class FindPredicate>
+ T* find(const Key& key, const FindPredicate f) {
+ ValueList* list = fHash.find(key);
+ while (list) {
+            if (f(list->fValue)) {
+ return list->fValue;
+ }
+ list = list->fNext;
+ }
+ return nullptr;
+ }
+
+ template<class FindPredicate>
+ T* findAndRemove(const Key& key, const FindPredicate f) {
+ ValueList* list = fHash.find(key);
+
+ ValueList* prev = nullptr;
+ while (list) {
+            if (f(list->fValue)) {
+ T* value = list->fValue;
+ this->internalRemove(prev, list, key);
+ return value;
+ }
+ prev = list;
+ list = list->fNext;
+ }
+ return nullptr;
+ }
+
+ int count() const { return fCount; }
+
+#ifdef SK_DEBUG
+ class ConstIter {
+ public:
+ explicit ConstIter(const SkTMultiMap* mmap)
+ : fIter(&(mmap->fHash))
+ , fList(nullptr) {
+ if (!fIter.done()) {
+ fList = &(*fIter);
+ }
+ }
+
+ bool done() const {
+ return fIter.done();
+ }
+
+ const T* operator*() {
+ SkASSERT(fList);
+ return fList->fValue;
+ }
+
+ void operator++() {
+ if (fList) {
+ fList = fList->fNext;
+ }
+ if (!fList) {
+ ++fIter;
+ if (!fIter.done()) {
+ fList = &(*fIter);
+ }
+ }
+ }
+
+ private:
+ typename SkTDynamicHash<ValueList, Key>::ConstIter fIter;
+ const ValueList* fList;
+ };
+
+ bool has(const T* value, const Key& key) const {
+ for (ValueList* list = fHash.find(key); list; list = list->fNext) {
+ if (list->fValue == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // This is not particularly fast and only used for validation, so debug only.
+ int countForKey(const Key& key) const {
+ int count = 0;
+ ValueList* list = fHash.find(key);
+ while (list) {
+ list = list->fNext;
+ ++count;
+ }
+ return count;
+ }
+#endif
+
+private:
+ SkTDynamicHash<ValueList, Key> fHash;
+ int fCount;
+
+ void internalRemove(ValueList* prev, ValueList* elem, const Key& key) {
+ if (elem->fNext) {
+ ValueList* next = elem->fNext;
+ elem->fValue = next->fValue;
+ elem->fNext = next->fNext;
+ delete next;
+ } else if (prev) {
+ prev->fNext = nullptr;
+ delete elem;
+ } else {
+ fHash.remove(key);
+ delete elem;
+ }
+
+ --fCount;
+ }
+
+};
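+
+/*
+ * Illustrative sketch (Resource, key, resourceA/B are hypothetical; assumes
+ * Resource provides the GetKey/Hash required of the default HashTraits):
+ * multiple values may share a key, and find() returns the newest one.
+ *
+ *   SkTMultiMap<Resource, uint32_t> map;
+ *   map.insert(key, resourceA);
+ *   map.insert(key, resourceB);    // same key; both are retained
+ *   Resource* r = map.find(key);   // resourceB, the most recent insert
+ *   map.remove(key, resourceB);    // resourceA remains findable
+ */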
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTSearch.cpp b/gfx/skia/skia/src/core/SkTSearch.cpp
new file mode 100644
index 0000000000..dbcd83a88c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTSearch.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/core/SkTSearch.h"
+
+#include "include/private/SkMalloc.h"
+
+#include <ctype.h>
+
+static inline const char* index_into_base(const char*const* base, int index,
+ size_t elemSize)
+{
+ return *(const char*const*)((const char*)base + index * elemSize);
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize)
+{
+ if (count <= 0)
+ return ~0;
+
+ SkASSERT(base != nullptr);
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi)
+ {
+ int mid = (hi + lo) >> 1;
+ const char* elem = index_into_base(base, mid, elemSize);
+
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp < 0)
+ lo = mid + 1;
+ else if (cmp > 0 || strlen(elem) > target_len)
+ hi = mid;
+ else
+ return mid;
+ }
+
+ const char* elem = index_into_base(base, hi, elemSize);
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp || strlen(elem) > target_len)
+ {
+ if (cmp < 0)
+ hi += 1;
+ hi = ~hi;
+ }
+ return hi;
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrSearch(base, count, target, strlen(target), elemSize);
+}
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t len, size_t elemSize)
+{
+ SkASSERT(target);
+
+ SkAutoAsciiToLC tolc(target, len);
+
+ return SkStrSearch(base, count, tolc.lc(), len, elemSize);
+}
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrLCSearch(base, count, target, strlen(target), elemSize);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkAutoAsciiToLC::SkAutoAsciiToLC(const char str[], size_t len)
+{
+ // see if we need to compute the length
+ if ((long)len < 0) {
+ len = strlen(str);
+ }
+ fLength = len;
+
+ // assign lc to our preallocated storage if len is small enough, or allocate
+ // it on the heap
+ char* lc;
+ if (len <= STORAGE) {
+ lc = fStorage;
+ } else {
+ lc = (char*)sk_malloc_throw(len + 1);
+ }
+ fLC = lc;
+
+    // convert any ascii to lower-case. we let non-ascii (utf8) chars pass
+ // through unchanged
+ for (int i = (int)(len - 1); i >= 0; --i) {
+ int c = str[i];
+ if ((c & 0x80) == 0) { // is just ascii
+ c = tolower(c);
+ }
+ lc[i] = c;
+ }
+ lc[len] = 0;
+}
+
+SkAutoAsciiToLC::~SkAutoAsciiToLC()
+{
+ if (fLC != fStorage) {
+ sk_free(fLC);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkTSearch.h b/gfx/skia/skia/src/core/SkTSearch.h
new file mode 100644
index 0000000000..0466917a27
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTSearch.h
@@ -0,0 +1,146 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTSearch_DEFINED
+#define SkTSearch_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * All of the SkTSearch variants want to return the index (0...N-1) of the
+ * found element, or the bit-not of where to insert the element.
+ *
+ * At a simple level, if the return value is negative, it was not found.
+ *
+ * For clients that want to insert the new element if it was not found, use
+ * the following logic:
+ *
+ * int index = SkTSearch(...);
+ * if (index >= 0) {
+ * // found at index
+ * } else {
+ * index = ~index; // now we are positive
+ * // insert at index
+ * }
+ */
+
+
+// The most general form of SkTSearch takes an array of T and a key of type K. A functor, less, is
+// used to perform comparisons. It has two function operators:
+// bool operator() (const T& t, const K& k)
+//     bool operator() (const K& k, const T& t)
+template <typename T, typename K, typename LESS>
+int SkTSearch(const T base[], int count, const K& key, size_t elemSize, LESS& less)
+{
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return ~0;
+ }
+
+    SkASSERT(base != nullptr); // base may only be nullptr when count is zero (handled above)
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi) {
+ int mid = lo + ((hi - lo) >> 1);
+ const T* elem = (const T*)((const char*)base + mid * elemSize);
+
+ if (less(*elem, key))
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ const T* elem = (const T*)((const char*)base + hi * elemSize);
+ if (less(*elem, key)) {
+ hi += 1;
+ hi = ~hi;
+ } else if (less(key, *elem)) {
+ hi = ~hi;
+ }
+ return hi;
+}
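+
+/*
+ * Illustrative call with a custom 'less' (Rec, recs and count are
+ * hypothetical). The functor must be callable in both argument orders:
+ *
+ *   struct Rec { int fKey; };
+ *   struct RecLess {
+ *       bool operator()(const Rec& r, const int& k) const { return r.fKey < k; }
+ *       bool operator()(const int& k, const Rec& r) const { return k < r.fKey; }
+ *   };
+ *   RecLess less;
+ *   int idx = SkTSearch(recs, count, 42, sizeof(Rec), less);
+ *   // idx >= 0: found; otherwise insert at ~idx to keep 'recs' sorted
+ */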
+
+// Adapts a less-than function to a functor.
+template <typename T, bool (LESS)(const T&, const T&)> struct SkTLessFunctionToFunctorAdaptor {
+ bool operator()(const T& a, const T& b) { return LESS(a, b); }
+};
+
+// Specialization for case when T==K and the caller wants to use a function rather than functor.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ static SkTLessFunctionToFunctorAdaptor<T, LESS> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+// Adapts operator < to a functor.
+template <typename T> struct SkTLessFunctor {
+ bool operator()(const T& a, const T& b) { return a < b; }
+};
+
+// Specialization for T==K, compare using op <.
+template <typename T>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ static SkTLessFunctor<T> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+// Similar to SkTLessFunctionToFunctorAdaptor but makes the functor interface take T* rather than T.
+template <typename T, bool (LESS)(const T&, const T&)> struct SkTLessFunctionToPtrFunctorAdaptor {
+ bool operator() (const T* t, const T* k) { return LESS(*t, *k); }
+};
+
+// Specialization for case where domain is an array of T* and the key value is a T*, and you want
+// to compare the T objects, not the pointers.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(T* base[], int count, T* target, size_t elemSize) {
+ static SkTLessFunctionToPtrFunctorAdaptor<T, LESS> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
+
+/** Like SkStrSearch, but treats target as if it were all lower-case. Assumes that
+ base points to a table of lower-case strings.
+*/
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
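+
+/*
+ * Illustrative call (gNames is a hypothetical table; it must be sorted, and
+ * for SkStrLCSearch it must already be lower-case):
+ *
+ *   static const char* const gNames[] = { "alpha", "beta", "gamma" };
+ *   int i = SkStrSearch(gNames, 3, "beta", sizeof(const char*));
+ *   // i == 1; a miss returns the bit-not of the insertion index
+ */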
+
+/** Helper class to convert a string to lower-case, but only modifying the ascii
+ characters. This makes the routine very fast and never changes the string
+ length, but it is not suitable for linguistic purposes. Normally this is
+    used for building and searching string tables.
+*/
+class SkAutoAsciiToLC {
+public:
+ SkAutoAsciiToLC(const char str[], size_t len = (size_t)-1);
+ ~SkAutoAsciiToLC();
+
+ const char* lc() const { return fLC; }
+ size_t length() const { return fLength; }
+
+private:
+ char* fLC; // points to either the heap or fStorage
+ size_t fLength;
+ enum {
+ STORAGE = 64
+ };
+ char fStorage[STORAGE+1];
+};
+
+// Helper when calling qsort with a compare proc that has typed its arguments
+#define SkCastForQSort(compare) reinterpret_cast<int (*)(const void*, const void*)>(compare)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTSort.h b/gfx/skia/skia/src/core/SkTSort.h
new file mode 100644
index 0000000000..1e853b0da7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTSort.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTSort_DEFINED
+#define SkTSort_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMathPriv.h"
+
+#include <utility>
+
+/* A comparison functor which performs the comparison 'a < b'. */
+template <typename T> struct SkTCompareLT {
+ bool operator()(const T a, const T b) const { return a < b; }
+};
+
+/* A comparison functor which performs the comparison '*a < *b'. */
+template <typename T> struct SkTPointerCompareLT {
+ bool operator()(const T* a, const T* b) const { return *a < *b; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sinks a hole from array[root] to leaf and then sifts the original array[root] element
+ * from the leaf level up.
+ *
+ * This version does extra work, in that it copies child to parent on the way down,
+ * then copies parent to child on the way back up. When copies are inexpensive,
+ * this is an optimization as this sift variant should only be used when
+ * the potentially out of place root entry value is expected to be small.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftUp(T array[], size_t root, size_t bottom, C lessThan) {
+ T x = array[root-1];
+ size_t start = root;
+ size_t j = root << 1;
+ while (j <= bottom) {
+ if (j < bottom && lessThan(array[j-1], array[j])) {
+ ++j;
+ }
+ array[root-1] = array[j-1];
+ root = j;
+ j = root << 1;
+ }
+ j = root >> 1;
+ while (j >= start) {
+ if (lessThan(array[j-1], x)) {
+ array[root-1] = array[j-1];
+ root = j;
+ j = root >> 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sifts the array[root] element from the root down.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftDown(T array[], size_t root, size_t bottom, C lessThan) {
+ T x = array[root-1];
+ size_t child = root << 1;
+ while (child <= bottom) {
+ if (child < bottom && lessThan(array[child-1], array[child])) {
+ ++child;
+ }
+ if (lessThan(x, array[child-1])) {
+ array[root-1] = array[child-1];
+ root = child;
+ child = root << 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/** Sorts the array of size count using comparator lessThan using a Heap Sort algorithm. Be sure to
+ * specialize swap if T has an efficient swap operation.
+ *
+ * @param array the array to be sorted.
+ * @param count the number of elements in the array.
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTHeapSort(T array[], size_t count, C lessThan) {
+ for (size_t i = count >> 1; i > 0; --i) {
+ SkTHeapSort_SiftDown(array, i, count, lessThan);
+ }
+
+ for (size_t i = count - 1; i > 0; --i) {
+ using std::swap;
+ swap(array[0], array[i]);
+ SkTHeapSort_SiftUp(array, 1, i, lessThan);
+ }
+}
+
+/** Sorts the array of size count using comparator '<' using a Heap Sort algorithm. */
+template <typename T> void SkTHeapSort(T array[], size_t count) {
+ SkTHeapSort(array, count, SkTCompareLT<T>());
+}
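+
+/*
+ * Illustrative call:
+ *
+ *   int vals[] = { 3, 1, 2 };
+ *   SkTHeapSort(vals, 3);    // ascending order via operator<
+ */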
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Sorts the array of size count using comparator lessThan using an Insertion Sort algorithm. */
+template <typename T, typename C> static void SkTInsertionSort(T* left, T* right, C lessThan) {
+ for (T* next = left + 1; next <= right; ++next) {
+ if (!lessThan(*next, *(next - 1))) {
+ continue;
+ }
+ T insert = std::move(*next);
+ T* hole = next;
+ do {
+ *hole = std::move(*(hole - 1));
+ --hole;
+ } while (left < hole && lessThan(insert, *(hole - 1)));
+ *hole = std::move(insert);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T, typename C>
+static T* SkTQSort_Partition(T* left, T* right, T* pivot, C lessThan) {
+ using std::swap;
+ T pivotValue = *pivot;
+ swap(*pivot, *right);
+ T* newPivot = left;
+ while (left < right) {
+ if (lessThan(*left, pivotValue)) {
+ swap(*left, *newPivot);
+ newPivot += 1;
+ }
+ left += 1;
+ }
+ swap(*newPivot, *right);
+ return newPivot;
+}
+
+/* Intro Sort is a modified Quick Sort.
+ * When the region to be sorted is a small constant size it uses Insertion Sort.
+ * When depth becomes zero, it switches over to Heap Sort.
+ * This implementation recurses on the left region after pivoting and loops on the right,
+ * we already limit the stack depth by switching to heap sort,
+ * and cache locality on the data appears more important than saving a few stack frames.
+ *
+ * @param depth at this recursion depth, switch to Heap Sort.
+ * @param left the beginning of the region to be sorted.
+ * @param right the end of the region to be sorted (inclusive).
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTIntroSort(int depth, T* left, T* right, C lessThan) {
+ while (true) {
+ if (right - left < 32) {
+ SkTInsertionSort(left, right, lessThan);
+ return;
+ }
+
+ if (depth == 0) {
+ SkTHeapSort<T>(left, right - left + 1, lessThan);
+ return;
+ }
+ --depth;
+
+ T* pivot = left + ((right - left) >> 1);
+ pivot = SkTQSort_Partition(left, right, pivot, lessThan);
+
+ SkTIntroSort(depth, left, pivot - 1, lessThan);
+ left = pivot + 1;
+ }
+}
+
+/** Sorts the region from left to right using comparator lessThan using a Quick Sort algorithm. Be
+ * sure to specialize swap if T has an efficient swap operation.
+ *
+ * @param left the beginning of the region to be sorted.
+ * @param right the end of the region to be sorted (inclusive).
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTQSort(T* left, T* right, C lessThan) {
+ if (left >= right) {
+ return;
+ }
+ // Limit Intro Sort recursion depth to no more than 2 * ceil(log2(n)).
+ int depth = 2 * SkNextLog2(SkToU32(right - left));
+ SkTIntroSort(depth, left, right, lessThan);
+}
+
+/** Sorts the region from left to right using comparator '<' using a Quick Sort algorithm. */
+template <typename T> void SkTQSort(T* left, T* right) {
+ SkTQSort(left, right, SkTCompareLT<T>());
+}
+
+/** Sorts the region from left to right using comparator '* < *' using a Quick Sort algorithm. */
+template <typename T> void SkTQSort(T** left, T** right) {
+ SkTQSort(left, right, SkTPointerCompareLT<T>());
+}
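+
+/*
+ * Illustrative call (note that 'right' is inclusive: it points at the last
+ * element, not one past it):
+ *
+ *   int vals[] = { 5, 2, 9, 1 };
+ *   SkTQSort(vals, vals + 3);    // sorts all four entries
+ */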
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTTopoSort.h b/gfx/skia/skia/src/core/SkTTopoSort.h
new file mode 100644
index 0000000000..bc1b07ef59
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTTopoSort.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTTopoSort_DEFINED
+#define SkTTopoSort_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTArray.h"
+
+#ifdef SK_DEBUG
+template <typename T, typename Traits = T>
+void SkTTopoSort_CheckAllUnmarked(const SkTArray<sk_sp<T>>& graph) {
+ for (int i = 0; i < graph.count(); ++i) {
+ SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+ SkASSERT(!Traits::WasOutput(graph[i].get()));
+ }
+}
+
+template <typename T, typename Traits = T>
+void SkTTopoSort_CleanExit(const SkTArray<sk_sp<T>>& graph) {
+ for (int i = 0; i < graph.count(); ++i) {
+ SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+ SkASSERT(Traits::WasOutput(graph[i].get()));
+ }
+}
+#endif
+
+// Recursively visit a node and all the other nodes it depends on.
+// Return false if there is a loop.
+template <typename T, typename Traits = T>
+bool SkTTopoSort_Visit(T* node, SkTArray<sk_sp<T>>* result) {
+ if (Traits::IsTempMarked(node)) {
+ // There is a loop.
+ return false;
+ }
+
+    // If the node under consideration has already been output, it means it
+ // (and all the nodes it depends on) are already in 'result'.
+ if (!Traits::WasOutput(node)) {
+ // This node hasn't been output yet. Recursively assess all the
+    // nodes it depends on, outputting them first.
+ Traits::SetTempMark(node);
+ for (int i = 0; i < Traits::NumDependencies(node); ++i) {
+ if (!SkTTopoSort_Visit<T, Traits>(Traits::Dependency(node, i), result)) {
+ return false;
+ }
+ }
+ Traits::Output(node, result->count()); // mark this node as output
+ Traits::ResetTempMark(node);
+
+ result->push_back(sk_ref_sp(node));
+ }
+
+ return true;
+}
+
+// Topologically sort the nodes in 'graph'. For this sort, when node 'i' depends
+// on node 'j' it means node 'j' must appear in the result before node 'i'.
+// A false return value means there was a loop and the contents of 'graph' will
+// be in some arbitrary state.
+//
+// Traits requires:
+// static void Output(T* t, int index) { ... } // 'index' is 't's position in the result
+// static bool WasOutput(const T* t) { ... }
+//
+// static void SetTempMark(T* t) { ... } // transiently used during toposort
+// static void ResetTempMark(T* t) { ... }
+// static bool IsTempMarked(const T* t) { ... }
+//
+//     static int NumDependencies(const T* t) { ... } // 't' will be output after all the other
+// static T* Dependency(T* t, int index) { ... } // nodes on which it depends
+// We'll look on T for these by default, or you can pass a custom Traits type.
+//
+// TODO: potentially add a version that takes a seed node and just outputs that
+// node and all the nodes on which it depends. This could be used to partially
+// flush a GrRenderTask DAG.
+template <typename T, typename Traits = T>
+bool SkTTopoSort(SkTArray<sk_sp<T>>* graph) {
+ SkTArray<sk_sp<T>> result;
+
+#ifdef SK_DEBUG
+ SkTTopoSort_CheckAllUnmarked<T, Traits>(*graph);
+#endif
+
+ result.reserve(graph->count());
+
+ for (int i = 0; i < graph->count(); ++i) {
+ if (Traits::WasOutput((*graph)[i].get())) {
+ // This node was depended on by some earlier node and has already
+ // been output
+ continue;
+ }
+
+ // Output this node after all the nodes it depends on have been output.
+ if (!SkTTopoSort_Visit<T, Traits>((*graph)[i].get(), &result)) {
+ return false;
+ }
+ }
+
+ SkASSERT(graph->count() == result.count());
+ graph->swap(result);
+
+#ifdef SK_DEBUG
+ SkTTopoSort_CleanExit<T, Traits>(*graph);
+#endif
+ return true;
+}
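+
+/*
+ * Illustrative Traits sketch (Node is a hypothetical ref-counted type, not
+ * Skia code); the element type must be usable with sk_sp:
+ *
+ *   struct Node : public SkRefCnt {
+ *       SkTArray<Node*> fDeps;
+ *       int fIndex = -1;           // < 0 means not yet output
+ *       bool fTempMarked = false;
+ *
+ *       static void Output(Node* n, int i) { n->fIndex = i; }
+ *       static bool WasOutput(const Node* n) { return n->fIndex >= 0; }
+ *       static void SetTempMark(Node* n) { n->fTempMarked = true; }
+ *       static void ResetTempMark(Node* n) { n->fTempMarked = false; }
+ *       static bool IsTempMarked(const Node* n) { return n->fTempMarked; }
+ *       static int NumDependencies(const Node* n) { return n->fDeps.count(); }
+ *       static Node* Dependency(Node* n, int i) { return n->fDeps[i]; }
+ *   };
+ *
+ *   // SkTTopoSort<Node>(&graph) returns false iff the graph has a cycle.
+ */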
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.cpp b/gfx/skia/skia/src/core/SkTaskGroup.cpp
new file mode 100644
index 0000000000..5425821c31
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkExecutor.h"
+#include "src/core/SkTaskGroup.h"
+
+SkTaskGroup::SkTaskGroup(SkExecutor& executor) : fPending(0), fExecutor(executor) {}
+
+void SkTaskGroup::add(std::function<void(void)> fn) {
+ fPending.fetch_add(+1, std::memory_order_relaxed);
+ fExecutor.add([=] {
+ fn();
+ fPending.fetch_add(-1, std::memory_order_release);
+ });
+}
+
+void SkTaskGroup::batch(int N, std::function<void(int)> fn) {
+ // TODO: I really thought we had some sort of more clever chunking logic.
+ fPending.fetch_add(+N, std::memory_order_relaxed);
+ for (int i = 0; i < N; i++) {
+ fExecutor.add([=] {
+ fn(i);
+ fPending.fetch_add(-1, std::memory_order_release);
+ });
+ }
+}
+
+bool SkTaskGroup::done() const {
+ return fPending.load(std::memory_order_acquire) == 0;
+}
+
+void SkTaskGroup::wait() {
+ // Actively help the executor do work until our task group is done.
+ // This lets SkTaskGroups nest arbitrarily deep on a single SkExecutor:
+ // no thread ever blocks waiting for others to do its work.
+ // (We may end up doing work that's not part of our task group. That's fine.)
+ while (!this->done()) {
+ fExecutor.borrow();
+ }
+}
+
+SkTaskGroup::Enabler::Enabler(int threads) {
+ if (threads) {
+ fThreadPool = SkExecutor::MakeLIFOThreadPool(threads);
+ SkExecutor::SetDefault(fThreadPool.get());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.h b/gfx/skia/skia/src/core/SkTaskGroup.h
new file mode 100644
index 0000000000..8d0da7c64c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTaskGroup_DEFINED
+#define SkTaskGroup_DEFINED
+
+#include "include/core/SkExecutor.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include <atomic>
+#include <functional>
+
+class SkTaskGroup : SkNoncopyable {
+public:
+ // Tasks added to this SkTaskGroup will run on its executor.
+ explicit SkTaskGroup(SkExecutor& executor = SkExecutor::GetDefault());
+ ~SkTaskGroup() { this->wait(); }
+
+ // Add a task to this SkTaskGroup.
+ void add(std::function<void(void)> fn);
+
+ // Add a batch of N tasks, all calling fn with different arguments.
+ void batch(int N, std::function<void(int)> fn);
+
+ // Returns true if all Tasks previously add()ed to this SkTaskGroup have run.
+ // It is safe to reuse this SkTaskGroup once done().
+ bool done() const;
+
+ // Block until done().
+ void wait();
+
+ // A convenience for testing tools.
+ // Creates and owns a thread pool, and passes it to SkExecutor::SetDefault().
+ struct Enabler {
+ explicit Enabler(int threads = -1); // -1 -> num_cores, 0 -> noop
+ std::unique_ptr<SkExecutor> fThreadPool;
+ };
+
+private:
+ std::atomic<int32_t> fPending;
+ SkExecutor& fExecutor;
+};
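+
+/*
+ * Illustrative use (process() is hypothetical):
+ *
+ *   SkTaskGroup group;                             // default executor
+ *   group.batch(100, [](int i) { process(i); });   // fan out 100 tasks
+ *   group.wait();    // helps run executor work until the group is done
+ */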
+
+#endif  // SkTaskGroup_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextBlob.cpp b/gfx/skia/skia/src/core/SkTextBlob.cpp
new file mode 100644
index 0000000000..3c84bda6f8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlob.cpp
@@ -0,0 +1,947 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <atomic>
+#include <limits>
+#include <new>
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/text/GrTextBlobCache.h"
+#endif
+
+namespace {
+struct RunFontStorageEquivalent {
+ SkScalar fSize, fScaleX;
+ void* fTypeface;
+ SkScalar fSkewX;
+ uint32_t fFlags;
+};
+static_assert(sizeof(SkFont) == sizeof(RunFontStorageEquivalent), "runfont_should_stay_packed");
+}
+
+size_t SkTextBlob::RunRecord::StorageSize(uint32_t glyphCount, uint32_t textSize,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe) {
+ static_assert(SkIsAlign4(sizeof(SkScalar)), "SkScalar size alignment");
+
+ auto glyphSize = safe->mul(glyphCount, sizeof(uint16_t)),
+ posSize = safe->mul(PosCount(glyphCount, positioning, safe), sizeof(SkScalar));
+
+ // RunRecord object + (aligned) glyph buffer + position buffer
+ auto size = sizeof(SkTextBlob::RunRecord);
+ size = safe->add(size, safe->alignUp(glyphSize, 4));
+ size = safe->add(size, posSize);
+
+ if (textSize) { // Extended run.
+ size = safe->add(size, sizeof(uint32_t));
+ size = safe->add(size, safe->mul(glyphCount, sizeof(uint32_t)));
+ size = safe->add(size, textSize);
+ }
+
+ return safe->alignUp(size, sizeof(void*));
+}
+
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::First(const SkTextBlob* blob) {
+ // The first record (if present) is stored following the blob object.
+ // (aligned up to make the RunRecord aligned too)
+ return reinterpret_cast<const RunRecord*>(SkAlignPtr((uintptr_t)(blob + 1)));
+}
+
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::Next(const RunRecord* run) {
+ return SkToBool(run->fFlags & kLast_Flag) ? nullptr : NextUnchecked(run);
+}
+
+namespace {
+struct RunRecordStorageEquivalent {
+ SkFont fFont;
+ SkPoint fOffset;
+ uint32_t fCount;
+ uint32_t fFlags;
+ SkDEBUGCODE(unsigned fMagic;)
+};
+}
+
+void SkTextBlob::RunRecord::validate(const uint8_t* storageTop) const {
+ SkASSERT(kRunRecordMagic == fMagic);
+ SkASSERT((uint8_t*)NextUnchecked(this) <= storageTop);
+
+ SkASSERT(glyphBuffer() + fCount <= (uint16_t*)posBuffer());
+ SkASSERT(posBuffer() + fCount * ScalarsPerGlyph(positioning())
+ <= (SkScalar*)NextUnchecked(this));
+ if (isExtended()) {
+ SkASSERT(textSize() > 0);
+ SkASSERT(textSizePtr() < (uint32_t*)NextUnchecked(this));
+ SkASSERT(clusterBuffer() < (uint32_t*)NextUnchecked(this));
+ SkASSERT(textBuffer() + textSize() <= (char*)NextUnchecked(this));
+ }
+ static_assert(sizeof(SkTextBlob::RunRecord) == sizeof(RunRecordStorageEquivalent),
+ "runrecord_should_stay_packed");
+}
+
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::NextUnchecked(const RunRecord* run) {
+ SkSafeMath safe;
+ auto res = reinterpret_cast<const RunRecord*>(
+ reinterpret_cast<const uint8_t*>(run)
+ + StorageSize(run->glyphCount(), run->textSize(), run->positioning(), &safe));
+ SkASSERT(safe);
+ return res;
+}
+
+size_t SkTextBlob::RunRecord::PosCount(uint32_t glyphCount,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe) {
+ return safe->mul(glyphCount, ScalarsPerGlyph(positioning));
+}
+
+uint32_t* SkTextBlob::RunRecord::textSizePtr() const {
+ // textSize follows the position buffer.
+ SkASSERT(isExtended());
+ SkSafeMath safe;
+ auto res = (uint32_t*)(&this->posBuffer()[PosCount(fCount, positioning(), &safe)]);
+ SkASSERT(safe);
+ return res;
+}
+
+void SkTextBlob::RunRecord::grow(uint32_t count) {
+ SkScalar* initialPosBuffer = posBuffer();
+ uint32_t initialCount = fCount;
+ fCount += count;
+
+ // Move the initial pos scalars to their new location.
+ size_t copySize = initialCount * sizeof(SkScalar) * ScalarsPerGlyph(positioning());
+ SkASSERT((uint8_t*)posBuffer() + copySize <= (uint8_t*)NextUnchecked(this));
+
+ // memmove, as the buffers may overlap
+ memmove(posBuffer(), initialPosBuffer, copySize);
+}
+
+static int32_t next_id() {
+ static std::atomic<int32_t> nextID{1};
+ int32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+SkTextBlob::SkTextBlob(const SkRect& bounds)
+ : fBounds(bounds)
+ , fUniqueID(next_id())
+ , fCacheID(SK_InvalidUniqueID) {}
+
+SkTextBlob::~SkTextBlob() {
+#if SK_SUPPORT_GPU
+ if (SK_InvalidUniqueID != fCacheID.load()) {
+ GrTextBlobCache::PostPurgeBlobMessage(fUniqueID, fCacheID);
+ }
+#endif
+
+ const auto* run = RunRecord::First(this);
+ do {
+ const auto* nextRun = RunRecord::Next(run);
+ SkDEBUGCODE(run->validate((uint8_t*)this + fStorageSize);)
+ run->~RunRecord();
+ run = nextRun;
+ } while (run);
+}
+
+namespace {
+
+union PositioningAndExtended {
+ int32_t intValue;
+ struct {
+ uint8_t positioning;
+ uint8_t extended;
+ uint16_t padding;
+ };
+};
+
+static_assert(sizeof(PositioningAndExtended) == sizeof(int32_t), "");
+
+} // namespace
+
+enum SkTextBlob::GlyphPositioning : uint8_t {
+ kDefault_Positioning = 0, // Default glyph advances -- zero scalars per glyph.
+ kHorizontal_Positioning = 1, // Horizontal positioning -- one scalar per glyph.
+ kFull_Positioning = 2, // Point positioning -- two scalars per glyph.
+ kRSXform_Positioning = 3, // RSXform positioning -- four scalars per glyph.
+};
+
+unsigned SkTextBlob::ScalarsPerGlyph(GlyphPositioning pos) {
+ const uint8_t gScalarsPerPositioning[] = {
+ 0, // kDefault_Positioning
+ 1, // kHorizontal_Positioning
+ 2, // kFull_Positioning
+ 4, // kRSXform_Positioning
+ };
+ SkASSERT((unsigned)pos <= 3);
+ return gScalarsPerPositioning[pos];
+}
+
+void SkTextBlob::operator delete(void* p) {
+ sk_free(p);
+}
+
+void* SkTextBlob::operator new(size_t) {
+ SK_ABORT("All blobs are created by placement new.");
+}
+
+void* SkTextBlob::operator new(size_t, void* p) {
+ return p;
+}
+
+SkTextBlobRunIterator::SkTextBlobRunIterator(const SkTextBlob* blob)
+ : fCurrentRun(SkTextBlob::RunRecord::First(blob)) {
+ SkDEBUGCODE(fStorageTop = (uint8_t*)blob + blob->fStorageSize;)
+}
+
+void SkTextBlobRunIterator::next() {
+ SkASSERT(!this->done());
+
+ if (!this->done()) {
+ SkDEBUGCODE(fCurrentRun->validate(fStorageTop);)
+ fCurrentRun = SkTextBlob::RunRecord::Next(fCurrentRun);
+ }
+}
+
+SkTextBlobRunIterator::GlyphPositioning SkTextBlobRunIterator::positioning() const {
+ SkASSERT(!this->done());
+ static_assert(static_cast<GlyphPositioning>(SkTextBlob::kDefault_Positioning) ==
+ kDefault_Positioning, "");
+ static_assert(static_cast<GlyphPositioning>(SkTextBlob::kHorizontal_Positioning) ==
+ kHorizontal_Positioning, "");
+ static_assert(static_cast<GlyphPositioning>(SkTextBlob::kFull_Positioning) ==
+ kFull_Positioning, "");
+ static_assert(static_cast<GlyphPositioning>(SkTextBlob::kRSXform_Positioning) ==
+ kRSXform_Positioning, "");
+
+ return SkTo<GlyphPositioning>(fCurrentRun->positioning());
+}
+
+bool SkTextBlobRunIterator::isLCD() const {
+ return fCurrentRun->font().getEdging() == SkFont::Edging::kSubpixelAntiAlias;
+}
+
+SkTextBlobBuilder::SkTextBlobBuilder()
+ : fStorageSize(0)
+ , fStorageUsed(0)
+ , fRunCount(0)
+ , fDeferredBounds(false)
+ , fLastRun(0) {
+ fBounds.setEmpty();
+}
+
+SkTextBlobBuilder::~SkTextBlobBuilder() {
+ if (nullptr != fStorage.get()) {
+ // We are abandoning runs and must destruct the associated font data.
+ // The easiest way to accomplish that is to use the blob destructor.
+ this->make();
+ }
+}
+
+SkRect SkTextBlobBuilder::TightRunBounds(const SkTextBlob::RunRecord& run) {
+ const SkFont& font = run.font();
+ SkRect bounds;
+
+ if (SkTextBlob::kDefault_Positioning == run.positioning()) {
+ font.measureText(run.glyphBuffer(), run.glyphCount() * sizeof(uint16_t),
+ SkTextEncoding::kGlyphID, &bounds);
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+ }
+
+ SkAutoSTArray<16, SkRect> glyphBounds(run.glyphCount());
+ font.getBounds(run.glyphBuffer(), run.glyphCount(), glyphBounds.get(), nullptr);
+
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning());
+ // kFull_Positioning => [ x, y, x, y... ]
+ // kHorizontal_Positioning => [ x, x, x... ]
+ // (const y applied by runBounds.offset(run->offset()) later)
+ const SkScalar horizontalConstY = 0;
+ const SkScalar* glyphPosX = run.posBuffer();
+ const SkScalar* glyphPosY = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ glyphPosX + 1 : &horizontalConstY;
+ const unsigned posXInc = SkTextBlob::ScalarsPerGlyph(run.positioning());
+ const unsigned posYInc = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ posXInc : 0;
+
+ bounds.setEmpty();
+ for (unsigned i = 0; i < run.glyphCount(); ++i) {
+ bounds.join(glyphBounds[i].makeOffset(*glyphPosX, *glyphPosY));
+ glyphPosX += posXInc;
+ glyphPosY += posYInc;
+ }
+
+ SkASSERT((void*)glyphPosX <= SkTextBlob::RunRecord::Next(&run));
+
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+static SkRect map_quad_to_rect(const SkRSXform& xform, const SkRect& rect) {
+ return SkMatrix().setRSXform(xform).mapRect(rect);
+}
+
+SkRect SkTextBlobBuilder::ConservativeRunBounds(const SkTextBlob::RunRecord& run) {
+ SkASSERT(run.glyphCount() > 0);
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning() ||
+ SkTextBlob::kRSXform_Positioning == run.positioning());
+
+ const SkRect fontBounds = SkFontPriv::GetFontBounds(run.font());
+ if (fontBounds.isEmpty()) {
+ // Empty font bounds are likely a font bug. TightBounds has a better chance of
+ // producing useful results in this case.
+ return TightRunBounds(run);
+ }
+
+ // Compute the glyph position bbox.
+ SkRect bounds;
+ switch (run.positioning()) {
+ case SkTextBlob::kHorizontal_Positioning: {
+ const SkScalar* glyphPos = run.posBuffer();
+ SkASSERT((void*)(glyphPos + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ SkScalar minX = *glyphPos;
+ SkScalar maxX = *glyphPos;
+ for (unsigned i = 1; i < run.glyphCount(); ++i) {
+ SkScalar x = glyphPos[i];
+ minX = SkMinScalar(x, minX);
+ maxX = SkMaxScalar(x, maxX);
+ }
+
+ bounds.setLTRB(minX, 0, maxX, 0);
+ } break;
+ case SkTextBlob::kFull_Positioning: {
+ const SkPoint* glyphPosPts = run.pointBuffer();
+ SkASSERT((void*)(glyphPosPts + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ bounds.setBounds(glyphPosPts, run.glyphCount());
+ } break;
+ case SkTextBlob::kRSXform_Positioning: {
+ const SkRSXform* xform = run.xformBuffer();
+ SkASSERT((void*)(xform + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+ bounds = map_quad_to_rect(xform[0], fontBounds);
+ for (unsigned i = 1; i < run.glyphCount(); ++i) {
+ bounds.join(map_quad_to_rect(xform[i], fontBounds));
+ }
+ } break;
+ default:
+ SK_ABORT("unsupported positioning mode");
+ }
+
+ if (run.positioning() != SkTextBlob::kRSXform_Positioning) {
+ // Expand by typeface glyph bounds.
+ bounds.fLeft += fontBounds.left();
+ bounds.fTop += fontBounds.top();
+ bounds.fRight += fontBounds.right();
+ bounds.fBottom += fontBounds.bottom();
+ }
+
+ // Offset by run position.
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+void SkTextBlobBuilder::updateDeferredBounds() {
+ SkASSERT(!fDeferredBounds || fRunCount > 0);
+
+ if (!fDeferredBounds) {
+ return;
+ }
+
+ SkASSERT(fLastRun >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+
+ // FIXME: we should also use conservative bounds for kDefault_Positioning.
+ SkRect runBounds = SkTextBlob::kDefault_Positioning == run->positioning() ?
+ TightRunBounds(*run) : ConservativeRunBounds(*run);
+ fBounds.join(runBounds);
+ fDeferredBounds = false;
+}
+
+void SkTextBlobBuilder::reserve(size_t size) {
+ SkSafeMath safe;
+
+ // We don't currently pre-allocate, but maybe someday...
+ if (safe.add(fStorageUsed, size) <= fStorageSize && safe) {
+ return;
+ }
+
+ if (0 == fRunCount) {
+ SkASSERT(nullptr == fStorage.get());
+ SkASSERT(0 == fStorageSize);
+ SkASSERT(0 == fStorageUsed);
+
+ // the first allocation also includes blob storage
+ // aligned up to a pointer alignment so SkTextBlob::RunRecords after it stay aligned.
+ fStorageUsed = SkAlignPtr(sizeof(SkTextBlob));
+ }
+
+ fStorageSize = safe.add(fStorageUsed, size);
+
+ // FYI: This relies on everything we store being relocatable, particularly SkPaint.
+ // Also, this is counting on the underlying realloc to throw when passed max().
+ fStorage.realloc(safe ? fStorageSize : std::numeric_limits<size_t>::max());
+}
+
+bool SkTextBlobBuilder::mergeRun(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ uint32_t count, SkPoint offset) {
+ if (0 == fLastRun) {
+ SkASSERT(0 == fRunCount);
+ return false;
+ }
+
+ SkASSERT(fLastRun >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+ SkASSERT(run->glyphCount() > 0);
+
+ if (run->textSize() != 0) {
+ return false;
+ }
+
+ if (run->positioning() != positioning
+ || run->font() != font
+ || (run->glyphCount() + count < run->glyphCount())) {
+ return false;
+ }
+
+ // we can merge same-font/same-positioning runs in the following cases:
+ // * fully positioned run following another fully positioned run
+    //   * horizontally positioned run following another horizontally positioned run with the same
+ // y-offset
+ if (SkTextBlob::kFull_Positioning != positioning
+ && (SkTextBlob::kHorizontal_Positioning != positioning
+ || run->offset().y() != offset.y())) {
+ return false;
+ }
+
+ SkSafeMath safe;
+ size_t sizeDelta =
+ SkTextBlob::RunRecord::StorageSize(run->glyphCount() + count, 0, positioning, &safe) -
+ SkTextBlob::RunRecord::StorageSize(run->glyphCount() , 0, positioning, &safe);
+ if (!safe) {
+ return false;
+ }
+
+ this->reserve(sizeDelta);
+
+ // reserve may have realloced
+ run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() + fLastRun);
+ uint32_t preMergeCount = run->glyphCount();
+ run->grow(count);
+
+    // Callers expect the buffers to point at the newly added slice, and not at the beginning.
+ fCurrentRunBuffer.glyphs = run->glyphBuffer() + preMergeCount;
+ fCurrentRunBuffer.pos = run->posBuffer()
+ + preMergeCount * SkTextBlob::ScalarsPerGlyph(positioning);
+
+ fStorageUsed += sizeDelta;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+
+ return true;
+}
+
+void SkTextBlobBuilder::allocInternal(const SkFont& font,
+ SkTextBlob::GlyphPositioning positioning,
+ int count, int textSize, SkPoint offset,
+ const SkRect* bounds) {
+ if (count <= 0 || textSize < 0) {
+ fCurrentRunBuffer = { nullptr, nullptr, nullptr, nullptr };
+ return;
+ }
+
+ if (textSize != 0 || !this->mergeRun(font, positioning, count, offset)) {
+ this->updateDeferredBounds();
+
+ SkSafeMath safe;
+ size_t runSize = SkTextBlob::RunRecord::StorageSize(count, textSize, positioning, &safe);
+ if (!safe) {
+ fCurrentRunBuffer = { nullptr, nullptr, nullptr, nullptr };
+ return;
+ }
+
+ this->reserve(runSize);
+
+ SkASSERT(fStorageUsed >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkASSERT(fStorageUsed + runSize <= fStorageSize);
+
+ SkTextBlob::RunRecord* run = new (fStorage.get() + fStorageUsed)
+ SkTextBlob::RunRecord(count, textSize, offset, font, positioning);
+ fCurrentRunBuffer.glyphs = run->glyphBuffer();
+ fCurrentRunBuffer.pos = run->posBuffer();
+ fCurrentRunBuffer.utf8text = run->textBuffer();
+ fCurrentRunBuffer.clusters = run->clusterBuffer();
+
+ fLastRun = fStorageUsed;
+ fStorageUsed += runSize;
+ fRunCount++;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+ }
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.utf8text);
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.clusters);
+ if (!fDeferredBounds) {
+ if (bounds) {
+ fBounds.join(*bounds);
+ } else {
+ fDeferredBounds = true;
+ }
+ }
+}
+
+// SkFont versions
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRun(const SkFont& font, int count,
+ SkScalar x, SkScalar y,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kDefault_Positioning, count, 0, {x, y}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunPosH(const SkFont& font, int count,
+ SkScalar y,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kHorizontal_Positioning, count, 0, {0, y}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunPos(const SkFont& font, int count,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kFull_Positioning, count, 0, {0, 0}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer&
+SkTextBlobBuilder::allocRunRSXform(const SkFont& font, int count) {
+ this->allocInternal(font, SkTextBlob::kRSXform_Positioning, count, 0, {0, 0}, nullptr);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunText(const SkFont& font, int count,
+ SkScalar x, SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kDefault_Positioning,
+ count,
+ textByteCount,
+ SkPoint::Make(x, y),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPosH(const SkFont& font, int count,
+ SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kHorizontal_Positioning,
+ count,
+ textByteCount,
+ SkPoint::Make(0, y),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPos(const SkFont& font, int count,
+ int textByteCount,
+ SkString lang,
+ const SkRect *bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kFull_Positioning,
+ count, textByteCount,
+ SkPoint::Make(0, 0),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunRSXform(const SkFont& font, int count,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kRSXform_Positioning,
+ count,
+ textByteCount,
+ {0, 0},
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+sk_sp<SkTextBlob> SkTextBlobBuilder::make() {
+ if (!fRunCount) {
+ // We don't instantiate empty blobs.
+ SkASSERT(!fStorage.get());
+ SkASSERT(fStorageUsed == 0);
+ SkASSERT(fStorageSize == 0);
+ SkASSERT(fLastRun == 0);
+ SkASSERT(fBounds.isEmpty());
+ return nullptr;
+ }
+
+ this->updateDeferredBounds();
+
+ // Tag the last run as such.
+ auto* lastRun = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() + fLastRun);
+ lastRun->fFlags |= SkTextBlob::RunRecord::kLast_Flag;
+
+ SkTextBlob* blob = new (fStorage.release()) SkTextBlob(fBounds);
+ SkDEBUGCODE(const_cast<SkTextBlob*>(blob)->fStorageSize = fStorageSize;)
+
+ SkDEBUGCODE(
+ SkSafeMath safe;
+ size_t validateSize = SkAlignPtr(sizeof(SkTextBlob));
+ for (const auto* run = SkTextBlob::RunRecord::First(blob); run;
+ run = SkTextBlob::RunRecord::Next(run)) {
+ validateSize += SkTextBlob::RunRecord::StorageSize(
+ run->fCount, run->textSize(), run->positioning(), &safe);
+ run->validate(reinterpret_cast<const uint8_t*>(blob) + fStorageUsed);
+ fRunCount--;
+ }
+ SkASSERT(validateSize == fStorageUsed);
+ SkASSERT(fRunCount == 0);
+ SkASSERT(safe);
+ )
+
+ fStorageUsed = 0;
+ fStorageSize = 0;
+ fRunCount = 0;
+ fLastRun = 0;
+ fBounds.setEmpty();
+
+ return sk_sp<SkTextBlob>(blob);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkTextBlobPriv::Flatten(const SkTextBlob& blob, SkWriteBuffer& buffer) {
+    // It seems like we could skip this and just recompute the bounds in unflatten, but
+    // some cc_unittests fail if we remove it...
+ buffer.writeRect(blob.bounds());
+
+ SkTextBlobRunIterator it(&blob);
+ while (!it.done()) {
+ SkASSERT(it.glyphCount() > 0);
+
+ buffer.write32(it.glyphCount());
+ PositioningAndExtended pe;
+ pe.intValue = 0;
+ pe.positioning = it.positioning();
+ SkASSERT((int32_t)it.positioning() == pe.intValue); // backwards compat.
+
+ uint32_t textSize = it.textSize();
+ pe.extended = textSize > 0;
+ buffer.write32(pe.intValue);
+ if (pe.extended) {
+ buffer.write32(textSize);
+ }
+ buffer.writePoint(it.offset());
+
+ SkFontPriv::Flatten(it.font(), buffer);
+
+ buffer.writeByteArray(it.glyphs(), it.glyphCount() * sizeof(uint16_t));
+ buffer.writeByteArray(it.pos(),
+ it.glyphCount() * sizeof(SkScalar) *
+ SkTextBlob::ScalarsPerGlyph(
+ SkTo<SkTextBlob::GlyphPositioning>(it.positioning())));
+ if (pe.extended) {
+ buffer.writeByteArray(it.clusters(), sizeof(uint32_t) * it.glyphCount());
+ buffer.writeByteArray(it.text(), it.textSize());
+ }
+
+ it.next();
+ }
+
+ // Marker for the last run (0 is not a valid glyph count).
+ buffer.write32(0);
+}
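+
+// Sketch of the stream written above (derived from the writes in Flatten, not a
+// normative spec): the blob bounds rect precedes the first run, and a glyph
+// count of 0 terminates the run list. Per run:
+//
+//   uint32  glyphCount
+//   uint32  positioning / extended-flag word
+//   uint32  textSize                                    (extended runs only)
+//   point   offset
+//   ----    flattened SkFont
+//   bytes   glyphs   [glyphCount * sizeof(uint16_t)]
+//   bytes   pos      [glyphCount * sizeof(SkScalar) * ScalarsPerGlyph]
+//   bytes   clusters [glyphCount * sizeof(uint32_t)]    (extended runs only)
+//   bytes   text     [textSize]                         (extended runs only)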
+
+sk_sp<SkTextBlob> SkTextBlobPriv::MakeFromBuffer(SkReadBuffer& reader) {
+ SkRect bounds;
+ reader.readRect(&bounds);
+
+ SkTextBlobBuilder blobBuilder;
+ SkSafeMath safe;
+ for (;;) {
+ int glyphCount = reader.read32();
+ if (glyphCount == 0) {
+ // End-of-runs marker.
+ break;
+ }
+
+ PositioningAndExtended pe;
+ pe.intValue = reader.read32();
+ const auto pos = SkTo<SkTextBlob::GlyphPositioning>(pe.positioning);
+ if (glyphCount <= 0 || pos > SkTextBlob::kRSXform_Positioning) {
+ return nullptr;
+ }
+ int textSize = pe.extended ? reader.read32() : 0;
+ if (textSize < 0) {
+ return nullptr;
+ }
+
+ SkPoint offset;
+ reader.readPoint(&offset);
+ SkFont font;
+ if (reader.isVersionLT(SkPicturePriv::kSerializeFonts_Version)) {
+ SkPaint paint;
+ reader.readPaint(&paint, &font);
+ } else {
+ SkFontPriv::Unflatten(&font, reader);
+ }
+
+ // Compute the expected size of the buffer and ensure we have enough to deserialize
+ // a run before allocating it.
+ const size_t glyphSize = safe.mul(glyphCount, sizeof(uint16_t)),
+ posSize =
+ safe.mul(glyphCount, safe.mul(sizeof(SkScalar),
+ SkTextBlob::ScalarsPerGlyph(pos))),
+ clusterSize = pe.extended ? safe.mul(glyphCount, sizeof(uint32_t)) : 0;
+ const size_t totalSize =
+ safe.add(safe.add(glyphSize, posSize), safe.add(clusterSize, textSize));
+
+ if (!reader.isValid() || !safe || totalSize > reader.available()) {
+ return nullptr;
+ }
+
+ const SkTextBlobBuilder::RunBuffer* buf = nullptr;
+ switch (pos) {
+ case SkTextBlob::kDefault_Positioning:
+ buf = &blobBuilder.allocRunText(font, glyphCount, offset.x(), offset.y(),
+ textSize, SkString(), &bounds);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ buf = &blobBuilder.allocRunTextPosH(font, glyphCount, offset.y(),
+ textSize, SkString(), &bounds);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ buf = &blobBuilder.allocRunTextPos(font, glyphCount, textSize, SkString(), &bounds);
+ break;
+ case SkTextBlob::kRSXform_Positioning:
+ buf = &blobBuilder.allocRunRSXform(font, glyphCount, textSize, SkString(), &bounds);
+ break;
+ }
+
+ if (!buf->glyphs ||
+ !buf->pos ||
+ (pe.extended && (!buf->clusters || !buf->utf8text))) {
+ return nullptr;
+ }
+
+ if (!reader.readByteArray(buf->glyphs, glyphSize) ||
+ !reader.readByteArray(buf->pos, posSize)) {
+ return nullptr;
+ }
+
+ if (pe.extended) {
+ if (!reader.readByteArray(buf->clusters, clusterSize) ||
+ !reader.readByteArray(buf->utf8text, textSize)) {
+ return nullptr;
+ }
+ }
+ }
+
+ return blobBuilder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromText(const void* text, size_t byteLength, const SkFont& font,
+ SkTextEncoding encoding) {
+    // Note: we deliberately promote this to fully positioned blobs, since we'd have to pay the
+    // same cost downstream (i.e. computing bounds), so it's cheaper to pay the cost once now.
+ const int count = font.countText(text, byteLength, encoding);
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPos(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ font.getPos(buffer.glyphs, count, buffer.points(), {0, 0});
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkFont& font,
+ SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPos(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.points(), pos, count * sizeof(SkPoint));
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY,
+ const SkFont& font, SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPosH(font, count, constY);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.pos, xpos, count * sizeof(SkScalar));
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkFont& font,
+ SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunRSXform(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.xforms(), xform, count * sizeof(SkRSXform));
+ return builder.make();
+}
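+
+// Usage sketch (illustrative): the simplest of the factories above, building a
+// blob directly from UTF-8 text.
+//
+//   SkFont font;
+//   font.setSize(24);
+//   sk_sp<SkTextBlob> blob =
+//           SkTextBlob::MakeFromText("Hello", 5, font, SkTextEncoding::kUTF8);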
+
+sk_sp<SkData> SkTextBlob::serialize(const SkSerialProcs& procs) const {
+ SkBinaryWriteBuffer buffer;
+ buffer.setSerialProcs(procs);
+ SkTextBlobPriv::Flatten(*this, buffer);
+
+ size_t total = buffer.bytesWritten();
+ sk_sp<SkData> data = SkData::MakeUninitialized(total);
+ buffer.writeToMemory(data->writable_data());
+ return data;
+}
+
+sk_sp<SkTextBlob> SkTextBlob::Deserialize(const void* data, size_t length,
+ const SkDeserialProcs& procs) {
+ SkReadBuffer buffer(data, length);
+ buffer.setDeserialProcs(procs);
+ return SkTextBlobPriv::MakeFromBuffer(buffer);
+}
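+
+// Round-trip sketch (illustrative), assuming a valid `blob`:
+//
+//   sk_sp<SkData> data = blob->serialize(SkSerialProcs());
+//   sk_sp<SkTextBlob> copy =
+//           SkTextBlob::Deserialize(data->data(), data->size(), SkDeserialProcs());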
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkTextBlob::serialize(const SkSerialProcs& procs, void* memory, size_t memory_size) const {
+ SkBinaryWriteBuffer buffer(memory, memory_size);
+ buffer.setSerialProcs(procs);
+ SkTextBlobPriv::Flatten(*this, buffer);
+ return buffer.usingInitialStorage() ? buffer.bytesWritten() : 0u;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+int get_glyph_run_intercepts(const SkGlyphRun& glyphRun,
+ const SkPaint& paint,
+ const SkScalar bounds[2],
+ SkScalar intervals[],
+ int* intervalCount) {
+ SkScalar scale = SK_Scalar1;
+ SkPaint interceptPaint{paint};
+ SkFont interceptFont{glyphRun.font()};
+
+ interceptPaint.setMaskFilter(nullptr); // don't want this affecting our path-cache lookup
+
+ // can't use our canonical size if we need to apply path effects
+ if (interceptPaint.getPathEffect() == nullptr) {
+ // If the wrong size is going to be used, don't hint anything.
+ interceptFont.setHinting(SkFontHinting::kNone);
+ interceptFont.setSubpixel(true);
+ scale = interceptFont.getSize() / SkFontPriv::kCanonicalTextSizeForPaths;
+ interceptFont.setSize(SkIntToScalar(SkFontPriv::kCanonicalTextSizeForPaths));
+        // Note: `scale` can be zero here (even if it wasn't before the divide). It can also be
+        // very small. We call sk_ieee_float_divide below to ensure IEEE divide behavior, since
+        // downstream we will check the resulting coordinates for non-finiteness anyway. Thus we
+        // don't need to check for zero here.
+ if (interceptPaint.getStrokeWidth() > 0
+ && interceptPaint.getStyle() != SkPaint::kFill_Style) {
+ interceptPaint.setStrokeWidth(
+ sk_ieee_float_divide(interceptPaint.getStrokeWidth(), scale));
+ }
+ }
+
+ interceptPaint.setStyle(SkPaint::kFill_Style);
+ interceptPaint.setPathEffect(nullptr);
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(interceptFont, &interceptPaint);
+ auto cache = strikeSpec.findOrCreateExclusiveStrike();
+
+ SkScalar xOffset = 0;
+ SkScalar xPos = xOffset;
+ SkScalar prevAdvance = 0;
+
+ const SkPoint* posCursor = glyphRun.positions().begin();
+ for (auto glyphID : glyphRun.glyphsIDs()) {
+ SkPoint pos = *posCursor++;
+
+ SkGlyph* glyph = cache->glyph(glyphID);
+ xPos += prevAdvance * scale;
+ prevAdvance = glyph->advanceX();
+ if (cache->preparePath(glyph) != nullptr) {
+ // The typeface is scaled, so un-scale the bounds to be in the space of the typeface.
+ // Also ensure the bounds are properly offset by the vertical positioning of the glyph.
+ SkScalar scaledBounds[2] = {
+ (bounds[0] - pos.y()) / scale,
+ (bounds[1] - pos.y()) / scale
+ };
+ cache->findIntercepts(scaledBounds, scale, pos.x(), glyph, intervals, intervalCount);
+ }
+ }
+ return *intervalCount;
+}
+} // namespace
+
+int SkTextBlob::getIntercepts(const SkScalar bounds[2], SkScalar intervals[],
+ const SkPaint* paint) const {
+
+ SkTLazy<SkPaint> defaultPaint;
+ if (paint == nullptr) {
+ defaultPaint.init();
+ paint = defaultPaint.get();
+ }
+
+ SkGlyphRunBuilder builder;
+ builder.textBlobToGlyphRunListIgnoringRSXForm(*paint, *this, SkPoint{0, 0});
+ auto glyphRunList = builder.useGlyphRunList();
+
+ int intervalCount = 0;
+ for (const SkGlyphRun& glyphRun : glyphRunList) {
+ intervalCount = get_glyph_run_intercepts(glyphRun, *paint, bounds, intervals, &intervalCount);
+ }
+
+ return intervalCount;
+}
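+
+// Usage sketch (illustrative): find where glyph outlines cross an underline band
+// so the underline can be broken around descenders. The public header documents
+// passing nullptr first to obtain the interval count; the band values here are
+// hypothetical.
+//
+//   SkScalar bounds[2] = {underlineTop, underlineBottom};
+//   int n = blob->getIntercepts(bounds, nullptr);
+//   std::vector<SkScalar> intervals(n);
+//   blob->getIntercepts(bounds, intervals.data());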
+
+////////
+
+SkTextBlob::Iter::Iter(const SkTextBlob& blob) {
+ fRunRecord = RunRecord::First(&blob);
+}
+
+bool SkTextBlob::Iter::next(Run* rec) {
+ if (fRunRecord) {
+ if (rec) {
+ rec->fTypeface = fRunRecord->font().getTypeface();
+ rec->fGlyphCount = fRunRecord->glyphCount();
+ rec->fGlyphIndices = fRunRecord->glyphBuffer();
+ }
+ if (fRunRecord->isLastRun()) {
+ fRunRecord = nullptr;
+ } else {
+ fRunRecord = RunRecord::Next(fRunRecord);
+ }
+ return true;
+ }
+ return false;
+}
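+
+// Usage sketch (illustrative): enumerate the typefaces a blob references.
+//
+//   SkTextBlob::Iter::Run run;
+//   for (SkTextBlob::Iter it(*blob); it.next(&run);) {
+//       // run.fTypeface may be nullptr; run.fGlyphCount / run.fGlyphIndices
+//       // describe the glyphs of this run.
+//   }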
diff --git a/gfx/skia/skia/src/core/SkTextBlobPriv.h b/gfx/skia/skia/src/core/SkTextBlobPriv.h
new file mode 100644
index 0000000000..5394b50d5d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlobPriv.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextBlobPriv_DEFINED
+#define SkTextBlobPriv_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkSafeMath.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkTextBlobPriv {
+public:
+ /**
+ * Serialize to a buffer.
+ */
+ static void Flatten(const SkTextBlob& , SkWriteBuffer&);
+
+ /**
+ * Recreate an SkTextBlob that was serialized into a buffer.
+ *
+     * @param buffer Serialized blob data.
+     * @return A new SkTextBlob representing the serialized data, or nullptr if the buffer is
+     *         invalid.
+ */
+ static sk_sp<SkTextBlob> MakeFromBuffer(SkReadBuffer&);
+};
+
+class SkTextBlobBuilderPriv {
+public:
+ static const SkTextBlobBuilder::RunBuffer& AllocRunText(SkTextBlobBuilder* builder,
+ const SkFont& font, int count, SkScalar x, SkScalar y, int textByteCount,
+ SkString lang, const SkRect* bounds = nullptr) {
+ return builder->allocRunText(font, count, x, y, textByteCount, lang, bounds);
+ }
+ static const SkTextBlobBuilder::RunBuffer& AllocRunTextPosH(SkTextBlobBuilder* builder,
+ const SkFont& font, int count, SkScalar y, int textByteCount, SkString lang,
+ const SkRect* bounds = nullptr) {
+ return builder->allocRunTextPosH(font, count, y, textByteCount, lang, bounds);
+ }
+ static const SkTextBlobBuilder::RunBuffer& AllocRunTextPos(SkTextBlobBuilder* builder,
+ const SkFont& font, int count, int textByteCount, SkString lang,
+ const SkRect* bounds = nullptr) {
+ return builder->allocRunTextPos(font, count, textByteCount, lang, bounds);
+ }
+};
+
+//
+// Textblob data is laid out into externally-managed storage as follows:
+//
+// -----------------------------------------------------------------------------
+// | SkTextBlob | RunRecord | Glyphs[] | Pos[] | RunRecord | Glyphs[] | Pos[] | ...
+// -----------------------------------------------------------------------------
+//
+// Each run record describes a text blob run, and can be used to determine the (implicit)
+// location of the following record.
+//
+// Extended Textblob runs have more data after the Pos[] array:
+//
+// -------------------------------------------------------------------------
+// ... | RunRecord | Glyphs[] | Pos[] | TextSize | Clusters[] | Text[] | ...
+// -------------------------------------------------------------------------
+//
+// To determine the length of the extended run data, the TextSize must be read.
+//
+// Extended Textblob runs may be mixed with non-extended runs.
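+//
+// Worked example (a sketch): a non-extended, fully positioned run of 3 glyphs
+// occupies
+//     sizeof(RunRecord)
+//   + SkAlign4(3 * sizeof(uint16_t))   // glyph IDs, padded to 4-byte alignment
+//   + 3 * 2 * sizeof(SkScalar)         // two position scalars per glyph
+// with the total padded so the next RunRecord remains pointer-aligned.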
+
+SkDEBUGCODE(static const unsigned kRunRecordMagic = 0xb10bcafe;)
+
+class SkTextBlob::RunRecord {
+public:
+    RunRecord(uint32_t count, uint32_t textSize, const SkPoint& offset, const SkFont& font,
+              GlyphPositioning pos)
+ : fFont(font)
+ , fCount(count)
+ , fOffset(offset)
+ , fFlags(pos) {
+ SkASSERT(static_cast<unsigned>(pos) <= Flags::kPositioning_Mask);
+
+ SkDEBUGCODE(fMagic = kRunRecordMagic);
+ if (textSize > 0) {
+ fFlags |= kExtended_Flag;
+ *this->textSizePtr() = textSize;
+ }
+ }
+
+ uint32_t glyphCount() const {
+ return fCount;
+ }
+
+ const SkPoint& offset() const {
+ return fOffset;
+ }
+
+ const SkFont& font() const {
+ return fFont;
+ }
+
+ GlyphPositioning positioning() const {
+ return static_cast<GlyphPositioning>(fFlags & kPositioning_Mask);
+ }
+
+ uint16_t* glyphBuffer() const {
+ static_assert(SkIsAlignPtr(sizeof(RunRecord)), "");
+ // Glyphs are stored immediately following the record.
+ return reinterpret_cast<uint16_t*>(const_cast<RunRecord*>(this) + 1);
+ }
+
+ // can be aliased with pointBuffer() or xformBuffer()
+ SkScalar* posBuffer() const {
+ // Position scalars follow the (aligned) glyph buffer.
+ return reinterpret_cast<SkScalar*>(reinterpret_cast<uint8_t*>(this->glyphBuffer()) +
+ SkAlign4(fCount * sizeof(uint16_t)));
+ }
+
+ // alias for posBuffer()
+ SkPoint* pointBuffer() const {
+        SkASSERT(this->positioning() == kFull_Positioning);
+ return reinterpret_cast<SkPoint*>(this->posBuffer());
+ }
+
+ // alias for posBuffer()
+ SkRSXform* xformBuffer() const {
+        SkASSERT(this->positioning() == kRSXform_Positioning);
+ return reinterpret_cast<SkRSXform*>(this->posBuffer());
+ }
+
+ uint32_t textSize() const { return isExtended() ? *this->textSizePtr() : 0; }
+
+ uint32_t* clusterBuffer() const {
+ // clusters follow the textSize.
+ return isExtended() ? 1 + this->textSizePtr() : nullptr;
+ }
+
+ char* textBuffer() const {
+ return isExtended()
+ ? reinterpret_cast<char*>(this->clusterBuffer() + fCount)
+ : nullptr;
+ }
+
+ bool isLastRun() const { return SkToBool(fFlags & kLast_Flag); }
+
+ static size_t StorageSize(uint32_t glyphCount, uint32_t textSize,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe);
+
+ static const RunRecord* First(const SkTextBlob* blob);
+
+ static const RunRecord* Next(const RunRecord* run);
+
+ void validate(const uint8_t* storageTop) const;
+
+private:
+ friend class SkTextBlobBuilder;
+
+ enum Flags {
+ kPositioning_Mask = 0x03, // bits 0-1 reserved for positioning
+ kLast_Flag = 0x04, // set for the last blob run
+ kExtended_Flag = 0x08, // set for runs with text/cluster info
+ };
+
+ static const RunRecord* NextUnchecked(const RunRecord* run);
+
+ static size_t PosCount(uint32_t glyphCount,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe);
+
+ uint32_t* textSizePtr() const;
+
+ void grow(uint32_t count);
+
+ bool isExtended() const {
+ return fFlags & kExtended_Flag;
+ }
+
+ SkFont fFont;
+ uint32_t fCount;
+ SkPoint fOffset;
+ uint32_t fFlags;
+
+ SkDEBUGCODE(unsigned fMagic;)
+};
+
+/**
+ * Iterate through all of the text runs of the text blob. For example:
+ * for (SkTextBlobRunIterator it(blob); !it.done(); it.next()) {
+ * .....
+ * }
+ */
+class SkTextBlobRunIterator {
+public:
+ SkTextBlobRunIterator(const SkTextBlob* blob);
+
+ enum GlyphPositioning : uint8_t {
+ kDefault_Positioning = 0, // Default glyph advances -- zero scalars per glyph.
+ kHorizontal_Positioning = 1, // Horizontal positioning -- one scalar per glyph.
+ kFull_Positioning = 2, // Point positioning -- two scalars per glyph.
+ kRSXform_Positioning = 3, // RSXform positioning -- four scalars per glyph.
+ };
+
+ bool done() const {
+ return !fCurrentRun;
+ }
+ void next();
+
+ uint32_t glyphCount() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphCount();
+ }
+ const uint16_t* glyphs() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphBuffer();
+ }
+ const SkScalar* pos() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->posBuffer();
+ }
+ // alias for pos()
+ const SkPoint* points() const {
+ return fCurrentRun->pointBuffer();
+ }
+ // alias for pos()
+ const SkRSXform* xforms() const {
+ return fCurrentRun->xformBuffer();
+ }
+ const SkPoint& offset() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->offset();
+ }
+ const SkFont& font() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->font();
+ }
+ GlyphPositioning positioning() const;
+ uint32_t* clusters() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->clusterBuffer();
+ }
+ uint32_t textSize() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textSize();
+ }
+ char* text() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textBuffer();
+ }
+
+ bool isLCD() const;
+
+private:
+ const SkTextBlob::RunRecord* fCurrentRun;
+
+ SkDEBUGCODE(uint8_t* fStorageTop;)
+};
+
+#endif // SkTextBlobPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextFormatParams.h b/gfx/skia/skia/src/core/SkTextFormatParams.h
new file mode 100644
index 0000000000..a4f1c0eb78
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextFormatParams.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextFormatParams_DEFINED
+#define SkTextFormatParams_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+// Fraction of the text size to lower a strikethrough line below the baseline.
+#define kStdStrikeThru_Offset (-SK_Scalar1 * 6 / 21)
+// Fraction of the text size to lower an underline below the baseline.
+#define kStdUnderline_Offset (SK_Scalar1 / 9)
+// Fraction of the text size to use for a strikethrough or underline.
+#define kStdUnderline_Thickness (SK_Scalar1 / 18)
+
+// The stroke-width increase used to embolden fake bold text scales with the text size.
+// At 9 points or below, the stroke width is increased by text size / 24.
+// At 36 points and above, it is increased by text size / 32. In between,
+// it is interpolated between those values.
+static const SkScalar kStdFakeBoldInterpKeys[] = {
+ SK_Scalar1*9,
+ SK_Scalar1*36,
+};
+static const SkScalar kStdFakeBoldInterpValues[] = {
+ SK_Scalar1/24,
+ SK_Scalar1/32,
+};
+static_assert(SK_ARRAY_COUNT(kStdFakeBoldInterpKeys) == SK_ARRAY_COUNT(kStdFakeBoldInterpValues),
+ "mismatched_array_size");
+static const int kStdFakeBoldInterpLength = SK_ARRAY_COUNT(kStdFakeBoldInterpKeys);
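+
+// Worked example (a sketch): at 9pt the stroke is widened by 9/24 = 0.375pt and
+// at 36pt by 36/32 = 1.125pt; at 22.5pt, halfway between the keys, the factor is
+// interpolated halfway between 1/24 and 1/32, widening by roughly 0.82pt.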
+
+#endif // SkTextFormatParams_DEFINED
diff --git a/gfx/skia/skia/src/core/SkThreadID.cpp b/gfx/skia/skia/src/core/SkThreadID.cpp
new file mode 100644
index 0000000000..e882834e72
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkThreadID.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkThreadID.h"
+
+#ifdef SK_BUILD_FOR_WIN
+ #include "src/core/SkLeanWindows.h"
+ SkThreadID SkGetThreadID() { return GetCurrentThreadId(); }
+#else
+ #include <pthread.h>
+ SkThreadID SkGetThreadID() { return (int64_t)pthread_self(); }
+#endif
diff --git a/gfx/skia/skia/src/core/SkTime.cpp b/gfx/skia/skia/src/core/SkTime.cpp
new file mode 100644
index 0000000000..47151f91ec
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTime.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTime.h"
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkLeanWindows.h"
+
+void SkTime::DateTime::toISO8601(SkString* dst) const {
+ if (dst) {
+ int timeZoneMinutes = SkToInt(fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ dst->printf("%04u-%02u-%02uT%02u:%02u:%02u%c%02d:%02d",
+ static_cast<unsigned>(fYear), static_cast<unsigned>(fMonth),
+ static_cast<unsigned>(fDay), static_cast<unsigned>(fHour),
+ static_cast<unsigned>(fMinute),
+ static_cast<unsigned>(fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+ }
+}
+
+#ifdef SK_BUILD_FOR_WIN
+
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = st.wYear;
+ dt->fMonth = SkToU8(st.wMonth);
+ dt->fDayOfWeek = SkToU8(st.wDayOfWeek);
+ dt->fDay = SkToU8(st.wDay);
+ dt->fHour = SkToU8(st.wHour);
+ dt->fMinute = SkToU8(st.wMinute);
+ dt->fSecond = SkToU8(st.wSecond);
+ }
+}
+
+#else // SK_BUILD_FOR_WIN
+
+#include <time.h>
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ time_t m_time;
+ time(&m_time);
+ struct tm tstruct;
+ gmtime_r(&m_time, &tstruct);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = tstruct.tm_year + 1900;
+ dt->fMonth = SkToU8(tstruct.tm_mon + 1);
+ dt->fDayOfWeek = SkToU8(tstruct.tm_wday);
+ dt->fDay = SkToU8(tstruct.tm_mday);
+ dt->fHour = SkToU8(tstruct.tm_hour);
+ dt->fMinute = SkToU8(tstruct.tm_min);
+ dt->fSecond = SkToU8(tstruct.tm_sec);
+ }
+}
+#endif // SK_BUILD_FOR_WIN
+
+#if !defined(__has_feature)
+ #define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer) || defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+#include <time.h>
+double SkTime::GetNSecs() {
+ // See skia:6504
+ struct timespec tp;
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+ return tp.tv_sec * 1e9 + tp.tv_nsec;
+}
+#else
+#include <chrono>
+double SkTime::GetNSecs() {
+ auto now = std::chrono::steady_clock::now();
+ std::chrono::duration<double, std::nano> ns = now.time_since_epoch();
+ return ns.count();
+}
+#endif
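+
+// Usage sketch (illustrative): simple wall-clock timing with GetNSecs().
+//
+//   double start = SkTime::GetNSecs();
+//   // ... work ...
+//   double elapsedMs = (SkTime::GetNSecs() - start) * 1e-6;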
diff --git a/gfx/skia/skia/src/core/SkTraceEvent.h b/gfx/skia/skia/src/core/SkTraceEvent.h
new file mode 100644
index 0000000000..b4548ddf01
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEvent.h
@@ -0,0 +1,366 @@
+// Copyright (c) 2014 Google Inc.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines implementation details of how the trace macros in
+// SkTraceEventCommon.h collect and store trace events. Anything not
+// implementation-specific should go in SkTraceEventCommon.h instead of here.
+
+#ifndef SkTraceEvent_DEFINED
+#define SkTraceEvent_DEFINED
+
+#include "include/utils/SkEventTracer.h"
+#include "src/core/SkTraceEventCommon.h"
+#include <atomic>
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Makes it easier to add traces with a simple TRACE_EVENT0("skia", TRACE_FUNC).
+#if defined(_MSC_VER)
+ #define TRACE_FUNC __FUNCSIG__
+#else
+ #define TRACE_FUNC __PRETTY_FUNCTION__
+#endif
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+ skia::tracing_internals::TraceStringWithCopy(str)
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
+ SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ SkEventTracer::GetInstance()->getCategoryGroupEnabled
+
+// Add a trace event to the platform tracing system.
+// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ SkEventTracer::GetInstance()->addTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// SkEventTracer::Handle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ SkEventTracer::GetInstance()->updateTraceEventDuration
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT SK_API
+
+// We prepend this string to all category names, so that ALL Skia trace events are
+// disabled by default when tracing in Chrome.
+#define TRACE_CATEGORY_PREFIX "disabled-by-default-"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+ trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+ INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(atomic.load(std::memory_order_relaxed)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ atomic.store(reinterpret_cast<intptr_t>(category_group_enabled), \
+ std::memory_order_relaxed); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static std::atomic<intptr_t> INTERNAL_TRACE_EVENT_UID(atomic){0}; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ TRACE_CATEGORY_PREFIX category_group, \
+ INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ skia::tracing_internals::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ skia::tracing_internals::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ skia::tracing_internals::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), trace_event_flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ do { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, skia::tracing_internals::kNoEventId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ } \
+ } while (0)
+
+namespace skia {
+namespace tracing_internals {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+public:
+ TraceID(const void* id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(uint64_t id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned short id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(long long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(int id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(short id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(signed char id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+
+ uint64_t data() const { return data_; }
+
+private:
+ uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char* () const { return str_; }
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+ union_member, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer, TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string, TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent template functions are defined here instead of in the
+// macro, because the arg_values could be temporary objects, such as
+// std::string. In order to store pointers to the internal c_str and pass
+// through to the tracing API, the arg_values must live throughout these
+// procedures.
+
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ kZeroNumArgs, nullptr, nullptr, nullptr, flags);
+}
+
+template<class ARG1_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, &arg1_name, arg_types, arg_values, flags);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(nullptr) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled,
+ const char* name,
+ SkEventTracer::Handle event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ SkEventTracer::Handle event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+} // namespace tracing_internals
+} // namespace skia
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTraceEventCommon.h b/gfx/skia/skia/src/core/SkTraceEventCommon.h
new file mode 100644
index 0000000000..2bf5c826d3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEventCommon.h
@@ -0,0 +1,291 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef SkTraceEventCommon_DEFINED
+#define SkTraceEventCommon_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/utils/SkTraceEventPhase.h"
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Duration of scoped regions
+// Instantaneous events
+// Counters
+//
+// The first two arguments to all TRACE macros are the category and name. Both are strings, and
+// must have application lifetime (statics or literals). The same applies to arg_names, and string
+// argument values. However, you can force a copy of a string argument value with TRACE_STR_COPY:
+// TRACE_EVENT1("category", "name", "arg1", "literal string is only referenced");
+// TRACE_EVENT1("category", "name", "arg1", TRACE_STR_COPY("string will be copied"));
+//
+// Categories are used to group events, and can be enabled or disabled by the tracing framework.
+// The trace system will automatically add the process id, thread id, and microsecond timestamp
+// to all events.
+//
+// The TRACE_EVENT[0-2] macros trace the duration of entire scopes:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly", "howMuch", howMuch);
+// ...
+// }
+//
+//
+// Trace events also support counters, which are a way to track a quantity as it varies over time.
+// Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// The tracing UI will show these counters in a single graph, as a summed area chart.
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+#define TRACE_EMPTY do {} while (0)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#include <cutils/trace.h>
+#include <stdarg.h>
+
+class SkAndroidFrameworkTraceUtil {
+public:
+ SkAndroidFrameworkTraceUtil(const char* name) {
+ if (CC_UNLIKELY(gEnableAndroidTracing)) {
+ ATRACE_BEGIN(name);
+ }
+ }
+ SkAndroidFrameworkTraceUtil(bool, const char* fmt, ...) {
+ if (CC_LIKELY((!gEnableAndroidTracing) || (!ATRACE_ENABLED()))) return;
+
+ const int BUFFER_SIZE = 256;
+ va_list ap;
+ char buf[BUFFER_SIZE];
+
+ va_start(ap, fmt);
+ vsnprintf(buf, BUFFER_SIZE, fmt, ap);
+ va_end(ap);
+
+ ATRACE_BEGIN(buf);
+ }
+ ~SkAndroidFrameworkTraceUtil() {
+ if (CC_UNLIKELY(gEnableAndroidTracing)) {
+ ATRACE_END();
+ }
+ }
+
+ static void setEnableTracing(bool enableAndroidTracing) {
+ gEnableAndroidTracing = enableAndroidTracing;
+ }
+
+ static bool getEnableTracing() {
+ return gEnableAndroidTracing;
+ }
+
+private:
+ static bool gEnableAndroidTracing;
+};
+
+#define ATRACE_ANDROID_FRAMEWORK(fmt, ...) SkAndroidFrameworkTraceUtil __trace(true, fmt, ##__VA_ARGS__)
+
+// Records a pair of begin and end events called "name" for the current scope, with 0, 1 or 2
+// associated arguments. In the framework, the arguments are ignored.
+#define TRACE_EVENT0(category_group, name) \
+ SkAndroidFrameworkTraceUtil __trace(name)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ SkAndroidFrameworkTraceUtil __trace(name)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ SkAndroidFrameworkTraceUtil __trace(name)
+
+// Records a single event called "name" immediately, with 0, 1 or 2 associated arguments. If the
+// category is not enabled, then this does nothing.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ do { SkAndroidFrameworkTraceUtil __trace(name); } while(0)
+
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ do { SkAndroidFrameworkTraceUtil __trace(name); } while(0)
+
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ do { SkAndroidFrameworkTraceUtil __trace(name); } while(0)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+#define TRACE_COUNTER1(category_group, name, value) \
+ if (CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ ATRACE_INT(name, value); \
+ }
+
+// Records the values of a multi-parted counter called "name" immediately.
+// In Chrome, this macro produces a stacked bar chart. ATrace doesn't support
+// that, so this just produces two separate counters.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, value2_name, value2_val) \
+ do { \
+ if (CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ ATRACE_INT(name "-" value1_name, value1_val); \
+ ATRACE_INT(name "-" value2_name, value2_val); \
+ } \
+ } while (0)
+
+// ATrace has no object tracking
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) TRACE_EMPTY
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) TRACE_EMPTY
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) TRACE_EMPTY
+
+// Macro to efficiently determine if a given category group is enabled.
+// This is only used for some shader text logging that isn't supported in ATrace anyway.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { *ret = false; } while (0)
+
+#else // !SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#define ATRACE_ANDROID_FRAMEWORK(fmt, ...) TRACE_EMPTY
+
+// Records a pair of begin and end events called "name" for the current scope, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2 associated arguments. If the
+// category is not enabled, then this does nothing.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+#define TRACE_EVENT_ASYNC_BEGIN0(category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_ASYNC_END0(category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Macros to track the life time and value of arbitrary client objects.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+#endif
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+#endif // SkTraceEventCommon_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTypeface.cpp b/gfx/skia/skia/src/core/SkTypeface.cpp
new file mode 100644
index 0000000000..17159bebf9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface.cpp
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+
+SkTypeface::SkTypeface(const SkFontStyle& style, bool isFixedPitch)
+ : fUniqueID(SkTypefaceCache::NewFontID()), fStyle(style), fIsFixedPitch(isFixedPitch) { }
+
+SkTypeface::~SkTypeface() { }
+
+#ifdef SK_WHITELIST_SERIALIZED_TYPEFACES
+extern void WhitelistSerializeTypeface(const SkTypeface*, SkWStream* );
+#define SK_TYPEFACE_DELEGATE WhitelistSerializeTypeface
+#else
+#define SK_TYPEFACE_DELEGATE nullptr
+#endif
+
+void (*gSerializeTypefaceDelegate)(const SkTypeface*, SkWStream* ) = SK_TYPEFACE_DELEGATE;
+sk_sp<SkTypeface> (*gDeserializeTypefaceDelegate)(SkStream* ) = nullptr;
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+class SkEmptyTypeface : public SkTypeface {
+public:
+ static sk_sp<SkTypeface> Make() { return sk_sp<SkTypeface>(new SkEmptyTypeface); }
+protected:
+ SkEmptyTypeface() : SkTypeface(SkFontStyle(), true) { }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override { return nullptr; }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ return sk_ref_sp(this);
+ }
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override {
+ return nullptr;
+ }
+ void onFilterRec(SkScalerContextRec*) const override { }
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ return nullptr;
+ }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override { }
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override {
+ sk_bzero(glyphs, count * sizeof(glyphs[0]));
+ }
+ int onCountGlyphs() const override { return 0; }
+ void getPostScriptGlyphNames(SkString*) const override {}
+ void getGlyphToUnicodeMap(SkUnichar*) const override {}
+ int onGetUPEM() const override { return 0; }
+ class EmptyLocalizedStrings : public SkTypeface::LocalizedStrings {
+ public:
+ bool next(SkTypeface::LocalizedString*) override { return false; }
+ };
+ void onGetFamilyName(SkString* familyName) const override {
+ familyName->reset();
+ }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override {
+ return new EmptyLocalizedStrings;
+ }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override
+ {
+ return 0;
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return 0;
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override { return 0; }
+ size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override {
+ return 0;
+ }
+};
+
+} // namespace
+
+SkFontStyle SkTypeface::FromOldStyle(Style oldStyle) {
+ return SkFontStyle((oldStyle & SkTypeface::kBold) ? SkFontStyle::kBold_Weight
+ : SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ (oldStyle & SkTypeface::kItalic) ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
+
+SkTypeface* SkTypeface::GetDefaultTypeface(Style style) {
+ static SkOnce once[4];
+ static sk_sp<SkTypeface> defaults[4];
+
+ SkASSERT((int)style < 4);
+ once[style]([style] {
+ sk_sp<SkFontMgr> fm(SkFontMgr::RefDefault());
+ auto t = fm->legacyMakeTypeface(nullptr, FromOldStyle(style));
+ defaults[style] = t ? t : SkEmptyTypeface::Make();
+ });
+ return defaults[style].get();
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDefault() {
+ return sk_ref_sp(GetDefaultTypeface());
+}
+
+uint32_t SkTypeface::UniqueID(const SkTypeface* face) {
+ if (nullptr == face) {
+ face = GetDefaultTypeface();
+ }
+ return face->uniqueID();
+}
+
+bool SkTypeface::Equal(const SkTypeface* facea, const SkTypeface* faceb) {
+ return facea == faceb || SkTypeface::UniqueID(facea) == SkTypeface::UniqueID(faceb);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
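+// Editorial sketch (not from upstream): a nullptr name with a plain
+// bold/normal weight and italic/upright slant resolves to one of the four
+// cached default typefaces below; every other request is routed through the
+// platform SkFontMgr. For example,
+//
+//     sk_sp<SkTypeface> tf = SkTypeface::MakeFromName(nullptr, SkFontStyle::Bold());
+//
+// returns the cached default bold face without consulting the font manager.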
+sk_sp<SkTypeface> SkTypeface::MakeFromName(const char name[],
+ SkFontStyle fontStyle) {
+ if (nullptr == name && (fontStyle.slant() == SkFontStyle::kItalic_Slant ||
+ fontStyle.slant() == SkFontStyle::kUpright_Slant) &&
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ||
+ fontStyle.weight() == SkFontStyle::kNormal_Weight)) {
+ return sk_ref_sp(GetDefaultTypeface(static_cast<SkTypeface::Style>(
+ (fontStyle.slant() == SkFontStyle::kItalic_Slant ? SkTypeface::kItalic :
+ SkTypeface::kNormal) |
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ? SkTypeface::kBold :
+ SkTypeface::kNormal))));
+ }
+ return SkFontMgr::RefDefault()->legacyMakeTypeface(name, fontStyle);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset> stream, int index) {
+ if (!stream) {
+ return nullptr;
+ }
+ return SkFontMgr::RefDefault()->makeFromStream(std::move(stream), index);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromData(sk_sp<SkData> data, int index) {
+ if (!data) {
+ return nullptr;
+ }
+ return SkFontMgr::RefDefault()->makeFromData(std::move(data), index);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromFontData(std::unique_ptr<SkFontData> data) {
+ return SkFontMgr::RefDefault()->makeFromFontData(std::move(data));
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromFile(const char path[], int index) {
+ return SkFontMgr::RefDefault()->makeFromFile(path, index);
+}
+
+sk_sp<SkTypeface> SkTypeface::makeClone(const SkFontArguments& args) const {
+ return this->onMakeClone(args);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkTypeface::serialize(SkWStream* wstream, SerializeBehavior behavior) const {
+ if (gSerializeTypefaceDelegate) {
+ (*gSerializeTypefaceDelegate)(this, wstream);
+ return;
+ }
+
+ bool isLocalData = false;
+ SkFontDescriptor desc;
+ this->onGetFontDescriptor(&desc, &isLocalData);
+
+ bool shouldSerializeData = false;
+ switch (behavior) {
+ case SerializeBehavior::kDoIncludeData: shouldSerializeData = true; break;
+ case SerializeBehavior::kDontIncludeData: shouldSerializeData = false; break;
+ case SerializeBehavior::kIncludeDataIfLocal: shouldSerializeData = isLocalData; break;
+ }
+
+    // TODO: why do we check hasFontData() and allow the data to pass through even if the caller
+    // has said they don't want the fontdata? Does this actually happen (does getDescriptor
+    // return fontdata as well)?
+ if (shouldSerializeData && !desc.hasFontData()) {
+ desc.setFontData(this->onMakeFontData());
+ }
+ desc.serialize(wstream);
+}
+
+sk_sp<SkData> SkTypeface::serialize(SerializeBehavior behavior) const {
+ SkDynamicMemoryWStream stream;
+ this->serialize(&stream, behavior);
+ return stream.detachAsData();
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDeserialize(SkStream* stream) {
+ if (gDeserializeTypefaceDelegate) {
+ return (*gDeserializeTypefaceDelegate)(stream);
+ }
+
+ SkFontDescriptor desc;
+ if (!SkFontDescriptor::Deserialize(stream, &desc)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkFontData> data = desc.detachFontData();
+ if (data) {
+ sk_sp<SkTypeface> typeface(SkTypeface::MakeFromFontData(std::move(data)));
+ if (typeface) {
+ return typeface;
+ }
+ }
+
+ return SkTypeface::MakeFromName(desc.getFamilyName(), desc.getStyle());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkTypeface::getVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+ return this->onGetVariationDesignPosition(coordinates, coordinateCount);
+}
+
+int SkTypeface::getVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+ return this->onGetVariationDesignParameters(parameters, parameterCount);
+}
+
+int SkTypeface::countTables() const {
+ return this->onGetTableTags(nullptr);
+}
+
+int SkTypeface::getTableTags(SkFontTableTag tags[]) const {
+ return this->onGetTableTags(tags);
+}
+
+size_t SkTypeface::getTableSize(SkFontTableTag tag) const {
+ return this->onGetTableData(tag, 0, ~0U, nullptr);
+}
+
+size_t SkTypeface::getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const {
+ return this->onGetTableData(tag, offset, length, data);
+}
+
+sk_sp<SkData> SkTypeface::copyTableData(SkFontTableTag tag) const {
+ return this->onCopyTableData(tag);
+}
+
+sk_sp<SkData> SkTypeface::onCopyTableData(SkFontTableTag tag) const {
+ size_t size = this->getTableSize(tag);
+ if (size) {
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ (void)this->getTableData(tag, 0, size, data->writable_data());
+ return data;
+ }
+ return nullptr;
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface::openStream(int* ttcIndex) const {
+ int ttcIndexStorage;
+ if (nullptr == ttcIndex) {
+ // So our subclasses don't need to check for null param
+ ttcIndex = &ttcIndexStorage;
+ }
+ return this->onOpenStream(ttcIndex);
+}
+
+std::unique_ptr<SkFontData> SkTypeface::makeFontData() const {
+ return this->onMakeFontData();
+}
+
+// This implementation is temporary until this method can be made pure virtual.
+std::unique_ptr<SkFontData> SkTypeface::onMakeFontData() const {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+ if (!stream) {
+ return nullptr;
+ }
+ return skstd::make_unique<SkFontData>(std::move(stream), index, nullptr, 0);
+}
+
+void SkTypeface::unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
+ if (count > 0 && glyphs && uni) {
+ this->onCharsToGlyphs(uni, count, glyphs);
+ }
+}
+
+SkGlyphID SkTypeface::unicharToGlyph(SkUnichar uni) const {
+ SkGlyphID glyphs[1] = { 0 };
+ this->onCharsToGlyphs(&uni, 1, glyphs);
+ return glyphs[0];
+}
+
+int SkTypeface::countGlyphs() const {
+ return this->onCountGlyphs();
+}
+
+int SkTypeface::getUnitsPerEm() const {
+ // should we try to cache this in the base-class?
+ return this->onGetUPEM();
+}
+
+bool SkTypeface::getKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ SkASSERT(count >= 0);
+    // Check for the only legal way to pass a nullptr: everything is 0,
+    // in which case the caller just wants to know whether this face can
+    // possibly support kerning (true) or never can (false).
+ if (nullptr == glyphs || nullptr == adjustments) {
+ SkASSERT(nullptr == glyphs);
+ SkASSERT(0 == count);
+ SkASSERT(nullptr == adjustments);
+ }
+ return this->onGetKerningPairAdjustments(glyphs, count, adjustments);
+}
+
+SkTypeface::LocalizedStrings* SkTypeface::createFamilyNameIterator() const {
+ return this->onCreateFamilyNameIterator();
+}
+
+void SkTypeface::getFamilyName(SkString* name) const {
+ SkASSERT(name);
+ this->onGetFamilyName(name);
+}
+
+void SkTypeface::getGlyphToUnicodeMap(SkUnichar* dst) const {
+ sk_bzero(dst, sizeof(SkUnichar) * this->countGlyphs());
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface::getAdvancedMetrics() const {
+ std::unique_ptr<SkAdvancedTypefaceMetrics> result = this->onGetAdvancedMetrics();
+ if (result && result->fPostScriptName.isEmpty()) {
+ result->fPostScriptName = result->fFontName;
+ }
+ if (result && result->fType == SkAdvancedTypefaceMetrics::kTrueType_Font) {
+ SkOTTableOS2::Version::V2::Type::Field fsType;
+ constexpr SkFontTableTag os2Tag = SkTEndian_SwapBE32(SkOTTableOS2::TAG);
+ constexpr size_t fsTypeOffset = offsetof(SkOTTableOS2::Version::V2, fsType);
+ if (this->getTableData(os2Tag, fsTypeOffset, sizeof(fsType), &fsType) == sizeof(fsType)) {
+ if (fsType.Bitmap || (fsType.Restricted && !(fsType.PreviewPrint || fsType.Editable))) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (fsType.NoSubsetting) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ }
+ }
+ return result;
+}
+
+bool SkTypeface::onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkDescriptor.h"
+
+SkRect SkTypeface::getBounds() const {
+ fBoundsOnce([this] {
+ if (!this->onComputeBounds(&fBounds)) {
+ fBounds.setEmpty();
+ }
+ });
+ return fBounds;
+}
+
+bool SkTypeface::onComputeBounds(SkRect* bounds) const {
+    // We use a big size to ensure lots of significant bits from the scaler context.
+    // Then we scale back down to return our final answer (at 1-pt).
+ const SkScalar textSize = 2048;
+ const SkScalar invTextSize = 1 / textSize;
+
+ SkFont font;
+ font.setTypeface(sk_ref_sp(const_cast<SkTypeface*>(this)));
+ font.setSize(textSize);
+ font.setLinearMetrics(true);
+
+ SkScalerContextRec rec;
+ SkScalerContextEffects effects;
+
+ SkScalerContext::MakeRecAndEffectsFromFont(font, &rec, &effects);
+
+ SkAutoDescriptor ad;
+ SkScalerContextEffects noeffects;
+ SkScalerContext::AutoDescriptorGivenRecAndEffects(rec, noeffects, &ad);
+
+ std::unique_ptr<SkScalerContext> ctx = this->createScalerContext(noeffects, ad.getDesc(), true);
+ if (!ctx) {
+ return false;
+ }
+
+ SkFontMetrics fm;
+ ctx->getFontMetrics(&fm);
+ bounds->setLTRB(fm.fXMin * invTextSize, fm.fTop * invTextSize,
+ fm.fXMax * invTextSize, fm.fBottom * invTextSize);
+ return true;
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface::onGetAdvancedMetrics() const {
+ SkDEBUGFAIL("Typefaces that need to work with PDF backend must override this.");
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.cpp b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
new file mode 100644
index 0000000000..0d05bac7fe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMutex.h"
+#include "src/core/SkTypefaceCache.h"
+#include <atomic>
+
+#define TYPEFACE_CACHE_LIMIT 1024
+
+SkTypefaceCache::SkTypefaceCache() {}
+
+void SkTypefaceCache::add(sk_sp<SkTypeface> face) {
+ if (fTypefaces.count() >= TYPEFACE_CACHE_LIMIT) {
+ this->purge(TYPEFACE_CACHE_LIMIT >> 2);
+ }
+
+ fTypefaces.emplace_back(std::move(face));
+}
+
+sk_sp<SkTypeface> SkTypefaceCache::findByProcAndRef(FindProc proc, void* ctx) const {
+ for (const sk_sp<SkTypeface>& typeface : fTypefaces) {
+ if (proc(typeface.get(), ctx)) {
+ return typeface;
+ }
+ }
+ return nullptr;
+}
+
+void SkTypefaceCache::purge(int numToPurge) {
+ int count = fTypefaces.count();
+ int i = 0;
+ while (i < count) {
+ if (fTypefaces[i]->unique()) {
+ fTypefaces.removeShuffle(i);
+ --count;
+ if (--numToPurge == 0) {
+ return;
+ }
+ } else {
+ ++i;
+ }
+ }
+}
+
+void SkTypefaceCache::purgeAll() {
+ this->purge(fTypefaces.count());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypefaceCache& SkTypefaceCache::Get() {
+ static SkTypefaceCache gCache;
+ return gCache;
+}
+
+SkFontID SkTypefaceCache::NewFontID() {
+ static std::atomic<int32_t> nextID{1};
+ return nextID++;
+}
+
+static SkMutex& typeface_cache_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+void SkTypefaceCache::Add(sk_sp<SkTypeface> face) {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ Get().add(std::move(face));
+#endif
+}
+
+sk_sp<SkTypeface> SkTypefaceCache::FindByProcAndRef(FindProc proc, void* ctx) {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ return Get().findByProcAndRef(proc, ctx);
+#else
+ return nullptr;
+#endif
+}
+
+void SkTypefaceCache::PurgeAll() {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ Get().purgeAll();
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static bool DumpProc(SkTypeface* face, void* ctx) {
+ SkString n;
+ face->getFamilyName(&n);
+ SkFontStyle s = face->fontStyle();
+ SkFontID id = face->uniqueID();
+ SkDebugf("SkTypefaceCache: face %p fontID %d weight %d width %d style %d name %s\n",
+ face, id, s.weight(), s.width(), s.slant(), n.c_str());
+ return false;
+}
+#endif
+
+void SkTypefaceCache::Dump() {
+#ifdef SK_DEBUG
+ (void)Get().findByProcAndRef(DumpProc, nullptr);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.h b/gfx/skia/skia/src/core/SkTypefaceCache.h
new file mode 100644
index 0000000000..ac3faafec0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypefaceCache_DEFINED
+#define SkTypefaceCache_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTArray.h"
+
+class SkTypefaceCache {
+public:
+ SkTypefaceCache();
+
+    /**
+     * Callback for findByProcAndRef. Returns true if the given typeface is a
+     * match for the given context. The passed typeface is owned by the cache and
+     * is not additionally ref()ed. The typeface may be in the disposed state.
+     */
+ typedef bool(*FindProc)(SkTypeface*, void* context);
+
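+    // Illustrative use of findByProcAndRef/FindByProcAndRef (an editorial
+    // sketch, not from the original header): a port can probe the cache by
+    // unique ID before constructing a new face.
+    //
+    //     static bool find_by_id(SkTypeface* face, void* ctx) {
+    //         return face->uniqueID() == *static_cast<SkFontID*>(ctx);
+    //     }
+    //     ...
+    //     sk_sp<SkTypeface> tf = SkTypefaceCache::FindByProcAndRef(find_by_id, &id);
+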
+ /**
+ * Add a typeface to the cache. Later, if we need to purge the cache,
+ * typefaces uniquely owned by the cache will be unref()ed.
+ */
+ void add(sk_sp<SkTypeface>);
+
+ /**
+ * Iterate through the cache, calling proc(typeface, ctx) for each typeface.
+ * If proc returns true, then return that typeface.
+ * If it never returns true, return nullptr.
+ */
+ sk_sp<SkTypeface> findByProcAndRef(FindProc proc, void* ctx) const;
+
+ /**
+ * This will unref all of the typefaces in the cache for which the cache
+ * is the only owner. Normally this is handled automatically as needed.
+ * This function is exposed for clients that explicitly want to purge the
+ * cache (e.g. to look for leaks).
+ */
+ void purgeAll();
+
+ /**
+ * Helper: returns a unique fontID to pass to the constructor of
+ * your subclass of SkTypeface
+ */
+ static SkFontID NewFontID();
+
+ // These are static wrappers around a global instance of a cache.
+
+ static void Add(sk_sp<SkTypeface>);
+ static sk_sp<SkTypeface> FindByProcAndRef(FindProc proc, void* ctx);
+ static void PurgeAll();
+
+ /**
+ * Debugging only: dumps the status of the typefaces in the cache
+ */
+ static void Dump();
+
+private:
+ static SkTypefaceCache& Get();
+
+ void purge(int count);
+
+ SkTArray<sk_sp<SkTypeface>> fTypefaces;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTypefacePriv.h b/gfx/skia/skia/src/core/SkTypefacePriv.h
new file mode 100644
index 0000000000..cc25b843af
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefacePriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypefacePriv_DEFINED
+#define SkTypefacePriv_DEFINED
+
+#include "include/core/SkTypeface.h"
+
+/**
+ *  Return a ref'd typeface as an sk_sp, so the reference is released
+ *  automatically.
+ *
+ *  If the parameter is non-null, it will be ref'd and returned, otherwise
+ *  the default typeface is returned.
+ */
+static inline sk_sp<SkTypeface> ref_or_default(SkTypeface* face) {
+ return face ? sk_ref_sp(face) : SkTypeface::MakeDefault();
+}
+
+/**
+ * Always resolves to a non-null typeface, either the value passed to its
+ * constructor, or the default typeface if null was passed.
+ */
+class SkAutoResolveDefaultTypeface : public sk_sp<SkTypeface> {
+public:
+ SkAutoResolveDefaultTypeface() : INHERITED(SkTypeface::MakeDefault()) {}
+
+ SkAutoResolveDefaultTypeface(SkTypeface* face)
+ : INHERITED(ref_or_default(face)) {}
+
+private:
+ typedef sk_sp<SkTypeface> INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTypeface_remote.cpp b/gfx/skia/skia/src/core/SkTypeface_remote.cpp
new file mode 100644
index 0000000000..838a783313
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface_remote.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkRemoteGlyphCache.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkTypeface_remote.h"
+
+SkScalerContextProxy::SkScalerContextProxy(sk_sp<SkTypeface> tf,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager)
+ : SkScalerContext{std::move(tf), effects, desc}
+ , fDiscardableManager{std::move(manager)} {}
+
+void SkScalerContextProxy::initCache(SkStrike* cache, SkStrikeCache* strikeCache) {
+ SkASSERT(fCache == nullptr);
+ SkASSERT(cache != nullptr);
+
+ fCache = cache;
+ fStrikeCache = strikeCache;
+}
+
+unsigned SkScalerContextProxy::generateGlyphCount() {
+ SK_ABORT("Should never be called.");
+}
+
+bool SkScalerContextProxy::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContextProxy::generateMetrics(SkGlyph* glyph) {
+ TRACE_EVENT1("skia", "generateMetrics", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateMetrics: %s\n", this->getRec().dump().c_str());
+ }
+
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ // Since the scaler context is being called, we don't have the needed data. Try to find a
+ // fallback before failing.
+ if (fCache && fCache->belongsToCache(glyph)) {
+ // First check the original cache, in case there is a sub-pixel pos mismatch.
+ if (const SkGlyph* from =
+ fCache->getCachedGlyphAnySubPix(glyph->getGlyphID(), glyph->getPackedID())) {
+ fCache->mergeGlyphAndImage(glyph->getPackedID(), *from);
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphMetricsFallback);
+ return;
+ }
+
+ // Now check other caches for a desc mismatch.
+ if (fStrikeCache->desperationSearchForImage(fCache->getDescriptor(), glyph, fCache)) {
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphMetricsFallback);
+ return;
+ }
+ }
+
+ glyph->zeroMetrics();
+ fDiscardableManager->notifyCacheMiss(SkStrikeClient::CacheMissType::kGlyphMetrics);
+}
+
+void SkScalerContextProxy::generateImage(const SkGlyph& glyph) {
+ TRACE_EVENT1("skia", "generateImage", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateImage: %s\n", this->getRec().dump().c_str());
+ }
+
+ // There is no desperation search here, because if there was an image to be found it was
+ // copied over with the metrics search.
+ fDiscardableManager->notifyCacheMiss(SkStrikeClient::CacheMissType::kGlyphImage);
+}
+
+bool SkScalerContextProxy::generatePath(SkGlyphID glyphID, SkPath* path) {
+ TRACE_EVENT1("skia", "generatePath", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generatePath: %s\n", this->getRec().dump().c_str());
+ }
+
+ // Since the scaler context is being called, we don't have the needed data. Try to find a
+ // fallback before failing.
+ auto desc = SkScalerContext::DescriptorGivenRecAndEffects(this->getRec(), this->getEffects());
+ bool foundPath = fStrikeCache && fStrikeCache->desperationSearchForPath(*desc, glyphID, path);
+ fDiscardableManager->notifyCacheMiss(foundPath
+ ? SkStrikeClient::CacheMissType::kGlyphPathFallback
+ : SkStrikeClient::CacheMissType::kGlyphPath);
+ return foundPath;
+}
+
+void SkScalerContextProxy::generateFontMetrics(SkFontMetrics* metrics) {
+ TRACE_EVENT1(
+ "skia", "generateFontMetrics", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateFontMetrics: %s\n", this->getRec().dump().c_str());
+ SkDEBUGCODE(SkStrikeCache::Dump());
+ }
+
+    // Font metrics aren't really used for rendering, so just zero out the data and return.
+ fDiscardableManager->notifyCacheMiss(SkStrikeClient::CacheMissType::kFontMetrics);
+ sk_bzero(metrics, sizeof(*metrics));
+}
+
+SkTypefaceProxy* SkScalerContextProxy::getProxyTypeface() const {
+ return (SkTypefaceProxy*)this->getTypeface();
+}
diff --git a/gfx/skia/skia/src/core/SkTypeface_remote.h b/gfx/skia/skia/src/core/SkTypeface_remote.h
new file mode 100644
index 0000000000..3d454cf760
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface_remote.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemoteTypeface_DEFINED
+#define SkRemoteTypeface_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkRemoteGlyphCache.h"
+#include "src/core/SkScalerContext.h"
+
+class SkTypefaceProxy;
+class SkStrikeCache;
+
+class SkScalerContextProxy : public SkScalerContext {
+public:
+ SkScalerContextProxy(sk_sp<SkTypeface> tf,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager);
+
+ void initCache(SkStrike*, SkStrikeCache*);
+
+protected:
+ unsigned generateGlyphCount() override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyphID, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics* metrics) override;
+ SkTypefaceProxy* getProxyTypeface() const;
+
+private:
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fDiscardableManager;
+ SkStrike* fCache = nullptr;
+ SkStrikeCache* fStrikeCache = nullptr;
+ typedef SkScalerContext INHERITED;
+};
+
+class SkTypefaceProxy : public SkTypeface {
+public:
+ SkTypefaceProxy(SkFontID fontId,
+ int glyphCount,
+ const SkFontStyle& style,
+ bool isFixed,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager,
+ bool isLogging = true)
+ : INHERITED{style, false}
+ , fFontId{fontId}
+ , fGlyphCount{glyphCount}
+ , fIsLogging{isLogging}
+ , fDiscardableManager{std::move(manager)} {}
+ SkFontID remoteTypefaceID() const {return fFontId;}
+ int glyphCount() const {return fGlyphCount;}
+ bool isLogging() const {return fIsLogging;}
+
+protected:
+ int onGetUPEM() const override { SK_ABORT("Should never be called."); }
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ SK_ABORT("Should never be called.");
+ }
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ SK_ABORT("Should never be called.");
+ }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override {
+ SK_ABORT("Should never be called.");
+ }
+ void onGetFamilyName(SkString* familyName) const override {
+ // Used by SkStrikeCache::DumpMemoryStatistics.
+ *familyName = "";
+ }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override {
+ SK_ABORT("Should never be called.");
+ }
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override {
+ SK_ABORT("Should never be called.");
+ }
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const override {
+ return new SkScalerContextProxy(sk_ref_sp(const_cast<SkTypefaceProxy*>(this)), effects,
+ desc, fDiscardableManager);
+ }
+ void onFilterRec(SkScalerContextRec* rec) const override {
+ // The rec filtering is already applied by the server when generating
+ // the glyphs.
+ }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override {
+ SK_ABORT("Should never be called.");
+ }
+ void getGlyphToUnicodeMap(SkUnichar*) const override {
+ SK_ABORT("Should never be called.");
+ }
+
+ void getPostScriptGlyphNames(SkString*) const override {
+ SK_ABORT("Should never be called.");
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ SK_ABORT("Should never be called.");
+ }
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onCountGlyphs() const override {
+ return this->glyphCount();
+ }
+
+ void* onGetCTFontRef() const override {
+ SK_ABORT("Should never be called.");
+ }
+
+private:
+ const SkFontID fFontId;
+ const int fGlyphCount;
+ const bool fIsLogging;
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fDiscardableManager;
+
+ typedef SkTypeface INHERITED;
+};
+
+#endif // SkRemoteTypeface_DEFINED
diff --git a/gfx/skia/skia/src/core/SkUnPreMultiply.cpp b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
new file mode 100644
index 0000000000..2b999190ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+
+SkColor SkUnPreMultiply::PMColorToColor(SkPMColor c) {
+ const unsigned a = SkGetPackedA32(c);
+ const Scale scale = GetScale(a);
+ return SkColorSetARGB(a,
+ ApplyScale(scale, SkGetPackedR32(c)),
+ ApplyScale(scale, SkGetPackedG32(c)),
+ ApplyScale(scale, SkGetPackedB32(c)));
+}
+
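+// gTable[a] holds round(255 / a) in 8.24 fixed point (gTable[0] is unused).
+// A worked example (editorial, not from upstream): for a premultiplied pixel
+// with a = 128 and r = 64,
+//
+//     SkUnPreMultiply::Scale scale = SkUnPreMultiply::GetScale(128); // gTable[128] == 0x01FE0000
+//     U8CPU r = SkUnPreMultiply::ApplyScale(scale, 64);
+//     // (0x01FE0000 * 64 + (1 << 23)) >> 24 == 128, i.e. round(64 * 255 / 128)
+//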
+const uint32_t SkUnPreMultiply::gTable[] = {
+ 0x00000000, 0xFF000000, 0x7F800000, 0x55000000, 0x3FC00000, 0x33000000, 0x2A800000, 0x246DB6DB,
+ 0x1FE00000, 0x1C555555, 0x19800000, 0x172E8BA3, 0x15400000, 0x139D89D9, 0x1236DB6E, 0x11000000,
+ 0x0FF00000, 0x0F000000, 0x0E2AAAAB, 0x0D6BCA1B, 0x0CC00000, 0x0C249249, 0x0B9745D1, 0x0B1642C8,
+ 0x0AA00000, 0x0A333333, 0x09CEC4EC, 0x0971C71C, 0x091B6DB7, 0x08CB08D4, 0x08800000, 0x0839CE74,
+ 0x07F80000, 0x07BA2E8C, 0x07800000, 0x07492492, 0x07155555, 0x06E45307, 0x06B5E50D, 0x0689D89E,
+ 0x06600000, 0x063831F4, 0x06124925, 0x05EE23B9, 0x05CBA2E9, 0x05AAAAAB, 0x058B2164, 0x056CEFA9,
+ 0x05500000, 0x05343EB2, 0x0519999A, 0x05000000, 0x04E76276, 0x04CFB2B8, 0x04B8E38E, 0x04A2E8BA,
+ 0x048DB6DB, 0x0479435E, 0x0465846A, 0x045270D0, 0x04400000, 0x042E29F8, 0x041CE73A, 0x040C30C3,
+ 0x03FC0000, 0x03EC4EC5, 0x03DD1746, 0x03CE540F, 0x03C00000, 0x03B21643, 0x03A49249, 0x03976FC6,
+ 0x038AAAAB, 0x037E3F20, 0x03722983, 0x03666666, 0x035AF287, 0x034FCACE, 0x0344EC4F, 0x033A5441,
+ 0x03300000, 0x0325ED09, 0x031C18FA, 0x0312818B, 0x03092492, 0x03000000, 0x02F711DC, 0x02EE5847,
+ 0x02E5D174, 0x02DD7BAF, 0x02D55555, 0x02CD5CD6, 0x02C590B2, 0x02BDEF7C, 0x02B677D4, 0x02AF286C,
+ 0x02A80000, 0x02A0FD5C, 0x029A1F59, 0x029364D9, 0x028CCCCD, 0x0286562E, 0x02800000, 0x0279C952,
+ 0x0273B13B, 0x026DB6DB, 0x0267D95C, 0x026217ED, 0x025C71C7, 0x0256E62A, 0x0251745D, 0x024C1BAD,
+ 0x0246DB6E, 0x0241B2F9, 0x023CA1AF, 0x0237A6F5, 0x0232C235, 0x022DF2DF, 0x02293868, 0x02249249,
+ 0x02200000, 0x021B810F, 0x021714FC, 0x0212BB51, 0x020E739D, 0x020A3D71, 0x02061862, 0x02020408,
+ 0x01FE0000, 0x01FA0BE8, 0x01F62762, 0x01F25214, 0x01EE8BA3, 0x01EAD3BB, 0x01E72A08, 0x01E38E39,
+ 0x01E00000, 0x01DC7F11, 0x01D90B21, 0x01D5A3EA, 0x01D24925, 0x01CEFA8E, 0x01CBB7E3, 0x01C880E5,
+ 0x01C55555, 0x01C234F7, 0x01BF1F90, 0x01BC14E6, 0x01B914C2, 0x01B61EED, 0x01B33333, 0x01B05161,
+ 0x01AD7943, 0x01AAAAAB, 0x01A7E567, 0x01A5294A, 0x01A27627, 0x019FCBD2, 0x019D2A20, 0x019A90E8,
+ 0x01980000, 0x01957741, 0x0192F685, 0x01907DA5, 0x018E0C7D, 0x018BA2E9, 0x018940C5, 0x0186E5F1,
+ 0x01849249, 0x018245AE, 0x01800000, 0x017DC11F, 0x017B88EE, 0x0179574E, 0x01772C23, 0x01750750,
+ 0x0172E8BA, 0x0170D045, 0x016EBDD8, 0x016CB157, 0x016AAAAB, 0x0168A9B9, 0x0166AE6B, 0x0164B8A8,
+ 0x0162C859, 0x0160DD68, 0x015EF7BE, 0x015D1746, 0x015B3BEA, 0x01596596, 0x01579436, 0x0155C7B5,
+ 0x01540000, 0x01523D04, 0x01507EAE, 0x014EC4EC, 0x014D0FAC, 0x014B5EDD, 0x0149B26D, 0x01480A4B,
+ 0x01466666, 0x0144C6B0, 0x01432B17, 0x0141938C, 0x01400000, 0x013E7064, 0x013CE4A9, 0x013B5CC1,
+ 0x0139D89E, 0x01385831, 0x0136DB6E, 0x01356246, 0x0133ECAE, 0x01327A97, 0x01310BF6, 0x012FA0BF,
+ 0x012E38E4, 0x012CD45A, 0x012B7315, 0x012A150B, 0x0128BA2F, 0x01276276, 0x01260DD6, 0x0124BC45,
+ 0x01236DB7, 0x01222222, 0x0120D97D, 0x011F93BC, 0x011E50D8, 0x011D10C5, 0x011BD37A, 0x011A98EF,
+ 0x0119611A, 0x01182BF3, 0x0116F970, 0x0115C988, 0x01149C34, 0x0113716B, 0x01124925, 0x01112359,
+ 0x01100000, 0x010EDF12, 0x010DC087, 0x010CA458, 0x010B8A7E, 0x010A72F0, 0x01095DA9, 0x01084AA0,
+ 0x010739CE, 0x01062B2E, 0x01051EB8, 0x01041466, 0x01030C31, 0x01020612, 0x01010204, 0x01000000
+};
+
+#ifdef BUILD_DIVIDE_TABLE
+void SkUnPreMultiply_BuildTable() {
+ for (unsigned i = 0; i <= 255; i++) {
+ uint32_t scale;
+
+ if (0 == i) {
+ scale = 0;
+ } else {
+ scale = ((255 << 24) + (i >> 1)) / i;
+ }
+
+ SkDebugf(" 0x%08X,", scale);
+ if ((i & 7) == 7) {
+ SkDebugf("\n");
+ }
+
+ // test the result
+ for (int j = 1; j <= i; j++) {
+ uint32_t test = (j * scale + (1 << 23)) >> 24;
+ uint32_t div = roundf(j * 255.0f / i);
+ int diff = SkAbs32(test - div);
+ SkASSERT(diff <= 1 && test <= 255);
+ }
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkUtils.cpp b/gfx/skia/skia/src/core/SkUtils.cpp
new file mode 100644
index 0000000000..d369d509b8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtils.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkUtils.h"
+
+template <typename T>
+static SkUnichar next(const T** srcPtr, unsigned N, SkUnichar (*fn)(const T**, const T*)) {
+ SkASSERT(srcPtr);
+ const T* ptr = *srcPtr;
+ SkUnichar c = fn(&ptr, ptr + N);
+ if (c == -1) {
+ SkASSERT(false);
+ ++(*srcPtr);
+ return 0xFFFD; // REPLACEMENT CHARACTER
+ }
+ *srcPtr = ptr;
+ return c;
+}
+SkUnichar SkUTF8_NextUnichar(const char** p) {
+ return next<char>(p, SkUTF::kMaxBytesInUTF8Sequence, SkUTF::NextUTF8);
+}
+SkUnichar SkUTF16_NextUnichar(const uint16_t** p) {
+ return next<uint16_t>(p, 2, SkUTF::NextUTF16);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const char SkHexadecimalDigits::gUpper[16] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+const char SkHexadecimalDigits::gLower[16] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
diff --git a/gfx/skia/skia/src/core/SkUtils.h b/gfx/skia/skia/src/core/SkUtils.h
new file mode 100644
index 0000000000..cf2ee10914
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtils.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtils_DEFINED
+#define SkUtils_DEFINED
+
+#include "include/core/SkFontTypes.h"
+#include "src/core/SkOpts.h"
+#include "src/utils/SkUTF.h"
+
+/** Similar to memset(), but it assigns a 16, 32, or 64-bit value into the buffer.
+ @param buffer The memory to have value copied into it
+ @param value The value to be copied into buffer
+ @param count The number of times value should be copied into the buffer.
+*/
+static inline void sk_memset16(uint16_t buffer[], uint16_t value, int count) {
+ SkOpts::memset16(buffer, value, count);
+}
+static inline void sk_memset32(uint32_t buffer[], uint32_t value, int count) {
+ SkOpts::memset32(buffer, value, count);
+}
+static inline void sk_memset64(uint64_t buffer[], uint64_t value, int count) {
+ SkOpts::memset64(buffer, value, count);
+}
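+
+// A minimal usage sketch (not part of the original file): flooding a scanline
+// of RGB565 pixels with opaque red.
+//
+//     uint16_t row[256];
+//     sk_memset16(row, 0xF800 /*opaque red in 565*/, 256);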
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Unlike the functions in SkUTF.h, these two functions do not take an array
+// length parameter. When possible, use SkUTF::NextUTF{8,16} instead.
+SkUnichar SkUTF8_NextUnichar(const char**);
+SkUnichar SkUTF16_NextUnichar(const uint16_t**);
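+
+// For example (an editorial sketch, assuming valid, NUL-terminated UTF-8):
+//
+//     const char* p = "héllo";
+//     while (*p) {
+//         SkUnichar u = SkUTF8_NextUnichar(&p);  // advances p past one code point
+//     }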
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool SkUTF16_IsLeadingSurrogate(uint16_t c) { return ((c) & 0xFC00) == 0xD800; }
+
+static inline bool SkUTF16_IsTrailingSurrogate (uint16_t c) { return ((c) & 0xFC00) == 0xDC00; }
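+
+// For example, U+1F600 is encoded as the surrogate pair 0xD83D 0xDE00:
+// SkUTF16_IsLeadingSurrogate(0xD83D) and SkUTF16_IsTrailingSurrogate(0xDE00)
+// both return true.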
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline int SkUTFN_CountUnichars(SkTextEncoding enc, const void* utfN, size_t bytes) {
+ switch (enc) {
+ case SkTextEncoding::kUTF8: return SkUTF::CountUTF8((const char*)utfN, bytes);
+ case SkTextEncoding::kUTF16: return SkUTF::CountUTF16((const uint16_t*)utfN, bytes);
+ case SkTextEncoding::kUTF32: return SkUTF::CountUTF32((const int32_t*)utfN, bytes);
+ default: SkDEBUGFAIL("unknown text encoding"); return -1;
+ }
+}
+
+static inline SkUnichar SkUTFN_Next(SkTextEncoding enc, const void** ptr, const void* stop) {
+ switch (enc) {
+ case SkTextEncoding::kUTF8:
+ return SkUTF::NextUTF8((const char**)ptr, (const char*)stop);
+ case SkTextEncoding::kUTF16:
+ return SkUTF::NextUTF16((const uint16_t**)ptr, (const uint16_t*)stop);
+ case SkTextEncoding::kUTF32:
+ return SkUTF::NextUTF32((const int32_t**)ptr, (const int32_t*)stop);
+ default: SkDEBUGFAIL("unknown text encoding"); return -1;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace SkHexadecimalDigits {
+ extern const char gUpper[16]; // 0-9A-F
+ extern const char gLower[16]; // 0-9a-f
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// If T is an 8-byte GCC or Clang vector extension type, it would naturally
+// pass or return in the MMX mm0 register on 32-bit x86 builds. This has the
+// fun side effect of clobbering any state in the x87 st0 register. (There is
+// no ABI governing who should preserve mm?/st? registers, so no one does!)
+//
+// We force-inline sk_unaligned_load() and sk_unaligned_store() to avoid that,
+// making them safe to use for all types on all platforms, thus solving the
+// problem once and for all!
+
+template <typename T, typename P>
+static SK_ALWAYS_INLINE T sk_unaligned_load(const P* ptr) {
+ // TODO: static_assert desirable things about T here so as not to be totally abused.
+ T val;
+ memcpy(&val, ptr, sizeof(val));
+ return val;
+}
+
+template <typename T, typename P>
+static SK_ALWAYS_INLINE void sk_unaligned_store(P* ptr, T val) {
+ // TODO: ditto
+ memcpy(ptr, &val, sizeof(val));
+}
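+
+// Illustrative use (not from the original file): reading a possibly unaligned
+// 32-bit value out of a byte buffer without invoking undefined behavior.
+//
+//     const uint8_t* src = ...;
+//     uint32_t v = sk_unaligned_load<uint32_t>(src);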
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkUtilsArm.cpp b/gfx/skia/skia/src/core/SkUtilsArm.cpp
new file mode 100644
index 0000000000..c29938fdfc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtilsArm.cpp
@@ -0,0 +1,8 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This file no longer needs to exist, but it's still referenced by Chrome's GYP / GN builds.
diff --git a/gfx/skia/skia/src/core/SkVM.cpp b/gfx/skia/skia/src/core/SkVM.cpp
new file mode 100644
index 0000000000..cd2bc3ae2b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVM.cpp
@@ -0,0 +1,2221 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/SkTFitsIn.h"
+#include "include/private/SkThreadID.h"
+#include "include/private/SkVx.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkVM.h"
+#include <string.h>
+#if defined(SKVM_JIT)
+ #include <sys/mman.h>
+#endif
+#if defined(SKVM_PERF_DUMPS)
+ #include <stdio.h>
+ #include <time.h>
+#endif
+
+
+namespace skvm {
+
+ // Debugging tools, mostly for printing various data structures out to a stream.
+
+ namespace {
+ class SkDebugfStream final : public SkWStream {
+ size_t fBytesWritten = 0;
+
+ bool write(const void* buffer, size_t size) override {
+            SkDebugf("%.*s", (int)size, (const char*)buffer);
+ fBytesWritten += size;
+ return true;
+ }
+
+ size_t bytesWritten() const override {
+ return fBytesWritten;
+ }
+ };
+
+ struct V { Val id; };
+ struct R { Reg id; };
+ struct Shift { int bits; };
+ struct Splat { int bits; };
+ struct Hex { int bits; };
+
+ static void write(SkWStream* o, const char* s) {
+ o->writeText(s);
+ }
+
+ static void write(SkWStream* o, Arg a) {
+ write(o, "arg(");
+ o->writeDecAsText(a.ix);
+ write(o, ")");
+ }
+ static void write(SkWStream* o, V v) {
+ write(o, "v");
+ o->writeDecAsText(v.id);
+ }
+ static void write(SkWStream* o, R r) {
+ write(o, "r");
+ o->writeDecAsText(r.id);
+ }
+ static void write(SkWStream* o, Shift s) {
+ o->writeDecAsText(s.bits);
+ }
+ static void write(SkWStream* o, Splat s) {
+ float f;
+ memcpy(&f, &s.bits, 4);
+ o->writeHexAsText(s.bits);
+ write(o, " (");
+ o->writeScalarAsText(f);
+ write(o, ")");
+ }
+ static void write(SkWStream* o, Hex h) {
+ o->writeHexAsText(h.bits);
+ }
+
+ template <typename T, typename... Ts>
+ static void write(SkWStream* o, T first, Ts... rest) {
+ write(o, first);
+ write(o, " ");
+ write(o, rest...);
+ }
+ }
+
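+    // Margin legend for the dump below (an editorial note): "☠️ " marks a dead
+    // value, two spaces a value that lives in the loop, "↑ " a hoistable value
+    // that is still used inside the loop, and "↟ " a hoistable value consumed
+    // only by other hoisted values.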
+ static void dump_builder_program(const std::vector<Builder::Instruction>& program,
+ SkWStream* o) {
+ for (Val id = 0; id < (Val)program.size(); id++) {
+ const Builder::Instruction& inst = program[id];
+ Op op = inst.op;
+ Val x = inst.x,
+ y = inst.y,
+ z = inst.z;
+ int imm = inst.imm;
+ write(o, inst.death == 0 ? "☠️ " :
+ !inst.can_hoist ? " " :
+ inst.used_in_loop ? "↑ " :
+ "↟ ");
+ switch (op) {
+ case Op::store8: write(o, "store8" , Arg{imm}, V{x}); break;
+ case Op::store16: write(o, "store16", Arg{imm}, V{x}); break;
+ case Op::store32: write(o, "store32", Arg{imm}, V{x}); break;
+
+ case Op::load8: write(o, V{id}, "= load8" , Arg{imm}); break;
+ case Op::load16: write(o, V{id}, "= load16", Arg{imm}); break;
+ case Op::load32: write(o, V{id}, "= load32", Arg{imm}); break;
+
+ case Op::gather8: write(o, V{id}, "= gather8" , Arg{imm}, V{x}); break;
+ case Op::gather16: write(o, V{id}, "= gather16", Arg{imm}, V{x}); break;
+ case Op::gather32: write(o, V{id}, "= gather32", Arg{imm}, V{x}); break;
+
+ case Op::uniform8: write(o, V{id}, "= uniform8" , Arg{imm & 0xffff}, Hex{imm>>16}); break;
+ case Op::uniform16: write(o, V{id}, "= uniform16", Arg{imm & 0xffff}, Hex{imm>>16}); break;
+ case Op::uniform32: write(o, V{id}, "= uniform32", Arg{imm & 0xffff}, Hex{imm>>16}); break;
+
+ case Op::splat: write(o, V{id}, "= splat", Splat{imm}); break;
+
+
+ case Op::add_f32: write(o, V{id}, "= add_f32", V{x}, V{y} ); break;
+ case Op::sub_f32: write(o, V{id}, "= sub_f32", V{x}, V{y} ); break;
+ case Op::mul_f32: write(o, V{id}, "= mul_f32", V{x}, V{y} ); break;
+ case Op::div_f32: write(o, V{id}, "= div_f32", V{x}, V{y} ); break;
+ case Op::mad_f32: write(o, V{id}, "= mad_f32", V{x}, V{y}, V{z}); break;
+
+ case Op:: eq_f32: write(o, V{id}, "= eq_f32", V{x}, V{y}); break;
+ case Op::neq_f32: write(o, V{id}, "= neq_f32", V{x}, V{y}); break;
+ case Op:: lt_f32: write(o, V{id}, "= lt_f32", V{x}, V{y}); break;
+ case Op::lte_f32: write(o, V{id}, "= lte_f32", V{x}, V{y}); break;
+ case Op:: gt_f32: write(o, V{id}, "= gt_f32", V{x}, V{y}); break;
+ case Op::gte_f32: write(o, V{id}, "= gte_f32", V{x}, V{y}); break;
+
+
+ case Op::add_i32: write(o, V{id}, "= add_i32", V{x}, V{y}); break;
+ case Op::sub_i32: write(o, V{id}, "= sub_i32", V{x}, V{y}); break;
+ case Op::mul_i32: write(o, V{id}, "= mul_i32", V{x}, V{y}); break;
+
+ case Op::shl_i32: write(o, V{id}, "= shl_i32", V{x}, Shift{imm}); break;
+ case Op::shr_i32: write(o, V{id}, "= shr_i32", V{x}, Shift{imm}); break;
+ case Op::sra_i32: write(o, V{id}, "= sra_i32", V{x}, Shift{imm}); break;
+
+ case Op:: eq_i32: write(o, V{id}, "= eq_i32", V{x}, V{y}); break;
+ case Op::neq_i32: write(o, V{id}, "= neq_i32", V{x}, V{y}); break;
+ case Op:: lt_i32: write(o, V{id}, "= lt_i32", V{x}, V{y}); break;
+ case Op::lte_i32: write(o, V{id}, "= lte_i32", V{x}, V{y}); break;
+ case Op:: gt_i32: write(o, V{id}, "= gt_i32", V{x}, V{y}); break;
+ case Op::gte_i32: write(o, V{id}, "= gte_i32", V{x}, V{y}); break;
+
+ case Op::add_i16x2: write(o, V{id}, "= add_i16x2", V{x}, V{y}); break;
+ case Op::sub_i16x2: write(o, V{id}, "= sub_i16x2", V{x}, V{y}); break;
+ case Op::mul_i16x2: write(o, V{id}, "= mul_i16x2", V{x}, V{y}); break;
+
+ case Op::shl_i16x2: write(o, V{id}, "= shl_i16x2", V{x}, Shift{imm}); break;
+ case Op::shr_i16x2: write(o, V{id}, "= shr_i16x2", V{x}, Shift{imm}); break;
+ case Op::sra_i16x2: write(o, V{id}, "= sra_i16x2", V{x}, Shift{imm}); break;
+
+ case Op:: eq_i16x2: write(o, V{id}, "= eq_i16x2", V{x}, V{y}); break;
+ case Op::neq_i16x2: write(o, V{id}, "= neq_i16x2", V{x}, V{y}); break;
+ case Op:: lt_i16x2: write(o, V{id}, "= lt_i16x2", V{x}, V{y}); break;
+ case Op::lte_i16x2: write(o, V{id}, "= lte_i16x2", V{x}, V{y}); break;
+ case Op:: gt_i16x2: write(o, V{id}, "= gt_i16x2", V{x}, V{y}); break;
+ case Op::gte_i16x2: write(o, V{id}, "= gte_i16x2", V{x}, V{y}); break;
+
+ case Op::bit_and : write(o, V{id}, "= bit_and" , V{x}, V{y} ); break;
+ case Op::bit_or : write(o, V{id}, "= bit_or" , V{x}, V{y} ); break;
+ case Op::bit_xor : write(o, V{id}, "= bit_xor" , V{x}, V{y} ); break;
+ case Op::bit_clear: write(o, V{id}, "= bit_clear", V{x}, V{y} ); break;
+ case Op::select : write(o, V{id}, "= select" , V{x}, V{y}, V{z}); break;
+
+ case Op::bytes: write(o, V{id}, "= bytes", V{x}, Hex{imm}); break;
+ case Op::extract: write(o, V{id}, "= extract", V{x}, Shift{imm}, V{y}); break;
+ case Op::pack: write(o, V{id}, "= pack", V{x}, V{y}, Shift{imm}); break;
+
+ case Op::to_f32: write(o, V{id}, "= to_f32", V{x}); break;
+ case Op::to_i32: write(o, V{id}, "= to_i32", V{x}); break;
+ }
+
+ write(o, "\n");
+ }
+ }
+
+ void Builder::dump(SkWStream* o) const {
+ SkDebugfStream debug;
+ if (!o) { o = &debug; }
+
+ o->writeDecAsText(fProgram.size());
+ o->writeText(" values:\n");
+ dump_builder_program(fProgram, o);
+ }
+
+ void Program::dump(SkWStream* o) const {
+ SkDebugfStream debug;
+ if (!o) { o = &debug; }
+
+ o->writeDecAsText(fRegs);
+ o->writeText(" registers, ");
+ o->writeDecAsText(fInstructions.size());
+ o->writeText(" instructions:\n");
+ for (int i = 0; i < (int)fInstructions.size(); i++) {
+ if (i == fLoop) {
+ write(o, "loop:\n");
+ }
+ const Program::Instruction& inst = fInstructions[i];
+ Op op = inst.op;
+ Reg d = inst.d,
+ x = inst.x,
+ y = inst.y,
+ z = inst.z;
+ int imm = inst.imm;
+ switch (op) {
+ case Op::store8: write(o, "store8" , Arg{imm}, R{x}); break;
+ case Op::store16: write(o, "store16", Arg{imm}, R{x}); break;
+ case Op::store32: write(o, "store32", Arg{imm}, R{x}); break;
+
+ case Op::load8: write(o, R{d}, "= load8" , Arg{imm}); break;
+ case Op::load16: write(o, R{d}, "= load16", Arg{imm}); break;
+ case Op::load32: write(o, R{d}, "= load32", Arg{imm}); break;
+
+ case Op::gather8: write(o, R{d}, "= gather8" , Arg{imm}, R{x}); break;
+ case Op::gather16: write(o, R{d}, "= gather16", Arg{imm}, R{x}); break;
+ case Op::gather32: write(o, R{d}, "= gather32", Arg{imm}, R{x}); break;
+
+ case Op::uniform8: write(o, R{d}, "= uniform8" , Arg{imm & 0xffff}, Hex{imm>>16}); break;
+ case Op::uniform16: write(o, R{d}, "= uniform16", Arg{imm & 0xffff}, Hex{imm>>16}); break;
+ case Op::uniform32: write(o, R{d}, "= uniform32", Arg{imm & 0xffff}, Hex{imm>>16}); break;
+
+ case Op::splat: write(o, R{d}, "= splat", Splat{imm}); break;
+
+
+ case Op::add_f32: write(o, R{d}, "= add_f32", R{x}, R{y} ); break;
+ case Op::sub_f32: write(o, R{d}, "= sub_f32", R{x}, R{y} ); break;
+ case Op::mul_f32: write(o, R{d}, "= mul_f32", R{x}, R{y} ); break;
+ case Op::div_f32: write(o, R{d}, "= div_f32", R{x}, R{y} ); break;
+ case Op::mad_f32: write(o, R{d}, "= mad_f32", R{x}, R{y}, R{z}); break;
+
+ case Op:: eq_f32: write(o, R{d}, "= eq_f32", R{x}, R{y}); break;
+ case Op::neq_f32: write(o, R{d}, "= neq_f32", R{x}, R{y}); break;
+ case Op:: lt_f32: write(o, R{d}, "= lt_f32", R{x}, R{y}); break;
+ case Op::lte_f32: write(o, R{d}, "= lte_f32", R{x}, R{y}); break;
+ case Op:: gt_f32: write(o, R{d}, "= gt_f32", R{x}, R{y}); break;
+ case Op::gte_f32: write(o, R{d}, "= gte_f32", R{x}, R{y}); break;
+
+
+ case Op::add_i32: write(o, R{d}, "= add_i32", R{x}, R{y}); break;
+ case Op::sub_i32: write(o, R{d}, "= sub_i32", R{x}, R{y}); break;
+ case Op::mul_i32: write(o, R{d}, "= mul_i32", R{x}, R{y}); break;
+
+ case Op::shl_i32: write(o, R{d}, "= shl_i32", R{x}, Shift{imm}); break;
+ case Op::shr_i32: write(o, R{d}, "= shr_i32", R{x}, Shift{imm}); break;
+ case Op::sra_i32: write(o, R{d}, "= sra_i32", R{x}, Shift{imm}); break;
+
+ case Op:: eq_i32: write(o, R{d}, "= eq_i32", R{x}, R{y}); break;
+ case Op::neq_i32: write(o, R{d}, "= neq_i32", R{x}, R{y}); break;
+ case Op:: lt_i32: write(o, R{d}, "= lt_i32", R{x}, R{y}); break;
+ case Op::lte_i32: write(o, R{d}, "= lte_i32", R{x}, R{y}); break;
+ case Op:: gt_i32: write(o, R{d}, "= gt_i32", R{x}, R{y}); break;
+ case Op::gte_i32: write(o, R{d}, "= gte_i32", R{x}, R{y}); break;
+
+
+ case Op::add_i16x2: write(o, R{d}, "= add_i16x2", R{x}, R{y}); break;
+ case Op::sub_i16x2: write(o, R{d}, "= sub_i16x2", R{x}, R{y}); break;
+ case Op::mul_i16x2: write(o, R{d}, "= mul_i16x2", R{x}, R{y}); break;
+
+ case Op::shl_i16x2: write(o, R{d}, "= shl_i16x2", R{x}, Shift{imm}); break;
+ case Op::shr_i16x2: write(o, R{d}, "= shr_i16x2", R{x}, Shift{imm}); break;
+ case Op::sra_i16x2: write(o, R{d}, "= sra_i16x2", R{x}, Shift{imm}); break;
+
+ case Op:: eq_i16x2: write(o, R{d}, "= eq_i16x2", R{x}, R{y}); break;
+ case Op::neq_i16x2: write(o, R{d}, "= neq_i16x2", R{x}, R{y}); break;
+ case Op:: lt_i16x2: write(o, R{d}, "= lt_i16x2", R{x}, R{y}); break;
+ case Op::lte_i16x2: write(o, R{d}, "= lte_i16x2", R{x}, R{y}); break;
+ case Op:: gt_i16x2: write(o, R{d}, "= gt_i16x2", R{x}, R{y}); break;
+ case Op::gte_i16x2: write(o, R{d}, "= gte_i16x2", R{x}, R{y}); break;
+
+
+ case Op::bit_and : write(o, R{d}, "= bit_and" , R{x}, R{y} ); break;
+ case Op::bit_or : write(o, R{d}, "= bit_or" , R{x}, R{y} ); break;
+ case Op::bit_xor : write(o, R{d}, "= bit_xor" , R{x}, R{y} ); break;
+ case Op::bit_clear: write(o, R{d}, "= bit_clear", R{x}, R{y} ); break;
+ case Op::select : write(o, R{d}, "= select" , R{x}, R{y}, R{z}); break;
+
+ case Op::bytes: write(o, R{d}, "= bytes", R{x}, Hex{imm}); break;
+ case Op::extract: write(o, R{d}, "= extract", R{x}, Shift{imm}, R{y}); break;
+ case Op::pack: write(o, R{d}, "= pack", R{x}, R{y}, Shift{imm}); break;
+
+ case Op::to_f32: write(o, R{d}, "= to_f32", R{x}); break;
+ case Op::to_i32: write(o, R{d}, "= to_i32", R{x}); break;
+ }
+ write(o, "\n");
+ }
+ }
+
+ // Builder -> Program, with liveness and loop hoisting analysis.
+
+ Program Builder::done(const char* debug_name) {
+ // Basic liveness analysis:
+ // an instruction is live until all live instructions that need its input have retired.
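+        //
+        // An illustrative walk (editorial, not from upstream) over
+        //     v0 = load32 arg(0)
+        //     v1 = splat 2
+        //     v2 = mul_i32 v0 v1
+        //          store32 arg(1) v2     (id 3)
+        // The store is a side effect, so its death is its own id, 3; visiting
+        // it marks v2 live until 3, and visiting v2 marks v0 and v1 live until 2.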
+ for (Val id = fProgram.size(); id --> 0; ) {
+ Instruction& inst = fProgram[id];
+ // All side-effect-only instructions (stores) are live.
+ if (inst.op <= Op::store32) {
+ inst.death = id;
+ }
+ // The arguments of a live instruction must live until at least that instruction.
+ if (inst.death != 0) {
+ // Notice how we're walking backward, storing the latest instruction in death.
+ if (inst.x != NA && fProgram[inst.x].death == 0) { fProgram[inst.x].death = id; }
+ if (inst.y != NA && fProgram[inst.y].death == 0) { fProgram[inst.y].death = id; }
+ if (inst.z != NA && fProgram[inst.z].death == 0) { fProgram[inst.z].death = id; }
+ }
+ }
+
+ // Mark which values don't depend on the loop and can be hoisted.
+ for (Val id = 0; id < (Val)fProgram.size(); id++) {
+ Builder::Instruction& inst = fProgram[id];
+
+ // Varying loads (and gathers) and stores cannot be hoisted out of the loop.
+ if (inst.op <= Op::gather32) {
+ inst.can_hoist = false;
+ }
+
+ // If any of an instruction's inputs can't be hoisted, it can't be hoisted itself.
+ if (inst.can_hoist) {
+ if (inst.x != NA) { inst.can_hoist &= fProgram[inst.x].can_hoist; }
+ if (inst.y != NA) { inst.can_hoist &= fProgram[inst.y].can_hoist; }
+ if (inst.z != NA) { inst.can_hoist &= fProgram[inst.z].can_hoist; }
+ }
+
+ // We'll want to know if hoisted values are used in the loop;
+ // if not, we can recycle their registers like we do loop values.
+ if (!inst.can_hoist /*i.e. we're in the loop, so the arguments are used_in_loop*/) {
+ if (inst.x != NA) { fProgram[inst.x].used_in_loop = true; }
+ if (inst.y != NA) { fProgram[inst.y].used_in_loop = true; }
+ if (inst.z != NA) { fProgram[inst.z].used_in_loop = true; }
+ }
+ }
+
+ return {fProgram, fStrides, debug_name};
+ }
+
+    // TODO: it's probably not important that we include post-Builder::done() fields like
+    // death, can_hoist, and used_in_loop in operator==() and InstructionHash::operator().
+    // They'll always have the same initial values as set in Builder::push().
+
+ static bool operator==(const Builder::Instruction& a, const Builder::Instruction& b) {
+ return a.op == b.op
+ && a.x == b.x
+ && a.y == b.y
+ && a.z == b.z
+ && a.imm == b.imm
+ && a.death == b.death
+ && a.can_hoist == b.can_hoist
+ && a.used_in_loop == b.used_in_loop;
+ }
+
+ // TODO: replace with SkOpts::hash()?
+ size_t Builder::InstructionHash::operator()(const Instruction& inst) const {
+ return Hash((uint8_t)inst.op)
+ ^ Hash(inst.x)
+ ^ Hash(inst.y)
+ ^ Hash(inst.z)
+ ^ Hash(inst.imm)
+ ^ Hash(inst.death)
+ ^ Hash(inst.can_hoist)
+ ^ Hash(inst.used_in_loop);
+ }
+
+ // Most instructions produce a value and return it by ID,
+ // the value-producing instruction's own index in the program vector.
+ Val Builder::push(Op op, Val x, Val y, Val z, int imm) {
+ Instruction inst{op, x, y, z, imm,
+ /*death=*/0, /*can_hoist=*/true, /*used_in_loop=*/false};
+
+ // Basic common subexpression elimination:
+ // if we've already seen this exact Instruction, use it instead of creating a new one.
+ if (Val* id = fIndex.find(inst)) {
+ return *id;
+ }
+ Val id = static_cast<Val>(fProgram.size());
+ fProgram.push_back(inst);
+ fIndex.set(inst, id);
+ return id;
+ }
+
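+    // A consequence worth noting (editorial, not from upstream): thanks to the
+    // CSE in push(), building the same expression twice is free, e.g.
+    //
+    //     skvm::Builder b;
+    //     skvm::I32 x = b.splat(0),
+    //               y = b.splat(0);   // x.id == y.id; no second instruction
+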
+ bool Builder::isZero(Val id) const {
+ return fProgram[id].op == Op::splat
+ && fProgram[id].imm == 0;
+ }
+
+ Arg Builder::arg(int stride) {
+ int ix = (int)fStrides.size();
+ fStrides.push_back(stride);
+ return {ix};
+ }
+
+ void Builder::store8 (Arg ptr, I32 val) { (void)this->push(Op::store8 , val.id,NA,NA, ptr.ix); }
+ void Builder::store16(Arg ptr, I32 val) { (void)this->push(Op::store16, val.id,NA,NA, ptr.ix); }
+ void Builder::store32(Arg ptr, I32 val) { (void)this->push(Op::store32, val.id,NA,NA, ptr.ix); }
+
+ I32 Builder::load8 (Arg ptr) { return {this->push(Op::load8 , NA,NA,NA, ptr.ix) }; }
+ I32 Builder::load16(Arg ptr) { return {this->push(Op::load16, NA,NA,NA, ptr.ix) }; }
+ I32 Builder::load32(Arg ptr) { return {this->push(Op::load32, NA,NA,NA, ptr.ix) }; }
+
+ I32 Builder::gather8 (Arg ptr, I32 offset) {
+ return {this->push(Op::gather8 , offset.id,NA,NA, ptr.ix)};
+ }
+ I32 Builder::gather16(Arg ptr, I32 offset) {
+ return {this->push(Op::gather16, offset.id,NA,NA, ptr.ix)};
+ }
+ I32 Builder::gather32(Arg ptr, I32 offset) {
+ return {this->push(Op::gather32, offset.id,NA,NA, ptr.ix)};
+ }
+
+ I32 Builder::uniform8(Arg ptr, int offset) {
+ return {this->push(Op::uniform8, NA,NA,NA, ptr.ix | (offset<<16))};
+ }
+ I32 Builder::uniform16(Arg ptr, int offset) {
+ return {this->push(Op::uniform16, NA,NA,NA, ptr.ix | (offset<<16))};
+ }
+ I32 Builder::uniform32(Arg ptr, int offset) {
+ return {this->push(Op::uniform32, NA,NA,NA, ptr.ix | (offset<<16))};
+ }
+
+ // The two splat() functions are just syntax sugar over splatting a 4-byte bit pattern.
+ I32 Builder::splat(int n) { return {this->push(Op::splat, NA,NA,NA, n) }; }
+ F32 Builder::splat(float f) {
+ int bits;
+ memcpy(&bits, &f, 4);
+ return {this->push(Op::splat, NA,NA,NA, bits)};
+ }
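+    // So, e.g., splat(1.0f) and splat(0x3f800000) build the same instruction
+    // and CSE to one value, as 0x3f800000 is 1.0f's IEEE-754 bit pattern.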
+
+ F32 Builder::add(F32 x, F32 y ) { return {this->push(Op::add_f32, x.id, y.id)}; }
+ F32 Builder::sub(F32 x, F32 y ) { return {this->push(Op::sub_f32, x.id, y.id)}; }
+ F32 Builder::mul(F32 x, F32 y ) { return {this->push(Op::mul_f32, x.id, y.id)}; }
+ F32 Builder::div(F32 x, F32 y ) { return {this->push(Op::div_f32, x.id, y.id)}; }
+ F32 Builder::mad(F32 x, F32 y, F32 z) {
+ if (this->isZero(z.id)) {
+ return this->mul(x,y);
+ }
+ return {this->push(Op::mad_f32, x.id, y.id, z.id)};
+ }
+
+ I32 Builder::add(I32 x, I32 y) { return {this->push(Op::add_i32, x.id, y.id)}; }
+ I32 Builder::sub(I32 x, I32 y) { return {this->push(Op::sub_i32, x.id, y.id)}; }
+ I32 Builder::mul(I32 x, I32 y) { return {this->push(Op::mul_i32, x.id, y.id)}; }
+
+ I32 Builder::add_16x2(I32 x, I32 y) { return {this->push(Op::add_i16x2, x.id, y.id)}; }
+ I32 Builder::sub_16x2(I32 x, I32 y) { return {this->push(Op::sub_i16x2, x.id, y.id)}; }
+ I32 Builder::mul_16x2(I32 x, I32 y) { return {this->push(Op::mul_i16x2, x.id, y.id)}; }
+
+ I32 Builder::shl(I32 x, int bits) { return {this->push(Op::shl_i32, x.id,NA,NA, bits)}; }
+ I32 Builder::shr(I32 x, int bits) { return {this->push(Op::shr_i32, x.id,NA,NA, bits)}; }
+ I32 Builder::sra(I32 x, int bits) { return {this->push(Op::sra_i32, x.id,NA,NA, bits)}; }
+
+ I32 Builder::shl_16x2(I32 x, int bits) { return {this->push(Op::shl_i16x2, x.id,NA,NA, bits)}; }
+ I32 Builder::shr_16x2(I32 x, int bits) { return {this->push(Op::shr_i16x2, x.id,NA,NA, bits)}; }
+ I32 Builder::sra_16x2(I32 x, int bits) { return {this->push(Op::sra_i16x2, x.id,NA,NA, bits)}; }
+
+ I32 Builder:: eq(F32 x, F32 y) { return {this->push(Op:: eq_f32, x.id, y.id)}; }
+ I32 Builder::neq(F32 x, F32 y) { return {this->push(Op::neq_f32, x.id, y.id)}; }
+ I32 Builder:: lt(F32 x, F32 y) { return {this->push(Op:: lt_f32, x.id, y.id)}; }
+ I32 Builder::lte(F32 x, F32 y) { return {this->push(Op::lte_f32, x.id, y.id)}; }
+ I32 Builder:: gt(F32 x, F32 y) { return {this->push(Op:: gt_f32, x.id, y.id)}; }
+ I32 Builder::gte(F32 x, F32 y) { return {this->push(Op::gte_f32, x.id, y.id)}; }
+
+ I32 Builder:: eq(I32 x, I32 y) { return {this->push(Op:: eq_i32, x.id, y.id)}; }
+ I32 Builder::neq(I32 x, I32 y) { return {this->push(Op::neq_i32, x.id, y.id)}; }
+ I32 Builder:: lt(I32 x, I32 y) { return {this->push(Op:: lt_i32, x.id, y.id)}; }
+ I32 Builder::lte(I32 x, I32 y) { return {this->push(Op::lte_i32, x.id, y.id)}; }
+ I32 Builder:: gt(I32 x, I32 y) { return {this->push(Op:: gt_i32, x.id, y.id)}; }
+ I32 Builder::gte(I32 x, I32 y) { return {this->push(Op::gte_i32, x.id, y.id)}; }
+
+ I32 Builder:: eq_16x2(I32 x, I32 y) { return {this->push(Op:: eq_i16x2, x.id, y.id)}; }
+ I32 Builder::neq_16x2(I32 x, I32 y) { return {this->push(Op::neq_i16x2, x.id, y.id)}; }
+ I32 Builder:: lt_16x2(I32 x, I32 y) { return {this->push(Op:: lt_i16x2, x.id, y.id)}; }
+ I32 Builder::lte_16x2(I32 x, I32 y) { return {this->push(Op::lte_i16x2, x.id, y.id)}; }
+ I32 Builder:: gt_16x2(I32 x, I32 y) { return {this->push(Op:: gt_i16x2, x.id, y.id)}; }
+ I32 Builder::gte_16x2(I32 x, I32 y) { return {this->push(Op::gte_i16x2, x.id, y.id)}; }
+
+ I32 Builder::bit_and (I32 x, I32 y) { return {this->push(Op::bit_and , x.id, y.id)}; }
+ I32 Builder::bit_or (I32 x, I32 y) { return {this->push(Op::bit_or , x.id, y.id)}; }
+ I32 Builder::bit_xor (I32 x, I32 y) { return {this->push(Op::bit_xor , x.id, y.id)}; }
+ I32 Builder::bit_clear(I32 x, I32 y) { return {this->push(Op::bit_clear, x.id, y.id)}; }
+ I32 Builder::select(I32 x, I32 y, I32 z) { return {this->push(Op::select, x.id, y.id, z.id)}; }
+
+ I32 Builder::extract(I32 x, int bits, I32 y) {
+ return {this->push(Op::extract, x.id,y.id,NA, bits)};
+ }
+
+ I32 Builder::pack(I32 x, I32 y, int bits) {
+ return {this->push(Op::pack, x.id,y.id,NA, bits)};
+ }
+
+ I32 Builder::bytes(I32 x, int control) {
+ return {this->push(Op::bytes, x.id,NA,NA, control)};
+ }
+
+ F32 Builder::to_f32(I32 x) { return {this->push(Op::to_f32, x.id)}; }
+ I32 Builder::to_i32(F32 x) { return {this->push(Op::to_i32, x.id)}; }
+
+ // ~~~~ Program::eval() and co. ~~~~ //
+
+ // Handy references for x86-64 instruction encoding:
+ // https://wiki.osdev.org/X86-64_Instruction_Encoding
+ // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x64.htm
+ // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x86.htm
+ // http://ref.x86asm.net/coder64.html
+
+ // Used for ModRM / immediate instruction encoding.
+ static uint8_t _233(int a, int b, int c) {
+ return (a & 3) << 6
+ | (b & 7) << 3
+ | (c & 7) << 0;
+ }
+
+    // The ModRM byte encodes the arguments of an opcode.
+ enum class Mod { Indirect, OneByteImm, FourByteImm, Direct };
+ static uint8_t mod_rm(Mod mod, int reg, int rm) {
+ return _233((int)mod, reg, rm);
+ }
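+
+    // A quick worked example: mod_rm(Mod::Direct, /*reg=*/2, /*rm=*/5)
+    // is _233(3,2,5) = 0b11'010'101 = 0xd5, a register-direct operand pair.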
+
+ static Mod mod(int imm) {
+ if (imm == 0) { return Mod::Indirect; }
+ if (SkTFitsIn<int8_t>(imm)) { return Mod::OneByteImm; }
+ return Mod::FourByteImm;
+ }
+
+ static int imm_bytes(Mod mod) {
+ switch (mod) {
+ case Mod::Indirect: return 0;
+ case Mod::OneByteImm: return 1;
+ case Mod::FourByteImm: return 4;
+ case Mod::Direct: SkUNREACHABLE;
+ }
+ SkUNREACHABLE;
+ }
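+
+    // So a zero offset takes no displacement bytes, offsets in [-128,127]
+    // take one byte, and anything larger takes the full four.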
+
+#if 0
+    // The SIB byte encodes a memory address, base + (index * scale).
+ enum class Scale { One, Two, Four, Eight };
+ static uint8_t sib(Scale scale, int index, int base) {
+ return _233((int)scale, index, base);
+ }
+#endif
+
+ // The REX prefix is used to extend most old 32-bit instructions to 64-bit.
+ static uint8_t rex(bool W, // If set, operation is 64-bit, otherwise default, usually 32-bit.
+ bool R, // Extra top bit to select ModRM reg, registers 8-15.
+ bool X, // Extra top bit for SIB index register.
+ bool B) { // Extra top bit for SIB base or ModRM rm register.
+ return 0b01000000 // Fixed 0100 for top four bits.
+ | (W << 3)
+ | (R << 2)
+ | (X << 1)
+ | (B << 0);
+ }
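+
+    // e.g. rex(1,0,0,1) == 0x49: a 64-bit operation whose ModRM rm field
+    // names one of the extended registers r8-r15.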
+
+ // The VEX prefix extends SSE operations to AVX. Used generally, even with XMM.
+ struct VEX {
+ int len;
+ uint8_t bytes[3];
+ };
+
+ static VEX vex(bool WE, // Like REX W for int operations, or opcode extension for float?
+ bool R, // Same as REX R. Pass high bit of dst register, dst>>3.
+ bool X, // Same as REX X.
+ bool B, // Same as REX B. Pass y>>3 for 3-arg ops, x>>3 for 2-arg.
+ int map, // SSE opcode map selector: 0x0f, 0x380f, 0x3a0f.
+ int vvvv, // 4-bit second operand register. Pass our x for 3-arg ops.
+ bool L, // Set for 256-bit ymm operations, off for 128-bit xmm.
+ int pp) { // SSE mandatory prefix: 0x66, 0xf3, 0xf2, else none.
+
+ // Pack x86 opcode map selector to 5-bit VEX encoding.
+ map = [map]{
+ switch (map) {
+ case 0x0f: return 0b00001;
+ case 0x380f: return 0b00010;
+ case 0x3a0f: return 0b00011;
+ // Several more cases only used by XOP / TBM.
+ }
+ SkUNREACHABLE;
+ }();
+
+ // Pack mandatory SSE opcode prefix byte to 2-bit VEX encoding.
+ pp = [pp]{
+ switch (pp) {
+ case 0x66: return 0b01;
+ case 0xf3: return 0b10;
+ case 0xf2: return 0b11;
+ }
+ return 0b00;
+ }();
+
+ VEX vex = {0, {0,0,0}};
+ if (X == 0 && B == 0 && WE == 0 && map == 0b00001) {
+ // With these conditions met, we can optionally compress VEX to 2-byte.
+ vex.len = 2;
+ vex.bytes[0] = 0xc5;
+ vex.bytes[1] = (pp & 3) << 0
+ | (L & 1) << 2
+ | (~vvvv & 15) << 3
+ | (~(int)R & 1) << 7;
+ } else {
+ // We could use this 3-byte VEX prefix all the time if we like.
+ vex.len = 3;
+ vex.bytes[0] = 0xc4;
+ vex.bytes[1] = (map & 31) << 0
+ | (~(int)B & 1) << 5
+ | (~(int)X & 1) << 6
+ | (~(int)R & 1) << 7;
+ vex.bytes[2] = (pp & 3) << 0
+ | (L & 1) << 2
+ | (~vvvv & 15) << 3
+ | (WE & 1) << 7;
+ }
+ return vex;
+ }
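+
+    // Worked example: vaddps ymm1,ymm2,ymm3 calls vex(0, 0,0,0, 0x0f, /*vvvv=*/2, 1, 0).
+    // That meets the compression conditions, giving the 2-byte prefix c5 ec;
+    // with opcode 0x58 and ModRM 0xcb, the whole instruction is c5 ec 58 cb.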
+
+ Assembler::Assembler(void* buf) : fCode((uint8_t*)buf), fCurr(fCode), fSize(0) {}
+
+ size_t Assembler::size() const { return fSize; }
+
+ void Assembler::bytes(const void* p, int n) {
+ if (fCurr) {
+ memcpy(fCurr, p, n);
+ fCurr += n;
+ }
+ fSize += n;
+ }
+
+ void Assembler::byte(uint8_t b) { this->bytes(&b, 1); }
+ void Assembler::word(uint32_t w) { this->bytes(&w, 4); }
+
+ void Assembler::align(int mod) {
+ while (this->size() % mod) {
+ this->byte(0x00);
+ }
+ }
+
+ void Assembler::vzeroupper() {
+ this->byte(0xc5);
+ this->byte(0xf8);
+ this->byte(0x77);
+ }
+ void Assembler::ret() { this->byte(0xc3); }
+
+ // Common instruction building for 64-bit opcodes with an immediate argument.
+ void Assembler::op(int opcode, int opcode_ext, GP64 dst, int imm) {
+ opcode |= 0b0000'0001; // low bit set for 64-bit operands
+ opcode |= 0b1000'0000; // top bit set for instructions with any immediate
+
+ int imm_bytes = 4;
+ if (SkTFitsIn<int8_t>(imm)) {
+ imm_bytes = 1;
+ opcode |= 0b0000'0010; // second bit set for 8-bit immediate, else 32-bit.
+ }
+
+ this->byte(rex(1,0,0,dst>>3));
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Direct, opcode_ext, dst&7));
+ this->bytes(&imm, imm_bytes);
+ }
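+
+    // e.g. add(rax, 1): the immediate fits in 8 bits, so the opcode becomes
+    // 0x83 and we emit 48 83 c0 01 (REX.W, opcode, ModRM /0 on rax, imm8).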
+
+ void Assembler::add(GP64 dst, int imm) { this->op(0,0b000, dst,imm); }
+ void Assembler::sub(GP64 dst, int imm) { this->op(0,0b101, dst,imm); }
+ void Assembler::cmp(GP64 reg, int imm) { this->op(0,0b111, reg,imm); }
+
+ void Assembler::op(int prefix, int map, int opcode, Ymm dst, Ymm x, Ymm y, bool W/*=false*/) {
+ VEX v = vex(W, dst>>3, 0, y>>3,
+ map, x, 1/*ymm, not xmm*/, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Direct, dst&7, y&7));
+ }
+
+ void Assembler::vpaddd (Ymm dst, Ymm x, Ymm y) { this->op(0x66, 0x0f,0xfe, dst,x,y); }
+ void Assembler::vpsubd (Ymm dst, Ymm x, Ymm y) { this->op(0x66, 0x0f,0xfa, dst,x,y); }
+ void Assembler::vpmulld(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x40, dst,x,y); }
+
+ void Assembler::vpsubw (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xf9, dst,x,y); }
+ void Assembler::vpmullw(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xd5, dst,x,y); }
+
+ void Assembler::vpand (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xdb, dst,x,y); }
+ void Assembler::vpor (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xeb, dst,x,y); }
+ void Assembler::vpxor (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xef, dst,x,y); }
+ void Assembler::vpandn(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xdf, dst,x,y); }
+
+ void Assembler::vaddps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x58, dst,x,y); }
+ void Assembler::vsubps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x5c, dst,x,y); }
+ void Assembler::vmulps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x59, dst,x,y); }
+ void Assembler::vdivps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x5e, dst,x,y); }
+
+ void Assembler::vfmadd132ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x98, dst,x,y); }
+ void Assembler::vfmadd213ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0xa8, dst,x,y); }
+ void Assembler::vfmadd231ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0xb8, dst,x,y); }
+
+ void Assembler::vpackusdw(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x2b, dst,x,y); }
+ void Assembler::vpackuswb(Ymm dst, Ymm x, Ymm y) { this->op(0x66, 0x0f,0x67, dst,x,y); }
+
+ void Assembler::vpcmpeqd(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0x76, dst,x,y); }
+ void Assembler::vpcmpgtd(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0x66, dst,x,y); }
+
+ void Assembler::vpblendvb(Ymm dst, Ymm x, Ymm y, Ymm z) {
+ int prefix = 0x66,
+ map = 0x3a0f,
+ opcode = 0x4c;
+ VEX v = vex(0, dst>>3, 0, y>>3,
+ map, x, /*ymm?*/1, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Direct, dst&7, y&7));
+ this->byte(z << 4);
+ }
+
+ // dst = x op /opcode_ext imm
+ void Assembler::op(int prefix, int map, int opcode, int opcode_ext, Ymm dst, Ymm x, int imm) {
+ // This is a little weird, but if we pass the opcode_ext as if it were the dst register,
+ // the dst register as if x, and the x register as if y, all the bits end up where we want.
+ this->op(prefix, map, opcode, (Ymm)opcode_ext,dst,x);
+ this->byte(imm);
+ }
+
+ void Assembler::vpslld(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,6, dst,x,imm); }
+ void Assembler::vpsrld(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,2, dst,x,imm); }
+ void Assembler::vpsrad(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,4, dst,x,imm); }
+
+ void Assembler::vpsrlw(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x71,2, dst,x,imm); }
+
+ void Assembler::vpermq(Ymm dst, Ymm x, int imm) {
+        // A bit unusual among the instructions we use, this is a 64-bit operation, so we set W.
+ bool W = true;
+ this->op(0x66,0x3a0f,0x00, dst,x,W);
+ this->byte(imm);
+ }
+
+ void Assembler::vmovdqa(Ymm dst, Ymm src) { this->op(0x66,0x0f,0x6f, dst,src); }
+
+ void Assembler::vcvtdq2ps (Ymm dst, Ymm x) { this->op(0, 0x0f,0x5b, dst,x); }
+ void Assembler::vcvttps2dq(Ymm dst, Ymm x) { this->op(0xf3,0x0f,0x5b, dst,x); }
+
+ Assembler::Label Assembler::here() {
+ return { (int)this->size(), Label::None, {} };
+ }
+
+ int Assembler::disp19(Label* l) {
+ SkASSERT(l->kind == Label::None ||
+ l->kind == Label::ARMDisp19);
+ l->kind = Label::ARMDisp19;
+ l->references.push_back(here().offset);
+ // ARM 19-bit instruction count, from the beginning of this instruction.
+ return (l->offset - here().offset) / 4;
+ }
+
+ int Assembler::disp32(Label* l) {
+ SkASSERT(l->kind == Label::None ||
+ l->kind == Label::X86Disp32);
+ l->kind = Label::X86Disp32;
+ l->references.push_back(here().offset);
+ // x86 32-bit byte count, from the end of this instruction.
+ return l->offset - (here().offset + 4);
+ }
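+
+    // e.g. a jmp at offset 10 targeting a label bound at offset 0 writes its
+    // 4-byte displacement at offset 11, so disp32() returns 0 - (11+4) = -15,
+    // counting back from the end of the 5-byte jmp.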
+
+ void Assembler::op(int prefix, int map, int opcode, Ymm dst, Ymm x, Label* l) {
+ // IP-relative addressing uses Mod::Indirect with the R/M encoded as-if rbp or r13.
+ const int rip = rbp;
+
+ VEX v = vex(0, dst>>3, 0, rip>>3,
+ map, x, /*ymm?*/1, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, dst&7, rip&7));
+ this->word(this->disp32(l));
+ }
+
+ void Assembler::vpshufb(Ymm dst, Ymm x, Label* l) { this->op(0x66,0x380f,0x00, dst,x,l); }
+
+ void Assembler::vbroadcastss(Ymm dst, Label* l) { this->op(0x66,0x380f,0x18, dst, (Ymm)0, l); }
+ void Assembler::vbroadcastss(Ymm dst, Xmm src) { this->op(0x66,0x380f,0x18, dst, (Ymm)src); }
+ void Assembler::vbroadcastss(Ymm dst, GP64 ptr, int off) {
+ int prefix = 0x66,
+ map = 0x380f,
+ opcode = 0x18;
+ VEX v = vex(0, dst>>3, 0, ptr>>3,
+ map, 0, /*ymm?*/1, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+
+ this->byte(mod_rm(mod(off), dst&7, ptr&7));
+ this->bytes(&off, imm_bytes(mod(off)));
+ }
+
+ void Assembler::jump(uint8_t condition, Label* l) {
+ // These conditional jumps can be either 2 bytes (short) or 6 bytes (near):
+ // 7? one-byte-disp
+ // 0F 8? four-byte-disp
+ // We always use the near displacement to make updating labels simpler (no resizing).
+ this->byte(0x0f);
+ this->byte(condition);
+ this->word(this->disp32(l));
+ }
+ void Assembler::je (Label* l) { this->jump(0x84, l); }
+ void Assembler::jne(Label* l) { this->jump(0x85, l); }
+ void Assembler::jl (Label* l) { this->jump(0x8c, l); }
+
+ void Assembler::jmp(Label* l) {
+        // Like above in jump(), we could use an 8-bit displacement here, but we always use 32-bit.
+ this->byte(0xe9);
+ this->word(this->disp32(l));
+ }
+
+ void Assembler::load_store(int prefix, int map, int opcode, Ymm ymm, GP64 ptr) {
+ VEX v = vex(0, ymm>>3, 0, ptr>>3,
+ map, 0, /*ymm?*/1, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, ymm&7, ptr&7));
+ }
+
+ void Assembler::vmovups (Ymm dst, GP64 src) { this->load_store(0 , 0x0f,0x10, dst,src); }
+ void Assembler::vpmovzxwd(Ymm dst, GP64 src) { this->load_store(0x66,0x380f,0x33, dst,src); }
+ void Assembler::vpmovzxbd(Ymm dst, GP64 src) { this->load_store(0x66,0x380f,0x31, dst,src); }
+
+ void Assembler::vmovups (GP64 dst, Ymm src) { this->load_store(0 , 0x0f,0x11, src,dst); }
+ void Assembler::vmovups (GP64 dst, Xmm src) {
+ // Same as vmovups(GP64,YMM) and load_store() except ymm? is 0.
+ int prefix = 0,
+ map = 0x0f,
+ opcode = 0x11;
+ VEX v = vex(0, src>>3, 0, dst>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
+ }
+
+ void Assembler::vmovq(GP64 dst, Xmm src) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0xd6;
+ VEX v = vex(0, src>>3, 0, dst>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
+ }
+
+ void Assembler::vmovd(GP64 dst, Xmm src) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0x7e;
+ VEX v = vex(0, src>>3, 0, dst>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
+ }
+
+ void Assembler::vmovd_direct(GP64 dst, Xmm src) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0x7e;
+ VEX v = vex(0, src>>3, 0, dst>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Direct, src&7, dst&7));
+ }
+
+ void Assembler::vmovd(Xmm dst, GP64 src) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0x6e;
+ VEX v = vex(0, dst>>3, 0, src>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, dst&7, src&7));
+ }
+
+ void Assembler::vmovd_direct(Xmm dst, GP64 src) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0x6e;
+ VEX v = vex(0, dst>>3, 0, src>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Direct, dst&7, src&7));
+ }
+
+ void Assembler::movzbl(GP64 dst, GP64 src, int off) {
+ if ((dst>>3) || (src>>3)) {
+ this->byte(rex(0,dst>>3,0,src>>3));
+ }
+ this->byte(0x0f);
+ this->byte(0xb6);
+ this->byte(mod_rm(mod(off), dst&7, src&7));
+ this->bytes(&off, imm_bytes(mod(off)));
+ }
+
+ void Assembler::movb(GP64 dst, GP64 src) {
+ if ((dst>>3) || (src>>3)) {
+ this->byte(rex(0,src>>3,0,dst>>3));
+ }
+ this->byte(0x88);
+ this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
+ }
+
+ void Assembler::vpinsrw(Xmm dst, Xmm src, GP64 ptr, int imm) {
+ int prefix = 0x66,
+ map = 0x0f,
+ opcode = 0xc4;
+ VEX v = vex(0, dst>>3, 0, ptr>>3,
+ map, src, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, dst&7, ptr&7));
+ this->byte(imm);
+ }
+
+ void Assembler::vpinsrb(Xmm dst, Xmm src, GP64 ptr, int imm) {
+ int prefix = 0x66,
+ map = 0x3a0f,
+ opcode = 0x20;
+ VEX v = vex(0, dst>>3, 0, ptr>>3,
+ map, src, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, dst&7, ptr&7));
+ this->byte(imm);
+ }
+
+ void Assembler::vpextrw(GP64 ptr, Xmm src, int imm) {
+ int prefix = 0x66,
+ map = 0x3a0f,
+ opcode = 0x15;
+
+ VEX v = vex(0, src>>3, 0, ptr>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, src&7, ptr&7));
+ this->byte(imm);
+ }
+ void Assembler::vpextrb(GP64 ptr, Xmm src, int imm) {
+ int prefix = 0x66,
+ map = 0x3a0f,
+ opcode = 0x14;
+
+ VEX v = vex(0, src>>3, 0, ptr>>3,
+ map, 0, /*ymm?*/0, prefix);
+ this->bytes(v.bytes, v.len);
+ this->byte(opcode);
+ this->byte(mod_rm(Mod::Indirect, src&7, ptr&7));
+ this->byte(imm);
+ }
+
+ // https://static.docs.arm.com/ddi0596/a/DDI_0596_ARM_a64_instruction_set_architecture.pdf
+
+ static int operator"" _mask(unsigned long long bits) { return (1<<(int)bits)-1; }
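+    // e.g. 5_mask == 0b11111, handy for masking each field to its width below.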
+
+ void Assembler::op(uint32_t hi, V m, uint32_t lo, V n, V d) {
+ this->word( (hi & 11_mask) << 21
+ | (m & 5_mask) << 16
+ | (lo & 6_mask) << 10
+ | (n & 5_mask) << 5
+ | (d & 5_mask) << 0);
+ }
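+
+    // e.g. and16b(v0, v1, v2) packs to 0x4e221c20, the A64 encoding of
+    // "and v0.16b, v1.16b, v2.16b".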
+
+ void Assembler::and16b(V d, V n, V m) { this->op(0b0'1'0'01110'00'1, m, 0b00011'1, n, d); }
+ void Assembler::orr16b(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b00011'1, n, d); }
+ void Assembler::eor16b(V d, V n, V m) { this->op(0b0'1'1'01110'00'1, m, 0b00011'1, n, d); }
+ void Assembler::bic16b(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b00011'1, n, d); }
+ void Assembler::bsl16b(V d, V n, V m) { this->op(0b0'1'1'01110'01'1, m, 0b00011'1, n, d); }
+
+ void Assembler::add4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10000'1, n, d); }
+ void Assembler::sub4s(V d, V n, V m) { this->op(0b0'1'1'01110'10'1, m, 0b10000'1, n, d); }
+ void Assembler::mul4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10011'1, n, d); }
+
+ void Assembler::cmeq4s(V d, V n, V m) { this->op(0b0'1'1'01110'10'1, m, 0b10001'1, n, d); }
+ void Assembler::cmgt4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b0011'0'1, n, d); }
+
+ void Assembler::sub8h(V d, V n, V m) { this->op(0b0'1'1'01110'01'1, m, 0b10000'1, n, d); }
+ void Assembler::mul8h(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b10011'1, n, d); }
+
+ void Assembler::fadd4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11010'1, n, d); }
+ void Assembler::fsub4s(V d, V n, V m) { this->op(0b0'1'0'01110'1'0'1, m, 0b11010'1, n, d); }
+ void Assembler::fmul4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11011'1, n, d); }
+ void Assembler::fdiv4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11111'1, n, d); }
+
+ void Assembler::fmla4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11001'1, n, d); }
+
+ void Assembler::tbl(V d, V n, V m) { this->op(0b0'1'001110'00'0, m, 0b0'00'0'00, n, d); }
+
+ void Assembler::op(uint32_t op22, int imm, V n, V d) {
+ this->word( (op22 & 22_mask) << 10
+ | imm << 16 // imm is embedded inside op, bit size depends on op
+ | (n & 5_mask) << 5
+ | (d & 5_mask) << 0);
+ }
+
+ void Assembler::sli4s(V d, V n, int imm) {
+ this->op(0b0'1'1'011110'0100'000'01010'1, ( imm&31), n, d);
+ }
+ void Assembler::shl4s(V d, V n, int imm) {
+ this->op(0b0'1'0'011110'0100'000'01010'1, ( imm&31), n, d);
+ }
+ void Assembler::sshr4s(V d, V n, int imm) {
+ this->op(0b0'1'0'011110'0100'000'00'0'0'0'1, (-imm&31), n, d);
+ }
+ void Assembler::ushr4s(V d, V n, int imm) {
+ this->op(0b0'1'1'011110'0100'000'00'0'0'0'1, (-imm&31), n, d);
+ }
+ void Assembler::ushr8h(V d, V n, int imm) {
+ this->op(0b0'1'1'011110'0010'000'00'0'0'0'1, (-imm&15), n, d);
+ }
+
+ void Assembler::scvtf4s (V d, V n) { this->op(0b0'1'0'01110'0'0'10000'11101'10, n,d); }
+ void Assembler::fcvtzs4s(V d, V n) { this->op(0b0'1'0'01110'1'0'10000'1101'1'10, n,d); }
+
+ void Assembler::xtns2h(V d, V n) { this->op(0b0'0'0'01110'01'10000'10010'10, n,d); }
+ void Assembler::xtnh2b(V d, V n) { this->op(0b0'0'0'01110'00'10000'10010'10, n,d); }
+
+ void Assembler::uxtlb2h(V d, V n) { this->op(0b0'0'1'011110'0001'000'10100'1, n,d); }
+ void Assembler::uxtlh2s(V d, V n) { this->op(0b0'0'1'011110'0010'000'10100'1, n,d); }
+
+ void Assembler::ret(X n) {
+ this->word(0b1101011'0'0'10'11111'0000'0'0 << 10
+ | (n & 5_mask) << 5);
+ }
+
+ void Assembler::add(X d, X n, int imm12) {
+ this->word(0b1'0'0'10001'00 << 22
+ | (imm12 & 12_mask) << 10
+ | (n & 5_mask) << 5
+ | (d & 5_mask) << 0);
+ }
+ void Assembler::sub(X d, X n, int imm12) {
+ this->word( 0b1'1'0'10001'00 << 22
+ | (imm12 & 12_mask) << 10
+ | (n & 5_mask) << 5
+ | (d & 5_mask) << 0);
+ }
+ void Assembler::subs(X d, X n, int imm12) {
+ this->word( 0b1'1'1'10001'00 << 22
+ | (imm12 & 12_mask) << 10
+ | (n & 5_mask) << 5
+ | (d & 5_mask) << 0);
+ }
+
+ void Assembler::b(Condition cond, Label* l) {
+ const int imm19 = this->disp19(l);
+ this->word( 0b0101010'0 << 24
+ | (imm19 & 19_mask) << 5
+ | ((int)cond & 4_mask) << 0);
+ }
+ void Assembler::cbz(X t, Label* l) {
+ const int imm19 = this->disp19(l);
+ this->word( 0b1'011010'0 << 24
+ | (imm19 & 19_mask) << 5
+ | (t & 5_mask) << 0);
+ }
+ void Assembler::cbnz(X t, Label* l) {
+ const int imm19 = this->disp19(l);
+ this->word( 0b1'011010'1 << 24
+ | (imm19 & 19_mask) << 5
+ | (t & 5_mask) << 0);
+ }
+
+ void Assembler::ldrq(V dst, X src) { this->op(0b00'111'1'01'11'000000000000, src, dst); }
+ void Assembler::ldrs(V dst, X src) { this->op(0b10'111'1'01'01'000000000000, src, dst); }
+ void Assembler::ldrb(V dst, X src) { this->op(0b00'111'1'01'01'000000000000, src, dst); }
+
+ void Assembler::strq(V src, X dst) { this->op(0b00'111'1'01'10'000000000000, dst, src); }
+ void Assembler::strs(V src, X dst) { this->op(0b10'111'1'01'00'000000000000, dst, src); }
+ void Assembler::strb(V src, X dst) { this->op(0b00'111'1'01'00'000000000000, dst, src); }
+
+ void Assembler::ldrq(V dst, Label* l) {
+ const int imm19 = this->disp19(l);
+ this->word( 0b10'011'1'00 << 24
+ | (imm19 & 19_mask) << 5
+ | (dst & 5_mask) << 0);
+ }
+
+ void Assembler::label(Label* l) {
+ if (fCode) {
+ // The instructions all currently point to l->offset.
+ // We'll want to add a delta to point them to here().
+ int delta = here().offset - l->offset;
+ l->offset = here().offset;
+
+ if (l->kind == Label::ARMDisp19) {
+ for (int ref : l->references) {
+                // ref points to a 32-bit instruction whose 19-bit displacement is counted in instructions.
+ uint32_t inst;
+ memcpy(&inst, fCode + ref, 4);
+
+ // [ 8 bits to preserve] [ 19 bit signed displacement ] [ 5 bits to preserve ]
+ int disp = (int)(inst << 8) >> 13;
+
+ disp += delta/4; // delta is in bytes, we want instructions.
+
+ // Put it all back together, preserving the high 8 bits and low 5.
+ inst = ((disp << 5) & (19_mask << 5))
+ | ((inst ) & ~(19_mask << 5));
+
+ memcpy(fCode + ref, &inst, 4);
+ }
+ }
+
+ if (l->kind == Label::X86Disp32) {
+ for (int ref : l->references) {
+ // ref points to a 32-bit displacement in bytes.
+ int disp;
+ memcpy(&disp, fCode + ref, 4);
+
+ disp += delta;
+
+ memcpy(fCode + ref, &disp, 4);
+ }
+ }
+ }
+ }
+
+ void Program::eval(int n, void* args[]) const {
+ const int nargs = (int)fStrides.size();
+
+ if (fJITBuf) {
+ void** a = args;
+ const void* b = fJITBuf;
+ switch (nargs) {
+ case 0: return ((void(*)(int ))b)(n );
+ case 1: return ((void(*)(int,void* ))b)(n,a[0] );
+ case 2: return ((void(*)(int,void*,void* ))b)(n,a[0],a[1] );
+ case 3: return ((void(*)(int,void*,void*,void* ))b)(n,a[0],a[1],a[2] );
+ case 4: return ((void(*)(int,void*,void*,void*,void*))b)(n,a[0],a[1],a[2],a[3]);
+ default: SkUNREACHABLE; // TODO
+ }
+ }
+
+        // We'll operate in SIMT style, knocking off K-sized chunks from n while we can.
+ constexpr int K = 16;
+ using I32 = skvx::Vec<K, int>;
+ using F32 = skvx::Vec<K, float>;
+ using U32 = skvx::Vec<K, uint32_t>;
+ using U16 = skvx::Vec<K, uint16_t>;
+ using U8 = skvx::Vec<K, uint8_t>;
+
+ using I16x2 = skvx::Vec<2*K, int16_t>;
+ using U16x2 = skvx::Vec<2*K, uint16_t>;
+
+ union Slot {
+ F32 f32;
+ I32 i32;
+ U32 u32;
+ I16x2 i16x2;
+ U16x2 u16x2;
+ };
+
+ Slot few_regs[16];
+ std::unique_ptr<char[]> many_regs;
+
+ Slot* regs = few_regs;
+
+ if (fRegs > (int)SK_ARRAY_COUNT(few_regs)) {
+ // Annoyingly we can't trust that malloc() or new will work with Slot because
+ // the skvx::Vec types may have alignment greater than what they provide.
+ // We'll overallocate one extra register so we can align manually.
+ many_regs.reset(new char[ sizeof(Slot) * (fRegs + 1) ]);
+
+ uintptr_t addr = (uintptr_t)many_regs.get();
+ addr += alignof(Slot) -
+ (addr & (alignof(Slot) - 1));
+ SkASSERT((addr & (alignof(Slot) - 1)) == 0);
+ regs = (Slot*)addr;
+ }
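+
+        // e.g. with alignof(Slot) == 64, an addr ending in 0x08 steps forward 56
+        // bytes; an already-aligned addr steps a full 64, absorbed by the extra Slot.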
+
+ auto r = [&](Reg id) -> Slot& {
+ SkASSERT(0 <= id && id < fRegs);
+ return regs[id];
+ };
+ auto arg = [&](int ix) {
+ SkASSERT(0 <= ix && ix < nargs);
+ return args[ix];
+ };
+
+ // Step each argument pointer ahead by its stride a number of times.
+ auto step_args = [&](int times) {
+ for (int i = 0; i < (int)fStrides.size(); i++) {
+ args[i] = (void*)( (char*)args[i] + times * fStrides[i] );
+ }
+ };
+
+ int start = 0,
+ stride;
+ for ( ; n > 0; start = fLoop, n -= stride, step_args(stride)) {
+ stride = n >= K ? K : 1;
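+            // e.g. n == 19 with K == 16 runs one 16-wide pass, then three scalar passes.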
+
+ for (int i = start; i < (int)fInstructions.size(); i++) {
+ Instruction inst = fInstructions[i];
+
+ // d = op(x,y,z/imm)
+ Reg d = inst.d,
+ x = inst.x,
+ y = inst.y,
+ z = inst.z;
+ int imm = inst.imm;
+
+ // Ops that interact with memory need to know whether we're stride=1 or K,
+ // but all non-memory ops can run the same code no matter the stride.
+ switch (2*(int)inst.op + (stride == K ? 1 : 0)) {
+ default: SkUNREACHABLE;
+
+ #define STRIDE_1(op) case 2*(int)op
+ #define STRIDE_K(op) case 2*(int)op + 1
+ STRIDE_1(Op::store8 ): memcpy(arg(imm), &r(x).i32, 1); break;
+ STRIDE_1(Op::store16): memcpy(arg(imm), &r(x).i32, 2); break;
+ STRIDE_1(Op::store32): memcpy(arg(imm), &r(x).i32, 4); break;
+
+ STRIDE_K(Op::store8 ): skvx::cast<uint8_t> (r(x).i32).store(arg(imm)); break;
+ STRIDE_K(Op::store16): skvx::cast<uint16_t>(r(x).i32).store(arg(imm)); break;
+ STRIDE_K(Op::store32): (r(x).i32).store(arg(imm)); break;
+
+ STRIDE_1(Op::load8 ): r(d).i32 = 0; memcpy(&r(d).i32, arg(imm), 1); break;
+ STRIDE_1(Op::load16): r(d).i32 = 0; memcpy(&r(d).i32, arg(imm), 2); break;
+ STRIDE_1(Op::load32): r(d).i32 = 0; memcpy(&r(d).i32, arg(imm), 4); break;
+
+ STRIDE_K(Op::load8 ): r(d).i32= skvx::cast<int>(U8 ::Load(arg(imm))); break;
+ STRIDE_K(Op::load16): r(d).i32= skvx::cast<int>(U16::Load(arg(imm))); break;
+ STRIDE_K(Op::load32): r(d).i32= I32::Load(arg(imm)) ; break;
+
+ STRIDE_1(Op::gather8):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = (i == 0) ? ((const uint8_t* )arg(imm))[ r(x).i32[i] ] : 0;
+ } break;
+ STRIDE_1(Op::gather16):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = (i == 0) ? ((const uint16_t*)arg(imm))[ r(x).i32[i] ] : 0;
+ } break;
+ STRIDE_1(Op::gather32):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = (i == 0) ? ((const int* )arg(imm))[ r(x).i32[i] ] : 0;
+ } break;
+
+ STRIDE_K(Op::gather8):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = ((const uint8_t* )arg(imm))[ r(x).i32[i] ];
+ } break;
+ STRIDE_K(Op::gather16):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = ((const uint16_t*)arg(imm))[ r(x).i32[i] ];
+ } break;
+ STRIDE_K(Op::gather32):
+ for (int i = 0; i < K; i++) {
+ r(d).i32[i] = ((const int* )arg(imm))[ r(x).i32[i] ];
+ } break;
+
+ #undef STRIDE_1
+ #undef STRIDE_K
+
+ // Ops that don't interact with memory should never care about the stride.
+ #define CASE(op) case 2*(int)op: /*fallthrough*/ case 2*(int)op+1
+
+ CASE(Op::uniform8):
+ r(d).i32 = *(const uint8_t* )( (const char*)arg(imm&0xffff) + (imm>>16) );
+ break;
+ CASE(Op::uniform16):
+ r(d).i32 = *(const uint16_t*)( (const char*)arg(imm&0xffff) + (imm>>16) );
+ break;
+ CASE(Op::uniform32):
+ r(d).i32 = *(const int* )( (const char*)arg(imm&0xffff) + (imm>>16) );
+ break;
+
+ CASE(Op::splat): r(d).i32 = imm; break;
+
+ CASE(Op::add_f32): r(d).f32 = r(x).f32 + r(y).f32; break;
+ CASE(Op::sub_f32): r(d).f32 = r(x).f32 - r(y).f32; break;
+ CASE(Op::mul_f32): r(d).f32 = r(x).f32 * r(y).f32; break;
+ CASE(Op::div_f32): r(d).f32 = r(x).f32 / r(y).f32; break;
+
+ CASE(Op::mad_f32): r(d).f32 = r(x).f32 * r(y).f32 + r(z).f32; break;
+
+ CASE(Op::add_i32): r(d).i32 = r(x).i32 + r(y).i32; break;
+ CASE(Op::sub_i32): r(d).i32 = r(x).i32 - r(y).i32; break;
+ CASE(Op::mul_i32): r(d).i32 = r(x).i32 * r(y).i32; break;
+
+ CASE(Op::add_i16x2): r(d).i16x2 = r(x).i16x2 + r(y).i16x2; break;
+ CASE(Op::sub_i16x2): r(d).i16x2 = r(x).i16x2 - r(y).i16x2; break;
+ CASE(Op::mul_i16x2): r(d).i16x2 = r(x).i16x2 * r(y).i16x2; break;
+
+ CASE(Op::shl_i32): r(d).i32 = r(x).i32 << imm; break;
+ CASE(Op::sra_i32): r(d).i32 = r(x).i32 >> imm; break;
+ CASE(Op::shr_i32): r(d).u32 = r(x).u32 >> imm; break;
+
+ CASE(Op::shl_i16x2): r(d).i16x2 = r(x).i16x2 << imm; break;
+ CASE(Op::sra_i16x2): r(d).i16x2 = r(x).i16x2 >> imm; break;
+ CASE(Op::shr_i16x2): r(d).u16x2 = r(x).u16x2 >> imm; break;
+
+ CASE(Op:: eq_f32): r(d).i32 = r(x).f32 == r(y).f32; break;
+ CASE(Op::neq_f32): r(d).i32 = r(x).f32 != r(y).f32; break;
+ CASE(Op:: lt_f32): r(d).i32 = r(x).f32 < r(y).f32; break;
+ CASE(Op::lte_f32): r(d).i32 = r(x).f32 <= r(y).f32; break;
+ CASE(Op:: gt_f32): r(d).i32 = r(x).f32 > r(y).f32; break;
+ CASE(Op::gte_f32): r(d).i32 = r(x).f32 >= r(y).f32; break;
+
+ CASE(Op:: eq_i32): r(d).i32 = r(x).i32 == r(y).i32; break;
+ CASE(Op::neq_i32): r(d).i32 = r(x).i32 != r(y).i32; break;
+ CASE(Op:: lt_i32): r(d).i32 = r(x).i32 < r(y).i32; break;
+ CASE(Op::lte_i32): r(d).i32 = r(x).i32 <= r(y).i32; break;
+ CASE(Op:: gt_i32): r(d).i32 = r(x).i32 > r(y).i32; break;
+ CASE(Op::gte_i32): r(d).i32 = r(x).i32 >= r(y).i32; break;
+
+ CASE(Op:: eq_i16x2): r(d).i16x2 = r(x).i16x2 == r(y).i16x2; break;
+ CASE(Op::neq_i16x2): r(d).i16x2 = r(x).i16x2 != r(y).i16x2; break;
+ CASE(Op:: lt_i16x2): r(d).i16x2 = r(x).i16x2 < r(y).i16x2; break;
+ CASE(Op::lte_i16x2): r(d).i16x2 = r(x).i16x2 <= r(y).i16x2; break;
+ CASE(Op:: gt_i16x2): r(d).i16x2 = r(x).i16x2 > r(y).i16x2; break;
+ CASE(Op::gte_i16x2): r(d).i16x2 = r(x).i16x2 >= r(y).i16x2; break;
+
+ CASE(Op::bit_and ): r(d).i32 = r(x).i32 & r(y).i32; break;
+ CASE(Op::bit_or ): r(d).i32 = r(x).i32 | r(y).i32; break;
+ CASE(Op::bit_xor ): r(d).i32 = r(x).i32 ^ r(y).i32; break;
+ CASE(Op::bit_clear): r(d).i32 = r(x).i32 & ~r(y).i32; break;
+
+ CASE(Op::select): r(d).i32 = skvx::if_then_else(r(x).i32, r(y).i32, r(z).i32);
+ break;
+
+ CASE(Op::extract): r(d).u32 = (r(x).u32 >> imm) & r(y).u32; break;
+ CASE(Op::pack): r(d).u32 = r(x).u32 | (r(y).u32 << imm); break;
+
+ CASE(Op::bytes): {
+ const U32 table[] = {
+ 0,
+ (r(x).u32 ) & 0xff,
+ (r(x).u32 >> 8) & 0xff,
+ (r(x).u32 >> 16) & 0xff,
+ (r(x).u32 >> 24) & 0xff,
+ };
+ r(d).u32 = table[(imm >> 0) & 0xf] << 0
+ | table[(imm >> 4) & 0xf] << 8
+ | table[(imm >> 8) & 0xf] << 16
+ | table[(imm >> 12) & 0xf] << 24;
+ } break;
+
+ CASE(Op::to_f32): r(d).f32 = skvx::cast<float>(r(x).i32); break;
+ CASE(Op::to_i32): r(d).i32 = skvx::cast<int> (r(x).f32); break;
+ #undef CASE
+ }
+ }
+ }
+ }
+
+ bool Program::hasJIT() const {
+ return fJITBuf != nullptr;
+ }
+
+ void Program::dropJIT() {
+ #if defined(SKVM_JIT)
+ if (fJITBuf) {
+ munmap(fJITBuf, fJITSize);
+ }
+ #else
+ SkASSERT(!this->hasJIT());
+ #endif
+
+ fJITBuf = nullptr;
+ fJITSize = 0;
+ }
+
+ Program::~Program() { this->dropJIT(); }
+
+ Program::Program(Program&& other) {
+ fInstructions = std::move(other.fInstructions);
+ fRegs = other.fRegs;
+ fLoop = other.fLoop;
+ fStrides = std::move(other.fStrides);
+ fOriginalProgram = std::move(other.fOriginalProgram);
+
+ std::swap(fJITBuf , other.fJITBuf);
+ std::swap(fJITSize , other.fJITSize);
+ }
+
+ Program& Program::operator=(Program&& other) {
+ fInstructions = std::move(other.fInstructions);
+ fRegs = other.fRegs;
+ fLoop = other.fLoop;
+ fStrides = std::move(other.fStrides);
+ fOriginalProgram = std::move(other.fOriginalProgram);
+
+ std::swap(fJITBuf , other.fJITBuf);
+ std::swap(fJITSize , other.fJITSize);
+ return *this;
+ }
+
+ Program::Program() {}
+
+ Program::Program(const std::vector<Builder::Instruction>& instructions,
+ const std::vector<int>& strides,
+ const char* debug_name)
+ : fStrides(strides)
+ , fOriginalProgram(instructions)
+ {
+ this->setupInterpreter(instructions);
+ #if defined(SKVM_JIT)
+ this->setupJIT(instructions, debug_name);
+ #endif
+ }
+
+ // Translate Builder::Instructions to Program::Instructions used by the interpreter.
+ void Program::setupInterpreter(const std::vector<Builder::Instruction>& instructions) {
+ // Register each instruction is assigned to.
+ std::vector<Reg> reg(instructions.size());
+
+        // This next part is a bit more complicated than strictly necessary;
+ // we could just assign every live instruction to its own register.
+ //
+ // But recycling registers is fairly cheap, and good practice for the
+ // JITs where minimizing register pressure really is important.
+ //
+ // Since we have effectively infinite registers, we hoist any value we can.
+ // (The JIT may choose a more complex policy to reduce register pressure.)
+ auto hoisted = [&](Val id) { return instructions[id].can_hoist; };
+
+ fRegs = 0;
+ int live_instructions = 0;
+ std::vector<Reg> avail;
+
+ // Assign this value to a register, recycling them where we can.
+ auto assign_register = [&](Val id) {
+ live_instructions++;
+ const Builder::Instruction& inst = instructions[id];
+
+            // If this is a real input and its lifetime ends at this instruction,
+ // we can recycle the register it's occupying.
+ auto maybe_recycle_register = [&](Val input) {
+ if (input != NA
+ && instructions[input].death == id
+ && !(hoisted(input) && instructions[input].used_in_loop)) {
+ avail.push_back(reg[input]);
+ }
+ };
+
+ // Take care to not recycle the same register twice.
+ if (true ) { maybe_recycle_register(inst.x); }
+ if (inst.y != inst.x ) { maybe_recycle_register(inst.y); }
+ if (inst.z != inst.x && inst.z != inst.y) { maybe_recycle_register(inst.z); }
+
+ // Allocate a register if we have to, preferring to reuse anything available.
+ if (avail.empty()) {
+ reg[id] = fRegs++;
+ } else {
+ reg[id] = avail.back();
+ avail.pop_back();
+ }
+ };
+
+ // Assign a register to each live hoisted instruction.
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (instructions[id].death != 0 && hoisted(id)) {
+ assign_register(id);
+ }
+ }
+
+ // Assign registers to each live loop instruction.
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (instructions[id].death != 0 && !hoisted(id)) {
+ assign_register(id);
+ }
+ }
+
+ // Translate Builder::Instructions to Program::Instructions by mapping values to
+ // registers. This will be two passes, first hoisted instructions, then inside the loop.
+
+ // The loop begins at the fLoop'th Instruction.
+ fLoop = 0;
+ fInstructions.reserve(live_instructions);
+
+ // Add a dummy mapping for the N/A sentinel Val to any arbitrary register
+ // so lookups don't have to know which arguments are used by which Ops.
+ auto lookup_register = [&](Val id) {
+ return id == NA ? (Reg)0
+ : reg[id];
+ };
+
+ auto push_instruction = [&](Val id, const Builder::Instruction& inst) {
+ Program::Instruction pinst{
+ inst.op,
+ lookup_register(id),
+ lookup_register(inst.x),
+ lookup_register(inst.y),
+ {lookup_register(inst.z)},
+ };
+ if (inst.z == NA) { pinst.imm = inst.imm; }
+ fInstructions.push_back(pinst);
+ };
+
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ const Builder::Instruction& inst = instructions[id];
+ if (inst.death != 0 && hoisted(id)) {
+ push_instruction(id, inst);
+ fLoop++;
+ }
+ }
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ const Builder::Instruction& inst = instructions[id];
+ if (inst.death != 0 && !hoisted(id)) {
+ push_instruction(id, inst);
+ }
+ }
+ }
+
+#if defined(SKVM_JIT)
+
+ // Just so happens that we can translate the immediate control for our bytes() op
+ // to a single 128-bit mask that can be consumed by both AVX2 vpshufb and NEON tbl!
+ static void bytes_control(int imm, int mask[4]) {
+ auto nibble_to_vpshufb = [](uint8_t n) -> uint8_t {
+ // 0 -> 0xff, Fill with zero
+ // 1 -> 0x00, Select byte 0
+ // 2 -> 0x01, " 1
+ // 3 -> 0x02, " 2
+ // 4 -> 0x03, " 3
+ return n - 1;
+ };
+ uint8_t control[] = {
+ nibble_to_vpshufb( (imm >> 0) & 0xf ),
+ nibble_to_vpshufb( (imm >> 4) & 0xf ),
+ nibble_to_vpshufb( (imm >> 8) & 0xf ),
+ nibble_to_vpshufb( (imm >> 12) & 0xf ),
+ };
+ for (int i = 0; i < 4; i++) {
+ mask[i] = (int)control[0] << 0
+ | (int)control[1] << 8
+ | (int)control[2] << 16
+ | (int)control[3] << 24;
+
+            // Bump each byte that selects a source byte by 4 so it points
+            // into the next 32-bit lane, but leave any 0xff that fills with
+            // zero alone.
+ control[0] += control[0] == 0xff ? 0 : 4;
+ control[1] += control[1] == 0xff ? 0 : 4;
+ control[2] += control[2] == 0xff ? 0 : 4;
+ control[3] += control[3] == 0xff ? 0 : 4;
+ }
+ }
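+
+    // e.g. the identity control 0x4321 selects bytes 0,1,2,3 of each lane,
+    // producing masks 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c.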
+
+ bool Program::jit(const std::vector<Builder::Instruction>& instructions,
+ const bool try_hoisting,
+ Assembler* a) const {
+ using A = Assembler;
+
+ auto debug_dump = [&] {
+ #if 0
+ SkDebugfStream stream;
+ this->dump(&stream);
+ dump_builder_program(fOriginalProgram, &stream);
+ return true;
+ #else
+ return false;
+ #endif
+ };
+
+ #if defined(__x86_64__)
+ if (!SkCpu::Supports(SkCpu::HSW)) {
+ return false;
+ }
+ A::GP64 N = A::rdi,
+ arg[] = { A::rsi, A::rdx, A::rcx, A::r8, A::r9 };
+
+ // All 16 ymm registers are available to use.
+ using Reg = A::Ymm;
+ uint32_t avail = 0xffff;
+
+ #elif defined(__aarch64__)
+ A::X N = A::x0,
+ arg[] = { A::x1, A::x2, A::x3, A::x4, A::x5, A::x6, A::x7 };
+
+ // We can use v0-v7 and v16-v31 freely; we'd need to preserve v8-v15.
+ using Reg = A::V;
+ uint32_t avail = 0xffff00ff;
+ #endif
+
+ if (SK_ARRAY_COUNT(arg) < fStrides.size()) {
+ return false;
+ }
+
+ auto hoisted = [&](Val id) { return try_hoisting && instructions[id].can_hoist; };
+
+ std::vector<Reg> r(instructions.size());
+
+ struct LabelAndReg {
+ A::Label label;
+ Reg reg;
+ };
+ SkTHashMap<int, LabelAndReg> splats,
+ bytes_masks;
+
+ auto warmup = [&](Val id) {
+ const Builder::Instruction& inst = instructions[id];
+ if (inst.death == 0) {
+ return true;
+ }
+
+ Op op = inst.op;
+ int imm = inst.imm;
+
+ switch (op) {
+ default: break;
+
+ case Op::splat: if (!splats.find(imm)) { splats.set(imm, {}); }
+ break;
+
+ case Op::bytes: if (!bytes_masks.find(imm)) {
+ bytes_masks.set(imm, {});
+ if (try_hoisting) {
+ // vpshufb can always work with the mask from memory,
+ // but it helps to hoist the mask to a register for tbl.
+ #if defined(__aarch64__)
+ LabelAndReg* entry = bytes_masks.find(imm);
+ if (int found = __builtin_ffs(avail)) {
+ entry->reg = (Reg)(found-1);
+ avail ^= 1 << entry->reg;
+ a->ldrq(entry->reg, &entry->label);
+ } else {
+ return false;
+ }
+ #endif
+ }
+ }
+ break;
+ }
+ return true;
+ };
+
+ auto emit = [&](Val id, bool scalar) {
+ const Builder::Instruction& inst = instructions[id];
+
+            // No need to emit dead code, i.e. instructions that produce values that are never used.
+ if (inst.death == 0) {
+ return true;
+ }
+
+ Op op = inst.op;
+ Val x = inst.x,
+ y = inst.y,
+ z = inst.z;
+ int imm = inst.imm;
+
+ // Most (but not all) ops create an output value and need a register to hold it, dst.
+ // We track each instruction's dst in r[] so we can thread it through as an input
+ // to any future instructions needing that value.
+ //
+ // And some ops may need a temporary scratch register, tmp. Some need both tmp and dst.
+ //
+ // tmp and dst are very similar and can and will often be assigned the same register,
+            // but tmp may never alias any of the instruction's inputs, while dst may when this
+ // instruction consumes that input, i.e. if the input reaches its end of life here.
+ //
+ // We'll assign both registers lazily to keep register pressure as low as possible.
+ bool tmp_is_set = false,
+ dst_is_set = false;
+ Reg tmp_reg = (Reg)0; // This initial value won't matter... anything legal is fine.
+
+ bool ok = true; // Set to false if we need to assign a register and none's available.
+
+ // First lock in how to choose tmp if we need to based on the registers
+ // available before this instruction, not including any of its input registers.
+ auto tmp = [&,avail/*important, closing over avail's current value*/]{
+ if (!tmp_is_set) {
+ tmp_is_set = true;
+ if (int found = __builtin_ffs(avail)) {
+ // This is a scratch register just for this op,
+ // so we leave it marked available for future ops.
+ tmp_reg = (Reg)(found - 1);
+ } else {
+ // We needed a tmp register but couldn't find one available. :'(
+ // This will cause emit() to return false, in turn causing jit() to fail.
+ if (debug_dump()) {
+ SkDebugf("\nCould not find a register to hold tmp\n");
+ }
+ ok = false;
+ }
+ }
+ return tmp_reg;
+ };
+
+ // Now make available any registers that are consumed by this instruction.
+ // (The register pool we can pick dst from is >= the pool for tmp, adding any of these.)
+ auto maybe_recycle_register = [&](Val input) {
+ if (input != NA
+ && instructions[input].death == id
+ && !(hoisted(input) && instructions[input].used_in_loop)) {
+ avail |= 1 << r[input];
+ }
+ };
+ maybe_recycle_register(x);
+ maybe_recycle_register(y);
+ maybe_recycle_register(z);
+ // set_dst() and dst() will work read/write with this perhaps-just-updated avail.
+
+ // Some ops may decide dst on their own to best fit the instruction (see Op::mad_f32).
+ auto set_dst = [&](Reg reg){
+ SkASSERT(dst_is_set == false);
+ dst_is_set = true;
+
+ SkASSERT(avail & (1<<reg));
+ avail ^= 1<<reg;
+
+ r[id] = reg;
+ };
+
+        // This is a little weird, but if we pass the opcode_ext as if it were the dst register,
+        // the dst register as if it were x, and the x register as if it were y, all the bits
+        // end up where we want.
+ auto dst = [&]{
+ if (!dst_is_set) {
+ if (int found = __builtin_ffs(avail)) {
+ set_dst((Reg)(found-1));
+ } else {
+ // Same deal as with tmp... all the registers are occupied. Time to fail!
+ if (debug_dump()) {
+ SkDebugf("\nCould not find a register to hold value %d\n", id);
+ }
+ ok = false;
+ }
+ }
+ return r[id];
+ };
+
+ // Because we use the same logic to pick an arbitrary dst and to pick tmp,
+ // and we know that tmp will never overlap any of the inputs, `dst() == tmp()`
+ // is a simple idiom to check that the destination does not overlap any of the inputs.
+ // Sometimes we can use this knowledge to do better instruction selection.
+
+ // Ok! Keep in mind that we haven't assigned tmp or dst yet,
+ // just laid out hooks for how to do so if we need them, depending on the instruction.
+ //
+ // Now let's actually assemble the instruction!
+ switch (op) {
+ default:
+ if (debug_dump()) {
+ SkDEBUGFAILF("\n%d not yet implemented\n", op);
+ }
+ return false; // TODO: many new ops
+
+ #if defined(__x86_64__)
+ case Op::store8: if (scalar) { a->vpextrb (arg[imm], (A::Xmm)r[x], 0); }
+ else { a->vpackusdw(tmp(), r[x], r[x]);
+ a->vpermq (tmp(), tmp(), 0xd8);
+ a->vpackuswb(tmp(), tmp(), tmp());
+ a->vmovq (arg[imm], (A::Xmm)tmp()); }
+ break;
+
+ case Op::store16: if (scalar) { a->vpextrw (arg[imm], (A::Xmm)r[x], 0); }
+ else { a->vpackusdw(tmp(), r[x], r[x]);
+ a->vpermq (tmp(), tmp(), 0xd8);
+ a->vmovups (arg[imm], (A::Xmm)tmp()); }
+ break;
+
+ case Op::store32: if (scalar) { a->vmovd (arg[imm], (A::Xmm)r[x]); }
+ else { a->vmovups(arg[imm], r[x]); }
+ break;
+
+ case Op::load8: if (scalar) {
+ a->vpxor (dst(), dst(), dst());
+ a->vpinsrb((A::Xmm)dst(), (A::Xmm)dst(), arg[imm], 0);
+ } else {
+ a->vpmovzxbd(dst(), arg[imm]);
+ } break;
+
+ case Op::load16: if (scalar) {
+ a->vpxor (dst(), dst(), dst());
+ a->vpinsrw((A::Xmm)dst(), (A::Xmm)dst(), arg[imm], 0);
+ } else {
+ a->vpmovzxwd(dst(), arg[imm]);
+ } break;
+
+ case Op::load32: if (scalar) { a->vmovd ((A::Xmm)dst(), arg[imm]); }
+ else { a->vmovups( dst(), arg[imm]); }
+ break;
+
+ case Op::uniform8: a->movzbl(A::rax, arg[imm&0xffff], imm>>16);
+ a->vmovd_direct((A::Xmm)dst(), A::rax);
+ a->vbroadcastss(dst(), (A::Xmm)dst());
+ break;
+
+ case Op::uniform32: a->vbroadcastss(dst(), arg[imm&0xffff], imm>>16);
+ break;
+
+ case Op::splat: a->vbroadcastss(dst(), &splats.find(imm)->label);
+ break;
+ // TODO: many of these instructions have variants that
+                            // can read one of their arguments from 32-byte memory
+ // instead of a register. Find a way to avoid needing
+ // to splat most* constants out at all?
+ // (*Might work for x - 255 but not 255 - x, so will
+ // always need to be able to splat to a register.)
+
+ case Op::add_f32: a->vaddps(dst(), r[x], r[y]); break;
+ case Op::sub_f32: a->vsubps(dst(), r[x], r[y]); break;
+ case Op::mul_f32: a->vmulps(dst(), r[x], r[y]); break;
+ case Op::div_f32: a->vdivps(dst(), r[x], r[y]); break;
+
+ case Op::mad_f32:
+ if (avail & (1<<r[x])) { set_dst(r[x]); a->vfmadd132ps(r[x], r[z], r[y]); }
+ else if (avail & (1<<r[y])) { set_dst(r[y]); a->vfmadd213ps(r[y], r[x], r[z]); }
+ else if (avail & (1<<r[z])) { set_dst(r[z]); a->vfmadd231ps(r[z], r[x], r[y]); }
+ else { SkASSERT(dst() == tmp());
+ a->vmovdqa (dst(),r[x]);
+ a->vfmadd132ps(dst(),r[z], r[y]); }
+ break;
+
+ case Op::add_i32: a->vpaddd (dst(), r[x], r[y]); break;
+ case Op::sub_i32: a->vpsubd (dst(), r[x], r[y]); break;
+ case Op::mul_i32: a->vpmulld(dst(), r[x], r[y]); break;
+
+ case Op::sub_i16x2: a->vpsubw (dst(), r[x], r[y]); break;
+ case Op::mul_i16x2: a->vpmullw(dst(), r[x], r[y]); break;
+ case Op::shr_i16x2: a->vpsrlw (dst(), r[x], imm); break;
+
+ case Op::bit_and : a->vpand (dst(), r[x], r[y]); break;
+ case Op::bit_or : a->vpor (dst(), r[x], r[y]); break;
+ case Op::bit_xor : a->vpxor (dst(), r[x], r[y]); break;
+ case Op::bit_clear: a->vpandn(dst(), r[y], r[x]); break; // N.B. Y then X.
+ case Op::select : a->vpblendvb(dst(), r[z], r[y], r[x]); break;
+
+ case Op::shl_i32: a->vpslld(dst(), r[x], imm); break;
+ case Op::shr_i32: a->vpsrld(dst(), r[x], imm); break;
+ case Op::sra_i32: a->vpsrad(dst(), r[x], imm); break;
+
+ case Op::eq_i32: a->vpcmpeqd(dst(), r[x], r[y]); break;
+ case Op::lt_i32: a->vpcmpgtd(dst(), r[y], r[x]); break;
+ case Op::gt_i32: a->vpcmpgtd(dst(), r[x], r[y]); break;
+
+ case Op::extract: if (imm == 0) { a->vpand (dst(), r[x], r[y]); }
+ else { a->vpsrld(tmp(), r[x], imm);
+ a->vpand (dst(), tmp(), r[y]); }
+ break;
+
+ case Op::pack: a->vpslld(tmp(), r[y], imm);
+ a->vpor (dst(), tmp(), r[x]);
+ break;
+
+ case Op::to_f32: a->vcvtdq2ps (dst(), r[x]); break;
+ case Op::to_i32: a->vcvttps2dq(dst(), r[x]); break;
+
+ case Op::bytes: a->vpshufb(dst(), r[x], &bytes_masks.find(imm)->label);
+ break;
+
+ #elif defined(__aarch64__)
+ case Op::store8: a->xtns2h(tmp(), r[x]);
+ a->xtnh2b(tmp(), tmp());
+ if (scalar) { a->strb (tmp(), arg[imm]); }
+ else { a->strs (tmp(), arg[imm]); }
+ break;
+ // TODO: another case where it'd be okay to alias r[x] and tmp if r[x] dies here.
+
+ case Op::store32: if (scalar) { a->strs(r[x], arg[imm]); }
+ else { a->strq(r[x], arg[imm]); }
+ break;
+
+ case Op::load8: if (scalar) { a->ldrb(tmp(), arg[imm]); }
+ else { a->ldrs(tmp(), arg[imm]); }
+ a->uxtlb2h(tmp(), tmp());
+ a->uxtlh2s(dst(), tmp());
+ break;
+
+ case Op::load32: if (scalar) { a->ldrs(dst(), arg[imm]); }
+ else { a->ldrq(dst(), arg[imm]); }
+ break;
+
+ case Op::splat: a->ldrq(dst(), &splats.find(imm)->label);
+ break;
+ // TODO: If we hoist these, pack 4 values in each register
+ // and use vector/lane operations, cutting the register
+ // pressure cost of hoisting by 4?
+
+ case Op::add_f32: a->fadd4s(dst(), r[x], r[y]); break;
+ case Op::sub_f32: a->fsub4s(dst(), r[x], r[y]); break;
+ case Op::mul_f32: a->fmul4s(dst(), r[x], r[y]); break;
+ case Op::div_f32: a->fdiv4s(dst(), r[x], r[y]); break;
+
+ case Op::mad_f32: // fmla4s is z += x*y
+ if (avail & (1<<r[z])) { set_dst(r[z]); a->fmla4s( r[z], r[x], r[y]); }
+ else { a->orr16b(tmp(), r[z], r[z]);
+ a->fmla4s(tmp(), r[x], r[y]);
+ if(dst() != tmp()) { a->orr16b(dst(), tmp(), tmp()); } }
+ break;
+
+ case Op::add_i32: a->add4s(dst(), r[x], r[y]); break;
+ case Op::sub_i32: a->sub4s(dst(), r[x], r[y]); break;
+ case Op::mul_i32: a->mul4s(dst(), r[x], r[y]); break;
+
+ case Op::sub_i16x2: a->sub8h (dst(), r[x], r[y]); break;
+ case Op::mul_i16x2: a->mul8h (dst(), r[x], r[y]); break;
+ case Op::shr_i16x2: a->ushr8h(dst(), r[x], imm); break;
+
+ case Op::bit_and : a->and16b(dst(), r[x], r[y]); break;
+ case Op::bit_or : a->orr16b(dst(), r[x], r[y]); break;
+ case Op::bit_xor : a->eor16b(dst(), r[x], r[y]); break;
+ case Op::bit_clear: a->bic16b(dst(), r[x], r[y]); break;
+
+ case Op::select: // bsl16b is x = x ? y : z
+ if (avail & (1<<r[x])) { set_dst(r[x]); a->bsl16b( r[x], r[y], r[z]); }
+ else { a->orr16b(tmp(), r[x], r[x]);
+ a->bsl16b(tmp(), r[y], r[z]);
+ if(dst() != tmp()) { a->orr16b(dst(), tmp(), tmp()); } }
+ break;
+
+ case Op::shl_i32: a-> shl4s(dst(), r[x], imm); break;
+ case Op::shr_i32: a->ushr4s(dst(), r[x], imm); break;
+ case Op::sra_i32: a->sshr4s(dst(), r[x], imm); break;
+
+ case Op::eq_i32: a->cmeq4s(dst(), r[x], r[y]); break;
+ case Op::lt_i32: a->cmgt4s(dst(), r[y], r[x]); break;
+ case Op::gt_i32: a->cmgt4s(dst(), r[x], r[y]); break;
+
+ case Op::extract: if (imm) { a->ushr4s(tmp(), r[x], imm);
+ a->and16b(dst(), tmp(), r[y]); }
+ else { a->and16b(dst(), r[x], r[y]); }
+ break;
+
+ case Op::pack:
+ if (avail & (1<<r[x])) { set_dst(r[x]); a->sli4s ( r[x], r[y], imm); }
+ else { a->shl4s (tmp(), r[y], imm);
+ a->orr16b(dst(), tmp(), r[x]); }
+ break;
+
+ case Op::to_f32: a->scvtf4s (dst(), r[x]); break;
+ case Op::to_i32: a->fcvtzs4s(dst(), r[x]); break;
+
+ case Op::bytes:
+ if (try_hoisting) { a->tbl (dst(), r[x], bytes_masks.find(imm)->reg); }
+ else { a->ldrq(tmp(), &bytes_masks.find(imm)->label);
+ a->tbl (dst(), r[x], tmp()); }
+ break;
+ #endif
+ }
+
+ // Calls to tmp() or dst() might have flipped this false from its default true state.
+ return ok;
+ };
+
+ #if defined(__x86_64__)
+ const int K = 8;
+ auto jump_if_less = [&](A::Label* l) { a->jl (l); };
+ auto jump = [&](A::Label* l) { a->jmp(l); };
+
+ auto add = [&](A::GP64 gp, int imm) { a->add(gp, imm); };
+ auto sub = [&](A::GP64 gp, int imm) { a->sub(gp, imm); };
+
+ auto exit = [&]{ a->vzeroupper(); a->ret(); };
+ #elif defined(__aarch64__)
+ const int K = 4;
+ auto jump_if_less = [&](A::Label* l) { a->blt(l); };
+ auto jump = [&](A::Label* l) { a->b (l); };
+
+ auto add = [&](A::X gp, int imm) { a->add(gp, gp, imm); };
+ auto sub = [&](A::X gp, int imm) { a->sub(gp, gp, imm); };
+
+ auto exit = [&]{ a->ret(A::x30); };
+ #endif
+
+ A::Label body,
+ tail,
+ done;
+
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (!warmup(id)) {
+ return false;
+ }
+ if (hoisted(id) && !emit(id, /*scalar=*/false)) {
+ return false;
+ }
+ }
+
+ a->label(&body);
+ {
+ a->cmp(N, K);
+ jump_if_less(&tail);
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (!hoisted(id) && !emit(id, /*scalar=*/false)) {
+ return false;
+ }
+ }
+ for (int i = 0; i < (int)fStrides.size(); i++) {
+ if (fStrides[i]) {
+ add(arg[i], K*fStrides[i]);
+ }
+ }
+ sub(N, K);
+ jump(&body);
+ }
+
+ a->label(&tail);
+ {
+ a->cmp(N, 1);
+ jump_if_less(&done);
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (!hoisted(id) && !emit(id, /*scalar=*/true)) {
+ return false;
+ }
+ }
+ for (int i = 0; i < (int)fStrides.size(); i++) {
+ if (fStrides[i]) {
+ add(arg[i], 1*fStrides[i]);
+ }
+ }
+ sub(N, 1);
+ jump(&tail);
+ }
+
+ a->label(&done);
+ {
+ exit();
+ }
+
+ bytes_masks.foreach([&](int imm, LabelAndReg* entry) {
+ // One 16-byte pattern for ARM tbl, that same pattern twice for x86-64 vpshufb.
+ #if defined(__x86_64__)
+ a->align(32);
+ #elif defined(__aarch64__)
+ a->align(4);
+ #endif
+
+ a->label(&entry->label);
+ int mask[4];
+ bytes_control(imm, mask);
+ a->bytes(mask, sizeof(mask));
+ #if defined(__x86_64__)
+ a->bytes(mask, sizeof(mask));
+ #endif
+ });
+
+ splats.foreach([&](int imm, LabelAndReg* entry) {
+        // vbroadcastss reads 4 bytes on x86-64; on aarch64 we simply load all 16 bytes.
+ a->align(4);
+ a->label(&entry->label);
+ a->word(imm);
+ #if defined(__aarch64__)
+ a->word(imm);
+ a->word(imm);
+ a->word(imm);
+ #endif
+ });
+
+ return true;
+ }
+
+ void Program::setupJIT(const std::vector<Builder::Instruction>& instructions,
+ const char* debug_name) {
+ // Assemble with no buffer to determine a.size(), the number of bytes we'll assemble.
+ Assembler a{nullptr};
+
+ // First try allowing code hoisting (faster code)
+ // then again without if that fails (lower register pressure).
+ bool try_hoisting = true;
+ if (!this->jit(instructions, try_hoisting, &a)) {
+ try_hoisting = false;
+ if (!this->jit(instructions, try_hoisting, &a)) {
+ return;
+ }
+ }
+
+ // Allocate space that we can remap as executable.
+ const size_t page = sysconf(_SC_PAGESIZE);
+ fJITSize = ((a.size() + page - 1) / page) * page; // mprotect works at page granularity.
+ fJITBuf = mmap(nullptr,fJITSize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1,0);
+
+ // Assemble the program for real.
+ a = Assembler{fJITBuf};
+ SkAssertResult(this->jit(instructions, try_hoisting, &a));
+ SkASSERT(a.size() <= fJITSize);
+
+ // Remap as executable, and flush caches on platforms that need that.
+ mprotect(fJITBuf, fJITSize, PROT_READ|PROT_EXEC);
+ __builtin___clear_cache((char*)fJITBuf,
+ (char*)fJITBuf + fJITSize);
+ #if defined(SKVM_PERF_DUMPS)
+ this->dumpJIT(debug_name, a.size());
+ #endif
+ }
+#endif
+
+#if defined(SKVM_PERF_DUMPS)
+ void Program::dumpJIT(const char* debug_name, size_t size) const {
+ #if 0 && defined(__aarch64__)
+ if (debug_name) {
+ SkDebugf("\n%s:", debug_name);
+ }
+ // cat | llvm-mc -arch aarch64 -disassemble
+ auto cur = (const uint8_t*)fJITBuf;
+ for (int i = 0; i < (int)size; i++) {
+ if (i % 4 == 0) {
+ SkDebugf("\n");
+ }
+ SkDebugf("0x%02x ", *cur++);
+ }
+ SkDebugf("\n");
+ #endif
+
+ // We're doing some really stateful things below so one thread at a time please...
+ static SkSpinlock dump_lock;
+ SkAutoSpinlock lock(dump_lock);
+
+ auto fnv1a = [](const void* vbuf, size_t n) {
+ uint32_t hash = 2166136261;
+ for (auto buf = (const uint8_t*)vbuf; n --> 0; buf++) {
+ hash ^= *buf;
+ hash *= 16777619;
+ }
+ return hash;
+ };
+
+ char name[64];
+ uint32_t hash = fnv1a(fJITBuf, size);
+ if (debug_name) {
+ sprintf(name, "skvm-jit-%s", debug_name);
+ } else {
+ sprintf(name, "skvm-jit-%u", hash);
+ }
+
+ // Create a jit-<pid>.dump file that we can `perf inject -j` into a
+ // perf.data captured with `perf record -k 1`, letting us see each
+ // JIT'd Program as if a function named skvm-jit-<hash>. E.g.
+ //
+ // ninja -C out nanobench
+ // perf record -k 1 out/nanobench -m SkVM_4096_I32\$
+ // perf inject -j -i perf.data -o perf.data.jit
+ // perf report -i perf.data.jit
+ //
+ // Running `perf inject -j` will also dump an .so for each JIT'd
+ // program, named jitted-<pid>-<hash>.so.
+ //
+ // https://lwn.net/Articles/638566/
+ // https://v8.dev/docs/linux-perf
+ // https://cs.chromium.org/chromium/src/v8/src/diagnostics/perf-jit.cc
+ // https://lore.kernel.org/patchwork/patch/622240/
+
+ auto timestamp_ns = []() -> uint64_t {
+ // It's important to use CLOCK_MONOTONIC here so that perf can
+ // correlate our timestamps with those captured by `perf record
+ // -k 1`. That's also what `-k 1` does, by the way, tell perf
+ // record to use CLOCK_MONOTONIC.
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * (uint64_t)1e9 + ts.tv_nsec;
+ };
+
+ // We'll open the jit-<pid>.dump file and write a small header once,
+ // and just leave it open forever because we're lazy.
+ static FILE* jitdump = [&]{
+            // Must open with mode w+ so the mmap() call below works.
+ char path[64];
+ sprintf(path, "jit-%d.dump", getpid());
+ FILE* f = fopen(path, "w+");
+
+ // Calling mmap() on the file adds a "hey they mmap()'d this" record to
+ // the perf.data file that will point `perf inject -j` at this log file.
+ // Kind of a strange way to tell `perf inject` where the file is...
+ void* marker = mmap(nullptr, sysconf(_SC_PAGESIZE),
+ PROT_READ|PROT_EXEC, MAP_PRIVATE,
+ fileno(f), /*offset=*/0);
+ SkASSERT_RELEASE(marker != MAP_FAILED);
+ // Like never calling fclose(f), we'll also just always leave marker mmap()'d.
+
+ #if defined(__x86_64__)
+ const uint32_t elf_mach = 62;
+ #elif defined(__aarch64__)
+ const uint32_t elf_mach = 183;
+ #endif
+
+ struct Header {
+ uint32_t magic, version, header_size, elf_mach, reserved, pid;
+ uint64_t timestamp_us, flags;
+ } header = {
+ 0x4A695444, 1, sizeof(Header), elf_mach, 0, (uint32_t)getpid(),
+ timestamp_ns() / 1000, 0,
+ };
+ fwrite(&header, sizeof(header), 1, f);
+
+ return f;
+ }();
+
+ struct CodeLoad {
+ uint32_t event_type, event_size;
+ uint64_t timestamp_ns;
+
+ uint32_t pid, tid;
+ uint64_t vma/*???*/, code_addr, code_size, id;
+ } load = {
+ 0/*code load*/, (uint32_t)(sizeof(CodeLoad) + strlen(name) + 1 + size),
+ timestamp_ns(),
+
+ (uint32_t)getpid(), (uint32_t)SkGetThreadID(),
+ (uint64_t)fJITBuf, (uint64_t)fJITBuf, size, hash,
+ };
+
+ // Write the header, the JIT'd function name, and the JIT'd code itself.
+ fwrite(&load, sizeof(load), 1, jitdump);
+ fwrite(name, 1, strlen(name), jitdump);
+ fwrite("\0", 1, 1, jitdump);
+ fwrite(fJITBuf, 1, size, jitdump);
+ }
+#endif
+
+
+} // namespace skvm
diff --git a/gfx/skia/skia/src/core/SkVM.h b/gfx/skia/skia/src/core/SkVM.h
new file mode 100644
index 0000000000..06b189ac14
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVM.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVM_DEFINED
+#define SkVM_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTHash.h"
+#include <functional> // std::hash
+#include <vector> // std::vector
+
+class SkWStream;
+
+namespace skvm {
+
+ class Assembler {
+ public:
+ explicit Assembler(void* buf);
+
+ size_t size() const;
+
+ // Order matters... GP64, Xmm, Ymm values match 4-bit register encoding for each.
+ enum GP64 {
+ rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
+ r8 , r9 , r10, r11, r12, r13, r14, r15,
+ };
+ enum Xmm {
+ xmm0, xmm1, xmm2 , xmm3 , xmm4 , xmm5 , xmm6 , xmm7 ,
+ xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
+ };
+ enum Ymm {
+ ymm0, ymm1, ymm2 , ymm3 , ymm4 , ymm5 , ymm6 , ymm7 ,
+ ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
+ };
+
+ // X and V values match 5-bit encoding for each (nothing tricky).
+ enum X {
+ x0 , x1 , x2 , x3 , x4 , x5 , x6 , x7 ,
+ x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23,
+ x24, x25, x26, x27, x28, x29, x30, xzr,
+ };
+ enum V {
+ v0 , v1 , v2 , v3 , v4 , v5 , v6 , v7 ,
+ v8 , v9 , v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31,
+ };
+
+ void bytes(const void*, int);
+ void byte(uint8_t);
+ void word(uint32_t);
+
+ // x86-64
+
+ void align(int mod);
+
+ void vzeroupper();
+ void ret();
+
+ void add(GP64, int imm);
+ void sub(GP64, int imm);
+
+ // All dst = x op y.
+ using DstEqXOpY = void(Ymm dst, Ymm x, Ymm y);
+ DstEqXOpY vpand, vpor, vpxor, vpandn,
+ vpaddd, vpsubd, vpmulld,
+ vpsubw, vpmullw,
+ vaddps, vsubps, vmulps, vdivps,
+ vfmadd132ps, vfmadd213ps, vfmadd231ps,
+ vpackusdw, vpackuswb,
+ vpcmpeqd, vpcmpgtd;
+
+ using DstEqXOpImm = void(Ymm dst, Ymm x, int imm);
+ DstEqXOpImm vpslld, vpsrld, vpsrad,
+ vpsrlw,
+ vpermq;
+
+ using DstEqOpX = void(Ymm dst, Ymm x);
+ DstEqOpX vmovdqa, vcvtdq2ps, vcvttps2dq;
+
+ void vpblendvb(Ymm dst, Ymm x, Ymm y, Ymm z);
+
+ struct Label {
+ int offset = 0;
+ enum { None, ARMDisp19, X86Disp32 } kind = None;
+ std::vector<int> references;
+ };
+
+ Label here();
+ void label(Label*);
+
+ void jmp(Label*);
+ void je (Label*);
+ void jne(Label*);
+ void jl (Label*);
+ void cmp(GP64, int imm);
+
+ void vbroadcastss(Ymm dst, Label*);
+ void vbroadcastss(Ymm dst, Xmm src);
+ void vbroadcastss(Ymm dst, GP64 ptr, int off); // dst = *(ptr+off)
+
+ void vpshufb(Ymm dst, Ymm x, Label*);
+
+ void vmovups (Ymm dst, GP64 ptr); // dst = *ptr, 256-bit
+ void vpmovzxwd(Ymm dst, GP64 ptr); // dst = *ptr, 128-bit, each uint16_t expanded to int
+ void vpmovzxbd(Ymm dst, GP64 ptr); // dst = *ptr, 64-bit, each uint8_t expanded to int
+ void vmovd (Xmm dst, GP64 ptr); // dst = *ptr, 32-bit
+
+ void vmovups(GP64 ptr, Ymm src); // *ptr = src, 256-bit
+ void vmovups(GP64 ptr, Xmm src); // *ptr = src, 128-bit
+ void vmovq (GP64 ptr, Xmm src); // *ptr = src, 64-bit
+ void vmovd (GP64 ptr, Xmm src); // *ptr = src, 32-bit
+
+ void movzbl(GP64 dst, GP64 ptr, int off); // dst = *(ptr+off), uint8_t -> int
+ void movb (GP64 ptr, GP64 src); // *ptr = src, 8-bit
+
+ void vmovd_direct(GP64 dst, Xmm src); // dst = src, 32-bit
+ void vmovd_direct(Xmm dst, GP64 src); // dst = src, 32-bit
+
+ void vpinsrw(Xmm dst, Xmm src, GP64 ptr, int imm); // dst = src; dst[imm] = *ptr, 16-bit
+ void vpinsrb(Xmm dst, Xmm src, GP64 ptr, int imm); // dst = src; dst[imm] = *ptr, 8-bit
+
+ void vpextrw(GP64 ptr, Xmm src, int imm); // *dst = src[imm] , 16-bit
+ void vpextrb(GP64 ptr, Xmm src, int imm); // *dst = src[imm] , 8-bit
+
+ // aarch64
+
+ // d = op(n,m)
+ using DOpNM = void(V d, V n, V m);
+ DOpNM and16b, orr16b, eor16b, bic16b, bsl16b,
+ add4s, sub4s, mul4s,
+ cmeq4s, cmgt4s,
+ sub8h, mul8h,
+ fadd4s, fsub4s, fmul4s, fdiv4s,
+ tbl;
+
+ // d += n*m
+ void fmla4s(V d, V n, V m);
+
+ // d = op(n,imm)
+ using DOpNImm = void(V d, V n, int imm);
+ DOpNImm sli4s,
+ shl4s, sshr4s, ushr4s,
+ ushr8h;
+
+ // d = op(n)
+ using DOpN = void(V d, V n);
+ DOpN scvtf4s, // int -> float
+ fcvtzs4s, // truncate float -> int
+ xtns2h, // u32 -> u16
+ xtnh2b, // u16 -> u8
+ uxtlb2h, // u8 -> u16
+ uxtlh2s; // u16 -> u32
+
+ // TODO: both these platforms support rounding float->int (vcvtps2dq, fcvtns.4s)... use?
+
+ void ret (X);
+ void add (X d, X n, int imm12);
+ void sub (X d, X n, int imm12);
+ void subs(X d, X n, int imm12); // subtract setting condition flags
+
+ // There's another encoding for unconditional branches that can jump further,
+ // but this one encoded as b.al is simple to implement and should be fine.
+ void b (Label* l) { this->b(Condition::al, l); }
+ void bne(Label* l) { this->b(Condition::ne, l); }
+ void blt(Label* l) { this->b(Condition::lt, l); }
+
+ // "cmp ..." is just an assembler mnemonic for "subs xzr, ..."!
+ void cmp(X n, int imm12) { this->subs(xzr, n, imm12); }
+
+ // Compare and branch if zero/non-zero, as if
+ // cmp(t,0)
+ // beq/bne(l)
+ // but without setting condition flags.
+ void cbz (X t, Label* l);
+ void cbnz(X t, Label* l);
+
+ void ldrq(V dst, Label*); // 128-bit PC-relative load
+
+ void ldrq(V dst, X src); // 128-bit dst = *src
+ void ldrs(V dst, X src); // 32-bit dst = *src
+ void ldrb(V dst, X src); // 8-bit dst = *src
+
+ void strq(V src, X dst); // 128-bit *dst = src
+ void strs(V src, X dst); // 32-bit *dst = src
+ void strb(V src, X dst); // 8-bit *dst = src
+
+ private:
+ // dst = op(dst, imm)
+ void op(int opcode, int opcode_ext, GP64 dst, int imm);
+
+
+ // dst = op(x,y) or op(x)
+ void op(int prefix, int map, int opcode, Ymm dst, Ymm x, Ymm y, bool W=false);
+ void op(int prefix, int map, int opcode, Ymm dst, Ymm x, bool W=false) {
+            // Two-argument ops seem to pass their arguments in dst and y, forcing x to 0 so VEX.vvvv == 1111.
+ this->op(prefix, map, opcode, dst,(Ymm)0,x, W);
+ }
+
+ // dst = op(x,imm)
+ void op(int prefix, int map, int opcode, int opcode_ext, Ymm dst, Ymm x, int imm);
+
+ // dst = op(x,label) or op(label)
+ void op(int prefix, int map, int opcode, Ymm dst, Ymm x, Label* l);
+
+ // *ptr = ymm or ymm = *ptr, depending on opcode.
+ void load_store(int prefix, int map, int opcode, Ymm ymm, GP64 ptr);
+
+        // The opcode for 3-argument ops is split between hi and lo:
+ // [11 bits hi] [5 bits m] [6 bits lo] [5 bits n] [5 bits d]
+ void op(uint32_t hi, V m, uint32_t lo, V n, V d);
+
+ // 2-argument ops, with or without an immediate.
+ void op(uint32_t op22, int imm, V n, V d);
+ void op(uint32_t op22, V n, V d) { this->op(op22,0,n,d); }
+ void op(uint32_t op22, X x, V v) { this->op(op22,0,(V)x,v); }
+
+ // Order matters... value is 4-bit encoding for condition code.
+ enum class Condition { eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,al };
+ void b(Condition, Label*);
+
+ void jump(uint8_t condition, Label*);
+
+ int disp19(Label*);
+ int disp32(Label*);
+
+ uint8_t* fCode;
+ uint8_t* fCurr;
+ size_t fSize;
+ };
+
+ enum class Op : uint8_t {
+ store8, store16, store32,
+ // ↑ side effects / no side effects ↓
+
+ load8, load16, load32,
+ gather8, gather16, gather32,
+ // ↑ always varying / uniforms, constants, Just Math ↓
+
+ uniform8, uniform16, uniform32,
+ splat,
+
+ add_f32, add_i32, add_i16x2,
+ sub_f32, sub_i32, sub_i16x2,
+ mul_f32, mul_i32, mul_i16x2,
+ div_f32,
+ mad_f32,
+ shl_i32, shl_i16x2,
+ shr_i32, shr_i16x2,
+ sra_i32, sra_i16x2,
+
+ to_i32, to_f32,
+
+ eq_f32, eq_i32, eq_i16x2,
+ neq_f32, neq_i32, neq_i16x2,
+ lt_f32, lt_i32, lt_i16x2,
+ lte_f32, lte_i32, lte_i16x2,
+ gt_f32, gt_i32, gt_i16x2,
+ gte_f32, gte_i32, gte_i16x2,
+
+ bit_and,
+ bit_or,
+ bit_xor,
+ bit_clear,
+ select,
+
+ bytes, extract, pack,
+ };
+
+ using Val = int;
+ // We reserve the last Val ID as a sentinel meaning none, n/a, null, nil, etc.
+ static const Val NA = ~0;
+
+ struct Arg { int ix; };
+ struct I32 { Val id; };
+ struct F32 { Val id; };
+
+ class Program;
+
+ class Builder {
+ public:
+ struct Instruction {
+ Op op; // v* = op(x,y,z,imm), where * == index of this Instruction.
+ Val x,y,z; // Enough arguments for mad().
+ int imm; // Immediate bit pattern, shift count, argument index, etc.
+
+ // Not populated until done() has been called.
+ int death; // Index of last live instruction taking this input; live if != 0.
+ bool can_hoist; // Value independent of all loop variables?
+ bool used_in_loop; // Is the value used in the loop (or only by hoisted values)?
+ };
+
+ Program done(const char* debug_name = nullptr);
+
+ // Mostly for debugging, tests, etc.
+ std::vector<Instruction> program() const { return fProgram; }
+
+
+ // Declare an argument with given stride (use stride=0 for uniforms).
+ // TODO: different types for varying and uniforms?
+ Arg arg(int stride);
+
+ // Convenience arg() wrappers for most common strides, sizeof(T) and 0.
+ template <typename T>
+ Arg varying() { return this->arg(sizeof(T)); }
+ Arg uniform() { return this->arg(0); }
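+        // For example (hypothetical usage): varying<uint8_t>() yields an
+        // argument whose pointer advances one byte per element processed,
+        // while uniform() (stride 0) reads from the same address throughout.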
+
+ // TODO: allow uniform (i.e. Arg) offsets to store* and load*?
+ // TODO: sign extension (signed types) for <32-bit loads?
+ // TODO: unsigned integer operations where relevant (just comparisons?)?
+
+ // Store {8,16,32}-bit varying.
+ void store8 (Arg ptr, I32 val);
+ void store16(Arg ptr, I32 val);
+ void store32(Arg ptr, I32 val);
+
+ // Load u8,u16,i32 varying.
+ I32 load8 (Arg ptr);
+ I32 load16(Arg ptr);
+ I32 load32(Arg ptr);
+
+ // Gather u8,u16,i32 with varying element-count offset.
+ I32 gather8 (Arg ptr, I32 offset);
+ I32 gather16(Arg ptr, I32 offset);
+ I32 gather32(Arg ptr, I32 offset);
+
+ // Load u8,u16,i32 uniform with optional byte-count offset.
+ I32 uniform8 (Arg ptr, int offset=0);
+ I32 uniform16(Arg ptr, int offset=0);
+ I32 uniform32(Arg ptr, int offset=0);
+
+ // Load an immediate constant.
+ I32 splat(int n);
+ I32 splat(unsigned u) { return this->splat((int)u); }
+ F32 splat(float f);
+
+ // float math, comparisons, etc.
+ F32 add(F32 x, F32 y);
+ F32 sub(F32 x, F32 y);
+ F32 mul(F32 x, F32 y);
+ F32 div(F32 x, F32 y);
+ F32 mad(F32 x, F32 y, F32 z); // x*y+z, often an FMA
+
+ I32 eq (F32 x, F32 y);
+ I32 neq(F32 x, F32 y);
+ I32 lt (F32 x, F32 y);
+ I32 lte(F32 x, F32 y);
+ I32 gt (F32 x, F32 y);
+ I32 gte(F32 x, F32 y);
+
+ I32 to_i32(F32 x);
+ I32 bit_cast(F32 x) { return {x.id}; }
+
+ // int math, comparisons, etc.
+ I32 add(I32 x, I32 y);
+ I32 sub(I32 x, I32 y);
+ I32 mul(I32 x, I32 y);
+
+ I32 shl(I32 x, int bits);
+ I32 shr(I32 x, int bits);
+ I32 sra(I32 x, int bits);
+
+ I32 eq (I32 x, I32 y);
+ I32 neq(I32 x, I32 y);
+ I32 lt (I32 x, I32 y);
+ I32 lte(I32 x, I32 y);
+ I32 gt (I32 x, I32 y);
+ I32 gte(I32 x, I32 y);
+
+ F32 to_f32(I32 x);
+ F32 bit_cast(I32 x) { return {x.id}; }
+
+ // Treat each 32-bit lane as a pair of 16-bit ints.
+ I32 add_16x2(I32 x, I32 y);
+ I32 sub_16x2(I32 x, I32 y);
+ I32 mul_16x2(I32 x, I32 y);
+
+ I32 shl_16x2(I32 x, int bits);
+ I32 shr_16x2(I32 x, int bits);
+ I32 sra_16x2(I32 x, int bits);
+
+ I32 eq_16x2(I32 x, I32 y);
+ I32 neq_16x2(I32 x, I32 y);
+ I32 lt_16x2(I32 x, I32 y);
+ I32 lte_16x2(I32 x, I32 y);
+ I32 gt_16x2(I32 x, I32 y);
+ I32 gte_16x2(I32 x, I32 y);
+
+ // Bitwise operations.
+ I32 bit_and (I32 x, I32 y);
+ I32 bit_or (I32 x, I32 y);
+ I32 bit_xor (I32 x, I32 y);
+ I32 bit_clear(I32 x, I32 y); // x & ~y
+
+ I32 select(I32 cond, I32 t, I32 f); // cond ? t : f
+ F32 select(I32 cond, F32 t, F32 f) {
+ return this->bit_cast(this->select(cond, this->bit_cast(t)
+ , this->bit_cast(f)));
+ }
+
+ // More complex operations...
+
+ // Shuffle the bytes in x according to each nibble of control, as if
+ //
+ // uint8_t bytes[] = {
+ // 0,
+ // ((uint32_t)x ) & 0xff,
+ // ((uint32_t)x >> 8) & 0xff,
+ // ((uint32_t)x >> 16) & 0xff,
+ // ((uint32_t)x >> 24) & 0xff,
+ // };
+ // return (uint32_t)bytes[(control >> 0) & 0xf] << 0
+ // | (uint32_t)bytes[(control >> 4) & 0xf] << 8
+ // | (uint32_t)bytes[(control >> 8) & 0xf] << 16
+ // | (uint32_t)bytes[(control >> 12) & 0xf] << 24;
+ //
+ // So, e.g.,
+ // - bytes(x, 0x1111) splats the low byte of x to all four bytes
+ // - bytes(x, 0x4321) is x, an identity
+ // - bytes(x, 0x0000) is 0
+ // - bytes(x, 0x0404) transforms an RGBA pixel into an A0A0 bit pattern.
+ I32 bytes (I32 x, int control);
+
+ I32 extract(I32 x, int bits, I32 y); // (x >> bits) & y
+ I32 pack (I32 x, I32 y, int bits); // x | (y << bits), assuming (x & (y << bits)) == 0
+
+ void dump(SkWStream* = nullptr) const;
+
+ private:
+ struct InstructionHash {
+ template <typename T>
+ static size_t Hash(T val) {
+ return std::hash<T>{}(val);
+ }
+ size_t operator()(const Instruction& inst) const;
+ };
+
+ Val push(Op, Val x, Val y=NA, Val z=NA, int imm=0);
+ bool isZero(Val) const;
+
+ SkTHashMap<Instruction, Val, InstructionHash> fIndex;
+ std::vector<Instruction> fProgram;
+ std::vector<int> fStrides;
+ };
+
+ using Reg = int;
+
+ class Program {
+ public:
+ struct Instruction { // d = op(x, y, z/imm)
+ Op op;
+ Reg d,x,y;
+ union { Reg z; int imm; };
+ };
+
+ Program(const std::vector<Builder::Instruction>& instructions,
+ const std::vector<int> & strides,
+ const char* debug_name);
+
+ Program();
+ ~Program();
+ Program(Program&&);
+ Program& operator=(Program&&);
+ Program(const Program&) = delete;
+ Program& operator=(const Program&) = delete;
+
+ void eval(int n, void* args[]) const;
+
+ template <typename... T>
+ void eval(int n, T*... arg) const {
+ SkASSERT(sizeof...(arg) == fStrides.size());
+ // This nullptr isn't important except that it makes args[] non-empty if you pass none.
+ void* args[] = { (void*)arg..., nullptr };
+ this->eval(n, args);
+ }
+
+ std::vector<Instruction> instructions() const { return fInstructions; }
+ int nregs() const { return fRegs; }
+ int loop() const { return fLoop; }
+ bool empty() const { return fInstructions.empty(); }
+
+ bool hasJIT() const; // Has this Program been JITted?
+ void dropJIT(); // If hasJIT(), drop it, forcing interpreter fallback.
+
+ void dump(SkWStream* = nullptr) const;
+
+ private:
+ void setupInterpreter(const std::vector<Builder::Instruction>&);
+ void setupJIT (const std::vector<Builder::Instruction>&, const char* debug_name);
+
+ bool jit(const std::vector<Builder::Instruction>&,
+ bool try_hoisting,
+ Assembler*) const;
+
+ // Dump jit-*.dump files for perf inject.
+ void dumpJIT(const char* debug_name, size_t size) const;
+
+ std::vector<Instruction> fInstructions;
+ int fRegs = 0;
+ int fLoop = 0;
+ std::vector<int> fStrides;
+
+ // We only hang onto these to help debugging.
+ std::vector<Builder::Instruction> fOriginalProgram;
+
+ void* fJITBuf = nullptr;
+ size_t fJITSize = 0;
+ };
+
+ // TODO: control flow
+ // TODO: 64-bit values?
+ // TODO: SSE2/SSE4.1, AVX-512F, ARMv8.2 JITs?
+ // TODO: lower to LLVM or WebASM for comparison?
+}
+
+#endif//SkVM_DEFINED
diff --git a/gfx/skia/skia/src/core/SkVMBlitter.cpp b/gfx/skia/skia/src/core/SkVMBlitter.cpp
new file mode 100644
index 0000000000..5b767afbe2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVMBlitter.cpp
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkLRUCache.h"
+#include "src/core/SkVM.h"
+
+namespace {
+
+ enum class Coverage { Full, UniformA8, MaskA8, MaskLCD16, Mask3D };
+
+ SK_BEGIN_REQUIRE_DENSE;
+ struct Key {
+ SkColorType colorType;
+ SkAlphaType alphaType;
+ Coverage coverage;
+ SkBlendMode blendMode;
+ SkShader* shader;
+ SkColorFilter* colorFilter;
+
+ Key withCoverage(Coverage c) const {
+ Key k = *this;
+ k.coverage = c;
+ return k;
+ }
+ };
+ SK_END_REQUIRE_DENSE;
+
+ static bool operator==(const Key& x, const Key& y) {
+ return x.colorType == y.colorType
+ && x.alphaType == y.alphaType
+ && x.coverage == y.coverage
+ && x.blendMode == y.blendMode
+ && x.shader == y.shader
+ && x.colorFilter == y.colorFilter;
+ }
+
+ static SkString debug_name(const Key& key) {
+ return SkStringPrintf("CT%d-AT%d-Cov%d-Blend%d-Shader%d-CF%d",
+ key.colorType,
+ key.alphaType,
+ key.coverage,
+ key.blendMode,
+ SkToBool(key.shader),
+ SkToBool(key.colorFilter));
+ }
+
+ static bool debug_dump(const Key& key) {
+ #if 0
+ SkDebugf("%s\n", debug_name(key).c_str());
+ return true;
+ #else
+ return false;
+ #endif
+ }
+
+ static SkLRUCache<Key, skvm::Program>* try_acquire_program_cache() {
+ #if defined(SK_BUILD_FOR_IOS)
+        // iOS doesn't support thread_local on versions less than 9.0.
+        // pthread-based fallbacks must be used there. We could also use an SkSpinlock
+ // and tryAcquire()/release(), or...
+ return nullptr; // ... we could just not cache programs on those platforms.
+ #else
+ thread_local static auto* cache = new SkLRUCache<Key, skvm::Program>{8};
+ return cache;
+ #endif
+ }
+
+ static void release_program_cache() { }
+
+
+ struct Uniforms {
+ uint32_t paint_color;
+ uint8_t coverage; // Used when Coverage::UniformA8.
+ };
+
+ struct Builder : public skvm::Builder {
+ //using namespace skvm;
+
+ struct Color { skvm::I32 r,g,b,a; };
+
+
+ skvm::I32 inv(skvm::I32 x) {
+ return sub(splat(255), x);
+ }
+
+ // TODO: provide this in skvm::Builder, with a custom NEON impl.
+ skvm::I32 div255(skvm::I32 v) {
+ // This should be a bit-perfect version of (v+127)/255,
+ // implemented as (v + ((v+128)>>8) + 128)>>8.
+ skvm::I32 v128 = add(v, splat(128));
+ return shr(add(v128, shr(v128, 8)), 8);
+ }
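+        // A hypothetical sanity sketch (not part of the build): the
+        // bit-perfect claim above can be verified exhaustively, since the
+        // products fed in are at most 255*255:
+        //
+        //   for (int v = 0; v <= 255*255; v++) {
+        //       int fast = (v + ((v + 128) >> 8) + 128) >> 8;
+        //       SkASSERT(fast == (v + 127) / 255);
+        //   }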
+
+ skvm::I32 mix(skvm::I32 x, skvm::I32 y, skvm::I32 t) {
+ return div255(add(mul(x, inv(t)),
+ mul(y, t )));
+ }
+
+ Color unpack_8888(skvm::I32 rgba) {
+ return {
+ extract(rgba, 0, splat(0xff)),
+ extract(rgba, 8, splat(0xff)),
+ extract(rgba, 16, splat(0xff)),
+ extract(rgba, 24, splat(0xff)),
+ };
+ }
+
+ skvm::I32 pack_8888(Color c) {
+ return pack(pack(c.r, c.g, 8),
+ pack(c.b, c.a, 8), 16);
+ }
+
+ Color unpack_565(skvm::I32 bgr) {
+ // N.B. kRGB_565_SkColorType is named confusingly;
+ // blue is in the low bits and red the high.
+ skvm::I32 r = extract(bgr, 11, splat(0b011'111)),
+ g = extract(bgr, 5, splat(0b111'111)),
+ b = extract(bgr, 0, splat(0b011'111));
+ return {
+ // Scale 565 up to 888.
+ bit_or(shl(r, 3), shr(r, 2)),
+ bit_or(shl(g, 2), shr(g, 4)),
+ bit_or(shl(b, 3), shr(b, 2)),
+ splat(0xff),
+ };
+ }
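+        // Worked example for the scaling above: a maximal 5-bit channel,
+        // r = 0b11111 (31), becomes (31 << 3) | (31 >> 2) = 248 | 7 = 255,
+        // so full intensity survives the 565 -> 888 expansion exactly.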
+
+ skvm::I32 pack_565(Color c) {
+ skvm::I32 r = div255(mul(c.r, splat(31))),
+ g = div255(mul(c.g, splat(63))),
+ b = div255(mul(c.b, splat(31)));
+ return pack(pack(b, g,5), r,11);
+ }
+
+ // TODO: add native min/max ops to skvm::Builder
+ skvm::I32 min(skvm::I32 x, skvm::I32 y) { return select(lt(x,y), x,y); }
+ skvm::I32 max(skvm::I32 x, skvm::I32 y) { return select(gt(x,y), x,y); }
+
+ static bool CanBuild(const Key& key) {
+ // These checks parallel the TODOs in Builder::Builder().
+ if (key.shader) { return false; }
+ if (key.colorFilter) { return false; }
+
+ switch (key.colorType) {
+ default: return false;
+ case kRGB_565_SkColorType: break;
+ case kRGBA_8888_SkColorType: break;
+ case kBGRA_8888_SkColorType: break;
+ }
+
+ if (key.alphaType == kUnpremul_SkAlphaType) { return false; }
+
+ switch (key.blendMode) {
+ default: return false;
+ case SkBlendMode::kSrc: break;
+ case SkBlendMode::kSrcOver: break;
+ }
+
+ return true;
+ }
+
+ explicit Builder(const Key& key) {
+ #define TODO SkUNREACHABLE
+ SkASSERT(CanBuild(key));
+ skvm::Arg uniforms = uniform(),
+ dst_ptr = arg(SkColorTypeBytesPerPixel(key.colorType));
+ // When coverage is MaskA8 or MaskLCD16 there will be one more mask varying,
+ // and when coverage is Mask3D there will be three more mask varyings.
+
+
+ // When there's no shader and no color filter, the source color is the paint color.
+ if (key.shader) { TODO; }
+ if (key.colorFilter) { TODO; }
+ Color src = unpack_8888(uniform32(uniforms, offsetof(Uniforms, paint_color)));
+
+ // There are several orderings here of when we load dst and coverage
+ // and how coverage is applied, and to complicate things, LCD coverage
+ // needs to know dst.a. We're careful to assert it's loaded in time.
+ Color dst;
+ SkDEBUGCODE(bool dst_loaded = false;)
+
+ // load_coverage() returns false when there's no need to apply coverage.
+ auto load_coverage = [&](Color* cov) {
+ switch (key.coverage) {
+ case Coverage::Full: return false;
+
+ case Coverage::UniformA8: cov->r = cov->g = cov->b = cov->a =
+ uniform8(uniforms, offsetof(Uniforms, coverage));
+ return true;
+
+ case Coverage::MaskA8: cov->r = cov->g = cov->b = cov->a =
+ load8(varying<uint8_t>());
+ return true;
+
+ case Coverage::MaskLCD16:
+ SkASSERT(dst_loaded);
+ *cov = unpack_565(load16(varying<uint16_t>()));
+ cov->a = select(lt(src.a, dst.a), min(cov->r, min(cov->g,cov->b))
+ , max(cov->r, max(cov->g,cov->b)));
+ return true;
+
+ case Coverage::Mask3D: TODO;
+ }
+ // GCC insists...
+ return false;
+ };
+
+ // The math for some blend modes lets us fold coverage into src before the blend,
+ // obviating the need for the lerp afterwards. This early-coverage strategy tends
+            // both to be faster and to require fewer registers.
+ bool lerp_coverage_post_blend = true;
+ if (SkBlendMode_ShouldPreScaleCoverage(key.blendMode,
+ key.coverage == Coverage::MaskLCD16)) {
+ Color cov;
+ if (load_coverage(&cov)) {
+ src.r = div255(mul(src.r, cov.r));
+ src.g = div255(mul(src.g, cov.g));
+ src.b = div255(mul(src.b, cov.b));
+ src.a = div255(mul(src.a, cov.a));
+ }
+ lerp_coverage_post_blend = false;
+ }
+
+ // Load up the destination color.
+ SkDEBUGCODE(dst_loaded = true;)
+ switch (key.colorType) {
+ default: TODO;
+ case kRGB_565_SkColorType: dst = unpack_565 (load16(dst_ptr)); break;
+ case kRGBA_8888_SkColorType: dst = unpack_8888(load32(dst_ptr)); break;
+ case kBGRA_8888_SkColorType: dst = unpack_8888(load32(dst_ptr));
+ std::swap(dst.r, dst.b);
+ break;
+ }
+
+ // When a destination is tagged opaque, we may assume it both starts and stays fully
+ // opaque, ignoring any math that disagrees. So anything involving force_opaque is
+ // optional, and sometimes helps cut a small amount of work in these programs.
+ const bool force_opaque = true && key.alphaType == kOpaque_SkAlphaType;
+ if (force_opaque) { dst.a = splat(0xff); }
+
+ // We'd need to premul dst after loading and unpremul before storing.
+ if (key.alphaType == kUnpremul_SkAlphaType) { TODO; }
+
+ // Blend src and dst.
+ switch (key.blendMode) {
+ default: TODO;
+
+ case SkBlendMode::kSrc: break;
+
+ case SkBlendMode::kSrcOver: {
+ src.r = add(src.r, div255(mul(dst.r, inv(src.a))));
+ src.g = add(src.g, div255(mul(dst.g, inv(src.a))));
+ src.b = add(src.b, div255(mul(dst.b, inv(src.a))));
+ src.a = add(src.a, div255(mul(dst.a, inv(src.a))));
+ } break;
+ }
+
+ // Lerp with coverage post-blend if needed.
+ Color cov;
+ if (lerp_coverage_post_blend && load_coverage(&cov)) {
+ src.r = mix(dst.r, src.r, cov.r);
+ src.g = mix(dst.g, src.g, cov.g);
+ src.b = mix(dst.b, src.b, cov.b);
+ src.a = mix(dst.a, src.a, cov.a);
+ }
+
+ if (force_opaque) { src.a = splat(0xff); }
+
+ // Store back to the destination.
+ switch (key.colorType) {
+ default: SkUNREACHABLE;
+
+ case kRGB_565_SkColorType: store16(dst_ptr, pack_565(src)); break;
+
+ case kBGRA_8888_SkColorType: std::swap(src.r, src.b); // fallthrough
+ case kRGBA_8888_SkColorType: store32(dst_ptr, pack_8888(src)); break;
+ }
+ #undef TODO
+ }
+ };
+
+ class Blitter final : public SkBlitter {
+ public:
+ bool ok = false;
+
+ Blitter(const SkPixmap& device, const SkPaint& paint)
+ : fDevice(device)
+ , fKey {
+ device.colorType(),
+ device.alphaType(),
+ Coverage::Full,
+ paint.getBlendMode(),
+ paint.getShader(),
+ paint.getColorFilter(),
+ }
+ {
+ SkColor4f color = paint.getColor4f();
+ SkColorSpaceXformSteps{sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ device.colorSpace(), kUnpremul_SkAlphaType}.apply(color.vec());
+
+ if (color.fitsInBytes() && Builder::CanBuild(fKey)) {
+ fUniforms.paint_color = color.premul().toBytes_RGBA();
+ ok = true;
+ }
+ }
+
+ ~Blitter() override {
+ if (SkLRUCache<Key, skvm::Program>* cache = try_acquire_program_cache()) {
+ auto cache_program = [&](skvm::Program&& program, Coverage coverage) {
+ if (!program.empty()) {
+ Key key = fKey.withCoverage(coverage);
+ if (skvm::Program* found = cache->find(key)) {
+ *found = std::move(program);
+ } else {
+ cache->insert(key, std::move(program));
+ }
+ }
+ };
+ cache_program(std::move(fBlitH), Coverage::Full);
+ cache_program(std::move(fBlitAntiH), Coverage::UniformA8);
+ cache_program(std::move(fBlitMaskA8), Coverage::MaskA8);
+ cache_program(std::move(fBlitMaskLCD16), Coverage::MaskLCD16);
+
+ release_program_cache();
+ }
+ }
+
+ private:
+ SkPixmap fDevice; // TODO: can this be const&?
+ const Key fKey;
+ Uniforms fUniforms;
+ skvm::Program fBlitH,
+ fBlitAntiH,
+ fBlitMaskA8,
+ fBlitMaskLCD16;
+
+ skvm::Program buildProgram(Coverage coverage) {
+ Key key = fKey.withCoverage(coverage);
+ {
+ skvm::Program p;
+ if (SkLRUCache<Key, skvm::Program>* cache = try_acquire_program_cache()) {
+ if (skvm::Program* found = cache->find(key)) {
+ p = std::move(*found);
+ }
+ release_program_cache();
+ }
+ if (!p.empty()) {
+ return p;
+ }
+ }
+ #if 0
+ static std::atomic<int> done{0};
+ if (0 == done++) {
+ atexit([]{ SkDebugf("%d calls to done\n", done.load()); });
+ }
+ #endif
+ Builder builder{key};
+ skvm::Program program = builder.done(debug_name(key).c_str());
+ if (!program.hasJIT() && debug_dump(key)) {
+ SkDebugf("\nfalling back to interpreter for blitter with this key.\n");
+ builder.dump();
+ program.dump();
+ }
+ return program;
+ }
+
+ void blitH(int x, int y, int w) override {
+ if (fBlitH.empty()) {
+ fBlitH = this->buildProgram(Coverage::Full);
+ }
+ fBlitH.eval(w, &fUniforms, fDevice.addr(x,y));
+ }
+
+ void blitAntiH(int x, int y, const SkAlpha cov[], const int16_t runs[]) override {
+ if (fBlitAntiH.empty()) {
+ fBlitAntiH = this->buildProgram(Coverage::UniformA8);
+ }
+ for (int16_t run = *runs; run > 0; run = *runs) {
+ fUniforms.coverage = *cov;
+ fBlitAntiH.eval(run, &fUniforms, fDevice.addr(x,y));
+
+ x += run;
+ runs += run;
+ cov += run;
+ }
+ }
+
+ void blitMask(const SkMask& mask, const SkIRect& clip) override {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ // TODO: native BW masks?
+ return SkBlitter::blitMask(mask, clip);
+ }
+
+ const skvm::Program* program = nullptr;
+ switch (mask.fFormat) {
+ default: SkUNREACHABLE; // ARGB and SDF masks shouldn't make it here.
+
+ case SkMask::k3D_Format: // TODO: the mul and add 3D mask planes too
+ case SkMask::kA8_Format:
+ if (fBlitMaskA8.empty()) {
+ fBlitMaskA8 = this->buildProgram(Coverage::MaskA8);
+ }
+ program = &fBlitMaskA8;
+ break;
+
+ case SkMask::kLCD16_Format:
+ if (fBlitMaskLCD16.empty()) {
+ fBlitMaskLCD16 = this->buildProgram(Coverage::MaskLCD16);
+ }
+ program = &fBlitMaskLCD16;
+ break;
+ }
+
+ SkASSERT(program);
+ if (program) {
+ for (int y = clip.top(); y < clip.bottom(); y++) {
+ program->eval(clip.width(),
+ &fUniforms,
+ fDevice.addr(clip.left(), y),
+ mask.getAddr(clip.left(), y));
+ }
+ }
+ }
+ };
+
+} // namespace
+
+
+SkBlitter* SkCreateSkVMBlitter(const SkPixmap& device,
+ const SkPaint& paint,
+ const SkMatrix& ctm,
+ SkArenaAlloc* alloc) {
+ auto blitter = alloc->make<Blitter>(device, paint);
+ return blitter->ok ? blitter
+ : nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkValidatingReadBuffer.h b/gfx/skia/skia/src/core/SkValidatingReadBuffer.h
new file mode 100644
index 0000000000..887c621f84
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidatingReadBuffer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkValidatingReadBuffer_DEFINED
+#define SkValidatingReadBuffer_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkReader32.h"
+#include "src/core/SkWriteBuffer.h"
+
+class SkBitmap;
+
+// DEPRECATED -- just use SkReadBuffer (so we can delete this header)
+typedef SkReadBuffer SkValidatingReadBuffer;
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkValidationUtils.h b/gfx/skia/skia/src/core/SkValidationUtils.h
new file mode 100644
index 0000000000..16dfdb3199
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidationUtils.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkValidationUtils_DEFINED
+#define SkValidationUtils_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "src/core/SkXfermodePriv.h"
+
+/** Returns true if mode's value is in the SkBlendMode enum.
+ */
+static inline bool SkIsValidMode(SkBlendMode mode) {
+ return (unsigned)mode <= (unsigned)SkBlendMode::kLastMode;
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_MaxS32
+ */
+static inline bool SkIsValidIRect(const SkIRect& rect) {
+ return rect.width() >= 0 && rect.height() >= 0;
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_ScalarMax
+ */
+static inline bool SkIsValidRect(const SkRect& rect) {
+ return (rect.fLeft <= rect.fRight) &&
+ (rect.fTop <= rect.fBottom) &&
+ SkScalarIsFinite(rect.width()) &&
+ SkScalarIsFinite(rect.height());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkVertState.cpp b/gfx/skia/skia/src/core/SkVertState.cpp
new file mode 100644
index 0000000000..d10a23ddde
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkVertState.h"
+
+bool VertState::Triangles(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TrianglesX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TriangleStrip(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = index + 2;
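+    // Swap the first two corners on odd-numbered triangles so that winding
+    // order stays consistent across the strip.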
+ if (index & 1) {
+ state->f0 = index + 1;
+ state->f1 = index + 0;
+ } else {
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleStripX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = indices[index + 2];
+ if (index & 1) {
+ state->f0 = indices[index + 1];
+ state->f1 = indices[index + 0];
+ } else {
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFan(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFanX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+VertState::Proc VertState::chooseProc(SkVertices::VertexMode mode) {
+ switch (mode) {
+ case SkVertices::kTriangles_VertexMode:
+ return fIndices ? TrianglesX : Triangles;
+ case SkVertices::kTriangleStrip_VertexMode:
+ return fIndices ? TriangleStripX : TriangleStrip;
+ case SkVertices::kTriangleFan_VertexMode:
+ return fIndices ? TriangleFanX : TriangleFan;
+ default:
+ return nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkVertState.h b/gfx/skia/skia/src/core/SkVertState.h
new file mode 100644
index 0000000000..fb981b7c2e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVertState_DEFINED
+#define SkVertState_DEFINED
+
+#include "include/core/SkVertices.h"
+
+/** \struct VertState
+ This is a helper for drawVertices(). It is used to iterate over the triangles
+    that are to be rendered based on an SkVertices::VertexMode and (optionally) an
+    index array. It does not copy the index array, so the client must ensure it
+    remains valid for the lifetime of the VertState object.
+*/
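+
+/*  A hypothetical usage sketch (assuming the caller already has vertex data,
+    an optional index array, and a vertex mode):
+
+        VertState state(vertexCount, indices, indexCount);
+        VertState::Proc proc = state.chooseProc(mode);
+        while (proc(&state)) {
+            // f0, f1, f2 now index the corners of one triangle.
+        }
+*/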
+
+struct VertState {
+ int f0, f1, f2;
+
+ /**
+ * Construct a VertState from a vertex count, index array, and index count.
+ * If the vertices are unindexed pass nullptr for indices.
+ */
+ VertState(int vCount, const uint16_t indices[], int indexCount)
+ : fIndices(indices) {
+ fCurrIndex = 0;
+ if (indices) {
+ fCount = indexCount;
+ } else {
+ fCount = vCount;
+ }
+ }
+
+ typedef bool (*Proc)(VertState*);
+
+ /**
+ * Choose an appropriate function to traverse the vertices.
+     * @param mode Specifies the SkVertices::VertexMode.
+ */
+ Proc chooseProc(SkVertices::VertexMode mode);
+
+private:
+ int fCount;
+ int fCurrIndex;
+ const uint16_t* fIndices;
+
+ static bool Triangles(VertState*);
+ static bool TrianglesX(VertState*);
+ static bool TriangleStrip(VertState*);
+ static bool TriangleStripX(VertState*);
+ static bool TriangleFan(VertState*);
+ static bool TriangleFanX(VertState*);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkVertices.cpp b/gfx/skia/skia/src/core/SkVertices.cpp
new file mode 100644
index 0000000000..dc2070372c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertices.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkVertices.h"
+
+#include "include/core/SkData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkReader32.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkSafeRange.h"
+#include "src/core/SkWriter32.h"
+#include <atomic>
+#include <new>
+
+static int32_t next_id() {
+ static std::atomic<int32_t> nextID{1};
+
+ int32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+struct SkVertices::Sizes {
+ Sizes(SkVertices::VertexMode mode, int vertexCount, int indexCount, bool hasTexs,
+ bool hasColors, bool hasBones) {
+ SkSafeMath safe;
+
+ fVSize = safe.mul(vertexCount, sizeof(SkPoint));
+ fTSize = hasTexs ? safe.mul(vertexCount, sizeof(SkPoint)) : 0;
+ fCSize = hasColors ? safe.mul(vertexCount, sizeof(SkColor)) : 0;
+ fBISize = hasBones ? safe.mul(vertexCount, sizeof(BoneIndices)) : 0;
+ fBWSize = hasBones ? safe.mul(vertexCount, sizeof(BoneWeights)) : 0;
+
+ fBuilderTriFanISize = 0;
+ fISize = safe.mul(indexCount, sizeof(uint16_t));
+ if (kTriangleFan_VertexMode == mode) {
+ int numFanTris = 0;
+ if (indexCount) {
+ fBuilderTriFanISize = fISize;
+ numFanTris = indexCount - 2;
+ } else {
+ numFanTris = vertexCount - 2;
+ // By forcing this to become indexed we are adding a constraint to the maximum
+ // number of vertices.
+ if (vertexCount > (SkTo<int>(UINT16_MAX) + 1)) {
+ sk_bzero(this, sizeof(*this));
+ return;
+ }
+ }
+ if (numFanTris <= 0) {
+ sk_bzero(this, sizeof(*this));
+ return;
+ }
+ fISize = safe.mul(numFanTris, 3 * sizeof(uint16_t));
+ }
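+        // Worked example (hypothetical numbers): an unindexed fan over five
+        // vertices becomes triangles {0,1,2} {0,2,3} {0,3,4}, i.e.
+        // numFanTris = vertexCount - 2 = 3, so fISize above is
+        // 3 * 3 * sizeof(uint16_t) = 18 bytes after conversion.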
+
+ fTotal = safe.add(sizeof(SkVertices),
+ safe.add(fVSize,
+ safe.add(fTSize,
+ safe.add(fCSize,
+ safe.add(fBISize,
+ safe.add(fBWSize,
+ fISize))))));
+
+ if (safe.ok()) {
+ fArrays = fTotal - sizeof(SkVertices); // just the sum of the arrays
+ } else {
+ sk_bzero(this, sizeof(*this));
+ }
+ }
+
+ bool isValid() const { return fTotal != 0; }
+
+ size_t fTotal; // size of entire SkVertices allocation (obj + arrays)
+ size_t fArrays; // size of all the arrays (V + T + C + BI + BW + I)
+ size_t fVSize;
+ size_t fTSize;
+ size_t fCSize;
+ size_t fBISize;
+ size_t fBWSize;
+ size_t fISize;
+
+    // For indexed tri-fans this is the amount of space for indices needed in the builder
+ // before conversion to indexed triangles (or zero if not indexed or not a triangle fan).
+ size_t fBuilderTriFanISize;
+};
+
+SkVertices::Builder::Builder(VertexMode mode, int vertexCount, int indexCount,
+ uint32_t builderFlags) {
+ bool hasTexs = SkToBool(builderFlags & SkVertices::kHasTexCoords_BuilderFlag);
+ bool hasColors = SkToBool(builderFlags & SkVertices::kHasColors_BuilderFlag);
+ bool hasBones = SkToBool(builderFlags & SkVertices::kHasBones_BuilderFlag);
+ bool isVolatile = !SkToBool(builderFlags & SkVertices::kIsNonVolatile_BuilderFlag);
+ this->init(mode, vertexCount, indexCount, isVolatile,
+ SkVertices::Sizes(mode, vertexCount, indexCount, hasTexs, hasColors, hasBones));
+}
+
+SkVertices::Builder::Builder(VertexMode mode, int vertexCount, int indexCount, bool isVolatile,
+ const SkVertices::Sizes& sizes) {
+ this->init(mode, vertexCount, indexCount, isVolatile, sizes);
+}
+
+void SkVertices::Builder::init(VertexMode mode, int vertexCount, int indexCount, bool isVolatile,
+ const SkVertices::Sizes& sizes) {
+ if (!sizes.isValid()) {
+ return; // fVertices will already be null
+ }
+
+ void* storage = ::operator new (sizes.fTotal);
+ if (sizes.fBuilderTriFanISize) {
+ fIntermediateFanIndices.reset(new uint8_t[sizes.fBuilderTriFanISize]);
+ }
+
+ fVertices.reset(new (storage) SkVertices);
+
+ // need to point past the object to store the arrays
+ char* ptr = (char*)storage + sizeof(SkVertices);
+
+ fVertices->fPositions = (SkPoint*)ptr; ptr += sizes.fVSize;
+ fVertices->fTexs = sizes.fTSize ? (SkPoint*)ptr : nullptr; ptr += sizes.fTSize;
+ fVertices->fColors = sizes.fCSize ? (SkColor*)ptr : nullptr; ptr += sizes.fCSize;
+ fVertices->fBoneIndices = sizes.fBISize ? (BoneIndices*) ptr : nullptr; ptr += sizes.fBISize;
+ fVertices->fBoneWeights = sizes.fBWSize ? (BoneWeights*) ptr : nullptr; ptr += sizes.fBWSize;
+ fVertices->fIndices = sizes.fISize ? (uint16_t*)ptr : nullptr;
+ fVertices->fVertexCnt = vertexCount;
+ fVertices->fIndexCnt = indexCount;
+ fVertices->fIsVolatile = isVolatile;
+ fVertices->fMode = mode;
+
+ // We defer assigning fBounds and fUniqueID until detach() is called
+}
+
+sk_sp<SkVertices> SkVertices::Builder::detach() {
+ if (fVertices) {
+ fVertices->fBounds.setBounds(fVertices->fPositions, fVertices->fVertexCnt);
+ if (fVertices->fMode == kTriangleFan_VertexMode) {
+ if (fIntermediateFanIndices.get()) {
+ SkASSERT(fVertices->fIndexCnt);
+ auto tempIndices = this->indices();
+ for (int t = 0; t < fVertices->fIndexCnt - 2; ++t) {
+ fVertices->fIndices[3 * t + 0] = tempIndices[0];
+ fVertices->fIndices[3 * t + 1] = tempIndices[t + 1];
+ fVertices->fIndices[3 * t + 2] = tempIndices[t + 2];
+ }
+ fVertices->fIndexCnt = 3 * (fVertices->fIndexCnt - 2);
+ } else {
+ SkASSERT(!fVertices->fIndexCnt);
+ for (int t = 0; t < fVertices->fVertexCnt - 2; ++t) {
+ fVertices->fIndices[3 * t + 0] = 0;
+ fVertices->fIndices[3 * t + 1] = SkToU16(t + 1);
+ fVertices->fIndices[3 * t + 2] = SkToU16(t + 2);
+ }
+ fVertices->fIndexCnt = 3 * (fVertices->fVertexCnt - 2);
+ }
+ fVertices->fMode = kTriangles_VertexMode;
+ }
+ fVertices->fUniqueID = next_id();
+ return std::move(fVertices); // this will null fVertices after the return
+ }
+ return nullptr;
+}
+
+int SkVertices::Builder::vertexCount() const {
+ return fVertices ? fVertices->vertexCount() : 0;
+}
+
+int SkVertices::Builder::indexCount() const {
+ return fVertices ? fVertices->indexCount() : 0;
+}
+
+bool SkVertices::Builder::isVolatile() const {
+ return fVertices ? fVertices->isVolatile() : true;
+}
+
+SkPoint* SkVertices::Builder::positions() {
+ return fVertices ? const_cast<SkPoint*>(fVertices->positions()) : nullptr;
+}
+
+SkPoint* SkVertices::Builder::texCoords() {
+ return fVertices ? const_cast<SkPoint*>(fVertices->texCoords()) : nullptr;
+}
+
+SkColor* SkVertices::Builder::colors() {
+ return fVertices ? const_cast<SkColor*>(fVertices->colors()) : nullptr;
+}
+
+SkVertices::BoneIndices* SkVertices::Builder::boneIndices() {
+ return fVertices ? const_cast<BoneIndices*>(fVertices->boneIndices()) : nullptr;
+}
+
+SkVertices::BoneWeights* SkVertices::Builder::boneWeights() {
+ return fVertices ? const_cast<BoneWeights*>(fVertices->boneWeights()) : nullptr;
+}
+
+uint16_t* SkVertices::Builder::indices() {
+ if (!fVertices) {
+ return nullptr;
+ }
+ if (fIntermediateFanIndices) {
+ return reinterpret_cast<uint16_t*>(fIntermediateFanIndices.get());
+ }
+ return const_cast<uint16_t*>(fVertices->indices());
+}
+
+/** Makes a copy of the SkVertices and applies a set of bones, then returns the deformed
+ vertices.
+
+ @param bones The bones to apply.
+ @param boneCount The number of bones.
+ @return The transformed SkVertices.
+*/
+sk_sp<SkVertices> SkVertices::applyBones(const SkVertices::Bone bones[], int boneCount) const {
+ // If there aren't any bones, then nothing changes.
+ // We don't check if the SkVertices object has bone indices/weights because there is the case
+ // where the object can have no indices/weights but still have a world transform applied.
+ if (!bones || !boneCount) {
+ return sk_ref_sp(this);
+ }
+ SkASSERT(boneCount >= 1);
+
+ // Copy the SkVertices.
+ sk_sp<SkVertices> copy = SkVertices::MakeCopy(this->mode(),
+ this->vertexCount(),
+ this->positions(),
+ this->texCoords(),
+ this->colors(),
+ nullptr,
+ nullptr,
+ this->indexCount(),
+ this->indices());
+
+ // Transform the positions.
+ for (int i = 0; i < this->vertexCount(); i++) {
+ SkPoint& position = copy->fPositions[i];
+
+ // Apply the world transform.
+ position = bones[0].mapPoint(position);
+
+ // Apply the bone deformations.
+ if (boneCount > 1) {
+ SkASSERT(this->boneIndices());
+ SkASSERT(this->boneWeights());
+
+ SkPoint result = SkPoint::Make(0.0f, 0.0f);
+ const SkVertices::BoneIndices& indices = this->boneIndices()[i];
+ const SkVertices::BoneWeights& weights = this->boneWeights()[i];
+ for (int j = 0; j < 4; j++) {
+ int index = indices[j];
+ float weight = weights[j];
+ if (index == 0 || weight == 0.0f) {
+ continue;
+ }
+ SkASSERT(index < boneCount);
+
+ // result += M * v * w.
+ result += bones[index].mapPoint(position) * weight;
+ }
+ position = result;
+ }
+ }
+
+ // Recalculate the bounds.
+ copy->fBounds.setBounds(copy->fPositions, copy->fVertexCnt);
+
+ return copy;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkVertices> SkVertices::MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint pos[], const SkPoint texs[],
+ const SkColor colors[],
+ const BoneIndices boneIndices[],
+ const BoneWeights boneWeights[],
+ int indexCount, const uint16_t indices[],
+ bool isVolatile) {
+ SkASSERT((!boneIndices && !boneWeights) || (boneIndices && boneWeights));
+ Sizes sizes(mode,
+ vertexCount,
+ indexCount,
+ texs != nullptr,
+ colors != nullptr,
+ boneIndices != nullptr);
+ if (!sizes.isValid()) {
+ return nullptr;
+ }
+
+ Builder builder(mode, vertexCount, indexCount, isVolatile, sizes);
+ SkASSERT(builder.isValid());
+
+ sk_careful_memcpy(builder.positions(), pos, sizes.fVSize);
+ sk_careful_memcpy(builder.texCoords(), texs, sizes.fTSize);
+ sk_careful_memcpy(builder.colors(), colors, sizes.fCSize);
+ sk_careful_memcpy(builder.boneIndices(), boneIndices, sizes.fBISize);
+ sk_careful_memcpy(builder.boneWeights(), boneWeights, sizes.fBWSize);
+ size_t isize = (mode == kTriangleFan_VertexMode) ? sizes.fBuilderTriFanISize : sizes.fISize;
+ sk_careful_memcpy(builder.indices(), indices, isize);
+
+ return builder.detach();
+}
+
+size_t SkVertices::approximateSize() const {
+ Sizes sizes(fMode,
+ fVertexCnt,
+ fIndexCnt,
+ this->hasTexCoords(),
+ this->hasColors(),
+ this->hasBones());
+ SkASSERT(sizes.isValid());
+ return sizeof(SkVertices) + sizes.fArrays;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// storage = packed | vertex_count | index_count | pos[] | texs[] | colors[] | boneIndices[] |
+// boneWeights[] | indices[]
+// = header + arrays
+
+#define kMode_Mask 0x0FF
+#define kHasTexs_Mask 0x100
+#define kHasColors_Mask 0x200
+#define kHasBones_Mask 0x400
+#define kIsNonVolatile_Mask 0x800
+#define kHeaderSize (3 * sizeof(uint32_t))
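+
+// For example (hypothetical values): a kTriangles_VertexMode mesh with
+// per-vertex colors but no tex coords, bones, or non-volatile flag packs to
+// (kHasColors_Mask | (uint32_t)kTriangles_VertexMode) == 0x200.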
+
+sk_sp<SkData> SkVertices::encode() const {
+    // packed has room for additional flags in the future (e.g. versioning)
+ uint32_t packed = static_cast<uint32_t>(fMode);
+ SkASSERT((packed & ~kMode_Mask) == 0); // our mode fits in the mask bits
+ if (this->hasTexCoords()) {
+ packed |= kHasTexs_Mask;
+ }
+ if (this->hasColors()) {
+ packed |= kHasColors_Mask;
+ }
+ if (this->hasBones()) {
+ packed |= kHasBones_Mask;
+ }
+ if (!this->isVolatile()) {
+ packed |= kIsNonVolatile_Mask;
+ }
+
+ Sizes sizes(fMode,
+ fVertexCnt,
+ fIndexCnt,
+ this->hasTexCoords(),
+ this->hasColors(),
+ this->hasBones());
+ SkASSERT(sizes.isValid());
+ SkASSERT(!sizes.fBuilderTriFanISize);
+ // need to force alignment to 4 for SkWriter32 -- will pad w/ 0s as needed
+ const size_t size = SkAlign4(kHeaderSize + sizes.fArrays);
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ SkWriter32 writer(data->writable_data(), data->size());
+
+ writer.write32(packed);
+ writer.write32(fVertexCnt);
+ writer.write32(fIndexCnt);
+ writer.write(fPositions, sizes.fVSize);
+ writer.write(fTexs, sizes.fTSize);
+ writer.write(fColors, sizes.fCSize);
+ writer.write(fBoneIndices, sizes.fBISize);
+ writer.write(fBoneWeights, sizes.fBWSize);
+    // if index-count is odd, we won't be 4-byte aligned, so we call the pad version
+ writer.writePad(fIndices, sizes.fISize);
+
+ return data;
+}
+
+sk_sp<SkVertices> SkVertices::Decode(const void* data, size_t length) {
+ if (length < kHeaderSize) {
+ return nullptr;
+ }
+
+ SkReader32 reader(data, length);
+ SkSafeRange safe;
+
+ const uint32_t packed = reader.readInt();
+ const int vertexCount = safe.checkGE(reader.readInt(), 0);
+ const int indexCount = safe.checkGE(reader.readInt(), 0);
+ const VertexMode mode = safe.checkLE<VertexMode>(packed & kMode_Mask,
+ SkVertices::kLast_VertexMode);
+ if (!safe) {
+ return nullptr;
+ }
+ const bool hasTexs = SkToBool(packed & kHasTexs_Mask);
+ const bool hasColors = SkToBool(packed & kHasColors_Mask);
+ const bool hasBones = SkToBool(packed & kHasBones_Mask);
+ const bool isVolatile = !SkToBool(packed & kIsNonVolatile_Mask);
+ Sizes sizes(mode, vertexCount, indexCount, hasTexs, hasColors, hasBones);
+ if (!sizes.isValid()) {
+ return nullptr;
+ }
+ // logically we can be only 2-byte aligned, but our buffer is always 4-byte aligned
+ if (SkAlign4(kHeaderSize + sizes.fArrays) != length) {
+ return nullptr;
+ }
+
+ Builder builder(mode, vertexCount, indexCount, isVolatile, sizes);
+
+ reader.read(builder.positions(), sizes.fVSize);
+ reader.read(builder.texCoords(), sizes.fTSize);
+ reader.read(builder.colors(), sizes.fCSize);
+ reader.read(builder.boneIndices(), sizes.fBISize);
+ reader.read(builder.boneWeights(), sizes.fBWSize);
+ size_t isize = (mode == kTriangleFan_VertexMode) ? sizes.fBuilderTriFanISize : sizes.fISize;
+ reader.read(builder.indices(), isize);
+ if (indexCount > 0) {
+        // validate that the indices are in range
+ SkASSERT(indexCount == builder.indexCount());
+ const uint16_t* indices = builder.indices();
+ for (int i = 0; i < indexCount; ++i) {
+ if (indices[i] >= (unsigned)vertexCount) {
+ return nullptr;
+ }
+ }
+ }
+ return builder.detach();
+}
+
+void SkVertices::operator delete(void* p)
+{
+ ::operator delete(p);
+}
diff --git a/gfx/skia/skia/src/core/SkVptr.h b/gfx/skia/skia/src/core/SkVptr.h
new file mode 100644
index 0000000000..2a33ad06c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVptr.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVptr_DEFINED
+#define SkVptr_DEFINED
+
+#include <string.h>
+#include <type_traits>
+
+// Experimentally, see if we can get at the vptr of objects that have one.
+
+template <typename T>
+static inline void* SkVptr(const T& object) {
+ static_assert(std::has_virtual_destructor<T>::value, "");
+ void* vptr;
+ memcpy(&vptr, (const void*)&object, sizeof(vptr));
+ return vptr;
+}
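+
+// A hypothetical usage sketch (assumes the ABI stores the vptr at offset 0,
+// as the Itanium C++ ABI does for polymorphic classes):
+//
+//   struct Base { virtual ~Base() {} };
+//   struct Derived final : Base {};
+//   Derived a, b;
+//   SkASSERT(SkVptr(a) == SkVptr(b));   // same dynamic type, same vptr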
+
+#endif//SkVptr_DEFINED
diff --git a/gfx/skia/skia/src/core/SkWriteBuffer.cpp b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
new file mode 100644
index 0000000000..f12099ab16
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkWriteBuffer.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPtrRecorder.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer()
+ : fFactorySet(nullptr)
+ , fTFSet(nullptr) {
+}
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer(void* storage, size_t storageSize)
+ : fFactorySet(nullptr)
+ , fTFSet(nullptr)
+ , fWriter(storage, storageSize)
+{}
+
+SkBinaryWriteBuffer::~SkBinaryWriteBuffer() {}
+
+bool SkBinaryWriteBuffer::usingInitialStorage() const {
+ return fWriter.usingInitialStorage();
+}
+
+void SkBinaryWriteBuffer::writeByteArray(const void* data, size_t size) {
+ fWriter.write32(SkToU32(size));
+ fWriter.writePad(data, size);
+}
+
+void SkBinaryWriteBuffer::writeBool(bool value) {
+ fWriter.writeBool(value);
+}
+
+void SkBinaryWriteBuffer::writeScalar(SkScalar value) {
+ fWriter.writeScalar(value);
+}
+
+void SkBinaryWriteBuffer::writeScalarArray(const SkScalar* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(SkScalar));
+}
+
+void SkBinaryWriteBuffer::writeInt(int32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeIntArray(const int32_t* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(int32_t));
+}
+
+void SkBinaryWriteBuffer::writeUInt(uint32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeString(const char* value) {
+ fWriter.writeString(value);
+}
+
+void SkBinaryWriteBuffer::writeColor(SkColor color) {
+ fWriter.write32(color);
+}
+
+void SkBinaryWriteBuffer::writeColorArray(const SkColor* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor));
+}
+
+void SkBinaryWriteBuffer::writeColor4f(const SkColor4f& color) {
+ fWriter.write(&color, sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writeColor4fArray(const SkColor4f* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writePoint(const SkPoint& point) {
+ fWriter.writeScalar(point.fX);
+ fWriter.writeScalar(point.fY);
+}
+
+void SkBinaryWriteBuffer::writePoint3(const SkPoint3& point) {
+ this->writePad32(&point, sizeof(SkPoint3));
+}
+
+void SkBinaryWriteBuffer::writePointArray(const SkPoint* point, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(point, count * sizeof(SkPoint));
+}
+
+void SkBinaryWriteBuffer::writeMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkBinaryWriteBuffer::writeIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(SkIRect));
+}
+
+void SkBinaryWriteBuffer::writeRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkBinaryWriteBuffer::writeRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkBinaryWriteBuffer::writePath(const SkPath& path) {
+ fWriter.writePath(path);
+}
+
+size_t SkBinaryWriteBuffer::writeStream(SkStream* stream, size_t length) {
+ fWriter.write32(SkToU32(length));
+ size_t bytesWritten = fWriter.readFromStream(stream, length);
+ if (bytesWritten < length) {
+ fWriter.reservePad(length - bytesWritten);
+ }
+ return bytesWritten;
+}
+
+bool SkBinaryWriteBuffer::writeToStream(SkWStream* stream) const {
+ return fWriter.writeToStream(stream);
+}
+
+/* Format:
+ * (subset) bounds
+ * size (31bits)
+ * data [ encoded, with raw width/height ]
+ */
+void SkBinaryWriteBuffer::writeImage(const SkImage* image) {
+ const SkIRect bounds = SkImage_getSubset(image);
+ this->writeIRect(bounds);
+
+ sk_sp<SkData> data;
+ if (fProcs.fImageProc) {
+ data = fProcs.fImageProc(const_cast<SkImage*>(image), fProcs.fImageCtx);
+ }
+ if (!data) {
+ data = image->encodeToData();
+ }
+
+ size_t size = data ? data->size() : 0;
+ if (!SkTFitsIn<int32_t>(size)) {
+ size = 0; // too big to store
+ }
+ this->write32(SkToS32(size)); // writing 0 signals failure
+ if (size) {
+ this->writePad32(data->data(), size);
+ }
+}
+
+void SkBinaryWriteBuffer::writeTypeface(SkTypeface* obj) {
+ // Write 32 bits (signed)
+ // 0 -- default font
+ // >0 -- index
+ // <0 -- custom (serial procs)
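+    //
+    // A hypothetical reader-side sketch of this scheme (assumed; not the
+    // actual SkReadBuffer code):
+    //
+    //   int32_t v = buffer.read32();
+    //   if (v == 0)     { /* use the default typeface */ }
+    //   else if (v > 0) { /* index into the deserializer's typeface set */ }
+    //   else            { /* -v bytes of custom serialized data follow */ }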
+
+ if (obj == nullptr) {
+ fWriter.write32(0);
+ } else if (fProcs.fTypefaceProc) {
+ auto data = fProcs.fTypefaceProc(obj, fProcs.fTypefaceCtx);
+ if (data) {
+ size_t size = data->size();
+ if (!SkTFitsIn<int32_t>(size)) {
+ size = 0; // fall back to default font
+ }
+ int32_t ssize = SkToS32(size);
+ fWriter.write32(-ssize); // negative to signal custom
+ if (size) {
+ this->writePad32(data->data(), size);
+ }
+ return;
+ }
+        // no data means we fall through to the standard behavior
+ }
+ fWriter.write32(fTFSet ? fTFSet->add(obj) : 0);
+}
+
+void SkBinaryWriteBuffer::writePaint(const SkPaint& paint) {
+ SkPaintPriv::Flatten(paint, *this);
+}
+
+void SkBinaryWriteBuffer::setFactoryRecorder(sk_sp<SkFactorySet> rec) {
+ fFactorySet = std::move(rec);
+}
+
+void SkBinaryWriteBuffer::setTypefaceRecorder(sk_sp<SkRefCntSet> rec) {
+ fTFSet = std::move(rec);
+}
+
+void SkBinaryWriteBuffer::writeFlattenable(const SkFlattenable* flattenable) {
+ if (nullptr == flattenable) {
+ this->write32(0);
+ return;
+ }
+
+ /*
+ * We can write 1 of 2 versions of the flattenable:
+ * 1. index into fFactorySet : This assumes the writer will later
+ * resolve the function-ptrs into strings for its reader. SkPicture
+ * does exactly this, by writing a table of names (matching the indices)
+ * up front in its serialized form.
+ * 2. string name of the flattenable or index into fFlattenableDict: We
+ * store the string to allow the reader to specify its own factories
+ * after write time. In order to improve compression, if we have
+ * already written the string, we write its index instead.
+ */
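+
+    // A hypothetical reader-side sketch for version 2 (assumed; not the
+    // actual SkReadBuffer code). Because the first byte of an index record
+    // is zero on little-endian (see below), a reader can distinguish the
+    // two cases from the leading byte:
+    //
+    //   uint32_t first4 = peek32(buffer);              // hypothetical helper
+    //   if ((first4 & 0xff) == 0) { /* index: first4 >> 8 */ }
+    //   else                      { /* NUL-terminated factory name */ }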
+
+ SkFlattenable::Factory factory = flattenable->getFactory();
+ SkASSERT(factory);
+
+ if (fFactorySet) {
+ this->write32(fFactorySet->add(factory));
+ } else {
+
+ if (uint32_t* indexPtr = fFlattenableDict.find(factory)) {
+ // We will write the index as a 32-bit int. We want the first byte
+ // that we send to be zero - this will act as a sentinel that we
+            // have an index (not a string). This means that we will send
+            // the index shifted left by 8. The remaining 24 bits should be
+ // plenty to store the index. Note that this strategy depends on
+ // being little endian.
+ SkASSERT(0 == *indexPtr >> 24);
+ this->write32(*indexPtr << 8);
+ } else {
+ const char* name = flattenable->getTypeName();
+ SkASSERT(name);
+ // Otherwise write the string. Clients should not use the empty
+ // string as a name, or we will have a problem.
+ SkASSERT(0 != strcmp("", name));
+ this->writeString(name);
+
+ // Add key to dictionary.
+ fFlattenableDict.set(factory, fFlattenableDict.count() + 1);
+ }
+ }
+
+ // make room for the size of the flattened object
+ (void)fWriter.reserve(sizeof(uint32_t));
+ // record the current size, so we can subtract after the object writes.
+ size_t offset = fWriter.bytesWritten();
+ // now flatten the object
+ flattenable->flatten(*this);
+ size_t objSize = fWriter.bytesWritten() - offset;
+ // record the obj's size
+ fWriter.overwriteTAt(offset - sizeof(uint32_t), SkToU32(objSize));
+}
diff --git a/gfx/skia/skia/src/core/SkWriteBuffer.h b/gfx/skia/skia/src/core/SkWriteBuffer.h
new file mode 100644
index 0000000000..5bd58e9156
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriteBuffer.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWriteBuffer_DEFINED
+#define SkWriteBuffer_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkWriter32.h"
+
+class SkFactorySet;
+class SkImage;
+class SkPath;
+class SkRefCntSet;
+
+class SkWriteBuffer {
+public:
+ SkWriteBuffer() {}
+ virtual ~SkWriteBuffer() {}
+
+ virtual void writePad32(const void* buffer, size_t bytes) = 0;
+
+ virtual void writeByteArray(const void* data, size_t size) = 0;
+ void writeDataAsByteArray(SkData* data) {
+ this->writeByteArray(data->data(), data->size());
+ }
+ virtual void writeBool(bool value) = 0;
+ virtual void writeScalar(SkScalar value) = 0;
+ virtual void writeScalarArray(const SkScalar* value, uint32_t count) = 0;
+ virtual void writeInt(int32_t value) = 0;
+ virtual void writeIntArray(const int32_t* value, uint32_t count) = 0;
+ virtual void writeUInt(uint32_t value) = 0;
+ void write32(int32_t value) {
+ this->writeInt(value);
+ }
+ virtual void writeString(const char* value) = 0;
+
+ virtual void writeFlattenable(const SkFlattenable* flattenable) = 0;
+ virtual void writeColor(SkColor color) = 0;
+ virtual void writeColorArray(const SkColor* color, uint32_t count) = 0;
+ virtual void writeColor4f(const SkColor4f& color) = 0;
+ virtual void writeColor4fArray(const SkColor4f* color, uint32_t count) = 0;
+ virtual void writePoint(const SkPoint& point) = 0;
+ virtual void writePointArray(const SkPoint* point, uint32_t count) = 0;
+ virtual void writePoint3(const SkPoint3& point) = 0;
+ virtual void writeMatrix(const SkMatrix& matrix) = 0;
+ virtual void writeIRect(const SkIRect& rect) = 0;
+ virtual void writeRect(const SkRect& rect) = 0;
+ virtual void writeRegion(const SkRegion& region) = 0;
+ virtual void writePath(const SkPath& path) = 0;
+ virtual size_t writeStream(SkStream* stream, size_t length) = 0;
+ virtual void writeImage(const SkImage*) = 0;
+ virtual void writeTypeface(SkTypeface* typeface) = 0;
+ virtual void writePaint(const SkPaint& paint) = 0;
+
+ void setSerialProcs(const SkSerialProcs& procs) { fProcs = procs; }
+
+protected:
+ SkSerialProcs fProcs;
+
+ friend class SkPicturePriv; // fProcs
+};
+
+/**
+ * Concrete implementation that serializes to a flat binary blob.
+ */
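+//
+// A minimal usage sketch (illustrative only):
+//
+//   SkBinaryWriteBuffer buffer;
+//   buffer.writeInt(42);
+//   buffer.writeString("name");
+//   SkDynamicMemoryWStream stream;
+//   buffer.writeToStream(&stream);   // the flat blob now holds both records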
+class SkBinaryWriteBuffer : public SkWriteBuffer {
+public:
+ SkBinaryWriteBuffer();
+ SkBinaryWriteBuffer(void* initialStorage, size_t storageSize);
+ ~SkBinaryWriteBuffer() override;
+
+ void write(const void* buffer, size_t bytes) {
+ fWriter.write(buffer, bytes);
+ }
+ void writePad32(const void* buffer, size_t bytes) override {
+ fWriter.writePad(buffer, bytes);
+ }
+
+ void reset(void* storage = nullptr, size_t storageSize = 0) {
+ fWriter.reset(storage, storageSize);
+ }
+
+ size_t bytesWritten() const { return fWriter.bytesWritten(); }
+
+ // Returns true iff all of the bytes written so far are stored in the initial storage
+ // buffer provided in the constructor or the most recent call to reset.
+ bool usingInitialStorage() const;
+
+ void writeByteArray(const void* data, size_t size) override;
+ void writeBool(bool value) override;
+ void writeScalar(SkScalar value) override;
+ void writeScalarArray(const SkScalar* value, uint32_t count) override;
+ void writeInt(int32_t value) override;
+ void writeIntArray(const int32_t* value, uint32_t count) override;
+ void writeUInt(uint32_t value) override;
+ void writeString(const char* value) override;
+
+ void writeFlattenable(const SkFlattenable* flattenable) override;
+ void writeColor(SkColor color) override;
+ void writeColorArray(const SkColor* color, uint32_t count) override;
+ void writeColor4f(const SkColor4f& color) override;
+ void writeColor4fArray(const SkColor4f* color, uint32_t count) override;
+ void writePoint(const SkPoint& point) override;
+ void writePointArray(const SkPoint* point, uint32_t count) override;
+ void writePoint3(const SkPoint3& point) override;
+ void writeMatrix(const SkMatrix& matrix) override;
+ void writeIRect(const SkIRect& rect) override;
+ void writeRect(const SkRect& rect) override;
+ void writeRegion(const SkRegion& region) override;
+ void writePath(const SkPath& path) override;
+ size_t writeStream(SkStream* stream, size_t length) override;
+ void writeImage(const SkImage*) override;
+ void writeTypeface(SkTypeface* typeface) override;
+ void writePaint(const SkPaint& paint) override;
+
+ bool writeToStream(SkWStream*) const;
+ void writeToMemory(void* dst) const { fWriter.flatten(dst); }
+
+ void setFactoryRecorder(sk_sp<SkFactorySet>);
+ void setTypefaceRecorder(sk_sp<SkRefCntSet>);
+
+private:
+ sk_sp<SkFactorySet> fFactorySet;
+ sk_sp<SkRefCntSet> fTFSet;
+
+ SkWriter32 fWriter;
+
+ // Only used if we do not have an fFactorySet
+ SkTHashMap<SkFlattenable::Factory, uint32_t> fFlattenableDict;
+};
+
+#endif // SkWriteBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkWritePixelsRec.h b/gfx/skia/skia/src/core/SkWritePixelsRec.h
new file mode 100644
index 0000000000..4f02c4e9de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWritePixelsRec.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWritePixelsRec_DEFINED
+#define SkWritePixelsRec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+
+/**
+ * Helper class to package and trim the parameters passed to writePixels()
+ */
+struct SkWritePixelsRec {
+ SkWritePixelsRec(const SkImageInfo& info, const void* pixels, size_t rowBytes, int x, int y)
+ : fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fInfo(info)
+ , fX(x)
+ , fY(y)
+ {}
+
+ SkWritePixelsRec(const SkPixmap& pm, int x, int y)
+ : fPixels(pm.addr())
+ , fRowBytes(pm.rowBytes())
+ , fInfo(pm.info())
+ , fX(x)
+ , fY(y)
+ {}
+
+ const void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+ int fX;
+ int fY;
+
+    /*
+     *  If trim() returns true, it may have modified its fields (except fRowBytes) to make
+     *  this a legal subset of the specified dst width/height.
+     *
+     *  If it returns false, it leaves this unchanged, indicating that the rec does not
+     *  overlap dst, or is not valid (e.g. bad fInfo) for writePixels().
+     */
+ bool trim(int dstWidth, int dstHeight);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkWriter32.cpp b/gfx/skia/skia/src/core/SkWriter32.cpp
new file mode 100644
index 0000000000..cab2812962
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriter32.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkReader32.h"
+
+#include "src/core/SkWriter32.h"
+
+void SkWriter32::writeMatrix(const SkMatrix& matrix) {
+ size_t size = SkMatrixPriv::WriteToMemory(matrix, nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ SkMatrixPriv::WriteToMemory(matrix, this->reserve(size));
+}
+
+/*
+ * Strings are stored as: length[4-bytes] + string_data + '\0' + pad_to_mul_4
+ */
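+//
+// A worked example of the layout: writeString("abc") reserves 4 + 3 + 1 = 8
+// bytes and emits
+//   03 00 00 00  'a' 'b' 'c' '\0'
+// while writeString("abcd") needs SkAlign4(4 + 4 + 1) = 12 bytes, the last
+// three of which are zero padding after the terminating '\0'.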
+
+const char* SkReader32::readString(size_t* outLen) {
+ size_t len = this->readU32();
+ const void* ptr = this->peek();
+
+ // skip over the string + '\0' and then pad to a multiple of 4
+ size_t alignedSize = SkAlign4(len + 1);
+ this->skip(alignedSize);
+
+ if (outLen) {
+ *outLen = len;
+ }
+ return (const char*)ptr;
+}
+
+size_t SkReader32::readIntoString(SkString* copy) {
+ size_t len;
+ const char* ptr = this->readString(&len);
+ if (copy) {
+ copy->set(ptr, len);
+ }
+ return len;
+}
+
+void SkWriter32::writeString(const char str[], size_t len) {
+ if (nullptr == str) {
+ str = "";
+ len = 0;
+ }
+ if ((long)len < 0) {
+ len = strlen(str);
+ }
+
+ // [ 4 byte len ] [ str ... ] [1 - 4 \0s]
+ uint32_t* ptr = this->reservePad(sizeof(uint32_t) + len + 1);
+ *ptr = SkToU32(len);
+ char* chars = (char*)(ptr + 1);
+ memcpy(chars, str, len);
+ chars[len] = '\0';
+}
+
+size_t SkWriter32::WriteStringSize(const char* str, size_t len) {
+ if ((long)len < 0) {
+ SkASSERT(str);
+ len = strlen(str);
+ }
+ const size_t lenBytes = 4; // we use 4 bytes to record the length
+ // add 1 since we also write a terminating 0
+ return SkAlign4(lenBytes + len + 1);
+}
+
+void SkWriter32::growToAtLeast(size_t size) {
+ const bool wasExternal = (fExternal != nullptr) && (fData == fExternal);
+
+ fCapacity = 4096 + SkTMax(size, fCapacity + (fCapacity / 2));
+ fInternal.realloc(fCapacity);
+ fData = fInternal.get();
+
+ if (wasExternal) {
+ // we were external, so copy in the data
+ memcpy(fData, fExternal, fUsed);
+ }
+}
+
+sk_sp<SkData> SkWriter32::snapshotAsData() const {
+ return SkData::MakeWithCopy(fData, fUsed);
+}
diff --git a/gfx/skia/skia/src/core/SkWriter32.h b/gfx/skia/skia/src/core/SkWriter32.h
new file mode 100644
index 0000000000..3f7d305357
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriter32.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWriter32_DEFINED
+#define SkWriter32_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+
+class SK_API SkWriter32 : SkNoncopyable {
+public:
+ /**
+ * The caller can specify an initial block of storage, which the caller manages.
+ *
+     *  SkWriter32 will try to service reserve and write calls from this external storage until
+     *  the first time an allocation doesn't fit; from then on it uses dynamically allocated storage.
+ * This used to be optional behavior, but pipe now relies on it.
+ */
+ SkWriter32(void* external = nullptr, size_t externalBytes = 0) {
+ this->reset(external, externalBytes);
+ }
+
+ // return the current offset (will always be a multiple of 4)
+ size_t bytesWritten() const { return fUsed; }
+
+ // Returns true iff all of the bytes written so far are stored in the initial storage
+ // buffer provided in the constructor or the most recent call to reset.
+ bool usingInitialStorage() const { return fData == fExternal; }
+
+ void reset(void* external = nullptr, size_t externalBytes = 0) {
+ // we cast this pointer to int* and float* at times, so assert that it is aligned.
+ SkASSERT(SkIsAlign4((uintptr_t)external));
+ // we always write multiples of 4-bytes, so truncate down the size to match that
+ externalBytes &= ~3;
+
+ fData = (uint8_t*)external;
+ fCapacity = externalBytes;
+ fUsed = 0;
+ fExternal = external;
+ }
+
+ // size MUST be multiple of 4
+ uint32_t* reserve(size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ size_t offset = fUsed;
+ size_t totalRequired = fUsed + size;
+ if (totalRequired > fCapacity) {
+ this->growToAtLeast(totalRequired);
+ }
+ fUsed = totalRequired;
+ return (uint32_t*)(fData + offset);
+ }
+
+ /**
+ * Read a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ const T& readTAt(size_t offset) const {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ return *(T*)(fData + offset);
+ }
+
+ /**
+ * Overwrite a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ void overwriteTAt(size_t offset, const T& value) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ *(T*)(fData + offset) = value;
+ }
+
+ bool writeBool(bool value) {
+ this->write32(value);
+ return value;
+ }
+
+ void writeInt(int32_t value) {
+ this->write32(value);
+ }
+
+ void write8(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFF;
+ }
+
+ void write16(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFFFF;
+ }
+
+ void write32(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writePtr(void* value) {
+ // this->reserve() only returns 4-byte aligned pointers,
+ // so this may be an under-aligned write if we were to do this like the others.
+ memcpy(this->reserve(sizeof(value)), &value, sizeof(value));
+ }
+
+ void writeScalar(SkScalar value) {
+ *(SkScalar*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writePoint(const SkPoint& pt) {
+ *(SkPoint*)this->reserve(sizeof(pt)) = pt;
+ }
+
+ void writePoint3(const SkPoint3& pt) {
+ *(SkPoint3*)this->reserve(sizeof(pt)) = pt;
+ }
+
+ void writeRect(const SkRect& rect) {
+ *(SkRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeIRect(const SkIRect& rect) {
+ *(SkIRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeRRect(const SkRRect& rrect) {
+ rrect.writeToMemory(this->reserve(SkRRect::kSizeInMemory));
+ }
+
+ void writePath(const SkPath& path) {
+ size_t size = path.writeToMemory(nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ path.writeToMemory(this->reserve(size));
+ }
+
+ void writeMatrix(const SkMatrix& matrix);
+
+ void writeRegion(const SkRegion& rgn) {
+ size_t size = rgn.writeToMemory(nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ rgn.writeToMemory(this->reserve(size));
+ }
+
+ // write count bytes (must be a multiple of 4)
+ void writeMul4(const void* values, size_t size) {
+ this->write(values, size);
+ }
+
+ /**
+ * Write size bytes from values. size must be a multiple of 4, though
+ * values need not be 4-byte aligned.
+ */
+ void write(const void* values, size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ sk_careful_memcpy(this->reserve(size), values, size);
+ }
+
+ /**
+ * Reserve size bytes. Does not need to be 4 byte aligned. The remaining space (if any) will be
+ * filled in with zeroes.
+ */
+ uint32_t* reservePad(size_t size) {
+ size_t alignedSize = SkAlign4(size);
+ uint32_t* p = this->reserve(alignedSize);
+ if (alignedSize != size) {
+ SkASSERT(alignedSize >= 4);
+ p[alignedSize / 4 - 1] = 0;
+ }
+ return p;
+ }
+
+ /**
+ * Write size bytes from src, and pad to 4 byte alignment with zeroes.
+ */
+ void writePad(const void* src, size_t size) {
+ sk_careful_memcpy(this->reservePad(size), src, size);
+ }
+
+ /**
+ * Writes a string to the writer, which can be retrieved with
+ * SkReader32::readString().
+ * The length can be specified, or if -1 is passed, it will be computed by
+ * calling strlen(). The length must be < max size_t.
+ *
+ * If you write NULL, it will be read as "".
+ */
+ void writeString(const char* str, size_t len = (size_t)-1);
+
+ /**
+     *  Computes the size (aligned to a multiple of 4) needed to write the string
+ * in a call to writeString(). If the length is not specified, it will be
+ * computed by calling strlen().
+ */
+ static size_t WriteStringSize(const char* str, size_t len = (size_t)-1);
+
+ void writeData(const SkData* data) {
+ uint32_t len = data ? SkToU32(data->size()) : 0;
+ this->write32(len);
+ if (data) {
+ this->writePad(data->data(), len);
+ }
+ }
+
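+    // e.g. for a 5-byte SkData, writeData() emits 4 (length) + SkAlign4(5) = 12 bytes.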
+ static size_t WriteDataSize(const SkData* data) {
+ return 4 + SkAlign4(data ? data->size() : 0);
+ }
+
+ /**
+ * Move the cursor back to offset bytes from the beginning.
+ * offset must be a multiple of 4 no greater than size().
+ */
+ void rewindToOffset(size_t offset) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset <= bytesWritten());
+ fUsed = offset;
+ }
+
+ // copy into a single buffer (allocated by caller). Must be at least size()
+ void flatten(void* dst) const {
+ memcpy(dst, fData, fUsed);
+ }
+
+ bool writeToStream(SkWStream* stream) const {
+ return stream->write(fData, fUsed);
+ }
+
+ // read from the stream, and write up to length bytes. Return the actual
+ // number of bytes written.
+ size_t readFromStream(SkStream* stream, size_t length) {
+ return stream->read(this->reservePad(length), length);
+ }
+
+ /**
+ * Captures a snapshot of the data as it is right now, and return it.
+ */
+ sk_sp<SkData> snapshotAsData() const;
+private:
+ void growToAtLeast(size_t size);
+
+ uint8_t* fData; // Points to either fInternal or fExternal.
+ size_t fCapacity; // Number of bytes we can write to fData.
+ size_t fUsed; // Number of bytes written.
+ void* fExternal; // Unmanaged memory block.
+ SkAutoTMalloc<uint8_t> fInternal; // Managed memory block.
+};
+
+/**
+ *  Helper class to allocate SIZE bytes as part of the writer, and to provide
+ * that storage to the constructor as its initial storage buffer.
+ *
+ * This wrapper ensures proper alignment rules are met for the storage.
+ */
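+//
+// For example (an illustrative sketch), a writer with 1KB of on-stack storage
+// that only touches the heap once more than 1024 bytes have been written:
+//
+//   SkSWriter32<1024> writer;
+//   writer.write32(42);
+//   SkASSERT(writer.usingInitialStorage());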
+template <size_t SIZE> class SkSWriter32 : public SkWriter32 {
+public:
+ SkSWriter32() { this->reset(); }
+
+    void reset() { this->INHERITED::reset(fData.fStorage, SIZE); }
+
+private:
+ union {
+ void* fPtrAlignment;
+ double fDoubleAlignment;
+ char fStorage[SIZE];
+ } fData;
+
+ typedef SkWriter32 INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkXfermode.cpp b/gfx/skia/skia/src/core/SkXfermode.cpp
new file mode 100644
index 0000000000..71a13dcc3a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermode.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkOnce.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodePriv.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/effects/GrCustomXfermode.h"
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkProcCoeffXfermode : public SkXfermode {
+public:
+ SkProcCoeffXfermode(SkBlendMode mode) : fMode(mode) {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const override {
+ SkASSERT(dst && src && count >= 0);
+
+ SkRasterPipeline_<256> p;
+
+ SkRasterPipeline_MemoryCtx dst_ctx = { (void*)dst, 0 },
+ src_ctx = { (void*)src, 0 },
+ aa_ctx = { (void*)aa, 0 };
+
+ p.append_load (kN32_SkColorType, &src_ctx);
+ p.append_load_dst(kN32_SkColorType, &dst_ctx);
+
+ if (SkBlendMode_ShouldPreScaleCoverage(fMode, /*rgb_coverage=*/false)) {
+ if (aa) {
+ p.append(SkRasterPipeline::scale_u8, &aa_ctx);
+ }
+ SkBlendMode_AppendStages(fMode, &p);
+ } else {
+ SkBlendMode_AppendStages(fMode, &p);
+ if (aa) {
+ p.append(SkRasterPipeline::lerp_u8, &aa_ctx);
+ }
+ }
+
+ p.append_store(kN32_SkColorType, &dst_ctx);
+        p.run(0, 0, count, 1);
+ }
+
+private:
+ const SkBlendMode fMode;
+
+ typedef SkXfermode INHERITED;
+};
+
+const char* SkBlendMode_Name(SkBlendMode mode) {
+ SkASSERT((unsigned) mode <= (unsigned)SkBlendMode::kLastMode);
+ const char* gModeStrings[] = {
+ "Clear", "Src", "Dst", "SrcOver", "DstOver", "SrcIn", "DstIn",
+ "SrcOut", "DstOut", "SrcATop", "DstATop", "Xor", "Plus",
+ "Modulate", "Screen", "Overlay", "Darken", "Lighten", "ColorDodge",
+ "ColorBurn", "HardLight", "SoftLight", "Difference", "Exclusion",
+ "Multiply", "Hue", "Saturation", "Color", "Luminosity"
+ };
+    static_assert(SK_ARRAY_COUNT(gModeStrings) == (size_t)SkBlendMode::kLastMode + 1, "mode_count");
+    return gModeStrings[(int)mode];
+}
+
+sk_sp<SkXfermode> SkXfermode::Make(SkBlendMode mode) {
+ if ((unsigned)mode > (unsigned)SkBlendMode::kLastMode) {
+ // report error
+ return nullptr;
+ }
+
+ // Skia's "default" mode is srcover. nullptr in SkPaint is interpreted as srcover
+ // so we can just return nullptr from the factory.
+ if (SkBlendMode::kSrcOver == mode) {
+ return nullptr;
+ }
+
+ const int COUNT_BLENDMODES = (int)SkBlendMode::kLastMode + 1;
+
+ static SkOnce once[COUNT_BLENDMODES];
+ static SkXfermode* cached[COUNT_BLENDMODES];
+
+ once[(int)mode]([mode] {
+ if (auto xfermode = SkOpts::create_xfermode(mode)) {
+ cached[(int)mode] = xfermode;
+ } else {
+ cached[(int)mode] = new SkProcCoeffXfermode(mode);
+ }
+ });
+ return sk_ref_sp(cached[(int)mode]);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
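+// For example, kSrcOver has coefficients (src=kOne, dst=kISA), so the logic
+// below reports it opaque exactly when the source color is known to be opaque.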
+bool SkXfermode::IsOpaque(SkBlendMode mode, SrcColorOpacity opacityType) {
+ SkBlendModeCoeff src, dst;
+ if (!SkBlendMode_AsCoeff(mode, &src, &dst)) {
+ return false;
+ }
+
+ switch (src) {
+ case SkBlendModeCoeff::kDA:
+ case SkBlendModeCoeff::kDC:
+ case SkBlendModeCoeff::kIDA:
+ case SkBlendModeCoeff::kIDC:
+ return false;
+ default:
+ break;
+ }
+
+ switch (dst) {
+ case SkBlendModeCoeff::kZero:
+ return true;
+ case SkBlendModeCoeff::kISA:
+ return kOpaque_SrcColorOpacity == opacityType;
+ case SkBlendModeCoeff::kSA:
+ return kTransparentBlack_SrcColorOpacity == opacityType ||
+ kTransparentAlpha_SrcColorOpacity == opacityType;
+ case SkBlendModeCoeff::kSC:
+ return kTransparentBlack_SrcColorOpacity == opacityType;
+ default:
+ return false;
+ }
+ return false;
+}
+
+#if SK_SUPPORT_GPU
+const GrXPFactory* SkBlendMode_AsXPFactory(SkBlendMode mode) {
+ if (SkBlendMode_AsCoeff(mode, nullptr, nullptr)) {
+ const GrXPFactory* result = GrPorterDuffXPFactory::Get(mode);
+ SkASSERT(result);
+ return result;
+ }
+
+ SkASSERT(GrCustomXfermode::IsSupportedMode(mode));
+ return GrCustomXfermode::Get(mode);
+}
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
new file mode 100644
index 0000000000..95675a5d5f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkXfermodeInterpretation.h"
+
+static bool just_solid_color(const SkPaint& p) {
+ return SK_AlphaOPAQUE == p.getAlpha() && !p.getColorFilter() && !p.getShader();
+}
+
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint& paint, bool dstIsOpaque) {
+ switch (paint.getBlendMode()) {
+ case SkBlendMode::kSrcOver:
+ return kSrcOver_SkXfermodeInterpretation;
+ case SkBlendMode::kSrc:
+ if (just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDst:
+ return kSkipDrawing_SkXfermodeInterpretation;
+ case SkBlendMode::kDstOver:
+ if (dstIsOpaque) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kSrcIn:
+ if (dstIsOpaque && just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDstIn:
+ if (just_solid_color(paint)) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ default:
+ return kNormal_SkXfermodeInterpretation;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.h b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
new file mode 100644
index 0000000000..d0a420f383
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodeInterpretation_DEFINED
+#define SkXfermodeInterpretation_DEFINED
+
+class SkPaint;
+
+/**
+ * By analyzing the paint, we may decide we can take special
+ * action. This enum lists our possible actions.
+ */
+enum SkXfermodeInterpretation {
+ kNormal_SkXfermodeInterpretation, //< draw normally
+ kSrcOver_SkXfermodeInterpretation, //< draw as if in srcover mode
+ kSkipDrawing_SkXfermodeInterpretation //< draw nothing
+};
+
+/**
+ * Given a paint, determine whether the paint's transfer mode can be
+ * replaced with kSrcOver_Mode or not drawn at all. This is used by
+ * SkBlitter and SkPDFDevice.
+ */
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint&, bool dstIsOpaque);
+
+#endif // SkXfermodeInterpretation_DEFINED
diff --git a/gfx/skia/skia/src/core/SkXfermodePriv.h b/gfx/skia/skia/src/core/SkXfermodePriv.h
new file mode 100644
index 0000000000..0b7a920d8e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodePriv.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodePriv_DEFINED
+#define SkXfermodePriv_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+
+class GrFragmentProcessor;
+class GrTexture;
+class GrXPFactory;
+class SkRasterPipeline;
+class SkString;
+
+class SkXfermode : public SkRefCnt {
+public:
+ virtual void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const = 0;
+
+ /** Return an SkXfermode object for the specified mode.
+ */
+ static sk_sp<SkXfermode> Make(SkBlendMode);
+
+ /**
+ * Skia maintains global xfermode objects corresponding to each BlendMode. This returns a
+ * ptr to that global xfermode (or null if the mode is srcover). Thus the caller may use
+ * the returned ptr, but it should leave its refcnt untouched.
+ */
+ static SkXfermode* Peek(SkBlendMode mode) {
+ sk_sp<SkXfermode> xfer = Make(mode);
+ if (!xfer) {
+ SkASSERT(SkBlendMode::kSrcOver == mode);
+ return nullptr;
+ }
+ SkASSERT(!xfer->unique());
+ return xfer.get();
+ }
+
+ enum SrcColorOpacity {
+ // The src color is known to be opaque (alpha == 255)
+ kOpaque_SrcColorOpacity = 0,
+ // The src color is known to be fully transparent (color == 0)
+ kTransparentBlack_SrcColorOpacity = 1,
+ // The src alpha is known to be fully transparent (alpha == 0)
+ kTransparentAlpha_SrcColorOpacity = 2,
+ // The src color opacity is unknown
+ kUnknown_SrcColorOpacity = 3
+ };
+
+ static bool IsOpaque(SkBlendMode, SrcColorOpacity);
+
+protected:
+ SkXfermode() {}
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkYUVASizeInfo.cpp b/gfx/skia/skia/src/core/SkYUVASizeInfo.cpp
new file mode 100644
index 0000000000..9df8e9cbe1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVASizeInfo.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkYUVASizeInfo.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkSafeMath.h"
+
+size_t SkYUVASizeInfo::computeTotalBytes() const {
+ SkSafeMath safe;
+ size_t totalBytes = 0;
+
+ for (int i = 0; i < kMaxCount; ++i) {
+ SkASSERT((!fSizes[i].isEmpty() && fWidthBytes[i]) ||
+ (fSizes[i].isEmpty() && !fWidthBytes[i]));
+ totalBytes = safe.add(totalBytes, safe.mul(fWidthBytes[i], fSizes[i].height()));
+ }
+
+ return safe.ok() ? totalBytes : SIZE_MAX;
+}
+
+void SkYUVASizeInfo::computePlanes(void* base, void* planes[SkYUVASizeInfo::kMaxCount]) const {
+ planes[0] = base;
+ int i = 1;
+ for (; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ if (fSizes[i].isEmpty()) {
+ break;
+ }
+ planes[i] = SkTAddOffset<void>(planes[i - 1], fWidthBytes[i - 1] * fSizes[i - 1].height());
+ }
+ for (; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ planes[i] = nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkYUVMath.cpp b/gfx/skia/skia/src/core/SkYUVMath.cpp
new file mode 100644
index 0000000000..091e6948de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVMath.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix44.h"
+#include "src/core/SkYUVMath.h"
+
+// in SkColorMatrix order (row-major)
+// Created by running SkColorMatrix_DumpYUVMatrixTables()
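+//
+// As a reminder of the layout (a sketch, with channels in [0,1]): each 5-entry
+// row maps an (r, g, b, a, 1) column, so for a matrix m the first row computes
+//   r' = m[0]*r + m[1]*g + m[2]*b + m[3]*a + m[4]
+// which is why the bias entries below are x/255 values such as 16/255.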
+
+const float Rec709_rgb_to_yuv[] = {
+ 0.182586f, 0.614231f, 0.062007f, 0.000000f, 0.062745f,
+ -0.100644f, -0.338572f, 0.439216f, 0.000000f, 0.501961f,
+ 0.439216f, -0.398942f, -0.040274f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec709_yuv_to_rgb[] = {
+ 1.164384f, 0.000000f, 1.792741f, 0.000000f, -0.972945f,
+ 1.164384f, -0.213249f, -0.532909f, 0.000000f, 0.301483f,
+ 1.164384f, 2.112402f, 0.000000f, 0.000000f, -1.133402f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec601_rgb_to_yuv[] = {
+ 0.256788f, 0.504129f, 0.097906f, 0.000000f, 0.062745f,
+ -0.148223f, -0.290993f, 0.439216f, 0.000000f, 0.501961f,
+ 0.439216f, -0.367788f, -0.071427f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec601_yuv_to_rgb[] = {
+ 1.164384f, 0.000000f, 1.596027f, 0.000000f, -0.874202f,
+ 1.164384f, -0.391762f, -0.812968f, 0.000000f, 0.531668f,
+ 1.164384f, 2.017232f, 0.000000f, 0.000000f, -1.085631f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float JPEG_rgb_to_yuv[] = {
+ 0.299000f, 0.587000f, 0.114000f, 0.000000f, 0.000000f,
+ -0.168736f, -0.331264f, 0.500000f, 0.000000f, 0.501961f,
+ 0.500000f, -0.418688f, -0.081312f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float JPEG_yuv_to_rgb[] = {
+ 1.000000f, 0.000000f, 1.402000f, 0.000000f, -0.703749f,
+ 1.000000f, -0.344136f, -0.714136f, 0.000000f, 0.531211f,
+ 1.000000f, 1.772000f, 0.000000f, 0.000000f, -0.889475f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+
+static_assert(kJPEG_SkYUVColorSpace == 0, "");
+static_assert(kRec601_SkYUVColorSpace == 1, "");
+static_assert(kRec709_SkYUVColorSpace == 2, "");
+
+const float* yuv_to_rgb_array[] = {
+ JPEG_yuv_to_rgb,
+ Rec601_yuv_to_rgb,
+ Rec709_yuv_to_rgb,
+};
+
+const float* rgb_to_yuv_array[] = {
+ JPEG_rgb_to_yuv,
+ Rec601_rgb_to_yuv,
+ Rec709_rgb_to_yuv,
+};
+
+constexpr size_t kSizeOfColorMatrix = 20 * sizeof(float);
+
+void SkColorMatrix_RGB2YUV(SkYUVColorSpace cs, float m[20]) {
+ if ((unsigned)cs < (unsigned)kIdentity_SkYUVColorSpace) {
+ memcpy(m, rgb_to_yuv_array[(unsigned)cs], kSizeOfColorMatrix);
+ } else {
+ memset(m, 0, kSizeOfColorMatrix);
+ m[0] = m[6] = m[12] = m[18] = 1;
+ }
+}
+
+void SkColorMatrix_YUV2RGB(SkYUVColorSpace cs, float m[20]) {
+ if ((unsigned)cs < (unsigned)kIdentity_SkYUVColorSpace) {
+ memcpy(m, yuv_to_rgb_array[(unsigned)cs], kSizeOfColorMatrix);
+ } else {
+ memset(m, 0, kSizeOfColorMatrix);
+ m[0] = m[6] = m[12] = m[18] = 1;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// we just drop the alpha row/col from the colormatrix
+// output is | tr |
+// | 3x3 tg |
+// | tb |
+// | 0 0 0 1 |
+static void colormatrix_to_matrix44(const float src[20], SkMatrix44* dst) {
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 3; ++c) {
+ dst->set(r, c, src[r*5 + c]);
+ }
+ dst->set(r, 3, src[r*5 + 4]);
+ }
+ dst->set(3, 0, 0);
+ dst->set(3, 1, 0);
+ dst->set(3, 2, 0);
+ dst->set(3, 3, 1);
+}
+
+// input: ignore the bottom row
+// output: inject identity row/column for alpha
+static void matrix44_to_colormatrix(const SkMatrix44& src, float dst[20]) {
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 3; ++c) {
+ dst[r*5 + c] = src.get(r, c);
+ }
+ dst[r*5 + 3] = 0; // scale alpha
+ dst[r*5 + 4] = src.get(r, 3); // translate
+ }
+ dst[15] = dst[16] = dst[17] = dst[19] = 0;
+ dst[18] = 1;
+}
+
+static void scale3(float m[], float s) {
+ for (int i = 0; i < 3; ++i) {
+ m[i] *= s;
+ }
+}
+
+namespace {
+struct YUVCoeff {
+ float Kr, Kb;
+ float Cr, Cb;
+ float scaleY, addY;
+ float scaleUV;
+};
+} // namespace
+
+const YUVCoeff gCoeff[] = {
+ // kJPEG_SkYUVColorSpace
+ { 0.299f, 0.114f, 1/1.772f, 1/1.402f, 1, 0, 1, },
+
+ // kRec601_SkYUVColorSpace
+ { 0.299f, 0.114f, 1/1.772f, 1/1.402f, 219/255.f, 16/255.f, 224/255.f, },
+
+ // kRec709_SkYUVColorSpace
+ { 0.2126f, 0.0722f, 1/1.8556f, 1/1.5748f, 219/255.f, 16/255.f, 224/255.f, },
+};
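+
+// How those coefficients fold into a matrix (a derivation sketch): with
+// Kg = 1 - Kr - Kb, full-range YUV is
+//   Y  = Kr*R + Kg*G + Kb*B
+//   Cb = (B - Y) / (2*(1 - Kb))    e.g. 1/1.772 for Kb = 0.114
+//   Cr = (R - Y) / (2*(1 - Kr))    e.g. 1/1.402 for Kr = 0.299
+// make_rgb_to_yuv_matrix() below assembles exactly these rows, then applies
+// the video-range scales/offsets (scaleY, addY, scaleUV) where needed.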
+
+static void make_rgb_to_yuv_matrix(float mx[20], const YUVCoeff& c) {
+ const float Kr = c.Kr;
+ const float Kb = c.Kb;
+ const float Kg = 1.0f - Kr - Kb;
+
+ float m[20] = {
+ Kr, Kg, Kb, 0, c.addY,
+ -Kr, -Kg, 1-Kb, 0, 128/255.f,
+ 1-Kr, -Kg, -Kb, 0, 128/255.f,
+ 0, 0, 0, 1, 0,
+ };
+ memcpy(mx, m, sizeof(m));
+ scale3(mx + 0, c.scaleY);
+ scale3(mx + 5, c.Cr * c.scaleUV);
+ scale3(mx + 10, c.Cb * c.scaleUV);
+}
+
+static void dump(const float m[20], SkYUVColorSpace cs, bool rgb2yuv) {
+ const char* names[] = {
+ "JPEG", "Rec601", "Rec709",
+ };
+ const char* dirnames[] = {
+ "yuv_to_rgb", "rgb_to_yuv",
+ };
+ SkDebugf("const float %s_%s[] = {\n", names[cs], dirnames[rgb2yuv]);
+ for (int i = 0; i < 4; ++i) {
+ SkDebugf(" ");
+ for (int j = 0; j < 5; ++j) {
+ SkDebugf(" %9.6ff,", m[i * 5 + j]);
+ }
+ SkDebugf("\n");
+ }
+ SkDebugf("};\n");
+}
+
+// Used to create the prebuilt tables for each colorspace.
+// Don't remove this function, in case we want to recompute those tables in the future.
+void SkColorMatrix_DumpYUVMatrixTables() {
+ for (auto cs : {kRec709_SkYUVColorSpace, kRec601_SkYUVColorSpace, kJPEG_SkYUVColorSpace}) {
+ float m[20];
+ make_rgb_to_yuv_matrix(m, gCoeff[(unsigned)cs]);
+ dump(m, cs, true);
+ SkMatrix44 m44, im44;
+ colormatrix_to_matrix44(m, &m44);
+ float im[20];
+#ifdef SK_DEBUG
+        // be sure our conversion between matrix44 and colormatrix is perfect
+ matrix44_to_colormatrix(m44, im);
+ SkASSERT(memcmp(m, im, sizeof(im)) == 0);
+#endif
+ SkAssertResult(m44.invert(&im44));
+ matrix44_to_colormatrix(im44, im);
+ dump(im, cs, false);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkYUVMath.h b/gfx/skia/skia/src/core/SkYUVMath.h
new file mode 100644
index 0000000000..9ecd2c8366
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVMath.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVMath_DEFINED
+#define SkYUVMath_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+void SkColorMatrix_RGB2YUV(SkYUVColorSpace, float m[20]);
+void SkColorMatrix_YUV2RGB(SkYUVColorSpace, float m[20]);
+
+// Used to create the pre-compiled tables in SkYUVMath.cpp
+void SkColorMatrix_DumpYUVMatrixTables();
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
new file mode 100644
index 0000000000..756937f607
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkYUVPlanesCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
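+//
+// e.g. CHECK_LOCAL(localCache, find, Find, key, visitor, &result) expands to
+// localCache->find(key, visitor, &result) when a local cache was supplied,
+// and to the global SkResourceCache::Find(key, visitor, &result) otherwise.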
+
+namespace {
+static unsigned gYUVPlanesKeyNamespaceLabel;
+
+struct YUVValue {
+ SkYUVPlanesCache::Info fInfo;
+ SkCachedData* fData;
+};
+
+struct YUVPlanesKey : public SkResourceCache::Key {
+ YUVPlanesKey(uint32_t genID)
+ : fGenID(genID)
+ {
+ this->init(&gYUVPlanesKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(genID),
+ sizeof(genID));
+ }
+
+ uint32_t fGenID;
+};
+
+struct YUVPlanesRec : public SkResourceCache::Rec {
+ YUVPlanesRec(YUVPlanesKey key, SkCachedData* data, SkYUVPlanesCache::Info* info)
+ : fKey(key)
+ {
+ fValue.fData = data;
+ fValue.fInfo = *info;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~YUVPlanesRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ YUVPlanesKey fKey;
+ YUVValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "yuv-planes"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const YUVPlanesRec& rec = static_cast<const YUVPlanesRec&>(baseRec);
+ YUVValue* result = static_cast<YUVValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ result->fData = tmpData;
+ result->fInfo = rec.fValue.fInfo;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkYUVPlanesCache::FindAndRef(uint32_t genID, Info* info,
+ SkResourceCache* localCache) {
+ YUVValue result;
+ YUVPlanesKey key(genID);
+ if (!CHECK_LOCAL(localCache, find, Find, key, YUVPlanesRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *info = result.fInfo;
+ return result.fData;
+}
+
+void SkYUVPlanesCache::Add(uint32_t genID, SkCachedData* data, Info* info,
+ SkResourceCache* localCache) {
+ YUVPlanesKey key(genID);
+ return CHECK_LOCAL(localCache, add, Add, new YUVPlanesRec(key, data, info));
+}
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.h b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
new file mode 100644
index 0000000000..18792305d6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVPlanesCache_DEFINED
+#define SkYUVPlanesCache_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkYUVAIndex.h"
+#include "include/core/SkYUVASizeInfo.h"
+#include "src/core/SkCachedData.h"
+
+class SkResourceCache;
+
+class SkYUVPlanesCache {
+public:
+ /**
+ * The Info struct contains data about the 4 Y, U, V, and A planes of memory stored
+ * contiguously, in that order, as a single block of memory within SkYUVPlanesCache.
+ *
+ * fSizeInfo: fWidth, fHeight, and fWidthBytes of each of the Y, U, V, and A planes.
+ * fColorSpace: color space that will be used for the YUV -> RGB conversion.
+ */
+ struct Info {
+ SkYUVASizeInfo fSizeInfo;
+ SkYUVAIndex fYUVAIndices[SkYUVAIndex::kIndexCount];
+ SkYUVColorSpace fColorSpace;
+ };
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixels.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(uint32_t genID, Info* info,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a pixelRef ID and its YUV planes data to the cache.
+ */
+ static void Add(uint32_t genID, SkCachedData* data, Info* info,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkZip.h b/gfx/skia/skia/src/core/SkZip.h
new file mode 100644
index 0000000000..1b38b41f33
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkZip.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkZip_DEFINED
+#define SkZip_DEFINED
+
+#include <cstddef>
+#include <iterator>
+#include <tuple>
+#include <type_traits>
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkSpan.h"
+
+// Take a list of things that can be pointers, and use them all in parallel. The iterators and
+// accessor operator[] for the class produce a tuple of the items.
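+//
+// For example (an illustrative sketch):
+//
+//   int   xs[] = {1, 2, 3};
+//   float ys[] = {4.f, 5.f, 6.f};
+//   for (auto t : SkMakeZip(xs, ys)) {
+//       // std::get<0>(t) is an int&, std::get<1>(t) is a float&
+//   }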
+template<typename... Ts>
+class SkZip {
+ using ReturnTuple = std::tuple<Ts&...>;
+
+ class Iterator {
+ public:
+ using value_type = ReturnTuple;
+ using difference_type = ptrdiff_t;
+ using pointer = value_type*;
+ using reference = value_type;
+ using iterator_category = std::input_iterator_tag;
+ constexpr Iterator(const SkZip* zip, size_t index) : fZip{zip}, fIndex{index} { }
+ constexpr Iterator(const Iterator& that) : Iterator{ that.fZip, that.fIndex } { }
+ constexpr Iterator& operator++() { ++fIndex; return *this; }
+ constexpr Iterator operator++(int) { Iterator tmp(*this); operator++(); return tmp; }
+ constexpr bool operator==(const Iterator& rhs) const { return fIndex == rhs.fIndex; }
+ constexpr bool operator!=(const Iterator& rhs) const { return fIndex != rhs.fIndex; }
+ constexpr reference operator*() { return (*fZip)[fIndex]; }
+ friend constexpr difference_type operator-(Iterator lhs, Iterator rhs) {
+ return lhs.fIndex - rhs.fIndex;
+ }
+
+ private:
+ const SkZip* const fZip = nullptr;
+ size_t fIndex = 0;
+ };
+
+ template<typename T>
+ using make_nullptr = std::integral_constant<std::nullptr_t, nullptr>;
+
+public:
+ constexpr SkZip() : fPointers{make_nullptr<Ts*>::value...}, fSize{0} {}
+ constexpr SkZip(size_t) = delete;
+ constexpr SkZip(size_t size, Ts*... ts)
+ : fPointers{ts...}
+ , fSize{size} {}
+ constexpr SkZip(const SkZip& that) = default;
+
+ // Check to see if U can be used for const T or is the same as T
+ template <typename U, typename T>
+ using CanConvertToConst = typename std::integral_constant<bool,
+ std::is_convertible<U*, T*>::value && sizeof(U) == sizeof(T)>::type;
+
+ // Allow SkZip<const T> to be constructed from SkZip<T>.
+ template<typename... Us,
+ typename = std::enable_if<skstd::conjunction<CanConvertToConst<Us, Ts>...>::value>>
+ constexpr SkZip(const SkZip<Us...>& that)
+ : fPointers(that.data())
+ , fSize{that.size()} { }
+
+ constexpr ReturnTuple operator[](size_t i) const { return this->index(i);}
+ constexpr size_t size() const { return fSize; }
+ constexpr bool empty() const { return this->size() == 0; }
+ constexpr ReturnTuple front() const { return this->index(0); }
+ constexpr ReturnTuple back() const { return this->index(this->size() - 1); }
+ constexpr Iterator begin() const { return Iterator{this, 0}; }
+ constexpr Iterator end() const { return Iterator{this, this->size()}; }
+ template<size_t I> constexpr auto get() const {
+ return SkMakeSpan(std::get<I>(fPointers), fSize);
+ }
+ constexpr std::tuple<Ts*...> data() const { return fPointers; }
+ constexpr SkZip first(size_t n) const {
+ SkASSERT(n <= this->size());
+ return SkZip{n, fPointers};
+ }
+
+private:
+ constexpr SkZip(size_t n, const std::tuple<Ts*...>& pointers)
+ : fPointers{pointers}
+ , fSize{n} {}
+
+ constexpr ReturnTuple index(size_t i) const {
+ SkASSERT(this->size() > 0);
+ SkASSERT(i < this->size());
+ return indexDetail(i, skstd::make_index_sequence<sizeof...(Ts)>{});
+ }
+
+ template<std::size_t... Is>
+ constexpr ReturnTuple indexDetail(size_t i, skstd::index_sequence<Is...>) const {
+ return ReturnTuple((std::get<Is>(fPointers))[i]...);
+ }
+
+ std::tuple<Ts*...> fPointers;
+ size_t fSize;
+};
+
+class SkMakeZipDetail {
+ template<typename T> struct DecayPointer{
+ using U = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+ using type = typename std::conditional<std::is_pointer<U>::value, U, T>::type;
+ };
+ template<typename T> using DecayPointerT = typename DecayPointer<T>::type;
+
+ template<typename C> struct ContiguousMemory { };
+ template<typename T> struct ContiguousMemory<T*> {
+ using value_type = T;
+ static constexpr value_type* Data(T* t) { return t; }
+ static constexpr size_t Size(T* s) { return SIZE_MAX; }
+ };
+ template<typename T, size_t N> struct ContiguousMemory<T(&)[N]> {
+ using value_type = T;
+ static constexpr value_type* Data(T(&t)[N]) { return t; }
+ static constexpr size_t Size(T(&)[N]) { return N; }
+ };
+ // In general, we don't want r-value collections, but SkSpans are ok, because they are a view
+ // onto an actual container.
+ template<typename T> struct ContiguousMemory<SkSpan<T>> {
+ using value_type = T;
+ static constexpr value_type* Data(SkSpan<T> s) { return s.data(); }
+ static constexpr size_t Size(SkSpan<T> s) { return s.size(); }
+ };
+ // Only accept l-value references to collections.
+ template<typename C> struct ContiguousMemory<C&> {
+ using value_type = typename std::remove_pointer<decltype(std::declval<C>().data())>::type;
+ static constexpr value_type* Data(C& c) { return c.data(); }
+ static constexpr size_t Size(C& c) { return c.size(); }
+ };
+ template<typename C> using Span = ContiguousMemory<DecayPointerT<C>>;
+ template<typename C> using ValueType = typename Span<C>::value_type;
+
+ template<typename C, typename... Ts> struct PickOneSize { };
+ template <typename T, typename... Ts> struct PickOneSize<T*, Ts...> {
+ static constexpr size_t Size(T* t, Ts... ts) {
+ return PickOneSize<Ts...>::Size(std::forward<Ts>(ts)...);
+ }
+ };
+ template <typename T, typename... Ts, size_t N> struct PickOneSize<T(&)[N], Ts...> {
+ static constexpr size_t Size(T(&)[N], Ts...) { return N; }
+ };
+ template<typename T, typename... Ts> struct PickOneSize<SkSpan<T>, Ts...> {
+ static constexpr size_t Size(SkSpan<T> s, Ts...) { return s.size(); }
+ };
+ template<typename C, typename... Ts> struct PickOneSize<C&, Ts...> {
+ static constexpr size_t Size(C& c, Ts...) { return c.size(); }
+ };
+
+public:
+ template<typename... Ts>
+ static constexpr auto MakeZip(Ts&& ... ts) {
+
+ // Pick the first collection that has a size, and use that for the size.
+ size_t size = PickOneSize<DecayPointerT<Ts>...>::Size(std::forward<Ts>(ts)...);
+
+#ifdef SK_DEBUG
+ // Check that all sizes are the same.
+ size_t minSize = SIZE_MAX;
+ size_t maxSize = 0;
+ for (size_t s : {Span<Ts>::Size(std::forward<Ts>(ts))...}) {
+ if (s != SIZE_MAX) {
+ minSize = SkTMin(minSize, s);
+ maxSize = SkTMax(maxSize, s);
+ }
+ }
+ SkASSERT(minSize == maxSize);
+#endif
+
+ return SkZip<ValueType<Ts>...>{size, Span<Ts>::Data(std::forward<Ts>(ts))...};
+ }
+};
+
+template<typename... Ts>
+inline constexpr auto SkMakeZip(Ts&& ... ts) {
+ return SkMakeZipDetail::MakeZip(std::forward<Ts>(ts)...);
+}
+#endif //SkZip_DEFINED
diff --git a/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
new file mode 100644
index 0000000000..360179d663
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/effects/Sk1DPathEffect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+// Since we are stepping by a float, the do/while loop might go on forever (or nearly so).
+// Put in a governor so pathological values cannot loop too long (or allocate too much RAM).
+#define MAX_REASONABLE_ITERATIONS 100000
+
+bool Sk1DPathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ SkPathMeasure meas(src, false);
+ do {
+ int governor = MAX_REASONABLE_ITERATIONS;
+ SkScalar length = meas.getLength();
+ SkScalar distance = this->begin(length);
+ while (distance < length && --governor >= 0) {
+ SkScalar delta = this->next(dst, distance, meas);
+ if (delta <= 0) {
+ break;
+ }
+ distance += delta;
+ }
+ } while (meas.nextContour());
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath1DPathEffect::SkPath1DPathEffect(const SkPath& path, SkScalar advance, SkScalar phase,
+ Style style) : fPath(path) {
+ SkASSERT(advance > 0 && !path.isEmpty());
+ SkASSERT((unsigned)style <= kMorph_Style);
+
+ // Make the path thread-safe.
+ fPath.updateBoundsCache();
+ (void)fPath.getGenerationID();
+
+    // clean up the caller's phase parameter, inverting it so that it becomes an
+ // offset along the path (to match the interpretation in PostScript)
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ } else {
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ phase = advance - phase;
+ }
+ // now catch the edge case where phase == advance (within epsilon)
+ if (phase >= advance) {
+ phase = 0;
+ }
+ SkASSERT(phase >= 0);
+
+ fAdvance = advance;
+ fInitialOffset = phase;
+
+ if ((unsigned)style > kMorph_Style) {
+ SkDEBUGF("SkPath1DPathEffect style enum out of range %d\n", style);
+ }
+ fStyle = style;
+}
+
+bool SkPath1DPathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ rec->setFillStyle();
+ return this->INHERITED::onFilterPath(dst, src, rec, cullRect);
+}
+
+static bool morphpoints(SkPoint dst[], const SkPoint src[], int count,
+ SkPathMeasure& meas, SkScalar dist) {
+ for (int i = 0; i < count; i++) {
+ SkPoint pos;
+ SkVector tangent;
+
+ SkScalar sx = src[i].fX;
+ SkScalar sy = src[i].fY;
+
+ if (!meas.getPosTan(dist + sx, &pos, &tangent)) {
+ return false;
+ }
+
+ SkMatrix matrix;
+ SkPoint pt;
+
+ pt.set(sx, sy);
+ matrix.setSinCos(tangent.fY, tangent.fX, 0, 0);
+ matrix.preTranslate(-sx, 0);
+ matrix.postTranslate(pos.fX, pos.fY);
+ matrix.mapPoints(&dst[i], &pt, 1);
+ }
+ return true;
+}
+
+/* TODO
+
+Need differentially more subdivisions when the follow-path is curvy. Not sure how to
+determine that, but we need it. I guess a cheap answer is let the caller tell us,
+but that seems like a cop-out. Another answer is to get Rob Johnson to figure it out.
+*/
+static void morphpath(SkPath* dst, const SkPath& src, SkPathMeasure& meas,
+ SkScalar dist) {
+ SkPath::Iter iter(src, false);
+ SkPoint srcP[4], dstP[3];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(srcP)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (morphpoints(dstP, srcP, 1, meas, dist)) {
+ dst->moveTo(dstP[0]);
+ }
+ break;
+ case SkPath::kLine_Verb:
+ srcP[2] = srcP[1];
+ srcP[1].set(SkScalarAve(srcP[0].fX, srcP[2].fX),
+ SkScalarAve(srcP[0].fY, srcP[2].fY));
+ // fall through to quad
+ case SkPath::kQuad_Verb:
+ if (morphpoints(dstP, &srcP[1], 2, meas, dist)) {
+ dst->quadTo(dstP[0], dstP[1]);
+ }
+ break;
+ case SkPath::kConic_Verb:
+ if (morphpoints(dstP, &srcP[1], 2, meas, dist)) {
+ dst->conicTo(dstP[0], dstP[1], iter.conicWeight());
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ if (morphpoints(dstP, &srcP[1], 3, meas, dist)) {
+ dst->cubicTo(dstP[0], dstP[1], dstP[2]);
+ }
+ break;
+ case SkPath::kClose_Verb:
+ dst->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+}
+
+SkScalar SkPath1DPathEffect::begin(SkScalar contourLength) const {
+ return fInitialOffset;
+}
+
+sk_sp<SkFlattenable> SkPath1DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkScalar advance = buffer.readScalar();
+ SkPath path;
+ buffer.readPath(&path);
+ SkScalar phase = buffer.readScalar();
+ Style style = buffer.read32LE(kLastEnum_Style);
+ return buffer.isValid() ? SkPath1DPathEffect::Make(path, advance, phase, style) : nullptr;
+}
+
+void SkPath1DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fAdvance);
+ buffer.writePath(fPath);
+ buffer.writeScalar(fInitialOffset);
+ buffer.writeUInt(fStyle);
+}
+
+SkScalar SkPath1DPathEffect::next(SkPath* dst, SkScalar distance,
+ SkPathMeasure& meas) const {
+#if defined(IS_FUZZING_WITH_LIBFUZZER)
+ if (dst->countPoints() > 100000) {
+ return fAdvance;
+ }
+#endif
+ switch (fStyle) {
+ case kTranslate_Style: {
+ SkPoint pos;
+ if (meas.getPosTan(distance, &pos, nullptr)) {
+ dst->addPath(fPath, pos.fX, pos.fY);
+ }
+ } break;
+ case kRotate_Style: {
+ SkMatrix matrix;
+ if (meas.getMatrix(distance, &matrix)) {
+ dst->addPath(fPath, matrix);
+ }
+ } break;
+ case kMorph_Style:
+ morphpath(dst, fPath, meas, distance);
+ break;
+ default:
+ SkDEBUGFAIL("unknown Style enum");
+ break;
+ }
+ return fAdvance;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkPath1DPathEffect::Make(const SkPath& path, SkScalar advance, SkScalar phase,
+ Style style) {
+ if (advance <= 0 || !SkScalarIsFinite(advance) || !SkScalarIsFinite(phase) || path.isEmpty()) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkPath1DPathEffect(path, advance, phase, style));
+}
diff --git a/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
new file mode 100644
index 0000000000..fa19bb96b8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/effects/Sk2DPathEffect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+Sk2DPathEffect::Sk2DPathEffect(const SkMatrix& mat) : fMatrix(mat) {
+ // Calling invert will set the type mask on both matrices, making them thread safe.
+ fMatrixIsInvertible = fMatrix.invert(&fInverse);
+}
+
+bool Sk2DPathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ if (!fMatrixIsInvertible) {
+ return false;
+ }
+
+ SkPath tmp;
+ SkIRect ir;
+
+ src.transform(fInverse, &tmp);
+ tmp.getBounds().round(&ir);
+ if (!ir.isEmpty()) {
+ this->begin(ir, dst);
+
+ SkRegion rgn;
+ rgn.setPath(tmp, SkRegion(ir));
+ SkRegion::Iterator iter(rgn);
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& rect = iter.rect();
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ this->nextSpan(rect.fLeft, y, rect.width(), dst);
+ }
+ }
+
+ this->end(dst);
+ }
+ return true;
+}
+
+void Sk2DPathEffect::nextSpan(int x, int y, int count, SkPath* path) const {
+ if (!fMatrixIsInvertible) {
+ return;
+ }
+
+ const SkMatrix& mat = this->getMatrix();
+ SkPoint src, dst;
+
+ src.set(SkIntToScalar(x) + SK_ScalarHalf, SkIntToScalar(y) + SK_ScalarHalf);
+ do {
+ mat.mapPoints(&dst, &src, 1);
+ this->next(dst, x++, y, path);
+ src.fX += SK_Scalar1;
+ } while (--count > 0);
+}
+
+void Sk2DPathEffect::begin(const SkIRect& uvBounds, SkPath* dst) const {}
+void Sk2DPathEffect::next(const SkPoint& loc, int u, int v, SkPath* dst) const {}
+void Sk2DPathEffect::end(SkPath* dst) const {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void Sk2DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fMatrix);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkLine2DPathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ if (this->INHERITED::onFilterPath(dst, src, rec, cullRect)) {
+ rec->setStrokeStyle(fWidth);
+ return true;
+ }
+ return false;
+}
+
+void SkLine2DPathEffect::nextSpan(int u, int v, int ucount, SkPath* dst) const {
+ if (ucount > 1) {
+ SkPoint src[2], dstP[2];
+
+ src[0].set(SkIntToScalar(u) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ src[1].set(SkIntToScalar(u+ucount) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ this->getMatrix().mapPoints(dstP, src, 2);
+
+ dst->moveTo(dstP[0]);
+ dst->lineTo(dstP[1]);
+ }
+}
+
+sk_sp<SkFlattenable> SkLine2DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkScalar width = buffer.readScalar();
+ return SkLine2DPathEffect::Make(width, matrix);
+}
+
+void SkLine2DPathEffect::flatten(SkWriteBuffer &buffer) const {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writeScalar(fWidth);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath2DPathEffect::SkPath2DPathEffect(const SkMatrix& m, const SkPath& p)
+ : INHERITED(m), fPath(p) {
+}
+
+sk_sp<SkFlattenable> SkPath2DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkPath path;
+ buffer.readPath(&path);
+ return SkPath2DPathEffect::Make(matrix, path);
+}
+
+void SkPath2DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writePath(fPath);
+}
+
+void SkPath2DPathEffect::next(const SkPoint& loc, int u, int v,
+ SkPath* dst) const {
+ dst->addPath(fPath, loc.fX, loc.fY);
+}
diff --git a/gfx/skia/skia/src/effects/SkColorMatrix.cpp b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
new file mode 100644
index 0000000000..f6d8eaeded
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkColorMatrix.h"
+#include "include/private/SkFloatingPoint.h"
+
+enum {
+ kR_Scale = 0,
+ kG_Scale = 6,
+ kB_Scale = 12,
+ kA_Scale = 18,
+
+ kR_Trans = 4,
+ kG_Trans = 9,
+ kB_Trans = 14,
+ kA_Trans = 19,
+};
+
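+// fMat is a 4x5 row-major affine color matrix: the first four entries of
+// each row scale the (R,G,B,A) inputs and the fifth (indices 4, 9, 14, 19)
+// is a translation term. Concatenation multiplies the 4x4 blocks and folds
+// the inner translation column through the outer matrix.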
+static void set_concat(float result[20], const float outer[20], const float inner[20]) {
+ float tmp[20];
+ float* target;
+
+ if (outer == result || inner == result) {
+        target = tmp;   // we'll memcpy the answer into result when done
+ } else {
+ target = result;
+ }
+
+ int index = 0;
+ for (int j = 0; j < 20; j += 5) {
+ for (int i = 0; i < 4; i++) {
+ target[index++] = outer[j + 0] * inner[i + 0] +
+ outer[j + 1] * inner[i + 5] +
+ outer[j + 2] * inner[i + 10] +
+ outer[j + 3] * inner[i + 15];
+ }
+ target[index++] = outer[j + 0] * inner[4] +
+ outer[j + 1] * inner[9] +
+ outer[j + 2] * inner[14] +
+ outer[j + 3] * inner[19] +
+ outer[j + 4];
+ }
+
+ if (target != result) {
+ memcpy(result, target, 20 * sizeof(float));
+ }
+}
+
+void SkColorMatrix::setIdentity() {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[kR_Scale] = fMat[kG_Scale] = fMat[kB_Scale] = fMat[kA_Scale] = 1;
+}
+
+void SkColorMatrix::setScale(float rScale, float gScale, float bScale, float aScale) {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[kR_Scale] = rScale;
+ fMat[kG_Scale] = gScale;
+ fMat[kB_Scale] = bScale;
+ fMat[kA_Scale] = aScale;
+}
+
+void SkColorMatrix::postTranslate(float dr, float dg, float db, float da) {
+ fMat[kR_Trans] += dr;
+ fMat[kG_Trans] += dg;
+ fMat[kB_Trans] += db;
+ fMat[kA_Trans] += da;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkColorMatrix::setRotate(Axis axis, float degrees) {
+ float r = sk_float_degrees_to_radians(degrees);
+ this->setSinCos(axis, sk_float_sin(r), sk_float_cos(r));
+}
+
+void SkColorMatrix::setSinCos(Axis axis, float sine, float cosine) {
+ SkASSERT((unsigned)axis < 3);
+
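+    // Each row of gRotateIndex names the four slots of the 2x2 rotation
+    // block acting on the plane perpendicular to the chosen axis; e.g.
+    // rotating about R (axis 0) mixes the G and B rows via slots 6,7,11,12.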
+ static const uint8_t gRotateIndex[] = {
+ 6, 7, 11, 12,
+ 0, 10, 2, 12,
+ 0, 1, 5, 6,
+ };
+ const uint8_t* index = gRotateIndex + axis * 4;
+
+ this->setIdentity();
+ fMat[index[0]] = cosine;
+ fMat[index[1]] = sine;
+ fMat[index[2]] = -sine;
+ fMat[index[3]] = cosine;
+}
+
+void SkColorMatrix::preRotate(Axis axis, float degrees) {
+ SkColorMatrix tmp;
+ tmp.setRotate(axis, degrees);
+ this->preConcat(tmp);
+}
+
+void SkColorMatrix::postRotate(Axis axis, float degrees) {
+ SkColorMatrix tmp;
+ tmp.setRotate(axis, degrees);
+ this->postConcat(tmp);
+}
+
+void SkColorMatrix::setConcat(const SkColorMatrix& matA, const SkColorMatrix& matB) {
+ set_concat(fMat, matA.fMat, matB.fMat);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void setrow(float row[], float r, float g, float b) {
+ row[0] = r;
+ row[1] = g;
+ row[2] = b;
+}
+
+static const float kHueR = 0.213f;
+static const float kHueG = 0.715f;
+static const float kHueB = 0.072f;
+
+void SkColorMatrix::setSaturation(float sat) {
+ memset(fMat, 0, sizeof(fMat));
+
+ const float R = kHueR * (1 - sat);
+ const float G = kHueG * (1 - sat);
+ const float B = kHueB * (1 - sat);
+
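+    // kHueR + kHueG + kHueB == 1, so each row blends a (roughly Rec. 709)
+    // luma-weighted gray with the original channel: sat=1 yields the
+    // identity matrix, sat=0 collapses to grayscale.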
+ setrow(fMat + 0, R + sat, G, B);
+ setrow(fMat + 5, R, G + sat, B);
+ setrow(fMat + 10, R, G, B + sat);
+ fMat[kA_Scale] = 1;
+}
+
+static const float kR2Y = 0.299f;
+static const float kG2Y = 0.587f;
+static const float kB2Y = 0.114f;
+
+static const float kR2U = -0.16874f;
+static const float kG2U = -0.33126f;
+static const float kB2U = 0.5f;
+
+static const float kR2V = 0.5f;
+static const float kG2V = -0.41869f;
+static const float kB2V = -0.08131f;
+
+void SkColorMatrix::setRGB2YUV() {
+ memset(fMat, 0, sizeof(fMat));
+
+ setrow(fMat + 0, kR2Y, kG2Y, kB2Y);
+ setrow(fMat + 5, kR2U, kG2U, kB2U);
+ setrow(fMat + 10, kR2V, kG2V, kB2V);
+ fMat[kA_Scale] = 1;
+}
+
+static const float kV2R = 1.402f;
+static const float kU2G = -0.34414f;
+static const float kV2G = -0.71414f;
+static const float kU2B = 1.772f;
+
+void SkColorMatrix::setYUV2RGB() {
+ memset(fMat, 0, sizeof(fMat));
+
+ setrow(fMat + 0, 1, 0, kV2R);
+ setrow(fMat + 5, 1, kU2G, kV2G);
+ setrow(fMat + 10, 1, kU2B, 0);
+ fMat[kA_Scale] = 1;
+}
diff --git a/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
new file mode 100644
index 0000000000..91811a6d30
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkColorMatrixFilter.h"
+
+static SkScalar byte_to_unit_float(U8CPU byte) {
+ if (0xFF == byte) {
+        // we want 255 to map to exactly 1
+ return 1;
+ } else {
+        return byte * 0.00392156862745f;   // i.e. byte * (1/255)
+ }
+}
+
+sk_sp<SkColorFilter> SkColorMatrixFilter::MakeLightingFilter(SkColor mul, SkColor add) {
+ const SkColor opaqueAlphaMask = SK_ColorBLACK;
+ // omit the alpha and compare only the RGB values
+ if (0 == (add & ~opaqueAlphaMask)) {
+ return SkColorFilters::Blend(mul | opaqueAlphaMask, SkBlendMode::kModulate);
+ }
+
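+    // General case: per color channel, out = in * (mul/255) + add/255, with
+    // alpha passed through unchanged.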
+ SkColorMatrix matrix;
+ matrix.setScale(byte_to_unit_float(SkColorGetR(mul)),
+ byte_to_unit_float(SkColorGetG(mul)),
+ byte_to_unit_float(SkColorGetB(mul)),
+ 1);
+ matrix.postTranslate(byte_to_unit_float(SkColorGetR(add)),
+ byte_to_unit_float(SkColorGetG(add)),
+ byte_to_unit_float(SkColorGetB(add)),
+ 0);
+ return SkColorFilters::Matrix(matrix);
+}
diff --git a/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
new file mode 100644
index 0000000000..16670e55e9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/effects/SkCornerPathEffect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+SkCornerPathEffect::SkCornerPathEffect(SkScalar radius) {
+ // check with ! to catch NaNs
+ if (!(radius > 0)) {
+ radius = 0;
+ }
+ fRadius = radius;
+}
+SkCornerPathEffect::~SkCornerPathEffect() {}
+
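+// Computes the vector from a toward b with length `radius`. Returns true if
+// the segment is long enough (> 2 * radius) to keep a straight middle piece;
+// otherwise step becomes the half-segment so the two corner curves meet.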
+static bool ComputeStep(const SkPoint& a, const SkPoint& b, SkScalar radius,
+ SkPoint* step) {
+ SkScalar dist = SkPoint::Distance(a, b);
+
+ *step = b - a;
+ if (dist <= radius * 2) {
+ *step *= SK_ScalarHalf;
+ return false;
+ } else {
+ *step *= radius / dist;
+ return true;
+ }
+}
+
+bool SkCornerPathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ if (fRadius <= 0) {
+ return false;
+ }
+
+ SkPath::Iter iter(src, false);
+ SkPath::Verb verb, prevVerb = SkPath::kDone_Verb;
+ SkPoint pts[4];
+
+ bool closed;
+ SkPoint moveTo, lastCorner;
+ SkVector firstStep, step;
+ bool prevIsValid = true;
+
+ // to avoid warnings
+ step.set(0, 0);
+ moveTo.set(0, 0);
+ firstStep.set(0, 0);
+ lastCorner.set(0, 0);
+
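+    // Walk the contour: each line segment is shortened by `step` at both
+    // ends, and the joint is rounded with a quad whose control point is the
+    // shared corner. Curves are currently passed through unmodified (see
+    // the TBD cases below).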
+ for (;;) {
+ switch (verb = iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ // close out the previous (open) contour
+ if (SkPath::kLine_Verb == prevVerb) {
+ dst->lineTo(lastCorner);
+ }
+ closed = iter.isClosedContour();
+ if (closed) {
+ moveTo = pts[0];
+ prevIsValid = false;
+ } else {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ break;
+ case SkPath::kLine_Verb: {
+ bool drawSegment = ComputeStep(pts[0], pts[1], fRadius, &step);
+ // prev corner
+ if (!prevIsValid) {
+ dst->moveTo(moveTo + step);
+ prevIsValid = true;
+ } else {
+ dst->quadTo(pts[0].fX, pts[0].fY, pts[0].fX + step.fX,
+ pts[0].fY + step.fY);
+ }
+ if (drawSegment) {
+ dst->lineTo(pts[1].fX - step.fX, pts[1].fY - step.fY);
+ }
+ lastCorner = pts[1];
+ prevIsValid = true;
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->quadTo(pts[1], pts[2]);
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kConic_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->conicTo(pts[1], pts[2], iter.conicWeight());
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kCubic_Verb:
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ // TBD - just replicate the curve for now
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ lastCorner = pts[3];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kClose_Verb:
+ if (firstStep.fX || firstStep.fY) {
+ dst->quadTo(lastCorner.fX, lastCorner.fY,
+ lastCorner.fX + firstStep.fX,
+ lastCorner.fY + firstStep.fY);
+ }
+ dst->close();
+ prevIsValid = false;
+ break;
+ case SkPath::kDone_Verb:
+ if (prevIsValid) {
+ dst->lineTo(lastCorner);
+ }
+ return true;
+ default:
+ SkDEBUGFAIL("default should not be reached");
+ return false;
+ }
+
+ if (SkPath::kMove_Verb == prevVerb) {
+ firstStep = step;
+ }
+ prevVerb = verb;
+ }
+
+ return true;
+}
+
+sk_sp<SkFlattenable> SkCornerPathEffect::CreateProc(SkReadBuffer& buffer) {
+ return SkCornerPathEffect::Make(buffer.readScalar());
+}
+
+void SkCornerPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fRadius);
+}
diff --git a/gfx/skia/skia/src/effects/SkDashImpl.h b/gfx/skia/skia/src/effects/SkDashImpl.h
new file mode 100644
index 0000000000..1d509713f9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDashImpl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashImpl_DEFINED
+#define SkDashImpl_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+class SkDashImpl : public SkPathEffect {
+public:
+ SkDashImpl(const SkScalar intervals[], int count, SkScalar phase);
+
+protected:
+ ~SkDashImpl() override;
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+ bool onAsPoints(PointData* results, const SkPath& src, const SkStrokeRec&, const SkMatrix&,
+ const SkRect*) const override;
+
+ DashType onAsADash(DashInfo* info) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkDashImpl)
+
+ SkScalar* fIntervals;
+ int32_t fCount;
+ SkScalar fPhase;
+
+    // computed from phase:
+    SkScalar fInitialDashLength;
+ int32_t fInitialDashIndex;
+ SkScalar fIntervalLength;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkDashPathEffect.cpp b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
new file mode 100644
index 0000000000..913d851c04
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkDashPathEffect.h"
+
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkDashImpl.h"
+#include "src/utils/SkDashPathPriv.h"
+
+#include <utility>
+
+SkDashImpl::SkDashImpl(const SkScalar intervals[], int count, SkScalar phase)
+ : fPhase(0)
+ , fInitialDashLength(-1)
+ , fInitialDashIndex(0)
+ , fIntervalLength(0) {
+ SkASSERT(intervals);
+ SkASSERT(count > 1 && SkIsAlign2(count));
+
+ fIntervals = (SkScalar*)sk_malloc_throw(sizeof(SkScalar) * count);
+ fCount = count;
+ for (int i = 0; i < count; i++) {
+ fIntervals[i] = intervals[i];
+ }
+
+ // set the internal data members
+ SkDashPath::CalcDashParameters(phase, fIntervals, fCount,
+ &fInitialDashLength, &fInitialDashIndex, &fIntervalLength, &fPhase);
+}
+
+SkDashImpl::~SkDashImpl() {
+ sk_free(fIntervals);
+}
+
+bool SkDashImpl::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const {
+ return SkDashPath::InternalFilter(dst, src, rec, cullRect, fIntervals, fCount,
+ fInitialDashLength, fInitialDashIndex, fIntervalLength);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius *= rec.getMiter();
+ }
+ rect->outset(radius, radius);
+}
+
+// Attempt to trim the line to minimally cover the cull rect (currently
+// only works for horizontal and vertical lines).
+// Return true if processing should continue; false otherwise.
+static bool cull_line(SkPoint* pts, const SkStrokeRec& rec,
+ const SkMatrix& ctm, const SkRect* cullRect,
+ const SkScalar intervalLength) {
+ if (nullptr == cullRect) {
+ SkASSERT(false); // Shouldn't ever occur in practice
+ return false;
+ }
+
+ SkScalar dx = pts[1].x() - pts[0].x();
+ SkScalar dy = pts[1].y() - pts[0].y();
+
+ if ((dx && dy) || (!dx && !dy)) {
+ return false;
+ }
+
+ SkRect bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ // cullRect is in device space while pts are in the local coordinate system
+ // defined by the ctm. We want our answer in the local coordinate system.
+
+ SkASSERT(ctm.rectStaysRect());
+ SkMatrix inv;
+ if (!ctm.invert(&inv)) {
+ return false;
+ }
+
+ inv.mapRect(&bounds);
+
+ if (dx) {
+ SkASSERT(dx && !dy);
+ SkScalar minX = pts[0].fX;
+ SkScalar maxX = pts[1].fX;
+
+ if (dx < 0) {
+ using std::swap;
+ swap(minX, maxX);
+ }
+
+ SkASSERT(minX < maxX);
+ if (maxX <= bounds.fLeft || minX >= bounds.fRight) {
+ return false;
+ }
+
+        // Now we actually perform the chop, removing the excess to the left
+        // and right of the bounds, keeping our new line "in phase" with the
+        // dash (hence the mod intervalLength).
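+        // e.g. with intervalLength == 10, an excess of 23 to the left is
+        // trimmed by exactly 20 (keeping 3), so the dash phase at the new
+        // endpoint matches the original line.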
+
+ if (minX < bounds.fLeft) {
+ minX = bounds.fLeft - SkScalarMod(bounds.fLeft - minX, intervalLength);
+ }
+ if (maxX > bounds.fRight) {
+ maxX = bounds.fRight + SkScalarMod(maxX - bounds.fRight, intervalLength);
+ }
+
+ SkASSERT(maxX > minX);
+ if (dx < 0) {
+ using std::swap;
+ swap(minX, maxX);
+ }
+ pts[0].fX = minX;
+ pts[1].fX = maxX;
+ } else {
+ SkASSERT(dy && !dx);
+ SkScalar minY = pts[0].fY;
+ SkScalar maxY = pts[1].fY;
+
+ if (dy < 0) {
+ using std::swap;
+ swap(minY, maxY);
+ }
+
+ SkASSERT(minY < maxY);
+ if (maxY <= bounds.fTop || minY >= bounds.fBottom) {
+ return false;
+ }
+
+        // Now we actually perform the chop, removing the excess to the top
+        // and bottom of the bounds, keeping our new line "in phase" with the
+        // dash (hence the mod intervalLength).
+
+ if (minY < bounds.fTop) {
+ minY = bounds.fTop - SkScalarMod(bounds.fTop - minY, intervalLength);
+ }
+ if (maxY > bounds.fBottom) {
+ maxY = bounds.fBottom + SkScalarMod(maxY - bounds.fBottom, intervalLength);
+ }
+
+ SkASSERT(maxY > minY);
+ if (dy < 0) {
+ using std::swap;
+ swap(minY, maxY);
+ }
+ pts[0].fY = minY;
+ pts[1].fY = maxY;
+ }
+
+ return true;
+}
+
+// Currently asPoints is more restrictive than it needs to be. In the future
+// we need to:
+// allow kRound_Cap capping (could allow rotations in the matrix with this)
+// allow paths to be returned
+bool SkDashImpl::onAsPoints(PointData* results, const SkPath& src, const SkStrokeRec& rec,
+ const SkMatrix& matrix, const SkRect* cullRect) const {
+    // width < 0 -> fill, width == 0 -> hairline; requiring width > 0 rules out both
+ if (0 >= rec.getWidth()) {
+ return false;
+ }
+
+ // TODO: this next test could be eased up. We could allow any number of
+ // intervals as long as all the ons match and all the offs match.
+ // Additionally, they do not necessarily need to be integers.
+ // We cannot allow arbitrary intervals since we want the returned points
+ // to be uniformly sized.
+ if (fCount != 2 ||
+ !SkScalarNearlyEqual(fIntervals[0], fIntervals[1]) ||
+ !SkScalarIsInt(fIntervals[0]) ||
+ !SkScalarIsInt(fIntervals[1])) {
+ return false;
+ }
+
+ SkPoint pts[2];
+
+ if (!src.isLine(pts)) {
+ return false;
+ }
+
+ // TODO: this test could be eased up to allow circles
+ if (SkPaint::kButt_Cap != rec.getCap()) {
+ return false;
+ }
+
+ // TODO: this test could be eased up for circles. Rotations could be allowed.
+ if (!matrix.rectStaysRect()) {
+ return false;
+ }
+
+ // See if the line can be limited to something plausible.
+ if (!cull_line(pts, rec, matrix, cullRect, fIntervalLength)) {
+ return false;
+ }
+
+ SkScalar length = SkPoint::Distance(pts[1], pts[0]);
+
+ SkVector tangent = pts[1] - pts[0];
+ if (tangent.isZero()) {
+ return false;
+ }
+
+ tangent.scale(SkScalarInvert(length));
+
+ // TODO: make this test for horizontal & vertical lines more robust
+ bool isXAxis = true;
+ if (SkScalarNearlyEqual(SK_Scalar1, tangent.fX) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fX)) {
+ results->fSize.set(SkScalarHalf(fIntervals[0]), SkScalarHalf(rec.getWidth()));
+ } else if (SkScalarNearlyEqual(SK_Scalar1, tangent.fY) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fY)) {
+ results->fSize.set(SkScalarHalf(rec.getWidth()), SkScalarHalf(fIntervals[0]));
+ isXAxis = false;
+ } else if (SkPaint::kRound_Cap != rec.getCap()) {
+ // Angled lines don't have axis-aligned boxes.
+ return false;
+ }
+
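+    // From here on we emit one point per full dash; fSize holds each dash
+    // rect's half-extents, while fFirst/fLast carry any partial dash at
+    // either end as explicit rects.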
+ if (results) {
+ results->fFlags = 0;
+ SkScalar clampedInitialDashLength = SkMinScalar(length, fInitialDashLength);
+
+ if (SkPaint::kRound_Cap == rec.getCap()) {
+ results->fFlags |= PointData::kCircles_PointFlag;
+ }
+
+ results->fNumPoints = 0;
+ SkScalar len2 = length;
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(len2 >= clampedInitialDashLength);
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ if (clampedInitialDashLength >= fIntervals[0]) {
+ ++results->fNumPoints; // partial first dash
+ }
+ len2 -= clampedInitialDashLength;
+ }
+ len2 -= fIntervals[1]; // also skip first space
+ if (len2 < 0) {
+ len2 = 0;
+ }
+ } else {
+ len2 -= clampedInitialDashLength; // skip initial partial empty
+ }
+ }
+ // Too many midpoints can cause results->fNumPoints to overflow or
+ // otherwise cause the results->fPoints allocation below to OOM.
+ // Cap it to a sane value.
+ SkScalar numIntervals = len2 / fIntervalLength;
+ if (!SkScalarIsFinite(numIntervals) || numIntervals > SkDashPath::kMaxDashCount) {
+ return false;
+ }
+ int numMidPoints = SkScalarFloorToInt(numIntervals);
+ results->fNumPoints += numMidPoints;
+ len2 -= numMidPoints * fIntervalLength;
+ bool partialLast = false;
+ if (len2 > 0) {
+ if (len2 < fIntervals[0]) {
+ partialLast = true;
+ } else {
+ ++numMidPoints;
+ ++results->fNumPoints;
+ }
+ }
+
+ results->fPoints = new SkPoint[results->fNumPoints];
+
+ SkScalar distance = 0;
+ int curPt = 0;
+
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(clampedInitialDashLength <= length);
+
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ // partial first block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar x = pts[0].fX + tangent.fX * SkScalarHalf(clampedInitialDashLength);
+ SkScalar y = pts[0].fY + tangent.fY * SkScalarHalf(clampedInitialDashLength);
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(clampedInitialDashLength);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(clampedInitialDashLength);
+ }
+ if (clampedInitialDashLength < fIntervals[0]) {
+ // This one will not be like the others
+ results->fFirst.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ } else {
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+ }
+
+ distance += clampedInitialDashLength;
+ }
+
+ distance += fIntervals[1]; // skip over the next blank block too
+ } else {
+ distance += clampedInitialDashLength;
+ }
+ }
+
+ if (0 != numMidPoints) {
+ distance += SkScalarHalf(fIntervals[0]);
+
+ for (int i = 0; i < numMidPoints; ++i) {
+ SkScalar x = pts[0].fX + tangent.fX * distance;
+ SkScalar y = pts[0].fY + tangent.fY * distance;
+
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+
+ distance += fIntervalLength;
+ }
+
+ distance -= SkScalarHalf(fIntervals[0]);
+ }
+
+ if (partialLast) {
+ // partial final block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar temp = length - distance;
+ SkASSERT(temp < fIntervals[0]);
+ SkScalar x = pts[0].fX + tangent.fX * (distance + SkScalarHalf(temp));
+ SkScalar y = pts[0].fY + tangent.fY * (distance + SkScalarHalf(temp));
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(temp);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(temp);
+ }
+ results->fLast.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ }
+
+ SkASSERT(curPt == results->fNumPoints);
+ }
+
+ return true;
+}
+
+SkPathEffect::DashType SkDashImpl::onAsADash(DashInfo* info) const {
+ if (info) {
+ if (info->fCount >= fCount && info->fIntervals) {
+ memcpy(info->fIntervals, fIntervals, fCount * sizeof(SkScalar));
+ }
+ info->fCount = fCount;
+ info->fPhase = fPhase;
+ }
+ return kDash_DashType;
+}
+
+void SkDashImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fPhase);
+ buffer.writeScalarArray(fIntervals, fCount);
+}
+
+sk_sp<SkFlattenable> SkDashImpl::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar phase = buffer.readScalar();
+ uint32_t count = buffer.getArrayCount();
+
+    // Don't allocate gigantic buffers if there's no data for them.
+ if (!buffer.validateCanReadN<SkScalar>(count)) {
+ return nullptr;
+ }
+
+ SkAutoSTArray<32, SkScalar> intervals(count);
+ if (buffer.readScalarArray(intervals.get(), count)) {
+ return SkDashPathEffect::Make(intervals.get(), SkToInt(count), phase);
+ }
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkDashPathEffect::Make(const SkScalar intervals[], int count, SkScalar phase) {
+ if (!SkDashPath::ValidDashPath(phase, intervals, count)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDashImpl(intervals, count, phase));
+}
diff --git a/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
new file mode 100644
index 0000000000..93ea43fa29
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/effects/SkDiscretePathEffect.h"
+#include "include/private/SkFixed.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+sk_sp<SkPathEffect> SkDiscretePathEffect::Make(SkScalar segLength, SkScalar deviation,
+ uint32_t seedAssist) {
+ if (!SkScalarIsFinite(segLength) || !SkScalarIsFinite(deviation)) {
+ return nullptr;
+ }
+ if (segLength <= SK_ScalarNearlyZero) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDiscretePathEffect(segLength, deviation, seedAssist));
+}
+
+static void Perterb(SkPoint* p, const SkVector& tangent, SkScalar scale) {
+ SkVector normal = tangent;
+ SkPointPriv::RotateCCW(&normal);
+ normal.setLength(scale);
+ *p += normal;
+}
+
+SkDiscretePathEffect::SkDiscretePathEffect(SkScalar segLength,
+ SkScalar deviation,
+ uint32_t seedAssist)
+ : fSegLength(segLength), fPerterb(deviation), fSeedAssist(seedAssist)
+{
+}
+
+/** \class LCGRandom
+
+    Utility class that implements pseudo random 32bit numbers using a fast
+    linear congruential formula. Unlike rand(), this class holds its own
+    seed (initially set to 0), so that multiple instances can be used with
+    no side-effects.
+
+ Copied from the original implementation of SkRandom. Only contains the
+ methods used by SkDiscretePathEffect::filterPath, with methods that were
+ not called directly moved to private.
+*/
+
+class LCGRandom {
+public:
+ LCGRandom(uint32_t seed) : fSeed(seed) {}
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+private:
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() { uint32_t r = fSeed * kMul + kAdd; fSeed = r; return r; }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
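+    // (an arithmetic shift by 15 maps the signed 32-bit range onto
+    // [-SK_Fixed1, SK_Fixed1) in 16.16 fixed point)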
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ uint32_t fSeed;
+};
+
+bool SkDiscretePathEffect::onFilterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect*) const {
+ bool doFill = rec->isFillStyle();
+
+ SkPathMeasure meas(src, doFill);
+
+ /* Caller may supply their own seed assist, which by default is 0 */
+ uint32_t seed = fSeedAssist ^ SkScalarRoundToInt(meas.getLength());
+
+ LCGRandom rand(seed ^ ((seed << 16) | (seed >> 16)));
+ SkScalar scale = fPerterb;
+ SkPoint p;
+ SkVector v;
+
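+    // For each contour: chop it into n ~= length / fSegLength pieces and
+    // displace each sample point along the contour normal by a random
+    // fraction of fPerterb, rebuilding the contour as a jittered polyline.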
+ do {
+ SkScalar length = meas.getLength();
+
+ if (fSegLength * (2 + doFill) > length) {
+            meas.getSegment(0, length, dst, true);  // too short for us to mangle
+ } else {
+ int n = SkScalarRoundToInt(length / fSegLength);
+ constexpr int kMaxReasonableIterations = 100000;
+ n = SkTMin(n, kMaxReasonableIterations);
+ SkScalar delta = length / n;
+ SkScalar distance = 0;
+
+ if (meas.isClosed()) {
+ n -= 1;
+ distance += delta/2;
+ }
+
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, rand.nextSScalar1() * scale);
+ dst->moveTo(p);
+ }
+ while (--n >= 0) {
+ distance += delta;
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, rand.nextSScalar1() * scale);
+ dst->lineTo(p);
+ }
+ }
+ if (meas.isClosed()) {
+ dst->close();
+ }
+ }
+ } while (meas.nextContour());
+ return true;
+}
+
+sk_sp<SkFlattenable> SkDiscretePathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkScalar segLength = buffer.readScalar();
+ SkScalar perterb = buffer.readScalar();
+ uint32_t seed = buffer.readUInt();
+ return Make(segLength, perterb, seed);
+}
+
+void SkDiscretePathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fSegLength);
+ buffer.writeScalar(fPerterb);
+ buffer.writeUInt(fSeedAssist);
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.cpp b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
new file mode 100644
index 0000000000..dcac971c7d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/effects/SkEmbossMask.h"
+
+#include "include/core/SkMath.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMathPriv.h"
+
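+// Branch-free helpers: each derives a 0/1 value (or an all-ones mask) from
+// the sign bit of an integer expression; the #if 0 branches show the
+// equivalent comparison form.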
+static inline int nonzero_to_one(int x) {
+#if 0
+ return x != 0;
+#else
+ return ((unsigned)(x | -x)) >> 31;
+#endif
+}
+
+static inline int neq_to_one(int x, int max) {
+#if 0
+ return x != max;
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return ((unsigned)(x - max)) >> 31;
+#endif
+}
+
+static inline int neq_to_mask(int x, int max) {
+#if 0
+ return -(x != max);
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return (x - max) >> 31;
+#endif
+}
+
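+// Approximates x / 255 with a multiply and a shift ((1 << 24) / 255 == 65793);
+// the result can undershoot the exact quotient by one at exact multiples of
+// 255, which is acceptable for these 8-bit lighting terms.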
+static inline unsigned div255(unsigned x) {
+ SkASSERT(x <= (255*255));
+ return x * ((1 << 24) / 255) >> 24;
+}
+
+#define kDelta 32 // small enough to show off angle differences
+
+void SkEmbossMask::Emboss(SkMask* mask, const SkEmbossMaskFilter::Light& light) {
+ SkASSERT(mask->fFormat == SkMask::k3D_Format);
+
+ int specular = light.fSpecular;
+ int ambient = light.fAmbient;
+ SkFixed lx = SkScalarToFixed(light.fDirection[0]);
+ SkFixed ly = SkScalarToFixed(light.fDirection[1]);
+ SkFixed lz = SkScalarToFixed(light.fDirection[2]);
+ SkFixed lz_dot_nz = lz * kDelta;
+ int lz_dot8 = lz >> 8;
+
+ size_t planeSize = mask->computeImageSize();
+ uint8_t* alpha = mask->fImage;
+ uint8_t* multiply = (uint8_t*)alpha + planeSize;
+ uint8_t* additive = multiply + planeSize;
+
+ int rowBytes = mask->fRowBytes;
+ int maxy = mask->fBounds.height() - 1;
+ int maxx = mask->fBounds.width() - 1;
+
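+    // Estimate a surface normal at each pixel from differences of the alpha
+    // plane (nx, ny); the helpers above clamp the sampling at the mask
+    // edges. The dot product with the light direction then drives the
+    // multiply (diffuse) and additive (specular) planes.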
+ int prev_row = 0;
+ for (int y = 0; y <= maxy; y++) {
+ int next_row = neq_to_mask(y, maxy) & rowBytes;
+
+ for (int x = 0; x <= maxx; x++) {
+ int nx = alpha[x + neq_to_one(x, maxx)] - alpha[x - nonzero_to_one(x)];
+ int ny = alpha[x + next_row] - alpha[x - prev_row];
+
+ SkFixed numer = lx * nx + ly * ny + lz_dot_nz;
+ int mul = ambient;
+ int add = 0;
+
+ if (numer > 0) { // preflight when numer/denom will be <= 0
+ int denom = SkSqrt32(nx * nx + ny * ny + kDelta*kDelta);
+ SkFixed dot = numer / denom;
+ dot >>= 8; // now dot is 2^8 instead of 2^16
+ mul = SkMin32(mul + dot, 255);
+
+ // now for the reflection
+
+ // R = 2 (Light * Normal) Normal - Light
+ // hilite = R * Eye(0, 0, 1)
+
+ int hilite = (2 * dot - lz_dot8) * lz_dot8 >> 8;
+ if (hilite > 0) {
+ // pin hilite to 255, since our fast math is also a little sloppy
+ hilite = SkClampMax(hilite, 255);
+
+ // specular is 4.4
+ // would really like to compute the fractional part of this
+ // and then possibly cache a 256 table for a given specular
+ // value in the light, and just pass that in to this function.
+ add = hilite;
+ for (int i = specular >> 4; i > 0; --i) {
+ add = div255(add * hilite);
+ }
+ }
+ }
+ multiply[x] = SkToU8(mul);
+ additive[x] = SkToU8(add);
+ }
+ alpha += rowBytes;
+ multiply += rowBytes;
+ additive += rowBytes;
+ prev_row = rowBytes;
+ }
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.h b/gfx/skia/skia/src/effects/SkEmbossMask.h
new file mode 100644
index 0000000000..7185fc909e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEmbossMask_DEFINED
+#define SkEmbossMask_DEFINED
+
+#include "src/effects/SkEmbossMaskFilter.h"
+
+class SkEmbossMask {
+public:
+ static void Emboss(SkMask* mask, const SkEmbossMaskFilter::Light&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
new file mode 100644
index 0000000000..5592fc7cd3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkBlurMaskFilter.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkEmbossMask.h"
+#include "src/effects/SkEmbossMaskFilter.h"
+
+static void normalize3(SkScalar dst[3], const SkScalar src[3]) {
+ SkScalar mag = SkScalarSquare(src[0]) + SkScalarSquare(src[1]) + SkScalarSquare(src[2]);
+ SkScalar scale = SkScalarInvert(SkScalarSqrt(mag));
+
+ for (int i = 0; i < 3; i++) {
+ dst[i] = src[i] * scale;
+ }
+}
+
+sk_sp<SkMaskFilter> SkEmbossMaskFilter::Make(SkScalar blurSigma, const Light& light) {
+ if (!SkScalarIsFinite(blurSigma) || blurSigma <= 0) {
+ return nullptr;
+ }
+
+ Light newLight = light;
+ normalize3(newLight.fDirection, light.fDirection);
+ if (!SkScalarsAreFinite(newLight.fDirection, 3)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkMaskFilter>(new SkEmbossMaskFilter(blurSigma, newLight));
+}
+
+#ifdef SK_SUPPORT_LEGACY_EMBOSSMASKFILTER
+sk_sp<SkMaskFilter> SkBlurMaskFilter::MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular) {
+ if (direction == nullptr) {
+ return nullptr;
+ }
+
+ SkEmbossMaskFilter::Light light;
+
+ memcpy(light.fDirection, direction, sizeof(light.fDirection));
+ // ambient should be 0...1 as a scalar
+ light.fAmbient = SkUnitScalarClampToByte(ambient);
+ // specular should be 0..15.99 as a scalar
+ static const SkScalar kSpecularMultiplier = SkIntToScalar(255) / 16;
+ light.fSpecular = static_cast<U8CPU>(SkScalarPin(specular, 0, 16) * kSpecularMultiplier + 0.5);
+
+ return SkEmbossMaskFilter::Make(blurSigma, light);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkEmbossMaskFilter::SkEmbossMaskFilter(SkScalar blurSigma, const Light& light)
+ : fLight(light), fBlurSigma(blurSigma)
+{
+ SkASSERT(fBlurSigma > 0);
+ SkASSERT(SkScalarsAreFinite(fLight.fDirection, 3));
+}
+
+SkMask::Format SkEmbossMaskFilter::getFormat() const {
+ return SkMask::k3D_Format;
+}
+
+bool SkEmbossMaskFilter::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ SkScalar sigma = matrix.mapRadius(fBlurSigma);
+
+ if (!SkBlurMask::BoxBlur(dst, src, sigma, kInner_SkBlurStyle)) {
+ return false;
+ }
+
+ dst->fFormat = SkMask::k3D_Format;
+ if (margin) {
+ margin->set(SkScalarCeilToInt(3*sigma), SkScalarCeilToInt(3*sigma));
+ }
+
+ if (src.fImage == nullptr) {
+ return true;
+ }
+
+ // create a larger buffer for the other two channels (should force fBlur to do this for us)
+
+ {
+ uint8_t* alphaPlane = dst->fImage;
+ size_t planeSize = dst->computeImageSize();
+ if (0 == planeSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(planeSize * 3);
+ memcpy(dst->fImage, alphaPlane, planeSize);
+ SkMask::FreeImage(alphaPlane);
+ }
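+    // dst->fImage now holds three stacked planes: the blurred alpha, then
+    // the multiply (diffuse) plane and the additive (specular) plane that
+    // SkEmbossMask::Emboss fills in below.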
+
+ // run the light direction through the matrix...
+ Light light = fLight;
+ matrix.mapVectors((SkVector*)(void*)light.fDirection,
+ (SkVector*)(void*)fLight.fDirection, 1);
+
+ // now restore the length of the XY component
+ // cast to SkVector so we can call setLength (this double cast silences alias warnings)
+ SkVector* vec = (SkVector*)(void*)light.fDirection;
+ vec->setLength(light.fDirection[0],
+ light.fDirection[1],
+ SkPoint::Length(fLight.fDirection[0], fLight.fDirection[1]));
+
+ SkEmbossMask::Emboss(dst, light);
+
+ // restore original alpha
+ memcpy(dst->fImage, src.fImage, src.computeImageSize());
+
+ return true;
+}
+
+sk_sp<SkFlattenable> SkEmbossMaskFilter::CreateProc(SkReadBuffer& buffer) {
+ Light light;
+ if (buffer.readByteArray(&light, sizeof(Light))) {
+ light.fPad = 0; // for the font-cache lookup to be clean
+ const SkScalar sigma = buffer.readScalar();
+ return Make(sigma, light);
+ }
+ return nullptr;
+}
+
+void SkEmbossMaskFilter::flatten(SkWriteBuffer& buffer) const {
+ Light tmpLight = fLight;
+ tmpLight.fPad = 0; // for the font-cache lookup to be clean
+ buffer.writeByteArray(&tmpLight, sizeof(tmpLight));
+ buffer.writeScalar(fBlurSigma);
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h
new file mode 100644
index 0000000000..d79e89b94e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEmbossMaskFilter_DEFINED
+#define SkEmbossMaskFilter_DEFINED
+
+#include "src/core/SkMaskFilterBase.h"
+
+/** \class SkEmbossMaskFilter
+
+ This mask filter creates a 3D emboss look, by specifying a light and blur amount.
+*/
+class SK_API SkEmbossMaskFilter : public SkMaskFilterBase {
+public:
+ struct Light {
+ SkScalar fDirection[3]; // x,y,z
+ uint16_t fPad;
+ uint8_t fAmbient;
+ uint8_t fSpecular; // exponent, 4.4 right now
+ };
+
+ static sk_sp<SkMaskFilter> Make(SkScalar blurSigma, const Light& light);
+
+ // overrides from SkMaskFilter
+ // This method is not exported to java.
+ SkMask::Format getFormat() const override;
+ // This method is not exported to java.
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+protected:
+ SkEmbossMaskFilter(SkScalar blurSigma, const Light& light);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkEmbossMaskFilter)
+
+ Light fLight;
+ SkScalar fBlurSigma;
+
+ typedef SkMaskFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp b/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp
new file mode 100644
index 0000000000..5382a037af
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp
@@ -0,0 +1,361 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/core/SkString.h"
+#include "include/effects/SkHighContrastFilter.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#endif
+
+using InvertStyle = SkHighContrastConfig::InvertStyle;
+
+class SkHighContrast_Filter : public SkColorFilter {
+public:
+ SkHighContrast_Filter(const SkHighContrastConfig& config) {
+ fConfig = config;
+ // Clamp contrast to just inside -1 to 1 to avoid division by zero.
+ fConfig.fContrast = SkScalarPin(fConfig.fContrast,
+ -1.0f + FLT_EPSILON,
+ 1.0f - FLT_EPSILON);
+ }
+
+ ~SkHighContrast_Filter() override {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkHighContrast_Filter)
+
+ SkHighContrastConfig fConfig;
+
+ friend class SkHighContrastFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+bool SkHighContrast_Filter::onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ SkRasterPipeline* p = rec.fPipeline;
+ SkArenaAlloc* alloc = rec.fAlloc;
+
+ if (!shaderIsOpaque) {
+ p->append(SkRasterPipeline::unpremul);
+ }
+
+ // Linearize before applying high-contrast filter.
+ auto tf = alloc->make<skcms_TransferFunction>();
+ if (rec.fDstCS) {
+ rec.fDstCS->transferFn(&tf->g);
+ } else {
+ // Historically we approximate untagged destinations as gamma 2.
+ // TODO: sRGB?
+ *tf = {2,1, 0,0,0,0,0};
+ }
+ p->append_transfer_function(*tf);
+
+ if (fConfig.fGrayscale) {
+ float r = SK_LUM_COEFF_R;
+ float g = SK_LUM_COEFF_G;
+ float b = SK_LUM_COEFF_B;
+ float* matrix = alloc->makeArray<float>(12);
+ matrix[0] = matrix[1] = matrix[2] = r;
+ matrix[3] = matrix[4] = matrix[5] = g;
+ matrix[6] = matrix[7] = matrix[8] = b;
+ p->append(SkRasterPipeline::matrix_3x4, matrix);
+ }
+
+ if (fConfig.fInvertStyle == InvertStyle::kInvertBrightness) {
+ float* matrix = alloc->makeArray<float>(12);
+ matrix[0] = matrix[4] = matrix[8] = -1;
+ matrix[9] = matrix[10] = matrix[11] = 1;
+ p->append(SkRasterPipeline::matrix_3x4, matrix);
+ } else if (fConfig.fInvertStyle == InvertStyle::kInvertLightness) {
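+        // In HSL space, invert only the lightness channel: L' = 1 - L.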
+ p->append(SkRasterPipeline::rgb_to_hsl);
+ float* matrix = alloc->makeArray<float>(12);
+ matrix[0] = matrix[4] = matrix[11] = 1;
+ matrix[8] = -1;
+ p->append(SkRasterPipeline::matrix_3x4, matrix);
+ p->append(SkRasterPipeline::hsl_to_rgb);
+ }
+
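+    // Contrast is a linear ramp y = m*x + b chosen to pivot at 0.5:
+    // c == 0 gives m = 1, b = 0 (identity), and the slope grows without
+    // bound as c approaches 1.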
+ if (fConfig.fContrast != 0.0) {
+ float* matrix = alloc->makeArray<float>(12);
+ float c = fConfig.fContrast;
+ float m = (1 + c) / (1 - c);
+ float b = (-0.5f * m + 0.5f);
+ matrix[0] = matrix[4] = matrix[8] = m;
+ matrix[9] = matrix[10] = matrix[11] = b;
+ p->append(SkRasterPipeline::matrix_3x4, matrix);
+ }
+
+ p->append(SkRasterPipeline::clamp_0);
+ p->append(SkRasterPipeline::clamp_1);
+
+ // Re-encode back from linear.
+ auto invTF = alloc->make<skcms_TransferFunction>();
+ if (rec.fDstCS) {
+ rec.fDstCS->invTransferFn(&invTF->g);
+ } else {
+ // See above... historically untagged == gamma 2 in this filter.
+        *invTF = {0.5f,1, 0,0,0,0,0};
+ }
+ p->append_transfer_function(*invTF);
+
+ if (!shaderIsOpaque) {
+ p->append(SkRasterPipeline::premul);
+ }
+ return true;
+}
+
+void SkHighContrast_Filter::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeBool(fConfig.fGrayscale);
+ buffer.writeInt(static_cast<int>(fConfig.fInvertStyle));
+ buffer.writeScalar(fConfig.fContrast);
+}
+
+sk_sp<SkFlattenable> SkHighContrast_Filter::CreateProc(SkReadBuffer& buffer) {
+ SkHighContrastConfig config;
+ config.fGrayscale = buffer.readBool();
+ config.fInvertStyle = buffer.read32LE(InvertStyle::kLast);
+ config.fContrast = buffer.readScalar();
+
+ return SkHighContrastFilter::Make(config);
+}
+
+sk_sp<SkColorFilter> SkHighContrastFilter::Make(
+ const SkHighContrastConfig& config) {
+ if (!config.isValid()) {
+ return nullptr;
+ }
+ return sk_make_sp<SkHighContrast_Filter>(config);
+}
+
+void SkHighContrastFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkHighContrast_Filter);
+}
+
+#if SK_SUPPORT_GPU
+class HighContrastFilterEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkHighContrastConfig& config,
+ bool linearize) {
+ return std::unique_ptr<GrFragmentProcessor>(new HighContrastFilterEffect(config,
+ linearize));
+ }
+
+ const char* name() const override { return "HighContrastFilter"; }
+
+ const SkHighContrastConfig& config() const { return fConfig; }
+ bool linearize() const { return fLinearize; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return Make(fConfig, fLinearize);
+ }
+
+private:
+ HighContrastFilterEffect(const SkHighContrastConfig& config, bool linearize)
+ : INHERITED(kHighContrastFilterEffect_ClassID, kNone_OptimizationFlags)
+ , fConfig(config)
+ , fLinearize(linearize) {}
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ virtual void onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const HighContrastFilterEffect& that = other.cast<HighContrastFilterEffect>();
+ return fConfig.fGrayscale == that.fConfig.fGrayscale &&
+ fConfig.fInvertStyle == that.fConfig.fInvertStyle &&
+ fConfig.fContrast == that.fConfig.fContrast &&
+ fLinearize == that.fLinearize;
+ }
+
+ SkHighContrastConfig fConfig;
+ bool fLinearize;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GLHighContrastFilterEffect : public GrGLSLFragmentProcessor {
+public:
+ static void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+ void emitCode(EmitArgs& args) override;
+
+private:
+ UniformHandle fContrastUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+GrGLSLFragmentProcessor* HighContrastFilterEffect::onCreateGLSLInstance() const {
+ return new GLHighContrastFilterEffect();
+}
+
+void HighContrastFilterEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLHighContrastFilterEffect::GenKey(*this, caps, b);
+}
+
+void GLHighContrastFilterEffect::onSetData(const GrGLSLProgramDataManager& pdm,
+ const GrFragmentProcessor& proc) {
+ const HighContrastFilterEffect& hcfe = proc.cast<HighContrastFilterEffect>();
+ pdm.set1f(fContrastUni, hcfe.config().fContrast);
+}
+
+void GLHighContrastFilterEffect::GenKey(
+ const GrProcessor& proc, const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ const HighContrastFilterEffect& hcfe = proc.cast<HighContrastFilterEffect>();
+ b->add32(static_cast<uint32_t>(hcfe.config().fGrayscale));
+ b->add32(static_cast<uint32_t>(hcfe.config().fInvertStyle));
+ b->add32(hcfe.linearize() ? 1 : 0);
+}
+
+void GLHighContrastFilterEffect::emitCode(EmitArgs& args) {
+ const HighContrastFilterEffect& hcfe = args.fFp.cast<HighContrastFilterEffect>();
+ const SkHighContrastConfig& config = hcfe.config();
+
+ const char* contrast;
+ fContrastUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "contrast", &contrast);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("half4 color = %s;", args.fInputColor);
+
+ // Unpremultiply. The max() is to guard against 0 / 0.
+ fragBuilder->codeAppendf("half nonZeroAlpha = max(color.a, 0.0001);");
+ fragBuilder->codeAppendf("color = half4(color.rgb / nonZeroAlpha, nonZeroAlpha);");
+
+ if (hcfe.linearize()) {
+ fragBuilder->codeAppend("color.rgb = color.rgb * color.rgb;");
+ }
+
+ // Grayscale.
+ if (config.fGrayscale) {
+ fragBuilder->codeAppendf("half luma = dot(color, half4(%f, %f, %f, 0));",
+ SK_LUM_COEFF_R, SK_LUM_COEFF_G, SK_LUM_COEFF_B);
+ fragBuilder->codeAppendf("color = half4(luma, luma, luma, 0);");
+ }
+
+ if (config.fInvertStyle == InvertStyle::kInvertBrightness) {
+ fragBuilder->codeAppendf("color = half4(1, 1, 1, 1) - color;");
+ }
+
+ if (config.fInvertStyle == InvertStyle::kInvertLightness) {
+ // Convert from RGB to HSL.
+ fragBuilder->codeAppendf("half fmax = max(color.r, max(color.g, color.b));");
+ fragBuilder->codeAppendf("half fmin = min(color.r, min(color.g, color.b));");
+ fragBuilder->codeAppendf("half l = (fmax + fmin) / 2;");
+
+ fragBuilder->codeAppendf("half h;");
+ fragBuilder->codeAppendf("half s;");
+
+ fragBuilder->codeAppendf("if (fmax == fmin) {");
+ fragBuilder->codeAppendf(" h = 0;");
+ fragBuilder->codeAppendf(" s = 0;");
+ fragBuilder->codeAppendf("} else {");
+ fragBuilder->codeAppendf(" half d = fmax - fmin;");
+ fragBuilder->codeAppendf(" s = l > 0.5 ?");
+ fragBuilder->codeAppendf(" d / (2 - fmax - fmin) :");
+ fragBuilder->codeAppendf(" d / (fmax + fmin);");
+        // We'd like to just write "if (color.r == fmax) { ... }". On many GPUs
+        // running the angle_d3d9_es2 config, that failed; it seems max(x, y) is
+        // not necessarily equal to either x or y there. Several fixes were
+        // tried, and this was the only reasonable one.
+ fragBuilder->codeAppendf(" if (color.r >= color.g && color.r >= color.b) {");
+ fragBuilder->codeAppendf(" h = (color.g - color.b) / d + ");
+ fragBuilder->codeAppendf(" (color.g < color.b ? 6 : 0);");
+ fragBuilder->codeAppendf(" } else if (color.g >= color.b) {");
+ fragBuilder->codeAppendf(" h = (color.b - color.r) / d + 2;");
+ fragBuilder->codeAppendf(" } else {");
+ fragBuilder->codeAppendf(" h = (color.r - color.g) / d + 4;");
+ fragBuilder->codeAppendf(" }");
+ fragBuilder->codeAppendf("}");
+ fragBuilder->codeAppendf("h /= 6;");
+ fragBuilder->codeAppendf("l = 1.0 - l;");
+ // Convert back from HSL to RGB.
+ SkString hue2rgbFuncName;
+ const GrShaderVar gHue2rgbArgs[] = {
+ GrShaderVar("p", kHalf_GrSLType),
+ GrShaderVar("q", kHalf_GrSLType),
+ GrShaderVar("t", kHalf_GrSLType),
+ };
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "hue2rgb",
+ SK_ARRAY_COUNT(gHue2rgbArgs),
+ gHue2rgbArgs,
+ "if (t < 0)"
+ " t += 1;"
+ "if (t > 1)"
+ " t -= 1;"
+ "if (t < 1/6.)"
+ " return p + (q - p) * 6 * t;"
+ "if (t < 1/2.)"
+ " return q;"
+ "if (t < 2/3.)"
+ " return p + (q - p) * (2/3. - t) * 6;"
+ "return p;",
+ &hue2rgbFuncName);
+ fragBuilder->codeAppendf("if (s == 0) {");
+ fragBuilder->codeAppendf(" color = half4(l, l, l, 0);");
+ fragBuilder->codeAppendf("} else {");
+ fragBuilder->codeAppendf(" half q = l < 0.5 ? l * (1 + s) : l + s - l * s;");
+ fragBuilder->codeAppendf(" half p = 2 * l - q;");
+ fragBuilder->codeAppendf(" color.r = %s(p, q, h + 1/3.);", hue2rgbFuncName.c_str());
+ fragBuilder->codeAppendf(" color.g = %s(p, q, h);", hue2rgbFuncName.c_str());
+ fragBuilder->codeAppendf(" color.b = %s(p, q, h - 1/3.);", hue2rgbFuncName.c_str());
+ fragBuilder->codeAppendf("}");
+ }
+
+ // Contrast.
+ fragBuilder->codeAppendf("if (%s != 0) {", contrast);
+ fragBuilder->codeAppendf(" half m = (1 + %s) / (1 - %s);", contrast, contrast);
+ fragBuilder->codeAppendf(" half off = (-0.5 * m + 0.5);");
+ fragBuilder->codeAppendf(" color = m * color + off;");
+ fragBuilder->codeAppendf("}");
+
+ // Clamp.
+ fragBuilder->codeAppendf("color = saturate(color);");
+
+ if (hcfe.linearize()) {
+ fragBuilder->codeAppend("color.rgb = sqrt(color.rgb);");
+ }
+
+ // Restore the original alpha and premultiply.
+ fragBuilder->codeAppendf("color.a = %s.a;", args.fInputColor);
+ fragBuilder->codeAppendf("color.rgb *= color.a;");
+
+ // Copy to the output color.
+ fragBuilder->codeAppendf("%s = color;", args.fOutputColor);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkHighContrast_Filter::asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo& csi) const {
+ bool linearize = !csi.isLinearlyBlended();
+ return HighContrastFilterEffect::Make(fConfig, linearize);
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
new file mode 100644
index 0000000000..42592211a9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkBlurDrawLooper.h"
+#include "include/effects/SkLayerDrawLooper.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodePriv.h"
+
+SkLayerDrawLooper::LayerInfo::LayerInfo() {
+ fPaintBits = 0; // ignore our paint fields
+ fColorMode = SkBlendMode::kDst; // ignore our color
+ fOffset.set(0, 0);
+ fPostTranslate = false;
+}
+
+SkLayerDrawLooper::SkLayerDrawLooper()
+ : fRecs(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::~SkLayerDrawLooper() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkLayerDrawLooper::Context*
+SkLayerDrawLooper::makeContext(SkArenaAlloc* alloc) const {
+ return alloc->make<LayerDrawLooperContext>(this);
+}
+
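+// Resolves the layer's color against the current paint color: kSrc and kDst
+// short-circuit, every other mode is applied in premultiplied space.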
+static SkColor4f xferColor(const SkColor4f& src, const SkColor4f& dst, SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kSrc:
+ return src;
+ case SkBlendMode::kDst:
+ return dst;
+ default: {
+ SkPMColor4f pmS = src.premul();
+ SkPMColor4f pmD = dst.premul();
+ return SkBlendMode_Apply(mode, pmS, pmD).unpremul();
+ }
+ }
+}
+
+// Even with kEntirePaint_Bits, we always ensure that the master paint's
+// text-encoding is respected, since that controls how we interpret the
+// text/length parameters of a draw[Pos]Text call.
+void SkLayerDrawLooper::LayerDrawLooperContext::ApplyInfo(
+ SkPaint* dst, const SkPaint& src, const LayerInfo& info) {
+ SkColor4f srcColor = src.getColor4f();
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // The framework may respect the alpha value on the original paint.
+ // Match this legacy behavior.
+ if (src.getAlpha() == 255) {
+ srcColor.fA = dst->getColor4f().fA;
+ }
+#endif
+ dst->setColor4f(xferColor(srcColor, dst->getColor4f(), (SkBlendMode)info.fColorMode),
+ sk_srgb_singleton());
+
+ BitFlags bits = info.fPaintBits;
+
+ if (0 == bits) {
+ return;
+ }
+ if (kEntirePaint_Bits == bits) {
+ // we've already computed these, so save it from the assignment
+ bool aa = dst->isAntiAlias();
+ bool di = dst->isDither();
+ SkColor4f c = dst->getColor4f();
+ *dst = src;
+ dst->setAntiAlias(aa);
+ dst->setDither(di);
+ dst->setColor4f(c, sk_srgb_singleton());
+ return;
+ }
+
+ if (bits & kStyle_Bit) {
+ dst->setStyle(src.getStyle());
+ dst->setStrokeWidth(src.getStrokeWidth());
+ dst->setStrokeMiter(src.getStrokeMiter());
+ dst->setStrokeCap(src.getStrokeCap());
+ dst->setStrokeJoin(src.getStrokeJoin());
+ }
+
+ if (bits & kPathEffect_Bit) {
+ dst->setPathEffect(src.refPathEffect());
+ }
+ if (bits & kMaskFilter_Bit) {
+ dst->setMaskFilter(src.refMaskFilter());
+ }
+ if (bits & kShader_Bit) {
+ dst->setShader(src.refShader());
+ }
+ if (bits & kColorFilter_Bit) {
+ dst->setColorFilter(src.refColorFilter());
+ }
+ if (bits & kXfermode_Bit) {
+ dst->setBlendMode(src.getBlendMode());
+ }
+
+ // we don't override these
+#if 0
+ dst->setTypeface(src.getTypeface());
+ dst->setTextSize(src.getTextSize());
+ dst->setTextScaleX(src.getTextScaleX());
+ dst->setRasterizer(src.getRasterizer());
+ dst->setLooper(src.getLooper());
+ dst->setTextEncoding(src.getTextEncoding());
+ dst->setHinting(src.getHinting());
+#endif
+}
+
+SkLayerDrawLooper::LayerDrawLooperContext::LayerDrawLooperContext(
+ const SkLayerDrawLooper* looper) : fCurrRec(looper->fRecs) {}
+
+bool SkLayerDrawLooper::LayerDrawLooperContext::next(Info* info, SkPaint* paint) {
+ if (nullptr == fCurrRec) {
+ return false;
+ }
+
+ ApplyInfo(paint, fCurrRec->fPaint, fCurrRec->fInfo);
+
+ if (info) {
+ info->fTranslate = fCurrRec->fInfo.fOffset;
+ info->fApplyPostCTM = fCurrRec->fInfo.fPostTranslate;
+ }
+ fCurrRec = fCurrRec->fNext;
+ return true;
+}
+
+bool SkLayerDrawLooper::asABlurShadow(BlurShadowRec* bsRec) const {
+ if (fCount != 2) {
+ return false;
+ }
+ const Rec* rec = fRecs;
+
+ // bottom layer needs to be just blur(maskfilter)
+ if ((rec->fInfo.fPaintBits & ~kMaskFilter_Bit)) {
+ return false;
+ }
+ if (SkBlendMode::kSrc != (SkBlendMode)rec->fInfo.fColorMode) {
+ return false;
+ }
+ const SkMaskFilter* mf = rec->fPaint.getMaskFilter();
+ if (nullptr == mf) {
+ return false;
+ }
+ SkMaskFilterBase::BlurRec maskBlur;
+ if (!as_MFB(mf)->asABlur(&maskBlur)) {
+ return false;
+ }
+
+ rec = rec->fNext;
+ // top layer needs to be "plain"
+ if (rec->fInfo.fPaintBits) {
+ return false;
+ }
+ if (SkBlendMode::kDst != (SkBlendMode)rec->fInfo.fColorMode) {
+ return false;
+ }
+ if (!rec->fInfo.fOffset.equals(0, 0)) {
+ return false;
+ }
+
+ if (bsRec) {
+ bsRec->fSigma = maskBlur.fSigma;
+ bsRec->fOffset = fRecs->fInfo.fOffset;
+ // TODO: Update BlurShadowRec to use SkColor4f?
+ bsRec->fColor = fRecs->fPaint.getColor();
+ bsRec->fStyle = maskBlur.fStyle;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLayerDrawLooper::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fCount);
+
+ Rec* rec = fRecs;
+ for (int i = 0; i < fCount; i++) {
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ buffer.writeInt(0);
+
+ buffer.writeInt(rec->fInfo.fPaintBits);
+ buffer.writeInt((int)rec->fInfo.fColorMode);
+ buffer.writePoint(rec->fInfo.fOffset);
+ buffer.writeBool(rec->fInfo.fPostTranslate);
+ buffer.writePaint(rec->fPaint);
+ rec = rec->fNext;
+ }
+}
+
+sk_sp<SkFlattenable> SkLayerDrawLooper::CreateProc(SkReadBuffer& buffer) {
+ int count = buffer.readInt();
+
+ Builder builder;
+ for (int i = 0; i < count; i++) {
+ LayerInfo info;
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ (void)buffer.readInt();
+
+ info.fPaintBits = buffer.readInt();
+ info.fColorMode = (SkBlendMode)buffer.readInt();
+ buffer.readPoint(&info.fOffset);
+ info.fPostTranslate = buffer.readBool();
+ buffer.readPaint(builder.addLayerOnTop(info), nullptr);
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ }
+ return builder.detach();
+}
+
+SkLayerDrawLooper::Builder::Builder()
+ : fRecs(nullptr),
+ fTopRec(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::Builder::~Builder() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayer(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = fRecs;
+ rec->fInfo = info;
+ fRecs = rec;
+ if (nullptr == fTopRec) {
+ fTopRec = rec;
+ }
+
+ return &rec->fPaint;
+}
+
+void SkLayerDrawLooper::Builder::addLayer(SkScalar dx, SkScalar dy) {
+ LayerInfo info;
+
+ info.fOffset.set(dx, dy);
+ (void)this->addLayer(info);
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayerOnTop(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = nullptr;
+ rec->fInfo = info;
+ if (nullptr == fRecs) {
+ fRecs = rec;
+ } else {
+ SkASSERT(fTopRec);
+ fTopRec->fNext = rec;
+ }
+ fTopRec = rec;
+
+ return &rec->fPaint;
+}
+
+sk_sp<SkDrawLooper> SkLayerDrawLooper::Builder::detach() {
+ SkLayerDrawLooper* looper = new SkLayerDrawLooper;
+ looper->fCount = fCount;
+ looper->fRecs = fRecs;
+
+ fCount = 0;
+ fRecs = nullptr;
+ fTopRec = nullptr;
+
+ return sk_sp<SkDrawLooper>(looper);
+}
+
+sk_sp<SkDrawLooper> SkBlurDrawLooper::Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy)
+{
+ return Make(SkColor4f::FromColor(color), sk_srgb_singleton(), sigma, dx, dy);
+}
+
+sk_sp<SkDrawLooper> SkBlurDrawLooper::Make(SkColor4f color, SkColorSpace* cs,
+ SkScalar sigma, SkScalar dx, SkScalar dy)
+{
+ sk_sp<SkMaskFilter> blur = nullptr;
+ if (sigma > 0.0f) {
+ blur = SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, true);
+ }
+
+ SkLayerDrawLooper::Builder builder;
+
+ // First layer
+ SkLayerDrawLooper::LayerInfo defaultLayer;
+ builder.addLayer(defaultLayer);
+
+ // Blur layer
+ SkLayerDrawLooper::LayerInfo blurInfo;
+ blurInfo.fColorMode = SkBlendMode::kSrc;
+ blurInfo.fPaintBits = SkLayerDrawLooper::kMaskFilter_Bit;
+ blurInfo.fOffset = SkVector::Make(dx, dy);
+ SkPaint* paint = builder.addLayer(blurInfo);
+ paint->setMaskFilter(std::move(blur));
+ paint->setColor4f(color, cs);
+
+ return builder.detach();
+}
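
A minimal usage sketch (hypothetical; assumes only the public Skia headers) of building the same two-layer drop shadow by hand with the Builder API above. Note that addLayer() pushes onto the front of the list, so layers added later are drawn first, underneath:

    #include "include/core/SkBlurTypes.h"
    #include "include/core/SkMaskFilter.h"
    #include "include/core/SkPaint.h"
    #include "include/effects/SkLayerDrawLooper.h"

    sk_sp<SkDrawLooper> make_drop_shadow_looper() {
        SkLayerDrawLooper::Builder builder;

        // The original draw; added first, so it is drawn last, on top.
        builder.addLayer(SkLayerDrawLooper::LayerInfo());

        // The shadow: offset by (2, 2), blurred, recolored from this paint.
        SkLayerDrawLooper::LayerInfo shadow;
        shadow.fPaintBits = SkLayerDrawLooper::kMaskFilter_Bit;
        shadow.fColorMode = SkBlendMode::kSrc;
        shadow.fOffset    = SkVector::Make(2, 2);
        SkPaint* p = builder.addLayer(shadow);
        p->setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 3.0f, true));
        p->setColor(SK_ColorDKGRAY);

        return builder.detach();
    }

A looper of exactly this shape is what asABlurShadow() above recognizes and reports as a BlurShadowRec.
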
diff --git a/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
new file mode 100644
index 0000000000..f164408caa
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/effects/SkLumaColorFilter.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/effects/generated/GrLumaColorFilterEffect.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#endif
+
+bool SkLumaColorFilter::onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ rec.fPipeline->append(SkRasterPipeline::bt709_luminance_or_luma_to_alpha);
+ rec.fPipeline->append(SkRasterPipeline::clamp_0);
+ rec.fPipeline->append(SkRasterPipeline::clamp_1);
+ return true;
+}
+
+sk_sp<SkColorFilter> SkLumaColorFilter::Make() {
+ return sk_sp<SkColorFilter>(new SkLumaColorFilter);
+}
+
+SkLumaColorFilter::SkLumaColorFilter() : INHERITED() {}
+
+sk_sp<SkFlattenable> SkLumaColorFilter::CreateProc(SkReadBuffer&) {
+ return Make();
+}
+
+void SkLumaColorFilter::flatten(SkWriteBuffer&) const {}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkLumaColorFilter::asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo&) const {
+ return GrLumaColorFilterEffect::Make();
+}
+#endif
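
A short hypothetical usage sketch. Per the pipeline stages above, the filter replaces each pixel with (0, 0, 0, luma), where luma is the clamped BT.709 luminance 0.2126*R + 0.7152*G + 0.0722*B of the source color:

    #include "include/effects/SkLumaColorFilter.h"

    SkPaint paint;
    paint.setColorFilter(SkLumaColorFilter::Make());
    // Drawing with this paint yields a pure alpha/coverage image: the color
    // channels come out zero, so it is typically used as the mask layer of a
    // luma-masked saveLayer().
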
diff --git a/gfx/skia/skia/src/effects/SkOpPE.h b/gfx/skia/skia/src/effects/SkOpPE.h
new file mode 100644
index 0000000000..1f51579ebf
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOpPE.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpPE_DEFINED
+#define SkOpPE_DEFINED
+
+#include "include/effects/SkOpPathEffect.h"
+
+class SkOpPE : public SkPathEffect {
+public:
+ SkOpPE(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkOpPE)
+
+ sk_sp<SkPathEffect> fOne;
+ sk_sp<SkPathEffect> fTwo;
+ SkPathOp fOp;
+
+ typedef SkPathEffect INHERITED;
+};
+
+class SkMatrixPE : public SkPathEffect {
+public:
+ SkMatrixPE(const SkMatrix&);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkMatrixPE)
+
+ SkMatrix fMatrix;
+
+ typedef SkPathEffect INHERITED;
+};
+
+class SkStrokePE : public SkPathEffect {
+public:
+ SkStrokePE(SkScalar width, SkPaint::Join, SkPaint::Cap, SkScalar miter);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+ // TODO: override onComputeFastBounds (I think)
+
+private:
+ SK_FLATTENABLE_HOOKS(SkStrokePE)
+
+ SkScalar fWidth,
+ fMiter;
+ SkPaint::Join fJoin;
+ SkPaint::Cap fCap;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/effects/SkOpPathEffect.cpp b/gfx/skia/skia/src/effects/SkOpPathEffect.cpp
new file mode 100644
index 0000000000..e7b8a604bd
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOpPathEffect.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkOpPE.h"
+
+sk_sp<SkPathEffect> SkMergePathEffect::Make(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two,
+ SkPathOp op) {
+ return sk_sp<SkPathEffect>(new SkOpPE(std::move(one), std::move(two), op));
+}
+
+SkOpPE::SkOpPE(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op)
+ : fOne(std::move(one)), fTwo(std::move(two)), fOp(op) {}
+
+bool SkOpPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cull) const {
+ SkPath one, two;
+ if (fOne) {
+ if (!fOne->filterPath(&one, src, rec, cull)) {
+ return false;
+ }
+ } else {
+ one = src;
+ }
+ if (fTwo) {
+ if (!fTwo->filterPath(&two, src, rec, cull)) {
+ return false;
+ }
+ } else {
+ two = src;
+ }
+ return Op(one, two, fOp, dst);
+}
+
+void SkOpPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fOne.get());
+ buffer.writeFlattenable(fTwo.get());
+ buffer.write32(fOp);
+}
+
+sk_sp<SkFlattenable> SkOpPE::CreateProc(SkReadBuffer& buffer) {
+ auto one = buffer.readPathEffect();
+ auto two = buffer.readPathEffect();
+ SkPathOp op = buffer.read32LE(kReverseDifference_SkPathOp);
+ return buffer.isValid() ? SkMergePathEffect::Make(std::move(one), std::move(two), op) : nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkMatrixPathEffect::MakeTranslate(SkScalar dx, SkScalar dy) {
+ if (!SkScalarsAreFinite(dx, dy)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkMatrixPE(SkMatrix::MakeTrans(dx, dy)));
+}
+
+sk_sp<SkPathEffect> SkMatrixPathEffect::Make(const SkMatrix& matrix) {
+ if (!matrix.isFinite()) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkMatrixPE(matrix));
+}
+
+SkMatrixPE::SkMatrixPE(const SkMatrix& matrix) : fMatrix(matrix) {
+ SkASSERT(matrix.isFinite());
+}
+
+bool SkMatrixPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const {
+ src.transform(fMatrix, dst);
+ return true;
+}
+
+void SkMatrixPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(fMatrix);
+}
+
+sk_sp<SkFlattenable> SkMatrixPE::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix mx;
+ buffer.readMatrix(&mx);
+ return buffer.isValid() ? SkMatrixPathEffect::Make(mx) : nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkStrokePathEffect::Make(SkScalar width, SkPaint::Join join, SkPaint::Cap cap,
+ SkScalar miter) {
+ if (!SkScalarsAreFinite(width, miter) || width < 0 || miter < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkStrokePE(width, join, cap, miter));
+}
+
+SkStrokePE::SkStrokePE(SkScalar width, SkPaint::Join join, SkPaint::Cap cap, SkScalar miter)
+ : fWidth(width), fMiter(miter), fJoin(join), fCap(cap) {}
+
+bool SkStrokePE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const {
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ rec.setStrokeStyle(fWidth);
+ rec.setStrokeParams(fCap, fJoin, fMiter);
+ return rec.applyToPath(dst, src);
+}
+
+void SkStrokePE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fWidth);
+ buffer.writeScalar(fMiter);
+ buffer.write32(fJoin);
+ buffer.write32(fCap);
+}
+
+sk_sp<SkFlattenable> SkStrokePE::CreateProc(SkReadBuffer& buffer) {
+ SkScalar width = buffer.readScalar();
+ SkScalar miter = buffer.readScalar();
+ SkPaint::Join join = buffer.read32LE(SkPaint::kLast_Join);
+ SkPaint::Cap cap = buffer.read32LE(SkPaint::kLast_Cap);
+ return buffer.isValid() ? SkStrokePathEffect::Make(width, join, cap, miter) : nullptr;
+}
+
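
A hypothetical sketch combining the three effects implemented here. Passing nullptr for one input of SkMergePathEffect::Make() leaves that side as the unfiltered source path, as SkOpPE::onFilterPath() above shows:

    #include "include/core/SkPaint.h"
    #include "include/effects/SkOpPathEffect.h"
    #include "include/pathops/SkPathOps.h"

    // Stroke the path, then intersect the stroke outline with the original
    // fill: an "inner stroke" that never spills outside the shape.
    sk_sp<SkPathEffect> stroke = SkStrokePathEffect::Make(
            4.0f, SkPaint::kRound_Join, SkPaint::kButt_Cap, 4.0f);
    sk_sp<SkPathEffect> inner = SkMergePathEffect::Make(
            nullptr /* the source path, unmodified */, stroke,
            kIntersect_SkPathOp);

    SkPaint paint;
    paint.setPathEffect(inner);
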
diff --git a/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp b/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp
new file mode 100644
index 0000000000..c91748eca6
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkOverdrawColorFilter.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/effects/GrSkSLFP.h"
+
+GR_FP_SRC_STRING SKSL_OVERDRAW_SRC = R"(
+layout(ctype=SkPMColor) uniform half4 color0;
+layout(ctype=SkPMColor) uniform half4 color1;
+layout(ctype=SkPMColor) uniform half4 color2;
+layout(ctype=SkPMColor) uniform half4 color3;
+layout(ctype=SkPMColor) uniform half4 color4;
+layout(ctype=SkPMColor) uniform half4 color5;
+
+void main(inout half4 color) {
+ half alpha = 255.0 * color.a;
+ if (alpha < 0.5) {
+ color = color0;
+ } else if (alpha < 1.5) {
+ color = color1;
+ } else if (alpha < 2.5) {
+ color = color2;
+ } else if (alpha < 3.5) {
+ color = color3;
+ } else if (alpha < 4.5) {
+ color = color4;
+ } else {
+ color = color5;
+ }
+}
+)";
+#endif
+
+bool SkOverdrawColorFilter::onAppendStages(const SkStageRec& rec, bool shader_is_opaque) const {
+ struct Ctx : public SkRasterPipeline_CallbackCtx {
+ const SkPMColor* colors;
+ };
+ // TODO: do we care about transforming to dstCS?
+ auto ctx = rec.fAlloc->make<Ctx>();
+ ctx->colors = fColors;
+ ctx->fn = [](SkRasterPipeline_CallbackCtx* arg, int active_pixels) {
+ auto ctx = (Ctx*)arg;
+ auto pixels = (SkPMColor4f*)ctx->rgba;
+ for (int i = 0; i < active_pixels; i++) {
+ uint8_t alpha = (int)(pixels[i].fA * 255);
+ if (alpha >= kNumColors) {
+ alpha = kNumColors - 1;
+ }
+ pixels[i] = SkPMColor4f::FromPMColor(ctx->colors[alpha]);
+ }
+ };
+ rec.fPipeline->append(SkRasterPipeline::callback, ctx);
+ return true;
+}
+
+void SkOverdrawColorFilter::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeByteArray(fColors, kNumColors * sizeof(SkPMColor));
+}
+
+sk_sp<SkFlattenable> SkOverdrawColorFilter::CreateProc(SkReadBuffer& buffer) {
+ SkPMColor colors[kNumColors];
+ size_t size = buffer.getArrayCount();
+ if (!buffer.validate(size == sizeof(colors))) {
+ return nullptr;
+ }
+ if (!buffer.readByteArray(colors, sizeof(colors))) {
+ return nullptr;
+ }
+
+ return SkOverdrawColorFilter::Make(colors);
+}
+
+void SkOverdrawColorFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkOverdrawColorFilter);
+}
+#if SK_SUPPORT_GPU
+
+#include "include/private/GrRecordingContext.h"
+
+std::unique_ptr<GrFragmentProcessor> SkOverdrawColorFilter::asFragmentProcessor(
+ GrRecordingContext* context, const GrColorInfo&) const {
+ static int overdrawIndex = GrSkSLFP::NewIndex();
+ return GrSkSLFP::Make(context, overdrawIndex, "Overdraw", SKSL_OVERDRAW_SRC, fColors,
+ sizeof(fColors));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkPackBits.cpp b/gfx/skia/skia/src/effects/SkPackBits.cpp
new file mode 100644
index 0000000000..a590eb6c60
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPackBits.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTo.h"
+#include "src/effects/SkPackBits.h"
+
+#include <cstring>
+
+size_t SkPackBits::ComputeMaxSize8(size_t srcSize) {
+ // worst case is the number of 8bit values + 1 byte per (up to) 128 entries.
+ return ((srcSize + 127) >> 7) + srcSize;
+}
+
+static uint8_t* flush_same8(uint8_t dst[], uint8_t value, size_t count) {
+ while (count > 0) {
+ size_t n = count > 128 ? 128 : count;
+ *dst++ = (uint8_t)(n - 1);
+ *dst++ = (uint8_t)value;
+ count -= n;
+ }
+ return dst;
+}
+
+static uint8_t* flush_diff8(uint8_t* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT src, size_t count) {
+ while (count > 0) {
+ size_t n = count > 128 ? 128 : count;
+ *dst++ = (uint8_t)(n + 127);
+ memcpy(dst, src, n);
+ src += n;
+ dst += n;
+ count -= n;
+ }
+ return dst;
+}
+
+size_t SkPackBits::Pack8(const uint8_t* SK_RESTRICT src, size_t srcSize,
+ uint8_t* SK_RESTRICT dst, size_t dstSize) {
+ if (dstSize < ComputeMaxSize8(srcSize)) {
+ return 0;
+ }
+
+ uint8_t* const origDst = dst;
+ const uint8_t* stop = src + srcSize;
+
+ for (intptr_t count = stop - src; count > 0; count = stop - src) {
+ if (1 == count) {
+ *dst++ = 0;
+ *dst++ = *src;
+ break;
+ }
+
+ unsigned value = *src;
+ const uint8_t* s = src + 1;
+
+ if (*s == value) { // accumulate same values...
+ do {
+ s++;
+ if (s == stop) {
+ break;
+ }
+ } while (*s == value);
+ dst = flush_same8(dst, value, SkToInt(s - src));
+ } else { // accumulate diff values...
+ do {
+ if (++s == stop) {
+ goto FLUSH_DIFF;
+ }
+                // only stop if we hit 3 in a row,
+                // otherwise we can exceed ComputeMaxSize8
+ } while (*s != s[-1] || s[-1] != s[-2]);
+ s -= 2; // back up so we don't grab the "same" values that follow
+ FLUSH_DIFF:
+ dst = flush_diff8(dst, src, SkToInt(s - src));
+ }
+ src = s;
+ }
+ return dst - origDst;
+}
+
+int SkPackBits::Unpack8(const uint8_t* SK_RESTRICT src, size_t srcSize,
+ uint8_t* SK_RESTRICT dst, size_t dstSize) {
+ uint8_t* const origDst = dst;
+ uint8_t* const endDst = dst + dstSize;
+ const uint8_t* stop = src + srcSize;
+
+ while (src < stop) {
+ unsigned n = *src++;
+ if (n <= 127) { // repeat count (n + 1)
+ n += 1;
+ if (dst > (endDst - n) || src >= stop) {
+ return 0;
+ }
+ memset(dst, *src++, n);
+        } else {    // literal run of (n - 127) bytes
+ n -= 127;
+ if (dst > (endDst - n) || src > (stop - n)) {
+ return 0;
+ }
+ memcpy(dst, src, n);
+ src += n;
+ }
+ dst += n;
+ }
+ SkASSERT(src <= stop);
+ SkASSERT(dst <= endDst);
+ return SkToInt(dst - origDst);
+}
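
A worked example of the format (hypothetical round trip). A header byte in 0x00..0x7F means "repeat the next byte header+1 times"; a header in 0x80..0xFF means "copy the next header-127 bytes verbatim":

    const uint8_t src[] = { 5, 5, 5, 5, 7, 8, 9 };
    uint8_t packed[8];    // >= SkPackBits::ComputeMaxSize8(7) == 8
    uint8_t unpacked[7];

    size_t packedSize = SkPackBits::Pack8(src, sizeof(src),
                                          packed, sizeof(packed));
    // packedSize == 6, packed == { 0x03, 0x05, 0x82, 0x07, 0x08, 0x09 }:
    //   0x03 0x05            -> repeat 0x05 four times
    //   0x82 0x07 0x08 0x09  -> three literal bytes
    int n = SkPackBits::Unpack8(packed, packedSize, unpacked, sizeof(unpacked));
    // n == 7 and unpacked matches src.
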
diff --git a/gfx/skia/skia/src/effects/SkPackBits.h b/gfx/skia/skia/src/effects/SkPackBits.h
new file mode 100644
index 0000000000..b04e28d56c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPackBits.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPackBits_DEFINED
+#define SkPackBits_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkPackBits {
+public:
+ /** Given the number of 8bit values that will be passed to Pack8,
+ returns the worst-case size needed for the dst[] buffer.
+ */
+ static size_t ComputeMaxSize8(size_t srcSize);
+
+ /** Write the src array into a packed format. The packing process may end
+ up writing more bytes than it read, so dst[] must be large enough.
+ @param src Input array of 8bit values
+ @param srcSize Number of entries in src[]
+ @param dst Buffer (allocated by caller) to write the packed data
+ into
+ @param dstSize Number of bytes in the output buffer.
+ @return the number of bytes written to dst[]
+ */
+ static size_t Pack8(const uint8_t src[], size_t srcSize, uint8_t dst[],
+ size_t dstSize);
+
+ /** Unpack the data in src[], and expand it into dst[]. The src[] data was
+ written by a previous call to Pack8.
+ @param src Input data to unpack, previously created by Pack8.
+ @param srcSize Number of bytes of src to unpack
+ @param dst Buffer (allocated by caller) to expand the src[] into.
+ @param dstSize Number of bytes in the output buffer.
+ @return the number of bytes written into dst, or 0 if srcSize or dstSize are too small.
+ */
+ static int Unpack8(const uint8_t src[], size_t srcSize, uint8_t dst[],
+ size_t dstSize);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp b/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp
new file mode 100644
index 0000000000..1cae6a1a77
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkShaderMaskFilter.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkShaderMF : public SkMaskFilterBase {
+public:
+ SkShaderMF(sk_sp<SkShader> shader) : fShader(std::move(shader)) {}
+
+ SkMask::Format getFormat() const override { return SkMask::kA8_Format; }
+
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+ void computeFastBounds(const SkRect& src, SkRect* dst) const override {
+ *dst = src;
+ }
+
+ bool asABlur(BlurRec*) const override { return false; }
+
+protected:
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs&) const override;
+ bool onHasFragmentProcessor() const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkShaderMF)
+
+ sk_sp<SkShader> fShader;
+
+ SkShaderMF(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+
+ friend class SkShaderMaskFilter;
+
+ typedef SkMaskFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkShaderMF::CreateProc(SkReadBuffer& buffer) {
+ return SkShaderMaskFilter::Make(buffer.readShader());
+}
+
+void SkShaderMF::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+}
+
+static void rect_memcpy(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ size_t copyBytes, int rows) {
+ for (int i = 0; i < rows; ++i) {
+ memcpy(dst, src, copyBytes);
+ dst = (char*)dst + dstRB;
+ src = (const char*)src + srcRB;
+ }
+}
+
+bool SkShaderMF::filterMask(SkMask* dst, const SkMask& src, const SkMatrix& ctm,
+ SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ if (margin) {
+ margin->set(0, 0);
+ }
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = src.fBounds.width(); // need alignment?
+ dst->fFormat = SkMask::kA8_Format;
+
+ if (src.fImage == nullptr) {
+ dst->fImage = nullptr;
+ return true;
+ }
+ size_t size = dst->computeImageSize();
+ if (0 == size) {
+ return false; // too big to allocate, abort
+ }
+
+ // Allocate and initialize dst image with a copy of the src image
+ dst->fImage = SkMask::AllocImage(size);
+ rect_memcpy(dst->fImage, dst->fRowBytes, src.fImage, src.fRowBytes,
+ src.fBounds.width() * sizeof(uint8_t), src.fBounds.height());
+
+ // Now we have a dst-mask, just need to setup a canvas and draw into it
+ SkBitmap bitmap;
+ if (!bitmap.installMaskPixels(*dst)) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setShader(fShader);
+ paint.setFilterQuality(SkFilterQuality::kLow_SkFilterQuality);
+ // this blendmode is the trick: we only draw the shader where the mask is
+ paint.setBlendMode(SkBlendMode::kSrcIn);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(dst->fBounds.fLeft), -SkIntToScalar(dst->fBounds.fTop));
+ canvas.concat(ctm);
+ canvas.drawPaint(paint);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+
+std::unique_ptr<GrFragmentProcessor> SkShaderMF::onAsFragmentProcessor(const GrFPArgs& args) const {
+ return GrFragmentProcessor::MulInputByChildAlpha(as_SB(fShader)->asFragmentProcessor(args));
+}
+
+bool SkShaderMF::onHasFragmentProcessor() const {
+ return true;
+}
+
+#endif
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkMaskFilter> SkShaderMaskFilter::Make(sk_sp<SkShader> shader) {
+ return shader ? sk_sp<SkMaskFilter>(new SkShaderMF(std::move(shader))) : nullptr;
+}
+
+void SkShaderMaskFilter::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkShaderMF); }
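
A hypothetical usage sketch. Only the shader's alpha matters: the raster path above draws with SkBlendMode::kSrcIn into an A8 copy of the mask, and the GPU path multiplies the input by the child's alpha. A gradient therefore works as a coverage ramp:

    #include "include/effects/SkGradientShader.h"
    #include "include/effects/SkShaderMaskFilter.h"

    SkPoint pts[] = {{0, 0}, {256, 0}};
    SkColor colors[] = {SK_ColorBLACK, SK_ColorTRANSPARENT};
    sk_sp<SkShader> fade = SkGradientShader::MakeLinear(
            pts, colors, nullptr, 2, SkTileMode::kClamp);

    SkPaint paint;
    paint.setMaskFilter(SkShaderMaskFilter::Make(std::move(fade)));
    // Anything drawn with `paint` now fades out between x=0 and x=256.
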
diff --git a/gfx/skia/skia/src/effects/SkTableColorFilter.cpp b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
new file mode 100644
index 0000000000..601da8f149
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkTableColorFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+static const uint8_t gIdentityTable[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
+ 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+ 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+};
+
+class SkTable_ColorFilter : public SkColorFilter {
+public:
+ SkTable_ColorFilter(const uint8_t tableA[], const uint8_t tableR[],
+ const uint8_t tableG[], const uint8_t tableB[]) {
+ fBitmap = nullptr;
+ fFlags = 0;
+
+ uint8_t* dst = fStorage;
+ if (tableA) {
+ memcpy(dst, tableA, 256);
+ dst += 256;
+ fFlags |= kA_Flag;
+ }
+ if (tableR) {
+ memcpy(dst, tableR, 256);
+ dst += 256;
+ fFlags |= kR_Flag;
+ }
+ if (tableG) {
+ memcpy(dst, tableG, 256);
+ dst += 256;
+ fFlags |= kG_Flag;
+ }
+ if (tableB) {
+ memcpy(dst, tableB, 256);
+ fFlags |= kB_Flag;
+ }
+ }
+
+ ~SkTable_ColorFilter() override { delete fBitmap; }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+ enum {
+ kA_Flag = 1 << 0,
+ kR_Flag = 1 << 1,
+ kG_Flag = 1 << 2,
+ kB_Flag = 1 << 3,
+ };
+
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ const uint8_t *r = gIdentityTable,
+ *g = gIdentityTable,
+ *b = gIdentityTable,
+ *a = gIdentityTable;
+ const uint8_t* ptr = fStorage;
+ if (fFlags & kA_Flag) { a = ptr; ptr += 256; }
+ if (fFlags & kR_Flag) { r = ptr; ptr += 256; }
+ if (fFlags & kG_Flag) { g = ptr; ptr += 256; }
+ if (fFlags & kB_Flag) { b = ptr; }
+
+ SkRasterPipeline* p = rec.fPipeline;
+ if (!shaderIsOpaque) {
+ p->append(SkRasterPipeline::unpremul);
+ }
+
+ struct Tables { const uint8_t *r, *g, *b, *a; };
+ p->append(SkRasterPipeline::byte_tables, rec.fAlloc->make<Tables>(Tables{r,g,b,a}));
+
+ bool definitelyOpaque = shaderIsOpaque && a[0xff] == 0xff;
+ if (!definitelyOpaque) {
+ p->append(SkRasterPipeline::premul);
+ }
+ return true;
+ }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTable_ColorFilter)
+
+ void getTableAsBitmap(SkBitmap* table) const;
+
+ mutable const SkBitmap* fBitmap; // lazily allocated
+
+ uint8_t fStorage[256 * 4];
+ unsigned fFlags;
+
+ friend class SkTableColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+static const uint8_t gCountNibBits[] = {
+ 0, 1, 1, 2,
+ 1, 2, 2, 3,
+ 1, 2, 2, 3,
+ 2, 3, 3, 4
+};
+
+#include "src/effects/SkPackBits.h"
+
+void SkTable_ColorFilter::flatten(SkWriteBuffer& buffer) const {
+ uint8_t storage[5*256];
+ int count = gCountNibBits[fFlags & 0xF];
+ size_t size = SkPackBits::Pack8(fStorage, count * 256, storage,
+ sizeof(storage));
+
+ buffer.write32(fFlags);
+ buffer.writeByteArray(storage, size);
+}
+
+sk_sp<SkFlattenable> SkTable_ColorFilter::CreateProc(SkReadBuffer& buffer) {
+ const int flags = buffer.read32();
+ const size_t count = gCountNibBits[flags & 0xF];
+ SkASSERT(count <= 4);
+
+ uint8_t packedStorage[5*256];
+ size_t packedSize = buffer.getArrayCount();
+ if (!buffer.validate(packedSize <= sizeof(packedStorage))) {
+ return nullptr;
+ }
+ if (!buffer.readByteArray(packedStorage, packedSize)) {
+ return nullptr;
+ }
+
+ uint8_t unpackedStorage[4*256];
+ size_t unpackedSize = SkPackBits::Unpack8(packedStorage, packedSize,
+ unpackedStorage, sizeof(unpackedStorage));
+ // now check that we got the size we expected
+ if (!buffer.validate(unpackedSize == count*256)) {
+ return nullptr;
+ }
+
+ const uint8_t* a = nullptr;
+ const uint8_t* r = nullptr;
+ const uint8_t* g = nullptr;
+ const uint8_t* b = nullptr;
+ const uint8_t* ptr = unpackedStorage;
+
+ if (flags & kA_Flag) {
+ a = ptr;
+ ptr += 256;
+ }
+ if (flags & kR_Flag) {
+ r = ptr;
+ ptr += 256;
+ }
+ if (flags & kG_Flag) {
+ g = ptr;
+ ptr += 256;
+ }
+ if (flags & kB_Flag) {
+ b = ptr;
+ ptr += 256;
+ }
+ return SkTableColorFilter::MakeARGB(a, r, g, b);
+}
+
+void SkTable_ColorFilter::getTableAsBitmap(SkBitmap* table) const {
+ if (table) {
+ if (nullptr == fBitmap) {
+ SkBitmap* bmp = new SkBitmap;
+ bmp->allocPixels(SkImageInfo::MakeA8(256, 4));
+ uint8_t* bitmapPixels = bmp->getAddr8(0, 0);
+ int offset = 0;
+ static const unsigned kFlags[] = { kA_Flag, kR_Flag, kG_Flag, kB_Flag };
+
+ for (int x = 0; x < 4; ++x) {
+ if (!(fFlags & kFlags[x])) {
+ memcpy(bitmapPixels, gIdentityTable, sizeof(gIdentityTable));
+ } else {
+ memcpy(bitmapPixels, fStorage + offset, 256);
+ offset += 256;
+ }
+ bitmapPixels += 256;
+ }
+ bmp->setImmutable();
+ fBitmap = bmp;
+ }
+ *table = *fBitmap;
+ }
+}
+
+#if SK_SUPPORT_GPU
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class ColorTableEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrRecordingContext* context,
+ const SkBitmap& bitmap);
+
+ ~ColorTableEffect() override {}
+
+ const char* name() const override { return "ColorTableEffect"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ColorTableEffect(sk_ref_sp(fTextureSampler.proxy())));
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ ColorTableEffect(sk_sp<GrTextureProxy> proxy)
+ : INHERITED(kColorTableEffect_ClassID,
+ kNone_OptimizationFlags) // Not bothering with table-specific optimizations.
+ , fTextureSampler(std::move(proxy)) {
+ this->setTextureSamplerCnt(1);
+ }
+
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ TextureSampler fTextureSampler;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GLColorTableEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*) {}
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLColorTableEffect::emitCode(EmitArgs& args) {
+ static const float kColorScaleFactor = 255.0f / 256.0f;
+ static const float kColorOffsetFactor = 1.0f / 512.0f;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ if (nullptr == args.fInputColor) {
+ // the input color is solid white (all ones).
+ static const float kMaxValue = kColorScaleFactor + kColorOffsetFactor;
+ fragBuilder->codeAppendf("\t\thalf4 coord = half4(%f, %f, %f, %f);\n",
+ kMaxValue, kMaxValue, kMaxValue, kMaxValue);
+
+ } else {
+ fragBuilder->codeAppendf("\t\thalf nonZeroAlpha = max(%s.a, .0001);\n", args.fInputColor);
+ fragBuilder->codeAppendf("\t\thalf4 coord = half4(%s.rgb / nonZeroAlpha, nonZeroAlpha);\n",
+ args.fInputColor);
+ fragBuilder->codeAppendf("\t\tcoord = coord * %f + half4(%f, %f, %f, %f);\n",
+ kColorScaleFactor,
+ kColorOffsetFactor, kColorOffsetFactor,
+ kColorOffsetFactor, kColorOffsetFactor);
+ }
+
+ SkString coord;
+
+ fragBuilder->codeAppendf("\t\t%s.a = ", args.fOutputColor);
+ coord.printf("half2(coord.a, 0.125)");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.r = ", args.fOutputColor);
+ coord.printf("half2(coord.r, 0.375)");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.g = ", args.fOutputColor);
+ coord.printf("half2(coord.g, 0.625)");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.b = ", args.fOutputColor);
+ coord.printf("half2(coord.b, 0.875)");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.rgb *= %s.a;\n", args.fOutputColor, args.fOutputColor);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+std::unique_ptr<GrFragmentProcessor> ColorTableEffect::Make(GrRecordingContext* context,
+ const SkBitmap& bitmap) {
+ SkASSERT(kPremul_SkAlphaType == bitmap.alphaType());
+ SkASSERT(bitmap.isImmutable());
+
+ if (kUnknown_GrPixelConfig == SkColorType2GrPixelConfig(bitmap.colorType())) {
+ return nullptr;
+ }
+
+ sk_sp<SkImage> srcImage = SkImage::MakeFromBitmap(bitmap);
+ if (!srcImage) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = GrMakeCachedImageProxy(context->priv().proxyProvider(),
+ std::move(srcImage));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new ColorTableEffect(std::move(proxy)));
+}
+
+void ColorTableEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLColorTableEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* ColorTableEffect::onCreateGLSLInstance() const {
+ return new GLColorTableEffect;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorTableEffect);
+
+#if GR_TEST_UTILS
+
+#include "include/gpu/GrContext.h"
+
+std::unique_ptr<GrFragmentProcessor> ColorTableEffect::TestCreate(GrProcessorTestData* d) {
+ int flags = 0;
+ uint8_t luts[256][4];
+ do {
+ for (int i = 0; i < 4; ++i) {
+ flags |= d->fRandom->nextBool() ? (1 << i): 0;
+ }
+ } while (!flags);
+ for (int i = 0; i < 4; ++i) {
+ if (flags & (1 << i)) {
+ for (int j = 0; j < 256; ++j) {
+ luts[j][i] = SkToU8(d->fRandom->nextBits(8));
+ }
+ }
+ }
+ auto filter(SkTableColorFilter::MakeARGB(
+ (flags & (1 << 0)) ? luts[0] : nullptr,
+ (flags & (1 << 1)) ? luts[1] : nullptr,
+ (flags & (1 << 2)) ? luts[2] : nullptr,
+ (flags & (1 << 3)) ? luts[3] : nullptr
+ ));
+ sk_sp<SkColorSpace> colorSpace = GrTest::TestColorSpace(d->fRandom);
+ auto fp = filter->asFragmentProcessor(
+ d->context(),
+ GrColorInfo(GrColorType::kRGBA_8888, kUnknown_SkAlphaType, std::move(colorSpace)));
+ SkASSERT(fp);
+ return fp;
+}
+#endif
+
+std::unique_ptr<GrFragmentProcessor> SkTable_ColorFilter::asFragmentProcessor(
+ GrRecordingContext* context, const GrColorInfo&) const {
+ SkBitmap bitmap;
+ this->getTableAsBitmap(&bitmap);
+
+ return ColorTableEffect::Make(context, bitmap);
+}
+
+#endif // SK_SUPPORT_GPU
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkTableColorFilter::Make(const uint8_t table[256]) {
+ return sk_make_sp<SkTable_ColorFilter>(table, table, table, table);
+}
+
+sk_sp<SkColorFilter> SkTableColorFilter::MakeARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]) {
+ return sk_make_sp<SkTable_ColorFilter>(tableA, tableR, tableG, tableB);
+}
+
+void SkTableColorFilter::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkTable_ColorFilter); }
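
A hypothetical usage sketch. Note from onAppendStages() above that the lookup runs on unpremultiplied component values (the pipeline unpremuls first and re-premuls afterwards):

    #include "include/effects/SkTableColorFilter.h"

    // Invert each color channel while leaving alpha alone.
    uint8_t invert[256];
    for (int i = 0; i < 256; ++i) {
        invert[i] = 255 - i;
    }
    SkPaint paint;
    paint.setColorFilter(SkTableColorFilter::MakeARGB(
            nullptr /* identity alpha */, invert, invert, invert));
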
diff --git a/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
new file mode 100644
index 0000000000..4fe0fb1319
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/effects/SkTableMaskFilter.h"
+#include "include/private/SkFixed.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+class SkTableMaskFilterImpl : public SkMaskFilterBase {
+public:
+ explicit SkTableMaskFilterImpl(const uint8_t table[256]);
+
+ SkMask::Format getFormat() const override;
+ bool filterMask(SkMask*, const SkMask&, const SkMatrix&, SkIPoint*) const override;
+
+protected:
+ ~SkTableMaskFilterImpl() override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTableMaskFilterImpl)
+
+ SkTableMaskFilterImpl();
+
+ uint8_t fTable[256];
+
+ typedef SkMaskFilter INHERITED;
+};
+
+SkTableMaskFilterImpl::SkTableMaskFilterImpl() {
+ for (int i = 0; i < 256; i++) {
+ fTable[i] = i;
+ }
+}
+
+SkTableMaskFilterImpl::SkTableMaskFilterImpl(const uint8_t table[256]) {
+ memcpy(fTable, table, sizeof(fTable));
+}
+
+SkTableMaskFilterImpl::~SkTableMaskFilterImpl() {}
+
+bool SkTableMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix&, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = SkAlign4(dst->fBounds.width());
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+ dst->fImage = SkMask::AllocImage(dst->computeImageSize());
+
+ const uint8_t* srcP = src.fImage;
+ uint8_t* dstP = dst->fImage;
+ const uint8_t* table = fTable;
+ int dstWidth = dst->fBounds.width();
+ int extraZeros = dst->fRowBytes - dstWidth;
+
+ for (int y = dst->fBounds.height() - 1; y >= 0; --y) {
+ for (int x = dstWidth - 1; x >= 0; --x) {
+ dstP[x] = table[srcP[x]];
+ }
+ srcP += src.fRowBytes;
+            // We can't just advance dstP by rowBytes: if there is padding
+            // between the width and the rowBytes, we zero it so that blitters
+            // which read whole rows can do so safely.
+ dstP += dstWidth;
+ for (int i = extraZeros - 1; i >= 0; --i) {
+ *dstP++ = 0;
+ }
+ }
+ }
+
+ if (margin) {
+ margin->set(0, 0);
+ }
+ return true;
+}
+
+SkMask::Format SkTableMaskFilterImpl::getFormat() const {
+ return SkMask::kA8_Format;
+}
+
+void SkTableMaskFilterImpl::flatten(SkWriteBuffer& wb) const {
+ wb.writeByteArray(fTable, 256);
+}
+
+sk_sp<SkFlattenable> SkTableMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ uint8_t table[256];
+ if (!buffer.readByteArray(table, 256)) {
+ return nullptr;
+ }
+ return sk_sp<SkFlattenable>(SkTableMaskFilter::Create(table));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMaskFilter* SkTableMaskFilter::Create(const uint8_t table[256]) {
+ return new SkTableMaskFilterImpl(table);
+}
+
+SkMaskFilter* SkTableMaskFilter::CreateGamma(SkScalar gamma) {
+ uint8_t table[256];
+ MakeGammaTable(table, gamma);
+ return new SkTableMaskFilterImpl(table);
+}
+
+SkMaskFilter* SkTableMaskFilter::CreateClip(uint8_t min, uint8_t max) {
+ uint8_t table[256];
+ MakeClipTable(table, min, max);
+ return new SkTableMaskFilterImpl(table);
+}
+
+void SkTableMaskFilter::MakeGammaTable(uint8_t table[256], SkScalar gamma) {
+ const float dx = 1 / 255.0f;
+ const float g = SkScalarToFloat(gamma);
+
+ float x = 0;
+ for (int i = 0; i < 256; i++) {
+ // float ee = powf(x, g) * 255;
+ table[i] = SkTPin(sk_float_round2int(powf(x, g) * 255), 0, 255);
+ x += dx;
+ }
+}
+
+void SkTableMaskFilter::MakeClipTable(uint8_t table[256], uint8_t min,
+ uint8_t max) {
+ if (0 == max) {
+ max = 1;
+ }
+ if (min >= max) {
+ min = max - 1;
+ }
+ SkASSERT(min < max);
+
+ SkFixed scale = (1 << 16) * 255 / (max - min);
+ memset(table, 0, min + 1);
+ for (int i = min + 1; i < max; i++) {
+ int value = SkFixedRoundToInt(scale * (i - min));
+ SkASSERT(value <= 255);
+ table[i] = value;
+ }
+ memset(table + max, 255, 256 - max);
+
+#if 0
+ int j;
+ for (j = 0; j < 256; j++) {
+ if (table[j]) {
+ break;
+ }
+ }
+ SkDebugf("%d %d start [%d]", min, max, j);
+ for (; j < 256; j++) {
+ SkDebugf(" %d", table[j]);
+ }
+ SkDebugf("\n\n");
+#endif
+}
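
For reference, MakeGammaTable() fills table[i] = round((i/255)^gamma * 255); with gamma = 2.2, for example, table[128] == 56, so half-covered antialiased pixels come out noticeably darker. A hypothetical usage sketch:

    #include "include/effects/SkTableMaskFilter.h"

    SkPaint paint;
    // CreateGamma() returns a bare owning pointer; adopt it into an sk_sp.
    paint.setMaskFilter(
            sk_sp<SkMaskFilter>(SkTableMaskFilter::CreateGamma(2.2f)));
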
diff --git a/gfx/skia/skia/src/effects/SkTrimPE.h b/gfx/skia/skia/src/effects/SkTrimPE.h
new file mode 100644
index 0000000000..b457a38fe8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTrimPE.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTrimImpl_DEFINED
+#define SkTrimImpl_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+#include "include/effects/SkTrimPathEffect.h"
+
+class SkTrimPE : public SkPathEffect {
+public:
+ SkTrimPE(SkScalar startT, SkScalar stopT, SkTrimPathEffect::Mode);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTrimPE)
+
+ const SkScalar fStartT,
+ fStopT;
+ const SkTrimPathEffect::Mode fMode;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp b/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp
new file mode 100644
index 0000000000..d0a8806d40
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPathMeasure.h"
+#include "include/effects/SkTrimPathEffect.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkTrimPE.h"
+
+namespace {
+
+class Segmentator : public SkNoncopyable {
+public:
+ Segmentator(const SkPath& src, SkPath* dst)
+ : fMeasure(src, false)
+ , fDst(dst) {}
+
+ void add(SkScalar start, SkScalar stop) {
+ SkASSERT(start < stop);
+
+ // TODO: we appear to skip zero-length contours.
+ do {
+ const auto nextOffset = fCurrentSegmentOffset + fMeasure.getLength();
+
+ if (start < nextOffset) {
+ fMeasure.getSegment(start - fCurrentSegmentOffset,
+ stop - fCurrentSegmentOffset,
+ fDst, true);
+
+ if (stop < nextOffset)
+ break;
+ }
+
+ fCurrentSegmentOffset = nextOffset;
+ } while (fMeasure.nextContour());
+ }
+
+private:
+ SkPathMeasure fMeasure;
+ SkPath* fDst;
+
+ SkScalar fCurrentSegmentOffset = 0;
+
+ using INHERITED = SkNoncopyable;
+};
+
+} // namespace
+
+SkTrimPE::SkTrimPE(SkScalar startT, SkScalar stopT, SkTrimPathEffect::Mode mode)
+ : fStartT(startT), fStopT(stopT), fMode(mode) {}
+
+bool SkTrimPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const {
+ if (fStartT >= fStopT) {
+ SkASSERT(fMode == SkTrimPathEffect::Mode::kNormal);
+ return true;
+ }
+
+ // First pass: compute the total len.
+ SkScalar len = 0;
+ SkPathMeasure meas(src, false);
+ do {
+ len += meas.getLength();
+ } while (meas.nextContour());
+
+ const auto arcStart = len * fStartT,
+ arcStop = len * fStopT;
+
+ // Second pass: actually add segments.
+ Segmentator segmentator(src, dst);
+ if (fMode == SkTrimPathEffect::Mode::kNormal) {
+ if (arcStart < arcStop) segmentator.add(arcStart, arcStop);
+ } else {
+ if (0 < arcStart) segmentator.add(0, arcStart);
+ if (arcStop < len) segmentator.add(arcStop, len);
+ }
+
+ return true;
+}
+
+void SkTrimPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fStartT);
+ buffer.writeScalar(fStopT);
+ buffer.writeUInt(static_cast<uint32_t>(fMode));
+}
+
+sk_sp<SkFlattenable> SkTrimPE::CreateProc(SkReadBuffer& buffer) {
+ const auto start = buffer.readScalar(),
+ stop = buffer.readScalar();
+ const auto mode = buffer.readUInt();
+
+ return SkTrimPathEffect::Make(start, stop,
+ (mode & 1) ? SkTrimPathEffect::Mode::kInverted : SkTrimPathEffect::Mode::kNormal);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkTrimPathEffect::Make(SkScalar startT, SkScalar stopT, Mode mode) {
+ if (!SkScalarsAreFinite(startT, stopT)) {
+ return nullptr;
+ }
+
+ if (startT <= 0 && stopT >= 1 && mode == Mode::kNormal) {
+ return nullptr;
+ }
+
+ startT = SkTPin(startT, 0.f, 1.f);
+ stopT = SkTPin(stopT, 0.f, 1.f);
+
+ if (startT >= stopT && mode == Mode::kInverted) {
+ return nullptr;
+ }
+
+ return sk_sp<SkPathEffect>(new SkTrimPE(startT, stopT, mode));
+}
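
A hypothetical usage sketch. startT and stopT are fractions of the total length across all contours, so animating stopT from 0 to 1 produces the usual "draw the line" effect; Mode::kInverted keeps the complement instead:

    #include "include/effects/SkTrimPathEffect.h"

    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);
    paint.setStrokeWidth(2);
    paint.setPathEffect(SkTrimPathEffect::Make(
            0.25f, 0.75f, SkTrimPathEffect::Mode::kNormal));
    // Only the middle half of the path's length is drawn.
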
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdFilter.cpp
new file mode 100644
index 0000000000..a059b6588a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdFilter.cpp
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkAlphaThresholdFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkRegion.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#endif
+
+namespace {
+
+class SkAlphaThresholdFilterImpl final : public SkImageFilter_Base {
+public:
+ SkAlphaThresholdFilterImpl(const SkRegion& region, SkScalar innerThreshold,
+ SkScalar outerThreshold, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr)
+ : INHERITED(&input, 1, cropRect)
+ , fRegion(region)
+ , fInnerThreshold(innerThreshold)
+ , fOuterThreshold(outerThreshold) {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> createMaskTexture(GrRecordingContext*,
+ const SkMatrix&,
+ const SkIRect& bounds) const;
+#endif
+
+private:
+ friend void SkAlphaThresholdFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkAlphaThresholdFilterImpl)
+
+ SkRegion fRegion;
+ SkScalar fInnerThreshold;
+ SkScalar fOuterThreshold;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+}  // namespace
+
+sk_sp<SkImageFilter> SkAlphaThresholdFilter::Make(const SkRegion& region, SkScalar innerThreshold,
+ SkScalar outerThreshold,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ innerThreshold = SkScalarPin(innerThreshold, 0.f, 1.f);
+ outerThreshold = SkScalarPin(outerThreshold, 0.f, 1.f);
+ if (!SkScalarIsFinite(innerThreshold) || !SkScalarIsFinite(outerThreshold)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkAlphaThresholdFilterImpl(
+ region, innerThreshold, outerThreshold, std::move(input), cropRect));
+}
+
+void SkAlphaThresholdFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkAlphaThresholdFilterImpl);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkAlphaThresholdFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar inner = buffer.readScalar();
+ SkScalar outer = buffer.readScalar();
+ SkRegion rgn;
+ buffer.readRegion(&rgn);
+ return SkAlphaThresholdFilter::Make(rgn, inner, outer, common.getInput(0),
+ &common.cropRect());
+}
+
+void SkAlphaThresholdFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fInnerThreshold);
+ buffer.writeScalar(fOuterThreshold);
+ buffer.writeRegion(fRegion);
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTextureProxy> SkAlphaThresholdFilterImpl::createMaskTexture(GrRecordingContext* context,
+ const SkMatrix& inMatrix,
+ const SkIRect& bounds) const {
+ auto rtContext = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kApprox, bounds.width(), bounds.height(), GrColorType::kAlpha_8, nullptr);
+ if (!rtContext) {
+ return nullptr;
+ }
+
+ SkRegion::Iterator iter(fRegion);
+ rtContext->clear(nullptr, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+
+ GrFixedClip clip(SkIRect::MakeWH(bounds.width(), bounds.height()));
+ while (!iter.done()) {
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ SkRect rect = SkRect::Make(iter.rect());
+
+ rtContext->drawRect(clip, std::move(paint), GrAA::kNo, inMatrix, rect);
+
+ iter.next();
+ }
+
+ return rtContext->asTextureProxyRef();
+}
+#endif
+
+sk_sp<SkSpecialImage> SkAlphaThresholdFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> inputProxy(input->asTextureProxyRef(context));
+ SkASSERT(inputProxy);
+ const bool isProtected = inputProxy->isProtected();
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ bounds.offset(-inputOffset);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ sk_sp<GrTextureProxy> maskProxy(this->createMaskTexture(context, matrix, bounds));
+ if (!maskProxy) {
+ return nullptr;
+ }
+
+ GrColorType srcColorType = SkColorTypeToGrColorType(input->colorType());
+ auto textureFP = GrSimpleTextureEffect::Make(std::move(inputProxy), srcColorType,
+ SkMatrix::MakeTrans(input->subset().x(),
+ input->subset().y()));
+ textureFP = GrColorSpaceXformEffect::Make(std::move(textureFP), input->getColorSpace(),
+ input->alphaType(), ctx.colorSpace());
+ if (!textureFP) {
+ return nullptr;
+ }
+
+ auto thresholdFP = GrAlphaThresholdFragmentProcessor::Make(std::move(maskProxy),
+ fInnerThreshold,
+ fOuterThreshold,
+ bounds);
+ if (!thresholdFP) {
+ return nullptr;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fpSeries[] = { std::move(textureFP),
+ std::move(thresholdFP) };
+ auto fp = GrFragmentProcessor::RunInSeries(fpSeries, 2);
+
+ return DrawWithFP(context, std::move(fp), bounds, ctx.colorType(), ctx.colorSpace(),
+ isProtected ? GrProtected::kYes : GrProtected::kNo);
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+ SkMatrix localInverse;
+ if (!ctx.ctm().invert(&localInverse)) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ kPremul_SkAlphaType);
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ U8CPU innerThreshold = (U8CPU)(fInnerThreshold * 0xFF);
+ U8CPU outerThreshold = (U8CPU)(fOuterThreshold * 0xFF);
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ SkIPoint srcOffset = { bounds.fLeft - inputOffset.fX, bounds.fTop - inputOffset.fY };
+ for (int y = 0; y < dstHeight; ++y) {
+ const SkColor* sptr = inputBM.getAddr32(srcOffset.fX, srcOffset.fY+y);
+
+ for (int x = 0; x < dstWidth; ++x) {
+ const SkColor& source = sptr[x];
+ SkColor outputColor(source);
+ SkPoint position;
+ localInverse.mapXY((SkScalar)x + bounds.fLeft, (SkScalar)y + bounds.fTop, &position);
+ if (fRegion.contains((int32_t)position.x(), (int32_t)position.y())) {
+ if (SkColorGetA(source) < innerThreshold) {
+ U8CPU alpha = SkColorGetA(source);
+ if (alpha == 0) {
+ alpha = 1;
+ }
+ float scale = (float)innerThreshold / alpha;
+ outputColor = SkColorSetARGB(innerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ } else {
+ if (SkColorGetA(source) > outerThreshold) {
+ float scale = (float)outerThreshold / SkColorGetA(source);
+ outputColor = SkColorSetARGB(outerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ }
+ dptr[y * dstWidth + x] = outputColor;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
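
A hypothetical usage sketch matching the raster path above: inside the region, alpha below innerThreshold is raised up to innerThreshold; outside it, alpha above outerThreshold is clamped down to outerThreshold, with RGB rescaled in both cases to keep the pixel premultiplied:

    #include "include/core/SkRegion.h"
    #include "include/effects/SkAlphaThresholdFilter.h"

    SkRegion region(SkIRect::MakeXYWH(32, 32, 64, 64));
    sk_sp<SkImageFilter> filter = SkAlphaThresholdFilter::Make(
            region, 0.8f /* innerThreshold */, 0.2f /* outerThreshold */,
            nullptr /* filter the source */);

    SkPaint paint;
    paint.setImageFilter(filter);
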
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp
new file mode 100644
index 0000000000..2b1aaaf6a5
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkArithmeticImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/effects/SkXfermodeImageFilter.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+GR_FP_SRC_STRING SKSL_ARITHMETIC_SRC = R"(
+uniform float4 k;
+in bool enforcePMColor;
+in fragmentProcessor child;
+
+void main(inout half4 color) {
+ half4 dst = sample(child);
+ color = saturate(half(k.x) * color * dst + half(k.y) * color + half(k.z) * dst + half(k.w));
+ @if (enforcePMColor) {
+ color.rgb = min(color.rgb, color.a);
+ }
+}
+)";
+#endif
+
+namespace {
+
+class ArithmeticImageFilterImpl final : public SkImageFilter_Base {
+public:
+ ArithmeticImageFilterImpl(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> inputs[2], const CropRect* cropRect)
+ : INHERITED(inputs, 2, cropRect), fK{k1, k2, k3, k4}, fEnforcePMColor(enforcePMColor) {}
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const;
+#endif
+
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const;
+
+private:
+ friend void SkArithmeticImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(ArithmeticImageFilterImpl)
+
+ bool affectsTransparentBlack() const override { return !SkScalarNearlyZero(fK[3]); }
+
+ const float fK[4];
+ const bool fEnforcePMColor;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+}  // namespace
+
+sk_sp<SkImageFilter> SkArithmeticImageFilter::Make(float k1, float k2, float k3, float k4,
+ bool enforcePMColor,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* crop) {
+ if (!SkScalarIsFinite(k1) || !SkScalarIsFinite(k2) || !SkScalarIsFinite(k3) ||
+ !SkScalarIsFinite(k4)) {
+ return nullptr;
+ }
+
+ // are we nearly some other "std" mode?
+ int mode = -1; // illegal mode
+ if (SkScalarNearlyZero(k1) && SkScalarNearlyEqual(k2, SK_Scalar1) && SkScalarNearlyZero(k3) &&
+ SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kSrc;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) &&
+ SkScalarNearlyEqual(k3, SK_Scalar1) && SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kDst;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) && SkScalarNearlyZero(k3) &&
+ SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kClear;
+ }
+ if (mode >= 0) {
+ return SkXfermodeImageFilter::Make((SkBlendMode)mode, std::move(background),
+ std::move(foreground), crop);
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = {std::move(background), std::move(foreground)};
+ return sk_sp<SkImageFilter>(
+ new ArithmeticImageFilterImpl(k1, k2, k3, k4, enforcePMColor, inputs, crop));
+}
+
+void SkArithmeticImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(ArithmeticImageFilterImpl);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> ArithmeticImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ float k[4];
+ for (int i = 0; i < 4; ++i) {
+ k[i] = buffer.readScalar();
+ }
+ const bool enforcePMColor = buffer.readBool();
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkArithmeticImageFilter::Make(k[0], k[1], k[2], k[3], enforcePMColor, common.getInput(0),
+ common.getInput(1), &common.cropRect());
+}
+
+void ArithmeticImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ for (int i = 0; i < 4; ++i) {
+ buffer.writeScalar(fK[i]);
+ }
+ buffer.writeBool(fEnforcePMColor);
+}
+
+static Sk4f pin(float min, const Sk4f& val, float max) {
+ return Sk4f::Max(min, Sk4f::Min(val, max));
+}
+
+template <bool EnforcePMColor>
+void arith_span(const float k[], SkPMColor dst[], const SkPMColor src[], int count) {
+ const Sk4f k1 = k[0] * (1/255.0f),
+ k2 = k[1],
+ k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ Sk4f s = SkNx_cast<float>(Sk4b::Load(src+i)),
+ d = SkNx_cast<float>(Sk4b::Load(dst+i)),
+ r = pin(0, k1*s*d + k2*s + k3*d + k4, 255);
+ if (EnforcePMColor) {
+ Sk4f a = SkNx_shuffle<3,3,3,3>(r);
+ r = Sk4f::Min(a, r);
+ }
+ SkNx_cast<uint8_t>(r).store(dst+i);
+ }
+}
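+
+// A worked example of arith_span (illustrative): with k = {1, 0, 0, 0} the span
+// above computes a multiply blend, r = s*d/255. For s = d = 128 that is
+// 16384/255 = 64.25, and the 0.5 folded into k4 makes the truncating cast round
+// to nearest, storing 64.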
+
+// apply mode to src==transparent (0)
+template<bool EnforcePMColor> void arith_transparent(const float k[], SkPMColor dst[], int count) {
+ const Sk4f k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ Sk4f d = SkNx_cast<float>(Sk4b::Load(dst+i)),
+ r = pin(0, k3*d + k4, 255);
+ if (EnforcePMColor) {
+ Sk4f a = SkNx_shuffle<3,3,3,3>(r);
+ r = Sk4f::Min(a, r);
+ }
+ SkNx_cast<uint8_t>(r).store(dst+i);
+ }
+}
+
+static bool intersect(SkPixmap* dst, SkPixmap* src, int srcDx, int srcDy) {
+ SkIRect dstR = SkIRect::MakeWH(dst->width(), dst->height());
+ SkIRect srcR = SkIRect::MakeXYWH(srcDx, srcDy, src->width(), src->height());
+ SkIRect sect;
+ if (!sect.intersect(dstR, srcR)) {
+ return false;
+ }
+ *dst = SkPixmap(dst->info().makeDimensions(sect.size()),
+ dst->addr(sect.fLeft, sect.fTop),
+ dst->rowBytes());
+ *src = SkPixmap(src->info().makeDimensions(sect.size()),
+ src->addr(SkTMax(0, -srcDx), SkTMax(0, -srcDy)),
+ src->rowBytes());
+ return true;
+}
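+
+// A worked example of intersect (illustrative): a 100x100 dst and a 50x50 src
+// placed at (80, 90) overlap in the 20x10 rect (80, 90)-(100, 100); *dst becomes
+// a 20x10 view anchored at (80, 90) in dst, and *src a 20x10 view anchored at
+// (0, 0) in src.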
+
+sk_sp<SkSpecialImage> ArithmeticImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> background(this->filterInput(0, ctx, &backgroundOffset));
+
+ SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> foreground(this->filterInput(1, ctx, &foregroundOffset));
+
+ SkIRect foregroundBounds = SkIRect::EmptyIRect();
+ if (foreground) {
+ foregroundBounds = SkIRect::MakeXYWH(foregroundOffset.x(), foregroundOffset.y(),
+ foreground->width(), foreground->height());
+ }
+
+ SkIRect srcBounds = SkIRect::EmptyIRect();
+ if (background) {
+ srcBounds = SkIRect::MakeXYWH(backgroundOffset.x(), backgroundOffset.y(),
+ background->width(), background->height());
+ }
+
+ srcBounds.join(foregroundBounds);
+ if (srcBounds.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ return this->filterImageGPU(ctx, background, backgroundOffset, foreground,
+ foregroundOffset, bounds);
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+    canvas->clear(0x0); // can't count on the background to fully cover the canvas
+ canvas->translate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ if (background) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ background->draw(canvas, SkIntToScalar(backgroundOffset.fX),
+ SkIntToScalar(backgroundOffset.fY), &paint);
+ }
+
+ this->drawForeground(canvas, foreground.get(), foregroundBounds);
+
+ return surf->makeImageSnapshot();
+}
+
+SkIRect ArithmeticImageFilterImpl::onFilterBounds(const SkIRect& src,
+ const SkMatrix& ctm,
+ MapDirection dir,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir) {
+ return INHERITED::onFilterBounds(src, ctm, dir, inputRect);
+ }
+
+ SkASSERT(2 == this->countInputs());
+
+ // result(i1,i2) = k1*i1*i2 + k2*i1 + k3*i2 + k4
+ // Note that background (getInput(0)) is i2, and foreground (getInput(1)) is i1.
+ auto i2 = this->getInput(0) ? this->getInput(0)->filterBounds(src, ctm, dir, nullptr) : src;
+ auto i1 = this->getInput(1) ? this->getInput(1)->filterBounds(src, ctm, dir, nullptr) : src;
+
+ // Arithmetic with non-zero k4 may influence the complete filter primitive
+ // region. [k4 > 0 => result(0,0) = k4 => result(i1,i2) >= k4]
+ if (!SkScalarNearlyZero(fK[3])) {
+ i1.join(i2);
+ return i1;
+ }
+
+    // If both k2 and k3 are non-zero, both i1 and i2 appear.
+ if (!SkScalarNearlyZero(fK[1]) && !SkScalarNearlyZero(fK[2])) {
+ i1.join(i2);
+ return i1;
+ }
+
+ // If k2 is non-zero, output can be produced whenever i1 is non-transparent.
+ // [k3 = k4 = 0 => result(i1,i2) = k1*i1*i2 + k2*i1 = (k1*i2 + k2)*i1]
+ if (!SkScalarNearlyZero(fK[1])) {
+ return i1;
+ }
+
+ // If k3 is non-zero, output can be produced whenever i2 is non-transparent.
+ // [k2 = k4 = 0 => result(i1,i2) = k1*i1*i2 + k3*i2 = (k1*i1 + k3)*i2]
+ if (!SkScalarNearlyZero(fK[2])) {
+ return i2;
+ }
+
+    // If just k1 is non-zero, output will only be produced where both inputs
+ // are non-transparent. Use intersection.
+ // [k1 > 0 and k2 = k3 = k4 = 0 => result(i1,i2) = k1*i1*i2]
+ if (!SkScalarNearlyZero(fK[0])) {
+ if (!i1.intersect(i2)) {
+ return SkIRect::MakeEmpty();
+ }
+ return i1;
+ }
+
+ // [k1 = k2 = k3 = k4 = 0 => result(i1,i2) = 0]
+ return SkIRect::MakeEmpty();
+}
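+
+// A worked example of the bounds logic above (illustrative): with
+// k = {0, 0.5, 0.5, 0}, both k2 and k3 are non-zero, so the result covers the
+// union of i1 and i2; with k = {1, 0, 0, 0}, only the product term remains and
+// output is confined to the intersection of the two inputs.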
+
+#if SK_SUPPORT_GPU
+
+sk_sp<SkSpecialImage> ArithmeticImageFilterImpl::filterImageGPU(
+ const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> backgroundProxy, foregroundProxy;
+
+ GrProtected isProtected = GrProtected::kNo;
+ if (background) {
+ backgroundProxy = background->asTextureProxyRef(context);
+ isProtected = backgroundProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+ }
+
+ if (foreground) {
+ foregroundProxy = foreground->asTextureProxyRef(context);
+ isProtected = foregroundProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+ }
+
+ GrPaint paint;
+ std::unique_ptr<GrFragmentProcessor> bgFP;
+
+ if (backgroundProxy) {
+ SkIRect bgSubset = background->subset();
+ SkMatrix backgroundMatrix = SkMatrix::MakeTrans(
+ SkIntToScalar(bgSubset.left() - backgroundOffset.fX),
+ SkIntToScalar(bgSubset.top() - backgroundOffset.fY));
+ GrColorType bgColorType = SkColorTypeToGrColorType(background->colorType());
+ bgFP = GrTextureDomainEffect::Make(
+ std::move(backgroundProxy), bgColorType, backgroundMatrix,
+ GrTextureDomain::MakeTexelDomain(bgSubset, GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrSamplerState::Filter::kNearest);
+ bgFP = GrColorSpaceXformEffect::Make(std::move(bgFP), background->getColorSpace(),
+ background->alphaType(),
+ ctx.colorSpace());
+ } else {
+ bgFP = GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ }
+
+ if (foregroundProxy) {
+ SkIRect fgSubset = foreground->subset();
+ SkMatrix foregroundMatrix = SkMatrix::MakeTrans(
+ SkIntToScalar(fgSubset.left() - foregroundOffset.fX),
+ SkIntToScalar(fgSubset.top() - foregroundOffset.fY));
+ GrColorType fgColorType = SkColorTypeToGrColorType(foreground->colorType());
+ auto foregroundFP = GrTextureDomainEffect::Make(
+ std::move(foregroundProxy), fgColorType, foregroundMatrix,
+ GrTextureDomain::MakeTexelDomain(fgSubset, GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrSamplerState::Filter::kNearest);
+ foregroundFP = GrColorSpaceXformEffect::Make(std::move(foregroundFP),
+ foreground->getColorSpace(),
+ foreground->alphaType(),
+ ctx.colorSpace());
+ paint.addColorFragmentProcessor(std::move(foregroundFP));
+
+ static int arithmeticIndex = GrSkSLFP::NewIndex();
+ ArithmeticFPInputs inputs;
+ static_assert(sizeof(inputs.k) == sizeof(fK), "struct size mismatch");
+ memcpy(inputs.k, fK, sizeof(inputs.k));
+ inputs.enforcePMColor = fEnforcePMColor;
+ std::unique_ptr<GrFragmentProcessor> xferFP = GrSkSLFP::Make(context,
+ arithmeticIndex,
+ "Arithmetic",
+ SKSL_ARITHMETIC_SRC,
+ &inputs,
+ sizeof(inputs));
+ if (xferFP) {
+ ((GrSkSLFP&) *xferFP).addChild(std::move(bgFP));
+ paint.addColorFragmentProcessor(std::move(xferFP));
+ }
+ } else {
+ paint.addColorFragmentProcessor(std::move(bgFP));
+ }
+
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ auto renderTargetContext =
+ context->priv().makeDeferredRenderTargetContext(SkBackingFit::kApprox,
+ bounds.width(),
+ bounds.height(),
+ ctx.grColorType(),
+ ctx.refColorSpace(),
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ isProtected);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, matrix,
+ SkRect::Make(bounds));
+
+ return SkSpecialImage::MakeDeferredFromGpu(context,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().colorType(),
+ renderTargetContext->colorInfo().refColorSpace());
+}
+#endif
+
+void ArithmeticImageFilterImpl::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPixmap dst;
+ if (!canvas->peekPixels(&dst)) {
+ return;
+ }
+
+ const SkMatrix& ctm = canvas->getTotalMatrix();
+ SkASSERT(ctm.getType() <= SkMatrix::kTranslate_Mask);
+ const int dx = SkScalarRoundToInt(ctm.getTranslateX());
+ const int dy = SkScalarRoundToInt(ctm.getTranslateY());
+ // be sure to perform this offset using SkIRect, since it saturates to avoid overflows
+ const SkIRect fgoffset = fgBounds.makeOffset(dx, dy);
+
+ if (img) {
+ SkBitmap srcBM;
+ SkPixmap src;
+ if (!img->getROPixels(&srcBM)) {
+ return;
+ }
+ if (!srcBM.peekPixels(&src)) {
+ return;
+ }
+
+ auto proc = fEnforcePMColor ? arith_span<true> : arith_span<false>;
+ SkPixmap tmpDst = dst;
+ if (intersect(&tmpDst, &src, fgoffset.fLeft, fgoffset.fTop)) {
+ for (int y = 0; y < tmpDst.height(); ++y) {
+ proc(fK, tmpDst.writable_addr32(0, y), src.addr32(0, y), tmpDst.width());
+ }
+ }
+ }
+
+ // Now apply the mode with transparent-color to the outside of the fg image
+ SkRegion outside(SkIRect::MakeWH(dst.width(), dst.height()));
+ outside.op(fgoffset, SkRegion::kDifference_Op);
+ auto proc = fEnforcePMColor ? arith_transparent<true> : arith_transparent<false>;
+ for (SkRegion::Iterator iter(outside); !iter.done(); iter.next()) {
+ const SkIRect r = iter.rect();
+ for (int y = r.fTop; y < r.fBottom; ++y) {
+ proc(fK, dst.writable_addr32(r.fLeft, y), r.width());
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp
new file mode 100644
index 0000000000..2dde5e2be7
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkBlurImageFilter.h"
+
+#include <algorithm>
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkTileMode.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTFitsIn.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkGpuBlurUtils.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#endif
+
+namespace {
+
+class SkBlurImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkBlurImageFilterImpl(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSigma{sigmaX, sigmaY}
+ , fTileMode(tileMode) {}
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void SkBlurImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkBlurImageFilterImpl)
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> gpuFilter(
+ const Context& ctx, SkVector sigma,
+ const sk_sp<SkSpecialImage> &input,
+ SkIRect inputBounds, SkIRect dstBounds, SkIPoint inputOffset, SkIPoint* offset) const;
+#endif
+
+ SkSize fSigma;
+ SkTileMode fTileMode;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+static SkTileMode to_sktilemode(SkBlurImageFilter::TileMode tileMode) {
+ switch(tileMode) {
+ case SkBlurImageFilter::kClamp_TileMode:
+ return SkTileMode::kClamp;
+ case SkBlurImageFilter::kRepeat_TileMode:
+ return SkTileMode::kRepeat;
+ case SkBlurImageFilter::kClampToBlack_TileMode:
+ // Fall through
+ default:
+ return SkTileMode::kDecal;
+ }
+}
+
+sk_sp<SkImageFilter> SkBlurImageFilter::Make(SkScalar sigmaX, SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect,
+ TileMode tileMode) {
+ return Make(sigmaX, sigmaY, to_sktilemode(tileMode), std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkBlurImageFilter::Make(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (sigmaX < SK_ScalarNearlyZero && sigmaY < SK_ScalarNearlyZero && !cropRect) {
+ return input;
+ }
+ return sk_sp<SkImageFilter>(
+ new SkBlurImageFilterImpl(sigmaX, sigmaY, tileMode, input, cropRect));
+}
+
+void SkBlurImageFilter::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkBlurImageFilterImpl); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkBlurImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar sigmaX = buffer.readScalar();
+ SkScalar sigmaY = buffer.readScalar();
+ SkTileMode tileMode;
+ if (buffer.isVersionLT(SkPicturePriv::kTileModeInBlurImageFilter_Version)) {
+ tileMode = SkTileMode::kDecal;
+ } else if (buffer.isVersionLT(SkPicturePriv::kCleanupImageFilterEnums_Version)) {
+ tileMode = to_sktilemode(buffer.read32LE(SkBlurImageFilter::kLast_TileMode));
+ } else {
+ tileMode = buffer.read32LE(SkTileMode::kLastTileMode);
+ }
+
+ static_assert(SkBlurImageFilter::kLast_TileMode == 2, "CreateProc");
+
+ return SkBlurImageFilter::Make(
+ sigmaX, sigmaY, tileMode, common.getInput(0), &common.cropRect());
+}
+
+void SkBlurImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fSigma.fWidth);
+ buffer.writeScalar(fSigma.fHeight);
+
+ // Fuzzer sanity checks
+ static_assert((int) SkTileMode::kLastTileMode == 3 && SkBlurImageFilter::kLast_TileMode == 2,
+ "SkBlurImageFilterImpl::flatten");
+ SkASSERT(fTileMode <= SkTileMode::kLastTileMode);
+ buffer.writeInt(static_cast<int>(fTileMode));
+}
+
+#if SK_SUPPORT_GPU
+static GrTextureDomain::Mode to_texture_domain_mode(SkTileMode tileMode) {
+ switch (tileMode) {
+ case SkTileMode::kClamp:
+ return GrTextureDomain::kClamp_Mode;
+ case SkTileMode::kDecal:
+ return GrTextureDomain::kDecal_Mode;
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Support mirror mode, treat as repeat for now
+ case SkTileMode::kRepeat:
+ return GrTextureDomain::kRepeat_Mode;
+ default:
+ SK_ABORT("Unsupported tile mode.");
+ }
+}
+#endif
+
+// This is defined by the SVG spec:
+// https://drafts.fxtf.org/filter-effects/#feGaussianBlurElement
+static int calculate_window(double sigma) {
+ // NB 136 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+// using the Gauss filter. It also limits the size of buffers used to hold intermediate values.
+ // Explanation of maximums:
+ // sum0 = window * 255
+ // sum1 = window * sum0 -> window * window * 255
+ // sum2 = window * sum1 -> window * window * window * 255 -> window^3 * 255
+ //
+ // The value window^3 * 255 must fit in a uint32_t. So,
+// window^3 * 255 < 2^32, so the window is capped at 255.
+ //
+ // window = floor(sigma * 3 * sqrt(2 * kPi) / 4 + 0.5)
+ // For window <= 255, the largest value for sigma is 136.
+ sigma = SkTPin(sigma, 0.0, 136.0);
+ auto possibleWindow = static_cast<int>(floor(sigma * 3 * sqrt(2 * SK_DoublePI) / 4 + 0.5));
+ return std::max(1, possibleWindow);
+}
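+
+// For example (illustrative): sigma = 2 gives
+// floor(2 * 3 * sqrt(2 * pi) / 4 + 0.5) = floor(4.26) = 4, and sigma = 10 gives
+// floor(19.30) = 19.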
+
+// Calculating the border is tricky. The border is the distance in pixels between the first dst
+// pixel and the first src pixel (or the last src pixel and the last dst pixel).
+// I will go through the odd case which is simpler, and then through the even case. Given a
+// stack of filters seven wide for the odd case of three passes.
+//
+//     S
+//  aaaAaaa
+//  bbbBbbb
+//  cccCccc
+//     D
+//
+// The furthest changed pixel is when the filters are in the following configuration.
+//
+//              S
+//        aaaAaaa
+//     bbbBbbb
+//  cccCccc
+//     D
+//
+// The A pixel is calculated using the value S, the B uses A, and the C uses B, and
+// finally D is C. So, with a window size of seven the border is nine. In the odd case, the
+// border is 3*((window - 1)/2).
+//
+// For even cases the filter stack is more complicated. The spec specifies two passes
+// of even filters and a final pass of odd filters. A stack for a width of six looks like
+// this.
+//
+//     S
+//  aaaAaa
+//  bbBbbb
+//  cccCccc
+//     D
+//
+// The furthest pixel looks like this.
+//
+//             S
+//        aaaAaa
+//      bbBbbb
+//  cccCccc
+//     D
+//
+// For a window of six, the border value is eight. In the even case the border is 3 *
+// (window/2) - 1.
+static int calculate_border(int window) {
+ return (window & 1) == 1 ? 3 * ((window - 1) / 2) : 3 * (window / 2) - 1;
+}
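+
+// For example (illustrative): window = 7 (odd) gives border 3 * 3 = 9, and
+// window = 6 (even) gives border 3 * 3 - 1 = 8, matching the diagrams above.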
+
+static int calculate_buffer(int window) {
+ int bufferSize = window - 1;
+ return (window & 1) == 1 ? 3 * bufferSize : 3 * bufferSize + 1;
+}
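+
+// For example (illustrative): window = 7 needs 3 * 6 = 18 buffer entries (three
+// circular buffers of window - 1 each), while window = 6 needs 3 * 5 + 1 = 16,
+// the extra entry being the enlarged third buffer used for even windows.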
+
+// blur_one_direction implements the common three pass box filter approximation of Gaussian blur,
+// but combines all three passes into a single pass. This approach is facilitated by three circular
+// buffers the width of the window which track values for trailing edges of each of the three
+// passes. This allows the algorithm to use more precision in the calculation because the values
+// are not rounded each pass. This implementation also avoids a trap that's easy to fall
+// into: blending in too many zeroes near the edge.
+//
+// In general, a window sum has the form:
+// sum_n+1 = sum_n + leading_edge - trailing_edge.
+// If instead we do the subtraction at the end of the previous iteration, we can just
+// calculate the sums instead of having to do the subtractions too.
+//
+// In previous iteration:
+// sum_n+1 = sum_n - trailing_edge.
+//
+// In this iteration:
+// sum_n+1 = sum_n + leading_edge.
+//
+// Now we can stack all three sums and do them at once. Sum0 gets its leading edge from the
+// actual data. Sum1's leading edge is just Sum0, and Sum2's leading edge is Sum1. So, doing the
+// three passes at the same time has the form:
+//
+// sum0_n+1 = sum0_n + leading edge
+// sum1_n+1 = sum1_n + sum0_n+1
+// sum2_n+1 = sum2_n + sum1_n+1
+//
+// sum2_n+1 / window^3 is the new value of the destination pixel.
+//
+// Reduce the sums by the trailing edges which were stored in the circular buffers,
+// for the next go around. This is the case for odd sized windows; for even windows, the third
+// circular buffer is one larger than the first two circular buffers.
+//
+// sum2_n+2 = sum2_n+1 - buffer2[i];
+// buffer2[i] = sum1;
+// sum1_n+2 = sum1_n+1 - buffer1[i];
+// buffer1[i] = sum0;
+// sum0_n+2 = sum0_n+1 - buffer0[i];
+// buffer0[i] = leading edge
+//
+// This is all encapsulated in the processValue function below.
+//
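+// A scalar, single-channel sketch of one step (illustrative only; the real
+// processValue below runs all four channels at once in Sk4u):
+//
+//     sum0 += leadingEdge; sum1 += sum0; sum2 += sum1;
+//     out   = (sum2 * weight) >> 32;     // ~ sum2 / divisor, rounded
+//     sum2 -= buf2[i]; buf2[i] = sum1;
+//     sum1 -= buf1[i]; buf1[i] = sum0;
+//     sum0 -= buf0[i]; buf0[i] = leadingEdge;
+//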
+using Pass0And1 = Sk4u[2];
+// The would be dLeft parameter is assumed to be 0.
+static void blur_one_direction(Sk4u* buffer, int window,
+ int srcLeft, int srcRight, int dstRight,
+ const uint32_t* src, int srcXStride, int srcYStride, int srcH,
+ uint32_t* dst, int dstXStride, int dstYStride) {
+
+ // The circular buffers are one less than the window.
+ auto pass0Count = window - 1,
+ pass1Count = window - 1,
+ pass2Count = (window & 1) == 1 ? window - 1 : window;
+
+ Pass0And1* buffer01Start = (Pass0And1*)buffer;
+ Sk4u* buffer2Start = buffer + pass0Count + pass1Count;
+ Pass0And1* buffer01End = (Pass0And1*)buffer2Start;
+ Sk4u* buffer2End = buffer2Start + pass2Count;
+
+    // If the window is odd, then the divisor is just window^3; otherwise,
+    // it is window * window * (window + 1) = window^3 + window^2.
+ auto window2 = window * window;
+ auto window3 = window2 * window;
+ auto divisor = (window & 1) == 1 ? window3 : window3 + window2;
+
+ // NB the sums in the blur code use the following technique to avoid
+ // adding 1/2 to round the divide.
+ //
+ // Sum/d + 1/2 == (Sum + h) / d
+ // Sum + d(1/2) == Sum + h
+ // h == (1/2)d
+ //
+    // But the d/2 itself should be rounded.
+ // h == d/2 + 1/2 == (d + 1) / 2
+ //
+ // weight = 1 / d * 2 ^ 32
+ auto weight = static_cast<uint32_t>(round(1.0 / divisor * (1ull << 32)));
+ auto half = static_cast<uint32_t>((divisor + 1) / 2);
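+
+    // For example (illustrative): window = 7 gives divisor = 343,
+    // weight = round(2^32 / 343) = 12521771 and half = (343 + 1) / 2 = 172, so
+    // mulHi(weight) computes (sum * 12521771) >> 32 ~= sum / 343, with the half
+    // seeded into sum2 providing the round-to-nearest bias.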
+
+ auto border = calculate_border(window);
+
+ // Calculate the start and end of the source pixels with respect to the destination start.
+ auto srcStart = srcLeft - border,
+ srcEnd = srcRight - border,
+ dstEnd = dstRight;
+
+ for (auto y = 0; y < srcH; y++) {
+ auto buffer01Cursor = buffer01Start;
+ auto buffer2Cursor = buffer2Start;
+
+ Sk4u sum0{0u};
+ Sk4u sum1{0u};
+ Sk4u sum2{half};
+
+ sk_bzero(buffer01Start, (buffer2End - (Sk4u *) (buffer01Start)) * sizeof(*buffer2Start));
+
+ // Given an expanded input pixel, move the window ahead using the leadingEdge value.
+ auto processValue = [&](const Sk4u& leadingEdge) -> Sk4u {
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ Sk4u value = sum2.mulHi(weight);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < buffer2End ? buffer2Cursor + 1 : buffer2Start;
+
+ sum1 -= (*buffer01Cursor)[1];
+ (*buffer01Cursor)[1] = sum0;
+ sum0 -= (*buffer01Cursor)[0];
+ (*buffer01Cursor)[0] = leadingEdge;
+ buffer01Cursor =
+ (buffer01Cursor + 1) < buffer01End ? buffer01Cursor + 1 : buffer01Start;
+
+ return value;
+ };
+
+ auto srcIdx = srcStart;
+ auto dstIdx = 0;
+ const uint32_t* srcCursor = src;
+ uint32_t* dstCursor = dst;
+
+        // These destination pixels are not affected by the src pixels;
+        // set them to zero as per the spec.
+ // https://drafts.fxtf.org/filter-effects/#FilterPrimitivesOverviewIntro
+ while (dstIdx < srcIdx) {
+ *dstCursor = 0;
+ dstCursor += dstXStride;
+ SK_PREFETCH(dstCursor);
+ dstIdx++;
+ }
+
+ // The edge of the source is before the edge of the destination. Calculate the sums for
+ // the pixels before the start of the destination.
+ while (dstIdx > srcIdx) {
+ Sk4u leadingEdge = srcIdx < srcEnd ? SkNx_cast<uint32_t>(Sk4b::Load(srcCursor)) : 0;
+ (void) processValue(leadingEdge);
+ srcCursor += srcXStride;
+ srcIdx++;
+ }
+
+        // The dstIdx and srcIdx are in sync now; the code just uses dstIdx for both.
+ // Consume the source generating pixels to dst.
+ auto loopEnd = std::min(dstEnd, srcEnd);
+ while (dstIdx < loopEnd) {
+ Sk4u leadingEdge = SkNx_cast<uint32_t>(Sk4b::Load(srcCursor));
+ SkNx_cast<uint8_t>(processValue(leadingEdge)).store(dstCursor);
+ srcCursor += srcXStride;
+ dstCursor += dstXStride;
+ SK_PREFETCH(dstCursor);
+ dstIdx++;
+ }
+
+ // The leading edge is beyond the end of the source. Assume that the pixels
+ // are now 0x0000 until the end of the destination.
+ loopEnd = dstEnd;
+ while (dstIdx < loopEnd) {
+ SkNx_cast<uint8_t>(processValue(0u)).store(dstCursor);
+ dstCursor += dstXStride;
+ SK_PREFETCH(dstCursor);
+ dstIdx++;
+ }
+
+ src += srcYStride;
+ dst += dstYStride;
+ }
+}
+
+static sk_sp<SkSpecialImage> copy_image_with_bounds(
+ const SkImageFilter_Base::Context& ctx, const sk_sp<SkSpecialImage> &input,
+ SkIRect srcBounds, SkIRect dstBounds) {
+ SkBitmap inputBM;
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkBitmap src;
+ inputBM.extractSubset(&src, srcBounds);
+
+ // Make everything relative to the destination bounds.
+ srcBounds.offset(-dstBounds.x(), -dstBounds.y());
+ dstBounds.offset(-dstBounds.x(), -dstBounds.y());
+
+ auto srcW = srcBounds.width(),
+ dstW = dstBounds.width(),
+ dstH = dstBounds.height();
+
+ SkImageInfo dstInfo = SkImageInfo::Make(dstW, dstH, inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(dstInfo)) {
+ return nullptr;
+ }
+
+ // There is no blurring to do, but we still need to copy the source while accounting for the
+ // dstBounds. Remember that the src was intersected with the dst.
+ int y = 0;
+ size_t dstWBytes = dstW * sizeof(uint32_t);
+ for (;y < srcBounds.top(); y++) {
+ sk_bzero(dst.getAddr32(0, y), dstWBytes);
+ }
+
+ for (;y < srcBounds.bottom(); y++) {
+ int x = 0;
+ uint32_t* dstPtr = dst.getAddr32(0, y);
+ for (;x < srcBounds.left(); x++) {
+ *dstPtr++ = 0;
+ }
+
+ memcpy(dstPtr, src.getAddr32(x - srcBounds.left(), y - srcBounds.top()),
+ srcW * sizeof(uint32_t));
+
+ dstPtr += srcW;
+ x += srcW;
+
+ for (;x < dstBounds.right(); x++) {
+ *dstPtr++ = 0;
+ }
+ }
+
+ for (;y < dstBounds.bottom(); y++) {
+ sk_bzero(dst.getAddr32(0, y), dstWBytes);
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(),
+ dstBounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+// TODO: Implement CPU backend for different fTileMode.
+static sk_sp<SkSpecialImage> cpu_blur(
+ const SkImageFilter_Base::Context& ctx,
+ SkVector sigma, const sk_sp<SkSpecialImage> &input,
+ SkIRect srcBounds, SkIRect dstBounds) {
+ auto windowW = calculate_window(sigma.x()),
+ windowH = calculate_window(sigma.y());
+
+ if (windowW <= 1 && windowH <= 1) {
+ return copy_image_with_bounds(ctx, input, srcBounds, dstBounds);
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkBitmap src;
+ inputBM.extractSubset(&src, srcBounds);
+
+ // Make everything relative to the destination bounds.
+ srcBounds.offset(-dstBounds.x(), -dstBounds.y());
+ dstBounds.offset(-dstBounds.x(), -dstBounds.y());
+
+ auto srcW = srcBounds.width(),
+ srcH = srcBounds.height(),
+ dstW = dstBounds.width(),
+ dstH = dstBounds.height();
+
+ SkImageInfo dstInfo = inputBM.info().makeWH(dstW, dstH);
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(dstInfo)) {
+ return nullptr;
+ }
+
+ auto bufferSizeW = calculate_buffer(windowW),
+ bufferSizeH = calculate_buffer(windowH);
+
+    // The stack amount of 1024 bytes is enough for buffers up to about 10 sigma; larger
+    // buffers will be allocated on the heap.
+ SkSTArenaAlloc<1024> alloc;
+ Sk4u* buffer = alloc.makeArrayDefault<Sk4u>(std::max(bufferSizeW, bufferSizeH));
+
+ // Basic Plan: The three cases to handle
+ // * Horizontal and Vertical - blur horizontally while copying values from the source to
+ // the destination. Then, do an in-place vertical blur.
+ // * Horizontal only - blur horizontally copying values from the source to the destination.
+ // * Vertical only - blur vertically copying values from the source to the destination.
+
+ // Default to vertical only blur case. If a horizontal blur is needed, then these values
+ // will be adjusted while doing the horizontal blur.
+ auto intermediateSrc = static_cast<uint32_t *>(src.getPixels());
+ auto intermediateRowBytesAsPixels = src.rowBytesAsPixels();
+ auto intermediateWidth = srcW;
+
+    // Because the border is calculated before the fork of the GPU/CPU path, it is the
+    // maximum of the two rendering methods. In the case where sigma is zero, the src and
+    // dst left values are the same. If sigma is small, resulting in a window size of 1,
+    // then the border calculations add some pixels which will always be zero. Inset the
+    // destination by those zero pixels. This case is very rare.
+ auto intermediateDst = dst.getAddr32(srcBounds.left(), 0);
+
+    // The following code is executed very rarely; I have never seen it hit on a real web
+    // page. If sigma is small but not zero, then the shared GPU/CPU border calculation
+ // code adds extra pixels for the border. Just clear everything to clear those pixels.
+ // This solution is overkill, but very simple.
+ if (windowW == 1 || windowH == 1) {
+ dst.eraseColor(0);
+ }
+
+ if (windowW > 1) {
+ // Make int64 to avoid overflow in multiplication below.
+ int64_t shift = srcBounds.top() - dstBounds.top();
+
+        // For the horizontal blur, the intermediate buffer starts part way down in
+        // anticipation of the vertical blur. For a vertical sigma of zero, shift should be
+        // zero. But for small sigma, shift may be > 0 while the vertical window is still 1.
+ intermediateSrc = static_cast<uint32_t *>(dst.getPixels())
+ + (shift > 0 ? shift * dst.rowBytesAsPixels() : 0);
+ intermediateRowBytesAsPixels = dst.rowBytesAsPixels();
+ intermediateWidth = dstW;
+ intermediateDst = static_cast<uint32_t *>(dst.getPixels());
+
+ blur_one_direction(
+ buffer, windowW,
+ srcBounds.left(), srcBounds.right(), dstBounds.right(),
+ static_cast<uint32_t *>(src.getPixels()), 1, src.rowBytesAsPixels(), srcH,
+ intermediateSrc, 1, intermediateRowBytesAsPixels);
+ }
+
+ if (windowH > 1) {
+ blur_one_direction(
+ buffer, windowH,
+ srcBounds.top(), srcBounds.bottom(), dstBounds.bottom(),
+ intermediateSrc, intermediateRowBytesAsPixels, 1, intermediateWidth,
+ intermediateDst, dst.rowBytesAsPixels(), 1);
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(),
+ dstBounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+// This rather arbitrary-looking value results in a maximum box blur kernel size
+// of 1000 pixels on the raster path, which matches the WebKit and Firefox
+// implementations. Since the GPU path does not compute a box blur, putting
+// the limit on sigma ensures consistent behaviour between the GPU and
+// raster paths.
+#define MAX_SIGMA SkIntToScalar(532)
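+
+// Sanity check (illustrative): plugging 532 into the unpinned window formula,
+// floor(532 * 3 * sqrt(2 * pi) / 4 + 0.5) = 1000, i.e. a box kernel of roughly
+// 1000 pixels, as stated above.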
+
+static SkVector map_sigma(const SkSize& localSigma, const SkMatrix& ctm) {
+ SkVector sigma = SkVector::Make(localSigma.width(), localSigma.height());
+ ctm.mapVectors(&sigma, 1);
+ sigma.fX = SkMinScalar(SkScalarAbs(sigma.fX), MAX_SIGMA);
+ sigma.fY = SkMinScalar(SkScalarAbs(sigma.fY), MAX_SIGMA);
+ return sigma;
+}
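+
+// For example (illustrative): under a CTM that scales by 2x, a local sigma of
+// (5, 5) maps to (10, 10); under a 90-degree rotation, (5, 0) maps to (0, 5),
+// with SkScalarAbs and MAX_SIGMA keeping each component non-negative and bounded.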
+
+sk_sp<SkSpecialImage> SkBlurImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.fX, inputOffset.fY,
+ input->width(), input->height());
+
+ // Calculate the destination bounds.
+ SkIRect dstBounds;
+ if (!this->applyCropRect(this->mapContext(ctx), inputBounds, &dstBounds)) {
+ return nullptr;
+ }
+ if (!inputBounds.intersect(dstBounds)) {
+ return nullptr;
+ }
+
+ // Save the offset in preparation to make all rectangles relative to the inputOffset.
+ SkIPoint resultOffset = SkIPoint::Make(dstBounds.fLeft, dstBounds.fTop);
+
+ // Make all bounds relative to the inputOffset.
+ inputBounds.offset(-inputOffset);
+ dstBounds.offset(-inputOffset);
+
+ SkVector sigma = map_sigma(fSigma, ctx.ctm());
+ if (sigma.x() < 0 || sigma.y() < 0) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialImage> result;
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ // Ensure the input is in the destination's gamut. This saves us from having to do the
+ // xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace());
+ result = this->gpuFilter(ctx, sigma, input, inputBounds, dstBounds, inputOffset,
+ &resultOffset);
+ } else
+#endif
+ {
+ // NB 135 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+        // using the Gauss filter. It also limits the size of buffers used to hold intermediate values. The
+ // additional + 1 added to window represents adding one more leading element before subtracting the
+ // trailing element.
+ // Explanation of maximums:
+ // sum0 = (window + 1) * 255
+ // sum1 = (window + 1) * sum0 -> (window + 1) * (window + 1) * 255
+        //   sum2 = (window + 1) * sum1 -> (window + 1) * (window + 1) * (window + 1) * 255 -> (window + 1)^3 * 255
+ //
+ // The value (window + 1)^3 * 255 must fit in a uint32_t. So,
+ // (window + 1)^3 * 255 < 2^32. window = 255.
+ //
+ // window = floor(sigma * 3 * sqrt(2 * kPi) / 4)
+ // For window <= 255, the largest value for sigma is 135.
+ sigma.fX = SkTPin(sigma.fX, 0.0f, 135.0f);
+ sigma.fY = SkTPin(sigma.fY, 0.0f, 135.0f);
+
+ result = cpu_blur(ctx, sigma, input, inputBounds, dstBounds);
+ }
+
+ // Return the resultOffset if the blur succeeded.
+ if (result != nullptr) {
+ *offset = resultOffset;
+ }
+ return result;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<SkSpecialImage> SkBlurImageFilterImpl::gpuFilter(
+ const Context& ctx, SkVector sigma, const sk_sp<SkSpecialImage> &input, SkIRect inputBounds,
+ SkIRect dstBounds, SkIPoint inputOffset, SkIPoint* offset) const {
+ if (0 == sigma.x() && 0 == sigma.y()) {
+ offset->fX = inputBounds.x() + inputOffset.fX;
+ offset->fY = inputBounds.y() + inputOffset.fY;
+ return input->makeSubset(inputBounds);
+ }
+
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> inputTexture(input->asTextureProxyRef(context));
+ if (!inputTexture) {
+ return nullptr;
+ }
+
+ // TODO (michaelludwig) - The color space choice is odd, should it just be ctx.refColorSpace()?
+ auto renderTargetContext = SkGpuBlurUtils::GaussianBlur(
+ context,
+ std::move(inputTexture),
+ SkColorTypeToGrColorType(input->colorType()),
+ input->alphaType(),
+ input->subset().topLeft(),
+ ctx.colorSpace() ? sk_ref_sp(input->getColorSpace()) : nullptr,
+ dstBounds,
+ inputBounds,
+ sigma.x(),
+ sigma.y(),
+ to_texture_domain_mode(fTileMode));
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ return SkSpecialImage::MakeDeferredFromGpu(
+ context,
+ SkIRect::MakeWH(dstBounds.width(), dstBounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().colorType(),
+ sk_ref_sp(input->getColorSpace()),
+ ctx.surfaceProps());
+}
+#endif
+
+SkRect SkBlurImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(fSigma.width() * 3, fSigma.height() * 3);
+ return bounds;
+}
+
+SkIRect SkBlurImageFilterImpl::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const {
+ SkVector sigma = map_sigma(fSigma, ctm);
+ return src.makeOutset(SkScalarCeilToInt(sigma.x() * 3), SkScalarCeilToInt(sigma.y() * 3));
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp
new file mode 100644
index 0000000000..b9154431c6
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkColorFilterImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorFilter.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkColorFilterImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkColorFilterImageFilterImpl(sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fColorFilter(std::move(cf)) {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ bool onIsColorFilterNode(SkColorFilter**) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+ bool affectsTransparentBlack() const override;
+
+private:
+ friend void SkColorFilterImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkColorFilterImageFilterImpl)
+
+ sk_sp<SkColorFilter> fColorFilter;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkColorFilterImageFilter::Make(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (!cf) {
+ return nullptr;
+ }
+
+ SkColorFilter* inputCF;
+ if (input && input->isColorFilterNode(&inputCF)) {
+ // This is an optimization, as it collapses the hierarchy by just combining the two
+ // colorfilters into a single one, which the new imagefilter will wrap.
+ sk_sp<SkColorFilter> newCF = cf->makeComposed(sk_sp<SkColorFilter>(inputCF));
+ if (newCF) {
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilterImpl(
+ std::move(newCF), sk_ref_sp(input->getInput(0)), cropRect));
+ }
+ }
+
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilterImpl(
+ std::move(cf), std::move(input), cropRect));
+}
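+
+// For example (illustrative): Make(cfOuter, Make(cfInner, source)) collapses to a
+// single filter wrapping cfOuter composed with cfInner, provided the inner filter
+// has no crop rect and the two color filters can be composed.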
+
+void SkColorFilterImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkColorFilterImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkColorFilterImageFilter", SkColorFilterImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkColorFilterImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkColorFilter> cf(buffer.readColorFilter());
+ return SkColorFilterImageFilter::Make(std::move(cf), common.getInput(0), &common.cropRect());
+}
+
+void SkColorFilterImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeFlattenable(fColorFilter.get());
+}
+
+sk_sp<SkSpecialImage> SkColorFilterImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+
+ SkIRect inputBounds;
+ if (fColorFilter->affectsTransparentBlack()) {
+ // If the color filter affects transparent black, the bounds are the entire clip.
+ inputBounds = ctx.clipBounds();
+ } else if (!input) {
+ return nullptr;
+ } else {
+ inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setColorFilter(fColorFilter);
+
+ // TODO: it may not be necessary to clear or drawPaint inside the input bounds
+ // (see skbug.com/5075)
+ if (fColorFilter->affectsTransparentBlack()) {
+ // The subsequent input->draw() call may not fill the entire canvas. For filters which
+ // affect transparent black, ensure that the filter is applied everywhere.
+ paint.setColor(SK_ColorTRANSPARENT);
+ canvas->drawPaint(paint);
+ paint.setColor(SK_ColorBLACK);
+ } else {
+ canvas->clear(0x0);
+ }
+
+ if (input) {
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.fX - bounds.fLeft),
+ SkIntToScalar(inputOffset.fY - bounds.fTop),
+ &paint);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+bool SkColorFilterImageFilterImpl::onIsColorFilterNode(SkColorFilter** filter) const {
+ SkASSERT(1 == this->countInputs());
+ if (!this->cropRectIsSet()) {
+ if (filter) {
+ *filter = SkRef(fColorFilter.get());
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkColorFilterImageFilterImpl::affectsTransparentBlack() const {
+ return fColorFilter->affectsTransparentBlack();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp
new file mode 100644
index 0000000000..f83eefe9d0
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkComposeImageFilter.h"
+
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkComposeImageFilterImpl final : public SkImageFilter_Base {
+public:
+ explicit SkComposeImageFilterImpl(sk_sp<SkImageFilter> inputs[2])
+ : INHERITED(inputs, 2, nullptr) {
+ SkASSERT(inputs[0].get());
+ SkASSERT(inputs[1].get());
+ }
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+
+private:
+ friend void SkComposeImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkComposeImageFilterImpl)
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkComposeImageFilter::Make(sk_sp<SkImageFilter> outer,
+ sk_sp<SkImageFilter> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ sk_sp<SkImageFilter> inputs[2] = { std::move(outer), std::move(inner) };
+ return sk_sp<SkImageFilter>(new SkComposeImageFilterImpl(inputs));
+}
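+
+// For example (illustrative): Make(blur, offset) builds a filter that applies the
+// inner filter (offset) to the source first and then blurs its result; passing
+// nullptr for either argument simply returns the other filter unchanged.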
+
+void SkComposeImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkComposeImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkComposeImageFilter", SkComposeImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkComposeImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ return SkComposeImageFilter::Make(common.getInput(0), common.getInput(1));
+}
+
+SkRect SkComposeImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ const SkImageFilter* outer = this->getInput(0);
+ const SkImageFilter* inner = this->getInput(1);
+
+ return outer->computeFastBounds(inner->computeFastBounds(src));
+}
+
+sk_sp<SkSpecialImage> SkComposeImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ // The bounds passed to the inner filter must be filtered by the outer
+ // filter, so that the inner filter produces the pixels that the outer
+ // filter requires as input. This matters if the outer filter moves pixels.
+ SkIRect innerClipBounds;
+ innerClipBounds = this->getInput(0)->filterBounds(ctx.clipBounds(), ctx.ctm(),
+ kReverse_MapDirection, &ctx.clipBounds());
+ Context innerContext = ctx.withNewDesiredOutput(skif::LayerSpace<SkIRect>(innerClipBounds));
+ SkIPoint innerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> inner(this->filterInput(1, innerContext, &innerOffset));
+ if (!inner) {
+ return nullptr;
+ }
+
+ // TODO (michaelludwig) - Once all filters are updated to process coordinate spaces more
+ // robustly, we can allow source images to have non-(0,0) origins, which will mean that the
+ // CTM/clipBounds modifications for the outerContext can go away.
+ SkMatrix outerMatrix(ctx.ctm());
+ outerMatrix.postTranslate(SkIntToScalar(-innerOffset.x()), SkIntToScalar(-innerOffset.y()));
+ SkIRect clipBounds = ctx.clipBounds();
+ clipBounds.offset(-innerOffset.x(), -innerOffset.y());
+ // NOTE: This is the only spot in image filtering where the source image of the context
+ // is not constant for the entire DAG evaluation. Given that the inner and outer DAG branches
+ // were already created, there's no alternative way for the leaf nodes of the outer DAG to
+ // get the results of the inner DAG. Overriding the source image of the context has the correct
+ // effect, but means that the source image is not fixed for the entire filter process.
+ Context outerContext(outerMatrix, clipBounds, ctx.cache(), ctx.colorType(), ctx.colorSpace(),
+ inner.get());
+
+ SkIPoint outerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> outer(this->filterInput(0, outerContext, &outerOffset));
+ if (!outer) {
+ return nullptr;
+ }
+
+ *offset = innerOffset + outerOffset;
+ return outer;
+}
+
+SkIRect SkComposeImageFilterImpl::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ const SkImageFilter* outer = this->getInput(0);
+ const SkImageFilter* inner = this->getInput(1);
+
+ const SkIRect innerRect = inner->filterBounds(src, ctm, dir, inputRect);
+ return outer->filterBounds(innerRect, ctm, dir,
+ kReverse_MapDirection == dir ? &innerRect : nullptr);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapEffect.cpp b/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapEffect.cpp
new file mode 100644
index 0000000000..ede0de438c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapEffect.cpp
@@ -0,0 +1,675 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkDisplacementMapEffect.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#endif
+
+namespace {
+
+class SkDisplacementMapEffectImpl final : public SkImageFilter_Base {
+public:
+ SkDisplacementMapEffectImpl(SkColorChannel xChannelSelector, SkColorChannel yChannelSelector,
+ SkScalar scale, sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fXChannelSelector(xChannelSelector)
+ , fYChannelSelector(yChannelSelector)
+ , fScale(scale) {}
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ virtual SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ friend void SkDisplacementMapEffect::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkDisplacementMapEffectImpl)
+
+ SkColorChannel fXChannelSelector;
+ SkColorChannel fYChannelSelector;
+ SkScalar fScale;
+
+ const SkImageFilter* getDisplacementInput() const { return getInput(0); }
+ const SkImageFilter* getColorInput() const { return getInput(1); }
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+// Shift values to extract channels from an SkColor (SkColorGetR, SkColorGetG, etc)
+const uint8_t gChannelTypeToShift[] = {
+ 16, // R
+ 8, // G
+ 0, // B
+ 24, // A
+};
+struct Extractor {
+ Extractor(SkColorChannel typeX,
+ SkColorChannel typeY)
+ : fShiftX(gChannelTypeToShift[static_cast<int>(typeX)])
+ , fShiftY(gChannelTypeToShift[static_cast<int>(typeY)])
+ {}
+
+ unsigned fShiftX, fShiftY;
+
+ unsigned getX(SkColor c) const { return (c >> fShiftX) & 0xFF; }
+ unsigned getY(SkColor c) const { return (c >> fShiftY) & 0xFF; }
+};
+
+static bool channel_selector_type_is_valid(SkColorChannel cst) {
+ switch (cst) {
+ case SkColorChannel::kR:
+ case SkColorChannel::kG:
+ case SkColorChannel::kB:
+ case SkColorChannel::kA:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static SkColorChannel convert_channel_type(SkDisplacementMapEffect::ChannelSelectorType c) {
+ switch(c) {
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ return SkColorChannel::kR;
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ return SkColorChannel::kG;
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ return SkColorChannel::kB;
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ return SkColorChannel::kA;
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ default:
+            // The raster backend historically treated this as B, while the GPU backend would fail when
+            // generating shader code. Just return B without aborting in debug builds in order to keep fuzzers
+ // happy when they pass in the technically still valid kUnknown_ChannelSelectorType.
+ return SkColorChannel::kB;
+ }
+}
+
+} // end namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkDisplacementMapEffect::Make(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const SkImageFilter::CropRect* cropRect) {
+ return Make(convert_channel_type(xChannelSelector), convert_channel_type(yChannelSelector),
+ scale, std::move(displacement), std::move(color), cropRect);
+}
+
+sk_sp<SkImageFilter> SkDisplacementMapEffect::Make(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const SkImageFilter::CropRect* cropRect) {
+ if (!channel_selector_type_is_valid(xChannelSelector) ||
+ !channel_selector_type_is_valid(yChannelSelector)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = { std::move(displacement), std::move(color) };
+ return sk_sp<SkImageFilter>(new SkDisplacementMapEffectImpl(xChannelSelector, yChannelSelector,
+ scale, inputs, cropRect));
+}
+
+void SkDisplacementMapEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkDisplacementMapEffectImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkDisplacementMapEffect", SkDisplacementMapEffectImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkDisplacementMapEffectImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+
+ SkColorChannel xsel, ysel;
+ if (buffer.isVersionLT(SkPicturePriv::kCleanupImageFilterEnums_Version)) {
+ xsel = convert_channel_type(buffer.read32LE(
+ SkDisplacementMapEffect::kLast_ChannelSelectorType));
+ ysel = convert_channel_type(buffer.read32LE(
+ SkDisplacementMapEffect::kLast_ChannelSelectorType));
+ } else {
+ xsel = buffer.read32LE(SkColorChannel::kLastEnum);
+ ysel = buffer.read32LE(SkColorChannel::kLastEnum);
+ }
+
+ SkScalar scale = buffer.readScalar();
+
+ return SkDisplacementMapEffect::Make(xsel, ysel, scale, common.getInput(0), common.getInput(1),
+ &common.cropRect());
+}
+
+void SkDisplacementMapEffectImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt((int) fXChannelSelector);
+ buffer.writeInt((int) fYChannelSelector);
+ buffer.writeScalar(fScale);
+}
+
+#if SK_SUPPORT_GPU
+class GrDisplacementMapEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ SkColorChannel xChannelSelector, SkColorChannel yChannelSelector, SkVector scale,
+ sk_sp<GrTextureProxy> displacement, const SkIRect& displSubset,
+ const SkMatrix& offsetMatrix, sk_sp<GrTextureProxy> color, const SkIRect& colorSubset) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrDisplacementMapEffect(
+ xChannelSelector, yChannelSelector, scale, std::move(displacement), displSubset,
+ offsetMatrix, std::move(color), colorSubset));
+ }
+
+ ~GrDisplacementMapEffect() override;
+
+ SkColorChannel xChannelSelector() const {
+ return fXChannelSelector;
+ }
+ SkColorChannel yChannelSelector() const {
+ return fYChannelSelector;
+ }
+ const SkVector& scale() const { return fScale; }
+
+ const char* name() const override { return "DisplacementMap"; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ GrDisplacementMapEffect(const GrDisplacementMapEffect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrDisplacementMapEffect(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ const SkVector& scale, sk_sp<GrTextureProxy> displacement,
+ const SkIRect& displSubset, const SkMatrix& offsetMatrix,
+ sk_sp<GrTextureProxy> color, const SkIRect& colorSubset);
+
+ const TextureSampler& onTextureSampler(int i) const override {
+ return IthTextureSampler(i, fDisplacementSampler, fColorSampler);
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ GrCoordTransform fDisplacementTransform;
+ TextureSampler fDisplacementSampler;
+ GrCoordTransform fColorTransform;
+ GrTextureDomain fDomain;
+ TextureSampler fColorSampler;
+ SkColorChannel fXChannelSelector;
+ SkColorChannel fYChannelSelector;
+ SkVector fScale;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
+
+static void compute_displacement(Extractor ex, const SkVector& scale, SkBitmap* dst,
+ const SkBitmap& displ, const SkIPoint& offset,
+ const SkBitmap& src,
+ const SkIRect& bounds) {
+ static const SkScalar Inv8bit = SkScalarInvert(255);
+ const int srcW = src.width();
+ const int srcH = src.height();
+ const SkVector scaleForColor = SkVector::Make(scale.fX * Inv8bit, scale.fY * Inv8bit);
+ const SkVector scaleAdj = SkVector::Make(SK_ScalarHalf - scale.fX * SK_ScalarHalf,
+ SK_ScalarHalf - scale.fY * SK_ScalarHalf);
+ SkPMColor* dstPtr = dst->getAddr32(0, 0);
+ for (int y = bounds.top(); y < bounds.bottom(); ++y) {
+ const SkPMColor* displPtr = displ.getAddr32(bounds.left() + offset.fX, y + offset.fY);
+ for (int x = bounds.left(); x < bounds.right(); ++x, ++displPtr) {
+ SkColor c = SkUnPreMultiply::PMColorToColor(*displPtr);
+
+ SkScalar displX = scaleForColor.fX * ex.getX(c) + scaleAdj.fX;
+ SkScalar displY = scaleForColor.fY * ex.getY(c) + scaleAdj.fY;
+ // Truncate the displacement values
+ const int32_t srcX = Sk32_sat_add(x, SkScalarTruncToInt(displX));
+ const int32_t srcY = Sk32_sat_add(y, SkScalarTruncToInt(displY));
+ *dstPtr++ = ((srcX < 0) || (srcX >= srcW) || (srcY < 0) || (srcY >= srcH)) ?
+ 0 : *(src.getAddr32(srcX, srcY));
+ }
+ }
+}
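+
+// A worked example of compute_displacement (illustrative): with scale = 16, a
+// selected channel value of 255 gives displX = 16 * (255/255) + (0.5 - 8) = 8.5,
+// truncated to +8; a value of 0 gives -7.5, truncated to -7. Displacements thus
+// span roughly [-scale/2, +scale/2] around the source pixel.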
+
+sk_sp<SkSpecialImage> SkDisplacementMapEffectImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint colorOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> color(this->filterInput(1, ctx, &colorOffset));
+ if (!color) {
+ return nullptr;
+ }
+
+ SkIPoint displOffset = SkIPoint::Make(0, 0);
+ // Creation of the displacement map should happen in a non-colorspace aware context. This
+ // texture is a purely mathematical construct, so we want to just operate on the stored
+ // values. Consider:
+ // User supplies an sRGB displacement map. If we're rendering to a wider gamut, then we could
+ // end up filtering the displacement map into that gamut, which has the effect of reducing
+ // the amount of displacement that it represents (as encoded values move away from the
+ // primaries).
+ // With a more complex DAG attached to this input, it's not clear that working in ANY specific
+ // color space makes sense, so we ignore color spaces (and gamma) entirely. This may not be
+ // ideal, but it's at least consistent and predictable.
+ Context displContext(ctx.mapping(), ctx.desiredOutput(), ctx.cache(),
+ kN32_SkColorType, nullptr, ctx.source());
+ sk_sp<SkSpecialImage> displ(this->filterInput(0, displContext, &displOffset));
+ if (!displ) {
+ return nullptr;
+ }
+
+ const SkIRect srcBounds = SkIRect::MakeXYWH(colorOffset.x(), colorOffset.y(),
+ color->width(), color->height());
+
+    // Both paths do bounds checking on color pixel access, so we don't need to
+    // pad the color bitmap to bounds here.
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkIRect displBounds;
+ displ = this->applyCropRectAndPad(ctx, displ.get(), &displOffset, &displBounds);
+ if (!displ) {
+ return nullptr;
+ }
+
+ if (!bounds.intersect(displBounds)) {
+ return nullptr;
+ }
+
+ const SkIRect colorBounds = bounds.makeOffset(-colorOffset);
+ // If the offset overflowed (saturated) then we have to abort, as we need their
+ // dimensions to be equal. See https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=7209
+ if (colorBounds.size() != bounds.size()) {
+ return nullptr;
+ }
+
+ SkVector scale = SkVector::Make(fScale, fScale);
+ ctx.ctm().mapVectors(&scale, 1);
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> colorProxy(color->asTextureProxyRef(context));
+ sk_sp<GrTextureProxy> displProxy(displ->asTextureProxyRef(context));
+ if (!colorProxy || !displProxy) {
+ return nullptr;
+ }
+ const auto isProtected = colorProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+
+ SkMatrix offsetMatrix = SkMatrix::MakeTrans(SkIntToScalar(colorOffset.fX - displOffset.fX),
+ SkIntToScalar(colorOffset.fY - displOffset.fY));
+
+ std::unique_ptr<GrFragmentProcessor> fp =
+ GrDisplacementMapEffect::Make(fXChannelSelector,
+ fYChannelSelector,
+ scale,
+ std::move(displProxy),
+ displ->subset(),
+ offsetMatrix,
+ std::move(colorProxy),
+ color->subset());
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), color->getColorSpace(),
+ color->alphaType(), ctx.colorSpace());
+
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(colorBounds.x()), -SkIntToScalar(colorBounds.y()));
+
+ auto renderTargetContext =
+ context->priv().makeDeferredRenderTargetContext(SkBackingFit::kApprox,
+ bounds.width(),
+ bounds.height(),
+ ctx.grColorType(),
+ ctx.refColorSpace(),
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ isProtected);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, matrix,
+ SkRect::Make(colorBounds));
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeDeferredFromGpu(
+ context,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().colorType(),
+ renderTargetContext->colorInfo().refColorSpace());
+ }
+#endif
+
+ SkBitmap colorBM, displBM;
+
+ if (!color->getROPixels(&colorBM) || !displ->getROPixels(&displBM)) {
+ return nullptr;
+ }
+
+ if ((colorBM.colorType() != kN32_SkColorType) ||
+ (displBM.colorType() != kN32_SkColorType)) {
+ return nullptr;
+ }
+
+ if (!colorBM.getPixels() || !displBM.getPixels()) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ colorBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ compute_displacement(Extractor(fXChannelSelector, fYChannelSelector), scale, &dst,
+ displBM, colorOffset - displOffset, colorBM, colorBounds);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
+
+SkRect SkDisplacementMapEffectImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getColorInput() ? this->getColorInput()->computeFastBounds(src) : src;
+ bounds.outset(SkScalarAbs(fScale) * SK_ScalarHalf, SkScalarAbs(fScale) * SK_ScalarHalf);
+ return bounds;
+}
+
+SkIRect SkDisplacementMapEffectImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection, const SkIRect* inputRect) const {
+ SkVector scale = SkVector::Make(fScale, fScale);
+ ctm.mapVectors(&scale, 1);
+ return src.makeOutset(SkScalarCeilToInt(SkScalarAbs(scale.fX) * SK_ScalarHalf),
+ SkScalarCeilToInt(SkScalarAbs(scale.fY) * SK_ScalarHalf));
+}
+
+SkIRect SkDisplacementMapEffectImpl::onFilterBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ // Recurse only into color input.
+ if (this->getColorInput()) {
+ return this->getColorInput()->filterBounds(src, ctm, dir, inputRect);
+ }
+ return src;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+class GrGLDisplacementMapEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fScaleUni;
+ GrTextureDomain::GLDomain fGLDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrDisplacementMapEffect::onCreateGLSLInstance() const {
+ return new GrGLDisplacementMapEffect;
+}
+
+void GrDisplacementMapEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDisplacementMapEffect::GenKey(*this, caps, b);
+}
+
+GrDisplacementMapEffect::GrDisplacementMapEffect(
+ SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ const SkVector& scale,
+ sk_sp<GrTextureProxy> displacement,
+ const SkIRect& displSubset,
+ const SkMatrix& offsetMatrix,
+ sk_sp<GrTextureProxy> color,
+ const SkIRect& colorSubset)
+ : INHERITED(kGrDisplacementMapEffect_ClassID,
+ GrFragmentProcessor::kNone_OptimizationFlags)
+ , fDisplacementTransform(
+ SkMatrix::Concat(SkMatrix::MakeTrans(displSubset.x(), displSubset.y()),
+ offsetMatrix),
+ displacement.get())
+ , fDisplacementSampler(displacement)
+ , fColorTransform(SkMatrix::MakeTrans(colorSubset.x(), colorSubset.y()), color.get())
+ , fDomain(color.get(),
+ GrTextureDomain::MakeTexelDomain(colorSubset,
+ GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrTextureDomain::kDecal_Mode)
+ , fColorSampler(color)
+ , fXChannelSelector(xChannelSelector)
+ , fYChannelSelector(yChannelSelector)
+ , fScale(scale) {
+ this->addCoordTransform(&fDisplacementTransform);
+ this->addCoordTransform(&fColorTransform);
+ this->setTextureSamplerCnt(2);
+}
+
+GrDisplacementMapEffect::GrDisplacementMapEffect(const GrDisplacementMapEffect& that)
+ : INHERITED(kGrDisplacementMapEffect_ClassID, that.optimizationFlags())
+ , fDisplacementTransform(that.fDisplacementTransform)
+ , fDisplacementSampler(that.fDisplacementSampler)
+ , fColorTransform(that.fColorTransform)
+ , fDomain(that.fDomain)
+ , fColorSampler(that.fColorSampler)
+ , fXChannelSelector(that.fXChannelSelector)
+ , fYChannelSelector(that.fYChannelSelector)
+ , fScale(that.fScale) {
+ this->addCoordTransform(&fDisplacementTransform);
+ this->addCoordTransform(&fColorTransform);
+ this->setTextureSamplerCnt(2);
+}
+
+GrDisplacementMapEffect::~GrDisplacementMapEffect() {}
+
+std::unique_ptr<GrFragmentProcessor> GrDisplacementMapEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrDisplacementMapEffect(*this));
+}
+
+bool GrDisplacementMapEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrDisplacementMapEffect& s = sBase.cast<GrDisplacementMapEffect>();
+ return fXChannelSelector == s.fXChannelSelector &&
+ fYChannelSelector == s.fYChannelSelector &&
+ fScale == s.fScale;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDisplacementMapEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrDisplacementMapEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdxDispl = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ int texIdxColor = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> dispProxy = d->textureProxy(texIdxDispl);
+ sk_sp<GrTextureProxy> colorProxy = d->textureProxy(texIdxColor);
+ static const int kMaxComponent = static_cast<int>(SkColorChannel::kLastEnum);
+ SkColorChannel xChannelSelector =
+ static_cast<SkColorChannel>(d->fRandom->nextRangeU(1, kMaxComponent));
+ SkColorChannel yChannelSelector =
+ static_cast<SkColorChannel>(d->fRandom->nextRangeU(1, kMaxComponent));
+ SkVector scale = SkVector::Make(d->fRandom->nextRangeScalar(0, 100.0f),
+ d->fRandom->nextRangeScalar(0, 100.0f));
+ SkISize colorDimensions;
+ colorDimensions.fWidth = d->fRandom->nextRangeU(0, colorProxy->width());
+ colorDimensions.fHeight = d->fRandom->nextRangeU(0, colorProxy->height());
+ SkIRect dispRect = SkIRect::MakeWH(dispProxy->width(), dispProxy->height());
+ return GrDisplacementMapEffect::Make(xChannelSelector, yChannelSelector, scale,
+ std::move(dispProxy),
+ dispRect,
+ SkMatrix::I(),
+ std::move(colorProxy), SkIRect::MakeSize(colorDimensions));
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDisplacementMapEffect::emitCode(EmitArgs& args) {
+ const GrDisplacementMapEffect& displacementMap = args.fFp.cast<GrDisplacementMapEffect>();
+ const GrTextureDomain& domain = displacementMap.domain();
+
+ fScaleUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType, "Scale");
+ const char* scaleUni = args.fUniformHandler->getUniformCStr(fScaleUni);
+ const char* dColor = "dColor";
+ const char* cCoords = "cCoords";
+    const char* nearZero = "1e-6"; // Since 6.10352e-5 (2^-14) is the smallest positive
+                                   // normal half float, use a number smaller than that
+                                   // to approximate 0, but leave room for 32-bit float
+                                   // GPU rounding errors.
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppendf("\t\thalf4 %s = ", dColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0],
+ args.fTransformedCoords[0].fVaryingPoint.c_str(),
+ args.fTransformedCoords[0].fVaryingPoint.getType());
+ fragBuilder->codeAppend(";\n");
+
+ // Unpremultiply the displacement
+ fragBuilder->codeAppendf(
+ "\t\t%s.rgb = (%s.a < %s) ? half3(0.0) : saturate(%s.rgb / %s.a);",
+ dColor, dColor, nearZero, dColor, dColor);
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[1].fVaryingPoint);
+ fragBuilder->codeAppendf("\t\tfloat2 %s = %s + %s*(%s.",
+ cCoords, coords2D.c_str(), scaleUni, dColor);
+
+ switch (displacementMap.xChannelSelector()) {
+ case SkColorChannel::kR:
+ fragBuilder->codeAppend("r");
+ break;
+ case SkColorChannel::kG:
+ fragBuilder->codeAppend("g");
+ break;
+ case SkColorChannel::kB:
+ fragBuilder->codeAppend("b");
+ break;
+ case SkColorChannel::kA:
+ fragBuilder->codeAppend("a");
+ break;
+ default:
+ SkDEBUGFAIL("Unknown X channel selector");
+ }
+
+ switch (displacementMap.yChannelSelector()) {
+ case SkColorChannel::kR:
+ fragBuilder->codeAppend("r");
+ break;
+ case SkColorChannel::kG:
+ fragBuilder->codeAppend("g");
+ break;
+ case SkColorChannel::kB:
+ fragBuilder->codeAppend("b");
+ break;
+ case SkColorChannel::kA:
+ fragBuilder->codeAppend("a");
+ break;
+ default:
+ SkDEBUGFAIL("Unknown Y channel selector");
+ }
+ fragBuilder->codeAppend("-half2(0.5));\t\t");
+
+ fGLDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ domain,
+ args.fOutputColor,
+ SkString(cCoords),
+ args.fTexSamplers[1]);
+ fragBuilder->codeAppend(";\n");
+}
+
+void GrGLDisplacementMapEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ const GrDisplacementMapEffect& displacementMap = proc.cast<GrDisplacementMapEffect>();
+ GrTextureProxy* proxy = displacementMap.textureSampler(1).proxy();
+ GrTexture* colorTex = proxy->peekTexture();
+
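+    // The uniform stores the scale in texel units of the color texture; Y is negated
+    // for bottom-left-origin textures so the displacement direction matches raster
+    // space.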
+ SkScalar scaleX = displacementMap.scale().fX / colorTex->width();
+ SkScalar scaleY = displacementMap.scale().fY / colorTex->height();
+ pdman.set2f(fScaleUni, SkScalarToFloat(scaleX),
+ proxy->origin() == kTopLeft_GrSurfaceOrigin ?
+ SkScalarToFloat(scaleY) : SkScalarToFloat(-scaleY));
+ fGLDomain.setData(pdman, displacementMap.domain(), proxy,
+ displacementMap.textureSampler(1).samplerState());
+}
+
+void GrGLDisplacementMapEffect::GenKey(const GrProcessor& proc,
+ const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ const GrDisplacementMapEffect& displacementMap = proc.cast<GrDisplacementMapEffect>();
+
+ static constexpr int kChannelSelectorKeyBits = 2; // Max value is 3, so 2 bits are required
+
+ uint32_t xKey = static_cast<uint32_t>(displacementMap.xChannelSelector());
+ uint32_t yKey = static_cast<uint32_t>(displacementMap.yChannelSelector())
+ << kChannelSelectorKeyBits;
+
+ b->add32(xKey | yKey);
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp
new file mode 100644
index 0000000000..5f8fc19bad
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkDropShadowImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/effects/SkBlurImageFilter.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkDropShadowImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkDropShadowImageFilterImpl(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, bool shadowOnly, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fDx(dx)
+ , fDy(dy)
+ , fSigmaX(sigmaX)
+ , fSigmaY(sigmaY)
+ , fColor(color)
+ , fShadowOnly(shadowOnly) {}
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void SkDropShadowImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkDropShadowImageFilterImpl)
+
+ SkScalar fDx, fDy, fSigmaX, fSigmaY;
+ SkColor fColor;
+ bool fShadowOnly;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkDropShadowImageFilter::Make(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, ShadowMode shadowMode,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ bool shadowOnly = shadowMode == SkDropShadowImageFilter::kDrawShadowOnly_ShadowMode;
+ return sk_sp<SkImageFilter>(new SkDropShadowImageFilterImpl(
+ dx, dy, sigmaX, sigmaY, color, shadowOnly, std::move(input), cropRect));
+}
+
+void SkDropShadowImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkDropShadowImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkDropShadowImageFilter", SkDropShadowImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkDropShadowImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar dx = buffer.readScalar();
+ SkScalar dy = buffer.readScalar();
+ SkScalar sigmaX = buffer.readScalar();
+ SkScalar sigmaY = buffer.readScalar();
+ SkColor color = buffer.readColor();
+
+ // For backwards compatibility, the shadow mode had been saved as an enum cast to a 32LE int,
+ // where shadow-and-foreground was 0 and shadow-only was 1. Other than the number of bits, this
+ // is equivalent to the bool that SkDropShadowImageFilterImpl now uses.
+ bool shadowOnly = SkToBool(buffer.read32LE(1));
+    // TODO (michaelludwig) - Call factory function once SkDropShadowImageFilter::Make no
+    // longer takes the old enum as its argument
+ return sk_sp<SkImageFilter>(new SkDropShadowImageFilterImpl(
+ dx, dy, sigmaX, sigmaY, color, shadowOnly, common.getInput(0), &common.cropRect()));
+}
+
+void SkDropShadowImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fDx);
+ buffer.writeScalar(fDy);
+ buffer.writeScalar(fSigmaX);
+ buffer.writeScalar(fSigmaY);
+ buffer.writeColor(fColor);
+ // See CreateProc, but we save the bool as an int to match previous enum serialization.
+ buffer.writeInt(fShadowOnly);
+}
+
+sk_sp<SkSpecialImage> SkDropShadowImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+ ctx.ctm().mapVectors(&sigma, 1);
+ sigma.fX = SkMaxScalar(0, sigma.fX);
+ sigma.fY = SkMaxScalar(0, sigma.fY);
+
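+    // The shadow layer is the input drawn through a blur filter, with a kSrcIn color
+    // filter that replaces the blurred pixels with fColor modulated by their alpha.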
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setImageFilter(SkBlurImageFilter::Make(sigma.fX, sigma.fY, nullptr));
+ paint.setColorFilter(SkColorFilters::Blend(fColor, SkBlendMode::kSrcIn));
+
+ SkVector offsetVec = SkVector::Make(fDx, fDy);
+ ctx.ctm().mapVectors(&offsetVec, 1);
+
+ canvas->translate(SkIntToScalar(inputOffset.fX - bounds.fLeft),
+ SkIntToScalar(inputOffset.fY - bounds.fTop));
+ input->draw(canvas, offsetVec.fX, offsetVec.fY, &paint);
+
+ if (!fShadowOnly) {
+ input->draw(canvas, 0, 0, nullptr);
+ }
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkDropShadowImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ SkRect shadowBounds = bounds;
+ shadowBounds.offset(fDx, fDy);
+ shadowBounds.outset(fSigmaX * 3, fSigmaY * 3);
+ if (!fShadowOnly) {
+ bounds.join(shadowBounds);
+ } else {
+ bounds = shadowBounds;
+ }
+ return bounds;
+}
+
+SkIRect SkDropShadowImageFilterImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ SkVector offsetVec = SkVector::Make(fDx, fDy);
+ if (kReverse_MapDirection == dir) {
+ offsetVec.negate();
+ }
+ ctm.mapVectors(&offsetVec, 1);
+ SkIRect dst = src.makeOffset(SkScalarCeilToInt(offsetVec.x()),
+ SkScalarCeilToInt(offsetVec.y()));
+ SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+ ctm.mapVectors(&sigma, 1);
+ dst.outset(
+ SkScalarCeilToInt(SkScalarAbs(sigma.x() * 3)),
+ SkScalarCeilToInt(SkScalarAbs(sigma.y() * 3)));
+ if (!fShadowOnly) {
+ dst.join(src);
+ }
+ return dst;
+}
+
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkImageFilters.cpp b/gfx/skia/skia/src/effects/imagefilters/SkImageFilters.cpp
new file mode 100644
index 0000000000..3121d7ac4b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkImageFilters.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkImageFilters.h"
+
+// TODO (michaelludwig) - Right now there is a bit of a weird dependency where the implementations
+// of the new, preferred filter factories depend on the per-filter headers in include/effects,
+// which have themselves been marked as deprecated. But, once clients are updated to use the
+// new factories implemented in this file, the per-filter headers can go into
+// src/effects/imagefilters and will no longer be "deprecated" since they've been made fully
+// internal at that point.
+#include "include/effects/SkAlphaThresholdFilter.h"
+#include "include/effects/SkArithmeticImageFilter.h"
+#include "include/effects/SkBlurImageFilter.h"
+#include "include/effects/SkColorFilterImageFilter.h"
+#include "include/effects/SkComposeImageFilter.h"
+#include "include/effects/SkDisplacementMapEffect.h"
+#include "include/effects/SkDropShadowImageFilter.h"
+#include "include/effects/SkImageSource.h"
+#include "include/effects/SkLightingImageFilter.h"
+#include "include/effects/SkMagnifierImageFilter.h"
+#include "include/effects/SkMatrixConvolutionImageFilter.h"
+#include "include/effects/SkMergeImageFilter.h"
+#include "include/effects/SkMorphologyImageFilter.h"
+#include "include/effects/SkOffsetImageFilter.h"
+#include "include/effects/SkPaintImageFilter.h"
+#include "include/effects/SkPictureImageFilter.h"
+#include "include/effects/SkTileImageFilter.h"
+#include "include/effects/SkXfermodeImageFilter.h"
+
+// TODO (michaelludwig) - Once SkCanvas can draw the results of a filter with any transform, this
+// filter can be moved out of core
+#include "src/core/SkMatrixImageFilter.h"
+
+// TODO (michaelludwig) - We are phasing out the use of SkImageFilter::CropRect, since edge
+// flags do not appear to actually be used, and will move towards an explicit cropping filter.
+// To assist with this, the new factory functions just take the basic SkIRect* even though the
+// implementations have not been updated yet.
+static SkImageFilter::CropRect make_crop_rect(const SkIRect* cropRect) {
+ return cropRect ? SkImageFilter::CropRect(SkRect::Make(*cropRect))
+ : SkImageFilter::CropRect(SkRect::MakeEmpty(), 0x0);
+}
+
+void SkImageFilters::RegisterFlattenables() {
+ SkAlphaThresholdFilter::RegisterFlattenables();
+ SkArithmeticImageFilter::RegisterFlattenables();
+ SkBlurImageFilter::RegisterFlattenables();
+ SkColorFilterImageFilter::RegisterFlattenables();
+ SkComposeImageFilter::RegisterFlattenables();
+ SkDilateImageFilter::RegisterFlattenables();
+ SkDisplacementMapEffect::RegisterFlattenables();
+ SkDropShadowImageFilter::RegisterFlattenables();
+ SkImageSource::RegisterFlattenables();
+ SkLightingImageFilter::RegisterFlattenables();
+ SkMagnifierImageFilter::RegisterFlattenables();
+ SkMatrixConvolutionImageFilter::RegisterFlattenables();
+ SkMergeImageFilter::RegisterFlattenables();
+ SkOffsetImageFilter::RegisterFlattenables();
+ SkPaintImageFilter::RegisterFlattenables();
+ SkPictureImageFilter::RegisterFlattenables();
+ SkTileImageFilter::RegisterFlattenables();
+ SkXfermodeImageFilter::RegisterFlattenables();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkImageFilters::AlphaThreshold(
+ const SkRegion& region, SkScalar innerMin, SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkAlphaThresholdFilter::Make(region, innerMin, outerMax, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Arithmetic(
+ SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> background, sk_sp<SkImageFilter> foreground, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkArithmeticImageFilter::Make(k1, k2, k3, k4, enforcePMColor, std::move(background),
+ std::move(foreground), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Blur(
+ SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode, sk_sp<SkImageFilter> input,
+ const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkBlurImageFilter::Make(sigmaX, sigmaY, tileMode, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::ColorFilter(
+ sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkColorFilterImageFilter::Make(std::move(cf), std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Compose(
+ sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner) {
+ return SkComposeImageFilter::Make(std::move(outer), std::move(inner));
+}
+
+sk_sp<SkImageFilter> SkImageFilters::DisplacementMap(
+ SkColorChannel xChannelSelector, SkColorChannel yChannelSelector, SkScalar scale,
+ sk_sp<SkImageFilter> displacement, sk_sp<SkImageFilter> color, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkDisplacementMapEffect::Make(xChannelSelector, yChannelSelector, scale,
+ std::move(displacement), std::move(color), &r);
+}
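+
+// A minimal usage sketch (hypothetical variable names; assumes sk_sp<SkImageFilter>
+// displ and color wrap the displacement and color inputs):
+//   sk_sp<SkImageFilter> filter = SkImageFilters::DisplacementMap(
+//           SkColorChannel::kR, SkColorChannel::kG, 16.0f,
+//           std::move(displ), std::move(color), nullptr);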
+
+sk_sp<SkImageFilter> SkImageFilters::DropShadow(
+ SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY, SkColor color,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ // TODO (michaelludwig) - Once SkDropShadowImageFilter is fully hidden, this can be updated to
+ // pass a constant bool into the internal factory.
+ return SkDropShadowImageFilter::Make(
+ dx, dy, sigmaX, sigmaY, color,
+ SkDropShadowImageFilter::kDrawShadowAndForeground_ShadowMode,
+ std::move(input), &r);
+}
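+
+// A minimal usage sketch (hypothetical names; assumes a valid SkCanvas* canvas and an
+// sk_sp<SkImage> image are in scope):
+//   SkPaint paint;
+//   paint.setImageFilter(SkImageFilters::DropShadow(
+//           3.0f, 3.0f, 2.0f, 2.0f, SK_ColorBLACK, nullptr, nullptr));
+//   canvas->drawImage(image.get(), 0, 0, &paint);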
+
+sk_sp<SkImageFilter> SkImageFilters::DropShadowOnly(
+ SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY, SkColor color,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ // TODO (michaelludwig) - Once SkDropShadowImageFilter is fully hidden, this can be updated to
+ // pass a constant bool into the internal factory.
+ return SkDropShadowImageFilter::Make(dx, dy, sigmaX, sigmaY, color,
+ SkDropShadowImageFilter::kDrawShadowOnly_ShadowMode,
+ std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Image(
+ sk_sp<SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ SkFilterQuality filterQuality) {
+ return SkImageSource::Make(std::move(image), srcRect, dstRect, filterQuality);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Magnifier(
+        const SkRect& srcRect, SkScalar inset, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkMagnifierImageFilter::Make(srcRect, inset, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::MatrixConvolution(
+ const SkISize& kernelSize, const SkScalar kernel[], SkScalar gain, SkScalar bias,
+ const SkIPoint& kernelOffset, SkTileMode tileMode, bool convolveAlpha,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkMatrixConvolutionImageFilter::Make(kernelSize, kernel, gain, bias, kernelOffset,
+ tileMode, convolveAlpha, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::MatrixTransform(
+ const SkMatrix& transform, SkFilterQuality filterQuality, sk_sp<SkImageFilter> input) {
+ return SkMatrixImageFilter::Make(transform, filterQuality, std::move(input));
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Merge(
+ sk_sp<SkImageFilter>* const filters, int count, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkMergeImageFilter::Make(filters, count, &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Offset(
+ SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkOffsetImageFilter::Make(dx, dy, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Paint(const SkPaint& paint, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkPaintImageFilter::Make(paint, &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Picture(sk_sp<SkPicture> pic, const SkRect& targetRect) {
+ return SkPictureImageFilter::Make(std::move(pic), targetRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Tile(
+ const SkRect& src, const SkRect& dst, sk_sp<SkImageFilter> input) {
+ return SkTileImageFilter::Make(src, dst, std::move(input));
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Xfermode(
+ SkBlendMode mode, sk_sp<SkImageFilter> background, sk_sp<SkImageFilter> foreground,
+ const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkXfermodeImageFilter::Make(mode, std::move(background), std::move(foreground), &r);
+}
+
+// Morphology filter effects
+
+sk_sp<SkImageFilter> SkImageFilters::Dilate(
+ int radiusX, int radiusY, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkDilateImageFilter::Make(radiusX, radiusY, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Erode(
+ int radiusX, int radiusY, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkErodeImageFilter::Make(radiusX, radiusY, std::move(input), &r);
+}
+
+// Lighting filter effects
+
+sk_sp<SkImageFilter> SkImageFilters::DistantLitDiffuse(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakeDistantLitDiffuse(direction, lightColor, surfaceScale, kd,
+ std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::PointLitDiffuse(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakePointLitDiffuse(location, lightColor, surfaceScale, kd,
+ std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::SpotLitDiffuse(
+ const SkPoint3& location, const SkPoint3& target, SkScalar falloffExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakeSpotLitDiffuse(location, target, falloffExponent, cutoffAngle,
+ lightColor, surfaceScale, kd,
+ std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::DistantLitSpecular(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakeDistantLitSpecular(direction, lightColor, surfaceScale,
+ ks, shininess, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::PointLitSpecular(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakePointLitSpecular(location, lightColor, surfaceScale, ks,
+ shininess, std::move(input), &r);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::SpotLitSpecular(
+ const SkPoint3& location, const SkPoint3& target, SkScalar falloffExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkIRect* cropRect) {
+ SkImageFilter::CropRect r = make_crop_rect(cropRect);
+ return SkLightingImageFilter::MakeSpotLitSpecular(location, target, falloffExponent,
+ cutoffAngle, lightColor, surfaceScale,
+ ks, shininess, std::move(input), &r);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkImageSource.cpp b/gfx/skia/skia/src/effects/imagefilters/SkImageSource.cpp
new file mode 100644
index 0000000000..7c4d1af053
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkImageSource.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkImageSource.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkString.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkImageSourceImpl final : public SkImageFilter_Base {
+public:
+ SkImageSourceImpl(sk_sp<SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ SkFilterQuality filterQuality)
+ : INHERITED(nullptr, 0, nullptr)
+ , fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fFilterQuality(filterQuality) {}
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void SkImageSource::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkImageSourceImpl)
+
+ sk_sp<SkImage> fImage;
+ SkRect fSrcRect, fDstRect;
+ SkFilterQuality fFilterQuality;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageSource::Make(sk_sp<SkImage> image) {
+ SkRect rect = image ? SkRect::MakeIWH(image->width(), image->height()) : SkRect::MakeEmpty();
+ return SkImageSource::Make(std::move(image), rect, rect, kHigh_SkFilterQuality);
+}
+
+sk_sp<SkImageFilter> SkImageSource::Make(sk_sp<SkImage> image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality) {
+ if (!image || srcRect.width() <= 0.0f || srcRect.height() <= 0.0f) {
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(new SkImageSourceImpl(
+ std::move(image), srcRect, dstRect, filterQuality));
+}
+
+void SkImageSource::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkImageSourceImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkImageSourceImpl", SkImageSourceImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkImageSourceImpl::CreateProc(SkReadBuffer& buffer) {
+ SkFilterQuality filterQuality = (SkFilterQuality)buffer.readInt();
+
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+
+ sk_sp<SkImage> image(buffer.readImage());
+ if (!image) {
+ return nullptr;
+ }
+
+ return SkImageSource::Make(std::move(image), src, dst, filterQuality);
+}
+
+void SkImageSourceImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fFilterQuality);
+ buffer.writeRect(fSrcRect);
+ buffer.writeRect(fDstRect);
+ buffer.writeImage(fImage.get());
+}
+
+sk_sp<SkSpecialImage> SkImageSourceImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkRect dstRect;
+ ctx.ctm().mapRect(&dstRect, fDstRect);
+
+ SkRect bounds = SkRect::MakeIWH(fImage->width(), fImage->height());
+ if (fSrcRect == bounds) {
+ int iLeft = dstRect.fLeft;
+ int iTop = dstRect.fTop;
+ // TODO: this seems to be a very noise-prone way to determine this (esp. the floating-point
+ // widths & heights).
+ if (dstRect.width() == bounds.width() && dstRect.height() == bounds.height() &&
+ iLeft == dstRect.fLeft && iTop == dstRect.fTop) {
+ // The dest is just an un-scaled integer translation of the entire image; return it
+ offset->fX = iLeft;
+ offset->fY = iTop;
+
+ return SkSpecialImage::MakeFromImage(ctx.getContext(),
+ SkIRect::MakeWH(fImage->width(), fImage->height()),
+ fImage, ctx.surfaceProps());
+ }
+ }
+
+ const SkIRect dstIRect = dstRect.roundOut();
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(dstIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // TODO: it seems like this clear shouldn't be necessary (see skbug.com/5075)
+ canvas->clear(0x0);
+
+ SkPaint paint;
+
+ // Subtract off the integer component of the translation (will be applied in offset, below).
+ dstRect.offset(-SkIntToScalar(dstIRect.fLeft), -SkIntToScalar(dstIRect.fTop));
+ paint.setBlendMode(SkBlendMode::kSrc);
+ // FIXME: this probably shouldn't be necessary, but drawImageRect asserts
+ // None filtering when it's translate-only
+ paint.setFilterQuality(
+ fSrcRect.width() == dstRect.width() && fSrcRect.height() == dstRect.height() ?
+ kNone_SkFilterQuality : fFilterQuality);
+ canvas->drawImageRect(fImage.get(), fSrcRect, dstRect, &paint,
+ SkCanvas::kStrict_SrcRectConstraint);
+
+ offset->fX = dstIRect.fLeft;
+ offset->fY = dstIRect.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkImageSourceImpl::computeFastBounds(const SkRect& src) const {
+ return fDstRect;
+}
+
+SkIRect SkImageSourceImpl::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == direction) {
+ return INHERITED::onFilterNodeBounds(src, ctm, direction, inputRect);
+ }
+
+ SkRect dstRect = fDstRect;
+ ctm.mapRect(&dstRect);
+ return dstRect.roundOut();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp
new file mode 100644
index 0000000000..43d32dcc91
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp
@@ -0,0 +1,2120 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkLightingImageFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrGLDiffuseLightingEffect;
+class GrGLSpecularLightingEffect;
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+#endif
+
+const SkScalar gOneThird = SkIntToScalar(1) / 3;
+const SkScalar gTwoThirds = SkIntToScalar(2) / 3;
+const SkScalar gOneHalf = 0.5f;
+const SkScalar gOneQuarter = 0.25f;
+
+#if SK_SUPPORT_GPU
+static void setUniformPoint3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+ const SkPoint3& point) {
+ GR_STATIC_ASSERT(sizeof(SkPoint3) == 3 * sizeof(float));
+ pdman.set3fv(uni, 1, &point.fX);
+}
+
+static void setUniformNormal3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+ const SkPoint3& point) {
+ setUniformPoint3(pdman, uni, point);
+}
+#endif
+
+// Shift matrix components to the left, as we advance pixels to the right.
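+// m[] holds a 3x3 window of alpha values in row-major order:
+//   m[0] m[1] m[2]
+//   m[3] m[4] m[5]
+//   m[6] m[7] m[8]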
+static inline void shiftMatrixLeft(int m[9]) {
+ m[0] = m[1];
+ m[3] = m[4];
+ m[6] = m[7];
+ m[1] = m[2];
+ m[4] = m[5];
+ m[7] = m[8];
+}
+
+static inline void fast_normalize(SkPoint3* vector) {
+ // add a tiny bit so we don't have to worry about divide-by-zero
+ SkScalar magSq = vector->dot(*vector) + SK_ScalarNearlyZero;
+ SkScalar scale = sk_float_rsqrt(magSq);
+ vector->fX *= scale;
+ vector->fY *= scale;
+ vector->fZ *= scale;
+}
+
+static SkPoint3 read_point3(SkReadBuffer& buffer) {
+ SkPoint3 point;
+ point.fX = buffer.readScalar();
+ point.fY = buffer.readScalar();
+ point.fZ = buffer.readScalar();
+ buffer.validate(SkScalarIsFinite(point.fX) &&
+ SkScalarIsFinite(point.fY) &&
+ SkScalarIsFinite(point.fZ));
+ return point;
+}
+
+static void write_point3(const SkPoint3& point, SkWriteBuffer& buffer) {
+ buffer.writeScalar(point.fX);
+ buffer.writeScalar(point.fY);
+ buffer.writeScalar(point.fZ);
+}
+
+class GrGLLight;
+class SkImageFilterLight : public SkRefCnt {
+public:
+ enum LightType {
+ kDistant_LightType,
+ kPoint_LightType,
+ kSpot_LightType,
+
+ kLast_LightType = kSpot_LightType
+ };
+ virtual LightType type() const = 0;
+ const SkPoint3& color() const { return fColor; }
+ virtual GrGLLight* createGLLight() const = 0;
+ virtual bool isEqual(const SkImageFilterLight& other) const {
+ return fColor == other.fColor;
+ }
+ virtual SkImageFilterLight* transform(const SkMatrix& matrix) const = 0;
+
+    // Defined below SkImageFilterLight's subclasses.
+ void flattenLight(SkWriteBuffer& buffer) const;
+ static SkImageFilterLight* UnflattenLight(SkReadBuffer& buffer);
+
+ virtual SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const = 0;
+ virtual SkPoint3 lightColor(const SkPoint3& surfaceToLight) const = 0;
+
+protected:
+ SkImageFilterLight(SkColor color) {
+ fColor = SkPoint3::Make(SkIntToScalar(SkColorGetR(color)),
+ SkIntToScalar(SkColorGetG(color)),
+ SkIntToScalar(SkColorGetB(color)));
+ }
+ SkImageFilterLight(const SkPoint3& color) : fColor(color) {}
+
+ SkImageFilterLight(SkReadBuffer& buffer) {
+ fColor = read_point3(buffer);
+ }
+
+ virtual void onFlattenLight(SkWriteBuffer& buffer) const = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+ SkPoint3 fColor;
+};
+
+class BaseLightingType {
+public:
+ BaseLightingType() {}
+ virtual ~BaseLightingType() {}
+
+ virtual SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+                            const SkPoint3& lightColor) const = 0;
+};
+
+class DiffuseLightingType : public BaseLightingType {
+public:
+ DiffuseLightingType(SkScalar kd)
+ : fKD(kd) {}
+ SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+ const SkPoint3& lightColor) const override {
+ SkScalar colorScale = fKD * normal.dot(surfaceTolight);
+ colorScale = SkScalarClampMax(colorScale, SK_Scalar1);
+ SkPoint3 color = lightColor.makeScale(colorScale);
+ return SkPackARGB32(255,
+ SkClampMax(SkScalarRoundToInt(color.fX), 255),
+ SkClampMax(SkScalarRoundToInt(color.fY), 255),
+ SkClampMax(SkScalarRoundToInt(color.fZ), 255));
+ }
+private:
+ SkScalar fKD;
+};
+
+static SkScalar max_component(const SkPoint3& p) {
+ return p.x() > p.y() ? (p.x() > p.z() ? p.x() : p.z()) : (p.y() > p.z() ? p.y() : p.z());
+}
+
+class SpecularLightingType : public BaseLightingType {
+public:
+ SpecularLightingType(SkScalar ks, SkScalar shininess)
+ : fKS(ks), fShininess(shininess) {}
+ SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+ const SkPoint3& lightColor) const override {
+ SkPoint3 halfDir(surfaceTolight);
+ halfDir.fZ += SK_Scalar1; // eye position is always (0, 0, 1)
+ fast_normalize(&halfDir);
+ SkScalar colorScale = fKS * SkScalarPow(normal.dot(halfDir), fShininess);
+ colorScale = SkScalarClampMax(colorScale, SK_Scalar1);
+ SkPoint3 color = lightColor.makeScale(colorScale);
+ return SkPackARGB32(SkClampMax(SkScalarRoundToInt(max_component(color)), 255),
+ SkClampMax(SkScalarRoundToInt(color.fX), 255),
+ SkClampMax(SkScalarRoundToInt(color.fY), 255),
+ SkClampMax(SkScalarRoundToInt(color.fZ), 255));
+ }
+private:
+ SkScalar fKS;
+ SkScalar fShininess;
+};
+
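+// One Sobel-style edge estimate over a 3x3 window: the (a, b), (c, d), and (e, f)
+// pairs are (left, right) samples weighted 1, 2, 1, and `scale` carries the
+// boundary-dependent 1/4, 1/3, 1/2, or 2/3 factor (gOneQuarter etc. above) used by
+// the SVG lighting spec's surface-normal kernels.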
+static inline SkScalar sobel(int a, int b, int c, int d, int e, int f, SkScalar scale) {
+    return (-a + b - 2 * c + 2 * d - e + f) * scale;
+}
+
+static inline SkPoint3 pointToNormal(SkScalar x, SkScalar y, SkScalar surfaceScale) {
+ SkPoint3 vector = SkPoint3::Make(-x * surfaceScale, -y * surfaceScale, 1);
+ fast_normalize(&vector);
+ return vector;
+}
+
+static inline SkPoint3 topLeftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(0, 0, m[4], m[5], m[7], m[8], gTwoThirds),
+ sobel(0, 0, m[4], m[7], m[5], m[8], gTwoThirds),
+ surfaceScale);
+}
+
+static inline SkPoint3 topNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel( 0, 0, m[3], m[5], m[6], m[8], gOneThird),
+ sobel(m[3], m[6], m[4], m[7], m[5], m[8], gOneHalf),
+ surfaceScale);
+}
+
+static inline SkPoint3 topRightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel( 0, 0, m[3], m[4], m[6], m[7], gTwoThirds),
+ sobel(m[3], m[6], m[4], m[7], 0, 0, gTwoThirds),
+ surfaceScale);
+}
+
+static inline SkPoint3 leftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[1], m[2], m[4], m[5], m[7], m[8], gOneHalf),
+ sobel( 0, 0, m[1], m[7], m[2], m[8], gOneThird),
+ surfaceScale);
+}
+
+static inline SkPoint3 interiorNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[2], m[3], m[5], m[6], m[8], gOneQuarter),
+ sobel(m[0], m[6], m[1], m[7], m[2], m[8], gOneQuarter),
+ surfaceScale);
+}
+
+static inline SkPoint3 rightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[1], m[3], m[4], m[6], m[7], gOneHalf),
+ sobel(m[0], m[6], m[1], m[7], 0, 0, gOneThird),
+ surfaceScale);
+}
+
+static inline SkPoint3 bottomLeftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[1], m[2], m[4], m[5], 0, 0, gTwoThirds),
+ sobel( 0, 0, m[1], m[4], m[2], m[5], gTwoThirds),
+ surfaceScale);
+}
+
+static inline SkPoint3 bottomNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[2], m[3], m[5], 0, 0, gOneThird),
+ sobel(m[0], m[3], m[1], m[4], m[2], m[5], gOneHalf),
+ surfaceScale);
+}
+
+static inline SkPoint3 bottomRightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[1], m[3], m[4], 0, 0, gTwoThirds),
+ sobel(m[0], m[3], m[1], m[4], 0, 0, gTwoThirds),
+ surfaceScale);
+}
+
+class UncheckedPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+};
+
+// The DecalPixelFetcher is used when the destination crop rect exceeds the input bitmap bounds.
+class DecalPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+ }
+};
+
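+// Applies the lighting model to src's alpha channel, writing one lit pixel per entry
+// of bounds into dst. The traversal is split into top row / interior rows / bottom row
+// (and, within each row, left edge / interior / right edge) so each pixel gets its
+// boundary-specific Sobel normal without per-pixel branching; PixelFetcher decides how
+// out-of-bounds neighbors are read.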
+template <class PixelFetcher>
+static void lightBitmap(const BaseLightingType& lightingType,
+ const SkImageFilterLight* l,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ SkASSERT(dst->width() == bounds.width() && dst->height() == bounds.height());
+ int left = bounds.left(), right = bounds.right();
+ int bottom = bounds.bottom();
+ int y = bounds.top();
+ SkIRect srcBounds = src.bounds();
+ SkPMColor* dptr = dst->getAddr32(0, 0);
+ {
+ int x = left;
+ int m[9];
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+        for (++x; x < right - 1; ++x) {
+ shiftMatrixLeft(m);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ for (++y; y < bottom - 1; ++y) {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, y - 1, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(leftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x) {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(interiorNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(rightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, bottom - 2, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, bottom - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+        for (++x; x < right - 1; ++x) {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+}
+
+static void lightBitmap(const BaseLightingType& lightingType,
+ const SkImageFilterLight* light,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ if (src.bounds().contains(bounds)) {
+ lightBitmap<UncheckedPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ } else {
+ lightBitmap<DecalPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ }
+}
+
+enum BoundaryMode {
+ kTopLeft_BoundaryMode,
+ kTop_BoundaryMode,
+ kTopRight_BoundaryMode,
+ kLeft_BoundaryMode,
+ kInterior_BoundaryMode,
+ kRight_BoundaryMode,
+ kBottomLeft_BoundaryMode,
+ kBottom_BoundaryMode,
+ kBottomRight_BoundaryMode,
+
+ kBoundaryModeCount,
+};
+
+class SkLightingImageFilterInternal : public SkImageFilter_Base {
+protected:
+ SkLightingImageFilterInternal(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fLight(std::move(light))
+ , fSurfaceScale(surfaceScale / 255) {}
+
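+    // fSurfaceScale was stored pre-divided by 255 (see the constructor above) so it
+    // applies directly to 8-bit alpha; re-multiply here to serialize the user-facing
+    // value.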
+ void flatten(SkWriteBuffer& buffer) const override {
+ this->INHERITED::flatten(buffer);
+ fLight->flattenLight(buffer);
+ buffer.writeScalar(fSurfaceScale * 255);
+ }
+
+ bool affectsTransparentBlack() const override { return true; }
+
+ const SkImageFilterLight* light() const { return fLight.get(); }
+ inline sk_sp<const SkImageFilterLight> refLight() const { return fLight; }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ SkSpecialImage* input,
+ const SkIRect& bounds,
+ const SkMatrix& matrix) const;
+ virtual std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(
+ sk_sp<GrTextureProxy>,
+ const SkMatrix&,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const = 0;
+#endif
+
+private:
+#if SK_SUPPORT_GPU
+ void drawRect(GrRenderTargetContext*,
+ sk_sp<GrTextureProxy> srcProxy,
+ const SkMatrix& matrix,
+ const GrClip& clip,
+ const SkRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const;
+#endif
+
+ sk_sp<SkImageFilterLight> fLight;
+ SkScalar fSurfaceScale;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+#if SK_SUPPORT_GPU
+void SkLightingImageFilterInternal::drawRect(GrRenderTargetContext* renderTargetContext,
+ sk_sp<GrTextureProxy> srcProxy,
+ const SkMatrix& matrix,
+ const GrClip& clip,
+ const SkRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const {
+ SkRect srcRect = dstRect.makeOffset(SkIntToScalar(bounds.x()), SkIntToScalar(bounds.y()));
+ GrPaint paint;
+ auto fp = this->makeFragmentProcessor(std::move(srcProxy), matrix, srcBounds, boundaryMode);
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ renderTargetContext->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(), dstRect,
+ srcRect);
+}
+
+sk_sp<SkSpecialImage> SkLightingImageFilterInternal::filterImageGPU(
+ const Context& ctx,
+ SkSpecialImage* input,
+ const SkIRect& offsetBounds,
+ const SkMatrix& matrix) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> inputProxy(input->asTextureProxyRef(context));
+ SkASSERT(inputProxy);
+
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox,
+ offsetBounds.width(),
+ offsetBounds.height(),
+ ctx.grColorType(),
+ ctx.refColorSpace(),
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ inputProxy->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ SkIRect dstIRect = SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height());
+ SkRect dstRect = SkRect::Make(dstIRect);
+
+ // setup new clip
+ GrFixedClip clip(dstIRect);
+
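+    // Split the destination into a 3x3 "nine-patch" (four corners, four edges, and the
+    // interior) and draw each region with its matching BoundaryMode, so every fragment
+    // uses the correct boundary-specific Sobel kernel without per-pixel branching.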
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+ SkRect topLeft = SkRect::MakeXYWH(0, 0, 1, 1);
+ SkRect top = SkRect::MakeXYWH(1, 0, dstRect.width() - 2, 1);
+ SkRect topRight = SkRect::MakeXYWH(dstRect.width() - 1, 0, 1, 1);
+ SkRect left = SkRect::MakeXYWH(0, 1, 1, dstRect.height() - 2);
+ SkRect interior = dstRect.makeInset(1, 1);
+ SkRect right = SkRect::MakeXYWH(dstRect.width() - 1, 1, 1, dstRect.height() - 2);
+ SkRect bottomLeft = SkRect::MakeXYWH(0, dstRect.height() - 1, 1, 1);
+ SkRect bottom = SkRect::MakeXYWH(1, dstRect.height() - 1, dstRect.width() - 2, 1);
+ SkRect bottomRight = SkRect::MakeXYWH(dstRect.width() - 1, dstRect.height() - 1, 1, 1);
+
+ const SkIRect* pSrcBounds = inputBounds.contains(offsetBounds) ? nullptr : &inputBounds;
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, topLeft,
+ kTopLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, top,
+ kTop_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, topRight,
+ kTopRight_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, left,
+ kLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, interior,
+ kInterior_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, right,
+ kRight_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, bottomLeft,
+ kBottomLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, bottom,
+ kBottom_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(renderTargetContext.get(), inputProxy, matrix, clip, bottomRight,
+ kBottomRight_BoundaryMode, pSrcBounds, offsetBounds);
+
+ return SkSpecialImage::MakeDeferredFromGpu(
+ context,
+ SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().colorType(),
+ renderTargetContext->colorInfo().refColorSpace());
+}
+#endif
+
+class SkDiffuseLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter>,
+ const CropRect*);
+
+ SkScalar kd() const { return fKD; }
+
+protected:
+ SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light, SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(sk_sp<GrTextureProxy>,
+ const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkDiffuseLightingImageFilter)
+ friend class SkLightingImageFilter;
+ SkScalar fKD;
+
+ typedef SkLightingImageFilterInternal INHERITED;
+};
+
+class SkSpecularLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter>, const CropRect*);
+
+ SkScalar ks() const { return fKS; }
+ SkScalar shininess() const { return fShininess; }
+
+protected:
+ SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input, const CropRect*);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(sk_sp<GrTextureProxy>,
+ const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkSpecularLightingImageFilter)
+
+ SkScalar fKS;
+ SkScalar fShininess;
+ friend class SkLightingImageFilter;
+ typedef SkLightingImageFilterInternal INHERITED;
+};
+
+#if SK_SUPPORT_GPU
+
+class GrLightingEffect : public GrFragmentProcessor {
+public:
+ const SkImageFilterLight* light() const { return fLight.get(); }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+ const SkMatrix& filterMatrix() const { return fFilterMatrix; }
+ BoundaryMode boundaryMode() const { return fBoundaryMode; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+protected:
+ GrLightingEffect(ClassID classID, sk_sp<GrTextureProxy>, sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale, const SkMatrix& matrix, BoundaryMode boundaryMode,
+ const SkIRect* srcBounds);
+
+ GrLightingEffect(const GrLightingEffect& that);
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+private:
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GrCoordTransform fCoordTransform;
+ GrTextureDomain fDomain;
+ TextureSampler fTextureSampler;
+ sk_sp<const SkImageFilterLight> fLight;
+ SkScalar fSurfaceScale;
+ SkMatrix fFilterMatrix;
+ BoundaryMode fBoundaryMode;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GrDiffuseLightingEffect : public GrLightingEffect {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrDiffuseLightingEffect(std::move(proxy), std::move(light), surfaceScale,
+ matrix, kd, boundaryMode, srcBounds));
+ }
+
+ const char* name() const override { return "DiffuseLighting"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrDiffuseLightingEffect(*this));
+ }
+
+ SkScalar kd() const { return fKD; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrDiffuseLightingEffect(sk_sp<GrTextureProxy>,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds);
+
+ explicit GrDiffuseLightingEffect(const GrDiffuseLightingEffect& that);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ SkScalar fKD;
+
+ typedef GrLightingEffect INHERITED;
+};
+
+class GrSpecularLightingEffect : public GrLightingEffect {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSpecularLightingEffect(std::move(proxy), std::move(light), surfaceScale,
+ matrix, ks, shininess, boundaryMode, srcBounds));
+ }
+
+ const char* name() const override { return "SpecularLighting"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSpecularLightingEffect(*this));
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkScalar ks() const { return fKS; }
+ SkScalar shininess() const { return fShininess; }
+
+private:
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrSpecularLightingEffect(sk_sp<GrTextureProxy>,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds);
+
+ explicit GrSpecularLightingEffect(const GrSpecularLightingEffect&);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ SkScalar fKS;
+ SkScalar fShininess;
+
+ typedef GrLightingEffect INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLLight {
+public:
+ virtual ~GrGLLight() {}
+
+ /**
+ * This is called by GrGLLightingEffect::emitCode() before either of the two virtual functions
+ * below. It adds a half3 uniform visible in the FS that represents the constant light color.
+ */
+ void emitLightColorUniform(GrGLSLUniformHandler*);
+
+ /**
+ * These two functions are called from GrGLLightingEffect's emitCode() function.
+ * emitSurfaceToLight places an expression in param out that is the vector from the surface to
+ * the light. The expression will be used in the FS. emitLightColor writes an expression into
+ * the FS that is the color of the light. Either function may add functions and/or uniforms to
+     * the FS. The default implementation of emitLightColor appends the name of the constant
+     * light color uniform, so it only needs to be overridden if the light color varies
+     * spatially.
+ */
+ virtual void emitSurfaceToLight(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char* z) = 0;
+ virtual void emitLightColor(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight);
+
+ // This is called from GrGLLightingEffect's setData(). Subclasses of GrGLLight must call
+ // INHERITED::setData().
+ virtual void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const;
+
+protected:
+ /**
+ * Gets the constant light color uniform. Subclasses can use this in their emitLightColor
+ * function.
+ */
+ UniformHandle lightColorUni() const { return fColorUni; }
+
+private:
+ UniformHandle fColorUni;
+
+ typedef SkRefCnt INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistantLight : public GrGLLight {
+public:
+ ~GrGLDistantLight() override {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ typedef GrGLLight INHERITED;
+ UniformHandle fDirectionUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLPointLight : public GrGLLight {
+public:
+ ~GrGLPointLight() override {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ typedef GrGLLight INHERITED;
+ UniformHandle fLocationUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLSpotLight : public GrGLLight {
+public:
+ ~GrGLSpotLight() override {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+ void emitLightColor(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight) override;
+
+private:
+ typedef GrGLLight INHERITED;
+
+ SkString fLightColorFunc;
+ UniformHandle fLocationUni;
+ UniformHandle fExponentUni;
+ UniformHandle fCosOuterConeAngleUni;
+ UniformHandle fCosInnerConeAngleUni;
+ UniformHandle fConeScaleUni;
+ UniformHandle fSUni;
+};
+#else
+
+class GrGLLight;
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkDistantLight : public SkImageFilterLight {
+public:
+ SkDistantLight(const SkPoint3& direction, SkColor color)
+ : INHERITED(color), fDirection(direction) {
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ return fDirection;
+ }
+ SkPoint3 lightColor(const SkPoint3&) const override { return this->color(); }
+ LightType type() const override { return kDistant_LightType; }
+ const SkPoint3& direction() const { return fDirection; }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLDistantLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kDistant_LightType) {
+ return false;
+ }
+
+ const SkDistantLight& o = static_cast<const SkDistantLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fDirection == o.fDirection;
+ }
+
+ SkDistantLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fDirection = read_point3(buffer);
+ }
+
+protected:
+ SkDistantLight(const SkPoint3& direction, const SkPoint3& color)
+ : INHERITED(color), fDirection(direction) {
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ return new SkDistantLight(direction(), color());
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fDirection, buffer);
+ }
+
+private:
+ SkPoint3 fDirection;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkPointLight : public SkImageFilterLight {
+public:
+ SkPointLight(const SkPoint3& location, SkColor color)
+ : INHERITED(color), fLocation(location) {}
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkIntToScalar(z) * surfaceScale);
+ fast_normalize(&direction);
+ return direction;
+ }
+ SkPoint3 lightColor(const SkPoint3&) const override { return this->color(); }
+ LightType type() const override { return kPoint_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLPointLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kPoint_LightType) {
+ return false;
+ }
+ const SkPointLight& o = static_cast<const SkPointLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation;
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX,
+ location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ return new SkPointLight(location, color());
+ }
+
+ SkPointLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = read_point3(buffer);
+ }
+
+protected:
+ SkPointLight(const SkPoint3& location, const SkPoint3& color)
+ : INHERITED(color), fLocation(location) {}
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fLocation, buffer);
+ }
+
+private:
+ SkPoint3 fLocation;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpotLight : public SkImageFilterLight {
+public:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cutoffAngle,
+ SkColor color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(SkScalarPin(specularExponent, kSpecularExponentMin, kSpecularExponentMax))
+ {
+ fS = target - location;
+ fast_normalize(&fS);
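+        // Cone falloff terms: fCosOuterConeAngle is the hard cutoff, the inner
+        // cone's cosine is antiAliasThreshold larger (a slightly narrower cone),
+        // and fConeScale normalizes the anti-aliasing ramp between the two.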
+ fCosOuterConeAngle = SkScalarCos(SkDegreesToRadians(cutoffAngle));
+ const SkScalar antiAliasThreshold = 0.016f;
+ fCosInnerConeAngle = fCosOuterConeAngle + antiAliasThreshold;
+ fConeScale = SkScalarInvert(antiAliasThreshold);
+ }
+
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX, location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ SkPoint target2 = SkPoint::Make(fTarget.fX, fTarget.fY);
+ matrix.mapPoints(&target2, 1);
+ SkPoint targetZ = SkPoint::Make(fTarget.fZ, fTarget.fZ);
+ matrix.mapVectors(&targetZ, 1);
+ SkPoint3 target = SkPoint3::Make(target2.fX, target2.fY,
+ SkScalarAve(targetZ.fX, targetZ.fY));
+ SkPoint3 s = target - location;
+ fast_normalize(&s);
+ return new SkSpotLight(location,
+ target,
+ fSpecularExponent,
+ fCosOuterConeAngle,
+ fCosInnerConeAngle,
+ fConeScale,
+ s,
+ color());
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkIntToScalar(z) * surfaceScale);
+ fast_normalize(&direction);
+ return direction;
+ }
+ SkPoint3 lightColor(const SkPoint3& surfaceToLight) const override {
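+        // Zero outside the outer cone; pow(cosAngle, exponent) inside, further
+        // attenuated by a linear ramp between the outer and inner cone angles.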
+ SkScalar cosAngle = -surfaceToLight.dot(fS);
+ SkScalar scale = 0;
+ if (cosAngle >= fCosOuterConeAngle) {
+ scale = SkScalarPow(cosAngle, fSpecularExponent);
+ if (cosAngle < fCosInnerConeAngle) {
+ scale *= (cosAngle - fCosOuterConeAngle) * fConeScale;
+ }
+ }
+ return this->color().makeScale(scale);
+ }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLSpotLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+ LightType type() const override { return kSpot_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ const SkPoint3& target() const { return fTarget; }
+ SkScalar specularExponent() const { return fSpecularExponent; }
+ SkScalar cosInnerConeAngle() const { return fCosInnerConeAngle; }
+ SkScalar cosOuterConeAngle() const { return fCosOuterConeAngle; }
+ SkScalar coneScale() const { return fConeScale; }
+ const SkPoint3& s() const { return fS; }
+
+ SkSpotLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = read_point3(buffer);
+ fTarget = read_point3(buffer);
+ fSpecularExponent = buffer.readScalar();
+ fCosOuterConeAngle = buffer.readScalar();
+ fCosInnerConeAngle = buffer.readScalar();
+ fConeScale = buffer.readScalar();
+ fS = read_point3(buffer);
+ buffer.validate(SkScalarIsFinite(fSpecularExponent) &&
+ SkScalarIsFinite(fCosOuterConeAngle) &&
+ SkScalarIsFinite(fCosInnerConeAngle) &&
+ SkScalarIsFinite(fConeScale));
+ }
+
+protected:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cosOuterConeAngle,
+ SkScalar cosInnerConeAngle,
+ SkScalar coneScale,
+ const SkPoint3& s,
+ const SkPoint3& color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(specularExponent),
+ fCosOuterConeAngle(cosOuterConeAngle),
+ fCosInnerConeAngle(cosInnerConeAngle),
+ fConeScale(coneScale),
+ fS(s)
+ {
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fLocation, buffer);
+ write_point3(fTarget, buffer);
+ buffer.writeScalar(fSpecularExponent);
+ buffer.writeScalar(fCosOuterConeAngle);
+ buffer.writeScalar(fCosInnerConeAngle);
+ buffer.writeScalar(fConeScale);
+ write_point3(fS, buffer);
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kSpot_LightType) {
+ return false;
+ }
+
+ const SkSpotLight& o = static_cast<const SkSpotLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation &&
+ fTarget == o.fTarget &&
+ fSpecularExponent == o.fSpecularExponent &&
+ fCosOuterConeAngle == o.fCosOuterConeAngle;
+ }
+
+private:
+ static const SkScalar kSpecularExponentMin;
+ static const SkScalar kSpecularExponentMax;
+
+ SkPoint3 fLocation;
+ SkPoint3 fTarget;
+ SkScalar fSpecularExponent;
+ SkScalar fCosOuterConeAngle;
+ SkScalar fCosInnerConeAngle;
+ SkScalar fConeScale;
+ SkPoint3 fS;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+// According to the spec, the specular exponent should be in the range [1, 128]:
+// http://www.w3.org/TR/SVG/filters.html#feSpecularLightingSpecularExponentAttribute
+const SkScalar SkSpotLight::kSpecularExponentMin = 1.0f;
+const SkScalar SkSpotLight::kSpecularExponentMax = 128.0f;
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkImageFilterLight::flattenLight(SkWriteBuffer& buffer) const {
+ // Write type first, then baseclass, then subclass.
+ buffer.writeInt(this->type());
+ write_point3(fColor, buffer);
+ this->onFlattenLight(buffer);
+}
+
+/*static*/ SkImageFilterLight* SkImageFilterLight::UnflattenLight(SkReadBuffer& buffer) {
+ SkImageFilterLight::LightType type = buffer.read32LE(SkImageFilterLight::kLast_LightType);
+
+ switch (type) {
+        // Each subclass constructor must first invoke the SkImageFilterLight
+        // constructor, so we read the base class, then the subclass, in the
+        // same order as flattenLight().
+ case SkImageFilterLight::kDistant_LightType:
+ return new SkDistantLight(buffer);
+ case SkImageFilterLight::kPoint_LightType:
+ return new SkPointLight(buffer);
+ case SkImageFilterLight::kSpot_LightType:
+ return new SkSpotLight(buffer);
+ default:
+            // Should never get here due to the range check in read32LE() above.
+ SkDEBUGFAIL("Unknown LightType.");
+ return nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeDistantLitDiffuse(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakePointLitDiffuse(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeSpotLitDiffuse(
+ const SkPoint3& location, const SkPoint3& target, SkScalar specularExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(
+ new SkSpotLight(location, target, specularExponent, cutoffAngle, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeDistantLitSpecular(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakePointLitSpecular(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeSpotLitSpecular(
+ const SkPoint3& location, const SkPoint3& target, SkScalar specularExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(
+ new SkSpotLight(location, target, specularExponent, cutoffAngle, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkDiffuseLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(kd)) {
+ return nullptr;
+ }
+    // According to the spec, kd can be any non-negative number:
+ // http://www.w3.org/TR/SVG/filters.html#feDiffuseLightingElement
+ if (kd < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkDiffuseLightingImageFilter(std::move(light), surfaceScale,
+ kd, std::move(input), cropRect));
+}
+
+SkDiffuseLightingImageFilter::SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKD(kd) {
+}
+
+sk_sp<SkFlattenable> SkDiffuseLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar kd = buffer.readScalar();
+
+ return Make(std::move(light), surfaceScale, kd, common.getInput(0), &common.cropRect());
+}
+
+void SkDiffuseLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKD);
+}
+
+sk_sp<SkSpecialImage> SkDiffuseLightingImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(ctx, input.get(), bounds, matrix);
+ }
+#endif
+
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
+ DiffuseLightingType lightingType(fKD);
+ lightBitmap(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkDiffuseLightingImageFilter::makeFragmentProcessor(
+ sk_sp<GrTextureProxy> proxy,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const {
+ SkScalar scale = this->surfaceScale() * 255;
+ return GrDiffuseLightingEffect::Make(std::move(proxy), this->refLight(), scale, matrix,
+ this->kd(), boundaryMode, srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkSpecularLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(ks) || !SkScalarIsFinite(shininess)) {
+ return nullptr;
+ }
+    // According to the spec, ks can be any non-negative number:
+ // http://www.w3.org/TR/SVG/filters.html#feSpecularLightingElement
+ if (ks < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkSpecularLightingImageFilter(std::move(light), surfaceScale,
+ ks, shininess,
+ std::move(input), cropRect));
+}
+
+SkSpecularLightingImageFilter::SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKS(ks)
+ , fShininess(shininess) {
+}
+
+sk_sp<SkFlattenable> SkSpecularLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar ks = buffer.readScalar();
+ SkScalar shine = buffer.readScalar();
+
+ return Make(std::move(light), surfaceScale, ks, shine, common.getInput(0),
+ &common.cropRect());
+}
+
+void SkSpecularLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKS);
+ buffer.writeScalar(fShininess);
+}
+
+sk_sp<SkSpecialImage> SkSpecularLightingImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(ctx, input.get(), bounds, matrix);
+ }
+#endif
+
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SpecularLightingType lightingType(fKS, fShininess);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
+ lightBitmap(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()), dst);
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkSpecularLightingImageFilter::makeFragmentProcessor(
+ sk_sp<GrTextureProxy> proxy,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const {
+ SkScalar scale = this->surfaceScale() * 255;
+ return GrSpecularLightingEffect::Make(std::move(proxy), this->refLight(), scale, matrix,
+ this->ks(), this->shininess(), boundaryMode, srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+static SkString emitNormalFunc(BoundaryMode mode,
+ const char* pointToNormalName,
+ const char* sobelFuncName) {
+ SkString result;
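+    // m[] holds the 3x3 alpha (height) neighborhood in row-major order:
+    //     m[0] m[1] m[2]
+    //     m[3] m[4] m[5]
+    //     m[6] m[7] m[8]
+    // Each boundary mode zeroes the taps that fall outside the image and
+    // compensates with the gOneThird/gOneHalf/gTwoThirds/gOneQuarter factors.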
+ switch (mode) {
+ case kTopLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(0.0, 0.0, m[4], m[5], m[7], m[8], %g),\n"
+ "\t %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kTop_BoundaryMode:
+ result.printf("\treturn %s(%s(0.0, 0.0, m[3], m[5], m[6], m[8], %g),\n"
+ "\t %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kTopRight_BoundaryMode:
+ result.printf("\treturn %s(%s( 0.0, 0.0, m[3], m[4], m[6], m[7], %g),\n"
+ "\t %s(m[3], m[6], m[4], m[7], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(m[1], m[2], m[4], m[5], m[7], m[8], %g),\n"
+ "\t %s( 0.0, 0.0, m[1], m[7], m[2], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kInterior_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[2], m[3], m[5], m[6], m[8], %g),\n"
+ "\t %s(m[0], m[6], m[1], m[7], m[2], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneQuarter,
+ sobelFuncName, gOneQuarter);
+ break;
+ case kRight_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[1], m[3], m[4], m[6], m[7], %g),\n"
+ "\t %s(m[0], m[6], m[1], m[7], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kBottomLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(m[1], m[2], m[4], m[5], 0.0, 0.0, %g),\n"
+ "\t %s( 0.0, 0.0, m[1], m[4], m[2], m[5], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kBottom_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[2], m[3], m[5], 0.0, 0.0, %g),\n"
+ "\t %s(m[0], m[3], m[1], m[4], m[2], m[5], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kBottomRight_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[1], m[3], m[4], 0.0, 0.0, %g),\n"
+ "\t %s(m[0], m[3], m[1], m[4], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ return result;
+}
+
+class GrGLLightingEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLLightingEffect() : fLight(nullptr) { }
+ ~GrGLLightingEffect() override { delete fLight; }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder* b);
+
+protected:
+ /**
+ * Subclasses of GrGLLightingEffect must call INHERITED::onSetData();
+ */
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+ virtual void emitLightFunc(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ SkString* funcName) = 0;
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+
+ UniformHandle fImageIncrementUni;
+ UniformHandle fSurfaceScaleUni;
+ GrTextureDomain::GLDomain fDomain;
+ GrGLLight* fLight;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDiffuseLightingEffect : public GrGLLightingEffect {
+public:
+ void emitLightFunc(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, SkString* funcName) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ typedef GrGLLightingEffect INHERITED;
+
+ UniformHandle fKDUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLSpecularLightingEffect : public GrGLLightingEffect {
+public:
+ void emitLightFunc(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, SkString* funcName) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ typedef GrGLLightingEffect INHERITED;
+
+ UniformHandle fKSUni;
+ UniformHandle fShininessUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static GrTextureDomain create_domain(GrTextureProxy* proxy, const SkIRect* srcBounds,
+ GrTextureDomain::Mode mode) {
+ if (srcBounds) {
+ SkRect texelDomain = GrTextureDomain::MakeTexelDomain(*srcBounds, mode);
+ return GrTextureDomain(proxy, texelDomain, mode, mode);
+ } else {
+ return GrTextureDomain::IgnoredDomain();
+ }
+}
+
+GrLightingEffect::GrLightingEffect(ClassID classID,
+ sk_sp<GrTextureProxy> proxy,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ // Perhaps this could advertise the opaque or coverage-as-alpha optimizations?
+ : INHERITED(classID, kNone_OptimizationFlags)
+ , fCoordTransform(proxy.get())
+ , fDomain(create_domain(proxy.get(), srcBounds, GrTextureDomain::kDecal_Mode))
+ , fTextureSampler(std::move(proxy))
+ , fLight(std::move(light))
+ , fSurfaceScale(surfaceScale)
+ , fFilterMatrix(matrix)
+ , fBoundaryMode(boundaryMode) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+GrLightingEffect::GrLightingEffect(const GrLightingEffect& that)
+ : INHERITED(that.classID(), that.optimizationFlags())
+ , fCoordTransform(that.fCoordTransform)
+ , fDomain(that.fDomain)
+ , fTextureSampler(that.fTextureSampler)
+ , fLight(that.fLight)
+ , fSurfaceScale(that.fSurfaceScale)
+ , fFilterMatrix(that.fFilterMatrix)
+ , fBoundaryMode(that.fBoundaryMode) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+bool GrLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrLightingEffect& s = sBase.cast<GrLightingEffect>();
+ return fLight->isEqual(*s.fLight) &&
+ fSurfaceScale == s.fSurfaceScale &&
+ fBoundaryMode == s.fBoundaryMode;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDiffuseLightingEffect::GrDiffuseLightingEffect(sk_sp<GrTextureProxy> proxy,
+ sk_sp<const SkImageFilterLight>light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ : INHERITED(kGrDiffuseLightingEffect_ClassID, std::move(proxy), std::move(light),
+ surfaceScale, matrix, boundaryMode, srcBounds)
+ , fKD(kd) {}
+
+GrDiffuseLightingEffect::GrDiffuseLightingEffect(const GrDiffuseLightingEffect& that)
+ : INHERITED(that), fKD(that.fKD) {}
+
+bool GrDiffuseLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrDiffuseLightingEffect& s = sBase.cast<GrDiffuseLightingEffect>();
+ return INHERITED::onIsEqual(sBase) && this->kd() == s.kd();
+}
+
+void GrDiffuseLightingEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDiffuseLightingEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrDiffuseLightingEffect::onCreateGLSLInstance() const {
+ return new GrGLDiffuseLightingEffect;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDiffuseLightingEffect);
+
+#if GR_TEST_UTILS
+
+static SkPoint3 random_point3(SkRandom* random) {
+ return SkPoint3::Make(SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()));
+}
+
+static SkImageFilterLight* create_random_light(SkRandom* random) {
+ int type = random->nextULessThan(3);
+ switch (type) {
+ case 0: {
+ return new SkDistantLight(random_point3(random), random->nextU());
+ }
+ case 1: {
+ return new SkPointLight(random_point3(random), random->nextU());
+ }
+ case 2: {
+ return new SkSpotLight(random_point3(random), random_point3(random),
+ random->nextUScalar1(), random->nextUScalar1(), random->nextU());
+ }
+ default:
+ SK_ABORT("Unexpected value.");
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrDiffuseLightingEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar kd = d->fRandom->nextUScalar1();
+ sk_sp<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+ SkIRect srcBounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()),
+ d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()));
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+ return GrDiffuseLightingEffect::Make(std::move(proxy), std::move(light), surfaceScale, matrix,
+ kd, mode, &srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLLightingEffect::emitCode(EmitArgs& args) {
+ const GrLightingEffect& le = args.fFp.cast<GrLightingEffect>();
+ if (!fLight) {
+ fLight = le.light()->createGLLight();
+ }
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf2_GrSLType, "ImageIncrement");
+ fSurfaceScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf_GrSLType, "SurfaceScale");
+ fLight->emitLightColorUniform(uniformHandler);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString lightFunc;
+ this->emitLightFunc(uniformHandler, fragBuilder, &lightFunc);
+ const GrShaderVar gSobelArgs[] = {
+ GrShaderVar("a", kHalf_GrSLType),
+ GrShaderVar("b", kHalf_GrSLType),
+ GrShaderVar("c", kHalf_GrSLType),
+ GrShaderVar("d", kHalf_GrSLType),
+ GrShaderVar("e", kHalf_GrSLType),
+ GrShaderVar("f", kHalf_GrSLType),
+ GrShaderVar("scale", kHalf_GrSLType),
+ };
+ SkString sobelFuncName;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
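+    // Emit a helper evaluating the six-tap Sobel expression
+    // (-a + b - 2c + 2d - e + f) * scale.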
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "sobel",
+ SK_ARRAY_COUNT(gSobelArgs),
+ gSobelArgs,
+                              "\treturn (-a + b - 2.0 * c + 2.0 * d - e + f) * scale;\n",
+ &sobelFuncName);
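+    // pointToNormal() turns the two Sobel gradients into a surface normal,
+    // normalize(half3(-x * scale, -y * scale, 1)), as in the SVG lighting
+    // filters.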
+ const GrShaderVar gPointToNormalArgs[] = {
+ GrShaderVar("x", kHalf_GrSLType),
+ GrShaderVar("y", kHalf_GrSLType),
+ GrShaderVar("scale", kHalf_GrSLType),
+ };
+ SkString pointToNormalName;
+ fragBuilder->emitFunction(kHalf3_GrSLType,
+ "pointToNormal",
+ SK_ARRAY_COUNT(gPointToNormalArgs),
+ gPointToNormalArgs,
+ "\treturn normalize(half3(-x * scale, -y * scale, 1));\n",
+ &pointToNormalName);
+
+ const GrShaderVar gInteriorNormalArgs[] = {
+ GrShaderVar("m", kHalf_GrSLType, 9),
+ GrShaderVar("surfaceScale", kHalf_GrSLType),
+ };
+ SkString normalBody = emitNormalFunc(le.boundaryMode(),
+ pointToNormalName.c_str(),
+ sobelFuncName.c_str());
+ SkString normalName;
+ fragBuilder->emitFunction(kHalf3_GrSLType,
+ "normal",
+ SK_ARRAY_COUNT(gInteriorNormalArgs),
+ gInteriorNormalArgs,
+ normalBody.c_str(),
+ &normalName);
+
+ fragBuilder->codeAppendf("\t\tfloat2 coord = %s;\n", coords2D.c_str());
+ fragBuilder->codeAppend("\t\thalf m[9];\n");
+
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+ const char* surfScale = uniformHandler->getUniformCStr(fSurfaceScaleUni);
+
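+    // Build m[0..8] by sampling the 3x3 alpha neighborhood around coord. The
+    // y-sign baked into ImageIncrement (see onSetData) makes the iteration
+    // order top row first regardless of the proxy's surface origin.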
+ int index = 0;
+ for (int dy = 1; dy >= -1; dy--) {
+ for (int dx = -1; dx <= 1; dx++) {
+ SkString texCoords;
+ texCoords.appendf("coord + half2(%d, %d) * %s", dx, dy, imgInc);
+ SkString temp;
+ temp.appendf("temp%d", index);
+ fragBuilder->codeAppendf("half4 %s;", temp.c_str());
+ fDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ le.domain(),
+ temp.c_str(),
+ texCoords,
+ args.fTexSamplers[0]);
+ fragBuilder->codeAppendf("m[%d] = %s.a;", index, temp.c_str());
+ index++;
+ }
+ }
+ fragBuilder->codeAppend("\t\thalf3 surfaceToLight = ");
+ SkString arg;
+ arg.appendf("%s * m[4]", surfScale);
+ fLight->emitSurfaceToLight(uniformHandler, fragBuilder, arg.c_str());
+ fragBuilder->codeAppend(";\n");
+ fragBuilder->codeAppendf("\t\t%s = %s(%s(m, %s), surfaceToLight, ",
+ args.fOutputColor, lightFunc.c_str(), normalName.c_str(), surfScale);
+ fLight->emitLightColor(uniformHandler, fragBuilder, "surfaceToLight");
+ fragBuilder->codeAppend(");\n");
+ fragBuilder->codeAppendf("%s *= %s;\n", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLLightingEffect::GenKey(const GrProcessor& proc,
+ const GrShaderCaps& caps, GrProcessorKeyBuilder* b) {
+ const GrLightingEffect& lighting = proc.cast<GrLightingEffect>();
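+    // Key packing: light type in the low two bits, boundary mode above them.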
+ b->add32(lighting.boundaryMode() << 2 | lighting.light()->type());
+ b->add32(GrTextureDomain::GLDomain::DomainKey(lighting.domain()));
+}
+
+void GrGLLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ const GrLightingEffect& lighting = proc.cast<GrLightingEffect>();
+ if (!fLight) {
+ fLight = lighting.light()->createGLLight();
+ }
+
+ GrTextureProxy* proxy = lighting.textureSampler(0).proxy();
+ GrTexture* texture = proxy->peekTexture();
+
+ float ySign = proxy->origin() == kTopLeft_GrSurfaceOrigin ? -1.0f : 1.0f;
+ pdman.set2f(fImageIncrementUni, 1.0f / texture->width(), ySign / texture->height());
+ pdman.set1f(fSurfaceScaleUni, lighting.surfaceScale());
+ sk_sp<SkImageFilterLight> transformedLight(
+ lighting.light()->transform(lighting.filterMatrix()));
+ fDomain.setData(pdman, lighting.domain(), proxy, lighting.textureSampler(0).samplerState());
+ fLight->setData(pdman, transformedLight.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDiffuseLightingEffect::emitLightFunc(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* kd;
+ fKDUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "KD", &kd);
+
+ const GrShaderVar gLightArgs[] = {
+ GrShaderVar("normal", kHalf3_GrSLType),
+ GrShaderVar("surfaceToLight", kHalf3_GrSLType),
+ GrShaderVar("lightColor", kHalf3_GrSLType)
+ };
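+    // feDiffuseLighting: color = lightColor * saturate(kd * dot(N, L)), with
+    // opaque alpha.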
+ SkString lightBody;
+ lightBody.appendf("\thalf colorScale = %s * dot(normal, surfaceToLight);\n", kd);
+ lightBody.appendf("\treturn half4(lightColor * saturate(colorScale), 1.0);\n");
+ fragBuilder->emitFunction(kHalf4_GrSLType,
+ "light",
+ SK_ARRAY_COUNT(gLightArgs),
+ gLightArgs,
+ lightBody.c_str(),
+ funcName);
+}
+
+void GrGLDiffuseLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ INHERITED::onSetData(pdman, proc);
+ const GrDiffuseLightingEffect& diffuse = proc.cast<GrDiffuseLightingEffect>();
+ pdman.set1f(fKDUni, diffuse.kd());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrSpecularLightingEffect::GrSpecularLightingEffect(sk_sp<GrTextureProxy> proxy,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ : INHERITED(kGrSpecularLightingEffect_ClassID, std::move(proxy), std::move(light),
+ surfaceScale, matrix, boundaryMode, srcBounds)
+ , fKS(ks)
+ , fShininess(shininess) {}
+
+GrSpecularLightingEffect::GrSpecularLightingEffect(const GrSpecularLightingEffect& that)
+ : INHERITED(that), fKS(that.fKS), fShininess(that.fShininess) {}
+
+bool GrSpecularLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrSpecularLightingEffect& s = sBase.cast<GrSpecularLightingEffect>();
+ return INHERITED::onIsEqual(sBase) &&
+ this->ks() == s.ks() &&
+ this->shininess() == s.shininess();
+}
+
+void GrSpecularLightingEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLSpecularLightingEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrSpecularLightingEffect::onCreateGLSLInstance() const {
+ return new GrGLSpecularLightingEffect;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSpecularLightingEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrSpecularLightingEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar ks = d->fRandom->nextUScalar1();
+ SkScalar shininess = d->fRandom->nextUScalar1();
+ sk_sp<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+ SkIRect srcBounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()),
+ d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()));
+ return GrSpecularLightingEffect::Make(std::move(proxy), std::move(light), surfaceScale, matrix,
+ ks, shininess, mode, &srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLSpecularLightingEffect::emitLightFunc(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* ks;
+ const char* shininess;
+
+ fKSUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "KS", &ks);
+ fShininessUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf_GrSLType,
+ "Shininess",
+ &shininess);
+
+ const GrShaderVar gLightArgs[] = {
+ GrShaderVar("normal", kHalf3_GrSLType),
+ GrShaderVar("surfaceToLight", kHalf3_GrSLType),
+ GrShaderVar("lightColor", kHalf3_GrSLType)
+ };
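+    // feSpecularLighting (Blinn-Phong): H = normalize(L + (0, 0, 1)),
+    // color = lightColor * saturate(ks * pow(dot(N, H), shininess)); alpha is
+    // the max color channel so the result remains a valid premultiplied color.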
+ SkString lightBody;
+ lightBody.appendf("\thalf3 halfDir = half3(normalize(surfaceToLight + half3(0, 0, 1)));\n");
+ lightBody.appendf("\thalf colorScale = half(%s * pow(dot(normal, halfDir), %s));\n",
+ ks, shininess);
+ lightBody.appendf("\thalf3 color = lightColor * saturate(colorScale);\n");
+ lightBody.appendf("\treturn half4(color, max(max(color.r, color.g), color.b));\n");
+ fragBuilder->emitFunction(kHalf4_GrSLType,
+ "light",
+ SK_ARRAY_COUNT(gLightArgs),
+ gLightArgs,
+ lightBody.c_str(),
+ funcName);
+}
+
+void GrGLSpecularLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& effect) {
+ INHERITED::onSetData(pdman, effect);
+ const GrSpecularLightingEffect& spec = effect.cast<GrSpecularLightingEffect>();
+ pdman.set1f(fKSUni, spec.ks());
+ pdman.set1f(fShininessUni, spec.shininess());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+void GrGLLight::emitLightColorUniform(GrGLSLUniformHandler* uniformHandler) {
+ fColorUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType, "LightColor");
+}
+
+void GrGLLight::emitLightColor(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char *surfaceToLight) {
+ fragBuilder->codeAppend(uniformHandler->getUniformCStr(this->lightColorUni()));
+}
+
+void GrGLLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ setUniformPoint3(pdman, fColorUni,
+ light->color().makeScale(SkScalarInvert(SkIntToScalar(255))));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDistantLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kDistant_LightType);
+ const SkDistantLight* distantLight = static_cast<const SkDistantLight*>(light);
+ setUniformNormal3(pdman, fDirectionUni, distantLight->direction());
+}
+
+void GrGLDistantLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* dir;
+ fDirectionUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType,
+ "LightDirection", &dir);
+ fragBuilder->codeAppend(dir);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLPointLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kPoint_LightType);
+ const SkPointLight* pointLight = static_cast<const SkPointLight*>(light);
+ setUniformPoint3(pdman, fLocationUni, pointLight->location());
+}
+
+void GrGLPointLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* loc;
+ fLocationUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType,
+ "LightLocation", &loc);
+ fragBuilder->codeAppendf("normalize(%s - half3(sk_FragCoord.xy, %s))",
+ loc, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLSpotLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kSpot_LightType);
+ const SkSpotLight* spotLight = static_cast<const SkSpotLight *>(light);
+ setUniformPoint3(pdman, fLocationUni, spotLight->location());
+ pdman.set1f(fExponentUni, spotLight->specularExponent());
+ pdman.set1f(fCosInnerConeAngleUni, spotLight->cosInnerConeAngle());
+ pdman.set1f(fCosOuterConeAngleUni, spotLight->cosOuterConeAngle());
+ pdman.set1f(fConeScaleUni, spotLight->coneScale());
+ setUniformNormal3(pdman, fSUni, spotLight->s());
+}
+
+void GrGLSpotLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* location;
+ fLocationUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType,
+ "LightLocation", &location);
+
+ fragBuilder->codeAppendf("normalize(%s - half3(sk_FragCoord.xy, %s))",
+ location, z);
+}
+
+void GrGLSpotLight::emitLightColor(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char *surfaceToLight) {
+    // The light color uniform was created by the parent class, GrGLLight.
+    const char* color = uniformHandler->getUniformCStr(this->lightColorUni());
+
+ const char* exponent;
+ const char* cosInner;
+ const char* cosOuter;
+ const char* coneScale;
+ const char* s;
+ fExponentUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "Exponent", &exponent);
+ fCosInnerConeAngleUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "CosInnerConeAngle", &cosInner);
+ fCosOuterConeAngleUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "CosOuterConeAngle", &cosOuter);
+ fConeScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "ConeScale", &coneScale);
+ fSUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType, "S", &s);
+
+ const GrShaderVar gLightColorArgs[] = {
+ GrShaderVar("surfaceToLight", kHalf3_GrSLType)
+ };
+ SkString lightColorBody;
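+    // Zero outside the outer cone, a pow()-scaled ramp between the outer and
+    // inner cones, and the unattenuated light color inside the inner cone.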
+ lightColorBody.appendf("\thalf cosAngle = -dot(surfaceToLight, %s);\n", s);
+ lightColorBody.appendf("\tif (cosAngle < %s) {\n", cosOuter);
+ lightColorBody.appendf("\t\treturn half3(0);\n");
+ lightColorBody.appendf("\t}\n");
+ lightColorBody.appendf("\thalf scale = pow(cosAngle, %s);\n", exponent);
+ lightColorBody.appendf("\tif (cosAngle < %s) {\n", cosInner);
+ lightColorBody.appendf("\t\treturn %s * scale * (cosAngle - %s) * %s;\n",
+ color, cosOuter, coneScale);
+ lightColorBody.appendf("\t}\n");
+ lightColorBody.appendf("\treturn %s;\n", color);
+ fragBuilder->emitFunction(kHalf3_GrSLType,
+ "lightColor",
+ SK_ARRAY_COUNT(gLightColorArgs),
+ gLightColorArgs,
+ lightColorBody.c_str(),
+ &fLightColorFunc);
+
+ fragBuilder->codeAppendf("%s(%s)", fLightColorFunc.c_str(), surfaceToLight);
+}
+
+#endif
+
+void SkLightingImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkDiffuseLightingImageFilter);
+ SK_REGISTER_FLATTENABLE(SkSpecularLightingImageFilter);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp
new file mode 100644
index 0000000000..8e360932ab
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkMagnifierImageFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+////////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/effects/generated/GrMagnifierEffect.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#endif
+
+namespace {
+
+class SkMagnifierImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkMagnifierImageFilterImpl(const SkRect& srcRect, SkScalar inset, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSrcRect(srcRect)
+ , fInset(inset) {
+ SkASSERT(srcRect.left() >= 0 && srcRect.top() >= 0 && inset >= 0);
+ }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void SkMagnifierImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkMagnifierImageFilterImpl)
+
+ SkRect fSrcRect;
+ SkScalar fInset;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkMagnifierImageFilter::Make(const SkRect& srcRect, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (!SkScalarIsFinite(inset) || !SkIsValidRect(srcRect)) {
+ return nullptr;
+ }
+ if (inset < 0) {
+ return nullptr;
+ }
+ // Negative numbers in src rect are not supported
+ if (srcRect.fLeft < 0 || srcRect.fTop < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMagnifierImageFilterImpl(srcRect, inset, std::move(input),
+ cropRect));
+}
+
+void SkMagnifierImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMagnifierImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMagnifierImageFilter", SkMagnifierImageFilterImpl::CreateProc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkMagnifierImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src;
+ buffer.readRect(&src);
+ return SkMagnifierImageFilter::Make(src, buffer.readScalar(), common.getInput(0),
+ &common.cropRect());
+}
+
+void SkMagnifierImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeScalar(fInset);
+}
+
+sk_sp<SkSpecialImage> SkMagnifierImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkScalar invInset = fInset > 0 ? SkScalarInvert(fInset) : SK_Scalar1;
+
+ SkScalar invXZoom = fSrcRect.width() / bounds.width();
+ SkScalar invYZoom = fSrcRect.height() / bounds.height();
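+    // Note: these are inverse zoom factors; when magnifying, fSrcRect is smaller than
+    // bounds, so values below 1 advance the source sample by a fraction of a pixel
+    // per destination pixel.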
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> inputProxy(input->asTextureProxyRef(context));
+ SkASSERT(inputProxy);
+
+ const auto isProtected = inputProxy->isProtected();
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+ // Map bounds and srcRect into the proxy space. Due to the zoom effect,
+ // it's not just an offset for fSrcRect.
+ bounds.offset(input->subset().x(), input->subset().y());
+ SkRect srcRect = fSrcRect.makeOffset((1.f - invXZoom) * input->subset().x(),
+ (1.f - invYZoom) * input->subset().y());
+
+ auto fp = GrMagnifierEffect::Make(std::move(inputProxy),
+ bounds,
+ srcRect,
+ invXZoom,
+ invYZoom,
+ bounds.width() * invInset,
+ bounds.height() * invInset);
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), input->getColorSpace(),
+ input->alphaType(), ctx.colorSpace());
+ if (!fp) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(fp), bounds, ctx.colorType(), ctx.colorSpace(),
+ isProtected ? GrProtected::kYes : GrProtected::kNo);
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if ((inputBM.colorType() != kN32_SkColorType) ||
+ (fSrcRect.width() >= inputBM.width()) || (fSrcRect.height() >= inputBM.height())) {
+ return nullptr;
+ }
+
+ SkASSERT(inputBM.getPixels());
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ for (int y = 0; y < dstHeight; ++y) {
+ for (int x = 0; x < dstWidth; ++x) {
+ SkScalar x_dist = SkMin32(x, dstWidth - x - 1) * invInset;
+ SkScalar y_dist = SkMin32(y, dstHeight - y - 1) * invInset;
+ SkScalar weight = 0;
+
+ static const SkScalar kScalar2 = SkScalar(2);
+
+ // To create a smooth curve at the corners, we need to work on
+ // a square twice the size of the inset.
+ if (x_dist < kScalar2 && y_dist < kScalar2) {
+ x_dist = kScalar2 - x_dist;
+ y_dist = kScalar2 - y_dist;
+
+ SkScalar dist = SkScalarSqrt(SkScalarSquare(x_dist) +
+ SkScalarSquare(y_dist));
+ dist = SkMaxScalar(kScalar2 - dist, 0);
+ weight = SkMinScalar(SkScalarSquare(dist), SK_Scalar1);
+ } else {
+ SkScalar sqDist = SkMinScalar(SkScalarSquare(x_dist),
+ SkScalarSquare(y_dist));
+ weight = SkMinScalar(sqDist, SK_Scalar1);
+ }
+
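+            // Lerp between the zoomed source position (weight == 1) and the identity
+            // position (weight == 0), producing the lens falloff at the magnifier edge.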
+ SkScalar x_interp = weight * (fSrcRect.x() + x * invXZoom) + (1 - weight) * x;
+ SkScalar y_interp = weight * (fSrcRect.y() + y * invYZoom) + (1 - weight) * y;
+
+ int x_val = SkTPin(bounds.x() + SkScalarFloorToInt(x_interp), 0, inputBM.width() - 1);
+ int y_val = SkTPin(bounds.y() + SkScalarFloorToInt(y_interp), 0, inputBM.height() - 1);
+
+ *dptr = *inputBM.getAddr32(x_val, y_val);
+ dptr++;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp
new file mode 100644
index 0000000000..611f02703b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp
@@ -0,0 +1,574 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkMatrixConvolutionImageFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/GrMatrixConvolutionEffect.h"
+#endif
+
+namespace {
+
+class SkMatrixConvolutionImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkMatrixConvolutionImageFilterImpl(const SkISize& kernelSize, const SkScalar* kernel,
+ SkScalar gain, SkScalar bias, const SkIPoint& kernelOffset,
+ SkTileMode tileMode, bool convolveAlpha,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fKernelSize(kernelSize)
+ , fGain(gain)
+ , fBias(bias)
+ , fKernelOffset(kernelOffset)
+ , fTileMode(tileMode)
+ , fConvolveAlpha(convolveAlpha) {
+ size_t size = (size_t) sk_64_mul(fKernelSize.width(), fKernelSize.height());
+ fKernel = new SkScalar[size];
+ memcpy(fKernel, kernel, size * sizeof(SkScalar));
+ SkASSERT(kernelSize.fWidth >= 1 && kernelSize.fHeight >= 1);
+ SkASSERT(kernelOffset.fX >= 0 && kernelOffset.fX < kernelSize.fWidth);
+ SkASSERT(kernelOffset.fY >= 0 && kernelOffset.fY < kernelSize.fHeight);
+ }
+
+ ~SkMatrixConvolutionImageFilterImpl() override {
+ delete[] fKernel;
+ }
+
+protected:
+
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ bool affectsTransparentBlack() const override;
+
+private:
+ friend void SkMatrixConvolutionImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkMatrixConvolutionImageFilterImpl)
+
+ SkISize fKernelSize;
+ SkScalar* fKernel;
+ SkScalar fGain;
+ SkScalar fBias;
+ SkIPoint fKernelOffset;
+ SkTileMode fTileMode;
+ bool fConvolveAlpha;
+
+ template <class PixelFetcher, bool convolveAlpha>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ template <class PixelFetcher>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+class UncheckedPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return *src.getAddr32(x, y);
+ }
+};
+
+class ClampPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = SkTPin(x, bounds.fLeft, bounds.fRight - 1);
+ y = SkTPin(y, bounds.fTop, bounds.fBottom - 1);
+ return *src.getAddr32(x, y);
+ }
+};
+
+class RepeatPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = (x - bounds.left()) % bounds.width() + bounds.left();
+ y = (y - bounds.top()) % bounds.height() + bounds.top();
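+        // The '%' above can yield a negative remainder for coordinates left of or
+        // above the bounds, so shift such results back into range.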
+ if (x < bounds.left()) {
+ x += bounds.width();
+ }
+ if (y < bounds.top()) {
+ y += bounds.height();
+ }
+ return *src.getAddr32(x, y);
+ }
+};
+
+class ClampToBlackPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return *src.getAddr32(x, y);
+ }
+ }
+};
+
+} // end namespace
+
+static SkTileMode to_sktilemode(SkMatrixConvolutionImageFilter::TileMode tileMode) {
+ switch(tileMode) {
+ case SkMatrixConvolutionImageFilter::kClamp_TileMode:
+ return SkTileMode::kClamp;
+ case SkMatrixConvolutionImageFilter::kRepeat_TileMode:
+ return SkTileMode::kRepeat;
+ case SkMatrixConvolutionImageFilter::kClampToBlack_TileMode:
+ // Fall through
+ default:
+ return SkTileMode::kDecal;
+ }
+}
+
+sk_sp<SkImageFilter> SkMatrixConvolutionImageFilter::Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ return Make(kernelSize, kernel, gain, bias, kernelOffset, to_sktilemode(tileMode),
+ convolveAlpha, std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkMatrixConvolutionImageFilter::Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ SkTileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ // We need to be able to read at most SK_MaxS32 bytes, so divide that
+ // by the size of a scalar to know how many scalars we can read.
+ static constexpr int32_t kMaxKernelSize = SK_MaxS32 / sizeof(SkScalar);
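+    // For example, with 4-byte scalars this permits SK_MaxS32 / 4, i.e. roughly 536
+    // million kernel entries, far larger than any practical convolution kernel.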
+
+ if (kernelSize.width() < 1 || kernelSize.height() < 1) {
+ return nullptr;
+ }
+ if (kMaxKernelSize / kernelSize.fWidth < kernelSize.fHeight) {
+ return nullptr;
+ }
+ if (!kernel) {
+ return nullptr;
+ }
+ if ((kernelOffset.fX < 0) || (kernelOffset.fX >= kernelSize.fWidth) ||
+ (kernelOffset.fY < 0) || (kernelOffset.fY >= kernelSize.fHeight)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMatrixConvolutionImageFilterImpl(
+ kernelSize, kernel, gain, bias, kernelOffset, tileMode, convolveAlpha,
+ std::move(input), cropRect));
+}
+
+void SkMatrixConvolutionImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMatrixConvolutionImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMatrixConvolutionImageFilter",
+ SkMatrixConvolutionImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkMatrixConvolutionImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+
+ SkISize kernelSize;
+ kernelSize.fWidth = buffer.readInt();
+ kernelSize.fHeight = buffer.readInt();
+ const int count = buffer.getArrayCount();
+
+ const int64_t kernelArea = sk_64_mul(kernelSize.width(), kernelSize.height());
+ if (!buffer.validate(kernelArea == count)) {
+ return nullptr;
+ }
+ if (!buffer.validateCanReadN<SkScalar>(count)) {
+ return nullptr;
+ }
+ SkAutoSTArray<16, SkScalar> kernel(count);
+ if (!buffer.readScalarArray(kernel.get(), count)) {
+ return nullptr;
+ }
+ SkScalar gain = buffer.readScalar();
+ SkScalar bias = buffer.readScalar();
+ SkIPoint kernelOffset;
+ kernelOffset.fX = buffer.readInt();
+ kernelOffset.fY = buffer.readInt();
+
+ SkTileMode tileMode;
+ if (buffer.isVersionLT(SkPicturePriv::kCleanupImageFilterEnums_Version)) {
+ tileMode = to_sktilemode(buffer.read32LE(SkMatrixConvolutionImageFilter::kLast_TileMode));
+ } else {
+ tileMode = buffer.read32LE(SkTileMode::kLastTileMode);
+ }
+ bool convolveAlpha = buffer.readBool();
+
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkMatrixConvolutionImageFilter::Make(
+ kernelSize, kernel.get(), gain, bias, kernelOffset, tileMode,
+ convolveAlpha, common.getInput(0), &common.cropRect());
+}
+
+void SkMatrixConvolutionImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fKernelSize.fWidth);
+ buffer.writeInt(fKernelSize.fHeight);
+ buffer.writeScalarArray(fKernel, fKernelSize.fWidth * fKernelSize.fHeight);
+ buffer.writeScalar(fGain);
+ buffer.writeScalar(fBias);
+ buffer.writeInt(fKernelOffset.fX);
+ buffer.writeInt(fKernelOffset.fY);
+ buffer.writeInt((int) fTileMode);
+ buffer.writeBool(fConvolveAlpha);
+}
+
+template<class PixelFetcher, bool convolveAlpha>
+void SkMatrixConvolutionImageFilterImpl::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& r,
+ const SkIRect& bounds) const {
+ SkIRect rect(r);
+ if (!rect.intersect(bounds)) {
+ return;
+ }
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ SkPMColor* dptr = result->getAddr32(rect.fLeft - offset.fX, y - offset.fY);
+ for (int x = rect.fLeft; x < rect.fRight; ++x) {
+ SkScalar sumA = 0, sumR = 0, sumG = 0, sumB = 0;
+ for (int cy = 0; cy < fKernelSize.fHeight; cy++) {
+ for (int cx = 0; cx < fKernelSize.fWidth; cx++) {
+ SkPMColor s = PixelFetcher::fetch(src,
+ x + cx - fKernelOffset.fX,
+ y + cy - fKernelOffset.fY,
+ bounds);
+ SkScalar k = fKernel[cy * fKernelSize.fWidth + cx];
+ if (convolveAlpha) {
+ sumA += SkGetPackedA32(s) * k;
+ }
+ sumR += SkGetPackedR32(s) * k;
+ sumG += SkGetPackedG32(s) * k;
+ sumB += SkGetPackedB32(s) * k;
+ }
+ }
+ int a = convolveAlpha
+ ? SkClampMax(SkScalarFloorToInt(sumA * fGain + fBias), 255)
+ : 255;
+ int r = SkClampMax(SkScalarFloorToInt(sumR * fGain + fBias), a);
+ int g = SkClampMax(SkScalarFloorToInt(sumG * fGain + fBias), a);
+ int b = SkClampMax(SkScalarFloorToInt(sumB * fGain + fBias), a);
+ if (!convolveAlpha) {
+ a = SkGetPackedA32(PixelFetcher::fetch(src, x, y, bounds));
+ *dptr++ = SkPreMultiplyARGB(a, r, g, b);
+ } else {
+ *dptr++ = SkPackARGB32(a, r, g, b);
+ }
+ }
+ }
+}
+
+template<class PixelFetcher>
+void SkMatrixConvolutionImageFilterImpl::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ if (fConvolveAlpha) {
+ filterPixels<PixelFetcher, true>(src, result, offset, rect, bounds);
+ } else {
+ filterPixels<PixelFetcher, false>(src, result, offset, rect, bounds);
+ }
+}
+
+void SkMatrixConvolutionImageFilterImpl::filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ switch (fTileMode) {
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Implement mirror tiling, treat as repeat for now.
+ case SkTileMode::kRepeat:
+ // In repeat mode, we still need to wrap the samples around the src
+ filterPixels<RepeatPixelFetcher>(src, result, offset, rect, bounds);
+ break;
+ case SkTileMode::kClamp:
+ // Fall through
+ case SkTileMode::kDecal:
+ filterPixels<UncheckedPixelFetcher>(src, result, offset, rect, bounds);
+ break;
+ }
+}
+
+void SkMatrixConvolutionImageFilterImpl::filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& srcBounds) const {
+ switch (fTileMode) {
+ case SkTileMode::kClamp:
+ filterPixels<ClampPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Implement mirror tiling, treat as repeat for now.
+ case SkTileMode::kRepeat:
+ filterPixels<RepeatPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ case SkTileMode::kDecal:
+ filterPixels<ClampToBlackPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ }
+}
+
+// FIXME: This should be refactored to SkImageFilterUtils for
+// use by other filters. For now, we assume the input is always
+// premultiplied, and unpremultiply it here.
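+// (When alpha is not convolved, the color channels are filtered in unpremultiplied
+// space so transparent pixels don't darken the result, mirroring how SVG's
+// feConvolveMatrix handles preserveAlpha.)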
+static SkBitmap unpremultiply_bitmap(const SkBitmap& src) {
+ if (!src.getPixels()) {
+ return SkBitmap();
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32(src.width(), src.height(), src.alphaType());
+ SkBitmap result;
+ if (!result.tryAllocPixels(info)) {
+ return SkBitmap();
+ }
+ for (int y = 0; y < src.height(); ++y) {
+ const uint32_t* srcRow = src.getAddr32(0, y);
+ uint32_t* dstRow = result.getAddr32(0, y);
+ for (int x = 0; x < src.width(); ++x) {
+ dstRow[x] = SkUnPreMultiply::PMColorToColor(srcRow[x]);
+ }
+ }
+ return result;
+}
+
+#if SK_SUPPORT_GPU
+
+static GrTextureDomain::Mode convert_tilemodes(SkTileMode tileMode) {
+ switch (tileMode) {
+ case SkTileMode::kClamp:
+ return GrTextureDomain::kClamp_Mode;
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Implement mirror tiling, treat as repeat for now.
+ case SkTileMode::kRepeat:
+ return GrTextureDomain::kRepeat_Mode;
+ case SkTileMode::kDecal:
+ return GrTextureDomain::kDecal_Mode;
+ default:
+ SkASSERT(false);
+ }
+ return GrTextureDomain::kIgnore_Mode;
+}
+#endif
+
+sk_sp<SkSpecialImage> SkMatrixConvolutionImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect dstBounds;
+ input = this->applyCropRectAndPad(this->mapContext(ctx), input.get(), &inputOffset, &dstBounds);
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect originalSrcBounds = SkIRect::MakeXYWH(inputOffset.fX, inputOffset.fY,
+ input->width(), input->height());
+
+ SkIRect srcBounds = this->onFilterNodeBounds(dstBounds, ctx.ctm(), kReverse_MapDirection,
+ &originalSrcBounds);
+
+ if (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode) {
+ srcBounds = DetermineRepeatedSrcBound(srcBounds, fKernelOffset,
+ fKernelSize, originalSrcBounds);
+ } else {
+ if (!srcBounds.intersect(dstBounds)) {
+ return nullptr;
+ }
+ }
+
+#if SK_SUPPORT_GPU
+ // Note: if the kernel is too big, the GPU path falls back to SW
+ if (ctx.gpuBacked() &&
+ fKernelSize.width() * fKernelSize.height() <= MAX_KERNEL_SIZE) {
+ auto context = ctx.getContext();
+
+ // Ensure the input is in the destination color space. Typically applyCropRect will have
+ // called pad_image to account for our dilation of bounds, so the result will already be
+ // moved to the destination color space. If a filter DAG avoids that, then we use this
+ // fall-back, which saves us from having to do the xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace());
+
+ sk_sp<GrTextureProxy> inputProxy(input->asTextureProxyRef(context));
+ SkASSERT(inputProxy);
+
+ const auto isProtected = inputProxy->isProtected();
+
+ offset->fX = dstBounds.left();
+ offset->fY = dstBounds.top();
+ dstBounds.offset(-inputOffset);
+ srcBounds.offset(-inputOffset);
+ // Map srcBounds from input's logical image domain to that of the proxy
+ srcBounds.offset(input->subset().x(), input->subset().y());
+
+ auto fp = GrMatrixConvolutionEffect::Make(std::move(inputProxy),
+ srcBounds,
+ fKernelSize,
+ fKernel,
+ fGain,
+ fBias,
+ fKernelOffset,
+ convert_tilemodes(fTileMode),
+ fConvolveAlpha);
+ if (!fp) {
+ return nullptr;
+ }
+
+        // FIXME (michaelludwig) - Clean this up as part of the imagefilter refactor; some filters
+        // instead require a coord transform on the FP. At the very least, be consistent; at best,
+        // make it so that filter impls don't need to worry about the subset origin.
+
+ // Must also map the dstBounds since it is used as the src rect in DrawWithFP when
+ // evaluating the FP, and the dst rect just uses the size of dstBounds.
+ dstBounds.offset(input->subset().x(), input->subset().y());
+ return DrawWithFP(context, std::move(fp), dstBounds, ctx.colorType(), ctx.colorSpace(),
+ isProtected ? GrProtected::kYes : GrProtected::kNo);
+ }
+#endif
+
+ SkBitmap inputBM;
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!fConvolveAlpha && !inputBM.isOpaque()) {
+ inputBM = unpremultiply_bitmap(inputBM);
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32(dstBounds.width(), dstBounds.height(),
+ inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ dstBounds.offset(-inputOffset);
+ srcBounds.offset(-inputOffset);
+
+ SkIRect interior;
+ if (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode) {
+ // In repeat mode, the filterPixels calls will wrap around
+ // so we just need to render 'dstBounds'
+ interior = dstBounds;
+ } else {
+ interior = SkIRect::MakeXYWH(dstBounds.left() + fKernelOffset.fX,
+ dstBounds.top() + fKernelOffset.fY,
+ dstBounds.width() - fKernelSize.fWidth + 1,
+ dstBounds.height() - fKernelSize.fHeight + 1);
+ }
+
+ SkIRect top = SkIRect::MakeLTRB(dstBounds.left(), dstBounds.top(),
+ dstBounds.right(), interior.top());
+ SkIRect bottom = SkIRect::MakeLTRB(dstBounds.left(), interior.bottom(),
+ dstBounds.right(), dstBounds.bottom());
+ SkIRect left = SkIRect::MakeLTRB(dstBounds.left(), interior.top(),
+ interior.left(), interior.bottom());
+ SkIRect right = SkIRect::MakeLTRB(interior.right(), interior.top(),
+ dstBounds.right(), interior.bottom());
+
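+    // The dst decomposes into five disjoint regions. In clamp/decal mode the interior
+    // is inset so the kernel footprint stays inside the source and can use the
+    // unchecked fetcher, while the borders use a tiling-aware fetcher; in
+    // repeat/mirror mode the interior covers all of dstBounds and the border rects
+    // are empty.
+    //   +-----------------------+
+    //   |          top          |
+    //   +------+----------+-----+
+    //   | left | interior |right|
+    //   +------+----------+-----+
+    //   |        bottom         |
+    //   +-----------------------+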
+ SkIVector dstContentOffset = { offset->fX - inputOffset.fX, offset->fY - inputOffset.fY };
+
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, top, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, left, srcBounds);
+ this->filterInteriorPixels(inputBM, &dst, dstContentOffset, interior, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, right, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, bottom, srcBounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(), dstBounds.height()),
+ dst);
+}
+
+SkIRect SkMatrixConvolutionImageFilterImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir && inputRect &&
+ (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode)) {
+ SkASSERT(inputRect);
+ return DetermineRepeatedSrcBound(src, fKernelOffset, fKernelSize, *inputRect);
+ }
+
+ SkIRect dst = src;
+ int w = fKernelSize.width() - 1, h = fKernelSize.height() - 1;
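+    // E.g. for a 3x3 kernel with offset (1, 1), w == h == 2 and the reverse mapping
+    // grows the rect by one pixel on every side to cover all contributing samples.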
+
+ if (kReverse_MapDirection == dir) {
+ dst.adjust(-fKernelOffset.fX, -fKernelOffset.fY,
+ w - fKernelOffset.fX, h - fKernelOffset.fY);
+ } else {
+ dst.adjust(fKernelOffset.fX - w, fKernelOffset.fY - h, fKernelOffset.fX, fKernelOffset.fY);
+ }
+ return dst;
+}
+
+bool SkMatrixConvolutionImageFilterImpl::affectsTransparentBlack() const {
+    // It seems that the only rational way for repeat sample mode to work is if the caller
+    // explicitly restricts the input, in which case the input range is explicitly known and
+    // specified.
+    // TODO: it seems that this should be true for clamp mode too.
+
+ // For the other modes, because the kernel is applied in device-space, we have no idea what
+ // pixels it will affect in object-space.
+ return SkTileMode::kRepeat != fTileMode && SkTileMode::kMirror != fTileMode;
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp
new file mode 100644
index 0000000000..d7925d78fd
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkMergeImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkMergeImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkMergeImageFilterImpl(sk_sp<SkImageFilter>* const filters, int count,
+ const CropRect* cropRect)
+ : INHERITED(filters, count, cropRect) {
+ SkASSERT(count >= 0);
+ }
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+
+private:
+ friend void SkMergeImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkMergeImageFilterImpl)
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkMergeImageFilter::Make(sk_sp<SkImageFilter>* const filters, int count,
+ const SkImageFilter::CropRect* cropRect) {
+ return sk_sp<SkImageFilter>(new SkMergeImageFilterImpl(filters, count, cropRect));
+}
+
+void SkMergeImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMergeImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMergeImageFilter", SkMergeImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkMergeImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ Common common;
+ if (!common.unflatten(buffer, -1) || !buffer.isValid()) {
+ return nullptr;
+ }
+ return SkMergeImageFilter::Make(common.inputs(), common.inputCount(), &common.cropRect());
+}
+
+sk_sp<SkSpecialImage> SkMergeImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ int inputCount = this->countInputs();
+ if (inputCount < 1) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ bounds.setEmpty();
+
+ std::unique_ptr<sk_sp<SkSpecialImage>[]> inputs(new sk_sp<SkSpecialImage>[inputCount]);
+ std::unique_ptr<SkIPoint[]> offsets(new SkIPoint[inputCount]);
+
+ // Filter all of the inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ offsets[i] = { 0, 0 };
+ inputs[i] = this->filterInput(i, ctx, &offsets[i]);
+ if (!inputs[i]) {
+ continue;
+ }
+ const SkIRect inputBounds = SkIRect::MakeXYWH(offsets[i].fX, offsets[i].fY,
+ inputs[i]->width(), inputs[i]->height());
+ bounds.join(inputBounds);
+ }
+ if (bounds.isEmpty()) {
+ return nullptr;
+ }
+
+ // Apply the crop rect to the union of the inputs' bounds.
+ // Note that the crop rect can only reduce the bounds, since this
+ // filter does not affect transparent black.
+ bool embiggen = false;
+ this->getCropRect().applyTo(bounds, ctx.ctm(), embiggen, &bounds);
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ const int x0 = bounds.left();
+ const int y0 = bounds.top();
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ // Composite all of the filter inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ if (!inputs[i]) {
+ continue;
+ }
+
+ inputs[i]->draw(canvas,
+ SkIntToScalar(offsets[i].x() - x0), SkIntToScalar(offsets[i].y() - y0),
+ nullptr);
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return surf->makeImageSnapshot();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp
new file mode 100644
index 0000000000..3fa7eb1aec
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp
@@ -0,0 +1,853 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkMorphologyImageFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#endif
+
+namespace {
+
+enum class MorphType {
+ kErode,
+ kDilate,
+ kLastType = kDilate
+};
+
+enum class MorphDirection { kX, kY };
+
+class SkMorphologyImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkMorphologyImageFilterImpl(MorphType type, int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fType(type)
+ , fRadius(SkISize::Make(radiusX, radiusY)) {}
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+ /**
+ * All morphology procs have the same signature: src is the source buffer, dst the
+ * destination buffer, radius is the morphology radius, width and height are the bounds
+ * of the destination buffer (in pixels), and srcStride and dstStride are the
+ * number of pixels per row in each buffer. All buffers are 8888.
+ */
+
+ typedef void (*Proc)(const SkPMColor* src, SkPMColor* dst, int radius,
+ int width, int height, int srcStride, int dstStride);
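+    // Hypothetical direct invocation, as a sketch of that contract (the actual call
+    // sites are call_proc_X/call_proc_Y below):
+    //   proc(src.getAddr32(left, top), dst->getAddr32(0, 0), radius,
+    //        dstWidth, dstHeight, src.rowBytesAsPixels(), dst->rowBytesAsPixels());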
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ void flatten(SkWriteBuffer&) const override;
+
+ SkISize radius() const { return fRadius; }
+ SkSize mappedRadius(const SkMatrix& ctm) const {
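+        // Map the pixel-space radius through the CTM so the morphology scales with
+        // the transform; take absolute values since the matrix may flip an axis.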
+ SkVector radiusVector = SkVector::Make(SkIntToScalar(fRadius.width()),
+ SkIntToScalar(fRadius.height()));
+ ctm.mapVectors(&radiusVector, 1);
+ radiusVector.setAbs(radiusVector);
+ return SkSize::Make(radiusVector.x(), radiusVector.y());
+ }
+
+private:
+ friend void SkDilateImageFilter::RegisterFlattenables();
+
+ SK_FLATTENABLE_HOOKS(SkMorphologyImageFilterImpl)
+ // Historically the morphology op was implicitly encoded in the factory type used to decode
+ // the image filter, so provide backwards compatible functions for old SKPs.
+ static sk_sp<SkFlattenable> CreateProcWithType(SkReadBuffer&, const MorphType*);
+ static sk_sp<SkFlattenable> DilateCreateProc(SkReadBuffer& buffer) {
+ static const MorphType kType = MorphType::kDilate;
+ return CreateProcWithType(buffer, &kType);
+ }
+ static sk_sp<SkFlattenable> ErodeCreateProc(SkReadBuffer& buffer) {
+ static const MorphType kType = MorphType::kErode;
+ return CreateProcWithType(buffer, &kType);
+ }
+
+ MorphType fType;
+ SkISize fRadius;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkDilateImageFilter::Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMorphologyImageFilterImpl(
+ MorphType::kDilate, radiusX, radiusY, std::move(input), cropRect));
+}
+
+sk_sp<SkImageFilter> SkErodeImageFilter::Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMorphologyImageFilterImpl(
+ MorphType::kErode, radiusX, radiusY, std::move(input), cropRect));
+}
+
+void SkDilateImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMorphologyImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old names
+ SkFlattenable::Register("SkDilateImageFilter", SkMorphologyImageFilterImpl::DilateCreateProc);
+ SkFlattenable::Register(
+ "SkDilateImageFilterImpl", SkMorphologyImageFilterImpl::DilateCreateProc);
+ SkFlattenable::Register("SkErodeImageFilter", SkMorphologyImageFilterImpl::ErodeCreateProc);
+ SkFlattenable::Register("SkErodeImageFilterImpl", SkMorphologyImageFilterImpl::ErodeCreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// 'type' acts as a signal that old-style deserialization is required. It is temporary.
+sk_sp<SkFlattenable> SkMorphologyImageFilterImpl::CreateProcWithType(SkReadBuffer& buffer,
+ const MorphType* type) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ const int width = buffer.readInt();
+ const int height = buffer.readInt();
+
+ MorphType filterType;
+ if (type) {
+ // The old create procs that have an associated op should only be used on old SKPs
+ SkASSERT(buffer.isVersionLT(SkPicturePriv::kUnifyErodeDilateImpls_Version));
+ filterType = *type;
+ } else {
+ filterType = buffer.read32LE(MorphType::kLastType);
+ }
+
+ if (filterType == MorphType::kDilate) {
+ return SkDilateImageFilter::Make(width, height, common.getInput(0), &common.cropRect());
+ } else if (filterType == MorphType::kErode) {
+ return SkErodeImageFilter::Make(width, height, common.getInput(0), &common.cropRect());
+ } else {
+ return nullptr;
+ }
+}
+
+sk_sp<SkFlattenable> SkMorphologyImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ // Pass null to have the create proc read the op from the buffer
+ return CreateProcWithType(buffer, nullptr);
+}
+
+void SkMorphologyImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fRadius.fWidth);
+ buffer.writeInt(fRadius.fHeight);
+ buffer.writeInt(static_cast<int>(fType));
+}
+
+static void call_proc_X(SkMorphologyImageFilterImpl::Proc procX,
+ const SkBitmap& src, SkBitmap* dst,
+ int radiusX, const SkIRect& bounds) {
+ procX(src.getAddr32(bounds.left(), bounds.top()), dst->getAddr32(0, 0),
+ radiusX, bounds.width(), bounds.height(),
+ src.rowBytesAsPixels(), dst->rowBytesAsPixels());
+}
+
+static void call_proc_Y(SkMorphologyImageFilterImpl::Proc procY,
+ const SkPMColor* src, int srcRowBytesAsPixels, SkBitmap* dst,
+ int radiusY, const SkIRect& bounds) {
+ procY(src, dst->getAddr32(0, 0),
+ radiusY, bounds.height(), bounds.width(),
+ srcRowBytesAsPixels, dst->rowBytesAsPixels());
+}
+
+SkRect SkMorphologyImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(SkIntToScalar(fRadius.width()), SkIntToScalar(fRadius.height()));
+ return bounds;
+}
+
+SkIRect SkMorphologyImageFilterImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection, const SkIRect* inputRect) const {
+ SkSize radius = mappedRadius(ctm);
+ return src.makeOutset(SkScalarCeilToInt(radius.width()), SkScalarCeilToInt(radius.height()));
+}
+
+#if SK_SUPPORT_GPU
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Morphology effects. Depending upon the type of morphology, either the
+ * component-wise min (MorphType::kErode) or max (MorphType::kDilate) of all
+ * pixels in the kernel is selected as the new color. The new color is
+ * modulated by the input color.
+ */
+class GrMorphologyEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ MorphDirection dir,
+ int radius, MorphType type) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMorphologyEffect(std::move(proxy), srcColorType, dir, radius, type, nullptr));
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ MorphDirection dir,
+ int radius, MorphType type,
+ const float bounds[2]) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMorphologyEffect(std::move(proxy), srcColorType, dir, radius, type, bounds));
+ }
+
+ MorphType type() const { return fType; }
+ bool useRange() const { return fUseRange; }
+ const float* range() const { return fRange; }
+ MorphDirection direction() const { return fDirection; }
+ int radius() const { return fRadius; }
+ int width() const { return 2 * fRadius + 1; }
+
+ const char* name() const override { return "Morphology"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMorphologyEffect(*this));
+ }
+
+private:
+ GrCoordTransform fCoordTransform;
+ TextureSampler fTextureSampler;
+ MorphDirection fDirection;
+ int fRadius;
+ MorphType fType;
+ bool fUseRange;
+ float fRange[2];
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSampler; }
+
+ GrMorphologyEffect(sk_sp<GrTextureProxy>, GrColorType srcColorType, MorphDirection, int radius,
+ MorphType, const float range[2]);
+ explicit GrMorphologyEffect(const GrMorphologyEffect&);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLMorphologyEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fPixelSizeUni;
+ GrGLSLProgramDataManager::UniformHandle fRangeUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLMorphologyEffect::emitCode(EmitArgs& args) {
+ const GrMorphologyEffect& me = args.fFp.cast<GrMorphologyEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fPixelSizeUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "PixelSize");
+ const char* pixelSizeInc = uniformHandler->getUniformCStr(fPixelSizeUni);
+ fRangeUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat2_GrSLType, "Range");
+ const char* range = uniformHandler->getUniformCStr(fRangeUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ const char* func;
+ switch (me.type()) {
+ case MorphType::kErode:
+ fragBuilder->codeAppendf("\t\t%s = half4(1, 1, 1, 1);\n", args.fOutputColor);
+ func = "min";
+ break;
+ case MorphType::kDilate:
+ fragBuilder->codeAppendf("\t\t%s = half4(0, 0, 0, 0);\n", args.fOutputColor);
+ func = "max";
+ break;
+ default:
+ SK_ABORT("Unexpected type");
+ func = ""; // suppress warning
+ break;
+ }
+
+ const char* dir;
+ switch (me.direction()) {
+ case MorphDirection::kX:
+ dir = "x";
+ break;
+ case MorphDirection::kY:
+ dir = "y";
+ break;
+ default:
+ SK_ABORT("Unknown filter direction.");
+ dir = ""; // suppress warning
+ }
+
+ int width = me.width();
+
+ // float2 coord = coord2D;
+ fragBuilder->codeAppendf("\t\tfloat2 coord = %s;\n", coords2D.c_str());
+ // coord.x -= radius * pixelSize;
+ fragBuilder->codeAppendf("\t\tcoord.%s -= %d.0 * %s; \n", dir, me.radius(), pixelSizeInc);
+ if (me.useRange()) {
+ // highBound = min(highBound, coord.x + (width-1) * pixelSize);
+ fragBuilder->codeAppendf("\t\tfloat highBound = min(%s.y, coord.%s + %f * %s);",
+ range, dir, float(width - 1), pixelSizeInc);
+ // coord.x = max(lowBound, coord.x);
+ fragBuilder->codeAppendf("\t\tcoord.%s = max(%s.x, coord.%s);", dir, range, dir);
+ }
+ fragBuilder->codeAppendf("\t\tfor (int i = 0; i < %d; i++) {\n", width);
+ fragBuilder->codeAppendf("\t\t\t%s = %s(%s, ", args.fOutputColor, func, args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "coord");
+ fragBuilder->codeAppend(");\n");
+ // coord.x += pixelSize;
+ fragBuilder->codeAppendf("\t\t\tcoord.%s += %s;\n", dir, pixelSizeInc);
+ if (me.useRange()) {
+ // coord.x = min(highBound, coord.x);
+ fragBuilder->codeAppendf("\t\t\tcoord.%s = min(highBound, coord.%s);", dir, dir);
+ }
+ fragBuilder->codeAppend("\t\t}\n");
+ fragBuilder->codeAppendf("%s *= %s;\n", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLMorphologyEffect::GenKey(const GrProcessor& proc,
+ const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
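+    // Pack the shader-relevant state: the radius in the low bits, then one bit each
+    // for the morphology type, the direction, and whether a sample range is used.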
+ uint32_t key = static_cast<uint32_t>(m.radius());
+ key |= (static_cast<uint32_t>(m.type()) << 8);
+ key |= (static_cast<uint32_t>(m.direction()) << 9);
+ if (m.useRange()) {
+ key |= 1 << 10;
+ }
+ b->add32(key);
+}
+
+void GrGLMorphologyEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
+ GrSurfaceProxy* proxy = m.textureSampler(0).proxy();
+ GrTexture& texture = *proxy->peekTexture();
+
+ float pixelSize = 0.0f;
+ switch (m.direction()) {
+ case MorphDirection::kX:
+ pixelSize = 1.0f / texture.width();
+ break;
+ case MorphDirection::kY:
+ pixelSize = 1.0f / texture.height();
+ break;
+ default:
+ SK_ABORT("Unknown filter direction.");
+ }
+ pdman.set1f(fPixelSizeUni, pixelSize);
+
+ if (m.useRange()) {
+ const float* range = m.range();
+ if (MorphDirection::kY == m.direction() &&
+ proxy->origin() == kBottomLeft_GrSurfaceOrigin) {
+ pdman.set2f(fRangeUni, 1.0f - (range[1]*pixelSize), 1.0f - (range[0]*pixelSize));
+ } else {
+ pdman.set2f(fRangeUni, range[0] * pixelSize, range[1] * pixelSize);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrMorphologyEffect::GrMorphologyEffect(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ MorphDirection direction,
+ int radius,
+ MorphType type,
+ const float range[2])
+ : INHERITED(kGrMorphologyEffect_ClassID,
+ ModulateForClampedSamplerOptFlags(srcColorType))
+ , fCoordTransform(proxy.get())
+ , fTextureSampler(std::move(proxy))
+ , fDirection(direction)
+ , fRadius(radius)
+ , fType(type)
+ , fUseRange(SkToBool(range)) {
+ // Make sure the sampler's ctor uses the clamp wrap mode
+ SkASSERT(fTextureSampler.samplerState().wrapModeX() == GrSamplerState::WrapMode::kClamp &&
+ fTextureSampler.samplerState().wrapModeY() == GrSamplerState::WrapMode::kClamp);
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ if (fUseRange) {
+ fRange[0] = range[0];
+ fRange[1] = range[1];
+ }
+}
+
+GrMorphologyEffect::GrMorphologyEffect(const GrMorphologyEffect& that)
+ : INHERITED(kGrMorphologyEffect_ClassID, that.optimizationFlags())
+ , fCoordTransform(that.fCoordTransform)
+ , fTextureSampler(that.fTextureSampler)
+ , fDirection(that.fDirection)
+ , fRadius(that.fRadius)
+ , fType(that.fType)
+ , fUseRange(that.fUseRange) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ if (that.fUseRange) {
+ fRange[0] = that.fRange[0];
+ fRange[1] = that.fRange[1];
+ }
+}
+
+void GrMorphologyEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLMorphologyEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrMorphologyEffect::onCreateGLSLInstance() const {
+ return new GrGLMorphologyEffect;
+}
+bool GrMorphologyEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMorphologyEffect& s = sBase.cast<GrMorphologyEffect>();
+ return (this->radius() == s.radius() &&
+ this->direction() == s.direction() &&
+ this->useRange() == s.useRange() &&
+ this->type() == s.type());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMorphologyEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrMorphologyEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+
+ MorphDirection dir = d->fRandom->nextBool() ? MorphDirection::kX : MorphDirection::kY;
+ static const int kMaxRadius = 10;
+ int radius = d->fRandom->nextRangeU(1, kMaxRadius);
+ MorphType type = d->fRandom->nextBool() ? MorphType::kErode : MorphType::kDilate;
+
+ return GrMorphologyEffect::Make(std::move(proxy), d->textureProxyColorType(texIdx), dir, radius,
+ type);
+}
+#endif
+
+static void apply_morphology_rect(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ const float bounds[2],
+ MorphDirection direction) {
+ GrPaint paint;
+ paint.addColorFragmentProcessor(GrMorphologyEffect::Make(std::move(proxy), srcColorType,
+ direction, radius, morphType,
+ bounds));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ renderTargetContext->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(dstRect), SkRect::Make(srcRect));
+}
+
+static void apply_morphology_rect_no_bounds(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ MorphDirection direction) {
+ GrPaint paint;
+ paint.addColorFragmentProcessor(GrMorphologyEffect::Make(std::move(proxy), srcColorType,
+ direction, radius, morphType));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ renderTargetContext->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(dstRect), SkRect::Make(srcRect));
+}
+
+static void apply_morphology_pass(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ sk_sp<GrTextureProxy> textureProxy,
+ GrColorType srcColorType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ MorphDirection direction) {
+ float bounds[2] = { 0.0f, 1.0f };
+ SkIRect lowerSrcRect = srcRect, lowerDstRect = dstRect;
+ SkIRect middleSrcRect = srcRect, middleDstRect = dstRect;
+ SkIRect upperSrcRect = srcRect, upperDstRect = dstRect;
+ if (direction == MorphDirection::kX) {
+ bounds[0] = SkIntToScalar(srcRect.left()) + 0.5f;
+ bounds[1] = SkIntToScalar(srcRect.right()) - 0.5f;
+ lowerSrcRect.fRight = srcRect.left() + radius;
+ lowerDstRect.fRight = dstRect.left() + radius;
+ upperSrcRect.fLeft = srcRect.right() - radius;
+ upperDstRect.fLeft = dstRect.right() - radius;
+ middleSrcRect.inset(radius, 0);
+ middleDstRect.inset(radius, 0);
+ } else {
+ bounds[0] = SkIntToScalar(srcRect.top()) + 0.5f;
+ bounds[1] = SkIntToScalar(srcRect.bottom()) - 0.5f;
+ lowerSrcRect.fBottom = srcRect.top() + radius;
+ lowerDstRect.fBottom = dstRect.top() + radius;
+ upperSrcRect.fTop = srcRect.bottom() - radius;
+ upperDstRect.fTop = dstRect.bottom() - radius;
+ middleSrcRect.inset(0, radius);
+ middleDstRect.inset(0, radius);
+ }
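+    // Texels within 'radius' of the src edge would sample past it, so the two edge
+    // strips are drawn with the clamped 'bounds' range while the middle strip, which
+    // never reads out of range, skips the clamp.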
+ if (middleSrcRect.width() <= 0) {
+ // radius covers srcRect; use bounds over entire draw
+ apply_morphology_rect(renderTargetContext, clip, std::move(textureProxy), srcColorType,
+ srcRect, dstRect, radius, morphType, bounds, direction);
+ } else {
+ // Draw upper and lower margins with bounds; middle without.
+ apply_morphology_rect(renderTargetContext, clip, textureProxy, srcColorType,
+ lowerSrcRect, lowerDstRect, radius, morphType, bounds, direction);
+ apply_morphology_rect(renderTargetContext, clip, textureProxy, srcColorType,
+ upperSrcRect, upperDstRect, radius, morphType, bounds, direction);
+ apply_morphology_rect_no_bounds(renderTargetContext, clip, std::move(textureProxy),
+ srcColorType, middleSrcRect, middleDstRect, radius,
+ morphType, direction);
+ }
+}
+
+static sk_sp<SkSpecialImage> apply_morphology(
+ GrRecordingContext* context, SkSpecialImage* input, const SkIRect& rect,
+ MorphType morphType, SkISize radius, const SkImageFilter_Base::Context& ctx) {
+ sk_sp<GrTextureProxy> srcTexture(input->asTextureProxyRef(context));
+ GrColorType srcColorType = SkColorTypeToGrColorType(input->colorType());
+ SkASSERT(srcTexture);
+ sk_sp<SkColorSpace> colorSpace = ctx.refColorSpace();
+ GrColorType colorType = ctx.grColorType();
+
+    // Set up the new clip
+ const GrFixedClip clip(SkIRect::MakeWH(srcTexture->width(), srcTexture->height()));
+
+ const SkIRect dstRect = SkIRect::MakeWH(rect.width(), rect.height());
+ SkIRect srcRect = rect;
+ // Map into proxy space
+ srcRect.offset(input->subset().x(), input->subset().y());
+ SkASSERT(radius.width() > 0 || radius.height() > 0);
+
+ if (radius.fWidth > 0) {
+ auto dstRTContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox,
+ rect.width(),
+ rect.height(),
+ colorType,
+ colorSpace,
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ srcTexture->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!dstRTContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstRTContext.get(), clip, std::move(srcTexture), srcColorType,
+ srcRect, dstRect, radius.fWidth, morphType, MorphDirection::kX);
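+        // The Y pass below samples up to radius.fHeight rows beyond dstRect in the
+        // approx-fit target, so clear that strip to the pass's identity color
+        // (white for erode's min, transparent for dilate's max).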
+ SkIRect clearRect = SkIRect::MakeXYWH(dstRect.fLeft, dstRect.fBottom,
+ dstRect.width(), radius.fHeight);
+ SkPMColor4f clearColor = MorphType::kErode == morphType
+ ? SK_PMColor4fWHITE : SK_PMColor4fTRANSPARENT;
+ dstRTContext->clear(&clearRect, clearColor, GrRenderTargetContext::CanClearFullscreen::kNo);
+
+ srcTexture = dstRTContext->asTextureProxyRef();
+ srcColorType = colorType;
+ srcRect = dstRect;
+ }
+ if (radius.fHeight > 0) {
+ auto dstRTContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox,
+ rect.width(),
+ rect.height(),
+ colorType,
+ colorSpace,
+ 1,
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ srcTexture->isProtected() ? GrProtected::kYes : GrProtected::kNo);
+ if (!dstRTContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstRTContext.get(), clip, std::move(srcTexture), srcColorType,
+ srcRect, dstRect, radius.fHeight, morphType, MorphDirection::kY);
+
+ srcTexture = dstRTContext->asTextureProxyRef();
+ }
+
+ return SkSpecialImage::MakeDeferredFromGpu(context,
+ SkIRect::MakeWH(rect.width(), rect.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(srcTexture), colorType,
+ std::move(colorSpace), &input->props());
+}
+#endif
+
+namespace {
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
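+        // 'src' and 'upperSrc' bracket a sliding window of at most 2 * radius + 1
+        // columns (or rows, for the Y direction); each pointer advances only once
+        // 'x' is clear of the corresponding edge.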
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ __m128i extreme = (type == MorphType::kDilate) ? _mm_setzero_si128()
+ : _mm_set1_epi32(0xFFFFFFFF);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ __m128i src_pixel = _mm_cvtsi32_si128(*p);
+ extreme = (type == MorphType::kDilate) ? _mm_max_epu8(src_pixel, extreme)
+ : _mm_min_epu8(src_pixel, extreme);
+ }
+ *dptr = _mm_cvtsi128_si32(extreme);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ uint8x8_t extreme = vdup_n_u8(type == MorphType::kDilate ? 0 : 255);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ uint8x8_t src_pixel = vreinterpret_u8_u32(vdup_n_u32(*p));
+ extreme = (type == MorphType::kDilate) ? vmax_u8(src_pixel, extreme)
+ : vmin_u8(src_pixel, extreme);
+ }
+ *dptr = vget_lane_u32(vreinterpret_u32_u8(extreme), 0);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) src += srcStrideX;
+ if (x + radius < width - 1) upperSrc += srcStrideX;
+ dst += dstStrideX;
+ }
+ }
+
+#else
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+                // If we're maxing (dilate), start from 0; if minimizing (erode), start from 255.
+ const int start = (type == MorphType::kDilate) ? 0 : 255;
+ int B = start, G = start, R = start, A = start;
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ int b = SkGetPackedB32(*p),
+ g = SkGetPackedG32(*p),
+ r = SkGetPackedR32(*p),
+ a = SkGetPackedA32(*p);
+ if (type == MorphType::kDilate) {
+ B = SkTMax(b, B);
+ G = SkTMax(g, G);
+ R = SkTMax(r, R);
+ A = SkTMax(a, A);
+ } else {
+ B = SkTMin(b, B);
+ G = SkTMin(g, G);
+ R = SkTMin(r, R);
+ A = SkTMin(a, A);
+ }
+ }
+ *dptr = SkPackARGB32(A, R, G, B);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+ }
+#endif
+} // namespace
+
+sk_sp<SkSpecialImage> SkMorphologyImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ input = this->applyCropRectAndPad(this->mapContext(ctx), input.get(), &inputOffset, &bounds);
+ if (!input) {
+ return nullptr;
+ }
+
+ SkSize radius = mappedRadius(ctx.ctm());
+ int width = SkScalarFloorToInt(radius.width());
+ int height = SkScalarFloorToInt(radius.height());
+
+ if (width < 0 || height < 0) {
+ return nullptr;
+ }
+
+ SkIRect srcBounds = bounds;
+ srcBounds.offset(-inputOffset);
+
+ if (0 == width && 0 == height) {
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return input->makeSubset(srcBounds);
+ }
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ // Ensure the input is in the destination color space. Typically applyCropRect will have
+ // called pad_image to account for our dilation of bounds, so the result will already be
+ // moved to the destination color space. If a filter DAG avoids that, then we use this
+ // fall-back, which saves us from having to do the xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace());
+
+ sk_sp<SkSpecialImage> result(apply_morphology(context, input.get(), srcBounds, fType,
+ SkISize::Make(width, height), ctx));
+ if (result) {
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ }
+ return result;
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::Make(bounds.size(), inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkMorphologyImageFilterImpl::Proc procX, procY;
+
+ if (MorphType::kDilate == fType) {
+ procX = &morph<MorphType::kDilate, MorphDirection::kX>;
+ procY = &morph<MorphType::kDilate, MorphDirection::kY>;
+ } else {
+ procX = &morph<MorphType::kErode, MorphDirection::kX>;
+ procY = &morph<MorphType::kErode, MorphDirection::kY>;
+ }
+
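+    // The morphology is separable: when both radii are positive, run the X pass into a
+    // temporary bitmap and then the Y pass into the destination; a zero radius in either
+    // direction degenerates to a single pass.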
+ if (width > 0 && height > 0) {
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ call_proc_X(procX, inputBM, &tmp, width, srcBounds);
+ SkIRect tmpBounds = SkIRect::MakeWH(srcBounds.width(), srcBounds.height());
+ call_proc_Y(procY,
+ tmp.getAddr32(tmpBounds.left(), tmpBounds.top()), tmp.rowBytesAsPixels(),
+ &dst, height, tmpBounds);
+ } else if (width > 0) {
+ call_proc_X(procX, inputBM, &dst, width, srcBounds);
+ } else if (height > 0) {
+ call_proc_Y(procY,
+ inputBM.getAddr32(srcBounds.left(), srcBounds.top()),
+ inputBM.rowBytesAsPixels(),
+ &dst, height, srcBounds);
+ }
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkOffsetImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkOffsetImageFilter.cpp
new file mode 100644
index 0000000000..8a7de697f3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkOffsetImageFilter.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkOffsetImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkOffsetImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkOffsetImageFilterImpl(SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect) {
+ fOffset.set(dx, dy);
+ }
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void SkOffsetImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkOffsetImageFilterImpl)
+
+ SkVector fOffset;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
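+// A minimal usage sketch (values are illustrative, not taken from the surrounding code):
+//
+//   sk_sp<SkImageFilter> shift = SkOffsetImageFilter::Make(10.0f, 20.0f, nullptr, nullptr);
+//   SkPaint paint;
+//   paint.setImageFilter(std::move(shift));
+//   // Drawing with 'paint' now renders the layer translated by (10, 20) in local space.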
+sk_sp<SkImageFilter> SkOffsetImageFilter::Make(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ if (!SkScalarIsFinite(dx) || !SkScalarIsFinite(dy)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(new SkOffsetImageFilterImpl(dx, dy, std::move(input), cropRect));
+}
+
+void SkOffsetImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkOffsetImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkOffsetImageFilter", SkOffsetImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkOffsetImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkPoint offset;
+ buffer.readPoint(&offset);
+ return SkOffsetImageFilter::Make(offset.x(), offset.y(), common.getInput(0),
+ &common.cropRect());
+}
+
+void SkOffsetImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fOffset);
+}
+
+static SkIPoint map_offset_vector(const SkMatrix& ctm, const SkVector& offset) {
+ SkVector vec = ctm.mapVector(offset.fX, offset.fY);
+ return SkIPoint::Make(SkScalarRoundToInt(vec.fX), SkScalarRoundToInt(vec.fY));
+}
+
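+// Without a crop rect the filter never renders: it simply adds the CTM-mapped offset to the
+// input's reported origin. With a crop rect the input must actually be redrawn, shifted by the
+// mapped vector, into a new surface sized to the cropped bounds.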
+sk_sp<SkSpecialImage> SkOffsetImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint srcOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &srcOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIPoint vec = map_offset_vector(ctx.ctm(), fOffset);
+
+ if (!this->cropRectIsSet()) {
+ offset->fX = Sk32_sat_add(srcOffset.fX, vec.fX);
+ offset->fY = Sk32_sat_add(srcOffset.fY, vec.fY);
+ return input;
+ } else {
+ SkIRect bounds;
+ const SkIRect srcBounds = SkIRect::MakeXYWH(srcOffset.fX, srcOffset.fY,
+ input->width(), input->height());
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // TODO: it seems like this clear shouldn't be necessary (see skbug.com/5075)
+ canvas->clear(0x0);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ canvas->translate(SkIntToScalar(srcOffset.fX - bounds.fLeft),
+ SkIntToScalar(srcOffset.fY - bounds.fTop));
+
+ input->draw(canvas, vec.fX, vec.fY, &paint);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+ }
+}
+
+SkRect SkOffsetImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.offset(fOffset.fX, fOffset.fY);
+ return bounds;
+}
+
+SkIRect SkOffsetImageFilterImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ SkIPoint vec = map_offset_vector(ctm, fOffset);
+ if (kReverse_MapDirection == dir) {
+ SkPointPriv::Negate(vec);
+ }
+
+ return src.makeOffset(vec);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkPaintImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkPaintImageFilter.cpp
new file mode 100644
index 0000000000..1aae4849d1
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkPaintImageFilter.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkPaintImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkPaintImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkPaintImageFilterImpl(const SkPaint& paint, const CropRect* rect)
+ : INHERITED(nullptr, 0, rect)
+ , fPaint(paint) {}
+
+ bool affectsTransparentBlack() const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void SkPaintImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkPaintImageFilterImpl)
+
+ SkPaint fPaint;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkPaintImageFilter::Make(const SkPaint& paint,
+ const SkImageFilter::CropRect* cropRect) {
+ return sk_sp<SkImageFilter>(new SkPaintImageFilterImpl(paint, cropRect));
+}
+
+void SkPaintImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkPaintImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkPaintImageFilter", SkPaintImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkPaintImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 0);
+ SkPaint paint;
+ buffer.readPaint(&paint, nullptr);
+ return SkPaintImageFilter::Make(paint, &common.cropRect());
+}
+
+void SkPaintImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePaint(fPaint);
+}
+
+sk_sp<SkSpecialImage> SkPaintImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIRect bounds;
+ const SkIRect srcBounds = SkIRect::MakeWH(ctx.sourceImage()->width(),
+ ctx.sourceImage()->height());
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
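+    // Draw under the CTM so that paint shaders are evaluated in local space. When the matrix is
+    // invertible, the rect covering the output bounds is inverse-mapped into local space so the
+    // draw still covers the whole surface; a non-finite mapped rect is skipped.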
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ SkRect rect = SkRect::MakeIWH(bounds.width(), bounds.height());
+ SkMatrix inverse;
+ if (matrix.invert(&inverse)) {
+ inverse.mapRect(&rect);
+ }
+ canvas->setMatrix(matrix);
+ if (rect.isFinite()) {
+ canvas->drawRect(rect, fPaint);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+bool SkPaintImageFilterImpl::affectsTransparentBlack() const {
+ return true;
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp
new file mode 100644
index 0000000000..63a43c7f08
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkPictureImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPicture.h"
+#include "include/effects/SkImageSource.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkPictureImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkPictureImageFilterImpl(sk_sp<SkPicture> picture, const SkRect& cropRect)
+ : INHERITED(nullptr, 0, nullptr)
+ , fPicture(std::move(picture))
+ , fCropRect(cropRect) {}
+
+protected:
+    /* Deserialization is handled by CreateProc (declared via SK_FLATTENABLE_HOOKS below), which
+     * reconstructs the filter from an SkReadBuffer of serialized picture data. Note: if
+     * construction requires bitmap decoding, the decoder must be set on the SkReadBuffer by
+     * calling SkReadBuffer::setBitmapDecoder() before unflattening.
+     */
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void SkPictureImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkPictureImageFilterImpl)
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fCropRect;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkPictureImageFilter::Make(sk_sp<SkPicture> picture) {
+ SkRect cropRect = picture ? picture->cullRect() : SkRect::MakeEmpty();
+ return Make(std::move(picture), cropRect);
+}
+
+sk_sp<SkImageFilter> SkPictureImageFilter::Make(sk_sp<SkPicture> picture, const SkRect& cropRect) {
+ return sk_sp<SkImageFilter>(new SkPictureImageFilterImpl(std::move(picture), cropRect));
+}
+
+void SkPictureImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkPictureImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkPictureImageFilter", SkPictureImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+enum PictureResolution {
+ kDeviceSpace_PictureResolution,
+ kLocalSpace_PictureResolution
+};
+static sk_sp<SkImageFilter> make_localspace_filter(sk_sp<SkPicture> pic, const SkRect& cropRect,
+ SkFilterQuality fq) {
+ SkISize dim = { SkScalarRoundToInt(cropRect.width()), SkScalarRoundToInt(cropRect.height()) };
+ auto img = SkImage::MakeFromPicture(std::move(pic), dim, nullptr, nullptr,
+ SkImage::BitDepth::kU8, SkColorSpace::MakeSRGB());
+ return SkImageSource::Make(img, cropRect, cropRect, fq);
+}
+
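+// Older SKPs (before kRemovePictureImageFilterLocalSpace) serialized a PictureResolution flag;
+// pictures recorded for local-space resolution are rebuilt as an SkImageSource over a rasterized
+// SkImage rather than as a picture filter.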
+sk_sp<SkFlattenable> SkPictureImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPicture> picture;
+ SkRect cropRect;
+
+ if (buffer.readBool()) {
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ buffer.readRect(&cropRect);
+
+ if (buffer.isVersionLT(SkPicturePriv::kRemovePictureImageFilterLocalSpace)) {
+ PictureResolution pictureResolution = buffer.checkRange<PictureResolution>(
+ kDeviceSpace_PictureResolution, kLocalSpace_PictureResolution);
+ if (kLocalSpace_PictureResolution == pictureResolution) {
+ return make_localspace_filter(std::move(picture), cropRect,
+ buffer.checkFilterQuality());
+ }
+ }
+ return SkPictureImageFilter::Make(std::move(picture), cropRect);
+}
+
+void SkPictureImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ bool hasPicture = (fPicture != nullptr);
+ buffer.writeBool(hasPicture);
+ if (hasPicture) {
+ SkPicturePriv::Flatten(fPicture, buffer);
+ }
+ buffer.writeRect(fCropRect);
+}
+
+sk_sp<SkSpecialImage> SkPictureImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ if (!fPicture) {
+ return nullptr;
+ }
+
+ SkRect floatBounds;
+ ctx.ctm().mapRect(&floatBounds, fCropRect);
+ SkIRect bounds = floatBounds.roundOut();
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ SkASSERT(!bounds.isEmpty());
+
+    // Given the standard usage of the picture image filter (i.e., to render content at a fixed
+    // resolution that most likely differs from the screen's), disable LCD text.
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size(), &props));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkASSERT(kUnknown_SkPixelGeometry == surf->props().pixelGeometry());
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+ canvas->clear(0x0);
+
+ canvas->translate(-SkIntToScalar(bounds.fLeft), -SkIntToScalar(bounds.fTop));
+ canvas->concat(ctx.ctm());
+ canvas->drawPicture(fPicture);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp
new file mode 100644
index 0000000000..987747dc00
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkTileImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurface.h"
+#include "include/effects/SkOffsetImageFilter.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace {
+
+class SkTileImageFilterImpl final : public SkImageFilter_Base {
+public:
+ SkTileImageFilterImpl(const SkRect& srcRect, const SkRect& dstRect, sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect) {}
+
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void SkTileImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkTileImageFilterImpl)
+
+ SkRect fSrcRect;
+ SkRect fDstRect;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkTileImageFilter::Make(const SkRect& srcRect, const SkRect& dstRect,
+ sk_sp<SkImageFilter> input) {
+ if (!SkIsValidRect(srcRect) || !SkIsValidRect(dstRect)) {
+ return nullptr;
+ }
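+    // When the source and destination rects are the same size no tiling occurs, so the filter
+    // reduces to an offset by (dst - src) cropped to their intersection.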
+ if (srcRect.width() == dstRect.width() && srcRect.height() == dstRect.height()) {
+ SkRect ir = dstRect;
+ if (!ir.intersect(srcRect)) {
+ return input;
+ }
+ SkImageFilter::CropRect cropRect(ir);
+ return SkOffsetImageFilter::Make(dstRect.x() - srcRect.x(),
+ dstRect.y() - srcRect.y(),
+ std::move(input),
+ &cropRect);
+ }
+ return sk_sp<SkImageFilter>(new SkTileImageFilterImpl(srcRect, dstRect, std::move(input)));
+}
+
+void SkTileImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkTileImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkTileImageFilter", SkTileImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkTileImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+ return SkTileImageFilter::Make(src, dst, common.getInput(0));
+}
+
+void SkTileImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeRect(fDstRect);
+}
+
+sk_sp<SkSpecialImage> SkTileImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkRect dstRect;
+ ctx.ctm().mapRect(&dstRect, fDstRect);
+ if (!dstRect.intersect(SkRect::Make(ctx.clipBounds()))) {
+ return nullptr;
+ }
+
+ const SkIRect dstIRect = dstRect.roundOut();
+ if (!fSrcRect.width() || !fSrcRect.height() || !dstIRect.width() || !dstIRect.height()) {
+ return nullptr;
+ }
+
+ SkRect srcRect;
+ ctx.ctm().mapRect(&srcRect, fSrcRect);
+ SkIRect srcIRect;
+ srcRect.roundOut(&srcIRect);
+ srcIRect.offset(-inputOffset);
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+
+ if (!SkIRect::Intersects(srcIRect, inputBounds)) {
+ return nullptr;
+ }
+
+    // We create an SkImage here because it needs to be a tight fit for the tiling
+ sk_sp<SkImage> subset;
+ if (inputBounds.contains(srcIRect)) {
+ subset = input->asImage(&srcIRect);
+ } else {
+ sk_sp<SkSurface> surf(input->makeTightSurface(ctx.colorType(), ctx.colorSpace(),
+ srcIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.x()), SkIntToScalar(inputOffset.y()),
+ &paint);
+
+ subset = surf->makeImageSnapshot();
+ }
+ if (!subset) {
+ return nullptr;
+ }
+ SkASSERT(subset->width() == srcIRect.width());
+ SkASSERT(subset->height() == srcIRect.height());
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(dstIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setShader(subset->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat));
+ canvas->translate(-dstRect.fLeft, -dstRect.fTop);
+ canvas->drawRect(dstRect, paint);
+ offset->fX = dstIRect.fLeft;
+ offset->fY = dstIRect.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkTileImageFilterImpl::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ SkRect rect = kReverse_MapDirection == dir ? fSrcRect : fDstRect;
+ ctm.mapRect(&rect);
+ return rect.roundOut();
+}
+
+SkIRect SkTileImageFilterImpl::onFilterBounds(const SkIRect& src, const SkMatrix&,
+ MapDirection, const SkIRect* inputRect) const {
+ // Don't recurse into inputs.
+ return src;
+}
+
+SkRect SkTileImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ return fDstRect;
+}
+
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkXfermodeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkXfermodeImageFilter.cpp
new file mode 100644
index 0000000000..14cf52feea
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkXfermodeImageFilter.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkXfermodeImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#endif
+#include "src/core/SkClipOpPriv.h"
+
+namespace {
+
+class SkXfermodeImageFilterImpl : public SkImageFilter_Base {
+public:
+ SkXfermodeImageFilterImpl(SkBlendMode mode, sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fMode(mode) {}
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const;
+#endif
+
+ void flatten(SkWriteBuffer&) const override;
+
+ void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const;
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> makeFGFrag(
+ std::unique_ptr<GrFragmentProcessor> bgFP) const;
+#endif
+
+private:
+ friend void SkXfermodeImageFilter::RegisterFlattenables();
+ SK_FLATTENABLE_HOOKS(SkXfermodeImageFilterImpl)
+
+ SkBlendMode fMode;
+
+ typedef SkImageFilter_Base INHERITED;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkXfermodeImageFilter::Make(SkBlendMode mode,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilter> inputs[2] = { std::move(background), std::move(foreground) };
+ return sk_sp<SkImageFilter>(new SkXfermodeImageFilterImpl(mode, inputs, cropRect));
+}
+
+void SkXfermodeImageFilter::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkXfermodeImageFilterImpl);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkXfermodeImageFilter_Base", SkXfermodeImageFilterImpl::CreateProc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static unsigned unflatten_blendmode(SkReadBuffer& buffer) {
+ unsigned mode = buffer.read32();
+ (void)buffer.validate(mode <= (unsigned)SkBlendMode::kLastMode);
+ return mode;
+}
+
+sk_sp<SkFlattenable> SkXfermodeImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ unsigned mode = unflatten_blendmode(buffer);
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkXfermodeImageFilter::Make((SkBlendMode)mode, common.getInput(0),
+ common.getInput(1), &common.cropRect());
+}
+
+void SkXfermodeImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.write32((unsigned)fMode);
+}
+
+sk_sp<SkSpecialImage> SkXfermodeImageFilterImpl::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> background(this->filterInput(0, ctx, &backgroundOffset));
+
+ SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> foreground(this->filterInput(1, ctx, &foregroundOffset));
+
+ SkIRect foregroundBounds = SkIRect::EmptyIRect();
+ if (foreground) {
+ foregroundBounds = SkIRect::MakeXYWH(foregroundOffset.x(), foregroundOffset.y(),
+ foreground->width(), foreground->height());
+ }
+
+ SkIRect srcBounds = SkIRect::EmptyIRect();
+ if (background) {
+ srcBounds = SkIRect::MakeXYWH(backgroundOffset.x(), backgroundOffset.y(),
+ background->width(), background->height());
+ }
+
+ srcBounds.join(foregroundBounds);
+ if (srcBounds.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+#if SK_SUPPORT_GPU
+ if (ctx.gpuBacked()) {
+ return this->filterImageGPU(ctx, background, backgroundOffset,
+ foreground, foregroundOffset, bounds);
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+    canvas->clear(0x0); // can't count on the background input to fully cover the bounds
+ canvas->translate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ if (background) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ background->draw(canvas,
+ SkIntToScalar(backgroundOffset.fX), SkIntToScalar(backgroundOffset.fY),
+ &paint);
+ }
+
+ this->drawForeground(canvas, foreground.get(), foregroundBounds);
+
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkXfermodeImageFilterImpl::onFilterBounds(const SkIRect& src,
+ const SkMatrix& ctm,
+ MapDirection dir,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir) {
+ return INHERITED::onFilterBounds(src, ctm, dir, inputRect);
+ }
+
+ SkASSERT(!inputRect);
+ SkASSERT(2 == this->countInputs());
+ auto getBackground = [&]() {
+ return this->getInput(0) ? this->getInput(0)->filterBounds(src, ctm, dir, inputRect) : src;
+ };
+ auto getForeground = [&]() {
+ return this->getInput(1) ? this->getInput(1)->filterBounds(src, ctm, dir, inputRect) : src;
+ };
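+    // The output bounds depend on the blend mode: kClear produces nothing; kSrc/kDstATop are
+    // bounded by the foreground; kDst/kSrcATop by the background; kSrcIn/kDstIn by their
+    // intersection; every other mode can touch the union of both inputs.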
+ switch (fMode) {
+ case SkBlendMode::kClear:
+ return SkIRect::MakeEmpty();
+
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kDstATop:
+ return getForeground();
+
+ case SkBlendMode::kDst:
+ case SkBlendMode::kSrcATop:
+ return getBackground();
+
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn: {
+ auto result = getBackground();
+ if (!result.intersect(getForeground())) {
+ return SkIRect::MakeEmpty();
+ }
+ return result;
+ }
+
+ default: {
+ auto result = getBackground();
+ result.join(getForeground());
+ return result;
+ }
+ }
+}
+
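+// Applies the foreground with the filter's blend mode, then applies the mode once more with
+// transparent black everywhere outside the foreground bounds, since modes like kSrcIn also
+// affect destination pixels that have no foreground coverage.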
+void SkXfermodeImageFilterImpl::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPaint paint;
+ paint.setBlendMode(fMode);
+ if (img) {
+ img->draw(canvas, SkIntToScalar(fgBounds.fLeft), SkIntToScalar(fgBounds.fTop), &paint);
+ }
+
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->clipRect(SkRect::Make(fgBounds), kDifference_SkClipOp);
+ paint.setColor(0);
+ canvas->drawPaint(paint);
+}
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+
+sk_sp<SkSpecialImage> SkXfermodeImageFilterImpl::filterImageGPU(
+ const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto context = ctx.getContext();
+
+ sk_sp<GrTextureProxy> backgroundProxy, foregroundProxy;
+
+ if (background) {
+ backgroundProxy = background->asTextureProxyRef(context);
+ }
+
+ if (foreground) {
+ foregroundProxy = foreground->asTextureProxyRef(context);
+ }
+
+ GrPaint paint;
+ std::unique_ptr<GrFragmentProcessor> bgFP;
+
+ if (backgroundProxy) {
+ SkIRect bgSubset = background->subset();
+ SkMatrix bgMatrix = SkMatrix::MakeTrans(
+ SkIntToScalar(bgSubset.left() - backgroundOffset.fX),
+ SkIntToScalar(bgSubset.top() - backgroundOffset.fY));
+ GrColorType bgColorType = SkColorTypeToGrColorType(background->colorType());
+ bgFP = GrTextureDomainEffect::Make(
+ std::move(backgroundProxy), bgColorType, bgMatrix,
+ GrTextureDomain::MakeTexelDomain(bgSubset, GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrSamplerState::Filter::kNearest);
+ bgFP = GrColorSpaceXformEffect::Make(std::move(bgFP), background->getColorSpace(),
+ background->alphaType(),
+ ctx.colorSpace());
+ } else {
+ bgFP = GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ }
+
+ if (foregroundProxy) {
+ SkIRect fgSubset = foreground->subset();
+ SkMatrix fgMatrix = SkMatrix::MakeTrans(
+ SkIntToScalar(fgSubset.left() - foregroundOffset.fX),
+ SkIntToScalar(fgSubset.top() - foregroundOffset.fY));
+ GrColorType fgColorType = SkColorTypeToGrColorType(foreground->colorType());
+ auto foregroundFP = GrTextureDomainEffect::Make(
+ std::move(foregroundProxy), fgColorType, fgMatrix,
+ GrTextureDomain::MakeTexelDomain(fgSubset, GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrSamplerState::Filter::kNearest);
+ foregroundFP = GrColorSpaceXformEffect::Make(std::move(foregroundFP),
+ foreground->getColorSpace(),
+ foreground->alphaType(),
+ ctx.colorSpace());
+ paint.addColorFragmentProcessor(std::move(foregroundFP));
+
+ std::unique_ptr<GrFragmentProcessor> xferFP = this->makeFGFrag(std::move(bgFP));
+
+        // A null 'xferFP' here means kSrc_Mode was used, in which case we can just proceed
+ if (xferFP) {
+ paint.addColorFragmentProcessor(std::move(xferFP));
+ }
+ } else {
+ paint.addColorFragmentProcessor(std::move(bgFP));
+ }
+
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox, bounds.width(), bounds.height(), ctx.grColorType(),
+ ctx.refColorSpace());
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, matrix,
+ SkRect::Make(bounds));
+
+ return SkSpecialImage::MakeDeferredFromGpu(context,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().colorType(),
+ renderTargetContext->colorInfo().refColorSpace());
+}
+
+std::unique_ptr<GrFragmentProcessor> SkXfermodeImageFilterImpl::makeFGFrag(
+ std::unique_ptr<GrFragmentProcessor> bgFP) const {
+ return GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(bgFP), fMode);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
new file mode 100644
index 0000000000..da7678a725
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkFontMgr_indirect.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+
+class SkData;
+
+class SkStyleSet_Indirect : public SkFontStyleSet {
+public:
+ /** Takes ownership of the SkRemotableFontIdentitySet. */
+ SkStyleSet_Indirect(const SkFontMgr_Indirect* owner, int familyIndex,
+ SkRemotableFontIdentitySet* data)
+ : fOwner(SkRef(owner)), fFamilyIndex(familyIndex), fData(data)
+ { }
+
+ int count() override { return fData->count(); }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* style) override {
+ if (fs) {
+ *fs = fData->at(index).fFontStyle;
+ }
+ if (style) {
+ // TODO: is this useful? Current locale?
+ style->reset();
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return fOwner->createTypefaceFromFontId(fData->at(index));
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ if (fFamilyIndex >= 0) {
+ SkFontIdentity id = fOwner->fProxy->matchIndexStyle(fFamilyIndex, pattern);
+ return fOwner->createTypefaceFromFontId(id);
+ }
+
+ return this->matchStyleCSS3(pattern);
+ }
+private:
+ sk_sp<const SkFontMgr_Indirect> fOwner;
+ int fFamilyIndex;
+ sk_sp<SkRemotableFontIdentitySet> fData;
+};
+
+int SkFontMgr_Indirect::onCountFamilies() const {
+ return 0;
+}
+
+void SkFontMgr_Indirect::onGetFamilyName(int index, SkString* familyName) const {
+ SK_ABORT("Not implemented");
+}
+
+SkFontStyleSet* SkFontMgr_Indirect::onCreateStyleSet(int index) const {
+ SK_ABORT("Not implemented");
+}
+
+SkFontStyleSet* SkFontMgr_Indirect::onMatchFamily(const char familyName[]) const {
+ return new SkStyleSet_Indirect(this, -1, fProxy->matchName(familyName));
+}
+
+SkTypeface* SkFontMgr_Indirect::createTypefaceFromFontId(const SkFontIdentity& id) const {
+ if (id.fDataId == SkFontIdentity::kInvalidDataId) {
+ return nullptr;
+ }
+
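+    // The data cache holds weak references. An entry matching both the data id and ttc index is
+    // returned directly; an entry matching only the data id is kept so its stream can be reused
+    // for a new face; expired entries are pruned as they are encountered.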
+ SkAutoMutexExclusive ama(fDataCacheMutex);
+
+ sk_sp<SkTypeface> dataTypeface;
+ int dataTypefaceIndex = 0;
+ for (int i = 0; i < fDataCache.count(); ++i) {
+ const DataEntry& entry = fDataCache[i];
+ if (entry.fDataId == id.fDataId) {
+ if (entry.fTtcIndex == id.fTtcIndex &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ return entry.fTypeface;
+ }
+ if (dataTypeface.get() == nullptr &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ dataTypeface.reset(entry.fTypeface);
+ dataTypefaceIndex = entry.fTtcIndex;
+ }
+ }
+
+ if (entry.fTypeface->weak_expired()) {
+ fDataCache.removeShuffle(i);
+ --i;
+ }
+ }
+
+ // No exact match, but did find a data match.
+ if (dataTypeface.get() != nullptr) {
+ std::unique_ptr<SkStreamAsset> stream(dataTypeface->openStream(nullptr));
+ if (stream.get() != nullptr) {
+ return fImpl->makeFromStream(std::move(stream), dataTypefaceIndex).release();
+ }
+ }
+
+ // No data match, request data and add entry.
+ std::unique_ptr<SkStreamAsset> stream(fProxy->getData(id.fDataId));
+ if (stream.get() == nullptr) {
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> typeface(fImpl->makeFromStream(std::move(stream), id.fTtcIndex));
+ if (typeface.get() == nullptr) {
+ return nullptr;
+ }
+
+ DataEntry& newEntry = fDataCache.push_back();
+ typeface->weak_ref();
+ newEntry.fDataId = id.fDataId;
+ newEntry.fTtcIndex = id.fTtcIndex;
+ newEntry.fTypeface = typeface.get(); // weak reference passed to new entry.
+
+ return typeface.release();
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const {
+ SkFontIdentity id = fProxy->matchNameStyle(familyName, fontStyle);
+ return this->createTypefaceFromFontId(id);
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const {
+ SkFontIdentity id = fProxy->matchNameStyleCharacter(familyName, style, bcp47,
+ bcp47Count, character);
+ return this->createTypefaceFromFontId(id);
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const {
+ SkString familyName;
+ familyMember->getFamilyName(&familyName);
+ return this->matchFamilyStyle(familyName.c_str(), fontStyle);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ return fImpl->makeFromStream(std::move(stream), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromFile(const char path[], int ttcIndex) const {
+ return fImpl->makeFromFile(path, ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return fImpl->makeFromData(std::move(data), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ sk_sp<SkTypeface> face(this->matchFamilyStyle(familyName, style));
+
+ if (nullptr == face.get()) {
+ face.reset(this->matchFamilyStyle(nullptr, style));
+ }
+
+ if (nullptr == face.get()) {
+ SkFontIdentity fontId = this->fProxy->matchIndexStyle(0, style);
+ face.reset(this->createTypefaceFromFontId(fontId));
+ }
+
+ return face;
+}
diff --git a/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
new file mode 100644
index 0000000000..9b89a53c2d
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/SkOnce.h"
+
+SkRemotableFontIdentitySet::SkRemotableFontIdentitySet(int count, SkFontIdentity** data)
+ : fCount(count), fData(count)
+{
+ SkASSERT(data);
+ *data = fData;
+}
+
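+// Returns a ref to a lazily created, process-lifetime singleton; SkOnce guarantees the empty set
+// is constructed exactly once even under concurrent callers.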
+SkRemotableFontIdentitySet* SkRemotableFontIdentitySet::NewEmpty() {
+ static SkOnce once;
+ static SkRemotableFontIdentitySet* empty;
+ once([]{ empty = new SkRemotableFontIdentitySet; });
+ return SkRef(empty);
+}
diff --git a/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.cpp b/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.cpp
new file mode 100644
index 0000000000..34b9c07717
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#define GL_GLEXT_PROTOTYPES
+#define EGL_EGLEXT_PROTOTYPES
+
+
+#include "src/gpu/GrAHardwareBufferImageGenerator.h"
+
+#include <android/hardware_buffer.h>
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkExchange.h"
+#include "src/core/SkMessageBus.h"
+#include "src/gpu/GrAHardwareBufferUtils.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrResourceProviderPriv.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/gl/GrGLDefines.h"
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkExtensions.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#endif
+
+#define PROT_CONTENT_EXT_STR "EGL_EXT_protected_content"
+#define EGL_PROTECTED_CONTENT_EXT 0x32C0
+
+std::unique_ptr<SkImageGenerator> GrAHardwareBufferImageGenerator::Make(
+ AHardwareBuffer* graphicBuffer, SkAlphaType alphaType, sk_sp<SkColorSpace> colorSpace,
+ GrSurfaceOrigin surfaceOrigin) {
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(graphicBuffer, &bufferDesc);
+
+ SkColorType colorType =
+ GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(bufferDesc.format);
+ SkImageInfo info = SkImageInfo::Make(bufferDesc.width, bufferDesc.height, colorType,
+ alphaType, std::move(colorSpace));
+
+ bool createProtectedImage = 0 != (bufferDesc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT);
+ return std::unique_ptr<SkImageGenerator>(new GrAHardwareBufferImageGenerator(
+ info, graphicBuffer, alphaType, createProtectedImage,
+ bufferDesc.format, surfaceOrigin));
+}
+
+GrAHardwareBufferImageGenerator::GrAHardwareBufferImageGenerator(const SkImageInfo& info,
+ AHardwareBuffer* hardwareBuffer, SkAlphaType alphaType, bool isProtectedContent,
+ uint32_t bufferFormat, GrSurfaceOrigin surfaceOrigin)
+ : INHERITED(info)
+ , fHardwareBuffer(hardwareBuffer)
+ , fBufferFormat(bufferFormat)
+ , fIsProtectedContent(isProtectedContent)
+ , fSurfaceOrigin(surfaceOrigin) {
+ AHardwareBuffer_acquire(fHardwareBuffer);
+}
+
+GrAHardwareBufferImageGenerator::~GrAHardwareBufferImageGenerator() {
+ AHardwareBuffer_release(fHardwareBuffer);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrTextureProxy> GrAHardwareBufferImageGenerator::makeProxy(GrRecordingContext* context) {
+ if (context->priv().abandoned()) {
+ return nullptr;
+ }
+
+ auto direct = context->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ GrBackendFormat backendFormat = GrAHardwareBufferUtils::GetBackendFormat(direct,
+ fHardwareBuffer,
+ fBufferFormat,
+ false);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(this->getInfo().colorType());
+ GrPixelConfig pixelConfig = context->priv().caps()->getConfigFromBackendFormat(backendFormat,
+ grColorType);
+
+ if (pixelConfig == kUnknown_GrPixelConfig) {
+ return nullptr;
+ }
+
+ int width = this->getInfo().width();
+ int height = this->getInfo().height();
+
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = pixelConfig;
+
+ GrTextureType textureType = GrTextureType::k2D;
+ if (context->backend() == GrBackendApi::kOpenGL) {
+ textureType = GrTextureType::kExternal;
+ } else if (context->backend() == GrBackendApi::kVulkan) {
+ VkFormat format;
+ SkAssertResult(backendFormat.asVkFormat(&format));
+ if (format == VK_FORMAT_UNDEFINED) {
+ textureType = GrTextureType::kExternal;
+ }
+ }
+
+ auto proxyProvider = context->priv().proxyProvider();
+
+ AHardwareBuffer* hardwareBuffer = fHardwareBuffer;
+ AHardwareBuffer_acquire(hardwareBuffer);
+
+ const bool isProtectedContent = fIsProtectedContent;
+
+ class AutoAHBRelease {
+ public:
+ AutoAHBRelease(AHardwareBuffer* ahb) : fAhb(ahb) {}
+ // std::function() must be CopyConstructible, but ours should never actually be copied.
+ AutoAHBRelease(const AutoAHBRelease&) { SkASSERT(0); }
+ AutoAHBRelease(AutoAHBRelease&& that) : fAhb(that.fAhb) { that.fAhb = nullptr; }
+        ~AutoAHBRelease() { if (fAhb) { AHardwareBuffer_release(fAhb); } }
+
+ AutoAHBRelease& operator=(AutoAHBRelease&& that) {
+ fAhb = skstd::exchange(that.fAhb, nullptr);
+ return *this;
+ }
+ AutoAHBRelease& operator=(const AutoAHBRelease&) = delete;
+
+ AHardwareBuffer* get() const { return fAhb; }
+
+ private:
+ AHardwareBuffer* fAhb;
+ };
+
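+    // Create the proxy lazily: the callback runs at instantiation time, imports the
+    // AHardwareBuffer as a backend texture (an EGLImage-backed external texture on GL, external
+    // memory on Vulkan), and attaches a release proc so the imported resources die with the
+    // GrTexture.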
+ sk_sp<GrTextureProxy> texProxy = proxyProvider->createLazyProxy(
+ [direct, buffer = AutoAHBRelease(hardwareBuffer), width, height, isProtectedContent,
+ backendFormat, grColorType](
+ GrResourceProvider* resourceProvider) -> GrSurfaceProxy::LazyCallbackResult {
+ GrAHardwareBufferUtils::DeleteImageProc deleteImageProc = nullptr;
+ GrAHardwareBufferUtils::UpdateImageProc updateImageProc = nullptr;
+ GrAHardwareBufferUtils::TexImageCtx texImageCtx = nullptr;
+
+ GrBackendTexture backendTex =
+ GrAHardwareBufferUtils::MakeBackendTexture(direct, buffer.get(),
+ width, height,
+ &deleteImageProc,
+ &updateImageProc,
+ &texImageCtx,
+ isProtectedContent,
+ backendFormat,
+ false);
+ if (!backendTex.isValid()) {
+ return {};
+ }
+ SkASSERT(deleteImageProc && texImageCtx);
+
+ // We make this texture cacheable to avoid recreating a GrTexture every time this
+                // is invoked. We know the owning SkImage will send an invalidation message when the
+ // image is destroyed, so the texture will be removed at that time.
+ sk_sp<GrTexture> tex = resourceProvider->wrapBackendTexture(
+ backendTex, grColorType, kBorrow_GrWrapOwnership, GrWrapCacheable::kYes,
+ kRead_GrIOType);
+ if (!tex) {
+ deleteImageProc(texImageCtx);
+ return {};
+ }
+
+ if (deleteImageProc) {
+ tex->setRelease(deleteImageProc, texImageCtx);
+ }
+
+ return tex;
+ },
+ backendFormat, desc, GrRenderable::kNo, 1, fSurfaceOrigin, GrMipMapped::kNo,
+ GrMipMapsStatus::kNotAllocated, GrInternalSurfaceFlags::kReadOnly, SkBackingFit::kExact,
+ SkBudgeted::kNo, GrProtected::kNo, GrSurfaceProxy::UseAllocator::kYes);
+
+ return texProxy;
+}
+
+sk_sp<GrTextureProxy> GrAHardwareBufferImageGenerator::onGenerateTexture(
+ GrRecordingContext* context, const SkImageInfo& info,
+ const SkIPoint& origin, bool willNeedMipMaps) {
+ sk_sp<GrTextureProxy> texProxy = this->makeProxy(context);
+ if (!texProxy) {
+ return nullptr;
+ }
+
+ if (0 == origin.fX && 0 == origin.fY &&
+ info.width() == this->getInfo().width() && info.height() == this->getInfo().height()) {
+ // If the caller wants the full texture we're done. The caller will handle making a copy for
+ // mip maps if that is required.
+ return texProxy;
+ }
+ // Otherwise, make a copy for the requested subset.
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height());
+
+ GrMipMapped mipMapped = willNeedMipMaps ? GrMipMapped::kYes : GrMipMapped::kNo;
+
+ GrColorType grColorType = SkColorTypeToGrColorType(this->getInfo().colorType());
+ return GrSurfaceProxy::Copy(context, texProxy.get(), grColorType, mipMapped, subset,
+ SkBackingFit::kExact, SkBudgeted::kYes);
+}
+
+bool GrAHardwareBufferImageGenerator::onIsValid(GrContext* context) const {
+ if (nullptr == context) {
+        return false; // The CPU backend is not supported, because the hardware buffer can be swizzled
+ }
+ return GrBackendApi::kOpenGL == context->backend() ||
+ GrBackendApi::kVulkan == context->backend();
+}
+
+#endif // defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
diff --git a/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.h b/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.h
new file mode 100644
index 0000000000..f200d39073
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAHardwareBufferImageGenerator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrAHardwareBufferImageGenerator_DEFINED
+#define GrAHardwareBufferImageGenerator_DEFINED
+
+#include "include/core/SkImageGenerator.h"
+
+#include "include/private/GrTypesPriv.h"
+
+class GrGpuResource;
+
+extern "C" {
+ typedef struct AHardwareBuffer AHardwareBuffer;
+}
+
+/**
+ * GrAHardwareBufferImageGenerator allows creating an SkImage attached to
+ * an existing Android native hardware buffer. The hardware buffer has to be
+ * created with AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE usage, because it is
+ * bound to an external texture using an EGLImage. The image generator will
+ * keep a reference to the hardware buffer for its lifetime. A hardware buffer
+ * can be shared between processes, and the same buffer can be used in multiple
+ * GPU contexts.
+ * To implement certain features like tiling, Skia may copy the texture to
+ * avoid OpenGL API limitations.
+ */
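+// A minimal usage sketch (buffer acquisition elided; local names are illustrative):
+//
+//   auto gen = GrAHardwareBufferImageGenerator::Make(buffer, kPremul_SkAlphaType,
+//                                                    SkColorSpace::MakeSRGB(),
+//                                                    kTopLeft_GrSurfaceOrigin);
+//   sk_sp<SkImage> image = SkImage::MakeFromGenerator(std::move(gen));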
+class GrAHardwareBufferImageGenerator : public SkImageGenerator {
+public:
+ static std::unique_ptr<SkImageGenerator> Make(AHardwareBuffer*, SkAlphaType,
+ sk_sp<SkColorSpace>, GrSurfaceOrigin);
+
+ ~GrAHardwareBufferImageGenerator() override;
+
+ static void DeleteGLTexture(void* ctx);
+
+protected:
+
+ bool onIsValid(GrContext*) const override;
+
+ TexGenType onCanGenerateTexture() const override { return TexGenType::kCheap; }
+ sk_sp<GrTextureProxy> onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
+ const SkIPoint&, bool willNeedMipMaps) override;
+
+private:
+ GrAHardwareBufferImageGenerator(const SkImageInfo&, AHardwareBuffer*, SkAlphaType,
+ bool isProtectedContent, uint32_t bufferFormat,
+ GrSurfaceOrigin surfaceOrigin);
+ sk_sp<GrTextureProxy> makeProxy(GrRecordingContext* context);
+
+ void releaseTextureRef();
+
+ static void ReleaseRefHelper_TextureReleaseProc(void* ctx);
+
+ AHardwareBuffer* fHardwareBuffer;
+ uint32_t fBufferFormat;
+ const bool fIsProtectedContent;
+ GrSurfaceOrigin fSurfaceOrigin;
+
+ typedef SkImageGenerator INHERITED;
+};
+#endif // GrAHardwareBufferImageGenerator_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.cpp b/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.cpp
new file mode 100644
index 0000000000..77a5ed067a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.cpp
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#define GL_GLEXT_PROTOTYPES
+#define EGL_EGLEXT_PROTOTYPES
+
+#include "src/gpu/GrAHardwareBufferUtils.h"
+
+#include <android/hardware_buffer.h>
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+
+#include "include/gpu/GrContext.h"
+#include "include/gpu/gl/GrGLTypes.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/gl/GrGLDefines.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkCaps.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#endif
+
+#define PROT_CONTENT_EXT_STR "EGL_EXT_protected_content"
+#define EGL_PROTECTED_CONTENT_EXT 0x32C0
+
+#define VK_CALL(X) gpu->vkInterface()->fFunctions.f##X;
+
+namespace GrAHardwareBufferUtils {
+
+SkColorType GetSkColorTypeFromBufferFormat(uint32_t bufferFormat) {
+ switch (bufferFormat) {
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ return kRGBA_8888_SkColorType;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ return kRGB_888x_SkColorType;
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return kRGBA_F16_SkColorType;
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ return kRGB_565_SkColorType;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ return kRGB_888x_SkColorType;
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ return kRGBA_1010102_SkColorType;
+ default:
+            // Given that we only use this texture as a source, colorType will not impact how Skia
+            // uses the texture. The only potential effect this is anticipated to have is that,
+            // for some format types, if we are not bound as an OES texture we may get invalid
+            // results for SKP capture if we read back the texture.
+ return kRGBA_8888_SkColorType;
+ }
+}
+
+GrBackendFormat GetBackendFormat(GrContext* context, AHardwareBuffer* hardwareBuffer,
+ uint32_t bufferFormat, bool requireKnownFormat) {
+ GrBackendApi backend = context->backend();
+
+ if (backend == GrBackendApi::kOpenGL) {
+ switch (bufferFormat) {
+            // TODO: find out if we can detect which graphics buffers support GR_GL_TEXTURE_2D
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ return GrBackendFormat::MakeGL(GR_GL_RGBA8, GR_GL_TEXTURE_EXTERNAL);
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return GrBackendFormat::MakeGL(GR_GL_RGBA16F, GR_GL_TEXTURE_EXTERNAL);
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ return GrBackendFormat::MakeGL(GR_GL_RGB565, GR_GL_TEXTURE_EXTERNAL);
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ return GrBackendFormat::MakeGL(GR_GL_RGB10_A2, GR_GL_TEXTURE_EXTERNAL);
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ return GrBackendFormat::MakeGL(GR_GL_RGB8, GR_GL_TEXTURE_EXTERNAL);
+ default:
+ if (requireKnownFormat) {
+ return GrBackendFormat();
+ } else {
+ return GrBackendFormat::MakeGL(GR_GL_RGBA8, GR_GL_TEXTURE_EXTERNAL);
+ }
+ }
+ } else if (backend == GrBackendApi::kVulkan) {
+#ifdef SK_VULKAN
+ switch (bufferFormat) {
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ return GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_UNORM);
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return GrBackendFormat::MakeVk(VK_FORMAT_R16G16B16A16_SFLOAT);
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ return GrBackendFormat::MakeVk(VK_FORMAT_R5G6B5_UNORM_PACK16);
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ return GrBackendFormat::MakeVk(VK_FORMAT_A2B10G10R10_UNORM_PACK32);
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ return GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_UNORM);
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ return GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8_UNORM);
+ default: {
+ if (requireKnownFormat) {
+ return GrBackendFormat();
+ } else {
+ GrVkGpu* gpu = static_cast<GrVkGpu*>(context->priv().getGpu());
+ SkASSERT(gpu);
+ VkDevice device = gpu->device();
+
+ if (!gpu->vkCaps().supportsAndroidHWBExternalMemory()) {
+ return GrBackendFormat();
+ }
+ VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
+ hwbFormatProps.sType =
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+ hwbFormatProps.pNext = nullptr;
+
+ VkAndroidHardwareBufferPropertiesANDROID hwbProps;
+ hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+ hwbProps.pNext = &hwbFormatProps;
+
+ VkResult err = VK_CALL(GetAndroidHardwareBufferProperties(device,
+ hardwareBuffer,
+ &hwbProps));
+ if (VK_SUCCESS != err) {
+ return GrBackendFormat();
+ }
+
+ if (hwbFormatProps.format != VK_FORMAT_UNDEFINED) {
+ return GrBackendFormat();
+ }
+
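+                    // VK_FORMAT_UNDEFINED means the buffer uses a driver-specific external
+                    // format, which can only be sampled through a VkSamplerYcbcrConversion
+                    // configured from the driver-suggested properties below.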
+ GrVkYcbcrConversionInfo ycbcrConversion;
+ ycbcrConversion.fYcbcrModel = hwbFormatProps.suggestedYcbcrModel;
+ ycbcrConversion.fYcbcrRange = hwbFormatProps.suggestedYcbcrRange;
+ ycbcrConversion.fXChromaOffset = hwbFormatProps.suggestedXChromaOffset;
+ ycbcrConversion.fYChromaOffset = hwbFormatProps.suggestedYChromaOffset;
+ ycbcrConversion.fForceExplicitReconstruction = VK_FALSE;
+ ycbcrConversion.fExternalFormat = hwbFormatProps.externalFormat;
+ ycbcrConversion.fFormatFeatures = hwbFormatProps.formatFeatures;
+ if (VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT &
+ hwbFormatProps.formatFeatures) {
+ ycbcrConversion.fChromaFilter = VK_FILTER_LINEAR;
+ } else {
+ ycbcrConversion.fChromaFilter = VK_FILTER_NEAREST;
+ }
+
+ return GrBackendFormat::MakeVk(ycbcrConversion);
+ }
+ }
+ }
+#else
+ return GrBackendFormat();
+#endif
+ }
+ return GrBackendFormat();
+}
+
+class GLTextureHelper {
+public:
+ GLTextureHelper(GrGLuint texID, EGLImageKHR image, EGLDisplay display, GrGLuint texTarget)
+ : fTexID(texID)
+ , fImage(image)
+ , fDisplay(display)
+ , fTexTarget(texTarget) { }
+ ~GLTextureHelper() {
+ glDeleteTextures(1, &fTexID);
+ // eglDestroyImageKHR will remove a ref from the AHardwareBuffer
+ eglDestroyImageKHR(fDisplay, fImage);
+ }
+ void rebind(GrContext* grContext);
+
+private:
+ GrGLuint fTexID;
+ EGLImageKHR fImage;
+ EGLDisplay fDisplay;
+ GrGLuint fTexTarget;
+};
+
+void GLTextureHelper::rebind(GrContext* grContext) {
+ glBindTexture(fTexTarget, fTexID);
+ GLenum status = GL_NO_ERROR;
+ if ((status = glGetError()) != GL_NO_ERROR) {
+ SkDebugf("glBindTexture(%#x, %d) failed (%#x)", (int) fTexTarget,
+ (int) fTexID, (int) status);
+ return;
+ }
+ glEGLImageTargetTexture2DOES(fTexTarget, fImage);
+ if ((status = glGetError()) != GL_NO_ERROR) {
+ SkDebugf("glEGLImageTargetTexture2DOES failed (%#x)", (int) status);
+ return;
+ }
+ grContext->resetContext(kTextureBinding_GrGLBackendState);
+}
+
+void delete_gl_texture(void* context) {
+ GLTextureHelper* cleanupHelper = static_cast<GLTextureHelper*>(context);
+ delete cleanupHelper;
+}
+
+void update_gl_texture(void* context, GrContext* grContext) {
+ GLTextureHelper* cleanupHelper = static_cast<GLTextureHelper*>(context);
+ cleanupHelper->rebind(grContext);
+}
+
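+// Imports the AHardwareBuffer into GL: wrap it in an EGLClientBuffer, create an EGLImage from it
+// (which takes a ref on the buffer), and bind that image to a freshly generated texture with
+// glEGLImageTargetTexture2DOES. Any failure unwinds the texture and the image.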
+static GrBackendTexture make_gl_backend_texture(
+ GrContext* context, AHardwareBuffer* hardwareBuffer,
+ int width, int height,
+ DeleteImageProc* deleteProc,
+ UpdateImageProc* updateProc,
+ TexImageCtx* imageCtx,
+ bool isProtectedContent,
+ const GrBackendFormat& backendFormat,
+ bool isRenderable) {
+    while (GL_NO_ERROR != glGetError()) {}  // clear any pre-existing GL errors
+
+ EGLClientBuffer clientBuffer = eglGetNativeClientBufferANDROID(hardwareBuffer);
+ EGLint attribs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
+ isProtectedContent ? EGL_PROTECTED_CONTENT_EXT : EGL_NONE,
+ isProtectedContent ? EGL_TRUE : EGL_NONE,
+ EGL_NONE };
+ EGLDisplay display = eglGetCurrentDisplay();
+ // eglCreateImageKHR will add a ref to the AHardwareBuffer
+ EGLImageKHR image = eglCreateImageKHR(display, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ clientBuffer, attribs);
+ if (EGL_NO_IMAGE_KHR == image) {
+ SkDebugf("Could not create EGL image, err = (%#x)", (int) eglGetError() );
+ return GrBackendTexture();
+ }
+
+ GrGLuint texID;
+ glGenTextures(1, &texID);
+ if (!texID) {
+ eglDestroyImageKHR(display, image);
+ return GrBackendTexture();
+ }
+
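+    // Renderable imports must use the GL_TEXTURE_2D target; sample-only imports use
+    // GL_TEXTURE_EXTERNAL, which also supports external (e.g. ycbcr) formats.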
+ GrGLuint target = isRenderable ? GR_GL_TEXTURE_2D : GR_GL_TEXTURE_EXTERNAL;
+
+ glBindTexture(target, texID);
+ GLenum status = GL_NO_ERROR;
+ if ((status = glGetError()) != GL_NO_ERROR) {
+ SkDebugf("glBindTexture failed (%#x)", (int) status);
+ glDeleteTextures(1, &texID);
+ eglDestroyImageKHR(display, image);
+ return GrBackendTexture();
+ }
+ glEGLImageTargetTexture2DOES(target, image);
+ if ((status = glGetError()) != GL_NO_ERROR) {
+ SkDebugf("glEGLImageTargetTexture2DOES failed (%#x)", (int) status);
+ glDeleteTextures(1, &texID);
+ eglDestroyImageKHR(display, image);
+ return GrBackendTexture();
+ }
+ context->resetContext(kTextureBinding_GrGLBackendState);
+
+ GrGLTextureInfo textureInfo;
+ textureInfo.fID = texID;
+ SkASSERT(backendFormat.isValid());
+ textureInfo.fTarget = target;
+ textureInfo.fFormat = GrGLFormatToEnum(backendFormat.asGLFormat());
+
+ *deleteProc = delete_gl_texture;
+ *updateProc = update_gl_texture;
+ *imageCtx = new GLTextureHelper(texID, image, display, target);
+
+ return GrBackendTexture(width, height, GrMipMapped::kNo, textureInfo);
+}
+
+#ifdef SK_VULKAN
+class VulkanCleanupHelper {
+public:
+ VulkanCleanupHelper(GrVkGpu* gpu, VkImage image, VkDeviceMemory memory)
+ : fDevice(gpu->device())
+ , fImage(image)
+ , fMemory(memory)
+ , fDestroyImage(gpu->vkInterface()->fFunctions.fDestroyImage)
+ , fFreeMemory(gpu->vkInterface()->fFunctions.fFreeMemory) {}
+ ~VulkanCleanupHelper() {
+ fDestroyImage(fDevice, fImage, nullptr);
+ fFreeMemory(fDevice, fMemory, nullptr);
+ }
+private:
+ VkDevice fDevice;
+ VkImage fImage;
+ VkDeviceMemory fMemory;
+ PFN_vkDestroyImage fDestroyImage;
+ PFN_vkFreeMemory fFreeMemory;
+};
+
+void delete_vk_image(void* context) {
+ VulkanCleanupHelper* cleanupHelper = static_cast<VulkanCleanupHelper*>(context);
+ delete cleanupHelper;
+}
+
+void update_vk_image(void* context, GrContext* grContext) {
+ // no op
+}
+
+static GrBackendTexture make_vk_backend_texture(
+ GrContext* context, AHardwareBuffer* hardwareBuffer,
+ int width, int height,
+ DeleteImageProc* deleteProc,
+ UpdateImageProc* updateProc,
+ TexImageCtx* imageCtx,
+ bool isProtectedContent,
+ const GrBackendFormat& backendFormat,
+ bool isRenderable) {
+ SkASSERT(context->backend() == GrBackendApi::kVulkan);
+ GrVkGpu* gpu = static_cast<GrVkGpu*>(context->priv().getGpu());
+
+ VkPhysicalDevice physicalDevice = gpu->physicalDevice();
+ VkDevice device = gpu->device();
+
+ SkASSERT(gpu);
+
+ if (!gpu->vkCaps().supportsAndroidHWBExternalMemory()) {
+ return GrBackendTexture();
+ }
+
+ VkFormat format;
+ SkAssertResult(backendFormat.asVkFormat(&format));
+
+ VkResult err;
+
+ VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
+ hwbFormatProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+ hwbFormatProps.pNext = nullptr;
+
+ VkAndroidHardwareBufferPropertiesANDROID hwbProps;
+ hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+ hwbProps.pNext = &hwbFormatProps;
+
+ err = VK_CALL(GetAndroidHardwareBufferProperties(device, hardwareBuffer, &hwbProps));
+ if (VK_SUCCESS != err) {
+ return GrBackendTexture();
+ }
+
+ VkExternalFormatANDROID externalFormat;
+ externalFormat.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
+ externalFormat.pNext = nullptr;
+ externalFormat.externalFormat = 0; // If this is zero it is as if we aren't using this struct.
+
+ const GrVkYcbcrConversionInfo* ycbcrConversion = backendFormat.getVkYcbcrConversionInfo();
+ if (!ycbcrConversion) {
+ return GrBackendTexture();
+ }
+
+ if (hwbFormatProps.format != VK_FORMAT_UNDEFINED) {
+        // TODO: We should not assume the transfer features here and instead should have a way for
+        // Ganesh's tracking of internal images to report whether or not they support transfers.
+ SkASSERT(SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures) &&
+ SkToBool(VK_FORMAT_FEATURE_TRANSFER_SRC_BIT & hwbFormatProps.formatFeatures) &&
+ SkToBool(VK_FORMAT_FEATURE_TRANSFER_DST_BIT & hwbFormatProps.formatFeatures));
+ SkASSERT(!ycbcrConversion->isValid());
+ } else {
+ SkASSERT(ycbcrConversion->isValid());
+ // We have an external only format
+ SkASSERT(SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures));
+ SkASSERT(format == VK_FORMAT_UNDEFINED);
+ SkASSERT(hwbFormatProps.externalFormat == ycbcrConversion->fExternalFormat);
+ externalFormat.externalFormat = hwbFormatProps.externalFormat;
+ }
+ SkASSERT(format == hwbFormatProps.format);
+
+ const VkExternalMemoryImageCreateInfo externalMemoryImageInfo{
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, // sType
+ &externalFormat, // pNext
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes
+ };
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (format != VK_FORMAT_UNDEFINED) {
+ usageFlags = usageFlags |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (isRenderable) {
+ usageFlags = usageFlags | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+ }
+
+ // TODO: Check the supported tilings vkGetPhysicalDeviceImageFormatProperties2 to see if we have
+ // to use linear. Add better linear support throughout Ganesh.
+ VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ &externalMemoryImageInfo, // pNext
+ 0, // VkImageCreateFlags
+ VK_IMAGE_TYPE_2D, // VkImageType
+ format, // VkFormat
+ { (uint32_t)width, (uint32_t)height, 1 }, // VkExtent3D
+ 1, // mipLevels
+ 1, // arrayLayers
+ VK_SAMPLE_COUNT_1_BIT, // samples
+ tiling, // VkImageTiling
+ usageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
+ };
+
+ VkImage image;
+ err = VK_CALL(CreateImage(device, &imageCreateInfo, nullptr, &image));
+ if (VK_SUCCESS != err) {
+ return GrBackendTexture();
+ }
+
+ VkPhysicalDeviceMemoryProperties2 phyDevMemProps;
+ phyDevMemProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
+ phyDevMemProps.pNext = nullptr;
+
+ uint32_t typeIndex = 0;
+ uint32_t heapIndex = 0;
+ bool foundHeap = false;
+ VK_CALL(GetPhysicalDeviceMemoryProperties2(physicalDevice, &phyDevMemProps));
+ uint32_t memTypeCnt = phyDevMemProps.memoryProperties.memoryTypeCount;
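+    // Pick a device-local memory type from the set the hardware buffer reports as
+    // importable (hwbProps.memoryTypeBits).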
+ for (uint32_t i = 0; i < memTypeCnt && !foundHeap; ++i) {
+ if (hwbProps.memoryTypeBits & (1 << i)) {
+ const VkPhysicalDeviceMemoryProperties& pdmp = phyDevMemProps.memoryProperties;
+ uint32_t supportedFlags = pdmp.memoryTypes[i].propertyFlags &
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ if (supportedFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
+ typeIndex = i;
+ heapIndex = pdmp.memoryTypes[i].heapIndex;
+ foundHeap = true;
+ }
+ }
+ }
+ if (!foundHeap) {
+ VK_CALL(DestroyImage(device, image, nullptr));
+ return GrBackendTexture();
+ }
+
+ VkImportAndroidHardwareBufferInfoANDROID hwbImportInfo;
+ hwbImportInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
+ hwbImportInfo.pNext = nullptr;
+ hwbImportInfo.buffer = hardwareBuffer;
+
+ VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
+ dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+ dedicatedAllocInfo.pNext = &hwbImportInfo;
+ dedicatedAllocInfo.image = image;
+ dedicatedAllocInfo.buffer = VK_NULL_HANDLE;
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ &dedicatedAllocInfo, // pNext
+ hwbProps.allocationSize, // allocationSize
+ typeIndex, // memoryTypeIndex
+ };
+
+ VkDeviceMemory memory;
+
+ err = VK_CALL(AllocateMemory(device, &allocInfo, nullptr, &memory));
+ if (VK_SUCCESS != err) {
+ VK_CALL(DestroyImage(device, image, nullptr));
+ return GrBackendTexture();
+ }
+
+ VkBindImageMemoryInfo bindImageInfo;
+ bindImageInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
+ bindImageInfo.pNext = nullptr;
+ bindImageInfo.image = image;
+ bindImageInfo.memory = memory;
+ bindImageInfo.memoryOffset = 0;
+
+ err = VK_CALL(BindImageMemory2(device, 1, &bindImageInfo));
+ if (VK_SUCCESS != err) {
+ VK_CALL(DestroyImage(device, image, nullptr));
+ VK_CALL(FreeMemory(device, memory, nullptr));
+ return GrBackendTexture();
+ }
+
+ GrVkImageInfo imageInfo;
+
+ imageInfo.fImage = image;
+ imageInfo.fAlloc = GrVkAlloc(memory, 0, hwbProps.allocationSize, 0);
+ imageInfo.fImageTiling = tiling;
+ imageInfo.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageInfo.fFormat = format;
+ imageInfo.fLevelCount = 1;
+    // TODO: This should possibly be VK_QUEUE_FAMILY_FOREIGN_EXT but current Adreno devices do not
+    // support that extension. Alternatively, if we know the AHardwareBuffer does not come from a
+    // "foreign" device, we can leave the queue family as external.
+ imageInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;
+ imageInfo.fYcbcrConversionInfo = *ycbcrConversion;
+
+ *deleteProc = delete_vk_image;
+ *updateProc = update_vk_image;
+ *imageCtx = new VulkanCleanupHelper(gpu, image, memory);
+
+ return GrBackendTexture(width, height, imageInfo);
+}
+#endif
+
+static bool can_import_protected_content_eglimpl() {
+ EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ const char* exts = eglQueryString(dpy, EGL_EXTENSIONS);
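+    // Look for the extension as a complete, space-delimited token in the extension list:
+    // as the whole string, at the start, at the end, or somewhere in the middle.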
+ size_t cropExtLen = strlen(PROT_CONTENT_EXT_STR);
+ size_t extsLen = strlen(exts);
+ bool equal = !strcmp(PROT_CONTENT_EXT_STR, exts);
+ bool atStart = !strncmp(PROT_CONTENT_EXT_STR " ", exts, cropExtLen+1);
+ bool atEnd = (cropExtLen+1) < extsLen
+ && !strcmp(" " PROT_CONTENT_EXT_STR,
+ exts + extsLen - (cropExtLen+1));
+ bool inMiddle = strstr(exts, " " PROT_CONTENT_EXT_STR " ");
+ return equal || atStart || atEnd || inMiddle;
+}
+
+static bool can_import_protected_content(GrContext* context) {
+ if (GrBackendApi::kOpenGL == context->backend()) {
+ // Only compute whether the extension is present once the first time this
+ // function is called.
+ static bool hasIt = can_import_protected_content_eglimpl();
+ return hasIt;
+ }
+ return false;
+}
+
+GrBackendTexture MakeBackendTexture(GrContext* context, AHardwareBuffer* hardwareBuffer,
+ int width, int height,
+ DeleteImageProc* deleteProc,
+ UpdateImageProc* updateProc,
+ TexImageCtx* imageCtx,
+ bool isProtectedContent,
+ const GrBackendFormat& backendFormat,
+ bool isRenderable) {
+ if (context->abandoned()) {
+ return GrBackendTexture();
+ }
+ bool createProtectedImage = isProtectedContent && can_import_protected_content(context);
+
+ if (GrBackendApi::kOpenGL == context->backend()) {
+ return make_gl_backend_texture(context, hardwareBuffer, width, height, deleteProc,
+ updateProc, imageCtx, createProtectedImage, backendFormat,
+ isRenderable);
+ } else {
+ SkASSERT(GrBackendApi::kVulkan == context->backend());
+#ifdef SK_VULKAN
+        // Currently we don't support protected images on Vulkan.
+ SkASSERT(!createProtectedImage);
+ return make_vk_backend_texture(context, hardwareBuffer, width, height, deleteProc,
+ updateProc, imageCtx, createProtectedImage, backendFormat,
+ isRenderable);
+#else
+ return GrBackendTexture();
+#endif
+ }
+}
+
+} // GrAHardwareBufferUtils
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.h b/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.h
new file mode 100644
index 0000000000..d0982f4bf2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAHardwareBufferUtils.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrAHardwareBufferUtils_DEFINED
+#define GrAHardwareBufferUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrTypes.h"
+
+class GrContext;
+
+extern "C" {
+ typedef struct AHardwareBuffer AHardwareBuffer;
+}
+
+namespace GrAHardwareBufferUtils {
+
+SkColorType GetSkColorTypeFromBufferFormat(uint32_t bufferFormat);
+
+GrBackendFormat GetBackendFormat(GrContext* context, AHardwareBuffer* hardwareBuffer,
+ uint32_t bufferFormat, bool requireKnownFormat);
+
+typedef void* TexImageCtx;
+typedef void (*DeleteImageProc)(TexImageCtx);
+typedef void (*UpdateImageProc)(TexImageCtx, GrContext*);
+
+/**
+ * Create a GrBackendTexture from an AHardwareBuffer.
+ *
+ * @param context GPU context
+ * @param hardwareBuffer AHB
+ * @param width texture width
+ * @param height texture height
+ * @param deleteProc returns a function that deletes the texture and
+ * other GPU resources. Must be invoked on the same
+ * thread as MakeBackendTexture
+ * @param updateProc      returns a function that needs to be invoked when the
+ *                        AHB's contents have changed. Must be invoked on
+ *                        the same thread as MakeBackendTexture
+ * @param imageCtx        returns an opaque image context that is passed as the
+ *                        first argument to deleteProc and updateProc
+ * @param isProtectedContent if true, GL backend uses EXT_protected_content
+ * @param backendFormat backend format, usually created with helper
+ * function GetBackendFormat
+ * @param isRenderable true if GrBackendTexture can be used as a color
+ * attachment
+ * @return valid GrBackendTexture object on success
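+ *
+ * A minimal call sequence might look like the following (an illustrative sketch only;
+ * error handling is omitted and the variable names are hypothetical):
+ *
+ *   DeleteImageProc deleteProc = nullptr;
+ *   UpdateImageProc updateProc = nullptr;
+ *   TexImageCtx imageCtx = nullptr;
+ *   GrBackendFormat format = GetBackendFormat(context, buffer, bufferFormat, false);
+ *   GrBackendTexture tex = MakeBackendTexture(context, buffer, width, height,
+ *                                             &deleteProc, &updateProc, &imageCtx,
+ *                                             false, format, false);
+ *   // ... later, after the AHB's contents change:
+ *   updateProc(imageCtx, context);
+ *   // ... once the texture is no longer in use:
+ *   deleteProc(imageCtx);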
+ */
+GrBackendTexture MakeBackendTexture(GrContext* context, AHardwareBuffer* hardwareBuffer,
+ int width, int height,
+ DeleteImageProc* deleteProc,
+ UpdateImageProc* updateProc,
+ TexImageCtx* imageCtx,
+ bool isProtectedContent,
+ const GrBackendFormat& backendFormat,
+ bool isRenderable);
+
+} // GrAHardwareBufferUtils
+
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAllocator.h b/gfx/skia/skia/src/gpu/GrAllocator.h
new file mode 100644
index 0000000000..b2945e8ab6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAllocator.h
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAllocator_DEFINED
+#define GrAllocator_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrConfig.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTArray.h"
+#include <new>
+
+class GrAllocator : SkNoncopyable {
+public:
+ ~GrAllocator() { this->reset(); }
+
+ /**
+ * Create an allocator
+ *
+ * @param itemSize the size of each item to allocate
+ * @param itemsPerBlock the number of items to allocate at once
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least itemSize*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ GrAllocator(size_t itemSize, int itemsPerBlock, void* initialBlock)
+ : fItemSize(itemSize)
+ , fItemsPerBlock(itemsPerBlock)
+ , fOwnFirstBlock(nullptr == initialBlock)
+ , fCount(0)
+ , fInsertionIndexInBlock(0) {
+ SkASSERT(itemsPerBlock > 0);
+ fBlockSize = fItemSize * fItemsPerBlock;
+ if (fOwnFirstBlock) {
+            // This forces us to allocate a new block on push_back().
+ fInsertionIndexInBlock = fItemsPerBlock;
+ } else {
+ fBlocks.push_back() = initialBlock;
+ fInsertionIndexInBlock = 0;
+ }
+ }
+
+ /**
+ * Adds an item and returns pointer to it.
+ *
+ * @return pointer to the added item.
+ */
+ void* push_back() {
+ // we always have at least one block
+ if (fItemsPerBlock == fInsertionIndexInBlock) {
+ fBlocks.push_back() = sk_malloc_throw(fBlockSize);
+ fInsertionIndexInBlock = 0;
+ }
+ void* ret = (char*)fBlocks.back() + fItemSize * fInsertionIndexInBlock;
+ ++fCount;
+ ++fInsertionIndexInBlock;
+ return ret;
+ }
+
+ /**
+ * Remove the last item, only call if count() != 0
+ */
+ void pop_back() {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ --fInsertionIndexInBlock;
+ --fCount;
+ if (0 == fInsertionIndexInBlock) {
+ // Never delete the first block
+ if (fBlocks.count() > 1) {
+ sk_free(fBlocks.back());
+ fBlocks.pop_back();
+ fInsertionIndexInBlock = fItemsPerBlock;
+ }
+ }
+ }
+
+ /**
+ * Removes all added items.
+ */
+ void reset() {
+ int firstBlockToFree = fOwnFirstBlock ? 0 : 1;
+ for (int i = firstBlockToFree; i < fBlocks.count(); ++i) {
+ sk_free(fBlocks[i]);
+ }
+ if (fOwnFirstBlock) {
+ fBlocks.reset();
+            // This forces us to allocate a new block on push_back().
+ fInsertionIndexInBlock = fItemsPerBlock;
+ } else {
+ fBlocks.pop_back_n(fBlocks.count() - 1);
+ fInsertionIndexInBlock = 0;
+ }
+ fCount = 0;
+ }
+
+ /**
+ * Returns the item count.
+ */
+ int count() const {
+ return fCount;
+ }
+
+ /**
+ * Is the count 0?
+ */
+ bool empty() const { return 0 == fCount; }
+
+ /**
+ * Access first item, only call if count() != 0
+ */
+ void* front() {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (char*)(fBlocks.front());
+ }
+
+ /**
+ * Access first item, only call if count() != 0
+ */
+ const void* front() const {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (const char*)(fBlocks.front());
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ void* back() {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (char*)(fBlocks.back()) + (fInsertionIndexInBlock - 1) * fItemSize;
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ const void* back() const {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (const char*)(fBlocks.back()) + (fInsertionIndexInBlock - 1) * fItemSize;
+ }
+
+ /**
+ * Iterates through the allocator. This is faster than using operator[] when walking linearly
+ * through the allocator.
+ */
+ class Iter {
+ public:
+ /**
+ * Initializes the iterator. next() must be called before get().
+ */
+ Iter(const GrAllocator* allocator)
+ : fAllocator(allocator)
+ , fBlockIndex(-1)
+ , fIndexInBlock(allocator->fItemsPerBlock - 1)
+ , fItemIndex(-1) {}
+
+ /**
+ * Advances the iterator. Iteration is finished when next() returns false.
+ */
+ bool next() {
+ ++fIndexInBlock;
+ ++fItemIndex;
+ if (fIndexInBlock == fAllocator->fItemsPerBlock) {
+ ++fBlockIndex;
+ fIndexInBlock = 0;
+ }
+ return fItemIndex < fAllocator->fCount;
+ }
+
+ /**
+ * Gets the current iterator value. Call next() at least once before calling. Don't call
+ * after next() returns false.
+ */
+ void* get() const {
+ SkASSERT(fItemIndex >= 0 && fItemIndex < fAllocator->fCount);
+ return (char*) fAllocator->fBlocks[fBlockIndex] + fIndexInBlock * fAllocator->fItemSize;
+ }
+
+ private:
+ const GrAllocator* fAllocator;
+ int fBlockIndex;
+ int fIndexInBlock;
+ int fItemIndex;
+ };
+
+ /**
+ * Access item by index.
+ */
+ void* operator[] (int i) {
+ SkASSERT(i >= 0 && i < fCount);
+ return (char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+ /**
+ * Access item by index.
+ */
+ const void* operator[] (int i) const {
+ SkASSERT(i >= 0 && i < fCount);
+ return (const char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+protected:
+ /**
+ * Set first block of memory to write into. Must be called before any other methods.
+ * This requires that you have passed nullptr in the constructor.
+ *
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least itemSize*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ void setInitialBlock(void* initialBlock) {
+ SkASSERT(0 == fCount);
+ SkASSERT(0 == fBlocks.count());
+ SkASSERT(fItemsPerBlock == fInsertionIndexInBlock);
+ fOwnFirstBlock = false;
+ fBlocks.push_back() = initialBlock;
+ fInsertionIndexInBlock = 0;
+ }
+
+ // For access to above function.
+ template <typename T> friend class GrTAllocator;
+
+private:
+ static const int NUM_INIT_BLOCK_PTRS = 8;
+
+ SkSTArray<NUM_INIT_BLOCK_PTRS, void*, true> fBlocks;
+ size_t fBlockSize;
+ size_t fItemSize;
+ int fItemsPerBlock;
+ bool fOwnFirstBlock;
+ int fCount;
+ int fInsertionIndexInBlock;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+template <typename T> class GrTAllocator;
+template <typename T> void* operator new(size_t, GrTAllocator<T>*);
+
+template <typename T> class GrTAllocator : SkNoncopyable {
+public:
+ virtual ~GrTAllocator() { this->reset(); }
+
+ /**
+ * Create an allocator
+ *
+ * @param itemsPerBlock the number of items to allocate at once
+ */
+ explicit GrTAllocator(int itemsPerBlock)
+ : fAllocator(sizeof(T), itemsPerBlock, nullptr) {}
+
+ /**
+ * Adds an item and returns it.
+ *
+ * @return the added item.
+ */
+ T& push_back() {
+ void* item = fAllocator.push_back();
+ SkASSERT(item);
+ new (item) T;
+ return *(T*)item;
+ }
+
+ T& push_back(const T& t) {
+ void* item = fAllocator.push_back();
+ SkASSERT(item);
+ new (item) T(t);
+ return *(T*)item;
+ }
+
+ template <typename... Args> T& emplace_back(Args&&... args) {
+ void* item = fAllocator.push_back();
+ SkASSERT(item);
+ new (item) T(std::forward<Args>(args)...);
+ return *(T*)item;
+ }
+
+ /**
+ * Remove the last item, only call if count() != 0
+ */
+ void pop_back() {
+ this->back().~T();
+ fAllocator.pop_back();
+ }
+
+ /**
+ * Removes all added items.
+ */
+ void reset() {
+ int c = fAllocator.count();
+ for (int i = 0; i < c; ++i) {
+ ((T*)fAllocator[i])->~T();
+ }
+ fAllocator.reset();
+ }
+
+ /**
+ * Returns the item count.
+ */
+ int count() const {
+ return fAllocator.count();
+ }
+
+ /**
+ * Is the count 0?
+ */
+ bool empty() const { return fAllocator.empty(); }
+
+ /**
+ * Access first item, only call if count() != 0
+ */
+ T& front() {
+ return *(T*)fAllocator.front();
+ }
+
+ /**
+ * Access first item, only call if count() != 0
+ */
+ const T& front() const {
+ return *(T*)fAllocator.front();
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ T& back() {
+ return *(T*)fAllocator.back();
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ const T& back() const {
+ return *(const T*)fAllocator.back();
+ }
+
+ /**
+ * Iterates through the allocator. This is faster than using operator[] when walking linearly
+ * through the allocator.
+ */
+ class Iter {
+ public:
+ /**
+ * Initializes the iterator. next() must be called before get() or ops * and ->.
+ */
+ Iter(const GrTAllocator* allocator) : fImpl(&allocator->fAllocator) {}
+
+ /**
+ * Advances the iterator. Iteration is finished when next() returns false.
+ */
+ bool next() { return fImpl.next(); }
+
+ /**
+ * Gets the current iterator value. Call next() at least once before calling. Don't call
+ * after next() returns false.
+ */
+ T* get() const { return (T*) fImpl.get(); }
+
+ /**
+ * Convenience operators. Same rules for calling apply as get().
+ */
+ T& operator*() const { return *this->get(); }
+ T* operator->() const { return this->get(); }
+
+ private:
+ GrAllocator::Iter fImpl;
+ };
+
+ /**
+ * Access item by index.
+ */
+ T& operator[] (int i) {
+ return *(T*)(fAllocator[i]);
+ }
+
+ /**
+ * Access item by index.
+ */
+ const T& operator[] (int i) const {
+ return *(const T*)(fAllocator[i]);
+ }
+
+protected:
+ /*
+ * Set first block of memory to write into. Must be called before any other methods.
+ *
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least size(T)*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ void setInitialBlock(void* initialBlock) {
+ fAllocator.setInitialBlock(initialBlock);
+ }
+
+private:
+ friend void* operator new<T>(size_t, GrTAllocator*);
+
+ GrAllocator fAllocator;
+ typedef SkNoncopyable INHERITED;
+};
+
+template <int N, typename T> class GrSTAllocator : public GrTAllocator<T> {
+private:
+ typedef GrTAllocator<T> INHERITED;
+
+public:
+ GrSTAllocator() : INHERITED(N) {
+ this->setInitialBlock(fStorage.get());
+ }
+
+private:
+ SkAlignedSTStorage<N, T> fStorage;
+};
+
+template <typename T> void* operator new(size_t size, GrTAllocator<T>* allocator) {
+ return allocator->fAllocator.push_back();
+}
+
+// Skia doesn't use C++ exceptions but it may be compiled with them enabled. Having an op delete
+// to match the op new silences warnings about missing op delete when a constructor throws an
+// exception.
+template <typename T> void operator delete(void*, GrTAllocator<T>*) {
+ SK_ABORT("Invalid Operation");
+}
+
+#define GrNEW_APPEND_TO_ALLOCATOR(allocator_ptr, type_name, args) \
+ new (allocator_ptr) type_name args
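+
+// Illustrative usage of GrTAllocator (a sketch only; 'SomeType' and its arguments are
+// hypothetical placeholders):
+//
+//   GrTAllocator<SomeType> alloc(32);              // 32 items per block
+//   SomeType& s = alloc.emplace_back(arg1, arg2);  // constructed in place
+//   GrNEW_APPEND_TO_ALLOCATOR(&alloc, SomeType, (arg1, arg2));  // equivalent, via the macro
+//   for (GrTAllocator<SomeType>::Iter iter(&alloc); iter.next();) {
+//       iter->doSomething();
+//   }
+//   alloc.reset();                                 // runs ~SomeType() on every item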
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAppliedClip.h b/gfx/skia/skia/src/gpu/GrAppliedClip.h
new file mode 100644
index 0000000000..5e6e4cacce
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAppliedClip.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAppliedClip_DEFINED
+#define GrAppliedClip_DEFINED
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrScissorState.h"
+#include "src/gpu/GrWindowRectsState.h"
+
+#include "src/core/SkClipStack.h"
+
+
+/**
+ * Produced by GrHardClip. It provides a set of modifications to the hardware drawing state that
+ * implement the clip.
+ */
+class GrAppliedHardClip {
+public:
+ GrAppliedHardClip() = default;
+ GrAppliedHardClip(GrAppliedHardClip&& that) = default;
+ GrAppliedHardClip(const GrAppliedHardClip&) = delete;
+
+ const GrScissorState& scissorState() const { return fScissorState; }
+ const GrWindowRectsState& windowRectsState() const { return fWindowRectsState; }
+ uint32_t stencilStackID() const { return fStencilStackID; }
+ bool hasStencilClip() const { return SkClipStack::kInvalidGenID != fStencilStackID; }
+
+ /**
+     * Intersects the applied clip with the provided rect. 'clippedDrawBounds' will be
+     * intersected with 'irect'. Returns false if the clip becomes empty or the draw no
+     * longer intersects the clip; in either case the draw can be skipped.
+ */
+ bool addScissor(const SkIRect& irect, SkRect* clippedDrawBounds) {
+ return fScissorState.intersect(irect) && clippedDrawBounds->intersect(SkRect::Make(irect));
+ }
+
+ void addWindowRectangles(const GrWindowRectsState& windowState) {
+ SkASSERT(!fWindowRectsState.enabled());
+ fWindowRectsState = windowState;
+ }
+
+ void addWindowRectangles(const GrWindowRectangles& windows, GrWindowRectsState::Mode mode) {
+ SkASSERT(!fWindowRectsState.enabled());
+ fWindowRectsState.set(windows, mode);
+ }
+
+ void addStencilClip(uint32_t stencilStackID) {
+ SkASSERT(SkClipStack::kInvalidGenID == fStencilStackID);
+ fStencilStackID = stencilStackID;
+ }
+
+ bool doesClip() const {
+ return fScissorState.enabled() || this->hasStencilClip() || fWindowRectsState.enabled();
+ }
+
+ bool operator==(const GrAppliedHardClip& that) const {
+ return fScissorState == that.fScissorState &&
+ fWindowRectsState == that.fWindowRectsState &&
+ fStencilStackID == that.fStencilStackID;
+ }
+ bool operator!=(const GrAppliedHardClip& that) const { return !(*this == that); }
+
+private:
+ GrScissorState fScissorState;
+ GrWindowRectsState fWindowRectsState;
+ uint32_t fStencilStackID = SkClipStack::kInvalidGenID;
+};
+
+/**
+ * Produced by GrClip. It provides a set of modifications to GrPipeline that implement the clip.
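+ *
+ * A GrAppliedClip bundles the fixed-function state (scissor, window rectangles, stencil)
+ * of a GrAppliedHardClip together with any analytic coverage fragment processors.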
+ */
+class GrAppliedClip {
+public:
+ GrAppliedClip() = default;
+ GrAppliedClip(GrAppliedClip&& that) = default;
+ GrAppliedClip(const GrAppliedClip&) = delete;
+
+ const GrScissorState& scissorState() const { return fHardClip.scissorState(); }
+ const GrWindowRectsState& windowRectsState() const { return fHardClip.windowRectsState(); }
+ uint32_t stencilStackID() const { return fHardClip.stencilStackID(); }
+ bool hasStencilClip() const { return fHardClip.hasStencilClip(); }
+ int numClipCoverageFragmentProcessors() const { return fClipCoverageFPs.count(); }
+ const GrFragmentProcessor* clipCoverageFragmentProcessor(int i) const {
+ SkASSERT(fClipCoverageFPs[i]);
+ return fClipCoverageFPs[i].get();
+ }
+ std::unique_ptr<const GrFragmentProcessor> detachClipCoverageFragmentProcessor(int i) {
+ SkASSERT(fClipCoverageFPs[i]);
+ return std::move(fClipCoverageFPs[i]);
+ }
+
+ GrAppliedHardClip& hardClip() { return fHardClip; }
+
+ void addCoverageFP(std::unique_ptr<GrFragmentProcessor> fp) {
+ SkASSERT(fp);
+ fClipCoverageFPs.push_back(std::move(fp));
+ }
+
+ bool doesClip() const {
+ return fHardClip.doesClip() || !fClipCoverageFPs.empty();
+ }
+
+ bool operator==(const GrAppliedClip& that) const {
+ if (fHardClip != that.fHardClip ||
+ fClipCoverageFPs.count() != that.fClipCoverageFPs.count()) {
+ return false;
+ }
+ for (int i = 0; i < fClipCoverageFPs.count(); ++i) {
+ if (!fClipCoverageFPs[i] || !that.fClipCoverageFPs[i]) {
+ if (fClipCoverageFPs[i] == that.fClipCoverageFPs[i]) {
+ continue; // Both are null.
+ }
+ return false;
+ }
+ if (!fClipCoverageFPs[i]->isEqual(*that.fClipCoverageFPs[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool operator!=(const GrAppliedClip& that) const { return !(*this == that); }
+
+ void visitProxies(const GrOp::VisitProxyFunc& func) const {
+ for (const std::unique_ptr<GrFragmentProcessor>& fp : fClipCoverageFPs) {
+ if (fp) { // This might be called after detach.
+ fp->visitProxies(func);
+ }
+ }
+ }
+
+private:
+ GrAppliedHardClip fHardClip;
+ SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fClipCoverageFPs;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAuditTrail.cpp b/gfx/skia/skia/src/gpu/GrAuditTrail.cpp
new file mode 100644
index 0000000000..9c72acf725
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAuditTrail.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/ops/GrOp.h"
+#include "src/utils/SkJSONWriter.h"
+
+const int GrAuditTrail::kGrAuditTrailInvalidID = -1;
+
+void GrAuditTrail::addOp(const GrOp* op, GrRenderTargetProxy::UniqueID proxyID) {
+ SkASSERT(fEnabled);
+ Op* auditOp = new Op;
+ fOpPool.emplace_back(auditOp);
+ auditOp->fName = op->name();
+ auditOp->fBounds = op->bounds();
+ auditOp->fClientID = kGrAuditTrailInvalidID;
+ auditOp->fOpsTaskID = kGrAuditTrailInvalidID;
+ auditOp->fChildID = kGrAuditTrailInvalidID;
+
+ // consume the current stack trace if any
+ auditOp->fStackTrace = fCurrentStackTrace;
+ fCurrentStackTrace.reset();
+
+ if (fClientID != kGrAuditTrailInvalidID) {
+ auditOp->fClientID = fClientID;
+ Ops** opsLookup = fClientIDLookup.find(fClientID);
+ Ops* ops = nullptr;
+ if (!opsLookup) {
+ ops = new Ops;
+ fClientIDLookup.set(fClientID, ops);
+ } else {
+ ops = *opsLookup;
+ }
+
+ ops->push_back(auditOp);
+ }
+
+ // Our algorithm doesn't bother to reorder inside of an OpNode so the ChildID will start at 0
+ auditOp->fOpsTaskID = fOpsTask.count();
+ auditOp->fChildID = 0;
+
+ // We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
+ fIDLookup.set(op->uniqueID(), auditOp->fOpsTaskID);
+ OpNode* opNode = new OpNode(proxyID);
+ opNode->fBounds = op->bounds();
+ opNode->fChildren.push_back(auditOp);
+ fOpsTask.emplace_back(opNode);
+}
+
+void GrAuditTrail::opsCombined(const GrOp* consumer, const GrOp* consumed) {
+ // Look up the op we are going to glom onto
+ int* indexPtr = fIDLookup.find(consumer->uniqueID());
+ SkASSERT(indexPtr);
+ int index = *indexPtr;
+ SkASSERT(index < fOpsTask.count() && fOpsTask[index]);
+ OpNode& consumerOp = *fOpsTask[index];
+
+ // Look up the op which will be glommed
+ int* consumedPtr = fIDLookup.find(consumed->uniqueID());
+ SkASSERT(consumedPtr);
+ int consumedIndex = *consumedPtr;
+ SkASSERT(consumedIndex < fOpsTask.count() && fOpsTask[consumedIndex]);
+ OpNode& consumedOp = *fOpsTask[consumedIndex];
+
+ // steal all of consumed's ops
+ for (int i = 0; i < consumedOp.fChildren.count(); i++) {
+ Op* childOp = consumedOp.fChildren[i];
+
+ // set the ids for the child op
+ childOp->fOpsTaskID = index;
+ childOp->fChildID = consumerOp.fChildren.count();
+ consumerOp.fChildren.push_back(childOp);
+ }
+
+ // Update the bounds for the combineWith node
+ consumerOp.fBounds = consumer->bounds();
+
+ // remove the old node from our opsTask and clear the combinee's lookup
+ // NOTE: because we can't change the shape of the oplist, we use a sentinel
+ fOpsTask[consumedIndex].reset(nullptr);
+ fIDLookup.remove(consumed->uniqueID());
+}
+
+void GrAuditTrail::copyOutFromOpsTask(OpInfo* outOpInfo, int opsTaskID) {
+ SkASSERT(opsTaskID < fOpsTask.count());
+ const OpNode* bn = fOpsTask[opsTaskID].get();
+ SkASSERT(bn);
+ outOpInfo->fBounds = bn->fBounds;
+ outOpInfo->fProxyUniqueID = bn->fProxyUniqueID;
+ for (int j = 0; j < bn->fChildren.count(); j++) {
+ OpInfo::Op& outOp = outOpInfo->fOps.push_back();
+ const Op* currentOp = bn->fChildren[j];
+ outOp.fBounds = currentOp->fBounds;
+ outOp.fClientID = currentOp->fClientID;
+ }
+}
+
+void GrAuditTrail::getBoundsByClientID(SkTArray<OpInfo>* outInfo, int clientID) {
+ Ops** opsLookup = fClientIDLookup.find(clientID);
+ if (opsLookup) {
+        // We track which opsTaskID we're currently looking at. If it changes, then we need to
+        // push back a new op info struct. We happen to know that ops are in sequential order in
+        // the ops task; otherwise we'd have to do more bookkeeping.
+ int currentOpsTaskID = kGrAuditTrailInvalidID;
+ for (int i = 0; i < (*opsLookup)->count(); i++) {
+ const Op* op = (**opsLookup)[i];
+
+            // Because we will copy out all of the ops associated with a given ops task id every
+            // time the id changes, we only have to update our struct when the id changes.
+ if (kGrAuditTrailInvalidID == currentOpsTaskID || op->fOpsTaskID != currentOpsTaskID) {
+ OpInfo& outOpInfo = outInfo->push_back();
+
+ // copy out all of the ops so the client can display them even if they have a
+ // different clientID
+ this->copyOutFromOpsTask(&outOpInfo, op->fOpsTaskID);
+ }
+ }
+ }
+}
+
+void GrAuditTrail::getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID) {
+ this->copyOutFromOpsTask(outInfo, opsTaskID);
+}
+
+void GrAuditTrail::fullReset() {
+ SkASSERT(fEnabled);
+ fOpsTask.reset();
+ fIDLookup.reset();
+ // free all client ops
+ fClientIDLookup.foreach ([](const int&, Ops** ops) { delete *ops; });
+ fClientIDLookup.reset();
+ fOpPool.reset(); // must be last, frees all of the memory
+}
+
+template <typename T>
+void GrAuditTrail::JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array) {
+ if (array.count()) {
+ writer.beginArray(name);
+ for (int i = 0; i < array.count(); i++) {
+ // Handle sentinel nullptrs
+ if (array[i]) {
+ array[i]->toJson(writer);
+ }
+ }
+ writer.endArray();
+ }
+}
+
+void GrAuditTrail::toJson(SkJSONWriter& writer) const {
+ writer.beginObject();
+ JsonifyTArray(writer, "Ops", fOpsTask);
+ writer.endObject();
+}
+
+void GrAuditTrail::toJson(SkJSONWriter& writer, int clientID) const {
+ writer.beginObject();
+ Ops** ops = fClientIDLookup.find(clientID);
+ if (ops) {
+ JsonifyTArray(writer, "Ops", **ops);
+ }
+ writer.endObject();
+}
+
+static void skrect_to_json(SkJSONWriter& writer, const char* name, const SkRect& rect) {
+ writer.beginObject(name);
+ writer.appendFloat("Left", rect.fLeft);
+ writer.appendFloat("Right", rect.fRight);
+ writer.appendFloat("Top", rect.fTop);
+ writer.appendFloat("Bottom", rect.fBottom);
+ writer.endObject();
+}
+
+void GrAuditTrail::Op::toJson(SkJSONWriter& writer) const {
+ writer.beginObject();
+ writer.appendString("Name", fName.c_str());
+ writer.appendS32("ClientID", fClientID);
+ writer.appendS32("OpsTaskID", fOpsTaskID);
+ writer.appendS32("ChildID", fChildID);
+ skrect_to_json(writer, "Bounds", fBounds);
+ if (fStackTrace.count()) {
+ writer.beginArray("Stack");
+ for (int i = 0; i < fStackTrace.count(); i++) {
+ writer.appendString(fStackTrace[i].c_str());
+ }
+ writer.endArray();
+ }
+ writer.endObject();
+}
+
+void GrAuditTrail::OpNode::toJson(SkJSONWriter& writer) const {
+ writer.beginObject();
+ writer.appendU32("ProxyID", fProxyUniqueID.asUInt());
+ skrect_to_json(writer, "Bounds", fBounds);
+ JsonifyTArray(writer, "Ops", fChildren);
+ writer.endObject();
+}
diff --git a/gfx/skia/skia/src/gpu/GrAuditTrail.h b/gfx/skia/skia/src/gpu/GrAuditTrail.h
new file mode 100644
index 0000000000..dd6549ef6c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAuditTrail.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAuditTrail_DEFINED
+#define GrAuditTrail_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/gpu/GrConfig.h"
+#include "include/gpu/GrGpuResource.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTHash.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+
+class GrOp;
+class SkJSONWriter;
+
+/*
+ * GrAuditTrail collects a list of draw ops and detailed information about those ops, and can dump
+ * them to JSON.
+ *
+ * Capturing this information is expensive and consumes a lot of memory; it is therefore important
+ * to enable auditing only when required and to disable it promptly. The AutoEnable class helps to
+ * ensure that the audit trail is disabled in a timely fashion. Once the information has been dealt
+ * with, be sure to call fullReset(), or the log will simply keep growing.
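+ *
+ * Typical scoped usage (an illustrative sketch; 'auditTrail' and 'writer' are
+ * hypothetical, caller-provided objects):
+ *
+ *   {
+ *       GrAuditTrail::AutoEnable ae(auditTrail);
+ *       // ... issue draws; ops and their details are recorded ...
+ *       auditTrail->toJson(writer);   // 'writer' is an SkJSONWriter
+ *       auditTrail->fullReset();      // release the accumulated bookkeeping
+ *   }   // auditing is switched back off when 'ae' goes out of scope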
+ */
+class GrAuditTrail {
+public:
+ GrAuditTrail()
+ : fClientID(kGrAuditTrailInvalidID)
+ , fEnabled(false) {}
+
+ class AutoEnable {
+ public:
+ AutoEnable(GrAuditTrail* auditTrail)
+ : fAuditTrail(auditTrail) {
+ SkASSERT(!fAuditTrail->isEnabled());
+ fAuditTrail->setEnabled(true);
+ }
+
+ ~AutoEnable() {
+ SkASSERT(fAuditTrail->isEnabled());
+ fAuditTrail->setEnabled(false);
+ }
+
+ private:
+ GrAuditTrail* fAuditTrail;
+ };
+
+ class AutoManageOpsTask {
+ public:
+ AutoManageOpsTask(GrAuditTrail* auditTrail)
+ : fAutoEnable(auditTrail), fAuditTrail(auditTrail) {}
+
+ ~AutoManageOpsTask() { fAuditTrail->fullReset(); }
+
+ private:
+ AutoEnable fAutoEnable;
+ GrAuditTrail* fAuditTrail;
+ };
+
+ class AutoCollectOps {
+ public:
+ AutoCollectOps(GrAuditTrail* auditTrail, int clientID)
+ : fAutoEnable(auditTrail), fAuditTrail(auditTrail) {
+ fAuditTrail->setClientID(clientID);
+ }
+
+ ~AutoCollectOps() { fAuditTrail->setClientID(kGrAuditTrailInvalidID); }
+
+ private:
+ AutoEnable fAutoEnable;
+ GrAuditTrail* fAuditTrail;
+ };
+
+ void pushFrame(const char* framename) {
+ SkASSERT(fEnabled);
+ fCurrentStackTrace.push_back(SkString(framename));
+ }
+
+ void addOp(const GrOp*, GrRenderTargetProxy::UniqueID proxyID);
+
+ void opsCombined(const GrOp* consumer, const GrOp* consumed);
+
+    // Because op combining is heavily dependent on the sequence of draw calls, these calls will
+    // only produce valid information for the draw sequence which preceded them. Specifically, ops
+    // of future draw calls may combine with previous ops and thus would invalidate the JSON. What
+ // this means is that for some sequence of draw calls N, the below toJson calls will only
+ // produce JSON which reflects N draw calls. This JSON may or may not be accurate for N + 1 or
+ // N - 1 draws depending on the actual combining algorithm used.
+ void toJson(SkJSONWriter& writer) const;
+
+ // returns a json string of all of the ops associated with a given client id
+ void toJson(SkJSONWriter& writer, int clientID) const;
+
+ bool isEnabled() { return fEnabled; }
+ void setEnabled(bool enabled) { fEnabled = enabled; }
+
+ void setClientID(int clientID) { fClientID = clientID; }
+
+    // We could just return our internal bookkeeping struct if copying the data out becomes
+    // a performance issue, but until then it's nice to decouple.
+ struct OpInfo {
+ struct Op {
+ int fClientID;
+ SkRect fBounds;
+ };
+
+ SkRect fBounds;
+ GrSurfaceProxy::UniqueID fProxyUniqueID;
+ SkTArray<Op> fOps;
+ };
+
+ void getBoundsByClientID(SkTArray<OpInfo>* outInfo, int clientID);
+ void getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID);
+
+ void fullReset();
+
+ static const int kGrAuditTrailInvalidID;
+
+private:
+ // TODO if performance becomes an issue, we can move to using SkVarAlloc
+ struct Op {
+ void toJson(SkJSONWriter& writer) const;
+ SkString fName;
+ SkTArray<SkString> fStackTrace;
+ SkRect fBounds;
+ int fClientID;
+ int fOpsTaskID;
+ int fChildID;
+ };
+ typedef SkTArray<std::unique_ptr<Op>, true> OpPool;
+
+ typedef SkTArray<Op*> Ops;
+
+ struct OpNode {
+ OpNode(const GrSurfaceProxy::UniqueID& proxyID) : fProxyUniqueID(proxyID) { }
+ void toJson(SkJSONWriter& writer) const;
+
+ SkRect fBounds;
+ Ops fChildren;
+ const GrSurfaceProxy::UniqueID fProxyUniqueID;
+ };
+ typedef SkTArray<std::unique_ptr<OpNode>, true> OpsTask;
+
+ void copyOutFromOpsTask(OpInfo* outOpInfo, int opsTask);
+
+ template <typename T>
+ static void JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array);
+
+ OpPool fOpPool;
+ SkTHashMap<uint32_t, int> fIDLookup;
+ SkTHashMap<int, Ops*> fClientIDLookup;
+ OpsTask fOpsTask;
+ SkTArray<SkString> fCurrentStackTrace;
+
+ // The client can pass in an optional client ID which we will use to mark the ops
+ int fClientID;
+ bool fEnabled;
+};
+
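+// The guard macros below forward to GrAuditTrail only when auditing is enabled. Note
+// that GR_AUDIT_TRAIL_INVOKE_GUARD expands to an unbraced 'if', so an invocation must
+// be used as a complete statement, and GR_AUDIT_TRAIL_RESET is intentionally defined
+// to expand to nothing.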
+#define GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, invoke, ...) \
+ if (audit_trail->isEnabled()) audit_trail->invoke(__VA_ARGS__)
+
+#define GR_AUDIT_TRAIL_AUTO_FRAME(audit_trail, framename) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD((audit_trail), pushFrame, framename)
+
+#define GR_AUDIT_TRAIL_RESET(audit_trail) \
+ //GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, fullReset);
+
+#define GR_AUDIT_TRAIL_ADD_OP(audit_trail, op, proxy_id) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, addOp, op, proxy_id)
+
+#define GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(audit_trail, combineWith, op) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, opsCombined, combineWith, op)
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h b/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h
new file mode 100644
index 0000000000..aa24177728
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAutoLocaleSetter_DEFINED
+#define GrAutoLocaleSetter_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkNoncopyable.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+#include "include/core/SkString.h"
+#endif
+
+#if !defined(SK_BUILD_FOR_ANDROID)
+#include <locale.h>
+#endif
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#include <xlocale.h>
+#include <cstring>
+#define HAVE_XLOCALE 1
+#else
+#define HAVE_XLOCALE 0
+#endif
+
+#if defined(SK_BUILD_FOR_ANDROID) || defined(__UCLIBC__) || defined(_NEWLIB_VERSION)
+#define HAVE_LOCALE_T 0
+#else
+#define HAVE_LOCALE_T 1
+#endif
+
+/**
+ * Helper class for ensuring that we don't use the wrong locale when building shaders. Android
+ * doesn't support locale in the NDK, so this is a no-op there.
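+ *
+ * Intended to be stack-scoped around shader assembly (an illustrative sketch):
+ *
+ *   {
+ *       GrAutoLocaleSetter als("C");
+ *       // emit floating-point literals here; '.' is the radix separator regardless
+ *       // of the user's locale
+ *   }   // the previous locale is restored when 'als' goes out of scope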
+ */
+class GrAutoLocaleSetter : public SkNoncopyable {
+public:
+ GrAutoLocaleSetter (const char* name) {
+#if defined(SK_BUILD_FOR_WIN)
+ fOldPerThreadLocale = _configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
+ char* oldLocale = setlocale(LC_ALL, name);
+ if (oldLocale) {
+ fOldLocale = oldLocale;
+ fShouldRestoreLocale = true;
+ } else {
+ fShouldRestoreLocale = false;
+ }
+#elif HAVE_LOCALE_T
+#if HAVE_XLOCALE
+ // In xlocale nullptr means the C locale.
+ if (0 == strcmp(name, "C")) {
+ name = nullptr;
+ }
+#endif
+ fLocale = newlocale(LC_ALL_MASK, name, nullptr);
+ if (fLocale) {
+ fOldLocale = uselocale(fLocale);
+ } else {
+ fOldLocale = static_cast<locale_t>(nullptr);
+ }
+#else
+ (void) name; // suppress unused param warning.
+#endif
+ }
+
+ ~GrAutoLocaleSetter () {
+#if defined(SK_BUILD_FOR_WIN)
+ if (fShouldRestoreLocale) {
+ setlocale(LC_ALL, fOldLocale.c_str());
+ }
+ _configthreadlocale(fOldPerThreadLocale);
+#elif HAVE_LOCALE_T
+ if (fLocale) {
+ uselocale(fOldLocale);
+ freelocale(fLocale);
+ }
+#endif
+ }
+
+private:
+#if defined(SK_BUILD_FOR_WIN)
+ int fOldPerThreadLocale;
+ bool fShouldRestoreLocale;
+ SkString fOldLocale;
+#elif HAVE_LOCALE_T
+ locale_t fOldLocale;
+ locale_t fLocale;
+#endif
+};
+
+#undef HAVE_LOCALE_T
+#undef HAVE_XLOCALE
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrBackendSurface.cpp b/gfx/skia/skia/src/gpu/GrBackendSurface.cpp
new file mode 100644
index 0000000000..fa95520811
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBackendSurface.cpp
@@ -0,0 +1,924 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/GrBackendSurface.h"
+
+#include "src/gpu/gl/GrGLUtil.h"
+
+#ifdef SK_DAWN
+#include "include/gpu/dawn/GrDawnTypes.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkImageLayout.h"
+#include "src/gpu/vk/GrVkUtil.h"
+#endif
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#include "src/gpu/mtl/GrMtlCppUtil.h"
+#endif
+
+GrBackendFormat::GrBackendFormat(const GrBackendFormat& that)
+ : fBackend(that.fBackend)
+ , fValid(that.fValid)
+ , fTextureType(that.fTextureType) {
+ if (!fValid) {
+ return;
+ }
+
+ switch (fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ fGLFormat = that.fGLFormat;
+ break;
+#endif
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan:
+ fVk = that.fVk;
+ break;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ fMtlFormat = that.fMtlFormat;
+ break;
+#endif
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn:
+ fDawnFormat = that.fDawnFormat;
+ break;
+#endif
+ case GrBackendApi::kMock:
+ fMockColorType = that.fMockColorType;
+ break;
+ default:
+ SK_ABORT("Unknown GrBackend");
+ }
+}
+
+GrBackendFormat::GrBackendFormat(GrGLenum format, GrGLenum target)
+ : fBackend(GrBackendApi::kOpenGL)
+ , fValid(true)
+ , fGLFormat(format) {
+ switch (target) {
+ case GR_GL_TEXTURE_NONE:
+ fTextureType = GrTextureType::kNone;
+ break;
+ case GR_GL_TEXTURE_2D:
+ fTextureType = GrTextureType::k2D;
+ break;
+ case GR_GL_TEXTURE_RECTANGLE:
+ fTextureType = GrTextureType::kRectangle;
+ break;
+ case GR_GL_TEXTURE_EXTERNAL:
+ fTextureType = GrTextureType::kExternal;
+ break;
+ default:
+ SK_ABORT("Unexpected texture target");
+ }
+}
+
+GrGLFormat GrBackendFormat::asGLFormat() const {
+ if (this->isValid() && GrBackendApi::kOpenGL == fBackend) {
+ return GrGLFormatFromGLEnum(fGLFormat);
+ }
+ return GrGLFormat::kUnknown;
+}
+
+GrBackendFormat GrBackendFormat::MakeVk(const GrVkYcbcrConversionInfo& ycbcrInfo) {
+ SkASSERT(ycbcrInfo.isValid());
+ return GrBackendFormat(ycbcrInfo.fFormat, ycbcrInfo);
+}
+
+GrBackendFormat::GrBackendFormat(VkFormat vkFormat, const GrVkYcbcrConversionInfo& ycbcrInfo)
+ : fBackend(GrBackendApi::kVulkan)
+#ifdef SK_VULKAN
+ , fValid(true)
+#else
+ , fValid(false)
+#endif
+ , fTextureType(GrTextureType::k2D) {
+ fVk.fFormat = vkFormat;
+ fVk.fYcbcrConversionInfo = ycbcrInfo;
+ if (fVk.fYcbcrConversionInfo.isValid() && fVk.fYcbcrConversionInfo.fExternalFormat) {
+ fTextureType = GrTextureType::kExternal;
+ }
+}
+
+bool GrBackendFormat::asVkFormat(VkFormat* format) const {
+ SkASSERT(format);
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *format = fVk.fFormat;
+ return true;
+ }
+ return false;
+}
+
+const GrVkYcbcrConversionInfo* GrBackendFormat::getVkYcbcrConversionInfo() const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ return &fVk.fYcbcrConversionInfo;
+ }
+ return nullptr;
+}
+
+#ifdef SK_DAWN
+GrBackendFormat::GrBackendFormat(dawn::TextureFormat format)
+ : fBackend(GrBackendApi::kDawn)
+ , fValid(true)
+ , fDawnFormat(format)
+ , fTextureType(GrTextureType::k2D) {
+}
+
+bool GrBackendFormat::asDawnFormat(dawn::TextureFormat* format) const {
+ SkASSERT(format);
+ if (this->isValid() && GrBackendApi::kDawn == fBackend) {
+ *format = fDawnFormat;
+ return true;
+ }
+ return false;
+}
+#endif
+
+#ifdef SK_METAL
+GrBackendFormat::GrBackendFormat(GrMTLPixelFormat mtlFormat)
+ : fBackend(GrBackendApi::kMetal)
+ , fValid(true)
+ , fMtlFormat(mtlFormat)
+ , fTextureType(GrTextureType::k2D) {
+}
+
+GrMTLPixelFormat GrBackendFormat::asMtlFormat() const {
+ if (this->isValid() && GrBackendApi::kMetal == fBackend) {
+ return fMtlFormat;
+ }
+ // MTLPixelFormatInvalid == 0
+ return GrMTLPixelFormat(0);
+}
+#endif
+
+GrBackendFormat::GrBackendFormat(GrColorType colorType)
+ : fBackend(GrBackendApi::kMock)
+ , fValid(true)
+ , fTextureType(GrTextureType::k2D) {
+ fMockColorType = colorType;
+}
+
+GrColorType GrBackendFormat::asMockColorType() const {
+ if (this->isValid() && GrBackendApi::kMock == fBackend) {
+ return fMockColorType;
+ }
+ return GrColorType::kUnknown;
+}
+
+GrBackendFormat GrBackendFormat::makeTexture2D() const {
+ GrBackendFormat copy = *this;
+ if (const GrVkYcbcrConversionInfo* ycbcrInfo = this->getVkYcbcrConversionInfo()) {
+ if (ycbcrInfo->isValid()) {
+            // If we have a ycbcr conversion, we remove it from the backend format and set the
+            // VkFormat to R8G8B8A8_UNORM.
+ SkASSERT(copy.fBackend == GrBackendApi::kVulkan);
+ copy.fVk.fYcbcrConversionInfo = GrVkYcbcrConversionInfo();
+ copy.fVk.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+ }
+ }
+ copy.fTextureType = GrTextureType::k2D;
+ return copy;
+}
+
+bool GrBackendFormat::operator==(const GrBackendFormat& that) const {
+ // Invalid GrBackendFormats are never equal to anything.
+ if (!fValid || !that.fValid) {
+ return false;
+ }
+
+ if (fBackend != that.fBackend) {
+ return false;
+ }
+
+ switch (fBackend) {
+ case GrBackendApi::kOpenGL:
+ return fGLFormat == that.fGLFormat;
+ case GrBackendApi::kVulkan:
+#ifdef SK_VULKAN
+ return fVk.fFormat == that.fVk.fFormat &&
+ fVk.fYcbcrConversionInfo == that.fVk.fYcbcrConversionInfo;
+#endif
+ break;
+        case GrBackendApi::kMetal:
+#ifdef SK_METAL
+            return fMtlFormat == that.fMtlFormat;
+#endif
+            break;
+ case GrBackendApi::kDawn:
+#ifdef SK_DAWN
+ return fDawnFormat == that.fDawnFormat;
+#endif
+ break;
+ case GrBackendApi::kMock:
+ return fMockColorType == that.fMockColorType;
+ default:
+ SK_ABORT("Unknown GrBackend");
+ }
+ return false;
+}
+
+#if GR_TEST_UTILS
+#include "include/core/SkString.h"
+#include "src/gpu/GrTestUtils.h"
+
+#ifdef SK_GL
+#include "src/gpu/gl/GrGLUtil.h"
+#endif
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkUtil.h"
+#endif
+
+SkString GrBackendFormat::toStr() const {
+ SkString str;
+
+ if (!fValid) {
+ str.append("invalid");
+ return str;
+ }
+
+ str.appendf("%s-", GrBackendApiToStr(fBackend));
+
+ switch (fBackend) {
+ case GrBackendApi::kOpenGL:
+#ifdef SK_GL
+ str.append(GrGLFormatToStr(fGLFormat));
+#endif
+ break;
+ case GrBackendApi::kVulkan:
+#ifdef SK_VULKAN
+ str.append(GrVkFormatToStr(fVk.fFormat));
+#endif
+ break;
+ case GrBackendApi::kMetal:
+#ifdef SK_METAL
+ str.append(GrMtlFormatToStr(fMtlFormat));
+#endif
+ break;
+ case GrBackendApi::kDawn:
+#ifdef SK_DAWN
+ str.append(GrDawnFormatToStr(fDawnFormat));
+#endif
+ break;
+ case GrBackendApi::kMock:
+ str.append(GrColorTypeToStr(fMockColorType));
+ break;
+ }
+
+ return str;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DAWN
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ const GrDawnImageInfo& dawnInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fMipMapped(GrMipMapped(dawnInfo.fLevelCount > 1))
+ , fBackend(GrBackendApi::kDawn)
+ , fDawnInfo(dawnInfo) {}
+#endif
+
+GrBackendTexture::GrBackendTexture(int width, int height, const GrVkImageInfo& vkInfo)
+#ifdef SK_VULKAN
+ : GrBackendTexture(width, height, vkInfo,
+ sk_sp<GrVkImageLayout>(new GrVkImageLayout(vkInfo.fImageLayout))) {}
+#else
+ : fIsValid(false) {}
+#endif
+
+#ifdef SK_GL
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ GrMipMapped mipMapped,
+ const GrGLTextureInfo glInfo,
+ sk_sp<GrGLTextureParameters> params)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fMipMapped(mipMapped)
+ , fBackend(GrBackendApi::kOpenGL)
+ , fGLInfo(glInfo, params.release()) {}
+
+sk_sp<GrGLTextureParameters> GrBackendTexture::getGLTextureParams() const {
+ if (fBackend != GrBackendApi::kOpenGL) {
+ return nullptr;
+ }
+ return fGLInfo.refParameters();
+}
+#endif
+
+#ifdef SK_VULKAN
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<GrVkImageLayout> layout)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fMipMapped(GrMipMapped(vkInfo.fLevelCount > 1))
+ , fBackend(GrBackendApi::kVulkan)
+ , fVkInfo(vkInfo, layout.release()) {}
+#endif
+
+#ifdef SK_METAL
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ GrMipMapped mipMapped,
+ const GrMtlTextureInfo& mtlInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fMipMapped(mipMapped)
+ , fBackend(GrBackendApi::kMetal)
+ , fMtlInfo(mtlInfo) {}
+#endif
+
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ GrMipMapped mipMapped,
+ const GrGLTextureInfo& glInfo)
+ : GrBackendTexture(width, height, mipMapped, glInfo, sk_make_sp<GrGLTextureParameters>()) {
+    // Make no assumptions about the client texture's parameters.
+ this->glTextureParametersModified();
+}
+
+GrBackendTexture::GrBackendTexture(int width,
+ int height,
+ GrMipMapped mipMapped,
+ const GrMockTextureInfo& mockInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fMipMapped(mipMapped)
+ , fBackend(GrBackendApi::kMock)
+ , fMockInfo(mockInfo) {}
+
+GrBackendTexture::~GrBackendTexture() {
+ this->cleanup();
+}
+
+void GrBackendTexture::cleanup() {
+#ifdef SK_GL
+ if (this->isValid() && GrBackendApi::kOpenGL == fBackend) {
+ fGLInfo.cleanup();
+ }
+#endif
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ fVkInfo.cleanup();
+ }
+#endif
+}
+
+GrBackendTexture::GrBackendTexture(const GrBackendTexture& that) : fIsValid(false) {
+ *this = that;
+}
+
+GrBackendTexture& GrBackendTexture::operator=(const GrBackendTexture& that) {
+ if (!that.isValid()) {
+ this->cleanup();
+ fIsValid = false;
+ return *this;
+ } else if (fIsValid && this->fBackend != that.fBackend) {
+ this->cleanup();
+ fIsValid = false;
+ }
+ fWidth = that.fWidth;
+ fHeight = that.fHeight;
+ fMipMapped = that.fMipMapped;
+ fBackend = that.fBackend;
+
+ switch (that.fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ fGLInfo.assign(that.fGLInfo, this->isValid());
+ break;
+#endif
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan:
+ fVkInfo.assign(that.fVkInfo, this->isValid());
+ break;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ fMtlInfo = that.fMtlInfo;
+ break;
+#endif
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn:
+ fDawnInfo = that.fDawnInfo;
+ break;
+#endif
+ case GrBackendApi::kMock:
+ fMockInfo = that.fMockInfo;
+ break;
+ default:
+ SK_ABORT("Unknown GrBackend");
+ }
+ fIsValid = true;
+ return *this;
+}
+
+#ifdef SK_DAWN
+bool GrBackendTexture::getDawnImageInfo(GrDawnImageInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kDawn == fBackend) {
+ *outInfo = fDawnInfo;
+ return true;
+ }
+ return false;
+}
+#endif
+
+bool GrBackendTexture::getVkImageInfo(GrVkImageInfo* outInfo) const {
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *outInfo = fVkInfo.snapImageInfo();
+ return true;
+ }
+#endif
+ return false;
+}
+
+void GrBackendTexture::setVkImageLayout(VkImageLayout layout) {
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ fVkInfo.setImageLayout(layout);
+ }
+#endif
+}
+
+#ifdef SK_VULKAN
+sk_sp<GrVkImageLayout> GrBackendTexture::getGrVkImageLayout() const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ return fVkInfo.getGrVkImageLayout();
+ }
+ return nullptr;
+}
+#endif
+
+#ifdef SK_METAL
+bool GrBackendTexture::getMtlTextureInfo(GrMtlTextureInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kMetal == fBackend) {
+ *outInfo = fMtlInfo;
+ return true;
+ }
+ return false;
+}
+#endif
+
+bool GrBackendTexture::getGLTextureInfo(GrGLTextureInfo* outInfo) const {
+#ifdef SK_GL
+ if (this->isValid() && GrBackendApi::kOpenGL == fBackend) {
+ *outInfo = fGLInfo.info();
+ return true;
+ } else if (this->isValid() && GrBackendApi::kMock == fBackend) {
+ // Hack! This allows some blink unit tests to work when using the Mock GrContext.
+ // Specifically, tests that rely on CanvasResourceProviderTextureGpuMemoryBuffer.
+ // If that code ever goes away (or ideally becomes backend-agnostic), this can go away.
+ *outInfo = GrGLTextureInfo{ GR_GL_TEXTURE_2D,
+ static_cast<GrGLuint>(fMockInfo.fID),
+ GR_GL_RGBA8 };
+ return true;
+ }
+#endif
+ return false;
+}
+
+void GrBackendTexture::glTextureParametersModified() {
+#ifdef SK_GL
+ if (this->isValid() && fBackend == GrBackendApi::kOpenGL) {
+ fGLInfo.parameters()->invalidate();
+ }
+#endif
+}
+
+bool GrBackendTexture::getMockTextureInfo(GrMockTextureInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kMock == fBackend) {
+ *outInfo = fMockInfo;
+ return true;
+ }
+ return false;
+}
+
+bool GrBackendTexture::isProtected() const {
+ if (!this->isValid() || this->backend() != GrBackendApi::kVulkan) {
+ return false;
+ }
+ return fVkInfo.isProtected();
+}
+
+bool GrBackendTexture::isSameTexture(const GrBackendTexture& that) {
+ if (!this->isValid() || !that.isValid()) {
+ return false;
+ }
+ if (fBackend != that.fBackend) {
+ return false;
+ }
+ switch (fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ return fGLInfo.info().fID == that.fGLInfo.info().fID;
+#endif
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan:
+ return fVkInfo.snapImageInfo().fImage == that.fVkInfo.snapImageInfo().fImage;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ return this->fMtlInfo.fTexture == that.fMtlInfo.fTexture;
+#endif
+ case GrBackendApi::kMock:
+ return fMockInfo.fID == that.fMockInfo.fID;
+ default:
+ return false;
+ }
+}
+
+GrBackendFormat GrBackendTexture::getBackendFormat() const {
+ if (!this->isValid()) {
+ return GrBackendFormat();
+ }
+ switch (fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ return GrBackendFormat::MakeGL(fGLInfo.info().fFormat, fGLInfo.info().fTarget);
+#endif
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan: {
+ auto info = fVkInfo.snapImageInfo();
+ if (info.fYcbcrConversionInfo.isValid()) {
+ SkASSERT(info.fFormat == info.fYcbcrConversionInfo.fFormat);
+ return GrBackendFormat::MakeVk(info.fYcbcrConversionInfo);
+ }
+ return GrBackendFormat::MakeVk(info.fFormat);
+ }
+#endif
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn: {
+ return GrBackendFormat::MakeDawn(fDawnInfo.fFormat);
+ }
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal: {
+ GrMtlTextureInfo mtlInfo;
+ SkAssertResult(this->getMtlTextureInfo(&mtlInfo));
+ return GrBackendFormat::MakeMtl(GrGetMTLPixelFormatFromMtlTextureInfo(mtlInfo));
+ }
+#endif
+ case GrBackendApi::kMock:
+ return fMockInfo.getBackendFormat();
+ default:
+ return GrBackendFormat();
+ }
+}
+
+#if GR_TEST_UTILS
+bool GrBackendTexture::TestingOnly_Equals(const GrBackendTexture& t0, const GrBackendTexture& t1) {
+ if (!t0.isValid() || !t1.isValid()) {
+ return false; // two invalid backend textures are not considered equal
+ }
+
+ if (t0.fWidth != t1.fWidth ||
+ t0.fHeight != t1.fHeight ||
+ t0.fMipMapped != t1.fMipMapped ||
+ t0.fBackend != t1.fBackend) {
+ return false;
+ }
+
+ switch (t0.fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ return t0.fGLInfo.info() == t1.fGLInfo.info();
+#endif
+ case GrBackendApi::kMock:
+ return t0.fMockInfo == t1.fMockInfo;
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan:
+ return t0.fVkInfo == t1.fVkInfo;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ return t0.fMtlInfo == t1.fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn:
+ return t0.fDawnInfo == t1.fDawnInfo;
+#endif
+ default:
+ return false;
+ }
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DAWN
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrDawnImageInfo& dawnInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fSampleCnt(sampleCnt)
+ , fStencilBits(stencilBits)
+ , fBackend(GrBackendApi::kDawn)
+ , fDawnInfo(dawnInfo) {}
+#endif
+
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrVkImageInfo& vkInfo)
+ : GrBackendRenderTarget(width, height, sampleCnt, vkInfo) {
+    // This is a deprecated constructor that takes a bogus stencil-bits value.
+ SkASSERT(0 == stencilBits);
+}
+
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrVkImageInfo& vkInfo)
+#ifdef SK_VULKAN
+ : GrBackendRenderTarget(width, height, sampleCnt, vkInfo,
+ sk_sp<GrVkImageLayout>(new GrVkImageLayout(vkInfo.fImageLayout))) {}
+#else
+ : fIsValid(false) {}
+#endif
+
+#ifdef SK_VULKAN
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<GrVkImageLayout> layout)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fSampleCnt(SkTMax(1, sampleCnt))
+ , fStencilBits(0) // We always create stencil buffers internally for vulkan
+ , fBackend(GrBackendApi::kVulkan)
+ , fVkInfo(vkInfo, layout.release()) {}
+#endif
+
+#ifdef SK_METAL
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrMtlTextureInfo& mtlInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fSampleCnt(SkTMax(1, sampleCnt))
+ , fStencilBits(0)
+ , fBackend(GrBackendApi::kMetal)
+ , fMtlInfo(mtlInfo) {}
+#endif
+
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrGLFramebufferInfo& glInfo)
+ : fWidth(width)
+ , fHeight(height)
+ , fSampleCnt(SkTMax(1, sampleCnt))
+ , fStencilBits(stencilBits)
+ , fBackend(GrBackendApi::kOpenGL)
+ , fGLInfo(glInfo) {
+ fIsValid = SkToBool(glInfo.fFormat); // the glInfo must have a valid format
+}
+
+GrBackendRenderTarget::GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrMockRenderTargetInfo& mockInfo)
+ : fIsValid(true)
+ , fWidth(width)
+ , fHeight(height)
+ , fSampleCnt(SkTMax(1, sampleCnt))
+ , fStencilBits(stencilBits)
+        , fBackend(GrBackendApi::kMock)
+        , fMockInfo(mockInfo) {}
+
+GrBackendRenderTarget::~GrBackendRenderTarget() {
+ this->cleanup();
+}
+
+void GrBackendRenderTarget::cleanup() {
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ fVkInfo.cleanup();
+ }
+#endif
+}
+
+GrBackendRenderTarget::GrBackendRenderTarget(const GrBackendRenderTarget& that) : fIsValid(false) {
+ *this = that;
+}
+
+GrBackendRenderTarget& GrBackendRenderTarget::operator=(const GrBackendRenderTarget& that) {
+ if (!that.isValid()) {
+ this->cleanup();
+ fIsValid = false;
+ return *this;
+ } else if (fIsValid && this->fBackend != that.fBackend) {
+ this->cleanup();
+ fIsValid = false;
+ }
+ fWidth = that.fWidth;
+ fHeight = that.fHeight;
+ fSampleCnt = that.fSampleCnt;
+ fStencilBits = that.fStencilBits;
+ fBackend = that.fBackend;
+
+ switch (that.fBackend) {
+ case GrBackendApi::kOpenGL:
+ fGLInfo = that.fGLInfo;
+ break;
+ case GrBackendApi::kVulkan:
+#ifdef SK_VULKAN
+ fVkInfo.assign(that.fVkInfo, this->isValid());
+#endif
+ break;
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn:
+ fDawnInfo = that.fDawnInfo;
+ break;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ fMtlInfo = that.fMtlInfo;
+ break;
+#endif
+ case GrBackendApi::kMock:
+ fMockInfo = that.fMockInfo;
+ break;
+ default:
+ SK_ABORT("Unknown GrBackend");
+ }
+ fIsValid = that.fIsValid;
+ return *this;
+}
+
+#ifdef SK_DAWN
+bool GrBackendRenderTarget::getDawnImageInfo(GrDawnImageInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kDawn == fBackend) {
+ *outInfo = fDawnInfo;
+ return true;
+ }
+ return false;
+}
+#endif
+
+bool GrBackendRenderTarget::getVkImageInfo(GrVkImageInfo* outInfo) const {
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *outInfo = fVkInfo.snapImageInfo();
+ return true;
+ }
+#endif
+ return false;
+}
+
+void GrBackendRenderTarget::setVkImageLayout(VkImageLayout layout) {
+#ifdef SK_VULKAN
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ fVkInfo.setImageLayout(layout);
+ }
+#endif
+}
+
+#ifdef SK_VULKAN
+sk_sp<GrVkImageLayout> GrBackendRenderTarget::getGrVkImageLayout() const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ return fVkInfo.getGrVkImageLayout();
+ }
+ return nullptr;
+}
+#endif
+
+#ifdef SK_METAL
+bool GrBackendRenderTarget::getMtlTextureInfo(GrMtlTextureInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kMetal == fBackend) {
+ *outInfo = fMtlInfo;
+ return true;
+ }
+ return false;
+}
+#endif
+
+bool GrBackendRenderTarget::getGLFramebufferInfo(GrGLFramebufferInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kOpenGL == fBackend) {
+ *outInfo = fGLInfo;
+ return true;
+ }
+ return false;
+}
+
+GrBackendFormat GrBackendRenderTarget::getBackendFormat() const {
+ if (!this->isValid()) {
+ return GrBackendFormat();
+ }
+ switch (fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ return GrBackendFormat::MakeGL(fGLInfo.fFormat, GR_GL_TEXTURE_NONE);
+#endif
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan: {
+ auto info = fVkInfo.snapImageInfo();
+ if (info.fYcbcrConversionInfo.isValid()) {
+ SkASSERT(info.fFormat == info.fYcbcrConversionInfo.fFormat);
+ return GrBackendFormat::MakeVk(info.fYcbcrConversionInfo);
+ }
+ return GrBackendFormat::MakeVk(info.fFormat);
+ }
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal: {
+ GrMtlTextureInfo mtlInfo;
+ SkAssertResult(this->getMtlTextureInfo(&mtlInfo));
+ return GrBackendFormat::MakeMtl(GrGetMTLPixelFormatFromMtlTextureInfo(mtlInfo));
+ }
+#endif
+ case GrBackendApi::kMock:
+ return fMockInfo.getBackendFormat();
+ default:
+ return GrBackendFormat();
+ }
+}
+
+bool GrBackendRenderTarget::getMockRenderTargetInfo(GrMockRenderTargetInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kMock == fBackend) {
+ *outInfo = fMockInfo;
+ return true;
+ }
+ return false;
+}
+
+bool GrBackendRenderTarget::isProtected() const {
+ if (!this->isValid() || this->backend() != GrBackendApi::kVulkan) {
+ return false;
+ }
+ return fVkInfo.isProtected();
+}
+
+#if GR_TEST_UTILS
+bool GrBackendRenderTarget::TestingOnly_Equals(const GrBackendRenderTarget& r0,
+ const GrBackendRenderTarget& r1) {
+ if (!r0.isValid() || !r1.isValid()) {
+ return false; // two invalid backend rendertargets are not considered equal
+ }
+
+ if (r0.fWidth != r1.fWidth ||
+ r0.fHeight != r1.fHeight ||
+ r0.fSampleCnt != r1.fSampleCnt ||
+ r0.fStencilBits != r1.fStencilBits ||
+ r0.fBackend != r1.fBackend) {
+ return false;
+ }
+
+ switch (r0.fBackend) {
+#ifdef SK_GL
+ case GrBackendApi::kOpenGL:
+ return r0.fGLInfo == r1.fGLInfo;
+#endif
+ case GrBackendApi::kMock:
+ return r0.fMockInfo == r1.fMockInfo;
+#ifdef SK_VULKAN
+ case GrBackendApi::kVulkan:
+ return r0.fVkInfo == r1.fVkInfo;
+#endif
+#ifdef SK_METAL
+ case GrBackendApi::kMetal:
+ return r0.fMtlInfo == r1.fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ case GrBackendApi::kDawn:
+ return r0.fDawnInfo == r1.fDawnInfo;
+#endif
+ default:
+ return false;
+ }
+}
+#endif
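
The constructors and accessors above form a small tagged union: a GrBackendTexture is only
valid for the backend it was built for, and the query methods for every other backend return
false. A minimal sketch of the client-side flow for the GL case follows; the texture id
textureID and the 256x256 size are illustrative assumptions, not part of this file.

    // Sketch: wrap an externally created GL texture (textureID is assumed to be
    // a live GL texture object owned by the client).
    GrGLTextureInfo glInfo;
    glInfo.fTarget = GR_GL_TEXTURE_2D;
    glInfo.fID     = textureID;
    glInfo.fFormat = GR_GL_RGBA8;

    GrBackendTexture backendTex(256, 256, GrMipMapped::kNo, glInfo);
    SkASSERT(backendTex.isValid());
    SkASSERT(GrBackendApi::kOpenGL == backendTex.backend());

    GrGLTextureInfo readBack;
    SkAssertResult(backendTex.getGLTextureInfo(&readBack));  // succeeds: GL-backed
    GrVkImageInfo vkInfo;
    SkASSERT(!backendTex.getVkImageInfo(&vkInfo));           // fails: wrong backend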
diff --git a/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.cpp b/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.cpp
new file mode 100644
index 0000000000..bd4cacb529
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMessageBus.h"
+#include "src/gpu/GrBackendTextureImageGenerator.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrResourceProviderPriv.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/gl/GrGLTexture.h"
+
+GrBackendTextureImageGenerator::RefHelper::RefHelper(GrTexture* texture, uint32_t owningContextID)
+ : fOriginalTexture(texture)
+ , fOwningContextID(owningContextID)
+ , fBorrowingContextReleaseProc(nullptr)
+ , fBorrowingContextID(SK_InvalidGenID) {}
+
+GrBackendTextureImageGenerator::RefHelper::~RefHelper() {
+ SkASSERT(fBorrowingContextID == SK_InvalidUniqueID);
+
+ // Generator has been freed, and no one is borrowing the texture. Notify the original cache
+ // that it can free the last ref, so it happens on the correct thread.
+ GrTextureFreedMessage msg { fOriginalTexture, fOwningContextID };
+ SkMessageBus<GrTextureFreedMessage>::Post(msg);
+}
+
+std::unique_ptr<SkImageGenerator>
+GrBackendTextureImageGenerator::Make(sk_sp<GrTexture> texture, GrSurfaceOrigin origin,
+ sk_sp<GrSemaphore> semaphore, SkColorType colorType,
+ SkAlphaType alphaType, sk_sp<SkColorSpace> colorSpace) {
+ GrContext* context = texture->getContext();
+
+ // Attach our texture to this context's resource cache. This ensures that deletion will happen
+ // in the correct thread/context. This adds the only ref to the texture that will persist from
+ // this point. That ref will be released when the generator's RefHelper is freed.
+ context->priv().getResourceCache()->insertDelayedTextureUnref(texture.get());
+
+ GrBackendTexture backendTexture = texture->getBackendTexture();
+
+ if (!context->priv().caps()->areColorTypeAndFormatCompatible(
+ SkColorTypeToGrColorType(colorType), backendTexture.getBackendFormat())) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::Make(texture->width(), texture->height(), colorType, alphaType,
+ std::move(colorSpace));
+ return std::unique_ptr<SkImageGenerator>(new GrBackendTextureImageGenerator(
+ info, texture.get(), origin, context->priv().contextID(),
+ std::move(semaphore), backendTexture));
+}
+
+GrBackendTextureImageGenerator::GrBackendTextureImageGenerator(const SkImageInfo& info,
+ GrTexture* texture,
+ GrSurfaceOrigin origin,
+ uint32_t owningContextID,
+ sk_sp<GrSemaphore> semaphore,
+ const GrBackendTexture& backendTex)
+ : INHERITED(info)
+ , fRefHelper(new RefHelper(texture, owningContextID))
+ , fSemaphore(std::move(semaphore))
+ , fBackendTexture(backendTex)
+ , fSurfaceOrigin(origin) {}
+
+GrBackendTextureImageGenerator::~GrBackendTextureImageGenerator() {
+ fRefHelper->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void GrBackendTextureImageGenerator::ReleaseRefHelper_TextureReleaseProc(void* ctx) {
+ RefHelper* refHelper = static_cast<RefHelper*>(ctx);
+ SkASSERT(refHelper);
+
+ refHelper->fBorrowingContextReleaseProc = nullptr;
+ refHelper->fBorrowingContextID = SK_InvalidGenID;
+ refHelper->unref();
+}
+
+sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(
+ GrRecordingContext* context, const SkImageInfo& info,
+ const SkIPoint& origin, bool willNeedMipMaps) {
+ SkASSERT(context);
+
+ if (context->backend() != fBackendTexture.backend()) {
+ return nullptr;
+ }
+ if (info.colorType() != this->getInfo().colorType()) {
+ return nullptr;
+ }
+
+ auto proxyProvider = context->priv().proxyProvider();
+ const GrCaps* caps = context->priv().caps();
+
+ fBorrowingMutex.acquire();
+ sk_sp<GrRefCntedCallback> releaseProcHelper;
+ if (SK_InvalidGenID != fRefHelper->fBorrowingContextID) {
+ if (fRefHelper->fBorrowingContextID != context->priv().contextID()) {
+ fBorrowingMutex.release();
+ return nullptr;
+ } else {
+ SkASSERT(fRefHelper->fBorrowingContextReleaseProc);
+ // Ref the release proc to be held by the proxy we make below
+ releaseProcHelper = sk_ref_sp(fRefHelper->fBorrowingContextReleaseProc);
+ }
+ } else {
+ SkASSERT(!fRefHelper->fBorrowingContextReleaseProc);
+ // The ref we add to fRefHelper here will be passed into and owned by the
+ // GrRefCntedCallback.
+ fRefHelper->ref();
+ releaseProcHelper.reset(
+ new GrRefCntedCallback(ReleaseRefHelper_TextureReleaseProc, fRefHelper));
+ fRefHelper->fBorrowingContextReleaseProc = releaseProcHelper.get();
+ }
+ fRefHelper->fBorrowingContextID = context->priv().contextID();
+ if (!fRefHelper->fBorrowedTextureKey.isValid()) {
+ static const auto kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(&fRefHelper->fBorrowedTextureKey, kDomain, 1);
+ builder[0] = this->uniqueID();
+ }
+ fBorrowingMutex.release();
+
+ SkASSERT(fRefHelper->fBorrowingContextID == context->priv().contextID());
+
+ GrBackendFormat backendFormat = fBackendTexture.getBackendFormat();
+ SkASSERT(backendFormat.isValid());
+
+ GrColorType grColorType = SkColorTypeToGrColorType(info.colorType());
+
+ GrPixelConfig config = caps->getConfigFromBackendFormat(backendFormat, grColorType);
+ if (kUnknown_GrPixelConfig == config) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = fBackendTexture.width();
+ desc.fHeight = fBackendTexture.height();
+ desc.fConfig = config;
+ GrMipMapped mipMapped = fBackendTexture.hasMipMaps() ? GrMipMapped::kYes : GrMipMapped::kNo;
+
+    // Ganesh assumes that, when wrapping a mipmapped backend texture from a client, its
+    // mipmaps are fully fleshed out.
+ GrMipMapsStatus mipMapsStatus = fBackendTexture.hasMipMaps()
+ ? GrMipMapsStatus::kValid : GrMipMapsStatus::kNotAllocated;
+
+ // Must make copies of member variables to capture in the lambda since this image generator may
+ // be deleted before we actually execute the lambda.
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createLazyProxy(
+ [refHelper = fRefHelper, releaseProcHelper, semaphore = fSemaphore,
+ backendTexture = fBackendTexture, grColorType](
+ GrResourceProvider* resourceProvider) -> GrSurfaceProxy::LazyCallbackResult {
+ if (semaphore) {
+ resourceProvider->priv().gpu()->waitSemaphore(semaphore);
+ }
+
+ // If a client re-draws the same image multiple times, the texture we return
+ // will be cached and re-used. If they draw a subset, though, we may be
+ // re-called. In that case, we want to re-use the borrowed texture we've
+ // previously created.
+ sk_sp<GrTexture> tex;
+ SkASSERT(refHelper->fBorrowedTextureKey.isValid());
+ auto surf = resourceProvider->findByUniqueKey<GrSurface>(
+ refHelper->fBorrowedTextureKey);
+ if (surf) {
+ SkASSERT(surf->asTexture());
+ tex = sk_ref_sp(surf->asTexture());
+ } else {
+ // We just gained access to the texture. If we're on the original context, we
+ // could use the original texture, but we'd have no way of detecting that it's
+ // no longer in-use. So we always make a wrapped copy, where the release proc
+ // informs us that the context is done with it. This is unfortunate - we'll have
+ // two texture objects referencing the same GPU object. However, no client can
+ // ever see the original texture, so this should be safe.
+ // We make the texture uncacheable so that the release proc is called ASAP.
+ tex = resourceProvider->wrapBackendTexture(
+ backendTexture, grColorType, kBorrow_GrWrapOwnership,
+ GrWrapCacheable::kNo, kRead_GrIOType);
+ if (!tex) {
+ return {};
+ }
+ tex->setRelease(releaseProcHelper);
+ tex->resourcePriv().setUniqueKey(refHelper->fBorrowedTextureKey);
+ }
+                // We use keys to avoid re-wrapping the GrBackendTexture in a GrTexture. This is
+                // unrelated to whatever SkImage key may be assigned to the proxy.
+ return {std::move(tex), true, GrSurfaceProxy::LazyInstantiationKeyMode::kUnsynced};
+ },
+ backendFormat, desc, GrRenderable::kNo, 1, fSurfaceOrigin, mipMapped, mipMapsStatus,
+ GrInternalSurfaceFlags::kReadOnly, SkBackingFit::kExact, SkBudgeted::kNo,
+ GrProtected::kNo, GrSurfaceProxy::UseAllocator::kYes);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ if (0 == origin.fX && 0 == origin.fY &&
+ info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height() &&
+ (!willNeedMipMaps || GrMipMapped::kYes == proxy->mipMapped())) {
+ // If the caller wants the entire texture and we have the correct mip support, we're done
+ return proxy;
+ } else {
+ // Otherwise, make a copy of the requested subset. Make sure our temporary is renderable,
+ // because Vulkan will want to do the copy as a draw. All other copies would require a
+ // layout change in Vulkan and we do not change the layout of borrowed images.
+ GrMipMapped mipMapped = willNeedMipMaps ? GrMipMapped::kYes : GrMipMapped::kNo;
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height());
+
+ return GrSurfaceProxy::Copy(context, proxy.get(), grColorType, mipMapped, subset,
+ SkBackingFit::kExact, SkBudgeted::kYes);
+ }
+}
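
A hedged sketch of the producing side of this handshake; "texture" and "semaphore" are
assumptions (a texture created on the producing GrContext and a semaphore that context
signals on flush), and the color parameters are illustrative.

    std::unique_ptr<SkImageGenerator> gen = GrBackendTextureImageGenerator::Make(
            std::move(texture), kTopLeft_GrSurfaceOrigin, std::move(semaphore),
            kRGBA_8888_SkColorType, kPremul_SkAlphaType, /*colorSpace=*/nullptr);

    // The generator then backs an SkImage; the first context that draws the image
    // becomes the single borrowing context enforced by onGenerateTexture() above.
    sk_sp<SkImage> image = SkImage::MakeFromGenerator(std::move(gen));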
diff --git a/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.h b/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.h
new file mode 100644
index 0000000000..8a82c5c88b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBackendTextureImageGenerator.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrBackendTextureImageGenerator_DEFINED
+#define GrBackendTextureImageGenerator_DEFINED
+
+#include "include/core/SkImageGenerator.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/private/GrResourceKey.h"
+#include "include/private/SkMutex.h"
+
+class GrSemaphore;
+
+/*
+ * This ImageGenerator is used to wrap a texture in one GrContext and can then be used as a source
+ * in another GrContext. It holds onto a semaphore which the producing GrContext will signal and the
+ * consuming GrContext will wait on before using the texture. Only one GrContext can ever be used
+ * as a consumer (this is mostly because Vulkan can't allow multiple things to wait on the same
+ * semaphore).
+ *
+ * In practice, this capability is used by clients to create backend-specific texture resources in
+ * one thread (with, say, GrContext-A) and then ship them over to another GrContext (say,
+ * GrContext-B) which will then use the texture as a source for draws. GrContext-A uses the
+ * semaphore to notify GrContext-B when the shared texture is ready to use.
+ */
+class GrBackendTextureImageGenerator : public SkImageGenerator {
+public:
+ static std::unique_ptr<SkImageGenerator> Make(sk_sp<GrTexture>, GrSurfaceOrigin,
+ sk_sp<GrSemaphore>, SkColorType,
+ SkAlphaType, sk_sp<SkColorSpace>);
+
+ ~GrBackendTextureImageGenerator() override;
+
+protected:
+ // NOTE: We would like to validate that the owning context hasn't been abandoned, but we can't
+ // do that safely (we might be on another thread). So assume everything is fine.
+ bool onIsValid(GrContext*) const override { return true; }
+
+ TexGenType onCanGenerateTexture() const override { return TexGenType::kCheap; }
+ sk_sp<GrTextureProxy> onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
+ const SkIPoint&, bool willNeedMipMaps) override;
+
+private:
+ GrBackendTextureImageGenerator(const SkImageInfo& info, GrTexture*, GrSurfaceOrigin,
+ uint32_t owningContextID, sk_sp<GrSemaphore>,
+ const GrBackendTexture&);
+
+ static void ReleaseRefHelper_TextureReleaseProc(void* ctx);
+
+ class RefHelper : public SkNVRefCnt<RefHelper> {
+ public:
+ RefHelper(GrTexture*, uint32_t owningContextID);
+
+ ~RefHelper();
+
+ GrTexture* fOriginalTexture;
+ uint32_t fOwningContextID;
+
+ // We use this key so that we don't rewrap the GrBackendTexture in a GrTexture for each
+ // proxy created from this generator for a particular borrowing context.
+ GrUniqueKey fBorrowedTextureKey;
+ // There is no ref associated with this pointer. We rely on our atomic bookkeeping with the
+ // context ID to know when this pointer is valid and safe to use. This is used to make sure
+ // all uses of the wrapped texture are finished on the borrowing context before we open
+ // this back up to other contexts. In general a ref to this release proc is owned by all
+ // proxies and gpu uses of the backend texture.
+ GrRefCntedCallback* fBorrowingContextReleaseProc;
+ uint32_t fBorrowingContextID;
+ };
+
+ RefHelper* fRefHelper;
+    // This mutex guards both the borrowing of the texture by one GrContext at a time and the
+    // creation of fBorrowingContextReleaseProc; the latter can happen when two threads with the
+    // same consuming GrContext try to generate a texture at the same time.
+ SkMutex fBorrowingMutex;
+
+ sk_sp<GrSemaphore> fSemaphore;
+
+ GrBackendTexture fBackendTexture;
+ GrSurfaceOrigin fSurfaceOrigin;
+
+ typedef SkImageGenerator INHERITED;
+};
+#endif // GrBackendTextureImageGenerator_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrBaseContextPriv.h b/gfx/skia/skia/src/gpu/GrBaseContextPriv.h
new file mode 100644
index 0000000000..c65aacbb8b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBaseContextPriv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBaseContextPriv_DEFINED
+#define GrBaseContextPriv_DEFINED
+
+#include "include/private/GrContext_Base.h"
+
+/** Class that exposes methods on GrContext_Base that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrContext_Base. It should never have
+ additional data members or virtual methods. */
+class GrBaseContextPriv {
+public:
+ // from GrContext_Base
+ uint32_t contextID() const { return fContext->contextID(); }
+
+ bool matches(GrContext_Base* candidate) const { return fContext->matches(candidate); }
+
+ const GrContextOptions& options() const { return fContext->options(); }
+
+ const GrCaps* caps() const { return fContext->caps(); }
+ sk_sp<const GrCaps> refCaps() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ GrImageContext* asImageContext() { return fContext->asImageContext(); }
+ GrRecordingContext* asRecordingContext() { return fContext->asRecordingContext(); }
+ GrContext* asDirectContext() { return fContext->asDirectContext(); }
+
+private:
+ explicit GrBaseContextPriv(GrContext_Base* context) : fContext(context) {}
+ GrBaseContextPriv(const GrBaseContextPriv&); // unimpl
+ GrBaseContextPriv& operator=(const GrBaseContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrBaseContextPriv* operator&() const;
+ GrBaseContextPriv* operator&();
+
+ GrContext_Base* fContext;
+
+ friend class GrContext_Base; // to construct/copy this type.
+};
+
+inline GrBaseContextPriv GrContext_Base::priv() { return GrBaseContextPriv(this); }
+
+inline const GrBaseContextPriv GrContext_Base::priv() const {
+ return GrBaseContextPriv(const_cast<GrContext_Base*>(this));
+}
+
+#endif
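
GrBaseContextPriv exists so that internal code can reach GrContext_Base's private state
without friending every caller: internals go through a copy-cheap "window" object that only
the wrapped class can construct. A self-contained sketch of the same idiom, using
hypothetical names (Widget and WidgetPriv are inventions for illustration):

    class Widget;

    class WidgetPriv {
    public:
        int internalId() const;                        // defined after Widget
    private:
        explicit WidgetPriv(Widget* w) : fWidget(w) {}
        Widget* fWidget;
        friend class Widget;                           // only Widget constructs the window
    };

    class Widget {
    public:
        WidgetPriv priv() { return WidgetPriv(this); }
    private:
        int fId = 42;
        friend class WidgetPriv;                       // the window may read the internals
    };

    inline int WidgetPriv::internalId() const { return fWidget->fId; }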
diff --git a/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.cpp b/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.cpp
new file mode 100644
index 0000000000..4ec2b537bb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrBitmapTextureMaker.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPixelRef.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/SkGr.h"
+
+GrBitmapTextureMaker::GrBitmapTextureMaker(GrRecordingContext* context, const SkBitmap& bitmap,
+ bool useDecal)
+ : INHERITED(context, bitmap.width(), bitmap.height(), bitmap.info().colorInfo(), useDecal)
+ , fBitmap(bitmap) {
+ if (!bitmap.isVolatile()) {
+ SkIPoint origin = bitmap.pixelRefOrigin();
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, bitmap.width(),
+ bitmap.height());
+ GrMakeKeyFromImageID(&fOriginalKey, bitmap.pixelRef()->getGenerationID(), subset);
+ }
+}
+
+sk_sp<GrTextureProxy> GrBitmapTextureMaker::refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) {
+ if (AllowedTexGenType::kCheap == onlyIfFast) {
+ return nullptr;
+ }
+
+ GrProxyProvider* proxyProvider = this->context()->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy;
+
+ if (fOriginalKey.isValid()) {
+ auto colorType = SkColorTypeToGrColorType(fBitmap.colorType());
+ proxy = proxyProvider->findOrCreateProxyByUniqueKey(fOriginalKey, colorType,
+ kTopLeft_GrSurfaceOrigin);
+ if (proxy && (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped())) {
+ return proxy;
+ }
+ }
+
+ if (!proxy) {
+ proxy = proxyProvider->createProxyFromBitmap(fBitmap, willBeMipped ? GrMipMapped::kYes
+ : GrMipMapped::kNo);
+ if (proxy) {
+ if (fOriginalKey.isValid()) {
+ proxyProvider->assignUniqueKeyToProxy(fOriginalKey, proxy.get());
+ }
+ if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
+ SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
+ if (fOriginalKey.isValid()) {
+ GrInstallBitmapUniqueKeyInvalidator(
+ fOriginalKey, proxyProvider->contextID(), fBitmap.pixelRef());
+ }
+ return proxy;
+ }
+ }
+ }
+
+ if (proxy) {
+ SkASSERT(willBeMipped);
+ SkASSERT(GrMipMapped::kNo == proxy->mipMapped());
+ // We need a mipped proxy, but we either found a proxy earlier that wasn't mipped or
+ // generated a non mipped proxy. Thus we generate a new mipped surface and copy the original
+ // proxy into the base layer. We will then let the gpu generate the rest of the mips.
+ GrColorType srcColorType = SkColorTypeToGrColorType(fBitmap.colorType());
+ if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(this->context(), proxy.get(),
+ srcColorType)) {
+ SkASSERT(mippedProxy->origin() == kTopLeft_GrSurfaceOrigin);
+ if (fOriginalKey.isValid()) {
+ // In this case we are stealing the key from the original proxy which should only
+ // happen when we have just generated mipmaps for an originally unmipped
+ // proxy/texture. This means that all future uses of the key will access the
+ // mipmapped version. The texture backing the unmipped version will remain in the
+ // resource cache until the last texture proxy referencing it is deleted at which
+ // time it too will be deleted or recycled.
+ SkASSERT(proxy->getUniqueKey() == fOriginalKey);
+ proxyProvider->removeUniqueKeyFromProxy(proxy.get());
+ proxyProvider->assignUniqueKeyToProxy(fOriginalKey, mippedProxy.get());
+ GrInstallBitmapUniqueKeyInvalidator(fOriginalKey, proxyProvider->contextID(),
+ fBitmap.pixelRef());
+ }
+ return mippedProxy;
+ }
+ // We failed to make a mipped proxy with the base copied into it. This could have
+ // been from failure to make the proxy or failure to do the copy. Thus we will fall
+        // back to just using the non mipped proxy; see skbug.com/7094.
+ return proxy;
+ }
+ return nullptr;
+}
+
+void GrBitmapTextureMaker::makeCopyKey(const CopyParams& copyParams, GrUniqueKey* copyKey) {
+ // Destination color space is irrelevant - we always upload the bitmap's contents as-is
+ if (fOriginalKey.isValid()) {
+ MakeCopyKeyFromOrigKey(fOriginalKey, copyParams, copyKey);
+ }
+}
+
+void GrBitmapTextureMaker::didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) {
+ GrInstallBitmapUniqueKeyInvalidator(copyKey, contextUniqueID, fBitmap.pixelRef());
+}
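
The caching above hinges on a GrUniqueKey derived from the bitmap's generation ID. A sketch
of that lifecycle using the same Skia-internal calls; "provider" (a live GrProxyProvider*)
and the immutable bitmap "bm" are assumed to exist:

    GrUniqueKey key;
    SkIRect subset = SkIRect::MakeWH(bm.width(), bm.height());
    GrMakeKeyFromImageID(&key, bm.getGenerationID(), subset);

    // The first lookup misses; the maker then uploads the bitmap and ties the new
    // proxy to the key via provider->assignUniqueKeyToProxy(), so later lookups hit.
    sk_sp<GrTextureProxy> proxy = provider->findOrCreateProxyByUniqueKey(
            key, GrColorType::kRGBA_8888, kTopLeft_GrSurfaceOrigin);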
diff --git a/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.h b/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.h
new file mode 100644
index 0000000000..9205b90a9e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBitmapTextureMaker.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBitmapTextureMaker_DEFINED
+#define GrBitmapTextureMaker_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "src/gpu/GrTextureMaker.h"
+
+/** This class manages the conversion of SW-backed bitmaps to GrTextures. If the input bitmap is
+ non-volatile the texture is cached using a key created from the pixels' image id and the
+ subset of the pixelref specified by the bitmap. */
+class GrBitmapTextureMaker : public GrTextureMaker {
+public:
+ GrBitmapTextureMaker(GrRecordingContext* context, const SkBitmap& bitmap,
+ bool useDecal = false);
+
+protected:
+ sk_sp<GrTextureProxy> refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) override;
+
+ void makeCopyKey(const CopyParams& copyParams, GrUniqueKey* copyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) override;
+
+private:
+ const SkBitmap fBitmap;
+ GrUniqueKey fOriginalKey;
+
+ typedef GrTextureMaker INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrBlend.h b/gfx/skia/skia/src/gpu/GrBlend.h
new file mode 100644
index 0000000000..8b681e8153
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlend.h
@@ -0,0 +1,151 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBlend_DEFINED
+#define GrBlend_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Equations for alpha-blending.
+ */
+enum GrBlendEquation {
+ // Basic blend equations.
+    kAdd_GrBlendEquation,             //!< Cs*S + Cd*D
+    kSubtract_GrBlendEquation,        //!< Cs*S - Cd*D
+    kReverseSubtract_GrBlendEquation, //!< Cd*D - Cs*S
+
+ // Advanced blend equations. These are described in the SVG and PDF specs.
+ kScreen_GrBlendEquation,
+ kOverlay_GrBlendEquation,
+ kDarken_GrBlendEquation,
+ kLighten_GrBlendEquation,
+ kColorDodge_GrBlendEquation,
+ kColorBurn_GrBlendEquation,
+ kHardLight_GrBlendEquation,
+ kSoftLight_GrBlendEquation,
+ kDifference_GrBlendEquation,
+ kExclusion_GrBlendEquation,
+ kMultiply_GrBlendEquation,
+ kHSLHue_GrBlendEquation,
+ kHSLSaturation_GrBlendEquation,
+ kHSLColor_GrBlendEquation,
+ kHSLLuminosity_GrBlendEquation,
+
+ kIllegal_GrBlendEquation,
+
+ kFirstAdvancedGrBlendEquation = kScreen_GrBlendEquation,
+ kLast_GrBlendEquation = kIllegal_GrBlendEquation,
+};
+
+static const int kGrBlendEquationCnt = kLast_GrBlendEquation + 1;
+
+
+/**
+ * Coefficients for alpha-blending.
+ */
+enum GrBlendCoeff {
+    kZero_GrBlendCoeff,    //!< 0
+    kOne_GrBlendCoeff,     //!< 1
+    kSC_GrBlendCoeff,      //!< src color
+    kISC_GrBlendCoeff,     //!< one minus src color
+    kDC_GrBlendCoeff,      //!< dst color
+    kIDC_GrBlendCoeff,     //!< one minus dst color
+    kSA_GrBlendCoeff,      //!< src alpha
+    kISA_GrBlendCoeff,     //!< one minus src alpha
+    kDA_GrBlendCoeff,      //!< dst alpha
+    kIDA_GrBlendCoeff,     //!< one minus dst alpha
+    kConstC_GrBlendCoeff,  //!< constant color
+    kIConstC_GrBlendCoeff, //!< one minus constant color
+    kConstA_GrBlendCoeff,  //!< constant color alpha
+    kIConstA_GrBlendCoeff, //!< one minus constant color alpha
+ kS2C_GrBlendCoeff,
+ kIS2C_GrBlendCoeff,
+ kS2A_GrBlendCoeff,
+ kIS2A_GrBlendCoeff,
+
+ kIllegal_GrBlendCoeff,
+
+ kLast_GrBlendCoeff = kIllegal_GrBlendCoeff,
+};
+
+static const int kGrBlendCoeffCnt = kLast_GrBlendCoeff + 1;
+
+static constexpr bool GrBlendCoeffRefsSrc(const GrBlendCoeff coeff) {
+ return kSC_GrBlendCoeff == coeff || kISC_GrBlendCoeff == coeff || kSA_GrBlendCoeff == coeff ||
+ kISA_GrBlendCoeff == coeff;
+}
+
+static constexpr bool GrBlendCoeffRefsDst(const GrBlendCoeff coeff) {
+ return kDC_GrBlendCoeff == coeff || kIDC_GrBlendCoeff == coeff || kDA_GrBlendCoeff == coeff ||
+ kIDA_GrBlendCoeff == coeff;
+}
+
+static constexpr bool GrBlendCoeffRefsSrc2(const GrBlendCoeff coeff) {
+ return kS2C_GrBlendCoeff == coeff || kIS2C_GrBlendCoeff == coeff ||
+ kS2A_GrBlendCoeff == coeff || kIS2A_GrBlendCoeff == coeff;
+}
+
+static constexpr bool GrBlendCoeffsUseSrcColor(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
+ return kZero_GrBlendCoeff != srcCoeff || GrBlendCoeffRefsSrc(dstCoeff);
+}
+
+static constexpr bool GrBlendCoeffsUseDstColor(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
+ return GrBlendCoeffRefsDst(srcCoeff) || kZero_GrBlendCoeff != dstCoeff;
+}
+
+static constexpr bool GrBlendEquationIsAdvanced(GrBlendEquation equation) {
+ return equation >= kFirstAdvancedGrBlendEquation
+ && equation != kIllegal_GrBlendEquation;
+}
+
+static constexpr bool GrBlendModifiesDst(GrBlendEquation equation, GrBlendCoeff srcCoeff,
+ GrBlendCoeff dstCoeff) {
+ return (kAdd_GrBlendEquation != equation && kReverseSubtract_GrBlendEquation != equation) ||
+ kZero_GrBlendCoeff != srcCoeff || kOne_GrBlendCoeff != dstCoeff;
+}
+
+/**
+ * Advanced blend equations can always tweak alpha for coverage. (See GrCustomXfermode.cpp)
+ *
+ * For "add" and "reverse subtract" the blend equation with f=coverage is:
+ *
+ * D' = f * (S * srcCoeff + D * dstCoeff) + (1-f) * D
+ * = f * S * srcCoeff + D * (f * dstCoeff + (1 - f))
+ *
+ * (Let srcCoeff be negative for reverse subtract.) We can tweak alpha for coverage when the
+ * following relationship holds:
+ *
+ * (f*S) * srcCoeff' + D * dstCoeff' == f * S * srcCoeff + D * (f * dstCoeff + (1 - f))
+ *
+ * (Where srcCoeff' and dstCoeff' have any reference to S pre-multiplied by f.)
+ *
+ * It's easy to see this works for the src term as long as srcCoeff' == srcCoeff (meaning srcCoeff
+ * does not reference S). For the dst term, this will work as long as the following is true:
+ *
+ * dstCoeff' == f * dstCoeff + (1 - f)
+ * dstCoeff' == 1 - f * (1 - dstCoeff)
+ *
+ * By inspection we can see this will work as long as dstCoeff has a 1, and any other term in
+ * dstCoeff references S.
+ *
+ * Moreover, if the blend doesn't modify the dst at all then it is ok to arbitrarily modify the src
+ * color so folding in coverage is allowed.
+ */
+static constexpr bool GrBlendAllowsCoverageAsAlpha(GrBlendEquation equation,
+ GrBlendCoeff srcCoeff,
+ GrBlendCoeff dstCoeff) {
+ return GrBlendEquationIsAdvanced(equation) ||
+ !GrBlendModifiesDst(equation, srcCoeff, dstCoeff) ||
+ ((kAdd_GrBlendEquation == equation || kReverseSubtract_GrBlendEquation == equation) &&
+ !GrBlendCoeffRefsSrc(srcCoeff) &&
+ (kOne_GrBlendCoeff == dstCoeff || kISC_GrBlendCoeff == dstCoeff ||
+ kISA_GrBlendCoeff == dstCoeff));
+}
+
+#endif
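
Since every predicate in this header is constexpr, the derivation above can be spot-checked
at compile time. A sketch (standard src-over, "plus", and src-in coefficients; these asserts
are not part of the header):

    // src-over: D' = S*1 + D*(1-Sa). srcCoeff does not reference S, and dstCoeff is
    // "one minus" a term referencing S, so coverage can be folded into alpha.
    static_assert(GrBlendAllowsCoverageAsAlpha(kAdd_GrBlendEquation,
                                               kOne_GrBlendCoeff,
                                               kISA_GrBlendCoeff), "");
    // "plus": dstCoeff == 1 also satisfies the dst-term condition.
    static_assert(GrBlendAllowsCoverageAsAlpha(kAdd_GrBlendEquation,
                                               kOne_GrBlendCoeff,
                                               kOne_GrBlendCoeff), "");
    // src-in: dstCoeff == 0 has no "1" term, so coverage cannot be folded.
    static_assert(!GrBlendAllowsCoverageAsAlpha(kAdd_GrBlendEquation,
                                                kDA_GrBlendCoeff,
                                                kZero_GrBlendCoeff), "");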
diff --git a/gfx/skia/skia/src/gpu/GrBlurUtils.cpp b/gfx/skia/skia/src/gpu/GrBlurUtils.cpp
new file mode 100644
index 0000000000..7c922cbdca
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlurUtils.cpp
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrBlurUtils.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrSoftwarePathRenderer.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#include "src/gpu/geometry/GrShape.h"
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkTLazy.h"
+#include "src/gpu/SkGr.h"
+
+static bool clip_bounds_quick_reject(const SkIRect& clipBounds, const SkIRect& rect) {
+ return clipBounds.isEmpty() || rect.isEmpty() || !SkIRect::Intersects(clipBounds, rect);
+}
+
+// Draw a mask using the supplied paint. Since the coverage/geometry
+// is already burnt into the mask this boils down to a rect draw.
+// Return true if the mask was successfully drawn.
+static bool draw_mask(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& maskRect,
+ GrPaint&& paint,
+ sk_sp<GrTextureProxy> mask,
+ GrColorType maskColorType) {
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+        , fStencilBits(0)  // We always create stencil buffers internally for Vulkan
+ SkMatrix matrix = SkMatrix::MakeTrans(-SkIntToScalar(maskRect.fLeft),
+ -SkIntToScalar(maskRect.fTop));
+ matrix.preConcat(viewMatrix);
+ paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(std::move(mask), maskColorType,
+ matrix));
+
+ renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::Make(maskRect), inverse);
+ return true;
+}
+
+static void mask_release_proc(void* addr, void* /*context*/) {
+ SkMask::FreeImage(addr);
+}
+
+static bool sw_draw_with_mask_filter(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ const GrClip& clipData,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape,
+ const SkMaskFilter* filter,
+ const SkIRect& clipBounds,
+ GrPaint&& paint,
+ const GrUniqueKey& key) {
+ SkASSERT(filter);
+ SkASSERT(!shape.style().applies());
+
+ auto proxyProvider = context->priv().proxyProvider();
+
+ sk_sp<GrTextureProxy> filteredMask;
+
+ SkStrokeRec::InitStyle fillOrHairline = shape.style().isSimpleHairline()
+ ? SkStrokeRec::kHairline_InitStyle
+ : SkStrokeRec::kFill_InitStyle;
+
+ if (key.isValid()) {
+ // TODO: this cache look up is duplicated in draw_shape_with_mask_filter for gpu
+ filteredMask = proxyProvider->findOrCreateProxyByUniqueKey(key, GrColorType::kAlpha_8,
+ kTopLeft_GrSurfaceOrigin);
+ }
+
+ SkIRect drawRect;
+ if (filteredMask) {
+ SkRect devBounds = shape.bounds();
+ viewMatrix.mapRect(&devBounds);
+
+ // Here we need to recompute the destination bounds in order to draw the mask correctly
+ SkMask srcM, dstM;
+ if (!SkDraw::ComputeMaskBounds(devBounds, &clipBounds, filter, &viewMatrix,
+ &srcM.fBounds)) {
+ return false;
+ }
+
+ srcM.fFormat = SkMask::kA8_Format;
+
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
+ return false;
+ }
+
+        // Unfortunately, we cannot double check that the computed bounds (i.e., dstM.fBounds)
+        // match the stored bounds of the mask because the proxy may have been recreated and,
+        // when it is recreated, it just gets the bounds of the underlying GrTexture (which
+        // might be a loose fit).
+ drawRect = dstM.fBounds;
+ } else {
+ // TODO: it seems like we could create an SkDraw here and set its fMatrix field rather
+ // than explicitly transforming the path to device space.
+ SkPath devPath;
+
+ shape.asPath(&devPath);
+
+ devPath.transform(viewMatrix);
+
+ SkMask srcM, dstM;
+ if (!SkDraw::DrawToMask(devPath, &clipBounds, filter, &viewMatrix, &srcM,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode, fillOrHairline)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoSrc(srcM.fImage);
+
+ SkASSERT(SkMask::kA8_Format == srcM.fFormat);
+
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
+ return false;
+ }
+ // this will free-up dstM when we're done (allocated in filterMask())
+ SkAutoMaskFreeImage autoDst(dstM.fImage);
+
+ if (clip_bounds_quick_reject(clipBounds, dstM.fBounds)) {
+ return false;
+ }
+
+        // We now have a device-aligned 8-bit mask in dstM, ready to be drawn using
+        // the current clip (and identity matrix) and GrPaint settings.
+ SkBitmap bm;
+ if (!bm.installPixels(SkImageInfo::MakeA8(dstM.fBounds.width(), dstM.fBounds.height()),
+ autoDst.release(), dstM.fRowBytes, mask_release_proc, nullptr)) {
+ return false;
+ }
+ bm.setImmutable();
+
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bm);
+ if (!image) {
+ return false;
+ }
+
+ filteredMask = proxyProvider->createTextureProxy(std::move(image), 1, SkBudgeted::kYes,
+ SkBackingFit::kApprox);
+ if (!filteredMask) {
+ return false;
+ }
+
+ SkASSERT(kTopLeft_GrSurfaceOrigin == filteredMask->origin());
+
+ drawRect = dstM.fBounds;
+
+ if (key.isValid()) {
+ proxyProvider->assignUniqueKeyToProxy(key, filteredMask.get());
+ }
+ }
+
+ return draw_mask(renderTargetContext, clipData, viewMatrix, drawRect,
+ std::move(paint), std::move(filteredMask), GrColorType::kAlpha_8);
+}
+
+// Create a mask of 'shape' and return the resulting renderTargetContext
+static std::unique_ptr<GrRenderTargetContext> create_mask_GPU(GrRecordingContext* context,
+ const SkIRect& maskRect,
+ const SkMatrix& origViewMatrix,
+ const GrShape& shape,
+ int sampleCnt) {
+ // Use GrResourceProvider::MakeApprox to implement our own approximate size matching, but demand
+ // a "SkBackingFit::kExact" size match on the actual render target. We do this because the
+ // filter will reach outside the src bounds, so we need to pre-clear these values to ensure a
+ // "decal" sampling effect (i.e., ensure reads outside the src bounds return alpha=0).
+ //
+ // FIXME: Reads outside the left and top edges will actually clamp to the edge pixel. And in the
+ // event that MakeApprox does not change the size, reads outside the right and/or bottom will do
+ // the same. We should offset our filter within the render target and expand the size as needed
+ // to guarantee at least 1px of padding on all sides.
+ auto rtContext = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, GrResourceProvider::MakeApprox(maskRect.width()),
+ GrResourceProvider::MakeApprox(maskRect.height()), GrColorType::kAlpha_8, nullptr,
+ sampleCnt, GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin);
+ if (!rtContext) {
+ return nullptr;
+ }
+
+ rtContext->clear(SK_PMColor4fTRANSPARENT);
+
+ GrPaint maskPaint;
+ maskPaint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
+
+ // setup new clip
+ const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
+ GrFixedClip clip(clipRect);
+
+ // Draw the mask into maskTexture with the path's integerized top-left at
+ // the origin using maskPaint.
+ SkMatrix viewMatrix = origViewMatrix;
+ viewMatrix.postTranslate(-SkIntToScalar(maskRect.fLeft), -SkIntToScalar(maskRect.fTop));
+ rtContext->drawShape(clip, std::move(maskPaint), GrAA::kYes, viewMatrix, shape);
+ return rtContext;
+}
+
+static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
+ SkIRect* devBounds) {
+ SkRect shapeBounds = shape.styledBounds();
+ if (shapeBounds.isEmpty()) {
+ return false;
+ }
+ SkRect shapeDevBounds;
+ matrix.mapRect(&shapeDevBounds, shapeBounds);
+ // Even though these are "unclipped" bounds we still clip to the int32_t range.
+ // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
+ // would round down to this value when cast to a float, but who really cares.
+ // INT32_MIN is exactly representable.
+ static constexpr int32_t kMaxInt = 2147483520;
+ if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
+ return false;
+ }
+ // Make sure that the resulting SkIRect can have representable width and height
+ if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
+ SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
+ return false;
+ }
+ shapeDevBounds.roundOut(devBounds);
+ return true;
+}
+
+// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
+// is no intersection.
+static bool get_shape_and_clip_bounds(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const GrShape& shape,
+ const SkMatrix& matrix,
+ SkIRect* unclippedDevShapeBounds,
+ SkIRect* devClipBounds) {
+ // compute bounds as intersection of rt size, clip, and path
+ clip.getConservativeBounds(renderTargetContext->width(),
+ renderTargetContext->height(),
+ devClipBounds);
+
+ if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
+ *unclippedDevShapeBounds = SkIRect::EmptyIRect();
+ return false;
+ }
+
+ return true;
+}
+
+static void draw_shape_with_mask_filter(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilterBase* maskFilter,
+ const GrShape& origShape) {
+ SkASSERT(maskFilter);
+
+ const GrShape* shape = &origShape;
+ SkTLazy<GrShape> tmpShape;
+
+ if (origShape.style().applies()) {
+ SkScalar styleScale = GrStyle::MatrixToScaleFactor(viewMatrix);
+ if (0 == styleScale) {
+ return;
+ }
+
+ tmpShape.init(origShape.applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, styleScale));
+ if (tmpShape.get()->isEmpty()) {
+ return;
+ }
+
+ shape = tmpShape.get();
+ }
+
+ if (maskFilter->directFilterMaskGPU(context,
+ renderTargetContext,
+ std::move(paint),
+ clip,
+ viewMatrix,
+ *shape)) {
+ // the mask filter was able to draw itself directly, so there's nothing
+ // left to do.
+ return;
+ }
+ assert_alive(paint);
+
+ // If the path is hairline, ignore inverse fill.
+ bool inverseFilled = shape->inverseFilled() &&
+ !GrPathRenderer::IsStrokeHairlineOrEquivalent(shape->style(),
+ viewMatrix, nullptr);
+
+ SkIRect unclippedDevShapeBounds, devClipBounds;
+ if (!get_shape_and_clip_bounds(renderTargetContext, clip, *shape, viewMatrix,
+ &unclippedDevShapeBounds,
+ &devClipBounds)) {
+ // TODO: just cons up an opaque mask here
+ if (!inverseFilled) {
+ return;
+ }
+ }
+
+ // To prevent overloading the cache with entries during animations we limit the cache of masks
+ // to cases where the matrix preserves axis alignment.
+#ifdef SK_DISABLE_MASKFILTERED_MASK_CACHING
+ bool useCache = false;
+#else
+ bool useCache = !inverseFilled && viewMatrix.preservesAxisAlignment() &&
+ shape->hasUnstyledKey() && as_MFB(maskFilter)->asABlur(nullptr);
+#endif
+
+ const SkIRect* boundsForClip = &devClipBounds;
+ if (useCache) {
+ SkIRect clippedMaskRect, unClippedMaskRect;
+ maskFilter->canFilterMaskGPU(*shape, unclippedDevShapeBounds, devClipBounds,
+ viewMatrix, &clippedMaskRect);
+ maskFilter->canFilterMaskGPU(*shape, unclippedDevShapeBounds, unclippedDevShapeBounds,
+ viewMatrix, &unClippedMaskRect);
+ if (clippedMaskRect.isEmpty()) {
+ return;
+ }
+
+ // Use the cache only if >50% of the filtered mask is visible.
+ int unclippedWidth = unClippedMaskRect.width();
+ int unclippedHeight = unClippedMaskRect.height();
+ int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
+ int64_t clippedArea = sk_64_mul(clippedMaskRect.width(), clippedMaskRect.height());
+ int maxTextureSize = renderTargetContext->caps()->maxTextureSize();
+ if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
+ unclippedHeight > maxTextureSize) {
+ useCache = false;
+ } else {
+ // Make the clip not affect the mask
+ boundsForClip = &unclippedDevShapeBounds;
+ }
+ }
+
+ GrUniqueKey maskKey;
+ if (useCache) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + 2 + shape->unstyledKeySize(),
+ "Mask Filtered Masks");
+
+ // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
+ SkScalar sx = viewMatrix.get(SkMatrix::kMScaleX);
+ SkScalar sy = viewMatrix.get(SkMatrix::kMScaleY);
+ SkScalar kx = viewMatrix.get(SkMatrix::kMSkewX);
+ SkScalar ky = viewMatrix.get(SkMatrix::kMSkewY);
+ SkScalar tx = viewMatrix.get(SkMatrix::kMTransX);
+ SkScalar ty = viewMatrix.get(SkMatrix::kMTransY);
+ // Allow 8 bits each in x and y of subpixel positioning.
+ SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
+ SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
+
+ builder[0] = SkFloat2Bits(sx);
+ builder[1] = SkFloat2Bits(sy);
+ builder[2] = SkFloat2Bits(kx);
+ builder[3] = SkFloat2Bits(ky);
+ // Distinguish between hairline and filled paths. For hairlines, we also need to include
+ // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
+ // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
+ // all cases we might see.
+ uint32_t styleBits = shape->style().isSimpleHairline()
+ ? ((shape->style().strokeRec().getCap() << 1) | 1)
+ : 0;
+ builder[4] = fracX | (fracY >> 8) | (styleBits << 16);
+
+ SkMaskFilterBase::BlurRec rec;
+ SkAssertResult(as_MFB(maskFilter)->asABlur(&rec));
+
+ builder[5] = rec.fStyle; // TODO: we could put this with the other style bits
+ builder[6] = SkFloat2Bits(rec.fSigma);
+ shape->writeUnstyledKey(&builder[7]);
+ }
+
+ SkIRect maskRect;
+ if (maskFilter->canFilterMaskGPU(*shape,
+ unclippedDevShapeBounds,
+ *boundsForClip,
+ viewMatrix,
+ &maskRect)) {
+ if (clip_bounds_quick_reject(*boundsForClip, maskRect)) {
+ // clipped out
+ return;
+ }
+
+ sk_sp<GrTextureProxy> filteredMask;
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ if (maskKey.isValid()) {
+ // TODO: this cache look up is duplicated in sw_draw_with_mask_filter for raster
+ filteredMask = proxyProvider->findOrCreateProxyByUniqueKey(
+ maskKey, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin);
+ }
+
+ if (!filteredMask) {
+ std::unique_ptr<GrRenderTargetContext> maskRTC(create_mask_GPU(
+ context,
+ maskRect,
+ viewMatrix,
+ *shape,
+ renderTargetContext->numSamples()));
+ if (maskRTC) {
+ filteredMask = maskFilter->filterMaskGPU(context,
+ maskRTC->asTextureProxyRef(),
+ maskRTC->colorInfo().colorType(),
+ maskRTC->colorInfo().alphaType(),
+ viewMatrix,
+ maskRect);
+                SkASSERT(!filteredMask || kTopLeft_GrSurfaceOrigin == filteredMask->origin());
+
+ if (filteredMask && maskKey.isValid()) {
+ proxyProvider->assignUniqueKeyToProxy(maskKey, filteredMask.get());
+ }
+ }
+ }
+
+ if (filteredMask) {
+ if (draw_mask(renderTargetContext, clip, viewMatrix,
+ maskRect, std::move(paint), std::move(filteredMask),
+ GrColorType::kAlpha_8)) {
+ // This path is completely drawn
+ return;
+ }
+ assert_alive(paint);
+ }
+ }
+
+ sw_draw_with_mask_filter(context, renderTargetContext, clip, viewMatrix, *shape,
+ maskFilter, *boundsForClip, std::move(paint), maskKey);
+}
+
+void GrBlurUtils::drawShapeWithMaskFilter(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const GrShape& shape,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilter* mf) {
+ draw_shape_with_mask_filter(context, renderTargetContext, clip, std::move(paint),
+ viewMatrix, as_MFB(mf), shape);
+}
+
+void GrBlurUtils::drawShapeWithMaskFilter(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const SkPaint& paint,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape) {
+ if (context->priv().abandoned()) {
+ return;
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(context, renderTargetContext->colorInfo(), paint, viewMatrix, &grPaint)) {
+ return;
+ }
+
+ SkMaskFilterBase* mf = as_MFB(paint.getMaskFilter());
+ if (mf && !mf->hasFragmentProcessor()) {
+ // The MaskFilter wasn't already handled in SkPaintToGrPaint
+ draw_shape_with_mask_filter(context, renderTargetContext, clip, std::move(grPaint),
+ viewMatrix, mf, shape);
+ } else {
+ GrAA aa = GrAA(paint.isAntiAlias());
+ renderTargetContext->drawShape(clip, std::move(grPaint), aa, viewMatrix, shape);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrBlurUtils.h b/gfx/skia/skia/src/gpu/GrBlurUtils.h
new file mode 100644
index 0000000000..d2f2570de0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlurUtils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBlurUtils_DEFINED
+#define GrBlurUtils_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+
+class GrClip;
+class GrContext;
+class GrPaint;
+class GrRecordingContext;
+class GrRenderTarget;
+class GrRenderTargetContext;
+class GrShape;
+class GrStyle;
+struct SkIRect;
+class SkMaskFilter;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPathEffect;
+
+
+/**
+ * Blur utilities.
+ */
+namespace GrBlurUtils {
+ /**
+ * Draw a shape handling the mask filter if present.
+ */
+ void drawShapeWithMaskFilter(GrRecordingContext*,
+ GrRenderTargetContext*,
+ const GrClip&,
+ const SkPaint&,
+ const SkMatrix& viewMatrix,
+ const GrShape&);
+
+ /**
+ * Draw a shape handling the mask filter. The mask filter is not optional.
+ * The GrPaint will be modified after return.
+ */
+ void drawShapeWithMaskFilter(GrRecordingContext*,
+ GrRenderTargetContext*,
+ const GrClip&,
+ const GrShape&,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilter*);
+};
+
+#endif
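
A hedged sketch of the SkPaint overload in use; the recording context "ctx", render target
context "rtc", the clip, and the 4.0f blur sigma are illustrative assumptions:

    SkPaint paint;
    paint.setAntiAlias(true);
    paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, /*sigma=*/4.0f));

    SkPath path;
    path.addCircle(100, 100, 50);
    GrShape shape(path);

    // Tries the filter's direct GPU path first, then a cached filtered mask,
    // then the software fallback, per GrBlurUtils.cpp above.
    GrBlurUtils::drawShapeWithMaskFilter(ctx, rtc, clip, paint, SkMatrix::I(), shape);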
diff --git a/gfx/skia/skia/src/gpu/GrBuffer.h b/gfx/skia/skia/src/gpu/GrBuffer.h
new file mode 100644
index 0000000000..b9780d1f4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBuffer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuffer_DEFINED
+#define GrBuffer_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/** Base class for a GPU buffer object or a client-side array. */
+class GrBuffer {
+public:
+ GrBuffer(const GrBuffer&) = delete;
+ GrBuffer& operator=(const GrBuffer&) = delete;
+
+ virtual ~GrBuffer() = default;
+
+ // Our subclasses derive from different ref counting base classes. In order to use base
+ // class pointers with sk_sp we virtualize ref() and unref().
+ virtual void ref() const = 0;
+ virtual void unref() const = 0;
+
+ /** Size of the buffer in bytes. */
+ virtual size_t size() const = 0;
+
+ /** Is this an instance of GrCpuBuffer? Otherwise, an instance of GrGpuBuffer. */
+ virtual bool isCpuBuffer() const = 0;
+
+protected:
+ GrBuffer() = default;
+};
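+
+// A minimal sketch of the dispatch this interface enables (assuming, per the
+// comment above, that GrCpuBuffer and GrGpuBuffer are the only subclasses):
+//
+//     void unmapIfMapped(GrBuffer* b) {
+//         if (!b->isCpuBuffer()) {  // must be a GrGpuBuffer
+//             auto* gpuBuf = static_cast<GrGpuBuffer*>(b);
+//             if (gpuBuf->isMapped()) {
+//                 gpuBuf->unmap();
+//             }
+//         }  // CPU buffers never need unmapping
+//     }
+//
+// GrBufferAllocPool.cpp uses exactly this pattern (see UNMAP_BUFFER there).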
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp b/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp
new file mode 100644
index 0000000000..8d09d5c4b8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkMacros.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrBufferAllocPool.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrCpuBuffer.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/GrResourceProvider.h"
+
+sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
+ int maxBuffersToCache) {
+ return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
+}
+
+GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
+ : fMaxBuffersToCache(maxBuffersToCache) {
+ if (fMaxBuffersToCache) {
+ fBuffers.reset(new Buffer[fMaxBuffersToCache]);
+ }
+}
+
+sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
+ bool mustBeInitialized) {
+ SkASSERT(size > 0);
+ Buffer* result = nullptr;
+ if (size == kDefaultBufferSize) {
+ int i = 0;
+ for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
+ SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
+ if (fBuffers[i].fBuffer->unique()) {
+ result = &fBuffers[i];
+ }
+ }
+ if (!result && i < fMaxBuffersToCache) {
+ fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
+ result = &fBuffers[i];
+ }
+ }
+ Buffer tempResult;
+ if (!result) {
+ tempResult.fBuffer = GrCpuBuffer::Make(size);
+ result = &tempResult;
+ }
+ if (mustBeInitialized && !result->fCleared) {
+ result->fCleared = true;
+ memset(result->fBuffer->data(), 0, result->fBuffer->size());
+ }
+ return result->fBuffer;
+}
+
+void GrBufferAllocPool::CpuBufferCache::releaseAll() {
+ for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
+ fBuffers[i].fBuffer.reset();
+ fBuffers[i].fCleared = false;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+ #define VALIDATE validate
+#else
+ static void VALIDATE(bool = false) {}
+#endif
+
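+// The do { ... } while (false) wrapper below makes the macro usable as a
+// single statement, e.g. in an unbraced if/else, without swallowing the
+// caller's trailing semicolon.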
+#define UNMAP_BUFFER(block) \
+ do { \
+ TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer", \
+ TRACE_EVENT_SCOPE_THREAD, "percent_unwritten", \
+ (float)((block).fBytesFree) / (block).fBuffer->size()); \
+ SkASSERT(!block.fBuffer->isCpuBuffer()); \
+ static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap(); \
+ } while (false)
+
+constexpr size_t GrBufferAllocPool::kDefaultBufferSize;
+
+GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
+ sk_sp<CpuBufferCache> cpuBufferCache)
+ : fBlocks(8)
+ , fCpuBufferCache(std::move(cpuBufferCache))
+ , fGpu(gpu)
+ , fBufferType(bufferType) {}
+
+void GrBufferAllocPool::deleteBlocks() {
+ if (fBlocks.count()) {
+ GrBuffer* buffer = fBlocks.back().fBuffer.get();
+ if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
+ UNMAP_BUFFER(fBlocks.back());
+ }
+ }
+ while (!fBlocks.empty()) {
+ this->destroyBlock();
+ }
+ SkASSERT(!fBufferPtr);
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+ VALIDATE();
+ this->deleteBlocks();
+}
+
+void GrBufferAllocPool::reset() {
+ VALIDATE();
+ fBytesInUse = 0;
+ this->deleteBlocks();
+ this->resetCpuData(0);
+ VALIDATE();
+}
+
+void GrBufferAllocPool::unmap() {
+ VALIDATE();
+
+ if (fBufferPtr) {
+ BufferBlock& block = fBlocks.back();
+ GrBuffer* buffer = block.fBuffer.get();
+ if (!buffer->isCpuBuffer()) {
+ if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
+ UNMAP_BUFFER(block);
+ } else {
+ size_t flushSize = block.fBuffer->size() - block.fBytesFree;
+ this->flushCpuData(fBlocks.back(), flushSize);
+ }
+ }
+ fBufferPtr = nullptr;
+ }
+ VALIDATE();
+}
+
+#ifdef SK_DEBUG
+void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
+ bool wasDestroyed = false;
+ if (fBufferPtr) {
+ SkASSERT(!fBlocks.empty());
+ const GrBuffer* buffer = fBlocks.back().fBuffer.get();
+ if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
+ SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
+ }
+ } else if (!fBlocks.empty()) {
+ const GrBuffer* buffer = fBlocks.back().fBuffer.get();
+ SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
+ }
+ size_t bytesInUse = 0;
+ for (int i = 0; i < fBlocks.count() - 1; ++i) {
+ const GrBuffer* buffer = fBlocks[i].fBuffer.get();
+ SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
+ }
+ for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
+ GrBuffer* buffer = fBlocks[i].fBuffer.get();
+ if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
+ wasDestroyed = true;
+ } else {
+ size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
+ bytesInUse += bytes;
+ SkASSERT(bytes || unusedBlockAllowed);
+ }
+ }
+
+ if (!wasDestroyed) {
+ SkASSERT(bytesInUse == fBytesInUse);
+ if (unusedBlockAllowed) {
+ SkASSERT((fBytesInUse && !fBlocks.empty()) ||
+ (!fBytesInUse && (fBlocks.count() < 2)));
+ } else {
+ SkASSERT((0 == fBytesInUse) == fBlocks.empty());
+ }
+ }
+}
+#endif
+
+void* GrBufferAllocPool::makeSpace(size_t size,
+ size_t alignment,
+ sk_sp<const GrBuffer>* buffer,
+ size_t* offset) {
+ VALIDATE();
+
+ SkASSERT(buffer);
+ SkASSERT(offset);
+
+ if (fBufferPtr) {
+ BufferBlock& back = fBlocks.back();
+ size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
+ size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+ SkSafeMath safeMath;
+ size_t alignedSize = safeMath.add(pad, size);
+ if (!safeMath.ok()) {
+ return nullptr;
+ }
+ if (alignedSize <= back.fBytesFree) {
+ memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
+ usedBytes += pad;
+ *offset = usedBytes;
+ *buffer = back.fBuffer;
+ back.fBytesFree -= alignedSize;
+ fBytesInUse += alignedSize;
+ VALIDATE();
+ return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
+ }
+ }
+
+    // We could honor the space request using a partial update of the current
+ // VB (if there is room). But we don't currently use draw calls to GL that
+ // allow the driver to know that previously issued draws won't read from
+ // the part of the buffer we update. Also, the GL buffer implementation
+ // may be cheating on the actual buffer size by shrinking the buffer on
+ // updateData() if the amount of data passed is less than the full buffer
+ // size.
+
+ if (!this->createBlock(size)) {
+ return nullptr;
+ }
+ SkASSERT(fBufferPtr);
+
+ *offset = 0;
+ BufferBlock& back = fBlocks.back();
+ *buffer = back.fBuffer;
+ back.fBytesFree -= size;
+ fBytesInUse += size;
+ VALIDATE();
+ return fBufferPtr;
+}
+
+void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
+ size_t fallbackSize,
+ size_t alignment,
+ sk_sp<const GrBuffer>* buffer,
+ size_t* offset,
+ size_t* actualSize) {
+ VALIDATE();
+
+ SkASSERT(buffer);
+ SkASSERT(offset);
+ SkASSERT(actualSize);
+
+ if (fBufferPtr) {
+ BufferBlock& back = fBlocks.back();
+ size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
+ size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+ if ((minSize + pad) <= back.fBytesFree) {
+ // Consume padding first, to make subsequent alignment math easier
+ memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
+ usedBytes += pad;
+ back.fBytesFree -= pad;
+ fBytesInUse += pad;
+
+ // Give caller all remaining space in this block up to fallbackSize (but aligned
+ // correctly)
+ size_t size;
+ if (back.fBytesFree >= fallbackSize) {
+ SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
+ size = fallbackSize;
+ } else {
+ size = GrSizeAlignDown(back.fBytesFree, alignment);
+ }
+ *offset = usedBytes;
+ *buffer = back.fBuffer;
+ *actualSize = size;
+ back.fBytesFree -= size;
+ fBytesInUse += size;
+ VALIDATE();
+ return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
+ }
+ }
+
+    // We could honor the space request using a partial update of the current
+ // VB (if there is room). But we don't currently use draw calls to GL that
+ // allow the driver to know that previously issued draws won't read from
+ // the part of the buffer we update. Also, the GL buffer implementation
+ // may be cheating on the actual buffer size by shrinking the buffer on
+ // updateData() if the amount of data passed is less than the full buffer
+ // size.
+
+ if (!this->createBlock(fallbackSize)) {
+ return nullptr;
+ }
+ SkASSERT(fBufferPtr);
+
+ *offset = 0;
+ BufferBlock& back = fBlocks.back();
+ *buffer = back.fBuffer;
+ *actualSize = fallbackSize;
+ back.fBytesFree -= fallbackSize;
+ fBytesInUse += fallbackSize;
+ VALIDATE();
+ return fBufferPtr;
+}
+
+void GrBufferAllocPool::putBack(size_t bytes) {
+ VALIDATE();
+
+ while (bytes) {
+ // caller shouldn't try to put back more than they've taken
+ SkASSERT(!fBlocks.empty());
+ BufferBlock& block = fBlocks.back();
+ size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
+ if (bytes >= bytesUsed) {
+ bytes -= bytesUsed;
+ fBytesInUse -= bytesUsed;
+ // if we locked a vb to satisfy the make space and we're releasing
+ // beyond it, then unmap it.
+ GrBuffer* buffer = block.fBuffer.get();
+ if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
+ UNMAP_BUFFER(block);
+ }
+ this->destroyBlock();
+ } else {
+ block.fBytesFree += bytes;
+ fBytesInUse -= bytes;
+ bytes = 0;
+ break;
+ }
+ }
+
+ VALIDATE();
+}
+
+bool GrBufferAllocPool::createBlock(size_t requestSize) {
+ size_t size = SkTMax(requestSize, kDefaultBufferSize);
+
+ VALIDATE();
+
+ BufferBlock& block = fBlocks.push_back();
+
+ block.fBuffer = this->getBuffer(size);
+ if (!block.fBuffer) {
+ fBlocks.pop_back();
+ return false;
+ }
+
+ block.fBytesFree = block.fBuffer->size();
+ if (fBufferPtr) {
+ SkASSERT(fBlocks.count() > 1);
+ BufferBlock& prev = fBlocks.fromBack(1);
+ GrBuffer* buffer = prev.fBuffer.get();
+ if (!buffer->isCpuBuffer()) {
+ if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
+ UNMAP_BUFFER(prev);
+ } else {
+ this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
+ }
+ }
+ fBufferPtr = nullptr;
+ }
+
+ SkASSERT(!fBufferPtr);
+
+ // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
+ // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
+ // threshold.
+ if (block.fBuffer->isCpuBuffer()) {
+ fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
+ SkASSERT(fBufferPtr);
+ } else {
+ if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
+ size > fGpu->caps()->bufferMapThreshold()) {
+ fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
+ }
+ }
+ if (!fBufferPtr) {
+ this->resetCpuData(block.fBytesFree);
+ fBufferPtr = fCpuStagingBuffer->data();
+ }
+
+ VALIDATE(true);
+
+ return true;
+}
+
+void GrBufferAllocPool::destroyBlock() {
+ SkASSERT(!fBlocks.empty());
+ SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
+ !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
+ fBlocks.pop_back();
+ fBufferPtr = nullptr;
+}
+
+void GrBufferAllocPool::resetCpuData(size_t newSize) {
+ SkASSERT(newSize >= kDefaultBufferSize || !newSize);
+ if (!newSize) {
+ fCpuStagingBuffer.reset();
+ return;
+ }
+ if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
+ return;
+ }
+ bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
+ fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
+ : GrCpuBuffer::Make(newSize);
+}
+
+void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
+ SkASSERT(block.fBuffer.get());
+ SkASSERT(!block.fBuffer.get()->isCpuBuffer());
+ GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
+ SkASSERT(!buffer->isMapped());
+ SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
+ SkASSERT(flushSize <= buffer->size());
+ VALIDATE(true);
+
+ if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
+ flushSize > fGpu->caps()->bufferMapThreshold()) {
+ void* data = buffer->map();
+ if (data) {
+ memcpy(data, fBufferPtr, flushSize);
+ UNMAP_BUFFER(block);
+ return;
+ }
+ }
+ buffer->updateData(fBufferPtr, flushSize);
+ VALIDATE(true);
+}
+
+sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
+ auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
+
+ if (fGpu->caps()->preferClientSideDynamicBuffers()) {
+ bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
+ return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
+ : GrCpuBuffer::Make(size);
+ }
+ return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
+ : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}
+
+void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
+ int vertexCount,
+ sk_sp<const GrBuffer>* buffer,
+ int* startVertex) {
+ SkASSERT(vertexCount >= 0);
+ SkASSERT(buffer);
+ SkASSERT(startVertex);
+
+ size_t offset SK_INIT_TO_AVOID_WARNING;
+ void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
+ vertexSize,
+ buffer,
+ &offset);
+
+ SkASSERT(0 == offset % vertexSize);
+ *startVertex = static_cast<int>(offset / vertexSize);
+ return ptr;
+}
+
+void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
+ int fallbackVertexCount,
+ sk_sp<const GrBuffer>* buffer, int* startVertex,
+ int* actualVertexCount) {
+ SkASSERT(minVertexCount >= 0);
+ SkASSERT(fallbackVertexCount >= minVertexCount);
+ SkASSERT(buffer);
+ SkASSERT(startVertex);
+ SkASSERT(actualVertexCount);
+
+ size_t offset SK_INIT_TO_AVOID_WARNING;
+ size_t actualSize SK_INIT_TO_AVOID_WARNING;
+ void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
+ SkSafeMath::Mul(vertexSize, fallbackVertexCount),
+ vertexSize,
+ buffer,
+ &offset,
+ &actualSize);
+
+ SkASSERT(0 == offset % vertexSize);
+ *startVertex = static_cast<int>(offset / vertexSize);
+
+ SkASSERT(0 == actualSize % vertexSize);
+ SkASSERT(actualSize >= vertexSize * minVertexCount);
+ *actualVertexCount = static_cast<int>(actualSize / vertexSize);
+
+ return ptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
+ : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}
+
+void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
+ int* startIndex) {
+ SkASSERT(indexCount >= 0);
+ SkASSERT(buffer);
+ SkASSERT(startIndex);
+
+ size_t offset SK_INIT_TO_AVOID_WARNING;
+ void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
+ sizeof(uint16_t),
+ buffer,
+ &offset);
+
+ SkASSERT(0 == offset % sizeof(uint16_t));
+ *startIndex = static_cast<int>(offset / sizeof(uint16_t));
+ return ptr;
+}
+
+void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
+ sk_sp<const GrBuffer>* buffer, int* startIndex,
+ int* actualIndexCount) {
+ SkASSERT(minIndexCount >= 0);
+ SkASSERT(fallbackIndexCount >= minIndexCount);
+ SkASSERT(buffer);
+ SkASSERT(startIndex);
+ SkASSERT(actualIndexCount);
+
+ size_t offset SK_INIT_TO_AVOID_WARNING;
+ size_t actualSize SK_INIT_TO_AVOID_WARNING;
+ void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
+ SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
+ sizeof(uint16_t),
+ buffer,
+ &offset,
+ &actualSize);
+
+ SkASSERT(0 == offset % sizeof(uint16_t));
+ *startIndex = static_cast<int>(offset / sizeof(uint16_t));
+
+ SkASSERT(0 == actualSize % sizeof(uint16_t));
+ SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
+ *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
+ return ptr;
+}
diff --git a/gfx/skia/skia/src/gpu/GrBufferAllocPool.h b/gfx/skia/skia/src/gpu/GrBufferAllocPool.h
new file mode 100644
index 0000000000..d42ef56423
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBufferAllocPool.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBufferAllocPool_DEFINED
+#define GrBufferAllocPool_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrCpuBuffer.h"
+#include "src/gpu/GrNonAtomicRef.h"
+
+class GrGpu;
+
+/**
+ * A pool of geometry buffers tied to a GrGpu.
+ *
+ * The pool allows a client to make space for geometry and then put back excess
+ * space if it over-allocated. When a client is ready to draw from the pool
+ * it calls unmap on the pool to ensure buffers are ready for drawing. The pool
+ * can be reset after drawing is completed to recycle space.
+ *
+ * At creation time a minimum per-buffer size can be specified. Additionally,
+ * a number of buffers to preallocate can be specified. These will
+ * be allocated at the min size and kept around until the pool is destroyed.
+ */
+class GrBufferAllocPool : SkNoncopyable {
+public:
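+    // 1 << 15 == 32 KiB; smaller requests are rounded up to this (see createBlock()).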
+ static constexpr size_t kDefaultBufferSize = 1 << 15;
+
+ /**
+ * A cache object that can be shared by multiple GrBufferAllocPool instances. It caches
+ * cpu buffer allocations to avoid reallocating them.
+ */
+ class CpuBufferCache : public GrNonAtomicRef<CpuBufferCache> {
+ public:
+ static sk_sp<CpuBufferCache> Make(int maxBuffersToCache);
+
+ sk_sp<GrCpuBuffer> makeBuffer(size_t size, bool mustBeInitialized);
+ void releaseAll();
+
+ private:
+ CpuBufferCache(int maxBuffersToCache);
+
+ struct Buffer {
+ sk_sp<GrCpuBuffer> fBuffer;
+ bool fCleared = false;
+ };
+ std::unique_ptr<Buffer[]> fBuffers;
+ int fMaxBuffersToCache = 0;
+ };
+
+ /**
+ * Ensures all buffers are unmapped and have all data written to them.
+ * Call before drawing using buffers from the pool.
+ */
+ void unmap();
+
+ /**
+ * Invalidates all the data in the pool, unrefs non-preallocated buffers.
+ */
+ void reset();
+
+ /**
+ * Frees data from makeSpaces in LIFO order.
+ */
+ void putBack(size_t bytes);
+
+protected:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the buffers.
+ * @param bufferType The type of buffers to create.
+ * @param cpuBufferCache If non-null a cache for client side array buffers
+ * or staging buffers used before data is uploaded to
+ * GPU buffer objects.
+ */
+ GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, sk_sp<CpuBufferCache> cpuBufferCache);
+
+ virtual ~GrBufferAllocPool();
+
+ /**
+ * Returns a block of memory to hold data. A buffer designated to hold the
+ * data is given to the caller. The buffer may or may not be locked. The
+ * returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the data is guaranteed to be in the
+ * buffer at the offset indicated by offset. Until that time it may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * @param size the amount of data to make space for
+ * @param alignment alignment constraint from start of buffer
+ * @param buffer returns the buffer that will hold the data.
+ * @param offset returns the offset into buffer of the data.
+ * @return pointer to where the client should write the data.
+ */
+ void* makeSpace(size_t size, size_t alignment, sk_sp<const GrBuffer>* buffer, size_t* offset);
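+
+    /**
+     * A minimal usage sketch (the pool, byteCount, and alignment names are
+     * illustrative only):
+     *
+     *     sk_sp<const GrBuffer> buffer;
+     *     size_t offset;
+     *     if (void* space = pool->makeSpace(byteCount, alignment, &buffer, &offset)) {
+     *         // write byteCount bytes to space; call unmap() before drawing
+     *         // from buffer at offset.
+     *     }
+     */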
+
+ /**
+ * Returns a block of memory to hold data. A buffer designated to hold the
+ * data is given to the caller. The buffer may or may not be locked. The
+ * returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the data is guaranteed to be in the
+ * buffer at the offset indicated by offset. Until that time it may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * The caller requests a minimum number of bytes, but the block may be (much)
+ * larger. Assuming that a new block must be allocated, it will be fallbackSize bytes.
+ * The actual block size is returned in actualSize.
+ *
+ * @param minSize the minimum amount of data to make space for
+ * @param fallbackSize the amount of data to make space for if a new block is needed
+ * @param alignment alignment constraint from start of buffer
+ * @param buffer returns the buffer that will hold the data.
+ * @param offset returns the offset into buffer of the data.
+ * @param actualSize returns the capacity of the block
+ * @return pointer to where the client should write the data.
+ */
+ void* makeSpaceAtLeast(size_t minSize,
+ size_t fallbackSize,
+ size_t alignment,
+ sk_sp<const GrBuffer>* buffer,
+ size_t* offset,
+ size_t* actualSize);
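+
+    // Note: the implementation asserts that fallbackSize is itself a multiple of
+    // alignment (see makeSpaceAtLeast() in GrBufferAllocPool.cpp).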
+
+ sk_sp<GrBuffer> getBuffer(size_t size);
+
+private:
+ struct BufferBlock {
+ size_t fBytesFree;
+ sk_sp<GrBuffer> fBuffer;
+ };
+
+ bool createBlock(size_t requestSize);
+ void destroyBlock();
+ void deleteBlocks();
+ void flushCpuData(const BufferBlock& block, size_t flushSize);
+ void resetCpuData(size_t newSize);
+#ifdef SK_DEBUG
+ void validate(bool unusedBlockAllowed = false) const;
+#endif
+ size_t fBytesInUse = 0;
+
+ SkTArray<BufferBlock> fBlocks;
+ sk_sp<CpuBufferCache> fCpuBufferCache;
+ sk_sp<GrCpuBuffer> fCpuStagingBuffer;
+ GrGpu* fGpu;
+ GrGpuBufferType fBufferType;
+ void* fBufferPtr = nullptr;
+};
+
+/**
+ * A GrBufferAllocPool of vertex buffers
+ */
+class GrVertexBufferAllocPool : public GrBufferAllocPool {
+public:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the vertex buffers.
+ * @param cpuBufferCache If non-null a cache for client side array buffers
+ * or staging buffers used before data is uploaded to
+ * GPU buffer objects.
+ */
+ GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache);
+
+ /**
+     * Returns a block of memory to hold vertices. A buffer designated to hold
+     * the vertices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the vertices are guaranteed to be in
+ * the buffer at the offset indicated by startVertex. Until that time they
+ * may be in temporary storage and/or the buffer may be locked.
+ *
+ * @param vertexSize specifies size of a vertex to allocate space for
+ * @param vertexCount number of vertices to allocate space for
+ * @param buffer returns the vertex buffer that will hold the
+ * vertices.
+ * @param startVertex returns the offset into buffer of the first vertex.
+     *                    In units of the size of a vertex (vertexSize).
+ * @return pointer to first vertex.
+ */
+ void* makeSpace(size_t vertexSize,
+ int vertexCount,
+ sk_sp<const GrBuffer>* buffer,
+ int* startVertex);
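+
+    /**
+     * A hypothetical caller allocating four positions (names illustrative only):
+     *
+     *     sk_sp<const GrBuffer> vertexBuffer;
+     *     int firstVertex;
+     *     auto* positions = static_cast<SkPoint*>(pool.makeSpace(
+     *             sizeof(SkPoint), 4, &vertexBuffer, &firstVertex));
+     *     if (positions) {
+     *         // write four SkPoints, then draw from vertexBuffer at firstVertex.
+     *     }
+     */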
+
+ /**
+     * Returns a block of memory to hold vertices. A buffer designated to hold
+     * the vertices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the vertices are guaranteed to be in
+ * the buffer at the offset indicated by startVertex. Until that time they
+ * may be in temporary storage and/or the buffer may be locked.
+ *
+ * The caller requests a minimum number of vertices, but the block may be (much)
+ * larger. Assuming that a new block must be allocated, it will be sized to hold
+ * fallbackVertexCount vertices. The actual block size (in vertices) is returned in
+ * actualVertexCount.
+ *
+ * @param vertexSize specifies size of a vertex to allocate space for
+ * @param minVertexCount minimum number of vertices to allocate space for
+ * @param fallbackVertexCount number of vertices to allocate space for if a new block is needed
+ * @param buffer returns the vertex buffer that will hold the vertices.
+ * @param startVertex returns the offset into buffer of the first vertex.
+     *                        In units of the size of a vertex (vertexSize).
+ * @param actualVertexCount returns the capacity of the block (in vertices)
+ * @return pointer to first vertex.
+ */
+ void* makeSpaceAtLeast(size_t vertexSize,
+ int minVertexCount,
+ int fallbackVertexCount,
+ sk_sp<const GrBuffer>* buffer,
+ int* startVertex,
+ int* actualVertexCount);
+
+private:
+ typedef GrBufferAllocPool INHERITED;
+};
+
+/**
+ * A GrBufferAllocPool of index buffers
+ */
+class GrIndexBufferAllocPool : public GrBufferAllocPool {
+public:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the index buffers.
+ * @param cpuBufferCache If non-null a cache for client side array buffers
+ * or staging buffers used before data is uploaded to
+ * GPU buffer objects.
+ */
+ GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache);
+
+ /**
+ * Returns a block of memory to hold indices. A buffer designated to hold
+ * the indices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the indices are guaranteed to be in the
+ * buffer at the offset indicated by startIndex. Until that time they may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * @param indexCount number of indices to allocate space for
+ * @param buffer returns the index buffer that will hold the indices.
+ * @param startIndex returns the offset into buffer of the first index.
+ * @return pointer to first index.
+ */
+ void* makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer, int* startIndex);
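+
+    // Note: indices are always 16-bit; allocations are sized and aligned by
+    // sizeof(uint16_t) (see GrBufferAllocPool.cpp).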
+
+ /**
+ * Returns a block of memory to hold indices. A buffer designated to hold
+ * the indices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the indices are guaranteed to be in the
+ * buffer at the offset indicated by startIndex. Until that time they may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * The caller requests a minimum number of indices, but the block may be (much)
+ * larger. Assuming that a new block must be allocated, it will be sized to hold
+ * fallbackIndexCount indices. The actual block size (in indices) is returned in
+ * actualIndexCount.
+ *
+ * @param minIndexCount minimum number of indices to allocate space for
+ * @param fallbackIndexCount number of indices to allocate space for if a new block is needed
+ * @param buffer returns the index buffer that will hold the indices.
+ * @param startIndex returns the offset into buffer of the first index.
+ * @param actualIndexCount returns the capacity of the block (in indices)
+ * @return pointer to first index.
+ */
+ void* makeSpaceAtLeast(int minIndexCount,
+ int fallbackIndexCount,
+ sk_sp<const GrBuffer>* buffer,
+ int* startIndex,
+ int* actualIndexCount);
+
+private:
+ typedef GrBufferAllocPool INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrCaps.cpp b/gfx/skia/skia/src/gpu/GrCaps.cpp
new file mode 100644
index 0000000000..0b93e58a59
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCaps.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrSurface.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrWindowRectangles.h"
+#include "src/utils/SkJSONWriter.h"
+
+GrCaps::GrCaps(const GrContextOptions& options) {
+ fMipMapSupport = false;
+ fNPOTTextureTileSupport = false;
+ fReuseScratchTextures = true;
+ fReuseScratchBuffers = true;
+ fGpuTracingSupport = false;
+ fOversizedStencilSupport = false;
+ fTextureBarrierSupport = false;
+ fSampleLocationsSupport = false;
+ fMultisampleDisableSupport = false;
+ fInstanceAttribSupport = false;
+ fMixedSamplesSupport = false;
+ fMSAAResolvesAutomatically = false;
+ fUsePrimitiveRestart = false;
+ fPreferClientSideDynamicBuffers = false;
+ fPreferFullscreenClears = false;
+ fMustClearUploadedBufferData = false;
+ fShouldInitializeTextures = false;
+ fSupportsAHardwareBufferImages = false;
+ fFenceSyncSupport = false;
+ fSemaphoreSupport = false;
+ fCrossContextTextureSupport = false;
+ fHalfFloatVertexAttributeSupport = false;
+ fDynamicStateArrayGeometryProcessorTextureSupport = false;
+ fPerformPartialClearsAsDraws = false;
+ fPerformColorClearsAsDraws = false;
+ fPerformStencilClearsAsDraws = false;
+ fAllowCoverageCounting = false;
+ fTransferBufferSupport = false;
+ fWritePixelsRowBytesSupport = false;
+ fReadPixelsRowBytesSupport = false;
+ fDriverBlacklistCCPR = false;
+ fDriverBlacklistMSAACCPR = false;
+
+ fBlendEquationSupport = kBasic_BlendEquationSupport;
+ fAdvBlendEqBlacklist = 0;
+
+ fMapBufferFlags = kNone_MapFlags;
+
+ fMaxVertexAttributes = 0;
+ fMaxRenderTargetSize = 1;
+ fMaxPreferredRenderTargetSize = 1;
+ fMaxTextureSize = 1;
+ fMaxWindowRectangles = 0;
+ fInternalMultisampleCount = 0;
+
+ fSuppressPrints = options.fSuppressPrints;
+#if GR_TEST_UTILS
+ fWireframeMode = options.fWireframeMode;
+#else
+ fWireframeMode = false;
+#endif
+ fBufferMapThreshold = options.fBufferMapThreshold;
+ fAvoidStencilBuffers = false;
+ fAvoidWritePixelsFastPath = false;
+
+ fPreferVRAMUseOverFlushes = true;
+
+ fPreferTrianglesOverSampleMask = false;
+
+ // Default to true, allow older versions of OpenGL to disable explicitly
+ fClampToBorderSupport = true;
+
+ fDriverBugWorkarounds = options.fDriverBugWorkarounds;
+}
+
+void GrCaps::applyOptionsOverrides(const GrContextOptions& options) {
+ this->onApplyOptionsOverrides(options);
+ if (options.fDisableDriverCorrectnessWorkarounds) {
+ SkASSERT(!fDriverBlacklistCCPR);
+ SkASSERT(!fDriverBlacklistMSAACCPR);
+ SkASSERT(!fAvoidStencilBuffers);
+ SkASSERT(!fAdvBlendEqBlacklist);
+ SkASSERT(!fPerformColorClearsAsDraws);
+ SkASSERT(!fPerformStencilClearsAsDraws);
+ // Don't check the partial-clear workaround, since that is a backend limitation, not a
+ // driver workaround (it just so happens the fallbacks are the same).
+ }
+ if (GrContextOptions::Enable::kNo == options.fUseDrawInsteadOfClear) {
+ fPerformColorClearsAsDraws = false;
+ fPerformStencilClearsAsDraws = false;
+ } else if (GrContextOptions::Enable::kYes == options.fUseDrawInsteadOfClear) {
+ fPerformColorClearsAsDraws = true;
+ fPerformStencilClearsAsDraws = true;
+ }
+
+ fAllowCoverageCounting = !options.fDisableCoverageCountingPaths;
+
+ fMaxTextureSize = SkTMin(fMaxTextureSize, options.fMaxTextureSizeOverride);
+ fMaxTileSize = fMaxTextureSize;
+#if GR_TEST_UTILS
+ // If the max tile override is zero, it means we should use the max texture size.
+ if (options.fMaxTileSizeOverride && options.fMaxTileSizeOverride < fMaxTextureSize) {
+ fMaxTileSize = options.fMaxTileSizeOverride;
+ }
+ if (options.fSuppressGeometryShaders) {
+ fShaderCaps->fGeometryShaderSupport = false;
+ }
+ if (options.fClearAllTextures) {
+ fShouldInitializeTextures = true;
+ }
+#endif
+
+ if (fMaxWindowRectangles > GrWindowRectangles::kMaxWindows) {
+ SkDebugf("WARNING: capping window rectangles at %i. HW advertises support for %i.\n",
+ GrWindowRectangles::kMaxWindows, fMaxWindowRectangles);
+ fMaxWindowRectangles = GrWindowRectangles::kMaxWindows;
+ }
+
+ fInternalMultisampleCount = options.fInternalMultisampleCount;
+
+ fAvoidStencilBuffers = options.fAvoidStencilBuffers;
+
+ fDriverBugWorkarounds.applyOverrides(options.fDriverBugWorkarounds);
+}
+
+
+#ifdef SK_ENABLE_DUMP_GPU
+#include "src/gpu/GrTestUtils.h"
+
+static SkString map_flags_to_string(uint32_t flags) {
+ SkString str;
+ if (GrCaps::kNone_MapFlags == flags) {
+ str = "none";
+ } else {
+ SkASSERT(GrCaps::kCanMap_MapFlag & flags);
+ SkDEBUGCODE(flags &= ~GrCaps::kCanMap_MapFlag);
+ str = "can_map";
+
+ if (GrCaps::kSubset_MapFlag & flags) {
+ str.append(" partial");
+ } else {
+ str.append(" full");
+ }
+ SkDEBUGCODE(flags &= ~GrCaps::kSubset_MapFlag);
+ if (GrCaps::kAsyncRead_MapFlag & flags) {
+ str.append(" async_read");
+ } else {
+ str.append(" sync_read");
+ }
+ SkDEBUGCODE(flags &= ~GrCaps::kAsyncRead_MapFlag);
+ }
+ SkASSERT(0 == flags); // Make sure we handled all the flags.
+ return str;
+}
+
+void GrCaps::dumpJSON(SkJSONWriter* writer) const {
+ writer->beginObject();
+
+ writer->appendBool("MIP Map Support", fMipMapSupport);
+ writer->appendBool("NPOT Texture Tile Support", fNPOTTextureTileSupport);
+ writer->appendBool("Reuse Scratch Textures", fReuseScratchTextures);
+ writer->appendBool("Reuse Scratch Buffers", fReuseScratchBuffers);
+ writer->appendBool("Gpu Tracing Support", fGpuTracingSupport);
+ writer->appendBool("Oversized Stencil Support", fOversizedStencilSupport);
+ writer->appendBool("Texture Barrier Support", fTextureBarrierSupport);
+ writer->appendBool("Sample Locations Support", fSampleLocationsSupport);
+ writer->appendBool("Multisample disable support", fMultisampleDisableSupport);
+ writer->appendBool("Instance Attrib Support", fInstanceAttribSupport);
+ writer->appendBool("Mixed Samples Support", fMixedSamplesSupport);
+ writer->appendBool("MSAA Resolves Automatically", fMSAAResolvesAutomatically);
+ writer->appendBool("Use primitive restart", fUsePrimitiveRestart);
+ writer->appendBool("Prefer client-side dynamic buffers", fPreferClientSideDynamicBuffers);
+ writer->appendBool("Prefer fullscreen clears (and stencil discard)", fPreferFullscreenClears);
+ writer->appendBool("Must clear buffer memory", fMustClearUploadedBufferData);
+ writer->appendBool("Should initialize textures", fShouldInitializeTextures);
+ writer->appendBool("Supports importing AHardwareBuffers", fSupportsAHardwareBufferImages);
+ writer->appendBool("Fence sync support", fFenceSyncSupport);
+ writer->appendBool("Semaphore support", fSemaphoreSupport);
+ writer->appendBool("Cross context texture support", fCrossContextTextureSupport);
+ writer->appendBool("Half float vertex attribute support", fHalfFloatVertexAttributeSupport);
+ writer->appendBool("Specify GeometryProcessor textures as a dynamic state array",
+ fDynamicStateArrayGeometryProcessorTextureSupport);
+ writer->appendBool("Use draws for partial clears", fPerformPartialClearsAsDraws);
+ writer->appendBool("Use draws for color clears", fPerformColorClearsAsDraws);
+ writer->appendBool("Use draws for stencil clip clears", fPerformStencilClearsAsDraws);
+ writer->appendBool("Allow coverage counting shortcuts", fAllowCoverageCounting);
+ writer->appendBool("Supports transfer buffers", fTransferBufferSupport);
+ writer->appendBool("Write pixels row bytes support", fWritePixelsRowBytesSupport);
+ writer->appendBool("Read pixels row bytes support", fReadPixelsRowBytesSupport);
+ writer->appendBool("Blacklist CCPR on current driver [workaround]", fDriverBlacklistCCPR);
+ writer->appendBool("Blacklist MSAA version of CCPR on current driver [workaround]",
+ fDriverBlacklistMSAACCPR);
+ writer->appendBool("Clamp-to-border", fClampToBorderSupport);
+
+ writer->appendBool("Prefer VRAM Use over flushes [workaround]", fPreferVRAMUseOverFlushes);
+ writer->appendBool("Prefer more triangles over sample mask [MSAA only]",
+ fPreferTrianglesOverSampleMask);
+ writer->appendBool("Avoid stencil buffers [workaround]", fAvoidStencilBuffers);
+
+ if (this->advancedBlendEquationSupport()) {
+ writer->appendHexU32("Advanced Blend Equation Blacklist", fAdvBlendEqBlacklist);
+ }
+
+ writer->appendS32("Max Vertex Attributes", fMaxVertexAttributes);
+ writer->appendS32("Max Texture Size", fMaxTextureSize);
+ writer->appendS32("Max Render Target Size", fMaxRenderTargetSize);
+ writer->appendS32("Max Preferred Render Target Size", fMaxPreferredRenderTargetSize);
+ writer->appendS32("Max Window Rectangles", fMaxWindowRectangles);
+ writer->appendS32("Preferred Sample Count for Internal MSAA and Mixed Samples",
+ fInternalMultisampleCount);
+
+ static const char* kBlendEquationSupportNames[] = {
+ "Basic",
+ "Advanced",
+ "Advanced Coherent",
+ };
+ GR_STATIC_ASSERT(0 == kBasic_BlendEquationSupport);
+ GR_STATIC_ASSERT(1 == kAdvanced_BlendEquationSupport);
+ GR_STATIC_ASSERT(2 == kAdvancedCoherent_BlendEquationSupport);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kBlendEquationSupportNames) == kLast_BlendEquationSupport + 1);
+
+ writer->appendString("Blend Equation Support",
+ kBlendEquationSupportNames[fBlendEquationSupport]);
+ writer->appendString("Map Buffer Support", map_flags_to_string(fMapBufferFlags).c_str());
+
+ this->onDumpJSON(writer);
+
+ writer->appendName("shaderCaps");
+ this->shaderCaps()->dumpJSON(writer);
+
+ writer->endObject();
+}
+#else
+void GrCaps::dumpJSON(SkJSONWriter* writer) const { }
+#endif
+
+bool GrCaps::surfaceSupportsWritePixels(const GrSurface* surface) const {
+ return surface->readOnly() ? false : this->onSurfaceSupportsWritePixels(surface);
+}
+
+bool GrCaps::canCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ if (dst->readOnly()) {
+ return false;
+ }
+    // Currently we only ever do copies where the configs are the same. This check really should be
+    // checking if the backend formats, color types, and swizzle are compatible. Our copy always
+    // copies exact byte to byte from src to dst, so we need to check that, when we do this, the dst
+    // has the expected values stored in the right places, taking the swizzle into account. For now
+    // we can be more restrictive and just make sure the configs are the same, and if we generalize
+    // copies and swizzles more in the future this can be updated.
+ if (this->makeConfigSpecific(dst->config(), dst->backendFormat()) !=
+ this->makeConfigSpecific(src->config(), src->backendFormat())) {
+ return false;
+ }
+ return this->onCanCopySurface(dst, src, srcRect, dstPoint);
+}
+
+bool GrCaps::validateSurfaceParams(const SkISize& size, const GrBackendFormat& format,
+ GrPixelConfig config, GrRenderable renderable,
+ int renderTargetSampleCnt, GrMipMapped mipped) const {
+ if (!this->isFormatTexturable(format)) {
+ return false;
+ }
+
+ if (GrMipMapped::kYes == mipped && !this->mipMapSupport()) {
+ return false;
+ }
+
+ if (size.width() < 1 || size.height() < 1) {
+ return false;
+ }
+
+ if (renderable == GrRenderable::kYes) {
+ if (!this->isFormatRenderable(format, renderTargetSampleCnt)) {
+ return false;
+ }
+ int maxRTSize = this->maxRenderTargetSize();
+ if (size.width() > maxRTSize || size.height() > maxRTSize) {
+ return false;
+ }
+ } else {
+ // We currently do not support multisampled textures
+ if (renderTargetSampleCnt != 1) {
+ return false;
+ }
+ int maxSize = this->maxTextureSize();
+ if (size.width() > maxSize || size.height() > maxSize) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+GrCaps::SupportedRead GrCaps::supportedReadPixelsColorType(GrColorType srcColorType,
+ const GrBackendFormat& srcFormat,
+ GrColorType dstColorType) const {
+ SupportedRead read = this->onSupportedReadPixelsColorType(srcColorType, srcFormat,
+ dstColorType);
+
+ // There are known problems with 24 vs 32 bit BPP with this color type. Just fail for now if
+ // using a transfer buffer.
+ if (GrColorType::kRGB_888x == read.fColorType) {
+ read.fOffsetAlignmentForTransferBuffer = 0;
+ }
+ // It's very convenient to access 1 byte-per-channel 32 bit color types as uint32_t on the CPU.
+ // Make those aligned reads out of the buffer even if the underlying API doesn't require it.
+ auto componentFlags = GrColorTypeComponentFlags(read.fColorType);
+ if ((componentFlags == kRGBA_SkColorTypeComponentFlags ||
+ componentFlags == kRGB_SkColorTypeComponentFlags ||
+ componentFlags == kAlpha_SkColorTypeComponentFlag ||
+ componentFlags == kGray_SkColorTypeComponentFlag) &&
+ GrColorTypeBytesPerPixel(read.fColorType) == 4) {
+ switch (read.fOffsetAlignmentForTransferBuffer & 0b11) {
+ // offset alignment already a multiple of 4
+ case 0:
+ break;
+ // offset alignment is a multiple of 2 but not 4.
+ case 2:
+                read.fOffsetAlignmentForTransferBuffer *= 2;
+                break;
+ // offset alignment is not a multiple of 2.
+ default:
+ read.fOffsetAlignmentForTransferBuffer *= 4;
+ }
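+        // e.g. the case above maps 2 -> 4 and 6 -> 12; the default case maps
+        // 1 -> 4 and 3 -> 12. The result is always a multiple of 4 and remains
+        // a multiple of the original alignment.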
+ }
+ return read;
+}
+
+#ifdef SK_DEBUG
+bool GrCaps::AreConfigsCompatible(GrPixelConfig genericConfig, GrPixelConfig specificConfig) {
+ bool compatible = false;
+
+ switch (genericConfig) {
+ case kAlpha_8_GrPixelConfig:
+ compatible = kAlpha_8_GrPixelConfig == specificConfig || // here bc of the mock context
+ kAlpha_8_as_Alpha_GrPixelConfig == specificConfig ||
+ kAlpha_8_as_Red_GrPixelConfig == specificConfig;
+ break;
+ case kGray_8_GrPixelConfig:
+ compatible = kGray_8_GrPixelConfig == specificConfig || // here bc of the mock context
+ kGray_8_as_Lum_GrPixelConfig == specificConfig ||
+ kGray_8_as_Red_GrPixelConfig == specificConfig;
+ break;
+ case kAlpha_half_GrPixelConfig:
+ compatible = kAlpha_half_GrPixelConfig == specificConfig || // bc of the mock context
+ kAlpha_half_as_Red_GrPixelConfig == specificConfig ||
+ kAlpha_half_as_Lum_GrPixelConfig == specificConfig;
+ break;
+ case kRGB_888_GrPixelConfig:
+ compatible = kRGB_888_GrPixelConfig == specificConfig ||
+ kRGB_888X_GrPixelConfig == specificConfig;
+ break;
+ case kRGBA_8888_GrPixelConfig:
+ compatible = kRGBA_8888_GrPixelConfig == specificConfig ||
+ kBGRA_8888_GrPixelConfig == specificConfig;
+ break;
+ default:
+ compatible = genericConfig == specificConfig;
+ break;
+ }
+
+ if (!compatible) {
+ SkDebugf("Configs are not compatible: %d %d\n", genericConfig, specificConfig);
+ }
+
+ return compatible;
+}
+#endif
+
+GrBackendFormat GrCaps::getDefaultBackendFormat(GrColorType grColorType,
+ GrRenderable renderable) const {
+ GrBackendFormat format = this->onGetDefaultBackendFormat(grColorType, renderable);
+ if (!this->isFormatTexturableAndUploadable(grColorType, format)) {
+ return {};
+ }
+
+ if (renderable == GrRenderable::kYes) {
+ if (!this->isFormatAsColorTypeRenderable(grColorType, format)) {
+ return {};
+ }
+ }
+
+ return format;
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrCaps.h b/gfx/skia/skia/src/gpu/GrCaps.h
new file mode 100644
index 0000000000..27f0ec4847
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCaps.h
@@ -0,0 +1,553 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrCaps_DEFINED
+#define GrCaps_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/gpu/GrDriverBugWorkarounds.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSurfaceProxy.h"
+
+class GrBackendFormat;
+class GrBackendRenderTarget;
+class GrBackendTexture;
+struct GrContextOptions;
+class GrRenderTargetProxy;
+class GrSurface;
+class SkJSONWriter;
+
+/**
+ * Represents the capabilities of a GrContext.
+ */
+class GrCaps : public SkRefCnt {
+public:
+ GrCaps(const GrContextOptions&);
+
+ void dumpJSON(SkJSONWriter*) const;
+
+ const GrShaderCaps* shaderCaps() const { return fShaderCaps.get(); }
+
+ bool npotTextureTileSupport() const { return fNPOTTextureTileSupport; }
+ /** To avoid as-yet-unnecessary complexity we don't allow any partial support of MIP Maps (e.g.
+ only for POT textures) */
+ bool mipMapSupport() const { return fMipMapSupport; }
+
+ bool gpuTracingSupport() const { return fGpuTracingSupport; }
+ bool oversizedStencilSupport() const { return fOversizedStencilSupport; }
+ bool textureBarrierSupport() const { return fTextureBarrierSupport; }
+ bool sampleLocationsSupport() const { return fSampleLocationsSupport; }
+ bool multisampleDisableSupport() const { return fMultisampleDisableSupport; }
+ bool instanceAttribSupport() const { return fInstanceAttribSupport; }
+ bool mixedSamplesSupport() const { return fMixedSamplesSupport; }
+ // This flag indicates that we never have to resolve MSAA. In practice, it means that we have
+ // an MSAA-render-to-texture extension: Any render target we create internally will use the
+ // extension, and any wrapped render target is the client's responsibility.
+ bool msaaResolvesAutomatically() const { return fMSAAResolvesAutomatically; }
+ bool halfFloatVertexAttributeSupport() const { return fHalfFloatVertexAttributeSupport; }
+
+ // Primitive restart functionality is core in ES 3.0, but using it will cause slowdowns on some
+ // systems. This cap is only set if primitive restart will improve performance.
+ bool usePrimitiveRestart() const { return fUsePrimitiveRestart; }
+
+ bool preferClientSideDynamicBuffers() const { return fPreferClientSideDynamicBuffers; }
+
+ // On tilers, an initial fullscreen clear is an OPTIMIZATION. It allows the hardware to
+ // initialize each tile with a constant value rather than loading each pixel from memory.
+ bool preferFullscreenClears() const { return fPreferFullscreenClears; }
+
+ // Should we discard stencil values after a render pass? (Tilers get better performance if we
+ // always load stencil buffers with a "clear" op, and then discard the content when finished.)
+ bool discardStencilValuesAfterRenderPass() const {
+ // This method is actually just a duplicate of preferFullscreenClears(), with a descriptive
+ // name for the sake of readability.
+ return this->preferFullscreenClears();
+ }
+
+ bool preferVRAMUseOverFlushes() const { return fPreferVRAMUseOverFlushes; }
+
+ bool preferTrianglesOverSampleMask() const { return fPreferTrianglesOverSampleMask; }
+
+ bool avoidStencilBuffers() const { return fAvoidStencilBuffers; }
+
+ bool avoidWritePixelsFastPath() const { return fAvoidWritePixelsFastPath; }
+
+ /**
+ * Indicates the capabilities of the fixed function blend unit.
+ */
+ enum BlendEquationSupport {
+        kBasic_BlendEquationSupport, //!< Support to select the operator that
+                                     // combines src and dst terms.
+        kAdvanced_BlendEquationSupport, //!< Additional fixed function support for specific
+                                        // SVG/PDF blend modes. Requires blend barriers.
+        kAdvancedCoherent_BlendEquationSupport, //!< Advanced blend equation support that does not
+                                                // require blend barriers, and permits overlap.
+
+ kLast_BlendEquationSupport = kAdvancedCoherent_BlendEquationSupport
+ };
+
+ BlendEquationSupport blendEquationSupport() const { return fBlendEquationSupport; }
+
+ bool advancedBlendEquationSupport() const {
+ return fBlendEquationSupport >= kAdvanced_BlendEquationSupport;
+ }
+
+ bool advancedCoherentBlendEquationSupport() const {
+ return kAdvancedCoherent_BlendEquationSupport == fBlendEquationSupport;
+ }
+
+ bool isAdvancedBlendEquationBlacklisted(GrBlendEquation equation) const {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+ SkASSERT(this->advancedBlendEquationSupport());
+ return SkToBool(fAdvBlendEqBlacklist & (1 << equation));
+ }
+
+ /**
+ * Indicates whether GPU->CPU memory mapping for GPU resources such as vertex buffers and
+ * textures allows partial mappings or full mappings.
+ */
+ enum MapFlags {
+        kNone_MapFlags = 0x0, //!< Cannot map the resource.
+
+        kCanMap_MapFlag = 0x1, //!< The resource can be mapped. Must be set for any of
+                               // the other flags to have meaning.
+        kSubset_MapFlag = 0x2, //!< The resource can be partially mapped.
+        kAsyncRead_MapFlag = 0x4, //!< Whether maps for reading are asynchronous with respect
+                                  // to GrOpsRenderPasses submitted to GrGpu.
+ };
+
+ uint32_t mapBufferFlags() const { return fMapBufferFlags; }
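+
+    // A sketch of how a caller might test these flags (hypothetical helper):
+    //
+    //     bool canPartiallyMap(const GrCaps* caps) {
+    //         uint32_t flags = caps->mapBufferFlags();
+    //         return SkToBool(flags & GrCaps::kCanMap_MapFlag) &&
+    //                SkToBool(flags & GrCaps::kSubset_MapFlag);
+    //     }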
+
+ // Scratch textures not being reused means that those scratch textures
+ // that we upload to (i.e., don't have a render target) will not be
+ // recycled in the texture cache. This is to prevent ghosting by drivers
+ // (in particular for deferred architectures).
+ bool reuseScratchTextures() const { return fReuseScratchTextures; }
+ bool reuseScratchBuffers() const { return fReuseScratchBuffers; }
+
+ /// maximum number of attribute values per vertex
+ int maxVertexAttributes() const { return fMaxVertexAttributes; }
+
+ int maxRenderTargetSize() const { return fMaxRenderTargetSize; }
+
+    /** This is the largest render target size that can be used without incurring extra performance
+ cost. It is usually the max RT size, unless larger render targets are known to be slower. */
+ int maxPreferredRenderTargetSize() const { return fMaxPreferredRenderTargetSize; }
+
+ int maxTextureSize() const { return fMaxTextureSize; }
+
+ /** This is the maximum tile size to use by GPU devices for rendering sw-backed images/bitmaps.
+ It is usually the max texture size, unless we're overriding it for testing. */
+ int maxTileSize() const {
+ SkASSERT(fMaxTileSize <= fMaxTextureSize);
+ return fMaxTileSize;
+ }
+
+ int maxWindowRectangles() const { return fMaxWindowRectangles; }
+
+    // Returns whether window rectangles are supported for the given backend render target.
+ bool isWindowRectanglesSupportedForRT(const GrBackendRenderTarget& rt) const {
+ return this->maxWindowRectangles() > 0 && this->onIsWindowRectanglesSupportedForRT(rt);
+ }
+
+ virtual bool isFormatSRGB(const GrBackendFormat&) const = 0;
+
+ // Callers can optionally pass in an SkImage::CompressionType which will be filled in with the
+ // correct type if the GrBackendFormat is compressed.
+ virtual bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const = 0;
+
+ // TODO: Once we use the supportWritePixels call for uploads, we can remove this function and
+ // instead only have the version that takes a GrBackendFormat.
+ virtual bool isFormatTexturableAndUploadable(GrColorType, const GrBackendFormat&) const = 0;
+ // Can a texture be made with the GrBackendFormat, and then be bound and sampled in a shader.
+ virtual bool isFormatTexturable(const GrBackendFormat&) const = 0;
+
+ // Returns whether a texture of the given format can be copied to a texture of the same format.
+ virtual bool isFormatCopyable(const GrBackendFormat&) const = 0;
+
+    // Returns the maximum supported sample count for a format. 0 means the format is not
+    // renderable; 1 means the format is renderable but doesn't support MSAA. This call only
+    // refers to the
+ // format itself. A caller should also confirm if the format is renderable with a given
+ // GrColorType by calling isFormatRenderable.
+ virtual int maxRenderTargetSampleCount(const GrBackendFormat&) const = 0;
+
+ // Returns the number of samples to use when performing internal draws to the given config with
+ // MSAA or mixed samples. If 0, Ganesh should not attempt to use internal multisampling.
+ int internalMultisampleCount(const GrBackendFormat& format) const {
+ return SkTMin(fInternalMultisampleCount, this->maxRenderTargetSampleCount(format));
+ }
+
+ virtual bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const = 0;
+
+ virtual bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const = 0;
+
+ // Find a sample count greater than or equal to the requested count which is supported for a
+ // render target of the given format or 0 if no such sample count is supported. If the requested
+ // sample count is 1 then 1 will be returned if non-MSAA rendering is supported, otherwise 0.
+ // For historical reasons requestedCount==0 is handled identically to requestedCount==1.
+ virtual int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat&) const = 0;
+
+ // Returns the number of bytes per pixel for the given GrBackendFormat. This is only supported
+ // for "normal" formats. For compressed formats this will return 0.
+ virtual size_t bytesPerPixel(const GrBackendFormat&) const = 0;
+
+ /**
+ * Backends may have restrictions on what types of surfaces support GrGpu::writePixels().
+ * If this returns false then the caller should implement a fallback where a temporary texture
+     * is created, pixels are written to it, and then that is copied or drawn into the surface.
+ */
+ bool surfaceSupportsWritePixels(const GrSurface*) const;
+
+ /**
+ * Indicates whether surface supports GrGpu::readPixels, must be copied, or cannot be read.
+ */
+ enum class SurfaceReadPixelsSupport {
+ /** GrGpu::readPixels is supported by the surface. */
+ kSupported,
+ /**
+ * GrGpu::readPixels is not supported by this surface but this surface can be drawn
+ * or copied to a Ganesh-created GrTextureType::kTexture2D and then that surface will be
+ * readable.
+ */
+ kCopyToTexture2D,
+ /**
+ * Not supported
+ */
+ kUnsupported,
+ };
+ /**
+ * Backends may have restrictions on what types of surfaces support GrGpu::readPixels(). We may
+ * either be able to read directly from the surface, read from a copy of the surface, or not
+ * read at all.
+ */
+ virtual SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const = 0;
+
+ struct SupportedWrite {
+ GrColorType fColorType;
+ // If the write is occurring using GrGpu::transferPixelsTo then this provides the
+ // minimum alignment of the offset into the transfer buffer.
+ size_t fOffsetAlignmentForTransferBuffer;
+ };
+
+ /**
+     * Given a dst pixel config and a src color type, what color type must the caller coax
+     * the data into in order to use GrGpu::writePixels().
+ */
+ virtual SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const = 0;
+
+ struct SupportedRead {
+ GrColorType fColorType;
+ // If the read is occurring using GrGpu::transferPixelsFrom then this provides the
+ // minimum alignment of the offset into the transfer buffer.
+ size_t fOffsetAlignmentForTransferBuffer;
+ };
+
+ /**
+     * Given a src surface's color type and its backend format, as well as a color type the caller
+     * would like to read into, this provides a legal color type that the caller may pass to
+     * GrGpu::readPixels(). The returned color type may differ from the passed dstColorType, in
+     * which case the caller must convert the read pixel data (see GrConvertPixels) when copying
+     * to dstColorType. The caller must check the returned color type for kUnknown.
+ */
+ SupportedRead supportedReadPixelsColorType(GrColorType srcColorType,
+ const GrBackendFormat& srcFormat,
+ GrColorType dstColorType) const;
+
+ /**
+ * Do GrGpu::writePixels() and GrGpu::transferPixelsTo() support a src buffer where the row
+ * bytes is not equal to bpp * w?
+ */
+ bool writePixelsRowBytesSupport() const { return fWritePixelsRowBytesSupport; }
+ /**
+ * Does GrGpu::readPixels() support a dst buffer where the row bytes is not equal to bpp * w?
+ */
+ bool readPixelsRowBytesSupport() const { return fReadPixelsRowBytesSupport; }
+
+ /** Are transfer buffers (to textures and from surfaces) supported? */
+ bool transferBufferSupport() const { return fTransferBufferSupport; }
+
+ bool suppressPrints() const { return fSuppressPrints; }
+
+ size_t bufferMapThreshold() const {
+ SkASSERT(fBufferMapThreshold >= 0);
+ return fBufferMapThreshold;
+ }
+
+ /** True in environments that will issue errors if memory uploaded to buffers
+ is not initialized (even if not read by draw calls). */
+ bool mustClearUploadedBufferData() const { return fMustClearUploadedBufferData; }
+
+    /** For some environments, there is a performance or safety concern with not
+ initializing textures. For example, with WebGL and Firefox, there is a large
+ performance hit to not doing it.
+ */
+ bool shouldInitializeTextures() const { return fShouldInitializeTextures; }
+
+    /** Returns true if the given backend supports importing AHardwareBuffers via the
+     * GrAHardwareBufferImageGenerator. This will only ever be supported on Android devices with
+     * API level >= 26.
+     */
+ bool supportsAHardwareBufferImages() const { return fSupportsAHardwareBufferImages; }
+
+ bool wireframeMode() const { return fWireframeMode; }
+
+ /** Supports using GrFence. */
+ bool fenceSyncSupport() const { return fFenceSyncSupport; }
+
+ /** Supports using GrSemaphore. */
+ bool semaphoreSupport() const { return fSemaphoreSupport; }
+
+ bool crossContextTextureSupport() const { return fCrossContextTextureSupport; }
+ /**
+     * Returns whether or not we will be able to do a copy given the passed-in params.
+ */
+ bool canCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const;
+
+ bool dynamicStateArrayGeometryProcessorTextureSupport() const {
+ return fDynamicStateArrayGeometryProcessorTextureSupport;
+ }
+
+    // Not all backends support clearing with a scissor test (e.g. Metal). This will always
+    // return true if performColorClearsAsDraws() returns true.
+ bool performPartialClearsAsDraws() const {
+ return fPerformColorClearsAsDraws || fPerformPartialClearsAsDraws;
+ }
+
+ // Many drivers have issues with color clears.
+ bool performColorClearsAsDraws() const { return fPerformColorClearsAsDraws; }
+
+ /// Adreno 4xx devices experience an issue when there are a large number of stencil clip bit
+ /// clears. The minimal repro steps are not precisely known but drawing a rect with a stencil
+ /// op instead of using glClear seems to resolve the issue.
+ bool performStencilClearsAsDraws() const { return fPerformStencilClearsAsDraws; }
+
+ // Can we use coverage counting shortcuts to render paths? Coverage counting can cause artifacts
+ // along shared edges if care isn't taken to ensure both contours wind in the same direction.
+ bool allowCoverageCounting() const { return fAllowCoverageCounting; }
+
+ // Should we disable the CCPR code due to a faulty driver?
+ bool driverBlacklistCCPR() const { return fDriverBlacklistCCPR; }
+ bool driverBlacklistMSAACCPR() const { return fDriverBlacklistMSAACCPR; }
+
+    /**
+     * This is used when trying to copy a dst in order to perform shader-based blending.
+     *
+     * fRectsMustMatch will be set to true if the copy operation must ensure that the src and
+     * dest rects are identical.
+     *
+     * fMustCopyWholeSrc will be set to true if the copy rect must equal the src's bounds.
+     *
+     * The caller will detect cases where the copy cannot succeed and will try copy-as-draw as a
+     * fallback.
+     */
+ struct DstCopyRestrictions {
+ GrSurfaceProxy::RectsMustMatch fRectsMustMatch = GrSurfaceProxy::RectsMustMatch::kNo;
+ bool fMustCopyWholeSrc = false;
+ };
+ virtual DstCopyRestrictions getDstCopyRestrictions(const GrRenderTargetProxy* src,
+ GrColorType ct) const {
+ return {};
+ }
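+    // Caller-side sketch of honoring these restrictions before issuing a copy ('rtProxy',
+    // 'copyRect', and 'dstPoint' are assumed):
+    //
+    //     auto restrictions = caps->getDstCopyRestrictions(rtProxy, colorType);
+    //     if (restrictions.fMustCopyWholeSrc) {
+    //         copyRect = SkIRect::MakeWH(rtProxy->width(), rtProxy->height());
+    //     }
+    //     if (GrSurfaceProxy::RectsMustMatch::kYes == restrictions.fRectsMustMatch) {
+    //         dstPoint = {copyRect.fLeft, copyRect.fTop};
+    //     }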
+
+ bool validateSurfaceParams(const SkISize&, const GrBackendFormat&, GrPixelConfig,
+ GrRenderable renderable, int renderTargetSampleCnt,
+ GrMipMapped) const;
+
+ bool areColorTypeAndFormatCompatible(GrColorType grCT,
+ const GrBackendFormat& format) const {
+ if (GrColorType::kUnknown == grCT) {
+ return false;
+ }
+
+ return this->onAreColorTypeAndFormatCompatible(grCT, format);
+ }
+
+ // TODO: it seems like we could pass the full SkImageInfo and validate its colorSpace too
+ // Returns kUnknown if a valid config could not be determined.
+ GrPixelConfig getConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType grCT) const {
+ if (GrColorType::kUnknown == grCT) {
+ return kUnknown_GrPixelConfig;
+ }
+
+ return this->onGetConfigFromBackendFormat(format, grCT);
+ }
+
+ /**
+ * Special method only for YUVA images. Returns a colortype that matches the backend format or
+ * kUnknown if a colortype could not be determined.
+ */
+ virtual GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat&,
+ bool isAlphaChannel) const = 0;
+
+ /** These are used when creating a new texture internally. */
+ GrBackendFormat getDefaultBackendFormat(GrColorType, GrRenderable) const;
+
+ virtual GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const = 0;
+
+ /**
+ * The CLAMP_TO_BORDER wrap mode for texture coordinates was added to desktop GL in 1.3, and
+ * GLES 3.2, but is also available in extensions. Vulkan and Metal always have support.
+ */
+ bool clampToBorderSupport() const { return fClampToBorderSupport; }
+
+ /**
+ * Returns the GrSwizzle to use when sampling from a texture with the passed in GrBackendFormat
+ * and GrColorType.
+ */
+ virtual GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const = 0;
+
+ /**
+ * Returns the GrSwizzle to use when outputting to a render target with the passed in
+ * GrBackendFormat and GrColorType.
+ */
+ virtual GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const = 0;
+
+ const GrDriverBugWorkarounds& workarounds() const { return fDriverBugWorkarounds; }
+
+ /**
+ * Given a possibly generic GrPixelConfig and a backend format return a specific
+ * GrPixelConfig.
+ */
+ GrPixelConfig makeConfigSpecific(GrPixelConfig config, const GrBackendFormat& format) const {
+ auto ct = GrPixelConfigToColorType(config);
+ auto result = this->getConfigFromBackendFormat(format, ct);
+ SkASSERT(config == result || AreConfigsCompatible(config, result));
+ return result;
+ }
+
+#ifdef SK_DEBUG
+ // This is just a debugging entry point until we're weaned off of GrPixelConfig. It
+ // should be used to verify that the pixel config from user-level code (the genericConfig)
+ // is compatible with a pixel config we've computed from scratch (the specificConfig).
+ static bool AreConfigsCompatible(GrPixelConfig genericConfig, GrPixelConfig specificConfig);
+#endif
+
+#if GR_TEST_UTILS
+ struct TestFormatColorTypeCombination {
+ GrColorType fColorType;
+ GrBackendFormat fFormat;
+ };
+
+ virtual std::vector<TestFormatColorTypeCombination> getTestingCombinations() const = 0;
+#endif
+
+protected:
+    /** Subclasses must call this at the end of their constructors in order to apply caps
+        overrides requested by the client. Note that overrides will only reduce the caps, never
+        expand them. */
+ void applyOptionsOverrides(const GrContextOptions& options);
+
+ sk_sp<GrShaderCaps> fShaderCaps;
+
+ bool fNPOTTextureTileSupport : 1;
+ bool fMipMapSupport : 1;
+ bool fReuseScratchTextures : 1;
+ bool fReuseScratchBuffers : 1;
+ bool fGpuTracingSupport : 1;
+ bool fOversizedStencilSupport : 1;
+ bool fTextureBarrierSupport : 1;
+ bool fSampleLocationsSupport : 1;
+ bool fMultisampleDisableSupport : 1;
+ bool fInstanceAttribSupport : 1;
+ bool fMixedSamplesSupport : 1;
+ bool fMSAAResolvesAutomatically : 1;
+ bool fUsePrimitiveRestart : 1;
+ bool fPreferClientSideDynamicBuffers : 1;
+ bool fPreferFullscreenClears : 1;
+ bool fMustClearUploadedBufferData : 1;
+ bool fShouldInitializeTextures : 1;
+ bool fSupportsAHardwareBufferImages : 1;
+ bool fHalfFloatVertexAttributeSupport : 1;
+ bool fClampToBorderSupport : 1;
+ bool fPerformPartialClearsAsDraws : 1;
+ bool fPerformColorClearsAsDraws : 1;
+ bool fPerformStencilClearsAsDraws : 1;
+ bool fAllowCoverageCounting : 1;
+ bool fTransferBufferSupport : 1;
+ bool fWritePixelsRowBytesSupport : 1;
+ bool fReadPixelsRowBytesSupport : 1;
+
+ // Driver workaround
+ bool fDriverBlacklistCCPR : 1;
+ bool fDriverBlacklistMSAACCPR : 1;
+ bool fAvoidStencilBuffers : 1;
+ bool fAvoidWritePixelsFastPath : 1;
+
+ // ANGLE performance workaround
+ bool fPreferVRAMUseOverFlushes : 1;
+
+ // On some platforms it's better to make more triangles than to use the sample mask (MSAA only).
+ bool fPreferTrianglesOverSampleMask : 1;
+
+ bool fFenceSyncSupport : 1;
+ bool fSemaphoreSupport : 1;
+
+ // Requires fence sync support in GL.
+ bool fCrossContextTextureSupport : 1;
+
+ // Not (yet) implemented in VK backend.
+ bool fDynamicStateArrayGeometryProcessorTextureSupport : 1;
+
+ BlendEquationSupport fBlendEquationSupport;
+ uint32_t fAdvBlendEqBlacklist;
+ GR_STATIC_ASSERT(kLast_GrBlendEquation < 32);
+
+ uint32_t fMapBufferFlags;
+ int fBufferMapThreshold;
+
+ int fMaxRenderTargetSize;
+ int fMaxPreferredRenderTargetSize;
+ int fMaxVertexAttributes;
+ int fMaxTextureSize;
+ int fMaxTileSize;
+ int fMaxWindowRectangles;
+ int fInternalMultisampleCount;
+
+ GrDriverBugWorkarounds fDriverBugWorkarounds;
+
+private:
+ virtual void onApplyOptionsOverrides(const GrContextOptions&) {}
+ virtual void onDumpJSON(SkJSONWriter*) const {}
+ virtual bool onSurfaceSupportsWritePixels(const GrSurface*) const = 0;
+ virtual bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const = 0;
+ virtual GrBackendFormat onGetDefaultBackendFormat(GrColorType, GrRenderable) const = 0;
+
+ // Backends should implement this if they have any extra requirements for use of window
+ // rectangles for a specific GrBackendRenderTarget outside of basic support.
+ virtual bool onIsWindowRectanglesSupportedForRT(const GrBackendRenderTarget&) const {
+ return true;
+ }
+
+ virtual GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType ct) const = 0;
+
+ virtual bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const = 0;
+
+ virtual SupportedRead onSupportedReadPixelsColorType(GrColorType srcColorType,
+ const GrBackendFormat& srcFormat,
+ GrColorType dstColorType) const = 0;
+
+
+ bool fSuppressPrints : 1;
+ bool fWireframeMode : 1;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.cpp b/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.cpp
new file mode 100644
index 0000000000..b927726c6c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrClientMappedBufferManager.h"
+
+#include <algorithm>
+
+GrClientMappedBufferManager::GrClientMappedBufferManager(uint32_t contextID)
+ : fFinishedBufferInbox(contextID) {}
+
+GrClientMappedBufferManager::~GrClientMappedBufferManager() {
+ this->process();
+ if (!fAbandoned) {
+        // If we're going down before we've received the messages, we go ahead and unmap all the
+        // buffers.
+ // It's up to the client to ensure that they aren't being accessed on another thread while
+ // this is happening (or afterwards on any thread).
+ for (auto& b : fClientHeldBuffers) {
+ b->unmap();
+ }
+ }
+}
+
+void GrClientMappedBufferManager::insert(sk_sp<GrGpuBuffer> b) {
+ SkDEBUGCODE(auto end = fClientHeldBuffers.end());
+ SkASSERT(std::find(fClientHeldBuffers.begin(), end, b) == end);
+ fClientHeldBuffers.emplace_front(std::move(b));
+}
+
+void GrClientMappedBufferManager::process() {
+ SkSTArray<4, BufferFinishedMessage> messages;
+ fFinishedBufferInbox.poll(&messages);
+ if (!fAbandoned) {
+ for (auto& m : messages) {
+ this->remove(m.fBuffer);
+ m.fBuffer->unmap();
+ }
+ }
+}
+
+void GrClientMappedBufferManager::abandon() {
+ fAbandoned = true;
+ fClientHeldBuffers.clear();
+}
+
+void GrClientMappedBufferManager::remove(const sk_sp<GrGpuBuffer>& b) {
+    // std::forward_list has no convenient way to remove just the first element that equals a
+    // value, so we walk the list manually.
+ auto prev = fClientHeldBuffers.before_begin();
+ auto end = fClientHeldBuffers.end();
+ SkASSERT(std::find(fClientHeldBuffers.begin(), end, b) != end);
+ for (auto cur = fClientHeldBuffers.begin(); cur != end; prev = cur++) {
+ if (*cur == b) {
+ fClientHeldBuffers.erase_after(prev);
+ break;
+ }
+ }
+ SkASSERT(std::find(fClientHeldBuffers.begin(), end, b) == end);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+DECLARE_SKMESSAGEBUS_MESSAGE(GrClientMappedBufferManager::BufferFinishedMessage)
+
+bool SkShouldPostMessageToBus(const GrClientMappedBufferManager::BufferFinishedMessage& m,
+ uint32_t msgBusUniqueID) {
+ return m.fInboxID == msgBusUniqueID;
+}
diff --git a/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.h b/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.h
new file mode 100644
index 0000000000..3f3b0678d9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClientMappedBufferManager.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClientMappedBufferManager_DEFINED
+#define GrClientMappedBufferManager_DEFINED
+
+#include "include/private/SkTArray.h"
+#include "src/core/SkMessageBus.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include <forward_list>
+
+/**
+ * We sometimes hand clients objects that contain mapped GrGpuBuffers. The client may consume
+ * the mapped buffer on another thread. This object manages receiving messages that buffers are
+ * ready to be unmapped (on the direct GrContext's thread). It also handles cleaning up mapped
+ * buffers if the GrContext is destroyed before the client has finished with the buffer.
+ *
+ * Buffers are first registered using insert() before being passed to the client. process() should be
+ * called periodically on the direct GrContext thread to poll for messages and process them.
+ */
+class GrClientMappedBufferManager final {
+public:
+ /**
+ * The message type that internal users of this should post to unmap the buffer.
+ * Set fInboxID to inboxID(). fBuffer must have been previously passed to insert().
+ */
+ struct BufferFinishedMessage {
+ sk_sp<GrGpuBuffer> fBuffer;
+ uint32_t fInboxID;
+ };
+ using BufferFinishedMessageBus = SkMessageBus<BufferFinishedMessage>;
+
+ GrClientMappedBufferManager(uint32_t contextID);
+ GrClientMappedBufferManager(const GrClientMappedBufferManager&) = delete;
+ GrClientMappedBufferManager(GrClientMappedBufferManager&&) = delete;
+
+ ~GrClientMappedBufferManager();
+
+ GrClientMappedBufferManager& operator=(const GrClientMappedBufferManager&) = delete;
+ GrClientMappedBufferManager& operator=(GrClientMappedBufferManager&&) = delete;
+
+ /** Initialize BufferFinishedMessage::fInboxID to this value. */
+ uint32_t inboxID() const { return fFinishedBufferInbox.uniqueID(); }
+
+ /**
+ * Let the manager know to expect a message with buffer 'b'. It's illegal for a buffer to be
+ * inserted again before it is unmapped by process().
+ */
+ void insert(sk_sp<GrGpuBuffer> b);
+
+ /** Poll for messages and unmap any incoming buffers. */
+ void process();
+
+    /** Notifies the manager that the context has been abandoned. No more unmaps will occur. */
+ void abandon();
+
+private:
+ BufferFinishedMessageBus::Inbox fFinishedBufferInbox;
+ std::forward_list<sk_sp<GrGpuBuffer>> fClientHeldBuffers;
+ bool fAbandoned = false;
+
+ void remove(const sk_sp<GrGpuBuffer>& b);
+};
+
+bool SkShouldPostMessageToBus(const GrClientMappedBufferManager::BufferFinishedMessage&,
+ uint32_t msgBusUniqueID);
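+
+// Typical lifecycle, as a sketch (the buffer and context ID variables are assumed):
+//
+//     GrClientMappedBufferManager mgr(contextID);
+//     mgr.insert(buffer);  // before handing the mapped buffer to the client
+//     // ... the client finishes on its own thread and posts a BufferFinishedMessage with
+//     // fInboxID == mgr.inboxID() ...
+//     mgr.process();       // on the direct GrContext thread; unmaps finished buffers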
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrClip.h b/gfx/skia/skia/src/gpu/GrClip.h
new file mode 100644
index 0000000000..cc73c641fd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClip.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClip_DEFINED
+#define GrClip_DEFINED
+
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+class GrContext;
+
+/**
+ * GrClip is an abstract base class for applying a clip. It constructs a clip mask if necessary, and
+ * fills out a GrAppliedClip instructing the caller on how to set up the draw state.
+ */
+class GrClip {
+public:
+ virtual bool quickContains(const SkRect&) const = 0;
+ virtual bool quickContains(const SkRRect& rrect) const {
+ return this->quickContains(rrect.getBounds());
+ }
+ virtual void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects = nullptr) const = 0;
+ /**
+ * This computes a GrAppliedClip from the clip which in turn can be used to build a GrPipeline.
+ * To determine the appropriate clipping implementation the GrClip subclass must know whether
+     * the draw will enable HW AA or use the stencil buffer. On input 'bounds' is a conservative
+ * bounds of the draw that is to be clipped. After return 'bounds' has been intersected with a
+ * conservative bounds of the clip. A return value of false indicates that the draw can be
+ * skipped as it is fully clipped out.
+ */
+ virtual bool apply(GrRecordingContext*, GrRenderTargetContext*, bool useHWAA,
+ bool hasUserStencilSettings, GrAppliedClip*, SkRect* bounds) const = 0;
+
+ virtual ~GrClip() {}
+
+ /**
+ * This method quickly and conservatively determines whether the entire clip is equivalent to
+ * intersection with a rrect. This will only return true if the rrect does not fully contain
+ * the render target bounds. Moreover, the returned rrect need not be contained by the render
+ * target bounds. We assume all draws will be implicitly clipped by the render target bounds.
+ *
+ * @param rtBounds The bounds of the render target that the clip will be applied to.
+ * @param rrect If return is true rrect will contain the rrect equivalent to the clip within
+ * rtBounds.
+ * @param aa If return is true aa will indicate whether the rrect clip is antialiased.
+ * @return true if the clip is equivalent to a single rrect, false otherwise.
+ *
+ */
+ virtual bool isRRect(const SkRect& rtBounds, SkRRect* rrect, GrAA* aa) const = 0;
+
+ /**
+     * This is the maximum distance that a draw may extend beyond a clip's boundary and still
+     * count as "on the other side". We leave some slack because floating point rounding error is
+ * likely to blame. The rationale for 1e-3 is that in the coverage case (and barring unexpected
+ * rounding), as long as coverage stays within 0.5 * 1/256 of its intended value it shouldn't
+ * have any effect on the final pixel values.
+ */
+ constexpr static SkScalar kBoundsTolerance = 1e-3f;
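+    // Concretely: perturbing an edge by kBoundsTolerance moves an 8-bit coverage ramp by at most
+    // 1e-3, which is below half a quantization step (0.5 * 1/256 ~= 0.00195), so the rounded
+    // pixel values cannot change.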
+
+ /**
+ * Returns true if the given query bounds count as entirely inside the clip.
+ *
+ * @param innerClipBounds device-space rect contained by the clip (SkRect or SkIRect).
+ * @param queryBounds device-space bounds of the query region.
+ */
+ template <typename TRect>
+ constexpr static bool IsInsideClip(const TRect& innerClipBounds, const SkRect& queryBounds) {
+ return innerClipBounds.fRight > innerClipBounds.fLeft + kBoundsTolerance &&
+ innerClipBounds.fBottom > innerClipBounds.fTop + kBoundsTolerance &&
+ innerClipBounds.fLeft < queryBounds.fLeft + kBoundsTolerance &&
+ innerClipBounds.fTop < queryBounds.fTop + kBoundsTolerance &&
+ innerClipBounds.fRight > queryBounds.fRight - kBoundsTolerance &&
+ innerClipBounds.fBottom > queryBounds.fBottom - kBoundsTolerance;
+ }
+
+ /**
+ * Returns true if the given query bounds count as entirely outside the clip.
+ *
+ * @param outerClipBounds device-space rect that contains the clip (SkRect or SkIRect).
+ * @param queryBounds device-space bounds of the query region.
+ */
+ template <typename TRect>
+ constexpr static bool IsOutsideClip(const TRect& outerClipBounds, const SkRect& queryBounds) {
+ return
+ // Is the clip so small that it is effectively empty?
+ outerClipBounds.fRight - outerClipBounds.fLeft <= kBoundsTolerance ||
+ outerClipBounds.fBottom - outerClipBounds.fTop <= kBoundsTolerance ||
+
+ // Are the query bounds effectively outside the clip?
+ outerClipBounds.fLeft >= queryBounds.fRight - kBoundsTolerance ||
+ outerClipBounds.fTop >= queryBounds.fBottom - kBoundsTolerance ||
+ outerClipBounds.fRight <= queryBounds.fLeft + kBoundsTolerance ||
+ outerClipBounds.fBottom <= queryBounds.fTop + kBoundsTolerance;
+ }
+
+ /**
+ * Returns the minimal integer rect that counts as containing a given set of bounds.
+ */
+ static SkIRect GetPixelIBounds(const SkRect& bounds) {
+ return SkIRect::MakeLTRB(SkScalarFloorToInt(bounds.fLeft + kBoundsTolerance),
+ SkScalarFloorToInt(bounds.fTop + kBoundsTolerance),
+ SkScalarCeilToInt(bounds.fRight - kBoundsTolerance),
+ SkScalarCeilToInt(bounds.fBottom - kBoundsTolerance));
+ }
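+    // For example, GetPixelIBounds({0.4f, 0.4f, 10.0004f, 9.9997f}) yields {0, 0, 10, 10}:
+    // ceil(10.0004 - kBoundsTolerance) == 10, so an overhang of less than kBoundsTolerance past
+    // a pixel edge does not grow the result.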
+
+ /**
+ * Returns the minimal pixel-aligned rect that counts as containing a given set of bounds.
+ */
+ static SkRect GetPixelBounds(const SkRect& bounds) {
+ return SkRect::MakeLTRB(SkScalarFloorToScalar(bounds.fLeft + kBoundsTolerance),
+ SkScalarFloorToScalar(bounds.fTop + kBoundsTolerance),
+ SkScalarCeilToScalar(bounds.fRight - kBoundsTolerance),
+ SkScalarCeilToScalar(bounds.fBottom - kBoundsTolerance));
+ }
+
+ /**
+ * Returns true if the given rect counts as aligned with pixel boundaries.
+ */
+ static bool IsPixelAligned(const SkRect& rect) {
+ return SkScalarAbs(SkScalarRoundToScalar(rect.fLeft) - rect.fLeft) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fTop) - rect.fTop) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fRight) - rect.fRight) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fBottom) - rect.fBottom) <= kBoundsTolerance;
+ }
+};
+
+
+/**
+ * GrHardClip never uses coverage FPs. It can only enforce the clip using the already-existing
+ * stencil buffer contents and/or fixed-function state like scissor. Always aliased if MSAA is off.
+ */
+class GrHardClip : public GrClip {
+public:
+ /**
+ * Sets the appropriate hardware state modifications on GrAppliedHardClip that will implement
+ * the clip. On input 'bounds' is a conservative bounds of the draw that is to be clipped. After
+ * return 'bounds' has been intersected with a conservative bounds of the clip. A return value
+ * of false indicates that the draw can be skipped as it is fully clipped out.
+ */
+ virtual bool apply(int rtWidth, int rtHeight, GrAppliedHardClip* out, SkRect* bounds) const = 0;
+
+private:
+ bool apply(GrRecordingContext*, GrRenderTargetContext* rtc, bool useHWAA,
+ bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds) const final {
+ return this->apply(rtc->width(), rtc->height(), &out->hardClip(), bounds);
+ }
+};
+
+/**
+ * Specialized implementation for no clip.
+ */
+class GrNoClip final : public GrHardClip {
+private:
+ bool quickContains(const SkRect&) const final { return true; }
+ bool quickContains(const SkRRect&) const final { return true; }
+ void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const final {
+ devResult->setXYWH(0, 0, width, height);
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = true;
+ }
+ }
+ bool apply(int rtWidth, int rtHeight, GrAppliedHardClip*, SkRect*) const final { return true; }
+ bool isRRect(const SkRect&, SkRRect*, GrAA*) const override { return false; }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrClipStackClip.cpp b/gfx/skia/skia/src/gpu/GrClipStackClip.cpp
new file mode 100644
index 0000000000..2db564fcad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClipStackClip.cpp
@@ -0,0 +1,544 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTo.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTaskGroup.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrClipStackClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDeferredProxyUploader.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrSWMaskHelper.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/GrConvexPolyEffect.h"
+#include "src/gpu/effects/GrRRectEffect.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/geometry/GrShape.h"
+
+typedef SkClipStack::Element Element;
+typedef GrReducedClip::InitialState InitialState;
+typedef GrReducedClip::ElementList ElementList;
+
+const char GrClipStackClip::kMaskTestTag[] = "clip_mask";
+
+bool GrClipStackClip::quickContains(const SkRect& rect) const {
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+ return fStack->quickContains(rect);
+}
+
+bool GrClipStackClip::quickContains(const SkRRect& rrect) const {
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+ return fStack->quickContains(rrect);
+}
+
+bool GrClipStackClip::isRRect(const SkRect& origRTBounds, SkRRect* rr, GrAA* aa) const {
+ if (!fStack) {
+ return false;
+ }
+ const SkRect* rtBounds = &origRTBounds;
+ bool isAA;
+ if (fStack->isRRect(*rtBounds, rr, &isAA)) {
+ *aa = GrAA(isAA);
+ return true;
+ }
+ return false;
+}
+
+void GrClipStackClip::getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const {
+ if (!fStack) {
+ devResult->setXYWH(0, 0, width, height);
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = true;
+ }
+ return;
+ }
+ SkRect devBounds;
+ fStack->getConservativeBounds(0, 0, width, height, &devBounds, isIntersectionOfRects);
+ devBounds.roundOut(devResult);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// set up the draw state to enable the aa clipping mask.
+static std::unique_ptr<GrFragmentProcessor> create_fp_for_mask(sk_sp<GrTextureProxy> mask,
+ const SkIRect& devBound) {
+ SkIRect domainTexels = SkIRect::MakeWH(devBound.width(), devBound.height());
+ return GrDeviceSpaceTextureDecalFragmentProcessor::Make(std::move(mask), domainTexels,
+ {devBound.fLeft, devBound.fTop});
+}
+
+// Does the path in 'element' require SW rendering? If so, return true (and,
+// optionally, set 'prOut' to NULL). If not, return false (and, optionally, set
+// 'prOut' to the non-SW path renderer that will do the job).
+bool GrClipStackClip::PathNeedsSWRenderer(GrRecordingContext* context,
+ const SkIRect& scissorRect,
+ bool hasUserStencilSettings,
+ const GrRenderTargetContext* renderTargetContext,
+ const SkMatrix& viewMatrix,
+ const Element* element,
+ GrPathRenderer** prOut,
+ bool needsStencil) {
+ if (Element::DeviceSpaceType::kRect == element->getDeviceSpaceType()) {
+ // rects can always be drawn directly w/o using the software path
+ // TODO: skip rrects once we're drawing them directly.
+ if (prOut) {
+ *prOut = nullptr;
+ }
+ return false;
+ } else {
+ // We shouldn't get here with an empty clip element.
+ SkASSERT(Element::DeviceSpaceType::kEmpty != element->getDeviceSpaceType());
+
+ // the gpu alpha mask will draw the inverse paths as non-inverse to a temp buffer
+ SkPath path;
+ element->asDeviceSpacePath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ // We only use this method when rendering coverage clip masks.
+ SkASSERT(renderTargetContext->numSamples() <= 1);
+ auto aaType = (element->isAA()) ? GrAAType::kCoverage : GrAAType::kNone;
+
+ GrPathRendererChain::DrawType type =
+ needsStencil ? GrPathRendererChain::DrawType::kStencilAndColor
+ : GrPathRendererChain::DrawType::kColor;
+
+ GrShape shape(path, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fCaps = context->priv().caps();
+ canDrawArgs.fProxy = renderTargetContext->proxy();
+ canDrawArgs.fClipConservativeBounds = &scissorRect;
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAAType = aaType;
+ SkASSERT(!renderTargetContext->wrapsVkSecondaryCB());
+ canDrawArgs.fTargetIsWrappedVkSecondaryCB = false;
+ canDrawArgs.fHasUserStencilSettings = hasUserStencilSettings;
+
+ // the 'false' parameter disallows use of the SW path renderer
+ GrPathRenderer* pr =
+ context->priv().drawingManager()->getPathRenderer(canDrawArgs, false, type);
+ if (prOut) {
+ *prOut = pr;
+ }
+ return SkToBool(!pr);
+ }
+}
+
+/*
+ * This method traverses the clip stack to see if the GrSoftwarePathRenderer
+ * will be used on any element. If so, it returns true to indicate that the
+ * entire clip should be rendered in SW and then uploaded en masse to the gpu.
+ */
+bool GrClipStackClip::UseSWOnlyPath(GrRecordingContext* context,
+ bool hasUserStencilSettings,
+ const GrRenderTargetContext* renderTargetContext,
+ const GrReducedClip& reducedClip) {
+ // TODO: right now it appears that GPU clip masks are strictly slower than software. We may
+ // want to revisit this assumption once we can test with render target sorting.
+ return true;
+
+ // TODO: generalize this function so that when
+ // a clip gets complex enough it can just be done in SW regardless
+ // of whether it would invoke the GrSoftwarePathRenderer.
+
+ // If we're avoiding stencils, always use SW. This includes drawing into a wrapped vulkan
+ // secondary command buffer which can't handle stencils.
+ if (context->priv().caps()->avoidStencilBuffers() ||
+ renderTargetContext->wrapsVkSecondaryCB()) {
+ return true;
+ }
+
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip
+ // space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-reducedClip.left()), SkIntToScalar(-reducedClip.top()));
+
+ for (ElementList::Iter iter(reducedClip.maskElements()); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+
+ SkClipOp op = element->getOp();
+ bool invert = element->isInverseFilled();
+ bool needsStencil = invert ||
+ kIntersect_SkClipOp == op || kReverseDifference_SkClipOp == op;
+
+ if (PathNeedsSWRenderer(context, reducedClip.scissor(), hasUserStencilSettings,
+ renderTargetContext, translate, element, nullptr, needsStencil)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// sort out what kind of clip mask needs to be created: alpha, stencil,
+// scissor, or entirely software
+bool GrClipStackClip::apply(GrRecordingContext* context, GrRenderTargetContext* renderTargetContext,
+ bool useHWAA, bool hasUserStencilSettings, GrAppliedClip* out,
+ SkRect* bounds) const {
+ SkRect devBounds = SkRect::MakeIWH(renderTargetContext->width(), renderTargetContext->height());
+ if (!devBounds.intersect(*bounds)) {
+ return false;
+ }
+
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+
+    // A default count of 4 was chosen because of the common pattern in Blink of:
+ // isect RR
+ // diff RR
+ // isect convex_poly
+ // isect convex_poly
+ // when drawing rounded div borders.
+ constexpr int kMaxAnalyticFPs = 4;
+
+ int maxWindowRectangles = renderTargetContext->priv().maxWindowRectangles();
+ int maxAnalyticFPs = kMaxAnalyticFPs;
+ if (renderTargetContext->numSamples() > 1 || useHWAA || hasUserStencilSettings) {
+ // Disable analytic clips when we have MSAA. In MSAA we never conflate coverage and opacity.
+ maxAnalyticFPs = 0;
+ // We disable MSAA when avoiding stencil.
+ SkASSERT(!context->priv().caps()->avoidStencilBuffers());
+ }
+ auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();
+
+ GrReducedClip reducedClip(*fStack, devBounds, context->priv().caps(),
+ maxWindowRectangles, maxAnalyticFPs, ccpr ? maxAnalyticFPs : 0);
+ if (InitialState::kAllOut == reducedClip.initialState() &&
+ reducedClip.maskElements().isEmpty()) {
+ return false;
+ }
+
+ if (reducedClip.hasScissor() && !GrClip::IsInsideClip(reducedClip.scissor(), devBounds)) {
+ out->hardClip().addScissor(reducedClip.scissor(), bounds);
+ }
+
+ if (!reducedClip.windowRectangles().empty()) {
+ out->hardClip().addWindowRectangles(reducedClip.windowRectangles(),
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ if (!reducedClip.maskElements().isEmpty()) {
+ if (!this->applyClipMask(context, renderTargetContext, reducedClip, hasUserStencilSettings,
+ out)) {
+ return false;
+ }
+ }
+
+    // The opsTask ID must not be looked up until AFTER producing the clip mask (if any). That step
+    // can cause a flush or otherwise change which opsTask our draw is going into.
+ uint32_t opsTaskID = renderTargetContext->getOpsTask()->uniqueID();
+ if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opsTaskID)) {
+ out->addCoverageFP(std::move(clipFPs));
+ }
+
+ return true;
+}
+
+bool GrClipStackClip::applyClipMask(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext,
+ const GrReducedClip& reducedClip, bool hasUserStencilSettings,
+ GrAppliedClip* out) const {
+#ifdef SK_DEBUG
+ SkASSERT(reducedClip.hasScissor());
+ SkIRect rtIBounds = SkIRect::MakeWH(renderTargetContext->width(),
+ renderTargetContext->height());
+ const SkIRect& scissor = reducedClip.scissor();
+ SkASSERT(rtIBounds.contains(scissor)); // Mask shouldn't be larger than the RT.
+#endif
+
+ // MIXED SAMPLES TODO: We may want to explore using the stencil buffer for AA clipping.
+ if ((renderTargetContext->numSamples() <= 1 && reducedClip.maskRequiresAA()) ||
+ context->priv().caps()->avoidStencilBuffers() ||
+ renderTargetContext->wrapsVkSecondaryCB()) {
+ sk_sp<GrTextureProxy> result;
+ if (UseSWOnlyPath(context, hasUserStencilSettings, renderTargetContext, reducedClip)) {
+ // The clip geometry is complex enough that it will be more efficient to create it
+            // entirely in software.
+ result = this->createSoftwareClipMask(context, reducedClip, renderTargetContext);
+ } else {
+ result = this->createAlphaClipMask(context, reducedClip);
+ }
+
+ if (result) {
+ // The mask's top left coord should be pinned to the rounded-out top left corner of
+ // the clip's device space bounds.
+ out->addCoverageFP(create_fp_for_mask(std::move(result), reducedClip.scissor()));
+ return true;
+ }
+
+ // If alpha or software clip mask creation fails, fall through to the stencil code paths,
+ // unless stencils are disallowed.
+ if (context->priv().caps()->avoidStencilBuffers() ||
+ renderTargetContext->wrapsVkSecondaryCB()) {
+ SkDebugf("WARNING: Clip mask requires stencil, but stencil unavailable. "
+ "Clip will be ignored.\n");
+ return false;
+ }
+ }
+
+ // This relies on the property that a reduced sub-rect of the last clip will contain all the
+ // relevant window rectangles that were in the last clip. This subtle requirement will go away
+ // after clipping is overhauled.
+ if (renderTargetContext->priv().mustRenderClip(reducedClip.maskGenID(), reducedClip.scissor(),
+ reducedClip.numAnalyticFPs())) {
+ reducedClip.drawStencilClipMask(context, renderTargetContext);
+ renderTargetContext->priv().setLastClip(reducedClip.maskGenID(), reducedClip.scissor(),
+ reducedClip.numAnalyticFPs());
+ }
+ // GrAppliedClip doesn't need to figure numAnalyticFPs into its key (used by operator==) because
+ // it verifies the FPs are also equal.
+ out->hardClip().addStencilClip(reducedClip.maskGenID());
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Create an 8-bit clip mask in alpha
+
+static void create_clip_mask_key(uint32_t clipGenID, const SkIRect& bounds, int numAnalyticFPs,
+ GrUniqueKey* key) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kDomain, 4, GrClipStackClip::kMaskTestTag);
+ builder[0] = clipGenID;
+ // SkToS16 because image filters outset layers to a size indicated by the filter, which can
+ // sometimes result in negative coordinates from device space.
+ builder[1] = SkToS16(bounds.fLeft) | (SkToS16(bounds.fRight) << 16);
+ builder[2] = SkToS16(bounds.fTop) | (SkToS16(bounds.fBottom) << 16);
+ builder[3] = numAnalyticFPs;
+}
+
+static void add_invalidate_on_pop_message(GrRecordingContext* context,
+ const SkClipStack& stack, uint32_t clipGenID,
+ const GrUniqueKey& clipMaskKey) {
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kTop_IterStart);
+ while (const Element* element = iter.prev()) {
+ if (element->getGenID() == clipGenID) {
+ element->addResourceInvalidationMessage(proxyProvider, clipMaskKey);
+ return;
+ }
+ }
+ SkDEBUGFAIL("Gen ID was not found in stack.");
+}
+
+sk_sp<GrTextureProxy> GrClipStackClip::createAlphaClipMask(GrRecordingContext* context,
+ const GrReducedClip& reducedClip) const {
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ GrUniqueKey key;
+ create_clip_mask_key(reducedClip.maskGenID(), reducedClip.scissor(),
+ reducedClip.numAnalyticFPs(), &key);
+
+ sk_sp<GrTextureProxy> proxy(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin));
+ if (proxy) {
+ return proxy;
+ }
+
+    // 'proxy' is null here (the cache lookup above missed), so it cannot be queried; assume the
+    // new mask texture is not protected.
+    auto isProtected = GrProtected::kNo;
+ auto rtc = context->priv().makeDeferredRenderTargetContextWithFallback(SkBackingFit::kApprox,
+ reducedClip.width(),
+ reducedClip.height(),
+ GrColorType::kAlpha_8,
+ nullptr,
+ 1,
+ GrMipMapped::kNo,
+ kTopLeft_GrSurfaceOrigin,
+ nullptr,
+ SkBudgeted::kYes,
+ isProtected);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ if (!reducedClip.drawAlphaClipMask(rtc.get())) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> result(rtc->asTextureProxyRef());
+ if (!result) {
+ return nullptr;
+ }
+
+ SkASSERT(result->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, result.get());
+ add_invalidate_on_pop_message(context, *fStack, reducedClip.maskGenID(), key);
+
+ return result;
+}
+
+namespace {
+
+/**
+ * Payload class for use with GrTDeferredProxyUploader. The clip mask code renders multiple
+ * elements, each storing their own AA setting (and already transformed into device space). This
+ * stores all of the information needed by the worker thread to draw all clip elements (see below,
+ * in createSoftwareClipMask).
+ */
+class ClipMaskData {
+public:
+ ClipMaskData(const GrReducedClip& reducedClip)
+ : fScissor(reducedClip.scissor())
+ , fInitialState(reducedClip.initialState()) {
+ for (ElementList::Iter iter(reducedClip.maskElements()); iter.get(); iter.next()) {
+ fElements.addToTail(*iter.get());
+ }
+ }
+
+ const SkIRect& scissor() const { return fScissor; }
+ InitialState initialState() const { return fInitialState; }
+ const ElementList& elements() const { return fElements; }
+
+private:
+ SkIRect fScissor;
+ InitialState fInitialState;
+ ElementList fElements;
+};
+
+}  // namespace
+
+static void draw_clip_elements_to_mask_helper(GrSWMaskHelper& helper, const ElementList& elements,
+ const SkIRect& scissor, InitialState initialState) {
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-scissor.left()), SkIntToScalar(-scissor.top()));
+
+ helper.clear(InitialState::kAllIn == initialState ? 0xFF : 0x00);
+
+ for (ElementList::Iter iter(elements); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ SkClipOp op = element->getOp();
+ GrAA aa = GrAA(element->isAA());
+
+ if (kIntersect_SkClipOp == op || kReverseDifference_SkClipOp == op) {
+ // Intersect and reverse difference require modifying pixels outside of the geometry
+ // that is being "drawn". In both cases we erase all the pixels outside of the geometry
+ // but leave the pixels inside the geometry alone. For reverse difference we invert all
+ // the pixels before clearing the ones outside the geometry.
+ if (kReverseDifference_SkClipOp == op) {
+ SkRect temp = SkRect::Make(scissor);
+ // invert the entire scene
+ helper.drawRect(temp, translate, SkRegion::kXOR_Op, GrAA::kNo, 0xFF);
+ }
+ SkPath clipPath;
+ element->asDeviceSpacePath(&clipPath);
+ clipPath.toggleInverseFillType();
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ helper.drawShape(shape, translate, SkRegion::kReplace_Op, aa, 0x00);
+ continue;
+ }
+
+ // The other ops (union, xor, diff) only affect pixels inside
+ // the geometry so they can just be drawn normally
+ if (Element::DeviceSpaceType::kRect == element->getDeviceSpaceType()) {
+ helper.drawRect(element->getDeviceSpaceRect(), translate, (SkRegion::Op)op, aa, 0xFF);
+ } else {
+ SkPath path;
+ element->asDeviceSpacePath(&path);
+ GrShape shape(path, GrStyle::SimpleFill());
+ helper.drawShape(shape, translate, (SkRegion::Op)op, aa, 0xFF);
+ }
+ }
+}
+
+sk_sp<GrTextureProxy> GrClipStackClip::createSoftwareClipMask(
+ GrRecordingContext* context, const GrReducedClip& reducedClip,
+ GrRenderTargetContext* renderTargetContext) const {
+ GrUniqueKey key;
+ create_clip_mask_key(reducedClip.maskGenID(), reducedClip.scissor(),
+ reducedClip.numAnalyticFPs(), &key);
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ const GrCaps* caps = context->priv().caps();
+
+ sk_sp<GrTextureProxy> proxy(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin));
+ if (proxy) {
+ return proxy;
+ }
+
+ // The mask texture may be larger than necessary. We round out the clip bounds and pin the top
+ // left corner of the resulting rect to the top left of the texture.
+ SkIRect maskSpaceIBounds = SkIRect::MakeWH(reducedClip.width(), reducedClip.height());
+
+ SkTaskGroup* taskGroup = nullptr;
+ if (auto direct = context->priv().asDirectContext()) {
+ taskGroup = direct->priv().getTaskGroup();
+ }
+
+ if (taskGroup && renderTargetContext) {
+ // Create our texture proxy
+ GrSurfaceDesc desc;
+ desc.fWidth = maskSpaceIBounds.width();
+ desc.fHeight = maskSpaceIBounds.height();
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
+ GrRenderable::kNo);
+
+        // MDB TODO: We're going to fill this proxy with an ASAP upload (which is out of order
+        // with respect to ops), so it can't have any pending IO.
+ proxy = proxyProvider->createProxy(format,
+ desc,
+ GrRenderable::kNo,
+ 1,
+ kTopLeft_GrSurfaceOrigin,
+ GrMipMapped::kNo,
+ SkBackingFit::kApprox,
+ SkBudgeted::kYes,
+ GrProtected::kNo);
+
+ auto uploader = skstd::make_unique<GrTDeferredProxyUploader<ClipMaskData>>(reducedClip);
+ GrTDeferredProxyUploader<ClipMaskData>* uploaderRaw = uploader.get();
+ auto drawAndUploadMask = [uploaderRaw, maskSpaceIBounds] {
+ TRACE_EVENT0("skia.gpu", "Threaded SW Clip Mask Render");
+ GrSWMaskHelper helper(uploaderRaw->getPixels());
+ if (helper.init(maskSpaceIBounds)) {
+ draw_clip_elements_to_mask_helper(helper, uploaderRaw->data().elements(),
+ uploaderRaw->data().scissor(),
+ uploaderRaw->data().initialState());
+ } else {
+ SkDEBUGFAIL("Unable to allocate SW clip mask.");
+ }
+ uploaderRaw->signalAndFreeData();
+ };
+
+ taskGroup->add(std::move(drawAndUploadMask));
+ proxy->texPriv().setDeferredUploader(std::move(uploader));
+ } else {
+ GrSWMaskHelper helper;
+ if (!helper.init(maskSpaceIBounds)) {
+ return nullptr;
+ }
+
+ draw_clip_elements_to_mask_helper(helper, reducedClip.maskElements(), reducedClip.scissor(),
+ reducedClip.initialState());
+
+ proxy = helper.toTextureProxy(context, SkBackingFit::kApprox);
+ }
+
+ SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, proxy.get());
+ add_invalidate_on_pop_message(context, *fStack, reducedClip.maskGenID(), key);
+ return proxy;
+}
diff --git a/gfx/skia/skia/src/gpu/GrClipStackClip.h b/gfx/skia/skia/src/gpu/GrClipStackClip.h
new file mode 100644
index 0000000000..7d16d5e9b1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClipStackClip.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrClipStackClip_DEFINED
+#define GrClipStackClip_DEFINED
+
+#include "src/core/SkClipStack.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrReducedClip.h"
+
+class GrPathRenderer;
+class GrTextureProxy;
+
+/**
+ * GrClipStackClip can apply a generic SkClipStack to the draw state. It may need to generate an
+ * 8-bit alpha clip mask and/or modify the stencil buffer during apply().
+ */
+class GrClipStackClip final : public GrClip {
+public:
+ GrClipStackClip(const SkClipStack* stack = nullptr) { this->reset(stack); }
+
+ void reset(const SkClipStack* stack) { fStack = stack; }
+
+ bool quickContains(const SkRect&) const final;
+ bool quickContains(const SkRRect&) const final;
+ void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const final;
+ bool apply(GrRecordingContext*, GrRenderTargetContext*, bool useHWAA,
+ bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds) const final;
+
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, GrAA* aa) const override;
+
+ sk_sp<GrTextureProxy> testingOnly_createClipMask(GrContext*) const;
+ static const char kMaskTestTag[];
+
+private:
+ static bool PathNeedsSWRenderer(GrRecordingContext* context,
+ const SkIRect& scissorRect,
+ bool hasUserStencilSettings,
+ const GrRenderTargetContext*,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element,
+ GrPathRenderer** prOut,
+ bool needsStencil);
+
+ bool applyClipMask(GrRecordingContext*, GrRenderTargetContext*, const GrReducedClip&,
+ bool hasUserStencilSettings, GrAppliedClip*) const;
+
+    // Creates an alpha mask of the clip. The mask is a rasterization of the clip elements through
+    // the rect specified by the reduced clip's scissor.
+ sk_sp<GrTextureProxy> createAlphaClipMask(GrRecordingContext*, const GrReducedClip&) const;
+
+ // Similar to createAlphaClipMask but it rasterizes in SW and uploads to the result texture.
+ sk_sp<GrTextureProxy> createSoftwareClipMask(GrRecordingContext*, const GrReducedClip&,
+ GrRenderTargetContext*) const;
+
+ static bool UseSWOnlyPath(GrRecordingContext*,
+ bool hasUserStencilSettings,
+ const GrRenderTargetContext*,
+ const GrReducedClip&);
+
+ const SkClipStack* fStack;
+};
+
+#endif // GrClipStackClip_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrColor.h b/gfx/skia/skia/src/gpu/GrColor.h
new file mode 100644
index 0000000000..39e7bfa7bf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColor.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrColor_DEFINED
+#define GrColor_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+
+/**
+ * GrColor is 4 bytes for R, G, B, A, in a specific order defined below. Whether the color is
+ * premultiplied or not depends on the context in which it is being used.
+ */
+typedef uint32_t GrColor;
+
+// shift amount to assign a component to a GrColor int
+// These shift values are chosen for compatibility with GL attrib arrays.
+// ES doesn't allow BGRA vertex attrib order, so if they were not in this order
+// we'd have to swizzle in shaders.
+#ifdef SK_CPU_BENDIAN
+ #define GrColor_SHIFT_R 24
+ #define GrColor_SHIFT_G 16
+ #define GrColor_SHIFT_B 8
+ #define GrColor_SHIFT_A 0
+#else
+ #define GrColor_SHIFT_R 0
+ #define GrColor_SHIFT_G 8
+ #define GrColor_SHIFT_B 16
+ #define GrColor_SHIFT_A 24
+#endif
+
+/**
+ * Pack 4 components (RGBA) into a GrColor int
+ */
+static inline GrColor GrColorPackRGBA(unsigned r, unsigned g, unsigned b, unsigned a) {
+ SkASSERT((uint8_t)r == r);
+ SkASSERT((uint8_t)g == g);
+ SkASSERT((uint8_t)b == b);
+ SkASSERT((uint8_t)a == a);
+ return (r << GrColor_SHIFT_R) |
+ (g << GrColor_SHIFT_G) |
+ (b << GrColor_SHIFT_B) |
+ (a << GrColor_SHIFT_A);
+}
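+
+// For example, on a little-endian build GrColorPackRGBA(0xFF, 0x00, 0x00, 0xFF) (opaque red)
+// yields 0xFF0000FF: R lands in the low byte and A in the high byte, so the in-memory byte
+// order is R, G, B, A, matching what GL vertex attrib arrays expect.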
+
+// extract a component (byte) from a GrColor int
+
+#define GrColorUnpackR(color) (((color) >> GrColor_SHIFT_R) & 0xFF)
+#define GrColorUnpackG(color) (((color) >> GrColor_SHIFT_G) & 0xFF)
+#define GrColorUnpackB(color) (((color) >> GrColor_SHIFT_B) & 0xFF)
+#define GrColorUnpackA(color) (((color) >> GrColor_SHIFT_A) & 0xFF)
+
+/**
+ * Since premultiplied means that alpha >= color, we construct a color with
+ * each component==255 and alpha == 0 to be "illegal"
+ */
+#define GrColor_ILLEGAL (~(0xFF << GrColor_SHIFT_A))
+
+/** Normalizes and converts a uint8_t to a float. [0, 255] -> [0.0, 1.0] */
+static inline float GrNormalizeByteToFloat(uint8_t value) {
+ static const float ONE_OVER_255 = 1.f / 255.f;
+ return value * ONE_OVER_255;
+}
+
+/** Used to pick vertex attribute types. */
+static inline bool SkPMColor4fFitsInBytes(const SkPMColor4f& color) {
+    // Might want to check that the components are [0...a] instead of [0...1]?
+ return color.fitsInBytes();
+}
+
+static inline uint64_t SkPMColor4f_toFP16(const SkPMColor4f& color) {
+ uint64_t halfColor;
+ SkFloatToHalf_finite_ftz(Sk4f::Load(color.vec())).store(&halfColor);
+ return halfColor;
+}
+
+/**
+ * GrVertexColor is a helper for writing colors to a vertex attribute. It stores either GrColor
+ * or four half-float channels, depending on the wideColor parameter. GrVertexWriter will write
+ * the correct amount of data. Note that the GP needs to have been constructed with the correct
+ * attribute type for colors, to match the usage here.
+ */
+class GrVertexColor {
+public:
+ explicit GrVertexColor(const SkPMColor4f& color, bool wideColor)
+ : fWideColor(wideColor) {
+ if (wideColor) {
+ SkFloatToHalf_finite_ftz(Sk4f::Load(color.vec())).store(&fColor);
+ } else {
+ fColor[0] = color.toBytes_RGBA();
+ }
+ }
+
+ size_t size() const { return fWideColor ? 8 : 4; }
+
+private:
+ friend struct GrVertexWriter;
+
+ uint32_t fColor[2];
+ bool fWideColor;
+};
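+
+// Usage sketch ('writer' is an assumed GrVertexWriter, the intended consumer):
+//
+//     SkPMColor4f c = {0.5f, 0.25f, 0.125f, 1.0f};
+//     GrVertexColor vc(c, /*wideColor=*/!SkPMColor4fFitsInBytes(c));
+//     writer.write(vc);  // emits 4 bytes (RGBA8) or 8 bytes (four halfs)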
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrColorInfo.cpp b/gfx/skia/skia/src/gpu/GrColorInfo.cpp
new file mode 100644
index 0000000000..4caf7c1c0c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColorInfo.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrColorInfo.h"
+
+#include "src/core/SkColorSpacePriv.h"
+
+GrColorInfo::GrColorInfo(
+ GrColorType colorType, SkAlphaType alphaType, sk_sp<SkColorSpace> colorSpace)
+ : fColorSpace(std::move(colorSpace)), fColorType(colorType), fAlphaType(alphaType) {}
+
+GrColorInfo::GrColorInfo(const SkColorInfo& ci)
+ : GrColorInfo(SkColorTypeToGrColorType(ci.colorType()),
+ ci.alphaType(),
+ ci.refColorSpace()) {}
+
+GrColorInfo::GrColorInfo(const GrColorInfo& ci)
+ : GrColorInfo(ci.colorType(),
+ ci.alphaType(),
+ ci.refColorSpace()) {}
+
+GrColorSpaceXform* GrColorInfo::colorSpaceXformFromSRGB() const {
+ // TODO: Make this atomic if we start accessing this on multiple threads.
+ if (!fInitializedColorSpaceXformFromSRGB) {
+ // sRGB sources are very common (SkColor, etc...), so we cache that transformation
+ fColorXformFromSRGB = GrColorSpaceXform::Make(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ fColorSpace.get(), kUnpremul_SkAlphaType);
+ fInitializedColorSpaceXformFromSRGB = true;
+ }
+ // You can't be color-space aware in legacy mode
+ SkASSERT(fColorSpace || !fColorXformFromSRGB);
+ return fColorXformFromSRGB.get();
+}
diff --git a/gfx/skia/skia/src/gpu/GrColorInfo.h b/gfx/skia/skia/src/gpu/GrColorInfo.h
new file mode 100644
index 0000000000..91b35b8dec
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColorInfo.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrColorInfo_DEFINED
+#define GrColorInfo_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrColorSpaceXform.h"
+
+/**
+ * All the info needed to interpret a color: color type + alpha type + color space. Also caches
+ * the GrColorSpaceXform from sRGB.
+ */
+class GrColorInfo {
+public:
+ GrColorInfo() = default;
+ GrColorInfo(const GrColorInfo&);
+ GrColorInfo(GrColorType, SkAlphaType, sk_sp<SkColorSpace>);
+ /* implicit */ GrColorInfo(const SkColorInfo&);
+
+ bool isLinearlyBlended() const { return fColorSpace && fColorSpace->gammaIsLinear(); }
+
+ SkColorSpace* colorSpace() const { return fColorSpace.get(); }
+ sk_sp<SkColorSpace> refColorSpace() const { return fColorSpace; }
+
+ GrColorSpaceXform* colorSpaceXformFromSRGB() const;
+ sk_sp<GrColorSpaceXform> refColorSpaceXformFromSRGB() const {
+ return sk_ref_sp(this->colorSpaceXformFromSRGB());
+ }
+
+ GrColorType colorType() const { return fColorType; }
+ SkAlphaType alphaType() const { return fAlphaType; }
+
+ bool isValid() const {
+ return fColorType != GrColorType::kUnknown && fAlphaType != kUnknown_SkAlphaType;
+ }
+
+private:
+ sk_sp<SkColorSpace> fColorSpace;
+ mutable sk_sp<GrColorSpaceXform> fColorXformFromSRGB;
+ GrColorType fColorType = GrColorType::kUnknown;
+ SkAlphaType fAlphaType = kUnknown_SkAlphaType;
+ mutable bool fInitializedColorSpaceXformFromSRGB = false;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp b/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp
new file mode 100644
index 0000000000..e8d739cfc9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+sk_sp<GrColorSpaceXform> GrColorSpaceXform::Make(SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst, SkAlphaType dstAT) {
+ SkColorSpaceXformSteps steps(src, srcAT, dst, dstAT);
+ return steps.flags.mask() == 0 ? nullptr /* Noop transform */
+ : sk_make_sp<GrColorSpaceXform>(steps);
+}
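+
+// Usage sketch: build a transform from sRGB to Display P3 and apply it on the CPU. The
+// SkColorSpace factories below are existing API; the rest is illustrative.
+//
+//     sk_sp<GrColorSpaceXform> xform = GrColorSpaceXform::Make(
+//             SkColorSpace::MakeSRGB().get(), kUnpremul_SkAlphaType,
+//             SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, SkNamedGamut::kDisplayP3).get(),
+//             kUnpremul_SkAlphaType);
+//     if (xform) {  // null means the transform would be a no-op
+//         SkColor4f p3Red = xform->apply({1, 0, 0, 1});
+//     }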
+
+bool GrColorSpaceXform::Equals(const GrColorSpaceXform* a, const GrColorSpaceXform* b) {
+ if (a == b) {
+ return true;
+ }
+
+ if (!a || !b || a->fSteps.flags.mask() != b->fSteps.flags.mask()) {
+ return false;
+ }
+
+ if (a->fSteps.flags.linearize &&
+ 0 != memcmp(&a->fSteps.srcTF, &b->fSteps.srcTF, sizeof(a->fSteps.srcTF))) {
+ return false;
+ }
+
+ if (a->fSteps.flags.gamut_transform &&
+ 0 != memcmp(&a->fSteps.src_to_dst_matrix, &b->fSteps.src_to_dst_matrix,
+ sizeof(a->fSteps.src_to_dst_matrix))) {
+ return false;
+ }
+
+ if (a->fSteps.flags.encode &&
+ 0 != memcmp(&a->fSteps.dstTFInv, &b->fSteps.dstTFInv, sizeof(a->fSteps.dstTFInv))) {
+ return false;
+ }
+
+ return true;
+}
+
+SkColor4f GrColorSpaceXform::apply(const SkColor4f& srcColor) {
+ SkColor4f result = srcColor;
+ fSteps.apply(result.vec());
+ return result;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLColorSpaceXformEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrColorSpaceXformEffect& csxe = args.fFp.cast<GrColorSpaceXformEffect>();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ fColorSpaceHelper.emitCode(uniformHandler, csxe.colorXform());
+
+ if (this->numChildProcessors()) {
+ SkString childColor("src_color");
+ this->invokeChild(0, &childColor, args);
+
+ SkString xformedColor;
+ fragBuilder->appendColorGamutXform(&xformedColor, childColor.c_str(), &fColorSpaceHelper);
+ fragBuilder->codeAppendf("%s = %s * %s;", args.fOutputColor, xformedColor.c_str(),
+ args.fInputColor);
+ } else {
+ SkString xformedColor;
+ fragBuilder->appendColorGamutXform(&xformedColor, args.fInputColor, &fColorSpaceHelper);
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, xformedColor.c_str());
+ }
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) override {
+ const GrColorSpaceXformEffect& csxe = processor.cast<GrColorSpaceXformEffect>();
+ fColorSpaceHelper.setData(pdman, csxe.colorXform());
+ }
+
+ GrGLSLColorSpaceXformHelper fColorSpaceHelper;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrColorSpaceXformEffect::GrColorSpaceXformEffect(std::unique_ptr<GrFragmentProcessor> child,
+ sk_sp<GrColorSpaceXform> colorXform)
+ : INHERITED(kGrColorSpaceXformEffect_ClassID, OptFlags(child.get()))
+ , fColorXform(std::move(colorXform)) {
+ if (child) {
+ this->registerChildProcessor(std::move(child));
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrColorSpaceXformEffect::clone() const {
+ std::unique_ptr<GrFragmentProcessor> child =
+ this->numChildProcessors() ? this->childProcessor(0).clone() : nullptr;
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrColorSpaceXformEffect(std::move(child), fColorXform));
+}
+
+bool GrColorSpaceXformEffect::onIsEqual(const GrFragmentProcessor& s) const {
+ const GrColorSpaceXformEffect& other = s.cast<GrColorSpaceXformEffect>();
+ return GrColorSpaceXform::Equals(fColorXform.get(), other.fColorXform.get());
+}
+
+void GrColorSpaceXformEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32(GrColorSpaceXform::XformKey(fColorXform.get()));
+}
+
+GrGLSLFragmentProcessor* GrColorSpaceXformEffect::onCreateGLSLInstance() const {
+ return new GrGLColorSpaceXformEffect();
+}
+
+GrFragmentProcessor::OptimizationFlags GrColorSpaceXformEffect::OptFlags(
+ const GrFragmentProcessor* child) {
+ // TODO: Implement constant output for constant input
+ if (child) {
+ OptimizationFlags flags = kNone_OptimizationFlags;
+ if (child->compatibleWithCoverageAsAlpha()) {
+ flags |= kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+ if (child->preservesOpaqueInput()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ return flags;
+ } else {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ kPreservesOpaqueInput_OptimizationFlag;
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrColorSpaceXformEffect::Make(SkColorSpace* src,
+ SkAlphaType srcAT,
+ SkColorSpace* dst,
+ SkAlphaType dstAT) {
+ auto xform = GrColorSpaceXform::Make(src, srcAT,
+ dst, dstAT);
+ if (!xform) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorSpaceXformEffect(nullptr,
+ std::move(xform)));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrColorSpaceXformEffect::Make(
+ std::unique_ptr<GrFragmentProcessor> child,
+ SkColorSpace* src, SkAlphaType srcAT, SkColorSpace* dst) {
+ if (!child) {
+ return nullptr;
+ }
+
+ auto xform = GrColorSpaceXform::Make(src, srcAT,
+ dst, kPremul_SkAlphaType);
+ if (!xform) {
+ return child;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorSpaceXformEffect(std::move(child),
+ std::move(xform)));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrColorSpaceXformEffect::Make(
+ std::unique_ptr<GrFragmentProcessor> child, sk_sp<GrColorSpaceXform> colorXform) {
+ if (!child) {
+ return nullptr;
+ }
+ if (!colorXform) {
+ return child;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorSpaceXformEffect(std::move(child),
+ std::move(colorXform)));
+}
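+// How the three Make() overloads relate, as a hedged caller-side sketch
+// (hypothetical 'childFP', 'srcCS', 'dstCS'): the child-less overload converts
+// the input color directly; the child overloads run the child first and
+// convert its output, returning the unmodified child when no xform is needed.
+//
+//     auto fp = GrColorSpaceXformEffect::Make(std::move(childFP),
+//                                             srcCS, kPremul_SkAlphaType,
+//                                             dstCS);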
diff --git a/gfx/skia/skia/src/gpu/GrColorSpaceXform.h b/gfx/skia/skia/src/gpu/GrColorSpaceXform.h
new file mode 100644
index 0000000000..65e18363b5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColorSpaceXform.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrColorSpaceXform_DEFINED
+#define GrColorSpaceXform_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/gpu/GrFragmentProcessor.h"
+
+class SkColorSpace;
+
+/**
+ * Represents a color space transformation
+ */
+class GrColorSpaceXform : public SkRefCnt {
+public:
+ GrColorSpaceXform(const SkColorSpaceXformSteps& steps) : fSteps(steps) {}
+
+ static sk_sp<GrColorSpaceXform> Make(SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst, SkAlphaType dstAT);
+
+ const SkColorSpaceXformSteps& steps() const { return fSteps; }
+
+ /**
+ * GrGLSLFragmentProcessor::GenKey() must call this and include the returned value in its
+ * computed key.
+ */
+ static uint32_t XformKey(const GrColorSpaceXform* xform) {
+ // Code generation depends on which steps we apply,
+ // and the kinds of transfer functions (if we're applying those).
+ if (!xform) { return 0; }
+
+ const SkColorSpaceXformSteps& steps(xform->fSteps);
+ uint32_t key = steps.flags.mask();
+ if (steps.flags.linearize) {
+ key |= classify_transfer_fn(steps.srcTF) << 8;
+ }
+ if (steps.flags.encode) {
+ key |= classify_transfer_fn(steps.dstTFInv) << 16;
+ }
+ return key;
+ }
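+    // A sketch of the intended GenKey() pattern (hypothetical processor type
+    // with a colorXform() accessor): fold the xform key into the processor key
+    // so shaders are regenerated when the steps or transfer functions change.
+    //
+    //     void GenKey(const MyFP& fp, const GrShaderCaps&,
+    //                 GrProcessorKeyBuilder* b) {
+    //         b->add32(GrColorSpaceXform::XformKey(fp.colorXform()));
+    //     }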
+
+ static bool Equals(const GrColorSpaceXform* a, const GrColorSpaceXform* b);
+
+ SkColor4f apply(const SkColor4f& srcColor);
+
+private:
+ friend class GrGLSLColorSpaceXformHelper;
+
+ SkColorSpaceXformSteps fSteps;
+};
+
+class GrColorSpaceXformEffect : public GrFragmentProcessor {
+public:
+ /**
+ * Returns a fragment processor that converts the input's color space from src to dst.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst, SkAlphaType dstAT);
+
+ /**
+ * Returns a fragment processor that calls the passed in fragment processor, and then converts
+ * the color space of the output from src to dst.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> child,
+ SkColorSpace* src, SkAlphaType srcAT,
+ SkColorSpace* dst);
+
+ /**
+ * Returns a fragment processor that calls the passed in FP and then converts it with the given
+ * color xform. Returns null if child is null, returns child if the xform is null (e.g. noop).
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> child,
+ sk_sp<GrColorSpaceXform> colorXform);
+
+ const char* name() const override { return "ColorSpaceXform"; }
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ const GrColorSpaceXform* colorXform() const { return fColorXform.get(); }
+
+private:
+ GrColorSpaceXformEffect(std::unique_ptr<GrFragmentProcessor> child,
+ sk_sp<GrColorSpaceXform> colorXform);
+
+ static OptimizationFlags OptFlags(const GrFragmentProcessor* child);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ sk_sp<GrColorSpaceXform> fColorXform;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrContext.cpp b/gfx/skia/skia/src/gpu/GrContext.cpp
new file mode 100644
index 0000000000..d9f37f0d4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContext.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTaskGroup.h"
+#include "src/gpu/GrClientMappedBufferManager.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrPathRendererChain.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/gpu/GrSoftwarePathRenderer.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/gpu/text/GrTextBlobCache.h"
+#include "src/gpu/text/GrTextContext.h"
+#include "src/image/SkImage_GpuBase.h"
+#include "src/image/SkSurface_Gpu.h"
+#include <atomic>
+
+#define ASSERT_OWNED_PROXY(P) \
+ SkASSERT(!(P) || !((P)->peekTexture()) || (P)->peekTexture()->getContext() == this)
+
+#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+#define RETURN_IF_ABANDONED if (this->abandoned()) { return; }
+#define RETURN_FALSE_IF_ABANDONED if (this->abandoned()) { return false; }
+#define RETURN_NULL_IF_ABANDONED if (this->abandoned()) { return nullptr; }
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrContext::GrContext(GrBackendApi backend, const GrContextOptions& options, int32_t contextID)
+ : INHERITED(backend, options, contextID) {
+ fResourceCache = nullptr;
+ fResourceProvider = nullptr;
+}
+
+GrContext::~GrContext() {
+ ASSERT_SINGLE_OWNER
+
+ if (this->drawingManager()) {
+ this->drawingManager()->cleanup();
+ }
+ delete fResourceProvider;
+ delete fResourceCache;
+}
+
+bool GrContext::init(sk_sp<const GrCaps> caps, sk_sp<GrSkSLFPFactoryCache> FPFactoryCache) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(fThreadSafeProxy); // needs to have been initialized by derived classes
+ SkASSERT(this->proxyProvider());
+
+ if (!INHERITED::init(std::move(caps), std::move(FPFactoryCache))) {
+ return false;
+ }
+
+ SkASSERT(this->caps());
+ SkASSERT(this->getGrStrikeCache());
+ SkASSERT(this->getTextBlobCache());
+
+ if (fGpu) {
+ fResourceCache = new GrResourceCache(this->caps(), this->singleOwner(), this->contextID());
+ fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, this->singleOwner());
+ fMappedBufferManager = skstd::make_unique<GrClientMappedBufferManager>(this->contextID());
+ }
+
+ if (fResourceCache) {
+ fResourceCache->setProxyProvider(this->proxyProvider());
+ }
+
+ fDidTestPMConversions = false;
+
+ // DDL TODO: we need to think through how the task group & persistent cache
+ // get passed on to/shared between all the DDLRecorders created with this context.
+ if (this->options().fExecutor) {
+ fTaskGroup = skstd::make_unique<SkTaskGroup>(*this->options().fExecutor);
+ }
+
+ fPersistentCache = this->options().fPersistentCache;
+ fShaderErrorHandler = this->options().fShaderErrorHandler;
+ if (!fShaderErrorHandler) {
+ fShaderErrorHandler = GrShaderUtils::DefaultShaderErrorHandler();
+ }
+
+ return true;
+}
+
+sk_sp<GrContextThreadSafeProxy> GrContext::threadSafeProxy() {
+ return fThreadSafeProxy;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrContext::abandonContext() {
+ if (this->abandoned()) {
+ return;
+ }
+
+ INHERITED::abandonContext();
+
+ fMappedBufferManager->abandon();
+
+ fResourceProvider->abandon();
+
+    // We need to clean up the drawing manager first so all the render targets
+    // will be released/forgotten before they too are abandoned.
+ this->drawingManager()->cleanup();
+
+    // Abandon the resource cache first so resource destructors
+    // don't try to free the underlying resources in the backend API.
+ fResourceCache->abandonAll();
+
+ fGpu->disconnect(GrGpu::DisconnectType::kAbandon);
+
+ fMappedBufferManager.reset();
+}
+
+void GrContext::releaseResourcesAndAbandonContext() {
+ if (this->abandoned()) {
+ return;
+ }
+
+ INHERITED::abandonContext();
+
+ fMappedBufferManager.reset();
+
+ fResourceProvider->abandon();
+
+    // We need to clean up the drawing manager first so all the render targets
+    // will be released/forgotten before they too are abandoned.
+ this->drawingManager()->cleanup();
+
+ // Release all resources in the backend 3D API.
+ fResourceCache->releaseAll();
+
+ fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
+}
+
+void GrContext::resetGLTextureBindings() {
+ if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
+ return;
+ }
+ fGpu->resetTextureBindings();
+}
+
+void GrContext::resetContext(uint32_t state) {
+ ASSERT_SINGLE_OWNER
+ fGpu->markContextDirty(state);
+}
+
+void GrContext::freeGpuResources() {
+ ASSERT_SINGLE_OWNER
+
+ // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
+ // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
+ this->getGrStrikeCache()->freeAll();
+
+ this->drawingManager()->freeGpuResources();
+
+ fResourceCache->purgeAllUnlocked();
+}
+
+void GrContext::purgeUnlockedResources(bool scratchResourcesOnly) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->abandoned()) {
+ return;
+ }
+
+ fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
+ fResourceCache->purgeAsNeeded();
+
+    // The text blob cache doesn't actually hold any GPU resources, but this is
+    // a convenient place to purge stale blobs.
+ this->getTextBlobCache()->purgeStaleBlobs();
+}
+
+void GrContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ ASSERT_SINGLE_OWNER
+
+ if (this->abandoned()) {
+ return;
+ }
+
+ fMappedBufferManager->process();
+ auto purgeTime = GrStdSteadyClock::now() - msNotUsed;
+
+ fResourceCache->purgeAsNeeded();
+ fResourceCache->purgeResourcesNotUsedSince(purgeTime);
+
+ if (auto ccpr = this->drawingManager()->getCoverageCountingPathRenderer()) {
+ ccpr->purgeCacheEntriesOlderThan(this->proxyProvider(), purgeTime);
+ }
+
+    // The text blob cache doesn't actually hold any GPU resources, but this is
+    // a convenient place to purge stale blobs.
+ this->getTextBlobCache()->purgeStaleBlobs();
+}
+
+void GrContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->abandoned()) {
+ return;
+ }
+
+ fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
+}
+
+void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
+ ASSERT_SINGLE_OWNER
+
+ if (resourceCount) {
+ *resourceCount = fResourceCache->getBudgetedResourceCount();
+ }
+ if (resourceBytes) {
+ *resourceBytes = fResourceCache->getBudgetedResourceBytes();
+ }
+}
+
+size_t GrContext::getResourceCachePurgeableBytes() const {
+ ASSERT_SINGLE_OWNER
+ return fResourceCache->getPurgeableBytes();
+}
+
+size_t GrContext::ComputeImageSize(sk_sp<SkImage> image, GrMipMapped mipMapped, bool useNextPow2) {
+ if (!image->isTextureBacked()) {
+ return 0;
+ }
+ SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image.get()));
+ GrTextureProxy* proxy = gpuImage->peekProxy();
+ if (!proxy) {
+ return 0;
+ }
+
+ const GrCaps& caps = *gpuImage->context()->priv().caps();
+ int colorSamplesPerPixel = 1;
+ return GrSurface::ComputeSize(caps, proxy->backendFormat(), image->width(), image->height(),
+ colorSamplesPerPixel, mipMapped, useNextPow2);
+}
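+// Usage sketch (hypothetical texture-backed sk_sp<SkImage> 'img'): estimates
+// the GPU footprint of an image, including mip levels when requested.
+//
+//     size_t bytes = GrContext::ComputeImageSize(img, GrMipMapped::kYes,
+//                                                /* useNextPow2 */ false);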
+
+////////////////////////////////////////////////////////////////////////////////
+
+int GrContext::maxTextureSize() const { return this->caps()->maxTextureSize(); }
+
+int GrContext::maxRenderTargetSize() const { return this->caps()->maxRenderTargetSize(); }
+
+bool GrContext::colorTypeSupportedAsImage(SkColorType colorType) const {
+ GrBackendFormat format =
+ this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
+ GrRenderable::kNo);
+ return format.isValid();
+}
+
+int GrContext::maxSurfaceSampleCountForColorType(SkColorType colorType) const {
+ GrBackendFormat format =
+ this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
+ GrRenderable::kYes);
+ return this->caps()->maxRenderTargetSampleCount(format);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[]) {
+    if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
+ return false;
+ }
+ for (int i = 0; i < numSemaphores; ++i) {
+ sk_sp<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
+ waitSemaphores[i], GrResourceProvider::SemaphoreWrapType::kWillWait,
+ kAdopt_GrWrapOwnership);
+ fGpu->waitSemaphore(std::move(sema));
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info,
+ const GrPrepareForExternalIORequests& externalRequests) {
+ ASSERT_SINGLE_OWNER
+ if (this->abandoned()) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ return this->drawingManager()->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
+ info, externalRequests);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrContext::checkAsyncWorkCompletion() {
+ if (fGpu) {
+ fGpu->checkFinishProcs();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrContext::storeVkPipelineCacheData() {
+ if (fGpu) {
+ fGpu->storeVkPipelineCacheData();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrContext::supportsDistanceFieldText() const {
+ return this->caps()->shaderCaps()->supportsDistanceFieldText();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
+ ASSERT_SINGLE_OWNER
+ if (maxResources) {
+ *maxResources = -1;
+ }
+ if (maxResourceBytes) {
+ *maxResourceBytes = this->getResourceCacheLimit();
+ }
+}
+
+size_t GrContext::getResourceCacheLimit() const {
+ ASSERT_SINGLE_OWNER
+ return fResourceCache->getMaxResourceBytes();
+}
+
+void GrContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
+ ASSERT_SINGLE_OWNER
+ this->setResourceCacheLimit(maxResourceBytes);
+}
+
+void GrContext::setResourceCacheLimit(size_t maxResourceBytes) {
+ ASSERT_SINGLE_OWNER
+ fResourceCache->setLimit(maxResourceBytes);
+}
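+// Usage sketch (hypothetical 'ctx'): only the byte budget is adjustable now;
+// the resource-count limit in the legacy setter is ignored.
+//
+//     ctx->setResourceCacheLimit(96 * 1024 * 1024);  // 96 MiB budget
+//     size_t budget = ctx->getResourceCacheLimit();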
+
+//////////////////////////////////////////////////////////////////////////////
+void GrContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ ASSERT_SINGLE_OWNER
+ fResourceCache->dumpMemoryStatistics(traceMemoryDump);
+ traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
+ this->getTextBlobCache()->usedBytes());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+GrBackendTexture GrContext::createBackendTexture(int width, int height,
+ const GrBackendFormat& backendFormat,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ GrProtected isProtected) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ if (!this->asDirectContext()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ return fGpu->createBackendTexture(width, height, backendFormat,
+ mipMapped, renderable,
+ nullptr, 0, nullptr, isProtected);
+}
+
+GrBackendTexture GrContext::createBackendTexture(int width, int height,
+ SkColorType skColorType,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ GrProtected isProtected) {
+ if (!this->asDirectContext()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
+
+ return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
+}
+
+GrBackendTexture GrContext::createBackendTexture(const SkSurfaceCharacterization& c) {
+ if (!this->asDirectContext() || !c.isValid()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ if (c.usesGLFBO0()) {
+        // If we are making the surface, we will never use FBO0.
+ return GrBackendTexture();
+ }
+
+ if (c.vulkanSecondaryCBCompatible()) {
+ return {};
+ }
+
+ const GrBackendFormat format = this->defaultBackendFormat(c.colorType(), GrRenderable::kYes);
+ if (!format.isValid()) {
+ return GrBackendTexture();
+ }
+
+ GrBackendTexture result = this->createBackendTexture(c.width(), c.height(), format,
+ GrMipMapped(c.isMipMapped()),
+ GrRenderable::kYes,
+ c.isProtected());
+ SkASSERT(c.isCompatible(result));
+ return result;
+}
+
+GrBackendTexture GrContext::createBackendTexture(const SkSurfaceCharacterization& c,
+ const SkColor4f& color) {
+ if (!this->asDirectContext() || !c.isValid()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ if (c.usesGLFBO0()) {
+        // If we are making the surface, we will never use FBO0.
+ return GrBackendTexture();
+ }
+
+ if (c.vulkanSecondaryCBCompatible()) {
+ return {};
+ }
+
+ const GrBackendFormat format = this->defaultBackendFormat(c.colorType(), GrRenderable::kYes);
+ if (!format.isValid()) {
+ return GrBackendTexture();
+ }
+
+ GrBackendTexture result = this->createBackendTexture(c.width(), c.height(), format, color,
+ GrMipMapped(c.isMipMapped()),
+ GrRenderable::kYes,
+ c.isProtected());
+ SkASSERT(c.isCompatible(result));
+ return result;
+}
+
+GrBackendTexture GrContext::createBackendTexture(int width, int height,
+ const GrBackendFormat& backendFormat,
+ const SkColor4f& color,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ GrProtected isProtected) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ if (!this->asDirectContext()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ return fGpu->createBackendTexture(width, height, backendFormat,
+ mipMapped, renderable,
+ nullptr, 0, &color, isProtected);
+}
+
+GrBackendTexture GrContext::createBackendTexture(int width, int height,
+ SkColorType skColorType,
+ const SkColor4f& color,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ GrProtected isProtected) {
+ if (!this->asDirectContext()) {
+ return GrBackendTexture();
+ }
+
+ if (this->abandoned()) {
+ return GrBackendTexture();
+ }
+
+ GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
+ if (!format.isValid()) {
+ return GrBackendTexture();
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
+ SkColor4f swizzledColor = this->caps()->getOutputSwizzle(format, grColorType).applyTo(color);
+
+ return this->createBackendTexture(width, height, format, swizzledColor, mipMapped, renderable,
+ isProtected);
+}
+
+GrBackendTexture GrContext::createBackendTexture(const SkPixmap srcData[], int numLevels,
+ GrRenderable renderable, GrProtected isProtected) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ if (!this->asDirectContext()) {
+ return {};
+ }
+
+ if (this->abandoned()) {
+ return {};
+ }
+
+ if (!srcData || !numLevels) {
+ return {};
+ }
+
+ int baseWidth = srcData[0].width();
+ int baseHeight = srcData[0].height();
+ SkColorType colorType = srcData[0].colorType();
+
+ GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
+
+ return fGpu->createBackendTexture(baseWidth, baseHeight, backendFormat,
+ numLevels > 1 ? GrMipMapped::kYes : GrMipMapped::kNo,
+ renderable, srcData, numLevels, nullptr, isProtected);
+}
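+// Usage sketch (hypothetical 'ctx' and 'basePixmap'): the mip status is
+// inferred from the level count, so one level yields a non-mipmapped texture.
+//
+//     SkPixmap levels[1] = { basePixmap };
+//     GrBackendTexture tex = ctx->createBackendTexture(
+//             levels, 1, GrRenderable::kNo, GrProtected::kNo);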
+
+void GrContext::deleteBackendTexture(GrBackendTexture backendTex) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ if (this->abandoned() || !backendTex.isValid()) {
+ return;
+ }
+
+ fGpu->deleteBackendTexture(backendTex);
+}
+
+bool GrContext::precompileShader(const SkData& key, const SkData& data) {
+ return fGpu->precompileShader(key, data);
+}
+
+#ifdef SK_ENABLE_DUMP_GPU
+#include "src/utils/SkJSONWriter.h"
+SkString GrContext::dump() const {
+ SkDynamicMemoryWStream stream;
+ SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
+ writer.beginObject();
+
+ writer.appendString("backend", GrBackendApiToStr(this->backend()));
+
+ writer.appendName("caps");
+ this->caps()->dumpJSON(&writer);
+
+ writer.appendName("gpu");
+ this->fGpu->dumpJSON(&writer);
+
+ // Flush JSON to the memory stream
+ writer.endObject();
+ writer.flush();
+
+ // Null terminate the JSON data in the memory stream
+ stream.write8(0);
+
+ // Allocate a string big enough to hold all the data, then copy out of the stream
+ SkString result(stream.bytesWritten());
+ stream.copyToAndReset(result.writable_str());
+ return result;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrContextPriv.cpp b/gfx/skia/skia/src/gpu/GrContextPriv.cpp
new file mode 100644
index 0000000000..a84ace62c8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContextPriv.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrContextPriv.h"
+
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+#include "src/gpu/GrSurfaceContextPriv.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/generated/GrConfigConversionEffect.h"
+#include "src/gpu/text/GrTextBlobCache.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_Gpu.h"
+
+#define ASSERT_OWNED_PROXY(P) \
+ SkASSERT(!(P) || !((P)->peekTexture()) || (P)->peekTexture()->getContext() == fContext)
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fContext->singleOwner());)
+#define RETURN_VALUE_IF_ABANDONED(value) if (fContext->abandoned()) { return (value); }
+#define RETURN_IF_ABANDONED if (fContext->abandoned()) { return; }
+
+sk_sp<const GrCaps> GrContextPriv::refCaps() const {
+ return fContext->refCaps();
+}
+
+sk_sp<GrSkSLFPFactoryCache> GrContextPriv::fpFactoryCache() {
+ return fContext->fpFactoryCache();
+}
+
+sk_sp<GrOpMemoryPool> GrContextPriv::refOpMemoryPool() {
+ return fContext->refOpMemoryPool();
+}
+
+void GrContextPriv::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
+ fContext->addOnFlushCallbackObject(onFlushCBObject);
+}
+
+std::unique_ptr<GrSurfaceContext> GrContextPriv::makeWrappedSurfaceContext(
+ sk_sp<GrSurfaceProxy> proxy,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ return fContext->makeWrappedSurfaceContext(std::move(proxy), colorType, alphaType,
+ std::move(colorSpace), props);
+}
+
+std::unique_ptr<GrTextureContext> GrContextPriv::makeDeferredTextureContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredTextureContext(fit, width, height, colorType, alphaType,
+ std::move(colorSpace), mipMapped, origin, budgeted,
+ isProtected);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrContextPriv::makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredRenderTargetContext(fit, width, height, colorType,
+ std::move(colorSpace), sampleCnt, mipMapped,
+ origin, surfaceProps, budgeted, isProtected);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrContextPriv::makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit fit, int width, int height, GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace, int sampleCnt, GrMipMapped mipMapped,
+ GrSurfaceOrigin origin, const SkSurfaceProps* surfaceProps, SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredRenderTargetContextWithFallback(
+ fit, width, height, colorType, std::move(colorSpace), sampleCnt, mipMapped, origin,
+ surfaceProps, budgeted, isProtected);
+}
+
+std::unique_ptr<GrTextureContext> GrContextPriv::makeBackendTextureContext(
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace) {
+ ASSERT_SINGLE_OWNER
+
+ sk_sp<GrSurfaceProxy> proxy = this->proxyProvider()->wrapBackendTexture(
+ tex, colorType, origin, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, kRW_GrIOType);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeTextureContext(std::move(proxy), colorType, alphaType,
+ std::move(colorSpace));
+}
+
+std::unique_ptr<GrRenderTargetContext> GrContextPriv::makeBackendTextureRenderTargetContext(
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ ReleaseProc releaseProc,
+ ReleaseContext releaseCtx) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(sampleCnt > 0);
+
+ sk_sp<GrTextureProxy> proxy(this->proxyProvider()->wrapRenderableBackendTexture(
+ tex, origin, sampleCnt, colorType, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
+ releaseProc, releaseCtx));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeRenderTargetContext(std::move(proxy), colorType,
+ std::move(colorSpace), props);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrContextPriv::makeBackendRenderTargetRenderTargetContext(
+ const GrBackendRenderTarget& backendRT,
+ GrSurfaceOrigin origin,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ ReleaseProc releaseProc,
+ ReleaseContext releaseCtx) {
+ ASSERT_SINGLE_OWNER
+
+ sk_sp<GrSurfaceProxy> proxy = this->proxyProvider()->wrapBackendRenderTarget(
+ backendRT, colorType, origin, releaseProc, releaseCtx);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeRenderTargetContext(std::move(proxy), colorType,
+ std::move(colorSpace), surfaceProps);
+}
+
+std::unique_ptr<GrRenderTargetContext>
+GrContextPriv::makeBackendTextureAsRenderTargetRenderTargetContext(const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(sampleCnt > 0);
+ sk_sp<GrSurfaceProxy> proxy(
+ this->proxyProvider()->wrapBackendTextureAsRenderTarget(tex, colorType,
+ origin, sampleCnt));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeRenderTargetContext(std::move(proxy), colorType,
+ std::move(colorSpace), props);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrContextPriv::makeVulkanSecondaryCBRenderTargetContext(
+ const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo, const SkSurfaceProps* props) {
+ ASSERT_SINGLE_OWNER
+ sk_sp<GrSurfaceProxy> proxy(
+ this->proxyProvider()->wrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeRenderTargetContext(
+ std::move(proxy),
+ SkColorTypeToGrColorType(imageInfo.colorType()),
+ imageInfo.refColorSpace(),
+ props);
+}
+
+GrSemaphoresSubmitted GrContextPriv::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
+ const GrFlushInfo& info) {
+ ASSERT_SINGLE_OWNER
+ RETURN_VALUE_IF_ABANDONED(GrSemaphoresSubmitted::kNo)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrContextPriv", "flushSurfaces", fContext);
+ SkASSERT(numProxies >= 0);
+ SkASSERT(!numProxies || proxies);
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ ASSERT_OWNED_PROXY(proxies[i]);
+ }
+ return fContext->drawingManager()->flushSurfaces(
+ proxies, numProxies, SkSurface::BackendSurfaceAccess::kNoAccess, info);
+}
+
+void GrContextPriv::flushSurface(GrSurfaceProxy* proxy) {
+ this->flushSurfaces(proxy ? &proxy : nullptr, proxy ? 1 : 0, {});
+}
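+// Usage sketch (hypothetical 'ctx', 'proxyA', 'proxyB'): the proxy array is a
+// hint, so the context may flush more work than the proxies listed.
+//
+//     GrSurfaceProxy* proxies[] = { proxyA, proxyB };
+//     ctx->priv().flushSurfaces(proxies, 2, GrFlushInfo());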
+
+void GrContextPriv::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
+ fContext->drawingManager()->moveRenderTasksToDDL(ddl);
+}
+
+void GrContextPriv::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
+ GrRenderTargetProxy* newDest) {
+ fContext->drawingManager()->copyRenderTasksFromDDL(ddl, newDest);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+void GrContextPriv::resetGpuStats() const {
+#if GR_GPU_STATS
+ fContext->fGpu->stats()->reset();
+#endif
+}
+
+void GrContextPriv::dumpCacheStats(SkString* out) const {
+#if GR_CACHE_STATS
+ fContext->fResourceCache->dumpStats(out);
+#endif
+}
+
+void GrContextPriv::dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys,
+ SkTArray<double>* values) const {
+#if GR_CACHE_STATS
+ fContext->fResourceCache->dumpStatsKeyValuePairs(keys, values);
+#endif
+}
+
+void GrContextPriv::printCacheStats() const {
+ SkString out;
+ this->dumpCacheStats(&out);
+ SkDebugf("%s", out.c_str());
+}
+
+void GrContextPriv::dumpGpuStats(SkString* out) const {
+#if GR_GPU_STATS
+ return fContext->fGpu->stats()->dump(out);
+#endif
+}
+
+void GrContextPriv::dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys,
+ SkTArray<double>* values) const {
+#if GR_GPU_STATS
+ return fContext->fGpu->stats()->dumpKeyValuePairs(keys, values);
+#endif
+}
+
+void GrContextPriv::printGpuStats() const {
+ SkString out;
+ this->dumpGpuStats(&out);
+ SkDebugf("%s", out.c_str());
+}
+
+void GrContextPriv::testingOnly_setTextBlobCacheLimit(size_t bytes) {
+ fContext->priv().getTextBlobCache()->setBudget(bytes);
+}
+
+sk_sp<SkImage> GrContextPriv::testingOnly_getFontAtlasImage(GrMaskFormat format, unsigned int index) {
+ auto atlasManager = this->getAtlasManager();
+ if (!atlasManager) {
+ return nullptr;
+ }
+
+ unsigned int numActiveProxies;
+ const sk_sp<GrTextureProxy>* proxies = atlasManager->getProxies(format, &numActiveProxies);
+ if (index >= numActiveProxies || !proxies || !proxies[index]) {
+ return nullptr;
+ }
+
+ SkASSERT(proxies[index]->priv().isExact());
+ sk_sp<SkImage> image(new SkImage_Gpu(sk_ref_sp(fContext), kNeedNewImageUniqueID,
+ kPremul_SkAlphaType, proxies[index], nullptr));
+ return image;
+}
+
+void GrContextPriv::testingOnly_purgeAllUnlockedResources() {
+ fContext->fResourceCache->purgeAllUnlocked();
+}
+
+void GrContextPriv::testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
+ fContext->flush();
+ fContext->drawingManager()->testingOnly_removeOnFlushCallbackObject(cb);
+}
+#endif
+
+bool GrContextPriv::validPMUPMConversionExists() {
+ ASSERT_SINGLE_OWNER
+ if (!fContext->fDidTestPMConversions) {
+ fContext->fPMUPMConversionsRoundTrip =
+ GrConfigConversionEffect::TestForPreservingPMConversions(fContext);
+ fContext->fDidTestPMConversions = true;
+ }
+
+ // The PM<->UPM tests fail or succeed together so we only need to check one.
+ return fContext->fPMUPMConversionsRoundTrip;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrContextPriv::createPMToUPMEffect(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ ASSERT_SINGLE_OWNER
+ // We should have already called this->priv().validPMUPMConversionExists() in this case
+ SkASSERT(fContext->fDidTestPMConversions);
+ // ...and it should have succeeded
+ SkASSERT(this->validPMUPMConversionExists());
+
+ return GrConfigConversionEffect::Make(std::move(fp), PMConversion::kToUnpremul);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrContextPriv::createUPMToPMEffect(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ ASSERT_SINGLE_OWNER
+ // We should have already called this->priv().validPMUPMConversionExists() in this case
+ SkASSERT(fContext->fDidTestPMConversions);
+ // ...and it should have succeeded
+ SkASSERT(this->validPMUPMConversionExists());
+
+ return GrConfigConversionEffect::Make(std::move(fp), PMConversion::kToPremul);
+}
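+// A hedged sketch of the intended pairing (hypothetical 'ctx' and 'fp'): both
+// effects are only valid after validPMUPMConversionExists() returns true, and
+// they are exact inverses for round-trip-preserving configs.
+//
+//     if (ctx->priv().validPMUPMConversionExists()) {
+//         fp = ctx->priv().createPMToUPMEffect(std::move(fp));
+//         // ... operate in unpremul space ...
+//         fp = ctx->priv().createUPMToPMEffect(std::move(fp));
+//     }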
diff --git a/gfx/skia/skia/src/gpu/GrContextPriv.h b/gfx/skia/skia/src/gpu/GrContextPriv.h
new file mode 100644
index 0000000000..64522b065e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContextPriv.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextPriv_DEFINED
+#define GrContextPriv_DEFINED
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/text/GrAtlasManager.h"
+
+class GrBackendFormat;
+class GrBackendRenderTarget;
+class GrOpMemoryPool;
+class GrOnFlushCallbackObject;
+class GrSemaphore;
+class GrSkSLFPFactory;
+class GrSkSLFPFactoryCache;
+class GrSurfaceProxy;
+class GrTextureContext;
+
+class SkDeferredDisplayList;
+class SkTaskGroup;
+
+/** Class that adds methods to GrContext that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrContext. It should never have additional
+ data members or virtual methods. */
+class GrContextPriv {
+public:
+
+ // from GrContext_Base
+ uint32_t contextID() const { return fContext->contextID(); }
+
+ bool matches(GrContext_Base* candidate) const { return fContext->matches(candidate); }
+
+ const GrContextOptions& options() const { return fContext->options(); }
+
+ const GrCaps* caps() const { return fContext->caps(); }
+ sk_sp<const GrCaps> refCaps() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ GrImageContext* asImageContext() { return fContext->asImageContext(); }
+ GrRecordingContext* asRecordingContext() { return fContext->asRecordingContext(); }
+ GrContext* asDirectContext() { return fContext->asDirectContext(); }
+
+ // from GrImageContext
+ GrProxyProvider* proxyProvider() { return fContext->proxyProvider(); }
+ const GrProxyProvider* proxyProvider() const { return fContext->proxyProvider(); }
+
+ bool abandoned() const { return fContext->abandoned(); }
+
+ /** This is only useful for debug purposes */
+ SkDEBUGCODE(GrSingleOwner* singleOwner() const { return fContext->singleOwner(); } )
+
+ // from GrRecordingContext
+ GrDrawingManager* drawingManager() { return fContext->drawingManager(); }
+
+ sk_sp<GrOpMemoryPool> refOpMemoryPool();
+ GrOpMemoryPool* opMemoryPool() { return fContext->opMemoryPool(); }
+
+ GrStrikeCache* getGrStrikeCache() { return fContext->getGrStrikeCache(); }
+ GrTextBlobCache* getTextBlobCache() { return fContext->getTextBlobCache(); }
+
+ /**
+ * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
+ *
+ * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
+ * ensure its lifetime is tied to that of the context.
+ */
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+ std::unique_ptr<GrSurfaceContext> makeWrappedSurfaceContext(sk_sp<GrSurfaceProxy>,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace> = nullptr,
+ const SkSurfaceProps* = nullptr);
+
+ /** Create a new texture context backed by a deferred-style GrTextureProxy. */
+ std::unique_ptr<GrTextureContext> makeDeferredTextureContext(
+ SkBackingFit,
+ int width,
+ int height,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected = GrProtected::kNo);
+
+ /*
+ * Create a new render target context backed by a deferred-style
+ * GrRenderTargetProxy. We guarantee that "asTextureProxy" will succeed for
+ * renderTargetContexts created via this entry point.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /*
+     * This method will attempt to create a renderTargetContext that has, at least, the number of
+     * channels and precision per channel as requested by the color type (e.g., A8 and 888 can be
+ * converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
+ * SRGB-ness will be preserved.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted budgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ GrAuditTrail* auditTrail() { return fContext->auditTrail(); }
+
+ /**
+ * Create a GrContext without a resource cache
+ */
+ static sk_sp<GrContext> MakeDDL(const sk_sp<GrContextThreadSafeProxy>&);
+
+ std::unique_ptr<GrTextureContext> makeBackendTextureContext(const GrBackendTexture&,
+ GrSurfaceOrigin,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>);
+
+    // These match the definitions in SkSurface & GrSurface.h, whence they came.
+ typedef void* ReleaseContext;
+ typedef void (*ReleaseProc)(ReleaseContext);
+
+ std::unique_ptr<GrRenderTargetContext> makeBackendTextureRenderTargetContext(
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr,
+ ReleaseProc = nullptr,
+ ReleaseContext = nullptr);
+
+ std::unique_ptr<GrRenderTargetContext> makeBackendRenderTargetRenderTargetContext(
+ const GrBackendRenderTarget&,
+ GrSurfaceOrigin origin,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr,
+ ReleaseProc = nullptr,
+ ReleaseContext = nullptr);
+
+ std::unique_ptr<GrRenderTargetContext> makeBackendTextureAsRenderTargetRenderTargetContext(
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr);
+
+ std::unique_ptr<GrRenderTargetContext> makeVulkanSecondaryCBRenderTargetContext(
+ const SkImageInfo&, const GrVkDrawableInfo&, const SkSurfaceProps* = nullptr);
+
+ /**
+ * Finalizes all pending reads and writes to the surfaces and also performs an MSAA resolves
+ * if necessary. The GrSurfaceProxy array is treated as a hint. If it is supplied the context
+ * will guarantee that the draws required for those proxies are flushed but it could do more.
+ * If no array is provided then all current work will be flushed.
+ *
+ * It is not necessary to call this before reading the render target via Skia/GrContext.
+ * GrContext will detect when it must perform a resolve before reading pixels back from the
+ * surface or using it as a texture.
+ */
+ GrSemaphoresSubmitted flushSurfaces(GrSurfaceProxy*[], int numProxies, const GrFlushInfo&);
+
+ /** Version of above that flushes for a single proxy and uses a default GrFlushInfo. Null is
+ * allowed. */
+ void flushSurface(GrSurfaceProxy*);
+
+ /**
+ * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
+ * did we find a pair of round-trip preserving conversion effects?
+ */
+ bool validPMUPMConversionExists();
+
+ /**
+ * These functions create premul <-> unpremul effects, using the specialized round-trip effects
+ * from GrConfigConversionEffect.
+ */
+ std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
+ std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);
+
+ SkTaskGroup* getTaskGroup() { return fContext->fTaskGroup.get(); }
+
+ GrResourceProvider* resourceProvider() { return fContext->fResourceProvider; }
+ const GrResourceProvider* resourceProvider() const { return fContext->fResourceProvider; }
+
+ GrResourceCache* getResourceCache() { return fContext->fResourceCache; }
+
+ GrGpu* getGpu() { return fContext->fGpu.get(); }
+ const GrGpu* getGpu() const { return fContext->fGpu.get(); }
+
+ // This accessor should only ever be called by the GrOpFlushState.
+ GrAtlasManager* getAtlasManager() {
+ return fContext->onGetAtlasManager();
+ }
+
+ void moveRenderTasksToDDL(SkDeferredDisplayList*);
+ void copyRenderTasksFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
+
+ GrContextOptions::PersistentCache* getPersistentCache() { return fContext->fPersistentCache; }
+ GrContextOptions::ShaderErrorHandler* getShaderErrorHandler() const {
+ return fContext->fShaderErrorHandler;
+ }
+
+ GrClientMappedBufferManager* clientMappedBufferManager() {
+ return fContext->fMappedBufferManager.get();
+ }
+
+#if GR_TEST_UTILS
+ /** Reset GPU stats */
+    void resetGpuStats() const;
+
+ /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
+ void dumpCacheStats(SkString*) const;
+ void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void printCacheStats() const;
+
+ /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
+ void dumpGpuStats(SkString*) const;
+ void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void printGpuStats() const;
+
+    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will purge.
+        This is for testing only. */
+ void testingOnly_setTextBlobCacheLimit(size_t bytes);
+
+ /** Get pointer to atlas texture for given mask format. Note that this wraps an
+ actively mutating texture in an SkImage. This could yield unexpected results
+ if it gets cached or used more generally. */
+ sk_sp<SkImage> testingOnly_getFontAtlasImage(GrMaskFormat format, unsigned int index = 0);
+
+ /**
+ * Purge all the unlocked resources from the cache.
+ * This entry point is mainly meant for timing texture uploads
+ * and is not defined in normal builds of Skia.
+ */
+ void testingOnly_purgeAllUnlockedResources();
+
+ void testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject*);
+#endif
+
+private:
+ explicit GrContextPriv(GrContext* context) : fContext(context) {}
+ GrContextPriv(const GrContextPriv&); // unimpl
+ GrContextPriv& operator=(const GrContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrContextPriv* operator&() const;
+ GrContextPriv* operator&();
+
+ GrContext* fContext;
+
+ friend class GrContext; // to construct/copy this type.
+};
+
+inline GrContextPriv GrContext::priv() { return GrContextPriv(this); }
+
+inline const GrContextPriv GrContext::priv() const {
+ return GrContextPriv(const_cast<GrContext*>(this));
+}
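+// GrContextPriv is a by-value window, so the idiomatic call chains through
+// priv() without storing the wrapper (hypothetical 'ctx' and 'proxy'):
+//
+//     ctx->priv().flushSurface(proxy);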
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrContextThreadSafeProxy.cpp b/gfx/skia/skia/src/gpu/GrContextThreadSafeProxy.cpp
new file mode 100644
index 0000000000..64c7bf6a4c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContextThreadSafeProxy.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrBaseContextPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+#include "src/image/SkSurface_Gpu.h"
+
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkCaps.h"
+#endif
+
+GrContextThreadSafeProxy::GrContextThreadSafeProxy(GrBackendApi backend,
+ const GrContextOptions& options,
+ uint32_t contextID)
+ : INHERITED(backend, options, contextID) {
+}
+
+GrContextThreadSafeProxy::~GrContextThreadSafeProxy() = default;
+
+bool GrContextThreadSafeProxy::init(sk_sp<const GrCaps> caps,
+ sk_sp<GrSkSLFPFactoryCache> FPFactoryCache) {
+ return INHERITED::init(std::move(caps), std::move(FPFactoryCache));
+}
+
+SkSurfaceCharacterization GrContextThreadSafeProxy::createCharacterization(
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii, const GrBackendFormat& backendFormat,
+ int sampleCnt, GrSurfaceOrigin origin,
+ const SkSurfaceProps& surfaceProps,
+ bool isMipMapped, bool willUseGLFBO0, bool isTextureable,
+ GrProtected isProtected) {
+ if (!backendFormat.isValid()) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ SkASSERT(isTextureable || !isMipMapped);
+
+ if (GrBackendApi::kOpenGL != backendFormat.backend() && willUseGLFBO0) {
+        // The willUseGLFBO0 flag can only be used for a GL backend.
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ if (!this->caps()->mipMapSupport()) {
+ isMipMapped = false;
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(ii.colorType());
+
+ if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, backendFormat)) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ if (!this->caps()->isFormatAsColorTypeRenderable(grColorType, backendFormat, sampleCnt)) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendFormat);
+ SkASSERT(sampleCnt);
+
+ if (willUseGLFBO0 && isTextureable) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ if (isTextureable && !this->caps()->isFormatTexturable(backendFormat)) {
+ // Skia doesn't agree that this is textureable.
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+ if (GrBackendApi::kVulkan == backendFormat.backend()) {
+ if (GrBackendApi::kVulkan != this->backend()) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+
+#ifdef SK_VULKAN
+ const GrVkCaps* vkCaps = (const GrVkCaps*) this->caps();
+
+ // The protection status of the characterization and the context need to match
+ if (isProtected != GrProtected(vkCaps->supportsProtectedMemory())) {
+ return SkSurfaceCharacterization(); // return an invalid characterization
+ }
+#endif
+ }
+
+ return SkSurfaceCharacterization(sk_ref_sp<GrContextThreadSafeProxy>(this),
+ cacheMaxResourceBytes, ii, backendFormat,
+ origin, sampleCnt,
+ SkSurfaceCharacterization::Textureable(isTextureable),
+ SkSurfaceCharacterization::MipMapped(isMipMapped),
+ SkSurfaceCharacterization::UsesGLFBO0(willUseGLFBO0),
+ SkSurfaceCharacterization::VulkanSecondaryCBCompatible(false),
+ isProtected,
+ surfaceProps);
+}
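+// Caller-side sketch (hypothetical arguments): an invalid, default-constructed
+// SkSurfaceCharacterization signals that the combination cannot be replayed
+// via a DDL, so validity should be checked before recording.
+//
+//     SkSurfaceCharacterization c = proxy->createCharacterization(
+//             maxBytes, info, format, /* sampleCnt */ 1,
+//             kTopLeft_GrSurfaceOrigin, props, /* isMipMapped */ false,
+//             /* willUseGLFBO0 */ false, /* isTextureable */ true,
+//             GrProtected::kNo);
+//     if (!c.isValid()) { /* fall back to direct rendering */ }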
+
+////////////////////////////////////////////////////////////////////////////////
+sk_sp<GrSkSLFPFactoryCache> GrContextThreadSafeProxyPriv::fpFactoryCache() {
+ return fProxy->fpFactoryCache();
+}
+
+sk_sp<GrContextThreadSafeProxy> GrContextThreadSafeProxyPriv::Make(
+ GrBackendApi backend,
+ const GrContextOptions& options,
+ uint32_t contextID,
+ sk_sp<const GrCaps> caps,
+ sk_sp<GrSkSLFPFactoryCache> cache) {
+ sk_sp<GrContextThreadSafeProxy> proxy(new GrContextThreadSafeProxy(backend, options,
+ contextID));
+
+ if (!proxy->init(std::move(caps), std::move(cache))) {
+ return nullptr;
+ }
+ return proxy;
+}
diff --git a/gfx/skia/skia/src/gpu/GrContextThreadSafeProxyPriv.h b/gfx/skia/skia/src/gpu/GrContextThreadSafeProxyPriv.h
new file mode 100644
index 0000000000..9f83e4bc38
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContextThreadSafeProxyPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextThreadSafeProxyPriv_DEFINED
+#define GrContextThreadSafeProxyPriv_DEFINED
+
+#include "include/gpu/GrContextThreadSafeProxy.h"
+
+#include "src/gpu/GrCaps.h"
+
+/**
+ * Class that adds methods to GrContextThreadSafeProxy that are only intended for use internal to
+ * Skia. This class is purely a privileged window into GrContextThreadSafeProxy. It should never
+ * have additional data members or virtual methods.
+ */
+class GrContextThreadSafeProxyPriv {
+public:
+ // from GrContext_Base
+ uint32_t contextID() const { return fProxy->contextID(); }
+
+ bool matches(GrContext_Base* candidate) const { return fProxy->matches(candidate); }
+
+ const GrContextOptions& options() const { return fProxy->options(); }
+
+ const GrCaps* caps() const { return fProxy->caps(); }
+ sk_sp<const GrCaps> refCaps() const { return fProxy->refCaps(); }
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ // GrContextThreadSafeProxyPriv
+ static sk_sp<GrContextThreadSafeProxy> Make(GrBackendApi,
+ const GrContextOptions&,
+ uint32_t contextID,
+ sk_sp<const GrCaps>,
+ sk_sp<GrSkSLFPFactoryCache>);
+
+private:
+ explicit GrContextThreadSafeProxyPriv(GrContextThreadSafeProxy* proxy) : fProxy(proxy) {}
+    GrContextThreadSafeProxyPriv(const GrContextThreadSafeProxyPriv&) = delete;
+ GrContextThreadSafeProxyPriv& operator=(const GrContextThreadSafeProxyPriv&) = delete;
+
+ // No taking addresses of this type.
+ const GrContextThreadSafeProxyPriv* operator&() const = delete;
+ GrContextThreadSafeProxyPriv* operator&() = delete;
+
+ GrContextThreadSafeProxy* fProxy;
+
+ friend class GrContextThreadSafeProxy; // to construct/copy this type.
+};
+
+inline GrContextThreadSafeProxyPriv GrContextThreadSafeProxy::priv() {
+ return GrContextThreadSafeProxyPriv(this);
+}
+
+inline const GrContextThreadSafeProxyPriv GrContextThreadSafeProxy::priv() const {
+ return GrContextThreadSafeProxyPriv(const_cast<GrContextThreadSafeProxy*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrContext_Base.cpp b/gfx/skia/skia/src/gpu/GrContext_Base.cpp
new file mode 100644
index 0000000000..1c79b32eca
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContext_Base.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrContext_Base.h"
+
+#include "src/gpu/GrBaseContextPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+
+static int32_t next_id() {
+ static std::atomic<int32_t> nextID{1};
+ int32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+GrContext_Base::GrContext_Base(GrBackendApi backend,
+ const GrContextOptions& options,
+ uint32_t contextID)
+ : fBackend(backend)
+ , fOptions(options)
+ , fContextID(SK_InvalidGenID == contextID ? next_id() : contextID) {
+}
+
+GrContext_Base::~GrContext_Base() { }
+
+bool GrContext_Base::init(sk_sp<const GrCaps> caps, sk_sp<GrSkSLFPFactoryCache> FPFactoryCache) {
+ SkASSERT(caps && FPFactoryCache);
+
+ fCaps = caps;
+ fFPFactoryCache = FPFactoryCache;
+ return true;
+}
+
+const GrCaps* GrContext_Base::caps() const { return fCaps.get(); }
+sk_sp<const GrCaps> GrContext_Base::refCaps() const { return fCaps; }
+
+sk_sp<GrSkSLFPFactoryCache> GrContext_Base::fpFactoryCache() { return fFPFactoryCache; }
+
+GrBackendFormat GrContext_Base::defaultBackendFormat(SkColorType skColorType,
+ GrRenderable renderable) const {
+ const GrCaps* caps = this->caps();
+
+ GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
+
+ GrBackendFormat format = caps->getDefaultBackendFormat(grColorType, renderable);
+ if (!format.isValid()) {
+ return GrBackendFormat();
+ }
+
+ SkASSERT(caps->isFormatTexturableAndUploadable(grColorType, format));
+ SkASSERT(renderable == GrRenderable::kNo ||
+ caps->isFormatAsColorTypeRenderable(grColorType, format));
+
+ return format;
+}
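+// Usage sketch (hypothetical 'ctx'): query the format the context would pick
+// for a color type before allocating backend textures with it.
+//
+//     GrBackendFormat fmt = ctx->defaultBackendFormat(kRGBA_8888_SkColorType,
+//                                                     GrRenderable::kYes);
+//     if (!fmt.isValid()) { /* color type unsupported on this backend */ }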
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+sk_sp<const GrCaps> GrBaseContextPriv::refCaps() const {
+ return fContext->refCaps();
+}
+
+sk_sp<GrSkSLFPFactoryCache> GrBaseContextPriv::fpFactoryCache() {
+ return fContext->fpFactoryCache();
+}
diff --git a/gfx/skia/skia/src/gpu/GrCoordTransform.h b/gfx/skia/skia/src/gpu/GrCoordTransform.h
new file mode 100644
index 0000000000..2451363d38
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCoordTransform.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCoordTransform_DEFINED
+#define GrCoordTransform_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrTexture;
+
+/**
+ * A class representing a linear transformation of local coordinates. GrFragmentProcessors
+ * own these transformations, and the GrGeometryProcessor implements them.
+ */
+class GrCoordTransform {
+public:
+ GrCoordTransform()
+ : fProxy(nullptr)
+ , fNormalize(false)
+ , fReverseY(false)
+ , fComputeInVertexShader(true) {
+ SkDEBUGCODE(fInProcessor = false);
+ }
+
+ GrCoordTransform(const GrCoordTransform&) = default;
+
+ /**
+ * Create a transformation that maps [0, 1] to a proxy's boundaries. The proxy origin also
+ * implies whether a y-reversal should be performed.
+ */
+ GrCoordTransform(GrTextureProxy* proxy) {
+ SkASSERT(proxy);
+ SkDEBUGCODE(fInProcessor = false);
+ this->reset(SkMatrix::I(), proxy);
+ }
+
+ /**
+ * Create a transformation from a matrix. The proxy origin also implies whether a y-reversal
+ * should be performed.
+ */
+ GrCoordTransform(const SkMatrix& m, GrTextureProxy* proxy) {
+ SkASSERT(proxy);
+ SkDEBUGCODE(fInProcessor = false);
+ this->reset(m, proxy);
+ }
+
+ /**
+ * Create a transformation that applies the matrix to a coord set.
+ */
+ GrCoordTransform(const SkMatrix& m) {
+ SkDEBUGCODE(fInProcessor = false);
+ this->reset(m);
+ }
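+    // Construction sketch (hypothetical 'proxy' and 'matrix'): the proxy-based
+    // forms derive normalization and y-reversal from the proxy, while the
+    // matrix-only form is a plain local-coordinate transform.
+    //
+    //     GrCoordTransform t1(proxy);          // [0,1] -> proxy bounds
+    //     GrCoordTransform t2(matrix, proxy);  // matrix, then proxy rules
+    //     GrCoordTransform t3(matrix);         // matrix only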
+
+ GrCoordTransform& operator= (const GrCoordTransform& that) {
+ SkASSERT(!fInProcessor);
+ fMatrix = that.fMatrix;
+ fProxy = that.fProxy;
+ fNormalize = that.fNormalize;
+ fReverseY = that.fReverseY;
+ return *this;
+ }
+
+ /**
+ * Access the matrix for editing. Note, this must be done before adding the transform to an
+ * effect, since effects are immutable.
+ */
+ SkMatrix* accessMatrix() {
+ SkASSERT(!fInProcessor);
+ return &fMatrix;
+ }
+
+ bool hasSameEffectAs(const GrCoordTransform& that) const {
+ if (fNormalize != that.fNormalize ||
+ fReverseY != that.fReverseY ||
+ !fMatrix.cheapEqualTo(that.fMatrix)) {
+ return false;
+ }
+
+ if (fNormalize) {
+ if (fProxy->underlyingUniqueID() != that.fProxy->underlyingUniqueID()) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+ const GrTextureProxy* proxy() const { return fProxy; }
+ bool normalize() const { return fNormalize; }
+ bool reverseY() const { return fReverseY; }
+
+ // This should only ever be called at flush time after the backing texture has been
+ // successfully instantiated
+ GrTexture* peekTexture() const { return fProxy->peekTexture(); }
+
+ bool computeInVertexShader() const { return fComputeInVertexShader; }
+
+ void setComputeInVertexShader(bool computeInVertexShader) {
+ fComputeInVertexShader = computeInVertexShader;
+ }
+
+private:
+ void reset(const SkMatrix& m, GrTextureProxy* proxy = nullptr) {
+ SkASSERT(!fInProcessor);
+
+ fMatrix = m;
+ fProxy = proxy;
+ fNormalize = proxy && proxy->textureType() != GrTextureType::kRectangle;
+ fReverseY = proxy && kBottomLeft_GrSurfaceOrigin == proxy->origin();
+ fComputeInVertexShader = true;
+ }
+
+ // The textures' effect is to optionally normalize the final matrix, so a blind
+ // equality check could be misleading
+ bool operator==(const GrCoordTransform& that) const;
+ bool operator!=(const GrCoordTransform& that) const;
+
+ SkMatrix fMatrix;
+ const GrTextureProxy* fProxy;
+ bool fNormalize;
+ bool fReverseY;
+ bool fComputeInVertexShader;
+
+#ifdef SK_DEBUG
+public:
+ void setInProcessor() const { fInProcessor = true; }
+private:
+ mutable bool fInProcessor;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrCopyRenderTask.cpp b/gfx/skia/skia/src/gpu/GrCopyRenderTask.cpp
new file mode 100644
index 0000000000..23e8086d3f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCopyRenderTask.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCopyRenderTask.h"
+
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceAllocator.h"
+
+sk_sp<GrRenderTask> GrCopyRenderTask::Make(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy,
+ const SkIPoint& dstPoint,
+ const GrCaps* caps) {
+ SkASSERT(dstProxy);
+ SkASSERT(srcProxy);
+ SkIRect clippedSrcRect;
+ SkIPoint clippedDstPoint;
+ // If the clipped rect is empty (i.e. the rect lies outside the srcProxy or dstProxy),
+ // the copy is a no-op and trivially succeeds, so no task is needed.
+ if (!GrClipSrcRectAndDstPoint(dstProxy->isize(), srcProxy->isize(), srcRect, dstPoint,
+ &clippedSrcRect, &clippedDstPoint)) {
+ return nullptr;
+ }
+
+ if (caps->isFormatCompressed(dstProxy->backendFormat())) {
+ return nullptr;
+ }
+
+ SkASSERT(dstProxy->origin() == srcProxy->origin());
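+ // Worked example for the flip below: with a 100px-tall bottom-left-origin proxy, a src
+ // rect with fTop=10, fBottom=30 becomes fTop=70, fBottom=90 in top-left space.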
+ if (srcProxy->origin() == kBottomLeft_GrSurfaceOrigin) {
+ int rectHeight = clippedSrcRect.height();
+ clippedSrcRect.fTop = srcProxy->height() - clippedSrcRect.fBottom;
+ clippedSrcRect.fBottom = clippedSrcRect.fTop + rectHeight;
+ clippedDstPoint.fY = dstProxy->height() - clippedDstPoint.fY - rectHeight;
+ }
+
+ sk_sp<GrCopyRenderTask> task(new GrCopyRenderTask(
+ std::move(srcProxy), clippedSrcRect, std::move(dstProxy), clippedDstPoint));
+ return task;
+}
+
+GrCopyRenderTask::GrCopyRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy,
+ const SkIPoint& dstPoint)
+ : GrRenderTask(std::move(dstProxy))
+ , fSrcProxy(std::move(srcProxy))
+ , fSrcRect(srcRect)
+ , fDstPoint(dstPoint) {
+ fTarget->setLastRenderTask(this);
+}
+
+void GrCopyRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+ // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // we read fSrcProxy and copy to fTarget.
+ alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
+ alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
+ alloc->incOps();
+}
+
+bool GrCopyRenderTask::onExecute(GrOpFlushState* flushState) {
+ if (!fSrcProxy->isInstantiated() || !fTarget->isInstantiated()) {
+ return false;
+ }
+ GrSurface* srcSurface = fSrcProxy->peekSurface();
+ GrSurface* dstSurface = fTarget->peekSurface();
+ if (fSrcProxy->origin() == kBottomLeft_GrSurfaceOrigin) {
+ if (fSrcProxy->height() != srcSurface->height()) {
+ fSrcRect.offset(0, srcSurface->height() - fSrcProxy->height());
+ }
+ if (fTarget->height() != dstSurface->height()) {
+ fDstPoint.fY = fDstPoint.fY + (dstSurface->height() - fTarget->height());
+ }
+ }
+ return flushState->gpu()->copySurface(dstSurface, srcSurface, fSrcRect, fDstPoint);
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrCopyRenderTask.h b/gfx/skia/skia/src/gpu/GrCopyRenderTask.h
new file mode 100644
index 0000000000..640bb1167c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCopyRenderTask.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCopyRenderTask_DEFINED
+#define GrCopyRenderTask_DEFINED
+
+#include "src/gpu/GrRenderTask.h"
+
+class GrCopyRenderTask final : public GrRenderTask {
+public:
+ static sk_sp<GrRenderTask> Make(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy,
+ const SkIPoint& dstPoint,
+ const GrCaps*);
+
+private:
+ GrCopyRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy,
+ const SkIPoint& dstPoint);
+
+ bool onIsUsed(GrSurfaceProxy* proxy) const override {
+ SkASSERT(proxy != fTarget.get()); // This case should be handled by GrRenderTask.
+ return proxy == fSrcProxy.get();
+ }
+ // If instantiation failed, we will simply skip the copy at flush time.
+ void handleInternalAllocationFailure() override {}
+ void gatherProxyIntervals(GrResourceAllocator*) const override;
+ ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect* targetUpdateBounds) override {
+ targetUpdateBounds->setXYWH(fDstPoint.x(), fDstPoint.y(), fSrcRect.width(),
+ fSrcRect.height());
+ return ExpectedOutcome::kTargetDirty;
+ }
+ bool onExecute(GrOpFlushState*) override;
+
+#ifdef SK_DEBUG
+ void visitProxies_debugOnly(const VisitSurfaceProxyFunc& fn) const override {
+ fn(fSrcProxy.get(), GrMipMapped::kNo);
+ }
+#endif
+
+ sk_sp<GrSurfaceProxy> fSrcProxy;
+ SkIRect fSrcRect;
+ SkIPoint fDstPoint;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrCpuBuffer.h b/gfx/skia/skia/src/gpu/GrCpuBuffer.h
new file mode 100644
index 0000000000..5149766676
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCpuBuffer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCpuBuffer_DEFINED
+#define GrCpuBuffer_DEFINED
+
+#include "src/core/SkSafeMath.h"
+#include "src/gpu/GrBuffer.h"
+#include "src/gpu/GrNonAtomicRef.h"
+
+class GrCpuBuffer final : public GrNonAtomicRef<GrCpuBuffer>, public GrBuffer {
+public:
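+ // Make() performs one heap allocation that holds the GrCpuBuffer object followed
+ // immediately by 'size' bytes of payload; data() points at that trailing payload.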
+ static sk_sp<GrCpuBuffer> Make(size_t size) {
+ SkASSERT(size > 0);
+ SkSafeMath sm;
+ size_t combinedSize = sm.add(sizeof(GrCpuBuffer), size);
+ if (!sm.ok()) {
+ SK_ABORT("Buffer size is too big.");
+ }
+ auto mem = ::operator new(combinedSize);
+ return sk_sp<GrCpuBuffer>(new (mem) GrCpuBuffer((char*)mem + sizeof(GrCpuBuffer), size));
+ }
+
+ // TODO(b/30449950): use sized delete once P0722R3 is available
+ static void operator delete(void* p) { ::operator delete(p); }
+
+ void ref() const override { GrNonAtomicRef<GrCpuBuffer>::ref(); }
+ void unref() const override { GrNonAtomicRef<GrCpuBuffer>::unref(); }
+ size_t size() const override { return fSize; }
+ bool isCpuBuffer() const override { return true; }
+
+ char* data() { return reinterpret_cast<char*>(fData); }
+ const char* data() const { return reinterpret_cast<const char*>(fData); }
+
+private:
+ GrCpuBuffer(void* data, size_t size) : fData(data), fSize(size) {}
+ void* fData;
+ size_t fSize;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDDLContext.cpp b/gfx/skia/skia/src/gpu/GrDDLContext.cpp
new file mode 100644
index 0000000000..4d14504d4e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDDLContext.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+
+/**
+ * The DDL Context is the one in effect during DDL Recording. It isn't backed by a GrGPU and
+ * cannot allocate any GPU resources.
+ */
+class GrDDLContext : public GrContext {
+public:
+ GrDDLContext(sk_sp<GrContextThreadSafeProxy> proxy)
+ : INHERITED(proxy->backend(), proxy->priv().options(), proxy->priv().contextID()) {
+ fThreadSafeProxy = std::move(proxy);
+ }
+
+ ~GrDDLContext() override { }
+
+ void abandonContext() override {
+ SkASSERT(0); // abandoning in a DDL Recorder doesn't make a whole lot of sense
+ INHERITED::abandonContext();
+ }
+
+ void releaseResourcesAndAbandonContext() override {
+ SkASSERT(0); // abandoning in a DDL Recorder doesn't make a whole lot of sense
+ INHERITED::releaseResourcesAndAbandonContext();
+ }
+
+ void freeGpuResources() override {
+ SkASSERT(0); // freeing resources in a DDL Recorder doesn't make a whole lot of sense
+ INHERITED::freeGpuResources();
+ }
+
+protected:
+ // TODO: Here we're pretending this isn't derived from GrContext. Switch this to be derived from
+ // GrRecordingContext!
+ GrContext* asDirectContext() override { return nullptr; }
+
+ bool init(sk_sp<const GrCaps> caps, sk_sp<GrSkSLFPFactoryCache> FPFactoryCache) override {
+ SkASSERT(caps && FPFactoryCache);
+ SkASSERT(fThreadSafeProxy); // should've been set in the ctor
+
+ if (!INHERITED::init(std::move(caps), std::move(FPFactoryCache))) {
+ return false;
+ }
+
+ // DDL contexts/drawing managers always sort the oplists and attempt to reduce opsTask
+ // splitting.
+ this->setupDrawingManager(true, true);
+
+ SkASSERT(this->caps());
+
+ return true;
+ }
+
+ GrAtlasManager* onGetAtlasManager() override {
+ SkASSERT(0); // the DDL Recorders should never invoke this
+ return nullptr;
+ }
+
+private:
+ typedef GrContext INHERITED;
+};
+
+sk_sp<GrContext> GrContextPriv::MakeDDL(const sk_sp<GrContextThreadSafeProxy>& proxy) {
+ sk_sp<GrContext> context(new GrDDLContext(proxy));
+
+ if (!context->init(proxy->priv().refCaps(), proxy->priv().fpFactoryCache())) {
+ return nullptr;
+ }
+ return context;
+}
diff --git a/gfx/skia/skia/src/gpu/GrDataUtils.cpp b/gfx/skia/skia/src/gpu/GrDataUtils.cpp
new file mode 100644
index 0000000000..6265aaec28
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDataUtils.cpp
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrDataUtils.h"
+
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkUtils.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrImageInfo.h"
+
+struct ETC1Block {
+ uint32_t fHigh;
+ uint32_t fLow;
+};
+
+static const int kNumModifierTables = 8;
+static const int kNumPixelIndices = 4;
+
+// The index of each row in this table is the ETC1 table codeword
+// The index of each column in this table is the ETC1 pixel index value
+static const int kModifierTables[kNumModifierTables][kNumPixelIndices] = {
+ /* 0 */ { 2, 8, -2, -8 },
+ /* 1 */ { 5, 17, -5, -17 },
+ /* 2 */ { 9, 29, -9, -29 },
+ /* 3 */ { 13, 42, -13, -42 },
+ /* 4 */ { 18, 60, -18, -60 },
+ /* 5 */ { 24, 80, -24, -80 },
+ /* 6 */ { 33, 106, -33, -106 },
+ /* 7 */ { 47, 183, -47, -183 }
+};
+
+static inline int convert_5To8(int b) {
+ int c = b & 0x1f;
+ return (c << 3) | (c >> 2);
+}
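+
+// E.g. convert_5To8(0x1f) == 0xff and convert_5To8(0x10) == 0x84: the top bits are
+// replicated into the low bits so the 5-bit range expands to the full 8-bit range.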
+
+// Evaluate one of the entries in 'kModifierTables' to see how close it can get (r8,g8,b8) to
+// the original color (rOrig, gOrig, bOrig).
+static int test_table_entry(int rOrig, int gOrig, int bOrig,
+ int r8, int g8, int b8,
+ int table, int offset) {
+ SkASSERT(0 <= table && table < 8);
+ SkASSERT(0 <= offset && offset < 4);
+
+ r8 = SkTPin<uint8_t>(r8 + kModifierTables[table][offset], 0, 255);
+ g8 = SkTPin<uint8_t>(g8 + kModifierTables[table][offset], 0, 255);
+ b8 = SkTPin<uint8_t>(b8 + kModifierTables[table][offset], 0, 255);
+
+ return SkTAbs(rOrig - r8) + SkTAbs(gOrig - g8) + SkTAbs(bOrig - b8);
+}
+
+// Create an ETC1 compressed block that is filled with 'col'
+static void create_etc1_block(SkColor col, ETC1Block* block) {
+ block->fHigh = 0;
+ block->fLow = 0;
+
+ int rOrig = SkColorGetR(col);
+ int gOrig = SkColorGetG(col);
+ int bOrig = SkColorGetB(col);
+
+ int r5 = SkMulDiv255Round(31, rOrig);
+ int g5 = SkMulDiv255Round(31, gOrig);
+ int b5 = SkMulDiv255Round(31, bOrig);
+
+ int r8 = convert_5To8(r5);
+ int g8 = convert_5To8(g5);
+ int b8 = convert_5To8(b5);
+
+ // We always encode solid color textures as 555 + zero diffs
+ block->fHigh |= (r5 << 27) | (g5 << 19) | (b5 << 11) | 0x2;
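+ // fHigh now holds R5 in bits 31:27, G5 in 23:19, B5 in 15:11, and the 'diff' flag in
+ // bit 1; the two 3-bit table codewords are ORed in below at bits 7:5 and 4:2.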
+
+ int bestTableIndex = 0, bestPixelIndex = 0;
+ int bestSoFar = 1024;
+ for (int tableIndex = 0; tableIndex < kNumModifierTables; ++tableIndex) {
+ for (int pixelIndex = 0; pixelIndex < kNumPixelIndices; ++pixelIndex) {
+ int score = test_table_entry(rOrig, gOrig, bOrig, r8, g8, b8,
+ tableIndex, pixelIndex);
+
+ if (bestSoFar > score) {
+ bestSoFar = score;
+ bestTableIndex = tableIndex;
+ bestPixelIndex = pixelIndex;
+ }
+ }
+ }
+
+ block->fHigh |= (bestTableIndex << 5) | (bestTableIndex << 2);
+
+ for (int i = 0; i < 16; ++i) {
+ block->fLow |= bestPixelIndex << 2*i;
+ }
+}
+
+static int num_ETC1_blocks_w(int w) {
+ if (w < 4) {
+ w = 1;
+ } else {
+ SkASSERT((w & 3) == 0);
+ w >>= 2;
+ }
+ return w;
+}
+
+static int num_ETC1_blocks(int w, int h) {
+ w = num_ETC1_blocks_w(w);
+
+ if (h < 4) {
+ h = 1;
+ } else {
+ SkASSERT((h & 3) == 0);
+ h >>= 2;
+ }
+
+ return w * h;
+}
+
+size_t GrCompressedDataSize(SkImage::CompressionType type, int width, int height) {
+ switch (type) {
+ case SkImage::kETC1_CompressionType:
+ int numBlocks = num_ETC1_blocks(width, height);
+ return numBlocks * sizeof(ETC1Block);
+ }
+ SK_ABORT("Unexpected compression type");
+}
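+
+// Worked example: a 16x16 ETC1 texture is 4x4 = 16 blocks of 8 bytes each, so
+// GrCompressedDataSize(SkImage::kETC1_CompressionType, 16, 16) returns 128.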
+
+size_t GrCompressedRowBytes(SkImage::CompressionType type, int width) {
+ switch (type) {
+ case SkImage::kETC1_CompressionType:
+ int numBlocksWidth = num_ETC1_blocks_w(width);
+ return numBlocksWidth * sizeof(ETC1Block);
+ }
+ SK_ABORT("Unexpected compression type");
+}
+
+// Fill in 'dest' with ETC1 blocks derived from 'colorf'
+static void fillin_ETC1_with_color(int width, int height, const SkColor4f& colorf, void* dest) {
+ SkColor color = colorf.toSkColor();
+
+ ETC1Block block;
+ create_etc1_block(color, &block);
+
+ int numBlocks = num_ETC1_blocks(width, height);
+
+ for (int i = 0; i < numBlocks; ++i) {
+ ((ETC1Block*)dest)[i] = block;
+ }
+}
+
+// Fill in the width x height 'dest' with the munged version of 'colorf' that matches 'config'
+static bool fill_buffer_with_color(GrColorType colorType, int width, int height,
+ const SkColor4f& colorf, void* dest) {
+ GrColor color = colorf.toBytes_RGBA();
+
+ uint8_t r = GrColorUnpackR(color);
+ uint8_t g = GrColorUnpackG(color);
+ uint8_t b = GrColorUnpackB(color);
+ uint8_t a = GrColorUnpackA(color);
+
+ switch (colorType) {
+ case GrColorType::kAlpha_8: {
+ memset(dest, a, width * height);
+ break;
+ }
+ case GrColorType::kGray_8: {
+ uint8_t gray8 = SkComputeLuminance(r, g, b);
+
+ memset(dest, gray8, width * height);
+ break;
+ }
+ case GrColorType::kBGR_565: {
+ uint16_t rgb565 = SkPack888ToRGB16(r, g, b);
+
+ sk_memset16((uint16_t*) dest, rgb565, width * height);
+ break;
+ }
+ case GrColorType::kABGR_4444: {
+ uint8_t r4 = (r >> 4) & 0xF;
+ uint8_t g4 = (g >> 4) & 0xF;
+ uint8_t b4 = (b >> 4) & 0xF;
+ uint8_t a4 = (a >> 4) & 0xF;
+
+ uint16_t rgba4444 = r4 << SK_R4444_SHIFT | g4 << SK_G4444_SHIFT |
+ b4 << SK_B4444_SHIFT | a4 << SK_A4444_SHIFT;
+
+ sk_memset16((uint16_t*) dest, rgba4444, width * height);
+ break;
+ }
+ case GrColorType::kRGBA_8888: {
+ sk_memset32((uint32_t *) dest, color, width * height);
+ break;
+ }
+ case GrColorType::kRGB_888x: {
+ GrColor opaque = GrColorPackRGBA(r, g, b, 0xFF);
+
+ sk_memset32((uint32_t *) dest, opaque, width * height);
+ break;
+ }
+ case GrColorType::kRG_88: {
+ uint16_t rg88 = (g << 8) | r;
+
+ sk_memset16((uint16_t*) dest, rg88, width * height);
+ break;
+ }
+ case GrColorType::kBGRA_8888: {
+ GrColor swizzled = GrColorPackRGBA(b, g, r, a);
+
+ sk_memset32((uint32_t *) dest, swizzled, width * height);
+ break;
+ }
+ case GrColorType::kRGBA_8888_SRGB: {
+ sk_memset32((uint32_t *) dest, color, width * height);
+ break;
+ }
+ case GrColorType::kRGBA_1010102: {
+ uint32_t r10 = SkScalarRoundToInt(colorf.fR * 1023.0f);
+ uint32_t g10 = SkScalarRoundToInt(colorf.fG * 1023.0f);
+ uint32_t b10 = SkScalarRoundToInt(colorf.fB * 1023.0f);
+ uint8_t a2 = SkScalarRoundToInt(colorf.fA * 3.0f);
+
+ uint32_t rgba1010102 = a2 << 30 | b10 << 20 | g10 << 10 | r10;
+
+ sk_memset32((uint32_t *) dest, rgba1010102, width * height);
+ break;
+ }
+ case GrColorType::kAlpha_F16: {
+ SkHalf alphaHalf = SkFloatToHalf(colorf.fA);
+
+ sk_memset16((uint16_t *) dest, alphaHalf, width * height);
+ break;
+ }
+ case GrColorType::kRGBA_F16_Clamped: // fall through
+ case GrColorType::kRGBA_F16: {
+ uint64_t rHalf = SkFloatToHalf(colorf.fR);
+ uint64_t gHalf = SkFloatToHalf(colorf.fG);
+ uint64_t bHalf = SkFloatToHalf(colorf.fB);
+ uint64_t aHalf = SkFloatToHalf(colorf.fA);
+
+ uint64_t rgbaHalf = (aHalf << 48) | (bHalf << 32) | (gHalf << 16) | rHalf;
+
+ sk_memset64((uint64_t *) dest, rgbaHalf, width * height);
+ break;
+ }
+ case GrColorType::kAlpha_16: {
+ uint16_t a16 = SkScalarRoundToInt(colorf.fA * 65535.0f);
+ sk_memset16((uint16_t*) dest, a16, width * height);
+ break;
+ }
+ case GrColorType::kRG_1616: {
+ uint32_t r16 = SkScalarRoundToInt(colorf.fR * 65535.0f);
+ uint32_t g16 = SkScalarRoundToInt(colorf.fG * 65535.0f);
+
+ uint32_t rg1616 = (g16 << 16) | r16;
+
+ sk_memset32((uint32_t*) dest, rg1616, width * height);
+ break;
+ }
+ case GrColorType::kRGBA_16161616: {
+ uint64_t r16 = SkScalarRoundToInt(colorf.fR * 65535.0f);
+ uint64_t g16 = SkScalarRoundToInt(colorf.fG * 65535.0f);
+ uint64_t b16 = SkScalarRoundToInt(colorf.fB * 65535.0f);
+ uint64_t a16 = SkScalarRoundToInt(colorf.fA * 65535.0f);
+
+ uint64_t rgba16161616 = (a16 << 48) | (b16 << 32) | (g16 << 16) | r16;
+ sk_memset64((uint64_t*) dest, rgba16161616, width * height);
+ break;
+ }
+ case GrColorType::kRG_F16: {
+ uint32_t rHalf = SkFloatToHalf(colorf.fR);
+ uint32_t gHalf = SkFloatToHalf(colorf.fG);
+
+ uint32_t rgHalf = (gHalf << 16) | rHalf;
+
+ sk_memset32((uint32_t *) dest, rgHalf, width * height);
+ break;
+ }
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+size_t GrComputeTightCombinedBufferSize(size_t bytesPerPixel, int baseWidth, int baseHeight,
+ SkTArray<size_t>* individualMipOffsets, int mipLevelCount) {
+ SkASSERT(individualMipOffsets && !individualMipOffsets->count());
+ SkASSERT(mipLevelCount >= 1);
+
+ individualMipOffsets->push_back(0);
+
+ size_t combinedBufferSize = baseWidth * bytesPerPixel * baseHeight;
+ int currentWidth = baseWidth;
+ int currentHeight = baseHeight;
+
+ // The Vulkan spec for copying a buffer to an image requires that the alignment must be at
+ // least 4 bytes and a multiple of the bytes per pixel of the image config.
+ SkASSERT(bytesPerPixel == 1 || bytesPerPixel == 2 || bytesPerPixel == 3 ||
+ bytesPerPixel == 4 || bytesPerPixel == 8 || bytesPerPixel == 16);
+ int desiredAlignment = (bytesPerPixel == 3) ? 12 : (bytesPerPixel > 4 ? bytesPerPixel : 4);
+
+ for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; ++currentMipLevel) {
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+
+ size_t trimmedSize = currentWidth * bytesPerPixel * currentHeight;
+ const size_t alignmentDiff = combinedBufferSize % desiredAlignment;
+ if (alignmentDiff != 0) {
+ combinedBufferSize += desiredAlignment - alignmentDiff;
+ }
+ SkASSERT((0 == combinedBufferSize % 4) && (0 == combinedBufferSize % bytesPerPixel));
+
+ individualMipOffsets->push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ }
+
+ SkASSERT(individualMipOffsets->count() == mipLevelCount);
+ return combinedBufferSize;
+}
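+
+// Worked example: an 8x8 base level at 4 bytes per pixel with 4 mip levels yields
+// offsets {0, 256, 320, 336} and a combined size of 340 bytes (the alignment is already
+// satisfied at each level here, so no padding is inserted).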
+
+void GrFillInData(GrColorType colorType, int baseWidth, int baseHeight,
+ const SkTArray<size_t>& individualMipOffsets, char* dstPixels,
+ const SkColor4f& colorf) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ int mipLevels = individualMipOffsets.count();
+
+ int currentWidth = baseWidth;
+ int currentHeight = baseHeight;
+ for (int currentMipLevel = 0; currentMipLevel < mipLevels; ++currentMipLevel) {
+ size_t offset = individualMipOffsets[currentMipLevel];
+
+ fill_buffer_with_color(colorType, currentWidth, currentHeight, colorf,
+ &(dstPixels[offset]));
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+ }
+}
+
+void GrFillInCompressedData(SkImage::CompressionType type, int baseWidth, int baseHeight,
+ char* dstPixels, const SkColor4f& colorf) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ int currentWidth = baseWidth;
+ int currentHeight = baseHeight;
+ if (SkImage::kETC1_CompressionType == type) {
+ fillin_ETC1_with_color(currentWidth, currentHeight, colorf, dstPixels);
+ }
+}
+
+static GrSwizzle get_load_and_get_swizzle(GrColorType ct, SkRasterPipeline::StockStage* load,
+ bool* isNormalized, bool* isSRGB) {
+ GrSwizzle swizzle("rgba");
+ *isNormalized = true;
+ *isSRGB = false;
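+ // A GrSwizzle string maps each output channel to an input channel or constant: e.g.
+ // "000r" (used for kAlpha_8xxx below) forces rgb to 0 and takes alpha from the loaded
+ // red channel, while "rgb1" keeps rgb and forces alpha to 1.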
+ switch (ct) {
+ case GrColorType::kAlpha_8: *load = SkRasterPipeline::load_a8; break;
+ case GrColorType::kAlpha_16: *load = SkRasterPipeline::load_a16; break;
+ case GrColorType::kBGR_565: *load = SkRasterPipeline::load_565; break;
+ case GrColorType::kABGR_4444: *load = SkRasterPipeline::load_4444; break;
+ case GrColorType::kRGBA_8888: *load = SkRasterPipeline::load_8888; break;
+ case GrColorType::kRG_88: *load = SkRasterPipeline::load_rg88; break;
+ case GrColorType::kRGBA_1010102: *load = SkRasterPipeline::load_1010102; break;
+ case GrColorType::kAlpha_F16: *load = SkRasterPipeline::load_af16; break;
+ case GrColorType::kRGBA_F16_Clamped: *load = SkRasterPipeline::load_f16; break;
+ case GrColorType::kRG_1616: *load = SkRasterPipeline::load_rg1616; break;
+ case GrColorType::kRGBA_16161616: *load = SkRasterPipeline::load_16161616; break;
+
+ case GrColorType::kRGBA_8888_SRGB: *load = SkRasterPipeline::load_8888;
+ *isSRGB = true;
+ break;
+ case GrColorType::kRG_F16: *load = SkRasterPipeline::load_rgf16;
+ *isNormalized = false;
+ break;
+ case GrColorType::kRGBA_F16: *load = SkRasterPipeline::load_f16;
+ *isNormalized = false;
+ break;
+ case GrColorType::kRGBA_F32: *load = SkRasterPipeline::load_f32;
+ *isNormalized = false;
+ break;
+ case GrColorType::kAlpha_8xxx: *load = SkRasterPipeline::load_8888;
+ swizzle = GrSwizzle("000r");
+ break;
+ case GrColorType::kAlpha_F32xxx: *load = SkRasterPipeline::load_f32;
+ swizzle = GrSwizzle("000r");
+ break;
+ case GrColorType::kGray_8xxx: *load = SkRasterPipeline::load_8888;
+ swizzle = GrSwizzle("rrr1");
+ break;
+ case GrColorType::kGray_8: *load = SkRasterPipeline::load_a8;
+ swizzle = GrSwizzle("aaa1");
+ break;
+ case GrColorType::kBGRA_8888: *load = SkRasterPipeline::load_8888;
+ swizzle = GrSwizzle("bgra");
+ break;
+ case GrColorType::kRGB_888x: *load = SkRasterPipeline::load_8888;
+ swizzle = GrSwizzle("rgb1");
+ break;
+
+ case GrColorType::kUnknown:
+ SK_ABORT("unexpected CT");
+ }
+ return swizzle;
+}
+
+static GrSwizzle get_dst_swizzle_and_store(GrColorType ct, SkRasterPipeline::StockStage* store,
+ bool* isNormalized, bool* isSRGB) {
+ GrSwizzle swizzle("rgba");
+ *isNormalized = true;
+ *isSRGB = false;
+ switch (ct) {
+ case GrColorType::kAlpha_8: *store = SkRasterPipeline::store_a8; break;
+ case GrColorType::kAlpha_16: *store = SkRasterPipeline::store_a16; break;
+ case GrColorType::kBGR_565: *store = SkRasterPipeline::store_565; break;
+ case GrColorType::kABGR_4444: *store = SkRasterPipeline::store_4444; break;
+ case GrColorType::kRGBA_8888: *store = SkRasterPipeline::store_8888; break;
+ case GrColorType::kRG_88: *store = SkRasterPipeline::store_rg88; break;
+ case GrColorType::kRGBA_1010102: *store = SkRasterPipeline::store_1010102; break;
+ case GrColorType::kRGBA_F16_Clamped: *store = SkRasterPipeline::store_f16; break;
+ case GrColorType::kRG_1616: *store = SkRasterPipeline::store_rg1616; break;
+ case GrColorType::kRGBA_16161616: *store = SkRasterPipeline::store_16161616; break;
+
+ case GrColorType::kRGBA_8888_SRGB: *store = SkRasterPipeline::store_8888;
+ *isSRGB = true;
+ break;
+ case GrColorType::kRG_F16: *store = SkRasterPipeline::store_rgf16;
+ *isNormalized = false;
+ break;
+ case GrColorType::kAlpha_F16: *store = SkRasterPipeline::store_af16;
+ *isNormalized = false;
+ break;
+ case GrColorType::kRGBA_F16: *store = SkRasterPipeline::store_f16;
+ *isNormalized = false;
+ break;
+ case GrColorType::kRGBA_F32: *store = SkRasterPipeline::store_f32;
+ *isNormalized = false;
+ break;
+ case GrColorType::kAlpha_8xxx: *store = SkRasterPipeline::store_8888;
+ swizzle = GrSwizzle("a000");
+ break;
+ case GrColorType::kAlpha_F32xxx: *store = SkRasterPipeline::store_f32;
+ swizzle = GrSwizzle("a000");
+ break;
+ case GrColorType::kBGRA_8888: swizzle = GrSwizzle("bgra");
+ *store = SkRasterPipeline::store_8888;
+ break;
+ case GrColorType::kRGB_888x: swizzle = GrSwizzle("rgb1");
+ *store = SkRasterPipeline::store_8888;
+ break;
+
+ case GrColorType::kGray_8: // not currently supported as output
+ case GrColorType::kGray_8xxx: // not currently supported as output
+ case GrColorType::kUnknown:
+ SK_ABORT("unexpected CT");
+ }
+ return swizzle;
+}
+
+static inline void append_clamp_gamut(SkRasterPipeline* pipeline) {
+ // SkRasterPipeline may not know our color type and also doesn't like the caller to
+ // directly append clamp_gamut. Fake it out.
+ static SkImageInfo fakeII = SkImageInfo::MakeN32Premul(1, 1);
+ pipeline->append_gamut_clamp_if_normalized(fakeII);
+}
+
+bool GrConvertPixels(const GrImageInfo& dstInfo, void* dst, size_t dstRB,
+ const GrImageInfo& srcInfo, const void* src, size_t srcRB,
+ bool flipY) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ if (!srcInfo.isValid() || !dstInfo.isValid()) {
+ return false;
+ }
+ if (!src || !dst) {
+ return false;
+ }
+ if (dstInfo.width() != srcInfo.width() || srcInfo.height() != dstInfo.height()) {
+ return false;
+ }
+ if (GrColorTypeComponentFlags(dstInfo.colorType()) & kGray_SkColorTypeComponentFlag) {
+ // We don't currently support conversion to Gray.
+ return false;
+ }
+ if (dstRB < dstInfo.minRowBytes() || srcRB < srcInfo.minRowBytes()) {
+ return false;
+ }
+
+ size_t srcBpp = srcInfo.bpp();
+ size_t dstBpp = dstInfo.bpp();
+
+ // SkRasterPipeline operates on row-pixels not row-bytes.
+ SkASSERT(dstRB % dstBpp == 0);
+ SkASSERT(srcRB % srcBpp == 0);
+
+ bool premul = srcInfo.alphaType() == kUnpremul_SkAlphaType &&
+ dstInfo.alphaType() == kPremul_SkAlphaType;
+ bool unpremul = srcInfo.alphaType() == kPremul_SkAlphaType &&
+ dstInfo.alphaType() == kUnpremul_SkAlphaType;
+ bool alphaOrCSConversion =
+ premul || unpremul || !SkColorSpace::Equals(srcInfo.colorSpace(), dstInfo.colorSpace());
+
+ if (srcInfo.colorType() == dstInfo.colorType() && !alphaOrCSConversion) {
+ size_t tightRB = dstBpp * dstInfo.width();
+ if (flipY) {
+ dst = static_cast<char*>(dst) + dstRB * (dstInfo.height() - 1);
+ for (int y = 0; y < dstInfo.height(); ++y) {
+ memcpy(dst, src, tightRB);
+ src = static_cast<const char*>(src) + srcRB;
+ dst = static_cast< char*>(dst) - dstRB;
+ }
+ } else {
+ SkRectMemcpy(dst, dstRB, src, srcRB, tightRB, srcInfo.height());
+ }
+ return true;
+ }
+
+ SkRasterPipeline::StockStage load;
+ bool srcIsNormalized;
+ bool srcIsSRGB;
+ auto loadSwizzle = get_load_and_get_swizzle(srcInfo.colorType(), &load, &srcIsNormalized,
+ &srcIsSRGB);
+
+ SkRasterPipeline::StockStage store;
+ bool dstIsNormalized;
+ bool dstIsSRGB;
+ auto storeSwizzle = get_dst_swizzle_and_store(dstInfo.colorType(), &store, &dstIsNormalized,
+ &dstIsSRGB);
+
+ bool clampGamut;
+ SkTLazy<SkColorSpaceXformSteps> steps;
+ GrSwizzle loadStoreSwizzle;
+ if (alphaOrCSConversion) {
+ steps.init(srcInfo.colorSpace(), srcInfo.alphaType(),
+ dstInfo.colorSpace(), dstInfo.alphaType());
+ clampGamut = dstIsNormalized && dstInfo.alphaType() == kPremul_SkAlphaType;
+ } else {
+ clampGamut =
+ dstIsNormalized && !srcIsNormalized && dstInfo.alphaType() == kPremul_SkAlphaType;
+ if (!clampGamut) {
+ loadStoreSwizzle = GrSwizzle::Concat(loadSwizzle, storeSwizzle);
+ }
+ }
+ int cnt = 1;
+ int height = srcInfo.height();
+ SkRasterPipeline_MemoryCtx srcCtx{const_cast<void*>(src), SkToInt(srcRB / srcBpp)},
+ dstCtx{ dst , SkToInt(dstRB / dstBpp)};
+
+ if (flipY) {
+ // It *almost* works to point the src at the last row and negate the stride and run the
+ // whole rectangle. However, SkRasterPipeline::run()'s control loop uses size_t loop
+ // variables so it winds up relying on unsigned overflow math. It works out in practice
+ // but UBSAN says "no!" as it's technically undefined and in theory a compiler could emit
+ // code that didn't do what is intended. So we go one row at a time. :(
+ srcCtx.pixels = static_cast<char*>(srcCtx.pixels) + srcRB * (height - 1);
+ std::swap(cnt, height);
+ }
+
+ bool hasConversion = alphaOrCSConversion || clampGamut;
+
+ if (srcIsSRGB && dstIsSRGB && !hasConversion) {
+ // No need to convert from srgb if we are just going to immediately convert it back.
+ srcIsSRGB = dstIsSRGB = false;
+ }
+
+ hasConversion = hasConversion || srcIsSRGB || dstIsSRGB;
+
+ for (int i = 0; i < cnt; ++i) {
+ SkRasterPipeline_<256> pipeline;
+ pipeline.append(load, &srcCtx);
+ if (hasConversion) {
+ loadSwizzle.apply(&pipeline);
+ if (srcIsSRGB) {
+ pipeline.append(SkRasterPipeline::from_srgb);
+ }
+ if (alphaOrCSConversion) {
+ steps->apply(&pipeline, srcIsNormalized);
+ }
+ if (clampGamut) {
+ append_clamp_gamut(&pipeline);
+ }
+ // If we add support for storing to Gray we would add a luminance-to-alpha conversion
+ // here. We also wouldn't then need a to_srgb stage after it, since that would have no
+ // effect on the alpha channel. It would also require an sRGB Gray color type, which
+ // doesn't exist currently.
+ if (dstIsSRGB) {
+ pipeline.append(SkRasterPipeline::to_srgb);
+ }
+ storeSwizzle.apply(&pipeline);
+ } else {
+ loadStoreSwizzle.apply(&pipeline);
+ }
+ pipeline.append(store, &dstCtx);
+ pipeline.run(0, 0, srcInfo.width(), height);
+ srcCtx.pixels = static_cast<char*>(srcCtx.pixels) - srcRB;
+ dstCtx.pixels = static_cast<char*>(dstCtx.pixels) + dstRB;
+ }
+ return true;
+}
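+
+// A minimal usage sketch (illustrative; assumes w/h, valid srcPtr/dstPtr, and GrImageInfo's
+// (colorType, alphaType, colorSpace, width, height) constructor): convert a premul RGBA
+// buffer to unpremul BGRA of the same dimensions with tight row bytes and no Y flip.
+//
+//   GrImageInfo srcII(GrColorType::kRGBA_8888, kPremul_SkAlphaType, nullptr, w, h);
+//   GrImageInfo dstII(GrColorType::kBGRA_8888, kUnpremul_SkAlphaType, nullptr, w, h);
+//   bool ok = GrConvertPixels(dstII, dstPtr, dstII.minRowBytes(),
+//                             srcII, srcPtr, srcII.minRowBytes());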
+
+GrColorType SkColorTypeAndFormatToGrColorType(const GrCaps* caps,
+ SkColorType skCT,
+ const GrBackendFormat& format) {
+ GrColorType grCT = SkColorTypeToGrColorType(skCT);
+ // Until we support SRGB in the SkColorType we have to do this manual check here to make sure
+ // we use the correct GrColorType.
+ if (caps->isFormatSRGB(format)) {
+ if (grCT != GrColorType::kRGBA_8888) {
+ return GrColorType::kUnknown;
+ }
+ grCT = GrColorType::kRGBA_8888_SRGB;
+ }
+ return grCT;
+}
diff --git a/gfx/skia/skia/src/gpu/GrDataUtils.h b/gfx/skia/skia/src/gpu/GrDataUtils.h
new file mode 100644
index 0000000000..b51a4ddf40
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDataUtils.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDataUtils_DEFINED
+#define GrDataUtils_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrSwizzle.h"
+
+class GrImageInfo;
+
+size_t GrCompressedDataSize(SkImage::CompressionType, int w, int h);
+
+// Returns a value that can be used to set rowBytes for a transfer of compressed data.
+size_t GrCompressedRowBytes(SkImage::CompressionType, int w);
+
+// Compute the size of the buffer required to hold all the mipLevels of the specified type
+// of data when all rowBytes are tight.
+// Note there may still be padding between the mipLevels to meet alignment requirements.
+size_t GrComputeTightCombinedBufferSize(size_t bytesPerPixel, int baseWidth, int baseHeight,
+ SkTArray<size_t>* individualMipOffsets, int mipLevelCount);
+
+void GrFillInData(GrColorType, int baseWidth, int baseHeight,
+ const SkTArray<size_t>& individualMipOffsets, char* dest, const SkColor4f& color);
+
+void GrFillInCompressedData(SkImage::CompressionType, int width, int height, char* dest,
+ const SkColor4f& color);
+
+// Swizzle param is applied after loading and before converting from srcInfo to dstInfo.
+bool GrConvertPixels(const GrImageInfo& dstInfo, void* dst, size_t dstRB,
+ const GrImageInfo& srcInfo, const void* src, size_t srcRB,
+ bool flipY = false);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp
new file mode 100644
index 0000000000..09a6846649
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+/*
+ * The default Geometry Processor simply takes position and multiplies it by the uniform view
+ * matrix. It also leaves coverage untouched. Behind the scenes, we may add per vertex color or
+ * local coords.
+ */
+
+enum GPFlag {
+ kColorAttribute_GPFlag = 0x1,
+ kColorAttributeIsSkColor_GPFlag = 0x2,
+ kColorAttributeIsWide_GPFlag = 0x4,
+ kLocalCoordAttribute_GPFlag = 0x8,
+ kCoverageAttribute_GPFlag = 0x10,
+ kCoverageAttributeTweak_GPFlag = 0x20,
+};
+
+class DefaultGeoProc : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps* shaderCaps,
+ uint32_t gpTypeFlags,
+ const SkPMColor4f& color,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ bool localCoordsWillBeRead,
+ uint8_t coverage) {
+ return sk_sp<GrGeometryProcessor>(new DefaultGeoProc(
+ shaderCaps, gpTypeFlags, color, std::move(colorSpaceXform), viewMatrix, localMatrix,
+ coverage, localCoordsWillBeRead));
+ }
+
+ const char* name() const override { return "DefaultGeometryProcessor"; }
+
+ const SkPMColor4f& color() const { return fColor; }
+ bool hasVertexColor() const { return fInColor.isInitialized(); }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool localCoordsWillBeRead() const { return fLocalCoordsWillBeRead; }
+ uint8_t coverage() const { return fCoverage; }
+ bool hasVertexCoverage() const { return fInCoverage.isInitialized(); }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor()
+ : fViewMatrix(SkMatrix::InvalidMatrix())
+ , fColor(SK_PMColor4fILLEGAL)
+ , fCoverage(0xff) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const DefaultGeoProc& gp = args.fGP.cast<DefaultGeoProc>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ bool tweakAlpha = SkToBool(gp.fFlags & kCoverageAttributeTweak_GPFlag);
+ SkASSERT(!tweakAlpha || gp.hasVertexCoverage());
+
+ // Setup pass through color
+ if (gp.hasVertexColor() || tweakAlpha) {
+ GrGLSLVarying varying(kHalf4_GrSLType);
+ varyingHandler->addVarying("color", &varying);
+
+ // There are several optional steps to process the color. Start with the attribute,
+ // or with uniform color (in the case of folding coverage into a uniform color):
+ if (gp.hasVertexColor()) {
+ vertBuilder->codeAppendf("half4 color = %s;", gp.fInColor.name());
+ } else {
+ const char* colorUniformName;
+ fColorUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kHalf4_GrSLType,
+ "Color",
+ &colorUniformName);
+ vertBuilder->codeAppendf("half4 color = %s;", colorUniformName);
+ }
+
+ // For SkColor, do a red/blue swap, possible color space conversion, and premul
+ if (gp.fFlags & kColorAttributeIsSkColor_GPFlag) {
+ vertBuilder->codeAppend("color = color.bgra;");
+
+ if (gp.fColorSpaceXform) {
+ fColorSpaceHelper.emitCode(uniformHandler, gp.fColorSpaceXform.get(),
+ kVertex_GrShaderFlag);
+ SkString xformedColor;
+ vertBuilder->appendColorGamutXform(&xformedColor, "color",
+ &fColorSpaceHelper);
+ vertBuilder->codeAppendf("color = %s;", xformedColor.c_str());
+ }
+
+ vertBuilder->codeAppend("color = half4(color.rgb * color.a, color.a);");
+ }
+
+ // Optionally fold coverage into alpha (color).
+ if (tweakAlpha) {
+ vertBuilder->codeAppendf("color = color * %s;", gp.fInCoverage.name());
+ }
+ vertBuilder->codeAppendf("%s = color;\n", varying.vsOut());
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, varying.fsIn());
+ } else {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.fInPosition.name(),
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ if (gp.fInLocalCoords.isInitialized()) {
+ // emit transforms with explicit local coords
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gp.fInLocalCoords.asShaderVar(),
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+ } else {
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gp.fInPosition.asShaderVar(),
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+ }
+
+ // Setup coverage as pass through
+ if (gp.hasVertexCoverage() && !tweakAlpha) {
+ fragBuilder->codeAppendf("half alpha = 1.0;");
+ varyingHandler->addPassThroughAttribute(gp.fInCoverage, "alpha");
+ fragBuilder->codeAppendf("%s = half4(alpha);", args.fOutputCoverage);
+ } else if (gp.coverage() == 0xff) {
+ fragBuilder->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ } else {
+ const char* fragCoverage;
+ fCoverageUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf_GrSLType,
+ "Coverage",
+ &fragCoverage);
+ fragBuilder->codeAppendf("%s = half4(%s);", args.fOutputCoverage, fragCoverage);
+ }
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DefaultGeoProc& def = gp.cast<DefaultGeoProc>();
+ uint32_t key = def.fFlags;
+ key |= (def.coverage() == 0xff) ? 0x80 : 0;
+ key |= (def.localCoordsWillBeRead() && def.localMatrix().hasPerspective()) ? 0x100 : 0;
+ key |= ComputePosKey(def.viewMatrix()) << 20;
+ b->add32(key);
+ b->add32(GrColorSpaceXform::XformKey(def.fColorSpaceXform.get()));
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const DefaultGeoProc& dgp = gp.cast<DefaultGeoProc>();
+
+ if (!dgp.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dgp.viewMatrix())) {
+ fViewMatrix = dgp.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (!dgp.hasVertexColor() && dgp.color() != fColor) {
+ pdman.set4fv(fColorUniform, 1, dgp.color().vec());
+ fColor = dgp.color();
+ }
+
+ if (dgp.coverage() != fCoverage && !dgp.hasVertexCoverage()) {
+ pdman.set1f(fCoverageUniform, GrNormalizeByteToFloat(dgp.coverage()));
+ fCoverage = dgp.coverage();
+ }
+ this->setTransformDataHelper(dgp.fLocalMatrix, pdman, &transformIter);
+
+ fColorSpaceHelper.setData(pdman, dgp.fColorSpaceXform.get());
+ }
+
+ private:
+ SkMatrix fViewMatrix;
+ SkPMColor4f fColor;
+ uint8_t fCoverage;
+ UniformHandle fViewMatrixUniform;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageUniform;
+ GrGLSLColorSpaceXformHelper fColorSpaceHelper;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ DefaultGeoProc(const GrShaderCaps* shaderCaps,
+ uint32_t gpTypeFlags,
+ const SkPMColor4f& color,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ uint8_t coverage,
+ bool localCoordsWillBeRead)
+ : INHERITED(kDefaultGeoProc_ClassID)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fCoverage(coverage)
+ , fFlags(gpTypeFlags)
+ , fLocalCoordsWillBeRead(localCoordsWillBeRead)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ if (fFlags & kColorAttribute_GPFlag) {
+ fInColor = MakeColorAttribute("inColor",
+ SkToBool(fFlags & kColorAttributeIsWide_GPFlag));
+ }
+ if (fFlags & kLocalCoordAttribute_GPFlag) {
+ fInLocalCoords = {"inLocalCoord", kFloat2_GrVertexAttribType,
+ kFloat2_GrSLType};
+ }
+ if (fFlags & kCoverageAttribute_GPFlag) {
+ fInCoverage = {"inCoverage", kFloat_GrVertexAttribType, kHalf_GrSLType};
+ }
+ this->setVertexAttributes(&fInPosition, 4);
+ }
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInLocalCoords;
+ Attribute fInCoverage;
+ SkPMColor4f fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ uint8_t fCoverage;
+ uint32_t fFlags;
+ bool fLocalCoordsWillBeRead;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DefaultGeoProc);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> DefaultGeoProc::TestCreate(GrProcessorTestData* d) {
+ uint32_t flags = 0;
+ if (d->fRandom->nextBool()) {
+ flags |= kColorAttribute_GPFlag;
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kColorAttributeIsSkColor_GPFlag;
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kColorAttributeIsWide_GPFlag;
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kCoverageAttribute_GPFlag;
+ if (d->fRandom->nextBool()) {
+ flags |= kCoverageAttributeTweak_GPFlag;
+ }
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kLocalCoordAttribute_GPFlag;
+ }
+
+ return DefaultGeoProc::Make(d->caps()->shaderCaps(),
+ flags,
+ SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ GrTest::TestColorXform(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool(),
+ GrRandomCoverage(d->fRandom));
+}
+#endif
+
+sk_sp<GrGeometryProcessor> GrDefaultGeoProcFactory::Make(const GrShaderCaps* shaderCaps,
+ const Color& color,
+ const Coverage& coverage,
+ const LocalCoords& localCoords,
+ const SkMatrix& viewMatrix) {
+ uint32_t flags = 0;
+ if (Color::kPremulGrColorAttribute_Type == color.fType) {
+ flags |= kColorAttribute_GPFlag;
+ } else if (Color::kUnpremulSkColorAttribute_Type == color.fType) {
+ flags |= kColorAttribute_GPFlag | kColorAttributeIsSkColor_GPFlag;
+ } else if (Color::kPremulWideColorAttribute_Type == color.fType) {
+ flags |= kColorAttribute_GPFlag | kColorAttributeIsWide_GPFlag;
+ }
+ if (Coverage::kAttribute_Type == coverage.fType) {
+ flags |= kCoverageAttribute_GPFlag;
+ } else if (Coverage::kAttributeTweakAlpha_Type == coverage.fType) {
+ flags |= kCoverageAttribute_GPFlag | kCoverageAttributeTweak_GPFlag;
+ }
+ flags |= localCoords.fType == LocalCoords::kHasExplicit_Type ? kLocalCoordAttribute_GPFlag : 0;
+
+ uint8_t inCoverage = coverage.fCoverage;
+ bool localCoordsWillBeRead = localCoords.fType != LocalCoords::kUnused_Type;
+
+ return DefaultGeoProc::Make(shaderCaps,
+ flags,
+ color.fColor,
+ color.fColorSpaceXform,
+ viewMatrix,
+ localCoords.fMatrix ? *localCoords.fMatrix : SkMatrix::I(),
+ localCoordsWillBeRead,
+ inCoverage);
+}
+
+sk_sp<GrGeometryProcessor> GrDefaultGeoProcFactory::MakeForDeviceSpace(
+ const GrShaderCaps* shaderCaps,
+ const Color& color,
+ const Coverage& coverage,
+ const LocalCoords& localCoords,
+ const SkMatrix& viewMatrix) {
+ SkMatrix invert = SkMatrix::I();
+ if (LocalCoords::kUnused_Type != localCoords.fType) {
+ SkASSERT(LocalCoords::kUsePosition_Type == localCoords.fType);
+ if (!viewMatrix.isIdentity() && !viewMatrix.invert(&invert)) {
+ return nullptr;
+ }
+
+ if (localCoords.hasLocalMatrix()) {
+ invert.postConcat(*localCoords.fMatrix);
+ }
+ }
+
+ LocalCoords inverted(LocalCoords::kUsePosition_Type, &invert);
+ return Make(shaderCaps, color, coverage, inverted, SkMatrix::I());
+}
diff --git a/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h
new file mode 100644
index 0000000000..fb61567d81
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDefaultGeoProcFactory_DEFINED
+#define GrDefaultGeoProcFactory_DEFINED
+
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrShaderCaps.h"
+
+/*
+ * A factory for creating default Geometry Processors which simply multiply position by the uniform
+ * view matrix and wire through color, coverage, UV coords if requested.
+ */
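+//
+// A minimal usage sketch (illustrative; 'shaderCaps' and 'viewMatrix' are assumed to be
+// supplied by the calling op):
+//
+//   using namespace GrDefaultGeoProcFactory;
+//   sk_sp<GrGeometryProcessor> gp =
+//           Make(shaderCaps, Color(SK_PMColor4fWHITE), Coverage::kSolid_Type,
+//                LocalCoords::kUnused_Type, viewMatrix);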
+namespace GrDefaultGeoProcFactory {
+ struct Color {
+ enum Type {
+ kPremulGrColorUniform_Type,
+ kPremulGrColorAttribute_Type,
+ kPremulWideColorAttribute_Type,
+ kUnpremulSkColorAttribute_Type,
+ };
+ explicit Color(const SkPMColor4f& color)
+ : fType(kPremulGrColorUniform_Type)
+ , fColor(color)
+ , fColorSpaceXform(nullptr) {}
+ Color(Type type)
+ : fType(type)
+ , fColor(SK_PMColor4fILLEGAL)
+ , fColorSpaceXform(nullptr) {
+ SkASSERT(type != kPremulGrColorUniform_Type);
+ }
+
+ Type fType;
+ SkPMColor4f fColor;
+
+ // This only applies to SkColor. Any GrColors are assumed to have been color converted
+ // during paint conversion.
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+ };
+
+ struct Coverage {
+ enum Type {
+ kSolid_Type,
+ kUniform_Type,
+ kAttribute_Type,
+ kAttributeTweakAlpha_Type,
+ };
+ explicit Coverage(uint8_t coverage) : fType(kUniform_Type), fCoverage(coverage) {}
+ Coverage(Type type) : fType(type), fCoverage(0xff) {
+ SkASSERT(type != kUniform_Type);
+ }
+
+ Type fType;
+ uint8_t fCoverage;
+ };
+
+ struct LocalCoords {
+ enum Type {
+ kUnused_Type,
+ kUsePosition_Type,
+ kHasExplicit_Type,
+ kHasTransformed_Type,
+ };
+ LocalCoords(Type type) : fType(type), fMatrix(nullptr) {}
+ LocalCoords(Type type, const SkMatrix* matrix) : fType(type), fMatrix(matrix) {
+ SkASSERT(kUnused_Type != type);
+ }
+ bool hasLocalMatrix() const { return nullptr != fMatrix; }
+
+ Type fType;
+ const SkMatrix* fMatrix;
+ };
+
+ sk_sp<GrGeometryProcessor> Make(const GrShaderCaps*,
+ const Color&,
+ const Coverage&,
+ const LocalCoords&,
+ const SkMatrix& viewMatrix);
+
+ /*
+ * Use this factory to create a GrGeometryProcessor that expects a device space vertex position
+ * attribute. The view matrix must still be provided to compute correctly transformed
+ * coordinates for GrFragmentProcessors. It may fail if the view matrix is not invertible.
+ */
+ sk_sp<GrGeometryProcessor> MakeForDeviceSpace(const GrShaderCaps*,
+ const Color&,
+ const Coverage&,
+ const LocalCoords&,
+ const SkMatrix& viewMatrix);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDeferredProxyUploader.h b/gfx/skia/skia/src/gpu/GrDeferredProxyUploader.h
new file mode 100644
index 0000000000..c0047c0841
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDeferredProxyUploader.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDeferredProxyUploader_DEFINED
+#define GrDeferredProxyUploader_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkSemaphore.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkMakeUnique.h"
+
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+
+/**
+ * GrDeferredProxyUploader assists with threaded generation of textures. Currently used by both
+ * software clip masks, and the software path renderer. The calling code typically needs to store
+ * some additional data (T) for use on the worker thread. GrTDeferredProxyUploader allows storing
+ * such data. The common flow is:
+ *
+ * 1) A GrTDeferredProxyUploader is created, with some payload (eg an SkPath to draw).
+ * The uploader is owned by the proxy that it's going to populate.
+ * 2) A task is created with a pointer to the uploader. A worker thread executes that task, using
+ * the payload data to allocate and fill in the fPixels pixmap.
+ * 3) The worker thread calls signalAndFreeData(), which notifies the main thread that the pixmap
+ * is ready, and then deletes the payload data (which is no longer needed).
+ * 4) In parallel to 2-3, on the main thread... Some op is created that refers to the proxy. When
+ * that op is added to an op list, the op list retains a pointer to the "deferred" proxies.
+ * 5) At flush time, the op list ensures that the deferred proxies are instantiated, then calls
+ * scheduleUpload on those proxies, which calls scheduleUpload on the uploader (below).
+ * 6) scheduleUpload defers the upload even further, by adding an ASAPUpload to the flush.
+ * 7) When the ASAP upload happens, we wait to make sure that the pixels are marked ready
+ * (from step #3 on the worker thread). Then we perform the actual upload to the texture.
+ * Finally, we call resetDeferredUploader, which deletes the uploader object, causing fPixels
+ * to be freed.
+ */
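+//
+// A minimal sketch of steps 1-3 above (the payload type, rasterizing helper, and task
+// group are illustrative assumptions, not part of this header):
+//
+//   auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SkPath>>(path);
+//   taskGroup->add([u = uploader.get()]() {
+//       rasterize_path(u->data(), u->getPixels());  // hypothetical: fills the pixmap
+//       u->signalAndFreeData();                     // pixels ready; payload freed
+//   });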
+class GrDeferredProxyUploader : public SkNoncopyable {
+public:
+ GrDeferredProxyUploader() : fScheduledUpload(false), fWaited(false) {}
+
+ virtual ~GrDeferredProxyUploader() {
+ // In normal usage (i.e., through GrTDeferredProxyUploader) this will be redundant
+ this->wait();
+ }
+
+ void scheduleUpload(GrOpFlushState* flushState, GrTextureProxy* proxy) {
+ if (fScheduledUpload) {
+ // Multiple references to the owning proxy may have caused us to already execute
+ return;
+ }
+
+ auto uploadMask = [this, proxy](GrDeferredTextureUploadWritePixelsFn& writePixelsFn) {
+ this->wait();
+ GrColorType pixelColorType = SkColorTypeToGrColorType(this->fPixels.info().colorType());
+ // If the worker thread was unable to allocate pixels, this check will fail, and we'll
+ // end up drawing with an uninitialized mask texture, but at least we won't crash.
+ if (this->fPixels.addr()) {
+ writePixelsFn(proxy, 0, 0, this->fPixels.width(), this->fPixels.height(),
+ pixelColorType, this->fPixels.addr(), this->fPixels.rowBytes());
+ }
+ // Upload has finished, so tell the proxy to release this GrDeferredProxyUploader
+ proxy->texPriv().resetDeferredUploader();
+ };
+ flushState->addASAPUpload(std::move(uploadMask));
+ fScheduledUpload = true;
+ }
+
+ void signalAndFreeData() {
+ this->freeData();
+ fPixelsReady.signal();
+ }
+
+ SkAutoPixmapStorage* getPixels() { return &fPixels; }
+
+protected:
+ void wait() {
+ if (!fWaited) {
+ fPixelsReady.wait();
+ fWaited = true;
+ }
+ }
+
+private:
+ virtual void freeData() {}
+
+ SkAutoPixmapStorage fPixels;
+ SkSemaphore fPixelsReady;
+ bool fScheduledUpload;
+ bool fWaited;
+};
+
+template <typename T>
+class GrTDeferredProxyUploader : public GrDeferredProxyUploader {
+public:
+ template <typename... Args>
+ GrTDeferredProxyUploader(Args&&... args)
+ : fData(skstd::make_unique<T>(std::forward<Args>(args)...)) {
+ }
+
+ ~GrTDeferredProxyUploader() override {
+ // We need to wait here, so that we don't free fData before the worker thread is done
+ // with it. (This happens if the proxy is deleted early due to a full clear or failure
+ // of an op list to instantiate).
+ this->wait();
+ }
+
+ T& data() { return *fData; }
+
+private:
+ void freeData() override {
+ fData.reset();
+ }
+
+ std::unique_ptr<T> fData;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDeferredUpload.h b/gfx/skia/skia/src/gpu/GrDeferredUpload.h
new file mode 100644
index 0000000000..887f853d7b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDeferredUpload.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDeferredUpload_DEFINED
+#define GrDeferredUpload_DEFINED
+
+#include <functional>
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrTextureProxy;
+
+/**
+ * A word about deferred uploads and tokens: Ops should usually schedule their uploads to occur at
+ * the beginning of a frame whenever possible. These are called ASAP uploads. Of course, this
+ * requires that there are no draws that have yet to be flushed that rely on the old texture
+ * contents. In that case the ASAP upload would happen prior to the draw and therefore the draw
+ * would read the new (wrong) texture data. When this read-before-write data hazard exists they
+ * should schedule an inline upload.
+ *
+ * Ops, in conjunction with helpers such as GrDrawOpAtlas, use upload tokens to know what the most
+ * recent draw was that referenced a resource (or portion of a resource). Each draw is assigned a
+ * token. A resource (or portion thereof) can be tagged with the most recent reading draw's token.
+ * The deferred uploads target provides a facility for testing whether the draw corresponding to the
+ * token has been flushed. If it has not been flushed then the op must perform an inline upload
+ * instead so that the upload occurs after the draw depending on the old contents and before the
+ * draw depending on the updated contents. When scheduling an inline upload the op provides the
+ * token of the draw that the upload must occur before.
+ */
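+//
+// Sketch of the choice an op typically makes (the token variable below is illustrative,
+// not part of this API). A draw token t has been flushed iff t < nextTokenToFlush():
+//
+//   if (lastTokenThatReadOldContents >= target->tokenTracker()->nextTokenToFlush()) {
+//       target->addInlineUpload(std::move(upload));  // hazard: that draw isn't flushed yet
+//   } else {
+//       target->addASAPUpload(std::move(upload));    // safe to upload at flush start
+//   }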
+
+/**
+ * GrDeferredUploadToken is used to sequence the uploads relative to each other and to draws.
+ */
+class GrDeferredUploadToken {
+public:
+ static GrDeferredUploadToken AlreadyFlushedToken() { return GrDeferredUploadToken(0); }
+
+ GrDeferredUploadToken(const GrDeferredUploadToken&) = default;
+ GrDeferredUploadToken& operator=(const GrDeferredUploadToken&) = default;
+
+ bool operator==(const GrDeferredUploadToken& that) const {
+ return fSequenceNumber == that.fSequenceNumber;
+ }
+ bool operator!=(const GrDeferredUploadToken& that) const { return !(*this == that); }
+ bool operator<(const GrDeferredUploadToken that) const {
+ return fSequenceNumber < that.fSequenceNumber;
+ }
+ bool operator<=(const GrDeferredUploadToken that) const {
+ return fSequenceNumber <= that.fSequenceNumber;
+ }
+ bool operator>(const GrDeferredUploadToken that) const {
+ return fSequenceNumber > that.fSequenceNumber;
+ }
+ bool operator>=(const GrDeferredUploadToken that) const {
+ return fSequenceNumber >= that.fSequenceNumber;
+ }
+
+ GrDeferredUploadToken& operator++() {
+ ++fSequenceNumber;
+ return *this;
+ }
+ GrDeferredUploadToken operator++(int) {
+ auto old = fSequenceNumber;
+ ++fSequenceNumber;
+ return GrDeferredUploadToken(old);
+ }
+
+ GrDeferredUploadToken next() const { return GrDeferredUploadToken(fSequenceNumber + 1); }
+
+ /** Is this token in the [start, end] inclusive interval? */
+ bool inInterval(const GrDeferredUploadToken& start, const GrDeferredUploadToken& end) {
+ return *this >= start && *this <= end;
+ }
+
+private:
+ GrDeferredUploadToken() = delete;
+ explicit GrDeferredUploadToken(uint64_t sequenceNumber) : fSequenceNumber(sequenceNumber) {}
+ uint64_t fSequenceNumber;
+};
+
+/*
+ * The GrTokenTracker encapsulates the incrementing and distribution of tokens.
+ */
+class GrTokenTracker {
+public:
+ /** Gets the token one beyond the last token that has been flushed. */
+ GrDeferredUploadToken nextTokenToFlush() const { return fLastFlushedToken.next(); }
+
+ /** Gets the next draw token that will be issued by this target. This can be used by an op
+ to record that the next draw it issues will use a resource (e.g. texture) while preparing
+ that draw. */
+ GrDeferredUploadToken nextDrawToken() const { return fLastIssuedToken.next(); }
+
+private:
+ // Only these three classes get to increment the token counters
+ friend class SkInternalAtlasTextContext;
+ friend class GrOpFlushState;
+ friend class TestingUploadTarget;
+
+ /** Issues the next token for a draw. */
+ GrDeferredUploadToken issueDrawToken() { return ++fLastIssuedToken; }
+
+ /** Advances the last flushed token by one. */
+ GrDeferredUploadToken flushToken() { return ++fLastFlushedToken; }
+
+ GrDeferredUploadToken fLastIssuedToken = GrDeferredUploadToken::AlreadyFlushedToken();
+ GrDeferredUploadToken fLastFlushedToken = GrDeferredUploadToken::AlreadyFlushedToken();
+};
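+
+/*
+ * A quick illustration of the tracker's semantics (illustrative numbers, not API):
+ * starting from AlreadyFlushedToken(), after three calls to issueDrawToken() and one
+ * call to flushToken(), nextTokenToFlush() holds sequence number 2 and nextDrawToken()
+ * holds sequence number 4.
+ */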
+
+/**
+ * Passed to a deferred upload when it is executed, this method allows the deferred upload to
+ * actually write its pixel data into a texture.
+ */
+using GrDeferredTextureUploadWritePixelsFn =
+ std::function<bool(GrTextureProxy*, int left, int top, int width, int height,
+ GrColorType srcColorType, const void* buffer, size_t rowBytes)>;
+
+/**
+ * A deferred texture upload is simply a std::function that takes a
+ * GrDeferredTextureUploadWritePixelsFn as a parameter. It is called when it should perform its
+ * upload as the draw/upload sequence is executed.
+ */
+using GrDeferredTextureUploadFn = std::function<void(GrDeferredTextureUploadWritePixelsFn&)>;
+
+/**
+ * An interface for scheduling deferred uploads. It accepts asap and deferred inline uploads.
+ */
+class GrDeferredUploadTarget {
+public:
+ virtual ~GrDeferredUploadTarget() {}
+
+ virtual const GrTokenTracker* tokenTracker() = 0;
+
+ /** Returns the token of the draw that this upload will occur before. */
+ virtual GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) = 0;
+
+ /** Returns the token of the draw that this upload will occur before. Since ASAP uploads
+ are done first during a flush, this will be the first token since the most recent
+ flush. */
+ virtual GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&& upload) = 0;
+};
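+
+/*
+ * Illustrative sketch of how an op might choose between the two upload modes
+ * (hypothetical helper, not part of this header): if the last draw that read the old
+ * texels has already been flushed, the update can be scheduled ASAP; otherwise it must
+ * be inline.
+ *
+ *   GrDeferredUploadToken scheduleUpload(GrDeferredUploadTarget* target,
+ *                                        GrDeferredUploadToken lastReadToken,
+ *                                        GrDeferredTextureUploadFn&& upload) {
+ *       if (lastReadToken < target->tokenTracker()->nextTokenToFlush()) {
+ *           return target->addASAPUpload(std::move(upload));
+ *       }
+ *       return target->addInlineUpload(std::move(upload));
+ *   }
+ */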
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.cpp b/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.cpp
new file mode 100644
index 0000000000..29aa16dcb0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.cpp
@@ -0,0 +1,870 @@
+/*
+ * Copyright 2017 ARM Ltd.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/gpu/GrDistanceFieldGenFromVector.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/gpu/GrConfig.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+
+/**
+ * If a scanline (a row of texels) crosses from the kRight_SegSide
+ * of a segment to the kLeft_SegSide, the winding score increases
+ * by 1; if it crosses from kLeft_SegSide to kRight_SegSide, the
+ * winding score decreases by 1.
+ * kNA_SegSide is always returned if the scanline does not cross
+ * the segment, and the winding score is zero in that case.
+ * The winding number for each texel of the scanline is obtained
+ * by accumulating the winding scores from left to right.
+ * Since we always start from outside the path, the winding number
+ * always starts at zero.
+ * ________ ________
+ * | | | |
+ * ...R|L......L|R.....L|R......R|L..... <= Scanline & side of segment
+ * |+1 |-1 |-1 |+1 <= Winding score
+ * 0 | 1 ^ 0 ^ -1 |0 <= Winding number
+ * |________| |________|
+ *
+ * .......NA................NA..........
+ * 0 0
+ */
+enum SegSide {
+ kLeft_SegSide = -1,
+ kOn_SegSide = 0,
+ kRight_SegSide = 1,
+ kNA_SegSide = 2,
+};
+
+struct DFData {
+ float fDistSq; // distance squared to nearest (so far) edge
+    int   fDeltaWindingScore; // +1 or -1 whenever a scanline crosses over a segment
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Type definition for double precision DPoint and DAffineMatrix
+ */
+
+// Point with double precision
+struct DPoint {
+ double fX, fY;
+
+ static DPoint Make(double x, double y) {
+ DPoint pt;
+ pt.set(x, y);
+ return pt;
+ }
+
+ double x() const { return fX; }
+ double y() const { return fY; }
+
+ void set(double x, double y) { fX = x; fY = y; }
+
+    /** Returns the Euclidean distance from (0,0) to (x,y)
+ */
+ static double Length(double x, double y) {
+ return sqrt(x * x + y * y);
+ }
+
+    /** Returns the Euclidean distance between a and b
+ */
+ static double Distance(const DPoint& a, const DPoint& b) {
+ return Length(a.fX - b.fX, a.fY - b.fY);
+ }
+
+ double distanceToSqd(const DPoint& pt) const {
+ double dx = fX - pt.fX;
+ double dy = fY - pt.fY;
+ return dx * dx + dy * dy;
+ }
+};
+
+// Matrix with double precision for affine transformation.
+// We don't store row 3 because it's always (0, 0, 1).
+class DAffineMatrix {
+public:
+ double operator[](int index) const {
+ SkASSERT((unsigned)index < 6);
+ return fMat[index];
+ }
+
+ double& operator[](int index) {
+ SkASSERT((unsigned)index < 6);
+ return fMat[index];
+ }
+
+ void setAffine(double m11, double m12, double m13,
+ double m21, double m22, double m23) {
+ fMat[0] = m11;
+ fMat[1] = m12;
+ fMat[2] = m13;
+ fMat[3] = m21;
+ fMat[4] = m22;
+ fMat[5] = m23;
+ }
+
+ /** Set the matrix to identity
+ */
+ void reset() {
+ fMat[0] = fMat[4] = 1.0;
+ fMat[1] = fMat[3] =
+ fMat[2] = fMat[5] = 0.0;
+ }
+
+ // alias for reset()
+ void setIdentity() { this->reset(); }
+
+ DPoint mapPoint(const SkPoint& src) const {
+ DPoint pt = DPoint::Make(src.x(), src.y());
+ return this->mapPoint(pt);
+ }
+
+ DPoint mapPoint(const DPoint& src) const {
+ return DPoint::Make(fMat[0] * src.x() + fMat[1] * src.y() + fMat[2],
+ fMat[3] * src.x() + fMat[4] * src.y() + fMat[5]);
+ }
+private:
+ double fMat[6];
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const double kClose = (SK_Scalar1 / 16.0);
+static const double kCloseSqd = kClose * kClose;
+static const double kNearlyZero = (SK_Scalar1 / (1 << 18));
+static const double kTangentTolerance = (SK_Scalar1 / (1 << 11));
+static const float kConicTolerance = 0.25f;
+
+static inline bool between_closed_open(double a, double b, double c,
+ double tolerance = 0.0,
+ bool xformToleranceToX = false) {
+ SkASSERT(tolerance >= 0.0);
+ double tolB = tolerance;
+ double tolC = tolerance;
+
+ if (xformToleranceToX) {
+ // Canonical space is y = x^2 and the derivative of x^2 is 2x.
+ // So the slope of the tangent line at point (x, x^2) is 2x.
+ //
+ // /|
+ // sqrt(2x * 2x + 1 * 1) / | 2x
+ // /__|
+ // 1
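+        // For example, at b == 1 the tangent slope is 2, so the x-tolerance
+        // shrinks by a factor of 1/sqrt(5).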
+ tolB = tolerance / sqrt(4.0 * b * b + 1.0);
+ tolC = tolerance / sqrt(4.0 * c * c + 1.0);
+ }
+ return b < c ? (a >= b - tolB && a < c - tolC) :
+ (a >= c - tolC && a < b - tolB);
+}
+
+static inline bool between_closed(double a, double b, double c,
+ double tolerance = 0.0,
+ bool xformToleranceToX = false) {
+ SkASSERT(tolerance >= 0.0);
+ double tolB = tolerance;
+ double tolC = tolerance;
+
+ if (xformToleranceToX) {
+ tolB = tolerance / sqrt(4.0 * b * b + 1.0);
+ tolC = tolerance / sqrt(4.0 * c * c + 1.0);
+ }
+ return b < c ? (a >= b - tolB && a <= c + tolC) :
+ (a >= c - tolC && a <= b + tolB);
+}
+
+static inline bool nearly_zero(double x, double tolerance = kNearlyZero) {
+ SkASSERT(tolerance >= 0.0);
+ return fabs(x) <= tolerance;
+}
+
+static inline bool nearly_equal(double x, double y,
+ double tolerance = kNearlyZero,
+ bool xformToleranceToX = false) {
+ SkASSERT(tolerance >= 0.0);
+ if (xformToleranceToX) {
+ tolerance = tolerance / sqrt(4.0 * y * y + 1.0);
+ }
+ return fabs(x - y) <= tolerance;
+}
+
+static inline double sign_of(const double &val) {
+ return (val < 0.0) ? -1.0 : 1.0;
+}
+
+static bool is_colinear(const SkPoint pts[3]) {
+ return nearly_zero((pts[1].y() - pts[0].y()) * (pts[1].x() - pts[2].x()) -
+ (pts[1].y() - pts[2].y()) * (pts[1].x() - pts[0].x()), kCloseSqd);
+}
+
+class PathSegment {
+public:
+ enum {
+ // These enum values are assumed in member functions below.
+ kLine = 0,
+ kQuad = 1,
+ } fType;
+
+ // line uses 2 pts, quad uses 3 pts
+ SkPoint fPts[3];
+
+ DPoint fP0T, fP2T;
+ DAffineMatrix fXformMatrix;
+ double fScalingFactor;
+ double fScalingFactorSqd;
+ double fNearlyZeroScaled;
+ double fTangentTolScaledSqd;
+ SkRect fBoundingBox;
+
+ void init();
+
+ int countPoints() {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fType + 2;
+ }
+
+ const SkPoint& endPt() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fPts[fType + 1];
+ }
+};
+
+typedef SkTArray<PathSegment, true> PathSegmentArray;
+
+void PathSegment::init() {
+ const DPoint p0 = DPoint::Make(fPts[0].x(), fPts[0].y());
+ const DPoint p2 = DPoint::Make(this->endPt().x(), this->endPt().y());
+ const double p0x = p0.x();
+ const double p0y = p0.y();
+ const double p2x = p2.x();
+ const double p2y = p2.y();
+
+ fBoundingBox.set(fPts[0], this->endPt());
+
+ if (fType == PathSegment::kLine) {
+ fScalingFactorSqd = fScalingFactor = 1.0;
+ double hypotenuse = DPoint::Distance(p0, p2);
+
+ const double cosTheta = (p2x - p0x) / hypotenuse;
+ const double sinTheta = (p2y - p0y) / hypotenuse;
+
+ fXformMatrix.setAffine(
+ cosTheta, sinTheta, -(cosTheta * p0x) - (sinTheta * p0y),
+ -sinTheta, cosTheta, (sinTheta * p0x) - (cosTheta * p0y)
+ );
+ } else {
+ SkASSERT(fType == PathSegment::kQuad);
+
+ // Calculate bounding box
+ const SkPoint _P1mP0 = fPts[1] - fPts[0];
+ SkPoint t = _P1mP0 - fPts[2] + fPts[1];
+ t.fX = _P1mP0.x() / t.x();
+ t.fY = _P1mP0.y() / t.y();
+ t.fX = SkScalarClampMax(t.x(), 1.0);
+ t.fY = SkScalarClampMax(t.y(), 1.0);
+ t.fX = _P1mP0.x() * t.x();
+ t.fY = _P1mP0.y() * t.y();
+ const SkPoint m = fPts[0] + t;
+ SkRectPriv::GrowToInclude(&fBoundingBox, m);
+
+ const double p1x = fPts[1].x();
+ const double p1y = fPts[1].y();
+
+ const double p0xSqd = p0x * p0x;
+ const double p0ySqd = p0y * p0y;
+ const double p2xSqd = p2x * p2x;
+ const double p2ySqd = p2y * p2y;
+ const double p1xSqd = p1x * p1x;
+ const double p1ySqd = p1y * p1y;
+
+ const double p01xProd = p0x * p1x;
+ const double p02xProd = p0x * p2x;
+ const double b12xProd = p1x * p2x;
+ const double p01yProd = p0y * p1y;
+ const double p02yProd = p0y * p2y;
+ const double b12yProd = p1y * p2y;
+
+ const double sqrtA = p0y - (2.0 * p1y) + p2y;
+ const double a = sqrtA * sqrtA;
+ const double h = -1.0 * (p0y - (2.0 * p1y) + p2y) * (p0x - (2.0 * p1x) + p2x);
+ const double sqrtB = p0x - (2.0 * p1x) + p2x;
+ const double b = sqrtB * sqrtB;
+ const double c = (p0xSqd * p2ySqd) - (4.0 * p01xProd * b12yProd)
+ - (2.0 * p02xProd * p02yProd) + (4.0 * p02xProd * p1ySqd)
+ + (4.0 * p1xSqd * p02yProd) - (4.0 * b12xProd * p01yProd)
+ + (p2xSqd * p0ySqd);
+ const double g = (p0x * p02yProd) - (2.0 * p0x * p1ySqd)
+ + (2.0 * p0x * b12yProd) - (p0x * p2ySqd)
+ + (2.0 * p1x * p01yProd) - (4.0 * p1x * p02yProd)
+ + (2.0 * p1x * b12yProd) - (p2x * p0ySqd)
+ + (2.0 * p2x * p01yProd) + (p2x * p02yProd)
+ - (2.0 * p2x * p1ySqd);
+ const double f = -((p0xSqd * p2y) - (2.0 * p01xProd * p1y)
+ - (2.0 * p01xProd * p2y) - (p02xProd * p0y)
+ + (4.0 * p02xProd * p1y) - (p02xProd * p2y)
+ + (2.0 * p1xSqd * p0y) + (2.0 * p1xSqd * p2y)
+ - (2.0 * b12xProd * p0y) - (2.0 * b12xProd * p1y)
+ + (p2xSqd * p0y));
+
+ const double cosTheta = sqrt(a / (a + b));
+ const double sinTheta = -1.0 * sign_of((a + b) * h) * sqrt(b / (a + b));
+
+ const double gDef = cosTheta * g - sinTheta * f;
+ const double fDef = sinTheta * g + cosTheta * f;
+
+ const double x0 = gDef / (a + b);
+ const double y0 = (1.0 / (2.0 * fDef)) * (c - (gDef * gDef / (a + b)));
+
+ const double lambda = -1.0 * ((a + b) / (2.0 * fDef));
+ fScalingFactor = fabs(1.0 / lambda);
+ fScalingFactorSqd = fScalingFactor * fScalingFactor;
+
+ const double lambda_cosTheta = lambda * cosTheta;
+ const double lambda_sinTheta = lambda * sinTheta;
+
+ fXformMatrix.setAffine(
+ lambda_cosTheta, -lambda_sinTheta, lambda * x0,
+ lambda_sinTheta, lambda_cosTheta, lambda * y0
+ );
+ }
+
+ fNearlyZeroScaled = kNearlyZero / fScalingFactor;
+ fTangentTolScaledSqd = kTangentTolerance * kTangentTolerance / fScalingFactorSqd;
+
+ fP0T = fXformMatrix.mapPoint(p0);
+ fP2T = fXformMatrix.mapPoint(p2);
+}
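+
+/*
+ * Worked example of the line-case transform above (illustrative): for a segment from
+ * (0,0) to (3,4) the hypotenuse is 5, so cosTheta == 0.6 and sinTheta == 0.8, and
+ * fXformMatrix maps p0 to (0,0) and p2 to (5,0) -- the segment lies along the +x axis
+ * with its length preserved.
+ */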
+
+static void init_distances(DFData* data, int size) {
+ DFData* currData = data;
+
+ for (int i = 0; i < size; ++i) {
+ // init distance to "far away"
+ currData->fDistSq = SK_DistanceFieldMagnitude * SK_DistanceFieldMagnitude;
+ currData->fDeltaWindingScore = 0;
+ ++currData;
+ }
+}
+
+static inline void add_line_to_segment(const SkPoint pts[2],
+ PathSegmentArray* segments) {
+ segments->push_back();
+ segments->back().fType = PathSegment::kLine;
+ segments->back().fPts[0] = pts[0];
+ segments->back().fPts[1] = pts[1];
+
+ segments->back().init();
+}
+
+static inline void add_quad_segment(const SkPoint pts[3],
+ PathSegmentArray* segments) {
+ if (SkPointPriv::DistanceToSqd(pts[0], pts[1]) < kCloseSqd ||
+ SkPointPriv::DistanceToSqd(pts[1], pts[2]) < kCloseSqd ||
+ is_colinear(pts)) {
+ if (pts[0] != pts[2]) {
+ SkPoint line_pts[2];
+ line_pts[0] = pts[0];
+ line_pts[1] = pts[2];
+ add_line_to_segment(line_pts, segments);
+ }
+ } else {
+ segments->push_back();
+ segments->back().fType = PathSegment::kQuad;
+ segments->back().fPts[0] = pts[0];
+ segments->back().fPts[1] = pts[1];
+ segments->back().fPts[2] = pts[2];
+
+ segments->back().init();
+ }
+}
+
+static inline void add_cubic_segments(const SkPoint pts[4],
+ PathSegmentArray* segments) {
+ SkSTArray<15, SkPoint, true> quads;
+ GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, &quads);
+ int count = quads.count();
+ for (int q = 0; q < count; q += 3) {
+ add_quad_segment(&quads[q], segments);
+ }
+}
+
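+// Derivation sketch for the solver below (commentary only, no new behavior): in
+// canonical space the quad is y = x^2, so the squared distance from (px, py) to a curve
+// point (x, x^2) is (x - px)^2 + (x^2 - py)^2. Setting its derivative to zero yields the
+// depressed cubic
+//     x^3 + (0.5 - py) * x - px/2 = 0,
+// i.e. x^3 + a*x + b = 0 with a = 0.5 - py and b = -px/2 as computed below. The
+// discriminant c = b^2/4 + a^3/27 selects Cardano's formula (one real root) when c >= 0
+// and the trigonometric method (three real roots) otherwise.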
+static float calculate_nearest_point_for_quad(
+ const PathSegment& segment,
+ const DPoint &xFormPt) {
+ static const float kThird = 0.33333333333f;
+ static const float kTwentySeventh = 0.037037037f;
+
+ const float a = 0.5f - (float)xFormPt.y();
+ const float b = -0.5f * (float)xFormPt.x();
+
+ const float a3 = a * a * a;
+ const float b2 = b * b;
+
+ const float c = (b2 * 0.25f) + (a3 * kTwentySeventh);
+
+ if (c >= 0.f) {
+ const float sqrtC = sqrt(c);
+ const float result = (float)cbrt((-b * 0.5f) + sqrtC) + (float)cbrt((-b * 0.5f) - sqrtC);
+ return result;
+ } else {
+ const float cosPhi = (float)sqrt((b2 * 0.25f) * (-27.f / a3)) * ((b > 0) ? -1.f : 1.f);
+ const float phi = (float)acos(cosPhi);
+ float result;
+ if (xFormPt.x() > 0.f) {
+ result = 2.f * (float)sqrt(-a * kThird) * (float)cos(phi * kThird);
+ if (!between_closed(result, segment.fP0T.x(), segment.fP2T.x())) {
+ result = 2.f * (float)sqrt(-a * kThird) * (float)cos((phi * kThird) + (SK_ScalarPI * 2.f * kThird));
+ }
+ } else {
+ result = 2.f * (float)sqrt(-a * kThird) * (float)cos((phi * kThird) + (SK_ScalarPI * 2.f * kThird));
+ if (!between_closed(result, segment.fP0T.x(), segment.fP2T.x())) {
+ result = 2.f * (float)sqrt(-a * kThird) * (float)cos(phi * kThird);
+ }
+ }
+ return result;
+ }
+}
+
+// This structure holds intermediate values shared by all texels in the same row.
+// It is used to calculate which side of a quadratic Bézier segment a texel lies on.
+struct RowData {
+ // The intersection type of a scanline and y = x * x parabola in canonical space.
+ enum IntersectionType {
+ kNoIntersection,
+ kVerticalLine,
+ kTangentLine,
+ kTwoPointsIntersect
+ } fIntersectionType;
+
+ // The direction of the quadratic segment/scanline in the canonical space.
+ // 1: The quadratic segment/scanline going from negative x-axis to positive x-axis.
+ // 0: The scanline is a vertical line in the canonical space.
+ // -1: The quadratic segment/scanline going from positive x-axis to negative x-axis.
+ int fQuadXDirection;
+ int fScanlineXDirection;
+
+    // The y-value (equal to x*x) of the intersection point for the kVerticalLine intersection type.
+ double fYAtIntersection;
+
+ // The x-value for two intersection points.
+ double fXAtIntersection1;
+ double fXAtIntersection2;
+};
+
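+// Derivation sketch for the two-point case below (commentary only): in canonical space
+// the curve is y = x^2 and the scanline is y = m*x + b, so intersections satisfy
+// x^2 - m*x - b = 0, giving x = (m +/- sqrt(m^2 + 4*b)) / 2. The code below names
+// c = m^2 + 4*b and d = sqrt(c), matching fXAtIntersection1 and fXAtIntersection2.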
+void precomputation_for_row(
+ RowData *rowData,
+ const PathSegment& segment,
+ const SkPoint& pointLeft,
+ const SkPoint& pointRight
+ ) {
+ if (segment.fType != PathSegment::kQuad) {
+ return;
+ }
+
+ const DPoint& xFormPtLeft = segment.fXformMatrix.mapPoint(pointLeft);
+ const DPoint& xFormPtRight = segment.fXformMatrix.mapPoint(pointRight);
+
+ rowData->fQuadXDirection = (int)sign_of(segment.fP2T.x() - segment.fP0T.x());
+ rowData->fScanlineXDirection = (int)sign_of(xFormPtRight.x() - xFormPtLeft.x());
+
+ const double x1 = xFormPtLeft.x();
+ const double y1 = xFormPtLeft.y();
+ const double x2 = xFormPtRight.x();
+ const double y2 = xFormPtRight.y();
+
+ if (nearly_equal(x1, x2, segment.fNearlyZeroScaled, true)) {
+ rowData->fIntersectionType = RowData::kVerticalLine;
+ rowData->fYAtIntersection = x1 * x1;
+ rowData->fScanlineXDirection = 0;
+ return;
+ }
+
+ // Line y = mx + b
+ const double m = (y2 - y1) / (x2 - x1);
+ const double b = -m * x1 + y1;
+
+ const double m2 = m * m;
+ const double c = m2 + 4.0 * b;
+
+ const double tol = 4.0 * segment.fTangentTolScaledSqd / (m2 + 1.0);
+
+    // Check whether the scanline is tangent to the curve and whether the curve
+    // starts or ends at the scanline's y-coordinate.
+ if ((rowData->fScanlineXDirection == 1 &&
+ (segment.fPts[0].y() == pointLeft.y() ||
+ segment.fPts[2].y() == pointLeft.y())) &&
+ nearly_zero(c, tol)) {
+ rowData->fIntersectionType = RowData::kTangentLine;
+ rowData->fXAtIntersection1 = m / 2.0;
+ rowData->fXAtIntersection2 = m / 2.0;
+ } else if (c <= 0.0) {
+ rowData->fIntersectionType = RowData::kNoIntersection;
+ return;
+ } else {
+ rowData->fIntersectionType = RowData::kTwoPointsIntersect;
+ const double d = sqrt(c);
+ rowData->fXAtIntersection1 = (m + d) / 2.0;
+ rowData->fXAtIntersection2 = (m - d) / 2.0;
+ }
+}
+
+SegSide calculate_side_of_quad(
+ const PathSegment& segment,
+ const SkPoint& point,
+ const DPoint& xFormPt,
+ const RowData& rowData) {
+ SegSide side = kNA_SegSide;
+
+ if (RowData::kVerticalLine == rowData.fIntersectionType) {
+ side = (SegSide)(int)(sign_of(xFormPt.y() - rowData.fYAtIntersection) * rowData.fQuadXDirection);
+ }
+ else if (RowData::kTwoPointsIntersect == rowData.fIntersectionType) {
+ const double p1 = rowData.fXAtIntersection1;
+ const double p2 = rowData.fXAtIntersection2;
+
+ int signP1 = (int)sign_of(p1 - xFormPt.x());
+ bool includeP1 = true;
+ bool includeP2 = true;
+
+ if (rowData.fScanlineXDirection == 1) {
+ if ((rowData.fQuadXDirection == -1 && segment.fPts[0].y() <= point.y() &&
+ nearly_equal(segment.fP0T.x(), p1, segment.fNearlyZeroScaled, true)) ||
+ (rowData.fQuadXDirection == 1 && segment.fPts[2].y() <= point.y() &&
+ nearly_equal(segment.fP2T.x(), p1, segment.fNearlyZeroScaled, true))) {
+ includeP1 = false;
+ }
+ if ((rowData.fQuadXDirection == -1 && segment.fPts[2].y() <= point.y() &&
+ nearly_equal(segment.fP2T.x(), p2, segment.fNearlyZeroScaled, true)) ||
+ (rowData.fQuadXDirection == 1 && segment.fPts[0].y() <= point.y() &&
+ nearly_equal(segment.fP0T.x(), p2, segment.fNearlyZeroScaled, true))) {
+ includeP2 = false;
+ }
+ }
+
+ if (includeP1 && between_closed(p1, segment.fP0T.x(), segment.fP2T.x(),
+ segment.fNearlyZeroScaled, true)) {
+ side = (SegSide)(signP1 * rowData.fQuadXDirection);
+ }
+ if (includeP2 && between_closed(p2, segment.fP0T.x(), segment.fP2T.x(),
+ segment.fNearlyZeroScaled, true)) {
+ int signP2 = (int)sign_of(p2 - xFormPt.x());
+ if (side == kNA_SegSide || signP2 == 1) {
+ side = (SegSide)(-signP2 * rowData.fQuadXDirection);
+ }
+ }
+ } else if (RowData::kTangentLine == rowData.fIntersectionType) {
+        // The scanline is the tangent line of the current quadratic segment.
+
+ const double p = rowData.fXAtIntersection1;
+ int signP = (int)sign_of(p - xFormPt.x());
+ if (rowData.fScanlineXDirection == 1) {
+            // The path starts or ends at the tangent point.
+ if (segment.fPts[0].y() == point.y()) {
+ side = (SegSide)(signP);
+ } else if (segment.fPts[2].y() == point.y()) {
+ side = (SegSide)(-signP);
+ }
+ }
+ }
+
+ return side;
+}
+
+static float distance_to_segment(const SkPoint& point,
+ const PathSegment& segment,
+ const RowData& rowData,
+ SegSide* side) {
+ SkASSERT(side);
+
+ const DPoint xformPt = segment.fXformMatrix.mapPoint(point);
+
+ if (segment.fType == PathSegment::kLine) {
+ float result = SK_DistanceFieldPad * SK_DistanceFieldPad;
+
+ if (between_closed(xformPt.x(), segment.fP0T.x(), segment.fP2T.x())) {
+ result = (float)(xformPt.y() * xformPt.y());
+ } else if (xformPt.x() < segment.fP0T.x()) {
+ result = (float)(xformPt.x() * xformPt.x() + xformPt.y() * xformPt.y());
+ } else {
+ result = (float)((xformPt.x() - segment.fP2T.x()) * (xformPt.x() - segment.fP2T.x())
+ + xformPt.y() * xformPt.y());
+ }
+
+ if (between_closed_open(point.y(), segment.fBoundingBox.top(),
+ segment.fBoundingBox.bottom())) {
+ *side = (SegSide)(int)sign_of(xformPt.y());
+ } else {
+ *side = kNA_SegSide;
+ }
+ return result;
+ } else {
+ SkASSERT(segment.fType == PathSegment::kQuad);
+
+ const float nearestPoint = calculate_nearest_point_for_quad(segment, xformPt);
+
+ float dist;
+
+ if (between_closed(nearestPoint, segment.fP0T.x(), segment.fP2T.x())) {
+ DPoint x = DPoint::Make(nearestPoint, nearestPoint * nearestPoint);
+ dist = (float)xformPt.distanceToSqd(x);
+ } else {
+ const float distToB0T = (float)xformPt.distanceToSqd(segment.fP0T);
+ const float distToB2T = (float)xformPt.distanceToSqd(segment.fP2T);
+
+ if (distToB0T < distToB2T) {
+ dist = distToB0T;
+ } else {
+ dist = distToB2T;
+ }
+ }
+
+ if (between_closed_open(point.y(), segment.fBoundingBox.top(),
+ segment.fBoundingBox.bottom())) {
+ *side = calculate_side_of_quad(segment, point, xformPt, rowData);
+ } else {
+ *side = kNA_SegSide;
+ }
+
+ return (float)(dist * segment.fScalingFactorSqd);
+ }
+}
+
+static void calculate_distance_field_data(PathSegmentArray* segments,
+ DFData* dataPtr,
+ int width, int height) {
+ int count = segments->count();
+ for (int a = 0; a < count; ++a) {
+ PathSegment& segment = (*segments)[a];
+ const SkRect& segBB = segment.fBoundingBox.makeOutset(
+ SK_DistanceFieldPad, SK_DistanceFieldPad);
+ int startColumn = (int)segBB.left();
+ int endColumn = SkScalarCeilToInt(segBB.right());
+
+ int startRow = (int)segBB.top();
+ int endRow = SkScalarCeilToInt(segBB.bottom());
+
+ SkASSERT((startColumn >= 0) && "StartColumn < 0!");
+ SkASSERT((endColumn <= width) && "endColumn > width!");
+ SkASSERT((startRow >= 0) && "StartRow < 0!");
+ SkASSERT((endRow <= height) && "EndRow > height!");
+
+ // Clip inside the distance field to avoid overflow
+ startColumn = SkTMax(startColumn, 0);
+ endColumn = SkTMin(endColumn, width);
+ startRow = SkTMax(startRow, 0);
+ endRow = SkTMin(endRow, height);
+
+ for (int row = startRow; row < endRow; ++row) {
+ SegSide prevSide = kNA_SegSide;
+ const float pY = row + 0.5f;
+ RowData rowData;
+
+ const SkPoint pointLeft = SkPoint::Make((SkScalar)startColumn, pY);
+ const SkPoint pointRight = SkPoint::Make((SkScalar)endColumn, pY);
+
+ if (between_closed_open(pY, segment.fBoundingBox.top(),
+ segment.fBoundingBox.bottom())) {
+ precomputation_for_row(&rowData, segment, pointLeft, pointRight);
+ }
+
+ for (int col = startColumn; col < endColumn; ++col) {
+ int idx = (row * width) + col;
+
+ const float pX = col + 0.5f;
+ const SkPoint point = SkPoint::Make(pX, pY);
+
+ const float distSq = dataPtr[idx].fDistSq;
+ int dilation = distSq < 1.5 * 1.5 ? 1 :
+ distSq < 2.5 * 2.5 ? 2 :
+ distSq < 3.5 * 3.5 ? 3 : SK_DistanceFieldPad;
+ if (dilation > SK_DistanceFieldPad) {
+ dilation = SK_DistanceFieldPad;
+ }
+
+                // Optimization: skip texels that lie outside the segment's
+                // bounding box grown by the current dilation.
+ if (dilation != SK_DistanceFieldPad && !segment.fBoundingBox.roundOut()
+ .makeOutset(dilation, dilation).contains(col, row)) {
+ continue;
+ }
+
+ SegSide side = kNA_SegSide;
+ int deltaWindingScore = 0;
+ float currDistSq = distance_to_segment(point, segment, rowData, &side);
+ if (prevSide == kLeft_SegSide && side == kRight_SegSide) {
+ deltaWindingScore = -1;
+ } else if (prevSide == kRight_SegSide && side == kLeft_SegSide) {
+ deltaWindingScore = 1;
+ }
+
+ prevSide = side;
+
+ if (currDistSq < distSq) {
+ dataPtr[idx].fDistSq = currDistSq;
+ }
+
+ dataPtr[idx].fDeltaWindingScore += deltaWindingScore;
+ }
+ }
+ }
+}
+
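+// Worked example for the packing below (illustrative, with distanceMagnitude == 4): an
+// input dist of 0 packs to 128; dist == -4 (four texels inside) pins to 4 * 127/128 and
+// packs to 255; dist == +4 (four texels outside) pins to -4 and packs to 0.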
+template <int distanceMagnitude>
+static unsigned char pack_distance_field_val(float dist) {
+    // The distance field is stored as unsigned char values, with zero distance mapping
+    // to 128. Besides 128, there are 128 values in the range [0, 128) but only 127
+    // values in (128, 255], so distanceMagnitude is scaled by 127/128 on the positive
+    // side to avoid overflow.
+ dist = SkScalarPin(-dist, -distanceMagnitude, distanceMagnitude * 127.0f / 128.0f);
+
+ // Scale into the positive range for unsigned distance.
+ dist += distanceMagnitude;
+
+ // Scale into unsigned char range.
+ // Round to place negative and positive values as equally as possible around 128
+ // (which represents zero).
+ return (unsigned char)SkScalarRoundToInt(dist / (2 * distanceMagnitude) * 256.0f);
+}
+
+bool GrGenerateDistanceFieldFromPath(unsigned char* distanceField,
+ const SkPath& path, const SkMatrix& drawMatrix,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(distanceField);
+
+#ifdef SK_DEBUG
+ SkPath xformPath;
+ path.transform(drawMatrix, &xformPath);
+ SkIRect pathBounds = xformPath.getBounds().roundOut();
+ SkIRect expectPathBounds =
+ SkIRect::MakeWH(width - 2 * SK_DistanceFieldPad, height - 2 * SK_DistanceFieldPad);
+#endif
+
+ SkASSERT(expectPathBounds.isEmpty() ||
+ expectPathBounds.contains(pathBounds.x(), pathBounds.y()));
+ SkASSERT(expectPathBounds.isEmpty() || pathBounds.isEmpty() ||
+ expectPathBounds.contains(pathBounds));
+
+ SkPath simplifiedPath;
+ SkPath workingPath;
+ if (Simplify(path, &simplifiedPath)) {
+ workingPath = simplifiedPath;
+ } else {
+ workingPath = path;
+ }
+
+ if (!IsDistanceFieldSupportedFillType(workingPath.getFillType())) {
+ return false;
+ }
+
+ workingPath.transform(drawMatrix);
+
+ SkDEBUGCODE(pathBounds = workingPath.getBounds().roundOut());
+ SkASSERT(expectPathBounds.isEmpty() ||
+ expectPathBounds.contains(pathBounds.x(), pathBounds.y()));
+ SkASSERT(expectPathBounds.isEmpty() || pathBounds.isEmpty() ||
+ expectPathBounds.contains(pathBounds));
+
+ // translate path to offset (SK_DistanceFieldPad, SK_DistanceFieldPad)
+ SkMatrix dfMatrix;
+ dfMatrix.setTranslate(SK_DistanceFieldPad, SK_DistanceFieldPad);
+ workingPath.transform(dfMatrix);
+
+ // create temp data
+ size_t dataSize = width * height * sizeof(DFData);
+ SkAutoSMalloc<1024> dfStorage(dataSize);
+ DFData* dataPtr = (DFData*) dfStorage.get();
+
+ // create initial distance data
+ init_distances(dataPtr, width * height);
+
+ SkPathEdgeIter iter(workingPath);
+ SkSTArray<15, PathSegment, true> segments;
+
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine: {
+ add_line_to_segment(e.fPts, &segments);
+ break;
+ }
+ case SkPathEdgeIter::Edge::kQuad:
+ add_quad_segment(e.fPts, &segments);
+ break;
+ case SkPathEdgeIter::Edge::kConic: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(e.fPts, weight, kConicTolerance);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ add_quad_segment(quadPts + 2*i, &segments);
+ }
+ break;
+ }
+ case SkPathEdgeIter::Edge::kCubic: {
+ add_cubic_segments(e.fPts, &segments);
+ break;
+ }
+ }
+ }
+
+ calculate_distance_field_data(&segments, dataPtr, width, height);
+
+ for (int row = 0; row < height; ++row) {
+        int windingNumber = 0; // Winding number starts at zero for each scanline
+ for (int col = 0; col < width; ++col) {
+ int idx = (row * width) + col;
+ windingNumber += dataPtr[idx].fDeltaWindingScore;
+
+ enum DFSign {
+ kInside = -1,
+ kOutside = 1
+ } dfSign;
+
+ if (workingPath.getFillType() == SkPath::kWinding_FillType) {
+ dfSign = windingNumber ? kInside : kOutside;
+ } else if (workingPath.getFillType() == SkPath::kInverseWinding_FillType) {
+ dfSign = windingNumber ? kOutside : kInside;
+ } else if (workingPath.getFillType() == SkPath::kEvenOdd_FillType) {
+ dfSign = (windingNumber % 2) ? kInside : kOutside;
+ } else {
+ SkASSERT(workingPath.getFillType() == SkPath::kInverseEvenOdd_FillType);
+ dfSign = (windingNumber % 2) ? kOutside : kInside;
+ }
+
+ // The winding number at the end of a scanline should be zero.
+ SkASSERT(((col != width - 1) || (windingNumber == 0)) &&
+ "Winding number should be zero at the end of a scan line.");
+            // In release builds, fall back to SkPath::contains to determine each pixel's sign.
+ if (col == width - 1 && windingNumber != 0) {
+ for (int col = 0; col < width; ++col) {
+ int idx = (row * width) + col;
+ dfSign = workingPath.contains(col + 0.5, row + 0.5) ? kInside : kOutside;
+ const float miniDist = sqrt(dataPtr[idx].fDistSq);
+ const float dist = dfSign * miniDist;
+
+ unsigned char pixelVal = pack_distance_field_val<SK_DistanceFieldMagnitude>(dist);
+
+ distanceField[(row * rowBytes) + col] = pixelVal;
+ }
+ continue;
+ }
+
+ const float miniDist = sqrt(dataPtr[idx].fDistSq);
+ const float dist = dfSign * miniDist;
+
+ unsigned char pixelVal = pack_distance_field_val<SK_DistanceFieldMagnitude>(dist);
+
+ distanceField[(row * rowBytes) + col] = pixelVal;
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.h b/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.h
new file mode 100644
index 0000000000..8e8d1b3413
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDistanceFieldGenFromVector.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 ARM Ltd.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDistanceFieldGenFromVector_DEFINED
+#define GrDistanceFieldGenFromVector_DEFINED
+
+#include "include/core/SkPath.h"
+
+class SkMatrix;
+
+#ifndef SK_USE_LEGACY_DISTANCE_FIELDS
+ #define SK_USE_LEGACY_DISTANCE_FIELDS
+#endif
+
+/** Given a vector path, generate the associated distance field.
+ *
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding defined in "SkDistanceFieldGen.h".
+ * @param path The path we're using to generate the distance field.
+ * @param viewMatrix Transformation matrix for the path.
+ * @param width Width of the distance field.
+ * @param height Height of the distance field.
+ * @param rowBytes Size of each row in the distance field, in bytes.
+ */
+bool GrGenerateDistanceFieldFromPath(unsigned char* distanceField,
+ const SkPath& path, const SkMatrix& viewMatrix,
+ int width, int height, size_t rowBytes);
+
+inline bool IsDistanceFieldSupportedFillType(SkPath::FillType fillType) {
+    return (SkPath::kEvenOdd_FillType == fillType ||
+            SkPath::kInverseEvenOdd_FillType == fillType);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawOpAtlas.cpp b/gfx/skia/skia/src/gpu/GrDrawOpAtlas.cpp
new file mode 100644
index 0000000000..b41d34b141
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawOpAtlas.cpp
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrDrawOpAtlas.h"
+
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRectanizer.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrResourceProviderPriv.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTracing.h"
+
+// When proxy allocation is deferred until flush time the proxies acting as atlases require
+// special handling. This is because the usage that can be determined from the ops themselves
+// isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
+// atlases. Extending the usage interval of any op that uses an atlas to the start of the
+// flush (as is done for proxies that are used for sw-generated masks) also won't work because
+// the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
+// must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
+// (which calls this method).
+void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
+ for (uint32_t i = 0; i < fNumActivePages; ++i) {
+        // All the atlas pages are now instantiated at flush time in the activateNewPage method.
+ SkASSERT(fProxies[i] && fProxies[i]->isInstantiated());
+ }
+}
+
+std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
+ const GrBackendFormat& format,
+ GrColorType colorType, int width,
+ int height, int plotWidth, int plotHeight,
+ AllowMultitexturing allowMultitexturing,
+ GrDrawOpAtlas::EvictionFunc func, void* data) {
+ if (!format.isValid()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType, width,
+ height, plotWidth, plotHeight,
+ allowMultitexturing));
+ if (!atlas->getProxies()[0]) {
+ return nullptr;
+ }
+
+ atlas->registerEvictionCallback(func, data);
+ return atlas;
+}
+
+#ifdef DUMP_ATLAS_DATA
+static bool gDumpAtlasData = false;
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY,
+ int width, int height, GrColorType colorType)
+ : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
+ , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
+ , fFlushesSinceLastUse(0)
+ , fPageIndex(pageIndex)
+ , fPlotIndex(plotIndex)
+ , fGenID(genID)
+ , fID(CreateId(fPageIndex, fPlotIndex, fGenID))
+ , fData(nullptr)
+ , fWidth(width)
+ , fHeight(height)
+ , fX(offX)
+ , fY(offY)
+ , fRects(nullptr)
+ , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
+ , fColorType(colorType)
+ , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
+#ifdef SK_DEBUG
+ , fDirty(false)
+#endif
+{
+ // We expect the allocated dimensions to be a multiple of 4 bytes
+ SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
+ // The padding for faster uploads only works for 1, 2 and 4 byte texels
+ SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
+ fDirtyRect.setEmpty();
+}
+
+GrDrawOpAtlas::Plot::~Plot() {
+ sk_free(fData);
+ delete fRects;
+}
+
+bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image, SkIPoint16* loc) {
+ SkASSERT(width <= fWidth && height <= fHeight);
+
+ if (!fRects) {
+ fRects = GrRectanizer::Factory(fWidth, fHeight);
+ }
+
+ if (!fRects->addRect(width, height, loc)) {
+ return false;
+ }
+
+ if (!fData) {
+ fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
+ fHeight));
+ }
+ size_t rowBytes = width * fBytesPerPixel;
+ const unsigned char* imagePtr = (const unsigned char*)image;
+ // point ourselves at the right starting spot
+ unsigned char* dataPtr = fData;
+ dataPtr += fBytesPerPixel * fWidth * loc->fY;
+ dataPtr += fBytesPerPixel * loc->fX;
+ // copy into the data buffer, swizzling as we go if this is ARGB data
+ if (4 == fBytesPerPixel && kSkia8888_GrPixelConfig == kBGRA_8888_GrPixelConfig) {
+ for (int i = 0; i < height; ++i) {
+ SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
+ dataPtr += fBytesPerPixel * fWidth;
+ imagePtr += rowBytes;
+ }
+ } else {
+ for (int i = 0; i < height; ++i) {
+ memcpy(dataPtr, imagePtr, rowBytes);
+ dataPtr += fBytesPerPixel * fWidth;
+ imagePtr += rowBytes;
+ }
+ }
+
+ fDirtyRect.join({loc->fX, loc->fY, loc->fX + width, loc->fY + height});
+
+ loc->fX += fOffset.fX;
+ loc->fY += fOffset.fY;
+ SkDEBUGCODE(fDirty = true;)
+
+ return true;
+}
+
+void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
+ GrTextureProxy* proxy) {
+ // We should only be issuing uploads if we are in fact dirty
+ SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ size_t rowBytes = fBytesPerPixel * fWidth;
+ const unsigned char* dataPtr = fData;
+ // Clamp to 4-byte aligned boundaries
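+    // e.g., with one byte per pixel, clearBits == 3: fLeft 5 becomes 4 and fRight 9
+    // becomes 12, so each uploaded row starts and ends on a 4-byte boundary.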
+ unsigned int clearBits = 0x3 / fBytesPerPixel;
+ fDirtyRect.fLeft &= ~clearBits;
+ fDirtyRect.fRight += clearBits;
+ fDirtyRect.fRight &= ~clearBits;
+ SkASSERT(fDirtyRect.fRight <= fWidth);
+ // Set up dataPtr
+ dataPtr += rowBytes * fDirtyRect.fTop;
+ dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
+
+ writePixels(proxy, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(), fColorType, dataPtr, rowBytes);
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+}
+
+void GrDrawOpAtlas::Plot::resetRects() {
+ if (fRects) {
+ fRects->reset();
+ }
+
+ fGenID++;
+ fID = CreateId(fPageIndex, fPlotIndex, fGenID);
+ fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
+ fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();
+
+ // zero out the plot
+ if (fData) {
+ sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
+ }
+
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
+ GrColorType colorType, int width, int height,
+ int plotWidth, int plotHeight, AllowMultitexturing allowMultitexturing)
+ : fFormat(format)
+ , fColorType(colorType)
+ , fTextureWidth(width)
+ , fTextureHeight(height)
+ , fPlotWidth(plotWidth)
+ , fPlotHeight(plotHeight)
+ , fAtlasGeneration(kInvalidAtlasGeneration + 1)
+ , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
+ , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
+ , fNumActivePages(0) {
+ int numPlotsX = width/plotWidth;
+ int numPlotsY = height/plotHeight;
+ SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
+ SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
+ SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);
+
+ fNumPlots = numPlotsX * numPlotsY;
+
+ this->createPages(proxyProvider);
+}
+
+inline void GrDrawOpAtlas::processEviction(AtlasID id) {
+ for (int i = 0; i < fEvictionCallbacks.count(); i++) {
+ (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
+ }
+ ++fAtlasGeneration;
+}
+
+inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target, AtlasID* id, Plot* plot) {
+ int pageIdx = GetPageIndexFromID(plot->id());
+ this->makeMRU(plot, pageIdx);
+
+    // If our most recent upload has already occurred then we have to insert a new
+    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
+    // This new update will piggyback on that previously scheduled update.
+ if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
+        // With C++14 we could move the sk_sp into the lambda to ref it only once.
+ sk_sp<Plot> plotsp(SkRef(plot));
+
+ GrTextureProxy* proxy = fProxies[pageIdx].get();
+ SkASSERT(proxy->isInstantiated()); // This is occurring at flush time
+
+ GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
+ [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, proxy);
+ });
+ plot->setLastUploadToken(lastUploadToken);
+ }
+ *id = plot->id();
+ return true;
+}
+
+bool GrDrawOpAtlas::uploadToPage(const GrCaps& caps, unsigned int pageIdx, AtlasID* id,
+ GrDeferredUploadTarget* target, int width, int height,
+ const void* image, SkIPoint16* loc) {
+ SkASSERT(fProxies[pageIdx] && fProxies[pageIdx]->isInstantiated());
+
+    // Look through all allocated plots for one we can share, in most-recently-used order.
+ PlotList::Iter plotIter;
+ plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);
+
+ for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
+ SkASSERT(caps.bytesPerPixel(fProxies[pageIdx]->backendFormat()) == plot->bpp());
+
+ if (plot->addSubImage(width, height, image, loc)) {
+ return this->updatePlot(target, id, plot);
+ }
+ }
+
+ return false;
+}
+
+// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
+//
+// This value is somewhat arbitrary -- the idea is to keep it low enough that
+// a page with unused plots will get removed reasonably quickly, but allow it
+// to hang around for a bit in case it's needed. The assumption is that flushes
+// are rare; i.e., we are not continually refreshing the frame.
+static constexpr auto kRecentlyUsedCount = 256;
+
+GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
+ AtlasID* id, GrDeferredUploadTarget* target,
+ int width, int height,
+ const void* image, SkIPoint16* loc) {
+ if (width > fPlotWidth || height > fPlotHeight) {
+ return ErrorCode::kError;
+ }
+
+ const GrCaps& caps = *resourceProvider->caps();
+
+ // Look through each page to see if we can upload without having to flush
+ // We prioritize this upload to the first pages, not the most recently used, to make it easier
+ // to remove unused pages in reverse page order.
+ for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
+ if (this->uploadToPage(caps, pageIdx, id, target, width, height, image, loc)) {
+ return ErrorCode::kSucceeded;
+ }
+ }
+
+ // If the above fails, then see if the least recently used plot per page has already been
+ // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
+ // We wait until we've grown to the full number of pages to begin evicting already flushed
+ // plots so that we can maximize the opportunity for reuse.
+ // As before we prioritize this upload to the first pages, not the most recently used.
+ if (fNumActivePages == this->maxPages()) {
+ for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
+ Plot* plot = fPages[pageIdx].fPlotList.tail();
+ SkASSERT(plot);
+ if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
+ this->processEvictionAndResetRects(plot);
+ SkASSERT(caps.bytesPerPixel(fProxies[pageIdx]->backendFormat()) == plot->bpp());
+ SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
+ SkASSERT(verify);
+ if (!this->updatePlot(target, id, plot)) {
+ return ErrorCode::kError;
+ }
+ return ErrorCode::kSucceeded;
+ }
+ }
+ } else {
+ // If we haven't activated all the available pages, try to create a new one and add to it
+ if (!this->activateNewPage(resourceProvider)) {
+ return ErrorCode::kError;
+ }
+
+ if (this->uploadToPage(caps, fNumActivePages-1, id, target, width, height, image, loc)) {
+ return ErrorCode::kSucceeded;
+ } else {
+ // If we fail to upload to a newly activated page then something has gone terribly
+ // wrong - return an error
+ return ErrorCode::kError;
+ }
+ }
+
+ if (!fNumActivePages) {
+ return ErrorCode::kError;
+ }
+
+ // Try to find a plot that we can perform an inline upload to.
+ // We prioritize this upload in reverse order of pages to counterbalance the order above.
+ Plot* plot = nullptr;
+ for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
+ Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
+ if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
+ plot = currentPlot;
+ break;
+ }
+ }
+
+ // If we can't find a plot that is not used in a draw currently being prepared by an op, then
+ // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
+ // function. When that draw is enqueued, the draw token advances, and the subsequent call will
+ // continue past this branch and prepare an inline upload that will occur after the enqueued
+ // draw which references the plot's pre-upload content.
+ if (!plot) {
+ return ErrorCode::kTryAgain;
+ }
+
+ this->processEviction(plot->id());
+ int pageIdx = GetPageIndexFromID(plot->id());
+ fPages[pageIdx].fPlotList.remove(plot);
+ sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->index()];
+ newPlot.reset(plot->clone());
+
+ fPages[pageIdx].fPlotList.addToHead(newPlot.get());
+ SkASSERT(caps.bytesPerPixel(fProxies[pageIdx]->backendFormat()) == newPlot->bpp());
+ SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
+ SkASSERT(verify);
+
+ // Note that this plot will be uploaded inline with the draws whereas the
+ // one it displaced most likely was uploaded ASAP.
+    // With C++14 we could move the sk_sp into the lambda to ref it only once.
+ sk_sp<Plot> plotsp(SkRef(newPlot.get()));
+
+ GrTextureProxy* proxy = fProxies[pageIdx].get();
+ SkASSERT(proxy->isInstantiated());
+
+ GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
+ [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, proxy);
+ });
+ newPlot->setLastUploadToken(lastUploadToken);
+
+ *id = newPlot->id();
+
+ return ErrorCode::kSucceeded;
+}
+
+void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
+ if (fNumActivePages <= 1) {
+ fPrevFlushToken = startTokenForNextFlush;
+ return;
+ }
+
+    // For every plot, reset the flushes-since-last-used count if the plot was used this frame.
+ PlotList::Iter plotIter;
+ bool atlasUsedThisFlush = false;
+ for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
+ plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ while (Plot* plot = plotIter.get()) {
+ // Reset number of flushes since used
+ if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
+ plot->resetFlushesSinceLastUsed();
+ atlasUsedThisFlush = true;
+ }
+
+ plotIter.next();
+ }
+ }
+
+ // We only try to compact if the atlas was used in the recently completed flush.
+ // This is to handle the case where a lot of text or path rendering has occurred but then just
+ // a blinking cursor is drawn.
+ // TODO: consider if we should also do this if it's been a long time since the last atlas use
+ if (atlasUsedThisFlush) {
+ SkTArray<Plot*> availablePlots;
+ uint32_t lastPageIndex = fNumActivePages - 1;
+
+ // For all plots but the last one, update number of flushes since used, and check to see
+ // if there are any in the first pages that the last page can safely upload to.
+ for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("page %d: ", pageIndex);
+ }
+#endif
+ plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ while (Plot* plot = plotIter.get()) {
+ // Update number of flushes since plot was last used
+ // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
+ // to avoid deleting everything when we return to text drawing in the blinking
+ // cursor case
+ if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
+ plot->incFlushesSinceLastUsed();
+ }
+
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("%d ", plot->flushesSinceLastUsed());
+ }
+#endif
+ // Count plots we can potentially upload to in all pages except the last one
+ // (the potential compactee).
+ if (plot->flushesSinceLastUsed() > kRecentlyUsedCount) {
+ availablePlots.push_back() = plot;
+ }
+
+ plotIter.next();
+ }
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("\n");
+ }
+#endif
+ }
+
+ // Count recently used plots in the last page and evict any that are no longer in use.
+ // Since we prioritize uploading to the first pages, this will eventually
+ // clear out usage of this page unless we have a large need.
+ plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ unsigned int usedPlots = 0;
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("page %d: ", lastPageIndex);
+ }
+#endif
+ while (Plot* plot = plotIter.get()) {
+ // Update number of flushes since plot was last used
+ if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
+ plot->incFlushesSinceLastUsed();
+ }
+
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("%d ", plot->flushesSinceLastUsed());
+ }
+#endif
+ // If this plot was used recently
+ if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) {
+ usedPlots++;
+ } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
+ // otherwise if aged out just evict it.
+ this->processEvictionAndResetRects(plot);
+ }
+ plotIter.next();
+ }
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("\n");
+ }
+#endif
+
+ // If recently used plots in the last page are using less than a quarter of the page, try
+ // to evict them if there's available space in earlier pages. Since we prioritize uploading
+ // to the first pages, this will eventually clear out usage of this page unless we have a
+ // large need.
+ if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
+ plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ while (Plot* plot = plotIter.get()) {
+ // If this plot was used recently
+ if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) {
+ // See if there's room in an earlier page and if so evict.
+ // We need to be somewhat harsh here so that a handful of plots that are
+ // consistently in use don't end up locking the page in memory.
+ if (availablePlots.count() > 0) {
+ this->processEvictionAndResetRects(plot);
+ this->processEvictionAndResetRects(availablePlots.back());
+ availablePlots.pop_back();
+ --usedPlots;
+ }
+ if (!usedPlots || !availablePlots.count()) {
+ break;
+ }
+ }
+ plotIter.next();
+ }
+ }
+
+ // If none of the plots in the last page have been used recently, delete it.
+ if (!usedPlots) {
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("delete %d\n", fNumPages-1);
+ }
+#endif
+ this->deactivateLastPage();
+ }
+ }
+
+ fPrevFlushToken = startTokenForNextFlush;
+}
+
+bool GrDrawOpAtlas::createPages(GrProxyProvider* proxyProvider) {
+ SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));
+
+ GrSurfaceDesc desc;
+ desc.fWidth = fTextureWidth;
+ desc.fHeight = fTextureHeight;
+ desc.fConfig = GrColorTypeToPixelConfig(fColorType);
+
+ int numPlotsX = fTextureWidth/fPlotWidth;
+ int numPlotsY = fTextureHeight/fPlotHeight;
+
+ for (uint32_t i = 0; i < this->maxPages(); ++i) {
+ fProxies[i] = proxyProvider->createProxy(
+ fFormat, desc, GrRenderable::kNo, 1, kTopLeft_GrSurfaceOrigin, GrMipMapped::kNo,
+ SkBackingFit::kExact, SkBudgeted::kYes, GrProtected::kNo,
+ GrInternalSurfaceFlags::kNone, GrSurfaceProxy::UseAllocator::kNo);
+ if (!fProxies[i]) {
+ return false;
+ }
+
+ // set up allocated plots
+ fPages[i].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);
+
+ sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
+ for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
+ for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
+ uint32_t plotIndex = r * numPlotsX + c;
+ currPlot->reset(new Plot(i, plotIndex, 1, x, y, fPlotWidth, fPlotHeight,
+ fColorType));
+
+ // build LRU list
+ fPages[i].fPlotList.addToHead(currPlot->get());
+ ++currPlot;
+ }
+ }
+
+ }
+
+ return true;
+}
+
+bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
+ SkASSERT(fNumActivePages < this->maxPages());
+
+ if (!fProxies[fNumActivePages]->instantiate(resourceProvider)) {
+ return false;
+ }
+
+#ifdef DUMP_ATLAS_DATA
+ if (gDumpAtlasData) {
+ SkDebugf("activated page#: %d\n", fNumActivePages);
+ }
+#endif
+
+ ++fNumActivePages;
+ return true;
+}
+
+inline void GrDrawOpAtlas::deactivateLastPage() {
+ SkASSERT(fNumActivePages);
+
+ uint32_t lastPageIndex = fNumActivePages - 1;
+
+ int numPlotsX = fTextureWidth/fPlotWidth;
+ int numPlotsY = fTextureHeight/fPlotHeight;
+
+ fPages[lastPageIndex].fPlotList.reset();
+ for (int r = 0; r < numPlotsY; ++r) {
+ for (int c = 0; c < numPlotsX; ++c) {
+ uint32_t plotIndex = r * numPlotsX + c;
+
+ Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
+ currPlot->resetRects();
+ currPlot->resetFlushesSinceLastUsed();
+
+ // rebuild the LRU list
+ SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
+ SkDEBUGCODE(currPlot->fList = nullptr);
+ fPages[lastPageIndex].fPlotList.addToHead(currPlot);
+ }
+ }
+
+ // remove ref to the backing texture
+ fProxies[lastPageIndex]->deinstantiate();
+ --fNumActivePages;
+}
+
+GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
+ static const SkISize kARGBDimensions[] = {
+ {256, 256}, // maxBytes < 2^19
+ {512, 256}, // 2^19 <= maxBytes < 2^20
+ {512, 512}, // 2^20 <= maxBytes < 2^21
+ {1024, 512}, // 2^21 <= maxBytes < 2^22
+ {1024, 1024}, // 2^22 <= maxBytes < 2^23
+ {2048, 1024}, // 2^23 <= maxBytes
+ };
+
+ // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
+ maxBytes >>= 18;
+ // Take the floor of the log to get the index
+ int index = maxBytes > 0
+ ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
+ : 0;
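+    // e.g., maxBytes == 2^21: (2^21 >> 18) == 8 and SkPrevLog2(8) == 3, selecting the
+    // {1024, 512} entry above.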
+
+ SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
+ SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
+ fARGBDimensions.set(SkTMin<int>(kARGBDimensions[index].width(), maxTextureSize),
+ SkTMin<int>(kARGBDimensions[index].height(), maxTextureSize));
+ fMaxTextureSize = SkTMin<int>(maxTextureSize, kMaxAtlasDim);
+}
+
+SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
+ if (kA8_GrMaskFormat == type) {
+ // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
+ return { SkTMin<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
+ SkTMin<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
+ } else {
+ return fARGBDimensions;
+ }
+}
+
+SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
+ if (kA8_GrMaskFormat == type) {
+ SkISize atlasDimensions = this->atlasDimensions(type);
+ // For A8 we want to grow the plots at larger texture sizes to accept more of the
+ // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
+ // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.
+
+ // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
+ // and 256x256 plots otherwise.
+ int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
+ int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;
+
+ return { plotWidth, plotHeight };
+ } else {
+ // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
+ return { 256, 256 };
+ }
+}
+
+constexpr int GrDrawOpAtlasConfig::kMaxAtlasDim;
diff --git a/gfx/skia/skia/src/gpu/GrDrawOpAtlas.h b/gfx/skia/skia/src/gpu/GrDrawOpAtlas.h
new file mode 100644
index 0000000000..5056d68078
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawOpAtlas.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawOpAtlas_DEFINED
+#define GrDrawOpAtlas_DEFINED
+
+#include <cmath>
+
+#include "include/core/SkSize.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkIPoint16.h"
+#include "src/core/SkTInternalLList.h"
+
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrOnFlushResourceProvider;
+class GrRectanizer;
+
+
+/**
+ * This class manages one or more atlas textures on behalf of GrDrawOps. The draw ops that use the
+ * atlas perform texture uploads when preparing their draws during flush. The class provides
+ * facilities for using GrDeferredUploadToken to detect data hazards. Ops' uploads are performed in
+ * "ASAP" mode until it is impossible to add data without overwriting texels read by draws that
+ * have not yet executed on the gpu. At that point, the atlas will attempt to allocate a new
+ * atlas texture (or "page") of the same size, up to a maximum number of textures, and upload
+ * to that texture. If that's not possible, the uploads are performed "inline" between draws. If a
+ * single draw would use enough subimage space to overflow the atlas texture then the atlas will
+ * fail to add a subimage. This gives the op the chance to end the draw and begin a new one.
+ * Additional uploads will then succeed in inline mode.
+ *
+ * When the atlas has multiple pages, new uploads are prioritized to the lower index pages, i.e.,
+ * it will try to upload to page 0 before page 1 or 2. To keep the atlas from continually using
+ * excess space, periodic garbage collection is needed to shift data from the higher index pages to
+ * the lower ones, and then eventually remove any pages that are no longer in use. "In use" is
+ * determined by using the GrDeferredUploadToken system: after a flush each subarea of the page
+ * is checked to see whether it was used in that flush; if it is not, a counter is incremented.
+ * Once that counter reaches a threshold that subarea is considered to be no longer in use.
+ *
+ * Garbage collection is initiated by the GrDrawOpAtlas's client via the compact() method. One
+ * solution is to make the client a subclass of GrOnFlushCallbackObject, register it with the
+ * GrContext via addOnFlushCallbackObject(), and the client's postFlush() method calls compact()
+ * and passes in the given GrDeferredUploadToken.
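+ *
+ * A hedged sketch of that pattern (the owner type and field name below are hypothetical;
+ * the callback signatures mirror how GrDrawingManager invokes them during flush):
+ *
+ *   class AtlasOwner : public GrOnFlushCallbackObject {
+ *   public:
+ *       void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int) override {}
+ *       void postFlush(GrDeferredUploadToken startTokenForNextFlush,
+ *                      const uint32_t*, int) override {
+ *           fAtlas->compact(startTokenForNextFlush);
+ *       }
+ *   private:
+ *       std::unique_ptr<GrDrawOpAtlas> fAtlas;
+ *   };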
+ */
+class GrDrawOpAtlas {
+private:
+ static constexpr auto kMaxMultitexturePages = 4;
+
+
+public:
+ /** Is the atlas allowed to use more than one texture? */
+ enum class AllowMultitexturing : bool { kNo, kYes };
+
+ static constexpr int kMaxPlots = 32; // restricted by the fPlotAlreadyUpdated bitfield
+ // in BulkUseTokenUpdater
+
+ /**
+ * An AtlasID is an opaque handle which callers can use to determine if the atlas contains
+ * a specific piece of data.
+ */
+ typedef uint64_t AtlasID;
+ static const uint32_t kInvalidAtlasID = 0;
+ static const uint64_t kInvalidAtlasGeneration = 0;
+
+ /**
+ * A function pointer for use as a callback during eviction. Whenever GrDrawOpAtlas evicts a
+ * specific AtlasID, it will call all of the registered listeners so they can process the
+ * eviction.
+ */
+ typedef void (*EvictionFunc)(GrDrawOpAtlas::AtlasID, void*);
+
+ /**
+ * Returns a GrDrawOpAtlas. This function can be called anywhere, but the returned atlas
+ * should only be used inside of GrMeshDrawOp::onPrepareDraws.
+ * @param GrColorType The colorType which this atlas will store
+ * @param width width in pixels of the atlas
+ * @param height height in pixels of the atlas
+ * @param plotWidth width in pixels of each plot the atlas is broken into
+ * @param plotHeight height in pixels of each plot the atlas is broken into
+ * @param allowMultitexturing Can the atlas use more than one texture.
+ * @param func An eviction function which will be called whenever the atlas has to
+ * evict data
+ * @param data User supplied data which will be passed into func whenever an
+ * eviction occurs
+ * @return An initialized GrDrawOpAtlas, or nullptr if creation fails
+ */
+ static std::unique_ptr<GrDrawOpAtlas> Make(GrProxyProvider*,
+ const GrBackendFormat& format,
+ GrColorType,
+ int width, int height,
+ int plotWidth, int plotHeight,
+ AllowMultitexturing allowMultitexturing,
+ GrDrawOpAtlas::EvictionFunc func, void* data);
+
+ /**
+ * Adds a width x height subimage to the atlas. Upon success it returns 'kSucceeded' and reports
+ * the ID and the subimage's coordinates in the backing texture. 'kTryAgain' is returned if
+ * the subimage cannot fit in the atlas without overwriting texels that will be read in the
+ * current draw. This indicates that the op should end its current draw and begin another
+ * before adding more data. Upon success, an upload of the provided image data will have
+ * been added to the GrDrawOp::Target, in "asap" mode if possible, otherwise in "inline" mode.
+ * Successive uploads in either mode may be consolidated.
+ * 'kError' will be returned when some unrecoverable error was encountered while trying to
+ * add the subimage. In this case the op being created should be discarded.
+ *
+ * NOTE: When the GrDrawOp prepares a draw that reads from the atlas, it must immediately call
+ * 'setLastUseToken' with the current token from the GrDrawOp::Target, otherwise the next call to
+ * addToAtlas might cause the previous data to be overwritten before it has been read.
+ */
+
+ enum class ErrorCode {
+ kError,
+ kSucceeded,
+ kTryAgain
+ };
+
+ ErrorCode addToAtlas(GrResourceProvider*, AtlasID*, GrDeferredUploadTarget*,
+ int width, int height,
+ const void* image, SkIPoint16* loc);
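+
+    // A hedged usage sketch of the protocol above, as it might appear in an op's prepare
+    // phase ('endDraw()' is a hypothetical helper; 'target' is assumed to expose its
+    // GrTokenTracker via tokenTracker()):
+    //
+    //   GrDrawOpAtlas::AtlasID id;
+    //   SkIPoint16 loc;
+    //   auto code = atlas->addToAtlas(resourceProvider, &id, target, w, h, pixels, &loc);
+    //   if (GrDrawOpAtlas::ErrorCode::kTryAgain == code) {
+    //       this->endDraw();  // end the current draw so the blocked texels can flush
+    //       code = atlas->addToAtlas(resourceProvider, &id, target, w, h, pixels, &loc);
+    //   }
+    //   if (GrDrawOpAtlas::ErrorCode::kSucceeded == code) {
+    //       atlas->setLastUseToken(id, target->tokenTracker()->nextDrawToken());
+    //   }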
+
+ const sk_sp<GrTextureProxy>* getProxies() const { return fProxies; }
+
+ uint64_t atlasGeneration() const { return fAtlasGeneration; }
+
+ inline bool hasID(AtlasID id) {
+ if (kInvalidAtlasID == id) {
+ return false;
+ }
+ uint32_t plot = GetPlotIndexFromID(id);
+ SkASSERT(plot < fNumPlots);
+ uint32_t page = GetPageIndexFromID(id);
+ SkASSERT(page < fNumActivePages);
+ return fPages[page].fPlotArray[plot]->genID() == GetGenerationFromID(id);
+ }
+
+ /** To ensure the atlas does not evict a given entry, the client must set the last use token. */
+ inline void setLastUseToken(AtlasID id, GrDeferredUploadToken token) {
+ SkASSERT(this->hasID(id));
+ uint32_t plotIdx = GetPlotIndexFromID(id);
+ SkASSERT(plotIdx < fNumPlots);
+ uint32_t pageIdx = GetPageIndexFromID(id);
+ SkASSERT(pageIdx < fNumActivePages);
+ Plot* plot = fPages[pageIdx].fPlotArray[plotIdx].get();
+ this->makeMRU(plot, pageIdx);
+ plot->setLastUseToken(token);
+ }
+
+ inline void registerEvictionCallback(EvictionFunc func, void* userData) {
+ EvictionData* data = fEvictionCallbacks.append();
+ data->fFunc = func;
+ data->fData = userData;
+ }
+
+ uint32_t numActivePages() { return fNumActivePages; }
+
+ /**
+ * A class which can be handed back to GrDrawOpAtlas for updating last use tokens in bulk. The
+ * current max number of plots per page the GrDrawOpAtlas can handle is 32. If in the future
+ * this is insufficient then we can move to a 64 bit int.
+ */
+ class BulkUseTokenUpdater {
+ public:
+ BulkUseTokenUpdater() {
+ memset(fPlotAlreadyUpdated, 0, sizeof(fPlotAlreadyUpdated));
+ }
+ BulkUseTokenUpdater(const BulkUseTokenUpdater& that)
+ : fPlotsToUpdate(that.fPlotsToUpdate) {
+ memcpy(fPlotAlreadyUpdated, that.fPlotAlreadyUpdated, sizeof(fPlotAlreadyUpdated));
+ }
+
+ bool add(AtlasID id) {
+ int index = GrDrawOpAtlas::GetPlotIndexFromID(id);
+ int pageIdx = GrDrawOpAtlas::GetPageIndexFromID(id);
+ if (this->find(pageIdx, index)) {
+ return false;
+ }
+ this->set(pageIdx, index);
+ return true;
+ }
+
+ void reset() {
+ fPlotsToUpdate.reset();
+ memset(fPlotAlreadyUpdated, 0, sizeof(fPlotAlreadyUpdated));
+ }
+
+ struct PlotData {
+ PlotData(int pageIdx, int plotIdx) : fPageIndex(pageIdx), fPlotIndex(plotIdx) {}
+ uint32_t fPageIndex;
+ uint32_t fPlotIndex;
+ };
+
+ private:
+ bool find(int pageIdx, int index) const {
+ SkASSERT(index < kMaxPlots);
+ return (fPlotAlreadyUpdated[pageIdx] >> index) & 1;
+ }
+
+ void set(int pageIdx, int index) {
+ SkASSERT(!this->find(pageIdx, index));
+ fPlotAlreadyUpdated[pageIdx] |= (1 << index);
+ fPlotsToUpdate.push_back(PlotData(pageIdx, index));
+ }
+
+ static constexpr int kMinItems = 4;
+ SkSTArray<kMinItems, PlotData, true> fPlotsToUpdate;
+ uint32_t fPlotAlreadyUpdated[kMaxMultitexturePages]; // TODO: increase this to uint64_t
+ // to allow more plots per page
+
+ friend class GrDrawOpAtlas;
+ };
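+
+    // A hedged usage sketch ('glyphs' and 'fAtlasID' are hypothetical): gather the IDs an op
+    // reads, then stamp them with one token instead of many setLastUseToken() calls.
+    //
+    //   GrDrawOpAtlas::BulkUseTokenUpdater updater;
+    //   for (const auto& glyph : glyphs) {
+    //       updater.add(glyph.fAtlasID);
+    //   }
+    //   atlas->setLastUseTokenBulk(updater, drawToken);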
+
+ void setLastUseTokenBulk(const BulkUseTokenUpdater& updater, GrDeferredUploadToken token) {
+ int count = updater.fPlotsToUpdate.count();
+ for (int i = 0; i < count; i++) {
+ const BulkUseTokenUpdater::PlotData& pd = updater.fPlotsToUpdate[i];
+ // it's possible we've added a plot to the updater and subsequently the plot's page
+ // was deleted -- so we check to prevent a crash
+ if (pd.fPageIndex < fNumActivePages) {
+ Plot* plot = fPages[pd.fPageIndex].fPlotArray[pd.fPlotIndex].get();
+ this->makeMRU(plot, pd.fPageIndex);
+ plot->setLastUseToken(token);
+ }
+ }
+ }
+
+ void compact(GrDeferredUploadToken startTokenForNextFlush);
+
+ static uint32_t GetPageIndexFromID(AtlasID id) {
+ return id & 0xff;
+ }
+
+ void instantiate(GrOnFlushResourceProvider*);
+
+ uint32_t maxPages() const {
+ return fMaxPages;
+ }
+
+ int numAllocated_TestingOnly() const;
+ void setMaxPages_TestingOnly(uint32_t maxPages);
+
+private:
+ GrDrawOpAtlas(GrProxyProvider*, const GrBackendFormat& format, GrColorType, int width,
+ int height, int plotWidth, int plotHeight,
+ AllowMultitexturing allowMultitexturing);
+
+ /**
+ * The backing GrTexture for a GrDrawOpAtlas is broken into a spatial grid of Plots. The Plots
+ * keep track of subimage placement via their GrRectanizer. A Plot manages the lifetime of its
+ * data using two tokens, a last use token and a last upload token. Once a Plot is "full" (i.e.
+ * there is no room for the new subimage according to the GrRectanizer), it can no longer be
+ * used unless the last use of the Plot has already been flushed through to the gpu.
+ */
+ class Plot : public SkRefCnt {
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Plot);
+
+ public:
+ /** index() is a unique id for the plot relative to the owning GrAtlas and page. */
+ uint32_t index() const { return fPlotIndex; }
+ /**
+ * genID() is incremented when the plot is evicted due to an atlas spill. It is used to know
+ * if a particular subimage is still present in the atlas.
+ */
+ uint64_t genID() const { return fGenID; }
+ GrDrawOpAtlas::AtlasID id() const {
+ SkASSERT(GrDrawOpAtlas::kInvalidAtlasID != fID);
+ return fID;
+ }
+ SkDEBUGCODE(size_t bpp() const { return fBytesPerPixel; })
+
+ bool addSubImage(int width, int height, const void* image, SkIPoint16* loc);
+
+ /**
+ * To manage the lifetime of a plot, we use two tokens. We use the last upload token to
+ * know when we can 'piggy back' uploads, i.e. if the last upload hasn't been flushed to
+ * the gpu, we don't need to issue a new upload even if we update the cpu backing store. We
+ * use lastUse to determine when we can evict a plot from the cache, i.e. if the last use
+ * has already flushed through the gpu then we can reuse the plot.
+ */
+ GrDeferredUploadToken lastUploadToken() const { return fLastUpload; }
+ GrDeferredUploadToken lastUseToken() const { return fLastUse; }
+ void setLastUploadToken(GrDeferredUploadToken token) { fLastUpload = token; }
+ void setLastUseToken(GrDeferredUploadToken token) { fLastUse = token; }
+
+ void uploadToTexture(GrDeferredTextureUploadWritePixelsFn&, GrTextureProxy*);
+ void resetRects();
+
+ int flushesSinceLastUsed() { return fFlushesSinceLastUse; }
+ void resetFlushesSinceLastUsed() { fFlushesSinceLastUse = 0; }
+ void incFlushesSinceLastUsed() { fFlushesSinceLastUse++; }
+
+ private:
+ Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY,
+ int width, int height, GrColorType colorType);
+
+ ~Plot() override;
+
+ /**
+ * Create a clone of this plot. The cloned plot will take the place of the current plot in
+ * the atlas
+ */
+ Plot* clone() const {
+ return new Plot(fPageIndex, fPlotIndex, fGenID + 1, fX, fY, fWidth, fHeight,
+ fColorType);
+ }
+
+ static GrDrawOpAtlas::AtlasID CreateId(uint32_t pageIdx, uint32_t plotIdx,
+ uint64_t generation) {
+ SkASSERT(pageIdx < (1 << 8));
+ SkASSERT(pageIdx < kMaxMultitexturePages);
+ SkASSERT(plotIdx < (1 << 8));
+ SkASSERT(generation < ((uint64_t)1 << 48));
+ return generation << 16 | plotIdx << 8 | pageIdx;
+ }
+
+ GrDeferredUploadToken fLastUpload;
+ GrDeferredUploadToken fLastUse;
+ // the number of flushes since this plot has been last used
+ int fFlushesSinceLastUse;
+
+ struct {
+ const uint32_t fPageIndex : 16;
+ const uint32_t fPlotIndex : 16;
+ };
+ uint64_t fGenID;
+ GrDrawOpAtlas::AtlasID fID;
+ unsigned char* fData;
+ const int fWidth;
+ const int fHeight;
+ const int fX;
+ const int fY;
+ GrRectanizer* fRects;
+ const SkIPoint16 fOffset; // the offset of the plot in the backing texture
+ const GrColorType fColorType;
+ const size_t fBytesPerPixel;
+ SkIRect fDirtyRect;
+ SkDEBUGCODE(bool fDirty);
+
+ friend class GrDrawOpAtlas;
+
+ typedef SkRefCnt INHERITED;
+ };
+
+ typedef SkTInternalLList<Plot> PlotList;
+
+ static uint32_t GetPlotIndexFromID(AtlasID id) {
+ return (id >> 8) & 0xff;
+ }
+
+ // top 48 bits are reserved for the generation ID
+ static uint64_t GetGenerationFromID(AtlasID id) {
+ return (id >> 16) & 0xffffffffffff;
+ }
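+
+    // Taken together with Plot::CreateId(), the AtlasID bit layout is:
+    //   bits [63..16] generation | bits [15..8] plot index | bits [7..0] page index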
+
+ inline bool updatePlot(GrDeferredUploadTarget*, AtlasID*, Plot*);
+
+ inline void makeMRU(Plot* plot, int pageIdx) {
+ if (fPages[pageIdx].fPlotList.head() == plot) {
+ return;
+ }
+
+ fPages[pageIdx].fPlotList.remove(plot);
+ fPages[pageIdx].fPlotList.addToHead(plot);
+
+ // No MRU update for pages -- since we will always try to add from
+ // the front and remove from the back there is no need for MRU.
+ }
+
+ bool uploadToPage(const GrCaps&, unsigned int pageIdx, AtlasID* id,
+ GrDeferredUploadTarget* target, int width, int height, const void* image,
+ SkIPoint16* loc);
+
+ bool createPages(GrProxyProvider*);
+ bool activateNewPage(GrResourceProvider*);
+ void deactivateLastPage();
+
+ void processEviction(AtlasID);
+ inline void processEvictionAndResetRects(Plot* plot) {
+ this->processEviction(plot->id());
+ plot->resetRects();
+ }
+
+ GrBackendFormat fFormat;
+ GrColorType fColorType;
+ int fTextureWidth;
+ int fTextureHeight;
+ int fPlotWidth;
+ int fPlotHeight;
+ unsigned int fNumPlots;
+
+ uint64_t fAtlasGeneration;
+ // nextTokenToFlush() value at the end of the previous flush
+ GrDeferredUploadToken fPrevFlushToken;
+
+ struct EvictionData {
+ EvictionFunc fFunc;
+ void* fData;
+ };
+
+ SkTDArray<EvictionData> fEvictionCallbacks;
+
+ struct Page {
+ // allocated array of Plots
+ std::unique_ptr<sk_sp<Plot>[]> fPlotArray;
+ // LRU list of Plots (MRU at head - LRU at tail)
+ PlotList fPlotList;
+ };
+ // proxies kept separate to make it easier to pass them up to client
+ sk_sp<GrTextureProxy> fProxies[kMaxMultitexturePages];
+ Page fPages[kMaxMultitexturePages];
+ uint32_t fMaxPages;
+
+ uint32_t fNumActivePages;
+};
+
+// There are three atlases (A8, 565, ARGB) that are kept in relation to one another. In
+// general, the A8 dimensions are 2x the 565 and ARGB dimensions with the constraint that an atlas
+// size will always contain at least one plot. Since the ARGB atlas takes the most space, its
+// dimensions are used to size the other two atlases.
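+//
+// For example (following atlasDimensions() in GrDrawOpAtlas.cpp): with ARGB dimensions of
+// 1024x1024 and a max texture size of 2048, the A8 atlas is sized 2048x2048 while the 565
+// and ARGB atlases remain 1024x1024.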
+class GrDrawOpAtlasConfig {
+public:
+ // The capabilities of the GPU define maxTextureSize. The client provides maxBytes, and this
+ // represents the largest they want a single atlas texture to be. Due to multitexturing, we
+ // may expand temporarily to use more space as needed.
+ GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes);
+
+ // For testing only - make minimum sized atlases -- a single plot for ARGB, four for A8
+ GrDrawOpAtlasConfig() : GrDrawOpAtlasConfig(kMaxAtlasDim, 0) {}
+
+ SkISize atlasDimensions(GrMaskFormat type) const;
+ SkISize plotDimensions(GrMaskFormat type) const;
+
+private:
+ // On some systems texture coordinates are represented using half-precision floating point,
+ // which limits the largest atlas dimensions to 2048x2048.
+ // For simplicity we'll use this constraint for all of our atlas textures.
+ // This can be revisited later if we need larger atlases.
+ static constexpr int kMaxAtlasDim = 2048;
+
+ SkISize fARGBDimensions;
+ int fMaxTextureSize;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawOpTest.cpp b/gfx/skia/skia/src/gpu/GrDrawOpTest.cpp
new file mode 100644
index 0000000000..5b93c3fa4a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawOpTest.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/GrContext_Base.h"
+#include "include/utils/SkRandom.h"
+#include "src/gpu/GrBaseContextPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrUserStencilSettings.h"
+
+#if GR_TEST_UTILS
+
+const GrUserStencilSettings* GrGetRandomStencil(SkRandom* random, GrContext_Base* context) {
+ if (context->priv().caps()->avoidStencilBuffers()) {
+ return &GrUserStencilSettings::kUnused;
+ }
+ static constexpr GrUserStencilSettings kReads(
+ GrUserStencilSettings::StaticInit<
+ 0x8080,
+ GrUserStencilTest::kLess,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+ );
+ static constexpr GrUserStencilSettings kWrites(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>()
+ );
+ static constexpr GrUserStencilSettings kReadsAndWrites(
+ GrUserStencilSettings::StaticInit<
+ 0x8000,
+ GrUserStencilTest::kEqual,
+ 0x6000,
+ GrUserStencilOp::kIncWrap,
+ GrUserStencilOp::kInvert,
+ 0x77ff>()
+ );
+
+ static const GrUserStencilSettings* kStencilSettings[] = {
+ &GrUserStencilSettings::kUnused,
+ &kReads,
+ &kWrites,
+ &kReadsAndWrites,
+ };
+ return kStencilSettings[random->nextULessThan(SK_ARRAY_COUNT(kStencilSettings))];
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawOpTest.h b/gfx/skia/skia/src/gpu/GrDrawOpTest.h
new file mode 100644
index 0000000000..fe8047cbb2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawOpTest.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawOpTest_DEFINED
+#define GrDrawOpTest_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrTestUtils.h"
+
+#if GR_TEST_UTILS
+
+class GrContext_Base;
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+class GrRenderTargetContext;
+struct GrUserStencilSettings;
+class SkRandom;
+
+/** This function draws a randomly configured GrDrawOp for testing purposes. */
+void GrDrawRandomOp(SkRandom*, GrRenderTargetContext*, GrPaint&&);
+
+/** GrDrawOp subclasses should define test factory functions using this macro. */
+#define GR_DRAW_OP_TEST_DEFINE(Op) \
+ std::unique_ptr<GrDrawOp> Op##__Test(GrPaint&& paint, SkRandom* random, \
+ GrRecordingContext* context, int numSamples)
+#define GR_DRAW_OP_TEST_FRIEND(Op) \
+ friend std::unique_ptr<GrDrawOp> Op##__Test(GrPaint&& paint, SkRandom* random, \
+ GrRecordingContext* context, int numSamples)
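+
+/* A hedged sketch of how an op might use these macros ("MyRectOp" and its Make() factory
+ * are hypothetical; TestRect() is assumed from the GrTestUtils.h include above):
+ *
+ *   GR_DRAW_OP_TEST_DEFINE(MyRectOp) {
+ *       SkRect rect = GrTest::TestRect(random);
+ *       return MyRectOp::Make(context, std::move(paint), rect);
+ *   }
+ *
+ * with GR_DRAW_OP_TEST_FRIEND(MyRectOp) placed inside the op's class declaration so the
+ * test factory can reach its private constructor.
+ */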
+
+/** Helper for op test factories to pick a random stencil state. */
+const GrUserStencilSettings* GrGetRandomStencil(SkRandom* random, GrContext_Base*);
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawingManager.cpp b/gfx/skia/skia/src/gpu/GrDrawingManager.cpp
new file mode 100644
index 0000000000..b651700886
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawingManager.cpp
@@ -0,0 +1,939 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrDrawingManager.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include "src/core/SkTTopoSort.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrClientMappedBufferManager.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrCopyRenderTask.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrRenderTask.h"
+#include "src/gpu/GrResourceAllocator.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSoftwarePathRenderer.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/GrTextureResolveRenderTask.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/GrTransferFromRenderTask.h"
+#include "src/gpu/GrWaitRenderTask.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include "src/gpu/text/GrTextContext.h"
+#include "src/image/SkSurface_Gpu.h"
+
+GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
+ : fSortRenderTasks(sortRenderTasks) {}
+
+GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}
+
+void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
+ idArray->reset(fRenderTasks.count());
+ for (int i = 0; i < fRenderTasks.count(); ++i) {
+ if (fRenderTasks[i]) {
+ (*idArray)[i] = fRenderTasks[i]->uniqueID();
+ }
+ }
+}
+
+void GrDrawingManager::RenderTaskDAG::reset() {
+ fRenderTasks.reset();
+}
+
+void GrDrawingManager::RenderTaskDAG::removeRenderTask(int index) {
+ if (!fRenderTasks[index]->unique()) {
+ // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
+ fRenderTasks[index]->endFlush();
+ }
+
+ fRenderTasks[index] = nullptr;
+}
+
+void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) {
+ for (int i = startIndex; i < stopIndex; ++i) {
+ if (!fRenderTasks[i]) {
+ continue;
+ }
+ this->removeRenderTask(i);
+ }
+}
+
+bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
+ for (int i = 0; i < fRenderTasks.count(); ++i) {
+ if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
+ if (renderTask) {
+ return fRenderTasks.emplace_back(std::move(renderTask)).get();
+ }
+ return nullptr;
+}
+
+GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
+ SkASSERT(!fRenderTasks.empty());
+ if (renderTask) {
+ // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
+ // and reallocates during emplace_back.
+ fRenderTasks.emplace_back(fRenderTasks.back().release());
+ return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
+ }
+ return nullptr;
+}
+
+void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
+ fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
+}
+
+void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
+ SkASSERT(renderTasks->empty());
+ renderTasks->swap(fRenderTasks);
+}
+
+void GrDrawingManager::RenderTaskDAG::prepForFlush() {
+ if (fSortRenderTasks) {
+ SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
+ &fRenderTasks);
+ SkASSERT(result);
+ }
+
+#ifdef SK_DEBUG
+ // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
+ // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
+ if (fRenderTasks.count()) {
+ GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
+ for (int i = 1; i < fRenderTasks.count(); ++i) {
+ GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
+
+ if (prevOpsTask && curOpsTask) {
+ SkASSERT(prevOpsTask->fTarget.get() != curOpsTask->fTarget.get());
+ }
+
+ prevOpsTask = curOpsTask;
+ }
+ }
+#endif
+}
+
+void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
+ for (int i = 0; i < fRenderTasks.count(); ++i) {
+ if (fRenderTasks[i]) {
+ fRenderTasks[i]->makeClosed(*caps);
+ }
+ }
+}
+
+void GrDrawingManager::RenderTaskDAG::cleanup(const GrCaps* caps) {
+ for (int i = 0; i < fRenderTasks.count(); ++i) {
+ if (!fRenderTasks[i]) {
+ continue;
+ }
+
+ // no renderTask should receive a dependency
+ fRenderTasks[i]->makeClosed(*caps);
+
+ // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks
+ // after a cleanup.
+ // MDB TODO: is this still true?
+ if (!fRenderTasks[i]->unique()) {
+ // TODO: Eventually this should be guaranteed unique.
+ // https://bugs.chromium.org/p/skia/issues/detail?id=7111
+ fRenderTasks[i]->endFlush();
+ }
+ }
+
+ fRenderTasks.reset();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
+ const GrPathRendererChain::Options& optionsForPathRendererChain,
+ const GrTextContext::Options& optionsForTextContext,
+ bool sortRenderTasks,
+ bool reduceOpsTaskSplitting)
+ : fContext(context)
+ , fOptionsForPathRendererChain(optionsForPathRendererChain)
+ , fOptionsForTextContext(optionsForTextContext)
+ , fDAG(sortRenderTasks)
+ , fTextContext(nullptr)
+ , fPathRendererChain(nullptr)
+ , fSoftwarePathRenderer(nullptr)
+ , fFlushing(false)
+ , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
+}
+
+void GrDrawingManager::cleanup() {
+ fDAG.cleanup(fContext->priv().caps());
+
+ fPathRendererChain = nullptr;
+ fSoftwarePathRenderer = nullptr;
+
+ fOnFlushCBObjects.reset();
+}
+
+GrDrawingManager::~GrDrawingManager() {
+ this->cleanup();
+}
+
+bool GrDrawingManager::wasAbandoned() const {
+ return fContext->priv().abandoned();
+}
+
+void GrDrawingManager::freeGpuResources() {
+ for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
+ if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
+ // it's safe to just do this because we're iterating in reverse
+ fOnFlushCBObjects.removeShuffle(i);
+ }
+ }
+
+ // a path renderer may be holding onto resources
+ fPathRendererChain = nullptr;
+ fSoftwarePathRenderer = nullptr;
+}
+
+// MDB TODO: make use of the 'proxy' parameter.
+GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
+ SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
+ const GrPrepareForExternalIORequests& externalRequests) {
+ SkASSERT(numProxies >= 0);
+ SkASSERT(!numProxies || proxies);
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
+
+ if (fFlushing || this->wasAbandoned()) {
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ SkDEBUGCODE(this->validate());
+
+ if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
+ !externalRequests.hasRequests()) {
+ bool canSkip = numProxies > 0;
+ for (int i = 0; i < numProxies && canSkip; ++i) {
+ canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
+ }
+ if (canSkip) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+ }
+
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
+ }
+ direct->priv().clientMappedBufferManager()->process();
+
+ GrGpu* gpu = direct->priv().getGpu();
+ if (!gpu) {
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
+ }
+
+ fFlushing = true;
+
+ auto resourceProvider = direct->priv().resourceProvider();
+ auto resourceCache = direct->priv().getResourceCache();
+
+ // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
+ // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
+ // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
+ // if the SkGpuDevice(s) write to them again.
+ fDAG.closeAll(fContext->priv().caps());
+ fActiveOpsTask = nullptr;
+
+ fDAG.prepForFlush();
+ if (!fCpuBufferCache) {
+ // We cache more buffers when the backend is using client side arrays. Otherwise, we
+ // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
+ // buffer object. Each pool only requires one staging buffer at a time.
+ int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
+ fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
+ }
+
+ GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);
+
+ GrOnFlushResourceProvider onFlushProvider(this);
+ // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
+ // stack here is to preserve the flush tokens.
+
+ // Prepare any onFlush op lists (e.g. atlases).
+ if (!fOnFlushCBObjects.empty()) {
+ fDAG.gatherIDs(&fFlushingRenderTaskIDs);
+
+ for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
+ onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
+ fFlushingRenderTaskIDs.count());
+ }
+ for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
+ onFlushRenderTask->makeClosed(*fContext->priv().caps());
+#ifdef SK_DEBUG
+ // OnFlush callbacks are invoked during flush, and are therefore expected to handle
+ // resource allocation & usage on their own. (No deferred or lazy proxies!)
+ onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
+ [](GrSurfaceProxy* p, GrMipMapped mipMapped) {
+ SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
+ SkASSERT(!p->isLazy());
+ if (p->requiresManualMSAAResolve()) {
+ // The onFlush callback is responsible for ensuring MSAA gets resolved.
+ SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
+ }
+ if (GrMipMapped::kYes == mipMapped) {
+ // The onFlush callback is responsible for regenerating mips if needed.
+ SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipMapsAreDirty());
+ }
+ });
+#endif
+ onFlushRenderTask->prepare(&flushState);
+ }
+ }
+
+#if 0
+ // Enable this to print out verbose GrOp information
+ SkDEBUGCODE(SkDebugf("onFlush renderTasks:"));
+ for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
+ SkDEBUGCODE(onFlushRenderTask->dump();)
+ }
+ SkDEBUGCODE(SkDebugf("Normal renderTasks:"));
+ for (int i = 0; i < fRenderTasks.count(); ++i) {
+ SkDEBUGCODE(fRenderTasks[i]->dump();)
+ }
+#endif
+
+ int startIndex, stopIndex;
+ bool flushed = false;
+
+ {
+ GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
+ for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
+ if (fDAG.renderTask(i)) {
+ fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
+ }
+ alloc.markEndOfOpsTask(i);
+ }
+ alloc.determineRecyclability();
+
+ GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
+ int numRenderTasksExecuted = 0;
+ while (alloc.assign(&startIndex, &stopIndex, &error)) {
+ if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
+ for (int i = startIndex; i < stopIndex; ++i) {
+ GrRenderTask* renderTask = fDAG.renderTask(i);
+ if (!renderTask) {
+ continue;
+ }
+ if (!renderTask->isInstantiated()) {
+ // No need to call the renderTask's handleInternalAllocationFailure
+ // since we will already skip executing the renderTask since it is not
+ // instantiated.
+ continue;
+ }
+ renderTask->handleInternalAllocationFailure();
+ }
+ }
+
+ if (this->executeRenderTasks(
+ startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
+ flushed = true;
+ }
+ }
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
+ // If there are any remaining opsTasks at this point, make sure they will not survive the
+ // flush. Otherwise we need to call endFlush() on them.
+ // http://skbug.com/7111
+ SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
+ }
+#endif
+ fDAG.reset();
+ this->clearDDLTargets();
+
+#ifdef SK_DEBUG
+ // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
+ // When we move to partial flushes this assert will no longer be valid.
+ // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
+ // will be stored in the DDL's GrOpMemoryPools.
+ GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
+ opMemoryPool->isEmpty();
+#endif
+
+ GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
+ externalRequests);
+
+ // Give the cache a chance to purge resources that become purgeable due to flushing.
+ if (flushed) {
+ resourceCache->purgeAsNeeded();
+ flushed = false;
+ }
+ for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
+ onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
+ fFlushingRenderTaskIDs.count());
+ flushed = true;
+ }
+ if (flushed) {
+ resourceCache->purgeAsNeeded();
+ }
+ fFlushingRenderTaskIDs.reset();
+ fFlushing = false;
+
+ return result;
+}
+
+bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
+ int* numRenderTasksExecuted) {
+ SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());
+
+#if GR_FLUSH_TIME_OP_SPEW
+ SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
+ startIndex, stopIndex, 0, fDAG.numRenderTasks());
+ for (int i = startIndex; i < stopIndex; ++i) {
+ if (fDAG.renderTask(i)) {
+ fDAG.renderTask(i)->dump(true);
+ }
+ }
+#endif
+
+ bool anyRenderTasksExecuted = false;
+
+ for (int i = startIndex; i < stopIndex; ++i) {
+ GrRenderTask* renderTask = fDAG.renderTask(i);
+ if (!renderTask || !renderTask->isInstantiated()) {
+ continue;
+ }
+
+ SkASSERT(renderTask->deferredProxiesAreInstantiated());
+
+ renderTask->prepare(flushState);
+ }
+
+ // Upload all data to the GPU
+ flushState->preExecuteDraws();
+
+ // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
+ // for each command buffer associated with the oplists. If this gets too large we can cause the
+ // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
+ // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
+ // memory pressure.
+ static constexpr int kMaxRenderTasksBeforeFlush = 100;
+
+ // Execute the onFlush renderTasks first, if any.
+ for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
+ if (!onFlushRenderTask->execute(flushState)) {
+ SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
+ }
+ SkASSERT(onFlushRenderTask->unique());
+ onFlushRenderTask = nullptr;
+ (*numRenderTasksExecuted)++;
+ if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
+ flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
+ GrFlushInfo(), GrPrepareForExternalIORequests());
+ *numRenderTasksExecuted = 0;
+ }
+ }
+ fOnFlushRenderTasks.reset();
+
+ // Execute the normal op lists.
+ for (int i = startIndex; i < stopIndex; ++i) {
+ GrRenderTask* renderTask = fDAG.renderTask(i);
+ if (!renderTask || !renderTask->isInstantiated()) {
+ continue;
+ }
+
+ if (renderTask->execute(flushState)) {
+ anyRenderTasksExecuted = true;
+ }
+ (*numRenderTasksExecuted)++;
+ if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
+ flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
+ GrFlushInfo(), GrPrepareForExternalIORequests());
+ *numRenderTasksExecuted = 0;
+ }
+ }
+
+ SkASSERT(!flushState->opsRenderPass());
+ SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());
+
+ // We reset the flush state before the RenderTasks so that the last resources to be freed are
+ // those that are written to in the RenderTasks. This helps to make sure the most recently used
+ // resources are the last to be purged by the resource cache.
+ flushState->reset();
+
+ fDAG.removeRenderTasks(startIndex, stopIndex);
+
+ return anyRenderTasksExecuted;
+}
+
+GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
+ SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info) {
+ if (this->wasAbandoned()) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+ SkDEBUGCODE(this->validate());
+ SkASSERT(numProxies >= 0);
+ SkASSERT(!numProxies || proxies);
+
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
+ }
+
+ GrGpu* gpu = direct->priv().getGpu();
+ if (!gpu) {
+ return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
+ }
+
+ // TODO: It is important to upgrade the drawing manager to flush just the
+ // portion of the DAG required by 'proxies' in order to restore some of the
+ // semantics of this method.
+ GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
+ GrPrepareForExternalIORequests());
+ for (int i = 0; i < numProxies; ++i) {
+ GrSurfaceProxy* proxy = proxies[i];
+ if (!proxy->isInstantiated()) {
+ return result;
+ }
+ // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
+ // because the client will call through to this method when drawing into a target created by
+ // wrapBackendTextureAsRenderTarget, and will expect the original texture to be fully
+ // resolved upon return.
+ if (proxy->requiresManualMSAAResolve()) {
+ auto* rtProxy = proxy->asRenderTargetProxy();
+ SkASSERT(rtProxy);
+ if (rtProxy->isMSAADirty()) {
+ SkASSERT(rtProxy->peekRenderTarget());
+ gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect(),
+ rtProxy->origin(), GrGpu::ForExternalIO::kYes);
+ rtProxy->markMSAAResolved();
+ }
+ }
+ // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
+ // case their backend textures are being stolen.
+ // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
+ // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
+ if (auto* textureProxy = proxy->asTextureProxy()) {
+ if (textureProxy->mipMapsAreDirty()) {
+ SkASSERT(textureProxy->peekTexture());
+ gpu->regenerateMipMapLevels(textureProxy->peekTexture());
+ textureProxy->markMipMapsClean();
+ }
+ }
+ }
+
+ SkDEBUGCODE(this->validate());
+ return result;
+}
+
+void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
+ fOnFlushCBObjects.push_back(onFlushCBObject);
+}
+
+#if GR_TEST_UTILS
+void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
+ int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
+ fOnFlushCBObjects.begin();
+ SkASSERT(n < fOnFlushCBObjects.count());
+ fOnFlushCBObjects.removeShuffle(n);
+}
+#endif
+
+void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
+ SkDEBUGCODE(this->validate());
+
+ // no renderTask should receive a new command after this
+ fDAG.closeAll(fContext->priv().caps());
+ fActiveOpsTask = nullptr;
+
+ fDAG.swap(&ddl->fRenderTasks);
+
+ for (auto renderTask : ddl->fRenderTasks) {
+ renderTask->prePrepare(fContext);
+ }
+
+ if (fPathRendererChain) {
+ if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
+ ddl->fPendingPaths = ccpr->detachPendingPaths();
+ }
+ }
+
+ SkDEBUGCODE(this->validate());
+}
+
+void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
+ GrRenderTargetProxy* newDest) {
+ SkDEBUGCODE(this->validate());
+
+ if (fActiveOpsTask) {
+ // This is a temporary fix for the partial-MDB world. In that world we're not
+ // reordering so ops that (in the single opsTask world) would've just glommed onto the
+ // end of the single opsTask but referred to a far earlier RT need to appear in their
+ // own opsTask.
+ fActiveOpsTask->makeClosed(*fContext->priv().caps());
+ fActiveOpsTask = nullptr;
+ }
+
+ this->addDDLTarget(newDest);
+
+ // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
+ // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
+ ddl->fLazyProxyData->fReplayDest = newDest;
+
+ if (ddl->fPendingPaths.size()) {
+ GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();
+
+ ccpr->mergePendingPaths(ddl->fPendingPaths);
+ }
+
+ fDAG.add(ddl->fRenderTasks);
+
+ SkDEBUGCODE(this->validate());
+}
+
+#ifdef SK_DEBUG
+void GrDrawingManager::validate() const {
+ if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
+ SkASSERT(!fActiveOpsTask);
+ } else {
+ if (fActiveOpsTask) {
+ SkASSERT(!fDAG.empty());
+ SkASSERT(!fActiveOpsTask->isClosed());
+ SkASSERT(fActiveOpsTask == fDAG.back());
+ }
+
+ for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
+ if (fActiveOpsTask != fDAG.renderTask(i)) {
+ // The resolveTask associated with the activeTask remains open for as long as the
+ // activeTask does.
+ bool isActiveResolveTask =
+ fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
+ SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
+ }
+ }
+
+ if (!fDAG.empty() && !fDAG.back()->isClosed()) {
+ SkASSERT(fActiveOpsTask == fDAG.back());
+ }
+ }
+}
+#endif
+
+void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
+ if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
+ // In this case we need to close all the renderTasks that rely on the current contents of
+ // 'target'. That is because we're going to update the content of the proxy, so they need to
+ // be split in case they use both the old and new content. (This is a bit of overkill: they
+ // really only need to be split if they ever reference the proxy's contents again, but that is
+ // hard to predict/handle).
+ if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) {
+ lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
+ }
+ } else if (fActiveOpsTask) {
+ // This is a temporary fix for the partial-MDB world. In that world we're not
+ // reordering so ops that (in the single opsTask world) would've just glommed onto the
+ // end of the single opsTask but referred to a far earlier RT need to appear in their
+ // own opsTask.
+ fActiveOpsTask->makeClosed(*fContext->priv().caps());
+ fActiveOpsTask = nullptr;
+ }
+}
+
+sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(sk_sp<GrRenderTargetProxy> rtp, bool managedOpsTask) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(fContext);
+
+ this->closeRenderTasksForNewRenderTask(rtp.get());
+
+ sk_sp<GrOpsTask> opsTask(new GrOpsTask(fContext->priv().refOpMemoryPool(), rtp,
+ fContext->priv().auditTrail()));
+ SkASSERT(rtp->getLastRenderTask() == opsTask.get());
+
+ if (managedOpsTask) {
+ fDAG.add(opsTask);
+
+ if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
+ fActiveOpsTask = opsTask.get();
+ }
+ }
+
+ SkDEBUGCODE(this->validate());
+ return opsTask;
+}
+
+GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
+ // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
+ // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
+ // state. This is because those opsTasks can still receive new ops and because if they refer to
+ // the mipmapped version of 'proxy', they will then come to depend on the render task being
+ // created here.
+ //
+ // Add the new textureResolveTask before the fActiveOpsTask (if not in
+ // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
+ // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
+ return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
+ sk_make_sp<GrTextureResolveRenderTask>()));
+}
+
+void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
+ std::unique_ptr<sk_sp<GrSemaphore>[]> semaphores,
+ int numSemaphores) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(fContext);
+
+ const GrCaps& caps = *fContext->priv().caps();
+
+ sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(proxy, std::move(semaphores),
+ numSemaphores);
+ if (fReduceOpsTaskSplitting) {
+ GrRenderTask* lastTask = proxy->getLastRenderTask();
+ if (lastTask && !lastTask->isClosed()) {
+ // We directly make the currently open renderTask depend on waitTask instead of using
+ // the proxy version of addDependency. The waitTask will never need to trigger any
+ // resolves or mip map generation which is the main advantage of going through the proxy
+ // version. Additionally we would've had to temporarily set the wait task as the
+ // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask to
+ // lastTask. Additionally we add all dependencies of lastTask to waitTask so that the
+ // waitTask doesn't get reordered before them and unnecessarily block those tasks.
+ // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore
+ // even though they don't need to be for correctness.
+
+ // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a
+ // circular self dependency of waitTask on waitTask.
+ waitTask->addDependenciesFromOtherTask(lastTask);
+ lastTask->addDependency(waitTask.get());
+ } else {
+ // If there is a last task we set the waitTask to depend on it so that it doesn't get
+ // reordered in front of the lastTask causing the lastTask to be blocked by the
+ // semaphore. Again we directly just go through adding the dependency to the task and
+ // not the proxy since we don't need to worry about resolving anything.
+ if (lastTask) {
+ waitTask->addDependency(lastTask);
+ }
+ proxy->setLastRenderTask(waitTask.get());
+ }
+ fDAG.add(waitTask);
+ } else {
+ if (fActiveOpsTask && (fActiveOpsTask->fTarget == proxy)) {
+ SkASSERT(proxy->getLastRenderTask() == fActiveOpsTask);
+ fDAG.addBeforeLast(waitTask);
+ // In this case we keep the current renderTask open but just insert the new waitTask
+ // before it in the list. The waitTask will never need to trigger any resolves or mip
+ // map generation which is the main advantage of going through the proxy version.
+ // Additionally we would've had to temporarily set the wait task as the lastRenderTask
+ // on the proxy, add the dependency, and then reset the lastRenderTask to
+ // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
+ // dependencies so that we don't unnecessarily reorder the waitTask before them.
+ // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
+ // semaphore even though they don't need to be for correctness.
+
+ // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
+ // get a circular self dependency of waitTask on waitTask.
+ waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
+ fActiveOpsTask->addDependency(waitTask.get());
+ } else {
+ // In this case we just close the previous RenderTask and start and append the waitTask
+ // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
+ // there is a lastTask on the proxy we make waitTask depend on that task. This
+ // dependency isn't strictly needed but it does keep the DAG from reordering the
+ // waitTask earlier and blocking more tasks.
+ if (GrRenderTask* lastTask = proxy->getLastRenderTask()) {
+ waitTask->addDependency(lastTask);
+ }
+ proxy->setLastRenderTask(waitTask.get());
+ this->closeRenderTasksForNewRenderTask(proxy.get());
+ fDAG.add(waitTask);
+ }
+ }
+ waitTask->makeClosed(caps);
+
+ SkDEBUGCODE(this->validate());
+}
+
+void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ GrColorType surfaceColorType,
+ GrColorType dstColorType,
+ sk_sp<GrGpuBuffer> dstBuffer,
+ size_t dstOffset) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(fContext);
+ // This copies from srcProxy to dstBuffer so it doesn't have a real target.
+ this->closeRenderTasksForNewRenderTask(nullptr);
+
+ GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
+ srcProxy, srcRect, surfaceColorType, dstColorType, std::move(dstBuffer), dstOffset));
+
+ const GrCaps& caps = *fContext->priv().caps();
+
+ // We always say GrMipMapped::kNo here since we are always just copying from the base layer. We
+ // don't need to make sure the whole mip map chain is valid.
+ task->addDependency(srcProxy.get(), GrMipMapped::kNo, GrTextureResolveManager(this), caps);
+ task->makeClosed(caps);
+
+ // We have closed the previous active oplist but since a new oplist isn't being added there
+ // shouldn't be an active one.
+ SkASSERT(!fActiveOpsTask);
+ SkDEBUGCODE(this->validate());
+}
+
+bool GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy,
+ const SkIPoint& dstPoint) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(fContext);
+
+ this->closeRenderTasksForNewRenderTask(dstProxy.get());
+ const GrCaps& caps = *fContext->priv().caps();
+
+ GrRenderTask* task =
+ fDAG.add(GrCopyRenderTask::Make(srcProxy, srcRect, dstProxy, dstPoint, &caps));
+ if (!task) {
+ return false;
+ }
+
+
+ // We always say GrMipMapped::kNo here since we are always just copying from the base layer to
+ // another base layer. We don't need to make sure the whole mip map chain is valid.
+ task->addDependency(srcProxy.get(), GrMipMapped::kNo, GrTextureResolveManager(this), caps);
+ task->makeClosed(caps);
+
+ // We have closed the previous active oplist but since a new oplist isn't being added there
+ // shouldn't be an active one.
+ SkASSERT(!fActiveOpsTask);
+ SkDEBUGCODE(this->validate());
+ return true;
+}
+
+GrTextContext* GrDrawingManager::getTextContext() {
+ if (!fTextContext) {
+ fTextContext = GrTextContext::Make(fOptionsForTextContext);
+ }
+
+ return fTextContext.get();
+}
+
+/*
+ * This method finds a path renderer that can draw the specified path on
+ * the provided target.
+ * Due to its expense, the software path renderer has been split out so it
+ * can be individually allowed/disallowed via the "allowSW" boolean.
+ */
+GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ bool allowSW,
+ GrPathRendererChain::DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport) {
+
+ if (!fPathRendererChain) {
+ fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
+ }
+
+ GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
+ if (!pr && allowSW) {
+ auto swPR = this->getSoftwarePathRenderer();
+ if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
+ pr = swPR;
+ }
+ }
+
+ return pr;
+}
+
+GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
+ if (!fSoftwarePathRenderer) {
+ fSoftwarePathRenderer.reset(
+ new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
+ fOptionsForPathRendererChain.fAllowPathMaskCaching));
+ }
+ return fSoftwarePathRenderer.get();
+}
+
+GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
+ if (!fPathRendererChain) {
+ fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
+ }
+ return fPathRendererChain->getCoverageCountingPathRenderer();
+}
+
+void GrDrawingManager::flushIfNecessary() {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ return;
+ }
+
+ auto resourceCache = direct->priv().getResourceCache();
+ if (resourceCache && resourceCache->requestsFlush()) {
+ this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
+ GrPrepareForExternalIORequests());
+ resourceCache->purgeAsNeeded();
+ }
+}
+
+std::unique_ptr<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
+ sk_sp<GrSurfaceProxy> sProxy,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ bool managedOpsTask) {
+ if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
+ return nullptr;
+ }
+
+ sk_sp<GrRenderTargetProxy> renderTargetProxy(sk_ref_sp(sProxy->asRenderTargetProxy()));
+
+ return std::unique_ptr<GrRenderTargetContext>(
+ new GrRenderTargetContext(fContext,
+ std::move(renderTargetProxy),
+ colorType,
+ std::move(colorSpace),
+ surfaceProps,
+ managedOpsTask));
+}
+
+std::unique_ptr<GrTextureContext> GrDrawingManager::makeTextureContext(
+ sk_sp<GrSurfaceProxy> sProxy,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace) {
+ if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
+ return nullptr;
+ }
+
+ // GrTextureRenderTargets should always be using a GrRenderTargetContext
+ SkASSERT(!sProxy->asRenderTargetProxy());
+
+ sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));
+
+ return std::unique_ptr<GrTextureContext>(new GrTextureContext(
+ fContext, std::move(textureProxy), colorType, alphaType, std::move(colorSpace)));
+}
diff --git a/gfx/skia/skia/src/gpu/GrDrawingManager.h b/gfx/skia/skia/src/gpu/GrDrawingManager.h
new file mode 100644
index 0000000000..72f3211ba1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawingManager.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawingManager_DEFINED
+#define GrDrawingManager_DEFINED
+
+#include <set>
+#include "include/core/SkSurface.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/GrBufferAllocPool.h"
+#include "src/gpu/GrDeferredUpload.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrPathRendererChain.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/text/GrTextContext.h"
+
+class GrCoverageCountingPathRenderer;
+class GrOnFlushCallbackObject;
+class GrOpFlushState;
+class GrOpsTask;
+class GrRecordingContext;
+class GrRenderTargetContext;
+class GrRenderTargetProxy;
+class GrSoftwarePathRenderer;
+class GrTextureContext;
+class GrTextureResolveRenderTask;
+class SkDeferredDisplayList;
+
+class GrDrawingManager {
+public:
+ ~GrDrawingManager();
+
+ void freeGpuResources();
+
+ std::unique_ptr<GrRenderTargetContext> makeRenderTargetContext(sk_sp<GrSurfaceProxy>,
+ GrColorType,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*,
+ bool managedOpsTask = true);
+ std::unique_ptr<GrTextureContext> makeTextureContext(sk_sp<GrSurfaceProxy>,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>);
+
+ // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
+ // others). An unmanaged one is created and used by the onFlushCallback.
+ sk_sp<GrOpsTask> newOpsTask(sk_sp<GrRenderTargetProxy>, bool managedOpsTask);
+
+ // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
+ // method will only add the new render task to the list. It is up to the caller to call
+ // addProxy() on the returned object.
+ GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
+
+ // Create a new render task that will cause the gpu to wait on semaphores before executing any
+ // more RenderTasks that target proxy. It is possible for this wait to also block additional
+ // work (even to other proxies) that has already been recorded or will be recorded later. The
+ // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
+ // signaled.
+ void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy, std::unique_ptr<sk_sp<GrSemaphore>[]>,
+ int numSemaphores);
+
+ // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
+ // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
+ // to be copied. The surfaceColorType says how we should interpret the data when reading back
+ // from the source. DstColorType describes how the data should be stored in the dstBuffer.
+ // DstOffset is the offset into the dstBuffer where we will start writing data.
+ void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
+ GrColorType surfaceColorType, GrColorType dstColorType,
+ sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
+
+ // Creates a new render task which copies a pixel rectangle from srcProxy into dstProxy. The src
+ // pixels copied are specified by srcRect. They are copied to a rect of the same size in
+ // dstProxy with top left at dstPoint. If the src rect is clipped by the src bounds then pixel
+ // values in the dst rect corresponding to the area clipped by the src rect are not overwritten.
+ // This method is not guaranteed to succeed, depending on the type of surface, formats, etc.,
+ // and backend-specific limitations.
+ bool newCopyRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
+ sk_sp<GrSurfaceProxy> dstProxy, const SkIPoint& dstPoint);
+
+ GrRecordingContext* getContext() { return fContext; }
+
+ GrTextContext* getTextContext();
+
+ GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ bool allowSW,
+ GrPathRendererChain::DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport = nullptr);
+
+ GrPathRenderer* getSoftwarePathRenderer();
+
+ // Returns a direct pointer to the coverage counting path renderer, or null if it is not
+ // supported and turned on.
+ GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();
+
+ void flushIfNecessary();
+
+ static bool ProgramUnitTest(GrContext* context, int maxStages, int maxLevels);
+
+ GrSemaphoresSubmitted flushSurfaces(GrSurfaceProxy* proxies[],
+ int cnt,
+ SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info);
+ GrSemaphoresSubmitted flushSurface(GrSurfaceProxy* proxy,
+ SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info) {
+ return this->flushSurfaces(&proxy, 1, access, info);
+ }
+
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+#if GR_TEST_UTILS
+ void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
+#endif
+
+ void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
+ void copyRenderTasksFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
+
+private:
+ // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
+ // renderTasks.
+ class RenderTaskDAG {
+ public:
+ RenderTaskDAG(bool sortRenderTasks);
+ ~RenderTaskDAG();
+
+ // Currently, when explicitly allocating resources, this call will topologically sort the
+ // GrRenderTasks.
+ // MDB TODO: remove once incremental GrRenderTask sorting is enabled
+ void prepForFlush();
+
+ void closeAll(const GrCaps* caps);
+
+ // A yucky combination of closeAll and reset
+ void cleanup(const GrCaps* caps);
+
+ void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;
+
+ void reset();
+
+        // These calls forcibly remove a GrRenderTask from the DAG. They are problematic because
+        // they just remove the GrRenderTask but don't clean up any referring pointers (i.e.,
+        // dependency pointers in the DAG). They work right now because they are only called at
+        // flush time, after the topological sort is complete (so the dangling pointers aren't
+        // used).
+ void removeRenderTask(int index);
+ void removeRenderTasks(int startIndex, int stopIndex);
+
+ bool empty() const { return fRenderTasks.empty(); }
+ int numRenderTasks() const { return fRenderTasks.count(); }
+
+ bool isUsed(GrSurfaceProxy*) const;
+
+ GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
+ const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }
+
+ GrRenderTask* back() { return fRenderTasks.back().get(); }
+ const GrRenderTask* back() const { return fRenderTasks.back().get(); }
+
+ GrRenderTask* add(sk_sp<GrRenderTask>);
+ GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
+ void add(const SkTArray<sk_sp<GrRenderTask>>&);
+
+ void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);
+
+ bool sortingRenderTasks() const { return fSortRenderTasks; }
+
+ private:
+ SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
+ bool fSortRenderTasks;
+ };
+
+ GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
+ const GrTextContext::Options&,
+ bool sortRenderTasks,
+ bool reduceOpsTaskSplitting);
+
+ bool wasAbandoned() const;
+
+ void cleanup();
+
+ // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-reduction
+ // mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that will write to
+ // 'target'.
+ void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);
+
+    // Returns true if any GrRenderTasks were actually executed; false otherwise.
+ bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
+ int* numRenderTasksExecuted);
+
+ GrSemaphoresSubmitted flush(GrSurfaceProxy* proxies[],
+ int numProxies,
+ SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo&,
+ const GrPrepareForExternalIORequests&);
+
+ SkDEBUGCODE(void validate() const);
+
+ friend class GrContext; // access to: flush & cleanup
+ friend class GrContextPriv; // access to: flush
+ friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
+ friend class GrRecordingContext; // access to: ctor
+ friend class SkImage; // for access to: flush
+
+ static const int kNumPixelGeometries = 5; // The different pixel geometries
+ static const int kNumDFTOptions = 2; // DFT or no DFT
+
+ GrRecordingContext* fContext;
+ GrPathRendererChain::Options fOptionsForPathRendererChain;
+ GrTextContext::Options fOptionsForTextContext;
+ // This cache is used by both the vertex and index pools. It reuses memory across multiple
+ // flushes.
+ sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
+
+ RenderTaskDAG fDAG;
+ GrOpsTask* fActiveOpsTask = nullptr;
+    // These are the IDs of the render tasks currently being flushed (in internalFlush)
+ SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
+ // These are the new renderTasks generated by the onFlush CBs
+ SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;
+
+ std::unique_ptr<GrTextContext> fTextContext;
+
+ std::unique_ptr<GrPathRendererChain> fPathRendererChain;
+ sk_sp<GrSoftwarePathRenderer> fSoftwarePathRenderer;
+
+ GrTokenTracker fTokenTracker;
+ bool fFlushing;
+ bool fReduceOpsTaskSplitting;
+
+ SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;
+
+ void addDDLTarget(GrSurfaceProxy* proxy) { fDDLTargets.insert(proxy); }
+ bool isDDLTarget(GrSurfaceProxy* proxy) { return fDDLTargets.find(proxy) != fDDLTargets.end(); }
+ void clearDDLTargets() { fDDLTargets.clear(); }
+
+ // We play a trick with lazy proxies to retarget the base target of a DDL to the SkSurface
+ // it is replayed on. Because of this remapping we need to explicitly store the targets of
+ // DDL replaying.
+ // Note: we do not expect a whole lot of these per flush
+ std::set<GrSurfaceProxy*> fDDLTargets;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDriverBugWorkarounds.cpp b/gfx/skia/skia/src/gpu/GrDriverBugWorkarounds.cpp
new file mode 100644
index 0000000000..e91623ce8d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDriverBugWorkarounds.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrDriverBugWorkarounds.h"
+
+#include "include/core/SkTypes.h"
+
+GrDriverBugWorkarounds::GrDriverBugWorkarounds() = default;
+
+GrDriverBugWorkarounds::GrDriverBugWorkarounds(
+ const std::vector<int>& enabled_driver_bug_workarounds) {
+ for (auto id : enabled_driver_bug_workarounds) {
+ switch (id) {
+#define GPU_OP(type, name) \
+ case GrDriverBugWorkaroundType::type: \
+ name = true; \
+ break;
+
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ default:
+ SK_ABORT("Not implemented");
+ break;
+ }
+ }
+}
+
+void GrDriverBugWorkarounds::applyOverrides(
+ const GrDriverBugWorkarounds& workarounds) {
+#define GPU_OP(type, name) \
+ name |= workarounds.name;
+
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+}
+
+GrDriverBugWorkarounds::~GrDriverBugWorkarounds() = default;
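+
+// A minimal usage sketch (illustrative; `kSomeWorkaround` is a hypothetical entry --
+// real values come from the GPU_DRIVER_BUG_WORKAROUNDS X-macro list):
+//
+//   std::vector<int> ids = {GrDriverBugWorkaroundType::kSomeWorkaround};
+//   GrDriverBugWorkarounds workarounds(ids);   // sets the matching bool member
+//   GrDriverBugWorkarounds overrides;
+//   workarounds.applyOverrides(overrides);     // ORs in every flag set in 'overrides'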
diff --git a/gfx/skia/skia/src/gpu/GrFPArgs.h b/gfx/skia/skia/src/gpu/GrFPArgs.h
new file mode 100644
index 0000000000..87b7e368d8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFPArgs.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFPArgs_DEFINED
+#define GrFPArgs_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkMatrix.h"
+
+class GrColorInfo;
+class GrRecordingContext;
+
+struct GrFPArgs {
+ GrFPArgs(GrRecordingContext* context,
+ const SkMatrix* viewMatrix,
+ SkFilterQuality filterQuality,
+ const GrColorInfo* dstColorInfo)
+ : fContext(context)
+ , fViewMatrix(viewMatrix)
+ , fFilterQuality(filterQuality)
+ , fDstColorInfo(dstColorInfo) {
+ SkASSERT(fContext);
+ SkASSERT(fViewMatrix);
+ }
+
+ class WithPreLocalMatrix;
+ class WithPostLocalMatrix;
+
+ GrRecordingContext* fContext;
+ const SkMatrix* fViewMatrix;
+
+ // We track both pre and post local matrix adjustments. For a given FP:
+ //
+ // total_local_matrix = postLocalMatrix x FP_localMatrix x preLocalMatrix
+ //
+ // Use the helpers above to create pre/post GrFPArgs wrappers.
+ //
+ const SkMatrix* fPreLocalMatrix = nullptr;
+ const SkMatrix* fPostLocalMatrix = nullptr;
+
+ // Make this SkAlphaType?
+ bool fInputColorIsOpaque = false;
+
+ SkFilterQuality fFilterQuality;
+ const GrColorInfo* fDstColorInfo;
+};
+
+class GrFPArgs::WithPreLocalMatrix final : public GrFPArgs {
+public:
+ WithPreLocalMatrix(const GrFPArgs& args, const SkMatrix& lm) : INHERITED(args) {
+ if (!lm.isIdentity()) {
+ if (fPreLocalMatrix) {
+ fStorage.setConcat(lm, *fPreLocalMatrix);
+ fPreLocalMatrix = fStorage.isIdentity() ? nullptr : &fStorage;
+ } else {
+ fPreLocalMatrix = &lm;
+ }
+ }
+ }
+
+private:
+ WithPreLocalMatrix(const WithPreLocalMatrix&) = delete;
+ WithPreLocalMatrix& operator=(const WithPreLocalMatrix&) = delete;
+
+ SkMatrix fStorage;
+
+ using INHERITED = GrFPArgs;
+};
+
+class GrFPArgs::WithPostLocalMatrix final : public GrFPArgs {
+public:
+ WithPostLocalMatrix(const GrFPArgs& args, const SkMatrix& lm) : INHERITED(args) {
+ if (!lm.isIdentity()) {
+ if (fPostLocalMatrix) {
+ fStorage.setConcat(*fPostLocalMatrix, lm);
+ fPostLocalMatrix = fStorage.isIdentity() ? nullptr : &fStorage;
+ } else {
+ fPostLocalMatrix = &lm;
+ }
+ }
+ }
+
+private:
+ WithPostLocalMatrix(const WithPostLocalMatrix&) = delete;
+ WithPostLocalMatrix& operator=(const WithPostLocalMatrix&) = delete;
+
+ SkMatrix fStorage;
+
+ using INHERITED = GrFPArgs;
+};
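+
+// A usage sketch (illustrative; `shaderMatrix` and the surrounding variables are
+// hypothetical). The wrappers adjust the args in place for one FP subtree:
+//
+//   GrFPArgs args(context, &viewMatrix, filterQuality, &dstColorInfo);
+//   GrFPArgs::WithPreLocalMatrix preArgs(args, shaderMatrix);
+//   // preArgs now bakes shaderMatrix in ahead of any FP-local matrix.
+//
+// Note that the wrappers only store pointers, so they must not outlive the
+// matrices they reference.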
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrFixedClip.cpp b/gfx/skia/skia/src/gpu/GrFixedClip.cpp
new file mode 100644
index 0000000000..2977d9d72b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFixedClip.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrFixedClip.h"
+
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+bool GrFixedClip::quickContains(const SkRect& rect) const {
+ if (fWindowRectsState.enabled()) {
+ return false;
+ }
+ return !fScissorState.enabled() || GrClip::IsInsideClip(fScissorState.rect(), rect);
+}
+
+void GrFixedClip::getConservativeBounds(int w, int h, SkIRect* devResult, bool* iior) const {
+ devResult->setXYWH(0, 0, w, h);
+ if (fScissorState.enabled()) {
+ if (!devResult->intersect(fScissorState.rect())) {
+ devResult->setEmpty();
+ }
+ }
+ if (iior) {
+ *iior = true;
+ }
+}
+
+bool GrFixedClip::isRRect(const SkRect& rtBounds, SkRRect* rr, GrAA* aa) const {
+ if (fWindowRectsState.enabled()) {
+ return false;
+ }
+ if (fScissorState.enabled()) {
+ SkRect rect = SkRect::Make(fScissorState.rect());
+ if (!rect.intersects(rtBounds)) {
+ return false;
+ }
+ rr->setRect(rect);
+ *aa = GrAA::kNo;
+ return true;
+ }
+ return false;
+}
+
+bool GrFixedClip::apply(int rtWidth, int rtHeight, GrAppliedHardClip* out, SkRect* bounds) const {
+ if (fScissorState.enabled()) {
+ SkIRect tightScissor = SkIRect::MakeWH(rtWidth, rtHeight);
+ if (!tightScissor.intersect(fScissorState.rect())) {
+ return false;
+ }
+ if (IsOutsideClip(tightScissor, *bounds)) {
+ return false;
+ }
+ if (!IsInsideClip(fScissorState.rect(), *bounds)) {
+ out->addScissor(tightScissor, bounds);
+ }
+ }
+
+ if (fWindowRectsState.enabled()) {
+ out->addWindowRectangles(fWindowRectsState);
+ }
+
+ return true;
+}
+
+const GrFixedClip& GrFixedClip::Disabled() {
+ static const GrFixedClip disabled = GrFixedClip();
+ return disabled;
+}
diff --git a/gfx/skia/skia/src/gpu/GrFixedClip.h b/gfx/skia/skia/src/gpu/GrFixedClip.h
new file mode 100644
index 0000000000..087617eacd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFixedClip.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFixedClip_DEFINED
+#define GrFixedClip_DEFINED
+
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrScissorState.h"
+#include "src/gpu/GrWindowRectsState.h"
+
+/**
+ * Implements GrHardClip with scissor and window rectangles.
+ */
+class GrFixedClip final : public GrHardClip {
+public:
+ GrFixedClip() = default;
+ explicit GrFixedClip(const SkIRect& scissorRect) : fScissorState(scissorRect) {}
+
+ const GrScissorState& scissorState() const { return fScissorState; }
+ bool scissorEnabled() const { return fScissorState.enabled(); }
+ const SkIRect& scissorRect() const { SkASSERT(scissorEnabled()); return fScissorState.rect(); }
+
+ void disableScissor() { fScissorState.setDisabled(); }
+
+ void setScissor(const SkIRect& irect) {
+ fScissorState.set(irect);
+ }
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& irect) {
+ return fScissorState.intersect(irect);
+ }
+
+ const GrWindowRectsState& windowRectsState() const { return fWindowRectsState; }
+ bool hasWindowRectangles() const { return fWindowRectsState.enabled(); }
+
+ void disableWindowRectangles() { fWindowRectsState.setDisabled(); }
+
+ void setWindowRectangles(const GrWindowRectangles& windows, GrWindowRectsState::Mode mode) {
+ fWindowRectsState.set(windows, mode);
+ }
+
+ bool quickContains(const SkRect&) const override;
+ void getConservativeBounds(int w, int h, SkIRect* devResult, bool* iior) const override;
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, GrAA*) const override;
+ bool apply(int rtWidth, int rtHeight, GrAppliedHardClip*, SkRect*) const override;
+
+ static const GrFixedClip& Disabled();
+
+private:
+ GrScissorState fScissorState;
+ GrWindowRectsState fWindowRectsState;
+};
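+
+// A usage sketch (illustrative; `dirtyRect` is hypothetical):
+//
+//   GrFixedClip clip(SkIRect::MakeWH(256, 256));
+//   if (clip.intersect(dirtyRect)) {
+//       // clip now covers the intersection; apply() will emit a scissor only
+//       // when the draw bounds aren't already inside it.
+//   }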
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp
new file mode 100644
index 0000000000..1e1f2e42a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp
@@ -0,0 +1,431 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessorAnalysis.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/effects/generated/GrOverrideInputFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrPremulInputFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+bool GrFragmentProcessor::isEqual(const GrFragmentProcessor& that) const {
+ if (this->classID() != that.classID()) {
+ return false;
+ }
+ if (this->numTextureSamplers() != that.numTextureSamplers()) {
+ return false;
+ }
+ for (int i = 0; i < this->numTextureSamplers(); ++i) {
+ if (this->textureSampler(i) != that.textureSampler(i)) {
+ return false;
+ }
+ }
+ if (!this->hasSameTransforms(that)) {
+ return false;
+ }
+ if (!this->onIsEqual(that)) {
+ return false;
+ }
+ if (this->numChildProcessors() != that.numChildProcessors()) {
+ return false;
+ }
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ if (!this->childProcessor(i).isEqual(that.childProcessor(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrFragmentProcessor::visitProxies(const GrOp::VisitProxyFunc& func) {
+ GrFragmentProcessor::TextureAccessIter iter(this);
+ while (const TextureSampler* sampler = iter.next()) {
+ bool mipped = (GrSamplerState::Filter::kMipMap == sampler->samplerState().filter());
+ func(sampler->proxy(), GrMipMapped(mipped));
+ }
+}
+
+GrGLSLFragmentProcessor* GrFragmentProcessor::createGLSLInstance() const {
+ GrGLSLFragmentProcessor* glFragProc = this->onCreateGLSLInstance();
+ glFragProc->fChildProcessors.push_back_n(fChildProcessors.count());
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ glFragProc->fChildProcessors[i] = fChildProcessors[i]->createGLSLInstance();
+ }
+ return glFragProc;
+}
+
+const GrFragmentProcessor::TextureSampler& GrFragmentProcessor::textureSampler(int i) const {
+ SkASSERT(i >= 0 && i < fTextureSamplerCnt);
+ return this->onTextureSampler(i);
+}
+
+void GrFragmentProcessor::addCoordTransform(GrCoordTransform* transform) {
+ transform->setComputeInVertexShader(this->computeLocalCoordsInVertexShader());
+ fCoordTransforms.push_back(transform);
+ fFlags |= kUsesLocalCoords_Flag;
+ SkDEBUGCODE(transform->setInProcessor();)
+}
+
+#ifdef SK_DEBUG
+bool GrFragmentProcessor::isInstantiated() const {
+ for (int i = 0; i < fTextureSamplerCnt; ++i) {
+ if (!this->textureSampler(i).isInstantiated()) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ if (!this->childProcessor(i).isInstantiated()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+int GrFragmentProcessor::registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child) {
+ if (child->usesLocalCoords()) {
+ fFlags |= kUsesLocalCoords_Flag;
+ }
+ fRequestedFeatures |= child->fRequestedFeatures;
+
+ int index = fChildProcessors.count();
+ fChildProcessors.push_back(std::move(child));
+
+ return index;
+}
+
+bool GrFragmentProcessor::hasSameTransforms(const GrFragmentProcessor& that) const {
+ if (this->numCoordTransforms() != that.numCoordTransforms()) {
+ return false;
+ }
+ int count = this->numCoordTransforms();
+ for (int i = 0; i < count; ++i) {
+ if (!this->coordTransform(i).hasSameEffectAs(that.coordTransform(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::MulChildByInputAlpha(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ if (!fp) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(fp), SkBlendMode::kDstIn);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::MulInputByChildAlpha(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ if (!fp) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(fp), SkBlendMode::kSrcIn);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::PremulInput(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ if (!fp) {
+ return nullptr;
+ }
+ std::unique_ptr<GrFragmentProcessor> fpPipeline[] = { GrPremulInputFragmentProcessor::Make(),
+ std::move(fp) };
+ return GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::SwizzleOutput(
+ std::unique_ptr<GrFragmentProcessor> fp, const GrSwizzle& swizzle) {
+ class SwizzleFragmentProcessor : public GrFragmentProcessor {
+ public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const GrSwizzle& swizzle) {
+ return std::unique_ptr<GrFragmentProcessor>(new SwizzleFragmentProcessor(swizzle));
+ }
+
+ const char* name() const override { return "Swizzle"; }
+ const GrSwizzle& swizzle() const { return fSwizzle; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override { return Make(fSwizzle); }
+
+ private:
+ SwizzleFragmentProcessor(const GrSwizzle& swizzle)
+ : INHERITED(kSwizzleFragmentProcessor_ClassID, kAll_OptimizationFlags)
+ , fSwizzle(swizzle) {}
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ const SwizzleFragmentProcessor& sfp = args.fFp.cast<SwizzleFragmentProcessor>();
+ const GrSwizzle& swizzle = sfp.swizzle();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("%s = %s.%s;",
+ args.fOutputColor, args.fInputColor, swizzle.c_str());
+ }
+ };
+ return new GLFP;
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ b->add32(fSwizzle.asKey());
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const SwizzleFragmentProcessor& sfp = other.cast<SwizzleFragmentProcessor>();
+ return fSwizzle == sfp.fSwizzle;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return fSwizzle.applyTo(input);
+ }
+
+ GrSwizzle fSwizzle;
+
+ typedef GrFragmentProcessor INHERITED;
+ };
+
+ if (!fp) {
+ return nullptr;
+ }
+ if (GrSwizzle::RGBA() == swizzle) {
+ return fp;
+ }
+ std::unique_ptr<GrFragmentProcessor> fpPipeline[] = { std::move(fp),
+ SwizzleFragmentProcessor::Make(swizzle) };
+ return GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::MakeInputPremulAndMulByOutput(
+ std::unique_ptr<GrFragmentProcessor> fp) {
+ class PremulFragmentProcessor : public GrFragmentProcessor {
+ public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor> processor) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new PremulFragmentProcessor(std::move(processor)));
+ }
+
+ const char* name() const override { return "Premultiply"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return Make(this->childProcessor(0).clone());
+ }
+
+ private:
+ PremulFragmentProcessor(std::unique_ptr<GrFragmentProcessor> processor)
+ : INHERITED(kPremulFragmentProcessor_ClassID, OptFlags(processor.get())) {
+ this->registerChildProcessor(std::move(processor));
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ this->invokeChild(0, args);
+ fragBuilder->codeAppendf("%s.rgb *= %s.rgb;", args.fOutputColor,
+ args.fInputColor);
+ fragBuilder->codeAppendf("%s *= %s.a;", args.fOutputColor, args.fInputColor);
+ }
+ };
+ return new GLFP;
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ static OptimizationFlags OptFlags(const GrFragmentProcessor* inner) {
+ OptimizationFlags flags = kNone_OptimizationFlags;
+ if (inner->preservesOpaqueInput()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ if (inner->hasConstantOutputForConstantInput()) {
+ flags |= kConstantOutputForConstantInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ SkPMColor4f childColor = ConstantOutputForConstantInput(this->childProcessor(0),
+ SK_PMColor4fWHITE);
+ SkPMColor4f premulInput = SkColor4f{ input.fR, input.fG, input.fB, input.fA }.premul();
+ return premulInput * childColor;
+ }
+
+ typedef GrFragmentProcessor INHERITED;
+ };
+ if (!fp) {
+ return nullptr;
+ }
+ return PremulFragmentProcessor::Make(std::move(fp));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::OverrideInput(
+ std::unique_ptr<GrFragmentProcessor> fp, const SkPMColor4f& color, bool useUniform) {
+ if (!fp) {
+ return nullptr;
+ }
+ return GrOverrideInputFragmentProcessor::Make(std::move(fp), color, useUniform);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrFragmentProcessor::RunInSeries(
+ std::unique_ptr<GrFragmentProcessor>* series, int cnt) {
+ class SeriesFragmentProcessor : public GrFragmentProcessor {
+ public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor>* children, int cnt) {
+ return std::unique_ptr<GrFragmentProcessor>(new SeriesFragmentProcessor(children, cnt));
+ }
+
+ const char* name() const override { return "Series"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> children(this->numChildProcessors());
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ if (!children.push_back(this->childProcessor(i).clone())) {
+ return nullptr;
+ }
+ }
+ return Make(children.begin(), this->numChildProcessors());
+ }
+
+ private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+                    // The first child's input color might be null.
+ SkString temp("out0");
+ this->invokeChild(0, args.fInputColor, &temp, args);
+ SkString input = temp;
+ for (int i = 1; i < this->numChildProcessors() - 1; ++i) {
+ temp.printf("out%d", i);
+ this->invokeChild(i, input.c_str(), &temp, args);
+ input = temp;
+ }
+                    // The last child writes to our output variable.
+ this->invokeChild(this->numChildProcessors() - 1, input.c_str(), args);
+ }
+ };
+ return new GLFP;
+ }
+
+ SeriesFragmentProcessor(std::unique_ptr<GrFragmentProcessor>* children, int cnt)
+ : INHERITED(kSeriesFragmentProcessor_ClassID, OptFlags(children, cnt)) {
+ SkASSERT(cnt > 1);
+ for (int i = 0; i < cnt; ++i) {
+ this->registerChildProcessor(std::move(children[i]));
+ }
+ }
+
+ static OptimizationFlags OptFlags(std::unique_ptr<GrFragmentProcessor>* children, int cnt) {
+ OptimizationFlags flags = kAll_OptimizationFlags;
+ for (int i = 0; i < cnt && flags != kNone_OptimizationFlags; ++i) {
+ flags &= children[i]->optimizationFlags();
+ }
+ return flags;
+ }
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& inColor) const override {
+ SkPMColor4f color = inColor;
+ int childCnt = this->numChildProcessors();
+ for (int i = 0; i < childCnt; ++i) {
+ color = ConstantOutputForConstantInput(this->childProcessor(i), color);
+ }
+ return color;
+ }
+
+ typedef GrFragmentProcessor INHERITED;
+ };
+
+ if (!cnt) {
+ return nullptr;
+ }
+ if (1 == cnt) {
+ return std::move(series[0]);
+ }
+    // Run through the series, do the invariant output processing, and look for eliminations.
+ GrProcessorAnalysisColor inputColor;
+ inputColor.setToUnknown();
+ GrColorFragmentProcessorAnalysis info(inputColor, unique_ptr_address_as_pointer_address(series),
+ cnt);
+ SkTArray<std::unique_ptr<GrFragmentProcessor>> replacementSeries;
+ SkPMColor4f knownColor;
+ int leadingFPsToEliminate = info.initialProcessorsToEliminate(&knownColor);
+ if (leadingFPsToEliminate) {
+ std::unique_ptr<GrFragmentProcessor> colorFP(
+ GrConstColorProcessor::Make(knownColor, GrConstColorProcessor::InputMode::kIgnore));
+ if (leadingFPsToEliminate == cnt) {
+ return colorFP;
+ }
+ cnt = cnt - leadingFPsToEliminate + 1;
+ replacementSeries.reserve(cnt);
+ replacementSeries.emplace_back(std::move(colorFP));
+ for (int i = 0; i < cnt - 1; ++i) {
+ replacementSeries.emplace_back(std::move(series[leadingFPsToEliminate + i]));
+ }
+ series = replacementSeries.begin();
+ }
+ return SeriesFragmentProcessor::Make(series, cnt);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrFragmentProcessor::Iter::Iter(const GrPipeline& pipeline) {
+ for (int i = pipeline.numFragmentProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(&pipeline.getFragmentProcessor(i));
+ }
+}
+
+GrFragmentProcessor::Iter::Iter(const GrPaint& paint) {
+ for (int i = paint.numCoverageFragmentProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(paint.getCoverageFragmentProcessor(i));
+ }
+ for (int i = paint.numColorFragmentProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(paint.getColorFragmentProcessor(i));
+ }
+}
+
+const GrFragmentProcessor* GrFragmentProcessor::Iter::next() {
+ if (fFPStack.empty()) {
+ return nullptr;
+ }
+ const GrFragmentProcessor* back = fFPStack.back();
+ fFPStack.pop_back();
+ for (int i = back->numChildProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(&back->childProcessor(i));
+ }
+ return back;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrFragmentProcessor::TextureSampler::TextureSampler(sk_sp<GrTextureProxy> proxy,
+ const GrSamplerState& samplerState) {
+ this->reset(std::move(proxy), samplerState);
+}
+
+void GrFragmentProcessor::TextureSampler::reset(sk_sp<GrTextureProxy> proxy,
+ const GrSamplerState& samplerState) {
+ fProxy = std::move(proxy);
+ fSamplerState = samplerState;
+ fSamplerState.setFilterMode(SkTMin(samplerState.filter(), this->proxy()->highestFilterMode()));
+}
diff --git a/gfx/skia/skia/src/gpu/GrFragmentProcessor.h b/gfx/skia/skia/src/gpu/GrFragmentProcessor.h
new file mode 100644
index 0000000000..21e27b868b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFragmentProcessor.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFragmentProcessor_DEFINED
+#define GrFragmentProcessor_DEFINED
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrGLSLFragmentProcessor;
+class GrPaint;
+class GrPipeline;
+class GrProcessorKeyBuilder;
+class GrShaderCaps;
+class GrSwizzle;
+
+/** Provides custom fragment shader code. Fragment processors receive an input color (half4) and
+ produce an output color. They may reference textures and uniforms. They may use
+ GrCoordTransforms to receive a transformation of the local coordinates that map from local space
+ to the fragment being processed.
+ */
+class GrFragmentProcessor : public GrProcessor {
+public:
+ class TextureSampler;
+
+ /**
+ * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
+ * only consider the input color's alpha. However, there is a competing desire to have reusable
+ * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
+ * color is considered. This function exists to filter the input color and pass it to a FP. It
+ * does so by returning a parent FP that multiplies the passed in FPs output by the parent's
+ * input alpha. The passed in FP will not receive an input color.
+ */
+ static std::unique_ptr<GrFragmentProcessor> MulChildByInputAlpha(
+ std::unique_ptr<GrFragmentProcessor> child);
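+    //
+    // e.g. (illustrative; `fp` is a hypothetical child processor):
+    //   fp = GrFragmentProcessor::MulChildByInputAlpha(std::move(fp));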
+
+ /**
+ * Like MulChildByInputAlpha(), but reverses the sense of src and dst. In this case, return
+ * the input modulated by the child's alpha. The passed in FP will not receive an input color.
+ *
+ * output = input * child.a
+ */
+ static std::unique_ptr<GrFragmentProcessor> MulInputByChildAlpha(
+ std::unique_ptr<GrFragmentProcessor> child);
+
+ /**
+ * This assumes that the input color to the returned processor will be unpremul and that the
+ * passed processor (which becomes the returned processor's child) produces a premul output.
+ * The result of the returned processor is a premul of its input color modulated by the child
+ * processor's premul output.
+ */
+ static std::unique_ptr<GrFragmentProcessor> MakeInputPremulAndMulByOutput(
+ std::unique_ptr<GrFragmentProcessor>);
+
+ /**
+ * Returns a parent fragment processor that adopts the passed fragment processor as a child.
+ * The parent will ignore its input color and instead feed the passed in color as input to the
+ * child.
+ */
+ static std::unique_ptr<GrFragmentProcessor> OverrideInput(std::unique_ptr<GrFragmentProcessor>,
+ const SkPMColor4f&,
+ bool useUniform = true);
+
+ /**
+ * Returns a fragment processor that premuls the input before calling the passed in fragment
+ * processor.
+ */
+ static std::unique_ptr<GrFragmentProcessor> PremulInput(std::unique_ptr<GrFragmentProcessor>);
+
+ /**
+ * Returns a fragment processor that calls the passed in fragment processor, and then swizzles
+ * the output.
+ */
+ static std::unique_ptr<GrFragmentProcessor> SwizzleOutput(std::unique_ptr<GrFragmentProcessor>,
+ const GrSwizzle&);
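+    //
+    // e.g. (illustrative): force an opaque output by swizzling alpha to one:
+    //   fp = GrFragmentProcessor::SwizzleOutput(std::move(fp), GrSwizzle("rgb1"));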
+
+ /**
+ * Returns a fragment processor that runs the passed in array of fragment processors in a
+ * series. The original input is passed to the first, the first's output is passed to the
+ * second, etc. The output of the returned processor is the output of the last processor of the
+ * series.
+ *
+     * The array elements will be moved.
+ */
+ static std::unique_ptr<GrFragmentProcessor> RunInSeries(std::unique_ptr<GrFragmentProcessor>*,
+ int cnt);
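+    //
+    // e.g. (illustrative; fp0/fp1 are hypothetical processors):
+    //   std::unique_ptr<GrFragmentProcessor> series[] = {std::move(fp0), std::move(fp1)};
+    //   auto composed = GrFragmentProcessor::RunInSeries(series, 2);
+    //   // composed(input) == fp1(fp0(input))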
+
+ /**
+ * Makes a copy of this fragment processor that draws equivalently to the original.
+ * If the processor has child processors they are cloned as well.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> clone() const = 0;
+
+ GrGLSLFragmentProcessor* createGLSLInstance() const;
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
+ this->onGetGLSLProcessorKey(caps, b);
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ fChildProcessors[i]->getGLSLProcessorKey(caps, b);
+ }
+ }
+
+ int numTextureSamplers() const { return fTextureSamplerCnt; }
+ const TextureSampler& textureSampler(int i) const;
+
+ int numCoordTransforms() const { return fCoordTransforms.count(); }
+
+ /** Returns the coordinate transformation at index. index must be valid according to
+        numCoordTransforms(). */
+ const GrCoordTransform& coordTransform(int index) const { return *fCoordTransforms[index]; }
+
+ const SkTArray<GrCoordTransform*, true>& coordTransforms() const {
+ return fCoordTransforms;
+ }
+
+ int numChildProcessors() const { return fChildProcessors.count(); }
+
+ const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }
+
+ SkDEBUGCODE(bool isInstantiated() const;)
+
+ /** Do any of the coordtransforms for this processor require local coords? */
+ bool usesLocalCoords() const { return SkToBool(fFlags & kUsesLocalCoords_Flag); }
+
+ bool computeLocalCoordsInVertexShader() const {
+ return SkToBool(fFlags & kComputeLocalCoordsInVertexShader_Flag);
+ }
+
+ void setComputeLocalCoordsInVertexShader(bool value) const {
+ if (value) {
+ fFlags |= kComputeLocalCoordsInVertexShader_Flag;
+ } else {
+ fFlags &= ~kComputeLocalCoordsInVertexShader_Flag;
+ }
+ for (GrCoordTransform* transform : fCoordTransforms) {
+ transform->setComputeInVertexShader(value);
+ }
+ for (const auto& child : fChildProcessors) {
+ child->setComputeLocalCoordsInVertexShader(value);
+ }
+ }
+
+ /**
+ * A GrDrawOp may premultiply its antialiasing coverage into its GrGeometryProcessor's color
+ * output under the following scenario:
+ * * all the color fragment processors report true to this query,
+ * * all the coverage fragment processors report true to this query,
+     *   * the blend mode arithmetic allows for it.
+ * To be compatible a fragment processor's output must be a modulation of its input color or
+ * alpha with a computed premultiplied color or alpha that is in 0..1 range. The computed color
+ * or alpha that is modulated against the input cannot depend on the input's alpha. The computed
+ * value cannot depend on the input's color channels unless it unpremultiplies the input color
+ * channels by the input alpha.
+ */
+ bool compatibleWithCoverageAsAlpha() const {
+ return SkToBool(fFlags & kCompatibleWithCoverageAsAlpha_OptimizationFlag);
+ }
+
+ /**
+ * If this is true then all opaque input colors to the processor produce opaque output colors.
+ */
+ bool preservesOpaqueInput() const {
+ return SkToBool(fFlags & kPreservesOpaqueInput_OptimizationFlag);
+ }
+
+ /**
+     * Tests whether, given a constant input color, the processor produces a constant output color
+     * (for all fragments). If true, outputColor will contain the constant color produced for
+     * inputColor.
+ */
+ bool hasConstantOutputForConstantInput(SkPMColor4f inputColor, SkPMColor4f* outputColor) const {
+ if (fFlags & kConstantOutputForConstantInput_OptimizationFlag) {
+ *outputColor = this->constantOutputForConstantInput(inputColor);
+ return true;
+ }
+ return false;
+ }
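+    //
+    // e.g. (illustrative):
+    //   SkPMColor4f out;
+    //   if (fp->hasConstantOutputForConstantInput(SK_PMColor4fWHITE, &out)) {
+    //       // every fragment would receive 'out'; the draw may be simplified
+    //   }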
+ bool hasConstantOutputForConstantInput() const {
+ return SkToBool(fFlags & kConstantOutputForConstantInput_OptimizationFlag);
+ }
+
+    /** Returns true if this and the other processor conservatively draw identically. It can only
+        return true when the two processors are of the same subclass (i.e. they return the same
+        object from getFactory()).
+
+ A return value of true from isEqual() should not be used to test whether the processor would
+ generate the same shader code. To test for identical code generation use getGLSLProcessorKey
+ */
+ bool isEqual(const GrFragmentProcessor& that) const;
+
+ /**
+ * Pre-order traversal of a FP hierarchy, or of the forest of FPs in a GrPipeline. In the latter
+ * case the tree rooted at each FP in the GrPipeline is visited successively.
+ */
+ class Iter : public SkNoncopyable {
+ public:
+ explicit Iter(const GrFragmentProcessor* fp) { fFPStack.push_back(fp); }
+ explicit Iter(const GrPipeline& pipeline);
+ explicit Iter(const GrPaint&);
+ const GrFragmentProcessor* next();
+
+ private:
+ SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
+ };
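+
+    // e.g. (illustrative; `rootFP` is hypothetical): pre-order walk over an FP tree:
+    //   GrFragmentProcessor::Iter iter(rootFP);
+    //   while (const GrFragmentProcessor* fp = iter.next()) {
+    //       // visit fp
+    //   }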
+
+ /**
+ * Iterates over all the Ts owned by a GrFragmentProcessor and its children or over all the Ts
+ * owned by the forest of GrFragmentProcessors in a GrPipeline. FPs are visited in the same
+ * order as Iter and each of an FP's Ts are visited in order.
+ */
+ template <typename T, int (GrFragmentProcessor::*COUNT)() const,
+ const T& (GrFragmentProcessor::*GET)(int)const>
+ class FPItemIter : public SkNoncopyable {
+ public:
+ explicit FPItemIter(const GrFragmentProcessor* fp)
+ : fCurrFP(nullptr)
+ , fCTIdx(0)
+ , fFPIter(fp) {
+ fCurrFP = fFPIter.next();
+ }
+ explicit FPItemIter(const GrPipeline& pipeline)
+ : fCurrFP(nullptr)
+ , fCTIdx(0)
+ , fFPIter(pipeline) {
+ fCurrFP = fFPIter.next();
+ }
+
+ const T* next() {
+ if (!fCurrFP) {
+ return nullptr;
+ }
+ while (fCTIdx == (fCurrFP->*COUNT)()) {
+ fCTIdx = 0;
+ fCurrFP = fFPIter.next();
+ if (!fCurrFP) {
+ return nullptr;
+ }
+ }
+ return &(fCurrFP->*GET)(fCTIdx++);
+ }
+
+ private:
+ const GrFragmentProcessor* fCurrFP;
+ int fCTIdx;
+ GrFragmentProcessor::Iter fFPIter;
+ };
+
+ using CoordTransformIter = FPItemIter<GrCoordTransform,
+ &GrFragmentProcessor::numCoordTransforms,
+ &GrFragmentProcessor::coordTransform>;
+
+ using TextureAccessIter = FPItemIter<TextureSampler,
+ &GrFragmentProcessor::numTextureSamplers,
+ &GrFragmentProcessor::textureSampler>;
+
+ void visitProxies(const GrOp::VisitProxyFunc& func);
+
+protected:
+ enum OptimizationFlags : uint32_t {
+ kNone_OptimizationFlags,
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag = 0x1,
+ kPreservesOpaqueInput_OptimizationFlag = 0x2,
+ kConstantOutputForConstantInput_OptimizationFlag = 0x4,
+ kAll_OptimizationFlags = kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ kPreservesOpaqueInput_OptimizationFlag |
+ kConstantOutputForConstantInput_OptimizationFlag
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(OptimizationFlags)
+
+ /**
+ * Can be used as a helper to decide which fragment processor OptimizationFlags should be set.
+ * This assumes that the subclass output color will be a modulation of the input color with a
+ * value read from a texture of the passed color type and that the texture contains
+ * premultiplied color or alpha values that are in range.
+ *
+ * Since there are multiple ways in which a sampler may have its coordinates clamped or wrapped,
+ * callers must determine on their own if the sampling uses a decal strategy in any way, in
+ * which case the texture may become transparent regardless of the color type.
+ */
+ static OptimizationFlags ModulateForSamplerOptFlags(GrColorType colorType, bool samplingDecal) {
+ if (samplingDecal) {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ } else {
+ return ModulateForClampedSamplerOptFlags(colorType);
+ }
+ }
+
+ // As above, but callers should somehow ensure or assert their sampler still uses clamping
+ static OptimizationFlags ModulateForClampedSamplerOptFlags(GrColorType colorType) {
+ if (!GrColorTypeHasAlpha(colorType)) {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ kPreservesOpaqueInput_OptimizationFlag;
+ } else {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+ }
+
+ GrFragmentProcessor(ClassID classID, OptimizationFlags optimizationFlags)
+ : INHERITED(classID)
+ , fFlags(optimizationFlags | kComputeLocalCoordsInVertexShader_Flag) {
+ SkASSERT((optimizationFlags & ~kAll_OptimizationFlags) == 0);
+ }
+
+ OptimizationFlags optimizationFlags() const {
+ return static_cast<OptimizationFlags>(kAll_OptimizationFlags & fFlags);
+ }
+
+ /** Useful when you can't call fp->optimizationFlags() on a base class object from a subclass.*/
+ static OptimizationFlags ProcessorOptimizationFlags(const GrFragmentProcessor* fp) {
+ return fp->optimizationFlags();
+ }
+
+ /**
+ * This allows one subclass to access another subclass's implementation of
+ * constantOutputForConstantInput. It must only be called when
+ * hasConstantOutputForConstantInput() is known to be true.
+ */
+ static SkPMColor4f ConstantOutputForConstantInput(const GrFragmentProcessor& fp,
+ const SkPMColor4f& input) {
+ SkASSERT(fp.hasConstantOutputForConstantInput());
+ return fp.constantOutputForConstantInput(input);
+ }
+
+ /**
+ * Fragment Processor subclasses call this from their constructor to register coordinate
+ * transformations. Coord transforms provide a mechanism for a processor to receive coordinates
+ * in their FS code. The matrix expresses a transformation from local space. For a given
+ * fragment the matrix will be applied to the local coordinate that maps to the fragment.
+ *
+ * When the transformation has perspective, the transformed coordinates will have
+ * 3 components. Otherwise they'll have 2.
+ *
+ * This must only be called from the constructor because GrProcessors are immutable. The
+ * processor subclass manages the lifetime of the transformations (this function only stores a
+ * pointer). The GrCoordTransform is typically a member field of the GrProcessor subclass.
+ *
+ * A processor subclass that has multiple methods of construction should always add its coord
+ * transforms in a consistent order. The non-virtual implementation of isEqual() automatically
+ * compares transforms and will assume they line up across the two processor instances.
+ */
+ void addCoordTransform(GrCoordTransform*);
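+    //
+    // e.g. (illustrative; `MyFP`/`fCoordTransform` are hypothetical):
+    //   MyFP(const SkMatrix& m) : INHERITED(kMyFP_ClassID, kNone_OptimizationFlags)
+    //           , fCoordTransform(m) {
+    //       this->addCoordTransform(&fCoordTransform);
+    //   }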
+
+ /**
+ * FragmentProcessor subclasses call this from their constructor to register any child
+ * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
+ * transforms have been added.
+ * This is for processors whose shader code will be composed of nested processors whose output
+ * colors will be combined somehow to produce its output color. Registering these child
+ * processors will allow the ProgramBuilder to automatically handle their transformed coords and
+ * texture accesses and mangle their uniform and output color names.
+ */
+ int registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child);
+
+ void setTextureSamplerCnt(int cnt) {
+ SkASSERT(cnt >= 0);
+ fTextureSamplerCnt = cnt;
+ }
+
+ /**
+ * Helper for implementing onTextureSampler(). E.g.:
+     *     return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
+ */
+ template <typename... Args>
+ static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
+ const Args&... samps) {
+ return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
+ }
+ inline static const TextureSampler& IthTextureSampler(int i);
+
+private:
+ virtual SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& /* inputColor */) const {
+ SK_ABORT("Subclass must override this if advertising this optimization.");
+ }
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrFragmentProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;
+
+ /** Implemented using GLFragmentProcessor::GenKey as described in this class's comment. */
+ virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;
+
+ /**
+ * Subclass implements this to support isEqual(). It will only be called if it is known that
+ * the two processors are of the same subclass (i.e. they return the same object from
+ * getFactory()). The processor subclass should not compare its coord transforms as that will
+ * be performed automatically in the non-virtual isEqual().
+ */
+ virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;
+
+ virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }
+
+ bool hasSameTransforms(const GrFragmentProcessor&) const;
+
+ enum PrivateFlags {
+ kFirstPrivateFlag = kAll_OptimizationFlags + 1,
+ kUsesLocalCoords_Flag = kFirstPrivateFlag,
+ kComputeLocalCoordsInVertexShader_Flag = kFirstPrivateFlag << 1,
+ };
+
+ mutable uint32_t fFlags = kComputeLocalCoordsInVertexShader_Flag;
+
+ int fTextureSamplerCnt = 0;
+
+ SkSTArray<4, GrCoordTransform*, true> fCoordTransforms;
+
+ SkSTArray<1, std::unique_ptr<GrFragmentProcessor>, true> fChildProcessors;
+
+ typedef GrProcessor INHERITED;
+};
+
+/**
+ * Used to represent a texture that is required by a GrFragmentProcessor. It holds a GrTextureProxy
+ * along with an associated GrSamplerState. TextureSamplers don't perform any coord manipulation to
+ * account for texture origin.
+ */
+class GrFragmentProcessor::TextureSampler {
+public:
+ TextureSampler() = default;
+
+ /**
+ * This copy constructor is used by GrFragmentProcessor::clone() implementations.
+ */
+ explicit TextureSampler(const TextureSampler& that)
+ : fProxy(that.fProxy)
+ , fSamplerState(that.fSamplerState) {}
+
+ TextureSampler(sk_sp<GrTextureProxy>, const GrSamplerState& = GrSamplerState::ClampNearest());
+
+ TextureSampler& operator=(const TextureSampler&) = delete;
+
+ void reset(sk_sp<GrTextureProxy>, const GrSamplerState&);
+
+ bool operator==(const TextureSampler& that) const {
+ return this->proxy()->underlyingUniqueID() == that.proxy()->underlyingUniqueID() &&
+ fSamplerState == that.fSamplerState;
+ }
+
+ bool operator!=(const TextureSampler& other) const { return !(*this == other); }
+
+ SkDEBUGCODE(bool isInstantiated() const { return fProxy->isInstantiated(); })
+
+ // 'peekTexture' should only ever be called after a successful 'instantiate' call
+ GrTexture* peekTexture() const {
+ SkASSERT(fProxy->isInstantiated());
+ return fProxy->peekTexture();
+ }
+
+ GrTextureProxy* proxy() const { return fProxy.get(); }
+ const GrSamplerState& samplerState() const { return fSamplerState; }
+ const GrSwizzle& swizzle() const { return this->proxy()->textureSwizzle(); }
+
+ bool isInitialized() const { return SkToBool(fProxy.get()); }
+
+private:
+ sk_sp<GrTextureProxy> fProxy;
+ GrSamplerState fSamplerState;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+const GrFragmentProcessor::TextureSampler& GrFragmentProcessor::IthTextureSampler(int i) {
+ SK_ABORT("Illegal texture sampler index");
+ static const TextureSampler kBogus;
+ return kBogus;
+}
+
+GR_MAKE_BITFIELD_OPS(GrFragmentProcessor::OptimizationFlags)
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGeometryProcessor.h b/gfx/skia/skia/src/gpu/GrGeometryProcessor.h
new file mode 100644
index 0000000000..6f596a5ae4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGeometryProcessor.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGeometryProcessor_DEFINED
+#define GrGeometryProcessor_DEFINED
+
+#include "src/gpu/GrPrimitiveProcessor.h"
+
+/**
+ * A GrGeometryProcessor is a flexible method for rendering a primitive. The GrGeometryProcessor
+ * has complete control over vertex attributes and uniforms(aside from the render target) but it
+ * must obey the same contract as any GrPrimitiveProcessor, specifically it must emit a color and
+ * coverage into the fragment shader. Where this color and coverage come from is completely the
+ * responsibility of the GrGeometryProcessor.
+ */
+class GrGeometryProcessor : public GrPrimitiveProcessor {
+public:
+ GrGeometryProcessor(ClassID classID)
+ : INHERITED(classID)
+ , fWillUseGeoShader(false) {}
+
+ bool willUseGeoShader() const final { return fWillUseGeoShader; }
+
+protected:
+ void setWillUseGeoShader() { fWillUseGeoShader = true; }
+
+ // GPs that need to use either half-float or ubyte colors can just call this to get a correctly
+ // configured Attribute struct
+ static Attribute MakeColorAttribute(const char* name, bool wideColor) {
+ return { name,
+ wideColor ? kHalf4_GrVertexAttribType : kUByte4_norm_GrVertexAttribType,
+ kHalf4_GrSLType };
+ }
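+    //
+    // e.g. (illustrative): a GP constructor might declare
+    //   fColorAttr = MakeColorAttribute("color", /*wideColor=*/false);
+    // which yields a ubyte4-normalized attribute read as half4 in the shader.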
+
+private:
+ bool fWillUseGeoShader;
+
+ typedef GrPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGlyph.h b/gfx/skia/skia/src/gpu/GrGlyph.h
new file mode 100644
index 0000000000..96ea558150
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGlyph.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGlyph_DEFINED
+#define GrGlyph_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/geometry/GrRect.h"
+
+#include "include/core/SkPath.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/SkFixed.h"
+
+struct GrGlyph {
+ enum MaskStyle {
+ kCoverage_MaskStyle,
+ kDistance_MaskStyle
+ };
+
+ static GrMaskFormat FormatFromSkGlyph(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kSDF_Format:
+ // fall through to kA8 -- we store BW and SDF glyphs in our 8-bit cache
+ case SkMask::kA8_Format:
+ return kA8_GrMaskFormat;
+ case SkMask::k3D_Format:
+ return kA8_GrMaskFormat; // ignore the mul and add planes, just use the mask
+ case SkMask::kLCD16_Format:
+ return kA565_GrMaskFormat;
+ case SkMask::kARGB32_Format:
+ return kARGB_GrMaskFormat;
+ default:
+ SkDEBUGFAIL("unsupported SkMask::Format");
+ return kA8_GrMaskFormat;
+ }
+ }
+
+ static MaskStyle MaskStyleFromSkGlyph(const SkGlyph& skGlyph) {
+ return skGlyph.maskFormat() == SkMask::kSDF_Format
+ ? GrGlyph::MaskStyle::kDistance_MaskStyle
+ : GrGlyph::MaskStyle::kCoverage_MaskStyle;
+ }
+
+ GrGlyph(const SkGlyph& skGlyph)
+ : fPackedID{skGlyph.getPackedID()}
+ , fMaskFormat{FormatFromSkGlyph(skGlyph.maskFormat())}
+ , fMaskStyle{MaskStyleFromSkGlyph(skGlyph)}
+ , fBounds{GrIRect16::Make(skGlyph.iRect())} {}
+
+
+ SkRect destRect(SkPoint origin) {
+ return SkRect::MakeXYWH(
+ SkIntToScalar(fBounds.fLeft) + origin.x(),
+ SkIntToScalar(fBounds.fTop) + origin.y(),
+ SkIntToScalar(fBounds.width()),
+ SkIntToScalar(fBounds.height()));
+ }
+
+ SkRect destRect(SkPoint origin, SkScalar textScale) {
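+        // Distance-field glyphs are stored with SK_DistanceFieldInset texels of padding
+        // on each side; the SDF branch below strips that inset before scaling.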
+ if (fMaskStyle == kCoverage_MaskStyle) {
+ return SkRect::MakeXYWH(
+ SkIntToScalar(fBounds.fLeft) * textScale + origin.x(),
+ SkIntToScalar(fBounds.fTop) * textScale + origin.y(),
+ SkIntToScalar(fBounds.width()) * textScale,
+ SkIntToScalar(fBounds.height()) * textScale);
+ } else {
+ return SkRect::MakeXYWH(
+ (SkIntToScalar(fBounds.fLeft) + SK_DistanceFieldInset) * textScale + origin.x(),
+ (SkIntToScalar(fBounds.fTop) + SK_DistanceFieldInset) * textScale + origin.y(),
+ (SkIntToScalar(fBounds.width()) - 2 * SK_DistanceFieldInset) * textScale,
+ (SkIntToScalar(fBounds.height()) - 2 * SK_DistanceFieldInset) * textScale);
+ }
+ }
+
+ int width() const { return fBounds.width(); }
+ int height() const { return fBounds.height(); }
+ uint32_t pageIndex() const { return GrDrawOpAtlas::GetPageIndexFromID(fID); }
+ MaskStyle maskStyle() const { return fMaskStyle; }
+
+    // GetKey and Hash for the hash table.
+ static const SkPackedGlyphID& GetKey(const GrGlyph& glyph) {
+ return glyph.fPackedID;
+ }
+
+ static uint32_t Hash(SkPackedGlyphID key) {
+ return SkChecksum::Mix(key.hash());
+ }
+
+ const SkPackedGlyphID fPackedID;
+ const GrMaskFormat fMaskFormat;
+ const MaskStyle fMaskStyle;
+ const GrIRect16 fBounds;
+ SkIPoint16 fAtlasLocation{0, 0};
+ GrDrawOpAtlas::AtlasID fID{GrDrawOpAtlas::kInvalidAtlasID};
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpu.cpp b/gfx/skia/skia/src/gpu/GrGpu.cpp
new file mode 100644
index 0000000000..40be00406a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpu.cpp
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrGpu.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrNativeRect.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/GrTracing.h"
+#include "src/utils/SkJSONWriter.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrGpu::GrGpu(GrContext* context) : fResetBits(kAll_GrBackendState), fContext(context) {}
+
+GrGpu::~GrGpu() {}
+
+void GrGpu::disconnect(DisconnectType) {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrGpu::IsACopyNeededForRepeatWrapMode(const GrCaps* caps, GrTextureProxy* texProxy,
+ int width, int height,
+ GrSamplerState::Filter filter,
+ GrTextureProducer::CopyParams* copyParams,
+ SkScalar scaleAdjust[2]) {
+ if (!caps->npotTextureTileSupport() &&
+ (!SkIsPow2(width) || !SkIsPow2(height))) {
+        SkASSERT(scaleAdjust);
+        copyParams->fWidth = GrNextPow2(width);
+        copyParams->fHeight = GrNextPow2(height);
+ scaleAdjust[0] = ((SkScalar)copyParams->fWidth) / width;
+ scaleAdjust[1] = ((SkScalar)copyParams->fHeight) / height;
+ switch (filter) {
+ case GrSamplerState::Filter::kNearest:
+ copyParams->fFilter = GrSamplerState::Filter::kNearest;
+ break;
+ case GrSamplerState::Filter::kBilerp:
+ case GrSamplerState::Filter::kMipMap:
+ // We are only ever scaling up so no reason to ever indicate kMipMap.
+ copyParams->fFilter = GrSamplerState::Filter::kBilerp;
+ break;
+ }
+ return true;
+ }
+
+ if (texProxy) {
+ // If the texture format itself doesn't support repeat wrap mode or mipmapping (and
+ // those capabilities are required) force a copy.
+ if (texProxy->hasRestrictedSampling()) {
+ copyParams->fFilter = GrSamplerState::Filter::kNearest;
+ copyParams->fWidth = texProxy->width();
+ copyParams->fHeight = texProxy->height();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
+ GrSamplerState::Filter filter,
+ GrTextureProducer::CopyParams* copyParams) {
+ SkASSERT(texProxy);
+ bool willNeedMips = GrSamplerState::Filter::kMipMap == filter && caps->mipMapSupport();
+ // If the texture format itself doesn't support mipmapping (and those capabilities are required)
+ // force a copy.
+ if (willNeedMips && texProxy->mipMapped() == GrMipMapped::kNo) {
+ copyParams->fFilter = GrSamplerState::Filter::kNearest;
+ copyParams->fWidth = texProxy->width();
+ copyParams->fHeight = texProxy->height();
+ return true;
+ }
+
+ return false;
+}
+
+static bool validate_texel_levels(int w, int h, GrColorType texelColorType,
+ const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
+ SkASSERT(mipLevelCount > 0);
+ bool hasBasePixels = texels[0].fPixels;
+ int levelsWithPixelsCnt = 0;
+ auto bpp = GrColorTypeBytesPerPixel(texelColorType);
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
+ if (texels[currentMipLevel].fPixels) {
+ const size_t minRowBytes = w * bpp;
+ if (caps->writePixelsRowBytesSupport()) {
+ if (texels[currentMipLevel].fRowBytes < minRowBytes) {
+ return false;
+ }
+ if (texels[currentMipLevel].fRowBytes % bpp) {
+ return false;
+ }
+ } else {
+ if (texels[currentMipLevel].fRowBytes != minRowBytes) {
+ return false;
+ }
+ }
+ ++levelsWithPixelsCnt;
+ }
+ if (w == 1 && h == 1) {
+ if (currentMipLevel != mipLevelCount - 1) {
+ return false;
+ }
+ } else {
+ w = std::max(w / 2, 1);
+ h = std::max(h / 2, 1);
+ }
+ }
+ // Either just a base layer or a full stack is required.
+ if (mipLevelCount != 1 && (w != 1 || h != 1)) {
+ return false;
+ }
+ // Can specify just the base, all levels, or no levels.
+ if (!hasBasePixels) {
+ return levelsWithPixelsCnt == 0;
+ }
+ return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
+}
+
+sk_sp<GrTexture> GrGpu::createTextureCommon(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ if (this->caps()->isFormatCompressed(format)) {
+ // Call GrGpu::createCompressedTexture.
+ return nullptr;
+ }
+
+ GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
+ if (!this->caps()->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
+ renderable, renderTargetSampleCnt, mipMapped)) {
+ return nullptr;
+ }
+
+ if (renderable == GrRenderable::kYes) {
+ renderTargetSampleCnt =
+ this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
+ }
+ // Attempt to catch un- or wrongly initialized sample counts.
+ SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
+ this->handleDirtyContext();
+ auto tex = this->onCreateTexture(desc,
+ format,
+ renderable,
+ renderTargetSampleCnt,
+ budgeted,
+ isProtected,
+ mipLevelCount,
+ levelClearMask);
+ if (tex) {
+ SkASSERT(tex->backendFormat() == format);
+ SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
+ if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
+ tex->resourcePriv().removeScratchKey();
+ }
+ fStats.incTextureCreates();
+ if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
+ SkASSERT(GrRenderable::kYes == renderable);
+ tex->asRenderTarget()->setRequiresManualMSAAResolve();
+ }
+ }
+ return tex;
+}
+
+sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrMipMapped mipMapped,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ int mipLevelCount = 1;
+ if (mipMapped == GrMipMapped::kYes) {
+ mipLevelCount = 32 - SkCLZ(static_cast<uint32_t>(SkTMax(desc.fWidth, desc.fHeight)));
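+        // e.g. (illustrative): a 100x60 base gives 32 - SkCLZ(100) = 7 levels
+        // (100x60, 50x30, 25x15, 12x7, 6x3, 3x1, 1x1), i.e. floor(log2(100)) + 1.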
+ }
+ uint32_t levelClearMask =
+ this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
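+    // e.g. (illustrative): with 7 levels the mask is (1 << 7) - 1 == 0x7f, requesting
+    // a clear of every level when shouldInitializeTextures() is set.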
+ auto tex = this->createTextureCommon(desc, format, renderable, renderTargetSampleCnt, budgeted,
+ isProtected, mipLevelCount, levelClearMask);
+ if (tex && mipMapped == GrMipMapped::kYes && levelClearMask) {
+ tex->texturePriv().markMipMapsClean();
+ }
+ return tex;
+}
+
+sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrColorType textureColorType,
+ GrColorType srcColorType,
+ const GrMipLevel texels[],
+ int texelLevelCount) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ if (texelLevelCount) {
+ if (!validate_texel_levels(desc.fWidth, desc.fHeight, srcColorType, texels, texelLevelCount,
+ this->caps())) {
+ return nullptr;
+ }
+ }
+
+ int mipLevelCount = SkTMax(1, texelLevelCount);
+ uint32_t levelClearMask = 0;
+ if (this->caps()->shouldInitializeTextures()) {
+ if (texelLevelCount) {
+ for (int i = 0; i < mipLevelCount; ++i) {
+                // Check level i (not just the base level) for missing pixel data.
+                if (!texels[i].fPixels) {
+ levelClearMask |= static_cast<uint32_t>(1 << i);
+ }
+ }
+ } else {
+ levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
+ }
+ }
+
+ auto tex = this->createTextureCommon(desc, format, renderable, renderTargetSampleCnt, budgeted,
+ isProtected, texelLevelCount, levelClearMask);
+ if (tex) {
+ bool markMipLevelsClean = false;
+ // Currently if level 0 does not have pixels then no other level may, as enforced by
+ // validate_texel_levels.
+ if (texelLevelCount && texels[0].fPixels) {
+ if (!this->writePixels(tex.get(), 0, 0, desc.fWidth, desc.fHeight, textureColorType,
+ srcColorType, texels, texelLevelCount)) {
+ return nullptr;
+ }
+            // Currently, if level 1 of the mip map has pixel data then so must all other levels,
+            // as enforced by validate_texel_levels.
+ markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
+ fStats.incTextureUploads();
+ } else if (levelClearMask && mipLevelCount > 1) {
+ markMipLevelsClean = true;
+ }
+ if (markMipLevelsClean) {
+ tex->texturePriv().markMipMapsClean();
+ }
+ }
+ return tex;
+}
+
+sk_sp<GrTexture> GrGpu::createCompressedTexture(int width, int height,
+ const GrBackendFormat& format,
+ SkImage::CompressionType compressionType,
+ SkBudgeted budgeted, const void* data,
+ size_t dataSize) {
+ // If we ever add a new CompressionType, we should add a check here to make sure the
+    // GrBackendFormat and CompressionType are compatible with each other.
+ SkASSERT(compressionType == SkImage::kETC1_CompressionType);
+
+ this->handleDirtyContext();
+ if (width < 1 || width > this->caps()->maxTextureSize() ||
+ height < 1 || height > this->caps()->maxTextureSize()) {
+ return nullptr;
+ }
+ // Note if we relax the requirement that data must be provided then we must check
+ // caps()->shouldInitializeTextures() here.
+ if (!data) {
+ return nullptr;
+ }
+ if (!this->caps()->isFormatTexturable(format)) {
+ return nullptr;
+ }
+ if (dataSize < GrCompressedDataSize(compressionType, width, height)) {
+ return nullptr;
+ }
+ return this->onCreateCompressedTexture(width, height, format, compressionType, budgeted, data);
+}
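+// Sizing note (illustrative, not from the upstream source): ETC1 encodes each 4x4
+// block in 8 bytes, so a 16x16 texture needs (16/4) * (16/4) * 8 = 128 bytes; the
+// GrCompressedDataSize() check above enforces exactly this minimum.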
+
+sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType colorType,
+ GrWrapOwnership ownership, GrWrapCacheable cacheable,
+ GrIOType ioType) {
+ SkASSERT(ioType != kWrite_GrIOType);
+ this->handleDirtyContext();
+
+ const GrCaps* caps = this->caps();
+ SkASSERT(caps);
+
+ if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
+ return nullptr;
+ }
+ if (backendTex.width() > caps->maxTextureSize() ||
+ backendTex.height() > caps->maxTextureSize()) {
+ return nullptr;
+ }
+
+ return this->onWrapBackendTexture(backendTex, colorType, ownership, cacheable, ioType);
+}
+
+sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
+ int sampleCnt, GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable) {
+ this->handleDirtyContext();
+ if (sampleCnt < 1) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = this->caps();
+
+ if (!caps->isFormatTexturable(backendTex.getBackendFormat()) ||
+ !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
+ return nullptr;
+ }
+
+ if (backendTex.width() > caps->maxRenderTargetSize() ||
+ backendTex.height() > caps->maxRenderTargetSize()) {
+ return nullptr;
+ }
+ sk_sp<GrTexture> tex = this->onWrapRenderableBackendTexture(backendTex, sampleCnt, colorType,
+ ownership, cacheable);
+ SkASSERT(!tex || tex->asRenderTarget());
+ if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
+ tex->asRenderTarget()->setRequiresManualMSAAResolve();
+ }
+ return tex;
+}
+
+sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
+ GrColorType colorType) {
+ this->handleDirtyContext();
+
+ const GrCaps* caps = this->caps();
+
+ if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
+ return nullptr;
+ }
+
+ return this->onWrapBackendRenderTarget(backendRT, colorType);
+}
+
+sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex,
+ int sampleCnt,
+ GrColorType colorType) {
+ this->handleDirtyContext();
+
+ const GrCaps* caps = this->caps();
+
+ int maxSize = caps->maxTextureSize();
+ if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
+ return nullptr;
+ }
+
+ if (!caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
+ return nullptr;
+ }
+
+ auto rt = this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt, colorType);
+ if (rt && sampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
+ rt->setRequiresManualMSAAResolve();
+ }
+ return rt;
+}
+
+sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
+ const GrVkDrawableInfo& vkInfo) {
+ return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
+}
+
+sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
+ const GrVkDrawableInfo& vkInfo) {
+ // This is only supported on Vulkan so we default to returning nullptr here
+ return nullptr;
+}
+
+sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ this->handleDirtyContext();
+ sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
+ if (!this->caps()->reuseScratchBuffers()) {
+ buffer->resourcePriv().removeScratchKey();
+ }
+ return buffer;
+}
+
+bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(dst && src);
+
+ if (dst->readOnly()) {
+ return false;
+ }
+
+ this->handleDirtyContext();
+
+ return this->onCopySurface(dst, src, srcRect, dstPoint);
+}
+
+bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(surface);
+ SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));
+
+ auto subRect = SkIRect::MakeXYWH(left, top, width, height);
+ auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
+ if (!bounds.contains(subRect)) {
+ return false;
+ }
+
+ size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
+ if (!this->caps()->readPixelsRowBytesSupport()) {
+ if (rowBytes != minRowBytes) {
+ return false;
+ }
+ } else {
+ if (rowBytes < minRowBytes) {
+ return false;
+ }
+ if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
+ return false;
+ }
+ }
+
+ if (this->caps()->isFormatCompressed(surface->backendFormat())) {
+ return false;
+ }
+
+ this->handleDirtyContext();
+
+ return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType,
+ buffer, rowBytes);
+}
+
+bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(surface);
+ SkASSERT(this->caps()->isFormatTexturableAndUploadable(surfaceColorType,
+ surface->backendFormat()));
+
+ if (surface->readOnly()) {
+ return false;
+ }
+
+ if (mipLevelCount == 0) {
+ return false;
+ } else if (mipLevelCount == 1) {
+ // We require that if we are not mipped, then the write region is contained in the surface
+ auto subRect = SkIRect::MakeXYWH(left, top, width, height);
+ auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
+ if (!bounds.contains(subRect)) {
+ return false;
+ }
+ } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
+        // We require that if the texels are mipped, then the write region is the entire surface
+ return false;
+ }
+
+ if (!validate_texel_levels(width, height, srcColorType, texels, mipLevelCount, this->caps())) {
+ return false;
+ }
+
+ this->handleDirtyContext();
+ if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
+ texels, mipLevelCount, prepForTexSampling)) {
+ SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
+ this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
+ fStats.incTextureUploads();
+ return true;
+ }
+ return false;
+}
+
+bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(texture);
+ SkASSERT(transferBuffer);
+ SkASSERT(this->caps()->isFormatTexturableAndUploadable(textureColorType,
+ texture->backendFormat()));
+
+ if (texture->readOnly()) {
+ return false;
+ }
+
+ // We require that the write region is contained in the texture
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
+ if (!bounds.contains(subRect)) {
+ return false;
+ }
+
+ size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
+ if (this->caps()->writePixelsRowBytesSupport()) {
+ if (rowBytes < SkToSizeT(bpp * width)) {
+ return false;
+ }
+ if (rowBytes % bpp) {
+ return false;
+ }
+ } else {
+ if (rowBytes != SkToSizeT(bpp * width)) {
+ return false;
+ }
+ }
+
+ this->handleDirtyContext();
+ if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType,
+ bufferColorType, transferBuffer, offset, rowBytes)) {
+ SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
+ this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
+ fStats.incTransfersToTexture();
+
+ return true;
+ }
+ return false;
+}
+
+bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(surface);
+ SkASSERT(transferBuffer);
+ SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));
+
+#ifdef SK_DEBUG
+ auto supportedRead = this->caps()->supportedReadPixelsColorType(
+ surfaceColorType, surface->backendFormat(), bufferColorType);
+ SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
+ SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
+#endif
+
+    // We require that the read region is contained in the surface
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
+ if (!bounds.contains(subRect)) {
+ return false;
+ }
+
+ this->handleDirtyContext();
+ if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
+ bufferColorType, transferBuffer, offset)) {
+ fStats.incTransfersFromSurface();
+ return true;
+ }
+ return false;
+}
+
+bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(texture);
+ SkASSERT(this->caps()->mipMapSupport());
+ SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
+ if (!texture->texturePriv().mipMapsAreDirty()) {
+ // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on the
+ // actual target. This may be caused by things that the drawingManager could not predict,
+ // i.e., ops that don't draw anything, aborting a draw for exceptional circumstances, etc.
+ // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
+ return true;
+ }
+ if (texture->readOnly()) {
+ return false;
+ }
+ if (this->onRegenerateMipMapLevels(texture)) {
+ texture->texturePriv().markMipMapsClean();
+ return true;
+ }
+ return false;
+}
+
+void GrGpu::resetTextureBindings() {
+ this->handleDirtyContext();
+ this->onResetTextureBindings();
+}
+
+void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin origin, ForExternalIO forExternalIO) {
+ SkASSERT(target);
+ this->handleDirtyContext();
+ this->onResolveRenderTarget(target, resolveRect, origin, forExternalIO);
+}
+
+void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
+ uint32_t mipLevels) const {
+ SkASSERT(surface);
+ SkASSERT(!surface->readOnly());
+    // Mark the MIP chain as dirty unless the written bounds are known to be empty
+    // (null bounds means the entire surface was written).
+ if (nullptr == bounds || !bounds->isEmpty()) {
+ GrTexture* texture = surface->asTexture();
+ if (texture && 1 == mipLevels) {
+ texture->texturePriv().markMipMapsDirty();
+ }
+ }
+}
+
+int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
+ SkASSERT(this->caps()->sampleLocationsSupport());
+ SkASSERT(renderTarget->numSamples() > 1 ||
+ (renderTarget->renderTargetPriv().getStencilAttachment() &&
+ renderTarget->renderTargetPriv().getStencilAttachment()->numSamples() > 1));
+
+ SkSTArray<16, SkPoint> sampleLocations;
+ this->querySampleLocations(renderTarget, &sampleLocations);
+ return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations);
+}
+
+GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
+ int n,
+ SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info,
+ const GrPrepareForExternalIORequests& externalRequests) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ this->stats()->incNumFinishFlushes();
+ GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();
+
+ if (this->caps()->semaphoreSupport()) {
+ for (int i = 0; i < info.fNumSemaphores; ++i) {
+ sk_sp<GrSemaphore> semaphore;
+ if (info.fSignalSemaphores[i].isInitialized()) {
+ semaphore = resourceProvider->wrapBackendSemaphore(
+ info.fSignalSemaphores[i],
+ GrResourceProvider::SemaphoreWrapType::kWillSignal,
+ kBorrow_GrWrapOwnership);
+ } else {
+ semaphore = resourceProvider->makeSemaphore(false);
+ }
+ this->insertSemaphore(semaphore);
+
+ if (!info.fSignalSemaphores[i].isInitialized()) {
+ info.fSignalSemaphores[i] = semaphore->backendSemaphore();
+ }
+ }
+ }
+ this->onFinishFlush(proxies, n, access, info, externalRequests);
+ return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
+ : GrSemaphoresSubmitted::kNo;
+}
+
+#ifdef SK_ENABLE_DUMP_GPU
+void GrGpu::dumpJSON(SkJSONWriter* writer) const {
+ writer->beginObject();
+
+ // TODO: Is there anything useful in the base class to dump here?
+
+ this->onDumpJSON(writer);
+
+ writer->endObject();
+}
+#else
+void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
+#endif
+
+#if GR_TEST_UTILS
+
+#if GR_GPU_STATS
+void GrGpu::Stats::dump(SkString* out) {
+ out->appendf("Render Target Binds: %d\n", fRenderTargetBinds);
+ out->appendf("Shader Compilations: %d\n", fShaderCompilations);
+ out->appendf("Textures Created: %d\n", fTextureCreates);
+ out->appendf("Texture Uploads: %d\n", fTextureUploads);
+ out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
+ out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
+ out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
+ out->appendf("Number of draws: %d\n", fNumDraws);
+    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
+}
+
+void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
+ keys->push_back(SkString("render_target_binds")); values->push_back(fRenderTargetBinds);
+ keys->push_back(SkString("shader_compilations")); values->push_back(fShaderCompilations);
+}
+
+#endif // GR_GPU_STATS
+#endif // GR_TEST_UTILS
+
+bool GrGpu::MipMapsAreCorrect(int baseWidth, int baseHeight, GrMipMapped mipMapped,
+ const SkPixmap srcData[], int numMipLevels) {
+ if (!srcData) {
+ return true;
+ }
+
+ if (baseWidth != srcData[0].width() || baseHeight != srcData[0].height()) {
+ return false;
+ }
+
+ if (mipMapped == GrMipMapped::kYes) {
+ if (numMipLevels != SkMipMap::ComputeLevelCount(baseWidth, baseHeight) + 1) {
+ return false;
+ }
+
+ SkColorType colorType = srcData[0].colorType();
+
+ int currentWidth = baseWidth;
+ int currentHeight = baseHeight;
+ for (int i = 1; i < numMipLevels; ++i) {
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+
+ if (srcData[i].colorType() != colorType) { // all levels must have same colorType
+ return false;
+ }
+
+ if (srcData[i].width() != currentWidth || srcData[i].height() != currentHeight) {
+ return false;
+ }
+ }
+ } else if (numMipLevels != 1) {
+ return false;
+ }
+
+ return true;
+}
+
+GrBackendTexture GrGpu::createBackendTexture(int w, int h, const GrBackendFormat& format,
+ GrMipMapped mipMapped, GrRenderable renderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected isProtected) {
+ const GrCaps* caps = this->caps();
+
+ if (!format.isValid()) {
+ return {};
+ }
+
+ if (caps->isFormatCompressed(format)) {
+ // Compressed formats must go through the createCompressedBackendTexture API
+ return {};
+ }
+
+ if (w < 1 || w > caps->maxTextureSize() || h < 1 || h > caps->maxTextureSize()) {
+ return {};
+ }
+
+ // TODO: maybe just ignore the mipMapped parameter in this case
+ if (mipMapped == GrMipMapped::kYes && !this->caps()->mipMapSupport()) {
+ return {};
+ }
+
+ if (!MipMapsAreCorrect(w, h, mipMapped, srcData, numMipLevels)) {
+ return {};
+ }
+
+ return this->onCreateBackendTexture(w, h, format, mipMapped, renderable,
+ srcData, numMipLevels, color, isProtected);
+}
diff --git a/gfx/skia/skia/src/gpu/GrGpu.h b/gfx/skia/skia/src/gpu/GrGpu.h
new file mode 100644
index 0000000000..580345778e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpu.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpu_DEFINED
+#define GrGpu_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkSurface.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/GrSamplePatternDictionary.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/GrTextureProducer.h"
+#include "src/gpu/GrXferProcessor.h"
+#include <map>
+
+class GrBackendRenderTarget;
+class GrBackendSemaphore;
+class GrGpuBuffer;
+class GrContext;
+struct GrContextOptions;
+class GrGLContext;
+class GrMesh;
+class GrPath;
+class GrPathRenderer;
+class GrPathRendererChain;
+class GrPathRendering;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrRenderTarget;
+class GrSemaphore;
+class GrStencilAttachment;
+class GrStencilSettings;
+class GrSurface;
+class GrTexture;
+class SkJSONWriter;
+
+class GrGpu : public SkRefCnt {
+public:
+ GrGpu(GrContext* context);
+ ~GrGpu() override;
+
+ GrContext* getContext() { return fContext; }
+ const GrContext* getContext() const { return fContext; }
+
+ /**
+ * Gets the capabilities of the draw target.
+ */
+ const GrCaps* caps() const { return fCaps.get(); }
+ sk_sp<const GrCaps> refCaps() const { return fCaps; }
+
+ GrPathRendering* pathRendering() { return fPathRendering.get(); }
+
+ enum class DisconnectType {
+ // No cleanup should be attempted, immediately cease making backend API calls
+ kAbandon,
+ // Free allocated resources (not known by GrResourceCache) before returning and
+        // ensure no backend 3D API calls will be made after disconnect() returns.
+ kCleanup,
+ };
+
+ // Called by GrContext when the underlying backend context is already or will be destroyed
+ // before GrContext.
+ virtual void disconnect(DisconnectType);
+
+ /**
+ * The GrGpu object normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the GrGpu that the state was modified and it shouldn't make assumptions
+ * about the state.
+ */
+ void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
+
+ /**
+ * Creates a texture object. If renderable is kYes then the returned texture can
+ * be used as a render target by calling GrTexture::asRenderTarget(). Not all
+ * pixel configs can be used as render targets. Support for configs as textures
+ * or render targets can be checked using GrCaps.
+ *
+ * @param desc describes the texture to be created.
+ * @param format the format for the texture (not currently used).
+ * @param renderable should the resulting texture be renderable
+ * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
+ * kYes. If renderable is kNo then this must be 1.
+ * @param budgeted does this texture count against the resource cache budget?
+ * @param isProtected should the texture be created as protected.
+ * @param texels array of mipmap levels containing texel data to load.
+     *                    If level i has pixels then it is assumed that its dimensions are
+     *                    max(1, floor(desc.fWidth / 2^i)) by max(1, floor(desc.fHeight / 2^i)).
+     *                    If texels[i].fPixels == nullptr for all i < mipLevelCount or
+ * mipLevelCount is 0 then the texture's contents are uninitialized.
+ * If a level has non-null pixels, its row bytes must be a multiple of the
+ * config's bytes-per-pixel. The row bytes must be tight to the
+ * level width if !caps->writePixelsRowBytesSupport().
+ * If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
+ * then all levels must have non-null pixels. All levels must have
+ * non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
+     * @param textureColorType The color type interpretation of the texture for the purpose
+     *                         of uploading texel data.
+ * @param srcColorType The color type of data in texels[].
+     * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
+     *                        floor(log2(max(desc.fWidth, desc.fHeight))) + 1 (a full mip
+     *                        stack). It must be the latter if
+     *                        GrCaps::createTextureMustSpecifyAllLevels() is true.
+ * @return The texture object if successful, otherwise nullptr.
+ */
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected isProtected,
+ GrColorType textureColorType,
+ GrColorType srcColorType,
+ const GrMipLevel texels[],
+ int texelLevelCount);
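+    // Illustrative call shape (not from the upstream source): uploading a full mip
+    // stack for an 8x8 texture passes texelLevelCount == 4, with texels[] describing
+    // levels sized 8x8, 4x4, 2x2, and 1x1, in that order.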
+
+ /**
+ * Simplified createTexture() interface for when there is no initial texel data to upload.
+ */
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrMipMapped,
+ SkBudgeted budgeted,
+ GrProtected isProtected);
+
+ sk_sp<GrTexture> createCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted, const void* data,
+ size_t dataSize);
+
+ /**
+ * Implements GrResourceProvider::wrapBackendTexture
+ */
+ sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&, GrColorType,
+ GrWrapOwnership, GrWrapCacheable, GrIOType);
+
+ /**
+ * Implements GrResourceProvider::wrapRenderableBackendTexture
+ */
+ sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
+ GrColorType, GrWrapOwnership, GrWrapCacheable);
+
+ /**
+ * Implements GrResourceProvider::wrapBackendRenderTarget
+ */
+ sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType colorType);
+
+ /**
+ * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
+ */
+ sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType colorType);
+
+ /**
+ * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
+ */
+ sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
+ const GrVkDrawableInfo&);
+
+ /**
+ * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
+ *
+ * @param size size of buffer to create.
+ * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
+ * @param accessPattern hint to the graphics subsystem about how the data will be accessed.
+ * @param data optional data with which to initialize the buffer.
+ *
+ * @return the buffer if successful, otherwise nullptr.
+ */
+ sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data = nullptr);
+
+ enum class ForExternalIO : bool {
+ kYes = true,
+ kNo = false
+ };
+
+ /**
+ * Resolves MSAA.
+ */
+ void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect, GrSurfaceOrigin,
+ ForExternalIO);
+
+ /**
+ * Uses the base of the texture to recompute the contents of the other levels.
+ */
+ bool regenerateMipMapLevels(GrTexture*);
+
+ /**
+ * If the backend API has stateful texture bindings, this resets them back to defaults.
+ */
+ void resetTextureBindings();
+
+ /**
+ * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
+ *
+ * @param surface The surface to read from
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param surfaceColorType the color type for this use of the surface.
+ * @param dstColorType the color type of the destination buffer.
+ * @param buffer memory to read the rectangle into.
+ * @param rowBytes the number of bytes between consecutive rows. Must be a multiple of
+ * dstColorType's bytes-per-pixel. Must be tight to width if
+ * !caps->readPixelsRowBytesSupport().
+ *
+     * @return true if the read succeeded, false if not. The read can fail
+     *         because the surface doesn't support reading, the color type
+     *         is not allowed for the format of the surface, or the rectangle
+     *         read is not contained in the surface.
+ */
+ bool readPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes);
+
+ /**
+ * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
+ *
+ * @param surface The surface to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param surfaceColorType the color type for this use of the surface.
+ * @param srcColorType the color type of the source buffer.
+ * @param texels array of mipmap levels containing texture data. Row bytes must be a
+ * multiple of srcColorType's bytes-per-pixel. Must be tight to level
+ * width if !caps->writePixelsRowBytesSupport().
+ * @param mipLevelCount number of levels in 'texels'
+     * @param prepForTexSampling Whether the surface should be prepared for texture sampling
+     *                           after the write. This is currently only used by Vulkan for inline
+     *                           uploads to set the layout back to sampled after the upload. Inline
+ * uploads currently can happen between draws in a single op so it is
+ * not trivial to break up the GrOpsTask into two tasks when we see
+ * an inline upload. However, once we are able to support doing that
+ * we can remove this parameter.
+ *
+     * @return true if the write succeeded, false if not. The write can fail
+     *         because the surface doesn't support writing (e.g. read only),
+     *         the color type is not allowed for the format of the surface,
+     *         or the rectangle written is not contained in the surface.
+ */
+ bool writePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling = false);
+
+ /**
+ * Helper for the case of a single level.
+ */
+ bool writePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType, const void* buffer,
+ size_t rowBytes, bool prepForTexSampling = false) {
+ GrMipLevel mipLevel = {buffer, rowBytes};
+ return this->writePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
+ &mipLevel, 1, prepForTexSampling);
+ }
+
+ /**
+ * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
+ * the base level is written to.
+ *
+ * @param texture The texture to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param textureColorType the color type for this use of the surface.
+ * @param bufferColorType the color type of the transfer buffer's pixel data
+ * @param transferBuffer GrBuffer to read pixels from (type must be "kXferCpuToGpu")
+ * @param offset offset from the start of the buffer
+ * @param rowBytes number of bytes between consecutive rows in the buffer. Must be a
+ * multiple of bufferColorType's bytes-per-pixel. Must be tight to width
+ * if !caps->writePixelsRowBytesSupport().
+ */
+ bool transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes);
+
+ /**
+ * Reads the pixels from a rectangle of a surface into a buffer. Use
+ * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
+ * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
+ *
+     * If successful the row bytes in the buffer are always:
+     *   GrColorTypeBytesPerPixel(bufferColorType) * width
+     *
+     * Asserts that the caller has passed a properly aligned offset and that the buffer is
+     * large enough to hold the result.
+ *
+ * @param surface The surface to read from.
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param surfaceColorType the color type for this use of the surface.
+ * @param bufferColorType the color type of the transfer buffer's pixel data
+ * @param transferBuffer GrBuffer to write pixels to (type must be "kXferGpuToCpu")
+ * @param offset offset from the start of the buffer
+ */
+ bool transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset);
+
+    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to dst
+    // take place at higher levels; this function implements the faster copy paths. The rect
+    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
+    // src/dst bounds and non-empty. They must also be in their exact device space coords, including
+    // already being transformed for origin if need be.
+ bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ // Queries the per-pixel HW sample locations for the given render target, and then finds or
+ // assigns a key that uniquely identifies the sample pattern. The actual sample locations can be
+ // retrieved with retrieveSampleLocations().
+ int findOrAssignSamplePatternKey(GrRenderTarget*);
+
+ // Retrieves the per-pixel HW sample locations for the given sample pattern key, and, as a
+ // by-product, the actual number of samples in use. (This may differ from the number of samples
+ // requested by the render target.) Sample locations are returned as 0..1 offsets relative to
+ // the top-left corner of the pixel.
+ const SkTArray<SkPoint>& retrieveSampleLocations(int samplePatternKey) const {
+ return fSamplePatternDictionary.retrieveSampleLocations(samplePatternKey);
+ }
+
+ // Returns a GrOpsRenderPass which GrOpsTasks send draw commands to instead of directly
+ // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
+ virtual GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget* renderTarget, GrSurfaceOrigin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) = 0;
+
+ // Called by GrDrawingManager when flushing.
+ // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
+    // insert the GrFlushInfo's fNumSemaphores semaphores on the gpu and set its backend
+    // semaphores to match the inserted semaphores.
+ GrSemaphoresSubmitted finishFlush(GrSurfaceProxy*[], int n,
+ SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
+ const GrPrepareForExternalIORequests&);
+
+ virtual void submit(GrOpsRenderPass*) = 0;
+
+ virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
+ virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
+ virtual void deleteFence(GrFence) const = 0;
+
+ virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) = 0;
+ virtual sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) = 0;
+ virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
+ virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
+
+ virtual void checkFinishProcs() = 0;
+
+ /**
+ * Put this texture in a safe and known state for use across multiple GrContexts. Depending on
+ * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
+ * semaphore before using this texture.
+ */
+ virtual sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Debugging and Stats
+
+ class Stats {
+ public:
+#if GR_GPU_STATS
+ Stats() = default;
+
+ void reset() { *this = {}; }
+
+ int renderTargetBinds() const { return fRenderTargetBinds; }
+ void incRenderTargetBinds() { fRenderTargetBinds++; }
+
+ int shaderCompilations() const { return fShaderCompilations; }
+ void incShaderCompilations() { fShaderCompilations++; }
+
+ int textureCreates() const { return fTextureCreates; }
+ void incTextureCreates() { fTextureCreates++; }
+
+ int textureUploads() const { return fTextureUploads; }
+ void incTextureUploads() { fTextureUploads++; }
+
+ int transfersToTexture() const { return fTransfersToTexture; }
+ void incTransfersToTexture() { fTransfersToTexture++; }
+
+ int transfersFromSurface() const { return fTransfersFromSurface; }
+ void incTransfersFromSurface() { fTransfersFromSurface++; }
+
+ int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
+ void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
+
+ int numDraws() const { return fNumDraws; }
+ void incNumDraws() { fNumDraws++; }
+
+ int numFailedDraws() const { return fNumFailedDraws; }
+ void incNumFailedDraws() { ++fNumFailedDraws; }
+
+ int numFinishFlushes() const { return fNumFinishFlushes; }
+ void incNumFinishFlushes() { ++fNumFinishFlushes; }
+
+ int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
+ void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
+
+#if GR_TEST_UTILS
+ void dump(SkString*);
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
+#endif
+ private:
+ int fRenderTargetBinds = 0;
+ int fShaderCompilations = 0;
+ int fTextureCreates = 0;
+ int fTextureUploads = 0;
+ int fTransfersToTexture = 0;
+ int fTransfersFromSurface = 0;
+ int fStencilAttachmentCreates = 0;
+ int fNumDraws = 0;
+ int fNumFailedDraws = 0;
+ int fNumFinishFlushes = 0;
+ int fNumScratchTexturesReused = 0;
+#else
+
+#if GR_TEST_UTILS
+ void dump(SkString*) {}
+ void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
+#endif
+ void incRenderTargetBinds() {}
+ void incShaderCompilations() {}
+ void incTextureCreates() {}
+ void incTextureUploads() {}
+        void incTransfersToTexture() {}
+        void incTransfersFromSurface() {}
+        void incStencilAttachmentCreates() {}
+        void incNumDraws() {}
+        void incNumFailedDraws() {}
+        void incNumFinishFlushes() {}
+        void incNumScratchTexturesReused() {}
+#endif
+ };
+
+ Stats* stats() { return &fStats; }
+ void dumpJSON(SkJSONWriter*) const;
+
+ /**
+ * Creates a texture directly in the backend API without wrapping it in a GrTexture.
+ * Must be matched with a call to deleteBackendTexture().
+ *
+ * If srcData is provided it will be used to initialize the texture. If srcData is
+ * not provided but a color is then it is used to initialize the texture. If neither
+ * srcData nor a color is provided then the texture is left uninitialized.
+ *
+ * If srcData is provided and mipMapped is kYes then data for all the miplevels must be
+ * provided (or the method will fail). If only a color is provided and mipMapped is kYes
+ * then all the mip levels will be allocated and initialized to the color. If neither
+ * srcData nor a color is provided but mipMapped is kYes then the mip levels will be allocated
+ * but left uninitialized.
+ *
+ * Note: if more than one pixmap is provided (i.e., for mipmap levels) they must all share
+ * the same SkColorType.
+ */
+ GrBackendTexture createBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected isProtected);
+
+ /**
+ * Frees a texture created by createBackendTexture(). If ownership of the backend
+ * texture has been transferred to a GrContext using adopt semantics this should not be called.
+ */
+ virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
+
+ virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
+
+#if GR_TEST_UTILS
+ /** Check a handle represents an actual texture in the backend API that has not been freed. */
+ virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;
+
+ virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h,
+ GrColorType) = 0;
+
+ virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;
+
+ // This is only to be used in GL-specific tests.
+ virtual const GrGLContext* glContextForTesting() const { return nullptr; }
+
+ // This is only to be used by testing code
+ virtual void resetShaderCacheForTesting() const {}
+
+ /**
+ * Flushes all work to the gpu and forces the GPU to wait until all the gpu work has completed.
+ * This is for testing purposes only.
+ */
+ virtual void testingOnly_flushGpuAndSync() = 0;
+
+ /**
+ * Inserted as a pair around a block of code to do a GPU frame capture.
+ * Currently only works with the Metal backend.
+ */
+ virtual void testingOnly_startCapture() {}
+ virtual void testingOnly_endCapture() {}
+#endif
+
+    // width and height may be larger than the render target's (if the underlying API allows it).
+    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
+    // caller owns the ref on the GrStencilAttachment.
+ virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(
+ const GrRenderTarget*, int width, int height, int numStencilSamples) = 0;
+
+ // Determines whether a texture will need to be rescaled in order to be used with the
+ // GrSamplerState.
+ static bool IsACopyNeededForRepeatWrapMode(const GrCaps*, GrTextureProxy* texProxy,
+ int width, int height,
+ GrSamplerState::Filter,
+ GrTextureProducer::CopyParams*,
+ SkScalar scaleAdjust[2]);
+
+ // Determines whether a texture will need to be copied because the draw requires mips but the
+    // texture doesn't have any. This call should only be checked if IsACopyNeededForRepeatWrapMode
+    // fails. If the previous call succeeds, then a copy should be done using those params and the
+ // mip mapping requirements will be handled there.
+ static bool IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
+ GrSamplerState::Filter filter,
+ GrTextureProducer::CopyParams* copyParams);
+
+ void handleDirtyContext() {
+ if (fResetBits) {
+ this->resetContext();
+ }
+ }
+
+ /**
+ * Returns a key that represents the sampler that will be created for the passed in parameters.
+ * Currently this key is only used when we are building a vulkan pipeline with immutable
+ * samplers. In that case, we need our cache key to also contain this key.
+ *
+ * A return value of 0 indicates that the program/pipeline we are creating is not affected by
+ * the sampler.
+ */
+ virtual uint32_t getExtraSamplerKeyForProgram(const GrSamplerState&, const GrBackendFormat&) {
+ return 0;
+ }
+
+ virtual void storeVkPipelineCacheData() {}
+
+protected:
+ static bool MipMapsAreCorrect(int baseWidth, int baseHeight, GrMipMapped,
+ const SkPixmap srcData[], int numMipLevels);
+
+ // Handles cases where a surface will be updated without a call to flushRenderTarget.
+ void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
+ uint32_t mipLevels = 1) const;
+
+ Stats fStats;
+ std::unique_ptr<GrPathRendering> fPathRendering;
+ // Subclass must initialize this in its constructor.
+ sk_sp<const GrCaps> fCaps;
+
+private:
+ virtual GrBackendTexture onCreateBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected) = 0;
+
+ // called when the 3D context state is unknown. Subclass should emit any
+ // assumed 3D context state and dirty any state cache.
+ virtual void onResetContext(uint32_t resetBits) = 0;
+
+ // Implementation of resetTextureBindings.
+ virtual void onResetTextureBindings() {}
+
+ // Queries the effective number of samples in use by the hardware for the given render target,
+ // and queries the individual sample locations.
+ virtual void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) = 0;
+
+ // Called before certain draws in order to guarantee coherent results from dst reads.
+ virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
+
+ // overridden by backend-specific derived class to create objects.
+    // Texture size, renderability, format support, sample count will have already been validated
+ // in base class before onCreateTexture is called.
+ // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
+ virtual sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected,
+                                             int mipLevelCount,
+ uint32_t levelClearMask) = 0;
+ virtual sk_sp<GrTexture> onCreateCompressedTexture(int width, int height,
+ const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) = 0;
+ virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType,
+ GrWrapOwnership, GrWrapCacheable, GrIOType) = 0;
+ virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
+ GrColorType, GrWrapOwnership,
+ GrWrapCacheable) = 0;
+ virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) = 0;
+ virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType) = 0;
+ virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
+ const GrVkDrawableInfo&);
+
+ virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data) = 0;
+
+ // overridden by backend-specific derived class to perform the surface read
+ virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) = 0;
+
+ // overridden by backend-specific derived class to perform the surface write
+ virtual bool onWritePixels(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) = 0;
+
+ // overridden by backend-specific derived class to perform the texture transfer
+ virtual bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
+                                    GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset,
+ size_t rowBytes) = 0;
+ // overridden by backend-specific derived class to perform the surface transfer
+ virtual bool onTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) = 0;
+
+ // overridden by backend-specific derived class to perform the resolve
+ virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO) = 0;
+
+ // overridden by backend specific derived class to perform mip map level regeneration.
+ virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;
+
+ // overridden by backend specific derived class to perform the copy surface
+ virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) = 0;
+
+ virtual void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo&, const GrPrepareForExternalIORequests&) = 0;
+
+#ifdef SK_ENABLE_DUMP_GPU
+ virtual void onDumpJSON(SkJSONWriter*) const {}
+#endif
+
+ sk_sp<GrTexture> createTextureCommon(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCnt,
+ uint32_t levelClearMask);
+
+ void resetContext() {
+ this->onResetContext(fResetBits);
+ fResetBits = 0;
+ }
+
+ uint32_t fResetBits;
+ // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
+ GrContext* fContext;
+ GrSamplePatternDictionary fSamplePatternDictionary;
+
+ friend class GrPathRendering;
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuBuffer.cpp b/gfx/skia/skia/src/gpu/GrGpuBuffer.cpp
new file mode 100644
index 0000000000..64e1167723
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuBuffer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrGpuBuffer.h"
+
+GrGpuBuffer::GrGpuBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
+ GrAccessPattern pattern)
+ : GrGpuResource(gpu)
+ , fMapPtr(nullptr)
+ , fSizeInBytes(sizeInBytes)
+ , fAccessPattern(pattern)
+ , fIntendedType(type) {}
+
+void* GrGpuBuffer::map() {
+ if (this->wasDestroyed()) {
+ return nullptr;
+ }
+ if (!fMapPtr) {
+ this->onMap();
+ }
+ return fMapPtr;
+}
+
+void GrGpuBuffer::unmap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ SkASSERT(fMapPtr);
+ this->onUnmap();
+ fMapPtr = nullptr;
+}
+
+bool GrGpuBuffer::isMapped() const { return SkToBool(fMapPtr); }
+
+bool GrGpuBuffer::updateData(const void* src, size_t srcSizeInBytes) {
+ SkASSERT(!this->isMapped());
+ SkASSERT(srcSizeInBytes <= fSizeInBytes);
+ if (this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
+ return false;
+ }
+ return this->onUpdateData(src, srcSizeInBytes);
+}
+
+void GrGpuBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
+ GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+ GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
+    // TODO: There's not always a reason to cache a buffer by type. In some (all?) APIs it's just
+    //       a chunk of memory we can use/reuse for any type of data. We really only need to
+    //       differentiate between the "read" types (e.g. GrGpuBufferType::kXferGpuToCpu) and
+    //       "draw" types.
+ builder[0] = SkToU32(intendedType);
+ builder[1] = (uint32_t)size;
+ if (sizeof(size_t) > 4) {
+ builder[2] = (uint32_t)((uint64_t)size >> 32);
+ }
+}
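+// For example (illustrative): on a 64-bit build, a 4096-byte dynamic vertex buffer
+// (GrGpuBufferType::kVertex) fills builder[0] with the intended type, builder[1]
+// with 0x1000, and builder[2] with the high 32 bits of the size (zero here).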
+
+void GrGpuBuffer::computeScratchKey(GrScratchKey* key) const {
+ if (SkIsPow2(fSizeInBytes) && kDynamic_GrAccessPattern == fAccessPattern) {
+ ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrGpuBuffer.h b/gfx/skia/skia/src/gpu/GrGpuBuffer.h
new file mode 100644
index 0000000000..7e3561eb26
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuBuffer.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuBuffer_DEFINED
+#define GrGpuBuffer_DEFINED
+
+#include "include/gpu/GrGpuResource.h"
+#include "src/gpu/GrBuffer.h"
+
+class GrGpu;
+
+class GrGpuBuffer : public GrGpuResource, public GrBuffer {
+public:
+ /**
+ * Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
+ * "static" and "stream" patterns are disqualified by nature from being cached and reused.)
+ */
+ static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
+
+ GrAccessPattern accessPattern() const { return fAccessPattern; }
+
+ size_t size() const final { return fSizeInBytes; }
+
+ void ref() const final { GrGpuResource::ref(); }
+
+ void unref() const final { GrGpuResource::unref(); }
+
+ /**
+ * Maps the buffer to be read or written by the CPU.
+ *
+     * It is an error to draw from the buffer, or to transfer to/from the buffer, while it is
+     * mapped. Mapping may fail if the backend doesn't support mapping the buffer. Once a buffer
+     * is mapped, subsequent calls to map() trivially succeed. No matter how many times map() is
+     * called, unmap() will unmap the buffer on the first call if it is mapped.
+ *
+ * If the buffer is of type GrGpuBufferType::kXferGpuToCpu then it is mapped for reading only.
+     * Otherwise it is mapped for writing only. Writing to a buffer that is mapped for reading or
+     * vice versa produces undefined results. If the buffer is mapped for writing then the buffer's
+ * previous contents are invalidated.
+ *
+ * @return a pointer to the data or nullptr if the map fails.
+ */
+ void* map();
+
+ /**
+ * Unmaps the buffer if it is mapped.
+ *
+ * The pointer returned by the previous map call will no longer be valid.
+ */
+ void unmap();
+
+ /**
+ * Queries whether the buffer has been mapped.
+ *
+ * @return true if the buffer is mapped, false otherwise.
+ */
+ bool isMapped() const;
+
+ bool isCpuBuffer() const final { return false; }
+
+ /**
+ * Updates the buffer data.
+ *
+ * The size of the buffer will be preserved. The src data will be
+ * placed at the beginning of the buffer and any remaining contents will
+     * be undefined. srcSizeInBytes must be <= the buffer size.
+ *
+ * The buffer must not be mapped.
+ *
+ * Fails for GrGpuBufferType::kXferGpuToCpu.
+ *
+ * Note that buffer updates do not go through GrContext and therefore are
+ * not serialized with other operations.
+ *
+ * @return returns true if the update succeeds, false otherwise.
+ */
+ bool updateData(const void* src, size_t srcSizeInBytes);
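+    // Usage sketch (illustrative only; assumes a valid GrGpu* named gpu and a
+    // 64-byte vertex array named verts):
+    //   sk_sp<GrGpuBuffer> buf = gpu->createBuffer(64, GrGpuBufferType::kVertex,
+    //                                              kDynamic_GrAccessPattern);
+    //   if (void* p = buf->map()) {        // mapping may fail on some backends
+    //       memcpy(p, verts, 64);
+    //       buf->unmap();
+    //   } else {
+    //       buf->updateData(verts, 64);    // fallback when mapping isn't supported
+    //   }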
+
+protected:
+ GrGpuBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern);
+ GrGpuBufferType intendedType() const { return fIntendedType; }
+
+ void* fMapPtr;
+
+private:
+ virtual void onMap() = 0;
+ virtual void onUnmap() = 0;
+ virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
+
+ size_t onGpuMemorySize() const override { return fSizeInBytes; }
+ const char* getResourceType() const override { return "Buffer Object"; }
+ void computeScratchKey(GrScratchKey* key) const override;
+
+ size_t fSizeInBytes;
+ GrAccessPattern fAccessPattern;
+ GrGpuBufferType fIntendedType;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuResource.cpp b/gfx/skia/skia/src/gpu/GrGpuResource.cpp
new file mode 100644
index 0000000000..71f92932fa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResource.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrGpuResource.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrResourceCache.h"
+#include <atomic>
+
+static inline GrResourceCache* get_resource_cache(GrGpu* gpu) {
+ SkASSERT(gpu);
+ SkASSERT(gpu->getContext());
+ SkASSERT(gpu->getContext()->priv().getResourceCache());
+ return gpu->getContext()->priv().getResourceCache();
+}
+
+GrGpuResource::GrGpuResource(GrGpu* gpu) : fGpu(gpu), fUniqueID(CreateUniqueID()) {
+ SkDEBUGCODE(fCacheArrayIndex = -1);
+}
+
+void GrGpuResource::registerWithCache(SkBudgeted budgeted) {
+ SkASSERT(fBudgetedType == GrBudgetedType::kUnbudgetedUncacheable);
+ fBudgetedType = budgeted == SkBudgeted::kYes ? GrBudgetedType::kBudgeted
+ : GrBudgetedType::kUnbudgetedUncacheable;
+ this->computeScratchKey(&fScratchKey);
+ get_resource_cache(fGpu)->resourceAccess().insertResource(this);
+}
+
+void GrGpuResource::registerWithCacheWrapped(GrWrapCacheable wrapType) {
+ SkASSERT(fBudgetedType == GrBudgetedType::kUnbudgetedUncacheable);
+ // Resources referencing wrapped objects are never budgeted. They may be cached or uncached.
+ fBudgetedType = wrapType == GrWrapCacheable::kNo ? GrBudgetedType::kUnbudgetedUncacheable
+ : GrBudgetedType::kUnbudgetedCacheable;
+ fRefsWrappedObjects = true;
+ get_resource_cache(fGpu)->resourceAccess().insertResource(this);
+}
+
+GrGpuResource::~GrGpuResource() {
+ // The cache should have released or destroyed this resource.
+ SkASSERT(this->wasDestroyed());
+}
+
+void GrGpuResource::release() {
+ SkASSERT(fGpu);
+ this->onRelease();
+ get_resource_cache(fGpu)->resourceAccess().removeResource(this);
+ fGpu = nullptr;
+ fGpuMemorySize = 0;
+}
+
+void GrGpuResource::abandon() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ SkASSERT(fGpu);
+ this->onAbandon();
+ get_resource_cache(fGpu)->resourceAccess().removeResource(this);
+ fGpu = nullptr;
+ fGpuMemorySize = 0;
+}
+
+void GrGpuResource::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ if (this->fRefsWrappedObjects && !traceMemoryDump->shouldDumpWrappedObjects()) {
+ return;
+ }
+
+ this->dumpMemoryStatisticsPriv(traceMemoryDump, this->getResourceName(),
+ this->getResourceType(), this->gpuMemorySize());
+}
+
+void GrGpuResource::dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& resourceName,
+ const char* type, size_t size) const {
+ const char* tag = "Scratch";
+ if (fUniqueKey.isValid()) {
+ tag = (fUniqueKey.tag() != nullptr) ? fUniqueKey.tag() : "Other";
+ }
+
+ traceMemoryDump->dumpNumericValue(resourceName.c_str(), "size", "bytes", size);
+ traceMemoryDump->dumpStringValue(resourceName.c_str(), "type", type);
+ traceMemoryDump->dumpStringValue(resourceName.c_str(), "category", tag);
+ if (this->isPurgeable()) {
+ traceMemoryDump->dumpNumericValue(resourceName.c_str(), "purgeable_size", "bytes", size);
+ }
+
+ this->setMemoryBacking(traceMemoryDump, resourceName);
+}
+
+bool GrGpuResource::isPurgeable() const {
+ // Resources in the kUnbudgetedCacheable state are never purgeable when they have a unique
+ // key. The key must be removed/invalidated to make them purgeable.
+ return !this->hasRef() &&
+ !(fBudgetedType == GrBudgetedType::kUnbudgetedCacheable && fUniqueKey.isValid());
+}
+
+bool GrGpuResource::hasRef() const { return this->internalHasRef(); }
+
+SkString GrGpuResource::getResourceName() const {
+ // Dump resource as "skia/gpu_resources/resource_#".
+ SkString resourceName("skia/gpu_resources/resource_");
+ resourceName.appendU32(this->uniqueID().asUInt());
+ return resourceName;
+}
+
+const GrContext* GrGpuResource::getContext() const {
+ if (fGpu) {
+ return fGpu->getContext();
+ } else {
+ return nullptr;
+ }
+}
+
+GrContext* GrGpuResource::getContext() {
+ if (fGpu) {
+ return fGpu->getContext();
+ } else {
+ return nullptr;
+ }
+}
+
+void GrGpuResource::removeUniqueKey() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ SkASSERT(fUniqueKey.isValid());
+ get_resource_cache(fGpu)->resourceAccess().removeUniqueKey(this);
+}
+
+void GrGpuResource::setUniqueKey(const GrUniqueKey& key) {
+ SkASSERT(this->internalHasRef());
+ SkASSERT(key.isValid());
+
+ // Uncached resources can never have a unique key, unless they're wrapped resources. Wrapped
+ // resources are a special case: the unique keys give us a weak ref so that we can reuse the
+ // same resource (rather than re-wrapping). When a wrapped resource is no longer referenced,
+ // it will always be released - it is never converted to a scratch resource.
+ if (this->resourcePriv().budgetedType() != GrBudgetedType::kBudgeted &&
+ !this->fRefsWrappedObjects) {
+ return;
+ }
+
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ get_resource_cache(fGpu)->resourceAccess().changeUniqueKey(this, key);
+}
+
+void GrGpuResource::notifyRefCntWillBeZero() const {
+ GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
+ mutableThis->willRemoveLastRef();
+}
+
+void GrGpuResource::notifyRefCntIsZero() const {
+ if (this->wasDestroyed()) {
+ // We've already been removed from the cache. Goodbye cruel world!
+ delete this;
+ return;
+ }
+
+ GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
+
+ get_resource_cache(fGpu)->resourceAccess().notifyRefCntReachedZero(mutableThis);
+}
+
+void GrGpuResource::removeScratchKey() {
+ if (!this->wasDestroyed() && fScratchKey.isValid()) {
+ get_resource_cache(fGpu)->resourceAccess().willRemoveScratchKey(this);
+ fScratchKey.reset();
+ }
+}
+
+void GrGpuResource::makeBudgeted() {
+ // We should never make a wrapped resource budgeted.
+ SkASSERT(!fRefsWrappedObjects);
+ // Only wrapped resources can be in the kUnbudgetedCacheable state.
+ SkASSERT(fBudgetedType != GrBudgetedType::kUnbudgetedCacheable);
+ if (!this->wasDestroyed() && fBudgetedType == GrBudgetedType::kUnbudgetedUncacheable) {
+ // Currently resources referencing wrapped objects are not budgeted.
+ fBudgetedType = GrBudgetedType::kBudgeted;
+ get_resource_cache(fGpu)->resourceAccess().didChangeBudgetStatus(this);
+ }
+}
+
+void GrGpuResource::makeUnbudgeted() {
+ if (!this->wasDestroyed() && fBudgetedType == GrBudgetedType::kBudgeted &&
+ !fUniqueKey.isValid()) {
+ fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
+ get_resource_cache(fGpu)->resourceAccess().didChangeBudgetStatus(this);
+ }
+}
+
+uint32_t GrGpuResource::CreateUniqueID() {
+ static std::atomic<uint32_t> nextID{1};
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidUniqueID);
+ return id;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrGpuResource::ProxyAccess::ref(GrResourceCache* cache) {
+ SkASSERT(cache == fResource->getContext()->priv().getResourceCache());
+ cache->resourceAccess().refResource(fResource);
+}
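
CreateUniqueID() above is a small but load-bearing pattern: an atomic counter that loops so the reserved invalid value can never be handed out, even if the counter wraps after 2^32 allocations. A standalone sketch of the same pattern, with kInvalidID standing in for SK_InvalidUniqueID (relaxed ordering suffices here because only uniqueness matters, not ordering):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kInvalidID = 0;  // stand-in for SK_InvalidUniqueID

    uint32_t create_unique_id() {
        static std::atomic<uint32_t> nextID{1};
        uint32_t id;
        do {
            id = nextID.fetch_add(1, std::memory_order_relaxed);
        } while (id == kInvalidID);  // skip the sentinel if the counter wraps
        return id;
    }
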
diff --git a/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h b/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h
new file mode 100644
index 0000000000..b18949c262
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResourceCacheAccess_DEFINED
+#define GrGpuResourceCacheAccess_DEFINED
+
+#include "include/gpu/GrGpuResource.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+
+namespace skiatest {
+ class Reporter;
+}
+
+/**
+ * This class gives GrResourceCache privileged access to GrGpuResource objects, beyond what the
+ * public GrGpuResource interface exposes.
+ */
+class GrGpuResource::CacheAccess {
+private:
+ /** The cache is allowed to go from no refs to 1 ref. */
+ void ref() { fResource->addInitialRef(); }
+
+ /**
+ * Is the resource currently cached as scratch? This means it is cached, has a valid scratch
+ * key, and does not have a unique key.
+ */
+ bool isScratch() const {
+ return !fResource->getUniqueKey().isValid() && fResource->fScratchKey.isValid() &&
+ GrBudgetedType::kBudgeted == fResource->resourcePriv().budgetedType();
+ }
+
+ /**
+ * Called by the cache to delete the resource under normal circumstances.
+ */
+ void release() {
+ fResource->release();
+ if (!fResource->hasRef()) {
+ delete fResource;
+ }
+ }
+
+ /**
+ * Called by the cache to delete the resource when the backend 3D context is no longer valid.
+ */
+ void abandon() {
+ fResource->abandon();
+ if (!fResource->hasRef()) {
+ delete fResource;
+ }
+ }
+
+ /** Called by the cache to assign a new unique key. */
+ void setUniqueKey(const GrUniqueKey& key) { fResource->fUniqueKey = key; }
+
+    /** Is the resource currently ref'ed? */
+ bool hasRef() const { return fResource->hasRef(); }
+
+ /** Called by the cache to make the unique key invalid. */
+ void removeUniqueKey() { fResource->fUniqueKey.reset(); }
+
+ uint32_t timestamp() const { return fResource->fTimestamp; }
+ void setTimestamp(uint32_t ts) { fResource->fTimestamp = ts; }
+
+ void setTimeWhenResourceBecomePurgeable() {
+ SkASSERT(fResource->isPurgeable());
+ fResource->fTimeWhenBecamePurgeable = GrStdSteadyClock::now();
+ }
+ /**
+ * Called by the cache to determine whether this resource should be purged based on the length
+ * of time it has been available for purging.
+ */
+ GrStdSteadyClock::time_point timeWhenResourceBecamePurgeable() {
+ SkASSERT(fResource->isPurgeable());
+ return fResource->fTimeWhenBecamePurgeable;
+ }
+
+ int* accessCacheIndex() const { return &fResource->fCacheArrayIndex; }
+
+ CacheAccess(GrGpuResource* resource) : fResource(resource) {}
+ CacheAccess(const CacheAccess& that) : fResource(that.fResource) {}
+ CacheAccess& operator=(const CacheAccess&); // unimpl
+
+ // No taking addresses of this type.
+ const CacheAccess* operator&() const = delete;
+ CacheAccess* operator&() = delete;
+
+ GrGpuResource* fResource;
+
+ friend class GrGpuResource; // to construct/copy this type.
+ friend class GrResourceCache; // to use this type
+ friend void test_unbudgeted_to_scratch(skiatest::Reporter* reporter); // for unit testing
+};
+
+inline GrGpuResource::CacheAccess GrGpuResource::cacheAccess() { return CacheAccess(this); }
+
+inline const GrGpuResource::CacheAccess GrGpuResource::cacheAccess() const {
+ return CacheAccess(const_cast<GrGpuResource*>(this));
+}
+
+#endif
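
CacheAccess is a narrow "window" type: every member is private, only the declared friends (GrGpuResource, GrResourceCache, one test hook) may use it, and operator& is deleted so the window can exist only as a temporary. A stripped-down sketch of the idiom with illustrative names (Widget, Cache), not Skia types:

    // Sketch of the privileged-access idiom used by CacheAccess/ResourcePriv.
    class Widget {
    public:
        class PrivAccess;   // a narrow window onto Widget's internals
        PrivAccess priv();

    private:
        int fSecret = 42;
    };

    class Widget::PrivAccess {
    private:
        explicit PrivAccess(Widget* w) : fWidget(w) {}
        int secret() const { return fWidget->fSecret; }

        // No taking addresses of this type: it only lives as a temporary.
        const PrivAccess* operator&() const = delete;
        PrivAccess* operator&() = delete;

        Widget* fWidget;

        friend class Widget;  // to construct the window
        friend class Cache;   // the one privileged client
    };

    inline Widget::PrivAccess Widget::priv() { return PrivAccess(this); }

    class Cache {
    public:
        static int peek(Widget& w) { return w.priv().secret(); }
    };

This grants the cache capabilities the resource's public API does not expose, without making the cache a friend of the resource class itself.
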
diff --git a/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h b/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h
new file mode 100644
index 0000000000..fc1e5935b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResourcePriv_DEFINED
+#define GrGpuResourcePriv_DEFINED
+
+#include "include/gpu/GrGpuResource.h"
+
+/**
+ * This class allows code internal to Skia privileged access to manage the cache keys and budget
+ * status of a GrGpuResource object.
+ */
+class GrGpuResource::ResourcePriv {
+public:
+ /**
+ * Sets a unique key for the resource. If the resource was previously cached as scratch it will
+ * be converted to a uniquely-keyed resource. If the key is invalid then this is equivalent to
+ * removeUniqueKey(). If another resource is using the key then its unique key is removed and
+ * this resource takes over the key.
+ */
+ void setUniqueKey(const GrUniqueKey& key) { fResource->setUniqueKey(key); }
+
+ /** Removes the unique key from a resource. If the resource has a scratch key, it may be
+ preserved for recycling as scratch. */
+ void removeUniqueKey() { fResource->removeUniqueKey(); }
+
+ /**
+ * If the resource is uncached make it cached. Has no effect on resources that are wrapped or
+ * already cached.
+ */
+ void makeBudgeted() { fResource->makeBudgeted(); }
+
+ /**
+ * If the resource is cached make it uncached. Has no effect on resources that are wrapped or
+ * already uncached. Furthermore, resources with unique keys cannot be made unbudgeted.
+ */
+ void makeUnbudgeted() { fResource->makeUnbudgeted(); }
+
+ /**
+ * Get the resource's budgeted-type which indicates whether it counts against the resource cache
+ * budget and if not whether it is allowed to be cached.
+ */
+ GrBudgetedType budgetedType() const {
+ SkASSERT(GrBudgetedType::kBudgeted == fResource->fBudgetedType ||
+ !fResource->getUniqueKey().isValid() || fResource->fRefsWrappedObjects);
+ return fResource->fBudgetedType;
+ }
+
+ /**
+ * Is the resource object wrapping an externally allocated GPU resource?
+ */
+ bool refsWrappedObjects() const { return fResource->fRefsWrappedObjects; }
+
+ /**
+ * If this resource can be used as a scratch resource this returns a valid scratch key.
+ * Otherwise it returns a key for which isNullScratch is true. The resource may currently be
+ * used as a uniquely keyed resource rather than scratch. Check isScratch().
+ */
+ const GrScratchKey& getScratchKey() const { return fResource->fScratchKey; }
+
+ /**
+ * If the resource has a scratch key, the key will be removed. Since scratch keys are installed
+ * at resource creation time, this means the resource will never again be used as scratch.
+ */
+ void removeScratchKey() const { fResource->removeScratchKey(); }
+
+ bool isPurgeable() const { return fResource->isPurgeable(); }
+
+ bool hasRef() const { return fResource->hasRef(); }
+
+protected:
+ ResourcePriv(GrGpuResource* resource) : fResource(resource) { }
+ ResourcePriv(const ResourcePriv& that) : fResource(that.fResource) {}
+    ResourcePriv& operator=(const ResourcePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const ResourcePriv* operator&() const;
+ ResourcePriv* operator&();
+
+ GrGpuResource* fResource;
+
+ friend class GrGpuResource; // to construct/copy this type.
+};
+
+inline GrGpuResource::ResourcePriv GrGpuResource::resourcePriv() { return ResourcePriv(this); }
+
+inline const GrGpuResource::ResourcePriv GrGpuResource::resourcePriv() const {
+ return ResourcePriv(const_cast<GrGpuResource*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrImageContext.cpp b/gfx/skia/skia/src/gpu/GrImageContext.cpp
new file mode 100644
index 0000000000..db4c9cba54
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageContext.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrImageContext.h"
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrImageContextPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+GrImageContext::GrImageContext(GrBackendApi backend,
+ const GrContextOptions& options,
+ uint32_t contextID)
+ : INHERITED(backend, options, contextID) {
+ fProxyProvider.reset(new GrProxyProvider(this));
+}
+
+GrImageContext::~GrImageContext() {}
+
+void GrImageContext::abandonContext() {
+ ASSERT_SINGLE_OWNER
+
+ fAbandoned = true;
+}
+
+bool GrImageContext::abandoned() const {
+ ASSERT_SINGLE_OWNER
+
+ return fAbandoned;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+sk_sp<const GrCaps> GrImageContextPriv::refCaps() const {
+ return fContext->refCaps();
+}
+
+sk_sp<GrSkSLFPFactoryCache> GrImageContextPriv::fpFactoryCache() {
+ return fContext->fpFactoryCache();
+}
diff --git a/gfx/skia/skia/src/gpu/GrImageContextPriv.h b/gfx/skia/skia/src/gpu/GrImageContextPriv.h
new file mode 100644
index 0000000000..db9313e51b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageContextPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageContextPriv_DEFINED
+#define GrImageContextPriv_DEFINED
+
+#include "include/private/GrImageContext.h"
+
+/** Class that exposes methods on GrImageContext that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrImageContext. It should never have
+ additional data members or virtual methods. */
+class GrImageContextPriv {
+public:
+ // from GrContext_Base
+ uint32_t contextID() const { return fContext->contextID(); }
+
+ bool matches(GrContext_Base* candidate) const { return fContext->matches(candidate); }
+
+ const GrContextOptions& options() const { return fContext->options(); }
+
+ const GrCaps* caps() const { return fContext->caps(); }
+ sk_sp<const GrCaps> refCaps() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ GrImageContext* asImageContext() { return fContext->asImageContext(); }
+ GrRecordingContext* asRecordingContext() { return fContext->asRecordingContext(); }
+ GrContext* asDirectContext() { return fContext->asDirectContext(); }
+
+ // from GrImageContext
+ GrProxyProvider* proxyProvider() { return fContext->proxyProvider(); }
+ const GrProxyProvider* proxyProvider() const { return fContext->proxyProvider(); }
+
+ bool abandoned() const { return fContext->abandoned(); }
+
+ /** This is only useful for debug purposes */
+ SkDEBUGCODE(GrSingleOwner* singleOwner() const { return fContext->singleOwner(); } )
+
+private:
+ explicit GrImageContextPriv(GrImageContext* context) : fContext(context) {}
+ GrImageContextPriv(const GrImageContextPriv&); // unimpl
+ GrImageContextPriv& operator=(const GrImageContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrImageContextPriv* operator&() const;
+ GrImageContextPriv* operator&();
+
+ GrImageContext* fContext;
+
+ friend class GrImageContext; // to construct/copy this type.
+};
+
+inline GrImageContextPriv GrImageContext::priv() { return GrImageContextPriv(this); }
+
+inline const GrImageContextPriv GrImageContext::priv() const {
+ return GrImageContextPriv(const_cast<GrImageContext*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrImageInfo.h b/gfx/skia/skia/src/gpu/GrImageInfo.h
new file mode 100644
index 0000000000..800d3f433f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageInfo.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageInfo_DEFINED
+#define GrImageInfo_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrColorInfo.h"
+
+class GrImageInfo {
+public:
+ GrImageInfo() = default;
+
+ /* implicit */ GrImageInfo(const SkImageInfo& info)
+ : fColorInfo(info.colorInfo()), fDimensions(info.dimensions()) {}
+
+ GrImageInfo(GrColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs, int w, int h)
+ : fColorInfo(ct, at, std::move(cs)), fDimensions{w,h} {}
+
+ GrImageInfo(GrColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs, const SkISize& dimensions)
+ : fColorInfo(ct, at, std::move(cs)), fDimensions(dimensions) {}
+
+ GrImageInfo(const GrImageInfo&) = default;
+ GrImageInfo(GrImageInfo&&) = default;
+ GrImageInfo& operator=(const GrImageInfo&) = default;
+ GrImageInfo& operator=(GrImageInfo&&) = default;
+
+ GrImageInfo makeColorType(GrColorType ct) {
+ return {ct, this->alphaType(), this->refColorSpace(), this->width(), this->height()};
+ }
+
+ GrImageInfo makeAlphaType(SkAlphaType at) {
+ return {this->colorType(), at, this->refColorSpace(), this->width(), this->height()};
+ }
+
+ GrImageInfo makeWH(int width, int height) {
+ return {this->colorType(), this->alphaType(), this->refColorSpace(), width, height};
+ }
+
+ GrColorType colorType() const { return fColorInfo.colorType(); }
+
+ SkAlphaType alphaType() const { return fColorInfo.alphaType(); }
+
+ SkColorSpace* colorSpace() const { return fColorInfo.colorSpace(); }
+
+ sk_sp<SkColorSpace> refColorSpace() const { return fColorInfo.refColorSpace(); }
+
+ SkISize dimensions() const { return fDimensions; }
+
+ int width() const { return fDimensions.width(); }
+
+ int height() const { return fDimensions.height(); }
+
+ size_t bpp() const { return GrColorTypeBytesPerPixel(this->colorType()); }
+
+ size_t minRowBytes() const { return this->bpp() * this->width(); }
+
+    /**
+     * Places this image rect in a surface of dimensions surfaceWidth x surfaceHeight, offset at
+     * surfacePt, and then clips the pixel rectangle to the bounds of the surface. If the pixel
+     * rect does not intersect the surface bounds or is empty, returns false. If clipped, the
+     * input surfacePt, the width/height of this GrImageInfo, and the data pointer are modified
+     * to reflect the clipped rectangle.
+     */
+ template <typename T>
+ bool clip(int surfaceWidth, int surfaceHeight, SkIPoint* surfacePt, T** data, size_t rowBytes) {
+ auto bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
+ auto rect = SkIRect::MakeXYWH(surfacePt->fX, surfacePt->fY, this->width(), this->height());
+ if (!rect.intersect(bounds)) {
+ return false;
+ }
+ *data = SkTAddOffset<T>(*data, (rect.fTop - surfacePt->fY) * rowBytes +
+ (rect.fLeft - surfacePt->fX) * this->bpp());
+ surfacePt->fX = rect.fLeft;
+ surfacePt->fY = rect.fTop;
+ fDimensions = rect.size();
+ return true;
+ }
+
+ bool isValid() const { return fColorInfo.isValid() && this->width() > 0 && this->height() > 0; }
+
+private:
+ GrColorInfo fColorInfo = {};
+ SkISize fDimensions;
+};
+
+#endif
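
The trickiest part of clip() above is advancing the data pointer past the clipped-away rows and columns: (rect.fTop - surfacePt->fY) whole rows plus (rect.fLeft - surfacePt->fX) pixels. A standalone worked example of that arithmetic; the concrete values are arbitrary test inputs, not anything from Skia:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
        const int surfaceW = 8, surfaceH = 8;
        int x = -2, y = -1;        // 4x4 pixel rect positioned partly off-surface
        int rectW = 4, rectH = 4;
        const size_t bpp = 4, rowBytes = rectW * bpp;

        uint8_t pixels[4 * 4 * 4] = {};
        uint8_t* data = pixels;

        // Clip to [0, surfaceW) x [0, surfaceH).
        const int left   = x < 0 ? 0 : x;
        const int top    = y < 0 ? 0 : y;
        const int right  = x + rectW > surfaceW ? surfaceW : x + rectW;
        const int bottom = y + rectH > surfaceH ? surfaceH : y + rectH;

        // Advance past the clipped-away rows and columns, as clip() does.
        data += (top - y) * rowBytes + (left - x) * bpp;
        rectW = right - left;
        rectH = bottom - top;
        x = left;
        y = top;

        assert(rectW == 2 && rectH == 3 && x == 0 && y == 0);
        assert(data == pixels + 1 * rowBytes + 2 * bpp);
        return 0;
    }
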
diff --git a/gfx/skia/skia/src/gpu/GrImageTextureMaker.cpp b/gfx/skia/skia/src/gpu/GrImageTextureMaker.cpp
new file mode 100644
index 0000000000..13ca46dad3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageTextureMaker.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrImageTextureMaker.h"
+
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrYUVtoRGBEffect.h"
+#include "src/image/SkImage_GpuYUVA.h"
+#include "src/image/SkImage_Lazy.h"
+
+GrImageTextureMaker::GrImageTextureMaker(GrRecordingContext* context, const SkImage* client,
+ SkImage::CachingHint chint, bool useDecal)
+ : INHERITED(context, client->width(), client->height(), client->imageInfo().colorInfo(),
+ useDecal)
+ , fImage(static_cast<const SkImage_Lazy*>(client))
+ , fCachingHint(chint) {
+ SkASSERT(client->isLazyGenerated());
+ GrMakeKeyFromImageID(&fOriginalKey, client->uniqueID(),
+ SkIRect::MakeWH(this->width(), this->height()));
+}
+
+sk_sp<GrTextureProxy> GrImageTextureMaker::refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) {
+ return fImage->lockTextureProxy(this->context(), fOriginalKey, fCachingHint,
+ willBeMipped, onlyIfFast);
+}
+
+void GrImageTextureMaker::makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) {
+ if (fOriginalKey.isValid() && SkImage::kAllow_CachingHint == fCachingHint) {
+ GrUniqueKey cacheKey;
+ fImage->makeCacheKeyFromOrigKey(fOriginalKey, &cacheKey);
+ MakeCopyKeyFromOrigKey(cacheKey, stretch, paramsCopyKey);
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrYUVAImageTextureMaker::GrYUVAImageTextureMaker(GrContext* context, const SkImage* client,
+ bool useDecal)
+ : INHERITED(context, client->width(), client->height(), client->imageInfo().colorInfo(),
+ useDecal)
+ , fImage(static_cast<const SkImage_GpuYUVA*>(client)) {
+ SkASSERT(as_IB(client)->isYUVA());
+ GrMakeKeyFromImageID(&fOriginalKey, client->uniqueID(),
+ SkIRect::MakeWH(this->width(), this->height()));
+}
+
+sk_sp<GrTextureProxy> GrYUVAImageTextureMaker::refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) {
+ if (AllowedTexGenType::kCheap == onlyIfFast) {
+ return nullptr;
+ }
+
+ if (willBeMipped) {
+ return fImage->asMippedTextureProxyRef(this->context());
+ } else {
+ return fImage->asTextureProxyRef(this->context());
+ }
+}
+
+void GrYUVAImageTextureMaker::makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) {
+ // TODO: Do we ever want to disable caching?
+ if (fOriginalKey.isValid()) {
+ GrUniqueKey cacheKey;
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(&cacheKey, fOriginalKey, kDomain, 0, "Image");
+ MakeCopyKeyFromOrigKey(cacheKey, stretch, paramsCopyKey);
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrYUVAImageTextureMaker::createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) {
+
+    // Check simple cases to see if we need to fall back to flattening the image (or whether it
+    // has already been flattened).
+ if (!filterOrNullForBicubic || this->domainNeedsDecal() || fImage->fRGBProxy) {
+ return this->INHERITED::createFragmentProcessor(textureMatrix, constraintRect,
+ filterConstraint,
+ coordsLimitedToConstraintRect,
+ filterOrNullForBicubic);
+ }
+
+    // Check to see if the client has given us pre-mipped textures or if we can generate them.
+    // If not, fall back to bilerp. Also fall back to bilerp when a domain is requested.
+ GrSamplerState::Filter filter = *filterOrNullForBicubic;
+ if (GrSamplerState::Filter::kMipMap == filter &&
+ (filterConstraint == GrTextureProducer::kYes_FilterConstraint ||
+ !fImage->setupMipmapsForPlanes(this->context()))) {
+ filter = GrSamplerState::Filter::kBilerp;
+ }
+
+    // Cannot rely on GrTextureProducer's domain infrastructure since we need to calculate domains
+    // per plane, which may differ, so respect the filterConstraint without any additional
+    // analysis.
+ const SkRect* domain = nullptr;
+ if (filterConstraint == GrTextureProducer::kYes_FilterConstraint) {
+ domain = &constraintRect;
+ }
+
+ auto fp = GrYUVtoRGBEffect::Make(fImage->fProxies, fImage->fYUVAIndices,
+ fImage->fYUVColorSpace, filter, textureMatrix, domain);
+ if (fImage->fFromColorSpace) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), fImage->fFromColorSpace.get(),
+ fImage->alphaType(), fImage->colorSpace());
+ }
+ return fp;
+}
diff --git a/gfx/skia/skia/src/gpu/GrImageTextureMaker.h b/gfx/skia/skia/src/gpu/GrImageTextureMaker.h
new file mode 100644
index 0000000000..44e74243e5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageTextureMaker.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageTextureMaker_DEFINED
+#define GrImageTextureMaker_DEFINED
+
+#include "include/core/SkImage.h"
+#include "src/gpu/GrTextureMaker.h"
+
+class SkImage_Lazy;
+class SkImage_GpuYUVA;
+
+/** This class manages the conversion of generator-backed images to GrTextures. If the caching hint
+ is kAllow the image's ID is used for the cache key. */
+class GrImageTextureMaker : public GrTextureMaker {
+public:
+ GrImageTextureMaker(GrRecordingContext* context, const SkImage* client,
+ SkImage::CachingHint chint, bool useDecal = false);
+
+protected:
+ // TODO: consider overriding this, for the case where the underlying generator might be
+ // able to efficiently produce a "stretched" texture natively (e.g. picture-backed)
+ // GrTexture* generateTextureForParams(const CopyParams&) override;
+ sk_sp<GrTextureProxy> refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) override;
+
+ void makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) override {}
+
+private:
+ const SkImage_Lazy* fImage;
+ GrUniqueKey fOriginalKey;
+ SkImage::CachingHint fCachingHint;
+
+ typedef GrTextureMaker INHERITED;
+};
+
+/** This class manages the conversion of generator-backed YUVA images to GrTextures. */
+class GrYUVAImageTextureMaker : public GrTextureMaker {
+public:
+ GrYUVAImageTextureMaker(GrContext* context, const SkImage* client, bool useDecal = false);
+
+ // This could be made more nuanced and compare all of the texture proxy resolutions, but
+ // it's probably not worth the effort.
+ bool hasMixedResolutions() const override { return true; }
+protected:
+ // TODO: consider overriding this, for the case where the underlying generator might be
+ // able to efficiently produce a "stretched" texture natively (e.g. picture-backed)
+ // GrTexture* generateTextureForParams(const CopyParams&) override;
+ sk_sp<GrTextureProxy> refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType onlyIfFast) override;
+
+ void makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) override {}
+
+ std::unique_ptr<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) override;
+
+private:
+ const SkImage_GpuYUVA* fImage;
+ GrUniqueKey fOriginalKey;
+
+ typedef GrTextureMaker INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrLegacyDirectContext.cpp b/gfx/skia/skia/src/gpu/GrLegacyDirectContext.cpp
new file mode 100644
index 0000000000..15746e9d71
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrLegacyDirectContext.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/GrContext.h"
+
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/GrGpu.h"
+
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/mock/GrMockGpu.h"
+#include "src/gpu/text/GrStrikeCache.h"
+#ifdef SK_METAL
+#include "src/gpu/mtl/GrMtlTrampoline.h"
+#endif
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkGpu.h"
+#endif
+#ifdef SK_DAWN
+#include "src/gpu/dawn/GrDawnGpu.h"
+#endif
+
+#ifdef SK_DISABLE_REDUCE_OPLIST_SPLITTING
+static const bool kDefaultReduceOpsTaskSplitting = false;
+#else
+// Note: currently the same default as the disabled case; splitting reduction is off either way.
+static const bool kDefaultReduceOpsTaskSplitting = false;
+#endif
+
+class GrLegacyDirectContext : public GrContext {
+public:
+ GrLegacyDirectContext(GrBackendApi backend, const GrContextOptions& options)
+ : INHERITED(backend, options)
+ , fAtlasManager(nullptr) {
+ }
+
+ ~GrLegacyDirectContext() override {
+ // this if-test protects against the case where the context is being destroyed
+ // before having been fully created
+ if (this->priv().getGpu()) {
+ this->flush();
+ }
+
+ delete fAtlasManager;
+ }
+
+ void abandonContext() override {
+ INHERITED::abandonContext();
+ fAtlasManager->freeAll();
+ }
+
+ void releaseResourcesAndAbandonContext() override {
+ INHERITED::releaseResourcesAndAbandonContext();
+ fAtlasManager->freeAll();
+ }
+
+ void freeGpuResources() override {
+ this->flush();
+ fAtlasManager->freeAll();
+
+ INHERITED::freeGpuResources();
+ }
+
+protected:
+ bool init(sk_sp<const GrCaps> caps, sk_sp<GrSkSLFPFactoryCache> FPFactoryCache) override {
+ SkASSERT(caps && !FPFactoryCache);
+ SkASSERT(!fThreadSafeProxy);
+
+ FPFactoryCache.reset(new GrSkSLFPFactoryCache());
+ fThreadSafeProxy = GrContextThreadSafeProxyPriv::Make(this->backend(),
+ this->options(),
+ this->contextID(),
+ caps, FPFactoryCache);
+
+ if (!INHERITED::init(std::move(caps), std::move(FPFactoryCache))) {
+ return false;
+ }
+
+ bool reduceOpsTaskSplitting = kDefaultReduceOpsTaskSplitting;
+ if (GrContextOptions::Enable::kNo == this->options().fReduceOpsTaskSplitting) {
+ reduceOpsTaskSplitting = false;
+ } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpsTaskSplitting) {
+ reduceOpsTaskSplitting = true;
+ }
+
+ this->setupDrawingManager(true, reduceOpsTaskSplitting);
+
+ SkASSERT(this->caps());
+
+ GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
+ if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
+ // multitexturing supported only if range can represent the index + texcoords fully
+ !(this->caps()->shaderCaps()->floatIs32Bits() ||
+ this->caps()->shaderCaps()->integerSupport())) {
+ allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
+ } else {
+ allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
+ }
+
+ GrStrikeCache* glyphCache = this->priv().getGrStrikeCache();
+ GrProxyProvider* proxyProvider = this->priv().proxyProvider();
+
+ fAtlasManager = new GrAtlasManager(proxyProvider, glyphCache,
+ this->options().fGlyphCacheTextureMaximumBytes,
+ allowMultitexturing);
+ this->priv().addOnFlushCallbackObject(fAtlasManager);
+
+ return true;
+ }
+
+ GrAtlasManager* onGetAtlasManager() override { return fAtlasManager; }
+
+private:
+ GrAtlasManager* fAtlasManager;
+
+ typedef GrContext INHERITED;
+};
+
+sk_sp<GrContext> GrContext::MakeGL(sk_sp<const GrGLInterface> interface) {
+ GrContextOptions defaultOptions;
+ return MakeGL(std::move(interface), defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeGL(const GrContextOptions& options) {
+ return MakeGL(nullptr, options);
+}
+
+sk_sp<GrContext> GrContext::MakeGL() {
+ GrContextOptions defaultOptions;
+ return MakeGL(nullptr, defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeGL(sk_sp<const GrGLInterface> interface,
+ const GrContextOptions& options) {
+ sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kOpenGL, options));
+
+ context->fGpu = GrGLGpu::Make(std::move(interface), options, context.get());
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ if (!context->init(context->fGpu->refCaps(), nullptr)) {
+ return nullptr;
+ }
+ return context;
+}
+
+sk_sp<GrContext> GrContext::MakeMock(const GrMockOptions* mockOptions) {
+ GrContextOptions defaultOptions;
+ return MakeMock(mockOptions, defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeMock(const GrMockOptions* mockOptions,
+ const GrContextOptions& options) {
+ sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kMock, options));
+
+ context->fGpu = GrMockGpu::Make(mockOptions, options, context.get());
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ if (!context->init(context->fGpu->refCaps(), nullptr)) {
+ return nullptr;
+ }
+ return context;
+}
+
+sk_sp<GrContext> GrContext::MakeVulkan(const GrVkBackendContext& backendContext) {
+#ifdef SK_VULKAN
+ GrContextOptions defaultOptions;
+ return MakeVulkan(backendContext, defaultOptions);
+#else
+ return nullptr;
+#endif
+}
+
+sk_sp<GrContext> GrContext::MakeVulkan(const GrVkBackendContext& backendContext,
+ const GrContextOptions& options) {
+#ifdef SK_VULKAN
+ GrContextOptions defaultOptions;
+ sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kVulkan, options));
+
+ context->fGpu = GrVkGpu::Make(backendContext, options, context.get());
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ if (!context->init(context->fGpu->refCaps(), nullptr)) {
+ return nullptr;
+ }
+ return context;
+#else
+ return nullptr;
+#endif
+}
+
+#ifdef SK_METAL
+sk_sp<GrContext> GrContext::MakeMetal(void* device, void* queue) {
+ GrContextOptions defaultOptions;
+ return MakeMetal(device, queue, defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeMetal(void* device, void* queue, const GrContextOptions& options) {
+ sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kMetal, options));
+
+ context->fGpu = GrMtlTrampoline::MakeGpu(context.get(), options, device, queue);
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ if (!context->init(context->fGpu->refCaps(), nullptr)) {
+ return nullptr;
+ }
+ return context;
+}
+#endif
+
+#ifdef SK_DAWN
+sk_sp<GrContext> GrContext::MakeDawn(const dawn::Device& device) {
+ GrContextOptions defaultOptions;
+ return MakeDawn(device, defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeDawn(const dawn::Device& device, const GrContextOptions& options) {
+ sk_sp<GrContext> context(new GrLegacyDirectContext(GrBackendApi::kDawn, options));
+
+ context->fGpu = GrDawnGpu::Make(device, options, context.get());
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ if (!context->init(context->fGpu->refCaps(), nullptr)) {
+ return nullptr;
+ }
+ return context;
+}
+#endif
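
Each factory above follows the same shape: construct a GrLegacyDirectContext for the backend, create the backend GrGpu, then run init() with the GPU's caps, returning nullptr if any step fails. A minimal usage sketch, assuming a Skia build with the mock backend compiled in:

    #include "include/gpu/GrContext.h"

    sk_sp<GrContext> make_test_context() {
        // nullptr selects default mock options; MakeMock returns nullptr if
        // backend GrGpu creation or context init fails.
        return GrContext::MakeMock(nullptr);
    }
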
diff --git a/gfx/skia/skia/src/gpu/GrMemoryPool.cpp b/gfx/skia/skia/src/gpu/GrMemoryPool.cpp
new file mode 100644
index 0000000000..8a92a5e384
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMemoryPool.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/ops/GrOp.h"
+#ifdef SK_DEBUG
+ #include <atomic>
+#endif
+
+#ifdef SK_DEBUG
+ #define VALIDATE this->validate()
+#else
+ #define VALIDATE
+#endif
+
+void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
+ GrOp* tmp = op.release();
+ SkASSERT(tmp);
+ tmp->~GrOp();
+ fMemoryPool.release(tmp);
+}
+
+constexpr size_t GrMemoryPool::kSmallestMinAllocSize;
+
+GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
+ SkDEBUGCODE(fAllocationCnt = 0);
+ SkDEBUGCODE(fAllocBlockCnt = 0);
+
+ minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);
+ preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);
+
+ fMinAllocSize = minAllocSize;
+ fSize = 0;
+
+ fHead = CreateBlock(preallocSize);
+ fTail = fHead;
+ fHead->fNext = nullptr;
+ fHead->fPrev = nullptr;
+ VALIDATE;
+}
+
+GrMemoryPool::~GrMemoryPool() {
+ VALIDATE;
+#ifdef SK_DEBUG
+ int i = 0;
+ int n = fAllocatedIDs.count();
+ fAllocatedIDs.foreach([&i, n] (int32_t id) {
+ if (++i == 1) {
+ SkDebugf("Leaked IDs (in no particular order): %d", id);
+ } else if (i < 11) {
+ SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
+ } else if (i == 11) {
+ SkDebugf(", ...\n");
+ }
+ });
+#endif
+ SkASSERT(0 == fAllocationCnt);
+ SkASSERT(fHead == fTail);
+ SkASSERT(0 == fHead->fLiveCount);
+ DeleteBlock(fHead);
+}
+
+void* GrMemoryPool::allocate(size_t size) {
+ VALIDATE;
+ size += kPerAllocPad;
+ size = GrSizeAlignUp(size, kAlignment);
+ if (fTail->fFreeSize < size) {
+ size_t blockSize = size + kHeaderSize;
+ blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
+ BlockHeader* block = CreateBlock(blockSize);
+
+ block->fPrev = fTail;
+ block->fNext = nullptr;
+ SkASSERT(nullptr == fTail->fNext);
+ fTail->fNext = block;
+ fTail = block;
+ fSize += block->fSize;
+ SkDEBUGCODE(++fAllocBlockCnt);
+ }
+ SkASSERT(kAssignedMarker == fTail->fBlockSentinal);
+ SkASSERT(fTail->fFreeSize >= size);
+ intptr_t ptr = fTail->fCurrPtr;
+ // We stash a pointer to the block header, just before the allocated space,
+ // so that we can decrement the live count on delete in constant time.
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
+ SkDEBUGCODE(allocData->fSentinal = kAssignedMarker);
+ SkDEBUGCODE(allocData->fID = []{
+ static std::atomic<int32_t> nextID{1};
+ return nextID++;
+ }());
+ // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
+ SkDEBUGCODE(fAllocatedIDs.add(allocData->fID));
+ allocData->fHeader = fTail;
+ ptr += kPerAllocPad;
+ fTail->fPrevPtr = fTail->fCurrPtr;
+ fTail->fCurrPtr += size;
+ fTail->fFreeSize -= size;
+ fTail->fLiveCount += 1;
+ SkDEBUGCODE(++fAllocationCnt);
+ VALIDATE;
+ return reinterpret_cast<void*>(ptr);
+}
+
+void GrMemoryPool::release(void* p) {
+ VALIDATE;
+ intptr_t ptr = reinterpret_cast<intptr_t>(p) - kPerAllocPad;
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
+ SkASSERT(kAssignedMarker == allocData->fSentinal);
+ SkDEBUGCODE(allocData->fSentinal = kFreedMarker);
+ SkDEBUGCODE(fAllocatedIDs.remove(allocData->fID));
+ BlockHeader* block = allocData->fHeader;
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ if (1 == block->fLiveCount) {
+        // The head block is special: it is reset rather than deleted.
+ if (fHead == block) {
+ fHead->fCurrPtr = reinterpret_cast<intptr_t>(fHead) + kHeaderSize;
+ fHead->fLiveCount = 0;
+ fHead->fFreeSize = fHead->fSize - kHeaderSize;
+ } else {
+ BlockHeader* prev = block->fPrev;
+ BlockHeader* next = block->fNext;
+ SkASSERT(prev);
+ prev->fNext = next;
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ SkASSERT(fTail == block);
+ fTail = prev;
+ }
+ fSize -= block->fSize;
+ DeleteBlock(block);
+ SkDEBUGCODE(fAllocBlockCnt--);
+ }
+ } else {
+ --block->fLiveCount;
+ // Trivial reclaim: if we're releasing the most recent allocation, reuse it
+ if (block->fPrevPtr == ptr) {
+ block->fFreeSize += (block->fCurrPtr - block->fPrevPtr);
+ block->fCurrPtr = block->fPrevPtr;
+ }
+ }
+ SkDEBUGCODE(--fAllocationCnt);
+ VALIDATE;
+}
+
+GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t blockSize) {
+ blockSize = SkTMax<size_t>(blockSize, kHeaderSize);
+ BlockHeader* block =
+ reinterpret_cast<BlockHeader*>(sk_malloc_throw(blockSize));
+ // we assume malloc gives us aligned memory
+ SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
+ SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
+ block->fLiveCount = 0;
+ block->fFreeSize = blockSize - kHeaderSize;
+ block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
+ block->fPrevPtr = 0; // gcc warns on assigning nullptr to an intptr_t.
+ block->fSize = blockSize;
+ return block;
+}
+
+void GrMemoryPool::DeleteBlock(BlockHeader* block) {
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ SkDEBUGCODE(block->fBlockSentinal = kFreedMarker); // FWIW
+ sk_free(block);
+}
+
+void GrMemoryPool::validate() {
+#ifdef SK_DEBUG
+ BlockHeader* block = fHead;
+ BlockHeader* prev = nullptr;
+ SkASSERT(block);
+ int allocCount = 0;
+ do {
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ allocCount += block->fLiveCount;
+ SkASSERT(prev == block->fPrev);
+ if (prev) {
+ SkASSERT(prev->fNext == block);
+ }
+
+ intptr_t b = reinterpret_cast<intptr_t>(block);
+ size_t ptrOffset = block->fCurrPtr - b;
+ size_t totalSize = ptrOffset + block->fFreeSize;
+ intptr_t userStart = b + kHeaderSize;
+
+ SkASSERT(!(b % kAlignment));
+ SkASSERT(!(totalSize % kAlignment));
+ SkASSERT(!(block->fCurrPtr % kAlignment));
+ if (fHead != block) {
+ SkASSERT(block->fLiveCount);
+ SkASSERT(totalSize >= fMinAllocSize);
+ } else {
+ SkASSERT(totalSize == block->fSize);
+ }
+ if (!block->fLiveCount) {
+ SkASSERT(ptrOffset == kHeaderSize);
+ SkASSERT(userStart == block->fCurrPtr);
+ } else {
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(userStart);
+ SkASSERT(allocData->fSentinal == kAssignedMarker ||
+ allocData->fSentinal == kFreedMarker);
+ SkASSERT(block == allocData->fHeader);
+ }
+
+ prev = block;
+ } while ((block = block->fNext));
+ SkASSERT(allocCount == fAllocationCnt);
+ SkASSERT(fAllocationCnt == fAllocatedIDs.count());
+ SkASSERT(prev == fTail);
+ SkASSERT(fAllocBlockCnt != 0 || fSize == 0);
+#endif
+}
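
Both allocate() and CreateBlock() above lean on GrSizeAlignUp to keep every size and pointer 8-byte aligned. A self-contained sketch of that align-up arithmetic for power-of-two alignments; align_up is an illustrative stand-in for GrSizeAlignUp:

    #include <cassert>
    #include <cstddef>

    // Rounds x up to the next multiple of alignment; alignment must be a
    // power of two (kAlignment is 8 in the pool above).
    constexpr size_t align_up(size_t x, size_t alignment) {
        return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        assert(align_up(1, 8) == 8);
        assert(align_up(8, 8) == 8);
        assert(align_up(13, 8) == 16);
        return 0;
    }
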
diff --git a/gfx/skia/skia/src/gpu/GrMemoryPool.h b/gfx/skia/skia/src/gpu/GrMemoryPool.h
new file mode 100644
index 0000000000..a4428305c0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMemoryPool.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMemoryPool_DEFINED
+#define GrMemoryPool_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkRefCnt.h"
+
+#ifdef SK_DEBUG
+#include "include/private/SkTHash.h"
+#endif
+
+/**
+ * Allocates memory in blocks and parcels out space in the blocks for allocation
+ * requests. It is optimized for allocate / release speed over memory
+ * efficiency. The interface is designed to be used to implement operator new
+ * and delete overrides. All allocations are expected to be released before the
+ * pool's destructor is called. Allocations will be 8-byte aligned.
+ */
+class GrMemoryPool {
+public:
+ /**
+ * Prealloc size is the amount of space to allocate at pool creation
+ * time and keep around until pool destruction. The min alloc size is
+ * the smallest allowed size of additional allocations. Both sizes are
+ * adjusted to ensure that:
+     * 1. they are 8-byte aligned
+     * 2. minAllocSize >= kSmallestMinAllocSize
+     * 3. preallocSize >= minAllocSize
+     *
+     * Both sizes determine what the pool will end up allocating from the system, and
+     * portions of the allocated memory are used for internal bookkeeping.
+ */
+ GrMemoryPool(size_t preallocSize, size_t minAllocSize);
+
+ ~GrMemoryPool();
+
+ /**
+ * Allocates memory. The memory must be freed with release().
+ */
+ void* allocate(size_t size);
+
+ /**
+ * p must have been returned by allocate()
+ */
+ void release(void* p);
+
+ /**
+ * Returns true if there are no unreleased allocations.
+ */
+ bool isEmpty() const { return fTail == fHead && !fHead->fLiveCount; }
+
+ /**
+ * Returns the total allocated size of the GrMemoryPool minus any preallocated amount
+ */
+ size_t size() const { return fSize; }
+
+ /**
+ * Returns the preallocated size of the GrMemoryPool
+ */
+ size_t preallocSize() const { return fHead->fSize; }
+
+ /**
+ * Minimum value of minAllocSize constructor argument.
+ */
+ constexpr static size_t kSmallestMinAllocSize = 1 << 10;
+
+private:
+ struct BlockHeader;
+
+ static BlockHeader* CreateBlock(size_t size);
+
+ static void DeleteBlock(BlockHeader* block);
+
+ void validate();
+
+ struct BlockHeader {
+#ifdef SK_DEBUG
+ uint32_t fBlockSentinal; ///< known value to check for bad back pointers to blocks
+#endif
+ BlockHeader* fNext; ///< doubly-linked list of blocks.
+ BlockHeader* fPrev;
+ int fLiveCount; ///< number of outstanding allocations in the
+ ///< block.
+ intptr_t fCurrPtr; ///< ptr to the start of blocks free space.
+ intptr_t fPrevPtr; ///< ptr to the last allocation made
+ size_t fFreeSize; ///< amount of free space left in the block.
+ size_t fSize; ///< total allocated size of the block
+ };
+
+ static const uint32_t kAssignedMarker = 0xCDCDCDCD;
+ static const uint32_t kFreedMarker = 0xEFEFEFEF;
+
+ struct AllocHeader {
+#ifdef SK_DEBUG
+ uint32_t fSentinal; ///< known value to check for memory stomping (e.g., (CD)*)
+ int32_t fID; ///< ID that can be used to track down leaks by clients.
+#endif
+ BlockHeader* fHeader; ///< pointer back to the block header in which an alloc resides
+ };
+
+ size_t fSize;
+ size_t fMinAllocSize;
+ BlockHeader* fHead;
+ BlockHeader* fTail;
+#ifdef SK_DEBUG
+ int fAllocationCnt;
+ int fAllocBlockCnt;
+ SkTHashSet<int32_t> fAllocatedIDs;
+#endif
+
+protected:
+ enum {
+ // We assume this alignment is good enough for everybody.
+ kAlignment = 8,
+ kHeaderSize = GrSizeAlignUp(sizeof(BlockHeader), kAlignment),
+ kPerAllocPad = GrSizeAlignUp(sizeof(AllocHeader), kAlignment),
+ };
+};
+
+class GrOp;
+
+// DDL TODO: for the DDL use case this could probably be the non-intrinsic-based style of
+// ref counting.
+class GrOpMemoryPool : public SkRefCnt {
+public:
+ GrOpMemoryPool(size_t preallocSize, size_t minAllocSize)
+ : fMemoryPool(preallocSize, minAllocSize) {
+ }
+
+ template <typename Op, typename... OpArgs>
+ std::unique_ptr<Op> allocate(OpArgs&&... opArgs) {
+ char* mem = (char*) fMemoryPool.allocate(sizeof(Op));
+ return std::unique_ptr<Op>(new (mem) Op(std::forward<OpArgs>(opArgs)...));
+ }
+
+ void* allocate(size_t size) {
+ return fMemoryPool.allocate(size);
+ }
+
+ void release(std::unique_ptr<GrOp> op);
+
+ bool isEmpty() const { return fMemoryPool.isEmpty(); }
+
+private:
+ GrMemoryPool fMemoryPool;
+};
+
+#endif
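
GrOpMemoryPool::allocate above constructs an op with placement new inside pool-owned bytes, and release() runs the destructor by hand before returning the bytes to the pool. A standalone sketch of that pairing; PoolSketch, pool_make, and pool_release are illustrative names, with malloc/free standing in for the real block allocator:

    #include <cstddef>
    #include <cstdlib>
    #include <new>
    #include <utility>

    // malloc/free stand in for the real block allocator.
    struct PoolSketch {
        void* allocate(std::size_t n) { return std::malloc(n); }
        void  release(void* p)        { std::free(p); }
    };

    template <typename T, typename... Args>
    T* pool_make(PoolSketch& pool, Args&&... args) {
        void* mem = pool.allocate(sizeof(T));
        return new (mem) T(std::forward<Args>(args)...);  // placement new
    }

    template <typename T>
    void pool_release(PoolSketch& pool, T* obj) {
        obj->~T();          // manual destructor call, as in GrOpMemoryPool::release
        pool.release(obj);  // hand the bytes back to the pool
    }
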
diff --git a/gfx/skia/skia/src/gpu/GrMesh.h b/gfx/skia/skia/src/gpu/GrMesh.h
new file mode 100644
index 0000000000..08daf0e32b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMesh.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMesh_DEFINED
+#define GrMesh_DEFINED
+
+#include "src/gpu/GrBuffer.h"
+#include "src/gpu/GrGpuBuffer.h"
+
+class GrPrimitiveProcessor;
+
+/**
+ * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrOp to
+ * GrGpu. It also holds the primitive type for the draw. TODO: Consider moving ownership of this
+ * and draw-issuing responsibility to GrPrimitiveProcessor. The rest of the vertex info lives there
+ * already (stride, attribute mappings).
+ */
+class GrMesh {
+public:
+ GrMesh(GrPrimitiveType primitiveType = GrPrimitiveType::kTriangles)
+ : fPrimitiveType(primitiveType), fBaseVertex(0) {
+ SkDEBUGCODE(fNonIndexNonInstanceData.fVertexCount = -1;)
+ }
+
+ void setPrimitiveType(GrPrimitiveType type) { fPrimitiveType = type; }
+ GrPrimitiveType primitiveType() const { return fPrimitiveType; }
+
+ bool isIndexed() const { return SkToBool(fIndexBuffer.get()); }
+ GrPrimitiveRestart primitiveRestart() const {
+ return GrPrimitiveRestart(fFlags & Flags::kUsePrimitiveRestart);
+ }
+ bool isInstanced() const { return fFlags & Flags::kIsInstanced; }
+ bool hasInstanceData() const { return SkToBool(fInstanceBuffer.get()); }
+ bool hasVertexData() const { return SkToBool(fVertexBuffer.get()); }
+
+ void setNonIndexedNonInstanced(int vertexCount);
+
+ void setIndexed(sk_sp<const GrBuffer> indexBuffer, int indexCount, int baseIndex,
+ uint16_t minIndexValue, uint16_t maxIndexValue, GrPrimitiveRestart);
+ void setIndexedPatterned(sk_sp<const GrBuffer> indexBuffer, int indexCount, int vertexCount,
+ int patternRepeatCount, int maxPatternRepetitionsInIndexBuffer);
+
+ void setInstanced(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
+ int vertexCount);
+ void setIndexedInstanced(sk_sp<const GrBuffer> indexBuffer, int indexCount,
+ sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart);
+
+ void setVertexData(sk_sp<const GrBuffer> vertexBuffer, int baseVertex = 0);
+
+ class SendToGpuImpl {
+ public:
+ virtual void sendMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) = 0;
+
+ virtual void sendIndexedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer,
+ int indexCount, int baseIndex, uint16_t minIndexValue,
+ uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
+ int baseVertex, GrPrimitiveRestart) = 0;
+
+ virtual void sendInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer,
+ int vertexCount, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) = 0;
+
+ virtual void sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer,
+ int indexCount, int baseIndex,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount, int baseInstance,
+ GrPrimitiveRestart) = 0;
+
+ virtual ~SendToGpuImpl() {}
+ };
+
+ void sendToGpu(SendToGpuImpl*) const;
+
+private:
+ enum class Flags {
+ kNone = 0,
+ kUsePrimitiveRestart = 1 << 0,
+ kIsInstanced = 1 << 1,
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(Flags);
+ GR_STATIC_ASSERT(Flags(GrPrimitiveRestart::kNo) == Flags::kNone);
+ GR_STATIC_ASSERT(Flags(GrPrimitiveRestart::kYes) == Flags::kUsePrimitiveRestart);
+
+ GrPrimitiveType fPrimitiveType;
+ sk_sp<const GrBuffer> fIndexBuffer;
+ sk_sp<const GrBuffer> fInstanceBuffer;
+ sk_sp<const GrBuffer> fVertexBuffer;
+ int fBaseVertex;
+ Flags fFlags;
+
+ union {
+ struct { // When fIndexBuffer == nullptr and isInstanced() == false.
+ int fVertexCount;
+ } fNonIndexNonInstanceData;
+
+ struct { // When fIndexBuffer != nullptr and isInstanced() == false.
+ struct {
+ int fIndexCount;
+ int fPatternRepeatCount;
+ } fIndexData;
+
+ union {
+ struct { // When fPatternRepeatCount == 0.
+ int fBaseIndex;
+ uint16_t fMinIndexValue;
+ uint16_t fMaxIndexValue;
+ } fNonPatternIndexData;
+
+ struct { // When fPatternRepeatCount != 0.
+ int fVertexCount;
+ int fMaxPatternRepetitionsInIndexBuffer;
+ } fPatternData;
+ };
+ };
+
+ struct { // When isInstanced() != false.
+ struct {
+ int fInstanceCount;
+ int fBaseInstance;
+ } fInstanceData;
+
+ union { // When fIndexBuffer == nullptr.
+ struct {
+ int fVertexCount;
+ } fInstanceNonIndexData;
+
+ struct { // When fIndexBuffer != nullptr.
+ int fIndexCount;
+ } fInstanceIndexData;
+ };
+ };
+ };
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrMesh::Flags);
+
+inline void GrMesh::setNonIndexedNonInstanced(int vertexCount) {
+ fIndexBuffer.reset();
+ fInstanceBuffer.reset();
+ fNonIndexNonInstanceData.fVertexCount = vertexCount;
+ fFlags = Flags::kNone;
+}
+
+inline void GrMesh::setIndexed(sk_sp<const GrBuffer> indexBuffer, int indexCount, int baseIndex,
+ uint16_t minIndexValue, uint16_t maxIndexValue,
+ GrPrimitiveRestart primitiveRestart) {
+ SkASSERT(indexBuffer);
+ SkASSERT(indexCount >= 1);
+ SkASSERT(baseIndex >= 0);
+ SkASSERT(maxIndexValue >= minIndexValue);
+ fIndexBuffer = std::move(indexBuffer);
+ fInstanceBuffer.reset();
+ fIndexData.fIndexCount = indexCount;
+ fIndexData.fPatternRepeatCount = 0;
+ fNonPatternIndexData.fBaseIndex = baseIndex;
+ fNonPatternIndexData.fMinIndexValue = minIndexValue;
+ fNonPatternIndexData.fMaxIndexValue = maxIndexValue;
+ fFlags = Flags(primitiveRestart);
+}
+
+inline void GrMesh::setIndexedPatterned(sk_sp<const GrBuffer> indexBuffer, int indexCount,
+ int vertexCount, int patternRepeatCount,
+ int maxPatternRepetitionsInIndexBuffer) {
+ SkASSERT(indexBuffer);
+ SkASSERT(indexCount >= 1);
+ SkASSERT(vertexCount >= 1);
+ SkASSERT(patternRepeatCount >= 1);
+ SkASSERT(maxPatternRepetitionsInIndexBuffer >= 1);
+ fIndexBuffer = std::move(indexBuffer);
+ fInstanceBuffer.reset();
+ fIndexData.fIndexCount = indexCount;
+ fIndexData.fPatternRepeatCount = patternRepeatCount;
+ fPatternData.fVertexCount = vertexCount;
+ fPatternData.fMaxPatternRepetitionsInIndexBuffer = maxPatternRepetitionsInIndexBuffer;
+ fFlags = Flags::kNone;
+}
+
+inline void GrMesh::setInstanced(sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, int vertexCount) {
+ SkASSERT(instanceCount >= 1);
+ SkASSERT(baseInstance >= 0);
+ fIndexBuffer.reset();
+ fInstanceBuffer = std::move(instanceBuffer);
+ fInstanceData.fInstanceCount = instanceCount;
+ fInstanceData.fBaseInstance = baseInstance;
+ fInstanceNonIndexData.fVertexCount = vertexCount;
+ fFlags = Flags::kIsInstanced;
+}
+
+inline void GrMesh::setIndexedInstanced(sk_sp<const GrBuffer> indexBuffer, int indexCount,
+ sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart primitiveRestart) {
+ SkASSERT(indexBuffer);
+ SkASSERT(indexCount >= 1);
+ SkASSERT(instanceCount >= 1);
+ SkASSERT(baseInstance >= 0);
+ fIndexBuffer = std::move(indexBuffer);
+ fInstanceBuffer = std::move(instanceBuffer);
+ fInstanceData.fInstanceCount = instanceCount;
+ fInstanceData.fBaseInstance = baseInstance;
+ fInstanceIndexData.fIndexCount = indexCount;
+ fFlags = Flags::kIsInstanced | Flags(primitiveRestart);
+}
+
+inline void GrMesh::setVertexData(sk_sp<const GrBuffer> vertexBuffer, int baseVertex) {
+ SkASSERT(baseVertex >= 0);
+ fVertexBuffer = std::move(vertexBuffer);
+ fBaseVertex = baseVertex;
+}
+
+inline void GrMesh::sendToGpu(SendToGpuImpl* impl) const {
+ if (this->isInstanced()) {
+ if (!this->isIndexed()) {
+ impl->sendInstancedMeshToGpu(fPrimitiveType, fVertexBuffer.get(),
+ fInstanceNonIndexData.fVertexCount, fBaseVertex,
+ fInstanceBuffer.get(), fInstanceData.fInstanceCount,
+ fInstanceData.fBaseInstance);
+ } else {
+ impl->sendIndexedInstancedMeshToGpu(
+ fPrimitiveType, fIndexBuffer.get(), fInstanceIndexData.fIndexCount, 0,
+ fVertexBuffer.get(), fBaseVertex, fInstanceBuffer.get(),
+ fInstanceData.fInstanceCount, fInstanceData.fBaseInstance,
+ this->primitiveRestart());
+ }
+ return;
+ }
+
+ if (!this->isIndexed()) {
+ SkASSERT(fNonIndexNonInstanceData.fVertexCount > 0);
+ impl->sendMeshToGpu(fPrimitiveType, fVertexBuffer.get(),
+ fNonIndexNonInstanceData.fVertexCount, fBaseVertex);
+ return;
+ }
+
+ if (0 == fIndexData.fPatternRepeatCount) {
+ impl->sendIndexedMeshToGpu(
+ fPrimitiveType, fIndexBuffer.get(), fIndexData.fIndexCount,
+ fNonPatternIndexData.fBaseIndex, fNonPatternIndexData.fMinIndexValue,
+ fNonPatternIndexData.fMaxIndexValue, fVertexBuffer.get(), fBaseVertex,
+ this->primitiveRestart());
+ return;
+ }
+
+ SkASSERT(fIndexData.fPatternRepeatCount > 0);
+ int baseRepetition = 0;
+ do {
+ int repeatCount = SkTMin(fPatternData.fMaxPatternRepetitionsInIndexBuffer,
+ fIndexData.fPatternRepeatCount - baseRepetition);
+        // A patterned index buffer must contain indices in the range [0..vertexCount).
+ int minIndexValue = 0;
+ int maxIndexValue = fPatternData.fVertexCount * repeatCount - 1;
+ SkASSERT(!(fFlags & Flags::kUsePrimitiveRestart));
+ impl->sendIndexedMeshToGpu(
+ fPrimitiveType, fIndexBuffer.get(), fIndexData.fIndexCount * repeatCount, 0,
+ minIndexValue, maxIndexValue, fVertexBuffer.get(),
+ fBaseVertex + fPatternData.fVertexCount * baseRepetition, GrPrimitiveRestart::kNo);
+ baseRepetition += repeatCount;
+ } while (baseRepetition < fIndexData.fPatternRepeatCount);
+}
+
+#endif
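
The patterned-draw path at the bottom of sendToGpu() replays a small index buffer holding up to fMaxPatternRepetitionsInIndexBuffer copies of the pattern, bumping the base vertex by vertexCount for each repetition already drawn. A standalone sketch of just that loop, with made-up quad-like numbers:

    #include <algorithm>
    #include <cstdio>

    int main() {
        const int indexCountPerPattern  = 6;   // e.g. two triangles per quad
        const int vertexCountPerPattern = 4;
        const int patternRepeatCount    = 10;
        const int maxRepsInIndexBuffer  = 4;

        int baseRepetition = 0;
        do {
            const int reps = std::min(maxRepsInIndexBuffer,
                                      patternRepeatCount - baseRepetition);
            std::printf("draw %d indices at baseVertex %d\n",
                        indexCountPerPattern * reps,
                        vertexCountPerPattern * baseRepetition);
            baseRepetition += reps;
        } while (baseRepetition < patternRepeatCount);
        return 0;   // draws 24, 24, then 8 indices at base vertices 0, 16, 32
    }
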
diff --git a/gfx/skia/skia/src/gpu/GrNativeRect.h b/gfx/skia/skia/src/gpu/GrNativeRect.h
new file mode 100644
index 0000000000..24b5b25011
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrNativeRect.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrNativeRect_DEFINED
+#define GrNativeRect_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Helper struct for dealing with rectangles on surfaces whose origin may be bottom-up
+ * (bottom-left) rather than top-down (top-left).
+ */
+struct GrNativeRect {
+ int fX;
+ int fY;
+ int fWidth;
+ int fHeight;
+
+ static GrNativeRect MakeRelativeTo(GrSurfaceOrigin org, int rtHeight, const SkIRect& devRect) {
+ GrNativeRect nativeRect;
+ nativeRect.setRelativeTo(org, rtHeight, devRect);
+ return nativeRect;
+ }
+
+ static GrNativeRect MakeRelativeTo(GrSurfaceOrigin origin, int surfaceHeight, int leftOffset,
+ int topOffset, int width, int height) {
+ GrNativeRect nativeRect;
+ nativeRect.setRelativeTo(origin, surfaceHeight, leftOffset, topOffset, width, height);
+ return nativeRect;
+ }
+
+ /**
+ * cast-safe way to treat the rect as an array of (4) ints.
+ */
+ const int* asInts() const {
+ return &fX;
+
+ GR_STATIC_ASSERT(0 == offsetof(GrNativeRect, fX));
+ GR_STATIC_ASSERT(4 == offsetof(GrNativeRect, fY));
+ GR_STATIC_ASSERT(8 == offsetof(GrNativeRect, fWidth));
+ GR_STATIC_ASSERT(12 == offsetof(GrNativeRect, fHeight));
+ GR_STATIC_ASSERT(16 == sizeof(GrNativeRect)); // For an array of GrNativeRect.
+ }
+ int* asInts() { return &fX; }
+
+ SkIRect asSkIRect() const { return SkIRect::MakeXYWH(fX, fY, fWidth, fHeight); }
+
+    // Sometimes we have an SkIRect from the client that we
+    // want to simultaneously make relative to GL's viewport
+    // and (optionally) convert from top-down to bottom-up.
+    // GL's viewport will always be the full size of the
+    // current render target, so we just pass in the rtHeight
+    // here.
+ void setRelativeTo(GrSurfaceOrigin org, int rtHeight, const SkIRect& devRect) {
+ this->setRelativeTo(org, rtHeight, devRect.x(), devRect.y(), devRect.width(),
+ devRect.height());
+ }
+
+ void setRelativeTo(GrSurfaceOrigin origin, int surfaceHeight, int leftOffset, int topOffset,
+ int width, int height) {
+ fX = leftOffset;
+ fWidth = width;
+ if (kBottomLeft_GrSurfaceOrigin == origin) {
+ fY = surfaceHeight - topOffset - height;
+ } else {
+ fY = topOffset;
+ }
+ fHeight = height;
+
+ SkASSERT(fWidth >= 0);
+ SkASSERT(fHeight >= 0);
+ }
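+
+    // Illustrative example (not part of the upstream header): for a surface of
+    // height 100 with kBottomLeft_GrSurfaceOrigin, a top-down rect at
+    // (leftOffset = 10, topOffset = 20, width = 30, height = 40) lands at
+    // fY = 100 - 20 - 40 = 40; fX, fWidth, and fHeight pass through unchanged.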
+
+ bool contains(int width, int height) const {
+ return fX <= 0 &&
+ fY <= 0 &&
+ fX + fWidth >= width &&
+ fY + fHeight >= height;
+ }
+
+    void invalidate() { fX = fY = fWidth = fHeight = -1; }
+    bool isInvalid() const {
+        return fX == -1 && fY == -1 && fWidth == -1 && fHeight == -1;
+    }
+
+    bool operator==(const GrNativeRect& that) const {
+        return 0 == memcmp(this, &that, sizeof(GrNativeRect));
+    }
+
+    bool operator!=(const GrNativeRect& that) const { return !(*this == that); }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrNonAtomicRef.h b/gfx/skia/skia/src/gpu/GrNonAtomicRef.h
new file mode 100644
index 0000000000..db7eea2c9a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrNonAtomicRef.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrNonAtomicRef_DEFINED
+#define GrNonAtomicRef_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTArray.h"
+
+/**
+ * A simple non-atomic ref used in the GrBackendApi when we don't want to pay the overhead of a
+ * thread-safe, ref-counted object.
+ */
+template<typename TSubclass> class GrNonAtomicRef : public SkNoncopyable {
+public:
+ GrNonAtomicRef() : fRefCnt(1) {}
+
+#ifdef SK_DEBUG
+ ~GrNonAtomicRef() {
+ // fRefCnt can be one when a subclass is created statically
+ SkASSERT((0 == fRefCnt || 1 == fRefCnt));
+ // Set to invalid values.
+ fRefCnt = -10;
+ }
+#endif
+
+ bool unique() const { return 1 == fRefCnt; }
+
+ // We allow this getter because this type is not thread-safe, meaning only one thread should
+ // have ownership and be manipulating the ref count or querying this.
+ int refCnt() const { return fRefCnt; }
+
+ void ref() const {
+ // Once the ref cnt reaches zero it should never be ref'ed again.
+ SkASSERT(fRefCnt > 0);
+ ++fRefCnt;
+ }
+
+ void unref() const {
+ SkASSERT(fRefCnt > 0);
+ --fRefCnt;
+ if (0 == fRefCnt) {
+ GrTDeleteNonAtomicRef(static_cast<const TSubclass*>(this));
+ return;
+ }
+ }
+
+private:
+ mutable int32_t fRefCnt;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+template<typename T> inline void GrTDeleteNonAtomicRef(const T* ref) {
+ delete ref;
+}
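+
+// Usage sketch (illustrative only; "MyPipelineState" is a hypothetical subclass,
+// not an upstream type):
+//
+//   class MyPipelineState : public GrNonAtomicRef<MyPipelineState> { /* ... */ };
+//
+//   MyPipelineState* state = new MyPipelineState;  // born with fRefCnt == 1
+//   state->ref();    // a second owner on the same thread
+//   state->unref();  // back to one owner
+//   state->unref();  // hits zero; GrTDeleteNonAtomicRef() deletes it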
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.cpp b/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.cpp
new file mode 100644
index 0000000000..2cf4af869c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrOnFlushResourceProvider.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrTextureResolveRenderTask.h"
+
+std::unique_ptr<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
+ sk_sp<GrSurfaceProxy> proxy, GrColorType colorType, sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+    // Since this is at flush time and this proxy won't be allocated for us by the
+    // GrResourceAllocator, we have to manually ensure it is instantiated here.
+ if (!this->instatiateProxy(proxy.get())) {
+ return nullptr;
+ }
+
+ auto renderTargetContext = fDrawingMgr->makeRenderTargetContext(
+ std::move(proxy), colorType, std::move(colorSpace), props, false);
+
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ renderTargetContext->discard();
+
+ // FIXME: http://skbug.com/9357: This breaks if the renderTargetContext splits its opsTask.
+ fDrawingMgr->fOnFlushRenderTasks.push_back(sk_ref_sp(renderTargetContext->getOpsTask()));
+
+ return renderTargetContext;
+}
+
+void GrOnFlushResourceProvider::addTextureResolveTask(sk_sp<GrTextureProxy> textureProxy,
+ GrSurfaceProxy::ResolveFlags resolveFlags) {
+ // Since we are bypassing normal DAG operation, we need to ensure the textureProxy's last render
+    // task gets closed before making a texture resolve task. makeClosed is what will mark MSAA and
+ // mipmaps dirty.
+ if (GrRenderTask* renderTask = textureProxy->getLastRenderTask()) {
+ renderTask->makeClosed(*this->caps());
+ }
+ auto task = static_cast<GrTextureResolveRenderTask*>(fDrawingMgr->fOnFlushRenderTasks.push_back(
+ sk_make_sp<GrTextureResolveRenderTask>()).get());
+ task->addProxy(textureProxy, resolveFlags, *this->caps());
+ task->makeClosed(*this->caps());
+}
+
+bool GrOnFlushResourceProvider::assignUniqueKeyToProxy(const GrUniqueKey& key,
+ GrTextureProxy* proxy) {
+ auto proxyProvider = fDrawingMgr->getContext()->priv().proxyProvider();
+ return proxyProvider->assignUniqueKeyToProxy(key, proxy);
+}
+
+void GrOnFlushResourceProvider::removeUniqueKeyFromProxy(GrTextureProxy* proxy) {
+ auto proxyProvider = fDrawingMgr->getContext()->priv().proxyProvider();
+ proxyProvider->removeUniqueKeyFromProxy(proxy);
+}
+
+void GrOnFlushResourceProvider::processInvalidUniqueKey(const GrUniqueKey& key) {
+ auto proxyProvider = fDrawingMgr->getContext()->priv().proxyProvider();
+ proxyProvider->processInvalidUniqueKey(key, nullptr,
+ GrProxyProvider::InvalidateGPUResource::kYes);
+}
+
+sk_sp<GrTextureProxy> GrOnFlushResourceProvider::findOrCreateProxyByUniqueKey(
+ const GrUniqueKey& key,
+ GrColorType colorType,
+ GrSurfaceOrigin origin,
+ UseAllocator useAllocator) {
+ auto proxyProvider = fDrawingMgr->getContext()->priv().proxyProvider();
+ return proxyProvider->findOrCreateProxyByUniqueKey(key, colorType, origin, useAllocator);
+}
+
+bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) {
+ SkASSERT(proxy->canSkipResourceAllocator());
+
+ // TODO: this class should probably just get a GrDirectContext
+ auto direct = fDrawingMgr->getContext()->priv().asDirectContext();
+ if (!direct) {
+ return false;
+ }
+
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ if (proxy->isLazy()) {
+ return proxy->priv().doLazyInstantiation(resourceProvider);
+ }
+
+ return proxy->instantiate(resourceProvider);
+}
+
+sk_sp<GrGpuBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size,
+ const void* data) {
+ // TODO: this class should probably just get a GrDirectContext
+ auto direct = fDrawingMgr->getContext()->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ return sk_sp<GrGpuBuffer>(
+ resourceProvider->createBuffer(size, intendedType, kDynamic_GrAccessPattern, data));
+}
+
+sk_sp<const GrGpuBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(
+ GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) {
+    // TODO: this class should probably just get a GrDirectContext
+ auto direct = fDrawingMgr->getContext()->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ return resourceProvider->findOrMakeStaticBuffer(intendedType, size, data, key);
+}
+
+uint32_t GrOnFlushResourceProvider::contextID() const {
+ return fDrawingMgr->getContext()->priv().contextID();
+}
+
+const GrCaps* GrOnFlushResourceProvider::caps() const {
+ return fDrawingMgr->getContext()->priv().caps();
+}
diff --git a/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.h b/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.h
new file mode 100644
index 0000000000..78a31f3038
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOnFlushResourceProvider.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOnFlushResourceProvider_DEFINED
+#define GrOnFlushResourceProvider_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/GrDeferredUpload.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceProvider.h"
+
+class GrDrawingManager;
+class GrOnFlushResourceProvider;
+class GrRenderTargetContext;
+class GrSurfaceProxy;
+class SkColorSpace;
+class SkSurfaceProps;
+
+/*
+ * This is the base class from which all pre-flush callback objects must be derived. It
+ * provides the "preFlush" / "postFlush" interface.
+ */
+class GrOnFlushCallbackObject {
+public:
+ virtual ~GrOnFlushCallbackObject() {}
+
+    /*
+     * The preFlush callback allows subsystems (e.g., text, path renderers) to create atlases
+     * for a specific flush. All the GrOpsTask IDs required for the flush are passed into the
+     * callback. Any render target contexts the callback uses to render its atlases are
+     * created through the provided GrOnFlushResourceProvider.
+     */
+ virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs) = 0;
+
+ /**
+ * Called once flushing is complete and all ops indicated by preFlush have been executed and
+ * released. startTokenForNextFlush can be used to track resources used in the current flush.
+ */
+ virtual void postFlush(GrDeferredUploadToken startTokenForNextFlush,
+ const uint32_t* opsTaskIDs, int numOpsTaskIDs) {}
+
+ /**
+     * Tells the callback owner to hold onto this object when freeing GPU resources.
+ *
+ * In particular, GrDrawingManager::freeGPUResources() deletes all the path renderers.
+ * Any OnFlushCallbackObject associated with a path renderer will need to be deleted.
+ */
+ virtual bool retainOnFreeGpuResources() { return false; }
+};
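+
+// Sketch of a minimal callback object (illustrative only; "MyAtlasSubsystem" is a
+// hypothetical class, not part of the upstream API):
+//
+//   class MyAtlasSubsystem : public GrOnFlushCallbackObject {
+//       void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t* opsTaskIDs,
+//                     int numOpsTaskIDs) override {
+//           // Create/instantiate atlas proxies via onFlushRP before the flush executes.
+//       }
+//       void postFlush(GrDeferredUploadToken startTokenForNextFlush,
+//                      const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
+//           // Evict atlas entries whose last-use tokens have now flushed.
+//       }
+//   };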
+
+/*
+ * This class is a shallow wrapper around the drawing manager. It is passed into the
+ * onFlush callbacks and is intended to limit the functionality available to them.
+ * It should never have additional data members or virtual methods.
+ */
+class GrOnFlushResourceProvider {
+public:
+ using UseAllocator = GrSurfaceProxy::UseAllocator;
+
+ explicit GrOnFlushResourceProvider(GrDrawingManager* drawingMgr) : fDrawingMgr(drawingMgr) {}
+
+ std::unique_ptr<GrRenderTargetContext> makeRenderTargetContext(
+ sk_sp<GrSurfaceProxy>, GrColorType, sk_sp<SkColorSpace>, const SkSurfaceProps*);
+
+ void addTextureResolveTask(sk_sp<GrTextureProxy>, GrSurfaceProxy::ResolveFlags);
+
+ // Proxy unique key management. See GrProxyProvider.h.
+ bool assignUniqueKeyToProxy(const GrUniqueKey&, GrTextureProxy*);
+ void removeUniqueKeyFromProxy(GrTextureProxy*);
+ void processInvalidUniqueKey(const GrUniqueKey&);
+ // GrColorType is necessary to set the proxy's texture swizzle.
+ sk_sp<GrTextureProxy> findOrCreateProxyByUniqueKey(const GrUniqueKey&,
+ GrColorType,
+ GrSurfaceOrigin,
+ UseAllocator);
+
+ bool instatiateProxy(GrSurfaceProxy*);
+
+ // Creates a GPU buffer with a "dynamic" access pattern.
+ sk_sp<GrGpuBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr);
+
+ // Either finds and refs, or creates a static GPU buffer with the given data.
+ sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data,
+ const GrUniqueKey&);
+
+ uint32_t contextID() const;
+ const GrCaps* caps() const;
+
+private:
+ GrOnFlushResourceProvider(const GrOnFlushResourceProvider&) = delete;
+ GrOnFlushResourceProvider& operator=(const GrOnFlushResourceProvider&) = delete;
+
+ GrDrawingManager* fDrawingMgr;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOpFlushState.cpp b/gfx/skia/skia/src/gpu/GrOpFlushState.cpp
new file mode 100644
index 0000000000..05e42e18ac
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpFlushState.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrOpFlushState.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrResourceProvider.h"
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrOpFlushState::GrOpFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider,
+ GrTokenTracker* tokenTracker,
+ sk_sp<GrBufferAllocPool::CpuBufferCache> cpuBufferCache)
+ : fVertexPool(gpu, cpuBufferCache)
+ , fIndexPool(gpu, std::move(cpuBufferCache))
+ , fGpu(gpu)
+ , fResourceProvider(resourceProvider)
+ , fTokenTracker(tokenTracker) {}
+
+const GrCaps& GrOpFlushState::caps() const {
+ return *fGpu->caps();
+}
+
+void GrOpFlushState::executeDrawsAndUploadsForMeshDrawOp(
+ const GrOp* op, const SkRect& chainBounds, GrProcessorSet&& processorSet,
+ GrPipeline::InputFlags pipelineFlags, const GrUserStencilSettings* stencilSettings) {
+ SkASSERT(this->opsRenderPass());
+
+ GrPipeline::InitArgs pipelineArgs;
+ pipelineArgs.fInputFlags = pipelineFlags;
+ pipelineArgs.fDstProxy = this->dstProxy();
+ pipelineArgs.fCaps = &this->caps();
+ pipelineArgs.fUserStencil = stencilSettings;
+ pipelineArgs.fOutputSwizzle = this->drawOpArgs().outputSwizzle();
+ GrPipeline* pipeline = this->allocator()->make<GrPipeline>(pipelineArgs,
+ std::move(processorSet),
+ this->detachAppliedClip());
+
+ while (fCurrDraw != fDraws.end() && fCurrDraw->fOp == op) {
+ GrDeferredUploadToken drawToken = fTokenTracker->nextTokenToFlush();
+ while (fCurrUpload != fInlineUploads.end() &&
+ fCurrUpload->fUploadBeforeToken == drawToken) {
+ this->opsRenderPass()->inlineUpload(this, fCurrUpload->fUpload);
+ ++fCurrUpload;
+ }
+
+ GrProgramInfo programInfo(this->proxy()->numSamples(),
+ this->proxy()->origin(),
+ *pipeline,
+ *fCurrDraw->fGeometryProcessor,
+ fCurrDraw->fFixedDynamicState,
+ fCurrDraw->fDynamicStateArrays,
+ fCurrDraw->fMeshCnt);
+
+ this->opsRenderPass()->draw(programInfo, fCurrDraw->fMeshes,
+ fCurrDraw->fMeshCnt, chainBounds);
+ fTokenTracker->flushToken();
+ ++fCurrDraw;
+ }
+}
+
+void GrOpFlushState::preExecuteDraws() {
+ fVertexPool.unmap();
+ fIndexPool.unmap();
+ for (auto& upload : fASAPUploads) {
+ this->doUpload(upload);
+ }
+    // Set up the execution iterators.
+ fCurrDraw = fDraws.begin();
+ fCurrUpload = fInlineUploads.begin();
+}
+
+void GrOpFlushState::reset() {
+ SkASSERT(fCurrDraw == fDraws.end());
+ SkASSERT(fCurrUpload == fInlineUploads.end());
+ fVertexPool.reset();
+ fIndexPool.reset();
+ fArena.reset();
+ fASAPUploads.reset();
+ fInlineUploads.reset();
+ fDraws.reset();
+ fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
+}
+
+void GrOpFlushState::doUpload(GrDeferredTextureUploadFn& upload,
+ bool shouldPrepareSurfaceForSampling) {
+ GrDeferredTextureUploadWritePixelsFn wp = [this, shouldPrepareSurfaceForSampling](
+ GrTextureProxy* dstProxy, int left, int top, int width, int height,
+ GrColorType colorType, const void* buffer, size_t rowBytes) {
+ GrSurface* dstSurface = dstProxy->peekSurface();
+ if (!fGpu->caps()->surfaceSupportsWritePixels(dstSurface)) {
+ return false;
+ }
+ GrCaps::SupportedWrite supportedWrite = fGpu->caps()->supportedWritePixelsColorType(
+ colorType, dstSurface->backendFormat(), colorType);
+ size_t tightRB = width * GrColorTypeBytesPerPixel(supportedWrite.fColorType);
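+        // e.g. (illustrative): a 100 px wide GrColorType::kRGBA_8888 upload has a
+        // tight row size of 100 * 4 = 400 bytes; the caller's rowBytes may be larger.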
+ SkASSERT(rowBytes >= tightRB);
+ std::unique_ptr<char[]> tmpPixels;
+ if (supportedWrite.fColorType != colorType ||
+ (!fGpu->caps()->writePixelsRowBytesSupport() && rowBytes != tightRB)) {
+ tmpPixels.reset(new char[height * tightRB]);
+ // Use kUnpremul to ensure no alpha type conversions or clamping occur.
+ static constexpr auto kAT = kUnpremul_SkAlphaType;
+ GrImageInfo srcInfo(colorType, kAT, nullptr, width, height);
+ GrImageInfo tmpInfo(supportedWrite.fColorType, kAT, nullptr, width,
+ height);
+ if (!GrConvertPixels(tmpInfo, tmpPixels.get(), tightRB, srcInfo, buffer, rowBytes)) {
+ return false;
+ }
+ rowBytes = tightRB;
+ buffer = tmpPixels.get();
+ }
+ return this->fGpu->writePixels(dstSurface, left, top, width, height, colorType,
+ supportedWrite.fColorType, buffer, rowBytes,
+ shouldPrepareSurfaceForSampling);
+ };
+ upload(wp);
+}
+
+GrDeferredUploadToken GrOpFlushState::addInlineUpload(GrDeferredTextureUploadFn&& upload) {
+ return fInlineUploads.append(&fArena, std::move(upload), fTokenTracker->nextDrawToken())
+ .fUploadBeforeToken;
+}
+
+GrDeferredUploadToken GrOpFlushState::addASAPUpload(GrDeferredTextureUploadFn&& upload) {
+ fASAPUploads.append(&fArena, std::move(upload));
+ return fTokenTracker->nextTokenToFlush();
+}
+
+void GrOpFlushState::recordDraw(
+ sk_sp<const GrGeometryProcessor> gp, const GrMesh meshes[], int meshCnt,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrPipeline::DynamicStateArrays* dynamicStateArrays) {
+ SkASSERT(fOpArgs);
+ SkDEBUGCODE(fOpArgs->validate());
+ bool firstDraw = fDraws.begin() == fDraws.end();
+ auto& draw = fDraws.append(&fArena);
+ GrDeferredUploadToken token = fTokenTracker->issueDrawToken();
+ if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
+ for (int i = 0; i < gp->numTextureSamplers(); ++i) {
+ fixedDynamicState->fPrimitiveProcessorTextures[i]->ref();
+ }
+ }
+ if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
+ int n = gp->numTextureSamplers() * meshCnt;
+ for (int i = 0; i < n; ++i) {
+ dynamicStateArrays->fPrimitiveProcessorTextures[i]->ref();
+ }
+ }
+ draw.fGeometryProcessor = std::move(gp);
+ draw.fFixedDynamicState = fixedDynamicState;
+ draw.fDynamicStateArrays = dynamicStateArrays;
+ draw.fMeshes = meshes;
+ draw.fMeshCnt = meshCnt;
+ draw.fOp = fOpArgs->op();
+ if (firstDraw) {
+ fBaseDrawToken = token;
+ }
+}
+
+void* GrOpFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
+ sk_sp<const GrBuffer>* buffer, int* startVertex) {
+ return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+}
+
+uint16_t* GrOpFlushState::makeIndexSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
+ int* startIndex) {
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+}
+
+void* GrOpFlushState::makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
+ int fallbackVertexCount, sk_sp<const GrBuffer>* buffer,
+ int* startVertex, int* actualVertexCount) {
+ return fVertexPool.makeSpaceAtLeast(vertexSize, minVertexCount, fallbackVertexCount, buffer,
+ startVertex, actualVertexCount);
+}
+
+uint16_t* GrOpFlushState::makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
+ sk_sp<const GrBuffer>* buffer, int* startIndex,
+ int* actualIndexCount) {
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpaceAtLeast(
+ minIndexCount, fallbackIndexCount, buffer, startIndex, actualIndexCount));
+}
+
+void GrOpFlushState::putBackIndices(int indexCount) {
+ fIndexPool.putBack(indexCount * sizeof(uint16_t));
+}
+
+void GrOpFlushState::putBackVertices(int vertices, size_t vertexStride) {
+ fVertexPool.putBack(vertices * vertexStride);
+}
+
+GrAppliedClip GrOpFlushState::detachAppliedClip() {
+ return fOpArgs->appliedClip() ? std::move(*fOpArgs->appliedClip()) : GrAppliedClip();
+}
+
+GrStrikeCache* GrOpFlushState::glyphCache() const {
+ return fGpu->getContext()->priv().getGrStrikeCache();
+}
+
+GrAtlasManager* GrOpFlushState::atlasManager() const {
+ return fGpu->getContext()->priv().getAtlasManager();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrOpFlushState::Draw::~Draw() {
+ if (fFixedDynamicState && fFixedDynamicState->fPrimitiveProcessorTextures) {
+ for (int i = 0; i < fGeometryProcessor->numTextureSamplers(); ++i) {
+ fFixedDynamicState->fPrimitiveProcessorTextures[i]->unref();
+ }
+ }
+ if (fDynamicStateArrays && fDynamicStateArrays->fPrimitiveProcessorTextures) {
+ int n = fGeometryProcessor->numTextureSamplers() * fMeshCnt;
+ const auto* textures = fDynamicStateArrays->fPrimitiveProcessorTextures;
+ for (int i = 0; i < n; ++i) {
+ textures[i]->unref();
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrOpFlushState.h b/gfx/skia/skia/src/gpu/GrOpFlushState.h
new file mode 100644
index 0000000000..27dac24774
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpFlushState.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOpFlushState_DEFINED
+#define GrOpFlushState_DEFINED
+
+#include <utility>
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkArenaAllocList.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrBufferAllocPool.h"
+#include "src/gpu/GrDeferredUpload.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+
+class GrGpu;
+class GrOpsRenderPass;
+class GrResourceProvider;
+
+/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
+class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
+public:
+    // The CpuBufferCache, if non-null, holds reusable CPU allocations of size
+    // GrBufferAllocPool::kDefaultBufferSize; in that case, new CPU memory is only allocated
+    // for vertices/indices when a buffer larger than kDefaultBufferSize is required.
+ GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
+ sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
+
+ ~GrOpFlushState() final { this->reset(); }
+
+ /** This is called after each op has a chance to prepare its draws and before the draws are
+ executed. */
+ void preExecuteDraws();
+
+ /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
+ surface needs to be prepared for being sampled in a draw after the upload, the caller
+ should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
+ when doing inline uploads to reset the image layout back to sampled. */
+ void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
+
+ /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
+ void executeDrawsAndUploadsForMeshDrawOp(
+ const GrOp* op, const SkRect& chainBounds, GrProcessorSet&&,
+ GrPipeline::InputFlags = GrPipeline::InputFlags::kNone,
+ const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);
+
+ GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
+ void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }
+
+ GrGpu* gpu() { return fGpu; }
+
+ void reset();
+
+ /** Additional data required on a per-op basis when executing GrOps. */
+ struct OpArgs {
+ explicit OpArgs(GrOp* op, GrRenderTargetProxy* proxy, GrAppliedClip* appliedClip,
+ const GrXferProcessor::DstProxy& dstProxy)
+ : fOp(op)
+ , fProxy(proxy)
+ , fAppliedClip(appliedClip)
+ , fDstProxy(dstProxy) {
+ }
+
+ int numSamples() const { return fProxy->numSamples(); }
+ GrSurfaceOrigin origin() const { return fProxy->origin(); }
+ GrSwizzle outputSwizzle() const { return fProxy->outputSwizzle(); }
+
+ GrOp* op() { return fOp; }
+ GrRenderTargetProxy* proxy() const { return fProxy; }
+ GrRenderTarget* renderTarget() const { return fProxy->peekRenderTarget(); }
+ GrAppliedClip* appliedClip() { return fAppliedClip; }
+ const GrAppliedClip* appliedClip() const { return fAppliedClip; }
+ const GrXferProcessor::DstProxy& dstProxy() const { return fDstProxy; }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fOp);
+ SkASSERT(fProxy);
+ }
+#endif
+
+ private:
+ GrOp* fOp;
+ GrRenderTargetProxy* fProxy;
+ GrAppliedClip* fAppliedClip;
+ GrXferProcessor::DstProxy fDstProxy; // TODO: do we still need the dst proxy here?
+ };
+
+ void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
+
+ const OpArgs& drawOpArgs() const {
+ SkASSERT(fOpArgs);
+ SkDEBUGCODE(fOpArgs->validate());
+ return *fOpArgs;
+ }
+
+ void setSampledProxyArray(SkTArray<GrTextureProxy*, true>* sampledProxies) {
+ fSampledProxies = sampledProxies;
+ }
+
+ SkTArray<GrTextureProxy*, true>* sampledProxyArray() override {
+ return fSampledProxies;
+ }
+
+ /** Overrides of GrDeferredUploadTarget. */
+
+ const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
+ GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
+ GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
+
+ /** Overrides of GrMeshDrawOp::Target. */
+ void recordDraw(sk_sp<const GrGeometryProcessor>, const GrMesh[], int meshCnt,
+ const GrPipeline::FixedDynamicState*,
+ const GrPipeline::DynamicStateArrays*) final;
+ void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
+ int* startVertex) final;
+ uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
+ void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
+ sk_sp<const GrBuffer>*, int* startVertex,
+ int* actualVertexCount) final;
+ uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
+ sk_sp<const GrBuffer>*, int* startIndex,
+ int* actualIndexCount) final;
+ void putBackIndices(int indexCount) final;
+ void putBackVertices(int vertices, size_t vertexStride) final;
+ GrRenderTargetProxy* proxy() const final { return fOpArgs->proxy(); }
+ const GrAppliedClip* appliedClip() final { return fOpArgs->appliedClip(); }
+ GrAppliedClip detachAppliedClip() final;
+ const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->dstProxy(); }
+ GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
+ const GrCaps& caps() const final;
+ GrResourceProvider* resourceProvider() const final { return fResourceProvider; }
+
+ GrStrikeCache* glyphCache() const final;
+
+ // At this point we know we're flushing so full access to the GrAtlasManager is required (and
+ // permissible).
+ GrAtlasManager* atlasManager() const final;
+
+ /** GrMeshDrawOp::Target override. */
+ SkArenaAlloc* allocator() override { return &fArena; }
+
+private:
+ struct InlineUpload {
+ InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
+ : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
+ GrDeferredTextureUploadFn fUpload;
+ GrDeferredUploadToken fUploadBeforeToken;
+ };
+
+ // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
+ // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
+    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
+ // the shared state once and then issue draws for each mesh.
+ struct Draw {
+ ~Draw();
+ sk_sp<const GrGeometryProcessor> fGeometryProcessor;
+ const GrPipeline::FixedDynamicState* fFixedDynamicState;
+ const GrPipeline::DynamicStateArrays* fDynamicStateArrays;
+ const GrMesh* fMeshes = nullptr;
+ const GrOp* fOp = nullptr;
+ int fMeshCnt = 0;
+ };
+
+ // Storage for ops' pipelines, draws, and inline uploads.
+ SkArenaAlloc fArena{sizeof(GrPipeline) * 100};
+
+ // Store vertex and index data on behalf of ops that are flushed.
+ GrVertexBufferAllocPool fVertexPool;
+ GrIndexBufferAllocPool fIndexPool;
+
+ // Data stored on behalf of the ops being flushed.
+ SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
+ SkArenaAllocList<InlineUpload> fInlineUploads;
+ SkArenaAllocList<Draw> fDraws;
+
+ // All draws we store have an implicit draw token. This is the draw token for the first draw
+ // in fDraws.
+ GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
+
+    // Info about the op that is currently preparing or executing using the flush state, or null if
+    // an op is not currently preparing or executing.
+ OpArgs* fOpArgs = nullptr;
+
+ // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
+    // array of proxies it uses before calling onPrepare and onExecute.
+ SkTArray<GrTextureProxy*, true>* fSampledProxies;
+
+ GrGpu* fGpu;
+ GrResourceProvider* fResourceProvider;
+ GrTokenTracker* fTokenTracker;
+ GrOpsRenderPass* fOpsRenderPass = nullptr;
+
+    // Variables used to track where we are in lists as ops are executed.
+ SkArenaAllocList<Draw>::Iter fCurrDraw;
+ SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
+};
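+
+// Flush-time usage sketch (illustrative; grounded in GrOpsTask::onPrepare and
+// onExecute elsewhere in this directory, with the surrounding driver code elided):
+//
+//   flushState->setOpArgs(&opArgs);
+//   op->prepare(flushState);         // may call recordDraw() / makeVertexSpace()
+//   flushState->setOpArgs(nullptr);
+//   ...
+//   flushState->preExecuteDraws();   // unmaps pools, performs ASAP uploads
+//   ...
+//   flushState->setOpArgs(&opArgs);
+//   op->execute(flushState, bounds); // drains fDraws via
+//                                    // executeDrawsAndUploadsForMeshDrawOp()
+//   flushState->setOpArgs(nullptr);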
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOpsRenderPass.cpp b/gfx/skia/skia/src/gpu/GrOpsRenderPass.cpp
new file mode 100644
index 0000000000..1b8ebeae05
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpsRenderPass.cpp
@@ -0,0 +1,68 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/GrOpsRenderPass.h"
+
+#include "include/core/SkRect.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrPrimitiveProcessor.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+
+void GrOpsRenderPass::clear(const GrFixedClip& clip, const SkPMColor4f& color) {
+ SkASSERT(fRenderTarget);
+ // A clear at this level will always be a true clear, so make sure clears were not supposed to
+    // be redirected to draws instead.
+ SkASSERT(!this->gpu()->caps()->performColorClearsAsDraws());
+ SkASSERT(!clip.scissorEnabled() || !this->gpu()->caps()->performPartialClearsAsDraws());
+ this->onClear(clip, color);
+}
+
+void GrOpsRenderPass::clearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ // As above, make sure the stencil clear wasn't supposed to be a draw rect with stencil settings
+ SkASSERT(!this->gpu()->caps()->performStencilClearsAsDraws());
+ this->onClearStencilClip(clip, insideStencilMask);
+}
+
+bool GrOpsRenderPass::draw(const GrProgramInfo& programInfo,
+ const GrMesh meshes[], int meshCount, const SkRect& bounds) {
+ if (!meshCount) {
+ return true;
+ }
+
+#ifdef SK_DEBUG
+ SkASSERT(!programInfo.primProc().hasInstanceAttributes() ||
+ this->gpu()->caps()->instanceAttribSupport());
+
+ programInfo.compatibleWithMeshes(meshes, meshCount);
+ programInfo.checkAllInstantiated();
+ programInfo.checkMSAAAndMIPSAreResolved();
+#endif
+
+ if (programInfo.primProc().numVertexAttributes() > this->gpu()->caps()->maxVertexAttributes()) {
+ this->gpu()->stats()->incNumFailedDraws();
+ return false;
+ }
+ this->onDraw(programInfo, meshes, meshCount, bounds);
+
+#ifdef SK_DEBUG
+ GrProcessor::CustomFeatures processorFeatures = programInfo.requestedFeatures();
+ if (GrProcessor::CustomFeatures::kSampleLocations & processorFeatures) {
+ // Verify we always have the same sample pattern key, regardless of graphics state.
+ SkASSERT(this->gpu()->findOrAssignSamplePatternKey(fRenderTarget)
+ == fRenderTarget->renderTargetPriv().getSamplePatternKey());
+ }
+#endif
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrOpsRenderPass.h b/gfx/skia/skia/src/gpu/GrOpsRenderPass.h
new file mode 100644
index 0000000000..e3d24a7b15
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpsRenderPass.h
@@ -0,0 +1,109 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrOpsRenderPass_DEFINED
+#define GrOpsRenderPass_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrOpFlushState;
+class GrFixedClip;
+class GrGpu;
+class GrMesh;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrProgramInfo;
+class GrRenderTarget;
+class GrSemaphore;
+struct SkIRect;
+struct SkRect;
+
+/**
+ * The GrOpsRenderPass is a series of commands (draws, clears, and discards), which all target the
+ * same render target. It is possible that these commands execute immediately (GL), or get buffered
+ * up for later execution (Vulkan). GrOps execute into a GrOpsRenderPass.
+ */
+class GrOpsRenderPass {
+public:
+ virtual ~GrOpsRenderPass() {}
+
+ virtual void insertEventMarker(const char*) = 0;
+
+ struct LoadAndStoreInfo {
+ GrLoadOp fLoadOp;
+ GrStoreOp fStoreOp;
+ SkPMColor4f fClearColor;
+ };
+
+ // Load-time clears of the stencil buffer are always to 0 so we don't store
+ // an 'fStencilClearValue'
+ struct StencilLoadAndStoreInfo {
+ GrLoadOp fLoadOp;
+ GrStoreOp fStoreOp;
+ };
+
+ virtual void begin() = 0;
+ // Signals the end of recording to the GrOpsRenderPass and that it can now be submitted.
+ virtual void end() = 0;
+
+ // We pass in an array of meshCount GrMesh to the draw. The backend should loop over each
+ // GrMesh object and emit a draw for it. Each draw will use the same GrPipeline and
+ // GrPrimitiveProcessor. This may fail if the draw would exceed any resource limits (e.g.
+ // number of vertex attributes is too large).
+ bool draw(const GrProgramInfo&, const GrMesh[], int meshCount, const SkRect& bounds);
+
+    // Performs an inline upload of texture data in the middle of a set of draws.
+ virtual void inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&) = 0;
+
+ /**
+ * Clear the owned render target. Ignores the draw state and clip.
+ */
+ void clear(const GrFixedClip&, const SkPMColor4f&);
+
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask);
+
+ /**
+ * Executes the SkDrawable object for the underlying backend.
+ */
+ virtual void executeDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler>) {}
+
+protected:
+ GrOpsRenderPass() : fOrigin(kTopLeft_GrSurfaceOrigin), fRenderTarget(nullptr) {}
+
+ GrOpsRenderPass(GrRenderTarget* rt, GrSurfaceOrigin origin)
+ : fOrigin(origin)
+ , fRenderTarget(rt) {
+ }
+
+ void set(GrRenderTarget* rt, GrSurfaceOrigin origin) {
+ SkASSERT(!fRenderTarget);
+
+ fRenderTarget = rt;
+ fOrigin = origin;
+ }
+
+ GrSurfaceOrigin fOrigin;
+ GrRenderTarget* fRenderTarget;
+
+private:
+ virtual GrGpu* gpu() = 0;
+
+ // overridden by backend-specific derived class to perform the draw call.
+ virtual void onDraw(const GrProgramInfo&, const GrMesh[], int meshCount,
+ const SkRect& bounds) = 0;
+
+ // overridden by backend-specific derived class to perform the clear.
+ virtual void onClear(const GrFixedClip&, const SkPMColor4f&) = 0;
+
+ virtual void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) = 0;
+
+ typedef GrOpsRenderPass INHERITED;
+};
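+
+// Typical recording sequence (a sketch; how the pass is obtained from the GrGpu
+// is backend-specific and elided here):
+//
+//   GrOpsRenderPass* pass = ...;
+//   pass->begin();
+//   pass->clear(clip, color);                           // optional
+//   pass->draw(programInfo, meshes, meshCount, bounds); // repeated per op chain
+//   pass->end();
+//   gpu->submit(pass);  // see GrOpsTask::onExecute in GrOpsTask.cpp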
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOpsTask.cpp b/gfx/skia/skia/src/gpu/GrOpsTask.cpp
new file mode 100644
index 0000000000..4d4236b2d9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpsTask.cpp
@@ -0,0 +1,856 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrOpsTask.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkExchange.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrResourceAllocator.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/geometry/GrRect.h"
+#include "src/gpu/ops/GrClearOp.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Experimentally we have found that most combining occurs within the first 10 comparisons.
+static const int kMaxOpMergeDistance = 10;
+static const int kMaxOpChainDistance = 10;
+
+////////////////////////////////////////////////////////////////////////////////
+
+using DstProxy = GrXferProcessor::DstProxy;
+
+////////////////////////////////////////////////////////////////////////////////
+
+static inline bool can_reorder(const SkRect& a, const SkRect& b) { return !GrRectsOverlap(a, b); }
+
+////////////////////////////////////////////////////////////////////////////////
+
+inline GrOpsTask::OpChain::List::List(std::unique_ptr<GrOp> op)
+ : fHead(std::move(op)), fTail(fHead.get()) {
+ this->validate();
+}
+
+inline GrOpsTask::OpChain::List::List(List&& that) { *this = std::move(that); }
+
+inline GrOpsTask::OpChain::List& GrOpsTask::OpChain::List::operator=(List&& that) {
+ fHead = std::move(that.fHead);
+ fTail = that.fTail;
+ that.fTail = nullptr;
+ this->validate();
+ return *this;
+}
+
+inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::popHead() {
+ SkASSERT(fHead);
+ auto temp = fHead->cutChain();
+ std::swap(temp, fHead);
+ if (!fHead) {
+ SkASSERT(fTail == temp.get());
+ fTail = nullptr;
+ }
+ return temp;
+}
+
+inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::removeOp(GrOp* op) {
+#ifdef SK_DEBUG
+ auto head = op;
+ while (head->prevInChain()) { head = head->prevInChain(); }
+ SkASSERT(head == fHead.get());
+#endif
+ auto prev = op->prevInChain();
+ if (!prev) {
+ SkASSERT(op == fHead.get());
+ return this->popHead();
+ }
+ auto temp = prev->cutChain();
+ if (auto next = temp->cutChain()) {
+ prev->chainConcat(std::move(next));
+ } else {
+ SkASSERT(fTail == op);
+ fTail = prev;
+ }
+ this->validate();
+ return temp;
+}
+
+inline void GrOpsTask::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
+ SkASSERT(op);
+ SkASSERT(op->isChainHead());
+ SkASSERT(op->isChainTail());
+ if (fHead) {
+ op->chainConcat(std::move(fHead));
+ fHead = std::move(op);
+ } else {
+ fHead = std::move(op);
+ fTail = fHead.get();
+ }
+}
+
+inline void GrOpsTask::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
+ SkASSERT(op->isChainTail());
+ fTail->chainConcat(std::move(op));
+ fTail = fTail->nextInChain();
+}
+
+inline void GrOpsTask::OpChain::List::validate() const {
+#ifdef SK_DEBUG
+ if (fHead) {
+ SkASSERT(fTail);
+ fHead->validateChain(fTail);
+ }
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrOpsTask::OpChain::OpChain(std::unique_ptr<GrOp> op,
+ GrProcessorSet::Analysis processorAnalysis,
+ GrAppliedClip* appliedClip, const DstProxy* dstProxy)
+ : fList{std::move(op)}
+ , fProcessorAnalysis(processorAnalysis)
+ , fAppliedClip(appliedClip) {
+ if (fProcessorAnalysis.requiresDstTexture()) {
+ SkASSERT(dstProxy && dstProxy->proxy());
+ fDstProxy = *dstProxy;
+ }
+ fBounds = fList.head()->bounds();
+}
+
+void GrOpsTask::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
+ if (fList.empty()) {
+ return;
+ }
+ for (const auto& op : GrOp::ChainRange<>(fList.head())) {
+ op.visitProxies(func);
+ }
+ if (fDstProxy.proxy()) {
+ func(fDstProxy.proxy(), GrMipMapped::kNo);
+ }
+ if (fAppliedClip) {
+ fAppliedClip->visitProxies(func);
+ }
+}
+
+void GrOpsTask::OpChain::deleteOps(GrOpMemoryPool* pool) {
+ while (!fList.empty()) {
+ pool->release(fList.popHead());
+ }
+}
+
+// Concatenates two op chains and attempts to merge ops across the chains. Assumes the two chains
+// are already known to be chainable. Returns the new chain.
+GrOpsTask::OpChain::List GrOpsTask::OpChain::DoConcat(
+ List chainA, List chainB, const GrCaps& caps, GrOpMemoryPool* pool,
+ GrAuditTrail* auditTrail) {
+ // We process ops in chain b from head to tail. We attempt to merge with nodes in a, starting
+ // at chain a's tail and working toward the head. We produce one of the following outcomes:
+ // 1) b's head is merged into an op in a.
+ // 2) An op from chain a is merged into b's head. (In this case b's head gets processed again.)
+    // 3) b's head is popped from chain b and added at the tail of a.
+ // After result 3 we don't want to attempt to merge the next head of b with the new tail of a,
+ // as we assume merges were already attempted when chain b was created. So we keep track of the
+ // original tail of a and start our iteration of a there. We also track the bounds of the nodes
+ // appended to chain a that will be skipped for bounds testing. If the original tail of a is
+ // merged into an op in b (case 2) then we advance the "original tail" towards the head of a.
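+    // Illustrative example (not upstream text): with a = [A1, A2] and b = [B1, B2],
+    // B1 is tested against A2, then A1. If A1 absorbs B1 (case 1), b becomes [B2].
+    // If instead A2 merges into B1 (case 2), a becomes [A1] and B1 is retested.
+    // If no merge happens (case 3), B1 is appended to a, giving a = [A1, A2, B1].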
+ GrOp* origATail = chainA.tail();
+ SkRect skipBounds = SkRectPriv::MakeLargestInverted();
+ do {
+ int numMergeChecks = 0;
+ bool merged = false;
+ bool noSkip = (origATail == chainA.tail());
+ SkASSERT(noSkip == (skipBounds == SkRectPriv::MakeLargestInverted()));
+ bool canBackwardMerge = noSkip || can_reorder(chainB.head()->bounds(), skipBounds);
+ SkRect forwardMergeBounds = skipBounds;
+ GrOp* a = origATail;
+ while (a) {
+ bool canForwardMerge =
+ (a == chainA.tail()) || can_reorder(a->bounds(), forwardMergeBounds);
+ if (canForwardMerge || canBackwardMerge) {
+ auto result = a->combineIfPossible(chainB.head(), caps);
+ SkASSERT(result != GrOp::CombineResult::kCannotCombine);
+ merged = (result == GrOp::CombineResult::kMerged);
+ GrOP_INFO("\t\t: (%s opID: %u) -> Combining with (%s, opID: %u)\n",
+ chainB.head()->name(), chainB.head()->uniqueID(), a->name(),
+ a->uniqueID());
+ }
+ if (merged) {
+ GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(auditTrail, a, chainB.head());
+ if (canBackwardMerge) {
+ pool->release(chainB.popHead());
+ } else {
+ // We merged the contents of b's head into a. We will replace b's head with a in
+ // chain b.
+ SkASSERT(canForwardMerge);
+ if (a == origATail) {
+ origATail = a->prevInChain();
+ }
+ std::unique_ptr<GrOp> detachedA = chainA.removeOp(a);
+ pool->release(chainB.popHead());
+ chainB.pushHead(std::move(detachedA));
+ if (chainA.empty()) {
+ // We merged all the nodes in chain a to chain b.
+ return chainB;
+ }
+ }
+ break;
+ } else {
+ if (++numMergeChecks == kMaxOpMergeDistance) {
+ break;
+ }
+ forwardMergeBounds.joinNonEmptyArg(a->bounds());
+ canBackwardMerge =
+ canBackwardMerge && can_reorder(chainB.head()->bounds(), a->bounds());
+ a = a->prevInChain();
+ }
+ }
+ // If we weren't able to merge b's head then pop b's head from chain b and make it the new
+ // tail of a.
+ if (!merged) {
+ chainA.pushTail(chainB.popHead());
+ skipBounds.joinNonEmptyArg(chainA.tail()->bounds());
+ }
+ } while (!chainB.empty());
+ return chainA;
+}
+
+// Attempts to concatenate the given chain onto our own and merge ops across the chains. Returns
+// whether the operation succeeded. On success, the provided list will be returned empty.
+bool GrOpsTask::OpChain::tryConcat(
+ List* list, GrProcessorSet::Analysis processorAnalysis, const DstProxy& dstProxy,
+ const GrAppliedClip* appliedClip, const SkRect& bounds, const GrCaps& caps,
+ GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
+ SkASSERT(!fList.empty());
+ SkASSERT(!list->empty());
+ SkASSERT(fProcessorAnalysis.requiresDstTexture() == SkToBool(fDstProxy.proxy()));
+ SkASSERT(processorAnalysis.requiresDstTexture() == SkToBool(dstProxy.proxy()));
+ if (fList.head()->classID() != list->head()->classID() ||
+ SkToBool(fAppliedClip) != SkToBool(appliedClip) ||
+ (fAppliedClip && *fAppliedClip != *appliedClip) ||
+ (fProcessorAnalysis.requiresNonOverlappingDraws() !=
+ processorAnalysis.requiresNonOverlappingDraws()) ||
+ (fProcessorAnalysis.requiresNonOverlappingDraws() &&
+             // Non-overlapping draws are only required when Ganesh will either insert a barrier,
+ // or read back a new dst texture between draws. In either case, we can neither
+ // chain nor combine overlapping Ops.
+ GrRectsTouchOrOverlap(fBounds, bounds)) ||
+ (fProcessorAnalysis.requiresDstTexture() != processorAnalysis.requiresDstTexture()) ||
+ (fProcessorAnalysis.requiresDstTexture() && fDstProxy != dstProxy)) {
+ return false;
+ }
+
+ SkDEBUGCODE(bool first = true;)
+ do {
+ switch (fList.tail()->combineIfPossible(list->head(), caps)) {
+ case GrOp::CombineResult::kCannotCombine:
+ // If an op supports chaining then it is required that chaining is transitive and
+ // that if any two ops in two different chains can merge then the two chains
+ // may also be chained together. Thus, we should only hit this on the first
+ // iteration.
+ SkASSERT(first);
+ return false;
+ case GrOp::CombineResult::kMayChain:
+ fList = DoConcat(std::move(fList), skstd::exchange(*list, List()), caps, pool,
+ auditTrail);
+ // The above exchange cleared out 'list'. The list needs to be empty now for the
+ // loop to terminate.
+ SkASSERT(list->empty());
+ break;
+ case GrOp::CombineResult::kMerged: {
+ GrOP_INFO("\t\t: (%s opID: %u) -> Combining with (%s, opID: %u)\n",
+ list->tail()->name(), list->tail()->uniqueID(), list->head()->name(),
+ list->head()->uniqueID());
+ GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(auditTrail, fList.tail(), list->head());
+ pool->release(list->popHead());
+ break;
+ }
+ }
+ SkDEBUGCODE(first = false);
+ } while (!list->empty());
+
+ // The new ops were successfully merged and/or chained onto our own.
+ fBounds.joinPossiblyEmptyRect(bounds);
+ return true;
+}
+
+bool GrOpsTask::OpChain::prependChain(OpChain* that, const GrCaps& caps, GrOpMemoryPool* pool,
+ GrAuditTrail* auditTrail) {
+ if (!that->tryConcat(
+ &fList, fProcessorAnalysis, fDstProxy, fAppliedClip, fBounds, caps, pool, auditTrail)) {
+ this->validate();
+ // append failed
+ return false;
+ }
+
+ // 'that' owns the combined chain. Move it into 'this'.
+ SkASSERT(fList.empty());
+ fList = std::move(that->fList);
+ fBounds = that->fBounds;
+
+ that->fDstProxy.setProxy(nullptr);
+ if (that->fAppliedClip) {
+ for (int i = 0; i < that->fAppliedClip->numClipCoverageFragmentProcessors(); ++i) {
+ that->fAppliedClip->detachClipCoverageFragmentProcessor(i);
+ }
+ }
+ this->validate();
+ return true;
+}
+
+std::unique_ptr<GrOp> GrOpsTask::OpChain::appendOp(
+ std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis,
+ const DstProxy* dstProxy, const GrAppliedClip* appliedClip, const GrCaps& caps,
+ GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
+ const GrXferProcessor::DstProxy noDstProxy;
+ if (!dstProxy) {
+ dstProxy = &noDstProxy;
+ }
+ SkASSERT(op->isChainHead() && op->isChainTail());
+ SkRect opBounds = op->bounds();
+ List chain(std::move(op));
+ if (!this->tryConcat(
+ &chain, processorAnalysis, *dstProxy, appliedClip, opBounds, caps, pool, auditTrail)) {
+ // append failed, give the op back to the caller.
+ this->validate();
+ return chain.popHead();
+ }
+
+ SkASSERT(chain.empty());
+ this->validate();
+ return nullptr;
+}
+
+inline void GrOpsTask::OpChain::validate() const {
+#ifdef SK_DEBUG
+ fList.validate();
+ for (const auto& op : GrOp::ChainRange<>(fList.head())) {
+ // Not using SkRect::contains because we allow empty rects.
+ SkASSERT(fBounds.fLeft <= op.bounds().fLeft && fBounds.fTop <= op.bounds().fTop &&
+ fBounds.fRight >= op.bounds().fRight && fBounds.fBottom >= op.bounds().fBottom);
+ }
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrOpsTask::GrOpsTask(sk_sp<GrOpMemoryPool> opMemoryPool,
+ sk_sp<GrRenderTargetProxy> rtProxy,
+ GrAuditTrail* auditTrail)
+ : GrRenderTask(std::move(rtProxy))
+ , fOpMemoryPool(std::move(opMemoryPool))
+ , fAuditTrail(auditTrail)
+ , fLastClipStackGenID(SK_InvalidUniqueID)
+ SkDEBUGCODE(, fNumClips(0)) {
+ SkASSERT(fOpMemoryPool);
+ fTarget->setLastRenderTask(this);
+}
+
+void GrOpsTask::deleteOps() {
+ for (auto& chain : fOpChains) {
+ chain.deleteOps(fOpMemoryPool.get());
+ }
+ fOpChains.reset();
+}
+
+GrOpsTask::~GrOpsTask() {
+ this->deleteOps();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrOpsTask::endFlush() {
+ fLastClipStackGenID = SK_InvalidUniqueID;
+ this->deleteOps();
+ fClipAllocator.reset();
+
+ if (fTarget && this == fTarget->getLastRenderTask()) {
+ fTarget->setLastRenderTask(nullptr);
+ }
+
+ fTarget.reset();
+ fDeferredProxies.reset();
+ fSampledProxies.reset();
+ fAuditTrail = nullptr;
+}
+
+void GrOpsTask::onPrePrepare(GrRecordingContext* context) {
+ SkASSERT(this->isClosed());
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+#endif
+    // TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
+    // can end up with GrOpsTasks that only have a discard load op and no ops. For Vulkan validation
+    // we need to keep that discard and not drop it. Once we have reduced op splitting enabled,
+    // we shouldn't end up with GrOpsTasks that contain only a discard.
+ if (this->isNoOp() || (fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
+ return;
+ }
+
+ for (const auto& chain : fOpChains) {
+ if (chain.shouldExecute()) {
+ chain.head()->prePrepare(context);
+ }
+ }
+}
+
+void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
+ SkASSERT(fTarget->peekRenderTarget());
+ SkASSERT(this->isClosed());
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+#endif
+    // TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
+    // can end up with GrOpsTasks that only have a discard load op and no ops. For Vulkan validation
+    // we need to keep that discard and not drop it. Once we have reduced op splitting enabled,
+    // we shouldn't end up with GrOpsTasks that contain only a discard.
+ if (this->isNoOp() || (fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
+ return;
+ }
+
+ flushState->setSampledProxyArray(&fSampledProxies);
+ // Loop over the ops that haven't yet been prepared.
+ for (const auto& chain : fOpChains) {
+ if (chain.shouldExecute()) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia.gpu", chain.head()->name());
+#endif
+ GrOpFlushState::OpArgs opArgs(chain.head(),
+ fTarget->asRenderTargetProxy(),
+ chain.appliedClip(),
+ chain.dstProxy());
+
+ flushState->setOpArgs(&opArgs);
+ // GrOp::prePrepare may or may not have been called at this point
+ chain.head()->prepare(flushState);
+ flushState->setOpArgs(nullptr);
+ }
+ }
+ flushState->setSampledProxyArray(nullptr);
+}
+
+static GrOpsRenderPass* create_render_pass(
+ GrGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ GrLoadOp colorLoadOp, const SkPMColor4f& loadClearColor, GrLoadOp stencilLoadOp,
+ GrStoreOp stencilStoreOp, const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ const GrOpsRenderPass::LoadAndStoreInfo kColorLoadStoreInfo {
+ colorLoadOp,
+ GrStoreOp::kStore,
+ loadClearColor
+ };
+
+ // TODO:
+ // We would like to (at this level) only ever clear & discard. We would need
+ // to stop splitting up higher level OpsTasks for copyOps to achieve that.
+ // Note: we would still need SB loads and stores but they would happen at a
+ // lower level (inside the VK command buffer).
+ const GrOpsRenderPass::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
+ stencilLoadOp,
+ stencilStoreOp,
+ };
+
+ return gpu->getOpsRenderPass(rt, origin, bounds, kColorLoadStoreInfo, stencilLoadAndStoreInfo,
+ sampledProxies);
+}
+
+// TODO: this is where GrOp::renderTarget is used (which is fine since it
+// is at flush time). However, we need to store the RenderTargetProxy in the
+// Ops and instantiate them here.
+bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
+    // TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
+    // can end up with GrOpsTasks that only have a discard load op and no ops. For Vulkan validation
+    // we need to keep that discard and not drop it. Once we have reduced op splitting enabled,
+    // we shouldn't end up with GrOpsTasks that contain only a discard.
+ if (this->isNoOp() || (fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
+ return false;
+ }
+
+ SkASSERT(fTarget->peekRenderTarget());
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ // Make sure load ops are not kClear if the GPU needs to use draws for clears
+ SkASSERT(fColorLoadOp != GrLoadOp::kClear ||
+ !flushState->gpu()->caps()->performColorClearsAsDraws());
+
+ const GrCaps& caps = *flushState->gpu()->caps();
+ GrRenderTarget* renderTarget = fTarget.get()->peekRenderTarget();
+ SkASSERT(renderTarget);
+ GrStencilAttachment* stencil = renderTarget->renderTargetPriv().getStencilAttachment();
+
+ GrLoadOp stencilLoadOp;
+ switch (fInitialStencilContent) {
+ case StencilContent::kDontCare:
+ stencilLoadOp = GrLoadOp::kDiscard;
+ break;
+ case StencilContent::kUserBitsCleared:
+ SkASSERT(!caps.performStencilClearsAsDraws());
+ SkASSERT(stencil);
+ if (caps.discardStencilValuesAfterRenderPass()) {
+ // Always clear the stencil if it is being discarded after render passes. This is
+ // also an optimization because we are on a tiler and it avoids loading the values
+ // from memory.
+ stencilLoadOp = GrLoadOp::kClear;
+ break;
+ }
+ if (!stencil->hasPerformedInitialClear()) {
+ stencilLoadOp = GrLoadOp::kClear;
+ stencil->markHasPerformedInitialClear();
+ break;
+ }
+ // renderTargetContexts are required to leave the user stencil bits in a cleared state
+ // once finished, meaning the stencil values will always remain cleared after the
+ // initial clear. Just fall through to reloading the existing (cleared) stencil values
+ // from memory.
+ case StencilContent::kPreserved:
+ SkASSERT(stencil);
+ stencilLoadOp = GrLoadOp::kLoad;
+ break;
+ }
+
+ // NOTE: If fMustPreserveStencil is set, then we are executing a renderTargetContext that split
+ // its opsTask.
+ //
+ // FIXME: We don't currently flag render passes that don't use stencil at all. In that case
+ // their store op might be "discard", and we currently make the assumption that a discard will
+ // not invalidate what's already in main memory. This is probably ok for now, but certainly
+ // something we want to address soon.
+ GrStoreOp stencilStoreOp = (caps.discardStencilValuesAfterRenderPass() && !fMustPreserveStencil)
+ ? GrStoreOp::kDiscard
+ : GrStoreOp::kStore;
+
+ GrOpsRenderPass* renderPass = create_render_pass(
+ flushState->gpu(), fTarget->peekRenderTarget(), fTarget->origin(),
+ fClippedContentBounds, fColorLoadOp, fLoadClearColor, stencilLoadOp, stencilStoreOp,
+ fSampledProxies);
+ flushState->setOpsRenderPass(renderPass);
+ renderPass->begin();
+
+ // Draw all the generated geometry.
+ for (const auto& chain : fOpChains) {
+ if (!chain.shouldExecute()) {
+ continue;
+ }
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia.gpu", chain.head()->name());
+#endif
+
+ GrOpFlushState::OpArgs opArgs(chain.head(),
+ fTarget->asRenderTargetProxy(),
+ chain.appliedClip(),
+ chain.dstProxy());
+
+ flushState->setOpArgs(&opArgs);
+ chain.head()->execute(flushState, chain.bounds());
+ flushState->setOpArgs(nullptr);
+ }
+
+ renderPass->end();
+ flushState->gpu()->submit(renderPass);
+ flushState->setOpsRenderPass(nullptr);
+
+ return true;
+}
+
+void GrOpsTask::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
+ fColorLoadOp = op;
+ fLoadClearColor = color;
+ if (GrLoadOp::kClear == fColorLoadOp) {
+ fTotalBounds.setWH(fTarget->width(), fTarget->height());
+ }
+}
+
+bool GrOpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
+ // If we previously recorded a wait op, we cannot delete the wait op. Until we track the wait
+ // ops separately from normal ops, we have to avoid clearing out any ops in this case as well.
+ if (fHasWaitOp) {
+ canDiscardPreviousOps = CanDiscardPreviousOps::kNo;
+ }
+
+ if (CanDiscardPreviousOps::kYes == canDiscardPreviousOps || this->isEmpty()) {
+ this->deleteOps();
+ fDeferredProxies.reset();
+ fSampledProxies.reset();
+
+        // If the opsTask is using a render target which wraps a Vulkan command buffer, we can't do
+ // a clear load since we cannot change the render pass that we are using. Thus we fall back
+ // to making a clear op in this case.
+ return !fTarget->asRenderTargetProxy()->wrapsVkSecondaryCB();
+ }
+
+ // Could not empty the task, so an op must be added to handle the clear
+ return false;
+}
+
+void GrOpsTask::discard() {
+ // Discard calls to in-progress opsTasks are ignored. Calls at the start update the
+ // opsTasks' color & stencil load ops.
+ if (this->isEmpty()) {
+ fColorLoadOp = GrLoadOp::kDiscard;
+ fInitialStencilContent = StencilContent::kDontCare;
+ fTotalBounds.setEmpty();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void GrOpsTask::dump(bool printDependencies) const {
+ GrRenderTask::dump(printDependencies);
+
+ SkDebugf("fColorLoadOp: ");
+ switch (fColorLoadOp) {
+ case GrLoadOp::kLoad:
+ SkDebugf("kLoad\n");
+ break;
+ case GrLoadOp::kClear:
+ SkDebugf("kClear (0x%x)\n", fLoadClearColor.toBytes_RGBA());
+ break;
+ case GrLoadOp::kDiscard:
+ SkDebugf("kDiscard\n");
+ break;
+ }
+
+ SkDebugf("fInitialStencilContent: ");
+ switch (fInitialStencilContent) {
+ case StencilContent::kDontCare:
+ SkDebugf("kDontCare\n");
+ break;
+ case StencilContent::kUserBitsCleared:
+ SkDebugf("kUserBitsCleared\n");
+ break;
+ case StencilContent::kPreserved:
+ SkDebugf("kPreserved\n");
+ break;
+ }
+
+ SkDebugf("ops (%d):\n", fOpChains.count());
+ for (int i = 0; i < fOpChains.count(); ++i) {
+ SkDebugf("*******************************\n");
+ if (!fOpChains[i].head()) {
+ SkDebugf("%d: <combined forward or failed instantiation>\n", i);
+ } else {
+ SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
+ SkRect bounds = fOpChains[i].bounds();
+ SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+ bounds.fTop, bounds.fRight, bounds.fBottom);
+ for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
+ SkString info = SkTabString(op.dumpInfo(), 1);
+ SkDebugf("%s\n", info.c_str());
+ bounds = op.bounds();
+ SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+ bounds.fTop, bounds.fRight, bounds.fBottom);
+ }
+ }
+ }
+}
+
+void GrOpsTask::visitProxies_debugOnly(const VisitSurfaceProxyFunc& func) const {
+ auto textureFunc = [ func ] (GrTextureProxy* tex, GrMipMapped mipmapped) {
+ func(tex, mipmapped);
+ };
+
+ for (const OpChain& chain : fOpChains) {
+ chain.visitProxies(textureFunc);
+ }
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrOpsTask::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
+ bool used = false;
+
+ auto visit = [ proxyToCheck, &used ] (GrSurfaceProxy* p, GrMipMapped) {
+ if (p == proxyToCheck) {
+ used = true;
+ }
+ };
+ for (const OpChain& recordedOp : fOpChains) {
+ recordedOp.visitProxies(visit);
+ }
+
+ return used;
+}
+
+void GrOpsTask::handleInternalAllocationFailure() {
+ bool hasUninstantiatedProxy = false;
+ auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
+ if (!p->isInstantiated()) {
+ hasUninstantiatedProxy = true;
+ }
+ };
+ for (OpChain& recordedOp : fOpChains) {
+ hasUninstantiatedProxy = false;
+ recordedOp.visitProxies(checkInstantiation);
+ if (hasUninstantiatedProxy) {
+ recordedOp.setSkipExecuteFlag();
+ }
+ }
+}
+
+void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+ for (int i = 0; i < fDeferredProxies.count(); ++i) {
+ SkASSERT(!fDeferredProxies[i]->isInstantiated());
+ // We give all the deferred proxies a write usage at the very start of flushing. This
+ // locks them out of being reused for the entire flush until they are read - and then
+ // they can be recycled. This is a bit unfortunate because a flush can proceed in waves
+ // with sub-flushes. The deferred proxies only need to be pinned from the start of
+ // the sub-flush in which they appear.
+ alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
+ }
+
+    // Add the interval for all the writes to this GrOpsTask's target.
+ if (fOpChains.count()) {
+ unsigned int cur = alloc->curOp();
+
+ alloc->addInterval(fTarget.get(), cur, cur + fOpChains.count() - 1,
+ GrResourceAllocator::ActualUse::kYes);
+ } else {
+ // This can happen if there is a loadOp (e.g., a clear) but no other draws. In this case we
+ // still need to add an interval for the destination so we create a fake op# for
+ // the missing clear op.
+ alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
+ alloc->incOps();
+ }
+
+ auto gather = [ alloc SkDEBUGCODE(, this) ] (GrSurfaceProxy* p, GrMipMapped) {
+ alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
+ SkDEBUGCODE(, fTarget.get() == p));
+ };
+ for (const OpChain& recordedOp : fOpChains) {
+ recordedOp.visitProxies(gather);
+
+ // Even though the op may have been (re)moved we still need to increment the op count to
+ // keep all the math consistent.
+ alloc->incOps();
+ }
+}
+
+void GrOpsTask::recordOp(
+ std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis, GrAppliedClip* clip,
+ const DstProxy* dstProxy, const GrCaps& caps) {
+ SkDEBUGCODE(op->validate();)
+ SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxy && dstProxy->proxy()));
+ SkASSERT(fTarget);
+
+ // A closed GrOpsTask should never receive new/more ops
+ SkASSERT(!this->isClosed());
+ if (!op->bounds().isFinite()) {
+ fOpMemoryPool->release(std::move(op));
+ return;
+ }
+
+ // Account for this op's bounds before we attempt to combine.
+ // NOTE: The caller should have already called "op->setClippedBounds()" by now, if applicable.
+ fTotalBounds.join(op->bounds());
+
+ // Check if there is an op we can combine with by linearly searching back until we either
+ // 1) check every op
+ // 2) intersect with something
+ // 3) find a 'blocker'
+ GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
+ GrOP_INFO("opsTask: %d Recording (%s, opID: %u)\n"
+ "\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
+ this->uniqueID(),
+ op->name(),
+ op->uniqueID(),
+ op->bounds().fLeft, op->bounds().fTop,
+ op->bounds().fRight, op->bounds().fBottom);
+ GrOP_INFO(SkTabString(op->dumpInfo(), 1).c_str());
+ GrOP_INFO("\tOutcome:\n");
+ int maxCandidates = SkTMin(kMaxOpChainDistance, fOpChains.count());
+ if (maxCandidates) {
+ int i = 0;
+ while (true) {
+ OpChain& candidate = fOpChains.fromBack(i);
+ op = candidate.appendOp(std::move(op), processorAnalysis, dstProxy, clip, caps,
+ fOpMemoryPool.get(), fAuditTrail);
+ if (!op) {
+ return;
+ }
+ // Stop going backwards if we would cause a painter's order violation.
+ if (!can_reorder(candidate.bounds(), op->bounds())) {
+ GrOP_INFO("\t\tBackward: Intersects with chain (%s, head opID: %u)\n",
+ candidate.head()->name(), candidate.head()->uniqueID());
+ break;
+ }
+ if (++i == maxCandidates) {
+ GrOP_INFO("\t\tBackward: Reached max lookback or beginning of op array %d\n", i);
+ break;
+ }
+ }
+ } else {
+ GrOP_INFO("\t\tBackward: FirstOp\n");
+ }
+ if (clip) {
+ clip = fClipAllocator.make<GrAppliedClip>(std::move(*clip));
+ SkDEBUGCODE(fNumClips++;)
+ }
+ fOpChains.emplace_back(std::move(op), processorAnalysis, clip, dstProxy);
+}
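+
+// A concrete sketch of the reorder rule used in the lookback above (bounds values are
+// hypothetical): ops may hop over each other only when their bounds do not overlap, which is
+// what preserves painter's order:
+//
+//     can_reorder(SkRect::MakeLTRB(0, 0, 10, 10), SkRect::MakeLTRB(20, 20, 30, 30)); // true
+//     can_reorder(SkRect::MakeLTRB(0, 0, 10, 10), SkRect::MakeLTRB(5, 5, 30, 30));   // false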
+
+void GrOpsTask::forwardCombine(const GrCaps& caps) {
+ SkASSERT(!this->isClosed());
+ GrOP_INFO("opsTask: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
+
+ for (int i = 0; i < fOpChains.count() - 1; ++i) {
+ OpChain& chain = fOpChains[i];
+ int maxCandidateIdx = SkTMin(i + kMaxOpChainDistance, fOpChains.count() - 1);
+ int j = i + 1;
+ while (true) {
+ OpChain& candidate = fOpChains[j];
+ if (candidate.prependChain(&chain, caps, fOpMemoryPool.get(), fAuditTrail)) {
+ break;
+ }
+ // Stop traversing if we would cause a painter's order violation.
+ if (!can_reorder(chain.bounds(), candidate.bounds())) {
+ GrOP_INFO(
+ "\t\t%d: chain (%s head opID: %u) -> "
+ "Intersects with chain (%s, head opID: %u)\n",
+ i, chain.head()->name(), chain.head()->uniqueID(), candidate.head()->name(),
+ candidate.head()->uniqueID());
+ break;
+ }
+ if (++j > maxCandidateIdx) {
+ GrOP_INFO("\t\t%d: chain (%s opID: %u) -> Reached max lookahead or end of array\n",
+ i, chain.head()->name(), chain.head()->uniqueID());
+ break;
+ }
+ }
+ }
+}
+
+GrRenderTask::ExpectedOutcome GrOpsTask::onMakeClosed(
+ const GrCaps& caps, SkIRect* targetUpdateBounds) {
+ this->forwardCombine(caps);
+ if (!this->isNoOp()) {
+ SkRect clippedContentBounds = SkRect::MakeIWH(fTarget->width(), fTarget->height());
+ // TODO: If we can fix up GLPrograms test to always intersect the fTarget bounds then we can
+ // simply assert here that the bounds intersect.
+ if (clippedContentBounds.intersect(fTotalBounds)) {
+ clippedContentBounds.roundOut(&fClippedContentBounds);
+ *targetUpdateBounds = fClippedContentBounds;
+ return ExpectedOutcome::kTargetDirty;
+ }
+ }
+ return ExpectedOutcome::kTargetUnchanged;
+}
diff --git a/gfx/skia/skia/src/gpu/GrOpsTask.h b/gfx/skia/skia/src/gpu/GrOpsTask.h
new file mode 100644
index 0000000000..c2238120b6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOpsTask.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOpsTask_DEFINED
+#define GrOpsTask_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkTLazy.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrPrimitiveProcessor.h"
+#include "src/gpu/GrRenderTask.h"
+#include "src/gpu/ops/GrDrawOp.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrAuditTrail;
+class GrCaps;
+class GrClearOp;
+class GrGpuBuffer;
+class GrOpMemoryPool;
+class GrRenderTargetProxy;
+
+class GrOpsTask : public GrRenderTask {
+private:
+ using DstProxy = GrXferProcessor::DstProxy;
+
+public:
+ GrOpsTask(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
+ ~GrOpsTask() override;
+
+ GrOpsTask* asOpsTask() override { return this; }
+
+ bool isEmpty() const { return fOpChains.empty(); }
+
+ /**
+ * Empties the draw buffer of any queued up draws.
+ */
+ void endFlush() override;
+
+ void onPrePrepare(GrRecordingContext*) override;
+ /**
+ * Together these two functions flush all queued up draws to GrCommandBuffer. The return value
+ * of executeOps() indicates whether any commands were actually issued to the GPU.
+ */
+ void onPrepare(GrOpFlushState* flushState) override;
+ bool onExecute(GrOpFlushState* flushState) override;
+
+ void addSampledTexture(GrTextureProxy* proxy) {
+ fSampledProxies.push_back(proxy);
+ }
+
+ void addOp(std::unique_ptr<GrOp> op, GrTextureResolveManager textureResolveManager,
+ const GrCaps& caps) {
+ auto addDependency = [ textureResolveManager, &caps, this ] (
+ GrTextureProxy* p, GrMipMapped mipmapped) {
+ this->addDependency(p, mipmapped, textureResolveManager, caps);
+ };
+
+ op->visitProxies(addDependency);
+
+ this->recordOp(std::move(op), GrProcessorSet::EmptySetAnalysis(), nullptr, nullptr, caps);
+ }
+
+ void addWaitOp(std::unique_ptr<GrOp> op, GrTextureResolveManager textureResolveManager,
+ const GrCaps& caps) {
+ fHasWaitOp = true;
+ this->addOp(std::move(op), textureResolveManager, caps);
+ }
+
+ void addDrawOp(std::unique_ptr<GrDrawOp> op, const GrProcessorSet::Analysis& processorAnalysis,
+ GrAppliedClip&& clip, const DstProxy& dstProxy,
+ GrTextureResolveManager textureResolveManager, const GrCaps& caps) {
+ auto addDependency = [ textureResolveManager, &caps, this ] (
+ GrTextureProxy* p, GrMipMapped mipmapped) {
+ this->addSampledTexture(p);
+ this->addDependency(p, mipmapped, textureResolveManager, caps);
+ };
+
+ op->visitProxies(addDependency);
+ clip.visitProxies(addDependency);
+ if (dstProxy.proxy()) {
+ this->addSampledTexture(dstProxy.proxy());
+ addDependency(dstProxy.proxy(), GrMipMapped::kNo);
+ }
+
+ this->recordOp(std::move(op), processorAnalysis, clip.doesClip() ? &clip : nullptr,
+ &dstProxy, caps);
+ }
+
+ void discard();
+
+ SkDEBUGCODE(void dump(bool printDependencies) const override;)
+ SkDEBUGCODE(int numClips() const override { return fNumClips; })
+ SkDEBUGCODE(void visitProxies_debugOnly(const VisitSurfaceProxyFunc&) const override;)
+
+private:
+ bool isNoOp() const {
+ // TODO: GrLoadOp::kDiscard (i.e., storing a discard) should also be grounds for skipping
+ // execution. We currently don't because of Vulkan. See http://skbug.com/9373.
+ //
+ // TODO: We should also consider stencil load/store here. We get away with it for now
+ // because we never discard stencil buffers.
+ return fOpChains.empty() && GrLoadOp::kLoad == fColorLoadOp;
+ }
+
+ void deleteOps();
+
+ enum class StencilContent {
+ kDontCare,
+ kUserBitsCleared, // User bits: cleared
+ // Clip bit: don't care (Ganesh always pre-clears the clip bit.)
+ kPreserved
+ };
+
+ // Lets the caller specify what the content of the stencil buffer should be at the beginning
+ // of the render pass.
+ //
+    // When requesting kUserBitsCleared: Tilers will load the stencil buffer with a "clear" op;
+    // non-tilers will clear the stencil on first load, and then preserve it on subsequent
+    // loads. (Preserving works because renderTargetContexts are required to leave the user
+    // bits in a cleared state once finished.)
+ //
+    // NOTE: initialContent must not be kUserBitsCleared if caps.performStencilClearsAsDraws()
+    // is true.
+ void setInitialStencilContent(StencilContent initialContent) {
+ fInitialStencilContent = initialContent;
+ }
+
+ // If a renderTargetContext splits its opsTask, it uses this method to guarantee stencil values
+ // get preserved across its split tasks.
+ void setMustPreserveStencil() { fMustPreserveStencil = true; }
+
+ // Must only be called if native color buffer clearing is enabled.
+ void setColorLoadOp(GrLoadOp op, const SkPMColor4f& color);
+ // Sets the clear color to transparent black
+ void setColorLoadOp(GrLoadOp op) {
+ static const SkPMColor4f kDefaultClearColor = {0.f, 0.f, 0.f, 0.f};
+ this->setColorLoadOp(op, kDefaultClearColor);
+ }
+
+ enum class CanDiscardPreviousOps : bool {
+ kYes = true,
+ kNo = false
+ };
+
+ // Perform book-keeping for a fullscreen clear, regardless of how the clear is implemented later
+ // (i.e. setColorLoadOp(), adding a ClearOp, or adding a GrFillRectOp that covers the device).
+ // Returns true if the clear can be converted into a load op (barring device caps).
+ bool resetForFullscreenClear(CanDiscardPreviousOps);
+
+ class OpChain {
+ public:
+ OpChain(const OpChain&) = delete;
+ OpChain& operator=(const OpChain&) = delete;
+ OpChain(std::unique_ptr<GrOp>, GrProcessorSet::Analysis, GrAppliedClip*, const DstProxy*);
+
+ ~OpChain() {
+ // The ops are stored in a GrMemoryPool and must be explicitly deleted via the pool.
+ SkASSERT(fList.empty());
+ }
+
+ void visitProxies(const GrOp::VisitProxyFunc&) const;
+
+ GrOp* head() const { return fList.head(); }
+
+ GrAppliedClip* appliedClip() const { return fAppliedClip; }
+ const DstProxy& dstProxy() const { return fDstProxy; }
+ const SkRect& bounds() const { return fBounds; }
+
+ // Deletes all the ops in the chain via the pool.
+ void deleteOps(GrOpMemoryPool* pool);
+
+ // Attempts to move the ops from the passed chain to this chain at the head. Also attempts
+ // to merge ops between the chains. Upon success the passed chain is empty.
+ // Fails when the chains aren't of the same op type, have different clips or dst proxies.
+ bool prependChain(OpChain*, const GrCaps&, GrOpMemoryPool*, GrAuditTrail*);
+
+ // Attempts to add 'op' to this chain either by merging or adding to the tail. Returns
+ // 'op' to the caller upon failure, otherwise null. Fails when the op and chain aren't of
+ // the same op type, have different clips or dst proxies.
+ std::unique_ptr<GrOp> appendOp(std::unique_ptr<GrOp> op, GrProcessorSet::Analysis,
+ const DstProxy*, const GrAppliedClip*, const GrCaps&,
+ GrOpMemoryPool*, GrAuditTrail*);
+
+ void setSkipExecuteFlag() { fSkipExecute = true; }
+ bool shouldExecute() const {
+ return SkToBool(this->head()) && !fSkipExecute;
+ }
+
+ private:
+ class List {
+ public:
+ List() = default;
+ List(std::unique_ptr<GrOp>);
+ List(List&&);
+ List& operator=(List&& that);
+
+ bool empty() const { return !SkToBool(fHead); }
+ GrOp* head() const { return fHead.get(); }
+ GrOp* tail() const { return fTail; }
+
+ std::unique_ptr<GrOp> popHead();
+ std::unique_ptr<GrOp> removeOp(GrOp* op);
+ void pushHead(std::unique_ptr<GrOp> op);
+ void pushTail(std::unique_ptr<GrOp>);
+
+ void validate() const;
+
+ private:
+ std::unique_ptr<GrOp> fHead;
+ GrOp* fTail = nullptr;
+ };
+
+ void validate() const;
+
+ bool tryConcat(List*, GrProcessorSet::Analysis, const DstProxy&, const GrAppliedClip*,
+ const SkRect& bounds, const GrCaps&, GrOpMemoryPool*, GrAuditTrail*);
+ static List DoConcat(List, List, const GrCaps&, GrOpMemoryPool*, GrAuditTrail*);
+
+ List fList;
+ GrProcessorSet::Analysis fProcessorAnalysis;
+ DstProxy fDstProxy;
+ GrAppliedClip* fAppliedClip;
+ SkRect fBounds;
+
+ // We set this flag to true if any of the ops' proxies fail to instantiate so that we know
+ // not to try and draw the op.
+ bool fSkipExecute = false;
+ };
+
+ bool onIsUsed(GrSurfaceProxy*) const override;
+
+ void handleInternalAllocationFailure() override;
+
+ void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ void recordOp(std::unique_ptr<GrOp>, GrProcessorSet::Analysis, GrAppliedClip*, const DstProxy*,
+ const GrCaps& caps);
+
+ void forwardCombine(const GrCaps&);
+
+ ExpectedOutcome onMakeClosed(const GrCaps& caps, SkIRect* targetUpdateBounds) override;
+
+ friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
+
+ // The RTC and OpsTask have to work together to handle buffer clears. In most cases, buffer
+ // clearing can be done natively, in which case the op list's load ops are sufficient. In other
+ // cases, draw ops must be used, which makes the RTC the best place for those decisions. This,
+    // however, requires that the RTC be able to coordinate with the op list to achieve similar
+    // ends.
+ friend class GrRenderTargetContext;
+
+ // This is a backpointer to the GrOpMemoryPool that holds the memory for this GrOpsTask's ops.
+ // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
+ // constituent GrOpsTask survives.
+ sk_sp<GrOpMemoryPool> fOpMemoryPool;
+ GrAuditTrail* fAuditTrail;
+
+ GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
+ SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
+ StencilContent fInitialStencilContent = StencilContent::kDontCare;
+ bool fMustPreserveStencil = false;
+
+ uint32_t fLastClipStackGenID;
+ SkIRect fLastDevClipBounds;
+ int fLastClipNumAnalyticFPs;
+
+ // We must track if we have a wait op so that we don't delete the op when we have a full clear.
+    bool fHasWaitOp = false;
+
+    // Measured ops per opsTask: mean 5, stdDev 28.
+ SkSTArray<25, OpChain, true> fOpChains;
+
+ // MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
+ // Gather statistics to determine the correct size.
+ SkArenaAlloc fClipAllocator{4096};
+ SkDEBUGCODE(int fNumClips;)
+
+    // TODO: We could look into making this a set if we find we're adding a lot of duplicates
+    // that are causing slowdowns.
+ SkTArray<GrTextureProxy*, true> fSampledProxies;
+
+ SkRect fTotalBounds = SkRect::MakeEmpty();
+ SkIRect fClippedContentBounds = SkIRect::MakeEmpty();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPaint.cpp b/gfx/skia/skia/src/gpu/GrPaint.cpp
new file mode 100644
index 0000000000..bc7f5384a8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPaint.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/effects/GrCoverageSetOpXP.h"
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+
+GrPaint::GrPaint(const GrPaint& that)
+ : fXPFactory(that.fXPFactory)
+ , fColorFragmentProcessors(that.fColorFragmentProcessors.count())
+ , fCoverageFragmentProcessors(that.fCoverageFragmentProcessors.count())
+ , fTrivial(that.fTrivial)
+ , fColor(that.fColor) {
+ for (int i = 0; i < that.fColorFragmentProcessors.count(); ++i) {
+ fColorFragmentProcessors.push_back(that.fColorFragmentProcessors[i]->clone());
+ SkASSERT(fColorFragmentProcessors[i]);
+ }
+ for (int i = 0; i < that.fCoverageFragmentProcessors.count(); ++i) {
+ fCoverageFragmentProcessors.push_back(that.fCoverageFragmentProcessors[i]->clone());
+ SkASSERT(fCoverageFragmentProcessors[i]);
+ }
+}
+
+void GrPaint::setPorterDuffXPFactory(SkBlendMode mode) {
+ this->setXPFactory(GrPorterDuffXPFactory::Get(mode));
+}
+
+void GrPaint::setCoverageSetOpXPFactory(SkRegion::Op regionOp, bool invertCoverage) {
+ this->setXPFactory(GrCoverageSetOpXPFactory::Get(regionOp, invertCoverage));
+}
+
+void GrPaint::addColorTextureProcessor(sk_sp<GrTextureProxy> proxy, GrColorType srcColorType,
+ const SkMatrix& matrix) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(std::move(proxy), srcColorType,
+ matrix));
+}
+
+void GrPaint::addColorTextureProcessor(sk_sp<GrTextureProxy> proxy, GrColorType srcColorType,
+ const SkMatrix& matrix, const GrSamplerState& samplerState) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(std::move(proxy), srcColorType,
+ matrix, samplerState));
+}
+
+bool GrPaint::isConstantBlendedColor(SkPMColor4f* constantColor) const {
+ // This used to do a more sophisticated analysis but now it just explicitly looks for common
+ // cases.
+ static const GrXPFactory* kSrc = GrPorterDuffXPFactory::Get(SkBlendMode::kSrc);
+ static const GrXPFactory* kClear = GrPorterDuffXPFactory::Get(SkBlendMode::kClear);
+ if (kClear == fXPFactory) {
+ *constantColor = SK_PMColor4fTRANSPARENT;
+ return true;
+ }
+ if (this->numColorFragmentProcessors()) {
+ return false;
+ }
+ if (kSrc == fXPFactory || (!fXPFactory && fColor.isOpaque())) {
+ *constantColor = fColor;
+ return true;
+ }
+ return false;
+}
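+
+// Usage sketch (hypothetical caller): a draw onto an already-transparent destination can be
+// skipped entirely when the blended color is known to be fully transparent:
+//
+//     SkPMColor4f constant;
+//     if (paint.isConstantBlendedColor(&constant) && constant == SK_PMColor4fTRANSPARENT) {
+//         return;  // nothing visible would be drawn
+//     }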
diff --git a/gfx/skia/skia/src/gpu/GrPaint.h b/gfx/skia/skia/src/gpu/GrPaint.h
new file mode 100644
index 0000000000..c2520b0eaf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPaint.h
@@ -0,0 +1,136 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrPaint_DEFINED
+#define GrPaint_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "src/core/SkTLazy.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrFragmentProcessor.h"
+
+class GrTextureProxy;
+class GrXPFactory;
+
+/**
+ * The paint describes how color and coverage are computed at each pixel by GrContext draw
+ * functions and how the color is blended with the destination pixel.
+ *
+ * The paint allows installation of custom color and coverage stages. New types of stages are
+ * created by subclassing GrProcessor.
+ *
+ * The primitive color computation starts with the color specified by setColor(). This color is the
+ * input to the first color stage. Each color stage feeds its output to the next color stage.
+ *
+ * Fractional pixel coverage follows a similar flow. The GrGeometryProcessor (specified elsewhere)
+ * provides the initial coverage which is passed to the first coverage fragment processor, which
+ * feeds its output to the next coverage fragment processor.
+ *
+ * setXPFactory is used to control blending between the output color and dest. It also implements
+ * the application of fractional coverage from the coverage pipeline.
+ */
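+//
+// A minimal usage sketch (illustrative only; 'fp' stands in for a GrFragmentProcessor obtained
+// elsewhere):
+//
+//     GrPaint paint;
+//     paint.setColor4f({0.f, 0.f, 0.f, 1.f});              // opaque black input color
+//     paint.setPorterDuffXPFactory(SkBlendMode::kSrcOver);
+//     paint.addColorFragmentProcessor(std::move(fp));      // an extra color stage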
+class GrPaint {
+public:
+ GrPaint() = default;
+ ~GrPaint() = default;
+
+ static GrPaint Clone(const GrPaint& src) { return GrPaint(src); }
+
+ /**
+ * The initial color of the drawn primitive. Defaults to solid white.
+ */
+ void setColor4f(const SkPMColor4f& color) { fColor = color; }
+ const SkPMColor4f& getColor4f() const { return fColor; }
+
+ void setXPFactory(const GrXPFactory* xpFactory) {
+ fXPFactory = xpFactory;
+ fTrivial &= !SkToBool(xpFactory);
+ }
+
+ void setPorterDuffXPFactory(SkBlendMode mode);
+
+ void setCoverageSetOpXPFactory(SkRegion::Op, bool invertCoverage = false);
+
+ /**
+ * Appends an additional color processor to the color computation.
+ */
+ void addColorFragmentProcessor(std::unique_ptr<GrFragmentProcessor> fp) {
+ SkASSERT(fp);
+ fColorFragmentProcessors.push_back(std::move(fp));
+ fTrivial = false;
+ }
+
+ /**
+ * Appends an additional coverage processor to the coverage computation.
+ */
+ void addCoverageFragmentProcessor(std::unique_ptr<GrFragmentProcessor> fp) {
+ SkASSERT(fp);
+ fCoverageFragmentProcessors.push_back(std::move(fp));
+ fTrivial = false;
+ }
+
+ /**
+ * Helpers for adding color or coverage effects that sample a texture. The matrix is applied
+ * to the src space position to compute texture coordinates.
+ */
+ void addColorTextureProcessor(sk_sp<GrTextureProxy>, GrColorType srcColorType, const SkMatrix&);
+ void addColorTextureProcessor(sk_sp<GrTextureProxy>, GrColorType srcColorType, const SkMatrix&,
+ const GrSamplerState&);
+
+ int numColorFragmentProcessors() const { return fColorFragmentProcessors.count(); }
+ int numCoverageFragmentProcessors() const { return fCoverageFragmentProcessors.count(); }
+ int numTotalFragmentProcessors() const { return this->numColorFragmentProcessors() +
+ this->numCoverageFragmentProcessors(); }
+
+ const GrXPFactory* getXPFactory() const { return fXPFactory; }
+
+ GrFragmentProcessor* getColorFragmentProcessor(int i) const {
+ return fColorFragmentProcessors[i].get();
+ }
+ GrFragmentProcessor* getCoverageFragmentProcessor(int i) const {
+ return fCoverageFragmentProcessors[i].get();
+ }
+
+ /**
+ * Returns true if the paint's output color will be constant after blending. If the result is
+ * true, constantColor will be updated to contain the constant color. Note that we can conflate
+ * coverage and color, so the actual values written to pixels with partial coverage may still
+ * not seem constant, even if this function returns true.
+ */
+ bool isConstantBlendedColor(SkPMColor4f* constantColor) const;
+
+ /**
+ * A trivial paint is one that uses src-over and has no fragment processors.
+ * It may have variable sRGB settings.
+     */
+ bool isTrivial() const { return fTrivial; }
+
+ friend void assert_alive(GrPaint& p) {
+ SkASSERT(p.fAlive);
+ }
+
+private:
+ // Since paint copying is expensive if there are fragment processors, we require going through
+ // the Clone() method.
+ GrPaint(const GrPaint&);
+ GrPaint& operator=(const GrPaint&) = delete;
+
+ friend class GrProcessorSet;
+
+ const GrXPFactory* fXPFactory = nullptr;
+ SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fColorFragmentProcessors;
+ SkSTArray<2, std::unique_ptr<GrFragmentProcessor>> fCoverageFragmentProcessors;
+ bool fTrivial = true;
+ SkPMColor4f fColor = SK_PMColor4fWHITE;
+ SkDEBUGCODE(bool fAlive = true;) // Set false after moved from.
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPath.cpp b/gfx/skia/skia/src/gpu/GrPath.cpp
new file mode 100644
index 0000000000..625f8c4722
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPath.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrPath.h"
+#include "src/gpu/geometry/GrShape.h"
+
+static inline void write_style_key(uint32_t* key, const GrStyle& style) {
+ // Pass 1 for the scale since the GPU will apply the style not GrStyle::applyToPath().
+ GrStyle::WriteKey(key, style, GrStyle::Apply::kPathEffectAndStrokeRec, SK_Scalar1);
+}
+
+
+void GrPath::ComputeKey(const GrShape& shape, GrUniqueKey* key, bool* outIsVolatile) {
+ int geoCnt = shape.unstyledKeySize();
+ int styleCnt = GrStyle::KeySize(shape.style(), GrStyle::Apply::kPathEffectAndStrokeRec);
+ // This should only fail for an arbitrary path effect, and we should not have gotten
+ // here with anything other than a dash path effect.
+ SkASSERT(styleCnt >= 0);
+ if (geoCnt < 0) {
+ *outIsVolatile = true;
+ return;
+ }
+ static const GrUniqueKey::Domain kGeneralPathDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kGeneralPathDomain, geoCnt + styleCnt, "Path");
+ shape.writeUnstyledKey(&builder[0]);
+ if (styleCnt) {
+ write_style_key(&builder[geoCnt], shape.style());
+ }
+ *outIsVolatile = false;
+}
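+
+// Usage sketch (hypothetical caller): compute a cache key for a shape and treat the path as
+// uncacheable when the key cannot be built:
+//
+//     GrUniqueKey key;
+//     bool isVolatile;
+//     GrPath::ComputeKey(shape, &key, &isVolatile);
+//     if (!isVolatile) {
+//         // look the GrPath up in (or add it to) the resource cache under 'key'
+//     }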
+
+#ifdef SK_DEBUG
+bool GrPath::isEqualTo(const SkPath& path, const GrStyle& style) const {
+ // Since this is only called in debug we don't care about performance.
+ int cnt0 = GrStyle::KeySize(fStyle, GrStyle::Apply::kPathEffectAndStrokeRec);
+ int cnt1 = GrStyle::KeySize(style, GrStyle::Apply::kPathEffectAndStrokeRec);
+ if (cnt0 < 0 || cnt1 < 0 || cnt0 != cnt1) {
+ return false;
+ }
+ if (cnt0) {
+ SkAutoTArray<uint32_t> key0(cnt0);
+ SkAutoTArray<uint32_t> key1(cnt0);
+ write_style_key(key0.get(), fStyle);
+ write_style_key(key1.get(), style);
+        if (0 != memcmp(key0.get(), key1.get(), cnt0 * sizeof(uint32_t))) {
+ return false;
+ }
+ }
+ return fSkPath == path;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPath.h b/gfx/skia/skia/src/gpu/GrPath.h
new file mode 100644
index 0000000000..6b43af5d10
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPath.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPath_DEFINED
+#define GrPath_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRect.h"
+#include "include/gpu/GrGpuResource.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrStyle.h"
+
+class GrShape;
+
+class GrPath : public GrGpuResource {
+public:
+ /**
+ * Initialize to a path with a fixed stroke. Stroke must not be hairline.
+ */
+ GrPath(GrGpu* gpu, const SkPath& skPath, const GrStyle& style)
+ : INHERITED(gpu)
+ , fBounds(SkRect::MakeEmpty())
+ , fFillType(GrPathRendering::kWinding_FillType)
+#ifdef SK_DEBUG
+ , fSkPath(skPath)
+ , fStyle(style)
+#endif
+ {
+ }
+
+ static void ComputeKey(const GrShape&, GrUniqueKey* key, bool* outIsVolatile);
+
+ const SkRect& getBounds() const { return fBounds; }
+
+ GrPathRendering::FillType getFillType() const { return fFillType; }
+#ifdef SK_DEBUG
+ bool isEqualTo(const SkPath& path, const GrStyle& style) const;
+#endif
+
+protected:
+ // Subclass should init these.
+ SkRect fBounds;
+ GrPathRendering::FillType fFillType;
+#ifdef SK_DEBUG
+ SkPath fSkPath;
+ GrStyle fStyle;
+#endif
+
+private:
+ const char* getResourceType() const override { return "Path Data"; }
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathProcessor.cpp b/gfx/skia/skia/src/gpu/GrPathProcessor.cpp
new file mode 100644
index 0000000000..861ff1bd3c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathProcessor.cpp
@@ -0,0 +1,141 @@
+/*
+* Copyright 2013 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/GrPathProcessor.h"
+
+#include "include/private/SkTo.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLVaryingHandler.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrGLPathProcessor : public GrGLSLPrimitiveProcessor {
+public:
+ GrGLPathProcessor() : fColor(SK_PMColor4fILLEGAL) {}
+
+ static void GenKey(const GrPathProcessor& pathProc,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ b->add32(SkToInt(pathProc.viewMatrix().hasPerspective()));
+ }
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrPathProcessor& pathProc = args.fGP.cast<GrPathProcessor>();
+
+ if (!pathProc.viewMatrix().hasPerspective()) {
+ args.fVaryingHandler->setNoPerspective();
+ }
+
+ // emit transforms
+ this->emitTransforms(args.fVaryingHandler, args.fFPCoordTransformHandler);
+
+ // Setup uniform color
+ const char* stagedLocalVarName;
+ fColorUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType,
+ "Color",
+ &stagedLocalVarName);
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, stagedLocalVarName);
+
+ // setup constant solid coverage
+ fragBuilder->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ }
+
+ SkString matrix_to_sksl(const SkMatrix& m) {
+ return SkStringPrintf("float3x3(%f, %f, %f, %f, %f, %f, %f, %f, %f)", m[0], m[1], m[2],
+ m[3], m[4], m[5], m[6], m[7], m[8]);
+ }
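+
+    // For example (output shown for illustration), matrix_to_sksl(SkMatrix::I()) produces:
+    //     "float3x3(1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000,
+    //               0.000000, 0.000000, 1.000000)"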
+
+ void emitTransforms(GrGLSLVaryingHandler* varyingHandler,
+ FPCoordTransformHandler* transformHandler) {
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = transformHandler->nextCoordTransform()) {
+ GrSLType varyingType =
+ coordTransform->getMatrix().hasPerspective() ? kHalf3_GrSLType
+ : kHalf2_GrSLType;
+
+ SkString strVaryingName;
+ strVaryingName.printf("TransformedCoord_%d", i);
+ GrGLSLVarying v(varyingType);
+ GrGLVaryingHandler* glVaryingHandler = (GrGLVaryingHandler*) varyingHandler;
+ fInstalledTransforms.push_back().fHandle =
+ glVaryingHandler->addPathProcessingVarying(strVaryingName.c_str(),
+ &v).toIndex();
+ fInstalledTransforms.back().fType = varyingType;
+
+ transformHandler->specifyCoordsForCurrCoordTransform(
+ matrix_to_sksl(coordTransform->getMatrix()),
+ UniformHandle(),
+ GrShaderVar(SkString(v.fsIn()),
+ varyingType));
+ ++i;
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pd,
+ const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>();
+ if (pathProc.color() != fColor) {
+ pd.set4fv(fColorUniform, 1, pathProc.color().vec());
+ fColor = pathProc.color();
+ }
+
+ int t = 0;
+ while (const GrCoordTransform* coordTransform = transformIter.next()) {
+ SkASSERT(fInstalledTransforms[t].fHandle.isValid());
+ const SkMatrix& m = GetTransformMatrix(pathProc.localMatrix(), *coordTransform);
+            if (fInstalledTransforms[t].fCurrentValue.cheapEqualTo(m)) {
+                ++t;  // advance past the unchanged transform
+                continue;
+            }
+ fInstalledTransforms[t].fCurrentValue = m;
+
+ SkASSERT(fInstalledTransforms[t].fType == kHalf2_GrSLType ||
+ fInstalledTransforms[t].fType == kHalf3_GrSLType);
+ unsigned components = fInstalledTransforms[t].fType == kHalf2_GrSLType ? 2 : 3;
+ pd.setPathFragmentInputTransform(fInstalledTransforms[t].fHandle, components, m);
+ ++t;
+ }
+ }
+
+private:
+ typedef GrGLSLProgramDataManager::VaryingHandle VaryingHandle;
+ struct TransformVarying {
+ VaryingHandle fHandle;
+ SkMatrix fCurrentValue = SkMatrix::InvalidMatrix();
+ GrSLType fType = kVoid_GrSLType;
+ };
+
+ SkTArray<TransformVarying, true> fInstalledTransforms;
+
+ UniformHandle fColorUniform;
+ SkPMColor4f fColor;
+
+ typedef GrGLSLPrimitiveProcessor INHERITED;
+};
+
+GrPathProcessor::GrPathProcessor(const SkPMColor4f& color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix)
+ : INHERITED(kGrPathProcessor_ClassID)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix) {}
+
+void GrPathProcessor::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLPathProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrPathProcessor::createGLSLInstance(const GrShaderCaps& caps) const {
+ SkASSERT(caps.pathRenderingSupport());
+ return new GrGLPathProcessor();
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathProcessor.h b/gfx/skia/skia/src/gpu/GrPathProcessor.h
new file mode 100644
index 0000000000..aee919f8b2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathProcessor.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathProcessor_DEFINED
+#define GrPathProcessor_DEFINED
+
+#include "src/gpu/GrPrimitiveProcessor.h"
+
+/*
+ * The path equivalent of the GP. For now this just manages color. In the long term we plan on
+ * extending this class to handle all nvpr uniform / varying / program work.
+ */
+class GrPathProcessor : public GrPrimitiveProcessor {
+public:
+ static GrPathProcessor* Create(const SkPMColor4f& color,
+ const SkMatrix& viewMatrix = SkMatrix::I(),
+ const SkMatrix& localMatrix = SkMatrix::I()) {
+ return new GrPathProcessor(color, viewMatrix, localMatrix);
+ }
+
+ const char* name() const override { return "PathProcessor"; }
+
+ const SkPMColor4f& color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool willUseGeoShader() const override { return false; }
+
+ virtual void getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps& caps) const override;
+
+ virtual bool isPathRendering() const override { return true; }
+
+private:
+ GrPathProcessor(const SkPMColor4f&, const SkMatrix& viewMatrix, const SkMatrix& localMatrix);
+
+ SkPMColor4f fColor;
+ const SkMatrix fViewMatrix;
+ const SkMatrix fLocalMatrix;
+
+ typedef GrPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderer.cpp b/gfx/skia/skia/src/gpu/GrPathRenderer.cpp
new file mode 100644
index 0000000000..16d98db460
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderer.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDrawProcs.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/geometry/GrShape.h"
+
+#ifdef SK_DEBUG
+void GrPathRenderer::StencilPathArgs::validate() const {
+ SkASSERT(fContext);
+ SkASSERT(fRenderTargetContext);
+ SkASSERT(fClipConservativeBounds);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ SkASSERT(fShape->style().isSimpleFill());
+ SkPath path;
+ fShape->asPath(&path);
+ SkASSERT(!path.isInverseFillType());
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrPathRenderer::GrPathRenderer() {}
+
+GrPathRenderer::StencilSupport GrPathRenderer::getStencilSupport(const GrShape& shape) const {
+ SkDEBUGCODE(SkPath path;)
+ SkDEBUGCODE(shape.asPath(&path);)
+ SkASSERT(shape.style().isSimpleFill());
+ SkASSERT(!path.isInverseFillType());
+ return this->onGetStencilSupport(shape);
+}
+
+bool GrPathRenderer::drawPath(const DrawPathArgs& args) {
+#ifdef SK_DEBUG
+ args.validate();
+ CanDrawPathArgs canArgs;
+ canArgs.fCaps = args.fContext->priv().caps();
+ canArgs.fProxy = args.fRenderTargetContext->proxy();
+ canArgs.fClipConservativeBounds = args.fClipConservativeBounds;
+ canArgs.fViewMatrix = args.fViewMatrix;
+ canArgs.fShape = args.fShape;
+ canArgs.fAAType = args.fAAType;
+ canArgs.fTargetIsWrappedVkSecondaryCB = args.fRenderTargetContext->wrapsVkSecondaryCB();
+ canArgs.validate();
+
+ canArgs.fHasUserStencilSettings = !args.fUserStencilSettings->isUnused();
+ SkASSERT(CanDrawPath::kNo != this->canDrawPath(canArgs));
+ if (!args.fUserStencilSettings->isUnused()) {
+ SkPath path;
+ args.fShape->asPath(&path);
+ SkASSERT(args.fShape->style().isSimpleFill());
+ SkASSERT(kNoRestriction_StencilSupport == this->getStencilSupport(*args.fShape));
+ }
+#endif
+ return this->onDrawPath(args);
+}
+
+bool GrPathRenderer::IsStrokeHairlineOrEquivalent(const GrStyle& style, const SkMatrix& matrix,
+ SkScalar* outCoverage) {
+ if (style.pathEffect()) {
+ return false;
+ }
+ const SkStrokeRec& stroke = style.strokeRec();
+ if (stroke.isHairlineStyle()) {
+ if (outCoverage) {
+ *outCoverage = SK_Scalar1;
+ }
+ return true;
+ }
+ return stroke.getStyle() == SkStrokeRec::kStroke_Style &&
+ SkDrawTreatAAStrokeAsHairline(stroke.getWidth(), matrix, outCoverage);
+}
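+
+// Illustrative example (hypothetical values): a sufficiently thin stroke under an identity
+// matrix can be drawn as a hairline whose alpha carries the residual coverage:
+//
+//     SkScalar coverage;
+//     if (GrPathRenderer::IsStrokeHairlineOrEquivalent(style, SkMatrix::I(), &coverage)) {
+//         // draw a hairline and modulate its alpha by 'coverage'
+//     }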
+
+
+void GrPathRenderer::GetPathDevBounds(const SkPath& path,
+ int devW, int devH,
+ const SkMatrix& matrix,
+ SkRect* bounds) {
+ if (path.isInverseFillType()) {
+ *bounds = SkRect::MakeWH(SkIntToScalar(devW), SkIntToScalar(devH));
+ return;
+ }
+ *bounds = path.getBounds();
+ matrix.mapRect(bounds);
+}
+
+void GrPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ static constexpr GrUserStencilSettings kIncrementStencil(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>()
+ );
+
+ GrPaint paint;
+ DrawPathArgs drawArgs{args.fContext,
+ std::move(paint),
+ &kIncrementStencil,
+ args.fRenderTargetContext,
+ nullptr, // clip
+ args.fClipConservativeBounds,
+ args.fViewMatrix,
+ args.fShape,
+ (GrAA::kYes == args.fDoStencilMSAA) ? GrAAType::kMSAA : GrAAType::kNone,
+ false};
+ this->drawPath(drawArgs);
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderer.h b/gfx/skia/skia/src/gpu/GrPathRenderer.h
new file mode 100644
index 0000000000..a1d074154a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderer.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRenderer_DEFINED
+#define GrPathRenderer_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkTArray.h"
+
+class GrCaps;
+class GrClip;
+class GrFixedClip;
+class GrHardClip;
+class GrPaint;
+class GrRecordingContext;
+class GrRenderTargetContext;
+class GrRenderTargetProxy;
+class GrShape;
+class GrStyle;
+struct GrUserStencilSettings;
+struct SkIRect;
+class SkMatrix;
+class SkPath;
+
+/**
+ * Base class for drawing paths into a GrOpsTask.
+ */
+class GrPathRenderer : public SkRefCnt {
+public:
+ GrPathRenderer();
+
+ /**
+ * A caller may wish to use a path renderer to draw a path into the stencil buffer. However,
+     * the path renderer itself may require use of the stencil buffer. Also, a path renderer may
+ * use a GrProcessor coverage stage that sets coverage to zero to eliminate pixels that are
+ * covered by bounding geometry but outside the path. These exterior pixels would still be
+ * rendered into the stencil.
+ *
+ * A GrPathRenderer can provide three levels of support for stenciling paths:
+ * 1) kNoRestriction: This is the most general. The caller passes a GrPaint and calls drawPath().
+ * The path is rendered exactly as the draw state indicates including support
+ * for simultaneous color and stenciling with arbitrary stenciling rules.
+ * Pixels partially covered by AA paths are affected by the stencil settings.
+ * 2) kStencilOnly: The path renderer cannot apply arbitrary stencil rules nor shade and stencil
+ * simultaneously. The path renderer does support the stencilPath() function
+ * which performs no color writes and writes a non-zero stencil value to pixels
+ * covered by the path.
+ * 3) kNoSupport: This path renderer cannot be used to stencil the path.
+ */
+ enum StencilSupport {
+ kNoSupport_StencilSupport,
+ kStencilOnly_StencilSupport,
+ kNoRestriction_StencilSupport,
+ };
+
+ /**
+ * This function is to get the stencil support for a particular path. The path's fill must
+ * not be an inverse type. The path will always be filled and not stroked.
+ *
+ * @param shape the shape that will be drawn. Must be simple fill styled and non-inverse
+ * filled.
+ */
+ StencilSupport getStencilSupport(const GrShape& shape) const;
+
+ enum class CanDrawPath {
+ kNo,
+        kAsBackup, // i.e., this renderer is better than the SW fallback if no others can draw the path.
+ kYes
+ };
+
+ struct CanDrawPathArgs {
+ SkDEBUGCODE(CanDrawPathArgs() { memset(this, 0, sizeof(*this)); }) // For validation.
+
+ const GrCaps* fCaps;
+ const GrRenderTargetProxy* fProxy;
+ const SkIRect* fClipConservativeBounds;
+ const SkMatrix* fViewMatrix;
+ const GrShape* fShape;
+ GrAAType fAAType;
+ bool fTargetIsWrappedVkSecondaryCB;
+
+ // This is only used by GrStencilAndCoverPathRenderer
+ bool fHasUserStencilSettings;
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fCaps);
+ SkASSERT(fProxy);
+ SkASSERT(fClipConservativeBounds);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ }
+#endif
+ };
+
+ /**
+ * Returns how well this path renderer is able to render the given path. Returning kNo or
+ * kAsBackup allows the caller to keep searching for a better path renderer. This function is
+ * called when searching for the best path renderer to draw a path.
+ */
+ CanDrawPath canDrawPath(const CanDrawPathArgs& args) const {
+ SkDEBUGCODE(args.validate();)
+ return this->onCanDrawPath(args);
+ }
+
+ struct DrawPathArgs {
+ GrRecordingContext* fContext;
+ GrPaint&& fPaint;
+ const GrUserStencilSettings* fUserStencilSettings;
+ GrRenderTargetContext* fRenderTargetContext;
+ const GrClip* fClip;
+ const SkIRect* fClipConservativeBounds;
+ const SkMatrix* fViewMatrix;
+ const GrShape* fShape;
+ GrAAType fAAType;
+ bool fGammaCorrect;
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fContext);
+ SkASSERT(fUserStencilSettings);
+ SkASSERT(fRenderTargetContext);
+ SkASSERT(fClip);
+ SkASSERT(fClipConservativeBounds);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ }
+#endif
+ };
+
+ /**
+ * Draws the path into the draw target. If getStencilSupport() would return kNoRestriction then
+ * the subclass must respect the stencil settings.
+ */
+ bool drawPath(const DrawPathArgs& args);
+ /**
+ * Args to stencilPath(). fAAType cannot be kCoverage.
+ */
+ struct StencilPathArgs {
+ SkDEBUGCODE(StencilPathArgs() { memset(this, 0, sizeof(*this)); }) // For validation.
+
+ GrRecordingContext* fContext;
+ GrRenderTargetContext* fRenderTargetContext;
+ const GrHardClip* fClip;
+ const SkIRect* fClipConservativeBounds;
+ const SkMatrix* fViewMatrix;
+ const GrShape* fShape;
+ GrAA fDoStencilMSAA;
+
+ SkDEBUGCODE(void validate() const);
+ };
+
+ /**
+ * Draws the path to the stencil buffer. Assume the writable stencil bits are already
+ * initialized to zero. The pixels inside the path will have non-zero stencil values afterwards.
+ */
+ void stencilPath(const StencilPathArgs& args) {
+ SkDEBUGCODE(args.validate();)
+ SkASSERT(kNoSupport_StencilSupport != this->getStencilSupport(*args.fShape));
+ this->onStencilPath(args);
+ }
+
+ // Helper for determining if we can treat a thin stroke as a hairline w/ coverage.
+ // If we can, we draw lots faster (raster device does this same test).
+ static bool IsStrokeHairlineOrEquivalent(const GrStyle&, const SkMatrix&,
+ SkScalar* outCoverage);
+
+protected:
+ // Helper for getting the device bounds of a path. Inverse filled paths will have bounds set
+ // by devSize. Non-inverse path bounds will not necessarily be clipped to devSize.
+ static void GetPathDevBounds(const SkPath& path,
+ int devW,
+ int devH,
+ const SkMatrix& matrix,
+ SkRect* bounds);
+
+private:
+ /**
+ * Subclass overrides if it has any limitations of stenciling support.
+ */
+ virtual StencilSupport onGetStencilSupport(const GrShape&) const {
+ return kNoRestriction_StencilSupport;
+ }
+
+ /**
+ * Subclass implementation of drawPath()
+ */
+ virtual bool onDrawPath(const DrawPathArgs& args) = 0;
+
+ /**
+ * Subclass implementation of canDrawPath()
+ */
+ virtual CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const = 0;
+
+ /**
+ * Subclass implementation of stencilPath(). Subclass must override iff it ever returns
+ * kStencilOnly in onGetStencilSupport().
+ */
+ virtual void onStencilPath(const StencilPathArgs&);
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp b/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp
new file mode 100644
index 0000000000..e21d1bc148
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrPathRendererChain.h"
+
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include "src/gpu/ops/GrAAConvexPathRenderer.h"
+#include "src/gpu/ops/GrAAHairLinePathRenderer.h"
+#include "src/gpu/ops/GrAALinearizingConvexPathRenderer.h"
+#include "src/gpu/ops/GrDashLinePathRenderer.h"
+#include "src/gpu/ops/GrDefaultPathRenderer.h"
+#include "src/gpu/ops/GrSmallPathRenderer.h"
+#include "src/gpu/ops/GrStencilAndCoverPathRenderer.h"
+#include "src/gpu/ops/GrTessellatingPathRenderer.h"
+
+GrPathRendererChain::GrPathRendererChain(GrRecordingContext* context, const Options& options) {
+ const GrCaps& caps = *context->priv().caps();
+ if (options.fGpuPathRenderers & GpuPathRenderers::kDashLine) {
+ fChain.push_back(sk_make_sp<GrDashLinePathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAAConvex) {
+ fChain.push_back(sk_make_sp<GrAAConvexPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
+ using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
+ if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
+ caps, AllowCaching(options.fAllowPathMaskCaching),
+ context->priv().contextID())) {
+ fCoverageCountingPathRenderer = ccpr.get();
+ context->priv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
+ fChain.push_back(std::move(ccpr));
+ }
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAAHairline) {
+ fChain.push_back(sk_make_sp<GrAAHairLinePathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAALinearizing) {
+ fChain.push_back(sk_make_sp<GrAALinearizingConvexPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kSmall) {
+ auto spr = sk_make_sp<GrSmallPathRenderer>();
+ context->priv().addOnFlushCallbackObject(spr.get());
+ fChain.push_back(std::move(spr));
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kStencilAndCover) {
+ auto direct = context->priv().asDirectContext();
+ if (direct) {
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ sk_sp<GrPathRenderer> pr(
+ GrStencilAndCoverPathRenderer::Create(resourceProvider, caps));
+ if (pr) {
+ fChain.push_back(std::move(pr));
+ }
+ }
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kTessellating) {
+ fChain.push_back(sk_make_sp<GrTessellatingPathRenderer>());
+ }
+
+ // We always include the default path renderer (as well as SW), so we can draw any path
+ fChain.push_back(sk_make_sp<GrDefaultPathRenderer>());
+}
+
+GrPathRenderer* GrPathRendererChain::getPathRenderer(
+ const GrPathRenderer::CanDrawPathArgs& args,
+ DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport) {
+ GR_STATIC_ASSERT(GrPathRenderer::kNoSupport_StencilSupport <
+ GrPathRenderer::kStencilOnly_StencilSupport);
+ GR_STATIC_ASSERT(GrPathRenderer::kStencilOnly_StencilSupport <
+ GrPathRenderer::kNoRestriction_StencilSupport);
+ GrPathRenderer::StencilSupport minStencilSupport;
+ if (DrawType::kStencil == drawType) {
+ minStencilSupport = GrPathRenderer::kStencilOnly_StencilSupport;
+ } else if (DrawType::kStencilAndColor == drawType) {
+ minStencilSupport = GrPathRenderer::kNoRestriction_StencilSupport;
+ } else {
+ minStencilSupport = GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ if (minStencilSupport != GrPathRenderer::kNoSupport_StencilSupport) {
+ // We don't support (and shouldn't need) stenciling of non-fill paths.
+ if (!args.fShape->style().isSimpleFill()) {
+ return nullptr;
+ }
+ }
+
+ GrPathRenderer* bestPathRenderer = nullptr;
+ for (const sk_sp<GrPathRenderer>& pr : fChain) {
+ GrPathRenderer::StencilSupport support = GrPathRenderer::kNoSupport_StencilSupport;
+ if (GrPathRenderer::kNoSupport_StencilSupport != minStencilSupport) {
+ support = pr->getStencilSupport(*args.fShape);
+ if (support < minStencilSupport) {
+ continue;
+ }
+ }
+ GrPathRenderer::CanDrawPath canDrawPath = pr->canDrawPath(args);
+ if (GrPathRenderer::CanDrawPath::kNo == canDrawPath) {
+ continue;
+ }
+ if (GrPathRenderer::CanDrawPath::kAsBackup == canDrawPath && bestPathRenderer) {
+ continue;
+ }
+ if (stencilSupport) {
+ *stencilSupport = support;
+ }
+ bestPathRenderer = pr.get();
+ if (GrPathRenderer::CanDrawPath::kYes == canDrawPath) {
+ break;
+ }
+ }
+ return bestPathRenderer;
+}
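+
+// Usage sketch (hypothetical caller): select a renderer for an ordinary color draw and fall back
+// to software when the chain has no candidate:
+//
+//     GrPathRenderer::StencilSupport support;
+//     GrPathRenderer* pr =
+//             chain.getPathRenderer(canDrawArgs, GrPathRendererChain::DrawType::kColor, &support);
+//     if (!pr) {
+//         // hand the path to the SW path renderer instead
+//     }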
diff --git a/gfx/skia/skia/src/gpu/GrPathRendererChain.h b/gfx/skia/skia/src/gpu/GrPathRendererChain.h
new file mode 100644
index 0000000000..3bd955c726
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendererChain.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRendererChain_DEFINED
+#define GrPathRendererChain_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTArray.h"
+
+class GrContext;
+class GrCoverageCountingPathRenderer;
+
+/**
+ * Keeps track of an ordered list of path renderers. When a path needs to be
+ * drawn, this list is scanned to find the most preferred renderer. To add your
+ * path renderer to the list, implement the GrPathRenderer::AddPathRenderers
+ * function.
+ */
+class GrPathRendererChain : public SkNoncopyable {
+public:
+ struct Options {
+ bool fAllowPathMaskCaching = false;
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kAll;
+ };
+ GrPathRendererChain(GrRecordingContext* context, const Options&);
+
+ /** Documents how the caller plans to use a GrPathRenderer to draw a path. It affects the PR
+ returned by getPathRenderer */
+ enum class DrawType {
+ kColor, // draw to the color buffer, no AA
+ kStencil, // draw just to the stencil buffer
+ kStencilAndColor, // draw the stencil and color buffer, no AA
+ };
+
+ /** Returns a GrPathRenderer compatible with the request if one is available. If the caller
+ is drawing the path to the stencil buffer then stencilSupport can be used to determine
+ whether the path can be rendered with arbitrary stencil rules or not. See comments on
+ StencilSupport in GrPathRenderer.h. */
+ GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport);
+
+ /** Returns a direct pointer to the coverage counting path renderer, or null if it is not in the
+ chain. */
+ GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer() {
+ return fCoverageCountingPathRenderer;
+ }
+
+private:
+ enum {
+ kPreAllocCount = 8,
+ };
+ SkSTArray<kPreAllocCount, sk_sp<GrPathRenderer>> fChain;
+ GrCoverageCountingPathRenderer* fCoverageCountingPathRenderer = nullptr;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRendering.cpp b/gfx/skia/skia/src/gpu/GrPathRendering.cpp
new file mode 100644
index 0000000000..5e44531d2e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendering.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkScalerContext.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRenderTarget.h"
+
+const GrUserStencilSettings& GrPathRendering::GetStencilPassSettings(FillType fill) {
+ switch (fill) {
+ default:
+ SK_ABORT("Unexpected path fill.");
+ case GrPathRendering::kWinding_FillType: {
+ constexpr static GrUserStencilSettings kWindingStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kIncWrap,
+ GrUserStencilOp::kIncWrap,
+ 0xffff>()
+ );
+ return kWindingStencilPass;
+ }
+ case GrPathRendering::kEvenOdd_FillType: {
+ constexpr static GrUserStencilSettings kEvenOddStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kInvert,
+ 0xffff>()
+ );
+ return kEvenOddStencilPass;
+ }
+ }
+}
+
+void GrPathRendering::stencilPath(const StencilPathArgs& args, const GrPath* path) {
+ fGpu->handleDirtyContext();
+ this->onStencilPath(args, path);
+}
+
+void GrPathRendering::drawPath(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ // Cover pass settings in pipeline.
+ const GrStencilSettings& stencilPassSettings,
+ const GrPath* path) {
+ fGpu->handleDirtyContext();
+ if (auto barrierType = programInfo.pipeline().xferBarrierType(renderTarget->asTexture(),
+ *fGpu->caps())) {
+ fGpu->xferBarrier(renderTarget, barrierType);
+ }
+ this->onDrawPath(renderTarget, programInfo, stencilPassSettings, path);
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathRendering.h b/gfx/skia/skia/src/gpu/GrPathRendering.h
new file mode 100644
index 0000000000..3e143e4d0f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendering.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRendering_DEFINED
+#define GrPathRendering_DEFINED
+
+#include "include/core/SkPath.h"
+
+class GrGpu;
+class GrPath;
+class GrProgramInfo;
+class GrRenderTarget;
+class GrRenderTargetProxy;
+class GrScissorState;
+class GrStencilSettings;
+class GrStyle;
+struct GrUserStencilSettings;
+struct SkScalerContextEffects;
+class SkDescriptor;
+class SkTypeface;
+
+/**
+ * Abstract class wrapping HW path rendering API.
+ *
+ * The subclasses of this class use the available HW API to render paths (as opposed to path
+ * rendering implemented in Skia on top of a "3d" HW API).
+ * The subclasses hold the global state needed to render paths, including a shadow of the global
+ * HW API state. Similar to GrGpu.
+ *
+ * It is expected that the lifetimes of GrGpuXX and GrXXPathRendering are the same. The call
+ * context interface (e.g. the concrete instance of the GrGpu subclass) should be provided to the
+ * instance during construction.
+ */
+class GrPathRendering {
+public:
+ virtual ~GrPathRendering() { }
+
+ enum PathTransformType {
+ kNone_PathTransformType, //!< []
+ kTranslateX_PathTransformType, //!< [kMTransX]
+ kTranslateY_PathTransformType, //!< [kMTransY]
+ kTranslate_PathTransformType, //!< [kMTransX, kMTransY]
+ kAffine_PathTransformType, //!< [kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY]
+
+ kLast_PathTransformType = kAffine_PathTransformType
+ };
+
+ static inline int PathTransformSize(PathTransformType type) {
+ switch (type) {
+ case kNone_PathTransformType:
+ return 0;
+ case kTranslateX_PathTransformType:
+ case kTranslateY_PathTransformType:
+ return 1;
+ case kTranslate_PathTransformType:
+ return 2;
+ case kAffine_PathTransformType:
+ return 6;
+
+ default:
+ SK_ABORT("Unknown path transform type");
+ }
+ }
+
+ // No native support for inverse at this time
+ enum FillType {
+ /** Specifies that "inside" is computed by a non-zero sum of signed
+ edge crossings
+ */
+ kWinding_FillType,
+ /** Specifies that "inside" is computed by an odd number of edge
+ crossings
+ */
+ kEvenOdd_FillType,
+ };
+
+ static const GrUserStencilSettings& GetStencilPassSettings(FillType);
+
+ /**
+ * Creates a new gpu path, based on the specified path and stroke and returns it.
+ *
+ * @param SkPath the geometry.
+ * @param GrStyle the style applied to the path. Styles with non-dash path effects are not
+ * allowed.
+ * @return a new GPU path object.
+ */
+ virtual sk_sp<GrPath> createPath(const SkPath&, const GrStyle&) = 0;
+
+ /** None of these params are optional; pointers are used just to avoid making copies. */
+ struct StencilPathArgs {
+ StencilPathArgs(bool useHWAA,
+ GrRenderTargetProxy* proxy,
+ const SkMatrix* viewMatrix,
+ const GrScissorState* scissor,
+ const GrStencilSettings* stencil)
+ : fUseHWAA(useHWAA)
+ , fProxy(proxy)
+ , fViewMatrix(viewMatrix)
+ , fScissor(scissor)
+ , fStencil(stencil) {
+ }
+ bool fUseHWAA;
+ GrRenderTargetProxy* fProxy;
+ const SkMatrix* fViewMatrix;
+ const GrScissorState* fScissor;
+ const GrStencilSettings* fStencil;
+ };
+
+ void stencilPath(const StencilPathArgs& args, const GrPath* path);
+
+ void drawPath(GrRenderTarget*,
+ const GrProgramInfo&,
+ const GrStencilSettings& stencilPassSettings, // Cover pass settings in pipeline.
+ const GrPath* path);
+
+protected:
+ GrPathRendering(GrGpu* gpu) : fGpu(gpu) { }
+
+ virtual void onStencilPath(const StencilPathArgs&, const GrPath*) = 0;
+ virtual void onDrawPath(GrRenderTarget*,
+ const GrProgramInfo&,
+ const GrStencilSettings&,
+ const GrPath*) = 0;
+
+ GrGpu* fGpu;
+private:
+ GrPathRendering& operator=(const GrPathRendering&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRendering_none.cpp b/gfx/skia/skia/src/gpu/GrPathRendering_none.cpp
new file mode 100644
index 0000000000..3c2878d083
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendering_none.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrPath.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+#include "src/gpu/ops/GrStencilAndCoverPathRenderer.h"
+#include "src/gpu/ops/GrStencilPathOp.h"
+
+class GrRecordingContext;
+
+GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrResourceProvider* resourceProvider,
+ const GrCaps& caps) {
+ return nullptr;
+}
+
+GrGLPathRendering::GrGLPathRendering(GrGLGpu* gpu)
+ : GrPathRendering(gpu)
+ , fPreallocatedPathCount(0) {}
+
+GrGLPathRendering::~GrGLPathRendering() {}
+
+void GrGLPathRendering::disconnect(GrGpu::DisconnectType) {}
+
+void GrGLPathRendering::resetContext() {}
+
+void GrGLPathRendering::setProgramPathFragmentInputTransform(GrGLuint, GrGLint,
+ GrGLenum, GrGLint,
+ const SkMatrix&) {}
+
+void GrGLPathRendering::setProjectionMatrix(const SkMatrix&, const SkISize&, GrSurfaceOrigin) {}
+
+sk_sp<GrPath> GrGLPathRendering::createPath(const SkPath&, const GrStyle&) { return nullptr; }
+
+void GrGLPathRendering::onDrawPath(GrRenderTarget*,
+ const GrProgramInfo&,
+ const GrStencilSettings&,
+ const GrPath*) {}
+
+void GrGLPathRendering::onStencilPath(const StencilPathArgs&, const GrPath*) {}
+
+std::unique_ptr<GrOp> GrStencilPathOp::Make(GrRecordingContext*,
+ const SkMatrix&,
+ bool,
+ bool,
+ const GrScissorState&,
+ sk_sp<const GrPath>) { return nullptr; }
+
+void GrPath::ComputeKey(const GrShape&, GrUniqueKey*, bool*) {}
diff --git a/gfx/skia/skia/src/gpu/GrPersistentCacheUtils.h b/gfx/skia/skia/src/gpu/GrPersistentCacheUtils.h
new file mode 100644
index 0000000000..b460184616
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPersistentCacheUtils.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPersistentCacheEntry_DEFINED
+#define GrPersistentCacheEntry_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/core/SkReader32.h"
+#include "src/core/SkWriter32.h"
+#include "src/sksl/SkSLString.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+// The GrPersistentCache stores opaque blobs, as far as clients are concerned. It's helpful to
+// inspect certain kinds of cached data within our tools, so for those cases (GLSL, SPIR-V), we
+// put the serialization logic here, to be shared by the backend code and the tool code.
+namespace GrPersistentCacheUtils {
+
+struct ShaderMetadata {
+ SkSL::Program::Settings* fSettings = nullptr;
+ SkTArray<SkSL::String> fAttributeNames;
+ bool fHasCustomColorOutput = false;
+ bool fHasSecondaryColorOutput = false;
+};
+
+static inline sk_sp<SkData> PackCachedShaders(SkFourByteTag shaderType,
+ const SkSL::String shaders[],
+ const SkSL::Program::Inputs inputs[],
+ int numInputs,
+ const ShaderMetadata* meta = nullptr) {
+ // For consistency (so tools can blindly pack and unpack cached shaders), we always write
+ // kGrShaderTypeCount inputs. If the backend gives us fewer, we just replicate the last one.
+ SkASSERT(numInputs >= 1 && numInputs <= kGrShaderTypeCount);
+
+ SkWriter32 writer;
+ writer.write32(shaderType);
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ writer.writeString(shaders[i].c_str(), shaders[i].size());
+ writer.writePad(&inputs[SkTMin(i, numInputs - 1)], sizeof(SkSL::Program::Inputs));
+ }
+ writer.writeBool(SkToBool(meta));
+ if (meta) {
+ writer.writeBool(SkToBool(meta->fSettings));
+ if (meta->fSettings) {
+ writer.writeBool(meta->fSettings->fFlipY);
+ writer.writeBool(meta->fSettings->fFragColorIsInOut);
+ writer.writeBool(meta->fSettings->fForceHighPrecision);
+ }
+
+ writer.writeInt(meta->fAttributeNames.count());
+ for (const auto& attr : meta->fAttributeNames) {
+ writer.writeString(attr.c_str(), attr.size());
+ }
+
+ writer.writeBool(meta->fHasCustomColorOutput);
+ writer.writeBool(meta->fHasSecondaryColorOutput);
+ }
+ return writer.snapshotAsData();
+}
+
+static inline void UnpackCachedShaders(SkReader32* reader,
+ SkSL::String shaders[],
+ SkSL::Program::Inputs inputs[],
+ int numInputs,
+ ShaderMetadata* meta = nullptr) {
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ size_t stringLen = 0;
+ const char* string = reader->readString(&stringLen);
+ shaders[i] = SkSL::String(string, stringLen);
+
+ // GL, for example, only wants one set of Inputs
+ if (i < numInputs) {
+ reader->read(&inputs[i], sizeof(inputs[i]));
+ } else {
+ reader->skip(sizeof(SkSL::Program::Inputs));
+ }
+ }
+ if (reader->readBool() && meta) {
+ SkASSERT(meta->fSettings != nullptr);
+
+ if (reader->readBool()) {
+ meta->fSettings->fFlipY = reader->readBool();
+ meta->fSettings->fFragColorIsInOut = reader->readBool();
+ meta->fSettings->fForceHighPrecision = reader->readBool();
+ }
+
+ meta->fAttributeNames.resize(reader->readInt());
+ for (int i = 0; i < meta->fAttributeNames.count(); ++i) {
+ size_t stringLen = 0;
+ const char* string = reader->readString(&stringLen);
+ meta->fAttributeNames[i] = SkSL::String(string, stringLen);
+ }
+
+ meta->fHasCustomColorOutput = reader->readBool();
+ meta->fHasSecondaryColorOutput = reader->readBool();
+ }
+}
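+
+// A minimal round-trip sketch (illustrative only; `kGLSL_Tag` is a hypothetical four-byte tag,
+// and SkReader32's (data, size) constructor and readU32() are assumed here):
+//
+//     SkSL::String shaders[kGrShaderTypeCount];
+//     SkSL::Program::Inputs inputs[kGrShaderTypeCount];
+//     // ... fill shaders/inputs from the compiler ...
+//     sk_sp<SkData> blob = PackCachedShaders(kGLSL_Tag, shaders, inputs, kGrShaderTypeCount);
+//
+//     SkReader32 reader(blob->data(), blob->size());
+//     SkFourByteTag type = reader.readU32();  // PackCachedShaders writes the type tag first
+//     UnpackCachedShaders(&reader, shaders, inputs, kGrShaderTypeCount);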
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPipeline.cpp b/gfx/skia/skia/src/gpu/GrPipeline.cpp
new file mode 100644
index 0000000000..3e5e18d68c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipeline.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrPipeline.h"
+
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrXferProcessor.h"
+
+#include "src/gpu/ops/GrOp.h"
+
+GrPipeline::GrPipeline(const InitArgs& args,
+ GrProcessorSet&& processors,
+ GrAppliedClip&& appliedClip)
+ : fOutputSwizzle(args.fOutputSwizzle) {
+ SkASSERT(processors.isFinalized());
+
+ fFlags = (Flags)args.fInputFlags;
+ if (appliedClip.hasStencilClip()) {
+ fFlags |= Flags::kHasStencilClip;
+ }
+ if (appliedClip.scissorState().enabled()) {
+ fFlags |= Flags::kScissorEnabled;
+ }
+
+ fWindowRectsState = appliedClip.windowRectsState();
+ if (!args.fUserStencil->isDisabled(fFlags & Flags::kHasStencilClip)) {
+ fFlags |= Flags::kStencilEnabled;
+ }
+
+ fUserStencilSettings = args.fUserStencil;
+
+ fXferProcessor = processors.refXferProcessor();
+
+ if (args.fDstProxy.proxy()) {
+ SkASSERT(args.fDstProxy.proxy()->isInstantiated());
+
+ fDstTextureProxy = args.fDstProxy.refProxy();
+ fDstTextureOffset = args.fDstProxy.offset();
+ }
+
+ // Copy GrFragmentProcessors from GrProcessorSet to Pipeline
+ fNumColorProcessors = processors.numColorFragmentProcessors();
+ int numTotalProcessors = fNumColorProcessors +
+ processors.numCoverageFragmentProcessors() +
+ appliedClip.numClipCoverageFragmentProcessors();
+ fFragmentProcessors.reset(numTotalProcessors);
+
+ int currFPIdx = 0;
+ for (int i = 0; i < processors.numColorFragmentProcessors(); ++i, ++currFPIdx) {
+ fFragmentProcessors[currFPIdx] = processors.detachColorFragmentProcessor(i);
+ }
+ for (int i = 0; i < processors.numCoverageFragmentProcessors(); ++i, ++currFPIdx) {
+ fFragmentProcessors[currFPIdx] = processors.detachCoverageFragmentProcessor(i);
+ }
+ for (int i = 0; i < appliedClip.numClipCoverageFragmentProcessors(); ++i, ++currFPIdx) {
+ fFragmentProcessors[currFPIdx] = appliedClip.detachClipCoverageFragmentProcessor(i);
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 0; i < numTotalProcessors; ++i) {
+ if (!fFragmentProcessors[i]->isInstantiated()) {
+ this->markAsBad();
+ break;
+ }
+ }
+#endif
+}
+
+GrXferBarrierType GrPipeline::xferBarrierType(GrTexture* texture, const GrCaps& caps) const {
+ if (fDstTextureProxy && fDstTextureProxy->peekTexture() == texture) {
+ return kTexture_GrXferBarrierType;
+ }
+ return this->getXferProcessor().xferBarrierType(caps);
+}
+
+GrPipeline::GrPipeline(GrScissorTest scissorTest, sk_sp<const GrXferProcessor> xp,
+ const GrSwizzle& outputSwizzle, InputFlags inputFlags,
+ const GrUserStencilSettings* userStencil)
+ : fWindowRectsState()
+ , fUserStencilSettings(userStencil)
+ , fFlags((Flags)inputFlags)
+ , fXferProcessor(std::move(xp))
+ , fFragmentProcessors()
+ , fNumColorProcessors(0)
+ , fOutputSwizzle(outputSwizzle) {
+ if (GrScissorTest::kEnabled == scissorTest) {
+ fFlags |= Flags::kScissorEnabled;
+ }
+ if (!userStencil->isDisabled(false)) {
+ fFlags |= Flags::kStencilEnabled;
+ }
+}
+
+uint32_t GrPipeline::getBlendInfoKey() const {
+ const GrXferProcessor::BlendInfo& blendInfo = this->getXferProcessor().getBlendInfo();
+
+ static const uint32_t kBlendWriteShift = 1;
+ static const uint32_t kBlendCoeffShift = 5;
+ GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift));
+ GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4);
+
+ uint32_t key = blendInfo.fWriteColor;
+ key |= (blendInfo.fSrcBlend << kBlendWriteShift);
+ key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift));
+ key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift));
+
+ return key;
+}
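+
+// Resulting key layout, derived from the shifts above:
+//     bit  0         : blendInfo.fWriteColor
+//     bits 1 - 5     : blendInfo.fSrcBlend  (5 bits each; kLast_GrBlendCoeff < 2^5)
+//     bits 6 - 10    : blendInfo.fDstBlend
+//     bits 11 and up : blendInfo.fEquation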
diff --git a/gfx/skia/skia/src/gpu/GrPipeline.h b/gfx/skia/skia/src/gpu/GrPipeline.h
new file mode 100644
index 0000000000..b57e66d200
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipeline.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPipeline_DEFINED
+#define GrPipeline_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrProcessorSet.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/GrScissorState.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/GrWindowRectsState.h"
+#include "src/gpu/effects/GrCoverageSetOpXP.h"
+#include "src/gpu/effects/GrDisableColorXP.h"
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#include "src/gpu/geometry/GrRect.h"
+
+class GrAppliedClip;
+class GrOp;
+class GrRenderTargetContext;
+
+/**
+ * This immutable object contains information needed to build a shader program and set API
+ * state for a draw. It is used along with a GrPrimitiveProcessor and a source of geometric
+ * data (GrMesh or GrPath) to draw.
+ */
+class GrPipeline {
+public:
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Creation
+
+ // Pipeline options that the caller may enable.
+ // NOTE: This enum is extended later by GrPipeline::Flags.
+ enum class InputFlags : uint8_t {
+ kNone = 0,
+ /**
+ * Perform HW anti-aliasing. This means either HW FSAA, if supported by the render target,
+ * or smooth-line rendering if a line primitive is drawn and line smoothing is supported by
+ * the 3D API.
+ */
+ kHWAntialias = (1 << 0),
+ /**
+ * Modifies the vertex shader so that vertices will be positioned at pixel centers.
+ */
+ kSnapVerticesToPixelCenters = (1 << 1), // This value must be last. (See kLastInputFlag.)
+ };
+
+ struct InitArgs {
+ InputFlags fInputFlags = InputFlags::kNone;
+ const GrUserStencilSettings* fUserStencil = &GrUserStencilSettings::kUnused;
+ const GrCaps* fCaps = nullptr;
+ GrXferProcessor::DstProxy fDstProxy;
+ GrSwizzle fOutputSwizzle;
+ };
+
+ /**
+ * Some state can be changed between GrMeshes without changing GrPipelines. This is generally
+ * less expensive than using multiple pipelines. Such state is called "dynamic state". It can
+ * be specified in two ways:
+ * 1) FixedDynamicState - use this to specify state that does not vary between GrMeshes.
+ * 2) DynamicStateArrays - use this to specify per mesh values for dynamic state.
+ **/
+ struct FixedDynamicState {
+ explicit FixedDynamicState(const SkIRect& scissorRect) : fScissorRect(scissorRect) {}
+ FixedDynamicState() = default;
+ SkIRect fScissorRect = SkIRect::EmptyIRect();
+ // Must have GrPrimitiveProcessor::numTextureSamplers() entries. Can be null if no samplers
+ // or textures are passed using DynamicStateArrays.
+ GrTextureProxy** fPrimitiveProcessorTextures = nullptr;
+ };
+
+ /**
+ * Any non-null array overrides the FixedDynamicState on a mesh-by-mesh basis. Arrays must
+ * have one entry for each GrMesh.
+ */
+ struct DynamicStateArrays {
+ const SkIRect* fScissorRects = nullptr;
+ // Must have GrPrimitiveProcessor::numTextureSamplers() * num_meshes entries.
+        // Can be null if there are no samplers, or to use the same textures for all meshes via
+        // FixedDynamicState.
+ GrTextureProxy** fPrimitiveProcessorTextures = nullptr;
+ };
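+
+    /** A minimal sketch of the two options (illustrative only; `scissorRect`, `mesh0Rect`, and
+        `mesh1Rect` are hypothetical):
+
+            // 1) One scissor shared by every mesh:
+            GrPipeline::FixedDynamicState fixed(scissorRect);
+
+            // 2) A per-mesh scissor; the array needs one entry per GrMesh:
+            SkIRect rects[] = {mesh0Rect, mesh1Rect};
+            GrPipeline::DynamicStateArrays dynamic;
+            dynamic.fScissorRects = rects;
+    */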
+
+ /**
+ * Creates a simple pipeline with default settings and no processors. The provided blend mode
+ * must be "Porter Duff" (<= kLastCoeffMode). If using GrScissorTest::kEnabled, the caller must
+ * specify a scissor rectangle through the DynamicState struct.
+ **/
+ GrPipeline(GrScissorTest scissor, SkBlendMode blend, const GrSwizzle& outputSwizzle,
+ InputFlags flags = InputFlags::kNone,
+ const GrUserStencilSettings* stencil = &GrUserStencilSettings::kUnused)
+ : GrPipeline(scissor, GrPorterDuffXPFactory::MakeNoCoverageXP(blend), outputSwizzle,
+ flags, stencil) {
+ }
+
+ GrPipeline(GrScissorTest, sk_sp<const GrXferProcessor>, const GrSwizzle& outputSwizzle,
+ InputFlags = InputFlags::kNone,
+ const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);
+
+ GrPipeline(const InitArgs&, GrProcessorSet&&, GrAppliedClip&&);
+
+ GrPipeline(const GrPipeline&) = delete;
+ GrPipeline& operator=(const GrPipeline&) = delete;
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name GrFragmentProcessors
+
+ int numColorFragmentProcessors() const { return fNumColorProcessors; }
+ int numCoverageFragmentProcessors() const {
+ return fFragmentProcessors.count() - fNumColorProcessors;
+ }
+ int numFragmentProcessors() const { return fFragmentProcessors.count(); }
+
+ const GrXferProcessor& getXferProcessor() const {
+ if (fXferProcessor) {
+ return *fXferProcessor.get();
+ } else {
+ // A null xp member means the common src-over case. GrXferProcessor's ref'ing
+ // mechanism is not thread safe so we do not hold a ref on this global.
+ return GrPorterDuffXPFactory::SimpleSrcOverXP();
+ }
+ }
+
+ /**
+ * If the GrXferProcessor uses a texture to access the dst color, then this returns that
+ * texture and the offset to the dst contents within that texture.
+ */
+ GrTextureProxy* dstTextureProxy(SkIPoint* offset = nullptr) const {
+ if (offset) {
+ *offset = fDstTextureOffset;
+ }
+
+ return fDstTextureProxy.get();
+ }
+
+ GrTexture* peekDstTexture(SkIPoint* offset = nullptr) const {
+ if (GrTextureProxy* dstProxy = this->dstTextureProxy(offset)) {
+ return dstProxy->peekTexture();
+ }
+
+ return nullptr;
+ }
+
+ const GrFragmentProcessor& getColorFragmentProcessor(int idx) const {
+ SkASSERT(idx < this->numColorFragmentProcessors());
+ return *fFragmentProcessors[idx].get();
+ }
+
+ const GrFragmentProcessor& getCoverageFragmentProcessor(int idx) const {
+ SkASSERT(idx < this->numCoverageFragmentProcessors());
+ return *fFragmentProcessors[fNumColorProcessors + idx].get();
+ }
+
+ const GrFragmentProcessor& getFragmentProcessor(int idx) const {
+ return *fFragmentProcessors[idx].get();
+ }
+
+ /// @}
+
+ const GrUserStencilSettings* getUserStencil() const { return fUserStencilSettings; }
+
+ bool isScissorEnabled() const {
+ return SkToBool(fFlags & Flags::kScissorEnabled);
+ }
+
+ const GrWindowRectsState& getWindowRectsState() const { return fWindowRectsState; }
+
+ bool isHWAntialiasState() const { return SkToBool(fFlags & InputFlags::kHWAntialias); }
+ bool snapVerticesToPixelCenters() const {
+ return SkToBool(fFlags & InputFlags::kSnapVerticesToPixelCenters);
+ }
+ bool hasStencilClip() const {
+ return SkToBool(fFlags & Flags::kHasStencilClip);
+ }
+ bool isStencilEnabled() const {
+ return SkToBool(fFlags & Flags::kStencilEnabled);
+ }
+ SkDEBUGCODE(bool isBad() const { return SkToBool(fFlags & Flags::kIsBad); })
+
+ GrXferBarrierType xferBarrierType(GrTexture*, const GrCaps&) const;
+
+ // Used by Vulkan and Metal to cache their respective pipeline objects
+ uint32_t getBlendInfoKey() const;
+
+ const GrSwizzle& outputSwizzle() const { return fOutputSwizzle; }
+
+private:
+
+ SkDEBUGCODE(void markAsBad() { fFlags |= Flags::kIsBad; })
+
+ static constexpr uint8_t kLastInputFlag = (uint8_t)InputFlags::kSnapVerticesToPixelCenters;
+
+ /** This is a continuation of the public "InputFlags" enum. */
+ enum class Flags : uint8_t {
+ kHasStencilClip = (kLastInputFlag << 1),
+ kStencilEnabled = (kLastInputFlag << 2),
+ kScissorEnabled = (kLastInputFlag << 3),
+#ifdef SK_DEBUG
+ kIsBad = (kLastInputFlag << 4),
+#endif
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(Flags);
+
+ friend bool operator&(Flags, InputFlags);
+
+ using FragmentProcessorArray = SkAutoSTArray<8, std::unique_ptr<const GrFragmentProcessor>>;
+
+ sk_sp<GrTextureProxy> fDstTextureProxy;
+ SkIPoint fDstTextureOffset;
+ GrWindowRectsState fWindowRectsState;
+ const GrUserStencilSettings* fUserStencilSettings;
+ Flags fFlags;
+ sk_sp<const GrXferProcessor> fXferProcessor;
+ FragmentProcessorArray fFragmentProcessors;
+
+ // This value is also the index in fFragmentProcessors where coverage processors begin.
+ int fNumColorProcessors;
+
+ GrSwizzle fOutputSwizzle;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrPipeline::InputFlags);
+GR_MAKE_BITFIELD_CLASS_OPS(GrPipeline::Flags);
+
+inline bool operator&(GrPipeline::Flags flags, GrPipeline::InputFlags inputFlag) {
+ return (flags & (GrPipeline::Flags)inputFlag);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp
new file mode 100644
index 0000000000..fea8e2dc6b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrPrimitiveProcessor.h"
+
+#include "src/gpu/GrCoordTransform.h"
+
+/**
+ * We specialize the vertex code for each of these matrix types.
+ */
+enum MatrixType {
+ kNoPersp_MatrixType = 0,
+ kGeneral_MatrixType = 1,
+};
+
+GrPrimitiveProcessor::GrPrimitiveProcessor(ClassID classID) : GrProcessor(classID) {}
+
+const GrPrimitiveProcessor::TextureSampler& GrPrimitiveProcessor::textureSampler(int i) const {
+ SkASSERT(i >= 0 && i < this->numTextureSamplers());
+ return this->onTextureSampler(i);
+}
+
+uint32_t
+GrPrimitiveProcessor::getTransformKey(const SkTArray<GrCoordTransform*, true>& coords,
+ int numCoords) const {
+ uint32_t totalKey = 0;
+ for (int t = 0; t < numCoords; ++t) {
+ uint32_t key = 0;
+ const GrCoordTransform* coordTransform = coords[t];
+ if (coordTransform->getMatrix().hasPerspective()) {
+ key |= kGeneral_MatrixType;
+ } else {
+ key |= kNoPersp_MatrixType;
+ }
+ key <<= t;
+ SkASSERT(0 == (totalKey & key)); // keys for each transform ought not to overlap
+ totalKey |= key;
+ }
+ return totalKey;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline GrSamplerState::Filter clamp_filter(GrTextureType type,
+ GrSamplerState::Filter requestedFilter) {
+ if (GrTextureTypeHasRestrictedSampling(type)) {
+ return SkTMin(requestedFilter, GrSamplerState::Filter::kBilerp);
+ }
+ return requestedFilter;
+}
+
+GrPrimitiveProcessor::TextureSampler::TextureSampler(GrTextureType textureType,
+ const GrSamplerState& samplerState,
+ const GrSwizzle& swizzle,
+ uint32_t extraSamplerKey) {
+ this->reset(textureType, samplerState, swizzle, extraSamplerKey);
+}
+
+void GrPrimitiveProcessor::TextureSampler::reset(GrTextureType textureType,
+ const GrSamplerState& samplerState,
+ const GrSwizzle& swizzle,
+ uint32_t extraSamplerKey) {
+ fSamplerState = samplerState;
+ fSamplerState.setFilterMode(clamp_filter(textureType, samplerState.filter()));
+ fSwizzle = swizzle;
+ fTextureType = textureType;
+ fExtraSamplerKey = extraSamplerKey;
+ fIsInitialized = true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h
new file mode 100644
index 0000000000..41f37a3c48
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPrimitiveProcessor_DEFINED
+#define GrPrimitiveProcessor_DEFINED
+
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrShaderVar.h"
+
+class GrCoordTransform;
+
+/*
+ * The GrPrimitiveProcessor represents some kind of geometric primitive. This includes the shape
+ * of the primitive and the inherent color of the primitive. The GrPrimitiveProcessor is
+ * responsible for providing a color and coverage input into the Ganesh rendering pipeline. Through
+ * optimization, Ganesh may decide a different color, no color, and/or no coverage are required
+ * from the GrPrimitiveProcessor, so the GrPrimitiveProcessor must be able to support this
+ * functionality.
+ *
+ * There are two feedback loops between the GrFragmentProcessors, the GrXferProcessor, and the
+ * GrPrimitiveProcessor. These loops run on the CPU to determine known properties of the final
+ * color and coverage inputs to the GrXferProcessor in order to perform optimizations that preserve
+ * correctness. The GrDrawOp seeds these loops with initial color and coverage, in its
+ * getProcessorAnalysisInputs implementation. These seed values are processed by the subsequent
+ * stages of the rendering pipeline and the output is then fed back into the GrDrawOp in
+ * the applyPipelineOptimizations call, where the op can use the information to inform decisions
+ * about GrPrimitiveProcessor creation.
+ */
+
+class GrGLSLPrimitiveProcessor;
+
+/**
+ * GrPrimitiveProcessor defines an interface which all subclasses must implement. All
+ * GrPrimitiveProcessors must provide seed color and coverage for the Ganesh color/coverage
+ * pipelines, and they must provide some notion of equality.
+ *
+ * TODO: This class does not really need to be ref counted. Instances should be allocated using
+ * GrOpFlushState's arena and destroyed when the arena is torn down.
+ */
+class GrPrimitiveProcessor : public GrProcessor, public GrNonAtomicRef<GrPrimitiveProcessor> {
+public:
+ class TextureSampler;
+
+ /** Describes a vertex or instance attribute. */
+ class Attribute {
+ public:
+ constexpr Attribute() = default;
+ constexpr Attribute(const char* name,
+ GrVertexAttribType cpuType,
+ GrSLType gpuType)
+ : fName(name), fCPUType(cpuType), fGPUType(gpuType) {}
+ constexpr Attribute(const Attribute&) = default;
+
+ Attribute& operator=(const Attribute&) = default;
+
+ constexpr bool isInitialized() const { return SkToBool(fName); }
+
+ constexpr const char* name() const { return fName; }
+ constexpr GrVertexAttribType cpuType() const { return fCPUType; }
+ constexpr GrSLType gpuType() const { return fGPUType; }
+
+ inline constexpr size_t size() const;
+ constexpr size_t sizeAlign4() const { return SkAlign4(this->size()); }
+
+ GrShaderVar asShaderVar() const {
+ return {fName, fGPUType, GrShaderVar::kIn_TypeModifier};
+ }
+
+ private:
+ const char* fName = nullptr;
+ GrVertexAttribType fCPUType = kFloat_GrVertexAttribType;
+ GrSLType fGPUType = kFloat_GrSLType;
+ };
+
+ class Iter {
+ public:
+ Iter() : fCurr(nullptr), fRemaining(0) {}
+ Iter(const Iter& iter) : fCurr(iter.fCurr), fRemaining(iter.fRemaining) {}
+ Iter& operator= (const Iter& iter) {
+ fCurr = iter.fCurr;
+ fRemaining = iter.fRemaining;
+ return *this;
+ }
+ Iter(const Attribute* attrs, int count) : fCurr(attrs), fRemaining(count) {
+ this->skipUninitialized();
+ }
+
+ bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
+ const Attribute& operator*() const { return *fCurr; }
+ void operator++() {
+ if (fRemaining) {
+ fRemaining--;
+ fCurr++;
+ this->skipUninitialized();
+ }
+ }
+
+ private:
+ void skipUninitialized() {
+ if (!fRemaining) {
+ fCurr = nullptr;
+ } else {
+ while (!fCurr->isInitialized()) {
+ ++fCurr;
+ }
+ }
+ }
+
+ const Attribute* fCurr;
+ int fRemaining;
+ };
+
+ class AttributeSet {
+ public:
+ Iter begin() const { return Iter(fAttributes, fCount); }
+ Iter end() const { return Iter(); }
+
+ private:
+ friend class GrPrimitiveProcessor;
+
+ void init(const Attribute* attrs, int count) {
+ fAttributes = attrs;
+ fRawCount = count;
+ fCount = 0;
+ fStride = 0;
+ for (int i = 0; i < count; ++i) {
+ if (attrs[i].isInitialized()) {
+ fCount++;
+ fStride += attrs[i].sizeAlign4();
+ }
+ }
+ }
+
+ const Attribute* fAttributes = nullptr;
+ int fRawCount = 0;
+ int fCount = 0;
+ size_t fStride = 0;
+ };
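+
+    /** A minimal iteration sketch (illustrative only; `proc` is a hypothetical processor): a
+        ranged-for over an AttributeSet visits only the initialized attributes, since Iter skips
+        default-constructed entries.
+
+            size_t offset = 0;
+            for (const Attribute& attr : proc.vertexAttributes()) {
+                // attr.name()/cpuType()/gpuType() describe the attribute stored at `offset`.
+                offset += attr.sizeAlign4();  // vertexStride() equals the final offset
+            }
+    */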
+
+ GrPrimitiveProcessor(ClassID);
+
+ int numTextureSamplers() const { return fTextureSamplerCnt; }
+ const TextureSampler& textureSampler(int index) const;
+ int numVertexAttributes() const { return fVertexAttributes.fCount; }
+ const AttributeSet& vertexAttributes() const { return fVertexAttributes; }
+ int numInstanceAttributes() const { return fInstanceAttributes.fCount; }
+ const AttributeSet& instanceAttributes() const { return fInstanceAttributes; }
+
+ bool hasVertexAttributes() const { return SkToBool(fVertexAttributes.fCount); }
+ bool hasInstanceAttributes() const { return SkToBool(fInstanceAttributes.fCount); }
+
+ /**
+     * A common practice is to populate the vertex/instance's memory using an implicit array of
+ * structs. In this case, it is best to assert that:
+ * stride == sizeof(struct)
+ */
+ size_t vertexStride() const { return fVertexAttributes.fStride; }
+ size_t instanceStride() const { return fInstanceAttributes.fStride; }
+
+ // Only the GrGeometryProcessor subclass actually has a geo shader or vertex attributes, but
+ // we put these calls on the base class to prevent having to cast
+ virtual bool willUseGeoShader() const = 0;
+
+ /**
+ * Computes a transformKey from an array of coord transforms. Will only look at the first
+ * <numCoords> transforms in the array.
+ *
+ * TODO: A better name for this function would be "compute" instead of "get".
+ */
+ uint32_t getTransformKey(const SkTArray<GrCoordTransform*, true>& coords,
+ int numCoords) const;
+
+ /**
+ * Sets a unique key on the GrProcessorKeyBuilder that is directly associated with this geometry
+ * processor's GL backend implementation.
+ *
+ * TODO: A better name for this function would be "compute" instead of "get".
+ */
+ virtual void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;
+
+
+ void getAttributeKey(GrProcessorKeyBuilder* b) const {
+ // Ensure that our CPU and GPU type fields fit together in a 32-bit value, and we never
+ // collide with the "uninitialized" value.
+ static_assert(kGrVertexAttribTypeCount < (1 << 8), "");
+ static_assert(kGrSLTypeCount < (1 << 8), "");
+
+ auto add_attributes = [=](const Attribute* attrs, int attrCount) {
+ for (int i = 0; i < attrCount; ++i) {
+ b->add32(attrs[i].isInitialized() ? (attrs[i].cpuType() << 16) | attrs[i].gpuType()
+ : ~0);
+ }
+ };
+ add_attributes(fVertexAttributes.fAttributes, fVertexAttributes.fRawCount);
+ add_attributes(fInstanceAttributes.fAttributes, fInstanceAttributes.fRawCount);
+ }
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const = 0;
+
+ virtual bool isPathRendering() const { return false; }
+
+protected:
+ void setVertexAttributes(const Attribute* attrs, int attrCount) {
+ fVertexAttributes.init(attrs, attrCount);
+ }
+ void setInstanceAttributes(const Attribute* attrs, int attrCount) {
+ SkASSERT(attrCount >= 0);
+ fInstanceAttributes.init(attrs, attrCount);
+ }
+ void setTextureSamplerCnt(int cnt) {
+ SkASSERT(cnt >= 0);
+ fTextureSamplerCnt = cnt;
+ }
+
+ /**
+ * Helper for implementing onTextureSampler(). E.g.:
+     *     return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
+ */
+ template <typename... Args>
+ static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
+ const Args&... samps) {
+ return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
+ }
+ inline static const TextureSampler& IthTextureSampler(int i);
+
+private:
+ virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }
+
+ AttributeSet fVertexAttributes;
+ AttributeSet fInstanceAttributes;
+
+ int fTextureSamplerCnt = 0;
+ typedef GrProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Used to capture the properties of the GrTextureProxies required/expected by a GrPrimitiveProcessor
+ * along with an associated GrSamplerState. The actual proxies used are stored in either the
+ * fixed or dynamic state arrays. TextureSamplers don't perform any coord manipulation to account
+ * for texture origin.
+ */
+class GrPrimitiveProcessor::TextureSampler {
+public:
+ TextureSampler() = default;
+
+ TextureSampler(GrTextureType, const GrSamplerState&, const GrSwizzle&,
+ uint32_t extraSamplerKey = 0);
+
+ TextureSampler(const TextureSampler&) = delete;
+ TextureSampler& operator=(const TextureSampler&) = delete;
+
+ void reset(GrTextureType, const GrSamplerState&, const GrSwizzle&,
+ uint32_t extraSamplerKey = 0);
+
+ GrTextureType textureType() const { return fTextureType; }
+
+ const GrSamplerState& samplerState() const { return fSamplerState; }
+ const GrSwizzle& swizzle() const { return fSwizzle; }
+
+ uint32_t extraSamplerKey() const { return fExtraSamplerKey; }
+
+ bool isInitialized() const { return fIsInitialized; }
+
+private:
+ GrSamplerState fSamplerState;
+ GrSwizzle fSwizzle;
+ GrTextureType fTextureType = GrTextureType::k2D;
+ uint32_t fExtraSamplerKey = 0;
+ bool fIsInitialized = false;
+};
+
+const GrPrimitiveProcessor::TextureSampler& GrPrimitiveProcessor::IthTextureSampler(int i) {
+ SK_ABORT("Illegal texture sampler index");
+ static const TextureSampler kBogus;
+ return kBogus;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Returns the size of the attrib type in bytes.
+ * This was moved from include/private/GrTypesPriv.h in service of Skia dependents that build
+ * with C++11.
+ */
+static constexpr inline size_t GrVertexAttribTypeSize(GrVertexAttribType type) {
+ switch (type) {
+ case kFloat_GrVertexAttribType:
+ return sizeof(float);
+ case kFloat2_GrVertexAttribType:
+ return 2 * sizeof(float);
+ case kFloat3_GrVertexAttribType:
+ return 3 * sizeof(float);
+ case kFloat4_GrVertexAttribType:
+ return 4 * sizeof(float);
+ case kHalf_GrVertexAttribType:
+ return sizeof(uint16_t);
+ case kHalf2_GrVertexAttribType:
+ return 2 * sizeof(uint16_t);
+ case kHalf3_GrVertexAttribType:
+ return 3 * sizeof(uint16_t);
+ case kHalf4_GrVertexAttribType:
+ return 4 * sizeof(uint16_t);
+ case kInt2_GrVertexAttribType:
+ return 2 * sizeof(int32_t);
+ case kInt3_GrVertexAttribType:
+ return 3 * sizeof(int32_t);
+ case kInt4_GrVertexAttribType:
+ return 4 * sizeof(int32_t);
+ case kByte_GrVertexAttribType:
+ return 1 * sizeof(char);
+ case kByte2_GrVertexAttribType:
+ return 2 * sizeof(char);
+ case kByte3_GrVertexAttribType:
+ return 3 * sizeof(char);
+ case kByte4_GrVertexAttribType:
+ return 4 * sizeof(char);
+ case kUByte_GrVertexAttribType:
+ return 1 * sizeof(char);
+ case kUByte2_GrVertexAttribType:
+ return 2 * sizeof(char);
+ case kUByte3_GrVertexAttribType:
+ return 3 * sizeof(char);
+ case kUByte4_GrVertexAttribType:
+ return 4 * sizeof(char);
+ case kUByte_norm_GrVertexAttribType:
+ return 1 * sizeof(char);
+ case kUByte4_norm_GrVertexAttribType:
+ return 4 * sizeof(char);
+ case kShort2_GrVertexAttribType:
+ return 2 * sizeof(int16_t);
+ case kShort4_GrVertexAttribType:
+ return 4 * sizeof(int16_t);
+ case kUShort2_GrVertexAttribType: // fall through
+ case kUShort2_norm_GrVertexAttribType:
+ return 2 * sizeof(uint16_t);
+ case kInt_GrVertexAttribType:
+ return sizeof(int32_t);
+ case kUint_GrVertexAttribType:
+ return sizeof(uint32_t);
+ case kUShort_norm_GrVertexAttribType:
+ return sizeof(uint16_t);
+ case kUShort4_norm_GrVertexAttribType:
+ return 4 * sizeof(uint16_t);
+ }
+    // GCC fails because SK_ABORT evaluates to non-constexpr. clang and cl.exe think this is
+ // unreachable and don't complain.
+#if defined(__clang__) || !defined(__GNUC__)
+ SK_ABORT("Unsupported type conversion");
+#endif
+ return 0;
+}
+
+constexpr size_t GrPrimitiveProcessor::Attribute::size() const {
+ return GrVertexAttribTypeSize(fCPUType);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessor.cpp b/gfx/skia/skia/src/gpu/GrProcessor.cpp
new file mode 100644
index 0000000000..d7a0c57860
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessor.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+#include "include/private/SkSpinlock.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrXferProcessor.h"
+
+#if GR_TEST_UTILS
+
+GrResourceProvider* GrProcessorTestData::resourceProvider() {
+ return fContext->priv().resourceProvider();
+}
+
+GrProxyProvider* GrProcessorTestData::proxyProvider() {
+ return fContext->priv().proxyProvider();
+}
+
+const GrCaps* GrProcessorTestData::caps() { return fContext->priv().caps(); }
+
+class GrFragmentProcessor;
+class GrGeometryProcessor;
+
+/*
+ * Originally these were both in the processor unit test header, but that seemed to cause linker
+ * problems on Android.
+ */
+template <>
+SkTArray<GrFragmentProcessorTestFactory*, true>* GrFragmentProcessorTestFactory::GetFactories() {
+ static SkTArray<GrFragmentProcessorTestFactory*, true> gFactories;
+ return &gFactories;
+}
+
+template <>
+SkTArray<GrGeometryProcessorTestFactory*, true>* GrGeometryProcessorTestFactory::GetFactories() {
+ static SkTArray<GrGeometryProcessorTestFactory*, true> gFactories;
+ return &gFactories;
+}
+
+SkTArray<GrXPFactoryTestFactory*, true>* GrXPFactoryTestFactory::GetFactories() {
+ static SkTArray<GrXPFactoryTestFactory*, true> gFactories;
+ return &gFactories;
+}
+
+/*
+ * To ensure we always have successful static initialization, before creating from the factories
+ * we verify the count is as expected. If a new factory is added, then these numbers must be
+ * manually adjusted.
+ */
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+static const int kFPFactoryCount = 37;
+static const int kGPFactoryCount = 14;
+static const int kXPFactoryCount = 4;
+#else
+static const int kFPFactoryCount = 0;
+static const int kGPFactoryCount = 0;
+static const int kXPFactoryCount = 0;
+#endif
+
+template <>
+void GrFragmentProcessorTestFactory::VerifyFactoryCount() {
+ if (kFPFactoryCount != GetFactories()->count()) {
+ SkDebugf("\nExpected %d fragment processor factories, found %d.\n",
+ kFPFactoryCount, GetFactories()->count());
+ SK_ABORT("Wrong number of fragment processor factories!");
+ }
+}
+
+template <>
+void GrGeometryProcessorTestFactory::VerifyFactoryCount() {
+ if (kGPFactoryCount != GetFactories()->count()) {
+ SkDebugf("\nExpected %d geometry processor factories, found %d.\n",
+ kGPFactoryCount, GetFactories()->count());
+ SK_ABORT("Wrong number of geometry processor factories!");
+ }
+}
+
+void GrXPFactoryTestFactory::VerifyFactoryCount() {
+ if (kXPFactoryCount != GetFactories()->count()) {
+ SkDebugf("\nExpected %d xp factory factories, found %d.\n",
+ kXPFactoryCount, GetFactories()->count());
+ SK_ABORT("Wrong number of xp factory factories!");
+ }
+}
+
+#endif
+
+
+// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
+// different threads. The GrContext is not used concurrently on different threads and there is a
+// memory barrier between accesses of a context on different threads. Also, there may be multiple
+// GrContexts and those contexts may be in use concurrently on different threads.
+namespace {
+#if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+static SkSpinlock gProcessorSpinlock;
+#endif
+class MemoryPoolAccessor {
+public:
+
+// We know in the Android framework there is only one GrContext.
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ MemoryPoolAccessor() {}
+ ~MemoryPoolAccessor() {}
+#else
+ MemoryPoolAccessor() { gProcessorSpinlock.acquire(); }
+ ~MemoryPoolAccessor() { gProcessorSpinlock.release(); }
+#endif
+
+ GrMemoryPool* pool() const {
+ static GrMemoryPool gPool(4096, 4096);
+ return &gPool;
+ }
+};
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void* GrProcessor::operator new(size_t size) { return MemoryPoolAccessor().pool()->allocate(size); }
+
+void GrProcessor::operator delete(void* target) {
+ return MemoryPoolAccessor().pool()->release(target);
+}
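+
+// Because GrProcessor overrides operator new/delete above, a plain `new MyProcessor(...)` for any
+// GrProcessor subclass is served from the shared pool; the spinlock keeps that safe when several
+// GrContexts allocate concurrently. (`MyProcessor` is hypothetical.)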
diff --git a/gfx/skia/skia/src/gpu/GrProcessor.h b/gfx/skia/skia/src/gpu/GrProcessor.h
new file mode 100644
index 0000000000..326c6742b8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessor.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessor_DEFINED
+#define GrProcessor_DEFINED
+
+#include "include/core/SkMath.h"
+#include "include/core/SkString.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/GrProcessorUnitTest.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrContext;
+class GrResourceProvider;
+
+/**
+ * Used by processors to build their keys. It incorporates each per-processor key into a larger
+ * shader key.
+ */
+class GrProcessorKeyBuilder {
+public:
+ GrProcessorKeyBuilder(SkTArray<unsigned char, true>* data) : fData(data), fCount(0) {
+ SkASSERT(0 == fData->count() % sizeof(uint32_t));
+ }
+
+ void add32(uint32_t v) {
+ ++fCount;
+ fData->push_back_n(4, reinterpret_cast<uint8_t*>(&v));
+ }
+
+ /** Inserts count uint32_ts into the key. The returned pointer is only valid until the next
+ add*() call. */
+ uint32_t* SK_WARN_UNUSED_RESULT add32n(int count) {
+ SkASSERT(count > 0);
+ fCount += count;
+ return reinterpret_cast<uint32_t*>(fData->push_back_n(4 * count));
+ }
+
+ size_t size() const { return sizeof(uint32_t) * fCount; }
+
+private:
+ SkTArray<uint8_t, true>* fData; // unowned ptr to the larger key.
+ int fCount; // number of uint32_ts added to fData by the processor.
+};
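+
+/** A minimal sketch of how a processor contributes to the key (illustrative only; `MyProcessor`
+    and `fFlags` are hypothetical):
+
+        void MyProcessor::getGLSLProcessorKey(const GrShaderCaps&,
+                                              GrProcessorKeyBuilder* b) const {
+            b->add32(fFlags);                // one 32-bit word
+            uint32_t* words = b->add32n(2);  // pointer valid only until the next add*() call
+            words[0] = 0;
+            words[1] = 0;
+        }
+*/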
+
+/** Provides custom shader code to the Ganesh shading pipeline. GrProcessor objects *must* be
+ immutable: after being constructed, their fields may not change.
+
+    Dynamically allocated GrProcessors are managed by a per-thread memory pool. The ref count of a
+    processor must reach 0 before the thread terminates and the pool is destroyed.
+ */
+class GrProcessor {
+public:
+ enum ClassID {
+ kBigKeyProcessor_ClassID,
+ kBlockInputFragmentProcessor_ClassID,
+ kButtCapStrokedCircleGeometryProcessor_ClassID,
+ kCircleGeometryProcessor_ClassID,
+ kCircularRRectEffect_ClassID,
+ kClockwiseTestProcessor_ClassID,
+ kColorTableEffect_ClassID,
+ kComposeOneFragmentProcessor_ClassID,
+ kComposeTwoFragmentProcessor_ClassID,
+ kCoverageSetOpXP_ClassID,
+ kCubicStrokeProcessor_ClassID,
+ kCustomXP_ClassID,
+ kDashingCircleEffect_ClassID,
+ kDashingLineEffect_ClassID,
+ kDefaultGeoProc_ClassID,
+ kDIEllipseGeometryProcessor_ClassID,
+ kDisableColorXP_ClassID,
+ kEllipseGeometryProcessor_ClassID,
+ kEllipticalRRectEffect_ClassID,
+ kGP_ClassID,
+ kVertexColorSpaceBenchGP_ClassID,
+ kGrAARectEffect_ClassID,
+ kGrAlphaThresholdFragmentProcessor_ClassID,
+ kGrBicubicEffect_ClassID,
+ kGrBitmapTextGeoProc_ClassID,
+ kGrBlurredEdgeFragmentProcessor_ClassID,
+ kGrCCClipProcessor_ClassID,
+ kGrCCPathProcessor_ClassID,
+ kGrCircleBlurFragmentProcessor_ClassID,
+ kGrCircleEffect_ClassID,
+ kGrClampedGradientEffect_ClassID,
+ kGrColorMatrixFragmentProcessor_ClassID,
+ kGrColorSpaceXformEffect_ClassID,
+ kGrComposeLerpEffect_ClassID,
+ kGrComposeLerpRedEffect_ClassID,
+ kGrConfigConversionEffect_ClassID,
+ kGrConicEffect_ClassID,
+ kGrConstColorProcessor_ClassID,
+ kGrConvexPolyEffect_ClassID,
+ kGrDeviceSpaceTextureDecalFragmentProcessor_ClassID,
+ kGrDiffuseLightingEffect_ClassID,
+ kGrDisplacementMapEffect_ClassID,
+ kGrDistanceFieldA8TextGeoProc_ClassID,
+ kGrDistanceFieldLCDTextGeoProc_ClassID,
+ kGrDistanceFieldPathGeoProc_ClassID,
+ kGrDualIntervalGradientColorizer_ClassID,
+ kGrEllipseEffect_ClassID,
+ kGrFillRRectOp_Processor_ClassID,
+ kGrGaussianConvolutionFragmentProcessor_ClassID,
+ kGrGSCoverageProcessor_ClassID,
+ kGrImprovedPerlinNoiseEffect_ClassID,
+ kGrLinearGradientLayout_ClassID,
+ kGrLumaColorFilterEffect_ClassID,
+ kGrMagnifierEffect_ClassID,
+ kGrMatrixConvolutionEffect_ClassID,
+ kGrMeshTestProcessor_ClassID,
+ kGrMorphologyEffect_ClassID,
+ kGrMixerEffect_ClassID,
+ kGrOverrideInputFragmentProcessor_ClassID,
+ kGrPathProcessor_ClassID,
+ kGrPerlinNoise2Effect_ClassID,
+ kGrPipelineDynamicStateTestProcessor_ClassID,
+ kGrPremulInputFragmentProcessor_ClassID,
+ kGrQuadEffect_ClassID,
+ kGrRadialGradientLayout_ClassID,
+ kGrRectBlurEffect_ClassID,
+ kGrRRectBlurEffect_ClassID,
+ kGrRRectShadowGeoProc_ClassID,
+ kGrSimpleTextureEffect_ClassID,
+ kGrSingleIntervalGradientColorizer_ClassID,
+ kGrSkSLFP_ClassID,
+ kGrSpecularLightingEffect_ClassID,
+ kGrSRGBEffect_ClassID,
+ kGrSampleMaskProcessor_ClassID,
+ kGrSaturateProcessor_ClassID,
+ kGrSweepGradientLayout_ClassID,
+ kGrTextureDomainEffect_ClassID,
+ kGrTextureGradientColorizer_ClassID,
+ kGrTiledGradientEffect_ClassID,
+ kGrTwoPointConicalGradientLayout_ClassID,
+ kGrUnrolledBinaryGradientColorizer_ClassID,
+ kGrVSCoverageProcessor_ClassID,
+ kGrYUVtoRGBEffect_ClassID,
+ kHighContrastFilterEffect_ClassID,
+ kLatticeGP_ClassID,
+ kPDLCDXferProcessor_ClassID,
+ kPorterDuffXferProcessor_ClassID,
+ kPremulFragmentProcessor_ClassID,
+ kQuadEdgeEffect_ClassID,
+ kQuadPerEdgeAAGeometryProcessor_ClassID,
+ kSampleLocationsTestProcessor_ClassID,
+ kSeriesFragmentProcessor_ClassID,
+ kShaderPDXferProcessor_ClassID,
+ kStencilResolveProcessor_ClassID,
+ kFwidthSquircleTestProcessor_ClassID,
+ kSwizzleFragmentProcessor_ClassID,
+ kTestFP_ClassID,
+ kFlatNormalsFP_ClassID,
+ kMappedNormalsFP_ClassID,
+ kLightingFP_ClassID,
+ kLinearStrokeProcessor_ClassID,
+ };
+
+ virtual ~GrProcessor() = default;
+
+    /** Human-meaningful string to identify this processor; may be embedded in generated shader
+ code. */
+ virtual const char* name() const = 0;
+
+ /** Human-readable dump of all information */
+#ifdef SK_DEBUG
+ virtual SkString dumpInfo() const {
+ SkString str;
+ str.appendf("Missing data");
+ return str;
+ }
+#else
+ SkString dumpInfo() const { return SkString("<Processor information unavailable>"); }
+#endif
+
+ /**
+ * Custom shader features provided by the framework. These require special handling when
+ * preparing shaders, so a processor must call setWillUseCustomFeature() from its constructor if
+ * it intends to use one.
+ */
+ enum class CustomFeatures {
+ kNone = 0,
+ kSampleLocations = 1 << 0,
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(CustomFeatures);
+
+ CustomFeatures requestedFeatures() const { return fRequestedFeatures; }
+
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ /** Helper for down-casting to a GrProcessor subclass */
+ template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+
+ ClassID classID() const { return fClassID; }
+
+protected:
+ GrProcessor(ClassID classID) : fClassID(classID) {}
+ GrProcessor(const GrProcessor&) = delete;
+ GrProcessor& operator=(const GrProcessor&) = delete;
+
+ void setWillUseCustomFeature(CustomFeatures feature) { fRequestedFeatures |= feature; }
+ void resetCustomFeatures() { fRequestedFeatures = CustomFeatures::kNone; }
+
+ const ClassID fClassID;
+ CustomFeatures fRequestedFeatures = CustomFeatures::kNone;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrProcessor::CustomFeatures);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessorAnalysis.cpp b/gfx/skia/skia/src/gpu/GrProcessorAnalysis.cpp
new file mode 100644
index 0000000000..1f98300ffd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorAnalysis.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessorAnalysis.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+GrColorFragmentProcessorAnalysis::GrColorFragmentProcessorAnalysis(
+ const GrProcessorAnalysisColor& input,
+ const GrFragmentProcessor* const* processors,
+ int cnt) {
+ fCompatibleWithCoverageAsAlpha = true;
+ fIsOpaque = input.isOpaque();
+ fUsesLocalCoords = false;
+ fProcessorsToEliminate = 0;
+ fKnowOutputColor = input.isConstant(&fLastKnownOutputColor);
+ for (int i = 0; i < cnt; ++i) {
+ if (fUsesLocalCoords && !fKnowOutputColor && !fCompatibleWithCoverageAsAlpha &&
+ !fIsOpaque) {
+ break;
+ }
+ const GrFragmentProcessor* fp = processors[i];
+ if (fKnowOutputColor &&
+ fp->hasConstantOutputForConstantInput(fLastKnownOutputColor, &fLastKnownOutputColor)) {
+ ++fProcessorsToEliminate;
+ fIsOpaque = fLastKnownOutputColor.isOpaque();
+ // We reset these since the caller is expected to not use the earlier fragment
+ // processors.
+ fCompatibleWithCoverageAsAlpha = true;
+ fUsesLocalCoords = false;
+ } else {
+ fKnowOutputColor = false;
+ if (fIsOpaque && !fp->preservesOpaqueInput()) {
+ fIsOpaque = false;
+ }
+ if (fCompatibleWithCoverageAsAlpha && !fp->compatibleWithCoverageAsAlpha()) {
+ fCompatibleWithCoverageAsAlpha = false;
+ }
+ if (fp->usesLocalCoords()) {
+ fUsesLocalCoords = true;
+ }
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrProcessorAnalysis.h b/gfx/skia/skia/src/gpu/GrProcessorAnalysis.h
new file mode 100644
index 0000000000..5f0f22ed69
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorAnalysis.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessorAnalysis_DEFINED
+#define GrProcessorAnalysis_DEFINED
+
+#include "include/private/SkColorData.h"
+
+class GrDrawOp;
+class GrFragmentProcessor;
+
+class GrProcessorAnalysisColor {
+public:
+ enum class Opaque {
+ kNo,
+ kYes,
+ };
+
+ constexpr GrProcessorAnalysisColor(Opaque opaque = Opaque::kNo)
+ : fFlags(opaque == Opaque::kYes ? kIsOpaque_Flag : 0)
+ , fColor(SK_PMColor4fTRANSPARENT) {}
+
+ GrProcessorAnalysisColor(const SkPMColor4f& color) { this->setToConstant(color); }
+
+ void setToConstant(const SkPMColor4f& color) {
+ fColor = color;
+ if (color.isOpaque()) {
+ fFlags = kColorIsKnown_Flag | kIsOpaque_Flag;
+ } else {
+ fFlags = kColorIsKnown_Flag;
+ }
+ }
+
+ void setToUnknown() { fFlags = 0; }
+
+ void setToUnknownOpaque() { fFlags = kIsOpaque_Flag; }
+
+ bool isUnknown() const { return SkToBool(fFlags == 0); }
+
+ bool isOpaque() const { return SkToBool(kIsOpaque_Flag & fFlags); }
+
+ bool isConstant(SkPMColor4f* color = nullptr) const {
+ if (kColorIsKnown_Flag & fFlags) {
+ if (color) {
+ *color = fColor;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ bool operator==(const GrProcessorAnalysisColor& that) const {
+ if (fFlags != that.fFlags) {
+ return false;
+ }
+ return (kColorIsKnown_Flag & fFlags) ? fColor == that.fColor : true;
+ }
+
+ /** The returned value reflects the common properties of the two inputs. */
+ static GrProcessorAnalysisColor Combine(const GrProcessorAnalysisColor& a,
+ const GrProcessorAnalysisColor& b) {
+ GrProcessorAnalysisColor result;
+ uint32_t commonFlags = a.fFlags & b.fFlags;
+ if ((kColorIsKnown_Flag & commonFlags) && a.fColor == b.fColor) {
+ result.fColor = a.fColor;
+ result.fFlags = a.fFlags;
+ } else if (kIsOpaque_Flag & commonFlags) {
+ result.fFlags = kIsOpaque_Flag;
+ }
+ return result;
+ }
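+
+    /** Examples of Combine, derived from the rules above:
+            constant opaque red + constant opaque red  -> constant opaque red
+            constant opaque red + constant opaque blue -> opaque, color unknown
+            unknown but opaque  + constant transparent -> fully unknown
+    */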
+
+private:
+ enum Flags {
+ kColorIsKnown_Flag = 0x1,
+ kIsOpaque_Flag = 0x2,
+ };
+ uint32_t fFlags;
+ SkPMColor4f fColor;
+};
+
+enum class GrProcessorAnalysisCoverage { kNone, kSingleChannel, kLCD };
+
+/**
+ * GrColorFragmentProcessorAnalysis gathers invariant data from a set of color fragment processors.
+ * It is used to recognize optimizations that can simplify the generated shader or make blending
+ * more efficient.
+ */
+class GrColorFragmentProcessorAnalysis {
+public:
+ GrColorFragmentProcessorAnalysis() = delete;
+
+ GrColorFragmentProcessorAnalysis(const GrProcessorAnalysisColor& input,
+ const GrFragmentProcessor* const* processors,
+ int cnt);
+
+ bool isOpaque() const { return fIsOpaque; }
+
+ /**
+     * Are all the fragment processors compatible with conflating coverage with color prior to
+     * the first fragment processor? This result assumes that processors that should be eliminated
+     * as indicated by initialProcessorsToEliminate() are in fact eliminated.
+ */
+ bool allProcessorsCompatibleWithCoverageAsAlpha() const {
+ return fCompatibleWithCoverageAsAlpha;
+ }
+
+ /**
+     * Do any of the fragment processors require local coords? This result assumes that
+ * processors that should be eliminated as indicated by initialProcessorsToEliminate() are in
+ * fact eliminated.
+ */
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ /**
+ * If we detected that the result after the first N processors is a known color then we
+ * eliminate those N processors and replace the GrDrawOp's color input to the GrPipeline with
+     * the known output of the Nth processor, so that the (N+1)th fragment processor (or the XP if
+ * there are only N processors) sees its expected input. If this returns 0 then there are no
+ * processors to eliminate.
+ */
+ int initialProcessorsToEliminate(SkPMColor4f* newPipelineInputColor) const {
+ if (fProcessorsToEliminate > 0) {
+ *newPipelineInputColor = fLastKnownOutputColor;
+ }
+ return fProcessorsToEliminate;
+ }
+
+ /**
+ * Provides known information about the last processor's output color.
+ */
+ GrProcessorAnalysisColor outputColor() const {
+ if (fKnowOutputColor) {
+ return fLastKnownOutputColor;
+ }
+ return fIsOpaque ? GrProcessorAnalysisColor::Opaque::kYes
+ : GrProcessorAnalysisColor::Opaque::kNo;
+ }
+
+private:
+ bool fIsOpaque;
+ bool fCompatibleWithCoverageAsAlpha;
+ bool fUsesLocalCoords;
+ bool fKnowOutputColor;
+ int fProcessorsToEliminate;
+ SkPMColor4f fLastKnownOutputColor;
+};
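+
+/** A minimal usage sketch (illustrative only; `inputColor`, `fps`, and `fpCount` are assumed to
+    come from the draw being analyzed):
+
+        GrColorFragmentProcessorAnalysis analysis(inputColor, fps, fpCount);
+        SkPMColor4f overrideColor;
+        int skip = analysis.initialProcessorsToEliminate(&overrideColor);
+        // Drop the first `skip` processors and feed `overrideColor` into the pipeline instead.
+*/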
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessorSet.cpp b/gfx/skia/skia/src/gpu/GrProcessorSet.cpp
new file mode 100644
index 0000000000..cd30a887d2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorSet.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlendModePriv.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrProcessorSet.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+
+const GrProcessorSet& GrProcessorSet::EmptySet() {
+ static GrProcessorSet gEmpty(GrProcessorSet::Empty::kEmpty);
+ return gEmpty;
+}
+
+GrProcessorSet GrProcessorSet::MakeEmptySet() {
+ return GrProcessorSet(GrProcessorSet::Empty::kEmpty);
+}
+
+GrProcessorSet::GrProcessorSet(GrPaint&& paint) : fXP(paint.getXPFactory()) {
+ fFlags = 0;
+ if (paint.numColorFragmentProcessors() <= kMaxColorProcessors) {
+ fColorFragmentProcessorCnt = paint.numColorFragmentProcessors();
+ fFragmentProcessors.reset(paint.numTotalFragmentProcessors());
+ int i = 0;
+ for (auto& fp : paint.fColorFragmentProcessors) {
+ SkASSERT(fp.get());
+ fFragmentProcessors[i++] = std::move(fp);
+ }
+ for (auto& fp : paint.fCoverageFragmentProcessors) {
+ SkASSERT(fp.get());
+ fFragmentProcessors[i++] = std::move(fp);
+ }
+ } else {
+ SkDebugf("Insane number of color fragment processors in paint. Dropping all processors.");
+ fColorFragmentProcessorCnt = 0;
+ }
+ SkDEBUGCODE(paint.fAlive = false;)
+}
+
+GrProcessorSet::GrProcessorSet(SkBlendMode mode)
+ : fXP(SkBlendMode_AsXPFactory(mode))
+ , fColorFragmentProcessorCnt(0)
+ , fFragmentProcessorOffset(0)
+ , fFlags(0) {}
+
+GrProcessorSet::GrProcessorSet(std::unique_ptr<GrFragmentProcessor> colorFP)
+ : fFragmentProcessors(1)
+ , fXP((const GrXPFactory*)nullptr)
+ , fColorFragmentProcessorCnt(1)
+ , fFragmentProcessorOffset(0)
+ , fFlags(0) {
+ SkASSERT(colorFP);
+ fFragmentProcessors[0] = std::move(colorFP);
+}
+
+GrProcessorSet::GrProcessorSet(GrProcessorSet&& that)
+ : fXP(std::move(that.fXP))
+ , fColorFragmentProcessorCnt(that.fColorFragmentProcessorCnt)
+ , fFragmentProcessorOffset(0)
+ , fFlags(that.fFlags) {
+ fFragmentProcessors.reset(that.fFragmentProcessors.count() - that.fFragmentProcessorOffset);
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ fFragmentProcessors[i] =
+ std::move(that.fFragmentProcessors[i + that.fFragmentProcessorOffset]);
+ }
+ that.fColorFragmentProcessorCnt = 0;
+ that.fFragmentProcessors.reset(0);
+}
+
+GrProcessorSet::~GrProcessorSet() {
+ if (this->isFinalized() && this->xferProcessor()) {
+ this->xferProcessor()->unref();
+ }
+}
+
+#ifdef SK_DEBUG
+SkString dump_fragment_processor_tree(const GrFragmentProcessor* fp, int indentCnt) {
+ SkString result;
+ SkString indentString;
+ for (int i = 0; i < indentCnt; ++i) {
+ indentString.append(" ");
+ }
+ result.appendf("%s%s %s \n", indentString.c_str(), fp->name(), fp->dumpInfo().c_str());
+ if (fp->numChildProcessors()) {
+ for (int i = 0; i < fp->numChildProcessors(); ++i) {
+ result += dump_fragment_processor_tree(&fp->childProcessor(i), indentCnt + 1);
+ }
+ }
+ return result;
+}
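+// Output shape sketch (processor names and info strings below are illustrative, not real
+// processors):
+//
+//     SomeParentFP <dump info>
+//         SomeChildFP <dump info>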
+
+SkString GrProcessorSet::dumpProcessors() const {
+ SkString result;
+ if (this->numFragmentProcessors()) {
+ if (this->numColorFragmentProcessors()) {
+ result.append("Color Fragment Processors:\n");
+ for (int i = 0; i < this->numColorFragmentProcessors(); ++i) {
+ result += dump_fragment_processor_tree(this->colorFragmentProcessor(i), 1);
+ }
+ } else {
+ result.append("No color fragment processors.\n");
+ }
+ if (this->numCoverageFragmentProcessors()) {
+ result.append("Coverage Fragment Processors:\n");
+            for (int i = 0; i < this->numCoverageFragmentProcessors(); ++i) {
+ result += dump_fragment_processor_tree(this->coverageFragmentProcessor(i), 1);
+ }
+ } else {
+ result.append("No coverage fragment processors.\n");
+ }
+ } else {
+ result.append("No color or coverage fragment processors.\n");
+ }
+ if (this->isFinalized()) {
+ result.append("Xfer Processor: ");
+ if (this->xferProcessor()) {
+ result.appendf("%s\n", this->xferProcessor()->name());
+ } else {
+ result.append("SrcOver\n");
+ }
+ } else {
+ result.append("XP Factory dumping not implemented.\n");
+ }
+ return result;
+}
+#endif
+
+bool GrProcessorSet::operator==(const GrProcessorSet& that) const {
+ SkASSERT(this->isFinalized());
+ SkASSERT(that.isFinalized());
+ int fpCount = this->numFragmentProcessors();
+ if (((fFlags ^ that.fFlags) & ~kFinalized_Flag) || fpCount != that.numFragmentProcessors() ||
+ fColorFragmentProcessorCnt != that.fColorFragmentProcessorCnt) {
+ return false;
+ }
+
+ for (int i = 0; i < fpCount; ++i) {
+ int a = i + fFragmentProcessorOffset;
+ int b = i + that.fFragmentProcessorOffset;
+ if (!fFragmentProcessors[a]->isEqual(*that.fFragmentProcessors[b])) {
+ return false;
+ }
+ }
+ // Most of the time both of these are null
+ if (!this->xferProcessor() && !that.xferProcessor()) {
+ return true;
+ }
+ const GrXferProcessor& thisXP = this->xferProcessor()
+ ? *this->xferProcessor()
+ : GrPorterDuffXPFactory::SimpleSrcOverXP();
+ const GrXferProcessor& thatXP = that.xferProcessor()
+ ? *that.xferProcessor()
+ : GrPorterDuffXPFactory::SimpleSrcOverXP();
+ return thisXP.isEqual(thatXP);
+}
+
+GrProcessorSet::Analysis GrProcessorSet::finalize(
+ const GrProcessorAnalysisColor& colorInput, const GrProcessorAnalysisCoverage coverageInput,
+ const GrAppliedClip* clip, const GrUserStencilSettings* userStencil,
+ bool hasMixedSampledCoverage, const GrCaps& caps, GrClampType clampType,
+ SkPMColor4f* overrideInputColor) {
+ SkASSERT(!this->isFinalized());
+ SkASSERT(!fFragmentProcessorOffset);
+
+ GrProcessorSet::Analysis analysis;
+ analysis.fCompatibleWithCoverageAsAlpha = GrProcessorAnalysisCoverage::kLCD != coverageInput;
+
+ const std::unique_ptr<const GrFragmentProcessor>* fps =
+ fFragmentProcessors.get() + fFragmentProcessorOffset;
+ GrColorFragmentProcessorAnalysis colorAnalysis(
+ colorInput, unique_ptr_address_as_pointer_address(fps), fColorFragmentProcessorCnt);
+ fps += fColorFragmentProcessorCnt;
+ int n = this->numCoverageFragmentProcessors();
+ bool hasCoverageFP = n > 0;
+ bool coverageUsesLocalCoords = false;
+ for (int i = 0; i < n; ++i) {
+ if (!fps[i]->compatibleWithCoverageAsAlpha()) {
+ analysis.fCompatibleWithCoverageAsAlpha = false;
+ }
+ coverageUsesLocalCoords |= fps[i]->usesLocalCoords();
+ }
+ if (clip) {
+ hasCoverageFP = hasCoverageFP || clip->numClipCoverageFragmentProcessors();
+ for (int i = 0; i < clip->numClipCoverageFragmentProcessors(); ++i) {
+ const GrFragmentProcessor* clipFP = clip->clipCoverageFragmentProcessor(i);
+ analysis.fCompatibleWithCoverageAsAlpha &= clipFP->compatibleWithCoverageAsAlpha();
+ coverageUsesLocalCoords |= clipFP->usesLocalCoords();
+ }
+ }
+ int colorFPsToEliminate = colorAnalysis.initialProcessorsToEliminate(overrideInputColor);
+ analysis.fInputColorType = static_cast<Analysis::PackedInputColorType>(
+ colorFPsToEliminate ? Analysis::kOverridden_InputColorType
+ : Analysis::kOriginal_InputColorType);
+
+ GrProcessorAnalysisCoverage outputCoverage;
+ if (GrProcessorAnalysisCoverage::kLCD == coverageInput) {
+ outputCoverage = GrProcessorAnalysisCoverage::kLCD;
+ } else if (hasCoverageFP || GrProcessorAnalysisCoverage::kSingleChannel == coverageInput) {
+ outputCoverage = GrProcessorAnalysisCoverage::kSingleChannel;
+ } else {
+ outputCoverage = GrProcessorAnalysisCoverage::kNone;
+ }
+
+ GrXPFactory::AnalysisProperties props = GrXPFactory::GetAnalysisProperties(
+ this->xpFactory(), colorAnalysis.outputColor(), outputCoverage, caps, clampType);
+ analysis.fRequiresDstTexture =
+ SkToBool(props & GrXPFactory::AnalysisProperties::kRequiresDstTexture);
+ analysis.fCompatibleWithCoverageAsAlpha &=
+ SkToBool(props & GrXPFactory::AnalysisProperties::kCompatibleWithCoverageAsAlpha);
+ analysis.fRequiresNonOverlappingDraws = SkToBool(
+ props & GrXPFactory::AnalysisProperties::kRequiresNonOverlappingDraws);
+ if (props & GrXPFactory::AnalysisProperties::kIgnoresInputColor) {
+ colorFPsToEliminate = this->numColorFragmentProcessors();
+ analysis.fInputColorType =
+ static_cast<Analysis::PackedInputColorType>(Analysis::kIgnored_InputColorType);
+ analysis.fUsesLocalCoords = coverageUsesLocalCoords;
+ } else {
+ analysis.fCompatibleWithCoverageAsAlpha &=
+ colorAnalysis.allProcessorsCompatibleWithCoverageAsAlpha();
+ analysis.fUsesLocalCoords = coverageUsesLocalCoords | colorAnalysis.usesLocalCoords();
+ }
+ for (int i = 0; i < colorFPsToEliminate; ++i) {
+ fFragmentProcessors[i].reset(nullptr);
+ }
+ fFragmentProcessorOffset = colorFPsToEliminate;
+ fColorFragmentProcessorCnt -= colorFPsToEliminate;
+ analysis.fHasColorFragmentProcessor = (fColorFragmentProcessorCnt != 0);
+
+ auto xp = GrXPFactory::MakeXferProcessor(this->xpFactory(), colorAnalysis.outputColor(),
+ outputCoverage, hasMixedSampledCoverage, caps,
+ clampType);
+ fXP.fProcessor = xp.release();
+
+ fFlags |= kFinalized_Flag;
+ analysis.fIsInitialized = true;
+#ifdef SK_DEBUG
+ bool hasXferBarrier =
+ fXP.fProcessor &&
+ GrXferBarrierType::kNone_GrXferBarrierType != fXP.fProcessor->xferBarrierType(caps);
+ bool needsNonOverlappingDraws = analysis.fRequiresDstTexture || hasXferBarrier;
+ SkASSERT(analysis.fRequiresNonOverlappingDraws == needsNonOverlappingDraws);
+#endif
+ return analysis;
+}
diff --git a/gfx/skia/skia/src/gpu/GrProcessorSet.h b/gfx/skia/skia/src/gpu/GrProcessorSet.h
new file mode 100644
index 0000000000..95838645d5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorSet.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessorSet_DEFINED
+#define GrProcessorSet_DEFINED
+
+#include "include/private/SkTemplates.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrProcessorAnalysis.h"
+#include "src/gpu/GrXferProcessor.h"
+
+struct GrUserStencilSettings;
+class GrAppliedClip;
+class GrXPFactory;
+
+class GrProcessorSet {
+private:
+ // Arbitrary constructor arg for empty set and analysis
+ enum class Empty { kEmpty };
+
+public:
+ GrProcessorSet(GrPaint&&);
+ GrProcessorSet(SkBlendMode);
+ GrProcessorSet(std::unique_ptr<GrFragmentProcessor> colorFP);
+ GrProcessorSet(GrProcessorSet&&);
+ GrProcessorSet(const GrProcessorSet&) = delete;
+ GrProcessorSet& operator=(const GrProcessorSet&) = delete;
+
+ ~GrProcessorSet();
+
+ int numColorFragmentProcessors() const { return fColorFragmentProcessorCnt; }
+ int numCoverageFragmentProcessors() const {
+ return this->numFragmentProcessors() - fColorFragmentProcessorCnt;
+ }
+
+ const GrFragmentProcessor* colorFragmentProcessor(int idx) const {
+ SkASSERT(idx < fColorFragmentProcessorCnt);
+ return fFragmentProcessors[idx + fFragmentProcessorOffset].get();
+ }
+ const GrFragmentProcessor* coverageFragmentProcessor(int idx) const {
+ return fFragmentProcessors[idx + fColorFragmentProcessorCnt +
+ fFragmentProcessorOffset].get();
+ }
+
+ const GrXferProcessor* xferProcessor() const {
+ SkASSERT(this->isFinalized());
+ return fXP.fProcessor;
+ }
+ sk_sp<const GrXferProcessor> refXferProcessor() const {
+ SkASSERT(this->isFinalized());
+ return sk_ref_sp(fXP.fProcessor);
+ }
+
+ std::unique_ptr<const GrFragmentProcessor> detachColorFragmentProcessor(int idx) {
+ SkASSERT(idx < fColorFragmentProcessorCnt);
+ return std::move(fFragmentProcessors[idx + fFragmentProcessorOffset]);
+ }
+
+ std::unique_ptr<const GrFragmentProcessor> detachCoverageFragmentProcessor(int idx) {
+ return std::move(
+ fFragmentProcessors[idx + fFragmentProcessorOffset + fColorFragmentProcessorCnt]);
+ }
+
+ /** Comparisons are only legal on finalized processor sets. */
+ bool operator==(const GrProcessorSet& that) const;
+ bool operator!=(const GrProcessorSet& that) const { return !(*this == that); }
+
+ /**
+ * This is used to report results of processor analysis when a processor set is finalized (see
+ * below).
+ */
+ class Analysis {
+ public:
+ Analysis(const Analysis&) = default;
+ Analysis() { *reinterpret_cast<uint32_t*>(this) = 0; }
+
+ bool isInitialized() const { return fIsInitialized; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ bool requiresDstTexture() const { return fRequiresDstTexture; }
+ bool requiresNonOverlappingDraws() const { return fRequiresNonOverlappingDraws; }
+ bool isCompatibleWithCoverageAsAlpha() const { return fCompatibleWithCoverageAsAlpha; }
+        // Indicates whether any color fragment processors remain after analysis (i.e., whether
+        // they were not all eliminated).
+ bool hasColorFragmentProcessor() const { return fHasColorFragmentProcessor; }
+
+ bool inputColorIsIgnored() const { return fInputColorType == kIgnored_InputColorType; }
+ bool inputColorIsOverridden() const {
+ return fInputColorType == kOverridden_InputColorType;
+ }
+
+ private:
+ constexpr Analysis(Empty)
+ : fUsesLocalCoords(false)
+ , fCompatibleWithCoverageAsAlpha(true)
+ , fRequiresDstTexture(false)
+ , fRequiresNonOverlappingDraws(false)
+ , fHasColorFragmentProcessor(false)
+ , fIsInitialized(true)
+ , fInputColorType(kOriginal_InputColorType) {}
+ enum InputColorType : uint32_t {
+ kOriginal_InputColorType,
+ kOverridden_InputColorType,
+ kIgnored_InputColorType
+ };
+
+        // MSVC 2015 won't pack bitfields with different underlying types
+ using PackedBool = uint32_t;
+ using PackedInputColorType = uint32_t;
+
+ PackedBool fUsesLocalCoords : 1;
+ PackedBool fCompatibleWithCoverageAsAlpha : 1;
+ PackedBool fRequiresDstTexture : 1;
+ PackedBool fRequiresNonOverlappingDraws : 1;
+ PackedBool fHasColorFragmentProcessor : 1;
+ PackedBool fIsInitialized : 1;
+ PackedInputColorType fInputColorType : 2;
+
+ friend class GrProcessorSet;
+ };
+ GR_STATIC_ASSERT(sizeof(Analysis) <= sizeof(uint32_t));
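+    // Packing note (a sketch of the arithmetic, not normative): the six 1-bit PackedBools plus
+    // the 2-bit input-color type occupy 8 bits total, so the assert above (<= 4 bytes) holds
+    // with room to spare. Using uint32_t for every bitfield keeps the compiler from placing
+    // them in separate allocation units.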
+
+ /**
+ * This analyzes the processors given an op's input color and coverage as well as a clip. The
+ * state of the processor set may change to an equivalent but more optimal set of processors.
+ * This new state requires that the caller respect the returned 'inputColorOverride'. This is
+ * indicated by the returned Analysis's inputColorIsOverridden(). 'inputColorOverride' will not
+ * be written if the analysis does not override the input color.
+ *
+ * This must be called before the processor set is used to construct a GrPipeline and may only
+ * be called once.
+ *
+ * This also puts the processors in "pending execution" state and must be called when an op
+     * that owns a processor set is recorded to ensure pending reads and writes are propagated to
+ * resources referred to by the processors. Otherwise, data hazards may occur.
+ */
+ Analysis finalize(
+ const GrProcessorAnalysisColor&, const GrProcessorAnalysisCoverage,
+ const GrAppliedClip*, const GrUserStencilSettings*, bool hasMixedSampledCoverage,
+ const GrCaps&, GrClampType, SkPMColor4f* inputColorOverride);
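+    // Hedged usage sketch: a typical finalize call from an op. The inputs (colorInput,
+    // coverageInput, clip, caps, clampType) are assumed to come from the op being finalized;
+    // kUnused stencil settings are shown only as an example:
+    //
+    //     SkPMColor4f overrideColor;
+    //     Analysis analysis = fProcessors.finalize(colorInput, coverageInput, clip,
+    //                                              &GrUserStencilSettings::kUnused,
+    //                                              hasMixedSampledCoverage, caps, clampType,
+    //                                              &overrideColor);
+    //     if (analysis.inputColorIsOverridden()) {
+    //         // Replace the op's input color with overrideColor.
+    //     }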
+
+ bool isFinalized() const { return SkToBool(kFinalized_Flag & fFlags); }
+
+ /** These are valid only for non-LCD coverage. */
+ static const GrProcessorSet& EmptySet();
+ static GrProcessorSet MakeEmptySet();
+ static constexpr const Analysis EmptySetAnalysis() { return Analysis(Empty::kEmpty); }
+
+#ifdef SK_DEBUG
+ SkString dumpProcessors() const;
+#endif
+
+ void visitProxies(const GrOp::VisitProxyFunc& func) const {
+ for (int i = 0; i < this->numFragmentProcessors(); ++i) {
+ GrFragmentProcessor::TextureAccessIter iter(this->fragmentProcessor(i));
+ while (const GrFragmentProcessor::TextureSampler* sampler = iter.next()) {
+ bool mipped = (GrSamplerState::Filter::kMipMap == sampler->samplerState().filter());
+ func(sampler->proxy(), GrMipMapped(mipped));
+ }
+ }
+ }
+
+private:
+ GrProcessorSet(Empty) : fXP((const GrXferProcessor*)nullptr), fFlags(kFinalized_Flag) {}
+
+ int numFragmentProcessors() const {
+ return fFragmentProcessors.count() - fFragmentProcessorOffset;
+ }
+
+ const GrFragmentProcessor* fragmentProcessor(int idx) const {
+ return fFragmentProcessors[idx + fFragmentProcessorOffset].get();
+ }
+
+    // This absurdly large limit allows Analysis and GrProcessorSet to pack their fields together.
+ static constexpr int kMaxColorProcessors = UINT8_MAX;
+
+ enum Flags : uint16_t { kFinalized_Flag = 0x1 };
+
+ union XP {
+ XP(const GrXPFactory* factory) : fFactory(factory) {}
+ XP(const GrXferProcessor* processor) : fProcessor(processor) {}
+ explicit XP(XP&& that) : fProcessor(that.fProcessor) {
+ SkASSERT(fProcessor == that.fProcessor);
+ that.fProcessor = nullptr;
+ }
+ const GrXPFactory* fFactory;
+ const GrXferProcessor* fProcessor;
+ };
+
+ const GrXPFactory* xpFactory() const {
+ SkASSERT(!this->isFinalized());
+ return fXP.fFactory;
+ }
+
+ SkAutoSTArray<4, std::unique_ptr<const GrFragmentProcessor>> fFragmentProcessors;
+ XP fXP;
+ uint8_t fColorFragmentProcessorCnt = 0;
+ uint8_t fFragmentProcessorOffset = 0;
+ uint8_t fFlags;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp
new file mode 100644
index 0000000000..8d8efa7685
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrProcessorUnitTest.h"
+
+#if GR_TEST_UTILS
+
+std::unique_ptr<GrFragmentProcessor> GrProcessorUnitTest::MakeChildFP(GrProcessorTestData* data) {
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ fp = GrFragmentProcessorTestFactory::Make(data);
+ SkASSERT(fp);
+ } while (fp->numChildProcessors() != 0);
+ return fp;
+#else
+ SK_ABORT("Should not be called if !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS");
+#endif
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessorUnitTest.h b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.h
new file mode 100644
index 0000000000..6b0443e2b1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessorUnitTest_DEFINED
+#define GrProcessorUnitTest_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if GR_TEST_UTILS
+
+#include "include/private/SkTArray.h"
+#include "src/gpu/GrTestUtils.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class SkMatrix;
+class GrCaps;
+class GrContext;
+class GrProxyProvider;
+class GrRenderTargetContext;
+struct GrProcessorTestData;
+class GrTexture;
+class GrXPFactory;
+class GrGeometryProcessor;
+
+namespace GrProcessorUnitTest {
+
+// Used to access the dummy textures in TestCreate procs.
+enum {
+ kSkiaPMTextureIdx = 0,
+ kAlphaTextureIdx = 1,
+};
+
+/** This allows parent FPs to implement a test create with known leaf children in order to avoid
+creating an unbounded FP tree which may overflow various shader limits. */
+std::unique_ptr<GrFragmentProcessor> MakeChildFP(GrProcessorTestData*);
+
+}
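+// Illustrative sketch of the intended use inside a parent FP's TestCreate; GrMyParentFP and its
+// Make() are hypothetical names, not part of Skia:
+//
+//     std::unique_ptr<GrFragmentProcessor> GrMyParentFP::TestCreate(GrProcessorTestData* d) {
+//         std::unique_ptr<GrFragmentProcessor> child = GrProcessorUnitTest::MakeChildFP(d);
+//         return GrMyParentFP::Make(std::move(child));
+//     }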
+
+/*
+ * GrProcessorTestData is an argument struct to TestCreate functions.
+ * fProxies are valid texture proxies that can optionally be used to construct
+ * TextureSamplers. The first proxy has config kSkia8888_GrPixelConfig and the second has
+ * kAlpha_8_GrPixelConfig. TestCreate functions are also free to create additional textures using
+ * the GrContext.
+ */
+struct GrProcessorTestData {
+ GrProcessorTestData(SkRandom* random,
+ GrContext* context,
+ const GrRenderTargetContext* renderTargetContext,
+ sk_sp<GrTextureProxy> proxies[2],
+ GrColorType proxyColorTypes[2])
+ : fRandom(random)
+ , fRenderTargetContext(renderTargetContext)
+ , fContext(context) {
+ SkASSERT(proxies[0] && proxies[1]);
+ fProxies[0] = proxies[0];
+ fProxies[1] = proxies[1];
+ fProxyColorTypes[0] = proxyColorTypes[0];
+ fProxyColorTypes[1] = proxyColorTypes[1];
+ }
+ SkRandom* fRandom;
+ const GrRenderTargetContext* fRenderTargetContext;
+
+ GrContext* context() { return fContext; }
+ GrResourceProvider* resourceProvider();
+ GrProxyProvider* proxyProvider();
+ const GrCaps* caps();
+ sk_sp<GrTextureProxy> textureProxy(int index) { return fProxies[index]; }
+ GrColorType textureProxyColorType(int index) { return fProxyColorTypes[index]; }
+
+private:
+ GrContext* fContext;
+ sk_sp<GrTextureProxy> fProxies[2];
+ GrColorType fProxyColorTypes[2];
+};
+
+class GrProcessor;
+class GrTexture;
+
+template <class ProcessorSmartPtr>
+class GrProcessorTestFactory : private SkNoncopyable {
+public:
+ using Processor = typename ProcessorSmartPtr::element_type;
+ using MakeProc = ProcessorSmartPtr (*)(GrProcessorTestData*);
+
+ GrProcessorTestFactory(MakeProc makeProc) {
+ fMakeProc = makeProc;
+ GetFactories()->push_back(this);
+ }
+
+ /** Pick a random factory function and create a processor. */
+ static ProcessorSmartPtr Make(GrProcessorTestData* data) {
+ VerifyFactoryCount();
+ if (GetFactories()->count() == 0) {
+ return nullptr;
+ }
+ uint32_t idx = data->fRandom->nextRangeU(0, GetFactories()->count() - 1);
+ return MakeIdx(idx, data);
+ }
+
+ /** Number of registered factory functions */
+ static int Count() { return GetFactories()->count(); }
+
+ /** Use factory function at Index idx to create a processor. */
+ static ProcessorSmartPtr MakeIdx(int idx, GrProcessorTestData* data) {
+ SkASSERT(idx < GetFactories()->count());
+ GrProcessorTestFactory<ProcessorSmartPtr>* factory = (*GetFactories())[idx];
+ ProcessorSmartPtr processor = factory->fMakeProc(data);
+ SkASSERT(processor);
+ return processor;
+ }
+
+private:
+ /**
+     * Verifies that the number of registered factories matches the expected count.
+ */
+ static void VerifyFactoryCount();
+
+ MakeProc fMakeProc;
+
+ static SkTArray<GrProcessorTestFactory<ProcessorSmartPtr>*, true>* GetFactories();
+};
+
+using GrFragmentProcessorTestFactory = GrProcessorTestFactory<std::unique_ptr<GrFragmentProcessor>>;
+using GrGeometryProcessorTestFactory = GrProcessorTestFactory<sk_sp<GrGeometryProcessor>>;
+
+class GrXPFactoryTestFactory : private SkNoncopyable {
+public:
+ using GetFn = const GrXPFactory*(GrProcessorTestData*);
+
+ GrXPFactoryTestFactory(GetFn* getProc) : fGetProc(getProc) { GetFactories()->push_back(this); }
+
+ static const GrXPFactory* Get(GrProcessorTestData* data) {
+ VerifyFactoryCount();
+ if (GetFactories()->count() == 0) {
+ return nullptr;
+ }
+ uint32_t idx = data->fRandom->nextRangeU(0, GetFactories()->count() - 1);
+ const GrXPFactory* xpf = (*GetFactories())[idx]->fGetProc(data);
+ SkASSERT(xpf);
+ return xpf;
+ }
+
+private:
+ static void VerifyFactoryCount();
+
+ GetFn* fGetProc;
+ static SkTArray<GrXPFactoryTestFactory*, true>* GetFactories();
+};
+
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+
+/** GrProcessor subclasses should insert this macro in their declaration to be included in the
+ * program generation unit test.
+ */
+#define GR_DECLARE_GEOMETRY_PROCESSOR_TEST \
+ static GrGeometryProcessorTestFactory gTestFactory SK_UNUSED; \
+ static sk_sp<GrGeometryProcessor> TestCreate(GrProcessorTestData*);
+
+#define GR_DECLARE_FRAGMENT_PROCESSOR_TEST \
+ static GrFragmentProcessorTestFactory gTestFactory SK_UNUSED; \
+ static std::unique_ptr<GrFragmentProcessor> TestCreate(GrProcessorTestData*);
+
+#define GR_DECLARE_XP_FACTORY_TEST \
+ static GrXPFactoryTestFactory gTestFactory SK_UNUSED; \
+ static const GrXPFactory* TestGet(GrProcessorTestData*);
+
+/** GrProcessor subclasses should insert this macro in their implementation file. They must then
+ * also implement this static function:
+ * GrProcessor* TestCreate(GrProcessorTestData*);
+ */
+#define GR_DEFINE_FRAGMENT_PROCESSOR_TEST(Effect) \
+ GrFragmentProcessorTestFactory Effect::gTestFactory(Effect::TestCreate)
+
+#define GR_DEFINE_GEOMETRY_PROCESSOR_TEST(Effect) \
+ GrGeometryProcessorTestFactory Effect::gTestFactory(Effect::TestCreate)
+
+#define GR_DEFINE_XP_FACTORY_TEST(Factory) \
+ GrXPFactoryTestFactory Factory::gTestFactory(Factory::TestGet)
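+// Sketch of the intended pairing (GrMyEffect is a hypothetical subclass): the class declaration
+// contains GR_DECLARE_FRAGMENT_PROCESSOR_TEST, and its .cpp contains
+// GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMyEffect) plus a definition of
+// GrMyEffect::TestCreate(GrProcessorTestData*).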
+
+#else // !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+
+// The unit test relies on static initializers. Just declare the TestCreate function so that
+// its definitions will compile.
+#define GR_DECLARE_FRAGMENT_PROCESSOR_TEST \
+ static std::unique_ptr<GrFragmentProcessor> TestCreate(GrProcessorTestData*);
+#define GR_DEFINE_FRAGMENT_PROCESSOR_TEST(X)
+
+// The unit test relies on static initializers. Just declare the TestCreate function so that
+// its definitions will compile.
+#define GR_DECLARE_GEOMETRY_PROCESSOR_TEST \
+ static sk_sp<GrGeometryProcessor> TestCreate(GrProcessorTestData*);
+#define GR_DEFINE_GEOMETRY_PROCESSOR_TEST(X)
+
+// The unit test relies on static initializers. Just declare the TestGet function so that
+// its definitions will compile.
+#define GR_DECLARE_XP_FACTORY_TEST \
+ const GrXPFactory* TestGet(GrProcessorTestData*);
+#define GR_DEFINE_XP_FACTORY_TEST(X)
+
+#endif // !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+#else // GR_TEST_UTILS
+ #define GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+ #define GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ #define GR_DECLARE_XP_FACTORY_TEST
+ #define GR_DEFINE_FRAGMENT_PROCESSOR_TEST(...)
+ #define GR_DEFINE_GEOMETRY_PROCESSOR_TEST(...)
+ #define GR_DEFINE_XP_FACTORY_TEST(...)
+#endif // GR_TEST_UTILS
+#endif // GrProcessorUnitTest_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrProgramDesc.cpp b/gfx/skia/skia/src/gpu/GrProgramDesc.cpp
new file mode 100644
index 0000000000..9d42d694a5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramDesc.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrProgramDesc.h"
+
+#include "include/private/SkChecksum.h"
+#include "include/private/SkTo.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrPrimitiveProcessor.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+enum {
+ kSamplerOrImageTypeKeyBits = 4
+};
+
+static inline uint16_t texture_type_key(GrTextureType type) {
+ int value = UINT16_MAX;
+ switch (type) {
+ case GrTextureType::k2D:
+ value = 0;
+ break;
+ case GrTextureType::kExternal:
+ value = 1;
+ break;
+ case GrTextureType::kRectangle:
+ value = 2;
+ break;
+ default:
+ SK_ABORT("Unexpected texture type");
+ value = 3;
+ break;
+ }
+ SkASSERT((value & ((1 << kSamplerOrImageTypeKeyBits) - 1)) == value);
+ return SkToU16(value);
+}
+
+static uint32_t sampler_key(GrTextureType textureType, const GrSwizzle& swizzle,
+ const GrShaderCaps& caps) {
+ int samplerTypeKey = texture_type_key(textureType);
+
+ GR_STATIC_ASSERT(2 == sizeof(swizzle.asKey()));
+ uint16_t swizzleKey = 0;
+ if (caps.textureSwizzleAppliedInShader()) {
+ swizzleKey = swizzle.asKey();
+ }
+ return SkToU32(samplerTypeKey | swizzleKey << kSamplerOrImageTypeKeyBits);
+}
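+// Resulting key layout, per the packing above:
+//     bits [0, 4)  : texture type key
+//     bits [4, 20) : swizzle key (0 when the swizzle is not applied in the shader)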
+
+static void add_sampler_keys(GrProcessorKeyBuilder* b, const GrFragmentProcessor& fp,
+ GrGpu* gpu, const GrShaderCaps& caps) {
+ int numTextureSamplers = fp.numTextureSamplers();
+ if (!numTextureSamplers) {
+ return;
+ }
+ for (int i = 0; i < numTextureSamplers; ++i) {
+ const GrFragmentProcessor::TextureSampler& sampler = fp.textureSampler(i);
+ const GrTexture* tex = sampler.peekTexture();
+ uint32_t samplerKey = sampler_key(
+ tex->texturePriv().textureType(), sampler.swizzle(), caps);
+ uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(
+ sampler.samplerState(), sampler.proxy()->backendFormat());
+ if (extraSamplerKey) {
+ // We first mark the normal sampler key with last bit to flag that it has an extra
+ // sampler key. We then add both keys.
+ SkASSERT((samplerKey & (1 << 31)) == 0);
+ b->add32(samplerKey | (1 << 31));
+ b->add32(extraSamplerKey);
+ } else {
+ b->add32(samplerKey);
+ }
+ }
+}
+
+static void add_sampler_keys(GrProcessorKeyBuilder* b, const GrPrimitiveProcessor& pp,
+ const GrShaderCaps& caps) {
+ int numTextureSamplers = pp.numTextureSamplers();
+ if (!numTextureSamplers) {
+ return;
+ }
+ for (int i = 0; i < numTextureSamplers; ++i) {
+ const GrPrimitiveProcessor::TextureSampler& sampler = pp.textureSampler(i);
+ uint32_t samplerKey = sampler_key(
+ sampler.textureType(), sampler.swizzle(), caps);
+ uint32_t extraSamplerKey = sampler.extraSamplerKey();
+ if (extraSamplerKey) {
+ // We first mark the normal sampler key with last bit to flag that it has an extra
+ // sampler key. We then add both keys.
+ SkASSERT((samplerKey & (1 << 31)) == 0);
+ b->add32(samplerKey | (1 << 31));
+ b->add32(extraSamplerKey);
+ } else {
+ b->add32(samplerKey);
+ }
+ }
+}
+
+/**
+ * A function which emits a meta key into the key builder. This is required because shader code may
+ * be dependent on properties of the effect that the effect itself doesn't use
+ * in its key (e.g. the pixel format of textures used). So we create a meta-key for
+ * every effect using this function. It is also responsible for inserting the effect's class ID
+ * which must be different for every GrProcessor subclass. It can fail if an effect uses too many
+ * transforms, etc., for the space allotted in the meta-key. Note that both FPs and GPs share
+ * this function because it is hairy, even though FPs do not have attribs and GPs do not have
+ * transforms.
+ */
+static bool gen_meta_key(const GrFragmentProcessor& fp,
+ GrGpu* gpu,
+ const GrShaderCaps& shaderCaps,
+ uint32_t transformKey,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = fp.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)UINT16_MAX);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ add_sampler_keys(b, fp, gpu, shaderCaps);
+
+ uint32_t* key = b->add32n(2);
+ key[0] = (classID << 16) | SkToU32(processorKeySize);
+ key[1] = transformKey;
+ return true;
+}
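+// The two meta-key words appended above thus look like (sketch):
+//     key[0] = (classID << 16) | processorKeySize   // each must fit in 16 bits, or we fail
+//     key[1] = transformKey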
+
+static bool gen_meta_key(const GrPrimitiveProcessor& pp,
+ const GrShaderCaps& shaderCaps,
+ uint32_t transformKey,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = pp.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)UINT16_MAX);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ add_sampler_keys(b, pp, shaderCaps);
+
+ uint32_t* key = b->add32n(2);
+ key[0] = (classID << 16) | SkToU32(processorKeySize);
+ key[1] = transformKey;
+ return true;
+}
+
+static bool gen_meta_key(const GrXferProcessor& xp,
+ const GrShaderCaps& shaderCaps,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = xp.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)UINT16_MAX);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ b->add32((classID << 16) | SkToU32(processorKeySize));
+ return true;
+}
+
+static bool gen_frag_proc_and_meta_keys(const GrPrimitiveProcessor& primProc,
+ const GrFragmentProcessor& fp,
+ GrGpu* gpu,
+ const GrShaderCaps& shaderCaps,
+ GrProcessorKeyBuilder* b) {
+ for (int i = 0; i < fp.numChildProcessors(); ++i) {
+ if (!gen_frag_proc_and_meta_keys(primProc, fp.childProcessor(i), gpu, shaderCaps, b)) {
+ return false;
+ }
+ }
+
+ fp.getGLSLProcessorKey(shaderCaps, b);
+
+ return gen_meta_key(fp, gpu, shaderCaps, primProc.getTransformKey(fp.coordTransforms(),
+ fp.numCoordTransforms()), b);
+}
+
+bool GrProgramDesc::Build(GrProgramDesc* desc, const GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo, GrPrimitiveType primitiveType,
+ GrGpu* gpu) {
+ // The descriptor is used as a cache key. Thus when a field of the
+ // descriptor will not affect program generation (because of the attribute
+ // bindings in use or other descriptor field settings) it should be set
+ // to a canonical value to avoid duplicate programs with different keys.
+
+ const GrShaderCaps& shaderCaps = *gpu->caps()->shaderCaps();
+
+ GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
+ // Make room for everything up to the effect keys.
+ desc->key().reset();
+ desc->key().push_back_n(kProcessorKeysOffset);
+
+ GrProcessorKeyBuilder b(&desc->key());
+
+ programInfo.primProc().getGLSLProcessorKey(shaderCaps, &b);
+ programInfo.primProc().getAttributeKey(&b);
+ if (!gen_meta_key(programInfo.primProc(), shaderCaps, 0, &b)) {
+ desc->key().reset();
+ return false;
+ }
+
+ for (int i = 0; i < programInfo.pipeline().numFragmentProcessors(); ++i) {
+ const GrFragmentProcessor& fp = programInfo.pipeline().getFragmentProcessor(i);
+ if (!gen_frag_proc_and_meta_keys(programInfo.primProc(), fp, gpu, shaderCaps, &b)) {
+ desc->key().reset();
+ return false;
+ }
+ }
+
+ const GrXferProcessor& xp = programInfo.pipeline().getXferProcessor();
+ const GrSurfaceOrigin* originIfDstTexture = nullptr;
+ GrSurfaceOrigin origin;
+ if (programInfo.pipeline().dstTextureProxy()) {
+ origin = programInfo.pipeline().dstTextureProxy()->origin();
+ originIfDstTexture = &origin;
+ }
+ xp.getGLSLProcessorKey(shaderCaps, &b, originIfDstTexture);
+ if (!gen_meta_key(xp, shaderCaps, &b)) {
+ desc->key().reset();
+ return false;
+ }
+
+ if (programInfo.requestedFeatures() & GrProcessor::CustomFeatures::kSampleLocations) {
+ SkASSERT(programInfo.pipeline().isHWAntialiasState());
+ b.add32(renderTarget->renderTargetPriv().getSamplePatternKey());
+ }
+
+ // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
+ // Because header is a pointer into the dynamic array, we can't push any new data into the key
+ // below here.
+ KeyHeader* header = desc->atOffset<KeyHeader, kHeaderOffset>();
+
+ // make sure any padding in the header is zeroed.
+ memset(header, 0, kHeaderSize);
+ header->fOutputSwizzle = programInfo.pipeline().outputSwizzle().asKey();
+ header->fColorFragmentProcessorCnt = programInfo.pipeline().numColorFragmentProcessors();
+ header->fCoverageFragmentProcessorCnt = programInfo.pipeline().numCoverageFragmentProcessors();
+ // Fail if the client requested more processors than the key can fit.
+ if (header->fColorFragmentProcessorCnt != programInfo.pipeline().numColorFragmentProcessors() ||
+ header->fCoverageFragmentProcessorCnt !=
+ programInfo.pipeline().numCoverageFragmentProcessors()) {
+ return false;
+ }
+ // If we knew the shader won't depend on origin, we could skip this (and use the same program
+ // for both origins). Instrumenting all fragment processors would be difficult and error prone.
+ header->fSurfaceOriginKey =
+ GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(programInfo.origin());
+ header->fProcessorFeatures = (uint8_t)programInfo.requestedFeatures();
+ // Ensure enough bits.
+ SkASSERT(header->fProcessorFeatures == (int) programInfo.requestedFeatures());
+ header->fSnapVerticesToPixelCenters = programInfo.pipeline().snapVerticesToPixelCenters();
+ header->fHasPointSize = (primitiveType == GrPrimitiveType::kPoints);
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrProgramDesc.h b/gfx/skia/skia/src/gpu/GrProgramDesc.h
new file mode 100644
index 0000000000..391bfe1a80
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramDesc.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProgramDesc_DEFINED
+#define GrProgramDesc_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkOpts.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrProgramInfo;
+class GrShaderCaps;
+
+/** This class describes a program to generate. It also serves as a program cache key */
+class GrProgramDesc {
+public:
+ // Creates an uninitialized key that must be populated by GrGpu::buildProgramDesc()
+ GrProgramDesc() {}
+
+ /**
+     * Builds a program descriptor.
+     *
+     * @param desc The descriptor to be filled in.
+     * @param renderTarget The target of the draw
+     * @param programInfo Program information needed to build the key
+ * @param primitiveType Controls whether the shader will output a point size.
+ * @param gpu Pointer to the GrGpu object the program will be used with.
+ **/
+ static bool Build(GrProgramDesc*, const GrRenderTarget*, const GrProgramInfo&,
+ GrPrimitiveType, GrGpu*);
+
+ // This is strictly an OpenGL call since the other backends have additional data in their
+ // keys
+ static bool BuildFromData(GrProgramDesc* desc, const void* keyData, size_t keyLength) {
+ if (!SkTFitsIn<int>(keyLength)) {
+ return false;
+ }
+ desc->fKey.reset(SkToInt(keyLength));
+ memcpy(desc->fKey.begin(), keyData, keyLength);
+ return true;
+ }
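+    // Hedged usage sketch of the descriptor as a cache key; fProgramCache, find, and
+    // createProgram are hypothetical backend-side names:
+    //
+    //     GrProgramDesc desc;
+    //     if (!GrProgramDesc::Build(&desc, renderTarget, programInfo, primitiveType, gpu)) {
+    //         return nullptr;
+    //     }
+    //     auto program = fProgramCache->find(desc);
+    //     if (!program) {
+    //         program = createProgram(desc, programInfo);
+    //     }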
+
+ // Returns this as a uint32_t array to be used as a key in the program cache.
+ const uint32_t* asKey() const {
+ return reinterpret_cast<const uint32_t*>(fKey.begin());
+ }
+
+ // Gets the number of bytes in asKey(). It will be a 4-byte aligned value.
+ uint32_t keyLength() const {
+ SkASSERT(0 == (fKey.count() % 4));
+ return fKey.count();
+ }
+
+ GrProgramDesc& operator= (const GrProgramDesc& other) {
+ uint32_t keyLength = other.keyLength();
+ fKey.reset(SkToInt(keyLength));
+ memcpy(fKey.begin(), other.fKey.begin(), keyLength);
+ return *this;
+ }
+
+ bool operator== (const GrProgramDesc& that) const {
+ if (this->keyLength() != that.keyLength()) {
+ return false;
+ }
+
+ SkASSERT(SkIsAlign4(this->keyLength()));
+ int l = this->keyLength() >> 2;
+ const uint32_t* aKey = this->asKey();
+ const uint32_t* bKey = that.asKey();
+ for (int i = 0; i < l; ++i) {
+ if (aKey[i] != bKey[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!= (const GrProgramDesc& other) const {
+ return !(*this == other);
+ }
+
+ // TODO: remove this use of the header
+ bool hasPointSize() const { return this->header().fHasPointSize; }
+
+protected:
+ struct KeyHeader {
+ // Set to uniquely identify any swizzling of the shader's output color(s).
+ uint16_t fOutputSwizzle;
+ uint8_t fColorFragmentProcessorCnt; // Can be packed into 4 bits if required.
+ uint8_t fCoverageFragmentProcessorCnt;
+ // Set to uniquely identify the rt's origin, or 0 if the shader does not require this info.
+ uint8_t fSurfaceOriginKey : 2;
+ uint8_t fProcessorFeatures : 1;
+ bool fSnapVerticesToPixelCenters : 1;
+ bool fHasPointSize : 1;
+ uint8_t fPad : 3;
+ };
+ GR_STATIC_ASSERT(sizeof(KeyHeader) == 6);
+
+ const KeyHeader& header() const { return *this->atOffset<KeyHeader, kHeaderOffset>(); }
+
+ template<typename T, size_t OFFSET> T* atOffset() {
+ return reinterpret_cast<T*>(reinterpret_cast<intptr_t>(fKey.begin()) + OFFSET);
+ }
+
+ template<typename T, size_t OFFSET> const T* atOffset() const {
+ return reinterpret_cast<const T*>(reinterpret_cast<intptr_t>(fKey.begin()) + OFFSET);
+ }
+
+ // The key, stored in fKey, is composed of two parts:
+ // 1. Header struct defined above.
+ // 2. A Backend specific payload which includes the per-processor keys.
+ enum KeyOffsets {
+ kHeaderOffset = 0,
+ kHeaderSize = SkAlign4(sizeof(KeyHeader)),
+        // This is the offset into the backend specific part of the key, which includes
+ // per-processor keys.
+ kProcessorKeysOffset = kHeaderOffset + kHeaderSize,
+ };
+
+ enum {
+ kMaxPreallocProcessors = 8,
+ kIntsPerProcessor = 4, // This is an overestimate of the average effect key size.
+ kPreAllocSize = kHeaderOffset + kHeaderSize +
+ kMaxPreallocProcessors * sizeof(uint32_t) * kIntsPerProcessor,
+ };
+
+ SkSTArray<kPreAllocSize, uint8_t, true>& key() { return fKey; }
+ const SkSTArray<kPreAllocSize, uint8_t, true>& key() const { return fKey; }
+
+private:
+ SkSTArray<kPreAllocSize, uint8_t, true> fKey;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProgramInfo.cpp b/gfx/skia/skia/src/gpu/GrProgramInfo.cpp
new file mode 100644
index 0000000000..3391b61c27
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramInfo.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrProgramInfo.h"
+
+
+#ifdef SK_DEBUG
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrTexturePriv.h"
+
+void GrProgramInfo::validate() const {
+ SkASSERT(!fPipeline.isBad());
+
+ if (this->hasDynamicPrimProcTextures()) {
+ SkASSERT(!this->hasFixedPrimProcTextures());
+ SkASSERT(fPrimProc.numTextureSamplers());
+ } else if (this->hasFixedPrimProcTextures()) {
+ SkASSERT(fPrimProc.numTextureSamplers());
+ } else {
+ SkASSERT(!fPrimProc.numTextureSamplers());
+ }
+
+ SkASSERT(!fPipeline.isScissorEnabled() || this->hasFixedScissor() ||
+ this->hasDynamicScissors());
+
+ if (this->hasDynamicPrimProcTextures()) {
+ // Check that, for a given sampler, the properties of the dynamic textures remain
+ // the same for all the meshes
+ for (int s = 0; s < this->primProc().numTextureSamplers(); ++s) {
+ auto dynamicPrimProcTextures = this->dynamicPrimProcTextures(0);
+
+ const GrBackendFormat& format = dynamicPrimProcTextures[s]->backendFormat();
+ GrTextureType type = dynamicPrimProcTextures[s]->textureType();
+ GrPixelConfig config = dynamicPrimProcTextures[s]->config();
+
+ for (int m = 1; m < fNumDynamicStateArrays; ++m) {
+ dynamicPrimProcTextures = this->dynamicPrimProcTextures(m);
+
+ auto testProxy = dynamicPrimProcTextures[s];
+ SkASSERT(testProxy->backendFormat() == format);
+ SkASSERT(testProxy->textureType() == type);
+ SkASSERT(testProxy->config() == config);
+ }
+ }
+ }
+}
+
+void GrProgramInfo::checkAllInstantiated() const {
+ if (this->hasFixedPrimProcTextures()) {
+ auto fixedPrimProcTextures = this->fixedPrimProcTextures();
+ for (int s = 0; s < this->primProc().numTextureSamplers(); ++s) {
+ SkASSERT(fixedPrimProcTextures[s]->isInstantiated());
+ }
+ }
+
+ if (this->hasDynamicPrimProcTextures()) {
+ for (int m = 0; m < fNumDynamicStateArrays; ++m) {
+ auto dynamicPrimProcTextures = this->dynamicPrimProcTextures(m);
+ for (int s = 0; s < this->primProc().numTextureSamplers(); ++s) {
+ SkASSERT(dynamicPrimProcTextures[s]->isInstantiated());
+ }
+ }
+ }
+}
+
+void GrProgramInfo::checkMSAAAndMIPSAreResolved() const {
+
+ auto assertResolved = [](GrTexture* tex, const GrSamplerState& sampler) {
+ SkASSERT(tex);
+
+ // Ensure mipmaps were all resolved ahead of time by the DAG.
+ if (GrSamplerState::Filter::kMipMap == sampler.filter() &&
+ (tex->width() != 1 || tex->height() != 1)) {
+ // There are some cases where we might be given a non-mipmapped texture with a mipmap
+ // filter. See skbug.com/7094.
+ SkASSERT(tex->texturePriv().mipMapped() != GrMipMapped::kYes ||
+ !tex->texturePriv().mipMapsAreDirty());
+ }
+ };
+
+ if (this->hasDynamicPrimProcTextures()) {
+ for (int m = 0; m < fNumDynamicStateArrays; ++m) {
+ auto dynamicPrimProcTextures = this->dynamicPrimProcTextures(m);
+
+ for (int s = 0; s < this->primProc().numTextureSamplers(); ++s) {
+ auto* tex = dynamicPrimProcTextures[s]->peekTexture();
+ assertResolved(tex, this->primProc().textureSampler(s).samplerState());
+ }
+ }
+ } else if (this->hasFixedPrimProcTextures()) {
+ auto fixedPrimProcTextures = this->fixedPrimProcTextures();
+
+ for (int s = 0; s < this->primProc().numTextureSamplers(); ++s) {
+ auto* tex = fixedPrimProcTextures[s]->peekTexture();
+ assertResolved(tex, this->primProc().textureSampler(s).samplerState());
+ }
+ }
+
+ GrFragmentProcessor::Iter iter(this->pipeline());
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ for (int s = 0; s < fp->numTextureSamplers(); ++s) {
+ const auto& textureSampler = fp->textureSampler(s);
+ assertResolved(textureSampler.peekTexture(), textureSampler.samplerState());
+ }
+ }
+}
+
+void GrProgramInfo::compatibleWithMeshes(const GrMesh meshes[], int meshCount) const {
+ SkASSERT(!fNumDynamicStateArrays || meshCount == fNumDynamicStateArrays);
+
+ for (int i = 0; i < meshCount; ++i) {
+ SkASSERT(fPrimProc.hasVertexAttributes() == meshes[i].hasVertexData());
+ SkASSERT(fPrimProc.hasInstanceAttributes() == meshes[i].hasInstanceData());
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProgramInfo.h b/gfx/skia/skia/src/gpu/GrProgramInfo.h
new file mode 100644
index 0000000000..a4c46e6749
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramInfo.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProgramInfo_DEFINED
+#define GrProgramInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrPrimitiveProcessor.h"
+
+class GrMesh;
+
+class GrProgramInfo {
+public:
+ GrProgramInfo(int numSamples,
+ GrSurfaceOrigin origin,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrPipeline::DynamicStateArrays* dynamicStateArrays,
+ int numDynamicStateArrays)
+ : fNumSamples(numSamples)
+ , fOrigin(origin)
+ , fPipeline(pipeline)
+ , fPrimProc(primProc)
+ , fFixedDynamicState(fixedDynamicState)
+ , fDynamicStateArrays(dynamicStateArrays)
+ , fNumDynamicStateArrays(numDynamicStateArrays) {
+ fRequestedFeatures = fPrimProc.requestedFeatures();
+ for (int i = 0; i < fPipeline.numFragmentProcessors(); ++i) {
+ fRequestedFeatures |= fPipeline.getFragmentProcessor(i).requestedFeatures();
+ }
+ fRequestedFeatures |= fPipeline.getXferProcessor().requestedFeatures();
+
+ SkDEBUGCODE(this->validate();)
+ (void) fNumDynamicStateArrays; // touch this to quiet unused member warnings
+ }
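+    // Construction sketch (illustrative; rt, origin, and the other arguments are assumed from
+    // the caller): all reference/pointer arguments must outlive this object, since GrProgramInfo
+    // stores them without taking ownership.
+    //
+    //     GrProgramInfo programInfo(rt->numSamples(), origin, pipeline, primProc,
+    //                               fixedDynamicState, dynamicStateArrays, meshCount);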
+
+ GrProcessor::CustomFeatures requestedFeatures() const { return fRequestedFeatures; }
+
+ int numSamples() const { return fNumSamples; }
+ GrSurfaceOrigin origin() const { return fOrigin; }
+ const GrPipeline& pipeline() const { return fPipeline; }
+ const GrPrimitiveProcessor& primProc() const { return fPrimProc; }
+ const GrPipeline::FixedDynamicState* fixedDynamicState() const { return fFixedDynamicState; }
+
+ bool hasDynamicScissors() const {
+ return fPipeline.isScissorEnabled() &&
+ fDynamicStateArrays && fDynamicStateArrays->fScissorRects;
+ }
+
+ const SkIRect& dynamicScissor(int i) const {
+ SkASSERT(this->hasDynamicScissors());
+
+ return fDynamicStateArrays->fScissorRects[i];
+ }
+
+ bool hasFixedScissor() const { return fPipeline.isScissorEnabled() && fFixedDynamicState; }
+
+ const SkIRect& fixedScissor() const {
+ SkASSERT(this->hasFixedScissor());
+
+ return fFixedDynamicState->fScissorRect;
+ }
+
+ bool hasDynamicPrimProcTextures() const {
+ return fDynamicStateArrays && fDynamicStateArrays->fPrimitiveProcessorTextures;
+ }
+
+ const GrTextureProxy* const* dynamicPrimProcTextures(int i) const {
+ SkASSERT(this->hasDynamicPrimProcTextures());
+ SkASSERT(i < fNumDynamicStateArrays);
+
+ return fDynamicStateArrays->fPrimitiveProcessorTextures +
+ i * fPrimProc.numTextureSamplers();
+ }
+
+ bool hasFixedPrimProcTextures() const {
+ return fFixedDynamicState && fFixedDynamicState->fPrimitiveProcessorTextures;
+ }
+
+ const GrTextureProxy* const* fixedPrimProcTextures() const {
+ SkASSERT(this->hasFixedPrimProcTextures());
+
+ return fFixedDynamicState->fPrimitiveProcessorTextures;
+ }
+
+#ifdef SK_DEBUG
+ void validate() const;
+ void checkAllInstantiated() const;
+ void checkMSAAAndMIPSAreResolved() const;
+ void compatibleWithMeshes(const GrMesh meshes[], int meshCount) const;
+
+ bool isNVPR() const {
+ return fPrimProc.isPathRendering() && !fPrimProc.willUseGeoShader() &&
+ !fPrimProc.numVertexAttributes() && !fPrimProc.numInstanceAttributes();
+ }
+#endif
+
+private:
+ const int fNumSamples;
+ const GrSurfaceOrigin fOrigin;
+ const GrPipeline& fPipeline;
+ const GrPrimitiveProcessor& fPrimProc;
+ const GrPipeline::FixedDynamicState* fFixedDynamicState;
+ const GrPipeline::DynamicStateArrays* fDynamicStateArrays;
+ const int fNumDynamicStateArrays;
+ GrProcessor::CustomFeatures fRequestedFeatures;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProxyProvider.cpp b/gfx/skia/skia/src/gpu/GrProxyProvider.cpp
new file mode 100644
index 0000000000..2c7153ae7e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProxyProvider.cpp
@@ -0,0 +1,958 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrProxyProvider.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImage.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrImageContext.h"
+#include "include/private/GrResourceKey.h"
+#include "include/private/GrSingleOwner.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrImageContextPriv.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureProxyCacheAccess.h"
+#include "src/gpu/GrTextureRenderTargetProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/image/SkImage_Base.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fImageContext->priv().singleOwner());)
+
+GrProxyProvider::GrProxyProvider(GrImageContext* imageContext) : fImageContext(imageContext) {}
+
+GrProxyProvider::~GrProxyProvider() {
+ if (this->renderingDirectly()) {
+ // In DDL-mode a proxy provider can still have extant uniquely keyed proxies (since
+ // they need their unique keys to, potentially, find a cached resource when the
+ // DDL is played) but, in non-DDL-mode they should all have been cleaned up by this point.
+ SkASSERT(!fUniquelyKeyedProxies.count());
+ }
+}
+
+bool GrProxyProvider::assignUniqueKeyToProxy(const GrUniqueKey& key, GrTextureProxy* proxy) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(key.isValid());
+ if (this->isAbandoned() || !proxy) {
+ return false;
+ }
+
+#ifdef SK_DEBUG
+ {
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (direct) {
+ GrResourceCache* resourceCache = direct->priv().getResourceCache();
+ // If there is already a GrResource with this key then the caller has violated the
+ // normal usage pattern of uniquely keyed resources (e.g., they have created one w/o
+ // first seeing if it already existed in the cache).
+ SkASSERT(!resourceCache->findAndRefUniqueResource(key));
+ }
+ }
+#endif
+
+ SkASSERT(!fUniquelyKeyedProxies.find(key)); // multiple proxies can't get the same key
+
+ proxy->cacheAccess().setUniqueKey(this, key);
+ SkASSERT(proxy->getUniqueKey() == key);
+ fUniquelyKeyedProxies.add(proxy);
+ return true;
+}
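+// Hedged sketch of the unique-key protocol the assertions above enforce: look the key up before
+// creating and keying a new proxy (makeProxy is a hypothetical factory):
+//
+//     sk_sp<GrTextureProxy> proxy = provider->findProxyByUniqueKey(key, origin);
+//     if (!proxy) {
+//         proxy = makeProxy();
+//         provider->assignUniqueKeyToProxy(key, proxy.get());
+//     }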
+
+void GrProxyProvider::adoptUniqueKeyFromSurface(GrTextureProxy* proxy, const GrSurface* surf) {
+ SkASSERT(surf->getUniqueKey().isValid());
+ proxy->cacheAccess().setUniqueKey(this, surf->getUniqueKey());
+ SkASSERT(proxy->getUniqueKey() == surf->getUniqueKey());
+ // multiple proxies can't get the same key
+ SkASSERT(!fUniquelyKeyedProxies.find(surf->getUniqueKey()));
+ fUniquelyKeyedProxies.add(proxy);
+}
+
+void GrProxyProvider::removeUniqueKeyFromProxy(GrTextureProxy* proxy) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(proxy);
+ SkASSERT(proxy->getUniqueKey().isValid());
+
+ if (this->isAbandoned()) {
+ return;
+ }
+
+ this->processInvalidUniqueKey(proxy->getUniqueKey(), proxy, InvalidateGPUResource::kYes);
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::findProxyByUniqueKey(const GrUniqueKey& key,
+ GrSurfaceOrigin origin) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ GrTextureProxy* proxy = fUniquelyKeyedProxies.find(key);
+ if (proxy) {
+ SkASSERT(proxy->refCnt() >= 1);
+ SkASSERT(proxy->origin() == origin);
+ return sk_ref_sp(proxy);
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+sk_sp<GrTextureProxy> GrProxyProvider::testingOnly_createInstantiatedProxy(
+ const SkISize& size,
+ GrColorType colorType,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ if (this->caps()->isFormatCompressed(format)) {
+ // TODO: Allow this to go to GrResourceProvider::createCompressedTexture() once we no longer
+ // rely on GrColorType to get to GrPixelConfig. Currently this will cause
+ // makeConfigSpecific() to assert because GrColorTypeToPixelConfig() never returns a
+ // compressed GrPixelConfig.
+ return nullptr;
+ }
+ GrSurfaceDesc desc;
+ desc.fConfig = GrColorTypeToPixelConfig(colorType);
+ desc.fConfig = this->caps()->makeConfigSpecific(desc.fConfig, format);
+ desc.fWidth = size.width();
+ desc.fHeight = size.height();
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+ sk_sp<GrTexture> tex;
+
+ if (SkBackingFit::kApprox == fit) {
+ tex = resourceProvider->createApproxTexture(desc, format, renderable, renderTargetSampleCnt,
+ isProtected);
+ } else {
+ tex = resourceProvider->createTexture(desc, format, renderable, renderTargetSampleCnt,
+ GrMipMapped::kNo, budgeted, isProtected);
+ }
+ if (!tex) {
+ return nullptr;
+ }
+
+ return this->createWrapped(std::move(tex), colorType, origin, UseAllocator::kYes);
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::testingOnly_createInstantiatedProxy(
+ const SkISize& size,
+ GrColorType colorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ auto format = this->caps()->getDefaultBackendFormat(colorType, renderable);
+ return this->testingOnly_createInstantiatedProxy(size,
+ colorType,
+ format,
+ renderable,
+ renderTargetSampleCnt,
+ origin,
+ fit,
+ budgeted,
+ isProtected);
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::testingOnly_createWrapped(sk_sp<GrTexture> tex,
+ GrColorType colorType,
+ GrSurfaceOrigin origin) {
+ return this->createWrapped(std::move(tex), colorType, origin, UseAllocator::kYes);
+}
+#endif
+
+sk_sp<GrTextureProxy> GrProxyProvider::createWrapped(sk_sp<GrTexture> tex,
+ GrColorType colorType,
+ GrSurfaceOrigin origin,
+ UseAllocator useAllocator) {
+#ifdef SK_DEBUG
+ if (tex->getUniqueKey().isValid()) {
+ SkASSERT(!this->findProxyByUniqueKey(tex->getUniqueKey(), origin));
+ }
+#endif
+ GrSwizzle texSwizzle = this->caps()->getTextureSwizzle(tex->backendFormat(), colorType);
+
+ if (tex->asRenderTarget()) {
+ GrSwizzle outSwizzle = this->caps()->getOutputSwizzle(tex->backendFormat(), colorType);
+ return sk_sp<GrTextureProxy>(new GrTextureRenderTargetProxy(
+ std::move(tex), origin, texSwizzle, outSwizzle, useAllocator));
+ } else {
+ return sk_sp<GrTextureProxy>(
+ new GrTextureProxy(std::move(tex), origin, texSwizzle, useAllocator));
+ }
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::findOrCreateProxyByUniqueKey(const GrUniqueKey& key,
+ GrColorType colorType,
+ GrSurfaceOrigin origin,
+ UseAllocator useAllocator) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> result = this->findProxyByUniqueKey(key, origin);
+ if (result) {
+ return result;
+ }
+
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ GrResourceCache* resourceCache = direct->priv().getResourceCache();
+
+ GrGpuResource* resource = resourceCache->findAndRefUniqueResource(key);
+ if (!resource) {
+ return nullptr;
+ }
+
+ sk_sp<GrTexture> texture(static_cast<GrSurface*>(resource)->asTexture());
+ SkASSERT(texture);
+
+ result = this->createWrapped(std::move(texture), colorType, origin, useAllocator);
+ SkASSERT(result->getUniqueKey() == key);
+ // createWrapped should've added this for us
+ SkASSERT(fUniquelyKeyedProxies.find(key));
+ SkASSERT(result->textureSwizzle() ==
+ this->caps()->getTextureSwizzle(result->backendFormat(), colorType));
+ return result;
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::createTextureProxy(sk_sp<SkImage> srcImage,
+ int sampleCnt,
+ SkBudgeted budgeted,
+ SkBackingFit fit,
+ GrInternalSurfaceFlags surfaceFlags) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(srcImage);
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ const SkImageInfo& info = srcImage->imageInfo();
+ GrColorType ct = SkColorTypeToGrColorType(info.colorType());
+
+ GrBackendFormat format = this->caps()->getDefaultBackendFormat(ct, GrRenderable::kNo);
+
+ if (!format.isValid()) {
+ SkBitmap copy8888;
+ if (!copy8888.tryAllocPixels(info.makeColorType(kRGBA_8888_SkColorType)) ||
+ !srcImage->readPixels(copy8888.pixmap(), 0, 0)) {
+ return nullptr;
+ }
+ copy8888.setImmutable();
+ srcImage = SkMakeImageFromRasterBitmap(copy8888, kNever_SkCopyPixelsMode);
+ ct = GrColorType::kRGBA_8888;
+ format = this->caps()->getDefaultBackendFormat(ct, GrRenderable::kNo);
+ if (!format.isValid()) {
+ return nullptr;
+ }
+ }
+
+ GrPixelConfig config = GrColorTypeToPixelConfig(ct);
+ if (kUnknown_GrPixelConfig == config) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = srcImage->width();
+ desc.fHeight = srcImage->height();
+ desc.fConfig = config;
+
+ sk_sp<GrTextureProxy> proxy = this->createLazyProxy(
+ [desc, format, sampleCnt, budgeted, srcImage, fit,
+ ct](GrResourceProvider* resourceProvider) {
+ SkPixmap pixMap;
+ SkAssertResult(srcImage->peekPixels(&pixMap));
+ GrMipLevel mipLevel = { pixMap.addr(), pixMap.rowBytes() };
+
+ return LazyCallbackResult(resourceProvider->createTexture(
+ desc, format, ct, GrRenderable::kNo, sampleCnt, budgeted, fit,
+ GrProtected::kNo, mipLevel));
+ },
+ format, desc, GrRenderable::kNo, sampleCnt, kTopLeft_GrSurfaceOrigin, GrMipMapped::kNo,
+ GrMipMapsStatus::kNotAllocated, surfaceFlags, fit, budgeted, GrProtected::kNo,
+ UseAllocator::kYes);
+
+ if (!proxy) {
+ return nullptr;
+ }
+
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (direct) {
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+        // In order to reuse code we always create a lazy proxy. When we aren't in DDL mode,
+        // however, we're better off instantiating the proxy immediately here.
+ if (!proxy->priv().doLazyInstantiation(resourceProvider)) {
+ return nullptr;
+ }
+ }
+
+ SkASSERT(proxy->width() == desc.fWidth);
+ SkASSERT(proxy->height() == desc.fHeight);
+ return proxy;
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::createProxyFromBitmap(const SkBitmap& bitmap,
+ GrMipMapped mipMapped) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(GrMipMapped::kNo == mipMapped || this->caps()->mipMapSupport());
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ if (!SkImageInfoIsValid(bitmap.info())) {
+ return nullptr;
+ }
+
+ ATRACE_ANDROID_FRAMEWORK("Upload %sTexture [%ux%u]",
+ GrMipMapped::kYes == mipMapped ? "MipMap " : "",
+ bitmap.width(), bitmap.height());
+
+    // In non-DDL mode we will always instantiate right away, so we never want to copy the
+    // SkBitmap, even if it's mutable. In DDL mode, if the bitmap is mutable then we must make a
+    // copy, since the upload of the data to the gpu can happen at any time and the bitmap may
+    // change by then.
+ SkCopyPixelsMode copyMode = this->renderingDirectly() ? kNever_SkCopyPixelsMode
+ : kIfMutable_SkCopyPixelsMode;
+ sk_sp<SkImage> baseLevel = SkMakeImageFromRasterBitmap(bitmap, copyMode);
+ if (!baseLevel) {
+ return nullptr;
+ }
+
+ // If mips weren't requested (or this was too small to have any), then take the fast path
+ if (GrMipMapped::kNo == mipMapped ||
+ 0 == SkMipMap::ComputeLevelCount(baseLevel->width(), baseLevel->height())) {
+ return this->createTextureProxy(std::move(baseLevel), 1, SkBudgeted::kYes,
+ SkBackingFit::kExact);
+ }
+
+ GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap.info());
+
+ GrBackendFormat format = this->caps()->getDefaultBackendFormat(
+ SkColorTypeToGrColorType(bitmap.info().colorType()), GrRenderable::kNo);
+ if (!format.isValid()) {
+ SkBitmap copy8888;
+ if (!copy8888.tryAllocPixels(bitmap.info().makeColorType(kRGBA_8888_SkColorType)) ||
+ !bitmap.readPixels(copy8888.pixmap())) {
+ return nullptr;
+ }
+ copy8888.setImmutable();
+ baseLevel = SkMakeImageFromRasterBitmap(copy8888, kNever_SkCopyPixelsMode);
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ format = this->caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888, GrRenderable::kNo);
+ if (!format.isValid()) {
+ return nullptr;
+ }
+ }
+
+ SkPixmap pixmap;
+ SkAssertResult(baseLevel->peekPixels(&pixmap));
+ sk_sp<SkMipMap> mipmaps(SkMipMap::Build(pixmap, nullptr));
+ if (!mipmaps) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = this->createLazyProxy(
+ [desc, format, baseLevel, mipmaps](GrResourceProvider* resourceProvider) {
+ const int mipLevelCount = mipmaps->countLevels() + 1;
+ std::unique_ptr<GrMipLevel[]> texels(new GrMipLevel[mipLevelCount]);
+
+ SkPixmap pixmap;
+ SkAssertResult(baseLevel->peekPixels(&pixmap));
+
+ texels[0].fPixels = pixmap.addr();
+ texels[0].fRowBytes = pixmap.rowBytes();
+
+ auto colorType = SkColorTypeToGrColorType(pixmap.colorType());
+ for (int i = 1; i < mipLevelCount; ++i) {
+ SkMipMap::Level generatedMipLevel;
+ mipmaps->getLevel(i - 1, &generatedMipLevel);
+ texels[i].fPixels = generatedMipLevel.fPixmap.addr();
+ texels[i].fRowBytes = generatedMipLevel.fPixmap.rowBytes();
+ SkASSERT(texels[i].fPixels);
+ SkASSERT(generatedMipLevel.fPixmap.colorType() == pixmap.colorType());
+ }
+ return LazyCallbackResult(resourceProvider->createTexture(
+ desc, format, colorType, GrRenderable::kNo, 1, SkBudgeted::kYes,
+ GrProtected::kNo, texels.get(), mipLevelCount));
+ },
+ format, desc, GrRenderable::kNo, 1, kTopLeft_GrSurfaceOrigin, GrMipMapped::kYes,
+ GrMipMapsStatus::kValid, GrInternalSurfaceFlags::kNone, SkBackingFit::kExact,
+ SkBudgeted::kYes, GrProtected::kNo, UseAllocator::kYes);
+
+ if (!proxy) {
+ return nullptr;
+ }
+
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (direct) {
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+        // In order to reuse code we always create a lazy proxy. When we aren't in DDL mode,
+        // however, we're better off instantiating the proxy immediately here.
+ if (!proxy->priv().doLazyInstantiation(resourceProvider)) {
+ return nullptr;
+ }
+ }
+ return proxy;
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::createProxy(const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ GrSurfaceProxy::UseAllocator useAllocator) {
+ const GrCaps* caps = this->caps();
+
+ if (caps->isFormatCompressed(format)) {
+ // Deferred proxies for compressed textures are not supported.
+ return nullptr;
+ }
+
+ GrColorType colorType = GrPixelConfigToColorType(desc.fConfig);
+
+ SkASSERT(GrCaps::AreConfigsCompatible(desc.fConfig,
+ caps->getConfigFromBackendFormat(format, colorType)));
+
+ if (GrMipMapped::kYes == mipMapped) {
+ // SkMipMap doesn't include the base level in the level count so we have to add 1
+ int mipCount = SkMipMap::ComputeLevelCount(desc.fWidth, desc.fHeight) + 1;
+ if (1 == mipCount) {
+ mipMapped = GrMipMapped::kNo;
+ }
+ }
+
+ if (!caps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
+ renderTargetSampleCnt, mipMapped)) {
+ return nullptr;
+ }
+ GrSurfaceDesc copyDesc = desc;
+ GrMipMapsStatus mipMapsStatus = (GrMipMapped::kYes == mipMapped)
+ ? GrMipMapsStatus::kDirty
+ : GrMipMapsStatus::kNotAllocated;
+ GrSwizzle texSwizzle = caps->getTextureSwizzle(format, colorType);
+ if (renderable == GrRenderable::kYes) {
+ renderTargetSampleCnt =
+ caps->getRenderTargetSampleCount(renderTargetSampleCnt, format);
+ SkASSERT(renderTargetSampleCnt);
+ // We know anything we instantiate later from this deferred path will be
+ // both texturable and renderable
+ GrSwizzle outSwizzle = caps->getOutputSwizzle(format, colorType);
+ return sk_sp<GrTextureProxy>(new GrTextureRenderTargetProxy(
+ *caps, format, copyDesc, renderTargetSampleCnt, origin, mipMapped, mipMapsStatus,
+ texSwizzle, outSwizzle, fit, budgeted, isProtected, surfaceFlags, useAllocator));
+ }
+
+ return sk_sp<GrTextureProxy>(new GrTextureProxy(format, copyDesc, origin, mipMapped,
+ mipMapsStatus, texSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator));
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::createCompressedTextureProxy(
+ int width, int height, SkBudgeted budgeted, SkImage::CompressionType compressionType,
+ sk_sp<SkData> data) {
+
+ GrSurfaceDesc desc;
+ desc.fConfig = GrCompressionTypePixelConfig(compressionType);
+ desc.fWidth = width;
+ desc.fHeight = height;
+
+ GrBackendFormat format = this->caps()->getBackendFormatFromCompressionType(compressionType);
+
+ if (!this->caps()->isFormatTexturable(format)) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = this->createLazyProxy(
+ [width, height, format, compressionType, budgeted,
+ data](GrResourceProvider* resourceProvider) {
+ return LazyCallbackResult(resourceProvider->createCompressedTexture(
+ width, height, format, compressionType, budgeted, data.get()));
+ },
+ format, desc, GrRenderable::kNo, 1, kTopLeft_GrSurfaceOrigin, GrMipMapped::kNo,
+ GrMipMapsStatus::kNotAllocated, GrInternalSurfaceFlags::kNone, SkBackingFit::kExact,
+ SkBudgeted::kYes, GrProtected::kNo, UseAllocator::kYes);
+
+ if (!proxy) {
+ return nullptr;
+ }
+
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (direct) {
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+        // In order to reuse code we always create a lazy proxy. When we aren't in DDL mode,
+        // however, we're better off instantiating the proxy immediately here.
+ if (!proxy->priv().doLazyInstantiation(resourceProvider)) {
+ return nullptr;
+ }
+ }
+ return proxy;
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::wrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType grColorType,
+ GrSurfaceOrigin origin,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable,
+ GrIOType ioType,
+ ReleaseProc releaseProc,
+ ReleaseContext releaseCtx) {
+ SkASSERT(ioType != kWrite_GrIOType);
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // This is only supported on a direct GrContext.
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = this->caps();
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+ sk_sp<GrTexture> tex =
+ resourceProvider->wrapBackendTexture(backendTex, grColorType,
+ ownership, cacheable, ioType);
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (releaseProc) {
+ tex->setRelease(releaseProc, releaseCtx);
+ }
+
+ SkASSERT(!tex->asRenderTarget()); // Strictly a GrTexture
+ // Make sure we match how we created the proxy with SkBudgeted::kNo
+ SkASSERT(GrBudgetedType::kBudgeted != tex->resourcePriv().budgetedType());
+
+ GrSwizzle texSwizzle = caps->getTextureSwizzle(tex->backendFormat(), grColorType);
+
+ return sk_sp<GrTextureProxy>(
+ new GrTextureProxy(std::move(tex), origin, texSwizzle, UseAllocator::kNo));
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::wrapRenderableBackendTexture(
+ const GrBackendTexture& backendTex, GrSurfaceOrigin origin, int sampleCnt,
+ GrColorType colorType, GrWrapOwnership ownership, GrWrapCacheable cacheable,
+ ReleaseProc releaseProc, ReleaseContext releaseCtx) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // This is only supported on a direct GrContext.
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = this->caps();
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+ // TODO: This should have been checked and validated before getting into GrProxyProvider.
+ if (!caps->isFormatAsColorTypeRenderable(colorType, backendTex.getBackendFormat(), sampleCnt)) {
+ return nullptr;
+ }
+
+ sampleCnt = caps->getRenderTargetSampleCount(sampleCnt, backendTex.getBackendFormat());
+ SkASSERT(sampleCnt);
+
+ sk_sp<GrTexture> tex = resourceProvider->wrapRenderableBackendTexture(backendTex, sampleCnt,
+ colorType, ownership,
+ cacheable);
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (releaseProc) {
+ tex->setRelease(releaseProc, releaseCtx);
+ }
+
+ SkASSERT(tex->asRenderTarget()); // A GrTextureRenderTarget
+ // Make sure we match how we created the proxy with SkBudgeted::kNo
+ SkASSERT(GrBudgetedType::kBudgeted != tex->resourcePriv().budgetedType());
+
+ GrSwizzle texSwizzle = caps->getTextureSwizzle(tex->backendFormat(), colorType);
+ GrSwizzle outSwizzle = caps->getOutputSwizzle(tex->backendFormat(), colorType);
+
+ return sk_sp<GrTextureProxy>(new GrTextureRenderTargetProxy(std::move(tex), origin, texSwizzle,
+ outSwizzle, UseAllocator::kNo));
+}
+
+sk_sp<GrSurfaceProxy> GrProxyProvider::wrapBackendRenderTarget(
+ const GrBackendRenderTarget& backendRT, GrColorType grColorType,
+ GrSurfaceOrigin origin, ReleaseProc releaseProc, ReleaseContext releaseCtx) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // This is only supported on a direct GrContext.
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = this->caps();
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+ sk_sp<GrRenderTarget> rt = resourceProvider->wrapBackendRenderTarget(backendRT, grColorType);
+ if (!rt) {
+ return nullptr;
+ }
+
+ if (releaseProc) {
+ rt->setRelease(releaseProc, releaseCtx);
+ }
+
+    SkASSERT(!rt->asTexture()); // A GrRenderTarget that's not texturable
+ SkASSERT(!rt->getUniqueKey().isValid());
+ // Make sure we match how we created the proxy with SkBudgeted::kNo
+ SkASSERT(GrBudgetedType::kBudgeted != rt->resourcePriv().budgetedType());
+
+ GrSwizzle texSwizzle = caps->getTextureSwizzle(rt->backendFormat(), grColorType);
+ GrSwizzle outSwizzle = caps->getOutputSwizzle(rt->backendFormat(), grColorType);
+
+ return sk_sp<GrRenderTargetProxy>(new GrRenderTargetProxy(std::move(rt), origin, texSwizzle,
+ outSwizzle, UseAllocator::kNo));
+}
+
+sk_sp<GrSurfaceProxy> GrProxyProvider::wrapBackendTextureAsRenderTarget(
+ const GrBackendTexture& backendTex, GrColorType grColorType,
+ GrSurfaceOrigin origin, int sampleCnt) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // This is only supported on a direct GrContext.
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = this->caps();
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+ sk_sp<GrRenderTarget> rt =
+ resourceProvider->wrapBackendTextureAsRenderTarget(backendTex, sampleCnt, grColorType);
+ if (!rt) {
+ return nullptr;
+ }
+    SkASSERT(!rt->asTexture()); // A GrRenderTarget that's not texturable
+ SkASSERT(!rt->getUniqueKey().isValid());
+ // This proxy should be unbudgeted because we're just wrapping an external resource
+ SkASSERT(GrBudgetedType::kBudgeted != rt->resourcePriv().budgetedType());
+
+ GrSwizzle texSwizzle = caps->getTextureSwizzle(rt->backendFormat(), grColorType);
+ GrSwizzle outSwizzle = caps->getOutputSwizzle(rt->backendFormat(), grColorType);
+
+ return sk_sp<GrSurfaceProxy>(new GrRenderTargetProxy(std::move(rt), origin, texSwizzle,
+ outSwizzle, UseAllocator::kNo));
+}
+
+sk_sp<GrRenderTargetProxy> GrProxyProvider::wrapVulkanSecondaryCBAsRenderTarget(
+ const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // This is only supported on a direct GrContext.
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+
+ sk_sp<GrRenderTarget> rt = resourceProvider->wrapVulkanSecondaryCBAsRenderTarget(imageInfo,
+ vkInfo);
+ if (!rt) {
+ return nullptr;
+ }
+
+    SkASSERT(!rt->asTexture()); // A GrRenderTarget that's not texturable
+ SkASSERT(!rt->getUniqueKey().isValid());
+ // This proxy should be unbudgeted because we're just wrapping an external resource
+ SkASSERT(GrBudgetedType::kBudgeted != rt->resourcePriv().budgetedType());
+
+ GrColorType colorType = SkColorTypeToGrColorType(imageInfo.colorType());
+ GrSwizzle texSwizzle = this->caps()->getTextureSwizzle(rt->backendFormat(), colorType);
+ GrSwizzle outSwizzle = this->caps()->getOutputSwizzle(rt->backendFormat(), colorType);
+
+ if (!this->caps()->isFormatAsColorTypeRenderable(colorType, rt->backendFormat(),
+ rt->numSamples())) {
+ return nullptr;
+ }
+
+    // All Vulkan surfaces use top-left origins.
+ return sk_sp<GrRenderTargetProxy>(new GrRenderTargetProxy(
+ std::move(rt), kTopLeft_GrSurfaceOrigin, texSwizzle, outSwizzle, UseAllocator::kNo,
+ GrRenderTargetProxy::WrapsVkSecondaryCB::kYes));
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::createLazyProxy(LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ GrMipMapsStatus mipMapsStatus,
+ GrInternalSurfaceFlags surfaceFlags,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrSurfaceProxy::UseAllocator useAllocator) {
+ SkASSERT((desc.fWidth <= 0 && desc.fHeight <= 0) ||
+ (desc.fWidth > 0 && desc.fHeight > 0));
+
+ if (!format.isValid()) {
+ return nullptr;
+ }
+
+ if (desc.fWidth > this->caps()->maxTextureSize() ||
+ desc.fHeight > this->caps()->maxTextureSize()) {
+ return nullptr;
+ }
+
+ GrColorType colorType = GrPixelConfigToColorType(desc.fConfig);
+ GrSwizzle texSwizzle = this->caps()->getTextureSwizzle(format, colorType);
+ GrSwizzle outSwizzle = this->caps()->getOutputSwizzle(format, colorType);
+
+ if (renderable == GrRenderable::kYes) {
+ return sk_sp<GrTextureProxy>(new GrTextureRenderTargetProxy(*this->caps(),
+ std::move(callback),
+ format,
+ desc,
+ renderTargetSampleCnt,
+ origin,
+ mipMapped,
+ mipMapsStatus,
+ texSwizzle,
+ outSwizzle,
+ fit,
+ budgeted,
+ isProtected,
+ surfaceFlags,
+ useAllocator));
+ } else {
+ return sk_sp<GrTextureProxy>(new GrTextureProxy(std::move(callback),
+ format,
+ desc,
+ origin,
+ mipMapped,
+ mipMapsStatus,
+ texSwizzle,
+ fit,
+ budgeted,
+ isProtected,
+ surfaceFlags,
+ useAllocator));
+ }
+}
+
+sk_sp<GrRenderTargetProxy> GrProxyProvider::createLazyRenderTargetProxy(
+ LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ GrSurfaceOrigin origin,
+ GrInternalSurfaceFlags surfaceFlags,
+ const TextureInfo* textureInfo,
+ GrMipMapsStatus mipMapsStatus,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ bool wrapsVkSecondaryCB,
+ UseAllocator useAllocator) {
+ SkASSERT((desc.fWidth <= 0 && desc.fHeight <= 0) ||
+ (desc.fWidth > 0 && desc.fHeight > 0));
+
+ if (desc.fWidth > this->caps()->maxRenderTargetSize() ||
+ desc.fHeight > this->caps()->maxRenderTargetSize()) {
+ return nullptr;
+ }
+
+ GrColorType colorType = GrPixelConfigToColorType(desc.fConfig);
+ GrSwizzle texSwizzle = this->caps()->getTextureSwizzle(format, colorType);
+ GrSwizzle outSwizzle = this->caps()->getOutputSwizzle(format, colorType);
+
+ if (textureInfo) {
+ // Wrapped vulkan secondary command buffers don't support texturing since we won't have an
+ // actual VkImage to texture from.
+ SkASSERT(!wrapsVkSecondaryCB);
+ return sk_sp<GrRenderTargetProxy>(new GrTextureRenderTargetProxy(
+ *this->caps(), std::move(callback), format, desc, sampleCnt, origin,
+ textureInfo->fMipMapped, mipMapsStatus, texSwizzle, outSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator));
+ }
+
+ GrRenderTargetProxy::WrapsVkSecondaryCB vkSCB =
+ wrapsVkSecondaryCB ? GrRenderTargetProxy::WrapsVkSecondaryCB::kYes
+ : GrRenderTargetProxy::WrapsVkSecondaryCB::kNo;
+
+ return sk_sp<GrRenderTargetProxy>(new GrRenderTargetProxy(
+ std::move(callback), format, desc, sampleCnt, origin, texSwizzle, outSwizzle, fit,
+ budgeted, isProtected, surfaceFlags, useAllocator, vkSCB));
+}
+
+sk_sp<GrTextureProxy> GrProxyProvider::MakeFullyLazyProxy(LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrProtected isProtected,
+ GrSurfaceOrigin origin,
+ GrPixelConfig config,
+ const GrCaps& caps,
+ UseAllocator useAllocator) {
+ if (!format.isValid()) {
+ return nullptr;
+ }
+
+ SkASSERT(renderTargetSampleCnt == 1 || renderable == GrRenderable::kYes);
+ GrSurfaceDesc desc;
+ GrInternalSurfaceFlags surfaceFlags = GrInternalSurfaceFlags::kNone;
+ desc.fWidth = -1;
+ desc.fHeight = -1;
+ desc.fConfig = config;
+
+ GrColorType colorType = GrPixelConfigToColorType(desc.fConfig);
+ GrSwizzle texSwizzle = caps.getTextureSwizzle(format, colorType);
+ GrSwizzle outSwizzle = caps.getOutputSwizzle(format, colorType);
+
+ if (GrRenderable::kYes == renderable) {
+ return sk_sp<GrTextureProxy>(new GrTextureRenderTargetProxy(
+ caps, std::move(callback), format, desc, renderTargetSampleCnt, origin,
+ GrMipMapped::kNo, GrMipMapsStatus::kNotAllocated, texSwizzle, outSwizzle,
+ SkBackingFit::kApprox, SkBudgeted::kYes, isProtected, surfaceFlags, useAllocator));
+ } else {
+ return sk_sp<GrTextureProxy>(new GrTextureProxy(
+ std::move(callback), format, desc, origin, GrMipMapped::kNo,
+ GrMipMapsStatus::kNotAllocated, texSwizzle, SkBackingFit::kApprox, SkBudgeted::kYes,
+ isProtected, surfaceFlags, useAllocator));
+ }
+}
+
+bool GrProxyProvider::IsFunctionallyExact(GrSurfaceProxy* proxy) {
+ const bool isInstantiated = proxy->isInstantiated();
+ // A proxy is functionally exact if:
+    //   it is exact (obviously),
+    //   it will be exact when instantiated (i.e., it has power-of-two dimensions), or
+    //   it is already instantiated and the proxy covers the entire backing surface.
+ return proxy->priv().isExact() ||
+ (!isInstantiated && SkIsPow2(proxy->width()) && SkIsPow2(proxy->height())) ||
+ (isInstantiated && proxy->worstCaseWidth() == proxy->width() &&
+ proxy->worstCaseHeight() == proxy->height());
+}
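+
+// Editorial examples (not from the upstream sources): a deferred 100x100
+// SkBackingFit::kApprox proxy is not functionally exact, since instantiation
+// may round it up to 128x128; a deferred 128x128 kApprox proxy is, because the
+// power-of-two rounding would leave it unchanged.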
+
+void GrProxyProvider::processInvalidUniqueKey(const GrUniqueKey& key, GrTextureProxy* proxy,
+ InvalidateGPUResource invalidateGPUResource) {
+ SkASSERT(key.isValid());
+
+ if (!proxy) {
+ proxy = fUniquelyKeyedProxies.find(key);
+ }
+ SkASSERT(!proxy || proxy->getUniqueKey() == key);
+
+ // Locate the corresponding GrGpuResource (if it needs to be invalidated) before clearing the
+ // proxy's unique key. We must do it in this order because 'key' may alias the proxy's key.
+ sk_sp<GrGpuResource> invalidGpuResource;
+ if (InvalidateGPUResource::kYes == invalidateGPUResource) {
+ GrContext* direct = fImageContext->priv().asDirectContext();
+ if (direct) {
+ GrResourceProvider* resourceProvider = direct->priv().resourceProvider();
+ invalidGpuResource = resourceProvider->findByUniqueKey<GrGpuResource>(key);
+ }
+ SkASSERT(!invalidGpuResource || invalidGpuResource->getUniqueKey() == key);
+ }
+
+    // Note: this method is called for the whole variety of GrGpuResources, so 'key' will often
+    // not be in 'fUniquelyKeyedProxies'.
+ if (proxy) {
+ fUniquelyKeyedProxies.remove(key);
+ proxy->cacheAccess().clearUniqueKey();
+ }
+
+ if (invalidGpuResource) {
+ invalidGpuResource->resourcePriv().removeUniqueKey();
+ }
+}
+
+uint32_t GrProxyProvider::contextID() const {
+ return fImageContext->priv().contextID();
+}
+
+const GrCaps* GrProxyProvider::caps() const {
+ return fImageContext->priv().caps();
+}
+
+sk_sp<const GrCaps> GrProxyProvider::refCaps() const {
+ return fImageContext->priv().refCaps();
+}
+
+bool GrProxyProvider::isAbandoned() const {
+ return fImageContext->priv().abandoned();
+}
+
+void GrProxyProvider::orphanAllUniqueKeys() {
+    for (UniquelyKeyedProxyHash::Iter iter(&fUniquelyKeyedProxies); !iter.done(); ++iter) {
+        GrTextureProxy& tmp = *iter;
+        tmp.fProxyProvider = nullptr;
+ }
+}
+
+void GrProxyProvider::removeAllUniqueKeys() {
+    for (UniquelyKeyedProxyHash::Iter iter(&fUniquelyKeyedProxies); !iter.done(); ++iter) {
+        GrTextureProxy& tmp = *iter;
+        this->processInvalidUniqueKey(tmp.getUniqueKey(), &tmp, InvalidateGPUResource::kNo);
+ }
+ SkASSERT(!fUniquelyKeyedProxies.count());
+}
+
+bool GrProxyProvider::renderingDirectly() const {
+ return fImageContext->priv().asDirectContext();
+}
diff --git a/gfx/skia/skia/src/gpu/GrProxyProvider.h b/gfx/skia/skia/src/gpu/GrProxyProvider.h
new file mode 100644
index 0000000000..b8de8c8983
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProxyProvider.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProxyProvider_DEFINED
+#define GrProxyProvider_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrResourceKey.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrImageContext;
+class GrBackendRenderTarget;
+class SkBitmap;
+class SkImage;
+
+/*
+ * A factory for creating GrSurfaceProxy-derived objects.
+ */
+class GrProxyProvider {
+public:
+ using UseAllocator = GrSurfaceProxy::UseAllocator;
+
+ GrProxyProvider(GrImageContext*);
+
+ ~GrProxyProvider();
+
+ /*
+ * Assigns a unique key to a proxy. The proxy will be findable via this key using
+     * findProxyByUniqueKey(). It is an error if another proxy already holds this key.
+ */
+ bool assignUniqueKeyToProxy(const GrUniqueKey&, GrTextureProxy*);
+
+ /*
+ * Sets the unique key of the provided proxy to the unique key of the surface. The surface must
+ * have a valid unique key.
+ */
+ void adoptUniqueKeyFromSurface(GrTextureProxy* proxy, const GrSurface*);
+
+ /*
+ * Removes a unique key from a proxy. If the proxy has already been instantiated, it will
+ * also remove the unique key from the target GrSurface.
+ */
+ void removeUniqueKeyFromProxy(GrTextureProxy*);
+
+ /*
+ * Finds a proxy by unique key.
+ */
+ sk_sp<GrTextureProxy> findProxyByUniqueKey(const GrUniqueKey&, GrSurfaceOrigin);
+
+ /*
+ * Finds a proxy by unique key or creates a new one that wraps a resource matching the unique
+ * key. GrColorType is required to set the proxy's texture swizzle on creation. For any key,
+ * each call that might result in a cache hit must provide the same colorType as the call that
+ * caused a cache miss and created the proxy.
+ */
+ sk_sp<GrTextureProxy> findOrCreateProxyByUniqueKey(const GrUniqueKey&,
+ GrColorType colorType,
+ GrSurfaceOrigin,
+ UseAllocator = UseAllocator::kYes);
+
+ /*
+ * Create an un-mipmapped texture proxy with data. The SkImage must be a raster backend image.
+ * Since the SkImage is ref counted, we simply take a ref on it to keep the data alive until we
+ * actually upload the data to the gpu.
+ */
+ sk_sp<GrTextureProxy> createTextureProxy(
+ sk_sp<SkImage> srcImage, int sampleCnt, SkBudgeted, SkBackingFit,
+ GrInternalSurfaceFlags = GrInternalSurfaceFlags::kNone);
+
+ /*
+ * Creates a new texture proxy for the bitmap, optionally with mip levels generated by the cpu.
+ */
+ sk_sp<GrTextureProxy> createProxyFromBitmap(const SkBitmap& bitmap, GrMipMapped);
+
+ /*
+ * Create a GrSurfaceProxy without any data.
+ */
+ sk_sp<GrTextureProxy> createProxy(const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags = GrInternalSurfaceFlags::kNone,
+ UseAllocator useAllocator = UseAllocator::kYes);
+
+ /*
+ * Create a texture proxy from compressed texture data.
+ */
+ sk_sp<GrTextureProxy> createCompressedTextureProxy(int width, int height, SkBudgeted budgeted,
+ SkImage::CompressionType compressionType,
+ sk_sp<SkData> data);
+
+    // These match the definitions in SkImage & GrTexture.h, whence they came.
+ typedef void* ReleaseContext;
+ typedef void (*ReleaseProc)(ReleaseContext);
+
+ /*
+ * Create a texture proxy that wraps a (non-renderable) backend texture. GrIOType must be
+ * kRead or kRW.
+ */
+ sk_sp<GrTextureProxy> wrapBackendTexture(const GrBackendTexture&, GrColorType, GrSurfaceOrigin,
+ GrWrapOwnership, GrWrapCacheable, GrIOType,
+ ReleaseProc = nullptr, ReleaseContext = nullptr);
+
+ /*
+     * Create a texture proxy that wraps a backend texture and is both texturable and renderable.
+ */
+ sk_sp<GrTextureProxy> wrapRenderableBackendTexture(const GrBackendTexture&, GrSurfaceOrigin,
+ int sampleCnt, GrColorType,
+ GrWrapOwnership, GrWrapCacheable,
+ ReleaseProc = nullptr,
+ ReleaseContext = nullptr);
+
+ /*
+ * Create a render target proxy that wraps a backend render target
+ */
+ sk_sp<GrSurfaceProxy> wrapBackendRenderTarget(const GrBackendRenderTarget&, GrColorType,
+ GrSurfaceOrigin,
+ ReleaseProc = nullptr, ReleaseContext = nullptr);
+
+ /*
+ * Create a render target proxy that wraps a backend texture
+ */
+ sk_sp<GrSurfaceProxy> wrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ GrColorType,
+ GrSurfaceOrigin,
+ int sampleCnt);
+
+ sk_sp<GrRenderTargetProxy> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
+ const GrVkDrawableInfo&);
+
+ using LazyInstantiationKeyMode = GrSurfaceProxy::LazyInstantiationKeyMode;
+ using LazyCallbackResult = GrSurfaceProxy::LazyCallbackResult;
+ using LazyInstantiateCallback = GrSurfaceProxy::LazyInstantiateCallback;
+
+ struct TextureInfo {
+ GrMipMapped fMipMapped;
+ GrTextureType fTextureType;
+ };
+
+ /**
+ * Creates a texture proxy that will be instantiated by a user-supplied callback during flush.
+ * (Stencil is not supported by this method.) The width and height must either both be greater
+ * than 0 or both less than or equal to zero. A non-positive value is a signal that the width
+ * and height are currently unknown.
+ *
+     * When called, the callback must be able to clean up any resources that it captured at
+     * creation. It must also support being passed a null GrResourceProvider, in which case it
+     * should clean up any resources it captured and return an empty sk_sp<GrTextureProxy>.
+ */
+ sk_sp<GrTextureProxy> createLazyProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ GrMipMapsStatus,
+ GrInternalSurfaceFlags,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ UseAllocator);
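+
+    // Editorial sketch (assumed shape, not part of the upstream header): a minimal
+    // callback honoring the cleanup contract described above might look like
+    //
+    //   auto cb = [data](GrResourceProvider* provider) -> LazyCallbackResult {
+    //       if (!provider) {
+    //           return {};  // abandoned: release captures, create nothing
+    //       }
+    //       return LazyCallbackResult(provider->createTexture(/* ... */));
+    //   };
+    //
+    // where 'data' stands in for whatever the callback captured at creation.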
+
+    /** A null TextureInfo indicates a non-texturable render target. */
+ sk_sp<GrRenderTargetProxy> createLazyRenderTargetProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ GrInternalSurfaceFlags,
+ const TextureInfo*,
+ GrMipMapsStatus,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ bool wrapsVkSecondaryCB,
+ UseAllocator useAllocator);
+
+ /**
+ * Fully lazy proxies have unspecified width and height. Methods that rely on those values
+ * (e.g., width, height, getBoundsRect) should be avoided.
+ */
+ static sk_sp<GrTextureProxy> MakeFullyLazyProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ GrProtected,
+ GrSurfaceOrigin,
+ GrPixelConfig,
+ const GrCaps&,
+ UseAllocator);
+
+ // 'proxy' is about to be used as a texture src or drawn to. This query can be used to
+ // determine if it is going to need a texture domain or a full clear.
+ static bool IsFunctionallyExact(GrSurfaceProxy* proxy);
+
+ enum class InvalidateGPUResource : bool { kNo = false, kYes = true };
+
+ /*
+ * This method ensures that, if a proxy w/ the supplied unique key exists, it is removed from
+     * the proxy provider's map and its unique key is removed. If 'invalidateGPUResource' is
+     * kYes, it will independently ensure that the unique key is removed from any GrGpuResources
+     * that may have it.
+ *
+ * If 'proxy' is provided (as an optimization to stop re-looking it up), its unique key must be
+ * valid and match the provided unique key.
+ *
+ * This method is called if either the proxy attached to the unique key is being deleted
+ * (in which case we don't want it cluttering up the hash table) or the client has indicated
+ * that it will never refer to the unique key again.
+ */
+ void processInvalidUniqueKey(const GrUniqueKey&, GrTextureProxy*, InvalidateGPUResource);
+
+ // TODO: remove these entry points - it is a bit sloppy to be getting context info from here
+ uint32_t contextID() const;
+ const GrCaps* caps() const;
+ sk_sp<const GrCaps> refCaps() const;
+
+ int numUniqueKeyProxies_TestOnly() const;
+
+    // This is called on a DDL's proxy provider when the DDL is finished. The uniquely keyed
+    // proxies need to keep their unique key but cannot hold on to their pointer back to the
+    // proxy provider.
+ void orphanAllUniqueKeys();
+ // This is only used by GrContext::releaseResourcesAndAbandonContext()
+ void removeAllUniqueKeys();
+
+ /**
+ * Does the proxy provider have access to a GrDirectContext? If so, proxies will be
+ * instantiated immediately.
+ */
+ bool renderingDirectly() const;
+
+#if GR_TEST_UTILS
+ /*
+ * Create a texture proxy that is backed by an instantiated GrSurface.
+ * TODO: Remove GrColorType. Currently used to infer a GrPixelConfig.
+ */
+ sk_sp<GrTextureProxy> testingOnly_createInstantiatedProxy(const SkISize& size,
+ GrColorType colorType,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected);
+
+ /** Version of above that picks the default format for the color type. */
+ sk_sp<GrTextureProxy> testingOnly_createInstantiatedProxy(const SkISize& size,
+ GrColorType colorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrSurfaceOrigin origin,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected);
+
+ sk_sp<GrTextureProxy> testingOnly_createWrapped(sk_sp<GrTexture>, GrColorType, GrSurfaceOrigin);
+#endif
+
+private:
+ friend class GrAHardwareBufferImageGenerator; // for createWrapped
+ friend class GrResourceProvider; // for createWrapped
+
+ bool isAbandoned() const;
+
+ // GrColorType is used to determine the proxy's texture swizzle.
+ sk_sp<GrTextureProxy> createWrapped(sk_sp<GrTexture> tex, GrColorType, GrSurfaceOrigin origin,
+ UseAllocator useAllocator);
+
+ struct UniquelyKeyedProxyHashTraits {
+ static const GrUniqueKey& GetKey(const GrTextureProxy& p) { return p.getUniqueKey(); }
+
+ static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
+ };
+    typedef SkTDynamicHash<GrTextureProxy, GrUniqueKey, UniquelyKeyedProxyHashTraits>
+            UniquelyKeyedProxyHash;
+
+ // This holds the texture proxies that have unique keys. The resourceCache does not get a ref
+ // on these proxies but they must send a message to the resourceCache when they are deleted.
+ UniquelyKeyedProxyHash fUniquelyKeyedProxies;
+
+ GrImageContext* fImageContext;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRecordingContext.cpp b/gfx/skia/skia/src/gpu/GrRecordingContext.cpp
new file mode 100644
index 0000000000..081b693f51
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRecordingContext.cpp
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/text/GrTextBlobCache.h"
+
+#define ASSERT_SINGLE_OWNER_PRIV \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+
+GrRecordingContext::GrRecordingContext(GrBackendApi backend,
+ const GrContextOptions& options,
+ uint32_t contextID)
+ : INHERITED(backend, options, contextID)
+ , fAuditTrail(new GrAuditTrail()) {
+}
+
+GrRecordingContext::~GrRecordingContext() { }
+
+/**
+ * TODO: move textblob draw calls below context (see comment below)
+ */
+static void textblobcache_overbudget_CB(void* data) {
+ SkASSERT(data);
+ GrRecordingContext* context = reinterpret_cast<GrRecordingContext*>(data);
+
+ GrContext* direct = context->priv().asDirectContext();
+ if (!direct) {
+ return;
+ }
+
+    // TextBlobs are drawn at the SkGpuDevice level; therefore, they cannot rely on
+    // GrRenderTargetContext to perform a necessary flush. The solution is to move drawText calls
+ // to below the GrContext level, but this is not trivial because they call drawPath on
+ // SkGpuDevice.
+ direct->flush();
+}
+
+bool GrRecordingContext::init(sk_sp<const GrCaps> caps, sk_sp<GrSkSLFPFactoryCache> cache) {
+
+ if (!INHERITED::init(std::move(caps), std::move(cache))) {
+ return false;
+ }
+
+ fStrikeCache.reset(new GrStrikeCache(this->caps(),
+ this->options().fGlyphCacheTextureMaximumBytes));
+
+ fTextBlobCache.reset(new GrTextBlobCache(textblobcache_overbudget_CB, this,
+ this->contextID()));
+
+ return true;
+}
+
+void GrRecordingContext::setupDrawingManager(bool sortOpsTasks, bool reduceOpsTaskSplitting) {
+ GrPathRendererChain::Options prcOptions;
+ prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
+#if GR_TEST_UTILS
+ prcOptions.fGpuPathRenderers = this->options().fGpuPathRenderers;
+#endif
+    // FIXME: Once this is removed from Chrome and Android, rename fDisableCoverageCountingPaths
+    // to a positively named "fEnable..." option.
+ if (!this->options().fDisableCoverageCountingPaths) {
+ prcOptions.fGpuPathRenderers |= GpuPathRenderers::kCoverageCounting;
+ }
+ if (this->options().fDisableDistanceFieldPaths) {
+ prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
+ }
+
+ if (!this->proxyProvider()->renderingDirectly()) {
+ // DDL TODO: remove this crippling of the path renderer chain
+        // Disable the small path renderer because of the proxies in the atlas. They need to be
+ // unified when the opsTasks are added back to the destination drawing manager.
+ prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
+ }
+
+ GrTextContext::Options textContextOptions;
+ textContextOptions.fMaxDistanceFieldFontSize = this->options().fGlyphsAsPathsFontSize;
+ textContextOptions.fMinDistanceFieldFontSize = this->options().fMinDistanceFieldFontSize;
+ textContextOptions.fDistanceFieldVerticesAlwaysHaveW = false;
+#if SK_SUPPORT_ATLAS_TEXT
+ if (GrContextOptions::Enable::kYes == this->options().fDistanceFieldGlyphVerticesAlwaysHaveW) {
+ textContextOptions.fDistanceFieldVerticesAlwaysHaveW = true;
+ }
+#endif
+
+ fDrawingManager.reset(new GrDrawingManager(this,
+ prcOptions,
+ textContextOptions,
+ sortOpsTasks,
+ reduceOpsTaskSplitting));
+}
+
+void GrRecordingContext::abandonContext() {
+ INHERITED::abandonContext();
+
+ fStrikeCache->freeAll();
+ fTextBlobCache->freeAll();
+}
+
+GrDrawingManager* GrRecordingContext::drawingManager() {
+ return fDrawingManager.get();
+}
+
+sk_sp<GrOpMemoryPool> GrRecordingContext::refOpMemoryPool() {
+ if (!fOpMemoryPool) {
+ // DDL TODO: should the size of the memory pool be decreased in DDL mode? CPU-side memory
+ // consumed in DDL mode vs. normal mode for a single skp might be a good metric of wasted
+ // memory.
+ fOpMemoryPool = sk_sp<GrOpMemoryPool>(new GrOpMemoryPool(16384, 16384));
+ }
+
+ SkASSERT(fOpMemoryPool);
+ return fOpMemoryPool;
+}
+
+GrOpMemoryPool* GrRecordingContext::opMemoryPool() {
+ return this->refOpMemoryPool().get();
+}
+
+GrTextBlobCache* GrRecordingContext::getTextBlobCache() {
+ return fTextBlobCache.get();
+}
+
+const GrTextBlobCache* GrRecordingContext::getTextBlobCache() const {
+ return fTextBlobCache.get();
+}
+
+void GrRecordingContext::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
+ this->drawingManager()->addOnFlushCallbackObject(onFlushCBObject);
+}
+
+std::unique_ptr<GrSurfaceContext> GrRecordingContext::makeWrappedSurfaceContext(
+ sk_sp<GrSurfaceProxy> proxy,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ ASSERT_SINGLE_OWNER_PRIV
+
+ SkASSERT(proxy);
+
+ if (proxy->asRenderTargetProxy()) {
+ SkASSERT(kPremul_SkAlphaType == alphaType || kOpaque_SkAlphaType == alphaType);
+ return this->drawingManager()->makeRenderTargetContext(std::move(proxy), colorType,
+ std::move(colorSpace), props);
+ } else {
+ SkASSERT(proxy->asTextureProxy());
+ SkASSERT(!props);
+ return this->drawingManager()->makeTextureContext(std::move(proxy), colorType, alphaType,
+ std::move(colorSpace));
+ }
+}
+
+std::unique_ptr<GrTextureContext> GrRecordingContext::makeDeferredTextureContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ auto format = this->caps()->getDefaultBackendFormat(colorType, GrRenderable::kNo);
+ if (!format.isValid()) {
+ return nullptr;
+ }
+ auto config = this->caps()->getConfigFromBackendFormat(format, colorType);
+ if (config == kUnknown_GrPixelConfig) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = config;
+
+ sk_sp<GrTextureProxy> texture = this->proxyProvider()->createProxy(
+ format, desc, GrRenderable::kNo, 1, origin, mipMapped, fit, budgeted, isProtected);
+ if (!texture) {
+ return nullptr;
+ }
+
+ auto drawingManager = this->drawingManager();
+
+ return drawingManager->makeTextureContext(std::move(texture), colorType, alphaType,
+ std::move(colorSpace));
+}
+
+std::unique_ptr<GrRenderTargetContext> GrRecordingContext::makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ SkASSERT(sampleCnt > 0);
+ if (this->abandoned()) {
+ return nullptr;
+ }
+
+ auto format = this->caps()->getDefaultBackendFormat(colorType, GrRenderable::kYes);
+ if (!format.isValid()) {
+ return nullptr;
+ }
+ auto config = this->caps()->getConfigFromBackendFormat(format, colorType);
+ if (config == kUnknown_GrPixelConfig) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = config;
+
+ sk_sp<GrTextureProxy> rtp =
+ this->proxyProvider()->createProxy(format, desc, GrRenderable::kYes, sampleCnt, origin,
+ mipMapped, fit, budgeted, isProtected);
+ if (!rtp) {
+ return nullptr;
+ }
+
+ auto drawingManager = this->drawingManager();
+
+ auto renderTargetContext = drawingManager->makeRenderTargetContext(
+ std::move(rtp), colorType, std::move(colorSpace), surfaceProps);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ renderTargetContext->discard();
+
+ return renderTargetContext;
+}
+
+static inline GrColorType color_type_fallback(GrColorType ct) {
+ switch (ct) {
+ // kRGBA_8888 is our default fallback for many color types that may not have renderable
+ // backend formats.
+ case GrColorType::kAlpha_8:
+ case GrColorType::kBGR_565:
+ case GrColorType::kABGR_4444:
+ case GrColorType::kBGRA_8888:
+ case GrColorType::kRGBA_1010102:
+ case GrColorType::kRGBA_F16:
+ case GrColorType::kRGBA_F16_Clamped:
+ return GrColorType::kRGBA_8888;
+ case GrColorType::kAlpha_F16:
+ return GrColorType::kRGBA_F16;
+ case GrColorType::kGray_8:
+ return GrColorType::kRGB_888x;
+ default:
+ return GrColorType::kUnknown;
+ }
+}
+
+std::unique_ptr<GrRenderTargetContext>
+GrRecordingContext::makeDeferredRenderTargetContextWithFallback(SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ SkASSERT(sampleCnt > 0);
+ std::unique_ptr<GrRenderTargetContext> rtc;
+ do {
+ rtc = this->makeDeferredRenderTargetContext(fit, width, height, colorType, colorSpace,
+ sampleCnt, mipMapped, origin, surfaceProps,
+ budgeted, isProtected);
+ colorType = color_type_fallback(colorType);
+ } while (!rtc && colorType != GrColorType::kUnknown);
+ return rtc;
+}
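+
+// Illustration (editorial, not from the upstream sources): with a backend that
+// cannot render to F16 formats, a request for kAlpha_F16 walks the
+// color_type_fallback() chain kAlpha_F16 -> kRGBA_F16 -> kRGBA_8888 and returns
+// the first context that succeeds, e.g.
+//
+//   auto rtc = rContext->priv().makeDeferredRenderTargetContextWithFallback(
+//           SkBackingFit::kExact, 256, 256, GrColorType::kAlpha_F16, nullptr);
+//
+// so the returned context, if non-null, may be backed by a wider format than
+// was requested.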
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+sk_sp<const GrCaps> GrRecordingContextPriv::refCaps() const {
+ return fContext->refCaps();
+}
+
+sk_sp<GrSkSLFPFactoryCache> GrRecordingContextPriv::fpFactoryCache() {
+ return fContext->fpFactoryCache();
+}
+
+sk_sp<GrOpMemoryPool> GrRecordingContextPriv::refOpMemoryPool() {
+ return fContext->refOpMemoryPool();
+}
+
+void GrRecordingContextPriv::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
+ fContext->addOnFlushCallbackObject(onFlushCBObject);
+}
+
+std::unique_ptr<GrSurfaceContext> GrRecordingContextPriv::makeWrappedSurfaceContext(
+ sk_sp<GrSurfaceProxy> proxy,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ return fContext->makeWrappedSurfaceContext(std::move(proxy), colorType, alphaType,
+ std::move(colorSpace), props);
+}
+
+std::unique_ptr<GrTextureContext> GrRecordingContextPriv::makeDeferredTextureContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredTextureContext(fit, width, height, colorType, alphaType,
+ std::move(colorSpace), mipMapped, origin, budgeted,
+ isProtected);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrRecordingContextPriv::makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredRenderTargetContext(fit, width, height, colorType,
+ std::move(colorSpace), sampleCnt, mipMapped,
+ origin, surfaceProps, budgeted, isProtected);
+}
+
+std::unique_ptr<GrRenderTargetContext>
+GrRecordingContextPriv::makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ return fContext->makeDeferredRenderTargetContextWithFallback(fit,
+ width,
+ height,
+ colorType,
+ std::move(colorSpace),
+ sampleCnt,
+ mipMapped,
+ origin,
+ surfaceProps,
+ budgeted,
+ isProtected);
+}
+
+GrContext* GrRecordingContextPriv::backdoor() {
+ return (GrContext*) fContext;
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrRecordingContextPriv.h b/gfx/skia/skia/src/gpu/GrRecordingContextPriv.h
new file mode 100644
index 0000000000..2987778d6b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRecordingContextPriv.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRecordingContextPriv_DEFINED
+#define GrRecordingContextPriv_DEFINED
+
+#include "include/private/GrRecordingContext.h"
+
+/** Class that exposes methods to GrRecordingContext that are only intended for use internal to
+ Skia. This class is purely a privileged window into GrRecordingContext. It should never have
+ additional data members or virtual methods. */
+class GrRecordingContextPriv {
+public:
+ // from GrContext_Base
+ uint32_t contextID() const { return fContext->contextID(); }
+
+ bool matches(GrContext_Base* candidate) const { return fContext->matches(candidate); }
+
+ const GrContextOptions& options() const { return fContext->options(); }
+
+ const GrCaps* caps() const { return fContext->caps(); }
+ sk_sp<const GrCaps> refCaps() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fpFactoryCache();
+
+ GrImageContext* asImageContext() { return fContext->asImageContext(); }
+ GrRecordingContext* asRecordingContext() { return fContext->asRecordingContext(); }
+ GrContext* asDirectContext() { return fContext->asDirectContext(); }
+
+ // from GrImageContext
+ GrProxyProvider* proxyProvider() { return fContext->proxyProvider(); }
+ const GrProxyProvider* proxyProvider() const { return fContext->proxyProvider(); }
+
+ bool abandoned() const { return fContext->abandoned(); }
+
+ /** This is only useful for debug purposes */
+ SkDEBUGCODE(GrSingleOwner* singleOwner() const { return fContext->singleOwner(); } )
+
+ // from GrRecordingContext
+ GrDrawingManager* drawingManager() { return fContext->drawingManager(); }
+
+ sk_sp<GrOpMemoryPool> refOpMemoryPool();
+ GrOpMemoryPool* opMemoryPool() { return fContext->opMemoryPool(); }
+
+ GrStrikeCache* getGrStrikeCache() { return fContext->getGrStrikeCache(); }
+ GrTextBlobCache* getTextBlobCache() { return fContext->getTextBlobCache(); }
+
+ /**
+ * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
+ *
+ * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
+ * ensure its lifetime is tied to that of the context.
+ */
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+ std::unique_ptr<GrSurfaceContext> makeWrappedSurfaceContext(sk_sp<GrSurfaceProxy>,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace> = nullptr,
+ const SkSurfaceProps* = nullptr);
+
+ /** Create a new texture context backed by a deferred-style GrTextureProxy. */
+ std::unique_ptr<GrTextureContext> makeDeferredTextureContext(
+ SkBackingFit,
+ int width,
+ int height,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected = GrProtected::kNo);
+
+ /*
+ * Create a new render target context backed by a deferred-style
+ * GrRenderTargetProxy. We guarantee that "asTextureProxy" will succeed for
+ * renderTargetContexts created via this entry point.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContext(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /*
+     * This method will attempt to create a renderTargetContext that has at least as many
+     * channels, and at least the per-channel precision, as requested via 'colorType' (e.g., A8
+     * and 888 can be converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
+ * SRGB-ness will be preserved.
+ */
+ std::unique_ptr<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit fit,
+ int width,
+ int height,
+ GrColorType,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 1,
+ GrMipMapped = GrMipMapped::kNo,
+ GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted budgeted = SkBudgeted::kYes,
+ GrProtected isProtected = GrProtected::kNo);
+
+ GrAuditTrail* auditTrail() { return fContext->auditTrail(); }
+
+ // CONTEXT TODO: remove this backdoor
+ // In order to make progress we temporarily need a way to break CL impasses.
+ GrContext* backdoor();
+
+private:
+ explicit GrRecordingContextPriv(GrRecordingContext* context) : fContext(context) {}
+ GrRecordingContextPriv(const GrRecordingContextPriv&); // unimpl
+ GrRecordingContextPriv& operator=(const GrRecordingContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrRecordingContextPriv* operator&() const;
+ GrRecordingContextPriv* operator&();
+
+ GrRecordingContext* fContext;
+
+ friend class GrRecordingContext; // to construct/copy this type.
+};
+
+inline GrRecordingContextPriv GrRecordingContext::priv() { return GrRecordingContextPriv(this); }
+
+inline const GrRecordingContextPriv GrRecordingContext::priv() const {
+ return GrRecordingContextPriv(const_cast<GrRecordingContext*>(this));
+}
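+
+// Usage sketch (editorial, not part of the upstream header): code internal to
+// Skia reaches the privileged API through this ephemeral wrapper, e.g.
+//
+//   GrRecordingContext* rContext = ...;
+//   const GrCaps* caps = rContext->priv().caps();
+//   GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();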
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer.h b/gfx/skia/skia/src/gpu/GrRectanizer.h
new file mode 100644
index 0000000000..2e558967fb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRectanizer_DEFINED
+#define GrRectanizer_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+struct SkIPoint16;
+
+class GrRectanizer {
+public:
+ GrRectanizer(int width, int height) : fWidth(width), fHeight(height) {
+ SkASSERT(width >= 0);
+ SkASSERT(height >= 0);
+ }
+
+ virtual ~GrRectanizer() {}
+
+ virtual void reset() = 0;
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+    // Attempt to add a rect. Returns true on success; false on failure. If
+    // successful, the position in the atlas is returned in 'loc'.
+ virtual bool addRect(int width, int height, SkIPoint16* loc) = 0;
+ virtual float percentFull() const = 0;
+
+ /**
+ * Our factory, which returns the subclass du jour
+ */
+ static GrRectanizer* Factory(int width, int height);
+
+private:
+ int fWidth;
+ int fHeight;
+};
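+
+// Usage sketch (editorial, not part of the upstream header): the factory hands
+// back the skyline implementation; callers pack rects and read each placement
+// out of 'loc'.
+//
+//   GrRectanizer* rectanizer = GrRectanizer::Factory(512, 512);
+//   SkIPoint16 loc;
+//   if (rectanizer->addRect(32, 64, &loc)) {
+//       // the rect occupies [loc.fX, loc.fX + 32) x [loc.fY, loc.fY + 64)
+//   }
+//   delete rectanizer;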
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp
new file mode 100644
index 0000000000..1d0328a8fb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRectanizer_pow2.h"
+
+bool GrRectanizerPow2::addRect(int width, int height, SkIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height; // computed here since height will be modified
+
+ height = GrNextPow2(height);
+ if (height < kMIN_HEIGHT_POW2) {
+ height = kMIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ SkASSERT(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ SkASSERT(row->fRowHeight == height);
+ SkASSERT(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ SkASSERT(row->fLoc.fX <= this->width());
+ SkASSERT(row->fLoc.fY <= this->height());
+ SkASSERT(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
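+
+// Illustrative sketch (editorial, not part of the upstream sources): because
+// heights are quantized to powers of two, a 10-pixel-tall rect is placed in a
+// 16-high strip and the extra six rows of that strip are wasted:
+//
+//   GrRectanizerPow2 packer(256, 256);
+//   SkIPoint16 loc;
+//   packer.addRect(100, 10, &loc);   // opens a 16-high strip; loc == (0, 0)
+//   packer.addRect(100, 10, &loc);   // same strip; loc == (100, 0)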
+
+///////////////////////////////////////////////////////////////////////////////
+
+// factory is now in GrRectanizer_skyline.cpp
+//GrRectanizer* GrRectanizer::Factory(int width, int height) {
+// return new GrRectanizerPow2 (width, height);
+//}
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h
new file mode 100644
index 0000000000..70e8399337
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h
@@ -0,0 +1,81 @@
+/*
+* Copyright 2014 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrRectanizer_pow2_DEFINED
+#define GrRectanizer_pow2_DEFINED
+
+#include "include/private/SkMalloc.h"
+#include "src/core/SkIPoint16.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrRectanizer.h"
+
+// This Rectanizer quantizes the incoming rects to powers of 2. Each power
+// of two can have, at most, one active row/shelf. Once a row/shelf for
+// a particular power of two gets full, its fRows entry is recycled to point
+// to a new row.
+// The skyline algorithm almost always provides a better packing.
+class GrRectanizerPow2 : public GrRectanizer {
+public:
+ GrRectanizerPow2(int w, int h) : INHERITED(w, h) {
+ this->reset();
+ }
+
+ ~GrRectanizerPow2() override {}
+
+ void reset() override {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ sk_bzero(fRows, sizeof(fRows));
+ }
+
+ bool addRect(int w, int h, SkIPoint16* loc) override;
+
+ float percentFull() const override {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+private:
+ static const int kMIN_HEIGHT_POW2 = 2;
+ static const int kMaxExponent = 16;
+
+ struct Row {
+ SkIPoint16 fLoc;
+ // fRowHeight is actually known by this struct's position in fRows
+ // but it is used to signal if there exists an open row of this height
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[kMaxExponent]; // 0-th entry will be unused
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
+ static int HeightToRowIndex(int height) {
+ SkASSERT(height >= kMIN_HEIGHT_POW2);
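+        // 32 - SkCLZ(height - 1) computes ceil(log2(height)); e.g. heights 3..4
+        // map to index 2 and heights 5..8 map to index 3, so each index covers
+        // one power-of-two height bucket.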
+ int index = 32 - SkCLZ(height - 1);
+ SkASSERT(index < kMaxExponent);
+ return index;
+ }
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+
+ typedef GrRectanizer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp
new file mode 100644
index 0000000000..250cfc5ffb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/GrRectanizer_skyline.h"
+
+bool GrRectanizerSkyline::addRect(int width, int height, SkIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ // find position for new rectangle
+ int bestWidth = this->width() + 1;
+ int bestX = 0;
+ int bestY = this->height() + 1;
+ int bestIndex = -1;
+ for (int i = 0; i < fSkyline.count(); ++i) {
+ int y;
+ if (this->rectangleFits(i, width, height, &y)) {
+ // minimize y position first, then width of skyline
+ if (y < bestY || (y == bestY && fSkyline[i].fWidth < bestWidth)) {
+ bestIndex = i;
+ bestWidth = fSkyline[i].fWidth;
+ bestX = fSkyline[i].fX;
+ bestY = y;
+ }
+ }
+ }
+
+ // add rectangle to skyline
+ if (-1 != bestIndex) {
+ this->addSkylineLevel(bestIndex, bestX, bestY, width, height);
+ loc->fX = bestX;
+ loc->fY = bestY;
+
+ fAreaSoFar += width*height;
+ return true;
+ }
+
+ loc->fX = 0;
+ loc->fY = 0;
+ return false;
+}
+
+bool GrRectanizerSkyline::rectangleFits(int skylineIndex, int width, int height, int* ypos) const {
+ int x = fSkyline[skylineIndex].fX;
+ if (x + width > this->width()) {
+ return false;
+ }
+
+ int widthLeft = width;
+ int i = skylineIndex;
+ int y = fSkyline[skylineIndex].fY;
+ while (widthLeft > 0) {
+ y = SkMax32(y, fSkyline[i].fY);
+ if (y + height > this->height()) {
+ return false;
+ }
+ widthLeft -= fSkyline[i].fWidth;
+ ++i;
+ SkASSERT(i < fSkyline.count() || widthLeft <= 0);
+ }
+
+ *ypos = y;
+ return true;
+}
+
+void GrRectanizerSkyline::addSkylineLevel(int skylineIndex, int x, int y, int width, int height) {
+ SkylineSegment newSegment;
+ newSegment.fX = x;
+ newSegment.fY = y + height;
+ newSegment.fWidth = width;
+ fSkyline.insert(skylineIndex, 1, &newSegment);
+
+ SkASSERT(newSegment.fX + newSegment.fWidth <= this->width());
+ SkASSERT(newSegment.fY <= this->height());
+
+ // delete width of the new skyline segment from following ones
+ for (int i = skylineIndex+1; i < fSkyline.count(); ++i) {
+ // The new segment subsumes all or part of fSkyline[i]
+ SkASSERT(fSkyline[i-1].fX <= fSkyline[i].fX);
+
+ if (fSkyline[i].fX < fSkyline[i-1].fX + fSkyline[i-1].fWidth) {
+ int shrink = fSkyline[i-1].fX + fSkyline[i-1].fWidth - fSkyline[i].fX;
+
+ fSkyline[i].fX += shrink;
+ fSkyline[i].fWidth -= shrink;
+
+ if (fSkyline[i].fWidth <= 0) {
+ // fully consumed
+ fSkyline.remove(i);
+ --i;
+ } else {
+ // only partially consumed
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ // merge fSkylines
+ for (int i = 0; i < fSkyline.count()-1; ++i) {
+ if (fSkyline[i].fY == fSkyline[i+1].fY) {
+ fSkyline[i].fWidth += fSkyline[i+1].fWidth;
+ fSkyline.remove(i+1);
+ --i;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerSkyline(width, height);
+}
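
The placement rule in addRect() is worth isolating: the lowest landing y wins, with ties broken by the narrowest skyline segment. A self-contained sketch of just that selection step (plain types; each segment's own y stands in for the full rectangleFits() walk, which also checks the segments a wide rect would span):

    #include <climits>
    #include <vector>

    struct Segment { int x, y, width; };

    // Choose where a rect of rectWidth lands lowest on the skyline; prefer the
    // narrower segment on ties, as GrRectanizerSkyline::addRect() does above.
    static int bestSegment(const std::vector<Segment>& skyline, int rectWidth, int binWidth) {
        int bestY = INT_MAX, bestWidth = INT_MAX, bestIndex = -1;
        for (int i = 0; i < (int)skyline.size(); ++i) {
            if (skyline[i].x + rectWidth > binWidth) continue;  // off the right edge
            int y = skyline[i].y;                               // simplified fit height
            if (y < bestY || (y == bestY && skyline[i].width < bestWidth)) {
                bestY = y; bestWidth = skyline[i].width; bestIndex = i;
            }
        }
        return bestIndex;  // -1 if nothing fits
    }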
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h
new file mode 100644
index 0000000000..60663fdc5b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRectanizer_skyline_DEFINED
+#define GrRectanizer_skyline_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrRectanizer.h"
+
+// Pack rectangles and track the current silhouette
+// Based, in part, on Jukka Jylanki's work at http://clb.demon.fi
+class GrRectanizerSkyline : public GrRectanizer {
+public:
+ GrRectanizerSkyline(int w, int h) : INHERITED(w, h) {
+ this->reset();
+ }
+
+ ~GrRectanizerSkyline() override { }
+
+ void reset() override {
+ fAreaSoFar = 0;
+ fSkyline.reset();
+ SkylineSegment* seg = fSkyline.append(1);
+ seg->fX = 0;
+ seg->fY = 0;
+ seg->fWidth = this->width();
+ }
+
+ bool addRect(int w, int h, SkIPoint16* loc) override;
+
+ float percentFull() const override {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+private:
+ struct SkylineSegment {
+ int fX;
+ int fY;
+ int fWidth;
+ };
+
+ SkTDArray<SkylineSegment> fSkyline;
+
+ int32_t fAreaSoFar;
+
+ // Can a width x height rectangle fit in the free space represented by
+ // the skyline segments >= 'skylineIndex'? If so, return true and fill in
+ // 'y' with the y-location at which it fits (the x location is pulled from
+    // 'skylineIndex's segment).
+ bool rectangleFits(int skylineIndex, int width, int height, int* y) const;
+ // Update the skyline structure to include a width x height rect located
+ // at x,y.
+ void addSkylineLevel(int skylineIndex, int x, int y, int width, int height);
+
+ typedef GrRectanizer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrReducedClip.cpp b/gfx/skia/skia/src/gpu/GrReducedClip.cpp
new file mode 100644
index 0000000000..9224e005c6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrReducedClip.cpp
@@ -0,0 +1,991 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkClipOpPriv.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrReducedClip.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrStencilClip.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include "src/gpu/effects/GrConvexPolyEffect.h"
+#include "src/gpu/effects/GrRRectEffect.h"
+#include "src/gpu/effects/generated/GrAARectEffect.h"
+#include "src/gpu/geometry/GrShape.h"
+
+/**
+ * There are plenty of optimizations that could be added here. Maybe flips could be folded into
+ * earlier operations. Or would inserting flips and reversing earlier ops ever be a win? Perhaps
+ * for the case where the bounds are kInsideOut_BoundsType. We could restrict earlier operations
+ * based on later intersect operations, and perhaps remove intersect-rects. We could optionally
+ * take a rect in case the caller knows a bound on what is to be drawn through this clip.
+ */
+GrReducedClip::GrReducedClip(const SkClipStack& stack, const SkRect& queryBounds,
+ const GrCaps* caps, int maxWindowRectangles, int maxAnalyticFPs,
+ int maxCCPRClipPaths)
+ : fCaps(caps)
+ , fMaxWindowRectangles(maxWindowRectangles)
+ , fMaxAnalyticFPs(maxAnalyticFPs)
+ , fMaxCCPRClipPaths(maxCCPRClipPaths) {
+ SkASSERT(!queryBounds.isEmpty());
+ SkASSERT(fMaxWindowRectangles <= GrWindowRectangles::kMaxWindows);
+ SkASSERT(fMaxCCPRClipPaths <= fMaxAnalyticFPs);
+ fHasScissor = false;
+ fAAClipRectGenID = SK_InvalidGenID;
+
+ if (stack.isWideOpen()) {
+ fInitialState = InitialState::kAllIn;
+ return;
+ }
+
+ SkClipStack::BoundsType stackBoundsType;
+ SkRect stackBounds;
+ bool iior;
+ stack.getBounds(&stackBounds, &stackBoundsType, &iior);
+
+ if (GrClip::IsOutsideClip(stackBounds, queryBounds)) {
+ bool insideOut = SkClipStack::kInsideOut_BoundsType == stackBoundsType;
+ fInitialState = insideOut ? InitialState::kAllIn : InitialState::kAllOut;
+ return;
+ }
+
+ if (iior) {
+ // "Is intersection of rects" means the clip is a single rect indicated by the stack bounds.
+ // This should only be true if aa/non-aa status matches among all elements.
+ SkASSERT(SkClipStack::kNormal_BoundsType == stackBoundsType);
+
+ if (GrClip::IsInsideClip(stackBounds, queryBounds)) {
+ fInitialState = InitialState::kAllIn;
+ return;
+ }
+
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kTop_IterStart);
+
+ if (!iter.prev()->isAA() || GrClip::IsPixelAligned(stackBounds)) {
+ // The clip is a non-aa rect. Here we just implement the entire thing using fScissor.
+ stackBounds.round(&fScissor);
+ fHasScissor = true;
+ fInitialState = fScissor.isEmpty() ? InitialState::kAllOut : InitialState::kAllIn;
+ return;
+ }
+
+ SkRect tightBounds;
+ SkAssertResult(tightBounds.intersect(stackBounds, queryBounds));
+ fScissor = GrClip::GetPixelIBounds(tightBounds);
+ if (fScissor.isEmpty()) {
+ fInitialState = InitialState::kAllOut;
+ return;
+ }
+ fHasScissor = true;
+
+ fAAClipRect = stackBounds;
+ fAAClipRectGenID = stack.getTopmostGenID();
+ SkASSERT(SK_InvalidGenID != fAAClipRectGenID);
+
+ fInitialState = InitialState::kAllIn;
+ } else {
+ SkRect tighterQuery = queryBounds;
+ if (SkClipStack::kNormal_BoundsType == stackBoundsType) {
+ // Tighten the query by introducing a new clip at the stack's pixel boundaries. (This
+ // new clip will be enforced by the scissor.)
+ SkAssertResult(tighterQuery.intersect(GrClip::GetPixelBounds(stackBounds)));
+ }
+
+ fScissor = GrClip::GetPixelIBounds(tighterQuery);
+ if (fScissor.isEmpty()) {
+ fInitialState = InitialState::kAllOut;
+ return;
+ }
+ fHasScissor = true;
+
+ // Now that we have determined the bounds to use and filtered out the trivial cases, call
+ // the helper that actually walks the stack.
+ this->walkStack(stack, tighterQuery);
+ }
+
+ if (SK_InvalidGenID != fAAClipRectGenID && // Is there an AA clip rect?
+ ClipResult::kNotClipped == this->addAnalyticFP(fAAClipRect, Invert::kNo, GrAA::kYes)) {
+ if (fMaskElements.isEmpty()) {
+ // Use a replace since it is faster than intersect.
+ fMaskElements.addToHead(fAAClipRect, SkMatrix::I(), kReplace_SkClipOp, true /*doAA*/);
+ fInitialState = InitialState::kAllOut;
+ } else {
+ fMaskElements.addToTail(fAAClipRect, SkMatrix::I(), kIntersect_SkClipOp, true /*doAA*/);
+ }
+ fMaskRequiresAA = true;
+ fMaskGenID = fAAClipRectGenID;
+ }
+}
+
+void GrReducedClip::walkStack(const SkClipStack& stack, const SkRect& queryBounds) {
+ // walk backwards until we get to:
+ // a) the beginning
+ // b) an operation that is known to make the bounds all inside/outside
+ // c) a replace operation
+
+ enum class InitialTriState {
+ kUnknown = -1,
+ kAllIn = (int)GrReducedClip::InitialState::kAllIn,
+ kAllOut = (int)GrReducedClip::InitialState::kAllOut
+ } initialTriState = InitialTriState::kUnknown;
+
+ // During our backwards walk, track whether we've seen ops that either grow or shrink the clip.
+ // TODO: track these per saved clip so that we can consider them on the forward pass.
+ bool embiggens = false;
+ bool emsmallens = false;
+
+ // We use a slightly relaxed set of query bounds for element containment tests. This is to
+ // account for floating point rounding error that may have occurred during coord transforms.
+ SkRect relaxedQueryBounds = queryBounds.makeInset(GrClip::kBoundsTolerance,
+ GrClip::kBoundsTolerance);
+ if (relaxedQueryBounds.isEmpty()) {
+ relaxedQueryBounds = queryBounds;
+ }
+
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kTop_IterStart);
+ int numAAElements = 0;
+ while (InitialTriState::kUnknown == initialTriState) {
+ const Element* element = iter.prev();
+ if (nullptr == element) {
+ initialTriState = InitialTriState::kAllIn;
+ break;
+ }
+ if (SkClipStack::kEmptyGenID == element->getGenID()) {
+ initialTriState = InitialTriState::kAllOut;
+ break;
+ }
+ if (SkClipStack::kWideOpenGenID == element->getGenID()) {
+ initialTriState = InitialTriState::kAllIn;
+ break;
+ }
+
+ bool skippable = false;
+ bool isFlip = false; // does this op just flip the in/out state of every point in the bounds
+
+ switch (element->getOp()) {
+ case kDifference_SkClipOp:
+ // check if the shape subtracted either contains the entire bounds (and makes
+ // the clip empty) or is outside the bounds and therefore can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipInsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ skippable = (ClipResult::kClipped == result);
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipOutsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ skippable = (ClipResult::kClipped == result);
+ }
+ }
+ if (!skippable) {
+ emsmallens = true;
+ }
+ break;
+ case kIntersect_SkClipOp:
+ // check if the shape intersected contains the entire bounds and therefore can
+ // be skipped or it is outside the entire bounds and therefore makes the clip
+ // empty.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipOutsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ skippable = (ClipResult::kClipped == result);
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipInsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ skippable = (ClipResult::kClipped == result);
+ }
+ }
+ if (!skippable) {
+ emsmallens = true;
+ }
+ break;
+ case kUnion_SkClipOp:
+ // If the union-ed shape contains the entire bounds then after this element
+ // the bounds is entirely inside the clip. If the union-ed shape is outside the
+ // bounds then this op can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ embiggens = true;
+ }
+ break;
+ case kXOR_SkClipOp:
+ // If the bounds is entirely inside the shape being xor-ed then the effect is
+ // to flip the inside/outside state of every point in the bounds. We may be
+ // able to take advantage of this in the forward pass. If the xor-ed shape
+ // doesn't intersect the bounds then it can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ isFlip = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ isFlip = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = embiggens = true;
+ }
+ break;
+ case kReverseDifference_SkClipOp:
+ // When the bounds is entirely within the rev-diff shape then this behaves like xor
+ // and reverses every point inside the bounds. If the shape is completely outside
+ // the bounds then we know after this element is applied that the bounds will be
+                // all outside the current clip.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ isFlip = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ isFlip = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = embiggens = true;
+ }
+ break;
+
+ case kReplace_SkClipOp:
+ // Replace will always terminate our walk. We will either begin the forward walk
+                // at the replace op or detect here that the shape is either completely inside
+ // or completely outside the bounds. In this latter case it can be skipped by
+ // setting the correct value for initialTriState.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipOutsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ if (ClipResult::kClipped == result) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (!embiggens) {
+ ClipResult result = this->clipInsideElement(element);
+ if (ClipResult::kMadeEmpty == result) {
+ return;
+ }
+ if (ClipResult::kClipped == result) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ }
+ }
+ if (!skippable) {
+ initialTriState = InitialTriState::kAllOut;
+ embiggens = emsmallens = true;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("Unexpected op.");
+ break;
+ }
+ if (!skippable) {
+ if (fMaskElements.isEmpty()) {
+ // This will be the last element. Record the stricter genID.
+ fMaskGenID = element->getGenID();
+ }
+
+ // if it is a flip, change it to a bounds-filling rect
+ if (isFlip) {
+ SkASSERT(kXOR_SkClipOp == element->getOp() ||
+ kReverseDifference_SkClipOp == element->getOp());
+ fMaskElements.addToHead(SkRect::Make(fScissor), SkMatrix::I(),
+ kReverseDifference_SkClipOp, false);
+ } else {
+ Element* newElement = fMaskElements.addToHead(*element);
+ if (newElement->isAA()) {
+ ++numAAElements;
+ }
+ // Intersecting an inverse shape is the same as differencing the non-inverse shape.
+ // Replacing with an inverse shape is the same as setting initialState=kAllIn and
+ // differencing the non-inverse shape.
+ bool isReplace = kReplace_SkClipOp == newElement->getOp();
+ if (newElement->isInverseFilled() &&
+ (kIntersect_SkClipOp == newElement->getOp() || isReplace)) {
+ newElement->invertShapeFillType();
+ newElement->setOp(kDifference_SkClipOp);
+ if (isReplace) {
+ SkASSERT(InitialTriState::kAllOut == initialTriState);
+ initialTriState = InitialTriState::kAllIn;
+ }
+ }
+ }
+ }
+ }
+
+ if ((InitialTriState::kAllOut == initialTriState && !embiggens) ||
+ (InitialTriState::kAllIn == initialTriState && !emsmallens)) {
+ fMaskElements.reset();
+ numAAElements = 0;
+ } else {
+ Element* element = fMaskElements.headIter().get();
+ while (element) {
+ bool skippable = false;
+ switch (element->getOp()) {
+ case kDifference_SkClipOp:
+ // subtracting from the empty set yields the empty set.
+ skippable = InitialTriState::kAllOut == initialTriState;
+ break;
+ case kIntersect_SkClipOp:
+ // intersecting with the empty set yields the empty set
+ if (InitialTriState::kAllOut == initialTriState) {
+ skippable = true;
+ } else {
+ // We can clear to zero and then simply draw the clip element.
+ initialTriState = InitialTriState::kAllOut;
+ element->setOp(kReplace_SkClipOp);
+ }
+ break;
+ case kUnion_SkClipOp:
+ if (InitialTriState::kAllIn == initialTriState) {
+ // unioning the infinite plane with anything is a no-op.
+ skippable = true;
+ } else {
+ // unioning the empty set with a shape is the shape.
+ element->setOp(kReplace_SkClipOp);
+ }
+ break;
+ case kXOR_SkClipOp:
+ if (InitialTriState::kAllOut == initialTriState) {
+ // xor could be changed to diff in the kAllIn case, not sure it's a win.
+ element->setOp(kReplace_SkClipOp);
+ }
+ break;
+ case kReverseDifference_SkClipOp:
+ if (InitialTriState::kAllIn == initialTriState) {
+ // subtracting the whole plane will yield the empty set.
+ skippable = true;
+ initialTriState = InitialTriState::kAllOut;
+ } else {
+ // this picks up flips inserted in the backwards pass.
+ skippable = element->isInverseFilled() ?
+ GrClip::IsOutsideClip(element->getBounds(), queryBounds) :
+ element->contains(relaxedQueryBounds);
+ if (skippable) {
+ initialTriState = InitialTriState::kAllIn;
+ } else {
+ element->setOp(kReplace_SkClipOp);
+ }
+ }
+ break;
+ case kReplace_SkClipOp:
+ skippable = false; // we would have skipped it in the backwards walk if we
+ // could've.
+ break;
+ default:
+ SkDEBUGFAIL("Unexpected op.");
+ break;
+ }
+ if (!skippable) {
+ break;
+ } else {
+ if (element->isAA()) {
+ --numAAElements;
+ }
+ fMaskElements.popHead();
+ element = fMaskElements.headIter().get();
+ }
+ }
+ }
+ fMaskRequiresAA = numAAElements > 0;
+
+ SkASSERT(InitialTriState::kUnknown != initialTriState);
+ fInitialState = static_cast<GrReducedClip::InitialState>(initialTriState);
+}
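
The backwards walk boils down to classifying each element against the (slightly relaxed) query bounds before deciding to keep it. A reduced sketch of the kIntersect case for non-inverse fills, with hypothetical plain-rect types standing in for the stack element (the tolerance-relaxed bounds are omitted):

    struct Rect { float l, t, r, b; };

    static bool contains(const Rect& outer, const Rect& inner) {
        return outer.l <= inner.l && outer.t <= inner.t &&
               outer.r >= inner.r && outer.b >= inner.b;
    }
    static bool outside(const Rect& a, const Rect& b) {
        return a.r <= b.l || a.b <= b.t || b.r <= a.l || b.b <= a.t;
    }

    enum class Verdict { kSkip, kAllOut, kKeep };

    // kIntersect, non-inverse fill: an element covering the whole query is a
    // no-op; one that misses the query empties the clip; anything else is kept.
    static Verdict classifyIntersect(const Rect& element, const Rect& query) {
        if (contains(element, query)) return Verdict::kSkip;
        if (outside(element, query))  return Verdict::kAllOut;
        return Verdict::kKeep;
    }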
+
+GrReducedClip::ClipResult GrReducedClip::clipInsideElement(const Element* element) {
+ SkIRect elementIBounds;
+ if (!element->isAA()) {
+ element->getBounds().round(&elementIBounds);
+ } else {
+ elementIBounds = GrClip::GetPixelIBounds(element->getBounds());
+ }
+ SkASSERT(fHasScissor);
+ if (!fScissor.intersect(elementIBounds)) {
+ this->makeEmpty();
+ return ClipResult::kMadeEmpty;
+ }
+
+ switch (element->getDeviceSpaceType()) {
+ case Element::DeviceSpaceType::kEmpty:
+ return ClipResult::kMadeEmpty;
+
+ case Element::DeviceSpaceType::kRect:
+ SkASSERT(element->getBounds() == element->getDeviceSpaceRect());
+ SkASSERT(!element->isInverseFilled());
+ if (element->isAA()) {
+ if (SK_InvalidGenID == fAAClipRectGenID) { // No AA clip rect yet?
+ fAAClipRect = element->getDeviceSpaceRect();
+ // fAAClipRectGenID is the value we should use for fMaskGenID if we end up
+ // moving the AA clip rect into the mask. The mask GenID is simply the topmost
+ // element's GenID. And since we walk the stack backwards, this means it's just
+ // the first element we don't skip during our walk.
+ fAAClipRectGenID = fMaskElements.isEmpty() ? element->getGenID() : fMaskGenID;
+ SkASSERT(SK_InvalidGenID != fAAClipRectGenID);
+ } else if (!fAAClipRect.intersect(element->getDeviceSpaceRect())) {
+ this->makeEmpty();
+ return ClipResult::kMadeEmpty;
+ }
+ }
+ return ClipResult::kClipped;
+
+ case Element::DeviceSpaceType::kRRect:
+ SkASSERT(!element->isInverseFilled());
+ return this->addAnalyticFP(element->getDeviceSpaceRRect(), Invert::kNo,
+ GrAA(element->isAA()));
+
+ case Element::DeviceSpaceType::kPath:
+ return this->addAnalyticFP(element->getDeviceSpacePath(),
+ Invert(element->isInverseFilled()), GrAA(element->isAA()));
+ }
+
+ SK_ABORT("Unexpected DeviceSpaceType");
+}
+
+GrReducedClip::ClipResult GrReducedClip::clipOutsideElement(const Element* element) {
+ switch (element->getDeviceSpaceType()) {
+ case Element::DeviceSpaceType::kEmpty:
+ return ClipResult::kMadeEmpty;
+
+ case Element::DeviceSpaceType::kRect:
+ SkASSERT(!element->isInverseFilled());
+ if (fWindowRects.count() < fMaxWindowRectangles) {
+ // Clip out the inside of every rect. We won't be able to entirely skip the AA ones,
+ // but it saves processing time.
+ this->addWindowRectangle(element->getDeviceSpaceRect(), element->isAA());
+ if (!element->isAA()) {
+ return ClipResult::kClipped;
+ }
+ }
+ return this->addAnalyticFP(element->getDeviceSpaceRect(), Invert::kYes,
+ GrAA(element->isAA()));
+
+ case Element::DeviceSpaceType::kRRect: {
+ SkASSERT(!element->isInverseFilled());
+ const SkRRect& clipRRect = element->getDeviceSpaceRRect();
+ ClipResult clipResult = this->addAnalyticFP(clipRRect, Invert::kYes,
+ GrAA(element->isAA()));
+ if (fWindowRects.count() >= fMaxWindowRectangles) {
+ return clipResult;
+ }
+
+ // Clip out the interiors of round rects with two window rectangles in the shape of a
+ // "plus". This doesn't let us skip the clip element, but still saves processing time.
+ SkVector insetTL = clipRRect.radii(SkRRect::kUpperLeft_Corner);
+ SkVector insetBR = clipRRect.radii(SkRRect::kLowerRight_Corner);
+ if (SkRRect::kComplex_Type == clipRRect.getType()) {
+ const SkVector& insetTR = clipRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& insetBL = clipRRect.radii(SkRRect::kLowerLeft_Corner);
+ insetTL.fX = SkTMax(insetTL.x(), insetBL.x());
+ insetTL.fY = SkTMax(insetTL.y(), insetTR.y());
+ insetBR.fX = SkTMax(insetBR.x(), insetTR.x());
+ insetBR.fY = SkTMax(insetBR.y(), insetBL.y());
+ }
+ const SkRect& bounds = clipRRect.getBounds();
+ if (insetTL.x() + insetBR.x() >= bounds.width() ||
+ insetTL.y() + insetBR.y() >= bounds.height()) {
+ return clipResult; // The interior "plus" is empty.
+ }
+
+ SkRect horzRect = SkRect::MakeLTRB(bounds.left(), bounds.top() + insetTL.y(),
+ bounds.right(), bounds.bottom() - insetBR.y());
+ this->addWindowRectangle(horzRect, element->isAA());
+
+ if (fWindowRects.count() < fMaxWindowRectangles) {
+ SkRect vertRect = SkRect::MakeLTRB(bounds.left() + insetTL.x(), bounds.top(),
+ bounds.right() - insetBR.x(), bounds.bottom());
+ this->addWindowRectangle(vertRect, element->isAA());
+ }
+
+ return clipResult;
+ }
+
+ case Element::DeviceSpaceType::kPath:
+ return this->addAnalyticFP(element->getDeviceSpacePath(),
+ Invert(!element->isInverseFilled()), GrAA(element->isAA()));
+ }
+
+ SK_ABORT("Unexpected DeviceSpaceType");
+}
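
The "plus"-shaped interior carved out for round rects above depends only on the bounds and the four corner radii. The same computation as a standalone sketch (simple structs replace SkRRect; taking the max of opposing corners handles the complex-radii case):

    #include <algorithm>

    struct Vec  { float x, y; };
    struct Rect { float l, t, r, b; };

    // Horizontal and vertical bars of a round rect's interior "plus", as in
    // clipOutsideElement(). Returns false when the radii consume the whole
    // rect and the plus is empty.
    static bool interiorPlus(const Rect& bounds, Vec tl, Vec tr, Vec bl, Vec br,
                             Rect* horz, Rect* vert) {
        Vec insetTL{std::max(tl.x, bl.x), std::max(tl.y, tr.y)};
        Vec insetBR{std::max(br.x, tr.x), std::max(br.y, bl.y)};
        if (insetTL.x + insetBR.x >= bounds.r - bounds.l ||
            insetTL.y + insetBR.y >= bounds.b - bounds.t) {
            return false;  // interior "plus" is empty
        }
        *horz = {bounds.l, bounds.t + insetTL.y, bounds.r, bounds.b - insetBR.y};
        *vert = {bounds.l + insetTL.x, bounds.t, bounds.r - insetBR.x, bounds.b};
        return true;
    }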
+
+inline void GrReducedClip::addWindowRectangle(const SkRect& elementInteriorRect, bool elementIsAA) {
+ SkIRect window;
+ if (!elementIsAA) {
+ elementInteriorRect.round(&window);
+ } else {
+ elementInteriorRect.roundIn(&window);
+ }
+ if (!window.isEmpty()) { // Skip very thin windows that round to zero or negative dimensions.
+ fWindowRects.addWindow(window);
+ }
+}
+
+GrClipEdgeType GrReducedClip::GetClipEdgeType(Invert invert, GrAA aa) {
+ if (Invert::kNo == invert) {
+ return (GrAA::kYes == aa) ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
+ } else {
+ return (GrAA::kYes == aa) ? GrClipEdgeType::kInverseFillAA : GrClipEdgeType::kInverseFillBW;
+ }
+}
+
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkRect& deviceSpaceRect,
+ Invert invert, GrAA aa) {
+ if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
+ return ClipResult::kNotClipped;
+ }
+
+ fAnalyticFPs.push_back(GrAARectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRect));
+ SkASSERT(fAnalyticFPs.back());
+
+ return ClipResult::kClipped;
+}
+
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkRRect& deviceSpaceRRect,
+ Invert invert, GrAA aa) {
+ if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
+ return ClipResult::kNotClipped;
+ }
+
+ if (auto fp = GrRRectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRRect,
+ *fCaps->shaderCaps())) {
+ fAnalyticFPs.push_back(std::move(fp));
+ return ClipResult::kClipped;
+ }
+
+ SkPath deviceSpacePath;
+ deviceSpacePath.setIsVolatile(true);
+ deviceSpacePath.addRRect(deviceSpaceRRect);
+ return this->addAnalyticFP(deviceSpacePath, invert, aa);
+}
+
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkPath& deviceSpacePath,
+ Invert invert, GrAA aa) {
+ if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
+ return ClipResult::kNotClipped;
+ }
+
+ if (auto fp = GrConvexPolyEffect::Make(GetClipEdgeType(invert, aa), deviceSpacePath)) {
+ fAnalyticFPs.push_back(std::move(fp));
+ return ClipResult::kClipped;
+ }
+
+ if (fCCPRClipPaths.count() < fMaxCCPRClipPaths && GrAA::kYes == aa) {
+ // Set aside CCPR paths for later. We will create their clip FPs once we know the ID of the
+ // opsTask they will operate in.
+ SkPath& ccprClipPath = fCCPRClipPaths.push_back(deviceSpacePath);
+ if (Invert::kYes == invert) {
+ ccprClipPath.toggleInverseFillType();
+ }
+ return ClipResult::kClipped;
+ }
+
+ return ClipResult::kNotClipped;
+}
+
+void GrReducedClip::makeEmpty() {
+ fHasScissor = false;
+ fAAClipRectGenID = SK_InvalidGenID;
+ fWindowRects.reset();
+ fMaskElements.reset();
+ fInitialState = InitialState::kAllOut;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Create an 8-bit clip mask in alpha
+
+static bool stencil_element(GrRenderTargetContext* rtc,
+ const GrFixedClip& clip,
+ const GrUserStencilSettings* ss,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element) {
+ GrAA aa = GrAA(element->isAA());
+ switch (element->getDeviceSpaceType()) {
+ case SkClipStack::Element::DeviceSpaceType::kEmpty:
+ SkDEBUGFAIL("Should never get here with an empty element.");
+ break;
+ case SkClipStack::Element::DeviceSpaceType::kRect: {
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory((SkRegion::Op)element->getOp(),
+ element->isInverseFilled());
+ rtc->priv().stencilRect(clip, ss, std::move(paint), aa, viewMatrix,
+ element->getDeviceSpaceRect());
+ return true;
+ }
+ default: {
+ SkPath path;
+ element->asDeviceSpacePath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ return rtc->priv().drawAndStencilPath(clip, ss, (SkRegion::Op)element->getOp(),
+ element->isInverseFilled(), aa, viewMatrix, path);
+ }
+ }
+
+ return false;
+}
+
+static void stencil_device_rect(GrRenderTargetContext* rtc,
+ const GrHardClip& clip,
+ const GrUserStencilSettings* ss,
+ GrAA aa,
+ const SkRect& rect) {
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Get());
+ rtc->priv().stencilRect(clip, ss, std::move(paint), aa, SkMatrix::I(), rect);
+}
+
+static void draw_element(GrRenderTargetContext* rtc,
+ const GrClip& clip, // TODO: can this just always be WideOpen?
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element) {
+ // TODO: Draw rrects directly here.
+ switch (element->getDeviceSpaceType()) {
+ case SkClipStack::Element::DeviceSpaceType::kEmpty:
+ SkDEBUGFAIL("Should never get here with an empty element.");
+ break;
+ case SkClipStack::Element::DeviceSpaceType::kRect:
+ rtc->drawRect(clip, std::move(paint), aa, viewMatrix, element->getDeviceSpaceRect());
+ break;
+ default: {
+ SkPath path;
+ element->asDeviceSpacePath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ rtc->drawPath(clip, std::move(paint), aa, viewMatrix, path, GrStyle::SimpleFill());
+ break;
+ }
+ }
+}
+
+bool GrReducedClip::drawAlphaClipMask(GrRenderTargetContext* rtc) const {
+ // The texture may be larger than necessary, this rect represents the part of the texture
+ // we populate with a rasterization of the clip.
+ GrFixedClip clip(SkIRect::MakeWH(fScissor.width(), fScissor.height()));
+
+ if (!fWindowRects.empty()) {
+ clip.setWindowRectangles(fWindowRects.makeOffset(-fScissor.left(), -fScissor.top()),
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ // The scratch texture that we are drawing into can be substantially larger than the mask. Only
+ // clear the part that we care about.
+ SkPMColor4f initialCoverage =
+ InitialState::kAllIn == this->initialState() ? SK_PMColor4fWHITE : SK_PMColor4fTRANSPARENT;
+ rtc->priv().clear(clip, initialCoverage, GrRenderTargetContext::CanClearFullscreen::kYes);
+
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-fScissor.left()), SkIntToScalar(-fScissor.top()));
+
+ // walk through each clip element and perform its set op
+ for (ElementList::Iter iter(fMaskElements); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ SkRegion::Op op = (SkRegion::Op)element->getOp();
+ GrAA aa = GrAA(element->isAA());
+ bool invert = element->isInverseFilled();
+ if (invert || SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
+ // draw directly into the result with the stencil set to make the pixels affected
+ // by the clip shape be non-zero.
+ static constexpr GrUserStencilSettings kStencilInElement(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>()
+ );
+ if (!stencil_element(rtc, clip, &kStencilInElement, translate, element)) {
+ return false;
+ }
+
+ // Draw to the exterior pixels (those with a zero stencil value).
+ static constexpr GrUserStencilSettings kDrawOutsideElement(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+ );
+
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory(op, !invert);
+ rtc->priv().stencilRect(clip, &kDrawOutsideElement, std::move(paint), GrAA::kNo,
+ translate, SkRect::Make(fScissor));
+ } else {
+ // all the remaining ops can just be directly draw into the accumulation buffer
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory(op, false);
+
+ draw_element(rtc, clip, std::move(paint), aa, translate, element);
+ }
+ }
+
+ return true;
+}
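
Which elements force the two-pass stencil route in drawAlphaClipMask() is a small, fixed predicate; isolating it makes the branch above easier to audit (sketch only; a local enum stands in for SkRegion::Op):

    enum class Op { kDifference, kIntersect, kUnion, kXor, kReverseDifference, kReplace };

    // Inverse fills, intersects, and reverse-differences must stencil the
    // element first and then resolve the exterior pixels in a second pass;
    // every other op can be drawn straight into the coverage mask.
    static bool needsTwoPassStencil(Op op, bool inverseFilled) {
        return inverseFilled || op == Op::kIntersect || op == Op::kReverseDifference;
    }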
+
+////////////////////////////////////////////////////////////////////////////////
+// Create a 1-bit clip mask in the stencil buffer.
+
+bool GrReducedClip::drawStencilClipMask(GrRecordingContext* context,
+ GrRenderTargetContext* renderTargetContext) const {
+ // We set the current clip to the bounds so that our recursive draws are scissored to them.
+ GrStencilClip stencilClip(fScissor, this->maskGenID());
+
+ if (!fWindowRects.empty()) {
+ stencilClip.fixedClip().setWindowRectangles(fWindowRects,
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ bool initialState = InitialState::kAllIn == this->initialState();
+ renderTargetContext->priv().clearStencilClip(stencilClip.fixedClip(), initialState);
+
+ // walk through each clip element and perform its set op with the existing clip.
+ for (ElementList::Iter iter(fMaskElements); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ // MIXED SAMPLES TODO: We can use stencil with mixed samples as well.
+ bool doStencilMSAA = element->isAA() && renderTargetContext->numSamples() > 1;
+ // Since we are only drawing to the stencil buffer, we can use kMSAA even if the render
+ // target is mixed sampled.
+ auto pathAAType = (doStencilMSAA) ? GrAAType::kMSAA : GrAAType::kNone;
+ bool fillInverted = false;
+
+ // This will be used to determine whether the clip shape can be rendered into the
+ // stencil with arbitrary stencil settings.
+ GrPathRenderer::StencilSupport stencilSupport;
+
+ SkRegion::Op op = (SkRegion::Op)element->getOp();
+
+ GrPathRenderer* pr = nullptr;
+ SkPath clipPath;
+ if (Element::DeviceSpaceType::kRect == element->getDeviceSpaceType()) {
+ stencilSupport = GrPathRenderer::kNoRestriction_StencilSupport;
+ fillInverted = false;
+ } else {
+ element->asDeviceSpacePath(&clipPath);
+ fillInverted = clipPath.isInverseFillType();
+ if (fillInverted) {
+ clipPath.toggleInverseFillType();
+ }
+
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fCaps = context->priv().caps();
+ canDrawArgs.fProxy = renderTargetContext->proxy();
+ canDrawArgs.fClipConservativeBounds = &stencilClip.fixedClip().scissorRect();
+ canDrawArgs.fViewMatrix = &SkMatrix::I();
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAAType = pathAAType;
+ canDrawArgs.fHasUserStencilSettings = false;
+ canDrawArgs.fTargetIsWrappedVkSecondaryCB = renderTargetContext->wrapsVkSecondaryCB();
+
+ GrDrawingManager* dm = context->priv().drawingManager();
+ pr = dm->getPathRenderer(canDrawArgs, false, GrPathRendererChain::DrawType::kStencil,
+ &stencilSupport);
+ if (!pr) {
+ return false;
+ }
+ }
+
+ bool canRenderDirectToStencil =
+ GrPathRenderer::kNoRestriction_StencilSupport == stencilSupport;
+        bool drawDirectToClip; // Given the renderer, the element, fill rule,
+                               // and set operation, should we render the
+                               // element directly to the stencil bit used
+                               // for clipping?
+ GrUserStencilSettings const* const* stencilPasses =
+ GrStencilSettings::GetClipPasses(op, canRenderDirectToStencil, fillInverted,
+ &drawDirectToClip);
+
+ // draw the element to the client stencil bits if necessary
+ if (!drawDirectToClip) {
+ static constexpr GrUserStencilSettings kDrawToStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kIncMaybeClamp,
+ GrUserStencilOp::kIncMaybeClamp,
+ 0xffff>()
+ );
+ if (Element::DeviceSpaceType::kRect == element->getDeviceSpaceType()) {
+ stencil_device_rect(renderTargetContext, stencilClip.fixedClip(), &kDrawToStencil,
+ GrAA(doStencilMSAA), element->getDeviceSpaceRect());
+ } else {
+ if (!clipPath.isEmpty()) {
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ if (canRenderDirectToStencil) {
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Get());
+
+ GrPathRenderer::DrawPathArgs args{context,
+ std::move(paint),
+ &kDrawToStencil,
+ renderTargetContext,
+ &stencilClip.fixedClip(),
+ &stencilClip.fixedClip().scissorRect(),
+ &SkMatrix::I(),
+ &shape,
+ pathAAType,
+ false};
+ pr->drawPath(args);
+ } else {
+ GrPathRenderer::StencilPathArgs args;
+ args.fContext = context;
+ args.fRenderTargetContext = renderTargetContext;
+ args.fClip = &stencilClip.fixedClip();
+ args.fClipConservativeBounds = &stencilClip.fixedClip().scissorRect();
+ args.fViewMatrix = &SkMatrix::I();
+ args.fDoStencilMSAA = GrAA(doStencilMSAA);
+ args.fShape = &shape;
+ pr->stencilPath(args);
+ }
+ }
+ }
+ }
+
+ // now we modify the clip bit by rendering either the clip
+ // element directly or a bounding rect of the entire clip.
+ for (GrUserStencilSettings const* const* pass = stencilPasses; *pass; ++pass) {
+ if (drawDirectToClip) {
+ if (Element::DeviceSpaceType::kRect == element->getDeviceSpaceType()) {
+ stencil_device_rect(renderTargetContext, stencilClip, *pass,
+ GrAA(doStencilMSAA), element->getDeviceSpaceRect());
+ } else {
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Get());
+ GrPathRenderer::DrawPathArgs args{context,
+ std::move(paint),
+ *pass,
+ renderTargetContext,
+ &stencilClip,
+ &stencilClip.fixedClip().scissorRect(),
+ &SkMatrix::I(),
+ &shape,
+ pathAAType,
+ false};
+ pr->drawPath(args);
+ }
+ } else {
+                // The view matrix is set up to do the clip space -> stencil space translation,
+                // so draw the rect in clip space.
+ stencil_device_rect(renderTargetContext, stencilClip, *pass, GrAA(doStencilMSAA),
+ SkRect::Make(fScissor));
+ }
+ }
+ }
+ return true;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(
+ GrCoverageCountingPathRenderer* ccpr, uint32_t opsTaskID) {
+ // Make sure finishAndDetachAnalyticFPs hasn't been called already.
+ SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
+
+ if (!fCCPRClipPaths.empty()) {
+ fAnalyticFPs.reserve(fAnalyticFPs.count() + fCCPRClipPaths.count());
+ for (const SkPath& ccprClipPath : fCCPRClipPaths) {
+ SkASSERT(ccpr);
+ SkASSERT(fHasScissor);
+ auto fp = ccpr->makeClipProcessor(opsTaskID, ccprClipPath, fScissor, *fCaps);
+ fAnalyticFPs.push_back(std::move(fp));
+ }
+ fCCPRClipPaths.reset();
+ }
+
+ return GrFragmentProcessor::RunInSeries(fAnalyticFPs.begin(), fAnalyticFPs.count());
+}
diff --git a/gfx/skia/skia/src/gpu/GrReducedClip.h b/gfx/skia/skia/src/gpu/GrReducedClip.h
new file mode 100644
index 0000000000..ca95bd0994
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrReducedClip.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrReducedClip_DEFINED
+#define GrReducedClip_DEFINED
+
+#include "src/core/SkClipStack.h"
+#include "src/core/SkTLList.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrWindowRectangles.h"
+
+class GrCoverageCountingPathRenderer;
+class GrRecordingContext;
+class GrRenderTargetContext;
+
+/**
+ * This class takes a clip stack and produces a reduced set of elements that are equivalent to
+ * applying that full stack within a specified query rectangle.
+ */
+class GrReducedClip {
+public:
+ using Element = SkClipStack::Element;
+ using ElementList = SkTLList<SkClipStack::Element, 16>;
+
+ GrReducedClip(const SkClipStack&, const SkRect& queryBounds, const GrCaps* caps,
+ int maxWindowRectangles = 0, int maxAnalyticFPs = 0, int maxCCPRClipPaths = 0);
+
+ enum class InitialState : bool {
+ kAllIn,
+ kAllOut
+ };
+
+ InitialState initialState() const { return fInitialState; }
+
+ /**
+ * If hasScissor() is true, the clip mask is not valid outside this rect and the caller must
+ * enforce this scissor during draw.
+ */
+ const SkIRect& scissor() const { SkASSERT(fHasScissor); return fScissor; }
+ int left() const { return this->scissor().left(); }
+ int top() const { return this->scissor().top(); }
+ int width() const { return this->scissor().width(); }
+ int height() const { return this->scissor().height(); }
+
+ /**
+     * Indicates whether scissor() is defined. It will always be defined if maskElements() is
+     * nonempty.
+ */
+ bool hasScissor() const { return fHasScissor; }
+
+ /**
+ * If nonempty, the clip mask is not valid inside these windows and the caller must clip them
+ * out using the window rectangles GPU extension.
+ */
+ const GrWindowRectangles& windowRectangles() const { return fWindowRects; }
+
+ /**
+ * An ordered list of clip elements that could not be skipped or implemented by other means. If
+ * nonempty, the caller must create an alpha and/or stencil mask for these elements and apply it
+ * during draw.
+ */
+ const ElementList& maskElements() const { return fMaskElements; }
+
+ /**
+ * If maskElements() are nonempty, uniquely identifies the region of the clip mask that falls
+ * inside of scissor().
+ *
+ * NOTE: since clip elements might fall outside the query bounds, different regions of the same
+ * clip stack might have more or less restrictive IDs.
+ *
+ * FIXME: this prevents us from reusing a sub-rect of a perfectly good mask when that rect has
+ * been assigned a less restrictive ID.
+ */
+ uint32_t maskGenID() const { SkASSERT(!fMaskElements.isEmpty()); return fMaskGenID; }
+
+ /**
+ * Indicates whether antialiasing is required to process any of the mask elements.
+ */
+ bool maskRequiresAA() const { SkASSERT(!fMaskElements.isEmpty()); return fMaskRequiresAA; }
+
+ bool drawAlphaClipMask(GrRenderTargetContext*) const;
+ bool drawStencilClipMask(GrRecordingContext*, GrRenderTargetContext*) const;
+
+ int numAnalyticFPs() const { return fAnalyticFPs.count() + fCCPRClipPaths.count(); }
+
+ /**
+ * Called once the client knows the ID of the opsTask that the clip FPs will operate in. This
+ * method finishes any outstanding work that was waiting for the opsTask ID, then detaches and
+ * returns this class's list of FPs that complete the clip.
+ *
+ * NOTE: this must be called AFTER producing the clip mask (if any) because draw calls on
+ * the render target context, surface allocations, and even switching render targets (pre MDB)
+ * may cause flushes or otherwise change which opsTask the actual draw is going into.
+ */
+ std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticFPs(
+ GrCoverageCountingPathRenderer*, uint32_t opsTaskID);
+
+private:
+ void walkStack(const SkClipStack&, const SkRect& queryBounds);
+
+ enum class ClipResult {
+ kNotClipped,
+ kClipped,
+ kMadeEmpty
+ };
+
+ // Intersects the clip with the element's interior, regardless of inverse fill type.
+ // NOTE: do not call for elements followed by ops that can grow the clip.
+ ClipResult clipInsideElement(const Element*);
+
+ // Intersects the clip with the element's exterior, regardless of inverse fill type.
+ // NOTE: do not call for elements followed by ops that can grow the clip.
+ ClipResult clipOutsideElement(const Element*);
+
+ void addWindowRectangle(const SkRect& elementInteriorRect, bool elementIsAA);
+
+ enum class Invert : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ static GrClipEdgeType GetClipEdgeType(Invert, GrAA);
+ ClipResult addAnalyticFP(const SkRect& deviceSpaceRect, Invert, GrAA);
+ ClipResult addAnalyticFP(const SkRRect& deviceSpaceRRect, Invert, GrAA);
+ ClipResult addAnalyticFP(const SkPath& deviceSpacePath, Invert, GrAA);
+
+ void makeEmpty();
+
+ const GrCaps* fCaps;
+ const int fMaxWindowRectangles;
+ const int fMaxAnalyticFPs;
+ const int fMaxCCPRClipPaths;
+
+ InitialState fInitialState;
+ SkIRect fScissor;
+ bool fHasScissor;
+ SkRect fAAClipRect;
+    uint32_t fAAClipRectGenID; // GenID the mask will have if it includes the AA clip rect.
+ GrWindowRectangles fWindowRects;
+ ElementList fMaskElements;
+ uint32_t fMaskGenID;
+ bool fMaskRequiresAA;
+ SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fAnalyticFPs;
+ SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opsTask ID for CCPR.
+};
+
+#endif
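
Read together, the notes above imply a fixed call order for consumers of this class: reduce, rasterize any leftover mask elements, and only then detach the analytic FPs (the opsTask ID must be final by that point). A hedged sketch of that sequence, assuming the declarations above and a caller-supplied scratch alpha-8 render target context for the mask; the argument limits are illustrative:

    std::unique_ptr<GrFragmentProcessor> reduceAndDetach(
            const SkClipStack& stack, const SkRect& queryBounds, const GrCaps* caps,
            GrRenderTargetContext* maskRTC,  // scratch alpha-8 target for the mask
            GrCoverageCountingPathRenderer* ccpr, uint32_t opsTaskID) {
        GrReducedClip reduced(stack, queryBounds, caps,
                              /*maxWindowRectangles=*/0, /*maxAnalyticFPs=*/4,
                              /*maxCCPRClipPaths=*/4);
        if (!reduced.maskElements().isEmpty()) {
            // Rasterize leftover elements before detaching: detaching must wait
            // until the opsTask ID is final (see the NOTE above).
            reduced.drawAlphaClipMask(maskRTC);
        }
        return reduced.finishAndDetachAnalyticFPs(ccpr, opsTaskID);
    }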
diff --git a/gfx/skia/skia/src/gpu/GrRenderTarget.cpp b/gfx/skia/skia/src/gpu/GrRenderTarget.cpp
new file mode 100644
index 0000000000..2197d63239
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTarget.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrRenderTarget.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrSamplePatternDictionary.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrStencilSettings.h"
+
+GrRenderTarget::GrRenderTarget(GrGpu* gpu, const SkISize& size, GrPixelConfig config,
+ int sampleCount, GrProtected isProtected,
+ GrStencilAttachment* stencil)
+ : INHERITED(gpu, size, config, isProtected)
+ , fSampleCnt(sampleCount)
+ , fSamplePatternKey(GrSamplePatternDictionary::kInvalidSamplePatternKey)
+ , fStencilAttachment(stencil) {
+}
+
+GrRenderTarget::~GrRenderTarget() = default;
+
+void GrRenderTarget::onRelease() {
+ fStencilAttachment = nullptr;
+
+ INHERITED::onRelease();
+}
+
+void GrRenderTarget::onAbandon() {
+ fStencilAttachment = nullptr;
+
+ INHERITED::onAbandon();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrRenderTargetPriv::attachStencilAttachment(sk_sp<GrStencilAttachment> stencil) {
+#ifdef SK_DEBUG
+ if (1 == fRenderTarget->fSampleCnt) {
+ // TODO: We don't expect a mixed sampled render target to ever change its stencil buffer
+ // right now. But if it does swap in a stencil buffer with a different number of samples,
+ // and if we have a valid fSamplePatternKey, we will need to invalidate fSamplePatternKey
+        // here and add tests to make sure we handle it properly.
+ SkASSERT(GrSamplePatternDictionary::kInvalidSamplePatternKey ==
+ fRenderTarget->fSamplePatternKey);
+ } else {
+ // Render targets with >1 color sample should never use mixed samples. (This would lead to
+ // different sample patterns, depending on stencil state.)
+ SkASSERT(!stencil || stencil->numSamples() == fRenderTarget->fSampleCnt);
+ }
+#endif
+
+ if (!stencil && !fRenderTarget->fStencilAttachment) {
+ // No need to do any work since we currently don't have a stencil attachment and
+ // we're not actually adding one.
+ return;
+ }
+
+ fRenderTarget->fStencilAttachment = std::move(stencil);
+ if (!fRenderTarget->completeStencilAttachment()) {
+ fRenderTarget->fStencilAttachment = nullptr;
+ }
+}
+
+int GrRenderTargetPriv::numStencilBits() const {
+ SkASSERT(this->getStencilAttachment());
+ return this->getStencilAttachment()->bits();
+}
+
+int GrRenderTargetPriv::getSamplePatternKey() const {
+#ifdef SK_DEBUG
+ GrStencilAttachment* stencil = fRenderTarget->fStencilAttachment.get();
+ if (fRenderTarget->fSampleCnt <= 1) {
+ // If the color buffer is not multisampled, the sample pattern better come from the stencil
+ // buffer (mixed samples).
+ SkASSERT(stencil && stencil->numSamples() > 1);
+ } else {
+        // When the color buffer is multisampled, its sample count and the stencil sample count
+        // must match. Otherwise there would be more than one sample pattern associated with the
+        // render target.
+ SkASSERT(!stencil || stencil->numSamples() == fRenderTarget->fSampleCnt);
+ }
+#endif
+ if (GrSamplePatternDictionary::kInvalidSamplePatternKey == fRenderTarget->fSamplePatternKey) {
+ fRenderTarget->fSamplePatternKey =
+ fRenderTarget->getGpu()->findOrAssignSamplePatternKey(fRenderTarget);
+ }
+ SkASSERT(GrSamplePatternDictionary::kInvalidSamplePatternKey
+ != fRenderTarget->fSamplePatternKey);
+ return fRenderTarget->fSamplePatternKey;
+}
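
getSamplePatternKey() is a sentinel-guarded lazy cache; the shape of that pattern in isolation (a generic sketch, with -1 assumed as the invalid-key sentinel rather than Skia's actual constant):

    // Compute the key at most once and cache it behind a sentinel, as
    // getSamplePatternKey() does with fSamplePatternKey above.
    class LazyKey {
    public:
        static constexpr int kInvalid = -1;  // assumption, not Skia's constant
        template <typename F> int get(F&& computeKey) {
            if (kInvalid == fKey) {
                fKey = computeKey();  // e.g. findOrAssignSamplePatternKey(...)
            }
            return fKey;
        }
    private:
        int fKey = kInvalid;
    };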
diff --git a/gfx/skia/skia/src/gpu/GrRenderTarget.h b/gfx/skia/skia/src/gpu/GrRenderTarget.h
new file mode 100644
index 0000000000..c9e173eec1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTarget.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTarget_DEFINED
+#define GrRenderTarget_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/gpu/GrSurface.h"
+
+class GrCaps;
+class GrRenderTargetPriv;
+class GrStencilAttachment;
+class GrBackendRenderTarget;
+
+/**
+ * GrRenderTarget represents a 2D buffer of pixels that can be rendered to.
+ * A context's render target is set by setRenderTarget(). Render targets are
+ * created by createTexture() with the kRenderTarget_SurfaceFlag flag.
+ * Additionally, GrContext provides methods for creating GrRenderTargets
+ * that wrap externally created render targets.
+ */
+class GrRenderTarget : virtual public GrSurface {
+public:
+ // Make manual MSAA resolve publicly accessible from GrRenderTarget.
+ using GrSurface::setRequiresManualMSAAResolve;
+ using GrSurface::requiresManualMSAAResolve;
+
+ virtual bool alwaysClearStencil() const { return false; }
+
+ // GrSurface overrides
+ GrRenderTarget* asRenderTarget() override { return this; }
+ const GrRenderTarget* asRenderTarget() const override { return this; }
+
+ /**
+ * Returns the number of samples/pixel in the color buffer (One if non-MSAA).
+ */
+ int numSamples() const { return fSampleCnt; }
+
+ virtual GrBackendRenderTarget getBackendRenderTarget() const = 0;
+
+ // Checked when this object is asked to attach a stencil buffer.
+ virtual bool canAttemptStencilAttachment() const = 0;
+
+ // Provides access to functions that aren't part of the public API.
+ GrRenderTargetPriv renderTargetPriv();
+ const GrRenderTargetPriv renderTargetPriv() const;
+
+protected:
+ GrRenderTarget(GrGpu*, const SkISize&, GrPixelConfig, int sampleCount, GrProtected,
+ GrStencilAttachment* = nullptr);
+ ~GrRenderTarget() override;
+
+ // override of GrResource
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ // Allows the backends to perform any additional work that is required for attaching a
+ // GrStencilAttachment. When this is called, the GrStencilAttachment has already been put onto
+ // the GrRenderTarget. This function must return false if any failures occur when completing the
+ // stencil attachment.
+ virtual bool completeStencilAttachment() = 0;
+
+ friend class GrRenderTargetPriv;
+
+ int fSampleCnt;
+ int fSamplePatternKey;
+ sk_sp<GrStencilAttachment> fStencilAttachment;
+
+ typedef GrSurface INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetContext.cpp b/gfx/skia/skia/src/gpu/GrRenderTargetContext.cpp
new file mode 100644
index 0000000000..517a34cf7c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetContext.cpp
@@ -0,0 +1,2466 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTargetContext.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkShadowFlags.h"
+#include "include/utils/SkShadowUtils.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrBlurUtils.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClientMappedBufferManager.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/GrRRectEffect.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/geometry/GrQuadUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrAtlasTextOp.h"
+#include "src/gpu/ops/GrClearOp.h"
+#include "src/gpu/ops/GrClearStencilClipOp.h"
+#include "src/gpu/ops/GrDebugMarkerOp.h"
+#include "src/gpu/ops/GrDrawAtlasOp.h"
+#include "src/gpu/ops/GrDrawOp.h"
+#include "src/gpu/ops/GrDrawVerticesOp.h"
+#include "src/gpu/ops/GrDrawableOp.h"
+#include "src/gpu/ops/GrFillRRectOp.h"
+#include "src/gpu/ops/GrFillRectOp.h"
+#include "src/gpu/ops/GrLatticeOp.h"
+#include "src/gpu/ops/GrOp.h"
+#include "src/gpu/ops/GrOvalOpFactory.h"
+#include "src/gpu/ops/GrRegionOp.h"
+#include "src/gpu/ops/GrShadowRRectOp.h"
+#include "src/gpu/ops/GrStencilPathOp.h"
+#include "src/gpu/ops/GrStrokeRectOp.h"
+#include "src/gpu/ops/GrTextureOp.h"
+#include "src/gpu/text/GrTextContext.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+class GrRenderTargetContext::TextTarget : public GrTextTarget {
+public:
+ TextTarget(GrRenderTargetContext* renderTargetContext)
+ : GrTextTarget(renderTargetContext->width(), renderTargetContext->height(),
+ renderTargetContext->colorInfo())
+ , fRenderTargetContext(renderTargetContext)
+ , fGlyphPainter{*renderTargetContext} {}
+
+ void addDrawOp(const GrClip& clip, std::unique_ptr<GrAtlasTextOp> op) override {
+ fRenderTargetContext->addDrawOp(clip, std::move(op));
+ }
+
+ void drawShape(const GrClip& clip, const SkPaint& paint,
+ const SkMatrix& viewMatrix, const GrShape& shape) override {
+ GrBlurUtils::drawShapeWithMaskFilter(fRenderTargetContext->fContext, fRenderTargetContext,
+ clip, paint, viewMatrix, shape);
+ }
+
+ void makeGrPaint(GrMaskFormat maskFormat, const SkPaint& skPaint, const SkMatrix& viewMatrix,
+ GrPaint* grPaint) override {
+ auto context = fRenderTargetContext->fContext;
+ const GrColorInfo& colorInfo = fRenderTargetContext->colorInfo();
+ if (kARGB_GrMaskFormat == maskFormat) {
+ SkPaintToGrPaintWithPrimitiveColor(context, colorInfo, skPaint, grPaint);
+ } else {
+ SkPaintToGrPaint(context, colorInfo, skPaint, viewMatrix, grPaint);
+ }
+ }
+
+ GrRecordingContext* getContext() override {
+ return fRenderTargetContext->fContext;
+ }
+
+ SkGlyphRunListPainter* glyphPainter() override {
+ return &fGlyphPainter;
+ }
+
+private:
+ GrRenderTargetContext* fRenderTargetContext;
+ SkGlyphRunListPainter fGlyphPainter;
+
+};
+
+#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this->drawingManager()->getContext())
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+#define ASSERT_SINGLE_OWNER_PRIV \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fRenderTargetContext->singleOwner());)
+#define RETURN_IF_ABANDONED if (fContext->priv().abandoned()) { return; }
+#define RETURN_IF_ABANDONED_PRIV if (fRenderTargetContext->fContext->priv().abandoned()) { return; }
+#define RETURN_FALSE_IF_ABANDONED if (fContext->priv().abandoned()) { return false; }
+#define RETURN_FALSE_IF_ABANDONED_PRIV if (fRenderTargetContext->fContext->priv().abandoned()) { return false; }
+#define RETURN_NULL_IF_ABANDONED if (fContext->priv().abandoned()) { return nullptr; }
+
+//////////////////////////////////////////////////////////////////////////////
+
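+// RAII helper: when an entry point that issued draws goes out of scope, gives the
+// drawing manager a chance to flush via flushIfNecessary().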
+class AutoCheckFlush {
+public:
+ AutoCheckFlush(GrDrawingManager* drawingManager) : fDrawingManager(drawingManager) {
+ SkASSERT(fDrawingManager);
+ }
+ ~AutoCheckFlush() { fDrawingManager->flushIfNecessary(); }
+
+private:
+ GrDrawingManager* fDrawingManager;
+};
+
+// In MDB mode, taking a ref on the result of the 'getLastOpsTask' call allows an
+// in-progress GrOpsTask to be picked up and added to by renderTargetContexts lower in
+// the call stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpsTask).
+GrRenderTargetContext::GrRenderTargetContext(GrRecordingContext* context,
+ sk_sp<GrRenderTargetProxy> rtp,
+ GrColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ bool managedOpsTask)
+ : GrSurfaceContext(context, colorType, kPremul_SkAlphaType, std::move(colorSpace))
+ , fRenderTargetProxy(std::move(rtp))
+ , fOpsTask(sk_ref_sp(fRenderTargetProxy->getLastOpsTask()))
+ , fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
+ , fManagedOpsTask(managedOpsTask) {
+ fTextTarget.reset(new TextTarget(this));
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+void GrRenderTargetContext::validate() const {
+ SkASSERT(fRenderTargetProxy);
+ fRenderTargetProxy->validate(fContext);
+
+ SkASSERT(fContext->priv().caps()->areColorTypeAndFormatCompatible(
+ this->colorInfo().colorType(), fRenderTargetProxy->backendFormat()));
+
+ if (fOpsTask && !fOpsTask->isClosed()) {
+ SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpsTask.get());
+ }
+}
+#endif
+
+GrRenderTargetContext::~GrRenderTargetContext() {
+ ASSERT_SINGLE_OWNER
+}
+
+inline GrAAType GrRenderTargetContext::chooseAAType(GrAA aa) {
+ if (GrAA::kNo == aa) {
+ // On some devices we cannot disable MSAA if it is enabled so we make the AA type reflect
+ // that.
+ if (this->numSamples() > 1 && !this->caps()->multisampleDisableSupport()) {
+ return GrAAType::kMSAA;
+ }
+ return GrAAType::kNone;
+ }
+ return (this->numSamples() > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
+}
+
+GrTextureProxy* GrRenderTargetContext::asTextureProxy() {
+ return fRenderTargetProxy->asTextureProxy();
+}
+
+const GrTextureProxy* GrRenderTargetContext::asTextureProxy() const {
+ return fRenderTargetProxy->asTextureProxy();
+}
+
+sk_sp<GrTextureProxy> GrRenderTargetContext::asTextureProxyRef() {
+ return sk_ref_sp(fRenderTargetProxy->asTextureProxy());
+}
+
+GrMipMapped GrRenderTargetContext::mipMapped() const {
+ if (const GrTextureProxy* proxy = this->asTextureProxy()) {
+ return proxy->mipMapped();
+ }
+ return GrMipMapped::kNo;
+}
+
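+// Returns the current ops task, lazily replacing it if it is missing or already closed.
+// If prior draws required stencil, the stencil contents are preserved across the task
+// boundary (see the FIXME below regarding topological sorting).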
+GrOpsTask* GrRenderTargetContext::getOpsTask() {
+ ASSERT_SINGLE_OWNER
+ SkDEBUGCODE(this->validate();)
+
+ if (!fOpsTask || fOpsTask->isClosed()) {
+ sk_sp<GrOpsTask> newOpsTask =
+ this->drawingManager()->newOpsTask(fRenderTargetProxy, fManagedOpsTask);
+ if (fOpsTask && fNumStencilSamples > 0) {
+ // Store the stencil values in memory upon completion of fOpsTask.
+ fOpsTask->setMustPreserveStencil();
+ // Reload the stencil buffer content at the beginning of newOpsTask.
+ // FIXME: Could the topo sort insert a task between these two that modifies the stencil
+ // values?
+ newOpsTask->setInitialStencilContent(GrOpsTask::StencilContent::kPreserved);
+ }
+ fOpsTask = std::move(newOpsTask);
+ }
+
+ return fOpsTask.get();
+}
+
+void GrRenderTargetContext::drawGlyphRunList(
+ const GrClip& clip, const SkMatrix& viewMatrix,
+ const SkGlyphRunList& blob) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawGlyphRunList", fContext);
+
+ // Drawing text can cause us to do inline uploads. This is not supported for wrapped vulkan
+ // secondary command buffers because it would require stopping and starting a render pass which
+ // we don't have access to.
+ if (this->wrapsVkSecondaryCB()) {
+ return;
+ }
+
+ GrTextContext* atlasTextContext = this->drawingManager()->getTextContext();
+ atlasTextContext->drawGlyphRunList(fContext, fTextTarget.get(), clip, viewMatrix,
+ fSurfaceProps, blob);
+}
+
+void GrRenderTargetContext::discard() {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "discard", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ this->getOpsTask()->discard();
+}
+
+void GrRenderTargetContext::clear(const SkIRect* rect,
+ const SkPMColor4f& color,
+ CanClearFullscreen canClearFullscreen) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "clear", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+ this->internalClear(rect ? GrFixedClip(*rect) : GrFixedClip::Disabled(), color,
+ canClearFullscreen);
+}
+
+void GrRenderTargetContextPriv::clear(const GrFixedClip& clip,
+ const SkPMColor4f& color,
+ CanClearFullscreen canClearFullscreen) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fRenderTargetContext->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContextPriv", "clear",
+ fRenderTargetContext->fContext);
+
+ AutoCheckFlush acf(fRenderTargetContext->drawingManager());
+ fRenderTargetContext->internalClear(clip, color, canClearFullscreen);
+}
+
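+// Configures 'paint' so that drawing a quad with it overwrites the destination with
+// 'color': opaque colors can keep src-over blending, anything else must use src.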
+static void clear_to_grpaint(const SkPMColor4f& color, GrPaint* paint) {
+ paint->setColor4f(color);
+ if (color.isOpaque()) {
+ // Can just rely on the src-over blend mode to do the right thing
+ paint->setPorterDuffXPFactory(SkBlendMode::kSrcOver);
+ } else {
+ // A clear overwrites the prior color, so even if it's transparent, it behaves as if it
+ // were src blended
+ paint->setPorterDuffXPFactory(SkBlendMode::kSrc);
+ }
+}
+
+void GrRenderTargetContext::internalClear(const GrFixedClip& clip,
+ const SkPMColor4f& color,
+ CanClearFullscreen canClearFullscreen) {
+ bool isFull = false;
+ if (!clip.hasWindowRectangles()) {
+        // TODO: wrt the shouldInitializeTextures path, it would be more performant to
+        // only clear the entire target if we knew it had not been cleared before. As is,
+        // this could end up doing a lot of redundant clears.
+ isFull = !clip.scissorEnabled() ||
+ (CanClearFullscreen::kYes == canClearFullscreen &&
+ (this->caps()->preferFullscreenClears() || this->caps()->shouldInitializeTextures())) ||
+ clip.scissorRect().contains(SkIRect::MakeWH(this->width(), this->height()));
+ }
+
+ if (isFull) {
+ GrOpsTask* opsTask = this->getOpsTask();
+ if (opsTask->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
+ !this->caps()->performColorClearsAsDraws()) {
+ // The op list was emptied and native clears are allowed, so just use the load op
+ opsTask->setColorLoadOp(GrLoadOp::kClear, color);
+ return;
+ } else {
+ // Will use an op for the clear, reset the load op to discard since the op will
+ // blow away the color buffer contents
+ opsTask->setColorLoadOp(GrLoadOp::kDiscard);
+ }
+
+ // Must add an op to the list (either because we couldn't use a load op, or because the
+ // clear load op isn't supported)
+ if (this->caps()->performColorClearsAsDraws()) {
+ SkRect rtRect = SkRect::MakeWH(this->width(), this->height());
+ GrPaint paint;
+ clear_to_grpaint(color, &paint);
+ this->addDrawOp(GrFixedClip::Disabled(),
+ GrFillRectOp::MakeNonAARect(fContext, std::move(paint), SkMatrix::I(),
+ rtRect));
+ } else {
+ this->addOp(GrClearOp::Make(
+ fContext, SkIRect::MakeEmpty(), color, /* fullscreen */ true));
+ }
+ } else {
+ if (this->caps()->performPartialClearsAsDraws()) {
+ // performPartialClearsAsDraws() also returns true if any clear has to be a draw.
+ GrPaint paint;
+ clear_to_grpaint(color, &paint);
+
+ this->addDrawOp(clip,
+ GrFillRectOp::MakeNonAARect(fContext, std::move(paint), SkMatrix::I(),
+ SkRect::Make(clip.scissorRect())));
+ } else {
+ std::unique_ptr<GrOp> op(GrClearOp::Make(fContext, clip, color,
+ this->asSurfaceProxy()));
+ // This version of the clear op factory can return null if the clip doesn't intersect
+ // with the surface proxy's boundary
+ if (!op) {
+ return;
+ }
+ this->addOp(std::move(op));
+ }
+ }
+}
+
+void GrRenderTargetContext::drawPaint(const GrClip& clip,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix) {
+ // Start with the render target, since that is the maximum content we could possibly fill.
+ // drawFilledQuad() will automatically restrict it to clip bounds for us if possible.
+ SkRect r = fRenderTargetProxy->getBoundsRect();
+ if (!paint.numTotalFragmentProcessors()) {
+ // The paint is trivial so we won't need to use local coordinates, so skip calculating the
+ // inverse view matrix.
+ this->fillRectToRect(clip, std::move(paint), GrAA::kNo, SkMatrix::I(), r, r);
+ } else {
+ // Use the inverse view matrix to arrive at appropriate local coordinates for the paint.
+ SkMatrix localMatrix;
+ if (!viewMatrix.invert(&localMatrix)) {
+ return;
+ }
+ this->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(), r,
+ localMatrix);
+ }
+}
+
+enum class GrRenderTargetContext::QuadOptimization {
+ // The rect to draw doesn't intersect clip or render target, so no draw op should be added
+ kDiscarded,
+ // The rect to draw was converted to some other op and appended to the oplist, so no additional
+ // op is necessary. Currently this can convert it to a clear op or a rrect op. Only valid if
+ // a constColor is provided.
+ kSubmitted,
+ // The clip was folded into the device quad, with updated edge flags and local coords, and
+ // caller is responsible for adding an appropriate op.
+ kClipApplied,
+ // No change to clip, but quad updated to better fit clip/render target, and caller is
+ // responsible for adding an appropriate op.
+ kCropped
+};
+
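+// Coerces +/- infinity to a large finite value so later crop math keeps some precision;
+// returns false on NaN so the caller can discard the quad entirely.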
+static bool make_vertex_finite(float* value) {
+ if (SkScalarIsNaN(*value)) {
+ return false;
+ }
+
+ if (!SkScalarIsFinite(*value)) {
+ // +/- infinity at this point. Don't use exactly SK_ScalarMax so that we have some precision
+ // left when calculating crops.
+ static constexpr float kNearInfinity = SK_ScalarMax / 4.f;
+ *value = *value < 0.f ? -kNearInfinity : kNearInfinity;
+ }
+
+ return true;
+}
+
+GrRenderTargetContext::QuadOptimization GrRenderTargetContext::attemptQuadOptimization(
+ const GrClip& clip, const SkPMColor4f* constColor,
+ const GrUserStencilSettings* stencilSettings, GrAA* aa, GrQuadAAFlags* edgeFlags,
+ GrQuad* deviceQuad, GrQuad* localQuad) {
+ // Optimization requirements:
+ // 1. kDiscard applies when clip bounds and quad bounds do not intersect
+ // 2. kClear applies when constColor and final geom is pixel aligned rect;
+ // pixel aligned rect requires rect clip and (rect quad or quad covers clip)
+ // 3. kRRect applies when constColor and rrect clip and quad covers clip
+ // 4. kExplicitClip applies when rect clip and (rect quad or quad covers clip)
+ // 5. kCropped applies when rect quad (currently)
+ // 6. kNone always applies
+ GrQuadAAFlags newFlags = *edgeFlags;
+
+ SkRect rtRect;
+ if (stencilSettings) {
+ // Must use worst case bounds so that stencil buffer updates on approximately sized render
+ // targets don't get corrupted.
+ rtRect = SkRect::MakeWH(fRenderTargetProxy->worstCaseWidth(),
+ fRenderTargetProxy->worstCaseHeight());
+ } else {
+ // Use the logical size of the render target, which allows for "fullscreen" clears even if
+ // the render target has an approximate backing fit
+ rtRect = SkRect::MakeWH(this->width(), this->height());
+ }
+
+ SkRect drawBounds = deviceQuad->bounds();
+ if (constColor) {
+ // Don't bother updating local coordinates when the paint will ignore them anyways
+ localQuad = nullptr;
+ // If the device quad is not finite, coerce into a finite quad. This is acceptable since it
+ // will be cropped to the finite 'clip' or render target and there is no local space mapping
+ if (!deviceQuad->isFinite()) {
+ for (int i = 0; i < 4; ++i) {
+ if (!make_vertex_finite(deviceQuad->xs() + i) ||
+ !make_vertex_finite(deviceQuad->ys() + i) ||
+ !make_vertex_finite(deviceQuad->ws() + i)) {
+ // Discard if we see a nan
+ return QuadOptimization::kDiscarded;
+ }
+ }
+ SkASSERT(deviceQuad->isFinite());
+ }
+ } else {
+ // CropToRect requires the quads to be finite. If they are not finite and we have local
+ // coordinates, the mapping from local space to device space is poorly defined so drop it
+ if (!deviceQuad->isFinite()) {
+ return QuadOptimization::kDiscarded;
+ }
+ }
+
+ // If the quad is entirely off screen, it doesn't matter what the clip does
+ if (!rtRect.intersects(drawBounds)) {
+ return QuadOptimization::kDiscarded;
+ }
+
+ // Check if clip can be represented as a rounded rect (initialize as if clip fully contained
+ // the render target).
+ SkRRect clipRRect = SkRRect::MakeRect(rtRect);
+ // We initialize clipAA to *aa when there are stencil settings so that we don't artificially
+ // encounter mixed-aa edges (not allowed for stencil), but we want to start as non-AA for
+ // regular draws so that if we fully cover the render target, that can stop being anti-aliased.
+ GrAA clipAA = stencilSettings ? *aa : GrAA::kNo;
+ bool axisAlignedClip = true;
+ if (!clip.quickContains(rtRect)) {
+ if (!clip.isRRect(rtRect, &clipRRect, &clipAA)) {
+ axisAlignedClip = false;
+ }
+ }
+
+ // If the clip rrect is valid (i.e. axis-aligned), we can potentially combine it with the
+ // draw geometry so that no clip is needed when drawing.
+ if (axisAlignedClip && (!stencilSettings || clipAA == *aa)) {
+ // Tighten clip bounds (if clipRRect.isRect() is true, clipBounds now holds the intersection
+ // of the render target and the clip rect)
+ SkRect clipBounds = rtRect;
+ if (!clipBounds.intersect(clipRRect.rect()) || !clipBounds.intersects(drawBounds)) {
+ return QuadOptimization::kDiscarded;
+ }
+
+ if (clipRRect.isRect()) {
+ // No rounded corners, so the kClear and kExplicitClip optimizations are possible
+ if (GrQuadUtils::CropToRect(clipBounds, clipAA, &newFlags, deviceQuad, localQuad)) {
+ if (constColor && deviceQuad->quadType() == GrQuad::Type::kAxisAligned) {
+ // Clear optimization is possible
+ drawBounds = deviceQuad->bounds();
+ if (drawBounds.contains(rtRect)) {
+ // Fullscreen clear
+ this->clear(nullptr, *constColor, CanClearFullscreen::kYes);
+ return QuadOptimization::kSubmitted;
+ } else if (GrClip::IsPixelAligned(drawBounds) &&
+ drawBounds.width() > 256 && drawBounds.height() > 256) {
+ // Scissor + clear (round shouldn't do anything since we are pixel aligned)
+ SkIRect scissorRect;
+ drawBounds.round(&scissorRect);
+ this->clear(&scissorRect, *constColor, CanClearFullscreen::kNo);
+ return QuadOptimization::kSubmitted;
+ }
+ }
+
+ // Update overall AA setting.
+ *edgeFlags = newFlags;
+ if (*aa == GrAA::kNo && clipAA == GrAA::kYes &&
+ newFlags != GrQuadAAFlags::kNone) {
+ // The clip was anti-aliased and now the draw needs to be upgraded to AA to
+ // properly reflect the smooth edge of the clip.
+ *aa = GrAA::kYes;
+ }
+ // We intentionally do not downgrade AA here because we don't know if we need to
+ // preserve MSAA (see GrQuadAAFlags docs). But later in the pipeline, the ops can
+ // use GrResolveAATypeForQuad() to turn off coverage AA when all flags are off.
+
+ // deviceQuad is exactly the intersection of original quad and clip, so it can be
+ // drawn with no clip (submitted by caller)
+ return QuadOptimization::kClipApplied;
+ } else {
+ // The quads have been updated to better fit the clip bounds, but can't get rid of
+ // the clip entirely
+ return QuadOptimization::kCropped;
+ }
+ } else if (constColor) {
+ // Rounded corners and constant filled color (limit ourselves to solid colors because
+ // there is no way to use custom local coordinates with drawRRect).
+ if (GrQuadUtils::CropToRect(clipBounds, clipAA, &newFlags, deviceQuad, localQuad) &&
+ deviceQuad->quadType() == GrQuad::Type::kAxisAligned &&
+ deviceQuad->bounds().contains(clipBounds)) {
+ // Since the cropped quad became a rectangle which covered the bounds of the rrect,
+ // we can draw the rrect directly and ignore the edge flags
+ GrPaint paint;
+ clear_to_grpaint(*constColor, &paint);
+ this->drawRRect(GrFixedClip::Disabled(), std::move(paint), clipAA, SkMatrix::I(),
+ clipRRect, GrStyle::SimpleFill());
+ return QuadOptimization::kSubmitted;
+ } else {
+ // The quad has been updated to better fit clip bounds, but can't remove the clip
+ return QuadOptimization::kCropped;
+ }
+ }
+ }
+
+ // Crop the quad to the conservative bounds of the clip.
+ SkIRect clipDevBounds;
+ clip.getConservativeBounds(rtRect.width(), rtRect.height(), &clipDevBounds);
+ SkRect clipBounds = SkRect::Make(clipDevBounds);
+
+ // One final check for discarding, since we may have gone here directly due to a complex clip
+ if (!clipBounds.intersects(drawBounds)) {
+ return QuadOptimization::kDiscarded;
+ }
+
+    // Even if this were to return true, the crop rect does not exactly match the clip, so we
+    // cannot report explicit-clip. Since these edges aren't visible, don't update the final
+    // edge flags.
+ GrQuadUtils::CropToRect(clipBounds, clipAA, &newFlags, deviceQuad, localQuad);
+
+ return QuadOptimization::kCropped;
+}
+
+void GrRenderTargetContext::drawFilledQuad(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ GrQuadAAFlags edgeFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const GrUserStencilSettings* ss) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawFilledQuad", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ SkPMColor4f* constColor = nullptr;
+ SkPMColor4f paintColor;
+ if (!ss && !paint.numCoverageFragmentProcessors() &&
+ paint.isConstantBlendedColor(&paintColor)) {
+        // Only consider clears/rrects when it's easy to guarantee 100% fill with a single color
+ constColor = &paintColor;
+ }
+
+ GrQuad croppedDeviceQuad = deviceQuad;
+ GrQuad croppedLocalQuad = localQuad;
+ QuadOptimization opt = this->attemptQuadOptimization(clip, constColor, ss, &aa, &edgeFlags,
+ &croppedDeviceQuad, &croppedLocalQuad);
+ if (opt >= QuadOptimization::kClipApplied) {
+ // These optimizations require caller to add an op themselves
+ const GrClip& finalClip = opt == QuadOptimization::kClipApplied ? GrFixedClip::Disabled()
+ : clip;
+ GrAAType aaType = ss ? (aa == GrAA::kYes ? GrAAType::kMSAA : GrAAType::kNone)
+ : this->chooseAAType(aa);
+ this->addDrawOp(finalClip, GrFillRectOp::Make(fContext, std::move(paint), aaType, edgeFlags,
+ croppedDeviceQuad, croppedLocalQuad, ss));
+ }
+ // All other optimization levels were completely handled inside attempt(), so no extra op needed
+}
+
+void GrRenderTargetContext::drawTexturedQuad(const GrClip& clip,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> textureXform,
+ GrSamplerState::Filter filter,
+ const SkPMColor4f& color,
+ SkBlendMode blendMode,
+ GrAA aa,
+ GrQuadAAFlags edgeFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const SkRect* domain) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ SkASSERT(proxy);
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawTexturedQuad", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ // Functionally this is very similar to drawFilledQuad except that there's no constColor to
+    // enable the kSubmitted optimizations, no stencil settings support, and it's a GrTextureOp.
+ GrQuad croppedDeviceQuad = deviceQuad;
+ GrQuad croppedLocalQuad = localQuad;
+ QuadOptimization opt = this->attemptQuadOptimization(clip, nullptr, nullptr, &aa, &edgeFlags,
+ &croppedDeviceQuad, &croppedLocalQuad);
+
+ SkASSERT(opt != QuadOptimization::kSubmitted);
+ if (opt != QuadOptimization::kDiscarded) {
+        // Add the texture op if it was not discarded
+ const GrClip& finalClip = opt == QuadOptimization::kClipApplied ? GrFixedClip::Disabled()
+ : clip;
+ GrAAType aaType = this->chooseAAType(aa);
+ auto clampType = GrColorTypeClampType(this->colorInfo().colorType());
+ auto saturate = clampType == GrClampType::kManual ? GrTextureOp::Saturate::kYes
+ : GrTextureOp::Saturate::kNo;
+ // Use the provided domain, although hypothetically we could detect that the cropped local
+ // quad is sufficiently inside the domain and the constraint could be dropped.
+ this->addDrawOp(finalClip,
+ GrTextureOp::Make(fContext, std::move(proxy), srcColorType,
+ std::move(textureXform), filter, color, saturate,
+ blendMode, aaType, edgeFlags, croppedDeviceQuad,
+ croppedLocalQuad, domain));
+ }
+}
+
+void GrRenderTargetContext::drawRect(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const GrStyle* style) {
+ if (!style) {
+ style = &GrStyle::SimpleFill();
+ }
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawRect", fContext);
+
+ // Path effects should've been devolved to a path in SkGpuDevice
+ SkASSERT(!style->pathEffect());
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ const SkStrokeRec& stroke = style->strokeRec();
+ if (stroke.getStyle() == SkStrokeRec::kFill_Style) {
+ // Fills the rect, using rect as its own local coordinates
+ this->fillRectToRect(clip, std::move(paint), aa, viewMatrix, rect, rect);
+ return;
+ } else if (stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kHairline_Style) {
+ if ((!rect.width() || !rect.height()) &&
+ SkStrokeRec::kHairline_Style != stroke.getStyle()) {
+ SkScalar r = stroke.getWidth() / 2;
+ // TODO: Move these stroke->fill fallbacks to GrShape?
+ switch (stroke.getJoin()) {
+ case SkPaint::kMiter_Join:
+ this->drawRect(
+ clip, std::move(paint), aa, viewMatrix,
+ {rect.fLeft - r, rect.fTop - r, rect.fRight + r, rect.fBottom + r},
+ &GrStyle::SimpleFill());
+ return;
+ case SkPaint::kRound_Join:
+ // Raster draws nothing when both dimensions are empty.
+                    if (rect.width() || rect.height()) {
+ SkRRect rrect = SkRRect::MakeRectXY(rect.makeOutset(r, r), r, r);
+ this->drawRRect(clip, std::move(paint), aa, viewMatrix, rrect,
+ GrStyle::SimpleFill());
+ return;
+ }
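+                    // Falls through to the bevel case when both dimensions are empty.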
+ case SkPaint::kBevel_Join:
+ if (!rect.width()) {
+ this->drawRect(clip, std::move(paint), aa, viewMatrix,
+ {rect.fLeft - r, rect.fTop, rect.fRight + r, rect.fBottom},
+ &GrStyle::SimpleFill());
+ } else {
+ this->drawRect(clip, std::move(paint), aa, viewMatrix,
+ {rect.fLeft, rect.fTop - r, rect.fRight, rect.fBottom + r},
+ &GrStyle::SimpleFill());
+ }
+ return;
+ }
+ }
+
+ std::unique_ptr<GrDrawOp> op;
+
+ GrAAType aaType = this->chooseAAType(aa);
+ op = GrStrokeRectOp::Make(fContext, std::move(paint), aaType, viewMatrix, rect, stroke);
+ // op may be null if the stroke is not supported or if using coverage aa and the view matrix
+ // does not preserve rectangles.
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ return;
+ }
+ }
+ assert_alive(paint);
+ this->drawShapeUsingPathRenderer(clip, std::move(paint), aa, viewMatrix, GrShape(rect, *style));
+}
+
+void GrRenderTargetContext::drawQuadSet(const GrClip& clip, GrPaint&& paint, GrAA aa,
+ const SkMatrix& viewMatrix, const QuadSetEntry quads[],
+ int cnt) {
+ GrAAType aaType = this->chooseAAType(aa);
+ this->addDrawOp(clip, GrFillRectOp::MakeSet(fContext, std::move(paint), aaType, viewMatrix,
+ quads, cnt));
+}
+
+int GrRenderTargetContextPriv::maxWindowRectangles() const {
+ return fRenderTargetContext->fRenderTargetProxy->maxWindowRectangles(
+ *fRenderTargetContext->caps());
+}
+
+GrOpsTask::CanDiscardPreviousOps GrRenderTargetContext::canDiscardPreviousOpsOnFullClear(
+ ) const {
+#if GR_TEST_UTILS
+ if (fPreserveOpsOnFullClear_TestingOnly) {
+ return GrOpsTask::CanDiscardPreviousOps::kNo;
+ }
+#endif
+ // Regardless of how the clear is implemented (native clear or a fullscreen quad), all prior ops
+ // would normally be overwritten. The one exception is if the render target context is marked as
+ // needing a stencil buffer then there may be a prior op that writes to the stencil buffer.
+ // Although the clear will ignore the stencil buffer, following draw ops may not so we can't get
+ // rid of all the preceding ops. Beware! If we ever add any ops that have a side effect beyond
+ // modifying the stencil buffer we will need a more elaborate tracking system (skbug.com/7002).
+ return GrOpsTask::CanDiscardPreviousOps(!fNumStencilSamples);
+}
+
+void GrRenderTargetContext::setNeedsStencil(bool useMixedSamplesIfNotMSAA) {
+ // Don't clear stencil until after we've changed fNumStencilSamples. This ensures we don't loop
+ // forever in the event that there are driver bugs and we need to clear as a draw.
+ bool hasInitializedStencil = fNumStencilSamples > 0;
+
+ int numRequiredSamples = this->numSamples();
+ if (useMixedSamplesIfNotMSAA && 1 == numRequiredSamples) {
+ SkASSERT(fRenderTargetProxy->canUseMixedSamples(*this->caps()));
+ numRequiredSamples = this->caps()->internalMultisampleCount(
+ this->asSurfaceProxy()->backendFormat());
+ }
+ SkASSERT(numRequiredSamples > 0);
+
+ if (numRequiredSamples > fNumStencilSamples) {
+ fNumStencilSamples = numRequiredSamples;
+ fRenderTargetProxy->setNeedsStencil(fNumStencilSamples);
+ }
+
+ if (!hasInitializedStencil) {
+ if (this->caps()->performStencilClearsAsDraws()) {
+ // There is a driver bug with clearing stencil. We must use an op to manually clear the
+ // stencil buffer before the op that required 'setNeedsStencil'.
+ this->internalStencilClear(GrFixedClip::Disabled(), /* inside mask */ false);
+ } else {
+ this->getOpsTask()->setInitialStencilContent(
+ GrOpsTask::StencilContent::kUserBitsCleared);
+ }
+ }
+}
+
+void GrRenderTargetContextPriv::clearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fRenderTargetContext->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContextPriv", "clearStencilClip",
+ fRenderTargetContext->fContext);
+
+ AutoCheckFlush acf(fRenderTargetContext->drawingManager());
+
+ fRenderTargetContext->internalStencilClear(clip, insideStencilMask);
+}
+
+void GrRenderTargetContext::internalStencilClear(const GrFixedClip& clip, bool insideStencilMask) {
+ this->setNeedsStencil(/* useMixedSamplesIfNotMSAA = */ false);
+
+ if (this->caps()->performStencilClearsAsDraws()) {
+ const GrUserStencilSettings* ss = GrStencilSettings::SetClipBitSettings(insideStencilMask);
+ SkRect rtRect = SkRect::MakeWH(this->width(), this->height());
+
+ // Configure the paint to have no impact on the color buffer
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Get());
+ this->addDrawOp(clip, GrFillRectOp::MakeNonAARect(fContext, std::move(paint), SkMatrix::I(),
+ rtRect, ss));
+ } else {
+ std::unique_ptr<GrOp> op(GrClearStencilClipOp::Make(fContext, clip, insideStencilMask,
+ fRenderTargetProxy.get()));
+ if (!op) {
+ return;
+ }
+ this->addOp(std::move(op));
+ }
+}
+
+void GrRenderTargetContextPriv::stencilPath(const GrHardClip& clip,
+ GrAA doStencilMSAA,
+ const SkMatrix& viewMatrix,
+ sk_sp<const GrPath> path) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fRenderTargetContext->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContextPriv", "stencilPath",
+ fRenderTargetContext->fContext);
+
+ // TODO: extract portions of checkDraw that are relevant to path stenciling.
+ SkASSERT(path);
+ SkASSERT(fRenderTargetContext->caps()->shaderCaps()->pathRenderingSupport());
+
+ // FIXME: Use path bounds instead of this WAR once
+ // https://bugs.chromium.org/p/skia/issues/detail?id=5640 is resolved.
+ SkRect bounds = SkRect::MakeIWH(fRenderTargetContext->width(), fRenderTargetContext->height());
+
+ // Setup clip
+ GrAppliedHardClip appliedClip;
+ if (!clip.apply(fRenderTargetContext->width(), fRenderTargetContext->height(), &appliedClip,
+ &bounds)) {
+ return;
+ }
+
+ std::unique_ptr<GrOp> op = GrStencilPathOp::Make(fRenderTargetContext->fContext,
+ viewMatrix,
+ GrAA::kYes == doStencilMSAA,
+ appliedClip.hasStencilClip(),
+ appliedClip.scissorState(),
+ std::move(path));
+ if (!op) {
+ return;
+ }
+ op->setClippedBounds(bounds);
+
+ fRenderTargetContext->setNeedsStencil(GrAA::kYes == doStencilMSAA);
+ fRenderTargetContext->addOp(std::move(op));
+}
+
+void GrRenderTargetContext::drawTextureSet(const GrClip& clip, const TextureSetEntry set[], int cnt,
+ GrSamplerState::Filter filter, SkBlendMode mode,
+ GrAA aa, SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> texXform) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawTextureSet", fContext);
+
+ if (mode != SkBlendMode::kSrcOver ||
+ !fContext->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
+ // Draw one at a time since the bulk API doesn't support non src-over blending, or the
+ // backend can't support the bulk geometry processor yet.
+ SkMatrix ctm;
+ for (int i = 0; i < cnt; ++i) {
+ float alpha = set[i].fAlpha;
+ ctm = viewMatrix;
+ if (set[i].fPreViewMatrix) {
+ ctm.preConcat(*set[i].fPreViewMatrix);
+ }
+
+ GrQuad quad, srcQuad;
+ if (set[i].fDstClipQuad) {
+ quad = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);
+
+ SkPoint srcPts[4];
+ GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
+ srcQuad = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
+ } else {
+ quad = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
+ srcQuad = GrQuad(set[i].fSrcRect);
+ }
+
+ const SkRect* domain = constraint == SkCanvas::kStrict_SrcRectConstraint
+ ? &set[i].fSrcRect : nullptr;
+ this->drawTexturedQuad(clip, set[i].fProxy, set[i].fSrcColorType, texXform, filter,
+ {alpha, alpha, alpha, alpha}, mode, aa, set[i].fAAFlags,
+ quad, srcQuad, domain);
+ }
+ } else {
+ // Can use a single op, avoiding GrPaint creation, and can batch across proxies
+ AutoCheckFlush acf(this->drawingManager());
+ GrAAType aaType = this->chooseAAType(aa);
+ auto clampType = GrColorTypeClampType(this->colorInfo().colorType());
+ auto saturate = clampType == GrClampType::kManual ? GrTextureOp::Saturate::kYes
+ : GrTextureOp::Saturate::kNo;
+ auto op = GrTextureOp::MakeSet(fContext, set, cnt, filter, saturate, aaType, constraint,
+ viewMatrix, std::move(texXform));
+ this->addDrawOp(clip, std::move(op));
+ }
+}
+
+void GrRenderTargetContext::drawVertices(const GrClip& clip,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ sk_sp<SkVertices> vertices,
+ const SkVertices::Bone bones[],
+ int boneCount,
+ GrPrimitiveType* overridePrimType) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawVertices", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ SkASSERT(vertices);
+ GrAAType aaType = this->chooseAAType(GrAA::kNo);
+ std::unique_ptr<GrDrawOp> op = GrDrawVerticesOp::Make(
+ fContext, std::move(paint), std::move(vertices), bones, boneCount, viewMatrix, aaType,
+ this->colorInfo().refColorSpaceXformFromSRGB(), overridePrimType);
+ this->addDrawOp(clip, std::move(op));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrRenderTargetContext::drawAtlas(const GrClip& clip,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ int spriteCount,
+ const SkRSXform xform[],
+ const SkRect texRect[],
+ const SkColor colors[]) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawAtlas", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ GrAAType aaType = this->chooseAAType(GrAA::kNo);
+ std::unique_ptr<GrDrawOp> op = GrDrawAtlasOp::Make(fContext, std::move(paint), viewMatrix,
+ aaType, spriteCount, xform, texRect, colors);
+ this->addDrawOp(clip, std::move(op));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrRenderTargetContext::drawRRect(const GrClip& origClip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawRRect", fContext);
+
+ const SkStrokeRec& stroke = style.strokeRec();
+ if (stroke.getStyle() == SkStrokeRec::kFill_Style && rrect.isEmpty()) {
+ return;
+ }
+
+ GrNoClip noclip;
+ const GrClip* clip = &origClip;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // The Android framework frequently clips rrects to themselves where the clip is non-aa and the
+ // draw is aa. Since our lower level clip code works from op bounds, which are SkRects, it
+ // doesn't detect that the clip can be ignored (modulo antialiasing). The following test
+ // attempts to mitigate the stencil clip cost but will only help when the entire clip stack
+    // can be ignored. We'd prefer to fix this in the framework by removing the clip calls. This
+ // only works for filled rrects since the stroke width outsets beyond the rrect itself.
+ SkRRect devRRect;
+ if (stroke.getStyle() == SkStrokeRec::kFill_Style && rrect.transform(viewMatrix, &devRRect) &&
+ clip->quickContains(devRRect)) {
+ clip = &noclip;
+ }
+#endif
+ SkASSERT(!style.pathEffect()); // this should've been devolved to a path in SkGpuDevice
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ GrAAType aaType = this->chooseAAType(aa);
+
+ std::unique_ptr<GrDrawOp> op;
+ if (GrAAType::kCoverage == aaType && rrect.isSimple() &&
+ rrect.getSimpleRadii().fX == rrect.getSimpleRadii().fY &&
+ viewMatrix.rectStaysRect() && viewMatrix.isSimilarity()) {
+ // In coverage mode, we draw axis-aligned circular roundrects with the GrOvalOpFactory
+ // to avoid perf regressions on some platforms.
+ assert_alive(paint);
+ op = GrOvalOpFactory::MakeCircularRRectOp(
+ fContext, std::move(paint), viewMatrix, rrect, stroke, this->caps()->shaderCaps());
+ }
+ if (!op && style.isSimpleFill()) {
+ assert_alive(paint);
+ op = GrFillRRectOp::Make(
+ fContext, aaType, viewMatrix, rrect, *this->caps(), std::move(paint));
+ }
+ if (!op && GrAAType::kCoverage == aaType) {
+ assert_alive(paint);
+ op = GrOvalOpFactory::MakeRRectOp(
+ fContext, std::move(paint), viewMatrix, rrect, stroke, this->caps()->shaderCaps());
+ }
+ if (op) {
+ this->addDrawOp(*clip, std::move(op));
+ return;
+ }
+
+ assert_alive(paint);
+ this->drawShapeUsingPathRenderer(*clip, std::move(paint), aa, viewMatrix,
+ GrShape(rrect, style));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
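+// Maps the x/y of 'pt' through the 2D matrix 'm'; z passes through unchanged.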
+static SkPoint3 map(const SkMatrix& m, const SkPoint3& pt) {
+ SkPoint3 result;
+ m.mapXY(pt.fX, pt.fY, (SkPoint*)&result.fX);
+ result.fZ = pt.fZ;
+ return result;
+}
+
+bool GrRenderTargetContext::drawFastShadow(const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const SkDrawShadowRec& rec) {
+ ASSERT_SINGLE_OWNER
+ if (fContext->priv().abandoned()) {
+ return true;
+ }
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawFastShadow", fContext);
+
+ // check z plane
+ bool tiltZPlane = SkToBool(!SkScalarNearlyZero(rec.fZPlaneParams.fX) ||
+ !SkScalarNearlyZero(rec.fZPlaneParams.fY));
+ bool skipAnalytic = SkToBool(rec.fFlags & SkShadowFlags::kGeometricOnly_ShadowFlag);
+ if (tiltZPlane || skipAnalytic || !viewMatrix.rectStaysRect() || !viewMatrix.isSimilarity()) {
+ return false;
+ }
+
+ SkRRect rrect;
+ SkRect rect;
+ // we can only handle rects, circles, and rrects with circular corners
+ bool isRRect = path.isRRect(&rrect) && SkRRectPriv::IsSimpleCircular(rrect) &&
+ rrect.radii(SkRRect::kUpperLeft_Corner).fX > SK_ScalarNearlyZero;
+ if (!isRRect &&
+ path.isOval(&rect) && SkScalarNearlyEqual(rect.width(), rect.height()) &&
+ rect.width() > SK_ScalarNearlyZero) {
+ rrect.setOval(rect);
+ isRRect = true;
+ }
+ if (!isRRect && path.isRect(&rect)) {
+ rrect.setRect(rect);
+ isRRect = true;
+ }
+
+ if (!isRRect) {
+ return false;
+ }
+
+ if (rrect.isEmpty()) {
+ return true;
+ }
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ // transform light
+ SkPoint3 devLightPos = map(viewMatrix, rec.fLightPos);
+
+ // 1/scale
+ SkScalar devToSrcScale = viewMatrix.isScaleTranslate() ?
+ SkScalarInvert(SkScalarAbs(viewMatrix[SkMatrix::kMScaleX])) :
+ sk_float_rsqrt(viewMatrix[SkMatrix::kMScaleX] * viewMatrix[SkMatrix::kMScaleX] +
+ viewMatrix[SkMatrix::kMSkewX] * viewMatrix[SkMatrix::kMSkewX]);
+
+ SkScalar occluderHeight = rec.fZPlaneParams.fZ;
+ bool transparent = SkToBool(rec.fFlags & SkShadowFlags::kTransparentOccluder_ShadowFlag);
+
+ if (SkColorGetA(rec.fAmbientColor) > 0) {
+ SkScalar devSpaceInsetWidth = SkDrawShadowMetrics::AmbientBlurRadius(occluderHeight);
+ const SkScalar umbraRecipAlpha = SkDrawShadowMetrics::AmbientRecipAlpha(occluderHeight);
+ const SkScalar devSpaceAmbientBlur = devSpaceInsetWidth * umbraRecipAlpha;
+
+ // Outset the shadow rrect to the border of the penumbra
+ SkScalar ambientPathOutset = devSpaceInsetWidth * devToSrcScale;
+ SkRRect ambientRRect;
+ SkRect outsetRect = rrect.rect().makeOutset(ambientPathOutset, ambientPathOutset);
+ // If the rrect was an oval then its outset will also be one.
+ // We set it explicitly to avoid errors.
+ if (rrect.isOval()) {
+ ambientRRect = SkRRect::MakeOval(outsetRect);
+ } else {
+ SkScalar outsetRad = SkRRectPriv::GetSimpleRadii(rrect).fX + ambientPathOutset;
+ ambientRRect = SkRRect::MakeRectXY(outsetRect, outsetRad, outsetRad);
+ }
+
+ GrColor ambientColor = SkColorToPremulGrColor(rec.fAmbientColor);
+ if (transparent) {
+ // set a large inset to force a fill
+ devSpaceInsetWidth = ambientRRect.width();
+ }
+
+ std::unique_ptr<GrDrawOp> op = GrShadowRRectOp::Make(fContext,
+ ambientColor,
+ viewMatrix,
+ ambientRRect,
+ devSpaceAmbientBlur,
+ devSpaceInsetWidth);
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ }
+ }
+
+ if (SkColorGetA(rec.fSpotColor) > 0) {
+ SkScalar devSpaceSpotBlur;
+ SkScalar spotScale;
+ SkVector spotOffset;
+ SkDrawShadowMetrics::GetSpotParams(occluderHeight, devLightPos.fX, devLightPos.fY,
+ devLightPos.fZ, rec.fLightRadius,
+ &devSpaceSpotBlur, &spotScale, &spotOffset);
+ // handle scale of radius due to CTM
+ const SkScalar srcSpaceSpotBlur = devSpaceSpotBlur * devToSrcScale;
+
+ // Adjust translate for the effect of the scale.
+ spotOffset.fX += spotScale*viewMatrix[SkMatrix::kMTransX];
+ spotOffset.fY += spotScale*viewMatrix[SkMatrix::kMTransY];
+ // This offset is in dev space, need to transform it into source space.
+ SkMatrix ctmInverse;
+ if (viewMatrix.invert(&ctmInverse)) {
+ ctmInverse.mapPoints(&spotOffset, 1);
+ } else {
+ // Since the matrix is a similarity, this should never happen, but just in case...
+ SkDebugf("Matrix is degenerate. Will not render spot shadow correctly!\n");
+ SkASSERT(false);
+ }
+
+ // Compute the transformed shadow rrect
+ SkRRect spotShadowRRect;
+ SkMatrix shadowTransform;
+ shadowTransform.setScaleTranslate(spotScale, spotScale, spotOffset.fX, spotOffset.fY);
+ rrect.transform(shadowTransform, &spotShadowRRect);
+ SkScalar spotRadius = SkRRectPriv::GetSimpleRadii(spotShadowRRect).fX;
+
+ // Compute the insetWidth
+ SkScalar blurOutset = srcSpaceSpotBlur;
+ SkScalar insetWidth = blurOutset;
+ if (transparent) {
+ // If transparent, just do a fill
+ insetWidth += spotShadowRRect.width();
+ } else {
+ // For shadows, instead of using a stroke we specify an inset from the penumbra
+ // border. We want to extend this inset area so that it meets up with the caster
+ // geometry. The inset geometry will by default already be inset by the blur width.
+ //
+ // We compare the min and max corners inset by the radius between the original
+ // rrect and the shadow rrect. The distance between the two plus the difference
+ // between the scaled radius and the original radius gives the distance from the
+ // transformed shadow shape to the original shape in that corner. The max
+ // of these gives the maximum distance we need to cover.
+ //
+ // Since we are outsetting by 1/2 the blur distance, we just add the maxOffset to
+ // that to get the full insetWidth.
+ SkScalar maxOffset;
+ if (rrect.isRect()) {
+ // Manhattan distance works better for rects
+ maxOffset = SkTMax(SkTMax(SkTAbs(spotShadowRRect.rect().fLeft -
+ rrect.rect().fLeft),
+ SkTAbs(spotShadowRRect.rect().fTop -
+ rrect.rect().fTop)),
+ SkTMax(SkTAbs(spotShadowRRect.rect().fRight -
+ rrect.rect().fRight),
+ SkTAbs(spotShadowRRect.rect().fBottom -
+ rrect.rect().fBottom)));
+ } else {
+ SkScalar dr = spotRadius - SkRRectPriv::GetSimpleRadii(rrect).fX;
+ SkPoint upperLeftOffset = SkPoint::Make(spotShadowRRect.rect().fLeft -
+ rrect.rect().fLeft + dr,
+ spotShadowRRect.rect().fTop -
+ rrect.rect().fTop + dr);
+ SkPoint lowerRightOffset = SkPoint::Make(spotShadowRRect.rect().fRight -
+ rrect.rect().fRight - dr,
+ spotShadowRRect.rect().fBottom -
+ rrect.rect().fBottom - dr);
+ maxOffset = SkScalarSqrt(SkTMax(SkPointPriv::LengthSqd(upperLeftOffset),
+ SkPointPriv::LengthSqd(lowerRightOffset))) + dr;
+ }
+ insetWidth += SkTMax(blurOutset, maxOffset);
+ }
+
+ // Outset the shadow rrect to the border of the penumbra
+ SkRect outsetRect = spotShadowRRect.rect().makeOutset(blurOutset, blurOutset);
+ if (spotShadowRRect.isOval()) {
+ spotShadowRRect = SkRRect::MakeOval(outsetRect);
+ } else {
+ SkScalar outsetRad = spotRadius + blurOutset;
+ spotShadowRRect = SkRRect::MakeRectXY(outsetRect, outsetRad, outsetRad);
+ }
+
+ GrColor spotColor = SkColorToPremulGrColor(rec.fSpotColor);
+
+ std::unique_ptr<GrDrawOp> op = GrShadowRRectOp::Make(fContext,
+ spotColor,
+ viewMatrix,
+ spotShadowRRect,
+ 2.0f * devSpaceSpotBlur,
+ insetWidth);
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrRenderTargetContext::drawFilledDRRect(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRRect& origOuter,
+ const SkRRect& origInner) {
+ SkASSERT(!origInner.isEmpty());
+ SkASSERT(!origOuter.isEmpty());
+
+ SkTCopyOnFirstWrite<SkRRect> inner(origInner), outer(origOuter);
+
+ GrAAType aaType = this->chooseAAType(aa);
+
+ if (GrAAType::kMSAA == aaType) {
+ return false;
+ }
+
+ if (GrAAType::kCoverage == aaType && SkRRectPriv::IsCircle(*inner)
+ && SkRRectPriv::IsCircle(*outer)) {
+ auto outerR = outer->width() / 2.f;
+ auto innerR = inner->width() / 2.f;
+ auto cx = outer->getBounds().fLeft + outerR;
+ auto cy = outer->getBounds().fTop + outerR;
+ if (SkScalarNearlyEqual(cx, inner->getBounds().fLeft + innerR) &&
+ SkScalarNearlyEqual(cy, inner->getBounds().fTop + innerR)) {
+ auto avgR = (innerR + outerR) / 2.f;
+ auto circleBounds = SkRect::MakeLTRB(cx - avgR, cy - avgR, cx + avgR, cy + avgR);
+ SkStrokeRec stroke(SkStrokeRec::kFill_InitStyle);
+ stroke.setStrokeStyle(outerR - innerR);
+ auto op = GrOvalOpFactory::MakeOvalOp(fContext, std::move(paint), viewMatrix,
+ circleBounds, GrStyle(stroke, nullptr),
+ this->caps()->shaderCaps());
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ return true;
+ }
+ assert_alive(paint);
+ }
+ }
+
+ GrClipEdgeType innerEdgeType, outerEdgeType;
+ if (GrAAType::kCoverage == aaType) {
+ innerEdgeType = GrClipEdgeType::kInverseFillAA;
+ outerEdgeType = GrClipEdgeType::kFillAA;
+ } else {
+ innerEdgeType = GrClipEdgeType::kInverseFillBW;
+ outerEdgeType = GrClipEdgeType::kFillBW;
+ }
+
+ SkMatrix inverseVM;
+ if (!viewMatrix.isIdentity()) {
+ if (!origInner.transform(viewMatrix, inner.writable())) {
+ return false;
+ }
+ if (!origOuter.transform(viewMatrix, outer.writable())) {
+ return false;
+ }
+ if (!viewMatrix.invert(&inverseVM)) {
+ return false;
+ }
+ } else {
+ inverseVM.reset();
+ }
+
+ const auto& caps = *this->caps()->shaderCaps();
+    // TODO: these need to be geometry processors
+ auto innerEffect = GrRRectEffect::Make(innerEdgeType, *inner, caps);
+ if (!innerEffect) {
+ return false;
+ }
+
+ auto outerEffect = GrRRectEffect::Make(outerEdgeType, *outer, caps);
+ if (!outerEffect) {
+ return false;
+ }
+
+ paint.addCoverageFragmentProcessor(std::move(innerEffect));
+ paint.addCoverageFragmentProcessor(std::move(outerEffect));
+
+ SkRect bounds = outer->getBounds();
+ if (GrAAType::kCoverage == aaType) {
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+ }
+
+ this->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(), bounds,
+ inverseVM);
+ return true;
+}
+
+void GrRenderTargetContext::drawDRRect(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRRect& outer,
+ const SkRRect& inner) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawDRRect", fContext);
+
+ SkASSERT(!outer.isEmpty());
+ SkASSERT(!inner.isEmpty());
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ if (this->drawFilledDRRect(clip, std::move(paint), aa, viewMatrix, outer, inner)) {
+ return;
+ }
+ assert_alive(paint);
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(inner);
+ path.addRRect(outer);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+ this->drawShapeUsingPathRenderer(clip, std::move(paint), aa, viewMatrix, GrShape(path));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrRenderTargetContext::drawRegion(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ const GrStyle& style,
+ const GrUserStencilSettings* ss) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawRegion", fContext);
+
+ if (GrAA::kYes == aa) {
+ // GrRegionOp performs no antialiasing but is much faster, so here we check the matrix
+ // to see whether aa is really required.
+ if (!SkToBool(viewMatrix.getType() & ~(SkMatrix::kTranslate_Mask)) &&
+ SkScalarIsInt(viewMatrix.getTranslateX()) &&
+ SkScalarIsInt(viewMatrix.getTranslateY())) {
+ aa = GrAA::kNo;
+ }
+ }
+ bool complexStyle = !style.isSimpleFill();
+ if (complexStyle || GrAA::kYes == aa) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ path.setIsVolatile(true);
+
+ return this->drawPath(clip, std::move(paint), aa, viewMatrix, path, style);
+ }
+
+ GrAAType aaType = this->chooseAAType(GrAA::kNo);
+ std::unique_ptr<GrDrawOp> op = GrRegionOp::Make(fContext, std::move(paint), viewMatrix, region,
+ aaType, ss);
+ this->addDrawOp(clip, std::move(op));
+}
+
+void GrRenderTargetContext::drawOval(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawOval", fContext);
+
+ const SkStrokeRec& stroke = style.strokeRec();
+
+ if (oval.isEmpty() && !style.pathEffect()) {
+ if (stroke.getStyle() == SkStrokeRec::kFill_Style) {
+ return;
+ }
+
+ this->drawRect(clip, std::move(paint), aa, viewMatrix, oval, &style);
+ return;
+ }
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ GrAAType aaType = this->chooseAAType(aa);
+
+ std::unique_ptr<GrDrawOp> op;
+ if (GrAAType::kCoverage == aaType && oval.width() > SK_ScalarNearlyZero &&
+ oval.width() == oval.height() && viewMatrix.isSimilarity()) {
+ // We don't draw true circles as round rects in coverage mode, because it can
+ // cause perf regressions on some platforms as compared to the dedicated circle Op.
+ assert_alive(paint);
+ op = GrOvalOpFactory::MakeCircleOp(fContext, std::move(paint), viewMatrix, oval, style,
+ this->caps()->shaderCaps());
+ }
+ if (!op && style.isSimpleFill()) {
+ // GrFillRRectOp has special geometry and a fragment-shader branch to conditionally evaluate
+ // the arc equation. This same special geometry and fragment branch also turn out to be a
+ // substantial optimization for drawing ovals (namely, by not evaluating the arc equation
+ // inside the oval's inner diamond). Given these optimizations, it's a clear win to draw
+ // ovals the exact same way we do round rects.
+ assert_alive(paint);
+ op = GrFillRRectOp::Make(fContext, aaType, viewMatrix, SkRRect::MakeOval(oval),
+ *this->caps(), std::move(paint));
+ }
+ if (!op && GrAAType::kCoverage == aaType) {
+ assert_alive(paint);
+ op = GrOvalOpFactory::MakeOvalOp(fContext, std::move(paint), viewMatrix, oval, style,
+ this->caps()->shaderCaps());
+ }
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ return;
+ }
+
+ assert_alive(paint);
+ this->drawShapeUsingPathRenderer(
+ clip, std::move(paint), aa, viewMatrix,
+ GrShape(SkRRect::MakeOval(oval), SkPath::kCW_Direction, 2, false, style));
+}
+
+void GrRenderTargetContext::drawArc(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawArc", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ GrAAType aaType = this->chooseAAType(aa);
+ if (GrAAType::kCoverage == aaType) {
+ const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
+ std::unique_ptr<GrDrawOp> op = GrOvalOpFactory::MakeArcOp(fContext,
+ std::move(paint),
+ viewMatrix,
+ oval,
+ startAngle,
+ sweepAngle,
+ useCenter,
+ style,
+ shaderCaps);
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ return;
+ }
+ assert_alive(paint);
+ }
+ this->drawShapeUsingPathRenderer(
+ clip, std::move(paint), aa, viewMatrix,
+ GrShape::MakeArc(oval, startAngle, sweepAngle, useCenter, style));
+}
+
+void GrRenderTargetContext::drawImageLattice(const GrClip& clip,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrTextureProxy> image,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> csxf,
+ GrSamplerState::Filter filter,
+ std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawImageLattice", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ std::unique_ptr<GrDrawOp> op =
+ GrLatticeOp::MakeNonAA(fContext, std::move(paint), viewMatrix, std::move(image),
+ srcColorType, std::move(csxf), filter, std::move(iter), dst);
+ this->addDrawOp(clip, std::move(op));
+}
+
+void GrRenderTargetContext::drawDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable,
+ const SkRect& bounds) {
+ std::unique_ptr<GrOp> op(GrDrawableOp::Make(fContext, std::move(drawable), bounds));
+ SkASSERT(op);
+ this->addOp(std::move(op));
+}
+
+void GrRenderTargetContext::asyncRescaleAndReadPixels(
+ const SkImageInfo& info, const SkIRect& srcRect, SkSurface::RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality, ReadPixelsCallback callback, ReadPixelsContext context) {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ callback(context, nullptr);
+ return;
+ }
+ if (fRenderTargetProxy->wrapsVkSecondaryCB()) {
+ callback(context, nullptr);
+ return;
+ }
+ auto dstCT = SkColorTypeToGrColorType(info.colorType());
+ // TODO: Support reading to gray.
+ if (dstCT == GrColorType::kUnknown ||
+ GrColorTypeComponentFlags(dstCT) & kGray_SkColorTypeComponentFlag) {
+ callback(context, nullptr);
+ return;
+ }
+ bool needsRescale = srcRect.width() != info.width() || srcRect.height() != info.height();
+ auto colorTypeOfFinalContext = this->colorInfo().colorType();
+ auto backendFormatOfFinalContext = fRenderTargetProxy->backendFormat();
+ if (needsRescale) {
+ colorTypeOfFinalContext = dstCT;
+ backendFormatOfFinalContext = this->caps()->getDefaultBackendFormat(dstCT,
+ GrRenderable::kYes);
+ }
+ auto readInfo = this->caps()->supportedReadPixelsColorType(colorTypeOfFinalContext,
+ backendFormatOfFinalContext, dstCT);
+ // Fail if we can't read from the source surface's color type.
+ if (readInfo.fColorType == GrColorType::kUnknown) {
+ callback(context, nullptr);
+ return;
+ }
+ // Fail if read color type does not have all of dstCT's color channels and those missing color
+ // channels are in the src.
+ uint32_t dstComponents = GrColorTypeComponentFlags(dstCT);
+ uint32_t legalReadComponents = GrColorTypeComponentFlags(readInfo.fColorType);
+ uint32_t srcComponents = GrColorTypeComponentFlags(this->colorInfo().colorType());
+ if ((~legalReadComponents & dstComponents) & srcComponents) {
+ callback(context, nullptr);
+ return;
+ }
+
+ std::unique_ptr<GrRenderTargetContext> tempRTC;
+ int x = srcRect.fLeft;
+ int y = srcRect.fTop;
+ if (needsRescale) {
+ tempRTC = this->rescale(info, srcRect, rescaleGamma, rescaleQuality);
+ if (!tempRTC) {
+ callback(context, nullptr);
+ return;
+ }
+ SkASSERT(SkColorSpace::Equals(tempRTC->colorInfo().colorSpace(), info.colorSpace()));
+ SkASSERT(tempRTC->origin() == kTopLeft_GrSurfaceOrigin);
+ x = y = 0;
+ } else {
+ sk_sp<GrColorSpaceXform> xform = GrColorSpaceXform::Make(this->colorInfo().colorSpace(),
+ this->colorInfo().alphaType(),
+ info.colorSpace(),
+ info.alphaType());
+ // Insert a draw to a temporary surface if we need to do a y-flip or color space conversion.
+ if (this->origin() == kBottomLeft_GrSurfaceOrigin || xform) {
+            // We flip or color convert by drawing, and we don't currently support drawing to
+            // kUnpremul.
+ if (info.alphaType() == kUnpremul_SkAlphaType) {
+ callback(context, nullptr);
+ return;
+ }
+ sk_sp<GrTextureProxy> texProxy = sk_ref_sp(fRenderTargetProxy->asTextureProxy());
+ SkRect srcRectToDraw = SkRect::Make(srcRect);
+ // If the src is not texturable first try to make a copy to a texture.
+ if (!texProxy) {
+ texProxy = GrSurfaceProxy::Copy(fContext, fRenderTargetProxy.get(),
+ this->colorInfo().colorType(),
+ GrMipMapped::kNo, srcRect, SkBackingFit::kApprox,
+ SkBudgeted::kNo);
+ if (!texProxy) {
+ callback(context, nullptr);
+ return;
+ }
+ srcRectToDraw = SkRect::MakeWH(srcRect.width(), srcRect.height());
+ }
+ tempRTC = direct->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox, srcRect.width(), srcRect.height(),
+ this->colorInfo().colorType(), info.refColorSpace(), 1, GrMipMapped::kNo,
+ kTopLeft_GrSurfaceOrigin);
+ if (!tempRTC) {
+ callback(context, nullptr);
+ return;
+ }
+ tempRTC->drawTexture(GrNoClip(), std::move(texProxy), this->colorInfo().colorType(),
+ GrSamplerState::Filter::kNearest, SkBlendMode::kSrc,
+ SK_PMColor4fWHITE, srcRectToDraw,
+ SkRect::MakeWH(srcRect.width(), srcRect.height()), GrAA::kNo,
+ GrQuadAAFlags::kNone, SkCanvas::kFast_SrcRectConstraint,
+ SkMatrix::I(), std::move(xform));
+ x = y = 0;
+ }
+ }
+ auto rtc = tempRTC ? tempRTC.get() : this;
+ return rtc->asyncReadPixels(SkIRect::MakeXYWH(x, y, info.width(), info.height()),
+ info.colorType(), callback, context);
+}
+
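+// Holds the planes produced by an async read. Each plane is either a CPU-side
+// allocation (delete[]ed on destruction) or a mapped GPU transfer buffer that is
+// returned to the client mapped-buffer manager when the result is destroyed.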
+class GrRenderTargetContext::AsyncReadResult : public SkSurface::AsyncReadResult {
+public:
+ AsyncReadResult(uint32_t inboxID) : fInboxID(inboxID) {}
+ ~AsyncReadResult() override {
+ for (int i = 0; i < fPlanes.count(); ++i) {
+ if (!fPlanes[i].fMappedBuffer) {
+ delete[] static_cast<const char*>(fPlanes[i].fData);
+ } else {
+ GrClientMappedBufferManager::BufferFinishedMessageBus::Post(
+ {std::move(fPlanes[i].fMappedBuffer), fInboxID});
+ }
+ }
+ }
+
+ int count() const override { return fPlanes.count(); }
+ const void* data(int i) const override { return fPlanes[i].fData; }
+ size_t rowBytes(int i) const override { return fPlanes[i].fRowBytes; }
+
+ bool addTransferResult(const PixelTransferResult& result,
+ SkISize size,
+ size_t rowBytes,
+ GrClientMappedBufferManager* manager) {
+ SkASSERT(!result.fTransferBuffer->isMapped());
+ const void* mappedData = result.fTransferBuffer->map();
+ if (!mappedData) {
+ return false;
+ }
+ if (result.fPixelConverter) {
+ std::unique_ptr<char[]> convertedData(new char[rowBytes * size.height()]);
+ result.fPixelConverter(convertedData.get(), mappedData);
+ this->addCpuPlane(std::move(convertedData), rowBytes);
+ result.fTransferBuffer->unmap();
+ } else {
+ manager->insert(result.fTransferBuffer);
+ this->addMappedPlane(mappedData, rowBytes, std::move(result.fTransferBuffer));
+ }
+ return true;
+ }
+
+ void addCpuPlane(std::unique_ptr<const char[]> data, size_t rowBytes) {
+ SkASSERT(data);
+ SkASSERT(rowBytes > 0);
+ fPlanes.emplace_back(data.release(), rowBytes, nullptr);
+ }
+
+private:
+ void addMappedPlane(const void* data, size_t rowBytes, sk_sp<GrGpuBuffer> mappedBuffer) {
+ SkASSERT(data);
+ SkASSERT(rowBytes > 0);
+ SkASSERT(mappedBuffer);
+ SkASSERT(mappedBuffer->isMapped());
+ fPlanes.emplace_back(data, rowBytes, std::move(mappedBuffer));
+ }
+
+ struct Plane {
+ Plane(const void* data, size_t rowBytes, sk_sp<GrGpuBuffer> buffer)
+ : fData(data), fRowBytes(rowBytes), fMappedBuffer(std::move(buffer)) {}
+ const void* fData;
+ size_t fRowBytes;
+ // If this is null then fData is heap alloc and must be delete[]ed as const char[].
+ sk_sp<GrGpuBuffer> fMappedBuffer;
+ };
+ SkSTArray<3, Plane> fPlanes;
+ uint32_t fInboxID;
+};
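+
+// Note for consumers: through the SkSurface::AsyncReadResult interface a result exposes count()
+// planes, each with data(i) and rowBytes(i). For the YUV420 readback below, plane 0 is the
+// full-size Y plane and planes 1 and 2 are the half-width, half-height U and V planes.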
+
+void GrRenderTargetContext::asyncReadPixels(const SkIRect& rect, SkColorType colorType,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ SkASSERT(rect.fLeft >= 0 && rect.fRight <= this->width());
+ SkASSERT(rect.fTop >= 0 && rect.fBottom <= this->height());
+
+ auto directContext = fContext->priv().asDirectContext();
+ SkASSERT(directContext);
+ auto mappedBufferManager = directContext->priv().clientMappedBufferManager();
+
+ auto transferResult = this->transferPixels(SkColorTypeToGrColorType(colorType), rect);
+
+ if (!transferResult.fTransferBuffer) {
+ auto ii = SkImageInfo::Make(rect.size(), colorType,
+ this->colorInfo().alphaType(),
+ this->colorInfo().refColorSpace());
+ auto result = skstd::make_unique<AsyncReadResult>(0);
+ std::unique_ptr<char[]> data(new char[ii.computeMinByteSize()]);
+ SkPixmap pm(ii, data.get(), ii.minRowBytes());
+ result->addCpuPlane(std::move(data), pm.rowBytes());
+
+        if (!this->readPixels(ii, pm.writable_addr(), pm.rowBytes(), {rect.fLeft, rect.fTop})) {
+            callback(context, nullptr);
+            return;
+        }
+ callback(context, std::move(result));
+ return;
+ }
+
+ struct FinishContext {
+ ReadPixelsCallback* fClientCallback;
+ ReadPixelsContext fClientContext;
+ SkISize fSize;
+ SkColorType fColorType;
+ GrClientMappedBufferManager* fMappedBufferManager;
+ PixelTransferResult fTransferResult;
+ };
+    // The assumption is that the caller would like to flush. We could take a parameter or
+    // require an explicit flush from the caller. We'd have to have a way to defer attaching the
+    // finish callback to GrGpu until after the next flush that flushes our op list, though.
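+    // finishContext is heap-allocated here and deleted inside finishCallback once the GPU has
+    // signaled completion.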
+ auto* finishContext = new FinishContext{callback,
+ context,
+ rect.size(),
+ colorType,
+ mappedBufferManager,
+ std::move(transferResult)};
+ auto finishCallback = [](GrGpuFinishedContext c) {
+ const auto* context = reinterpret_cast<const FinishContext*>(c);
+ auto result = skstd::make_unique<AsyncReadResult>(context->fMappedBufferManager->inboxID());
+ size_t rowBytes = context->fSize.width() * SkColorTypeBytesPerPixel(context->fColorType);
+ if (!result->addTransferResult(context->fTransferResult, context->fSize, rowBytes,
+ context->fMappedBufferManager)) {
+ result.reset();
+ }
+ (*context->fClientCallback)(context->fClientContext, std::move(result));
+ delete context;
+ };
+ GrFlushInfo flushInfo;
+ flushInfo.fFinishedContext = finishContext;
+ flushInfo.fFinishedProc = finishCallback;
+ this->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo);
+}
+
+void GrRenderTargetContext::asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ SkASSERT(srcRect.fLeft >= 0 && srcRect.fRight <= this->width());
+ SkASSERT(srcRect.fTop >= 0 && srcRect.fBottom <= this->height());
+ SkASSERT(!dstSize.isZero());
+ SkASSERT((dstSize.width() % 2 == 0) && (dstSize.height() % 2 == 0));
+
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ callback(context, nullptr);
+ return;
+ }
+ if (fRenderTargetProxy->wrapsVkSecondaryCB()) {
+ callback(context, nullptr);
+ return;
+ }
+ int x = srcRect.fLeft;
+ int y = srcRect.fTop;
+ std::unique_ptr<GrRenderTargetContext> tempRTC;
+ bool needsRescale = srcRect.size() != dstSize;
+ if (needsRescale) {
+ // We assume the caller wants kPremul. There is no way to indicate a preference.
+ auto info = SkImageInfo::Make(dstSize, kRGBA_8888_SkColorType, kPremul_SkAlphaType,
+ dstColorSpace);
+ // TODO: Incorporate the YUV conversion into last pass of rescaling.
+ tempRTC = this->rescale(info, srcRect, rescaleGamma, rescaleQuality);
+ if (!tempRTC) {
+ callback(context, nullptr);
+ return;
+ }
+ SkASSERT(SkColorSpace::Equals(tempRTC->colorInfo().colorSpace(), info.colorSpace()));
+ SkASSERT(tempRTC->origin() == kTopLeft_GrSurfaceOrigin);
+ x = y = 0;
+ } else {
+ // We assume the caller wants kPremul. There is no way to indicate a preference.
+ sk_sp<GrColorSpaceXform> xform = GrColorSpaceXform::Make(
+ this->colorInfo().colorSpace(), this->colorInfo().alphaType(), dstColorSpace.get(),
+ kPremul_SkAlphaType);
+ if (xform) {
+ sk_sp<GrTextureProxy> texProxy = this->asTextureProxyRef();
+ // TODO: Do something if the input is not a texture already.
+ if (!texProxy) {
+ callback(context, nullptr);
+ return;
+ }
+ SkRect srcRectToDraw = SkRect::Make(srcRect);
+ tempRTC = direct->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox, dstSize.width(), dstSize.height(),
+ this->colorInfo().colorType(), dstColorSpace, 1, GrMipMapped::kNo,
+ kTopLeft_GrSurfaceOrigin);
+ if (!tempRTC) {
+ callback(context, nullptr);
+ return;
+ }
+ tempRTC->drawTexture(GrNoClip(), std::move(texProxy), this->colorInfo().colorType(),
+ GrSamplerState::Filter::kNearest, SkBlendMode::kSrc,
+ SK_PMColor4fWHITE, srcRectToDraw,
+ SkRect::MakeWH(srcRect.width(), srcRect.height()), GrAA::kNo,
+ GrQuadAAFlags::kNone, SkCanvas::kFast_SrcRectConstraint,
+ SkMatrix::I(), std::move(xform));
+ x = y = 0;
+ }
+ }
+ auto srcProxy = tempRTC ? tempRTC->asTextureProxyRef() : this->asTextureProxyRef();
+ // TODO: Do something if the input is not a texture already.
+ if (!srcProxy) {
+ callback(context, nullptr);
+ return;
+ }
+ GrColorType srcColorType = tempRTC ? tempRTC->colorInfo().colorType()
+ : this->colorInfo().colorType();
+
+ auto yRTC = direct->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kApprox, dstSize.width(), dstSize.height(), GrColorType::kAlpha_8,
+ dstColorSpace, 1, GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin);
+ int halfW = dstSize.width()/2;
+ int halfH = dstSize.height()/2;
+ auto uRTC = direct->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kApprox, halfW, halfH, GrColorType::kAlpha_8, dstColorSpace, 1,
+ GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin);
+ auto vRTC = direct->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kApprox, halfW, halfH, GrColorType::kAlpha_8, dstColorSpace, 1,
+ GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin);
+ if (!yRTC || !uRTC || !vRTC) {
+ callback(context, nullptr);
+ return;
+ }
+
+ static constexpr float kRec601M[] {
+ 65.481f / 255, 128.553f / 255, 24.966f / 255, 16.f / 255, // y
+ -37.797f / 255, -74.203f / 255, 112.0f / 255, 128.f / 255, // u
+ 112.f / 255, -93.786f / 255, -18.214f / 255, 128.f / 255, // v
+ };
+ static constexpr float kRec709M[] {
+ 45.5594f / 255, 156.6288f / 255, 15.8118f / 255, 16.f / 255, // y
+ -25.6642f / 255, -86.3358f / 255, 112.f / 255, 128.f / 255, // u
+ 112.f / 255, -101.7303f / 255, -10.2697f / 255, 128.f / 255, // v
+ };
+ static constexpr float kJpegM[] {
+ 0.299f , 0.587f , 0.114f , 0.f / 255, // y
+ -0.168736f, -0.331264f, 0.5f , 128.f / 255, // u
+ 0.5f , -0.418688f, -0.081312f, 128.f / 255, // v
+ };
+ static constexpr float kIM[] {
+ 1.f, 0.f, 0.f, 0.f,
+ 0.f, 1.f, 0.f, 0.f,
+ 0.f, 0.f, 1.f, 0.f,
+ };
+ const float* baseM = kIM;
+ switch (yuvColorSpace) {
+ case kRec601_SkYUVColorSpace:
+ baseM = kRec601M;
+ break;
+ case kRec709_SkYUVColorSpace:
+ baseM = kRec709M;
+ break;
+ case kJPEG_SkYUVColorSpace:
+ baseM = kJpegM;
+ break;
+ case kIdentity_SkYUVColorSpace:
+ baseM = kIM;
+ break;
+ }
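+    // For illustration, each row of baseM holds the four coefficients (r, g, b, bias) for one
+    // output plane; with kRec601M, for example, the Y plane works out to
+    //   Y = (65.481*R + 128.553*G + 24.966*B) / 255 + 16/255,   for R, G, B in [0..1].
+    // The 4x5 color matrices built below copy one such row into the alpha row so the plane's
+    // value lands in the alpha channel of its kAlpha_8 destination.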
+ // TODO: Use one transfer buffer for all three planes to reduce map/unmap cost?
+
+ auto texMatrix = SkMatrix::MakeTrans(x, y);
+
+ SkRect dstRectY = SkRect::Make(dstSize);
+ SkRect dstRectUV = SkRect::MakeWH(halfW, halfH);
+
+ // This matrix generates (r,g,b,a) = (0, 0, 0, y)
+ float yM[20];
+ std::fill_n(yM, 15, 0.f);
+ yM[15] = baseM[0]; yM[16] = baseM[1]; yM[17] = baseM[2]; yM[18] = 0; yM[19] = baseM[3];
+ GrPaint yPaint;
+ yPaint.addColorTextureProcessor(srcProxy, srcColorType, texMatrix);
+ auto yFP = GrColorMatrixFragmentProcessor::Make(yM, false, true, false);
+ yPaint.addColorFragmentProcessor(std::move(yFP));
+ yPaint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ yRTC->fillRectToRect(GrNoClip(), std::move(yPaint), GrAA::kNo, SkMatrix::I(),
+ dstRectY, dstRectY);
+ auto yTransfer = yRTC->transferPixels(GrColorType::kAlpha_8,
+ SkIRect::MakeWH(yRTC->width(), yRTC->height()));
+ if (!yTransfer.fTransferBuffer) {
+ callback(context, nullptr);
+ return;
+ }
+
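+    // The U and V planes are half resolution, so scale the texture coordinates by 2 to step
+    // through the source at twice the rate of the half-size dst rects.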
+ texMatrix.preScale(2.f, 2.f);
+ // This matrix generates (r,g,b,a) = (0, 0, 0, u)
+ float uM[20];
+ std::fill_n(uM, 15, 0.f);
+ uM[15] = baseM[4]; uM[16] = baseM[5]; uM[17] = baseM[6]; uM[18] = 0; uM[19] = baseM[7];
+ GrPaint uPaint;
+ uPaint.addColorTextureProcessor(srcProxy, srcColorType, texMatrix,
+ GrSamplerState::ClampBilerp());
+ auto uFP = GrColorMatrixFragmentProcessor::Make(uM, false, true, false);
+ uPaint.addColorFragmentProcessor(std::move(uFP));
+ uPaint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ uRTC->fillRectToRect(GrNoClip(), std::move(uPaint), GrAA::kNo, SkMatrix::I(),
+ dstRectUV, dstRectUV);
+ auto uTransfer = uRTC->transferPixels(GrColorType::kAlpha_8,
+ SkIRect::MakeWH(uRTC->width(), uRTC->height()));
+ if (!uTransfer.fTransferBuffer) {
+ callback(context, nullptr);
+ return;
+ }
+
+ // This matrix generates (r,g,b,a) = (0, 0, 0, v)
+ float vM[20];
+ std::fill_n(vM, 15, 0.f);
+ vM[15] = baseM[8]; vM[16] = baseM[9]; vM[17] = baseM[10]; vM[18] = 0; vM[19] = baseM[11];
+ GrPaint vPaint;
+ vPaint.addColorTextureProcessor(srcProxy, srcColorType, texMatrix,
+ GrSamplerState::ClampBilerp());
+ auto vFP = GrColorMatrixFragmentProcessor::Make(vM, false, true, false);
+ vPaint.addColorFragmentProcessor(std::move(vFP));
+ vPaint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ vRTC->fillRectToRect(GrNoClip(), std::move(vPaint), GrAA::kNo, SkMatrix::I(),
+ dstRectUV, dstRectUV);
+ auto vTransfer = vRTC->transferPixels(GrColorType::kAlpha_8,
+ SkIRect::MakeWH(vRTC->width(), vRTC->height()));
+ if (!vTransfer.fTransferBuffer) {
+ callback(context, nullptr);
+ return;
+ }
+
+ struct FinishContext {
+ ReadPixelsCallback* fClientCallback;
+ ReadPixelsContext fClientContext;
+ GrClientMappedBufferManager* fMappedBufferManager;
+ SkISize fSize;
+ PixelTransferResult fYTransfer;
+ PixelTransferResult fUTransfer;
+ PixelTransferResult fVTransfer;
+ };
+    // The assumption is that the caller would like to flush. We could take a parameter or
+    // require an explicit flush from the caller. We'd have to have a way to defer attaching the
+    // finish callback to GrGpu until after the next flush that flushes our op list, though.
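+    // As above, finishContext is heap-allocated and deleted inside finishCallback once the GPU
+    // has signaled completion.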
+ auto* finishContext = new FinishContext{callback,
+ context,
+ direct->priv().clientMappedBufferManager(),
+ dstSize,
+ std::move(yTransfer),
+ std::move(uTransfer),
+ std::move(vTransfer)};
+ auto finishCallback = [](GrGpuFinishedContext c) {
+ const auto* context = reinterpret_cast<const FinishContext*>(c);
+ auto result = skstd::make_unique<AsyncReadResult>(context->fMappedBufferManager->inboxID());
+ auto manager = context->fMappedBufferManager;
+ size_t rowBytes = SkToSizeT(context->fSize.width());
+ if (!result->addTransferResult(context->fYTransfer, context->fSize, rowBytes, manager)) {
+ (*context->fClientCallback)(context->fClientContext, nullptr);
+ delete context;
+ return;
+ }
+ rowBytes /= 2;
+ SkISize uvSize = {context->fSize.width()/2, context->fSize.height()/2};
+ if (!result->addTransferResult(context->fUTransfer, uvSize, rowBytes, manager)) {
+ (*context->fClientCallback)(context->fClientContext, nullptr);
+ delete context;
+ return;
+ }
+ if (!result->addTransferResult(context->fVTransfer, uvSize, rowBytes, manager)) {
+ (*context->fClientCallback)(context->fClientContext, nullptr);
+ delete context;
+ return;
+ }
+ (*context->fClientCallback)(context->fClientContext, std::move(result));
+ delete context;
+ };
+ GrFlushInfo flushInfo;
+ flushInfo.fFinishedContext = finishContext;
+ flushInfo.fFinishedProc = finishCallback;
+ this->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo);
+}
+
+GrSemaphoresSubmitted GrRenderTargetContext::flush(SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info) {
+ ASSERT_SINGLE_OWNER
+ if (fContext->priv().abandoned()) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "flush", fContext);
+
+ return this->drawingManager()->flushSurface(fRenderTargetProxy.get(), access, info);
+}
+
+bool GrRenderTargetContext::waitOnSemaphores(int numSemaphores,
+ const GrBackendSemaphore waitSemaphores[]) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "waitOnSemaphores", fContext);
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ if (numSemaphores && !this->caps()->semaphoreSupport()) {
+ return false;
+ }
+
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ return false;
+ }
+
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ std::unique_ptr<sk_sp<GrSemaphore>[]> grSemaphores(new sk_sp<GrSemaphore>[numSemaphores]);
+ for (int i = 0; i < numSemaphores; ++i) {
+ grSemaphores[i] = resourceProvider->wrapBackendSemaphore(
+ waitSemaphores[i], GrResourceProvider::SemaphoreWrapType::kWillWait,
+ kAdopt_GrWrapOwnership);
+ }
+ this->drawingManager()->newWaitRenderTask(this->asSurfaceProxyRef(), std::move(grSemaphores),
+ numSemaphores);
+ return true;
+}
+
+void GrRenderTargetContext::insertEventMarker(const SkString& str) {
+ std::unique_ptr<GrOp> op(GrDebugMarkerOp::Make(fContext, fRenderTargetProxy.get(), str));
+ this->addOp(std::move(op));
+}
+
+void GrRenderTargetContext::drawPath(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawPath", fContext);
+
+ GrShape shape(path, style);
+
+ this->drawShape(clip, std::move(paint), aa, viewMatrix, shape);
+}
+
+void GrRenderTargetContext::drawShape(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "drawShape", fContext);
+
+ if (shape.isEmpty()) {
+ if (shape.inverseFilled()) {
+ this->drawPaint(clip, std::move(paint), viewMatrix);
+ }
+ return;
+ }
+
+ AutoCheckFlush acf(this->drawingManager());
+
+ if (!shape.style().hasPathEffect()) {
+ GrAAType aaType = this->chooseAAType(aa);
+ SkRRect rrect;
+ // We can ignore the starting point and direction since there is no path effect.
+ bool inverted;
+ if (shape.asRRect(&rrect, nullptr, nullptr, &inverted) && !inverted) {
+ if (rrect.isRect()) {
+ this->drawRect(clip, std::move(paint), aa, viewMatrix, rrect.rect(),
+ &shape.style());
+ return;
+ } else if (rrect.isOval()) {
+ this->drawOval(clip, std::move(paint), aa, viewMatrix, rrect.rect(), shape.style());
+ return;
+ }
+ this->drawRRect(clip, std::move(paint), aa, viewMatrix, rrect, shape.style());
+ return;
+ } else if (GrAAType::kCoverage == aaType && shape.style().isSimpleFill() &&
+ viewMatrix.rectStaysRect()) {
+ // TODO: the rectStaysRect restriction could be lifted if we were willing to apply
+ // the matrix to all the points individually rather than just to the rect
+ SkRect rects[2];
+ if (shape.asNestedRects(rects)) {
+ // Concave AA paths are expensive - try to avoid them for special cases
+ std::unique_ptr<GrDrawOp> op = GrStrokeRectOp::MakeNested(
+ fContext, std::move(paint), viewMatrix, rects);
+ if (op) {
+ this->addDrawOp(clip, std::move(op));
+ }
+ // Returning here indicates that there is nothing to draw in this case.
+ return;
+ }
+ }
+ }
+
+ this->drawShapeUsingPathRenderer(clip, std::move(paint), aa, viewMatrix, shape);
+}
+
+bool GrRenderTargetContextPriv::drawAndStencilPath(const GrHardClip& clip,
+ const GrUserStencilSettings* ss,
+ SkRegion::Op op,
+ bool invert,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkPath& path) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_FALSE_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fRenderTargetContext->validate();)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContextPriv", "drawAndStencilPath",
+ fRenderTargetContext->fContext);
+
+ if (path.isEmpty() && path.isInverseFillType()) {
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory(op, invert);
+ this->stencilRect(clip, ss, std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeIWH(fRenderTargetContext->width(),
+ fRenderTargetContext->height()));
+ return true;
+ }
+
+ AutoCheckFlush acf(fRenderTargetContext->drawingManager());
+
+    // An assumption here is that the path renderer will use some form of tweaking of
+    // the src color (either the input alpha or in the frag shader) to implement
+    // AA. If we have some future driver-mojo path AA that can do the right
+    // thing with respect to the blend, then we'll need some query on the PR.
+ GrAAType aaType = fRenderTargetContext->chooseAAType(aa);
+ bool hasUserStencilSettings = !ss->isUnused();
+
+ SkIRect clipConservativeBounds;
+ clip.getConservativeBounds(fRenderTargetContext->width(), fRenderTargetContext->height(),
+ &clipConservativeBounds, nullptr);
+
+ GrShape shape(path, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fCaps = fRenderTargetContext->caps();
+ canDrawArgs.fProxy = fRenderTargetContext->proxy();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fClipConservativeBounds = &clipConservativeBounds;
+ canDrawArgs.fAAType = aaType;
+ SkASSERT(!fRenderTargetContext->wrapsVkSecondaryCB());
+ canDrawArgs.fTargetIsWrappedVkSecondaryCB = false;
+ canDrawArgs.fHasUserStencilSettings = hasUserStencilSettings;
+
+ // Don't allow the SW renderer
+ GrPathRenderer* pr = fRenderTargetContext->drawingManager()->getPathRenderer(
+ canDrawArgs, false, GrPathRendererChain::DrawType::kStencilAndColor);
+ if (!pr) {
+ return false;
+ }
+
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory(op, invert);
+
+ GrPathRenderer::DrawPathArgs args{fRenderTargetContext->drawingManager()->getContext(),
+ std::move(paint),
+ ss,
+ fRenderTargetContext,
+ &clip,
+ &clipConservativeBounds,
+ &viewMatrix,
+ &shape,
+ aaType,
+ fRenderTargetContext->colorInfo().isLinearlyBlended()};
+ pr->drawPath(args);
+ return true;
+}
+
+SkBudgeted GrRenderTargetContextPriv::isBudgeted() const {
+ ASSERT_SINGLE_OWNER_PRIV
+
+ if (fRenderTargetContext->fContext->priv().abandoned()) {
+ return SkBudgeted::kNo;
+ }
+
+ SkDEBUGCODE(fRenderTargetContext->validate();)
+
+ return fRenderTargetContext->fRenderTargetProxy->isBudgeted();
+}
+
+void GrRenderTargetContext::drawShapeUsingPathRenderer(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const GrShape& originalShape) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "internalDrawPath", fContext);
+
+ if (!viewMatrix.isFinite() || !originalShape.bounds().isFinite()) {
+ return;
+ }
+
+ SkIRect clipConservativeBounds;
+ clip.getConservativeBounds(this->width(), this->height(), &clipConservativeBounds, nullptr);
+
+ GrShape tempShape;
+ GrAAType aaType = this->chooseAAType(aa);
+
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fCaps = this->caps();
+ canDrawArgs.fProxy = this->proxy();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &originalShape;
+ canDrawArgs.fClipConservativeBounds = &clipConservativeBounds;
+ canDrawArgs.fTargetIsWrappedVkSecondaryCB = this->wrapsVkSecondaryCB();
+ canDrawArgs.fHasUserStencilSettings = false;
+
+ GrPathRenderer* pr;
+ static constexpr GrPathRendererChain::DrawType kType = GrPathRendererChain::DrawType::kColor;
+ if (originalShape.isEmpty() && !originalShape.inverseFilled()) {
+ return;
+ }
+
+ canDrawArgs.fAAType = aaType;
+
+    // Try a first pass without applying any of the style to the geometry (and barring SW)
+ pr = this->drawingManager()->getPathRenderer(canDrawArgs, false, kType);
+ SkScalar styleScale = GrStyle::MatrixToScaleFactor(viewMatrix);
+
+ if (!pr && originalShape.style().pathEffect()) {
+ // It didn't work above, so try again with the path effect applied.
+ tempShape = originalShape.applyStyle(GrStyle::Apply::kPathEffectOnly, styleScale);
+ if (tempShape.isEmpty()) {
+ return;
+ }
+ canDrawArgs.fShape = &tempShape;
+ pr = this->drawingManager()->getPathRenderer(canDrawArgs, false, kType);
+ }
+ if (!pr) {
+ if (canDrawArgs.fShape->style().applies()) {
+ tempShape = canDrawArgs.fShape->applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec,
+ styleScale);
+ if (tempShape.isEmpty()) {
+ return;
+ }
+ canDrawArgs.fShape = &tempShape;
+ // This time, allow SW renderer
+ pr = this->drawingManager()->getPathRenderer(canDrawArgs, true, kType);
+ } else {
+ pr = this->drawingManager()->getSoftwarePathRenderer();
+ }
+ }
+
+ if (!pr) {
+#ifdef SK_DEBUG
+ SkDebugf("Unable to find path renderer compatible with path.\n");
+#endif
+ return;
+ }
+
+ GrPathRenderer::DrawPathArgs args{this->drawingManager()->getContext(),
+ std::move(paint),
+ &GrUserStencilSettings::kUnused,
+ this,
+ &clip,
+ &clipConservativeBounds,
+ &viewMatrix,
+ canDrawArgs.fShape,
+ aaType,
+ this->colorInfo().isLinearlyBlended()};
+ pr->drawPath(args);
+}
+
+static void op_bounds(SkRect* bounds, const GrOp* op) {
+ *bounds = op->bounds();
+ if (op->hasZeroArea()) {
+ if (op->hasAABloat()) {
+ bounds->outset(0.5f, 0.5f);
+ } else {
+ // We don't know which way the particular GPU will snap lines or points at integer
+ // coords. So we ensure that the bounds is large enough for either snap.
+ SkRect before = *bounds;
+ bounds->roundOut(bounds);
+ if (bounds->fLeft == before.fLeft) {
+ bounds->fLeft -= 1;
+ }
+ if (bounds->fTop == before.fTop) {
+ bounds->fTop -= 1;
+ }
+ if (bounds->fRight == before.fRight) {
+ bounds->fRight += 1;
+ }
+ if (bounds->fBottom == before.fBottom) {
+ bounds->fBottom += 1;
+ }
+ }
+ }
+}
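+
+// A worked example of the snap outset above: for op bounds (1, 1, 3, 3), roundOut() leaves the
+// rect unchanged since all sides are already integral, so each side is then pushed out by one,
+// giving (0, 0, 4, 4), which covers the pixels regardless of the GPU's snapping direction.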
+
+void GrRenderTargetContext::addOp(std::unique_ptr<GrOp> op) {
+ this->getOpsTask()->addOp(
+ std::move(op), GrTextureResolveManager(this->drawingManager()), *this->caps());
+}
+
+void GrRenderTargetContext::addDrawOp(const GrClip& clip, std::unique_ptr<GrDrawOp> op,
+ const std::function<WillAddOpFn>& willAddFn) {
+ ASSERT_SINGLE_OWNER
+ if (fContext->priv().abandoned()) {
+ fContext->priv().opMemoryPool()->release(std::move(op));
+ return;
+ }
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(op->fAddDrawOpCalled = true;)
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrRenderTargetContext", "addDrawOp", fContext);
+
+ // Setup clip
+ SkRect bounds;
+ op_bounds(&bounds, op.get());
+ GrAppliedClip appliedClip;
+ GrDrawOp::FixedFunctionFlags fixedFunctionFlags = op->fixedFunctionFlags();
+ bool usesHWAA = fixedFunctionFlags & GrDrawOp::FixedFunctionFlags::kUsesHWAA;
+ bool usesStencil = fixedFunctionFlags & GrDrawOp::FixedFunctionFlags::kUsesStencil;
+
+ if (usesStencil) {
+ this->setNeedsStencil(usesHWAA);
+ }
+
+ if (!clip.apply(fContext, this, usesHWAA, usesStencil, &appliedClip, &bounds)) {
+ fContext->priv().opMemoryPool()->release(std::move(op));
+ return;
+ }
+
+ SkASSERT((!usesStencil && !appliedClip.hasStencilClip()) || (fNumStencilSamples > 0));
+
+ GrClampType clampType = GrColorTypeClampType(this->colorInfo().colorType());
+ // MIXED SAMPLES TODO: If we start using mixed samples for clips we will need to check the clip
+ // here as well.
+ bool hasMixedSampledCoverage = (usesHWAA && this->numSamples() <= 1);
+#ifdef SK_DEBUG
+ if (hasMixedSampledCoverage) {
+ SkASSERT(usesStencil);
+ SkASSERT(fRenderTargetProxy->canUseMixedSamples(*this->caps()));
+ }
+#endif
+ GrProcessorSet::Analysis analysis = op->finalize(
+ *this->caps(), &appliedClip, hasMixedSampledCoverage, clampType);
+
+ GrXferProcessor::DstProxy dstProxy;
+ if (analysis.requiresDstTexture()) {
+ if (!this->setupDstProxy(clip, *op, &dstProxy)) {
+ fContext->priv().opMemoryPool()->release(std::move(op));
+ return;
+ }
+ }
+
+ op->setClippedBounds(bounds);
+ auto opsTask = this->getOpsTask();
+ if (willAddFn) {
+ willAddFn(op.get(), opsTask->uniqueID());
+ }
+ opsTask->addDrawOp(std::move(op), analysis, std::move(appliedClip), dstProxy,
+ GrTextureResolveManager(this->drawingManager()), *this->caps());
+}
+
+bool GrRenderTargetContext::setupDstProxy(const GrClip& clip, const GrOp& op,
+ GrXferProcessor::DstProxy* dstProxy) {
+ // If we are wrapping a vulkan secondary command buffer, we can't make a dst copy because we
+ // don't actually have a VkImage to make a copy of. Additionally we don't have the power to
+ // start and stop the render pass in order to make the copy.
+ if (fRenderTargetProxy->wrapsVkSecondaryCB()) {
+ return false;
+ }
+
+ if (this->caps()->textureBarrierSupport() && !fRenderTargetProxy->requiresManualMSAAResolve()) {
+ if (GrTextureProxy* texProxy = fRenderTargetProxy->asTextureProxy()) {
+            // The render target is a texture, so we can read from it directly in the shader. The XP
+            // will be responsible for detecting this situation and requesting a texture barrier.
+ dstProxy->setProxy(sk_ref_sp(texProxy));
+ dstProxy->setOffset(0, 0);
+ return true;
+ }
+ }
+
+ SkIRect copyRect = SkIRect::MakeWH(fRenderTargetProxy->width(), fRenderTargetProxy->height());
+
+ SkIRect clippedRect;
+ clip.getConservativeBounds(
+ fRenderTargetProxy->width(), fRenderTargetProxy->height(), &clippedRect);
+ SkRect opBounds = op.bounds();
+    // If the op has AA bloating or is an infinitely thin geometry (hairline), outset the bounds
+    // by 0.5 pixels.
+ if (op.hasAABloat() || op.hasZeroArea()) {
+ opBounds.outset(0.5f, 0.5f);
+        // An antialiased/hairline draw can sometimes bleed outside of the clip's bounds. For
+        // performance, we may ignore the clip when the draw is entirely inside the clip in float
+        // space but hits pixels just outside the clip when actually rasterizing.
+ clippedRect.outset(1, 1);
+ clippedRect.intersect(SkIRect::MakeWH(
+ fRenderTargetProxy->width(), fRenderTargetProxy->height()));
+ }
+ SkIRect opIBounds;
+ opBounds.roundOut(&opIBounds);
+ if (!clippedRect.intersect(opIBounds)) {
+#ifdef SK_DEBUG
+        GrCapsDebugf(this->caps(), "setupDstProxy: Missed an early reject; bailing on draw.");
+#endif
+ return false;
+ }
+
+ // MSAA consideration: When there is support for reading MSAA samples in the shader we could
+ // have per-sample dst values by making the copy multisampled.
+ GrCaps::DstCopyRestrictions restrictions = this->caps()->getDstCopyRestrictions(
+ fRenderTargetProxy.get(), this->colorInfo().colorType());
+
+ if (!restrictions.fMustCopyWholeSrc) {
+ copyRect = clippedRect;
+ }
+
+ SkIPoint dstOffset;
+ SkBackingFit fit;
+ if (restrictions.fRectsMustMatch == GrSurfaceProxy::RectsMustMatch::kYes) {
+ dstOffset = {0, 0};
+ fit = SkBackingFit::kExact;
+ } else {
+ dstOffset = {copyRect.fLeft, copyRect.fTop};
+ fit = SkBackingFit::kApprox;
+ }
+ sk_sp<GrTextureProxy> newProxy = GrSurfaceProxy::Copy(
+ fContext, fRenderTargetProxy.get(), this->colorInfo().colorType(), GrMipMapped::kNo,
+ copyRect, fit, SkBudgeted::kYes, restrictions.fRectsMustMatch);
+ SkASSERT(newProxy);
+
+ dstProxy->setProxy(std::move(newProxy));
+ dstProxy->setOffset(dstOffset);
+ return true;
+}
+
+bool GrRenderTargetContext::blitTexture(GrTextureProxy* src, GrColorType srcColorType,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) {
+ SkIRect clippedSrcRect;
+ SkIPoint clippedDstPoint;
+ if (!GrClipSrcRectAndDstPoint(this->asSurfaceProxy()->isize(), src->isize(), srcRect, dstPoint,
+ &clippedSrcRect, &clippedDstPoint)) {
+ return false;
+ }
+
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ auto fp = GrSimpleTextureEffect::Make(sk_ref_sp(src), srcColorType, SkMatrix::I());
+ if (!fp) {
+ return false;
+ }
+ paint.addColorFragmentProcessor(std::move(fp));
+
+ this->fillRectToRect(
+ GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeXYWH(clippedDstPoint.fX, clippedDstPoint.fY, clippedSrcRect.width(),
+ clippedSrcRect.height()),
+ SkRect::Make(clippedSrcRect));
+ return true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetContext.h b/gfx/skia/skia/src/gpu/GrRenderTargetContext.h
new file mode 100644
index 0000000000..c10bf008a8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetContext.h
@@ -0,0 +1,657 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetContext_DEFINED
+#define GrRenderTargetContext_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+class GrBackendSemaphore;
+class GrClip;
+class GrColorSpaceXform;
+class GrCoverageCountingPathRenderer;
+class GrDrawingManager;
+class GrDrawOp;
+class GrFixedClip;
+class GrOp;
+class GrRenderTarget;
+class GrRenderTargetContextPriv;
+class GrShape;
+class GrStyle;
+class GrTextureProxy;
+struct GrUserStencilSettings;
+struct SkDrawShadowRec;
+class SkGlyphRunList;
+struct SkIPoint;
+struct SkIRect;
+class SkLatticeIter;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkPoint;
+struct SkRect;
+class SkRegion;
+class SkRRect;
+struct SkRSXform;
+class SkTextBlob;
+class SkVertices;
+
+/**
+ * A helper object to orchestrate commands (draws, etc...) for GrSurfaces that are GrRenderTargets.
+ */
+class GrRenderTargetContext : public GrSurfaceContext {
+public:
+ ~GrRenderTargetContext() override;
+
+ virtual void drawGlyphRunList(const GrClip&, const SkMatrix& viewMatrix, const SkGlyphRunList&);
+
+ /**
+     * Provides a performance hint that the render target's contents are allowed
+ * to become undefined.
+ */
+ void discard();
+
+ enum class CanClearFullscreen : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ /**
+     * Clear the entire render target, or just a rect of it, ignoring any clips.
+ * @param rect the rect to clear or the whole thing if rect is NULL.
+ * @param color the color to clear to.
+ * @param CanClearFullscreen allows partial clears to be converted to fullscreen clears on
+ * tiling platforms where that is an optimization.
+ */
+ void clear(const SkIRect* rect, const SkPMColor4f& color, CanClearFullscreen);
+
+ void clear(const SkPMColor4f& color) {
+ return this->clear(nullptr, color, CanClearFullscreen::kYes);
+ }
+
+ /**
+ * Draw everywhere (respecting the clip) with the paint.
+ */
+ void drawPaint(const GrClip&, GrPaint&&, const SkMatrix& viewMatrix);
+
+ /**
+ * Draw the rect using a paint.
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether rect is antialiased
+ * @param viewMatrix transformation matrix
+ * @param style The style to apply. Null means fill. Currently path effects are not
+ * allowed.
+     * The rect's coords are used to access the paint (through the texture matrix)
+ */
+ void drawRect(const GrClip&,
+ GrPaint&& paint,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRect&,
+ const GrStyle* style = nullptr);
+
+ /**
+ * Maps a rectangle of shader coordinates to a rectangle and fills that rectangle.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether rect is antialiased
+ * @param viewMatrix transformation matrix which applies to rectToDraw
+ * @param rectToDraw the rectangle to draw
+ * @param localRect the rectangle of shader coordinates applied to rectToDraw
+ */
+ void fillRectToRect(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRect& rectToDraw,
+ const SkRect& localRect) {
+ this->drawFilledQuad(clip, std::move(paint), aa,
+ aa == GrAA::kYes ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone,
+ GrQuad::MakeFromRect(rectToDraw, viewMatrix), GrQuad(localRect));
+ }
+
+ /**
+ * Fills a rect with a paint and a localMatrix.
+ */
+ void fillRectWithLocalMatrix(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix) {
+ this->drawFilledQuad(clip, std::move(paint), aa,
+ aa == GrAA::kYes ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone,
+ GrQuad::MakeFromRect(rect, viewMatrix),
+ GrQuad::MakeFromRect(rect, localMatrix));
+ }
+
+ /**
+ * Creates an op that draws a fill rect with per-edge control over anti-aliasing.
+ *
+ * This is a specialized version of fillQuadWithEdgeAA, but is kept separate since knowing
+ * the geometry is a rectangle affords more optimizations.
+ */
+ void fillRectWithEdgeAA(const GrClip& clip, GrPaint&& paint, GrAA aa, GrQuadAAFlags edgeAA,
+ const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect* optionalLocalRect = nullptr) {
+ const SkRect& localRect = optionalLocalRect ? *optionalLocalRect : rect;
+ this->drawFilledQuad(clip, std::move(paint), aa, edgeAA,
+ GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(localRect));
+ }
+
+ /**
+ * Similar to fillRectWithEdgeAA but draws an arbitrary 2D convex quadrilateral transformed
+ * by 'viewMatrix', with per-edge control over anti-aliasing. The quad should follow the
+ * ordering used by SkRect::toQuad(), which determines how the edge AA is applied:
+ * - "top" = points [0] and [1]
+ * - "right" = points[1] and [2]
+ * - "bottom" = points[2] and [3]
+ * - "left" = points[3] and [0]
+ *
+ * The last argument, 'optionalLocalQuad', can be null if no separate local coordinates are
+ * necessary.
+ */
+ void fillQuadWithEdgeAA(const GrClip& clip, GrPaint&& paint, GrAA aa, GrQuadAAFlags edgeAA,
+ const SkMatrix& viewMatrix, const SkPoint quad[4],
+ const SkPoint optionalLocalQuad[4]) {
+ const SkPoint* localQuad = optionalLocalQuad ? optionalLocalQuad : quad;
+ this->drawFilledQuad(clip, std::move(paint), aa, edgeAA,
+ GrQuad::MakeFromSkQuad(quad, viewMatrix),
+ GrQuad::MakeFromSkQuad(localQuad, SkMatrix::I()));
+ }
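+
+    // For reference, SkRect::toQuad() emits corners clockwise from the top-left, so for
+    // SkRect::MakeLTRB(l, t, r, b) the ordering expected above is:
+    //   points[0] = (l, t), points[1] = (r, t), points[2] = (r, b), points[3] = (l, b).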
+
+ /** Used with drawQuadSet */
+ struct QuadSetEntry {
+ SkRect fRect;
+ SkPMColor4f fColor; // Overrides any color on the GrPaint
+ SkMatrix fLocalMatrix;
+ GrQuadAAFlags fAAFlags;
+ };
+
+ // TODO(michaelludwig) - remove if the bulk API is not useful for SkiaRenderer
+ void drawQuadSet(const GrClip& clip, GrPaint&& paint, GrAA aa, const SkMatrix& viewMatrix,
+ const QuadSetEntry[], int cnt);
+
+ /**
+ * Creates an op that draws a subrectangle of a texture. The passed color is modulated by the
+ * texture's color. 'srcRect' specifies the rectangle of the texture to draw. 'dstRect'
+ * specifies the rectangle to draw in local coords which will be transformed by 'viewMatrix' to
+ * device space.
+ */
+ void drawTexture(const GrClip& clip, sk_sp<GrTextureProxy> proxy, GrColorType srcColorType,
+ GrSamplerState::Filter filter, SkBlendMode mode, const SkPMColor4f& color,
+ const SkRect& srcRect, const SkRect& dstRect, GrAA aa, GrQuadAAFlags edgeAA,
+ SkCanvas::SrcRectConstraint constraint, const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> texXform) {
+ const SkRect* domain = constraint == SkCanvas::kStrict_SrcRectConstraint ?
+ &srcRect : nullptr;
+ this->drawTexturedQuad(clip, std::move(proxy), srcColorType, std::move(texXform), filter,
+ color, mode, aa, edgeAA, GrQuad::MakeFromRect(dstRect, viewMatrix),
+ GrQuad(srcRect), domain);
+ }
+
+ /**
+ * Variant of drawTexture that instead draws the texture applied to 'dstQuad' transformed by
+ * 'viewMatrix', using the 'srcQuad' texture coordinates clamped to the optional 'domain'. If
+ * 'domain' is null, it's equivalent to using the fast src rect constraint. If 'domain' is
+ * provided, the strict src rect constraint is applied using 'domain'.
+ */
+ void drawTextureQuad(const GrClip& clip, sk_sp<GrTextureProxy> proxy, GrColorType srcColorType,
+ GrSamplerState::Filter filter, SkBlendMode mode, const SkPMColor4f& color,
+ const SkPoint srcQuad[4], const SkPoint dstQuad[4], GrAA aa,
+ GrQuadAAFlags edgeAA, const SkRect* domain, const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> texXform) {
+ this->drawTexturedQuad(clip, std::move(proxy), srcColorType, std::move(texXform), filter,
+ color, mode, aa, edgeAA, GrQuad::MakeFromSkQuad(dstQuad, viewMatrix),
+ GrQuad::MakeFromSkQuad(srcQuad, SkMatrix::I()), domain);
+ }
+
+ /** Used with drawTextureSet */
+ struct TextureSetEntry {
+ sk_sp<GrTextureProxy> fProxy;
+ GrColorType fSrcColorType;
+ SkRect fSrcRect;
+ SkRect fDstRect;
+ const SkPoint* fDstClipQuad; // Must be null, or point to an array of 4 points
+ const SkMatrix* fPreViewMatrix; // If not null, entry's CTM is 'viewMatrix' * fPreViewMatrix
+ float fAlpha;
+ GrQuadAAFlags fAAFlags;
+ };
+ /**
+ * Draws a set of textures with a shared filter, color, view matrix, color xform, and
+ * texture color xform. The textures must all have the same GrTextureType and GrConfig.
+ *
+     * If any entries provide a non-null fDstClipQuad array, its four points are read
+     * immediately, so the pointer can become invalid after this returns.
+ */
+ void drawTextureSet(const GrClip&, const TextureSetEntry[], int cnt, GrSamplerState::Filter,
+ SkBlendMode mode, GrAA aa, SkCanvas::SrcRectConstraint,
+ const SkMatrix& viewMatrix, sk_sp<GrColorSpaceXform> texXform);
+
+ /**
+ * Draw a roundrect using a paint.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether rrect is antialiased.
+ * @param viewMatrix transformation matrix
+ * @param rrect the roundrect to draw
+ * @param style style to apply to the rrect. Currently path effects are not allowed.
+ */
+ void drawRRect(const GrClip&,
+ GrPaint&&,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const GrStyle& style);
+
+ /**
+ * Use a fast method to render the ambient and spot shadows for a path.
+ * Will return false if not possible for the given path.
+ *
+ * @param viewMatrix transformation matrix
+ * @param path the path to shadow
+ * @param rec parameters for shadow rendering
+ */
+ bool drawFastShadow(const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const SkDrawShadowRec& rec);
+
+ /**
+ * Shortcut for filling a SkPath consisting of nested rrects using a paint. The result is
+ * undefined if outer does not contain inner.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether rrects edges are antialiased
+ * @param viewMatrix transformation matrix
+ * @param outer the outer roundrect
+ * @param inner the inner roundrect
+ */
+ void drawDRRect(const GrClip&,
+ GrPaint&&,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRRect& outer,
+ const SkRRect& inner);
+
+ /**
+ * Draws a path.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether the path is antialiased.
+ * @param viewMatrix transformation matrix
+ * @param path the path to draw
+ * @param style style to apply to the path.
+ */
+ void drawPath(const GrClip&,
+ GrPaint&&,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkPath&,
+ const GrStyle&);
+
+ /**
+ * Draws a shape.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether the path is antialiased.
+ * @param viewMatrix transformation matrix
+ * @param shape the shape to draw
+ */
+ void drawShape(const GrClip&,
+ GrPaint&&,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const GrShape&);
+
+
+ /**
+ * Draws vertices with a paint.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param vertices specifies the mesh to draw.
+ * @param bones bone deformation matrices.
+ * @param boneCount number of bone matrices.
+ * @param overridePrimType primitive type to draw. If NULL, derive prim type from vertices.
+ */
+ void drawVertices(const GrClip&,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ sk_sp<SkVertices> vertices,
+ const SkVertices::Bone bones[],
+ int boneCount,
+ GrPrimitiveType* overridePrimType = nullptr);
+
+ /**
+ * Draws textured sprites from an atlas with a paint. This currently does not support AA for the
+ * sprite rectangle edges.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param spriteCount number of sprites.
+ * @param xform array of compressed transformation data, required.
+ * @param texRect array of texture rectangles used to access the paint.
+     * @param colors optional array of per-sprite colors, supersedes
+ * the paint's color field.
+ */
+ void drawAtlas(const GrClip&,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ int spriteCount,
+ const SkRSXform xform[],
+ const SkRect texRect[],
+ const SkColor colors[]);
+
+ /**
+ * Draws a region.
+ *
+ * @param paint describes how to color pixels
+ * @param viewMatrix transformation matrix
+ * @param aa should the rects of the region be antialiased.
+ * @param region the region to be drawn
+ * @param style style to apply to the region
+ */
+ void drawRegion(const GrClip&,
+ GrPaint&& paint,
+ GrAA aa,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ const GrStyle& style,
+ const GrUserStencilSettings* ss = nullptr);
+
+ /**
+ * Draws an oval.
+ *
+ * @param paint describes how to color pixels.
+ * @param GrAA Controls whether the oval is antialiased.
+ * @param viewMatrix transformation matrix
+ * @param oval the bounding rect of the oval.
+ * @param style style to apply to the oval. Currently path effects are not allowed.
+ */
+ void drawOval(const GrClip&,
+ GrPaint&& paint,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style);
+ /**
+ * Draws a partial arc of an oval.
+ *
+ * @param paint describes how to color pixels.
+     * @param GrAA Controls whether the arc is antialiased.
+ * @param viewMatrix transformation matrix.
+ * @param oval the bounding rect of the oval.
+ * @param startAngle starting angle in degrees.
+ * @param sweepAngle angle to sweep in degrees. Must be in (-360, 360)
+     * @param useCenter true means that the implied path begins at the oval center, connects as
+     *                  a line to the point indicated by the start angle, and contains the arc
+     *                  indicated by the sweep angle. If false, the line beginning at the center
+     *                  point is omitted.
+ * @param style style to apply to the oval.
+ */
+ void drawArc(const GrClip&,
+ GrPaint&& paint,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle& style);
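+
+    // Illustrative call (hypothetical values): a filled quarter "pie wedge" starting at the
+    // 3 o'clock position and sweeping 90 degrees, anchored at the oval's center:
+    //   rtc->drawArc(clip, std::move(paint), GrAA::kYes, viewMatrix,
+    //                SkRect::MakeWH(100, 100), /*startAngle=*/0.f, /*sweepAngle=*/90.f,
+    //                /*useCenter=*/true, GrStyle::SimpleFill());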
+
+ /**
+ * Draw the image as a set of rects, specified by |iter|.
+ */
+ void drawImageLattice(const GrClip&,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform>,
+ GrSamplerState::Filter,
+ std::unique_ptr<SkLatticeIter>,
+ const SkRect& dst);
+
+ /**
+     * Draws the src texture with no matrix. The dst rect's top-left is at dstPoint and its width
+     * and height match srcRect. The srcRect and dstRect are clipped to the bounds of the src and
+     * dst surfaces respectively.
+ */
+ bool blitTexture(GrTextureProxy* src, GrColorType srcColorType, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ /**
+ * Adds the necessary signal and wait semaphores and adds the passed in SkDrawable to the
+ * command stream.
+ */
+ void drawDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler>, const SkRect& bounds);
+
+ using ReadPixelsCallback = SkSurface::ReadPixelsCallback;
+ using ReadPixelsContext = SkSurface::ReadPixelsContext;
+ using RescaleGamma = SkSurface::RescaleGamma;
+
+ // GPU implementation for SkSurface::asyncRescaleAndReadPixels.
+ void asyncRescaleAndReadPixels(const SkImageInfo& info, const SkIRect& srcRect,
+ RescaleGamma rescaleGamma, SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback, ReadPixelsContext context);
+ // GPU implementation for SkSurface::asyncRescaleAndReadPixelsYUV420.
+ void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context);
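+
+    // A minimal usage sketch, assuming a caller-owned context object; the callback signature
+    // mirrors how these methods invoke it (context pointer plus a unique_ptr to the result):
+    //   static void onReadDone(ReadPixelsContext ctx,
+    //                          std::unique_ptr<const SkSurface::AsyncReadResult> result) {
+    //       if (result) { /* result->data(0) / result->rowBytes(0) hold the pixels */ }
+    //   }
+    //   rtc->asyncRescaleAndReadPixels(info, srcRect, RescaleGamma::kSrc,
+    //                                  kMedium_SkFilterQuality, onReadDone, myContext);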
+
+ /**
+     * After this returns, any pending surface IO will be issued to the backend 3D API and
+ * if the surface has MSAA it will be resolved.
+ */
+ GrSemaphoresSubmitted flush(SkSurface::BackendSurfaceAccess access, const GrFlushInfo&);
+
+ /**
+ * The next time this GrRenderTargetContext is flushed, the gpu will wait on the passed in
+ * semaphores before executing any commands.
+ */
+ bool waitOnSemaphores(int numSemaphores, const GrBackendSemaphore waitSemaphores[]);
+
+ void insertEventMarker(const SkString&);
+
+ const GrRenderTargetProxy* proxy() const { return fRenderTargetProxy.get(); }
+ int width() const { return fRenderTargetProxy->width(); }
+ int height() const { return fRenderTargetProxy->height(); }
+ int numSamples() const { return fRenderTargetProxy->numSamples(); }
+ const SkSurfaceProps& surfaceProps() const { return fSurfaceProps; }
+ GrSurfaceOrigin origin() const { return fRenderTargetProxy->origin(); }
+ bool wrapsVkSecondaryCB() const { return fRenderTargetProxy->wrapsVkSecondaryCB(); }
+ GrMipMapped mipMapped() const;
+
+ // This entry point should only be called if the backing GPU object is known to be
+ // instantiated.
+ GrRenderTarget* accessRenderTarget() { return fRenderTargetProxy->peekRenderTarget(); }
+
+ GrSurfaceProxy* asSurfaceProxy() override { return fRenderTargetProxy.get(); }
+ const GrSurfaceProxy* asSurfaceProxy() const override { return fRenderTargetProxy.get(); }
+ sk_sp<GrSurfaceProxy> asSurfaceProxyRef() override { return fRenderTargetProxy; }
+
+ GrTextureProxy* asTextureProxy() override;
+ const GrTextureProxy* asTextureProxy() const override;
+ sk_sp<GrTextureProxy> asTextureProxyRef() override;
+
+ GrRenderTargetProxy* asRenderTargetProxy() override { return fRenderTargetProxy.get(); }
+ sk_sp<GrRenderTargetProxy> asRenderTargetProxyRef() override { return fRenderTargetProxy; }
+
+ GrRenderTargetContext* asRenderTargetContext() override { return this; }
+
+ // Provides access to functions that aren't part of the public API.
+ GrRenderTargetContextPriv priv();
+ const GrRenderTargetContextPriv priv() const;
+
+ GrTextTarget* textTarget() { return fTextTarget.get(); }
+
+#if GR_TEST_UTILS
+ bool testingOnly_IsInstantiated() const { return fRenderTargetProxy->isInstantiated(); }
+ void testingOnly_SetPreserveOpsOnFullClear() { fPreserveOpsOnFullClear_TestingOnly = true; }
+ GrOpsTask* testingOnly_PeekLastOpsTask() { return fOpsTask.get(); }
+#endif
+
+protected:
+ GrRenderTargetContext(GrRecordingContext*, sk_sp<GrRenderTargetProxy>, GrColorType,
+ sk_sp<SkColorSpace>, const SkSurfaceProps*, bool managedOpsTask = true);
+
+ SkDEBUGCODE(void validate() const override;)
+
+private:
+ class TextTarget;
+ enum class QuadOptimization;
+
+ GrAAType chooseAAType(GrAA);
+
+ friend class GrAtlasTextBlob; // for access to add[Mesh]DrawOp
+ friend class GrClipStackClip; // for access to getOpsTask
+ friend class GrOnFlushResourceProvider; // for access to getOpsTask (http://skbug.com/9357)
+
+ friend class GrDrawingManager; // for ctor
+ friend class GrRenderTargetContextPriv;
+
+ // All the path renderers currently make their own ops
+ friend class GrSoftwarePathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrAAConvexPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrDashLinePathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrAAHairLinePathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrAALinearizingConvexPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrSmallPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrDefaultPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrStencilAndCoverPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrTessellatingPathRenderer; // for access to add[Mesh]DrawOp
+ friend class GrCCPerFlushResources; // for access to addDrawOp
+ friend class GrCoverageCountingPathRenderer; // for access to addDrawOp
+ // for a unit test
+ friend void test_draw_op(GrContext*,
+ GrRenderTargetContext*,
+ std::unique_ptr<GrFragmentProcessor>,
+ sk_sp<GrTextureProxy>,
+ GrColorType);
+
+ GrOpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const;
+ void setNeedsStencil(bool useMixedSamplesIfNotMSAA);
+
+ void internalClear(const GrFixedClip&, const SkPMColor4f&, CanClearFullscreen);
+ void internalStencilClear(const GrFixedClip&, bool insideStencilMask);
+
+ // Only consumes the GrPaint if successful.
+ bool drawFilledDRRect(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA,
+ const SkMatrix& viewMatrix,
+ const SkRRect& origOuter,
+ const SkRRect& origInner);
+
+ // If the drawn quad's paint is a const blended color, provide it as a non-null pointer to
+ // 'constColor', which enables the draw-as-clear optimization. Otherwise it is assumed the paint
+ // requires some form of shading that invalidates using a clear op.
+ //
+ // The non-const pointers should be the original draw request on input, and will be updated as
+ // appropriate depending on the returned optimization level.
+ //
+    // 'stencilSettings' are provided merely for decision-making purposes; when non-null,
+ // optimization strategies that submit special ops are avoided.
+ QuadOptimization attemptQuadOptimization(const GrClip& clip,
+ const SkPMColor4f* constColor,
+ const GrUserStencilSettings* stencilSettings,
+ GrAA* aa,
+ GrQuadAAFlags* edgeFlags,
+ GrQuad* deviceQuad,
+ GrQuad* localQuad);
+
+ // If stencil settings, 'ss', are non-null, AA controls MSAA or no AA. If they are null, then AA
+    // can choose between coverage and MSAA, as per chooseAAType(). This will always attempt to apply
+ // quad optimizations, so all quad/rect public APIs should rely on this function for consistent
+ // clipping behavior.
+ void drawFilledQuad(const GrClip& clip,
+ GrPaint&& paint,
+ GrAA aa,
+ GrQuadAAFlags edgeFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const GrUserStencilSettings* ss = nullptr);
+
+ // Like drawFilledQuad but does not require using a GrPaint or FP for texturing
+ void drawTexturedQuad(const GrClip& clip,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> textureXform,
+ GrSamplerState::Filter filter,
+ const SkPMColor4f& color,
+ SkBlendMode blendMode,
+ GrAA aa,
+ GrQuadAAFlags edgeFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const SkRect* domain = nullptr);
+
+ void drawShapeUsingPathRenderer(const GrClip&, GrPaint&&, GrAA, const SkMatrix&,
+ const GrShape&);
+
+ void addOp(std::unique_ptr<GrOp>);
+
+ // Allows caller of addDrawOp to know which op list an op will be added to.
+ using WillAddOpFn = void(GrOp*, uint32_t opsTaskID);
+ // These perform processing specific to GrDrawOp-derived ops before recording them into an
+ // op list. Before adding the op to an op list the WillAddOpFn is called. Note that it
+ // will not be called in the event that the op is discarded. Moreover, the op may merge into
+ // another op after the function is called (either before addDrawOp returns or some time later).
+ void addDrawOp(const GrClip&, std::unique_ptr<GrDrawOp>,
+ const std::function<WillAddOpFn>& = std::function<WillAddOpFn>());
+
+ // Makes a copy of the proxy if it is necessary for the draw and places the texture that should
+ // be used by GrXferProcessor to access the destination color in 'result'. If the return
+ // value is false then a texture copy could not be made.
+ bool SK_WARN_UNUSED_RESULT setupDstProxy(const GrClip&, const GrOp& op,
+ GrXferProcessor::DstProxy* result);
+
+ class AsyncReadResult;
+
+ // The async read step of asyncRescaleAndReadPixels()
+ void asyncReadPixels(const SkIRect& rect, SkColorType colorType, ReadPixelsCallback callback,
+ ReadPixelsContext context);
+
+ GrOpsTask* getOpsTask();
+
+ std::unique_ptr<GrTextTarget> fTextTarget;
+ sk_sp<GrRenderTargetProxy> fRenderTargetProxy;
+
+ // In MDB-mode the GrOpsTask can be closed by some other renderTargetContext that has picked
+ // it up. For this reason, the GrOpsTask should only ever be accessed via 'getOpsTask'.
+ sk_sp<GrOpsTask> fOpsTask;
+
+ SkSurfaceProps fSurfaceProps;
+ bool fManagedOpsTask;
+
+ int fNumStencilSamples = 0;
+#if GR_TEST_UTILS
+ bool fPreserveOpsOnFullClear_TestingOnly = false;
+#endif
+
+ typedef GrSurfaceContext INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetContextPriv.h b/gfx/skia/skia/src/gpu/GrRenderTargetContextPriv.h
new file mode 100644
index 0000000000..4a12ba0208
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetContextPriv.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetContextPriv_DEFINED
+#define GrRenderTargetContextPriv_DEFINED
+
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+class GrFixedClip;
+class GrHardClip;
+class GrPath;
+class GrRenderTargetPriv;
+struct GrUserStencilSettings;
+
+/** Class that adds methods to GrRenderTargetContext that are only intended for use internal to
+ Skia. This class is purely a privileged window into GrRenderTargetContext. It should never have
+ additional data members or virtual methods. */
+class GrRenderTargetContextPriv {
+public:
+ // called to note the last clip drawn to the stencil buffer.
+ // TODO: remove after clipping overhaul.
+ void setLastClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
+ int numClipAnalyticFPs) {
+ GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
+ opsTask->fLastClipStackGenID = clipStackGenID;
+ opsTask->fLastDevClipBounds = devClipBounds;
+ opsTask->fLastClipNumAnalyticFPs = numClipAnalyticFPs;
+ }
+
+ // called to determine if we have to render the clip into SB.
+ // TODO: remove after clipping overhaul.
+ bool mustRenderClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
+ int numClipAnalyticFPs) const {
+ GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
+ return opsTask->fLastClipStackGenID != clipStackGenID ||
+ !opsTask->fLastDevClipBounds.contains(devClipBounds) ||
+ opsTask->fLastClipNumAnalyticFPs != numClipAnalyticFPs;
+ }
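+
+    // Intended pattern (a sketch with hypothetical caller state): re-render the stencil clip
+    // only when it changed, then record what was drawn:
+    //   if (rtcPriv.mustRenderClip(genID, devClipBounds, numFPs)) {
+    //       // ... draw the clip into the stencil buffer ...
+    //       rtcPriv.setLastClip(genID, devClipBounds, numFPs);
+    //   }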
+
+ using CanClearFullscreen = GrRenderTargetContext::CanClearFullscreen;
+
+ void clear(const GrFixedClip&, const SkPMColor4f&, CanClearFullscreen);
+
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask);
+
+ // While this can take a general clip, since GrReducedClip relies on this function, it must take
+ // care to only provide hard clips or we could get stuck in a loop. The general clip is needed
+ // so that path renderers can use this function.
+ void stencilRect(
+ const GrClip& clip, const GrUserStencilSettings* ss, GrPaint&& paint,
+ GrAA doStencilMSAA, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkMatrix* localMatrix = nullptr) {
+ // Since this provides stencil settings to drawFilledQuad, it performs a different AA type
+ // resolution compared to regular rect draws, which is the main reason it remains separate.
+ GrQuad localQuad = localMatrix ? GrQuad::MakeFromRect(rect, *localMatrix) : GrQuad(rect);
+ fRenderTargetContext->drawFilledQuad(
+ clip, std::move(paint), doStencilMSAA, GrQuadAAFlags::kNone,
+ GrQuad::MakeFromRect(rect, viewMatrix), localQuad, ss);
+ }
+
+ void stencilPath(
+ const GrHardClip&, GrAA doStencilMSAA, const SkMatrix& viewMatrix, sk_sp<const GrPath>);
+
+ /**
+ * Draws a path, either AA or not, and touches the stencil buffer with the user stencil settings
+ * for each color sample written.
+ */
+ bool drawAndStencilPath(const GrHardClip&,
+ const GrUserStencilSettings*,
+ SkRegion::Op op,
+ bool invert,
+ GrAA doStencilMSAA,
+ const SkMatrix& viewMatrix,
+ const SkPath&);
+
+ SkBudgeted isBudgeted() const;
+
+ int maxWindowRectangles() const;
+
+ /*
+ * This unique ID will not change for a given RenderTargetContext. However, it is _NOT_
+ * guaranteed to match the uniqueID of the underlying GrRenderTarget - beware!
+ */
+ GrSurfaceProxy::UniqueID uniqueID() const {
+ return fRenderTargetContext->fRenderTargetProxy->uniqueID();
+ }
+
+ uint32_t testingOnly_getOpsTaskID();
+
+ using WillAddOpFn = GrRenderTargetContext::WillAddOpFn;
+ void testingOnly_addDrawOp(std::unique_ptr<GrDrawOp>);
+ void testingOnly_addDrawOp(const GrClip&, std::unique_ptr<GrDrawOp>,
+ const std::function<WillAddOpFn>& = std::function<WillAddOpFn>());
+
+ bool refsWrappedObjects() const {
+ return fRenderTargetContext->fRenderTargetProxy->refsWrappedObjects();
+ }
+
+private:
+ explicit GrRenderTargetContextPriv(GrRenderTargetContext* renderTargetContext)
+ : fRenderTargetContext(renderTargetContext) {}
+    GrRenderTargetContextPriv(const GrRenderTargetContextPriv&) {} // unimpl
+    GrRenderTargetContextPriv& operator=(const GrRenderTargetContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrRenderTargetContextPriv* operator&() const;
+ GrRenderTargetContextPriv* operator&();
+
+ GrRenderTargetContext* fRenderTargetContext;
+
+ friend class GrRenderTargetContext; // to construct/copy this type.
+};
+
+inline GrRenderTargetContextPriv GrRenderTargetContext::priv() {
+ return GrRenderTargetContextPriv(this);
+}
+
+inline const GrRenderTargetContextPriv GrRenderTargetContext::priv() const {
+ return GrRenderTargetContextPriv(const_cast<GrRenderTargetContext*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h b/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h
new file mode 100644
index 0000000000..f81bd1f49a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetPriv_DEFINED
+#define GrRenderTargetPriv_DEFINED
+
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTarget.h"
+
+class GrStencilSettings;
+
+/** Class that adds methods to GrRenderTarget that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrRenderTarget. It should never have additional
+ data members or virtual methods. */
+class GrRenderTargetPriv {
+public:
+ /**
+ * GrStencilAttachment is not part of the public API.
+ */
+ GrStencilAttachment* getStencilAttachment() const {
+ return fRenderTarget->fStencilAttachment.get();
+ }
+
+ /**
+     * Attaches the GrStencilAttachment to the render target. If stencil is nullptr then any
+     * currently attached GrStencilAttachment is removed.
+ */
+ void attachStencilAttachment(sk_sp<GrStencilAttachment> stencil);
+
+ int numStencilBits() const;
+
+ /**
+ * Returns a unique key that identifies this render target's sample pattern. (Must be
+ * multisampled.)
+ */
+ int getSamplePatternKey() const;
+
+ /**
+ * Retrieves the per-pixel HW sample locations for this render target, and, as a by-product, the
+ * actual number of samples in use. (This may differ from fSampleCnt.) Sample locations are
+ * returned as 0..1 offsets relative to the top-left corner of the pixel.
+ */
+ const SkTArray<SkPoint>& getSampleLocations() const {
+ int samplePatternKey = this->getSamplePatternKey();
+ return fRenderTarget->getGpu()->retrieveSampleLocations(samplePatternKey);
+ }
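+
+    // Illustrative use (assumed names; 'rt' is a multisampled GrRenderTarget*):
+    //
+    //   const SkTArray<SkPoint>& locs = rt->renderTargetPriv().getSampleLocations();
+    //   for (const SkPoint& p : locs) {
+    //       // p.fX / p.fY are 0..1 offsets from the pixel's top-left corner.
+    //   }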
+
+private:
+ explicit GrRenderTargetPriv(GrRenderTarget* renderTarget) : fRenderTarget(renderTarget) {}
+ GrRenderTargetPriv(const GrRenderTargetPriv&) {} // unimpl
+ GrRenderTargetPriv& operator=(const GrRenderTargetPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrRenderTargetPriv* operator&() const;
+ GrRenderTargetPriv* operator&();
+
+ GrRenderTarget* fRenderTarget;
+
+ friend class GrRenderTarget; // to construct/copy this type.
+};
+
+inline GrRenderTargetPriv GrRenderTarget::renderTargetPriv() { return GrRenderTargetPriv(this); }
+
+inline const GrRenderTargetPriv GrRenderTarget::renderTargetPriv() const {
+ return GrRenderTargetPriv(const_cast<GrRenderTarget*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp
new file mode 100644
index 0000000000..6e7d3a5ed0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTargetProxy.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTextureRenderTargetProxy.h"
+
+// Deferred version
+// TODO: we can probably munge the 'desc' in both the wrapped and deferred
+// cases to make the sampleConfig/numSamples stuff more rational.
+GrRenderTargetProxy::GrRenderTargetProxy(const GrCaps& caps,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : INHERITED(format, desc, GrRenderable::kYes, origin, textureSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator)
+ , fSampleCnt(sampleCount)
+ , fWrapsVkSecondaryCB(WrapsVkSecondaryCB::kNo)
+ , fOutputSwizzle(outputSwizzle) {}
+
+// Lazy-callback version
+GrRenderTargetProxy::GrRenderTargetProxy(LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator,
+ WrapsVkSecondaryCB wrapsVkSecondaryCB)
+ : INHERITED(std::move(callback), format, desc, GrRenderable::kYes, origin, textureSwizzle,
+ fit, budgeted, isProtected, surfaceFlags, useAllocator)
+ , fSampleCnt(sampleCount)
+ , fWrapsVkSecondaryCB(wrapsVkSecondaryCB)
+ , fOutputSwizzle(outputSwizzle) {}
+
+// Wrapped version
+GrRenderTargetProxy::GrRenderTargetProxy(sk_sp<GrSurface> surf,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ UseAllocator useAllocator,
+ WrapsVkSecondaryCB wrapsVkSecondaryCB)
+ : INHERITED(std::move(surf), origin, textureSwizzle, SkBackingFit::kExact, useAllocator)
+ , fSampleCnt(fTarget->asRenderTarget()->numSamples())
+ , fWrapsVkSecondaryCB(wrapsVkSecondaryCB)
+ , fOutputSwizzle(outputSwizzle) {
+ // The kRequiresManualMSAAResolve flag better not be set if we are not multisampled or if
+ // MSAA resolve should happen automatically.
+ //
+ // From the other side, we don't know enough about the wrapped surface to assert when
+ // kRequiresManualMSAAResolve *should* be set. e.g., The caller might be wrapping a backend
+ // texture as a render target at this point but we wouldn't know it.
+ SkASSERT(!(this->numSamples() <= 1 ||
+ fTarget->getContext()->priv().caps()->msaaResolvesAutomatically()) ||
+ !this->requiresManualMSAAResolve());
+}
+
+int GrRenderTargetProxy::maxWindowRectangles(const GrCaps& caps) const {
+ return this->glRTFBOIDIs0() ? 0 : caps.maxWindowRectangles();
+}
+
+bool GrRenderTargetProxy::instantiate(GrResourceProvider* resourceProvider) {
+ if (this->isLazy()) {
+ return false;
+ }
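+    // Note that lazy proxies are not instantiated here; the resource allocator instantiates
+    // them separately via priv().doLazyInstantiation() (see GrResourceAllocator).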
+ if (!this->instantiateImpl(resourceProvider, fSampleCnt, fNumStencilSamples, GrRenderable::kYes,
+ GrMipMapped::kNo, nullptr)) {
+ return false;
+ }
+
+ SkASSERT(this->peekRenderTarget());
+ SkASSERT(!this->peekTexture());
+ return true;
+}
+
+bool GrRenderTargetProxy::canChangeStencilAttachment() const {
+ if (!fTarget) {
+ // If we aren't instantiated, then we definitely are an internal render target. Ganesh is
+ // free to change stencil attachments on internal render targets.
+ return true;
+ }
+ return fTarget->asRenderTarget()->canAttemptStencilAttachment();
+}
+
+sk_sp<GrSurface> GrRenderTargetProxy::createSurface(GrResourceProvider* resourceProvider) const {
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(
+ resourceProvider, fSampleCnt, fNumStencilSamples, GrRenderable::kYes, GrMipMapped::kNo);
+ if (!surface) {
+ return nullptr;
+ }
+ SkASSERT(surface->asRenderTarget());
+ SkASSERT(!surface->asTexture());
+ return surface;
+}
+
+size_t GrRenderTargetProxy::onUninstantiatedGpuMemorySize(const GrCaps& caps) const {
+ int colorSamplesPerPixel = this->numSamples();
+ if (colorSamplesPerPixel > 1) {
+ // Add one for the resolve buffer.
+ ++colorSamplesPerPixel;
+ }
+
+ // TODO: do we have enough information to improve this worst case estimate?
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ colorSamplesPerPixel, GrMipMapped::kNo, !this->priv().isExact());
+}
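+
+// For a rough sense of scale (an illustrative estimate, assuming 4 bytes per pixel): a
+// 1024x1024 RGBA8 target with numSamples() == 4 counts 5 samples per pixel (4 color samples
+// plus the resolve buffer), i.e. on the order of 1024 * 1024 * 4 * 5 = 20 MiB.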
+
+bool GrRenderTargetProxy::refsWrappedObjects() const {
+ if (!this->isInstantiated()) {
+ return false;
+ }
+
+ GrSurface* surface = this->peekSurface();
+ return surface->resourcePriv().refsWrappedObjects();
+}
+
+#ifdef SK_DEBUG
+void GrRenderTargetProxy::onValidateSurface(const GrSurface* surface) {
+ // We do not check that surface->asTexture returns null since, when replaying DDLs we
+ // can fulfill a renderTarget-only proxy w/ a textureRenderTarget.
+
+ // Anything that is checked here should be duplicated in GrTextureRenderTargetProxy's version
+ SkASSERT(surface->asRenderTarget());
+ SkASSERT(surface->asRenderTarget()->numSamples() == this->numSamples());
+
+ GrInternalSurfaceFlags proxyFlags = fSurfaceFlags;
+ GrInternalSurfaceFlags surfaceFlags = surface->surfacePriv().flags();
+ SkASSERT(((int)proxyFlags & kGrInternalRenderTargetFlagsMask) ==
+ ((int)surfaceFlags & kGrInternalRenderTargetFlagsMask));
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetProxy.h b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.h
new file mode 100644
index 0000000000..d4fad64331
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetProxy_DEFINED
+#define GrRenderTargetProxy_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrSwizzle.h"
+
+class GrResourceProvider;
+class GrRenderTargetProxyPriv;
+
+// This class delays the acquisition of RenderTargets until they are actually
+// required
+// Beware: the uniqueID of the RenderTargetProxy will usually be different than
+// the uniqueID of the RenderTarget it represents!
+class GrRenderTargetProxy : virtual public GrSurfaceProxy {
+public:
+ GrRenderTargetProxy* asRenderTargetProxy() override { return this; }
+ const GrRenderTargetProxy* asRenderTargetProxy() const override { return this; }
+
+ // Actually instantiate the backing rendertarget, if necessary.
+ bool instantiate(GrResourceProvider*) override;
+
+ bool canUseMixedSamples(const GrCaps& caps) const {
+ return caps.mixedSamplesSupport() && !this->glRTFBOIDIs0() &&
+ caps.internalMultisampleCount(this->backendFormat()) > 0 &&
+ this->canChangeStencilAttachment();
+ }
+
+ /*
+ * Indicate that a draw to this proxy requires stencil, and how many stencil samples it needs.
+ * The number of stencil samples on this proxy will be equal to the largest sample count passed
+ * to this method.
+ */
+ void setNeedsStencil(int8_t numStencilSamples) {
+ SkASSERT(numStencilSamples >= fSampleCnt);
+ fNumStencilSamples = SkTMax(numStencilSamples, fNumStencilSamples);
+ }
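+
+    // For example (illustrative): on a proxy with fSampleCnt == 1, setNeedsStencil(4)
+    // followed by setNeedsStencil(1) leaves numStencilSamples() at 4 -- the count only
+    // ever ratchets upward.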
+
+ /**
+ * Returns the number of stencil samples required by this proxy.
+ * NOTE: Once instantiated, the actual render target may have more samples, but it is guaranteed
+ * to have at least this many. (After a multisample stencil buffer has been attached to a render
+ * target, we never "downgrade" it to one with fewer samples.)
+ */
+ int numStencilSamples() const { return fNumStencilSamples; }
+
+ /**
+ * Returns the number of samples/pixel in the color buffer (One if non-MSAA).
+ */
+ int numSamples() const { return fSampleCnt; }
+
+ int maxWindowRectangles(const GrCaps& caps) const;
+
+ const GrSwizzle& outputSwizzle() const { return fOutputSwizzle; }
+
+ bool wrapsVkSecondaryCB() const { return fWrapsVkSecondaryCB == WrapsVkSecondaryCB::kYes; }
+
+ void markMSAADirty(const SkIRect& dirtyRect) {
+ SkASSERT(SkIRect::MakeWH(this->width(), this->height()).contains(dirtyRect));
+ SkASSERT(this->requiresManualMSAAResolve());
+ fMSAADirtyRect.join(dirtyRect);
+ }
+ void markMSAAResolved() {
+ SkASSERT(this->requiresManualMSAAResolve());
+ fMSAADirtyRect.setEmpty();
+ }
+ bool isMSAADirty() const {
+ SkASSERT(fMSAADirtyRect.isEmpty() || this->requiresManualMSAAResolve());
+ return this->requiresManualMSAAResolve() && !fMSAADirtyRect.isEmpty();
+ }
+ const SkIRect& msaaDirtyRect() const {
+ SkASSERT(this->requiresManualMSAAResolve());
+ return fMSAADirtyRect;
+ }
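+
+    // Typical lifecycle of the three methods above (illustrative):
+    //
+    //   proxy->markMSAADirty(drawBounds);   // after rendering touches 'drawBounds'
+    //   ...
+    //   if (proxy->isMSAADirty()) {
+    //       // resolve proxy->msaaDirtyRect(), then:
+    //       proxy->markMSAAResolved();
+    //   }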
+
+ // TODO: move this to a priv class!
+ bool refsWrappedObjects() const;
+
+ // Provides access to special purpose functions.
+ GrRenderTargetProxyPriv rtPriv();
+ const GrRenderTargetProxyPriv rtPriv() const;
+
+protected:
+ friend class GrProxyProvider; // for ctors
+ friend class GrRenderTargetProxyPriv;
+
+ // Deferred version
+ GrRenderTargetProxy(const GrCaps&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ int sampleCount,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ enum class WrapsVkSecondaryCB : bool { kNo = false, kYes = true };
+
+ // Lazy-callback version
+ // There are two main use cases for lazily-instantiated proxies:
+ // basic knowledge - width, height, config, samples, origin are known
+ // minimal knowledge - only config is known.
+ //
+ // The basic knowledge version is used for DDL where we know the type of proxy we are going to
+ // use, but we don't have access to the GPU yet to instantiate it.
+ //
+ // The minimal knowledge version is used for CCPR where we are generating an atlas but we do not
+ // know the final size until flush time.
+ GrRenderTargetProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ int sampleCount,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator,
+ WrapsVkSecondaryCB);
+
+ // Wrapped version
+ GrRenderTargetProxy(sk_sp<GrSurface>,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ UseAllocator,
+ WrapsVkSecondaryCB = WrapsVkSecondaryCB::kNo);
+
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
+
+private:
+ void setGLRTFBOIDIs0() {
+ fSurfaceFlags |= GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ }
+ bool glRTFBOIDIs0() const {
+ return fSurfaceFlags & GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ }
+ bool canChangeStencilAttachment() const;
+
+ size_t onUninstantiatedGpuMemorySize(const GrCaps&) const override;
+ SkDEBUGCODE(void onValidateSurface(const GrSurface*) override;)
+
+ // WARNING: Be careful when adding or removing fields here. ASAN is likely to trigger warnings
+ // when instantiating GrTextureRenderTargetProxy. The std::function in GrSurfaceProxy makes
+    // each class in the diamond require 16 byte alignment. Clang appears to lay out the fields for
+ // each class to achieve the necessary alignment. However, ASAN checks the alignment of 'this'
+ // in the constructors, and always looks for the full 16 byte alignment, even if the fields in
+ // that particular class don't require it. Changing the size of this object can move the start
+ // address of other types, leading to this problem.
+ int8_t fSampleCnt;
+ int8_t fNumStencilSamples = 0;
+ WrapsVkSecondaryCB fWrapsVkSecondaryCB;
+ GrSwizzle fOutputSwizzle;
+ SkIRect fMSAADirtyRect = SkIRect::MakeEmpty();
+    // This is to fix the issue in the large comment above. Without the padding we end 6 bytes into
+    // a 16 byte range, so the GrTextureProxy ends up starting 8 byte aligned but not 16. We add
+    // the padding here to get us right up to the 16 byte alignment (technically any padding of
+    // 3-10 bytes would work since it always goes up to 8 byte alignment, but we use 10 to be more
+    // explicit about what we're doing).
+ char fDummyPadding[10];
+
+ typedef GrSurfaceProxy INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetProxyPriv.h b/gfx/skia/skia/src/gpu/GrRenderTargetProxyPriv.h
new file mode 100644
index 0000000000..091e4f2f5b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetProxyPriv.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetProxyPriv_DEFINED
+#define GrRenderTargetProxyPriv_DEFINED
+
+#include "src/gpu/GrRenderTargetProxy.h"
+
+/**
+ * This class hides the more specialized capabilities of GrRenderTargetProxy.
+ */
+class GrRenderTargetProxyPriv {
+public:
+ void setGLRTFBOIDIs0() {
+ // FBO0 should never be wrapped as a texture render target.
+ SkASSERT(!fRenderTargetProxy->requiresManualMSAAResolve());
+ SkASSERT(!fRenderTargetProxy->asTextureProxy());
+ fRenderTargetProxy->setGLRTFBOIDIs0();
+ }
+
+ bool glRTFBOIDIs0() const {
+ return fRenderTargetProxy->glRTFBOIDIs0();
+ }
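+
+    // This flag marks a wrapped render target whose GL framebuffer object ID is 0 (the
+    // default framebuffer). Such a target can never be wrapped as a texture (hence the
+    // asserts in the setter above) and reports zero window rectangles in
+    // GrRenderTargetProxy::maxWindowRectangles().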
+
+private:
+ explicit GrRenderTargetProxyPriv(GrRenderTargetProxy* renderTargetProxy)
+ : fRenderTargetProxy(renderTargetProxy) {}
+ GrRenderTargetProxyPriv(const GrRenderTargetProxyPriv&) {} // unimpl
+ GrRenderTargetProxyPriv& operator=(const GrRenderTargetProxyPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrRenderTargetProxyPriv* operator&() const;
+ GrRenderTargetProxyPriv* operator&();
+
+ GrRenderTargetProxy* fRenderTargetProxy;
+
+ friend class GrRenderTargetProxy; // to construct/copy this type.
+};
+
+inline GrRenderTargetProxyPriv GrRenderTargetProxy::rtPriv() {
+ return GrRenderTargetProxyPriv(this);
+}
+
+inline const GrRenderTargetProxyPriv GrRenderTargetProxy::rtPriv() const {
+ return GrRenderTargetProxyPriv(const_cast<GrRenderTargetProxy*>(this));
+}
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrRenderTask.cpp b/gfx/skia/skia/src/gpu/GrRenderTask.cpp
new file mode 100644
index 0000000000..a69c48ba0c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTask.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTask.h"
+
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/GrTextureResolveRenderTask.h"
+
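+// Hands out unique render task IDs; the loop guards against a (rare) 32-bit wrap-around
+// handing back SK_InvalidUniqueID.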
+uint32_t GrRenderTask::CreateUniqueID() {
+ static std::atomic<uint32_t> nextID{1};
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidUniqueID);
+ return id;
+}
+
+GrRenderTask::GrRenderTask(sk_sp<GrSurfaceProxy> target)
+ : fTarget(std::move(target))
+ , fUniqueID(CreateUniqueID())
+ , fFlags(0) {
+}
+
+GrRenderTask::~GrRenderTask() {
+ if (fTarget && this == fTarget->getLastRenderTask()) {
+ // Ensure the target proxy doesn't keep hold of a dangling back pointer.
+ fTarget->setLastRenderTask(nullptr);
+ }
+}
+
+#ifdef SK_DEBUG
+bool GrRenderTask::deferredProxiesAreInstantiated() const {
+ for (int i = 0; i < fDeferredProxies.count(); ++i) {
+ if (!fDeferredProxies[i]->isInstantiated()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+void GrRenderTask::makeClosed(const GrCaps& caps) {
+ if (this->isClosed()) {
+ return;
+ }
+
+ SkIRect targetUpdateBounds;
+ if (ExpectedOutcome::kTargetDirty == this->onMakeClosed(caps, &targetUpdateBounds)) {
+ SkASSERT(SkIRect::MakeWH(fTarget->width(), fTarget->height()).contains(targetUpdateBounds));
+ if (fTarget->requiresManualMSAAResolve()) {
+ SkASSERT(fTarget->asRenderTargetProxy());
+ fTarget->asRenderTargetProxy()->markMSAADirty(targetUpdateBounds);
+ }
+ GrTextureProxy* textureProxy = fTarget->asTextureProxy();
+ if (textureProxy && GrMipMapped::kYes == textureProxy->mipMapped()) {
+ textureProxy->markMipMapsDirty();
+ }
+ }
+
+ if (fTextureResolveTask) {
+ this->addDependency(fTextureResolveTask);
+ fTextureResolveTask->makeClosed(caps);
+ fTextureResolveTask = nullptr;
+ }
+
+ this->setFlag(kClosed_Flag);
+}
+
+void GrRenderTask::prepare(GrOpFlushState* flushState) {
+ for (int i = 0; i < fDeferredProxies.count(); ++i) {
+ fDeferredProxies[i]->texPriv().scheduleUpload(flushState);
+ }
+
+ this->onPrepare(flushState);
+}
+
+// Add a GrRenderTask-based dependency
+void GrRenderTask::addDependency(GrRenderTask* dependedOn) {
+ SkASSERT(!dependedOn->dependsOn(this)); // loops are bad
+ SkASSERT(!this->dependsOn(dependedOn)); // caller should weed out duplicates
+
+ fDependencies.push_back(dependedOn);
+ dependedOn->addDependent(this);
+
+ SkDEBUGCODE(this->validate());
+}
+
+void GrRenderTask::addDependenciesFromOtherTask(GrRenderTask* otherTask) {
+ SkASSERT(otherTask);
+ for (GrRenderTask* task : otherTask->fDependencies) {
+ // The task should not be adding a dependency to itself.
+ SkASSERT(task != this);
+ if (!this->dependsOn(task)) {
+ this->addDependency(task);
+ }
+ }
+}
+
+// Convert from a GrSurface-based dependency to a GrRenderTask one
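+// In outline: a self-read needs no new dependency; a dependency with dirty MSAA or dirty
+// mipmaps is routed through the shared texture-resolve task; otherwise we simply depend on
+// the proxy's last render task (closing it first so its contents are fixed).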
+void GrRenderTask::addDependency(GrSurfaceProxy* dependedOn, GrMipMapped mipMapped,
+ GrTextureResolveManager textureResolveManager,
+ const GrCaps& caps) {
+ // If it is still receiving dependencies, this GrRenderTask shouldn't be closed
+ SkASSERT(!this->isClosed());
+
+ GrRenderTask* dependedOnTask = dependedOn->getLastRenderTask();
+
+ if (dependedOnTask == this) {
+ // self-read - presumably for dst reads. We don't need to do anything in this case. The
+ // XferProcessor will detect what is happening and insert a texture barrier.
+ SkASSERT(GrMipMapped::kNo == mipMapped);
+ // We should never attempt a self-read on a surface that has a separate MSAA renderbuffer.
+ SkASSERT(!dependedOn->requiresManualMSAAResolve());
+ SkASSERT(!dependedOn->asTextureProxy() ||
+ !dependedOn->asTextureProxy()->texPriv().isDeferred());
+ return;
+ }
+
+ if (dependedOnTask) {
+ if (this->dependsOn(dependedOnTask) || fTextureResolveTask == dependedOnTask) {
+ return; // don't add duplicate dependencies
+ }
+
+        // We are closing 'dependedOnTask' here because its current contents are what 'this'
+ // renderTask depends on. We need a break in 'dependedOnTask' so that the usage of
+ // that state has a chance to execute.
+ dependedOnTask->makeClosed(caps);
+ }
+
+ auto resolveFlags = GrSurfaceProxy::ResolveFlags::kNone;
+
+ if (dependedOn->requiresManualMSAAResolve()) {
+ auto* renderTargetProxy = dependedOn->asRenderTargetProxy();
+ SkASSERT(renderTargetProxy);
+ if (renderTargetProxy->isMSAADirty()) {
+ resolveFlags |= GrSurfaceProxy::ResolveFlags::kMSAA;
+ }
+ }
+
+ GrTextureProxy* textureProxy = dependedOn->asTextureProxy();
+ if (GrMipMapped::kYes == mipMapped) {
+ SkASSERT(textureProxy);
+ if (GrMipMapped::kYes != textureProxy->mipMapped()) {
+ // There are some cases where we might be given a non-mipmapped texture with a mipmap
+ // filter. See skbug.com/7094.
+ mipMapped = GrMipMapped::kNo;
+ } else if (textureProxy->mipMapsAreDirty()) {
+ resolveFlags |= GrSurfaceProxy::ResolveFlags::kMipMaps;
+ }
+ }
+
+ // Does this proxy have msaa to resolve and/or mipmaps to regenerate?
+ if (GrSurfaceProxy::ResolveFlags::kNone != resolveFlags) {
+ if (!fTextureResolveTask) {
+ fTextureResolveTask = textureResolveManager.newTextureResolveRenderTask(caps);
+ }
+ fTextureResolveTask->addProxy(sk_ref_sp(dependedOn), resolveFlags, caps);
+
+ // addProxy() should have closed the texture proxy's previous task.
+ SkASSERT(!dependedOnTask || dependedOnTask->isClosed());
+ SkASSERT(dependedOn->getLastRenderTask() == fTextureResolveTask);
+
+#ifdef SK_DEBUG
+ // addProxy() should have called addDependency (in this instance, recursively) on
+ // fTextureResolveTask.
+ if (dependedOnTask) {
+ SkASSERT(fTextureResolveTask->dependsOn(dependedOnTask));
+ }
+ if (textureProxy && textureProxy->texPriv().isDeferred()) {
+ SkASSERT(fTextureResolveTask->fDeferredProxies.back() == textureProxy);
+ }
+
+ // The GrTextureResolveRenderTask factory should have also marked the proxy clean, set the
+ // last renderTask on the textureProxy to textureResolveTask, and closed textureResolveTask.
+ if (GrRenderTargetProxy* renderTargetProxy = dependedOn->asRenderTargetProxy()) {
+ SkASSERT(!renderTargetProxy->isMSAADirty());
+ }
+ if (textureProxy) {
+ SkASSERT(!textureProxy->mipMapsAreDirty());
+ }
+ SkASSERT(dependedOn->getLastRenderTask() == fTextureResolveTask);
+#endif
+ return;
+ }
+
+ if (textureProxy && textureProxy->texPriv().isDeferred()) {
+ fDeferredProxies.push_back(textureProxy);
+ }
+
+ if (dependedOnTask) {
+ this->addDependency(dependedOnTask);
+ }
+}
+
+bool GrRenderTask::dependsOn(const GrRenderTask* dependedOn) const {
+ for (int i = 0; i < fDependencies.count(); ++i) {
+ if (fDependencies[i] == dependedOn) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void GrRenderTask::addDependent(GrRenderTask* dependent) {
+ fDependents.push_back(dependent);
+}
+
+#ifdef SK_DEBUG
+bool GrRenderTask::isDependent(const GrRenderTask* dependent) const {
+ for (int i = 0; i < fDependents.count(); ++i) {
+ if (fDependents[i] == dependent) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void GrRenderTask::validate() const {
+ // TODO: check for loops and duplicates
+
+ for (int i = 0; i < fDependencies.count(); ++i) {
+        SkASSERT(fDependencies[i]->isDependent(this));
+ }
+}
+#endif
+
+void GrRenderTask::closeThoseWhoDependOnMe(const GrCaps& caps) {
+ for (int i = 0; i < fDependents.count(); ++i) {
+ if (!fDependents[i]->isClosed()) {
+ fDependents[i]->makeClosed(caps);
+ }
+ }
+}
+
+bool GrRenderTask::isInstantiated() const {
+ // Some renderTasks (e.g. GrTransferFromRenderTask) don't have a target.
+ if (!fTarget) {
+ return true;
+ }
+
+ if (!fTarget->isInstantiated()) {
+ return false;
+ }
+
+ int minStencilSampleCount = (fTarget->asRenderTargetProxy())
+ ? fTarget->asRenderTargetProxy()->numStencilSamples()
+ : 0;
+
+ if (minStencilSampleCount) {
+ GrRenderTarget* rt = fTarget->peekRenderTarget();
+ SkASSERT(rt);
+
+ GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
+ if (!stencil) {
+ return false;
+ }
+ SkASSERT(stencil->numSamples() >= minStencilSampleCount);
+ }
+
+ GrSurface* surface = fTarget->peekSurface();
+ if (surface->wasDestroyed()) {
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef SK_DEBUG
+void GrRenderTask::dump(bool printDependencies) const {
+ SkDebugf("--------------------------------------------------------------\n");
+ SkDebugf("renderTaskID: %d - proxyID: %d - surfaceID: %d\n", fUniqueID,
+ fTarget ? fTarget->uniqueID().asUInt() : -1,
+ fTarget && fTarget->peekSurface()
+ ? fTarget->peekSurface()->uniqueID().asUInt()
+ : -1);
+
+ if (printDependencies) {
+ SkDebugf("I rely On (%d): ", fDependencies.count());
+ for (int i = 0; i < fDependencies.count(); ++i) {
+ SkDebugf("%d, ", fDependencies[i]->fUniqueID);
+ }
+ SkDebugf("\n");
+
+ SkDebugf("(%d) Rely On Me: ", fDependents.count());
+ for (int i = 0; i < fDependents.count(); ++i) {
+ SkDebugf("%d, ", fDependents[i]->fUniqueID);
+ }
+ SkDebugf("\n");
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTask.h b/gfx/skia/skia/src/gpu/GrRenderTask.h
new file mode 100644
index 0000000000..3fb3a18bab
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTask.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTask_DEFINED
+#define GrRenderTask_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrTextureResolveManager.h"
+
+class GrOpFlushState;
+class GrOpsTask;
+class GrResourceAllocator;
+class GrTextureResolveRenderTask;
+
+// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
+// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
+// contents. (e.g., an opsTask that executes a command buffer, a task to regenerate mipmaps, etc.)
+class GrRenderTask : public SkRefCnt {
+public:
+ GrRenderTask(sk_sp<GrSurfaceProxy> target);
+ ~GrRenderTask() override;
+
+ void makeClosed(const GrCaps&);
+
+ void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }
+
+ // These two methods are only invoked at flush time
+ void prepare(GrOpFlushState* flushState);
+ bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
+
+ // Called when this class will survive a flush and needs to truncate its ops and start over.
+ // TODO: ultimately it should be invalid for an op list to survive a flush.
+ // https://bugs.chromium.org/p/skia/issues/detail?id=7111
+ virtual void endFlush() {}
+
+ bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
+
+ /*
+ * Notify this GrRenderTask that it relies on the contents of 'dependedOn'
+ */
+ void addDependency(GrSurfaceProxy* dependedOn, GrMipMapped, GrTextureResolveManager,
+ const GrCaps& caps);
+
+ /*
+ * Notify this GrRenderTask that it relies on the contents of all GrRenderTasks which otherTask
+ * depends on.
+ */
+ void addDependenciesFromOtherTask(GrRenderTask* otherTask);
+
+ /*
+ * Does this renderTask depend on 'dependedOn'?
+ */
+ bool dependsOn(const GrRenderTask* dependedOn) const;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /*
+ * Safely cast this GrRenderTask to a GrOpsTask (if possible).
+ */
+ virtual GrOpsTask* asOpsTask() { return nullptr; }
+
+#ifdef SK_DEBUG
+ /*
+ * Dump out the GrRenderTask dependency DAG
+ */
+ virtual void dump(bool printDependencies) const;
+
+ virtual int numClips() const { return 0; }
+
+ using VisitSurfaceProxyFunc = std::function<void(GrSurfaceProxy*, GrMipMapped)>;
+
+ virtual void visitProxies_debugOnly(const VisitSurfaceProxyFunc&) const = 0;
+
+ void visitTargetAndSrcProxies_debugOnly(const VisitSurfaceProxyFunc& fn) const {
+ this->visitProxies_debugOnly(fn);
+ if (fTarget) {
+ fn(fTarget.get(), GrMipMapped::kNo);
+ }
+ }
+#endif
+
+protected:
+ // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
+ // it is required)?
+ bool isInstantiated() const;
+
+ SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
+
+ enum class ExpectedOutcome : bool {
+ kTargetUnchanged,
+ kTargetDirty,
+ };
+
+ // Performs any work to finalize this renderTask prior to execution. If returning
+    // ExpectedOutcome::kTargetDirty, the override is also responsible for filling out the area it will
+ // modify in targetUpdateBounds.
+ //
+ // targetUpdateBounds must not extend beyond the proxy bounds.
+ virtual ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect* targetUpdateBounds) = 0;
+
+ sk_sp<GrSurfaceProxy> fTarget;
+
+ // List of texture proxies whose contents are being prepared on a worker thread
+    // TODO: this list exists so we can fire off the proper upload when a renderTask begins
+ // executing. Can this be replaced?
+ SkTArray<GrTextureProxy*, true> fDeferredProxies;
+
+private:
+ // for resetFlag, TopoSortTraits, gatherProxyIntervals, handleInternalAllocationFailure
+ friend class GrDrawingManager;
+
+ // Drops any pending operations that reference proxies that are not instantiated.
+ // NOTE: Derived classes don't need to check fTarget. That is handled when the drawingManager
+ // calls isInstantiated.
+ virtual void handleInternalAllocationFailure() = 0;
+
+ virtual bool onIsUsed(GrSurfaceProxy*) const = 0;
+
+ bool isUsed(GrSurfaceProxy* proxy) const {
+ if (proxy == fTarget.get()) {
+ return true;
+ }
+
+ return this->onIsUsed(proxy);
+ }
+
+ void addDependency(GrRenderTask* dependedOn);
+ void addDependent(GrRenderTask* dependent);
+    SkDEBUGCODE(bool isDependent(const GrRenderTask* dependent) const;)
+ SkDEBUGCODE(void validate() const;)
+ void closeThoseWhoDependOnMe(const GrCaps&);
+
+ // Feed proxy usage intervals to the GrResourceAllocator class
+ virtual void gatherProxyIntervals(GrResourceAllocator*) const = 0;
+
+ static uint32_t CreateUniqueID();
+
+ enum Flags {
+ kClosed_Flag = 0x01, //!< This GrRenderTask can't accept any more dependencies.
+
+ kWasOutput_Flag = 0x02, //!< Flag for topological sorting
+ kTempMark_Flag = 0x04, //!< Flag for topological sorting
+ };
+
+ void setFlag(uint32_t flag) {
+ fFlags |= flag;
+ }
+
+ void resetFlag(uint32_t flag) {
+ fFlags &= ~flag;
+ }
+
+ bool isSetFlag(uint32_t flag) const {
+ return SkToBool(fFlags & flag);
+ }
+
+ struct TopoSortTraits {
+ static void Output(GrRenderTask* renderTask, int /* index */) {
+ renderTask->setFlag(kWasOutput_Flag);
+ }
+ static bool WasOutput(const GrRenderTask* renderTask) {
+ return renderTask->isSetFlag(kWasOutput_Flag);
+ }
+ static void SetTempMark(GrRenderTask* renderTask) {
+ renderTask->setFlag(kTempMark_Flag);
+ }
+ static void ResetTempMark(GrRenderTask* renderTask) {
+ renderTask->resetFlag(kTempMark_Flag);
+ }
+ static bool IsTempMarked(const GrRenderTask* renderTask) {
+ return renderTask->isSetFlag(kTempMark_Flag);
+ }
+ static int NumDependencies(const GrRenderTask* renderTask) {
+ return renderTask->fDependencies.count();
+ }
+ static GrRenderTask* Dependency(GrRenderTask* renderTask, int index) {
+ return renderTask->fDependencies[index];
+ }
+ };
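+
+    // These hooks let a generic topological sort walk the DAG without auxiliary storage:
+    // the WasOutput/TempMark state lives in each task's fFlags. The sort itself is driven
+    // externally by GrDrawingManager (see the friend declaration above).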
+
+ // Only the GrOpsTask currently overrides this virtual
+ virtual void onPrePrepare(GrRecordingContext*) {}
+ virtual void onPrepare(GrOpFlushState*) {} // Only the GrOpsTask overrides this virtual
+ virtual bool onExecute(GrOpFlushState* flushState) = 0;
+
+ const uint32_t fUniqueID;
+ uint32_t fFlags;
+
+ // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
+ SkSTArray<1, GrRenderTask*, true> fDependencies;
+ // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
+ SkSTArray<1, GrRenderTask*, true> fDependents;
+
+ // For performance reasons, we should perform texture resolves back-to-back as much as possible.
+ // (http://skbug.com/9406). To accomplish this, we make and reuse one single resolve task for
+ // each render task, then add it as a dependency during makeClosed().
+ GrTextureResolveRenderTask* fTextureResolveTask = nullptr;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceAllocator.cpp b/gfx/skia/skia/src/gpu/GrResourceAllocator.cpp
new file mode 100644
index 0000000000..6ab2bf0488
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceAllocator.cpp
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrResourceAllocator.h"
+
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureProxy.h"
+
+#if GR_TRACK_INTERVAL_CREATION
+ #include <atomic>
+
+ uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
+ static std::atomic<uint32_t> nextID{1};
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidUniqueID);
+ return id;
+ }
+#endif
+
+void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
+ SkASSERT(!fAssignedSurface);
+ fAssignedSurface = s;
+ fProxy->priv().assign(std::move(s));
+}
+
+void GrResourceAllocator::determineRecyclability() {
+ for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ if (cur->proxy()->canSkipResourceAllocator()) {
+ // These types of proxies can slip in here if they require a stencil buffer
+ continue;
+ }
+
+ if (cur->uses() >= cur->proxy()->refCnt()) {
+ // All the refs on the proxy are known to the resource allocator thus no one
+ // should be holding onto it outside of Ganesh.
+ SkASSERT(cur->uses() == cur->proxy()->refCnt());
+ cur->markAsRecyclable();
+ }
+ }
+}
+
+void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
+ SkASSERT(!fAssigned); // We shouldn't be adding any opsTasks after (or during) assignment
+
+ SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
+ if (!fEndOfOpsTaskOpIndices.empty()) {
+ SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
+ }
+
+ // This is the first op index of the next opsTask
+ fEndOfOpsTaskOpIndices.push_back(this->curOp());
+ SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
+}
+
+GrResourceAllocator::~GrResourceAllocator() {
+ SkASSERT(fIntvlList.empty());
+ SkASSERT(fActiveIntvls.empty());
+ SkASSERT(!fIntvlHash.count());
+}
+
+void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
+ ActualUse actualUse
+ SkDEBUGCODE(, bool isDirectDstRead)) {
+ SkASSERT(start <= end);
+ SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment
+
+ if (proxy->canSkipResourceAllocator()) {
+ // If the proxy is still not instantiated at this point but will need stencil, it will
+ // attach its own stencil buffer upon onFlush instantiation.
+ if (proxy->isInstantiated()) {
+ auto rt = proxy->asRenderTargetProxy();
+ int minStencilSampleCount = rt ? rt->numStencilSamples() : 0;
+ if (minStencilSampleCount) {
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
+ fResourceProvider, proxy->peekSurface(), minStencilSampleCount)) {
+ SkDebugf("WARNING: failed to attach stencil buffer. "
+ "Rendering may be incorrect.\n");
+ }
+ }
+ }
+ return;
+ }
+
+ // If a proxy is read only it must refer to a texture with specific content that cannot be
+ // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
+ // with the same texture.
+ if (proxy->readOnly()) {
+ if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
+ fLazyInstantiationError = true;
+ } else {
+            // Since we aren't going to add an interval we won't revisit this proxy in assign(), so
+            // it must already be instantiated or be a lazy proxy that we instantiated above.
+ SkASSERT(proxy->isInstantiated());
+ }
+ return;
+ }
+ if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
+ // Revise the interval for an existing use
+#ifdef SK_DEBUG
+ if (0 == start && 0 == end) {
+ // This interval is for the initial upload to a deferred proxy. Due to the vagaries
+ // of how deferred proxies are collected they can appear as uploads multiple times
+ // in a single opsTasks' list and as uploads in several opsTasks.
+ SkASSERT(0 == intvl->start());
+ } else if (isDirectDstRead) {
+ // Direct reads from the render target itself should occur w/in the existing
+ // interval
+ SkASSERT(intvl->start() <= start && intvl->end() >= end);
+ } else {
+ SkASSERT(intvl->end() <= start && intvl->end() <= end);
+ }
+#endif
+ if (ActualUse::kYes == actualUse) {
+ intvl->addUse();
+ }
+ intvl->extendEnd(end);
+ return;
+ }
+ Interval* newIntvl;
+ if (fFreeIntervalList) {
+ newIntvl = fFreeIntervalList;
+ fFreeIntervalList = newIntvl->next();
+ newIntvl->setNext(nullptr);
+ newIntvl->resetTo(proxy, start, end);
+ } else {
+ newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
+ }
+
+ if (ActualUse::kYes == actualUse) {
+ newIntvl->addUse();
+ }
+ fIntvlList.insertByIncreasingStart(newIntvl);
+ fIntvlHash.add(newIntvl);
+}
+
+GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
+ SkDEBUGCODE(this->validate());
+
+ Interval* temp = fHead;
+ if (temp) {
+ fHead = temp->next();
+ if (!fHead) {
+ fTail = nullptr;
+ }
+ temp->setNext(nullptr);
+ }
+
+ SkDEBUGCODE(this->validate());
+ return temp;
+}
+
+// TODO: fuse this with insertByIncreasingEnd
+void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(!intvl->next());
+
+ if (!fHead) {
+ // 14%
+ fHead = fTail = intvl;
+ } else if (intvl->start() <= fHead->start()) {
+ // 3%
+ intvl->setNext(fHead);
+ fHead = intvl;
+ } else if (fTail->start() <= intvl->start()) {
+ // 83%
+ fTail->setNext(intvl);
+ fTail = intvl;
+ } else {
+ // almost never
+ Interval* prev = fHead;
+ Interval* next = prev->next();
+ for (; intvl->start() > next->start(); prev = next, next = next->next()) {
+ }
+
+ SkASSERT(next);
+ intvl->setNext(next);
+ prev->setNext(intvl);
+ }
+
+ SkDEBUGCODE(this->validate());
+}
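+
+// The inline percentages above (and in insertByIncreasingEnd below) are observed branch
+// frequencies; checking the head/tail fast paths first keeps the common insertions O(1).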
+
+// TODO: fuse this with insertByIncreasingStart
+void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(!intvl->next());
+
+ if (!fHead) {
+ // 14%
+ fHead = fTail = intvl;
+ } else if (intvl->end() <= fHead->end()) {
+ // 64%
+ intvl->setNext(fHead);
+ fHead = intvl;
+ } else if (fTail->end() <= intvl->end()) {
+ // 3%
+ fTail->setNext(intvl);
+ fTail = intvl;
+ } else {
+ // 19% but 81% of those land right after the list's head
+ Interval* prev = fHead;
+ Interval* next = prev->next();
+ for (; intvl->end() > next->end(); prev = next, next = next->next()) {
+ }
+
+ SkASSERT(next);
+ intvl->setNext(next);
+ prev->setNext(intvl);
+ }
+
+ SkDEBUGCODE(this->validate());
+}
+
+#ifdef SK_DEBUG
+void GrResourceAllocator::IntervalList::validate() const {
+ SkASSERT(SkToBool(fHead) == SkToBool(fTail));
+
+ Interval* prev = nullptr;
+ for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
+ }
+
+ SkASSERT(fTail == prev);
+}
+#endif
+
+GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
+ Interval* tmp = fHead;
+ fHead = nullptr;
+ fTail = nullptr;
+ return tmp;
+}
+
+// 'surface' can be reused. Add it back to the free pool.
+void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
+    const GrScratchKey& key = surface->resourcePriv().getScratchKey();
+
+ if (!key.isValid()) {
+ return; // can't do it w/o a valid scratch key
+ }
+
+ if (surface->getUniqueKey().isValid()) {
+ // If the surface has a unique key we throw it back into the resource cache.
+ // If things get really tight 'findSurfaceFor' may pull it back out but there is
+ // no need to have it in tight rotation.
+ return;
+ }
+
+#if GR_ALLOCATION_SPEW
+ SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
+#endif
+ // TODO: fix this insertion so we get a more LRU-ish behavior
+ fFreePool.insert(key, surface.release());
+}
+
+// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
+// If we can't find a usable one, create a new one.
+sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
+ int minStencilSampleCount) {
+
+ if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
+ // First try to reattach to a cached version if the proxy is uniquely keyed
+ sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
+ proxy->asTextureProxy()->getUniqueKey());
+ if (surface) {
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
+ minStencilSampleCount)) {
+ return nullptr;
+ }
+
+ return surface;
+ }
+ }
+
+ // First look in the free pool
+ GrScratchKey key;
+
+ proxy->priv().computeScratchKey(&key);
+
+ auto filter = [] (const GrSurface* s) {
+ return true;
+ };
+ sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
+ if (surface) {
+ if (SkBudgeted::kYes == proxy->isBudgeted() &&
+ GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
+ // This gets the job done but isn't quite correct. It would be better to try to
+ // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
+ surface->resourcePriv().makeBudgeted();
+ }
+
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
+ minStencilSampleCount)) {
+ return nullptr;
+ }
+ SkASSERT(!surface->getUniqueKey().isValid());
+ return surface;
+ }
+
+ // Failing that, try to grab a new one from the resource cache
+ return proxy->priv().createSurface(fResourceProvider);
+}
+
+// Remove any intervals that end before the current index. Return their GrSurfaces
+// to the free pool if possible.
+void GrResourceAllocator::expire(unsigned int curIndex) {
+ while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
+ Interval* temp = fActiveIntvls.popHead();
+ SkASSERT(!temp->next());
+
+ if (temp->wasAssignedSurface()) {
+ sk_sp<GrSurface> surface = temp->detachSurface();
+
+ if (temp->isRecyclable()) {
+ this->recycleSurface(std::move(surface));
+ }
+ }
+
+ // Add temp to the free interval list so it can be reused
+ SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
+ temp->setNext(fFreeIntervalList);
+ fFreeIntervalList = temp;
+ }
+}
+
+bool GrResourceAllocator::onOpsTaskBoundary() const {
+ if (fIntvlList.empty()) {
+ SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
+ // Although technically on an opsTask boundary there is no need to force an
+ // intermediate flush here
+ return false;
+ }
+
+ const Interval* tmp = fIntvlList.peekHead();
+ return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
+}
+
+void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
+ *stopIndex = fCurOpsTaskIndex+1;
+
+ // This is interrupting the allocation of resources for this flush. We need to
+ // proactively clear the active interval list of any intervals that aren't
+ // guaranteed to survive the partial flush lest they become zombies (i.e.,
+ // holding a deleted surface proxy).
+ const Interval* tmp = fIntvlList.peekHead();
+ SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());
+
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
+
+ this->expire(tmp->start());
+}
+
+bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
+ SkASSERT(outError);
+ *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
+ : AssignError::kNoError;
+
+ SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());
+
+ fIntvlHash.reset(); // we don't need the interval hash anymore
+
+ if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
+ return false; // nothing to render
+ }
+
+ *startIndex = fCurOpsTaskIndex;
+ *stopIndex = fEndOfOpsTaskOpIndices.count();
+
+ if (fIntvlList.empty()) {
+ fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
+ return true; // no resources to assign
+ }
+
+#if GR_ALLOCATION_SPEW
+ SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
+ *startIndex, *stopIndex, fNumOpsTasks);
+ SkDebugf("EndOfOpsTaskIndices: ");
+ for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
+ SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
+ }
+ SkDebugf("\n");
+#endif
+
+ SkDEBUGCODE(fAssigned = true;)
+
+#if GR_ALLOCATION_SPEW
+ this->dumpIntervals();
+#endif
+ while (Interval* cur = fIntvlList.popHead()) {
+ while (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
+ }
+
+ this->expire(cur->start());
+
+ int minStencilSampleCount = (cur->proxy()->asRenderTargetProxy())
+ ? cur->proxy()->asRenderTargetProxy()->numStencilSamples()
+ : 0;
+
+ if (cur->proxy()->isInstantiated()) {
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
+ fResourceProvider, cur->proxy()->peekSurface(), minStencilSampleCount)) {
+ *outError = AssignError::kFailedProxyInstantiation;
+ }
+
+ fActiveIntvls.insertByIncreasingEnd(cur);
+
+ if (fResourceProvider->overBudget()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
+ this->forceIntermediateFlush(stopIndex);
+ return true;
+ }
+ }
+
+ continue;
+ }
+
+ if (cur->proxy()->isLazy()) {
+ if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
+ *outError = AssignError::kFailedProxyInstantiation;
+ }
+ } else if (sk_sp<GrSurface> surface =
+ this->findSurfaceFor(cur->proxy(), minStencilSampleCount)) {
+ // TODO: make getUniqueKey virtual on GrSurfaceProxy
+ GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();
+
+ if (texProxy && texProxy->getUniqueKey().isValid()) {
+ if (!surface->getUniqueKey().isValid()) {
+ fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
+ surface.get());
+ }
+ SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
+ }
+
+#if GR_ALLOCATION_SPEW
+ SkDebugf("Assigning %d to %d\n",
+ surface->uniqueID().asUInt(),
+ cur->proxy()->uniqueID().asUInt());
+#endif
+
+ cur->assign(std::move(surface));
+ } else {
+ SkASSERT(!cur->proxy()->isInstantiated());
+ *outError = AssignError::kFailedProxyInstantiation;
+ }
+
+ fActiveIntvls.insertByIncreasingEnd(cur);
+
+ if (fResourceProvider->overBudget()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
+ this->forceIntermediateFlush(stopIndex);
+ return true;
+ }
+ }
+ }
+
+ // expire all the remaining intervals to drain the active interval list
+ this->expire(std::numeric_limits<unsigned int>::max());
+ return true;
+}
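+
+// A sketch of the driving loop (illustrative; 'alloc' is an assumed name -- in practice the
+// drawing manager plays this role):
+//
+//   int startIndex, stopIndex;
+//   GrResourceAllocator::AssignError error;
+//   while (alloc.assign(&startIndex, &stopIndex, &error)) {
+//       if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
+//           // drop any ops that reference an uninstantiated proxy
+//       }
+//       // ... execute the opsTasks in [startIndex, stopIndex) ...
+//   }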
+
+#if GR_ALLOCATION_SPEW
+void GrResourceAllocator::dumpIntervals() {
+ // Print all the intervals while computing their range
+ SkDebugf("------------------------------------------------------------\n");
+ unsigned int min = std::numeric_limits<unsigned int>::max();
+ unsigned int max = 0;
+ for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
+ cur->proxy()->uniqueID().asUInt(),
+ cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
+ cur->start(),
+ cur->end(),
+ cur->proxy()->priv().getProxyRefCnt(),
+ cur->proxy()->testingOnly_getBackingRefCnt());
+ min = SkTMin(min, cur->start());
+ max = SkTMax(max, cur->end());
+ }
+
+    // Draw a graph of the usage intervals
+ for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ SkDebugf("{ %3d,%3d }: ",
+ cur->proxy()->uniqueID().asUInt(),
+ cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
+ for (unsigned int i = min; i <= max; ++i) {
+ if (i >= cur->start() && i <= cur->end()) {
+ SkDebugf("x");
+ } else {
+ SkDebugf(" ");
+ }
+ }
+ SkDebugf("\n");
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceAllocator.h b/gfx/skia/skia/src/gpu/GrResourceAllocator.h
new file mode 100644
index 0000000000..6108b54537
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceAllocator.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceAllocator_DEFINED
+#define GrResourceAllocator_DEFINED
+
+#include "include/gpu/GrSurface.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrSurfaceProxy.h"
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkTMultiMap.h"
+
+class GrResourceProvider;
+
+// Print out explicit allocation information
+#define GR_ALLOCATION_SPEW 0
+
+// Print out information about interval creation
+#define GR_TRACK_INTERVAL_CREATION 0
+
+/*
+ * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
+ * being given the usage intervals of the various proxies. It keeps these intervals in a singly
+ * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
+ * to interval to find proxy reuse). When it comes time to allocate the resources it
+ * traverses the sorted list and:
+ *     removes intervals from the active list that have completed (returning their GrSurfaces
+ *     to the free pool)
+ *     allocates a new resource (preferably from the free pool) for the new interval
+ *     adds the new interval to the active list (that is sorted by increasing end index)
+ *
+ * Note: the op indices (used in the usage intervals) come from the order of the ops in
+ * their opsTasks after the opsTask DAG has been linearized.
+ *
+ *************************************************************************************************
+ * How does instantiation failure handling work when explicitly allocating?
+ *
+ * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
+ * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
+ *
+ * The allocator will churn through this list but could fail anywhere.
+ *
+ * Allocation failure handling occurs at two levels:
+ *
+ * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
+ *
+ * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
+ * (via GrOpsTask::purgeOpsWithUninstantiatedProxies)
+ *
+ * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and
+ * individual ops when something goes wrong in allocation (i.e., when the return code from
+ * GrResourceAllocator::assign is bad)
+ *
+ * Altogether this means we should never attempt to draw an op which is missing some
+ * required GrSurface.
+ *
+ * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass.
+ * If any of the promise images fail at this stage then the allocator is set into an error
+ * state and all allocations are then scanned for failures during the main allocation pass.
+ */
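+
+/*
+ * A small worked example (illustrative): given usage intervals A:[0,3], B:[1,2] and C:[4,5],
+ * A and B overlap, so they are assigned distinct GrSurfaces. By index 4 both have expired and
+ * their surfaces have been returned to the free pool, so C can be instantiated by recycling
+ * one of them -- provided the scratch keys match and no outside refs keep the proxies alive.
+ */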
+class GrResourceAllocator {
+public:
+ GrResourceAllocator(GrResourceProvider* resourceProvider SkDEBUGCODE(, int numOpsTasks))
+ : fResourceProvider(resourceProvider) SkDEBUGCODE(, fNumOpsTasks(numOpsTasks)) {}
+
+ ~GrResourceAllocator();
+
+ unsigned int curOp() const { return fNumOps; }
+ void incOps() { fNumOps++; }
+
+ /** Indicates whether a given call to addInterval represents an actual usage of the
+     * provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
+ * In that case we need to create an extra long interval for them (due to the upload) but
+ * don't want to count that usage/reference towards the proxy's recyclability.
+ */
+ enum class ActualUse : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
+ // If an existing interval already exists it will be expanded to include the new range.
+ void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
+ SkDEBUGCODE(, bool isDirectDstRead = false));
+
+ enum class AssignError {
+ kNoError,
+ kFailedProxyInstantiation
+ };
+
+ // Returns true when the opsTasks from 'startIndex' to 'stopIndex' should be executed;
+ // false when nothing remains to be executed.
+ // If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
+ // If this happens, the caller should remove all ops which reference an uninstantiated proxy.
+ // This is used to execute a portion of the queued opsTasks in order to reduce the total
+ // amount of GPU resources required.
+ bool assign(int* startIndex, int* stopIndex, AssignError* outError);
+
+ void determineRecyclability();
+ void markEndOfOpsTask(int opsTaskIndex);
+
+#if GR_ALLOCATION_SPEW
+ void dumpIntervals();
+#endif
+
+private:
+ class Interval;
+
+ // Remove dead intervals from the active list
+ void expire(unsigned int curIndex);
+
+ bool onOpsTaskBoundary() const;
+ void forceIntermediateFlush(int* stopIndex);
+
+ // These two methods wrap the interactions with the free pool
+ void recycleSurface(sk_sp<GrSurface> surface);
+ sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, int minStencilSampleCount);
+
+ struct FreePoolTraits {
+ static const GrScratchKey& GetKey(const GrSurface& s) {
+ return s.resourcePriv().getScratchKey();
+ }
+
+ static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
+ static void OnFree(GrSurface* s) { s->unref(); }
+ };
+ typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
+
+ typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;
+
+ class Interval {
+ public:
+ Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
+ : fProxy(proxy)
+ , fProxyID(proxy->uniqueID().asUInt())
+ , fStart(start)
+ , fEnd(end)
+ , fNext(nullptr) {
+ SkASSERT(proxy);
+#if GR_TRACK_INTERVAL_CREATION
+ fUniqueID = CreateUniqueID();
+ SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
+ fUniqueID, proxy->uniqueID().asUInt(), start, end);
+#endif
+ }
+
+ // Used when recycling an interval
+ void resetTo(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) {
+ SkASSERT(proxy);
+ SkASSERT(!fProxy && !fNext);
+
+ fUses = 0;
+ fProxy = proxy;
+ fProxyID = proxy->uniqueID().asUInt();
+ fStart = start;
+ fEnd = end;
+ fNext = nullptr;
+#if GR_TRACK_INTERVAL_CREATION
+ fUniqueID = CreateUniqueID();
+ SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
+ fUniqueID, proxy->uniqueID().asUInt(), start, end);
+#endif
+ }
+
+ ~Interval() {
+ SkASSERT(!fAssignedSurface);
+ }
+
+ const GrSurfaceProxy* proxy() const { return fProxy; }
+ GrSurfaceProxy* proxy() { return fProxy; }
+
+ unsigned int start() const { return fStart; }
+ unsigned int end() const { return fEnd; }
+
+ void setNext(Interval* next) { fNext = next; }
+ const Interval* next() const { return fNext; }
+ Interval* next() { return fNext; }
+
+ void markAsRecyclable() { fIsRecyclable = true; }
+ bool isRecyclable() const { return fIsRecyclable; }
+
+ void addUse() { fUses++; }
+ int uses() { return fUses; }
+
+ void extendEnd(unsigned int newEnd) {
+ if (newEnd > fEnd) {
+#if GR_TRACK_INTERVAL_CREATION
+ // Log before updating fEnd so the "from" value is the old end, not the new one.
+ SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
+#endif
+ fEnd = newEnd;
+ }
+ }
+
+ void assign(sk_sp<GrSurface>);
+ bool wasAssignedSurface() const { return fAssignedSurface != nullptr; }
+ sk_sp<GrSurface> detachSurface() { return std::move(fAssignedSurface); }
+
+ // for SkTDynamicHash
+ static const uint32_t& GetKey(const Interval& intvl) {
+ return intvl.fProxyID;
+ }
+ static uint32_t Hash(const uint32_t& key) { return key; }
+
+ private:
+ sk_sp<GrSurface> fAssignedSurface;
+ GrSurfaceProxy* fProxy;
+ uint32_t fProxyID; // This is here because DynamicHash requires a reference to the key
+ unsigned int fStart;
+ unsigned int fEnd;
+ Interval* fNext;
+ unsigned int fUses = 0;
+ bool fIsRecyclable = false;
+
+#if GR_TRACK_INTERVAL_CREATION
+ uint32_t fUniqueID;
+
+ uint32_t CreateUniqueID();
+#endif
+ };
+
+ class IntervalList {
+ public:
+ IntervalList() = default;
+ ~IntervalList() {
+ // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
+ // Since the arena allocator will clean up for us we don't bother here.
+ }
+
+ bool empty() const {
+ SkASSERT(SkToBool(fHead) == SkToBool(fTail));
+ return !SkToBool(fHead);
+ }
+ const Interval* peekHead() const { return fHead; }
+ Interval* peekHead() { return fHead; }
+ Interval* popHead();
+ void insertByIncreasingStart(Interval*);
+ void insertByIncreasingEnd(Interval*);
+ Interval* detachAll();
+
+ private:
+ SkDEBUGCODE(void validate() const;)
+
+ Interval* fHead = nullptr;
+ Interval* fTail = nullptr;
+ };
+
+ // Compositing use cases can create > 80 intervals.
+ static const int kInitialArenaSize = 128 * sizeof(Interval);
+
+ GrResourceProvider* fResourceProvider;
+ FreePoolMultiMap fFreePool; // Recently created/used GrSurfaces
+ IntvlHash fIntvlHash; // All the intervals, hashed by proxyID
+
+ IntervalList fIntvlList; // All the intervals sorted by increasing start
+ IntervalList fActiveIntvls; // List of live intervals during assignment
+ // (sorted by increasing end)
+ unsigned int fNumOps = 0;
+ SkTArray<unsigned int> fEndOfOpsTaskOpIndices;
+ int fCurOpsTaskIndex = 0;
+ SkDEBUGCODE(const int fNumOpsTasks = -1;)
+
+ SkDEBUGCODE(bool fAssigned = false;)
+
+ char fStorage[kInitialArenaSize];
+ SkArenaAlloc fIntervalAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
+ Interval* fFreeIntervalList = nullptr;
+ bool fLazyInstantiationError = false;
+};
+
+#endif // GrResourceAllocator_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrResourceCache.cpp b/gfx/skia/skia/src/gpu/GrResourceCache.cpp
new file mode 100644
index 0000000000..6d9a961886
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceCache.cpp
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrResourceCache.h"
+#include <atomic>
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrSingleOwner.h"
+#include "include/private/SkTo.h"
+#include "include/utils/SkRandom.h"
+#include "src/core/SkExchange.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkScopeExit.h"
+#include "src/core/SkTSort.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpuResourceCacheAccess.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrTextureProxyCacheAccess.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/SkGr.h"
+
+DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);
+
+DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage);
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
+ static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
+
+ int32_t type = nextType++;
+ if (type > SkTo<int32_t>(UINT16_MAX)) {
+ SK_ABORT("Too many Resource Types");
+ }
+
+ return static_cast<ResourceType>(type);
+}
+
+GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
+ static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
+
+ int32_t domain = nextDomain++;
+ if (domain > SkTo<int32_t>(UINT16_MAX)) {
+ SK_ABORT("Too many GrUniqueKey Domains");
+ }
+
+ return static_cast<Domain>(domain);
+}
+
+uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
+ return SkOpts::hash(data, size);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrResourceCache::AutoValidate : ::SkNoncopyable {
+public:
+ AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
+ ~AutoValidate() { fCache->validate(); }
+private:
+ GrResourceCache* fCache;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
+
+inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
+ : fTexture(texture), fNumUnrefs(1) {}
+
+inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
+ fTexture = skstd::exchange(that.fTexture, nullptr);
+ fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
+}
+
+inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
+ TextureAwaitingUnref&& that) {
+ fTexture = skstd::exchange(that.fTexture, nullptr);
+ fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
+ return *this;
+}
+
+inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
+ if (fTexture) {
+ for (int i = 0; i < fNumUnrefs; ++i) {
+ fTexture->unref();
+ }
+ }
+}
+
+inline void GrResourceCache::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }
+
+inline void GrResourceCache::TextureAwaitingUnref::unref() {
+ SkASSERT(fNumUnrefs > 0);
+ fTexture->unref();
+ --fNumUnrefs;
+}
+
+inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
+ uint32_t contextUniqueID)
+ : fInvalidUniqueKeyInbox(contextUniqueID)
+ , fFreedTextureInbox(contextUniqueID)
+ , fContextUniqueID(contextUniqueID)
+ , fSingleOwner(singleOwner)
+ , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
+ SkASSERT(contextUniqueID != SK_InvalidUniqueID);
+}
+
+GrResourceCache::~GrResourceCache() {
+ this->releaseAll();
+}
+
+void GrResourceCache::setLimit(size_t bytes) {
+ fMaxBytes = bytes;
+ this->purgeAsNeeded();
+}
+
+void GrResourceCache::insertResource(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource);
+ SkASSERT(!this->isInCache(resource));
+ SkASSERT(!resource->wasDestroyed());
+ SkASSERT(!resource->resourcePriv().isPurgeable());
+
+ // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
+ // up iterating over all the resources that already have timestamps.
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+
+ this->addToNonpurgeableArray(resource);
+
+ size_t size = resource->gpuMemorySize();
+ SkDEBUGCODE(++fCount;)
+ fBytes += size;
+#if GR_CACHE_STATS
+ fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
+ fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
+#endif
+ if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
+ ++fBudgetedCount;
+ fBudgetedBytes += size;
+ TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+#if GR_CACHE_STATS
+ fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
+ fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
+#endif
+ }
+ if (resource->resourcePriv().getScratchKey().isValid() &&
+ !resource->getUniqueKey().isValid()) {
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
+ }
+
+ this->purgeAsNeeded();
+}
+
+void GrResourceCache::removeResource(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ this->validate();
+ SkASSERT(this->isInCache(resource));
+
+ size_t size = resource->gpuMemorySize();
+ if (resource->resourcePriv().isPurgeable()) {
+ fPurgeableQueue.remove(resource);
+ fPurgeableBytes -= size;
+ } else {
+ this->removeFromNonpurgeableArray(resource);
+ }
+
+ SkDEBUGCODE(--fCount;)
+ fBytes -= size;
+ if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
+ --fBudgetedCount;
+ fBudgetedBytes -= size;
+ TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+ }
+
+ if (resource->resourcePriv().getScratchKey().isValid() &&
+ !resource->getUniqueKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+ if (resource->getUniqueKey().isValid()) {
+ fUniqueHash.remove(resource->getUniqueKey());
+ }
+ this->validate();
+}
+
+void GrResourceCache::abandonAll() {
+ AutoValidate av(this);
+
+ // We need to make sure to free any resources that were waiting on a free message but never
+ // received one.
+ fTexturesAwaitingUnref.reset();
+
+ while (fNonpurgeableResources.count()) {
+ GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(!back->wasDestroyed());
+ back->cacheAccess().abandon();
+ }
+
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* top = fPurgeableQueue.peek();
+ SkASSERT(!top->wasDestroyed());
+ top->cacheAccess().abandon();
+ }
+
+ SkASSERT(!fScratchMap.count());
+ SkASSERT(!fUniqueHash.count());
+ SkASSERT(!fCount);
+ SkASSERT(!this->getResourceCount());
+ SkASSERT(!fBytes);
+ SkASSERT(!fBudgetedCount);
+ SkASSERT(!fBudgetedBytes);
+ SkASSERT(!fPurgeableBytes);
+ SkASSERT(!fTexturesAwaitingUnref.count());
+}
+
+void GrResourceCache::releaseAll() {
+ AutoValidate av(this);
+
+ this->processFreedGpuResources();
+
+ // We need to make sure to free any resources that were waiting on a free message but never
+ // received one.
+ fTexturesAwaitingUnref.reset();
+
+ SkASSERT(fProxyProvider); // better have called setProxyProvider
+ // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
+ // they also have a raw pointer back to this class (which is presumably going away)!
+ fProxyProvider->removeAllUniqueKeys();
+
+ while (fNonpurgeableResources.count()) {
+ GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(!back->wasDestroyed());
+ back->cacheAccess().release();
+ }
+
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* top = fPurgeableQueue.peek();
+ SkASSERT(!top->wasDestroyed());
+ top->cacheAccess().release();
+ }
+
+ SkASSERT(!fScratchMap.count());
+ SkASSERT(!fUniqueHash.count());
+ SkASSERT(!fCount);
+ SkASSERT(!this->getResourceCount());
+ SkASSERT(!fBytes);
+ SkASSERT(!fBudgetedCount);
+ SkASSERT(!fBudgetedBytes);
+ SkASSERT(!fPurgeableBytes);
+ SkASSERT(!fTexturesAwaitingUnref.count());
+}
+
+void GrResourceCache::refResource(GrGpuResource* resource) {
+ SkASSERT(resource);
+ SkASSERT(resource->getContext()->priv().getResourceCache() == this);
+ if (resource->cacheAccess().hasRef()) {
+ resource->ref();
+ } else {
+ this->refAndMakeResourceMRU(resource);
+ }
+ this->validate();
+}
+
+class GrResourceCache::AvailableForScratchUse {
+public:
+ AvailableForScratchUse() { }
+
+ bool operator()(const GrGpuResource* resource) const {
+ SkASSERT(!resource->getUniqueKey().isValid() &&
+ resource->resourcePriv().getScratchKey().isValid());
+
+ // isScratch() also tests that the resource is budgeted.
+ if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
+ return false;
+ }
+ return true;
+ }
+};
+
+GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
+ SkASSERT(scratchKey.isValid());
+
+ GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
+ if (resource) {
+ this->refAndMakeResourceMRU(resource);
+ this->validate();
+ }
+ return resource;
+}
+
+void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource->resourcePriv().getScratchKey().isValid());
+ if (!resource->getUniqueKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+}
+
+void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ // Someone has a ref to this resource in order to have removed the key. When the ref count
+ // reaches zero we will get a ref cnt notification and figure out what to do with it.
+ if (resource->getUniqueKey().isValid()) {
+ SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
+ fUniqueHash.remove(resource->getUniqueKey());
+ }
+ resource->cacheAccess().removeUniqueKey();
+ if (resource->resourcePriv().getScratchKey().isValid()) {
+ fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
+ }
+
+ // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
+ // require purging. However, the resource must be ref'ed to get here and therefore can't
+ // be purgeable. We'll purge it when the refs reach zero.
+ SkASSERT(!resource->resourcePriv().isPurgeable());
+ this->validate();
+}
+
+void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ // If another resource has the new key, remove its key then install the key on this resource.
+ if (newKey.isValid()) {
+ if (GrGpuResource* old = fUniqueHash.find(newKey)) {
+ // If the old resource using the key is purgeable and is unreachable, then remove it.
+ if (!old->resourcePriv().getScratchKey().isValid() &&
+ old->resourcePriv().isPurgeable()) {
+ old->cacheAccess().release();
+ } else {
+ // removeUniqueKey expects an external owner of the resource.
+ this->removeUniqueKey(sk_ref_sp(old).get());
+ }
+ }
+ SkASSERT(nullptr == fUniqueHash.find(newKey));
+
+ // Remove the entry for this resource if it already has a unique key.
+ if (resource->getUniqueKey().isValid()) {
+ SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
+ fUniqueHash.remove(resource->getUniqueKey());
+ SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
+ } else {
+ // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
+ // from the ScratchMap
+ if (resource->resourcePriv().getScratchKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+ }
+
+ resource->cacheAccess().setUniqueKey(newKey);
+ fUniqueHash.add(resource);
+ } else {
+ this->removeUniqueKey(resource);
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ if (resource->resourcePriv().isPurgeable()) {
+ // It's about to become unpurgeable.
+ fPurgeableBytes -= resource->gpuMemorySize();
+ fPurgeableQueue.remove(resource);
+ this->addToNonpurgeableArray(resource);
+ } else if (!resource->cacheAccess().hasRef() &&
+ resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
+ SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
+ fNumBudgetedResourcesFlushWillMakePurgeable--;
+ }
+ resource->cacheAccess().ref();
+
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+ this->validate();
+}
+
+void GrResourceCache::notifyRefCntReachedZero(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource);
+ SkASSERT(!resource->wasDestroyed());
+ SkASSERT(this->isInCache(resource));
+ // This resource should always be in the nonpurgeable array when this function is called. It
+ // will be moved to the queue if it is newly purgeable.
+ SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
+
+#ifdef SK_DEBUG
+ // When the timestamp overflows validate() is called. validate() checks that resources in
+ // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
+ // the purgeable queue happens just below in this function. So we mark it as an exception.
+ if (resource->resourcePriv().isPurgeable()) {
+ fNewlyPurgeableResourceForValidation = resource;
+ }
+#endif
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+ SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
+
+ if (!resource->resourcePriv().isPurgeable() &&
+ resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
+ ++fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
+
+ if (!resource->resourcePriv().isPurgeable()) {
+ this->validate();
+ return;
+ }
+
+ this->removeFromNonpurgeableArray(resource);
+ fPurgeableQueue.insert(resource);
+ resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
+ fPurgeableBytes += resource->gpuMemorySize();
+
+ bool hasUniqueKey = resource->getUniqueKey().isValid();
+
+ GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();
+
+ if (budgetedType == GrBudgetedType::kBudgeted) {
+ // Purge the resource immediately if we're over budget
+ // Also purge if the resource has neither a valid scratch key nor a unique key.
+ bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
+ if (!this->overBudget() && hasKey) {
+ return;
+ }
+ } else {
+ // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
+ // they can be reused again by the image connected to the unique key.
+ if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
+ return;
+ }
+ // Check whether this resource could still be used as a scratch resource.
+ if (!resource->resourcePriv().refsWrappedObjects() &&
+ resource->resourcePriv().getScratchKey().isValid()) {
+ // We won't purge an existing resource to make room for this one.
+ if (this->wouldFit(resource->gpuMemorySize())) {
+ resource->resourcePriv().makeBudgeted();
+ return;
+ }
+ }
+ }
+
+ SkDEBUGCODE(int beforeCount = this->getResourceCount();)
+ resource->cacheAccess().release();
+ // We should at least free this resource, perhaps dependent resources as well.
+ SkASSERT(this->getResourceCount() < beforeCount);
+ this->validate();
+}
+
+void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ size_t size = resource->gpuMemorySize();
+ // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
+ // resource become purgeable. However, we should never allow that transition. Wrapped
+ // resources are the only resources that can be in that state and they aren't allowed to
+ // transition from one budgeted state to another.
+ SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
+ if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
+ ++fBudgetedCount;
+ fBudgetedBytes += size;
+#if GR_CACHE_STATS
+ fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
+ fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
+#endif
+ if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+ ++fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
+ this->purgeAsNeeded();
+ } else {
+ SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
+ --fBudgetedCount;
+ fBudgetedBytes -= size;
+ if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+ --fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
+ }
+ SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
+ TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+
+ this->validate();
+}
+
+void GrResourceCache::purgeAsNeeded() {
+ SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
+ fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
+ if (invalidKeyMsgs.count()) {
+ SkASSERT(fProxyProvider);
+
+ for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
+ fProxyProvider->processInvalidUniqueKey(invalidKeyMsgs[i].key(), nullptr,
+ GrProxyProvider::InvalidateGPUResource::kYes);
+ SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
+ }
+ }
+
+ this->processFreedGpuResources();
+
+ bool stillOverbudget = this->overBudget();
+ while (stillOverbudget && fPurgeableQueue.count()) {
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->resourcePriv().isPurgeable());
+ resource->cacheAccess().release();
+ stillOverbudget = this->overBudget();
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
+ if (!scratchResourcesOnly) {
+ // We could disable maintaining the heap property here, but it would add a lot of
+ // complexity. Moreover, this is rarely called.
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->resourcePriv().isPurgeable());
+ resource->cacheAccess().release();
+ }
+ } else {
+ // Sort the queue
+ fPurgeableQueue.sort();
+
+ // Make a list of the scratch resources to delete
+ SkTDArray<GrGpuResource*> scratchResources;
+ for (int i = 0; i < fPurgeableQueue.count(); i++) {
+ GrGpuResource* resource = fPurgeableQueue.at(i);
+ SkASSERT(resource->resourcePriv().isPurgeable());
+ if (!resource->getUniqueKey().isValid()) {
+ *scratchResources.append() = resource;
+ }
+ }
+
+ // Delete the scratch resources. This must be done as a separate pass
+ // to avoid messing up the sorted order of the queue
+ for (int i = 0; i < scratchResources.count(); i++) {
+ scratchResources.getAt(i)->cacheAccess().release();
+ }
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
+ while (fPurgeableQueue.count()) {
+ const GrStdSteadyClock::time_point resourceTime =
+ fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
+ if (resourceTime >= purgeTime) {
+ // Resources were given both LRU timestamps and tagged with a frame number when
+ // they first became purgeable. The LRU timestamp won't change again until the
+ // resource is made non-purgeable again. So, at this point all the remaining
+ // resources in the timestamp-sorted queue will have a frame number >= to this
+ // one.
+ break;
+ }
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->resourcePriv().isPurgeable());
+ resource->cacheAccess().release();
+ }
+}
+
+void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
+ // Guard the subtraction: size_t arithmetic would wrap if 'bytesToPurge' exceeded 'fBytes'.
+ const size_t tmpByteBudget = fBytes > bytesToPurge ? fBytes - bytesToPurge : 0;
+ bool stillOverbudget = tmpByteBudget < fBytes;
+
+ if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
+ // Sort the queue
+ fPurgeableQueue.sort();
+
+ // Make a list of the scratch resources to delete
+ SkTDArray<GrGpuResource*> scratchResources;
+ size_t scratchByteCount = 0;
+ for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
+ GrGpuResource* resource = fPurgeableQueue.at(i);
+ SkASSERT(resource->resourcePriv().isPurgeable());
+ if (!resource->getUniqueKey().isValid()) {
+ *scratchResources.append() = resource;
+ scratchByteCount += resource->gpuMemorySize();
+ stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
+ }
+ }
+
+ // Delete the scratch resources. This must be done as a separate pass
+ // to avoid messing up the sorted order of the queue
+ for (int i = 0; i < scratchResources.count(); i++) {
+ scratchResources.getAt(i)->cacheAccess().release();
+ }
+ stillOverbudget = tmpByteBudget < fBytes;
+
+ this->validate();
+ }
+
+ // Purge any remaining resources in LRU order
+ if (stillOverbudget) {
+ const size_t cachedByteCount = fMaxBytes;
+ fMaxBytes = tmpByteBudget;
+ this->purgeAsNeeded();
+ fMaxBytes = cachedByteCount;
+ }
+}
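+
+// requestsFlush() is true only when purging alone cannot get the cache back under budget
+// but flushing pending work would release the last refs on at least one budgeted
+// resource, making it purgeable.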
+bool GrResourceCache::requestsFlush() const {
+ return this->overBudget() && !fPurgeableQueue.count() &&
+ fNumBudgetedResourcesFlushWillMakePurgeable > 0;
+}
+
+void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
+ texture->ref();
+ uint32_t id = texture->uniqueID().asUInt();
+ if (auto* data = fTexturesAwaitingUnref.find(id)) {
+ data->addRef();
+ } else {
+ fTexturesAwaitingUnref.set(id, {texture});
+ }
+}
+
+void GrResourceCache::processFreedGpuResources() {
+ if (!fTexturesAwaitingUnref.count()) {
+ return;
+ }
+
+ SkTArray<GrTextureFreedMessage> msgs;
+ fFreedTextureInbox.poll(&msgs);
+ for (int i = 0; i < msgs.count(); ++i) {
+ SkASSERT(msgs[i].fOwningUniqueID == fContextUniqueID);
+ uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
+ TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
+ // If we called release or abandon on the GrContext we will have already released our ref on
+ // the GrGpuResource. If then the message arrives before the actual GrContext gets destroyed
+ // we will try to process the message when we destroy the GrContext. This protects us from
+ // trying to unref the resource twice.
+ if (info) {
+ info->unref();
+ if (info->finished()) {
+ fTexturesAwaitingUnref.remove(id);
+ }
+ }
+ }
+}
+
+void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
+ int index = fNonpurgeableResources.count();
+ *fNonpurgeableResources.append() = resource;
+ *resource->cacheAccess().accessCacheIndex() = index;
+}
+
+void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
+ int* index = resource->cacheAccess().accessCacheIndex();
+ // Fill the hole we will create in the array with the tail object, adjust its index, and
+ // then pop the array
+ GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(fNonpurgeableResources[*index] == resource);
+ fNonpurgeableResources[*index] = tail;
+ *tail->cacheAccess().accessCacheIndex() = *index;
+ fNonpurgeableResources.pop();
+ SkDEBUGCODE(*index = -1);
+}
+
+uint32_t GrResourceCache::getNextTimestamp() {
+ // If we wrap then all the existing resources will appear older than any resources that get
+ // a timestamp after the wrap.
+ if (0 == fTimestamp) {
+ int count = this->getResourceCount();
+ if (count) {
+ // Reset all the timestamps. We sort the resources by timestamp and then assign
+ // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
+ // rare.
+ SkTDArray<GrGpuResource*> sortedPurgeableResources;
+ sortedPurgeableResources.setReserve(fPurgeableQueue.count());
+
+ while (fPurgeableQueue.count()) {
+ *sortedPurgeableResources.append() = fPurgeableQueue.peek();
+ fPurgeableQueue.pop();
+ }
+
+ SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
+ CompareTimestamp);
+
+ // Pick resources out of the purgeable and non-purgeable arrays based on lowest
+ // timestamp and assign new timestamps.
+ int currP = 0;
+ int currNP = 0;
+ while (currP < sortedPurgeableResources.count() &&
+ currNP < fNonpurgeableResources.count()) {
+ uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
+ uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
+ SkASSERT(tsP != tsNP);
+ if (tsP < tsNP) {
+ sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
+ } else {
+ // Correct the index in the nonpurgeable array stored on the resource post-sort.
+ *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
+ fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+ }
+
+ // The above loop ended when we hit the end of one array. Finish the other one.
+ while (currP < sortedPurgeableResources.count()) {
+ sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+ while (currNP < fNonpurgeableResources.count()) {
+ *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
+ fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+
+ // Rebuild the queue.
+ for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
+ fPurgeableQueue.insert(sortedPurgeableResources[i]);
+ }
+
+ this->validate();
+ SkASSERT(count == this->getResourceCount());
+
+ // count should be the next timestamp we return.
+ SkASSERT(fTimestamp == SkToU32(count));
+ }
+ }
+ return fTimestamp++;
+}
+
+void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
+ fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
+ }
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
+ }
+}
+
+#if GR_CACHE_STATS
+void GrResourceCache::getStats(Stats* stats) const {
+ stats->reset();
+
+ stats->fTotal = this->getResourceCount();
+ stats->fNumNonPurgeable = fNonpurgeableResources.count();
+ stats->fNumPurgeable = fPurgeableQueue.count();
+
+ for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
+ stats->update(fNonpurgeableResources[i]);
+ }
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ stats->update(fPurgeableQueue.at(i));
+ }
+}
+
+#if GR_TEST_UTILS
+void GrResourceCache::dumpStats(SkString* out) const {
+ this->validate();
+
+ Stats stats;
+
+ this->getStats(&stats);
+
+ float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
+
+ out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
+ out->appendf("\t\tEntry Count: current %d"
+ " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
+ stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
+ stats.fScratch, fHighWaterCount);
+ out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
+ SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
+ SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
+}
+
+void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
+ SkTArray<double>* values) const {
+ this->validate();
+
+ Stats stats;
+ this->getStats(&stats);
+
+ keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
+}
+#endif
+
+#endif
+
+#ifdef SK_DEBUG
+void GrResourceCache::validate() const {
+ // Reduce the frequency of validations for large resource counts.
+ static SkRandom gRandom;
+ int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
+ if (~mask && (gRandom.nextU() & mask)) {
+ return;
+ }
+
+ struct Stats {
+ size_t fBytes;
+ int fBudgetedCount;
+ size_t fBudgetedBytes;
+ int fLocked;
+ int fScratch;
+ int fCouldBeScratch;
+ int fContent;
+ const ScratchMap* fScratchMap;
+ const UniqueHash* fUniqueHash;
+
+ Stats(const GrResourceCache* cache) {
+ memset(this, 0, sizeof(*this));
+ fScratchMap = &cache->fScratchMap;
+ fUniqueHash = &cache->fUniqueHash;
+ }
+
+ void update(GrGpuResource* resource) {
+ fBytes += resource->gpuMemorySize();
+
+ if (!resource->resourcePriv().isPurgeable()) {
+ ++fLocked;
+ }
+
+ const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
+ const GrUniqueKey& uniqueKey = resource->getUniqueKey();
+
+ if (resource->cacheAccess().isScratch()) {
+ SkASSERT(!uniqueKey.isValid());
+ ++fScratch;
+ SkASSERT(fScratchMap->countForKey(scratchKey));
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ } else if (scratchKey.isValid()) {
+ SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
+ uniqueKey.isValid());
+ if (!uniqueKey.isValid()) {
+ ++fCouldBeScratch;
+ SkASSERT(fScratchMap->countForKey(scratchKey));
+ }
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ }
+ if (uniqueKey.isValid()) {
+ ++fContent;
+ SkASSERT(fUniqueHash->find(uniqueKey) == resource);
+ SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
+ resource->resourcePriv().refsWrappedObjects());
+
+ if (scratchKey.isValid()) {
+ SkASSERT(!fScratchMap->has(resource, scratchKey));
+ }
+ }
+
+ if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
+ ++fBudgetedCount;
+ fBudgetedBytes += resource->gpuMemorySize();
+ }
+ }
+ };
+
+ {
+ ScratchMap::ConstIter iter(&fScratchMap);
+
+ int count = 0;
+ for ( ; !iter.done(); ++iter) {
+ const GrGpuResource* resource = *iter;
+ SkASSERT(resource->resourcePriv().getScratchKey().isValid());
+ SkASSERT(!resource->getUniqueKey().isValid());
+ count++;
+ }
+ SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
+ }
+
+ Stats stats(this);
+ size_t purgeableBytes = 0;
+ int numBudgetedResourcesFlushWillMakePurgeable = 0;
+
+ for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
+ SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
+ fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
+ SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
+ SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
+ if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
+ !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
+ fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
+ ++numBudgetedResourcesFlushWillMakePurgeable;
+ }
+ stats.update(fNonpurgeableResources[i]);
+ }
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
+ SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
+ SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
+ stats.update(fPurgeableQueue.at(i));
+ purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
+ }
+
+ SkASSERT(fCount == this->getResourceCount());
+ SkASSERT(fBudgetedCount <= fCount);
+ SkASSERT(fBudgetedBytes <= fBytes);
+ SkASSERT(stats.fBytes == fBytes);
+ SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
+ numBudgetedResourcesFlushWillMakePurgeable);
+ SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
+ SkASSERT(stats.fBudgetedCount == fBudgetedCount);
+ SkASSERT(purgeableBytes == fPurgeableBytes);
+#if GR_CACHE_STATS
+ SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
+ SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
+ SkASSERT(fBytes <= fHighWaterBytes);
+ SkASSERT(fCount <= fHighWaterCount);
+ SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
+ SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
+#endif
+ SkASSERT(stats.fContent == fUniqueHash.count());
+ SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());
+
+ // This assertion is not currently valid because we can be in recursive notifyRefCntReachedZero()
+ // calls. This will be fixed when subresource registration is explicit.
+ // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
+ // SkASSERT(!overBudget || locked == count || fPurging);
+}
+
+bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
+ int index = *resource->cacheAccess().accessCacheIndex();
+ if (index < 0) {
+ return false;
+ }
+ if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
+ return true;
+ }
+ if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
+ return true;
+ }
+ SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
+ return false;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceCache.h b/gfx/skia/skia/src/gpu/GrResourceCache.h
new file mode 100644
index 0000000000..de554b429e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceCache.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceCache_DEFINED
+#define GrResourceCache_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrGpuResource.h"
+#include "include/private/GrResourceKey.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkTDPQueue.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/core/SkTMultiMap.h"
+#include "src/gpu/GrGpuResourceCacheAccess.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+
+class GrCaps;
+class GrProxyProvider;
+class SkString;
+class SkTraceMemoryDump;
+class GrSingleOwner;
+
+struct GrTextureFreedMessage {
+ GrTexture* fTexture;
+ uint32_t fOwningUniqueID;
+};
+
+static inline bool SkShouldPostMessageToBus(
+ const GrTextureFreedMessage& msg, uint32_t msgBusUniqueID) {
+ // The inbox's ID is the unique ID of the owning GrContext.
+ return msgBusUniqueID == msg.fOwningUniqueID;
+}
+
+/**
+ * Manages the lifetime of all GrGpuResource instances.
+ *
+ * Resources may optionally have two types of keys:
+ * 1) A scratch key. This is for resources whose allocations are cached but not their contents.
+ * Multiple resources can share the same scratch key. This is so a caller can have two
+ * resource instances with the same properties (e.g. multipass rendering that ping-pongs
+ * between two temporary surfaces). The scratch key is set at resource creation time and
+ * should never change. Resources need not have a scratch key.
+ * 2) A unique key. This key's meaning is specific to the domain that created the key. Only one
+ * resource may have a given unique key. The unique key can be set, cleared, or changed
+ * anytime after resource creation.
+ *
+ * A unique key always takes precedence over a scratch key when a resource has both types of keys.
+ * If a resource has neither key type then it will be deleted as soon as the last reference to it
+ * is dropped.
+ */
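+// Illustrative lookup order for a caller holding both key types (a sketch, not part of
+// the class contract): prefer exact contents via the unique key, else reuse a compatible
+// allocation via the scratch key and regenerate the contents.
+//
+// sk_sp<GrGpuResource> res(cache->findAndRefUniqueResource(uniqueKey));
+// if (!res) {
+// res.reset(cache->findAndRefScratchResource(scratchKey));
+// // ... re-render, then give the resource the unique key for next time ...
+// }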
+class GrResourceCache {
+public:
+ GrResourceCache(const GrCaps*, GrSingleOwner* owner, uint32_t contextUniqueID);
+ ~GrResourceCache();
+
+ // Default maximum number of bytes of gpu memory of budgeted resources in the cache.
+ static const size_t kDefaultMaxSize = 96 * (1 << 20);
+
+ /** Used to access functionality needed by GrGpuResource for lifetime management. */
+ class ResourceAccess;
+ ResourceAccess resourceAccess();
+
+ /** Unique ID of the owning GrContext. */
+ uint32_t contextUniqueID() const { return fContextUniqueID; }
+
+ /** Sets the max gpu memory byte size of the cache. */
+ void setLimit(size_t bytes);
+
+ /**
+ * Returns the number of resources.
+ */
+ int getResourceCount() const {
+ return fPurgeableQueue.count() + fNonpurgeableResources.count();
+ }
+
+ /**
+ * Returns the number of resources that count against the budget.
+ */
+ int getBudgetedResourceCount() const { return fBudgetedCount; }
+
+ /**
+ * Returns the number of bytes consumed by resources.
+ */
+ size_t getResourceBytes() const { return fBytes; }
+
+ /**
+ * Returns the number of bytes held by unlocked resources which are available for purging.
+ */
+ size_t getPurgeableBytes() const { return fPurgeableBytes; }
+
+ /**
+ * Returns the number of bytes consumed by budgeted resources.
+ */
+ size_t getBudgetedResourceBytes() const { return fBudgetedBytes; }
+
+ /**
+ * Returns the maximum number of bytes of GPU memory that budgeted resources may consume.
+ */
+ size_t getMaxResourceBytes() const { return fMaxBytes; }
+
+ /**
+ * Abandons the backend API resources owned by all GrGpuResource objects and removes them from
+ * the cache.
+ */
+ void abandonAll();
+
+ /**
+ * Releases the backend API resources owned by all GrGpuResource objects and removes them from
+ * the cache.
+ */
+ void releaseAll();
+
+ /**
+ * Find a resource that matches a scratch key.
+ */
+ GrGpuResource* findAndRefScratchResource(const GrScratchKey& scratchKey);
+
+#ifdef SK_DEBUG
+ // This is not particularly fast and only used for validation, so debug only.
+ int countScratchEntriesForKey(const GrScratchKey& scratchKey) const {
+ return fScratchMap.countForKey(scratchKey);
+ }
+#endif
+
+ /**
+ * Find a resource that matches a unique key.
+ */
+ GrGpuResource* findAndRefUniqueResource(const GrUniqueKey& key) {
+ GrGpuResource* resource = fUniqueHash.find(key);
+ if (resource) {
+ this->refAndMakeResourceMRU(resource);
+ }
+ return resource;
+ }
+
+ /**
+ * Query whether a unique key exists in the cache.
+ */
+ bool hasUniqueKey(const GrUniqueKey& key) const {
+ return SkToBool(fUniqueHash.find(key));
+ }
+
+ /** Purges resources until the cache is under budget and processes resources with
+ invalidated unique keys. */
+ void purgeAsNeeded();
+
+ /** Purges all resources that don't have external owners. */
+ void purgeAllUnlocked() { this->purgeUnlockedResources(false); }
+
+ // Purge unlocked resources. If 'scratchResourcesOnly' is true the purgeable resources
+ // containing persistent data are spared. If it is false then all purgeable resources will
+ // be deleted.
+ void purgeUnlockedResources(bool scratchResourcesOnly);
+
+ /** Purge all resources not used since the passed in time. */
+ void purgeResourcesNotUsedSince(GrStdSteadyClock::time_point);
+
+ bool overBudget() const { return fBudgetedBytes > fMaxBytes; }
+
+ /**
+ * Purge unlocked resources from the cache until the provided byte count has been reached
+ * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
+ * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
+ * resource types.
+ *
+ * @param bytesToPurge the desired number of bytes to be purged.
+ * @param preferScratchResources If true scratch resources will be purged prior to other
+ * resource types.
+ */
+ void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
+
+ /** Returns true if the cache would like a flush to occur in order to make more resources
+ purgeable. */
+ bool requestsFlush() const;
+
+ /** Maintain a ref to this texture until we receive a GrTextureFreedMessage. */
+ void insertDelayedTextureUnref(GrTexture*);
+
+#if GR_CACHE_STATS
+ struct Stats {
+ int fTotal;
+ int fNumPurgeable;
+ int fNumNonPurgeable;
+
+ int fScratch;
+ int fWrapped;
+ size_t fUnbudgetedSize;
+
+ Stats() { this->reset(); }
+
+ void reset() {
+ fTotal = 0;
+ fNumPurgeable = 0;
+ fNumNonPurgeable = 0;
+ fScratch = 0;
+ fWrapped = 0;
+ fUnbudgetedSize = 0;
+ }
+
+ void update(GrGpuResource* resource) {
+ if (resource->cacheAccess().isScratch()) {
+ ++fScratch;
+ }
+ if (resource->resourcePriv().refsWrappedObjects()) {
+ ++fWrapped;
+ }
+ if (GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType()) {
+ fUnbudgetedSize += resource->gpuMemorySize();
+ }
+ }
+ };
+
+ void getStats(Stats*) const;
+
+#if GR_TEST_UTILS
+ void dumpStats(SkString*) const;
+
+ void dumpStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+#endif
+
+#endif
+
+#ifdef SK_DEBUG
+ int countUniqueKeysWithTag(const char* tag) const;
+#endif
+
+ // This function is for unit testing and is only defined in test tools.
+ void changeTimestamp(uint32_t newTimestamp);
+
+ // Enumerates all cached resources and dumps their details to traceMemoryDump.
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ void setProxyProvider(GrProxyProvider* proxyProvider) { fProxyProvider = proxyProvider; }
+
+private:
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Methods accessible via ResourceAccess
+ ////
+ void insertResource(GrGpuResource*);
+ void removeResource(GrGpuResource*);
+ void notifyRefCntReachedZero(GrGpuResource*);
+ void changeUniqueKey(GrGpuResource*, const GrUniqueKey&);
+ void removeUniqueKey(GrGpuResource*);
+ void willRemoveScratchKey(const GrGpuResource*);
+ void didChangeBudgetStatus(GrGpuResource*);
+ void refResource(GrGpuResource* resource);
+ /// @}
+
+ void refAndMakeResourceMRU(GrGpuResource*);
+ void processFreedGpuResources();
+ void addToNonpurgeableArray(GrGpuResource*);
+ void removeFromNonpurgeableArray(GrGpuResource*);
+
+ bool wouldFit(size_t bytes) const { return fBudgetedBytes + bytes <= fMaxBytes; }
+
+ uint32_t getNextTimestamp();
+
+#ifdef SK_DEBUG
+ bool isInCache(const GrGpuResource* r) const;
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate;
+
+ class AvailableForScratchUse;
+
+ struct ScratchMapTraits {
+ static const GrScratchKey& GetKey(const GrGpuResource& r) {
+ return r.resourcePriv().getScratchKey();
+ }
+
+ static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
+ static void OnFree(GrGpuResource*) { }
+ };
+ typedef SkTMultiMap<GrGpuResource, GrScratchKey, ScratchMapTraits> ScratchMap;
+
+ struct UniqueHashTraits {
+ static const GrUniqueKey& GetKey(const GrGpuResource& r) { return r.getUniqueKey(); }
+
+ static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
+ };
+ typedef SkTDynamicHash<GrGpuResource, GrUniqueKey, UniqueHashTraits> UniqueHash;
+
+ class TextureAwaitingUnref {
+ public:
+ TextureAwaitingUnref();
+ TextureAwaitingUnref(GrTexture* texture);
+ TextureAwaitingUnref(const TextureAwaitingUnref&) = delete;
+ TextureAwaitingUnref& operator=(const TextureAwaitingUnref&) = delete;
+ TextureAwaitingUnref(TextureAwaitingUnref&&);
+ TextureAwaitingUnref& operator=(TextureAwaitingUnref&&);
+ ~TextureAwaitingUnref();
+ void addRef();
+ void unref();
+ bool finished();
+
+ private:
+ GrTexture* fTexture = nullptr;
+ int fNumUnrefs = 0;
+ };
+ using TexturesAwaitingUnref = SkTHashMap<uint32_t, TextureAwaitingUnref>;
+
+ static bool CompareTimestamp(GrGpuResource* const& a, GrGpuResource* const& b) {
+ return a->cacheAccess().timestamp() < b->cacheAccess().timestamp();
+ }
+
+ static int* AccessResourceIndex(GrGpuResource* const& res) {
+ return res->cacheAccess().accessCacheIndex();
+ }
+
+ typedef SkMessageBus<GrUniqueKeyInvalidatedMessage>::Inbox InvalidUniqueKeyInbox;
+ typedef SkMessageBus<GrTextureFreedMessage>::Inbox FreedTextureInbox;
+ typedef SkTDPQueue<GrGpuResource*, CompareTimestamp, AccessResourceIndex> PurgeableQueue;
+ typedef SkTDArray<GrGpuResource*> ResourceArray;
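+ // The purgeable queue is heap-ordered by CompareTimestamp (LRU). AccessResourceIndex
+ // writes each resource's heap index back onto the resource, so arbitrary removal in
+ // removeResource() avoids a linear search.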
+
+ GrProxyProvider* fProxyProvider = nullptr;
+ // Whenever a resource is added to the cache or returned from a cache lookup, fTimestamp is
+ // assigned as the resource's timestamp and then incremented. fPurgeableQueue orders the
+ // purgeable resources by this value, and thus is used to purge resources in LRU order.
+ uint32_t fTimestamp = 0;
+ PurgeableQueue fPurgeableQueue;
+ ResourceArray fNonpurgeableResources;
+
+ // This map holds all resources that can be used as scratch resources.
+ ScratchMap fScratchMap;
+ // This holds all resources that have unique keys.
+ UniqueHash fUniqueHash;
+
+ // our budget, used in purgeAsNeeded()
+ size_t fMaxBytes = kDefaultMaxSize;
+
+#if GR_CACHE_STATS
+ int fHighWaterCount = 0;
+ size_t fHighWaterBytes = 0;
+ int fBudgetedHighWaterCount = 0;
+ size_t fBudgetedHighWaterBytes = 0;
+#endif
+
+ // our current stats for all resources
+ SkDEBUGCODE(int fCount = 0;)
+ size_t fBytes = 0;
+
+ // our current stats for resources that count against the budget
+ int fBudgetedCount = 0;
+ size_t fBudgetedBytes = 0;
+ size_t fPurgeableBytes = 0;
+ int fNumBudgetedResourcesFlushWillMakePurgeable = 0;
+
+ InvalidUniqueKeyInbox fInvalidUniqueKeyInbox;
+ FreedTextureInbox fFreedTextureInbox;
+ TexturesAwaitingUnref fTexturesAwaitingUnref;
+
+ uint32_t fContextUniqueID = SK_InvalidUniqueID;
+ GrSingleOwner* fSingleOwner = nullptr;
+
+ // This resource is allowed to be in the nonpurgeable array for the sake of validate() because
+ // we're in the midst of converting it to purgeable status.
+ SkDEBUGCODE(GrGpuResource* fNewlyPurgeableResourceForValidation = nullptr;)
+
+ bool fPreferVRAMUseOverFlushes = false;
+};
+
+class GrResourceCache::ResourceAccess {
+private:
+ ResourceAccess(GrResourceCache* cache) : fCache(cache) { }
+ ResourceAccess(const ResourceAccess& that) : fCache(that.fCache) { }
+ ResourceAccess& operator=(const ResourceAccess&); // unimpl
+
+ /**
+ * Insert a resource into the cache.
+ */
+ void insertResource(GrGpuResource* resource) { fCache->insertResource(resource); }
+
+ /**
+ * Removes a resource from the cache.
+ */
+ void removeResource(GrGpuResource* resource) { fCache->removeResource(resource); }
+
+ /**
+ * Adds a ref to a resource with proper tracking if the resource has 0 refs prior to
+ * adding the ref.
+ */
+ void refResource(GrGpuResource* resource) { fCache->refResource(resource); }
+
+ /**
+ * Notifications that should be sent to the cache when the ref/io cnt status of resources
+ * changes.
+ */
+ enum RefNotificationFlags {
+ /** All types of refs on the resource have reached zero. */
+ kAllCntsReachedZero_RefNotificationFlag = 0x1,
+ /** The normal (not pending IO type) ref cnt has reached zero. */
+ kRefCntReachedZero_RefNotificationFlag = 0x2,
+ };
+ /**
+ * Called by GrGpuResources when they detect that their ref cnt has reached zero.
+ */
+ void notifyRefCntReachedZero(GrGpuResource* resource) {
+ fCache->notifyRefCntReachedZero(resource);
+ }
+
+ /**
+ * Called by GrGpuResources to change their unique keys.
+ */
+ void changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
+ fCache->changeUniqueKey(resource, newKey);
+ }
+
+ /**
+ * Called by a GrGpuResource to remove its unique key.
+ */
+ void removeUniqueKey(GrGpuResource* resource) { fCache->removeUniqueKey(resource); }
+
+ /**
+ * Called by a GrGpuResource when it removes its scratch key.
+ */
+ void willRemoveScratchKey(const GrGpuResource* resource) {
+ fCache->willRemoveScratchKey(resource);
+ }
+
+ /**
+ * Called by GrGpuResources when they change from budgeted to unbudgeted or vice versa.
+ */
+ void didChangeBudgetStatus(GrGpuResource* resource) { fCache->didChangeBudgetStatus(resource); }
+
+ // No taking addresses of this type.
+ const ResourceAccess* operator&() const;
+ ResourceAccess* operator&();
+
+ GrResourceCache* fCache;
+
+ friend class GrGpuResource; // To access all the proxy inline methods.
+ friend class GrResourceCache; // To create this type.
+};
+
+inline GrResourceCache::ResourceAccess GrResourceCache::resourceAccess() {
+ return ResourceAccess(this);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceHandle.h b/gfx/skia/skia/src/gpu/GrResourceHandle.h
new file mode 100644
index 0000000000..30440a7125
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceHandle.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceHandle_DEFINED
+#define GrResourceHandle_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// Opaque handle to a resource. Users should always use the macro below to create a specific
+// template instantiation of GrResourceHandle.
+template <typename kind> class GrResourceHandle {
+public:
+ GrResourceHandle(int value) : fValue(value) {
+ SkASSERT(this->isValid());
+ }
+
+ GrResourceHandle() : fValue(kInvalid_ResourceHandle) {}
+
+ bool operator==(const GrResourceHandle& other) const { return other.fValue == fValue; }
+ bool isValid() const { return kInvalid_ResourceHandle != fValue; }
+ int toIndex() const { SkASSERT(this->isValid()); return fValue; }
+
+private:
+ static const int kInvalid_ResourceHandle = -1;
+ int fValue;
+};
+
+// Creates a type "name", which is a specfic template instantiation of GrResourceHandle.
+#define GR_DEFINE_RESOURCE_HANDLE_CLASS(name) \
+ struct name##Kind {}; \
+ using name = GrResourceHandle<name##Kind>;
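+
+// Illustrative use (hypothetical handle type):
+// GR_DEFINE_RESOURCE_HANDLE_CLASS(GrExampleHandle);
+// GrExampleHandle h(2); SkASSERT(h.isValid() && 2 == h.toIndex());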
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceProvider.cpp b/gfx/skia/skia/src/gpu/GrResourceProvider.cpp
new file mode 100644
index 0000000000..9ba9505a4a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceProvider.cpp
@@ -0,0 +1,581 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrResourceProvider.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrContext.h"
+#include "include/private/GrResourceKey.h"
+#include "include/private/GrSingleOwner.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrPath.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/SkGr.h"
+
+const uint32_t GrResourceProvider::kMinScratchTextureSize = 16;
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
+
+GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
+ : fCache(cache)
+ , fGpu(gpu)
+#ifdef SK_DEBUG
+ , fSingleOwner(owner)
+#endif
+{
+ fCaps = sk_ref_sp(fGpu->caps());
+}
+
+sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrColorType colorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ const GrMipLevel texels[],
+ int mipLevelCount) {
+ ASSERT_SINGLE_OWNER
+
+ SkASSERT(mipLevelCount > 0);
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
+ if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
+ renderTargetSampleCnt, mipMapped)) {
+ return nullptr;
+ }
+ // Current rule is that you can provide no level data, just the base, or all the levels.
+ bool hasPixels = mipLevelCount && texels[0].fPixels;
+ auto scratch = this->getExactScratch(desc, format, renderable, renderTargetSampleCnt, budgeted,
+ mipMapped, isProtected);
+ if (scratch) {
+ if (!hasPixels) {
+ return scratch;
+ }
+ return this->writePixels(std::move(scratch), colorType, {desc.fWidth, desc.fHeight}, texels,
+ mipLevelCount);
+ }
+ SkAutoSTMalloc<14, GrMipLevel> tmpTexels;
+ SkAutoSTArray<14, std::unique_ptr<char[]>> tmpDatas;
+ GrColorType tempColorType = GrColorType::kUnknown;
+ if (hasPixels) {
+ tempColorType = this->prepareLevels(format, colorType, {desc.fWidth, desc.fHeight}, texels,
+ mipLevelCount, &tmpTexels, &tmpDatas);
+ if (tempColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+ }
+ return fGpu->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
+ isProtected, colorType, tempColorType, tmpTexels.get(),
+ mipLevelCount);
+}
+
+sk_sp<GrTexture> GrResourceProvider::getExactScratch(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrMipMapped mipMapped,
+ GrProtected isProtected) {
+ sk_sp<GrTexture> tex(this->refScratchTexture(desc, format, renderable, renderTargetSampleCnt,
+ mipMapped, isProtected));
+ if (tex && SkBudgeted::kNo == budgeted) {
+ tex->resourcePriv().makeUnbudgeted();
+ }
+
+ return tex;
+}
+
+sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrColorType colorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ SkBackingFit fit,
+ GrProtected isProtected,
+ const GrMipLevel& mipLevel) {
+ ASSERT_SINGLE_OWNER
+
+ if (!mipLevel.fPixels) {
+ return nullptr;
+ }
+
+ if (SkBackingFit::kApprox == fit) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
+ renderable, renderTargetSampleCnt, GrMipMapped::kNo)) {
+ return nullptr;
+ }
+
+ auto tex = this->createApproxTexture(desc, format, renderable, renderTargetSampleCnt,
+ isProtected);
+ if (!tex) {
+ return nullptr;
+ }
+ return this->writePixels(std::move(tex), colorType, {desc.fWidth, desc.fHeight}, &mipLevel,
+ 1);
+ } else {
+ return this->createTexture(desc, format, colorType, renderable, renderTargetSampleCnt,
+ budgeted, isProtected, &mipLevel, 1);
+ }
+}
+
+sk_sp<GrTexture> GrResourceProvider::createCompressedTexture(int width, int height,
+ const GrBackendFormat& format,
+ SkImage::CompressionType compression,
+ SkBudgeted budgeted, SkData* data) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return fGpu->createCompressedTexture(width, height, format, compression, budgeted, data->data(),
+ data->size());
+}
+
+sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrMipMapped mipMapped,
+ SkBudgeted budgeted,
+ GrProtected isProtected) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
+ renderTargetSampleCnt, mipMapped)) {
+ return nullptr;
+ }
+
+ // Compressed textures are read-only so they don't support re-use for scratch.
+ // TODO: Support GrMipMapped::kYes in scratch texture lookup here.
+ if (!GrPixelConfigIsCompressed(desc.fConfig)) {
+ sk_sp<GrTexture> tex = this->getExactScratch(
+ desc, format, renderable, renderTargetSampleCnt, budgeted, mipMapped, isProtected);
+ if (tex) {
+ return tex;
+ }
+ }
+
+ return fGpu->createTexture(desc, format, renderable, renderTargetSampleCnt, mipMapped, budgeted,
+ isProtected);
+}
+
+// Map 'value' to a larger multiple of 2. Values <= 'kMagicTol' are rounded up to
+// the next power of 2. Values above 'kMagicTol' are rounded up in steps of half the
+// floor power of 2: either to 1.5x the floor power of 2 or to the ceiling power of 2.
+uint32_t GrResourceProvider::MakeApprox(uint32_t value) {
+ static const int kMagicTol = 1024;
+
+ value = SkTMax(kMinScratchTextureSize, value);
+
+ if (SkIsPow2(value)) {
+ return value;
+ }
+
+ uint32_t ceilPow2 = GrNextPow2(value);
+ if (value <= kMagicTol) {
+ return ceilPow2;
+ }
+
+ uint32_t floorPow2 = ceilPow2 >> 1;
+ uint32_t mid = floorPow2 + (floorPow2 >> 1);
+
+ if (value <= mid) {
+ return mid;
+ }
+
+ return ceilPow2;
+}
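+
+// Illustrative examples of the binning above (assuming kMinScratchTextureSize
+// is no larger than the values shown; the constant is defined elsewhere):
+//   MakeApprox(100)  == 128   // <= kMagicTol: next power of 2
+//   MakeApprox(1024) == 1024  // already a power of 2: returned unchanged
+//   MakeApprox(1100) == 1536  // > kMagicTol: floorPow2 + floorPow2/2
+//   MakeApprox(1600) == 2048  // above that midpoint: ceilPow2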
+
+sk_sp<GrTexture> GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrProtected isProtected) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ // Currently we don't recycle compressed textures as scratch.
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ return nullptr;
+ }
+
+ if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
+ renderTargetSampleCnt, GrMipMapped::kNo)) {
+ return nullptr;
+ }
+
+ // bin by some multiple or power of 2 with a reasonable min
+ GrSurfaceDesc copyDesc(desc);
+ copyDesc.fWidth = MakeApprox(desc.fWidth);
+ copyDesc.fHeight = MakeApprox(desc.fHeight);
+
+ if (auto tex = this->refScratchTexture(copyDesc, format, renderable, renderTargetSampleCnt,
+ GrMipMapped::kNo, isProtected)) {
+ return tex;
+ }
+
+ return fGpu->createTexture(copyDesc, format, renderable, renderTargetSampleCnt,
+ GrMipMapped::kNo, SkBudgeted::kYes, isProtected);
+}
+
+sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrMipMapped mipMapped,
+ GrProtected isProtected) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(!this->isAbandoned());
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
+ SkASSERT(fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
+ renderable, renderTargetSampleCnt, GrMipMapped::kNo));
+
+ // We could make initial clears work with scratch textures but it is a rare case so we just opt
+ // to fall back to making a new texture.
+ if (fGpu->caps()->reuseScratchTextures() || renderable == GrRenderable::kYes) {
+ GrScratchKey key;
+ GrTexturePriv::ComputeScratchKey(desc.fConfig, desc.fWidth, desc.fHeight, renderable,
+ renderTargetSampleCnt, mipMapped, isProtected, &key);
+ GrGpuResource* resource = fCache->findAndRefScratchResource(key);
+ if (resource) {
+ fGpu->stats()->incNumScratchTexturesReused();
+ GrSurface* surface = static_cast<GrSurface*>(resource);
+ return sk_sp<GrTexture>(surface->asTexture());
+ }
+ }
+
+ return nullptr;
+}
+
+sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTexture& tex,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable,
+ GrIOType ioType) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return fGpu->wrapBackendTexture(tex, colorType, ownership, cacheable, ioType);
+}
+
+sk_sp<GrTexture> GrResourceProvider::wrapRenderableBackendTexture(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return fGpu->wrapRenderableBackendTexture(tex, sampleCnt, colorType, ownership, cacheable);
+}
+
+sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
+ const GrBackendRenderTarget& backendRT, GrColorType colorType)
+{
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(backendRT, colorType);
+}
+
+sk_sp<GrRenderTarget> GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget(
+ const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr : fGpu->wrapVulkanSecondaryCBAsRenderTarget(imageInfo,
+ vkInfo);
+
+}
+
+void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
+ GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned() || !resource) {
+ return;
+ }
+ resource->resourcePriv().setUniqueKey(key);
+}
+
+sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueKey& key) {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr
+ : sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
+}
+
+sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
+ size_t size,
+ const void* data,
+ const GrUniqueKey& key) {
+ if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
+ return buffer;
+ }
+ if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) {
+ // We shouldn't bin and/or cache static buffers.
+ SkASSERT(buffer->size() == size);
+ SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
+ buffer->resourcePriv().setUniqueKey(key);
+ return sk_sp<const GrGpuBuffer>(buffer);
+ }
+ return nullptr;
+}
+
+sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey* key) {
+ size_t bufferSize = patternSize * reps * sizeof(uint16_t);
+
+ sk_sp<GrGpuBuffer> buffer(
+ this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern));
+ if (!buffer) {
+ return nullptr;
+ }
+ uint16_t* data = (uint16_t*) buffer->map();
+ SkAutoTArray<uint16_t> temp;
+ if (!data) {
+ temp.reset(reps * patternSize);
+ data = temp.get();
+ }
+ for (int i = 0; i < reps; ++i) {
+ int baseIdx = i * patternSize;
+ uint16_t baseVert = (uint16_t)(i * vertCount);
+ for (int j = 0; j < patternSize; ++j) {
+ data[baseIdx+j] = baseVert + pattern[j];
+ }
+ }
+ if (temp.get()) {
+ if (!buffer->updateData(data, bufferSize)) {
+ return nullptr;
+ }
+ } else {
+ buffer->unmap();
+ }
+ if (key) {
+ SkASSERT(key->isValid());
+ this->assignUniqueKeyToResource(*key, buffer.get());
+ }
+ return buffer;
+}
+
+static constexpr int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
+
+sk_sp<const GrGpuBuffer> GrResourceProvider::createQuadIndexBuffer() {
+ GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
+ static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 };
+ return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, nullptr);
+}
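+
+// For illustration, with kPattern above the first two quads expand as
+// (baseVert = 4 * i):
+//   quad 0: 0, 1, 2, 2, 1, 3
+//   quad 1: 4, 5, 6, 6, 5, 7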
+
+int GrResourceProvider::QuadCountOfQuadBuffer() { return kMaxQuads; }
+
+sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+
+ SkASSERT(this->gpu()->pathRendering());
+ return this->gpu()->pathRendering()->createPath(path, style);
+}
+
+sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern,
+ const void* data) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ if (kDynamic_GrAccessPattern != accessPattern) {
+ return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
+ }
+ // bin by pow2 with a reasonable min
+ static const size_t MIN_SIZE = 1 << 12;
+ size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));
+
+ GrScratchKey key;
+ GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
+ auto buffer =
+ sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
+ key)));
+ if (!buffer) {
+ buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
+ if (!buffer) {
+ return nullptr;
+ }
+ }
+ if (data) {
+ buffer->updateData(data, size);
+ }
+ return buffer;
+}
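+
+// Illustrative binning for the dynamic-buffer path above (MIN_SIZE == 4096):
+//   size 100  -> allocSize 4096  // clamped up to MIN_SIZE
+//   size 5000 -> allocSize 8192  // rounded up to the next power of 2
+// Requests that bin to the same allocSize can recycle one scratch buffer.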
+
+bool GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt, int minStencilSampleCount) {
+ SkASSERT(rt);
+ GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
+ if (stencil && stencil->numSamples() >= minStencilSampleCount) {
+ return true;
+ }
+
+ if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
+ GrUniqueKey sbKey;
+
+ int width = rt->width();
+ int height = rt->height();
+#if 0
+ if (this->caps()->oversizedStencilSupport()) {
+ width = SkNextPow2(width);
+ height = SkNextPow2(height);
+ }
+#endif
+ GrStencilAttachment::ComputeSharedStencilAttachmentKey(
+ width, height, minStencilSampleCount, &sbKey);
+ auto stencil = this->findByUniqueKey<GrStencilAttachment>(sbKey);
+ if (!stencil) {
+ // Need to try and create a new stencil
+ stencil.reset(this->gpu()->createStencilAttachmentForRenderTarget(
+ rt, width, height, minStencilSampleCount));
+ if (!stencil) {
+ return false;
+ }
+ this->assignUniqueKeyToResource(sbKey, stencil.get());
+ }
+ rt->renderTargetPriv().attachStencilAttachment(std::move(stencil));
+ }
+
+ if (GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment()) {
+ return stencil->numSamples() >= minStencilSampleCount;
+ }
+ return false;
+}
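+
+// Sketch of the sharing above: two render targets with equal dimensions and
+// minStencilSampleCount compute the same sbKey, so the second call finds the
+// first call's attachment via findByUniqueKey and both targets share a single
+// GrStencilAttachment instead of allocating two.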
+
+sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
+ const GrBackendTexture& tex, int sampleCnt, GrColorType colorType)
+{
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return fGpu->wrapBackendTextureAsRenderTarget(tex, sampleCnt, colorType);
+}
+
+sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore(bool isOwned) {
+ return fGpu->makeSemaphore(isOwned);
+}
+
+sk_sp<GrSemaphore> GrResourceProvider::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr : fGpu->wrapBackendSemaphore(semaphore,
+ wrapType,
+ ownership);
+}
+
+// Ensures the row bytes are populated (not 0) and makes a copy to a temporary
+// to make the row bytes tight if necessary. Returns false if the input row bytes are invalid.
+static bool prepare_level(const GrMipLevel& inLevel,
+ const SkISize& size,
+ bool rowBytesSupport,
+ GrColorType origColorType,
+ GrColorType allowedColorType,
+ GrMipLevel* outLevel,
+ std::unique_ptr<char[]>* data) {
+ if (!inLevel.fPixels) {
+ outLevel->fPixels = nullptr;
+ outLevel->fRowBytes = 0;
+ return true;
+ }
+ size_t minRB = size.fWidth * GrColorTypeBytesPerPixel(origColorType);
+ size_t actualRB = inLevel.fRowBytes ? inLevel.fRowBytes : minRB;
+ if (actualRB < minRB) {
+ return false;
+ }
+ if (origColorType == allowedColorType && (actualRB == minRB || rowBytesSupport)) {
+ outLevel->fRowBytes = actualRB;
+ outLevel->fPixels = inLevel.fPixels;
+ return true;
+ }
+ auto tempRB = size.fWidth * GrColorTypeBytesPerPixel(allowedColorType);
+ data->reset(new char[tempRB * size.fHeight]);
+ outLevel->fPixels = data->get();
+ outLevel->fRowBytes = tempRB;
+ GrImageInfo srcInfo(origColorType, kUnpremul_SkAlphaType, nullptr, size);
+ GrImageInfo dstInfo(allowedColorType, kUnpremul_SkAlphaType, nullptr, size);
+ return GrConvertPixels(dstInfo, data->get(), tempRB, srcInfo, inLevel.fPixels, actualRB);
+}
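+
+// Worked example for the logic above (hypothetical values): a 100px-wide level
+// of a 4-byte-per-pixel color type has minRB == 400. An input fRowBytes of 512
+// is accepted (512 >= 400) and passed through when rowBytesSupport is true and
+// the color type is unchanged; otherwise the level is copied into a tight
+// temporary whose row bytes are size.fWidth times the bytes-per-pixel of
+// allowedColorType.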
+
+GrColorType GrResourceProvider::prepareLevels(const GrBackendFormat& format,
+ GrColorType colorType,
+ const SkISize& baseSize,
+ const GrMipLevel texels[],
+ int mipLevelCount,
+ TempLevels* tempLevels,
+ TempLevelDatas* tempLevelDatas) const {
+ SkASSERT(mipLevelCount && texels && texels[0].fPixels);
+
+ auto allowedColorType =
+ this->caps()->supportedWritePixelsColorType(colorType, format, colorType).fColorType;
+ if (allowedColorType == GrColorType::kUnknown) {
+ return GrColorType::kUnknown;
+ }
+ bool rowBytesSupport = this->caps()->writePixelsRowBytesSupport();
+ tempLevels->reset(mipLevelCount);
+ tempLevelDatas->reset(mipLevelCount);
+ auto size = baseSize;
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (!prepare_level(texels[i], size, rowBytesSupport, colorType, allowedColorType,
+ &(*tempLevels)[i], &(*tempLevelDatas)[i])) {
+ return GrColorType::kUnknown;
+ }
+ size = {std::max(size.fWidth / 2, 1), std::max(size.fHeight / 2, 1)};
+ }
+ return allowedColorType;
+}
+
+sk_sp<GrTexture> GrResourceProvider::writePixels(sk_sp<GrTexture> texture,
+ GrColorType colorType,
+ const SkISize& baseSize,
+ const GrMipLevel texels[],
+ int mipLevelCount) const {
+ SkASSERT(!this->isAbandoned());
+ SkASSERT(texture);
+ SkASSERT(colorType != GrColorType::kUnknown);
+ SkASSERT(mipLevelCount && texels && texels[0].fPixels);
+
+ SkAutoSTMalloc<14, GrMipLevel> tmpTexels;
+ SkAutoSTArray<14, std::unique_ptr<char[]>> tmpDatas;
+ auto tempColorType = this->prepareLevels(texture->backendFormat(), colorType, baseSize, texels,
+ mipLevelCount, &tmpTexels, &tmpDatas);
+ if (tempColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+ SkAssertResult(fGpu->writePixels(texture.get(), 0, 0, baseSize.fWidth, baseSize.fHeight,
+ colorType, tempColorType, tmpTexels.get(), mipLevelCount));
+ return texture;
+}
diff --git a/gfx/skia/skia/src/gpu/GrResourceProvider.h b/gfx/skia/skia/src/gpu/GrResourceProvider.h
new file mode 100644
index 0000000000..12d5d02ce9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceProvider.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceProvider_DEFINED
+#define GrResourceProvider_DEFINED
+
+#include "include/gpu/GrContextOptions.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkScalerContext.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/GrResourceCache.h"
+
+class GrBackendRenderTarget;
+class GrBackendSemaphore;
+class GrBackendTexture;
+class GrGpu;
+class GrPath;
+class GrRenderTarget;
+class GrResourceProviderPriv;
+class GrSemaphore;
+class GrSingleOwner;
+class GrStencilAttachment;
+class GrTexture;
+struct GrVkDrawableInfo;
+
+class GrStyle;
+class SkDescriptor;
+class SkPath;
+class SkTypeface;
+
+/**
+ * A factory for arbitrary resource types.
+ */
+class GrResourceProvider {
+public:
+ GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*);
+
+ /**
+ * Finds a resource in the cache, based on the specified key. Prior to calling this, the caller
+ * must be sure that if a resource exists in the cache with the given unique key then it is
+ * of type T.
+ */
+ template <typename T = GrGpuResource>
+ typename std::enable_if<std::is_base_of<GrGpuResource, T>::value, sk_sp<T>>::type
+ findByUniqueKey(const GrUniqueKey& key) {
+ return sk_sp<T>(static_cast<T*>(this->findResourceByUniqueKey(key).release()));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Textures
+
+ /**
+ * Finds a texture that approximately matches the descriptor. Will be at least as large in width
+ * and height as desc specifies. If renderable is kYes then the GrTexture will also be a
+ * GrRenderTarget. The texture's format and sample count will always match the request.
+ * The contents of the texture are undefined.
+ */
+ sk_sp<GrTexture> createApproxTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrProtected isProtected);
+
+ /** Create an exact fit texture with no initial data to upload. */
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ GrMipMapped mipMapped,
+ SkBudgeted budgeted,
+ GrProtected isProtected);
+
+ /**
+ * Create an exact fit texture with initial data to upload. The color type must be valid
+ * for the format and also describe the texel data. This will ensure any conversions that
+ * need to get applied to the data before upload are applied.
+ */
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrColorType colorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ const GrMipLevel texels[],
+ int mipLevelCount);
+
+ /**
+ * Create a potentially loose fit texture with the provided data. The color type must be valid
+ * for the format and also describe the texel data. This will ensure any conversions that
+ * need to get applied to the data before upload are applied.
+ */
+ sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrColorType srcColorType,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ SkBackingFit fit,
+ GrProtected isProtected,
+ const GrMipLevel& mipLevel);
+
+ /**
+ * Creates a compressed texture. The GrGpu must support the SkImage::CompressionType.
+ * This does not currently support MIP maps. It will not be renderable.
+ */
+ sk_sp<GrTexture> createCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted, SkData* data);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Wrapped Backend Surfaces
+
+ /**
+ * Wraps an existing texture with a GrTexture object.
+ *
+ * GrIOType must either be kRead or kRW. kRead blocks any operations that would modify the
+ * pixels (e.g. dst for a copy, regenerating MIP levels, write pixels).
+ *
+ * OpenGL: if the object is a texture Gr may change its GL texture params
+ * when it is drawn.
+ *
+ * @return GrTexture object or NULL on failure.
+ */
+ sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture& tex, GrColorType, GrWrapOwnership,
+ GrWrapCacheable, GrIOType);
+
+ /**
+ * This makes the backend texture renderable. If sampleCnt is > 1 and the underlying API
+ * uses separate MSAA render buffers, then an MSAA render buffer is created that resolves
+ * to the texture.
+ */
+ sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType,
+ GrWrapOwnership,
+ GrWrapCacheable);
+
+ /**
+ * Wraps an existing render target with a GrRenderTarget object. It is
+ * similar to wrapBackendTexture but can be used to draw into surfaces
+ * that are not also textures (e.g. FBO 0 in OpenGL, or an MSAA buffer that
+ * the client will resolve to a texture). Currently wrapped render targets
+ * always use the kBorrow_GrWrapOwnership and GrWrapCacheable::kNo semantics.
+ *
+ * @return GrRenderTarget object or NULL on failure.
+ */
+ sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType colorType);
+
+ sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
+ const GrVkDrawableInfo&);
+
+ static const uint32_t kMinScratchTextureSize;
+
+ /**
+ * Either finds and refs, or creates a static buffer with the given parameters and contents.
+ *
+ * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
+ * @param size minimum size of buffer to return.
+ * @param data optional data with which to initialize the buffer.
+ * @param key Key to be assigned to the buffer.
+ *
+ * @return The buffer if successful, otherwise nullptr.
+ */
+ sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
+ const void* data, const GrUniqueKey& key);
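+
+ /**
+ * Hypothetical usage sketch (names and the key's construction are illustrative;
+ * real call sites build the GrUniqueKey with GrUniqueKey::Builder):
+ *
+ * static const uint16_t kIndices[] = { 0, 1, 2, 0, 2, 3 };
+ * sk_sp<const GrGpuBuffer> buf = resourceProvider->findOrMakeStaticBuffer(
+ * GrGpuBufferType::kIndex, sizeof(kIndices), kIndices, key);
+ *
+ * Repeated calls with the same key ref the cached buffer rather than recreating it.
+ */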
+
+ /**
+ * Either finds and refs, or creates an index buffer with a repeating pattern for drawing
+ * contiguous vertices of a repeated mesh. If the return is non-null, the caller owns a ref on
+ * the returned GrBuffer.
+ *
+ * @param pattern the pattern of indices to repeat
+ * @param patternSize size in bytes of the pattern
+ * @param reps number of times to repeat the pattern
+ * @param vertCount number of vertices the pattern references
+ * @param key Key to be assigned to the index buffer.
+ *
+ * @return The index buffer if successful, otherwise nullptr.
+ */
+ sk_sp<const GrGpuBuffer> findOrCreatePatternedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key) {
+ if (auto buffer = this->findByUniqueKey<const GrGpuBuffer>(key)) {
+ return buffer;
+ }
+ return this->createPatternedIndexBuffer(pattern, patternSize, reps, vertCount, &key);
+ }
+
+ /**
+ * Returns an index buffer that can be used to render quads.
+ * Six indices per quad: 0, 1, 2, 2, 1, 3, etc.
+ * The max number of quads is the buffer's index capacity divided by 6.
+ * Draw with GrPrimitiveType::kTriangles.
+ * @return the quad index buffer
+ */
+ sk_sp<const GrGpuBuffer> refQuadIndexBuffer() {
+ if (!fQuadIndexBuffer) {
+ fQuadIndexBuffer = this->createQuadIndexBuffer();
+ }
+ return fQuadIndexBuffer;
+ }
+
+ static int QuadCountOfQuadBuffer();
+
+ /**
+ * Factories for GrPath objects. It's an error to call these if path rendering
+ * is not supported.
+ */
+ sk_sp<GrPath> createPath(const SkPath&, const GrStyle&);
+
+ /**
+ * Returns a buffer.
+ *
+ * @param size minimum size of buffer to return.
+ * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
+ * @param GrAccessPattern hint to the graphics subsystem about how the data will be accessed.
+ * @param data optional data with which to initialize the buffer.
+ *
+ * @return the buffer if successful, otherwise nullptr.
+ */
+ sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data = nullptr);
+
+ /**
+ * If the passed-in render target already has a stencil buffer with at least "numSamples"
+ * samples, returns true. Otherwise attempts to attach one and returns true on success.
+ */
+ bool attachStencilAttachment(GrRenderTarget* rt, int numStencilSamples);
+
+ /**
+ * Wraps an existing texture with a GrRenderTarget object. This is useful when the provided
+ * texture has a format that cannot be textured from by Skia, but we want to raster to it.
+ *
+ * The texture is wrapped as borrowed. The texture object will not be freed once the
+ * render target is destroyed.
+ *
+ * @return GrRenderTarget object or NULL on failure.
+ */
+ sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType);
+
+ /**
+ * Assigns a unique key to a resource. If the key is associated with another resource that
+ * association is removed and replaced by this resource.
+ */
+ void assignUniqueKeyToResource(const GrUniqueKey&, GrGpuResource*);
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true);
+
+ enum class SemaphoreWrapType {
+ kWillSignal,
+ kWillWait,
+ };
+
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
+ SemaphoreWrapType wrapType,
+ GrWrapOwnership = kBorrow_GrWrapOwnership);
+
+ void abandon() {
+ fCache = nullptr;
+ fGpu = nullptr;
+ }
+
+ uint32_t contextUniqueID() const { return fCache->contextUniqueID(); }
+ const GrCaps* caps() const { return fCaps.get(); }
+ bool overBudget() const { return fCache->overBudget(); }
+
+ static uint32_t MakeApprox(uint32_t value);
+
+ inline GrResourceProviderPriv priv();
+ inline const GrResourceProviderPriv priv() const;
+
+private:
+ sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
+
+ // Attempts to find a resource in the cache that exactly matches the GrSurfaceDesc. Failing
+ // that, it returns null. If non-null, the resulting texture is always budgeted.
+ sk_sp<GrTexture> refScratchTexture(const GrSurfaceDesc&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ GrMipMapped,
+ GrProtected);
+
+ /*
+ * Try to find an existing scratch texture that exactly matches 'desc'. If successful,
+ * update the budgeting accordingly.
+ */
+ sk_sp<GrTexture> getExactScratch(const GrSurfaceDesc&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrMipMapped,
+ GrProtected);
+
+ // Used to perform any conversions necessary to texel data before creating a texture with
+ // existing data or uploading to a scratch texture.
+ using TempLevels = SkAutoSTMalloc<14, GrMipLevel>;
+ using TempLevelDatas = SkAutoSTArray<14, std::unique_ptr<char[]>>;
+ GrColorType prepareLevels(const GrBackendFormat& format,
+ GrColorType,
+ const SkISize& baseSize,
+ const GrMipLevel texels[],
+ int mipLevelCount,
+ TempLevels*,
+ TempLevelDatas*) const;
+
+ // GrResourceProvider may be asked to "create" a new texture with initial pixel data to populate
+ // it. In implementation it may pull an existing texture from GrResourceCache and then write the
+ // pixel data to the texture. It takes a width/height for the base level because we may be
+ // using an approximate-sized scratch texture. Returns the texture on success and nullptr
+ // on failure.
+ sk_sp<GrTexture> writePixels(sk_sp<GrTexture> texture,
+ GrColorType colorType,
+ const SkISize& baseSize,
+ const GrMipLevel texels[],
+ int mipLevelCount) const;
+
+ GrResourceCache* cache() { return fCache; }
+ const GrResourceCache* cache() const { return fCache; }
+
+ friend class GrResourceProviderPriv;
+
+ // Method made available via GrResourceProviderPriv
+ GrGpu* gpu() { return fGpu; }
+ const GrGpu* gpu() const { return fGpu; }
+
+ bool isAbandoned() const {
+ SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
+ return !SkToBool(fCache);
+ }
+
+ sk_sp<const GrGpuBuffer> createPatternedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey* key);
+
+ sk_sp<const GrGpuBuffer> createQuadIndexBuffer();
+
+ GrResourceCache* fCache;
+ GrGpu* fGpu;
+ sk_sp<const GrCaps> fCaps;
+ sk_sp<const GrGpuBuffer> fQuadIndexBuffer;
+
+ // In debug builds we guard against improper thread handling
+ SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceProviderPriv.h b/gfx/skia/skia/src/gpu/GrResourceProviderPriv.h
new file mode 100644
index 0000000000..a440f959b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceProviderPriv.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceProviderPriv_DEFINED
+#define GrResourceProviderPriv_DEFINED
+
+#include "src/gpu/GrResourceProvider.h"
+
+/** Class that adds methods to GrResourceProvider that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrResourceProvider. It should never have
+ additional data members or virtual methods. */
+class GrResourceProviderPriv {
+public:
+ GrGpu* gpu() { return fResourceProvider->gpu(); }
+
+private:
+ explicit GrResourceProviderPriv(GrResourceProvider* provider) : fResourceProvider(provider) {}
+ GrResourceProviderPriv(const GrResourceProviderPriv&); // unimpl
+ GrResourceProviderPriv& operator=(const GrResourceProviderPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrResourceProviderPriv* operator&() const;
+ GrResourceProviderPriv* operator&();
+
+ GrResourceProvider* fResourceProvider;
+ friend class GrResourceProvider; // to construct/copy this type
+};
+
+inline GrResourceProviderPriv GrResourceProvider::priv() { return GrResourceProviderPriv(this); }
+
+inline const GrResourceProviderPriv GrResourceProvider::priv() const {
+ return GrResourceProviderPriv(const_cast<GrResourceProvider*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp b/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp
new file mode 100644
index 0000000000..879a028511
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrSWMaskHelper.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/geometry/GrShape.h"
+
+/*
+ * Convert a boolean operation into a transfer mode code
+ */
+static SkBlendMode op_to_mode(SkRegion::Op op) {
+
+ static const SkBlendMode modeMap[] = {
+ SkBlendMode::kDstOut, // kDifference_Op
+ SkBlendMode::kModulate, // kIntersect_Op
+ SkBlendMode::kSrcOver, // kUnion_Op
+ SkBlendMode::kXor, // kXOR_Op
+ SkBlendMode::kClear, // kReverseDifference_Op
+ SkBlendMode::kSrc, // kReplace_Op
+ };
+
+ return modeMap[op];
+}
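+
+// For example, kIntersect_Op maps to kModulate, so the new element's coverage
+// multiplies the accumulated mask and only the overlap survives; kReplace_Op
+// maps to kSrc, which overwrites the accumulated coverage outright.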
+
+/**
+ * Draw a single rect element of the clip stack into the accumulation bitmap
+ */
+void GrSWMaskHelper::drawRect(const SkRect& rect, const SkMatrix& matrix, SkRegion::Op op, GrAA aa,
+ uint8_t alpha) {
+ SkPaint paint;
+ paint.setBlendMode(op_to_mode(op));
+ paint.setAntiAlias(GrAA::kYes == aa);
+ paint.setColor(SkColorSetARGB(alpha, alpha, alpha, alpha));
+
+ SkMatrix translatedMatrix = matrix;
+ translatedMatrix.postTranslate(fTranslate.fX, fTranslate.fY);
+ fDraw.fMatrix = &translatedMatrix;
+
+ fDraw.drawRect(rect, paint);
+}
+
+/**
+ * Draw a single path element of the clip stack into the accumulation bitmap
+ */
+void GrSWMaskHelper::drawShape(const GrShape& shape, const SkMatrix& matrix, SkRegion::Op op,
+ GrAA aa, uint8_t alpha) {
+ SkPaint paint;
+ paint.setPathEffect(shape.style().refPathEffect());
+ shape.style().strokeRec().applyToPaint(&paint);
+ paint.setAntiAlias(GrAA::kYes == aa);
+
+ SkMatrix translatedMatrix = matrix;
+ translatedMatrix.postTranslate(fTranslate.fX, fTranslate.fY);
+ fDraw.fMatrix = &translatedMatrix;
+
+ SkPath path;
+ shape.asPath(&path);
+ if (SkRegion::kReplace_Op == op && 0xFF == alpha) {
+ SkASSERT(0xFF == paint.getAlpha());
+ fDraw.drawPathCoverage(path, paint);
+ } else {
+ paint.setBlendMode(op_to_mode(op));
+ paint.setColor(SkColorSetARGB(alpha, alpha, alpha, alpha));
+ fDraw.drawPath(path, paint);
+ }
+}
+
+bool GrSWMaskHelper::init(const SkIRect& resultBounds) {
+ // We will need to translate draws so the bounds' UL corner is at the origin
+ fTranslate = {-SkIntToScalar(resultBounds.fLeft), -SkIntToScalar(resultBounds.fTop)};
+ SkIRect bounds = SkIRect::MakeWH(resultBounds.width(), resultBounds.height());
+
+ const SkImageInfo bmImageInfo = SkImageInfo::MakeA8(bounds.width(), bounds.height());
+ if (!fPixels->tryAlloc(bmImageInfo)) {
+ return false;
+ }
+ fPixels->erase(0);
+
+ fDraw.fDst = *fPixels;
+ fRasterClip.setRect(bounds);
+ fDraw.fRC = &fRasterClip;
+ return true;
+}
+
+sk_sp<GrTextureProxy> GrSWMaskHelper::toTextureProxy(GrRecordingContext* context,
+ SkBackingFit fit) {
+ SkImageInfo ii = SkImageInfo::MakeA8(fPixels->width(), fPixels->height());
+ size_t rowBytes = fPixels->rowBytes();
+
+ sk_sp<SkData> data = fPixels->detachPixelsAsData();
+ if (!data) {
+ return nullptr;
+ }
+
+ sk_sp<SkImage> img = SkImage::MakeRasterData(ii, std::move(data), rowBytes);
+ if (!img) {
+ return nullptr;
+ }
+
+ return context->priv().proxyProvider()->createTextureProxy(std::move(img), 1, SkBudgeted::kYes,
+ fit);
+}
diff --git a/gfx/skia/skia/src/gpu/GrSWMaskHelper.h b/gfx/skia/skia/src/gpu/GrSWMaskHelper.h
new file mode 100644
index 0000000000..0a042e1e54
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSWMaskHelper.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSWMaskHelper_DEFINED
+#define GrSWMaskHelper_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterClip.h"
+
+class GrShape;
+class GrRecordingContext;
+class GrTextureProxy;
+
+/**
+ * The GrSWMaskHelper helps generate clip masks using the software rendering
+ * path. It is intended to be used as:
+ *
+ * GrSWMaskHelper helper(context);
+ * helper.init(...);
+ *
+ * draw one or more paths/rects specifying the required boolean ops
+ *
+ * toTextureProxy(); // to get it from the internal bitmap to the GPU
+ *
+ * The result of this process will be the final mask (on the GPU) in the
+ * upper left hand corner of the texture.
+ */
+class GrSWMaskHelper : SkNoncopyable {
+public:
+ GrSWMaskHelper(SkAutoPixmapStorage* pixels = nullptr)
+ : fPixels(pixels ? pixels : &fPixelsStorage) { }
+
+ // set up the internal state in preparation for draws. Since many masks
+ // may be accumulated in the helper during creation, "resultBounds"
+ // allows the caller to specify the region of interest - to limit the
+ // amount of work.
+ bool init(const SkIRect& resultBounds);
+
+ // Draw a single rect into the accumulation bitmap using the specified op
+ void drawRect(const SkRect& rect, const SkMatrix& matrix, SkRegion::Op op, GrAA, uint8_t alpha);
+
+ // Draw a single path into the accumulation bitmap using the specified op
+ void drawShape(const GrShape&, const SkMatrix& matrix, SkRegion::Op op, GrAA, uint8_t alpha);
+
+ sk_sp<GrTextureProxy> toTextureProxy(GrRecordingContext*, SkBackingFit fit);
+
+ // Reset the internal bitmap
+ void clear(uint8_t alpha) {
+ fPixels->erase(SkColorSetARGB(alpha, 0xFF, 0xFF, 0xFF));
+ }
+
+private:
+ SkVector fTranslate;
+ SkAutoPixmapStorage* fPixels;
+ SkAutoPixmapStorage fPixelsStorage;
+ SkDraw fDraw;
+ SkRasterClip fRasterClip;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif // GrSWMaskHelper_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.cpp b/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.cpp
new file mode 100644
index 0000000000..61142896b2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrSamplePatternDictionary.h"
+
+bool GrSamplePatternDictionary::LessThan::operator()(
+ const SkTArray<SkPoint>& a, const SkTArray<SkPoint>& b) const {
+ if (a.count() != b.count()) {
+ return a.count() < b.count();
+ }
+ for (int i = 0; i < a.count(); ++i) {
+ // This doesn't have geometric meaning. We just need to define an ordering for std::map.
+ if (a[i].x() != b[i].x()) {
+ return a[i].x() < b[i].x();
+ }
+ if (a[i].y() != b[i].y()) {
+ return a[i].y() < b[i].y();
+ }
+ }
+ return false; // Both sample patterns are equal, therefore, "a < b" is false.
+}
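+
+// For illustration: any 1-point pattern orders before any 2-point pattern
+// (counts compare first), and {(0.25, 0.75)} orders before {(0.5, 0.5)}
+// because the first x values differ (0.25 < 0.5).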
+
+int GrSamplePatternDictionary::findOrAssignSamplePatternKey(
+ const SkTArray<SkPoint>& sampleLocations) {
+ if (std::numeric_limits<int>::max() == fSampleLocationsArray.count()) {
+ return 0;
+ }
+ const auto& insertResult = fSamplePatternKeyMap.insert(
+ {sampleLocations, fSampleLocationsArray.count()});
+ if (insertResult.second) {
+ // This means the "insert" call did not find the pattern in the key map already, and
+ // therefore an actual insertion took place. (We don't expect to see many unique sample
+ // patterns.)
+ const SkTArray<SkPoint>& sampleLocations = insertResult.first->first;
+ fSampleLocationsArray.push_back(&sampleLocations);
+ }
+ return insertResult.first->second; // Return the sample pattern key (new or pre-existing).
+}
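+
+// Hypothetical usage of the dictionary: the first distinct pattern submitted is
+// assigned key 0, a resubmission of it returns 0 again, and the next distinct
+// pattern is assigned key 1.
+//   GrSamplePatternDictionary dict;
+//   int k0 = dict.findOrAssignSamplePatternKey(patternA);  // 0 (inserted)
+//   int k1 = dict.findOrAssignSamplePatternKey(patternA);  // 0 (found)
+//   int k2 = dict.findOrAssignSamplePatternKey(patternB);  // 1 (inserted)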
diff --git a/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.h b/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.h
new file mode 100644
index 0000000000..b0044782a6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSamplePatternDictionary.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSamplePatternDictionary_DEFINED
+#define GrSamplePatternDictionary_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/private/SkTArray.h"
+#include <map>
+
+/**
+ * A bidirectional dictionary mapping between sample patterns (i.e., a list of sample locations) and
+ * unique keys. Since we expect that most render targets will draw from the same small pool of
+ * sample patterns, we favor sample pattern keys over actual arrays of points.
+ */
+class GrSamplePatternDictionary {
+public:
+ static constexpr int kInvalidSamplePatternKey = -1;
+
+ int findOrAssignSamplePatternKey(const SkTArray<SkPoint>& sampleLocations);
+
+ const SkTArray<SkPoint>& retrieveSampleLocations(int samplePatternKey) const {
+ return *fSampleLocationsArray[samplePatternKey];
+ }
+
+private:
+ struct LessThan {
+ bool operator()(const SkTArray<SkPoint>&, const SkTArray<SkPoint>&) const;
+ };
+
+ std::map<SkTArray<SkPoint>, int, LessThan> fSamplePatternKeyMap;
+ SkTArray<const SkTArray<SkPoint>*> fSampleLocationsArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSamplerState.h b/gfx/skia/skia/src/gpu/GrSamplerState.h
new file mode 100644
index 0000000000..34915c7bbd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSamplerState.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSamplerState_DEFINED
+#define GrSamplerState_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Represents the filtering and tile modes used to access a texture.
+ */
+class GrSamplerState {
+public:
+ enum class Filter : uint8_t { kNearest, kBilerp, kMipMap };
+ enum class WrapMode : uint8_t { kClamp, kRepeat, kMirrorRepeat, kClampToBorder };
+
+ static constexpr GrSamplerState ClampNearest() { return GrSamplerState(); }
+ static constexpr GrSamplerState ClampBilerp() {
+ return GrSamplerState(WrapMode::kClamp, Filter::kBilerp);
+ }
+
+ constexpr GrSamplerState() : GrSamplerState(WrapMode::kClamp, Filter::kNearest) {}
+
+ constexpr GrSamplerState(WrapMode wrapXAndY, Filter filter)
+ : fWrapModes{wrapXAndY, wrapXAndY}, fFilter(filter) {}
+
+ constexpr GrSamplerState(const WrapMode wrapModes[2], Filter filter)
+ : fWrapModes{wrapModes[0], wrapModes[1]}, fFilter(filter) {}
+
+ constexpr GrSamplerState(const GrSamplerState&) = default;
+
+ GrSamplerState& operator=(const GrSamplerState& that) {
+ fWrapModes[0] = that.fWrapModes[0];
+ fWrapModes[1] = that.fWrapModes[1];
+ fFilter = that.fFilter;
+ return *this;
+ }
+
+ Filter filter() const { return fFilter; }
+
+ void setFilterMode(Filter filterMode) { fFilter = filterMode; }
+
+ void setWrapModeX(const WrapMode wrap) { fWrapModes[0] = wrap; }
+ void setWrapModeY(const WrapMode wrap) { fWrapModes[1] = wrap; }
+
+ WrapMode wrapModeX() const { return fWrapModes[0]; }
+ WrapMode wrapModeY() const { return fWrapModes[1]; }
+
+ bool isRepeated() const {
+ return (WrapMode::kClamp != fWrapModes[0] && WrapMode::kClampToBorder != fWrapModes[0]) ||
+ (WrapMode::kClamp != fWrapModes[1] && WrapMode::kClampToBorder != fWrapModes[1]);
+ }
+
+ bool operator==(const GrSamplerState& that) const {
+ return fWrapModes[0] == that.fWrapModes[0] && fWrapModes[1] == that.fWrapModes[1] &&
+ fFilter == that.fFilter;
+ }
+
+ bool operator!=(const GrSamplerState& that) const { return !(*this == that); }
+
+ static uint8_t GenerateKey(const GrSamplerState& samplerState) {
+ const int kTileModeXShift = 2;
+ const int kTileModeYShift = 4;
+
+ SkASSERT(static_cast<int>(samplerState.filter()) <= 3);
+ uint8_t key = static_cast<uint8_t>(samplerState.filter());
+
+ SkASSERT(static_cast<int>(samplerState.wrapModeX()) <= 3);
+ key |= (static_cast<uint8_t>(samplerState.wrapModeX()) << kTileModeXShift);
+
+ SkASSERT(static_cast<int>(samplerState.wrapModeY()) <= 3);
+ key |= (static_cast<uint8_t>(samplerState.wrapModeY()) << kTileModeYShift);
+
+ return key;
+ }
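+
+ // Key layout, for illustration: filter occupies bits 0-1, wrapModeX bits 2-3,
+ // wrapModeY bits 4-5. E.g. Filter::kBilerp (1) with WrapMode::kRepeat (1) on
+ // both axes yields 0b010101 == 21.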
+
+private:
+ WrapMode fWrapModes[2];
+ Filter fFilter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrScissorState.h b/gfx/skia/skia/src/gpu/GrScissorState.h
new file mode 100644
index 0000000000..79ba47f1e6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrScissorState.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrScissorState_DEFINED
+#define GrScissorState_DEFINED
+
+#include "include/core/SkRect.h"
+
+class GrScissorState {
+public:
+ GrScissorState() : fEnabled(false) {}
+ GrScissorState(const SkIRect& rect) : fEnabled(true), fRect(rect) {}
+ void setDisabled() { fEnabled = false; }
+ void set(const SkIRect& rect) { fRect = rect; fEnabled = true; }
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& rect) {
+ if (!fEnabled) {
+ this->set(rect);
+ return true;
+ }
+ return fRect.intersect(rect);
+ }
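+ // For illustration: intersecting a disabled scissor simply adopts the rect and
+ // returns true; intersecting an enabled one defers to SkIRect::intersect(),
+ // which returns false when the rects do not overlap.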
+ bool operator==(const GrScissorState& other) const {
+ return fEnabled == other.fEnabled &&
+ (false == fEnabled || fRect == other.fRect);
+ }
+ bool operator!=(const GrScissorState& other) const { return !(*this == other); }
+
+ bool enabled() const { return fEnabled; }
+ const SkIRect& rect() const { return fRect; }
+
+private:
+ bool fEnabled;
+ SkIRect fRect;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSemaphore.h b/gfx/skia/skia/src/gpu/GrSemaphore.h
new file mode 100644
index 0000000000..858cf63cbc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSemaphore.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSemaphore_DEFINED
+#define GrSemaphore_DEFINED
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrGpuResource.h"
+
+/**
+ * Represents a semaphore-like GPU synchronization object. This is a slightly odd fit for
+ * GrGpuResource because we don't care about budgeting, recycling, or read/write references for
+ * these. However, making it a GrGpuResource makes it simpler to handle releasing/abandoning these
+ * along with other resources. If more cases like this arise we could consider moving some of the
+ * unused functionality off of GrGpuResource.
+ */
+class GrSemaphore : public GrGpuResource {
+public:
+ // The derived class can return its GrBackendSemaphore. This is used when flushing with signal
+ // semaphores so we can set the client's GrBackendSemaphore object after we've created the
+ // internal semaphore.
+ virtual GrBackendSemaphore backendSemaphore() const = 0;
+
+ const char* getResourceType() const override { return "semaphore"; }
+
+protected:
+ explicit GrSemaphore(GrGpu* gpu) : INHERITED(gpu) {}
+
+private:
+ size_t onGpuMemorySize() const override { return 0; }
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrShaderCaps.cpp b/gfx/skia/skia/src/gpu/GrShaderCaps.cpp
new file mode 100644
index 0000000000..b2be03bd01
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderCaps.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrShaderCaps.h"
+
+#include "include/gpu/GrContextOptions.h"
+#include "src/utils/SkJSONWriter.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+GrShaderCaps::GrShaderCaps(const GrContextOptions& options) {
+ fGLSLGeneration = k330_GrGLSLGeneration;
+ fShaderDerivativeSupport = false;
+ fGeometryShaderSupport = false;
+ fGSInvocationsSupport = false;
+ fPathRenderingSupport = false;
+ fDstReadInShaderSupport = false;
+ fDualSourceBlendingSupport = false;
+ fIntegerSupport = false;
+ fFBFetchSupport = false;
+ fFBFetchNeedsCustomOutput = false;
+ fUsesPrecisionModifiers = false;
+ fCanUseAnyFunctionInShader = true;
+ fCanUseMinAndAbsTogether = true;
+ fCanUseFractForNegativeValues = true;
+ fMustForceNegatedAtanParamToFloat = false;
+ fAtan2ImplementedAsAtanYOverX = false;
+ fMustDoOpBetweenFloorAndAbs = false;
+ fRequiresLocalOutputColorForFBFetch = false;
+ fMustObfuscateUniformColor = false;
+ fMustGuardDivisionEvenAfterExplicitZeroCheck = false;
+ fCanUseFragCoord = true;
+ fIncompleteShortIntPrecision = false;
+ fAddAndTrueToLoopCondition = false;
+ fUnfoldShortCircuitAsTernary = false;
+ fEmulateAbsIntFunction = false;
+ fRewriteDoWhileLoops = false;
+ fRemovePowWithConstantExponent = false;
+ fMustWriteToFragColor = false;
+ fNoDefaultPrecisionForExternalSamplers = false;
+ fFlatInterpolationSupport = false;
+ fPreferFlatInterpolation = false;
+ fNoPerspectiveInterpolationSupport = false;
+ fSampleVariablesSupport = false;
+ fSampleVariablesStencilSupport = false;
+ fExternalTextureSupport = false;
+ fVertexIDSupport = false;
+ fFPManipulationSupport = false;
+ fFloatIs32Bits = true;
+ fHalfIs32Bits = false;
+ fHasLowFragmentPrecision = false;
+ // Backend API support is required to be able to make swizzle-neutral shaders (e.g.
+ // GL_ARB_texture_swizzle).
+ fTextureSwizzleAppliedInShader = true;
+ fBuiltinFMASupport = false;
+
+ fVersionDeclString = nullptr;
+ fShaderDerivativeExtensionString = nullptr;
+ fGeometryShaderExtensionString = nullptr;
+ fGSInvocationsExtensionString = nullptr;
+ fFragCoordConventionsExtensionString = nullptr;
+ fSecondaryOutputExtensionString = nullptr;
+ fExternalTextureExtensionString = nullptr;
+ fSecondExternalTextureExtensionString = nullptr;
+ fNoPerspectiveInterpolationExtensionString = nullptr;
+ fSampleVariablesExtensionString = nullptr;
+ fFBFetchColorName = nullptr;
+ fFBFetchExtensionString = nullptr;
+ fMaxFragmentSamplers = 0;
+ fAdvBlendEqInteraction = kNotSupported_AdvBlendEqInteraction;
+}
+
+#ifdef SK_ENABLE_DUMP_GPU
+void GrShaderCaps::dumpJSON(SkJSONWriter* writer) const {
+ writer->beginObject();
+
+ writer->appendBool("Shader Derivative Support", fShaderDerivativeSupport);
+ writer->appendBool("Geometry Shader Support", fGeometryShaderSupport);
+ writer->appendBool("Geometry Shader Invocations Support", fGSInvocationsSupport);
+ writer->appendBool("Path Rendering Support", fPathRenderingSupport);
+ writer->appendBool("Dst Read In Shader Support", fDstReadInShaderSupport);
+ writer->appendBool("Dual Source Blending Support", fDualSourceBlendingSupport);
+ writer->appendBool("Integer Support", fIntegerSupport);
+
+ static const char* kAdvBlendEqInteractionStr[] = {
+ "Not Supported",
+ "Automatic",
+ "General Enable",
+ "Specific Enables",
+ };
+ GR_STATIC_ASSERT(0 == kNotSupported_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(1 == kAutomatic_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(2 == kGeneralEnable_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(3 == kSpecificEnables_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAdvBlendEqInteractionStr) == kLast_AdvBlendEqInteraction + 1);
+
+ writer->appendBool("FB Fetch Support", fFBFetchSupport);
+ writer->appendBool("Uses precision modifiers", fUsesPrecisionModifiers);
+ writer->appendBool("Can use any() function", fCanUseAnyFunctionInShader);
+ writer->appendBool("Can use min() and abs() together", fCanUseMinAndAbsTogether);
+ writer->appendBool("Can use fract() for negative values", fCanUseFractForNegativeValues);
+ writer->appendBool("Must force negated atan param to float", fMustForceNegatedAtanParamToFloat);
+ writer->appendBool("Must do op between floor and abs", fMustDoOpBetweenFloorAndAbs);
+ writer->appendBool("Must use local out color for FBFetch", fRequiresLocalOutputColorForFBFetch);
+ writer->appendBool("Must obfuscate uniform color", fMustObfuscateUniformColor);
+ writer->appendBool("Must guard division even after explicit zero check",
+ fMustGuardDivisionEvenAfterExplicitZeroCheck);
+ writer->appendBool("Can use gl_FragCoord", fCanUseFragCoord);
+ writer->appendBool("Incomplete short int precision", fIncompleteShortIntPrecision);
+ writer->appendBool("Add and true to loops workaround", fAddAndTrueToLoopCondition);
+ writer->appendBool("Unfold short circuit as ternary", fUnfoldShortCircuitAsTernary);
+ writer->appendBool("Emulate abs(int) function", fEmulateAbsIntFunction);
+ writer->appendBool("Rewrite do while loops", fRewriteDoWhileLoops);
+ writer->appendBool("Rewrite pow with constant exponent", fRemovePowWithConstantExponent);
+ writer->appendBool("Must write to sk_FragColor [workaround]", fMustWriteToFragColor);
+ writer->appendBool("Don't add default precision statement for samplerExternalOES",
+ fNoDefaultPrecisionForExternalSamplers);
+ writer->appendBool("Flat interpolation support", fFlatInterpolationSupport);
+ writer->appendBool("Prefer flat interpolation", fPreferFlatInterpolation);
+ writer->appendBool("No perspective interpolation support", fNoPerspectiveInterpolationSupport);
+ writer->appendBool("Sample variables support", fSampleVariablesSupport);
+ writer->appendBool("Sample variables stencil support [workaround]",
+ fSampleVariablesStencilSupport);
+ writer->appendBool("External texture support", fExternalTextureSupport);
+ writer->appendBool("sk_VertexID support", fVertexIDSupport);
+ writer->appendBool("Floating point manipulation support", fFPManipulationSupport);
+ writer->appendBool("float == fp32", fFloatIs32Bits);
+ writer->appendBool("half == fp32", fHalfIs32Bits);
+ writer->appendBool("Has poor fragment precision", fHasLowFragmentPrecision);
+ writer->appendBool("Texture swizzle applied in shader", fTextureSwizzleAppliedInShader);
+ writer->appendBool("Builtin fma() support", fBuiltinFMASupport);
+
+ writer->appendS32("Max FS Samplers", fMaxFragmentSamplers);
+ writer->appendString("Advanced blend equation interaction",
+ kAdvBlendEqInteractionStr[fAdvBlendEqInteraction]);
+
+ writer->endObject();
+}
+#else
+void GrShaderCaps::dumpJSON(SkJSONWriter* writer) const { }
+#endif
+
+void GrShaderCaps::applyOptionsOverrides(const GrContextOptions& options) {
+ if (options.fDisableDriverCorrectnessWorkarounds) {
+ SkASSERT(fCanUseAnyFunctionInShader);
+ SkASSERT(fCanUseMinAndAbsTogether);
+ SkASSERT(fCanUseFractForNegativeValues);
+ SkASSERT(!fMustForceNegatedAtanParamToFloat);
+ SkASSERT(!fAtan2ImplementedAsAtanYOverX);
+ SkASSERT(!fMustDoOpBetweenFloorAndAbs);
+ SkASSERT(!fRequiresLocalOutputColorForFBFetch);
+ SkASSERT(!fMustObfuscateUniformColor);
+ SkASSERT(!fMustGuardDivisionEvenAfterExplicitZeroCheck);
+ SkASSERT(fCanUseFragCoord);
+ SkASSERT(!fIncompleteShortIntPrecision);
+ SkASSERT(!fAddAndTrueToLoopCondition);
+ SkASSERT(!fUnfoldShortCircuitAsTernary);
+ SkASSERT(!fEmulateAbsIntFunction);
+ SkASSERT(!fRewriteDoWhileLoops);
+ SkASSERT(!fRemovePowWithConstantExponent);
+ SkASSERT(!fMustWriteToFragColor);
+ SkASSERT(!fNoDefaultPrecisionForExternalSamplers);
+ }
+#if GR_TEST_UTILS
+ fDualSourceBlendingSupport = fDualSourceBlendingSupport && !options.fSuppressDualSourceBlending;
+#endif
+}
diff --git a/gfx/skia/skia/src/gpu/GrShaderCaps.h b/gfx/skia/skia/src/gpu/GrShaderCaps.h
new file mode 100644
index 0000000000..fb68ec354e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderCaps.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShaderCaps_DEFINED
+#define GrShaderCaps_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/glsl/GrGLSL.h"
+
+namespace SkSL {
+class ShaderCapsFactory;
+}
+
+struct GrContextOptions;
+class SkJSONWriter;
+
+class GrShaderCaps : public SkRefCnt {
+public:
+ /**
+ * Indicates how GLSL must interact with advanced blend equations. The KHR extension requires
+ * special layout qualifiers in the fragment shader.
+ */
+ enum AdvBlendEqInteraction {
+ kNotSupported_AdvBlendEqInteraction, //!< No _blend_equation_advanced extension
+ kAutomatic_AdvBlendEqInteraction, //!< No interaction required
+ kGeneralEnable_AdvBlendEqInteraction, //!< layout(blend_support_all_equations) out
+ kSpecificEnables_AdvBlendEqInteraction, //!< Specific layout qualifiers per equation
+
+ kLast_AdvBlendEqInteraction = kSpecificEnables_AdvBlendEqInteraction
+ };
+
+ GrShaderCaps(const GrContextOptions&);
+
+ void dumpJSON(SkJSONWriter*) const;
+
+ bool supportsDistanceFieldText() const { return fShaderDerivativeSupport; }
+
+ bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
+ bool geometryShaderSupport() const { return fGeometryShaderSupport; }
+ bool gsInvocationsSupport() const { return fGSInvocationsSupport; }
+ bool pathRenderingSupport() const { return fPathRenderingSupport; }
+ bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
+ bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }
+ bool integerSupport() const { return fIntegerSupport; }
+
+ /**
+ * Some helper functions for encapsulating various extensions to read FB Buffer on openglES
+ *
+ * TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
+ */
+ bool fbFetchSupport() const { return fFBFetchSupport; }
+
+ bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }
+
+ const char* versionDeclString() const { return fVersionDeclString; }
+
+ const char* fbFetchColorName() const { return fFBFetchColorName; }
+
+ const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }
+
+ bool flatInterpolationSupport() const { return fFlatInterpolationSupport; }
+
+ bool preferFlatInterpolation() const { return fPreferFlatInterpolation; }
+
+ bool noperspectiveInterpolationSupport() const { return fNoPerspectiveInterpolationSupport; }
+
+ // Can we use sample variables everywhere?
+ bool sampleVariablesSupport() const { return fSampleVariablesSupport; }
+
+ // Can we use sample variables when rendering to stencil? (This is a workaround for platforms
+ // where sample variables are broken in general, but seem to work when rendering to stencil.)
+ bool sampleVariablesStencilSupport() const { return fSampleVariablesStencilSupport; }
+
+ bool externalTextureSupport() const { return fExternalTextureSupport; }
+
+ bool vertexIDSupport() const { return fVertexIDSupport; }
+
+ // frexp, ldexp, etc.
+ bool fpManipulationSupport() const { return fFPManipulationSupport; }
+
+ bool floatIs32Bits() const { return fFloatIs32Bits; }
+
+ bool halfIs32Bits() const { return fHalfIs32Bits; }
+
+ bool hasLowFragmentPrecision() const { return fHasLowFragmentPrecision; }
+
+ // SkSL only.
+ bool builtinFMASupport() const { return fBuiltinFMASupport; }
+
+ AdvBlendEqInteraction advBlendEqInteraction() const { return fAdvBlendEqInteraction; }
+
+ bool mustEnableAdvBlendEqs() const {
+ return fAdvBlendEqInteraction >= kGeneralEnable_AdvBlendEqInteraction;
+ }
+
+ bool mustEnableSpecificAdvBlendEqs() const {
+ return fAdvBlendEqInteraction == kSpecificEnables_AdvBlendEqInteraction;
+ }
+
+ bool mustDeclareFragmentShaderOutput() const { return fGLSLGeneration > k110_GrGLSLGeneration; }
+
+ bool usesPrecisionModifiers() const { return fUsesPrecisionModifiers; }
+
+ // Returns whether we can use the glsl function any() in our shader code.
+ bool canUseAnyFunctionInShader() const { return fCanUseAnyFunctionInShader; }
+
+ bool canUseMinAndAbsTogether() const { return fCanUseMinAndAbsTogether; }
+
+ bool canUseFractForNegativeValues() const { return fCanUseFractForNegativeValues; }
+
+ bool mustForceNegatedAtanParamToFloat() const { return fMustForceNegatedAtanParamToFloat; }
+
+ // Returns whether a device incorrectly implements atan(y,x) as atan(y/x)
+ bool atan2ImplementedAsAtanYOverX() const { return fAtan2ImplementedAsAtanYOverX; }
+
+ // If this returns true some operation (could be a no op) must be called between floor and abs
+ // to make sure the driver compiler doesn't inline them together which can cause a driver bug in
+ // the shader.
+ bool mustDoOpBetweenFloorAndAbs() const { return fMustDoOpBetweenFloorAndAbs; }
+
+ // If false, SkSL uses a workaround so that sk_FragCoord doesn't actually query gl_FragCoord
+ bool canUseFragCoord() const { return fCanUseFragCoord; }
+
+ // If true, short ints can't represent every integer in the 16-bit two's complement range as
+ // required by the spec. SKSL will always emit full ints.
+ bool incompleteShortIntPrecision() const { return fIncompleteShortIntPrecision; }
+
+ // If true, then conditions in for loops need "&& true" to work around driver bugs.
+ bool addAndTrueToLoopCondition() const { return fAddAndTrueToLoopCondition; }
+
+ // If true, then expressions such as "x && y" or "x || y" are rewritten as
+ // ternary to work around driver bugs.
+ bool unfoldShortCircuitAsTernary() const { return fUnfoldShortCircuitAsTernary; }
+
+ bool emulateAbsIntFunction() const { return fEmulateAbsIntFunction; }
+
+ bool rewriteDoWhileLoops() const { return fRewriteDoWhileLoops; }
+
+ bool removePowWithConstantExponent() const { return fRemovePowWithConstantExponent; }
+
+ bool requiresLocalOutputColorForFBFetch() const { return fRequiresLocalOutputColorForFBFetch; }
+
+ bool mustObfuscateUniformColor() const { return fMustObfuscateUniformColor; }
+
+    // The D3D shader compiler, when targeting PS 3.0 (i.e. within ANGLE), fails to compile
+    // certain constructs. See detailed comments in GrGLCaps.cpp.
+ bool mustGuardDivisionEvenAfterExplicitZeroCheck() const {
+ return fMustGuardDivisionEvenAfterExplicitZeroCheck;
+ }
+
+ // On Nexus 6, the GL context can get lost if a shader does not write a value to gl_FragColor.
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=445377
+ bool mustWriteToFragColor() const { return fMustWriteToFragColor; }
+
+ // The Android emulator claims samplerExternalOES is an unknown type if a default precision
+ // statement is made for the type.
+ bool noDefaultPrecisionForExternalSamplers() const {
+ return fNoDefaultPrecisionForExternalSamplers;
+ }
+
+ // Returns the string of an extension that must be enabled in the shader to support
+ // derivatives. If nullptr is returned then no extension needs to be enabled. Before calling
+ // this function, the caller should check that shaderDerivativeSupport exists.
+ const char* shaderDerivativeExtensionString() const {
+ SkASSERT(this->shaderDerivativeSupport());
+ return fShaderDerivativeExtensionString;
+ }
+
+ // Returns the string of an extension that must be enabled in the shader to support geometry
+ // shaders. If nullptr is returned then no extension needs to be enabled. Before calling this
+ // function, the caller must verify that geometryShaderSupport exists.
+ const char* geometryShaderExtensionString() const {
+ SkASSERT(this->geometryShaderSupport());
+ return fGeometryShaderExtensionString;
+ }
+
+ // Returns the string of an extension that must be enabled in the shader to support
+ // geometry shader invocations. If nullptr is returned then no extension needs to be enabled.
+ // Before calling this function, the caller must verify that gsInvocationsSupport exists.
+ const char* gsInvocationsExtensionString() const {
+ SkASSERT(this->gsInvocationsSupport());
+ return fGSInvocationsExtensionString;
+ }
+
+    // Returns the string of an extension that will do all necessary coord transformations needed
+    // when reading the fragment position. If no such extension exists, this function returns
+    // nullptr, and all transforms of the frag position must be done manually in the shader.
+ const char* fragCoordConventionsExtensionString() const {
+ return fFragCoordConventionsExtensionString;
+ }
+
+ // This returns the name of an extension that must be enabled in the shader, if such a thing is
+ // required in order to use a secondary output in the shader. This returns a nullptr if no such
+ // extension is required. However, the return value of this function does not say whether dual
+ // source blending is supported.
+ const char* secondaryOutputExtensionString() const { return fSecondaryOutputExtensionString; }
+
+ // This returns the name of an extension that must be enabled in the shader to support external
+ // textures. In some cases, two extensions must be enabled - the second extension is returned
+ // by secondExternalTextureExtensionString(). If that function returns nullptr, then only one
+ // extension is required.
+ const char* externalTextureExtensionString() const {
+ SkASSERT(this->externalTextureSupport());
+ return fExternalTextureExtensionString;
+ }
+
+ const char* secondExternalTextureExtensionString() const {
+ SkASSERT(this->externalTextureSupport());
+ return fSecondExternalTextureExtensionString;
+ }
+
+ const char* noperspectiveInterpolationExtensionString() const {
+ SkASSERT(this->noperspectiveInterpolationSupport());
+ return fNoPerspectiveInterpolationExtensionString;
+ }
+
+ const char* sampleVariablesExtensionString() const {
+ SkASSERT(this->sampleVariablesSupport() || this->sampleVariablesStencilSupport());
+ return fSampleVariablesExtensionString;
+ }
+
+ int maxFragmentSamplers() const { return fMaxFragmentSamplers; }
+
+ bool textureSwizzleAppliedInShader() const { return fTextureSwizzleAppliedInShader; }
+
+ GrGLSLGeneration generation() const { return fGLSLGeneration; }
+
+private:
+ void applyOptionsOverrides(const GrContextOptions& options);
+
+ GrGLSLGeneration fGLSLGeneration;
+
+ bool fShaderDerivativeSupport : 1;
+ bool fGeometryShaderSupport : 1;
+ bool fGSInvocationsSupport : 1;
+ bool fPathRenderingSupport : 1;
+ bool fDstReadInShaderSupport : 1;
+ bool fDualSourceBlendingSupport : 1;
+ bool fIntegerSupport : 1;
+ bool fFBFetchSupport : 1;
+ bool fFBFetchNeedsCustomOutput : 1;
+ bool fUsesPrecisionModifiers : 1;
+ bool fFlatInterpolationSupport : 1;
+ bool fPreferFlatInterpolation : 1;
+ bool fNoPerspectiveInterpolationSupport : 1;
+ bool fSampleVariablesSupport : 1;
+ bool fSampleVariablesStencilSupport : 1;
+ bool fExternalTextureSupport : 1;
+ bool fVertexIDSupport : 1;
+ bool fFPManipulationSupport : 1;
+ bool fFloatIs32Bits : 1;
+ bool fHalfIs32Bits : 1;
+ bool fHasLowFragmentPrecision : 1;
+ bool fTextureSwizzleAppliedInShader : 1;
+
+ // Used by SkSL to know when to generate polyfills.
+ bool fBuiltinFMASupport : 1;
+
+    // Used for specific driver bug workarounds
+ bool fCanUseAnyFunctionInShader : 1;
+ bool fCanUseMinAndAbsTogether : 1;
+ bool fCanUseFractForNegativeValues : 1;
+ bool fMustForceNegatedAtanParamToFloat : 1;
+ bool fAtan2ImplementedAsAtanYOverX : 1;
+ bool fMustDoOpBetweenFloorAndAbs : 1;
+ bool fRequiresLocalOutputColorForFBFetch : 1;
+ bool fMustObfuscateUniformColor : 1;
+ bool fMustGuardDivisionEvenAfterExplicitZeroCheck : 1;
+ bool fCanUseFragCoord : 1;
+ bool fIncompleteShortIntPrecision : 1;
+ bool fAddAndTrueToLoopCondition : 1;
+ bool fUnfoldShortCircuitAsTernary : 1;
+ bool fEmulateAbsIntFunction : 1;
+ bool fRewriteDoWhileLoops : 1;
+ bool fRemovePowWithConstantExponent : 1;
+ bool fMustWriteToFragColor : 1;
+ bool fNoDefaultPrecisionForExternalSamplers : 1;
+
+ const char* fVersionDeclString;
+
+ const char* fShaderDerivativeExtensionString;
+ const char* fGeometryShaderExtensionString;
+ const char* fGSInvocationsExtensionString;
+ const char* fFragCoordConventionsExtensionString;
+ const char* fSecondaryOutputExtensionString;
+ const char* fExternalTextureExtensionString;
+ const char* fSecondExternalTextureExtensionString;
+ const char* fNoPerspectiveInterpolationExtensionString;
+ const char* fSampleVariablesExtensionString;
+
+ const char* fFBFetchColorName;
+ const char* fFBFetchExtensionString;
+
+ int fMaxFragmentSamplers;
+
+ AdvBlendEqInteraction fAdvBlendEqInteraction;
+
+ friend class GrCaps; // For initialization.
+ friend class GrDawnCaps;
+ friend class GrGLCaps;
+ friend class GrMockCaps;
+ friend class GrMtlCaps;
+ friend class GrVkCaps;
+ friend class SkSL::ShaderCapsFactory;
+};
+
+#endif
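As a minimal sketch of how these caps getters are meant to be consumed (illustration only, not part of the patch; make_preamble is a hypothetical helper, and the sketch assumes only the getters declared above plus SkString):

static SkString make_preamble(const GrShaderCaps& caps) {
    SkString header;
    if (caps.versionDeclString()) {
        header.append(caps.versionDeclString());  // e.g. "#version 300 es\n"
    }
    // Per the contract above, check support before asking for the extension string.
    if (caps.shaderDerivativeSupport() && caps.shaderDerivativeExtensionString()) {
        header.appendf("#extension %s : require\n", caps.shaderDerivativeExtensionString());
    }
    return header;
}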
diff --git a/gfx/skia/skia/src/gpu/GrShaderUtils.cpp b/gfx/skia/skia/src/gpu/GrShaderUtils.cpp
new file mode 100644
index 0000000000..335922b403
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderUtils.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/sksl/SkSLString.h"
+
+namespace GrShaderUtils {
+
+class GLSLPrettyPrint {
+public:
+ GLSLPrettyPrint() {}
+
+ SkSL::String prettify(const SkSL::String& string) {
+ fTabs = 0;
+ fFreshline = true;
+
+        // If a string ends while we are in the middle of a 'parse until', we need to continue
+        // parsing from the same state on the next string
+ fInParseUntilNewline = false;
+ fInParseUntil = false;
+
+ int parensDepth = 0;
+
+ // setup pretty state
+ fIndex = 0;
+ fLength = string.length();
+ fInput = string.c_str();
+
+ while (fLength > fIndex) {
+            /* The heart and soul of our prettification algorithm. The rules should hopefully
+             * be self-explanatory. For '#' and '//' tokens we parse until we reach a newline.
+             *
+             * For long-style comments like this one, we search for the ending token. We also
+             * preserve whitespace in these comments WITH THE CAVEAT that we do the newlines
+             * ourselves. This allows us to remain in control of line numbers and matching
+             * tabs. Existing tabs in the input string are copied over too, but this will look
+             * funny.
+             *
+             * '{' and '}' are handled in basically the same way. We add a newline if we aren't
+             * on a fresh line, dirty the line, then add a second newline, i.e. braces are always
+             * on their own lines, indented properly. The one funkiness here is that structs print
+             * with the semicolon on its own line. It's not a problem for a GLSL compiler, though.
+             *
+             * '(' and ')' are basically ignored, except as a sign that we need to ignore ';', as
+             * in for loops.
+             *
+             * ';' means add a new line.
+             *
+             * '\t' and '\n' are ignored in general parsing for backwards compatibility with
+             * existing shader code, and we also have a special case for handling whitespace
+             * at the beginning of fresh lines.
+             *
+             * Otherwise just add the new character to the pretty string, indenting if
+             * necessary.
+             */
+ if (fInParseUntilNewline) {
+ this->parseUntilNewline();
+ } else if (fInParseUntil) {
+ this->parseUntil(fInParseUntilToken);
+ } else if (this->hasToken("#") || this->hasToken("//")) {
+ this->parseUntilNewline();
+ } else if (this->hasToken("/*")) {
+ this->parseUntil("*/");
+ } else if ('{' == fInput[fIndex]) {
+ this->newline();
+ this->appendChar('{');
+ fTabs++;
+ this->newline();
+ } else if ('}' == fInput[fIndex]) {
+ fTabs--;
+ this->newline();
+ this->appendChar('}');
+ this->newline();
+ } else if (this->hasToken(")")) {
+ parensDepth--;
+ } else if (this->hasToken("(")) {
+ parensDepth++;
+ } else if (!parensDepth && this->hasToken(";")) {
+ this->newline();
+ } else if ('\t' == fInput[fIndex] || '\n' == fInput[fIndex] ||
+ (fFreshline && ' ' == fInput[fIndex])) {
+ fIndex++;
+ } else {
+ this->appendChar(fInput[fIndex]);
+ }
+ }
+
+ return fPretty;
+ }
+
+private:
+ void appendChar(char c) {
+ this->tabString();
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fFreshline = false;
+ }
+
+    // hasToken automatically consumes the next token if it is a match, tabbing first
+    // if necessary, before inserting the token into the pretty string
+    bool hasToken(const char* token) {
+        size_t i = fIndex;
+        for (size_t j = 0; token[j]; i++, j++) {
+            if (i >= fLength || token[j] != fInput[i]) {
+                return false;
+            }
+        }
+ this->tabString();
+ fIndex = i;
+ fPretty.append(token);
+ fFreshline = false;
+ return true;
+ }
+
+ void parseUntilNewline() {
+ while (fLength > fIndex) {
+ if ('\n' == fInput[fIndex]) {
+ fIndex++;
+ this->newline();
+ fInParseUntilNewline = false;
+ break;
+ }
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fInParseUntilNewline = true;
+ }
+ }
+
+    // This code assumes it is not actually searching for a newline. If you need to search for
+    // a newline, use the function above. If you do search for a newline with this function,
+    // it will consume the entire string and the output will certainly not be prettified.
+ void parseUntil(const char* token) {
+ while (fLength > fIndex) {
+ // For embedded newlines, this code will make sure to embed the newline in the
+ // pretty string, increase the linecount, and tab out the next line to the appropriate
+ // place
+ if ('\n' == fInput[fIndex]) {
+ this->newline();
+ this->tabString();
+ fIndex++;
+ }
+ if (this->hasToken(token)) {
+ fInParseUntil = false;
+ break;
+ }
+ fFreshline = false;
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fInParseUntil = true;
+ fInParseUntilToken = token;
+ }
+ }
+
+ // We only tab if on a newline, otherwise consider the line tabbed
+ void tabString() {
+ if (fFreshline) {
+ for (int t = 0; t < fTabs; t++) {
+ fPretty.append("\t");
+ }
+ }
+ }
+
+    // newline is really a request to add a newline; if we are on a fresh line, there is no
+    // reason to add another newline
+ void newline() {
+ if (!fFreshline) {
+ fFreshline = true;
+ fPretty.append("\n");
+ }
+ }
+
+ bool fFreshline;
+ int fTabs;
+ size_t fIndex, fLength;
+ const char* fInput;
+ SkSL::String fPretty;
+
+ // Some helpers for parseUntil when we go over a string length
+ bool fInParseUntilNewline;
+ bool fInParseUntil;
+ const char* fInParseUntilToken;
+};
+
+SkSL::String PrettyPrint(const SkSL::String& string) {
+ GLSLPrettyPrint pp;
+ return pp.prettify(string);
+}
+
+// Prints shaders one line at a time. This ensures they don't get truncated by the adb log.
+void PrintLineByLine(const char* header, const SkSL::String& text) {
+ if (header) {
+ SkDebugf("%s\n", header);
+ }
+ SkTArray<SkString> lines;
+ SkStrSplit(text.c_str(), "\n", kStrict_SkStrSplitMode, &lines);
+ for (int i = 0; i < lines.count(); ++i) {
+ SkDebugf("%4i\t%s\n", i + 1, lines[i].c_str());
+ }
+}
+
+GrContextOptions::ShaderErrorHandler* DefaultShaderErrorHandler() {
+ class GrDefaultShaderErrorHandler : public GrContextOptions::ShaderErrorHandler {
+ public:
+ void compileError(const char* shader, const char* errors) override {
+ SkDebugf("Shader compilation error\n"
+ "------------------------\n");
+ PrintLineByLine(nullptr, shader);
+ SkDebugf("Errors:\n%s\n", errors);
+ SkDEBUGFAIL("Shader compilation failed!");
+ }
+ };
+
+ static GrDefaultShaderErrorHandler gHandler;
+ return &gHandler;
+}
+
+}  // namespace GrShaderUtils
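A short usage sketch (illustration only, not part of the patch); the expected output follows directly from the brace and semicolon rules in GLSLPrettyPrint::prettify above:

SkSL::String raw("void main(){sk_FragColor=half4(1);}");
SkSL::String pretty = GrShaderUtils::PrettyPrint(raw);
// pretty now reads (the indent is a tab):
//   void main()
//   {
//       sk_FragColor=half4(1);
//   }
GrShaderUtils::PrintLineByLine("prettified:", pretty);  // numbered, one line per log call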
diff --git a/gfx/skia/skia/src/gpu/GrShaderUtils.h b/gfx/skia/skia/src/gpu/GrShaderUtils.h
new file mode 100644
index 0000000000..c30120dcc8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderUtils.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShaderUtils_DEFINED
+#define GrShaderUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "src/sksl/SkSLString.h"
+
+namespace GrShaderUtils {
+
+SkSL::String PrettyPrint(const SkSL::String& string);
+void PrintLineByLine(const char* header, const SkSL::String& text);
+GrContextOptions::ShaderErrorHandler* DefaultShaderErrorHandler();
+
+}  // namespace GrShaderUtils
+
+#endif
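A wiring sketch (illustration only; it assumes GrContextOptions carries a fShaderErrorHandler field for this purpose, which is how DefaultShaderErrorHandler is intended to be used as a fallback). Here options, badSkSL, and compilerErrors stand for values the caller already has:

GrContextOptions::ShaderErrorHandler* handler = options.fShaderErrorHandler
        ? options.fShaderErrorHandler
        : GrShaderUtils::DefaultShaderErrorHandler();
// On a failed compile, the backend would then report through the handler:
handler->compileError(badSkSL.c_str(), compilerErrors.c_str());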
diff --git a/gfx/skia/skia/src/gpu/GrShaderVar.cpp b/gfx/skia/skia/src/gpu/GrShaderVar.cpp
new file mode 100644
index 0000000000..6f7ccccd60
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderVar.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrShaderVar.h"
+
+static const char* type_modifier_string(GrShaderVar::TypeModifier t) {
+ switch (t) {
+ case GrShaderVar::kNone_TypeModifier: return "";
+ case GrShaderVar::kIn_TypeModifier: return "in";
+ case GrShaderVar::kInOut_TypeModifier: return "inout";
+ case GrShaderVar::kOut_TypeModifier: return "out";
+ case GrShaderVar::kUniform_TypeModifier: return "uniform";
+ }
+ SK_ABORT("Unknown shader variable type modifier.");
+}
+
+void GrShaderVar::setIOType(GrIOType ioType) {
+ switch (ioType) {
+ case kRW_GrIOType:
+ return;
+ case kRead_GrIOType:
+ this->addModifier("readonly");
+ return;
+ case kWrite_GrIOType:
+ this->addModifier("writeonly");
+ return;
+ }
+ SK_ABORT("Unknown io type.");
+}
+
+void GrShaderVar::appendDecl(const GrShaderCaps* shaderCaps, SkString* out) const {
+    if (!fLayoutQualifier.isEmpty()) {
+ out->appendf("layout(%s) ", fLayoutQualifier.c_str());
+ }
+ out->append(fExtraModifiers);
+ if (this->getTypeModifier() != kNone_TypeModifier) {
+ out->append(type_modifier_string(this->getTypeModifier()));
+ out->append(" ");
+ }
+ GrSLType effectiveType = this->getType();
+ if (this->isArray()) {
+ if (this->isUnsizedArray()) {
+ out->appendf("%s %s[]", GrGLSLTypeString(effectiveType), this->getName().c_str());
+ } else {
+ SkASSERT(this->getArrayCount() > 0);
+ out->appendf("%s %s[%d]",
+ GrGLSLTypeString(effectiveType),
+ this->getName().c_str(),
+ this->getArrayCount());
+ }
+ } else {
+ out->appendf("%s %s", GrGLSLTypeString(effectiveType), this->getName().c_str());
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrShaderVar.h b/gfx/skia/skia/src/gpu/GrShaderVar.h
new file mode 100644
index 0000000000..791d3594ae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShaderVar.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShaderVar_DEFINED
+#define GrShaderVar_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrShaderCaps;
+
+#define USE_UNIFORM_FLOAT_ARRAYS true
+
+/**
+ * Represents a variable in a shader
+ */
+class GrShaderVar {
+public:
+ enum TypeModifier {
+ kNone_TypeModifier,
+ kOut_TypeModifier,
+ kIn_TypeModifier,
+ kInOut_TypeModifier,
+ kUniform_TypeModifier,
+ };
+
+ /**
+     * Values for array count that have special meaning. We allow 1-sized arrays.
+ */
+ enum {
+ kNonArray = 0, // not an array
+ kUnsizedArray = -1, // an unsized array (declared with [])
+ };
+
+ /**
+     * Defaults to a non-array half with no type modifier or layout qualifier.
+ */
+ GrShaderVar()
+ : fType(kHalf_GrSLType)
+ , fTypeModifier(kNone_TypeModifier)
+ , fCount(kNonArray)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS) {
+ }
+
+ GrShaderVar(const SkString& name, GrSLType type, int arrayCount = kNonArray)
+ : fType(type)
+ , fTypeModifier(kNone_TypeModifier)
+ , fCount(arrayCount)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS)
+ , fName(name) {
+        SkASSERT(kVoid_GrSLType != type);
+    }
+
+ GrShaderVar(const char* name, GrSLType type, int arrayCount = kNonArray)
+ : fType(type)
+ , fTypeModifier(kNone_TypeModifier)
+ , fCount(arrayCount)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS)
+ , fName(name) {
+        SkASSERT(kVoid_GrSLType != type);
+    }
+
+ GrShaderVar(const char* name, GrSLType type, TypeModifier typeModifier)
+ : fType(type)
+ , fTypeModifier(typeModifier)
+ , fCount(kNonArray)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS)
+ , fName(name) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ GrShaderVar(const char* name, GrSLType type, TypeModifier typeModifier, int arrayCount)
+ : fType(type)
+ , fTypeModifier(typeModifier)
+ , fCount(arrayCount)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS)
+ , fName(name) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ GrShaderVar(const GrShaderVar& that)
+ : fType(that.fType)
+ , fTypeModifier(that.fTypeModifier)
+ , fCount(that.fCount)
+        , fUseUniformFloatArrays(that.fUseUniformFloatArrays)
+ , fName(that.fName)
+ , fLayoutQualifier(that.fLayoutQualifier)
+ , fExtraModifiers(that.fExtraModifiers) {
+ SkASSERT(kVoid_GrSLType != that.getType());
+ }
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(GrSLType type,
+ const SkString& name,
+ TypeModifier typeModifier = kNone_TypeModifier,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = kNonArray;
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(GrSLType type,
+ const char* name,
+ TypeModifier typeModifier = kNone_TypeModifier,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = kNonArray;
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(GrSLType type,
+ const SkString& name,
+ int count,
+ TypeModifier typeModifier,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = count;
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(GrSLType type,
+ const char* name,
+ int count,
+ TypeModifier typeModifier,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = count;
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Is the var an array.
+ */
+ bool isArray() const { return kNonArray != fCount; }
+ /**
+     * Is this an unsized array (i.e. declared with []).
+ */
+ bool isUnsizedArray() const { return kUnsizedArray == fCount; }
+ /**
+ * Get the array length of the var.
+ */
+ int getArrayCount() const { return fCount; }
+ /**
+ * Set the array length of the var
+ */
+ void setArrayCount(int count) { fCount = count; }
+ /**
+ * Set to be a non-array.
+ */
+ void setNonArray() { fCount = kNonArray; }
+ /**
+ * Set to be an unsized array.
+ */
+ void setUnsizedArray() { fCount = kUnsizedArray; }
+
+ /**
+ * Access the var name as a writable string
+ */
+ SkString* accessName() { return &fName; }
+ /**
+ * Set the var name
+ */
+ void setName(const SkString& n) { fName = n; }
+ void setName(const char* n) { fName = n; }
+
+ /**
+ * Get the var name.
+ */
+ const SkString& getName() const { return fName; }
+
+ /**
+ * Shortcut for this->getName().c_str();
+ */
+ const char* c_str() const { return this->getName().c_str(); }
+
+ /**
+ * Get the type of the var
+ */
+ GrSLType getType() const { return fType; }
+ /**
+ * Set the type of the var
+ */
+ void setType(GrSLType type) { fType = type; }
+
+ TypeModifier getTypeModifier() const { return fTypeModifier; }
+ void setTypeModifier(TypeModifier type) { fTypeModifier = type; }
+
+ /**
+ * Appends to the layout qualifier
+ */
+ void addLayoutQualifier(const char* layoutQualifier) {
+ if (!layoutQualifier || !strlen(layoutQualifier)) {
+ return;
+ }
+ if (fLayoutQualifier.isEmpty()) {
+ fLayoutQualifier = layoutQualifier;
+ } else {
+ fLayoutQualifier.appendf(", %s", layoutQualifier);
+ }
+ }
+
+ void setIOType(GrIOType);
+
+ void addModifier(const char* modifier) {
+ if (modifier) {
+ fExtraModifiers.appendf("%s ", modifier);
+ }
+ }
+
+ /**
+ * Write a declaration of this variable to out.
+ */
+ void appendDecl(const GrShaderCaps*, SkString* out) const;
+
+ void appendArrayAccess(int index, SkString* out) const {
+ out->appendf("%s[%d]%s",
+ this->getName().c_str(),
+ index,
+ fUseUniformFloatArrays ? "" : ".x");
+ }
+
+ void appendArrayAccess(const char* indexName, SkString* out) const {
+ out->appendf("%s[%s]%s",
+ this->getName().c_str(),
+ indexName,
+ fUseUniformFloatArrays ? "" : ".x");
+ }
+
+private:
+ GrSLType fType;
+ TypeModifier fTypeModifier;
+ int fCount;
+    /// Work around driver bugs on some hardware that doesn't correctly
+    /// support uniform float []
+ bool fUseUniformFloatArrays;
+
+ SkString fName;
+ SkString fLayoutQualifier;
+ SkString fExtraModifiers;
+};
+
+#endif
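A small usage sketch (illustration only): building the declaration text these accessors describe. kHalf4_GrSLType comes from GrTypesPriv.h, and caps stands for a valid GrShaderCaps pointer the caller already has:

GrShaderVar color("uColor", kHalf4_GrSLType, GrShaderVar::kUniform_TypeModifier);
color.addLayoutQualifier("set = 0");
SkString decl;
color.appendDecl(caps, &decl);
// decl now holds: layout(set = 0) uniform half4 uColor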
diff --git a/gfx/skia/skia/src/gpu/GrSkSLFPFactoryCache.h b/gfx/skia/skia/src/gpu/GrSkSLFPFactoryCache.h
new file mode 100644
index 0000000000..48869f896b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSkSLFPFactoryCache.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSkSLFPFactoryCache_DEFINED
+#define GrSkSLFPFactoryCache_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#include <vector>
+
+class GrSkSLFPFactory;
+
+// This is a cache used by GrSkSLFP to retain GrSkSLFPFactory instances, so we don't have to
+// re-process the SkSL source code every time we create a GrSkSLFP instance.
+// For thread safety, it is important that GrSkSLFP only interact with the cache from methods that
+// are only called from within the rendering thread, like onCreateGLSLInstance and
+// onGetGLSLProcessorKey.
+class GrSkSLFPFactoryCache : public SkNVRefCnt<GrSkSLFPFactoryCache> {
+public:
+ // Returns a factory by its numeric index, or null if no such factory exists. Indices are
+ // allocated by GrSkSLFP::NewIndex().
+ sk_sp<GrSkSLFPFactory> get(int index);
+
+ // Stores a new factory with the given index.
+ void set(int index, sk_sp<GrSkSLFPFactory> factory);
+
+ ~GrSkSLFPFactoryCache();
+
+private:
+ std::vector<GrSkSLFPFactory*> fFactories;
+};
+
+#endif
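A sketch of the intended lookup pattern (illustration only; makeFactoryForSource is a hypothetical helper, and index would come from GrSkSLFP::NewIndex() as noted above):

sk_sp<GrSkSLFPFactory> factory = cache->get(index);
if (!factory) {
    factory = makeFactoryForSource(sksl);  // compile the SkSL once...
    cache->set(index, factory);            // ...and memoize it for later GrSkSLFP instances
}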
diff --git a/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp
new file mode 100644
index 0000000000..4245e9ed29
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSemaphore.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTaskGroup.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDeferredProxyUploader.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrSWMaskHelper.h"
+#include "src/gpu/GrSoftwarePathRenderer.h"
+#include "src/gpu/GrSurfaceContextPriv.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+////////////////////////////////////////////////////////////////////////////////
+GrPathRenderer::CanDrawPath
+GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // Pass on any style that applies. The caller will apply the style if a suitable renderer is
+ // not found and try again with the new GrShape.
+ if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
+ (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
+ // This is the fallback renderer for when a path is too complicated for the GPU ones.
+ return CanDrawPath::kAsBackup;
+ }
+ return CanDrawPath::kNo;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
+ SkIRect* devBounds) {
+ SkRect shapeBounds = shape.styledBounds();
+ if (shapeBounds.isEmpty()) {
+ return false;
+ }
+ SkRect shapeDevBounds;
+ matrix.mapRect(&shapeDevBounds, shapeBounds);
+ // Even though these are "unclipped" bounds we still clip to the int32_t range.
+ // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
+ // would round down to this value when cast to a float, but who really cares.
+ // INT32_MIN is exactly representable.
+ static constexpr int32_t kMaxInt = 2147483520;
+ if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
+ return false;
+ }
+ // Make sure that the resulting SkIRect can have representable width and height
+ if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
+ SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
+ return false;
+ }
+ shapeDevBounds.roundOut(devBounds);
+ return true;
+}
+
+// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
+// is no intersection.
+bool GrSoftwarePathRenderer::GetShapeAndClipBounds(GrRenderTargetContext* renderTargetContext,
+ const GrClip& clip,
+ const GrShape& shape,
+ const SkMatrix& matrix,
+ SkIRect* unclippedDevShapeBounds,
+ SkIRect* clippedDevShapeBounds,
+ SkIRect* devClipBounds) {
+ // compute bounds as intersection of rt size, clip, and path
+ clip.getConservativeBounds(renderTargetContext->width(),
+ renderTargetContext->height(),
+ devClipBounds);
+
+ if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
+ *unclippedDevShapeBounds = SkIRect::EmptyIRect();
+ *clippedDevShapeBounds = SkIRect::EmptyIRect();
+ return false;
+ }
+ if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
+ *clippedDevShapeBounds = SkIRect::EmptyIRect();
+ return false;
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix) {
+ renderTargetContext->priv().stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
+ viewMatrix, rect, &localMatrix);
+}
+
+void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ const SkIRect& devPathBounds) {
+ SkMatrix invert;
+ if (!viewMatrix.invert(&invert)) {
+ return;
+ }
+
+ SkRect rect;
+ if (devClipBounds.fTop < devPathBounds.fTop) {
+ rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devClipBounds.fTop),
+ SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
+ DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fLeft < devPathBounds.fLeft) {
+ rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
+ SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
+ DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fRight > devPathBounds.fRight) {
+ rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
+ SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
+ DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fBottom > devPathBounds.fBottom) {
+ rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fBottom),
+ SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
+ DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+}
+
+void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIPoint& textureOriginInDeviceSpace,
+ const SkIRect& deviceSpaceRectToDraw) {
+ SkMatrix invert;
+ if (!viewMatrix.invert(&invert)) {
+ return;
+ }
+
+ SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);
+
+ // We use device coords to compute the texture coordinates. We take the device coords and apply
+ // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
+ // matrix to normalized coords.
+ SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
+ SkIntToScalar(-textureOriginInDeviceSpace.fY));
+ maskMatrix.preConcat(viewMatrix);
+ paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(
+ std::move(proxy), srcColorType, maskMatrix, GrSamplerState::Filter::kNearest));
+ DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
+ dstRect, invert);
+}
+
+static sk_sp<GrTextureProxy> make_deferred_mask_texture_proxy(GrRecordingContext* context,
+ SkBackingFit fit,
+ int width, int height) {
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ const GrCaps* caps = context->priv().caps();
+
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
+ GrRenderable::kNo);
+
+ return proxyProvider->createProxy(format, desc, GrRenderable::kNo, 1, kTopLeft_GrSurfaceOrigin,
+ GrMipMapped::kNo, fit, SkBudgeted::kYes, GrProtected::kNo);
+}
+
+namespace {
+
+/**
+ * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
+ * a single path into the mask texture. This stores all of the information needed by the worker
+ * thread's call to drawShape (see below, in onDrawPath).
+ */
+class SoftwarePathData {
+public:
+ SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
+ GrAA aa)
+ : fMaskBounds(maskBounds)
+ , fViewMatrix(viewMatrix)
+ , fShape(shape)
+ , fAA(aa) {}
+
+ const SkIRect& getMaskBounds() const { return fMaskBounds; }
+ const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
+ const GrShape& getShape() const { return fShape; }
+ GrAA getAA() const { return fAA; }
+
+private:
+ SkIRect fMaskBounds;
+ SkMatrix fViewMatrix;
+ GrShape fShape;
+ GrAA fAA;
+};
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class PathInvalidator : public SkPathRef::GenIDChangeListener {
+public:
+ PathInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
+ : fMsg(key, contextUniqueID) {}
+
+private:
+ GrUniqueKeyInvalidatedMessage fMsg;
+
+ void onChange() override {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
+ }
+};
+
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// return true on success; false on failure
+bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrSoftwarePathRenderer::onDrawPath");
+ if (!fProxyProvider) {
+ return false;
+ }
+
+ SkASSERT(!args.fShape->style().applies());
+ // We really need to know if the shape will be inverse filled or not
+ // If the path is hairline, ignore inverse fill.
+ bool inverseFilled = args.fShape->inverseFilled() &&
+ !IsStrokeHairlineOrEquivalent(args.fShape->style(),
+ *args.fViewMatrix, nullptr);
+
+ SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
+ // To prevent overloading the cache with entries during animations we limit the cache of masks
+ // to cases where the matrix preserves axis alignment.
+ bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
+ args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);
+
+ if (!GetShapeAndClipBounds(args.fRenderTargetContext,
+ *args.fClip, *args.fShape,
+ *args.fViewMatrix, &unclippedDevShapeBounds,
+ &clippedDevShapeBounds,
+ &devClipBounds)) {
+ if (inverseFilled) {
+ DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
+ *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
+ devClipBounds, unclippedDevShapeBounds);
+ }
+ return true;
+ }
+
+ const SkIRect* boundsForMask = &clippedDevShapeBounds;
+ if (useCache) {
+ // Use the cache only if >50% of the path is visible.
+ int unclippedWidth = unclippedDevShapeBounds.width();
+ int unclippedHeight = unclippedDevShapeBounds.height();
+ int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
+ int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
+ clippedDevShapeBounds.height());
+ int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
+ if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
+ unclippedHeight > maxTextureSize) {
+ useCache = false;
+ } else {
+ boundsForMask = &unclippedDevShapeBounds;
+ }
+ }
+
+ GrUniqueKey maskKey;
+ if (useCache) {
+ // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
+ SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
+ SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
+ SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
+ SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize(),
+ "SW Path Mask");
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+        // Fractional translate does not affect caching on Android. This is done for a better
+        // cache hit ratio and speed, and it matches HWUI behavior, which doesn't consider the
+        // matrix at all when caching paths.
+ SkFixed fracX = 0;
+ SkFixed fracY = 0;
+#else
+ SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
+ SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
+ // Allow 8 bits each in x and y of subpixel positioning.
+ SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
+ SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
+#endif
+ builder[0] = SkFloat2Bits(sx);
+ builder[1] = SkFloat2Bits(sy);
+ builder[2] = SkFloat2Bits(kx);
+ builder[3] = SkFloat2Bits(ky);
+ // Distinguish between hairline and filled paths. For hairlines, we also need to include
+ // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
+ // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
+ // all cases we might see.
+ uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
+ ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
+ builder[4] = fracX | (fracY >> 8) | (styleBits << 16);
+ args.fShape->writeUnstyledKey(&builder[5]);
+ }
+
+ sk_sp<GrTextureProxy> proxy;
+ if (useCache) {
+ proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey, GrColorType::kAlpha_8,
+ kTopLeft_GrSurfaceOrigin);
+ }
+ if (!proxy) {
+ SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
+ GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);
+
+ SkTaskGroup* taskGroup = nullptr;
+ if (auto direct = args.fContext->priv().asDirectContext()) {
+ taskGroup = direct->priv().getTaskGroup();
+ }
+
+ if (taskGroup) {
+ proxy = make_deferred_mask_texture_proxy(args.fContext, fit,
+ boundsForMask->width(),
+ boundsForMask->height());
+ if (!proxy) {
+ return false;
+ }
+
+ auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
+ *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
+ GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();
+
+ auto drawAndUploadMask = [uploaderRaw] {
+ TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
+ GrSWMaskHelper helper(uploaderRaw->getPixels());
+ if (helper.init(uploaderRaw->data().getMaskBounds())) {
+ helper.drawShape(uploaderRaw->data().getShape(),
+ *uploaderRaw->data().getViewMatrix(),
+ SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
+ } else {
+ SkDEBUGFAIL("Unable to allocate SW mask.");
+ }
+ uploaderRaw->signalAndFreeData();
+ };
+ taskGroup->add(std::move(drawAndUploadMask));
+ proxy->texPriv().setDeferredUploader(std::move(uploader));
+ } else {
+ GrSWMaskHelper helper;
+ if (!helper.init(*boundsForMask)) {
+ return false;
+ }
+ helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
+ proxy = helper.toTextureProxy(args.fContext, fit);
+ }
+
+ if (!proxy) {
+ return false;
+ }
+ if (useCache) {
+ SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
+ fProxyProvider->assignUniqueKeyToProxy(maskKey, proxy.get());
+ args.fShape->addGenIDChangeListener(
+ sk_make_sp<PathInvalidator>(maskKey, args.fContext->priv().contextID()));
+ }
+ }
+ if (inverseFilled) {
+ DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
+ *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
+ unclippedDevShapeBounds);
+ }
+ DrawToTargetWithShapeMask(
+ std::move(proxy), GrColorType::kAlpha_8, args.fRenderTargetContext,
+ std::move(args.fPaint), *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
+ SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);
+
+ return true;
+}
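For concreteness, a worked example (illustration only) of the mask-key packing in onDrawPath above, for a view-matrix translation of (10.625, 3.25) and a plain filled, non-hairline path:

// fracX = SkScalarToFixed(0.625) & 0x0000FF00 = 0x0000A000
// fracY = SkScalarToFixed(0.25)  & 0x0000FF00 = 0x00004000
// styleBits = 0 (filled, not a simple hairline)
// builder[4] = 0xA000 | (0x4000 >> 8) | (0 << 16) = 0x0000A040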
diff --git a/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h
new file mode 100644
index 0000000000..d407791137
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSoftwarePathRenderer_DEFINED
+#define GrSoftwarePathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrProxyProvider;
+class GrTextureProxy;
+
+/**
+ * This class uses the software side to render a path to an SkBitmap and
+ * then uploads the result to the GPU.
+ */
+class GrSoftwarePathRenderer : public GrPathRenderer {
+public:
+ GrSoftwarePathRenderer(GrProxyProvider* proxyProvider, bool allowCaching)
+ : fProxyProvider(proxyProvider)
+ , fAllowCaching(allowCaching) {
+ }
+
+ static bool GetShapeAndClipBounds(GrRenderTargetContext*,
+ const GrClip& clip,
+ const GrShape& shape,
+ const SkMatrix& matrix,
+ SkIRect* unclippedDevShapeBounds,
+ SkIRect* clippedDevShapeBounds,
+ SkIRect* devClipBounds);
+
+private:
+ static void DrawNonAARect(GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix);
+ static void DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ const SkIRect& devPathBounds);
+
+ // This utility draws a path mask using a provided paint. The rectangle is drawn in device
+ // space. The 'viewMatrix' will be used to ensure the correct local coords are provided to
+ // any fragment processors in the paint.
+ static void DrawToTargetWithShapeMask(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIPoint& textureOriginInDeviceSpace,
+ const SkIRect& deviceSpaceRectToDraw);
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+private:
+ GrProxyProvider* fProxyProvider;
+ bool fAllowCaching;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp b/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp
new file mode 100644
index 0000000000..ac1c88d2c9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrResourceKey.h"
+#include "src/gpu/GrStencilAttachment.h"
+
+void GrStencilAttachment::ComputeSharedStencilAttachmentKey(int width, int height, int sampleCnt,
+ GrUniqueKey* key) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kDomain, 3);
+ builder[0] = width;
+ builder[1] = height;
+ builder[2] = sampleCnt;
+}
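Usage sketch (illustration only): two render targets with the same dimensions and sample count compute the same key and therefore share one stencil attachment.

GrUniqueKey key;
GrStencilAttachment::ComputeSharedStencilAttachmentKey(1920, 1080, /*sampleCnt=*/1, &key);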
diff --git a/gfx/skia/skia/src/gpu/GrStencilAttachment.h b/gfx/skia/skia/src/gpu/GrStencilAttachment.h
new file mode 100644
index 0000000000..5a56d3022b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilAttachment.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrStencilAttachment_DEFINED
+#define GrStencilAttachment_DEFINED
+
+#include "include/gpu/GrGpuResource.h"
+#include "src/core/SkClipStack.h"
+
+class GrRenderTarget;
+class GrResourceKey;
+
+class GrStencilAttachment : public GrGpuResource {
+public:
+ ~GrStencilAttachment() override {
+ // TODO: allow SB to be purged and detach itself from rts
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ int bits() const { return fBits; }
+ int numSamples() const { return fSampleCnt; }
+
+ bool hasPerformedInitialClear() const { return fHasPerformedInitialClear; }
+ void markHasPerformedInitialClear() { fHasPerformedInitialClear = true; }
+
+ // We create a unique stencil buffer at each width, height and sampleCnt and share it for
+ // all render targets that require a stencil with those params.
+ static void ComputeSharedStencilAttachmentKey(int width, int height, int sampleCnt,
+ GrUniqueKey* key);
+
+protected:
+ GrStencilAttachment(GrGpu* gpu, int width, int height, int bits, int sampleCnt)
+ : INHERITED(gpu)
+ , fWidth(width)
+ , fHeight(height)
+ , fBits(bits)
+ , fSampleCnt(sampleCnt) {
+ }
+
+private:
+ const char* getResourceType() const override { return "Stencil"; }
+
+ int fWidth;
+ int fHeight;
+ int fBits;
+ int fSampleCnt;
+ bool fHasPerformedInitialClear = false;
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrStencilClip.h b/gfx/skia/skia/src/gpu/GrStencilClip.h
new file mode 100644
index 0000000000..d45d392e48
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilClip.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStencilClip_DEFINED
+#define GrStencilClip_DEFINED
+
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrFixedClip.h"
+
+/**
+ * Implements GrHardClip with the currently-existing stencil buffer contents and GrFixedClip.
+ */
+class GrStencilClip final : public GrHardClip {
+public:
+ GrStencilClip(uint32_t stencilStackID = SK_InvalidGenID) : fStencilStackID(stencilStackID) {}
+
+ explicit GrStencilClip(const SkIRect& scissorRect, uint32_t stencilStackID = SK_InvalidGenID)
+ : fFixedClip(scissorRect)
+ , fStencilStackID(stencilStackID) {
+ }
+
+ const GrFixedClip& fixedClip() const { return fFixedClip; }
+ GrFixedClip& fixedClip() { return fFixedClip; }
+
+    uint32_t stencilStackID() const { return fStencilStackID; }
+ bool hasStencilClip() const { return SK_InvalidGenID != fStencilStackID; }
+ void setStencilClip(uint32_t stencilStackID) { fStencilStackID = stencilStackID; }
+
+ bool quickContains(const SkRect& rect) const override {
+ return !this->hasStencilClip() && fFixedClip.quickContains(rect);
+ }
+ void getConservativeBounds(int width, int height, SkIRect* bounds, bool* iior) const override {
+ fFixedClip.getConservativeBounds(width, height, bounds, iior);
+ }
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, GrAA* aa) const override {
+ return !this->hasStencilClip() && fFixedClip.isRRect(rtBounds, rr, aa);
+ }
+ bool apply(int rtWidth, int rtHeight, GrAppliedHardClip* out, SkRect* bounds) const override {
+ if (!fFixedClip.apply(rtWidth, rtHeight, out, bounds)) {
+ return false;
+ }
+ if (this->hasStencilClip()) {
+ out->addStencilClip(fStencilStackID);
+ }
+ return true;
+ }
+
+private:
+ GrFixedClip fFixedClip;
+ uint32_t fStencilStackID;
+
+    typedef GrHardClip INHERITED;
+};
+
+#endif
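A construction sketch (illustration only): a scissored hard clip that also tests the stencil bit written by clip-stack element 7. Note that a stencil clip defeats the quickContains and isRRect fast paths above.

GrStencilClip clip(SkIRect::MakeWH(256, 256), /*stencilStackID=*/7);
SkASSERT(clip.hasStencilClip());
SkASSERT(!clip.quickContains(SkRect::MakeWH(10, 10)));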
diff --git a/gfx/skia/skia/src/gpu/GrStencilSettings.cpp b/gfx/skia/skia/src/gpu/GrStencilSettings.cpp
new file mode 100644
index 0000000000..d03165cad4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilSettings.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/GrStencilSettings.h"
+
+#include "src/gpu/GrProcessor.h"
+
+constexpr const GrUserStencilSettings gUnused(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+GR_STATIC_ASSERT(kAll_StencilFlags == (gUnused.fFrontFlags[0] & gUnused.fBackFlags[0]));
+
+const GrUserStencilSettings& GrUserStencilSettings::kUnused = gUnused;
+
+void GrStencilSettings::reset(const GrUserStencilSettings& user, bool hasStencilClip,
+ int numStencilBits) {
+ uint16_t frontFlags = user.fFrontFlags[hasStencilClip];
+ if (frontFlags & kSingleSided_StencilFlag) {
+ SkASSERT(frontFlags == user.fBackFlags[hasStencilClip]);
+ fFlags = frontFlags;
+ if (!this->isDisabled()) {
+ fFront.reset(user.fFront, hasStencilClip, numStencilBits);
+ }
+ return;
+ }
+
+ uint16_t backFlags = user.fBackFlags[hasStencilClip];
+ fFlags = frontFlags & backFlags;
+ if (this->isDisabled()) {
+ return;
+ }
+ if (!(frontFlags & kDisabled_StencilFlag)) {
+ fFront.reset(user.fFront, hasStencilClip, numStencilBits);
+ } else {
+ fFront.setDisabled();
+ }
+ if (!(backFlags & kDisabled_StencilFlag)) {
+ fBack.reset(user.fBack, hasStencilClip, numStencilBits);
+ } else {
+ fBack.setDisabled();
+ }
+}
+
+void GrStencilSettings::reset(const GrStencilSettings& that) {
+ fFlags = that.fFlags;
+ if ((kInvalid_PrivateFlag | kDisabled_StencilFlag) & fFlags) {
+ return;
+ }
+ if (!this->isTwoSided()) {
+ memcpy(&fFront, &that.fFront, sizeof(Face));
+ } else {
+ memcpy(&fFront, &that.fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+}
+
+bool GrStencilSettings::operator==(const GrStencilSettings& that) const {
+ if ((kInvalid_PrivateFlag | kDisabled_StencilFlag) & (fFlags | that.fFlags)) {
+ // At least one is invalid and/or disabled.
+ if (kInvalid_PrivateFlag & (fFlags | that.fFlags)) {
+ return false; // We never allow invalid stencils to be equal.
+ }
+ // They're only equal if both are disabled.
+ return kDisabled_StencilFlag & (fFlags & that.fFlags);
+ }
+ if (kSingleSided_StencilFlag & (fFlags & that.fFlags)) {
+ return 0 == memcmp(&fFront, &that.fFront, sizeof(Face)); // Both are single sided.
+ } else if (kSingleSided_StencilFlag & (fFlags | that.fFlags)) {
+ return false;
+ } else {
+ return 0 == memcmp(&fFront, &that.fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+ // memcmp relies on GrStencilSettings::Face being tightly packed.
+ GR_STATIC_ASSERT(0 == offsetof(Face, fRef));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fRef));
+ GR_STATIC_ASSERT(2 == offsetof(Face, fTest));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTest));
+ GR_STATIC_ASSERT(4 == offsetof(Face, fTestMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTestMask));
+ GR_STATIC_ASSERT(6 == offsetof(Face, fPassOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fPassOp));
+ GR_STATIC_ASSERT(7 == offsetof(Face, fFailOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fFailOp));
+ GR_STATIC_ASSERT(8 == offsetof(Face, fWriteMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fWriteMask));
+ GR_STATIC_ASSERT(10 == sizeof(Face));
+}
+
+static constexpr GrStencilTest gUserStencilTestToRaw[kGrUserStencilTestCount] = {
+ // Tests that respect the clip.
+ GrStencilTest::kAlways, // kAlwaysIfInClip (This is only for when there is not a stencil clip).
+ GrStencilTest::kEqual, // kEqualIfInClip.
+ GrStencilTest::kLess, // kLessIfInClip.
+ GrStencilTest::kLEqual, // kLEqualIfInClip.
+
+ // Tests that ignore the clip.
+ GrStencilTest::kAlways,
+ GrStencilTest::kNever,
+ GrStencilTest::kGreater,
+ GrStencilTest::kGEqual,
+ GrStencilTest::kLess,
+ GrStencilTest::kLEqual,
+ GrStencilTest::kEqual,
+ GrStencilTest::kNotEqual
+};
+
+GR_STATIC_ASSERT(0 == (int)GrUserStencilTest::kAlwaysIfInClip);
+GR_STATIC_ASSERT(1 == (int)GrUserStencilTest::kEqualIfInClip);
+GR_STATIC_ASSERT(2 == (int)GrUserStencilTest::kLessIfInClip);
+GR_STATIC_ASSERT(3 == (int)GrUserStencilTest::kLEqualIfInClip);
+GR_STATIC_ASSERT(4 == (int)GrUserStencilTest::kAlways);
+GR_STATIC_ASSERT(5 == (int)GrUserStencilTest::kNever);
+GR_STATIC_ASSERT(6 == (int)GrUserStencilTest::kGreater);
+GR_STATIC_ASSERT(7 == (int)GrUserStencilTest::kGEqual);
+GR_STATIC_ASSERT(8 == (int)GrUserStencilTest::kLess);
+GR_STATIC_ASSERT(9 == (int)GrUserStencilTest::kLEqual);
+GR_STATIC_ASSERT(10 == (int)GrUserStencilTest::kEqual);
+GR_STATIC_ASSERT(11 == (int)GrUserStencilTest::kNotEqual);
+
+static constexpr GrStencilOp gUserStencilOpToRaw[kGrUserStencilOpCount] = {
+ GrStencilOp::kKeep,
+
+ // Ops that only modify user bits.
+ GrStencilOp::kZero,
+ GrStencilOp::kReplace,
+ GrStencilOp::kInvert,
+ GrStencilOp::kIncWrap,
+ GrStencilOp::kDecWrap,
+ GrStencilOp::kIncClamp, // kIncMaybeClamp.
+ GrStencilOp::kDecClamp, // kDecMaybeClamp.
+
+ // Ops that only modify the clip bit.
+ GrStencilOp::kZero, // kZeroClipBit.
+ GrStencilOp::kReplace, // kSetClipBit.
+ GrStencilOp::kInvert, // kInvertClipBit.
+
+ // Ops that modify clip and user bits.
+ GrStencilOp::kReplace, // kSetClipAndReplaceUserBits.
+ GrStencilOp::kZero // kZeroClipAndUserBits.
+};
+
+GR_STATIC_ASSERT(0 == (int)GrUserStencilOp::kKeep);
+GR_STATIC_ASSERT(1 == (int)GrUserStencilOp::kZero);
+GR_STATIC_ASSERT(2 == (int)GrUserStencilOp::kReplace);
+GR_STATIC_ASSERT(3 == (int)GrUserStencilOp::kInvert);
+GR_STATIC_ASSERT(4 == (int)GrUserStencilOp::kIncWrap);
+GR_STATIC_ASSERT(5 == (int)GrUserStencilOp::kDecWrap);
+GR_STATIC_ASSERT(6 == (int)GrUserStencilOp::kIncMaybeClamp);
+GR_STATIC_ASSERT(7 == (int)GrUserStencilOp::kDecMaybeClamp);
+GR_STATIC_ASSERT(8 == (int)GrUserStencilOp::kZeroClipBit);
+GR_STATIC_ASSERT(9 == (int)GrUserStencilOp::kSetClipBit);
+GR_STATIC_ASSERT(10 == (int)GrUserStencilOp::kInvertClipBit);
+GR_STATIC_ASSERT(11 == (int)GrUserStencilOp::kSetClipAndReplaceUserBits);
+GR_STATIC_ASSERT(12 == (int)GrUserStencilOp::kZeroClipAndUserBits);
+
+void GrStencilSettings::Face::reset(const GrUserStencilSettings::Face& user, bool hasStencilClip,
+ int numStencilBits) {
+ SkASSERT(user.fTest < (GrUserStencilTest)kGrUserStencilTestCount);
+ SkASSERT(user.fPassOp < (GrUserStencilOp)kGrUserStencilOpCount);
+ SkASSERT(user.fFailOp < (GrUserStencilOp)kGrUserStencilOpCount);
+ SkASSERT(numStencilBits > 0 && numStencilBits <= 16);
+ int clipBit = 1 << (numStencilBits - 1);
+ int userMask = clipBit - 1;
+
+ GrUserStencilOp maxOp = SkTMax(user.fPassOp, user.fFailOp);
+ SkDEBUGCODE(GrUserStencilOp otherOp = SkTMin(user.fPassOp, user.fFailOp);)
+ if (maxOp <= kLastUserOnlyStencilOp) {
+ // Ops that only modify user bits.
+ fWriteMask = user.fWriteMask & userMask;
+ SkASSERT(otherOp <= kLastUserOnlyStencilOp);
+ } else if (maxOp <= kLastClipOnlyStencilOp) {
+ // Ops that only modify the clip bit.
+ fWriteMask = clipBit;
+ SkASSERT(GrUserStencilOp::kKeep == otherOp ||
+ (otherOp > kLastUserOnlyStencilOp && otherOp <= kLastClipOnlyStencilOp));
+ } else {
+ // Ops that modify both clip and user bits.
+ fWriteMask = clipBit | (user.fWriteMask & userMask);
+ SkASSERT(GrUserStencilOp::kKeep == otherOp || otherOp > kLastClipOnlyStencilOp);
+ }
+
+ fFailOp = gUserStencilOpToRaw[(int)user.fFailOp];
+ fPassOp = gUserStencilOpToRaw[(int)user.fPassOp];
+
+ if (!hasStencilClip || user.fTest > kLastClippedStencilTest) {
+ // Ignore the clip.
+ fTestMask = user.fTestMask & userMask;
+ fTest = gUserStencilTestToRaw[(int)user.fTest];
+ } else if (GrUserStencilTest::kAlwaysIfInClip != user.fTest) {
+ // Respect the clip.
+ fTestMask = clipBit | (user.fTestMask & userMask);
+ fTest = gUserStencilTestToRaw[(int)user.fTest];
+ } else {
+ // Test only for clip.
+ fTestMask = clipBit;
+ fTest = GrStencilTest::kEqual;
+ }
+
+ fRef = (clipBit | user.fRef) & (fTestMask | fWriteMask);
+}
+
+void GrStencilSettings::Face::setDisabled() {
+ memset(this, 0, sizeof(*this));
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil Rules for Merging user stencil space into clip
+//
+
+///////
+// Replace
+static constexpr GrUserStencilSettings gUserToClipReplace(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipReplace(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Intersect
+static constexpr GrUserStencilSettings gUserToClipIsect(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kLessIfInClip, // "0 < userBits" is equivalent to "0 != userBits".
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Difference
+static constexpr GrUserStencilSettings gUserToClipDiff(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Union
+static constexpr GrUserStencilSettings gUserToClipUnion(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipUnionPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+///////
+// Xor
+static constexpr GrUserStencilSettings gUserToClipXorPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipXorPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+///////
+// Reverse Diff
+static constexpr GrUserStencilSettings gUserToClipRDiffPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kZeroClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipRDiffPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kZeroClipBit,
+ 0x0000>()
+);
+
+///////
+// Second pass to clear user bits (only needed sometimes)
+static constexpr GrUserStencilSettings gZeroUserBits(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr const GrUserStencilSettings* gUserToClipTable[2][1 + SkRegion::kLastOp][3] = {
+ { /* Normal fill. */
+ {&gUserToClipDiff, nullptr, nullptr}, // kDifference_Op.
+ {&gUserToClipIsect, nullptr, nullptr}, // kIntersect_Op.
+ {&gUserToClipUnion, nullptr, nullptr}, // kUnion_Op.
+ {&gUserToClipXorPass0, &gZeroUserBits, nullptr}, // kXOR_Op.
+ {&gUserToClipRDiffPass0, &gZeroUserBits, nullptr}, // kReverseDifference_Op.
+ {&gUserToClipReplace, nullptr, nullptr} // kReplace_Op.
+
+ }, /* Inverse fill. */ {
+ {&gUserToClipIsect, nullptr, nullptr}, // ~diff (aka isect).
+ {&gUserToClipDiff, nullptr, nullptr}, // ~isect (aka diff).
+ {&gInvUserToClipUnionPass0, &gZeroUserBits, nullptr}, // ~union.
+ {&gInvUserToClipXorPass0, &gZeroUserBits, nullptr}, // ~xor.
+ {&gInvUserToClipRDiffPass0, &gZeroUserBits, nullptr}, // ~reverse diff.
+ {&gInvUserToClipReplace, nullptr, nullptr} // ~replace.
+ }
+};
+
+GR_STATIC_ASSERT(0 == SkRegion::kDifference_Op);
+GR_STATIC_ASSERT(1 == SkRegion::kIntersect_Op);
+GR_STATIC_ASSERT(2 == SkRegion::kUnion_Op);
+GR_STATIC_ASSERT(3 == SkRegion::kXOR_Op);
+GR_STATIC_ASSERT(4 == SkRegion::kReverseDifference_Op);
+GR_STATIC_ASSERT(5 == SkRegion::kReplace_Op);
+
+///////
+// Direct to Stencil
+
+// We can render a clip element directly without first writing to the client
+// portion of the clip when the fill is not inverse and the set operation will
+// only modify the in/out status of samples covered by the clip element.
+
+// This one only works if used right after the stencil clip has been cleared.
+// Our clip mask creation code doesn't allow midstream replace ops.
+static constexpr GrUserStencilSettings gReplaceClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kSetClipBit,
+ GrUserStencilOp::kSetClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gUnionClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kSetClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gXorClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kInvertClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gDiffClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZeroClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+static constexpr const GrUserStencilSettings* gDirectDrawTable[1 + SkRegion::kLastOp][2] = {
+ {&gDiffClip, nullptr}, // kDifference_Op.
+ {nullptr, nullptr}, // kIntersect_Op.
+ {&gUnionClip, nullptr}, // kUnion_Op.
+ {&gXorClip, nullptr}, // kXOR_Op.
+ {nullptr, nullptr}, // kReverseDifference_Op.
+ {&gReplaceClip, nullptr} // kReplace_Op.
+};
+
+GR_STATIC_ASSERT(0 == SkRegion::kDifference_Op);
+GR_STATIC_ASSERT(1 == SkRegion::kIntersect_Op);
+GR_STATIC_ASSERT(2 == SkRegion::kUnion_Op);
+GR_STATIC_ASSERT(3 == SkRegion::kXOR_Op);
+GR_STATIC_ASSERT(4 == SkRegion::kReverseDifference_Op);
+GR_STATIC_ASSERT(5 == SkRegion::kReplace_Op);
+
+GrUserStencilSettings const* const* GrStencilSettings::GetClipPasses(SkRegion::Op op,
+ bool canBeDirect,
+ bool invertedFill,
+ bool* drawDirectToClip) {
+ SkASSERT((unsigned)op <= SkRegion::kLastOp);
+ if (canBeDirect && !invertedFill) { // TODO: inverse fill + intersect op can be direct.
+ GrUserStencilSettings const* const* directPass = gDirectDrawTable[op];
+ if (directPass[0]) {
+ *drawDirectToClip = true;
+ return directPass;
+ }
+ }
+ *drawDirectToClip = false;
+ return gUserToClipTable[invertedFill][op];
+}
+
+static constexpr GrUserStencilSettings gZeroStencilClipBit(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kZeroClipBit,
+ GrUserStencilOp::kZeroClipBit,
+ 0x0000>()
+);
+
+const GrUserStencilSettings* GrStencilSettings::SetClipBitSettings(bool setToInside) {
+ return setToInside ? &gReplaceClip : &gZeroStencilClipBit;
+}
+
+void GrStencilSettings::genKey(GrProcessorKeyBuilder* b) const {
+ b->add32(fFlags);
+ if (this->isDisabled()) {
+ return;
+ }
+ if (!this->isTwoSided()) {
+ constexpr int kCount16 = sizeof(Face) / sizeof(uint16_t);
+ GR_STATIC_ASSERT(0 == sizeof(Face) % sizeof(uint16_t));
+ uint16_t* key = reinterpret_cast<uint16_t*>(b->add32n((kCount16 + 1) / 2));
+ memcpy(key, &fFront, sizeof(Face));
+ key[kCount16] = 0;
+ GR_STATIC_ASSERT(1 == kCount16 % 2);
+ } else {
+ constexpr int kCount32 = (2 * sizeof(Face)) / sizeof(uint32_t);
+ GR_STATIC_ASSERT(0 == (2 * sizeof(Face)) % sizeof(uint32_t));
+ uint32_t* key = b->add32n(kCount32);
+ memcpy(key, &fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+ // We rely on GrStencilSettings::Face being tightly packed for the key to be reliable.
+ GR_STATIC_ASSERT(0 == offsetof(Face, fRef));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fRef));
+ GR_STATIC_ASSERT(2 == offsetof(Face, fTest));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTest));
+ GR_STATIC_ASSERT(4 == offsetof(Face, fTestMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTestMask));
+ GR_STATIC_ASSERT(6 == offsetof(Face, fPassOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fPassOp));
+ GR_STATIC_ASSERT(7 == offsetof(Face, fFailOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fFailOp));
+ GR_STATIC_ASSERT(8 == offsetof(Face, fWriteMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fWriteMask));
+ GR_STATIC_ASSERT(10 == sizeof(Face));
+}
diff --git a/gfx/skia/skia/src/gpu/GrStencilSettings.h b/gfx/skia/skia/src/gpu/GrStencilSettings.h
new file mode 100644
index 0000000000..e6942361b8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilSettings.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrStencilSettings_DEFINED
+#define GrStencilSettings_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "src/gpu/GrUserStencilSettings.h"
+
+class GrProcessorKeyBuilder;
+
+enum class GrStencilTest : uint16_t {
+ kAlways,
+ kNever,
+ kGreater,
+ kGEqual,
+ kLess,
+ kLEqual,
+ kEqual,
+ kNotEqual
+};
+static constexpr int kGrStencilTestCount = 1 + (int)GrStencilTest::kNotEqual;
+
+enum class GrStencilOp : uint8_t {
+ kKeep,
+ kZero,
+ kReplace, // Replace stencil value with fRef (only the bits enabled in fWriteMask).
+ kInvert,
+ kIncWrap,
+ kDecWrap,
+ // NOTE: clamping occurs before the write mask. So if the MSB is zero and masked out, stencil
+ // values will still wrap when using clamping ops.
+ kIncClamp,
+ kDecClamp
+};
+static constexpr int kGrStencilOpCount = 1 + (int)GrStencilOp::kDecClamp;
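The clamping note above is easiest to see with concrete numbers (an illustrative sketch):

```cpp
// 8-bit stencil, fWriteMask = 0x7F (user bits only), current value 0x7F:
// kIncClamp increments the full 8-bit value first: 0x7F + 1 = 0x80 (no clamp,
// since 0x80 < 0xFF). The write mask then keeps only the low 7 bits, so the
// user bits land on 0x00 -- they wrapped even though the op is a "clamp" op.
```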
+
+/**
+ * This class defines concrete stencil settings that map directly to the underlying hardware. It
+ * is deduced from user stencil settings, stencil clip status, and the number of bits in the
+ * target stencil buffer.
+ */
+class GrStencilSettings {
+public:
+ GrStencilSettings() { this->setDisabled(); }
+ GrStencilSettings(const GrUserStencilSettings& user, bool hasStencilClip, int numStencilBits) {
+ this->reset(user, hasStencilClip, numStencilBits);
+ }
+ GrStencilSettings(const GrStencilSettings& that) { this->reset(that); }
+ GrStencilSettings& operator=(const GrStencilSettings& that) { this->reset(that); return *this; }
+
+ void invalidate() { fFlags |= kInvalid_PrivateFlag; }
+ void setDisabled() { fFlags = kAll_StencilFlags; }
+ void reset(const GrUserStencilSettings&, bool hasStencilClip, int numStencilBits);
+ void reset(const GrStencilSettings&);
+
+ bool isValid() const { return !(fFlags & kInvalid_PrivateFlag); }
+ bool isDisabled() const { SkASSERT(this->isValid()); return fFlags & kDisabled_StencilFlag; }
+ bool doesWrite() const { SkASSERT(this->isValid());
+ return !(fFlags & kNoModifyStencil_StencilFlag); }
+ bool isTwoSided() const { SkASSERT(this->isValid());
+ return !(fFlags & kSingleSided_StencilFlag); }
+ bool usesWrapOp() const { SkASSERT(this->isValid());
+ return !(fFlags & kNoWrapOps_StencilFlag); }
+
+ void genKey(GrProcessorKeyBuilder* b) const;
+
+ bool operator!=(const GrStencilSettings& that) const { return !(*this == that); }
+ bool operator==(const GrStencilSettings&) const;
+
+ struct Face : public GrTStencilFaceSettings<GrStencilTest, GrStencilOp> {
+ void reset(const GrUserStencilSettings::Face&, bool useStencilClip, int numStencilBits);
+ void setDisabled();
+ };
+
+ const Face& frontAndBack() const {
+ SkASSERT(!this->isDisabled());
+ SkASSERT(!this->isTwoSided());
+ return fFront;
+ }
+ const Face& front(GrSurfaceOrigin origin) const {
+ SkASSERT(this->isTwoSided());
+ return (kTopLeft_GrSurfaceOrigin == origin) ? fFront : fBack;
+ }
+ const Face& back(GrSurfaceOrigin origin) const {
+ SkASSERT(this->isTwoSided());
+ return (kTopLeft_GrSurfaceOrigin == origin) ? fBack : fFront;
+ }
+
+ /**
+     * Given a thing to draw into the stencil clip, a fill type, and a set op,
+     * this function determines:
+     * 1. Whether the thing can be drawn directly to the stencil clip or
+     *    needs to be drawn to the client portion of the stencil first.
+ * 2. How many passes are needed.
+ * 3. What those passes are.
+ *
+ * @param op the set op to combine this element with the existing clip
+ * @param canBeDirect can the caller draw this element directly (without using stencil)?
+ * @param invertedFill is this path inverted
+ * @param drawDirectToClip out: true if caller should draw the element directly, false if it
+ * should draw it into the user stencil bits first.
+ *
+ * @return a null-terminated array of settings for stencil passes.
+ *
+ * If drawDirectToClip is false, the caller must first draw the element into the user
+ * stencil bits, and then cover the clip area with multiple passes using the returned
+ * stencil settings.
+ *
+ * If drawDirectToClip is true, the returned array will only have one pass and the
+ * caller should use those stencil settings while drawing the element directly.
+ */
+ static GrUserStencilSettings const* const* GetClipPasses(SkRegion::Op op,
+ bool canBeDirect,
+ bool invertedFill,
+ bool* drawDirectToClip);
+
+ /** Gets the user stencil settings to directly set the clip bit. */
+ static const GrUserStencilSettings* SetClipBitSettings(bool setToInside);
+
+private:
+ // Internal flag for backends to optionally mark their tracked stencil state as invalid.
+ enum { kInvalid_PrivateFlag = (kLast_StencilFlag << 1) };
+
+ uint32_t fFlags;
+ Face fFront;
+ Face fBack;
+};
+
+#endif
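A minimal sketch of how a caller might consume GetClipPasses(); the loop structure is an assumption, since the clip-mask code that actually drives this lives outside this file:

```cpp
// Merge an element into the clip with XOR, forcing the two-pass table path.
bool drawDirectToClip;
GrUserStencilSettings const* const* passes =
        GrStencilSettings::GetClipPasses(SkRegion::kXOR_Op,
                                         /*canBeDirect=*/false,
                                         /*invertedFill=*/false,
                                         &drawDirectToClip);
// The array is null-terminated. For a normal-fill XOR this yields two passes
// (gUserToClipXorPass0, then gZeroUserBits to clear the user bits).
for (int i = 0; passes[i]; ++i) {
    // Draw a clip-covering rect using the stencil settings *passes[i].
}
```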
diff --git a/gfx/skia/skia/src/gpu/GrStyle.cpp b/gfx/skia/skia/src/gpu/GrStyle.cpp
new file mode 100644
index 0000000000..305b5b3ab5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStyle.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrStyle.h"
+#include "src/utils/SkDashPathPriv.h"
+
+int GrStyle::KeySize(const GrStyle &style, Apply apply, uint32_t flags) {
+ GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(SkScalar));
+ int size = 0;
+ if (style.isDashed()) {
+ // One scalar for scale, one for dash phase, and one for each dash value.
+ size += 2 + style.dashIntervalCnt();
+ } else if (style.pathEffect()) {
+ // No key for a generic path effect.
+ return -1;
+ }
+
+ if (Apply::kPathEffectOnly == apply) {
+ return size;
+ }
+
+ if (style.strokeRec().needToApply()) {
+ // One for res scale, one for style/cap/join, one for miter limit, and one for width.
+ size += 4;
+ }
+ return size;
+}
+
+void GrStyle::WriteKey(uint32_t *key, const GrStyle &style, Apply apply, SkScalar scale,
+ uint32_t flags) {
+ SkASSERT(key);
+ SkASSERT(KeySize(style, apply) >= 0);
+ GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(SkScalar));
+
+ int i = 0;
+ // The scale can influence both the path effect and stroking. We want to preserve the
+ // property that the following two are equal:
+ // 1. WriteKey with apply == kPathEffectAndStrokeRec
+ // 2. WriteKey with apply == kPathEffectOnly followed by WriteKey of a GrStyle made
+    //       from SkStrokeRec output by the path effect (and no additional path effect).
+ // Since the scale can affect both parts of 2 we write it into the key twice.
+ if (style.isDashed()) {
+ GR_STATIC_ASSERT(sizeof(style.dashPhase()) == sizeof(uint32_t));
+ SkScalar phase = style.dashPhase();
+ memcpy(&key[i++], &scale, sizeof(SkScalar));
+ memcpy(&key[i++], &phase, sizeof(SkScalar));
+
+ int32_t count = style.dashIntervalCnt();
+ // Dash count should always be even.
+ SkASSERT(0 == (count & 0x1));
+ const SkScalar *intervals = style.dashIntervals();
+ int intervalByteCnt = count * sizeof(SkScalar);
+ memcpy(&key[i], intervals, intervalByteCnt);
+ i += count;
+ } else {
+ SkASSERT(!style.pathEffect());
+ }
+
+ if (Apply::kPathEffectAndStrokeRec == apply && style.strokeRec().needToApply()) {
+ memcpy(&key[i++], &scale, sizeof(SkScalar));
+ enum {
+ kStyleBits = 2,
+ kJoinBits = 2,
+ kCapBits = 32 - kStyleBits - kJoinBits,
+
+ kJoinShift = kStyleBits,
+ kCapShift = kJoinShift + kJoinBits,
+ };
+ GR_STATIC_ASSERT(SkStrokeRec::kStyleCount <= (1 << kStyleBits));
+ GR_STATIC_ASSERT(SkPaint::kJoinCount <= (1 << kJoinBits));
+ GR_STATIC_ASSERT(SkPaint::kCapCount <= (1 << kCapBits));
+ // The cap type only matters for unclosed shapes. However, a path effect could unclose
+ // the shape before it is stroked.
+ SkPaint::Cap cap = SkPaint::kDefault_Cap;
+ if (!(flags & kClosed_KeyFlag) || style.pathEffect()) {
+ cap = style.strokeRec().getCap();
+ }
+ SkScalar miter = -1.f;
+ SkPaint::Join join = SkPaint::kDefault_Join;
+
+ // Dashing will not insert joins but other path effects may.
+ if (!(flags & kNoJoins_KeyFlag) || style.hasNonDashPathEffect()) {
+ join = style.strokeRec().getJoin();
+ // Miter limit only affects miter joins
+ if (SkPaint::kMiter_Join == join) {
+ miter = style.strokeRec().getMiter();
+ }
+ }
+
+ key[i++] = style.strokeRec().getStyle() |
+ join << kJoinShift |
+ cap << kCapShift;
+
+ memcpy(&key[i++], &miter, sizeof(miter));
+
+ SkScalar width = style.strokeRec().getWidth();
+ memcpy(&key[i++], &width, sizeof(width));
+ }
+ SkASSERT(KeySize(style, apply) == i);
+}
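The two-key equality the comment above promises can be sketched as a check using only the APIs declared in GrStyle.h (a hypothetical helper; it assumes 'dashed' really is a dashed style so KeySize() is non-negative):

```cpp
#include <vector>

// Sketch: keying dash + stroke at once should equal keying the dash alone and
// then keying a stroke-only GrStyle built from the dashed output.
static bool keyInvariantHolds(const GrStyle& dashed, const SkPath& path,
                              SkScalar scale) {
    std::vector<uint32_t> oneShot(
            GrStyle::KeySize(dashed, GrStyle::Apply::kPathEffectAndStrokeRec));
    GrStyle::WriteKey(oneShot.data(), dashed,
                      GrStyle::Apply::kPathEffectAndStrokeRec, scale);

    std::vector<uint32_t> twoStep(
            GrStyle::KeySize(dashed, GrStyle::Apply::kPathEffectOnly));
    GrStyle::WriteKey(twoStep.data(), dashed,
                      GrStyle::Apply::kPathEffectOnly, scale);

    SkPath dashedPath;
    SkStrokeRec remaining(SkStrokeRec::kFill_InitStyle);
    if (!dashed.applyPathEffectToPath(&dashedPath, &remaining, path, scale)) {
        return false;
    }
    GrStyle strokeOnly(remaining, nullptr);  // no remaining path effect
    size_t dashLen = twoStep.size();
    twoStep.resize(dashLen + GrStyle::KeySize(
            strokeOnly, GrStyle::Apply::kPathEffectAndStrokeRec));
    GrStyle::WriteKey(twoStep.data() + dashLen, strokeOnly,
                      GrStyle::Apply::kPathEffectAndStrokeRec, scale);
    return oneShot == twoStep;
}
```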
+
+void GrStyle::initPathEffect(sk_sp<SkPathEffect> pe) {
+ SkASSERT(!fPathEffect);
+ SkASSERT(SkPathEffect::kNone_DashType == fDashInfo.fType);
+ SkASSERT(0 == fDashInfo.fIntervals.count());
+ if (!pe) {
+ return;
+ }
+ SkPathEffect::DashInfo info;
+ if (SkPathEffect::kDash_DashType == pe->asADash(&info)) {
+ SkStrokeRec::Style recStyle = fStrokeRec.getStyle();
+ if (recStyle != SkStrokeRec::kFill_Style && recStyle != SkStrokeRec::kStrokeAndFill_Style) {
+ fDashInfo.fType = SkPathEffect::kDash_DashType;
+ fDashInfo.fIntervals.reset(info.fCount);
+ fDashInfo.fPhase = info.fPhase;
+ info.fIntervals = fDashInfo.fIntervals.get();
+ pe->asADash(&info);
+ fPathEffect = std::move(pe);
+ }
+ } else {
+ fPathEffect = std::move(pe);
+ }
+}
+
+bool GrStyle::applyPathEffect(SkPath* dst, SkStrokeRec* strokeRec, const SkPath& src) const {
+ if (!fPathEffect) {
+ return false;
+ }
+ if (SkPathEffect::kDash_DashType == fDashInfo.fType) {
+ // We apply the dash ourselves here rather than using the path effect. This is so that
+ // we can control whether the dasher applies the strokeRec for special cases. Our keying
+ // depends on the strokeRec being applied separately.
+ SkScalar phase = fDashInfo.fPhase;
+ const SkScalar* intervals = fDashInfo.fIntervals.get();
+ int intervalCnt = fDashInfo.fIntervals.count();
+ SkScalar initialLength;
+ int initialIndex;
+ SkScalar intervalLength;
+ SkDashPath::CalcDashParameters(phase, intervals, intervalCnt, &initialLength,
+ &initialIndex, &intervalLength);
+ if (!SkDashPath::InternalFilter(dst, src, strokeRec,
+ nullptr, intervals, intervalCnt,
+ initialLength, initialIndex, intervalLength,
+ SkDashPath::StrokeRecApplication::kDisallow)) {
+ return false;
+ }
+ } else if (!fPathEffect->filterPath(dst, src, strokeRec, nullptr)) {
+ return false;
+ }
+ dst->setIsVolatile(true);
+ return true;
+}
+
+bool GrStyle::applyPathEffectToPath(SkPath *dst, SkStrokeRec *remainingStroke,
+ const SkPath &src, SkScalar resScale) const {
+ SkASSERT(dst);
+ SkStrokeRec strokeRec = fStrokeRec;
+ strokeRec.setResScale(resScale);
+ if (!this->applyPathEffect(dst, &strokeRec, src)) {
+ return false;
+ }
+ *remainingStroke = strokeRec;
+ return true;
+}
+
+bool GrStyle::applyToPath(SkPath* dst, SkStrokeRec::InitStyle* style, const SkPath& src,
+ SkScalar resScale) const {
+ SkASSERT(style);
+ SkASSERT(dst);
+ SkStrokeRec strokeRec = fStrokeRec;
+ strokeRec.setResScale(resScale);
+ const SkPath* pathForStrokeRec = &src;
+ if (this->applyPathEffect(dst, &strokeRec, src)) {
+ pathForStrokeRec = dst;
+ } else if (fPathEffect) {
+ return false;
+ }
+ if (strokeRec.needToApply()) {
+ if (!strokeRec.applyToPath(dst, *pathForStrokeRec)) {
+ return false;
+ }
+ dst->setIsVolatile(true);
+ *style = SkStrokeRec::kFill_InitStyle;
+ } else if (!fPathEffect) {
+ // Nothing to do for path effect or stroke, fail.
+ return false;
+ } else {
+ SkASSERT(SkStrokeRec::kFill_Style == strokeRec.getStyle() ||
+ SkStrokeRec::kHairline_Style == strokeRec.getStyle());
+ *style = strokeRec.getStyle() == SkStrokeRec::kFill_Style
+ ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ }
+ return true;
+}
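For orientation, a typical call into applyToPath() might look like this (hypothetical caller; 'style', 'originalPath', and 'viewMatrix' are assumed, and the real call sites live in the shape/renderer code, not in this file):

```cpp
// Reduce a styled path to plain geometry plus a fill/hairline flag.
SkPath deviceGeometry;
SkStrokeRec::InitStyle fillOrHairline;
SkScalar resScale = GrStyle::MatrixToScaleFactor(viewMatrix);  // 'viewMatrix' assumed
if (style.applyToPath(&deviceGeometry, &fillOrHairline, originalPath, resScale)) {
    // 'deviceGeometry' now has the path effect and stroke baked in; draw it
    // as a fill or hairline according to 'fillOrHairline'.
}
```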
diff --git a/gfx/skia/skia/src/gpu/GrStyle.h b/gfx/skia/skia/src/gpu/GrStyle.h
new file mode 100644
index 0000000000..1dce72d6f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStyle.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStyle_DEFINED
+#define GrStyle_DEFINED
+
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkTemplates.h"
+
+/**
+ * Represents the various ways that a GrShape can be styled. It has fill/stroking information
+ * as well as an optional path effect. If the path effect represents dashing, the dashing
+ * information is extracted from the path effect and stored explicitly.
+ *
+ * This will replace GrStrokeInfo as GrShape is deployed.
+ */
+class GrStyle {
+public:
+ /**
+ * A style object that represents a fill with no path effect.
+ * TODO: constexpr with C++14
+ */
+ static const GrStyle& SimpleFill() {
+ static const GrStyle kFill(SkStrokeRec::kFill_InitStyle);
+ return kFill;
+ }
+
+ /**
+ * A style object that represents a hairline stroke with no path effect.
+ * TODO: constexpr with C++14
+ */
+ static const GrStyle& SimpleHairline() {
+ static const GrStyle kHairline(SkStrokeRec::kHairline_InitStyle);
+ return kHairline;
+ }
+
+ enum class Apply {
+ kPathEffectOnly,
+ kPathEffectAndStrokeRec
+ };
+
+ /**
+ * Optional flags for computing keys that may remove unnecessary variation in the key due to
+ * style settings that don't affect particular classes of geometry.
+ */
+ enum KeyFlags {
+ // The shape being styled has no open contours.
+ kClosed_KeyFlag = 0x1,
+ // The shape being styled doesn't have any joins and so isn't affected by join type.
+ kNoJoins_KeyFlag = 0x2
+ };
+
+ /**
+ * Computes the key length for a GrStyle. The return will be negative if it cannot be turned
+ * into a key. This occurs when there is a path effect that is not a dash. The key can
+ * either reflect just the path effect (if one) or the path effect and the strokerec. Note
+     * that a simple fill has a zero-sized key.
+ */
+ static int KeySize(const GrStyle&, Apply, uint32_t flags = 0);
+
+ /**
+ * Writes a unique key for the style into the provided buffer. This function assumes the buffer
+ * has room for at least KeySize() values. It assumes that KeySize() returns a non-negative
+ * value for the combination of GrStyle, Apply and flags params. This is written so that the key
+ * for just dash application followed by the key for the remaining SkStrokeRec is the same as
+ * the key for applying dashing and SkStrokeRec all at once.
+ */
+ static void WriteKey(uint32_t*, const GrStyle&, Apply, SkScalar scale, uint32_t flags = 0);
+
+ GrStyle() : GrStyle(SkStrokeRec::kFill_InitStyle) {}
+
+ explicit GrStyle(SkStrokeRec::InitStyle initStyle) : fStrokeRec(initStyle) {}
+
+ GrStyle(const SkStrokeRec& strokeRec, sk_sp<SkPathEffect> pe) : fStrokeRec(strokeRec) {
+ this->initPathEffect(std::move(pe));
+ }
+
+ GrStyle(const GrStyle& that) = default;
+
+ explicit GrStyle(const SkPaint& paint) : fStrokeRec(paint) {
+ this->initPathEffect(paint.refPathEffect());
+ }
+
+ explicit GrStyle(const SkPaint& paint, SkPaint::Style overrideStyle)
+ : fStrokeRec(paint, overrideStyle) {
+ this->initPathEffect(paint.refPathEffect());
+ }
+
+ GrStyle& operator=(const GrStyle& that) {
+ fPathEffect = that.fPathEffect;
+ fDashInfo = that.fDashInfo;
+ fStrokeRec = that.fStrokeRec;
+ return *this;
+ }
+
+ void resetToInitStyle(SkStrokeRec::InitStyle fillOrHairline) {
+ fDashInfo.reset();
+ fPathEffect.reset(nullptr);
+ if (SkStrokeRec::kFill_InitStyle == fillOrHairline) {
+ fStrokeRec.setFillStyle();
+ } else {
+ fStrokeRec.setHairlineStyle();
+ }
+ }
+
+ /** Is this style a fill with no path effect? */
+ bool isSimpleFill() const { return fStrokeRec.isFillStyle() && !fPathEffect; }
+
+ /** Is this style a hairline with no path effect? */
+ bool isSimpleHairline() const { return fStrokeRec.isHairlineStyle() && !fPathEffect; }
+
+ SkPathEffect* pathEffect() const { return fPathEffect.get(); }
+ sk_sp<SkPathEffect> refPathEffect() const { return fPathEffect; }
+
+ bool hasPathEffect() const { return SkToBool(fPathEffect.get()); }
+
+ bool hasNonDashPathEffect() const { return fPathEffect.get() && !this->isDashed(); }
+
+ bool isDashed() const { return SkPathEffect::kDash_DashType == fDashInfo.fType; }
+ SkScalar dashPhase() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fPhase;
+ }
+ int dashIntervalCnt() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fIntervals.count();
+ }
+ const SkScalar* dashIntervals() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fIntervals.get();
+ }
+
+ const SkStrokeRec& strokeRec() const { return fStrokeRec; }
+
+    /** Hairline or fill styles without path effects make no alterations to the geometry. */
+ bool applies() const {
+ return this->pathEffect() || (!fStrokeRec.isFillStyle() && !fStrokeRec.isHairlineStyle());
+ }
+
+ static SkScalar MatrixToScaleFactor(const SkMatrix& matrix) {
+ // getMaxScale will return -1 if the matrix has perspective. In that case we can use a scale
+ // factor of 1. This isn't necessarily a good choice and in the future we might consider
+ // taking a bounds here for the perspective case.
+ return SkScalarAbs(matrix.getMaxScale());
+ }
+ /**
+ * Applies just the path effect and returns remaining stroke information. This will fail if
+ * there is no path effect. dst may or may not have been overwritten on failure. Scale controls
+ * geometric approximations made by the path effect. It is typically computed from the view
+ * matrix.
+ */
+    bool SK_WARN_UNUSED_RESULT applyPathEffectToPath(SkPath* dst, SkStrokeRec* remainingStroke,
+ const SkPath& src, SkScalar scale) const;
+
+ /**
+ * If this succeeds then the result path should be filled or hairlined as indicated by the
+ * returned SkStrokeRec::InitStyle value. Will fail if there is no path effect and the
+ * strokerec doesn't change the geometry. When this fails the outputs may or may not have
+ * been overwritten. Scale controls geometric approximations made by the path effect and
+ * stroker. It is typically computed from the view matrix.
+ */
+ bool SK_WARN_UNUSED_RESULT applyToPath(SkPath* dst, SkStrokeRec::InitStyle* fillOrHairline,
+ const SkPath& src, SkScalar scale) const;
+
+ /** Given bounds of a path compute the bounds of path with the style applied. */
+ void adjustBounds(SkRect* dst, const SkRect& src) const {
+ if (this->pathEffect()) {
+ this->pathEffect()->computeFastBounds(dst, src);
+ // This may not be the correct SkStrokeRec to use. skbug.com/5299
+ // It happens to work for dashing.
+ SkScalar radius = fStrokeRec.getInflationRadius();
+ dst->outset(radius, radius);
+ } else {
+ SkScalar radius = fStrokeRec.getInflationRadius();
+ *dst = src.makeOutset(radius, radius);
+ }
+ }
+
+private:
+ void initPathEffect(sk_sp<SkPathEffect> pe);
+
+ struct DashInfo {
+ DashInfo() : fType(SkPathEffect::kNone_DashType) {}
+ DashInfo(const DashInfo& that) { *this = that; }
+ DashInfo& operator=(const DashInfo& that) {
+ fType = that.fType;
+ fPhase = that.fPhase;
+ fIntervals.reset(that.fIntervals.count());
+ sk_careful_memcpy(fIntervals.get(), that.fIntervals.get(),
+ sizeof(SkScalar) * that.fIntervals.count());
+ return *this;
+ }
+ void reset() {
+ fType = SkPathEffect::kNone_DashType;
+ fIntervals.reset(0);
+ }
+ SkPathEffect::DashType fType;
+ SkScalar fPhase{0};
+ SkAutoSTArray<4, SkScalar> fIntervals;
+ };
+
+ bool applyPathEffect(SkPath* dst, SkStrokeRec* strokeRec, const SkPath& src) const;
+
+ SkStrokeRec fStrokeRec;
+ sk_sp<SkPathEffect> fPathEffect;
+ DashInfo fDashInfo;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurface.cpp b/gfx/skia/skia/src/gpu/GrSurface.cpp
new file mode 100644
index 0000000000..90f75a82cb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurface.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrSurface.h"
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrSurfacePriv.h"
+
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/SkGr.h"
+
+size_t GrSurface::ComputeSize(const GrCaps& caps,
+ const GrBackendFormat& format,
+ int width,
+ int height,
+ int colorSamplesPerPixel,
+ GrMipMapped mipMapped,
+ bool binSize) {
+ size_t colorSize;
+
+ width = binSize ? GrResourceProvider::MakeApprox(width) : width;
+ height = binSize ? GrResourceProvider::MakeApprox(height) : height;
+
+    // Just setting a default value here to appease warnings about an uninitialized object.
+ SkImage::CompressionType compressionType = SkImage::kETC1_CompressionType;
+ if (caps.isFormatCompressed(format, &compressionType)) {
+ colorSize = GrCompressedFormatDataSize(compressionType, width, height);
+ } else {
+ colorSize = (size_t)width * height * caps.bytesPerPixel(format);
+ }
+ SkASSERT(colorSize > 0);
+
+ size_t finalSize = colorSamplesPerPixel * colorSize;
+
+ if (GrMipMapped::kYes == mipMapped) {
+ // We don't have to worry about the mipmaps being a different size than
+ // we'd expect because we never change fDesc.fWidth/fHeight.
+ finalSize += colorSize/3;
+ }
+ return finalSize;
+}
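A worked instance of the size accounting above (illustrative numbers):

```cpp
// 1024x1024 RGBA_8888 (4 bytes/pixel), one color sample, full mip chain:
//   colorSize = 1024 * 1024 * 4         = 4,194,304 bytes (4 MiB)
//   finalSize = colorSize + colorSize/3 = 5,592,405 bytes (~5.33 MiB)
// The mip levels cost roughly a third of the base level -- the limit of the
// geometric series 1/4 + 1/16 + 1/64 + ...
```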
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrSurface::onRelease() {
+ this->invokeReleaseProc();
+ this->INHERITED::onRelease();
+}
+
+void GrSurface::onAbandon() {
+ this->invokeReleaseProc();
+ this->INHERITED::onAbandon();
+}
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceContext.cpp b/gfx/skia/skia/src/gpu/GrSurfaceContext.cpp
new file mode 100644
index 0000000000..4b5e0a8c7c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceContext.cpp
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrSurfaceContext.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSurfaceContextPriv.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+#define RETURN_FALSE_IF_ABANDONED if (this->fContext->priv().abandoned()) { return false; }
+
+// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
+// GrOpsTasks to be picked up and added to by renderTargetContexts lower in the call
+// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpsTask).
+GrSurfaceContext::GrSurfaceContext(GrRecordingContext* context,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace)
+ : fContext(context), fColorInfo(colorType, alphaType, std::move(colorSpace)) {}
+
+const GrCaps* GrSurfaceContext::caps() const { return fContext->priv().caps(); }
+
+GrAuditTrail* GrSurfaceContext::auditTrail() {
+ return fContext->priv().auditTrail();
+}
+
+GrDrawingManager* GrSurfaceContext::drawingManager() {
+ return fContext->priv().drawingManager();
+}
+
+const GrDrawingManager* GrSurfaceContext::drawingManager() const {
+ return fContext->priv().drawingManager();
+}
+
+#ifdef SK_DEBUG
+GrSingleOwner* GrSurfaceContext::singleOwner() {
+ return fContext->priv().singleOwner();
+}
+#endif
+
+bool GrSurfaceContext::readPixels(const GrImageInfo& origDstInfo, void* dst, size_t rowBytes,
+ SkIPoint pt, GrContext* direct) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrSurfaceContext::readPixels");
+
+ if (!direct && !(direct = fContext->priv().asDirectContext())) {
+ return false;
+ }
+
+ if (!dst) {
+ return false;
+ }
+
+ size_t tightRowBytes = origDstInfo.minRowBytes();
+ if (!rowBytes) {
+ rowBytes = tightRowBytes;
+ } else if (rowBytes < tightRowBytes) {
+ return false;
+ }
+
+ if (!origDstInfo.isValid()) {
+ return false;
+ }
+
+ GrSurfaceProxy* srcProxy = this->asSurfaceProxy();
+
+ // MDB TODO: delay this instantiation until later in the method
+ if (!srcProxy->instantiate(direct->priv().resourceProvider())) {
+ return false;
+ }
+
+ GrSurface* srcSurface = srcProxy->peekSurface();
+
+ auto dstInfo = origDstInfo;
+ if (!dstInfo.clip(this->width(), this->height(), &pt, &dst, rowBytes)) {
+ return false;
+ }
+ // Our tight row bytes may have been changed by clipping.
+ tightRowBytes = dstInfo.minRowBytes();
+
+ bool premul = this->colorInfo().alphaType() == kUnpremul_SkAlphaType &&
+ dstInfo.alphaType() == kPremul_SkAlphaType;
+ bool unpremul = this->colorInfo().alphaType() == kPremul_SkAlphaType &&
+ dstInfo.alphaType() == kUnpremul_SkAlphaType;
+
+ bool needColorConversion =
+ SkColorSpaceXformSteps::Required(this->colorInfo().colorSpace(), dstInfo.colorSpace());
+
+ const GrCaps* caps = direct->priv().caps();
+ // This is the getImageData equivalent to the canvas2D putImageData fast path. We probably don't
+ // care so much about getImageData performance. However, in order to ensure putImageData/
+ // getImageData in "legacy" mode are round-trippable we use the GPU to do the complementary
+ // unpremul step to writeSurfacePixels's premul step (which is determined empirically in
+    // fContext->validPMUPMConversionExists()).
+ GrBackendFormat defaultRGBAFormat = caps->getDefaultBackendFormat(GrColorType::kRGBA_8888,
+ GrRenderable::kYes);
+ bool canvas2DFastPath = unpremul && !needColorConversion &&
+ (GrColorType::kRGBA_8888 == dstInfo.colorType() ||
+ GrColorType::kBGRA_8888 == dstInfo.colorType()) &&
+ SkToBool(srcProxy->asTextureProxy()) &&
+ (srcProxy->config() == kRGBA_8888_GrPixelConfig ||
+ srcProxy->config() == kBGRA_8888_GrPixelConfig) &&
+ defaultRGBAFormat.isValid() &&
+ direct->priv().validPMUPMConversionExists();
+
+ auto readFlag = caps->surfaceSupportsReadPixels(srcSurface);
+ if (readFlag == GrCaps::SurfaceReadPixelsSupport::kUnsupported) {
+ return false;
+ }
+
+ if (readFlag == GrCaps::SurfaceReadPixelsSupport::kCopyToTexture2D || canvas2DFastPath) {
+ GrColorType colorType =
+ canvas2DFastPath ? GrColorType::kRGBA_8888 : this->colorInfo().colorType();
+ sk_sp<SkColorSpace> cs = canvas2DFastPath ? nullptr : this->colorInfo().refColorSpace();
+
+ auto tempCtx = direct->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kApprox, dstInfo.width(), dstInfo.height(), colorType, std::move(cs),
+ 1, GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin, nullptr, SkBudgeted::kYes);
+ if (!tempCtx) {
+ return false;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+ if (canvas2DFastPath) {
+ fp = direct->priv().createPMToUPMEffect(
+ GrSimpleTextureEffect::Make(sk_ref_sp(srcProxy->asTextureProxy()),
+ this->colorInfo().colorType(), SkMatrix::I()));
+ if (dstInfo.colorType() == GrColorType::kBGRA_8888) {
+ fp = GrFragmentProcessor::SwizzleOutput(std::move(fp), GrSwizzle::BGRA());
+ dstInfo = dstInfo.makeColorType(GrColorType::kRGBA_8888);
+ }
+ // The render target context is incorrectly tagged as kPremul even though we're writing
+ // unpremul data thanks to the PMToUPM effect. Fake out the dst alpha type so we don't
+ // double unpremul.
+ dstInfo = dstInfo.makeAlphaType(kPremul_SkAlphaType);
+ } else {
+ fp = GrSimpleTextureEffect::Make(sk_ref_sp(srcProxy->asTextureProxy()),
+ this->colorInfo().colorType(), SkMatrix::I());
+ }
+ if (!fp) {
+ return false;
+ }
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.addColorFragmentProcessor(std::move(fp));
+
+ tempCtx->asRenderTargetContext()->fillRectToRect(
+ GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeWH(dstInfo.width(), dstInfo.height()),
+ SkRect::MakeXYWH(pt.fX, pt.fY, dstInfo.width(), dstInfo.height()));
+
+ return tempCtx->readPixels(dstInfo, dst, rowBytes, {0, 0}, direct);
+ }
+
+ bool flip = srcProxy->origin() == kBottomLeft_GrSurfaceOrigin;
+
+ auto supportedRead = caps->supportedReadPixelsColorType(
+ this->colorInfo().colorType(), srcProxy->backendFormat(), dstInfo.colorType());
+
+ bool makeTight = !caps->readPixelsRowBytesSupport() && tightRowBytes != rowBytes;
+
+ bool convert = unpremul || premul || needColorConversion || flip || makeTight ||
+ (dstInfo.colorType() != supportedRead.fColorType);
+
+ std::unique_ptr<char[]> tmpPixels;
+ GrImageInfo tmpInfo;
+ void* readDst = dst;
+ size_t readRB = rowBytes;
+ if (convert) {
+ tmpInfo = {supportedRead.fColorType, this->colorInfo().alphaType(),
+ this->colorInfo().refColorSpace(), dstInfo.width(), dstInfo.height()};
+ size_t tmpRB = tmpInfo.minRowBytes();
+ size_t size = tmpRB * tmpInfo.height();
+ // Chrome MSAN bots require the data to be initialized (hence the ()).
+ tmpPixels.reset(new char[size]());
+
+ readDst = tmpPixels.get();
+ readRB = tmpRB;
+ pt.fY = flip ? srcSurface->height() - pt.fY - dstInfo.height() : pt.fY;
+ }
+
+ direct->priv().flushSurface(srcProxy);
+
+ if (!direct->priv().getGpu()->readPixels(srcSurface, pt.fX, pt.fY, dstInfo.width(),
+ dstInfo.height(), this->colorInfo().colorType(),
+ supportedRead.fColorType, readDst, readRB)) {
+ return false;
+ }
+
+ if (convert) {
+ return GrConvertPixels(dstInfo, dst, rowBytes, tmpInfo, readDst, readRB, flip);
+ }
+ return true;
+}
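A minimal read-back sketch against this API (caller-side setup assumed; 'surfaceContext' is a hypothetical pointer to some concrete GrSurfaceContext):

```cpp
// Read a 64x64 premul RGBA block from the top-left corner back to the CPU.
GrImageInfo dstInfo(GrColorType::kRGBA_8888, kPremul_SkAlphaType,
                    /*colorSpace=*/nullptr, /*width=*/64, /*height=*/64);
std::unique_ptr<char[]> pixels(new char[dstInfo.minRowBytes() * 64]);
bool ok = surfaceContext->readPixels(dstInfo, pixels.get(),
                                     /*rowBytes=*/0,   // 0 means tightly packed
                                     /*pt=*/{0, 0});
```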
+
+bool GrSurfaceContext::writePixels(const GrImageInfo& origSrcInfo, const void* src, size_t rowBytes,
+ SkIPoint pt, GrContext* direct) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrSurfaceContext::writePixels");
+
+ if (!direct && !(direct = fContext->priv().asDirectContext())) {
+ return false;
+ }
+
+ if (this->asSurfaceProxy()->readOnly()) {
+ return false;
+ }
+
+ if (!src) {
+ return false;
+ }
+
+ size_t tightRowBytes = origSrcInfo.minRowBytes();
+ if (!rowBytes) {
+ rowBytes = tightRowBytes;
+ } else if (rowBytes < tightRowBytes) {
+ return false;
+ }
+
+ if (!origSrcInfo.isValid()) {
+ return false;
+ }
+
+ GrSurfaceProxy* dstProxy = this->asSurfaceProxy();
+ if (!dstProxy->instantiate(direct->priv().resourceProvider())) {
+ return false;
+ }
+
+ GrSurface* dstSurface = dstProxy->peekSurface();
+
+ auto srcInfo = origSrcInfo;
+ if (!srcInfo.clip(this->width(), this->height(), &pt, &src, rowBytes)) {
+ return false;
+ }
+ // Our tight row bytes may have been changed by clipping.
+ tightRowBytes = srcInfo.minRowBytes();
+
+ bool premul = this->colorInfo().alphaType() == kPremul_SkAlphaType &&
+ srcInfo.alphaType() == kUnpremul_SkAlphaType;
+ bool unpremul = this->colorInfo().alphaType() == kUnpremul_SkAlphaType &&
+ srcInfo.alphaType() == kPremul_SkAlphaType;
+
+ bool needColorConversion =
+ SkColorSpaceXformSteps::Required(srcInfo.colorSpace(), this->colorInfo().colorSpace());
+
+ const GrCaps* caps = direct->priv().caps();
+
+ auto rgbaDefaultFormat = caps->getDefaultBackendFormat(GrColorType::kRGBA_8888,
+ GrRenderable::kNo);
+
+ // For canvas2D putImageData performance we have a special code path for unpremul RGBA_8888 srcs
+ // that are premultiplied on the GPU. This is kept as narrow as possible for now.
+ bool canvas2DFastPath = !caps->avoidWritePixelsFastPath() && premul && !needColorConversion &&
+ (srcInfo.colorType() == GrColorType::kRGBA_8888 ||
+ srcInfo.colorType() == GrColorType::kBGRA_8888) &&
+ SkToBool(this->asRenderTargetContext()) &&
+ (dstProxy->config() == kRGBA_8888_GrPixelConfig ||
+ dstProxy->config() == kBGRA_8888_GrPixelConfig) &&
+ rgbaDefaultFormat.isValid() &&
+ direct->priv().validPMUPMConversionExists();
+
+ if (!caps->surfaceSupportsWritePixels(dstSurface) || canvas2DFastPath) {
+ GrSurfaceDesc desc;
+ desc.fWidth = srcInfo.width();
+ desc.fHeight = srcInfo.height();
+ GrColorType colorType;
+
+ GrBackendFormat format;
+ SkAlphaType alphaType;
+ if (canvas2DFastPath) {
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ colorType = GrColorType::kRGBA_8888;
+ format = rgbaDefaultFormat;
+ alphaType = kUnpremul_SkAlphaType;
+ } else {
+ desc.fConfig = dstProxy->config();
+ colorType = this->colorInfo().colorType();
+ format = dstProxy->backendFormat().makeTexture2D();
+ if (!format.isValid()) {
+ return false;
+ }
+ alphaType = this->colorInfo().alphaType();
+ }
+
+ // It is more efficient for us to write pixels into a top left origin so we prefer that.
+ // However, if the final proxy isn't a render target then we must use a copy to move the
+ // data into it which requires the origins to match. If the final proxy is a render target
+ // we can use a draw instead which doesn't have this origin restriction. Thus for render
+ // targets we will use top left and otherwise we will make the origins match.
+ GrSurfaceOrigin tempOrigin =
+ this->asRenderTargetContext() ? kTopLeft_GrSurfaceOrigin : dstProxy->origin();
+ auto tempProxy = direct->priv().proxyProvider()->createProxy(
+ format, desc, GrRenderable::kNo, 1, tempOrigin, GrMipMapped::kNo,
+ SkBackingFit::kApprox, SkBudgeted::kYes, GrProtected::kNo);
+
+ if (!tempProxy) {
+ return false;
+ }
+ auto tempCtx = direct->priv().drawingManager()->makeTextureContext(
+ tempProxy, colorType, alphaType, this->colorInfo().refColorSpace());
+ if (!tempCtx) {
+ return false;
+ }
+
+ // In the fast path we always write the srcData to the temp context as though it were RGBA.
+ // When the data is really BGRA the write will cause the R and B channels to be swapped in
+ // the intermediate surface which gets corrected by a swizzle effect when drawing to the
+ // dst.
+ if (canvas2DFastPath) {
+ srcInfo = srcInfo.makeColorType(GrColorType::kRGBA_8888);
+ }
+ if (!tempCtx->writePixels(srcInfo, src, rowBytes, {0, 0}, direct)) {
+ return false;
+ }
+
+ if (this->asRenderTargetContext()) {
+ std::unique_ptr<GrFragmentProcessor> fp;
+ if (canvas2DFastPath) {
+ fp = direct->priv().createUPMToPMEffect(
+ GrSimpleTextureEffect::Make(std::move(tempProxy), colorType,
+ SkMatrix::I()));
+ // Important: check the original src color type here!
+ if (origSrcInfo.colorType() == GrColorType::kBGRA_8888) {
+ fp = GrFragmentProcessor::SwizzleOutput(std::move(fp), GrSwizzle::BGRA());
+ }
+ } else {
+ fp = GrSimpleTextureEffect::Make(std::move(tempProxy), colorType, SkMatrix::I());
+ }
+ if (!fp) {
+ return false;
+ }
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.addColorFragmentProcessor(std::move(fp));
+ this->asRenderTargetContext()->fillRectToRect(
+ GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeXYWH(pt.fX, pt.fY, srcInfo.width(), srcInfo.height()),
+ SkRect::MakeWH(srcInfo.width(), srcInfo.height()));
+ } else {
+ SkIRect srcRect = SkIRect::MakeWH(srcInfo.width(), srcInfo.height());
+ SkIPoint dstPoint = SkIPoint::Make(pt.fX, pt.fY);
+ if (!this->copy(tempProxy.get(), srcRect, dstPoint)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ GrColorType allowedColorType =
+ caps->supportedWritePixelsColorType(this->colorInfo().colorType(),
+ dstProxy->backendFormat(),
+ srcInfo.colorType()).fColorType;
+ bool flip = dstProxy->origin() == kBottomLeft_GrSurfaceOrigin;
+ bool makeTight = !caps->writePixelsRowBytesSupport() && rowBytes != tightRowBytes;
+ bool convert = premul || unpremul || needColorConversion || makeTight ||
+ (srcInfo.colorType() != allowedColorType) || flip;
+
+ std::unique_ptr<char[]> tmpPixels;
+ GrColorType srcColorType = srcInfo.colorType();
+ if (convert) {
+ GrImageInfo tmpInfo(allowedColorType, this->colorInfo().alphaType(),
+ this->colorInfo().refColorSpace(), srcInfo.width(), srcInfo.height());
+ auto tmpRB = tmpInfo.minRowBytes();
+ tmpPixels.reset(new char[tmpRB * tmpInfo.height()]);
+
+ GrConvertPixels(tmpInfo, tmpPixels.get(), tmpRB, srcInfo, src, rowBytes, flip);
+
+ srcColorType = tmpInfo.colorType();
+ rowBytes = tmpRB;
+ src = tmpPixels.get();
+ pt.fY = flip ? dstSurface->height() - pt.fY - tmpInfo.height() : pt.fY;
+ }
+
+ // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
+ // complete flush here. On platforms that prefer VRAM use over flushes we're better off
+ // giving the drawing manager the chance of skipping the flush (i.e., by passing in the
+ // destination proxy)
+ // TODO: should this policy decision just be moved into the drawing manager?
+ direct->priv().flushSurface(caps->preferVRAMUseOverFlushes() ? dstProxy : nullptr);
+
+ return direct->priv().getGpu()->writePixels(dstSurface, pt.fX, pt.fY, srcInfo.width(),
+ srcInfo.height(), this->colorInfo().colorType(),
+ srcColorType, src, rowBytes);
+}
+
+bool GrSurfaceContext::copy(GrSurfaceProxy* src, const SkIRect& srcRect, const SkIPoint& dstPoint) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrSurfaceContextPriv::copy");
+
+ const GrCaps* caps = fContext->priv().caps();
+
+ SkASSERT(src->backendFormat().textureType() != GrTextureType::kExternal);
+ SkASSERT(src->origin() == this->asSurfaceProxy()->origin());
+ SkASSERT(caps->makeConfigSpecific(src->config(), src->backendFormat()) ==
+ caps->makeConfigSpecific(this->asSurfaceProxy()->config(),
+ this->asSurfaceProxy()->backendFormat()));
+
+ if (!caps->canCopySurface(this->asSurfaceProxy(), src, srcRect, dstPoint)) {
+ return false;
+ }
+
+ return this->drawingManager()->newCopyRenderTask(sk_ref_sp(src), srcRect,
+ this->asSurfaceProxyRef(), dstPoint);
+}
+
+std::unique_ptr<GrRenderTargetContext> GrSurfaceContext::rescale(
+ const SkImageInfo& info,
+ const SkIRect& srcRect,
+ SkSurface::RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality) {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ return nullptr;
+ }
+ auto rtProxy = this->asRenderTargetProxy();
+ if (rtProxy && rtProxy->wrapsVkSecondaryCB()) {
+ return nullptr;
+ }
+
+ // We rescale by drawing and don't currently support drawing to a kUnpremul destination.
+ if (info.alphaType() == kUnpremul_SkAlphaType) {
+ return nullptr;
+ }
+
+ int srcW = srcRect.width();
+ int srcH = srcRect.height();
+ int srcX = srcRect.fLeft;
+ int srcY = srcRect.fTop;
+ sk_sp<GrTextureProxy> texProxy = sk_ref_sp(this->asTextureProxy());
+ SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
+ GrColorType srcColorType = this->colorInfo().colorType();
+ if (!texProxy) {
+ texProxy = GrSurfaceProxy::Copy(fContext, this->asSurfaceProxy(), srcColorType,
+ GrMipMapped::kNo, srcRect, SkBackingFit::kApprox,
+ SkBudgeted::kNo);
+ if (!texProxy) {
+ return nullptr;
+ }
+ srcX = 0;
+ srcY = 0;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+
+ float sx = (float)info.width() / srcW;
+ float sy = (float)info.height() / srcH;
+
+ // How many bilerp/bicubic steps to do in X and Y. + means upscaling, - means downscaling.
+ int stepsX;
+ int stepsY;
+ if (rescaleQuality > kNone_SkFilterQuality) {
+ stepsX = static_cast<int>((sx > 1.f) ? ceil(log2f(sx)) : floor(log2f(sx)));
+ stepsY = static_cast<int>((sy > 1.f) ? ceil(log2f(sy)) : floor(log2f(sy)));
+ } else {
+ stepsX = sx != 1.f;
+ stepsY = sy != 1.f;
+ }
+ SkASSERT(stepsX || stepsY);
+
+ // Within a rescaling pass A is the input (if not null) and B is the output. At the end of the
+ // pass B is moved to A. If 'this' is the input on the first pass then tempA is null.
+ std::unique_ptr<GrRenderTargetContext> tempA;
+ std::unique_ptr<GrRenderTargetContext> tempB;
+
+ // Assume we should ignore the rescale linear request if the surface has no color space since
+ // it's unclear how we'd linearize from an unknown color space.
+ if (rescaleGamma == SkSurface::kLinear && this->colorInfo().colorSpace() &&
+ !this->colorInfo().colorSpace()->gammaIsLinear()) {
+ auto cs = this->colorInfo().colorSpace()->makeLinearGamma();
+ auto xform = GrColorSpaceXform::Make(this->colorInfo().colorSpace(),
+ this->colorInfo().alphaType(), cs.get(),
+ kPremul_SkAlphaType);
+ // We'll fall back to kRGBA_8888 if half float not supported.
+ auto linearRTC = fContext->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, srcW, srcH, GrColorType::kRGBA_F16, cs, 1, GrMipMapped::kNo,
+ kTopLeft_GrSurfaceOrigin);
+ if (!linearRTC) {
+ return nullptr;
+ }
+ linearRTC->drawTexture(GrNoClip(), texProxy, srcColorType, GrSamplerState::Filter::kNearest,
+ SkBlendMode::kSrc, SK_PMColor4fWHITE, SkRect::Make(srcRect),
+ SkRect::MakeWH(srcW, srcH), GrAA::kNo, GrQuadAAFlags::kNone,
+ constraint, SkMatrix::I(), std::move(xform));
+ texProxy = linearRTC->asTextureProxyRef();
+ tempA = std::move(linearRTC);
+ srcX = 0;
+ srcY = 0;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+ while (stepsX || stepsY) {
+ int nextW = info.width();
+ int nextH = info.height();
+ if (stepsX < 0) {
+ nextW = info.width() << (-stepsX - 1);
+ stepsX++;
+ } else if (stepsX != 0) {
+ if (stepsX > 1) {
+ nextW = srcW * 2;
+ }
+ --stepsX;
+ }
+ if (stepsY < 0) {
+ nextH = info.height() << (-stepsY - 1);
+ stepsY++;
+ } else if (stepsY != 0) {
+ if (stepsY > 1) {
+ nextH = srcH * 2;
+ }
+ --stepsY;
+ }
+ auto input = tempA ? tempA.get() : this;
+ GrColorType colorType = input->colorInfo().colorType();
+ auto cs = input->colorInfo().refColorSpace();
+ sk_sp<GrColorSpaceXform> xform;
+ auto prevAlphaType = input->colorInfo().alphaType();
+ if (!stepsX && !stepsY) {
+ // Might as well fold conversion to final info in the last step.
+ cs = info.refColorSpace();
+ colorType = SkColorTypeToGrColorType(info.colorType());
+ xform = GrColorSpaceXform::Make(input->colorInfo().colorSpace(),
+ input->colorInfo().alphaType(), cs.get(),
+ info.alphaType());
+ }
+ tempB = fContext->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, nextW, nextH, colorType, std::move(cs), 1, GrMipMapped::kNo,
+ kTopLeft_GrSurfaceOrigin);
+ if (!tempB) {
+ return nullptr;
+ }
+ auto dstRect = SkRect::MakeWH(nextW, nextH);
+ if (rescaleQuality == kHigh_SkFilterQuality) {
+ SkMatrix matrix;
+ matrix.setScaleTranslate((float)srcW / nextW, (float)srcH / nextH, srcX, srcY);
+ std::unique_ptr<GrFragmentProcessor> fp;
+ auto dir = GrBicubicEffect::Direction::kXY;
+ if (nextW == srcW) {
+ dir = GrBicubicEffect::Direction::kY;
+ } else if (nextH == srcH) {
+ dir = GrBicubicEffect::Direction::kX;
+ }
+ if (srcW != texProxy->width() || srcH != texProxy->height()) {
+ auto domain = GrTextureDomain::MakeTexelDomain(
+ SkIRect::MakeXYWH(srcX, srcY, srcW, srcH), GrTextureDomain::kClamp_Mode);
+ fp = GrBicubicEffect::Make(texProxy, srcColorType, matrix, domain, dir,
+ prevAlphaType);
+ } else {
+ fp = GrBicubicEffect::Make(texProxy, srcColorType, matrix, dir, prevAlphaType);
+ }
+ if (xform) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(xform));
+ }
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ tempB->fillRectToRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(), dstRect,
+ dstRect);
+ } else {
+ auto filter = rescaleQuality == kNone_SkFilterQuality ? GrSamplerState::Filter::kNearest
+ : GrSamplerState::Filter::kBilerp;
+ auto srcSubset = SkRect::MakeXYWH(srcX, srcY, srcW, srcH);
+ tempB->drawTexture(GrNoClip(), texProxy, srcColorType, filter, SkBlendMode::kSrc,
+ SK_PMColor4fWHITE, srcSubset, dstRect, GrAA::kNo,
+ GrQuadAAFlags::kNone, constraint, SkMatrix::I(), std::move(xform));
+ }
+ texProxy = tempB->asTextureProxyRef();
+ tempA = std::move(tempB);
+ srcX = srcY = 0;
+ srcW = nextW;
+ srcH = nextH;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+ SkASSERT(tempA);
+ return tempA;
+}
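To make the step computation concrete, an assumed example:

```cpp
// Rescaling a 100x100 srcRect to 450x30 with rescaleQuality > kNone:
//   sx = 450 / 100.f = 4.5 -> stepsX = ceil(log2f(4.5))  =  3 (upscaling)
//   sy =  30 / 100.f = 0.3 -> stepsY = floor(log2f(0.3)) = -2 (downscaling)
// The loop then doubles the width (100 -> 200 -> 400) before the final pass
// lands on 450 exactly, while the height steps down 100 -> 60 -> 30, each
// pass at most a 2x reduction.
```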
+
+GrSurfaceContext::PixelTransferResult GrSurfaceContext::transferPixels(GrColorType dstCT,
+ const SkIRect& rect) {
+ SkASSERT(rect.fLeft >= 0 && rect.fRight <= this->width());
+ SkASSERT(rect.fTop >= 0 && rect.fBottom <= this->height());
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ return {};
+ }
+ auto rtProxy = this->asRenderTargetProxy();
+ if (rtProxy && rtProxy->wrapsVkSecondaryCB()) {
+ return {};
+ }
+
+ auto proxy = this->asSurfaceProxy();
+ auto supportedRead = this->caps()->supportedReadPixelsColorType(this->colorInfo().colorType(),
+ proxy->backendFormat(), dstCT);
+ // Fail if read color type does not have all of dstCT's color channels and those missing color
+ // channels are in the src.
+ uint32_t dstComponents = GrColorTypeComponentFlags(dstCT);
+ uint32_t legalReadComponents = GrColorTypeComponentFlags(supportedRead.fColorType);
+ uint32_t srcComponents = GrColorTypeComponentFlags(this->colorInfo().colorType());
+ if ((~legalReadComponents & dstComponents) & srcComponents) {
+ return {};
+ }
+
+ if (!this->caps()->transferBufferSupport() ||
+ !supportedRead.fOffsetAlignmentForTransferBuffer) {
+ return {};
+ }
+
+ size_t rowBytes = GrColorTypeBytesPerPixel(supportedRead.fColorType) * rect.width();
+ size_t size = rowBytes * rect.height();
+ auto buffer = direct->priv().resourceProvider()->createBuffer(
+ size, GrGpuBufferType::kXferGpuToCpu, GrAccessPattern::kStream_GrAccessPattern);
+ if (!buffer) {
+ return {};
+ }
+ auto srcRect = rect;
+ bool flip = proxy->origin() == kBottomLeft_GrSurfaceOrigin;
+ if (flip) {
+ srcRect = SkIRect::MakeLTRB(rect.fLeft, this->height() - rect.fBottom, rect.fRight,
+ this->height() - rect.fTop);
+ }
+ this->drawingManager()->newTransferFromRenderTask(this->asSurfaceProxyRef(), srcRect,
+ this->colorInfo().colorType(),
+ supportedRead.fColorType, buffer, 0);
+ PixelTransferResult result;
+ result.fTransferBuffer = std::move(buffer);
+ auto at = this->colorInfo().alphaType();
+ if (supportedRead.fColorType != dstCT || flip) {
+ result.fPixelConverter = [w = rect.width(), h = rect.height(), dstCT, supportedRead, at](
+ void* dst, const void* src) {
+ GrImageInfo srcInfo(supportedRead.fColorType, at, nullptr, w, h);
+ GrImageInfo dstInfo(dstCT, at, nullptr, w, h);
+ GrConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
+ srcInfo, src, srcInfo.minRowBytes(),
+ /* flipY = */ false);
+ };
+ }
+ return result;
+}
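A sketch of consuming the transfer result once the GPU has finished and the buffer is mapped. The async plumbing lives in the context code rather than here, and the map()/unmap() calls are assumptions about the GrGpuBuffer interface:

```cpp
// Inside a GrSurfaceContext subclass (PixelTransferResult is protected):
auto result = this->transferPixels(GrColorType::kRGBA_8888, rect);
if (!result.fTransferBuffer) {
    // Transfer unsupported; fall back to readPixels().
}
// ... later, after the transfer has completed:
const void* mapped = result.fTransferBuffer->map();  // assumed mapping entry point
if (result.fPixelConverter) {
    // Buffer holds the supported read color type; convert to the one requested.
    result.fPixelConverter(dstPixels, mapped);
} else {
    // Buffer already holds the requested color type; copy out directly.
}
result.fTransferBuffer->unmap();  // assumed
```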
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceContext.h b/gfx/skia/skia/src/gpu/GrSurfaceContext.h
new file mode 100644
index 0000000000..c9de7d0c16
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceContext.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceContext_DEFINED
+#define GrSurfaceContext_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurface.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrSurfaceProxy.h"
+
+class GrAuditTrail;
+class GrDrawingManager;
+class GrRecordingContext;
+class GrRenderTargetContext;
+class GrRenderTargetProxy;
+class GrSingleOwner;
+class GrSurface;
+class GrSurfaceContextPriv;
+class GrSurfaceProxy;
+class GrTextureProxy;
+struct SkIPoint;
+struct SkIRect;
+
+/**
+ * A helper object to orchestrate commands for a particular surface
+ */
+class GrSurfaceContext {
+public:
+ virtual ~GrSurfaceContext() = default;
+
+ const GrColorInfo& colorInfo() const { return fColorInfo; }
+
+ // TODO: these two calls would be way cooler if this object had a GrSurfaceProxy pointer
+ int width() const { return this->asSurfaceProxy()->width(); }
+ int height() const { return this->asSurfaceProxy()->height(); }
+
+ const GrCaps* caps() const;
+
+ /**
+ * Reads a rectangle of pixels from the render target context.
+ * @param dstInfo image info for the destination
+ * @param dst destination pixels for the read
+ * @param rowBytes bytes in a row of 'dst'
+ * @param srcPt offset w/in the surface context from which to read
+ * @param direct The direct context to use. If null will use our GrRecordingContext if it
+ * is a GrDirectContext and fail otherwise.
+ */
+ bool readPixels(const GrImageInfo& dstInfo, void* dst, size_t rowBytes, SkIPoint srcPt,
+ GrContext* direct = nullptr);
+
+ /**
+     * Writes a rectangle of pixels [srcInfo, src, rowBytes] into this
+     * surface context at the specified position.
+ * @param srcInfo image info for the source pixels
+ * @param src source for the write
+ * @param rowBytes bytes in a row of 'src'
+ * @param dstPt offset w/in the surface context at which to write
+ * @param direct The direct context to use. If null will use our GrRecordingContext if it
+ * is a GrDirectContext and fail otherwise.
+ */
+ bool writePixels(const GrImageInfo& srcInfo, const void* src, size_t rowBytes, SkIPoint dstPt,
+ GrContext* direct = nullptr);
+
+    // TODO: this is virtual because this object doesn't have a pointer to the wrapped GrSurfaceProxy?
+ virtual GrSurfaceProxy* asSurfaceProxy() = 0;
+ virtual const GrSurfaceProxy* asSurfaceProxy() const = 0;
+ virtual sk_sp<GrSurfaceProxy> asSurfaceProxyRef() = 0;
+
+ virtual GrTextureProxy* asTextureProxy() = 0;
+ virtual const GrTextureProxy* asTextureProxy() const = 0;
+ virtual sk_sp<GrTextureProxy> asTextureProxyRef() = 0;
+
+ virtual GrRenderTargetProxy* asRenderTargetProxy() = 0;
+ virtual sk_sp<GrRenderTargetProxy> asRenderTargetProxyRef() = 0;
+
+ virtual GrRenderTargetContext* asRenderTargetContext() { return nullptr; }
+
+ GrAuditTrail* auditTrail();
+
+ // Provides access to functions that aren't part of the public API.
+ GrSurfaceContextPriv surfPriv();
+ const GrSurfaceContextPriv surfPriv() const;
+
+#if GR_TEST_UTILS
+ bool testCopy(GrSurfaceProxy* src, const SkIRect& srcRect, const SkIPoint& dstPoint) {
+ return this->copy(src, srcRect, dstPoint);
+ }
+
+ bool testCopy(GrSurfaceProxy* src) {
+ return this->copy(src);
+ }
+#endif
+
+
+protected:
+ friend class GrSurfaceContextPriv;
+
+ GrSurfaceContext(GrRecordingContext*, GrColorType, SkAlphaType, sk_sp<SkColorSpace>);
+
+ GrDrawingManager* drawingManager();
+ const GrDrawingManager* drawingManager() const;
+
+ SkDEBUGCODE(virtual void validate() const = 0;)
+
+ SkDEBUGCODE(GrSingleOwner* singleOwner();)
+
+ GrRecordingContext* fContext;
+
+ // The rescaling step of asyncRescaleAndReadPixels[YUV420]().
+ std::unique_ptr<GrRenderTargetContext> rescale(const SkImageInfo& info, const SkIRect& srcRect,
+ SkSurface::RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality);
+
+ // Inserts a transfer, part of the implementation of asyncReadPixels and
+ // asyncRescaleAndReadPixelsYUV420().
+ struct PixelTransferResult {
+ using ConversionFn = void(void* dst, const void* mappedBuffer);
+ // If null then the transfer could not be performed. Otherwise this buffer will contain
+ // the pixel data when the transfer is complete.
+ sk_sp<GrGpuBuffer> fTransferBuffer;
+ // If this is null then the transfer buffer will contain the data in the requested
+ // color type. Otherwise, when the transfer is done this must be called to convert
+ // from the transfer buffer's color type to the requested color type.
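+        // For illustration, once the transfer completes a caller might do ('dstPixels' assumed):
+        //     const void* mapped = result.fTransferBuffer->map();
+        //     if (result.fPixelConverter) { result.fPixelConverter(dstPixels, mapped); }
+        //     result.fTransferBuffer->unmap();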
+ std::function<ConversionFn> fPixelConverter;
+ };
+ PixelTransferResult transferPixels(GrColorType colorType, const SkIRect& rect);
+
+private:
+ friend class GrSurfaceProxy; // for copy
+
+ /**
+ * Copy 'src' into the proxy backing this context. This call will not do any draw fallback.
+ * Currently only writePixels and replaceRenderTarget call this directly. All other copies
+ * should go through GrSurfaceProxy::Copy.
+ * @param src src of pixels
+ * @param srcRect the subset of 'src' to copy
+ * @param dstPoint the origin of the 'srcRect' in the destination coordinate space
+ * @return true if the copy succeeded; false otherwise
+ *
+ * Note: Notionally, 'srcRect' is clipped to 'src's extent with 'dstPoint' being adjusted.
+ * Then the 'srcRect' offset by 'dstPoint' is clipped against the dst's extent.
+     *       The end result is that only valid src pixels and dst pixels will be touched, but the
+     *       copied regions will not be shifted. The 'src' must have the same origin as the backing
+     *       proxy of this surface context.
+ */
+ bool copy(GrSurfaceProxy* src, const SkIRect& srcRect, const SkIPoint& dstPoint);
+
+ bool copy(GrSurfaceProxy* src) {
+ return this->copy(src, SkIRect::MakeWH(src->width(), src->height()), SkIPoint::Make(0, 0));
+ }
+
+ GrColorInfo fColorInfo;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceContextPriv.h b/gfx/skia/skia/src/gpu/GrSurfaceContextPriv.h
new file mode 100644
index 0000000000..33d2aa7e4c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceContextPriv.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceContextPriv_DEFINED
+#define GrSurfaceContextPriv_DEFINED
+
+#include "src/gpu/GrSurfaceContext.h"
+
+/** Class that adds methods to GrSurfaceContext that are only intended for use internal to
+ Skia. This class is purely a privileged window into GrSurfaceContext. It should never have
+ additional data members or virtual methods. */
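+// For example, given a GrSurfaceContext* 'sc', internal code calls sc->surfPriv().getContext().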
+class GrSurfaceContextPriv {
+public:
+ GrRecordingContext* getContext() { return fSurfaceContext->fContext; }
+
+private:
+ explicit GrSurfaceContextPriv(GrSurfaceContext* surfaceContext)
+ : fSurfaceContext(surfaceContext) {
+ }
+
+ GrSurfaceContextPriv(const GrSurfaceContextPriv&) {} // unimpl
+ GrSurfaceContextPriv& operator=(const GrSurfaceContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrSurfaceContextPriv* operator&() const;
+ GrSurfaceContextPriv* operator&();
+
+ GrSurfaceContext* fSurfaceContext;
+
+ friend class GrSurfaceContext; // to construct/copy this type.
+};
+
+inline GrSurfaceContextPriv GrSurfaceContext::surfPriv() {
+ return GrSurfaceContextPriv(this);
+}
+
+inline const GrSurfaceContextPriv GrSurfaceContext::surfPriv() const {
+ return GrSurfaceContextPriv(const_cast<GrSurfaceContext*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurfacePriv.h b/gfx/skia/skia/src/gpu/GrSurfacePriv.h
new file mode 100644
index 0000000000..bd7aa539f6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfacePriv.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfacePriv_DEFINED
+#define GrSurfacePriv_DEFINED
+
+#include "include/gpu/GrSurface.h"
+
+/** Class that adds methods to GrSurface that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrSurface. It should never have additional data
+ members or virtual methods.
+ Non-static methods that are not trivial inlines should be spring-boarded (e.g. declared and
+    implemented privately in GrSurface with an inline public method here). */
+class GrSurfacePriv {
+public:
+ GrInternalSurfaceFlags flags() const { return fSurface->fSurfaceFlags; }
+
+private:
+ explicit GrSurfacePriv(GrSurface* surface) : fSurface(surface) {}
+ GrSurfacePriv(const GrSurfacePriv&); // unimpl
+ GrSurfacePriv& operator=(const GrSurfacePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrSurfacePriv* operator&() const;
+ GrSurfacePriv* operator&();
+
+ GrSurface* fSurface;
+
+ friend class GrSurface; // to construct/copy this type.
+};
+
+inline GrSurfacePriv GrSurface::surfacePriv() { return GrSurfacePriv(this); }
+
+inline const GrSurfacePriv GrSurface::surfacePriv() const {
+ return GrSurfacePriv(const_cast<GrSurface*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp b/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp
new file mode 100644
index 0000000000..a4878477e9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureRenderTargetProxy.h"
+
+#ifdef SK_DEBUG
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+
+static bool is_valid_lazy(const GrSurfaceDesc& desc, SkBackingFit fit) {
+ // A "fully" lazy proxy's width and height are not known until instantiation time.
+ // So fully lazy proxies are created with width and height < 0. Regular lazy proxies must be
+ // created with positive widths and heights. The width and height are set to 0 only after a
+ // failed instantiation. The former must be "approximate" fit while the latter can be either.
+ return desc.fConfig != kUnknown_GrPixelConfig &&
+ ((desc.fWidth < 0 && desc.fHeight < 0 && SkBackingFit::kApprox == fit) ||
+ (desc.fWidth > 0 && desc.fHeight > 0));
+}
+
+static bool is_valid_non_lazy(const GrSurfaceDesc& desc) {
+ return desc.fWidth > 0 && desc.fHeight > 0 && desc.fConfig != kUnknown_GrPixelConfig;
+}
+#endif
+
+// Deferred version
+GrSurfaceProxy::GrSurfaceProxy(const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ GrRenderable renderable,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : fSurfaceFlags(surfaceFlags)
+ , fFormat(format)
+ , fConfig(desc.fConfig)
+ , fWidth(desc.fWidth)
+ , fHeight(desc.fHeight)
+ , fOrigin(origin)
+ , fTextureSwizzle(textureSwizzle)
+ , fFit(fit)
+ , fBudgeted(budgeted)
+ , fUseAllocator(useAllocator)
+ , fIsProtected(isProtected)
+ , fGpuMemorySize(kInvalidGpuMemorySize) {
+ SkASSERT(fFormat.isValid());
+ SkASSERT(is_valid_non_lazy(desc));
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ SkASSERT(renderable == GrRenderable::kNo);
+ fSurfaceFlags |= GrInternalSurfaceFlags::kReadOnly;
+ }
+}
+
+// Lazy-callback version
+GrSurfaceProxy::GrSurfaceProxy(LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ GrRenderable renderable,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : fSurfaceFlags(surfaceFlags)
+ , fFormat(format)
+ , fConfig(desc.fConfig)
+ , fWidth(desc.fWidth)
+ , fHeight(desc.fHeight)
+ , fOrigin(origin)
+ , fTextureSwizzle(textureSwizzle)
+ , fFit(fit)
+ , fBudgeted(budgeted)
+ , fUseAllocator(useAllocator)
+ , fLazyInstantiateCallback(std::move(callback))
+ , fIsProtected(isProtected)
+ , fGpuMemorySize(kInvalidGpuMemorySize) {
+ SkASSERT(fFormat.isValid());
+ SkASSERT(fLazyInstantiateCallback);
+ SkASSERT(is_valid_lazy(desc, fit));
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ SkASSERT(renderable == GrRenderable::kNo);
+ fSurfaceFlags |= GrInternalSurfaceFlags::kReadOnly;
+ }
+}
+
+// Wrapped version
+GrSurfaceProxy::GrSurfaceProxy(sk_sp<GrSurface> surface,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit fit,
+ UseAllocator useAllocator)
+ : fTarget(std::move(surface))
+ , fSurfaceFlags(fTarget->surfacePriv().flags())
+ , fFormat(fTarget->backendFormat())
+ , fConfig(fTarget->config())
+ , fWidth(fTarget->width())
+ , fHeight(fTarget->height())
+ , fOrigin(origin)
+ , fTextureSwizzle(textureSwizzle)
+ , fFit(fit)
+ , fBudgeted(fTarget->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted
+ ? SkBudgeted::kYes
+ : SkBudgeted::kNo)
+ , fUseAllocator(useAllocator)
+ , fUniqueID(fTarget->uniqueID()) // Note: converting from unique resource ID to a proxy ID!
+ , fIsProtected(fTarget->isProtected() ? GrProtected::kYes : GrProtected::kNo)
+ , fGpuMemorySize(kInvalidGpuMemorySize) {
+ SkASSERT(fFormat.isValid());
+}
+
+GrSurfaceProxy::~GrSurfaceProxy() {
+    // For this to be deleted the opsTask that held a ref on it (if there was one) must have been
+    // deleted, which would have cleared out this back pointer.
+ SkASSERT(!fLastRenderTask);
+}
+
+bool GrSurfaceProxyPriv::AttachStencilIfNeeded(GrResourceProvider* resourceProvider,
+ GrSurface* surface, int minStencilSampleCount) {
+ if (minStencilSampleCount) {
+ GrRenderTarget* rt = surface->asRenderTarget();
+ if (!rt) {
+ SkASSERT(0);
+ return false;
+ }
+
+ if (!resourceProvider->attachStencilAttachment(rt, minStencilSampleCount)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+sk_sp<GrSurface> GrSurfaceProxy::createSurfaceImpl(GrResourceProvider* resourceProvider,
+ int sampleCnt,
+ int minStencilSampleCount,
+ GrRenderable renderable,
+ GrMipMapped mipMapped) const {
+ SkASSERT(mipMapped == GrMipMapped::kNo || fFit == SkBackingFit::kExact);
+ SkASSERT(!this->isLazy());
+ SkASSERT(!fTarget);
+ GrSurfaceDesc desc;
+ desc.fWidth = fWidth;
+ desc.fHeight = fHeight;
+ desc.fConfig = fConfig;
+
+ sk_sp<GrSurface> surface;
+ if (SkBackingFit::kApprox == fFit) {
+ surface = resourceProvider->createApproxTexture(desc, fFormat, renderable, sampleCnt,
+ fIsProtected);
+ } else {
+ surface = resourceProvider->createTexture(desc, fFormat, renderable, sampleCnt, mipMapped,
+ fBudgeted, fIsProtected);
+ }
+ if (!surface) {
+ return nullptr;
+ }
+
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(resourceProvider, surface.get(),
+ minStencilSampleCount)) {
+ return nullptr;
+ }
+
+ return surface;
+}
+
+bool GrSurfaceProxy::canSkipResourceAllocator() const {
+ if (fUseAllocator == UseAllocator::kNo) {
+ // Usually an atlas or onFlush proxy
+ return true;
+ }
+
+ auto peek = this->peekSurface();
+ if (!peek) {
+ return false;
+ }
+ // If this resource is already allocated and not recyclable then the resource allocator does
+ // not need to do anything with it.
+ return !peek->resourcePriv().getScratchKey().isValid();
+}
+
+void GrSurfaceProxy::assign(sk_sp<GrSurface> surface) {
+ SkASSERT(!fTarget && surface);
+
+ SkDEBUGCODE(this->validateSurface(surface.get());)
+
+ fTarget = std::move(surface);
+
+#ifdef SK_DEBUG
+ if (this->asRenderTargetProxy()) {
+ SkASSERT(fTarget->asRenderTarget());
+ if (int minStencilSampleCount = this->asRenderTargetProxy()->numStencilSamples()) {
+ auto* stencil = fTarget->asRenderTarget()->renderTargetPriv().getStencilAttachment();
+ SkASSERT(stencil);
+ SkASSERT(stencil->numSamples() >= minStencilSampleCount);
+ }
+ }
+
+ if (kInvalidGpuMemorySize != this->getRawGpuMemorySize_debugOnly()) {
+ SkASSERT(fTarget->gpuMemorySize() <= this->getRawGpuMemorySize_debugOnly());
+ }
+#endif
+}
+
+bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
+ int minStencilSampleCount, GrRenderable renderable,
+ GrMipMapped mipMapped, const GrUniqueKey* uniqueKey) {
+ SkASSERT(!this->isLazy());
+ if (fTarget) {
+ if (uniqueKey && uniqueKey->isValid()) {
+ SkASSERT(fTarget->getUniqueKey().isValid() && fTarget->getUniqueKey() == *uniqueKey);
+ }
+ return GrSurfaceProxyPriv::AttachStencilIfNeeded(resourceProvider, fTarget.get(),
+ minStencilSampleCount);
+ }
+
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(
+ resourceProvider, sampleCnt, minStencilSampleCount, renderable, mipMapped);
+ if (!surface) {
+ return false;
+ }
+
+ // If there was an invalidation message pending for this key, we might have just processed it,
+ // causing the key (stored on this proxy) to become invalid.
+ if (uniqueKey && uniqueKey->isValid()) {
+ resourceProvider->assignUniqueKeyToResource(*uniqueKey, surface.get());
+ }
+
+ this->assign(std::move(surface));
+
+ return true;
+}
+
+void GrSurfaceProxy::deinstantiate() {
+ SkASSERT(this->isInstantiated());
+ fTarget = nullptr;
+}
+
+void GrSurfaceProxy::computeScratchKey(GrScratchKey* key) const {
+ SkASSERT(!this->isFullyLazy());
+ GrRenderable renderable = GrRenderable::kNo;
+ int sampleCount = 1;
+ if (const auto* rtp = this->asRenderTargetProxy()) {
+ renderable = GrRenderable::kYes;
+ sampleCount = rtp->numSamples();
+ }
+
+ const GrTextureProxy* tp = this->asTextureProxy();
+ GrMipMapped mipMapped = GrMipMapped::kNo;
+ if (tp) {
+ mipMapped = tp->mipMapped();
+ }
+
+ int width = this->worstCaseWidth();
+ int height = this->worstCaseHeight();
+
+ GrTexturePriv::ComputeScratchKey(this->config(), width, height, renderable, sampleCount,
+ mipMapped, fIsProtected, key);
+}
+
+void GrSurfaceProxy::setLastRenderTask(GrRenderTask* renderTask) {
+#ifdef SK_DEBUG
+ if (fLastRenderTask) {
+ SkASSERT(fLastRenderTask->isClosed());
+ }
+#endif
+
+ // Un-reffed
+ fLastRenderTask = renderTask;
+}
+
+GrOpsTask* GrSurfaceProxy::getLastOpsTask() {
+ return fLastRenderTask ? fLastRenderTask->asOpsTask() : nullptr;
+}
+
+int GrSurfaceProxy::worstCaseWidth() const {
+ SkASSERT(!this->isFullyLazy());
+ if (fTarget) {
+ return fTarget->width();
+ }
+
+ if (SkBackingFit::kExact == fFit) {
+ return fWidth;
+ }
+ return GrResourceProvider::MakeApprox(fWidth);
+}
+
+int GrSurfaceProxy::worstCaseHeight() const {
+ SkASSERT(!this->isFullyLazy());
+ if (fTarget) {
+ return fTarget->height();
+ }
+
+ if (SkBackingFit::kExact == fFit) {
+ return fHeight;
+ }
+ return GrResourceProvider::MakeApprox(fHeight);
+}
+
+#ifdef SK_DEBUG
+void GrSurfaceProxy::validate(GrContext_Base* context) const {
+ if (fTarget) {
+ SkASSERT(fTarget->getContext() == context);
+ }
+}
+#endif
+
+sk_sp<GrTextureProxy> GrSurfaceProxy::Copy(GrRecordingContext* context,
+ GrSurfaceProxy* src,
+ GrColorType srcColorType,
+ GrMipMapped mipMapped,
+ SkIRect srcRect,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ RectsMustMatch rectsMustMatch) {
+ SkASSERT(!src->isFullyLazy());
+ GrProtected isProtected = src->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+ int width;
+ int height;
+
+ SkIPoint dstPoint;
+ if (rectsMustMatch == RectsMustMatch::kYes) {
+ width = src->width();
+ height = src->height();
+ dstPoint = {srcRect.fLeft, srcRect.fTop};
+ } else {
+ width = srcRect.width();
+ height = srcRect.height();
+ dstPoint = {0, 0};
+ }
+
+ if (!srcRect.intersect(SkIRect::MakeWH(src->width(), src->height()))) {
+ return nullptr;
+ }
+ auto colorType = GrPixelConfigToColorType(src->config());
+ if (src->backendFormat().textureType() != GrTextureType::kExternal) {
+ auto dstContext = context->priv().makeDeferredTextureContext(
+ fit, width, height, colorType, kUnknown_SkAlphaType, nullptr, mipMapped,
+ src->origin(), budgeted, isProtected);
+ if (!dstContext) {
+ return nullptr;
+ }
+ if (dstContext->copy(src, srcRect, dstPoint)) {
+ return dstContext->asTextureProxyRef();
+ }
+ }
+ if (src->asTextureProxy()) {
+ auto dstContext = context->priv().makeDeferredRenderTargetContext(
+ fit, width, height, colorType, nullptr, 1, mipMapped, src->origin(), nullptr,
+ budgeted);
+
+ if (dstContext && dstContext->blitTexture(src->asTextureProxy(), srcColorType, srcRect,
+ dstPoint)) {
+ return dstContext->asTextureProxyRef();
+ }
+ }
+ // Can't use backend copies or draws.
+ return nullptr;
+}
+
+sk_sp<GrTextureProxy> GrSurfaceProxy::Copy(GrRecordingContext* context, GrSurfaceProxy* src,
+ GrColorType srcColorType, GrMipMapped mipMapped,
+ SkBackingFit fit, SkBudgeted budgeted) {
+ SkASSERT(!src->isFullyLazy());
+ return Copy(context, src, srcColorType, mipMapped, SkIRect::MakeWH(src->width(), src->height()),
+ fit, budgeted);
+}
+
+#if GR_TEST_UTILS
+int32_t GrSurfaceProxy::testingOnly_getBackingRefCnt() const {
+ if (fTarget) {
+ return fTarget->testingOnly_getRefCnt();
+ }
+
+ return -1; // no backing GrSurface
+}
+
+GrInternalSurfaceFlags GrSurfaceProxy::testingOnly_getFlags() const {
+ return fSurfaceFlags;
+}
+#endif
+
+void GrSurfaceProxyPriv::exactify(bool allocatedCaseOnly) {
+ SkASSERT(!fProxy->isFullyLazy());
+ if (this->isExact()) {
+ return;
+ }
+
+ SkASSERT(SkBackingFit::kApprox == fProxy->fFit);
+
+ if (fProxy->fTarget) {
+ // The kApprox but already instantiated case. Setting the proxy's width & height to
+ // the instantiated width & height could have side-effects going forward, since we're
+        // obliterating the area of interest information. This call (exactify) is only used
+        // when converting an SkSpecialImage to an SkImage, so the proxy shouldn't be
+        // used for additional draws.
+ fProxy->fWidth = fProxy->fTarget->width();
+ fProxy->fHeight = fProxy->fTarget->height();
+ return;
+ }
+
+#ifndef SK_CRIPPLE_TEXTURE_REUSE
+ // In the post-implicit-allocation world we can't convert this proxy to be exact fit
+ // at this point. With explicit allocation switching this to exact will result in a
+ // different allocation at flush time. With implicit allocation, allocation would occur
+ // at draw time (rather than flush time) so this pathway was encountered less often (if
+ // at all).
+ if (allocatedCaseOnly) {
+ return;
+ }
+#endif
+
+ // The kApprox uninstantiated case. Making this proxy be exact should be okay.
+ // It could mess things up if prior decisions were based on the approximate size.
+ fProxy->fFit = SkBackingFit::kExact;
+    // fGpuMemorySize is used when caching specialImages for the image filter DAG. If it has
+ // already been computed we want to leave it alone so that amount will be removed when
+ // the special image goes away. If it hasn't been computed yet it might as well compute the
+ // exact amount.
+}
+
+bool GrSurfaceProxyPriv::doLazyInstantiation(GrResourceProvider* resourceProvider) {
+ SkASSERT(fProxy->isLazy());
+
+ sk_sp<GrSurface> surface;
+ if (fProxy->asTextureProxy() && fProxy->asTextureProxy()->getUniqueKey().isValid()) {
+ // First try to reattach to a cached version if the proxy is uniquely keyed
+ surface = resourceProvider->findByUniqueKey<GrSurface>(
+ fProxy->asTextureProxy()->getUniqueKey());
+ }
+
+ bool syncKey = true;
+ bool releaseCallback = false;
+ if (!surface) {
+ auto result = fProxy->fLazyInstantiateCallback(resourceProvider);
+ surface = std::move(result.fSurface);
+ syncKey = result.fKeyMode == GrSurfaceProxy::LazyInstantiationKeyMode::kSynced;
+ releaseCallback = surface && result.fReleaseCallback;
+ }
+ if (!surface) {
+ fProxy->fWidth = 0;
+ fProxy->fHeight = 0;
+ return false;
+ }
+
+ if (fProxy->isFullyLazy()) {
+ // This was a fully lazy proxy. We need to fill in the width & height. For partially
+ // lazy proxies we must preserve the original width & height since that indicates
+ // the content area.
+ fProxy->fWidth = surface->width();
+ fProxy->fHeight = surface->height();
+ }
+
+ SkASSERT(fProxy->fWidth <= surface->width());
+ SkASSERT(fProxy->fHeight <= surface->height());
+
+ auto rt = fProxy->asRenderTargetProxy();
+ int minStencilSampleCount = rt ? rt->numSamples() : 0;
+
+ if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
+ resourceProvider, surface.get(), minStencilSampleCount)) {
+ return false;
+ }
+
+ if (GrTextureProxy* texProxy = fProxy->asTextureProxy()) {
+ texProxy->setTargetKeySync(syncKey);
+ if (syncKey) {
+ const GrUniqueKey& key = texProxy->getUniqueKey();
+ if (key.isValid()) {
+ if (!surface->asTexture()->getUniqueKey().isValid()) {
+ // If 'surface' is newly created, attach the unique key
+ resourceProvider->assignUniqueKeyToResource(key, surface.get());
+ } else {
+ // otherwise we had better have reattached to a cached version
+ SkASSERT(surface->asTexture()->getUniqueKey() == key);
+ }
+ } else {
+ SkASSERT(!surface->getUniqueKey().isValid());
+ }
+ }
+ }
+
+ this->assign(std::move(surface));
+ if (releaseCallback) {
+ fProxy->fLazyInstantiateCallback = nullptr;
+ }
+
+ return true;
+}
+
+#ifdef SK_DEBUG
+void GrSurfaceProxy::validateSurface(const GrSurface* surface) {
+ SkASSERT(surface->config() == fConfig);
+
+ this->onValidateSurface(surface);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceProxy.h b/gfx/skia/skia/src/gpu/GrSurfaceProxy.h
new file mode 100644
index 0000000000..7ce65d82cf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceProxy.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceProxy_DEFINED
+#define GrSurfaceProxy_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrGpuResource.h"
+#include "include/gpu/GrSurface.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrSwizzle.h"
+
+class GrCaps;
+class GrContext_Base;
+class GrOpsTask;
+class GrRecordingContext;
+class GrRenderTargetProxy;
+class GrRenderTask;
+class GrResourceProvider;
+class GrSurfaceContext;
+class GrSurfaceProxyPriv;
+class GrTextureProxy;
+
+class GrSurfaceProxy : public GrNonAtomicRef<GrSurfaceProxy> {
+public:
+ virtual ~GrSurfaceProxy();
+
+ /**
+ * Indicates "resolutions" that need to be done on a surface before its pixels can be accessed.
+ * If both types of resolve are requested, the MSAA resolve will happen first.
+ */
+ enum class ResolveFlags {
+ kNone = 0,
+ kMSAA = 1 << 0, // Blit and resolve an internal MSAA render buffer into the texture.
+ kMipMaps = 1 << 1, // Regenerate all mipmap levels.
+ };
+
+ /**
+     * Some lazy proxy callbacks want to set their own key (or no key) on the GrSurfaces they return.
+ * Others want the GrSurface's key to be kept in sync with the proxy's key. This enum controls
+ * the key relationship between proxies and their targets.
+ */
+ enum class LazyInstantiationKeyMode {
+ /**
+ * Don't key the GrSurface with the proxy's key. The lazy instantiation callback is free to
+ * return a GrSurface that already has a unique key unrelated to the proxy's key.
+ */
+ kUnsynced,
+ /**
+ * Keep the GrSurface's unique key in sync with the proxy's unique key. The GrSurface
+ * returned from the lazy instantiation callback must not have a unique key or have the same
+ * same unique key as the proxy. If the proxy is later assigned a key it is in turn assigned
+ * to the GrSurface.
+ */
+ kSynced
+ };
+
+ struct LazyCallbackResult {
+ LazyCallbackResult() = default;
+ LazyCallbackResult(const LazyCallbackResult&) = default;
+ LazyCallbackResult(LazyCallbackResult&& that) = default;
+ LazyCallbackResult(sk_sp<GrSurface> surf,
+ bool releaseCallback = true,
+ LazyInstantiationKeyMode mode = LazyInstantiationKeyMode::kSynced)
+ : fSurface(std::move(surf)), fKeyMode(mode), fReleaseCallback(releaseCallback) {}
+ LazyCallbackResult(sk_sp<GrTexture> tex)
+ : LazyCallbackResult(sk_sp<GrSurface>(std::move(tex))) {}
+
+ LazyCallbackResult& operator=(const LazyCallbackResult&) = default;
+ LazyCallbackResult& operator=(LazyCallbackResult&&) = default;
+
+ sk_sp<GrSurface> fSurface;
+ LazyInstantiationKeyMode fKeyMode = LazyInstantiationKeyMode::kSynced;
+ /**
+         * Should the callback be disposed of after it has returned, or preserved until the proxy
+         * is freed? Only honored if fSurface is non-null. If it is null the callback is preserved.
+ */
+ bool fReleaseCallback = true;
+ };
+
+ using LazyInstantiateCallback = std::function<LazyCallbackResult(GrResourceProvider*)>;
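+    // For illustration only, a callback typically creates (or recycles) the backing surface on
+    // demand; 'desc', 'format', 'renderable', 'sampleCnt', and 'isProtected' are assumed here:
+    //     LazyInstantiateCallback cb = [=](GrResourceProvider* rp) {
+    //         return LazyCallbackResult(rp->createApproxTexture(desc, format, renderable,
+    //                                                           sampleCnt, isProtected));
+    //     };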
+
+ enum class UseAllocator {
+ /**
+ * This proxy will be instantiated outside the allocator (e.g. for proxies that are
+ * instantiated in on-flush callbacks).
+ */
+ kNo = false,
+ /**
+ * GrResourceAllocator should instantiate this proxy.
+ */
+ kYes = true,
+ };
+
+ bool isLazy() const { return !this->isInstantiated() && SkToBool(fLazyInstantiateCallback); }
+
+ bool isFullyLazy() const {
+ bool result = fHeight < 0;
+ SkASSERT(result == (fWidth < 0));
+ SkASSERT(!result || this->isLazy());
+ return result;
+ }
+
+ GrPixelConfig config() const { return fConfig; }
+
+ int width() const {
+ SkASSERT(!this->isFullyLazy());
+ return fWidth;
+ }
+
+ int height() const {
+ SkASSERT(!this->isFullyLazy());
+ return fHeight;
+ }
+
+ SkISize isize() const { return {fWidth, fHeight}; }
+
+ int worstCaseWidth() const;
+ int worstCaseHeight() const;
+ /**
+ * Helper that gets the width and height of the surface as a bounding rectangle.
+ */
+ SkRect getBoundsRect() const {
+ SkASSERT(!this->isFullyLazy());
+ return SkRect::MakeIWH(this->width(), this->height());
+ }
+ /**
+ * Helper that gets the worst case width and height of the surface as a bounding rectangle.
+ */
+ SkRect getWorstCaseBoundsRect() const {
+ SkASSERT(!this->isFullyLazy());
+ return SkRect::MakeIWH(this->worstCaseWidth(), this->worstCaseHeight());
+ }
+
+ GrSurfaceOrigin origin() const {
+ SkASSERT(kTopLeft_GrSurfaceOrigin == fOrigin || kBottomLeft_GrSurfaceOrigin == fOrigin);
+ return fOrigin;
+ }
+
+ const GrSwizzle& textureSwizzle() const { return fTextureSwizzle; }
+
+ const GrBackendFormat& backendFormat() const { return fFormat; }
+
+ class UniqueID {
+ public:
+ static UniqueID InvalidID() {
+ return UniqueID(uint32_t(SK_InvalidUniqueID));
+ }
+
+ // wrapped
+ explicit UniqueID(const GrGpuResource::UniqueID& id) : fID(id.asUInt()) { }
+ // deferred and lazy-callback
+ UniqueID() : fID(GrGpuResource::CreateUniqueID()) { }
+
+ uint32_t asUInt() const { return fID; }
+
+ bool operator==(const UniqueID& other) const {
+ return fID == other.fID;
+ }
+ bool operator!=(const UniqueID& other) const {
+ return !(*this == other);
+ }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isInvalid() const { return SK_InvalidUniqueID == fID; }
+
+ private:
+ explicit UniqueID(uint32_t id) : fID(id) {}
+
+ uint32_t fID;
+ };
+
+ /*
+ * The contract for the uniqueID is:
+ * for wrapped resources:
+ * the uniqueID will match that of the wrapped resource
+ *
+ * for deferred resources:
+     *     the uniqueID will be different from that of the real resource, once it is allocated
+ * the proxy's uniqueID will not change across the instantiate call
+ *
+ * the uniqueIDs of the proxies and the resources draw from the same pool
+ *
+ * What this boils down to is that the uniqueID of a proxy can be used to consistently
+ * track/identify a proxy but should never be used to distinguish between
+ * resources and proxies - beware!
+ */
+ UniqueID uniqueID() const { return fUniqueID; }
+
+ UniqueID underlyingUniqueID() const {
+ if (fTarget) {
+ return UniqueID(fTarget->uniqueID());
+ }
+
+ return fUniqueID;
+ }
+
+ virtual bool instantiate(GrResourceProvider*) = 0;
+
+ void deinstantiate();
+
+ /**
+ * Proxies that are already instantiated and whose backing surface cannot be recycled to
+ * instantiate other proxies do not need to be considered by GrResourceAllocator.
+ */
+ bool canSkipResourceAllocator() const;
+
+ /**
+ * @return the texture proxy associated with the surface proxy, may be NULL.
+ */
+ virtual GrTextureProxy* asTextureProxy() { return nullptr; }
+ virtual const GrTextureProxy* asTextureProxy() const { return nullptr; }
+
+ /**
+ * @return the render target proxy associated with the surface proxy, may be NULL.
+ */
+ virtual GrRenderTargetProxy* asRenderTargetProxy() { return nullptr; }
+ virtual const GrRenderTargetProxy* asRenderTargetProxy() const { return nullptr; }
+
+ bool isInstantiated() const { return SkToBool(fTarget); }
+
+    // If the proxy is already instantiated, return its backing GrSurface; if not, return null.
+ GrSurface* peekSurface() const { return fTarget.get(); }
+
+ // If this is a texture proxy and the proxy is already instantiated, return its backing
+ // GrTexture; if not, return null.
+ GrTexture* peekTexture() const { return fTarget ? fTarget->asTexture() : nullptr; }
+
+ // If this is a render target proxy and the proxy is already instantiated, return its backing
+ // GrRenderTarget; if not, return null.
+ GrRenderTarget* peekRenderTarget() const {
+ return fTarget ? fTarget->asRenderTarget() : nullptr;
+ }
+
+ /**
+ * Does the resource count against the resource budget?
+ */
+ SkBudgeted isBudgeted() const { return fBudgeted; }
+
+ /**
+ * The pixel values of this proxy's surface cannot be modified (e.g. doesn't support write
+ * pixels or MIP map level regen). Read-only proxies also bypass interval tracking and
+ * assignment in GrResourceAllocator.
+ */
+ bool readOnly() const { return fSurfaceFlags & GrInternalSurfaceFlags::kReadOnly; }
+
+ /**
+     * This means the surface is a multisampled render target, and internally holds a non-MSAA
+     * texture for resolving into. The render target resolves itself by blitting into this
+     * internal texture. (asTexture() might or might not return the internal texture, but if it
+     * does, we always resolve the render target before accessing this texture's data.)
+ */
+ bool requiresManualMSAAResolve() const {
+ return fSurfaceFlags & GrInternalSurfaceFlags::kRequiresManualMSAAResolve;
+ }
+
+ void setLastRenderTask(GrRenderTask*);
+ GrRenderTask* getLastRenderTask() { return fLastRenderTask; }
+
+ GrOpsTask* getLastOpsTask();
+
+ /**
+ * Retrieves the amount of GPU memory that will be or currently is used by this resource
+ * in bytes. It is approximate since we aren't aware of additional padding or copies made
+ * by the driver.
+ *
+ * @return the amount of GPU memory used in bytes
+ */
+ size_t gpuMemorySize(const GrCaps& caps) const {
+ SkASSERT(!this->isFullyLazy());
+ if (fTarget) {
+ return fTarget->gpuMemorySize();
+ }
+ if (kInvalidGpuMemorySize == fGpuMemorySize) {
+ fGpuMemorySize = this->onUninstantiatedGpuMemorySize(caps);
+ SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
+ }
+ return fGpuMemorySize;
+ }
+
+ enum class RectsMustMatch : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ // Helper function that creates a temporary SurfaceContext to perform the copy
+    // The copy is not a render target and is not multisampled.
+ static sk_sp<GrTextureProxy> Copy(GrRecordingContext*, GrSurfaceProxy* src,
+ GrColorType srcColorType, GrMipMapped,
+ SkIRect srcRect, SkBackingFit, SkBudgeted,
+ RectsMustMatch = RectsMustMatch::kNo);
+
+ // Copy the entire 'src'
+ static sk_sp<GrTextureProxy> Copy(GrRecordingContext*, GrSurfaceProxy* src,
+                                      GrColorType srcColorType, GrMipMapped, SkBackingFit,
+ SkBudgeted);
+
+#if GR_TEST_UTILS
+ int32_t testingOnly_getBackingRefCnt() const;
+ GrInternalSurfaceFlags testingOnly_getFlags() const;
+#endif
+
+ SkDEBUGCODE(void validate(GrContext_Base*) const;)
+
+ // Provides access to functions that aren't part of the public API.
+ inline GrSurfaceProxyPriv priv();
+ inline const GrSurfaceProxyPriv priv() const;
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const { return fIsProtected == GrProtected::kYes; }
+
+protected:
+ // Deferred version - takes a new UniqueID from the shared resource/proxy pool.
+ GrSurfaceProxy(const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ GrRenderable,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+ // Lazy-callback version - takes a new UniqueID from the shared resource/proxy pool.
+ GrSurfaceProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ GrRenderable,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ // Wrapped version - shares the UniqueID of the passed surface.
+ // Takes UseAllocator because even though this is already instantiated it still can participate
+ // in allocation by having its backing resource recycled to other uninstantiated proxies or
+ // not depending on UseAllocator.
+ GrSurfaceProxy(sk_sp<GrSurface>,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit,
+ UseAllocator);
+
+ friend class GrSurfaceProxyPriv;
+
+ // Methods made available via GrSurfaceProxyPriv
+ bool ignoredByResourceAllocator() const { return fIgnoredByResourceAllocator; }
+ void setIgnoredByResourceAllocator() { fIgnoredByResourceAllocator = true; }
+
+ void computeScratchKey(GrScratchKey*) const;
+
+ virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
+ void assign(sk_sp<GrSurface> surface);
+
+ sk_sp<GrSurface> createSurfaceImpl(GrResourceProvider*, int sampleCnt,
+ int minStencilSampleCount, GrRenderable, GrMipMapped) const;
+
+ // Once the size of a fully-lazy proxy is decided, and before it gets instantiated, the client
+ // can use this optional method to specify the proxy's size. (A proxy's size can be less than
+ // the GPU surface that backs it. e.g., SkBackingFit::kApprox.) Otherwise, the proxy's size will
+ // be set to match the underlying GPU surface upon instantiation.
+ void setLazySize(int width, int height) {
+ SkASSERT(this->isFullyLazy());
+ SkASSERT(width > 0 && height > 0);
+ fWidth = width;
+ fHeight = height;
+ }
+
+ bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
+ int minStencilSampleCount, GrRenderable, GrMipMapped, const GrUniqueKey*);
+
+ // For deferred proxies this will be null until the proxy is instantiated.
+ // For wrapped proxies it will point to the wrapped resource.
+ sk_sp<GrSurface> fTarget;
+
+ // In many cases these flags aren't actually known until the proxy has been instantiated.
+ // However, Ganesh frequently needs to change its behavior based on these settings. For
+    // internally created proxies we will know these properties ahead of time. For wrapped
+ // proxies we will copy the properties off of the GrSurface. For lazy proxies we force the
+ // call sites to provide the required information ahead of time. At instantiation time
+ // we verify that the assumed properties match the actual properties.
+ GrInternalSurfaceFlags fSurfaceFlags;
+
+private:
+    // For wrapped resources, 'fFormat', 'fConfig', 'fWidth', 'fHeight', and 'fOrigin' will always
+ // be filled in from the wrapped resource.
+ const GrBackendFormat fFormat;
+ const GrPixelConfig fConfig;
+ int fWidth;
+ int fHeight;
+ const GrSurfaceOrigin fOrigin;
+ const GrSwizzle fTextureSwizzle;
+
+ SkBackingFit fFit; // always kApprox for lazy-callback resources
+ // always kExact for wrapped resources
+ mutable SkBudgeted fBudgeted; // always kYes for lazy-callback resources
+ // set from the backing resource for wrapped resources
+ // mutable bc of SkSurface/SkImage wishy-washiness
+ // Only meaningful if fLazyInstantiateCallback is non-null.
+ UseAllocator fUseAllocator;
+
+ const UniqueID fUniqueID; // set from the backing resource for wrapped resources
+
+ LazyInstantiateCallback fLazyInstantiateCallback;
+
+ SkDEBUGCODE(void validateSurface(const GrSurface*);)
+ SkDEBUGCODE(virtual void onValidateSurface(const GrSurface*) = 0;)
+
+ static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
+ SkDEBUGCODE(size_t getRawGpuMemorySize_debugOnly() const { return fGpuMemorySize; })
+
+ virtual size_t onUninstantiatedGpuMemorySize(const GrCaps&) const = 0;
+
+ bool fIgnoredByResourceAllocator = false;
+ GrProtected fIsProtected;
+
+    // This entry is lazily evaluated so, when the proxy wraps a resource, the resource
+    // is queried for its size but, when the proxy is deferred, it computes the answer itself.
+ // If the proxy computes its own answer that answer is checked (in debug mode) in
+ // the instantiation method.
+ mutable size_t fGpuMemorySize;
+
+ // The last GrRenderTask that wrote to or is currently going to write to this surface
+ // The GrRenderTask can be closed (e.g., no surface context is currently bound
+ // to this proxy).
+    // This back-pointer is required so that we can add a dependency between
+ // the GrRenderTask used to create the current contents of this surface
+ // and the GrRenderTask of a destination surface to which this one is being drawn or copied.
+ // This pointer is unreffed. GrRenderTasks own a ref on their surface proxies.
+ GrRenderTask* fLastRenderTask = nullptr;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrSurfaceProxy::ResolveFlags)
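+// The macro above defines the bitwise operators for ResolveFlags so the flags can be combined,
+// e.g. GrSurfaceProxy::ResolveFlags::kMSAA | GrSurfaceProxy::ResolveFlags::kMipMaps.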
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceProxyPriv.h b/gfx/skia/skia/src/gpu/GrSurfaceProxyPriv.h
new file mode 100644
index 0000000000..60704ac0d8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceProxyPriv.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceProxyPriv_DEFINED
+#define GrSurfaceProxyPriv_DEFINED
+
+#include "src/gpu/GrSurfaceProxy.h"
+
+#include "src/gpu/GrResourceProvider.h"
+
+/** Class that adds methods to GrSurfaceProxy that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrSurfaceProxy. It should never have additional
+ data members or virtual methods. */
+class GrSurfaceProxyPriv {
+public:
+ void computeScratchKey(GrScratchKey* key) const { return fProxy->computeScratchKey(key); }
+
+ // Create a GrSurface-derived class that meets the requirements (i.e, desc, renderability)
+ // of the GrSurfaceProxy.
+ sk_sp<GrSurface> createSurface(GrResourceProvider* resourceProvider) const {
+ return fProxy->createSurface(resourceProvider);
+ }
+
+ // Assign this proxy the provided GrSurface as its backing surface
+ void assign(sk_sp<GrSurface> surface) { fProxy->assign(std::move(surface)); }
+
+ // Don't abuse this call!!!!!!!
+ bool isExact() const { return SkBackingFit::kExact == fProxy->fFit; }
+
+ // Don't. Just don't.
+ void exactify(bool allocatedCaseOnly);
+
+ void setLazySize(int width, int height) { fProxy->setLazySize(width, height); }
+
+ bool doLazyInstantiation(GrResourceProvider*);
+
+
+ static bool SK_WARN_UNUSED_RESULT AttachStencilIfNeeded(GrResourceProvider*, GrSurface*,
+ int minStencilSampleCount);
+
+private:
+ explicit GrSurfaceProxyPriv(GrSurfaceProxy* proxy) : fProxy(proxy) {}
+ GrSurfaceProxyPriv(const GrSurfaceProxyPriv&) {} // unimpl
+ GrSurfaceProxyPriv& operator=(const GrSurfaceProxyPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrSurfaceProxyPriv* operator&() const;
+ GrSurfaceProxyPriv* operator&();
+
+ GrSurfaceProxy* fProxy;
+
+ friend class GrSurfaceProxy; // to construct/copy this type.
+};
+
+inline GrSurfaceProxyPriv GrSurfaceProxy::priv() { return GrSurfaceProxyPriv(this); }
+
+inline const GrSurfaceProxyPriv GrSurfaceProxy::priv() const {
+ return GrSurfaceProxyPriv(const_cast<GrSurfaceProxy*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSwizzle.cpp b/gfx/skia/skia/src/gpu/GrSwizzle.cpp
new file mode 100644
index 0000000000..f1b3b8583c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSwizzle.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrSwizzle.h"
+
+#include "src/core/SkRasterPipeline.h"
+
+void GrSwizzle::apply(SkRasterPipeline* pipeline) const {
+ SkASSERT(pipeline);
+ switch (fKey) {
+ case GrSwizzle("rgba").asKey():
+ return;
+ case GrSwizzle("bgra").asKey():
+ pipeline->append(SkRasterPipeline::swap_rb);
+ return;
+ case GrSwizzle("aaa1").asKey():
+ pipeline->append(SkRasterPipeline::alpha_to_gray);
+ return;
+ case GrSwizzle("rgb1").asKey():
+ pipeline->append(SkRasterPipeline::force_opaque);
+ return;
+ default: {
+ GR_STATIC_ASSERT(sizeof(uintptr_t) >= 4 * sizeof(char));
+ // Rather than allocate the 4 control bytes on the heap somewhere, just jam them right
+ // into a uintptr_t context.
+ uintptr_t ctx;
+ memcpy(&ctx, fSwiz, 4 * sizeof(char));
+ pipeline->append(SkRasterPipeline::swizzle, ctx);
+ return;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrSwizzle.h b/gfx/skia/skia/src/gpu/GrSwizzle.h
new file mode 100644
index 0000000000..94562ff527
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSwizzle.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSwizzle_DEFINED
+#define GrSwizzle_DEFINED
+
+#include "include/private/SkColorData.h"
+#include "src/gpu/GrColor.h"
+
+class SkRasterPipeline;
+
+/** Represents an rgba swizzle. It can be converted either into a string or a 16-bit key. */
+class GrSwizzle {
+public:
+ constexpr GrSwizzle() : GrSwizzle("rgba") {}
+ explicit constexpr GrSwizzle(const char c[4]);
+
+ constexpr GrSwizzle(const GrSwizzle&);
+ constexpr GrSwizzle& operator=(const GrSwizzle& that);
+
+ static constexpr GrSwizzle Concat(const GrSwizzle& a, const GrSwizzle& b);
+
+ constexpr bool operator==(const GrSwizzle& that) const { return fKey == that.fKey; }
+ constexpr bool operator!=(const GrSwizzle& that) const { return !(*this == that); }
+
+ /** Compact representation of the swizzle suitable for a key. */
+ constexpr uint16_t asKey() const { return fKey; }
+
+ /** 4 char null terminated string consisting only of chars 'r', 'g', 'b', 'a', '0', and '1'. */
+ constexpr const char* c_str() const { return fSwiz; }
+
+ constexpr char operator[](int i) const {
+ SkASSERT(i >= 0 && i < 4);
+ return fSwiz[i];
+ }
+
+ /** Applies this swizzle to the input color and returns the swizzled color. */
+ template <SkAlphaType AlphaType>
+ constexpr SkRGBA4f<AlphaType> applyTo(const SkRGBA4f<AlphaType>& color) const;
+
+ void apply(SkRasterPipeline*) const;
+
+ static constexpr GrSwizzle RGBA() { return GrSwizzle("rgba"); }
+ static constexpr GrSwizzle AAAA() { return GrSwizzle("aaaa"); }
+ static constexpr GrSwizzle RRRR() { return GrSwizzle("rrrr"); }
+ static constexpr GrSwizzle RRRA() { return GrSwizzle("rrra"); }
+ static constexpr GrSwizzle BGRA() { return GrSwizzle("bgra"); }
+ static constexpr GrSwizzle RGB1() { return GrSwizzle("rgb1"); }
+
+private:
+ template <SkAlphaType AlphaType>
+ static constexpr float ComponentIndexToFloat(const SkRGBA4f<AlphaType>& color, int idx);
+ static constexpr int CToI(char c);
+ static constexpr char IToC(int idx);
+
+ char fSwiz[5];
+ uint16_t fKey;
+};
+
+constexpr GrSwizzle::GrSwizzle(const char c[4])
+ : fSwiz{c[0], c[1], c[2], c[3], '\0'}
+ , fKey((CToI(c[0]) << 0) | (CToI(c[1]) << 4) | (CToI(c[2]) << 8) | (CToI(c[3]) << 12)) {}
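+
+// Worked example: GrSwizzle("bgra") packs fKey = 0x3012 ('b'->2 in bits 0-3, 'g'->1 in bits 4-7,
+// 'r'->0 in bits 8-11, 'a'->3 in bits 12-15).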
+
+constexpr GrSwizzle::GrSwizzle(const GrSwizzle& that)
+ : fSwiz{that.fSwiz[0], that.fSwiz[1], that.fSwiz[2], that.fSwiz[3], '\0'}
+ , fKey(that.fKey) {}
+
+constexpr GrSwizzle& GrSwizzle::operator=(const GrSwizzle& that) {
+ fSwiz[0] = that.fSwiz[0];
+ fSwiz[1] = that.fSwiz[1];
+ fSwiz[2] = that.fSwiz[2];
+ fSwiz[3] = that.fSwiz[3];
+ SkASSERT(fSwiz[4] == '\0');
+ fKey = that.fKey;
+ return *this;
+}
+
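+// For example, GrSwizzle("rgb1").applyTo({r, g, b, a}) returns {r, g, b, 1.0f}.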
+template <SkAlphaType AlphaType>
+constexpr SkRGBA4f<AlphaType> GrSwizzle::applyTo(const SkRGBA4f<AlphaType>& color) const {
+ uint32_t key = fKey;
+ // Index of the input color that should be mapped to output r.
+ int idx = (key & 15);
+ float outR = ComponentIndexToFloat(color, idx);
+ key >>= 4;
+ idx = (key & 15);
+ float outG = ComponentIndexToFloat(color, idx);
+ key >>= 4;
+ idx = (key & 15);
+ float outB = ComponentIndexToFloat(color, idx);
+ key >>= 4;
+ idx = (key & 15);
+ float outA = ComponentIndexToFloat(color, idx);
+ return { outR, outG, outB, outA };
+}
+
+template <SkAlphaType AlphaType>
+constexpr float GrSwizzle::ComponentIndexToFloat(const SkRGBA4f<AlphaType>& color, int idx) {
+ if (idx <= 3) {
+ return color[idx];
+ }
+ if (idx == CToI('1')) {
+ return 1.0f;
+ }
+ if (idx == CToI('0')) {
+        return 0.0f;
+ }
+ SkUNREACHABLE;
+}
+
+constexpr int GrSwizzle::CToI(char c) {
+ switch (c) {
+ // r...a must map to 0...3 because other methods use them as indices into fSwiz.
+ case 'r': return 0;
+ case 'g': return 1;
+ case 'b': return 2;
+ case 'a': return 3;
+ case '0': return 4;
+ case '1': return 5;
+ default: SkUNREACHABLE;
+ }
+}
+
+constexpr char GrSwizzle::IToC(int idx) {
+ switch (idx) {
+ case CToI('r'): return 'r';
+ case CToI('g'): return 'g';
+ case CToI('b'): return 'b';
+ case CToI('a'): return 'a';
+ case CToI('0'): return '0';
+ case CToI('1'): return '1';
+ default: SkUNREACHABLE;
+ }
+}
+
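+// Concat(a, b) builds the swizzle equivalent to first applying 'a' and then 'b'. For example,
+// Concat(GrSwizzle("bgra"), GrSwizzle("rrra")) yields GrSwizzle("bbba").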
+constexpr GrSwizzle GrSwizzle::Concat(const GrSwizzle& a, const GrSwizzle& b) {
+ char swiz[4]{};
+ for (int i = 0; i < 4; ++i) {
+ int idx = (b.fKey >> (4U * i)) & 0xfU;
+ switch (idx) {
+ case CToI('0'): swiz[i] = '0'; break;
+ case CToI('1'): swiz[i] = '1'; break;
+ default: swiz[i] = a.fSwiz[idx]; break;
+ }
+ }
+ return GrSwizzle(swiz);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTRecorder.h b/gfx/skia/skia/src/gpu/GrTRecorder.h
new file mode 100644
index 0000000000..1e87ec8190
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTRecorder.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTRecorder_DEFINED
+#define GrTRecorder_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkTLogic.h"
+#include "src/core/SkArenaAlloc.h"
+
+/**
+ * Records a list of items with a common base type, optional associated data, and
+ * permanent memory addresses. It supports forward iteration.
+ *
+ * This class allocates space for the stored items and associated data in a SkArenaAlloc.
+ * There is an overhead of 1 pointer for each stored item.
+ *
+ * Upon reset or delete, the items are destructed in the same order they were received,
+ * not reverse (stack) order.
+ *
+ * @param TBase Common base type of items in the list. It is assumed that the items are
+ *              trivially destructible or that TBase has a virtual destructor as ~TBase()
+ * is called to destroy the items.
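+ *
+ * A minimal usage sketch (the op types named here are purely illustrative):
+ *     GrTRecorder<OpBase> recorder(64 * 1024);
+ *     recorder.emplace<ConcreteOp>(ctorArgs...);
+ *     for (OpBase& op : recorder) { op.execute(); }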
+ */
+template <typename TBase> class GrTRecorder {
+private:
+ template <bool IsConst> class IterImpl;
+
+public:
+ using iterator = IterImpl<false>;
+ using const_iterator = IterImpl<true>;
+
+ /**
+ * Create a recorder.
+ *
+ * @param initialSizeInBytes The amount of memory reserved by the recorder initially,
+ and after calls to reset().
+ */
+ explicit GrTRecorder(size_t initialSizeInBytes) : fArena(initialSizeInBytes) {}
+ GrTRecorder(const GrTRecorder&) = delete;
+ GrTRecorder& operator=(const GrTRecorder&) = delete;
+
+ ~GrTRecorder() { this->reset(); }
+
+ bool empty() { return !SkToBool(fTail); }
+
+ /** The last item. Must not be empty. */
+ TBase& back() {
+ SkASSERT(!this->empty());
+ return *fTail->get();
+ }
+
+ /** Forward mutable iteration */
+ iterator begin() { return iterator(fHead); }
+ iterator end() { return iterator(nullptr); }
+
+ /** Forward const iteration */
+ const_iterator begin() const { return const_iterator(fHead); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ /** Destruct all items in the list and reset to empty. Frees memory allocated from arena. */
+ void reset();
+
+ /**
+ * Emplace a new TItem (which derives from TBase) in the recorder. This requires equivalence
+ * between reinterpret_cast<TBase*> and static_cast<TBase*> when operating on TItem*.
+ * Multiple inheritance may make this not true. It is runtime asserted.
+ */
+ template <typename TItem, typename... Args> TItem& emplace(Args&&... args) {
+ return this->emplaceWithData<TItem, Args...>(0, std::forward<Args>(args)...);
+ }
+
+ /**
+ * Emplace a new TItem (which derives from TBase) in the recorder with extra data space. The
+ * extra data immediately follows the stored item with no extra alignment. E.g.,
+ * void* extraData = &recorder->emplaceWithData<Subclass>(dataSize, ...) + 1;
+ *
+ * This requires equivalence between reinterpret_cast<TBase*> and static_cast<TBase*> when
+ * operating on TItem*. Multiple inheritance may make this not true. It is runtime asserted.
+ */
+ template <typename TItem, typename... Args>
+ SK_WHEN((std::is_base_of<TBase, TItem>::value), TItem&)
+ emplaceWithData(size_t extraDataSize, Args... args);
+
+private:
+ struct Header {
+ Header* fNext = nullptr;
+ // We always store the T immediately after the header (and ensure proper alignment). See
+ // emplaceWithData() implementation.
+ TBase* get() const { return reinterpret_cast<TBase*>(const_cast<Header*>(this) + 1); }
+ };
+
+ SkArenaAlloc fArena;
+ Header* fHead = nullptr;
+ Header* fTail = nullptr;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+template <typename TBase>
+template <typename TItem, typename... Args>
+inline SK_WHEN((std::is_base_of<TBase, TItem>::value), TItem&)
+GrTRecorder<TBase>::emplaceWithData(size_t extraDataSize, Args... args) {
+ static constexpr size_t kTAlign = alignof(TItem);
+ static constexpr size_t kHeaderAlign = alignof(Header);
+ static constexpr size_t kAllocAlign = kTAlign > kHeaderAlign ? kTAlign : kHeaderAlign;
+ static constexpr size_t kTItemOffset = GrSizeAlignUp(sizeof(Header), kAllocAlign);
+    // We're assuming if we back up from kTItemOffset by sizeof(Header) we will still be aligned.
+ GR_STATIC_ASSERT(sizeof(Header) % alignof(Header) == 0);
+ const size_t totalSize = kTItemOffset + sizeof(TItem) + extraDataSize;
+ auto alloc = reinterpret_cast<char*>(fArena.makeBytesAlignedTo(totalSize, kAllocAlign));
+ Header* header = new (alloc + kTItemOffset - sizeof(Header)) Header();
+ if (fTail) {
+ fTail->fNext = header;
+ }
+ fTail = header;
+ if (!fHead) {
+ fHead = header;
+ }
+ auto* item = new (alloc + kTItemOffset) TItem(std::forward<Args>(args)...);
+ // We require that we can reinterpret_cast between TBase* and TItem*. Could not figure out how
+ // to statically assert this. See proposal for std::is_initial_base_of here:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0466r0.pdf
+ SkASSERT(reinterpret_cast<uintptr_t>(item) ==
+ reinterpret_cast<uintptr_t>(static_cast<TBase*>(item)));
+ return *item;
+}
+
+template <typename TBase> inline void GrTRecorder<TBase>::reset() {
+ for (auto& i : *this) {
+ i.~TBase();
+ }
+ GR_STATIC_ASSERT(std::is_trivially_destructible<Header>::value);
+ fHead = fTail = nullptr;
+ fArena.reset();
+}
+
+/**
+ * Iterates through a recorder front-to-back, const or not.
+ */
+template <typename TBase> template <bool IsConst> class GrTRecorder<TBase>::IterImpl {
+private:
+ using T = typename std::conditional<IsConst, const TBase, TBase>::type;
+
+public:
+ IterImpl() = default;
+
+ IterImpl operator++() {
+ fCurr = fCurr->fNext;
+ return *this;
+ }
+
+ IterImpl operator++(int) {
+ auto old = fCurr;
+ fCurr = fCurr->fNext;
+ return {old};
+ }
+
+ T& operator*() const { return *fCurr->get(); }
+ T* operator->() const { return fCurr->get(); }
+
+ bool operator==(const IterImpl& that) const { return fCurr == that.fCurr; }
+ bool operator!=(const IterImpl& that) const { return !(*this == that); }
+
+private:
+ IterImpl(Header* curr) : fCurr(curr) {}
+ Header* fCurr = nullptr;
+
+ friend class GrTRecorder<TBase>; // To construct from Header.
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTessellator.cpp b/gfx/skia/skia/src/gpu/GrTessellator.cpp
new file mode 100644
index 0000000000..6f4fd44ffe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTessellator.cpp
@@ -0,0 +1,2415 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTessellator.h"
+
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+
+#include "include/core/SkPath.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <queue>
+#include <unordered_map>
+#include <utility>
+
+/*
+ * There are six stages to the basic algorithm:
+ *
+ * 1) Linearize the path contours into piecewise linear segments (path_to_contours()).
+ * 2) Build a mesh of edges connecting the vertices (build_edges()).
+ * 3) Sort the vertices in Y (and secondarily in X) (merge_sort()).
+ * 4) Simplify the mesh by inserting new vertices at intersecting edges (simplify()).
+ * 5) Tessellate the simplified mesh into monotone polygons (tessellate()).
+ * 6) Triangulate the monotone polygons directly into a vertex buffer (polys_to_triangles()).
+ *
+ * For screenspace antialiasing, the algorithm is modified as follows:
+ *
+ * Run steps 1-5 above to produce polygons.
+ * 5b) Apply fill rules to extract boundary contours from the polygons (extract_boundaries()).
+ * 5c) Simplify boundaries to remove "pointy" vertices that cause inversions (simplify_boundary()).
+ * 5d) Displace edges by half a pixel inward and outward along their normals. Intersect to find
+ * new vertices, and set zero alpha on the exterior and one alpha on the interior. Build a new
+ * antialiased mesh from those vertices (stroke_boundary()).
+ * Run steps 3-6 above on the new mesh, and produce antialiased triangles.
+ *
+ * The vertex sorting in step (3) is a merge sort, since it plays well with the linked list
+ * of vertices (and the necessity of inserting new vertices on intersection).
+ *
+ * Stages (4) and (5) use an active edge list -- a list of all edges for which the
+ * sweep line has crossed the top vertex, but not the bottom vertex. It's sorted
+ * left-to-right based on the point where both edges are active (when both top vertices
+ * have been seen, so the "lower" top vertex of the two). If the top vertices are equal
+ * (shared), it's sorted based on the last point where both edges are active, so the
+ * "upper" bottom vertex.
+ *
+ * The most complex step is the simplification (4). It's based on the Bentley-Ottmann
+ * line-sweep algorithm, but due to floating point inaccuracy, the intersection points are
+ * not exact and may violate the mesh topology or active edge list ordering. We
+ * accommodate this by adjusting the topology of the mesh and AEL to match the intersection
+ * points. This occurs in two ways:
+ *
+ * A) Intersections may cause a shortened edge to no longer be ordered with respect to its
+ * neighbouring edges at the top or bottom vertex. This is handled by merging the
+ * edges (merge_collinear_edges()).
+ * B) Intersections may cause an edge to violate the left-to-right ordering of the
+ * active edge list. This is handled during merging or splitting by rewind()ing the
+ * active edge list to the vertex before potential violations occur.
+ *
+ * The tessellation steps (5) and (6) are based on "Triangulating Simple Polygons and
+ * Equivalent Problems" (Fournier and Montuno); also a line-sweep algorithm. Note that it
+ * currently uses a linked list for the active edge list, rather than a 2-3 tree as the
+ * paper describes. The 2-3 tree gives O(lg N) lookups, but insertion and removal also
+ * become O(lg N). In all the test cases, it was found that the cost of frequent O(lg N)
+ * insertions and removals was greater than the cost of infrequent O(N) lookups with the
+ * linked list implementation. With the latter, all removals are O(1), and most insertions
+ * are O(1), since we know the adjacent edge in the active edge list based on the topology.
+ * Only type 2 vertices (see paper) require the O(N) lookups, and these are much less
+ * frequent. There may be other data structures worth investigating, however.
+ *
+ * Note that the orientation of the line sweep algorithms is determined by the aspect ratio of the
+ * path bounds. When the path is taller than it is wide, we sort vertices based on increasing Y
+ * coordinate, and secondarily by increasing X coordinate. When the path is wider than it is tall,
+ * we sort by increasing X coordinate, but secondarily by *decreasing* Y coordinate. This is so
+ * that the "left" and "right" orientation in the code remains correct (edges to the left are
+ * increasing in Y; edges to the right are decreasing in Y). That is, the sort criterion rotates
+ * 90 degrees counterclockwise, rather than transposing.
+ */
+
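+/*
+ * Reader's sketch (illustrative only, not upstream code): the six stages compose
+ * roughly as follows, using the functions defined later in this file:
+ *
+ *   path_to_contours(path, tol, clipBounds, contours, alloc, &isLinear); // 1
+ *   build_edges(contours, contourCnt, &mesh, c, alloc);                  // 2
+ *   merge_sort<sweep_lt_vert>(&mesh);                                    // 3
+ *   simplify(&mesh, c, alloc);                                           // 4
+ *   Poly* polys = tessellate(mesh, alloc);                               // 5
+ *   // 6) polys_to_triangles() then emits each Poly into a vertex buffer.
+ */
+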
+#define LOGGING_ENABLED 0
+
+#if LOGGING_ENABLED
+#define TESS_LOG printf
+#else
+#define TESS_LOG(...)
+#endif
+
+namespace {
+
+const int kArenaChunkSize = 16 * 1024;
+const float kCosMiterAngle = 0.97f; // Corresponds to an angle of ~14 degrees.
+
+struct Vertex;
+struct Edge;
+struct Event;
+struct Poly;
+
+template <class T, T* T::*Prev, T* T::*Next>
+void list_insert(T* t, T* prev, T* next, T** head, T** tail) {
+ t->*Prev = prev;
+ t->*Next = next;
+ if (prev) {
+ prev->*Next = t;
+ } else if (head) {
+ *head = t;
+ }
+ if (next) {
+ next->*Prev = t;
+ } else if (tail) {
+ *tail = t;
+ }
+}
+
+template <class T, T* T::*Prev, T* T::*Next>
+void list_remove(T* t, T** head, T** tail) {
+ if (t->*Prev) {
+ t->*Prev->*Next = t->*Next;
+ } else if (head) {
+ *head = t->*Next;
+ }
+ if (t->*Next) {
+ t->*Next->*Prev = t->*Prev;
+ } else if (tail) {
+ *tail = t->*Prev;
+ }
+ t->*Prev = t->*Next = nullptr;
+}
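+
+// Usage note (illustrative, not upstream commentary): the member-pointer template
+// parameters let one node type live in several lists at once. VertexList below
+// instantiates
+//     list_insert<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, prev, next, &fHead, &fTail);
+// while Edge reuses the same helpers for its active-edge (fLeft/fRight) and
+// edges-above/edges-below chains.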
+
+/**
+ * Vertices are used in three ways: first, the path contours are converted into a
+ * circularly-linked list of Vertices for each contour. After edge construction, the same Vertices
+ * are re-ordered by the merge sort according to the sweep_lt comparator (usually, increasing
+ * in Y) using the same fPrev/fNext pointers that were used for the contours, to avoid
+ * reallocation. Finally, MonotonePolys are built containing a circularly-linked list of
+ * Vertices. (Currently, those Vertices are newly-allocated for the MonotonePolys, since
+ * an individual Vertex from the path mesh may belong to multiple MonotonePolys, so the
+ * original Vertices cannot be re-used.)
+ */
+
+struct Vertex {
+ Vertex(const SkPoint& point, uint8_t alpha)
+ : fPoint(point), fPrev(nullptr), fNext(nullptr)
+ , fFirstEdgeAbove(nullptr), fLastEdgeAbove(nullptr)
+ , fFirstEdgeBelow(nullptr), fLastEdgeBelow(nullptr)
+ , fLeftEnclosingEdge(nullptr), fRightEnclosingEdge(nullptr)
+ , fPartner(nullptr)
+ , fAlpha(alpha)
+ , fSynthetic(false)
+#if LOGGING_ENABLED
+ , fID (-1.0f)
+#endif
+ {}
+ SkPoint fPoint; // Vertex position
+ Vertex* fPrev; // Linked list of contours, then Y-sorted vertices.
+ Vertex* fNext; // "
+ Edge* fFirstEdgeAbove; // Linked list of edges above this vertex.
+ Edge* fLastEdgeAbove; // "
+ Edge* fFirstEdgeBelow; // Linked list of edges below this vertex.
+ Edge* fLastEdgeBelow; // "
+ Edge* fLeftEnclosingEdge; // Nearest edge in the AEL left of this vertex.
+ Edge* fRightEnclosingEdge; // Nearest edge in the AEL right of this vertex.
+ Vertex* fPartner; // Corresponding inner or outer vertex (for AA).
+ uint8_t fAlpha;
+ bool fSynthetic; // Is this a synthetic vertex?
+#if LOGGING_ENABLED
+ float fID; // Identifier used for logging.
+#endif
+};
+
+/***************************************************************************************/
+
+typedef bool (*CompareFunc)(const SkPoint& a, const SkPoint& b);
+
+bool sweep_lt_horiz(const SkPoint& a, const SkPoint& b) {
+ return a.fX < b.fX || (a.fX == b.fX && a.fY > b.fY);
+}
+
+bool sweep_lt_vert(const SkPoint& a, const SkPoint& b) {
+ return a.fY < b.fY || (a.fY == b.fY && a.fX < b.fX);
+}
+
+struct Comparator {
+ enum class Direction { kVertical, kHorizontal };
+ Comparator(Direction direction) : fDirection(direction) {}
+ bool sweep_lt(const SkPoint& a, const SkPoint& b) const {
+ return fDirection == Direction::kHorizontal ? sweep_lt_horiz(a, b) : sweep_lt_vert(a, b);
+ }
+ Direction fDirection;
+};
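+
+// Example (illustrative): with Direction::kHorizontal, sweep_lt((1,5), (2,0)) is
+// true since 1 < 2; when X ties, the *larger* Y sorts first (a.fY > b.fY), matching
+// the decreasing secondary Y described in the overview at the top of this file.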
+
+inline void* emit_vertex(Vertex* v, bool emitCoverage, void* data) {
+ GrVertexWriter verts{data};
+ verts.write(v->fPoint);
+
+ if (emitCoverage) {
+ verts.write(GrNormalizeByteToFloat(v->fAlpha));
+ }
+
+ return verts.fPtr;
+}
+
+void* emit_triangle(Vertex* v0, Vertex* v1, Vertex* v2, bool emitCoverage, void* data) {
+ TESS_LOG("emit_triangle %g (%g, %g) %d\n", v0->fID, v0->fPoint.fX, v0->fPoint.fY, v0->fAlpha);
+ TESS_LOG(" %g (%g, %g) %d\n", v1->fID, v1->fPoint.fX, v1->fPoint.fY, v1->fAlpha);
+ TESS_LOG(" %g (%g, %g) %d\n", v2->fID, v2->fPoint.fX, v2->fPoint.fY, v2->fAlpha);
+#if TESSELLATOR_WIREFRAME
+ data = emit_vertex(v0, emitCoverage, data);
+ data = emit_vertex(v1, emitCoverage, data);
+ data = emit_vertex(v1, emitCoverage, data);
+ data = emit_vertex(v2, emitCoverage, data);
+ data = emit_vertex(v2, emitCoverage, data);
+ data = emit_vertex(v0, emitCoverage, data);
+#else
+ data = emit_vertex(v0, emitCoverage, data);
+ data = emit_vertex(v1, emitCoverage, data);
+ data = emit_vertex(v2, emitCoverage, data);
+#endif
+ return data;
+}
+
+struct VertexList {
+ VertexList() : fHead(nullptr), fTail(nullptr) {}
+ VertexList(Vertex* head, Vertex* tail) : fHead(head), fTail(tail) {}
+ Vertex* fHead;
+ Vertex* fTail;
+ void insert(Vertex* v, Vertex* prev, Vertex* next) {
+ list_insert<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, prev, next, &fHead, &fTail);
+ }
+ void append(Vertex* v) {
+ insert(v, fTail, nullptr);
+ }
+ void append(const VertexList& list) {
+ if (!list.fHead) {
+ return;
+ }
+ if (fTail) {
+ fTail->fNext = list.fHead;
+ list.fHead->fPrev = fTail;
+ } else {
+ fHead = list.fHead;
+ }
+ fTail = list.fTail;
+ }
+ void prepend(Vertex* v) {
+ insert(v, nullptr, fHead);
+ }
+ void remove(Vertex* v) {
+ list_remove<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, &fHead, &fTail);
+ }
+ void close() {
+ if (fHead && fTail) {
+ fTail->fNext = fHead;
+ fHead->fPrev = fTail;
+ }
+ }
+};
+
+// Round to nearest quarter-pixel. This is used for screenspace tessellation.
+
+inline void round(SkPoint* p) {
+ p->fX = SkScalarRoundToScalar(p->fX * SkFloatToScalar(4.0f)) * SkFloatToScalar(0.25f);
+ p->fY = SkScalarRoundToScalar(p->fY * SkFloatToScalar(4.0f)) * SkFloatToScalar(0.25f);
+}
+
+inline SkScalar double_to_clamped_scalar(double d) {
+ return SkDoubleToScalar(std::min((double) SK_ScalarMax, std::max(d, (double) -SK_ScalarMax)));
+}
+
+// A line equation in implicit form. fA * x + fB * y + fC = 0, for all points (x, y) on the line.
+struct Line {
+ Line(double a, double b, double c) : fA(a), fB(b), fC(c) {}
+ Line(Vertex* p, Vertex* q) : Line(p->fPoint, q->fPoint) {}
+ Line(const SkPoint& p, const SkPoint& q)
+ : fA(static_cast<double>(q.fY) - p.fY) // a = dY
+ , fB(static_cast<double>(p.fX) - q.fX) // b = -dX
+ , fC(static_cast<double>(p.fY) * q.fX - // c = cross(q, p)
+ static_cast<double>(p.fX) * q.fY) {}
+ double dist(const SkPoint& p) const {
+ return fA * p.fX + fB * p.fY + fC;
+ }
+ Line operator*(double v) const {
+ return Line(fA * v, fB * v, fC * v);
+ }
+ double magSq() const {
+ return fA * fA + fB * fB;
+ }
+ void normalize() {
+ double len = sqrt(this->magSq());
+ if (len == 0.0) {
+ return;
+ }
+ double scale = 1.0f / len;
+ fA *= scale;
+ fB *= scale;
+ fC *= scale;
+ }
+ bool nearParallel(const Line& o) const {
+ return fabs(o.fA - fA) < 0.00001 && fabs(o.fB - fB) < 0.00001;
+ }
+
+ // Compute the intersection of two (infinite) Lines.
+ bool intersect(const Line& other, SkPoint* point) const {
+ double denom = fA * other.fB - fB * other.fA;
+ if (denom == 0.0) {
+ return false;
+ }
+ double scale = 1.0 / denom;
+ point->fX = double_to_clamped_scalar((fB * other.fC - other.fB * fC) * scale);
+ point->fY = double_to_clamped_scalar((other.fA * fC - fA * other.fC) * scale);
+ round(point);
+ return point->isFinite();
+ }
+ double fA, fB, fC;
+};
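+
+// Worked example (illustrative): for p = (0,0), q = (2,1) the constructor yields
+// fA = 1, fB = -2, fC = 0, so dist((x,y)) = x - 2y: zero exactly on the line, and
+// elsewhere proportional to the perpendicular distance scaled by the length |q - p|.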
+
+/**
+ * An Edge joins a top Vertex to a bottom Vertex. Edge ordering for the list of "edges above" and
+ * "edge below" a vertex as well as for the active edge list is handled by isLeftOf()/isRightOf().
+ * Note that an Edge will give occasionally dist() != 0 for its own endpoints (because floating
+ * point). For speed, that case is only tested by the callers that require it. Edges also handle
+ * checking for intersection with other edges. Currently, this converts the edges to the
+ * parametric form, in order to avoid doing a division until an intersection has been confirmed.
+ * This is slightly slower in the "found" case, but a lot faster in the "not found" case.
+ *
+ * The coefficients of the line equation are stored in double precision to avoid catastrophic
+ * cancellation in the isLeftOf() and isRightOf() checks. Using doubles ensures that the result is
+ * correct in float, since it's a polynomial of degree 2. The intersect() function, being
+ * degree 5, is still subject to catastrophic cancellation. We deal with that by assuming its
+ * output may be incorrect, and adjusting the mesh topology to match (see comment at the top of
+ * this file).
+ */
+
+struct Edge {
+ enum class Type { kInner, kOuter, kConnector };
+ Edge(Vertex* top, Vertex* bottom, int winding, Type type)
+ : fWinding(winding)
+ , fTop(top)
+ , fBottom(bottom)
+ , fType(type)
+ , fLeft(nullptr)
+ , fRight(nullptr)
+ , fPrevEdgeAbove(nullptr)
+ , fNextEdgeAbove(nullptr)
+ , fPrevEdgeBelow(nullptr)
+ , fNextEdgeBelow(nullptr)
+ , fLeftPoly(nullptr)
+ , fRightPoly(nullptr)
+ , fLeftPolyPrev(nullptr)
+ , fLeftPolyNext(nullptr)
+ , fRightPolyPrev(nullptr)
+ , fRightPolyNext(nullptr)
+ , fUsedInLeftPoly(false)
+ , fUsedInRightPoly(false)
+ , fLine(top, bottom) {
+ }
+    int fWinding; // 1 == edge goes downward; -1 == edge goes upward.
+ Vertex* fTop; // The top vertex in vertex-sort-order (sweep_lt).
+ Vertex* fBottom; // The bottom vertex in vertex-sort-order.
+ Type fType;
+ Edge* fLeft; // The linked list of edges in the active edge list.
+ Edge* fRight; // "
+ Edge* fPrevEdgeAbove; // The linked list of edges in the bottom Vertex's "edges above".
+ Edge* fNextEdgeAbove; // "
+ Edge* fPrevEdgeBelow; // The linked list of edges in the top Vertex's "edges below".
+ Edge* fNextEdgeBelow; // "
+ Poly* fLeftPoly; // The Poly to the left of this edge, if any.
+ Poly* fRightPoly; // The Poly to the right of this edge, if any.
+ Edge* fLeftPolyPrev;
+ Edge* fLeftPolyNext;
+ Edge* fRightPolyPrev;
+ Edge* fRightPolyNext;
+ bool fUsedInLeftPoly;
+ bool fUsedInRightPoly;
+ Line fLine;
+ double dist(const SkPoint& p) const {
+ return fLine.dist(p);
+ }
+ bool isRightOf(Vertex* v) const {
+ return fLine.dist(v->fPoint) < 0.0;
+ }
+ bool isLeftOf(Vertex* v) const {
+ return fLine.dist(v->fPoint) > 0.0;
+ }
+ void recompute() {
+ fLine = Line(fTop, fBottom);
+ }
+ bool intersect(const Edge& other, SkPoint* p, uint8_t* alpha = nullptr) const {
+ TESS_LOG("intersecting %g -> %g with %g -> %g\n",
+ fTop->fID, fBottom->fID, other.fTop->fID, other.fBottom->fID);
+ if (fTop == other.fTop || fBottom == other.fBottom) {
+ return false;
+ }
+ double denom = fLine.fA * other.fLine.fB - fLine.fB * other.fLine.fA;
+ if (denom == 0.0) {
+ return false;
+ }
+ double dx = static_cast<double>(other.fTop->fPoint.fX) - fTop->fPoint.fX;
+ double dy = static_cast<double>(other.fTop->fPoint.fY) - fTop->fPoint.fY;
+ double sNumer = dy * other.fLine.fB + dx * other.fLine.fA;
+ double tNumer = dy * fLine.fB + dx * fLine.fA;
+ // If (sNumer / denom) or (tNumer / denom) is not in [0..1], exit early.
+ // This saves us doing the divide below unless absolutely necessary.
+ if (denom > 0.0 ? (sNumer < 0.0 || sNumer > denom || tNumer < 0.0 || tNumer > denom)
+ : (sNumer > 0.0 || sNumer < denom || tNumer > 0.0 || tNumer < denom)) {
+ return false;
+ }
+ double s = sNumer / denom;
+ SkASSERT(s >= 0.0 && s <= 1.0);
+ p->fX = SkDoubleToScalar(fTop->fPoint.fX - s * fLine.fB);
+ p->fY = SkDoubleToScalar(fTop->fPoint.fY + s * fLine.fA);
+ if (alpha) {
+ if (fType == Type::kConnector) {
+ *alpha = (1.0 - s) * fTop->fAlpha + s * fBottom->fAlpha;
+ } else if (other.fType == Type::kConnector) {
+ double t = tNumer / denom;
+ *alpha = (1.0 - t) * other.fTop->fAlpha + t * other.fBottom->fAlpha;
+ } else if (fType == Type::kOuter && other.fType == Type::kOuter) {
+ *alpha = 0;
+ } else {
+ *alpha = 255;
+ }
+ }
+ return true;
+ }
+};
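+
+// Note on Edge::intersect() above (illustrative): writing each edge parametrically
+// as P(s) = top + s*(bottom - top), the solution is s = sNumer / denom (and
+// t = tNumer / denom for the other edge). A true segment intersection requires
+// 0 <= s, t <= 1, so comparing the numerators against denom, with matching signs,
+// rejects misses before any division is performed.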
+
+struct SSEdge;
+
+struct SSVertex {
+ SSVertex(Vertex* v) : fVertex(v), fPrev(nullptr), fNext(nullptr) {}
+ Vertex* fVertex;
+ SSEdge* fPrev;
+ SSEdge* fNext;
+};
+
+struct SSEdge {
+ SSEdge(Edge* edge, SSVertex* prev, SSVertex* next)
+ : fEdge(edge), fEvent(nullptr), fPrev(prev), fNext(next) {
+ }
+ Edge* fEdge;
+ Event* fEvent;
+ SSVertex* fPrev;
+ SSVertex* fNext;
+};
+
+typedef std::unordered_map<Vertex*, SSVertex*> SSVertexMap;
+typedef std::vector<SSEdge*> SSEdgeList;
+
+struct EdgeList {
+ EdgeList() : fHead(nullptr), fTail(nullptr) {}
+ Edge* fHead;
+ Edge* fTail;
+ void insert(Edge* edge, Edge* prev, Edge* next) {
+ list_insert<Edge, &Edge::fLeft, &Edge::fRight>(edge, prev, next, &fHead, &fTail);
+ }
+ void append(Edge* e) {
+ insert(e, fTail, nullptr);
+ }
+ void remove(Edge* edge) {
+ list_remove<Edge, &Edge::fLeft, &Edge::fRight>(edge, &fHead, &fTail);
+ }
+ void removeAll() {
+ while (fHead) {
+ this->remove(fHead);
+ }
+ }
+ void close() {
+ if (fHead && fTail) {
+ fTail->fRight = fHead;
+ fHead->fLeft = fTail;
+ }
+ }
+ bool contains(Edge* edge) const {
+ return edge->fLeft || edge->fRight || fHead == edge;
+ }
+};
+
+struct EventList;
+
+struct Event {
+ Event(SSEdge* edge, const SkPoint& point, uint8_t alpha)
+ : fEdge(edge), fPoint(point), fAlpha(alpha) {
+ }
+ SSEdge* fEdge;
+ SkPoint fPoint;
+ uint8_t fAlpha;
+ void apply(VertexList* mesh, Comparator& c, EventList* events, SkArenaAlloc& alloc);
+};
+
+struct EventComparator {
+ enum class Op { kLessThan, kGreaterThan };
+ EventComparator(Op op) : fOp(op) {}
+ bool operator() (Event* const &e1, Event* const &e2) {
+ return fOp == Op::kLessThan ? e1->fAlpha < e2->fAlpha
+ : e1->fAlpha > e2->fAlpha;
+ }
+ Op fOp;
+};
+
+typedef std::priority_queue<Event*, std::vector<Event*>, EventComparator> EventPQ;
+
+struct EventList : EventPQ {
+ EventList(EventComparator comparison) : EventPQ(comparison) {
+ }
+};
+
+void create_event(SSEdge* e, EventList* events, SkArenaAlloc& alloc) {
+ Vertex* prev = e->fPrev->fVertex;
+ Vertex* next = e->fNext->fVertex;
+ if (prev == next || !prev->fPartner || !next->fPartner) {
+ return;
+ }
+ Edge bisector1(prev, prev->fPartner, 1, Edge::Type::kConnector);
+ Edge bisector2(next, next->fPartner, 1, Edge::Type::kConnector);
+ SkPoint p;
+ uint8_t alpha;
+ if (bisector1.intersect(bisector2, &p, &alpha)) {
+ TESS_LOG("found edge event for %g, %g (original %g -> %g), "
+ "will collapse to %g,%g alpha %d\n",
+ prev->fID, next->fID, e->fEdge->fTop->fID, e->fEdge->fBottom->fID, p.fX, p.fY,
+ alpha);
+ e->fEvent = alloc.make<Event>(e, p, alpha);
+ events->push(e->fEvent);
+ }
+}
+
+void create_event(SSEdge* edge, Vertex* v, SSEdge* other, Vertex* dest, EventList* events,
+ Comparator& c, SkArenaAlloc& alloc) {
+ if (!v->fPartner) {
+ return;
+ }
+ Vertex* top = edge->fEdge->fTop;
+ Vertex* bottom = edge->fEdge->fBottom;
+    if (!top || !bottom) {
+ return;
+ }
+ Line line = edge->fEdge->fLine;
+ line.fC = -(dest->fPoint.fX * line.fA + dest->fPoint.fY * line.fB);
+ Edge bisector(v, v->fPartner, 1, Edge::Type::kConnector);
+ SkPoint p;
+ uint8_t alpha = dest->fAlpha;
+ if (line.intersect(bisector.fLine, &p) && !c.sweep_lt(p, top->fPoint) &&
+ c.sweep_lt(p, bottom->fPoint)) {
+ TESS_LOG("found p edge event for %g, %g (original %g -> %g), "
+ "will collapse to %g,%g alpha %d\n",
+ dest->fID, v->fID, top->fID, bottom->fID, p.fX, p.fY, alpha);
+ edge->fEvent = alloc.make<Event>(edge, p, alpha);
+ events->push(edge->fEvent);
+ }
+}
+
+/***************************************************************************************/
+
+struct Poly {
+ Poly(Vertex* v, int winding)
+ : fFirstVertex(v)
+ , fWinding(winding)
+ , fHead(nullptr)
+ , fTail(nullptr)
+ , fNext(nullptr)
+ , fPartner(nullptr)
+ , fCount(0)
+ {
+#if LOGGING_ENABLED
+ static int gID = 0;
+ fID = gID++;
+ TESS_LOG("*** created Poly %d\n", fID);
+#endif
+ }
+ typedef enum { kLeft_Side, kRight_Side } Side;
+ struct MonotonePoly {
+ MonotonePoly(Edge* edge, Side side)
+ : fSide(side)
+ , fFirstEdge(nullptr)
+ , fLastEdge(nullptr)
+ , fPrev(nullptr)
+ , fNext(nullptr) {
+ this->addEdge(edge);
+ }
+ Side fSide;
+ Edge* fFirstEdge;
+ Edge* fLastEdge;
+ MonotonePoly* fPrev;
+ MonotonePoly* fNext;
+ void addEdge(Edge* edge) {
+ if (fSide == kRight_Side) {
+ SkASSERT(!edge->fUsedInRightPoly);
+ list_insert<Edge, &Edge::fRightPolyPrev, &Edge::fRightPolyNext>(
+ edge, fLastEdge, nullptr, &fFirstEdge, &fLastEdge);
+ edge->fUsedInRightPoly = true;
+ } else {
+ SkASSERT(!edge->fUsedInLeftPoly);
+ list_insert<Edge, &Edge::fLeftPolyPrev, &Edge::fLeftPolyNext>(
+ edge, fLastEdge, nullptr, &fFirstEdge, &fLastEdge);
+ edge->fUsedInLeftPoly = true;
+ }
+ }
+
+ void* emit(bool emitCoverage, void* data) {
+ Edge* e = fFirstEdge;
+ VertexList vertices;
+ vertices.append(e->fTop);
+ int count = 1;
+ while (e != nullptr) {
+ if (kRight_Side == fSide) {
+ vertices.append(e->fBottom);
+ e = e->fRightPolyNext;
+ } else {
+ vertices.prepend(e->fBottom);
+ e = e->fLeftPolyNext;
+ }
+ count++;
+ }
+ Vertex* first = vertices.fHead;
+ Vertex* v = first->fNext;
+ while (v != vertices.fTail) {
+ SkASSERT(v && v->fPrev && v->fNext);
+ Vertex* prev = v->fPrev;
+ Vertex* curr = v;
+ Vertex* next = v->fNext;
+ if (count == 3) {
+ return emit_triangle(prev, curr, next, emitCoverage, data);
+ }
+ double ax = static_cast<double>(curr->fPoint.fX) - prev->fPoint.fX;
+ double ay = static_cast<double>(curr->fPoint.fY) - prev->fPoint.fY;
+ double bx = static_cast<double>(next->fPoint.fX) - curr->fPoint.fX;
+ double by = static_cast<double>(next->fPoint.fY) - curr->fPoint.fY;
+ if (ax * by - ay * bx >= 0.0) {
+ data = emit_triangle(prev, curr, next, emitCoverage, data);
+ v->fPrev->fNext = v->fNext;
+ v->fNext->fPrev = v->fPrev;
+ count--;
+ if (v->fPrev == first) {
+ v = v->fNext;
+ } else {
+ v = v->fPrev;
+ }
+ } else {
+ v = v->fNext;
+ }
+ }
+ return data;
+ }
+ };
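+
+    // Note on MonotonePoly::emit() (illustrative): because the vertex chain is
+    // monotone in the sweep direction, the ax*by - ay*bx cross-product test is
+    // sufficient to identify a convex "ear"; each emitted ear unlinks one vertex,
+    // so a monotone polygon of N vertices produces exactly N - 2 triangles.
+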
+ Poly* addEdge(Edge* e, Side side, SkArenaAlloc& alloc) {
+ TESS_LOG("addEdge (%g -> %g) to poly %d, %s side\n",
+ e->fTop->fID, e->fBottom->fID, fID, side == kLeft_Side ? "left" : "right");
+ Poly* partner = fPartner;
+ Poly* poly = this;
+ if (side == kRight_Side) {
+ if (e->fUsedInRightPoly) {
+ return this;
+ }
+ } else {
+ if (e->fUsedInLeftPoly) {
+ return this;
+ }
+ }
+ if (partner) {
+ fPartner = partner->fPartner = nullptr;
+ }
+ if (!fTail) {
+ fHead = fTail = alloc.make<MonotonePoly>(e, side);
+ fCount += 2;
+ } else if (e->fBottom == fTail->fLastEdge->fBottom) {
+ return poly;
+ } else if (side == fTail->fSide) {
+ fTail->addEdge(e);
+ fCount++;
+ } else {
+ e = alloc.make<Edge>(fTail->fLastEdge->fBottom, e->fBottom, 1, Edge::Type::kInner);
+ fTail->addEdge(e);
+ fCount++;
+ if (partner) {
+ partner->addEdge(e, side, alloc);
+ poly = partner;
+ } else {
+ MonotonePoly* m = alloc.make<MonotonePoly>(e, side);
+ m->fPrev = fTail;
+ fTail->fNext = m;
+ fTail = m;
+ }
+ }
+ return poly;
+ }
+ void* emit(bool emitCoverage, void *data) {
+ if (fCount < 3) {
+ return data;
+ }
+ TESS_LOG("emit() %d, size %d\n", fID, fCount);
+ for (MonotonePoly* m = fHead; m != nullptr; m = m->fNext) {
+ data = m->emit(emitCoverage, data);
+ }
+ return data;
+ }
+ Vertex* lastVertex() const { return fTail ? fTail->fLastEdge->fBottom : fFirstVertex; }
+ Vertex* fFirstVertex;
+ int fWinding;
+ MonotonePoly* fHead;
+ MonotonePoly* fTail;
+ Poly* fNext;
+ Poly* fPartner;
+ int fCount;
+#if LOGGING_ENABLED
+ int fID;
+#endif
+};
+
+/***************************************************************************************/
+
+bool coincident(const SkPoint& a, const SkPoint& b) {
+ return a == b;
+}
+
+Poly* new_poly(Poly** head, Vertex* v, int winding, SkArenaAlloc& alloc) {
+ Poly* poly = alloc.make<Poly>(v, winding);
+ poly->fNext = *head;
+ *head = poly;
+ return poly;
+}
+
+void append_point_to_contour(const SkPoint& p, VertexList* contour, SkArenaAlloc& alloc) {
+ Vertex* v = alloc.make<Vertex>(p, 255);
+#if LOGGING_ENABLED
+ static float gID = 0.0f;
+ v->fID = gID++;
+#endif
+ contour->append(v);
+}
+
+SkScalar quad_error_at(const SkPoint pts[3], SkScalar t, SkScalar u) {
+ SkQuadCoeff quad(pts);
+ SkPoint p0 = to_point(quad.eval(t - 0.5f * u));
+ SkPoint mid = to_point(quad.eval(t));
+ SkPoint p1 = to_point(quad.eval(t + 0.5f * u));
+ if (!p0.isFinite() || !mid.isFinite() || !p1.isFinite()) {
+ return 0;
+ }
+ return SkPointPriv::DistanceToLineSegmentBetweenSqd(mid, p0, p1);
+}
+
+void append_quadratic_to_contour(const SkPoint pts[3], SkScalar toleranceSqd, VertexList* contour,
+ SkArenaAlloc& alloc) {
+ SkQuadCoeff quad(pts);
+ Sk2s aa = quad.fA * quad.fA;
+ SkScalar denom = 2.0f * (aa[0] + aa[1]);
+ Sk2s ab = quad.fA * quad.fB;
+ SkScalar t = denom ? (-ab[0] - ab[1]) / denom : 0.0f;
+ int nPoints = 1;
+ SkScalar u = 1.0f;
+ // Test possible subdivision values only at the point of maximum curvature.
+ // If it passes the flatness metric there, it'll pass everywhere.
+ while (nPoints < GrPathUtils::kMaxPointsPerCurve) {
+ u = 1.0f / nPoints;
+ if (quad_error_at(pts, t, u) < toleranceSqd) {
+ break;
+ }
+ nPoints++;
+ }
+ for (int j = 1; j <= nPoints; j++) {
+ append_point_to_contour(to_point(quad.eval(j * u)), contour, alloc);
+ }
+}
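+
+// Note (illustrative): for q(t) = A*t^2 + B*t + C, |q'(t)|^2 = |2At + B|^2 is
+// minimized at t = -(A.B) / (2*A.A) -- exactly the `t` computed above. A quadratic's
+// curvature is proportional to 1/|q'(t)|^3, so it peaks at that t; if the flatness
+// check passes there, it passes everywhere on the curve.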
+
+void generate_cubic_points(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ VertexList* contour,
+ int pointsLeft,
+ SkArenaAlloc& alloc) {
+ SkScalar d1 = SkPointPriv::DistanceToLineSegmentBetweenSqd(p1, p0, p3);
+ SkScalar d2 = SkPointPriv::DistanceToLineSegmentBetweenSqd(p2, p0, p3);
+ if (pointsLeft < 2 || (d1 < tolSqd && d2 < tolSqd) ||
+ !SkScalarIsFinite(d1) || !SkScalarIsFinite(d2)) {
+ append_point_to_contour(p3, contour, alloc);
+ return;
+ }
+ const SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ { SkScalarAve(p2.fX, p3.fX), SkScalarAve(p2.fY, p3.fY) }
+ };
+ const SkPoint r[] = {
+ { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) },
+ { SkScalarAve(q[1].fX, q[2].fX), SkScalarAve(q[1].fY, q[2].fY) }
+ };
+ const SkPoint s = { SkScalarAve(r[0].fX, r[1].fX), SkScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ generate_cubic_points(p0, q[0], r[0], s, tolSqd, contour, pointsLeft, alloc);
+ generate_cubic_points(s, r[1], q[2], p3, tolSqd, contour, pointsLeft, alloc);
+}
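+
+// Note (illustrative): q[], r[], and s above are the de Casteljau midpoints of the
+// cubic at t = 0.5; each recursion halves the curve (and the pointsLeft budget)
+// until the off-chord control points p1/p2 are within tolerance of the chord p0-p3.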
+
+// Stage 1: convert the input path to a set of linear contours (linked list of Vertices).
+
+void path_to_contours(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ VertexList* contours, SkArenaAlloc& alloc, bool *isLinear) {
+ SkScalar toleranceSqd = tolerance * tolerance;
+
+ SkPoint pts[4];
+ *isLinear = true;
+ VertexList* contour = contours;
+ SkPath::Iter iter(path, false);
+ if (path.isInverseFillType()) {
+ SkPoint quad[4];
+ clipBounds.toQuad(quad);
+ for (int i = 3; i >= 0; i--) {
+ append_point_to_contour(quad[i], contours, alloc);
+ }
+ contour++;
+ }
+ SkAutoConicToQuads converter;
+ SkPath::Verb verb;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, toleranceSqd);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ append_quadratic_to_contour(quadPts, toleranceSqd, contour, alloc);
+ quadPts += 2;
+ }
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kMove_Verb:
+ if (contour->fHead) {
+ contour++;
+ }
+ append_point_to_contour(pts[0], contour, alloc);
+ break;
+ case SkPath::kLine_Verb: {
+ append_point_to_contour(pts[1], contour, alloc);
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ append_quadratic_to_contour(pts, toleranceSqd, contour, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ int pointsLeft = GrPathUtils::cubicPointCount(pts, tolerance);
+ generate_cubic_points(pts[0], pts[1], pts[2], pts[3], toleranceSqd, contour,
+ pointsLeft, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ break;
+ }
+ }
+}
+
+inline bool apply_fill_type(SkPath::FillType fillType, int winding) {
+ switch (fillType) {
+ case SkPath::kWinding_FillType:
+ return winding != 0;
+ case SkPath::kEvenOdd_FillType:
+ return (winding & 1) != 0;
+ case SkPath::kInverseWinding_FillType:
+ return winding == 1;
+ case SkPath::kInverseEvenOdd_FillType:
+ return (winding & 1) == 1;
+ default:
+ SkASSERT(false);
+ return false;
+ }
+}
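+
+// Example (illustrative): a region covered twice by same-direction contours has
+// winding == 2, so it is filled under kWinding_FillType (2 != 0) but not under
+// kEvenOdd_FillType ((2 & 1) == 0).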
+
+inline bool apply_fill_type(SkPath::FillType fillType, Poly* poly) {
+ return poly && apply_fill_type(fillType, poly->fWinding);
+}
+
+Edge* new_edge(Vertex* prev, Vertex* next, Edge::Type type, Comparator& c, SkArenaAlloc& alloc) {
+ int winding = c.sweep_lt(prev->fPoint, next->fPoint) ? 1 : -1;
+ Vertex* top = winding < 0 ? next : prev;
+ Vertex* bottom = winding < 0 ? prev : next;
+ return alloc.make<Edge>(top, bottom, winding, type);
+}
+
+void remove_edge(Edge* edge, EdgeList* edges) {
+ TESS_LOG("removing edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
+ SkASSERT(edges->contains(edge));
+ edges->remove(edge);
+}
+
+void insert_edge(Edge* edge, Edge* prev, EdgeList* edges) {
+ TESS_LOG("inserting edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
+ SkASSERT(!edges->contains(edge));
+ Edge* next = prev ? prev->fRight : edges->fHead;
+ edges->insert(edge, prev, next);
+}
+
+void find_enclosing_edges(Vertex* v, EdgeList* edges, Edge** left, Edge** right) {
+ if (v->fFirstEdgeAbove && v->fLastEdgeAbove) {
+ *left = v->fFirstEdgeAbove->fLeft;
+ *right = v->fLastEdgeAbove->fRight;
+ return;
+ }
+ Edge* next = nullptr;
+ Edge* prev;
+ for (prev = edges->fTail; prev != nullptr; prev = prev->fLeft) {
+ if (prev->isLeftOf(v)) {
+ break;
+ }
+ next = prev;
+ }
+ *left = prev;
+ *right = next;
+}
+
+void insert_edge_above(Edge* edge, Vertex* v, Comparator& c) {
+ if (edge->fTop->fPoint == edge->fBottom->fPoint ||
+ c.sweep_lt(edge->fBottom->fPoint, edge->fTop->fPoint)) {
+ return;
+ }
+ TESS_LOG("insert edge (%g -> %g) above vertex %g\n",
+ edge->fTop->fID, edge->fBottom->fID, v->fID);
+ Edge* prev = nullptr;
+ Edge* next;
+ for (next = v->fFirstEdgeAbove; next; next = next->fNextEdgeAbove) {
+ if (next->isRightOf(edge->fTop)) {
+ break;
+ }
+ prev = next;
+ }
+ list_insert<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
+ edge, prev, next, &v->fFirstEdgeAbove, &v->fLastEdgeAbove);
+}
+
+void insert_edge_below(Edge* edge, Vertex* v, Comparator& c) {
+ if (edge->fTop->fPoint == edge->fBottom->fPoint ||
+ c.sweep_lt(edge->fBottom->fPoint, edge->fTop->fPoint)) {
+ return;
+ }
+ TESS_LOG("insert edge (%g -> %g) below vertex %g\n",
+ edge->fTop->fID, edge->fBottom->fID, v->fID);
+ Edge* prev = nullptr;
+ Edge* next;
+ for (next = v->fFirstEdgeBelow; next; next = next->fNextEdgeBelow) {
+ if (next->isRightOf(edge->fBottom)) {
+ break;
+ }
+ prev = next;
+ }
+ list_insert<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
+ edge, prev, next, &v->fFirstEdgeBelow, &v->fLastEdgeBelow);
+}
+
+void remove_edge_above(Edge* edge) {
+ SkASSERT(edge->fTop && edge->fBottom);
+ TESS_LOG("removing edge (%g -> %g) above vertex %g\n", edge->fTop->fID, edge->fBottom->fID,
+ edge->fBottom->fID);
+ list_remove<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
+ edge, &edge->fBottom->fFirstEdgeAbove, &edge->fBottom->fLastEdgeAbove);
+}
+
+void remove_edge_below(Edge* edge) {
+ SkASSERT(edge->fTop && edge->fBottom);
+ TESS_LOG("removing edge (%g -> %g) below vertex %g\n",
+ edge->fTop->fID, edge->fBottom->fID, edge->fTop->fID);
+ list_remove<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
+ edge, &edge->fTop->fFirstEdgeBelow, &edge->fTop->fLastEdgeBelow);
+}
+
+void disconnect(Edge* edge) {
+ remove_edge_above(edge);
+ remove_edge_below(edge);
+}
+
+void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Vertex** current, Comparator& c);
+
+void rewind(EdgeList* activeEdges, Vertex** current, Vertex* dst, Comparator& c) {
+ if (!current || *current == dst || c.sweep_lt((*current)->fPoint, dst->fPoint)) {
+ return;
+ }
+ Vertex* v = *current;
+ TESS_LOG("rewinding active edges from vertex %g to vertex %g\n", v->fID, dst->fID);
+ while (v != dst) {
+ v = v->fPrev;
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ remove_edge(e, activeEdges);
+ }
+ Edge* leftEdge = v->fLeftEnclosingEdge;
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ insert_edge(e, leftEdge, activeEdges);
+ leftEdge = e;
+ }
+ }
+ *current = v;
+}
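+
+// Note (illustrative): rewind() walks the sweep backwards from *current to dst,
+// undoing the work done as each vertex was passed: its edges below are removed from
+// the active edge list and its edges above are re-inserted, restoring the
+// left-to-right AEL invariant before a topology change (see comment at file top).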
+
+void set_top(Edge* edge, Vertex* v, EdgeList* activeEdges, Vertex** current, Comparator& c) {
+ remove_edge_below(edge);
+ edge->fTop = v;
+ edge->recompute();
+ insert_edge_below(edge, v, c);
+ rewind(activeEdges, current, edge->fTop, c);
+ merge_collinear_edges(edge, activeEdges, current, c);
+}
+
+void set_bottom(Edge* edge, Vertex* v, EdgeList* activeEdges, Vertex** current, Comparator& c) {
+ remove_edge_above(edge);
+ edge->fBottom = v;
+ edge->recompute();
+ insert_edge_above(edge, v, c);
+ rewind(activeEdges, current, edge->fTop, c);
+ merge_collinear_edges(edge, activeEdges, current, c);
+}
+
+void merge_edges_above(Edge* edge, Edge* other, EdgeList* activeEdges, Vertex** current,
+ Comparator& c) {
+ if (coincident(edge->fTop->fPoint, other->fTop->fPoint)) {
+ TESS_LOG("merging coincident above edges (%g, %g) -> (%g, %g)\n",
+ edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
+ edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
+ rewind(activeEdges, current, edge->fTop, c);
+ other->fWinding += edge->fWinding;
+ disconnect(edge);
+ edge->fTop = edge->fBottom = nullptr;
+ } else if (c.sweep_lt(edge->fTop->fPoint, other->fTop->fPoint)) {
+ rewind(activeEdges, current, edge->fTop, c);
+ other->fWinding += edge->fWinding;
+ set_bottom(edge, other->fTop, activeEdges, current, c);
+ } else {
+ rewind(activeEdges, current, other->fTop, c);
+ edge->fWinding += other->fWinding;
+ set_bottom(other, edge->fTop, activeEdges, current, c);
+ }
+}
+
+void merge_edges_below(Edge* edge, Edge* other, EdgeList* activeEdges, Vertex** current,
+ Comparator& c) {
+ if (coincident(edge->fBottom->fPoint, other->fBottom->fPoint)) {
+ TESS_LOG("merging coincident below edges (%g, %g) -> (%g, %g)\n",
+ edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
+ edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
+ rewind(activeEdges, current, edge->fTop, c);
+ other->fWinding += edge->fWinding;
+ disconnect(edge);
+ edge->fTop = edge->fBottom = nullptr;
+ } else if (c.sweep_lt(edge->fBottom->fPoint, other->fBottom->fPoint)) {
+ rewind(activeEdges, current, other->fTop, c);
+ edge->fWinding += other->fWinding;
+ set_top(other, edge->fBottom, activeEdges, current, c);
+ } else {
+ rewind(activeEdges, current, edge->fTop, c);
+ other->fWinding += edge->fWinding;
+ set_top(edge, other->fBottom, activeEdges, current, c);
+ }
+}
+
+bool top_collinear(Edge* left, Edge* right) {
+ if (!left || !right) {
+ return false;
+ }
+ return left->fTop->fPoint == right->fTop->fPoint ||
+ !left->isLeftOf(right->fTop) || !right->isRightOf(left->fTop);
+}
+
+bool bottom_collinear(Edge* left, Edge* right) {
+ if (!left || !right) {
+ return false;
+ }
+ return left->fBottom->fPoint == right->fBottom->fPoint ||
+ !left->isLeftOf(right->fBottom) || !right->isRightOf(left->fBottom);
+}
+
+void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Vertex** current, Comparator& c) {
+ for (;;) {
+ if (top_collinear(edge->fPrevEdgeAbove, edge)) {
+ merge_edges_above(edge->fPrevEdgeAbove, edge, activeEdges, current, c);
+ } else if (top_collinear(edge, edge->fNextEdgeAbove)) {
+ merge_edges_above(edge->fNextEdgeAbove, edge, activeEdges, current, c);
+ } else if (bottom_collinear(edge->fPrevEdgeBelow, edge)) {
+ merge_edges_below(edge->fPrevEdgeBelow, edge, activeEdges, current, c);
+ } else if (bottom_collinear(edge, edge->fNextEdgeBelow)) {
+ merge_edges_below(edge->fNextEdgeBelow, edge, activeEdges, current, c);
+ } else {
+ break;
+ }
+ }
+ SkASSERT(!top_collinear(edge->fPrevEdgeAbove, edge));
+ SkASSERT(!top_collinear(edge, edge->fNextEdgeAbove));
+ SkASSERT(!bottom_collinear(edge->fPrevEdgeBelow, edge));
+ SkASSERT(!bottom_collinear(edge, edge->fNextEdgeBelow));
+}
+
+bool split_edge(Edge* edge, Vertex* v, EdgeList* activeEdges, Vertex** current, Comparator& c,
+ SkArenaAlloc& alloc) {
+ if (!edge->fTop || !edge->fBottom || v == edge->fTop || v == edge->fBottom) {
+ return false;
+ }
+ TESS_LOG("splitting edge (%g -> %g) at vertex %g (%g, %g)\n",
+ edge->fTop->fID, edge->fBottom->fID, v->fID, v->fPoint.fX, v->fPoint.fY);
+ Vertex* top;
+ Vertex* bottom;
+ int winding = edge->fWinding;
+ if (c.sweep_lt(v->fPoint, edge->fTop->fPoint)) {
+ top = v;
+ bottom = edge->fTop;
+ set_top(edge, v, activeEdges, current, c);
+ } else if (c.sweep_lt(edge->fBottom->fPoint, v->fPoint)) {
+ top = edge->fBottom;
+ bottom = v;
+ set_bottom(edge, v, activeEdges, current, c);
+ } else {
+ top = v;
+ bottom = edge->fBottom;
+ set_bottom(edge, v, activeEdges, current, c);
+ }
+ Edge* newEdge = alloc.make<Edge>(top, bottom, winding, edge->fType);
+ insert_edge_below(newEdge, top, c);
+ insert_edge_above(newEdge, bottom, c);
+ merge_collinear_edges(newEdge, activeEdges, current, c);
+ return true;
+}
+
+bool intersect_edge_pair(Edge* left, Edge* right, EdgeList* activeEdges, Vertex** current,
+                         Comparator& c, SkArenaAlloc& alloc) {
+ if (!left->fTop || !left->fBottom || !right->fTop || !right->fBottom) {
+ return false;
+ }
+ if (left->fTop == right->fTop || left->fBottom == right->fBottom) {
+ return false;
+ }
+ if (c.sweep_lt(left->fTop->fPoint, right->fTop->fPoint)) {
+ if (!left->isLeftOf(right->fTop)) {
+ rewind(activeEdges, current, right->fTop, c);
+ return split_edge(left, right->fTop, activeEdges, current, c, alloc);
+ }
+ } else {
+ if (!right->isRightOf(left->fTop)) {
+ rewind(activeEdges, current, left->fTop, c);
+ return split_edge(right, left->fTop, activeEdges, current, c, alloc);
+ }
+ }
+ if (c.sweep_lt(right->fBottom->fPoint, left->fBottom->fPoint)) {
+ if (!left->isLeftOf(right->fBottom)) {
+ rewind(activeEdges, current, right->fBottom, c);
+ return split_edge(left, right->fBottom, activeEdges, current, c, alloc);
+ }
+ } else {
+ if (!right->isRightOf(left->fBottom)) {
+ rewind(activeEdges, current, left->fBottom, c);
+ return split_edge(right, left->fBottom, activeEdges, current, c, alloc);
+ }
+ }
+ return false;
+}
+
+Edge* connect(Vertex* prev, Vertex* next, Edge::Type type, Comparator& c, SkArenaAlloc& alloc,
+ int winding_scale = 1) {
+ if (!prev || !next || prev->fPoint == next->fPoint) {
+ return nullptr;
+ }
+ Edge* edge = new_edge(prev, next, type, c, alloc);
+ insert_edge_below(edge, edge->fTop, c);
+ insert_edge_above(edge, edge->fBottom, c);
+ edge->fWinding *= winding_scale;
+ merge_collinear_edges(edge, nullptr, nullptr, c);
+ return edge;
+}
+
+void merge_vertices(Vertex* src, Vertex* dst, VertexList* mesh, Comparator& c,
+ SkArenaAlloc& alloc) {
+ TESS_LOG("found coincident verts at %g, %g; merging %g into %g\n",
+ src->fPoint.fX, src->fPoint.fY, src->fID, dst->fID);
+ dst->fAlpha = SkTMax(src->fAlpha, dst->fAlpha);
+ if (src->fPartner) {
+ src->fPartner->fPartner = dst;
+ }
+ while (Edge* edge = src->fFirstEdgeAbove) {
+ set_bottom(edge, dst, nullptr, nullptr, c);
+ }
+ while (Edge* edge = src->fFirstEdgeBelow) {
+ set_top(edge, dst, nullptr, nullptr, c);
+ }
+ mesh->remove(src);
+ dst->fSynthetic = true;
+}
+
+Vertex* create_sorted_vertex(const SkPoint& p, uint8_t alpha, VertexList* mesh,
+ Vertex* reference, Comparator& c, SkArenaAlloc& alloc) {
+ Vertex* prevV = reference;
+ while (prevV && c.sweep_lt(p, prevV->fPoint)) {
+ prevV = prevV->fPrev;
+ }
+ Vertex* nextV = prevV ? prevV->fNext : mesh->fHead;
+ while (nextV && c.sweep_lt(nextV->fPoint, p)) {
+ prevV = nextV;
+ nextV = nextV->fNext;
+ }
+ Vertex* v;
+ if (prevV && coincident(prevV->fPoint, p)) {
+ v = prevV;
+ } else if (nextV && coincident(nextV->fPoint, p)) {
+ v = nextV;
+ } else {
+ v = alloc.make<Vertex>(p, alpha);
+#if LOGGING_ENABLED
+ if (!prevV) {
+ v->fID = mesh->fHead->fID - 1.0f;
+ } else if (!nextV) {
+ v->fID = mesh->fTail->fID + 1.0f;
+ } else {
+ v->fID = (prevV->fID + nextV->fID) * 0.5f;
+ }
+#endif
+ mesh->insert(v, prevV, nextV);
+ }
+ return v;
+}
+
+// If an edge's top and bottom points differ only by 1/2 machine epsilon in the primary
+// sort criterion, it may not be possible to split correctly, since there is no point which is
+// below the top and above the bottom. This function detects that case.
+bool nearly_flat(Comparator& c, Edge* edge) {
+ SkPoint diff = edge->fBottom->fPoint - edge->fTop->fPoint;
+ float primaryDiff = c.fDirection == Comparator::Direction::kHorizontal ? diff.fX : diff.fY;
+ return fabs(primaryDiff) < std::numeric_limits<float>::epsilon() && primaryDiff != 0.0f;
+}
+
+SkPoint clamp(SkPoint p, SkPoint min, SkPoint max, Comparator& c) {
+ if (c.sweep_lt(p, min)) {
+ return min;
+ } else if (c.sweep_lt(max, p)) {
+ return max;
+ } else {
+ return p;
+ }
+}
+
+void compute_bisector(Edge* edge1, Edge* edge2, Vertex* v, SkArenaAlloc& alloc) {
+ Line line1 = edge1->fLine;
+ Line line2 = edge2->fLine;
+ line1.normalize();
+ line2.normalize();
+ double cosAngle = line1.fA * line2.fA + line1.fB * line2.fB;
+ if (cosAngle > 0.999) {
+ return;
+ }
+ line1.fC += edge1->fWinding > 0 ? -1 : 1;
+ line2.fC += edge2->fWinding > 0 ? -1 : 1;
+ SkPoint p;
+ if (line1.intersect(line2, &p)) {
+ uint8_t alpha = edge1->fType == Edge::Type::kOuter ? 255 : 0;
+ v->fPartner = alloc.make<Vertex>(p, alpha);
+ TESS_LOG("computed bisector (%g,%g) alpha %d for vertex %g\n", p.fX, p.fY, alpha, v->fID);
+ }
+}
+
+bool check_for_intersection(Edge* left, Edge* right, EdgeList* activeEdges, Vertex** current,
+ VertexList* mesh, Comparator& c, SkArenaAlloc& alloc) {
+ if (!left || !right) {
+ return false;
+ }
+ SkPoint p;
+ uint8_t alpha;
+ if (left->intersect(*right, &p, &alpha) && p.isFinite()) {
+ Vertex* v;
+ TESS_LOG("found intersection, pt is %g, %g\n", p.fX, p.fY);
+ Vertex* top = *current;
+ // If the intersection point is above the current vertex, rewind to the vertex above the
+ // intersection.
+ while (top && c.sweep_lt(p, top->fPoint)) {
+ top = top->fPrev;
+ }
+ if (!nearly_flat(c, left)) {
+ p = clamp(p, left->fTop->fPoint, left->fBottom->fPoint, c);
+ }
+ if (!nearly_flat(c, right)) {
+ p = clamp(p, right->fTop->fPoint, right->fBottom->fPoint, c);
+ }
+ if (p == left->fTop->fPoint) {
+ v = left->fTop;
+ } else if (p == left->fBottom->fPoint) {
+ v = left->fBottom;
+ } else if (p == right->fTop->fPoint) {
+ v = right->fTop;
+ } else if (p == right->fBottom->fPoint) {
+ v = right->fBottom;
+ } else {
+ v = create_sorted_vertex(p, alpha, mesh, top, c, alloc);
+ if (left->fTop->fPartner) {
+ v->fSynthetic = true;
+ compute_bisector(left, right, v, alloc);
+ }
+ }
+ rewind(activeEdges, current, top ? top : v, c);
+ split_edge(left, v, activeEdges, current, c, alloc);
+ split_edge(right, v, activeEdges, current, c, alloc);
+ v->fAlpha = SkTMax(v->fAlpha, alpha);
+ return true;
+ }
+ return intersect_edge_pair(left, right, activeEdges, current, c, alloc);
+}
+
+void sanitize_contours(VertexList* contours, int contourCnt, bool approximate) {
+ for (VertexList* contour = contours; contourCnt > 0; --contourCnt, ++contour) {
+ SkASSERT(contour->fHead);
+ Vertex* prev = contour->fTail;
+ if (approximate) {
+ round(&prev->fPoint);
+ }
+ for (Vertex* v = contour->fHead; v;) {
+ if (approximate) {
+ round(&v->fPoint);
+ }
+ Vertex* next = v->fNext;
+ Vertex* nextWrap = next ? next : contour->fHead;
+ if (coincident(prev->fPoint, v->fPoint)) {
+ TESS_LOG("vertex %g,%g coincident; removing\n", v->fPoint.fX, v->fPoint.fY);
+ contour->remove(v);
+ } else if (!v->fPoint.isFinite()) {
+ TESS_LOG("vertex %g,%g non-finite; removing\n", v->fPoint.fX, v->fPoint.fY);
+ contour->remove(v);
+ } else if (Line(prev->fPoint, nextWrap->fPoint).dist(v->fPoint) == 0.0) {
+ TESS_LOG("vertex %g,%g collinear; removing\n", v->fPoint.fX, v->fPoint.fY);
+ contour->remove(v);
+ } else {
+ prev = v;
+ }
+ v = next;
+ }
+ }
+}
+
+bool merge_coincident_vertices(VertexList* mesh, Comparator& c, SkArenaAlloc& alloc) {
+ if (!mesh->fHead) {
+ return false;
+ }
+ bool merged = false;
+ for (Vertex* v = mesh->fHead->fNext; v;) {
+ Vertex* next = v->fNext;
+ if (c.sweep_lt(v->fPoint, v->fPrev->fPoint)) {
+ v->fPoint = v->fPrev->fPoint;
+ }
+ if (coincident(v->fPrev->fPoint, v->fPoint)) {
+ merge_vertices(v, v->fPrev, mesh, c, alloc);
+ merged = true;
+ }
+ v = next;
+ }
+ return merged;
+}
+
+// Stage 2: convert the contours to a mesh of edges connecting the vertices.
+
+void build_edges(VertexList* contours, int contourCnt, VertexList* mesh, Comparator& c,
+ SkArenaAlloc& alloc) {
+ for (VertexList* contour = contours; contourCnt > 0; --contourCnt, ++contour) {
+ Vertex* prev = contour->fTail;
+ for (Vertex* v = contour->fHead; v;) {
+ Vertex* next = v->fNext;
+ connect(prev, v, Edge::Type::kInner, c, alloc);
+ mesh->append(v);
+ prev = v;
+ v = next;
+ }
+ }
+}
+
+void connect_partners(VertexList* mesh, Comparator& c, SkArenaAlloc& alloc) {
+ for (Vertex* outer = mesh->fHead; outer; outer = outer->fNext) {
+ if (Vertex* inner = outer->fPartner) {
+ if ((inner->fPrev || inner->fNext) && (outer->fPrev || outer->fNext)) {
+ // Connector edges get zero winding, since they're only structural (i.e., to ensure
+ // no 0-0-0 alpha triangles are produced), and shouldn't affect the poly winding
+ // number.
+ connect(outer, inner, Edge::Type::kConnector, c, alloc, 0);
+ inner->fPartner = outer->fPartner = nullptr;
+ }
+ }
+ }
+}
+
+template <CompareFunc sweep_lt>
+void sorted_merge(VertexList* front, VertexList* back, VertexList* result) {
+ Vertex* a = front->fHead;
+ Vertex* b = back->fHead;
+ while (a && b) {
+ if (sweep_lt(a->fPoint, b->fPoint)) {
+ front->remove(a);
+ result->append(a);
+ a = front->fHead;
+ } else {
+ back->remove(b);
+ result->append(b);
+ b = back->fHead;
+ }
+ }
+ result->append(*front);
+ result->append(*back);
+}
+
+void sorted_merge(VertexList* front, VertexList* back, VertexList* result, Comparator& c) {
+ if (c.fDirection == Comparator::Direction::kHorizontal) {
+ sorted_merge<sweep_lt_horiz>(front, back, result);
+ } else {
+ sorted_merge<sweep_lt_vert>(front, back, result);
+ }
+#if LOGGING_ENABLED
+ float id = 0.0f;
+ for (Vertex* v = result->fHead; v; v = v->fNext) {
+ v->fID = id++;
+ }
+#endif
+}
+
+// Stage 3: sort the vertices by increasing sweep direction.
+
+template <CompareFunc sweep_lt>
+void merge_sort(VertexList* vertices) {
+ Vertex* slow = vertices->fHead;
+ if (!slow) {
+ return;
+ }
+ Vertex* fast = slow->fNext;
+ if (!fast) {
+ return;
+ }
+ do {
+ fast = fast->fNext;
+ if (fast) {
+ fast = fast->fNext;
+ slow = slow->fNext;
+ }
+ } while (fast);
+ VertexList front(vertices->fHead, slow);
+ VertexList back(slow->fNext, vertices->fTail);
+ front.fTail->fNext = back.fHead->fPrev = nullptr;
+
+ merge_sort<sweep_lt>(&front);
+ merge_sort<sweep_lt>(&back);
+
+ vertices->fHead = vertices->fTail = nullptr;
+ sorted_merge<sweep_lt>(&front, &back, vertices);
+}
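+
+// Note (illustrative): the slow/fast pointer walk above finds the list midpoint in
+// a single pass (fast advances two nodes per step, slow advances one), splitting
+// the linked list for an O(N log N) in-place merge sort without random access.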
+
+void dump_mesh(const VertexList& mesh) {
+#if LOGGING_ENABLED
+ for (Vertex* v = mesh.fHead; v; v = v->fNext) {
+ TESS_LOG("vertex %g (%g, %g) alpha %d", v->fID, v->fPoint.fX, v->fPoint.fY, v->fAlpha);
+ if (Vertex* p = v->fPartner) {
+ TESS_LOG(", partner %g (%g, %g) alpha %d\n",
+ p->fID, p->fPoint.fX, p->fPoint.fY, p->fAlpha);
+ } else {
+ TESS_LOG(", null partner\n");
+ }
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ TESS_LOG(" edge %g -> %g, winding %d\n", e->fTop->fID, e->fBottom->fID, e->fWinding);
+ }
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ TESS_LOG(" edge %g -> %g, winding %d\n", e->fTop->fID, e->fBottom->fID, e->fWinding);
+ }
+ }
+#endif
+}
+
+void dump_skel(const SSEdgeList& ssEdges) {
+#if LOGGING_ENABLED
+ for (SSEdge* edge : ssEdges) {
+ if (edge->fEdge) {
+ TESS_LOG("skel edge %g -> %g",
+ edge->fPrev->fVertex->fID,
+ edge->fNext->fVertex->fID);
+ if (edge->fEdge->fTop && edge->fEdge->fBottom) {
+ TESS_LOG(" (original %g -> %g)\n",
+ edge->fEdge->fTop->fID,
+ edge->fEdge->fBottom->fID);
+ } else {
+ TESS_LOG("\n");
+ }
+ }
+ }
+#endif
+}
+
+#ifdef SK_DEBUG
+void validate_edge_pair(Edge* left, Edge* right, Comparator& c) {
+ if (!left || !right) {
+ return;
+ }
+ if (left->fTop == right->fTop) {
+ SkASSERT(left->isLeftOf(right->fBottom));
+ SkASSERT(right->isRightOf(left->fBottom));
+ } else if (c.sweep_lt(left->fTop->fPoint, right->fTop->fPoint)) {
+ SkASSERT(left->isLeftOf(right->fTop));
+ } else {
+ SkASSERT(right->isRightOf(left->fTop));
+ }
+ if (left->fBottom == right->fBottom) {
+ SkASSERT(left->isLeftOf(right->fTop));
+ SkASSERT(right->isRightOf(left->fTop));
+ } else if (c.sweep_lt(right->fBottom->fPoint, left->fBottom->fPoint)) {
+ SkASSERT(left->isLeftOf(right->fBottom));
+ } else {
+ SkASSERT(right->isRightOf(left->fBottom));
+ }
+}
+
+void validate_edge_list(EdgeList* edges, Comparator& c) {
+ Edge* left = edges->fHead;
+ if (!left) {
+ return;
+ }
+ for (Edge* right = left->fRight; right; right = right->fRight) {
+ validate_edge_pair(left, right, c);
+ left = right;
+ }
+}
+#endif
+
+// Stage 4: Simplify the mesh by inserting new vertices at intersecting edges.
+
+bool connected(Vertex* v) {
+ return v->fFirstEdgeAbove || v->fFirstEdgeBelow;
+}
+
+bool simplify(VertexList* mesh, Comparator& c, SkArenaAlloc& alloc) {
+ TESS_LOG("simplifying complex polygons\n");
+ EdgeList activeEdges;
+ bool found = false;
+ for (Vertex* v = mesh->fHead; v != nullptr; v = v->fNext) {
+ if (!connected(v)) {
+ continue;
+ }
+ Edge* leftEnclosingEdge;
+ Edge* rightEnclosingEdge;
+ bool restartChecks;
+ do {
+ TESS_LOG("\nvertex %g: (%g,%g), alpha %d\n",
+ v->fID, v->fPoint.fX, v->fPoint.fY, v->fAlpha);
+ restartChecks = false;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ v->fLeftEnclosingEdge = leftEnclosingEdge;
+ v->fRightEnclosingEdge = rightEnclosingEdge;
+ if (v->fFirstEdgeBelow) {
+ for (Edge* edge = v->fFirstEdgeBelow; edge; edge = edge->fNextEdgeBelow) {
+ if (check_for_intersection(leftEnclosingEdge, edge, &activeEdges, &v, mesh, c,
+ alloc)) {
+ restartChecks = true;
+ break;
+ }
+ if (check_for_intersection(edge, rightEnclosingEdge, &activeEdges, &v, mesh, c,
+ alloc)) {
+ restartChecks = true;
+ break;
+ }
+ }
+ } else {
+ if (check_for_intersection(leftEnclosingEdge, rightEnclosingEdge,
+ &activeEdges, &v, mesh, c, alloc)) {
+ restartChecks = true;
+ }
+            }
+ found = found || restartChecks;
+ } while (restartChecks);
+#ifdef SK_DEBUG
+ validate_edge_list(&activeEdges, c);
+#endif
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ remove_edge(e, &activeEdges);
+ }
+ Edge* leftEdge = leftEnclosingEdge;
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ insert_edge(e, leftEdge, &activeEdges);
+ leftEdge = e;
+ }
+ }
+ SkASSERT(!activeEdges.fHead && !activeEdges.fTail);
+ return found;
+}
+
+// Stage 5: Tessellate the simplified mesh into monotone polygons.
+
+Poly* tessellate(const VertexList& vertices, SkArenaAlloc& alloc) {
+ TESS_LOG("\ntessellating simple polygons\n");
+ EdgeList activeEdges;
+ Poly* polys = nullptr;
+ for (Vertex* v = vertices.fHead; v != nullptr; v = v->fNext) {
+ if (!connected(v)) {
+ continue;
+ }
+#if LOGGING_ENABLED
+ TESS_LOG("\nvertex %g: (%g,%g), alpha %d\n", v->fID, v->fPoint.fX, v->fPoint.fY, v->fAlpha);
+#endif
+ Edge* leftEnclosingEdge;
+ Edge* rightEnclosingEdge;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ Poly* leftPoly;
+ Poly* rightPoly;
+ if (v->fFirstEdgeAbove) {
+ leftPoly = v->fFirstEdgeAbove->fLeftPoly;
+ rightPoly = v->fLastEdgeAbove->fRightPoly;
+ } else {
+ leftPoly = leftEnclosingEdge ? leftEnclosingEdge->fRightPoly : nullptr;
+ rightPoly = rightEnclosingEdge ? rightEnclosingEdge->fLeftPoly : nullptr;
+ }
+#if LOGGING_ENABLED
+ TESS_LOG("edges above:\n");
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ TESS_LOG("%g -> %g, lpoly %d, rpoly %d\n",
+ e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1,
+ e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+ TESS_LOG("edges below:\n");
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ TESS_LOG("%g -> %g, lpoly %d, rpoly %d\n",
+ e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1,
+ e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+#endif
+ if (v->fFirstEdgeAbove) {
+ if (leftPoly) {
+ leftPoly = leftPoly->addEdge(v->fFirstEdgeAbove, Poly::kRight_Side, alloc);
+ }
+ if (rightPoly) {
+ rightPoly = rightPoly->addEdge(v->fLastEdgeAbove, Poly::kLeft_Side, alloc);
+ }
+ for (Edge* e = v->fFirstEdgeAbove; e != v->fLastEdgeAbove; e = e->fNextEdgeAbove) {
+ Edge* rightEdge = e->fNextEdgeAbove;
+ remove_edge(e, &activeEdges);
+ if (e->fRightPoly) {
+ e->fRightPoly->addEdge(e, Poly::kLeft_Side, alloc);
+ }
+ if (rightEdge->fLeftPoly && rightEdge->fLeftPoly != e->fRightPoly) {
+ rightEdge->fLeftPoly->addEdge(e, Poly::kRight_Side, alloc);
+ }
+ }
+ remove_edge(v->fLastEdgeAbove, &activeEdges);
+ if (!v->fFirstEdgeBelow) {
+ if (leftPoly && rightPoly && leftPoly != rightPoly) {
+ SkASSERT(leftPoly->fPartner == nullptr && rightPoly->fPartner == nullptr);
+ rightPoly->fPartner = leftPoly;
+ leftPoly->fPartner = rightPoly;
+ }
+ }
+ }
+ if (v->fFirstEdgeBelow) {
+ if (!v->fFirstEdgeAbove) {
+ if (leftPoly && rightPoly) {
+ if (leftPoly == rightPoly) {
+ if (leftPoly->fTail && leftPoly->fTail->fSide == Poly::kLeft_Side) {
+ leftPoly = new_poly(&polys, leftPoly->lastVertex(),
+ leftPoly->fWinding, alloc);
+ leftEnclosingEdge->fRightPoly = leftPoly;
+ } else {
+ rightPoly = new_poly(&polys, rightPoly->lastVertex(),
+ rightPoly->fWinding, alloc);
+ rightEnclosingEdge->fLeftPoly = rightPoly;
+ }
+ }
+ Edge* join = alloc.make<Edge>(leftPoly->lastVertex(), v, 1, Edge::Type::kInner);
+ leftPoly = leftPoly->addEdge(join, Poly::kRight_Side, alloc);
+ rightPoly = rightPoly->addEdge(join, Poly::kLeft_Side, alloc);
+ }
+ }
+ Edge* leftEdge = v->fFirstEdgeBelow;
+ leftEdge->fLeftPoly = leftPoly;
+ insert_edge(leftEdge, leftEnclosingEdge, &activeEdges);
+ for (Edge* rightEdge = leftEdge->fNextEdgeBelow; rightEdge;
+ rightEdge = rightEdge->fNextEdgeBelow) {
+ insert_edge(rightEdge, leftEdge, &activeEdges);
+ int winding = leftEdge->fLeftPoly ? leftEdge->fLeftPoly->fWinding : 0;
+ winding += leftEdge->fWinding;
+ if (winding != 0) {
+ Poly* poly = new_poly(&polys, v, winding, alloc);
+ leftEdge->fRightPoly = rightEdge->fLeftPoly = poly;
+ }
+ leftEdge = rightEdge;
+ }
+ v->fLastEdgeBelow->fRightPoly = rightPoly;
+ }
+#if LOGGING_ENABLED
+ TESS_LOG("\nactive edges:\n");
+ for (Edge* e = activeEdges.fHead; e != nullptr; e = e->fRight) {
+ TESS_LOG("%g -> %g, lpoly %d, rpoly %d\n",
+ e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1,
+ e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+#endif
+ }
+ return polys;
+}
+
+void remove_non_boundary_edges(const VertexList& mesh, SkPath::FillType fillType,
+ SkArenaAlloc& alloc) {
+ TESS_LOG("removing non-boundary edges\n");
+ EdgeList activeEdges;
+ for (Vertex* v = mesh.fHead; v != nullptr; v = v->fNext) {
+ if (!connected(v)) {
+ continue;
+ }
+ Edge* leftEnclosingEdge;
+ Edge* rightEnclosingEdge;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ bool prevFilled = leftEnclosingEdge &&
+ apply_fill_type(fillType, leftEnclosingEdge->fWinding);
+ for (Edge* e = v->fFirstEdgeAbove; e;) {
+ Edge* next = e->fNextEdgeAbove;
+ remove_edge(e, &activeEdges);
+ bool filled = apply_fill_type(fillType, e->fWinding);
+ if (filled == prevFilled) {
+ disconnect(e);
+ }
+ prevFilled = filled;
+ e = next;
+ }
+ Edge* prev = leftEnclosingEdge;
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ if (prev) {
+ e->fWinding += prev->fWinding;
+ }
+ insert_edge(e, prev, &activeEdges);
+ prev = e;
+ }
+ }
+}
+
+// Note: this is the normal to the edge, but not necessarily unit length.
+void get_edge_normal(const Edge* e, SkVector* normal) {
+ normal->set(SkDoubleToScalar(e->fLine.fA),
+ SkDoubleToScalar(e->fLine.fB));
+}
+
+// Stage 5c: detect and remove "pointy" vertices whose edge normals point in opposite directions
+// and whose adjacent vertices are less than a quarter pixel from an edge. These are guaranteed to
+// invert on stroking.
+
+void simplify_boundary(EdgeList* boundary, Comparator& c, SkArenaAlloc& alloc) {
+ Edge* prevEdge = boundary->fTail;
+ SkVector prevNormal;
+ get_edge_normal(prevEdge, &prevNormal);
+ for (Edge* e = boundary->fHead; e != nullptr;) {
+ Vertex* prev = prevEdge->fWinding == 1 ? prevEdge->fTop : prevEdge->fBottom;
+ Vertex* next = e->fWinding == 1 ? e->fBottom : e->fTop;
+ double distPrev = e->dist(prev->fPoint);
+ double distNext = prevEdge->dist(next->fPoint);
+ SkVector normal;
+ get_edge_normal(e, &normal);
+ constexpr double kQuarterPixelSq = 0.25f * 0.25f;
+ if (prev == next) {
+ remove_edge(prevEdge, boundary);
+ remove_edge(e, boundary);
+ prevEdge = boundary->fTail;
+ e = boundary->fHead;
+ if (prevEdge) {
+ get_edge_normal(prevEdge, &prevNormal);
+ }
+ } else if (prevNormal.dot(normal) < 0.0 &&
+ (distPrev * distPrev <= kQuarterPixelSq || distNext * distNext <= kQuarterPixelSq)) {
+ Edge* join = new_edge(prev, next, Edge::Type::kInner, c, alloc);
+ if (prev->fPoint != next->fPoint) {
+ join->fLine.normalize();
+ join->fLine = join->fLine * join->fWinding;
+ }
+ insert_edge(join, e, boundary);
+ remove_edge(prevEdge, boundary);
+ remove_edge(e, boundary);
+ if (join->fLeft && join->fRight) {
+ prevEdge = join->fLeft;
+ e = join;
+ } else {
+ prevEdge = boundary->fTail;
+ e = boundary->fHead; // join->fLeft ? join->fLeft : join;
+ }
+ get_edge_normal(prevEdge, &prevNormal);
+ } else {
+ prevEdge = e;
+ prevNormal = normal;
+ e = e->fRight;
+ }
+ }
+}
+
+void ss_connect(Vertex* v, Vertex* dest, Comparator& c, SkArenaAlloc& alloc) {
+ if (v == dest) {
+ return;
+ }
+ TESS_LOG("ss_connecting vertex %g to vertex %g\n", v->fID, dest->fID);
+ if (v->fSynthetic) {
+ connect(v, dest, Edge::Type::kConnector, c, alloc, 0);
+ } else if (v->fPartner) {
+ TESS_LOG("setting %g's partner to %g ", v->fPartner->fID, dest->fID);
+ TESS_LOG("and %g's partner to null\n", v->fID);
+ v->fPartner->fPartner = dest;
+ v->fPartner = nullptr;
+ }
+}
+
+void Event::apply(VertexList* mesh, Comparator& c, EventList* events, SkArenaAlloc& alloc) {
+ if (!fEdge) {
+ return;
+ }
+ Vertex* prev = fEdge->fPrev->fVertex;
+ Vertex* next = fEdge->fNext->fVertex;
+ SSEdge* prevEdge = fEdge->fPrev->fPrev;
+ SSEdge* nextEdge = fEdge->fNext->fNext;
+ if (!prevEdge || !nextEdge || !prevEdge->fEdge || !nextEdge->fEdge) {
+ return;
+ }
+ Vertex* dest = create_sorted_vertex(fPoint, fAlpha, mesh, prev, c, alloc);
+ dest->fSynthetic = true;
+ SSVertex* ssv = alloc.make<SSVertex>(dest);
+ TESS_LOG("collapsing %g, %g (original edge %g -> %g) to %g (%g, %g) alpha %d\n",
+ prev->fID, next->fID, fEdge->fEdge->fTop->fID, fEdge->fEdge->fBottom->fID, dest->fID,
+ fPoint.fX, fPoint.fY, fAlpha);
+ fEdge->fEdge = nullptr;
+
+ ss_connect(prev, dest, c, alloc);
+ ss_connect(next, dest, c, alloc);
+
+ prevEdge->fNext = nextEdge->fPrev = ssv;
+ ssv->fPrev = prevEdge;
+ ssv->fNext = nextEdge;
+ if (!prevEdge->fEdge || !nextEdge->fEdge) {
+ return;
+ }
+ if (prevEdge->fEvent) {
+ prevEdge->fEvent->fEdge = nullptr;
+ }
+ if (nextEdge->fEvent) {
+ nextEdge->fEvent->fEdge = nullptr;
+ }
+ if (prevEdge->fPrev == nextEdge->fNext) {
+ ss_connect(prevEdge->fPrev->fVertex, dest, c, alloc);
+ prevEdge->fEdge = nextEdge->fEdge = nullptr;
+ } else {
+ compute_bisector(prevEdge->fEdge, nextEdge->fEdge, dest, alloc);
+ SkASSERT(prevEdge != fEdge && nextEdge != fEdge);
+ if (dest->fPartner) {
+ create_event(prevEdge, events, alloc);
+ create_event(nextEdge, events, alloc);
+ } else {
+ create_event(prevEdge, prevEdge->fPrev->fVertex, nextEdge, dest, events, c, alloc);
+ create_event(nextEdge, nextEdge->fNext->fVertex, prevEdge, dest, events, c, alloc);
+ }
+ }
+}
+
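+// stroke_boundary() below creates outer edges with winding ±1 and inner edges
+// with winding ±2; once windings from coincident boundaries have accumulated,
+// any value outside the expected {0, 1} (outer) or {0, -2} (inner) marks an
+// edge that lies inside an overlap region.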
+bool is_overlap_edge(Edge* e) {
+ if (e->fType == Edge::Type::kOuter) {
+ return e->fWinding != 0 && e->fWinding != 1;
+ } else if (e->fType == Edge::Type::kInner) {
+ return e->fWinding != 0 && e->fWinding != -2;
+ } else {
+ return false;
+ }
+}
+
+// This is a stripped-down version of tessellate() which computes the edges that
+// border two filled regions (i.e., overlap regions) and collapses them.
+bool collapse_overlap_regions(VertexList* mesh, Comparator& c, SkArenaAlloc& alloc,
+ EventComparator comp) {
+ TESS_LOG("\nfinding overlap regions\n");
+ EdgeList activeEdges;
+ EventList events(comp);
+ SSVertexMap ssVertices;
+ SSEdgeList ssEdges;
+ for (Vertex* v = mesh->fHead; v != nullptr; v = v->fNext) {
+ if (!connected(v)) {
+ continue;
+ }
+ Edge* leftEnclosingEdge;
+ Edge* rightEnclosingEdge;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ for (Edge* e = v->fLastEdgeAbove; e && e != leftEnclosingEdge;) {
+ Edge* prev = e->fPrevEdgeAbove ? e->fPrevEdgeAbove : leftEnclosingEdge;
+ remove_edge(e, &activeEdges);
+ bool leftOverlap = prev && is_overlap_edge(prev);
+ bool rightOverlap = is_overlap_edge(e);
+ bool isOuterBoundary = e->fType == Edge::Type::kOuter &&
+ (!prev || prev->fWinding == 0 || e->fWinding == 0);
+ if (prev) {
+ e->fWinding -= prev->fWinding;
+ }
+ if (leftOverlap && rightOverlap) {
+ TESS_LOG("found interior overlap edge %g -> %g, disconnecting\n",
+ e->fTop->fID, e->fBottom->fID);
+ disconnect(e);
+ } else if (leftOverlap || rightOverlap) {
+ TESS_LOG("found overlap edge %g -> %g%s\n",
+ e->fTop->fID, e->fBottom->fID,
+ isOuterBoundary ? ", is outer boundary" : "");
+ Vertex* prevVertex = e->fWinding < 0 ? e->fBottom : e->fTop;
+ Vertex* nextVertex = e->fWinding < 0 ? e->fTop : e->fBottom;
+ SSVertex* ssPrev = ssVertices[prevVertex];
+ if (!ssPrev) {
+ ssPrev = ssVertices[prevVertex] = alloc.make<SSVertex>(prevVertex);
+ }
+ SSVertex* ssNext = ssVertices[nextVertex];
+ if (!ssNext) {
+ ssNext = ssVertices[nextVertex] = alloc.make<SSVertex>(nextVertex);
+ }
+ SSEdge* ssEdge = alloc.make<SSEdge>(e, ssPrev, ssNext);
+ ssEdges.push_back(ssEdge);
+// SkASSERT(!ssPrev->fNext && !ssNext->fPrev);
+ ssPrev->fNext = ssNext->fPrev = ssEdge;
+ create_event(ssEdge, &events, alloc);
+ if (!isOuterBoundary) {
+ disconnect(e);
+ }
+ }
+ e = prev;
+ }
+ Edge* prev = leftEnclosingEdge;
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ if (prev) {
+ e->fWinding += prev->fWinding;
+ }
+ insert_edge(e, prev, &activeEdges);
+ prev = e;
+ }
+ }
+ bool complex = events.size() > 0;
+
+ TESS_LOG("\ncollapsing overlap regions\n");
+ TESS_LOG("skeleton before:\n");
+ dump_skel(ssEdges);
+ while (events.size() > 0) {
+ Event* event = events.top();
+ events.pop();
+ event->apply(mesh, c, &events, alloc);
+ }
+ TESS_LOG("skeleton after:\n");
+ dump_skel(ssEdges);
+ for (SSEdge* edge : ssEdges) {
+ if (Edge* e = edge->fEdge) {
+ connect(edge->fPrev->fVertex, edge->fNext->fVertex, e->fType, c, alloc, 0);
+ }
+ }
+ return complex;
+}
+
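+// Returns true if this stretch of the stroked boundary has inverted: the sweep
+// order of prev -> next no longer agrees with the winding of the original edge
+// (missing vertices are treated as inverted).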
+bool inversion(Vertex* prev, Vertex* next, Edge* origEdge, Comparator& c) {
+ if (!prev || !next) {
+ return true;
+ }
+ int winding = c.sweep_lt(prev->fPoint, next->fPoint) ? 1 : -1;
+ return winding != origEdge->fWinding;
+}
+
+// Stage 5d: Displace edges by half a pixel inward and outward along their normals. Intersect to
+// find new vertices, and set zero alpha on the exterior and one alpha on the interior. Build a
+// new antialiased mesh from those vertices.
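+// (The boundary edges' lines were normalized in extract_boundary(), so
+// offsetting fC by the 0.5 radius below shifts each line exactly half a pixel
+// along its unit normal.)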
+
+void stroke_boundary(EdgeList* boundary, VertexList* innerMesh, VertexList* outerMesh,
+ Comparator& c, SkArenaAlloc& alloc) {
+ TESS_LOG("\nstroking boundary\n");
+ // A boundary with fewer than 3 edges is degenerate.
+ if (!boundary->fHead || !boundary->fHead->fRight || !boundary->fHead->fRight->fRight) {
+ return;
+ }
+ Edge* prevEdge = boundary->fTail;
+ Vertex* prevV = prevEdge->fWinding > 0 ? prevEdge->fTop : prevEdge->fBottom;
+ SkVector prevNormal;
+ get_edge_normal(prevEdge, &prevNormal);
+ double radius = 0.5;
+ Line prevInner(prevEdge->fLine);
+ prevInner.fC -= radius;
+ Line prevOuter(prevEdge->fLine);
+ prevOuter.fC += radius;
+ VertexList innerVertices;
+ VertexList outerVertices;
+ bool innerInversion = true;
+ bool outerInversion = true;
+ for (Edge* e = boundary->fHead; e != nullptr; e = e->fRight) {
+ Vertex* v = e->fWinding > 0 ? e->fTop : e->fBottom;
+ SkVector normal;
+ get_edge_normal(e, &normal);
+ Line inner(e->fLine);
+ inner.fC -= radius;
+ Line outer(e->fLine);
+ outer.fC += radius;
+ SkPoint innerPoint, outerPoint;
+ TESS_LOG("stroking vertex %g (%g, %g)\n", v->fID, v->fPoint.fX, v->fPoint.fY);
+ if (!prevEdge->fLine.nearParallel(e->fLine) && prevInner.intersect(inner, &innerPoint) &&
+ prevOuter.intersect(outer, &outerPoint)) {
+ float cosAngle = normal.dot(prevNormal);
+ if (cosAngle < -kCosMiterAngle) {
+ Vertex* nextV = e->fWinding > 0 ? e->fBottom : e->fTop;
+
+ // This is a pointy vertex whose angle is smaller than the threshold; miter it.
+ Line bisector(innerPoint, outerPoint);
+ Line tangent(v->fPoint, v->fPoint + SkPoint::Make(bisector.fA, bisector.fB));
+ if (tangent.fA == 0 && tangent.fB == 0) {
+ continue;
+ }
+ tangent.normalize();
+ Line innerTangent(tangent);
+ Line outerTangent(tangent);
+ innerTangent.fC -= 0.5;
+ outerTangent.fC += 0.5;
+ SkPoint innerPoint1, innerPoint2, outerPoint1, outerPoint2;
+ if (prevNormal.cross(normal) > 0) {
+ // Miter inner points
+ if (!innerTangent.intersect(prevInner, &innerPoint1) ||
+ !innerTangent.intersect(inner, &innerPoint2) ||
+ !outerTangent.intersect(bisector, &outerPoint)) {
+ continue;
+ }
+ Line prevTangent(prevV->fPoint,
+ prevV->fPoint + SkVector::Make(prevOuter.fA, prevOuter.fB));
+ Line nextTangent(nextV->fPoint,
+ nextV->fPoint + SkVector::Make(outer.fA, outer.fB));
+ if (prevTangent.dist(outerPoint) > 0) {
+ bisector.intersect(prevTangent, &outerPoint);
+ }
+ if (nextTangent.dist(outerPoint) < 0) {
+ bisector.intersect(nextTangent, &outerPoint);
+ }
+ outerPoint1 = outerPoint2 = outerPoint;
+ } else {
+ // Miter outer points
+ if (!outerTangent.intersect(prevOuter, &outerPoint1) ||
+ !outerTangent.intersect(outer, &outerPoint2)) {
+ continue;
+ }
+ Line prevTangent(prevV->fPoint,
+ prevV->fPoint + SkVector::Make(prevInner.fA, prevInner.fB));
+ Line nextTangent(nextV->fPoint,
+ nextV->fPoint + SkVector::Make(inner.fA, inner.fB));
+ if (prevTangent.dist(innerPoint) > 0) {
+ bisector.intersect(prevTangent, &innerPoint);
+ }
+ if (nextTangent.dist(innerPoint) < 0) {
+ bisector.intersect(nextTangent, &innerPoint);
+ }
+ innerPoint1 = innerPoint2 = innerPoint;
+ }
+ if (!innerPoint1.isFinite() || !innerPoint2.isFinite() ||
+ !outerPoint1.isFinite() || !outerPoint2.isFinite()) {
+ continue;
+ }
+ TESS_LOG("inner (%g, %g), (%g, %g), ",
+ innerPoint1.fX, innerPoint1.fY, innerPoint2.fX, innerPoint2.fY);
+ TESS_LOG("outer (%g, %g), (%g, %g)\n",
+ outerPoint1.fX, outerPoint1.fY, outerPoint2.fX, outerPoint2.fY);
+ Vertex* innerVertex1 = alloc.make<Vertex>(innerPoint1, 255);
+ Vertex* innerVertex2 = alloc.make<Vertex>(innerPoint2, 255);
+ Vertex* outerVertex1 = alloc.make<Vertex>(outerPoint1, 0);
+ Vertex* outerVertex2 = alloc.make<Vertex>(outerPoint2, 0);
+ innerVertex1->fPartner = outerVertex1;
+ innerVertex2->fPartner = outerVertex2;
+ outerVertex1->fPartner = innerVertex1;
+ outerVertex2->fPartner = innerVertex2;
+ if (!inversion(innerVertices.fTail, innerVertex1, prevEdge, c)) {
+ innerInversion = false;
+ }
+ if (!inversion(outerVertices.fTail, outerVertex1, prevEdge, c)) {
+ outerInversion = false;
+ }
+ innerVertices.append(innerVertex1);
+ innerVertices.append(innerVertex2);
+ outerVertices.append(outerVertex1);
+ outerVertices.append(outerVertex2);
+ } else {
+ TESS_LOG("inner (%g, %g), ", innerPoint.fX, innerPoint.fY);
+ TESS_LOG("outer (%g, %g)\n", outerPoint.fX, outerPoint.fY);
+ Vertex* innerVertex = alloc.make<Vertex>(innerPoint, 255);
+ Vertex* outerVertex = alloc.make<Vertex>(outerPoint, 0);
+ innerVertex->fPartner = outerVertex;
+ outerVertex->fPartner = innerVertex;
+ if (!inversion(innerVertices.fTail, innerVertex, prevEdge, c)) {
+ innerInversion = false;
+ }
+ if (!inversion(outerVertices.fTail, outerVertex, prevEdge, c)) {
+ outerInversion = false;
+ }
+ innerVertices.append(innerVertex);
+ outerVertices.append(outerVertex);
+ }
+ }
+ prevInner = inner;
+ prevOuter = outer;
+ prevV = v;
+ prevEdge = e;
+ prevNormal = normal;
+ }
+ if (!inversion(innerVertices.fTail, innerVertices.fHead, prevEdge, c)) {
+ innerInversion = false;
+ }
+ if (!inversion(outerVertices.fTail, outerVertices.fHead, prevEdge, c)) {
+ outerInversion = false;
+ }
+ // Outer edges get 1 winding, and inner edges get -2 winding. This ensures that the interior
+ // is always filled (1 + -2 = -1 for normal cases, 1 + 2 = 3 for thin features where the
+ // interior inverts).
+ // For total inversion cases, the shape has now reversed handedness, so invert the winding
+ // so it will be detected during collapse_overlap_regions().
+ int innerWinding = innerInversion ? 2 : -2;
+ int outerWinding = outerInversion ? -1 : 1;
+ for (Vertex* v = innerVertices.fHead; v && v->fNext; v = v->fNext) {
+ connect(v, v->fNext, Edge::Type::kInner, c, alloc, innerWinding);
+ }
+ connect(innerVertices.fTail, innerVertices.fHead, Edge::Type::kInner, c, alloc, innerWinding);
+ for (Vertex* v = outerVertices.fHead; v && v->fNext; v = v->fNext) {
+ connect(v, v->fNext, Edge::Type::kOuter, c, alloc, outerWinding);
+ }
+ connect(outerVertices.fTail, outerVertices.fHead, Edge::Type::kOuter, c, alloc, outerWinding);
+ innerMesh->append(innerVertices);
+ outerMesh->append(outerVertices);
+}
+
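+// Traces a single boundary loop starting at edge e: each edge is oriented by
+// winding, normalized, appended to 'boundary' and disconnected from the mesh,
+// following outgoing edges clockwise (downward) or counter-clockwise (upward)
+// until the loop returns to its starting vertex.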
+void extract_boundary(EdgeList* boundary, Edge* e, SkPath::FillType fillType, SkArenaAlloc& alloc) {
+ TESS_LOG("\nextracting boundary\n");
+ bool down = apply_fill_type(fillType, e->fWinding);
+ Vertex* start = down ? e->fTop : e->fBottom;
+ do {
+ e->fWinding = down ? 1 : -1;
+ Edge* next;
+ e->fLine.normalize();
+ e->fLine = e->fLine * e->fWinding;
+ boundary->append(e);
+ if (down) {
+ // Find outgoing edge, in clockwise order.
+ if ((next = e->fNextEdgeAbove)) {
+ down = false;
+ } else if ((next = e->fBottom->fLastEdgeBelow)) {
+ down = true;
+ } else if ((next = e->fPrevEdgeAbove)) {
+ down = false;
+ }
+ } else {
+ // Find outgoing edge, in counter-clockwise order.
+ if ((next = e->fPrevEdgeBelow)) {
+ down = true;
+ } else if ((next = e->fTop->fFirstEdgeAbove)) {
+ down = false;
+ } else if ((next = e->fNextEdgeBelow)) {
+ down = true;
+ }
+ }
+ disconnect(e);
+ e = next;
+ } while (e && (down ? e->fTop : e->fBottom) != start);
+}
+
+// Stage 5b: Extract boundaries from mesh, simplify and stroke them into a new mesh.
+
+void extract_boundaries(const VertexList& inMesh, VertexList* innerVertices,
+ VertexList* outerVertices, SkPath::FillType fillType,
+ Comparator& c, SkArenaAlloc& alloc) {
+ remove_non_boundary_edges(inMesh, fillType, alloc);
+ for (Vertex* v = inMesh.fHead; v; v = v->fNext) {
+ while (v->fFirstEdgeBelow) {
+ EdgeList boundary;
+ extract_boundary(&boundary, v->fFirstEdgeBelow, fillType, alloc);
+ simplify_boundary(&boundary, c, alloc);
+ stroke_boundary(&boundary, innerVertices, outerVertices, c, alloc);
+ }
+ }
+}
+
+// This is a driver function that calls stages 2-5 in turn.
+
+void contours_to_mesh(VertexList* contours, int contourCnt, bool antialias,
+ VertexList* mesh, Comparator& c, SkArenaAlloc& alloc) {
+#if LOGGING_ENABLED
+ for (int i = 0; i < contourCnt; ++i) {
+ Vertex* v = contours[i].fHead;
+ SkASSERT(v);
+ TESS_LOG("path.moveTo(%20.20g, %20.20g);\n", v->fPoint.fX, v->fPoint.fY);
+ for (v = v->fNext; v; v = v->fNext) {
+ TESS_LOG("path.lineTo(%20.20g, %20.20g);\n", v->fPoint.fX, v->fPoint.fY);
+ }
+ }
+#endif
+ sanitize_contours(contours, contourCnt, antialias);
+ build_edges(contours, contourCnt, mesh, c, alloc);
+}
+
+void sort_mesh(VertexList* vertices, Comparator& c, SkArenaAlloc& alloc) {
+ if (!vertices || !vertices->fHead) {
+ return;
+ }
+
+ // Sort vertices in Y (secondarily in X).
+ if (c.fDirection == Comparator::Direction::kHorizontal) {
+ merge_sort<sweep_lt_horiz>(vertices);
+ } else {
+ merge_sort<sweep_lt_vert>(vertices);
+ }
+#if LOGGING_ENABLED
+ for (Vertex* v = vertices->fHead; v != nullptr; v = v->fNext) {
+ static float gID = 0.0f;
+ v->fID = gID++;
+ }
+#endif
+}
+
+Poly* contours_to_polys(VertexList* contours, int contourCnt, SkPath::FillType fillType,
+ const SkRect& pathBounds, bool antialias, VertexList* outerMesh,
+ SkArenaAlloc& alloc) {
+ Comparator c(pathBounds.width() > pathBounds.height() ? Comparator::Direction::kHorizontal
+ : Comparator::Direction::kVertical);
+ VertexList mesh;
+ contours_to_mesh(contours, contourCnt, antialias, &mesh, c, alloc);
+ sort_mesh(&mesh, c, alloc);
+ merge_coincident_vertices(&mesh, c, alloc);
+ simplify(&mesh, c, alloc);
+ TESS_LOG("\nsimplified mesh:\n");
+ dump_mesh(mesh);
+ if (antialias) {
+ VertexList innerMesh;
+ extract_boundaries(mesh, &innerMesh, outerMesh, fillType, c, alloc);
+ sort_mesh(&innerMesh, c, alloc);
+ sort_mesh(outerMesh, c, alloc);
+ merge_coincident_vertices(&innerMesh, c, alloc);
+ bool was_complex = merge_coincident_vertices(outerMesh, c, alloc);
+ was_complex = simplify(&innerMesh, c, alloc) || was_complex;
+ was_complex = simplify(outerMesh, c, alloc) || was_complex;
+ TESS_LOG("\ninner mesh before:\n");
+ dump_mesh(innerMesh);
+ TESS_LOG("\nouter mesh before:\n");
+ dump_mesh(*outerMesh);
+ EventComparator eventLT(EventComparator::Op::kLessThan);
+ EventComparator eventGT(EventComparator::Op::kGreaterThan);
+ was_complex = collapse_overlap_regions(&innerMesh, c, alloc, eventLT) || was_complex;
+ was_complex = collapse_overlap_regions(outerMesh, c, alloc, eventGT) || was_complex;
+ if (was_complex) {
+ TESS_LOG("found complex mesh; taking slow path\n");
+ VertexList aaMesh;
+ TESS_LOG("\ninner mesh after:\n");
+ dump_mesh(innerMesh);
+ TESS_LOG("\nouter mesh after:\n");
+ dump_mesh(*outerMesh);
+ connect_partners(outerMesh, c, alloc);
+ connect_partners(&innerMesh, c, alloc);
+ sorted_merge(&innerMesh, outerMesh, &aaMesh, c);
+ merge_coincident_vertices(&aaMesh, c, alloc);
+ simplify(&aaMesh, c, alloc);
+ TESS_LOG("combined and simplified mesh:\n");
+ dump_mesh(aaMesh);
+ outerMesh->fHead = outerMesh->fTail = nullptr;
+ return tessellate(aaMesh, alloc);
+ } else {
+ TESS_LOG("no complex polygons; taking fast path\n");
+ return tessellate(innerMesh, alloc);
+ }
+ } else {
+ return tessellate(mesh, alloc);
+ }
+}
+
+// Stage 6: Triangulate the monotone polygons into a vertex buffer.
+void* polys_to_triangles(Poly* polys, SkPath::FillType fillType, bool emitCoverage, void* data) {
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly)) {
+ data = poly->emit(emitCoverage, data);
+ }
+ }
+ return data;
+}
+
+Poly* path_to_polys(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ int contourCnt, SkArenaAlloc& alloc, bool antialias, bool* isLinear,
+ VertexList* outerMesh) {
+ SkPath::FillType fillType = path.getFillType();
+ if (SkPath::IsInverseFillType(fillType)) {
+ contourCnt++;
+ }
+ std::unique_ptr<VertexList[]> contours(new VertexList[contourCnt]);
+
+ path_to_contours(path, tolerance, clipBounds, contours.get(), alloc, isLinear);
+ return contours_to_polys(contours.get(), contourCnt, path.getFillType(), path.getBounds(),
+ antialias, outerMesh, alloc);
+}
+
+int get_contour_count(const SkPath& path, SkScalar tolerance) {
+ int contourCnt;
+ int maxPts = GrPathUtils::worstCasePointCount(path, &contourCnt, tolerance);
+ if (maxPts <= 0) {
+ return 0;
+ }
+ return contourCnt;
+}
+
+int64_t count_points(Poly* polys, SkPath::FillType fillType) {
+ int64_t count = 0;
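+ // Each monotone poly fans into (fCount - 2) triangles; e.g. fCount == 5
+ // yields 3 triangles = 9 vertices (18 in wireframe mode, which emits 6
+ // vertices per triangle).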
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly) && poly->fCount >= 3) {
+ count += (poly->fCount - 2) * (TESSELLATOR_WIREFRAME ? 6 : 3);
+ }
+ }
+ return count;
+}
+
+int64_t count_outer_mesh_points(const VertexList& outerMesh) {
+ int64_t count = 0;
+ for (Vertex* v = outerMesh.fHead; v; v = v->fNext) {
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ count += TESSELLATOR_WIREFRAME ? 12 : 6;
+ }
+ }
+ return count;
+}
+
+void* outer_mesh_to_triangles(const VertexList& outerMesh, bool emitCoverage, void* data) {
+ for (Vertex* v = outerMesh.fHead; v; v = v->fNext) {
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ Vertex* v0 = e->fTop;
+ Vertex* v1 = e->fBottom;
+ Vertex* v2 = e->fBottom->fPartner;
+ Vertex* v3 = e->fTop->fPartner;
+ data = emit_triangle(v0, v1, v2, emitCoverage, data);
+ data = emit_triangle(v0, v2, v3, emitCoverage, data);
+ }
+ }
+ return data;
+}
+
+} // namespace
+
+namespace GrTessellator {
+
+// Entry points: run the full pipeline (stages 1-6) to convert a path into triangles.
+
+int PathToTriangles(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ VertexAllocator* vertexAllocator, bool antialias, bool* isLinear) {
+ int contourCnt = get_contour_count(path, tolerance);
+ if (contourCnt <= 0) {
+ *isLinear = true;
+ return 0;
+ }
+ SkArenaAlloc alloc(kArenaChunkSize);
+ VertexList outerMesh;
+ Poly* polys = path_to_polys(path, tolerance, clipBounds, contourCnt, alloc, antialias,
+ isLinear, &outerMesh);
+ SkPath::FillType fillType = antialias ? SkPath::kWinding_FillType : path.getFillType();
+ int64_t count64 = count_points(polys, fillType);
+ if (antialias) {
+ count64 += count_outer_mesh_points(outerMesh);
+ }
+ if (0 == count64 || count64 > SK_MaxS32) {
+ return 0;
+ }
+ int count = count64;
+
+ void* verts = vertexAllocator->lock(count);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return 0;
+ }
+
+ TESS_LOG("emitting %d verts\n", count);
+ void* end = polys_to_triangles(polys, fillType, antialias, verts);
+ end = outer_mesh_to_triangles(outerMesh, true, end);
+
+ int actualCount = static_cast<int>((static_cast<uint8_t*>(end) - static_cast<uint8_t*>(verts))
+ / vertexAllocator->stride());
+ SkASSERT(actualCount <= count);
+ vertexAllocator->unlock(actualCount);
+ return actualCount;
+}
+
+int PathToVertices(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ GrTessellator::WindingVertex** verts) {
+ int contourCnt = get_contour_count(path, tolerance);
+ if (contourCnt <= 0) {
+ *verts = nullptr;
+ return 0;
+ }
+ SkArenaAlloc alloc(kArenaChunkSize);
+ bool isLinear;
+ Poly* polys = path_to_polys(path, tolerance, clipBounds, contourCnt, alloc, false, &isLinear,
+ nullptr);
+ SkPath::FillType fillType = path.getFillType();
+ int64_t count64 = count_points(polys, fillType);
+ if (0 == count64 || count64 > SK_MaxS32) {
+ *verts = nullptr;
+ return 0;
+ }
+ int count = count64;
+
+ *verts = new GrTessellator::WindingVertex[count];
+ GrTessellator::WindingVertex* vertsEnd = *verts;
+ SkPoint* points = new SkPoint[count];
+ SkPoint* pointsEnd = points;
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly)) {
+ SkPoint* start = pointsEnd;
+ pointsEnd = static_cast<SkPoint*>(poly->emit(false, pointsEnd));
+ while (start != pointsEnd) {
+ vertsEnd->fPos = *start;
+ vertsEnd->fWinding = poly->fWinding;
+ ++start;
+ ++vertsEnd;
+ }
+ }
+ }
+ int actualCount = static_cast<int>(vertsEnd - *verts);
+ SkASSERT(actualCount <= count);
+ SkASSERT(pointsEnd - points == actualCount);
+ delete[] points;
+ return actualCount;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/gpu/GrTessellator.h b/gfx/skia/skia/src/gpu/GrTessellator.h
new file mode 100644
index 0000000000..e892a19b14
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTessellator.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTessellator_DEFINED
+#define GrTessellator_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/private/SkColorData.h"
+#include "src/gpu/GrColor.h"
+
+class SkPath;
+struct SkRect;
+
+/**
+ * Provides utility functions for converting paths to a collection of triangles.
+ */
+
+#define TESSELLATOR_WIREFRAME 0
+
+namespace GrTessellator {
+
+class VertexAllocator {
+public:
+ VertexAllocator(size_t stride) : fStride(stride) {}
+ virtual ~VertexAllocator() {}
+ virtual void* lock(int vertexCount) = 0;
+ virtual void unlock(int actualCount) = 0;
+ size_t stride() const { return fStride; }
+private:
+ size_t fStride;
+};
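+
+// One way to satisfy this interface is a simple heap-backed allocator. A
+// hypothetical sketch (HeapVertexAllocator is not part of Skia):
+//
+//   class HeapVertexAllocator : public VertexAllocator {
+//   public:
+//       explicit HeapVertexAllocator(size_t stride) : VertexAllocator(stride) {}
+//       // Reserve space for vertexCount vertices of stride() bytes each.
+//       void* lock(int vertexCount) override {
+//           fStorage.resize(vertexCount * this->stride());
+//           return fStorage.data();
+//       }
+//       // Trim to the number of vertices actually emitted.
+//       void unlock(int actualCount) override {
+//           fStorage.resize(actualCount * this->stride());
+//       }
+//   private:
+//       std::vector<char> fStorage;  // requires <vector>
+//   };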
+
+struct WindingVertex {
+ SkPoint fPos;
+ int fWinding;
+};
+
+// Triangulates a path to an array of vertices. Each triangle is represented as a set of three
+// WindingVertex entries, each of which contains the position and winding count (which is the same
+// for all three vertices of a triangle). The 'verts' out parameter is set to point to the resultant
+// vertex array. CALLER IS RESPONSIBLE for deleting this buffer to avoid a memory leak!
+int PathToVertices(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ WindingVertex** verts);
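+//
+// A hypothetical usage sketch (the 0.25f tolerance is a placeholder value):
+//
+//   GrTessellator::WindingVertex* verts = nullptr;
+//   int count = GrTessellator::PathToVertices(path, 0.25f, path.getBounds(), &verts);
+//   for (int i = 0; i < count; ++i) {
+//       // consume verts[i].fPos and verts[i].fWinding
+//   }
+//   delete[] verts;  // the caller owns the buffer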
+
+int PathToTriangles(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ VertexAllocator*, bool antialias, bool *isLinear);
+} // namespace GrTessellator
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTestUtils.cpp b/gfx/skia/skia/src/gpu/GrTestUtils.cpp
new file mode 100644
index 0000000000..42aba5ef53
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTestUtils.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTestUtils.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/gpu/GrContext.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrProcessorUnitTest.h"
+#include "src/gpu/GrStyle.h"
+#include "src/utils/SkDashPathPriv.h"
+
+#if GR_TEST_UTILS
+
+static const SkMatrix& test_matrix(SkRandom* random,
+ bool includeNonPerspective,
+ bool includePerspective) {
+ static SkMatrix gMatrices[5];
+ static const int kPerspectiveCount = 1;
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gMatrices[0].reset();
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ gMatrices[2].setRotate(SkIntToScalar(17));
+ gMatrices[3].setRotate(SkIntToScalar(185));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ gMatrices[3].postScale(SkIntToScalar(2), SK_ScalarHalf);
+
+ // Perspective matrices
+ gMatrices[4].setRotate(SkIntToScalar(215));
+ gMatrices[4].set(SkMatrix::kMPersp0, 0.00013f);
+ gMatrices[4].set(SkMatrix::kMPersp1, -0.000039f);
+ }
+
+ uint32_t count = static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices));
+ if (includeNonPerspective && includePerspective) {
+ return gMatrices[random->nextULessThan(count)];
+ } else if (!includeNonPerspective) {
+ return gMatrices[count - 1 - random->nextULessThan(kPerspectiveCount)];
+ } else {
+ SkASSERT(includeNonPerspective && !includePerspective);
+ return gMatrices[random->nextULessThan(count - kPerspectiveCount)];
+ }
+}
+
+namespace GrTest {
+const SkMatrix& TestMatrix(SkRandom* random) { return test_matrix(random, true, true); }
+
+const SkMatrix& TestMatrixPreservesRightAngles(SkRandom* random) {
+ static SkMatrix gMatrices[5];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // identity
+ gMatrices[0].reset();
+ // translation
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ // scale
+ gMatrices[2].setScale(SkIntToScalar(17), SkIntToScalar(17));
+ // scale + translation
+ gMatrices[3].setScale(SkIntToScalar(-17), SkIntToScalar(-17));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ // orthogonal basis vectors
+ gMatrices[4].reset();
+ gMatrices[4].setScale(SkIntToScalar(-1), SkIntToScalar(-1));
+ gMatrices[4].setRotate(47);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gMatrices); i++) {
+ SkASSERT(gMatrices[i].preservesRightAngles());
+ }
+ }
+ return gMatrices[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices)))];
+}
+
+const SkMatrix& TestMatrixRectStaysRect(SkRandom* random) {
+ static SkMatrix gMatrices[6];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // identity
+ gMatrices[0].reset();
+ // translation
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ // scale
+ gMatrices[2].setScale(SkIntToScalar(17), SkIntToScalar(17));
+ // scale + translation
+ gMatrices[3].setScale(SkIntToScalar(-17), SkIntToScalar(-17));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ // reflection
+ gMatrices[4].setScale(SkIntToScalar(-1), SkIntToScalar(-1));
+ // 90 degrees rotation
+ gMatrices[5].setRotate(90);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gMatrices); i++) {
+ SkASSERT(gMatrices[i].rectStaysRect());
+ }
+ }
+ return gMatrices[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices)))];
+}
+
+const SkMatrix& TestMatrixInvertible(SkRandom* random) { return test_matrix(random, true, false); }
+const SkMatrix& TestMatrixPerspective(SkRandom* random) { return test_matrix(random, false, true); }
+
+void TestWrapModes(SkRandom* random, GrSamplerState::WrapMode wrapModes[2]) {
+ static const GrSamplerState::WrapMode kWrapModes[] = {
+ GrSamplerState::WrapMode::kClamp,
+ GrSamplerState::WrapMode::kRepeat,
+ GrSamplerState::WrapMode::kMirrorRepeat,
+ };
+ wrapModes[0] = kWrapModes[random->nextULessThan(SK_ARRAY_COUNT(kWrapModes))];
+ wrapModes[1] = kWrapModes[random->nextULessThan(SK_ARRAY_COUNT(kWrapModes))];
+}
+const SkRect& TestRect(SkRandom* random) {
+ static SkRect gRects[7];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gRects[0] = SkRect::MakeWH(1.f, 1.f);
+ gRects[1] = SkRect::MakeWH(1.0f, 256.0f);
+ gRects[2] = SkRect::MakeWH(256.0f, 1.0f);
+ gRects[3] = SkRectPriv::MakeLargest();
+ gRects[4] = SkRect::MakeLTRB(-65535.0f, -65535.0f, 65535.0f, 65535.0f);
+ gRects[5] = SkRect::MakeLTRB(-10.0f, -10.0f, 10.0f, 10.0f);
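+ // gRects[6] relies on static zero-initialization, so it is an empty rect.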
+ }
+ return gRects[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRects)))];
+}
+
+// Just some simple rects for code which expects its input to be very sanitized
+const SkRect& TestSquare(SkRandom* random) {
+ static SkRect gRects[2];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gRects[0] = SkRect::MakeWH(128.f, 128.f);
+ gRects[1] = SkRect::MakeWH(256.0f, 256.0f);
+ }
+ return gRects[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRects)))];
+}
+
+const SkRRect& TestRRectSimple(SkRandom* random) {
+ static SkRRect gRRect[2];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ SkRect rectangle = SkRect::MakeWH(10.f, 20.f);
+ // true round rect with circular corners
+ gRRect[0].setRectXY(rectangle, 1.f, 1.f);
+ // true round rect with elliptical corners
+ gRRect[1].setRectXY(rectangle, 2.0f, 1.0f);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gRRect); i++) {
+ SkASSERT(gRRect[i].isSimple());
+ }
+ }
+ return gRRect[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRRect)))];
+}
+
+const SkPath& TestPath(SkRandom* random) {
+ static SkPath gPath[7];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // line
+ gPath[0].moveTo(0.f, 0.f);
+ gPath[0].lineTo(10.f, 10.f);
+ // quad
+ gPath[1].moveTo(0.f, 0.f);
+ gPath[1].quadTo(10.f, 10.f, 20.f, 20.f);
+ // conic
+ gPath[2].moveTo(0.f, 0.f);
+ gPath[2].conicTo(10.f, 10.f, 20.f, 20.f, 1.f);
+ // cubic
+ gPath[3].moveTo(0.f, 0.f);
+ gPath[3].cubicTo(10.f, 10.f, 20.f, 20.f, 30.f, 30.f);
+ // all three
+ gPath[4].moveTo(0.f, 0.f);
+ gPath[4].lineTo(10.f, 10.f);
+ gPath[4].quadTo(10.f, 10.f, 20.f, 20.f);
+ gPath[4].conicTo(10.f, 10.f, 20.f, 20.f, 1.f);
+ gPath[4].cubicTo(10.f, 10.f, 20.f, 20.f, 30.f, 30.f);
+ // convex
+ gPath[5].moveTo(0.0f, 0.0f);
+ gPath[5].lineTo(10.0f, 0.0f);
+ gPath[5].lineTo(10.0f, 10.0f);
+ gPath[5].lineTo(0.0f, 10.0f);
+ gPath[5].close();
+ // concave
+ gPath[6].moveTo(0.0f, 0.0f);
+ gPath[6].lineTo(5.0f, 5.0f);
+ gPath[6].lineTo(10.0f, 0.0f);
+ gPath[6].lineTo(10.0f, 10.0f);
+ gPath[6].lineTo(0.0f, 10.0f);
+ gPath[6].close();
+ }
+
+ return gPath[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gPath)))];
+}
+
+const SkPath& TestPathConvex(SkRandom* random) {
+ static SkPath gPath[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // narrow rect
+ gPath[0].moveTo(-1.5f, -50.0f);
+ gPath[0].lineTo(-1.5f, -50.0f);
+ gPath[0].lineTo( 1.5f, -50.0f);
+ gPath[0].lineTo( 1.5f, 50.0f);
+ gPath[0].lineTo(-1.5f, 50.0f);
+ // degenerate
+ gPath[1].moveTo(-0.025f, -0.025f);
+ gPath[1].lineTo(-0.025f, -0.025f);
+ gPath[1].lineTo( 0.025f, -0.025f);
+ gPath[1].lineTo( 0.025f, 0.025f);
+ gPath[1].lineTo(-0.025f, 0.025f);
+ // clipped triangle
+ gPath[2].moveTo(-10.0f, -50.0f);
+ gPath[2].lineTo(-10.0f, -50.0f);
+ gPath[2].lineTo( 10.0f, -50.0f);
+ gPath[2].lineTo( 50.0f, 31.0f);
+ gPath[2].lineTo( 40.0f, 50.0f);
+ gPath[2].lineTo(-40.0f, 50.0f);
+ gPath[2].lineTo(-50.0f, 31.0f);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPath); i++) {
+ SkASSERT(SkPath::kConvex_Convexity == gPath[i].getConvexity());
+ }
+ }
+
+ return gPath[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gPath)))];
+}
+
+static void randomize_stroke_rec(SkStrokeRec* rec, SkRandom* random) {
+ bool strokeAndFill = random->nextBool();
+ SkScalar strokeWidth = random->nextBool() ? 0.f : 1.f;
+ rec->setStrokeStyle(strokeWidth, strokeAndFill);
+
+ SkPaint::Cap cap = SkPaint::Cap(random->nextULessThan(SkPaint::kCapCount));
+ SkPaint::Join join = SkPaint::Join(random->nextULessThan(SkPaint::kJoinCount));
+ SkScalar miterLimit = random->nextRangeScalar(1.f, 5.f);
+ rec->setStrokeParams(cap, join, miterLimit);
+}
+
+SkStrokeRec TestStrokeRec(SkRandom* random) {
+ SkStrokeRec::InitStyle style =
+ SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
+ SkStrokeRec rec(style);
+ randomize_stroke_rec(&rec, random);
+ return rec;
+}
+
+void TestStyle(SkRandom* random, GrStyle* style) {
+ SkStrokeRec::InitStyle initStyle =
+ SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
+ SkStrokeRec stroke(initStyle);
+ randomize_stroke_rec(&stroke, random);
+ sk_sp<SkPathEffect> pe;
+ if (random->nextBool()) {
+ int cnt = random->nextRangeU(1, 50) * 2;
+ std::unique_ptr<SkScalar[]> intervals(new SkScalar[cnt]);
+ SkScalar sum = 0;
+ for (int i = 0; i < cnt; i++) {
+ intervals[i] = random->nextRangeScalar(SkDoubleToScalar(0.01),
+ SkDoubleToScalar(10.0));
+ sum += intervals[i];
+ }
+ SkScalar phase = random->nextRangeScalar(0, sum);
+ pe = TestDashPathEffect::Make(intervals.get(), cnt, phase);
+ }
+ *style = GrStyle(stroke, std::move(pe));
+}
+
+TestDashPathEffect::TestDashPathEffect(const SkScalar* intervals, int count, SkScalar phase) {
+ fCount = count;
+ fIntervals.reset(count);
+ memcpy(fIntervals.get(), intervals, count * sizeof(SkScalar));
+ SkDashPath::CalcDashParameters(phase, intervals, count, &fInitialDashLength,
+ &fInitialDashIndex, &fIntervalLength, &fPhase);
+}
+
+ bool TestDashPathEffect::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const {
+ return SkDashPath::InternalFilter(dst, src, rec, cullRect, fIntervals.get(), fCount,
+ fInitialDashLength, fInitialDashIndex, fIntervalLength);
+}
+
+SkPathEffect::DashType TestDashPathEffect::onAsADash(DashInfo* info) const {
+ if (info) {
+ if (info->fCount >= fCount && info->fIntervals) {
+ memcpy(info->fIntervals, fIntervals.get(), fCount * sizeof(SkScalar));
+ }
+ info->fCount = fCount;
+ info->fPhase = fPhase;
+ }
+ return kDash_DashType;
+}
+
+sk_sp<SkColorSpace> TestColorSpace(SkRandom* random) {
+ static sk_sp<SkColorSpace> gColorSpaces[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // No color space (legacy mode)
+ gColorSpaces[0] = nullptr;
+ // sRGB or color-spin sRGB
+ gColorSpaces[1] = SkColorSpace::MakeSRGB();
+ gColorSpaces[2] = SkColorSpace::MakeSRGB()->makeColorSpin();
+ }
+ return gColorSpaces[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gColorSpaces)))];
+}
+
+sk_sp<GrColorSpaceXform> TestColorXform(SkRandom* random) {
+ // TODO: Add many more kinds of xforms here
+ static sk_sp<GrColorSpaceXform> gXforms[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ sk_sp<SkColorSpace> srgb = SkColorSpace::MakeSRGB();
+ sk_sp<SkColorSpace> spin = SkColorSpace::MakeSRGB()->makeColorSpin();
+ // No gamut change
+ gXforms[0] = nullptr;
+ gXforms[1] = GrColorSpaceXform::Make(srgb.get(), kPremul_SkAlphaType,
+ spin.get(), kPremul_SkAlphaType);
+ gXforms[2] = GrColorSpaceXform::Make(spin.get(), kPremul_SkAlphaType,
+ srgb.get(), kPremul_SkAlphaType);
+ }
+ return gXforms[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gXforms)))];
+}
+
+TestAsFPArgs::TestAsFPArgs(GrProcessorTestData* d)
+ : fViewMatrixStorage(TestMatrix(d->fRandom))
+ , fColorInfoStorage(skstd::make_unique<GrColorInfo>(
+ GrColorType::kRGBA_8888, kPremul_SkAlphaType, TestColorSpace(d->fRandom)))
+ , fArgs(d->context(), &fViewMatrixStorage, kNone_SkFilterQuality, fColorInfoStorage.get()) {
+}
+
+TestAsFPArgs::~TestAsFPArgs() {}
+
+} // namespace GrTest
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTestUtils.h b/gfx/skia/skia/src/gpu/GrTestUtils.h
new file mode 100644
index 0000000000..b3e71a29a3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTestUtils.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTestUtils_DEFINED
+#define GrTestUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if GR_TEST_UTILS
+
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTemplates.h"
+#include "include/utils/SkRandom.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrFPArgs.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/shaders/SkShaderBase.h"
+
+class GrColorInfo;
+class GrColorSpaceXform;
+struct GrProcessorTestData;
+class GrStyle;
+class SkMatrix;
+class SkPath;
+class SkRRect;
+struct SkRect;
+
+namespace GrTest {
+/**
+ * Helpers for use in Test functions.
+ */
+const SkMatrix& TestMatrix(SkRandom*);
+const SkMatrix& TestMatrixPreservesRightAngles(SkRandom*);
+const SkMatrix& TestMatrixRectStaysRect(SkRandom*);
+const SkMatrix& TestMatrixInvertible(SkRandom*);
+const SkMatrix& TestMatrixPerspective(SkRandom*);
+void TestWrapModes(SkRandom*, GrSamplerState::WrapMode[2]);
+const SkRect& TestRect(SkRandom*);
+const SkRect& TestSquare(SkRandom*);
+const SkRRect& TestRRectSimple(SkRandom*);
+const SkPath& TestPath(SkRandom*);
+const SkPath& TestPathConvex(SkRandom*);
+SkStrokeRec TestStrokeRec(SkRandom*);
+/** Creates styles with dash path effects and null path effects */
+void TestStyle(SkRandom*, GrStyle*);
+sk_sp<SkColorSpace> TestColorSpace(SkRandom*);
+sk_sp<GrColorSpaceXform> TestColorXform(SkRandom*);
+
+class TestAsFPArgs {
+public:
+ TestAsFPArgs(GrProcessorTestData*);
+ ~TestAsFPArgs();
+ const GrFPArgs& args() const { return fArgs; }
+
+private:
+ SkMatrix fViewMatrixStorage;
+ std::unique_ptr<GrColorInfo> fColorInfoStorage;
+ GrFPArgs fArgs;
+};
+
+// We have a simplified dash path effect here to avoid relying on SkDashPathEffect which
+// is in the optional build target effects.
+class TestDashPathEffect : public SkPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(const SkScalar* intervals, int count, SkScalar phase) {
+ return sk_sp<SkPathEffect>(new TestDashPathEffect(intervals, count, phase));
+ }
+
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return nullptr; }
+
+protected:
+ bool onFilterPath(SkPath* dst, const SkPath&, SkStrokeRec* , const SkRect*) const override;
+ DashType onAsADash(DashInfo* info) const override;
+
+private:
+ TestDashPathEffect(const SkScalar* intervals, int count, SkScalar phase);
+
+ int fCount;
+ SkAutoTArray<SkScalar> fIntervals;
+ SkScalar fPhase;
+ SkScalar fInitialDashLength;
+ int fInitialDashIndex;
+ SkScalar fIntervalLength;
+};
+
+} // namespace GrTest
+
+static inline GrColor GrRandomColor(SkRandom* random) {
+ // There are only a few cases of random colors which interest us
+ enum ColorMode {
+ kAllOnes_ColorMode,
+ kAllZeros_ColorMode,
+ kAlphaOne_ColorMode,
+ kRandom_ColorMode,
+ kLast_ColorMode = kRandom_ColorMode
+ };
+
+ ColorMode colorMode = ColorMode(random->nextULessThan(kLast_ColorMode + 1));
+ GrColor color SK_INIT_TO_AVOID_WARNING;
+ switch (colorMode) {
+ case kAllOnes_ColorMode:
+ color = GrColorPackRGBA(0xFF, 0xFF, 0xFF, 0xFF);
+ break;
+ case kAllZeros_ColorMode:
+ color = GrColorPackRGBA(0, 0, 0, 0);
+ break;
+ case kAlphaOne_ColorMode:
+ color = GrColorPackRGBA(random->nextULessThan(256),
+ random->nextULessThan(256),
+ random->nextULessThan(256),
+ 0xFF);
+ break;
+ case kRandom_ColorMode: {
+ uint8_t alpha = random->nextULessThan(256);
+ color = GrColorPackRGBA(random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ alpha);
+ break;
+ }
+ }
+ return color;
+}
+
+static inline uint8_t GrRandomCoverage(SkRandom* random) {
+ enum CoverageMode {
+ kZero_CoverageMode,
+ kAllOnes_CoverageMode,
+ kRandom_CoverageMode,
+ kLast_CoverageMode = kRandom_CoverageMode
+ };
+
+ CoverageMode coverageMode = CoverageMode(random->nextULessThan(kLast_CoverageMode + 1));
+ uint8_t coverage SK_INIT_TO_AVOID_WARNING;
+ switch (coverageMode) {
+ case kZero_CoverageMode:
+ coverage = 0;
+ break;
+ case kAllOnes_CoverageMode:
+ coverage = 0xff;
+ break;
+ case kRandom_CoverageMode:
+ coverage = random->nextULessThan(256);
+ break;
+ }
+ return coverage;
+}
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTexture.cpp b/gfx/skia/skia/src/gpu/GrTexture.cpp
new file mode 100644
index 0000000000..df0f2ebbbd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTexture.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMath.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrResourceKey.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTexturePriv.h"
+
+void GrTexture::markMipMapsDirty() {
+ if (GrMipMapsStatus::kValid == fMipMapsStatus) {
+ fMipMapsStatus = GrMipMapsStatus::kDirty;
+ }
+}
+
+void GrTexture::markMipMapsClean() {
+ SkASSERT(GrMipMapsStatus::kNotAllocated != fMipMapsStatus);
+ fMipMapsStatus = GrMipMapsStatus::kValid;
+}
+
+size_t GrTexture::onGpuMemorySize() const {
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ 1, this->texturePriv().mipMapped());
+}
+
+/////////////////////////////////////////////////////////////////////////////
+GrTexture::GrTexture(GrGpu* gpu, const SkISize& size, GrPixelConfig config, GrProtected isProtected,
+ GrTextureType textureType, GrMipMapsStatus mipMapsStatus)
+ : INHERITED(gpu, size, config, isProtected)
+ , fTextureType(textureType)
+ , fMipMapsStatus(mipMapsStatus) {
+ if (GrMipMapsStatus::kNotAllocated == fMipMapsStatus) {
+ fMaxMipMapLevel = 0;
+ } else {
+ fMaxMipMapLevel = SkMipMap::ComputeLevelCount(this->width(), this->height());
+ }
+}
+
+bool GrTexture::StealBackendTexture(sk_sp<GrTexture> texture,
+ GrBackendTexture* backendTexture,
+ SkImage::BackendTextureReleaseProc* releaseProc) {
+ if (!texture->unique()) {
+ return false;
+ }
+
+ if (!texture->onStealBackendTexture(backendTexture, releaseProc)) {
+ return false;
+ }
+#ifdef SK_DEBUG
+ GrResourceCache* cache = texture->getContext()->priv().getResourceCache();
+ int preCount = cache->getResourceCount();
+#endif
+ // Ensure that the texture will be released by the cache when we drop the last ref.
+ // A texture that has no refs and no keys should be immediately removed.
+ if (texture->getUniqueKey().isValid()) {
+ texture->resourcePriv().removeUniqueKey();
+ }
+ if (texture->resourcePriv().getScratchKey().isValid()) {
+ texture->resourcePriv().removeScratchKey();
+ }
+#ifdef SK_DEBUG
+ texture.reset();
+ int postCount = cache->getResourceCount();
+ SkASSERT(postCount < preCount);
+#endif
+ return true;
+}
+
+void GrTexture::computeScratchKey(GrScratchKey* key) const {
+ if (!this->getGpu()->caps()->isFormatCompressed(this->backendFormat())) {
+ int sampleCount = 1;
+ GrRenderable renderable = GrRenderable::kNo;
+ if (const auto* rt = this->asRenderTarget()) {
+ sampleCount = rt->numSamples();
+ renderable = GrRenderable::kYes;
+ }
+ auto isProtected = this->isProtected() ? GrProtected::kYes : GrProtected::kNo;
+ GrTexturePriv::ComputeScratchKey(this->config(), this->width(), this->height(), renderable,
+ sampleCount, this->texturePriv().mipMapped(), isProtected,
+ key);
+ }
+}
+
+void GrTexturePriv::ComputeScratchKey(GrPixelConfig config,
+ int width,
+ int height,
+ GrRenderable renderable,
+ int sampleCnt,
+ GrMipMapped mipMapped,
+ GrProtected isProtected,
+ GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+ SkASSERT(sampleCnt > 0);
+ SkASSERT(1 == sampleCnt || renderable == GrRenderable::kYes);
+
+ // make sure desc.fConfig fits in 5 bits
+ SkASSERT(sk_float_log2(kLast_GrPixelConfig) <= 5);
+ SkASSERT(static_cast<uint32_t>(config) < (1 << 5));
+ SkASSERT(static_cast<uint32_t>(mipMapped) <= 1);
+ SkASSERT(static_cast<uint32_t>(isProtected) <= 1);
+ SkASSERT(static_cast<uint32_t>(renderable) <= 1);
+ SkASSERT(static_cast<uint32_t>(sampleCnt) < (1 << (32 - 8)));
+
+ GrScratchKey::Builder builder(key, kType, 3);
+ builder[0] = width;
+ builder[1] = height;
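+ // builder[2] bit layout: bits 0-4 config, bit 5 mipMapped, bit 6 isProtected,
+ // bit 7 renderable, bits 8 and up sampleCnt.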
+ builder[2] = (static_cast<uint32_t>(config) << 0)
+ | (static_cast<uint32_t>(mipMapped) << 5)
+ | (static_cast<uint32_t>(isProtected) << 6)
+ | (static_cast<uint32_t>(renderable) << 7)
+ | (static_cast<uint32_t>(sampleCnt) << 8);
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureAdjuster.cpp b/gfx/skia/skia/src/gpu/GrTextureAdjuster.cpp
new file mode 100644
index 0000000000..9b6ce36311
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureAdjuster.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/SkGr.h"
+
+GrTextureAdjuster::GrTextureAdjuster(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> original,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ uint32_t uniqueID,
+ SkColorSpace* cs,
+ bool useDecal)
+ : INHERITED(context, original->width(), original->height(),
+ GrColorInfo(colorType, alphaType, sk_ref_sp(cs)), useDecal)
+ , fOriginal(std::move(original))
+ , fUniqueID(uniqueID) {}
+
+GrTextureAdjuster::GrTextureAdjuster(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> original,
+ const GrColorInfo& colorInfo,
+ uint32_t uniqueID,
+ bool useDecal)
+ : INHERITED(context, original->width(), original->height(), colorInfo, useDecal)
+ , fOriginal(std::move(original))
+ , fUniqueID(uniqueID) {}
+
+void GrTextureAdjuster::makeCopyKey(const CopyParams& params, GrUniqueKey* copyKey) {
+ // Destination color space is irrelevant - we already have a texture so we're just sub-setting
+ GrUniqueKey baseKey;
+ GrMakeKeyFromImageID(&baseKey, fUniqueID, SkIRect::MakeWH(this->width(), this->height()));
+ MakeCopyKeyFromOrigKey(baseKey, params, copyKey);
+}
+
+void GrTextureAdjuster::didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) {
+ // We don't currently have a mechanism for notifications on Images!
+}
+
+sk_sp<GrTextureProxy> GrTextureAdjuster::refTextureProxyCopy(const CopyParams& copyParams,
+ bool willBeMipped) {
+ GrProxyProvider* proxyProvider = this->context()->priv().proxyProvider();
+
+ GrUniqueKey key;
+ this->makeCopyKey(copyParams, &key);
+ sk_sp<GrTextureProxy> cachedCopy;
+ if (key.isValid()) {
+ cachedCopy = proxyProvider->findOrCreateProxyByUniqueKey(key, this->colorType(),
+ this->originalProxy()->origin());
+ if (cachedCopy && (!willBeMipped || GrMipMapped::kYes == cachedCopy->mipMapped())) {
+ return cachedCopy;
+ }
+ }
+
+ sk_sp<GrTextureProxy> proxy = this->originalProxyRef();
+
+ sk_sp<GrTextureProxy> copy = CopyOnGpu(this->context(), std::move(proxy), this->colorType(),
+ copyParams, willBeMipped);
+ if (copy) {
+ if (key.isValid()) {
+ SkASSERT(copy->origin() == this->originalProxy()->origin());
+ if (cachedCopy) {
+ SkASSERT(GrMipMapped::kYes == copy->mipMapped() &&
+ GrMipMapped::kNo == cachedCopy->mipMapped());
+ // If we had a cachedProxy, that means there already is a proxy in the cache which
+ // matches the key, but it does not have mip levels and we require them. Thus we
+ // must remove the unique key from that proxy.
+ SkASSERT(cachedCopy->getUniqueKey() == key);
+ proxyProvider->removeUniqueKeyFromProxy(cachedCopy.get());
+ }
+ proxyProvider->assignUniqueKeyToProxy(key, copy.get());
+ this->didCacheCopy(key, proxyProvider->contextID());
+ }
+ }
+ return copy;
+}
+
+sk_sp<GrTextureProxy> GrTextureAdjuster::onRefTextureProxyForParams(
+ const GrSamplerState& params,
+ bool willBeMipped,
+ SkScalar scaleAdjust[2]) {
+ sk_sp<GrTextureProxy> proxy = this->originalProxyRef();
+ CopyParams copyParams;
+
+ if (this->context()->priv().abandoned()) {
+ // The texture was abandoned.
+ return nullptr;
+ }
+
+ SkASSERT(this->width() <= this->context()->priv().caps()->maxTextureSize() &&
+ this->height() <= this->context()->priv().caps()->maxTextureSize());
+
+ bool needsCopyForMipsOnly = false;
+ if (!params.isRepeated() ||
+ !GrGpu::IsACopyNeededForRepeatWrapMode(this->context()->priv().caps(), proxy.get(),
+ proxy->width(), proxy->height(), params.filter(),
+ &copyParams, scaleAdjust)) {
+ needsCopyForMipsOnly = GrGpu::IsACopyNeededForMips(this->context()->priv().caps(),
+ proxy.get(), params.filter(),
+ &copyParams);
+ if (!needsCopyForMipsOnly) {
+ return proxy;
+ }
+ }
+
+ sk_sp<GrTextureProxy> result = this->refTextureProxyCopy(copyParams, willBeMipped);
+ if (!result && needsCopyForMipsOnly) {
+ // If we were unable to make a copy and we only needed a copy for mips, then we will return
+ // the source texture here and require that the GPU backend is able to fall back to using
+ // bilerp if mips are required.
+ return this->originalProxyRef();
+ }
+ return result;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrTextureAdjuster::createFragmentProcessor(
+ const SkMatrix& origTextureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) {
+ SkMatrix textureMatrix = origTextureMatrix;
+
+ SkScalar scaleAdjust[2] = { 1.0f, 1.0f };
+ sk_sp<GrTextureProxy> proxy(
+ this->refTextureProxyForParams(filterOrNullForBicubic, scaleAdjust));
+ if (!proxy) {
+ return nullptr;
+ }
+ // If we made a copy then we only copied the contentArea, in which case the new texture is all
+ // content.
+ if (proxy.get() != this->originalProxy()) {
+ textureMatrix.postScale(scaleAdjust[0], scaleAdjust[1]);
+ }
+
+ SkRect domain;
+ DomainMode domainMode =
+ DetermineDomainMode(constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ proxy.get(), filterOrNullForBicubic, &domain);
+ if (kTightCopy_DomainMode == domainMode) {
+ // TODO: Copy the texture and adjust the texture matrix (both parts need to consider
+ // non-int constraint rect)
+ // For now: treat as bilerp and ignore what goes on above level 0.
+
+ // We only expect MIP maps to require a tight copy.
+ SkASSERT(filterOrNullForBicubic &&
+ GrSamplerState::Filter::kMipMap == *filterOrNullForBicubic);
+ static const GrSamplerState::Filter kBilerp = GrSamplerState::Filter::kBilerp;
+ domainMode =
+ DetermineDomainMode(constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ proxy.get(), &kBilerp, &domain);
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ }
+ SkASSERT(kNoDomain_DomainMode == domainMode ||
+ (domain.fLeft <= domain.fRight && domain.fTop <= domain.fBottom));
+ return this->createFragmentProcessorForDomainAndFilter(
+ std::move(proxy), textureMatrix, domainMode, domain, filterOrNullForBicubic);
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureAdjuster.h b/gfx/skia/skia/src/gpu/GrTextureAdjuster.h
new file mode 100644
index 0000000000..ce93eee61e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureAdjuster.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureAdjuster_DEFINED
+#define GrTextureAdjuster_DEFINED
+
+#include "src/core/SkTLazy.h"
+#include "src/gpu/GrTextureProducer.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrRecordingContext;
+
+/**
+ * Base class for sources that start out as textures. Optionally allows for a content area subrect.
+ * The intent is not to use content area for subrect rendering. Rather, the pixels outside the
+ * content area have undefined values and shouldn't be read *regardless* of filtering mode or
+ * the SkCanvas::SrcRectConstraint used for subrect draws.
+ */
+class GrTextureAdjuster : public GrTextureProducer {
+public:
+ std::unique_ptr<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) override;
+
+ GrTextureAdjuster(GrRecordingContext*, sk_sp<GrTextureProxy>, GrColorType, SkAlphaType,
+ uint32_t uniqueID, SkColorSpace*, bool useDecal = false);
+
+ GrTextureAdjuster(GrRecordingContext*, sk_sp<GrTextureProxy>, const GrColorInfo&,
+ uint32_t uniqueID, bool useDecal = false);
+
+protected:
+ void makeCopyKey(const CopyParams& params, GrUniqueKey* copyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) override;
+
+ GrTextureProxy* originalProxy() const { return fOriginal.get(); }
+ sk_sp<GrTextureProxy> originalProxyRef() const { return fOriginal; }
+
+private:
+ sk_sp<GrTextureProxy> onRefTextureProxyForParams(const GrSamplerState&,
+ bool willBeMipped,
+ SkScalar scaleAdjust[2]) override;
+
+ sk_sp<GrTextureProxy> refTextureProxyCopy(const CopyParams& copyParams, bool willBeMipped);
+
+ sk_sp<GrTextureProxy> fOriginal;
+ uint32_t fUniqueID;
+
+ typedef GrTextureProducer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureContext.cpp b/gfx/skia/skia/src/gpu/GrTextureContext.cpp
new file mode 100644
index 0000000000..ccdcc800e3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureContext.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTextureContext.h"
+
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+#define RETURN_FALSE_IF_ABANDONED if (this->drawingManager()->wasAbandoned()) { return false; }
+
+GrTextureContext::GrTextureContext(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> textureProxy,
+ GrColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace)
+ : GrSurfaceContext(context, colorType, alphaType, std::move(colorSpace))
+ , fTextureProxy(std::move(textureProxy)) {
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+void GrTextureContext::validate() const {
+ SkASSERT(fTextureProxy);
+ fTextureProxy->validate(fContext);
+ SkASSERT(fContext->priv().caps()->areColorTypeAndFormatCompatible(
+ this->colorInfo().colorType(), fTextureProxy->backendFormat()));
+}
+#endif
+
+GrTextureContext::~GrTextureContext() {
+ ASSERT_SINGLE_OWNER
+}
+
+GrRenderTargetProxy* GrTextureContext::asRenderTargetProxy() {
+ // If the proxy can return an RTProxy it should've been wrapped in a RTContext
+ SkASSERT(!fTextureProxy->asRenderTargetProxy());
+ return nullptr;
+}
+
+sk_sp<GrRenderTargetProxy> GrTextureContext::asRenderTargetProxyRef() {
+ // If the proxy can return an RTProxy it should've been wrapped in a RTContext
+ SkASSERT(!fTextureProxy->asRenderTargetProxy());
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureContext.h b/gfx/skia/skia/src/gpu/GrTextureContext.h
new file mode 100644
index 0000000000..626c2d3451
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureContext.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureContext_DEFINED
+#define GrTextureContext_DEFINED
+
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrContext;
+class GrDrawingManager;
+class GrSurface;
+class GrTextureProxy;
+struct SkIPoint;
+struct SkIRect;
+
+/**
+ * A helper object to orchestrate commands (currently just copies) for GrSurfaces that are
+ * GrTextures and not GrRenderTargets.
+ */
+class GrTextureContext : public GrSurfaceContext {
+public:
+ ~GrTextureContext() override;
+
+ GrSurfaceProxy* asSurfaceProxy() override { return fTextureProxy.get(); }
+ const GrSurfaceProxy* asSurfaceProxy() const override { return fTextureProxy.get(); }
+ sk_sp<GrSurfaceProxy> asSurfaceProxyRef() override { return fTextureProxy; }
+
+ GrTextureProxy* asTextureProxy() override { return fTextureProxy.get(); }
+ const GrTextureProxy* asTextureProxy() const override { return fTextureProxy.get(); }
+ sk_sp<GrTextureProxy> asTextureProxyRef() override { return fTextureProxy; }
+
+ GrRenderTargetProxy* asRenderTargetProxy() override;
+ sk_sp<GrRenderTargetProxy> asRenderTargetProxyRef() override;
+
+protected:
+ GrTextureContext(GrRecordingContext*,
+ sk_sp<GrTextureProxy>,
+ GrColorType,
+ SkAlphaType,
+ sk_sp<SkColorSpace>);
+
+ SkDEBUGCODE(void validate() const override;)
+
+private:
+ friend class GrDrawingManager; // for ctor
+
+ sk_sp<GrTextureProxy> fTextureProxy;
+
+ typedef GrSurfaceContext INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureMaker.cpp b/gfx/skia/skia/src/gpu/GrTextureMaker.cpp
new file mode 100644
index 0000000000..fa428ed0fb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureMaker.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTextureMaker.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+sk_sp<GrTextureProxy> GrTextureMaker::onRefTextureProxyForParams(const GrSamplerState& params,
+ bool willBeMipped,
+ SkScalar scaleAdjust[2]) {
+ if (this->width() > this->context()->priv().caps()->maxTextureSize() ||
+ this->height() > this->context()->priv().caps()->maxTextureSize()) {
+ return nullptr;
+ }
+
+ CopyParams copyParams;
+
+ sk_sp<GrTextureProxy> original(this->refOriginalTextureProxy(willBeMipped,
+ AllowedTexGenType::kCheap));
+ bool needsCopyForMipsOnly = false;
+ if (original) {
+ if (!params.isRepeated() ||
+ !GrGpu::IsACopyNeededForRepeatWrapMode(this->context()->priv().caps(), original.get(),
+ original->width(), original->height(),
+ params.filter(), &copyParams, scaleAdjust)) {
+ needsCopyForMipsOnly = GrGpu::IsACopyNeededForMips(this->context()->priv().caps(),
+ original.get(), params.filter(),
+ &copyParams);
+ if (!needsCopyForMipsOnly) {
+ return original;
+ }
+ }
+ } else {
+ if (!params.isRepeated() ||
+ !GrGpu::IsACopyNeededForRepeatWrapMode(this->context()->priv().caps(), nullptr,
+ this->width(), this->height(),
+ params.filter(), &copyParams, scaleAdjust)) {
+ return this->refOriginalTextureProxy(willBeMipped, AllowedTexGenType::kAny);
+ }
+ }
+
+ GrProxyProvider* proxyProvider = this->context()->priv().proxyProvider();
+
+ GrSurfaceOrigin origOrigin = original ? original->origin() : kTopLeft_GrSurfaceOrigin;
+ GrUniqueKey copyKey;
+ this->makeCopyKey(copyParams, &copyKey);
+ sk_sp<GrTextureProxy> cachedProxy;
+ if (copyKey.isValid()) {
+ cachedProxy =
+ proxyProvider->findOrCreateProxyByUniqueKey(copyKey, this->colorType(), origOrigin);
+ if (cachedProxy && (!willBeMipped || GrMipMapped::kYes == cachedProxy->mipMapped())) {
+ return cachedProxy;
+ }
+ }
+
+ sk_sp<GrTextureProxy> source;
+ if (original) {
+ source = std::move(original);
+ } else if (cachedProxy) {
+ source = cachedProxy;
+ } else {
+ // Since we will be copying this texture, there is no reason to make it mipped
+ source = this->refOriginalTextureProxy(false, AllowedTexGenType::kAny);
+ }
+
+ if (!source) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> result =
+ CopyOnGpu(this->context(), source, this->colorType(), copyParams, willBeMipped);
+
+ if (!result) {
+ // If we were unable to make a copy and we only needed a copy for mips, then we will return
+ // the source texture here and require that the GPU backend is able to fall back to using
+ // bilerp if mips are required.
+ if (needsCopyForMipsOnly) {
+ return source;
+ }
+ return nullptr;
+ }
+
+ if (copyKey.isValid()) {
+ SkASSERT(result->origin() == origOrigin);
+ if (cachedProxy) {
+ SkASSERT(GrMipMapped::kYes == result->mipMapped() &&
+ GrMipMapped::kNo == cachedProxy->mipMapped());
+ // If we had a cachedProxy, that means there already is a proxy in the cache which
+ // matches the key, but it does not have mip levels and we require them. Thus we must
+ // remove the unique key from that proxy.
+ SkASSERT(cachedProxy->getUniqueKey() == copyKey);
+ proxyProvider->removeUniqueKeyFromProxy(cachedProxy.get());
+ }
+ proxyProvider->assignUniqueKeyToProxy(copyKey, result.get());
+ this->didCacheCopy(copyKey, proxyProvider->contextID());
+ }
+ return result;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrTextureMaker::createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) {
+ const GrSamplerState::Filter* fmForDetermineDomain = filterOrNullForBicubic;
+ if (filterOrNullForBicubic && GrSamplerState::Filter::kMipMap == *filterOrNullForBicubic &&
+ kYes_FilterConstraint == filterConstraint) {
+ // TODO: Here we should force a copy restricted to the constraintRect since MIP maps will
+ // read outside the constraint rect. However, as in the adjuster case, we aren't currently
+ // doing that.
+ // Instead, we compute the domain as though we were bilerping, which is only correct if we
+ // only sample level 0.
+ static const GrSamplerState::Filter kBilerp = GrSamplerState::Filter::kBilerp;
+ fmForDetermineDomain = &kBilerp;
+ }
+
+ SkScalar scaleAdjust[2] = { 1.0f, 1.0f };
+ sk_sp<GrTextureProxy> proxy(this->refTextureProxyForParams(filterOrNullForBicubic,
+ scaleAdjust));
+ if (!proxy) {
+ return nullptr;
+ }
+ SkMatrix adjustedMatrix = textureMatrix;
+ adjustedMatrix.postScale(scaleAdjust[0], scaleAdjust[1]);
+
+ SkRect domain;
+ DomainMode domainMode =
+ DetermineDomainMode(constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ proxy.get(), fmForDetermineDomain, &domain);
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ return this->createFragmentProcessorForDomainAndFilter(
+ std::move(proxy), adjustedMatrix, domainMode, domain, filterOrNullForBicubic);
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureMaker.h b/gfx/skia/skia/src/gpu/GrTextureMaker.h
new file mode 100644
index 0000000000..5d558d1c68
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureMaker.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureMaker_DEFINED
+#define GrTextureMaker_DEFINED
+
+#include "src/gpu/GrTextureProducer.h"
+
+/**
+ * Base class for sources that start out as something other than a texture (encoded image,
+ * picture, ...).
+ */
+class GrTextureMaker : public GrTextureProducer {
+public:
+ enum class AllowedTexGenType : bool { kCheap, kAny };
+
+ std::unique_ptr<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) override;
+
+protected:
+ GrTextureMaker(GrRecordingContext* context, int width, int height, const GrColorInfo& info,
+ bool domainNeedsLocal)
+ : INHERITED(context, width, height, info, domainNeedsLocal) {}
+
+ /**
+ * Return the maker's "original" texture. It is the responsibility of the maker to handle any
+ * caching of the original if desired.
+ * If "genType" argument equals AllowedTexGenType::kCheap and the texture is not trivial to
+ * construct then refOriginalTextureProxy should return nullptr (for example if texture is made
+ * by drawing into a render target).
+ */
+ virtual sk_sp<GrTextureProxy> refOriginalTextureProxy(bool willBeMipped,
+ AllowedTexGenType genType) = 0;
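+
+ // A minimal sketch of how a hypothetical subclass might honor the kCheap contract above
+ // (all names are illustrative, not part of Skia):
+ //     sk_sp<GrTextureProxy> MyPictureMaker::refOriginalTextureProxy(bool willBeMipped,
+ //                                                                   AllowedTexGenType type) {
+ //         if (AllowedTexGenType::kCheap == type) {
+ //             return nullptr;  // rendering the picture is expensive; wait for a kAny request
+ //         }
+ //         return this->renderPictureToProxy(willBeMipped);  // hypothetical helper
+ //     }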
+
+private:
+ sk_sp<GrTextureProxy> onRefTextureProxyForParams(const GrSamplerState&,
+ bool willBeMipped,
+ SkScalar scaleAdjust[2]) override;
+
+ typedef GrTextureProducer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTexturePriv.h b/gfx/skia/skia/src/gpu/GrTexturePriv.h
new file mode 100644
index 0000000000..4ce3e057ea
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTexturePriv.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTexturePriv_DEFINED
+#define GrTexturePriv_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrSamplerState.h"
+
+/** Class that adds methods to GrTexture that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrTexture. It should never have additional data
+ members or virtual methods.
+ Non-static methods that are not trivial inlines should be spring-boarded (e.g. declared and
+ implemented privately in GrTexture with an inline public method here). */
+class GrTexturePriv {
+public:
+ void markMipMapsDirty() {
+ fTexture->markMipMapsDirty();
+ }
+
+ void markMipMapsClean() {
+ fTexture->markMipMapsClean();
+ }
+
+ GrMipMapsStatus mipMapsStatus() const { return fTexture->fMipMapsStatus; }
+
+ bool mipMapsAreDirty() const {
+ return GrMipMapsStatus::kValid != this->mipMapsStatus();
+ }
+
+ GrMipMapped mipMapped() const {
+ if (GrMipMapsStatus::kNotAllocated != this->mipMapsStatus()) {
+ return GrMipMapped::kYes;
+ }
+ return GrMipMapped::kNo;
+ }
+
+ int maxMipMapLevel() const {
+ return fTexture->fMaxMipMapLevel;
+ }
+
+ GrTextureType textureType() const { return fTexture->fTextureType; }
+ bool hasRestrictedSampling() const {
+ return GrTextureTypeHasRestrictedSampling(this->textureType());
+ }
+ /** Filtering is clamped to this value. */
+ GrSamplerState::Filter highestFilterMode() const {
+ return this->hasRestrictedSampling() ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kMipMap;
+ }
+
+ static void ComputeScratchKey(GrPixelConfig config,
+ int width,
+ int height,
+ GrRenderable,
+ int sampleCnt,
+ GrMipMapped,
+ GrProtected,
+ GrScratchKey* key);
+
+private:
+ GrTexturePriv(GrTexture* texture) : fTexture(texture) { }
+ GrTexturePriv(const GrTexturePriv& that) : fTexture(that.fTexture) { }
+ GrTexturePriv& operator=(const GrTexturePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrTexturePriv* operator&() const;
+ GrTexturePriv* operator&();
+
+ GrTexture* fTexture;
+
+ friend class GrTexture; // to construct/copy this type.
+};
+
+inline GrTexturePriv GrTexture::texturePriv() { return GrTexturePriv(this); }
+
+inline const GrTexturePriv GrTexture::texturePriv() const {
+ return GrTexturePriv(const_cast<GrTexture*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureProducer.cpp b/gfx/skia/skia/src/gpu/GrTextureProducer.cpp
new file mode 100644
index 0000000000..7b54e9c9d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProducer.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProducer.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+
+sk_sp<GrTextureProxy> GrTextureProducer::CopyOnGpu(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> inputProxy,
+ GrColorType colorType,
+ const CopyParams& copyParams,
+ bool dstWillRequireMipMaps) {
+ SkASSERT(context);
+
+ const SkRect dstRect = SkRect::MakeIWH(copyParams.fWidth, copyParams.fHeight);
+ GrMipMapped mipMapped = dstWillRequireMipMaps ? GrMipMapped::kYes : GrMipMapped::kNo;
+
+ SkRect localRect = SkRect::MakeWH(inputProxy->width(), inputProxy->height());
+
+ bool needsDomain = false;
+ bool resizing = false;
+ if (copyParams.fFilter != GrSamplerState::Filter::kNearest) {
+ // Assign the outer 'resizing' here; re-declaring it locally would shadow the flag and
+ // leave it permanently false for the mip-copy check below.
+ resizing = localRect.width() != dstRect.width() ||
+ localRect.height() != dstRect.height();
+ needsDomain = resizing && !GrProxyProvider::IsFunctionallyExact(inputProxy.get());
+ }
+
+ if (copyParams.fFilter == GrSamplerState::Filter::kNearest && !needsDomain && !resizing &&
+ dstWillRequireMipMaps) {
+ sk_sp<GrTextureProxy> proxy = GrCopyBaseMipMapToTextureProxy(context, inputProxy.get(),
+ colorType);
+ if (proxy) {
+ return proxy;
+ }
+ }
+
+ auto copyRTC = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, dstRect.width(), dstRect.height(), colorType, nullptr, 1,
+ mipMapped, inputProxy->origin());
+ if (!copyRTC) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+
+ if (needsDomain) {
+ const SkRect domain = localRect.makeInset(0.5f, 0.5f);
+ // MIP filtering would cause us to read values from outside the subset, so the caller must
+ // not have requested it.
+ SkASSERT(copyParams.fFilter != GrSamplerState::Filter::kMipMap);
+ paint.addColorFragmentProcessor(
+ GrTextureDomainEffect::Make(std::move(inputProxy), colorType, SkMatrix::I(), domain,
+ GrTextureDomain::kClamp_Mode, copyParams.fFilter));
+ } else {
+ GrSamplerState samplerState(GrSamplerState::WrapMode::kClamp, copyParams.fFilter);
+ paint.addColorTextureProcessor(std::move(inputProxy), colorType, SkMatrix::I(),
+ samplerState);
+ }
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ copyRTC->fillRectToRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(), dstRect,
+ localRect);
+ return copyRTC->asTextureProxyRef();
+}
+
+/** Determines whether a texture domain is necessary and if so what domain to use. There are two
+ * rectangles to consider:
+ * - The first is the content area specified by the texture adjuster (i.e., textureContentArea).
+ * We can *never* allow filtering to cause bleed of pixels outside this rectangle.
+ * - The second rectangle is the constraint rectangle (i.e., constraintRect), which is known to
+ * be contained by the content area. The filterConstraint specifies whether we are allowed to
+ * bleed across this rect.
+ *
+ * We want to avoid using a domain if possible. We consider the above rectangles, the filter type,
+ * and whether the coords generated by the draw would all fall within the constraint rect. If the
+ * latter is true we only need to consider whether the filter would extend beyond the rects.
+ */
+GrTextureProducer::DomainMode GrTextureProducer::DetermineDomainMode(
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ GrTextureProxy* proxy,
+ const GrSamplerState::Filter* filterModeOrNullForBicubic,
+ SkRect* domainRect) {
+ const SkIRect proxyBounds = SkIRect::MakeWH(proxy->width(), proxy->height());
+
+ SkASSERT(proxyBounds.contains(constraintRect));
+
+ const bool proxyIsExact = GrProxyProvider::IsFunctionallyExact(proxy);
+
+ // If the constraint rectangle contains the whole proxy then no need for a domain.
+ if (constraintRect.contains(proxyBounds) && proxyIsExact) {
+ return kNoDomain_DomainMode;
+ }
+
+ bool restrictFilterToRect = (filterConstraint == GrTextureProducer::kYes_FilterConstraint);
+
+ // If we can filter outside the constraint rect, and there is no non-content area of the
+ // proxy, and we aren't going to generate sample coords outside the constraint rect then we
+ // don't need a domain.
+ if (!restrictFilterToRect && proxyIsExact && coordsLimitedToConstraintRect) {
+ return kNoDomain_DomainMode;
+ }
+
+ // Get the domain inset based on sampling mode (or bail if mipped)
+ SkScalar filterHalfWidth = 0.f;
+ if (filterModeOrNullForBicubic) {
+ switch (*filterModeOrNullForBicubic) {
+ case GrSamplerState::Filter::kNearest:
+ if (coordsLimitedToConstraintRect) {
+ return kNoDomain_DomainMode;
+ } else {
+ filterHalfWidth = 0.f;
+ }
+ break;
+ case GrSamplerState::Filter::kBilerp:
+ filterHalfWidth = .5f;
+ break;
+ case GrSamplerState::Filter::kMipMap:
+ if (restrictFilterToRect || !proxyIsExact) {
+ // No domain can save us here.
+ return kTightCopy_DomainMode;
+ }
+ return kNoDomain_DomainMode;
+ }
+ } else {
+ // Bicubic fetches texels with nearest sampling internally but reads a 4x4 neighborhood,
+ // so its effective filter half-width is 1.5 texels.
+ filterHalfWidth = 1.5f;
+ }
+
+ // Both bilerp and bicubic filtering reach beyond the sample point and so need to be clamped
+ // to the center of the edge texel. Pinning to the texel center has no impact on nearest mode
+ // or MIP-maps.
+
+ static const SkScalar kDomainInset = 0.5f;
+ // Figure out the limits of pixels we're allowed to sample from.
+ // Unless we know the amount of outset and the texture matrix we have to conservatively enforce
+ // the domain.
+ if (restrictFilterToRect) {
+ *domainRect = constraintRect.makeInset(kDomainInset, kDomainInset);
+ } else if (!proxyIsExact) {
+ // If we got here then the proxy is not exact and we're allowed to filter across the
+ // constraint rect boundary. So we check whether the filter would reach across the edge
+ // of the proxy, and we will only set the sides that are required.
+
+ *domainRect = SkRectPriv::MakeLargest();
+ if (coordsLimitedToConstraintRect) {
+ // We may be able to use the fact that the texture coords are limited to the constraint
+ // rect in order to avoid having to add a domain.
+ bool needContentAreaConstraint = false;
+ if (proxyBounds.fRight - filterHalfWidth < constraintRect.fRight) {
+ domainRect->fRight = proxyBounds.fRight - kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (proxyBounds.fBottom - filterHalfWidth < constraintRect.fBottom) {
+ domainRect->fBottom = proxyBounds.fBottom - kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (!needContentAreaConstraint) {
+ return kNoDomain_DomainMode;
+ }
+ } else {
+ // Our sample coords for the texture are allowed to be outside the constraintRect so we
+ // don't consider it when computing the domain.
+ domainRect->fRight = proxyBounds.fRight - kDomainInset;
+ domainRect->fBottom = proxyBounds.fBottom - kDomainInset;
+ }
+ } else {
+ return kNoDomain_DomainMode;
+ }
+
+ if (domainRect->fLeft > domainRect->fRight) {
+ domainRect->fLeft = domainRect->fRight = SkScalarAve(domainRect->fLeft, domainRect->fRight);
+ }
+ if (domainRect->fTop > domainRect->fBottom) {
+ domainRect->fTop = domainRect->fBottom = SkScalarAve(domainRect->fTop, domainRect->fBottom);
+ }
+ return kDomain_DomainMode;
+}
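+
+// A worked sketch of the logic above (hypothetical values, not actual Skia usage): for a
+// 100x100 exact proxy, constraintRect = (0, 0, 50, 50), kYes_FilterConstraint, and bilerp,
+//     SkRect domain;
+//     DomainMode mode = DetermineDomainMode(SkRect::MakeWH(50, 50), kYes_FilterConstraint,
+//                                           /*coordsLimitedToConstraintRect=*/true, proxy,
+//                                           &kBilerpFilter, &domain);
+// yields kDomain_DomainMode with domain = (0.5, 0.5, 49.5, 49.5), i.e. the constraint rect
+// inset by kDomainInset. The same call with kNo_FilterConstraint and coords limited to the
+// constraint rect returns kNoDomain_DomainMode and leaves 'domain' unset.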
+
+std::unique_ptr<GrFragmentProcessor> GrTextureProducer::createFragmentProcessorForDomainAndFilter(
+ sk_sp<GrTextureProxy> proxy,
+ const SkMatrix& textureMatrix,
+ DomainMode domainMode,
+ const SkRect& domain,
+ const GrSamplerState::Filter* filterOrNullForBicubic) {
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ bool clampToBorderSupport = fContext->priv().caps()->clampToBorderSupport();
+ GrColorType srcColorType = this->colorType();
+ if (filterOrNullForBicubic) {
+ if (kDomain_DomainMode == domainMode || (fDomainNeedsDecal && !clampToBorderSupport)) {
+ GrTextureDomain::Mode wrapMode = fDomainNeedsDecal ? GrTextureDomain::kDecal_Mode
+ : GrTextureDomain::kClamp_Mode;
+ return GrTextureDomainEffect::Make(std::move(proxy), srcColorType, textureMatrix,
+ domain, wrapMode, *filterOrNullForBicubic);
+ } else {
+ GrSamplerState::WrapMode wrapMode =
+ fDomainNeedsDecal ? GrSamplerState::WrapMode::kClampToBorder
+ : GrSamplerState::WrapMode::kClamp;
+ GrSamplerState samplerState(wrapMode, *filterOrNullForBicubic);
+ return GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, textureMatrix,
+ samplerState);
+ }
+ } else {
+ static const GrSamplerState::WrapMode kClampClamp[] = {
+ GrSamplerState::WrapMode::kClamp, GrSamplerState::WrapMode::kClamp};
+ static const GrSamplerState::WrapMode kDecalDecal[] = {
+ GrSamplerState::WrapMode::kClampToBorder, GrSamplerState::WrapMode::kClampToBorder};
+
+ static constexpr auto kDir = GrBicubicEffect::Direction::kXY;
+ if (kDomain_DomainMode == domainMode || (fDomainNeedsDecal && !clampToBorderSupport)) {
+ GrTextureDomain::Mode wrapMode = fDomainNeedsDecal ? GrTextureDomain::kDecal_Mode
+ : GrTextureDomain::kClamp_Mode;
+ return GrBicubicEffect::Make(std::move(proxy), srcColorType, textureMatrix, kClampClamp,
+ wrapMode, wrapMode, kDir, this->alphaType(),
+ kDomain_DomainMode == domainMode ? &domain : nullptr);
+ } else {
+ return GrBicubicEffect::Make(std::move(proxy), srcColorType, textureMatrix,
+ fDomainNeedsDecal ? kDecalDecal : kClampClamp, kDir,
+ this->alphaType());
+ }
+ }
+}
+
+sk_sp<GrTextureProxy> GrTextureProducer::refTextureProxyForParams(
+ const GrSamplerState::Filter* filterOrNullForBicubic,
+ SkScalar scaleAdjust[2]) {
+ GrSamplerState sampler; // Default is nearest + clamp
+ if (filterOrNullForBicubic) {
+ sampler.setFilterMode(*filterOrNullForBicubic);
+ }
+ if (fDomainNeedsDecal) {
+ // Assuming hardware support, switch to clamp-to-border instead of clamp
+ if (fContext->priv().caps()->clampToBorderSupport()) {
+ sampler.setWrapModeX(GrSamplerState::WrapMode::kClampToBorder);
+ sampler.setWrapModeY(GrSamplerState::WrapMode::kClampToBorder);
+ }
+ }
+ return this->refTextureProxyForParams(sampler, scaleAdjust);
+}
+
+sk_sp<GrTextureProxy> GrTextureProducer::refTextureProxyForParams(
+ const GrSamplerState& sampler,
+ SkScalar scaleAdjust[2]) {
+ // Check that the caller pre-initialized scaleAdjust
+ SkASSERT(!scaleAdjust || (scaleAdjust[0] == 1 && scaleAdjust[1] == 1));
+ // Check that if the caller passed nullptr for scaleAdjust that we're in the case where there
+ // can be no scaling.
+ SkDEBUGCODE(bool expectNoScale = (sampler.filter() != GrSamplerState::Filter::kMipMap &&
+ !sampler.isRepeated()));
+ SkASSERT(scaleAdjust || expectNoScale);
+
+ int mipCount = SkMipMap::ComputeLevelCount(this->width(), this->height());
+ bool willBeMipped = GrSamplerState::Filter::kMipMap == sampler.filter() && mipCount &&
+ this->context()->priv().caps()->mipMapSupport();
+
+ auto result = this->onRefTextureProxyForParams(sampler, willBeMipped, scaleAdjust);
+
+ // Check that if we claimed the texture willBeMipped, the returned texture actually has mip
+ // maps, unless the format is not copyable.
+ SkASSERT(!result || !willBeMipped || result->mipMapped() == GrMipMapped::kYes ||
+ !this->context()->priv().caps()->isFormatCopyable(result->backendFormat()));
+
+ // Check that the "no scaling expected" case always returns a proxy of the same size as the
+ // producer.
+ SkASSERT(!result || !expectNoScale ||
+ (result->width() == this->width() && result->height() == this->height()));
+ return result;
+}
+
+sk_sp<GrTextureProxy> GrTextureProducer::refTextureProxy(GrMipMapped willNeedMips) {
+ GrSamplerState::Filter filter =
+ GrMipMapped::kNo == willNeedMips ? GrSamplerState::Filter::kNearest
+ : GrSamplerState::Filter::kMipMap;
+ GrSamplerState sampler(GrSamplerState::WrapMode::kClamp, filter);
+
+ int mipCount = SkMipMap::ComputeLevelCount(this->width(), this->height());
+ bool willBeMipped = GrSamplerState::Filter::kMipMap == sampler.filter() && mipCount &&
+ this->context()->priv().caps()->mipMapSupport();
+
+ auto result = this->onRefTextureProxyForParams(sampler, willBeMipped, nullptr);
+
+ // Check that if we claimed the texture willBeMipped, the returned texture actually has mip
+ // maps, unless the format is not copyable.
+ SkASSERT(!result || !willBeMipped || result->mipMapped() == GrMipMapped::kYes ||
+ !this->context()->priv().caps()->isFormatCopyable(result->backendFormat()));
+
+ // Check that no scaling occurred and we returned a proxy of the same size as the producer.
+ SkASSERT(!result || (result->width() == this->width() && result->height() == this->height()));
+ return result;
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureProducer.h b/gfx/skia/skia/src/gpu/GrTextureProducer.h
new file mode 100644
index 0000000000..d124bf7328
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProducer.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProducer_DEFINED
+#define GrTextureProducer_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/private/GrResourceKey.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrSamplerState.h"
+
+class GrFragmentProcessor;
+class GrRecordingContext;
+class GrTexture;
+class GrTextureProxy;
+class SkColorSpace;
+class SkMatrix;
+struct SkRect;
+
+/**
+ * Different GPUs and API extensions have different requirements with respect to what texture
+ * sampling parameters may be used with textures of various types. This class facilitates making
+ * a texture compatible with a given GrSamplerState. There are two immediate subclasses defined
+ * below. One is a base class for sources that are inherently texture-backed (e.g. a texture-backed
+ * SkImage). It supports subsetting the original texture. The other is for use cases where the
+ * source can generate a texture that represents some content (e.g. cpu pixels, SkPicture, ...).
+ */
+class GrTextureProducer : public SkNoncopyable {
+public:
+ struct CopyParams {
+ GrSamplerState::Filter fFilter;
+ int fWidth;
+ int fHeight;
+ };
+
+ enum FilterConstraint {
+ kYes_FilterConstraint,
+ kNo_FilterConstraint,
+ };
+
+ /**
+ * Helper for creating a fragment processor to sample the texture with a given filtering mode.
+ * It attempts to avoid making texture copies or using domains whenever possible.
+ *
+ * @param textureMatrix Matrix used to access the texture. It is applied to
+ * the local coords. The post-transformed coords should
+ * be in texel units (rather than normalized) with
+ * respect to this Producer's bounds (width()/height()).
+ * @param constraintRect A rect that represents the area of the texture to be
+ * sampled. It must be contained in the Producer's
+ * bounds as defined by width()/height().
+ * @param filterConstraint Indicates whether filtering is limited to
+ * constraintRect.
+ * @param coordsLimitedToConstraintRect Is it known that textureMatrix*localCoords is bound
+ * by the portion of the texture indicated by
+ * constraintRect (without consideration of filter
+ * width, just the raw coords).
+ * @param filterOrNullForBicubic If non-null indicates the filter mode. If null means
+ * use bicubic filtering.
+ **/
+ virtual std::unique_ptr<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrSamplerState::Filter* filterOrNullForBicubic) = 0;
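+
+ // Hypothetical usage (a sketch; the matrix and rect values are illustrative only):
+ //     GrSamplerState::Filter bilerp = GrSamplerState::Filter::kBilerp;
+ //     SkMatrix texMatrix = SkMatrix::I();  // post-transformed coords already in texel units
+ //     SkRect constraint = SkRect::MakeIWH(producer->width(), producer->height());
+ //     auto fp = producer->createFragmentProcessor(texMatrix, constraint, kNo_FilterConstraint,
+ //                                                 /*coordsLimitedToConstraintRect=*/false,
+ //                                                 &bilerp);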
+
+ /**
+ * Returns a texture that is safe for use with the params.
+ *
+ * If the size of the returned texture does not match width()/height() then the contents of the
+ * original may have been scaled to fit the texture or the original may have been copied into
+ * a subrect of the copy. 'scaleAdjust' must be applied to the normalized texture coordinates
+ * in order to correct for the latter case.
+ *
+ * If the GrSamplerState is known to clamp and use kNearest or kBilerp filter mode then the
+ * proxy will always be unscaled and nullptr can be passed for scaleAdjust. There is a weird
+ * contract that if scaleAdjust is not null it must be initialized to {1, 1} before calling
+ * this method. (TODO: Fix this and make this function always initialize scaleAdjust).
+ */
+ sk_sp<GrTextureProxy> refTextureProxyForParams(const GrSamplerState&,
+ SkScalar scaleAdjust[2]);
+
+ sk_sp<GrTextureProxy> refTextureProxyForParams(
+ const GrSamplerState::Filter* filterOrNullForBicubic, SkScalar scaleAdjust[2]);
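+
+ // Sketch of the scaleAdjust contract described above (hypothetical caller; mirrors the use
+ // in GrTextureMaker::createFragmentProcessor):
+ //     SkScalar scaleAdjust[2] = {1.f, 1.f};  // must be pre-initialized to {1, 1}
+ //     sk_sp<GrTextureProxy> proxy = this->refTextureProxyForParams(sampler, scaleAdjust);
+ //     texMatrix.postScale(scaleAdjust[0], scaleAdjust[1]);  // correct coords if a copy rescaled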
+
+ /**
+ * Returns a texture. If willNeedMips is true then the returned texture is guaranteed to have
+ * allocated mip map levels. This can be a performance win if future draws with the texture
+ * require mip maps.
+ */
+ // TODO: Once we remove support for npot textures, we should add a flag for must support repeat
+ // wrap mode. To support that flag now would require us to support scaleAdjust array like in
+ // refTextureProxyForParams, however the current public API that uses this call does not expose
+ // that array.
+ sk_sp<GrTextureProxy> refTextureProxy(GrMipMapped willNeedMips);
+
+ virtual ~GrTextureProducer() {}
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ const GrColorInfo& colorInfo() const { return fColorInfo; }
+ GrColorType colorType() const { return fColorInfo.colorType(); }
+ SkAlphaType alphaType() const { return fColorInfo.alphaType(); }
+ SkColorSpace* colorSpace() const { return fColorInfo.colorSpace(); }
+ bool isAlphaOnly() const { return GrColorTypeIsAlphaOnly(fColorInfo.colorType()); }
+ bool domainNeedsDecal() const { return fDomainNeedsDecal; }
+ // If the "texture" samples multiple images that have different resolutions (e.g. YUV420)
+ virtual bool hasMixedResolutions() const { return false; }
+
+protected:
+ friend class GrTextureProducer_TestAccess;
+
+ GrTextureProducer(GrRecordingContext* context, int width, int height,
+ const GrColorInfo& colorInfo, bool domainNeedsDecal)
+ : fContext(context)
+ , fWidth(width)
+ , fHeight(height)
+ , fColorInfo(colorInfo)
+ , fDomainNeedsDecal(domainNeedsDecal) {}
+
+ /** Helper for creating a key for a copy from an original key. */
+ static void MakeCopyKeyFromOrigKey(const GrUniqueKey& origKey,
+ const CopyParams& copyParams,
+ GrUniqueKey* copyKey) {
+ SkASSERT(!copyKey->isValid());
+ if (origKey.isValid()) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(copyKey, origKey, kDomain, 3);
+ builder[0] = static_cast<uint32_t>(copyParams.fFilter);
+ builder[1] = copyParams.fWidth;
+ builder[2] = copyParams.fHeight;
+ }
+ }
+
+ /**
+ * If we need to make a copy in order to be compatible with GrTextureParams, the producer is asked
+ * to return a key that identifies its original content + the CopyParams parameter. If the producer
+ * does not want to cache the stretched version (e.g. the producer is volatile), this should
+ * simply return without initializing the copyKey. If the texture generated by this producer
+ * depends on the destination color space, then that information should also be incorporated
+ * in the key.
+ */
+ virtual void makeCopyKey(const CopyParams&, GrUniqueKey* copyKey) = 0;
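+
+ // A hypothetical override can delegate to MakeCopyKeyFromOrigKey above (sketch only):
+ //     void MyProducer::makeCopyKey(const CopyParams& params, GrUniqueKey* copyKey) override {
+ //         GrUniqueKey origKey = this->originalContentKey();  // hypothetical accessor
+ //         MakeCopyKeyFromOrigKey(origKey, params, copyKey);  // no-op when origKey is invalid
+ //     }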
+
+ /**
+ * If a stretched version of the texture is generated, it may be cached (assuming that
+ * makeCopyKey() initialized a valid copyKey). In that case, the maker is notified so it can
+ * record the key for when the maker is destroyed.
+ */
+ virtual void didCacheCopy(const GrUniqueKey& copyKey, uint32_t contextUniqueID) = 0;
+
+ enum DomainMode {
+ kNoDomain_DomainMode,
+ kDomain_DomainMode,
+ kTightCopy_DomainMode
+ };
+
+ // This can draw to accomplish the copy, thus the recording context is needed
+ static sk_sp<GrTextureProxy> CopyOnGpu(GrRecordingContext*,
+ sk_sp<GrTextureProxy> inputProxy,
+ GrColorType,
+ const CopyParams& copyParams,
+ bool dstWillRequireMipMaps);
+
+ static DomainMode DetermineDomainMode(const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ GrTextureProxy*,
+ const GrSamplerState::Filter* filterModeOrNullForBicubic,
+ SkRect* domainRect);
+
+ std::unique_ptr<GrFragmentProcessor> createFragmentProcessorForDomainAndFilter(
+ sk_sp<GrTextureProxy> proxy,
+ const SkMatrix& textureMatrix,
+ DomainMode,
+ const SkRect& domain,
+ const GrSamplerState::Filter* filterOrNullForBicubic);
+
+ GrRecordingContext* context() const { return fContext; }
+
+private:
+ virtual sk_sp<GrTextureProxy> onRefTextureProxyForParams(const GrSamplerState&,
+ bool willBeMipped,
+ SkScalar scaleAdjust[2]) = 0;
+
+ GrRecordingContext* fContext;
+ const int fWidth;
+ const int fHeight;
+ const GrColorInfo fColorInfo;
+ // If true, any domain effect uses kDecal instead of kClamp, and sampler filter uses
+ // kClampToBorder instead of kClamp.
+ const bool fDomainNeedsDecal;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureProxy.cpp b/gfx/skia/skia/src/gpu/GrTextureProxy.cpp
new file mode 100644
index 0000000000..ba8530e32d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProxy.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDeferredProxyUploader.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTexturePriv.h"
+
+// Deferred version - no data
+GrTextureProxy::GrTextureProxy(const GrBackendFormat& format,
+ const GrSurfaceDesc& srcDesc,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ GrMipMapsStatus mipMapsStatus,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : INHERITED(format, srcDesc, GrRenderable::kNo, origin, textureSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator)
+ , fMipMapped(mipMapped)
+ , fMipMapsStatus(mipMapsStatus) SkDEBUGCODE(, fInitialMipMapsStatus(fMipMapsStatus))
+ , fProxyProvider(nullptr)
+ , fDeferredUploader(nullptr) {}
+
+// Lazy-callback version
+GrTextureProxy::GrTextureProxy(LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ GrMipMapsStatus mipMapsStatus,
+ const GrSwizzle& texSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : INHERITED(std::move(callback), format, desc, GrRenderable::kNo, origin, texSwizzle, fit,
+ budgeted, isProtected, surfaceFlags, useAllocator)
+ , fMipMapped(mipMapped)
+ , fMipMapsStatus(mipMapsStatus) SkDEBUGCODE(, fInitialMipMapsStatus(fMipMapsStatus))
+ , fProxyProvider(nullptr)
+ , fDeferredUploader(nullptr) {}
+
+// Wrapped version
+GrTextureProxy::GrTextureProxy(sk_sp<GrSurface> surf,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& textureSwizzle,
+ UseAllocator useAllocator)
+ : INHERITED(std::move(surf), origin, textureSwizzle, SkBackingFit::kExact, useAllocator)
+ , fMipMapped(fTarget->asTexture()->texturePriv().mipMapped())
+ , fMipMapsStatus(fTarget->asTexture()->texturePriv().mipMapsStatus())
+ SkDEBUGCODE(, fInitialMipMapsStatus(fMipMapsStatus))
+ , fProxyProvider(nullptr)
+ , fDeferredUploader(nullptr) {
+ if (fTarget->getUniqueKey().isValid()) {
+ fProxyProvider = fTarget->asTexture()->getContext()->priv().proxyProvider();
+ fProxyProvider->adoptUniqueKeyFromSurface(this, fTarget.get());
+ }
+}
+
+GrTextureProxy::~GrTextureProxy() {
+ // Due to the order of cleanup, the GrSurface this proxy may have wrapped may have gone away
+ // at this point. Zero out the pointer so the cache invalidation code doesn't try to use it.
+ fTarget = nullptr;
+
+ // In DDL-mode, uniquely keyed proxies keep their key even after their originating
+ // proxy provider has gone away. In that case there is no one to send the invalid key
+ // message to (Note: in this case we don't want to remove its cached resource).
+ if (fUniqueKey.isValid() && fProxyProvider) {
+ fProxyProvider->processInvalidUniqueKey(fUniqueKey, this,
+ GrProxyProvider::InvalidateGPUResource::kNo);
+ } else {
+ SkASSERT(!fProxyProvider);
+ }
+}
+
+bool GrTextureProxy::instantiate(GrResourceProvider* resourceProvider) {
+ if (this->isLazy()) {
+ return false;
+ }
+ if (!this->instantiateImpl(resourceProvider, 1, /* needsStencil = */ false, GrRenderable::kNo,
+ fMipMapped, fUniqueKey.isValid() ? &fUniqueKey : nullptr)) {
+ return false;
+ }
+
+ SkASSERT(!this->peekRenderTarget());
+ SkASSERT(this->peekTexture());
+ return true;
+}
+
+sk_sp<GrSurface> GrTextureProxy::createSurface(GrResourceProvider* resourceProvider) const {
+ sk_sp<GrSurface> surface =
+ this->createSurfaceImpl(resourceProvider, 1,
+ /* needsStencil = */ false, GrRenderable::kNo, fMipMapped);
+ if (!surface) {
+ return nullptr;
+ }
+
+ SkASSERT(!surface->asRenderTarget());
+ SkASSERT(surface->asTexture());
+ return surface;
+}
+
+void GrTextureProxyPriv::setDeferredUploader(std::unique_ptr<GrDeferredProxyUploader> uploader) {
+ SkASSERT(!fTextureProxy->fDeferredUploader);
+ fTextureProxy->fDeferredUploader = std::move(uploader);
+}
+
+void GrTextureProxyPriv::scheduleUpload(GrOpFlushState* flushState) {
+ // The texture proxy's contents may already have been uploaded or instantiation may have failed
+ if (fTextureProxy->fDeferredUploader && fTextureProxy->isInstantiated()) {
+ fTextureProxy->fDeferredUploader->scheduleUpload(flushState, fTextureProxy);
+ }
+}
+
+void GrTextureProxyPriv::resetDeferredUploader() {
+ SkASSERT(fTextureProxy->fDeferredUploader);
+ fTextureProxy->fDeferredUploader.reset();
+}
+
+GrSamplerState::Filter GrTextureProxy::highestFilterMode() const {
+ return this->hasRestrictedSampling() ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kMipMap;
+}
+
+GrMipMapped GrTextureProxy::mipMapped() const {
+ if (this->isInstantiated()) {
+ return this->peekTexture()->texturePriv().mipMapped();
+ }
+ return fMipMapped;
+}
+
+size_t GrTextureProxy::onUninstantiatedGpuMemorySize(const GrCaps& caps) const {
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ 1, this->proxyMipMapped(), !this->priv().isExact());
+}
+
+bool GrTextureProxy::ProxiesAreCompatibleAsDynamicState(const GrTextureProxy* first,
+ const GrTextureProxy* second) {
+ return first->config() == second->config() &&
+ first->textureType() == second->textureType() &&
+ first->backendFormat() == second->backendFormat();
+}
+
+void GrTextureProxy::setUniqueKey(GrProxyProvider* proxyProvider, const GrUniqueKey& key) {
+ SkASSERT(key.isValid());
+ SkASSERT(!fUniqueKey.isValid()); // proxies can only ever get one uniqueKey
+
+ if (fTarget && fSyncTargetKey) {
+ if (!fTarget->getUniqueKey().isValid()) {
+ fTarget->resourcePriv().setUniqueKey(key);
+ }
+ SkASSERT(fTarget->getUniqueKey() == key);
+ }
+
+ fUniqueKey = key;
+ fProxyProvider = proxyProvider;
+}
+
+void GrTextureProxy::clearUniqueKey() {
+ fUniqueKey.reset();
+ fProxyProvider = nullptr;
+}
+
+#ifdef SK_DEBUG
+void GrTextureProxy::onValidateSurface(const GrSurface* surface) {
+ SkASSERT(!surface->asRenderTarget());
+
+ // Anything that is checked here should be duplicated in GrTextureRenderTargetProxy's version
+ SkASSERT(surface->asTexture());
+ // It is possible to fulfill a non-mipmapped proxy with a mipmapped texture.
+ SkASSERT(GrMipMapped::kNo == this->proxyMipMapped() ||
+ GrMipMapped::kYes == surface->asTexture()->texturePriv().mipMapped());
+
+ SkASSERT(surface->asTexture()->texturePriv().textureType() == this->textureType());
+
+ GrInternalSurfaceFlags proxyFlags = fSurfaceFlags;
+ GrInternalSurfaceFlags surfaceFlags = surface->surfacePriv().flags();
+ SkASSERT(((int)proxyFlags & kGrInternalTextureFlagsMask) ==
+ ((int)surfaceFlags & kGrInternalTextureFlagsMask));
+}
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrTextureProxy.h b/gfx/skia/skia/src/gpu/GrTextureProxy.h
new file mode 100644
index 0000000000..1718794815
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProxy.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProxy_DEFINED
+#define GrTextureProxy_DEFINED
+
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/GrSurfaceProxy.h"
+
+class GrCaps;
+class GrDeferredProxyUploader;
+class GrProxyProvider;
+class GrResourceProvider;
+class GrTextureProxyPriv;
+
+// This class delays the acquisition of textures until they are actually required
+class GrTextureProxy : virtual public GrSurfaceProxy {
+public:
+ GrTextureProxy* asTextureProxy() override { return this; }
+ const GrTextureProxy* asTextureProxy() const override { return this; }
+
+ // Actually instantiate the backing texture, if necessary
+ bool instantiate(GrResourceProvider*) override;
+
+ GrSamplerState::Filter highestFilterMode() const;
+
+ // If we are instantiated and have a target, return the mip state of that target. Otherwise
+ // return the proxy's mip state from creation time. This is useful for lazy proxies which may
+ // claim not to need mips at creation time, but where instantiation happens to give us a mipped
+ // target. In that case we should use it to avoid possible copies/mip
+ // generation later.
+ GrMipMapped mipMapped() const;
+
+ bool mipMapsAreDirty() const {
+ SkASSERT((GrMipMapped::kNo == fMipMapped) ==
+ (GrMipMapsStatus::kNotAllocated == fMipMapsStatus));
+ return GrMipMapped::kYes == fMipMapped && GrMipMapsStatus::kValid != fMipMapsStatus;
+ }
+ void markMipMapsDirty() {
+ SkASSERT(GrMipMapped::kYes == fMipMapped);
+ fMipMapsStatus = GrMipMapsStatus::kDirty;
+ }
+ void markMipMapsClean() {
+ SkASSERT(GrMipMapped::kYes == fMipMapped);
+ fMipMapsStatus = GrMipMapsStatus::kValid;
+ }
+
+ // Returns the GrMipMapped value of the proxy from creation time regardless of whether it has
+ // been instantiated or not.
+ GrMipMapped proxyMipMapped() const { return fMipMapped; }
+
+ GrTextureType textureType() const { return this->backendFormat().textureType(); }
+
+ /** If true then the texture does not support MIP maps and only supports clamp wrap mode. */
+ bool hasRestrictedSampling() const {
+ return GrTextureTypeHasRestrictedSampling(this->textureType());
+ }
+
+ // Returns true if the passed in proxies can be used as dynamic state together when flushing
+ // draws to the gpu.
+ static bool ProxiesAreCompatibleAsDynamicState(const GrTextureProxy* first,
+ const GrTextureProxy* second);
+
+ /**
+ * Return the texture proxy's unique key. It will be invalid if the proxy doesn't have one.
+ */
+ const GrUniqueKey& getUniqueKey() const {
+#ifdef SK_DEBUG
+ if (this->isInstantiated() && fUniqueKey.isValid() && fSyncTargetKey) {
+ GrSurface* surface = this->peekSurface();
+ SkASSERT(surface);
+
+ SkASSERT(surface->getUniqueKey().isValid());
+ // It is possible for a non-keyed proxy to have a uniquely keyed resource assigned to
+ // it. This just means that a future user of the resource will be filling it with unique
+ // data. However, if the proxy has a unique key, its attached resource should also
+ // have that key.
+ SkASSERT(fUniqueKey == surface->getUniqueKey());
+ }
+#endif
+
+ return fUniqueKey;
+ }
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by the cache.
+ */
+ class CacheAccess;
+ inline CacheAccess cacheAccess();
+ inline const CacheAccess cacheAccess() const;
+
+ // Provides access to special purpose functions.
+ GrTextureProxyPriv texPriv();
+ const GrTextureProxyPriv texPriv() const;
+
+protected:
+ // DDL TODO: rm the GrSurfaceProxy friending
+ friend class GrSurfaceProxy; // for ctors
+ friend class GrProxyProvider; // for ctors
+ friend class GrTextureProxyPriv;
+ friend class GrSurfaceProxyPriv; // ability to change key sync state after lazy instantiation.
+
+ // Deferred version - no data.
+ GrTextureProxy(const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ GrMipMapsStatus,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ // Lazy-callback version
+ // There are two main use cases for lazily-instantiated proxies:
+ // basic knowledge - width, height, config, origin are known
+ // minimal knowledge - only config is known.
+ //
+ // The basic knowledge version is used for DDL where we know the type of proxy we are going to
+ // use, but we don't have access to the GPU yet to instantiate it.
+ //
+ // The minimal knowledge version is used for CCPR where we are generating an atlas but we do not
+ // know the final size until flush time.
+ GrTextureProxy(LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc& desc,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ GrMipMapsStatus,
+ const GrSwizzle& textureSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ // Wrapped version
+ GrTextureProxy(sk_sp<GrSurface>, GrSurfaceOrigin, const GrSwizzle&, UseAllocator);
+
+ ~GrTextureProxy() override;
+
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
+
+ void setTargetKeySync(bool sync) { fSyncTargetKey = sync; }
+
+private:
+ // WARNING: Be careful when adding or removing fields here. ASAN is likely to trigger warnings
+ // when instantiating GrTextureRenderTargetProxy. The std::function in GrSurfaceProxy makes
+ // each class in the diamond require 16 byte alignment. Clang appears to layout the fields for
+ // each class to achieve the necessary alignment. However, ASAN checks the alignment of 'this'
+ // in the constructors, and always looks for the full 16 byte alignment, even if the fields in
+ // that particular class don't require it. Changing the size of this object can move the start
+ // address of other types, leading to this problem.
+
+ GrMipMapped fMipMapped;
+
+ // This tracks the mipmap status at the proxy level and is thus somewhat distinct from the
+ // backing GrTexture's mipmap status. In particular, this status is used to determine when
+ // mipmap levels need to be explicitly regenerated during the execution of a DAG of opsTasks.
+ GrMipMapsStatus fMipMapsStatus;
+ // TEMPORARY: We are in the process of moving GrMipMapsStatus from the texture to the proxy.
+ // We track the fInitialMipMapsStatus here so we can assert that the proxy did indeed expect
+ // the correct mipmap status immediately after instantiation.
+ //
+ // NOTE: fMipMapsStatus may no longer be equal to fInitialMipMapsStatus by the time the texture
+ // is instantiated, since it tracks mipmaps in the time frame in which the DAG is being built.
+ SkDEBUGCODE(const GrMipMapsStatus fInitialMipMapsStatus);
+
+ bool fSyncTargetKey = true; // Should target's unique key be sync'ed with ours.
+
+ GrUniqueKey fUniqueKey;
+ GrProxyProvider* fProxyProvider; // only set when fUniqueKey is valid
+
+ // Only used for proxies whose contents are being prepared on a worker thread. This object
+ // stores the texture data, allowing the proxy to remain uninstantiated until flush. At that
+ // point, the proxy is instantiated, and this data is used to perform an ASAP upload.
+ std::unique_ptr<GrDeferredProxyUploader> fDeferredUploader;
+
+ size_t onUninstantiatedGpuMemorySize(const GrCaps&) const override;
+
+ // Methods made available via GrTextureProxy::CacheAccess
+ void setUniqueKey(GrProxyProvider*, const GrUniqueKey&);
+ void clearUniqueKey();
+
+ SkDEBUGCODE(void onValidateSurface(const GrSurface*) override;)
+
+ typedef GrSurfaceProxy INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureProxyCacheAccess.h b/gfx/skia/skia/src/gpu/GrTextureProxyCacheAccess.h
new file mode 100644
index 0000000000..26478c6101
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProxyCacheAccess.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProxyCacheAccess_DEFINED
+#define GrTextureProxyCacheAccess_DEFINED
+
+#include "src/gpu/GrTextureProxy.h"
+
+/**
+ * This class allows GrResourceCache increased privileged access to GrTextureProxy objects.
+ */
+class GrTextureProxy::CacheAccess {
+private:
+ void setUniqueKey(GrProxyProvider* proxyProvider, const GrUniqueKey& key) {
+ fTextureProxy->setUniqueKey(proxyProvider, key);
+ }
+
+ void clearUniqueKey() {
+ fTextureProxy->clearUniqueKey();
+ }
+
+ explicit CacheAccess(GrTextureProxy* textureProxy) : fTextureProxy(textureProxy) {}
+ CacheAccess(const CacheAccess&) {} // unimpl
+ CacheAccess& operator=(const CacheAccess&); // unimpl
+
+ // No taking addresses of this type.
+ const CacheAccess* operator&() const;
+ CacheAccess* operator&();
+
+ GrTextureProxy* fTextureProxy;
+
+ friend class GrTextureProxy; // to construct/copy this type.
+ friend class GrProxyProvider; // to use this type
+};
+
+inline GrTextureProxy::CacheAccess GrTextureProxy::cacheAccess() { return CacheAccess(this); }
+
+inline const GrTextureProxy::CacheAccess GrTextureProxy::cacheAccess() const {
+ return CacheAccess(const_cast<GrTextureProxy*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureProxyPriv.h b/gfx/skia/skia/src/gpu/GrTextureProxyPriv.h
new file mode 100644
index 0000000000..1e64194523
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProxyPriv.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProxyPriv_DEFINED
+#define GrTextureProxyPriv_DEFINED
+
+#include "src/gpu/GrTextureProxy.h"
+
+class GrDeferredProxyUploader;
+class GrOpFlushState;
+
+/**
+ * This class hides the more specialized capabilities of GrTextureProxy.
+ */
+class GrTextureProxyPriv {
+public:
+ // Attach a deferred uploader to the proxy. Holds data being prepared by a worker thread.
+ void setDeferredUploader(std::unique_ptr<GrDeferredProxyUploader>);
+ bool isDeferred() const { return SkToBool(fTextureProxy->fDeferredUploader.get()); }
+ // For a deferred proxy (one that has a deferred uploader attached), this schedules an ASAP
+ // upload of that data to the instantiated texture.
+ void scheduleUpload(GrOpFlushState*);
+ // Clears any deferred uploader object on the proxy. Used to free the CPU data after the
+ // contents have been uploaded.
+ void resetDeferredUploader();
+
+private:
+ explicit GrTextureProxyPriv(GrTextureProxy* textureProxy) : fTextureProxy(textureProxy) {}
+ GrTextureProxyPriv(const GrTextureProxyPriv&) {} // unimpl
+ GrTextureProxyPriv& operator=(const GrTextureProxyPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrTextureProxyPriv* operator&() const;
+ GrTextureProxyPriv* operator&();
+
+ GrTextureProxy* fTextureProxy;
+
+ friend class GrTextureProxy; // to construct/copy this type.
+};
+
+inline GrTextureProxyPriv GrTextureProxy::texPriv() { return GrTextureProxyPriv(this); }
+
+inline const GrTextureProxyPriv GrTextureProxy::texPriv() const {
+ return GrTextureProxyPriv(const_cast<GrTextureProxy*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.cpp b/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.cpp
new file mode 100644
index 0000000000..ae70c0fa76
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTextureRenderTargetProxy.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetProxyPriv.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+
+// Deferred version
+// This class is virtually derived from GrSurfaceProxy (via both GrTextureProxy and
+// GrRenderTargetProxy) so its constructor must be explicitly called.
+GrTextureRenderTargetProxy::GrTextureRenderTargetProxy(const GrCaps& caps,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ GrMipMapsStatus mipMapsStatus,
+ const GrSwizzle& texSwizzle,
+ const GrSwizzle& outSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : GrSurfaceProxy(format, desc, GrRenderable::kYes, origin, texSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator)
+ // for now textures w/ data are always wrapped
+ , GrRenderTargetProxy(caps, format, desc, sampleCnt, origin, texSwizzle, outSwizzle, fit,
+ budgeted, isProtected, surfaceFlags, useAllocator)
+ , GrTextureProxy(format, desc, origin, mipMapped, mipMapsStatus, texSwizzle, fit, budgeted,
+ isProtected, surfaceFlags, useAllocator) {
+ this->initSurfaceFlags(caps);
+}
+
+// Lazy-callback version
+GrTextureRenderTargetProxy::GrTextureRenderTargetProxy(const GrCaps& caps,
+ LazyInstantiateCallback&& callback,
+ const GrBackendFormat& format,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ GrSurfaceOrigin origin,
+ GrMipMapped mipMapped,
+ GrMipMapsStatus mipMapsStatus,
+ const GrSwizzle& texSwizzle,
+ const GrSwizzle& outSwizzle,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ GrInternalSurfaceFlags surfaceFlags,
+ UseAllocator useAllocator)
+ : GrSurfaceProxy(std::move(callback), format, desc, GrRenderable::kYes, origin, texSwizzle,
+ fit, budgeted, isProtected, surfaceFlags, useAllocator)
+ // Since we have virtual inheritance, we initialize GrSurfaceProxy directly. Send null
+ // callbacks to the texture and RT proxies simply to route to the appropriate constructors.
+ , GrRenderTargetProxy(LazyInstantiateCallback(), format, desc, sampleCnt, origin,
+ texSwizzle, outSwizzle, fit, budgeted, isProtected, surfaceFlags,
+ useAllocator, WrapsVkSecondaryCB::kNo)
+ , GrTextureProxy(LazyInstantiateCallback(), format, desc, origin, mipMapped, mipMapsStatus,
+ texSwizzle, fit, budgeted, isProtected, surfaceFlags, useAllocator) {
+ this->initSurfaceFlags(caps);
+}
+
+// Wrapped version
+// This class is virtually derived from GrSurfaceProxy (via both GrTextureProxy and
+// GrRenderTargetProxy) so its constructor must be explicitly called.
+GrTextureRenderTargetProxy::GrTextureRenderTargetProxy(sk_sp<GrSurface> surf,
+ GrSurfaceOrigin origin,
+ const GrSwizzle& texSwizzle,
+ const GrSwizzle& outSwizzle,
+ UseAllocator useAllocator)
+ : GrSurfaceProxy(surf, origin, texSwizzle, SkBackingFit::kExact, useAllocator)
+ , GrRenderTargetProxy(surf, origin, texSwizzle, outSwizzle, useAllocator)
+ , GrTextureProxy(surf, origin, texSwizzle, useAllocator) {
+ SkASSERT(surf->asTexture());
+ SkASSERT(surf->asRenderTarget());
+ SkASSERT(fSurfaceFlags == fTarget->surfacePriv().flags());
+ SkASSERT((this->numSamples() <= 1 ||
+ fTarget->getContext()->priv().caps()->msaaResolvesAutomatically()) !=
+ this->requiresManualMSAAResolve());
+}
+
+void GrTextureRenderTargetProxy::initSurfaceFlags(const GrCaps& caps) {
+ // FBO 0 should never be wrapped as a texture render target.
+ SkASSERT(!this->rtPriv().glRTFBOIDIs0());
+ if (this->numSamples() > 1 && !caps.msaaResolvesAutomatically()) {
+ // MSAA texture-render-targets always require manual resolve if we are not using a
+ // multisampled-render-to-texture extension.
+ //
+ // NOTE: This is the only instance where we need to set the manual resolve flag on a proxy.
+ // Any other proxies that require manual resolve (e.g., wrapBackendTextureAsRenderTarget())
+ // will be wrapped, and the wrapped version of the GrSurface constructor will automatically
+ // get the manual resolve flag when copying the target GrSurface's flags.
+ fSurfaceFlags |= GrInternalSurfaceFlags::kRequiresManualMSAAResolve;
+ }
+}
+
+size_t GrTextureRenderTargetProxy::onUninstantiatedGpuMemorySize(const GrCaps& caps) const {
+ int colorSamplesPerPixel = this->numSamples();
+ if (colorSamplesPerPixel > 1) {
+ // Add one to account for the resolve buffer.
+ ++colorSamplesPerPixel;
+ }
+
+ // TODO: do we have enough information to improve this worst case estimate?
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ colorSamplesPerPixel, this->proxyMipMapped(),
+ !this->priv().isExact());
+}
+
+bool GrTextureRenderTargetProxy::instantiate(GrResourceProvider* resourceProvider) {
+ if (this->isLazy()) {
+ return false;
+ }
+
+ const GrUniqueKey& key = this->getUniqueKey();
+
+ if (!this->instantiateImpl(resourceProvider, this->numSamples(), this->numStencilSamples(),
+ GrRenderable::kYes, this->mipMapped(),
+ key.isValid() ? &key : nullptr)) {
+ return false;
+ }
+ if (key.isValid()) {
+ SkASSERT(key == this->getUniqueKey());
+ }
+
+ SkASSERT(this->peekRenderTarget());
+ SkASSERT(this->peekTexture());
+
+ return true;
+}
+
+sk_sp<GrSurface> GrTextureRenderTargetProxy::createSurface(
+ GrResourceProvider* resourceProvider) const {
+ sk_sp<GrSurface> surface =
+ this->createSurfaceImpl(resourceProvider, this->numSamples(), this->numStencilSamples(),
+ GrRenderable::kYes, this->mipMapped());
+ if (!surface) {
+ return nullptr;
+ }
+ SkASSERT(surface->asRenderTarget());
+ SkASSERT(surface->asTexture());
+
+ return surface;
+}
+
+#ifdef SK_DEBUG
+void GrTextureRenderTargetProxy::onValidateSurface(const GrSurface* surface) {
+ // Anything checked here should also be checking the GrTextureProxy version
+ SkASSERT(surface->asTexture());
+ SkASSERT(GrMipMapped::kNo == this->proxyMipMapped() ||
+ GrMipMapped::kYes == surface->asTexture()->texturePriv().mipMapped());
+
+ // Anything checked here should also be checking the GrRenderTargetProxy version
+ SkASSERT(surface->asRenderTarget());
+ SkASSERT(surface->asRenderTarget()->numSamples() == this->numSamples());
+
+ SkASSERT(surface->asTexture()->texturePriv().textureType() == this->textureType());
+
+ GrInternalSurfaceFlags proxyFlags = fSurfaceFlags;
+ GrInternalSurfaceFlags surfaceFlags = surface->surfacePriv().flags();
+
+ // Only non-RT textures can be read only.
+ SkASSERT(!(proxyFlags & GrInternalSurfaceFlags::kReadOnly));
+ SkASSERT(!(surfaceFlags & GrInternalSurfaceFlags::kReadOnly));
+
+ SkASSERT(((int)proxyFlags & kGrInternalTextureRenderTargetFlagsMask) ==
+ ((int)surfaceFlags & kGrInternalTextureRenderTargetFlagsMask));
+}
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.h b/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.h
new file mode 100644
index 0000000000..46c273d6a2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureRenderTargetProxy.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureRenderTargetProxy_DEFINED
+#define GrTextureRenderTargetProxy_DEFINED
+
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrTextureProxy.h"
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives warnings about inheriting asTextureProxy/asRenderTargetProxy via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+// This class delays the acquisition of RenderTargets that are also textures until
+// they are actually required.
+// Beware: the uniqueID of the TextureRenderTargetProxy will usually be different from
+// the uniqueID of the RenderTarget/Texture it represents!
+class GrTextureRenderTargetProxy : public GrRenderTargetProxy, public GrTextureProxy {
+private:
+ // DDL TODO: rm the GrSurfaceProxy friending
+ friend class GrSurfaceProxy; // for ctors
+ friend class GrProxyProvider; // for ctors
+
+ // Deferred version
+ GrTextureRenderTargetProxy(const GrCaps&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ GrMipMapsStatus,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ // Lazy-callback version
+ GrTextureRenderTargetProxy(const GrCaps&,
+ LazyInstantiateCallback&&,
+ const GrBackendFormat&,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ GrSurfaceOrigin,
+ GrMipMapped,
+ GrMipMapsStatus,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ SkBackingFit,
+ SkBudgeted,
+ GrProtected,
+ GrInternalSurfaceFlags,
+ UseAllocator);
+
+ // Wrapped version
+ GrTextureRenderTargetProxy(sk_sp<GrSurface>,
+ GrSurfaceOrigin,
+ const GrSwizzle& textureSwizzle,
+ const GrSwizzle& outputSwizzle,
+ UseAllocator);
+
+ void initSurfaceFlags(const GrCaps&);
+
+ bool instantiate(GrResourceProvider*) override;
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
+
+ size_t onUninstantiatedGpuMemorySize(const GrCaps&) const override;
+
+ SkDEBUGCODE(void onValidateSurface(const GrSurface*) override;)
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureResolveManager.h b/gfx/skia/skia/src/gpu/GrTextureResolveManager.h
new file mode 100644
index 0000000000..f610c6b64a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureResolveManager.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureResolveManager_DEFINED
+#define GrTextureResolveManager_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrDrawingManager.h"
+
+class GrCaps;
+class GrDrawingManager;
+class GrRenderTask;
+
+/*
+ * This class is a shallow view of the drawing manager. It is passed to render tasks when setting up
+ * the dependency DAG, and gives them limited access to functionality for making new tasks that
+ * regenerate mipmaps and/or resolve MSAA.
+ */
+class GrTextureResolveManager {
+public:
+ explicit GrTextureResolveManager(GrDrawingManager* drawingManager)
+ : fDrawingManager(drawingManager) {}
+
+ GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps& caps) const {
+ SkASSERT(fDrawingManager);
+ return fDrawingManager->newTextureResolveRenderTask(caps);
+ }
+
+private:
+ GrDrawingManager* fDrawingManager;
+};
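+
+// A sketch of typical usage (hypothetical call site): a render task that discovers one of
+// its inputs needs an MSAA resolve or mipmap regeneration could request a new task, e.g.:
+//
+//   GrTextureResolveRenderTask* resolveTask =
+//           textureResolveManager.newTextureResolveRenderTask(caps);
+//
+// and then register the proxies needing resolve with that task.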
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.cpp b/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.cpp
new file mode 100644
index 0000000000..e12bbc8e30
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTextureResolveRenderTask.h"
+
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrResourceAllocator.h"
+#include "src/gpu/GrTexturePriv.h"
+
+GrTextureResolveRenderTask::~GrTextureResolveRenderTask() {
+ for (const auto& resolve : fResolves) {
+ // Ensure the proxy doesn't keep hold of a dangling back pointer.
+ resolve.fProxy->setLastRenderTask(nullptr);
+ }
+}
+
+void GrTextureResolveRenderTask::addProxy(
+ sk_sp<GrSurfaceProxy> proxyHolder, GrSurfaceProxy::ResolveFlags flags, const GrCaps& caps) {
+ fResolves.emplace_back(std::move(proxyHolder), flags);
+ GrSurfaceProxy* proxy = fResolves.back().fProxy.get();
+
+ // Ensure the last render task that operated on the proxy is closed. That's where msaa and
+ // mipmaps should have been marked dirty.
+ SkASSERT(!proxy->getLastRenderTask() || proxy->getLastRenderTask()->isClosed());
+ SkASSERT(GrSurfaceProxy::ResolveFlags::kNone != flags);
+
+ if (GrSurfaceProxy::ResolveFlags::kMSAA & flags) {
+ GrRenderTargetProxy* renderTargetProxy = proxy->asRenderTargetProxy();
+ SkASSERT(renderTargetProxy);
+ SkASSERT(renderTargetProxy->isMSAADirty());
+ fResolves.back().fMSAAResolveRect = renderTargetProxy->msaaDirtyRect();
+ renderTargetProxy->markMSAAResolved();
+ }
+
+ if (GrSurfaceProxy::ResolveFlags::kMipMaps & flags) {
+ GrTextureProxy* textureProxy = proxy->asTextureProxy();
+ SkASSERT(GrMipMapped::kYes == textureProxy->mipMapped());
+ SkASSERT(textureProxy->mipMapsAreDirty());
+ textureProxy->markMipMapsClean();
+ }
+
+ // Add the proxy as a dependency: We will read the existing contents of this texture while
+ // generating mipmap levels and/or resolving MSAA.
+ this->addDependency(proxy, GrMipMapped::kNo, GrTextureResolveManager(nullptr), caps);
+ proxy->setLastRenderTask(this);
+}
+
+void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+ // This renderTask doesn't have "normal" ops, however we still need to add intervals so
+ // fEndOfOpsTaskOpIndices will remain in sync. We create fake op#'s to capture the fact that we
+ // manipulate the resolve proxies.
+ auto fakeOp = alloc->curOp();
+ for (const auto& resolve : fResolves) {
+ alloc->addInterval(resolve.fProxy.get(), fakeOp, fakeOp,
+ GrResourceAllocator::ActualUse::kYes);
+ }
+ alloc->incOps();
+}
+
+bool GrTextureResolveRenderTask::onExecute(GrOpFlushState* flushState) {
+ // Resolve all msaa back-to-back, before regenerating mipmaps.
+ for (const auto& resolve : fResolves) {
+ if (GrSurfaceProxy::ResolveFlags::kMSAA & resolve.fFlags) {
+ // peekRenderTarget might be null if there was an instantiation error.
+ if (GrRenderTarget* renderTarget = resolve.fProxy->peekRenderTarget()) {
+ flushState->gpu()->resolveRenderTarget(renderTarget, resolve.fMSAAResolveRect,
+ resolve.fProxy->origin(),
+ GrGpu::ForExternalIO::kNo);
+ }
+ }
+ }
+ // Regenerate all mipmaps back-to-back.
+ for (const auto& resolve : fResolves) {
+ if (GrSurfaceProxy::ResolveFlags::kMipMaps & resolve.fFlags) {
+ // peekTexture might be null if there was an instantiation error.
+ GrTexture* texture = resolve.fProxy->peekTexture();
+ if (texture && texture->texturePriv().mipMapsAreDirty()) {
+ flushState->gpu()->regenerateMipMapLevels(texture);
+ SkASSERT(!texture->texturePriv().mipMapsAreDirty());
+ }
+ }
+ }
+
+ return true;
+}
+
+#ifdef SK_DEBUG
+void GrTextureResolveRenderTask::visitProxies_debugOnly(const VisitSurfaceProxyFunc& fn) const {
+ for (const auto& resolve : fResolves) {
+ fn(resolve.fProxy.get(), GrMipMapped::kNo);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.h b/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.h
new file mode 100644
index 0000000000..48fea3905b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureResolveRenderTask.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureResolveRenderTask_DEFINED
+#define GrTextureResolveRenderTask_DEFINED
+
+#include "src/gpu/GrRenderTask.h"
+
+class GrTextureResolveRenderTask final : public GrRenderTask {
+public:
+ GrTextureResolveRenderTask() : GrRenderTask(nullptr) {}
+ ~GrTextureResolveRenderTask() override;
+
+ void addProxy(sk_sp<GrSurfaceProxy>, GrSurfaceProxy::ResolveFlags, const GrCaps&);
+
+private:
+ bool onIsUsed(GrSurfaceProxy* proxy) const override {
+ SkASSERT(proxy != fTarget.get()); // This case should be handled by GrRenderTask.
+ return false;
+ }
+ void handleInternalAllocationFailure() override {
+ // No need to do anything special here. We just double check the proxies during onExecute.
+ }
+ void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect*) override {
+ return ExpectedOutcome::kTargetUnchanged;
+ }
+
+ bool onExecute(GrOpFlushState*) override;
+
+#ifdef SK_DEBUG
+    void visitProxies_debugOnly(const VisitSurfaceProxyFunc&) const override;
+#endif
+
+ struct Resolve {
+ Resolve(sk_sp<GrSurfaceProxy> proxy, GrSurfaceProxy::ResolveFlags flags)
+ : fProxy(std::move(proxy)), fFlags(flags) {}
+ sk_sp<GrSurfaceProxy> fProxy;
+ GrSurfaceProxy::ResolveFlags fFlags;
+ SkIRect fMSAAResolveRect;
+ };
+
+ SkSTArray<4, Resolve> fResolves;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTracing.h b/gfx/skia/skia/src/gpu/GrTracing.h
new file mode 100644
index 0000000000..df66eb81d5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTracing.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTracing_DEFINED
+#define GrTracing_DEFINED
+
+#include "src/core/SkTraceEvent.h"
+
+#include "src/gpu/GrAuditTrail.h"
+
+class GrContext;
+
+/**
+ * Context-level GrTracing macro: classname and op are const char* literals, and context is
+ * a GrContext*.
+ */
+#define GR_CREATE_TRACE_MARKER_CONTEXT(classname, op, context) \
+ GR_AUDIT_TRAIL_AUTO_FRAME(context->priv().auditTrail(), classname "::" op); \
+ TRACE_EVENT0("skia.gpu", classname "::" op)
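+
+// Illustrative (hypothetical) usage inside a context-level function:
+//   GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);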
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.cpp b/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.cpp
new file mode 100644
index 0000000000..7826bceca9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrTransferFromRenderTask.h"
+
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceAllocator.h"
+
+void GrTransferFromRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+ // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // we read fSrcProxy.
+ alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
+ alloc->incOps();
+}
+
+bool GrTransferFromRenderTask::onExecute(GrOpFlushState* flushState) {
+ if (!fSrcProxy->isInstantiated()) {
+ return false;
+ }
+ return flushState->gpu()->transferPixelsFrom(
+ fSrcProxy->peekSurface(), fSrcRect.fLeft, fSrcRect.fTop, fSrcRect.width(),
+ fSrcRect.height(), fSurfaceColorType, fDstColorType, fDstBuffer.get(), fDstOffset);
+}
diff --git a/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.h b/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.h
new file mode 100644
index 0000000000..40e89da6be
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTransferFromRenderTask.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTransferFromRenderTask_DEFINED
+#define GrTransferFromRenderTask_DEFINED
+
+#include "src/gpu/GrRenderTask.h"
+
+class GrTransferFromRenderTask final : public GrRenderTask {
+public:
+ GrTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
+ const SkIRect& srcRect,
+ GrColorType surfaceColorType,
+ GrColorType dstColorType,
+ sk_sp<GrGpuBuffer> dstBuffer,
+ size_t dstOffset)
+ : GrRenderTask(nullptr)
+ , fSrcProxy(std::move(srcProxy))
+ , fSrcRect(srcRect)
+ , fSurfaceColorType(surfaceColorType)
+ , fDstColorType(dstColorType)
+ , fDstBuffer(std::move(dstBuffer))
+ , fDstOffset(dstOffset) {}
+
+private:
+ bool onIsUsed(GrSurfaceProxy* proxy) const override {
+ SkASSERT(!fTarget);
+ return proxy == fSrcProxy.get();
+ }
+ // If fSrcProxy is uninstantiated at flush time we simply will skip doing the transfer.
+ void handleInternalAllocationFailure() override {}
+ void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect*) override {
+ return ExpectedOutcome::kTargetUnchanged;
+ }
+
+ bool onExecute(GrOpFlushState*) override;
+
+#ifdef SK_DEBUG
+ void visitProxies_debugOnly(const VisitSurfaceProxyFunc& fn) const override {
+ fn(fSrcProxy.get(), GrMipMapped::kNo);
+ }
+#endif
+
+ sk_sp<GrSurfaceProxy> fSrcProxy;
+ SkIRect fSrcRect;
+ GrColorType fSurfaceColorType;
+ GrColorType fDstColorType;
+ sk_sp<GrGpuBuffer> fDstBuffer;
+ size_t fDstOffset;
+
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/GrUserStencilSettings.h b/gfx/skia/skia/src/gpu/GrUserStencilSettings.h
new file mode 100644
index 0000000000..83a19b776e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrUserStencilSettings.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrUserStencilSettings_DEFINED
+#define GrUserStencilSettings_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Gr uses the stencil buffer to implement complex clipping inside the
+ * GrOpsTask class. The GrOpsTask makes a subset of the stencil buffer
+ * bits available for other uses by external code (user bits). Client code can
+ * modify these bits. GrOpsTask will ignore ref, mask, and writemask bits
+ * provided by clients that fall outside the user range.
+ *
+ * When code outside the GrOpsTask class uses the stencil buffer the contract
+ * is as follows:
+ *
+ * > Normal stencil funcs allow the client to pass / fail regardless of the
+ * reserved clip bits.
+ * > Additional functions allow a test against the clip along with a limited
+ * set of tests against the user bits.
+ * > Client can assume all user bits are zero initially.
+ * > Client must ensure that after all its passes are finished it has only
+ * written to the color buffer in the region inside the clip. Furthermore, it
+ *   must zero all user bits that were modified (both inside and outside the
+ * clip).
+ */
+
+enum GrStencilFlags {
+ kDisabled_StencilFlag = (1 << 0),
+ kTestAlwaysPasses_StencilFlag = (1 << 1),
+ kNoModifyStencil_StencilFlag = (1 << 2),
+ kNoWrapOps_StencilFlag = (1 << 3),
+ kSingleSided_StencilFlag = (1 << 4),
+
+ kLast_StencilFlag = kSingleSided_StencilFlag,
+ kAll_StencilFlags = kLast_StencilFlag | (kLast_StencilFlag - 1)
+};
+
+template<typename TTest, typename TOp> struct GrTStencilFaceSettings {
+ uint16_t fRef; // Reference value for stencil test and ops.
+ TTest fTest; // Stencil test function, where fRef is on the left side.
+ uint16_t fTestMask; // Bitwise "and" to perform on fRef and stencil values before testing.
+ // (e.g. (fRef & fTestMask) < (stencil & fTestMask))
+ TOp fPassOp; // Op to perform when the test passes.
+ TOp fFailOp; // Op to perform when the test fails.
+ uint16_t fWriteMask; // Indicates which bits in the stencil buffer should be updated.
+ // (e.g. stencil = (newValue & fWriteMask) | (stencil & ~fWriteMask))
+};
+
+enum class GrUserStencilTest : uint16_t {
+ // Tests that respect the clip bit. If a stencil clip is not in effect, the "IfInClip" is
+ // ignored and these only act on user bits.
+ kAlwaysIfInClip,
+ kEqualIfInClip,
+ kLessIfInClip,
+ kLEqualIfInClip,
+
+    // Tests that ignore the clip bit. The client is responsible for ensuring that no color
+    // write occurs outside the clip if it is in use.
+ kAlways,
+ kNever,
+ kGreater,
+ kGEqual,
+ kLess,
+ kLEqual,
+ kEqual,
+ kNotEqual
+};
+constexpr static GrUserStencilTest kLastClippedStencilTest = GrUserStencilTest::kLEqualIfInClip;
+constexpr static int kGrUserStencilTestCount = 1 + (int)GrUserStencilTest::kNotEqual;
+
+enum class GrUserStencilOp : uint8_t {
+ kKeep,
+
+ // Ops that only modify user bits. These must not be paired with ops that modify the clip bit.
+ kZero,
+ kReplace, // Replace stencil value with fRef (only the bits enabled in fWriteMask).
+ kInvert,
+ kIncWrap,
+ kDecWrap,
+ // These two should only be used if wrap ops are not supported, or if the math is guaranteed
+ // to not overflow. The user bits may or may not clamp, depending on the state of non-user bits.
+ kIncMaybeClamp,
+ kDecMaybeClamp,
+
+ // Ops that only modify the clip bit. These must not be paired with ops that modify user bits.
+ kZeroClipBit,
+ kSetClipBit,
+ kInvertClipBit,
+
+ // Ops that modify both clip and user bits. These can only be paired with kKeep or each other.
+ kSetClipAndReplaceUserBits,
+ kZeroClipAndUserBits
+};
+constexpr static GrUserStencilOp kLastUserOnlyStencilOp = GrUserStencilOp::kDecMaybeClamp;
+constexpr static GrUserStencilOp kLastClipOnlyStencilOp = GrUserStencilOp::kInvertClipBit;
+constexpr static int kGrUserStencilOpCount = 1 + (int)GrUserStencilOp::kZeroClipAndUserBits;
+
+/**
+ * This struct is a compile-time constant representation of user stencil settings. It describes in
+ * abstract terms how a draw will use the stencil buffer. It gets ODR-used at runtime to define a
+ * draw's stencil settings, and is later translated into concrete settings when the pipeline is
+ * finalized.
+ */
+struct GrUserStencilSettings {
+ typedef GrTStencilFaceSettings<GrUserStencilTest, GrUserStencilOp> Face;
+
+ template<GrUserStencilTest, GrUserStencilOp PassOp, GrUserStencilOp FailOp> struct Attrs;
+
+ // Unfortunately, this is the only way to pass template arguments to a constructor.
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask> struct Init {};
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask> struct InitSeparate {};
+
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask>
+ constexpr static Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask> StaticInit() {
+ return Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask>();
+ }
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask>
+ constexpr static InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask,
+ BkWriteMask> StaticInitSeparate() {
+ return InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask, BkWriteMask>();
+ }
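+
+    // For illustration only, a typical compile-time definition built with StaticInit might
+    // look like this (the mask and ref values here are hypothetical):
+    //
+    //   constexpr static GrUserStencilSettings kEqualKeep(
+    //       GrUserStencilSettings::StaticInit<
+    //           0x0000,
+    //           GrUserStencilTest::kEqual,
+    //           0xffff,
+    //           GrUserStencilOp::kKeep,
+    //           GrUserStencilOp::kKeep,
+    //           0xffff>());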
+
+ // We construct with template arguments in order to enforce that the struct be compile-time
+ // constant and to make use of static asserts.
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask,
+ typename Attrs = Attrs<Test, PassOp, FailOp> >
+ constexpr explicit GrUserStencilSettings(
+ const Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask>&)
+ : fFrontFlags{(uint16_t)(Attrs::Flags(false) | kSingleSided_StencilFlag),
+ (uint16_t)(Attrs::Flags(true) | kSingleSided_StencilFlag)}
+ , fFront{Ref, Test, Attrs::EffectiveTestMask(TestMask), PassOp, FailOp,
+ Attrs::EffectiveWriteMask(WriteMask)}
+ , fBackFlags{(uint16_t)(Attrs::Flags(false) | kSingleSided_StencilFlag),
+ (uint16_t)(Attrs::Flags(true) | kSingleSided_StencilFlag)}
+ , fBack{Ref, Test, Attrs::EffectiveTestMask(TestMask), PassOp, FailOp,
+ Attrs::EffectiveWriteMask(WriteMask)} {
+ }
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask,
+ typename FtAttrs = Attrs<FtTest, FtPassOp, FtFailOp>,
+ typename BkAttrs = Attrs<BkTest, BkPassOp, BkFailOp> >
+ constexpr explicit GrUserStencilSettings(
+ const InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask, BkWriteMask>&)
+ : fFrontFlags{FtAttrs::Flags(false), FtAttrs::Flags(true)}
+ , fFront{FtRef, FtTest, FtAttrs::EffectiveTestMask(FtTestMask), FtPassOp, FtFailOp,
+ FtAttrs::EffectiveWriteMask(FtWriteMask)}
+ , fBackFlags{BkAttrs::Flags(false), BkAttrs::Flags(true)}
+ , fBack{BkRef, BkTest, BkAttrs::EffectiveTestMask(BkTestMask), BkPassOp, BkFailOp,
+ BkAttrs::EffectiveWriteMask(BkWriteMask)} {}
+
+ // This struct can only be constructed with static initializers.
+ GrUserStencilSettings() = delete;
+ GrUserStencilSettings(const GrUserStencilSettings&) = delete;
+
+ uint16_t flags(bool hasStencilClip) const {
+ return fFrontFlags[hasStencilClip] & fBackFlags[hasStencilClip];
+ }
+ bool isDisabled(bool hasStencilClip) const {
+ return this->flags(hasStencilClip) & kDisabled_StencilFlag;
+ }
+ bool testAlwaysPasses(bool hasStencilClip) const {
+ return this->flags(hasStencilClip) & kTestAlwaysPasses_StencilFlag;
+ }
+ bool isTwoSided(bool hasStencilClip) const {
+ return !(this->flags(hasStencilClip) & kSingleSided_StencilFlag);
+ }
+ bool usesWrapOp(bool hasStencilClip) const {
+ return !(this->flags(hasStencilClip) & kNoWrapOps_StencilFlag);
+ }
+
+ const uint16_t fFrontFlags[2]; // frontFlagsForDraw = fFrontFlags[hasStencilClip].
+ const Face fFront;
+ const uint16_t fBackFlags[2]; // backFlagsForDraw = fBackFlags[hasStencilClip].
+ const Face fBack;
+
+ static const GrUserStencilSettings& kUnused;
+
+ bool isUnused() const { return this == &kUnused; }
+};
+
+template<GrUserStencilTest Test, GrUserStencilOp PassOp, GrUserStencilOp FailOp>
+struct GrUserStencilSettings::Attrs {
+ // Ensure an op that only modifies user bits isn't paired with one that modifies clip bits.
+ GR_STATIC_ASSERT(GrUserStencilOp::kKeep == PassOp || GrUserStencilOp::kKeep == FailOp ||
+ (PassOp <= kLastUserOnlyStencilOp) == (FailOp <= kLastUserOnlyStencilOp));
+ // Ensure an op that only modifies clip bits isn't paired with one that modifies clip and user.
+ GR_STATIC_ASSERT(GrUserStencilOp::kKeep == PassOp || GrUserStencilOp::kKeep == FailOp ||
+ (PassOp <= kLastClipOnlyStencilOp) == (FailOp <= kLastClipOnlyStencilOp));
+
+ constexpr static bool TestAlwaysPasses(bool hasStencilClip) {
+ return (!hasStencilClip && GrUserStencilTest::kAlwaysIfInClip == Test) ||
+ GrUserStencilTest::kAlways == Test;
+ }
+ constexpr static bool DoesNotModifyStencil(bool hasStencilClip) {
+ return (GrUserStencilTest::kNever == Test || GrUserStencilOp::kKeep == PassOp) &&
+ (TestAlwaysPasses(hasStencilClip) || GrUserStencilOp::kKeep == FailOp);
+ }
+ constexpr static bool IsDisabled(bool hasStencilClip) {
+ return TestAlwaysPasses(hasStencilClip) && DoesNotModifyStencil(hasStencilClip);
+ }
+ constexpr static bool UsesWrapOps() {
+ return GrUserStencilOp::kIncWrap == PassOp || GrUserStencilOp::kDecWrap == PassOp ||
+ GrUserStencilOp::kIncWrap == FailOp || GrUserStencilOp::kDecWrap == FailOp;
+ }
+ constexpr static bool TestIgnoresRef() {
+ return (GrUserStencilTest::kAlwaysIfInClip == Test || GrUserStencilTest::kAlways == Test ||
+ GrUserStencilTest::kNever == Test);
+ }
+ constexpr static uint16_t Flags(bool hasStencilClip) {
+ return (IsDisabled(hasStencilClip) ? kDisabled_StencilFlag : 0) |
+ (TestAlwaysPasses(hasStencilClip) ? kTestAlwaysPasses_StencilFlag : 0) |
+ (DoesNotModifyStencil(hasStencilClip) ? kNoModifyStencil_StencilFlag : 0) |
+ (UsesWrapOps() ? 0 : kNoWrapOps_StencilFlag);
+ }
+ constexpr static uint16_t EffectiveTestMask(uint16_t testMask) {
+ return TestIgnoresRef() ? 0 : testMask;
+ }
+ constexpr static uint16_t EffectiveWriteMask(uint16_t writeMask) {
+ // We don't modify the mask differently when hasStencilClip=false because either the entire
+ // face gets disabled in that case (e.g. Test=kAlwaysIfInClip, PassOp=kKeep), or else the
+ // effective mask stays the same either way.
+ return DoesNotModifyStencil(true) ? 0 : writeMask;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrUtil.h b/gfx/skia/skia/src/gpu/GrUtil.h
new file mode 100644
index 0000000000..5412f0b292
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrUtil.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrUtil_DEFINED
+#define GrUtil_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum GrIntelGpuFamily {
+ kUnknown_IntelGpuFamily,
+
+ // 6th gen
+ kSandyBridge_IntelGpuFamily,
+
+ // 7th gen
+ kIvyBridge_IntelGpuFamily,
+ kValleyView_IntelGpuFamily, // aka BayTrail
+ kHaswell_IntelGpuFamily,
+
+ // 8th gen
+ kCherryView_IntelGpuFamily, // aka Braswell
+ kBroadwell_IntelGpuFamily,
+
+ // 9th gen
+ kApolloLake_IntelGpuFamily,
+ kSkyLake_IntelGpuFamily,
+ kGeminiLake_IntelGpuFamily,
+ kKabyLake_IntelGpuFamily,
+ kCoffeeLake_IntelGpuFamily,
+
+ // 11th gen
+ kIceLake_IntelGpuFamily,
+};
+
+// Defined inline because this function lives in a header and may be included from multiple
+// translation units.
+inline GrIntelGpuFamily GrGetIntelGpuFamily(uint32_t deviceID) {
+ // https://en.wikipedia.org/wiki/List_of_Intel_graphics_processing_units
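+    // For example, deviceID 0x5916 (a common Kaby Lake part) masks to 0x5900 below and is
+    // reported as kKabyLake_IntelGpuFamily.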
+ uint32_t maskedID = deviceID & 0xFF00;
+ switch (maskedID) {
+ case 0x0100:
+ switch (deviceID & 0xFFF0) {
+ case 0x0100:
+ case 0x0110:
+ case 0x0120:
+ return kSandyBridge_IntelGpuFamily;
+ case 0x0150:
+ if (deviceID == 0x0155 || deviceID == 0x0157) {
+ return kValleyView_IntelGpuFamily;
+ }
+ if (deviceID == 0x0152 || deviceID == 0x015A) {
+ return kIvyBridge_IntelGpuFamily;
+ }
+ break;
+ case 0x0160:
+ return kIvyBridge_IntelGpuFamily;
+ default:
+ break;
+ }
+ break;
+ case 0x0F00:
+ return kValleyView_IntelGpuFamily;
+ case 0x0400:
+ case 0x0A00:
+ case 0x0D00:
+ return kHaswell_IntelGpuFamily;
+ case 0x2200:
+ return kCherryView_IntelGpuFamily;
+ case 0x1600:
+ return kBroadwell_IntelGpuFamily;
+ case 0x5A00:
+ return kApolloLake_IntelGpuFamily;
+ case 0x1900:
+ return kSkyLake_IntelGpuFamily;
+ case 0x3100:
+ return kGeminiLake_IntelGpuFamily;
+ case 0x5900:
+ return kKabyLake_IntelGpuFamily;
+ case 0x3E00:
+ return kCoffeeLake_IntelGpuFamily;
+ case 0x8A00:
+ return kIceLake_IntelGpuFamily;
+ default:
+ break;
+ }
+ return kUnknown_IntelGpuFamily;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrVertexWriter.h b/gfx/skia/skia/src/gpu/GrVertexWriter.h
new file mode 100644
index 0000000000..7b1ddc1d6b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrVertexWriter.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVertexWriter_DEFINED
+#define GrVertexWriter_DEFINED
+
+#include "include/private/SkTemplates.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include <type_traits>
+
+/**
+ * Helper for writing vertex data to a buffer. Usage:
+ * GrVertexWriter vertices{target->makeVertexSpace(...)};
+ * vertices.write(A0, B0, C0, ...);
+ * vertices.write(A1, B1, C1, ...);
+ *
+ * Supports any number of arguments. Each argument must be POD (plain old data), or an array
+ * thereof.
+ */
+struct GrVertexWriter {
+ void* fPtr;
+
+ template <typename T>
+ class Conditional {
+ public:
+ explicit Conditional(bool condition, const T& value)
+ : fCondition(condition), fValue(value) {}
+ private:
+ friend struct GrVertexWriter;
+
+ bool fCondition;
+ T fValue;
+ };
+
+ template <typename T>
+ static Conditional<T> If(bool condition, const T& value) {
+ return Conditional<T>(condition, value);
+ }
+
+ template <typename T>
+ struct Skip {};
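+
+    // Illustrative sketch: If() and Skip<>() let a single write() call handle optional or
+    // padded attributes, e.g. (hypothetical fields):
+    //   vertices.write(position,
+    //                  GrVertexWriter::If(hasLocalCoords, localCoords),
+    //                  GrVertexWriter::Skip<float>());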
+
+ template <typename T, typename... Args>
+ void write(const T& val, const Args&... remainder) {
+ static_assert(std::is_pod<T>::value, "");
+ // This assert is barely related to what we're trying to check - that our vertex data
+ // matches our attribute layouts, where each attribute is aligned to four bytes. If this
+ // becomes a problem, just remove it.
+ static_assert(alignof(T) <= 4, "");
+ memcpy(fPtr, &val, sizeof(T));
+ fPtr = SkTAddOffset<void>(fPtr, sizeof(T));
+ this->write(remainder...);
+ }
+
+ template <typename T, size_t N, typename... Args>
+ void write(const T(&val)[N], const Args&... remainder) {
+ static_assert(std::is_pod<T>::value, "");
+ static_assert(alignof(T) <= 4, "");
+ memcpy(fPtr, val, N * sizeof(T));
+ fPtr = SkTAddOffset<void>(fPtr, N * sizeof(T));
+ this->write(remainder...);
+ }
+
+ template <typename... Args>
+ void write(const GrVertexColor& color, const Args&... remainder) {
+ this->write(color.fColor[0]);
+ if (color.fWideColor) {
+ this->write(color.fColor[1]);
+ }
+ this->write(remainder...);
+ }
+
+ template <typename T, typename... Args>
+ void write(const Conditional<T>& val, const Args&... remainder) {
+ if (val.fCondition) {
+ this->write(val.fValue);
+ }
+ this->write(remainder...);
+ }
+
+ template <typename T, typename... Args>
+ void write(const Skip<T>& val, const Args&... remainder) {
+ fPtr = SkTAddOffset<void>(fPtr, sizeof(T));
+ this->write(remainder...);
+ }
+
+ template <typename... Args>
+ void write(const Sk4f& vector, const Args&... remainder) {
+ float buffer[4];
+ vector.store(buffer);
+ this->write<float, 4>(buffer);
+ this->write(remainder...);
+ }
+
+ void write() {}
+
+ /**
+     * Specialized utility for writing four vertices, with some data replicated at each
+     * vertex, and other data being the appropriate two components from an SkRect to
+     * construct a triangle strip.
+     *
+     * writeQuad(A, B, C, ...) is similar to write(A, B, C, ...), except that:
+     *
+     *  - Four sets of data will be written
+     *  - For any arguments of type TriStrip, a unique SkPoint will be written at each vertex,
+     *    in this order: left-top, left-bottom, right-top, right-bottom. (TriFan and GrQuad
+     *    arguments are expanded per-vertex analogously, in fan order and GrQuad point order.)
+ */
+ template <typename T>
+ struct TriStrip { T l, t, r, b; };
+
+ static TriStrip<float> TriStripFromRect(const SkRect& r) {
+ return { r.fLeft, r.fTop, r.fRight, r.fBottom };
+ }
+
+ template <typename T>
+ struct TriFan { T l, t, r, b; };
+
+ static TriFan<float> TriFanFromRect(const SkRect& r) {
+ return { r.fLeft, r.fTop, r.fRight, r.fBottom };
+ }
+
+ template <typename... Args>
+ void writeQuad(const Args&... remainder) {
+ this->writeQuadVert<0>(remainder...);
+ this->writeQuadVert<1>(remainder...);
+ this->writeQuadVert<2>(remainder...);
+ this->writeQuadVert<3>(remainder...);
+ }
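+
+    // Illustrative (hypothetical attribute layout): writing a colored rect as a strip:
+    //   vertices.writeQuad(GrVertexWriter::TriStripFromRect(rect), color);
+    // emits four vertices, each with its own corner point followed by the shared color.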
+
+private:
+ template <int corner, typename T, typename... Args>
+ void writeQuadVert(const T& val, const Args&... remainder) {
+ this->writeQuadValue<corner>(val);
+ this->writeQuadVert<corner>(remainder...);
+ }
+
+ template <int corner>
+ void writeQuadVert() {}
+
+ template <int corner, typename T>
+ void writeQuadValue(const T& val) {
+ this->write(val);
+ }
+
+ template <int corner, typename T>
+ void writeQuadValue(const TriStrip<T>& r) {
+ switch (corner) {
+ case 0: this->write(r.l, r.t); break;
+ case 1: this->write(r.l, r.b); break;
+ case 2: this->write(r.r, r.t); break;
+ case 3: this->write(r.r, r.b); break;
+ }
+ }
+
+ template <int corner, typename T>
+ void writeQuadValue(const TriFan<T>& r) {
+ switch (corner) {
+ case 0: this->write(r.l, r.t); break;
+ case 1: this->write(r.l, r.b); break;
+ case 2: this->write(r.r, r.b); break;
+ case 3: this->write(r.r, r.t); break;
+ }
+ }
+
+ template <int corner>
+ void writeQuadValue(const GrQuad& q) {
+ this->write(q.point(corner));
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrWaitRenderTask.cpp b/gfx/skia/skia/src/gpu/GrWaitRenderTask.cpp
new file mode 100644
index 0000000000..a09a9a617e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWaitRenderTask.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrWaitRenderTask.h"
+
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceAllocator.h"
+
+void GrWaitRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+ // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // we manipulate fTarget.
+ alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
+ alloc->incOps();
+}
+
+bool GrWaitRenderTask::onExecute(GrOpFlushState* flushState) {
+ for (int i = 0; i < fNumSemaphores; ++i) {
+ flushState->gpu()->waitSemaphore(fSemaphores[i]);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrWaitRenderTask.h b/gfx/skia/skia/src/gpu/GrWaitRenderTask.h
new file mode 100644
index 0000000000..fc736e1c9b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWaitRenderTask.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrWaitRenderTask_DEFINED
+#define GrWaitRenderTask_DEFINED
+
+#include "src/gpu/GrRenderTask.h"
+#include "src/gpu/GrSemaphore.h"
+
+class GrWaitRenderTask final : public GrRenderTask {
+public:
+ GrWaitRenderTask(sk_sp<GrSurfaceProxy> proxy, std::unique_ptr<sk_sp<GrSemaphore>[]> semaphores,
+ int numSemaphores)
+ : GrRenderTask(std::move(proxy))
+ , fSemaphores(std::move(semaphores))
+ , fNumSemaphores(numSemaphores){}
+
+private:
+ bool onIsUsed(GrSurfaceProxy* proxy) const override {
+ SkASSERT(proxy != fTarget.get()); // This case should be handled by GrRenderTask.
+ return false;
+ }
+ void handleInternalAllocationFailure() override {}
+ void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect*) override {
+ return ExpectedOutcome::kTargetUnchanged;
+ }
+
+ bool onExecute(GrOpFlushState*) override;
+
+#ifdef SK_DEBUG
+ // No non-dst proxies.
+ void visitProxies_debugOnly(const VisitSurfaceProxyFunc& fn) const override {}
+#endif
+ std::unique_ptr<sk_sp<GrSemaphore>[]> fSemaphores;
+ int fNumSemaphores;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrWindowRectangles.h b/gfx/skia/skia/src/gpu/GrWindowRectangles.h
new file mode 100644
index 0000000000..1b6bc4dfee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWindowRectangles.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrWindowRectangles_DEFINED
+#define GrWindowRectangles_DEFINED
+
+#include "include/core/SkRect.h"
+#include "src/gpu/GrNonAtomicRef.h"
+
+class GrWindowRectangles {
+public:
+ constexpr static int kMaxWindows = 8;
+
+ GrWindowRectangles() : fCount(0) {}
+ GrWindowRectangles(const GrWindowRectangles& that) : fCount(0) { *this = that; }
+ ~GrWindowRectangles() { SkSafeUnref(this->rec()); }
+
+ GrWindowRectangles makeOffset(int dx, int dy) const;
+
+ bool empty() const { return !fCount; }
+ int count() const { return fCount; }
+ const SkIRect* data() const;
+
+ void reset();
+ GrWindowRectangles& operator=(const GrWindowRectangles&);
+
+ SkIRect& addWindow(const SkIRect& window) { return this->addWindow() = window; }
+ SkIRect& addWindow();
+
+ bool operator!=(const GrWindowRectangles& that) const { return !(*this == that); }
+ bool operator==(const GrWindowRectangles&) const;
+
+private:
+ constexpr static int kNumLocalWindows = 1;
+ struct Rec;
+
+ const Rec* rec() const { return fCount <= kNumLocalWindows ? nullptr : fRec; }
+
+ int fCount;
+ union {
+ SkIRect fLocalWindows[kNumLocalWindows]; // If fCount <= kNumLocalWindows.
+ Rec* fRec; // If fCount > kNumLocalWindows.
+ };
+};
+
+struct GrWindowRectangles::Rec : public GrNonAtomicRef<Rec> {
+ Rec(const SkIRect* windows, int numWindows) {
+ SkASSERT(numWindows < kMaxWindows);
+ memcpy(fData, windows, sizeof(SkIRect) * numWindows);
+ }
+ Rec() = default;
+
+ SkIRect fData[kMaxWindows];
+};
+
+inline const SkIRect* GrWindowRectangles::data() const {
+ return fCount <= kNumLocalWindows ? fLocalWindows : fRec->fData;
+}
+
+inline void GrWindowRectangles::reset() {
+ SkSafeUnref(this->rec());
+ fCount = 0;
+}
+
+inline GrWindowRectangles& GrWindowRectangles::operator=(const GrWindowRectangles& that) {
+ SkSafeUnref(this->rec());
+ fCount = that.fCount;
+ if (fCount <= kNumLocalWindows) {
+ memcpy(fLocalWindows, that.fLocalWindows, fCount * sizeof(SkIRect));
+ } else {
+ fRec = SkRef(that.fRec);
+ }
+ return *this;
+}
+
+inline GrWindowRectangles GrWindowRectangles::makeOffset(int dx, int dy) const {
+ if (!dx && !dy) {
+ return *this;
+ }
+ GrWindowRectangles result;
+ result.fCount = fCount;
+ SkIRect* windows;
+ if (result.fCount > kNumLocalWindows) {
+ result.fRec = new Rec();
+ windows = result.fRec->fData;
+ } else {
+ windows = result.fLocalWindows;
+ }
+ for (int i = 0; i < fCount; ++i) {
+ windows[i] = this->data()[i].makeOffset(dx, dy);
+ }
+ return result;
+}
+
+inline SkIRect& GrWindowRectangles::addWindow() {
+ SkASSERT(fCount < kMaxWindows);
+ if (fCount < kNumLocalWindows) {
+ return fLocalWindows[fCount++];
+ }
+ if (fCount == kNumLocalWindows) {
+ fRec = new Rec(fLocalWindows, kNumLocalWindows);
+ } else if (!fRec->unique()) { // Simple copy-on-write.
+ fRec->unref();
+ fRec = new Rec(fRec->fData, fCount);
+ }
+ return fRec->fData[fCount++];
+}
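+
+// A small usage sketch (illustrative): the first window lives inline; adding more windows
+// spills to a ref-counted Rec, which makeOffset()/operator= share copy-on-write.
+//
+//   GrWindowRectangles windows;
+//   windows.addWindow(SkIRect::MakeWH(16, 16));
+//   GrWindowRectangles shifted = windows.makeOffset(4, 4);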
+
+inline bool GrWindowRectangles::operator==(const GrWindowRectangles& that) const {
+ if (fCount != that.fCount) {
+ return false;
+ }
+ if (fCount > kNumLocalWindows && fRec == that.fRec) {
+ return true;
+ }
+ return !fCount || !memcmp(this->data(), that.data(), sizeof(SkIRect) * fCount);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrWindowRectsState.h b/gfx/skia/skia/src/gpu/GrWindowRectsState.h
new file mode 100644
index 0000000000..59ff938bfd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWindowRectsState.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrWindowRectsState_DEFINED
+#define GrWindowRectsState_DEFINED
+
+#include "src/gpu/GrWindowRectangles.h"
+
+class GrWindowRectsState {
+public:
+ enum class Mode : bool {
+ kExclusive,
+ kInclusive
+ };
+
+ GrWindowRectsState() : fMode(Mode::kExclusive) {}
+ GrWindowRectsState(const GrWindowRectangles& windows, Mode mode)
+ : fMode(mode)
+ , fWindows(windows) {
+ }
+
+ bool enabled() const { return Mode::kInclusive == fMode || !fWindows.empty(); }
+ Mode mode() const { return fMode; }
+ const GrWindowRectangles& windows() const { return fWindows; }
+ int numWindows() const { return fWindows.count(); }
+
+ void setDisabled() {
+ fMode = Mode::kExclusive;
+ fWindows.reset();
+ }
+
+ void set(const GrWindowRectangles& windows, Mode mode) {
+ fMode = mode;
+ fWindows = windows;
+ }
+
+ bool operator==(const GrWindowRectsState& that) const {
+ if (fMode != that.fMode) {
+ return false;
+ }
+ return fWindows == that.fWindows;
+ }
+ bool operator!=(const GrWindowRectsState& that) const { return !(*this == that); }
+
+private:
+ Mode fMode;
+ GrWindowRectangles fWindows;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrXferProcessor.cpp b/gfx/skia/skia/src/gpu/GrXferProcessor.cpp
new file mode 100644
index 0000000000..14d0355a06
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrXferProcessor.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrXferProcessor.h"
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrPipeline.h"
+
+GrXferProcessor::GrXferProcessor(ClassID classID)
+ : INHERITED(classID)
+ , fWillReadDstColor(false)
+ , fDstReadUsesMixedSamples(false)
+ , fIsLCD(false) {}
+
+GrXferProcessor::GrXferProcessor(ClassID classID, bool willReadDstColor, bool hasMixedSamples,
+ GrProcessorAnalysisCoverage coverage)
+ : INHERITED(classID)
+ , fWillReadDstColor(willReadDstColor)
+ , fDstReadUsesMixedSamples(willReadDstColor && hasMixedSamples)
+ , fIsLCD(GrProcessorAnalysisCoverage::kLCD == coverage) {}
+
+bool GrXferProcessor::hasSecondaryOutput() const {
+ if (!this->willReadDstColor()) {
+ return this->onHasSecondaryOutput();
+ }
+ return this->dstReadUsesMixedSamples();
+}
+
+void GrXferProcessor::getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b,
+ const GrSurfaceOrigin* originIfDstTexture) const {
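+    // Key bit layout, as assembled below: 0x1 = reads dst color; 0x2 = a dst texture is
+    // provided; 0x4 = that texture's origin is top-left; 0x8 = dst read uses mixed samples;
+    // 0x10 = LCD coverage.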
+ uint32_t key = this->willReadDstColor() ? 0x1 : 0x0;
+ if (key) {
+ if (originIfDstTexture) {
+ key |= 0x2;
+ if (kTopLeft_GrSurfaceOrigin == *originIfDstTexture) {
+ key |= 0x4;
+ }
+ }
+ if (this->dstReadUsesMixedSamples()) {
+ key |= 0x8;
+ }
+ }
+ if (fIsLCD) {
+ key |= 0x10;
+ }
+ b->add32(key);
+ this->onGetGLSLProcessorKey(caps, b);
+}
+
+#ifdef SK_DEBUG
+static const char* equation_string(GrBlendEquation eq) {
+ switch (eq) {
+ case kAdd_GrBlendEquation:
+ return "add";
+ case kSubtract_GrBlendEquation:
+ return "subtract";
+ case kReverseSubtract_GrBlendEquation:
+ return "reverse_subtract";
+ case kScreen_GrBlendEquation:
+ return "screen";
+ case kOverlay_GrBlendEquation:
+ return "overlay";
+ case kDarken_GrBlendEquation:
+ return "darken";
+ case kLighten_GrBlendEquation:
+ return "lighten";
+ case kColorDodge_GrBlendEquation:
+ return "color_dodge";
+ case kColorBurn_GrBlendEquation:
+ return "color_burn";
+ case kHardLight_GrBlendEquation:
+ return "hard_light";
+ case kSoftLight_GrBlendEquation:
+ return "soft_light";
+ case kDifference_GrBlendEquation:
+ return "difference";
+ case kExclusion_GrBlendEquation:
+ return "exclusion";
+ case kMultiply_GrBlendEquation:
+ return "multiply";
+ case kHSLHue_GrBlendEquation:
+ return "hsl_hue";
+ case kHSLSaturation_GrBlendEquation:
+ return "hsl_saturation";
+ case kHSLColor_GrBlendEquation:
+ return "hsl_color";
+ case kHSLLuminosity_GrBlendEquation:
+ return "hsl_luminosity";
+ case kIllegal_GrBlendEquation:
+ SkASSERT(false);
+ return "<illegal>";
+ }
+ return "";
+}
+
+static const char* coeff_string(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kZero_GrBlendCoeff:
+ return "zero";
+ case kOne_GrBlendCoeff:
+ return "one";
+ case kSC_GrBlendCoeff:
+ return "src_color";
+ case kISC_GrBlendCoeff:
+ return "inv_src_color";
+ case kDC_GrBlendCoeff:
+ return "dst_color";
+ case kIDC_GrBlendCoeff:
+ return "inv_dst_color";
+ case kSA_GrBlendCoeff:
+ return "src_alpha";
+ case kISA_GrBlendCoeff:
+ return "inv_src_alpha";
+ case kDA_GrBlendCoeff:
+ return "dst_alpha";
+ case kIDA_GrBlendCoeff:
+ return "inv_dst_alpha";
+ case kConstC_GrBlendCoeff:
+ return "const_color";
+ case kIConstC_GrBlendCoeff:
+ return "inv_const_color";
+ case kConstA_GrBlendCoeff:
+ return "const_alpha";
+ case kIConstA_GrBlendCoeff:
+ return "inv_const_alpha";
+ case kS2C_GrBlendCoeff:
+ return "src2_color";
+ case kIS2C_GrBlendCoeff:
+ return "inv_src2_color";
+ case kS2A_GrBlendCoeff:
+ return "src2_alpha";
+ case kIS2A_GrBlendCoeff:
+ return "inv_src2_alpha";
+ case kIllegal_GrBlendCoeff:
+ SkASSERT(false);
+ return "<illegal>";
+ }
+ return "";
+}
+
+SkString GrXferProcessor::BlendInfo::dump() const {
+ SkString out;
+ out.printf("write_color(%d) equation(%s) src_coeff(%s) dst_coeff:(%s) const(0x%08x)",
+ fWriteColor, equation_string(fEquation), coeff_string(fSrcBlend),
+ coeff_string(fDstBlend), fBlendConstant.toBytes_RGBA());
+ return out;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrXPFactory::AnalysisProperties GrXPFactory::GetAnalysisProperties(
+ const GrXPFactory* factory,
+ const GrProcessorAnalysisColor& color,
+ const GrProcessorAnalysisCoverage& coverage,
+ const GrCaps& caps,
+ GrClampType clampType) {
+ AnalysisProperties result;
+ if (factory) {
+ result = factory->analysisProperties(color, coverage, caps, clampType);
+ } else {
+ result = GrPorterDuffXPFactory::SrcOverAnalysisProperties(color, coverage, caps,
+ clampType);
+ }
+ SkASSERT(!(result & AnalysisProperties::kRequiresDstTexture));
+ if ((result & AnalysisProperties::kReadsDstInShader) &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ result |= AnalysisProperties::kRequiresDstTexture |
+ AnalysisProperties::kRequiresNonOverlappingDraws;
+ }
+ return result;
+}
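+
+// Summary of the fallback behavior above (informational): a null factory means src-over
+// Porter-Duff analysis; and kReadsDstInShader without in-shader dst-read support is
+// promoted to kRequiresDstTexture plus kRequiresNonOverlappingDraws.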
+
+sk_sp<const GrXferProcessor> GrXPFactory::MakeXferProcessor(const GrXPFactory* factory,
+ const GrProcessorAnalysisColor& color,
+ GrProcessorAnalysisCoverage coverage,
+ bool hasMixedSamples,
+ const GrCaps& caps,
+ GrClampType clampType) {
+ SkASSERT(!hasMixedSamples || caps.shaderCaps()->dualSourceBlendingSupport());
+ if (factory) {
+ return factory->makeXferProcessor(color, coverage, hasMixedSamples, caps, clampType);
+ } else {
+ return GrPorterDuffXPFactory::MakeSrcOverXferProcessor(color, coverage, hasMixedSamples,
+ caps);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrXferProcessor.h b/gfx/skia/skia/src/gpu/GrXferProcessor.h
new file mode 100644
index 0000000000..18525e7c05
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrXferProcessor.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrXferProcessor_DEFINED
+#define GrXferProcessor_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrProcessorAnalysis.h"
+
+class GrGLSLXferProcessor;
+class GrProcessorSet;
+class GrShaderCaps;
+
+/**
+ * Barriers for blending. When a shader reads the dst directly, an Xfer barrier is sometimes
+ * required after a pixel has been written, before it can be safely read again.
+ */
+enum GrXferBarrierType {
+    kNone_GrXferBarrierType = 0, //!< No barrier is required
+    kTexture_GrXferBarrierType, //!< Required when a shader reads and renders to the same texture.
+    kBlend_GrXferBarrierType,   //!< Required by certain blend extensions.
+};
+/** Should be able to treat kNone as false in boolean expressions */
+GR_STATIC_ASSERT(SkToBool(kNone_GrXferBarrierType) == false);
+
+/**
+ * GrXferProcessor is responsible for implementing the xfer mode that blends the src color and dst
+ * color, and for applying any coverage. It does this by emitting fragment shader code and
+ * controlling the fixed-function blend state. When dual-source blending is available, it may also
+ * write a secondary fragment shader output color. GrXferProcessor has two modes of operation:
+ *
+ * Dst read: When allowed by the backend API, or when supplied a texture of the destination, the
+ * GrXferProcessor may read the destination color. While operating in this mode, the subclass only
+ * provides shader code that blends the src and dst colors, and the base class applies coverage.
+ *
+ * No dst read: When not performing a dst read, the subclass is given full control of the fixed-
+ * function blend state and/or secondary output, and is responsible for applying coverage on
+ * its own.
+ *
+ * A GrXferProcessor is never installed directly into our draw state, but instead is created from a
+ * GrXPFactory once we have finalized the state of our draw.
+ */
+class GrXferProcessor : public GrProcessor, public GrNonAtomicRef<GrXferProcessor> {
+public:
+ /**
+ * A texture that contains the dst pixel values and an integer coord offset from device space
+     * to the space of the texture. Depending on GPU capabilities, a DstProxy may be used by a
+ * GrXferProcessor for blending in the fragment shader.
+ */
+ class DstProxy {
+ public:
+ DstProxy() { fOffset.set(0, 0); }
+
+ DstProxy(const DstProxy& other) {
+ *this = other;
+ }
+
+ DstProxy(sk_sp<GrTextureProxy> proxy, const SkIPoint& offset)
+ : fProxy(std::move(proxy)) {
+ if (fProxy) {
+ fOffset = offset;
+ } else {
+ fOffset.set(0, 0);
+ }
+ }
+
+ DstProxy& operator=(const DstProxy& other) {
+ fProxy = other.fProxy;
+ fOffset = other.fOffset;
+ return *this;
+ }
+
+ bool operator==(const DstProxy& that) const {
+ return fProxy == that.fProxy && fOffset == that.fOffset;
+ }
+ bool operator!=(const DstProxy& that) const { return !(*this == that); }
+
+ const SkIPoint& offset() const { return fOffset; }
+
+ void setOffset(const SkIPoint& offset) { fOffset = offset; }
+ void setOffset(int ox, int oy) { fOffset.set(ox, oy); }
+
+ GrTextureProxy* proxy() const { return fProxy.get(); }
+ sk_sp<GrTextureProxy> refProxy() const { return fProxy; }
+
+ void setProxy(sk_sp<GrTextureProxy> proxy) {
+ fProxy = std::move(proxy);
+ if (!fProxy) {
+ fOffset = {0, 0};
+ }
+ }
+
+ private:
+ sk_sp<GrTextureProxy> fProxy;
+ SkIPoint fOffset;
+ };
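+
+    // Illustrative: a caller that copies the destination into a texture would wrap it as
+    // (hypothetical names):
+    //   DstProxy dstProxy(std::move(copiedProxy), {copyRect.x(), copyRect.y()});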
+
+ /**
+     * Sets a unique key on the GrProcessorKeyBuilder and calls onGetGLSLProcessorKey(...) to
+     * get the specific subclass's key.
+ */
+ void getGLSLProcessorKey(const GrShaderCaps&,
+ GrProcessorKeyBuilder*,
+ const GrSurfaceOrigin* originIfDstTexture) const;
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrXferProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLXferProcessor* createGLSLInstance() const = 0;
+
+ /**
+ * Returns the barrier type, if any, that this XP will require. Note that the possibility
+ * that a kTexture type barrier is required is handled by the GrPipeline and need not be
+ * considered by subclass overrides of this function.
+ */
+ virtual GrXferBarrierType xferBarrierType(const GrCaps& caps) const {
+ return kNone_GrXferBarrierType;
+ }
+
+ struct BlendInfo {
+ SkDEBUGCODE(SkString dump() const;)
+
+ GrBlendEquation fEquation = kAdd_GrBlendEquation;
+ GrBlendCoeff fSrcBlend = kOne_GrBlendCoeff;
+ GrBlendCoeff fDstBlend = kZero_GrBlendCoeff;
+ SkPMColor4f fBlendConstant = SK_PMColor4fTRANSPARENT;
+ bool fWriteColor = true;
+ };
+
+ inline BlendInfo getBlendInfo() const {
+ BlendInfo blendInfo;
+ if (!this->willReadDstColor()) {
+ this->onGetBlendInfo(&blendInfo);
+ } else if (this->dstReadUsesMixedSamples()) {
+ blendInfo.fDstBlend = kIS2A_GrBlendCoeff;
+ }
+ return blendInfo;
+ }
+
+ bool willReadDstColor() const { return fWillReadDstColor; }
+
+ /**
+ * If we are performing a dst read, returns whether the base class will use mixed samples to
+ * antialias the shader's final output. If not doing a dst read, the subclass is responsible
+ * for antialiasing and this returns false.
+ */
+ bool dstReadUsesMixedSamples() const { return fDstReadUsesMixedSamples; }
+
+ /**
+     * Returns whether or not this xferProcessor will set a secondary output to be used with
+     * dual-source blending.
+ */
+ bool hasSecondaryOutput() const;
+
+ bool isLCD() const { return fIsLCD; }
+
+    /** Returns true if this and the other processor conservatively draw identically. It can
+        only return true when the two processors are of the same subclass (i.e. they return the
+        same object from getFactory()).
+
+        A return value of true from isEqual() should not be used to test whether the processors
+        would generate the same shader code. To test for identical code generation, use
+        getGLSLProcessorKey().
+     */
+
+ bool isEqual(const GrXferProcessor& that) const {
+ if (this->classID() != that.classID()) {
+ return false;
+ }
+ if (this->fWillReadDstColor != that.fWillReadDstColor) {
+ return false;
+ }
+ if (this->fDstReadUsesMixedSamples != that.fDstReadUsesMixedSamples) {
+ return false;
+ }
+ if (fIsLCD != that.fIsLCD) {
+ return false;
+ }
+ return this->onIsEqual(that);
+ }
+
+protected:
+ GrXferProcessor(ClassID classID);
+ GrXferProcessor(ClassID classID, bool willReadDstColor, bool hasMixedSamples,
+ GrProcessorAnalysisCoverage);
+
+private:
+ /**
+ * Sets a unique key on the GrProcessorKeyBuilder that is directly associated with this xfer
+ * processor's GL backend implementation.
+ */
+ virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;
+
+ /**
+ * If we are not performing a dst read, returns whether the subclass will set a secondary
+ * output. When using dst reads, the base class controls the secondary output and this method
+ * will not be called.
+ */
+ virtual bool onHasSecondaryOutput() const { return false; }
+
+ /**
+ * If we are not performing a dst read, retrieves the fixed-function blend state required by the
+ * subclass. When using dst reads, the base class controls the fixed-function blend state and
+ * this method will not be called. The BlendInfo struct comes initialized to "no blending".
+ */
+ virtual void onGetBlendInfo(BlendInfo*) const {}
+
+ virtual bool onIsEqual(const GrXferProcessor&) const = 0;
+
+ bool fWillReadDstColor;
+ bool fDstReadUsesMixedSamples;
+ bool fIsLCD;
+
+ typedef GrProcessor INHERITED;
+};
+
+/**
+ * We install a GrXPFactory (XPF) early on in the pipeline before all the final draw information is
+ * known (e.g. whether there is fractional pixel coverage, will coverage be 1 or 4 channel, is the
+ * draw opaque, etc.). Once the state of the draw is finalized, we use the XPF along with all the
+ * draw information to create a GrXferProcessor (XP) which can implement the desired blending for
+ * the draw.
+ *
+ * Before the XP is created, the XPF is able to answer queries about what functionality the XPs it
+ * creates will have. For example, can it create an XP that supports RGB coverage or will the XP
+ * blend with the destination color.
+ *
+ * GrXPFactories are intended to be static immutable objects. We pass them around as raw pointers
+ * and expect the pointers to always be valid and for the factories to be reusable and thread safe.
+ * Equality is tested for using pointer comparison. GrXPFactory destructors must be no-ops.
+ */
+
+// In order to construct GrXPFactory subclass instances as constexpr the subclass, and therefore
+// GrXPFactory, must be a literal type. One requirement is having a trivial destructor. This is ok
+// since these objects have no need for destructors. However, GCC and clang throw a warning when a
+// class has virtual functions and a non-virtual destructor. We suppress that warning here and
+// for the subclasses.
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+class GrXPFactory {
+public:
+ typedef GrXferProcessor::DstProxy DstProxy;
+
+ enum class AnalysisProperties : unsigned {
+ kNone = 0x0,
+ /**
+ * The fragment shader will require the destination color.
+ */
+ kReadsDstInShader = 0x1,
+ /**
+ * The op may apply coverage as alpha and still blend correctly.
+ */
+ kCompatibleWithCoverageAsAlpha = 0x2,
+ /**
+ * The color input to the GrXferProcessor will be ignored.
+ */
+ kIgnoresInputColor = 0x4,
+ /**
+ * The destination color will be provided to the fragment processor using a texture. This is
+ * additional information about the implementation of kReadsDstInShader.
+ */
+ kRequiresDstTexture = 0x10,
+ /**
+ * If set, each pixel can only be touched once during a draw (e.g., because we have a dst
+ * texture or because we need an xfer barrier).
+ */
+ kRequiresNonOverlappingDraws = 0x20,
+ };
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(AnalysisProperties);
+
+ static sk_sp<const GrXferProcessor> MakeXferProcessor(const GrXPFactory*,
+ const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps& caps,
+ GrClampType);
+
+ static AnalysisProperties GetAnalysisProperties(const GrXPFactory*,
+ const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType);
+
+protected:
+ constexpr GrXPFactory() {}
+
+private:
+ virtual sk_sp<const GrXferProcessor> makeXferProcessor(const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps&,
+ GrClampType) const = 0;
+
+ /**
+     * Subclass analysis implementation. This should not return kRequiresDstTexture as that
+     * will be inferred by the base class based on kReadsDstInShader and the caps.
+ */
+ virtual AnalysisProperties analysisProperties(const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType) const = 0;
+};
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrXPFactory::AnalysisProperties);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrYUVProvider.cpp b/gfx/skia/skia/src/gpu/GrYUVProvider.cpp
new file mode 100644
index 0000000000..b61b8c38cc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrYUVProvider.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrYUVProvider.h"
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkYUVAIndex.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkYUVPlanesCache.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/effects/GrYUVtoRGBEffect.h"
+
+sk_sp<SkCachedData> GrYUVProvider::getPlanes(SkYUVASizeInfo* size,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace,
+ const void* constPlanes[SkYUVASizeInfo::kMaxCount]) {
+ sk_sp<SkCachedData> data;
+ SkYUVPlanesCache::Info yuvInfo;
+ data.reset(SkYUVPlanesCache::FindAndRef(this->onGetID(), &yuvInfo));
+
+ void* planes[SkYUVASizeInfo::kMaxCount];
+
+ if (data.get()) {
+ planes[0] = (void*)data->data(); // we should always have at least one plane
+
+ for (int i = 1; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ if (!yuvInfo.fSizeInfo.fWidthBytes[i]) {
+ SkASSERT(!yuvInfo.fSizeInfo.fWidthBytes[i] &&
+ !yuvInfo.fSizeInfo.fSizes[i].fHeight);
+ planes[i] = nullptr;
+ continue;
+ }
+
+ planes[i] = (uint8_t*)planes[i-1] + (yuvInfo.fSizeInfo.fWidthBytes[i-1] *
+ yuvInfo.fSizeInfo.fSizes[i-1].fHeight);
+ }
+ } else {
+ // Fetch YUV plane sizes for memory allocation.
+ if (!this->onQueryYUVA8(&yuvInfo.fSizeInfo, yuvInfo.fYUVAIndices, &yuvInfo.fColorSpace)) {
+ return nullptr;
+ }
+
+ // Allocate the memory for YUVA
+ size_t totalSize(0);
+ for (int i = 0; i < SkYUVASizeInfo::kMaxCount; i++) {
+ SkASSERT((yuvInfo.fSizeInfo.fWidthBytes[i] && yuvInfo.fSizeInfo.fSizes[i].fHeight) ||
+ (!yuvInfo.fSizeInfo.fWidthBytes[i] && !yuvInfo.fSizeInfo.fSizes[i].fHeight));
+
+ totalSize += yuvInfo.fSizeInfo.fWidthBytes[i] * yuvInfo.fSizeInfo.fSizes[i].fHeight;
+ }
+
+ data.reset(SkResourceCache::NewCachedData(totalSize));
+
+ planes[0] = data->writable_data();
+
+ for (int i = 1; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ if (!yuvInfo.fSizeInfo.fWidthBytes[i]) {
+ SkASSERT(!yuvInfo.fSizeInfo.fWidthBytes[i] &&
+ !yuvInfo.fSizeInfo.fSizes[i].fHeight);
+ planes[i] = nullptr;
+ continue;
+ }
+
+ planes[i] = (uint8_t*)planes[i-1] + (yuvInfo.fSizeInfo.fWidthBytes[i-1] *
+ yuvInfo.fSizeInfo.fSizes[i-1].fHeight);
+ }
+
+ // Get the YUV planes.
+ if (!this->onGetYUVA8Planes(yuvInfo.fSizeInfo, yuvInfo.fYUVAIndices, planes)) {
+ return nullptr;
+ }
+
+ // Decoding is done, cache the resulting YUV planes
+ SkYUVPlanesCache::Add(this->onGetID(), data.get(), &yuvInfo);
+ }
+
+ *size = yuvInfo.fSizeInfo;
+ memcpy(yuvaIndices, yuvInfo.fYUVAIndices, sizeof(yuvInfo.fYUVAIndices));
+ *colorSpace = yuvInfo.fColorSpace;
+ constPlanes[0] = planes[0];
+ constPlanes[1] = planes[1];
+ constPlanes[2] = planes[2];
+ constPlanes[3] = planes[3];
+ return data;
+}
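+
+// Worked example of the packed layout computed above (illustrative numbers, not from the
+// sources): for a 640x480 YUV 4:2:0 image with tightly packed rows, the cache allocation is one
+// contiguous block and the plane pointers are offsets into it:
+//   Y: fWidthBytes[0] = 640, fSizes[0] = 640x480 -> 307200 bytes at planes[0]
+//   U: fWidthBytes[1] = 320, fSizes[1] = 320x240 ->  76800 bytes at planes[0] + 307200
+//   V: fWidthBytes[2] = 320, fSizes[2] = 320x240 ->  76800 bytes at planes[1] + 76800
+//   A: fWidthBytes[3] = 0 -> planes[3] = nullptr
+// totalSize = 307200 + 76800 + 76800 = 460800 bytes.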
+
+void GrYUVProvider::YUVGen_DataReleaseProc(const void*, void* data) {
+ SkCachedData* cachedData = static_cast<SkCachedData*>(data);
+ SkASSERT(cachedData);
+ cachedData->unref();
+}
+
+sk_sp<GrTextureProxy> GrYUVProvider::refAsTextureProxy(GrRecordingContext* ctx,
+ const GrSurfaceDesc& desc,
+ GrColorType colorType,
+ SkColorSpace* srcColorSpace,
+ SkColorSpace* dstColorSpace) {
+ SkYUVASizeInfo yuvSizeInfo;
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount];
+ SkYUVColorSpace yuvColorSpace;
+ const void* planes[SkYUVASizeInfo::kMaxCount];
+
+ sk_sp<SkCachedData> dataStorage = this->getPlanes(&yuvSizeInfo, yuvaIndices,
+ &yuvColorSpace, planes);
+ if (!dataStorage) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> yuvTextureProxies[SkYUVASizeInfo::kMaxCount];
+ for (int i = 0; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ if (yuvSizeInfo.fSizes[i].isEmpty()) {
+ SkASSERT(!yuvSizeInfo.fWidthBytes[i]);
+ continue;
+ }
+
+ int componentWidth = yuvSizeInfo.fSizes[i].fWidth;
+ int componentHeight = yuvSizeInfo.fSizes[i].fHeight;
+ // If the sizes of the components are not all the same, we choose to create exact-match
+ // textures for the smaller ones rather than add a texture domain to the draw.
+ // TODO: revisit this decision to improve texture reuse?
+ SkBackingFit fit =
+ (componentWidth != yuvSizeInfo.fSizes[0].fWidth) ||
+ (componentHeight != yuvSizeInfo.fSizes[0].fHeight)
+ ? SkBackingFit::kExact : SkBackingFit::kApprox;
+
+ SkImageInfo imageInfo = SkImageInfo::MakeA8(componentWidth, componentHeight);
+ SkPixmap pixmap(imageInfo, planes[i], yuvSizeInfo.fWidthBytes[i]);
+ SkCachedData* dataStoragePtr = dataStorage.get();
+ // We grab a ref to the cached YUV data. When the SkImage we create below goes away, it will
+ // call YUVGen_DataReleaseProc, which will release this ref.
+ // DDL TODO: Currently we end up creating a lazy proxy that will hold onto a ref to the
+ // SkImage in its lambda. This means that we'll keep the ref on the YUV data around for the
+ // lifetime of the proxy and not just for the upload. For non-DDL draws we should look into
+ // releasing this SkImage after uploads (by deleting the lambda after instantiation).
+ dataStoragePtr->ref();
+ sk_sp<SkImage> yuvImage = SkImage::MakeFromRaster(pixmap, YUVGen_DataReleaseProc,
+ dataStoragePtr);
+
+ auto proxyProvider = ctx->priv().proxyProvider();
+ yuvTextureProxies[i] =
+ proxyProvider->createTextureProxy(yuvImage, 1, SkBudgeted::kYes, fit);
+
+ SkASSERT(yuvTextureProxies[i]->width() == yuvSizeInfo.fSizes[i].fWidth);
+ SkASSERT(yuvTextureProxies[i]->height() == yuvSizeInfo.fSizes[i].fHeight);
+ }
+
+ // TODO: investigate preallocating mip maps here
+ auto renderTargetContext = ctx->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kExact, desc.fWidth, desc.fHeight, colorType, nullptr, 1,
+ GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ auto yuvToRgbProcessor = GrYUVtoRGBEffect::Make(yuvTextureProxies, yuvaIndices, yuvColorSpace,
+ GrSamplerState::Filter::kNearest);
+ paint.addColorFragmentProcessor(std::move(yuvToRgbProcessor));
+
+ // If the caller expects the pixels in a different color space than the one from the image,
+ // apply a color conversion to do this.
+ std::unique_ptr<GrFragmentProcessor> colorConversionProcessor =
+ GrColorSpaceXformEffect::Make(srcColorSpace, kOpaque_SkAlphaType,
+ dstColorSpace, kOpaque_SkAlphaType);
+ if (colorConversionProcessor) {
+ paint.addColorFragmentProcessor(std::move(colorConversionProcessor));
+ }
+
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ const SkRect r = SkRect::MakeIWH(yuvSizeInfo.fSizes[0].fWidth,
+ yuvSizeInfo.fSizes[0].fHeight);
+
+ SkMatrix m = SkEncodedOriginToMatrix(yuvSizeInfo.fOrigin, r.width(), r.height());
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, m, r);
+
+ return renderTargetContext->asTextureProxyRef();
+}
diff --git a/gfx/skia/skia/src/gpu/GrYUVProvider.h b/gfx/skia/skia/src/gpu/GrYUVProvider.h
new file mode 100644
index 0000000000..330c7fbe64
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrYUVProvider.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVProvider_DEFINED
+#define GrYUVProvider_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkYUVAIndex.h"
+#include "include/core/SkYUVASizeInfo.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrBackendFormat;
+class GrRecordingContext;
+struct GrSurfaceDesc;
+class GrTexture;
+class GrTextureProxy;
+class SkCachedData;
+
+/**
+ * There are at least two different ways to extract/retrieve YUV planar data...
+ * - SkPixelRef
+ * - SkImageGenerator
+ *
+ * To share common functionality around using the planar data, we use this abstract base class
+ * to represent access to that data.
+ */
+class GrYUVProvider {
+public:
+ virtual ~GrYUVProvider() {}
+
+ /**
+ * On success, this returns a texture proxy that has converted the YUV data from the provider
+ * into a form that is supported by the GPU (typically transformed into RGB). The texture will
+ * automatically have a key added, so it can be retrieved from the cache (assuming it is
+ * requested by a provider with the same genID). If srcColorSpace and dstColorSpace are
+ * specified, then a color conversion from src to dst will be applied to the pixels.
+ *
+ * On failure (e.g. the provider had no data), this returns NULL.
+ */
+ sk_sp<GrTextureProxy> refAsTextureProxy(GrRecordingContext*,
+ const GrSurfaceDesc&,
+ GrColorType colorType,
+ SkColorSpace* srcColorSpace,
+ SkColorSpace* dstColorSpace);
+
+ sk_sp<SkCachedData> getPlanes(SkYUVASizeInfo*, SkYUVAIndex[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace*, const void* planes[SkYUVASizeInfo::kMaxCount]);
+
+private:
+ virtual uint32_t onGetID() const = 0;
+
+ // These are not meant to be called by a client, only by the implementation
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, V, and A planes.
+ * @param yuvaIndices How the YUVA planes are used/organized
+ * @param colorSpace Output parameter.
+ */
+ virtual bool onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace) const = 0;
+
+ /**
+ * Returns true on success and false on failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call onQueryYUVA8().
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param yuvaIndices How the YUVA planes are used/organized
+ * @param planes Memory for each of the Y, U, V, and A planes.
+ */
+ virtual bool onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+ const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ void* planes[]) = 0;
+
+ // This is used as the release callback for the YUV data that we capture in an SkImage when
+ // uploading to a GPU. When the upload is complete and we release the SkImage, this callback
+ // will release the underlying data.
+ static void YUVGen_DataReleaseProc(const void*, void* data);
+};
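+
+// Illustrative sketch (not part of the upstream sources) of what a minimal provider looks like.
+// HypotheticalYUVProvider is a made-up name, and the fGen member with its queryYUVA8 /
+// getYUVA8Planes API is a hypothetical stand-in for whatever object actually owns the encoded
+// data.
+//
+//   class HypotheticalYUVProvider : public GrYUVProvider {
+//   public:
+//       uint32_t onGetID() const override { return fGen->uniqueID(); }
+//
+//       bool onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
+//                         SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+//                         SkYUVColorSpace* colorSpace) const override {
+//           // Report plane sizes/indices without decoding any pixels.
+//           return fGen->queryYUVA8(sizeInfo, yuvaIndices, colorSpace);
+//       }
+//
+//       bool onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+//                             const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+//                             void* planes[]) override {
+//           // Full decode into the caller-provided plane memory.
+//           return fGen->getYUVA8Planes(sizeInfo, yuvaIndices, planes);
+//       }
+//
+//   private:
+//       SkImageGenerator* fGen;  // hypothetical backing generator
+//   };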
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice.cpp b/gfx/skia/skia/src/gpu/SkGpuDevice.cpp
new file mode 100644
index 0000000000..77b9963ba5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice.cpp
@@ -0,0 +1,1671 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/SkGpuDevice.h"
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkVertices.h"
+#include "include/gpu/GrContext.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkShadowFlags.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkStroke.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkVertState.h"
+#include "src/gpu/GrBitmapTextureMaker.h"
+#include "src/gpu/GrBlurUtils.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrImageTextureMaker.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/text/GrTextTarget.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkReadPixelsRec.h"
+#include "src/image/SkSurface_Gpu.h"
+#include "src/utils/SkUTF.h"
+
+#define ASSERT_SINGLE_OWNER \
+SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fContext->priv().singleOwner());)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Checks that the alpha type is legal and gets constructor flags. Returns false if device creation
+ should fail. */
+bool SkGpuDevice::CheckAlphaTypeAndGetFlags(
+ const SkImageInfo* info, SkGpuDevice::InitContents init, unsigned* flags) {
+ *flags = 0;
+ if (info) {
+ switch (info->alphaType()) {
+ case kPremul_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ *flags |= SkGpuDevice::kIsOpaque_Flag;
+ break;
+ default: // If it is unpremul or unknown don't try to render
+ return false;
+ }
+ }
+ if (kClear_InitContents == init) {
+ *flags |= kNeedClear_Flag;
+ }
+ return true;
+}
+
+sk_sp<SkGpuDevice> SkGpuDevice::Make(GrContext* context,
+ std::unique_ptr<GrRenderTargetContext> renderTargetContext,
+ InitContents init) {
+ if (!renderTargetContext || context->priv().abandoned()) {
+ return nullptr;
+ }
+
+ SkColorType ct = GrColorTypeToSkColorType(renderTargetContext->colorInfo().colorType());
+
+ unsigned flags;
+ if (!context->colorTypeSupportedAsSurface(ct) ||
+ !CheckAlphaTypeAndGetFlags(nullptr, init, &flags)) {
+ return nullptr;
+ }
+ return sk_sp<SkGpuDevice>(new SkGpuDevice(context, std::move(renderTargetContext), flags));
+}
+
+sk_sp<SkGpuDevice> SkGpuDevice::Make(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& info, int sampleCount,
+ GrSurfaceOrigin origin, const SkSurfaceProps* props,
+ GrMipMapped mipMapped, InitContents init) {
+ unsigned flags;
+ if (!context->colorTypeSupportedAsSurface(info.colorType()) ||
+ !CheckAlphaTypeAndGetFlags(&info, init, &flags)) {
+ return nullptr;
+ }
+
+ auto renderTargetContext =
+ MakeRenderTargetContext(context, budgeted, info, sampleCount, origin, props, mipMapped);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ return sk_sp<SkGpuDevice>(new SkGpuDevice(context, std::move(renderTargetContext), flags));
+}
+
+static SkImageInfo make_info(GrRenderTargetContext* context, bool opaque) {
+ SkColorType colorType = GrColorTypeToSkColorType(context->colorInfo().colorType());
+ return SkImageInfo::Make(context->width(), context->height(), colorType,
+ opaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType,
+ context->colorInfo().refColorSpace());
+}
+
+SkGpuDevice::SkGpuDevice(GrContext* context,
+ std::unique_ptr<GrRenderTargetContext> renderTargetContext,
+ unsigned flags)
+ : INHERITED(make_info(renderTargetContext.get(), SkToBool(flags & kIsOpaque_Flag)),
+ renderTargetContext->surfaceProps())
+ , fContext(SkRef(context))
+ , fRenderTargetContext(std::move(renderTargetContext)) {
+ if (flags & kNeedClear_Flag) {
+ this->clearAll();
+ }
+}
+
+std::unique_ptr<GrRenderTargetContext> SkGpuDevice::MakeRenderTargetContext(
+ GrContext* context,
+ SkBudgeted budgeted,
+ const SkImageInfo& origInfo,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ GrMipMapped mipMapped) {
+ if (!context) {
+ return nullptr;
+ }
+
+ // This method is used to create SkGpuDevices for SkSurface_Gpus. In this case
+ // they need to be exact.
+ return context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kExact, origInfo.width(), origInfo.height(),
+ SkColorTypeToGrColorType(origInfo.colorType()), origInfo.refColorSpace(), sampleCount,
+ mipMapped, origin, surfaceProps, budgeted);
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::filterTexture(SkSpecialImage* srcImg,
+ int left, int top,
+ SkIPoint* offset,
+ const SkImageFilter* filter) {
+ SkASSERT(srcImg->isTextureBacked());
+ SkASSERT(filter);
+
+ SkMatrix matrix = this->ctm();
+ matrix.postTranslate(SkIntToScalar(-left), SkIntToScalar(-top));
+ const SkIRect clipBounds = this->devClipBounds().makeOffset(-left, -top);
+ sk_sp<SkImageFilterCache> cache(this->getImageFilterCache());
+ SkColorType colorType = GrColorTypeToSkColorType(fRenderTargetContext->colorInfo().colorType());
+ if (colorType == kUnknown_SkColorType) {
+ colorType = kRGBA_8888_SkColorType;
+ }
+ SkImageFilter_Base::Context ctx(matrix, clipBounds, cache.get(), colorType,
+ fRenderTargetContext->colorInfo().colorSpace(), srcImg);
+
+ return as_IFB(filter)->filterImage(ctx).imageAndOffset(offset);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGpuDevice::onReadPixels(const SkPixmap& pm, int x, int y) {
+ ASSERT_SINGLE_OWNER
+
+ if (!SkImageInfoValidConversion(pm.info(), this->imageInfo())) {
+ return false;
+ }
+
+ return fRenderTargetContext->readPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), {x, y});
+}
+
+bool SkGpuDevice::onWritePixels(const SkPixmap& pm, int x, int y) {
+ ASSERT_SINGLE_OWNER
+
+ if (!SkImageInfoValidConversion(this->imageInfo(), pm.info())) {
+ return false;
+ }
+
+ return fRenderTargetContext->writePixels(pm.info(), pm.addr(), pm.rowBytes(), {x, y});
+}
+
+bool SkGpuDevice::onAccessPixels(SkPixmap* pmap) {
+ ASSERT_SINGLE_OWNER
+ return false;
+}
+
+GrRenderTargetContext* SkGpuDevice::accessRenderTargetContext() {
+ ASSERT_SINGLE_OWNER
+ return fRenderTargetContext.get();
+}
+
+void SkGpuDevice::clearAll() {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "clearAll", fContext.get());
+
+ SkIRect rect = SkIRect::MakeWH(this->width(), this->height());
+ fRenderTargetContext->clear(&rect, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+}
+
+void SkGpuDevice::replaceRenderTargetContext(std::unique_ptr<GrRenderTargetContext> rtc,
+ bool shouldRetainContent) {
+ SkASSERT(rtc->width() == this->width());
+ SkASSERT(rtc->height() == this->height());
+ SkASSERT(rtc->numSamples() == fRenderTargetContext->numSamples());
+ SkASSERT(rtc->asSurfaceProxy()->priv().isExact());
+ if (shouldRetainContent) {
+ if (this->context()->abandoned()) {
+ return;
+ }
+
+ SkASSERT(fRenderTargetContext->asTextureProxy());
+ SkAssertResult(rtc->blitTexture(fRenderTargetContext->asTextureProxy(),
+ fRenderTargetContext->colorInfo().colorType(),
+ SkIRect::MakeWH(this->width(), this->height()),
+ SkIPoint::Make(0,0)));
+ }
+
+ fRenderTargetContext = std::move(rtc);
+}
+
+void SkGpuDevice::replaceRenderTargetContext(bool shouldRetainContent) {
+ ASSERT_SINGLE_OWNER
+
+ SkBudgeted budgeted = fRenderTargetContext->priv().isBudgeted();
+
+ // This entry point is used by SkSurface_Gpu::onCopyOnWrite so it must create a
+ // kExact-backed render target context.
+ auto newRTC = MakeRenderTargetContext(this->context(),
+ budgeted,
+ this->imageInfo(),
+ fRenderTargetContext->numSamples(),
+ fRenderTargetContext->origin(),
+ &this->surfaceProps(),
+ fRenderTargetContext->mipMapped());
+ if (!newRTC) {
+ return;
+ }
+ this->replaceRenderTargetContext(std::move(newRTC), shouldRetainContent);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawPaint(const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPaint", fContext.get());
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawPaint(this->clip(), std::move(grPaint), this->ctm());
+}
+
+static inline GrPrimitiveType point_mode_to_primitive_type(SkCanvas::PointMode mode) {
+ switch (mode) {
+ case SkCanvas::kPoints_PointMode:
+ return GrPrimitiveType::kPoints;
+ case SkCanvas::kLines_PointMode:
+ return GrPrimitiveType::kLines;
+ case SkCanvas::kPolygon_PointMode:
+ return GrPrimitiveType::kLineStrip;
+ }
+ SK_ABORT("Unexpected mode");
+}
+
+void SkGpuDevice::drawPoints(SkCanvas::PointMode mode,
+ size_t count, const SkPoint pts[], const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPoints", fContext.get());
+ SkScalar width = paint.getStrokeWidth();
+ if (width < 0) {
+ return;
+ }
+
+ if (paint.getPathEffect() && 2 == count && SkCanvas::kLines_PointMode == mode) {
+ GrStyle style(paint, SkPaint::kStroke_Style);
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint,
+ this->ctm(), &grPaint)) {
+ return;
+ }
+ SkPath path;
+ path.setIsVolatile(true);
+ path.moveTo(pts[0]);
+ path.lineTo(pts[1]);
+ fRenderTargetContext->drawPath(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), path, style);
+ return;
+ }
+
+ SkScalar scales[2];
+ bool isHairline = (0 == width) || (1 == width && this->ctm().getMinMaxScales(scales) &&
+ SkScalarNearlyEqual(scales[0], 1.f) &&
+ SkScalarNearlyEqual(scales[1], 1.f));
+ // We only handle non-antialiased hairlines and paints without path effects or mask filters;
+ // otherwise we let SkDraw call our drawPath().
+ if (!isHairline || paint.getPathEffect() || paint.getMaskFilter() || paint.isAntiAlias()) {
+ SkRasterClip rc(this->devClipBounds());
+ SkDraw draw;
+ draw.fDst = SkPixmap(SkImageInfo::MakeUnknown(this->width(), this->height()), nullptr, 0);
+ draw.fMatrix = &this->ctm();
+ draw.fRC = &rc;
+ draw.drawPoints(mode, count, pts, paint, this);
+ return;
+ }
+
+ GrPrimitiveType primitiveType = point_mode_to_primitive_type(mode);
+
+ const SkMatrix* viewMatrix = &this->ctm();
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // This offsetting in device space matches the expectations of the Android framework for non-AA
+ // points and lines.
+ SkMatrix tempMatrix;
+ if (GrIsPrimTypeLines(primitiveType) || GrPrimitiveType::kPoints == primitiveType) {
+ tempMatrix = *viewMatrix;
+ static const SkScalar kOffset = 0.063f; // Just greater than 1/16.
+ tempMatrix.postTranslate(kOffset, kOffset);
+ viewMatrix = &tempMatrix;
+ }
+#endif
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, *viewMatrix,
+ &grPaint)) {
+ return;
+ }
+
+ static constexpr SkVertices::VertexMode kIgnoredMode = SkVertices::kTriangles_VertexMode;
+ sk_sp<SkVertices> vertices = SkVertices::MakeCopy(kIgnoredMode, SkToS32(count), pts, nullptr,
+ nullptr);
+
+ fRenderTargetContext->drawVertices(this->clip(), std::move(grPaint), *viewMatrix,
+ std::move(vertices), nullptr, 0, &primitiveType);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRect(const SkRect& rect, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawRect", fContext.get());
+
+ GrStyle style(paint);
+
+ // There are a couple of reasons we might need to call drawPath.
+ if (paint.getMaskFilter() || paint.getPathEffect()) {
+ GrShape shape(rect, style);
+
+ GrBlurUtils::drawShapeWithMaskFilter(fContext.get(), fRenderTargetContext.get(),
+ this->clip(), paint, this->ctm(), shape);
+ return;
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawRect(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), rect, &style);
+}
+
+void SkGpuDevice::drawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color,
+ SkBlendMode mode) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawEdgeAAQuad", fContext.get());
+
+ SkPMColor4f dstColor = SkColor4fPrepForDst(color, fRenderTargetContext->colorInfo()).premul();
+
+ GrPaint grPaint;
+ grPaint.setColor4f(dstColor);
+ if (mode != SkBlendMode::kSrcOver) {
+ grPaint.setXPFactory(SkBlendMode_AsXPFactory(mode));
+ }
+
+ // This is exclusively meant for tiling operations, so keep AA enabled to handle MSAA seaming
+ GrQuadAAFlags grAA = SkToGrQuadAAFlags(aaFlags);
+ if (clip) {
+ // Use fillQuadWithEdgeAA
+ fRenderTargetContext->fillQuadWithEdgeAA(this->clip(), std::move(grPaint), GrAA::kYes, grAA,
+ this->ctm(), clip, nullptr);
+ } else {
+ // Use fillRectWithEdgeAA to preserve mathematical properties of dst being rectangular
+ fRenderTargetContext->fillRectWithEdgeAA(this->clip(), std::move(grPaint), GrAA::kYes, grAA,
+ this->ctm(), rect);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawRRect", fContext.get());
+
+ SkMaskFilterBase* mf = as_MFB(paint.getMaskFilter());
+ if (mf) {
+ if (mf->hasFragmentProcessor()) {
+ mf = nullptr; // already handled in SkPaintToGrPaint
+ }
+ }
+
+ GrStyle style(paint);
+
+ if (mf || style.pathEffect()) {
+ // A path effect will presumably transform this rrect into something else.
+ GrShape shape(rrect, style);
+
+ GrBlurUtils::drawShapeWithMaskFilter(fContext.get(), fRenderTargetContext.get(),
+ this->clip(), paint, this->ctm(), shape);
+ return;
+ }
+
+ SkASSERT(!style.pathEffect());
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawRRect(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), rrect, style);
+}
+
+
+void SkGpuDevice::drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawDRRect", fContext.get());
+ if (outer.isEmpty()) {
+ return;
+ }
+
+ if (inner.isEmpty()) {
+ return this->drawRRect(outer, paint);
+ }
+
+ SkStrokeRec stroke(paint);
+
+ if (stroke.isFillStyle() && !paint.getMaskFilter() && !paint.getPathEffect()) {
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint,
+ this->ctm(), &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawDRRect(this->clip(), std::move(grPaint),
+ GrAA(paint.isAntiAlias()), this->ctm(), outer, inner);
+ return;
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(outer);
+ path.addRRect(inner);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+
+ // TODO: We are losing the possible mutability of the path here but this should probably be
+ // fixed by upgrading GrShape to handle DRRects.
+ GrShape shape(path, paint);
+
+ GrBlurUtils::drawShapeWithMaskFilter(fContext.get(), fRenderTargetContext.get(), this->clip(),
+ paint, this->ctm(), shape);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ if (paint.getMaskFilter()) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ path.setIsVolatile(true);
+ return this->drawPath(path, paint, true);
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawRegion(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), region, GrStyle(paint));
+}
+
+void SkGpuDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawOval", fContext.get());
+
+ if (paint.getMaskFilter()) {
+ // The RRect path can handle special case blurring
+ SkRRect rr = SkRRect::MakeOval(oval);
+ return this->drawRRect(rr, paint);
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawOval(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), oval, GrStyle(paint));
+}
+
+void SkGpuDevice::drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawArc", fContext.get());
+ if (paint.getMaskFilter()) {
+ this->INHERITED::drawArc(oval, startAngle, sweepAngle, useCenter, paint);
+ return;
+ }
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->drawArc(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), oval, startAngle, sweepAngle, useCenter,
+ GrStyle(paint));
+}
+
+#include "include/core/SkMaskFilter.h"
+
+///////////////////////////////////////////////////////////////////////////////
+void SkGpuDevice::drawStrokedLine(const SkPoint points[2],
+ const SkPaint& origPaint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawStrokedLine", fContext.get());
+ // Adding support for round capping would require a
+ // GrRenderTargetContext::fillRRectWithLocalMatrix entry point
+ SkASSERT(SkPaint::kRound_Cap != origPaint.getStrokeCap());
+ SkASSERT(SkPaint::kStroke_Style == origPaint.getStyle());
+ SkASSERT(!origPaint.getPathEffect());
+ SkASSERT(!origPaint.getMaskFilter());
+
+ const SkScalar halfWidth = 0.5f * origPaint.getStrokeWidth();
+ SkASSERT(halfWidth > 0);
+
+ SkVector v = points[1] - points[0];
+
+ SkScalar length = SkPoint::Normalize(&v);
+ if (!length) {
+ v.fX = 1.0f;
+ v.fY = 0.0f;
+ }
+
+ SkPaint newPaint(origPaint);
+ newPaint.setStyle(SkPaint::kFill_Style);
+
+ SkScalar xtraLength = 0.0f;
+ if (SkPaint::kButt_Cap != origPaint.getStrokeCap()) {
+ xtraLength = halfWidth;
+ }
+
+ SkPoint mid = points[0] + points[1];
+ mid.scale(0.5f);
+
+ SkRect rect = SkRect::MakeLTRB(mid.fX-halfWidth, mid.fY - 0.5f*length - xtraLength,
+ mid.fX+halfWidth, mid.fY + 0.5f*length + xtraLength);
+ SkMatrix m;
+ m.setSinCos(v.fX, -v.fY, mid.fX, mid.fY);
+
+ SkMatrix local = m;
+
+ m.postConcat(this->ctm());
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), newPaint, m,
+ &grPaint)) {
+ return;
+ }
+
+ fRenderTargetContext->fillRectWithLocalMatrix(
+ this->clip(), std::move(grPaint), GrAA(newPaint.isAntiAlias()), m, rect, local);
+}
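+
+// Worked example of the construction above (illustrative numbers): for the segment
+// (0,0) -> (10,0) with stroke width 4 and butt caps, v = (1,0), length = 10, mid = (5,0), and
+// halfWidth = 2, so rect = (3, -5, 7, 5) (L,T,R,B). setSinCos(1, 0, 5, 0) then rotates that
+// vertically-oriented rect 90 degrees about mid, mapping it onto the axis-aligned quad
+// (0, -2) - (10, 2): exactly the filled stroke of the original line.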
+
+void SkGpuDevice::drawPath(const SkPath& origSrcPath, const SkPaint& paint, bool pathIsMutable) {
+ ASSERT_SINGLE_OWNER
+ if (!origSrcPath.isInverseFillType() && !paint.getPathEffect()) {
+ SkPoint points[2];
+ if (SkPaint::kStroke_Style == paint.getStyle() && paint.getStrokeWidth() > 0 &&
+ !paint.getMaskFilter() && SkPaint::kRound_Cap != paint.getStrokeCap() &&
+ this->ctm().preservesRightAngles() && origSrcPath.isLine(points)) {
+ // Path-based stroking looks better for thin rects
+ SkScalar strokeWidth = this->ctm().getMaxScale() * paint.getStrokeWidth();
+ if (strokeWidth >= 1.0f) {
+ // Round capping support is currently disabled because it would require a RRect
+ // GrDrawOp that takes a localMatrix.
+ this->drawStrokedLine(points, paint);
+ return;
+ }
+ }
+ }
+
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPath", fContext.get());
+ if (!paint.getMaskFilter()) {
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), paint,
+ this->ctm(), &grPaint)) {
+ return;
+ }
+ fRenderTargetContext->drawPath(this->clip(), std::move(grPaint), GrAA(paint.isAntiAlias()),
+ this->ctm(), origSrcPath, GrStyle(paint));
+ return;
+ }
+
+ // TODO: losing possible mutability of 'origSrcPath' here
+ GrShape shape(origSrcPath, paint);
+
+ GrBlurUtils::drawShapeWithMaskFilter(fContext.get(), fRenderTargetContext.get(), this->clip(),
+ paint, this->ctm(), shape);
+}
+
+static const int kBmpSmallTileSize = 1 << 10;
+
+static inline int get_tile_count(const SkIRect& srcRect, int tileSize) {
+ int tilesX = (srcRect.fRight / tileSize) - (srcRect.fLeft / tileSize) + 1;
+ int tilesY = (srcRect.fBottom / tileSize) - (srcRect.fTop / tileSize) + 1;
+ return tilesX * tilesY;
+}
+
+static int determine_tile_size(const SkIRect& src, int maxTileSize) {
+ if (maxTileSize <= kBmpSmallTileSize) {
+ return maxTileSize;
+ }
+
+ size_t maxTileTotalTileSize = get_tile_count(src, maxTileSize);
+ size_t smallTotalTileSize = get_tile_count(src, kBmpSmallTileSize);
+
+ maxTileTotalTileSize *= maxTileSize * maxTileSize;
+ smallTotalTileSize *= kBmpSmallTileSize * kBmpSmallTileSize;
+
+ if (maxTileTotalTileSize > 2 * smallTotalTileSize) {
+ return kBmpSmallTileSize;
+ } else {
+ return maxTileSize;
+ }
+}
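+
+// Worked example of the heuristic above (illustrative numbers, not from the sources): for a
+// clipped src of (0, 0, 5000, 600) and maxTileSize = 4096, max-sized tiles yield
+// get_tile_count = 2x1 = 2 tiles covering 2 * 4096^2 = 33,554,432 texels, while 1024-sized
+// tiles yield 5x1 = 5 tiles covering 5 * 1024^2 = 5,242,880 texels. Since the former exceeds
+// twice the latter, determine_tile_size() returns kBmpSmallTileSize (1024).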
+
+// Given a bitmap, an optional src rect, and a context with a clip and matrix, determine which
+// pixels from the bitmap are necessary.
+static void determine_clipped_src_rect(int width, int height,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect,
+ const SkISize& imageSize,
+ const SkRect* srcRectPtr,
+ SkIRect* clippedSrcIRect) {
+ clip.getConservativeBounds(width, height, clippedSrcIRect, nullptr);
+ SkMatrix inv = SkMatrix::Concat(viewMatrix, srcToDstRect);
+ if (!inv.invert(&inv)) {
+ clippedSrcIRect->setEmpty();
+ return;
+ }
+ SkRect clippedSrcRect = SkRect::Make(*clippedSrcIRect);
+ inv.mapRect(&clippedSrcRect);
+ if (srcRectPtr) {
+ if (!clippedSrcRect.intersect(*srcRectPtr)) {
+ clippedSrcIRect->setEmpty();
+ return;
+ }
+ }
+ clippedSrcRect.roundOut(clippedSrcIRect);
+ SkIRect bmpBounds = SkIRect::MakeSize(imageSize);
+ if (!clippedSrcIRect->intersect(bmpBounds)) {
+ clippedSrcIRect->setEmpty();
+ }
+}
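+
+// Worked example (illustrative numbers): with a 2x-scale view matrix, an identity srcToDstRect,
+// and conservative clip bounds of (0, 0, 256, 256), the inverse maps the clip to
+// (0, 0, 128, 128) in src space. Intersecting a srcRect of (20, 20, 200, 200) gives
+// (20, 20, 128, 128), which already lies inside a 200x150 image's bounds, so only that subset
+// of the bitmap is needed for the draw.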
+
+const GrCaps* SkGpuDevice::caps() const {
+ return fContext->priv().caps();
+}
+
+bool SkGpuDevice::shouldTileImageID(uint32_t imageID,
+ const SkIRect& imageRect,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect,
+ const GrSamplerState& params,
+ const SkRect* srcRectPtr,
+ int maxTileSize,
+ int* tileSize,
+ SkIRect* clippedSubset) const {
+ ASSERT_SINGLE_OWNER
+ // If it's larger than the max tile size, then we have no choice but to tile.
+ if (imageRect.width() > maxTileSize || imageRect.height() > maxTileSize) {
+ determine_clipped_src_rect(fRenderTargetContext->width(), fRenderTargetContext->height(),
+ this->clip(), viewMatrix, srcToDstRect, imageRect.size(),
+ srcRectPtr, clippedSubset);
+ *tileSize = determine_tile_size(*clippedSubset, maxTileSize);
+ return true;
+ }
+
+ // If the image would only produce 4 tiles of the smaller size, don't bother tiling it.
+ const size_t area = imageRect.width() * imageRect.height();
+ if (area < 4 * kBmpSmallTileSize * kBmpSmallTileSize) {
+ return false;
+ }
+
+ // At this point we know we could do the draw by uploading the entire bitmap
+ // as a texture. However, if the texture would be large compared to the
+ // cache size and we don't require most of it for this draw then tile to
+ // reduce the amount of upload and cache spill.
+
+ // The assumption here is that the SW bitmap size is a good proxy for its size as
+ // a texture.
+ size_t bmpSize = area * sizeof(SkPMColor); // assume 32-bit pixels
+ size_t cacheSize = fContext->getResourceCacheLimit();
+ if (bmpSize < cacheSize / 2) {
+ return false;
+ }
+
+ // Figure out how much of the src we will need based on the src rect and clipping. Reject if
+ // tiling memory savings would be < 50%.
+ determine_clipped_src_rect(fRenderTargetContext->width(), fRenderTargetContext->height(),
+ this->clip(), viewMatrix, srcToDstRect, imageRect.size(), srcRectPtr,
+ clippedSubset);
+ *tileSize = kBmpSmallTileSize; // We already know the whole bitmap fits in one max-sized tile.
+ size_t usedTileBytes = get_tile_count(*clippedSubset, kBmpSmallTileSize) *
+ kBmpSmallTileSize * kBmpSmallTileSize *
+ sizeof(SkPMColor); // assume 32-bit pixels
+
+ return usedTileBytes * 2 < bmpSize;
+}
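+
+// Worked example of the rejection logic above (illustrative numbers): a 4000x3000 bitmap is
+// area * 4 = 48MB as a 32-bit texture. With a 96MB resource cache, bmpSize (48MB) is not below
+// cacheSize/2 (48MB), so we keep going. If the clipped subset then needs six 1024x1024 tiles
+// (6 * 1024^2 * 4 = ~25.2MB), usedTileBytes * 2 (~50.3MB) is not < bmpSize (48MB), so we do not
+// tile; a smaller visible subset of three tiles (~12.6MB) would tile, since ~25.2MB < 48MB.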
+
+bool SkGpuDevice::shouldTileImage(const SkImage* image, const SkRect* srcRectPtr,
+ SkCanvas::SrcRectConstraint constraint, SkFilterQuality quality,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect) const {
+ ASSERT_SINGLE_OWNER
+ // If image is explicitly texture backed then we shouldn't get here.
+ SkASSERT(!image->isTextureBacked());
+
+ GrSamplerState samplerState;
+ bool doBicubic;
+ GrSamplerState::Filter textureFilterMode = GrSkFilterQualityToGrFilterMode(
+ image->width(), image->height(), quality, viewMatrix, srcToDstRect,
+ fContext->priv().options().fSharpenMipmappedTextures, &doBicubic);
+
+ int tileFilterPad;
+ if (doBicubic) {
+ tileFilterPad = GrBicubicEffect::kFilterTexelPad;
+ } else if (GrSamplerState::Filter::kNearest == textureFilterMode) {
+ tileFilterPad = 0;
+ } else {
+ tileFilterPad = 1;
+ }
+ samplerState.setFilterMode(textureFilterMode);
+
+ int maxTileSize = this->caps()->maxTileSize() - 2 * tileFilterPad;
+
+ // These are outputs, which we safely ignore, as we just want to know the predicate.
+ int outTileSize;
+ SkIRect outClippedSrcRect;
+
+ return this->shouldTileImageID(image->unique(), image->bounds(), viewMatrix, srcToDstRect,
+ samplerState, srcRectPtr, maxTileSize, &outTileSize,
+ &outClippedSrcRect);
+}
+
+// This method outsets 'iRect' by 'outset' all around and then clamps its extents to
+// 'clamp'. 'offset' is adjusted to remain positioned over the top-left corner
+// of 'iRect' for all possible outsets/clamps.
+static inline void clamped_outset_with_offset(SkIRect* iRect,
+ int outset,
+ SkPoint* offset,
+ const SkIRect& clamp) {
+ iRect->outset(outset, outset);
+
+ int leftClampDelta = clamp.fLeft - iRect->fLeft;
+ if (leftClampDelta > 0) {
+ offset->fX -= outset - leftClampDelta;
+ iRect->fLeft = clamp.fLeft;
+ } else {
+ offset->fX -= outset;
+ }
+
+ int topClampDelta = clamp.fTop - iRect->fTop;
+ if (topClampDelta > 0) {
+ offset->fY -= outset - topClampDelta;
+ iRect->fTop = clamp.fTop;
+ } else {
+ offset->fY -= outset;
+ }
+
+ if (iRect->fRight > clamp.fRight) {
+ iRect->fRight = clamp.fRight;
+ }
+ if (iRect->fBottom > clamp.fBottom) {
+ iRect->fBottom = clamp.fBottom;
+ }
+}
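+
+// Worked example (illustrative numbers): with iRect = (0, 5, 10, 15) (L,T,R,B), outset = 2, and
+// clamp = (0, 0, 100, 100), the outset rect is (-2, 3, 12, 17). The left edge is clamped back to
+// 0, so offset->fX is reduced by outset - leftClampDelta = 2 - 2 = 0; the top edge is not
+// clamped, so offset->fY is reduced by the full outset (2). The result is iRect = (0, 3, 12, 17)
+// with offset shifted by (0, -2), keeping it over iRect's original top-left corner.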
+
+// Break 'bitmap' into several tiles to draw it since it has already
+// been determined to be too large to fit in VRAM
+void SkGpuDevice::drawTiledBitmap(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& dstMatrix,
+ const SkRect& srcRect,
+ const SkIRect& clippedSrcIRect,
+ const GrSamplerState& params,
+ const SkPaint& origPaint,
+ SkCanvas::SrcRectConstraint constraint,
+ int tileSize,
+ bool bicubic) {
+ ASSERT_SINGLE_OWNER
+
+ // This is the funnel for all paths that draw tiled bitmaps/images. Log histogram entries.
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", true);
+ LogDrawScaleFactor(viewMatrix, SkMatrix::I(), origPaint.getFilterQuality());
+
+ const SkPaint* paint = &origPaint;
+ SkPaint tempPaint;
+ if (origPaint.isAntiAlias() && fRenderTargetContext->numSamples() <= 1) {
+ // Drop antialiasing to avoid seams at tile boundaries.
+ tempPaint = origPaint;
+ tempPaint.setAntiAlias(false);
+ paint = &tempPaint;
+ }
+ SkRect clippedSrcRect = SkRect::Make(clippedSrcIRect);
+
+ int nx = bitmap.width() / tileSize;
+ int ny = bitmap.height() / tileSize;
+ for (int x = 0; x <= nx; x++) {
+ for (int y = 0; y <= ny; y++) {
+ SkRect tileR;
+ tileR.setLTRB(SkIntToScalar(x * tileSize), SkIntToScalar(y * tileSize),
+ SkIntToScalar((x + 1) * tileSize), SkIntToScalar((y + 1) * tileSize));
+
+ if (!SkRect::Intersects(tileR, clippedSrcRect)) {
+ continue;
+ }
+
+ if (!tileR.intersect(srcRect)) {
+ continue;
+ }
+
+ SkIRect iTileR;
+ tileR.roundOut(&iTileR);
+ SkVector offset = SkPoint::Make(SkIntToScalar(iTileR.fLeft),
+ SkIntToScalar(iTileR.fTop));
+ SkRect rectToDraw = tileR;
+ dstMatrix.mapRect(&rectToDraw);
+ if (GrSamplerState::Filter::kNearest != params.filter() || bicubic) {
+ SkIRect iClampRect;
+
+ if (SkCanvas::kFast_SrcRectConstraint == constraint) {
+ // In bleed mode we want to always expand the tile on all edges
+ // but stay within the bitmap bounds
+ iClampRect = SkIRect::MakeWH(bitmap.width(), bitmap.height());
+ } else {
+ // In texture-domain/clamp mode we only want to expand the
+ // tile on edges interior to "srcRect" (i.e., we don't want
+ // to bleed across the original clamped edges)
+ srcRect.roundOut(&iClampRect);
+ }
+ int outset = bicubic ? GrBicubicEffect::kFilterTexelPad : 1;
+ clamped_outset_with_offset(&iTileR, outset, &offset, iClampRect);
+ }
+
+ SkBitmap tmpB;
+ if (bitmap.extractSubset(&tmpB, iTileR)) {
+ // now offset it to make it "local" to our tmp bitmap
+ tileR.offset(-offset.fX, -offset.fY);
+ // This determination has been de-optimized: conservatively assume a texture domain is needed.
+ bool needsTextureDomain = true;
+ this->drawBitmapTile(tmpB,
+ viewMatrix,
+ rectToDraw,
+ tileR,
+ params,
+ *paint,
+ constraint,
+ bicubic,
+ needsTextureDomain);
+ }
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapTile(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkRect& dstRect,
+ const SkRect& srcRect,
+ const GrSamplerState& samplerState,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint,
+ bool bicubic,
+ bool needsTextureDomain) {
+ // We should have already handled bitmaps larger than the max texture size.
+ SkASSERT(bitmap.width() <= this->caps()->maxTextureSize() &&
+ bitmap.height() <= this->caps()->maxTextureSize());
+ // We should be respecting the max tile size by the time we get here.
+ SkASSERT(bitmap.width() <= this->caps()->maxTileSize() &&
+ bitmap.height() <= this->caps()->maxTileSize());
+ SkASSERT(!samplerState.isRepeated());
+
+ SkScalar scales[2] = {1.f, 1.f};
+ sk_sp<GrTextureProxy> proxy =
+ GrRefCachedBitmapTextureProxy(fContext.get(), bitmap, samplerState, scales);
+ if (!proxy) {
+ return;
+ }
+
+ // Compute a matrix that maps the rect we will draw to the src rect.
+ SkMatrix texMatrix = SkMatrix::MakeRectToRect(dstRect, srcRect, SkMatrix::kFill_ScaleToFit);
+ texMatrix.postScale(scales[0], scales[1]);
+
+ GrColorType srcColorType = SkColorTypeToGrColorType(bitmap.colorType());
+
+ // Construct a GrPaint by setting the bitmap texture as the first effect and then configuring
+ // the rest from the SkPaint.
+ std::unique_ptr<GrFragmentProcessor> fp;
+
+ if (needsTextureDomain && (SkCanvas::kStrict_SrcRectConstraint == constraint)) {
+ // Use a constrained texture domain to avoid color bleeding
+ SkRect domain;
+ if (srcRect.width() > SK_Scalar1) {
+ domain.fLeft = srcRect.fLeft + 0.5f;
+ domain.fRight = srcRect.fRight - 0.5f;
+ } else {
+ domain.fLeft = domain.fRight = srcRect.centerX();
+ }
+ if (srcRect.height() > SK_Scalar1) {
+ domain.fTop = srcRect.fTop + 0.5f;
+ domain.fBottom = srcRect.fBottom - 0.5f;
+ } else {
+ domain.fTop = domain.fBottom = srcRect.centerY();
+ }
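+ // For example (illustrative numbers), srcRect = (10, 30, 20, 40) (L,T,R,B) yields
+ // domain = (10.5, 30.5, 19.5, 39.5): insetting by half a texel keeps bilinear
+ // filtering from sampling outside the constrained src rect.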
+ if (bicubic) {
+ static constexpr auto kDir = GrBicubicEffect::Direction::kXY;
+ fp = GrBicubicEffect::Make(std::move(proxy), srcColorType, texMatrix, domain, kDir,
+ bitmap.alphaType());
+ } else {
+ fp = GrTextureDomainEffect::Make(std::move(proxy), srcColorType, texMatrix, domain,
+ GrTextureDomain::kClamp_Mode, samplerState.filter());
+ }
+ } else if (bicubic) {
+ SkASSERT(GrSamplerState::Filter::kNearest == samplerState.filter());
+ GrSamplerState::WrapMode wrapMode[2] = {samplerState.wrapModeX(), samplerState.wrapModeY()};
+ static constexpr auto kDir = GrBicubicEffect::Direction::kXY;
+ fp = GrBicubicEffect::Make(std::move(proxy), srcColorType, texMatrix, wrapMode, kDir,
+ bitmap.alphaType());
+ } else {
+ fp = GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, texMatrix, samplerState);
+ }
+
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), bitmap.colorSpace(), bitmap.alphaType(),
+ fRenderTargetContext->colorInfo().colorSpace());
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(this->context(), fRenderTargetContext->colorInfo(), paint,
+ viewMatrix, std::move(fp),
+ kAlpha_8_SkColorType == bitmap.colorType(), &grPaint)) {
+ return;
+ }
+
+ // Coverage-based AA would cause seams between tiles.
+ GrAA aa = GrAA(paint.isAntiAlias() && fRenderTargetContext->numSamples() > 1);
+ fRenderTargetContext->drawRect(this->clip(), std::move(grPaint), aa, viewMatrix, dstRect);
+}
+
+void SkGpuDevice::drawSprite(const SkBitmap& bitmap,
+ int left, int top, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawSprite", fContext.get());
+
+ if (fContext->priv().abandoned()) {
+ return;
+ }
+
+ sk_sp<SkSpecialImage> srcImg = this->makeSpecial(bitmap);
+ if (!srcImg) {
+ return;
+ }
+
+ this->drawSpecial(srcImg.get(), left, top, paint, nullptr, SkMatrix::I());
+}
+
+
+void SkGpuDevice::drawSpecial(SkSpecialImage* special, int left, int top, const SkPaint& paint,
+ SkImage* clipImage, const SkMatrix& clipMatrix) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawSpecial", fContext.get());
+
+ sk_sp<SkSpecialImage> result;
+ if (paint.getImageFilter()) {
+ SkIPoint offset = { 0, 0 };
+
+ result = this->filterTexture(special, left, top, &offset, paint.getImageFilter());
+ if (!result) {
+ return;
+ }
+
+ left += offset.fX;
+ top += offset.fY;
+ } else {
+ result = sk_ref_sp(special);
+ }
+
+ SkASSERT(result->isTextureBacked());
+ sk_sp<GrTextureProxy> proxy = result->asTextureProxyRef(this->context());
+ if (!proxy) {
+ return;
+ }
+
+ SkMatrix ctm = this->ctm();
+ ctm.postTranslate(-SkIntToScalar(left), -SkIntToScalar(top));
+
+ SkPaint tmpUnfiltered(paint);
+ if (tmpUnfiltered.getMaskFilter()) {
+ tmpUnfiltered.setMaskFilter(tmpUnfiltered.getMaskFilter()->makeWithMatrix(ctm));
+ }
+
+ tmpUnfiltered.setImageFilter(nullptr);
+
+ GrColorType srcColorType = SkColorTypeToGrColorType(result->colorType());
+ auto fp = GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, SkMatrix::I());
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), result->getColorSpace(), result->alphaType(),
+ fRenderTargetContext->colorInfo().colorSpace());
+ if (GrColorTypeIsAlphaOnly(SkColorTypeToGrColorType(result->colorType()))) {
+ fp = GrFragmentProcessor::MakeInputPremulAndMulByOutput(std::move(fp));
+ } else {
+ if (paint.getColor4f().isOpaque()) {
+ fp = GrFragmentProcessor::OverrideInput(std::move(fp), SK_PMColor4fWHITE, false);
+ } else {
+ fp = GrFragmentProcessor::MulChildByInputAlpha(std::move(fp));
+ }
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintReplaceShader(this->context(), fRenderTargetContext->colorInfo(),
+ tmpUnfiltered, std::move(fp), &grPaint)) {
+ return;
+ }
+
+ const SkIRect& subset = result->subset();
+ SkRect dstRect = SkRect::Make(SkIRect::MakeXYWH(left, top, subset.width(), subset.height()));
+ SkRect srcRect = SkRect::Make(subset);
+ if (clipImage) {
+ // Add the image as a simple texture effect applied to coverage. Accessing content outside
+ // of the clip image should behave as if it were a decal (i.e. zero coverage). However, to
+ // limit pixels touched and hardware checks, we draw the clip image geometry to get the
+ // decal effect.
+ GrSamplerState sampler = paint.getFilterQuality() > kNone_SkFilterQuality ?
+ GrSamplerState::ClampBilerp() : GrSamplerState::ClampNearest();
+ sk_sp<GrTextureProxy> clipProxy = as_IB(clipImage)->asTextureProxyRef(this->context(),
+ sampler, nullptr);
+ // Fold clip matrix into ctm
+ ctm.preConcat(clipMatrix);
+ SkMatrix inverseClipMatrix;
+
+ std::unique_ptr<GrFragmentProcessor> cfp;
+ if (clipProxy && ctm.invert(&inverseClipMatrix)) {
+ GrColorType srcColorType = SkColorTypeToGrColorType(clipImage->colorType());
+ cfp = GrSimpleTextureEffect::Make(std::move(clipProxy), srcColorType, inverseClipMatrix,
+ sampler);
+ if (srcColorType != GrColorType::kAlpha_8) {
+ cfp = GrFragmentProcessor::SwizzleOutput(std::move(cfp), GrSwizzle::AAAA());
+ }
+ }
+
+ if (cfp) {
+ // If the grPaint already has coverage, this adds an additional stage that multiplies
+ // the image's alpha channel with the prior coverage.
+ grPaint.addCoverageFragmentProcessor(std::move(cfp));
+
+ // Undo the offset that was needed for shader coord transforms to get the transform for
+ // the actual drawn geometry.
+ ctm.postTranslate(SkIntToScalar(left), SkIntToScalar(top));
+ inverseClipMatrix.preTranslate(-SkIntToScalar(left), -SkIntToScalar(top));
+ SkRect clipGeometry = SkRect::MakeWH(clipImage->width(), clipImage->height());
+ if (!clipGeometry.contains(inverseClipMatrix.mapRect(dstRect))) {
+ // Draw the clip geometry since it is smaller, using dstRect as an extra scissor
+ SkClipStack clip(this->cs());
+ clip.clipDevRect(SkIRect::MakeXYWH(left, top, subset.width(), subset.height()),
+ SkClipOp::kIntersect);
+ SkMatrix local = SkMatrix::Concat(SkMatrix::MakeRectToRect(
+ dstRect, srcRect, SkMatrix::kFill_ScaleToFit), ctm);
+ fRenderTargetContext->fillRectWithLocalMatrix(GrClipStackClip(&clip),
+ std::move(grPaint), GrAA(paint.isAntiAlias()), ctm, clipGeometry, local);
+ return;
+ }
+ // Else fall through and draw the subset since that is contained in the clip geometry
+ }
+ // Else there was some issue configuring the coverage FP, so just draw without the clip mask image.
+ }
+ // Draw directly in screen space, possibly with an extra coverage processor
+ fRenderTargetContext->fillRectToRect(this->clip(), std::move(grPaint),
+ GrAA(paint.isAntiAlias()), SkMatrix::I(), dstRect, srcRect);
+}
+
+void SkGpuDevice::drawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src, const SkRect& origDst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ ASSERT_SINGLE_OWNER
+ // The src rect is inferred to be the bmp bounds if not provided. Otherwise, the src rect must
+ // be clipped to the bmp bounds. To determine tiling parameters we need the filter mode which
+ // in turn requires knowing the src-to-dst mapping. If the src was clipped to the bmp bounds
+ // then we use the src-to-dst mapping to compute a new clipped dst rect.
+ const SkRect* dst = &origDst;
+ const SkRect bmpBounds = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+ // Compute matrix from the two rectangles
+ if (!src) {
+ src = &bmpBounds;
+ }
+
+ SkMatrix srcToDstMatrix;
+ if (!srcToDstMatrix.setRectToRect(*src, *dst, SkMatrix::kFill_ScaleToFit)) {
+ return;
+ }
+ SkRect tmpSrc, tmpDst;
+ if (src != &bmpBounds) {
+ if (!bmpBounds.contains(*src)) {
+ tmpSrc = *src;
+ if (!tmpSrc.intersect(bmpBounds)) {
+ return; // nothing to draw
+ }
+ src = &tmpSrc;
+ srcToDstMatrix.mapRect(&tmpDst, *src);
+ dst = &tmpDst;
+ }
+ }
+
+ int maxTileSize = this->caps()->maxTileSize();
+
+ // The tile code path doesn't currently support AA, so if the paint asked for AA and we could
+ // draw untiled, then we bypass the tiling check purely for optimization reasons.
+ bool useCoverageAA = fRenderTargetContext->numSamples() <= 1 &&
+ paint.isAntiAlias() && bitmap.width() <= maxTileSize &&
+ bitmap.height() <= maxTileSize;
+
+ bool skipTileCheck = useCoverageAA || paint.getMaskFilter();
+
+ if (!skipTileCheck) {
+ int tileSize;
+ SkIRect clippedSrcRect;
+
+ GrSamplerState sampleState;
+ bool doBicubic;
+ GrSamplerState::Filter textureFilterMode = GrSkFilterQualityToGrFilterMode(
+ bitmap.width(), bitmap.height(), paint.getFilterQuality(), this->ctm(),
+ srcToDstMatrix, fContext->priv().options().fSharpenMipmappedTextures, &doBicubic);
+
+ int tileFilterPad;
+
+ if (doBicubic) {
+ tileFilterPad = GrBicubicEffect::kFilterTexelPad;
+ } else if (GrSamplerState::Filter::kNearest == textureFilterMode) {
+ tileFilterPad = 0;
+ } else {
+ tileFilterPad = 1;
+ }
+ sampleState.setFilterMode(textureFilterMode);
+
+ int maxTileSizeForFilter = this->caps()->maxTileSize() - 2 * tileFilterPad;
+ if (this->shouldTileImageID(bitmap.getGenerationID(), bitmap.getSubset(), this->ctm(),
+ srcToDstMatrix, sampleState, src, maxTileSizeForFilter,
+ &tileSize, &clippedSrcRect)) {
+ this->drawTiledBitmap(bitmap, this->ctm(), srcToDstMatrix, *src, clippedSrcRect,
+ sampleState, paint, constraint, tileSize, doBicubic);
+ return;
+ }
+ }
+ GrBitmapTextureMaker maker(fContext.get(), bitmap);
+ this->drawTextureProducer(&maker, src, dst, constraint, this->ctm(), paint, true);
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::makeSpecial(const SkBitmap& bitmap) {
+ // TODO: this makes a tight copy of 'bitmap' but it doesn't have to be (given SkSpecialImage's
+ // semantics). Since this is cached we would have to bake the fit into the cache key though.
+ sk_sp<GrTextureProxy> proxy = GrMakeCachedBitmapProxy(fContext->priv().proxyProvider(),
+ bitmap);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ const SkIRect rect = SkIRect::MakeWH(proxy->width(), proxy->height());
+
+ // GrMakeCachedBitmapProxy creates a tight copy of 'bitmap' so we don't have to subset
+ // the special image
+ return SkSpecialImage::MakeDeferredFromGpu(fContext.get(),
+ rect,
+ bitmap.getGenerationID(),
+ std::move(proxy),
+ SkColorTypeToGrColorType(bitmap.colorType()),
+ bitmap.refColorSpace(),
+ &this->surfaceProps());
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::makeSpecial(const SkImage* image) {
+ SkPixmap pm;
+ if (image->isTextureBacked()) {
+ sk_sp<GrTextureProxy> proxy = as_IB(image)->asTextureProxyRef(this->context());
+
+ return SkSpecialImage::MakeDeferredFromGpu(fContext.get(),
+ SkIRect::MakeWH(image->width(), image->height()),
+ image->uniqueID(),
+ std::move(proxy),
+ SkColorTypeToGrColorType(image->colorType()),
+ image->refColorSpace(),
+ &this->surfaceProps());
+ } else if (image->peekPixels(&pm)) {
+ SkBitmap bm;
+
+ bm.installPixels(pm);
+ return this->makeSpecial(bm);
+ } else {
+ return nullptr;
+ }
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::snapSpecial(const SkIRect& subset, bool forceCopy) {
+ GrRenderTargetContext* rtc = this->accessRenderTargetContext();
+
+ // If we are wrapping a Vulkan secondary command buffer, then we can't snap off a special image
+ // since it would require us to make a copy of the underlying VkImage, which we don't have access
+ // to. Additionally, we can't stop and start the render pass that is used with the secondary
+ // command buffer.
+ if (rtc->wrapsVkSecondaryCB()) {
+ return nullptr;
+ }
+
+ SkASSERT(rtc->asSurfaceProxy());
+
+ SkIRect finalSubset = subset;
+ sk_sp<GrTextureProxy> proxy(rtc->asTextureProxyRef());
+ if (forceCopy || !proxy) {
+ // When the device doesn't have a texture, or a copy is requested, we create a temporary
+ // texture that matches the device contents
+ proxy = GrSurfaceProxy::Copy(fContext.get(),
+ rtc->asSurfaceProxy(),
+ rtc->colorInfo().colorType(),
+ GrMipMapped::kNo, // Don't auto generate mips
+ subset,
+ SkBackingFit::kApprox,
+ SkBudgeted::kYes); // Always budgeted
+ if (!proxy) {
+ return nullptr;
+ }
+
+ // Since this copied only the requested subset, the special image wrapping the proxy no
+ // longer needs the original subset.
+ finalSubset = SkIRect::MakeSize(proxy->isize());
+ }
+
+ GrColorType ct = SkColorTypeToGrColorType(this->imageInfo().colorType());
+
+ return SkSpecialImage::MakeDeferredFromGpu(fContext.get(),
+ finalSubset,
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(proxy),
+ ct,
+ this->imageInfo().refColorSpace(),
+ &this->surfaceProps());
+}
+
+void SkGpuDevice::drawDevice(SkBaseDevice* device,
+ int left, int top, const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+
+ ASSERT_SINGLE_OWNER
+ // Any clear of the source device must occur before we snap its contents below.
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawDevice", fContext.get());
+
+ // drawDevice is defined to be in device coords.
+ SkGpuDevice* dev = static_cast<SkGpuDevice*>(device);
+ sk_sp<SkSpecialImage> srcImg(dev->snapSpecial(SkIRect::MakeWH(dev->width(), dev->height())));
+ if (!srcImg) {
+ return;
+ }
+
+ this->drawSpecial(srcImg.get(), left, top, paint, nullptr, SkMatrix::I());
+}
+
+void SkGpuDevice::drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ ASSERT_SINGLE_OWNER
+ GrQuadAAFlags aaFlags = paint.isAntiAlias() ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
+ this->drawImageQuad(image, src, &dst, nullptr, GrAA(paint.isAntiAlias()), aaFlags, nullptr,
+ paint, constraint);
+}
+
+// When drawing nine-patches or n-patches, cap the filter quality at kBilerp.
+static GrSamplerState::Filter compute_lattice_filter_mode(const SkPaint& paint) {
+ if (paint.getFilterQuality() == kNone_SkFilterQuality) {
+ return GrSamplerState::Filter::kNearest;
+ }
+
+ return GrSamplerState::Filter::kBilerp;
+}
+
+void SkGpuDevice::drawImageNine(const SkImage* image,
+ const SkIRect& center, const SkRect& dst, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ uint32_t pinnedUniqueID;
+ auto iter = skstd::make_unique<SkLatticeIter>(image->width(), image->height(), center, dst);
+ if (sk_sp<GrTextureProxy> proxy = as_IB(image)->refPinnedTextureProxy(this->context(),
+ &pinnedUniqueID)) {
+ GrTextureAdjuster adjuster(this->context(), std::move(proxy),
+ SkColorTypeToGrColorType(image->colorType()), image->alphaType(),
+ pinnedUniqueID, image->colorSpace());
+ this->drawProducerLattice(&adjuster, std::move(iter), dst, paint);
+ } else {
+ SkBitmap bm;
+ if (image->isLazyGenerated()) {
+ GrImageTextureMaker maker(fContext.get(), image, SkImage::kAllow_CachingHint);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ GrBitmapTextureMaker maker(fContext.get(), bm);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ auto iter = skstd::make_unique<SkLatticeIter>(bitmap.width(), bitmap.height(), center, dst);
+ GrBitmapTextureMaker maker(fContext.get(), bitmap);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+}
+
+void SkGpuDevice::drawProducerLattice(GrTextureProducer* producer,
+ std::unique_ptr<SkLatticeIter> iter, const SkRect& dst,
+ const SkPaint& origPaint) {
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawProducerLattice", fContext.get());
+ SkTCopyOnFirstWrite<SkPaint> paint(&origPaint);
+
+ if (!producer->isAlphaOnly() && (paint->getColor() & 0x00FFFFFF) != 0x00FFFFFF) {
+ paint.writable()->setColor(SkColorSetARGB(origPaint.getAlpha(), 0xFF, 0xFF, 0xFF));
+ }
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithPrimitiveColor(this->context(), fRenderTargetContext->colorInfo(),
+ *paint, &grPaint)) {
+ return;
+ }
+
+ auto dstColorSpace = fRenderTargetContext->colorInfo().colorSpace();
+ const GrSamplerState::Filter filter = compute_lattice_filter_mode(*paint);
+ auto proxy = producer->refTextureProxyForParams(&filter, nullptr);
+ if (!proxy) {
+ return;
+ }
+ auto csxf = GrColorSpaceXform::Make(producer->colorSpace(), producer->alphaType(),
+ dstColorSpace, kPremul_SkAlphaType);
+
+ fRenderTargetContext->drawImageLattice(this->clip(), std::move(grPaint), this->ctm(),
+ std::move(proxy), producer->colorType(), std::move(csxf),
+ filter, std::move(iter), dst);
+}
+
+void SkGpuDevice::drawImageLattice(const SkImage* image,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ uint32_t pinnedUniqueID;
+ auto iter = skstd::make_unique<SkLatticeIter>(lattice, dst);
+ if (sk_sp<GrTextureProxy> proxy = as_IB(image)->refPinnedTextureProxy(this->context(),
+ &pinnedUniqueID)) {
+ GrTextureAdjuster adjuster(this->context(), std::move(proxy),
+ SkColorTypeToGrColorType(image->colorType()), image->alphaType(),
+ pinnedUniqueID, image->colorSpace());
+ this->drawProducerLattice(&adjuster, std::move(iter), dst, paint);
+ } else {
+ SkBitmap bm;
+ if (image->isLazyGenerated()) {
+ GrImageTextureMaker maker(fContext.get(), image, SkImage::kAllow_CachingHint);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ GrBitmapTextureMaker maker(fContext.get(), bm);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapLattice(const SkBitmap& bitmap,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ auto iter = skstd::make_unique<SkLatticeIter>(lattice, dst);
+ GrBitmapTextureMaker maker(fContext.get(), bitmap);
+ this->drawProducerLattice(&maker, std::move(iter), dst, paint);
+}
+
+static bool init_vertices_paint(GrContext* context, const GrColorInfo& colorInfo,
+ const SkPaint& skPaint, const SkMatrix& matrix, SkBlendMode bmode,
+ bool hasTexs, bool hasColors, GrPaint* grPaint) {
+ if (hasTexs && skPaint.getShader()) {
+ if (hasColors) {
+ // When there are texs and colors the shader and colors are combined using bmode.
+ return SkPaintToGrPaintWithXfermode(context, colorInfo, skPaint, matrix, bmode,
+ grPaint);
+ } else {
+ // We have a shader, but no colors to blend it against.
+ return SkPaintToGrPaint(context, colorInfo, skPaint, matrix, grPaint);
+ }
+ } else {
+ if (hasColors) {
+ // We have colors, but either have no shader or no texture coords (which implies that
+ // we should ignore the shader).
+ return SkPaintToGrPaintWithPrimitiveColor(context, colorInfo, skPaint, grPaint);
+ } else {
+ // No colors and no shaders. Just draw with the paint color.
+ return SkPaintToGrPaintNoShader(context, colorInfo, skPaint, grPaint);
+ }
+ }
+}
+
+void SkGpuDevice::wireframeVertices(SkVertices::VertexMode vmode, int vertexCount,
+ const SkPoint vertices[],
+ const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode bmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "wireframeVertices", fContext.get());
+
+ SkPaint copy(paint);
+ copy.setStyle(SkPaint::kStroke_Style);
+ copy.setStrokeWidth(0);
+
+ GrPaint grPaint;
+ // we ignore the shader since we have no texture coordinates.
+ if (!SkPaintToGrPaintNoShader(this->context(), fRenderTargetContext->colorInfo(), copy,
+ &grPaint)) {
+ return;
+ }
+
+ int triangleCount = 0;
+ int n = (nullptr == indices) ? vertexCount : indexCount;
+ switch (vmode) {
+ case SkVertices::kTriangles_VertexMode:
+ triangleCount = n / 3;
+ break;
+ case SkVertices::kTriangleStrip_VertexMode:
+ triangleCount = n - 2;
+ break;
+ case SkVertices::kTriangleFan_VertexMode:
+ SK_ABORT("Unexpected triangle fan.");
+ break;
+ }
+
+ VertState state(vertexCount, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(vmode);
+
+    // Number of indices needed to outline every triangle with kLines: six per triangle.
+ indexCount = triangleCount * 6;
+
+ static constexpr SkVertices::VertexMode kIgnoredMode = SkVertices::kTriangles_VertexMode;
+ SkVertices::Builder builder(kIgnoredMode, vertexCount, indexCount, 0);
+ memcpy(builder.positions(), vertices, vertexCount * sizeof(SkPoint));
+
+ uint16_t* lineIndices = builder.indices();
+ int i = 0;
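+    // Each triangle contributes three line segments, (f0,f1), (f1,f2) and (f2,f0),
+    // so every pass through the loop below writes six indices.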
+ while (vertProc(&state)) {
+ lineIndices[i] = state.f0;
+ lineIndices[i + 1] = state.f1;
+ lineIndices[i + 2] = state.f1;
+ lineIndices[i + 3] = state.f2;
+ lineIndices[i + 4] = state.f2;
+ lineIndices[i + 5] = state.f0;
+ i += 6;
+ }
+
+ GrPrimitiveType primitiveType = GrPrimitiveType::kLines;
+ fRenderTargetContext->drawVertices(this->clip(),
+ std::move(grPaint),
+ this->ctm(),
+ builder.detach(),
+ bones,
+ boneCount,
+ &primitiveType);
+}
+
+void SkGpuDevice::drawVertices(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode mode, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawVertices", fContext.get());
+
+ SkASSERT(vertices);
+ GrPaint grPaint;
+ bool hasColors = vertices->hasColors();
+ bool hasTexs = vertices->hasTexCoords();
+ if ((!hasTexs || !paint.getShader()) && !hasColors) {
+        // The dreaded wireframe mode. Fall back to wireframeVertices and go so slooooooow.
+ this->wireframeVertices(vertices->mode(), vertices->vertexCount(), vertices->positions(),
+ bones, boneCount, mode, vertices->indices(), vertices->indexCount(),
+ paint);
+ return;
+ }
+ if (!init_vertices_paint(fContext.get(), fRenderTargetContext->colorInfo(), paint, this->ctm(),
+ mode, hasTexs, hasColors, &grPaint)) {
+ return;
+ }
+ fRenderTargetContext->drawVertices(this->clip(), std::move(grPaint), this->ctm(),
+ sk_ref_sp(const_cast<SkVertices*>(vertices)),
+ bones, boneCount);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawShadow(const SkPath& path, const SkDrawShadowRec& rec) {
+
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawShadow", fContext.get());
+
+ if (!fRenderTargetContext->drawFastShadow(this->clip(), this->ctm(), path, rec)) {
+ // failed to find an accelerated case
+ this->INHERITED::drawShadow(path, rec);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawAtlas(const SkImage* atlas, const SkRSXform xform[],
+ const SkRect texRect[], const SkColor colors[], int count,
+ SkBlendMode mode, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawAtlas", fContext.get());
+
+ SkPaint p(paint);
+ p.setShader(atlas->makeShader());
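+    // Bind the atlas image as the paint's shader; the quads generated from 'xform' below then
+    // sample it through the local coords that 'texRect' provides.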
+
+ GrPaint grPaint;
+ if (colors) {
+ if (!SkPaintToGrPaintWithXfermode(this->context(), fRenderTargetContext->colorInfo(), p,
+ this->ctm(), (SkBlendMode)mode, &grPaint)) {
+ return;
+ }
+ } else {
+ if (!SkPaintToGrPaint(this->context(), fRenderTargetContext->colorInfo(), p, this->ctm(),
+ &grPaint)) {
+ return;
+ }
+ }
+
+ fRenderTargetContext->drawAtlas(
+ this->clip(), std::move(grPaint), this->ctm(), count, xform, texRect, colors);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawGlyphRunList(const SkGlyphRunList& glyphRunList) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawGlyphRunList", fContext.get());
+
+ // Check for valid input
+ const SkMatrix& ctm = this->ctm();
+ if (!ctm.isFinite() || !glyphRunList.allFontsFinite()) {
+ return;
+ }
+
+ fRenderTargetContext->drawGlyphRunList(this->clip(), ctm, glyphRunList);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawDrawable(SkDrawable* drawable, const SkMatrix* matrix, SkCanvas* canvas) {
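+    // A drawable may supply a native GPU recording; this fast path is only taken for Vulkan.
+    // If the drawable provides one, submit it directly rather than playing the drawable back
+    // through the canvas.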
+ GrBackendApi api = this->context()->backend();
+ if (GrBackendApi::kVulkan == api) {
+ const SkMatrix& ctm = canvas->getTotalMatrix();
+ const SkMatrix& combinedMatrix = matrix ? SkMatrix::Concat(ctm, *matrix) : ctm;
+ std::unique_ptr<SkDrawable::GpuDrawHandler> gpuDraw =
+ drawable->snapGpuDrawHandler(api, combinedMatrix, canvas->getDeviceClipBounds(),
+ this->imageInfo());
+ if (gpuDraw) {
+ fRenderTargetContext->drawDrawable(std::move(gpuDraw), drawable->getBounds());
+ return;
+ }
+ }
+ this->INHERITED::drawDrawable(drawable, matrix, canvas);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::flush() {
+ this->flush(SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+}
+
+GrSemaphoresSubmitted SkGpuDevice::flush(SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info) {
+ ASSERT_SINGLE_OWNER
+
+ return fRenderTargetContext->flush(access, info);
+}
+
+bool SkGpuDevice::wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores) {
+ ASSERT_SINGLE_OWNER
+
+ return fRenderTargetContext->waitOnSemaphores(numSemaphores, waitSemaphores);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBaseDevice* SkGpuDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint*) {
+ ASSERT_SINGLE_OWNER
+
+ SkSurfaceProps props(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+
+ // layers are never drawn in repeat modes, so we can request an approx
+ // match and ignore any padding.
+ SkBackingFit fit = kNever_TileUsage == cinfo.fTileUsage ? SkBackingFit::kApprox
+ : SkBackingFit::kExact;
+
+ SkASSERT(cinfo.fInfo.colorType() != kRGBA_1010102_SkColorType);
+
+ auto rtc = fContext->priv().makeDeferredRenderTargetContextWithFallback(
+ fit,
+ cinfo.fInfo.width(),
+ cinfo.fInfo.height(),
+ SkColorTypeToGrColorType(cinfo.fInfo.colorType()),
+ fRenderTargetContext->colorInfo().refColorSpace(),
+ fRenderTargetContext->numSamples(),
+ GrMipMapped::kNo,
+ kBottomLeft_GrSurfaceOrigin,
+ &props,
+ SkBudgeted::kYes,
+ fRenderTargetContext->asSurfaceProxy()->isProtected() ? GrProtected::kYes
+ : GrProtected::kNo);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ // Skia's convention is to only clear a device if it is non-opaque.
+ InitContents init = cinfo.fInfo.isOpaque() ? kUninit_InitContents : kClear_InitContents;
+
+ return SkGpuDevice::Make(fContext.get(), std::move(rtc), init).release();
+}
+
+sk_sp<SkSurface> SkGpuDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ ASSERT_SINGLE_OWNER
+ // TODO: Change the signature of newSurface to take a budgeted parameter.
+ static const SkBudgeted kBudgeted = SkBudgeted::kNo;
+ return SkSurface::MakeRenderTarget(fContext.get(), kBudgeted, info,
+ fRenderTargetContext->numSamples(),
+ fRenderTargetContext->origin(), &props);
+}
+
+SkImageFilterCache* SkGpuDevice::getImageFilterCache() {
+ ASSERT_SINGLE_OWNER
+ // We always return a transient cache, so it is freed after each
+ // filter traversal.
+ return SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize);
+}
+
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice.h b/gfx/skia/skia/src/gpu/SkGpuDevice.h
new file mode 100644
index 0000000000..a37d75245b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGpuDevice_DEFINED
+#define SkGpuDevice_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTypes.h"
+#include "src/core/SkClipStackDevice.h"
+#include "src/gpu/GrClipStackClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/SkGr.h"
+
+class GrAccelData;
+class GrTextureMaker;
+class GrTextureProducer;
+struct GrCachedLayer;
+
+class SkSpecialImage;
+
+/**
+ * Subclass of SkBaseDevice, which directs all drawing to the GrGpu owned by the
+ * canvas.
+ */
+class SkGpuDevice : public SkClipStackDevice {
+public:
+ enum InitContents {
+ kClear_InitContents,
+ kUninit_InitContents
+ };
+
+ /**
+ * Creates an SkGpuDevice from a GrRenderTargetContext whose backing width/height is
+     * different from its actual width/height (e.g., approx-match scratch texture).
+ */
+ static sk_sp<SkGpuDevice> Make(
+ GrContext*, std::unique_ptr<GrRenderTargetContext>, InitContents);
+
+ /**
+ * New device that will create an offscreen renderTarget based on the ImageInfo and
+ * sampleCount. The mipMapped flag tells the gpu to create the underlying render target with
+ * mips. The Budgeted param controls whether the device's backing store counts against the
+ * resource cache budget. On failure, returns nullptr.
+ * This entry point creates a kExact backing store. It is used when creating SkGpuDevices
+ * for SkSurfaces.
+ */
+ static sk_sp<SkGpuDevice> Make(GrContext*, SkBudgeted, const SkImageInfo&,
+ int sampleCount, GrSurfaceOrigin, const SkSurfaceProps*,
+ GrMipMapped mipMapped, InitContents);
+
+ ~SkGpuDevice() override {}
+
+ GrContext* context() const override { return fContext.get(); }
+
+ // set all pixels to 0
+ void clearAll();
+
+ void replaceRenderTargetContext(bool shouldRetainContent);
+ void replaceRenderTargetContext(std::unique_ptr<GrRenderTargetContext>,
+ bool shouldRetainContent);
+
+ GrRenderTargetContext* accessRenderTargetContext() override;
+
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode, size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& r, const SkPaint& paint) override;
+ void drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) override;
+ void drawRegion(const SkRegion& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) override;
+ void drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) override;
+ void drawBitmapRect(const SkBitmap&, const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint) override;
+ void drawSprite(const SkBitmap& bitmap, int x, int y,
+ const SkPaint& paint) override;
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override;
+ void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const SkPaint&) override;
+ void drawShadow(const SkPath&, const SkDrawShadowRec&) override;
+ void drawAtlas(const SkImage* atlas, const SkRSXform[], const SkRect[],
+ const SkColor[], int count, SkBlendMode, const SkPaint&) override;
+ void drawDevice(SkBaseDevice*, int x, int y, const SkPaint&) override;
+
+ void drawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint&, SkCanvas::SrcRectConstraint) override;
+
+ void drawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) override;
+ void drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) override;
+
+ void drawImageLattice(const SkImage*, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&) override;
+ void drawBitmapLattice(const SkBitmap&, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&) override;
+
+ void drawDrawable(SkDrawable*, const SkMatrix*, SkCanvas* canvas) override;
+
+ void drawSpecial(SkSpecialImage*, int left, int top, const SkPaint& paint,
+ SkImage*, const SkMatrix&) override;
+
+ void drawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], SkCanvas::QuadAAFlags aaFlags,
+ const SkColor4f& color, SkBlendMode mode) override;
+ void drawEdgeAAImageSet(const SkCanvas::ImageSetEntry[], int count, const SkPoint dstClips[],
+ const SkMatrix[], const SkPaint&, SkCanvas::SrcRectConstraint) override;
+
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial(const SkIRect&, bool = false) override;
+
+ void flush() override;
+ GrSemaphoresSubmitted flush(SkSurface::BackendSurfaceAccess access, const GrFlushInfo&);
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores);
+
+ bool onAccessPixels(SkPixmap*) override;
+
+protected:
+ bool onReadPixels(const SkPixmap&, int, int) override;
+ bool onWritePixels(const SkPixmap&, int, int) override;
+
+private:
+ // We want these unreffed in RenderTargetContext, GrContext order.
+ sk_sp<GrContext> fContext;
+ std::unique_ptr<GrRenderTargetContext> fRenderTargetContext;
+
+ enum Flags {
+ kNeedClear_Flag = 1 << 0, //!< Surface requires an initial clear
+ kIsOpaque_Flag = 1 << 1, //!< Hint from client that rendering to this device will be
+ // opaque even if the config supports alpha.
+ };
+ static bool CheckAlphaTypeAndGetFlags(const SkImageInfo* info, InitContents init,
+ unsigned* flags);
+
+ SkGpuDevice(GrContext*, std::unique_ptr<GrRenderTargetContext>, unsigned flags);
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ SkImageFilterCache* getImageFilterCache() override;
+
+ bool forceConservativeRasterClip() const override { return true; }
+
+ GrClipStackClip clip() const { return GrClipStackClip(&this->cs()); }
+
+ const GrCaps* caps() const;
+
+ /**
+ * Helper functions called by drawBitmapCommon. By the time these are called the SkDraw's
+     * matrix, clip, and the device's render target have already been set on GrContext.
+ */
+
+    // The tileSize and clippedSubset outputs will be valid only if true is returned.
+ bool shouldTileImageID(uint32_t imageID,
+ const SkIRect& imageRect,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRectMatrix,
+ const GrSamplerState& params,
+ const SkRect* srcRectPtr,
+ int maxTileSize,
+ int* tileSize,
+ SkIRect* clippedSubset) const;
+ // Just returns the predicate, not the out-tileSize or out-clippedSubset, as they are not
+ // needed at the moment.
+ bool shouldTileImage(const SkImage* image, const SkRect* srcRectPtr,
+ SkCanvas::SrcRectConstraint constraint, SkFilterQuality quality,
+ const SkMatrix& viewMatrix, const SkMatrix& srcToDstRect) const;
+
+ sk_sp<SkSpecialImage> filterTexture(SkSpecialImage*,
+ int left, int top,
+ SkIPoint* offset,
+ const SkImageFilter* filter);
+
+ // Splits bitmap into tiles of tileSize and draws them using separate textures for each tile.
+ void drawTiledBitmap(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstMatrix,
+ const SkRect& srcRect,
+ const SkIRect& clippedSrcRect,
+ const GrSamplerState& params,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint,
+ int tileSize,
+ bool bicubic);
+
+ // Used by drawTiledBitmap to draw each tile.
+ void drawBitmapTile(const SkBitmap&,
+ const SkMatrix& viewMatrix,
+ const SkRect& dstRect,
+ const SkRect& srcRect,
+ const GrSamplerState& samplerState,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint,
+ bool bicubic,
+ bool needsTextureDomain);
+
+ // If not null, dstClip must be contained inside dst and will also respect the edge AA flags.
+ // If 'preViewMatrix' is not null, final CTM will be this->ctm() * preViewMatrix.
+ void drawImageQuad(const SkImage*, const SkRect* src, const SkRect* dst,
+ const SkPoint dstClip[4], GrAA aa, GrQuadAAFlags aaFlags,
+ const SkMatrix* preViewMatrix, const SkPaint&, SkCanvas::SrcRectConstraint);
+
+ // TODO(michaelludwig): This can be removed once drawBitmapRect is removed from SkDevice
+ // so that drawImageQuad is the sole entry point into the draw-single-image op
+ void drawTextureProducer(GrTextureProducer*,
+ const SkRect* srcRect,
+ const SkRect* dstRect,
+ SkCanvas::SrcRectConstraint,
+ const SkMatrix& viewMatrix,
+ const SkPaint&,
+ bool attemptDrawTexture);
+
+ void drawProducerLattice(GrTextureProducer*, std::unique_ptr<SkLatticeIter>, const SkRect& dst,
+ const SkPaint&);
+
+ void drawStrokedLine(const SkPoint pts[2], const SkPaint&);
+
+ void wireframeVertices(SkVertices::VertexMode, int vertexCount, const SkPoint verts[],
+ const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const uint16_t indices[], int indexCount, const SkPaint&);
+
+ static std::unique_ptr<GrRenderTargetContext> MakeRenderTargetContext(GrContext*,
+ SkBudgeted,
+ const SkImageInfo&,
+ int sampleCount,
+ GrSurfaceOrigin,
+ const SkSurfaceProps*,
+ GrMipMapped);
+
+ friend class GrAtlasTextContext;
+ friend class SkSurface_Gpu; // for access to surfaceProps
+ typedef SkClipStackDevice INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp b/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp
new file mode 100644
index 0000000000..20c4976fd4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp
@@ -0,0 +1,608 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/SkGpuDevice.h"
+
+#include "include/core/SkYUVAIndex.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/gpu/GrBitmapTextureMaker.h"
+#include "src/gpu/GrBlurUtils.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrImageTextureMaker.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/GrTextureMaker.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/image/SkImage_Base.h"
+
+namespace {
+
+static inline bool use_shader(bool textureIsAlphaOnly, const SkPaint& paint) {
+ return textureIsAlphaOnly && paint.getShader();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Helper functions for dropping src rect constraint in bilerp mode.
+
+static const SkScalar kColorBleedTolerance = 0.001f;
+
+static bool has_aligned_samples(const SkRect& srcRect, const SkRect& transformedRect) {
+    // Detect pixel misalignment.
+ if (SkScalarAbs(SkScalarRoundToScalar(transformedRect.left()) - transformedRect.left()) < kColorBleedTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(transformedRect.top()) - transformedRect.top()) < kColorBleedTolerance &&
+ SkScalarAbs(transformedRect.width() - srcRect.width()) < kColorBleedTolerance &&
+ SkScalarAbs(transformedRect.height() - srcRect.height()) < kColorBleedTolerance) {
+ return true;
+ }
+ return false;
+}
+
+static bool may_color_bleed(const SkRect& srcRect,
+ const SkRect& transformedRect,
+ const SkMatrix& m,
+ int numSamples) {
+ // Only gets called if has_aligned_samples returned false.
+ // So we can assume that sampling is axis aligned but not texel aligned.
+ SkASSERT(!has_aligned_samples(srcRect, transformedRect));
+ SkRect innerSrcRect(srcRect), innerTransformedRect, outerTransformedRect(transformedRect);
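+    // Shrink the source rect by the filter footprint: a full pixel when multisampled, half a
+    // pixel otherwise.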
+ if (numSamples > 1) {
+ innerSrcRect.inset(SK_Scalar1, SK_Scalar1);
+ } else {
+ innerSrcRect.inset(SK_ScalarHalf, SK_ScalarHalf);
+ }
+ m.mapRect(&innerTransformedRect, innerSrcRect);
+
+ // The gap between outerTransformedRect and innerTransformedRect
+ // represents the projection of the source border area, which is
+ // problematic for color bleeding. We must check whether any
+ // destination pixels sample the border area.
+ outerTransformedRect.inset(kColorBleedTolerance, kColorBleedTolerance);
+ innerTransformedRect.outset(kColorBleedTolerance, kColorBleedTolerance);
+ SkIRect outer, inner;
+ outerTransformedRect.round(&outer);
+ innerTransformedRect.round(&inner);
+ // If the inner and outer rects round to the same result, it means the
+ // border does not overlap any pixel centers. Yay!
+ return inner != outer;
+}
+
+static bool can_ignore_bilerp_constraint(const GrTextureProducer& producer,
+ const SkRect& srcRect,
+ const SkMatrix& srcRectToDeviceSpace,
+ int numSamples) {
+ if (srcRectToDeviceSpace.rectStaysRect()) {
+ // sampling is axis-aligned
+ SkRect transformedRect;
+ srcRectToDeviceSpace.mapRect(&transformedRect, srcRect);
+
+ if (has_aligned_samples(srcRect, transformedRect) ||
+ !may_color_bleed(srcRect, transformedRect, srcRectToDeviceSpace, numSamples)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+enum class ImageDrawMode {
+ // Src and dst have been restricted to the image content. May need to clamp, no need to decal.
+ kOptimized,
+ // Src and dst are their original sizes, requires use of a decal instead of plain clamping.
+ // This is used when a dst clip is provided and extends outside of the optimized dst rect.
+ kDecal,
+    // Src or dst are empty, or do not intersect the image content, so don't draw anything.
+ kSkip
+};
+
+/**
+ * Optimize the src rect sampling area within an image (sized 'width' x 'height') such that
+ * 'outSrcRect' will be completely contained in the image's bounds. The corresponding rect
+ * to draw will be output to 'outDstRect'. The mapping between src and dst will be cached in
+ * 'srcToDst'. Outputs are not always updated when kSkip is returned.
+ *
+ * If 'origSrcRect' is null, implicitly use the image bounds. If 'origDstRect' is null, use the
+ * original src rect. 'dstClip' should be null when there is no additional clipping.
+ */
+static ImageDrawMode optimize_sample_area(const SkISize& image, const SkRect* origSrcRect,
+ const SkRect* origDstRect, const SkPoint dstClip[4],
+ SkRect* outSrcRect, SkRect* outDstRect,
+ SkMatrix* srcToDst) {
+ SkRect srcBounds = SkRect::MakeIWH(image.fWidth, image.fHeight);
+
+ SkRect src = origSrcRect ? *origSrcRect : srcBounds;
+ SkRect dst = origDstRect ? *origDstRect : src;
+
+ if (src.isEmpty() || dst.isEmpty()) {
+ return ImageDrawMode::kSkip;
+ }
+
+    if (origDstRect) {
+ srcToDst->setRectToRect(src, dst, SkMatrix::kFill_ScaleToFit);
+ } else {
+ srcToDst->setIdentity();
+ }
+
+ if (origSrcRect && !srcBounds.contains(src)) {
+ if (!src.intersect(srcBounds)) {
+ return ImageDrawMode::kSkip;
+ }
+ srcToDst->mapRect(&dst, src);
+
+ // Both src and dst have gotten smaller. If dstClip is provided, confirm it is still
+ // contained in dst, otherwise cannot optimize the sample area and must use a decal instead
+ if (dstClip) {
+ for (int i = 0; i < 4; ++i) {
+ if (!dst.contains(dstClip[i].fX, dstClip[i].fY)) {
+ // Must resort to using a decal mode restricted to the clipped 'src', and
+ // use the original dst rect (filling in src bounds as needed)
+ *outSrcRect = src;
+ *outDstRect = (origDstRect ? *origDstRect
+ : (origSrcRect ? *origSrcRect : srcBounds));
+ return ImageDrawMode::kDecal;
+ }
+ }
+ }
+ }
+
+ // The original src and dst were fully contained in the image, or there was no dst clip to
+ // worry about, or the clip was still contained in the restricted dst rect.
+ *outSrcRect = src;
+ *outDstRect = dst;
+ return ImageDrawMode::kOptimized;
+}
+
+/**
+ * Checks whether the paint is compatible with using GrRenderTargetContext::drawTexture. It is more
+ * efficient than the GrTextureProducer general case.
+ */
+static bool can_use_draw_texture(const SkPaint& paint) {
+ return (!paint.getColorFilter() && !paint.getShader() && !paint.getMaskFilter() &&
+ !paint.getImageFilter() && paint.getFilterQuality() < kMedium_SkFilterQuality);
+}
+
+// Assumes srcRect and dstRect have already been optimized to fit the proxy
+static void draw_texture(GrRenderTargetContext* rtc, const GrClip& clip, const SkMatrix& ctm,
+ const SkPaint& paint, const SkRect& srcRect, const SkRect& dstRect,
+ const SkPoint dstClip[4], GrAA aa, GrQuadAAFlags aaFlags,
+ SkCanvas::SrcRectConstraint constraint, sk_sp<GrTextureProxy> proxy,
+ const GrColorInfo& srcColorInfo) {
+ const GrColorInfo& dstInfo(rtc->colorInfo());
+ auto textureXform =
+ GrColorSpaceXform::Make(srcColorInfo.colorSpace(), srcColorInfo.alphaType(),
+ dstInfo.colorSpace(), kPremul_SkAlphaType);
+ GrSamplerState::Filter filter;
+ switch (paint.getFilterQuality()) {
+ case kNone_SkFilterQuality:
+ filter = GrSamplerState::Filter::kNearest;
+ break;
+ case kLow_SkFilterQuality:
+ filter = GrSamplerState::Filter::kBilerp;
+ break;
+ case kMedium_SkFilterQuality:
+ case kHigh_SkFilterQuality:
+ SK_ABORT("Quality level not allowed.");
+ }
+
+ // Must specify the strict constraint when the proxy is not functionally exact and the src
+ // rect would access pixels outside the proxy's content area without the constraint.
+ if (constraint != SkCanvas::kStrict_SrcRectConstraint &&
+ !GrProxyProvider::IsFunctionallyExact(proxy.get())) {
+ // Conservative estimate of how much a coord could be outset from src rect:
+ // 1/2 pixel for AA and 1/2 pixel for bilerp
+ float buffer = 0.5f * (aa == GrAA::kYes) +
+ 0.5f * (filter == GrSamplerState::Filter::kBilerp);
+ SkRect safeBounds = SkRect::MakeWH(proxy->width(), proxy->height());
+ safeBounds.inset(buffer, buffer);
+ if (!safeBounds.contains(srcRect)) {
+ constraint = SkCanvas::kStrict_SrcRectConstraint;
+ }
+ }
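+    // For alpha-only textures the paint supplies the color (converted for the dst and
+    // premultiplied); otherwise the texture supplies RGB and is only modulated by the paint's
+    // alpha.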
+ SkPMColor4f color;
+ if (GrColorTypeIsAlphaOnly(srcColorInfo.colorType())) {
+ color = SkColor4fPrepForDst(paint.getColor4f(), dstInfo).premul();
+ } else {
+ float paintAlpha = paint.getColor4f().fA;
+ color = { paintAlpha, paintAlpha, paintAlpha, paintAlpha };
+ }
+
+ if (dstClip) {
+ // Get source coords corresponding to dstClip
+ SkPoint srcQuad[4];
+ GrMapRectPoints(dstRect, srcRect, dstClip, srcQuad, 4);
+
+ rtc->drawTextureQuad(clip, std::move(proxy), srcColorInfo.colorType(), filter,
+ paint.getBlendMode(), color, srcQuad, dstClip, aa, aaFlags,
+ constraint == SkCanvas::kStrict_SrcRectConstraint ? &srcRect : nullptr,
+ ctm, std::move(textureXform));
+ } else {
+ rtc->drawTexture(clip, std::move(proxy), srcColorInfo.colorType(), filter,
+ paint.getBlendMode(), color, srcRect, dstRect, aa, aaFlags, constraint,
+ ctm, std::move(textureXform));
+ }
+}
+
+// Assumes srcRect and dstRect have already been optimized to fit the proxy.
+static void draw_texture_producer(GrContext* context, GrRenderTargetContext* rtc,
+ const GrClip& clip, const SkMatrix& ctm,
+ const SkPaint& paint, GrTextureProducer* producer,
+ const SkRect& src, const SkRect& dst, const SkPoint dstClip[4],
+ const SkMatrix& srcToDst, GrAA aa, GrQuadAAFlags aaFlags,
+ SkCanvas::SrcRectConstraint constraint, bool attemptDrawTexture) {
+ if (attemptDrawTexture && can_use_draw_texture(paint)) {
+ // We've done enough checks above to allow us to pass ClampNearest() and not check for
+ // scaling adjustments.
+ auto proxy = producer->refTextureProxyForParams(GrSamplerState::ClampNearest(), nullptr);
+ if (!proxy) {
+ return;
+ }
+
+ draw_texture(rtc, clip, ctm, paint, src, dst, dstClip, aa, aaFlags, constraint,
+ std::move(proxy), producer->colorInfo());
+ return;
+ }
+
+ const SkMaskFilter* mf = paint.getMaskFilter();
+
+ // The shader expects proper local coords, so we can't replace local coords with texture coords
+ // if the shader will be used. If we have a mask filter we will change the underlying geometry
+ // that is rendered.
+ bool canUseTextureCoordsAsLocalCoords = !use_shader(producer->isAlphaOnly(), paint) && !mf;
+
+ // Specifying the texture coords as local coordinates is an attempt to enable more GrDrawOp
+ // combining by not baking anything about the srcRect, dstRect, or ctm, into the texture
+ // FP. In the future this should be an opaque optimization enabled by the combination of
+ // GrDrawOp/GP and FP.
+ if (mf && as_MFB(mf)->hasFragmentProcessor()) {
+ mf = nullptr;
+ }
+ bool doBicubic;
+ GrSamplerState::Filter fm = GrSkFilterQualityToGrFilterMode(
+ producer->width(), producer->height(), paint.getFilterQuality(), ctm, srcToDst,
+ context->priv().options().fSharpenMipmappedTextures, &doBicubic);
+ const GrSamplerState::Filter* filterMode = doBicubic ? nullptr : &fm;
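+    // A null filterMode tells the producer to set up bicubic sampling instead of a fixed filter.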
+
+ GrTextureProducer::FilterConstraint constraintMode;
+ if (SkCanvas::kFast_SrcRectConstraint == constraint) {
+ constraintMode = GrTextureAdjuster::kNo_FilterConstraint;
+ } else {
+ constraintMode = GrTextureAdjuster::kYes_FilterConstraint;
+ }
+
+ // If we have to outset for AA then we will generate texture coords outside the src rect. The
+ // same happens for any mask filter that extends the bounds rendered in the dst.
+ // This is conservative as a mask filter does not have to expand the bounds rendered.
+ bool coordsAllInsideSrcRect = aaFlags == GrQuadAAFlags::kNone && !mf;
+
+ // Check for optimization to drop the src rect constraint when on bilerp.
+ if (filterMode && GrSamplerState::Filter::kBilerp == *filterMode &&
+ GrTextureAdjuster::kYes_FilterConstraint == constraintMode && coordsAllInsideSrcRect &&
+ !producer->hasMixedResolutions()) {
+ SkMatrix combinedMatrix;
+ combinedMatrix.setConcat(ctm, srcToDst);
+ if (can_ignore_bilerp_constraint(*producer, src, combinedMatrix, rtc->numSamples())) {
+ constraintMode = GrTextureAdjuster::kNo_FilterConstraint;
+ }
+ }
+
+ SkMatrix textureMatrix;
+ if (canUseTextureCoordsAsLocalCoords) {
+ textureMatrix = SkMatrix::I();
+ } else {
+ if (!srcToDst.invert(&textureMatrix)) {
+ return;
+ }
+ }
+ auto fp = producer->createFragmentProcessor(textureMatrix, src, constraintMode,
+ coordsAllInsideSrcRect, filterMode);
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), producer->colorSpace(), producer->alphaType(),
+ rtc->colorInfo().colorSpace());
+ if (!fp) {
+ return;
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(context, rtc->colorInfo(), paint, ctm, std::move(fp),
+ producer->isAlphaOnly(), &grPaint)) {
+ return;
+ }
+
+ if (!mf) {
+ // Can draw the image directly (any mask filter on the paint was converted to an FP already)
+ if (dstClip) {
+ SkPoint srcClipPoints[4];
+ SkPoint* srcClip = nullptr;
+ if (canUseTextureCoordsAsLocalCoords) {
+ // Calculate texture coordinates that match the dst clip
+ GrMapRectPoints(dst, src, dstClip, srcClipPoints, 4);
+ srcClip = srcClipPoints;
+ }
+ rtc->fillQuadWithEdgeAA(clip, std::move(grPaint), aa, aaFlags, ctm, dstClip, srcClip);
+ } else {
+ // Provide explicit texture coords when possible, otherwise rely on texture matrix
+ rtc->fillRectWithEdgeAA(clip, std::move(grPaint), aa, aaFlags, ctm, dst,
+ canUseTextureCoordsAsLocalCoords ? &src : nullptr);
+ }
+ } else {
+ // Must draw the mask filter as a GrShape. For now, this loses the per-edge AA information
+        // since it always draws with AA, but that should not be noticeable since the mask filter
+ // is probably a blur.
+ GrShape shape;
+ if (dstClip) {
+ // Represent it as an SkPath formed from the dstClip
+ SkPath path;
+ path.addPoly(dstClip, 4, true);
+ shape = GrShape(path);
+ } else {
+ shape = GrShape(dst);
+ }
+
+ GrBlurUtils::drawShapeWithMaskFilter(
+ context, rtc, clip, shape, std::move(grPaint), ctm, mf);
+ }
+}
+
+} // anonymous namespace
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawImageQuad(const SkImage* image, const SkRect* srcRect, const SkRect* dstRect,
+ const SkPoint dstClip[4], GrAA aa, GrQuadAAFlags aaFlags,
+ const SkMatrix* preViewMatrix, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkRect src;
+ SkRect dst;
+ SkMatrix srcToDst;
+ ImageDrawMode mode = optimize_sample_area(SkISize::Make(image->width(), image->height()),
+ srcRect, dstRect, dstClip, &src, &dst, &srcToDst);
+ if (mode == ImageDrawMode::kSkip) {
+ return;
+ }
+
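+    // If the src rect covers the whole image, a strict constraint can never be violated, so
+    // relax it to fast and enable more batching.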
+ if (src.contains(image->bounds())) {
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+    // Depending on the nature of the image, it can flow through more or less optimal pipelines.
+ bool useDecal = mode == ImageDrawMode::kDecal;
+ bool attemptDrawTexture = !useDecal; // rtc->drawTexture() only clamps
+
+ // Get final CTM matrix
+ SkMatrix ctm = this->ctm();
+ if (preViewMatrix) {
+ ctm.preConcat(*preViewMatrix);
+ }
+
+ // YUVA images can be stored in multiple images with different plane resolutions, so this
+ // uses an effect to combine them dynamically on the GPU. This is done before requesting a
+ // pinned texture proxy because YUV images force-flatten to RGBA in that scenario.
+ if (as_IB(image)->isYUVA()) {
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", false);
+ LogDrawScaleFactor(ctm, srcToDst, paint.getFilterQuality());
+
+ GrYUVAImageTextureMaker maker(fContext.get(), image, useDecal);
+ draw_texture_producer(fContext.get(), fRenderTargetContext.get(), this->clip(), ctm,
+ paint, &maker, src, dst, dstClip, srcToDst, aa, aaFlags, constraint,
+ /* attempt draw texture */ false);
+ return;
+ }
+
+ // Pinned texture proxies can be rendered directly as textures, or with relatively simple
+ // adjustments applied to the image content (scaling, mipmaps, color space, etc.)
+ uint32_t pinnedUniqueID;
+ if (sk_sp<GrTextureProxy> proxy = as_IB(image)->refPinnedTextureProxy(this->context(),
+ &pinnedUniqueID)) {
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", false);
+ LogDrawScaleFactor(ctm, srcToDst, paint.getFilterQuality());
+
+ GrColorInfo colorInfo(image->imageInfo().colorInfo());
+
+ if (attemptDrawTexture && can_use_draw_texture(paint)) {
+ draw_texture(fRenderTargetContext.get(), this->clip(), ctm, paint, src, dst,
+ dstClip, aa, aaFlags, constraint, std::move(proxy), colorInfo);
+ return;
+ }
+ GrTextureAdjuster adjuster(fContext.get(), std::move(proxy), colorInfo, pinnedUniqueID,
+ useDecal);
+ draw_texture_producer(fContext.get(), fRenderTargetContext.get(), this->clip(), ctm,
+ paint, &adjuster, src, dst, dstClip, srcToDst, aa, aaFlags,
+ constraint, /* attempt draw_texture */ false);
+ return;
+ }
+
+ // Next up, try tiling the image
+ // TODO (michaelludwig): Implement this with per-edge AA flags to handle seaming properly
+ // instead of going through drawBitmapRect (which will be removed from SkDevice in the future)
+ SkBitmap bm;
+ if (this->shouldTileImage(image, &src, constraint, paint.getFilterQuality(), ctm, srcToDst)) {
+        // Only tiling as a bitmap is supported at the moment, so force the raster version.
+ if (!as_IB(image)->getROPixels(&bm)) {
+ return;
+ }
+ this->drawBitmapRect(bm, &src, dst, paint, constraint);
+ return;
+ }
+
+ // This is the funnel for all non-tiled bitmap/image draw calls. Log a histogram entry.
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", false);
+ LogDrawScaleFactor(ctm, srcToDst, paint.getFilterQuality());
+
+ // Lazily generated images must get drawn as a texture producer that handles the final
+ // texture creation.
+ if (image->isLazyGenerated()) {
+ GrImageTextureMaker maker(fContext.get(), image, SkImage::kAllow_CachingHint, useDecal);
+ draw_texture_producer(fContext.get(), fRenderTargetContext.get(), this->clip(), ctm,
+ paint, &maker, src, dst, dstClip, srcToDst, aa, aaFlags, constraint,
+ attemptDrawTexture);
+ return;
+ }
+ if (as_IB(image)->getROPixels(&bm)) {
+ GrBitmapTextureMaker maker(fContext.get(), bm, useDecal);
+ draw_texture_producer(fContext.get(), fRenderTargetContext.get(), this->clip(), ctm,
+ paint, &maker, src, dst, dstClip, srcToDst, aa, aaFlags, constraint,
+ attemptDrawTexture);
+ }
+
+    // Otherwise we don't know how to draw the image.
+}
+
+void SkGpuDevice::drawEdgeAAImageSet(const SkCanvas::ImageSetEntry set[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ SkASSERT(count > 0);
+ if (!can_use_draw_texture(paint)) {
+ // Send every entry through drawImageQuad() to handle the more complicated paint
+ int dstClipIndex = 0;
+ for (int i = 0; i < count; ++i) {
+ // Only no clip or quad clip are supported
+ SkASSERT(!set[i].fHasClip || dstClips);
+ SkASSERT(set[i].fMatrixIndex < 0 || preViewMatrices);
+
+ SkTCopyOnFirstWrite<SkPaint> entryPaint(paint);
+ if (set[i].fAlpha != 1.f) {
+ auto paintAlpha = paint.getAlphaf();
+ entryPaint.writable()->setAlphaf(paintAlpha * set[i].fAlpha);
+ }
+ // Always send GrAA::kYes to preserve seaming across tiling in MSAA
+ this->drawImageQuad(
+ set[i].fImage.get(), &set[i].fSrcRect, &set[i].fDstRect,
+ set[i].fHasClip ? dstClips + dstClipIndex : nullptr, GrAA::kYes,
+ SkToGrQuadAAFlags(set[i].fAAFlags),
+ set[i].fMatrixIndex < 0 ? nullptr : preViewMatrices + set[i].fMatrixIndex,
+ *entryPaint, constraint);
+ dstClipIndex += 4 * set[i].fHasClip;
+ }
+ return;
+ }
+
+ GrSamplerState::Filter filter = kNone_SkFilterQuality == paint.getFilterQuality() ?
+ GrSamplerState::Filter::kNearest : GrSamplerState::Filter::kBilerp;
+ SkBlendMode mode = paint.getBlendMode();
+
+ SkAutoTArray<GrRenderTargetContext::TextureSetEntry> textures(count);
+    // We accumulate compatible proxies until we find an incompatible one or reach the end and
+ // issue the accumulated 'n' draws starting at 'base'.
+ int base = 0, n = 0;
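+    // 'base' indexes the first entry of the current compatible run and 'n' is its length; draw()
+    // flushes that run as a single drawTextureSet call.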
+ auto draw = [&] {
+ if (n > 0) {
+ auto textureXform = GrColorSpaceXform::Make(
+ set[base].fImage->colorSpace(), set[base].fImage->alphaType(),
+ fRenderTargetContext->colorInfo().colorSpace(), kPremul_SkAlphaType);
+ fRenderTargetContext->drawTextureSet(this->clip(), textures.get() + base, n,
+ filter, mode, GrAA::kYes, constraint, this->ctm(),
+ std::move(textureXform));
+ }
+ };
+ int dstClipIndex = 0;
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(!set[i].fHasClip || dstClips);
+ SkASSERT(set[i].fMatrixIndex < 0 || preViewMatrices);
+
+ // Manage the dst clip pointer tracking before any continues are used so we don't lose
+ // our place in the dstClips array.
+ const SkPoint* clip = set[i].fHasClip ? dstClips + dstClipIndex : nullptr;
+ dstClipIndex += 4 * set[i].fHasClip;
+
+ // The default SkBaseDevice implementation is based on drawImageRect which does not allow
+        // non-sorted src rects. TODO: Decide whether this is OK or make sure we handle it.
+ if (!set[i].fSrcRect.isSorted()) {
+ draw();
+ base = i + 1;
+ n = 0;
+ continue;
+ }
+
+ sk_sp<GrTextureProxy> proxy;
+ const SkImage_Base* image = as_IB(set[i].fImage.get());
+ // Extract proxy from image, but skip YUV images so they get processed through
+ // drawImageQuad and the proper effect to dynamically sample their planes.
+ if (!image->isYUVA()) {
+ uint32_t uniqueID;
+ proxy = image->refPinnedTextureProxy(this->context(), &uniqueID);
+ if (!proxy) {
+ proxy = image->asTextureProxyRef(this->context(), GrSamplerState::ClampBilerp(),
+ nullptr);
+ }
+ }
+
+ if (!proxy) {
+            // This image can't go through the texture op; send it through the general image
+            // pipeline after flushing the current batch.
+ draw();
+ base = i + 1;
+ n = 0;
+ SkTCopyOnFirstWrite<SkPaint> entryPaint(paint);
+ if (set[i].fAlpha != 1.f) {
+ auto paintAlpha = paint.getAlphaf();
+ entryPaint.writable()->setAlphaf(paintAlpha * set[i].fAlpha);
+ }
+ this->drawImageQuad(
+ image, &set[i].fSrcRect, &set[i].fDstRect, clip, GrAA::kYes,
+ SkToGrQuadAAFlags(set[i].fAAFlags),
+ set[i].fMatrixIndex < 0 ? nullptr : preViewMatrices + set[i].fMatrixIndex,
+ *entryPaint, constraint);
+ continue;
+ }
+
+ textures[i].fProxy = std::move(proxy);
+ textures[i].fSrcColorType = SkColorTypeToGrColorType(image->colorType());
+ textures[i].fSrcRect = set[i].fSrcRect;
+ textures[i].fDstRect = set[i].fDstRect;
+ textures[i].fDstClipQuad = clip;
+ textures[i].fPreViewMatrix =
+ set[i].fMatrixIndex < 0 ? nullptr : preViewMatrices + set[i].fMatrixIndex;
+ textures[i].fAlpha = set[i].fAlpha * paint.getAlphaf();
+ textures[i].fAAFlags = SkToGrQuadAAFlags(set[i].fAAFlags);
+
+ if (n > 0 &&
+ (!GrTextureProxy::ProxiesAreCompatibleAsDynamicState(textures[i].fProxy.get(),
+ textures[base].fProxy.get()) ||
+ set[i].fImage->alphaType() != set[base].fImage->alphaType() ||
+ !SkColorSpace::Equals(set[i].fImage->colorSpace(), set[base].fImage->colorSpace()))) {
+ draw();
+ base = i;
+ n = 1;
+ } else {
+ ++n;
+ }
+ }
+ draw();
+}
+
+// TODO (michaelludwig) - to be removed when drawBitmapRect doesn't need it anymore
+void SkGpuDevice::drawTextureProducer(GrTextureProducer* producer,
+ const SkRect* srcRect,
+ const SkRect* dstRect,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ const SkPaint& paint,
+ bool attemptDrawTexture) {
+ // The texture refactor split the old logic of drawTextureProducer into the beginning of
+ // drawImageQuad() and into the static draw_texture_producer. Replicate necessary logic that
+ // drawImageQuad() handles.
+ SkRect src;
+ SkRect dst;
+ SkMatrix srcToDst;
+ ImageDrawMode mode = optimize_sample_area(SkISize::Make(producer->width(), producer->height()),
+ srcRect, dstRect, nullptr, &src, &dst, &srcToDst);
+ if (mode == ImageDrawMode::kSkip) {
+ return;
+ }
+    // There's no dstClip to worry about, and the producer is already made, so we wouldn't be
+    // able to tell it to use decals if we had to.
+ SkASSERT(mode != ImageDrawMode::kDecal);
+
+ draw_texture_producer(fContext.get(), fRenderTargetContext.get(), this->clip(), viewMatrix,
+ paint, producer, src, dst, /* clip */ nullptr, srcToDst,
+ GrAA(paint.isAntiAlias()),
+ paint.isAntiAlias() ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone,
+ constraint, attemptDrawTexture);
+}
diff --git a/gfx/skia/skia/src/gpu/SkGr.cpp b/gfx/skia/skia/src/gpu/SkGr.cpp
new file mode 100644
index 0000000000..bdb3c3c95d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGr.cpp
@@ -0,0 +1,609 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/SkGr.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPixelRef.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrBitmapTextureMaker.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/effects/generated/GrSaturateProcessor.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkShaderBase.h"
+
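+// SkSL source for the dither effect. The integer branch reconstructs an 8x8 ordered-dither
+// (Bayer) matrix index from the low three bits of x and y, yielding offsets in
+// [-63/128, 63/128] with a step of 1/64. 'rangeType' scales those offsets to match the
+// destination's channel precision (see dither_range_type_for_config below).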
+GR_FP_SRC_STRING SKSL_DITHER_SRC = R"(
+// This controls the range of values added to color channels
+in int rangeType;
+
+void main(float x, float y, inout half4 color) {
+ half value;
+ half range;
+ @switch (rangeType) {
+ case 0:
+ range = 1.0 / 255.0;
+ break;
+ case 1:
+ range = 1.0 / 63.0;
+ break;
+ default:
+ // Experimentally this looks better than the expected value of 1/15.
+ range = 1.0 / 15.0;
+ break;
+ }
+ @if (sk_Caps.integerSupport) {
+ // This ordered-dither code is lifted from the cpu backend.
+ uint x = uint(x);
+ uint y = uint(y);
+ uint m = (y & 1) << 5 | (x & 1) << 4 |
+ (y & 2) << 2 | (x & 2) << 1 |
+ (y & 4) >> 1 | (x & 4) >> 2;
+ value = half(m) * 1.0 / 64.0 - 63.0 / 128.0;
+ } else {
+ // Simulate the integer effect used above using step/mod. For speed, simulates a 4x4
+ // dither pattern rather than an 8x8 one.
+ half4 modValues = mod(half4(half(x), half(y), half(x), half(y)), half4(2.0, 2.0, 4.0, 4.0));
+ half4 stepValues = step(modValues, half4(1.0, 1.0, 2.0, 2.0));
+ value = dot(stepValues, half4(8.0 / 16.0, 4.0 / 16.0, 2.0 / 16.0, 1.0 / 16.0)) - 15.0 / 32.0;
+ }
+ // For each color channel, add the random offset to the channel value and then clamp
+ // between 0 and alpha to keep the color premultiplied.
+ color = half4(clamp(color.rgb + value * range, 0.0, color.a), color.a);
+}
+)";
+
+GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo& info) {
+ GrSurfaceDesc desc;
+ desc.fWidth = info.width();
+ desc.fHeight = info.height();
+ desc.fConfig = SkImageInfo2GrPixelConfig(info);
+ return desc;
+}
+
+void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds) {
+ SkASSERT(key);
+ SkASSERT(imageID);
+ SkASSERT(!imageBounds.isEmpty());
+ static const GrUniqueKey::Domain kImageIDDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kImageIDDomain, 5, "Image");
+ builder[0] = imageID;
+ builder[1] = imageBounds.fLeft;
+ builder[2] = imageBounds.fTop;
+ builder[3] = imageBounds.fRight;
+ builder[4] = imageBounds.fBottom;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrInstallBitmapUniqueKeyInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID,
+ SkPixelRef* pixelRef) {
+ class Invalidator : public SkPixelRef::GenIDChangeListener {
+ public:
+ explicit Invalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
+ : fMsg(key, contextUniqueID) {}
+
+ private:
+ GrUniqueKeyInvalidatedMessage fMsg;
+
+ void onChange() override { SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg); }
+ };
+
+ pixelRef->addGenIDChangeListener(new Invalidator(key, contextUniqueID));
+}
+
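+// Copies the base level of 'baseProxy' into a new proxy whose texture has storage allocated for
+// a full mip chain; presumably the remaining levels are filled in later, when the texture is
+// first sampled with mips.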
+sk_sp<GrTextureProxy> GrCopyBaseMipMapToTextureProxy(GrRecordingContext* ctx,
+ GrTextureProxy* baseProxy,
+ GrColorType srcColorType) {
+ SkASSERT(baseProxy);
+
+ if (!ctx->priv().caps()->isFormatCopyable(baseProxy->backendFormat())) {
+ return nullptr;
+ }
+ return GrSurfaceProxy::Copy(ctx, baseProxy, srcColorType, GrMipMapped::kYes,
+ SkBackingFit::kExact, SkBudgeted::kYes);
+}
+
+sk_sp<GrTextureProxy> GrRefCachedBitmapTextureProxy(GrRecordingContext* ctx,
+ const SkBitmap& bitmap,
+ const GrSamplerState& params,
+ SkScalar scaleAdjust[2]) {
+ return GrBitmapTextureMaker(ctx, bitmap).refTextureProxyForParams(params, scaleAdjust);
+}
+
+sk_sp<GrTextureProxy> GrMakeCachedBitmapProxy(GrProxyProvider* proxyProvider,
+ const SkBitmap& bitmap,
+ SkBackingFit fit) {
+ if (!bitmap.peekPixels(nullptr)) {
+ return nullptr;
+ }
+
+ // In non-ddl we will always instantiate right away. Thus we never want to copy the SkBitmap
+    // even if it's mutable. In ddl, if the bitmap is mutable then we must make a copy since the
+    // upload of the data to the gpu can happen at any time and the bitmap may change by then.
+ SkCopyPixelsMode cpyMode = proxyProvider->renderingDirectly() ? kNever_SkCopyPixelsMode
+ : kIfMutable_SkCopyPixelsMode;
+ sk_sp<SkImage> image = SkMakeImageFromRasterBitmap(bitmap, cpyMode);
+
+ if (!image) {
+ return nullptr;
+ }
+
+ return GrMakeCachedImageProxy(proxyProvider, std::move(image), fit);
+}
+
+static void create_unique_key_for_image(const SkImage* image, GrUniqueKey* result) {
+ if (!image) {
+ result->reset(); // will be invalid
+ return;
+ }
+
+ if (const SkBitmap* bm = as_IB(image)->onPeekBitmap()) {
+ if (!bm->isVolatile()) {
+ SkIPoint origin = bm->pixelRefOrigin();
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, bm->width(), bm->height());
+ GrMakeKeyFromImageID(result, bm->getGenerationID(), subset);
+ }
+ return;
+ }
+
+ GrMakeKeyFromImageID(result, image->uniqueID(), image->bounds());
+}
+
+sk_sp<GrTextureProxy> GrMakeCachedImageProxy(GrProxyProvider* proxyProvider,
+ sk_sp<SkImage> srcImage,
+ SkBackingFit fit) {
+ sk_sp<GrTextureProxy> proxy;
+ GrUniqueKey originalKey;
+
+ create_unique_key_for_image(srcImage.get(), &originalKey);
+
+ if (originalKey.isValid()) {
+ proxy = proxyProvider->findOrCreateProxyByUniqueKey(
+ originalKey, SkColorTypeToGrColorType(srcImage->colorType()),
+ kTopLeft_GrSurfaceOrigin);
+ }
+ if (!proxy) {
+ proxy = proxyProvider->createTextureProxy(srcImage, 1, SkBudgeted::kYes, fit);
+ if (proxy && originalKey.isValid()) {
+ proxyProvider->assignUniqueKeyToProxy(originalKey, proxy.get());
+ const SkBitmap* bm = as_IB(srcImage.get())->onPeekBitmap();
+ // When recording DDLs we do not want to install change listeners because doing
+            // so isn't thread-safe.
+ if (bm && proxyProvider->renderingDirectly()) {
+ GrInstallBitmapUniqueKeyInvalidator(originalKey, proxyProvider->contextID(),
+ bm->pixelRef());
+ }
+ }
+ }
+
+ return proxy;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPMColor4f SkColorToPMColor4f(SkColor c, const GrColorInfo& colorInfo) {
+ SkColor4f color = SkColor4f::FromColor(c);
+ if (auto* xform = colorInfo.colorSpaceXformFromSRGB()) {
+ color = xform->apply(color);
+ }
+ return color.premul();
+}
+
+SkColor4f SkColor4fPrepForDst(SkColor4f color, const GrColorInfo& colorInfo) {
+ if (auto* xform = colorInfo.colorSpaceXformFromSRGB()) {
+ color = xform->apply(color);
+ }
+ return color;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPixelConfig SkColorType2GrPixelConfig(const SkColorType type) {
+ switch (type) {
+ case kUnknown_SkColorType:
+ return kUnknown_GrPixelConfig;
+ case kAlpha_8_SkColorType:
+ return kAlpha_8_GrPixelConfig;
+ case kRGB_565_SkColorType:
+ return kRGB_565_GrPixelConfig;
+ case kARGB_4444_SkColorType:
+ return kRGBA_4444_GrPixelConfig;
+ case kRGBA_8888_SkColorType:
+ return kRGBA_8888_GrPixelConfig;
+ case kRGB_888x_SkColorType:
+ return kRGB_888_GrPixelConfig;
+ case kBGRA_8888_SkColorType:
+ return kBGRA_8888_GrPixelConfig;
+ case kRGBA_1010102_SkColorType:
+ return kRGBA_1010102_GrPixelConfig;
+ case kRGB_101010x_SkColorType:
+ return kUnknown_GrPixelConfig;
+ case kGray_8_SkColorType:
+ return kGray_8_GrPixelConfig;
+ case kRGBA_F16Norm_SkColorType:
+ return kRGBA_half_Clamped_GrPixelConfig;
+ case kRGBA_F16_SkColorType:
+ return kRGBA_half_GrPixelConfig;
+ case kRGBA_F32_SkColorType:
+ return kUnknown_GrPixelConfig;
+ case kR8G8_unorm_SkColorType:
+ return kRG_88_GrPixelConfig;
+ case kR16G16_unorm_SkColorType:
+ return kRG_1616_GrPixelConfig;
+ case kA16_unorm_SkColorType:
+ return kAlpha_16_GrPixelConfig;
+ case kA16_float_SkColorType:
+ return kAlpha_half_GrPixelConfig;
+ case kR16G16_float_SkColorType:
+ return kRG_half_GrPixelConfig;
+ case kR16G16B16A16_unorm_SkColorType:
+ return kRGBA_16161616_GrPixelConfig;
+ }
+ SkUNREACHABLE;
+}
+
+GrPixelConfig SkImageInfo2GrPixelConfig(const SkImageInfo& info) {
+ return SkColorType2GrPixelConfig(info.colorType());
+}
+
+bool GrPixelConfigToColorType(GrPixelConfig config, SkColorType* ctOut) {
+ SkColorType ct = GrColorTypeToSkColorType(GrPixelConfigToColorType(config));
+ if (kUnknown_SkColorType != ct) {
+ if (ctOut) {
+ *ctOut = ct;
+ }
+ return true;
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline bool blend_requires_shader(const SkBlendMode mode) {
+ return SkBlendMode::kDst != mode;
+}
+
+#ifndef SK_IGNORE_GPU_DITHER
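+// Maps the destination color type to the 'rangeType' uniform consumed by SKSL_DITHER_SRC:
+// 0 selects the 1/255 range, 1 the 1/63 range (565), 2 the 1/15 range (4444), and -1 marks
+// formats that should not be dithered.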
+static inline int32_t dither_range_type_for_config(GrColorType dstColorType) {
+ switch (dstColorType) {
+ case GrColorType::kGray_8:
+ case GrColorType::kRGBA_8888:
+ case GrColorType::kRGB_888x:
+ case GrColorType::kRG_88:
+ case GrColorType::kBGRA_8888:
+ case GrColorType::kRG_1616:
+ case GrColorType::kRGBA_16161616:
+ case GrColorType::kRG_F16:
+ return 0;
+ case GrColorType::kBGR_565:
+ return 1;
+ case GrColorType::kABGR_4444:
+ return 2;
+ case GrColorType::kUnknown:
+ case GrColorType::kRGBA_8888_SRGB:
+ case GrColorType::kRGBA_1010102:
+ case GrColorType::kAlpha_F16:
+ case GrColorType::kRGBA_F32:
+ case GrColorType::kRGBA_F16:
+ case GrColorType::kRGBA_F16_Clamped:
+ case GrColorType::kAlpha_8:
+ case GrColorType::kAlpha_8xxx:
+ case GrColorType::kAlpha_16:
+ case GrColorType::kAlpha_F32xxx:
+ case GrColorType::kGray_8xxx:
+ return -1;
+ }
+ SkUNREACHABLE;
+}
+#endif
+
+static inline bool skpaint_to_grpaint_impl(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ std::unique_ptr<GrFragmentProcessor>* shaderProcessor,
+ SkBlendMode* primColorMode,
+ GrPaint* grPaint) {
+ // Convert SkPaint color to 4f format in the destination color space
+ SkColor4f origColor = SkColor4fPrepForDst(skPaint.getColor4f(), dstColorInfo);
+
+ GrFPArgs fpArgs(context, &viewM, skPaint.getFilterQuality(), &dstColorInfo);
+
+    // Set up the initial color, considering the shader, the SkPaint color, and whether
+    // per-vertex colors are present.
+ std::unique_ptr<GrFragmentProcessor> shaderFP;
+ if (!primColorMode || blend_requires_shader(*primColorMode)) {
+ fpArgs.fInputColorIsOpaque = origColor.isOpaque();
+ if (shaderProcessor) {
+ shaderFP = std::move(*shaderProcessor);
+ } else if (const auto* shader = as_SB(skPaint.getShader())) {
+ shaderFP = shader->asFragmentProcessor(fpArgs);
+ if (!shaderFP) {
+ return false;
+ }
+ }
+ }
+
+    // Set this in the cases below if the output of the shader/paint-color/paint-alpha/primXfermode
+    // is a known constant value. In that case we can simply apply a color filter during this
+    // conversion without converting the color filter to a GrFragmentProcessor.
+ bool applyColorFilterToPaintColor = false;
+ if (shaderFP) {
+ if (primColorMode) {
+ // There is a blend between the primitive color and the shader color. The shader sees
+ // the opaque paint color. The shader's output is blended using the provided mode by
+ // the primitive color. The blended color is then modulated by the paint's alpha.
+
+ // The geometry processor will insert the primitive color to start the color chain, so
+ // the GrPaint color will be ignored.
+
+ SkPMColor4f shaderInput = origColor.makeOpaque().premul();
+ shaderFP = GrFragmentProcessor::OverrideInput(std::move(shaderFP), shaderInput);
+ shaderFP = GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(shaderFP),
+ *primColorMode);
+
+            // The above may return null if the compose results in a pass-through of the prim color.
+ if (shaderFP) {
+ grPaint->addColorFragmentProcessor(std::move(shaderFP));
+ }
+
+ // We can ignore origColor here - alpha is unchanged by gamma
+ float paintAlpha = skPaint.getColor4f().fA;
+ if (1.0f != paintAlpha) {
+                // No gamut conversion - paintAlpha is a (linear) alpha value, splatted to all
+                // color channels. Its value is the same in any color space.
+ grPaint->addColorFragmentProcessor(GrConstColorProcessor::Make(
+ { paintAlpha, paintAlpha, paintAlpha, paintAlpha },
+ GrConstColorProcessor::InputMode::kModulateRGBA));
+ }
+ } else {
+ // The shader's FP sees the paint *unpremul* color
+ SkPMColor4f origColorAsPM = { origColor.fR, origColor.fG, origColor.fB, origColor.fA };
+ grPaint->setColor4f(origColorAsPM);
+ grPaint->addColorFragmentProcessor(std::move(shaderFP));
+ }
+ } else {
+ if (primColorMode) {
+ // There is a blend between the primitive color and the paint color. The blend considers
+ // the opaque paint color. The paint's alpha is applied to the post-blended color.
+ SkPMColor4f opaqueColor = origColor.makeOpaque().premul();
+ auto processor = GrConstColorProcessor::Make(opaqueColor,
+ GrConstColorProcessor::InputMode::kIgnore);
+ processor = GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(processor),
+ *primColorMode);
+ if (processor) {
+ grPaint->addColorFragmentProcessor(std::move(processor));
+ }
+
+ grPaint->setColor4f(opaqueColor);
+
+ // We can ignore origColor here - alpha is unchanged by gamma
+ float paintAlpha = skPaint.getColor4f().fA;
+ if (1.0f != paintAlpha) {
+                // No gamut conversion - paintAlpha is a (linear) alpha value, splatted to all
+                // color channels. Its value is the same in any color space.
+ grPaint->addColorFragmentProcessor(GrConstColorProcessor::Make(
+ { paintAlpha, paintAlpha, paintAlpha, paintAlpha },
+ GrConstColorProcessor::InputMode::kModulateRGBA));
+ }
+ } else {
+ // No shader, no primitive color.
+ grPaint->setColor4f(origColor.premul());
+ applyColorFilterToPaintColor = true;
+ }
+ }
+
+ SkColorFilter* colorFilter = skPaint.getColorFilter();
+ if (colorFilter) {
+ if (applyColorFilterToPaintColor) {
+ SkColorSpace* dstCS = dstColorInfo.colorSpace();
+ grPaint->setColor4f(colorFilter->filterColor4f(origColor, dstCS, dstCS).premul());
+ } else {
+ auto cfFP = colorFilter->asFragmentProcessor(context, dstColorInfo);
+ if (cfFP) {
+ grPaint->addColorFragmentProcessor(std::move(cfFP));
+ } else {
+ return false;
+ }
+ }
+ }
+
+ SkMaskFilterBase* maskFilter = as_MFB(skPaint.getMaskFilter());
+ if (maskFilter) {
+ // We may have set this before passing to the SkShader.
+ fpArgs.fInputColorIsOpaque = false;
+ if (auto mfFP = maskFilter->asFragmentProcessor(fpArgs)) {
+ grPaint->addCoverageFragmentProcessor(std::move(mfFP));
+ }
+ }
+
+ // When the xfermode is null on the SkPaint (meaning kSrcOver) we need the XPFactory field on
+ // the GrPaint to also be null (also kSrcOver).
+ SkASSERT(!grPaint->getXPFactory());
+ if (!skPaint.isSrcOver()) {
+ grPaint->setXPFactory(SkBlendMode_AsXPFactory(skPaint.getBlendMode()));
+ }
+
+#ifndef SK_IGNORE_GPU_DITHER
+    // Determine the destination color type, which drives both the dither decision and the range.
+ GrColorType ct = dstColorInfo.colorType();
+ if (SkPaintPriv::ShouldDither(skPaint, GrColorTypeToSkColorType(ct)) &&
+ grPaint->numColorFragmentProcessors() > 0) {
+ int32_t ditherRange = dither_range_type_for_config(ct);
+ if (ditherRange >= 0) {
+ static int ditherIndex = GrSkSLFP::NewIndex();
+ auto ditherFP = GrSkSLFP::Make(context, ditherIndex, "Dither", SKSL_DITHER_SRC,
+ &ditherRange, sizeof(ditherRange));
+ if (ditherFP) {
+ grPaint->addColorFragmentProcessor(std::move(ditherFP));
+ }
+ }
+ }
+#endif
+ if (GrColorTypeClampType(dstColorInfo.colorType()) == GrClampType::kManual) {
+ if (grPaint->numColorFragmentProcessors()) {
+ grPaint->addColorFragmentProcessor(GrSaturateProcessor::Make());
+ } else {
+ auto color = grPaint->getColor4f();
+ grPaint->setColor4f({SkTPin(color.fR, 0.f, 1.f),
+ SkTPin(color.fG, 0.f, 1.f),
+ SkTPin(color.fB, 0.f, 1.f),
+ SkTPin(color.fA, 0.f, 1.f)});
+ }
+ }
+ return true;
+}
+
+bool SkPaintToGrPaint(GrRecordingContext* context, const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint, const SkMatrix& viewM, GrPaint* grPaint) {
+ return skpaint_to_grpaint_impl(context, dstColorInfo, skPaint, viewM, nullptr, nullptr,
+ grPaint);
+}
+
+/** Replaces the SkShader (if any) on skPaint with the passed in GrFragmentProcessor. */
+bool SkPaintToGrPaintReplaceShader(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ std::unique_ptr<GrFragmentProcessor> shaderFP,
+ GrPaint* grPaint) {
+ if (!shaderFP) {
+ return false;
+ }
+ return skpaint_to_grpaint_impl(context, dstColorInfo, skPaint, SkMatrix::I(), &shaderFP,
+ nullptr, grPaint);
+}
+
+/** Ignores the SkShader (if any) on skPaint. */
+bool SkPaintToGrPaintNoShader(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ GrPaint* grPaint) {
+    // Use a ptr to a nullptr to indicate that the SkShader is ignored and not replaced.
+ std::unique_ptr<GrFragmentProcessor> nullShaderFP(nullptr);
+ return skpaint_to_grpaint_impl(context, dstColorInfo, skPaint, SkMatrix::I(), &nullShaderFP,
+ nullptr, grPaint);
+}
+
+/** Blends the SkPaint's shader (or color if no shader) with a per-primitive color which must
+be setup as a vertex attribute using the specified SkBlendMode. */
+bool SkPaintToGrPaintWithXfermode(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ SkBlendMode primColorMode,
+ GrPaint* grPaint) {
+ return skpaint_to_grpaint_impl(context, dstColorInfo, skPaint, viewM, nullptr, &primColorMode,
+ grPaint);
+}
+
+bool SkPaintToGrPaintWithTexture(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& paint,
+ const SkMatrix& viewM,
+ std::unique_ptr<GrFragmentProcessor> fp,
+ bool textureIsAlphaOnly,
+ GrPaint* grPaint) {
+ std::unique_ptr<GrFragmentProcessor> shaderFP;
+ if (textureIsAlphaOnly) {
+ if (const auto* shader = as_SB(paint.getShader())) {
+ shaderFP = shader->asFragmentProcessor(
+ GrFPArgs(context, &viewM, paint.getFilterQuality(), &dstColorInfo));
+ if (!shaderFP) {
+ return false;
+ }
+ std::unique_ptr<GrFragmentProcessor> fpSeries[] = { std::move(shaderFP), std::move(fp) };
+ shaderFP = GrFragmentProcessor::RunInSeries(fpSeries, 2);
+ } else {
+ shaderFP = GrFragmentProcessor::MakeInputPremulAndMulByOutput(std::move(fp));
+ }
+ } else {
+ if (paint.getColor4f().isOpaque()) {
+ shaderFP = GrFragmentProcessor::OverrideInput(std::move(fp), SK_PMColor4fWHITE, false);
+ } else {
+ shaderFP = GrFragmentProcessor::MulChildByInputAlpha(std::move(fp));
+ }
+ }
+
+ return SkPaintToGrPaintReplaceShader(context, dstColorInfo, paint, std::move(shaderFP),
+ grPaint);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrSamplerState::Filter GrSkFilterQualityToGrFilterMode(int imageWidth, int imageHeight,
+ SkFilterQuality paintFilterQuality,
+ const SkMatrix& viewM,
+ const SkMatrix& localM,
+ bool sharpenMipmappedTextures,
+ bool* doBicubic) {
+ *doBicubic = false;
+ if (imageWidth <= 1 && imageHeight <= 1) {
+ return GrSamplerState::Filter::kNearest;
+ }
+ switch (paintFilterQuality) {
+ case kNone_SkFilterQuality:
+ return GrSamplerState::Filter::kNearest;
+ case kLow_SkFilterQuality:
+ return GrSamplerState::Filter::kBilerp;
+ case kMedium_SkFilterQuality: {
+ SkMatrix matrix;
+ matrix.setConcat(viewM, localM);
+ // With sharp mips, we bias lookups by -0.5. That means our final LOD is >= 0 until the
+ // computed LOD is >= 0.5. At what scale factor does a texture get an LOD of 0.5?
+ //
+ // Want: 0 = log2(1/s) - 0.5
+ // 0.5 = log2(1/s)
+ // 2^0.5 = 1/s
+ // 1/2^0.5 = s
+ // 2^0.5/2 = s
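+            //
+            // Worked check (illustrative): with sharpening, mipScale = sqrt(2)/2 ~= 0.707; a
+            // draw whose total scale factor is 0.5 has getMinScale() = 0.5 < 0.707 and takes
+            // the kMipMap branch below, while an unscaled draw (1.0) stays on kBilerp.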
+ SkScalar mipScale = sharpenMipmappedTextures ? SK_ScalarRoot2Over2 : SK_Scalar1;
+ if (matrix.getMinScale() < mipScale) {
+ return GrSamplerState::Filter::kMipMap;
+ } else {
+ // Don't trigger MIP level generation unnecessarily.
+ return GrSamplerState::Filter::kBilerp;
+ }
+ }
+ case kHigh_SkFilterQuality: {
+ SkMatrix matrix;
+ matrix.setConcat(viewM, localM);
+ GrSamplerState::Filter textureFilterMode;
+ *doBicubic = GrBicubicEffect::ShouldUseBicubic(matrix, &textureFilterMode);
+ return textureFilterMode;
+ }
+ }
+ SkUNREACHABLE;
+}
diff --git a/gfx/skia/skia/src/gpu/SkGr.h b/gfx/skia/skia/src/gpu/SkGr.h
new file mode 100644
index 0000000000..79a68822b9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGr.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGr_DEFINED
+#define SkGr_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkVertices.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrSamplerState.h"
+
+class GrCaps;
+class GrColorInfo;
+class GrColorSpaceXform;
+class GrContext;
+class GrFragmentProcessor;
+class GrPaint;
+class GrRecordingContext;
+class GrResourceProvider;
+class GrTextureProxy;
+class GrUniqueKey;
+class SkBitmap;
+class SkData;
+class SkPaint;
+class SkPixelRef;
+class SkPixmap;
+struct SkIRect;
+
+////////////////////////////////////////////////////////////////////////////////
+// Color type conversions
+
+static inline GrColor SkColorToPremulGrColor(SkColor c) {
+ SkPMColor pm = SkPreMultiplyColor(c);
+ unsigned r = SkGetPackedR32(pm);
+ unsigned g = SkGetPackedG32(pm);
+ unsigned b = SkGetPackedB32(pm);
+ unsigned a = SkGetPackedA32(pm);
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+static inline GrColor SkColorToUnpremulGrColor(SkColor c) {
+ unsigned r = SkColorGetR(c);
+ unsigned g = SkColorGetG(c);
+ unsigned b = SkColorGetB(c);
+ unsigned a = SkColorGetA(c);
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+/** Converts an SkColor to a premultiplied SkPMColor4f in the destination color space. */
+SkPMColor4f SkColorToPMColor4f(SkColor, const GrColorInfo&);
+
+/** Converts an SkColor4f to the destination color space. */
+SkColor4f SkColor4fPrepForDst(SkColor4f, const GrColorInfo&);
+
+/** Returns true if half-floats are required to store the color in a vertex (and half-floats
+ are supported). */
+static inline bool SkPMColor4fNeedsWideColor(SkPMColor4f color, GrClampType clampType,
+ const GrCaps& caps) {
+ return GrClampType::kNone == clampType &&
+ caps.halfFloatVertexAttributeSupport() &&
+ !color.fitsInBytes();
+}
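+
+// Illustrative sketch (exposition only; not part of the upstream API): packing a paint color.
+// A half-transparent red premultiplies before packing, e.g.
+//
+//     GrColor c = SkColorToPremulGrColor(SkColorSetARGB(0x80, 0xFF, 0x00, 0x00));
+//     // c holds approximately (r,g,b,a) = (0x80, 0x00, 0x00, 0x80)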
+
+////////////////////////////////////////////////////////////////////////////////
+// Paint conversion
+
+/** Converts an SkPaint to a GrPaint for a given GrRecordingContext. The matrix is required in order
+ to convert the SkShader (if any) on the SkPaint. The primitive itself has no color. */
+bool SkPaintToGrPaint(GrRecordingContext*,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ GrPaint* grPaint);
+
+/** Same as above but ignores the SkShader (if any) on skPaint. */
+bool SkPaintToGrPaintNoShader(GrRecordingContext*,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ GrPaint* grPaint);
+
+/** Replaces the SkShader (if any) on skPaint with the passed in GrFragmentProcessor. The processor
+ should expect an unpremul input color and produce a premultiplied output color. There is
+ no primitive color. */
+bool SkPaintToGrPaintReplaceShader(GrRecordingContext*,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ std::unique_ptr<GrFragmentProcessor> shaderFP,
+ GrPaint* grPaint);
+
+/** Blends the SkPaint's shader (or color if no shader) with the color specified via a
+    GrOp's GrPrimitiveProcessor. */
+bool SkPaintToGrPaintWithXfermode(GrRecordingContext*,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ SkBlendMode primColorMode,
+ GrPaint* grPaint);
+
+/** This is used when there is a primitive color, but the shader should be ignored. Currently,
+ the expectation is that the primitive color will be premultiplied, though it really should be
+ unpremultiplied so that interpolation is done in unpremul space. The paint's alpha will be
+ applied to the primitive color after interpolation. */
+inline bool SkPaintToGrPaintWithPrimitiveColor(GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ GrPaint* grPaint) {
+ return SkPaintToGrPaintWithXfermode(context, dstColorInfo, skPaint, SkMatrix::I(),
+ SkBlendMode::kDst, grPaint);
+}
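+
+// Illustrative usage sketch (hypothetical caller state; 'ctx', 'dstColorInfo', and 'viewMatrix'
+// are assumed to exist):
+//
+//     GrPaint grPaint;
+//     if (!SkPaintToGrPaint(ctx, dstColorInfo, skPaint, viewMatrix, &grPaint)) {
+//         // fall back; e.g. the paint's shader could not produce a fragment processor
+//     }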
+
+/** This is used when there may or may not be a shader, and the caller wants to plug in a texture
+ lookup. If there is a shader, then its output will only be used if the texture is alpha8. */
+bool SkPaintToGrPaintWithTexture(GrRecordingContext*,
+ const GrColorInfo& dstColorInfo,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ std::unique_ptr<GrFragmentProcessor> fp,
+ bool textureIsAlphaOnly,
+ GrPaint* grPaint);
+
+////////////////////////////////////////////////////////////////////////////////
+// Misc Sk to Gr type conversions
+
+GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo&);
+GrPixelConfig SkColorType2GrPixelConfig(const SkColorType);
+GrPixelConfig SkImageInfo2GrPixelConfig(const SkImageInfo& info);
+
+bool GrPixelConfigToColorType(GrPixelConfig, SkColorType*);
+
+GrSamplerState::Filter GrSkFilterQualityToGrFilterMode(int imageWidth, int imageHeight,
+ SkFilterQuality paintFilterQuality,
+ const SkMatrix& viewM,
+ const SkMatrix& localM,
+ bool sharpenMipmappedTextures,
+ bool* doBicubic);
+
+//////////////////////////////////////////////////////////////////////////////
+
+static inline GrPrimitiveType SkVertexModeToGrPrimitiveType(SkVertices::VertexMode mode) {
+ switch (mode) {
+ case SkVertices::kTriangles_VertexMode:
+ return GrPrimitiveType::kTriangles;
+ case SkVertices::kTriangleStrip_VertexMode:
+ return GrPrimitiveType::kTriangleStrip;
+ case SkVertices::kTriangleFan_VertexMode:
+ break;
+ }
+ SK_ABORT("Invalid mode");
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_STATIC_ASSERT((int)kZero_GrBlendCoeff == (int)SkBlendModeCoeff::kZero);
+GR_STATIC_ASSERT((int)kOne_GrBlendCoeff == (int)SkBlendModeCoeff::kOne);
+GR_STATIC_ASSERT((int)kSC_GrBlendCoeff == (int)SkBlendModeCoeff::kSC);
+GR_STATIC_ASSERT((int)kISC_GrBlendCoeff == (int)SkBlendModeCoeff::kISC);
+GR_STATIC_ASSERT((int)kDC_GrBlendCoeff == (int)SkBlendModeCoeff::kDC);
+GR_STATIC_ASSERT((int)kIDC_GrBlendCoeff == (int)SkBlendModeCoeff::kIDC);
+GR_STATIC_ASSERT((int)kSA_GrBlendCoeff == (int)SkBlendModeCoeff::kSA);
+GR_STATIC_ASSERT((int)kISA_GrBlendCoeff == (int)SkBlendModeCoeff::kISA);
+GR_STATIC_ASSERT((int)kDA_GrBlendCoeff == (int)SkBlendModeCoeff::kDA);
+GR_STATIC_ASSERT((int)kIDA_GrBlendCoeff == (int)SkBlendModeCoeff::kIDA);
+//GR_STATIC_ASSERT(SkXfermode::kCoeffCount == 10);
+
+////////////////////////////////////////////////////////////////////////////////
+// Texture management
+
+/** Returns a texture representing the bitmap that is compatible with the GrSamplerState. The
+ * texture is inserted into the cache (unless the bitmap is marked volatile) and can be
+ * retrieved again via this function.
+ * The 'scaleAdjust' in/out parameter will be updated to hold any rescaling that needs to be
+ * performed on the absolute texture coordinates (e.g., if the texture is resized up to
+ * the next power of two). It can be null if the caller is sure the bitmap won't be resized.
+ */
+sk_sp<GrTextureProxy> GrRefCachedBitmapTextureProxy(GrRecordingContext*,
+ const SkBitmap&,
+ const GrSamplerState&,
+ SkScalar scaleAdjust[2]);
+
+/**
+ * Creates a new texture with mipmap levels and copies the baseProxy into the base layer.
+ */
+sk_sp<GrTextureProxy> GrCopyBaseMipMapToTextureProxy(GrRecordingContext*,
+ GrTextureProxy* baseProxy,
+ GrColorType srcColorType);
+
+/*
+ * Create a texture proxy from the provided bitmap by wrapping it in an image and calling
+ * GrMakeCachedImageProxy.
+ */
+sk_sp<GrTextureProxy> GrMakeCachedBitmapProxy(GrProxyProvider*, const SkBitmap& bitmap,
+ SkBackingFit fit = SkBackingFit::kExact);
+
+/*
+ * Create a texture proxy from the provided 'srcImage' and add it to the texture cache
+ * using the key also extracted from 'srcImage'.
+ */
+sk_sp<GrTextureProxy> GrMakeCachedImageProxy(GrProxyProvider*, sk_sp<SkImage> srcImage,
+ SkBackingFit fit = SkBackingFit::kExact);
+
+/**
+ * Our key includes the offset, width, and height so that bitmaps created by extractSubset()
+ * are unique.
+ *
+ * The imageID is in the shared namespace (see SkNextID::ImageID())
+ * - SkBitmap/SkPixelRef
+ * - SkImage
+ * - SkImageGenerator
+ *
+ * Note: width/height must fit in 16 bits for this impl.
+ */
+void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds);
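+
+// Illustrative sketch (hypothetical values): building the key for a 64x64 subset of a bitmap
+// at offset (16, 16).
+//
+//     GrUniqueKey key;
+//     GrMakeKeyFromImageID(&key, bitmap.getGenerationID(), SkIRect::MakeXYWH(16, 16, 64, 64));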
+
+/** Call this after installing a GrUniqueKey on a texture. It will cause the texture's key to be
+ removed should the bitmap's contents change or be destroyed. */
+void GrInstallBitmapUniqueKeyInvalidator(const GrUniqueKey& key, uint32_t contextID,
+ SkPixelRef* pixelRef);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.cpp
new file mode 100644
index 0000000000..55d8f970b0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCAtlas.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/core/SkIPoint16.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRectanizer_skyline.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/ccpr/GrCCPathCache.h"
+#include <atomic>
+
+class GrCCAtlas::Node {
+public:
+ Node(std::unique_ptr<Node> previous, int l, int t, int r, int b)
+ : fPrevious(std::move(previous)), fX(l), fY(t), fRectanizer(r - l, b - t) {}
+
+ Node* previous() const { return fPrevious.get(); }
+
+ bool addRect(int w, int h, SkIPoint16* loc, int maxAtlasSize) {
+ // Pad all paths except those that are expected to take up an entire physical texture.
+ if (w < maxAtlasSize) {
+ w = SkTMin(w + kPadding, maxAtlasSize);
+ }
+ if (h < maxAtlasSize) {
+ h = SkTMin(h + kPadding, maxAtlasSize);
+ }
+ if (!fRectanizer.addRect(w, h, loc)) {
+ return false;
+ }
+ loc->fX += fX;
+ loc->fY += fY;
+ return true;
+ }
+
+private:
+ const std::unique_ptr<Node> fPrevious;
+ const int fX, fY;
+ GrRectanizerSkyline fRectanizer;
+};
+
+sk_sp<GrTextureProxy> GrCCAtlas::MakeLazyAtlasProxy(const LazyInstantiateAtlasCallback& callback,
+ CoverageType coverageType,
+ const GrCaps& caps,
+ GrSurfaceProxy::UseAllocator useAllocator) {
+ GrPixelConfig pixelConfig;
+ int sampleCount;
+
+ auto colorType = CoverageTypeToColorType(coverageType);
+ GrBackendFormat format = caps.getDefaultBackendFormat(colorType, GrRenderable::kYes);
+ switch (coverageType) {
+ case CoverageType::kFP16_CoverageCount:
+ pixelConfig = kAlpha_half_GrPixelConfig;
+ sampleCount = 1;
+ break;
+ case CoverageType::kA8_Multisample:
+ SkASSERT(caps.internalMultisampleCount(format) > 1);
+ pixelConfig = kAlpha_8_GrPixelConfig;
+ sampleCount = (caps.mixedSamplesSupport()) ? 1 : caps.internalMultisampleCount(format);
+ break;
+ case CoverageType::kA8_LiteralCoverage:
+ pixelConfig = kAlpha_8_GrPixelConfig;
+ sampleCount = 1;
+ break;
+ }
+
+ auto instantiate = [cb = std::move(callback), pixelConfig, format,
+ sampleCount](GrResourceProvider* rp) {
+ return cb(rp, pixelConfig, format, sampleCount);
+ };
+ sk_sp<GrTextureProxy> proxy = GrProxyProvider::MakeFullyLazyProxy(
+ std::move(instantiate), format, GrRenderable::kYes, sampleCount, GrProtected::kNo,
+ kTextureOrigin, pixelConfig, caps, useAllocator);
+
+ return proxy;
+}
+
+GrCCAtlas::GrCCAtlas(CoverageType coverageType, const Specs& specs, const GrCaps& caps)
+ : fCoverageType(coverageType)
+ , fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
+ specs.fMaxPreferredTextureSize)) {
+ // Caller should have cropped any paths to the destination render target instead of asking for
+ // an atlas larger than maxRenderTargetSize.
+ SkASSERT(fMaxTextureSize <= caps.maxTextureSize());
+ SkASSERT(specs.fMaxPreferredTextureSize > 0);
+
+ // Begin with the first pow2 dimensions whose area is theoretically large enough to contain the
+ // pending paths, favoring height over width if necessary.
+ int log2area = SkNextLog2(SkTMax(specs.fApproxNumPixels, 1));
+ fHeight = 1 << ((log2area + 1) / 2);
+ fWidth = 1 << (log2area / 2);
+
+ fWidth = SkTClamp(fWidth, specs.fMinTextureSize, specs.fMaxPreferredTextureSize);
+ fHeight = SkTClamp(fHeight, specs.fMinTextureSize, specs.fMaxPreferredTextureSize);
+
+ if (fWidth < specs.fMinWidth || fHeight < specs.fMinHeight) {
+ // They want to stuff a particularly large path into the atlas. Just punt and go with their
+ // min width and height. The atlas will grow as needed.
+ fWidth = SkTMin(specs.fMinWidth + kPadding, fMaxTextureSize);
+ fHeight = SkTMin(specs.fMinHeight + kPadding, fMaxTextureSize);
+ }
+
+ fTopNode = skstd::make_unique<Node>(nullptr, 0, 0, fWidth, fHeight);
+
+ fTextureProxy = MakeLazyAtlasProxy(
+ [this](GrResourceProvider* resourceProvider, GrPixelConfig pixelConfig,
+ const GrBackendFormat& format, int sampleCount) {
+ if (!fBackingTexture) {
+ GrSurfaceDesc desc;
+ desc.fWidth = fWidth;
+ desc.fHeight = fHeight;
+ desc.fConfig = pixelConfig;
+ fBackingTexture = resourceProvider->createTexture(
+ desc, format, GrRenderable::kYes, sampleCount, GrMipMapped::kNo,
+ SkBudgeted::kYes, GrProtected::kNo);
+ }
+ return fBackingTexture;
+ },
+ fCoverageType, caps, GrSurfaceProxy::UseAllocator::kNo);
+}
+
+GrCCAtlas::~GrCCAtlas() {
+}
+
+bool GrCCAtlas::addRect(const SkIRect& devIBounds, SkIVector* offset) {
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->isInstantiated());
+
+ SkIPoint16 location;
+ if (!this->internalPlaceRect(devIBounds.width(), devIBounds.height(), &location)) {
+ return false;
+ }
+ offset->set(location.x() - devIBounds.left(), location.y() - devIBounds.top());
+
+ fDrawBounds.fWidth = SkTMax(fDrawBounds.width(), location.x() + devIBounds.width());
+ fDrawBounds.fHeight = SkTMax(fDrawBounds.height(), location.y() + devIBounds.height());
+ return true;
+}
+
+bool GrCCAtlas::internalPlaceRect(int w, int h, SkIPoint16* loc) {
+ for (Node* node = fTopNode.get(); node; node = node->previous()) {
+ if (node->addRect(w, h, loc, fMaxTextureSize)) {
+ return true;
+ }
+ }
+
+ // The rect didn't fit. Grow the atlas and try again.
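+    // Growth trace (illustrative): a 512x512 atlas with fMaxTextureSize = 2048 that fails a
+    // placement grows to 512x1024 (height doubles first, since height <= width), then 1024x1024,
+    // then 1024x2048, then 2048x2048, retrying the rect after each doubling.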
+ do {
+ if (fWidth == fMaxTextureSize && fHeight == fMaxTextureSize) {
+ return false;
+ }
+ if (fHeight <= fWidth) {
+ int top = fHeight;
+ fHeight = SkTMin(fHeight * 2, fMaxTextureSize);
+ fTopNode = skstd::make_unique<Node>(std::move(fTopNode), 0, top, fWidth, fHeight);
+ } else {
+ int left = fWidth;
+ fWidth = SkTMin(fWidth * 2, fMaxTextureSize);
+ fTopNode = skstd::make_unique<Node>(std::move(fTopNode), left, 0, fWidth, fHeight);
+ }
+ } while (!fTopNode->addRect(w, h, loc, fMaxTextureSize));
+
+ return true;
+}
+
+void GrCCAtlas::setFillBatchID(int id) {
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->isInstantiated());
+ fFillBatchID = id;
+}
+
+void GrCCAtlas::setStrokeBatchID(int id) {
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->isInstantiated());
+ fStrokeBatchID = id;
+}
+
+void GrCCAtlas::setEndStencilResolveInstance(int idx) {
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->isInstantiated());
+ fEndStencilResolveInstance = idx;
+}
+
+static uint32_t next_atlas_unique_id() {
+ static std::atomic<uint32_t> nextID;
+ return nextID++;
+}
+
+sk_sp<GrCCCachedAtlas> GrCCAtlas::refOrMakeCachedAtlas(GrOnFlushResourceProvider* onFlushRP) {
+ if (!fCachedAtlas) {
+ static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
+
+ GrUniqueKey atlasUniqueKey;
+ GrUniqueKey::Builder builder(&atlasUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
+ builder[0] = next_atlas_unique_id();
+ builder.finish();
+
+ onFlushRP->assignUniqueKeyToProxy(atlasUniqueKey, fTextureProxy.get());
+
+ fCachedAtlas = sk_make_sp<GrCCCachedAtlas>(fCoverageType, atlasUniqueKey, fTextureProxy);
+ }
+
+ SkASSERT(fCachedAtlas->coverageType() == fCoverageType);
+ SkASSERT(fCachedAtlas->getOnFlushProxy() == fTextureProxy.get());
+ return fCachedAtlas;
+}
+
+std::unique_ptr<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
+ GrOnFlushResourceProvider* onFlushRP, sk_sp<GrTexture> backingTexture) {
+ SkASSERT(!fTextureProxy->isInstantiated()); // This method should only be called once.
+ // Caller should have cropped any paths to the destination render target instead of asking for
+ // an atlas larger than maxRenderTargetSize.
+ SkASSERT(SkTMax(fHeight, fWidth) <= fMaxTextureSize);
+ SkASSERT(fMaxTextureSize <= onFlushRP->caps()->maxRenderTargetSize());
+
+ // Finalize the content size of our proxy. The GPU can potentially make optimizations if it
+ // knows we only intend to write out a smaller sub-rectangle of the backing texture.
+ fTextureProxy->priv().setLazySize(fDrawBounds.width(), fDrawBounds.height());
+
+ if (backingTexture) {
+#ifdef SK_DEBUG
+ auto backingRT = backingTexture->asRenderTarget();
+ SkASSERT(backingRT);
+ SkASSERT(backingRT->config() == fTextureProxy->config());
+ SkASSERT(backingRT->numSamples() == fTextureProxy->asRenderTargetProxy()->numSamples());
+ SkASSERT(backingRT->width() == fWidth);
+ SkASSERT(backingRT->height() == fHeight);
+#endif
+ fBackingTexture = std::move(backingTexture);
+ }
+ auto colorType = (CoverageType::kFP16_CoverageCount == fCoverageType)
+ ? GrColorType::kAlpha_F16 : GrColorType::kAlpha_8;
+ auto rtc = onFlushRP->makeRenderTargetContext(fTextureProxy, colorType, nullptr, nullptr);
+ if (!rtc) {
+ SkDebugf("WARNING: failed to allocate a %ix%i atlas. Some paths will not be drawn.\n",
+ fWidth, fHeight);
+ return nullptr;
+ }
+
+ SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
+ rtc->clear(&clearRect, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+ return rtc;
+}
+
+GrCCAtlas* GrCCAtlasStack::addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset) {
+ GrCCAtlas* retiredAtlas = nullptr;
+ if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, devToAtlasOffset)) {
+ // The retired atlas is out of room and can't grow any bigger.
+ retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
+ fAtlases.emplace_back(fCoverageType, fSpecs, *fCaps);
+ SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
+ SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
+ SkAssertResult(fAtlases.back().addRect(devIBounds, devToAtlasOffset));
+ }
+ return retiredAtlas;
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.h b/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.h
new file mode 100644
index 0000000000..ac7faaa41d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCAtlas.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCAtlas_DEFINED
+#define GrCCAtlas_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrResourceKey.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrSurfaceProxy.h"
+
+class GrCCCachedAtlas;
+class GrOnFlushResourceProvider;
+class GrRenderTargetContext;
+class GrResourceProvider;
+class GrTextureProxy;
+struct SkIPoint16;
+struct SkIRect;
+
+/**
+ * This class implements a dynamically sized GrRectanizer that grows until it reaches the
+ * implementation-dependent max texture size. When finalized, it also creates and stores a
+ * GrTextureProxy for the underlying atlas.
+ */
+class GrCCAtlas {
+public:
+ // As long as GrSurfaceOrigin exists, we just have to decide on one for the atlas texture.
+ static constexpr GrSurfaceOrigin kTextureOrigin = kTopLeft_GrSurfaceOrigin;
+ static constexpr int kPadding = 1; // Amount of padding below and to the right of each path.
+
+ // This struct encapsulates the minimum and desired requirements for an atlas, as well as an
+ // approximate number of pixels to help select a good initial size.
+ struct Specs {
+ int fMaxPreferredTextureSize = 0;
+ int fMinTextureSize = 0;
+ int fMinWidth = 0; // If there are 100 20x10 paths, this should be 20.
+ int fMinHeight = 0; // If there are 100 20x10 paths, this should be 10.
+ int fApproxNumPixels = 0;
+
+ // Add space for a rect in the desired atlas specs.
+ void accountForSpace(int width, int height);
+ };
+
+ enum class CoverageType {
+ kFP16_CoverageCount,
+ kA8_Multisample,
+ kA8_LiteralCoverage
+ };
+
+ static constexpr GrColorType CoverageTypeToColorType(CoverageType coverageType) {
+ switch (coverageType) {
+ case CoverageType::kFP16_CoverageCount:
+ return GrColorType::kAlpha_F16;
+ case CoverageType::kA8_Multisample:
+ case CoverageType::kA8_LiteralCoverage:
+ return GrColorType::kAlpha_8;
+ }
+ SkUNREACHABLE;
+ }
+
+ using LazyInstantiateAtlasCallback = std::function<sk_sp<GrTexture>(
+ GrResourceProvider*, GrPixelConfig, const GrBackendFormat&, int sampleCount)>;
+
+ static sk_sp<GrTextureProxy> MakeLazyAtlasProxy(const LazyInstantiateAtlasCallback&,
+ CoverageType,
+ const GrCaps&,
+ GrSurfaceProxy::UseAllocator);
+
+ GrCCAtlas(CoverageType, const Specs&, const GrCaps&);
+ ~GrCCAtlas();
+
+ GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
+ int currentWidth() const { return fWidth; }
+ int currentHeight() const { return fHeight; }
+
+    // Attempts to add a rect to the atlas. If successful, writes to 'atlasOffset' the integer
+    // offset from device-space pixels where the path will be drawn to atlas pixels where its
+    // mask resides.
+ bool addRect(const SkIRect& devIBounds, SkIVector* atlasOffset);
+ const SkISize& drawBounds() { return fDrawBounds; }
+
+ // This is an optional space for the caller to jot down user-defined instance data to use when
+ // rendering atlas content.
+ void setFillBatchID(int id);
+ int getFillBatchID() const { return fFillBatchID; }
+ void setStrokeBatchID(int id);
+ int getStrokeBatchID() const { return fStrokeBatchID; }
+ void setEndStencilResolveInstance(int idx);
+ int getEndStencilResolveInstance() const { return fEndStencilResolveInstance; }
+
+ sk_sp<GrCCCachedAtlas> refOrMakeCachedAtlas(GrOnFlushResourceProvider*);
+
+ // Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
+ // that the caller may use to render the content. After this call, it is no longer valid to call
+    // addRect(), the batch-ID setters, or this method again.
+ //
+ // 'backingTexture', if provided, is a renderable texture with which to instantiate our proxy.
+ // If null then we will create a texture using the resource provider. The purpose of this param
+ // is to provide a guaranteed way to recycle a stashed atlas texture from a previous flush.
+ std::unique_ptr<GrRenderTargetContext> makeRenderTargetContext(
+ GrOnFlushResourceProvider*, sk_sp<GrTexture> backingTexture = nullptr);
+
+private:
+ class Node;
+
+ bool internalPlaceRect(int w, int h, SkIPoint16* loc);
+
+ const CoverageType fCoverageType;
+ const int fMaxTextureSize;
+ int fWidth, fHeight;
+ std::unique_ptr<Node> fTopNode;
+ SkISize fDrawBounds = {0, 0};
+
+ int fFillBatchID;
+ int fStrokeBatchID;
+ int fEndStencilResolveInstance;
+
+ sk_sp<GrCCCachedAtlas> fCachedAtlas;
+ sk_sp<GrTextureProxy> fTextureProxy;
+ sk_sp<GrTexture> fBackingTexture;
+};
+
+/**
+ * This class implements an unbounded stack of atlases. When the current atlas reaches the
+ * implementation-dependent max texture size, a new one is pushed to the back and we continue on.
+ */
+class GrCCAtlasStack {
+public:
+ using CoverageType = GrCCAtlas::CoverageType;
+
+ GrCCAtlasStack(CoverageType coverageType, const GrCCAtlas::Specs& specs, const GrCaps* caps)
+ : fCoverageType(coverageType), fSpecs(specs), fCaps(caps) {}
+
+ CoverageType coverageType() const { return fCoverageType; }
+ bool empty() const { return fAtlases.empty(); }
+ const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
+ GrCCAtlas& front() { SkASSERT(!this->empty()); return fAtlases.front(); }
+ GrCCAtlas& current() { SkASSERT(!this->empty()); return fAtlases.back(); }
+
+ class Iter {
+ public:
+ Iter(GrCCAtlasStack& stack) : fImpl(&stack.fAtlases) {}
+ bool next() { return fImpl.next(); }
+ GrCCAtlas* operator->() const { return fImpl.get(); }
+ private:
+ typename GrTAllocator<GrCCAtlas>::Iter fImpl;
+ };
+
+ // Adds a rect to the current atlas and returns the offset from device space to atlas space.
+ // Call current() to get the atlas it was added to.
+ //
+ // If the return value is non-null, it means the given rect did not fit in the then-current
+ // atlas, so it was retired and a new one was added to the stack. The return value is the
+    // newly-retired atlas. The caller should set the retired atlas's batch IDs before
+    // moving on.
+ GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset);
+
+private:
+ const CoverageType fCoverageType;
+ const GrCCAtlas::Specs fSpecs;
+ const GrCaps* const fCaps;
+ GrSTAllocator<4, GrCCAtlas> fAtlases;
+};
+
+inline void GrCCAtlas::Specs::accountForSpace(int width, int height) {
+ fMinWidth = SkTMax(width, fMinWidth);
+ fMinHeight = SkTMax(height, fMinHeight);
+ fApproxNumPixels += (width + kPadding) * (height + kPadding);
+}
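+
+// Illustrative sketch (hypothetical numbers): specs for 100 paths of 20x10 device-space pixels.
+//
+//     GrCCAtlas::Specs specs;
+//     specs.fMaxPreferredTextureSize = 2048;
+//     specs.fMinTextureSize = 512;
+//     for (int i = 0; i < 100; ++i) {
+//         specs.accountForSpace(20, 10);
+//     }
+//     // Now fMinWidth == 20, fMinHeight == 10, and fApproxNumPixels == 100 * 21 * 11 == 23100,
+//     // since each rect is padded by kPadding on both axes.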
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.cpp
new file mode 100644
index 0000000000..9e86d2e2d2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCClipPath.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+
+void GrCCClipPath::init(
+ const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ GrCCAtlas::CoverageType atlasCoverageType, const GrCaps& caps) {
+ SkASSERT(!this->isInitialized());
+
+ fAtlasLazyProxy = GrCCAtlas::MakeLazyAtlasProxy(
+ [this](GrResourceProvider* resourceProvider, GrPixelConfig,
+ const GrBackendFormat& format, int sampleCount) {
+ SkASSERT(fHasAtlas);
+ SkASSERT(!fHasAtlasTransform);
+
+ GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
+
+ if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
+ fAtlasScale = fAtlasTranslate = {0, 0};
+ SkDEBUGCODE(fHasAtlasTransform = true);
+ return sk_sp<GrTexture>();
+ }
+
+ sk_sp<GrTexture> texture = sk_ref_sp(textureProxy->peekTexture());
+ SkASSERT(texture);
+ SkASSERT(texture->backendFormat() == format);
+ SkASSERT(texture->asRenderTarget()->numSamples() == sampleCount);
+ SkASSERT(textureProxy->origin() == kTopLeft_GrSurfaceOrigin);
+
+ fAtlasScale = {1.f / texture->width(), 1.f / texture->height()};
+ fAtlasTranslate.set(fDevToAtlasOffset.fX * fAtlasScale.x(),
+ fDevToAtlasOffset.fY * fAtlasScale.y());
+ SkDEBUGCODE(fHasAtlasTransform = true);
+
+ return texture;
+ },
+ atlasCoverageType, caps, GrSurfaceProxy::UseAllocator::kYes);
+
+ fDeviceSpacePath = deviceSpacePath;
+ fDeviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
+ fAccessRect = accessRect;
+}
+
+void GrCCClipPath::accountForOwnPath(GrCCPerFlushResourceSpecs* specs) const {
+ SkASSERT(this->isInitialized());
+
+ ++specs->fNumClipPaths;
+ specs->fRenderedPathStats[GrCCPerFlushResourceSpecs::kFillIdx].statPath(fDeviceSpacePath);
+
+ SkIRect ibounds;
+ if (ibounds.intersect(fAccessRect, fPathDevIBounds)) {
+ specs->fRenderedAtlasSpecs.accountForSpace(ibounds.width(), ibounds.height());
+ }
+}
+
+void GrCCClipPath::renderPathInAtlas(GrCCPerFlushResources* resources,
+ GrOnFlushResourceProvider* onFlushRP) {
+ SkASSERT(this->isInitialized());
+ SkASSERT(!fHasAtlas);
+ fAtlas = resources->renderDeviceSpacePathInAtlas(
+ fAccessRect, fDeviceSpacePath, fPathDevIBounds, GrFillRuleForSkPath(fDeviceSpacePath),
+ &fDevToAtlasOffset);
+ SkDEBUGCODE(fHasAtlas = true);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.h b/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.h
new file mode 100644
index 0000000000..32e5ae49f7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCClipPath.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCClipPath_DEFINED
+#define GrCCClipPath_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/ccpr/GrCCAtlas.h"
+
+struct GrCCPerFlushResourceSpecs;
+class GrCCPerFlushResources;
+class GrOnFlushResourceProvider;
+class GrProxyProvider;
+
+/**
+ * These are keyed by SkPath generation ID, and store which device-space paths are accessed by
+ * clip FPs in a given opsTask, and where. A single GrCCClipPath can be referenced by multiple
+ * FPs. At flush time, their coverage-count masks are packed into atlas(es) alongside normal
+ * DrawPathOps.
+ */
+class GrCCClipPath {
+public:
+ GrCCClipPath() = default;
+ GrCCClipPath(const GrCCClipPath&) = delete;
+
+ ~GrCCClipPath() {
+ // Ensure no clip FP exists with a dangling pointer back into this class. This works because
+ // a clip FP will have a ref on the proxy if it exists.
+ //
+ // This assert also guarantees there won't be a lazy proxy callback with a dangling pointer
+ // back into this class, since no proxy will exist after we destruct, if the assert passes.
+ SkASSERT(!fAtlasLazyProxy || fAtlasLazyProxy->unique());
+ }
+
+ bool isInitialized() const { return fAtlasLazyProxy != nullptr; }
+ void init(const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ GrCCAtlas::CoverageType atlasCoverageType, const GrCaps&);
+
+ void addAccess(const SkIRect& accessRect) {
+ SkASSERT(this->isInitialized());
+ fAccessRect.join(accessRect);
+ }
+ GrTextureProxy* atlasLazyProxy() const {
+ SkASSERT(this->isInitialized());
+ return fAtlasLazyProxy.get();
+ }
+ const SkPath& deviceSpacePath() const {
+ SkASSERT(this->isInitialized());
+ return fDeviceSpacePath;
+ }
+ const SkIRect& pathDevIBounds() const {
+ SkASSERT(this->isInitialized());
+ return fPathDevIBounds;
+ }
+
+ void accountForOwnPath(GrCCPerFlushResourceSpecs*) const;
+ void renderPathInAtlas(GrCCPerFlushResources*, GrOnFlushResourceProvider*);
+
+ const SkVector& atlasScale() const { SkASSERT(fHasAtlasTransform); return fAtlasScale; }
+ const SkVector& atlasTranslate() const { SkASSERT(fHasAtlasTransform); return fAtlasTranslate; }
+
+private:
+ sk_sp<GrTextureProxy> fAtlasLazyProxy;
+ SkPath fDeviceSpacePath;
+ SkIRect fPathDevIBounds;
+ SkIRect fAccessRect;
+
+ const GrCCAtlas* fAtlas = nullptr;
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
+ SkDEBUGCODE(bool fHasAtlas = false;)
+
+ SkVector fAtlasScale;
+ SkVector fAtlasTranslate;
+ SkDEBUGCODE(bool fHasAtlasTransform = false;)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.cpp
new file mode 100644
index 0000000000..75535dd58a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCClipProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/ccpr/GrCCClipPath.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+GrCCClipProcessor::GrCCClipProcessor(const GrCCClipPath* clipPath, IsCoverageCount isCoverageCount,
+ MustCheckBounds mustCheckBounds)
+ : INHERITED(kGrCCClipProcessor_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fClipPath(clipPath)
+ , fIsCoverageCount(IsCoverageCount::kYes == isCoverageCount)
+ , fMustCheckBounds(MustCheckBounds::kYes == mustCheckBounds)
+ , fAtlasAccess(sk_ref_sp(fClipPath->atlasLazyProxy()), GrSamplerState::ClampNearest()) {
+ SkASSERT(fAtlasAccess.proxy());
+ this->setTextureSamplerCnt(1);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrCCClipProcessor::clone() const {
+ return skstd::make_unique<GrCCClipProcessor>(
+ fClipPath, IsCoverageCount(fIsCoverageCount), MustCheckBounds(fMustCheckBounds));
+}
+
+void GrCCClipProcessor::onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const {
+ const SkPath& clipPath = fClipPath->deviceSpacePath();
+ uint32_t key = (fIsCoverageCount) ? (uint32_t)GrFillRuleForSkPath(clipPath) : 0;
+ key = (key << 1) | ((clipPath.isInverseFillType()) ? 1 : 0);
+ key = (key << 1) | ((fMustCheckBounds) ? 1 : 0);
+ b->add32(key);
+}
+
+bool GrCCClipProcessor::onIsEqual(const GrFragmentProcessor& fp) const {
+ const GrCCClipProcessor& that = fp.cast<GrCCClipProcessor>();
+    // Each clip path has a unique atlas proxy, so hasSameSamplersAndAccesses should have
+ // already weeded out FPs with different ClipPaths.
+ SkASSERT(that.fClipPath->deviceSpacePath().getGenerationID() ==
+ fClipPath->deviceSpacePath().getGenerationID());
+ return that.fClipPath->deviceSpacePath().getFillType() ==
+ fClipPath->deviceSpacePath().getFillType() &&
+ that.fIsCoverageCount == fIsCoverageCount && that.fMustCheckBounds == fMustCheckBounds;
+}
+
+class GrCCClipProcessor::Impl : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrCCClipProcessor& proc = args.fFp.cast<GrCCClipProcessor>();
+ GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+
+ f->codeAppend ("half coverage;");
+
+ if (proc.fMustCheckBounds) {
+ const char* pathIBounds;
+ fPathIBoundsUniform = uniHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "path_ibounds", &pathIBounds);
+ f->codeAppendf("if (all(greaterThan(float4(sk_FragCoord.xy, %s.zw), "
+ "float4(%s.xy, sk_FragCoord.xy)))) {",
+ pathIBounds, pathIBounds);
+ }
+
+ const char* atlasTransform;
+ fAtlasTransformUniform = uniHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "atlas_transform", &atlasTransform);
+ f->codeAppendf("float2 texcoord = sk_FragCoord.xy * %s.xy + %s.zw;",
+ atlasTransform, atlasTransform);
+
+ f->codeAppend ("coverage = ");
+ f->appendTextureLookup(args.fTexSamplers[0], "texcoord", kHalf2_GrSLType);
+ f->codeAppend (".a;");
+
+ if (proc.fIsCoverageCount) {
+ auto fillRule = GrFillRuleForSkPath(proc.fClipPath->deviceSpacePath());
+ if (GrFillRule::kEvenOdd == fillRule) {
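+                // The next two lines fold the signed coverage count into [0,1] with a
+                // triangle wave: a count of 0 -> 0, 1 -> 1, 2 -> 0, 1.5 -> 0.5.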
+ f->codeAppend ("half t = mod(abs(coverage), 2);");
+ f->codeAppend ("coverage = 1 - abs(t - 1);");
+ } else {
+ SkASSERT(GrFillRule::kNonzero == fillRule);
+ f->codeAppend ("coverage = min(abs(coverage), 1);");
+ }
+ }
+
+ if (proc.fMustCheckBounds) {
+ f->codeAppend ("} else {");
+ f->codeAppend ( "coverage = 0;");
+ f->codeAppend ("}");
+ }
+
+ if (proc.fClipPath->deviceSpacePath().isInverseFillType()) {
+ f->codeAppend ("coverage = 1 - coverage;");
+ }
+
+ f->codeAppendf("%s = %s * coverage;", args.fOutputColor, args.fInputColor);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& fp) override {
+ const GrCCClipProcessor& proc = fp.cast<GrCCClipProcessor>();
+ if (proc.fMustCheckBounds) {
+ const SkRect pathIBounds = SkRect::Make(proc.fClipPath->pathDevIBounds());
+ pdman.set4f(fPathIBoundsUniform, pathIBounds.left(), pathIBounds.top(),
+ pathIBounds.right(), pathIBounds.bottom());
+ }
+ const SkVector& scale = proc.fClipPath->atlasScale();
+ const SkVector& trans = proc.fClipPath->atlasTranslate();
+ pdman.set4f(fAtlasTransformUniform, scale.x(), scale.y(), trans.x(), trans.y());
+ }
+
+private:
+ UniformHandle fPathIBoundsUniform;
+ UniformHandle fAtlasTransformUniform;
+};
+
+GrGLSLFragmentProcessor* GrCCClipProcessor::onCreateGLSLInstance() const {
+ return new Impl();
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.h
new file mode 100644
index 0000000000..0a719f80e9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCClipProcessor.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCClipProcessor_DEFINED
+#define GrCCClipProcessor_DEFINED
+
+#include "src/gpu/GrFragmentProcessor.h"
+
+class GrCCClipPath;
+
+class GrCCClipProcessor : public GrFragmentProcessor {
+public:
+ enum class IsCoverageCount : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ enum class MustCheckBounds : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ GrCCClipProcessor(const GrCCClipPath*, IsCoverageCount, MustCheckBounds);
+
+ const char* name() const override { return "GrCCClipProcessor"; }
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ const TextureSampler& onTextureSampler(int) const override { return fAtlasAccess; }
+
+private:
+ const GrCCClipPath* const fClipPath;
+ const bool fIsCoverageCount;
+ const bool fMustCheckBounds;
+ const TextureSampler fAtlasAccess;
+
+ class Impl;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.cpp
new file mode 100644
index 0000000000..82a15609e0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCConicShader.h"
+
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+void GrCCConicShader::emitSetupCode(
+ GrGLSLVertexGeoBuilder* s, const char* pts, const char** outHull4) const {
+ // K is distance from the line P2 -> P0. L is distance from the line P0 -> P1, scaled by 2w.
+ // M is distance from the line P1 -> P2, scaled by 2w. We do this in a space where P1=0.
+ s->declareGlobal(fKLMMatrix);
+ s->codeAppendf("float x0 = %s[0].x - %s[1].x, x2 = %s[2].x - %s[1].x;", pts, pts, pts, pts);
+ s->codeAppendf("float y0 = %s[0].y - %s[1].y, y2 = %s[2].y - %s[1].y;", pts, pts, pts, pts);
+ s->codeAppendf("float w = %s[3].x;", pts);
+ s->codeAppendf("%s = float3x3(y2 - y0, x0 - x2, x2*y0 - x0*y2, "
+ "2*w * float2(+y0, -x0), 0, "
+ "2*w * float2(-y2, +x2), 0);", fKLMMatrix.c_str());
+
+ s->declareGlobal(fControlPoint);
+ s->codeAppendf("%s = %s[1];", fControlPoint.c_str(), pts);
+
+ // Scale KLM by the inverse Manhattan width of K, and make sure K is positive. This allows K to
+ // double as the flat opposite edge AA. kwidth will not be 0 because we cull degenerate conics
+ // on the CPU.
+ s->codeAppendf("float kwidth = 2*bloat * (abs(%s[0].x) + abs(%s[0].y)) * sign(%s[0].z);",
+ fKLMMatrix.c_str(), fKLMMatrix.c_str(), fKLMMatrix.c_str());
+ s->codeAppendf("%s *= 1/kwidth;", fKLMMatrix.c_str());
+
+ if (outHull4) {
+ // Clip the conic triangle by the tangent line at maximum height. Conics have the nice
+        // property that maximum height always occurs at T=.5. This is a simple application of
+ // De Casteljau's algorithm.
+ s->codeAppendf("float2 p1w = %s[1]*w;", pts);
+ s->codeAppend ("float r = 1 / (1 + w);");
+ s->codeAppend ("float2 conic_hull[4];");
+ s->codeAppendf("conic_hull[0] = %s[0];", pts);
+ s->codeAppendf("conic_hull[1] = (%s[0] + p1w) * r;", pts);
+ s->codeAppendf("conic_hull[2] = (p1w + %s[2]) * r;", pts);
+ s->codeAppendf("conic_hull[3] = %s[2];", pts);
+ *outHull4 = "conic_hull";
+ }
+}
+
+void GrCCConicShader::onEmitVaryings(
+ GrGLSLVaryingHandler* varyingHandler, GrGLSLVarying::Scope scope, SkString* code,
+ const char* position, const char* coverage, const char* cornerCoverage, const char* wind) {
+ code->appendf("float3 klm = float3(%s - %s, 1) * %s;",
+ position, fControlPoint.c_str(), fKLMMatrix.c_str());
+ if (coverage) {
+ fKLM_fWind.reset(kFloat4_GrSLType, scope);
+ varyingHandler->addVarying("klm_and_wind", &fKLM_fWind);
+ code->appendf("%s.w = %s;", OutName(fKLM_fWind), wind);
+ } else {
+ fKLM_fWind.reset(kFloat3_GrSLType, scope);
+ varyingHandler->addVarying("klm", &fKLM_fWind);
+ }
+ code->appendf("%s.xyz = klm;", OutName(fKLM_fWind));
+
+ fGrad_fCorner.reset(cornerCoverage ? kFloat4_GrSLType : kFloat2_GrSLType, scope);
+ varyingHandler->addVarying((cornerCoverage) ? "grad_and_corner" : "grad", &fGrad_fCorner);
+ code->appendf("%s.xy = 2*bloat * (float3x2(%s) * float3(2*klm[0], -klm[2], -klm[1]));",
+ OutName(fGrad_fCorner), fKLMMatrix.c_str());
+
+ if (cornerCoverage) {
+ SkASSERT(coverage);
+ code->appendf("half hull_coverage;");
+ this->calcHullCoverage(code, "klm", OutName(fGrad_fCorner), "hull_coverage");
+ code->appendf("%s.zw = half2(hull_coverage, 1) * %s;",
+ OutName(fGrad_fCorner), cornerCoverage);
+ }
+}
+
+void GrCCConicShader::emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder* f, const char* outputCoverage) const {
+ this->calcHullCoverage(&AccessCodeString(f), fKLM_fWind.fsIn(), fGrad_fCorner.fsIn(),
+ outputCoverage);
+ f->codeAppendf("%s *= half(%s.w);", outputCoverage, fKLM_fWind.fsIn()); // Wind.
+
+ if (kFloat4_GrSLType == fGrad_fCorner.type()) {
+ f->codeAppendf("%s = fma(half(%s.z), half(%s.w), %s);", // Attenuated corner coverage.
+ outputCoverage, fGrad_fCorner.fsIn(), fGrad_fCorner.fsIn(),
+ outputCoverage);
+ }
+}
+
+void GrCCConicShader::calcHullCoverage(SkString* code, const char* klm, const char* grad,
+ const char* outputCoverage) const {
+ code->appendf("float k = %s.x, l = %s.y, m = %s.z;", klm, klm, klm);
+ code->append ("float f = k*k - l*m;");
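+    // f = k*k - l*m is the conic's implicit function from the Loop-Blinn paper cited in the
+    // header; its sign distinguishes the two sides of the curve, and dividing by the gradient's
+    // Manhattan width below converts the signed value into antialiased coverage.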
+ code->appendf("float fwidth = abs(%s.x) + abs(%s.y);", grad, grad);
+ code->appendf("float curve_coverage = min(0.5 - f/fwidth, 1);");
+ // K doubles as the flat opposite edge's AA.
+ code->append ("float edge_coverage = min(k - 0.5, 0);");
+ // Total hull coverage.
+ code->appendf("%s = max(half(curve_coverage + edge_coverage), 0);", outputCoverage);
+}
+
+void GrCCConicShader::emitSampleMaskCode(GrGLSLFPFragmentBuilder* f) const {
+ f->codeAppendf("float k = %s.x, l = %s.y, m = %s.z;",
+ fKLM_fWind.fsIn(), fKLM_fWind.fsIn(), fKLM_fWind.fsIn());
+ f->codeAppendf("float f = k*k - l*m;");
+ f->codeAppendf("float2 grad = %s;", fGrad_fCorner.fsIn());
+ f->applyFnToMultisampleMask("f", "grad", GrGLSLFPFragmentBuilder::ScopeFlags::kTopLevel);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.h b/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.h
new file mode 100644
index 0000000000..3645c7e5ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCConicShader.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCConicShader_DEFINED
+#define GrCCConicShader_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class renders the coverage of closed conic curves using the techniques outlined in
+ * "Resolution Independent Curve Rendering using Programmable Graphics Hardware" by Charles Loop and
+ * Jim Blinn:
+ *
+ * https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ *
+ * The provided curves must be monotonic with respect to the vector of their closing edge [P2 - P0].
+ * (Use GrCCGeometry::conicTo().)
+ */
+class GrCCConicShader : public GrCCCoverageProcessor::Shader {
+public:
+ bool calculatesOwnEdgeCoverage() const override { return true; }
+
+ void emitSetupCode(
+ GrGLSLVertexGeoBuilder*, const char* pts, const char** outHull4) const override;
+
+ void onEmitVaryings(
+ GrGLSLVaryingHandler*, GrGLSLVarying::Scope, SkString* code, const char* position,
+ const char* coverage, const char* cornerCoverage, const char* wind) override;
+
+ void emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder*, const char* outputCoverage) const override;
+
+ void emitSampleMaskCode(GrGLSLFPFragmentBuilder*) const override;
+
+private:
+ void calcHullCoverage(SkString* code, const char* klm, const char* grad,
+ const char* outputCoverage) const;
+
+ const GrShaderVar fKLMMatrix{"klm_matrix", kFloat3x3_GrSLType};
+ const GrShaderVar fControlPoint{"control_point", kFloat2_GrSLType};
+ GrGLSLVarying fKLM_fWind;
+ GrGLSLVarying fGrad_fCorner;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.cpp
new file mode 100644
index 0000000000..6abb04e859
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/ccpr/GrCCConicShader.h"
+#include "src/gpu/ccpr/GrCCCubicShader.h"
+#include "src/gpu/ccpr/GrCCQuadraticShader.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+class GrCCCoverageProcessor::TriangleShader : public GrCCCoverageProcessor::Shader {
+ void onEmitVaryings(
+ GrGLSLVaryingHandler* varyingHandler, GrGLSLVarying::Scope scope, SkString* code,
+ const char* position, const char* coverage, const char* cornerCoverage,
+ const char* /*wind*/) override {
+ if (!cornerCoverage) {
+ fCoverages.reset(kHalf_GrSLType, scope);
+ varyingHandler->addVarying("coverage", &fCoverages);
+ code->appendf("%s = %s;", OutName(fCoverages), coverage);
+ } else {
+ fCoverages.reset(kHalf3_GrSLType, scope);
+ varyingHandler->addVarying("coverages", &fCoverages);
+ code->appendf("%s = half3(%s, %s);", OutName(fCoverages), coverage, cornerCoverage);
+ }
+ }
+
+ void emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder* f, const char* outputCoverage) const override {
+ if (kHalf_GrSLType == fCoverages.type()) {
+ f->codeAppendf("%s = %s;", outputCoverage, fCoverages.fsIn());
+ } else {
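+            // Assumed packing: fCoverages == (edge coverage, corner coverage, attenuation), so
+            // this adds the attenuated corner coverage on top of the edge coverage.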
+ f->codeAppendf("%s = %s.z * %s.y + %s.x;",
+ outputCoverage, fCoverages.fsIn(), fCoverages.fsIn(), fCoverages.fsIn());
+ }
+ }
+
+    void emitSampleMaskCode(GrGLSLFPFragmentBuilder*) const override {}
+
+ GrGLSLVarying fCoverages;
+};
+
+void GrCCCoverageProcessor::Shader::CalcWind(const GrCCCoverageProcessor& proc,
+ GrGLSLVertexGeoBuilder* s, const char* pts,
+ const char* outputWind) {
+ if (3 == proc.numInputPoints()) {
+ s->codeAppendf("float2 a = %s[0] - %s[1], "
+ "b = %s[0] - %s[2];", pts, pts, pts, pts);
+ } else {
+ // All inputs are convex, so it's sufficient to just average the middle two input points.
+ SkASSERT(4 == proc.numInputPoints());
+ s->codeAppendf("float2 p12 = (%s[1] + %s[2]) * .5;", pts, pts);
+ s->codeAppendf("float2 a = %s[0] - p12, "
+ "b = %s[0] - %s[3];", pts, pts, pts);
+ }
+
+ s->codeAppend ("float area_x2 = determinant(float2x2(a, b));");
+ if (proc.isTriangles()) {
+ // We cull extremely thin triangles by zeroing wind. When a triangle gets too thin it's
+ // possible for FP round-off error to actually give us the wrong winding direction, causing
+        // rendering artifacts. The criterion we choose is "height <~ 1/1024". So we drop a triangle
+ // if the max effect it can have on any single pixel is <~ 1/1024, or 1/4 of a bit in 8888.
+ s->codeAppend ("float2 bbox_size = max(abs(a), abs(b));");
+ s->codeAppend ("float basewidth = max(bbox_size.x + bbox_size.y, 1);");
+ s->codeAppendf("%s = (abs(area_x2 * 1024) > basewidth) ? sign(half(area_x2)) : 0;",
+ outputWind);
+ } else {
+ // We already converted nearly-flat curves to lines on the CPU, so no need to worry about
+ // thin curve hulls at this point.
+ s->codeAppendf("%s = sign(half(area_x2));", outputWind);
+ }
+}
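+
+// A minimal CPU-side sketch of the culling criterion above, disabled and for reference only
+// (the function name is hypothetical, not part of Skia). A triangle's height is roughly
+// area_x2 / basewidth, so wind is zeroed when that height is <= ~1/1024 of a pixel.
+#if 0
+#include <algorithm>
+#include <cmath>
+static float triangle_wind_reference(float ax, float ay, float bx, float by) {
+    float area_x2 = ax*by - ay*bx;  // determinant(float2x2(a, b))
+    float bbox_w = std::max(std::fabs(ax), std::fabs(bx));
+    float bbox_h = std::max(std::fabs(ay), std::fabs(by));
+    float basewidth = std::max(bbox_w + bbox_h, 1.f);
+    float wind = (area_x2 > 0) ? 1.f : (area_x2 < 0) ? -1.f : 0.f;
+    return (std::fabs(area_x2 * 1024) > basewidth) ? wind : 0.f;
+}
+#endif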
+
+void GrCCCoverageProcessor::Shader::CalcEdgeCoverageAtBloatVertex(GrGLSLVertexGeoBuilder* s,
+ const char* leftPt,
+ const char* rightPt,
+ const char* rasterVertexDir,
+ const char* outputCoverage) {
+ // Here we find an edge's coverage at one corner of a conservative raster bloat box whose center
+ // falls on the edge in question. (A bloat box is axis-aligned and the size of one pixel.) We
+ // always set up coverage so it is -1 at the outermost corner, 0 at the innermost, and -.5 at
+ // the center. Interpolated, these coverage values convert jagged conservative raster edges into
+ // smooth antialiased edges.
+ //
+ // d1 == (P + sign(n) * bloat) dot n (Distance at the bloat box vertex whose
+ // == P dot n + (abs(n.x) + abs(n.y)) * bloatSize coverage=-1, where the bloat box is
+ // centered on P.)
+ //
+ // d0 == (P - sign(n) * bloat) dot n (Distance at the bloat box vertex whose
+ // == P dot n - (abs(n.x) + abs(n.y)) * bloatSize coverage=0, where the bloat box is
+ // centered on P.)
+ //
+ // d == (P + rasterVertexDir * bloatSize) dot n (Distance at the bloat box vertex whose
+ // == P dot n + (rasterVertexDir dot n) * bloatSize coverage we wish to calculate.)
+ //
+ // coverage == -(d - d0) / (d1 - d0) (coverage=-1 at d=d1; coverage=0 at d=d0)
+ //
+ // == (rasterVertexDir dot n) / (abs(n.x) + abs(n.y)) * -.5 - .5
+ //
+ s->codeAppendf("float2 n = float2(%s.y - %s.y, %s.x - %s.x);",
+ rightPt, leftPt, leftPt, rightPt);
+ s->codeAppend ("float nwidth = abs(n.x) + abs(n.y);");
+ s->codeAppendf("float t = dot(%s, n);", rasterVertexDir);
+    // The conditional below guarantees we get exactly 1 on the divide when nwidth=t (in case the
+    // GPU implements division as multiplication by the reciprocal). It also guards against NaN
+    // when nwidth=0.
+ s->codeAppendf("%s = half(abs(t) != nwidth ? t / nwidth : sign(t)) * -.5 - .5;",
+ outputCoverage);
+}
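+
+// A small, disabled CPU-side check of the formula above (hypothetical name, not part of
+// Skia): coverage is -1 when the bloat direction fully agrees with the edge normal n,
+// 0 when it fully opposes it, and -.5 when it is perpendicular to it.
+#if 0
+#include <cmath>
+static float edge_coverage_reference(float nx, float ny, float dirX, float dirY) {
+    float nwidth = std::fabs(nx) + std::fabs(ny);
+    float t = dirX*nx + dirY*ny;
+    float sgn = (t > 0) ? 1.f : (t < 0) ? -1.f : 0.f;
+    float q = (std::fabs(t) != nwidth) ? t/nwidth : sgn;
+    return q * -.5f - .5f;  // -1 at t == +nwidth, 0 at t == -nwidth, -.5 at t == 0.
+}
+#endif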
+
+void GrCCCoverageProcessor::Shader::CalcEdgeCoveragesAtBloatVertices(GrGLSLVertexGeoBuilder* s,
+ const char* leftPt,
+ const char* rightPt,
+ const char* bloatDir1,
+ const char* bloatDir2,
+ const char* outputCoverages) {
+ // See comments in CalcEdgeCoverageAtBloatVertex.
+ s->codeAppendf("float2 n = float2(%s.y - %s.y, %s.x - %s.x);",
+ rightPt, leftPt, leftPt, rightPt);
+ s->codeAppend ("float nwidth = abs(n.x) + abs(n.y);");
+ s->codeAppendf("float2 t = n * float2x2(%s, %s);", bloatDir1, bloatDir2);
+ s->codeAppendf("for (int i = 0; i < 2; ++i) {");
+ s->codeAppendf( "%s[i] = half(abs(t[i]) != nwidth ? t[i] / nwidth : sign(t[i])) * -.5 - .5;",
+ outputCoverages);
+ s->codeAppendf("}");
+}
+
+void GrCCCoverageProcessor::Shader::CalcCornerAttenuation(GrGLSLVertexGeoBuilder* s,
+ const char* leftDir, const char* rightDir,
+ const char* outputAttenuation) {
+ // obtuseness = cos(corner_angle) if corner_angle > 90 degrees
+ // 0 if corner_angle <= 90 degrees
+ //
+ // NOTE: leftDir and rightDir are normalized and point in the same direction the path was
+ // defined with, i.e., leftDir points into the corner and rightDir points away from the corner.
+ s->codeAppendf("half obtuseness = max(half(dot(%s, %s)), 0);", leftDir, rightDir);
+
+ // axis_alignedness = 1 - tan(angle_to_nearest_axis_from_corner_bisector)
+ // (i.e., 1 when the corner bisector is aligned with the x- or y-axis
+ // 0 when the corner bisector falls on a 45 degree angle
+ // 0..1 when the corner bisector falls somewhere in between
+ s->codeAppendf("half2 abs_bisect_maybe_transpose = abs((0 == obtuseness) ? half2(%s - %s) : "
+ "half2(%s + %s));",
+ leftDir, rightDir, leftDir, rightDir);
+ s->codeAppend ("half axis_alignedness = "
+ "1 - min(abs_bisect_maybe_transpose.y, abs_bisect_maybe_transpose.x) / "
+ "max(abs_bisect_maybe_transpose.x, abs_bisect_maybe_transpose.y);");
+
+    // ninety_degreesness = sin^2(corner_angle)
+    // (We use sin^2 because it is always positive, and in practice it looked better than plain sine.)
+ s->codeAppendf("half ninety_degreesness = determinant(half2x2(%s, %s));", leftDir, rightDir);
+ s->codeAppend ("ninety_degreesness = ninety_degreesness * ninety_degreesness;");
+
+    // The formula below is empirical rather than derived. It was arrived at by considering the
+    // following observations:
+ //
+ // 1. 90-degree, axis-aligned corners have full attenuation along the bisector.
+ // (i.e. coverage = 1 - distance_to_corner^2)
+ // (i.e. outputAttenuation = 0)
+ //
+ // 2. 180-degree corners always have zero attenuation.
+ // (i.e. coverage = 1 - distance_to_corner)
+ // (i.e. outputAttenuation = 1)
+ //
+ // 3. 90-degree corners whose bisector falls on a 45 degree angle also do not attenuate.
+ // (i.e. outputAttenuation = 1)
+ s->codeAppendf("%s = max(obtuseness, axis_alignedness * ninety_degreesness);",
+ outputAttenuation);
+}
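+
+// A disabled CPU-side sketch of the attenuation above (hypothetical name, not part of Skia),
+// for unit-length corner directions. It reproduces the three observations: 90-degree corners
+// with axis-aligned edges give 0, 180-degree corners give 1, and 90-degree corners whose
+// edges fall on the 45-degree diagonals give 1.
+#if 0
+#include <algorithm>
+#include <cmath>
+static float corner_attenuation_reference(float lx, float ly, float rx, float ry) {
+    float obtuseness = std::max(lx*rx + ly*ry, 0.f);           // max(cos(corner_angle), 0)
+    bool transpose = (0 == obtuseness);
+    float bx = std::fabs(transpose ? lx - rx : lx + rx);
+    float by = std::fabs(transpose ? ly - ry : ly + ry);
+    float axis_alignedness = 1 - std::min(bx, by) / std::max(bx, by);
+    float ninety_degreesness = lx*ry - ly*rx;                  // sin(corner_angle)
+    ninety_degreesness *= ninety_degreesness;                  // sin^2(corner_angle)
+    return std::max(obtuseness, axis_alignedness * ninety_degreesness);
+}
+#endif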
+
+GrGLSLPrimitiveProcessor* GrCCCoverageProcessor::createGLSLInstance(const GrShaderCaps&) const {
+ std::unique_ptr<Shader> shader;
+ switch (fPrimitiveType) {
+ case PrimitiveType::kTriangles:
+ case PrimitiveType::kWeightedTriangles:
+ shader = skstd::make_unique<TriangleShader>();
+ break;
+ case PrimitiveType::kQuadratics:
+ shader = skstd::make_unique<GrCCQuadraticShader>();
+ break;
+ case PrimitiveType::kCubics:
+ shader = skstd::make_unique<GrCCCubicShader>();
+ break;
+ case PrimitiveType::kConics:
+ shader = skstd::make_unique<GrCCConicShader>();
+ break;
+ }
+ return this->onCreateGLSLInstance(std::move(shader));
+}
+
+void GrCCCoverageProcessor::draw(
+ GrOpFlushState* flushState, const GrPipeline& pipeline, const SkIRect scissorRects[],
+ const GrMesh meshes[], int meshCount, const SkRect& drawBounds) const {
+ GrPipeline::DynamicStateArrays dynamicStateArrays;
+ dynamicStateArrays.fScissorRects = scissorRects;
+ GrOpsRenderPass* renderPass = flushState->opsRenderPass();
+
+ GrProgramInfo programInfo(flushState->drawOpArgs().numSamples(),
+ flushState->drawOpArgs().origin(),
+ pipeline,
+ *this,
+ nullptr,
+ &dynamicStateArrays, 0);
+
+ renderPass->draw(programInfo, meshes, meshCount, drawBounds);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.h
new file mode 100644
index 0000000000..5fdb488ea5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCCoverageProcessor.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCCoverageProcessor_DEFINED
+#define GrCCCoverageProcessor_DEFINED
+
+#include "include/private/SkNx.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrGLSLFPFragmentBuilder;
+class GrGLSLVertexGeoBuilder;
+class GrMesh;
+class GrOpFlushState;
+
+/**
+ * This is the geometry processor for the simple convex primitive shapes (triangles and closed,
+ * convex bezier curves) from which ccpr paths are composed. The output is a single-channel alpha
+ * value that indicates coverage: positive for clockwise shapes and negative for counter-clockwise.
+ *
+ * The caller is responsible for drawing all primitives produced by GrCCGeometry into a cleared,
+ * floating point, alpha-only render target using SkBlendMode::kPlus. Once all of a path's
+ * primitives have been drawn, the render target contains a composite coverage count that can then
+ * be used to draw the path (see GrCCPathProcessor).
+ *
+ * To draw primitives, use appendMesh() and draw() (defined below).
+ */
+class GrCCCoverageProcessor : public GrGeometryProcessor {
+public:
+ enum class PrimitiveType {
+ kTriangles,
+ kWeightedTriangles, // Triangles (from the tessellator) whose winding magnitude > 1.
+ kQuadratics,
+ kCubics,
+ kConics
+ };
+ static const char* PrimitiveTypeName(PrimitiveType);
+
+    // Defines a single primitive shape with 3 input points (i.e. Triangles and Quadratics).
+    // X,Y point values are stored either transposed or interleaved, as specified by Ordering.
+ struct TriPointInstance {
+ float fValues[6];
+
+ enum class Ordering : bool {
+ kXYTransposed,
+ kXYInterleaved,
+ };
+
+ void set(const SkPoint[3], const Sk2f& translate, Ordering);
+ void set(const SkPoint&, const SkPoint&, const SkPoint&, const Sk2f& translate, Ordering);
+ void set(const Sk2f& P0, const Sk2f& P1, const Sk2f& P2, const Sk2f& translate, Ordering);
+ };
+
+ // Defines a single primitive shape with 4 input points, or 3 input points plus a "weight"
+ // parameter duplicated in both lanes of the 4th input (i.e. Cubics, Conics, and Triangles with
+ // a weighted winding number). X,Y point values are transposed.
+ struct QuadPointInstance {
+ float fX[4];
+ float fY[4];
+
+ void set(const SkPoint[4], float dx, float dy);
+ void setW(const SkPoint[3], const Sk2f& trans, float w);
+ void setW(const SkPoint&, const SkPoint&, const SkPoint&, const Sk2f& trans, float w);
+ void setW(const Sk2f& P0, const Sk2f& P1, const Sk2f& P2, const Sk2f& trans, float w);
+ };
+
+ virtual void reset(PrimitiveType, GrResourceProvider*) = 0;
+
+ PrimitiveType primitiveType() const { return fPrimitiveType; }
+
+ // Number of bezier points for curves, or 3 for triangles.
+ int numInputPoints() const { return PrimitiveType::kCubics == fPrimitiveType ? 4 : 3; }
+
+ bool isTriangles() const {
+ return PrimitiveType::kTriangles == fPrimitiveType ||
+ PrimitiveType::kWeightedTriangles == fPrimitiveType;
+ }
+
+    bool hasInputWeight() const {
+ return PrimitiveType::kWeightedTriangles == fPrimitiveType ||
+ PrimitiveType::kConics == fPrimitiveType;
+ }
+
+ // GrPrimitiveProcessor overrides.
+ const char* name() const override { return PrimitiveTypeName(fPrimitiveType); }
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ return SkStringPrintf("%s\n%s", this->name(), this->INHERITED::dumpInfo().c_str());
+ }
+#endif
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ SkDEBUGCODE(this->getDebugBloatKey(b));
+ b->add32((int)fPrimitiveType);
+ }
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const final;
+
+#ifdef SK_DEBUG
+ // Increases the 1/2 pixel AA bloat by a factor of debugBloat.
+ void enableDebugBloat(float debugBloat) { fDebugBloat = debugBloat; }
+ bool debugBloatEnabled() const { return fDebugBloat > 0; }
+ float debugBloat() const { SkASSERT(this->debugBloatEnabled()); return fDebugBloat; }
+ void getDebugBloatKey(GrProcessorKeyBuilder* b) const {
+ uint32_t bloatBits;
+ memcpy(&bloatBits, &fDebugBloat, 4);
+ b->add32(bloatBits);
+ }
+#endif
+
+ // Appends a GrMesh that will draw the provided instances. The instanceBuffer must be an array
+ // of either TriPointInstance or QuadPointInstance, depending on this processor's RendererPass,
+ // with coordinates in the desired shape's final atlas-space position.
+ virtual void appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, SkTArray<GrMesh>* out) const = 0;
+
+ virtual void draw(GrOpFlushState*, const GrPipeline&, const SkIRect scissorRects[],
+ const GrMesh[], int meshCount, const SkRect& drawBounds) const;
+
+ // The Shader provides code to calculate each pixel's coverage in a RenderPass. It also
+ // provides details about shape-specific geometry.
+ class Shader {
+ public:
+ // Returns true if the Impl should not calculate the coverage argument for emitVaryings().
+ // If true, then "coverage" will have a signed magnitude of 1.
+ virtual bool calculatesOwnEdgeCoverage() const { return false; }
+
+ // Called before generating geometry. Subclasses may set up internal member variables during
+ // this time that will be needed during onEmitVaryings (e.g. transformation matrices).
+ //
+ // If the 'outHull4' parameter is provided, and there are not 4 input points, the subclass
+ // is required to fill it with the name of a 4-point hull around which the Impl can generate
+ // its geometry. If it is left unchanged, the Impl will use the regular input points.
+ virtual void emitSetupCode(
+ GrGLSLVertexGeoBuilder*, const char* pts, const char** outHull4 = nullptr) const {
+ SkASSERT(!outHull4);
+ }
+
+ void emitVaryings(
+ GrGLSLVaryingHandler* varyingHandler, GrGLSLVarying::Scope scope, SkString* code,
+ const char* position, const char* coverage, const char* cornerCoverage,
+ const char* wind) {
+ SkASSERT(GrGLSLVarying::Scope::kVertToGeo != scope);
+ this->onEmitVaryings(
+ varyingHandler, scope, code, position, coverage, cornerCoverage, wind);
+ }
+
+ // Writes the signed coverage value at the current pixel to "outputCoverage".
+ virtual void emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder*, const char* outputCoverage) const = 0;
+
+ // Assigns the built-in sample mask at the current pixel.
+ virtual void emitSampleMaskCode(GrGLSLFPFragmentBuilder*) const = 0;
+
+ // Calculates the winding direction of the input points (+1, -1, or 0). Wind for extremely
+ // thin triangles gets rounded to zero.
+ static void CalcWind(const GrCCCoverageProcessor&, GrGLSLVertexGeoBuilder*, const char* pts,
+ const char* outputWind);
+
+ // Calculates an edge's coverage at a conservative raster vertex. The edge is defined by two
+ // clockwise-ordered points, 'leftPt' and 'rightPt'. 'rasterVertexDir' is a pair of +/-1
+ // values that point in the direction of conservative raster bloat, starting from an
+ // endpoint.
+ //
+ // Coverage values ramp from -1 (completely outside the edge) to 0 (completely inside).
+ static void CalcEdgeCoverageAtBloatVertex(GrGLSLVertexGeoBuilder*, const char* leftPt,
+ const char* rightPt, const char* rasterVertexDir,
+ const char* outputCoverage);
+
+ // Calculates an edge's coverage at two conservative raster vertices.
+ // (See CalcEdgeCoverageAtBloatVertex).
+ static void CalcEdgeCoveragesAtBloatVertices(GrGLSLVertexGeoBuilder*, const char* leftPt,
+ const char* rightPt, const char* bloatDir1,
+ const char* bloatDir2,
+ const char* outputCoverages);
+
+ // Corner boxes require an additional "attenuation" varying that is multiplied by the
+ // regular (linearly-interpolated) coverage. This function calculates the attenuation value
+ // to use in the single, outermost vertex. The remaining three vertices of the corner box
+ // all use an attenuation value of 1.
+ static void CalcCornerAttenuation(GrGLSLVertexGeoBuilder*, const char* leftDir,
+ const char* rightDir, const char* outputAttenuation);
+
+ virtual ~Shader() {}
+
+ protected:
+ // Here the subclass adds its internal varyings to the handler and produces code to
+ // initialize those varyings from a given position and coverage values.
+ //
+ // NOTE: the coverage values are signed appropriately for wind.
+ // 'coverage' will only be +1 or -1 on curves.
+ virtual void onEmitVaryings(
+ GrGLSLVaryingHandler*, GrGLSLVarying::Scope, SkString* code, const char* position,
+ const char* coverage, const char* cornerCoverage, const char* wind) = 0;
+
+        // Returns the name of a Shader's internal varying at the point where its value is
+        // assigned. This is intended to work whether called for a vertex or a geometry shader.
+ const char* OutName(const GrGLSLVarying& varying) const {
+ using Scope = GrGLSLVarying::Scope;
+ SkASSERT(Scope::kVertToGeo != varying.scope());
+ return Scope::kGeoToFrag == varying.scope() ? varying.gsOut() : varying.vsOut();
+ }
+
+ // Our friendship with GrGLSLShaderBuilder does not propagate to subclasses.
+ inline static SkString& AccessCodeString(GrGLSLShaderBuilder* s) { return s->code(); }
+ };
+
+protected:
+ // Slightly undershoot a bloat radius of 0.5 so vertices that fall on integer boundaries don't
+ // accidentally bleed into neighbor pixels.
+ static constexpr float kAABloatRadius = 0.491111f;
+
+ GrCCCoverageProcessor(ClassID classID) : INHERITED(classID) {}
+
+ virtual GrGLSLPrimitiveProcessor* onCreateGLSLInstance(std::unique_ptr<Shader>) const = 0;
+
+ // Our friendship with GrGLSLShaderBuilder does not propagate to subclasses.
+ inline static SkString& AccessCodeString(GrGLSLShaderBuilder* s) { return s->code(); }
+
+ PrimitiveType fPrimitiveType;
+ SkDEBUGCODE(float fDebugBloat = 0);
+
+ class TriangleShader;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+inline const char* GrCCCoverageProcessor::PrimitiveTypeName(PrimitiveType type) {
+ switch (type) {
+ case PrimitiveType::kTriangles: return "kTriangles";
+ case PrimitiveType::kWeightedTriangles: return "kWeightedTriangles";
+ case PrimitiveType::kQuadratics: return "kQuadratics";
+ case PrimitiveType::kCubics: return "kCubics";
+ case PrimitiveType::kConics: return "kConics";
+ }
+ SK_ABORT("Invalid PrimitiveType");
+}
+
+inline void GrCCCoverageProcessor::TriPointInstance::set(
+ const SkPoint p[3], const Sk2f& translate, Ordering ordering) {
+ this->set(p[0], p[1], p[2], translate, ordering);
+}
+
+inline void GrCCCoverageProcessor::TriPointInstance::set(
+ const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, const Sk2f& translate,
+ Ordering ordering) {
+ Sk2f P0 = Sk2f::Load(&p0);
+ Sk2f P1 = Sk2f::Load(&p1);
+ Sk2f P2 = Sk2f::Load(&p2);
+ this->set(P0, P1, P2, translate, ordering);
+}
+
+inline void GrCCCoverageProcessor::TriPointInstance::set(
+ const Sk2f& P0, const Sk2f& P1, const Sk2f& P2, const Sk2f& translate, Ordering ordering) {
+ if (Ordering::kXYTransposed == ordering) {
+ Sk2f::Store3(fValues, P0 + translate, P1 + translate, P2 + translate);
+ } else {
+ (P0 + translate).store(fValues);
+ (P1 + translate).store(fValues + 2);
+ (P2 + translate).store(fValues + 4);
+ }
+}
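+
+// Illustrative note on the two orderings above (assumed, from the Sk2f::Store3 semantics):
+// for points (x0,y0), (x1,y1), (x2,y2), kXYTransposed is expected to store fValues as
+// {x0,x1,x2, y0,y1,y2}, while kXYInterleaved stores {x0,y0, x1,y1, x2,y2}. A disabled
+// plain-C++ sketch of the interleaved layout (hypothetical name, translation omitted):
+#if 0
+static void set_interleaved_reference(float out[6], const float pts[3][2]) {
+    for (int i = 0; i < 3; ++i) {
+        out[2*i + 0] = pts[i][0];  // x
+        out[2*i + 1] = pts[i][1];  // y
+    }
+}
+#endif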
+
+inline void GrCCCoverageProcessor::QuadPointInstance::set(const SkPoint p[4], float dx, float dy) {
+ Sk4f X,Y;
+ Sk4f::Load2(p, &X, &Y);
+ (X + dx).store(&fX);
+ (Y + dy).store(&fY);
+}
+
+inline void GrCCCoverageProcessor::QuadPointInstance::setW(const SkPoint p[3], const Sk2f& trans,
+ float w) {
+ this->setW(p[0], p[1], p[2], trans, w);
+}
+
+inline void GrCCCoverageProcessor::QuadPointInstance::setW(const SkPoint& p0, const SkPoint& p1,
+ const SkPoint& p2, const Sk2f& trans,
+ float w) {
+ Sk2f P0 = Sk2f::Load(&p0);
+ Sk2f P1 = Sk2f::Load(&p1);
+ Sk2f P2 = Sk2f::Load(&p2);
+ this->setW(P0, P1, P2, trans, w);
+}
+
+inline void GrCCCoverageProcessor::QuadPointInstance::setW(const Sk2f& P0, const Sk2f& P1,
+ const Sk2f& P2, const Sk2f& trans,
+ float w) {
+ Sk2f W = Sk2f(w);
+ Sk2f::Store4(this, P0 + trans, P1 + trans, P2 + trans, W);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.cpp
new file mode 100644
index 0000000000..aff20194e5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCCubicShader.h"
+
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+using Shader = GrCCCoverageProcessor::Shader;
+
+void GrCCCubicShader::emitSetupCode(
+ GrGLSLVertexGeoBuilder* s, const char* pts, const char** /*outHull4*/) const {
+ // Find the cubic's power basis coefficients.
+ s->codeAppendf("float2x4 C = float4x4(-1, 3, -3, 1, "
+ " 3, -6, 3, 0, "
+ "-3, 3, 0, 0, "
+ " 1, 0, 0, 0) * transpose(%s);", pts);
+
+ // Find the cubic's inflection function.
+ s->codeAppend ("float D3 = +determinant(float2x2(C[0].yz, C[1].yz));");
+ s->codeAppend ("float D2 = -determinant(float2x2(C[0].xz, C[1].xz));");
+ s->codeAppend ("float D1 = +determinant(float2x2(C));");
+
+ // Shift the exponents in D so the largest magnitude falls somewhere in 1..2. This protects us
+ // from overflow while solving for roots and KLM functionals.
+ s->codeAppend ("float Dmax = max(max(abs(D1), abs(D2)), abs(D3));");
+ s->codeAppend ("float norm;");
+ if (s->getProgramBuilder()->shaderCaps()->fpManipulationSupport()) {
+ s->codeAppend ("int exp;");
+ s->codeAppend ("frexp(Dmax, exp);");
+ s->codeAppend ("norm = ldexp(1, 1 - exp);");
+ } else {
+ s->codeAppend ("norm = 1/Dmax;"); // Dmax will not be 0 because we cull line cubics on CPU.
+ }
+ s->codeAppend ("D3 *= norm;");
+ s->codeAppend ("D2 *= norm;");
+ s->codeAppend ("D1 *= norm;");
+
+ // Calculate the KLM matrix.
+ s->declareGlobal(fKLMMatrix);
+ s->codeAppend ("float discr = 3*D2*D2 - 4*D1*D3;");
+ s->codeAppend ("float x = discr >= 0 ? 3 : 1;");
+ s->codeAppend ("float q = sqrt(x * abs(discr));");
+ s->codeAppend ("q = x*D2 + (D2 >= 0 ? q : -q);");
+
+ s->codeAppend ("float2 l, m;");
+ s->codeAppend ("l.ts = float2(q, 2*x * D1);");
+ s->codeAppend ("m.ts = float2(2, q) * (discr >= 0 ? float2(D3, 1) "
+ ": float2(D2*D2 - D3*D1, D1));");
+
+ s->codeAppend ("float4 K;");
+ s->codeAppend ("float4 lm = l.sstt * m.stst;");
+ s->codeAppend ("K = float4(0, lm.x, -lm.y - lm.z, lm.w);");
+
+ s->codeAppend ("float4 L, M;");
+ s->codeAppend ("lm.yz += 2*lm.zy;");
+ s->codeAppend ("L = float4(-1,x,-x,1) * l.sstt * (discr >= 0 ? l.ssst * l.sttt : lm);");
+ s->codeAppend ("M = float4(-1,x,-x,1) * m.sstt * (discr >= 0 ? m.ssst * m.sttt : lm.xzyw);");
+
+ s->codeAppend ("int middlerow = abs(D2) > abs(D1) ? 2 : 1;");
+ s->codeAppend ("float3x3 CI = inverse(float3x3(C[0][0], C[0][middlerow], C[0][3], "
+ "C[1][0], C[1][middlerow], C[1][3], "
+ " 0, 0, 1));");
+ s->codeAppendf("%s = CI * float3x3(K[0], K[middlerow], K[3], "
+ "L[0], L[middlerow], L[3], "
+ "M[0], M[middlerow], M[3]);", fKLMMatrix.c_str());
+
+ // Evaluate the cubic at T=.5 for a mid-ish point.
+ s->codeAppendf("float2 midpoint = %s * float4(.125, .375, .375, .125);", pts);
+
+ // Orient the KLM matrix so L & M are both positive on the side of the curve we wish to fill.
+ s->codeAppendf("float2 orientation = sign(float3(midpoint, 1) * float2x3(%s[1], %s[2]));",
+ fKLMMatrix.c_str(), fKLMMatrix.c_str());
+ s->codeAppendf("%s *= float3x3(orientation[0] * orientation[1], 0, 0, "
+ "0, orientation[0], 0, "
+ "0, 0, orientation[1]);", fKLMMatrix.c_str());
+}
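+
+// (Background, per the Loop-Blinn paper cited in GrCCCubicShader.h: discr = 3*D2^2 - 4*D1*D3
+// classifies the cubic. discr >= 0 indicates a serpentine or cusp and discr < 0 a loop, which
+// is why the KLM solve above branches on its sign.)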
+
+void GrCCCubicShader::onEmitVaryings(
+ GrGLSLVaryingHandler* varyingHandler, GrGLSLVarying::Scope scope, SkString* code,
+ const char* position, const char* coverage, const char* cornerCoverage, const char* wind) {
+ code->appendf("float3 klm = float3(%s, 1) * %s;", position, fKLMMatrix.c_str());
+ if (coverage) {
+ fKLM_fEdge.reset(kFloat4_GrSLType, scope);
+ varyingHandler->addVarying("klm_and_edge", &fKLM_fEdge);
+ // Give L&M both the same sign as wind, in order to pass this value to the fragment shader.
+ // (Cubics are pre-chopped such that L&M do not change sign within any individual segment.)
+ code->appendf("%s.xyz = klm * float3(1, %s, %s);", OutName(fKLM_fEdge), wind, wind);
+ // Flat edge opposite the curve.
+ code->appendf("%s.w = %s;", OutName(fKLM_fEdge), coverage);
+ } else {
+ fKLM_fEdge.reset(kFloat3_GrSLType, scope);
+ varyingHandler->addVarying("klm", &fKLM_fEdge);
+ code->appendf("%s = klm;", OutName(fKLM_fEdge));
+ }
+
+ fGradMatrix.reset(kFloat4_GrSLType, scope);
+ varyingHandler->addVarying("grad_matrix", &fGradMatrix);
+ code->appendf("%s.xy = 2*bloat * 3 * klm[0] * %s[0].xy;",
+ OutName(fGradMatrix), fKLMMatrix.c_str());
+ code->appendf("%s.zw = -2*bloat * (klm[1] * %s[2].xy + klm[2] * %s[1].xy);",
+ OutName(fGradMatrix), fKLMMatrix.c_str(), fKLMMatrix.c_str());
+
+ if (cornerCoverage) {
+ SkASSERT(coverage);
+ code->appendf("half hull_coverage; {");
+ this->calcHullCoverage(code, OutName(fKLM_fEdge), OutName(fGradMatrix), "hull_coverage");
+ code->appendf("}");
+ fCornerCoverage.reset(kHalf2_GrSLType, scope);
+ varyingHandler->addVarying("corner_coverage", &fCornerCoverage);
+ code->appendf("%s = half2(hull_coverage, 1) * %s;",
+ OutName(fCornerCoverage), cornerCoverage);
+ }
+}
+
+void GrCCCubicShader::emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder* f, const char* outputCoverage) const {
+ this->calcHullCoverage(
+ &AccessCodeString(f), fKLM_fEdge.fsIn(), fGradMatrix.fsIn(), outputCoverage);
+
+    // Wind is the sign shared by L and M. Take the sign of whichever has the larger magnitude.
+ // (In reality, either would be fine because we chop cubics with more than a half pixel of
+ // padding around the L & M lines, so neither should approach zero.)
+ f->codeAppend ("half wind = sign(half(l + m));");
+ f->codeAppendf("%s *= wind;", outputCoverage);
+
+ if (fCornerCoverage.fsIn()) {
+ f->codeAppendf("%s = %s.x * %s.y + %s;", // Attenuated corner coverage.
+ outputCoverage, fCornerCoverage.fsIn(), fCornerCoverage.fsIn(),
+ outputCoverage);
+ }
+}
+
+void GrCCCubicShader::calcHullCoverage(SkString* code, const char* klmAndEdge,
+ const char* gradMatrix, const char* outputCoverage) const {
+ code->appendf("float k = %s.x, l = %s.y, m = %s.z;", klmAndEdge, klmAndEdge, klmAndEdge);
+ code->append ("float f = k*k*k - l*m;");
+ code->appendf("float2 grad = %s.xy * k + %s.zw;", gradMatrix, gradMatrix);
+ code->append ("float fwidth = abs(grad.x) + abs(grad.y);");
+ code->appendf("float curve_coverage = min(0.5 - f/fwidth, 1);");
+ // Flat edge opposite the curve.
+ code->appendf("float edge_coverage = min(%s.w, 0);", klmAndEdge);
+ // Total hull coverage.
+ code->appendf("%s = max(half(curve_coverage + edge_coverage), 0);", outputCoverage);
+}
+
+void GrCCCubicShader::emitSampleMaskCode(GrGLSLFPFragmentBuilder* f) const {
+ f->codeAppendf("float k = %s.x, l = %s.y, m = %s.z;",
+ fKLM_fEdge.fsIn(), fKLM_fEdge.fsIn(), fKLM_fEdge.fsIn());
+ f->codeAppendf("float f = k*k*k - l*m;");
+ f->codeAppendf("float2x2 grad_matrix = float2x2(%s);", fGradMatrix.fsIn());
+ f->codeAppendf("float2 grad = grad_matrix * float2(k, 1);");
+ f->applyFnToMultisampleMask("f", "grad", GrGLSLFPFragmentBuilder::ScopeFlags::kTopLevel);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.h b/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.h
new file mode 100644
index 0000000000..723c21ea16
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCCubicShader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCCubicShader_DEFINED
+#define GrCCCubicShader_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class renders the coverage of convex closed cubic segments using the techniques outlined in
+ * "Resolution Independent Curve Rendering using Programmable Graphics Hardware" by Charles Loop and
+ * Jim Blinn:
+ *
+ * https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ *
+ * The provided curve segments must be convex, monotonic with respect to the vector of their closing
+ * edge [P3 - P0], and must not contain or be near any inflection points or loop intersections.
+ * (Use GrCCGeometry::cubicTo().)
+ */
+class GrCCCubicShader : public GrCCCoverageProcessor::Shader {
+public:
+ void emitSetupCode(
+ GrGLSLVertexGeoBuilder*, const char* pts, const char** outHull4) const override;
+
+ void onEmitVaryings(
+ GrGLSLVaryingHandler*, GrGLSLVarying::Scope, SkString* code, const char* position,
+ const char* coverage, const char* cornerCoverage, const char* wind) override;
+
+ void emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder*, const char* outputCoverage) const override;
+
+ void emitSampleMaskCode(GrGLSLFPFragmentBuilder*) const override;
+
+private:
+ void calcHullCoverage(SkString* code, const char* klmAndEdge, const char* gradMatrix,
+ const char* outputCoverage) const;
+
+ const GrShaderVar fKLMMatrix{"klm_matrix", kFloat3x3_GrSLType};
+ GrGLSLVarying fKLM_fEdge;
+ GrGLSLVarying fGradMatrix;
+ GrGLSLVarying fCornerCoverage;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.cpp
new file mode 100644
index 0000000000..06556c493d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -0,0 +1,461 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/ccpr/GrCCPathCache.h"
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include "src/gpu/ccpr/GrOctoBounds.h"
+
+static bool has_coord_transforms(const GrPaint& paint) {
+ GrFragmentProcessor::Iter iter(paint);
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ if (!fp->coordTransforms().empty()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(
+ GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
+ const GrShape& shape, GrPaint&& paint) {
+ SkRect conservativeDevBounds;
+ m.mapRect(&conservativeDevBounds, shape.bounds());
+
+ const SkStrokeRec& stroke = shape.style().strokeRec();
+ float strokeDevWidth = 0;
+ float conservativeInflationRadius = 0;
+ if (!stroke.isFillStyle()) {
+ strokeDevWidth = GrCoverageCountingPathRenderer::GetStrokeDevWidth(
+ m, stroke, &conservativeInflationRadius);
+ conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
+ }
+
+ std::unique_ptr<GrCCDrawPathsOp> op;
+ float conservativeSize = SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width());
+ if (conservativeSize > GrCoverageCountingPathRenderer::kPathCropThreshold) {
+        // The path is too large. Crop it, or analytic AA can run out of fp32 precision.
+ SkPath croppedDevPath;
+ shape.asPath(&croppedDevPath);
+ croppedDevPath.transform(m, &croppedDevPath);
+
+ SkIRect cropBox = clipIBounds;
+ GrShape croppedDevShape;
+ if (stroke.isFillStyle()) {
+ GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
+ croppedDevShape = GrShape(croppedDevPath);
+ conservativeDevBounds = croppedDevShape.bounds();
+ } else {
+ int r = SkScalarCeilToInt(conservativeInflationRadius);
+ cropBox.outset(r, r);
+ GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
+ SkStrokeRec devStroke = stroke;
+ devStroke.setStrokeStyle(strokeDevWidth);
+ croppedDevShape = GrShape(croppedDevPath, GrStyle(devStroke, nullptr));
+ conservativeDevBounds = croppedDevPath.getBounds();
+ conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
+ }
+
+ // FIXME: This breaks local coords: http://skbug.com/8003
+ return InternalMake(context, clipIBounds, SkMatrix::I(), croppedDevShape, strokeDevWidth,
+ conservativeDevBounds, std::move(paint));
+ }
+
+ return InternalMake(context, clipIBounds, m, shape, strokeDevWidth, conservativeDevBounds,
+ std::move(paint));
+}
+
+std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::InternalMake(
+ GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
+ const GrShape& shape, float strokeDevWidth, const SkRect& conservativeDevBounds,
+ GrPaint&& paint) {
+ // The path itself should have been cropped if larger than kPathCropThreshold. If it had a
+ // stroke, that would have further inflated its draw bounds.
+ SkASSERT(SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width()) <
+ GrCoverageCountingPathRenderer::kPathCropThreshold +
+ GrCoverageCountingPathRenderer::kMaxBoundsInflationFromStroke*2 + 1);
+
+ SkIRect shapeConservativeIBounds;
+ conservativeDevBounds.roundOut(&shapeConservativeIBounds);
+
+ SkIRect maskDevIBounds;
+ if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
+ return nullptr;
+ }
+
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+ return pool->allocate<GrCCDrawPathsOp>(m, shape, strokeDevWidth, shapeConservativeIBounds,
+ maskDevIBounds, conservativeDevBounds, std::move(paint));
+}
+
+GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
+ const SkIRect& shapeConservativeIBounds,
+ const SkIRect& maskDevIBounds, const SkRect& conservativeDevBounds,
+ GrPaint&& paint)
+ : GrDrawOp(ClassID())
+ , fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
+ , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
+ paint.getColor4f())
+ , fProcessors(std::move(paint)) { // Paint must be moved after fetching its color above.
+ SkDEBUGCODE(fBaseInstance = -1);
+ // If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
+ // since it eliminates the need for scissor when drawing to the main canvas.
+ // FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
+ // give the opsTask, as well as enabling threaded parsing when using DDL.
+ SkRect clippedDrawBounds;
+ if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
+ clippedDrawBounds.setEmpty();
+ }
+ // We always have AA bloat, even in MSAA atlas mode. This is because by the time this Op comes
+ // along and draws to the main canvas, the atlas has been resolved to analytic coverage.
+ this->setBounds(clippedDrawBounds, GrOp::HasAABloat::kYes, GrOp::IsHairline::kNo);
+}
+
+GrCCDrawPathsOp::~GrCCDrawPathsOp() {
+ if (fOwningPerOpsTaskPaths) {
+ // Remove the list's dangling pointer to this Op before deleting it.
+ fOwningPerOpsTaskPaths->fDrawOps.remove(this);
+ }
+}
+
+GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrShape& shape,
+ float strokeDevWidth,
+ const SkIRect& shapeConservativeIBounds,
+ const SkIRect& maskDevIBounds, const SkPMColor4f& color)
+ : fMatrix(m)
+ , fShape(shape)
+ , fStrokeDevWidth(strokeDevWidth)
+ , fShapeConservativeIBounds(shapeConservativeIBounds)
+ , fMaskDevIBounds(maskDevIBounds)
+ , fColor(color) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ if (fShape.hasUnstyledKey()) {
+        // On AOSP we round view matrix translates to integer values for cacheable paths. We do this
+ // to match HWUI's cache hit ratio, which doesn't consider the matrix when caching paths.
+ fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
+ fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
+ }
+#endif
+}
+
+GrProcessorSet::Analysis GrCCDrawPathsOp::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ SkASSERT(1 == fNumDraws); // There should only be one single path draw in this Op right now.
+ return fDraws.head().finalize(caps, clip, hasMixedSampledCoverage, clampType, &fProcessors);
+}
+
+GrProcessorSet::Analysis GrCCDrawPathsOp::SingleDraw::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage, GrClampType
+ clampType, GrProcessorSet* processors) {
+ const GrProcessorSet::Analysis& analysis = processors->finalize(
+ fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
+ &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps, clampType, &fColor);
+
+ // Lines start looking jagged when they get thinner than 1px. For thin strokes it looks better
+ // if we can convert them to hairline (i.e., inflate the stroke width to 1px), and instead
+ // reduce the opacity to create the illusion of thin-ness. This strategy also helps reduce
+ // artifacts from coverage dilation when there are self intersections.
+ if (analysis.isCompatibleWithCoverageAsAlpha() &&
+ !fShape.style().strokeRec().isFillStyle() && fStrokeDevWidth < 1) {
+ // Modifying the shape affects its cache key. The draw can't have a cache entry yet or else
+ // our next step would invalidate it.
+ SkASSERT(!fCacheEntry);
+ SkASSERT(SkStrokeRec::kStroke_Style == fShape.style().strokeRec().getStyle());
+
+ SkPath path;
+ fShape.asPath(&path);
+
+ // Create a hairline version of our stroke.
+ SkStrokeRec hairlineStroke = fShape.style().strokeRec();
+ hairlineStroke.setStrokeStyle(0);
+
+ // How transparent does a 1px stroke have to be in order to appear as thin as the real one?
+ float coverage = fStrokeDevWidth;
+
+ fShape = GrShape(path, GrStyle(hairlineStroke, nullptr));
+ fStrokeDevWidth = 1;
+
+ // fShapeConservativeIBounds already accounted for this possibility of inflating the stroke.
+ fColor = fColor * coverage;
+ }
+
+ return analysis;
+}
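+
+// A disabled sketch of the hairline trick above (hypothetical names, not Skia API): a stroke
+// thinner than 1px is widened to a 1px hairline while its color is scaled by the original
+// width, so the total coverage-as-alpha stays roughly constant.
+#if 0
+struct ThinStrokeReference { float strokeDevWidth; float alpha; };
+static ThinStrokeReference to_hairline_reference(float strokeDevWidth, float alpha) {
+    if (strokeDevWidth < 1) {
+        alpha *= strokeDevWidth;  // e.g. a 0.25px stroke becomes a 25%-opacity hairline.
+        strokeDevWidth = 1;
+    }
+    return {strokeDevWidth, alpha};
+}
+#endif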
+
+GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
+ GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
+ SkASSERT(fOwningPerOpsTaskPaths);
+ SkASSERT(fNumDraws);
+ SkASSERT(!that->fOwningPerOpsTaskPaths ||
+ that->fOwningPerOpsTaskPaths == fOwningPerOpsTaskPaths);
+ SkASSERT(that->fNumDraws);
+
+ if (fProcessors != that->fProcessors ||
+ fViewMatrixIfUsingLocalCoords != that->fViewMatrixIfUsingLocalCoords) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fDraws.append(std::move(that->fDraws), &fOwningPerOpsTaskPaths->fAllocator);
+
+ SkDEBUGCODE(fNumDraws += that->fNumDraws);
+ SkDEBUGCODE(that->fNumDraws = 0);
+ return CombineResult::kMerged;
+}
+
+void GrCCDrawPathsOp::addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths) {
+ SkASSERT(1 == fNumDraws);
+ SkASSERT(!fOwningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths = std::move(owningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths->fDrawOps.addToTail(this);
+}
+
+void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
+ GrOnFlushResourceProvider* onFlushRP,
+ GrCCPerFlushResourceSpecs* specs) {
+ for (SingleDraw& draw : fDraws) {
+ draw.accountForOwnPath(pathCache, onFlushRP, specs);
+ }
+}
+
+void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
+ GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
+ GrCCPerFlushResourceSpecs* specs) {
+ using CoverageType = GrCCAtlas::CoverageType;
+
+ SkPath path;
+ fShape.asPath(&path);
+
+ SkASSERT(!fCacheEntry);
+
+ if (pathCache) {
+ fCacheEntry = pathCache->find(
+ onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
+ }
+
+ if (fCacheEntry) {
+ if (const GrCCCachedAtlas* cachedAtlas = fCacheEntry->cachedAtlas()) {
+ SkASSERT(cachedAtlas->getOnFlushProxy());
+ if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
+ ++specs->fNumCachedPaths;
+ } else {
+ // Suggest that this path be copied to a literal coverage atlas, to save memory.
+ // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
+ int idx = (fShape.style().strokeRec().isFillStyle())
+ ? GrCCPerFlushResourceSpecs::kFillIdx
+ : GrCCPerFlushResourceSpecs::kStrokeIdx;
+ ++specs->fNumCopiedPaths[idx];
+ specs->fCopyPathStats[idx].statPath(path);
+ specs->fCopyAtlasSpecs.accountForSpace(fCacheEntry->width(), fCacheEntry->height());
+ fDoCopyToA8Coverage = true;
+ }
+ return;
+ }
+
+ if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
+ fDoCachePathMask = true;
+ // We don't cache partial masks; ensure the bounds include the entire path.
+ fMaskDevIBounds = fShapeConservativeIBounds;
+ }
+ }
+
+ // Plan on rendering this path in a new atlas.
+ int idx = (fShape.style().strokeRec().isFillStyle())
+ ? GrCCPerFlushResourceSpecs::kFillIdx
+ : GrCCPerFlushResourceSpecs::kStrokeIdx;
+ ++specs->fNumRenderedPaths[idx];
+ specs->fRenderedPathStats[idx].statPath(path);
+ specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
+ SkDEBUGCODE(fWasCountedAsRender = true);
+}
+
+bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
+ SkASSERT(fCacheEntry);
+ SkASSERT(!fCacheEntry->cachedAtlas());
+ if (fCacheEntry->hitCount() <= 1) {
+ return false; // Don't cache a path mask until at least its second hit.
+ }
+
+ int shapeMaxDimension = SkTMax(
+ fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
+ if (shapeMaxDimension > maxRenderTargetSize) {
+        return false; // This path isn't cacheable.
+ }
+
+ int64_t shapeArea = sk_64_mul(
+ fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
+ if (shapeArea < 100*100) {
+ // If a path is small enough, we might as well try to render and cache the entire thing, no
+ // matter how much of it is actually visible.
+ return true;
+ }
+
+ // The hitRect should already be contained within the shape's bounds, but we still intersect it
+    // because it's possible for edges very near pixel boundaries (e.g., 0.999999) to round out
+ // inconsistently, depending on the integer translation values and fp32 precision.
+ SkIRect hitRect = fCacheEntry->hitRect().makeOffset(fCachedMaskShift);
+ hitRect.intersect(fShapeConservativeIBounds);
+
+ // Render and cache the entire path mask if we see enough of it to justify rendering all the
+ // pixels. Our criteria for "enough" is that we must have seen at least 50% of the path in the
+ // past, and in this particular draw we must see at least 10% of it.
+ int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
+ int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
+ return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
+}
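+
+// Worked example of the heuristic above (illustrative only): a 400x400 path has
+// shapeArea == 160000, which is over the 100x100 fast-accept, so caching requires
+// hitArea >= 80000 (we've seen at least 50% of the path historically) and
+// drawArea >= 16000 (at least 10% of it is visible in this draw).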
+
+void GrCCDrawPathsOp::setupResources(
+ GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
+ GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
+ SkASSERT(fNumDraws > 0);
+ SkASSERT(-1 == fBaseInstance);
+ fBaseInstance = resources->nextPathInstanceIdx();
+
+ for (SingleDraw& draw : fDraws) {
+ draw.setupResources(pathCache, onFlushRP, resources, doCopies, this);
+ }
+
+ if (!fInstanceRanges.empty()) {
+ fInstanceRanges.back().fEndInstanceIdx = resources->nextPathInstanceIdx();
+ }
+}
+
+void GrCCDrawPathsOp::SingleDraw::setupResources(
+ GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
+ GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies, GrCCDrawPathsOp* op) {
+ SkPath path;
+ fShape.asPath(&path);
+
+ auto fillRule = (fShape.style().strokeRec().isFillStyle())
+ ? GrFillRuleForSkPath(path)
+ : GrFillRule::kNonzero;
+
+ if (fCacheEntry) {
+ // Does the path already exist in a cached atlas texture?
+ if (fCacheEntry->cachedAtlas()) {
+ SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
+ if (DoCopiesToA8Coverage::kYes == doCopies && fDoCopyToA8Coverage) {
+ resources->upgradeEntryToLiteralCoverageAtlas(
+ pathCache, onFlushRP, fCacheEntry.get(), fillRule);
+ SkASSERT(fCacheEntry->cachedAtlas());
+ SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage
+ == fCacheEntry->cachedAtlas()->coverageType());
+ SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
+ }
+#if 0
+ // Simple color manipulation to visualize cached paths.
+ fColor = (GrCCAtlas::CoverageType::kA8_LiteralCoverage
+ == fCacheEntry->cachedAtlas()->coverageType())
+ ? SkPMColor4f{0,0,.25,.25} : SkPMColor4f{0,.25,0,.25};
+#endif
+ auto coverageMode = GrCCPathProcessor::GetCoverageMode(
+ fCacheEntry->cachedAtlas()->coverageType());
+ op->recordInstance(coverageMode, fCacheEntry->cachedAtlas()->getOnFlushProxy(),
+ resources->nextPathInstanceIdx());
+ resources->appendDrawPathInstance().set(
+ *fCacheEntry, fCachedMaskShift, SkPMColor4f_toFP16(fColor), fillRule);
+#ifdef SK_DEBUG
+ if (fWasCountedAsRender) {
+                // A path mask didn't exist for this path at the beginning of flush, but we have
+                // one now, which means we've drawn the same path multiple times this flush. Let
+                // the resources know that we reused one for their internal debug counters.
+ resources->debugOnly_didReuseRenderedPath();
+ }
+#endif
+ return;
+ }
+ }
+
+ // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
+ // bounding boxes: One in device space, as well as a second one rotated an additional 45
+ // degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
+ // circumscribes the path.
+ GrOctoBounds octoBounds;
+ SkIRect devIBounds;
+ SkIVector devToAtlasOffset;
+ if (auto atlas = resources->renderShapeInAtlas(
+ fMaskDevIBounds, fMatrix, fShape, fStrokeDevWidth, &octoBounds, &devIBounds,
+ &devToAtlasOffset)) {
+ auto coverageMode = GrCCPathProcessor::GetCoverageMode(
+ resources->renderedPathCoverageType());
+ op->recordInstance(coverageMode, atlas->textureProxy(), resources->nextPathInstanceIdx());
+ resources->appendDrawPathInstance().set(
+ octoBounds, devToAtlasOffset, SkPMColor4f_toFP16(fColor), fillRule);
+
+ if (fDoCachePathMask) {
+ SkASSERT(fCacheEntry);
+ SkASSERT(!fCacheEntry->cachedAtlas());
+ SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
+ fCacheEntry->setCoverageCountAtlas(
+ onFlushRP, atlas, devToAtlasOffset, octoBounds, devIBounds, fCachedMaskShift);
+ }
+ }
+}
+
+inline void GrCCDrawPathsOp::recordInstance(
+ GrCCPathProcessor::CoverageMode coverageMode, GrTextureProxy* atlasProxy, int instanceIdx) {
+ if (fInstanceRanges.empty()) {
+ fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
+ } else if (fInstanceRanges.back().fAtlasProxy != atlasProxy) {
+ fInstanceRanges.back().fEndInstanceIdx = instanceIdx;
+ fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
+ }
+ SkASSERT(fInstanceRanges.back().fCoverageMode == coverageMode);
+ SkASSERT(fInstanceRanges.back().fAtlasProxy == atlasProxy);
+}
+
+void GrCCDrawPathsOp::onPrepare(GrOpFlushState* flushState) {
+    // The CCPR ops don't know their atlas textures until after the preFlush calls have been
+    // executed at the start of GrDrawingManager::flush. Thus the proxies are not added during the
+    // normal visitProxies calls made in addDrawOp. Therefore, the atlas proxies are added now.
+ for (const InstanceRange& range : fInstanceRanges) {
+ flushState->sampledProxyArray()->push_back(range.fAtlasProxy);
+ }
+}
+
+void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ SkASSERT(fOwningPerOpsTaskPaths);
+
+ const GrCCPerFlushResources* resources = fOwningPerOpsTaskPaths->fFlushResources.get();
+ if (!resources) {
+ return; // Setup failed.
+ }
+
+ GrPipeline::InitArgs initArgs;
+ initArgs.fCaps = &flushState->caps();
+ initArgs.fDstProxy = flushState->drawOpArgs().dstProxy();
+ initArgs.fOutputSwizzle = flushState->drawOpArgs().outputSwizzle();
+ auto clip = flushState->detachAppliedClip();
+ GrPipeline::FixedDynamicState fixedDynamicState(clip.scissorState().rect());
+ GrPipeline pipeline(initArgs, std::move(fProcessors), std::move(clip));
+
+ int baseInstance = fBaseInstance;
+ SkASSERT(baseInstance >= 0); // Make sure setupResources() has been called.
+
+ for (const InstanceRange& range : fInstanceRanges) {
+ SkASSERT(range.fEndInstanceIdx > baseInstance);
+
+ const GrTextureProxy* atlas = range.fAtlasProxy;
+ SkASSERT(atlas->isInstantiated());
+
+ GrCCPathProcessor pathProc(
+ range.fCoverageMode, atlas->peekTexture(), atlas->textureSwizzle(), atlas->origin(),
+ fViewMatrixIfUsingLocalCoords);
+ GrTextureProxy* atlasProxy = range.fAtlasProxy;
+ fixedDynamicState.fPrimitiveProcessorTextures = &atlasProxy;
+ pathProc.drawPaths(flushState, pipeline, &fixedDynamicState, *resources, baseInstance,
+ range.fEndInstanceIdx, this->bounds());
+
+ baseInstance = range.fEndInstanceIdx;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.h b/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.h
new file mode 100644
index 0000000000..2a63953326
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCDrawPathsOp_DEFINED
+#define GrCCDrawPathsOp_DEFINED
+
+#include "src/core/SkTInternalLList.h"
+#include "src/gpu/ccpr/GrCCPathCache.h"
+#include "src/gpu/ccpr/GrCCSTLList.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrCCAtlas;
+class GrCCPerFlushResources;
+struct GrCCPerFlushResourceSpecs;
+struct GrCCPerOpsTaskPaths;
+class GrOnFlushResourceProvider;
+class GrRecordingContext;
+
+/**
+ * This is the Op that draws paths to the actual canvas, using atlases generated by CCPR.
+ */
+class GrCCDrawPathsOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCDrawPathsOp);
+
+ static std::unique_ptr<GrCCDrawPathsOp> Make(GrRecordingContext*, const SkIRect& clipIBounds,
+ const SkMatrix&, const GrShape&, GrPaint&&);
+ ~GrCCDrawPathsOp() override;
+
+ const char* name() const override { return "GrCCDrawPathsOp"; }
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override;
+ CombineResult onCombineIfPossible(GrOp*, const GrCaps&) override;
+ void visitProxies(const VisitProxyFunc& fn) const override {
+ for (const auto& range : fInstanceRanges) {
+ fn(range.fAtlasProxy, GrMipMapped::kNo);
+ }
+ fProcessors.visitProxies(fn);
+ }
+ void onPrepare(GrOpFlushState*) override;
+
+ void addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths);
+
+ // Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
+ // increments/fills out the corresponding GrCCPerFlushResourceSpecs.
+ void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResourceSpecs*);
+
+ // Allows the caller to decide whether to actually do the suggested copies from cached 16-bit
+ // coverage count atlases, and into 8-bit literal coverage atlases. Purely to save space.
+ enum class DoCopiesToA8Coverage : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ // Allocates the GPU resources indicated by accountForOwnPaths(), in preparation for drawing. If
+ // DoCopiesToA8Coverage is kNo, the paths slated for copy will instead be left in their 16-bit
+ // coverage count atlases.
+ //
+ // NOTE: If using DoCopiesToA8Coverage::kNo, it is the caller's responsibility to have called
+ // cancelCopies() on the GrCCPerFlushResourceSpecs, prior to making this call.
+ void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
+ DoCopiesToA8Coverage);
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+private:
+ friend class GrOpMemoryPool;
+
+ static std::unique_ptr<GrCCDrawPathsOp> InternalMake(GrRecordingContext*,
+ const SkIRect& clipIBounds,
+ const SkMatrix&, const GrShape&,
+ float strokeDevWidth,
+ const SkRect& conservativeDevBounds,
+ GrPaint&&);
+
+ GrCCDrawPathsOp(const SkMatrix&, const GrShape&, float strokeDevWidth,
+ const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
+ const SkRect& conservativeDevBounds, GrPaint&&);
+
+ void recordInstance(
+ GrCCPathProcessor::CoverageMode, GrTextureProxy* atlasProxy, int instanceIdx);
+
+ const SkMatrix fViewMatrixIfUsingLocalCoords;
+
+ class SingleDraw {
+ public:
+ SingleDraw(const SkMatrix&, const GrShape&, float strokeDevWidth,
+ const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
+ const SkPMColor4f&);
+
+ // See the corresponding methods in GrCCDrawPathsOp.
+ GrProcessorSet::Analysis finalize(
+ const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType,
+ GrProcessorSet*);
+ void accountForOwnPath(GrCCPathCache*, GrOnFlushResourceProvider*,
+ GrCCPerFlushResourceSpecs*);
+ void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
+ DoCopiesToA8Coverage, GrCCDrawPathsOp*);
+
+ private:
+ bool shouldCachePathMask(int maxRenderTargetSize) const;
+
+ SkMatrix fMatrix;
+ GrShape fShape;
+ float fStrokeDevWidth;
+ const SkIRect fShapeConservativeIBounds;
+ SkIRect fMaskDevIBounds;
+ SkPMColor4f fColor;
+
+ GrCCPathCache::OnFlushEntryRef fCacheEntry;
+ SkIVector fCachedMaskShift;
+ bool fDoCopyToA8Coverage = false;
+ bool fDoCachePathMask = false;
+ SkDEBUGCODE(bool fWasCountedAsRender = false);
+
+ SingleDraw* fNext = nullptr;
+
+ friend class GrCCSTLList<SingleDraw>; // To access fNext.
+ };
+
+ // Declare fOwningPerOpsTaskPaths first, before fDraws. The draws use memory allocated by
+ // fOwningPerOpsTaskPaths, so it must not be unreffed until after fDraws is destroyed.
+ sk_sp<GrCCPerOpsTaskPaths> fOwningPerOpsTaskPaths;
+
+ GrCCSTLList<SingleDraw> fDraws;
+ SkDEBUGCODE(int fNumDraws = 1);
+
+ GrProcessorSet fProcessors;
+
+ struct InstanceRange {
+ GrCCPathProcessor::CoverageMode fCoverageMode;
+ GrTextureProxy* fAtlasProxy;
+ int fEndInstanceIdx;
+ };
+
+ SkSTArray<2, InstanceRange, true> fInstanceRanges;
+ int fBaseInstance SkDEBUGCODE(= -1);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.cpp
new file mode 100644
index 0000000000..75b7fd05e5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.cpp
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCFillGeometry.h"
+
+#include "include/gpu/GrTypes.h"
+#include "src/core/SkGeometry.h"
+#include <algorithm>
+#include <cmath>
+#include <cstdlib>
+
+static constexpr float kFlatnessThreshold = 1/16.f; // 1/16 of a pixel.
+
+void GrCCFillGeometry::beginPath() {
+ SkASSERT(!fBuildingContour);
+ fVerbs.push_back(Verb::kBeginPath);
+}
+
+void GrCCFillGeometry::beginContour(const SkPoint& pt) {
+ SkASSERT(!fBuildingContour);
+ // Store the current verb count in the fTriangles field for now. When we close the contour we
+ // will use this value to calculate the actual number of triangles in its fan.
+ fCurrContourTallies = {fVerbs.count(), 0, 0, 0, 0};
+
+ fPoints.push_back(pt);
+ fVerbs.push_back(Verb::kBeginContour);
+ fCurrAnchorPoint = pt;
+
+ SkDEBUGCODE(fBuildingContour = true);
+}
+
+void GrCCFillGeometry::lineTo(const SkPoint P[2]) {
+ SkASSERT(fBuildingContour);
+ SkASSERT(P[0] == fPoints.back());
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ this->appendLine(p0, p1);
+}
+
+inline void GrCCFillGeometry::appendLine(const Sk2f& p0, const Sk2f& p1) {
+ SkASSERT(fPoints.back() == SkPoint::Make(p0[0], p0[1]));
+ if ((p0 == p1).allTrue()) {
+ return;
+ }
+ p1.store(&fPoints.push_back());
+ fVerbs.push_back(Verb::kLineTo);
+}
+
+static inline Sk2f normalize(const Sk2f& n) {
+ Sk2f nn = n*n;
+ return n * (nn + SkNx_shuffle<1,0>(nn)).rsqrt();
+}
+
+static inline float dot(const Sk2f& a, const Sk2f& b) {
+ float product[2];
+ (a * b).store(product);
+ return product[0] + product[1];
+}
+
+static inline bool are_collinear(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ float tolerance = kFlatnessThreshold) {
+ Sk2f l = p2 - p0; // Line from p0 -> p2.
+
+ // lwidth = Manhattan width of l.
+ Sk2f labs = l.abs();
+ float lwidth = labs[0] + labs[1];
+
+ // d = |p1 - p0| dot | l.y|
+ // |-l.x| = distance from p1 to l.
+ Sk2f dd = (p1 - p0) * SkNx_shuffle<1,0>(l);
+ float d = dd[0] - dd[1];
+
+ // We are collinear if a box with radius "tolerance", centered on p1, touches the line l.
+ // To decide this, we check if the distance from p1 to the line is less than the distance from
+ // p1 to the far corner of this imaginary box, along that same normal vector.
+ // The far corner of the box can be found at "p1 + sign(n) * tolerance", where n is normal to l:
+ //
+ // abs(dot(p1 - p0, n)) <= dot(sign(n) * tolerance, n)
+ //
+ // Which reduces to:
+ //
+ // abs(d) <= (n.x * sign(n.x) + n.y * sign(n.y)) * tolerance
+ // abs(d) <= (abs(n.x) + abs(n.y)) * tolerance
+ //
+ // Use "<=" in case l == 0.
+ return std::abs(d) <= lwidth * tolerance;
+}
+
+static inline bool are_collinear(const SkPoint P[4], float tolerance = kFlatnessThreshold) {
+ Sk4f Px, Py; // |Px Py| |p0 - p3|
+ Sk4f::Load2(P, &Px, &Py); // |. . | = |p1 - p3|
+ Px -= Px[3]; // |. . | |p2 - p3|
+ Py -= Py[3]; // |. . | | 0 |
+
+ // Find [lx, ly] = the line from p3 to the furthest-away point from p3.
+ Sk4f Pwidth = Px.abs() + Py.abs(); // Pwidth = Manhattan width of each point.
+ int lidx = Pwidth[0] > Pwidth[1] ? 0 : 1;
+ lidx = Pwidth[lidx] > Pwidth[2] ? lidx : 2;
+ float lx = Px[lidx], ly = Py[lidx];
+ float lwidth = Pwidth[lidx]; // lwidth = Manhattan width of [lx, ly].
+
+ // |Px Py|
+ // d = |. . | * | ly| = distances from each point to l (two of the distances will be zero).
+ // |. . | |-lx|
+ // |. . |
+ Sk4f d = Px*ly - Py*lx;
+
+ // We are collinear if boxes with radius "tolerance", centered on all 4 points all touch line l.
+ // (See the rationale for this formula in the above, 3-point version of this function.)
+ // Use "<=" in case l == 0.
+ return (d.abs() <= lwidth * tolerance).allTrue();
+}
+
+// Returns whether the (convex) curve segment is monotonic with respect to [endPt - startPt].
+static inline bool is_convex_curve_monotonic(const Sk2f& startPt, const Sk2f& tan0,
+ const Sk2f& endPt, const Sk2f& tan1) {
+ Sk2f v = endPt - startPt;
+ float dot0 = dot(tan0, v);
+ float dot1 = dot(tan1, v);
+
+ // A small, negative tolerance handles floating-point error in the case when one tangent
+ // approaches 0 length, meaning the (convex) curve segment is effectively a flat line.
+ float tolerance = -std::max(std::abs(dot0), std::abs(dot1)) * SK_ScalarNearlyZero;
+ return dot0 >= tolerance && dot1 >= tolerance;
+}
+
+template<int N> static inline SkNx<N,float> lerp(const SkNx<N,float>& a, const SkNx<N,float>& b,
+ const SkNx<N,float>& t) {
+ return SkNx_fma(t, b - a, a);
+}
+
+void GrCCFillGeometry::quadraticTo(const SkPoint P[3]) {
+ SkASSERT(fBuildingContour);
+ SkASSERT(P[0] == fPoints.back());
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+
+ // Don't crunch on the curve if it is nearly flat (or just very small). Flat curves can break
+ // the monotonic chopping math.
+ if (are_collinear(p0, p1, p2)) {
+ this->appendLine(p0, p2);
+ return;
+ }
+
+ this->appendQuadratics(p0, p1, p2);
+}
+
+inline void GrCCFillGeometry::appendQuadratics(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2) {
+ Sk2f tan0 = p1 - p0;
+ Sk2f tan1 = p2 - p1;
+
+ // This should almost always be the case for well-behaved curves in the real world.
+ if (is_convex_curve_monotonic(p0, tan0, p2, tan1)) {
+ this->appendMonotonicQuadratic(p0, p1, p2);
+ return;
+ }
+
+ // Chop the curve into two segments with equal curvature. To do this we find the T value whose
+ // tangent angle is halfway between tan0 and tan1.
+ Sk2f n = normalize(tan0) - normalize(tan1);
+
+ // The midtangent can be found where (dQ(t) dot n) = 0:
+ //
+ // 0 = (dQ(t) dot n) = | 2*t 1 | * | p0 - 2*p1 + p2 | * | n |
+ // | -2*p0 + 2*p1 | | . |
+ //
+ // = | 2*t 1 | * | tan1 - tan0 | * | n |
+ // | 2*tan0 | | . |
+ //
+ // = 2*t * ((tan1 - tan0) dot n) + (2*tan0 dot n)
+ //
+ // t = (tan0 dot n) / ((tan0 - tan1) dot n)
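+ //
+ // Both dot products below are evaluated in SIMD: each lane of (v * n) holds one component
+ // product, and adding the cross-shuffled pair leaves the full dot product in both lanes.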
+ Sk2f dQ1n = (tan0 - tan1) * n;
+ Sk2f dQ0n = tan0 * n;
+ Sk2f t = (dQ0n + SkNx_shuffle<1,0>(dQ0n)) / (dQ1n + SkNx_shuffle<1,0>(dQ1n));
+ t = Sk2f::Min(Sk2f::Max(t, 0), 1); // Clamp for FP error.
+
+ Sk2f p01 = SkNx_fma(t, tan0, p0);
+ Sk2f p12 = SkNx_fma(t, tan1, p1);
+ Sk2f p012 = lerp(p01, p12, t);
+
+ this->appendMonotonicQuadratic(p0, p01, p012);
+ this->appendMonotonicQuadratic(p012, p12, p2);
+}
+
+inline void GrCCFillGeometry::appendMonotonicQuadratic(const Sk2f& p0, const Sk2f& p1,
+ const Sk2f& p2) {
+ // Don't send curves to the GPU if we know they are nearly flat (or just very small).
+ if (are_collinear(p0, p1, p2)) {
+ this->appendLine(p0, p2);
+ return;
+ }
+
+ SkASSERT(fPoints.back() == SkPoint::Make(p0[0], p0[1]));
+ SkASSERT((p0 != p2).anyTrue());
+ p1.store(&fPoints.push_back());
+ p2.store(&fPoints.push_back());
+ fVerbs.push_back(Verb::kMonotonicQuadraticTo);
+ ++fCurrContourTallies.fQuadratics;
+}
+
+static inline Sk2f first_unless_nearly_zero(const Sk2f& a, const Sk2f& b) {
+ Sk2f aa = a*a;
+ aa += SkNx_shuffle<1,0>(aa);
+ SkASSERT(aa[0] == aa[1]);
+
+ Sk2f bb = b*b;
+ bb += SkNx_shuffle<1,0>(bb);
+ SkASSERT(bb[0] == bb[1]);
+
+ return (aa > bb * SK_ScalarNearlyZero).thenElse(a, b);
+}
+
+static inline void get_cubic_tangents(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3, Sk2f* tan0, Sk2f* tan1) {
+ *tan0 = first_unless_nearly_zero(p1 - p0, p2 - p0);
+ *tan1 = first_unless_nearly_zero(p3 - p2, p3 - p1);
+}
+
+static inline bool is_cubic_nearly_quadratic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3, const Sk2f& tan0, const Sk2f& tan1,
+ Sk2f* c) {
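+ // Reversing degree elevation: if this cubic were an elevated quadratic, the quadratic's
+ // control point would equal both p0 + 1.5*tan0 and p3 - 1.5*tan1. If those two estimates
+ // agree to within a pixel, we can treat the curve as that quadratic, with control point c.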
+ Sk2f c1 = SkNx_fma(Sk2f(1.5f), tan0, p0);
+ Sk2f c2 = SkNx_fma(Sk2f(-1.5f), tan1, p3);
+ *c = (c1 + c2) * .5f; // Hopefully optimized out if not used?
+ return ((c1 - c2).abs() <= 1).allTrue();
+}
+
+enum class ExcludedTerm : bool {
+ kQuadraticTerm,
+ kLinearTerm
+};
+
+// Finds where to chop a non-loop around its inflection points. The resulting cubic segments will be
+// chopped such that a box of radius 'padRadius', centered at any point along the curve segment, is
+// guaranteed to not cross the tangent lines at the inflection points (a.k.a. lines L & M).
+//
+// 'chops' will be filled with 0, 2, or 4 T values. The segments between T0..T1 and T2..T3 must be
+// drawn with flat lines instead of cubics.
+//
+// A serpentine cubic has two inflection points, so this method takes Sk2f and computes the padding
+// for both in SIMD.
+static inline void find_chops_around_inflection_points(float padRadius, Sk2f tl, Sk2f sl,
+ const Sk2f& C0, const Sk2f& C1,
+ ExcludedTerm skipTerm, float Cdet,
+ SkSTArray<4, float>* chops) {
+ SkASSERT(chops->empty());
+ SkASSERT(padRadius >= 0);
+
+ padRadius /= std::abs(Cdet); // Scale this single value rather than all of C^-1 later on.
+
+ // The homogeneous parametric functions for distance from lines L & M are:
+ //
+ // l(t,s) = (t*sl - s*tl)^3
+ // m(t,s) = (t*sm - s*tm)^3
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.3 Finding klmn:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ //
+ // From here on we use Sk2f with "L" names, but the second lane will be for line M.
+ tl = (sl > 0).thenElse(tl, -tl); // Tl=tl/sl is the triple root of l(t,s). Normalize so s >= 0.
+ sl = sl.abs();
+
+ // Convert l(t,s), m(t,s) to power-basis form:
+ //
+ // | l3 m3 |
+ // |l(t,s) m(t,s)| = |t^3 t^2*s t*s^2 s^3| * | l2 m2 |
+ // | l1 m1 |
+ // | l0 m0 |
+ //
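+ // Expanding l(t,s) = (t*sl - s*tl)^3 gives l3 = sl^3, l2 = -3*sl^2*tl, l1 = 3*sl*tl^2, and
+ // l0 = -tl^3. We only need l3 plus whichever of l2/l1 is not the skipped term.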
+ Sk2f l3 = sl*sl*sl;
+ Sk2f l2or1 = (ExcludedTerm::kLinearTerm == skipTerm) ? sl*sl*tl*-3 : sl*tl*tl*3;
+
+ // The equation for line L can be found as follows:
+ //
+ // L = C^-1 * (l excluding skipTerm)
+ //
+ // (See comments for GrPathUtils::calcCubicInverseTransposePowerBasisMatrix.)
+ // We are only interested in the normal to L, so only need the upper 2x2 of C^-1. And rather
+ // than divide by determinant(C) here, we have already performed this divide on padRadius.
+ Sk2f Lx = C1[1]*l3 - C0[1]*l2or1;
+ Sk2f Ly = -C1[0]*l3 + C0[0]*l2or1;
+
+ // A box of radius "padRadius" is touching line L if "center dot L" is less than the Manhattan
+ // width of L. (See rationale in are_collinear.)
+ Sk2f Lwidth = Lx.abs() + Ly.abs();
+ Sk2f pad = Lwidth * padRadius;
+
+ // Will T=(t + cbrt(pad))/s be greater than 0? No need to solve roots outside T=0..1.
+ Sk2f insideLeftPad = pad + tl*tl*tl;
+
+ // Will T=(t - cbrt(pad))/s be less than 1? No need to solve roots outside T=0..1.
+ Sk2f tms = tl - sl;
+ Sk2f insideRightPad = pad - tms*tms*tms;
+
+ // Solve for the T values where abs(l(T)) = pad.
+ if (insideLeftPad[0] > 0 && insideRightPad[0] > 0) {
+ float padT = cbrtf(pad[0]);
+ Sk2f pts = (tl[0] + Sk2f(-padT, +padT)) / sl[0];
+ pts.store(chops->push_back_n(2));
+ }
+
+ // Solve for the T values where abs(m(T)) = pad.
+ if (insideLeftPad[1] > 0 && insideRightPad[1] > 0) {
+ float padT = cbrtf(pad[1]);
+ Sk2f pts = (tl[1] + Sk2f(-padT, +padT)) / sl[1];
+ pts.store(chops->push_back_n(2));
+ }
+}
+
+static inline void swap_if_greater(float& a, float& b) {
+ if (a > b) {
+ std::swap(a, b);
+ }
+}
+
+// Finds where to chop a non-loop around its intersection point. The resulting cubic segments will
+// be chopped such that a box of radius 'padRadius', centered at any point along the curve segment,
+// is guaranteed to not cross the tangent lines at the intersection point (a.k.a. lines L & M).
+//
+// 'chops' will be filled with 0, 2, or 4 T values. The segments between T0..T1 and T2..T3 must be
+// drawn with quadratic splines instead of cubics.
+//
+// A loop intersection falls at two different T values, so this method takes Sk2f and computes the
+// padding for both in SIMD.
+static inline void find_chops_around_loop_intersection(float padRadius, Sk2f t2, Sk2f s2,
+ const Sk2f& C0, const Sk2f& C1,
+ ExcludedTerm skipTerm, float Cdet,
+ SkSTArray<4, float>* chops) {
+ SkASSERT(chops->empty());
+ SkASSERT(padRadius >= 0);
+
+ padRadius /= std::abs(Cdet); // Scale this single value rather than all of C^-1 later on.
+
+ // The parametric functions for distance from lines L & M are:
+ //
+ // l(T) = (T - Td)^2 * (T - Te)
+ // m(T) = (T - Td) * (T - Te)^2
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.3 Finding klmn:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ Sk2f T2 = t2/s2; // T2 is the double root of l(T).
+ Sk2f T1 = SkNx_shuffle<1,0>(T2); // T1 is the other root of l(T).
+
+ // Convert l(T), m(T) to power-basis form:
+ //
+ // | 1 1 |
+ // |l(T) m(T)| = |T^3 T^2 T 1| * | l2 m2 |
+ // | l1 m1 |
+ // | l0 m0 |
+ //
+ // From here on we use Sk2f with "L" names, but the second lane will be for line M.
+ Sk2f l2 = SkNx_fma(Sk2f(-2), T2, -T1);
+ Sk2f l1 = T2 * SkNx_fma(Sk2f(2), T1, T2);
+ Sk2f l0 = -T2*T2*T1;
+
+ // The equation for line L can be found as follows:
+ //
+ // L = C^-1 * (l excluding skipTerm)
+ //
+ // (See comments for GrPathUtils::calcCubicInverseTransposePowerBasisMatrix.)
+ // We are only interested in the normal to L, so only need the upper 2x2 of C^-1. And rather
+ // than divide by determinant(C) here, we have already performed this divide on padRadius.
+ Sk2f l2or1 = (ExcludedTerm::kLinearTerm == skipTerm) ? l2 : l1;
+ Sk2f Lx = -C0[1]*l2or1 + C1[1]; // l3 is always 1.
+ Sk2f Ly = C0[0]*l2or1 - C1[0];
+
+ // A box of radius "padRadius" is touching line L if "center dot L" is less than the Manhattan
+ // width of L. (See rationale in are_collinear.)
+ Sk2f Lwidth = Lx.abs() + Ly.abs();
+ Sk2f pad = Lwidth * padRadius;
+
+ // Is l(T=0) outside the padding around line L?
+ Sk2f lT0 = l0; // l(T=0) = |0 0 0 1| dot |1 l2 l1 l0| = l0
+ Sk2f outsideT0 = lT0.abs() - pad;
+
+ // Is l(T=1) outside the padding around line L?
+ Sk2f lT1 = (Sk2f(1) + l2 + l1 + l0).abs(); // l(T=1) = |1 1 1 1| dot |1 l2 l1 l0|
+ Sk2f outsideT1 = lT1.abs() - pad;
+
+ // Values for solving the cubic.
+ Sk2f p, q, qqq, discr, numRoots, D;
+ bool hasDiscr = false;
+
+ // Values for calculating one root (rarely needed).
+ Sk2f R, QQ;
+ bool hasOneRootVals = false;
+
+ // Values for calculating three roots.
+ Sk2f P, cosTheta3;
+ bool hasThreeRootVals = false;
+
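+ // The solver below shifts T by D to depress the cubic, then applies Cardano's method: a
+ // non-negative discriminant yields one real root (via cube roots), while a negative
+ // discriminant yields three (via the trigonometric method).
+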
+ // Solve for the T values where l(T) = +pad and m(T) = -pad.
+ for (int i = 0; i < 2; ++i) {
+ float T = T2[i]; // T is the point we are chopping around.
+ if ((T < 0 && outsideT0[i] >= 0) || (T > 1 && outsideT1[i] >= 0)) {
+ // The padding around T is completely out of range. No point solving for it.
+ continue;
+ }
+
+ if (!hasDiscr) {
+ p = Sk2f(+.5f, -.5f) * pad;
+ q = (1.f/3) * (T2 - T1);
+ qqq = q*q*q;
+ discr = qqq*p*2 + p*p;
+ numRoots = (discr < 0).thenElse(3, 1);
+ D = T2 - q;
+ hasDiscr = true;
+ }
+
+ if (1 == numRoots[i]) {
+ if (!hasOneRootVals) {
+ Sk2f r = qqq + p;
+ Sk2f s = r.abs() + discr.sqrt();
+ R = (r > 0).thenElse(-s, s);
+ QQ = q*q;
+ hasOneRootVals = true;
+ }
+
+ float A = cbrtf(R[i]);
+ float B = A != 0 ? QQ[i]/A : 0;
+ // When there is only one root, line L chops from root..1, line M chops from 0..root.
+ if (1 == i) {
+ chops->push_back(0);
+ }
+ chops->push_back(A + B + D[i]);
+ if (0 == i) {
+ chops->push_back(1);
+ }
+ continue;
+ }
+
+ if (!hasThreeRootVals) {
+ P = q.abs() * -2;
+ cosTheta3 = (q >= 0).thenElse(1, -1) + p / qqq.abs();
+ hasThreeRootVals = true;
+ }
+
+ static constexpr float k2PiOver3 = 2 * SK_ScalarPI / 3;
+ float theta = std::acos(cosTheta3[i]) * (1.f/3);
+ float roots[3] = {P[i] * std::cos(theta) + D[i],
+ P[i] * std::cos(theta + k2PiOver3) + D[i],
+ P[i] * std::cos(theta - k2PiOver3) + D[i]};
+
+ // Sort the three roots.
+ swap_if_greater(roots[0], roots[1]);
+ swap_if_greater(roots[1], roots[2]);
+ swap_if_greater(roots[0], roots[1]);
+
+ // Line L chops around the first 2 roots, line M chops around the second 2.
+ chops->push_back_n(2, &roots[i]);
+ }
+}
+
+void GrCCFillGeometry::cubicTo(const SkPoint P[4], float inflectPad, float loopIntersectPad) {
+ SkASSERT(fBuildingContour);
+ SkASSERT(P[0] == fPoints.back());
+
+ // Don't crunch on the curve or inflate geometry if it is nearly flat (or just very small).
+ // Flat curves can break the math below.
+ if (are_collinear(P)) {
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p3 = Sk2f::Load(P+3);
+ this->appendLine(p0, p3);
+ return;
+ }
+
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+ Sk2f p3 = Sk2f::Load(P+3);
+
+ // Also detect near-quadratics ahead of time.
+ Sk2f tan0, tan1, c;
+ get_cubic_tangents(p0, p1, p2, p3, &tan0, &tan1);
+ if (is_cubic_nearly_quadratic(p0, p1, p2, p3, tan0, tan1, &c)) {
+ this->appendQuadratics(p0, c, p3);
+ return;
+ }
+
+ double tt[2], ss[2], D[4];
+ fCurrCubicType = SkClassifyCubic(P, tt, ss, D);
+ SkASSERT(!SkCubicIsDegenerate(fCurrCubicType));
+ Sk2f t = Sk2f(static_cast<float>(tt[0]), static_cast<float>(tt[1]));
+ Sk2f s = Sk2f(static_cast<float>(ss[0]), static_cast<float>(ss[1]));
+
+ ExcludedTerm skipTerm = (std::abs(D[2]) > std::abs(D[1]))
+ ? ExcludedTerm::kQuadraticTerm
+ : ExcludedTerm::kLinearTerm;
+ Sk2f C0 = SkNx_fma(Sk2f(3), p1 - p2, p3 - p0);
+ Sk2f C1 = (ExcludedTerm::kLinearTerm == skipTerm
+ ? SkNx_fma(Sk2f(-2), p1, p0 + p2)
+ : p1 - p0) * 3;
+ Sk2f C0x1 = C0 * SkNx_shuffle<1,0>(C1);
+ float Cdet = C0x1[0] - C0x1[1];
+
+ SkSTArray<4, float> chops;
+ if (SkCubicType::kLoop != fCurrCubicType) {
+ find_chops_around_inflection_points(inflectPad, t, s, C0, C1, skipTerm, Cdet, &chops);
+ } else {
+ find_chops_around_loop_intersection(loopIntersectPad, t, s, C0, C1, skipTerm, Cdet, &chops);
+ }
+ if (4 == chops.count() && chops[1] >= chops[2]) {
+ // This just means the KLM roots are so close that their paddings overlap. We will
+ // approximate the entire middle section, but still have it chopped midway. For loops this
+ // chop guarantees the append code only sees convex segments. Otherwise, it means the curve
+ // is (at least nearly) a cusp, and the chop makes sure we get a sharp point.
+ Sk2f ts = t * SkNx_shuffle<1,0>(s);
+ chops[1] = chops[2] = (ts[0] + ts[1]) / (2*s[0]*s[1]);
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 1; i < chops.count(); ++i) {
+ SkASSERT(chops[i] >= chops[i - 1]);
+ }
+#endif
+ this->appendCubics(AppendCubicMode::kLiteral, p0, p1, p2, p3, chops.begin(), chops.count());
+}
+
+static inline void chop_cubic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2, const Sk2f& p3,
+ float T, Sk2f* ab, Sk2f* abc, Sk2f* abcd, Sk2f* bcd, Sk2f* cd) {
+ Sk2f TT = T;
+ *ab = lerp(p0, p1, TT);
+ Sk2f bc = lerp(p1, p2, TT);
+ *cd = lerp(p2, p3, TT);
+ *abc = lerp(*ab, bc, TT);
+ *bcd = lerp(bc, *cd, TT);
+ *abcd = lerp(*abc, *bcd, TT);
+}
+
+void GrCCFillGeometry::appendCubics(AppendCubicMode mode, const Sk2f& p0, const Sk2f& p1,
+ const Sk2f& p2, const Sk2f& p3, const float chops[],
+ int numChops, float localT0, float localT1) {
+ if (numChops) {
+ SkASSERT(numChops > 0);
+ int midChopIdx = numChops/2;
+ float T = chops[midChopIdx];
+ // Chops alternate between literal and approximate mode.
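+ // The mode flips once at each chop T value, so the segment to the right of chops[midChopIdx]
+ // differs from the leftmost segment's mode by midChopIdx + 1 flips; hence the XOR with
+ // ((midChopIdx & 1) ^ 1).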
+ AppendCubicMode rightMode = (AppendCubicMode)((bool)mode ^ (midChopIdx & 1) ^ 1);
+
+ if (T <= localT0) {
+ // T falls at or before this segment's start. Append the right side only.
+ this->appendCubics(rightMode, p0, p1, p2, p3, &chops[midChopIdx + 1],
+ numChops - midChopIdx - 1, localT0, localT1);
+ return;
+ }
+
+ if (T >= localT1) {
+ // T falls at or after this segment's end. Append the left side only.
+ this->appendCubics(mode, p0, p1, p2, p3, chops, midChopIdx, localT0, localT1);
+ return;
+ }
+
+ float localT = (T - localT0) / (localT1 - localT0);
+ Sk2f p01, p02, pT, p11, p12;
+ chop_cubic(p0, p1, p2, p3, localT, &p01, &p02, &pT, &p11, &p12);
+ this->appendCubics(mode, p0, p01, p02, pT, chops, midChopIdx, localT0, T);
+ this->appendCubics(rightMode, pT, p11, p12, p3, &chops[midChopIdx + 1],
+ numChops - midChopIdx - 1, T, localT1);
+ return;
+ }
+
+ this->appendCubics(mode, p0, p1, p2, p3);
+}
+
+void GrCCFillGeometry::appendCubics(AppendCubicMode mode, const Sk2f& p0, const Sk2f& p1,
+ const Sk2f& p2, const Sk2f& p3, int maxSubdivisions) {
+ if (SkCubicType::kLoop != fCurrCubicType) {
+ // Serpentines and cusps are always monotonic after chopping around inflection points.
+ SkASSERT(!SkCubicIsDegenerate(fCurrCubicType));
+
+ if (AppendCubicMode::kApproximate == mode) {
+ // This section passes through an inflection point, so we can get away with a flat line.
+ // This can make some curves look slightly flatter when compared closely, back and forth,
+ // against another renderer, but for now that seems an acceptable tradeoff given the
+ // simplicity.
+ this->appendLine(p0, p3);
+ return;
+ }
+ } else {
+ Sk2f tan0, tan1;
+ get_cubic_tangents(p0, p1, p2, p3, &tan0, &tan1);
+
+ if (maxSubdivisions && !is_convex_curve_monotonic(p0, tan0, p3, tan1)) {
+ this->chopAndAppendCubicAtMidTangent(mode, p0, p1, p2, p3, tan0, tan1,
+ maxSubdivisions - 1);
+ return;
+ }
+
+ if (AppendCubicMode::kApproximate == mode) {
+ Sk2f c;
+ if (!is_cubic_nearly_quadratic(p0, p1, p2, p3, tan0, tan1, &c) && maxSubdivisions) {
+ this->chopAndAppendCubicAtMidTangent(mode, p0, p1, p2, p3, tan0, tan1,
+ maxSubdivisions - 1);
+ return;
+ }
+
+ this->appendMonotonicQuadratic(p0, c, p3);
+ return;
+ }
+ }
+
+ // Don't send curves to the GPU if we know they are nearly flat (or just very small).
+ // Since the cubic segment is known to be convex at this point, our flatness check is simple.
+ if (are_collinear(p0, (p1 + p2) * .5f, p3)) {
+ this->appendLine(p0, p3);
+ return;
+ }
+
+ SkASSERT(fPoints.back() == SkPoint::Make(p0[0], p0[1]));
+ SkASSERT((p0 != p3).anyTrue());
+ p1.store(&fPoints.push_back());
+ p2.store(&fPoints.push_back());
+ p3.store(&fPoints.push_back());
+ fVerbs.push_back(Verb::kMonotonicCubicTo);
+ ++fCurrContourTallies.fCubics;
+}
+
+// Given a convex curve segment with the following order-2 tangent function:
+//
+// |C2x C2y|
+// tan = some_scale * |dx/dt dy/dt| = |t^2 t 1| * |C1x C1y|
+// |C0x C0y|
+//
+// This function finds the T value whose tangent angle is halfway between the tangents at T=0 and
+// T=1 (tan0 and tan1).
+static inline float find_midtangent(const Sk2f& tan0, const Sk2f& tan1,
+ const Sk2f& C2, const Sk2f& C1, const Sk2f& C0) {
+ // Tangents point in the direction of increasing T, so tan0 and -tan1 both point toward the
+ // midtangent. 'n' will therefore bisect tan0 and -tan1, giving us the normal to the midtangent.
+ //
+ // n dot midtangent = 0
+ //
+ Sk2f n = normalize(tan0) - normalize(tan1);
+
+ // Find the T value at the midtangent. This is a simple quadratic equation:
+ //
+ // midtangent dot n = 0
+ //
+ // (|t^2 t 1| * C) dot n = 0
+ //
+ // |t^2 t 1| dot C*n = 0
+ //
+ // First find coeffs = C*n.
+ Sk4f C[2];
+ Sk2f::Store4(C, C2, C1, C0, 0);
+ Sk4f coeffs = C[0]*n[0] + C[1]*n[1];
+
+ // Now solve the quadratic.
+ float a = coeffs[0], b = coeffs[1], c = coeffs[2];
+ float discr = b*b - 4*a*c;
+ if (discr < 0) {
+ return 0; // This will only happen if the curve is a line.
+ }
+
+ // The roots are q/a and c/q. Pick the one closer to T=.5.
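+ // Multiplying |q/a - .5| and |c/q - .5| through by |a*q| turns that comparison into
+ // |q*q - r| vs. |a*c - r| with r = .5*q*a, avoiding the divisions.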
+ float q = -.5f * (b + copysignf(std::sqrt(discr), b));
+ float r = .5f*q*a;
+ return std::abs(q*q - r) < std::abs(a*c - r) ? q/a : c/q;
+}
+
+inline void GrCCFillGeometry::chopAndAppendCubicAtMidTangent(AppendCubicMode mode, const Sk2f& p0,
+ const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3, const Sk2f& tan0,
+ const Sk2f& tan1,
+ int maxFutureSubdivisions) {
+ float midT = find_midtangent(tan0, tan1, p3 + (p1 - p2)*3 - p0,
+ (p0 - p1*2 + p2)*2,
+ p1 - p0);
+ // Use positive logic since NaN fails comparisons. (However midT should not be NaN since we cull
+ // near-flat cubics in cubicTo().)
+ if (!(midT > 0 && midT < 1)) {
+ // The cubic is flat. Otherwise there would be a real midtangent inside T=0..1.
+ this->appendLine(p0, p3);
+ return;
+ }
+
+ Sk2f p01, p02, pT, p11, p12;
+ chop_cubic(p0, p1, p2, p3, midT, &p01, &p02, &pT, &p11, &p12);
+ this->appendCubics(mode, p0, p01, p02, pT, maxFutureSubdivisions);
+ this->appendCubics(mode, pT, p11, p12, p3, maxFutureSubdivisions);
+}
+
+void GrCCFillGeometry::conicTo(const SkPoint P[3], float w) {
+ SkASSERT(fBuildingContour);
+ SkASSERT(P[0] == fPoints.back());
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+
+ Sk2f tan0 = p1 - p0;
+ Sk2f tan1 = p2 - p1;
+
+ if (!is_convex_curve_monotonic(p0, tan0, p2, tan1)) {
+ // The derivative of a conic has a cumbersome order-4 denominator. However, this isn't
+ // necessary if we are only interested in a vector in the same *direction* as a given
+ // tangent line. Since the denominator scales dx and dy uniformly, we can throw it out
+ // completely after evaluating the derivative with the standard quotient rule. This leaves
+ // us with a simpler quadratic function that we use to find the midtangent.
+ float midT = find_midtangent(tan0, tan1, (w - 1) * (p2 - p0),
+ (p2 - p0) - 2*w*(p1 - p0),
+ w*(p1 - p0));
+ // Use positive logic since NaN fails comparisons. (However midT should not be NaN since we
+ // cull near-linear conics above. And while w=0 is flat, it's not a line and has valid
+ // midtangents.)
+ if (!(midT > 0 && midT < 1)) {
+ // The conic is flat. Otherwise there would be a real midtangent inside T=0..1.
+ this->appendLine(p0, p2);
+ return;
+ }
+
+ // Chop the conic at midtangent to produce two monotonic segments.
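+ // We run de Casteljau on the conic in homogeneous space (x*w, y*w, w). Afterward, each
+ // half's middle weight (p3d01[2], p3d12[2]) is scaled by rsqrt of the chop point's weight,
+ // which renormalizes the halves so their endpoint weights remain 1.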
+ Sk4f p3d0 = Sk4f(p0[0], p0[1], 1, 0);
+ Sk4f p3d1 = Sk4f(p1[0], p1[1], 1, 0) * w;
+ Sk4f p3d2 = Sk4f(p2[0], p2[1], 1, 0);
+ Sk4f midT4 = midT;
+
+ Sk4f p3d01 = lerp(p3d0, p3d1, midT4);
+ Sk4f p3d12 = lerp(p3d1, p3d2, midT4);
+ Sk4f p3d012 = lerp(p3d01, p3d12, midT4);
+
+ Sk2f midpoint = Sk2f(p3d012[0], p3d012[1]) / p3d012[2];
+ Sk2f ww = Sk2f(p3d01[2], p3d12[2]) * Sk2f(p3d012[2]).rsqrt();
+
+ this->appendMonotonicConic(p0, Sk2f(p3d01[0], p3d01[1]) / p3d01[2], midpoint, ww[0]);
+ this->appendMonotonicConic(midpoint, Sk2f(p3d12[0], p3d12[1]) / p3d12[2], p2, ww[1]);
+ return;
+ }
+
+ this->appendMonotonicConic(p0, p1, p2, w);
+}
+
+void GrCCFillGeometry::appendMonotonicConic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ float w) {
+ SkASSERT(w >= 0);
+
+ Sk2f base = p2 - p0;
+ Sk2f baseAbs = base.abs();
+ float baseWidth = baseAbs[0] + baseAbs[1];
+
+ // Find the height of the curve. Max height always occurs at T=.5 for conics.
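+ // At T=.5 the conic evaluates to (p0 + 2*w*p1 + p2) / (2 + 2*w), so its height above the
+ // base is w*h1 / (1 + w), i.e. ht/hs below.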
+ Sk2f d = (p1 - p0) * SkNx_shuffle<1,0>(base);
+ float h1 = std::abs(d[1] - d[0]); // Height of p1 above the base.
+ float ht = h1*w, hs = 1 + w; // Height of the conic = ht/hs.
+
+ // i.e. (ht/hs <= baseWidth * kFlatnessThreshold). Use "<=" in case base == 0.
+ if (ht <= (baseWidth*hs) * kFlatnessThreshold) {
+ // We are flat. (See rationale in are_collinear.)
+ this->appendLine(p0, p2);
+ return;
+ }
+
+ // i.e. (w > 1 && h1 - ht/hs < baseWidth).
+ if (w > 1 && h1*hs - ht < baseWidth*hs) {
+ // If we get within 1px of p1 when w > 1, we will pick up artifacts from the implicit
+ // function's reflection. Chop at max height (T=.5) and draw a triangle instead.
+ Sk2f p1w = p1*w;
+ Sk2f ab = p0 + p1w;
+ Sk2f bc = p1w + p2;
+ Sk2f highpoint = (ab + bc) / (2*(1 + w));
+ this->appendLine(p0, highpoint);
+ this->appendLine(highpoint, p2);
+ return;
+ }
+
+ SkASSERT(fPoints.back() == SkPoint::Make(p0[0], p0[1]));
+ SkASSERT((p0 != p2).anyTrue());
+ p1.store(&fPoints.push_back());
+ p2.store(&fPoints.push_back());
+ fConicWeights.push_back(w);
+ fVerbs.push_back(Verb::kMonotonicConicTo);
+ ++fCurrContourTallies.fConics;
+}
+
+GrCCFillGeometry::PrimitiveTallies GrCCFillGeometry::endContour() {
+ SkASSERT(fBuildingContour);
+ SkASSERT(fVerbs.count() >= fCurrContourTallies.fTriangles);
+
+ // The fTriangles field currently contains this contour's starting verb index. We can now
+ // use it to calculate the size of the contour's fan.
+ int fanSize = fVerbs.count() - fCurrContourTallies.fTriangles;
+ if (fPoints.back() == fCurrAnchorPoint) {
+ --fanSize;
+ fVerbs.push_back(Verb::kEndClosedContour);
+ } else {
+ fVerbs.push_back(Verb::kEndOpenContour);
+ }
+
+ fCurrContourTallies.fTriangles = SkTMax(fanSize - 2, 0);
+
+ SkDEBUGCODE(fBuildingContour = false);
+ return fCurrContourTallies;
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.h b/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.h
new file mode 100644
index 0000000000..d0a3c1e12d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCFillGeometry.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCFillGeometry_DEFINED
+#define GrCCFillGeometry_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkGeometry.h"
+
+/**
+ * This class chops device-space contours up into a series of segments that CCPR knows how to
+ * fill. (See GrCCFillGeometry::Verb.)
+ *
+ * NOTE: This must be done in device space, since an affine transformation can change whether a
+ * curve is monotonic.
+ */
+class GrCCFillGeometry {
+public:
+ // These are the verbs that CCPR knows how to fill. If a path has any segments that don't map to
+ // this list, then they are chopped into smaller ones that do. A list of these comprises a
+ // compact representation of what can later be expanded into GPU instance data.
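+ //
+ // For example, a triangular contour might expand to (sketch):
+ //   kBeginPath, kBeginContour, kLineTo, kLineTo, kEndOpenContour
+ // (the closing edge back to the anchor point is implicit in the fill).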
+ enum class Verb : uint8_t {
+ kBeginPath, // Included only for caller convenience.
+ kBeginContour,
+ kLineTo,
+ kMonotonicQuadraticTo, // Monotonic relative to the vector between its endpoints [P2 - P0].
+ kMonotonicCubicTo,
+ kMonotonicConicTo,
+ kEndClosedContour, // endPt == startPt.
+ kEndOpenContour // endPt != startPt.
+ };
+
+ // These tallies track numbers of CCPR primitives that are required to draw a contour.
+ struct PrimitiveTallies {
+ int fTriangles; // Number of triangles in the contour's fan.
+ int fWeightedTriangles; // Triangles (from the tessellator) whose winding magnitude > 1.
+ int fQuadratics;
+ int fCubics;
+ int fConics;
+
+ void operator+=(const PrimitiveTallies&);
+ PrimitiveTallies operator-(const PrimitiveTallies&) const;
+ bool operator==(const PrimitiveTallies&) const;
+ };
+
+ GrCCFillGeometry(int numSkPoints = 0, int numSkVerbs = 0, int numConicWeights = 0)
+ : fPoints(numSkPoints * 3) // Reserve for a 3x expansion in points and verbs.
+ , fVerbs(numSkVerbs * 3)
+ , fConicWeights(numConicWeights * 3/2) {}
+
+ const SkTArray<SkPoint, true>& points() const { SkASSERT(!fBuildingContour); return fPoints; }
+ const SkTArray<Verb, true>& verbs() const { SkASSERT(!fBuildingContour); return fVerbs; }
+ float getConicWeight(int idx) const { SkASSERT(!fBuildingContour); return fConicWeights[idx]; }
+
+ void reset() {
+ SkASSERT(!fBuildingContour);
+ fPoints.reset();
+ fVerbs.reset();
+ }
+
+ void beginPath();
+ void beginContour(const SkPoint&);
+ void lineTo(const SkPoint P[2]);
+ void quadraticTo(const SkPoint[3]);
+
+ // We pass through inflection points and loop intersections using a line and quadratic(s)
+ // respectively. 'inflectPad' and 'loopIntersectPad' specify how close (in pixels) cubic
+ // segments are allowed to get to these points. For normal rendering you will want to use the
+ // default values, but these can be overridden for testing purposes.
+ //
+ // NOTE: loops do appear to require two full pixels of padding around the intersection point.
+ // With just one pixel-width of pad, we start to see bad pixels. Ultimately this has a
+ // minimal effect on the total amount of segments produced. Most sections that pass
+ // through the loop intersection can be approximated with a single quadratic anyway,
+ // regardless of whether we are use one pixel of pad or two (1.622 avg. quads per loop
+ // intersection vs. 1.489 on the tiger).
+ void cubicTo(const SkPoint[4], float inflectPad = 0.55f, float loopIntersectPad = 2);
+
+ void conicTo(const SkPoint[3], float w);
+
+ PrimitiveTallies endContour(); // Returns the numbers of primitives needed to draw the contour.
+
+private:
+ inline void appendLine(const Sk2f& p0, const Sk2f& p1);
+
+ inline void appendQuadratics(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2);
+ inline void appendMonotonicQuadratic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2);
+
+ enum class AppendCubicMode : bool {
+ kLiteral,
+ kApproximate
+ };
+ void appendCubics(AppendCubicMode, const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3, const float chops[], int numChops, float localT0 = 0,
+ float localT1 = 1);
+ void appendCubics(AppendCubicMode, const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3, int maxSubdivisions = 2);
+ void chopAndAppendCubicAtMidTangent(AppendCubicMode, const Sk2f& p0, const Sk2f& p1,
+ const Sk2f& p2, const Sk2f& p3, const Sk2f& tan0,
+ const Sk2f& tan1, int maxFutureSubdivisions);
+
+ void appendMonotonicConic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2, float w);
+
+ // Transient state used while building a contour.
+ SkPoint fCurrAnchorPoint;
+ PrimitiveTallies fCurrContourTallies;
+ SkCubicType fCurrCubicType;
+ SkDEBUGCODE(bool fBuildingContour = false);
+
+ SkSTArray<128, SkPoint, true> fPoints;
+ SkSTArray<128, Verb, true> fVerbs;
+ SkSTArray<32, float, true> fConicWeights;
+};
+
+inline void GrCCFillGeometry::PrimitiveTallies::operator+=(const PrimitiveTallies& b) {
+ fTriangles += b.fTriangles;
+ fWeightedTriangles += b.fWeightedTriangles;
+ fQuadratics += b.fQuadratics;
+ fCubics += b.fCubics;
+ fConics += b.fConics;
+}
+
+inline GrCCFillGeometry::PrimitiveTallies
+GrCCFillGeometry::PrimitiveTallies::operator-(const PrimitiveTallies& b) const {
+ return {fTriangles - b.fTriangles,
+ fWeightedTriangles - b.fWeightedTriangles,
+ fQuadratics - b.fQuadratics,
+ fCubics - b.fCubics,
+ fConics - b.fConics};
+}
+
+inline bool GrCCFillGeometry::PrimitiveTallies::operator==(const PrimitiveTallies& b) const {
+ return fTriangles == b.fTriangles && fWeightedTriangles == b.fWeightedTriangles &&
+ fQuadratics == b.fQuadratics && fCubics == b.fCubics && fConics == b.fConics;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.cpp
new file mode 100644
index 0000000000..4553c62bac
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCFiller.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkPathPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpFlushState.h"
+#include <stdlib.h>
+
+using TriPointInstance = GrCCCoverageProcessor::TriPointInstance;
+using QuadPointInstance = GrCCCoverageProcessor::QuadPointInstance;
+
+GrCCFiller::GrCCFiller(Algorithm algorithm, int numPaths, int numSkPoints, int numSkVerbs,
+ int numConicWeights)
+ : fAlgorithm(algorithm)
+ , fGeometry(numSkPoints, numSkVerbs, numConicWeights)
+ , fPathInfos(numPaths)
+ , fScissorSubBatches(numPaths)
+ , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
+ // Batches decide what to draw by looking where the previous one ended. Define initial batches
+ // that "end" at the beginning of the data. These will not be drawn, but will only be read by
+ // the first actual batch.
+ fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
+ fBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count(), PrimitiveTallies()};
+}
+
+void GrCCFiller::parseDeviceSpaceFill(const SkPath& path, const SkPoint* deviceSpacePts,
+ GrScissorTest scissorTest, const SkIRect& clippedDevIBounds,
+ const SkIVector& devToAtlasOffset) {
+ SkASSERT(!fInstanceBuffer); // Can't call after prepareToDraw().
+ SkASSERT(!path.isEmpty());
+
+ int currPathPointsIdx = fGeometry.points().count();
+ int currPathVerbsIdx = fGeometry.verbs().count();
+ PrimitiveTallies currPathPrimitiveCounts = PrimitiveTallies();
+
+ fGeometry.beginPath();
+
+ const float* conicWeights = SkPathPriv::ConicWeightData(path);
+ int ptsIdx = 0;
+ int conicWeightsIdx = 0;
+ bool insideContour = false;
+
+ for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (insideContour) {
+ currPathPrimitiveCounts += fGeometry.endContour();
+ }
+ fGeometry.beginContour(deviceSpacePts[ptsIdx]);
+ ++ptsIdx;
+ insideContour = true;
+ continue;
+ case SkPath::kClose_Verb:
+ if (insideContour) {
+ currPathPrimitiveCounts += fGeometry.endContour();
+ }
+ insideContour = false;
+ continue;
+ case SkPath::kLine_Verb:
+ fGeometry.lineTo(&deviceSpacePts[ptsIdx - 1]);
+ ++ptsIdx;
+ continue;
+ case SkPath::kQuad_Verb:
+ fGeometry.quadraticTo(&deviceSpacePts[ptsIdx - 1]);
+ ptsIdx += 2;
+ continue;
+ case SkPath::kCubic_Verb:
+ fGeometry.cubicTo(&deviceSpacePts[ptsIdx - 1]);
+ ptsIdx += 3;
+ continue;
+ case SkPath::kConic_Verb:
+ fGeometry.conicTo(&deviceSpacePts[ptsIdx - 1], conicWeights[conicWeightsIdx]);
+ ptsIdx += 2;
+ ++conicWeightsIdx;
+ continue;
+ default:
+ SK_ABORT("Unexpected path verb.");
+ }
+ }
+ SkASSERT(ptsIdx == path.countPoints());
+ SkASSERT(conicWeightsIdx == SkPathPriv::ConicWeightCnt(path));
+
+ if (insideContour) {
+ currPathPrimitiveCounts += fGeometry.endContour();
+ }
+
+ fPathInfos.emplace_back(scissorTest, devToAtlasOffset);
+
+ // Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
+ int numVerbs = fGeometry.verbs().count() - currPathVerbsIdx - 1;
+ int64_t tessellationWork = (int64_t)numVerbs * (32 - SkCLZ(numVerbs)); // N log N.
+ int64_t fanningWork = (int64_t)clippedDevIBounds.height() * clippedDevIBounds.width();
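+ // (32 - SkCLZ(n)) computes floor(log2(n)) + 1, a cheap integer log. Fanning cost scales with
+ // the covered area while tessellation scales roughly N log N; the 50*50 factor and 100*100
+ // floor bias the decision toward fanning for small paths.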
+ if (tessellationWork * (50*50) + (100*100) < fanningWork) { // Don't tessellate under 100x100.
+ fPathInfos.back().tessellateFan(
+ fAlgorithm, path, fGeometry, currPathVerbsIdx, currPathPointsIdx, clippedDevIBounds,
+ &currPathPrimitiveCounts);
+ }
+
+ fTotalPrimitiveCounts[(int)scissorTest] += currPathPrimitiveCounts;
+
+ if (GrScissorTest::kEnabled == scissorTest) {
+ fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled],
+ clippedDevIBounds.makeOffset(devToAtlasOffset)};
+ }
+}
+
+void GrCCFiller::PathInfo::tessellateFan(
+ Algorithm algorithm, const SkPath& originalPath, const GrCCFillGeometry& geometry,
+ int verbsIdx, int ptsIdx, const SkIRect& clippedDevIBounds,
+ PrimitiveTallies* newTriangleCounts) {
+ using Verb = GrCCFillGeometry::Verb;
+ SkASSERT(-1 == fFanTessellationCount);
+ SkASSERT(!fFanTessellation);
+
+ const SkTArray<Verb, true>& verbs = geometry.verbs();
+ const SkTArray<SkPoint, true>& pts = geometry.points();
+
+ newTriangleCounts->fTriangles =
+ newTriangleCounts->fWeightedTriangles = 0;
+
+ // Build an SkPath of the Redbook fan.
+ SkPath fan;
+ if (Algorithm::kCoverageCount == algorithm) {
+ // We use "winding" fill type right now because we are producing a coverage count, and must
+ // fill in every region that has non-zero wind. The path processor will convert coverage
+ // count to the appropriate fill type later.
+ fan.setFillType(SkPath::kWinding_FillType);
+ } else {
+ // When counting winding numbers in the stencil buffer, it works to use even/odd for the fan
+ // tessellation (where applicable). But we need to strip out inverse fill info because
+ // inverse-ness gets accounted for later on.
+ fan.setFillType(SkPath::ConvertToNonInverseFillType(originalPath.getFillType()));
+ }
+ SkASSERT(Verb::kBeginPath == verbs[verbsIdx]);
+ for (int i = verbsIdx + 1; i < verbs.count(); ++i) {
+ switch (verbs[i]) {
+ case Verb::kBeginPath:
+ SK_ABORT("Invalid GrCCFillGeometry");
+ continue;
+
+ case Verb::kBeginContour:
+ fan.moveTo(pts[ptsIdx++]);
+ continue;
+
+ case Verb::kLineTo:
+ fan.lineTo(pts[ptsIdx++]);
+ continue;
+
+ case Verb::kMonotonicQuadraticTo:
+ case Verb::kMonotonicConicTo:
+ fan.lineTo(pts[ptsIdx + 1]);
+ ptsIdx += 2;
+ continue;
+
+ case Verb::kMonotonicCubicTo:
+ fan.lineTo(pts[ptsIdx + 2]);
+ ptsIdx += 3;
+ continue;
+
+ case Verb::kEndClosedContour:
+ case Verb::kEndOpenContour:
+ fan.close();
+ continue;
+ }
+ }
+
+ GrTessellator::WindingVertex* vertices = nullptr;
+ SkASSERT(!fan.isInverseFillType());
+ fFanTessellationCount = GrTessellator::PathToVertices(
+ fan, std::numeric_limits<float>::infinity(), SkRect::Make(clippedDevIBounds),
+ &vertices);
+ if (fFanTessellationCount <= 0) {
+ SkASSERT(0 == fFanTessellationCount);
+ SkASSERT(nullptr == vertices);
+ return;
+ }
+
+ SkASSERT(0 == fFanTessellationCount % 3);
+ for (int i = 0; i < fFanTessellationCount; i += 3) {
+ int tessWinding = vertices[i].fWinding;
+ SkASSERT(tessWinding == vertices[i + 1].fWinding);
+ SkASSERT(tessWinding == vertices[i + 2].fWinding);
+
+ // Ensure this triangle's points actually wind in the same direction as tessWinding.
+ // CCPR shaders use the sign of wind to determine which direction to bloat, so even for
+ // "wound" triangles the winding sign and point ordering need to agree.
+ float ax = vertices[i].fPos.fX - vertices[i + 1].fPos.fX;
+ float ay = vertices[i].fPos.fY - vertices[i + 1].fPos.fY;
+ float bx = vertices[i].fPos.fX - vertices[i + 2].fPos.fX;
+ float by = vertices[i].fPos.fY - vertices[i + 2].fPos.fY;
+ float wind = ax*by - ay*bx;
+ if ((wind > 0) != (-tessWinding > 0)) { // Tessellator has opposite winding sense.
+ std::swap(vertices[i + 1].fPos, vertices[i + 2].fPos);
+ }
+
+ int weight = abs(tessWinding);
+ SkASSERT(SkPath::kEvenOdd_FillType != fan.getFillType() || weight == 1);
+ if (weight > 1 && Algorithm::kCoverageCount == algorithm) {
+ ++newTriangleCounts->fWeightedTriangles;
+ } else {
+ newTriangleCounts->fTriangles += weight;
+ }
+ }
+
+ fFanTessellation.reset(vertices);
+}
+
+GrCCFiller::BatchID GrCCFiller::closeCurrentBatch() {
+ SkASSERT(!fInstanceBuffer);
+ SkASSERT(!fBatches.empty());
+
+ const auto& lastBatch = fBatches.back();
+ int maxMeshes = 1 + fScissorSubBatches.count() - lastBatch.fEndScissorSubBatchIdx;
+ fMaxMeshesPerDraw = SkTMax(fMaxMeshesPerDraw, maxMeshes);
+
+ const auto& lastScissorSubBatch = fScissorSubBatches[lastBatch.fEndScissorSubBatchIdx - 1];
+ PrimitiveTallies batchTotalCounts = fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled] -
+ lastBatch.fEndNonScissorIndices;
+ batchTotalCounts += fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled] -
+ lastScissorSubBatch.fEndPrimitiveIndices;
+
+ // This will invalidate lastBatch.
+ fBatches.push_back() = {
+ fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled],
+ fScissorSubBatches.count(),
+ batchTotalCounts
+ };
+ return fBatches.count() - 1;
+}
+
+// Emits a contour's triangle fan.
+//
+// Classic Redbook fanning would be the triangles: [0 1 2], [0 2 3], ..., [0 n-2 n-1].
+//
+// This function emits the triangle: [0 n/3 n*2/3], and then recurses on all three sides. The
+// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
+// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
+// have a longer perimeter to rasterize and antialias.
+//
+// The indices array indexes the fan's points (think: glDrawElements), and must have at least
+// log3(indexCount) elements past the end for this method to use as scratch space.
+//
+// Returns the next triangle instance after the final one emitted.
+static TriPointInstance* emit_recursive_fan(
+ const SkTArray<SkPoint, true>& pts, SkTArray<int32_t, true>& indices, int firstIndex,
+ int indexCount, const Sk2f& devToAtlasOffset, TriPointInstance::Ordering ordering,
+ TriPointInstance out[]) {
+ if (indexCount < 3) {
+ return out;
+ }
+
+ int32_t oneThirdCount = indexCount / 3;
+ int32_t twoThirdsCount = (2 * indexCount) / 3;
+ out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
+ pts[indices[firstIndex + twoThirdsCount]], devToAtlasOffset, ordering);
+
+ out = emit_recursive_fan(
+ pts, indices, firstIndex, oneThirdCount + 1, devToAtlasOffset, ordering, out);
+ out = emit_recursive_fan(
+ pts, indices, firstIndex + oneThirdCount, twoThirdsCount - oneThirdCount + 1,
+ devToAtlasOffset, ordering, out);
+
+ int endIndex = firstIndex + indexCount;
+ int32_t oldValue = indices[endIndex];
+ indices[endIndex] = indices[firstIndex];
+ out = emit_recursive_fan(
+ pts, indices, firstIndex + twoThirdsCount, indexCount - twoThirdsCount + 1,
+ devToAtlasOffset, ordering, out);
+ indices[endIndex] = oldValue;
+
+ return out;
+}
+
+void GrCCFiller::emitTessellatedFan(
+ const GrTessellator::WindingVertex* vertices, int numVertices, const Sk2f& devToAtlasOffset,
+ TriPointInstance::Ordering ordering, TriPointInstance* triPointInstanceData,
+ QuadPointInstance* quadPointInstanceData, GrCCFillGeometry::PrimitiveTallies* indices) {
+ for (int i = 0; i < numVertices; i += 3) {
+ int weight = abs(vertices[i].fWinding);
+ SkASSERT(weight >= 1);
+ if (weight > 1 && Algorithm::kStencilWindingCount != fAlgorithm) {
+ quadPointInstanceData[indices->fWeightedTriangles++].setW(
+ vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
+ static_cast<float>(abs(vertices[i].fWinding)));
+ } else for (int j = 0; j < weight; ++j) {
+ // Unfortunately, there is not a way to increment stencil values by an amount larger
+ // than 1. Instead we draw the triangle 'weight' times.
+ triPointInstanceData[indices->fTriangles++].set(
+ vertices[i].fPos, vertices[i + 1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
+ ordering);
+ }
+ }
+}
+
+bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
+ using Verb = GrCCFillGeometry::Verb;
+ SkASSERT(!fInstanceBuffer);
+ SkASSERT(fBatches.back().fEndNonScissorIndices == // Call closeCurrentBatch().
+ fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled]);
+ SkASSERT(fBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());
+
+ auto triangleOrdering = (Algorithm::kCoverageCount == fAlgorithm)
+ ? TriPointInstance::Ordering::kXYTransposed
+ : TriPointInstance::Ordering::kXYInterleaved;
+
+ // Here we build a single instance buffer to share with every internal batch.
+ //
+ // CCPR processes 5 different types of primitives: triangles, weighted triangles, quadratics,
+ // cubics, and conics. Each type is further divided into instances that require a scissor and
+ // those that don't, leaving 5*2 = 10 independent instance arrays to build for the GPU.
+ //
+ // Rather than place each instance array in its own GPU buffer, we allocate a single
+ // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
+ // our draw calls to direct the GPU to the applicable elements within a given array.
+ //
+ // We already know how big to make each of the 6 arrays from fTotalPrimitiveCounts, so layout is
+ // straightforward. Start with triangles and quadratics. They both view the instance buffer as
+ // an array of TriPointInstance[], so we can begin at zero and lay them out one after the other.
+ fBaseInstances[0].fTriangles = 0;
+ fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
+ fTotalPrimitiveCounts[0].fTriangles;
+ fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
+ fTotalPrimitiveCounts[1].fTriangles;
+ fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
+ fTotalPrimitiveCounts[0].fQuadratics;
+ int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;
+
+ // Weighted triangles, cubics, and conics all view the same instance buffer as an array of
+ // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
+ // them on the first index that will not overwrite previous TriPointInstance data.
+ int quadBaseIdx =
+ GrSizeDivRoundUp(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
+ fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
+ fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
+ fTotalPrimitiveCounts[0].fWeightedTriangles;
+ fBaseInstances[0].fCubics = fBaseInstances[1].fWeightedTriangles +
+ fTotalPrimitiveCounts[1].fWeightedTriangles;
+ fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
+ fBaseInstances[0].fConics = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;
+ fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
+ int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;
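+
+ // Resulting layout, in instance indices ([0] = no scissor, [1] = scissor):
+ //
+ //   TriPointInstance[]:  | tris[0] | tris[1] | quads[0] | quads[1] |
+ //   QuadPointInstance[]: | wtris[0] | wtris[1] | cubics[0] | cubics[1] | conics[0] | conics[1] |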
+
+ fInstanceBuffer =
+ onFlushRP->makeBuffer(GrGpuBufferType::kVertex, quadEndIdx * sizeof(QuadPointInstance));
+ if (!fInstanceBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
+ return false;
+ }
+
+ TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
+ QuadPointInstance* quadPointInstanceData =
+ reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
+ SkASSERT(quadPointInstanceData);
+
+ PathInfo* nextPathInfo = fPathInfos.begin();
+ Sk2f devToAtlasOffset;
+ PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
+ PrimitiveTallies* currIndices = nullptr;
+ SkSTArray<256, int32_t, true> currFan;
+ bool currFanIsTessellated = false;
+
+ const SkTArray<SkPoint, true>& pts = fGeometry.points();
+ int ptsIdx = -1;
+ int nextConicWeightIdx = 0;
+
+ // Expand the ccpr verbs into GPU instance buffers.
+ for (Verb verb : fGeometry.verbs()) {
+ switch (verb) {
+ case Verb::kBeginPath:
+ SkASSERT(currFan.empty());
+ currIndices = &instanceIndices[(int)nextPathInfo->scissorTest()];
+ devToAtlasOffset = Sk2f(static_cast<float>(nextPathInfo->devToAtlasOffset().fX),
+ static_cast<float>(nextPathInfo->devToAtlasOffset().fY));
+ currFanIsTessellated = nextPathInfo->hasFanTessellation();
+ if (currFanIsTessellated) {
+ this->emitTessellatedFan(
+ nextPathInfo->fanTessellation(), nextPathInfo->fanTessellationCount(),
+ devToAtlasOffset, triangleOrdering, triPointInstanceData,
+ quadPointInstanceData, currIndices);
+ }
+ ++nextPathInfo;
+ continue;
+
+ case Verb::kBeginContour:
+ SkASSERT(currFan.empty());
+ ++ptsIdx;
+ if (!currFanIsTessellated) {
+ currFan.push_back(ptsIdx);
+ }
+ continue;
+
+ case Verb::kLineTo:
+ ++ptsIdx;
+ if (!currFanIsTessellated) {
+ SkASSERT(!currFan.empty());
+ currFan.push_back(ptsIdx);
+ }
+ continue;
+
+ case Verb::kMonotonicQuadraticTo:
+ triPointInstanceData[currIndices->fQuadratics++].set(
+ &pts[ptsIdx], devToAtlasOffset, TriPointInstance::Ordering::kXYTransposed);
+ ptsIdx += 2;
+ if (!currFanIsTessellated) {
+ SkASSERT(!currFan.empty());
+ currFan.push_back(ptsIdx);
+ }
+ continue;
+
+ case Verb::kMonotonicCubicTo:
+ quadPointInstanceData[currIndices->fCubics++].set(
+ &pts[ptsIdx], devToAtlasOffset[0], devToAtlasOffset[1]);
+ ptsIdx += 3;
+ if (!currFanIsTessellated) {
+ SkASSERT(!currFan.empty());
+ currFan.push_back(ptsIdx);
+ }
+ continue;
+
+ case Verb::kMonotonicConicTo:
+ quadPointInstanceData[currIndices->fConics++].setW(
+ &pts[ptsIdx], devToAtlasOffset,
+ fGeometry.getConicWeight(nextConicWeightIdx));
+ ptsIdx += 2;
+ ++nextConicWeightIdx;
+ if (!currFanIsTessellated) {
+ SkASSERT(!currFan.empty());
+ currFan.push_back(ptsIdx);
+ }
+ continue;
+
+ case Verb::kEndClosedContour: // endPt == startPt.
+ if (!currFanIsTessellated) {
+ SkASSERT(!currFan.empty());
+ currFan.pop_back();
+ }
+ // fallthru.
+ case Verb::kEndOpenContour: // endPt != startPt.
+ SkASSERT(!currFanIsTessellated || currFan.empty());
+ if (!currFanIsTessellated && currFan.count() >= 3) {
+ int fanSize = currFan.count();
+ // Reserve space for emit_recursive_fan. Technically this can grow to
+ // fanSize + log3(fanSize), but we approximate with log2.
+ currFan.push_back_n(SkNextLog2(fanSize));
+ SkDEBUGCODE(TriPointInstance* end =) emit_recursive_fan(
+ pts, currFan, 0, fanSize, devToAtlasOffset, triangleOrdering,
+ triPointInstanceData + currIndices->fTriangles);
+ currIndices->fTriangles += fanSize - 2;
+ SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
+ }
+ currFan.reset();
+ continue;
+ }
+ }
+
+ fInstanceBuffer->unmap();
+
+ SkASSERT(nextPathInfo == fPathInfos.end());
+ SkASSERT(ptsIdx == pts.count() - 1);
+ SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
+ SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
+ SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
+ SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
+ SkASSERT(instanceIndices[0].fWeightedTriangles == fBaseInstances[1].fWeightedTriangles);
+ SkASSERT(instanceIndices[1].fWeightedTriangles == fBaseInstances[0].fCubics);
+ SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
+ SkASSERT(instanceIndices[1].fCubics == fBaseInstances[0].fConics);
+ SkASSERT(instanceIndices[0].fConics == fBaseInstances[1].fConics);
+ SkASSERT(instanceIndices[1].fConics == quadEndIdx);
+
+ fMeshesScratchBuffer.reserve(fMaxMeshesPerDraw);
+ fScissorRectScratchBuffer.reserve(fMaxMeshesPerDraw);
+
+ return true;
+}
+
+void GrCCFiller::drawFills(
+ GrOpFlushState* flushState, GrCCCoverageProcessor* proc, const GrPipeline& pipeline,
+ BatchID batchID, const SkIRect& drawBounds) const {
+ using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;
+
+ SkASSERT(fInstanceBuffer);
+
+ GrResourceProvider* rp = flushState->resourceProvider();
+ const PrimitiveTallies& batchTotalCounts = fBatches[batchID].fTotalPrimitiveCounts;
+
+ if (batchTotalCounts.fTriangles) {
+ proc->reset(PrimitiveType::kTriangles, rp);
+ this->drawPrimitives(
+ flushState, *proc, pipeline, batchID, &PrimitiveTallies::fTriangles, drawBounds);
+ }
+
+ if (batchTotalCounts.fWeightedTriangles) {
+ SkASSERT(Algorithm::kStencilWindingCount != fAlgorithm);
+ proc->reset(PrimitiveType::kWeightedTriangles, rp);
+ this->drawPrimitives(
+ flushState, *proc, pipeline, batchID, &PrimitiveTallies::fWeightedTriangles,
+ drawBounds);
+ }
+
+ if (batchTotalCounts.fQuadratics) {
+ proc->reset(PrimitiveType::kQuadratics, rp);
+ this->drawPrimitives(
+ flushState, *proc, pipeline, batchID, &PrimitiveTallies::fQuadratics, drawBounds);
+ }
+
+ if (batchTotalCounts.fCubics) {
+ proc->reset(PrimitiveType::kCubics, rp);
+ this->drawPrimitives(
+ flushState, *proc, pipeline, batchID, &PrimitiveTallies::fCubics, drawBounds);
+ }
+
+ if (batchTotalCounts.fConics) {
+ proc->reset(PrimitiveType::kConics, rp);
+ this->drawPrimitives(
+ flushState, *proc, pipeline, batchID, &PrimitiveTallies::fConics, drawBounds);
+ }
+}
+
+void GrCCFiller::drawPrimitives(
+ GrOpFlushState* flushState, const GrCCCoverageProcessor& proc, const GrPipeline& pipeline,
+ BatchID batchID, int PrimitiveTallies::*instanceType, const SkIRect& drawBounds) const {
+ SkASSERT(pipeline.isScissorEnabled());
+
+ // Don't call reset(), as that also resets the reserve count.
+ fMeshesScratchBuffer.pop_back_n(fMeshesScratchBuffer.count());
+ fScissorRectScratchBuffer.pop_back_n(fScissorRectScratchBuffer.count());
+
+ SkASSERT(batchID > 0);
+ SkASSERT(batchID < fBatches.count());
+ const Batch& previousBatch = fBatches[batchID - 1];
+ const Batch& batch = fBatches[batchID];
+ SkDEBUGCODE(int totalInstanceCount = 0);
+
+ if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
+ previousBatch.fEndNonScissorIndices.*instanceType) {
+ SkASSERT(instanceCount > 0);
+ int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].*instanceType +
+ previousBatch.fEndNonScissorIndices.*instanceType;
+ proc.appendMesh(fInstanceBuffer, instanceCount, baseInstance, &fMeshesScratchBuffer);
+ fScissorRectScratchBuffer.push_back().setXYWH(0, 0, drawBounds.width(),
+ drawBounds.height());
+ SkDEBUGCODE(totalInstanceCount += instanceCount);
+ }
+
+ SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
+ SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
+ int baseScissorInstance = fBaseInstances[(int)GrScissorTest::kEnabled].*instanceType;
+ for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
+ const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
+ const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
+ int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
+ int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
+ if (!instanceCount) {
+ continue;
+ }
+ SkASSERT(instanceCount > 0);
+ proc.appendMesh(fInstanceBuffer, instanceCount, baseScissorInstance + startIndex,
+ &fMeshesScratchBuffer);
+ fScissorRectScratchBuffer.push_back() = scissorSubBatch.fScissor;
+ SkDEBUGCODE(totalInstanceCount += instanceCount);
+ }
+
+ SkASSERT(fMeshesScratchBuffer.count() == fScissorRectScratchBuffer.count());
+ SkASSERT(fMeshesScratchBuffer.count() <= fMaxMeshesPerDraw);
+ SkASSERT(totalInstanceCount == batch.fTotalPrimitiveCounts.*instanceType);
+
+ if (!fMeshesScratchBuffer.empty()) {
+ proc.draw(flushState, pipeline, fScissorRectScratchBuffer.begin(),
+ fMeshesScratchBuffer.begin(), fMeshesScratchBuffer.count(),
+ SkRect::Make(drawBounds));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.h b/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.h
new file mode 100644
index 0000000000..9a3b2e4d51
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCFiller.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPathParser_DEFINED
+#define GrCCPathParser_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkPathPriv.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrTessellator.h"
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+#include "src/gpu/ccpr/GrCCFillGeometry.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrOnFlushResourceProvider;
+class SkMatrix;
+class SkPath;
+
+/**
+ * This class parses SkPaths into CCPR primitives in GPU buffers, then issues calls to draw their
+ * coverage counts.
+ */
+class GrCCFiller {
+public:
+ enum class Algorithm : bool {
+ kCoverageCount,
+ kStencilWindingCount
+ };
+
+ GrCCFiller(Algorithm, int numPaths, int numSkPoints, int numSkVerbs, int numConicWeights);
+
+ // Parses a device-space SkPath into the current batch, using the SkPath's original verbs and
+ // 'deviceSpacePts'. Accepts an optional post-device-space translate for placement in an atlas.
+ void parseDeviceSpaceFill(const SkPath&, const SkPoint* deviceSpacePts, GrScissorTest,
+ const SkIRect& clippedDevIBounds, const SkIVector& devToAtlasOffset);
+
+ using BatchID = int;
+
+ // Compiles the outstanding parsed paths into a batch, and returns an ID that can be used to
+ // draw their fills in the future.
+ BatchID closeCurrentBatch();
+
+ // Builds internal GPU buffers and prepares for calls to drawFills(). Caller must close the
+    // current batch before calling this method, and cannot parse new paths afterward.
+ bool prepareToDraw(GrOnFlushResourceProvider*);
+
+ // Called after prepareToDraw(). Draws the given batch of path fills.
+ void drawFills(GrOpFlushState*, GrCCCoverageProcessor*, const GrPipeline&, BatchID,
+ const SkIRect& drawBounds) const;
+
+private:
+ static constexpr int kNumScissorModes = 2;
+ using PrimitiveTallies = GrCCFillGeometry::PrimitiveTallies;
+
+ // Every kBeginPath verb has a corresponding PathInfo entry.
+ class PathInfo {
+ public:
+ PathInfo(GrScissorTest scissorTest, const SkIVector& devToAtlasOffset)
+ : fScissorTest(scissorTest), fDevToAtlasOffset(devToAtlasOffset) {}
+
+ GrScissorTest scissorTest() const { return fScissorTest; }
+ const SkIVector& devToAtlasOffset() const { return fDevToAtlasOffset; }
+
+        // An empty tessellation fan is also valid; we use a negative count to denote "not tessellated".
+ bool hasFanTessellation() const { return fFanTessellationCount >= 0; }
+ int fanTessellationCount() const {
+ SkASSERT(this->hasFanTessellation());
+ return fFanTessellationCount;
+ }
+ const GrTessellator::WindingVertex* fanTessellation() const {
+ SkASSERT(this->hasFanTessellation());
+ return fFanTessellation.get();
+ }
+ void tessellateFan(
+ Algorithm, const SkPath& originalPath, const GrCCFillGeometry&, int verbsIdx,
+ int ptsIdx, const SkIRect& clippedDevIBounds, PrimitiveTallies* newTriangleCounts);
+
+ private:
+ GrScissorTest fScissorTest;
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
+ int fFanTessellationCount = -1;
+ std::unique_ptr<const GrTessellator::WindingVertex[]> fFanTessellation;
+ };
+
+ // Defines a batch of CCPR primitives. Start indices are deduced by looking at the previous
+ // Batch in the list.
+ struct Batch {
+ PrimitiveTallies fEndNonScissorIndices;
+ int fEndScissorSubBatchIdx;
+ PrimitiveTallies fTotalPrimitiveCounts;
+ };
+
+ // Defines a sub-batch that will be drawn with the given scissor rect. Start indices are deduced
+ // by looking at the previous ScissorSubBatch in the list.
+ struct ScissorSubBatch {
+ PrimitiveTallies fEndPrimitiveIndices;
+ SkIRect fScissor;
+ };
+
+ void emitTessellatedFan(
+ const GrTessellator::WindingVertex*, int numVertices, const Sk2f& devToAtlasOffset,
+ GrCCCoverageProcessor::TriPointInstance::Ordering,
+ GrCCCoverageProcessor::TriPointInstance*, GrCCCoverageProcessor::QuadPointInstance*,
+ GrCCFillGeometry::PrimitiveTallies*);
+ void drawPrimitives(GrOpFlushState*, const GrCCCoverageProcessor&, const GrPipeline&, BatchID,
+ int PrimitiveTallies::*instanceType, const SkIRect& drawBounds) const;
+
+ const Algorithm fAlgorithm;
+ GrCCFillGeometry fGeometry;
+ SkSTArray<32, PathInfo, true> fPathInfos;
+ SkSTArray<32, Batch, true> fBatches;
+ SkSTArray<32, ScissorSubBatch, true> fScissorSubBatches;
+ PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes];
+ int fMaxMeshesPerDraw = 0;
+
+ sk_sp<GrGpuBuffer> fInstanceBuffer;
+ PrimitiveTallies fBaseInstances[kNumScissorModes];
+ mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer;
+ mutable SkSTArray<32, SkIRect> fScissorRectScratchBuffer;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.cpp
new file mode 100644
index 0000000000..347ce3dca5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.cpp
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCPathCache.h"
+
+#include "include/private/SkNx.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrProxyProvider.h"
+
+static constexpr int kMaxKeyDataCountU32 = 256; // 1kB of uint32_t's.
+
+DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);
+
+static inline uint32_t next_path_cache_id() {
+ static std::atomic<uint32_t> gNextID(1);
+ for (;;) {
+ uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
+ if (SK_InvalidUniqueID != id) {
+ return id;
+ }
+ }
+}
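+// (Illustrative note, not upstream commentary: gNextID starts at 1, so the retry loop above
+// only matters on the rare wraparound where fetch_add returns SK_InvalidUniqueID (zero) after
+// 2^32 increments; every other call returns its id on the first iteration.)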
+
+static inline bool SkShouldPostMessageToBus(
+ const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
+ return key->pathCacheUniqueID() == msgBusUniqueID;
+}
+
+// The maximum number of entries we allow in our own local path cache.
+static constexpr int kMaxCacheCount = 1 << 16;
+
+
+GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
+ : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
+ SkASSERT(!m.hasPerspective());
+ Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
+ Sk2f transFloor;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // On Android framework we pre-round view matrix translates to integers for better caching.
+ transFloor = translate;
+#else
+ transFloor = translate.floor();
+ (translate - transFloor).store(fSubpixelTranslate);
+#endif
+ shift->set((int)transFloor[0], (int)transFloor[1]);
+ SkASSERT((float)shift->fX == transFloor[0]); // Make sure transFloor had integer values.
+ SkASSERT((float)shift->fY == transFloor[1]);
+}
+
+inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
+ const GrCCPathCache::MaskTransform& b) {
+ if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
+ return false;
+ }
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ if (((Sk2f::Load(a.fSubpixelTranslate) -
+ Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
+ return false;
+ }
+#endif
+ return true;
+}
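+// (Illustrative example, not upstream commentary: MaskTransforms with bit-identical 2x2
+// components and subpixel translates differing by, say, (0.002, 0) compare fuzzy-equal, since
+// 0.002 <= 1/256 (~0.0039); a difference of (0.01, 0) fails the test and the cached mask gets
+// re-rendered for the new transform.)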
+
+sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
+ int dataCountU32, const void* data) {
+ void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
+ sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
+ if (data) {
+ memcpy(key->data(), data, key->dataSizeInBytes());
+ }
+ return key;
+}
+
+void GrCCPathCache::Key::operator delete(void* p) { ::operator delete(p); }
+
+const uint32_t* GrCCPathCache::Key::data() const {
+ // The shape key is a variable-length footer to the entry allocation.
+ return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
+}
+
+uint32_t* GrCCPathCache::Key::data() {
+ // The shape key is a variable-length footer to the entry allocation.
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
+}
+
+void GrCCPathCache::Key::onChange() {
+ // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
+ SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
+}
+
+GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
+ : fContextUniqueID(contextUniqueID)
+ , fInvalidatedKeysInbox(next_path_cache_id())
+ , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
+}
+
+GrCCPathCache::~GrCCPathCache() {
+ while (!fLRU.isEmpty()) {
+ this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
+ }
+ SkASSERT(0 == fHashTable.count()); // Ensure the hash table and LRU list were coherent.
+
+ // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
+    // We purge via the message bus because we don't have direct access to the resource cache here.
+ for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
+ GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
+ }
+ for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
+ GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
+ }
+}
+
+namespace {
+
+// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
+class WriteKeyHelper {
+public:
+ static constexpr int kStrokeWidthIdx = 0;
+ static constexpr int kStrokeMiterIdx = 1;
+ static constexpr int kStrokeCapJoinIdx = 2;
+ static constexpr int kShapeUnstyledKeyIdx = 3;
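+
+    // Resulting key layout, as written by write() below: out[0] = stroke width bits (~0 for
+    // fills), out[1] = miter limit bits, out[2] = (cap << 16) | join, and out[3..] holds the
+    // GrShape's unstyled key.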
+
+ WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
+
+ // Returns the total number of uint32_t's to allocate for the key.
+ int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }
+
+ // Writes the key data to out[].
+ void write(const GrShape& shape, uint32_t* out) {
+ // Stroke key.
+ // We don't use GrStyle::WriteKey() because it does not account for hairlines.
+ // http://skbug.com/8273
+ SkASSERT(!shape.style().hasPathEffect());
+ const SkStrokeRec& stroke = shape.style().strokeRec();
+ if (stroke.isFillStyle()) {
+ // Use a value for width that won't collide with a valid fp32 value >= 0.
+ out[kStrokeWidthIdx] = ~0;
+ out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
+ } else {
+ float width = stroke.getWidth(), miterLimit = stroke.getMiter();
+ memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
+ memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
+ out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
+ GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
+ }
+
+ // Shape unstyled key.
+ shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
+ }
+
+private:
+ int fShapeUnstyledKeyCount;
+};
+
+}
+
+GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
+ GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
+ const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
+ if (!shape.hasUnstyledKey()) {
+ return OnFlushEntryRef();
+ }
+
+ WriteKeyHelper writeKeyHelper(shape);
+ if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
+ return OnFlushEntryRef();
+ }
+
+ SkASSERT(fScratchKey->unique());
+ fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
+ writeKeyHelper.write(shape, fScratchKey->data());
+
+ MaskTransform m(viewMatrix, maskShift);
+ GrCCPathCacheEntry* entry = nullptr;
+ if (HashNode* node = fHashTable.find(*fScratchKey)) {
+ entry = node->entry();
+ SkASSERT(fLRU.isInList(entry));
+
+ if (!fuzzy_equals(m, entry->fMaskTransform)) {
+ // The path was reused with an incompatible matrix.
+ if (entry->unique()) {
+ // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
+ SkASSERT(0 == entry->fOnFlushRefCnt); // Because we are unique.
+ entry->fMaskTransform = m;
+ entry->fHitCount = 0;
+ entry->fHitRect = SkIRect::MakeEmpty();
+ entry->releaseCachedAtlas(this);
+ } else {
+ this->evict(*fScratchKey);
+ entry = nullptr;
+ }
+ }
+ }
+
+ if (!entry) {
+ if (fHashTable.count() >= kMaxCacheCount) {
+ SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
+ SkASSERT(node && node->entry() == fLRU.tail());
+ this->evict(*fLRU.tail()->fCacheKey); // We've exceeded our limit.
+ }
+
+ // Create a new entry in the cache.
+ sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
+ writeKeyHelper.allocCountU32(), fScratchKey->data());
+ SkASSERT(*permanentKey == *fScratchKey);
+ SkASSERT(!fHashTable.find(*permanentKey));
+ entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();
+
+ SkASSERT(fHashTable.count() <= kMaxCacheCount);
+ } else {
+ fLRU.remove(entry); // Will be re-added at head.
+ }
+
+ SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
+ SkASSERT(node && node->entry() == entry);
+ fLRU.addToHead(entry);
+
+ if (0 == entry->fOnFlushRefCnt) {
+ // Only update the time stamp and hit count if we haven't seen this entry yet during the
+ // current flush.
+ entry->fTimestamp = this->quickPerFlushTimestamp();
+ ++entry->fHitCount;
+
+ if (entry->fCachedAtlas) {
+ SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
+ SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
+ if (!entry->fCachedAtlas->getOnFlushProxy()) {
+ auto ct = GrCCAtlas::CoverageTypeToColorType(entry->fCachedAtlas->coverageType());
+ if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
+ entry->fCachedAtlas->textureKey(), ct, GrCCAtlas::kTextureOrigin,
+ GrSurfaceProxy::UseAllocator::kNo)) {
+ entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
+ }
+ }
+ if (!entry->fCachedAtlas->getOnFlushProxy()) {
+ // Our atlas's backing texture got purged from the GrResourceCache. Release the
+ // cached atlas.
+ entry->releaseCachedAtlas(this);
+ }
+ }
+ }
+ entry->fHitRect.join(clippedDrawBounds.makeOffset(-*maskShift));
+ SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
+ return OnFlushEntryRef::OnFlushRef(entry);
+}
+
+void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
+ if (!entry) {
+ HashNode* node = fHashTable.find(key);
+ SkASSERT(node);
+ entry = node->entry();
+ }
+ SkASSERT(*entry->fCacheKey == key);
+ SkASSERT(!entry->hasBeenEvicted());
+ entry->fCacheKey->markShouldUnregisterFromPath(); // Unregister the path listener.
+ entry->releaseCachedAtlas(this);
+ fLRU.remove(entry);
+ fHashTable.remove(key);
+}
+
+void GrCCPathCache::doPreFlushProcessing() {
+ this->evictInvalidatedCacheKeys();
+
+ // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
+ fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
+}
+
+void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
+ const GrStdSteadyClock::time_point& purgeTime) {
+ this->evictInvalidatedCacheKeys();
+
+#ifdef SK_DEBUG
+ auto lastTimestamp = (fLRU.isEmpty())
+ ? GrStdSteadyClock::time_point::max()
+ : fLRU.tail()->fTimestamp;
+#endif
+
+ // Evict every entry from our local path cache whose timestamp is older than purgeTime.
+ while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
+#ifdef SK_DEBUG
+ // Verify that fLRU is sorted by timestamp.
+ auto timestamp = fLRU.tail()->fTimestamp;
+ SkASSERT(timestamp >= lastTimestamp);
+ lastTimestamp = timestamp;
+#endif
+ this->evict(*fLRU.tail()->fCacheKey);
+ }
+
+ // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
+ this->purgeInvalidatedAtlasTextures(proxyProvider);
+}
+
+void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
+ for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+ onFlushRP->removeUniqueKeyFromProxy(proxy.get());
+ }
+ fInvalidatedProxies.reset();
+
+ for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+ onFlushRP->processInvalidUniqueKey(key);
+ }
+ fInvalidatedProxyUniqueKeys.reset();
+}
+
+void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
+ for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+ proxyProvider->removeUniqueKeyFromProxy(proxy.get());
+ }
+ fInvalidatedProxies.reset();
+
+ for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+ proxyProvider->processInvalidUniqueKey(key, nullptr,
+ GrProxyProvider::InvalidateGPUResource::kYes);
+ }
+ fInvalidatedProxyUniqueKeys.reset();
+}
+
+void GrCCPathCache::evictInvalidatedCacheKeys() {
+ SkTArray<sk_sp<Key>> invalidatedKeys;
+ fInvalidatedKeysInbox.poll(&invalidatedKeys);
+ for (const sk_sp<Key>& key : invalidatedKeys) {
+ bool isInCache = !key->shouldUnregisterFromPath(); // Gets set upon exiting the cache.
+ if (isInCache) {
+ this->evict(*key);
+ }
+ }
+}
+
+GrCCPathCache::OnFlushEntryRef
+GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
+ entry->ref();
+ ++entry->fOnFlushRefCnt;
+ if (entry->fCachedAtlas) {
+ entry->fCachedAtlas->incrOnFlushRefCnt();
+ }
+ return OnFlushEntryRef(entry);
+}
+
+GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
+ if (!fEntry) {
+ return;
+ }
+ --fEntry->fOnFlushRefCnt;
+ SkASSERT(fEntry->fOnFlushRefCnt >= 0);
+ if (fEntry->fCachedAtlas) {
+ fEntry->fCachedAtlas->decrOnFlushRefCnt();
+ }
+ fEntry->unref();
+}
+
+
+void GrCCPathCacheEntry::setCoverageCountAtlas(
+ GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
+ const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
+ SkASSERT(fOnFlushRefCnt > 0);
+ SkASSERT(!fCachedAtlas); // Otherwise we would need to call releaseCachedAtlas().
+
+ if (this->hasBeenEvicted()) {
+ // This entry will never be found in the path cache again. Don't bother trying to save an
+ // atlas texture for it in the GrResourceCache.
+ return;
+ }
+
+ fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
+ fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
+ fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());
+
+ fAtlasOffset = atlasOffset + maskShift;
+
+ fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
+ fDevIBounds = devIBounds.makeOffset(-maskShift);
+}
+
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
+ GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
+ const SkIVector& newAtlasOffset) {
+ SkASSERT(!this->hasBeenEvicted());
+ SkASSERT(fOnFlushRefCnt > 0);
+ SkASSERT(fCachedAtlas);
+ SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());
+
+ ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);
+
+ fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
+ fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
+ fCachedAtlas->addPathPixels(this->height() * this->width());
+
+ fAtlasOffset = newAtlasOffset;
+ return releaseAtlasResult;
+}
+
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
+ GrCCPathCache* pathCache) {
+ ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
+ if (fCachedAtlas) {
+ result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
+ if (fOnFlushRefCnt) {
+ SkASSERT(fOnFlushRefCnt > 0);
+ fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
+ }
+ fCachedAtlas = nullptr;
+ }
+ return result;
+}
+
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
+ GrCCPathCache* pathCache, int numPixels) {
+ // Mark the pixels invalid in the cached atlas texture.
+ fNumInvalidatedPathPixels += numPixels;
+ SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
+ if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
+ // Too many invalidated pixels: purge the atlas texture from the resource cache.
+ if (fOnFlushProxy) {
+ // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
+ // reference on this atlas and expect to use our proxy during the current flush.
+ // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
+ pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
+ } else {
+ pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
+ }
+ fIsInvalidatedFromResourceCache = true;
+ return ReleaseAtlasResult::kDidInvalidateFromCache;
+ }
+ return ReleaseAtlasResult::kNone;
+}
+
+void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
+ SkASSERT(count > 0);
+ fOnFlushRefCnt -= count;
+ SkASSERT(fOnFlushRefCnt >= 0);
+ if (0 == fOnFlushRefCnt) {
+ // Don't hold the actual proxy past the end of the current flush.
+ SkASSERT(fOnFlushProxy);
+ fOnFlushProxy = nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.h b/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.h
new file mode 100644
index 0000000000..a49d7585be
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPathCache.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPathCache_DEFINED
+#define GrCCPathCache_DEFINED
+
+#include "include/private/SkTHash.h"
+#include "src/core/SkExchange.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/gpu/ccpr/GrCCAtlas.h"
+#include "src/gpu/ccpr/GrCCPathProcessor.h"
+#include "src/gpu/geometry/GrShape.h"
+
+class GrCCPathCacheEntry;
+class GrShape;
+
+/**
+ * This class implements an LRU cache that maps from GrShape to GrCCPathCacheEntry objects. Shapes
+ * are only given one entry in the cache, so any time they are accessed with a different matrix, the
+ * old entry gets evicted.
+ */
+class GrCCPathCache {
+public:
+ GrCCPathCache(uint32_t contextUniqueID);
+ ~GrCCPathCache();
+
+ class Key : public SkPathRef::GenIDChangeListener {
+ public:
+ static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
+ const void* data = nullptr);
+
+ uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+
+ int dataSizeInBytes() const { return fDataSizeInBytes; }
+ const uint32_t* data() const;
+
+ void resetDataCountU32(int dataCountU32) {
+ SkASSERT(dataCountU32 <= fDataReserveCountU32);
+ fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
+ }
+ uint32_t* data();
+
+ bool operator==(const Key& that) const {
+ return fDataSizeInBytes == that.fDataSizeInBytes &&
+ !memcmp(this->data(), that.data(), fDataSizeInBytes);
+ }
+
+ // Called when our corresponding path is modified or deleted. Not threadsafe.
+ void onChange() override;
+
+ // TODO(b/30449950): use sized delete once P0722R3 is available
+ static void operator delete(void* p);
+
+ private:
+ Key(uint32_t pathCacheUniqueID, int dataCountU32)
+ : fPathCacheUniqueID(pathCacheUniqueID)
+ , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
+ SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
+ SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+ }
+
+ const uint32_t fPathCacheUniqueID;
+ int fDataSizeInBytes;
+ SkDEBUGCODE(const int fDataReserveCountU32);
+ // The GrShape's unstyled key is stored as a variable-length footer to this class. GetKey
+ // provides access to it.
+ };
+
+ // Stores the components of a transformation that affect a path mask (i.e. everything but
+ // integer translation). During construction, any integer portions of the matrix's translate are
+ // shaved off and returned to the caller. The caller is responsible for those integer shifts.
+ struct MaskTransform {
+ MaskTransform(const SkMatrix& m, SkIVector* shift);
+ float fMatrix2x2[4];
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
+        // On AOSP we follow HWUI's lead and ignore the subpixel translate.
+ float fSubpixelTranslate[2];
+#endif
+ };
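+    // (Illustrative example: a view matrix translate of (10.75, -3.25) yields shift = (10, -4)
+    // for the caller to apply and, outside AOSP, a stored fSubpixelTranslate of (0.75, 0.75).)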
+
+ // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush.
+ class OnFlushEntryRef : SkNoncopyable {
+ public:
+ static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*);
+ OnFlushEntryRef() = default;
+ OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(skstd::exchange(ref.fEntry, nullptr)) {}
+ ~OnFlushEntryRef();
+
+ GrCCPathCacheEntry* get() const { return fEntry; }
+ GrCCPathCacheEntry* operator->() const { return fEntry; }
+ GrCCPathCacheEntry& operator*() const { return *fEntry; }
+ explicit operator bool() const { return fEntry; }
+ void operator=(OnFlushEntryRef&& ref) { fEntry = skstd::exchange(ref.fEntry, nullptr); }
+
+ private:
+ OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {}
+ GrCCPathCacheEntry* fEntry = nullptr;
+ };
+
+ // Finds an entry in the cache that matches the given shape and transformation matrix.
+ // 'maskShift' is filled with an integer post-translate that the caller must apply when drawing
+ // the entry's mask to the device.
+ //
+ // NOTE: Shapes are only given one entry, so any time they are accessed with a new
+ // transformation, the old entry gets evicted.
+ OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&,
+ const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix,
+ SkIVector* maskShift);
+
+ void doPreFlushProcessing();
+
+ void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime);
+
+ // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
+ // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
+    // This call is available with two different "provider" objects, to accommodate whatever might
+ // be available at the callsite.
+ void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*);
+ void purgeInvalidatedAtlasTextures(GrProxyProvider*);
+
+private:
+ // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
+ // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
+ // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
+ // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
+ class HashNode : SkNoncopyable {
+ public:
+ static const Key& GetKey(const HashNode&);
+ inline static uint32_t Hash(const Key& key) {
+ return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
+ }
+
+ HashNode() = default;
+ HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
+ HashNode(HashNode&& node)
+ : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
+ SkASSERT(!node.fEntry);
+ }
+
+ ~HashNode();
+
+ void operator=(HashNode&& node);
+
+ GrCCPathCacheEntry* entry() const { return fEntry.get(); }
+
+ private:
+ GrCCPathCache* fPathCache = nullptr;
+ sk_sp<GrCCPathCacheEntry> fEntry;
+ };
+
+ GrStdSteadyClock::time_point quickPerFlushTimestamp() {
+ // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read.
+ if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) {
+ fPerFlushTimestamp = GrStdSteadyClock::now();
+ }
+ return fPerFlushTimestamp;
+ }
+
+ void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr);
+
+ // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via
+ // SkPath listeners.
+ void evictInvalidatedCacheKeys();
+
+ const uint32_t fContextUniqueID;
+
+ SkTHashTable<HashNode, const Key&> fHashTable;
+ SkTInternalLList<GrCCPathCacheEntry> fLRU;
+ SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
+ sk_sp<Key> fScratchKey; // Reused for creating a temporary key in the find() method.
+
+    // We only read the clock once per flush, and cache it in this variable. This prevents the
+    // excessive clock reads for cache timestamps that could otherwise degrade performance.
+ GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
+
+ // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
+ // textures in these two members. We hold these until we purge them from the GrResourceCache
+    // (e.g., via purgeInvalidatedAtlasTextures()).
+ SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies;
+ SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys;
+
+ friend class GrCCCachedAtlas; // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys.
+
+public:
+ const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const;
+ const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const;
+};
+
+/**
+ * This class stores all the data necessary to draw a specific path + matrix combination from their
+ * corresponding cached atlas.
+ */
+class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
+public:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
+
+ ~GrCCPathCacheEntry() {
+ SkASSERT(this->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
+ SkASSERT(!fCachedAtlas);
+ SkASSERT(0 == fOnFlushRefCnt);
+ }
+
+ const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
+
+ // The number of flushes during which this specific entry (path + matrix combination) has been
+ // pulled from the path cache. If a path is pulled from the cache more than once in a single
+ // flush, the hit count is only incremented once.
+ //
+ // If the entry did not previously exist, its hit count will be 1.
+ int hitCount() const { return fHitCount; }
+
+    // The accumulated region of the path that has been drawn during the lifetime of this cache
+ // entry (as defined by the 'clippedDrawBounds' parameter for GrCCPathCache::find).
+ const SkIRect& hitRect() const { return fHitRect; }
+
+ const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
+
+ const SkIRect& devIBounds() const { return fDevIBounds; }
+ int width() const { return fDevIBounds.width(); }
+ int height() const { return fDevIBounds.height(); }
+
+ enum class ReleaseAtlasResult : bool {
+ kNone,
+ kDidInvalidateFromCache
+ };
+
+ // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
+ // The caller will stash this atlas texture away after drawing, and during the next flush,
+ // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
+ void setCoverageCountAtlas(
+ GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset,
+ const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift);
+
+ // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
+    // the entry at the new atlas and updates the GrCCCachedAtlas data.
+ ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
+ GrCCAtlas*, const SkIVector& newAtlasOffset);
+
+private:
+ using MaskTransform = GrCCPathCache::MaskTransform;
+
+ GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
+ : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
+ }
+
+ bool hasBeenEvicted() const { return fCacheKey->shouldUnregisterFromPath(); }
+
+ // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
+ // resource cache if needed.
+ ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*);
+
+ sk_sp<GrCCPathCache::Key> fCacheKey;
+ GrStdSteadyClock::time_point fTimestamp;
+ int fHitCount = 0;
+ SkIRect fHitRect = SkIRect::MakeEmpty();
+
+ sk_sp<GrCCCachedAtlas> fCachedAtlas;
+ SkIVector fAtlasOffset;
+
+ MaskTransform fMaskTransform;
+ GrOctoBounds fOctoBounds;
+ SkIRect fDevIBounds;
+
+ int fOnFlushRefCnt = 0;
+
+ friend class GrCCPathCache;
+ friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
+ uint64_t color, GrFillRule); // To access data.
+
+public:
+ int testingOnly_peekOnFlushRefCnt() const;
+};
+
+/**
+ * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many
+ * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas.
+ *
+ * We use this object to track the percentage of the original atlas pixels that could still
+ * potentially be reused (i.e., those which still represent an extant path). When the percentage
+ * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
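+ * (For example: an atlas tracking 40,000 path pixels would be purged once at least 20,000 of
+ * them have been invalidated.)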
+ *
+ * This object also holds a ref on the atlas's actual texture proxy during flush. When
+ * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null.
+ */
+class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> {
+public:
+ using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
+
+ GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey,
+ sk_sp<GrTextureProxy> onFlushProxy)
+ : fCoverageType(type)
+ , fTextureKey(textureKey)
+ , fOnFlushProxy(std::move(onFlushProxy)) {}
+
+ ~GrCCCachedAtlas() {
+ SkASSERT(!fOnFlushProxy);
+ SkASSERT(!fOnFlushRefCnt);
+ }
+
+ GrCCAtlas::CoverageType coverageType() const { return fCoverageType; }
+ const GrUniqueKey& textureKey() const { return fTextureKey; }
+
+ GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); }
+
+ void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) {
+ SkASSERT(!fOnFlushProxy);
+ fOnFlushProxy = std::move(proxy);
+ }
+
+ void addPathPixels(int numPixels) { fNumPathPixels += numPixels; }
+ ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels);
+
+ int peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
+ void incrOnFlushRefCnt(int count = 1) const {
+ SkASSERT(count > 0);
+ SkASSERT(fOnFlushProxy);
+ fOnFlushRefCnt += count;
+ }
+ void decrOnFlushRefCnt(int count = 1) const;
+
+private:
+ const GrCCAtlas::CoverageType fCoverageType;
+ const GrUniqueKey fTextureKey;
+
+ int fNumPathPixels = 0;
+ int fNumInvalidatedPathPixels = 0;
+ bool fIsInvalidatedFromResourceCache = false;
+
+ mutable sk_sp<GrTextureProxy> fOnFlushProxy;
+ mutable int fOnFlushRefCnt = 0;
+
+public:
+ int testingOnly_peekOnFlushRefCnt() const;
+};
+
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
+ const MaskTransform& m, const GrShape& shape)
+ : fPathCache(pathCache)
+ , fEntry(new GrCCPathCacheEntry(key, m)) {
+ SkASSERT(shape.hasUnstyledKey());
+ shape.addGenIDChangeListener(std::move(key));
+}
+
+inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
+ const GrCCPathCache::HashNode& node) {
+ return *node.entry()->fCacheKey;
+}
+
+inline GrCCPathCache::HashNode::~HashNode() {
+ SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
+}
+
+inline void GrCCPathCache::HashNode::operator=(HashNode&& node) {
+ SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
+ fEntry = skstd::exchange(node.fEntry, nullptr);
+}
+
+inline void GrCCPathProcessor::Instance::set(
+ const GrCCPathCacheEntry& entry, const SkIVector& shift, uint64_t color,
+ GrFillRule fillRule) {
+ float dx = (float)shift.fX, dy = (float)shift.fY;
+ this->set(entry.fOctoBounds.makeOffset(dx, dy), entry.fAtlasOffset - shift, color, fillRule);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.cpp
new file mode 100644
index 0000000000..9fba60df05
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCPathProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+// Paths are drawn as octagons. Each point on the octagon is the intersection of two lines: one edge
+// from the path's bounding box and one edge from its 45-degree bounding box. The selectors
+// below indicate one corner from the bounding box, paired with a corner from the 45-degree bounding
+// box. The octagon vertex is the point that lies between these two corners, found by intersecting
+// their edges.
+static constexpr float kOctoEdgeNorms[8*4] = {
+ // bbox // bbox45
+ 0,0, 0,0,
+ 0,0, 1,0,
+ 1,0, 1,0,
+ 1,0, 1,1,
+ 1,1, 1,1,
+ 1,1, 0,1,
+ 0,1, 0,1,
+ 0,1, 0,0,
+};
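+// (Illustrative note: each row above is a pair of componentwise mix() selectors, consumed by
+// the vertex shader as "corners". A 0 selects from the respective box's .xy (left/top) and a 1
+// selects from its .zw (right/bottom); e.g. the row {1,0, 1,1} pairs the bounding box's
+// top-right corner with the 45-degree box's .zw corner.)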
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
+
+sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms),
+ kOctoEdgeNorms, gVertexBufferKey);
+}
+
+static constexpr uint16_t kRestartStrip = 0xffff;
+
+static constexpr uint16_t kOctoIndicesAsStrips[] = {
+ 3, 4, 2, 0, 1, kRestartStrip, // First half.
+ 7, 0, 6, 4, 5 // Second half.
+};
+
+static constexpr uint16_t kOctoIndicesAsTris[] = {
+ // First half.
+ 3, 4, 2,
+ 4, 0, 2,
+ 2, 0, 1,
+
+ // Second half.
+ 7, 0, 6,
+ 0, 4, 6,
+ 6, 4, 5,
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
+
+constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[];
+constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kCornersAttrib;
+
+sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
+ if (onFlushRP->caps()->usePrimitiveRestart()) {
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
+ sizeof(kOctoIndicesAsStrips), kOctoIndicesAsStrips,
+ gIndexBufferKey);
+ } else {
+ return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
+ sizeof(kOctoIndicesAsTris), kOctoIndicesAsTris,
+ gIndexBufferKey);
+ }
+}
+
+GrCCPathProcessor::GrCCPathProcessor(CoverageMode coverageMode, const GrTexture* atlasTexture,
+ const GrSwizzle& swizzle, GrSurfaceOrigin atlasOrigin,
+ const SkMatrix& viewMatrixIfUsingLocalCoords)
+ : INHERITED(kGrCCPathProcessor_ClassID)
+ , fCoverageMode(coverageMode)
+ , fAtlasAccess(atlasTexture->texturePriv().textureType(), GrSamplerState::ClampNearest(),
+ swizzle)
+ , fAtlasSize(SkISize::Make(atlasTexture->width(), atlasTexture->height()))
+ , fAtlasOrigin(atlasOrigin) {
+ // TODO: Can we just assert that atlas has GrCCAtlas::kTextureOrigin and remove fAtlasOrigin?
+ this->setInstanceAttributes(kInstanceAttribs, SK_ARRAY_COUNT(kInstanceAttribs));
+ SkASSERT(this->instanceStride() == sizeof(Instance));
+
+ this->setVertexAttributes(&kCornersAttrib, 1);
+ this->setTextureSamplerCnt(1);
+
+ if (!viewMatrixIfUsingLocalCoords.invert(&fLocalMatrix)) {
+ fLocalMatrix.setIdentity();
+ }
+}
+
+class GrCCPathProcessor::Impl : public GrGLSLGeometryProcessor {
+public:
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;
+
+private:
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const auto& proc = primProc.cast<GrCCPathProcessor>();
+ pdman.set2f(
+ fAtlasAdjustUniform, 1.0f / proc.fAtlasSize.fWidth, 1.0f / proc.fAtlasSize.fHeight);
+ this->setTransformDataHelper(proc.fLocalMatrix, pdman, &transformIter);
+ }
+
+ GrGLSLUniformHandler::UniformHandle fAtlasAdjustUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLSLPrimitiveProcessor* GrCCPathProcessor::createGLSLInstance(const GrShaderCaps&) const {
+ return new Impl();
+}
+
+void GrCCPathProcessor::drawPaths(GrOpFlushState* flushState, const GrPipeline& pipeline,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrCCPerFlushResources& resources, int baseInstance,
+ int endInstance, const SkRect& bounds) const {
+ const GrCaps& caps = flushState->caps();
+ GrPrimitiveType primitiveType = caps.usePrimitiveRestart()
+ ? GrPrimitiveType::kTriangleStrip
+ : GrPrimitiveType::kTriangles;
+ int numIndicesPerInstance = caps.usePrimitiveRestart()
+ ? SK_ARRAY_COUNT(kOctoIndicesAsStrips)
+ : SK_ARRAY_COUNT(kOctoIndicesAsTris);
+ GrMesh mesh(primitiveType);
+ auto enablePrimitiveRestart = GrPrimitiveRestart(flushState->caps().usePrimitiveRestart());
+
+ mesh.setIndexedInstanced(resources.refIndexBuffer(), numIndicesPerInstance,
+ resources.refInstanceBuffer(), endInstance - baseInstance,
+ baseInstance, enablePrimitiveRestart);
+ mesh.setVertexData(resources.refVertexBuffer());
+
+ GrProgramInfo programInfo(flushState->drawOpArgs().numSamples(),
+ flushState->drawOpArgs().origin(),
+ pipeline,
+ *this,
+ fixedDynamicState,
+ nullptr, 0);
+
+ flushState->opsRenderPass()->draw(programInfo, &mesh, 1, bounds);
+}
+
+void GrCCPathProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ using Interpolation = GrGLSLVaryingHandler::Interpolation;
+
+ const GrCCPathProcessor& proc = args.fGP.cast<GrCCPathProcessor>();
+ GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ bool isCoverageCount = (CoverageMode::kCoverageCount == proc.fCoverageMode);
+
+ const char* atlasAdjust;
+ fAtlasAdjustUniform = uniHandler->addUniform(
+ kVertex_GrShaderFlag, kFloat2_GrSLType, "atlas_adjust", &atlasAdjust);
+
+ varyingHandler->emitAttributes(proc);
+
+ GrGLSLVarying texcoord((isCoverageCount) ? kFloat3_GrSLType : kFloat2_GrSLType);
+ varyingHandler->addVarying("texcoord", &texcoord);
+
+ GrGLSLVarying color(kHalf4_GrSLType);
+ varyingHandler->addPassThroughAttribute(
+ kInstanceAttribs[kColorAttribIdx], args.fOutputColor, Interpolation::kCanBeFlat);
+
+ // The vertex shader bloats and intersects the devBounds and devBounds45 rectangles, in order to
+ // find an octagon that circumscribes the (bloated) path.
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+
+ // Are we clockwise? (Positive wind => nonzero fill rule.)
+    // Or counter-clockwise? (Negative wind => even/odd fill rule.)
+ v->codeAppendf("float wind = sign(devbounds.z - devbounds.x);");
+
+ // Find our reference corner from the device-space bounding box.
+ v->codeAppendf("float2 refpt = mix(devbounds.xy, devbounds.zw, corners.xy);");
+
+ // Find our reference corner from the 45-degree bounding box.
+ v->codeAppendf("float2 refpt45 = mix(devbounds45.xy, devbounds45.zw, corners.zw);");
+ // Transform back to device space.
+ v->codeAppendf("refpt45 *= float2x2(+1, +1, -wind, +wind) * .5;");
+
+ // Find the normals to each edge, then intersect them to find our octagon vertex.
+ v->codeAppendf("float2x2 N = float2x2("
+ "corners.z + corners.w - 1, corners.w - corners.z, "
+ "corners.xy*2 - 1);");
+ v->codeAppendf("N = float2x2(wind, 0, 0, 1) * N;");
+ v->codeAppendf("float2 K = float2(dot(N[0], refpt), dot(N[1], refpt45));");
+ v->codeAppendf("float2 octocoord = K * inverse(N);");
+
+ // Round the octagon out to ensure we rasterize every pixel the path might touch. (Positive
+ // bloatdir means we should take the "ceil" and negative means to take the "floor".)
+ //
+ // NOTE: If we were just drawing a rect, ceil/floor would be enough. But since there are also
+ // diagonals in the octagon that cross through pixel centers, we need to outset by another
+ // quarter px to ensure those pixels get rasterized.
+ v->codeAppendf("float2 bloatdir = (0 != N[0].x) "
+ "? float2(N[0].x, N[1].y)"
+ ": float2(N[1].x, N[0].y);");
+ v->codeAppendf("octocoord = (ceil(octocoord * bloatdir - 1e-4) + 0.25) * bloatdir;");
+ v->codeAppendf("float2 atlascoord = octocoord + float2(dev_to_atlas_offset);");
+
+ // Convert to atlas coordinates in order to do our texture lookup.
+ if (kTopLeft_GrSurfaceOrigin == proc.fAtlasOrigin) {
+ v->codeAppendf("%s.xy = atlascoord * %s;", texcoord.vsOut(), atlasAdjust);
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == proc.fAtlasOrigin);
+ v->codeAppendf("%s.xy = float2(atlascoord.x * %s.x, 1 - atlascoord.y * %s.y);",
+ texcoord.vsOut(), atlasAdjust, atlasAdjust);
+ }
+ if (isCoverageCount) {
+ v->codeAppendf("%s.z = wind * .5;", texcoord.vsOut());
+ }
+
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "octocoord");
+ this->emitTransforms(v, varyingHandler, uniHandler, gpArgs->fPositionVar, proc.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+
+ // Fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+
+ // Look up coverage in the atlas.
+ f->codeAppendf("half coverage = ");
+ f->appendTextureLookup(args.fTexSamplers[0], SkStringPrintf("%s.xy", texcoord.fsIn()).c_str(),
+ kFloat2_GrSLType);
+ f->codeAppendf(".a;");
+
+ if (isCoverageCount) {
+ f->codeAppendf("coverage = abs(coverage);");
+
+ // Scale coverage count by .5. Make it negative for even-odd paths and positive for
+ // winding ones. Clamp winding coverage counts at 1.0 (i.e. min(coverage/2, .5)).
+ f->codeAppendf("coverage = min(abs(coverage) * half(%s.z), .5);", texcoord.fsIn());
+
+ // For negative values, this finishes the even-odd sawtooth function. Since positive
+ // (winding) values were clamped at "coverage/2 = .5", this only undoes the previous
+ // multiply by .5.
+ f->codeAppend ("coverage = 1 - abs(fract(coverage) * 2 - 1);");
+ }
+
+ f->codeAppendf("%s = half4(coverage);", args.fOutputCoverage);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.h
new file mode 100644
index 0000000000..6ad872043e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPathProcessor.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPathProcessor_DEFINED
+#define GrCCPathProcessor_DEFINED
+
+#include <array>
+#include "include/core/SkPath.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/ccpr/GrCCAtlas.h"
+#include "src/gpu/ccpr/GrOctoBounds.h"
+
+class GrCCPathCacheEntry;
+class GrCCPerFlushResources;
+class GrOnFlushResourceProvider;
+class GrOpFlushState;
+
+/**
+ * This class draws AA paths using the coverage count masks produced by GrCCCoverageProcessor.
+ *
+ * Paths are drawn as bloated octagons, and coverage is derived from the coverage count mask and
+ * fill rule.
+ *
+ * To draw paths, the caller must set up an instance buffer as detailed below, then call drawPaths()
+ * providing its own instance buffer alongside the buffers found by calling FindIndexBuffer/
+ * FindVertexBuffer.
+ */
+class GrCCPathProcessor : public GrGeometryProcessor {
+public:
+ struct Instance {
+ SkRect fDevBounds; // "right < left" indicates even-odd fill type.
+ SkRect fDevBounds45; // Bounding box in "| 1 -1 | * devCoords" space. See GrOctoBounds.
+ // | 1 1 |
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
+ uint64_t fColor; // Color always stored as 4 x fp16
+
+ void set(const GrOctoBounds&, const SkIVector& devToAtlasOffset, uint64_t, GrFillRule);
+ void set(const GrCCPathCacheEntry&, const SkIVector& shift, uint64_t, GrFillRule);
+ };
+
+ GR_STATIC_ASSERT(4 * 12 == sizeof(Instance));
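+    // (Size check, illustrative: two SkRects (16 bytes each) + SkIVector (8) + uint64_t color
+    // (8) = 48 bytes = 4 * 12.)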
+
+ static sk_sp<const GrGpuBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
+ static sk_sp<const GrGpuBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
+
+ enum class CoverageMode : bool {
+ kCoverageCount,
+ kLiteral
+ };
+
+ static CoverageMode GetCoverageMode(GrCCAtlas::CoverageType coverageType) {
+ return (GrCCAtlas::CoverageType::kFP16_CoverageCount == coverageType)
+ ? CoverageMode::kCoverageCount
+ : CoverageMode::kLiteral;
+ }
+
+ GrCCPathProcessor(
+ CoverageMode, const GrTexture* atlasTexture, const GrSwizzle&,
+ GrSurfaceOrigin atlasOrigin,
+ const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I());
+
+ const char* name() const override { return "GrCCPathProcessor"; }
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ b->add32((uint32_t)fCoverageMode);
+ }
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+ void drawPaths(GrOpFlushState*, const GrPipeline&, const GrPipeline::FixedDynamicState*,
+ const GrCCPerFlushResources&, int baseInstance, int endInstance,
+ const SkRect& bounds) const;
+
+private:
+ const TextureSampler& onTextureSampler(int) const override { return fAtlasAccess; }
+
+ const CoverageMode fCoverageMode;
+ const TextureSampler fAtlasAccess;
+ SkISize fAtlasSize;
+ GrSurfaceOrigin fAtlasOrigin;
+
+ SkMatrix fLocalMatrix;
+ static constexpr Attribute kInstanceAttribs[] = {
+ {"devbounds", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"devbounds45", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"dev_to_atlas_offset", kInt2_GrVertexAttribType, kInt2_GrSLType},
+ {"color", kHalf4_GrVertexAttribType, kHalf4_GrSLType}
+ };
+ static constexpr int kColorAttribIdx = 3;
+ static constexpr Attribute kCornersAttrib =
+ {"corners", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+
+ class Impl;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+inline void GrCCPathProcessor::Instance::set(
+ const GrOctoBounds& octoBounds, const SkIVector& devToAtlasOffset, uint64_t color,
+ GrFillRule fillRule) {
+ if (GrFillRule::kNonzero == fillRule) {
+ // We cover "nonzero" paths with clockwise triangles, which is the default result from
+ // normal octo bounds.
+ fDevBounds = octoBounds.bounds();
+ fDevBounds45 = octoBounds.bounds45();
+ } else {
+ // We cover "even/odd" paths with counterclockwise triangles. Here we reorder the bounding
+ // box vertices so the output is flipped horizontally.
+ fDevBounds.setLTRB(
+ octoBounds.right(), octoBounds.top(), octoBounds.left(), octoBounds.bottom());
+ fDevBounds45.setLTRB(
+ octoBounds.bottom45(), octoBounds.right45(), octoBounds.top45(),
+ octoBounds.left45());
+ }
+ fDevToAtlasOffset = devToAtlasOffset;
+ fColor = color;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.cpp
new file mode 100644
index 0000000000..4174dba318
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -0,0 +1,611 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSurfaceContextPriv.h"
+#include "src/gpu/ccpr/GrCCPathCache.h"
+#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
+#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
+#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
+#include "src/gpu/geometry/GrShape.h"
+
+using CoverageType = GrCCAtlas::CoverageType;
+using FillBatchID = GrCCFiller::BatchID;
+using StrokeBatchID = GrCCStroker::BatchID;
+using PathInstance = GrCCPathProcessor::Instance;
+
+static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
+static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;
+
+namespace {
+
+// Base class for an Op that renders a CCPR atlas.
+class AtlasOp : public GrDrawOp {
+public:
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override {
+ return GrProcessorSet::EmptySetAnalysis();
+ }
+ CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
+ // We will only make multiple copy ops if they have different source proxies.
+ // TODO: make use of texture chaining.
+ return CombineResult::kCannotCombine;
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+protected:
+ AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
+ const SkISize& drawBounds)
+ : GrDrawOp(classID)
+ , fResources(std::move(resources)) {
+ this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
+ GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
+ }
+
+ const sk_sp<const GrCCPerFlushResources> fResources;
+};
+
+// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
+class CopyAtlasOp : public AtlasOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(
+ GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
+ sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
+ const SkISize& drawBounds) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy), baseInstance,
+ endInstance, drawBounds);
+ }
+
+ const char* name() const override { return "CopyAtlasOp (CCPR)"; }
+
+ void visitProxies(const VisitProxyFunc& fn) const override {
+ fn(fSrcProxy.get(), GrMipMapped::kNo);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ SkASSERT(fSrcProxy);
+ auto srcProxy = fSrcProxy.get();
+ SkASSERT(srcProxy->isInstantiated());
+
+ auto coverageMode = GrCCPathProcessor::GetCoverageMode(
+ fResources->renderedPathCoverageType());
+ GrCCPathProcessor pathProc(coverageMode, srcProxy->peekTexture(),
+ srcProxy->textureSwizzle(), srcProxy->origin());
+
+ GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc,
+ flushState->drawOpArgs().outputSwizzle());
+ GrPipeline::FixedDynamicState dynamicState;
+ dynamicState.fPrimitiveProcessorTextures = &srcProxy;
+
+ pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
+ fEndInstance, this->bounds());
+ }
+
+private:
+ friend class ::GrOpMemoryPool; // for ctor
+
+ CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
+ int baseInstance, int endInstance, const SkISize& drawBounds)
+ : AtlasOp(ClassID(), std::move(resources), drawBounds)
+ , fSrcProxy(srcProxy)
+ , fBaseInstance(baseInstance)
+ , fEndInstance(endInstance) {
+ }
+ sk_sp<GrTextureProxy> fSrcProxy;
+ const int fBaseInstance;
+ const int fEndInstance;
+};
+
+// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
+template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(
+ GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
+ FillBatchID fillBatchID, StrokeBatchID strokeBatchID, const SkISize& drawBounds) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<RenderAtlasOp>(
+ std::move(resources), fillBatchID, strokeBatchID, drawBounds);
+ }
+
+ // GrDrawOp interface.
+ const char* name() const override { return "RenderAtlasOp (CCPR)"; }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ ProcessorType proc;
+ GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
+ flushState->drawOpArgs().outputSwizzle());
+ fResources->filler().drawFills(flushState, &proc, pipeline, fFillBatchID, fDrawBounds);
+ fResources->stroker().drawStrokes(flushState, &proc, fStrokeBatchID, fDrawBounds);
+ }
+
+private:
+ friend class ::GrOpMemoryPool; // for ctor
+
+ RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
+ StrokeBatchID strokeBatchID, const SkISize& drawBounds)
+ : AtlasOp(ClassID(), std::move(resources), drawBounds)
+ , fFillBatchID(fillBatchID)
+ , fStrokeBatchID(strokeBatchID)
+ , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
+ }
+
+ const FillBatchID fFillBatchID;
+ const StrokeBatchID fStrokeBatchID;
+ const SkIRect fDrawBounds;
+};
+
+}
+
+static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
+ return specs.fNumCachedPaths +
+           // Copied paths get two instances each: 1 for the copy + 1 for the draw.
+ (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
+ specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
+ // No clips in instance buffers.
+}
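+// (Illustrative example: specs with 3 cached paths, 2 copied fills, 1 copied stroke, and
+// 4 + 2 rendered fill/stroke paths size the buffer at 3 + (2 + 1) * 2 + 4 + 2 = 15 instances.)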
+
+GrCCPerFlushResources::GrCCPerFlushResources(
+ GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
+ const GrCCPerFlushResourceSpecs& specs)
+ // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
+ // (See transform_path_pts below.)
+ // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
+ : fLocalDevPtsBuffer(SkTMax(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
+ specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
+ , fFiller((CoverageType::kFP16_CoverageCount == coverageType)
+ ? GrCCFiller::Algorithm::kCoverageCount
+ : GrCCFiller::Algorithm::kStencilWindingCount,
+ specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
+ specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
+ specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
+ specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
+ , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
+ specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
+ specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
+ , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
+ onFlushRP->caps())
+ , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
+ , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
+ , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
+ , fInstanceBuffer(onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
+ inst_buffer_count(specs) * sizeof(PathInstance)))
+ , fNextCopyInstanceIdx(0)
+ , fNextPathInstanceIdx(
+ specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
+ if (!fIndexBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
+ return;
+ }
+ if (!fVertexBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
+ return;
+ }
+ if (!fInstanceBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
+ return;
+ }
+ fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
+ SkASSERT(fPathInstanceData);
+
+ if (CoverageType::kA8_Multisample == coverageType) {
+ int numRenderedPaths =
+ specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
+ specs.fNumClipPaths;
+ fStencilResolveBuffer = onFlushRP->makeBuffer(
+ GrGpuBufferType::kVertex,
+ numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
+ fStencilResolveInstanceData = static_cast<GrStencilAtlasOp::ResolveRectInstance*>(
+ fStencilResolveBuffer->map());
+ SkASSERT(fStencilResolveInstanceData);
+ SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
+ }
+
+ SkDEBUGCODE(fEndCopyInstance =
+ specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
+ SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
+}
+
+void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
+ GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
+ GrFillRule fillRule) {
+ using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
+ SkASSERT(this->isMapped());
+ SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
+
+ const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
+ SkASSERT(cachedAtlas);
+ SkASSERT(cachedAtlas->getOnFlushProxy());
+
+ if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
+ // This entry has already been upgraded to literal coverage. The path must have been drawn
+ // multiple times during the flush.
+ SkDEBUGCODE(--fEndCopyInstance);
+ return;
+ }
+
+ SkIVector newAtlasOffset;
+ if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
+ // We did not fit in the previous copy atlas and it was retired. We will render the ranges
+ // up until fCopyPathRanges.count() into the retired atlas during finalize().
+ retiredAtlas->setFillBatchID(fCopyPathRanges.count());
+ fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
+ }
+
+ this->recordCopyPathInstance(
+ *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));
+
+ sk_sp<GrTexture> previousAtlasTexture =
+ sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
+ GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
+ if (ReleaseAtlasResult::kDidInvalidateFromCache ==
+ entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
+        // This texture just got booted out of the cache. Keep it around in case we can recycle it
+        // for a new atlas. Recycling is safe because copying happens before rendering new paths,
+        // and every path from this atlas that we plan to use this flush will be copied to a new
+        // atlas. We never copy some paths and leave others behind.
+ fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
+ }
+}
+
+template<typename T, typename... Args>
+static void emplace_at_memcpy(SkTArray<T>* array, int idx, Args&&... args) {
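+    // This shifts the array tail with a raw memcpy, then constructs the new element in the
+    // vacated slot. An implicit assumption worth noting: T must tolerate bitwise relocation,
+    // which holds for CopyPathRange since sk_sp keeps no self-referential state.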
+ if (int moveCount = array->count() - idx) {
+ array->push_back();
+ T* location = array->begin() + idx;
+ memcpy(location+1, location, moveCount * sizeof(T));
+ new (location) T(std::forward<Args>(args)...);
+ } else {
+ array->emplace_back(std::forward<Args>(args)...);
+ }
+}
+
+void GrCCPerFlushResources::recordCopyPathInstance(
+ const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
+ sk_sp<GrTextureProxy> srcProxy) {
+ SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
+
+ // Write the instance at the back of the array.
+ int currentInstanceIdx = fNextCopyInstanceIdx++;
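+    // kWhite below packs four half-float 1.0 values (SK_Half1) into one uint64_t: an opaque
+    // white constant for the instance's half4 color.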
+ constexpr uint64_t kWhite = (((uint64_t) SK_Half1) << 0) |
+ (((uint64_t) SK_Half1) << 16) |
+ (((uint64_t) SK_Half1) << 32) |
+ (((uint64_t) SK_Half1) << 48);
+ fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);
+
+    // Percolate the instance toward the front of the buffer until it is contiguous with the
+    // other instances that share the same proxy.
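+    // (Walking the ranges from the back: one swap with a range's first instance hops the new
+    // instance over that entire range in O(1), and the displaced range stays contiguous.)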
+ for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
+ if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
+ ++fCopyPathRanges[i].fCount;
+ return;
+ }
+ int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
+ std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
+ currentInstanceIdx = rangeFirstInstanceIdx;
+ }
+
+ // An instance with this particular proxy did not yet exist in the array. Add a range for it.
+ emplace_at_memcpy(&fCopyPathRanges, fCurrCopyAtlasRangesIdx, std::move(srcProxy), 1);
+}
+
+static bool transform_path_pts(
+ const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
+ GrOctoBounds* octoBounds) {
+ const SkPoint* pts = SkPathPriv::PointData(path);
+ int numPts = path.countPoints();
+ SkASSERT(numPts + 1 <= outDevPts.count());
+ SkASSERT(numPts);
+
+ // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
+ // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
+ // transform is not necessary as long as the shader uses the correct inverse.
+ SkMatrix m45;
+ m45.setSinCos(1, 1);
+ m45.preConcat(m);
+
+ // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
+ // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
+ // | 1 1 |
+ Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
+ Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
+ Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());
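+    // (Lanes 0-1 of each Sk4f apply m and lanes 2-3 apply m45, so the fma chain below evaluates
+    // both affine maps for every point at once.)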
+
+ // Map the path's points to device space and accumulate bounding boxes.
+ Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
+ devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
+ Sk4f topLeft = devPt;
+ Sk4f bottomRight = devPt;
+
+ // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
+ // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
+ // be at least one larger than the number of points.
+ devPt.store(&outDevPts[0]);
+
+ for (int i = 1; i < numPts; ++i) {
+ devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
+ devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
+ topLeft = Sk4f::Min(topLeft, devPt);
+ bottomRight = Sk4f::Max(bottomRight, devPt);
+ devPt.store(&outDevPts[i]);
+ }
+
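+    // (Finite values satisfy x*0 == 0, while inf*0 and NaN*0 are both NaN, so this test catches
+    // any overflow from the accumulation above.)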
+ if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
+ // The bounds are infinite or NaN.
+ return false;
+ }
+
+ SkPoint topLeftPts[2], bottomRightPts[2];
+ topLeft.store(topLeftPts);
+ bottomRight.store(bottomRightPts);
+
+ const SkRect& devBounds = SkRect::MakeLTRB(
+ topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
+ const SkRect& devBounds45 = SkRect::MakeLTRB(
+ topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());
+
+ octoBounds->set(devBounds, devBounds45);
+ return true;
+}
+
+GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
+ const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
+ GrOctoBounds* octoBounds, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
+ SkASSERT(this->isMapped());
+ SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
+
+ SkPath path;
+ shape.asPath(&path);
+ if (path.isEmpty()) {
+ SkDEBUGCODE(--fEndPathInstance);
+ SkDEBUGCODE(--fEndStencilResolveInstance);
+ return nullptr;
+ }
+ if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
+ // The transformed path had infinite or NaN bounds.
+ SkDEBUGCODE(--fEndPathInstance);
+ SkDEBUGCODE(--fEndStencilResolveInstance);
+ return nullptr;
+ }
+
+ const SkStrokeRec& stroke = shape.style().strokeRec();
+ if (!stroke.isFillStyle()) {
+ float r = SkStrokeRec::GetInflationRadius(
+ stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
+ octoBounds->outset(r);
+ }
+
+ GrScissorTest enableScissorInAtlas;
+ if (clipIBounds.contains(octoBounds->bounds())) {
+ enableScissorInAtlas = GrScissorTest::kDisabled;
+ } else if (octoBounds->clip(clipIBounds)) {
+ enableScissorInAtlas = GrScissorTest::kEnabled;
+ } else {
+ // The clip and octo bounds do not intersect. Draw nothing.
+ SkDEBUGCODE(--fEndPathInstance);
+ SkDEBUGCODE(--fEndStencilResolveInstance);
+ return nullptr;
+ }
+ octoBounds->roundOut(devIBounds);
+ SkASSERT(clipIBounds.contains(*devIBounds));
+
+ this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);
+
+ GrFillRule fillRule;
+ if (stroke.isFillStyle()) {
+ SkASSERT(0 == strokeDevWidth);
+ fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
+ *devIBounds, *devToAtlasOffset);
+ fillRule = GrFillRuleForSkPath(path);
+ } else {
+ // Stroke-and-fill is not yet supported.
+ SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
+ SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
+ fStroker.parseDeviceSpaceStroke(
+ path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth, enableScissorInAtlas,
+ *devIBounds, *devToAtlasOffset);
+ fillRule = GrFillRule::kNonzero;
+ }
+
+ if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
+ this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
+ }
+
+ return &fRenderedAtlasStack.current();
+}
+
+const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
+ const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
+ GrFillRule fillRule, SkIVector* devToAtlasOffset) {
+ SkASSERT(this->isMapped());
+
+ if (devPath.isEmpty()) {
+ SkDEBUGCODE(--fEndStencilResolveInstance);
+ return nullptr;
+ }
+
+ GrScissorTest enableScissorInAtlas;
+ SkIRect clippedPathIBounds;
+ if (clipIBounds.contains(devPathIBounds)) {
+ clippedPathIBounds = devPathIBounds;
+ enableScissorInAtlas = GrScissorTest::kDisabled;
+ } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
+ enableScissorInAtlas = GrScissorTest::kEnabled;
+ } else {
+ // The clip and path bounds do not intersect. Draw nothing.
+ SkDEBUGCODE(--fEndStencilResolveInstance);
+ return nullptr;
+ }
+
+ this->placeRenderedPathInAtlas(clippedPathIBounds, enableScissorInAtlas, devToAtlasOffset);
+ fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), enableScissorInAtlas,
+ clippedPathIBounds, *devToAtlasOffset);
+
+ // In MSAA mode we also record an internal draw instance that will be used to resolve stencil
+ // winding values to coverage when the atlas is generated.
+ if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
+ this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
+ }
+
+ return &fRenderedAtlasStack.current();
+}
+
+void GrCCPerFlushResources::placeRenderedPathInAtlas(
+ const SkIRect& clippedPathIBounds, GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
+ if (GrCCAtlas* retiredAtlas =
+ fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
+ // We did not fit in the previous coverage count atlas and it was retired. Close the path
+ // parser's current batch (which does not yet include the path we just parsed). We will
+ // render this batch into the retired atlas during finalize().
+ retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
+ retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
+ retiredAtlas->setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
+ }
+}
+
+void GrCCPerFlushResources::recordStencilResolveInstance(
+ const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
+ SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
+ SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);
+
+ SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
+ if (GrFillRule::kEvenOdd == fillRule) {
+ // Make even/odd fills counterclockwise. The resolve draw uses two-sided stencil, with
+ // "nonzero" settings in front and "even/odd" settings in back.
+ std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
+ }
+ fStencilResolveInstanceData[fNextStencilResolveInstanceIdx++] = {
+ (int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
+ (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
+}
+
+bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
+ SkASSERT(this->isMapped());
+ SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
+ SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
+ SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
+ fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);
+
+ fInstanceBuffer->unmap();
+ fPathInstanceData = nullptr;
+
+ if (fStencilResolveBuffer) {
+ fStencilResolveBuffer->unmap();
+ fStencilResolveInstanceData = nullptr;
+ }
+
+ if (!fCopyAtlasStack.empty()) {
+ fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
+ fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
+ }
+ if (!fRenderedAtlasStack.empty()) {
+ fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
+ fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
+ fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
+ }
+
+ // Build the GPU buffers to render path coverage counts. (This must not happen until after the
+ // final calls to fFiller/fStroker.closeCurrentBatch().)
+ if (!fFiller.prepareToDraw(onFlushRP)) {
+ return false;
+ }
+ if (!fStroker.prepareToDraw(onFlushRP)) {
+ return false;
+ }
+
+    // Draw the copies from coverage count or MSAA atlas(es) into 8-bit cached atlas(es).
+ int copyRangeIdx = 0;
+ int baseCopyInstance = 0;
+ for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
+ int endCopyRange = atlas->getFillBatchID();
+ SkASSERT(endCopyRange > copyRangeIdx);
+
+ auto rtc = atlas->makeRenderTargetContext(onFlushRP);
+ for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
+ const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
+ int endCopyInstance = baseCopyInstance + copyRange.fCount;
+ if (rtc) {
+ auto op = CopyAtlasOp::Make(
+ rtc->surfPriv().getContext(), sk_ref_sp(this), copyRange.fSrcProxy,
+ baseCopyInstance, endCopyInstance, atlas->drawBounds());
+ rtc->addDrawOp(GrNoClip(), std::move(op));
+ }
+ baseCopyInstance = endCopyInstance;
+ }
+ }
+ SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
+ SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
+ SkASSERT(baseCopyInstance == fEndCopyInstance);
+
+ // Render the coverage count atlas(es).
+ int baseStencilResolveInstance = 0;
+ for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
+        // Copies will be finished by the time we get to rendering new atlases. See if we can
+        // recycle any previously invalidated atlas textures instead of creating new ones.
+ sk_sp<GrTexture> backingTexture;
+ for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
+ if (texture && atlas->currentHeight() == texture->height() &&
+ atlas->currentWidth() == texture->width()) {
+ backingTexture = skstd::exchange(texture, nullptr);
+ break;
+ }
+ }
+
+ if (auto rtc = atlas->makeRenderTargetContext(onFlushRP, std::move(backingTexture))) {
+ std::unique_ptr<GrDrawOp> op;
+ if (CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType()) {
+ op = GrStencilAtlasOp::Make(
+ rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
+ atlas->getStrokeBatchID(), baseStencilResolveInstance,
+ atlas->getEndStencilResolveInstance(), atlas->drawBounds());
+ } else if (onFlushRP->caps()->shaderCaps()->geometryShaderSupport()) {
+ op = RenderAtlasOp<GrGSCoverageProcessor>::Make(
+ rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
+ atlas->getStrokeBatchID(), atlas->drawBounds());
+ } else {
+ op = RenderAtlasOp<GrVSCoverageProcessor>::Make(
+ rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
+ atlas->getStrokeBatchID(), atlas->drawBounds());
+ }
+ rtc->addDrawOp(GrNoClip(), std::move(op));
+ if (rtc->proxy()->requiresManualMSAAResolve()) {
+ onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->proxy()->asTextureProxy()),
+ GrSurfaceProxy::ResolveFlags::kMSAA);
+ }
+ }
+
+ SkASSERT(atlas->getEndStencilResolveInstance() >= baseStencilResolveInstance);
+ baseStencilResolveInstance = atlas->getEndStencilResolveInstance();
+ }
+ SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
+ baseStencilResolveInstance == fEndStencilResolveInstance);
+
+ return true;
+}
+
+void GrCCPerFlushResourceSpecs::cancelCopies() {
+ // Convert copies to cached draws.
+ fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
+ fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
+ fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
+ fCopyAtlasSpecs = GrCCAtlas::Specs();
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.h b/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.h
new file mode 100644
index 0000000000..d446da7829
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPerFlushResources_DEFINED
+#define GrCCPerFlushResources_DEFINED
+
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/ccpr/GrCCAtlas.h"
+#include "src/gpu/ccpr/GrCCFiller.h"
+#include "src/gpu/ccpr/GrCCPathProcessor.h"
+#include "src/gpu/ccpr/GrCCStroker.h"
+#include "src/gpu/ccpr/GrStencilAtlasOp.h"
+
+class GrCCPathCache;
+class GrCCPathCacheEntry;
+class GrOctoBounds;
+class GrOnFlushResourceProvider;
+class GrShape;
+
+/**
+ * This struct counts values that help us preallocate buffers for rendered path geometry.
+ */
+struct GrCCRenderedPathStats {
+ int fMaxPointsPerPath = 0;
+ int fNumTotalSkPoints = 0;
+ int fNumTotalSkVerbs = 0;
+ int fNumTotalConicWeights = 0;
+
+ void statPath(const SkPath&);
+};
+
+/**
+ * This struct encapsulates the minimum and desired requirements for the GPU resources required by
+ * CCPR in a given flush.
+ */
+struct GrCCPerFlushResourceSpecs {
+ static constexpr int kFillIdx = 0;
+ static constexpr int kStrokeIdx = 1;
+
+ int fNumCachedPaths = 0;
+
+ int fNumCopiedPaths[2] = {0, 0};
+ GrCCRenderedPathStats fCopyPathStats[2];
+ GrCCAtlas::Specs fCopyAtlasSpecs;
+
+ int fNumRenderedPaths[2] = {0, 0};
+ int fNumClipPaths = 0;
+ GrCCRenderedPathStats fRenderedPathStats[2];
+ GrCCAtlas::Specs fRenderedAtlasSpecs;
+
+ bool isEmpty() const {
+ return 0 == fNumCachedPaths + fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx] +
+ fNumRenderedPaths[kFillIdx] + fNumRenderedPaths[kStrokeIdx] + fNumClipPaths;
+ }
+ // Converts the copies to normal cached draws.
+ void cancelCopies();
+};
+
+/**
+ * This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
+ * preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
+ * It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
+ */
+class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
+public:
+ GrCCPerFlushResources(
+            GrOnFlushResourceProvider*, GrCCAtlas::CoverageType, const GrCCPerFlushResourceSpecs&);
+
+ bool isMapped() const { return SkToBool(fPathInstanceData); }
+
+ GrCCAtlas::CoverageType renderedPathCoverageType() const {
+ return fRenderedAtlasStack.coverageType();
+ }
+
+ // Copies a coverage-counted path out of the given texture proxy, and into a cached, 8-bit,
+ // literal coverage atlas. Updates the cache entry to reference the new atlas.
+ void upgradeEntryToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
+ GrCCPathCacheEntry*, GrFillRule);
+
+ // These two methods render a path into a temporary coverage count atlas. See
+ // GrCCPathProcessor::Instance for a description of the outputs.
+ //
+ // strokeDevWidth must be 0 for fills, 1 for hairlines, or the stroke width in device-space
+ // pixels for non-hairline strokes (implicitly requiring a rigid-body transform).
+ GrCCAtlas* renderShapeInAtlas(
+ const SkIRect& clipIBounds, const SkMatrix&, const GrShape&, float strokeDevWidth,
+ GrOctoBounds*, SkIRect* devIBounds, SkIVector* devToAtlasOffset);
+ const GrCCAtlas* renderDeviceSpacePathInAtlas(
+ const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
+ GrFillRule fillRule, SkIVector* devToAtlasOffset);
+
+ // Returns the index in instanceBuffer() of the next instance that will be added by
+ // appendDrawPathInstance().
+ int nextPathInstanceIdx() const { return fNextPathInstanceIdx; }
+
+ // Appends an instance to instanceBuffer() that will draw a path to the destination render
+    // target. The caller is responsible for calling set() on the returned instance, for keeping
+    // track of its atlas and index (see nextPathInstanceIdx()), and for issuing the actual draw
+    // call.
+ GrCCPathProcessor::Instance& appendDrawPathInstance() {
+ SkASSERT(this->isMapped());
+ SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
+ return fPathInstanceData[fNextPathInstanceIdx++];
+ }
+
+ // Finishes off the GPU buffers and renders the atlas(es).
+ bool finalize(GrOnFlushResourceProvider*);
+
+ // Accessors used by draw calls, once the resources have been finalized.
+ const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
+ const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; }
+ sk_sp<const GrGpuBuffer> refIndexBuffer() const {
+ SkASSERT(!this->isMapped());
+ return fIndexBuffer;
+ }
+ sk_sp<const GrGpuBuffer> refVertexBuffer() const {
+ SkASSERT(!this->isMapped());
+ return fVertexBuffer;
+ }
+ sk_sp<const GrGpuBuffer> refInstanceBuffer() const {
+ SkASSERT(!this->isMapped());
+ return fInstanceBuffer;
+ }
+ sk_sp<const GrGpuBuffer> refStencilResolveBuffer() const {
+ SkASSERT(!this->isMapped());
+ return fStencilResolveBuffer;
+ }
+
+private:
+ void recordCopyPathInstance(const GrCCPathCacheEntry&, const SkIVector& newAtlasOffset,
+ GrFillRule, sk_sp<GrTextureProxy> srcProxy);
+ void placeRenderedPathInAtlas(
+ const SkIRect& clippedPathIBounds, GrScissorTest, SkIVector* devToAtlasOffset);
+
+ // In MSAA mode we record an additional instance per path that draws a rectangle on top of its
+ // corresponding path in the atlas and resolves stencil winding values to coverage.
+ void recordStencilResolveInstance(
+ const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule);
+
+ const SkAutoSTArray<32, SkPoint> fLocalDevPtsBuffer;
+ GrCCFiller fFiller;
+ GrCCStroker fStroker;
+ GrCCAtlasStack fCopyAtlasStack;
+ GrCCAtlasStack fRenderedAtlasStack;
+
+ const sk_sp<const GrGpuBuffer> fIndexBuffer;
+ const sk_sp<const GrGpuBuffer> fVertexBuffer;
+ const sk_sp<GrGpuBuffer> fInstanceBuffer;
+
+ GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
+ int fNextCopyInstanceIdx;
+ SkDEBUGCODE(int fEndCopyInstance);
+ int fNextPathInstanceIdx;
+ int fBasePathInstanceIdx;
+ SkDEBUGCODE(int fEndPathInstance);
+
+ // Represents a range of copy-path instances that all share the same source proxy. (i.e. Draw
+ // instances that copy a path mask from a 16-bit coverage count atlas into an 8-bit literal
+ // coverage atlas.)
+ struct CopyPathRange {
+ CopyPathRange() = default;
+ CopyPathRange(sk_sp<GrTextureProxy> srcProxy, int count)
+ : fSrcProxy(std::move(srcProxy)), fCount(count) {}
+ sk_sp<GrTextureProxy> fSrcProxy;
+ int fCount;
+ };
+
+ SkSTArray<4, CopyPathRange> fCopyPathRanges;
+ int fCurrCopyAtlasRangesIdx = 0;
+
+ // This is a list of coverage count atlas textures that have been invalidated due to us copying
+ // their paths into new 8-bit literal coverage atlases. Since copying is finished by the time
+ // we begin rendering new atlases, we can recycle these textures for the rendered atlases rather
+ // than allocating new texture objects upon instantiation.
+ SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;
+
+    // Used in MSAA mode to make an intermediate draw that resolves stencil winding values to
+    // coverage.
+ sk_sp<GrGpuBuffer> fStencilResolveBuffer;
+ GrStencilAtlasOp::ResolveRectInstance* fStencilResolveInstanceData = nullptr;
+ int fNextStencilResolveInstanceIdx = 0;
+ SkDEBUGCODE(int fEndStencilResolveInstance);
+
+public:
+#ifdef SK_DEBUG
+ void debugOnly_didReuseRenderedPath() {
+ if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
+ --fEndStencilResolveInstance;
+ }
+ }
+#endif
+ const GrTexture* testingOnly_frontCopyAtlasTexture() const;
+ const GrTexture* testingOnly_frontRenderedAtlasTexture() const;
+};
+
+inline void GrCCRenderedPathStats::statPath(const SkPath& path) {
+ fMaxPointsPerPath = SkTMax(fMaxPointsPerPath, path.countPoints());
+ fNumTotalSkPoints += path.countPoints();
+ fNumTotalSkVerbs += path.countVerbs();
+ fNumTotalConicWeights += SkPathPriv::ConicWeightCnt(path);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCPerOpsTaskPaths.h b/gfx/skia/skia/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
new file mode 100644
index 0000000000..ff8a224e66
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPerOpsTaskPaths_DEFINED
+#define GrCCPerOpsTaskPaths_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/gpu/ccpr/GrCCClipPath.h"
+
+#include <map>
+
+class GrCCDrawPathsOp;
+class GrCCPerFlushResources;
+
+/**
+ * Tracks all the CCPR paths in a given opsTask that will be drawn when it flushes.
+ */
+// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
+struct GrCCPerOpsTaskPaths : public SkRefCnt {
+ SkTInternalLList<GrCCDrawPathsOp> fDrawOps; // This class does not own these ops.
+ std::map<uint32_t, GrCCClipPath> fClipPaths;
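+    // 10K of inline storage; the constructor argument sizes the first heap block at 20K once the
+    // inline block fills (reading SkSTArenaAlloc's extra argument as its first-heap-allocation
+    // parameter).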
+ SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
+ sk_sp<const GrCCPerFlushResources> fFlushResources;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.cpp
new file mode 100644
index 0000000000..5012e3c41b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCQuadraticShader.h"
+
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+void GrCCQuadraticShader::emitSetupCode(
+ GrGLSLVertexGeoBuilder* s, const char* pts, const char** outHull4) const {
+ s->declareGlobal(fQCoordMatrix);
+ s->codeAppendf("%s = float2x2(1, 1, .5, 0) * inverse(float2x2(%s[2] - %s[0], %s[1] - %s[0]));",
+ fQCoordMatrix.c_str(), pts, pts, pts, pts);
+
+ s->declareGlobal(fQCoord0);
+ s->codeAppendf("%s = %s[0];", fQCoord0.c_str(), pts);
+
+ if (outHull4) {
+ // Clip the bezier triangle by the tangent line at maximum height. Quadratics have the nice
+        // property that maximum height always occurs at T=.5. This is a simple application of
+ // De Casteljau's algorithm.
+ s->codeAppend ("float2 quadratic_hull[4];");
+ s->codeAppendf("quadratic_hull[0] = %s[0];", pts);
+ s->codeAppendf("quadratic_hull[1] = (%s[0] + %s[1]) * .5;", pts, pts);
+ s->codeAppendf("quadratic_hull[2] = (%s[1] + %s[2]) * .5;", pts, pts);
+ s->codeAppendf("quadratic_hull[3] = %s[2];", pts);
+ *outHull4 = "quadratic_hull";
+ }
+}
+
+void GrCCQuadraticShader::onEmitVaryings(
+ GrGLSLVaryingHandler* varyingHandler, GrGLSLVarying::Scope scope, SkString* code,
+ const char* position, const char* coverage, const char* cornerCoverage, const char* wind) {
+ fCoord_fGrad.reset(kFloat4_GrSLType, scope);
+ varyingHandler->addVarying("coord_and_grad", &fCoord_fGrad);
+ code->appendf("%s.xy = %s * (%s - %s);", // Quadratic coords.
+ OutName(fCoord_fGrad), fQCoordMatrix.c_str(), position, fQCoord0.c_str());
+ code->appendf("%s.zw = 2*bloat * float2(2 * %s.x, -1) * %s;", // Gradient.
+ OutName(fCoord_fGrad), OutName(fCoord_fGrad), fQCoordMatrix.c_str());
+
+ if (coverage) {
+ // Coverages need full precision since distance to the opposite edge can be large.
+ fEdge_fWind_fCorner.reset((cornerCoverage) ? kFloat4_GrSLType : kFloat2_GrSLType, scope);
+ varyingHandler->addVarying((cornerCoverage) ? "edge_and_wind_and_corner" : "edge_and_wind",
+ &fEdge_fWind_fCorner);
+ code->appendf("%s.x = %s;", OutName(fEdge_fWind_fCorner), coverage);
+ code->appendf("%s.y = %s;", OutName(fEdge_fWind_fCorner), wind);
+ }
+
+ if (cornerCoverage) {
+ SkASSERT(coverage);
+ code->appendf("half hull_coverage;");
+ this->calcHullCoverage(code, OutName(fCoord_fGrad), coverage, "hull_coverage");
+ code->appendf("%s.zw = half2(hull_coverage, 1) * %s;",
+ OutName(fEdge_fWind_fCorner), cornerCoverage);
+ }
+}
+
+void GrCCQuadraticShader::emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder* f, const char* outputCoverage) const {
+ this->calcHullCoverage(&AccessCodeString(f), fCoord_fGrad.fsIn(),
+ SkStringPrintf("%s.x", fEdge_fWind_fCorner.fsIn()).c_str(),
+ outputCoverage);
+ f->codeAppendf("%s *= half(%s.y);", outputCoverage, fEdge_fWind_fCorner.fsIn()); // Wind.
+
+ if (kFloat4_GrSLType == fEdge_fWind_fCorner.type()) {
+ f->codeAppendf("%s = half(%s.z * %s.w) + %s;", // Attenuated corner coverage.
+ outputCoverage, fEdge_fWind_fCorner.fsIn(), fEdge_fWind_fCorner.fsIn(),
+ outputCoverage);
+ }
+}
+
+void GrCCQuadraticShader::calcHullCoverage(SkString* code, const char* coordAndGrad,
+ const char* edge, const char* outputCoverage) const {
+ code->appendf("float x = %s.x, y = %s.y;", coordAndGrad, coordAndGrad);
+ code->appendf("float2 grad = %s.zw;", coordAndGrad);
+ code->append ("float f = x*x - y;");
+ code->append ("float fwidth = abs(grad.x) + abs(grad.y);");
+ code->appendf("float curve_coverage = min(0.5 - f/fwidth, 1);");
+ // Flat edge opposite the curve.
+ code->appendf("float edge_coverage = min(%s, 0);", edge);
+ // Total hull coverage.
+ code->appendf("%s = max(half(curve_coverage + edge_coverage), 0);", outputCoverage);
+}
+
+void GrCCQuadraticShader::emitSampleMaskCode(GrGLSLFPFragmentBuilder* f) const {
+ f->codeAppendf("float x = %s.x, y = %s.y;", fCoord_fGrad.fsIn(), fCoord_fGrad.fsIn());
+ f->codeAppendf("float f = x*x - y;");
+ f->codeAppendf("float2 grad = %s.zw;", fCoord_fGrad.fsIn());
+ f->applyFnToMultisampleMask("f", "grad", GrGLSLFPFragmentBuilder::ScopeFlags::kTopLevel);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.h b/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.h
new file mode 100644
index 0000000000..2942ad1841
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCQuadraticShader.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCQuadraticShader_DEFINED
+#define GrCCQuadraticShader_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class renders the coverage of closed quadratic curves using the techniques outlined in
+ * "Resolution Independent Curve Rendering using Programmable Graphics Hardware" by Charles Loop and
+ * Jim Blinn:
+ *
+ * https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ *
+ * The provided curves must be monotonic with respect to the vector of their closing edge [P2 - P0].
+ * (Use GrCCGeometry::quadraticTo().)
+ */
+class GrCCQuadraticShader : public GrCCCoverageProcessor::Shader {
+public:
+ void emitSetupCode(
+ GrGLSLVertexGeoBuilder*, const char* pts, const char** outHull4) const override;
+
+ void onEmitVaryings(
+ GrGLSLVaryingHandler*, GrGLSLVarying::Scope, SkString* code, const char* position,
+ const char* coverage, const char* cornerCoverage, const char* wind) override;
+
+ void emitFragmentCoverageCode(
+ GrGLSLFPFragmentBuilder*, const char* outputCoverage) const override;
+
+ void emitSampleMaskCode(GrGLSLFPFragmentBuilder*) const override;
+
+private:
+    void calcHullCoverage(SkString* code, const char* coordAndGrad, const char* edge,
+ const char* outputCoverage) const;
+
+ const GrShaderVar fQCoordMatrix{"qcoord_matrix", kFloat2x2_GrSLType};
+ const GrShaderVar fQCoord0{"qcoord0", kFloat2_GrSLType};
+ GrGLSLVarying fCoord_fGrad;
+ GrGLSLVarying fEdge_fWind_fCorner;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCSTLList.h b/gfx/skia/skia/src/gpu/ccpr/GrCCSTLList.h
new file mode 100644
index 0000000000..29f26b95c1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCSTLList.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCSTLList_DEFINED
+#define GrCCSTLList_DEFINED
+
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkArenaAlloc.h"
+#include <new>
+
+/**
+ * A singly-linked list whose head element is a local class member. This is required by
+ * GrCCDrawPathsOp because the owning opsTask is unknown at the time of creation, so we can't use
+ * its associated allocator to create the first element.
+ */
+template<typename T> class GrCCSTLList : SkNoncopyable {
+public:
+ template <typename ...Args>
+ GrCCSTLList(Args&&... args) : fHead(std::forward<Args>(args)...) {}
+
+ ~GrCCSTLList() {
+ T* draw = fHead.fNext; // fHead will be destructed automatically.
+ while (draw) {
+ T* next = draw->fNext;
+ draw->~T();
+ draw = next;
+ }
+ }
+
+ const T& head() const { return fHead; }
+ T& head() { return fHead; }
+
+ void append(GrCCSTLList&& right, SkArenaAlloc* alloc) {
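+        // right's head element lives inline in right itself, so it must be relocated into the
+        // arena before this list can take ownership of the chain.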
+ T* nextTail = (&right.fHead == right.fTail) ? nullptr : right.fTail;
+ T* newRightHead =
+ new (alloc->makeBytesAlignedTo(sizeof(T), alignof(T))) T(std::move(right.fHead));
+
+ // Finish the move of right.fHead.
+ right.fHead.fNext = nullptr;
+ right.fTail = &right.fHead;
+
+ fTail->fNext = newRightHead;
+ fTail = !nextTail ? newRightHead : nextTail;
+ }
+
+ template<typename U> struct Iter {
+ bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
+ U& operator*() { return *fCurr; }
+ void operator++() { fCurr = fCurr->fNext; }
+ U* fCurr;
+ };
+ Iter<const T> begin() const { return Iter<const T>{&fHead}; }
+ Iter<const T> end() const { return Iter<const T>{nullptr}; }
+ Iter<T> begin() { return Iter<T>{&fHead}; }
+ Iter<T> end() { return Iter<T>{nullptr}; }
+
+private:
+ T fHead;
+ T* fTail = &fHead;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.cpp
new file mode 100644
index 0000000000..40884d7d8f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.cpp
@@ -0,0 +1,583 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCStrokeGeometry.h"
+
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkMathPriv.h"
+
+// This is the maximum distance in pixels that we can stray from the edge of a stroke when
+// converting it to flat line segments.
+static constexpr float kMaxErrorFromLinearization = 1/8.f;
+
+static inline float length(const Sk2f& n) {
+ Sk2f nn = n*n;
+ return SkScalarSqrt(nn[0] + nn[1]);
+}
+
+static inline Sk2f normalize(const Sk2f& v) {
+ Sk2f vv = v*v;
+ vv += SkNx_shuffle<1,0>(vv);
+ return v * vv.rsqrt();
+}
+
+static inline void transpose(const Sk2f& a, const Sk2f& b, Sk2f* X, Sk2f* Y) {
+ float transpose[4];
+ a.store(transpose);
+ b.store(transpose+2);
+ Sk2f::Load2(transpose, X, Y);
+}
+
+static inline void normalize2(const Sk2f& v0, const Sk2f& v1, SkPoint out[2]) {
+ Sk2f X, Y;
+ transpose(v0, v1, &X, &Y);
+ Sk2f invlength = (X*X + Y*Y).rsqrt();
+ Sk2f::Store2(out, Y * invlength, -X * invlength);
+}
+
+static inline float calc_curvature_costheta(const Sk2f& leftTan, const Sk2f& rightTan) {
+ Sk2f X, Y;
+ transpose(leftTan, rightTan, &X, &Y);
+ Sk2f invlength = (X*X + Y*Y).rsqrt();
+ Sk2f dotprod = leftTan * rightTan;
+ return (dotprod[0] + dotprod[1]) * invlength[0] * invlength[1];
+}
+
+static GrCCStrokeGeometry::Verb join_verb_from_join(SkPaint::Join join) {
+ using Verb = GrCCStrokeGeometry::Verb;
+ switch (join) {
+ case SkPaint::kBevel_Join:
+ return Verb::kBevelJoin;
+ case SkPaint::kMiter_Join:
+ return Verb::kMiterJoin;
+ case SkPaint::kRound_Join:
+ return Verb::kRoundJoin;
+ }
+ SK_ABORT("Invalid SkPaint::Join.");
+}
+
+void GrCCStrokeGeometry::beginPath(const SkStrokeRec& stroke, float strokeDevWidth,
+ InstanceTallies* tallies) {
+ SkASSERT(!fInsideContour);
+ // Client should have already converted the stroke to device space (i.e. width=1 for hairline).
+ SkASSERT(strokeDevWidth > 0);
+
+ fCurrStrokeRadius = strokeDevWidth/2;
+ fCurrStrokeJoinVerb = join_verb_from_join(stroke.getJoin());
+ fCurrStrokeCapType = stroke.getCap();
+ fCurrStrokeTallies = tallies;
+
+ if (Verb::kMiterJoin == fCurrStrokeJoinVerb) {
+ // We implement miters by placing a triangle-shaped cap on top of a bevel join. Convert the
+ // "miter limit" to how tall that triangle cap can be.
+ float m = stroke.getMiter();
+ fMiterMaxCapHeightOverWidth = .5f * SkScalarSqrt(m*m - 1);
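+        // (Derivation sketch: with turning angle theta, the miter limit is m = 1/cos(theta/2),
+        // and the cap's height/width ratio is tan(theta/2)/2, which equals sqrt(m*m - 1)/2.)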
+ }
+
+    // Find the angle of curvature where the arc height above a straight chord from point A to
+    // point B is equal to kMaxErrorFromLinearization.
+ float r = SkTMax(1 - kMaxErrorFromLinearization / fCurrStrokeRadius, 0.f);
+ fMaxCurvatureCosTheta = 2*r*r - 1;
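+    // (Derivation sketch: a chord subtending arc angle theta has arc height
+    // radius*(1 - cos(theta/2)); setting that height to kMaxErrorFromLinearization gives
+    // cos(theta/2) = r, and the double-angle identity cos(theta) = 2*cos^2(theta/2) - 1 yields
+    // the line above.)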
+
+ fCurrContourFirstPtIdx = -1;
+ fCurrContourFirstNormalIdx = -1;
+
+ fVerbs.push_back(Verb::kBeginPath);
+}
+
+void GrCCStrokeGeometry::moveTo(SkPoint pt) {
+ SkASSERT(!fInsideContour);
+ fCurrContourFirstPtIdx = fPoints.count();
+ fCurrContourFirstNormalIdx = fNormals.count();
+ fPoints.push_back(pt);
+ SkDEBUGCODE(fInsideContour = true);
+}
+
+void GrCCStrokeGeometry::lineTo(SkPoint pt) {
+ SkASSERT(fInsideContour);
+ this->lineTo(fCurrStrokeJoinVerb, pt);
+}
+
+void GrCCStrokeGeometry::lineTo(Verb leftJoinVerb, SkPoint pt) {
+ Sk2f tan = Sk2f::Load(&pt) - Sk2f::Load(&fPoints.back());
+ if ((tan == 0).allTrue()) {
+ return;
+ }
+
+ tan = normalize(tan);
+ SkVector n = SkVector::Make(tan[1], -tan[0]);
+
+ this->recordLeftJoinIfNotEmpty(leftJoinVerb, n);
+ fNormals.push_back(n);
+
+ this->recordStroke(Verb::kLinearStroke, 0);
+ fPoints.push_back(pt);
+}
+
+void GrCCStrokeGeometry::quadraticTo(const SkPoint P[3]) {
+ SkASSERT(fInsideContour);
+ this->quadraticTo(fCurrStrokeJoinVerb, P, SkFindQuadMaxCurvature(P));
+}
+
+// Wang's formula for quadratics (1985) gives us the number of evenly spaced (in the parametric
+// sense) line segments that are guaranteed to be within a distance of "kMaxErrorFromLinearization"
+// from the actual curve.
+static inline float wangs_formula_quadratic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2) {
+ static constexpr float k = 2 / (8 * kMaxErrorFromLinearization);
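+    // (Here k is d*(d-1) / (8 * tolerance) with degree d = 2; a quadratic has one second
+    // difference, p0 - 2*p1 + p2, so the bound reduces to a single length.)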
+ float f = SkScalarSqrt(k * length(p2 - p1*2 + p0));
+ return SkScalarCeilToInt(f);
+}
+
+void GrCCStrokeGeometry::quadraticTo(Verb leftJoinVerb, const SkPoint P[3], float maxCurvatureT) {
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+
+ Sk2f tan0 = p1 - p0;
+ Sk2f tan1 = p2 - p1;
+
+ // Snap to a "lineTo" if the control point is so close to an endpoint that FP error will become
+ // an issue.
+ if ((tan0.abs() < SK_ScalarNearlyZero).allTrue() || // p0 ~= p1
+ (tan1.abs() < SK_ScalarNearlyZero).allTrue()) { // p1 ~= p2
+ this->lineTo(leftJoinVerb, P[2]);
+ return;
+ }
+
+ SkPoint normals[2];
+ normalize2(tan0, tan1, normals);
+
+ // Decide how many flat line segments to chop the curve into.
+ int numSegments = wangs_formula_quadratic(p0, p1, p2);
+ numSegments = SkTMin(numSegments, 1 << kMaxNumLinearSegmentsLog2);
+ if (numSegments <= 1) {
+ this->rotateTo(leftJoinVerb, normals[0]);
+ this->lineTo(Verb::kInternalRoundJoin, P[2]);
+ this->rotateTo(Verb::kInternalRoundJoin, normals[1]);
+ return;
+ }
+
+ // At + B gives a vector tangent to the quadratic.
+ Sk2f A = p0 - p1*2 + p2;
+ Sk2f B = p1 - p0;
+
+ // Find a line segment that crosses max curvature.
+ float segmentLength = SkScalarInvert(numSegments);
+ float leftT = maxCurvatureT - segmentLength/2;
+ float rightT = maxCurvatureT + segmentLength/2;
+ Sk2f leftTan, rightTan;
+ if (leftT <= 0) {
+ leftT = 0;
+ leftTan = tan0;
+ rightT = segmentLength;
+ rightTan = A*rightT + B;
+ } else if (rightT >= 1) {
+ leftT = 1 - segmentLength;
+ leftTan = A*leftT + B;
+ rightT = 1;
+ rightTan = tan1;
+ } else {
+ leftTan = A*leftT + B;
+ rightTan = A*rightT + B;
+ }
+
+ // Check if curvature is too strong for a triangle strip on the line segment that crosses max
+ // curvature. If it is, we will chop and convert the segment to a "lineTo" with round joins.
+ //
+ // FIXME: This is quite costly and the vast majority of curves only have moderate curvature. We
+ // would benefit significantly from a quick reject that detects curves that don't need special
+ // treatment for strong curvature.
+ bool isCurvatureTooStrong = calc_curvature_costheta(leftTan, rightTan) < fMaxCurvatureCosTheta;
+ if (isCurvatureTooStrong) {
+ SkPoint ptsBuffer[5];
+ const SkPoint* currQuadratic = P;
+
+ if (leftT > 0) {
+ SkChopQuadAt(currQuadratic, ptsBuffer, leftT);
+ this->quadraticTo(leftJoinVerb, ptsBuffer, /*maxCurvatureT=*/1);
+ if (rightT < 1) {
+ rightT = (rightT - leftT) / (1 - leftT);
+ }
+ currQuadratic = ptsBuffer + 2;
+ } else {
+ this->rotateTo(leftJoinVerb, normals[0]);
+ }
+
+ if (rightT < 1) {
+ SkChopQuadAt(currQuadratic, ptsBuffer, rightT);
+ this->lineTo(Verb::kInternalRoundJoin, ptsBuffer[2]);
+ this->quadraticTo(Verb::kInternalRoundJoin, ptsBuffer + 2, /*maxCurvatureT=*/0);
+ } else {
+ this->lineTo(Verb::kInternalRoundJoin, currQuadratic[2]);
+ this->rotateTo(Verb::kInternalRoundJoin, normals[1]);
+ }
+ return;
+ }
+
+ this->recordLeftJoinIfNotEmpty(leftJoinVerb, normals[0]);
+ fNormals.push_back_n(2, normals);
+
+ this->recordStroke(Verb::kQuadraticStroke, SkNextLog2(numSegments));
+ p1.store(&fPoints.push_back());
+ p2.store(&fPoints.push_back());
+}
+
+void GrCCStrokeGeometry::cubicTo(const SkPoint P[4]) {
+ SkASSERT(fInsideContour);
+ float roots[3];
+ int numRoots = SkFindCubicMaxCurvature(P, roots);
+ this->cubicTo(fCurrStrokeJoinVerb, P,
+ numRoots > 0 ? roots[numRoots/2] : 0,
+ numRoots > 1 ? roots[0] : kLeftMaxCurvatureNone,
+ numRoots > 2 ? roots[2] : kRightMaxCurvatureNone);
+}
+
+// Wang's formula for cubics (1985) gives us the number of evenly spaced (in the parametric sense)
+// line segments that are guaranteed to be within a distance of "kMaxErrorFromLinearization"
+// from the actual curve.
+static inline float wangs_formula_cubic(const Sk2f& p0, const Sk2f& p1, const Sk2f& p2,
+ const Sk2f& p3) {
+ static constexpr float k = (3 * 2) / (8 * kMaxErrorFromLinearization);
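+    // (Same bound with degree d = 3: k = d*(d-1) / (8 * tolerance), applied to the larger of the
+    // cubic's two second differences.)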
+ float f = SkScalarSqrt(k * length(Sk2f::Max((p2 - p1*2 + p0).abs(),
+ (p3 - p2*2 + p1).abs())));
+ return SkScalarCeilToInt(f);
+}
+
+void GrCCStrokeGeometry::cubicTo(Verb leftJoinVerb, const SkPoint P[4], float maxCurvatureT,
+ float leftMaxCurvatureT, float rightMaxCurvatureT) {
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+ Sk2f p3 = Sk2f::Load(P+3);
+
+ Sk2f tan0 = p1 - p0;
+ Sk2f tan1 = p3 - p2;
+
+ // Snap control points to endpoints if they are so close that FP error will become an issue.
+ if ((tan0.abs() < SK_ScalarNearlyZero).allTrue()) { // p0 ~= p1
+ p1 = p0;
+ tan0 = p2 - p0;
+ if ((tan0.abs() < SK_ScalarNearlyZero).allTrue()) { // p0 ~= p1 ~= p2
+ this->lineTo(leftJoinVerb, P[3]);
+ return;
+ }
+ }
+ if ((tan1.abs() < SK_ScalarNearlyZero).allTrue()) { // p2 ~= p3
+ p2 = p3;
+ tan1 = p3 - p1;
+ if ((tan1.abs() < SK_ScalarNearlyZero).allTrue() || // p1 ~= p2 ~= p3
+ (p0 == p1).allTrue()) { // p0 ~= p1 AND p2 ~= p3
+ this->lineTo(leftJoinVerb, P[3]);
+ return;
+ }
+ }
+
+ SkPoint normals[2];
+ normalize2(tan0, tan1, normals);
+
+ // Decide how many flat line segments to chop the curve into.
+ int numSegments = wangs_formula_cubic(p0, p1, p2, p3);
+ numSegments = SkTMin(numSegments, 1 << kMaxNumLinearSegmentsLog2);
+ if (numSegments <= 1) {
+ this->rotateTo(leftJoinVerb, normals[0]);
+ this->lineTo(leftJoinVerb, P[3]);
+ this->rotateTo(Verb::kInternalRoundJoin, normals[1]);
+ return;
+ }
+
+ // At^2 + Bt + C gives a vector tangent to the cubic. (More specifically, it's the derivative
+ // minus an irrelevant scale by 3, since all we care about is the direction.)
+ Sk2f A = p3 + (p1 - p2)*3 - p0;
+ Sk2f B = (p0 - p1*2 + p2)*2;
+ Sk2f C = p1 - p0;
+
+ // Find a line segment that crosses max curvature.
+ float segmentLength = SkScalarInvert(numSegments);
+ float leftT = maxCurvatureT - segmentLength/2;
+ float rightT = maxCurvatureT + segmentLength/2;
+ Sk2f leftTan, rightTan;
+ if (leftT <= 0) {
+ leftT = 0;
+ leftTan = tan0;
+ rightT = segmentLength;
+ rightTan = A*rightT*rightT + B*rightT + C;
+ } else if (rightT >= 1) {
+ leftT = 1 - segmentLength;
+ leftTan = A*leftT*leftT + B*leftT + C;
+ rightT = 1;
+ rightTan = tan1;
+ } else {
+ leftTan = A*leftT*leftT + B*leftT + C;
+ rightTan = A*rightT*rightT + B*rightT + C;
+ }
+
+ // Check if curvature is too strong for a triangle strip on the line segment that crosses max
+ // curvature. If it is, we will chop and convert the segment to a "lineTo" with round joins.
+ //
+ // FIXME: This is quite costly and the vast majority of curves only have moderate curvature. We
+ // would benefit significantly from a quick reject that detects curves that don't need special
+ // treatment for strong curvature.
+ bool isCurvatureTooStrong = calc_curvature_costheta(leftTan, rightTan) < fMaxCurvatureCosTheta;
+ if (isCurvatureTooStrong) {
+ SkPoint ptsBuffer[7];
+ p0.store(ptsBuffer);
+ p1.store(ptsBuffer + 1);
+ p2.store(ptsBuffer + 2);
+ p3.store(ptsBuffer + 3);
+ const SkPoint* currCubic = ptsBuffer;
+
+ if (leftT > 0) {
+ SkChopCubicAt(currCubic, ptsBuffer, leftT);
+ this->cubicTo(leftJoinVerb, ptsBuffer, /*maxCurvatureT=*/1,
+ (kLeftMaxCurvatureNone != leftMaxCurvatureT)
+ ? leftMaxCurvatureT/leftT : kLeftMaxCurvatureNone,
+ kRightMaxCurvatureNone);
+ if (rightT < 1) {
+ rightT = (rightT - leftT) / (1 - leftT);
+ }
+ if (rightMaxCurvatureT < 1 && kRightMaxCurvatureNone != rightMaxCurvatureT) {
+ rightMaxCurvatureT = (rightMaxCurvatureT - leftT) / (1 - leftT);
+ }
+ currCubic = ptsBuffer + 3;
+ } else {
+ this->rotateTo(leftJoinVerb, normals[0]);
+ }
+
+ if (rightT < 1) {
+ SkChopCubicAt(currCubic, ptsBuffer, rightT);
+ this->lineTo(Verb::kInternalRoundJoin, ptsBuffer[3]);
+ currCubic = ptsBuffer + 3;
+ this->cubicTo(Verb::kInternalRoundJoin, currCubic, /*maxCurvatureT=*/0,
+ kLeftMaxCurvatureNone, kRightMaxCurvatureNone);
+ } else {
+ this->lineTo(Verb::kInternalRoundJoin, currCubic[3]);
+ this->rotateTo(Verb::kInternalRoundJoin, normals[1]);
+ }
+ return;
+ }
+
+ // Recurse and check the other two points of max curvature, if any.
+ if (kRightMaxCurvatureNone != rightMaxCurvatureT) {
+ this->cubicTo(leftJoinVerb, P, rightMaxCurvatureT, leftMaxCurvatureT,
+ kRightMaxCurvatureNone);
+ return;
+ }
+ if (kLeftMaxCurvatureNone != leftMaxCurvatureT) {
+ SkASSERT(kRightMaxCurvatureNone == rightMaxCurvatureT);
+ this->cubicTo(leftJoinVerb, P, leftMaxCurvatureT, kLeftMaxCurvatureNone,
+ kRightMaxCurvatureNone);
+ return;
+ }
+
+ this->recordLeftJoinIfNotEmpty(leftJoinVerb, normals[0]);
+ fNormals.push_back_n(2, normals);
+
+ this->recordStroke(Verb::kCubicStroke, SkNextLog2(numSegments));
+ p1.store(&fPoints.push_back());
+ p2.store(&fPoints.push_back());
+ p3.store(&fPoints.push_back());
+}
+
+void GrCCStrokeGeometry::recordStroke(Verb verb, int numSegmentsLog2) {
+ SkASSERT(Verb::kLinearStroke != verb || 0 == numSegmentsLog2);
+ SkASSERT(numSegmentsLog2 <= kMaxNumLinearSegmentsLog2);
+ fVerbs.push_back(verb);
+ if (Verb::kLinearStroke != verb) {
+ SkASSERT(numSegmentsLog2 > 0);
+ fParams.push_back().fNumLinearSegmentsLog2 = numSegmentsLog2;
+ }
+ ++fCurrStrokeTallies->fStrokes[numSegmentsLog2];
+}
+
+void GrCCStrokeGeometry::rotateTo(Verb leftJoinVerb, SkVector normal) {
+ this->recordLeftJoinIfNotEmpty(leftJoinVerb, normal);
+ fNormals.push_back(normal);
+}
+
+void GrCCStrokeGeometry::recordLeftJoinIfNotEmpty(Verb joinVerb, SkVector nextNormal) {
+ if (fNormals.count() <= fCurrContourFirstNormalIdx) {
+ // The contour is empty. Nothing to join with.
+ SkASSERT(fNormals.count() == fCurrContourFirstNormalIdx);
+ return;
+ }
+
+ if (Verb::kBevelJoin == joinVerb) {
+ this->recordBevelJoin(Verb::kBevelJoin);
+ return;
+ }
+
+ Sk2f n0 = Sk2f::Load(&fNormals.back());
+ Sk2f n1 = Sk2f::Load(&nextNormal);
+ Sk2f base = n1 - n0;
+ if ((base.abs() * fCurrStrokeRadius < kMaxErrorFromLinearization).allTrue()) {
+ // Treat any join as a bevel when the outside corners of the two adjoining strokes are
+ // close enough to each other. This is important because "miterCapHeightOverWidth" becomes
+ // unstable when n0 and n1 are nearly equal.
+ this->recordBevelJoin(joinVerb);
+ return;
+ }
+
+ // We implement miters and round joins by placing a triangle-shaped cap on top of a bevel join.
+ // (For round joins this triangle cap comprises the conic control points.) Find how tall to make
+ // this triangle cap, relative to its width.
+ //
+ // NOTE: This value would be infinite at 180 degrees, but we clamp miterCapHeightOverWidth at
+ // near-infinity. 180-degree round joins still look perfectly acceptable like this (though
+ // technically not pure arcs).
+ Sk2f cross = base * SkNx_shuffle<1,0>(n0);
+ Sk2f dot = base * n0;
+ float miterCapHeight = SkScalarAbs(dot[0] + dot[1]);
+ float miterCapWidth = SkScalarAbs(cross[0] - cross[1]) * 2;
+
+ if (Verb::kMiterJoin == joinVerb) {
+ if (miterCapHeight > fMiterMaxCapHeightOverWidth * miterCapWidth) {
+ // This join is tighter than the miter limit. Treat it as a bevel.
+ this->recordBevelJoin(Verb::kMiterJoin);
+ return;
+ }
+ this->recordMiterJoin(miterCapHeight / miterCapWidth);
+ return;
+ }
+
+ SkASSERT(Verb::kRoundJoin == joinVerb || Verb::kInternalRoundJoin == joinVerb);
+
+ // Conic arcs become unstable when they approach 180 degrees. When the conic control point
+ // begins shooting off to infinity (i.e., height/width > 32), split the conic into two.
+ static constexpr float kAlmost180Degrees = 32;
+ if (miterCapHeight > kAlmost180Degrees * miterCapWidth) {
+ Sk2f bisect = normalize(n0 - n1);
+ this->rotateTo(joinVerb, SkVector::Make(-bisect[1], bisect[0]));
+ this->recordLeftJoinIfNotEmpty(joinVerb, nextNormal);
+ return;
+ }
+
+ float miterCapHeightOverWidth = miterCapHeight / miterCapWidth;
+
+ // Find the heights of this round join's conic control point as well as the arc itself.
+ Sk2f X, Y;
+ transpose(base * base, n0 * n1, &X, &Y);
+ Sk2f r = Sk2f::Max(X + Y + Sk2f(0, 1), 0.f).sqrt();
+ Sk2f heights = SkNx_fma(r, Sk2f(miterCapHeightOverWidth, -SK_ScalarRoot2Over2), Sk2f(0, 1));
+ float controlPointHeight = SkScalarAbs(heights[0]);
+ float curveHeight = heights[1];
+ if (curveHeight * fCurrStrokeRadius < kMaxErrorFromLinearization) {
+ // Treat round joins as bevels when their curvature is nearly flat.
+ this->recordBevelJoin(joinVerb);
+ return;
+ }
+
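+    // (A conic whose control point sits at height H above its chord peaks at height
+    // h = w*H / (1 + w), so solving for the weight gives w = h / (H - h), as computed below.)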
+ float w = curveHeight / (controlPointHeight - curveHeight);
+ this->recordRoundJoin(joinVerb, miterCapHeightOverWidth, w);
+}
+
+void GrCCStrokeGeometry::recordBevelJoin(Verb originalJoinVerb) {
+ if (!IsInternalJoinVerb(originalJoinVerb)) {
+ fVerbs.push_back(Verb::kBevelJoin);
+ ++fCurrStrokeTallies->fTriangles;
+ } else {
+ fVerbs.push_back(Verb::kInternalBevelJoin);
+ fCurrStrokeTallies->fTriangles += 2;
+ }
+}
+
+void GrCCStrokeGeometry::recordMiterJoin(float miterCapHeightOverWidth) {
+ fVerbs.push_back(Verb::kMiterJoin);
+ fParams.push_back().fMiterCapHeightOverWidth = miterCapHeightOverWidth;
+ fCurrStrokeTallies->fTriangles += 2;
+}
+
+void GrCCStrokeGeometry::recordRoundJoin(Verb joinVerb, float miterCapHeightOverWidth,
+ float conicWeight) {
+ fVerbs.push_back(joinVerb);
+ fParams.push_back().fConicWeight = conicWeight;
+ fParams.push_back().fMiterCapHeightOverWidth = miterCapHeightOverWidth;
+ if (Verb::kRoundJoin == joinVerb) {
+ ++fCurrStrokeTallies->fTriangles;
+ ++fCurrStrokeTallies->fConics;
+ } else {
+ SkASSERT(Verb::kInternalRoundJoin == joinVerb);
+ fCurrStrokeTallies->fTriangles += 2;
+ fCurrStrokeTallies->fConics += 2;
+ }
+}
+
+void GrCCStrokeGeometry::closeContour() {
+ SkASSERT(fInsideContour);
+ SkASSERT(fPoints.count() > fCurrContourFirstPtIdx);
+ if (fPoints.back() != fPoints[fCurrContourFirstPtIdx]) {
+ // Draw a line back to the beginning.
+ this->lineTo(fCurrStrokeJoinVerb, fPoints[fCurrContourFirstPtIdx]);
+ }
+ if (fNormals.count() > fCurrContourFirstNormalIdx) {
+ // Join the first and last lines.
+        this->rotateTo(fCurrStrokeJoinVerb, fNormals[fCurrContourFirstNormalIdx]);
+ } else {
+ // This contour is empty. Add a bogus normal since the iterator always expects one.
+ SkASSERT(fNormals.count() == fCurrContourFirstNormalIdx);
+ fNormals.push_back({0, 0});
+ }
+ fVerbs.push_back(Verb::kEndContour);
+ SkDEBUGCODE(fInsideContour = false);
+}
+
+void GrCCStrokeGeometry::capContourAndExit() {
+ SkASSERT(fInsideContour);
+ if (fCurrContourFirstNormalIdx >= fNormals.count()) {
+ // This contour is empty. Add a normal in the direction that caps orient on empty geometry.
+ SkASSERT(fNormals.count() == fCurrContourFirstNormalIdx);
+ fNormals.push_back({1, 0});
+ }
+
+ this->recordCapsIfAny();
+ fVerbs.push_back(Verb::kEndContour);
+
+ SkDEBUGCODE(fInsideContour = false);
+}
+
+void GrCCStrokeGeometry::recordCapsIfAny() {
+ SkASSERT(fInsideContour);
+ SkASSERT(fCurrContourFirstNormalIdx < fNormals.count());
+
+ if (SkPaint::kButt_Cap == fCurrStrokeCapType) {
+ return;
+ }
+
+ Verb capVerb;
+ if (SkPaint::kSquare_Cap == fCurrStrokeCapType) {
+ if (fCurrStrokeRadius * SK_ScalarRoot2Over2 < kMaxErrorFromLinearization) {
+ return;
+ }
+ capVerb = Verb::kSquareCap;
+ fCurrStrokeTallies->fStrokes[0] += 2;
+ } else {
+ SkASSERT(SkPaint::kRound_Cap == fCurrStrokeCapType);
+ if (fCurrStrokeRadius < kMaxErrorFromLinearization) {
+ return;
+ }
+ capVerb = Verb::kRoundCap;
+ fCurrStrokeTallies->fTriangles += 2;
+ fCurrStrokeTallies->fConics += 4;
+ }
+
+ fVerbs.push_back(capVerb);
+ fVerbs.push_back(Verb::kEndContour);
+
+ fVerbs.push_back(capVerb);
+
+ // Reserve the space first, since push_back() takes the point by reference and might
+ // invalidate the reference if the array grows.
+ fPoints.reserve(fPoints.count() + 1);
+ fPoints.push_back(fPoints[fCurrContourFirstPtIdx]);
+
+ // Reserve the space first, since push_back() takes the normal by reference and might
+ // invalidate the reference if the array grows. (Although in this case we should be fine
+ // since there is a negate operator.)
+ fNormals.reserve(fNormals.count() + 1);
+ fNormals.push_back(-fNormals[fCurrContourFirstNormalIdx]);
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.h b/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.h
new file mode 100644
index 0000000000..24569accc4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCStrokeGeometry.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCStrokeGeometry_DEFINED
+#define GrCCStrokeGeometry_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/private/SkTArray.h"
+
+class SkStrokeRec;
+
+/**
+ * This class converts device-space stroked paths into a set of independent strokes, joins, and caps
+ * that map directly to coverage-counted GPU instances. Non-hairline strokes can only be drawn with
+ * rigid body transforms; we don't yet support skewing the stroke lines themselves.
+ */
+class GrCCStrokeGeometry {
+public:
+ static constexpr int kMaxNumLinearSegmentsLog2 = 15;
+
+ GrCCStrokeGeometry(int numSkPoints = 0, int numSkVerbs = 0)
+ : fVerbs(numSkVerbs * 5/2) // Reserve for a 2.5x expansion in verbs. (Joins get their
+ // own separate verb in our representation.)
+ , fParams(numSkVerbs * 3) // Somewhere around 1-2 params per verb.
+ , fPoints(numSkPoints * 5/4) // Reserve for a 1.25x expansion in points and normals.
+ , fNormals(numSkPoints * 5/4) {}
+
+    // A string of verbs and their corresponding params, points, and normals is a compact
+ // representation of what will eventually be independent instances in GPU buffers. When added
+ // up, the combined coverage of all these instances will make complete stroked paths.
+ enum class Verb : uint8_t {
+ kBeginPath, // Instructs the iterator to advance its stroke width, atlas offset, etc.
+
+ // Independent strokes of a single line or curve, with (antialiased) butt caps on the ends.
+ kLinearStroke,
+ kQuadraticStroke,
+ kCubicStroke,
+
+        // Joins are triangles that connect the outer corners of two adjoining strokes. Miters
+ // have an additional triangle cap on top of the bevel, and round joins have an arc on top.
+ kBevelJoin,
+ kMiterJoin,
+ kRoundJoin,
+
+ // We use internal joins when we have to internally break up a stroke because its curvature
+ // is too strong for a triangle strip. They are coverage-counted, self-intersecting
+        // quadrilaterals that tie the four corners of two adjoining strokes together like a
+ // shoelace. (Coverage is negative on the inside half.) We place an arc on both ends of an
+ // internal round join.
+ kInternalBevelJoin,
+ kInternalRoundJoin,
+
+ kSquareCap,
+ kRoundCap,
+
+ kEndContour // Instructs the iterator to advance its internal point and normal ptrs.
+ };
+ static bool IsInternalJoinVerb(Verb verb);
+
+    // Some verbs require additional parameter(s).
+ union Parameter {
+ // For cubic and quadratic strokes: How many flat line segments to chop the curve into?
+ int fNumLinearSegmentsLog2;
+ // For miter and round joins: How tall should the triangle cap be on top of the join?
+ // (This triangle is the conic control points for a round join.)
+ float fMiterCapHeightOverWidth;
+ float fConicWeight; // Round joins only.
+ };
+
+ const SkTArray<Verb, true>& verbs() const { SkASSERT(!fInsideContour); return fVerbs; }
+ const SkTArray<Parameter, true>& params() const { SkASSERT(!fInsideContour); return fParams; }
+ const SkTArray<SkPoint, true>& points() const { SkASSERT(!fInsideContour); return fPoints; }
+ const SkTArray<SkVector, true>& normals() const { SkASSERT(!fInsideContour); return fNormals; }
+
+ // These track the numbers of instances required to draw all the recorded strokes.
+ struct InstanceTallies {
+ int fStrokes[kMaxNumLinearSegmentsLog2 + 1];
+ int fTriangles;
+ int fConics;
+
+ InstanceTallies operator+(const InstanceTallies&) const;
+ };
+
+ void beginPath(const SkStrokeRec&, float strokeDevWidth, InstanceTallies*);
+ void moveTo(SkPoint);
+ void lineTo(SkPoint);
+ void quadraticTo(const SkPoint[3]);
+ void cubicTo(const SkPoint[4]);
+ void closeContour(); // Connect back to the first point in the contour and exit.
+ void capContourAndExit(); // Add endcaps (if any) and exit the contour.
+
+private:
+ void lineTo(Verb leftJoinVerb, SkPoint);
+ void quadraticTo(Verb leftJoinVerb, const SkPoint[3], float maxCurvatureT);
+
+ static constexpr float kLeftMaxCurvatureNone = 1;
+ static constexpr float kRightMaxCurvatureNone = 0;
+ void cubicTo(Verb leftJoinVerb, const SkPoint[4], float maxCurvatureT, float leftMaxCurvatureT,
+ float rightMaxCurvatureT);
+
+ // Pushes a new normal to fNormals and records a join, without changing the current position.
+ void rotateTo(Verb leftJoinVerb, SkVector normal);
+
+    // Records a stroke in fVerbs and fParams.
+ void recordStroke(Verb, int numSegmentsLog2);
+
+    // Records a join in fVerbs with the previous stroke, if the current contour is not empty.
+ void recordLeftJoinIfNotEmpty(Verb joinType, SkVector nextNormal);
+ void recordBevelJoin(Verb originalJoinVerb);
+ void recordMiterJoin(float miterCapHeightOverWidth);
+ void recordRoundJoin(Verb roundJoinVerb, float miterCapHeightOverWidth, float conicWeight);
+
+ void recordCapsIfAny();
+
+ float fCurrStrokeRadius;
+ Verb fCurrStrokeJoinVerb;
+ SkPaint::Cap fCurrStrokeCapType;
+ InstanceTallies* fCurrStrokeTallies = nullptr;
+
+ // We implement miters by placing a triangle-shaped cap on top of a bevel join. This field tells
+ // us what the miter limit is, restated in terms of how tall that triangle cap can be.
+ float fMiterMaxCapHeightOverWidth;
+
+ // Any curvature on the original curve gets magnified on the outer edge of the stroke,
+ // proportional to how thick the stroke radius is. This field tells us the maximum curvature we
+ // can tolerate using the current stroke radius, before linearization artifacts begin to appear
+ // on the outer edge.
+ //
+ // (Curvature this strong is quite rare in practice, but when it does happen, we decompose the
+ // section with strong curvature into lineTo's with round joins in between.)
+ float fMaxCurvatureCosTheta;
+
+ int fCurrContourFirstPtIdx;
+ int fCurrContourFirstNormalIdx;
+
+ SkDEBUGCODE(bool fInsideContour = false);
+
+ SkSTArray<128, Verb, true> fVerbs;
+ SkSTArray<128, Parameter, true> fParams;
+ SkSTArray<128, SkPoint, true> fPoints;
+ SkSTArray<128, SkVector, true> fNormals;
+};
+
+inline GrCCStrokeGeometry::InstanceTallies GrCCStrokeGeometry::InstanceTallies::operator+(
+ const InstanceTallies& t) const {
+ InstanceTallies ret;
+ for (int i = 0; i <= kMaxNumLinearSegmentsLog2; ++i) {
+ ret.fStrokes[i] = fStrokes[i] + t.fStrokes[i];
+ }
+ ret.fTriangles = fTriangles + t.fTriangles;
+ ret.fConics = fConics + t.fConics;
+ return ret;
+}
+
+inline bool GrCCStrokeGeometry::IsInternalJoinVerb(Verb verb) {
+ switch (verb) {
+ case Verb::kInternalBevelJoin:
+ case Verb::kInternalRoundJoin:
+ return true;
+ case Verb::kBeginPath:
+ case Verb::kLinearStroke:
+ case Verb::kQuadraticStroke:
+ case Verb::kCubicStroke:
+ case Verb::kBevelJoin:
+ case Verb::kMiterJoin:
+ case Verb::kRoundJoin:
+ case Verb::kSquareCap:
+ case Verb::kRoundCap:
+ case Verb::kEndContour:
+ return false;
+ }
+ SK_ABORT("Invalid GrCCStrokeGeometry::Verb.");
+}
+#endif
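
Note: the verb stream above is consumed with independent cursors into the params/points/normals
arrays. A minimal sketch of that cursor discipline, mirroring the advances performed by
GrCCStroker::prepareToDraw() later in this patch (walkStream() itself is hypothetical):

    #include "src/gpu/ccpr/GrCCStrokeGeometry.h"

    // Walks the stream without emitting instances; each verb advances the
    // cursors exactly as GrCCStroker::prepareToDraw() does.
    void walkStream(const GrCCStrokeGeometry& geom) {
        using Verb = GrCCStrokeGeometry::Verb;
        int ptsIdx = 0, paramsIdx = 0, normalsIdx = 0;
        for (Verb verb : geom.verbs()) {
            switch (verb) {
                case Verb::kBeginPath:       break;  // advances per-path state only
                case Verb::kLinearStroke:    ++ptsIdx; break;  // reads pts[i], pts[i+1]
                case Verb::kQuadraticStroke: ++paramsIdx; ptsIdx += 2; ++normalsIdx; break;
                case Verb::kCubicStroke:     ++paramsIdx; ptsIdx += 3; ++normalsIdx; break;
                case Verb::kRoundJoin:
                case Verb::kInternalRoundJoin:
                    ++paramsIdx;  // conic weight
                    // fallthru
                case Verb::kMiterJoin:
                    ++paramsIdx;  // miter cap height over width
                    // fallthru
                case Verb::kBevelJoin:
                case Verb::kInternalBevelJoin:
                    ++normalsIdx;  // joins read normals[i] and normals[i+1]
                    break;
                case Verb::kSquareCap:
                case Verb::kRoundCap:
                    break;  // caps reuse pts[ptsIdx] and normals[normalsIdx]
                case Verb::kEndContour:
                    ++ptsIdx; ++normalsIdx;
                    break;
            }
        }
        SkASSERT(ptsIdx == geom.points().count());
        SkASSERT(normalsIdx == geom.normals().count());
    }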
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.cpp
new file mode 100644
index 0000000000..cbfa5ae7f2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.cpp
@@ -0,0 +1,836 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCCStroker.h"
+
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkPathPriv.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+static constexpr int kMaxNumLinearSegmentsLog2 = GrCCStrokeGeometry::kMaxNumLinearSegmentsLog2;
+using TriangleInstance = GrCCCoverageProcessor::TriPointInstance;
+using ConicInstance = GrCCCoverageProcessor::QuadPointInstance;
+
+namespace {
+
+struct LinearStrokeInstance {
+ float fEndpoints[4];
+ float fStrokeRadius;
+
+ inline void set(const SkPoint[2], float dx, float dy, float strokeRadius);
+};
+
+inline void LinearStrokeInstance::set(const SkPoint P[2], float dx, float dy, float strokeRadius) {
+ Sk2f X, Y;
+ Sk2f::Load2(P, &X, &Y);
+ Sk2f::Store2(fEndpoints, X + dx, Y + dy);
+ fStrokeRadius = strokeRadius;
+}
+
+struct CubicStrokeInstance {
+ float fX[4];
+ float fY[4];
+ float fStrokeRadius;
+ float fNumSegments;
+
+ inline void set(const SkPoint[4], float dx, float dy, float strokeRadius, int numSegments);
+ inline void set(const Sk4f& X, const Sk4f& Y, float dx, float dy, float strokeRadius,
+ int numSegments);
+};
+
+inline void CubicStrokeInstance::set(const SkPoint P[4], float dx, float dy, float strokeRadius,
+ int numSegments) {
+ Sk4f X, Y;
+ Sk4f::Load2(P, &X, &Y);
+ this->set(X, Y, dx, dy, strokeRadius, numSegments);
+}
+
+inline void CubicStrokeInstance::set(const Sk4f& X, const Sk4f& Y, float dx, float dy,
+ float strokeRadius, int numSegments) {
+ (X + dx).store(&fX);
+ (Y + dy).store(&fY);
+ fStrokeRadius = strokeRadius;
+ fNumSegments = static_cast<float>(numSegments);
+}
+
+// This class draws stroked lines in post-transform device space (a.k.a. rectangles). Rigid-body
+// transforms can be achieved by transforming the line ahead of time and adjusting the stroke
+// width. Skews of the stroke itself are not yet supported.
+//
+// Corner coverage is AA-correct, meaning n^2 attenuation along the diagonals. This is important
+// for seamless integration with the connecting geometry.
+class LinearStrokeProcessor : public GrGeometryProcessor {
+public:
+ LinearStrokeProcessor() : GrGeometryProcessor(kLinearStrokeProcessor_ClassID) {
+ this->setInstanceAttributes(kInstanceAttribs, 2);
+#ifdef SK_DEBUG
+ using Instance = LinearStrokeInstance;
+ SkASSERT(this->instanceStride() == sizeof(Instance));
+#endif
+ }
+
+private:
+ const char* name() const override { return "LinearStrokeProcessor"; }
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+
+ static constexpr Attribute kInstanceAttribs[2] = {
+ {"endpts", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"stroke_radius", kFloat_GrVertexAttribType, kFloat_GrSLType}
+ };
+
+ class Impl : public GrGLSLGeometryProcessor {
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) override {}
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;
+ };
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new Impl();
+ }
+};
+
+void LinearStrokeProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+
+ varyingHandler->emitAttributes(args.fGP.cast<LinearStrokeProcessor>());
+
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ v->codeAppend ("float2 tan = normalize(endpts.zw - endpts.xy);");
+ v->codeAppend ("float2 n = float2(tan.y, -tan.x);");
+ v->codeAppend ("float nwidth = abs(n.x) + abs(n.y);");
+
+ // Outset the vertex position for AA butt caps.
+ v->codeAppend ("float2 outset = tan*nwidth/2;");
+ v->codeAppend ("float2 position = (sk_VertexID < 2) "
+ "? endpts.xy - outset : endpts.zw + outset;");
+
+ // Calculate Manhattan distance from both butt caps, where distance=0 on the actual endpoint and
+ // distance=-.5 on the outset edge.
+ GrGLSLVarying edgeDistances(kFloat4_GrSLType);
+ varyingHandler->addVarying("edge_distances", &edgeDistances);
+ v->codeAppendf("%s.xz = float2(-.5, dot(endpts.zw - endpts.xy, tan) / nwidth + .5);",
+ edgeDistances.vsOut());
+ v->codeAppendf("%s.xz = (sk_VertexID < 2) ? %s.xz : %s.zx;",
+ edgeDistances.vsOut(), edgeDistances.vsOut(), edgeDistances.vsOut());
+
+ // Outset the vertex position for stroke radius plus edge AA.
+ v->codeAppend ("outset = n * (stroke_radius + nwidth/2);");
+ v->codeAppend ("position += (0 == (sk_VertexID & 1)) ? +outset : -outset;");
+
+ // Calculate Manhattan distance from both edges, where distance=0 on the actual edge and
+ // distance=-.5 on the outset.
+ v->codeAppendf("%s.yw = float2(-.5, 2*stroke_radius / nwidth + .5);", edgeDistances.vsOut());
+ v->codeAppendf("%s.yw = (0 == (sk_VertexID & 1)) ? %s.yw : %s.wy;",
+ edgeDistances.vsOut(), edgeDistances.vsOut(), edgeDistances.vsOut());
+
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "position");
+ this->emitTransforms(v, varyingHandler, uniHandler, GrShaderVar("position", kFloat2_GrSLType),
+ SkMatrix::I(), args.fFPCoordTransformHandler);
+
+ // Use the 4 edge distances to calculate coverage in the fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+ f->codeAppendf("half2 coverages = half2(min(%s.xy, .5) + min(%s.zw, .5));",
+ edgeDistances.fsIn(), edgeDistances.fsIn());
+ f->codeAppendf("%s = half4(coverages.x * coverages.y);", args.fOutputColor);
+
+ // This shader doesn't use the built-in Ganesh coverage.
+ f->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+}
+
+constexpr GrPrimitiveProcessor::Attribute LinearStrokeProcessor::kInstanceAttribs[];
+
+// This class draws stroked cubics in post-transform device space. Rigid-body transforms can be
+// achieved by transforming the curve ahead of time and adjusting the stroke width. Skews of the
+// stroke itself are not yet supported. Quadratics can be drawn by converting them to cubics.
+//
+// This class works by finding stroke-width line segments orthogonal to the curve at a
+// pre-determined number of evenly spaced points along the curve (evenly spaced in the parametric
+// sense). It then connects the segments with a triangle strip. As is common in CCPR, clockwise-
+// winding triangles from the strip emit positive coverage, counter-clockwise triangles emit
+// negative, and we use SkBlendMode::kPlus.
+class CubicStrokeProcessor : public GrGeometryProcessor {
+public:
+ CubicStrokeProcessor() : GrGeometryProcessor(kCubicStrokeProcessor_ClassID) {
+ this->setInstanceAttributes(kInstanceAttribs, 3);
+#ifdef SK_DEBUG
+ using Instance = CubicStrokeInstance;
+ SkASSERT(this->instanceStride() == sizeof(Instance));
+#endif
+ }
+
+private:
+ const char* name() const override { return "CubicStrokeProcessor"; }
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+
+ static constexpr Attribute kInstanceAttribs[3] = {
+ {"X", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"Y", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"stroke_info", kFloat2_GrVertexAttribType, kFloat2_GrSLType}
+ };
+
+ class Impl : public GrGLSLGeometryProcessor {
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) override {}
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;
+ };
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new Impl();
+ }
+};
+
+void CubicStrokeProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+
+ varyingHandler->emitAttributes(args.fGP.cast<CubicStrokeProcessor>());
+
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ v->codeAppend ("float4x2 P = transpose(float2x4(X, Y));");
+ v->codeAppend ("float stroke_radius = stroke_info[0];");
+ v->codeAppend ("float num_segments = stroke_info[1];");
+
+    // Find the parametric T value at which we will emit our orthogonal line segment. We emit
+    // duplicate line segments at T=0 and T=1 for AA butt caps.
+ v->codeAppend ("float point_id = float(sk_VertexID/2);");
+ v->codeAppend ("float T = max((point_id - 1) / num_segments, 0);");
+ v->codeAppend ("T = (point_id >= num_segments + 1) ? 1 : T;"); // In case x/x !== 1.
+
+ // Use De Casteljau's algorithm to find the position and tangent for our orthogonal line
+ // segment. De Casteljau's is more numerically stable than evaluating the curve and derivative
+ // directly.
+ v->codeAppend ("float2 ab = mix(P[0], P[1], T);");
+ v->codeAppend ("float2 bc = mix(P[1], P[2], T);");
+ v->codeAppend ("float2 cd = mix(P[2], P[3], T);");
+ v->codeAppend ("float2 abc = mix(ab, bc, T);");
+ v->codeAppend ("float2 bcd = mix(bc, cd, T);");
+ v->codeAppend ("float2 position = mix(abc, bcd, T);");
+ v->codeAppend ("float2 tan = bcd - abc;");
+
+ // Find actual tangents for the corner cases when De Casteljau's yields tan=0. (We shouldn't
+ // encounter other numerically unstable cases where tan ~= 0, because GrCCStrokeGeometry snaps
+ // control points to endpoints in curves where they are almost equal.)
+ v->codeAppend ("if (0 == T && P[0] == P[1]) {");
+ v->codeAppend ( "tan = P[2] - P[0];");
+ v->codeAppend ("}");
+ v->codeAppend ("if (1 == T && P[2] == P[3]) {");
+ v->codeAppend ( "tan = P[3] - P[1];");
+ v->codeAppend ("}");
+ v->codeAppend ("tan = normalize(tan);");
+ v->codeAppend ("float2 n = float2(tan.y, -tan.x);");
+ v->codeAppend ("float nwidth = abs(n.x) + abs(n.y);");
+
+ // Outset the vertex position for stroke radius plus edge AA.
+ v->codeAppend ("float2 outset = n * (stroke_radius + nwidth/2);");
+ v->codeAppend ("position += (0 == (sk_VertexID & 1)) ? -outset : +outset;");
+
+ // Calculate the Manhattan distance from both edges, where distance=0 on the actual edge and
+ // distance=-.5 on the outset.
+ GrGLSLVarying coverages(kFloat3_GrSLType);
+ varyingHandler->addVarying("coverages", &coverages);
+ v->codeAppendf("%s.xy = float2(-.5, 2*stroke_radius / nwidth + .5);", coverages.vsOut());
+ v->codeAppendf("%s.xy = (0 == (sk_VertexID & 1)) ? %s.xy : %s.yx;",
+ coverages.vsOut(), coverages.vsOut(), coverages.vsOut());
+
+ // Adjust the orthogonal line segments on the endpoints so they straddle the actual endpoint
+ // at a Manhattan distance of .5 on either side.
+ v->codeAppend ("if (0 == point_id || num_segments+1 == point_id) {");
+ v->codeAppend ( "position -= tan*nwidth/2;");
+ v->codeAppend ("}");
+ v->codeAppend ("if (1 == point_id || num_segments+2 == point_id) {");
+ v->codeAppend ( "position += tan*nwidth/2;");
+ v->codeAppend ("}");
+
+ // Interpolate coverage for butt cap AA from 0 on the outer segment to 1 on the inner.
+ v->codeAppendf("%s.z = (0 == point_id || num_segments+2 == point_id) ? 0 : 1;",
+ coverages.vsOut());
+
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "position");
+ this->emitTransforms(v, varyingHandler, uniHandler, GrShaderVar("position", kFloat2_GrSLType),
+ SkMatrix::I(), args.fFPCoordTransformHandler);
+
+ // Use the 2 edge distances and interpolated butt cap AA to calculate fragment coverage.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+ f->codeAppendf("half2 edge_coverages = min(half2(%s.xy), .5);", coverages.fsIn());
+ f->codeAppend ("half coverage = edge_coverages.x + edge_coverages.y;");
+ f->codeAppendf("coverage *= half(%s.z);", coverages.fsIn()); // Butt cap AA.
+
+ // As is common for CCPR, clockwise-winding triangles from the strip emit positive coverage, and
+ // counter-clockwise triangles emit negative.
+ f->codeAppendf("%s = half4(sk_Clockwise ? +coverage : -coverage);", args.fOutputColor);
+
+ // This shader doesn't use the built-in Ganesh coverage.
+ f->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+}
+
+constexpr GrPrimitiveProcessor::Attribute CubicStrokeProcessor::kInstanceAttribs[];
+
+} // anonymous namespace
+
+void GrCCStroker::parseDeviceSpaceStroke(const SkPath& path, const SkPoint* deviceSpacePts,
+ const SkStrokeRec& stroke, float strokeDevWidth,
+ GrScissorTest scissorTest,
+ const SkIRect& clippedDevIBounds,
+ const SkIVector& devToAtlasOffset) {
+ SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() ||
+ SkStrokeRec::kHairline_Style == stroke.getStyle());
+ SkASSERT(!fInstanceBuffer);
+ SkASSERT(!path.isEmpty());
+
+ if (!fHasOpenBatch) {
+ fBatches.emplace_back(&fTalliesAllocator, *fInstanceCounts[(int)GrScissorTest::kDisabled],
+ fScissorSubBatches.count());
+ fInstanceCounts[(int)GrScissorTest::kDisabled] = fBatches.back().fNonScissorEndInstances;
+ fHasOpenBatch = true;
+ }
+
+ InstanceTallies* currStrokeEndIndices;
+ if (GrScissorTest::kEnabled == scissorTest) {
+ SkASSERT(fBatches.back().fEndScissorSubBatch == fScissorSubBatches.count());
+ fScissorSubBatches.emplace_back(&fTalliesAllocator,
+ *fInstanceCounts[(int)GrScissorTest::kEnabled],
+ clippedDevIBounds.makeOffset(devToAtlasOffset));
+ fBatches.back().fEndScissorSubBatch = fScissorSubBatches.count();
+ fInstanceCounts[(int)GrScissorTest::kEnabled] =
+ currStrokeEndIndices = fScissorSubBatches.back().fEndInstances;
+ } else {
+ currStrokeEndIndices = fBatches.back().fNonScissorEndInstances;
+ }
+
+ fGeometry.beginPath(stroke, strokeDevWidth, currStrokeEndIndices);
+
+ fPathInfos.push_back() = {devToAtlasOffset, strokeDevWidth/2, scissorTest};
+
+ int devPtsIdx = 0;
+ SkPath::Verb previousVerb = SkPath::kClose_Verb;
+
+ for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
+ SkASSERT(SkPath::kDone_Verb != previousVerb);
+ const SkPoint* P = &deviceSpacePts[devPtsIdx - 1];
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (devPtsIdx > 0 && SkPath::kClose_Verb != previousVerb) {
+ fGeometry.capContourAndExit();
+ }
+ fGeometry.moveTo(deviceSpacePts[devPtsIdx]);
+ ++devPtsIdx;
+ break;
+ case SkPath::kClose_Verb:
+ SkASSERT(SkPath::kClose_Verb != previousVerb);
+ fGeometry.closeContour();
+ break;
+ case SkPath::kLine_Verb:
+ SkASSERT(SkPath::kClose_Verb != previousVerb);
+ fGeometry.lineTo(P[1]);
+ ++devPtsIdx;
+ break;
+ case SkPath::kQuad_Verb:
+ SkASSERT(SkPath::kClose_Verb != previousVerb);
+ fGeometry.quadraticTo(P);
+ devPtsIdx += 2;
+ break;
+ case SkPath::kCubic_Verb: {
+ SkASSERT(SkPath::kClose_Verb != previousVerb);
+ fGeometry.cubicTo(P);
+ devPtsIdx += 3;
+ break;
+ }
+ case SkPath::kConic_Verb:
+ SkASSERT(SkPath::kClose_Verb != previousVerb);
+ SK_ABORT("Stroked conics not supported.");
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ }
+ previousVerb = verb;
+ }
+
+ if (devPtsIdx > 0 && SkPath::kClose_Verb != previousVerb) {
+ fGeometry.capContourAndExit();
+ }
+}
+
+// This class encapsulates the process of expanding ready-to-draw geometry from GrCCStrokeGeometry
+// directly into GPU instance buffers.
+class GrCCStroker::InstanceBufferBuilder {
+public:
+ InstanceBufferBuilder(GrOnFlushResourceProvider* onFlushRP, GrCCStroker* stroker) {
+ memcpy(fNextInstances, stroker->fBaseInstances, sizeof(fNextInstances));
+#ifdef SK_DEBUG
+ fEndInstances[0] = stroker->fBaseInstances[0] + *stroker->fInstanceCounts[0];
+ fEndInstances[1] = stroker->fBaseInstances[1] + *stroker->fInstanceCounts[1];
+#endif
+
+ int endConicsIdx = stroker->fBaseInstances[1].fConics +
+ stroker->fInstanceCounts[1]->fConics;
+ fInstanceBuffer = onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
+ endConicsIdx * sizeof(ConicInstance));
+ if (!fInstanceBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR stroke instance buffer.\n");
+ return;
+ }
+ fInstanceBufferData = fInstanceBuffer->map();
+ }
+
+ bool isMapped() const { return SkToBool(fInstanceBufferData); }
+
+ void updateCurrentInfo(const PathInfo& pathInfo) {
+ SkASSERT(this->isMapped());
+ fCurrDX = static_cast<float>(pathInfo.fDevToAtlasOffset.x());
+ fCurrDY = static_cast<float>(pathInfo.fDevToAtlasOffset.y());
+ fCurrStrokeRadius = pathInfo.fStrokeRadius;
+ fCurrNextInstances = &fNextInstances[(int)pathInfo.fScissorTest];
+ SkDEBUGCODE(fCurrEndInstances = &fEndInstances[(int)pathInfo.fScissorTest]);
+ }
+
+ void appendLinearStroke(const SkPoint endpts[2]) {
+ SkASSERT(this->isMapped());
+ this->appendLinearStrokeInstance().set(endpts, fCurrDX, fCurrDY, fCurrStrokeRadius);
+ }
+
+ void appendQuadraticStroke(const SkPoint P[3], int numLinearSegmentsLog2) {
+ SkASSERT(this->isMapped());
+ SkASSERT(numLinearSegmentsLog2 > 0);
+
+ Sk4f ptsT[2];
+ Sk2f p0 = Sk2f::Load(P);
+ Sk2f p1 = Sk2f::Load(P+1);
+ Sk2f p2 = Sk2f::Load(P+2);
+
+ // Convert the quadratic to cubic.
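+        // (Exact degree elevation: c1 = p0 + (2/3)(p1 - p0), c2 = p1 + (1/3)(p2 - p1).)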
+ Sk2f c1 = SkNx_fma(Sk2f(2/3.f), p1 - p0, p0);
+ Sk2f c2 = SkNx_fma(Sk2f(1/3.f), p2 - p1, p1);
+ Sk2f::Store4(ptsT, p0, c1, c2, p2);
+
+ this->appendCubicStrokeInstance(numLinearSegmentsLog2).set(
+ ptsT[0], ptsT[1], fCurrDX, fCurrDY, fCurrStrokeRadius, 1 << numLinearSegmentsLog2);
+ }
+
+ void appendCubicStroke(const SkPoint P[3], int numLinearSegmentsLog2) {
+ SkASSERT(this->isMapped());
+ SkASSERT(numLinearSegmentsLog2 > 0);
+ this->appendCubicStrokeInstance(numLinearSegmentsLog2).set(
+ P, fCurrDX, fCurrDY, fCurrStrokeRadius, 1 << numLinearSegmentsLog2);
+ }
+
+ void appendJoin(Verb joinVerb, const SkPoint& center, const SkVector& leftNorm,
+ const SkVector& rightNorm, float miterCapHeightOverWidth, float conicWeight) {
+ SkASSERT(this->isMapped());
+
+ Sk2f offset = Sk2f::Load(&center) + Sk2f(fCurrDX, fCurrDY);
+ Sk2f n0 = Sk2f::Load(&leftNorm);
+ Sk2f n1 = Sk2f::Load(&rightNorm);
+
+ // Identify the outer edge.
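+        // (cross[0] - cross[1] is the z-component of the 2D cross product n0 x n1.
+        // When it is negative, negate and swap the normals so the pair always winds
+        // with a non-negative cross product, keeping n0..n1 on the outer side.)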
+ Sk2f cross = n0 * SkNx_shuffle<1,0>(n1);
+ if (cross[0] < cross[1]) {
+ Sk2f tmp = n0;
+ n0 = -n1;
+ n1 = -tmp;
+ }
+
+ if (!GrCCStrokeGeometry::IsInternalJoinVerb(joinVerb)) {
+ // Normal joins are a triangle that connects the outer corners of two adjoining strokes.
+ this->appendTriangleInstance().set(
+ n1 * fCurrStrokeRadius, Sk2f(0, 0), n0 * fCurrStrokeRadius, offset,
+ TriangleInstance::Ordering::kXYTransposed);
+ if (Verb::kBevelJoin == joinVerb) {
+ return;
+ }
+ } else {
+ // Internal joins are coverage-counted, self-intersecting quadrilaterals that tie the
+            // four corners of two adjoining strokes together like a shoelace. Coverage is
+ // negative on the inside half. We implement this geometry with a pair of triangles.
+ this->appendTriangleInstance().set(
+ -n0 * fCurrStrokeRadius, n0 * fCurrStrokeRadius, n1 * fCurrStrokeRadius,
+ offset, TriangleInstance::Ordering::kXYTransposed);
+ this->appendTriangleInstance().set(
+ -n0 * fCurrStrokeRadius, n1 * fCurrStrokeRadius, -n1 * fCurrStrokeRadius,
+ offset, TriangleInstance::Ordering::kXYTransposed);
+ if (Verb::kInternalBevelJoin == joinVerb) {
+ return;
+ }
+ }
+
+ // For miter and round joins, we place an additional triangle cap on top of the bevel. This
+    // triangle is literal for miters and serves as the conic control points for round joins.
+ SkASSERT(miterCapHeightOverWidth >= 0 || SkScalarIsNaN(miterCapHeightOverWidth));
+ Sk2f base = n1 - n0;
+ Sk2f baseNorm = Sk2f(base[1], -base[0]);
+ Sk2f c = (n0 + n1) * .5f + baseNorm * miterCapHeightOverWidth;
+
+ if (Verb::kMiterJoin == joinVerb) {
+ this->appendTriangleInstance().set(
+ n0 * fCurrStrokeRadius, c * fCurrStrokeRadius, n1 * fCurrStrokeRadius, offset,
+ TriangleInstance::Ordering::kXYTransposed);
+ } else {
+ SkASSERT(Verb::kRoundJoin == joinVerb || Verb::kInternalRoundJoin == joinVerb);
+ this->appendConicInstance().setW(n0 * fCurrStrokeRadius, c * fCurrStrokeRadius,
+ n1 * fCurrStrokeRadius, offset, conicWeight);
+ if (Verb::kInternalRoundJoin == joinVerb) {
+ this->appendConicInstance().setW(-n1 * fCurrStrokeRadius, c * -fCurrStrokeRadius,
+ -n0 * fCurrStrokeRadius, offset, conicWeight);
+ }
+ }
+ }
+
+ void appendCap(Verb capType, const SkPoint& pt, const SkVector& norm) {
+ SkASSERT(this->isMapped());
+
+ Sk2f n = Sk2f::Load(&norm) * fCurrStrokeRadius;
+ Sk2f v = Sk2f(-n[1], n[0]);
+ Sk2f offset = Sk2f::Load(&pt) + Sk2f(fCurrDX, fCurrDY);
+
+ if (Verb::kSquareCap == capType) {
+ SkPoint endPts[2] = {{0, 0}, {v[0], v[1]}};
+ this->appendLinearStrokeInstance().set(endPts, offset[0], offset[1], fCurrStrokeRadius);
+ } else {
+ SkASSERT(Verb::kRoundCap == capType);
+ this->appendTriangleInstance().set(
+ n, v, -n, offset, TriangleInstance::Ordering::kXYTransposed);
+ this->appendConicInstance().setW(n, n + v, v, offset, SK_ScalarRoot2Over2);
+ this->appendConicInstance().setW(v, v - n, -n, offset, SK_ScalarRoot2Over2);
+ }
+ }
+
+ sk_sp<GrGpuBuffer> finish() {
+ SkASSERT(this->isMapped());
+ SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances)));
+ fInstanceBuffer->unmap();
+ fInstanceBufferData = nullptr;
+ SkASSERT(!this->isMapped());
+ return std::move(fInstanceBuffer);
+ }
+
+private:
+ LinearStrokeInstance& appendLinearStrokeInstance() {
+ int instanceIdx = fCurrNextInstances->fStrokes[0]++;
+ SkASSERT(instanceIdx < fCurrEndInstances->fStrokes[0]);
+
+ return reinterpret_cast<LinearStrokeInstance*>(fInstanceBufferData)[instanceIdx];
+ }
+
+ CubicStrokeInstance& appendCubicStrokeInstance(int numLinearSegmentsLog2) {
+ SkASSERT(numLinearSegmentsLog2 > 0);
+ SkASSERT(numLinearSegmentsLog2 <= kMaxNumLinearSegmentsLog2);
+
+ int instanceIdx = fCurrNextInstances->fStrokes[numLinearSegmentsLog2]++;
+ SkASSERT(instanceIdx < fCurrEndInstances->fStrokes[numLinearSegmentsLog2]);
+
+ return reinterpret_cast<CubicStrokeInstance*>(fInstanceBufferData)[instanceIdx];
+ }
+
+ TriangleInstance& appendTriangleInstance() {
+ int instanceIdx = fCurrNextInstances->fTriangles++;
+ SkASSERT(instanceIdx < fCurrEndInstances->fTriangles);
+
+ return reinterpret_cast<TriangleInstance*>(fInstanceBufferData)[instanceIdx];
+ }
+
+ ConicInstance& appendConicInstance() {
+ int instanceIdx = fCurrNextInstances->fConics++;
+ SkASSERT(instanceIdx < fCurrEndInstances->fConics);
+
+ return reinterpret_cast<ConicInstance*>(fInstanceBufferData)[instanceIdx];
+ }
+
+ float fCurrDX, fCurrDY;
+ float fCurrStrokeRadius;
+ InstanceTallies* fCurrNextInstances;
+ SkDEBUGCODE(const InstanceTallies* fCurrEndInstances);
+
+ sk_sp<GrGpuBuffer> fInstanceBuffer;
+ void* fInstanceBufferData = nullptr;
+ InstanceTallies fNextInstances[2];
+ SkDEBUGCODE(InstanceTallies fEndInstances[2]);
+};
+
+GrCCStroker::BatchID GrCCStroker::closeCurrentBatch() {
+ if (!fHasOpenBatch) {
+ return kEmptyBatchID;
+ }
+ int start = (fBatches.count() < 2) ? 0 : fBatches[fBatches.count() - 2].fEndScissorSubBatch;
+ int end = fBatches.back().fEndScissorSubBatch;
+ fMaxNumScissorSubBatches = SkTMax(fMaxNumScissorSubBatches, end - start);
+ fHasOpenBatch = false;
+ return fBatches.count() - 1;
+}
+
+bool GrCCStroker::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
+ SkASSERT(!fInstanceBuffer);
+ SkASSERT(!fHasOpenBatch); // Call closeCurrentBatch() first.
+
+ // Here we layout a single instance buffer to share with every internal batch.
+ //
+ // Rather than place each instance array in its own GPU buffer, we allocate a single
+ // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
+ // our draw calls to direct the GPU to the applicable elements within a given array.
+ fBaseInstances[0].fStrokes[0] = 0;
+ fBaseInstances[1].fStrokes[0] = fInstanceCounts[0]->fStrokes[0];
+ int endLinearStrokesIdx = fBaseInstances[1].fStrokes[0] + fInstanceCounts[1]->fStrokes[0];
+
+ int cubicStrokesIdx = GrSizeDivRoundUp(endLinearStrokesIdx * sizeof(LinearStrokeInstance),
+ sizeof(CubicStrokeInstance));
+ for (int i = 1; i <= kMaxNumLinearSegmentsLog2; ++i) {
+ for (int j = 0; j < kNumScissorModes; ++j) {
+ fBaseInstances[j].fStrokes[i] = cubicStrokesIdx;
+ cubicStrokesIdx += fInstanceCounts[j]->fStrokes[i];
+ }
+ }
+
+ int trianglesIdx = GrSizeDivRoundUp(cubicStrokesIdx * sizeof(CubicStrokeInstance),
+ sizeof(TriangleInstance));
+ fBaseInstances[0].fTriangles = trianglesIdx;
+ fBaseInstances[1].fTriangles =
+ fBaseInstances[0].fTriangles + fInstanceCounts[0]->fTriangles;
+ int endTrianglesIdx =
+ fBaseInstances[1].fTriangles + fInstanceCounts[1]->fTriangles;
+
+ int conicsIdx =
+ GrSizeDivRoundUp(endTrianglesIdx * sizeof(TriangleInstance), sizeof(ConicInstance));
+ fBaseInstances[0].fConics = conicsIdx;
+ fBaseInstances[1].fConics = fBaseInstances[0].fConics + fInstanceCounts[0]->fConics;
+
+ InstanceBufferBuilder builder(onFlushRP, this);
+ if (!builder.isMapped()) {
+ return false; // Buffer allocation failed.
+ }
+
+ // Now parse the GrCCStrokeGeometry and expand it into the instance buffer.
+ int pathIdx = 0;
+ int ptsIdx = 0;
+ int paramsIdx = 0;
+ int normalsIdx = 0;
+
+ const SkTArray<GrCCStrokeGeometry::Parameter, true>& params = fGeometry.params();
+ const SkTArray<SkPoint, true>& pts = fGeometry.points();
+ const SkTArray<SkVector, true>& normals = fGeometry.normals();
+
+    float miterCapHeightOverWidth = 0, conicWeight = 0;
+
+ for (Verb verb : fGeometry.verbs()) {
+ switch (verb) {
+ case Verb::kBeginPath:
+ builder.updateCurrentInfo(fPathInfos[pathIdx]);
+ ++pathIdx;
+ continue;
+
+ case Verb::kLinearStroke:
+ builder.appendLinearStroke(&pts[ptsIdx]);
+ ++ptsIdx;
+ continue;
+ case Verb::kQuadraticStroke:
+ builder.appendQuadraticStroke(&pts[ptsIdx],
+ params[paramsIdx++].fNumLinearSegmentsLog2);
+ ptsIdx += 2;
+ ++normalsIdx;
+ continue;
+ case Verb::kCubicStroke:
+ builder.appendCubicStroke(&pts[ptsIdx], params[paramsIdx++].fNumLinearSegmentsLog2);
+ ptsIdx += 3;
+ ++normalsIdx;
+ continue;
+
+ case Verb::kRoundJoin:
+ case Verb::kInternalRoundJoin:
+ conicWeight = params[paramsIdx++].fConicWeight;
+ // fallthru
+ case Verb::kMiterJoin:
+ miterCapHeightOverWidth = params[paramsIdx++].fMiterCapHeightOverWidth;
+ // fallthru
+ case Verb::kBevelJoin:
+ case Verb::kInternalBevelJoin:
+ builder.appendJoin(verb, pts[ptsIdx], normals[normalsIdx], normals[normalsIdx + 1],
+ miterCapHeightOverWidth, conicWeight);
+ ++normalsIdx;
+ continue;
+
+ case Verb::kSquareCap:
+ case Verb::kRoundCap:
+ builder.appendCap(verb, pts[ptsIdx], normals[normalsIdx]);
+ continue;
+
+ case Verb::kEndContour:
+ ++ptsIdx;
+ ++normalsIdx;
+ continue;
+ }
+ SK_ABORT("Invalid CCPR stroke element.");
+ }
+
+ fInstanceBuffer = builder.finish();
+ SkASSERT(fPathInfos.count() == pathIdx);
+ SkASSERT(pts.count() == ptsIdx);
+ SkASSERT(normals.count() == normalsIdx);
+
+ fMeshesBuffer.reserve((1 + fMaxNumScissorSubBatches) * kMaxNumLinearSegmentsLog2);
+ fScissorsBuffer.reserve((1 + fMaxNumScissorSubBatches) * kMaxNumLinearSegmentsLog2);
+ return true;
+}
+
+void GrCCStroker::drawStrokes(GrOpFlushState* flushState, GrCCCoverageProcessor* proc,
+ BatchID batchID, const SkIRect& drawBounds) const {
+ using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;
+ SkASSERT(fInstanceBuffer);
+
+ if (kEmptyBatchID == batchID) {
+ return;
+ }
+ const Batch& batch = fBatches[batchID];
+ int startScissorSubBatch = (!batchID) ? 0 : fBatches[batchID - 1].fEndScissorSubBatch;
+
+ const InstanceTallies* startIndices[2];
+ startIndices[(int)GrScissorTest::kDisabled] = (!batchID)
+ ? &fZeroTallies : fBatches[batchID - 1].fNonScissorEndInstances;
+ startIndices[(int)GrScissorTest::kEnabled] = (!startScissorSubBatch)
+ ? &fZeroTallies : fScissorSubBatches[startScissorSubBatch - 1].fEndInstances;
+
+ GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
+ flushState->drawOpArgs().outputSwizzle());
+
+ // Draw linear strokes.
+ this->appendStrokeMeshesToBuffers(0, batch, startIndices, startScissorSubBatch, drawBounds);
+ if (!fMeshesBuffer.empty()) {
+ LinearStrokeProcessor linearProc;
+ this->flushBufferedMeshesAsStrokes(linearProc, flushState, pipeline, drawBounds);
+ }
+
+ // Draw cubic strokes. (Quadratics were converted to cubics for GPU processing.)
+ for (int i = 1; i <= kMaxNumLinearSegmentsLog2; ++i) {
+ this->appendStrokeMeshesToBuffers(i, batch, startIndices, startScissorSubBatch, drawBounds);
+ }
+ if (!fMeshesBuffer.empty()) {
+ CubicStrokeProcessor cubicProc;
+ this->flushBufferedMeshesAsStrokes(cubicProc, flushState, pipeline, drawBounds);
+ }
+
+ // Draw triangles.
+ proc->reset(PrimitiveType::kTriangles, flushState->resourceProvider());
+ this->drawConnectingGeometry<&InstanceTallies::fTriangles>(
+ flushState, pipeline, *proc, batch, startIndices, startScissorSubBatch, drawBounds);
+
+ // Draw conics.
+ proc->reset(PrimitiveType::kConics, flushState->resourceProvider());
+ this->drawConnectingGeometry<&InstanceTallies::fConics>(
+ flushState, pipeline, *proc, batch, startIndices, startScissorSubBatch, drawBounds);
+}
+
+void GrCCStroker::appendStrokeMeshesToBuffers(int numSegmentsLog2, const Batch& batch,
+ const InstanceTallies* startIndices[2],
+ int startScissorSubBatch,
+ const SkIRect& drawBounds) const {
+ // Linear strokes draw a quad. Cubic strokes emit a strip with normals at "numSegments"
+ // evenly-spaced points along the curve, plus one more for the final endpoint, plus two more for
+ // AA butt caps. (i.e., 2 vertices * (numSegments + 3).)
+ int numStripVertices = (0 == numSegmentsLog2) ? 4 : ((1 << numSegmentsLog2) + 3) * 2;
+
+ // Append non-scissored meshes.
+ int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].fStrokes[numSegmentsLog2];
+ int startIdx = startIndices[(int)GrScissorTest::kDisabled]->fStrokes[numSegmentsLog2];
+ int endIdx = batch.fNonScissorEndInstances->fStrokes[numSegmentsLog2];
+ SkASSERT(endIdx >= startIdx);
+ if (int instanceCount = endIdx - startIdx) {
+ GrMesh& mesh = fMeshesBuffer.emplace_back(GrPrimitiveType::kTriangleStrip);
+ mesh.setInstanced(fInstanceBuffer, instanceCount, baseInstance + startIdx,
+ numStripVertices);
+ fScissorsBuffer.push_back(drawBounds);
+ }
+
+ // Append scissored meshes.
+ baseInstance = fBaseInstances[(int)GrScissorTest::kEnabled].fStrokes[numSegmentsLog2];
+ startIdx = startIndices[(int)GrScissorTest::kEnabled]->fStrokes[numSegmentsLog2];
+ for (int i = startScissorSubBatch; i < batch.fEndScissorSubBatch; ++i) {
+ const ScissorSubBatch& subBatch = fScissorSubBatches[i];
+ endIdx = subBatch.fEndInstances->fStrokes[numSegmentsLog2];
+ SkASSERT(endIdx >= startIdx);
+ if (int instanceCount = endIdx - startIdx) {
+ GrMesh& mesh = fMeshesBuffer.emplace_back(GrPrimitiveType::kTriangleStrip);
+ mesh.setInstanced(fInstanceBuffer, instanceCount, baseInstance + startIdx,
+ numStripVertices);
+ fScissorsBuffer.push_back(subBatch.fScissor);
+ startIdx = endIdx;
+ }
+ }
+}
+
+void GrCCStroker::flushBufferedMeshesAsStrokes(const GrPrimitiveProcessor& processor,
+ GrOpFlushState* flushState,
+ const GrPipeline& pipeline,
+ const SkIRect& drawBounds) const {
+ SkASSERT(fMeshesBuffer.count() == fScissorsBuffer.count());
+ GrPipeline::DynamicStateArrays dynamicStateArrays;
+ dynamicStateArrays.fScissorRects = fScissorsBuffer.begin();
+
+ GrProgramInfo programInfo(flushState->drawOpArgs().numSamples(),
+ flushState->drawOpArgs().origin(),
+ pipeline,
+ processor,
+ nullptr,
+ &dynamicStateArrays, 0);
+
+ flushState->opsRenderPass()->draw(programInfo,
+ fMeshesBuffer.begin(), fMeshesBuffer.count(),
+ SkRect::Make(drawBounds));
+ // Don't call reset(), as that also resets the reserve count.
+ fMeshesBuffer.pop_back_n(fMeshesBuffer.count());
+ fScissorsBuffer.pop_back_n(fScissorsBuffer.count());
+}
+
+template<int GrCCStrokeGeometry::InstanceTallies::* InstanceType>
+void GrCCStroker::drawConnectingGeometry(GrOpFlushState* flushState, const GrPipeline& pipeline,
+ const GrCCCoverageProcessor& processor,
+ const Batch& batch, const InstanceTallies* startIndices[2],
+ int startScissorSubBatch,
+ const SkIRect& drawBounds) const {
+ // Append non-scissored meshes.
+ int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].*InstanceType;
+ int startIdx = startIndices[(int)GrScissorTest::kDisabled]->*InstanceType;
+ int endIdx = batch.fNonScissorEndInstances->*InstanceType;
+ SkASSERT(endIdx >= startIdx);
+ if (int instanceCount = endIdx - startIdx) {
+ processor.appendMesh(fInstanceBuffer, instanceCount, baseInstance + startIdx,
+ &fMeshesBuffer);
+ fScissorsBuffer.push_back(drawBounds);
+ }
+
+ // Append scissored meshes.
+ baseInstance = fBaseInstances[(int)GrScissorTest::kEnabled].*InstanceType;
+ startIdx = startIndices[(int)GrScissorTest::kEnabled]->*InstanceType;
+ for (int i = startScissorSubBatch; i < batch.fEndScissorSubBatch; ++i) {
+ const ScissorSubBatch& subBatch = fScissorSubBatches[i];
+ endIdx = subBatch.fEndInstances->*InstanceType;
+ SkASSERT(endIdx >= startIdx);
+ if (int instanceCount = endIdx - startIdx) {
+ processor.appendMesh(fInstanceBuffer, instanceCount, baseInstance + startIdx,
+ &fMeshesBuffer);
+ fScissorsBuffer.push_back(subBatch.fScissor);
+ startIdx = endIdx;
+ }
+ }
+
+ // Flush the geometry.
+ if (!fMeshesBuffer.empty()) {
+ SkASSERT(fMeshesBuffer.count() == fScissorsBuffer.count());
+ processor.draw(flushState, pipeline, fScissorsBuffer.begin(), fMeshesBuffer.begin(),
+ fMeshesBuffer.count(), SkRect::Make(drawBounds));
+ // Don't call reset(), as that also resets the reserve count.
+ fMeshesBuffer.pop_back_n(fMeshesBuffer.count());
+ fScissorsBuffer.pop_back_n(fScissorsBuffer.count());
+ }
+}
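
Note: prepareToDraw() above packs several differently-sized instance arrays into one GPU
megabuffer; each array begins at the first index, in units of its own struct size, whose byte
offset clears the end of the previous array. A hedged sketch of just that layout arithmetic
(struct sizes are passed in here; the real code uses sizeof(LinearStrokeInstance) and friends):

    #include <cstddef>

    // Round-up division, as in GrSizeDivRoundUp().
    constexpr size_t divRoundUp(size_t n, size_t d) { return (n + d - 1) / d; }

    struct SharedLayout {
        size_t firstCubic;     // base index, in CubicStrokeInstance units
        size_t firstTriangle;  // base index, in TriangleInstance units
        size_t firstConic;     // base index, in ConicInstance units
    };

    SharedLayout layoutSharedBuffer(size_t numLinear, size_t numCubic, size_t numTris,
                                    size_t szLinear, size_t szCubic, size_t szTri,
                                    size_t szConic) {
        SharedLayout l;
        l.firstCubic    = divRoundUp(numLinear * szLinear, szCubic);
        l.firstTriangle = divRoundUp((l.firstCubic + numCubic) * szCubic, szTri);
        l.firstConic    = divRoundUp((l.firstTriangle + numTris) * szTri, szConic);
        return l;
    }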
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.h b/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.h
new file mode 100644
index 0000000000..985ac38400
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCCStroker.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCStroker_DEFINED
+#define GrCCStroker_DEFINED
+
+#include "include/private/SkNx.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/ccpr/GrCCStrokeGeometry.h"
+
+class GrGpuBuffer;
+class GrCCCoverageProcessor;
+class GrOnFlushResourceProvider;
+class GrOpFlushState;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class SkMatrix;
+class SkPath;
+class SkStrokeRec;
+
+/**
+ * This class parses stroked SkPaths into a GPU instance buffer, then issues calls to draw their
+ * coverage counts.
+ */
+class GrCCStroker {
+public:
+ GrCCStroker(int numPaths, int numSkPoints, int numSkVerbs)
+ : fGeometry(numSkPoints, numSkVerbs), fPathInfos(numPaths) {}
+
+ // Parses a device-space SkPath into the current batch, using the SkPath's original verbs with
+ // 'deviceSpacePts', and the SkStrokeRec's original settings with 'strokeDevWidth'. Accepts an
+ // optional post-device-space translate for placement in an atlas.
+ //
+ // Strokes intended as hairlines must have a strokeDevWidth of 1. Non-hairline strokes can only
+ // be drawn with rigid body transforms; affine transformation of the stroke lines themselves is
+ // not yet supported.
+ void parseDeviceSpaceStroke(const SkPath&, const SkPoint* deviceSpacePts, const SkStrokeRec&,
+ float strokeDevWidth, GrScissorTest,
+ const SkIRect& clippedDevIBounds,
+ const SkIVector& devToAtlasOffset);
+
+ using BatchID = int;
+
+ // Compiles the outstanding parsed paths into a batch, and returns an ID that can be used to
+ // draw their strokes in the future.
+ BatchID closeCurrentBatch();
+
+ // Builds an internal GPU buffer and prepares for calls to drawStrokes(). Caller must close the
+    // current batch before calling this method, and cannot parse new paths after.
+ bool prepareToDraw(GrOnFlushResourceProvider*);
+
+ // Called after prepareToDraw(). Draws the given batch of path strokes.
+ void drawStrokes(
+ GrOpFlushState*, GrCCCoverageProcessor*, BatchID, const SkIRect& drawBounds) const;
+
+private:
+ static constexpr int kNumScissorModes = 2;
+ static constexpr BatchID kEmptyBatchID = -1;
+ using Verb = GrCCStrokeGeometry::Verb;
+ using InstanceTallies = GrCCStrokeGeometry::InstanceTallies;
+
+ // Every kBeginPath verb has a corresponding PathInfo entry.
+ struct PathInfo {
+ SkIVector fDevToAtlasOffset;
+ float fStrokeRadius;
+ GrScissorTest fScissorTest;
+ };
+
+ // Defines a sub-batch of stroke instances that have a scissor test and the same scissor rect.
+ // Start indices are deduced by looking at the previous ScissorSubBatch.
+ struct ScissorSubBatch {
+ ScissorSubBatch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startIndices,
+ const SkIRect& scissor)
+ : fEndInstances(&alloc->emplace_back(startIndices)), fScissor(scissor) {}
+ InstanceTallies* fEndInstances;
+ SkIRect fScissor;
+ };
+
+ // Defines a batch of stroke instances that can be drawn with drawStrokes(). Start indices are
+ // deduced by looking at the previous Batch in the list.
+ struct Batch {
+ Batch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startNonScissorIndices,
+ int startScissorSubBatch)
+ : fNonScissorEndInstances(&alloc->emplace_back(startNonScissorIndices))
+ , fEndScissorSubBatch(startScissorSubBatch) {}
+ InstanceTallies* fNonScissorEndInstances;
+ int fEndScissorSubBatch;
+ };
+
+ class InstanceBufferBuilder;
+
+ void appendStrokeMeshesToBuffers(int numSegmentsLog2, const Batch&,
+ const InstanceTallies* startIndices[2],
+ int startScissorSubBatch, const SkIRect& drawBounds) const;
+    void flushBufferedMeshesAsStrokes(const GrPrimitiveProcessor&, GrOpFlushState*,
+                                      const GrPipeline&, const SkIRect& drawBounds) const;
+
+ template<int GrCCStrokeGeometry::InstanceTallies::* InstanceType>
+ void drawConnectingGeometry(GrOpFlushState*, const GrPipeline&,
+ const GrCCCoverageProcessor&, const Batch&,
+ const InstanceTallies* startIndices[2], int startScissorSubBatch,
+ const SkIRect& drawBounds) const;
+
+ GrCCStrokeGeometry fGeometry;
+ SkSTArray<32, PathInfo> fPathInfos;
+ SkSTArray<32, Batch> fBatches;
+ SkSTArray<32, ScissorSubBatch> fScissorSubBatches;
+ int fMaxNumScissorSubBatches = 0;
+ bool fHasOpenBatch = false;
+
+ const InstanceTallies fZeroTallies = InstanceTallies();
+ GrSTAllocator<128, InstanceTallies> fTalliesAllocator;
+ const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies};
+
+ sk_sp<GrGpuBuffer> fInstanceBuffer;
+ // The indices stored in batches are relative to these base instances.
+ InstanceTallies fBaseInstances[kNumScissorModes];
+
+ mutable SkSTArray<32, GrMesh> fMeshesBuffer;
+ mutable SkSTArray<32, SkIRect> fScissorsBuffer;
+};
+
+#endif
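
Note: the header prescribes a strict call order: parse strokes, close the batch, build buffers,
then draw. A hedged usage sketch; all the flush-time objects are assumed to be supplied by
Ganesh's onFlush machinery, and drawOneBatch() itself is hypothetical:

    // Sketch of one batch's lifecycle through the GrCCStroker API above.
    void drawOneBatch(GrCCStroker* stroker, GrOnFlushResourceProvider* onFlushRP,
                      GrOpFlushState* flushState, GrCCCoverageProcessor* proc,
                      const SkPath& path, const SkPoint* devPts, const SkStrokeRec& stroke,
                      float strokeDevWidth, const SkIRect& ibounds) {
        stroker->parseDeviceSpaceStroke(path, devPts, stroke, strokeDevWidth,
                                        GrScissorTest::kDisabled, ibounds,
                                        SkIVector::Make(0, 0));
        GrCCStroker::BatchID batchID = stroker->closeCurrentBatch();
        if (!stroker->prepareToDraw(onFlushRP)) {
            return;  // instance buffer allocation failed
        }
        stroker->drawStrokes(flushState, proc, batchID, ibounds);
    }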
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
new file mode 100644
index 0000000000..4e86d7e071
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/ccpr/GrCCClipProcessor.h"
+#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
+#include "src/gpu/ccpr/GrCCPathCache.h"
+
+using PathInstance = GrCCPathProcessor::Instance;
+
+bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType* coverageType) {
+ const GrShaderCaps& shaderCaps = *caps.shaderCaps();
+ GrBackendFormat defaultA8Format = caps.getDefaultBackendFormat(GrColorType::kAlpha_8,
+ GrRenderable::kYes);
+ if (caps.driverBlacklistCCPR() || !shaderCaps.integerSupport() ||
+ !caps.instanceAttribSupport() || !shaderCaps.floatIs32Bits() ||
+ GrCaps::kNone_MapFlags == caps.mapBufferFlags() ||
+ !defaultA8Format.isValid() || // This checks both texturable and renderable
+ !caps.halfFloatVertexAttributeSupport()) {
+ return false;
+ }
+
+ GrBackendFormat defaultAHalfFormat = caps.getDefaultBackendFormat(GrColorType::kAlpha_F16,
+ GrRenderable::kYes);
+ if (caps.allowCoverageCounting() &&
+ defaultAHalfFormat.isValid()) { // This checks both texturable and renderable
+ if (coverageType) {
+ *coverageType = CoverageType::kFP16_CoverageCount;
+ }
+ return true;
+ }
+
+ if (!caps.driverBlacklistMSAACCPR() &&
+ caps.internalMultisampleCount(defaultA8Format) > 1 &&
+ caps.sampleLocationsSupport() &&
+ shaderCaps.sampleVariablesStencilSupport()) {
+ if (coverageType) {
+ *coverageType = CoverageType::kA8_Multisample;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
+ const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+ CoverageType coverageType;
+ if (IsSupported(caps, &coverageType)) {
+ return sk_sp<GrCoverageCountingPathRenderer>(new GrCoverageCountingPathRenderer(
+ coverageType, allowCaching, contextUniqueID));
+ }
+ return nullptr;
+}
+
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(
+ CoverageType coverageType, AllowCaching allowCaching, uint32_t contextUniqueID)
+ : fCoverageType(coverageType) {
+ if (AllowCaching::kYes == allowCaching) {
+ fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
+ }
+}
+
+GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
+ auto it = fPendingPaths.find(opsTaskID);
+ if (fPendingPaths.end() == it) {
+ sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
+ it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
+ }
+ return it->second.get();
+}
+
+GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
+ const CanDrawPathArgs& args) const {
+ const GrShape& shape = *args.fShape;
+ // We use "kCoverage", or analytic AA, no mater what the coverage type of our atlas: Even if the
+ // atlas is multisampled, that resolves into analytic coverage before we draw the path to the
+ // main canvas.
+ if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
+ args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
+ return CanDrawPath::kNo;
+ }
+
+ SkPath path;
+ shape.asPath(&path);
+
+ const SkStrokeRec& stroke = shape.style().strokeRec();
+ switch (stroke.getStyle()) {
+ case SkStrokeRec::kFill_Style: {
+ SkRect devBounds;
+ args.fViewMatrix->mapRect(&devBounds, path.getBounds());
+
+ SkIRect clippedIBounds;
+ devBounds.roundOut(&clippedIBounds);
+ if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
+ // The path is completely clipped away. Our code will eventually notice this before
+ // doing any real work.
+ return CanDrawPath::kYes;
+ }
+
+ int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
+ if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
+ // This is a complicated path that has more vertices than pixels! Let's let the SW
+ // renderer have this one: It will probably be faster and a bitmap will require less
+ // total memory on the GPU than CCPR instance buffers would for the raw path data.
+ return CanDrawPath::kNo;
+ }
+
+ if (numPixels > 256 * 256) {
+ // Large paths can blow up the atlas fast. And they are not ideal for a two-pass
+ // rendering algorithm. Give the simpler direct renderers a chance before we commit
+ // to drawing it.
+ return CanDrawPath::kAsBackup;
+ }
+
+ if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
+ // Complex paths do better cached in an SDF, if the renderer will accept them.
+ return CanDrawPath::kAsBackup;
+ }
+
+ return CanDrawPath::kYes;
+ }
+
+ case SkStrokeRec::kStroke_Style:
+ if (!args.fViewMatrix->isSimilarity()) {
+                // The stroker currently only supports rigid-body transforms for the stroke lines
+ // themselves. This limitation doesn't affect hairlines since their stroke lines are
+ // defined relative to device space.
+ return CanDrawPath::kNo;
+ }
+ // fallthru
+ case SkStrokeRec::kHairline_Style: {
+ if (CoverageType::kFP16_CoverageCount != fCoverageType) {
+ // Stroking is not yet supported in MSAA atlas mode.
+ return CanDrawPath::kNo;
+ }
+ float inflationRadius;
+ GetStrokeDevWidth(*args.fViewMatrix, stroke, &inflationRadius);
+ if (!(inflationRadius <= kMaxBoundsInflationFromStroke)) {
+ // Let extremely wide strokes be converted to fill paths and drawn by the CCPR
+                // filler instead. (The condition is phrased negatively so it also catches r=NaN.)
+ return CanDrawPath::kNo;
+ }
+ SkASSERT(!SkScalarIsNaN(inflationRadius));
+ if (SkPathPriv::ConicWeightCnt(path)) {
+ // The stroker does not support conics yet.
+ return CanDrawPath::kNo;
+ }
+ return CanDrawPath::kYes;
+ }
+
+ case SkStrokeRec::kStrokeAndFill_Style:
+ return CanDrawPath::kNo;
+ }
+
+ SK_ABORT("Invalid stroke style.");
+}
+
+bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ SkASSERT(!fFlushing);
+
+ SkIRect clipIBounds;
+ GrRenderTargetContext* rtc = args.fRenderTargetContext;
+ args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);
+
+ auto op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
+ std::move(args.fPaint));
+ this->recordOp(std::move(op), args);
+ return true;
+}
+
+void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
+ const DrawPathArgs& args) {
+ if (op) {
+ auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
+ op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
+ sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
+ };
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
+ addToOwningPerOpsTaskPaths);
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ const GrCaps& caps) {
+ SkASSERT(!fFlushing);
+
+ uint32_t key = deviceSpacePath.getGenerationID();
+ if (CoverageType::kA8_Multisample == fCoverageType) {
+ // We only need to consider fill rule in MSAA mode. In coverage count mode Even/Odd and
+ // Nonzero both reference the same coverage count mask.
+ key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
+ }
+ GrCCClipPath& clipPath =
+ this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
+ if (!clipPath.isInitialized()) {
+ // This ClipPath was just created during lookup. Initialize it.
+ const SkRect& pathDevBounds = deviceSpacePath.getBounds();
+ if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
+ // The path is too large. Crop it or analytic AA can run out of fp32 precision.
+ SkPath croppedPath;
+ int maxRTSize = caps.maxRenderTargetSize();
+ CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
+ clipPath.init(croppedPath, accessRect, fCoverageType, caps);
+ } else {
+ clipPath.init(deviceSpacePath, accessRect, fCoverageType, caps);
+ }
+ } else {
+ clipPath.addAccess(accessRect);
+ }
+
+ auto isCoverageCount = GrCCClipProcessor::IsCoverageCount(
+ CoverageType::kFP16_CoverageCount == fCoverageType);
+ auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
+ !clipPath.pathDevIBounds().contains(accessRect));
+ return skstd::make_unique<GrCCClipProcessor>(&clipPath, isCoverageCount, mustCheckBounds);
+}
+
+void GrCoverageCountingPathRenderer::preFlush(
+ GrOnFlushResourceProvider* onFlushRP, const uint32_t* opsTaskIDs, int numOpsTaskIDs) {
+ using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
+ SkASSERT(!fFlushing);
+ SkASSERT(fFlushingPaths.empty());
+ SkDEBUGCODE(fFlushing = true);
+
+ if (fPathCache) {
+ fPathCache->doPreFlushProcessing();
+ }
+
+ if (fPendingPaths.empty()) {
+ return; // Nothing to draw.
+ }
+
+ GrCCPerFlushResourceSpecs specs;
+ int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
+ specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
+ SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
+ specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
+ specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
+
+ // Move the per-opsTask paths that are about to be flushed from fPendingPaths to fFlushingPaths,
+ // and count them up so we can preallocate buffers.
+ fFlushingPaths.reserve(numOpsTaskIDs);
+ for (int i = 0; i < numOpsTaskIDs; ++i) {
+ auto iter = fPendingPaths.find(opsTaskIDs[i]);
+ if (fPendingPaths.end() == iter) {
+ continue; // No paths on this opsTask.
+ }
+
+ fFlushingPaths.push_back(std::move(iter->second));
+ fPendingPaths.erase(iter);
+
+ for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
+ op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
+ }
+ for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
+ clipsIter.second.accountForOwnPath(&specs);
+ }
+ }
+
+ if (specs.isEmpty()) {
+ return; // Nothing to draw.
+ }
+
+ // Determine if there are enough reusable paths from last flush for it to be worth our time to
+ // copy them to cached atlas(es).
+ int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
+ specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
+ auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
+ specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
+ if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
+ specs.cancelCopies();
+ }
+
+ auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, fCoverageType, specs);
+ if (!resources->isMapped()) {
+ return; // Some allocation failed.
+ }
+
+ // Layout the atlas(es) and parse paths.
+ for (const auto& flushingPaths : fFlushingPaths) {
+ for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
+ op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
+ }
+ for (auto& clipsIter : flushingPaths->fClipPaths) {
+ clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
+ }
+ }
+
+ if (fPathCache) {
+ // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
+        // the underlying texture objects can be freed up and reused for the next atlases.
+ fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
+ }
+
+ // Allocate resources and then render the atlas(es).
+ if (!resources->finalize(onFlushRP)) {
+ return;
+ }
+
+ // Commit flushing paths to the resources once they are successfully completed.
+ for (auto& flushingPaths : fFlushingPaths) {
+ SkASSERT(!flushingPaths->fFlushResources);
+ flushingPaths->fFlushResources = resources;
+ }
+}
+
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs) {
+ SkASSERT(fFlushing);
+
+ if (!fFlushingPaths.empty()) {
+ // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
+ // resources manually.
+ for (auto& flushingPaths : fFlushingPaths) {
+ flushingPaths->fFlushResources = nullptr;
+ }
+
+ // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
+ fFlushingPaths.reset();
+ }
+
+ SkDEBUGCODE(fFlushing = false);
+}
+
+void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
+ GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
+ if (fPathCache) {
+ fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
+ }
+}
+
+void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
+ SkPath* out) {
+ SkPath cropboxPath;
+ cropboxPath.addRect(SkRect::Make(cropbox));
+ if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
+ // This can fail if the PathOps encounter NaN or infinities.
+ out->reset();
+ }
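+    // The cropped output is a transient copy, so mark it volatile to hint that caches should not
+    // retain per-path acceleration data for it.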
+ out->setIsVolatile(true);
+}
+
+float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
+ const SkStrokeRec& stroke,
+ float* inflationRadius) {
+ float strokeDevWidth;
+ if (stroke.isHairlineStyle()) {
+ strokeDevWidth = 1;
+ } else {
+ SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
+ SkASSERT(m.isSimilarity()); // Otherwise matrixScaleFactor = m.getMaxScale().
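+        // For a similarity transform both matrix columns have the same length, so the length of
+        // the first column (scaleX, skewY) is the transform's uniform scale factor.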
+ float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
+ strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
+ }
+ if (inflationRadius) {
+ // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
+ // wide, we may inflate it to 1px and instead reduce the opacity.
+ *inflationRadius = SkStrokeRec::GetInflationRadius(
+ stroke.getJoin(), stroke.getMiter(), stroke.getCap(), SkTMax(strokeDevWidth, 1.f));
+ }
+ return strokeDevWidth;
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
new file mode 100644
index 0000000000..e70a15a6bc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCoverageCountingPathRenderer_DEFINED
+#define GrCoverageCountingPathRenderer_DEFINED
+
+#include <map>
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
+
+class GrCCDrawPathsOp;
+class GrCCPathCache;
+
+/**
+ * This is a path renderer that draws antialiased paths by counting coverage in an offscreen
+ * buffer. (See GrCCCoverageProcessor, GrCCPathProcessor.)
+ *
+ * It also serves as the per-render-target tracker for pending path draws, and at the start of
+ * flush, it compiles GPU buffers and renders a "coverage count atlas" for the upcoming paths.
+ */
+class GrCoverageCountingPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
+public:
+ using CoverageType = GrCCAtlas::CoverageType;
+
+ static bool IsSupported(const GrCaps&, CoverageType* = nullptr);
+
+ enum class AllowCaching : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(
+ const GrCaps&, AllowCaching, uint32_t contextUniqueID);
+
+ CoverageType coverageType() const { return fCoverageType; }
+
+ using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
+
+ // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpsTaskPaths to the DDL
+ // object (detachPendingPaths) and then return them upon replay (mergePendingPaths).
+ PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }
+
+ void mergePendingPaths(const PendingPathsMap& paths) {
+#ifdef SK_DEBUG
+ // Ensure there are no duplicate opsTask IDs between the incoming path map and ours.
+ // This should always be true since opsTask IDs are globally unique and these are coming
+ // from different DDL recordings.
+ for (const auto& it : paths) {
+ SkASSERT(!fPendingPaths.count(it.first));
+ }
+#endif
+
+ fPendingPaths.insert(paths.begin(), paths.end());
+ }
+
+ std::unique_ptr<GrFragmentProcessor> makeClipProcessor(
+            uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ const GrCaps&);
+
+ // GrOnFlushCallbackObject overrides.
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs) override;
+ void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
+
+ void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
+
+ // If a path spans more pixels than this, we need to crop it or else analytic AA can run out of
+ // fp32 precision.
+ static constexpr float kPathCropThreshold = 1 << 16;
+
+ static void CropPath(const SkPath&, const SkIRect& cropbox, SkPath* out);
+
+ // Maximum inflation of path bounds due to stroking (from width, miter, caps). Strokes wider
+ // than this will be converted to fill paths and drawn by the CCPR filler instead.
+ static constexpr float kMaxBoundsInflationFromStroke = 4096;
+
+ static float GetStrokeDevWidth(const SkMatrix&, const SkStrokeRec&,
+ float* inflationRadius = nullptr);
+
+private:
+ GrCoverageCountingPathRenderer(CoverageType, AllowCaching, uint32_t contextUniqueID);
+
+ // GrPathRenderer overrides.
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
+ void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
+
+ const CoverageType fCoverageType;
+
+ // fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
+ // flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
+ // reside in fPendingPaths, then be moved to fFlushingPaths during preFlush().
+ PendingPathsMap fPendingPaths;
+
+ // fFlushingPaths holds the GrCCPerOpsTaskPaths objects that are currently being flushed.
+ // (It will only contain elements when fFlushing is true.)
+ SkSTArray<4, sk_sp<GrCCPerOpsTaskPaths>> fFlushingPaths;
+
+ std::unique_ptr<GrCCPathCache> fPathCache;
+
+ SkDEBUGCODE(bool fFlushing = false);
+
+public:
+ void testingOnly_drawPathDirectly(const DrawPathArgs&);
+ const GrCCPerFlushResources* testingOnly_getCurrentFlushResources();
+ const GrCCPathCache* testingOnly_getPathCache() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
new file mode 100644
index 0000000000..1b2a55e0e7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+
+bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType*) {
+ return false;
+}
+
+sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
+ const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+ return nullptr;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ const GrCaps& caps) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.cpp
new file mode 100644
index 0000000000..1398d5b5f0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
+
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+using InputType = GrGLSLGeometryBuilder::InputType;
+using OutputType = GrGLSLGeometryBuilder::OutputType;
+
+/**
+ * This class and its subclasses implement the coverage processor with geometry shaders.
+ */
+class GrGSCoverageProcessor::Impl : public GrGLSLGeometryProcessor {
+protected:
+ Impl(std::unique_ptr<Shader> shader) : fShader(std::move(shader)) {}
+
+ virtual bool hasCoverage(const GrGSCoverageProcessor& proc) const { return false; }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) final {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) final {
+ const GrGSCoverageProcessor& proc = args.fGP.cast<GrGSCoverageProcessor>();
+
+ // The vertex shader simply forwards transposed x or y values to the geometry shader.
+ SkASSERT(1 == proc.numVertexAttributes());
+ gpArgs->fPositionVar = proc.fInputXOrYValues.asShaderVar();
+
+ // Geometry shader.
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ this->emitGeometryShader(proc, varyingHandler, args.fGeomBuilder, args.fRTAdjustName);
+ varyingHandler->emitAttributes(proc);
+ varyingHandler->setNoPerspective();
+ SkASSERT(!args.fFPCoordTransformHandler->nextCoordTransform());
+
+ // Fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+ f->codeAppendf("half coverage;");
+ fShader->emitFragmentCoverageCode(f, "coverage");
+ f->codeAppendf("%s = half4(coverage);", args.fOutputColor);
+ f->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ }
+
+ void emitGeometryShader(
+ const GrGSCoverageProcessor& proc, GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLGeometryBuilder* g, const char* rtAdjust) const {
+ int numInputPoints = proc.numInputPoints();
+ SkASSERT(3 == numInputPoints || 4 == numInputPoints);
+
+ int inputWidth = (4 == numInputPoints || proc.hasInputWeight()) ? 4 : 3;
+ const char* posValues = (4 == inputWidth) ? "sk_Position" : "sk_Position.xyz";
+ g->codeAppendf("float%ix2 pts = transpose(float2x%i(sk_in[0].%s, sk_in[1].%s));",
+ inputWidth, inputWidth, posValues, posValues);
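+        // Each input "line" carries two vertices: one holding the shape's transposed X values and
+        // one holding its Y values (see appendMesh). The transpose above reassembles them into an
+        // array of (x, y) points.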
+
+ GrShaderVar wind("wind", kHalf_GrSLType);
+ g->declareGlobal(wind);
+ Shader::CalcWind(proc, g, "pts", wind.c_str());
+ if (PrimitiveType::kWeightedTriangles == proc.primitiveType()) {
+ SkASSERT(3 == numInputPoints);
+ SkASSERT(kFloat4_GrVertexAttribType == proc.fInputXOrYValues.cpuType());
+ g->codeAppendf("%s *= half(sk_in[0].sk_Position.w);", wind.c_str());
+ }
+
+ SkString emitVertexFn;
+ SkSTArray<3, GrShaderVar> emitArgs;
+ const char* corner = emitArgs.emplace_back("corner", kFloat2_GrSLType).c_str();
+ const char* bloatdir = emitArgs.emplace_back("bloatdir", kFloat2_GrSLType).c_str();
+ const char* inputCoverage = nullptr;
+ if (this->hasCoverage(proc)) {
+ inputCoverage = emitArgs.emplace_back("coverage", kHalf_GrSLType).c_str();
+ }
+ const char* cornerCoverage = nullptr;
+ if (Subpass::kCorners == proc.fSubpass) {
+ cornerCoverage = emitArgs.emplace_back("corner_coverage", kHalf2_GrSLType).c_str();
+ }
+ g->emitFunction(kVoid_GrSLType, "emitVertex", emitArgs.count(), emitArgs.begin(), [&]() {
+ SkString fnBody;
+ fnBody.appendf("float2 vertexpos = fma(%s, float2(bloat), %s);", bloatdir, corner);
+ const char* coverage = inputCoverage;
+ if (!coverage) {
+ if (!fShader->calculatesOwnEdgeCoverage()) {
+ // Flat edge opposite the curve. Coverages need full precision since distance
+ // to the opposite edge can be large.
+ fnBody.appendf("float coverage = dot(float3(vertexpos, 1), %s);",
+ fEdgeDistanceEquation.c_str());
+ } else {
+ // The "coverage" param should hold only the signed winding value.
+ fnBody.appendf("float coverage = 1;");
+ }
+ coverage = "coverage";
+ }
+ fnBody.appendf("%s *= %s;", coverage, wind.c_str());
+ if (cornerCoverage) {
+ fnBody.appendf("%s.x *= %s;", cornerCoverage, wind.c_str());
+ }
+ fShader->emitVaryings(varyingHandler, GrGLSLVarying::Scope::kGeoToFrag, &fnBody,
+ "vertexpos", coverage, cornerCoverage, wind.c_str());
+ g->emitVertex(&fnBody, "vertexpos", rtAdjust);
+ return fnBody;
+ }().c_str(), &emitVertexFn);
+
+ float bloat = kAABloatRadius;
+#ifdef SK_DEBUG
+ if (proc.debugBloatEnabled()) {
+ bloat *= proc.debugBloat();
+ }
+#endif
+ g->defineConstant("bloat", bloat);
+
+ if (!this->hasCoverage(proc) && !fShader->calculatesOwnEdgeCoverage()) {
+ // Determine the amount of coverage to subtract out for the flat edge of the curve.
+ g->declareGlobal(fEdgeDistanceEquation);
+ g->codeAppendf("float2 p0 = pts[0], p1 = pts[%i];", numInputPoints - 1);
+ g->codeAppendf("float2 n = float2(p0.y - p1.y, p1.x - p0.x);");
+ g->codeAppend ("float nwidth = bloat*2 * (abs(n.x) + abs(n.y));");
+ // When nwidth=0, wind must also be 0 (and coverage * wind = 0). So it doesn't matter
+ // what we come up with here as long as it isn't NaN or Inf.
+ g->codeAppend ("n /= (0 != nwidth) ? nwidth : 1;");
+ g->codeAppendf("%s = float3(-n, dot(n, p0) - .5*sign(%s));",
+ fEdgeDistanceEquation.c_str(), wind.c_str());
+ }
+
+ this->onEmitGeometryShader(proc, g, wind, emitVertexFn.c_str());
+ }
+
+ virtual void onEmitGeometryShader(const GrGSCoverageProcessor&, GrGLSLGeometryBuilder*,
+ const GrShaderVar& wind, const char* emitVertexFn) const = 0;
+
+ const std::unique_ptr<Shader> fShader;
+ const GrShaderVar fEdgeDistanceEquation{"edge_distance_equation", kFloat3_GrSLType};
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+/**
+ * Generates conservative rasters around a triangle and its edges, and calculates coverage ramps.
+ *
+ * Triangle rough outlines are drawn in two steps: (1) draw a conservative raster of the entire
+ * triangle, with a coverage of +1, and (2) draw conservative rasters around each edge, with a
+ * coverage ramp from -1 to 0. These edge coverage values convert jagged conservative raster edges
+ * into smooth, antialiased ones.
+ *
+ * The final corners get touched up in a later step by TriangleCornerImpl.
+ */
+class GrGSCoverageProcessor::TriangleHullImpl : public GrGSCoverageProcessor::Impl {
+public:
+ TriangleHullImpl(std::unique_ptr<Shader> shader) : Impl(std::move(shader)) {}
+
+ bool hasCoverage(const GrGSCoverageProcessor& proc) const override { return true; }
+
+ void onEmitGeometryShader(const GrGSCoverageProcessor&, GrGLSLGeometryBuilder* g,
+ const GrShaderVar& wind, const char* emitVertexFn) const override {
+ fShader->emitSetupCode(g, "pts");
+
+ // Visualize the input triangle as upright and equilateral, with a flat base. Paying special
+ // attention to wind, we can identify the points as top, bottom-left, and bottom-right.
+ //
+ // NOTE: We generate the rasters in 5 independent invocations, so each invocation designates
+ // the corner it will begin with as the top.
+ g->codeAppendf("int i = (%s > 0 ? sk_InvocationID : 4 - sk_InvocationID) %% 3;",
+ wind.c_str());
+ g->codeAppend ("float2 top = pts[i];");
+ g->codeAppendf("float2 right = pts[(i + (%s > 0 ? 1 : 2)) %% 3];", wind.c_str());
+ g->codeAppendf("float2 left = pts[(i + (%s > 0 ? 2 : 1)) %% 3];", wind.c_str());
+
+ // Determine which direction to outset the conservative raster from each of the three edges.
+ g->codeAppend ("float2 leftbloat = sign(top - left);");
+ g->codeAppend ("leftbloat = float2(0 != leftbloat.y ? leftbloat.y : leftbloat.x, "
+ "0 != leftbloat.x ? -leftbloat.x : -leftbloat.y);");
+
+ g->codeAppend ("float2 rightbloat = sign(right - top);");
+ g->codeAppend ("rightbloat = float2(0 != rightbloat.y ? rightbloat.y : rightbloat.x, "
+ "0 != rightbloat.x ? -rightbloat.x : -rightbloat.y);");
+
+ g->codeAppend ("float2 downbloat = sign(left - right);");
+ g->codeAppend ("downbloat = float2(0 != downbloat.y ? downbloat.y : downbloat.x, "
+ "0 != downbloat.x ? -downbloat.x : -downbloat.y);");
+
+ // The triangle's conservative raster has a coverage of +1 all around.
+ g->codeAppend ("half4 coverages = half4(+1);");
+
+ // Edges have coverage ramps.
+ g->codeAppend ("if (sk_InvocationID >= 2) {"); // Are we an edge?
+ Shader::CalcEdgeCoverageAtBloatVertex(g, "top", "right",
+ "float2(+rightbloat.y, -rightbloat.x)",
+ "coverages[0]");
+ g->codeAppend ( "coverages.yzw = half3(-1, 0, -1 - coverages[0]);");
+ // Reassign bloats to characterize a conservative raster around a single edge, rather than
+ // the entire triangle.
+ g->codeAppend ( "leftbloat = downbloat = -rightbloat;");
+ g->codeAppend ("}");
+
+ // Here we generate the conservative raster geometry. The triangle's conservative raster is
+ // the convex hull of 3 pixel-size boxes centered on the input points. This translates to a
+ // convex polygon with either one, two, or three vertices at each input point (depending on
+ // how sharp the corner is) that we split between two invocations. Edge conservative rasters
+ // are convex hulls of 2 pixel-size boxes, one at each endpoint. For more details on
+ // conservative raster, see:
+ // https://developer.nvidia.com/gpugems/GPUGems2/gpugems2_chapter42.html
+ g->codeAppendf("bool2 left_right_notequal = notEqual(leftbloat, rightbloat);");
+ g->codeAppend ("if (all(left_right_notequal)) {");
+ // The top corner will have three conservative raster vertices. Emit the
+ // middle one first to the triangle strip.
+ g->codeAppendf( "%s(top, float2(-leftbloat.y, +leftbloat.x), coverages[0]);",
+ emitVertexFn);
+ g->codeAppend ("}");
+ g->codeAppend ("if (any(left_right_notequal)) {");
+ // Second conservative raster vertex for the top corner.
+ g->codeAppendf( "%s(top, rightbloat, coverages[1]);", emitVertexFn);
+ g->codeAppend ("}");
+
+ // Main interior body.
+ g->codeAppendf("%s(top, leftbloat, coverages[2]);", emitVertexFn);
+ g->codeAppendf("%s(right, rightbloat, coverages[1]);", emitVertexFn);
+
+ // Here the invocations diverge slightly. We can't symmetrically divide three triangle
+ // points between two invocations, so each does the following:
+ //
+ // sk_InvocationID=0: Finishes the main interior body of the triangle hull.
+ // sk_InvocationID=1: Remaining two conservative raster vertices for the third hull corner.
+ // sk_InvocationID=2..4: Finish the opposite endpoint of their corresponding edge.
+ g->codeAppendf("bool2 right_down_notequal = notEqual(rightbloat, downbloat);");
+ g->codeAppend ("if (any(right_down_notequal) || 0 == sk_InvocationID) {");
+ g->codeAppendf( "%s((0 == sk_InvocationID) ? left : right, "
+ "(0 == sk_InvocationID) ? leftbloat : downbloat, "
+ "coverages[2]);", emitVertexFn);
+ g->codeAppend ("}");
+ g->codeAppend ("if (all(right_down_notequal) && 0 != sk_InvocationID) {");
+ g->codeAppendf( "%s(right, float2(-rightbloat.y, +rightbloat.x), coverages[3]);",
+ emitVertexFn);
+ g->codeAppend ("}");
+
+ // 5 invocations: 2 triangle hull invocations and 3 edges.
+ g->configure(InputType::kLines, OutputType::kTriangleStrip, 6, 5);
+ }
+};
+
+/**
+ * Generates a conservative raster around a convex quadrilateral that encloses a cubic or quadratic.
+ */
+class GrGSCoverageProcessor::CurveHullImpl : public GrGSCoverageProcessor::Impl {
+public:
+ CurveHullImpl(std::unique_ptr<Shader> shader) : Impl(std::move(shader)) {}
+
+ void onEmitGeometryShader(const GrGSCoverageProcessor&, GrGLSLGeometryBuilder* g,
+ const GrShaderVar& wind, const char* emitVertexFn) const override {
+ const char* hullPts = "pts";
+ fShader->emitSetupCode(g, "pts", &hullPts);
+
+ // Visualize the input (convex) quadrilateral as a square. Paying special attention to wind,
+ // we can identify the points by their corresponding corner.
+ //
+ // NOTE: We split the square down the diagonal from top-right to bottom-left, and generate
+ // the hull in two independent invocations. Each invocation designates the corner it will
+ // begin with as top-left.
+ g->codeAppend ("int i = sk_InvocationID * 2;");
+ g->codeAppendf("float2 topleft = %s[i];", hullPts);
+ g->codeAppendf("float2 topright = %s[%s > 0 ? i + 1 : 3 - i];", hullPts, wind.c_str());
+ g->codeAppendf("float2 bottomleft = %s[%s > 0 ? 3 - i : i + 1];", hullPts, wind.c_str());
+ g->codeAppendf("float2 bottomright = %s[2 - i];", hullPts);
+
+ // Determine how much to outset the conservative raster hull from the relevant edges.
+ g->codeAppend ("float2 leftbloat = float2(topleft.y > bottomleft.y ? +1 : -1, "
+ "topleft.x > bottomleft.x ? -1 : +1);");
+ g->codeAppend ("float2 upbloat = float2(topright.y > topleft.y ? +1 : -1, "
+ "topright.x > topleft.x ? -1 : +1);");
+ g->codeAppend ("float2 rightbloat = float2(bottomright.y > topright.y ? +1 : -1, "
+ "bottomright.x > topright.x ? -1 : +1);");
+
+ // Here we generate the conservative raster geometry. It is the convex hull of 4 pixel-size
+ // boxes centered on the input points, split evenly between two invocations. This translates
+ // to a polygon with either one, two, or three vertices at each input point, depending on
+ // how sharp the corner is. For more details on conservative raster, see:
+ // https://developer.nvidia.com/gpugems/GPUGems2/gpugems2_chapter42.html
+ g->codeAppendf("bool2 left_up_notequal = notEqual(leftbloat, upbloat);");
+ g->codeAppend ("if (all(left_up_notequal)) {");
+ // The top-left corner will have three conservative raster vertices.
+ // Emit the middle one first to the triangle strip.
+ g->codeAppendf( "%s(topleft, float2(-leftbloat.y, leftbloat.x));", emitVertexFn);
+ g->codeAppend ("}");
+ g->codeAppend ("if (any(left_up_notequal)) {");
+ // Second conservative raster vertex for the top-left corner.
+ g->codeAppendf( "%s(topleft, leftbloat);", emitVertexFn);
+ g->codeAppend ("}");
+
+ // Main interior body of this invocation's half of the hull.
+ g->codeAppendf("%s(topleft, upbloat);", emitVertexFn);
+ g->codeAppendf("%s(bottomleft, leftbloat);", emitVertexFn);
+ g->codeAppendf("%s(topright, upbloat);", emitVertexFn);
+
+ // Remaining two conservative raster vertices for the top-right corner.
+ g->codeAppendf("bool2 up_right_notequal = notEqual(upbloat, rightbloat);");
+ g->codeAppend ("if (any(up_right_notequal)) {");
+ g->codeAppendf( "%s(topright, rightbloat);", emitVertexFn);
+ g->codeAppend ("}");
+ g->codeAppend ("if (all(up_right_notequal)) {");
+ g->codeAppendf( "%s(topright, float2(-upbloat.y, upbloat.x));", emitVertexFn);
+ g->codeAppend ("}");
+
+ g->configure(InputType::kLines, OutputType::kTriangleStrip, 7, 2);
+ }
+};
+
+/**
+ * Generates conservative rasters around corners (aka pixel-size boxes) and calculates
+ * coverage and attenuation ramps to fix up the coverage values written by the hulls.
+ */
+class GrGSCoverageProcessor::CornerImpl : public GrGSCoverageProcessor::Impl {
+public:
+ CornerImpl(std::unique_ptr<Shader> shader) : Impl(std::move(shader)) {}
+
+ bool hasCoverage(const GrGSCoverageProcessor& proc) const override {
+ return proc.isTriangles();
+ }
+
+ void onEmitGeometryShader(const GrGSCoverageProcessor& proc, GrGLSLGeometryBuilder* g,
+ const GrShaderVar& wind, const char* emitVertexFn) const override {
+ fShader->emitSetupCode(g, "pts");
+
+ g->codeAppendf("int corneridx = sk_InvocationID;");
+ if (!proc.isTriangles()) {
+ g->codeAppendf("corneridx *= %i;", proc.numInputPoints() - 1);
+ }
+
+ g->codeAppendf("float2 corner = pts[corneridx];");
+ g->codeAppendf("float2 left = pts[(corneridx + (%s > 0 ? %i : 1)) %% %i];",
+ wind.c_str(), proc.numInputPoints() - 1, proc.numInputPoints());
+ g->codeAppendf("float2 right = pts[(corneridx + (%s > 0 ? 1 : %i)) %% %i];",
+ wind.c_str(), proc.numInputPoints() - 1, proc.numInputPoints());
+
+ g->codeAppend ("float2 leftdir = corner - left;");
+ g->codeAppend ("leftdir = (float2(0) != leftdir) ? normalize(leftdir) : float2(1, 0);");
+
+ g->codeAppend ("float2 rightdir = right - corner;");
+ g->codeAppend ("rightdir = (float2(0) != rightdir) ? normalize(rightdir) : float2(1, 0);");
+
+ // Find "outbloat" and "crossbloat" at our corner. The outbloat points diagonally out of the
+ // triangle, in the direction that should ramp to zero coverage with attenuation. The
+        // crossbloat runs perpendicular to outbloat.
+ g->codeAppend ("float2 outbloat = float2(leftdir.x > rightdir.x ? +1 : -1, "
+ "leftdir.y > rightdir.y ? +1 : -1);");
+ g->codeAppend ("float2 crossbloat = float2(-outbloat.y, +outbloat.x);");
+
+ g->codeAppend ("half attenuation; {");
+ Shader::CalcCornerAttenuation(g, "leftdir", "rightdir", "attenuation");
+ g->codeAppend ("}");
+
+ if (proc.isTriangles()) {
+ g->codeAppend ("half2 left_coverages; {");
+ Shader::CalcEdgeCoveragesAtBloatVertices(g, "left", "corner", "-outbloat",
+ "-crossbloat", "left_coverages");
+ g->codeAppend ("}");
+
+ g->codeAppend ("half2 right_coverages; {");
+ Shader::CalcEdgeCoveragesAtBloatVertices(g, "corner", "right", "-outbloat",
+ "crossbloat", "right_coverages");
+ g->codeAppend ("}");
+
+ // Emit a corner box. The first coverage argument erases the values that were written
+ // previously by the hull and edge geometry. The second pair are multiplied together by
+ // the fragment shader. They ramp to 0 with attenuation in the direction of outbloat,
+ // and linearly from left-edge coverage to right-edge coverage in the direction of
+ // crossbloat.
+ //
+ // NOTE: Since this is not a linear mapping, it is important that the box's diagonal
+ // shared edge points in the direction of outbloat.
+ g->codeAppendf("%s(corner, -crossbloat, right_coverages[1] - left_coverages[1],"
+ "half2(1 + left_coverages[1], 1));",
+ emitVertexFn);
+
+ g->codeAppendf("%s(corner, outbloat, 1 + left_coverages[0] + right_coverages[0], "
+ "half2(0, attenuation));",
+ emitVertexFn);
+
+ g->codeAppendf("%s(corner, -outbloat, -1 - left_coverages[0] - right_coverages[0], "
+ "half2(1 + left_coverages[0] + right_coverages[0], 1));",
+ emitVertexFn);
+
+ g->codeAppendf("%s(corner, crossbloat, left_coverages[1] - right_coverages[1],"
+ "half2(1 + right_coverages[1], 1));",
+ emitVertexFn);
+ } else {
+ // Curves are simpler. Setting "wind = -wind" causes the Shader to erase what it had
+            // written for the hull in the previous pass. Then, at each vertex of the corner box, the Shader
+ // will calculate the curve's local coverage value, interpolate it alongside our
+ // attenuation parameter, and multiply the two together for a final coverage value.
+ g->codeAppendf("%s = -%s;", wind.c_str(), wind.c_str());
+ if (!fShader->calculatesOwnEdgeCoverage()) {
+ g->codeAppendf("%s = -%s;",
+ fEdgeDistanceEquation.c_str(), fEdgeDistanceEquation.c_str());
+ }
+ g->codeAppendf("%s(corner, -crossbloat, half2(-1, 1));", emitVertexFn);
+ g->codeAppendf("%s(corner, outbloat, half2(0, attenuation));",
+ emitVertexFn);
+ g->codeAppendf("%s(corner, -outbloat, half2(-1, 1));", emitVertexFn);
+ g->codeAppendf("%s(corner, crossbloat, half2(-1, 1));", emitVertexFn);
+ }
+
+ g->configure(InputType::kLines, OutputType::kTriangleStrip, 4, proc.isTriangles() ? 3 : 2);
+ }
+};
+
+void GrGSCoverageProcessor::reset(PrimitiveType primitiveType, GrResourceProvider*) {
+ fPrimitiveType = primitiveType; // This will affect the return values for numInputPoints, etc.
+
+ if (4 == this->numInputPoints() || this->hasInputWeight()) {
+ fInputXOrYValues =
+ {"x_or_y_values", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ GR_STATIC_ASSERT(sizeof(QuadPointInstance) ==
+ 2 * GrVertexAttribTypeSize(kFloat4_GrVertexAttribType));
+ GR_STATIC_ASSERT(offsetof(QuadPointInstance, fY) ==
+ GrVertexAttribTypeSize(kFloat4_GrVertexAttribType));
+ } else {
+ fInputXOrYValues =
+ {"x_or_y_values", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ GR_STATIC_ASSERT(sizeof(TriPointInstance) ==
+ 2 * GrVertexAttribTypeSize(kFloat3_GrVertexAttribType));
+ }
+
+ this->setVertexAttributes(&fInputXOrYValues, 1);
+}
+
+void GrGSCoverageProcessor::appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, SkTArray<GrMesh>* out) const {
+ // We don't actually make instanced draw calls. Instead, we feed transposed x,y point values to
+ // the GPU in a regular vertex array and draw kLines (see initGS). Then, each vertex invocation
+ // receives either the shape's x or y values as inputs, which it forwards to the geometry
+ // shader.
+ GrMesh& mesh = out->emplace_back(GrPrimitiveType::kLines);
+ mesh.setNonIndexedNonInstanced(instanceCount * 2);
+ mesh.setVertexData(std::move(instanceBuffer), baseInstance * 2);
+}
+
+void GrGSCoverageProcessor::draw(
+ GrOpFlushState* flushState, const GrPipeline& pipeline, const SkIRect scissorRects[],
+ const GrMesh meshes[], int meshCount, const SkRect& drawBounds) const {
+ // The geometry shader impl draws primitives in two subpasses: The first pass fills the interior
+ // and does edge AA. The second pass does touch up on corner pixels.
+ for (int i = 0; i < 2; ++i) {
+ fSubpass = (Subpass) i;
+ this->GrCCCoverageProcessor::draw(
+ flushState, pipeline, scissorRects, meshes, meshCount, drawBounds);
+ }
+}
+
+GrGLSLPrimitiveProcessor* GrGSCoverageProcessor::onCreateGLSLInstance(
+ std::unique_ptr<Shader> shader) const {
+ if (Subpass::kHulls == fSubpass) {
+ return this->isTriangles()
+ ? (Impl*) new TriangleHullImpl(std::move(shader))
+ : (Impl*) new CurveHullImpl(std::move(shader));
+ }
+ SkASSERT(Subpass::kCorners == fSubpass);
+ return new CornerImpl(std::move(shader));
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.h
new file mode 100644
index 0000000000..2e3f7dc248
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrGSCoverageProcessor.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGSCoverageProcessor_DEFINED
+#define GrGSCoverageProcessor_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class implements GrCCCoverageProcessor with analytic coverage using geometry shaders.
+ */
+class GrGSCoverageProcessor : public GrCCCoverageProcessor {
+public:
+ GrGSCoverageProcessor() : GrCCCoverageProcessor(kGrGSCoverageProcessor_ClassID) {
+ this->setWillUseGeoShader();
+ }
+
+private:
+ void reset(PrimitiveType, GrResourceProvider*) override;
+
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ SkDEBUGCODE(this->getDebugBloatKey(b));
+ b->add32(((int)fPrimitiveType << 16) | (int)fSubpass);
+ }
+
+ void appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
+ SkTArray<GrMesh>* out) const override;
+
+ void draw(GrOpFlushState*, const GrPipeline&, const SkIRect scissorRects[], const GrMesh[],
+ int meshCount, const SkRect& drawBounds) const override;
+
+ GrGLSLPrimitiveProcessor* onCreateGLSLInstance(std::unique_ptr<Shader>) const override;
+
+ // The geometry shader impl draws primitives in two subpasses. The first pass fills the interior
+ // and does edge AA. The second pass does touch up on corner pixels.
+ enum class Subpass : bool {
+ kHulls,
+ kCorners
+ };
+
+ Attribute fInputXOrYValues;
+ mutable Subpass fSubpass = Subpass::kHulls;
+
+ class Impl;
+ class TriangleHullImpl;
+ class CurveHullImpl;
+ class CornerImpl;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.cpp b/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.cpp
new file mode 100644
index 0000000000..72aa6f0630
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrOctoBounds.h"
+#include <algorithm>
+
+bool GrOctoBounds::clip(const SkIRect& clipRect) {
+ // Intersect dev bounds with the clip rect.
+ float l = std::max(fBounds.left(), (float)clipRect.left());
+ float t = std::max(fBounds.top(), (float)clipRect.top());
+ float r = std::min(fBounds.right(), (float)clipRect.right());
+ float b = std::min(fBounds.bottom(), (float)clipRect.bottom());
+
+ float l45 = fBounds45.left();
+ float t45 = fBounds45.top();
+ float r45 = fBounds45.right();
+ float b45 = fBounds45.bottom();
+
+ // Check if either the bounds or 45-degree bounds are empty. We write this check as the NOT of
+ // non-empty rects, so we will return false if any values are NaN.
+ if (!(l < r && t < b && l45 < r45 && t45 < b45)) {
+ return false;
+ }
+
+    // Tighten the dev bounds around the new (octagonal) intersection that results after clipping.
+    // Depending on the diagonals, these may now be even tighter than the clipped bounds. Shader
+    // code that emits octagons expects both bounding boxes to circumscribe the inner octagon, and
+    // will fail if they do not.
+ if (l45 > Get_x45(r,b)) {
+ // Slide the bottom upward until it crosses the l45 diagonal at x=r.
+ // y = x + (y0 - x0)
+ // Substitute: l45 = x0 - y0
+ // y = x - l45
+ b = SkScalarPin(r - l45, t, b);
+ } else if (r45 < Get_x45(r,b)) {
+ // Slide the right side leftward until it crosses the r45 diagonal at y=b.
+ // x = y + (x0 - y0)
+ // Substitute: r45 = x0 - y0
+ // x = y + r45
+ r = SkScalarPin(b + r45, l, r);
+ }
+ if (l45 > Get_x45(l,t)) {
+ // Slide the left side rightward until it crosses the l45 diagonal at y=t.
+ // x = y + (x0 - y0)
+ // Substitute: l45 = x0 - y0
+ // x = y + l45
+ l = SkScalarPin(t + l45, l, r);
+ } else if (r45 < Get_x45(l,t)) {
+ // Slide the top downward until it crosses the r45 diagonal at x=l.
+ // y = x + (y0 - x0)
+ // Substitute: r45 = x0 - y0
+ // y = x - r45
+ t = SkScalarPin(l - r45, t, b);
+ }
+ if (t45 > Get_y45(l,b)) {
+ // Slide the left side rightward until it crosses the t45 diagonal at y=b.
+ // x = -y + (x0 + y0)
+ // Substitute: t45 = x0 + y0
+ // x = -y + t45
+ l = SkScalarPin(t45 - b, l, r);
+ } else if (b45 < Get_y45(l,b)) {
+ // Slide the bottom upward until it crosses the b45 diagonal at x=l.
+ // y = -x + (y0 + x0)
+ // Substitute: b45 = x0 + y0
+ // y = -x + b45
+ b = SkScalarPin(b45 - l, t, b);
+ }
+ if (t45 > Get_y45(r,t)) {
+ // Slide the top downward until it crosses the t45 diagonal at x=r.
+ // y = -x + (y0 + x0)
+ // Substitute: t45 = x0 + y0
+ // y = -x + t45
+ t = SkScalarPin(t45 - r, t, b);
+ } else if (b45 < Get_y45(r,t)) {
+ // Slide the right side leftward until it crosses the b45 diagonal at y=t.
+ // x = -y + (x0 + y0)
+ // Substitute: b45 = x0 + y0
+ // x = -y + b45
+ r = SkScalarPin(b45 - t, l, r);
+ }
+
+ // Tighten the 45-degree bounding box. Since the dev bounds are now fully tightened, we only
+ // have to clamp the diagonals to outer corners.
+ // NOTE: This will not cause l,t,r,b to need more insetting. We only ever change a diagonal by
+ // pinning it to a FAR corner, which, by definition, is still outside the other corners.
+ l45 = SkScalarPin(Get_x45(l,b), l45, r45);
+ t45 = SkScalarPin(Get_y45(l,t), t45, b45);
+ r45 = SkScalarPin(Get_x45(r,t), l45, r45);
+ b45 = SkScalarPin(Get_y45(r,b), t45, b45);
+
+ // Make one final check for empty or NaN bounds. If the dev bounds were clipped completely
+ // outside one of the diagonals, they will have been pinned to empty. It's also possible that
+ // some Infs crept in and turned into NaNs.
+ if (!(l < r && t < b && l45 < r45 && t45 < b45)) {
+ return false;
+ }
+
+ fBounds.setLTRB(l, t, r, b);
+ fBounds45.setLTRB(l45, t45, r45, b45);
+
+#ifdef SK_DEBUG
+ // Verify dev bounds are inside the clip rect.
+ SkASSERT(l >= (float)clipRect.left());
+ SkASSERT(t >= (float)clipRect.top());
+ SkASSERT(r <= (float)clipRect.right());
+ SkASSERT(b <= (float)clipRect.bottom());
+ this->validateBoundsAreTight();
+#endif
+
+ return true;
+}
+
+#if defined(SK_DEBUG) || defined(GR_TEST_UTILS)
+void GrOctoBounds::validateBoundsAreTight() const {
+ this->validateBoundsAreTight([](bool cond, const char* file, int line, const char* code) {
+ SkASSERTF(cond, "%s(%d): assertion failure: \"assert(%s)\"", file, line, code);
+ });
+}
+
+void GrOctoBounds::validateBoundsAreTight(const std::function<void(
+ bool cond, const char* file, int line, const char* code)>& validateFn) const {
+ // The octobounds calculated in GrCCPerFlushResources::renderShapeInAtlas use FMAs to compute
+ // M * (x,y) and T45 * M * (x,y) in parallel. This leads to a not-insignificant floating point
+ // difference between (T45 * M * (x,y)) stored in fBounds45, and T45 * (M * (x,y)) calculated
+ // here from fBounds with the Get_xy45 functions.
+ constexpr static float epsilon = 1e-2f;
+
+ float l=fBounds.left(), l45=fBounds45.left();
+ float t=fBounds.top(), t45=fBounds45.top();
+ float r=fBounds.right(), r45=fBounds45.right();
+ float b=fBounds.bottom(), b45=fBounds45.bottom();
+
+#define VALIDATE(CODE) validateFn(CODE, __FILE__, __LINE__, #CODE)
+ // Verify diagonals are inside far corners of the dev bounds.
+ VALIDATE(l45 >= Get_x45(l,b) - epsilon);
+ VALIDATE(t45 >= Get_y45(l,t) - epsilon);
+ VALIDATE(r45 <= Get_x45(r,t) + epsilon);
+ VALIDATE(b45 <= Get_y45(r,b) + epsilon);
+ // Verify verticals and horizontals are inside far corners of the 45-degree dev bounds.
+ VALIDATE(l >= Get_x(l45,t45) - epsilon);
+ VALIDATE(t >= Get_y(r45,t45) - epsilon);
+ VALIDATE(r <= Get_x(r45,b45) + epsilon);
+ VALIDATE(b <= Get_y(l45,b45) + epsilon);
+ // Verify diagonals are outside middle corners of the dev bounds.
+ VALIDATE(l45 <= Get_x45(r,b) + epsilon);
+ VALIDATE(l45 <= Get_x45(l,t) + epsilon);
+ VALIDATE(t45 <= Get_y45(l,b) + epsilon);
+ VALIDATE(t45 <= Get_y45(r,t) + epsilon);
+ VALIDATE(r45 >= Get_x45(l,t) - epsilon);
+ VALIDATE(r45 >= Get_x45(r,b) - epsilon);
+ VALIDATE(b45 >= Get_y45(r,t) - epsilon);
+ VALIDATE(b45 >= Get_y45(l,b) - epsilon);
+ // Verify verticals and horizontals are outside middle corners of the 45-degree dev bounds.
+ VALIDATE(l <= Get_x(l45,b45) + epsilon);
+ VALIDATE(l <= Get_x(r45,t45) + epsilon);
+ VALIDATE(t <= Get_y(r45,b45) + epsilon);
+ VALIDATE(t <= Get_y(l45,t45) + epsilon);
+ VALIDATE(r >= Get_x(r45,t45) - epsilon);
+ VALIDATE(r >= Get_x(l45,b45) - epsilon);
+ VALIDATE(b >= Get_y(l45,t45) - epsilon);
+ VALIDATE(b >= Get_y(r45,b45) - epsilon);
+#undef VALIDATE
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.h b/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.h
new file mode 100644
index 0000000000..d2272ae005
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrOctoBounds.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOctoBounds_DEFINED
+#define GrOctoBounds_DEFINED
+
+#include "include/core/SkRect.h"
+#include <functional>
+
+/**
+ * This class is composed of two bounding boxes: one in device space, and one in a 45-degree rotated
+ * space.
+ *
+ * The 45-degree bounding box resides in "| 1 -1 | * coords" space.
+ * | 1 1 |
+ *
+ * The intersection of these two boxes defines the bounding octagon of a shape.
+ *
+ * Furthermore, both bounding boxes are fully tightened. This means we can blindly find the
+ * intersections between each diagonal and its vertical and horizontal neighbors, and be left with
+ * 8 points that define a convex (possibly degenerate) octagon.
+ */
+class GrOctoBounds {
+public:
+ GrOctoBounds() = default;
+ GrOctoBounds(const SkRect& bounds, const SkRect& bounds45) {
+ this->set(bounds, bounds45);
+ }
+
+ void set(const SkRect& bounds, const SkRect& bounds45) {
+ fBounds = bounds;
+ fBounds45 = bounds45;
+ SkDEBUGCODE(this->validateBoundsAreTight());
+ }
+
+ bool operator==(const GrOctoBounds& that) const {
+ return fBounds == that.fBounds && fBounds45 == that.fBounds45;
+ }
+ bool operator!=(const GrOctoBounds& that) const { return !(*this == that); }
+
+ const SkRect& bounds() const { return fBounds; }
+ float left() const { return fBounds.left(); }
+ float top() const { return fBounds.top(); }
+ float right() const { return fBounds.right(); }
+ float bottom() const { return fBounds.bottom(); }
+
+
+ // The 45-degree bounding box resides in "| 1 -1 | * coords" space.
+ // | 1 1 |
+ const SkRect& bounds45() const { return fBounds45; }
+ float left45() const { return fBounds45.left(); }
+ float top45() const { return fBounds45.top(); }
+ float right45() const { return fBounds45.right(); }
+ float bottom45() const { return fBounds45.bottom(); }
+
+ void roundOut(SkIRect* out) const {
+ // The octagon is the intersection of fBounds and fBounds45 (see the comment at the start of
+ // the class). The octagon's bounding box is therefore just fBounds. And the integer
+ // bounding box can be found by simply rounding out fBounds.
+ fBounds.roundOut(out);
+ }
+
+ GrOctoBounds makeOffset(float dx, float dy) const {
+ GrOctoBounds offset;
+ offset.setOffset(*this, dx, dy);
+ return offset;
+ }
+
+ void setOffset(const GrOctoBounds& octoBounds, float dx, float dy) {
+ fBounds = octoBounds.fBounds.makeOffset(dx, dy);
+ fBounds45 = octoBounds.fBounds45.makeOffset(dx - dy, dx + dy);
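+        // The device-space offset (dx, dy) maps through the 45-degree transform to
+        // (Get_x45(dx, dy), Get_y45(dx, dy)) = (dx - dy, dx + dy).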
+ SkDEBUGCODE(this->validateBoundsAreTight());
+ }
+
+ void outset(float radius) {
+ fBounds.outset(radius, radius);
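+        // The 45-degree basis is a rotation scaled by sqrt(2), so distances in that space grow
+        // by a factor of sqrt(2).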
+ fBounds45.outset(radius*SK_ScalarSqrt2, radius*SK_ScalarSqrt2);
+ SkDEBUGCODE(this->validateBoundsAreTight());
+ }
+
+ // Clips the octo bounds by a clip rect and ensures the resulting bounds are fully tightened.
+ // Returns false if the octagon and clipRect do not intersect at all.
+ //
+ // NOTE: Does not perform a trivial containment test before the clip routine. It is probably a
+    // good idea not to call this method if 'this->bounds()' is fully contained within 'clipRect'.
+ bool SK_WARN_UNUSED_RESULT clip(const SkIRect& clipRect);
+
+ // The 45-degree bounding box resides in "| 1 -1 | * coords" space.
+ // | 1 1 |
+ //
+ // i.e., | x45 | = | x - y |
+ // | y45 | = | x + y |
+ //
+ // These methods transform points between device space and 45-degree space.
+ constexpr static float Get_x45(float x, float y) { return x - y; }
+ constexpr static float Get_y45(float x, float y) { return x + y; }
+ constexpr static float Get_x(float x45, float y45) { return (x45 + y45) * .5f; }
+ constexpr static float Get_y(float x45, float y45) { return (y45 - x45) * .5f; }
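+    // For example, the device-space point (x, y) = (3, 1) maps to (x45, y45) = (2, 4), and the
+    // inverse pair recovers it: Get_x(2, 4) = (2 + 4) * .5f = 3, Get_y(2, 4) = (4 - 2) * .5f = 1.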
+
+#if defined(SK_DEBUG) || defined(GR_TEST_UTILS)
+ void validateBoundsAreTight() const;
+ void validateBoundsAreTight(const std::function<void(
+ bool cond, const char* file, int line, const char* code)>& validateFn) const;
+#endif
+
+private:
+ SkRect fBounds;
+ SkRect fBounds45;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.cpp
new file mode 100644
index 0000000000..41da1f9bf7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
+
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+class GrSampleMaskProcessor::Impl : public GrGLSLGeometryProcessor {
+public:
+ Impl(std::unique_ptr<Shader> shader) : fShader(std::move(shader)) {}
+
+private:
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) override {}
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ const std::unique_ptr<Shader> fShader;
+};
+
+void GrSampleMaskProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const GrSampleMaskProcessor& proc = args.fGP.cast<GrSampleMaskProcessor>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ int numInputPoints = proc.numInputPoints();
+ int inputWidth = (4 == numInputPoints || proc.hasInputWeight()) ? 4 : 3;
+
+ varyingHandler->emitAttributes(proc);
+ SkASSERT(!args.fFPCoordTransformHandler->nextCoordTransform());
+
+ if (PrimitiveType::kTriangles == proc.fPrimitiveType) {
+ SkASSERT(!proc.hasInstanceAttributes()); // Triangles are drawn with vertex arrays.
+ gpArgs->fPositionVar = proc.fInputAttribs.front().asShaderVar();
+ } else {
+ SkASSERT(!proc.hasVertexAttributes()); // Curves are drawn with instanced rendering.
+
+ // Shaders expect a global "bloat" variable when calculating gradients.
+ v->defineConstant("half", "bloat", ".5");
+
+ const char* swizzle = (4 == numInputPoints || proc.hasInputWeight()) ? "xyzw" : "xyz";
+ v->codeAppendf("float%ix2 pts = transpose(float2x%i(X.%s, Y.%s));",
+ inputWidth, inputWidth, swizzle, swizzle);
+
+ const char* hullPts = "pts";
+ fShader->emitSetupCode(v, "pts", &hullPts);
+ v->codeAppendf("float2 vertexpos = %s[sk_VertexID ^ (sk_VertexID >> 1)];", hullPts);
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "vertexpos");
+
+ fShader->emitVaryings(varyingHandler, GrGLSLVarying::Scope::kVertToFrag,
+ &AccessCodeString(v), "vertexpos", nullptr, nullptr, nullptr);
+ }
+
+ // Fragment shader.
+ fShader->emitSampleMaskCode(args.fFragBuilder);
+}
+
+void GrSampleMaskProcessor::reset(PrimitiveType primitiveType, GrResourceProvider* rp) {
+ fPrimitiveType = primitiveType; // This will affect the return values for numInputPoints, etc.
+ SkASSERT(PrimitiveType::kWeightedTriangles != fPrimitiveType);
+
+ this->resetCustomFeatures();
+ fInputAttribs.reset();
+
+ switch (fPrimitiveType) {
+ case PrimitiveType::kTriangles:
+ case PrimitiveType::kWeightedTriangles:
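+            // (Asserted unreachable above; the case is listed to keep the switch exhaustive.)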
+ fInputAttribs.emplace_back("point", kFloat2_GrVertexAttribType, kFloat2_GrSLType);
+ this->setVertexAttributes(fInputAttribs.begin(), 1);
+ this->setInstanceAttributes(nullptr, 0);
+ break;
+ case PrimitiveType::kQuadratics:
+ case PrimitiveType::kCubics:
+ case PrimitiveType::kConics: {
+ auto instanceAttribType = (PrimitiveType::kQuadratics == fPrimitiveType)
+ ? kFloat3_GrVertexAttribType : kFloat4_GrVertexAttribType;
+ auto shaderVarType = (PrimitiveType::kQuadratics == fPrimitiveType)
+ ? kFloat3_GrSLType : kFloat4_GrSLType;
+ fInputAttribs.emplace_back("X", instanceAttribType, shaderVarType);
+ fInputAttribs.emplace_back("Y", instanceAttribType, shaderVarType);
+ this->setVertexAttributes(nullptr, 0);
+ this->setInstanceAttributes(fInputAttribs.begin(), fInputAttribs.count());
+ this->setWillUseCustomFeature(CustomFeatures::kSampleLocations);
+ break;
+ }
+ }
+}
+
+void GrSampleMaskProcessor::appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, SkTArray<GrMesh>* out) const {
+ SkASSERT(PrimitiveType::kWeightedTriangles != fPrimitiveType);
+
+ switch (fPrimitiveType) {
+ case PrimitiveType::kTriangles:
+ case PrimitiveType::kWeightedTriangles: {
+ GrMesh& mesh = out->emplace_back(GrPrimitiveType::kTriangles);
+ mesh.setNonIndexedNonInstanced(instanceCount * 3);
+ mesh.setVertexData(std::move(instanceBuffer), baseInstance * 3);
+ break;
+ }
+ case PrimitiveType::kQuadratics:
+ case PrimitiveType::kCubics:
+ case PrimitiveType::kConics: {
+ GrMesh& mesh = out->emplace_back(GrPrimitiveType::kTriangleStrip);
+ mesh.setInstanced(std::move(instanceBuffer), instanceCount, baseInstance, 4);
+ break;
+ }
+ }
+}
+
+GrGLSLPrimitiveProcessor* GrSampleMaskProcessor::onCreateGLSLInstance(
+ std::unique_ptr<Shader> shader) const {
+ return new Impl(std::move(shader));
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.h
new file mode 100644
index 0000000000..3fdd96f5e8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrSampleMaskProcessor.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSampleMaskProcessor_DEFINED
+#define GrSampleMaskProcessor_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class implements GrCCCoverageProcessor with MSAA using the sample mask.
+ */
+class GrSampleMaskProcessor : public GrCCCoverageProcessor {
+public:
+ GrSampleMaskProcessor() : GrCCCoverageProcessor(kGrSampleMaskProcessor_ClassID) {}
+
+private:
+ void reset(PrimitiveType, GrResourceProvider*) override;
+
+ void appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
+ SkTArray<GrMesh>* out) const override;
+
+ GrGLSLPrimitiveProcessor* onCreateGLSLInstance(std::unique_ptr<Shader>) const override;
+
+ SkSTArray<2, Attribute> fInputAttribs;
+
+ class Impl;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.cpp b/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.cpp
new file mode 100644
index 0000000000..c7ec6da90c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrStencilAtlasOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/ccpr/GrCCPerFlushResources.h"
+#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+namespace {
+
+class StencilResolveProcessor : public GrGeometryProcessor {
+public:
+ StencilResolveProcessor() : GrGeometryProcessor(kStencilResolveProcessor_ClassID) {
+ static constexpr Attribute kIBounds = {
+ "ibounds", kShort4_GrVertexAttribType, kShort4_GrSLType};
+ this->setInstanceAttributes(&kIBounds, 1);
+ SkASSERT(this->instanceStride() == sizeof(GrStencilAtlasOp::ResolveRectInstance));
+ }
+
+private:
+    const char* name() const override { return "StencilResolveProcessor"; }
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+ class Impl;
+};
+
+// This processor draws pixel-aligned rectangles directly on top of every path in the atlas.
+// The caller should have set up the instance data such that "Nonzero" paths get clockwise
+// rectangles (l < r) and "even/odd" paths get counter-clockwise (r < l). Its purpose
+// is to convert winding counts in the stencil buffer to A8 coverage in the color buffer.
+class StencilResolveProcessor::Impl : public GrGLSLGeometryProcessor {
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ args.fVaryingHandler->emitAttributes(args.fGP.cast<StencilResolveProcessor>());
+
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ v->codeAppendf("short2 devcoord;");
+ v->codeAppendf("devcoord.x = (0 == (sk_VertexID & 1)) ? ibounds.x : ibounds.z;");
+ v->codeAppendf("devcoord.y = (sk_VertexID < 2) ? ibounds.y : ibounds.w;");
+
+ v->codeAppendf("float2 atlascoord = float2(devcoord);");
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "atlascoord");
+
+ // Just output "1" for coverage. This will be modulated by the MSAA stencil test.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+ f->codeAppendf("%s = %s = half4(1);", args.fOutputColor, args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) override {}
+};
+
+GrGLSLPrimitiveProcessor* StencilResolveProcessor::createGLSLInstance(const GrShaderCaps&) const {
+ return new Impl();
+}
+
+}
+
+std::unique_ptr<GrDrawOp> GrStencilAtlasOp::Make(
+ GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
+ FillBatchID fillBatchID, StrokeBatchID strokeBatchID, int baseStencilResolveInstance,
+ int endStencilResolveInstance, const SkISize& drawBounds) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrStencilAtlasOp>(
+ std::move(resources), fillBatchID, strokeBatchID, baseStencilResolveInstance,
+ endStencilResolveInstance, drawBounds);
+}
+
+// Increments clockwise triangles and decrements counterclockwise. We use the same incr/decr
+// settings regardless of fill rule; fill rule is accounted for during the resolve step.
+static constexpr GrUserStencilSettings kIncrDecrStencil(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0x0000, 0x0000,
+ GrUserStencilTest::kNever, GrUserStencilTest::kNever,
+ 0xffff, 0xffff,
+ GrUserStencilOp::kIncWrap, GrUserStencilOp::kDecWrap,
+ GrUserStencilOp::kIncWrap, GrUserStencilOp::kDecWrap,
+ 0xffff, 0xffff>()
+);
+
+// Resolves stencil winding counts to A8 coverage. Leaves stencil values untouched.
+static constexpr GrUserStencilSettings kResolveStencilCoverage(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0x0000, 0x0000,
+ GrUserStencilTest::kNotEqual, GrUserStencilTest::kNotEqual,
+ 0xffff, 0x1,
+ GrUserStencilOp::kKeep, GrUserStencilOp::kKeep,
+ GrUserStencilOp::kKeep, GrUserStencilOp::kKeep,
+ 0xffff, 0xffff>()
+);
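+// Note the per-face test masks above: the clockwise ("Nonzero") resolve rects compare the full
+// 16-bit winding count against zero, while the counter-clockwise ("even/odd") rects test only the
+// low bit. This matches the rect winding set up by the caller (see StencilResolveProcessor).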
+
+// Same as above, but also resets stencil values to zero. This is better for non-tilers
+// where we prefer not to clear the stencil buffer at the beginning of every render pass.
+static constexpr GrUserStencilSettings kResolveStencilCoverageAndReset(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0x0000, 0x0000,
+ GrUserStencilTest::kNotEqual, GrUserStencilTest::kNotEqual,
+ 0xffff, 0x1,
+ GrUserStencilOp::kZero, GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep, GrUserStencilOp::kKeep,
+ 0xffff, 0xffff>()
+);
+
+void GrStencilAtlasOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ SkIRect drawBoundsRect = SkIRect::MakeWH(fDrawBounds.width(), fDrawBounds.height());
+
+ GrPipeline pipeline(
+ GrScissorTest::kEnabled, GrDisableColorXPFactory::MakeXferProcessor(),
+ flushState->drawOpArgs().outputSwizzle(), GrPipeline::InputFlags::kHWAntialias,
+ &kIncrDecrStencil);
+
+ GrSampleMaskProcessor sampleMaskProc;
+
+ fResources->filler().drawFills(
+ flushState, &sampleMaskProc, pipeline, fFillBatchID, drawBoundsRect);
+
+ fResources->stroker().drawStrokes(
+ flushState, &sampleMaskProc, fStrokeBatchID, drawBoundsRect);
+
+ // We resolve the stencil coverage to alpha by drawing pixel-aligned boxes. Fine raster is
+ // not necessary, and will even cause artifacts if using mixed samples.
+ constexpr auto noHWAA = GrPipeline::InputFlags::kNone;
+
+ const auto* stencilResolveSettings = (flushState->caps().discardStencilValuesAfterRenderPass())
+ // The next draw will be the final op in the renderTargetContext. So if Ganesh is
+ // planning to discard the stencil values anyway, we don't actually need to reset them
+ // back to zero.
+ ? &kResolveStencilCoverage
+ : &kResolveStencilCoverageAndReset;
+
+ GrPipeline resolvePipeline(GrScissorTest::kEnabled, SkBlendMode::kSrc,
+ flushState->drawOpArgs().outputSwizzle(), noHWAA,
+ stencilResolveSettings);
+ GrPipeline::FixedDynamicState scissorRectState(drawBoundsRect);
+
+ GrMesh mesh(GrPrimitiveType::kTriangleStrip);
+ mesh.setInstanced(fResources->refStencilResolveBuffer(),
+ fEndStencilResolveInstance - fBaseStencilResolveInstance,
+ fBaseStencilResolveInstance, 4);
+
+ StencilResolveProcessor primProc;
+
+ GrProgramInfo programInfo(flushState->drawOpArgs().numSamples(),
+ flushState->drawOpArgs().origin(),
+ resolvePipeline,
+ primProc,
+ &scissorRectState,
+ nullptr, 0);
+
+ flushState->opsRenderPass()->draw(programInfo, &mesh, 1, SkRect::Make(drawBoundsRect));
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.h b/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.h
new file mode 100644
index 0000000000..8c1f4fbddd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrStencilAtlasOp.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStencilAtlasOp_DEFINED
+#define GrStencilAtlasOp_DEFINED
+
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/ccpr/GrCCFiller.h"
+#include "src/gpu/ccpr/GrCCStroker.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrCCPerFlushResources;
+
+// Renders literal A8 coverage to a CCPR atlas using an intermediate MSAA stencil buffer.
+class GrStencilAtlasOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ using FillBatchID = GrCCFiller::BatchID;
+ using StrokeBatchID = GrCCStroker::BatchID;
+
+    // Once all the paths in an atlas have been drawn to the stencil buffer, we make a final pass
+    // that draws "resolve" rects over each path in order to convert its winding counts to A8
+    // coverage.
+ struct ResolveRectInstance {
+ int16_t l, t, r, b;
+ };
+
+ // GrDrawOp interface.
+ const char* name() const override { return "StencilAtlasOp (CCPR)"; }
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ return FixedFunctionFlags::kUsesHWAA | FixedFunctionFlags::kUsesStencil;
+ }
+
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override {
+ return GrProcessorSet::EmptySetAnalysis();
+ }
+ CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
+        // Each GrStencilAtlasOp renders its own atlas, so these ops never combine.
+ return CombineResult::kCannotCombine;
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+ static std::unique_ptr<GrDrawOp> Make(
+ GrRecordingContext*, sk_sp<const GrCCPerFlushResources>, FillBatchID, StrokeBatchID,
+ int baseStencilResolveInstance, int endStencilResolveInstance,
+ const SkISize& drawBounds);
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override;
+
+private:
+ friend class ::GrOpMemoryPool; // for ctor
+
+ GrStencilAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
+ StrokeBatchID strokeBatchID, int baseStencilResolveInstance,
+ int endStencilResolveInstance, const SkISize& drawBounds)
+ : GrDrawOp(ClassID())
+ , fResources(std::move(resources))
+ , fFillBatchID(fillBatchID)
+ , fStrokeBatchID(strokeBatchID)
+ , fBaseStencilResolveInstance(baseStencilResolveInstance)
+ , fEndStencilResolveInstance(endStencilResolveInstance)
+ , fDrawBounds(drawBounds) {
+ this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
+ GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
+ }
+
+ const sk_sp<const GrCCPerFlushResources> fResources;
+ const FillBatchID fFillBatchID;
+ const StrokeBatchID fStrokeBatchID;
+ const int fBaseStencilResolveInstance;
+ const int fEndStencilResolveInstance;
+ const SkISize fDrawBounds;
+ int fResolveBaseVertex;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.cpp b/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.cpp
new file mode 100644
index 0000000000..ae5cfca504
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.cpp
@@ -0,0 +1,553 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
+
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+// This class implements the coverage processor with vertex shaders.
+class GrVSCoverageProcessor::Impl : public GrGLSLGeometryProcessor {
+public:
+ Impl(std::unique_ptr<Shader> shader, int numSides)
+ : fShader(std::move(shader)), fNumSides(numSides) {}
+
+private:
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) final {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ const std::unique_ptr<Shader> fShader;
+ const int fNumSides;
+};
+
+static constexpr int kInstanceAttribIdx_X = 0; // Transposed X values of all input points.
+static constexpr int kInstanceAttribIdx_Y = 1; // Transposed Y values of all input points.
+
+// Vertex data tells the shader how to offset vertices for conservative raster, as well as how to
+// calculate coverage values for corners and edges.
+static constexpr int kVertexData_LeftNeighborIdShift = 10;
+static constexpr int kVertexData_RightNeighborIdShift = 8;
+static constexpr int kVertexData_BloatIdxShift = 6;
+static constexpr int kVertexData_InvertNegativeCoverageBit = 1 << 5;
+static constexpr int kVertexData_IsCornerBit = 1 << 4;
+static constexpr int kVertexData_IsEdgeBit = 1 << 3;
+static constexpr int kVertexData_IsHullBit = 1 << 2;
+
+static constexpr int32_t pack_vertex_data(int32_t leftNeighborID, int32_t rightNeighborID,
+ int32_t bloatIdx, int32_t cornerID,
+ int32_t extraData = 0) {
+ return (leftNeighborID << kVertexData_LeftNeighborIdShift) |
+ (rightNeighborID << kVertexData_RightNeighborIdShift) |
+ (bloatIdx << kVertexData_BloatIdxShift) |
+ cornerID | extraData;
+}
+
+static constexpr int32_t hull_vertex_data(int32_t cornerID, int32_t bloatIdx, int n) {
+ return pack_vertex_data((cornerID + n - 1) % n, (cornerID + 1) % n, bloatIdx, cornerID,
+ kVertexData_IsHullBit);
+}
+
+static constexpr int32_t edge_vertex_data(int32_t edgeID, int32_t endptIdx, int32_t bloatIdx,
+ int n) {
+ return pack_vertex_data(0 == endptIdx ? (edgeID + 1) % n : edgeID,
+ 0 == endptIdx ? (edgeID + 1) % n : edgeID,
+ bloatIdx, 0 == endptIdx ? edgeID : (edgeID + 1) % n,
+ kVertexData_IsEdgeBit |
+ (!endptIdx ? kVertexData_InvertNegativeCoverageBit : 0));
+}
+
+static constexpr int32_t corner_vertex_data(int32_t leftID, int32_t cornerID, int32_t rightID,
+ int32_t bloatIdx) {
+ return pack_vertex_data(leftID, rightID, bloatIdx, cornerID, kVertexData_IsCornerBit);
+}
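+
+// A worked example of the packing (derived from the shift constants above):
+// hull_vertex_data(1, 2, 3) packs leftNeighborID=0, rightNeighborID=2, bloatIdx=2,
+// cornerID=1 and the hull bit, i.e. (0 << 10) | (2 << 8) | (2 << 6) | 1 | (1 << 2) == 0x285.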
+
+static constexpr int32_t kTriangleVertices[] = {
+ hull_vertex_data(0, 0, 3),
+ hull_vertex_data(0, 1, 3),
+ hull_vertex_data(0, 2, 3),
+ hull_vertex_data(1, 0, 3),
+ hull_vertex_data(1, 1, 3),
+ hull_vertex_data(1, 2, 3),
+ hull_vertex_data(2, 0, 3),
+ hull_vertex_data(2, 1, 3),
+ hull_vertex_data(2, 2, 3),
+
+ edge_vertex_data(0, 0, 0, 3),
+ edge_vertex_data(0, 0, 1, 3),
+ edge_vertex_data(0, 0, 2, 3),
+ edge_vertex_data(0, 1, 0, 3),
+ edge_vertex_data(0, 1, 1, 3),
+ edge_vertex_data(0, 1, 2, 3),
+
+ edge_vertex_data(1, 0, 0, 3),
+ edge_vertex_data(1, 0, 1, 3),
+ edge_vertex_data(1, 0, 2, 3),
+ edge_vertex_data(1, 1, 0, 3),
+ edge_vertex_data(1, 1, 1, 3),
+ edge_vertex_data(1, 1, 2, 3),
+
+ edge_vertex_data(2, 0, 0, 3),
+ edge_vertex_data(2, 0, 1, 3),
+ edge_vertex_data(2, 0, 2, 3),
+ edge_vertex_data(2, 1, 0, 3),
+ edge_vertex_data(2, 1, 1, 3),
+ edge_vertex_data(2, 1, 2, 3),
+
+ corner_vertex_data(2, 0, 1, 0),
+ corner_vertex_data(2, 0, 1, 1),
+ corner_vertex_data(2, 0, 1, 2),
+ corner_vertex_data(2, 0, 1, 3),
+
+ corner_vertex_data(0, 1, 2, 0),
+ corner_vertex_data(0, 1, 2, 1),
+ corner_vertex_data(0, 1, 2, 2),
+ corner_vertex_data(0, 1, 2, 3),
+
+ corner_vertex_data(1, 2, 0, 0),
+ corner_vertex_data(1, 2, 0, 1),
+ corner_vertex_data(1, 2, 0, 2),
+ corner_vertex_data(1, 2, 0, 3),
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gTriangleVertexBufferKey);
+
+static constexpr uint16_t kRestartStrip = 0xffff;
+
+static constexpr uint16_t kTriangleIndicesAsStrips[] = {
+ 1, 2, 0, 3, 8, kRestartStrip, // First corner and main body of the hull.
+ 4, 5, 3, 6, 8, 7, kRestartStrip, // Opposite side and corners of the hull.
+ 10, 9, 11, 14, 12, 13, kRestartStrip, // First edge.
+ 16, 15, 17, 20, 18, 19, kRestartStrip, // Second edge.
+ 22, 21, 23, 26, 24, 25, kRestartStrip, // Third edge.
+ 28, 27, 29, 30, kRestartStrip, // First corner.
+ 32, 31, 33, 34, kRestartStrip, // Second corner.
+ 36, 35, 37, 38 // Third corner.
+};
+
+static constexpr uint16_t kTriangleIndicesAsTris[] = {
+ // First corner and main body of the hull.
+ 1, 2, 0,
+ 2, 3, 0,
+ 0, 3, 8, // Main body.
+
+ // Opposite side and corners of the hull.
+ 4, 5, 3,
+ 5, 6, 3,
+ 3, 6, 8,
+ 6, 7, 8,
+
+ // First edge.
+ 10, 9, 11,
+ 9, 14, 11,
+ 11, 14, 12,
+ 14, 13, 12,
+
+ // Second edge.
+ 16, 15, 17,
+ 15, 20, 17,
+ 17, 20, 18,
+ 20, 19, 18,
+
+ // Third edge.
+ 22, 21, 23,
+ 21, 26, 23,
+ 23, 26, 24,
+ 26, 25, 24,
+
+ // First corner.
+ 28, 27, 29,
+ 27, 30, 29,
+
+ // Second corner.
+ 32, 31, 33,
+ 31, 34, 33,
+
+ // Third corner.
+ 36, 35, 37,
+ 35, 38, 37,
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gTriangleIndexBufferKey);
+
+// Curves, including quadratics, are drawn with a four-sided hull.
+static constexpr int32_t kCurveVertices[] = {
+ hull_vertex_data(0, 0, 4),
+ hull_vertex_data(0, 1, 4),
+ hull_vertex_data(0, 2, 4),
+ hull_vertex_data(1, 0, 4),
+ hull_vertex_data(1, 1, 4),
+ hull_vertex_data(1, 2, 4),
+ hull_vertex_data(2, 0, 4),
+ hull_vertex_data(2, 1, 4),
+ hull_vertex_data(2, 2, 4),
+ hull_vertex_data(3, 0, 4),
+ hull_vertex_data(3, 1, 4),
+ hull_vertex_data(3, 2, 4),
+
+ corner_vertex_data(3, 0, 1, 0),
+ corner_vertex_data(3, 0, 1, 1),
+ corner_vertex_data(3, 0, 1, 2),
+ corner_vertex_data(3, 0, 1, 3),
+
+ corner_vertex_data(2, 3, 0, 0),
+ corner_vertex_data(2, 3, 0, 1),
+ corner_vertex_data(2, 3, 0, 2),
+ corner_vertex_data(2, 3, 0, 3),
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gCurveVertexBufferKey);
+
+static constexpr uint16_t kCurveIndicesAsStrips[] = {
+ 1, 0, 2, 11, 3, 5, 4, kRestartStrip, // First half of the hull (split diagonally).
+ 7, 6, 8, 5, 9, 11, 10, kRestartStrip, // Second half of the hull.
+ 13, 12, 14, 15, kRestartStrip, // First corner.
+ 17, 16, 18, 19 // Final corner.
+};
+
+static constexpr uint16_t kCurveIndicesAsTris[] = {
+ // First half of the hull (split diagonally).
+ 1, 0, 2,
+ 0, 11, 2,
+ 2, 11, 3,
+ 11, 5, 3,
+ 3, 5, 4,
+
+ // Second half of the hull.
+ 7, 6, 8,
+ 6, 5, 8,
+ 8, 5, 9,
+ 5, 11, 9,
+ 9, 11, 10,
+
+ // First corner.
+ 13, 12, 14,
+ 12, 15, 14,
+
+ // Final corner.
+ 17, 16, 18,
+ 16, 19, 18,
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gCurveIndexBufferKey);
+
+// Generates a conservative raster hull around a triangle or curve. For triangles we generate
+// additional conservative rasters with coverage ramps around the edges and corners.
+//
+// Triangles are drawn in three steps: (1) Draw a conservative raster of the entire triangle, with a
+// coverage of +1. (2) Draw conservative rasters around each edge, with a coverage ramp from -1 to
+// 0. These edge coverage values convert jagged conservative raster edges into smooth, antialiased
+// ones. (3) Draw conservative rasters (aka pixel-size boxes) around each corner, replacing the
+// previous coverage values with ones that ramp to zero in the bloat vertices that fall outside the
+// triangle.
+//
+// Curve shaders handle the opposite edge and corners on their own. For curves we just generate a
+// conservative raster here and the shader does the rest.
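+//
+// Illustrative numbers (a sketch, not taken from the code): a pixel centered exactly on
+// a triangle edge receives +1 from the hull pass and roughly -0.5 from the edge ramp,
+// netting the expected half coverage; pixels well inside the triangle keep the full +1.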
+void GrVSCoverageProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const GrVSCoverageProcessor& proc = args.fGP.cast<GrVSCoverageProcessor>();
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ int numInputPoints = proc.numInputPoints();
+
+ int inputWidth = (4 == numInputPoints || proc.hasInputWeight()) ? 4 : 3;
+ const char* swizzle = (4 == inputWidth) ? "xyzw" : "xyz";
+ v->codeAppendf("float%ix2 pts = transpose(float2x%i(%s.%s, %s.%s));", inputWidth, inputWidth,
+ proc.fInputXAndYValues[kInstanceAttribIdx_X].name(), swizzle,
+ proc.fInputXAndYValues[kInstanceAttribIdx_Y].name(), swizzle);
+
+ v->codeAppend ("half wind;");
+ Shader::CalcWind(proc, v, "pts", "wind");
+ if (PrimitiveType::kWeightedTriangles == proc.fPrimitiveType) {
+ SkASSERT(3 == numInputPoints);
+ SkASSERT(kFloat4_GrVertexAttribType ==
+ proc.fInputXAndYValues[kInstanceAttribIdx_X].cpuType());
+ v->codeAppendf("wind *= half(%s.w);",
+ proc.fInputXAndYValues[kInstanceAttribIdx_X].name());
+ }
+
+ float bloat = kAABloatRadius;
+#ifdef SK_DEBUG
+ if (proc.debugBloatEnabled()) {
+ bloat *= proc.debugBloat();
+ }
+#endif
+ v->defineConstant("bloat", bloat);
+
+ const char* hullPts = "pts";
+ fShader->emitSetupCode(v, "pts", (4 == fNumSides) ? &hullPts : nullptr);
+
+ // Reverse all indices if the wind is counter-clockwise: [0, 1, 2] -> [2, 1, 0].
+ v->codeAppendf("int clockwise_indices = wind > 0 ? %s : 0x%x - %s;",
+ proc.fPerVertexData.name(),
+ ((fNumSides - 1) << kVertexData_LeftNeighborIdShift) |
+ ((fNumSides - 1) << kVertexData_RightNeighborIdShift) |
+ (((1 << kVertexData_RightNeighborIdShift) - 1) ^ 3) |
+ (fNumSides - 1),
+ proc.fPerVertexData.name());
+
+ // Here we generate conservative raster geometry for the input polygon. It is the convex
+ // hull of N pixel-size boxes, one centered on each of the input points. Each corner has three
+ // vertices, where one or two may cause degenerate triangles. The vertex data tells us how
+ // to offset each vertex. Triangle edges and corners are also handled here using the same
+ // concept. For more details on conservative raster, see:
+ // https://developer.nvidia.com/gpugems/GPUGems2/gpugems2_chapter42.html
+ v->codeAppendf("float2 corner = %s[clockwise_indices & 3];", hullPts);
+ v->codeAppendf("float2 left = %s[clockwise_indices >> %i];",
+ hullPts, kVertexData_LeftNeighborIdShift);
+ v->codeAppendf("float2 right = %s[(clockwise_indices >> %i) & 3];",
+ hullPts, kVertexData_RightNeighborIdShift);
+
+ v->codeAppend ("float2 leftbloat = sign(corner - left);");
+ v->codeAppend ("leftbloat = float2(0 != leftbloat.y ? leftbloat.y : leftbloat.x, "
+ "0 != leftbloat.x ? -leftbloat.x : -leftbloat.y);");
+
+ v->codeAppend ("float2 rightbloat = sign(right - corner);");
+ v->codeAppend ("rightbloat = float2(0 != rightbloat.y ? rightbloat.y : rightbloat.x, "
+ "0 != rightbloat.x ? -rightbloat.x : -rightbloat.y);");
+
+ v->codeAppend ("bool2 left_right_notequal = notEqual(leftbloat, rightbloat);");
+
+ v->codeAppend ("float2 bloatdir = leftbloat;");
+
+ v->codeAppend ("float2 leftdir = corner - left;");
+ v->codeAppend ("leftdir = (float2(0) != leftdir) ? normalize(leftdir) : float2(1, 0);");
+
+ v->codeAppend ("float2 rightdir = right - corner;");
+ v->codeAppend ("rightdir = (float2(0) != rightdir) ? normalize(rightdir) : float2(1, 0);");
+
+ v->codeAppendf("if (0 != (%s & %i)) {", // Are we a corner?
+ proc.fPerVertexData.name(), kVertexData_IsCornerBit);
+
+ // In corner boxes, the 4 coverage values do not map linearly, so it
+ // is important to align the box so its diagonal shared edge points
+ // out of the triangle, in the direction that ramps to 0.
+ v->codeAppend ( "bloatdir = float2(leftdir.x > rightdir.x ? +1 : -1, "
+ "leftdir.y > rightdir.y ? +1 : -1);");
+
+ // For corner boxes, we hack left_right_notequal to always be true.
+ // This in turn causes the upcoming code to always rotate, generating
+ // all 4 vertices of the corner box.
+ v->codeAppendf( "left_right_notequal = bool2(true);");
+ v->codeAppend ("}");
+
+ // At each corner of the polygon, our hull will have either 1, 2, or 3 vertices (or 4 if
+ // it's a corner box). We begin with this corner's first raster vertex (leftbloat), then
+ // continue rotating 90 degrees clockwise until we reach the desired raster vertex for this
+ // invocation. Corners with fewer than 3 corresponding raster vertices will result in
+ // redundant vertices and degenerate triangles.
+ v->codeAppendf("int bloatidx = (%s >> %i) & 3;", proc.fPerVertexData.name(),
+ kVertexData_BloatIdxShift);
+ v->codeAppend ("switch (bloatidx) {");
+ v->codeAppend ( "case 3:");
+ // Only corners will have bloatidx=3, and corners always rotate.
+ v->codeAppend ( "bloatdir = float2(-bloatdir.y, +bloatdir.x);"); // 90 deg CW.
+ // fallthru.
+ v->codeAppend ( "case 2:");
+ v->codeAppendf( "if (all(left_right_notequal)) {");
+ v->codeAppend ( "bloatdir = float2(-bloatdir.y, +bloatdir.x);"); // 90 deg CW.
+ v->codeAppend ( "}");
+ // fallthru.
+ v->codeAppend ( "case 1:");
+ v->codeAppendf( "if (any(left_right_notequal)) {");
+ v->codeAppend ( "bloatdir = float2(-bloatdir.y, +bloatdir.x);"); // 90 deg CW.
+ v->codeAppend ( "}");
+ // fallthru.
+ v->codeAppend ("}");
+
+ v->codeAppend ("float2 vertexpos = fma(bloatdir, float2(bloat), corner);");
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "vertexpos");
+
+ // Hulls have a coverage of +1 all around.
+ v->codeAppend ("half coverage = +1;");
+
+ if (3 == fNumSides) {
+ v->codeAppend ("half left_coverage; {");
+ Shader::CalcEdgeCoverageAtBloatVertex(v, "left", "corner", "bloatdir", "left_coverage");
+ v->codeAppend ("}");
+
+ v->codeAppend ("half right_coverage; {");
+ Shader::CalcEdgeCoverageAtBloatVertex(v, "corner", "right", "bloatdir", "right_coverage");
+ v->codeAppend ("}");
+
+ v->codeAppendf("if (0 != (%s & %i)) {", // Are we an edge?
+ proc.fPerVertexData.name(), kVertexData_IsEdgeBit);
+ v->codeAppend ( "coverage = left_coverage;");
+ v->codeAppend ("}");
+
+ v->codeAppendf("if (0 != (%s & %i)) {", // Invert coverage?
+ proc.fPerVertexData.name(),
+ kVertexData_InvertNegativeCoverageBit);
+ v->codeAppend ( "coverage = -1 - coverage;");
+ v->codeAppend ("}");
+ } else if (!fShader->calculatesOwnEdgeCoverage()) {
+ // Determine the amount of coverage to subtract out for the flat edge of the curve.
+ v->codeAppendf("float2 p0 = pts[0], p1 = pts[%i];", numInputPoints - 1);
+ v->codeAppendf("float2 n = float2(p0.y - p1.y, p1.x - p0.x);");
+ v->codeAppend ("float nwidth = bloat*2 * (abs(n.x) + abs(n.y));");
+ // When nwidth=0, wind must also be 0 (and coverage * wind = 0). So it doesn't matter
+ // what we come up with here as long as it isn't NaN or Inf.
+ v->codeAppend ("float d = dot(p0 - vertexpos, n);");
+ v->codeAppend ("d /= (0 != nwidth) ? nwidth : 1;");
+ v->codeAppend ("coverage = half(d) - .5*sign(wind);");
+ }
+
+ // Non-corner geometry should have zero effect from corner coverage.
+ v->codeAppend ("half2 corner_coverage = half2(0);");
+
+ v->codeAppendf("if (0 != (%s & %i)) {", // Are we a corner?
+ proc.fPerVertexData.name(), kVertexData_IsCornerBit);
+ // Erase what the previous geometry wrote.
+ v->codeAppend ( "wind = -wind;");
+ if (3 == fNumSides) {
+ v->codeAppend ("coverage = 1 + left_coverage + right_coverage;");
+ } else if (!fShader->calculatesOwnEdgeCoverage()) {
+ v->codeAppend ("coverage = -coverage;");
+ }
+
+ // Corner boxes require attenuated coverage.
+ v->codeAppend ( "half attenuation; {");
+ Shader::CalcCornerAttenuation(v, "leftdir", "rightdir", "attenuation");
+ v->codeAppend ( "}");
+
+ // Attenuate corner coverage towards the outermost vertex (where bloatidx=0).
+ // This is all that curves need: At each vertex of the corner box, the curve
+ // Shader will calculate the curve's local coverage value, interpolate it
+ // alongside our attenuation parameter, and multiply the two together for a
+ // final coverage value.
+ v->codeAppend ( "corner_coverage = (0 == bloatidx) ? half2(0, attenuation) : half2(-1,+1);");
+
+ if (3 == fNumSides) {
+ // For triangles we also provide the actual coverage values at each vertex of
+ // the corner box.
+ v->codeAppend ("if (1 == bloatidx || 2 == bloatidx) {");
+ v->codeAppend ( "corner_coverage.x -= right_coverage;");
+ v->codeAppend ("}");
+ v->codeAppend ("if (bloatidx >= 2) {");
+ v->codeAppend ( "corner_coverage.x -= left_coverage;");
+ v->codeAppend ("}");
+ }
+ v->codeAppend ("}");
+
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ v->codeAppend ("coverage *= wind;");
+ v->codeAppend ("corner_coverage.x *= wind;");
+ fShader->emitVaryings(varyingHandler, GrGLSLVarying::Scope::kVertToFrag, &AccessCodeString(v),
+ "vertexpos", "coverage", "corner_coverage", "wind");
+
+ varyingHandler->emitAttributes(proc);
+ SkASSERT(!args.fFPCoordTransformHandler->nextCoordTransform());
+
+ // Fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+ f->codeAppendf("half coverage;");
+ fShader->emitFragmentCoverageCode(f, "coverage");
+ f->codeAppendf("%s = half4(coverage);", args.fOutputColor);
+ f->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+}
+
+void GrVSCoverageProcessor::reset(PrimitiveType primitiveType, GrResourceProvider* rp) {
+ const GrCaps& caps = *rp->caps();
+
+ fPrimitiveType = primitiveType;
+ switch (fPrimitiveType) {
+ case PrimitiveType::kTriangles:
+ case PrimitiveType::kWeightedTriangles: {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gTriangleVertexBufferKey);
+ fVertexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kTriangleVertices), kTriangleVertices,
+ gTriangleVertexBufferKey);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gTriangleIndexBufferKey);
+ if (caps.usePrimitiveRestart()) {
+ fIndexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kTriangleIndicesAsStrips),
+ kTriangleIndicesAsStrips, gTriangleIndexBufferKey);
+ fNumIndicesPerInstance = SK_ARRAY_COUNT(kTriangleIndicesAsStrips);
+ } else {
+ fIndexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kTriangleIndicesAsTris),
+ kTriangleIndicesAsTris, gTriangleIndexBufferKey);
+ fNumIndicesPerInstance = SK_ARRAY_COUNT(kTriangleIndicesAsTris);
+ }
+ break;
+ }
+
+ case PrimitiveType::kQuadratics:
+ case PrimitiveType::kCubics:
+ case PrimitiveType::kConics: {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gCurveVertexBufferKey);
+ fVertexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kCurveVertices), kCurveVertices,
+ gCurveVertexBufferKey);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gCurveIndexBufferKey);
+ if (caps.usePrimitiveRestart()) {
+ fIndexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kCurveIndicesAsStrips),
+ kCurveIndicesAsStrips, gCurveIndexBufferKey);
+ fNumIndicesPerInstance = SK_ARRAY_COUNT(kCurveIndicesAsStrips);
+ } else {
+ fIndexBuffer = rp->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kCurveIndicesAsTris), kCurveIndicesAsTris,
+ gCurveIndexBufferKey);
+ fNumIndicesPerInstance = SK_ARRAY_COUNT(kCurveIndicesAsTris);
+ }
+ break;
+ }
+ }
+
+ GrVertexAttribType xyAttribType;
+ GrSLType xySLType;
+ if (4 == this->numInputPoints() || this->hasInputWeight()) {
+ GR_STATIC_ASSERT(offsetof(QuadPointInstance, fX) == 0);
+ GR_STATIC_ASSERT(sizeof(QuadPointInstance::fX) ==
+ GrVertexAttribTypeSize(kFloat4_GrVertexAttribType));
+ GR_STATIC_ASSERT(sizeof(QuadPointInstance::fY) ==
+ GrVertexAttribTypeSize(kFloat4_GrVertexAttribType));
+ xyAttribType = kFloat4_GrVertexAttribType;
+ xySLType = kFloat4_GrSLType;
+ } else {
+ GR_STATIC_ASSERT(sizeof(TriPointInstance) ==
+ 2 * GrVertexAttribTypeSize(kFloat3_GrVertexAttribType));
+ xyAttribType = kFloat3_GrVertexAttribType;
+ xySLType = kFloat3_GrSLType;
+ }
+ fInputXAndYValues[kInstanceAttribIdx_X] = {"X", xyAttribType, xySLType};
+ fInputXAndYValues[kInstanceAttribIdx_Y] = {"Y", xyAttribType, xySLType};
+ this->setInstanceAttributes(fInputXAndYValues, 2);
+ fPerVertexData = {"vertexdata", kInt_GrVertexAttribType, kInt_GrSLType};
+ this->setVertexAttributes(&fPerVertexData, 1);
+
+ if (caps.usePrimitiveRestart()) {
+ fTriangleType = GrPrimitiveType::kTriangleStrip;
+ } else {
+ fTriangleType = GrPrimitiveType::kTriangles;
+ }
+}
+
+void GrVSCoverageProcessor::appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
+ int baseInstance, SkTArray<GrMesh>* out) const {
+ GrMesh& mesh = out->emplace_back(fTriangleType);
+ auto primitiveRestart = GrPrimitiveRestart(GrPrimitiveType::kTriangleStrip == fTriangleType);
+ mesh.setIndexedInstanced(fIndexBuffer, fNumIndicesPerInstance, std::move(instanceBuffer),
+ instanceCount, baseInstance, primitiveRestart);
+ mesh.setVertexData(fVertexBuffer, 0);
+}
+
+GrGLSLPrimitiveProcessor* GrVSCoverageProcessor::onCreateGLSLInstance(
+ std::unique_ptr<Shader> shader) const {
+ switch (fPrimitiveType) {
+ case PrimitiveType::kTriangles:
+ case PrimitiveType::kWeightedTriangles:
+ return new Impl(std::move(shader), 3);
+ case PrimitiveType::kQuadratics:
+ case PrimitiveType::kCubics:
+ case PrimitiveType::kConics:
+ return new Impl(std::move(shader), 4);
+ }
+ SK_ABORT("Invalid PrimitiveType");
+}
diff --git a/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.h b/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.h
new file mode 100644
index 0000000000..6f4ce45c72
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ccpr/GrVSCoverageProcessor.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVSCoverageProcessor_DEFINED
+#define GrVSCoverageProcessor_DEFINED
+
+#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
+
+/**
+ * This class implements GrCCCoverageProcessor with analytic coverage using vertex shaders.
+ */
+class GrVSCoverageProcessor : public GrCCCoverageProcessor {
+public:
+ GrVSCoverageProcessor() : GrCCCoverageProcessor(kGrVSCoverageProcessor_ClassID) {}
+
+private:
+ void reset(PrimitiveType, GrResourceProvider*) override;
+
+ void appendMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
+ SkTArray<GrMesh>* out) const override;
+
+ GrGLSLPrimitiveProcessor* onCreateGLSLInstance(std::unique_ptr<Shader>) const override;
+
+ Attribute fPerVertexData;
+ Attribute fInputXAndYValues[2];
+ sk_sp<const GrGpuBuffer> fVertexBuffer;
+ sk_sp<const GrGpuBuffer> fIndexBuffer;
+ int fNumIndicesPerInstance;
+ GrPrimitiveType fTriangleType;
+
+ class Impl;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.cpp
new file mode 100644
index 0000000000..b01cbcddff
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnBuffer.h"
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+
+namespace {
+ dawn::BufferUsage GrGpuBufferTypeToDawnUsageBit(GrGpuBufferType type) {
+ switch (type) {
+ case GrGpuBufferType::kVertex:
+ return dawn::BufferUsage::Vertex;
+ case GrGpuBufferType::kIndex:
+ return dawn::BufferUsage::Index;
+ case GrGpuBufferType::kXferCpuToGpu:
+ return dawn::BufferUsage::CopySrc;
+ case GrGpuBufferType::kXferGpuToCpu:
+ return dawn::BufferUsage::CopyDst;
+ default:
+ SkASSERT(!"buffer type not supported by Dawn");
+ return dawn::BufferUsage::Vertex;
+ }
+ }
+}
+
+GrDawnBuffer::GrDawnBuffer(GrDawnGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
+ GrAccessPattern pattern)
+ : INHERITED(gpu, sizeInBytes, type, pattern)
+ , fStagingBuffer(nullptr) {
+ dawn::BufferDescriptor bufferDesc;
+ bufferDesc.size = sizeInBytes;
+ bufferDesc.usage = GrGpuBufferTypeToDawnUsageBit(type) | dawn::BufferUsage::CopyDst;
+ fBuffer = this->getDawnGpu()->device().CreateBuffer(&bufferDesc);
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrDawnBuffer::~GrDawnBuffer() {
+}
+
+void GrDawnBuffer::onMap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ fStagingBuffer = getDawnGpu()->getStagingBuffer(this->size());
+ fMapPtr = fStagingBuffer->fData;
+}
+
+void GrDawnBuffer::onUnmap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ fStagingBuffer->fBuffer.Unmap();
+ fMapPtr = nullptr;
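+ // This only records a staging-to-GPU copy on the shared copy encoder; the bytes land
+ // in fBuffer once that encoder is flushed and its command buffer submitted.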
+ getDawnGpu()->getCopyEncoder()
+ .CopyBufferToBuffer(fStagingBuffer->fBuffer, 0, fBuffer, 0, this->size());
+}
+
+bool GrDawnBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (this->wasDestroyed()) {
+ return false;
+ }
+ this->onMap();
+ memcpy(fStagingBuffer->fData, src, srcSizeInBytes);
+ this->onUnmap();
+ return true;
+}
+
+GrDawnGpu* GrDawnBuffer::getDawnGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrDawnGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.h b/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.h
new file mode 100644
index 0000000000..17fa1c8d6e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnBuffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnBuffer_DEFINED
+#define GrDawnBuffer_DEFINED
+
+#include "src/gpu/GrGpuBuffer.h"
+#include "dawn/dawncpp.h"
+
+class GrDawnGpu;
+struct GrDawnStagingBuffer;
+
+class GrDawnBuffer : public GrGpuBuffer {
+public:
+ GrDawnBuffer(GrDawnGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern pattern);
+ ~GrDawnBuffer() override;
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrDawnGpu* getDawnGpu() const;
+ dawn::Buffer get() const { return fBuffer; }
+
+private:
+ dawn::Buffer fBuffer;
+ GrDawnStagingBuffer* fStagingBuffer;
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.cpp
new file mode 100644
index 0000000000..8caa134c62
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnCaps.h"
+
+GrDawnCaps::GrDawnCaps(const GrContextOptions& contextOptions) : INHERITED(contextOptions) {
+ fMipMapSupport = true;
+ fBufferMapThreshold = SK_MaxS32; // FIXME: get this from Dawn?
+ fShaderCaps.reset(new GrShaderCaps(contextOptions));
+ fMaxTextureSize = fMaxRenderTargetSize = 4096; // FIXME
+ fMaxVertexAttributes = 16; // FIXME
+ fClampToBorderSupport = false;
+ fPerformPartialClearsAsDraws = true;
+
+ fShaderCaps->fFlatInterpolationSupport = true;
+ fShaderCaps->fIntegerSupport = true;
+ // FIXME: each fragment sampler takes two binding slots in Dawn (sampler + texture). Limit to
+ // 6 * 2 = 12, since kMaxBindingsPerGroup is 16 in Dawn, and we need to keep a few for
+ // non-texture bindings. Eventually, we may be able to increase kMaxBindingsPerGroup in Dawn.
+ fShaderCaps->fMaxFragmentSamplers = 6;
+ fShaderCaps->fShaderDerivativeSupport = true;
+
+ this->applyOptionsOverrides(contextOptions);
+ fShaderCaps->applyOptionsOverrides(contextOptions);
+}
+
+bool GrDawnCaps::isFormatSRGB(const GrBackendFormat& format) const {
+ return false;
+}
+
+bool GrDawnCaps::isFormatCompressed(const GrBackendFormat& format,
+ SkImage::CompressionType* compressionType) const {
+ return false;
+}
+
+bool GrDawnCaps::isFormatTexturable(const GrBackendFormat& format) const {
+ // Currently, all the formats in GrDawnFormatToPixelConfig are texturable.
+ dawn::TextureFormat dawnFormat;
+ return format.asDawnFormat(&dawnFormat);
+}
+
+GrPixelConfig GrDawnCaps::onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType colorType) const {
+ dawn::TextureFormat dawnFormat;
+ if (!format.asDawnFormat(&dawnFormat)) {
+ return kUnknown_GrPixelConfig;
+ }
+ switch (colorType) {
+ case GrColorType::kUnknown:
+ return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_8:
+ if (dawn::TextureFormat::R8Unorm == dawnFormat) {
+ return kAlpha_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888:
+ if (dawn::TextureFormat::RGBA8Unorm == dawnFormat) {
+ return kRGBA_8888_GrPixelConfig;
+ } else if (dawn::TextureFormat::BGRA8Unorm == dawnFormat) {
+ // FIXME: This shouldn't be necessary, but on some platforms (Mac)
+ // Skia byte order is RGBA, while preferred swap format is BGRA.
+ return kBGRA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGB_888x:
+ break;
+ case GrColorType::kBGRA_8888:
+ if (dawn::TextureFormat::BGRA8Unorm == dawnFormat) {
+ return kBGRA_8888_GrPixelConfig;
+ } else if (dawn::TextureFormat::RGBA8Unorm == dawnFormat) {
+ return kRGBA_8888_GrPixelConfig;
+ }
+ break;
+ default:
+ break;
+ }
+ return kUnknown_GrPixelConfig;
+}
+
+static GrSwizzle get_swizzle(const GrBackendFormat& format, GrColorType colorType,
+ bool forOutput) {
+ switch (colorType) {
+ case GrColorType::kAlpha_8: // fall through
+ case GrColorType::kAlpha_F16:
+ if (forOutput) {
+ return GrSwizzle::AAAA();
+ } else {
+ return GrSwizzle::RRRR();
+ }
+ case GrColorType::kGray_8:
+ if (!forOutput) {
+ return GrSwizzle::RRRA();
+ }
+ break;
+ case GrColorType::kRGB_888x:
+ if (!forOutput) {
+ return GrSwizzle::RGB1();
+ }
+ default:
+ return GrSwizzle::RGBA();
+ }
+ return GrSwizzle::RGBA();
+}
+
+bool GrDawnCaps::isFormatTexturableAndUploadable(GrColorType ct,
+ const GrBackendFormat& format) const {
+ dawn::TextureFormat dawnFormat;
+ if (!format.asDawnFormat(&dawnFormat)) {
+ return false;
+ }
+ switch (ct) {
+ case GrColorType::kAlpha_8:
+ return dawn::TextureFormat::R8Unorm == dawnFormat;
+ case GrColorType::kRGBA_8888:
+ case GrColorType::kRGB_888x:
+ case GrColorType::kBGRA_8888:
+ return dawn::TextureFormat::RGBA8Unorm == dawnFormat ||
+ dawn::TextureFormat::BGRA8Unorm == dawnFormat;
+ default:
+ return false;
+ }
+}
+
+bool GrDawnCaps::isFormatRenderable(const GrBackendFormat& format,
+ int sampleCount) const {
+ dawn::TextureFormat dawnFormat;
+ if (!format.isValid() || sampleCount > 1 || !format.asDawnFormat(&dawnFormat)) {
+ return false;
+ }
+
+ return GrDawnFormatIsRenderable(dawnFormat);
+}
+
+bool GrDawnCaps::isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount) const {
+ return isFormatRenderable(format, sampleCount);
+}
+
+size_t GrDawnCaps::bytesPerPixel(const GrBackendFormat& backendFormat) const {
+ dawn::TextureFormat dawnFormat;
+ if (!backendFormat.asDawnFormat(&dawnFormat)) {
+ return 0;
+ }
+ return GrDawnBytesPerPixel(dawnFormat);
+}
+
+int GrDawnCaps::getRenderTargetSampleCount(int requestedCount,
+ const GrBackendFormat& backendFormat) const {
+ dawn::TextureFormat dawnFormat;
+ if (!backendFormat.asDawnFormat(&dawnFormat)) {
+ return 0;
+ }
+ return GrDawnFormatIsRenderable(dawnFormat) ? 1 : 0;
+}
+
+int GrDawnCaps::maxRenderTargetSampleCount(const GrBackendFormat& format) const {
+ return format.isValid() ? 1 : 0;
+}
+
+GrBackendFormat GrDawnCaps::onGetDefaultBackendFormat(GrColorType ct,
+ GrRenderable renderable) const {
+ GrPixelConfig config = GrColorTypeToPixelConfig(ct);
+ if (config == kUnknown_GrPixelConfig) {
+ return GrBackendFormat();
+ }
+ dawn::TextureFormat format;
+ if (!GrPixelConfigToDawnFormat(config, &format)) {
+ return GrBackendFormat();
+ }
+ return GrBackendFormat::MakeDawn(format);
+}
+
+GrBackendFormat GrDawnCaps::getBackendFormatFromCompressionType(SkImage::CompressionType type) const
+{
+ return GrBackendFormat();
+}
+
+GrSwizzle GrDawnCaps::getTextureSwizzle(const GrBackendFormat& format, GrColorType colorType) const
+{
+ return get_swizzle(format, colorType, false);
+}
+
+GrSwizzle GrDawnCaps::getOutputSwizzle(const GrBackendFormat& format, GrColorType colorType) const
+{
+ return get_swizzle(format, colorType, true);
+}
+
+bool GrDawnCaps::onAreColorTypeAndFormatCompatible(GrColorType ct,
+ const GrBackendFormat& format) const {
+ return true;
+}
+
+GrColorType GrDawnCaps::getYUVAColorTypeFromBackendFormat(const GrBackendFormat& backendFormat,
+ bool isAlphaChannel) const {
+ dawn::TextureFormat textureFormat;
+ if (!backendFormat.asDawnFormat(&textureFormat)) {
+ return GrColorType::kUnknown;
+ }
+ switch (textureFormat) {
+ case dawn::TextureFormat::R8Unorm: return isAlphaChannel ? GrColorType::kAlpha_8
+ : GrColorType::kGray_8;
+ case dawn::TextureFormat::RGBA8Unorm: return GrColorType::kRGBA_8888;
+ case dawn::TextureFormat::BGRA8Unorm: return GrColorType::kBGRA_8888;
+ default: return GrColorType::kUnknown;
+ }
+}
+
+#if GR_TEST_UTILS
+std::vector<GrCaps::TestFormatColorTypeCombination> GrDawnCaps::getTestingCombinations() const {
+ std::vector<GrCaps::TestFormatColorTypeCombination> combos = {
+ { GrColorType::kAlpha_8, GrBackendFormat::MakeDawn(dawn::TextureFormat::R8Unorm) },
+ { GrColorType::kRGBA_8888, GrBackendFormat::MakeDawn(dawn::TextureFormat::RGBA8Unorm) },
+ { GrColorType::kRGBA_8888, GrBackendFormat::MakeDawn(dawn::TextureFormat::BGRA8Unorm) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeDawn(dawn::TextureFormat::RGBA8Unorm) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeDawn(dawn::TextureFormat::BGRA8Unorm) },
+ { GrColorType::kBGRA_8888, GrBackendFormat::MakeDawn(dawn::TextureFormat::BGRA8Unorm) },
+ { GrColorType::kBGRA_8888, GrBackendFormat::MakeDawn(dawn::TextureFormat::RGBA8Unorm) },
+ };
+
+#ifdef SK_DEBUG
+ for (auto combo : combos) {
+ SkASSERT(this->onAreColorTypeAndFormatCompatible(combo.fColorType, combo.fFormat));
+ }
+#endif
+ return combos;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.h b/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.h
new file mode 100644
index 0000000000..6af5d7d0a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnCaps.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnCaps_DEFINED
+#define GrDawnCaps_DEFINED
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+class GrDawnCaps : public GrCaps {
+public:
+ GrDawnCaps(const GrContextOptions& contextOptions);
+
+ bool isFormatSRGB(const GrBackendFormat&) const override;
+ bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const override;
+
+ bool isFormatTexturableAndUploadable(GrColorType, const GrBackendFormat& format) const override;
+ bool isFormatRenderable(const GrBackendFormat& format,
+ int sampleCount = 1) const override;
+ bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const override;
+
+
+ bool isFormatCopyable(const GrBackendFormat& format) const override { return true; }
+
+ bool isFormatTexturable(const GrBackendFormat& format) const override;
+
+ SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const override {
+ return {surfaceColorType, GrColorTypeBytesPerPixel(surfaceColorType)};
+ }
+
+ SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override {
+ return SurfaceReadPixelsSupport::kSupported;
+ }
+
+ size_t bytesPerPixel(const GrBackendFormat&) const override;
+
+ int getRenderTargetSampleCount(int requestedCount,
+ const GrBackendFormat&) const override;
+
+ int maxRenderTargetSampleCount(const GrBackendFormat& format) const override;
+
+ GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override;
+
+ GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const override;
+
+ GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const override;
+
+ GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat&,
+ bool isAlphaChannel) const override;
+
+#if GR_TEST_UTILS
+ std::vector<TestFormatColorTypeCombination> getTestingCombinations() const override;
+#endif
+
+private:
+ bool onSurfaceSupportsWritePixels(const GrSurface* surface) const override {
+ return true;
+ }
+ bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const override {
+ return true;
+ }
+ GrBackendFormat onGetDefaultBackendFormat(GrColorType, GrRenderable) const override;
+
+ GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat&, GrColorType) const override;
+
+ bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const override;
+
+ SupportedRead onSupportedReadPixelsColorType(GrColorType srcColorType,
+ const GrBackendFormat& backendFormat,
+ GrColorType dstColorType) const override {
+ return { srcColorType, GrColorTypeBytesPerPixel(srcColorType) };
+ }
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.cpp
new file mode 100644
index 0000000000..54c7c569f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.cpp
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrGpuResourceCacheAccess.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/dawn/GrDawnBuffer.h"
+#include "src/gpu/dawn/GrDawnCaps.h"
+#include "src/gpu/dawn/GrDawnOpsRenderPass.h"
+#include "src/gpu/dawn/GrDawnProgramBuilder.h"
+#include "src/gpu/dawn/GrDawnRenderTarget.h"
+#include "src/gpu/dawn/GrDawnStencilAttachment.h"
+#include "src/gpu/dawn/GrDawnTexture.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkMipMap.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#if !defined(SK_BUILD_FOR_WIN)
+#include <unistd.h>
+#endif // !defined(SK_BUILD_FOR_WIN)
+
+const int kMaxRenderPipelineEntries = 1024;
+
+static dawn::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
+ switch (filter) {
+ case GrSamplerState::Filter::kNearest:
+ return dawn::FilterMode::Nearest;
+ case GrSamplerState::Filter::kBilerp:
+ case GrSamplerState::Filter::kMipMap:
+ return dawn::FilterMode::Linear;
+ default:
+ SkASSERT(!"unsupported filter mode");
+ return dawn::FilterMode::Nearest;
+ }
+}
+
+static dawn::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
+ switch (wrapMode) {
+ case GrSamplerState::WrapMode::kClamp:
+ return dawn::AddressMode::ClampToEdge;
+ case GrSamplerState::WrapMode::kRepeat:
+ return dawn::AddressMode::Repeat;
+ case GrSamplerState::WrapMode::kMirrorRepeat:
+ return dawn::AddressMode::MirrorRepeat;
+ case GrSamplerState::WrapMode::kClampToBorder:
+ SkASSERT(!"unsupported address mode");
+ }
+ SkASSERT(!"unsupported address mode");
+ return dawn::AddressMode::ClampToEdge;
+}
+
+// FIXME: taken from GrVkPipelineState; refactor.
+static uint32_t get_blend_info_key(const GrPipeline& pipeline) {
+ GrXferProcessor::BlendInfo blendInfo = pipeline.getXferProcessor().getBlendInfo();
+
+ static const uint32_t kBlendWriteShift = 1;
+ static const uint32_t kBlendCoeffShift = 5;
+ GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift));
+ GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4);
+
+ uint32_t key = blendInfo.fWriteColor;
+ key |= (blendInfo.fSrcBlend << kBlendWriteShift);
+ key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift));
+ key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift));
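+ // Resulting layout: bit 0 = writeColor, bits 1-5 = src coeff, bits 6-10 = dst coeff,
+ // bits 11 and up = blend equation.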
+
+ return key;
+}
+
+class Desc : public GrProgramDesc {
+public:
+ static bool Build(Desc* desc,
+ GrRenderTarget* rt,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ bool hasDepthStencil,
+ GrGpu* gpu) {
+ if (!GrProgramDesc::Build(desc, rt, programInfo, primitiveType, gpu)) {
+ return false;
+ }
+ GrProcessorKeyBuilder b(&desc->key());
+
+ GrStencilSettings stencil;
+ const GrPipeline& pipeline = programInfo.pipeline();
+ stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(), 8);
+ stencil.genKey(&b);
+ b.add32(rt->config());
+ b.add32(static_cast<int32_t>(hasDepthStencil));
+ b.add32(get_blend_info_key(pipeline));
+ b.add32(static_cast<uint32_t>(primitiveType));
+ return true;
+ }
+};
+
+sk_sp<GrGpu> GrDawnGpu::Make(const dawn::Device& device,
+ const GrContextOptions& options, GrContext* context) {
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_sp<GrGpu>(new GrDawnGpu(context, options, device));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrDawnGpu::GrDawnGpu(GrContext* context, const GrContextOptions& options,
+ const dawn::Device& device)
+ : INHERITED(context)
+ , fDevice(device)
+ , fQueue(device.CreateQueue())
+ , fCompiler(new SkSL::Compiler())
+ , fUniformRingBuffer(this, dawn::BufferUsage::Uniform)
+ , fRenderPipelineCache(kMaxRenderPipelineEntries)
+ , fStagingManager(fDevice) {
+ fCaps.reset(new GrDawnCaps(options));
+}
+
+GrDawnGpu::~GrDawnGpu() {
+}
+
+void GrDawnGpu::disconnect(DisconnectType type) {
+ SkASSERT(!"unimplemented");
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrOpsRenderPass* GrDawnGpu::getOpsRenderPass(
+ GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
+ return fOpsRenderPass.get();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
+ GrAccessPattern accessPattern, const void* data) {
+ sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
+ if (data && b) {
+ b->updateData(data, size);
+ }
+ return b;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrDawnGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) {
+ GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
+ if (!texture) {
+ return false;
+ }
+ texture->upload(texels, mipLevelCount, SkIRect::MakeXYWH(left, top, width, height),
+ this->getCopyEncoder());
+ return true;
+}
+
+bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t bufferOffset,
+ size_t rowBytes) {
+ SkASSERT(!"unimplemented");
+ return false;
+}
+
+bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) {
+ SkASSERT(!"unimplemented");
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+sk_sp<GrTexture> GrDawnGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& backendFormat,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ SkASSERT(!levelClearMask);
+ dawn::TextureFormat format;
+ if (!backendFormat.asDawnFormat(&format)) {
+ return nullptr;
+ }
+
+ GrMipMapsStatus mipMapsStatus =
+ mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
+
+ return GrDawnTexture::Make(this, { desc.fWidth, desc.fHeight },
+ desc.fConfig, format, renderable,
+ renderTargetSampleCnt, budgeted, mipLevelCount,
+ mipMapsStatus);
+}
+
+sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) {
+ SkASSERT(!"unimplemented");
+ return nullptr;
+}
+
+sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable,
+ GrIOType) {
+ GrDawnImageInfo info;
+ if (!backendTex.getDawnImageInfo(&info)) {
+ return nullptr;
+ }
+
+ SkISize size = { backendTex.width(), backendTex.height() };
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ colorType);
+ GrMipMapsStatus status = GrMipMapsStatus::kNotAllocated;
+ return GrDawnTexture::MakeWrapped(this, size, config, GrRenderable::kNo, 1, status, cacheable,
+ info);
+}
+
+sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
+ int sampleCnt, GrColorType colorType,
+ GrWrapOwnership,
+ GrWrapCacheable cacheable) {
+ GrDawnImageInfo info;
+ if (!tex.getDawnImageInfo(&info) || !info.fTexture) {
+ return nullptr;
+ }
+
+ SkISize size = { tex.width(), tex.height() };
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(tex.getBackendFormat(),
+ colorType);
+ sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
+ if (sampleCnt < 1) {
+ return nullptr;
+ }
+
+ GrMipMapsStatus status = GrMipMapsStatus::kNotAllocated;
+ return GrDawnTexture::MakeWrapped(this, size, config, GrRenderable::kYes, sampleCnt, status,
+ cacheable, info);
+}
+
+sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt,
+ GrColorType colorType) {
+ GrDawnImageInfo info;
+ if (!rt.getDawnImageInfo(&info) || !info.fTexture) {
+ return nullptr;
+ }
+
+ SkISize size = { rt.width(), rt.height() };
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(rt.getBackendFormat(),
+ colorType);
+ int sampleCnt = 1;
+ return GrDawnRenderTarget::MakeWrapped(this, size, config, sampleCnt, info);
+}
+
+sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType colorType) {
+ GrDawnImageInfo info;
+ if (!tex.getDawnImageInfo(&info) || !info.fTexture) {
+ return nullptr;
+ }
+
+ SkISize size = { tex.width(), tex.height() };
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(tex.getBackendFormat(),
+ colorType);
+ sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
+ if (sampleCnt < 1) {
+ return nullptr;
+ }
+
+ return GrDawnRenderTarget::MakeWrapped(this, size, config, sampleCnt, info);
+}
+
+GrStencilAttachment* GrDawnGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height,
+ int numStencilSamples) {
+ GrDawnStencilAttachment* stencil(GrDawnStencilAttachment::Create(this,
+ width,
+ height,
+ numStencilSamples));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+GrBackendTexture GrDawnGpu::onCreateBackendTexture(int width, int height,
+ const GrBackendFormat& backendFormat,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ const SkPixmap srcData[],
+ int numMipLevels,
+ const SkColor4f* color,
+ GrProtected isProtected) {
+ dawn::TextureFormat format;
+ if (!backendFormat.asDawnFormat(&format)) {
+ return GrBackendTexture();
+ }
+
+ SkASSERT(width <= this->caps()->maxTextureSize() && height <= this->caps()->maxTextureSize());
+
+ // FIXME: Dawn doesn't support mipmapped render targets (yet).
+ if (GrMipMapped::kYes == mipMapped && GrRenderable::kYes == renderable) {
+ return GrBackendTexture();
+ }
+
+ dawn::TextureDescriptor desc;
+ desc.usage =
+ dawn::TextureUsage::Sampled |
+ dawn::TextureUsage::CopySrc |
+ dawn::TextureUsage::CopyDst;
+
+ if (GrRenderable::kYes == renderable) {
+ desc.usage |= dawn::TextureUsage::OutputAttachment;
+ }
+
+ desc.size.width = width;
+ desc.size.height = height;
+ desc.size.depth = 1;
+ desc.format = format;
+
+ // Figure out the number of mip levels.
+ if (srcData) {
+ desc.mipLevelCount = numMipLevels;
+ } else if (GrMipMapped::kYes == mipMapped) {
+ desc.mipLevelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
+ }
+
+ dawn::Texture tex = this->device().CreateTexture(&desc);
+
+ size_t bpp = GrDawnBytesPerPixel(format);
+ size_t baseLayerSize = bpp * width * height;
+ const void* pixels;
+ SkAutoMalloc defaultStorage(baseLayerSize);
+ if (srcData) {
+ pixels = srcData->addr();
+ } else {
+ pixels = defaultStorage.get();
+ memset(defaultStorage.get(), 0, baseLayerSize);
+ }
+ dawn::Device device = this->device();
+ dawn::CommandEncoder copyEncoder = fDevice.CreateCommandEncoder();
+ int w = width, h = height;
+ for (uint32_t i = 0; i < desc.mipLevelCount; i++) {
+ size_t origRowBytes = bpp * w;
+ size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
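+ // Dawn requires buffer-to-texture row pitches to be aligned (256 bytes at the time of
+ // writing), so rows may need to be repacked at the padded pitch below.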
+ size_t size = rowBytes * h;
+ GrDawnStagingBuffer* stagingBuffer = this->getStagingBuffer(size);
+ if (rowBytes == origRowBytes) {
+ memcpy(stagingBuffer->fData, pixels, size);
+ } else {
+ const char* src = static_cast<const char*>(pixels);
+ char* dst = static_cast<char*>(stagingBuffer->fData);
+ for (int row = 0; row < h; row++) {
+ memcpy(dst, src, origRowBytes);
+ dst += rowBytes;
+ src += origRowBytes;
+ }
+ }
+ dawn::Buffer buffer = stagingBuffer->fBuffer;
+ buffer.Unmap();
+ stagingBuffer->fData = nullptr;
+ dawn::BufferCopyView srcBuffer;
+ srcBuffer.buffer = buffer;
+ srcBuffer.offset = 0;
+ srcBuffer.rowPitch = rowBytes;
+ srcBuffer.imageHeight = h;
+ dawn::TextureCopyView dstTexture;
+ dstTexture.texture = tex;
+ dstTexture.mipLevel = i;
+ dstTexture.origin = {0, 0, 0};
+ dawn::Extent3D copySize = {(uint32_t) w, (uint32_t) h, 1};
+ copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
+ w = SkTMax(1, w / 2);
+ h = SkTMax(1, h / 2);
+ }
+ dawn::CommandBuffer cmdBuf = copyEncoder.Finish();
+ fQueue.Submit(1, &cmdBuf);
+ GrDawnImageInfo info;
+ info.fTexture = tex;
+ info.fFormat = desc.format;
+ info.fLevelCount = desc.mipLevelCount;
+ return GrBackendTexture(width, height, info);
+}
+
+void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
+ GrDawnImageInfo info;
+ if (tex.getDawnImageInfo(&info)) {
+ info.fTexture = nullptr;
+ }
+}
+
+#if GR_TEST_UTILS
+bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ GrDawnImageInfo info;
+ if (!tex.getDawnImageInfo(&info)) {
+ return false;
+ }
+
+ return info.fTexture.Get();
+}
+
+GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(int width, int height,
+ GrColorType colorType) {
+ GrPixelConfig config = GrColorTypeToPixelConfig(colorType);
+
+ if (width > this->caps()->maxTextureSize() || height > this->caps()->maxTextureSize()) {
+ return GrBackendRenderTarget();
+ }
+
+ dawn::TextureFormat format;
+ if (!GrPixelConfigToDawnFormat(config, &format)) {
+ return GrBackendRenderTarget();
+ }
+
+ dawn::TextureDescriptor desc;
+ desc.usage =
+ dawn::TextureUsage::CopySrc |
+ dawn::TextureUsage::OutputAttachment;
+
+ desc.size.width = width;
+ desc.size.height = height;
+ desc.size.depth = 1;
+ desc.format = format;
+
+ dawn::Texture tex = this->device().CreateTexture(&desc);
+
+ GrDawnImageInfo info;
+ info.fTexture = tex;
+ info.fFormat = desc.format;
+ info.fLevelCount = desc.mipLevelCount;
+ return GrBackendRenderTarget(width, height, 1, 0, info);
+}
+
+void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
+ GrDawnImageInfo info;
+ if (rt.getDawnImageInfo(&info)) {
+ info.fTexture = nullptr;
+ }
+}
+
+void GrDawnGpu::testingOnly_flushGpuAndSync() {
+ this->flush();
+}
+
+#endif
+
+void GrDawnGpu::flush() {
+ this->flushCopyEncoder();
+ if (!fCommandBuffers.empty()) {
+ fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
+ fCommandBuffers.clear();
+ }
+ fStagingManager.mapBusyList();
+ fDevice.Tick();
+}
+
+void GrDawnGpu::onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
+ this->flush();
+}
+
+static dawn::Texture get_dawn_texture_from_surface(GrSurface* src) {
+ if (auto rt = static_cast<GrDawnRenderTarget*>(src->asRenderTarget())) {
+ return rt->texture();
+ } else if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
+ return t->texture();
+ } else {
+ return nullptr;
+ }
+}
+
+bool GrDawnGpu::onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ dawn::Texture srcTexture = get_dawn_texture_from_surface(src);
+ dawn::Texture dstTexture = get_dawn_texture_from_surface(dst);
+ if (!srcTexture || !dstTexture) {
+ return false;
+ }
+
+ uint32_t width = srcRect.width(), height = srcRect.height();
+
+ dawn::TextureCopyView srcTextureView, dstTextureView;
+ srcTextureView.texture = srcTexture;
+ srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
+ dstTextureView.texture = dstTexture;
+ dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};
+
+ dawn::Extent3D copySize = {width, height, 1};
+ this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, &copySize);
+ return true;
+}
+
+static void callback(DawnBufferMapAsyncStatus status, const void* data, uint64_t dataLength,
+ void* userdata) {
+ (*reinterpret_cast<const void**>(userdata)) = data;
+}
+
+bool GrDawnGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) {
+ dawn::Texture tex = get_dawn_texture_from_surface(surface);
+
+ if (0 == rowBytes) {
+ return false;
+ }
+ size_t origRowBytes = rowBytes;
+ int origSizeInBytes = origRowBytes * height;
+ rowBytes = GrDawnRoundRowBytes(rowBytes);
+ int sizeInBytes = rowBytes * height;
+
+ dawn::BufferDescriptor desc;
+ desc.usage = dawn::BufferUsage::CopyDst | dawn::BufferUsage::MapRead;
+ desc.size = sizeInBytes;
+
+ dawn::Buffer buf = device().CreateBuffer(&desc);
+
+ dawn::TextureCopyView srcTexture;
+ srcTexture.texture = tex;
+ srcTexture.origin = {(uint32_t) left, (uint32_t) top, 0};
+
+ dawn::BufferCopyView dstBuffer;
+ dstBuffer.buffer = buf;
+ dstBuffer.offset = 0;
+ dstBuffer.rowPitch = rowBytes;
+ dstBuffer.imageHeight = height;
+
+ dawn::Extent3D copySize = {(uint32_t) width, (uint32_t) height, 1};
+ this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
+ flush();
+
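+ // Synchronous readback sketch: MapReadAsync invokes `callback` once the copy has
+ // completed, and Tick() pumps the device until the callback publishes the pointer.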
+ const void *readPixelsPtr = nullptr;
+ buf.MapReadAsync(callback, &readPixelsPtr);
+ while (!readPixelsPtr) {
+ device().Tick();
+ }
+
+ if (rowBytes == origRowBytes) {
+ memcpy(buffer, readPixelsPtr, origSizeInBytes);
+ } else {
+ const char* src = static_cast<const char*>(readPixelsPtr);
+ char* dst = static_cast<char*>(buffer);
+ for (int row = 0; row < height; row++) {
+ memcpy(dst, src, origRowBytes);
+ dst += origRowBytes;
+ src += rowBytes;
+ }
+ }
+ buf.Unmap();
+ return true;
+}
+
+bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture*) {
+ SkASSERT(!"unimplemented");
+ return false;
+}
+
+void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
+ this->flushCopyEncoder();
+ static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
+ SkASSERT(!"unimplemented");
+ return GrFence();
+}
+
+bool GrDawnGpu::waitFence(GrFence fence, uint64_t timeout) {
+ SkASSERT(!"unimplemented");
+ return false;
+}
+
+void GrDawnGpu::deleteFence(GrFence fence) const {
+ SkASSERT(!"unimplemented");
+}
+
+sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
+ SkASSERT(!"unimplemented");
+ return nullptr;
+}
+
+sk_sp<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) {
+ SkASSERT(!"unimplemented");
+ return nullptr;
+}
+
+void GrDawnGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
+ SkASSERT(!"unimplemented");
+}
+
+void GrDawnGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
+ SkASSERT(!"unimplemented");
+}
+
+void GrDawnGpu::checkFinishProcs() {
+ SkASSERT(!"unimplemented");
+}
+
+sk_sp<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
+ SkASSERT(!"unimplemented");
+ return nullptr;
+}
+
+sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
+ GrRenderTarget* rt,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType) {
+ bool hasDepthStencil = rt->renderTargetPriv().getStencilAttachment() != nullptr;
+ Desc desc;
+ if (!Desc::Build(&desc, rt, programInfo, primitiveType, hasDepthStencil, this)) {
+ return nullptr;
+ }
+
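+    // Render pipelines are cached on the program descriptor; on a hit the
+    // previously built GrDawnProgram (and its dawn::RenderPipeline) is reused.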
+ if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
+ return *program;
+ }
+
+ dawn::TextureFormat colorFormat;
+ SkAssertResult(GrPixelConfigToDawnFormat(rt->config(), &colorFormat));
+ dawn::TextureFormat stencilFormat = dawn::TextureFormat::Depth24PlusStencil8;
+
+ sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
+ this, rt, programInfo, primitiveType, colorFormat,
+ hasDepthStencil, stencilFormat, &desc);
+ fRenderPipelineCache.insert(desc, program);
+ return program;
+}
+
+dawn::Sampler GrDawnGpu::getOrCreateSampler(const GrSamplerState& samplerState) {
+ auto i = fSamplers.find(samplerState);
+ if (i != fSamplers.end()) {
+ return i->second;
+ }
+ dawn::SamplerDescriptor desc;
+ desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
+ desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
+ desc.addressModeW = dawn::AddressMode::ClampToEdge;
+ desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
+ desc.mipmapFilter = dawn::FilterMode::Linear;
+ desc.lodMinClamp = 0.0f;
+ desc.lodMaxClamp = 1000.0f;
+ desc.compare = dawn::CompareFunction::Never;
+ dawn::Sampler sampler = device().CreateSampler(&desc);
+ fSamplers.insert(std::pair<GrSamplerState, dawn::Sampler>(samplerState, sampler));
+ return sampler;
+}
+
+GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
+ return fUniformRingBuffer.allocate(size);
+}
+
+GrDawnStagingBuffer* GrDawnGpu::getStagingBuffer(size_t size) {
+ return fStagingManager.findOrCreateStagingBuffer(size);
+}
+
+void GrDawnGpu::appendCommandBuffer(dawn::CommandBuffer commandBuffer) {
+ if (commandBuffer) {
+ fCommandBuffers.push_back(commandBuffer);
+ }
+}
+
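+// Texture and buffer copies are batched on a single lazily created encoder;
+// flushCopyEncoder() finishes it into a command buffer so recorded copies
+// land ahead of the next render pass in submission order.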
+dawn::CommandEncoder GrDawnGpu::getCopyEncoder() {
+ if (!fCopyEncoder) {
+ fCopyEncoder = fDevice.CreateCommandEncoder();
+ }
+ return fCopyEncoder;
+}
+
+void GrDawnGpu::flushCopyEncoder() {
+ if (fCopyEncoder) {
+ fCommandBuffers.push_back(fCopyEncoder.Finish());
+ fCopyEncoder = nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.h b/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.h
new file mode 100644
index 0000000000..96cb54b8b9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnGpu.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnGpu_DEFINED
+#define GrDawnGpu_DEFINED
+
+#include "src/gpu/GrGpu.h"
+#include "dawn/dawncpp.h"
+#include "src/core/SkLRUCache.h"
+#include "src/gpu/dawn/GrDawnRingBuffer.h"
+#include "src/gpu/dawn/GrDawnStagingManager.h"
+
+#include <unordered_map>
+
+class GrDawnOpsRenderPass;
+class GrPipeline;
+struct GrDawnProgram;
+
+namespace SkSL {
+ class Compiler;
+}
+
+class GrDawnGpu : public GrGpu {
+public:
+ static sk_sp<GrGpu> Make(const dawn::Device& device, const GrContextOptions&, GrContext*);
+ GrDawnGpu(GrContext* context, const GrContextOptions& options, const dawn::Device& device);
+
+ ~GrDawnGpu() override;
+
+ void disconnect(DisconnectType) override;
+
+ const dawn::Device& device() const { return fDevice; }
+ const dawn::Queue& queue() const { return fQueue; }
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ GrBackendTexture onCreateBackendTexture(int w, int h,
+ const GrBackendFormat&,
+ GrMipMapped,
+ GrRenderable,
+ const SkPixmap srcData[],
+ int numMipLevels,
+ const SkColor4f* color,
+ GrProtected isProtected) override;
+ void deleteBackendTexture(const GrBackendTexture&) override;
+#if GR_TEST_UTILS
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+
+ GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
+ void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
+
+ void testingOnly_flushGpuAndSync() override;
+#endif
+ void flush();
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
+ int width,
+ int height,
+ int numStencilSamples) override;
+
+ GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget*, GrSurfaceOrigin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) override;
+
+ SkSL::Compiler* shaderCompiler() const {
+ return fCompiler.get();
+ }
+
+ void submit(GrOpsRenderPass*) override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() override;
+ bool waitFence(GrFence, uint64_t timeout) override;
+ void deleteFence(GrFence) const override;
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) override;
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) override;
+ void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void checkFinishProcs() override;
+
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
+
+ sk_sp<GrDawnProgram> getOrCreateRenderPipeline(GrRenderTarget*,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType);
+
+ dawn::Sampler getOrCreateSampler(const GrSamplerState& samplerState);
+
+ GrDawnRingBuffer::Slice allocateUniformRingBufferSlice(int size);
+ GrDawnStagingBuffer* getStagingBuffer(size_t size);
+ GrDawnStagingManager* getStagingManager() { return &fStagingManager; }
+ dawn::CommandEncoder getCopyEncoder();
+ void flushCopyEncoder();
+ void appendCommandBuffer(dawn::CommandBuffer commandBuffer);
+
+private:
+ void onResetContext(uint32_t resetBits) override {}
+
+    void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) override {}
+
+ sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) override;
+
+ sk_sp<GrTexture> onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) override;
+
+ sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType, GrWrapOwnership,
+ GrWrapCacheable, GrIOType) override;
+ sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
+ GrColorType, GrWrapOwnership,
+ GrWrapCacheable) override;
+ sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&, int sampleCnt,
+ GrColorType) override;
+
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
+ const void* data) override;
+
+ bool onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) override;
+
+ bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
+
+ bool onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) override;
+
+ void onResolveRenderTarget(GrRenderTarget*, const SkIRect&, GrSurfaceOrigin,
+ ForExternalIO) override {}
+
+ bool onRegenerateMipMapLevels(GrTexture*) override;
+
+ bool onCopySurface(GrSurface* dst, GrSurface* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) override;
+
+ void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override;
+
+ dawn::Device fDevice;
+ dawn::Queue fQueue;
+ std::unique_ptr<SkSL::Compiler> fCompiler;
+ std::unique_ptr<GrDawnOpsRenderPass> fOpsRenderPass;
+ GrDawnRingBuffer fUniformRingBuffer;
+ dawn::CommandEncoder fCopyEncoder;
+ std::vector<dawn::CommandBuffer> fCommandBuffers;
+
+ struct ProgramDescHash {
+ uint32_t operator()(const GrProgramDesc& desc) const {
+ return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
+ }
+ };
+
+ struct SamplerHash {
+ size_t operator()(const GrSamplerState& samplerState) const {
+ return SkOpts::hash_fn(&samplerState, sizeof(samplerState), 0);
+ }
+ };
+
+ SkLRUCache<GrProgramDesc, sk_sp<GrDawnProgram>, ProgramDescHash> fRenderPipelineCache;
+ std::unordered_map<GrSamplerState, dawn::Sampler, SamplerHash> fSamplers;
+ GrDawnStagingManager fStagingManager;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.cpp
new file mode 100644
index 0000000000..48fc8a7f48
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnOpsRenderPass.h"
+
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/dawn/GrDawnBuffer.h"
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnProgramBuilder.h"
+#include "src/gpu/dawn/GrDawnRenderTarget.h"
+#include "src/gpu/dawn/GrDawnStencilAttachment.h"
+#include "src/gpu/dawn/GrDawnTexture.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+#include "src/sksl/SkSLCompiler.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+static dawn::LoadOp to_dawn_load_op(GrLoadOp loadOp) {
+ switch (loadOp) {
+ case GrLoadOp::kLoad:
+ return dawn::LoadOp::Load;
+ case GrLoadOp::kDiscard:
+            // Dawn has no DontCare (for security reasons), so emulate it
+            // with Load. Load should be equivalent to DontCare on desktop
+            // GPUs; Clear would probably be better for tilers. If Dawn adds
+            // DontCare as an extension, use it here.
+ return dawn::LoadOp::Load;
+ case GrLoadOp::kClear:
+ return dawn::LoadOp::Clear;
+ default:
+ SK_ABORT("Invalid LoadOp");
+ }
+}
+
+GrDawnOpsRenderPass::GrDawnOpsRenderPass(GrDawnGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const LoadAndStoreInfo& colorInfo,
+ const StencilLoadAndStoreInfo& stencilInfo)
+ : INHERITED(rt, origin)
+ , fGpu(gpu)
+ , fColorInfo(colorInfo) {
+ fEncoder = fGpu->device().CreateCommandEncoder();
+ dawn::LoadOp colorOp = to_dawn_load_op(colorInfo.fLoadOp);
+ dawn::LoadOp stencilOp = to_dawn_load_op(stencilInfo.fLoadOp);
+ fPassEncoder = beginRenderPass(colorOp, stencilOp);
+}
+
+dawn::RenderPassEncoder GrDawnOpsRenderPass::beginRenderPass(dawn::LoadOp colorOp,
+ dawn::LoadOp stencilOp) {
+ dawn::Texture texture = static_cast<GrDawnRenderTarget*>(fRenderTarget)->texture();
+ auto stencilAttachment = static_cast<GrDawnStencilAttachment*>(
+ fRenderTarget->renderTargetPriv().getStencilAttachment());
+ dawn::TextureView colorView = texture.CreateView();
+ const float *c = fColorInfo.fClearColor.vec();
+
+ dawn::RenderPassColorAttachmentDescriptor colorAttachment;
+ colorAttachment.attachment = colorView;
+ colorAttachment.resolveTarget = nullptr;
+ colorAttachment.clearColor = { c[0], c[1], c[2], c[3] };
+ colorAttachment.loadOp = colorOp;
+ colorAttachment.storeOp = dawn::StoreOp::Store;
+    dawn::RenderPassColorAttachmentDescriptor* colorAttachments = &colorAttachment;
+ dawn::RenderPassDescriptor renderPassDescriptor;
+ renderPassDescriptor.colorAttachmentCount = 1;
+ renderPassDescriptor.colorAttachments = colorAttachments;
+ if (stencilAttachment) {
+ dawn::RenderPassDepthStencilAttachmentDescriptor depthStencilAttachment;
+ depthStencilAttachment.attachment = stencilAttachment->view();
+ depthStencilAttachment.depthLoadOp = stencilOp;
+ depthStencilAttachment.stencilLoadOp = stencilOp;
+ depthStencilAttachment.clearDepth = 1.0f;
+ depthStencilAttachment.clearStencil = 0;
+ depthStencilAttachment.depthStoreOp = dawn::StoreOp::Store;
+ depthStencilAttachment.stencilStoreOp = dawn::StoreOp::Store;
+ renderPassDescriptor.depthStencilAttachment = &depthStencilAttachment;
+ } else {
+ renderPassDescriptor.depthStencilAttachment = nullptr;
+ }
+ return fEncoder.BeginRenderPass(&renderPassDescriptor);
+}
+
+GrDawnOpsRenderPass::~GrDawnOpsRenderPass() {
+}
+
+GrGpu* GrDawnOpsRenderPass::gpu() { return fGpu; }
+
+void GrDawnOpsRenderPass::end() {
+ fPassEncoder.EndPass();
+}
+
+void GrDawnOpsRenderPass::submit() {
+ fGpu->appendCommandBuffer(fEncoder.Finish());
+}
+
+void GrDawnOpsRenderPass::insertEventMarker(const char* msg) {
+ SkASSERT(!"unimplemented");
+}
+
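+// Dawn only applies load/clear ops at render-pass boundaries, so the two
+// clear entry points below end the current pass and begin a fresh one with
+// the appropriate LoadOps.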
+void GrDawnOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ fPassEncoder.EndPass();
+ fPassEncoder = beginRenderPass(dawn::LoadOp::Load, dawn::LoadOp::Clear);
+}
+
+void GrDawnOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
+ fPassEncoder.EndPass();
+ fPassEncoder = beginRenderPass(dawn::LoadOp::Clear, dawn::LoadOp::Load);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrDawnOpsRenderPass::inlineUpload(GrOpFlushState* state,
+ GrDeferredTextureUploadFn& upload) {
+ SkASSERT(!"unimplemented");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrDawnOpsRenderPass::setScissorState(const GrProgramInfo& programInfo) {
+ SkIRect rect;
+ if (programInfo.pipeline().isScissorEnabled()) {
+ constexpr SkIRect kBogusScissor{0, 0, 1, 1};
+ rect = programInfo.hasFixedScissor() ? programInfo.fixedScissor() : kBogusScissor;
+ if (kBottomLeft_GrSurfaceOrigin == fOrigin) {
+ rect.setXYWH(rect.x(), fRenderTarget->height() - rect.bottom(),
+ rect.width(), rect.height());
+ }
+ } else {
+ rect = SkIRect::MakeWH(fRenderTarget->width(), fRenderTarget->height());
+ }
+ fPassEncoder.SetScissorRect(rect.x(), rect.y(), rect.width(), rect.height());
+}
+
+void GrDawnOpsRenderPass::applyState(const GrProgramInfo& programInfo,
+ const GrPrimitiveType primitiveType) {
+ sk_sp<GrDawnProgram> program = fGpu->getOrCreateRenderPipeline(fRenderTarget,
+ programInfo,
+ primitiveType);
+ auto bindGroup = program->setData(fGpu, fRenderTarget, programInfo);
+ fPassEncoder.SetPipeline(program->fRenderPipeline);
+ fPassEncoder.SetBindGroup(0, bindGroup, 0, nullptr);
+ const GrPipeline& pipeline = programInfo.pipeline();
+ if (pipeline.isStencilEnabled()) {
+ fPassEncoder.SetStencilReference(pipeline.getUserStencil()->fFront.fRef);
+ }
+ GrXferProcessor::BlendInfo blendInfo = pipeline.getXferProcessor().getBlendInfo();
+ const float* c = blendInfo.fBlendConstant.vec();
+ dawn::Color color{c[0], c[1], c[2], c[3]};
+ fPassEncoder.SetBlendColor(&color);
+ this->setScissorState(programInfo);
+}
+
+void GrDawnOpsRenderPass::onDraw(const GrProgramInfo& programInfo,
+ const GrMesh meshes[],
+ int meshCount,
+ const SkRect& bounds) {
+ if (!meshCount) {
+ return;
+ }
+ for (int i = 0; i < meshCount; ++i) {
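+        // All meshes within one draw are expected to share a primitive type
+        // (other Gr backends assert this), so meshes[0] is representative.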
+ applyState(programInfo, meshes[0].primitiveType());
+ meshes[i].sendToGpu(this);
+ }
+}
+
+void GrDawnOpsRenderPass::sendInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
+ dawn::Buffer vb = static_cast<const GrDawnBuffer*>(vertexBuffer)->get();
+ fPassEncoder.SetVertexBuffer(0, vb);
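+    // Instancing is not wired up yet: instanceBuffer and instanceCount are
+    // ignored and a single-instance draw is issued.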
+ fPassEncoder.Draw(vertexCount, 1, baseVertex, baseInstance);
+ fGpu->stats()->incNumDraws();
+}
+
+void GrDawnOpsRenderPass::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance,
+ GrPrimitiveRestart restart) {
+ dawn::Buffer vb = static_cast<const GrDawnBuffer*>(vertexBuffer)->get();
+ dawn::Buffer ib = static_cast<const GrDawnBuffer*>(indexBuffer)->get();
+ fPassEncoder.SetIndexBuffer(ib);
+ fPassEncoder.SetVertexBuffer(0, vb);
+ fPassEncoder.DrawIndexed(indexCount, 1, baseIndex, baseVertex, baseInstance);
+ fGpu->stats()->incNumDraws();
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.h b/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.h
new file mode 100644
index 0000000000..a1a9036236
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnOpsRenderPass.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnOpsRenderPass_DEFINED
+#define GrDawnOpsRenderPass_DEFINED
+
+#include "src/gpu/GrOpsRenderPass.h"
+
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrMesh.h"
+#include "dawn/dawncpp.h"
+
+class GrDawnGpu;
+class GrDawnRenderTarget;
+
+class GrDawnOpsRenderPass : public GrOpsRenderPass, private GrMesh::SendToGpuImpl {
+public:
+ GrDawnOpsRenderPass(GrDawnGpu*, GrRenderTarget*, GrSurfaceOrigin,
+ const LoadAndStoreInfo&, const StencilLoadAndStoreInfo&);
+
+ ~GrDawnOpsRenderPass() override;
+
+ void begin() override { }
+ void end() override;
+
+ dawn::RenderPassEncoder beginRenderPass(dawn::LoadOp colorOp, dawn::LoadOp stencilOp);
+ void insertEventMarker(const char*) override;
+
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
+
+ void submit();
+
+private:
+ GrGpu* gpu() override;
+
+ void setScissorState(const GrProgramInfo&);
+ void applyState(const GrProgramInfo& programInfo,
+ const GrPrimitiveType primitiveType);
+
+ void onDraw(const GrProgramInfo& programInfo,
+ const GrMesh mesh[],
+ int meshCount,
+ const SkRect& bounds) override;
+
+ void sendMeshToGpu(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) final {
+ this->sendInstancedMeshToGpu(primType, vertexBuffer, vertexCount, baseVertex,
+ nullptr, 1, 0);
+ }
+
+ void sendIndexedMeshToGpu(GrPrimitiveType primType,
+ const GrBuffer* indexBuffer, int indexCount, int baseIndex,
+ uint16_t /*minIndexValue*/, uint16_t /*maxIndexValue*/,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ GrPrimitiveRestart restart) final {
+ this->sendIndexedInstancedMeshToGpu(primType, indexBuffer, indexCount, baseIndex,
+ vertexBuffer, baseVertex, nullptr, 1, 0, restart);
+ }
+
+ void sendInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* vertexBuffer, int vertexCount, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) final;
+
+ void sendIndexedInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* indexBuffer, int indexCount, int baseIndex,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart) final;
+
+ void onClear(const GrFixedClip&, const SkPMColor4f& color) override;
+
+ void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) override;
+
+ struct InlineUploadInfo {
+ InlineUploadInfo(GrOpFlushState* state, const GrDeferredTextureUploadFn& upload)
+ : fFlushState(state), fUpload(upload) {}
+
+ GrOpFlushState* fFlushState;
+ GrDeferredTextureUploadFn fUpload;
+ };
+
+ GrDawnGpu* fGpu;
+ dawn::CommandEncoder fEncoder;
+ dawn::RenderPassEncoder fPassEncoder;
+ LoadAndStoreInfo fColorInfo;
+
+ typedef GrOpsRenderPass INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.cpp
new file mode 100644
index 0000000000..a33e9bbeb7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.cpp
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnProgramBuilder.h"
+
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnTexture.h"
+#include "src/sksl/SkSLCompiler.h"
+
+static SkSL::String sksl_to_spirv(const GrDawnGpu* gpu, const char* shaderString,
+ SkSL::Program::Kind kind, bool flipY,
+ SkSL::Program::Inputs* inputs) {
+ SkSL::Program::Settings settings;
+ settings.fCaps = gpu->caps()->shaderCaps();
+ settings.fFlipY = flipY;
+ std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
+ kind,
+ shaderString,
+ settings);
+ if (!program) {
+ SkDebugf("SkSL error:\n%s\n", gpu->shaderCompiler()->errorText().c_str());
+ SkASSERT(false);
+ return "";
+ }
+ *inputs = program->fInputs;
+ SkSL::String code;
+ if (!gpu->shaderCompiler()->toSPIRV(*program, &code)) {
+ return "";
+ }
+ return code;
+}
+
+static dawn::BlendFactor to_dawn_blend_factor(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kZero_GrBlendCoeff:
+ return dawn::BlendFactor::Zero;
+ case kOne_GrBlendCoeff:
+ return dawn::BlendFactor::One;
+ case kSC_GrBlendCoeff:
+ return dawn::BlendFactor::SrcColor;
+ case kISC_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusSrcColor;
+ case kDC_GrBlendCoeff:
+ return dawn::BlendFactor::DstColor;
+ case kIDC_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusDstColor;
+ case kSA_GrBlendCoeff:
+ return dawn::BlendFactor::SrcAlpha;
+ case kISA_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusSrcAlpha;
+ case kDA_GrBlendCoeff:
+ return dawn::BlendFactor::DstAlpha;
+ case kIDA_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusDstAlpha;
+ case kConstC_GrBlendCoeff:
+ return dawn::BlendFactor::BlendColor;
+ case kIConstC_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusBlendColor;
+ case kConstA_GrBlendCoeff:
+ case kIConstA_GrBlendCoeff:
+ case kS2C_GrBlendCoeff:
+ case kIS2C_GrBlendCoeff:
+ case kS2A_GrBlendCoeff:
+ case kIS2A_GrBlendCoeff:
+ default:
+ SkASSERT(!"unsupported blend coefficient");
+ return dawn::BlendFactor::One;
+ }
+}
+
+static dawn::BlendFactor to_dawn_blend_factor_for_alpha(GrBlendCoeff coeff) {
+ switch (coeff) {
+ // Force all srcColor used in alpha slot to alpha version.
+ case kSC_GrBlendCoeff:
+ return dawn::BlendFactor::SrcAlpha;
+ case kISC_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusSrcAlpha;
+ case kDC_GrBlendCoeff:
+ return dawn::BlendFactor::DstAlpha;
+ case kIDC_GrBlendCoeff:
+ return dawn::BlendFactor::OneMinusDstAlpha;
+ default:
+ return to_dawn_blend_factor(coeff);
+ }
+}
+
+static dawn::BlendOperation to_dawn_blend_operation(GrBlendEquation equation) {
+ switch (equation) {
+ case kAdd_GrBlendEquation:
+ return dawn::BlendOperation::Add;
+ case kSubtract_GrBlendEquation:
+ return dawn::BlendOperation::Subtract;
+ case kReverseSubtract_GrBlendEquation:
+ return dawn::BlendOperation::ReverseSubtract;
+ default:
+ SkASSERT(!"unsupported blend equation");
+ return dawn::BlendOperation::Add;
+ }
+}
+
+static dawn::CompareFunction to_dawn_compare_function(GrStencilTest test) {
+ switch (test) {
+ case GrStencilTest::kAlways:
+ return dawn::CompareFunction::Always;
+ case GrStencilTest::kNever:
+ return dawn::CompareFunction::Never;
+ case GrStencilTest::kGreater:
+ return dawn::CompareFunction::Greater;
+ case GrStencilTest::kGEqual:
+ return dawn::CompareFunction::GreaterEqual;
+ case GrStencilTest::kLess:
+ return dawn::CompareFunction::Less;
+ case GrStencilTest::kLEqual:
+ return dawn::CompareFunction::LessEqual;
+ case GrStencilTest::kEqual:
+ return dawn::CompareFunction::Equal;
+ case GrStencilTest::kNotEqual:
+ return dawn::CompareFunction::NotEqual;
+ default:
+ SkASSERT(!"unsupported stencil test");
+ return dawn::CompareFunction::Always;
+ }
+}
+
+static dawn::StencilOperation to_dawn_stencil_operation(GrStencilOp op) {
+ switch (op) {
+ case GrStencilOp::kKeep:
+ return dawn::StencilOperation::Keep;
+ case GrStencilOp::kZero:
+ return dawn::StencilOperation::Zero;
+ case GrStencilOp::kReplace:
+ return dawn::StencilOperation::Replace;
+ case GrStencilOp::kInvert:
+ return dawn::StencilOperation::Invert;
+ case GrStencilOp::kIncClamp:
+ return dawn::StencilOperation::IncrementClamp;
+ case GrStencilOp::kDecClamp:
+ return dawn::StencilOperation::DecrementClamp;
+ case GrStencilOp::kIncWrap:
+ return dawn::StencilOperation::IncrementWrap;
+ case GrStencilOp::kDecWrap:
+ return dawn::StencilOperation::DecrementWrap;
+ default:
+ SkASSERT(!"unsupported stencil function");
+ return dawn::StencilOperation::Keep;
+ }
+}
+
+static dawn::PrimitiveTopology to_dawn_primitive_topology(GrPrimitiveType primitiveType) {
+ switch (primitiveType) {
+ case GrPrimitiveType::kTriangles:
+ return dawn::PrimitiveTopology::TriangleList;
+ case GrPrimitiveType::kTriangleStrip:
+ return dawn::PrimitiveTopology::TriangleStrip;
+ case GrPrimitiveType::kPoints:
+ return dawn::PrimitiveTopology::PointList;
+ case GrPrimitiveType::kLines:
+ return dawn::PrimitiveTopology::LineList;
+ case GrPrimitiveType::kLineStrip:
+ return dawn::PrimitiveTopology::LineStrip;
+ case GrPrimitiveType::kPath:
+ default:
+ SkASSERT(!"unsupported primitive topology");
+ return dawn::PrimitiveTopology::TriangleList;
+ }
+}
+
+static dawn::VertexFormat to_dawn_vertex_format(GrVertexAttribType type) {
+ switch (type) {
+ case kFloat_GrVertexAttribType:
+ case kHalf_GrVertexAttribType:
+ return dawn::VertexFormat::Float;
+ case kFloat2_GrVertexAttribType:
+ case kHalf2_GrVertexAttribType:
+ return dawn::VertexFormat::Float2;
+ case kFloat3_GrVertexAttribType:
+ case kHalf3_GrVertexAttribType:
+ return dawn::VertexFormat::Float3;
+ case kFloat4_GrVertexAttribType:
+ case kHalf4_GrVertexAttribType:
+ return dawn::VertexFormat::Float4;
+ case kUShort2_GrVertexAttribType:
+ return dawn::VertexFormat::UShort2;
+ case kInt_GrVertexAttribType:
+ return dawn::VertexFormat::Int;
+ case kUByte4_norm_GrVertexAttribType:
+ return dawn::VertexFormat::UChar4Norm;
+ default:
+ SkASSERT(!"unsupported vertex format");
+ return dawn::VertexFormat::Float4;
+ }
+}
+
+static dawn::ColorStateDescriptor create_color_state(const GrDawnGpu* gpu,
+ const GrPipeline& pipeline,
+ dawn::TextureFormat colorFormat) {
+ GrXferProcessor::BlendInfo blendInfo = pipeline.getXferProcessor().getBlendInfo();
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+
+ dawn::BlendFactor srcFactor = to_dawn_blend_factor(srcCoeff);
+ dawn::BlendFactor dstFactor = to_dawn_blend_factor(dstCoeff);
+ dawn::BlendFactor srcFactorAlpha = to_dawn_blend_factor_for_alpha(srcCoeff);
+ dawn::BlendFactor dstFactorAlpha = to_dawn_blend_factor_for_alpha(dstCoeff);
+ dawn::BlendOperation operation = to_dawn_blend_operation(equation);
+ auto mask = blendInfo.fWriteColor ? dawn::ColorWriteMask::All : dawn::ColorWriteMask::None;
+
+ dawn::BlendDescriptor colorDesc = {operation, srcFactor, dstFactor};
+ dawn::BlendDescriptor alphaDesc = {operation, srcFactorAlpha, dstFactorAlpha};
+
+ dawn::ColorStateDescriptor descriptor;
+ descriptor.format = colorFormat;
+ descriptor.alphaBlend = alphaDesc;
+ descriptor.colorBlend = colorDesc;
+ descriptor.nextInChain = nullptr;
+ descriptor.writeMask = mask;
+
+ return descriptor;
+}
+
+static dawn::StencilStateFaceDescriptor to_stencil_state_face(const GrStencilSettings::Face& face) {
+ dawn::StencilStateFaceDescriptor desc;
+ desc.compare = to_dawn_compare_function(face.fTest);
+ desc.failOp = desc.depthFailOp = to_dawn_stencil_operation(face.fFailOp);
+ desc.passOp = to_dawn_stencil_operation(face.fPassOp);
+ return desc;
+}
+
+static dawn::DepthStencilStateDescriptor create_depth_stencil_state(
+ const GrStencilSettings& stencilSettings,
+ dawn::TextureFormat depthStencilFormat,
+ GrSurfaceOrigin origin) {
+ dawn::DepthStencilStateDescriptor state;
+ state.format = depthStencilFormat;
+ if (!stencilSettings.isDisabled()) {
+ const GrStencilSettings::Face& front = stencilSettings.front(origin);
+ state.stencilReadMask = front.fTestMask;
+ state.stencilWriteMask = front.fWriteMask;
+ state.stencilFront = to_stencil_state_face(stencilSettings.front(origin));
+ if (stencilSettings.isTwoSided()) {
+ state.stencilBack = to_stencil_state_face(stencilSettings.back(origin));
+ } else {
+ state.stencilBack = state.stencilFront;
+ }
+ }
+ return state;
+}
+
+static dawn::BindGroupBinding make_bind_group_binding(uint32_t binding, const dawn::Buffer& buffer,
+                                                      uint32_t offset, uint32_t size,
+                                                      const dawn::Sampler& sampler,
+ const dawn::TextureView& textureView) {
+ dawn::BindGroupBinding result;
+ result.binding = binding;
+ result.buffer = buffer;
+ result.offset = offset;
+ result.size = size;
+ result.sampler = sampler;
+ result.textureView = textureView;
+ return result;
+}
+
+static dawn::BindGroupBinding make_bind_group_binding(uint32_t binding, const dawn::Buffer& buffer,
+ uint32_t offset, uint32_t size) {
+ return make_bind_group_binding(binding, buffer, offset, size, nullptr, nullptr);
+}
+
+static dawn::BindGroupBinding make_bind_group_binding(uint32_t binding,
+ const dawn::Sampler& sampler) {
+ return make_bind_group_binding(binding, nullptr, 0, 0, sampler, nullptr);
+}
+
+static dawn::BindGroupBinding make_bind_group_binding(uint32_t binding,
+ const dawn::TextureView& textureView) {
+ return make_bind_group_binding(binding, nullptr, 0, 0, nullptr, textureView);
+}
+
+sk_sp<GrDawnProgram> GrDawnProgramBuilder::Build(GrDawnGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ dawn::TextureFormat colorFormat,
+ bool hasDepthStencil,
+ dawn::TextureFormat depthStencilFormat,
+ GrProgramDesc* desc) {
+ GrDawnProgramBuilder builder(gpu, renderTarget, programInfo, desc);
+ if (!builder.emitAndInstallProcs()) {
+ return nullptr;
+ }
+
+ builder.fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ builder.fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ builder.fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ builder.fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ builder.finalizeShaders();
+
+ SkSL::Program::Inputs vertInputs, fragInputs;
+ GrDawnUniformHandler::UniformInfoArray& uniforms = builder.fUniformHandler.fUniforms;
+ uint32_t geometryUniformSize = builder.fUniformHandler.fCurrentGeometryUBOOffset;
+ uint32_t fragmentUniformSize = builder.fUniformHandler.fCurrentFragmentUBOOffset;
+ sk_sp<GrDawnProgram> result(
+ new GrDawnProgram(uniforms, geometryUniformSize, fragmentUniformSize));
+ bool flipY = programInfo.origin() != kTopLeft_GrSurfaceOrigin;
+ auto vsModule = builder.createShaderModule(builder.fVS, SkSL::Program::kVertex_Kind, flipY,
+ &vertInputs);
+ auto fsModule = builder.createShaderModule(builder.fFS, SkSL::Program::kFragment_Kind, flipY,
+ &fragInputs);
+ result->fGeometryProcessor = std::move(builder.fGeometryProcessor);
+ result->fXferProcessor = std::move(builder.fXferProcessor);
+ result->fFragmentProcessors = std::move(builder.fFragmentProcessors);
+ result->fFragmentProcessorCnt = builder.fFragmentProcessorCnt;
+ std::vector<dawn::BindGroupLayoutBinding> layoutBindings;
+ if (0 != geometryUniformSize) {
+ layoutBindings.push_back({ GrDawnUniformHandler::kGeometryBinding,
+ dawn::ShaderStage::Vertex,
+ dawn::BindingType::UniformBuffer});
+ }
+ if (0 != fragmentUniformSize) {
+ layoutBindings.push_back({ GrDawnUniformHandler::kFragBinding,
+ dawn::ShaderStage::Fragment,
+ dawn::BindingType::UniformBuffer});
+ }
+ uint32_t binding = GrDawnUniformHandler::kSamplerBindingBase;
+ for (int i = 0; i < builder.fUniformHandler.fSamplers.count(); ++i) {
+ layoutBindings.push_back({ binding++, dawn::ShaderStage::Fragment,
+ dawn::BindingType::Sampler});
+ layoutBindings.push_back({ binding++, dawn::ShaderStage::Fragment,
+ dawn::BindingType::SampledTexture});
+ }
+ dawn::BindGroupLayoutDescriptor bindGroupLayoutDesc;
+ bindGroupLayoutDesc.bindingCount = layoutBindings.size();
+ bindGroupLayoutDesc.bindings = layoutBindings.data();
+ result->fBindGroupLayout = gpu->device().CreateBindGroupLayout(&bindGroupLayoutDesc);
+ dawn::PipelineLayoutDescriptor pipelineLayoutDesc;
+ pipelineLayoutDesc.bindGroupLayoutCount = 1;
+ pipelineLayoutDesc.bindGroupLayouts = &result->fBindGroupLayout;
+ auto pipelineLayout = gpu->device().CreatePipelineLayout(&pipelineLayoutDesc);
+ result->fBuiltinUniformHandles = builder.fUniformHandles;
+ const GrPipeline& pipeline = programInfo.pipeline();
+ auto colorState = create_color_state(gpu, pipeline, colorFormat);
+ dawn::DepthStencilStateDescriptor depthStencilState;
+ GrStencilSettings stencil;
+ if (pipeline.isStencilEnabled()) {
+ int numStencilBits = renderTarget->renderTargetPriv().numStencilBits();
+ stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(), numStencilBits);
+ }
+ depthStencilState = create_depth_stencil_state(stencil, depthStencilFormat,
+ programInfo.origin());
+
+ std::vector<dawn::VertexBufferDescriptor> inputs;
+
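+    // Translate the primitive processor's attribute lists into Dawn vertex
+    // input state: one buffer of per-vertex attributes and, if present, one
+    // of per-instance attributes, with offsets accumulated via sizeAlign4().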
+ std::vector<dawn::VertexAttributeDescriptor> vertexAttributes;
+ const GrPrimitiveProcessor& primProc = programInfo.primProc();
+ if (primProc.numVertexAttributes() > 0) {
+ size_t offset = 0;
+ int i = 0;
+ for (const auto& attrib : primProc.vertexAttributes()) {
+ dawn::VertexAttributeDescriptor attribute;
+ attribute.shaderLocation = i;
+ attribute.offset = offset;
+ attribute.format = to_dawn_vertex_format(attrib.cpuType());
+ vertexAttributes.push_back(attribute);
+ offset += attrib.sizeAlign4();
+ i++;
+ }
+ dawn::VertexBufferDescriptor input;
+ input.stride = offset;
+ input.stepMode = dawn::InputStepMode::Vertex;
+ input.attributeCount = vertexAttributes.size();
+ input.attributes = &vertexAttributes.front();
+ inputs.push_back(input);
+ }
+ std::vector<dawn::VertexAttributeDescriptor> instanceAttributes;
+ if (primProc.numInstanceAttributes() > 0) {
+ size_t offset = 0;
+ int i = 0;
+ for (const auto& attrib : primProc.instanceAttributes()) {
+ dawn::VertexAttributeDescriptor attribute;
+ attribute.shaderLocation = i;
+ attribute.offset = offset;
+ attribute.format = to_dawn_vertex_format(attrib.cpuType());
+ instanceAttributes.push_back(attribute);
+ offset += attrib.sizeAlign4();
+ i++;
+ }
+ dawn::VertexBufferDescriptor input;
+ input.stride = offset;
+ input.stepMode = dawn::InputStepMode::Instance;
+ input.attributeCount = instanceAttributes.size();
+ input.attributes = &instanceAttributes.front();
+ inputs.push_back(input);
+ }
+ dawn::VertexInputDescriptor vertexInput;
+ vertexInput.indexFormat = dawn::IndexFormat::Uint16;
+ vertexInput.bufferCount = inputs.size();
+ vertexInput.buffers = &inputs.front();
+
+ dawn::ProgrammableStageDescriptor vsDesc;
+ vsDesc.module = vsModule;
+ vsDesc.entryPoint = "main";
+
+ dawn::ProgrammableStageDescriptor fsDesc;
+ fsDesc.module = fsModule;
+ fsDesc.entryPoint = "main";
+
+ dawn::RenderPipelineDescriptor rpDesc;
+ rpDesc.layout = pipelineLayout;
+ rpDesc.vertexStage = vsDesc;
+ rpDesc.fragmentStage = &fsDesc;
+ rpDesc.vertexInput = &vertexInput;
+ rpDesc.primitiveTopology = to_dawn_primitive_topology(primitiveType);
+ if (hasDepthStencil) {
+ rpDesc.depthStencilState = &depthStencilState;
+ }
+ rpDesc.colorStateCount = 1;
+ rpDesc.colorStates = &colorState;
+ result->fRenderPipeline = gpu->device().CreateRenderPipeline(&rpDesc);
+ return result;
+}
+
+GrDawnProgramBuilder::GrDawnProgramBuilder(GrDawnGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrProgramDesc* desc)
+ : INHERITED(renderTarget, programInfo, desc)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {
+}
+
+dawn::ShaderModule GrDawnProgramBuilder::createShaderModule(const GrGLSLShaderBuilder& builder,
+ SkSL::Program::Kind kind,
+ bool flipY,
+ SkSL::Program::Inputs* inputs) {
+ dawn::Device device = fGpu->device();
+ SkString source(builder.fCompilerString.c_str());
+
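+    // Debugging aid: switch to #if 1 to dump the pretty-printed SkSL before
+    // SPIR-V conversion.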
+#if 0
+ SkSL::String sksl = GrShaderUtils::PrettyPrint(builder.fCompilerString);
+ printf("converting program:\n%s\n", sksl.c_str());
+#endif
+
+ SkSL::String spirvSource = sksl_to_spirv(fGpu, source.c_str(), kind, flipY, inputs);
+
+ dawn::ShaderModuleDescriptor desc;
+ desc.codeSize = spirvSource.size() / 4;
+ desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
+
+ return device.CreateShaderModule(&desc);
+}
+
+const GrCaps* GrDawnProgramBuilder::caps() const {
+ return fGpu->caps();
+}
+
+void GrDawnProgram::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
+ fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
+ }
+
+ // set RT adjustment
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != origin ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = origin;
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+static void setTexture(GrDawnGpu* gpu, const GrSamplerState& state, GrTexture* texture,
+ std::vector<dawn::BindGroupBinding> *bindings, int* binding) {
+ // FIXME: could probably cache samplers in GrDawnProgram
+ dawn::Sampler sampler = gpu->getOrCreateSampler(state);
+ bindings->push_back(make_bind_group_binding((*binding)++, sampler));
+ GrDawnTexture* tex = static_cast<GrDawnTexture*>(texture);
+ dawn::TextureView textureView = tex->textureView();
+ bindings->push_back(make_bind_group_binding((*binding)++, textureView));
+}
+
+dawn::BindGroup GrDawnProgram::setData(GrDawnGpu* gpu, const GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo) {
+ std::vector<dawn::BindGroupBinding> bindings;
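+    // Bindings must be pushed in the same order the BindGroupLayout was
+    // declared in GrDawnProgramBuilder::Build(): geometry UBO, fragment UBO,
+    // then a sampler/texture-view pair per texture sampler.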
+ GrDawnRingBuffer::Slice geom, frag;
+ uint32_t geometryUniformSize = fDataManager.geometryUniformSize();
+ uint32_t fragmentUniformSize = fDataManager.fragmentUniformSize();
+ if (0 != geometryUniformSize) {
+ geom = gpu->allocateUniformRingBufferSlice(geometryUniformSize);
+ bindings.push_back(make_bind_group_binding(GrDawnUniformHandler::kGeometryBinding,
+ geom.fBuffer, geom.fOffset,
+ geometryUniformSize));
+ }
+ if (0 != fragmentUniformSize) {
+ frag = gpu->allocateUniformRingBufferSlice(fragmentUniformSize);
+ bindings.push_back(make_bind_group_binding(GrDawnUniformHandler::kFragBinding,
+ frag.fBuffer, frag.fOffset,
+ fragmentUniformSize));
+ }
+ this->setRenderTargetState(renderTarget, programInfo.origin());
+ const GrPipeline& pipeline = programInfo.pipeline();
+ const GrPrimitiveProcessor& primProc = programInfo.primProc();
+ fGeometryProcessor->setData(fDataManager, primProc,
+ GrFragmentProcessor::CoordTransformIter(pipeline));
+ int binding = GrDawnUniformHandler::kSamplerBindingBase;
+ auto primProcTextures = programInfo.hasFixedPrimProcTextures() ?
+ programInfo.fixedPrimProcTextures() : nullptr;
+
+ for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
+ auto& sampler = primProc.textureSampler(i);
+ setTexture(gpu, sampler.samplerState(), primProcTextures[i]->peekTexture(), &bindings,
+ &binding);
+ }
+ GrFragmentProcessor::Iter iter(pipeline);
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fDataManager, *fp);
+ for (int i = 0; i < fp->numTextureSamplers(); ++i) {
+ auto& s = fp->textureSampler(i);
+ setTexture(gpu, s.samplerState(), s.peekTexture(), &bindings, &binding);
+ }
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkIPoint offset;
+ GrTexture* dstTexture = pipeline.peekDstTexture(&offset);
+ fXferProcessor->setData(fDataManager, pipeline.getXferProcessor(), dstTexture, offset);
+ if (GrTextureProxy* proxy = pipeline.dstTextureProxy()) {
+ GrFragmentProcessor::TextureSampler sampler(sk_ref_sp(proxy));
+ setTexture(gpu, sampler.samplerState(), sampler.peekTexture(), &bindings, &binding);
+ }
+ fDataManager.uploadUniformBuffers(gpu, geom, frag);
+ dawn::BindGroupDescriptor descriptor;
+ descriptor.layout = fBindGroupLayout;
+ descriptor.bindingCount = bindings.size();
+ descriptor.bindings = bindings.data();
+ return gpu->device().CreateBindGroup(&descriptor);
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.h b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.h
new file mode 100644
index 0000000000..1cc3231741
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramBuilder.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnProgramBuilder_DEFINED
+#define GrDawnProgramBuilder_DEFINED
+
+#include "src/gpu/dawn/GrDawnProgramDataManager.h"
+#include "src/gpu/dawn/GrDawnUniformHandler.h"
+#include "src/gpu/dawn/GrDawnVaryingHandler.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "dawn/dawncpp.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+class GrPipeline;
+
+struct GrDawnProgram : public SkRefCnt {
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin) -1;
+ }
+
+ /**
+ * Gets a float4 that adjusts the position from Skia device coords to GL's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous float3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kTopLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
+ typedef GrGLSLBuiltinUniformHandles BuiltinUniformHandles;
+ GrDawnProgram(const GrDawnUniformHandler::UniformInfoArray& uniforms,
+ uint32_t geometryUniformSize,
+ uint32_t fragmentUniformSize)
+ : fDataManager(uniforms, geometryUniformSize, fragmentUniformSize) {
+ }
+ std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
+ int fFragmentProcessorCnt;
+ dawn::BindGroupLayout fBindGroupLayout;
+ dawn::RenderPipeline fRenderPipeline;
+ GrDawnProgramDataManager fDataManager;
+ RenderTargetState fRenderTargetState;
+ BuiltinUniformHandles fBuiltinUniformHandles;
+
+ void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);
+ dawn::BindGroup setData(GrDawnGpu* gpu, const GrRenderTarget*, const GrProgramInfo&);
+};
+
+class GrDawnProgramBuilder : public GrGLSLProgramBuilder {
+public:
+ static sk_sp<GrDawnProgram> Build(GrDawnGpu*,
+ GrRenderTarget* rt,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ dawn::TextureFormat colorFormat,
+ bool hasDepthStencil,
+ dawn::TextureFormat depthStencilFormat,
+ GrProgramDesc* desc);
+ const GrCaps* caps() const override;
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrDawnGpu* gpu() const { return fGpu; }
+
+private:
+ GrDawnProgramBuilder(GrDawnGpu*,
+ GrRenderTarget*,
+ const GrProgramInfo&,
+ GrProgramDesc*);
+ dawn::ShaderModule createShaderModule(const GrGLSLShaderBuilder&, SkSL::Program::Kind,
+ bool flipY, SkSL::Program::Inputs* inputs);
+ GrDawnGpu* fGpu;
+ GrDawnVaryingHandler fVaryingHandler;
+ GrDawnUniformHandler fUniformHandler;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.cpp
new file mode 100644
index 0000000000..db22f255f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnProgramDataManager.h"
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+
+GrDawnProgramDataManager::GrDawnProgramDataManager(const UniformInfoArray& uniforms,
+ uint32_t geometryUniformSize,
+ uint32_t fragmentUniformSize)
+ : fGeometryUniformSize(geometryUniformSize)
+ , fFragmentUniformSize(fragmentUniformSize)
+ , fGeometryUniformsDirty(false)
+ , fFragmentUniformsDirty(false) {
+ fGeometryUniformData.reset(geometryUniformSize);
+ fFragmentUniformData.reset(fragmentUniformSize);
+ memset(fGeometryUniformData.get(), 0, fGeometryUniformSize);
+ memset(fFragmentUniformData.get(), 0, fFragmentUniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+    // We must add uniforms in the same order as the UniformInfoArray so that
+    // UniformHandles already owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrDawnUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVar.getArrayCount();
+ uniform.fType = uniformInfo.fVar.getType();
+ );
+
+ if (!(kFragment_GrShaderFlag & uniformInfo.fVisibility)) {
+ uniform.fBinding = GrDawnUniformHandler::kGeometryBinding;
+ } else {
+ SkASSERT(kFragment_GrShaderFlag == uniformInfo.fVisibility);
+ uniform.fBinding = GrDawnUniformHandler::kFragBinding;
+ }
+ uniform.fOffset = uniformInfo.fUBOOffset;
+ }
+}
+
+void* GrDawnProgramDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
+ void* buffer;
+ if (GrDawnUniformHandler::kGeometryBinding == uni.fBinding) {
+ buffer = fGeometryUniformData.get();
+ fGeometryUniformsDirty = true;
+ } else {
+ SkASSERT(GrDawnUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ fFragmentUniformsDirty = true;
+ }
+    buffer = static_cast<char*>(buffer) + uni.fOffset;
+ return buffer;
+}
+
+void GrDawnProgramDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, &i, sizeof(int32_t));
+}
+
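+// The *iv/*fv array setters below store each element with a 16-byte (vec4)
+// stride, matching the vec4-aligned offsets the uniform handler is assumed
+// to assign; hence the "buffer += 4" / "+ 4*sizeof(float)" increments.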
+void GrDawnProgramDataManager::set1iv(UniformHandle u, int arrayCount, const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ uint32_t* buffer = static_cast<uint32_t*>(this->getBufferPtrAndMarkDirty(uni));
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(int32_t));
+ buffer += 4;
+ }
+}
+
+void GrDawnProgramDataManager::set2i(UniformHandle u, int32_t i0, int32_t i1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[2] = { i0, i1 };
+ memcpy(buffer, v, 2 * sizeof(int32_t));
+}
+
+void GrDawnProgramDataManager::set2iv(UniformHandle u, int arrayCount, const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ uint32_t* buffer = static_cast<uint32_t*>(this->getBufferPtrAndMarkDirty(uni));
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[2 * i];
+ memcpy(buffer, curVec, 2 * sizeof(int32_t));
+ buffer += 4;
+ }
+}
+
+void GrDawnProgramDataManager::set3i(UniformHandle u, int32_t i0, int32_t i1, int32_t i2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[3] = { i0, i1, i2 };
+ memcpy(buffer, v, 3 * sizeof(int32_t));
+}
+
+void GrDawnProgramDataManager::set3iv(UniformHandle u, int arrayCount, const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ uint32_t* buffer = static_cast<uint32_t*>(this->getBufferPtrAndMarkDirty(uni));
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(int32_t));
+ buffer += 4;
+ }
+}
+
+void GrDawnProgramDataManager::set4i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2,
+ int32_t i3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[4] = { i0, i1, i2, i3 };
+ memcpy(buffer, v, sizeof(v));
+}
+
+void GrDawnProgramDataManager::set4iv(UniformHandle u, int arrayCount, const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ uint32_t* buffer = static_cast<uint32_t*>(this->getBufferPtrAndMarkDirty(uni));
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[4 * i];
+ memcpy(buffer, curVec, 4 * sizeof(int32_t));
+ buffer += 4;
+ }
+}
+
+void GrDawnProgramDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrDawnProgramDataManager::set1fv(UniformHandle u, int arrayCount, const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrDawnProgramDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrDawnProgramDataManager::set2fv(UniformHandle u, int arrayCount, const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[2 * i];
+ memcpy(buffer, curVec, 2 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrDawnProgramDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrDawnProgramDataManager::set3fv(UniformHandle u, int arrayCount, const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrDawnProgramDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrDawnProgramDataManager::set4fv(UniformHandle u, int arrayCount, const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
+void GrDawnProgramDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrDawnProgramDataManager::setMatrix2fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrDawnProgramDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrDawnProgramDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrDawnProgramDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrDawnProgramDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrDawnProgramDataManager::setMatrices(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ void* buffer;
+ if (GrDawnUniformHandler::kGeometryBinding == uni.fBinding) {
+ buffer = fGeometryUniformData.get();
+ fGeometryUniformsDirty = true;
+ } else {
+ SkASSERT(GrDawnUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ fFragmentUniformsDirty = true;
+ }
+
+ set_uniform_matrix<N>::set(buffer, uni.fOffset, arrayCount, matrices);
+}
+
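+// For N < 4, each matrix column is padded to a 16-byte (vec4) stride, so
+// columns are copied one at a time; 4x4 matrices are already densely packed
+// and are copied with a single memcpy (specialization below).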
+template<int N> struct set_uniform_matrix {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ for (int i = 0; i < count; ++i) {
+ const float* matrix = &matrices[N * N * i];
+ for (int j = 0; j < N; ++j) {
+ memcpy(buffer, &matrix[j * N], N * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+ }
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ memcpy(buffer, matrices, count * 16 * sizeof(float));
+ }
+};
+
+void GrDawnProgramDataManager::uploadUniformBuffers(GrDawnGpu* gpu,
+ GrDawnRingBuffer::Slice geometryBuffer,
+ GrDawnRingBuffer::Slice fragmentBuffer) const {
+ dawn::Buffer geom = geometryBuffer.fBuffer;
+ uint32_t geomOffset = geometryBuffer.fOffset;
+ dawn::Buffer frag = fragmentBuffer.fBuffer;
+ uint32_t fragOffset = fragmentBuffer.fOffset;
+ auto copyEncoder = gpu->getCopyEncoder();
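+    // Copy the dirty CPU-side uniform data into a mapped staging buffer, then
+    // record a buffer-to-buffer copy into the ring-buffer slice; the copy
+    // encoder is flushed ahead of the next command-buffer submission.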
+ if (geom && fGeometryUniformsDirty) {
+ GrDawnStagingBuffer* stagingBuffer = gpu->getStagingBuffer(fGeometryUniformSize);
+ memcpy(stagingBuffer->fData, fGeometryUniformData.get(), fGeometryUniformSize);
+ stagingBuffer->fBuffer.Unmap();
+ copyEncoder
+ .CopyBufferToBuffer(stagingBuffer->fBuffer, 0, geom, geomOffset, fGeometryUniformSize);
+ }
+ if (frag && fFragmentUniformsDirty) {
+ GrDawnStagingBuffer* stagingBuffer = gpu->getStagingBuffer(fFragmentUniformSize);
+ memcpy(stagingBuffer->fData, fFragmentUniformData.get(), fFragmentUniformSize);
+ stagingBuffer->fBuffer.Unmap();
+ copyEncoder
+ .CopyBufferToBuffer(stagingBuffer->fBuffer, 0, frag, fragOffset, fFragmentUniformSize);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.h b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.h
new file mode 100644
index 0000000000..b77d5ffe3f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnProgramDataManager.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnProgramDataManager_DEFINED
+#define GrDawnProgramDataManager_DEFINED
+
+#include "src/gpu/dawn/GrDawnRingBuffer.h"
+#include "src/gpu/dawn/GrDawnUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "dawn/dawncpp.h"
+
+#include "src/core/SkAutoMalloc.h"
+
+class GrDawnGpu;
+class GrDawnUniformBuffer;
+
+class GrDawnProgramDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrDawnUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrDawnProgramDataManager(const UniformInfoArray&,
+ uint32_t geometryUniformSize,
+ uint32_t fragmentUniformSize);
+
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set2i(UniformHandle, int32_t, int32_t) const override;
+ void set2iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set3i(UniformHandle, int32_t, int32_t, int32_t) const override;
+ void set3iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set4i(UniformHandle, int32_t, int32_t, int32_t, int32_t) const override;
+ void set4iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+    // Matrices are column-major. The first three calls upload a single matrix; the
+    // *fv variants upload arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SK_ABORT("Only supported in NVPR, which is not in Dawn");
+ }
+
+ void uploadUniformBuffers(GrDawnGpu* gpu,
+ GrDawnRingBuffer::Slice geometryBuffer,
+ GrDawnRingBuffer::Slice fragmentBuffer) const;
+
+ uint32_t geometryUniformSize() const { return fGeometryUniformSize; }
+ uint32_t fragmentUniformSize() const { return fFragmentUniformSize; }
+private:
+ struct Uniform {
+ uint32_t fBinding;
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ void* getBufferPtrAndMarkDirty(const Uniform& uni) const;
+
+ uint32_t fGeometryUniformSize;
+ uint32_t fFragmentUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fGeometryUniformData;
+ mutable SkAutoMalloc fFragmentUniformData;
+ mutable bool fGeometryUniformsDirty;
+ mutable bool fFragmentUniformsDirty;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.cpp
new file mode 100644
index 0000000000..a75ab13fba
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnRenderTarget.h"
+
+#include "include/gpu/GrBackendSurface.h"
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+GrDawnRenderTarget::GrDawnRenderTarget(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ int sampleCnt,
+ const GrDawnImageInfo& info)
+ : GrSurface(gpu, size, config, GrProtected::kNo)
+ , GrRenderTarget(gpu, size, config, sampleCnt, GrProtected::kNo)
+ , fInfo(info) {
+}
+
+sk_sp<GrDawnRenderTarget>
+GrDawnRenderTarget::MakeWrapped(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ int sampleCnt,
+ const GrDawnImageInfo& info) {
+ sk_sp<GrDawnRenderTarget> rt(new GrDawnRenderTarget(gpu, size, config, sampleCnt, info));
+ rt->registerWithCacheWrapped(GrWrapCacheable::kNo);
+ return rt;
+}
+
+size_t GrDawnRenderTarget::onGpuMemorySize() const {
+    // The plus one accounts for the resolve texture (or, when MSAA is off, for the
+    // render target itself).
+    int numSamples = this->numSamples() + 1;
+    const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numSamples, GrMipMapped::kNo);
+}
+
+bool GrDawnRenderTarget::completeStencilAttachment() {
+ return true;
+}
+
+GrDawnRenderTarget::~GrDawnRenderTarget() {
+}
+
+void GrDawnRenderTarget::onRelease() {
+ INHERITED::onRelease();
+}
+
+void GrDawnRenderTarget::onAbandon() {
+ INHERITED::onAbandon();
+}
+
+GrBackendRenderTarget GrDawnRenderTarget::getBackendRenderTarget() const {
+ return GrBackendRenderTarget(this->width(), this->height(), this->numSamples(),
+ this->numSamples(), fInfo);
+}
+
+GrBackendFormat GrDawnRenderTarget::backendFormat() const {
+ return GrBackendFormat::MakeDawn(fInfo.fFormat);
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.h b/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.h
new file mode 100644
index 0000000000..fe5c77fba1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnRenderTarget.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnRenderTarget_DEFINED
+#define GrDawnRenderTarget_DEFINED
+
+#include "include/gpu/dawn/GrDawnTypes.h"
+#include "src/gpu/GrRenderTarget.h"
+
+class GrDawnGpu;
+
+class GrDawnRenderTarget: public GrRenderTarget {
+public:
+ static sk_sp<GrDawnRenderTarget> MakeWrapped(GrDawnGpu*, const SkISize& size,
+ GrPixelConfig config, int sampleCnt,
+ const GrDawnImageInfo&);
+
+ ~GrDawnRenderTarget() override;
+
+ bool canAttemptStencilAttachment() const override {
+ return true;
+ }
+
+ GrBackendRenderTarget getBackendRenderTarget() const override;
+ GrBackendFormat backendFormat() const override;
+ dawn::Texture texture() const { return fInfo.fTexture; }
+
+protected:
+ GrDawnRenderTarget(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ int sampleCnt,
+ const GrDawnImageInfo& info);
+
+ void onAbandon() override;
+ void onRelease() override;
+ void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {}
+
+ // This accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override;
+
+ static GrDawnRenderTarget* Create(GrDawnGpu*, const GrSurfaceDesc&, int sampleCnt,
+ const GrDawnImageInfo&);
+
+ bool completeStencilAttachment() override;
+ GrDawnImageInfo fInfo;
+ typedef GrRenderTarget INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.cpp
new file mode 100644
index 0000000000..caf0d578ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnRingBuffer.h"
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+namespace {
+ const int kDefaultSize = 512 * 1024;
+}  // namespace
+
+GrDawnRingBuffer::GrDawnRingBuffer(GrDawnGpu* gpu, dawn::BufferUsage usage)
+    : fGpu(gpu), fUsage(usage) {
+}
+
+GrDawnRingBuffer::~GrDawnRingBuffer() {
+}
+
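+// Sub-allocates from a fixed-size (512 KiB) buffer, creating a fresh buffer once
+// the current one cannot fit the request. Dropping the handle to the old buffer
+// should be safe here: Dawn keeps it alive while in-flight command buffers still
+// reference it. Note that this assumes size <= kDefaultSize.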
+GrDawnRingBuffer::Slice GrDawnRingBuffer::allocate(int size) {
+ if (!fBuffer || fOffset + size > kDefaultSize) {
+ dawn::BufferDescriptor desc;
+ desc.usage = fUsage | dawn::BufferUsage::CopyDst;
+ desc.size = kDefaultSize;
+ fBuffer = fGpu->device().CreateBuffer(&desc);
+ fOffset = 0;
+ }
+ int offset = fOffset;
+ fOffset += size;
+ fOffset = GrDawnRoundRowBytes(fOffset);
+ return Slice(fBuffer, offset);
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.h b/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.h
new file mode 100644
index 0000000000..0dbe5059af
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnRingBuffer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnRingBuffer_DEFINED
+#define GrDawnRingBuffer_DEFINED
+
+#include "src/gpu/GrBuffer.h"
+#include "src/gpu/dawn/GrDawnBuffer.h"
+#include "dawn/dawncpp.h"
+
+class GrDawnGpu;
+
+class GrDawnRingBuffer : public SkRefCnt {
+public:
+ GrDawnRingBuffer(GrDawnGpu* gpu, dawn::BufferUsage usage);
+ ~GrDawnRingBuffer() override;
+
+ struct Slice {
+ Slice(dawn::Buffer buffer, int offset) : fBuffer(buffer), fOffset(offset) {}
+ Slice() : fBuffer(nullptr), fOffset(0) {}
+ Slice(const Slice& other) : fBuffer(other.fBuffer), fOffset(other.fOffset) {}
+ Slice& operator=(const Slice& other) {
+ fBuffer = other.fBuffer;
+ fOffset = other.fOffset;
+ return *this;
+ }
+ dawn::Buffer fBuffer;
+ int fOffset;
+ };
+ Slice allocate(int size);
+
+private:
+ GrDawnGpu* fGpu;
+ dawn::BufferUsage fUsage;
+ dawn::Buffer fBuffer;
+ int fOffset = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.cpp
new file mode 100644
index 0000000000..68e0e9785f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnStagingManager.h"
+
+#include "src/core/SkMathPriv.h"
+
+GrDawnStagingManager::GrDawnStagingManager(dawn::Device device) : fDevice(device) {
+}
+
+GrDawnStagingManager::~GrDawnStagingManager() {
+ // Clean up any pending callbacks before destroying the StagingBuffers.
+ while (fWaitingCount > 0) {
+ fDevice.Tick();
+ }
+}
+
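+// Staging buffers are bucketed by rounding the requested size up to the next power
+// of two, so a buffer can later be reused for any request in the same bucket; e.g.
+// a 100-byte request is served by (and recycled as) a 128-byte buffer.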
+GrDawnStagingBuffer* GrDawnStagingManager::findOrCreateStagingBuffer(size_t size) {
+ size_t sizePow2 = GrNextPow2(size);
+ GrDawnStagingBuffer* stagingBuffer;
+ auto i = fReadyPool.find(sizePow2);
+ if (i != fReadyPool.end()) {
+ stagingBuffer = i->second;
+ fReadyPool.erase(i);
+ } else {
+ dawn::BufferDescriptor desc;
+ desc.usage = dawn::BufferUsage::MapWrite | dawn::BufferUsage::CopySrc;
+ desc.size = sizePow2;
+ dawn::CreateBufferMappedResult result = fDevice.CreateBufferMapped(&desc);
+ std::unique_ptr<GrDawnStagingBuffer> b(new GrDawnStagingBuffer(
+ this, result.buffer, sizePow2, result.data));
+ stagingBuffer = b.get();
+ fBuffers.push_back(std::move(b));
+ }
+ fBusyList.push_back(stagingBuffer);
+ return stagingBuffer;
+}
+
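+// Called by Dawn when a MapWriteAsync completes: record the newly mapped pointer
+// and return the buffer to the ready pool. The fManager null-check guards against
+// callbacks that fire during teardown.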
+static void callback(DawnBufferMapAsyncStatus status, void* data, uint64_t dataLength,
+ void* userData) {
+ GrDawnStagingBuffer* buffer = static_cast<GrDawnStagingBuffer*>(userData);
+ buffer->fData = data;
+ if (buffer->fManager) {
+ buffer->fManager->addToReadyPool(buffer);
+ }
+}
+
+void GrDawnStagingManager::mapBusyList() {
+ // Map all buffers on the busy list for writing. When they're no longer in flight on the GPU,
+ // their callback will be called and they'll be moved to the ready pool.
+ for (GrDawnStagingBuffer* buffer : fBusyList) {
+ buffer->fBuffer.MapWriteAsync(callback, buffer);
+ fWaitingCount++;
+ }
+ fBusyList.clear();
+}
+
+void GrDawnStagingManager::addToReadyPool(GrDawnStagingBuffer* buffer) {
+ fWaitingCount--;
+ fReadyPool.insert(std::pair<size_t, GrDawnStagingBuffer*>(buffer->fSize, buffer));
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.h b/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.h
new file mode 100644
index 0000000000..b3974ada6c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnStagingManager.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnStagingManager_DEFINED
+#define GrDawnStagingManager_DEFINED
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "dawn/dawncpp.h"
+
+struct GrDawnStagingBuffer;
+
+class GrDawnStagingManager {
+public:
+ GrDawnStagingManager(dawn::Device device);
+ ~GrDawnStagingManager();
+ GrDawnStagingBuffer* findOrCreateStagingBuffer(size_t size);
+
+ void addToReadyPool(GrDawnStagingBuffer* buffer);
+ void mapBusyList();
+
+private:
+ dawn::Device fDevice;
+ std::vector<std::unique_ptr<GrDawnStagingBuffer>> fBuffers;
+ std::multimap<size_t, GrDawnStagingBuffer*> fReadyPool;
+ std::vector<GrDawnStagingBuffer*> fBusyList;
+ int fWaitingCount = 0;
+};
+
+struct GrDawnStagingBuffer {
+ GrDawnStagingBuffer(GrDawnStagingManager* manager, dawn::Buffer buffer, size_t size,
+ void* data)
+ : fManager(manager), fBuffer(buffer), fSize(size), fData(data) {}
+ ~GrDawnStagingBuffer() {
+ fManager = nullptr;
+ }
+ GrDawnStagingManager* fManager;
+ dawn::Buffer fBuffer;
+ size_t fSize;
+ void* fData;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.cpp
new file mode 100644
index 0000000000..7e4047a146
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnStencilAttachment.h"
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+GrDawnStencilAttachment::GrDawnStencilAttachment(GrDawnGpu* gpu,
+ int width,
+ int height,
+ int bits,
+ int samples,
+ dawn::Texture texture,
+ dawn::TextureView view)
+ : INHERITED(gpu, width, height, bits, samples)
+ , fTexture(texture)
+ , fView(view) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrDawnStencilAttachment* GrDawnStencilAttachment::Create(GrDawnGpu* gpu,
+ int width,
+ int height,
+ int sampleCnt) {
+ dawn::TextureDescriptor desc;
+ desc.usage = dawn::TextureUsage::OutputAttachment;
+ desc.size.width = width;
+ desc.size.height = height;
+ desc.size.depth = 1;
+ desc.format = dawn::TextureFormat::Depth24PlusStencil8;
+ dawn::Texture texture = gpu->device().CreateTexture(&desc);
+ if (!texture) {
+ return nullptr;
+ }
+ dawn::TextureView view = texture.CreateView();
+ if (!view) {
+ return nullptr;
+ }
+ return new GrDawnStencilAttachment(gpu, width, height, 8, sampleCnt, texture, view);
+}
+
+GrDawnStencilAttachment::~GrDawnStencilAttachment() {
+}
+
+size_t GrDawnStencilAttachment::onGpuMemorySize() const {
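+    // Depth24PlusStencil8 is 32 bits per pixel; compute the size in bits, then
+    // convert to bytes.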
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= 32;
+    size *= SkTMax(1, this->numSamples());
+ return static_cast<size_t>(size / 8);
+}
+
+void GrDawnStencilAttachment::onRelease() {
+ GrStencilAttachment::onRelease();
+}
+
+void GrDawnStencilAttachment::onAbandon() {
+ GrStencilAttachment::onAbandon();
+}
+
+GrDawnGpu* GrDawnStencilAttachment::getDawnGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrDawnGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.h b/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.h
new file mode 100644
index 0000000000..b0afc23585
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnStencilAttachment.h
@@ -0,0 +1,43 @@
+/*
+* Copyright 2019 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrDawnStencil_DEFINED
+#define GrDawnStencil_DEFINED
+
+#include "src/gpu/GrStencilAttachment.h"
+
+#include "dawn/dawncpp.h"
+
+class GrDawnGpu;
+
+class GrDawnStencilAttachment : public GrStencilAttachment {
+public:
+ static GrDawnStencilAttachment* Create(GrDawnGpu* gpu, int width, int height,
+ int sampleCnt);
+
+ ~GrDawnStencilAttachment() override;
+ dawn::TextureView view() const { return fView; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrDawnStencilAttachment(GrDawnGpu* gpu, int width, int height, int bits, int samples,
+ dawn::Texture texture, dawn::TextureView view);
+
+ GrDawnGpu* getDawnGpu() const;
+
+ dawn::Texture fTexture;
+ dawn::TextureView fView;
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.cpp
new file mode 100644
index 0000000000..aa7f60755b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnTexture.h"
+
+#include "src/gpu/dawn/GrDawnGpu.h"
+#include "src/gpu/dawn/GrDawnTextureRenderTarget.h"
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+GrDawnTexture::GrDawnTexture(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ dawn::TextureView textureView,
+ const GrDawnImageInfo& info,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, size, config, GrProtected::kNo)
+ , GrTexture(gpu, size, config, GrProtected::kNo, GrTextureType::k2D, mipMapsStatus)
+ , fInfo(info)
+ , fTextureView(textureView) {
+}
+
+sk_sp<GrDawnTexture> GrDawnTexture::Make(GrDawnGpu* gpu, const SkISize& size, GrPixelConfig config,
+ dawn::TextureFormat format, GrRenderable renderable,
+ int sampleCnt, SkBudgeted budgeted, int mipLevels,
+ GrMipMapsStatus status) {
+ bool renderTarget = renderable == GrRenderable::kYes;
+ dawn::TextureDescriptor textureDesc;
+
+ textureDesc.usage =
+ dawn::TextureUsage::Sampled |
+ dawn::TextureUsage::CopySrc |
+ dawn::TextureUsage::CopyDst;
+
+ if (renderTarget) {
+ textureDesc.usage |= dawn::TextureUsage::OutputAttachment;
+ }
+
+ textureDesc.size.width = size.fWidth;
+ textureDesc.size.height = size.fHeight;
+ textureDesc.size.depth = 1;
+ textureDesc.format = format;
+ textureDesc.mipLevelCount = std::max(mipLevels, 1);
+ textureDesc.sampleCount = sampleCnt;
+
+ dawn::Texture tex = gpu->device().CreateTexture(&textureDesc);
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ dawn::TextureView textureView = tex.CreateView();
+
+ if (!textureView) {
+ return nullptr;
+ }
+
+ GrDawnImageInfo info;
+ info.fTexture = tex;
+ info.fFormat = textureDesc.format;
+ info.fLevelCount = mipLevels;
+ sk_sp<GrDawnTexture> result;
+ if (renderTarget) {
+ result = sk_sp<GrDawnTextureRenderTarget>(new GrDawnTextureRenderTarget(gpu,
+ size,
+ config,
+ textureView,
+ sampleCnt,
+ info,
+ status));
+ } else {
+ result = sk_sp<GrDawnTexture>(new GrDawnTexture(gpu, size, config, textureView, info,
+ status));
+ }
+ result->registerWithCache(budgeted);
+ return result;
+}
+
+GrBackendFormat GrDawnTexture::backendFormat() const {
+ return GrBackendFormat::MakeDawn(fInfo.fFormat);
+}
+
+sk_sp<GrDawnTexture> GrDawnTexture::MakeWrapped(GrDawnGpu* gpu, const SkISize& size,
+ GrPixelConfig config, GrRenderable renderable,
+ int sampleCnt, GrMipMapsStatus status,
+ GrWrapCacheable cacheable,
+ const GrDawnImageInfo& info) {
+ dawn::TextureView textureView = info.fTexture.CreateView();
+ if (!textureView) {
+ return nullptr;
+ }
+
+ sk_sp<GrDawnTexture> tex;
+ if (GrRenderable::kYes == renderable) {
+ tex = sk_sp<GrDawnTexture>(new GrDawnTextureRenderTarget(gpu, size, config, textureView,
+ sampleCnt, info, status));
+ } else {
+ tex = sk_sp<GrDawnTexture>(new GrDawnTexture(gpu, size, config, textureView, info, status));
+ }
+ tex->registerWithCacheWrapped(cacheable);
+ return tex;
+}
+
+GrDawnTexture::~GrDawnTexture() {
+}
+
+GrDawnGpu* GrDawnTexture::getDawnGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrDawnGpu*>(this->getGpu());
+}
+
+void GrDawnTexture::onRelease() {
+ INHERITED::onRelease();
+}
+
+void GrDawnTexture::onAbandon() {
+ INHERITED::onAbandon();
+}
+
+GrBackendTexture GrDawnTexture::getBackendTexture() const {
+ return GrBackendTexture(this->width(), this->height(), fInfo);
+}
+
+void GrDawnTexture::upload(const GrMipLevel texels[], int mipLevels,
+ dawn::CommandEncoder copyEncoder) {
+ this->upload(texels, mipLevels, SkIRect::MakeWH(width(), height()), copyEncoder);
+}
+
+void GrDawnTexture::upload(const GrMipLevel texels[], int mipLevels, const SkIRect& rect,
+ dawn::CommandEncoder copyEncoder) {
+ dawn::Device device = this->getDawnGpu()->device();
+
+ uint32_t x = rect.x();
+ uint32_t y = rect.y();
+ uint32_t width = rect.width();
+ uint32_t height = rect.height();
+
+ for (int i = 0; i < mipLevels; i++) {
+ size_t origRowBytes = texels[i].fRowBytes;
+ SkBitmap bitmap;
+ SkPixmap pixmap;
+ const char* src;
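+        // Dawn has no 4444, 565 or gray-8 texture formats, so those configs are
+        // expanded to RGBA8888 on the CPU before upload (see GrPixelConfigToDawnFormat).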
+ if (kRGBA_4444_GrPixelConfig == this->config() ||
+ kRGB_565_GrPixelConfig == this->config() ||
+ kGray_8_GrPixelConfig == this->config()) {
+            SkImageInfo info = SkImageInfo::Make(width, height, kRGBA_8888_SkColorType,
+                                                 kPremul_SkAlphaType);
+            SkColorType colorType =
+                GrColorTypeToSkColorType(GrPixelConfigToColorType(this->config()));
+            SkImageInfo srcInfo = SkImageInfo::Make(width, height, colorType,
+                                                    kOpaque_SkAlphaType);
+ SkPixmap srcPixmap(srcInfo, texels[i].fPixels, origRowBytes);
+ origRowBytes = GrDawnRoundRowBytes(info.minRowBytes());
+ bitmap.allocPixels(info, origRowBytes);
+ bitmap.writePixels(srcPixmap);
+ if (!bitmap.peekPixels(&pixmap)) {
+ continue;
+ }
+ src = static_cast<const char*>(pixmap.addr());
+ } else {
+ src = static_cast<const char*>(texels[i].fPixels);
+ }
+ size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
+ size_t size = rowBytes * height;
+ GrDawnStagingBuffer* stagingBuffer = getDawnGpu()->getStagingBuffer(size);
+ if (rowBytes == origRowBytes) {
+ memcpy(stagingBuffer->fData, src, size);
+ } else {
+ char* dst = static_cast<char*>(stagingBuffer->fData);
+ for (uint32_t row = 0; row < height; row++) {
+ memcpy(dst, src, origRowBytes);
+ dst += rowBytes;
+ src += texels[i].fRowBytes;
+ }
+ }
+ dawn::Buffer buffer = stagingBuffer->fBuffer;
+ buffer.Unmap();
+ stagingBuffer->fData = nullptr;
+
+ dawn::BufferCopyView srcBuffer;
+ srcBuffer.buffer = buffer;
+ srcBuffer.offset = 0;
+ srcBuffer.rowPitch = rowBytes;
+ srcBuffer.imageHeight = height;
+
+ dawn::TextureCopyView dstTexture;
+ dstTexture.texture = fInfo.fTexture;
+ dstTexture.mipLevel = i;
+ dstTexture.origin = {x, y, 0};
+
+ dawn::Extent3D copySize = {width, height, 1};
+ copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
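+        // Step down to the next mip level: halve the origin and the copy extent.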
+ x /= 2;
+ y /= 2;
+ width /= 2;
+ height /= 2;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.h b/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.h
new file mode 100644
index 0000000000..cc69fd7d9f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnTexture.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTexture_DEFINED
+#define GrDawnTexture_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "dawn/dawncpp.h"
+
+class GrDawnGpu;
+struct GrDawnImageInfo;
+
+class GrDawnTexture : public GrTexture {
+public:
+ static sk_sp<GrDawnTexture> Make(GrDawnGpu*, const SkISize& size, GrPixelConfig config,
+ dawn::TextureFormat format, GrRenderable, int sampleCnt,
+ SkBudgeted, int mipLevels, GrMipMapsStatus);
+
+ static sk_sp<GrDawnTexture> MakeWrapped(GrDawnGpu*, const SkISize& size, GrPixelConfig config,
+ GrRenderable, int sampleCnt,
+ GrMipMapsStatus, GrWrapCacheable,
+ const GrDawnImageInfo&);
+
+ ~GrDawnTexture() override;
+
+ GrBackendTexture getBackendTexture() const override;
+ GrBackendFormat backendFormat() const override;
+
+ void textureParamsModified() override {}
+
+ void upload(const GrMipLevel texels[], int mipLevels, dawn::CommandEncoder copyEncoder);
+ void upload(const GrMipLevel texels[], int mipLevels, const SkIRect& dstRect,
+ dawn::CommandEncoder copyEncoder);
+
+ dawn::Texture texture() const { return fInfo.fTexture; }
+ dawn::TextureView textureView() const { return fTextureView; }
+protected:
+ GrDawnTexture(GrDawnGpu*, const SkISize& size, GrPixelConfig config,
+ dawn::TextureView, const GrDawnImageInfo&, GrMipMapsStatus);
+
+ GrDawnGpu* getDawnGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) override {
+ return false;
+ }
+
+private:
+ GrDawnTexture(GrDawnGpu*, const GrSurfaceDesc&, const GrDawnImageInfo&, GrMipMapsStatus);
+
+ GrDawnImageInfo fInfo;
+ dawn::TextureView fTextureView;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.cpp
new file mode 100644
index 0000000000..a9e70d71ba
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnTextureRenderTarget.h"
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/dawn/GrDawnGpu.h"
+
+GrDawnTextureRenderTarget::GrDawnTextureRenderTarget(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ dawn::TextureView textureView,
+ int sampleCnt,
+ const GrDawnImageInfo& info,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, size, config, GrProtected::kNo)
+ , GrDawnTexture(gpu, size, config, textureView, info, mipMapsStatus)
+ , GrDawnRenderTarget(gpu, size, config, sampleCnt, info) {
+}
+
+bool GrDawnTextureRenderTarget::canAttemptStencilAttachment() const {
+ return true;
+}
+
+size_t GrDawnTextureRenderTarget::onGpuMemorySize() const {
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ 1, // FIXME: for MSAA
+ this->texturePriv().mipMapped());
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.h b/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.h
new file mode 100644
index 0000000000..69c94b034e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnTextureRenderTarget.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTextureRenderTarget_DEFINED
+#define GrDawnTextureRenderTarget_DEFINED
+
+#include "src/gpu/dawn/GrDawnRenderTarget.h"
+#include "src/gpu/dawn/GrDawnTexture.h"
+
+class GrDawnGpu;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrDawnTextureRenderTarget : public GrDawnTexture, public GrDawnRenderTarget {
+public:
+ GrDawnTextureRenderTarget(GrDawnGpu* gpu,
+ const SkISize& size,
+ GrPixelConfig config,
+ const dawn::TextureView textureView,
+ int sampleCnt,
+ const GrDawnImageInfo& info,
+ GrMipMapsStatus mipMapsStatus);
+
+ bool canAttemptStencilAttachment() const override;
+
+ GrBackendFormat backendFormat() const override { return GrDawnTexture::backendFormat(); }
+
+protected:
+ void onAbandon() override {
+ GrDawnRenderTarget::onAbandon();
+ GrDawnTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrDawnRenderTarget::onRelease();
+ GrDawnTexture::onRelease();
+ }
+
+private:
+ size_t onGpuMemorySize() const override;
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.cpp
new file mode 100644
index 0000000000..74fd1e31a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+GrDawnUniformHandler::GrDawnUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fSamplers(kUniformsPerBlock)
+ , fTextures(kUniformsPerBlock)
+{
+}
+
+const GrShaderVar& GrDawnUniformHandler::getUniformVariable(UniformHandle u) const {
+ return fUniforms[u.toIndex()].fVar;
+}
+
+const char* GrDawnUniformHandler::getUniformCStr(UniformHandle u) const {
+ return fUniforms[u.toIndex()].fVar.getName().c_str();
+}
+
+// FIXME: this code was ripped from GrVkUniformHandler; should be refactored.
+namespace {
+
+uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType: // fall through
+ case kUByte_GrSLType:
+ return 0x0;
+ case kByte2_GrSLType: // fall through
+ case kUByte2_GrSLType:
+ return 0x1;
+ case kByte3_GrSLType: // fall through
+ case kByte4_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ return 0x3;
+ case kShort_GrSLType: // fall through
+ case kUShort_GrSLType:
+ return 0x1;
+ case kShort2_GrSLType: // fall through
+ case kUShort2_GrSLType:
+ return 0x3;
+ case kShort3_GrSLType: // fall through
+ case kShort4_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ return 0x7;
+ case kInt_GrSLType:
+ case kUint_GrSLType:
+ return 0x3;
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return 0x3;
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 0x7;
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 0xF;
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 0xF;
+ case kUint2_GrSLType:
+ return 0x7;
+ case kInt2_GrSLType:
+ return 0x7;
+ case kInt3_GrSLType:
+ return 0xF;
+ case kInt4_GrSLType:
+ return 0xF;
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ return 0x7;
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 0xF;
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 0xF;
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ case kSampler_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+static inline uint32_t grsltype_to_size(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType:
+ case kUByte_GrSLType:
+ return 1;
+ case kByte2_GrSLType:
+ case kUByte2_GrSLType:
+ return 2;
+ case kByte3_GrSLType:
+ case kUByte3_GrSLType:
+ return 3;
+ case kByte4_GrSLType:
+ case kUByte4_GrSLType:
+ return 4;
+ case kShort_GrSLType:
+ return sizeof(int16_t);
+ case kShort2_GrSLType:
+ return 2 * sizeof(int16_t);
+ case kShort3_GrSLType:
+ return 3 * sizeof(int16_t);
+ case kShort4_GrSLType:
+ return 4 * sizeof(int16_t);
+ case kUShort_GrSLType:
+ return sizeof(uint16_t);
+ case kUShort2_GrSLType:
+ return 2 * sizeof(uint16_t);
+ case kUShort3_GrSLType:
+ return 3 * sizeof(uint16_t);
+ case kUShort4_GrSLType:
+ return 4 * sizeof(uint16_t);
+ case kInt_GrSLType:
+ return sizeof(int32_t);
+ case kUint_GrSLType:
+ return sizeof(int32_t);
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return sizeof(float);
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 2 * sizeof(float);
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 3 * sizeof(float);
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 4 * sizeof(float);
+ case kUint2_GrSLType:
+ return 2 * sizeof(uint32_t);
+ case kInt2_GrSLType:
+ return 2 * sizeof(int32_t);
+ case kInt3_GrSLType:
+ return 3 * sizeof(int32_t);
+ case kInt4_GrSLType:
+ return 4 * sizeof(int32_t);
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+            // TODO: this will be 4 * sizeof(float) under std430.
+ return 8 * sizeof(float);
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 12 * sizeof(float);
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 16 * sizeof(float);
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ case kSampler_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+uint32_t get_ubo_offset(uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ // We want to use the std140 layout here, so we must make arrays align to 16 bytes.
+ if (arrayCount || type == kFloat2x2_GrSLType) {
+ alignmentMask = 0xF;
+ }
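+    // e.g. a float4 (alignment mask 0xF) requested at *currentOffset == 20 yields
+    // offsetDiff == 12 and lands at offset 32.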
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ uint32_t uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ if (arrayCount) {
+ uint32_t elementSize = SkTMax<uint32_t>(16, grsltype_to_size(type));
+ SkASSERT(0 == (elementSize & 0xF));
+ *currentOffset = uniformOffset + elementSize * arrayCount;
+ } else {
+ *currentOffset = uniformOffset + grsltype_to_size(type);
+ }
+ return uniformOffset;
+}
+
+}  // namespace
+
+GrGLSLUniformHandler::UniformHandle GrDawnUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ UniformInfo& info = fUniforms.push_back();
+ info.fVisibility = visibility;
+ if (visibility == kFragment_GrShaderFlag) {
+ info.fUBOOffset = get_ubo_offset(&fCurrentFragmentUBOOffset, type, arrayCount);
+ } else {
+ info.fUBOOffset = get_ubo_offset(&fCurrentGeometryUBOOffset, type, arrayCount);
+ }
+ GrShaderVar& var = info.fVar;
+ char prefix = 'u';
+ if ('u' == name[0] || !strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(var.accessName(), prefix, name, mangleName);
+ var.setType(type);
+ var.setTypeModifier(GrShaderVar::kNone_TypeModifier);
+ var.setArrayCount(arrayCount);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("offset = %d", info.fUBOOffset);
+ var.addLayoutQualifier(layoutQualifier.c_str());
+ if (outName) {
+ *outName = var.c_str();
+ }
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+void GrDawnUniformHandler::updateUniformVisibility(UniformHandle u, uint32_t visibility) {
+ fUniforms[u.toIndex()].fVisibility |= visibility;
+}
+
+GrGLSLUniformHandler::SamplerHandle GrDawnUniformHandler::addSampler(const GrTextureProxy*,
+ const GrSamplerState&,
+ const GrSwizzle& swizzle,
+ const char* name,
+ const GrShaderCaps* caps) {
+ SkString mangleName;
+ char prefix = 's';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+
+ GrSLType samplerType = kSampler_GrSLType, textureType = kTexture2D_GrSLType;
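+    // Each sampler consumes two consecutive binding slots: one for the sampler
+    // object and one (at binding + 1) for the sampled texture.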
+ int binding = kSamplerBindingBase + fSamplers.count() * 2;
+ UniformInfo& info = fSamplers.push_back();
+ info.fVar.setType(samplerType);
+ info.fVar.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ info.fVar.setName(mangleName);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("set = 0, binding = %d", binding);
+ info.fVar.addLayoutQualifier(layoutQualifier.c_str());
+ info.fVisibility = kFragment_GrShaderFlag;
+ info.fUBOOffset = 0;
+ fSamplerSwizzles.push_back(swizzle);
+ SkASSERT(fSamplerSwizzles.count() == fSamplers.count());
+
+ SkString mangleTexName;
+ char texPrefix = 't';
+ fProgramBuilder->nameVariable(&mangleTexName, texPrefix, name, true);
+ UniformInfo& texInfo = fTextures.push_back();
+ texInfo.fVar.setType(textureType);
+ texInfo.fVar.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ texInfo.fVar.setName(mangleTexName);
+ SkString texLayoutQualifier;
+ texLayoutQualifier.appendf("set = 0, binding = %d", binding + 1);
+ texInfo.fVar.addLayoutQualifier(texLayoutQualifier.c_str());
+ texInfo.fVisibility = kFragment_GrShaderFlag;
+ texInfo.fUBOOffset = 0;
+
+ SkString reference;
+ reference.printf("makeSampler2D(%s, %s)", texInfo.fVar.getName().c_str(),
+ info.fVar.getName().c_str());
+ fSamplerReferences.push_back() = reference;
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+const char* GrDawnUniformHandler::samplerVariable(
+ GrGLSLUniformHandler::SamplerHandle handle) const {
+ return fSamplerReferences[handle.toIndex()].c_str();
+}
+
+GrSwizzle GrDawnUniformHandler::samplerSwizzle(GrGLSLUniformHandler::SamplerHandle handle) const {
+ return fSamplerSwizzles[handle.toIndex()];
+}
+
+void GrDawnUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ if (fSamplers[i].fVisibility & visibility) {
+ fSamplers[i].fVar.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ fTextures[i].fVar.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+ }
+ SkString uniformsString;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ if (fUniforms[i].fVisibility & visibility) {
+ fUniforms[i].fVar.appendDecl(fProgramBuilder->shaderCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ }
+ }
+ if (!uniformsString.isEmpty()) {
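+        // Vertex and geometry uniforms share a single uniform buffer at
+        // kGeometryBinding; fragment uniforms get their own at kFragBinding.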
+ uint32_t uniformBinding;
+ const char* stage;
+ if (kVertex_GrShaderFlag == visibility) {
+ uniformBinding = kGeometryBinding;
+ stage = "vertex";
+ } else if (kGeometry_GrShaderFlag == visibility) {
+ uniformBinding = kGeometryBinding;
+ stage = "geometry";
+ } else {
+ SkASSERT(kFragment_GrShaderFlag == visibility);
+ uniformBinding = kFragBinding;
+ stage = "fragment";
+ }
+ out->appendf("layout (set = 0, binding = %d) uniform %sUniformBuffer\n{\n",
+ uniformBinding, stage);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.h b/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.h
new file mode 100644
index 0000000000..e5d9950253
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnUniformHandler.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnUniformHandler_DEFINED
+#define GrDawnUniformHandler_DEFINED
+
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrDawnGpu;
+
+class GrDawnUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ const GrShaderVar& getUniformVariable(UniformHandle u) const override;
+ const char* getUniformCStr(UniformHandle u) const override;
+
+ struct UniformInfo {
+ GrShaderVar fVar;
+ int fUBOOffset;
+ int fVisibility;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+ enum {
+ kGeometryBinding = 0,
+ kFragBinding = 1,
+ kSamplerBindingBase = 2,
+ };
+
+private:
+ explicit GrDawnUniformHandler(GrGLSLProgramBuilder* program);
+
+ SamplerHandle addSampler(const GrTextureProxy*, const GrSamplerState&, const GrSwizzle&,
+ const char* name, const GrShaderCaps*) override;
+ const char* samplerVariable(SamplerHandle handle) const override;
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const override;
+ void appendUniformDecls(GrShaderFlags visibility, SkString*) const override;
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void updateUniformVisibility(UniformHandle u, uint32_t visibility) override;
+
+ UniformInfoArray fUniforms;
+ UniformInfoArray fSamplers;
+ UniformInfoArray fTextures;
+ SkTArray<GrSwizzle> fSamplerSwizzles;
+ SkTArray<SkString> fSamplerReferences;
+
+ uint32_t fCurrentGeometryUBOOffset = 0;
+ uint32_t fCurrentFragmentUBOOffset = 0;
+
+ friend class GrDawnProgramBuilder;
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.cpp
new file mode 100644
index 0000000000..7404dac008
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnUtil.h"
+
+size_t GrDawnBytesPerPixel(dawn::TextureFormat format) {
+ switch (format) {
+ case dawn::TextureFormat::RGBA8Unorm:
+ case dawn::TextureFormat::BGRA8Unorm:
+ return 4;
+ case dawn::TextureFormat::R8Unorm:
+ return 1;
+ case dawn::TextureFormat::Depth24PlusStencil8:
+ return 4;
+ default:
+ SkASSERT(false);
+ return 4;
+ }
+}
+
+bool GrDawnFormatIsRenderable(dawn::TextureFormat format) {
+ // For now, all the formats above are renderable. If a non-renderable format is added
+ // (see dawn/src/dawn_native/Format.cpp), an exception should be added here.
+ return true;
+}
+
+bool GrPixelConfigToDawnFormat(GrPixelConfig config, dawn::TextureFormat* format) {
+ switch (config) {
+ case kRGBA_8888_GrPixelConfig:
+ case kRGBA_4444_GrPixelConfig:
+ case kRGB_565_GrPixelConfig:
+ case kGray_8_GrPixelConfig:
+ *format = dawn::TextureFormat::RGBA8Unorm;
+ return true;
+ case kBGRA_8888_GrPixelConfig:
+ *format = dawn::TextureFormat::BGRA8Unorm;
+ return true;
+ case kAlpha_8_GrPixelConfig:
+ case kAlpha_8_as_Red_GrPixelConfig:
+ *format = dawn::TextureFormat::R8Unorm;
+ return true;
+ default:
+ return false;
+ }
+}
+
+size_t GrDawnRoundRowBytes(size_t rowBytes) {
+ // Dawn requires that rowBytes be a multiple of 256. (This is actually imposed by D3D12.)
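+    // e.g. 321 -> (321 + 0xFF) & ~0xFF == 512; values already a multiple of 256
+    // pass through unchanged.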
+ return (rowBytes + 0xFF) & ~0xFF;
+}
+
+#if GR_TEST_UTILS
+const char* GrDawnFormatToStr(dawn::TextureFormat format) {
+ switch (format) {
+ case dawn::TextureFormat::RGBA8Unorm:
+ return "RGBA8Unorm";
+ case dawn::TextureFormat::BGRA8Unorm:
+ return "BGRA8Unorm";
+ case dawn::TextureFormat::R8Unorm:
+ return "R8Unorm";
+ case dawn::TextureFormat::Depth24PlusStencil8:
+ return "Depth24PlusStencil8";
+ default:
+ SkASSERT(false);
+ return "Unknown";
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.h b/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.h
new file mode 100644
index 0000000000..0e5a41bade
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnUtil.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnUtil_DEFINED
+#define GrDawnUtil_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "dawn/dawncpp.h"
+
+size_t GrDawnBytesPerPixel(dawn::TextureFormat format);
+bool GrDawnFormatIsRenderable(dawn::TextureFormat format);
+bool GrPixelConfigToDawnFormat(GrPixelConfig config, dawn::TextureFormat* format);
+size_t GrDawnRoundRowBytes(size_t rowBytes);
+#if GR_TEST_UTILS
+const char* GrDawnFormatToStr(dawn::TextureFormat format);
+#endif
+
+#endif // GrDawnUtil_DEFINED
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.cpp b/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.cpp
new file mode 100644
index 0000000000..b3885d20c9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/dawn/GrDawnVaryingHandler.h"
+
+/** Returns the number of locations taken up by a given GrSLType. We assume that all
+    scalar values are 32 bits and that one location holds a 4-component vector, so a
+    matrix occupies one location per column. */
+static inline int grsltype_to_location_size(GrSLType type) {
+ switch(type) {
+ case kVoid_GrSLType:
+ return 0;
+ case kFloat_GrSLType: // fall through
+ case kHalf_GrSLType:
+ return 1;
+ case kFloat2_GrSLType: // fall through
+ case kHalf2_GrSLType:
+ return 1;
+ case kFloat3_GrSLType:
+ case kHalf3_GrSLType:
+ return 1;
+ case kFloat4_GrSLType:
+ case kHalf4_GrSLType:
+ return 1;
+ case kUint2_GrSLType:
+ return 1;
+ case kInt2_GrSLType:
+ case kShort2_GrSLType:
+ case kUShort2_GrSLType:
+ case kByte2_GrSLType:
+ case kUByte2_GrSLType:
+ return 1;
+ case kInt3_GrSLType:
+ case kShort3_GrSLType:
+ case kUShort3_GrSLType:
+ case kByte3_GrSLType:
+ case kUByte3_GrSLType:
+ return 1;
+ case kInt4_GrSLType:
+ case kShort4_GrSLType:
+ case kUShort4_GrSLType:
+ case kByte4_GrSLType:
+ case kUByte4_GrSLType:
+ return 1;
+ case kFloat2x2_GrSLType:
+ case kHalf2x2_GrSLType:
+ return 2;
+ case kFloat3x3_GrSLType:
+ case kHalf3x3_GrSLType:
+ return 3;
+ case kFloat4x4_GrSLType:
+ case kHalf4x4_GrSLType:
+ return 4;
+ case kTexture2DSampler_GrSLType:
+ return 0;
+ case kTextureExternalSampler_GrSLType:
+ return 0;
+ case kTexture2DRectSampler_GrSLType:
+ return 0;
+ case kBool_GrSLType:
+ return 1;
+ case kInt_GrSLType: // fall through
+ case kShort_GrSLType:
+ case kByte_GrSLType:
+ return 1;
+ case kUint_GrSLType: // fall through
+ case kUShort_GrSLType:
+ case kUByte_GrSLType:
+ return 1;
+ case kTexture2D_GrSLType:
+ return 0;
+ case kSampler_GrSLType:
+ return 0;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+static void finalize_helper(GrDawnVaryingHandler::VarArray& vars) {
+ int locationIndex = 0;
+ for (int i = 0; i < vars.count(); ++i) {
+ GrShaderVar& var = vars[i];
+ SkString location;
+ location.appendf("location = %d", locationIndex);
+ var.addLayoutQualifier(location.c_str());
+
+ int elementSize = grsltype_to_location_size(var.getType());
+ SkASSERT(elementSize > 0);
+ int numElements = 1;
+ if (var.isArray() && !var.isUnsizedArray()) {
+ numElements = var.getArrayCount();
+ }
+ SkASSERT(numElements > 0);
+ locationIndex += elementSize * numElements;
+ }
+ // TODO: determine the layout limits for Dawn, and enforce them via asserts here.
+}
+
+void GrDawnVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
diff --git a/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.h b/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.h
new file mode 100644
index 0000000000..d0d0ecd9d3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/dawn/GrDawnVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnVaryingHandler_DEFINED
+#define GrDawnVaryingHandler_DEFINED
+
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrDawnVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrDawnVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrDawnPipelineStateBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrAARectEffect.fp b/gfx/skia/skia/src/gpu/effects/GrAARectEffect.fp
new file mode 100644
index 0000000000..856b761a0f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrAARectEffect.fp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+layout(key) in GrClipEdgeType edgeType;
+layout(ctype=SkRect) in float4 rect;
+layout(ctype=SkRect) float4 prevRect = float4(-1);
+uniform float4 rectUniform;
+
+@optimizationFlags { kCompatibleWithCoverageAsAlpha_OptimizationFlag }
+
+void main() {
+ half alpha;
+ @switch (edgeType) {
+ case GrClipEdgeType::kFillBW: // fall through
+ case GrClipEdgeType::kInverseFillBW:
+ // non-AA
+ alpha = all(greaterThan(float4(sk_FragCoord.xy, rectUniform.zw),
+ float4(rectUniform.xy, sk_FragCoord.xy))) ? 1 : 0;
+ break;
+ default:
+ // The amount of coverage removed in x and y by the edges is computed as a pair of
+ // negative numbers, xSub and ySub.
+ half xSub, ySub;
+ xSub = min(half(sk_FragCoord.x - rectUniform.x), 0.0);
+ xSub += min(half(rectUniform.z - sk_FragCoord.x), 0.0);
+ ySub = min(half(sk_FragCoord.y - rectUniform.y), 0.0);
+ ySub += min(half(rectUniform.w - sk_FragCoord.y), 0.0);
+ // Now compute coverage in x and y and multiply them to get the fraction of the pixel
+ // covered.
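+            // e.g. a fragment centered 0.3px outside one vertical edge has
+            // xSub == -0.3 and so contributes 0.7 coverage in x.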
+ alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));
+ }
+
+ @if (edgeType == GrClipEdgeType::kInverseFillBW || edgeType == GrClipEdgeType::kInverseFillAA) {
+ alpha = 1.0 - alpha;
+ }
+ sk_OutColor = sk_InColor * alpha;
+}
+
+@setData(pdman) {
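+    // For AA edge types the rect is inset by half a pixel so that the one-pixel
+    // coverage ramp computed in the shader is centered on the geometric edge.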
+ const SkRect& newRect = GrProcessorEdgeTypeIsAA(edgeType) ?
+ rect.makeInset(.5f, .5f) : rect;
+ if (newRect != prevRect) {
+ pdman.set4f(rectUniform, newRect.fLeft, newRect.fTop, newRect.fRight, newRect.fBottom);
+ prevRect = newRect;
+ }
+}
+
+@test(d) {
+ SkRect rect = SkRect::MakeLTRB(d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1());
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ GrClipEdgeType edgeType = static_cast<GrClipEdgeType>(
+ d->fRandom->nextULessThan(kGrClipEdgeTypeCnt));
+
+ fp = GrAARectEffect::Make(edgeType, rect);
+ } while (nullptr == fp);
+ return fp;
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrAlphaThresholdFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrAlphaThresholdFragmentProcessor.fp
new file mode 100644
index 0000000000..cab332bd9c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrAlphaThresholdFragmentProcessor.fp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in uniform sampler2D mask;
+in uniform half innerThreshold;
+in uniform half outerThreshold;
+
+@class {
+ inline OptimizationFlags optFlags(float outerThreshold);
+}
+
+@constructorParams {
+ const SkIRect& bounds
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> mask,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrAlphaThresholdFragmentProcessor(
+ mask, innerThreshold, outerThreshold, bounds));
+ }
+}
+
+@coordTransform(mask) {
+ SkMatrix::MakeTrans(SkIntToScalar(-bounds.x()), SkIntToScalar(-bounds.y()))
+}
+
+@cpp {
+ inline GrFragmentProcessor::OptimizationFlags GrAlphaThresholdFragmentProcessor::optFlags(
+ float outerThreshold) {
+ if (outerThreshold >= 1.0) {
+ return kPreservesOpaqueInput_OptimizationFlag |
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ } else {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+ }
+}
+
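+// Where the mask is transparent, alpha is clamped down to outerThreshold; where it
+// is opaque, alpha is raised to innerThreshold. RGB is rescaled in both cases to
+// keep the premultiplied color valid.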
+void main() {
+ half4 color = sk_InColor;
+ half4 mask_color = sample(mask, sk_TransformedCoords2D[0]);
+ if (mask_color.a < 0.5) {
+ if (color.a > outerThreshold) {
+ half scale = outerThreshold / color.a;
+ color.rgb *= scale;
+ color.a = outerThreshold;
+ }
+ } else if (color.a < innerThreshold) {
+ half scale = innerThreshold / max(0.001, color.a);
+ color.rgb *= scale;
+ color.a = innerThreshold;
+ }
+ sk_OutColor = color;
+}
+
+@test(testData) {
+ sk_sp<GrTextureProxy> maskProxy = testData->textureProxy(GrProcessorUnitTest::kAlphaTextureIdx);
+    // Make the inner and outer thresholds lie in (0, 1) exclusive.
+ float innerThresh = testData->fRandom->nextUScalar1() * .99f + 0.005f;
+ float outerThresh = testData->fRandom->nextUScalar1() * .99f + 0.005f;
+ const int kMaxWidth = 1000;
+ const int kMaxHeight = 1000;
+ uint32_t width = testData->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = testData->fRandom->nextULessThan(kMaxHeight);
+ uint32_t x = testData->fRandom->nextULessThan(kMaxWidth - width);
+ uint32_t y = testData->fRandom->nextULessThan(kMaxHeight - height);
+ SkIRect bounds = SkIRect::MakeXYWH(x, y, width, height);
+ return GrAlphaThresholdFragmentProcessor::Make(std::move(maskProxy), innerThresh, outerThresh,
+ bounds);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrAtlasedShaderHelpers.h b/gfx/skia/skia/src/gpu/effects/GrAtlasedShaderHelpers.h
new file mode 100644
index 0000000000..94b6f98a51
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrAtlasedShaderHelpers.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasedShaderHelpers_DEFINED
+#define GrAtlasedShaderHelpers_DEFINED
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+static void append_index_uv_varyings(GrGLSLPrimitiveProcessor::EmitArgs& args,
+ const char* inTexCoordsName,
+ const char* atlasSizeInvName,
+ GrGLSLVarying *uv,
+ GrGLSLVarying *texIdx,
+ GrGLSLVarying *st) {
+ using Interpolation = GrGLSLVaryingHandler::Interpolation;
+
+    // This extracts the texture index and texel coordinates from the same variable.
+    // Packing structure: the texel coordinates are multiplied by 2 (i.e., shifted
+    // left one bit), and the two-bit texture index is split across the low bits of
+    // x and y.
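+    // e.g. texel (5, 7) in texture index 3 (binary 11) is packed as (11, 15):
+    // x = 5*2 + 1 and y = 7*2 + 1; decoding gives texIdx = 2*(11&1) + (15&1) = 3.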
+ if (args.fShaderCaps->integerSupport()) {
+ args.fVertBuilder->codeAppendf("int2 signedCoords = int2(%s.x, %s.y);",
+ inTexCoordsName, inTexCoordsName);
+ args.fVertBuilder->codeAppend("int texIdx = 2*(signedCoords.x & 0x1) + (signedCoords.y & 0x1);");
+ args.fVertBuilder->codeAppend("float2 unormTexCoords = float2(signedCoords.x/2, signedCoords.y/2);");
+ } else {
+ args.fVertBuilder->codeAppendf("float2 indexTexCoords = float2(%s.x, %s.y);",
+ inTexCoordsName, inTexCoordsName);
+ args.fVertBuilder->codeAppend("float2 unormTexCoords = floor(0.5*indexTexCoords);");
+ args.fVertBuilder->codeAppend("float2 diff = indexTexCoords - 2.0*unormTexCoords;");
+ args.fVertBuilder->codeAppend("float texIdx = 2.0*diff.x + diff.y;");
+ }
+
+ // Multiply by 1/atlasSize to get normalized texture coordinates
+ args.fVaryingHandler->addVarying("TextureCoords", uv);
+ args.fVertBuilder->codeAppendf("%s = unormTexCoords * %s;", uv->vsOut(), atlasSizeInvName);
+
+ args.fVaryingHandler->addVarying("TexIndex", texIdx, args.fShaderCaps->integerSupport()
+ ? Interpolation::kMustBeFlat
+ : Interpolation::kCanBeFlat);
+ args.fVertBuilder->codeAppendf("%s = texIdx;", texIdx->vsOut());
+
+ if (st) {
+ args.fVaryingHandler->addVarying("IntTextureCoords", st);
+ args.fVertBuilder->codeAppendf("%s = unormTexCoords;", st->vsOut());
+ }
+}
+
+static void append_multitexture_lookup(GrGLSLPrimitiveProcessor::EmitArgs& args,
+ int numTextureSamplers,
+ const GrGLSLVarying &texIdx,
+ const char* coordName,
+ const char* colorName) {
+ // conditionally load from the indexed texture sampler
+ for (int i = 0; i < numTextureSamplers-1; ++i) {
+ args.fFragBuilder->codeAppendf("if (%s == %d) { %s = ", texIdx.fsIn(), i, colorName);
+ args.fFragBuilder->appendTextureLookup(args.fTexSamplers[i], coordName,
+ kFloat2_GrSLType);
+ args.fFragBuilder->codeAppend("; } else ");
+ }
+ args.fFragBuilder->codeAppendf("{ %s = ", colorName);
+ args.fFragBuilder->appendTextureLookup(args.fTexSamplers[numTextureSamplers-1], coordName,
+ kFloat2_GrSLType);
+ args.fFragBuilder->codeAppend("; }");
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp
new file mode 100644
index 0000000000..7c9e5f0983
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp
@@ -0,0 +1,462 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/effects/GrBezierEffect.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+class GrGLConicEffect : public GrGLSLGeometryProcessor {
+public:
+ GrGLConicEffect(const GrGeometryProcessor&);
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrConicEffect& ce = primProc.cast<GrConicEffect>();
+
+ if (!ce.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(ce.viewMatrix())) {
+ fViewMatrix = ce.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (ce.color() != fColor) {
+ pdman.set4fv(fColorUniform, 1, ce.color().vec());
+ fColor = ce.color();
+ }
+
+ if (ce.coverageScale() != 0xff && ce.coverageScale() != fCoverageScale) {
+ pdman.set1f(fCoverageScaleUniform, GrNormalizeByteToFloat(ce.coverageScale()));
+ fCoverageScale = ce.coverageScale();
+ }
+ this->setTransformDataHelper(ce.localMatrix(), pdman, &transformIter);
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ SkPMColor4f fColor;
+ uint8_t fCoverageScale;
+ GrClipEdgeType fEdgeType;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageScaleUniform;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLConicEffect::GrGLConicEffect(const GrGeometryProcessor& processor)
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(SK_PMColor4fILLEGAL), fCoverageScale(0xff) {
+ const GrConicEffect& ce = processor.cast<GrConicEffect>();
+ fEdgeType = ce.getEdgeType();
+}
+
+void GrGLConicEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ const GrConicEffect& gp = args.fGP.cast<GrConicEffect>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ GrGLSLVarying v(kFloat4_GrSLType);
+ varyingHandler->addVarying("ConicCoeffs", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), gp.inConicCoeffs().name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition().name(),
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gp.inPosition().asShaderVar(),
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // TODO: we should check on the number of bits float and half provide and use the smallest one
+ // that suffices. Additionally we should assert that the upstream code only lets us get here if
+ // either float or half provides the required number of bits.
+
+ GrShaderVar edgeAlpha("edgeAlpha", kHalf_GrSLType, 0);
+ GrShaderVar dklmdx("dklmdx", kFloat3_GrSLType, 0);
+ GrShaderVar dklmdy("dklmdy", kFloat3_GrSLType, 0);
+ GrShaderVar dfdx("dfdx", kFloat_GrSLType, 0);
+ GrShaderVar dfdy("dfdy", kFloat_GrSLType, 0);
+ GrShaderVar gF("gF", kFloat2_GrSLType, 0);
+ GrShaderVar gFM("gFM", kFloat_GrSLType, 0);
+ GrShaderVar func("func", kFloat_GrSLType, 0);
+
+ fragBuilder->declAppend(edgeAlpha);
+ fragBuilder->declAppend(dklmdx);
+ fragBuilder->declAppend(dklmdy);
+ fragBuilder->declAppend(dfdx);
+ fragBuilder->declAppend(dfdy);
+ fragBuilder->declAppend(gF);
+ fragBuilder->declAppend(gFM);
+ fragBuilder->declAppend(func);
+
+ switch (fEdgeType) {
+ case GrClipEdgeType::kHairlineAA: {
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = 2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s = 2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = float2(%s, %s);", gF.c_str(), dfdx.c_str(),
+ dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x*%s.x - %s.y*%s.z;",
+ func.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = abs(%s);", func.c_str(), func.c_str());
+ fragBuilder->codeAppendf("%s = half(%s / %s);",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = max(1.0 - %s, 0.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case GrClipEdgeType::kFillAA: {
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s ="
+ "2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s ="
+ "2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = float2(%s, %s);", gF.c_str(), dfdx.c_str(),
+ dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x * %s.x - %s.y * %s.z;",
+ func.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = half(%s / %s);",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = saturate(0.5 - %s);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case GrClipEdgeType::kFillBW: {
+ fragBuilder->codeAppendf("%s = half(%s.x * %s.x - %s.y * %s.z);",
+ edgeAlpha.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = half(%s < 0.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ break;
+ }
+ default:
+ SK_ABORT("Shouldn't get here");
+ }
+
+ // TODO should we really be doing this?
+ if (gp.coverageScale() != 0xff) {
+ const char* coverageScale;
+ fCoverageScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ "Coverage",
+ &coverageScale);
+ fragBuilder->codeAppendf("%s = half4(half(%s) * %s);",
+ args.fOutputCoverage, coverageScale, edgeAlpha.c_str());
+ } else {
+ fragBuilder->codeAppendf("%s = half4(%s);", args.fOutputCoverage, edgeAlpha.c_str());
+ }
+}
+
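+// Key bit layout, as assembled below: bits 0-1 hold the edge type (0x0 fill AA, 0x1 hairline AA,
+// 0x2 BW fill), bit 3 is set when a non-0xff coverage scale is in use, bit 4 when local coords
+// use a perspective local matrix, and bits 5+ carry the position key from ComputePosKey().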
+void GrGLConicEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConicEffect& ce = gp.cast<GrConicEffect>();
+ uint32_t key = ce.isAntiAliased() ? (ce.isFilled() ? 0x0 : 0x1) : 0x2;
+ key |= 0xff != ce.coverageScale() ? 0x8 : 0x0;
+ key |= ce.usesLocalCoords() && ce.localMatrix().hasPerspective() ? 0x10 : 0x0;
+ key |= ComputePosKey(ce.viewMatrix()) << 5;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+constexpr GrPrimitiveProcessor::Attribute GrConicEffect::kAttributes[];
+
+GrConicEffect::~GrConicEffect() {}
+
+void GrConicEffect::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConicEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrConicEffect::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLConicEffect(*this);
+}
+
+GrConicEffect::GrConicEffect(const SkPMColor4f& color, const SkMatrix& viewMatrix, uint8_t coverage,
+ GrClipEdgeType edgeType, const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : INHERITED(kGrConicEffect_ClassID)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+        , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fCoverageScale(coverage)
+ , fEdgeType(edgeType) {
+ this->setVertexAttributes(kAttributes, SK_ARRAY_COUNT(kAttributes));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrConicEffect);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrConicEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrGeometryProcessor> gp;
+ do {
+ GrClipEdgeType edgeType =
+ static_cast<GrClipEdgeType>(
+ d->fRandom->nextULessThan(kGrClipEdgeTypeCnt));
+ gp = GrConicEffect::Make(SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ GrTest::TestMatrix(d->fRandom), edgeType, *d->caps(),
+ GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool());
+ } while (nullptr == gp);
+ return gp;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// Quad
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLQuadEffect : public GrGLSLGeometryProcessor {
+public:
+ GrGLQuadEffect(const GrGeometryProcessor&);
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrQuadEffect& qe = primProc.cast<GrQuadEffect>();
+
+ if (!qe.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(qe.viewMatrix())) {
+ fViewMatrix = qe.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (qe.color() != fColor) {
+ pdman.set4fv(fColorUniform, 1, qe.color().vec());
+ fColor = qe.color();
+ }
+
+ if (qe.coverageScale() != 0xff && qe.coverageScale() != fCoverageScale) {
+ pdman.set1f(fCoverageScaleUniform, GrNormalizeByteToFloat(qe.coverageScale()));
+ fCoverageScale = qe.coverageScale();
+ }
+ this->setTransformDataHelper(qe.localMatrix(), pdman, &transformIter);
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ SkPMColor4f fColor;
+ uint8_t fCoverageScale;
+ GrClipEdgeType fEdgeType;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageScaleUniform;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLQuadEffect::GrGLQuadEffect(const GrGeometryProcessor& processor)
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(SK_PMColor4fILLEGAL), fCoverageScale(0xff) {
+ const GrQuadEffect& ce = processor.cast<GrQuadEffect>();
+ fEdgeType = ce.getEdgeType();
+}
+
+void GrGLQuadEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ const GrQuadEffect& gp = args.fGP.cast<GrQuadEffect>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ GrGLSLVarying v(kHalf4_GrSLType);
+ varyingHandler->addVarying("HairQuadEdge", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), gp.inHairQuadEdge().name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition().name(),
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gp.inPosition().asShaderVar(),
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppendf("half edgeAlpha;");
+
+ switch (fEdgeType) {
+ case GrClipEdgeType::kHairlineAA: {
+ fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("half2 gF = half2(2.0 * %s.x * duvdx.x - duvdx.y,"
+ " 2.0 * %s.x * duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = half(%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = sqrt(edgeAlpha * edgeAlpha / dot(gF, gF));");
+ fragBuilder->codeAppend("edgeAlpha = max(1.0 - edgeAlpha, 0.0);");
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case GrClipEdgeType::kFillAA: {
+ fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("half2 gF = half2(2.0 * %s.x * duvdx.x - duvdx.y,"
+ " 2.0 * %s.x * duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = half(%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = edgeAlpha / sqrt(dot(gF, gF));");
+ fragBuilder->codeAppend("edgeAlpha = saturate(0.5 - edgeAlpha);");
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case GrClipEdgeType::kFillBW: {
+ fragBuilder->codeAppendf("edgeAlpha = half(%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = half(edgeAlpha < 0.0);");
+ break;
+ }
+ default:
+ SK_ABORT("Shouldn't get here");
+ }
+
+ if (0xff != gp.coverageScale()) {
+ const char* coverageScale;
+ fCoverageScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf_GrSLType,
+ "Coverage",
+ &coverageScale);
+ fragBuilder->codeAppendf("%s = half4(%s * edgeAlpha);", args.fOutputCoverage,
+ coverageScale);
+ } else {
+ fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
+ }
+}
+
+void GrGLQuadEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrQuadEffect& ce = gp.cast<GrQuadEffect>();
+ uint32_t key = ce.isAntiAliased() ? (ce.isFilled() ? 0x0 : 0x1) : 0x2;
+ key |= ce.coverageScale() != 0xff ? 0x8 : 0x0;
+ key |= ce.usesLocalCoords() && ce.localMatrix().hasPerspective() ? 0x10 : 0x0;
+ key |= ComputePosKey(ce.viewMatrix()) << 5;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+constexpr GrPrimitiveProcessor::Attribute GrQuadEffect::kAttributes[];
+
+GrQuadEffect::~GrQuadEffect() {}
+
+void GrQuadEffect::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLQuadEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrQuadEffect::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLQuadEffect(*this);
+}
+
+GrQuadEffect::GrQuadEffect(const SkPMColor4f& color, const SkMatrix& viewMatrix, uint8_t coverage,
+ GrClipEdgeType edgeType, const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : INHERITED(kGrQuadEffect_ClassID)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fCoverageScale(coverage)
+ , fEdgeType(edgeType) {
+ this->setVertexAttributes(kAttributes, SK_ARRAY_COUNT(kAttributes));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrQuadEffect);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrQuadEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrGeometryProcessor> gp;
+ do {
+ GrClipEdgeType edgeType = static_cast<GrClipEdgeType>(
+ d->fRandom->nextULessThan(kGrClipEdgeTypeCnt));
+ gp = GrQuadEffect::Make(SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ GrTest::TestMatrix(d->fRandom), edgeType, *d->caps(),
+ GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool());
+ } while (nullptr == gp);
+ return gp;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h
new file mode 100644
index 0000000000..c63002bb53
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBezierEffect_DEFINED
+#define GrBezierEffect_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessor.h"
+
+/**
+ * Shader is based on Loop-Blinn Quadratic GPU Rendering.
+ * The output of this effect is a hairline edge for conics.
+ * Conics are specified by the implicit equation K^2 - LM = 0.
+ * K, L, and M are the first three values of the vertex attribute;
+ * the fourth value is not used. Distance is calculated using a
+ * first-order Taylor approximation.
+ * Coverage for AA is max(0, 1-distance).
+ *
+ * Tests were also run using a second-order distance approximation.
+ * There were two versions of the second-order approximation. The first version
+ * is of roughly the form:
+ * f(q) = |f(p)| - ||f'(p)||*||q-p|| - ||f''(p)||*||q-p||^2.
+ * The second is similar:
+ * f(q) = |f(p)| + ||f'(p)||*||q-p|| + ||f''(p)||*||q-p||^2.
+ * The exact version of the equations can be found in the paper
+ * "Distance Approximations for Rasterizing Implicit Curves" by Gabriel Taubin
+ *
+ * In both versions we solve the quadratic for ||q-p||.
+ * Version 1:
+ * gFM is the magnitude of the first partials and gF2M is the magnitude of the second partials
+ * (as derived from the paper).
+ * builder->fsCodeAppend("\t\tedgeAlpha = (sqrt(gFM*gFM+4.0*func*gF2M) - gFM)/(2.0*gF2M);\n");
+ * Version 2:
+ * builder->fsCodeAppend("\t\tedgeAlpha = (gFM - sqrt(gFM*gFM-4.0*func*gF2M))/(2.0*gF2M);\n");
+ *
+ * Also note that 2nd partials of k,l,m are zero
+ *
+ * When comparing the two second-order approximations to the first-order approximation,
+ * the following results were found. Version 1 tends to underestimate the distances, and thus
+ * compounds the error already present in the first-order approximation, so it is not the
+ * one to use. Version 2 has the opposite effect and tends to overestimate the distances.
+ * This is much closer to what we are looking for. It is able to render ellipses (even thin
+ * ones) without the need to chop. However, it cannot handle thin hyperbolas well and thus
+ * would still rely on chopping to tighten the clipping. Another side effect of the
+ * overestimating is that the curves become much thinner and "ropey". If all that was ever
+ * rendered were "not too thin" curves and ellipses, then second order might have an advantage,
+ * since only one geometry would need to be rendered. However, no benchmarks were run comparing
+ * chopped first order against non-chopped second order.
+ */
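+
+// A minimal sketch of the first-order approximation used by the shader, with names matching
+// GrGLConicEffect::onEmitCode() (gFM is assumed to already hold the gradient magnitude that the
+// shader computes from dFdx/dFdy):
+//
+//   float func  = k*k - l*m;                    // the implicit conic f = K^2 - LM
+//   float dist  = fabsf(func) / gFM;            // first-order Taylor distance estimate
+//   float alpha = std::max(1.0f - dist, 0.0f);  // hairline AA coverage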
+class GrGLConicEffect;
+
+class GrConicEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(const SkPMColor4f& color,
+ const SkMatrix& viewMatrix,
+ const GrClipEdgeType edgeType,
+ const GrCaps& caps,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords,
+ uint8_t coverage = 0xff) {
+ switch (edgeType) {
+ case GrClipEdgeType::kFillAA:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage, GrClipEdgeType::kFillAA,
+ localMatrix, usesLocalCoords));
+ case GrClipEdgeType::kHairlineAA:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage,
+ GrClipEdgeType::kHairlineAA, localMatrix,
+ usesLocalCoords));
+ case GrClipEdgeType::kFillBW:
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage, GrClipEdgeType::kFillBW,
+ localMatrix, usesLocalCoords));
+ default:
+ return nullptr;
+ }
+ }
+
+ ~GrConicEffect() override;
+
+ const char* name() const override { return "Conic"; }
+
+ inline const Attribute& inPosition() const { return kAttributes[0]; }
+ inline const Attribute& inConicCoeffs() const { return kAttributes[1]; }
+ inline bool isAntiAliased() const { return GrProcessorEdgeTypeIsAA(fEdgeType); }
+ inline bool isFilled() const { return GrProcessorEdgeTypeIsFill(fEdgeType); }
+ inline GrClipEdgeType getEdgeType() const { return fEdgeType; }
+ const SkPMColor4f& color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ uint8_t coverageScale() const { return fCoverageScale; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrConicEffect(const SkPMColor4f&, const SkMatrix& viewMatrix, uint8_t coverage, GrClipEdgeType,
+ const SkMatrix& localMatrix, bool usesLocalCoords);
+
+ SkPMColor4f fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ uint8_t fCoverageScale;
+ GrClipEdgeType fEdgeType;
+ static constexpr Attribute kAttributes[] = {
+ {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType},
+ {"inConicCoeffs", kFloat4_GrVertexAttribType, kHalf4_GrSLType}
+ };
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * The output of this effect is a hairline edge for quadratics.
+ * The quadratic is specified by 0 = u^2 - v in canonical coords. u and v are the first
+ * two components of the vertex attribute. At the three control points that define
+ * the quadratic, (u, v) takes the values {0,0}, {1/2, 0}, and {1, 1} respectively.
+ * Coverage for AA is max(0, 1-distance). The 3rd and 4th components are unused.
+ * Requires shader derivative instruction support.
+ */
+class GrGLQuadEffect;
+
+class GrQuadEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(const SkPMColor4f& color,
+ const SkMatrix& viewMatrix,
+ const GrClipEdgeType edgeType,
+ const GrCaps& caps,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords,
+ uint8_t coverage = 0xff) {
+ switch (edgeType) {
+ case GrClipEdgeType::kFillAA:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage, GrClipEdgeType::kFillAA,
+ localMatrix, usesLocalCoords));
+ case GrClipEdgeType::kHairlineAA:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage,
+ GrClipEdgeType::kHairlineAA, localMatrix,
+ usesLocalCoords));
+ case GrClipEdgeType::kFillBW:
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage, GrClipEdgeType::kFillBW,
+ localMatrix, usesLocalCoords));
+ default:
+ return nullptr;
+ }
+ }
+
+ ~GrQuadEffect() override;
+
+ const char* name() const override { return "Quad"; }
+
+ inline const Attribute& inPosition() const { return kAttributes[0]; }
+ inline const Attribute& inHairQuadEdge() const { return kAttributes[1]; }
+ inline bool isAntiAliased() const { return GrProcessorEdgeTypeIsAA(fEdgeType); }
+ inline bool isFilled() const { return GrProcessorEdgeTypeIsFill(fEdgeType); }
+ inline GrClipEdgeType getEdgeType() const { return fEdgeType; }
+ const SkPMColor4f& color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ uint8_t coverageScale() const { return fCoverageScale; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrQuadEffect(const SkPMColor4f&, const SkMatrix& viewMatrix, uint8_t coverage, GrClipEdgeType,
+ const SkMatrix& localMatrix, bool usesLocalCoords);
+
+ SkPMColor4f fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ uint8_t fCoverageScale;
+ GrClipEdgeType fEdgeType;
+
+ static constexpr Attribute kAttributes[] = {
+ {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType},
+ {"inHairQuadEdge", kFloat4_GrVertexAttribType, kHalf4_GrSLType}
+ };
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp
new file mode 100644
index 0000000000..6d7b431ab3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMatrixPriv.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrGLBicubicEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor& effect, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrBicubicEffect& bicubicEffect = effect.cast<GrBicubicEffect>();
+ b->add32(GrTextureDomain::GLDomain::DomainKey(bicubicEffect.domain()));
+ uint32_t bidir = bicubicEffect.direction() == GrBicubicEffect::Direction::kXY ? 1 : 0;
+ b->add32(bidir | (bicubicEffect.alphaType() << 1));
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fDimensions;
+ GrTextureDomain::GLDomain fDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLBicubicEffect::emitCode(EmitArgs& args) {
+ const GrBicubicEffect& bicubicEffect = args.fFp.cast<GrBicubicEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fDimensions = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType, "Dimensions");
+
+ const char* dims = uniformHandler->getUniformCStr(fDimensions);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
+ /*
+ * Filter weights come from Don Mitchell & Arun Netravali's 'Reconstruction Filters in Computer
+ * Graphics', ACM SIGGRAPH Computer Graphics 22, 4 (Aug. 1988).
+ * ACM DL: http://dl.acm.org/citation.cfm?id=378514
+ * Free : http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
+ *
+ * The authors define a family of cubic filters with two free parameters (B and C):
+ *
+ * { (12 - 9B - 6C)|x|^3 + (-18 + 12B + 6C)|x|^2 + (6 - 2B) if |x| < 1
+ * k(x) = 1/6 { (-B - 6C)|x|^3 + (6B + 30C)|x|^2 + (-12B - 48C)|x| + (8B + 24C) if 1 <= |x| < 2
+ * { 0 otherwise
+ *
+ * Various well-known cubic splines can be generated, and the authors select (1/3, 1/3) as their
+ * favorite overall spline - this is now commonly known as the Mitchell filter, and is the
+ * source of the specific weights below.
+ *
+ * This is GLSL, so the matrix is column-major (transposed from standard matrix notation).
+ */
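+
+    // A CPU-side reference for the matrix below (a sketch, not used here): it encodes the
+    // Mitchell kernel k(x) with B = C = 1/3, so that kMitchellCoefficients * (1, f, f^2, f^3)
+    // yields (k(1+f), k(f), k(1-f), k(2-f)) for a fractional texel offset f in [0, 1):
+    //
+    //   static float mitchell_1_3(float x) {
+    //       x = fabsf(x);
+    //       if (x < 1) { return (21*x*x*x - 36*x*x + 16) / 18.f; }
+    //       if (x < 2) { return (-7*x*x*x + 36*x*x - 60*x + 32) / 18.f; }
+    //       return 0.f;
+    //   }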
+ fragBuilder->codeAppend("half4x4 kMitchellCoefficients = half4x4("
+ " 1.0 / 18.0, 16.0 / 18.0, 1.0 / 18.0, 0.0 / 18.0,"
+ "-9.0 / 18.0, 0.0 / 18.0, 9.0 / 18.0, 0.0 / 18.0,"
+ "15.0 / 18.0, -36.0 / 18.0, 27.0 / 18.0, -6.0 / 18.0,"
+ "-7.0 / 18.0, 21.0 / 18.0, -21.0 / 18.0, 7.0 / 18.0);");
+ fragBuilder->codeAppendf("float2 coord = %s - %s.xy * float2(0.5);", coords2D.c_str(), dims);
+    // We unnormalize the coord in order to determine our fractional offset (f) within the texel.
+    // We then snap coord to a texel center and renormalize. The snap prevents cases where the
+    // starting coords are near a texel boundary and accumulations of dims would cause us to skip
+    // or double-hit a texel.
+ fragBuilder->codeAppendf("half2 f = half2(fract(coord * %s.zw));", dims);
+ fragBuilder->codeAppendf("coord = coord + (half2(0.5) - f) * %s.xy;", dims);
+ if (bicubicEffect.direction() == GrBicubicEffect::Direction::kXY) {
+ fragBuilder->codeAppend(
+ "half4 wx = kMitchellCoefficients * half4(1.0, f.x, f.x * f.x, f.x * f.x * f.x);");
+ fragBuilder->codeAppend(
+ "half4 wy = kMitchellCoefficients * half4(1.0, f.y, f.y * f.y, f.y * f.y * f.y);");
+ fragBuilder->codeAppend("half4 rowColors[4];");
+ for (int y = 0; y < 4; ++y) {
+ for (int x = 0; x < 4; ++x) {
+ SkString coord;
+ coord.printf("coord + %s.xy * float2(%d, %d)", dims, x - 1, y - 1);
+ SkString sampleVar;
+ sampleVar.printf("rowColors[%d]", x);
+ fDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ bicubicEffect.domain(),
+ sampleVar.c_str(),
+ coord,
+ args.fTexSamplers[0]);
+ }
+ fragBuilder->codeAppendf(
+ "half4 s%d = wx.x * rowColors[0] + wx.y * rowColors[1] + wx.z * rowColors[2] + "
+ "wx.w * rowColors[3];",
+ y);
+ }
+ fragBuilder->codeAppend(
+ "half4 bicubicColor = wy.x * s0 + wy.y * s1 + wy.z * s2 + wy.w * s3;");
+ } else {
+ // One of the dims.xy values will be zero. So v here selects the nonzero value of f.
+ fragBuilder->codeAppend("half v = f.x + f.y;");
+ fragBuilder->codeAppend("half v2 = v * v;");
+ fragBuilder->codeAppend("half4 w = kMitchellCoefficients * half4(1.0, v, v2, v2 * v);");
+ fragBuilder->codeAppend("half4 c[4];");
+ for (int i = 0; i < 4; ++i) {
+ SkString coord;
+ coord.printf("coord + %s.xy * half(%d)", dims, i - 1);
+ SkString samplerVar;
+ samplerVar.printf("c[%d]", i);
+            // With added complexity we could apply the domain once in X or Y depending on
+            // direction rather than for each of the four lookups, but then we might not
+            // be able to share code for Direction::kX and ::kY.
+ fDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ bicubicEffect.domain(),
+ samplerVar.c_str(),
+ coord,
+ args.fTexSamplers[0]);
+ }
+ fragBuilder->codeAppend(
+ "half4 bicubicColor = c[0] * w.x + c[1] * w.y + c[2] * w.z + c[3] * w.w;");
+ }
+ // Bicubic can send colors out of range, so clamp to get them back in (source) gamut.
+ // The kind of clamp we have to do depends on the alpha type.
+ if (kPremul_SkAlphaType == bicubicEffect.alphaType()) {
+ fragBuilder->codeAppend("bicubicColor.a = saturate(bicubicColor.a);");
+ fragBuilder->codeAppend(
+ "bicubicColor.rgb = max(half3(0.0), min(bicubicColor.rgb, bicubicColor.aaa));");
+ } else {
+ fragBuilder->codeAppend("bicubicColor = saturate(bicubicColor);");
+ }
+ fragBuilder->codeAppendf("%s = bicubicColor * %s;", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLBicubicEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ const GrBicubicEffect& bicubicEffect = processor.cast<GrBicubicEffect>();
+ GrTextureProxy* proxy = processor.textureSampler(0).proxy();
+ GrTexture* texture = proxy->peekTexture();
+
+ float dims[4] = {0, 0, 0, 0};
+ if (bicubicEffect.direction() != GrBicubicEffect::Direction::kY) {
+ dims[0] = 1.0f / texture->width();
+ dims[2] = texture->width();
+ }
+ if (bicubicEffect.direction() != GrBicubicEffect::Direction::kX) {
+ dims[1] = 1.0f / texture->height();
+ dims[3] = texture->height();
+ }
+ pdman.set4fv(fDimensions, 1, dims);
+ fDomain.setData(pdman, bicubicEffect.domain(), proxy,
+ processor.textureSampler(0).samplerState());
+}
+
+GrBicubicEffect::GrBicubicEffect(sk_sp<GrTextureProxy> proxy, GrColorType srcColorType,
+ const SkMatrix& matrix, const SkRect& domain,
+ const GrSamplerState::WrapMode wrapModes[2],
+ GrTextureDomain::Mode modeX, GrTextureDomain::Mode modeY,
+ Direction direction, SkAlphaType alphaType)
+ : INHERITED{kGrBicubicEffect_ClassID,
+ ModulateForSamplerOptFlags(
+ srcColorType,
+ GrTextureDomain::IsDecalSampled(wrapModes, modeX, modeY))}
+ , fCoordTransform(matrix, proxy.get())
+ , fDomain(proxy.get(), domain, modeX, modeY)
+ , fTextureSampler(std::move(proxy),
+ GrSamplerState(wrapModes, GrSamplerState::Filter::kNearest))
+ , fAlphaType(alphaType)
+ , fDirection(direction) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+GrBicubicEffect::GrBicubicEffect(const GrBicubicEffect& that)
+ : INHERITED(kGrBicubicEffect_ClassID, that.optimizationFlags())
+ , fCoordTransform(that.fCoordTransform)
+ , fDomain(that.fDomain)
+ , fTextureSampler(that.fTextureSampler)
+ , fAlphaType(that.fAlphaType)
+ , fDirection(that.fDirection) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+void GrBicubicEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLBicubicEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrBicubicEffect::onCreateGLSLInstance() const {
+ return new GrGLBicubicEffect;
+}
+
+bool GrBicubicEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrBicubicEffect& s = sBase.cast<GrBicubicEffect>();
+ return fDomain == s.fDomain && fDirection == s.fDirection && fAlphaType == s.fAlphaType;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrBicubicEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrBicubicEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ static const GrSamplerState::WrapMode kClampClamp[] = {GrSamplerState::WrapMode::kClamp,
+ GrSamplerState::WrapMode::kClamp};
+ SkAlphaType alphaType = d->fRandom->nextBool() ? kPremul_SkAlphaType : kUnpremul_SkAlphaType;
+ Direction direction = Direction::kX;
+ switch (d->fRandom->nextULessThan(3)) {
+ case 0:
+ direction = Direction::kX;
+ break;
+ case 1:
+ direction = Direction::kY;
+ break;
+ case 2:
+ direction = Direction::kXY;
+ break;
+ }
+ return GrBicubicEffect::Make(d->textureProxy(texIdx), d->textureProxyColorType(texIdx),
+ SkMatrix::I(), kClampClamp, direction, alphaType);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrBicubicEffect::ShouldUseBicubic(const SkMatrix& matrix, GrSamplerState::Filter* filterMode) {
+ switch (SkMatrixPriv::AdjustHighQualityFilterLevel(matrix)) {
+ case kNone_SkFilterQuality:
+ *filterMode = GrSamplerState::Filter::kNearest;
+ break;
+ case kLow_SkFilterQuality:
+ *filterMode = GrSamplerState::Filter::kBilerp;
+ break;
+ case kMedium_SkFilterQuality:
+ *filterMode = GrSamplerState::Filter::kMipMap;
+ break;
+ case kHigh_SkFilterQuality:
+ // When we use the bicubic filtering effect each sample is read from the texture using
+ // nearest neighbor sampling.
+ *filterMode = GrSamplerState::Filter::kNearest;
+ return true;
+ }
+ return false;
+}
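+
+// A minimal caller sketch (hypothetical names; the matrix, proxy, and color-type values are
+// assumptions, not part of this file):
+//
+//   GrSamplerState::Filter filter;
+//   if (GrBicubicEffect::ShouldUseBicubic(localToDevice, &filter)) {
+//       auto fp = GrBicubicEffect::Make(std::move(proxy), srcColorType, texMatrix,
+//                                       GrBicubicEffect::Direction::kXY, alphaType);
+//   } else {
+//       // otherwise sample with the fallback mode written to 'filter'
+//   }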
diff --git a/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h
new file mode 100644
index 0000000000..a2dc249000
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBicubicTextureEffect_DEFINED
+#define GrBicubicTextureEffect_DEFINED
+
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+
+class GrInvariantOutput;
+
+class GrBicubicEffect : public GrFragmentProcessor {
+public:
+ enum {
+ kFilterTexelPad = 2, // Given a src rect in texels to be filtered, this number of
+ // surrounding texels are needed by the kernel in x and y.
+ };
+
+ enum class Direction {
+ /** Apply bicubic kernel in local coord x, nearest neighbor in y. */
+ kX,
+ /** Apply bicubic kernel in local coord y, nearest neighbor in x. */
+ kY,
+ /** Apply bicubic in both x and y. */
+ kXY
+ };
+
+ const char* name() const override { return "Bicubic"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrBicubicEffect(*this));
+ }
+
+ const GrTextureDomain& domain() const { return fDomain; }
+
+ Direction direction() const { return fDirection; }
+
+ SkAlphaType alphaType() const { return fAlphaType; }
+
+ /**
+ * Create a Mitchell filter effect with specified texture matrix with clamp wrap mode.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ Direction direction,
+ SkAlphaType alphaType) {
+ static constexpr GrSamplerState::WrapMode kClampClamp[] = {
+ GrSamplerState::WrapMode::kClamp, GrSamplerState::WrapMode::kClamp};
+ return Make(std::move(proxy), srcColorType, matrix, kClampClamp,
+ GrTextureDomain::kIgnore_Mode, GrTextureDomain::kIgnore_Mode, direction,
+ alphaType);
+ }
+
+ /**
+ * Create a Mitchell filter effect with specified texture matrix and x/y tile modes.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const GrSamplerState::WrapMode wrapModes[2],
+ Direction direction,
+ SkAlphaType alphaType) {
+ // Ignore the domain on x and y, since this factory relies solely on the wrap mode of the
+ // sampler to constrain texture coordinates
+ return Make(std::move(proxy), srcColorType, matrix, wrapModes,
+ GrTextureDomain::kIgnore_Mode, GrTextureDomain::kIgnore_Mode, direction,
+ alphaType);
+ }
+
+ /**
+ * Create a Mitchell filter effect with specified texture matrix and x/y tile modes. This
+ * supports providing modes for the texture domain explicitly, in the event that it should
+ * override the behavior of the sampler's tile mode (e.g. clamp to border unsupported).
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const GrSamplerState::WrapMode wrapModes[2],
+ GrTextureDomain::Mode modeX,
+ GrTextureDomain::Mode modeY,
+ Direction direction,
+ SkAlphaType alphaType,
+ const SkRect* domain = nullptr) {
+ SkRect resolvedDomain = domain ? *domain : GrTextureDomain::MakeTexelDomain(
+ SkIRect::MakeWH(proxy->width(), proxy->height()), modeX, modeY);
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrBicubicEffect(std::move(proxy), srcColorType, matrix, resolvedDomain,
+ wrapModes, modeX, modeY, direction, alphaType));
+ }
+
+ /**
+ * Create a Mitchell filter effect with a texture matrix and a domain.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ Direction direction,
+ SkAlphaType alphaType) {
+ static const GrSamplerState::WrapMode kClampClamp[] = {
+ GrSamplerState::WrapMode::kClamp, GrSamplerState::WrapMode::kClamp};
+ return Make(std::move(proxy), srcColorType, matrix, kClampClamp,
+ GrTextureDomain::kClamp_Mode, GrTextureDomain::kClamp_Mode, direction,
+ alphaType, &domain);
+ }
+
+ /**
+ * Determines whether the bicubic effect should be used based on the transformation from the
+ * local coords to the device. Returns true if the bicubic effect should be used. filterMode
+ * is set to appropriate filtering mode to use regardless of the return result (e.g. when this
+ * returns false it may indicate that the best fallback is to use kMipMap, kBilerp, or
+ * kNearest).
+ */
+ static bool ShouldUseBicubic(const SkMatrix& localCoordsToDevice,
+ GrSamplerState::Filter* filterMode);
+
+private:
+ GrBicubicEffect(sk_sp<GrTextureProxy>, GrColorType srcColorType, const SkMatrix& matrix,
+ const SkRect& domain, const GrSamplerState::WrapMode wrapModes[2],
+ GrTextureDomain::Mode modeX, GrTextureDomain::Mode modeY, Direction direction,
+ SkAlphaType alphaType);
+ explicit GrBicubicEffect(const GrBicubicEffect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GrCoordTransform fCoordTransform;
+ GrTextureDomain fDomain;
+ TextureSampler fTextureSampler;
+ SkAlphaType fAlphaType;
+ Direction fDirection;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp
new file mode 100644
index 0000000000..4a8c25da6c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrBitmapTextGeoProc.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/effects/GrAtlasedShaderHelpers.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+class GrGLBitmapTextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLBitmapTextGeoProc() : fColor(SK_PMColor4fILLEGAL), fAtlasSize({0,0}) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrBitmapTextGeoProc& btgp = args.fGP.cast<GrBitmapTextGeoProc>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(btgp);
+
+ const char* atlasSizeInvName;
+ fAtlasSizeInvUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat2_GrSLType,
+ "AtlasSizeInv",
+ &atlasSizeInvName);
+
+ GrGLSLVarying uv(kFloat2_GrSLType);
+ GrSLType texIdxType = args.fShaderCaps->integerSupport() ? kInt_GrSLType : kFloat_GrSLType;
+ GrGLSLVarying texIdx(texIdxType);
+ append_index_uv_varyings(args, btgp.inTextureCoords().name(), atlasSizeInvName, &uv,
+ &texIdx, nullptr);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (btgp.hasVertexColor()) {
+ varyingHandler->addPassThroughAttribute(btgp.inColor(), args.fOutputColor);
+ } else {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+
+ // Setup position
+ gpArgs->fPositionVar = btgp.inPosition().asShaderVar();
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ btgp.inPosition().asShaderVar(),
+ btgp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppend("half4 texColor;");
+ append_multitexture_lookup(args, btgp.numTextureSamplers(),
+ texIdx, uv.fsIn(), "texColor");
+
+ if (btgp.maskFormat() == kARGB_GrMaskFormat) {
+ // modulate by color
+ fragBuilder->codeAppendf("%s = %s * texColor;", args.fOutputColor, args.fOutputColor);
+ fragBuilder->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = texColor;", args.fOutputCoverage);
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const GrBitmapTextGeoProc& btgp = gp.cast<GrBitmapTextGeoProc>();
+ if (btgp.color() != fColor && !btgp.hasVertexColor()) {
+ pdman.set4fv(fColorUniform, 1, btgp.color().vec());
+ fColor = btgp.color();
+ }
+
+ const SkISize& atlasSize = btgp.atlasSize();
+ SkASSERT(SkIsPow2(atlasSize.fWidth) && SkIsPow2(atlasSize.fHeight));
+
+ if (fAtlasSize != atlasSize) {
+ pdman.set2f(fAtlasSizeInvUniform, 1.0f / atlasSize.fWidth, 1.0f / atlasSize.fHeight);
+ fAtlasSize = atlasSize;
+ }
+ this->setTransformDataHelper(btgp.localMatrix(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& proc,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrBitmapTextGeoProc& btgp = proc.cast<GrBitmapTextGeoProc>();
+ uint32_t key = 0;
+ key |= btgp.usesW() ? 0x1 : 0x0;
+ key |= btgp.maskFormat() << 1;
+ b->add32(key);
+ b->add32(btgp.numTextureSamplers());
+ }
+
+private:
+ SkPMColor4f fColor;
+ UniformHandle fColorUniform;
+
+ SkISize fAtlasSize;
+ UniformHandle fAtlasSizeInvUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrBitmapTextGeoProc::GrBitmapTextGeoProc(const GrShaderCaps& caps,
+ const SkPMColor4f& color,
+ bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params, GrMaskFormat format,
+ const SkMatrix& localMatrix, bool usesW)
+ : INHERITED(kGrBitmapTextGeoProc_ClassID)
+ , fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesW(usesW)
+ , fMaskFormat(format) {
+ SkASSERT(numActiveProxies <= kMaxTextures);
+
+ if (usesW) {
+ fInPosition = {"inPosition", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ }
+
+ bool hasVertexColor = kA8_GrMaskFormat == fMaskFormat ||
+ kA565_GrMaskFormat == fMaskFormat;
+ if (hasVertexColor) {
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ }
+
+ fInTextureCoords = {"inTextureCoords", kUShort2_GrVertexAttribType,
+ caps.integerSupport() ? kUShort2_GrSLType : kFloat2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
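+    // fInPosition, fInColor, and fInTextureCoords above are consecutive Attribute members, so
+    // they can be handed to setVertexAttributes() as a 3-element array; when there is no vertex
+    // color, fInColor stays uninitialized and is skipped when the attributes are iterated.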
+
+ if (numActiveProxies) {
+ fAtlasSize = proxies[0]->isize();
+ }
+ for (int i = 0; i < numActiveProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params, proxies[i]->textureSwizzle());
+ }
+ this->setTextureSamplerCnt(numActiveProxies);
+}
+
+void GrBitmapTextGeoProc::addNewProxies(const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params) {
+ SkASSERT(numActiveProxies <= kMaxTextures);
+
+ if (!fTextureSamplers[0].isInitialized()) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numActiveProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+
+ if (!fTextureSamplers[i].isInitialized()) {
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params,
+ proxies[i]->textureSwizzle());
+ }
+ }
+ this->setTextureSamplerCnt(numActiveProxies);
+}
+
+void GrBitmapTextGeoProc::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLBitmapTextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrBitmapTextGeoProc::createGLSLInstance(const GrShaderCaps& caps) const {
+ return new GrGLBitmapTextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrBitmapTextGeoProc);
+
+#if GR_TEST_UTILS
+
+sk_sp<GrGeometryProcessor> GrBitmapTextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxies[kMaxTextures] = {
+ d->textureProxy(texIdx),
+ nullptr,
+ nullptr,
+ nullptr
+ };
+
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(d->fRandom, wrapModes);
+ GrSamplerState samplerState(wrapModes, d->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+
+ GrMaskFormat format = kARGB_GrMaskFormat; // init to avoid warning
+ switch (d->fRandom->nextULessThan(3)) {
+ case 0:
+ format = kA8_GrMaskFormat;
+ break;
+ case 1:
+ format = kA565_GrMaskFormat;
+ break;
+ case 2:
+ format = kARGB_GrMaskFormat;
+ break;
+ }
+
+ return GrBitmapTextGeoProc::Make(*d->caps()->shaderCaps(),
+ SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ d->fRandom->nextBool(),
+ proxies, 1, samplerState, format,
+ GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool());
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h
new file mode 100644
index 0000000000..3c29c4a585
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBitmapTextGeoProc_DEFINED
+#define GrBitmapTextGeoProc_DEFINED
+
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessor.h"
+
+class GrGLBitmapTextGeoProc;
+class GrInvariantOutput;
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a texture.
+ * It allows explicit specification of the filtering and wrap modes (GrSamplerState). The input
+ * coords are a custom attribute.
+ */
+class GrBitmapTextGeoProc : public GrGeometryProcessor {
+public:
+ static constexpr int kMaxTextures = 4;
+
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps& caps,
+ const SkPMColor4f& color, bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& p, GrMaskFormat format,
+ const SkMatrix& localMatrix, bool usesW) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrBitmapTextGeoProc(caps, color, wideColor, proxies, numActiveProxies, p, format,
+ localMatrix, usesW));
+ }
+
+ ~GrBitmapTextGeoProc() override {}
+
+ const char* name() const override { return "Texture"; }
+
+ const Attribute& inPosition() const { return fInPosition; }
+ const Attribute& inColor() const { return fInColor; }
+ const Attribute& inTextureCoords() const { return fInTextureCoords; }
+ GrMaskFormat maskFormat() const { return fMaskFormat; }
+ const SkPMColor4f& color() const { return fColor; }
+ bool hasVertexColor() const { return fInColor.isInitialized(); }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesW() const { return fUsesW; }
+ const SkISize& atlasSize() const { return fAtlasSize; }
+
+ void addNewProxies(const sk_sp<GrTextureProxy>*, int numActiveProxies, const GrSamplerState&);
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps& caps) const override;
+
+private:
+ GrBitmapTextGeoProc(const GrShaderCaps&, const SkPMColor4f&, bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies, int numProxies,
+ const GrSamplerState& params, GrMaskFormat format,
+ const SkMatrix& localMatrix, bool usesW);
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSamplers[i]; }
+
+ SkPMColor4f fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesW;
+ SkISize fAtlasSize; // size for all textures used with fTextureSamplers[].
+ TextureSampler fTextureSamplers[kMaxTextures];
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInTextureCoords;
+ GrMaskFormat fMaskFormat;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBlurredEdgeFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrBlurredEdgeFragmentProcessor.fp
new file mode 100644
index 0000000000..6126c7d5f6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBlurredEdgeFragmentProcessor.fp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+enum class Mode {
+ kGaussian = 0,
+ kSmoothStep = 1
+};
+
+layout(key) in Mode mode;
+
+void main() {
+ half factor = 1.0 - sk_InColor.a;
+ @switch (mode) {
+ case Mode::kGaussian:
+ factor = half(exp(-factor * factor * 4.0) - 0.018);
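+            // 0.018 ~= exp(-4.0), so the subtraction drives the Gaussian tail essentially
+            // to zero at factor == 1 (the outer edge of the blur).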
+ break;
+ case Mode::kSmoothStep:
+ factor = smoothstep(1.0, 0.0, factor);
+ break;
+ }
+ sk_OutColor = half4(factor);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrCircleBlurFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrCircleBlurFragmentProcessor.fp
new file mode 100644
index 0000000000..4228a0333f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCircleBlurFragmentProcessor.fp
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in half4 circleRect;
+in half textureRadius;
+in half solidRadius;
+in uniform sampler2D blurProfileSampler;
+
+// The data is formatted as:
+// x, y - the center of the circle
+// z - inner radius that should map to 0th entry in the texture.
+// w - the inverse of the distance over which the texture is stretched.
+uniform half4 circleData;
+
+@optimizationFlags {
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(GrProxyProvider*,
+ const SkRect& circle, float sigma);
+}
+
+@setData(data) {
+ data.set4f(circleData, circleRect.centerX(), circleRect.centerY(), solidRadius,
+ 1.f / textureRadius);
+}
+
+@cpp {
+ #include "src/gpu/GrProxyProvider.h"
+
+ // Computes an unnormalized half kernel (right side). Returns the summation of all the half
+ // kernel values.
+ static float make_unnormalized_half_kernel(float* halfKernel, int halfKernelSize, float sigma) {
+ const float invSigma = 1.f / sigma;
+ const float b = -0.5f * invSigma * invSigma;
+ float tot = 0.0f;
+ // Compute half kernel values at half pixel steps out from the center.
+ float t = 0.5f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ float value = expf(t * t * b);
+ tot += value;
+ halfKernel[i] = value;
+ t += 1.f;
+ }
+ return tot;
+ }
+
+ // Create a Gaussian half-kernel (right side) and a summed area table given a sigma and number
+ // of discrete steps. The half kernel is normalized to sum to 0.5.
+ static void make_half_kernel_and_summed_table(float* halfKernel, float* summedHalfKernel,
+ int halfKernelSize, float sigma) {
+ // The half kernel should sum to 0.5 not 1.0.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel, halfKernelSize, sigma);
+ float sum = 0.f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[i] /= tot;
+ sum += halfKernel[i];
+ summedHalfKernel[i] = sum;
+ }
+ }
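+    // Note: with this normalization summedHalfKernel[halfKernelSize - 1] lands at 0.5 (up to
+    // float rounding), since the two mirrored halves of the full kernel sum to 1.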
+
+ // Applies the 1D half kernel vertically at points along the x axis to a circle centered at the
+ // origin with radius circleR.
+    static void apply_kernel_in_y(float* results, int numSteps, float firstX, float circleR,
+                                  int halfKernelSize, const float* summedHalfKernelTable) {
+ float x = firstX;
+ for (int i = 0; i < numSteps; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ results[i] = 0;
+ continue;
+ }
+ float y = sqrtf(circleR * circleR - x * x);
+            // In the column at x we exit the circle at +y and -y.
+            // The summed table entry j actually reflects an offset of j + 0.5.
+ y -= 0.5f;
+ int yInt = SkScalarFloorToInt(y);
+ SkASSERT(yInt >= -1);
+ if (y < 0) {
+ results[i] = (y + 0.5f) * summedHalfKernelTable[0];
+ } else if (yInt >= halfKernelSize - 1) {
+ results[i] = 0.5f;
+ } else {
+ float yFrac = y - yInt;
+ results[i] = (1.f - yFrac) * summedHalfKernelTable[yInt] +
+ yFrac * summedHalfKernelTable[yInt + 1];
+ }
+ }
+ }
+
+ // Apply a Gaussian at point (evalX, 0) to a circle centered at the origin with radius circleR.
+ // This relies on having a half kernel computed for the Gaussian and a table of applications of
+ // the half kernel in y to columns at (evalX - halfKernel, evalX - halfKernel + 1, ..., evalX +
+ // halfKernel) passed in as yKernelEvaluations.
+ static uint8_t eval_at(float evalX, float circleR, const float* halfKernel, int halfKernelSize,
+ const float* yKernelEvaluations) {
+ float acc = 0;
+
+ float x = evalX - halfKernelSize;
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i];
+ acc += verticalEval * halfKernel[halfKernelSize - i - 1];
+ }
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i + halfKernelSize];
+ acc += verticalEval * halfKernel[i];
+ }
+ // Since we applied a half kernel in y we multiply acc by 2 (the circle is symmetric about
+ // the x axis).
+ return SkUnitScalarClampToByte(2.f * acc);
+ }
+
+ // This function creates a profile of a blurred circle. It does this by computing a kernel for
+ // half the Gaussian and a matching summed area table. The summed area table is used to compute
+ // an array of vertical applications of the half kernel to the circle along the x axis. The
+ // table of y evaluations has 2 * k + n entries where k is the size of the half kernel and n is
+ // the size of the profile being computed. Then for each of the n profile entries we walk out k
+ // steps in each horizontal direction multiplying the corresponding y evaluation by the half
+ // kernel entry and sum these values to compute the profile entry.
+ static void create_circle_profile(uint8_t* weights, float sigma, float circleR,
+ int profileTextureWidth) {
+ const int numSteps = profileTextureWidth;
+
+ // The full kernel is 6 sigmas wide.
+ int halfKernelSize = SkScalarCeilToInt(6.0f * sigma);
+ // round up to next multiple of 2 and then divide by 2
+ halfKernelSize = ((halfKernelSize + 1) & ~1) >> 1;
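+        // e.g. sigma = 2.5: ceil(6 * 2.5) = 15, rounded up to 16, giving halfKernelSize = 8.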
+
+ // Number of x steps at which to apply kernel in y to cover all the profile samples in x.
+ int numYSteps = numSteps + 2 * halfKernelSize;
+
+ SkAutoTArray<float> bulkAlloc(halfKernelSize + halfKernelSize + numYSteps);
+ float* halfKernel = bulkAlloc.get();
+ float* summedKernel = bulkAlloc.get() + halfKernelSize;
+ float* yEvals = bulkAlloc.get() + 2 * halfKernelSize;
+ make_half_kernel_and_summed_table(halfKernel, summedKernel, halfKernelSize, sigma);
+
+ float firstX = -halfKernelSize + 0.5f;
+ apply_kernel_in_y(yEvals, numYSteps, firstX, circleR, halfKernelSize, summedKernel);
+
+ for (int i = 0; i < numSteps - 1; ++i) {
+ float evalX = i + 0.5f;
+ weights[i] = eval_at(evalX, circleR, halfKernel, halfKernelSize, yEvals + i);
+ }
+ // Ensure the tail of the Gaussian goes to zero.
+ weights[numSteps - 1] = 0;
+ }
+
+ static void create_half_plane_profile(uint8_t* profile, int profileWidth) {
+ SkASSERT(!(profileWidth & 0x1));
+ // The full kernel is 6 sigmas wide.
+ float sigma = profileWidth / 6.f;
+ int halfKernelSize = profileWidth / 2;
+
+ SkAutoTArray<float> halfKernel(halfKernelSize);
+
+ // The half kernel should sum to 0.5.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel.get(), halfKernelSize,
+ sigma);
+ float sum = 0.f;
+ // Populate the profile from the right edge to the middle.
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[halfKernelSize - i - 1] /= tot;
+ sum += halfKernel[halfKernelSize - i - 1];
+ profile[profileWidth - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Populate the profile from the middle to the left edge (by flipping the half kernel and
+ // continuing the summation).
+ for (int i = 0; i < halfKernelSize; ++i) {
+ sum += halfKernel[i];
+ profile[halfKernelSize - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Ensure tail goes to 0.
+ profile[profileWidth - 1] = 0;
+ }
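+
+ // The resulting profile falls from ~1 at index 0 (deep inside the half plane) through
+ // ~0.5 at the midpoint (a point exactly on the edge) to 0 at the last entry: an erf-like
+ // falloff sampled across profileWidth texels.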
+
+ static sk_sp<GrTextureProxy> create_profile_texture(GrProxyProvider* proxyProvider,
+ const SkRect& circle,
+ float sigma,
+ float* solidRadius, float* textureRadius) {
+ float circleR = circle.width() / 2.0f;
+ if (circleR < SK_ScalarNearlyZero) {
+ return nullptr;
+ }
+ // Profile textures are cached by the ratio of sigma to circle radius and by the size of the
+ // profile texture (binned by powers of 2).
+ SkScalar sigmaToCircleRRatio = sigma / circleR;
+ // When sigma is really small this becomes equivalent to convolving a Gaussian with a
+ // half-plane. Similarly, in the extreme high-ratio cases the circle becomes a point WRT
+ // the Gaussian and the profile texture is just a Gaussian evaluation. However, we haven't
+ // yet implemented this latter optimization.
+ sigmaToCircleRRatio = SkTMin(sigmaToCircleRRatio, 8.f);
+ SkFixed sigmaToCircleRRatioFixed;
+ static const SkScalar kHalfPlaneThreshold = 0.1f;
+ bool useHalfPlaneApprox = false;
+ if (sigmaToCircleRRatio <= kHalfPlaneThreshold) {
+ useHalfPlaneApprox = true;
+ sigmaToCircleRRatioFixed = 0;
+ *solidRadius = circleR - 3 * sigma;
+ *textureRadius = 6 * sigma;
+ } else {
+ // Convert to fixed point for the key.
+ sigmaToCircleRRatioFixed = SkScalarToFixed(sigmaToCircleRRatio);
+ // We shave off some bits to reduce the number of unique entries. We could probably
+ // shave off more than we do.
+ sigmaToCircleRRatioFixed &= ~0xff;
+ sigmaToCircleRRatio = SkFixedToScalar(sigmaToCircleRRatioFixed);
+ sigma = circleR * sigmaToCircleRRatio;
+ *solidRadius = 0;
+ *textureRadius = circleR + 3 * sigma;
+ }
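+
+ // For example (assumed values): sigmaToCircleRRatio = 0.3 gives SkScalarToFixed(0.3) =
+ // 0x4CCD; masking off the low 8 bits yields 0x4C00 (~0.2969), so all ratios within about
+ // 1/256 of each other share a single cached profile texture.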
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
+ builder[0] = sigmaToCircleRRatioFixed;
+ builder.finish();
+
+ sk_sp<GrTextureProxy> blurProfile = proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin);
+ if (!blurProfile) {
+ static constexpr int kProfileTextureWidth = 512;
+
+ SkBitmap bm;
+ if (!bm.tryAllocPixels(SkImageInfo::MakeA8(kProfileTextureWidth, 1))) {
+ return nullptr;
+ }
+
+ if (useHalfPlaneApprox) {
+ create_half_plane_profile(bm.getAddr8(0, 0), kProfileTextureWidth);
+ } else {
+ // Rescale params to the size of the texture we're creating.
+ SkScalar scale = kProfileTextureWidth / *textureRadius;
+ create_circle_profile(bm.getAddr8(0, 0), sigma * scale, circleR * scale,
+ kProfileTextureWidth);
+ }
+
+ bm.setImmutable();
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bm);
+
+ blurProfile = proxyProvider->createTextureProxy(std::move(image), 1,
+ SkBudgeted::kYes, SkBackingFit::kExact);
+ if (!blurProfile) {
+ return nullptr;
+ }
+
+ SkASSERT(blurProfile->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, blurProfile.get());
+ }
+
+ return blurProfile;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> GrCircleBlurFragmentProcessor::Make(
+ GrProxyProvider* proxyProvider, const SkRect& circle, float sigma) {
+ float solidRadius;
+ float textureRadius;
+ sk_sp<GrTextureProxy> profile(create_profile_texture(proxyProvider, circle, sigma,
+ &solidRadius, &textureRadius));
+ if (!profile) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleBlurFragmentProcessor(
+ circle, textureRadius, solidRadius, std::move(profile)));
+ }
+}
+
+void main() {
+ // We just want to compute "(length(vec) - circleData.z + 0.5) * circleData.w" but need to
+ // rearrange for precision.
+ half2 vec = half2(half((sk_FragCoord.x - circleData.x) * circleData.w),
+ half((sk_FragCoord.y - circleData.y) * circleData.w));
+ half dist = length(vec) + (0.5 - circleData.z) * circleData.w;
+ sk_OutColor = sk_InColor * sample(blurProfileSampler, half2(dist, 0.5)).a;
+}
+
+@test(testData) {
+ SkScalar wh = testData->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar sigma = testData->fRandom->nextRangeF(1.f, 10.f);
+ SkRect circle = SkRect::MakeWH(wh, wh);
+ return GrCircleBlurFragmentProcessor::Make(testData->proxyProvider(), circle, sigma);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrCircleEffect.fp b/gfx/skia/skia/src/gpu/effects/GrCircleEffect.fp
new file mode 100644
index 0000000000..957497a3fc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCircleEffect.fp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+layout(key) in GrClipEdgeType edgeType;
+in float2 center;
+in float radius;
+
+float2 prevCenter;
+float prevRadius = -1;
+// The circle uniform is (center.x, center.y, radius + 0.5, 1 / (radius + 0.5)) for regular
+// fills and (..., radius - 0.5, 1 / (radius - 0.5)) for inverse fills.
+uniform float4 circle;
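+
+// For example (assumed values): a regular fill with center (10, 20) and radius 10 sets
+// circle = (10, 20, 10.5, 1 / 10.5). In main() below, a fragment 9.5px from the center
+// evaluates to d = 1 (full coverage) and one 10.5px away to d = 0, a one-pixel AA ramp.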
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, SkPoint center,
+ float radius) {
+ // A radius below half causes the implicit insetting done by this processor to become
+ // inverted. We could handle this case by making the processor code more complicated.
+ if (radius < .5f && GrProcessorEdgeTypeIsInverseFill(edgeType)) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleEffect(edgeType, center, radius));
+ }
+}
+
+@optimizationFlags { kCompatibleWithCoverageAsAlpha_OptimizationFlag }
+
+@setData(pdman) {
+ if (radius != prevRadius || center != prevCenter) {
+ SkScalar effectiveRadius = radius;
+ if (GrProcessorEdgeTypeIsInverseFill((GrClipEdgeType) edgeType)) {
+ effectiveRadius -= 0.5f;
+ // When the radius is 0.5 effectiveRadius is 0 which causes an inf * 0 in the shader.
+ effectiveRadius = SkTMax(0.001f, effectiveRadius);
+ } else {
+ effectiveRadius += 0.5f;
+ }
+ pdman.set4f(circle, center.fX, center.fY, effectiveRadius,
+ SkScalarInvert(effectiveRadius));
+ prevCenter = center;
+ prevRadius = radius;
+ }
+}
+
+void main() {
+ // TODO: Right now the distance to circle calculation is performed in a space normalized to the
+ // radius and then denormalized. This is to mitigate overflow on devices that don't have full
+ // float.
+ half d;
+ @if (edgeType == GrClipEdgeType::kInverseFillBW ||
+ edgeType == GrClipEdgeType::kInverseFillAA) {
+ d = half((length((circle.xy - sk_FragCoord.xy) * circle.w) - 1.0) * circle.z);
+ } else {
+ d = half((1.0 - length((circle.xy - sk_FragCoord.xy) * circle.w)) * circle.z);
+ }
+ @if (edgeType == GrClipEdgeType::kFillAA ||
+ edgeType == GrClipEdgeType::kInverseFillAA ||
+ edgeType == GrClipEdgeType::kHairlineAA) {
+ sk_OutColor = sk_InColor * saturate(d);
+ } else {
+ sk_OutColor = d > 0.5 ? sk_InColor : half4(0);
+ }
+}
+
+@test(testData) {
+ SkPoint center;
+ center.fX = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar radius = testData->fRandom->nextRangeF(1.f, 1000.f);
+ GrClipEdgeType et;
+ do {
+ et = (GrClipEdgeType) testData->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ } while (GrClipEdgeType::kHairlineAA == et);
+ return GrCircleEffect::Make(et, center, radius);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrColorMatrixFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrColorMatrixFragmentProcessor.fp
new file mode 100644
index 0000000000..74203b5c7f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrColorMatrixFragmentProcessor.fp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+layout(ctype=SkMatrix44, tracked) in uniform half4x4 m;
+layout(ctype=SkVector4, tracked) in uniform half4 v;
+layout(key) in bool unpremulInput;
+layout(key) in bool clampRGBOutput;
+layout(key) in bool premulOutput;
+
+@optimizationFlags {
+ kConstantOutputForConstantInput_OptimizationFlag
+}
+
+void main() {
+ half4 inputColor = sk_InColor;
+ @if (unpremulInput) {
+ // The max() is to guard against 0 / 0 during unpremul when the incoming color is
+ // transparent black.
+ half nonZeroAlpha = max(inputColor.a, 0.0001);
+ inputColor = half4(inputColor.rgb / nonZeroAlpha, nonZeroAlpha);
+ }
+ sk_OutColor = m * inputColor + v;
+ @if (clampRGBOutput) {
+ sk_OutColor = saturate(sk_OutColor);
+ } else {
+ sk_OutColor.a = saturate(sk_OutColor.a);
+ }
+ @if (premulOutput) {
+ sk_OutColor.rgb *= sk_OutColor.a;
+ }
+}
+
+@class {
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ SkColor4f color;
+ if (unpremulInput) {
+ color = input.unpremul();
+ } else {
+ color.fR = input.fR;
+ color.fG = input.fG;
+ color.fB = input.fB;
+ color.fA = input.fA;
+ }
+ m.mapScalars(color.vec());
+ color.fR += v.fData[0];
+ color.fG += v.fData[1];
+ color.fB += v.fData[2];
+ color.fA += v.fData[3];
+ color.fA = SkTPin(color.fA, 0.f, 1.f);
+ if (clampRGBOutput) {
+ color.fR = SkTPin(color.fR, 0.f, 1.f);
+ color.fG = SkTPin(color.fG, 0.f, 1.f);
+ color.fB = SkTPin(color.fB, 0.f, 1.f);
+ }
+ if (premulOutput) {
+ return color.premul();
+ } else {
+ return {color.fR, color.fG, color.fB, color.fA};
+ }
+ }
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const float matrix[20], bool unpremulInput, bool clampRGBOutput, bool premulOutput) {
+ SkMatrix44 m44;
+ m44.set4x4(
+ matrix[0], matrix[5], matrix[10], matrix[15],
+ matrix[1], matrix[6], matrix[11], matrix[16],
+ matrix[2], matrix[7], matrix[12], matrix[17],
+ matrix[3], matrix[8], matrix[13], matrix[18]
+ );
+ auto v4 = SkVector4(matrix[4], matrix[9], matrix[14], matrix[19]);
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorMatrixFragmentProcessor(m44, v4, unpremulInput, clampRGBOutput, premulOutput));
+ }
+}
+
+@test(d) {
+ float m[20];
+ for (int i = 0; i < 20; ++i) {
+ m[i] = d->fRandom->nextRangeScalar(-10.f, 10.f);
+ }
+ bool unpremul = d->fRandom->nextBool();
+ bool clampRGB = d->fRandom->nextBool();
+ bool premul = d->fRandom->nextBool();
+ return Make(m, unpremul, clampRGB, premul);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrComposeLerpEffect.fp b/gfx/skia/skia/src/gpu/effects/GrComposeLerpEffect.fp
new file mode 100644
index 0000000000..8b03de228d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrComposeLerpEffect.fp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in fragmentProcessor? child1;
+in fragmentProcessor? child2;
+in uniform float weight;
+
+void main() {
+ sk_OutColor = mix(child1 != null ? sample(child1) : sk_InColor,
+ child2 != null ? sample(child2) : sk_InColor,
+ half(weight));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrComposeLerpRedEffect.fp b/gfx/skia/skia/src/gpu/effects/GrComposeLerpRedEffect.fp
new file mode 100644
index 0000000000..e0f7486c4e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrComposeLerpRedEffect.fp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in fragmentProcessor? child1;
+in fragmentProcessor? child2;
+in fragmentProcessor lerp;
+
+void main() {
+ sk_OutColor = mix(child1 != null ? sample(child1) : sk_InColor,
+ child2 != null ? sample(child2) : sk_InColor,
+ sample(lerp).r);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.fp b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.fp
new file mode 100644
index 0000000000..0387c3b39c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.fp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+@header {
+ #include "include/gpu/GrContext.h"
+ #include "src/gpu/GrClip.h"
+ #include "src/gpu/GrContextPriv.h"
+ #include "src/gpu/GrImageInfo.h"
+ #include "src/gpu/GrProxyProvider.h"
+ #include "src/gpu/GrRenderTargetContext.h"
+}
+
+@class {
+ static bool TestForPreservingPMConversions(GrContext* context) {
+ static constexpr int kSize = 256;
+ static constexpr GrColorType kColorType = GrColorType::kRGBA_8888;
+ SkAutoTMalloc<uint32_t> data(kSize * kSize * 3);
+ uint32_t* srcData = data.get();
+ uint32_t* firstRead = data.get() + kSize * kSize;
+ uint32_t* secondRead = data.get() + 2 * kSize * kSize;
+
+ // Fill with every possible premultiplied (A, color channel) value. There will be 256-y
+ // duplicate values in row y. We set r, g, and b to the same value since they are handled
+ // identically.
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x < kSize; ++x) {
+ uint8_t* color = reinterpret_cast<uint8_t*>(&srcData[kSize*y + x]);
+ color[3] = y;
+ color[2] = SkTMin(x, y);
+ color[1] = SkTMin(x, y);
+ color[0] = SkTMin(x, y);
+ }
+ }
+ memset(firstRead, 0, kSize * kSize * sizeof(uint32_t));
+ memset(secondRead, 0, kSize * kSize * sizeof(uint32_t));
+
+ const SkImageInfo ii = SkImageInfo::Make(kSize, kSize,
+ kRGBA_8888_SkColorType, kPremul_SkAlphaType);
+
+ auto readRTC =
+ context->priv().makeDeferredRenderTargetContext(SkBackingFit::kExact,
+ kSize, kSize,
+ kColorType, nullptr);
+ auto tempRTC =
+ context->priv().makeDeferredRenderTargetContext(SkBackingFit::kExact,
+ kSize, kSize,
+ kColorType, nullptr);
+ if (!readRTC || !readRTC->asTextureProxy() || !tempRTC) {
+ return false;
+ }
+ // Adding discard to appease a Vulkan validation warning about loading uninitialized
+ // data on draw.
+ readRTC->discard();
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ SkPixmap pixmap(ii, srcData, 4 * kSize);
+
+ // This function is only ever called if we are in a GrContext that has a GrGpu since we are
+ // calling read pixels here. Thus the pixel data will be uploaded immediately and we don't
+ // need to keep the pixel data alive in the proxy. Therefore the ReleaseProc is nullptr.
+ sk_sp<SkImage> image = SkImage::MakeFromRaster(pixmap, nullptr, nullptr);
+ GrColorType dataColorType = SkColorTypeToGrColorType(image->colorType());
+ sk_sp<GrTextureProxy> dataProxy = proxyProvider->createTextureProxy(std::move(image),
+ 1,
+ SkBudgeted::kYes,
+ SkBackingFit::kExact);
+ if (!dataProxy) {
+ return false;
+ }
+
+ static const SkRect kRect = SkRect::MakeIWH(kSize, kSize);
+
+ // We do a PM->UPM draw from dataTex to readTex and read the data. Then we do a UPM->PM draw
+ // from readTex to tempTex followed by a PM->UPM draw to readTex and finally read the data.
+ // We then verify that the two reads produced the same values.
+
+ GrPaint paint1;
+ GrPaint paint2;
+ GrPaint paint3;
+ std::unique_ptr<GrFragmentProcessor> pmToUPM(
+ new GrConfigConversionEffect(PMConversion::kToUnpremul));
+ std::unique_ptr<GrFragmentProcessor> upmToPM(
+ new GrConfigConversionEffect(PMConversion::kToPremul));
+
+ paint1.addColorTextureProcessor(dataProxy, dataColorType, SkMatrix::I());
+ paint1.addColorFragmentProcessor(pmToUPM->clone());
+ paint1.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readRTC->fillRectToRect(GrNoClip(), std::move(paint1), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+ if (!readRTC->readPixels(ii, firstRead, 0, {0, 0})) {
+ return false;
+ }
+
+ // Adding discard to appease a Vulkan validation warning about loading uninitialized
+ // data on draw.
+ tempRTC->discard();
+
+ paint2.addColorTextureProcessor(readRTC->asTextureProxyRef(), kColorType, SkMatrix::I());
+ paint2.addColorFragmentProcessor(std::move(upmToPM));
+ paint2.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ tempRTC->fillRectToRect(GrNoClip(), std::move(paint2), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+
+ paint3.addColorTextureProcessor(tempRTC->asTextureProxyRef(), kColorType, SkMatrix::I());
+ paint3.addColorFragmentProcessor(std::move(pmToUPM));
+ paint3.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readRTC->fillRectToRect(GrNoClip(), std::move(paint3), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+
+ if (!readRTC->readPixels(ii, secondRead, 0, {0, 0})) {
+ return false;
+ }
+
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x <= y; ++x) {
+ if (firstRead[kSize * y + x] != secondRead[kSize * y + x]) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp,
+ PMConversion pmConversion) {
+ if (!fp) {
+ return nullptr;
+ }
+ std::unique_ptr<GrFragmentProcessor> ccFP(new GrConfigConversionEffect(pmConversion));
+ std::unique_ptr<GrFragmentProcessor> fpPipeline[] = { std::move(fp), std::move(ccFP) };
+ return GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+ }
+}
+
+layout(key) in PMConversion pmConversion;
+
+@emitCode {
+ fragBuilder->forceHighPrecision();
+}
+
+void main() {
+ // Aggressively round to the nearest exact (N / 255) floating point value. This lets us find a
+ // round-trip preserving pair on some GPUs that do odd byte to float conversion.
+ sk_OutColor = floor(sk_InColor * 255 + 0.5) / 255;
+
+ @switch (pmConversion) {
+ case PMConversion::kToPremul:
+ sk_OutColor.rgb = floor(sk_OutColor.rgb * sk_OutColor.a * 255 + 0.5) / 255;
+ break;
+
+ case PMConversion::kToUnpremul:
+ sk_OutColor.rgb = sk_OutColor.a <= 0.0 ?
+ half3(0) :
+ floor(sk_OutColor.rgb / sk_OutColor.a * 255 + 0.5) / 255;
+ break;
+ }
+}
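+
+// For intuition, a worked example of the rounding above (assumed value): an input channel
+// of 0.5004 becomes floor(0.5004 * 255 + 0.5) / 255 = floor(128.102) / 255 = 128 / 255
+// ~= 0.50196, the nearest exactly representable byte value.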
+
+@test(data) {
+ PMConversion pmConv = static_cast<PMConversion>(data->fRandom->nextULessThan(
+ (int) PMConversion::kPMConversionCnt));
+ return std::unique_ptr<GrFragmentProcessor>(new GrConfigConversionEffect(pmConv));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.fp
new file mode 100644
index 0000000000..18cc91a4b1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.fp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+enum class InputMode {
+ kIgnore,
+ kModulateRGBA,
+ kModulateA,
+
+ kLast = kModulateA
+};
+
+layout(ctype=SkPMColor4f, tracked) in uniform half4 color;
+layout(key) in InputMode mode;
+
+@optimizationFlags {
+ OptFlags(color, mode)
+}
+
+void main() {
+ @switch (mode) {
+ case InputMode::kIgnore:
+ sk_OutColor = color;
+ break;
+ case InputMode::kModulateRGBA:
+ sk_OutColor = sk_InColor * color;
+ break;
+ case InputMode::kModulateA:
+ sk_OutColor = sk_InColor.a * color;
+ break;
+ }
+}
+
+@class {
+ static const int kInputModeCnt = (int) InputMode::kLast + 1;
+
+ static OptimizationFlags OptFlags(const SkPMColor4f& color, InputMode mode) {
+ OptimizationFlags flags = kConstantOutputForConstantInput_OptimizationFlag;
+ if (mode != InputMode::kIgnore) {
+ flags |= kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+ if (color.isOpaque()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ switch (mode) {
+ case InputMode::kIgnore:
+ return color;
+ case InputMode::kModulateA:
+ return color * input.fA;
+ case InputMode::kModulateRGBA:
+ return color * input;
+ }
+ SK_ABORT("Unexpected mode");
+ }
+}
+
+@test(d) {
+ SkPMColor4f color;
+ int colorPicker = d->fRandom->nextULessThan(3);
+ switch (colorPicker) {
+ case 0: {
+ uint32_t a = d->fRandom->nextULessThan(0x100);
+ uint32_t r = d->fRandom->nextULessThan(a+1);
+ uint32_t g = d->fRandom->nextULessThan(a+1);
+ uint32_t b = d->fRandom->nextULessThan(a+1);
+ color = SkPMColor4f::FromBytes_RGBA(GrColorPackRGBA(r, g, b, a));
+ break;
+ }
+ case 1:
+ color = SK_PMColor4fTRANSPARENT;
+ break;
+ case 2:
+ uint32_t c = d->fRandom->nextULessThan(0x100);
+ color = SkPMColor4f::FromBytes_RGBA(c | (c << 8) | (c << 16) | (c << 24));
+ break;
+ }
+ InputMode mode = static_cast<InputMode>(d->fRandom->nextULessThan(kInputModeCnt));
+ return GrConstColorProcessor::Make(color, mode);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp
new file mode 100644
index 0000000000..64911cc78f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPathPriv.h"
+#include "src/gpu/effects/GrConvexPolyEffect.h"
+#include "src/gpu/effects/generated/GrAARectEffect.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLConvexPolyEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLConvexPolyEffect() {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fPrevEdges); ++i) {
+ fPrevEdges[i] = SK_ScalarNaN;
+ }
+ }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fEdgeUniform;
+ SkScalar fPrevEdges[3 * GrConvexPolyEffect::kMaxEdges];
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLConvexPolyEffect::emitCode(EmitArgs& args) {
+ const GrConvexPolyEffect& cpe = args.fFp.cast<GrConvexPolyEffect>();
+
+ const char *edgeArrayName;
+ fEdgeUniform = args.fUniformHandler->addUniformArray(kFragment_GrShaderFlag,
+ kHalf3_GrSLType,
+ "edges",
+ cpe.getEdgeCount(),
+ &edgeArrayName);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppend("\t\thalf alpha = 1.0;\n");
+ fragBuilder->codeAppend("\t\thalf edge;\n");
+ for (int i = 0; i < cpe.getEdgeCount(); ++i) {
+ fragBuilder->codeAppendf("\t\tedge = dot(%s[%d], half3(half(sk_FragCoord.x), "
+ "half(sk_FragCoord.y), "
+ "1));\n",
+ edgeArrayName, i);
+ if (GrProcessorEdgeTypeIsAA(cpe.getEdgeType())) {
+ fragBuilder->codeAppend("\t\tedge = saturate(edge);\n");
+ } else {
+ fragBuilder->codeAppend("\t\tedge = edge >= 0.5 ? 1.0 : 0.0;\n");
+ }
+ fragBuilder->codeAppend("\t\talpha *= edge;\n");
+ }
+
+ if (GrProcessorEdgeTypeIsInverseFill(cpe.getEdgeType())) {
+ fragBuilder->codeAppend("\talpha = 1.0 - alpha;\n");
+ }
+ fragBuilder->codeAppendf("\t%s = %s * alpha;\n", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLConvexPolyEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& effect) {
+ const GrConvexPolyEffect& cpe = effect.cast<GrConvexPolyEffect>();
+ size_t byteSize = 3 * cpe.getEdgeCount() * sizeof(SkScalar);
+ if (0 != memcmp(fPrevEdges, cpe.getEdges(), byteSize)) {
+ pdman.set3fv(fEdgeUniform, cpe.getEdgeCount(), cpe.getEdges());
+ memcpy(fPrevEdges, cpe.getEdges(), byteSize);
+ }
+}
+
+void GrGLConvexPolyEffect::GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConvexPolyEffect& cpe = processor.cast<GrConvexPolyEffect>();
+ GR_STATIC_ASSERT(kGrClipEdgeTypeCnt <= 8);
+ uint32_t key = (cpe.getEdgeCount() << 3) | (int) cpe.getEdgeType();
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor> GrConvexPolyEffect::Make(GrClipEdgeType type,
+ const SkPath& path) {
+ if (GrClipEdgeType::kHairlineAA == type) {
+ return nullptr;
+ }
+ if (path.getSegmentMasks() != SkPath::kLine_SegmentMask ||
+ !path.isConvex()) {
+ return nullptr;
+ }
+
+ SkPathPriv::FirstDirection dir;
+ // The only way this should fail is if the clip is effectively an infinitely thin line. In that
+ // case nothing is inside the clip. It'd be nice to detect this at a higher level and either
+ // skip the draw or omit the clip element.
+ if (!SkPathPriv::CheapComputeFirstDirection(path, &dir)) {
+ if (GrProcessorEdgeTypeIsInverseFill(type)) {
+ return GrConstColorProcessor::Make(SK_PMColor4fWHITE,
+ GrConstColorProcessor::InputMode::kModulateRGBA);
+ }
+ // This could use kIgnore instead of kModulateRGBA but it would trigger a debug print
+ // about a coverage processor not being compatible with the alpha-as-coverage optimization.
+ // We don't really care about this unlikely case so we just use kModulateRGBA to suppress
+ // the print.
+ return GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kModulateRGBA);
+ }
+
+ SkScalar edges[3 * kMaxEdges];
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ SkPath::Iter iter(path, true);
+
+ // SkPath considers itself convex so long as there is a convex contour within it,
+ // regardless of any degenerate contours such as a string of moveTos before it.
+ // Iterate here to consume any degenerate contours and only process the points
+ // on the actual convex contour.
+ int n = 0;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ SkASSERT(n == 0);
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb: {
+ if (n >= kMaxEdges) {
+ return nullptr;
+ }
+ if (pts[0] != pts[1]) {
+ SkVector v = pts[1] - pts[0];
+ v.normalize();
+ if (SkPathPriv::kCCW_FirstDirection == dir) {
+ edges[3 * n] = v.fY;
+ edges[3 * n + 1] = -v.fX;
+ } else {
+ edges[3 * n] = -v.fY;
+ edges[3 * n + 1] = v.fX;
+ }
+ edges[3 * n + 2] = -(edges[3 * n] * pts[1].fX + edges[3 * n + 1] * pts[1].fY);
+ ++n;
+ }
+ break;
+ }
+ default:
+ return nullptr;
+ }
+ }
+
+ if (path.isInverseFillType()) {
+ type = GrInvertProcessorEdgeType(type);
+ }
+ return Make(type, n, edges);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrConvexPolyEffect::Make(GrClipEdgeType edgeType,
+ const SkRect& rect) {
+ if (GrClipEdgeType::kHairlineAA == edgeType) {
+ return nullptr;
+ }
+ return GrAARectEffect::Make(edgeType, rect);
+}
+
+GrConvexPolyEffect::~GrConvexPolyEffect() {}
+
+void GrConvexPolyEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConvexPolyEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrConvexPolyEffect::onCreateGLSLInstance() const {
+ return new GrGLConvexPolyEffect;
+}
+
+GrConvexPolyEffect::GrConvexPolyEffect(GrClipEdgeType edgeType, int n, const SkScalar edges[])
+ : INHERITED(kGrConvexPolyEffect_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fEdgeType(edgeType)
+ , fEdgeCount(n) {
+ // Factory function should have already ensured this.
+ SkASSERT(n <= kMaxEdges);
+ memcpy(fEdges, edges, 3 * n * sizeof(SkScalar));
+ // Outset the edges by 0.5 so that a pixel with center on an edge is 50% covered in the AA case
+ // and 100% covered in the non-AA case.
+ for (int i = 0; i < n; ++i) {
+ fEdges[3 * i + 2] += SK_ScalarHalf;
+ }
+}
+
+GrConvexPolyEffect::GrConvexPolyEffect(const GrConvexPolyEffect& that)
+ : INHERITED(kGrConvexPolyEffect_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fEdgeType(that.fEdgeType)
+ , fEdgeCount(that.fEdgeCount) {
+ memcpy(fEdges, that.fEdges, 3 * that.fEdgeCount * sizeof(SkScalar));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrConvexPolyEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrConvexPolyEffect(*this));
+}
+
+bool GrConvexPolyEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrConvexPolyEffect& cpe = other.cast<GrConvexPolyEffect>();
+ // ignore the fact that 0 == -0 and just use memcmp.
+ return (cpe.fEdgeType == fEdgeType && cpe.fEdgeCount == fEdgeCount &&
+ 0 == memcmp(cpe.fEdges, fEdges, 3 * fEdgeCount * sizeof(SkScalar)));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConvexPolyEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrConvexPolyEffect::TestCreate(GrProcessorTestData* d) {
+ int count = d->fRandom->nextULessThan(kMaxEdges) + 1;
+ SkScalar edges[kMaxEdges * 3];
+ for (int i = 0; i < 3 * count; ++i) {
+ edges[i] = d->fRandom->nextSScalar1();
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ GrClipEdgeType edgeType = static_cast<GrClipEdgeType>(
+ d->fRandom->nextULessThan(kGrClipEdgeTypeCnt));
+ fp = GrConvexPolyEffect::Make(edgeType, count, edges);
+ } while (nullptr == fp);
+ return fp;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h
new file mode 100644
index 0000000000..2bb33736f8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrConvexPolyEffect_DEFINED
+#define GrConvexPolyEffect_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrProcessor.h"
+
+class GrInvariantOutput;
+class SkPath;
+
+/**
+ * An effect that renders a convex polygon. It is intended to be used as a coverage effect.
+ * Bounding geometry is rendered and the effect computes coverage based on the fragment's
+ * position relative to the polygon.
+ */
+class GrConvexPolyEffect : public GrFragmentProcessor {
+public:
+ enum {
+ kMaxEdges = 8,
+ };
+
+ /**
+ * edges is a set of n edge equations where n is limited to kMaxEdges. It contains 3*n values.
+ * The edges should form a convex polygon. The positive half-plane is considered to be the
+ * inside. The equations should be normalized such that the first two coefficients are a unit
+ * 2d vector.
+ *
+ * Currently the edges are specified in device space. In the future we may prefer to specify
+ * them in src space. There are a number of ways this could be accomplished but we'd probably
+ * have to modify the effect/shaderbuilder interface to make it possible (e.g. give access
+ * to the view matrix or untransformed positions in the fragment shader).
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, int n,
+ const SkScalar edges[]) {
+ if (n <= 0 || n > kMaxEdges || GrClipEdgeType::kHairlineAA == edgeType) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrConvexPolyEffect(edgeType, n, edges));
+ }
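+
+ // For example (hypothetical edge set): the half plane x >= 0 with the inside on the
+ // positive side is the single equation (1, 0, 0); a point (x, y) is inside when
+ // 1*x + 0*y + 0 >= 0. (The constructor additionally outsets each edge's constant by half
+ // a pixel so a pixel centered exactly on an edge gets 50% coverage in the AA case.)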
+
+ /**
+ * Creates an effect that clips against the path. If the path is not a convex polygon, is
+ * inverse filled, or has too many edges, this will return nullptr.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType, const SkPath&);
+
+ /**
+ * Creates an effect that fills inside the rect with AA edges.
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType, const SkRect&);
+
+ ~GrConvexPolyEffect() override;
+
+ const char* name() const override { return "ConvexPoly"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ GrClipEdgeType getEdgeType() const { return fEdgeType; }
+
+ int getEdgeCount() const { return fEdgeCount; }
+
+ const SkScalar* getEdges() const { return fEdges; }
+
+private:
+ GrConvexPolyEffect(GrClipEdgeType edgeType, int n, const SkScalar edges[]);
+ GrConvexPolyEffect(const GrConvexPolyEffect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ GrClipEdgeType fEdgeType;
+ int fEdgeCount;
+ SkScalar fEdges[3 * kMaxEdges];
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp
new file mode 100644
index 0000000000..b75842c94a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/effects/GrCoverageSetOpXP.h"
+#include "src/gpu/glsl/GrGLSLBlend.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+class CoverageSetOpXP : public GrXferProcessor {
+public:
+ CoverageSetOpXP(SkRegion::Op regionOp, bool invertCoverage)
+ : INHERITED(kCoverageSetOpXP_ClassID)
+ , fRegionOp(regionOp)
+ , fInvertCoverage(invertCoverage) {}
+
+ const char* name() const override { return "Coverage Set Op"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ bool invertCoverage() const { return fInvertCoverage; }
+
+private:
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const CoverageSetOpXP& xp = xpBase.cast<CoverageSetOpXP>();
+ return (fRegionOp == xp.fRegionOp &&
+ fInvertCoverage == xp.fInvertCoverage);
+ }
+
+ SkRegion::Op fRegionOp;
+ bool fInvertCoverage;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLCoverageSetOpXP : public GrGLSLXferProcessor {
+public:
+ GLCoverageSetOpXP(const GrProcessor&) {}
+
+ ~GLCoverageSetOpXP() override {}
+
+ static void GenKey(const GrProcessor& processor, const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) {
+ const CoverageSetOpXP& xp = processor.cast<CoverageSetOpXP>();
+ uint32_t key = xp.invertCoverage() ? 0x0 : 0x1;
+ b->add32(key);
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const CoverageSetOpXP& xp = args.fXP.cast<CoverageSetOpXP>();
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+
+ if (xp.invertCoverage()) {
+ fragBuilder->codeAppendf("%s = 1.0 - %s;", args.fOutputPrimary, args.fInputCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputPrimary, args.fInputCoverage);
+ }
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void CoverageSetOpXP::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLCoverageSetOpXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* CoverageSetOpXP::createGLSLInstance() const {
+ return new GLCoverageSetOpXP(*this);
+}
+
+void CoverageSetOpXP::onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const {
+ switch (fRegionOp) {
+ case SkRegion::kReplace_Op:
+ blendInfo->fSrcBlend = kOne_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ case SkRegion::kIntersect_Op:
+ blendInfo->fSrcBlend = kDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ case SkRegion::kUnion_Op:
+ blendInfo->fSrcBlend = kOne_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kXOR_Op:
+ blendInfo->fSrcBlend = kIDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kDifference_Op:
+ blendInfo->fSrcBlend = kZero_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kReverseDifference_Op:
+ blendInfo->fSrcBlend = kIDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ }
+ blendInfo->fBlendConstant = SK_PMColor4fTRANSPARENT;
+}
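+
+// For example, kIntersect_Op's (kDC, kZero) writes coverage * dst, so only the area covered
+// by both the existing mask and the new draw survives, while kUnion_Op's (kOne, kISC) is
+// src-over: coverage + dst * (1 - coverage).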
+
+///////////////////////////////////////////////////////////////////////////////
+
+constexpr GrCoverageSetOpXPFactory::GrCoverageSetOpXPFactory(SkRegion::Op regionOp,
+ bool invertCoverage)
+ : fRegionOp(regionOp), fInvertCoverage(invertCoverage) {}
+
+const GrXPFactory* GrCoverageSetOpXPFactory::Get(SkRegion::Op regionOp, bool invertCoverage) {
+ // If these objects are constructed as static constexpr by cl.exe (2015 SP2) the vtables are
+ // null.
+#ifdef SK_BUILD_FOR_WIN
+#define _CONSTEXPR_
+#else
+#define _CONSTEXPR_ constexpr
+#endif
+ switch (regionOp) {
+ case SkRegion::kReplace_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gReplaceCDXPFI(
+ SkRegion::kReplace_Op, true);
+ return &gReplaceCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gReplaceCDXPF(
+ SkRegion::kReplace_Op, false);
+ return &gReplaceCDXPF;
+ }
+ }
+ case SkRegion::kIntersect_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gIntersectCDXPFI(
+ SkRegion::kIntersect_Op, true);
+ return &gIntersectCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gIntersectCDXPF(
+ SkRegion::kIntersect_Op, false);
+ return &gIntersectCDXPF;
+ }
+ }
+ case SkRegion::kUnion_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gUnionCDXPFI(SkRegion::kUnion_Op,
+ true);
+ return &gUnionCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gUnionCDXPF(SkRegion::kUnion_Op,
+ false);
+ return &gUnionCDXPF;
+ }
+ }
+ case SkRegion::kXOR_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gXORCDXPFI(SkRegion::kXOR_Op,
+ true);
+ return &gXORCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gXORCDXPF(SkRegion::kXOR_Op,
+ false);
+ return &gXORCDXPF;
+ }
+ }
+ case SkRegion::kDifference_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gDifferenceCDXPFI(
+ SkRegion::kDifference_Op, true);
+ return &gDifferenceCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gDifferenceCDXPF(
+ SkRegion::kDifference_Op, false);
+ return &gDifferenceCDXPF;
+ }
+ }
+ case SkRegion::kReverseDifference_Op: {
+ if (invertCoverage) {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gRevDiffCDXPFI(
+ SkRegion::kReverseDifference_Op, true);
+ return &gRevDiffCDXPFI;
+ } else {
+ static _CONSTEXPR_ const GrCoverageSetOpXPFactory gRevDiffCDXPF(
+ SkRegion::kReverseDifference_Op, false);
+ return &gRevDiffCDXPF;
+ }
+ }
+ }
+#undef _CONSTEXPR_
+ SK_ABORT("Unknown region op.");
+}
+
+sk_sp<const GrXferProcessor> GrCoverageSetOpXPFactory::makeXferProcessor(
+ const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps& caps,
+ GrClampType) const {
+ // We don't support inverting coverage with mixed samples. We don't expect to ever want
+ // this in the future; however, we could at some point make this work using an inverted
+ // coverage modulation table. Note that an inverted table still won't work if there are
+ // coverage procs.
+ if (fInvertCoverage && hasMixedSamples) {
+ SkASSERT(false);
+ return nullptr;
+ }
+
+ return sk_sp<GrXferProcessor>(new CoverageSetOpXP(fRegionOp, fInvertCoverage));
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrCoverageSetOpXPFactory);
+
+#if GR_TEST_UTILS
+const GrXPFactory* GrCoverageSetOpXPFactory::TestGet(GrProcessorTestData* d) {
+ SkRegion::Op regionOp = SkRegion::Op(d->fRandom->nextULessThan(SkRegion::kLastOp + 1));
+ bool invertCoverage = d->fRandom->nextBool();
+ return GrCoverageSetOpXPFactory::Get(regionOp, invertCoverage);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.h b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.h
new file mode 100644
index 0000000000..3510505d44
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCoverageSetOpXP_DEFINED
+#define GrCoverageSetOpXP_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrXferProcessor.h"
+
+// See the comment above GrXPFactory's definition about this warning suppression.
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+
+/**
+ * This xfer processor directly blends the src coverage with the dst using a set operator. It is
+ * useful for rendering coverage masks using CSG. It can optionally invert the src coverage before
+ * applying the set operator.
+ */
+class GrCoverageSetOpXPFactory : public GrXPFactory {
+public:
+ static const GrXPFactory* Get(SkRegion::Op regionOp, bool invertCoverage = false);
+
+private:
+ constexpr GrCoverageSetOpXPFactory(SkRegion::Op regionOp, bool invertCoverage);
+
+ sk_sp<const GrXferProcessor> makeXferProcessor(const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps&,
+ GrClampType) const override;
+
+ AnalysisProperties analysisProperties(const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType) const override {
+ return AnalysisProperties::kIgnoresInputColor;
+ }
+
+
+ GR_DECLARE_XP_FACTORY_TEST
+
+ SkRegion::Op fRegionOp;
+ bool fInvertCoverage;
+
+ typedef GrXPFactory INHERITED;
+};
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp
new file mode 100644
index 0000000000..4af53458c1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrCustomXfermode.h"
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/glsl/GrGLSLBlend.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+bool GrCustomXfermode::IsSupportedMode(SkBlendMode mode) {
+ return (int)mode > (int)SkBlendMode::kLastCoeffMode &&
+ (int)mode <= (int)SkBlendMode::kLastMode;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Static helpers
+///////////////////////////////////////////////////////////////////////////////
+
+static constexpr GrBlendEquation hw_blend_equation(SkBlendMode mode) {
+// In C++14 this could be a constexpr int variable.
+#define EQ_OFFSET (kOverlay_GrBlendEquation - (int)SkBlendMode::kOverlay)
+ GR_STATIC_ASSERT(kOverlay_GrBlendEquation == (int)SkBlendMode::kOverlay + EQ_OFFSET);
+ GR_STATIC_ASSERT(kDarken_GrBlendEquation == (int)SkBlendMode::kDarken + EQ_OFFSET);
+ GR_STATIC_ASSERT(kLighten_GrBlendEquation == (int)SkBlendMode::kLighten + EQ_OFFSET);
+ GR_STATIC_ASSERT(kColorDodge_GrBlendEquation == (int)SkBlendMode::kColorDodge + EQ_OFFSET);
+ GR_STATIC_ASSERT(kColorBurn_GrBlendEquation == (int)SkBlendMode::kColorBurn + EQ_OFFSET);
+ GR_STATIC_ASSERT(kHardLight_GrBlendEquation == (int)SkBlendMode::kHardLight + EQ_OFFSET);
+ GR_STATIC_ASSERT(kSoftLight_GrBlendEquation == (int)SkBlendMode::kSoftLight + EQ_OFFSET);
+ GR_STATIC_ASSERT(kDifference_GrBlendEquation == (int)SkBlendMode::kDifference + EQ_OFFSET);
+ GR_STATIC_ASSERT(kExclusion_GrBlendEquation == (int)SkBlendMode::kExclusion + EQ_OFFSET);
+ GR_STATIC_ASSERT(kMultiply_GrBlendEquation == (int)SkBlendMode::kMultiply + EQ_OFFSET);
+ GR_STATIC_ASSERT(kHSLHue_GrBlendEquation == (int)SkBlendMode::kHue + EQ_OFFSET);
+ GR_STATIC_ASSERT(kHSLSaturation_GrBlendEquation == (int)SkBlendMode::kSaturation + EQ_OFFSET);
+ GR_STATIC_ASSERT(kHSLColor_GrBlendEquation == (int)SkBlendMode::kColor + EQ_OFFSET);
+ GR_STATIC_ASSERT(kHSLLuminosity_GrBlendEquation == (int)SkBlendMode::kLuminosity + EQ_OFFSET);
+
+ // There's an illegal GrBlendEquation that corresponds to no SkBlendMode, hence the extra +1.
+ GR_STATIC_ASSERT(kGrBlendEquationCnt == (int)SkBlendMode::kLastMode + 1 + 1 + EQ_OFFSET);
+
+ return static_cast<GrBlendEquation>((int)mode + EQ_OFFSET);
+#undef EQ_OFFSET
+}
+
+static bool can_use_hw_blend_equation(GrBlendEquation equation,
+ GrProcessorAnalysisCoverage coverage, const GrCaps& caps) {
+ if (!caps.advancedBlendEquationSupport()) {
+ return false;
+ }
+ if (GrProcessorAnalysisCoverage::kLCD == coverage) {
+ return false; // LCD coverage must be applied after the blend equation.
+ }
+ if (caps.isAdvancedBlendEquationBlacklisted(equation)) {
+ return false;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Xfer Processor
+///////////////////////////////////////////////////////////////////////////////
+
+class CustomXP : public GrXferProcessor {
+public:
+ CustomXP(SkBlendMode mode, GrBlendEquation hwBlendEquation)
+ : INHERITED(kCustomXP_ClassID)
+ , fMode(mode)
+ , fHWBlendEquation(hwBlendEquation) {}
+
+ CustomXP(bool hasMixedSamples, SkBlendMode mode, GrProcessorAnalysisCoverage coverage)
+ : INHERITED(kCustomXP_ClassID, true, hasMixedSamples, coverage)
+ , fMode(mode)
+ , fHWBlendEquation(kIllegal_GrBlendEquation) {
+ }
+
+ const char* name() const override { return "Custom Xfermode"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ SkBlendMode mode() const { return fMode; }
+ bool hasHWBlendEquation() const { return kIllegal_GrBlendEquation != fHWBlendEquation; }
+
+ GrBlendEquation hwBlendEquation() const {
+ SkASSERT(this->hasHWBlendEquation());
+ return fHWBlendEquation;
+ }
+
+ GrXferBarrierType xferBarrierType(const GrCaps&) const override;
+
+private:
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(BlendInfo*) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override;
+
+ const SkBlendMode fMode;
+ const GrBlendEquation fHWBlendEquation;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLCustomXP : public GrGLSLXferProcessor {
+public:
+ GLCustomXP(const GrXferProcessor&) {}
+ ~GLCustomXP() override {}
+
+ static void GenKey(const GrXferProcessor& p, const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) {
+ const CustomXP& xp = p.cast<CustomXP>();
+ uint32_t key = 0;
+ if (xp.hasHWBlendEquation()) {
+ SkASSERT(caps.advBlendEqInteraction() > 0); // 0 will mean !xp.hasHWBlendEquation().
+ key |= caps.advBlendEqInteraction();
+ GR_STATIC_ASSERT(GrShaderCaps::kLast_AdvBlendEqInteraction < 4);
+ }
+ if (!xp.hasHWBlendEquation() || caps.mustEnableSpecificAdvBlendEqs()) {
+ key |= (int)xp.mode() << 3;
+ }
+ b->add32(key);
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const CustomXP& xp = args.fXP.cast<CustomXP>();
+ SkASSERT(xp.hasHWBlendEquation());
+
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ fragBuilder->enableAdvancedBlendEquationIfNeeded(xp.hwBlendEquation());
+
+ // Apply coverage by multiplying it into the src color before blending. Mixed samples will
+ // "just work" automatically. (See onGetOptimizations())
+ fragBuilder->codeAppendf("%s = %s * %s;", args.fOutputPrimary, args.fInputCoverage,
+ args.fInputColor);
+ }
+
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ const CustomXP& xp = proc.cast<CustomXP>();
+ SkASSERT(!xp.hasHWBlendEquation());
+
+ GrGLSLBlend::AppendMode(fragBuilder, srcColor, dstColor, outColor, xp.mode());
+
+ // Apply coverage.
+ INHERITED::DefaultCoverageModulation(fragBuilder, srcCoverage, dstColor, outColor,
+ outColorSecondary, xp);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void CustomXP::onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLCustomXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* CustomXP::createGLSLInstance() const {
+ SkASSERT(this->willReadDstColor() != this->hasHWBlendEquation());
+ return new GLCustomXP(*this);
+}
+
+bool CustomXP::onIsEqual(const GrXferProcessor& other) const {
+ const CustomXP& s = other.cast<CustomXP>();
+ return fMode == s.fMode && fHWBlendEquation == s.fHWBlendEquation;
+}
+
+GrXferBarrierType CustomXP::xferBarrierType(const GrCaps& caps) const {
+ if (this->hasHWBlendEquation() && !caps.advancedCoherentBlendEquationSupport()) {
+ return kBlend_GrXferBarrierType;
+ }
+ return kNone_GrXferBarrierType;
+}
+
+void CustomXP::onGetBlendInfo(BlendInfo* blendInfo) const {
+ if (this->hasHWBlendEquation()) {
+ blendInfo->fEquation = this->hwBlendEquation();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// See the comment above GrXPFactory's definition about this warning suppression.
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+class CustomXPFactory : public GrXPFactory {
+public:
+ constexpr CustomXPFactory(SkBlendMode mode)
+ : fMode(mode), fHWBlendEquation(hw_blend_equation(mode)) {}
+
+private:
+ sk_sp<const GrXferProcessor> makeXferProcessor(const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps&,
+ GrClampType) const override;
+
+ AnalysisProperties analysisProperties(const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType) const override;
+
+ GR_DECLARE_XP_FACTORY_TEST
+
+ SkBlendMode fMode;
+ GrBlendEquation fHWBlendEquation;
+
+ typedef GrXPFactory INHERITED;
+};
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+sk_sp<const GrXferProcessor> CustomXPFactory::makeXferProcessor(
+ const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage coverage,
+ bool hasMixedSamples,
+ const GrCaps& caps,
+ GrClampType clampType) const {
+ SkASSERT(GrCustomXfermode::IsSupportedMode(fMode));
+ if (can_use_hw_blend_equation(fHWBlendEquation, coverage, caps)) {
+ return sk_sp<GrXferProcessor>(new CustomXP(fMode, fHWBlendEquation));
+ }
+ return sk_sp<GrXferProcessor>(new CustomXP(hasMixedSamples, fMode, coverage));
+}
+
+GrXPFactory::AnalysisProperties CustomXPFactory::analysisProperties(
+ const GrProcessorAnalysisColor&, const GrProcessorAnalysisCoverage& coverage,
+ const GrCaps& caps, GrClampType clampType) const {
+ /*
+ The general SVG blend equation is defined in the spec as follows:
+
+ Dca' = B(Sc, Dc) * Sa * Da + Y * Sca * (1-Da) + Z * Dca * (1-Sa)
+ Da' = X * Sa * Da + Y * Sa * (1-Da) + Z * Da * (1-Sa)
+
+ (Note that Sca, Dca indicate RGB vectors that are premultiplied by alpha,
+ and that B(Sc, Dc) is a mode-specific function that accepts non-multiplied
+ RGB colors.)
+
+ For every blend mode supported by this class, i.e. the "advanced" blend
+ modes, X=Y=Z=1 and this equation reduces to the PDF blend equation.
+
+ It can be shown that when X=Y=Z=1, these equations can modulate alpha for
+ coverage.
+
+
+ == Color ==
+
+ We substitute Y=Z=1 and define a blend() function that calculates Dca' in
+ terms of premultiplied alpha only:
+
+ blend(Sca, Dca, Sa, Da) = {Dca : if Sa == 0,
+ Sca : if Da == 0,
+ B(Sca/Sa, Dca/Da) * Sa * Da + Sca * (1-Da) + Dca * (1-Sa) : if
+ Sa,Da != 0}
+
+ And for coverage modulation, we use a post blend src-over model:
+
+ Dca'' = f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+
+ (Where f is the fractional coverage.)
+
+ Next we show that canTweakAlphaForCoverage() is true by proving the
+ following relationship:
+
+ blend(f*Sca, Dca, f*Sa, Da) == f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+
+ General case (f,Sa,Da != 0):
+
+ f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * (B(Sca/Sa, Dca/Da) * Sa * Da + Sca * (1-Da) + Dca * (1-Sa)) + (1-f) * Dca [Sa,Da !=
+ 0, definition of blend()]
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + f*Dca * (1-Sa) + Dca - f*Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca - f*Sca * Da + f*Dca - f*Dca * Sa + Dca - f*Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca - f*Sca * Da - f*Dca * Sa + Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) - f*Dca * Sa + Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + Dca * (1 - f*Sa)
+ = B(f*Sca/f*Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + Dca * (1 - f*Sa) [f!=0]
+ = blend(f*Sca, Dca, f*Sa, Da) [definition of blend()]
+
+ Corner cases (Sa=0, Da=0, and f=0):
+
+ Sa=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * Dca + (1-f) * Dca [Sa=0, definition of blend()]
+ = Dca
+ = blend(0, Dca, 0, Da) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [Sa=0]
+
+ Da=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * Sca + (1-f) * Dca [Da=0, definition of blend()]
+ = f * Sca [Da=0]
+ = blend(f*Sca, 0, f*Sa, 0) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [Da=0]
+
+ f=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = Dca [f=0]
+ = blend(0, Dca, 0, Da) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [f=0]
+
+ == Alpha ==
+
+ We substitute X=Y=Z=1 and define a blend() function that calculates Da':
+
+ blend(Sa, Da) = Sa * Da + Sa * (1-Da) + Da * (1-Sa)
+ = Sa * Da + Sa - Sa * Da + Da - Da * Sa
+ = Sa + Da - Sa * Da
+
+ We use the same model for coverage modulation as we did with color:
+
+ Da'' = f * blend(Sa, Da) + (1-f) * Da
+
+ And show that canTweakAlphaForCoverage() is true by proving the following
+ relationship:
+
+ blend(f*Sa, Da) == f * blend(Sa, Da) + (1-f) * Da
+
+
+ f * blend(Sa, Da) + (1-f) * Da
+ = f * (Sa + Da - Sa * Da) + (1-f) * Da
+ = f*Sa + f*Da - f*Sa * Da + Da - f*Da
+ = f*Sa - f*Sa * Da + Da
+ = f*Sa + Da - f*Sa * Da
+ = blend(f*Sa, Da)
+ */
+ if (can_use_hw_blend_equation(fHWBlendEquation, coverage, caps)) {
+ if (caps.blendEquationSupport() == GrCaps::kAdvancedCoherent_BlendEquationSupport) {
+ return AnalysisProperties::kCompatibleWithCoverageAsAlpha;
+ } else {
+ return AnalysisProperties::kCompatibleWithCoverageAsAlpha |
+ AnalysisProperties::kRequiresNonOverlappingDraws;
+ }
+ }
+ return AnalysisProperties::kCompatibleWithCoverageAsAlpha |
+ AnalysisProperties::kReadsDstInShader;
+}
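+
+// A minimal standalone check of the alpha identity above (hypothetical values, not code in
+// this file):
+//
+//   float blend_a(float sa, float da) { return sa + da - sa * da; }
+//   // blend_a(f*Sa, Da) == f * blend_a(Sa, Da) + (1 - f) * Da; with f=0.5, Sa=0.8, Da=0.6:
+//   //   lhs: blend_a(0.4f, 0.6f)                      = 0.4 + 0.6 - 0.24  = 0.76
+//   //   rhs: 0.5f * blend_a(0.8f, 0.6f) + 0.5f * 0.6f = 0.5 * 0.92 + 0.3  = 0.76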
+
+GR_DEFINE_XP_FACTORY_TEST(CustomXPFactory);
+#if GR_TEST_UTILS
+const GrXPFactory* CustomXPFactory::TestGet(GrProcessorTestData* d) {
+ int mode = d->fRandom->nextRangeU((int)SkBlendMode::kLastCoeffMode + 1,
+ (int)SkBlendMode::kLastSeparableMode);
+
+ return GrCustomXfermode::Get((SkBlendMode)mode);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+const GrXPFactory* GrCustomXfermode::Get(SkBlendMode mode) {
+ // If these objects are constructed as static constexpr by cl.exe (2015 SP2) the vtables are
+ // null.
+#ifdef SK_BUILD_FOR_WIN
+#define _CONSTEXPR_
+#else
+#define _CONSTEXPR_ constexpr
+#endif
+ static _CONSTEXPR_ const CustomXPFactory gOverlay(SkBlendMode::kOverlay);
+ static _CONSTEXPR_ const CustomXPFactory gDarken(SkBlendMode::kDarken);
+ static _CONSTEXPR_ const CustomXPFactory gLighten(SkBlendMode::kLighten);
+ static _CONSTEXPR_ const CustomXPFactory gColorDodge(SkBlendMode::kColorDodge);
+ static _CONSTEXPR_ const CustomXPFactory gColorBurn(SkBlendMode::kColorBurn);
+ static _CONSTEXPR_ const CustomXPFactory gHardLight(SkBlendMode::kHardLight);
+ static _CONSTEXPR_ const CustomXPFactory gSoftLight(SkBlendMode::kSoftLight);
+ static _CONSTEXPR_ const CustomXPFactory gDifference(SkBlendMode::kDifference);
+ static _CONSTEXPR_ const CustomXPFactory gExclusion(SkBlendMode::kExclusion);
+ static _CONSTEXPR_ const CustomXPFactory gMultiply(SkBlendMode::kMultiply);
+ static _CONSTEXPR_ const CustomXPFactory gHue(SkBlendMode::kHue);
+ static _CONSTEXPR_ const CustomXPFactory gSaturation(SkBlendMode::kSaturation);
+ static _CONSTEXPR_ const CustomXPFactory gColor(SkBlendMode::kColor);
+ static _CONSTEXPR_ const CustomXPFactory gLuminosity(SkBlendMode::kLuminosity);
+#undef _CONSTEXPR_
+ switch (mode) {
+ case SkBlendMode::kOverlay:
+ return &gOverlay;
+ case SkBlendMode::kDarken:
+ return &gDarken;
+ case SkBlendMode::kLighten:
+ return &gLighten;
+ case SkBlendMode::kColorDodge:
+ return &gColorDodge;
+ case SkBlendMode::kColorBurn:
+ return &gColorBurn;
+ case SkBlendMode::kHardLight:
+ return &gHardLight;
+ case SkBlendMode::kSoftLight:
+ return &gSoftLight;
+ case SkBlendMode::kDifference:
+ return &gDifference;
+ case SkBlendMode::kExclusion:
+ return &gExclusion;
+ case SkBlendMode::kMultiply:
+ return &gMultiply;
+ case SkBlendMode::kHue:
+ return &gHue;
+ case SkBlendMode::kSaturation:
+ return &gSaturation;
+ case SkBlendMode::kColor:
+ return &gColor;
+ case SkBlendMode::kLuminosity:
+ return &gLuminosity;
+ default:
+ SkASSERT(!GrCustomXfermode::IsSupportedMode(mode));
+ return nullptr;
+ }
+}
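
The alpha identity proven in the comment above is easy to sanity-check numerically. The following standalone sketch (plain C++, independent of any Skia types, purely illustrative) evaluates both sides of blend(f*Sa, Da) == f * blend(Sa, Da) + (1-f) * Da over a coarse grid of inputs:

    #include <cassert>
    #include <cmath>

    // Alpha blend() from the derivation above: Sa + Da - Sa*Da.
    static float blend(float sa, float da) { return sa + da - sa * da; }

    int main() {
        // Verify blend(f*Sa, Da) == f*blend(Sa, Da) + (1-f)*Da on a grid.
        for (float f = 0.0f; f <= 1.0f; f += 0.25f) {
            for (float sa = 0.0f; sa <= 1.0f; sa += 0.25f) {
                for (float da = 0.0f; da <= 1.0f; da += 0.25f) {
                    float lhs = blend(f * sa, da);
                    float rhs = f * blend(sa, da) + (1.0f - f) * da;
                    assert(std::fabs(lhs - rhs) < 1e-6f);
                }
            }
        }
        return 0;
    }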
diff --git a/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.h b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.h
new file mode 100644
index 0000000000..672a512d09
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCustomXfermode_DEFINED
+#define GrCustomXfermode_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkRefCnt.h"
+
+class GrTexture;
+class GrXPFactory;
+
+/**
+ * Custom Xfer modes are used for blending when the blend mode cannot be represented using blend
+ * coefficients.
+ */
+namespace GrCustomXfermode {
+ bool IsSupportedMode(SkBlendMode mode);
+ const GrXPFactory* Get(SkBlendMode mode);
+}  // namespace GrCustomXfermode
+
+#endif
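
A hedged caller-side sketch of how this interface is meant to be used (the helper name is illustrative, not part of this header). Per the .cpp above, Get() returns nullptr for modes it does not handle, so callers fall back to the coefficient-based path in that case:

    // Illustrative helper: pick an XP factory for a paint's blend mode.
    const GrXPFactory* xpFactoryFor(SkBlendMode mode) {
        if (GrCustomXfermode::IsSupportedMode(mode)) {
            return GrCustomXfermode::Get(mode);  // advanced blend mode
        }
        return nullptr;  // caller uses the standard coefficient-based path
    }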
diff --git a/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp
new file mode 100644
index 0000000000..81ccb5f5d1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/effects/GrDisableColorXP.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+/**
+ * This xfer processor disables color writing. Thus color and coverage are ignored and no blending
+ * occurs. This XP is useful for things like stenciling.
+ */
+class DisableColorXP : public GrXferProcessor {
+public:
+ DisableColorXP() : INHERITED(kDisableColorXP_ClassID) {}
+
+private:
+ const char* name() const override { return "Disable Color"; }
+ bool onIsEqual(const GrXferProcessor& xpBase) const override { return true; }
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ return; // No key.
+ }
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override {
+ blendInfo->fWriteColor = false;
+ }
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+class GLDisableColorXP : public GrGLSLXferProcessor {
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ if (args.fShaderCaps->mustWriteToFragColor()) {
+ // This emit code should be empty. However, on the nexus 6 there is a driver bug where
+ // if you do not give gl_FragColor a value, the gl context is lost and we end up drawing
+ // nothing. So this fix just sets the gl_FragColor arbitrarily to 0.
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=445377
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ fragBuilder->codeAppendf("%s = half4(0);", args.fOutputPrimary);
+ }
+ }
+
+ void emitOutputSwizzle(
+ GrGLSLXPFragmentBuilder*, const GrSwizzle&, const char*, const char*) const override {
+ // Don't write any swizzling. This makes sure the final shader does not output a color.
+ return;
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+GrGLSLXferProcessor* DisableColorXP::createGLSLInstance() const {
+ return new GLDisableColorXP();
+}
+
+sk_sp<const GrXferProcessor> GrDisableColorXPFactory::MakeXferProcessor() {
+ return sk_make_sp<DisableColorXP>();
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrDisableColorXPFactory);
+
+#if GR_TEST_UTILS
+const GrXPFactory* GrDisableColorXPFactory::TestGet(GrProcessorTestData*) {
+ return GrDisableColorXPFactory::Get();
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h
new file mode 100644
index 0000000000..a78adef9e3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDisableColorXP_DEFINED
+#define GrDisableColorXP_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrXferProcessor.h"
+
+// See the comment above GrXPFactory's definition about this warning suppression.
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+class GrDisableColorXPFactory : public GrXPFactory {
+public:
+ static const GrDisableColorXPFactory* Get();
+
+ static sk_sp<const GrXferProcessor> MakeXferProcessor();
+
+private:
+ constexpr GrDisableColorXPFactory() {}
+
+ AnalysisProperties analysisProperties(
+ const GrProcessorAnalysisColor&, const GrProcessorAnalysisCoverage&, const GrCaps&,
+ GrClampType) const override {
+ return AnalysisProperties::kCompatibleWithCoverageAsAlpha |
+ AnalysisProperties::kIgnoresInputColor;
+ }
+
+ sk_sp<const GrXferProcessor> makeXferProcessor(
+ const GrProcessorAnalysisColor&, GrProcessorAnalysisCoverage, bool hasMixedSamples,
+ const GrCaps&, GrClampType) const override {
+ return MakeXferProcessor();
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST
+
+ typedef GrXPFactory INHERITED;
+};
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+inline const GrDisableColorXPFactory* GrDisableColorXPFactory::Get() {
+ // If this is constructed as static constexpr by cl.exe (2015 SP2) the vtable is null.
+#ifdef SK_BUILD_FOR_WIN
+ static const GrDisableColorXPFactory gDisableColorXPFactory;
+#else
+ static constexpr const GrDisableColorXPFactory gDisableColorXPFactory;
+#endif
+ return &gDisableColorXPFactory;
+}
+
+#endif
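
A usage sketch, assuming a stencil-only draw that wants color writes disabled (the surrounding pipeline-setup code is omitted and illustrative):

    // Get() returns a process-lifetime singleton; no ownership is transferred
    // for the factory itself. The XP it makes is reference counted as usual.
    const GrDisableColorXPFactory* factory = GrDisableColorXPFactory::Get();
    sk_sp<const GrXferProcessor> xp = GrDisableColorXPFactory::MakeXferProcessor();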
diff --git a/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp
new file mode 100644
index 0000000000..08c36dc015
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp
@@ -0,0 +1,928 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrTexture.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/effects/GrAtlasedShaderHelpers.h"
+#include "src/gpu/effects/GrDistanceFieldGeoProc.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+// Assuming a radius of a little less than the diagonal of the fragment
+#define SK_DistanceFieldAAFactor "0.65"
+
+class GrGLDistanceFieldA8TextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldA8TextGeoProc() = default;
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrDistanceFieldA8TextGeoProc& dfTexEffect =
+ args.fGP.cast<GrDistanceFieldA8TextGeoProc>();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfTexEffect);
+
+ const char* atlasSizeInvName;
+ fAtlasSizeInvUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat2_GrSLType,
+ "AtlasSizeInv",
+ &atlasSizeInvName);
+#ifdef SK_GAMMA_APPLY_TO_A8
+ // adjust based on gamma
+ const char* distanceAdjustUniName = nullptr;
+ // width, height, 1/(3*width)
+ fDistanceAdjustUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "DistanceAdjust", &distanceAdjustUniName);
+#endif
+
+ // Setup pass through color
+ varyingHandler->addPassThroughAttribute(dfTexEffect.inColor(), args.fOutputColor);
+
+ // Setup position
+ gpArgs->fPositionVar = dfTexEffect.inPosition().asShaderVar();
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ dfTexEffect.inPosition().asShaderVar(),
+ dfTexEffect.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // add varyings
+ GrGLSLVarying uv(kFloat2_GrSLType);
+ GrSLType texIdxType = args.fShaderCaps->integerSupport() ? kInt_GrSLType : kFloat_GrSLType;
+ GrGLSLVarying texIdx(texIdxType);
+ GrGLSLVarying st(kFloat2_GrSLType);
+ append_index_uv_varyings(args, dfTexEffect.inTextureCoords().name(), atlasSizeInvName, &uv,
+ &texIdx, &st);
+
+ bool isUniformScale = (dfTexEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfTexEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfTexEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+ bool isAliased =
+ SkToBool(dfTexEffect.getFlags() & kAliased_DistanceFieldEffectFlag);
+
+ // Use highp to work around aliasing issues
+ fragBuilder->codeAppendf("float2 uv = %s;\n", uv.fsIn());
+ fragBuilder->codeAppend("half4 texColor;");
+ append_multitexture_lookup(args, dfTexEffect.numTextureSamplers(),
+ texIdx, "uv", "texColor");
+
+ fragBuilder->codeAppend("half distance = "
+ SK_DistanceFieldMultiplier "*(texColor.r - " SK_DistanceFieldThreshold ");");
+#ifdef SK_GAMMA_APPLY_TO_A8
+ // adjust width based on gamma
+ fragBuilder->codeAppendf("distance -= %s;", distanceAdjustUniName);
+#endif
+
+ fragBuilder->codeAppend("half afwidth;");
+ if (isUniformScale) {
+ // For uniform scale, we adjust for the effect of the transformation on the distance
+ // by using the length of the gradient of the t coordinate in the y direction.
+ // We use st coordinates to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor
+ "*half(dFdx(%s.x)));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor
+ "*half(dFdy(%s.y)));", st.fsIn());
+#endif
+ } else if (isSimilarity) {
+ // For similarity transform, we adjust the effect of the transformation on the distance
+ // by using the length of the gradient of the texture coordinates. We use st coordinates
+ // to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("half st_grad_len = length(half2(dFdx(%s)));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("half st_grad_len = length(half2(dFdy(%s)));", st.fsIn());
+#endif
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*st_grad_len);");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("half2 dist_grad = half2(float2(dFdx(distance), "
+ "dFdy(distance)));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("half dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = half2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*half(inversesqrt(dg_len2));");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppendf("half2 Jdx = half2(dFdx(%s));", st.fsIn());
+ fragBuilder->codeAppendf("half2 Jdy = half2(dFdy(%s));", st.fsIn());
+ fragBuilder->codeAppend("half2 grad = half2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
+
+ if (isAliased) {
+ fragBuilder->codeAppend("half val = distance > 0 ? 1.0 : 0.0;");
+ } else if (isGammaCorrect) {
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want
+ // distance mapped linearly to coverage, so use a linear step:
+ fragBuilder->codeAppend(
+ "half val = saturate((distance + afwidth) / (2.0 * afwidth));");
+ } else {
+ fragBuilder->codeAppend("half val = smoothstep(-afwidth, afwidth, distance);");
+ }
+
+ fragBuilder->codeAppendf("%s = half4(val);", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrDistanceFieldA8TextGeoProc& dfa8gp = proc.cast<GrDistanceFieldA8TextGeoProc>();
+
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float distanceAdjust = dfa8gp.getDistanceAdjust();
+ if (distanceAdjust != fDistanceAdjust) {
+ fDistanceAdjust = distanceAdjust;
+ pdman.set1f(fDistanceAdjustUni, distanceAdjust);
+ }
+#endif
+
+ const SkISize& atlasSize = dfa8gp.atlasSize();
+ SkASSERT(SkIsPow2(atlasSize.fWidth) && SkIsPow2(atlasSize.fHeight));
+
+ if (fAtlasSize != atlasSize) {
+ pdman.set2f(fAtlasSizeInvUniform, 1.0f / atlasSize.fWidth, 1.0f / atlasSize.fHeight);
+ fAtlasSize = atlasSize;
+ }
+ this->setTransformDataHelper(dfa8gp.localMatrix(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldA8TextGeoProc& dfTexEffect = gp.cast<GrDistanceFieldA8TextGeoProc>();
+ uint32_t key = dfTexEffect.getFlags();
+ b->add32(key);
+ b->add32(dfTexEffect.numTextureSamplers());
+ }
+
+private:
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float fDistanceAdjust = -1.f;
+ UniformHandle fDistanceAdjustUni;
+#endif
+ SkISize fAtlasSize = {0, 0};
+ UniformHandle fAtlasSizeInvUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
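
The two coverage mappings used by the fragment code above (a linear step for gamma-correct targets, smoothstep otherwise) differ only in the easing applied across the +/-afwidth band. A standalone CPU sketch of both, purely illustrative:

    #include <algorithm>

    // distance: signed distance sample; afwidth: smoothing half-width (> 0).
    float coverageLinear(float distance, float afwidth) {
        return std::clamp((distance + afwidth) / (2.0f * afwidth), 0.0f, 1.0f);
    }
    float coverageSmooth(float distance, float afwidth) {
        float t = std::clamp((distance + afwidth) / (2.0f * afwidth), 0.0f, 1.0f);
        return t * t * (3.0f - 2.0f * t);  // GLSL smoothstep(-afwidth, afwidth, distance)
    }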
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldA8TextGeoProc::GrDistanceFieldA8TextGeoProc(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float distanceAdjust,
+#endif
+ uint32_t flags,
+ const SkMatrix& localMatrix)
+ : INHERITED(kGrDistanceFieldA8TextGeoProc_ClassID)
+ , fLocalMatrix(localMatrix)
+ , fFlags(flags & kNonLCD_DistanceFieldEffectMask)
+#ifdef SK_GAMMA_APPLY_TO_A8
+ , fDistanceAdjust(distanceAdjust)
+#endif
+{
+ SkASSERT(numProxies <= kMaxTextures);
+ SkASSERT(!(flags & ~kNonLCD_DistanceFieldEffectMask));
+
+ if (flags & kPerspective_DistanceFieldEffectFlag) {
+ fInPosition = {"inPosition", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ }
+ fInColor = {"inColor", kUByte4_norm_GrVertexAttribType, kHalf4_GrSLType };
+ fInTextureCoords = {"inTextureCoords", kUShort2_GrVertexAttribType,
+ caps.integerSupport() ? kUShort2_GrSLType : kFloat2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+
+ if (numProxies) {
+ fAtlasSize = proxies[0]->isize();
+ }
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params, proxies[i]->textureSwizzle());
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldA8TextGeoProc::addNewProxies(const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params) {
+ SkASSERT(numProxies <= kMaxTextures);
+
+ if (!fTextureSamplers[0].isInitialized()) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+ if (!fTextureSamplers[i].isInitialized()) {
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params,
+ proxies[i]->textureSwizzle());
+ }
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldA8TextGeoProc::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldA8TextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor*
+GrDistanceFieldA8TextGeoProc::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLDistanceFieldA8TextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldA8TextGeoProc);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrDistanceFieldA8TextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxies[kMaxTextures] = {
+ d->textureProxy(texIdx),
+ nullptr,
+ nullptr,
+ nullptr
+ };
+
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(d->fRandom, wrapModes);
+ GrSamplerState samplerState(wrapModes, d->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+
+ uint32_t flags = 0;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+ SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float lum = d->fRandom->nextF();
+#endif
+ return GrDistanceFieldA8TextGeoProc::Make(*d->caps()->shaderCaps(),
+ proxies, 1,
+ samplerState,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ lum,
+#endif
+ flags, localMatrix);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistanceFieldPathGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldPathGeoProc()
+ : fMatrix(SkMatrix::InvalidMatrix())
+ , fAtlasSize({0,0}) {
+ }
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrDistanceFieldPathGeoProc& dfPathEffect =
+ args.fGP.cast<GrDistanceFieldPathGeoProc>();
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfPathEffect);
+
+ const char* atlasSizeInvName;
+ fAtlasSizeInvUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat2_GrSLType,
+ "AtlasSizeInv",
+ &atlasSizeInvName);
+
+ GrGLSLVarying uv(kFloat2_GrSLType);
+ GrSLType texIdxType = args.fShaderCaps->integerSupport() ? kInt_GrSLType : kFloat_GrSLType;
+ GrGLSLVarying texIdx(texIdxType);
+ GrGLSLVarying st(kFloat2_GrSLType);
+ append_index_uv_varyings(args, dfPathEffect.inTextureCoords().name(), atlasSizeInvName, &uv,
+ &texIdx, &st);
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(dfPathEffect.inColor(), args.fOutputColor);
+
+ if (dfPathEffect.matrix().hasPerspective()) {
+ // Setup position
+ this->writeOutputPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ dfPathEffect.inPosition().name(),
+ dfPathEffect.matrix(),
+ &fMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ dfPathEffect.inPosition().asShaderVar(),
+ args.fFPCoordTransformHandler);
+ } else {
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, dfPathEffect.inPosition().name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ dfPathEffect.inPosition().asShaderVar(),
+ dfPathEffect.matrix(),
+ args.fFPCoordTransformHandler);
+ }
+
+ // Use highp to work around aliasing issues
+ fragBuilder->codeAppendf("float2 uv = %s;", uv.fsIn());
+ fragBuilder->codeAppend("half4 texColor;");
+ append_multitexture_lookup(args, dfPathEffect.numTextureSamplers(), texIdx, "uv",
+ "texColor");
+
+ fragBuilder->codeAppend("half distance = "
+ SK_DistanceFieldMultiplier "*(texColor.r - " SK_DistanceFieldThreshold ");");
+
+ fragBuilder->codeAppend("half afwidth;");
+ bool isUniformScale = (dfPathEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfPathEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfPathEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+ if (isUniformScale) {
+ // For uniform scale, we adjust for the effect of the transformation on the distance
+ // by using the length of the gradient of the t coordinate in the y direction.
+ // We use st coordinates to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor
+ "*half(dFdx(%s.x)));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor
+ "*half(dFdy(%s.y)));", st.fsIn());
+#endif
+ } else if (isSimilarity) {
+ // For similarity transform, we adjust the effect of the transformation on the distance
+ // by using the length of the gradient of the texture coordinates. We use st coordinates
+ // to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("half st_grad_len = half(length(dFdx(%s)));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("half st_grad_len = half(length(dFdy(%s)));", st.fsIn());
+#endif
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*st_grad_len);");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("half2 dist_grad = half2(dFdx(distance), "
+ "dFdy(distance));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("half dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = half2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*half(inversesqrt(dg_len2));");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppendf("half2 Jdx = half2(dFdx(%s));", st.fsIn());
+ fragBuilder->codeAppendf("half2 Jdy = half2(dFdy(%s));", st.fsIn());
+ fragBuilder->codeAppend("half2 grad = half2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want distance
+ // mapped linearly to coverage, so use a linear step:
+ if (isGammaCorrect) {
+ fragBuilder->codeAppend(
+ "half val = saturate((distance + afwidth) / (2.0 * afwidth));");
+ } else {
+ fragBuilder->codeAppend("half val = smoothstep(-afwidth, afwidth, distance);");
+ }
+
+ fragBuilder->codeAppendf("%s = half4(val);", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+
+ const GrDistanceFieldPathGeoProc& dfpgp = proc.cast<GrDistanceFieldPathGeoProc>();
+
+ if (dfpgp.matrix().hasPerspective() && !fMatrix.cheapEqualTo(dfpgp.matrix())) {
+ fMatrix = dfpgp.matrix();
+ float matrix[3 * 3];
+ GrGLSLGetMatrix<3>(matrix, fMatrix);
+ pdman.setMatrix3f(fMatrixUniform, matrix);
+ }
+
+ const SkISize& atlasSize = dfpgp.atlasSize();
+ SkASSERT(SkIsPow2(atlasSize.fWidth) && SkIsPow2(atlasSize.fHeight));
+ if (fAtlasSize != atlasSize) {
+ pdman.set2f(fAtlasSizeInvUniform, 1.0f / atlasSize.fWidth, 1.0f / atlasSize.fHeight);
+ fAtlasSize = atlasSize;
+ }
+
+ if (dfpgp.matrix().hasPerspective()) {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ } else {
+ this->setTransformDataHelper(dfpgp.matrix(), pdman, &transformIter);
+ }
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldPathGeoProc& dfTexEffect = gp.cast<GrDistanceFieldPathGeoProc>();
+
+ uint32_t key = dfTexEffect.getFlags();
+ key |= ComputePosKey(dfTexEffect.matrix()) << 16;
+ b->add32(key);
+ b->add32(dfTexEffect.matrix().hasPerspective());
+ b->add32(dfTexEffect.numTextureSamplers());
+ }
+
+private:
+ SkMatrix fMatrix; // view matrix if perspective, local matrix otherwise
+ UniformHandle fMatrixUniform;
+
+ SkISize fAtlasSize;
+ UniformHandle fAtlasSizeInvUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldPathGeoProc::GrDistanceFieldPathGeoProc(const GrShaderCaps& caps,
+ const SkMatrix& matrix,
+ bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params,
+ uint32_t flags)
+ : INHERITED(kGrDistanceFieldPathGeoProc_ClassID)
+ , fMatrix(matrix)
+ , fFlags(flags & kNonLCD_DistanceFieldEffectMask) {
+ SkASSERT(numProxies <= kMaxTextures);
+ SkASSERT(!(flags & ~kNonLCD_DistanceFieldEffectMask));
+
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ fInTextureCoords = {"inTextureCoords", kUShort2_GrVertexAttribType,
+ caps.integerSupport() ? kUShort2_GrSLType : kFloat2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+
+ if (numProxies) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params, proxies[i]->textureSwizzle());
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldPathGeoProc::addNewProxies(const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params) {
+ SkASSERT(numProxies <= kMaxTextures);
+
+ if (!fTextureSamplers[0].isInitialized()) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+
+ if (!fTextureSamplers[i].isInitialized()) {
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params,
+ proxies[i]->textureSwizzle());
+ }
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldPathGeoProc::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldPathGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor*
+GrDistanceFieldPathGeoProc::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLDistanceFieldPathGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldPathGeoProc);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrDistanceFieldPathGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxies[kMaxTextures] = {
+ d->textureProxy(texIdx),
+ nullptr,
+ nullptr,
+ nullptr
+ };
+
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(d->fRandom, wrapModes);
+ GrSamplerState samplerState(wrapModes, d->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+
+ uint32_t flags = 0;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+
+ return GrDistanceFieldPathGeoProc::Make(*d->caps()->shaderCaps(),
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool(),
+ proxies, 1,
+ samplerState,
+ flags);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistanceFieldLCDTextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldLCDTextGeoProc() : fAtlasSize({0, 0}) {
+ fDistanceAdjust = GrDistanceFieldLCDTextGeoProc::DistanceAdjust::Make(1.0f, 1.0f, 1.0f);
+ }
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrDistanceFieldLCDTextGeoProc& dfTexEffect =
+ args.fGP.cast<GrDistanceFieldLCDTextGeoProc>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfTexEffect);
+
+ const char* atlasSizeInvName;
+ fAtlasSizeInvUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat2_GrSLType,
+ "AtlasSizeInv",
+ &atlasSizeInvName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(dfTexEffect.inColor(), args.fOutputColor);
+
+ // Setup position
+ gpArgs->fPositionVar = dfTexEffect.inPosition().asShaderVar();
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ dfTexEffect.inPosition().asShaderVar(),
+ dfTexEffect.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // set up varyings
+ GrGLSLVarying uv(kFloat2_GrSLType);
+ GrSLType texIdxType = args.fShaderCaps->integerSupport() ? kInt_GrSLType : kFloat_GrSLType;
+ GrGLSLVarying texIdx(texIdxType);
+ GrGLSLVarying st(kFloat2_GrSLType);
+ append_index_uv_varyings(args, dfTexEffect.inTextureCoords().name(), atlasSizeInvName, &uv,
+ &texIdx, &st);
+
+ GrGLSLVarying delta(kFloat_GrSLType);
+ varyingHandler->addVarying("Delta", &delta);
+ if (dfTexEffect.getFlags() & kBGR_DistanceFieldEffectFlag) {
+ vertBuilder->codeAppendf("%s = -%s.x/3.0;", delta.vsOut(), atlasSizeInvName);
+ } else {
+ vertBuilder->codeAppendf("%s = %s.x/3.0;", delta.vsOut(), atlasSizeInvName);
+ }
+
+ // add frag shader code
+ bool isUniformScale = (dfTexEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfTexEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfTexEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+
+ // create LCD offset adjusted by inverse of transform
+ // Use highp to work around aliasing issues
+ fragBuilder->codeAppendf("float2 uv = %s;\n", uv.fsIn());
+
+ if (isUniformScale) {
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("half st_grad_len = half(abs(dFdx(%s.x)));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("half st_grad_len = half(abs(dFdy(%s.y)));", st.fsIn());
+#endif
+ fragBuilder->codeAppendf("half2 offset = half2(half(st_grad_len*%s), 0.0);",
+ delta.fsIn());
+ } else if (isSimilarity) {
+ // For a similarity matrix with rotation, the gradient will not be aligned
+ // with the texel coordinate axes, so we need to calculate it.
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("half2 st_grad = half2(dFdx(%s));", st.fsIn());
+ fragBuilder->codeAppendf("half2 offset = half(%s)*st_grad;", delta.fsIn());
+#else
+ // We use dFdy because of a Mali 400 bug, and rotate -90 degrees to
+ // get the gradient in the x direction.
+ fragBuilder->codeAppendf("half2 st_grad = half2(dFdy(%s));", st.fsIn());
+ fragBuilder->codeAppendf("half2 offset = half2(%s*float2(st_grad.y, -st_grad.x));",
+ delta.fsIn());
+#endif
+ fragBuilder->codeAppend("half st_grad_len = length(st_grad);");
+ } else {
+ fragBuilder->codeAppendf("half2 st = half2(%s);\n", st.fsIn());
+
+ fragBuilder->codeAppend("half2 Jdx = half2(dFdx(st));");
+ fragBuilder->codeAppend("half2 Jdy = half2(dFdy(st));");
+ fragBuilder->codeAppendf("half2 offset = half2(half(%s))*Jdx;", delta.fsIn());
+ }
+
+ // sample the texture by index
+ fragBuilder->codeAppend("half4 texColor;");
+ append_multitexture_lookup(args, dfTexEffect.numTextureSamplers(),
+ texIdx, "uv", "texColor");
+
+ // green is distance to uv center
+ fragBuilder->codeAppend("half3 distance;");
+ fragBuilder->codeAppend("distance.y = texColor.r;");
+ // red is distance to left offset
+ fragBuilder->codeAppend("half2 uv_adjusted = half2(uv) - offset;");
+ append_multitexture_lookup(args, dfTexEffect.numTextureSamplers(),
+ texIdx, "uv_adjusted", "texColor");
+ fragBuilder->codeAppend("distance.x = texColor.r;");
+ // blue is distance to right offset
+ fragBuilder->codeAppend("uv_adjusted = half2(uv) + offset;");
+ append_multitexture_lookup(args, dfTexEffect.numTextureSamplers(),
+ texIdx, "uv_adjusted", "texColor");
+ fragBuilder->codeAppend("distance.z = texColor.r;");
+
+ fragBuilder->codeAppend("distance = "
+ "half3(" SK_DistanceFieldMultiplier ")*(distance - half3(" SK_DistanceFieldThreshold"));");
+
+ // adjust width based on gamma
+ const char* distanceAdjustUniName = nullptr;
+ fDistanceAdjustUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType,
+ "DistanceAdjust", &distanceAdjustUniName);
+ fragBuilder->codeAppendf("distance -= %s;", distanceAdjustUniName);
+
+ // To be strictly correct, we should compute the anti-aliasing factor separately
+ // for each color component. However, this is only important when using perspective
+ // transformations, and even then using a single factor seems like a reasonable
+ // trade-off between quality and speed.
+ fragBuilder->codeAppend("half afwidth;");
+ if (isSimilarity) {
+ // For similarity transform (uniform scale-only is a subset of this), we adjust for the
+ // effect of the transformation on the distance by using the length of the gradient of
+ // the texture coordinates. We use st coordinates to ensure we're mapping 1:1 from texel
+ // space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*st_grad_len;");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("half2 dist_grad = half2(half(dFdx(distance.r)), "
+ "half(dFdy(distance.r)));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("half dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = half2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*half(inversesqrt(dg_len2));");
+ fragBuilder->codeAppend("}");
+ fragBuilder->codeAppend("half2 grad = half2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
+
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want distance
+ // mapped linearly to coverage, so use a linear step:
+ if (isGammaCorrect) {
+ fragBuilder->codeAppendf("%s = "
+ "half4(saturate((distance + half3(afwidth)) / half3(2.0 * afwidth)), 1.0);",
+ args.fOutputCoverage);
+ } else {
+ fragBuilder->codeAppendf(
+ "%s = half4(smoothstep(half3(-afwidth), half3(afwidth), distance), 1.0);",
+ args.fOutputCoverage);
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) override {
+ SkASSERT(fDistanceAdjustUni.isValid());
+
+ const GrDistanceFieldLCDTextGeoProc& dflcd = processor.cast<GrDistanceFieldLCDTextGeoProc>();
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust wa = dflcd.getDistanceAdjust();
+ if (wa != fDistanceAdjust) {
+ pdman.set3f(fDistanceAdjustUni,
+ wa.fR,
+ wa.fG,
+ wa.fB);
+ fDistanceAdjust = wa;
+ }
+
+ const SkISize& atlasSize = dflcd.atlasSize();
+ SkASSERT(SkIsPow2(atlasSize.fWidth) && SkIsPow2(atlasSize.fHeight));
+ if (fAtlasSize != atlasSize) {
+ pdman.set2f(fAtlasSizeInvUniform, 1.0f / atlasSize.fWidth, 1.0f / atlasSize.fHeight);
+ fAtlasSize = atlasSize;
+ }
+ this->setTransformDataHelper(dflcd.localMatrix(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldLCDTextGeoProc& dfTexEffect = gp.cast<GrDistanceFieldLCDTextGeoProc>();
+
+ uint32_t key = dfTexEffect.getFlags();
+ b->add32(key);
+ b->add32(dfTexEffect.numTextureSamplers());
+ }
+
+private:
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust fDistanceAdjust;
+ UniformHandle fDistanceAdjustUni;
+
+ SkISize fAtlasSize;
+ UniformHandle fAtlasSizeInvUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldLCDTextGeoProc::GrDistanceFieldLCDTextGeoProc(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params,
+ DistanceAdjust distanceAdjust,
+ uint32_t flags,
+ const SkMatrix& localMatrix)
+ : INHERITED(kGrDistanceFieldLCDTextGeoProc_ClassID)
+ , fLocalMatrix(localMatrix)
+ , fDistanceAdjust(distanceAdjust)
+ , fFlags(flags & kLCD_DistanceFieldEffectMask) {
+ SkASSERT(numProxies <= kMaxTextures);
+ SkASSERT(!(flags & ~kLCD_DistanceFieldEffectMask) && (flags & kUseLCD_DistanceFieldEffectFlag));
+
+ if (fFlags & kPerspective_DistanceFieldEffectFlag) {
+ fInPosition = {"inPosition", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ }
+ fInColor = {"inColor", kUByte4_norm_GrVertexAttribType, kHalf4_GrSLType};
+ fInTextureCoords = {"inTextureCoords", kUShort2_GrVertexAttribType,
+ caps.integerSupport() ? kUShort2_GrSLType : kFloat2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+
+ if (numProxies) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params, proxies[i]->textureSwizzle());
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldLCDTextGeoProc::addNewProxies(const sk_sp<GrTextureProxy>* proxies,
+ int numProxies,
+ const GrSamplerState& params) {
+ SkASSERT(numProxies <= kMaxTextures);
+
+ if (!fTextureSamplers[0].isInitialized()) {
+ fAtlasSize = proxies[0]->isize();
+ }
+
+ for (int i = 0; i < numProxies; ++i) {
+ SkASSERT(proxies[i]);
+ SkASSERT(proxies[i]->isize() == fAtlasSize);
+
+ if (!fTextureSamplers[i].isInitialized()) {
+ fTextureSamplers[i].reset(proxies[i]->textureType(), params,
+ proxies[i]->textureSwizzle());
+ }
+ }
+ this->setTextureSamplerCnt(numProxies);
+}
+
+void GrDistanceFieldLCDTextGeoProc::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldLCDTextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrDistanceFieldLCDTextGeoProc::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLDistanceFieldLCDTextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldLCDTextGeoProc);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrDistanceFieldLCDTextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxies[kMaxTextures] = {
+ d->textureProxy(texIdx),
+ nullptr,
+ nullptr,
+ nullptr
+ };
+
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(d->fRandom, wrapModes);
+ GrSamplerState samplerState(wrapModes, d->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+ DistanceAdjust wa = { 0.0f, 0.1f, -0.1f };
+ uint32_t flags = kUseLCD_DistanceFieldEffectFlag;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+ flags |= d->fRandom->nextBool() ? kBGR_DistanceFieldEffectFlag : 0;
+ SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
+ return GrDistanceFieldLCDTextGeoProc::Make(*d->caps()->shaderCaps(), proxies, 1, samplerState,
+ wa, flags, localMatrix);
+}
+#endif
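
The general-transform branches above derive the anti-aliasing width by pushing the SDF gradient direction through the screen-space Jacobian of the st coordinates. A standalone CPU sketch of that computation (names illustrative, matching the shader's component expressions):

    #include <cmath>

    struct V2 { float x, y; };

    // distGrad: SDF gradient; Jdx/Jdy: screen-space derivatives of st.
    // Returns aaFactor * length(J * distGrad), where J's columns are Jdx, Jdy.
    float afwidthGeneral(V2 distGrad, V2 Jdx, V2 Jdy, float aaFactor = 0.65f) {
        // Normalize the gradient, guarding against zero length as the shader does.
        float len2 = distGrad.x * distGrad.x + distGrad.y * distGrad.y;
        if (len2 < 1e-4f) {
            distGrad = {0.7071f, 0.7071f};  // arbitrary unit vector
        } else {
            float inv = 1.0f / std::sqrt(len2);
            distGrad = {distGrad.x * inv, distGrad.y * inv};
        }
        V2 grad = {distGrad.x * Jdx.x + distGrad.y * Jdy.x,
                   distGrad.x * Jdx.y + distGrad.y * Jdy.y};
        return aaFactor * std::sqrt(grad.x * grad.x + grad.y * grad.y);
    }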
diff --git a/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h
new file mode 100644
index 0000000000..20bb6262ae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDistanceFieldGeoProc_DEFINED
+#define GrDistanceFieldGeoProc_DEFINED
+
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessor.h"
+
+class GrGLDistanceFieldA8TextGeoProc;
+class GrGLDistanceFieldPathGeoProc;
+class GrGLDistanceFieldLCDTextGeoProc;
+class GrInvariantOutput;
+
+enum GrDistanceFieldEffectFlags {
+ kSimilarity_DistanceFieldEffectFlag = 0x01, // ctm is similarity matrix
+ kScaleOnly_DistanceFieldEffectFlag = 0x02, // ctm has only scale and translate
+ kPerspective_DistanceFieldEffectFlag = 0x04, // ctm has perspective (and positions are x,y,w)
+ kUseLCD_DistanceFieldEffectFlag = 0x08, // use lcd text
+ kBGR_DistanceFieldEffectFlag = 0x10, // lcd display has bgr order
+ kPortrait_DistanceFieldEffectFlag = 0x20, // lcd display is in portrait mode (not used yet)
+ kGammaCorrect_DistanceFieldEffectFlag = 0x40, // assume gamma-correct output (linear blending)
+ kAliased_DistanceFieldEffectFlag = 0x80, // monochrome output
+
+ kInvalid_DistanceFieldEffectFlag = 0x100, // invalid state (for initialization)
+
+ kUniformScale_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag,
+ // The subset of the flags relevant to GrDistanceFieldA8TextGeoProc
+ kNonLCD_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag |
+ kPerspective_DistanceFieldEffectFlag |
+ kGammaCorrect_DistanceFieldEffectFlag |
+ kAliased_DistanceFieldEffectFlag,
+ // The subset of the flags relevant to GrDistanceFieldLCDTextGeoProc
+ kLCD_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag |
+ kPerspective_DistanceFieldEffectFlag |
+ kUseLCD_DistanceFieldEffectFlag |
+ kBGR_DistanceFieldEffectFlag |
+ kGammaCorrect_DistanceFieldEffectFlag,
+};
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a
+ * distance field texture (using a smoothed step function near 0.5).
+ * It allows explicit specification of the filtering and wrap modes (GrSamplerState). The input
+ * coords are a custom attribute. Gamma correction is handled via a texture LUT.
+ */
+class GrDistanceFieldA8TextGeoProc : public GrGeometryProcessor {
+public:
+ static constexpr int kMaxTextures = 4;
+
+ /** The local matrix should be identity if local coords are not required by the GrPipeline. */
+#ifdef SK_GAMMA_APPLY_TO_A8
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params, float lum, uint32_t flags,
+ const SkMatrix& localMatrixIfUsesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(new GrDistanceFieldA8TextGeoProc(
+ caps, proxies, numActiveProxies, params, lum, flags, localMatrixIfUsesLocalCoords));
+ }
+#else
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params, uint32_t flags,
+ const SkMatrix& localMatrixIfUsesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(new GrDistanceFieldA8TextGeoProc(
+ caps, proxies, numActiveProxies, params, flags, localMatrixIfUsesLocalCoords));
+ }
+#endif
+
+ ~GrDistanceFieldA8TextGeoProc() override {}
+
+ const char* name() const override { return "DistanceFieldA8Text"; }
+
+ const Attribute& inPosition() const { return fInPosition; }
+ const Attribute& inColor() const { return fInColor; }
+ const Attribute& inTextureCoords() const { return fInTextureCoords; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float getDistanceAdjust() const { return fDistanceAdjust; }
+#endif
+ uint32_t getFlags() const { return fFlags; }
+ const SkISize& atlasSize() const { return fAtlasSize; }
+
+ void addNewProxies(const sk_sp<GrTextureProxy>* proxies, int numProxies, const GrSamplerState&);
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrDistanceFieldA8TextGeoProc(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float distanceAdjust,
+#endif
+ uint32_t flags, const SkMatrix& localMatrix);
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSamplers[i]; }
+
+ TextureSampler fTextureSamplers[kMaxTextures];
+ SkISize fAtlasSize; // size for all textures used with fTextureSamplers[].
+ SkMatrix fLocalMatrix;
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInTextureCoords;
+ uint32_t fFlags;
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float fDistanceAdjust;
+#endif
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a
+ * distance field texture (using a smoothed step function near 0.5).
+ * It allows explicit specification of the filtering and wrap modes (GrSamplerState). The input
+ * coords are a custom attribute. No gamma correct blending is applied. Used for paths only.
+ */
+class GrDistanceFieldPathGeoProc : public GrGeometryProcessor {
+public:
+ static constexpr int kMaxTextures = 4;
+
+ /** The local matrix should be identity if local coords are not required by the GrPipeline. */
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps& caps,
+ const SkMatrix& matrix,
+ bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params, uint32_t flags) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldPathGeoProc(caps, matrix, wideColor, proxies, numActiveProxies,
+ params, flags));
+ }
+
+ ~GrDistanceFieldPathGeoProc() override {}
+
+ const char* name() const override { return "DistanceFieldPath"; }
+
+ const Attribute& inPosition() const { return fInPosition; }
+ const Attribute& inColor() const { return fInColor; }
+ const Attribute& inTextureCoords() const { return fInTextureCoords; }
+ const SkMatrix& matrix() const { return fMatrix; }
+ uint32_t getFlags() const { return fFlags; }
+ const SkISize& atlasSize() const { return fAtlasSize; }
+
+ void addNewProxies(const sk_sp<GrTextureProxy>*, int numActiveProxies, const GrSamplerState&);
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrDistanceFieldPathGeoProc(const GrShaderCaps& caps,
+ const SkMatrix& matrix,
+ bool wideColor,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState&, uint32_t flags);
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSamplers[i]; }
+
+ SkMatrix fMatrix; // view matrix if perspective, local matrix otherwise
+ TextureSampler fTextureSamplers[kMaxTextures];
+ SkISize fAtlasSize; // size for all textures used with fTextureSamplers[].
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInTextureCoords;
+ uint32_t fFlags;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+/**
+ * The output color of this effect is a modulation of the input color and samples from a
+ * distance field texture (using a smoothed step function near 0.5), adjusted for LCD displays.
+ * It allows explicit specification of the filtering and wrap modes (GrSamplerState). The input
+ * coords are a custom attribute. Gamma correction is handled via a texture LUT.
+ */
+class GrDistanceFieldLCDTextGeoProc : public GrGeometryProcessor {
+public:
+ static constexpr int kMaxTextures = 4;
+
+ struct DistanceAdjust {
+ SkScalar fR, fG, fB;
+ static DistanceAdjust Make(SkScalar r, SkScalar g, SkScalar b) {
+ DistanceAdjust result;
+ result.fR = r; result.fG = g; result.fB = b;
+ return result;
+ }
+ bool operator==(const DistanceAdjust& wa) const {
+ return (fR == wa.fR && fG == wa.fG && fB == wa.fB);
+ }
+ bool operator!=(const DistanceAdjust& wa) const {
+ return !(*this == wa);
+ }
+ };
+
+ static sk_sp<GrGeometryProcessor> Make(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies,
+ const GrSamplerState& params,
+ DistanceAdjust distanceAdjust,
+ uint32_t flags,
+ const SkMatrix& localMatrixIfUsesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldLCDTextGeoProc(caps, proxies, numActiveProxies, params,
+ distanceAdjust, flags, localMatrixIfUsesLocalCoords));
+ }
+
+ ~GrDistanceFieldLCDTextGeoProc() override {}
+
+ const char* name() const override { return "DistanceFieldLCDText"; }
+
+ const Attribute& inPosition() const { return fInPosition; }
+ const Attribute& inColor() const { return fInColor; }
+ const Attribute& inTextureCoords() const { return fInTextureCoords; }
+ DistanceAdjust getDistanceAdjust() const { return fDistanceAdjust; }
+ uint32_t getFlags() const { return fFlags; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ const SkISize& atlasSize() const { return fAtlasSize; }
+
+ void addNewProxies(const sk_sp<GrTextureProxy>*, int numActiveProxies, const GrSamplerState&);
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrDistanceFieldLCDTextGeoProc(const GrShaderCaps& caps, const sk_sp<GrTextureProxy>* proxies,
+ int numActiveProxies, const GrSamplerState& params,
+ DistanceAdjust wa, uint32_t flags, const SkMatrix& localMatrix);
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSamplers[i]; }
+
+ TextureSampler fTextureSamplers[kMaxTextures];
+ SkISize fAtlasSize; // size for all textures used with fTextureSamplers[].
+ const SkMatrix fLocalMatrix;
+ DistanceAdjust fDistanceAdjust;
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInTextureCoords;
+ uint32_t fFlags;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
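
A hedged sketch of how a caller might derive the flag bits above from a draw's matrix (the helper is illustrative; Skia's callers make this decision in their own op code):

    #include "include/core/SkMatrix.h"

    uint32_t classifyMatrix(const SkMatrix& m) {
        uint32_t flags = 0;
        if (m.hasPerspective()) {
            flags |= kPerspective_DistanceFieldEffectFlag;
        } else if (m.isScaleTranslate()) {
            // Scale-only implies similarity, matching the masks above.
            flags |= kScaleOnly_DistanceFieldEffectFlag |
                     kSimilarity_DistanceFieldEffectFlag;
        } else if (m.isSimilarity()) {
            flags |= kSimilarity_DistanceFieldEffectFlag;
        }
        return flags;
    }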
diff --git a/gfx/skia/skia/src/gpu/effects/GrEllipseEffect.fp b/gfx/skia/skia/src/gpu/effects/GrEllipseEffect.fp
new file mode 100644
index 0000000000..8e8c766d0f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrEllipseEffect.fp
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+@header {
+ #include "src/gpu/GrShaderCaps.h"
+}
+
+layout(key) in GrClipEdgeType edgeType;
+in float2 center;
+in float2 radii;
+
+float2 prevCenter;
+float2 prevRadii = float2(-1);
+// The ellipse uniform is (center.x, center.y, 1 / rx^2, 1 / ry^2)
+// The last two terms can underflow when float != fp32, so we also provide a workaround.
+uniform float4 ellipse;
+
+bool medPrecision = !sk_Caps.floatIs32Bits;
+layout(when=medPrecision) uniform float2 scale;
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, SkPoint center,
+ SkPoint radii, const GrShaderCaps& caps) {
+ // Small radii produce bad results on devices without full float.
+ if (!caps.floatIs32Bits() && (radii.fX < 0.5f || radii.fY < 0.5f)) {
+ return nullptr;
+ }
+ // Very narrow ellipses produce bad results on devices without full float
+ if (!caps.floatIs32Bits() && (radii.fX > 255*radii.fY || radii.fY > 255*radii.fX)) {
+ return nullptr;
+ }
+ // Very large ellipses produce bad results on devices without full float
+ if (!caps.floatIs32Bits() && (radii.fX > 16384 || radii.fY > 16384)) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrEllipseEffect(edgeType, center, radii));
+ }
+}
+
+@optimizationFlags { kCompatibleWithCoverageAsAlpha_OptimizationFlag }
+
+@setData(pdman) {
+ if (radii != prevRadii || center != prevCenter) {
+ float invRXSqd;
+ float invRYSqd;
+ // If we're using a scale factor to work around precision issues, choose the larger
+ // radius as the scale factor. The inv radii need to be pre-adjusted by the scale
+ // factor.
+ if (scale.isValid()) {
+ if (radii.fX > radii.fY) {
+ invRXSqd = 1.f;
+ invRYSqd = (radii.fX * radii.fX) / (radii.fY * radii.fY);
+ pdman.set2f(scale, radii.fX, 1.f / radii.fX);
+ } else {
+ invRXSqd = (radii.fY * radii.fY) / (radii.fX * radii.fX);
+ invRYSqd = 1.f;
+ pdman.set2f(scale, radii.fY, 1.f / radii.fY);
+ }
+ } else {
+ invRXSqd = 1.f / (radii.fX * radii.fX);
+ invRYSqd = 1.f / (radii.fY * radii.fY);
+ }
+ pdman.set4f(ellipse, center.fX, center.fY, invRXSqd, invRYSqd);
+ prevCenter = center;
+ prevRadii = radii;
+ }
+}
+
+void main() {
+ // d is the offset to the ellipse center
+ float2 d = sk_FragCoord.xy - ellipse.xy;
+    // If we're on a device with a "real" mediump then we'll do the distance computation in a space
+    // that is normalized by the larger radius or 128, whichever is smaller. The scale uniform holds
+    // (scale, 1/scale). The inverse squared radii uniform values are already in this normalized
+    // space. The center is not.
+ @if (medPrecision) {
+ d *= scale.y;
+ }
+ float2 Z = d * ellipse.zw;
+ // implicit is the evaluation of (x/rx)^2 + (y/ry)^2 - 1.
+ float implicit = dot(Z, d) - 1;
+ // grad_dot is the squared length of the gradient of the implicit.
+ float grad_dot = 4 * dot(Z, Z);
+ // Avoid calling inversesqrt on zero.
+ @if (medPrecision) {
+ grad_dot = max(grad_dot, 6.1036e-5);
+ } else {
+ grad_dot = max(grad_dot, 1.1755e-38);
+ }
+ float approx_dist = implicit * inversesqrt(grad_dot);
+ @if (medPrecision) {
+ approx_dist *= scale.x;
+ }
+
+ half alpha;
+ @switch (edgeType) {
+ case GrClipEdgeType::kFillBW:
+ alpha = approx_dist > 0.0 ? 0.0 : 1.0;
+ break;
+ case GrClipEdgeType::kFillAA:
+ alpha = saturate(0.5 - half(approx_dist));
+ break;
+ case GrClipEdgeType::kInverseFillBW:
+ alpha = approx_dist > 0.0 ? 1.0 : 0.0;
+ break;
+ case GrClipEdgeType::kInverseFillAA:
+ alpha = saturate(0.5 + half(approx_dist));
+ break;
+ default:
+ // hairline not supported
+ discard;
+ }
+ sk_OutColor = sk_InColor * alpha;
+}
+
+@test(testData) {
+ SkPoint center;
+ center.fX = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar rx = testData->fRandom->nextRangeF(0.f, 1000.f);
+ SkScalar ry = testData->fRandom->nextRangeF(0.f, 1000.f);
+ GrClipEdgeType et;
+ do {
+ et = (GrClipEdgeType) testData->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ } while (GrClipEdgeType::kHairlineAA == et);
+ return GrEllipseEffect::Make(et, center, SkPoint::Make(rx, ry),
+ *testData->caps()->shaderCaps());
+}
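
The main() above estimates the signed distance to the ellipse as implicit / |gradient|, a standard first-order approximation. As an illustration only (not part of the patch; the function name and the plain-C++ setting are assumptions), the same computation on the CPU looks like:

    #include <algorithm>
    #include <cmath>

    // First-order signed distance to the ellipse: F(p) / |grad F(p)|, where
    // F(x, y) = (x/rx)^2 + (y/ry)^2 - 1. Negative inside, positive outside.
    static float approx_ellipse_distance(float px, float py,
                                         float cx, float cy, float rx, float ry) {
        float dx = px - cx, dy = py - cy;                // d = p - center
        float zx = dx / (rx * rx), zy = dy / (ry * ry);  // Z = d * ellipse.zw
        float implicit = zx * dx + zy * dy - 1.0f;       // dot(Z, d) - 1
        float gradDot = 4.0f * (zx * zx + zy * zy);      // |grad F|^2
        gradDot = std::max(gradDot, 1.1755e-38f);        // avoid inversesqrt(0)
        return implicit / std::sqrt(gradDot);
    }

A fill-AA coverage value then follows the kFillAA case above: alpha = saturate(0.5 - approx_dist).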
diff --git a/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.cpp
new file mode 100644
index 0000000000..fd70616a6b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+// For brevity
+using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+using Direction = GrGaussianConvolutionFragmentProcessor::Direction;
+
+class GrGLConvolutionEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ UniformHandle fKernelUni;
+ UniformHandle fImageIncrementUni;
+ UniformHandle fBoundsUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLConvolutionEffect::emitCode(EmitArgs& args) {
+ const GrGaussianConvolutionFragmentProcessor& ce =
+ args.fFp.cast<GrGaussianConvolutionFragmentProcessor>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "ImageIncrement");
+ if (ce.useBounds()) {
+ fBoundsUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "Bounds");
+ }
+
+ int width = ce.width();
+
+ int arrayCount = (width + 3) / 4;
+ SkASSERT(4 * arrayCount >= width);
+
+ fKernelUni = uniformHandler->addUniformArray(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ "Kernel", arrayCount);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
+ fragBuilder->codeAppendf("%s = half4(0, 0, 0, 0);", args.fOutputColor);
+
+ const GrShaderVar& kernel = uniformHandler->getUniformVariable(fKernelUni);
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+
+ fragBuilder->codeAppendf("float2 coord = %s - %d.0 * %s;", coords2D.c_str(), ce.radius(), imgInc);
+ fragBuilder->codeAppend("float2 coordSampled = half2(0, 0);");
+
+ // Manually unroll loop because some drivers don't; yields 20-30% speedup.
+ const char* kVecSuffix[4] = {".x", ".y", ".z", ".w"};
+ for (int i = 0; i < width; i++) {
+ SkString index;
+ SkString kernelIndex;
+ index.appendS32(i / 4);
+ kernel.appendArrayAccess(index.c_str(), &kernelIndex);
+ kernelIndex.append(kVecSuffix[i & 0x3]);
+
+ fragBuilder->codeAppend("coordSampled = coord;");
+ if (ce.useBounds()) {
+            // We used to compute a bool indicating whether we're in bounds or not, cast it to a
+            // float, and then multiply weight*texture_sample by that float. However, the Adreno 430
+            // appears to have a bug that causes corruption with that approach.
+ const char* bounds = uniformHandler->getUniformCStr(fBoundsUni);
+ const char* component = ce.direction() == Direction::kY ? "y" : "x";
+
+ switch (ce.mode()) {
+ case GrTextureDomain::kClamp_Mode: {
+ fragBuilder->codeAppendf("coordSampled.%s = clamp(coord.%s, %s.x, %s.y);\n",
+ component, component, bounds, bounds);
+ break;
+ }
+ case GrTextureDomain::kRepeat_Mode: {
+ fragBuilder->codeAppendf("coordSampled.%s = "
+ "mod(coord.%s - %s.x, %s.y - %s.x) + %s.x;\n",
+ component, component, bounds, bounds, bounds, bounds);
+ break;
+ }
+ case GrTextureDomain::kDecal_Mode: {
+ fragBuilder->codeAppendf("if (coord.%s >= %s.x && coord.%s <= %s.y) {",
+ component, bounds, component, bounds);
+ break;
+ }
+ default: {
+ SK_ABORT("Unsupported operation.");
+ }
+ }
+ }
+ fragBuilder->codeAppendf("%s += ", args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "coordSampled");
+ fragBuilder->codeAppendf(" * %s;\n", kernelIndex.c_str());
+ if (GrTextureDomain::kDecal_Mode == ce.mode()) {
+ fragBuilder->codeAppend("}");
+ }
+ fragBuilder->codeAppendf("coord += %s;\n", imgInc);
+ }
+ fragBuilder->codeAppendf("%s *= %s;\n", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLConvolutionEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ const GrGaussianConvolutionFragmentProcessor& conv =
+ processor.cast<GrGaussianConvolutionFragmentProcessor>();
+ GrSurfaceProxy* proxy = conv.textureSampler(0).proxy();
+ GrTexture& texture = *proxy->peekTexture();
+
+ float imageIncrement[2] = {0};
+ float ySign = proxy->origin() != kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f;
+ switch (conv.direction()) {
+ case Direction::kX:
+ imageIncrement[0] = 1.0f / texture.width();
+ break;
+ case Direction::kY:
+ imageIncrement[1] = ySign / texture.height();
+ break;
+ default:
+ SK_ABORT("Unknown filter direction.");
+ }
+ pdman.set2fv(fImageIncrementUni, 1, imageIncrement);
+ if (conv.useBounds()) {
+ float bounds[2] = {0};
+ bounds[0] = conv.bounds()[0];
+ bounds[1] = conv.bounds()[1];
+ if (GrTextureDomain::kClamp_Mode == conv.mode()) {
+ bounds[0] += SK_ScalarHalf;
+ bounds[1] -= SK_ScalarHalf;
+ }
+ if (Direction::kX == conv.direction()) {
+ SkScalar inv = SkScalarInvert(SkIntToScalar(texture.width()));
+ bounds[0] *= inv;
+ bounds[1] *= inv;
+ } else {
+ SkScalar inv = SkScalarInvert(SkIntToScalar(texture.height()));
+ if (proxy->origin() != kTopLeft_GrSurfaceOrigin) {
+ float tmp = bounds[0];
+ bounds[0] = 1.0f - (inv * bounds[1]);
+ bounds[1] = 1.0f - (inv * tmp);
+ } else {
+ bounds[0] *= inv;
+ bounds[1] *= inv;
+ }
+ }
+
+ SkASSERT(bounds[0] <= bounds[1]);
+ pdman.set2f(fBoundsUni, bounds[0], bounds[1]);
+ }
+ int width = conv.width();
+
+ int arrayCount = (width + 3) / 4;
+ SkASSERT(4 * arrayCount >= width);
+ pdman.set4fv(fKernelUni, arrayCount, conv.kernel());
+}
+
+void GrGLConvolutionEffect::GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrGaussianConvolutionFragmentProcessor& conv =
+ processor.cast<GrGaussianConvolutionFragmentProcessor>();
+ uint32_t key = conv.radius();
+ key <<= 3;
+ if (conv.useBounds()) {
+ key |= Direction::kY == conv.direction() ? 0x4 : 0x0;
+ }
+ key |= static_cast<uint32_t>(conv.mode());
+ b->add32(key);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+static void fill_in_1D_gaussian_kernel(float* kernel, int width, float gaussianSigma, int radius) {
+ const float twoSigmaSqrd = 2.0f * gaussianSigma * gaussianSigma;
+ if (SkScalarNearlyZero(twoSigmaSqrd, SK_ScalarNearlyZero)) {
+ for (int i = 0; i < width; ++i) {
+ kernel[i] = 0.0f;
+ }
+ return;
+ }
+
+ const float denom = 1.0f / twoSigmaSqrd;
+
+ float sum = 0.0f;
+ for (int i = 0; i < width; ++i) {
+ float x = static_cast<float>(i - radius);
+        // Note that the constant term (1/sqrt(2*pi*sigma^2)) of the Gaussian
+        // is dropped here, since we renormalize the kernel below.
+ kernel[i] = sk_float_exp(-x * x * denom);
+ sum += kernel[i];
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < width; ++i) {
+ kernel[i] *= scale;
+ }
+}
+
+GrGaussianConvolutionFragmentProcessor::GrGaussianConvolutionFragmentProcessor(
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ Direction direction,
+ int radius,
+ float gaussianSigma,
+ GrTextureDomain::Mode mode,
+ int bounds[2])
+ : INHERITED(kGrGaussianConvolutionFragmentProcessor_ClassID,
+ ModulateForSamplerOptFlags(srcColorType,
+ mode == GrTextureDomain::kDecal_Mode))
+ , fCoordTransform(proxy.get())
+ , fTextureSampler(std::move(proxy))
+ , fRadius(radius)
+ , fDirection(direction)
+ , fMode(mode) {
+ // Make sure the sampler's ctor uses the clamp wrap mode
+ SkASSERT(fTextureSampler.samplerState().wrapModeX() == GrSamplerState::WrapMode::kClamp &&
+ fTextureSampler.samplerState().wrapModeY() == GrSamplerState::WrapMode::kClamp);
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ SkASSERT(radius <= kMaxKernelRadius);
+
+ fill_in_1D_gaussian_kernel(fKernel, this->width(), gaussianSigma, this->radius());
+
+ memcpy(fBounds, bounds, sizeof(fBounds));
+}
+
+GrGaussianConvolutionFragmentProcessor::GrGaussianConvolutionFragmentProcessor(
+ const GrGaussianConvolutionFragmentProcessor& that)
+ : INHERITED(kGrGaussianConvolutionFragmentProcessor_ClassID, that.optimizationFlags())
+ , fCoordTransform(that.fCoordTransform)
+ , fTextureSampler(that.fTextureSampler)
+ , fRadius(that.fRadius)
+ , fDirection(that.fDirection)
+ , fMode(that.fMode) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ memcpy(fKernel, that.fKernel, that.width() * sizeof(float));
+ memcpy(fBounds, that.fBounds, sizeof(fBounds));
+}
+
+void GrGaussianConvolutionFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConvolutionEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrGaussianConvolutionFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLConvolutionEffect;
+}
+
+bool GrGaussianConvolutionFragmentProcessor::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrGaussianConvolutionFragmentProcessor& s =
+ sBase.cast<GrGaussianConvolutionFragmentProcessor>();
+ return (this->radius() == s.radius() && this->direction() == s.direction() &&
+ this->mode() == s.mode() &&
+ 0 == memcmp(fBounds, s.fBounds, sizeof(fBounds)) &&
+ 0 == memcmp(fKernel, s.fKernel, this->width() * sizeof(float)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrGaussianConvolutionFragmentProcessor);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrGaussianConvolutionFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+
+ int bounds[2];
+ int modeIdx = d->fRandom->nextRangeU(0, GrTextureDomain::kModeCount-1);
+
+ Direction dir;
+ if (d->fRandom->nextBool()) {
+ dir = Direction::kX;
+ bounds[0] = d->fRandom->nextRangeU(0, proxy->width()-2);
+ bounds[1] = d->fRandom->nextRangeU(bounds[0]+1, proxy->width()-1);
+ } else {
+ dir = Direction::kY;
+ bounds[0] = d->fRandom->nextRangeU(0, proxy->height()-2);
+ bounds[1] = d->fRandom->nextRangeU(bounds[0]+1, proxy->height()-1);
+ }
+
+ int radius = d->fRandom->nextRangeU(1, kMaxKernelRadius);
+ float sigma = radius / 3.f;
+
+ return GrGaussianConvolutionFragmentProcessor::Make(
+ std::move(proxy), d->textureProxyColorType(texIdx),
+ dir, radius, sigma, static_cast<GrTextureDomain::Mode>(modeIdx), bounds);
+}
+#endif
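
The unrolled shader loop in emitCode() is the GPU analogue of an ordinary 1D convolution with clamped sampling. A minimal CPU sketch for a single-channel row (function name hypothetical; only kClamp_Mode is shown):

    #include <algorithm>
    #include <vector>

    // kernel holds 2 * radius + 1 weights, as produced by
    // fill_in_1D_gaussian_kernel(); e.g. radius 1, sigma 1.0 yields roughly
    // {0.274, 0.452, 0.274} after normalization.
    static std::vector<float> convolve_row_clamp(const std::vector<float>& src,
                                                 const float* kernel, int radius) {
        const int n = static_cast<int>(src.size());
        std::vector<float> dst(n, 0.0f);
        for (int x = 0; x < n; ++x) {
            float sum = 0.0f;
            for (int i = -radius; i <= radius; ++i) {
                int sx = std::min(std::max(x + i, 0), n - 1);  // kClamp_Mode
                sum += kernel[i + radius] * src[sx];
            }
            dst[x] = sum;
        }
        return dst;
    }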
diff --git a/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h
new file mode 100644
index 0000000000..843ed30d65
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrGaussianConvolutionFragmentProcessor.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGaussianConvolutionFragmentProcessor_DEFINED
+#define GrGaussianConvolutionFragmentProcessor_DEFINED
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+
+/**
+ * A 1D Gaussian convolution effect. The kernel is computed as an array of 2 * half-width + 1
+ * weights. Each texel is multiplied by its weight and summed to determine the filtered color. The
+ * output color is set to a modulation of the filtered and input colors.
+ */
+class GrGaussianConvolutionFragmentProcessor : public GrFragmentProcessor {
+public:
+ enum class Direction { kX, kY };
+
+ /// Convolve with a Gaussian kernel
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ Direction dir,
+ int halfWidth,
+ float gaussianSigma,
+ GrTextureDomain::Mode mode,
+ int* bounds) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrGaussianConvolutionFragmentProcessor(
+ std::move(proxy), srcColorType, dir, halfWidth, gaussianSigma, mode, bounds));
+ }
+
+ const float* kernel() const { return fKernel; }
+
+ const int* bounds() const { return fBounds; }
+ bool useBounds() const { return fMode != GrTextureDomain::kIgnore_Mode; }
+ int radius() const { return fRadius; }
+ int width() const { return 2 * fRadius + 1; }
+ Direction direction() const { return fDirection; }
+
+ GrTextureDomain::Mode mode() const { return fMode; }
+
+ const char* name() const override { return "GaussianConvolution"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("dir: %s radius: %d bounds: [%d %d]",
+ Direction::kX == fDirection ? "X" : "Y",
+ fRadius,
+ fBounds[0], fBounds[1]);
+ return str;
+ }
+#endif
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrGaussianConvolutionFragmentProcessor(*this));
+ }
+
+    // This was decided based on the minimum allowed value for the maximum number of
+    // texture samples per fragment program run in DX9SM2 (32). A sigma of 4.0 on a
+    // blur filter gives a kernel width of 25, while a sigma of 5.0 would require a
+    // kernel wider than 32.
+ static const int kMaxKernelRadius = 12;
+    // With C++11 we could have a constexpr version of WidthFromRadius()
+    // and not have to duplicate this calculation.
+ static const int kMaxKernelWidth = 2 * kMaxKernelRadius + 1;
+
+private:
+ /// Convolve with a Gaussian kernel
+ GrGaussianConvolutionFragmentProcessor(sk_sp<GrTextureProxy>, GrColorType srcColorType,
+ Direction, int halfWidth, float gaussianSigma,
+ GrTextureDomain::Mode mode, int bounds[2]);
+
+ explicit GrGaussianConvolutionFragmentProcessor(const GrGaussianConvolutionFragmentProcessor&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ GrCoordTransform fCoordTransform;
+ TextureSampler fTextureSampler;
+ // TODO: Inline the kernel constants into the generated shader code. This may involve pulling
+ // some of the logic from SkGpuBlurUtils into this class related to radius/sigma calculations.
+ float fKernel[kMaxKernelWidth];
+ int fBounds[2];
+ int fRadius;
+ Direction fDirection;
+ GrTextureDomain::Mode fMode;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
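
The kMaxKernelRadius comment above alludes to a constexpr WidthFromRadius(); a sketch of what it could look like (the static_assert is illustrative, restating the sigma-4.0 arithmetic from that comment):

    // Kernel width for a given radius: the center tap plus 'radius' taps per side.
    constexpr int WidthFromRadius(int radius) { return 2 * radius + 1; }

    static_assert(WidthFromRadius(12) == 25, "a sigma of 4.0 fits the 32-sample budget");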
diff --git a/gfx/skia/skia/src/gpu/effects/GrLumaColorFilterEffect.fp b/gfx/skia/skia/src/gpu/effects/GrLumaColorFilterEffect.fp
new file mode 100644
index 0000000000..1548b26e0b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrLumaColorFilterEffect.fp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+@optimizationFlags {
+ kConstantOutputForConstantInput_OptimizationFlag
+}
+
+@class {
+ #include "include/private/SkColorData.h"
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ float luma = SK_ITU_BT709_LUM_COEFF_R * input.fR +
+ SK_ITU_BT709_LUM_COEFF_G * input.fG +
+ SK_ITU_BT709_LUM_COEFF_B * input.fB;
+ return { 0, 0, 0, SkTPin(luma, 0.0f, 1.0f) };
+ }
+}
+
+void main() {
+ const half3 SK_ITU_BT709_LUM_COEFF = half3(0.2126, 0.7152, 0.0722);
+ half luma = saturate(dot(SK_ITU_BT709_LUM_COEFF, sk_InColor.rgb));
+ sk_OutColor = half4(0, 0, 0, luma);
+}
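
Both main() and constantOutputForConstantInput() above compute the same BT.709 luma. A minimal CPU-side sketch (the struct and function names are assumptions):

    #include <algorithm>

    struct Color4f { float r, g, b, a; };

    // Black RGB with alpha set to the BT.709 luma of the input, mirroring the
    // shader's dot(SK_ITU_BT709_LUM_COEFF, rgb) followed by saturate()/SkTPin().
    static Color4f luma_filter(const Color4f& in) {
        float luma = 0.2126f * in.r + 0.7152f * in.g + 0.0722f * in.b;
        luma = std::min(std::max(luma, 0.0f), 1.0f);
        return {0.0f, 0.0f, 0.0f, luma};
    }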
diff --git a/gfx/skia/skia/src/gpu/effects/GrMagnifierEffect.fp b/gfx/skia/skia/src/gpu/effects/GrMagnifierEffect.fp
new file mode 100644
index 0000000000..e6a9aa257a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMagnifierEffect.fp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in uniform sampler2D src;
+layout(ctype=SkIRect) in int4 bounds;
+uniform float4 boundsUniform;
+layout(ctype=SkRect) in float4 srcRect;
+in uniform float xInvZoom;
+in uniform float yInvZoom;
+in uniform float xInvInset;
+in uniform float yInvInset;
+
+uniform half2 offset;
+
+@coordTransform(src) {
+ SkMatrix::I()
+}
+
+void main() {
+ float2 coord = sk_TransformedCoords2D[0];
+ float2 zoom_coord = offset + coord * float2(xInvZoom, yInvZoom);
+ float2 delta = (coord - boundsUniform.xy) * boundsUniform.zw;
+ delta = min(delta, half2(1.0, 1.0) - delta);
+ delta *= float2(xInvInset, yInvInset);
+
+ float weight = 0.0;
+ if (delta.s < 2.0 && delta.t < 2.0) {
+ delta = half2(2.0, 2.0) - delta;
+ float dist = length(delta);
+ dist = max(2.0 - dist, 0.0);
+ weight = min(dist * dist, 1.0);
+ } else {
+ float2 delta_squared = delta * delta;
+ weight = min(min(delta_squared.x, delta_squared.y), 1.0);
+ }
+
+ sk_OutColor = sample(src, mix(coord, zoom_coord, weight));
+}
+
+@setData(pdman) {
+ SkScalar invW = 1.0f / src.width();
+ SkScalar invH = 1.0f / src.height();
+
+ {
+ SkScalar y = srcRect.y() * invH;
+ if (srcProxy.origin() != kTopLeft_GrSurfaceOrigin) {
+ y = 1.0f - (srcRect.height() / bounds.height()) - y;
+ }
+
+ pdman.set2f(offset, srcRect.x() * invW, y);
+ }
+
+ {
+ SkScalar y = bounds.y() * invH;
+ SkScalar hSign = 1.f;
+ if (srcProxy.origin() != kTopLeft_GrSurfaceOrigin) {
+ y = 1.0f - bounds.y() * invH;
+ hSign = -1.f;
+ }
+
+ pdman.set4f(boundsUniform,
+ bounds.x() * invW,
+ y,
+ SkIntToScalar(src.width()) / bounds.width(),
+ hSign * SkIntToScalar(src.height()) / bounds.height());
+ }
+}
+
+@test(d) {
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(0);
+ const int kMaxWidth = 200;
+ const int kMaxHeight = 200;
+ const SkScalar kMaxInset = 20.0f;
+ uint32_t width = d->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = d->fRandom->nextULessThan(kMaxHeight);
+ SkScalar inset = d->fRandom->nextRangeScalar(1.0f, kMaxInset);
+
+    SkIRect bounds = SkIRect::MakeWH(kMaxWidth, kMaxHeight);
+ SkRect srcRect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+
+ auto effect = GrMagnifierEffect::Make(std::move(proxy),
+ bounds,
+ srcRect,
+ srcRect.width() / bounds.width(),
+ srcRect.height() / bounds.height(),
+ bounds.width() / inset,
+ bounds.height() / inset);
+ SkASSERT(effect);
+ return effect;
+}
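
The weight computed in main() controls the blend between the untransformed and zoomed coordinates: 1 in the interior (full zoom), falling to 0 at the bounds edge. A CPU sketch of just the weight term, assuming delta has already been folded toward the nearer edges and scaled by (xInvInset, yInvInset) as in the shader (function name hypothetical):

    #include <algorithm>
    #include <cmath>

    static float magnifier_weight(float dx, float dy) {
        if (dx < 2.0f && dy < 2.0f) {
            // Both axes near an edge (corner region): radial falloff.
            float ex = 2.0f - dx, ey = 2.0f - dy;
            float dist = std::max(2.0f - std::sqrt(ex * ex + ey * ey), 0.0f);
            return std::min(dist * dist, 1.0f);
        }
        // Otherwise: quadratic falloff along the nearer axis.
        return std::min(std::min(dx * dx, dy * dy), 1.0f);
    }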
diff --git a/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp
new file mode 100644
index 0000000000..86df7a02ec
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/gpu/effects/GrMatrixConvolutionEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrGLMatrixConvolutionEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fKernelUni;
+ UniformHandle fImageIncrementUni;
+ UniformHandle fKernelOffsetUni;
+ UniformHandle fGainUni;
+ UniformHandle fBiasUni;
+ GrTextureDomain::GLDomain fDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLMatrixConvolutionEffect::emitCode(EmitArgs& args) {
+ const GrMatrixConvolutionEffect& mce = args.fFp.cast<GrMatrixConvolutionEffect>();
+ const GrTextureDomain& domain = mce.domain();
+
+ int kWidth = mce.kernelSize().width();
+ int kHeight = mce.kernelSize().height();
+
+ int arrayCount = (kWidth * kHeight + 3) / 4;
+ SkASSERT(4 * arrayCount >= kWidth * kHeight);
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "ImageIncrement");
+ fKernelUni = uniformHandler->addUniformArray(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ "Kernel",
+ arrayCount);
+ fKernelOffsetUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "KernelOffset");
+ fGainUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "Gain");
+ fBiasUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "Bias");
+
+ const char* kernelOffset = uniformHandler->getUniformCStr(fKernelOffsetUni);
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+ const char* kernel = uniformHandler->getUniformCStr(fKernelUni);
+ const char* gain = uniformHandler->getUniformCStr(fGainUni);
+ const char* bias = uniformHandler->getUniformCStr(fBiasUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppend("half4 sum = half4(0, 0, 0, 0);");
+ fragBuilder->codeAppendf("float2 coord = %s - %s * %s;", coords2D.c_str(), kernelOffset, imgInc);
+ fragBuilder->codeAppend("half4 c;");
+
+ const char* kVecSuffix[4] = { ".x", ".y", ".z", ".w" };
+ for (int y = 0; y < kHeight; y++) {
+ for (int x = 0; x < kWidth; x++) {
+ GrGLSLShaderBuilder::ShaderBlock block(fragBuilder);
+ int offset = y*kWidth + x;
+
+ fragBuilder->codeAppendf("half k = %s[%d]%s;", kernel, offset / 4,
+ kVecSuffix[offset & 0x3]);
+ SkString coord;
+ coord.printf("coord + half2(%d, %d) * %s", x, y, imgInc);
+ fDomain.sampleTexture(fragBuilder,
+ uniformHandler,
+ args.fShaderCaps,
+ domain,
+ "c",
+ coord,
+ args.fTexSamplers[0]);
+ if (!mce.convolveAlpha()) {
+ fragBuilder->codeAppend("c.rgb /= c.a;");
+ fragBuilder->codeAppend("c.rgb = saturate(c.rgb);");
+ }
+ fragBuilder->codeAppend("sum += c * k;");
+ }
+ }
+ if (mce.convolveAlpha()) {
+ fragBuilder->codeAppendf("%s = sum * %s + %s;", args.fOutputColor, gain, bias);
+ fragBuilder->codeAppendf("%s.a = saturate(%s.a);", args.fOutputColor, args.fOutputColor);
+ fragBuilder->codeAppendf("%s.rgb = clamp(%s.rgb, 0.0, %s.a);",
+ args.fOutputColor, args.fOutputColor, args.fOutputColor);
+ } else {
+ fDomain.sampleTexture(fragBuilder,
+ uniformHandler,
+ args.fShaderCaps,
+ domain,
+ "c",
+ coords2D,
+ args.fTexSamplers[0]);
+ fragBuilder->codeAppendf("%s.a = c.a;", args.fOutputColor);
+ fragBuilder->codeAppendf("%s.rgb = saturate(sum.rgb * %s + %s);", args.fOutputColor, gain, bias);
+ fragBuilder->codeAppendf("%s.rgb *= %s.a;", args.fOutputColor, args.fOutputColor);
+ }
+ fragBuilder->codeAppendf("%s *= %s;\n", args.fOutputColor, args.fInputColor);
+}
+
+void GrGLMatrixConvolutionEffect::GenKey(const GrProcessor& processor,
+ const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ const GrMatrixConvolutionEffect& m = processor.cast<GrMatrixConvolutionEffect>();
+ SkASSERT(m.kernelSize().width() <= 0x7FFF && m.kernelSize().height() <= 0xFFFF);
+ uint32_t key = m.kernelSize().width() << 16 | m.kernelSize().height();
+ key |= m.convolveAlpha() ? 1U << 31 : 0;
+ b->add32(key);
+ b->add32(GrTextureDomain::GLDomain::DomainKey(m.domain()));
+}
+
+void GrGLMatrixConvolutionEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ const GrMatrixConvolutionEffect& conv = processor.cast<GrMatrixConvolutionEffect>();
+ GrTextureProxy* proxy = conv.textureSampler(0).proxy();
+ GrTexture* texture = proxy->peekTexture();
+
+ float imageIncrement[2];
+ float ySign = proxy->origin() == kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f;
+ imageIncrement[0] = 1.0f / texture->width();
+ imageIncrement[1] = ySign / texture->height();
+ pdman.set2fv(fImageIncrementUni, 1, imageIncrement);
+ pdman.set2fv(fKernelOffsetUni, 1, conv.kernelOffset());
+ int kernelCount = conv.kernelSize().width() * conv.kernelSize().height();
+ int arrayCount = (kernelCount + 3) / 4;
+ SkASSERT(4 * arrayCount >= kernelCount);
+ pdman.set4fv(fKernelUni, arrayCount, conv.kernel());
+ pdman.set1f(fGainUni, conv.gain());
+ pdman.set1f(fBiasUni, conv.bias());
+ fDomain.setData(pdman, conv.domain(), proxy, conv.textureSampler(0).samplerState());
+}
+
+GrMatrixConvolutionEffect::GrMatrixConvolutionEffect(sk_sp<GrTextureProxy> srcProxy,
+ const SkIRect& srcBounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha)
+ // To advertise either the modulation or opaqueness optimizations we'd have to examine the
+ // parameters.
+ : INHERITED(kGrMatrixConvolutionEffect_ClassID, kNone_OptimizationFlags)
+ , fCoordTransform(srcProxy.get())
+ , fDomain(srcProxy.get(), GrTextureDomain::MakeTexelDomain(srcBounds, tileMode),
+ tileMode, tileMode)
+ , fTextureSampler(std::move(srcProxy))
+ , fKernelSize(kernelSize)
+ , fGain(SkScalarToFloat(gain))
+ , fBias(SkScalarToFloat(bias) / 255.0f)
+ , fConvolveAlpha(convolveAlpha) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ for (int i = 0; i < kernelSize.width() * kernelSize.height(); i++) {
+ fKernel[i] = SkScalarToFloat(kernel[i]);
+ }
+ fKernelOffset[0] = static_cast<float>(kernelOffset.x());
+ fKernelOffset[1] = static_cast<float>(kernelOffset.y());
+}
+
+GrMatrixConvolutionEffect::GrMatrixConvolutionEffect(const GrMatrixConvolutionEffect& that)
+ : INHERITED(kGrMatrixConvolutionEffect_ClassID, kNone_OptimizationFlags)
+ , fCoordTransform(that.fCoordTransform)
+ , fDomain(that.fDomain)
+ , fTextureSampler(that.fTextureSampler)
+ , fKernelSize(that.fKernelSize)
+ , fGain(that.fGain)
+ , fBias(that.fBias)
+ , fConvolveAlpha(that.fConvolveAlpha) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+ memcpy(fKernel, that.fKernel, sizeof(float) * fKernelSize.width() * fKernelSize.height());
+ memcpy(fKernelOffset, that.fKernelOffset, sizeof(fKernelOffset));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrMatrixConvolutionEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMatrixConvolutionEffect(*this));
+}
+
+void GrMatrixConvolutionEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLMatrixConvolutionEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrMatrixConvolutionEffect::onCreateGLSLInstance() const {
+ return new GrGLMatrixConvolutionEffect;
+}
+
+bool GrMatrixConvolutionEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMatrixConvolutionEffect& s = sBase.cast<GrMatrixConvolutionEffect>();
+ return fKernelSize == s.kernelSize() &&
+ !memcmp(fKernel, s.kernel(),
+ fKernelSize.width() * fKernelSize.height() * sizeof(float)) &&
+ fGain == s.gain() &&
+ fBias == s.bias() &&
+ !memcmp(fKernelOffset, s.kernelOffset(), sizeof(fKernelOffset)) &&
+ fConvolveAlpha == s.convolveAlpha() &&
+ fDomain == s.domain();
+}
+
+static void fill_in_1D_gaussian_kernel_with_stride(float* kernel, int size, int stride,
+ float twoSigmaSqrd) {
+ SkASSERT(!SkScalarNearlyZero(twoSigmaSqrd, SK_ScalarNearlyZero));
+
+ const float sigmaDenom = 1.0f / twoSigmaSqrd;
+ const int radius = size / 2;
+
+ float sum = 0.0f;
+ for (int i = 0; i < size; ++i) {
+ float term = static_cast<float>(i - radius);
+        // Note that the constant term (1/sqrt(2*pi*sigma^2)) of the Gaussian
+        // is dropped here, since we renormalize the kernel below.
+ kernel[i * stride] = sk_float_exp(-term * term * sigmaDenom);
+ sum += kernel[i * stride];
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < size; ++i) {
+ kernel[i * stride] *= scale;
+ }
+}
+
+static void fill_in_2D_gaussian_kernel(float* kernel, int width, int height,
+ SkScalar sigmaX, SkScalar sigmaY) {
+ SkASSERT(width * height <= MAX_KERNEL_SIZE);
+ const float twoSigmaSqrdX = 2.0f * SkScalarToFloat(SkScalarSquare(sigmaX));
+ const float twoSigmaSqrdY = 2.0f * SkScalarToFloat(SkScalarSquare(sigmaY));
+
+ // TODO: in all of these degenerate cases we're uploading (and using) a whole lot of zeros.
+ if (SkScalarNearlyZero(twoSigmaSqrdX, SK_ScalarNearlyZero) ||
+ SkScalarNearlyZero(twoSigmaSqrdY, SK_ScalarNearlyZero)) {
+ // In this case the 2D Gaussian degenerates to a 1D Gaussian (in X or Y) or a point
+ SkASSERT(3 == width || 3 == height);
+ memset(kernel, 0, width*height*sizeof(float));
+
+ if (SkScalarNearlyZero(twoSigmaSqrdX, SK_ScalarNearlyZero) &&
+ SkScalarNearlyZero(twoSigmaSqrdY, SK_ScalarNearlyZero)) {
+ // A point
+ SkASSERT(3 == width && 3 == height);
+ kernel[4] = 1.0f;
+ } else if (SkScalarNearlyZero(twoSigmaSqrdX, SK_ScalarNearlyZero)) {
+ // A 1D Gaussian in Y
+ SkASSERT(3 == width);
+ // Down the middle column of the kernel with a stride of width
+ fill_in_1D_gaussian_kernel_with_stride(&kernel[1], height, width, twoSigmaSqrdY);
+ } else {
+ // A 1D Gaussian in X
+ SkASSERT(SkScalarNearlyZero(twoSigmaSqrdY, SK_ScalarNearlyZero));
+ SkASSERT(3 == height);
+ // Down the middle row of the kernel with a stride of 1
+ fill_in_1D_gaussian_kernel_with_stride(&kernel[width], width, 1, twoSigmaSqrdX);
+ }
+ return;
+ }
+
+ const float sigmaXDenom = 1.0f / twoSigmaSqrdX;
+ const float sigmaYDenom = 1.0f / twoSigmaSqrdY;
+ const int xRadius = width / 2;
+ const int yRadius = height / 2;
+
+ float sum = 0.0f;
+ for (int x = 0; x < width; x++) {
+ float xTerm = static_cast<float>(x - xRadius);
+ xTerm = xTerm * xTerm * sigmaXDenom;
+ for (int y = 0; y < height; y++) {
+ float yTerm = static_cast<float>(y - yRadius);
+ float xyTerm = sk_float_exp(-(xTerm + yTerm * yTerm * sigmaYDenom));
+            // Note that the constant term (1/sqrt(2*pi*sigma^2)) of the Gaussian
+            // is dropped here, since we renormalize the kernel below.
+ kernel[y * width + x] = xyTerm;
+ sum += xyTerm;
+ }
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < width * height; ++i) {
+ kernel[i] *= scale;
+ }
+}
+
+// Static function to create a 2D convolution
+std::unique_ptr<GrFragmentProcessor> GrMatrixConvolutionEffect::MakeGaussian(
+ sk_sp<GrTextureProxy> srcProxy,
+ const SkIRect& srcBounds,
+ const SkISize& kernelSize,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha,
+ SkScalar sigmaX,
+ SkScalar sigmaY) {
+ float kernel[MAX_KERNEL_SIZE];
+
+ fill_in_2D_gaussian_kernel(kernel, kernelSize.width(), kernelSize.height(), sigmaX, sigmaY);
+
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMatrixConvolutionEffect(std::move(srcProxy), srcBounds, kernelSize, kernel,
+ gain, bias, kernelOffset, tileMode, convolveAlpha));
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMatrixConvolutionEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrMatrixConvolutionEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+
+ int width = d->fRandom->nextRangeU(1, MAX_KERNEL_SIZE);
+ int height = d->fRandom->nextRangeU(1, MAX_KERNEL_SIZE / width);
+ SkISize kernelSize = SkISize::Make(width, height);
+ std::unique_ptr<SkScalar[]> kernel(new SkScalar[width * height]);
+ for (int i = 0; i < width * height; i++) {
+ kernel.get()[i] = d->fRandom->nextSScalar1();
+ }
+ SkScalar gain = d->fRandom->nextSScalar1();
+ SkScalar bias = d->fRandom->nextSScalar1();
+ SkIPoint kernelOffset = SkIPoint::Make(d->fRandom->nextRangeU(0, kernelSize.width()),
+ d->fRandom->nextRangeU(0, kernelSize.height()));
+ SkIRect bounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()),
+ d->fRandom->nextRangeU(0, proxy->width()),
+ d->fRandom->nextRangeU(0, proxy->height()));
+ GrTextureDomain::Mode tileMode =
+ static_cast<GrTextureDomain::Mode>(d->fRandom->nextRangeU(0, 2));
+ bool convolveAlpha = d->fRandom->nextBool();
+ return GrMatrixConvolutionEffect::Make(std::move(proxy),
+ bounds,
+ kernelSize,
+ kernel.get(),
+ gain,
+ bias,
+ kernelOffset,
+ tileMode,
+ convolveAlpha);
+}
+#endif
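
emitCode() above unrolls the following per-texel computation into straight-line shader code. A reference CPU sketch for the convolveAlpha case, with sampling and domain handling elided (names are assumptions):

    #include <algorithm>

    struct Color4f { float r, g, b, a; };

    // sum(kernel[i] * sample[i]) * gain + bias, clamped as the shader does:
    // alpha saturated, RGB clamped to [0, alpha] to keep the result premultiplied.
    static Color4f convolve_texel(const Color4f* samples, const float* kernel,
                                  int kernelCount, float gain, float bias) {
        Color4f sum = {0, 0, 0, 0};
        for (int i = 0; i < kernelCount; ++i) {
            sum.r += samples[i].r * kernel[i];
            sum.g += samples[i].g * kernel[i];
            sum.b += samples[i].b * kernel[i];
            sum.a += samples[i].a * kernel[i];
        }
        Color4f out;
        out.a = std::min(std::max(sum.a * gain + bias, 0.0f), 1.0f);
        out.r = std::min(std::max(sum.r * gain + bias, 0.0f), out.a);
        out.g = std::min(std::max(sum.g * gain + bias, 0.0f), out.a);
        out.b = std::min(std::max(sum.b * gain + bias, 0.0f), out.a);
        return out;
    }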
diff --git a/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h
new file mode 100644
index 0000000000..212d5c0973
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMatrixConvolutionEffect_DEFINED
+#define GrMatrixConvolutionEffect_DEFINED
+
+#include "src/gpu/effects/GrTextureDomain.h"
+
+// A little bit less than the minimum # uniforms required by DX9SM2 (32).
+// Allows for a 5x5 kernel (or 25x1, for that matter).
+#define MAX_KERNEL_SIZE 25
+
+class GrMatrixConvolutionEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> srcProxy,
+ const SkIRect& srcBounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMatrixConvolutionEffect(std::move(srcProxy), srcBounds, kernelSize, kernel,
+ gain, bias, kernelOffset, tileMode, convolveAlpha));
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> MakeGaussian(sk_sp<GrTextureProxy> srcProxy,
+ const SkIRect& srcBounds,
+ const SkISize& kernelSize,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha,
+ SkScalar sigmaX,
+ SkScalar sigmaY);
+
+ const SkIRect& bounds() const { return fBounds; }
+ const SkISize& kernelSize() const { return fKernelSize; }
+ const float* kernelOffset() const { return fKernelOffset; }
+ const float* kernel() const { return fKernel; }
+ float gain() const { return fGain; }
+ float bias() const { return fBias; }
+ bool convolveAlpha() const { return fConvolveAlpha; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+ const char* name() const override { return "MatrixConvolution"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ // srcProxy is the texture that is going to be convolved
+ // srcBounds is the subset of 'srcProxy' that will be used (e.g., for clamp mode)
+ GrMatrixConvolutionEffect(sk_sp<GrTextureProxy> srcProxy,
+ const SkIRect& srcBounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha);
+
+ GrMatrixConvolutionEffect(const GrMatrixConvolutionEffect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ const TextureSampler& onTextureSampler(int i) const override { return fTextureSampler; }
+
+ GrCoordTransform fCoordTransform;
+ GrTextureDomain fDomain;
+ TextureSampler fTextureSampler;
+ SkIRect fBounds;
+ SkISize fKernelSize;
+ float fKernel[MAX_KERNEL_SIZE];
+ float fGain;
+ float fBias;
+ float fKernelOffset[2];
+ bool fConvolveAlpha;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrMixerEffect.fp b/gfx/skia/skia/src/gpu/effects/GrMixerEffect.fp
new file mode 100644
index 0000000000..f2bf79ec98
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMixerEffect.fp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Mixes the outputs of two FPs (or of one FP and the input color, when fp1 is null).
+
+in fragmentProcessor fp0;
+in fragmentProcessor? fp1;
+in uniform half weight;
+
+@class {
+
+ static OptimizationFlags OptFlags(const std::unique_ptr<GrFragmentProcessor>& fp0,
+ const std::unique_ptr<GrFragmentProcessor>& fp1) {
+ auto flags = ProcessorOptimizationFlags(fp0.get());
+ if (fp1) {
+ flags &= ProcessorOptimizationFlags(fp1.get());
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ const auto c0 = ConstantOutputForConstantInput(this->childProcessor(0), input),
+ c1 = (this->numChildProcessors() > 1)
+ ? ConstantOutputForConstantInput(this->childProcessor(1), input)
+ : input;
+ return {
+ c0.fR + (c1.fR - c0.fR) * weight,
+ c0.fG + (c1.fG - c0.fG) * weight,
+ c0.fB + (c1.fB - c0.fB) * weight,
+ c0.fA + (c1.fA - c0.fA) * weight
+ };
+ }
+}
+
+@optimizationFlags { OptFlags(fp0, fp1) }
+
+void main() {
+ half4 in0 = sample(fp0, sk_InColor);
+ half4 in1 = (fp1 != null) ? sample(fp1, sk_InColor) : sk_InColor;
+
+ sk_OutColor = mix(in0, in1, weight);
+}
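
The per-channel expression in constantOutputForConstantInput() is the expansion of the mix() call in main(): c0 + (c1 - c0) * w == mix(c0, c1, w). As a one-line illustration:

    // Linear interpolation, identical per channel to GLSL/SkSL mix(c0, c1, w).
    static inline float mix1(float c0, float c1, float w) { return c0 + (c1 - c0) * w; }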
diff --git a/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp
new file mode 100644
index 0000000000..b7c001cd54
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrOvalEffect.h"
+
+#include "include/core/SkRect.h"
+#include "src/gpu/effects/generated/GrCircleEffect.h"
+#include "src/gpu/effects/generated/GrEllipseEffect.h"
+
+std::unique_ptr<GrFragmentProcessor> GrOvalEffect::Make(GrClipEdgeType edgeType, const SkRect& oval,
+ const GrShaderCaps& caps) {
+ if (GrClipEdgeType::kHairlineAA == edgeType) {
+ return nullptr;
+ }
+ SkScalar w = oval.width();
+ SkScalar h = oval.height();
+ if (SkScalarNearlyEqual(w, h)) {
+ w /= 2;
+ return GrCircleEffect::Make(edgeType, SkPoint::Make(oval.fLeft + w, oval.fTop + w),
+ w);
+ } else {
+ w /= 2;
+ h /= 2;
+ return GrEllipseEffect::Make(edgeType, SkPoint::Make(oval.fLeft + w, oval.fTop + h),
+ SkPoint::Make(w, h), caps);
+ }
+}
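
A hypothetical call site, illustrating how the width/height comparison routes to the circle vs. ellipse effect (the shaderCaps variable is assumed):

    // A square bound yields a GrCircleEffect; anything else a GrEllipseEffect.
    SkRect circleOval  = SkRect::MakeWH(20.0f, 20.0f);
    SkRect ellipseOval = SkRect::MakeWH(40.0f, 20.0f);
    auto circleFP  = GrOvalEffect::Make(GrClipEdgeType::kFillAA, circleOval, shaderCaps);
    auto ellipseFP = GrOvalEffect::Make(GrClipEdgeType::kFillAA, ellipseOval, shaderCaps);
    // Both return nullptr for kHairlineAA; the ellipse path can also return
    // nullptr on reduced-precision devices (see GrEllipseEffect::Make).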
diff --git a/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h
new file mode 100644
index 0000000000..8b4c95d71b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOvalEffect_DEFINED
+#define GrOvalEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrFragmentProcessor;
+class GrShaderCaps;
+struct SkRect;
+
+namespace GrOvalEffect {
+
+/**
+ * Creates an effect that performs clipping against an oval.
+ */
+std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType, const SkRect&, const GrShaderCaps&);
+}  // namespace GrOvalEffect
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrOverrideInputFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrOverrideInputFragmentProcessor.fp
new file mode 100644
index 0000000000..786178879d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrOverrideInputFragmentProcessor.fp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Ignores its own input color and invokes 'fp' with a constant color.
+// The constant color can be specified either as a literal or as a
+// uniform, controlled by useUniform.
+
+in fragmentProcessor fp;
+layout(key) in bool useUniform;
+layout(when=useUniform, ctype=SkPMColor4f) in uniform half4 uniformColor;
+layout(when=!useUniform, key, ctype=SkPMColor4f) in half4 literalColor;
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp,
+ const SkPMColor4f& color,
+ bool useUniform = true) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrOverrideInputFragmentProcessor(std::move(fp), useUniform, color, color));
+ }
+}
+
+@class {
+ static OptimizationFlags OptFlags(const std::unique_ptr<GrFragmentProcessor>& fp,
+ const SkPMColor4f& color) {
+ auto childFlags = ProcessorOptimizationFlags(fp.get());
+ auto flags = kNone_OptimizationFlags;
+ if (childFlags & kConstantOutputForConstantInput_OptimizationFlag) {
+ flags |= kConstantOutputForConstantInput_OptimizationFlag;
+ }
+ if ((childFlags & kPreservesOpaqueInput_OptimizationFlag) && color.isOpaque()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return ConstantOutputForConstantInput(this->childProcessor(0), uniformColor);
+ }
+}
+
+@optimizationFlags { OptFlags(fp, useUniform ? uniformColor : literalColor) }
+
+void main() {
+ half4 constColor;
+ @if(useUniform) {
+ constColor = uniformColor;
+ } else {
+ constColor = literalColor;
+ }
+ sk_OutColor = sample(fp, constColor);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp
new file mode 100644
index 0000000000..47d92dbde4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrPorterDuffXferProcessor.h"
+
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkTo.h"
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrProcessorAnalysis.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/glsl/GrGLSLBlend.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+/**
+ * Wraps the shader outputs and HW blend state that comprise a Porter Duff blend mode with coverage.
+ */
+class BlendFormula {
+public:
+ /**
+ * Values the shader can write to primary and secondary outputs. These must all be modulated by
+ * coverage to support mixed samples. The XP will ignore the multiplies when not using coverage.
+ */
+ enum OutputType {
+        kNone_OutputType,        //!< 0
+        kCoverage_OutputType,    //!< inputCoverage
+        kModulate_OutputType,    //!< inputColor * inputCoverage
+        kSAModulate_OutputType,  //!< inputColor.a * inputCoverage
+        kISAModulate_OutputType, //!< (1 - inputColor.a) * inputCoverage
+        kISCModulate_OutputType, //!< (1 - inputColor) * inputCoverage
+
+ kLast_OutputType = kISCModulate_OutputType
+ };
+
+ BlendFormula() = default;
+
+ constexpr BlendFormula(OutputType primaryOut, OutputType secondaryOut, GrBlendEquation equation,
+ GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff)
+ : fPrimaryOutputType(primaryOut)
+ , fSecondaryOutputType(secondaryOut)
+ , fBlendEquation(equation)
+ , fSrcCoeff(srcCoeff)
+ , fDstCoeff(dstCoeff)
+ , fProps(GetProperties(primaryOut, secondaryOut, equation, srcCoeff, dstCoeff)) {}
+
+ BlendFormula& operator=(const BlendFormula& other) {
+ SkDEBUGCODE(other.validatePreoptimized());
+ fData = other.fData;
+ return *this;
+ }
+
+ bool operator==(const BlendFormula& other) const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ SkDEBUGCODE(other.validatePreoptimized());
+ return fData == other.fData;
+ }
+
+ bool hasSecondaryOutput() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return kNone_OutputType != fSecondaryOutputType;
+ }
+ bool modifiesDst() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return SkToBool(fProps & kModifiesDst_Property);
+ }
+ bool usesDstColor() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return SkToBool(fProps & kUsesDstColor_Property);
+ }
+ bool usesInputColor() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return SkToBool(fProps & kUsesInputColor_Property);
+ }
+ bool canTweakAlphaForCoverage() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return SkToBool(fProps & kCanTweakAlphaForCoverage_Property);
+ }
+
+ GrBlendEquation equation() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return fBlendEquation;
+ }
+
+ GrBlendCoeff srcCoeff() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return fSrcCoeff;
+ }
+
+ GrBlendCoeff dstCoeff() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return fDstCoeff;
+ }
+
+ OutputType primaryOutput() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return fPrimaryOutputType;
+ }
+
+ OutputType secondaryOutput() const {
+ SkDEBUGCODE(this->validatePreoptimized());
+ return fSecondaryOutputType;
+ }
+
+private:
+ enum Properties {
+ kModifiesDst_Property = 1,
+ kUsesDstColor_Property = 1 << 1,
+ kUsesInputColor_Property = 1 << 2,
+ kCanTweakAlphaForCoverage_Property = 1 << 3,
+
+ kLast_Property = kCanTweakAlphaForCoverage_Property
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(Properties)
+
+#ifdef SK_DEBUG
+ void validatePreoptimized() const {
+ // The provided formula should already be optimized before a BlendFormula is constructed.
+ // Preferably these asserts would be done statically in the constexpr constructor, but this
+ // is not allowed in C++11.
+ SkASSERT((kNone_OutputType == fPrimaryOutputType) ==
+ !GrBlendCoeffsUseSrcColor(fSrcCoeff, fDstCoeff));
+ SkASSERT(!GrBlendCoeffRefsSrc2(fSrcCoeff));
+ SkASSERT((kNone_OutputType == fSecondaryOutputType) == !GrBlendCoeffRefsSrc2(fDstCoeff));
+ SkASSERT(fPrimaryOutputType != fSecondaryOutputType ||
+ kNone_OutputType == fPrimaryOutputType);
+ SkASSERT(kNone_OutputType != fPrimaryOutputType ||
+ kNone_OutputType == fSecondaryOutputType);
+ }
+#endif
+
+ /**
+ * Deduce the properties of a BlendFormula.
+ */
+ static constexpr Properties GetProperties(OutputType PrimaryOut, OutputType SecondaryOut,
+ GrBlendEquation BlendEquation, GrBlendCoeff SrcCoeff,
+ GrBlendCoeff DstCoeff);
+
+ union {
+ struct {
+ // We allot the enums one more bit than they require because MSVC seems to sign-extend
+ // them when the top bit is set. (This is in violation of the C++03 standard 9.6/4)
+ OutputType fPrimaryOutputType : 4;
+ OutputType fSecondaryOutputType : 4;
+ GrBlendEquation fBlendEquation : 6;
+ GrBlendCoeff fSrcCoeff : 6;
+ GrBlendCoeff fDstCoeff : 6;
+ Properties fProps : 32 - (4 + 4 + 6 + 6 + 6);
+ };
+ uint32_t fData;
+ };
+
+ GR_STATIC_ASSERT(kLast_OutputType < (1 << 3));
+ GR_STATIC_ASSERT(kLast_GrBlendEquation < (1 << 5));
+ GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << 5));
+ GR_STATIC_ASSERT(kLast_Property < (1 << 6));
+};
+
+GR_STATIC_ASSERT(4 == sizeof(BlendFormula));
+
+GR_MAKE_BITFIELD_OPS(BlendFormula::Properties);
+
+constexpr BlendFormula::Properties BlendFormula::GetProperties(OutputType PrimaryOut,
+ OutputType SecondaryOut,
+ GrBlendEquation BlendEquation,
+ GrBlendCoeff SrcCoeff,
+ GrBlendCoeff DstCoeff) {
+ return static_cast<Properties>(
+ (GrBlendModifiesDst(BlendEquation, SrcCoeff, DstCoeff) ? kModifiesDst_Property : 0) |
+ (GrBlendCoeffsUseDstColor(SrcCoeff, DstCoeff) ? kUsesDstColor_Property : 0) |
+ ((PrimaryOut >= kModulate_OutputType && GrBlendCoeffsUseSrcColor(SrcCoeff, DstCoeff)) ||
+ (SecondaryOut >= kModulate_OutputType &&
+ GrBlendCoeffRefsSrc2(DstCoeff))
+ ? kUsesInputColor_Property
+ : 0) | // We assert later that SrcCoeff doesn't ref src2.
+ ((kModulate_OutputType == PrimaryOut || kNone_OutputType == PrimaryOut) &&
+ kNone_OutputType == SecondaryOut &&
+ GrBlendAllowsCoverageAsAlpha(BlendEquation, SrcCoeff, DstCoeff)
+ ? kCanTweakAlphaForCoverage_Property
+ : 0));
+}
+
+/**
+ * When there is no coverage, or the blend mode can tweak alpha for coverage, we use the standard
+ * Porter Duff formula.
+ */
+static constexpr BlendFormula MakeCoeffFormula(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
+ // When the coeffs are (Zero, Zero) or (Zero, One) we set the primary output to none.
+ return (kZero_GrBlendCoeff == srcCoeff &&
+ (kZero_GrBlendCoeff == dstCoeff || kOne_GrBlendCoeff == dstCoeff))
+ ? BlendFormula(BlendFormula::kNone_OutputType, BlendFormula::kNone_OutputType,
+ kAdd_GrBlendEquation, kZero_GrBlendCoeff, dstCoeff)
+ : BlendFormula(BlendFormula::kModulate_OutputType, BlendFormula::kNone_OutputType,
+ kAdd_GrBlendEquation, srcCoeff, dstCoeff);
+}
+
+/**
+ * Basic coeff formula similar to MakeCoeffFormula but we will make the src f*Sa. This is used in
+ * LCD dst-out.
+ */
+static constexpr BlendFormula MakeSAModulateFormula(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
+ return BlendFormula(BlendFormula::kSAModulate_OutputType, BlendFormula::kNone_OutputType,
+ kAdd_GrBlendEquation, srcCoeff, dstCoeff);
+}
+
+/**
+ * When there is coverage, the equation with f=coverage is:
+ *
+ * D' = f * (S * srcCoeff + D * dstCoeff) + (1-f) * D
+ *
+ * Expanding and collecting the D terms gives D' = f * S * srcCoeff + D * [f * dstCoeff + (1 - f)],
+ * which can be rewritten as:
+ *
+ * D' = f * S * srcCoeff + D * (1 - [f * (1 - dstCoeff)])
+ *
+ * To implement this formula, we output [f * (1 - dstCoeff)] for the secondary color and replace the
+ * HW dst coeff with IS2C.
+ *
+ * Xfer modes: dst-atop (Sa!=1)
+ */
+static constexpr BlendFormula MakeCoverageFormula(
+ BlendFormula::OutputType oneMinusDstCoeffModulateOutput, GrBlendCoeff srcCoeff) {
+ return BlendFormula(BlendFormula::kModulate_OutputType, oneMinusDstCoeffModulateOutput,
+ kAdd_GrBlendEquation, srcCoeff, kIS2C_GrBlendCoeff);
+}
+
+/**
+ * When there is coverage and the src coeff is Zero, the equation with f=coverage becomes:
+ *
+ * D' = f * D * dstCoeff + (1-f) * D
+ *
+ * Factoring out D gives D' = D * [f * dstCoeff + (1 - f)], which can be rewritten as:
+ *
+ * D' = D - D * [f * (1 - dstCoeff)]
+ *
+ * To implement this formula, we output [f * (1 - dstCoeff)] for the primary color and use a reverse
+ * subtract HW blend equation with coeffs of (DC, One).
+ *
+ * Xfer modes: clear, dst-out (Sa=1), dst-in (Sa!=1), modulate (Sc!=1)
+ */
+static constexpr BlendFormula MakeCoverageSrcCoeffZeroFormula(
+ BlendFormula::OutputType oneMinusDstCoeffModulateOutput) {
+ return BlendFormula(oneMinusDstCoeffModulateOutput, BlendFormula::kNone_OutputType,
+ kReverseSubtract_GrBlendEquation, kDC_GrBlendCoeff, kOne_GrBlendCoeff);
+}
+
+/**
+ * When there is coverage and the dst coeff is Zero, the equation with f=coverage becomes:
+ *
+ * D' = f * S * srcCoeff + (1-f) * D
+ *
+ * To implement this formula, we output [f] for the secondary color and replace the HW dst coeff
+ * with IS2A. (Note that we can avoid dual source blending when Sa=1 by using ISA.)
+ *
+ * Xfer modes (Sa!=1): src, src-in, src-out
+ */
+static constexpr BlendFormula MakeCoverageDstCoeffZeroFormula(GrBlendCoeff srcCoeff) {
+ return BlendFormula(BlendFormula::kModulate_OutputType, BlendFormula::kCoverage_OutputType,
+ kAdd_GrBlendEquation, srcCoeff, kIS2A_GrBlendCoeff);
+}
+
+// Older GCC won't like the constexpr arrays because of
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61484.
+// MSVC 2015 crashes with an internal compiler error.
+#if !defined(__clang__) && ((defined(__GNUC__) && __GNUC__ < 5) || (defined(_MSC_VER) && _MSC_VER <= 1910))
+# define MAYBE_CONSTEXPR const
+#else
+# define MAYBE_CONSTEXPR constexpr
+#endif
+
+/**
+ * This table outlines the blend formulas we will use with each xfermode, with and without coverage,
+ * with and without an opaque input color. Optimization properties are deduced at compile time so we
+ * can make runtime decisions quickly. RGB coverage is not supported.
+ */
+static MAYBE_CONSTEXPR BlendFormula gBlendTable[2][2][(int)SkBlendMode::kLastCoeffMode + 1] = {
+ /*>> No coverage, input color unknown <<*/ {{
+
+ /* clear */ MakeCoeffFormula(kZero_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* src */ MakeCoeffFormula(kOne_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-over */ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ MakeCoeffFormula(kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-in */ MakeCoeffFormula(kZero_GrBlendCoeff, kSA_GrBlendCoeff),
+ /* src-out */ MakeCoeffFormula(kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-out */ MakeCoeffFormula(kZero_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* src-atop */ MakeCoeffFormula(kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ MakeCoeffFormula(kIDA_GrBlendCoeff, kSA_GrBlendCoeff),
+ /* xor */ MakeCoeffFormula(kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ MakeCoeffFormula(kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ MakeCoeffFormula(kZero_GrBlendCoeff, kSC_GrBlendCoeff),
+ /* screen */ MakeCoeffFormula(kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }, /*>> Has coverage, input color unknown <<*/ {
+
+ /* clear */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kCoverage_OutputType),
+ /* src */ MakeCoverageDstCoeffZeroFormula(kOne_GrBlendCoeff),
+ /* dst */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-over */ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ MakeCoverageDstCoeffZeroFormula(kDA_GrBlendCoeff),
+ /* dst-in */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kISAModulate_OutputType),
+ /* src-out */ MakeCoverageDstCoeffZeroFormula(kIDA_GrBlendCoeff),
+ /* dst-out */ MakeCoeffFormula(kZero_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* src-atop */ MakeCoeffFormula(kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ MakeCoverageFormula(BlendFormula::kISAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* xor */ MakeCoeffFormula(kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ MakeCoeffFormula(kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kISCModulate_OutputType),
+ /* screen */ MakeCoeffFormula(kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }}, /*>> No coverage, input color opaque <<*/ {{
+
+ /* clear */ MakeCoeffFormula(kZero_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* src */ MakeCoeffFormula(kOne_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-over */ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff), // see comment below
+ /* dst-over */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ MakeCoeffFormula(kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-in */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-out */ MakeCoeffFormula(kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-out */ MakeCoeffFormula(kZero_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* src-atop */ MakeCoeffFormula(kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-atop */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* xor */ MakeCoeffFormula(kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* plus */ MakeCoeffFormula(kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ MakeCoeffFormula(kZero_GrBlendCoeff, kSC_GrBlendCoeff),
+ /* screen */ MakeCoeffFormula(kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }, /*>> Has coverage, input color opaque <<*/ {
+
+ /* clear */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kCoverage_OutputType),
+ /* src */ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-over */ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ MakeCoeffFormula(kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-in */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-out */ MakeCoeffFormula(kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-out */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kCoverage_OutputType),
+ /* src-atop */ MakeCoeffFormula(kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* xor */ MakeCoeffFormula(kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ MakeCoeffFormula(kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kISCModulate_OutputType),
+ /* screen */ MakeCoeffFormula(kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+}}};
+// In the above table src-over is not optimized to src mode when the color is opaque because we
+// found no advantage to doing so. Also, in most cases we use a global src-over XP, which is not
+// specialized for opaque input. If the table were set to use the src formula then we would have
+// to change when we use this global XP to keep analysis and practice in sync.
+
+static MAYBE_CONSTEXPR BlendFormula gLCDBlendTable[(int)SkBlendMode::kLastCoeffMode + 1] = {
+ /* clear */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kCoverage_OutputType),
+ /* src */ MakeCoverageFormula(BlendFormula::kCoverage_OutputType, kOne_GrBlendCoeff),
+ /* dst */ MakeCoeffFormula(kZero_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-over */ MakeCoverageFormula(BlendFormula::kSAModulate_OutputType, kOne_GrBlendCoeff),
+ /* dst-over */ MakeCoeffFormula(kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ MakeCoverageFormula(BlendFormula::kCoverage_OutputType, kDA_GrBlendCoeff),
+ /* dst-in */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kISAModulate_OutputType),
+ /* src-out */ MakeCoverageFormula(BlendFormula::kCoverage_OutputType, kIDA_GrBlendCoeff),
+ /* dst-out */ MakeSAModulateFormula(kZero_GrBlendCoeff, kISC_GrBlendCoeff),
+ /* src-atop */ MakeCoverageFormula(BlendFormula::kSAModulate_OutputType, kDA_GrBlendCoeff),
+ /* dst-atop */ MakeCoverageFormula(BlendFormula::kISAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* xor */ MakeCoverageFormula(BlendFormula::kSAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* plus */ MakeCoeffFormula(kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ MakeCoverageSrcCoeffZeroFormula(BlendFormula::kISCModulate_OutputType),
+ /* screen */ MakeCoeffFormula(kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+};
+
+#undef MAYBE_CONSTEXPR
+
+static BlendFormula get_blend_formula(bool isOpaque,
+ bool hasCoverage,
+ bool hasMixedSamples,
+ SkBlendMode xfermode) {
+ SkASSERT((unsigned)xfermode <= (unsigned)SkBlendMode::kLastCoeffMode);
+ bool conflatesCoverage = hasCoverage || hasMixedSamples;
+ return gBlendTable[isOpaque][conflatesCoverage][(int)xfermode];
+}
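+
+// As an example of the lookup above, get_blend_formula(false, true, false, SkBlendMode::kSrcIn)
+// selects gBlendTable[0][1][src-in] = MakeCoverageDstCoeffZeroFormula(kDA_GrBlendCoeff), a
+// formula whose secondary coverage output requires dual source blending (or the shader fallback
+// chosen in makeXferProcessor() below).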
+
+static BlendFormula get_lcd_blend_formula(SkBlendMode xfermode) {
+ SkASSERT((unsigned)xfermode <= (unsigned)SkBlendMode::kLastCoeffMode);
+
+ return gLCDBlendTable[(int)xfermode];
+}
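+
+// For example, get_lcd_blend_formula(SkBlendMode::kSrcOver) returns
+// MakeCoverageFormula(kSAModulate_OutputType, kOne_GrBlendCoeff) from the LCD table above. Its
+// coverage-based secondary output is why LCD src-over needs either dual source blending or a
+// dst read (see makeXferProcessor() below).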
+
+///////////////////////////////////////////////////////////////////////////////
+
+class PorterDuffXferProcessor : public GrXferProcessor {
+public:
+ PorterDuffXferProcessor(BlendFormula blendFormula, GrProcessorAnalysisCoverage coverage)
+ : INHERITED(kPorterDuffXferProcessor_ClassID, false, false, coverage)
+ , fBlendFormula(blendFormula) {
+ }
+
+ const char* name() const override { return "Porter Duff"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ BlendFormula getBlendFormula() const { return fBlendFormula; }
+
+private:
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onHasSecondaryOutput() const override { return fBlendFormula.hasSecondaryOutput(); }
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override {
+ blendInfo->fEquation = fBlendFormula.equation();
+ blendInfo->fSrcBlend = fBlendFormula.srcCoeff();
+ blendInfo->fDstBlend = fBlendFormula.dstCoeff();
+ blendInfo->fWriteColor = fBlendFormula.modifiesDst();
+ }
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const PorterDuffXferProcessor& xp = xpBase.cast<PorterDuffXferProcessor>();
+ return fBlendFormula == xp.fBlendFormula;
+ }
+
+ const BlendFormula fBlendFormula;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void append_color_output(const PorterDuffXferProcessor& xp,
+ GrGLSLXPFragmentBuilder* fragBuilder,
+ BlendFormula::OutputType outputType, const char* output,
+ const char* inColor, const char* inCoverage) {
+ SkASSERT(inCoverage);
+ SkASSERT(inColor);
+ switch (outputType) {
+ case BlendFormula::kNone_OutputType:
+ fragBuilder->codeAppendf("%s = half4(0.0);", output);
+ break;
+ case BlendFormula::kCoverage_OutputType:
+ // We can have a coverage formula while not reading coverage if there are mixed samples.
+ fragBuilder->codeAppendf("%s = %s;", output, inCoverage);
+ break;
+ case BlendFormula::kModulate_OutputType:
+ fragBuilder->codeAppendf("%s = %s * %s;", output, inColor, inCoverage);
+ break;
+ case BlendFormula::kSAModulate_OutputType:
+ fragBuilder->codeAppendf("%s = %s.a * %s;", output, inColor, inCoverage);
+ break;
+ case BlendFormula::kISAModulate_OutputType:
+ fragBuilder->codeAppendf("%s = (1.0 - %s.a) * %s;", output, inColor, inCoverage);
+ break;
+ case BlendFormula::kISCModulate_OutputType:
+ fragBuilder->codeAppendf("%s = (half4(1.0) - %s) * %s;", output, inColor, inCoverage);
+ break;
+ default:
+ SK_ABORT("Unsupported output type.");
+ break;
+ }
+}
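+
+// For illustration, with hypothetical names output = "oC", inColor = "c", and inCoverage =
+// "cov", kSAModulate_OutputType appends "oC = c.a * cov;" and kISCModulate_OutputType appends
+// "oC = (half4(1.0) - c) * cov;".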
+
+class GLPorterDuffXferProcessor : public GrGLSLXferProcessor {
+public:
+ static void GenKey(const GrProcessor& processor, GrProcessorKeyBuilder* b) {
+ const PorterDuffXferProcessor& xp = processor.cast<PorterDuffXferProcessor>();
+ b->add32(xp.getBlendFormula().primaryOutput() |
+ (xp.getBlendFormula().secondaryOutput() << 3));
+ GR_STATIC_ASSERT(BlendFormula::kLast_OutputType < 8);
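+        // The assert above guarantees each output type fits in 3 bits: the primary output
+        // occupies bits 0-2 of the key and the secondary output bits 3-5.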
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const PorterDuffXferProcessor& xp = args.fXP.cast<PorterDuffXferProcessor>();
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+
+ BlendFormula blendFormula = xp.getBlendFormula();
+ if (blendFormula.hasSecondaryOutput()) {
+ append_color_output(xp, fragBuilder, blendFormula.secondaryOutput(),
+ args.fOutputSecondary, args.fInputColor, args.fInputCoverage);
+ }
+ append_color_output(xp, fragBuilder, blendFormula.primaryOutput(), args.fOutputPrimary,
+ args.fInputColor, args.fInputCoverage);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void PorterDuffXferProcessor::onGetGLSLProcessorKey(const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) const {
+ GLPorterDuffXferProcessor::GenKey(*this, b);
+}
+
+GrGLSLXferProcessor* PorterDuffXferProcessor::createGLSLInstance() const {
+ return new GLPorterDuffXferProcessor;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class ShaderPDXferProcessor : public GrXferProcessor {
+public:
+ ShaderPDXferProcessor(bool hasMixedSamples, SkBlendMode xfermode,
+ GrProcessorAnalysisCoverage coverage)
+ : INHERITED(kShaderPDXferProcessor_ClassID, true, hasMixedSamples, coverage)
+ , fXfermode(xfermode) {
+ }
+
+ const char* name() const override { return "Porter Duff Shader"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ SkBlendMode getXfermode() const { return fXfermode; }
+
+private:
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const ShaderPDXferProcessor& xp = xpBase.cast<ShaderPDXferProcessor>();
+ return fXfermode == xp.fXfermode;
+ }
+
+ const SkBlendMode fXfermode;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLShaderPDXferProcessor : public GrGLSLXferProcessor {
+public:
+ static void GenKey(const GrProcessor& processor, GrProcessorKeyBuilder* b) {
+ const ShaderPDXferProcessor& xp = processor.cast<ShaderPDXferProcessor>();
+ b->add32((int)xp.getXfermode());
+ }
+
+private:
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ const ShaderPDXferProcessor& xp = proc.cast<ShaderPDXferProcessor>();
+
+ GrGLSLBlend::AppendMode(fragBuilder, srcColor, dstColor, outColor, xp.getXfermode());
+
+ // Apply coverage.
+ INHERITED::DefaultCoverageModulation(fragBuilder, srcCoverage, dstColor, outColor,
+ outColorSecondary, xp);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ShaderPDXferProcessor::onGetGLSLProcessorKey(const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) const {
+ GLShaderPDXferProcessor::GenKey(*this, b);
+}
+
+GrGLSLXferProcessor* ShaderPDXferProcessor::createGLSLInstance() const {
+ return new GLShaderPDXferProcessor;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class PDLCDXferProcessor : public GrXferProcessor {
+public:
+ static sk_sp<const GrXferProcessor> Make(SkBlendMode mode,
+ const GrProcessorAnalysisColor& inputColor);
+
+ ~PDLCDXferProcessor() override;
+
+ const char* name() const override { return "Porter Duff LCD"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ float alpha() const { return fAlpha; }
+
+private:
+ PDLCDXferProcessor(const SkPMColor4f& blendConstant, float alpha);
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override {
+ blendInfo->fSrcBlend = kConstC_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ blendInfo->fBlendConstant = fBlendConstant;
+ }
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const PDLCDXferProcessor& xp = xpBase.cast<PDLCDXferProcessor>();
+ if (fBlendConstant != xp.fBlendConstant || fAlpha != xp.fAlpha) {
+ return false;
+ }
+ return true;
+ }
+
+ SkPMColor4f fBlendConstant;
+ float fAlpha;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLPDLCDXferProcessor : public GrGLSLXferProcessor {
+public:
+ GLPDLCDXferProcessor(const GrProcessor&) : fLastAlpha(SK_FloatNaN) {}
+
+ ~GLPDLCDXferProcessor() override {}
+
+ static void GenKey(const GrProcessor& processor, const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) {}
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const char* alpha;
+ fAlphaUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "alpha", &alpha);
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+        // We want to force our primary output to be alpha * coverage, where alpha is the alpha
+        // value of the src color. We know that there are no color stages (or we wouldn't have
+        // created this xp) and that the r, g, and b channels of the op's input color are baked
+        // into the blend constant.
+ SkASSERT(args.fInputCoverage);
+ fragBuilder->codeAppendf("%s = %s * %s;", args.fOutputPrimary, alpha, args.fInputCoverage);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdm, const GrXferProcessor& xp) override {
+ float alpha = xp.cast<PDLCDXferProcessor>().alpha();
+ if (fLastAlpha != alpha) {
+ pdm.set1f(fAlphaUniform, alpha);
+ fLastAlpha = alpha;
+ }
+ }
+
+ GrGLSLUniformHandler::UniformHandle fAlphaUniform;
+ float fLastAlpha;
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+PDLCDXferProcessor::PDLCDXferProcessor(const SkPMColor4f& blendConstant, float alpha)
+ : INHERITED(kPDLCDXferProcessor_ClassID, false, false, GrProcessorAnalysisCoverage::kLCD)
+ , fBlendConstant(blendConstant)
+ , fAlpha(alpha) {
+}
+
+sk_sp<const GrXferProcessor> PDLCDXferProcessor::Make(SkBlendMode mode,
+ const GrProcessorAnalysisColor& color) {
+ if (SkBlendMode::kSrcOver != mode) {
+ return nullptr;
+ }
+ SkPMColor4f blendConstantPM;
+ if (!color.isConstant(&blendConstantPM)) {
+ return nullptr;
+ }
+ SkColor4f blendConstantUPM = blendConstantPM.unpremul();
+ float alpha = blendConstantUPM.fA;
+ blendConstantPM = { blendConstantUPM.fR, blendConstantUPM.fG, blendConstantUPM.fB, 1 };
+ return sk_sp<GrXferProcessor>(new PDLCDXferProcessor(blendConstantPM, alpha));
+}
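+
+// A worked example of the unpremultiply above: a constant premultiplied input of
+// (0.5, 0.25, 0, 0.5) unpremultiplies to (1, 0.5, 0, 0.5), so the hw blend constant becomes
+// (1, 0.5, 0, 1) and the remaining 0.5 alpha is applied per fragment via the shader uniform.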
+
+PDLCDXferProcessor::~PDLCDXferProcessor() {
+}
+
+void PDLCDXferProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLPDLCDXferProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* PDLCDXferProcessor::createGLSLInstance() const {
+ return new GLPDLCDXferProcessor(*this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+constexpr GrPorterDuffXPFactory::GrPorterDuffXPFactory(SkBlendMode xfermode)
+ : fBlendMode(xfermode) {}
+
+const GrXPFactory* GrPorterDuffXPFactory::Get(SkBlendMode blendMode) {
+ SkASSERT((unsigned)blendMode <= (unsigned)SkBlendMode::kLastCoeffMode);
+
+ // If these objects are constructed as static constexpr by cl.exe (2015 SP2) the vtables are
+ // null.
+#ifdef SK_BUILD_FOR_WIN
+#define _CONSTEXPR_
+#else
+#define _CONSTEXPR_ constexpr
+#endif
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gClearPDXPF(SkBlendMode::kClear);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gSrcPDXPF(SkBlendMode::kSrc);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gDstPDXPF(SkBlendMode::kDst);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gSrcOverPDXPF(SkBlendMode::kSrcOver);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gDstOverPDXPF(SkBlendMode::kDstOver);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gSrcInPDXPF(SkBlendMode::kSrcIn);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gDstInPDXPF(SkBlendMode::kDstIn);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gSrcOutPDXPF(SkBlendMode::kSrcOut);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gDstOutPDXPF(SkBlendMode::kDstOut);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gSrcATopPDXPF(SkBlendMode::kSrcATop);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gDstATopPDXPF(SkBlendMode::kDstATop);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gXorPDXPF(SkBlendMode::kXor);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gPlusPDXPF(SkBlendMode::kPlus);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gModulatePDXPF(SkBlendMode::kModulate);
+ static _CONSTEXPR_ const GrPorterDuffXPFactory gScreenPDXPF(SkBlendMode::kScreen);
+#undef _CONSTEXPR_
+
+ switch (blendMode) {
+ case SkBlendMode::kClear:
+ return &gClearPDXPF;
+ case SkBlendMode::kSrc:
+ return &gSrcPDXPF;
+ case SkBlendMode::kDst:
+ return &gDstPDXPF;
+ case SkBlendMode::kSrcOver:
+ return &gSrcOverPDXPF;
+ case SkBlendMode::kDstOver:
+ return &gDstOverPDXPF;
+ case SkBlendMode::kSrcIn:
+ return &gSrcInPDXPF;
+ case SkBlendMode::kDstIn:
+ return &gDstInPDXPF;
+ case SkBlendMode::kSrcOut:
+ return &gSrcOutPDXPF;
+ case SkBlendMode::kDstOut:
+ return &gDstOutPDXPF;
+ case SkBlendMode::kSrcATop:
+ return &gSrcATopPDXPF;
+ case SkBlendMode::kDstATop:
+ return &gDstATopPDXPF;
+ case SkBlendMode::kXor:
+ return &gXorPDXPF;
+ case SkBlendMode::kPlus:
+ return &gPlusPDXPF;
+ case SkBlendMode::kModulate:
+ return &gModulatePDXPF;
+ case SkBlendMode::kScreen:
+ return &gScreenPDXPF;
+ default:
+ SK_ABORT("Unexpected blend mode.");
+ }
+}
+
+sk_sp<const GrXferProcessor> GrPorterDuffXPFactory::makeXferProcessor(
+ const GrProcessorAnalysisColor& color, GrProcessorAnalysisCoverage coverage,
+ bool hasMixedSamples, const GrCaps& caps, GrClampType clampType) const {
+ BlendFormula blendFormula;
+ bool isLCD = coverage == GrProcessorAnalysisCoverage::kLCD;
+ if (isLCD) {
+ // See comment in MakeSrcOverXferProcessor about color.isOpaque here
+ if (SkBlendMode::kSrcOver == fBlendMode && color.isConstant() && /*color.isOpaque() &&*/
+ !caps.shaderCaps()->dualSourceBlendingSupport() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ // If we don't have dual source blending or in shader dst reads, we fall back to this
+ // trick for rendering SrcOver LCD text instead of doing a dst copy.
+ return PDLCDXferProcessor::Make(fBlendMode, color);
+ }
+ blendFormula = get_lcd_blend_formula(fBlendMode);
+ } else {
+ blendFormula =
+ get_blend_formula(color.isOpaque(), GrProcessorAnalysisCoverage::kNone != coverage,
+ hasMixedSamples, fBlendMode);
+ }
+
+ // Skia always saturates after the kPlus blend mode, so it requires shader-based blending when
+ // pixels aren't guaranteed to automatically be normalized (i.e. any floating point config).
+ if ((blendFormula.hasSecondaryOutput() && !caps.shaderCaps()->dualSourceBlendingSupport()) ||
+ (isLCD && (SkBlendMode::kSrcOver != fBlendMode /*|| !color.isOpaque()*/)) ||
+ (GrClampType::kAuto != clampType && SkBlendMode::kPlus == fBlendMode)) {
+ return sk_sp<const GrXferProcessor>(new ShaderPDXferProcessor(hasMixedSamples, fBlendMode,
+ coverage));
+ }
+ return sk_sp<const GrXferProcessor>(new PorterDuffXferProcessor(blendFormula, coverage));
+}
+
+static inline GrXPFactory::AnalysisProperties analysis_properties(
+ const GrProcessorAnalysisColor& color, const GrProcessorAnalysisCoverage& coverage,
+ const GrCaps& caps, GrClampType clampType, SkBlendMode mode) {
+ using AnalysisProperties = GrXPFactory::AnalysisProperties;
+ AnalysisProperties props = AnalysisProperties::kNone;
+ bool hasCoverage = GrProcessorAnalysisCoverage::kNone != coverage;
+ bool isLCD = GrProcessorAnalysisCoverage::kLCD == coverage;
+ BlendFormula formula;
+ if (isLCD) {
+ formula = gLCDBlendTable[(int)mode];
+ } else {
+ formula = gBlendTable[color.isOpaque()][hasCoverage][(int)mode];
+ }
+
+ if (formula.canTweakAlphaForCoverage() && !isLCD) {
+ props |= AnalysisProperties::kCompatibleWithCoverageAsAlpha;
+ }
+
+ if (isLCD) {
+ // See comment in MakeSrcOverXferProcessor about color.isOpaque here
+ if (SkBlendMode::kSrcOver == mode && color.isConstant() && /*color.isOpaque() &&*/
+ !caps.shaderCaps()->dualSourceBlendingSupport() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ props |= AnalysisProperties::kIgnoresInputColor;
+ } else {
+            // For LCD blending, if the color is not opaque we must read the dst in the shader
+            // even if we have dual source blending. The opaqueness check must be done after
+            // blending, so for simplicity we only allow src-over to skip the dst read path
+            // (though src, src-in, and dst-atop would also work). We also fall into the dst read
+            // case for src-over if we do not have dual source blending.
+ if (SkBlendMode::kSrcOver != mode ||
+ /*!color.isOpaque() ||*/ // See comment in MakeSrcOverXferProcessor about isOpaque.
+ (formula.hasSecondaryOutput() && !caps.shaderCaps()->dualSourceBlendingSupport())) {
+ props |= AnalysisProperties::kReadsDstInShader;
+ }
+ }
+ } else {
+ // With dual-source blending we never need the destination color in the shader.
+ if (!caps.shaderCaps()->dualSourceBlendingSupport()) {
+            // Mixed samples implicitly compute a fractional coverage from sample coverage. This
+            // could affect the formula used. However, we don't expect to have mixed samples
+            // without dual source blending.
+ SkASSERT(!caps.mixedSamplesSupport());
+ if (formula.hasSecondaryOutput()) {
+ props |= AnalysisProperties::kReadsDstInShader;
+ }
+ }
+ }
+
+ if (GrClampType::kAuto != clampType && SkBlendMode::kPlus == mode) {
+ props |= AnalysisProperties::kReadsDstInShader;
+ }
+
+ if (!formula.modifiesDst() || !formula.usesInputColor()) {
+ props |= AnalysisProperties::kIgnoresInputColor;
+ }
+ return props;
+}
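+
+// For example, src-over with single-channel coverage and an unknown color uses the
+// (kOne, kISA) formula from the table. Folding coverage into the src alpha is exact for that
+// formula, so the analysis reports kCompatibleWithCoverageAsAlpha, and with no secondary
+// output there is no need to read the dst in the shader.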
+
+GrXPFactory::AnalysisProperties GrPorterDuffXPFactory::analysisProperties(
+ const GrProcessorAnalysisColor& color,
+ const GrProcessorAnalysisCoverage& coverage,
+ const GrCaps& caps,
+ GrClampType clampType) const {
+ return analysis_properties(color, coverage, caps, clampType, fBlendMode);
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrPorterDuffXPFactory);
+
+#if GR_TEST_UTILS
+const GrXPFactory* GrPorterDuffXPFactory::TestGet(GrProcessorTestData* d) {
+ SkBlendMode mode = SkBlendMode(d->fRandom->nextULessThan((int)SkBlendMode::kLastCoeffMode));
+ return GrPorterDuffXPFactory::Get(mode);
+}
+#endif
+
+void GrPorterDuffXPFactory::TestGetXPOutputTypes(const GrXferProcessor* xp,
+ int* outPrimary,
+ int* outSecondary) {
+    if (0 != strcmp(xp->name(), "Porter Duff")) {
+ *outPrimary = *outSecondary = -1;
+ return;
+ }
+ BlendFormula blendFormula = static_cast<const PorterDuffXferProcessor*>(xp)->getBlendFormula();
+ *outPrimary = blendFormula.primaryOutput();
+ *outSecondary = blendFormula.secondaryOutput();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// SrcOver Global functions
+////////////////////////////////////////////////////////////////////////////////////////////////
+const GrXferProcessor& GrPorterDuffXPFactory::SimpleSrcOverXP() {
+ static BlendFormula gSrcOverBlendFormula =
+ MakeCoeffFormula(kOne_GrBlendCoeff, kISA_GrBlendCoeff);
+ static PorterDuffXferProcessor gSrcOverXP(gSrcOverBlendFormula,
+ GrProcessorAnalysisCoverage::kSingleChannel);
+ return gSrcOverXP;
+}
+
+sk_sp<const GrXferProcessor> GrPorterDuffXPFactory::MakeSrcOverXferProcessor(
+ const GrProcessorAnalysisColor& color, GrProcessorAnalysisCoverage coverage,
+ bool hasMixedSamples, const GrCaps& caps) {
+    // We want to avoid making an xfer processor if possible. Thus, for the simple case where we
+    // are not doing lcd blending we just use our global SimpleSrcOverXP. This slightly differs
+    // from the general case, where we convert a src-over blend that has solid coverage and an
+    // opaque color to src mode, which allows blending to be disabled.
+ if (coverage != GrProcessorAnalysisCoverage::kLCD) {
+        // We return nullptr here, which our caller interprets as meaning "use SimpleSrcOverXP".
+        // We don't simply return the address of that XP here because our caller would then have
+        // to unref it, and since it is a global object, GrProgramElement's ref-counting system
+        // (which is not thread safe) would be exercised.
+ return nullptr;
+ }
+
+    // Currently, up the stack, Skia requires that the dst is opaque or that the client has said
+    // the opaqueness doesn't matter. Thus for src-over we don't need to worry about whether the
+    // src color is opaque. This allows us to use faster code paths and to avoid various bugs that
+    // occur with dst reads in shader blending. For now we disable the check for opaqueness, but
+    // in the future we should pass down the knowledge about dst opaqueness and make the correct
+    // decision here.
+    //
+    // This also fixes a Chrome bug on Macs where we were getting random fuzziness when doing
+    // shader blending for non-opaque sources.
+ if (color.isConstant() && /*color.isOpaque() &&*/
+ !caps.shaderCaps()->dualSourceBlendingSupport() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ // If we don't have dual source blending or in shader dst reads, we fall
+ // back to this trick for rendering SrcOver LCD text instead of doing a
+ // dst copy.
+ return PDLCDXferProcessor::Make(SkBlendMode::kSrcOver, color);
+ }
+
+ BlendFormula blendFormula;
+ blendFormula = get_lcd_blend_formula(SkBlendMode::kSrcOver);
+ // See comment above regarding why the opaque check is commented out here.
+ if (/*!color.isOpaque() ||*/
+ (blendFormula.hasSecondaryOutput() && !caps.shaderCaps()->dualSourceBlendingSupport())) {
+ return sk_sp<GrXferProcessor>(
+ new ShaderPDXferProcessor(hasMixedSamples, SkBlendMode::kSrcOver, coverage));
+ }
+ return sk_sp<GrXferProcessor>(new PorterDuffXferProcessor(blendFormula, coverage));
+}
+
+sk_sp<const GrXferProcessor> GrPorterDuffXPFactory::MakeNoCoverageXP(SkBlendMode blendmode) {
+ BlendFormula formula = get_blend_formula(false, false, false, blendmode);
+ return sk_make_sp<PorterDuffXferProcessor>(formula, GrProcessorAnalysisCoverage::kNone);
+}
+
+GrXPFactory::AnalysisProperties GrPorterDuffXPFactory::SrcOverAnalysisProperties(
+ const GrProcessorAnalysisColor& color,
+ const GrProcessorAnalysisCoverage& coverage,
+ const GrCaps& caps,
+ GrClampType clampType) {
+ return analysis_properties(color, coverage, caps, clampType, SkBlendMode::kSrcOver);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.h b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.h
new file mode 100644
index 0000000000..22453131b5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPorterDuffXferProcessor_DEFINED
+#define GrPorterDuffXferProcessor_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/GrXferProcessor.h"
+
+// See the comment above GrXPFactory's definition about this warning suppression.
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+class GrPorterDuffXPFactory : public GrXPFactory {
+public:
+ static const GrXPFactory* Get(SkBlendMode blendMode);
+
+ /** Because src-over is so common we special case it for performance reasons. If this returns
+ null then the SimpleSrcOverXP() below should be used. */
+ static sk_sp<const GrXferProcessor> MakeSrcOverXferProcessor(const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps&);
+
+ /** Returns a simple non-LCD porter duff blend XP with no optimizations or coverage. */
+ static sk_sp<const GrXferProcessor> MakeNoCoverageXP(SkBlendMode);
+
+ /** This XP implements non-LCD src-over using hw blend with no optimizations. It is returned
+ by reference because it is global and its ref-cnting methods are not thread safe. */
+ static const GrXferProcessor& SimpleSrcOverXP();
+
+ static AnalysisProperties SrcOverAnalysisProperties(const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType);
+
+private:
+ constexpr GrPorterDuffXPFactory(SkBlendMode);
+
+ sk_sp<const GrXferProcessor> makeXferProcessor(const GrProcessorAnalysisColor&,
+ GrProcessorAnalysisCoverage,
+ bool hasMixedSamples,
+ const GrCaps&,
+ GrClampType) const override;
+
+ AnalysisProperties analysisProperties(const GrProcessorAnalysisColor&,
+ const GrProcessorAnalysisCoverage&,
+ const GrCaps&,
+ GrClampType) const override;
+
+ GR_DECLARE_XP_FACTORY_TEST
+ static void TestGetXPOutputTypes(const GrXferProcessor*, int* outPrimary, int* outSecondary);
+
+ SkBlendMode fBlendMode;
+
+ friend class GrPorterDuffTest; // for TestGetXPOutputTypes()
+ typedef GrXPFactory INHERITED;
+};
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrPremulInputFragmentProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrPremulInputFragmentProcessor.fp
new file mode 100644
index 0000000000..cbba607074
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrPremulInputFragmentProcessor.fp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+@optimizationFlags {
+ kPreservesOpaqueInput_OptimizationFlag | kConstantOutputForConstantInput_OptimizationFlag
+}
+
+void main() {
+ sk_OutColor = sk_InColor;
+ sk_OutColor.rgb *= sk_InColor.a;
+}
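+
+// For example, an unpremultiplied input of (1, 0.5, 0, 0.5) becomes (0.5, 0.25, 0, 0.5).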
+
+@class {
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return SkColor4f { input.fR, input.fG, input.fB, input.fA }.premul();
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrRRectBlurEffect.fp b/gfx/skia/skia/src/gpu/effects/GrRRectBlurEffect.fp
new file mode 100644
index 0000000000..b0b770e3c2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRRectBlurEffect.fp
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in float sigma;
+layout(ctype=SkRect) in float4 rect;
+in uniform half cornerRadius;
+in uniform sampler2D ninePatchSampler;
+layout(ctype=SkRect) uniform float4 proxyRect;
+uniform half blurRadius;
+
+@header {
+ #include "include/effects/SkBlurMaskFilter.h"
+ #include "include/gpu/GrContext.h"
+ #include "include/private/GrRecordingContext.h"
+ #include "src/core/SkBlurPriv.h"
+ #include "src/core/SkGpuBlurUtils.h"
+ #include "src/core/SkRRectPriv.h"
+ #include "src/gpu/GrCaps.h"
+ #include "src/gpu/GrClip.h"
+ #include "src/gpu/GrPaint.h"
+ #include "src/gpu/GrProxyProvider.h"
+ #include "src/gpu/GrRecordingContextPriv.h"
+ #include "src/gpu/GrRenderTargetContext.h"
+ #include "src/gpu/GrStyle.h"
+}
+
+@class {
+ static sk_sp<GrTextureProxy> find_or_create_rrect_blur_mask(GrRecordingContext* context,
+ const SkRRect& rrectToDraw,
+ const SkISize& size,
+ float xformedSigma) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 9, "RoundRect Blur Mask");
+        builder[0] = SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
+
+ int index = 1;
+ for (auto c : { SkRRect::kUpperLeft_Corner, SkRRect::kUpperRight_Corner,
+ SkRRect::kLowerRight_Corner, SkRRect::kLowerLeft_Corner }) {
+ SkASSERT(SkScalarIsInt(rrectToDraw.radii(c).fX) &&
+ SkScalarIsInt(rrectToDraw.radii(c).fY));
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fX);
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fY);
+ }
+ builder.finish();
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ sk_sp<GrTextureProxy> mask(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kBottomLeft_GrSurfaceOrigin));
+ if (!mask) {
+ // TODO: this could be SkBackingFit::kApprox, but:
+ // 1) The texture coords would need to be updated.
+ // 2) We would have to use GrTextureDomain::kClamp_Mode for the GaussianBlur.
+ auto rtc = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, size.fWidth, size.fHeight, GrColorType::kAlpha_8,
+ nullptr);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+
+ rtc->clear(nullptr, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+ rtc->drawRRect(GrNoClip(), std::move(paint), GrAA::kYes, SkMatrix::I(), rrectToDraw,
+ GrStyle::SimpleFill());
+
+ sk_sp<GrTextureProxy> srcProxy(rtc->asTextureProxyRef());
+ if (!srcProxy) {
+ return nullptr;
+ }
+ auto rtc2 =
+ SkGpuBlurUtils::GaussianBlur(context,
+ std::move(srcProxy),
+ rtc->colorInfo().colorType(),
+ rtc->colorInfo().alphaType(),
+ SkIPoint::Make(0, 0),
+ nullptr,
+ SkIRect::MakeWH(size.fWidth, size.fHeight),
+ SkIRect::EmptyIRect(),
+ xformedSigma,
+ xformedSigma,
+ GrTextureDomain::kIgnore_Mode,
+ SkBackingFit::kExact);
+ if (!rtc2) {
+ return nullptr;
+ }
+
+ mask = rtc2->asTextureProxyRef();
+ if (!mask) {
+ return nullptr;
+ }
+ SkASSERT(mask->origin() == kBottomLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, mask.get());
+ }
+
+ return mask;
+ }
+}
+
+@optimizationFlags {
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(GrRecordingContext* context,
+ float sigma,
+ float xformedSigma,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect);
+}
+
+@cpp {
+ std::unique_ptr<GrFragmentProcessor> GrRRectBlurEffect::Make(GrRecordingContext* context,
+ float sigma,
+ float xformedSigma,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect) {
+ SkASSERT(!SkRRectPriv::IsCircle(devRRect) && !devRRect.isRect()); // Should've been caught up-stream
+
+ // TODO: loosen this up
+ if (!SkRRectPriv::IsSimpleCircular(devRRect)) {
+ return nullptr;
+ }
+
+ // Make sure we can successfully ninepatch this rrect -- the blur sigma has to be
+ // sufficiently small relative to both the size of the corner radius and the
+ // width (and height) of the rrect.
+ SkRRect rrectToDraw;
+ SkISize size;
+ SkScalar ignored[kSkBlurRRectMaxDivisions];
+ int ignoredSize;
+ uint32_t ignored32;
+
+ bool ninePatchable = SkComputeBlurredRRectParams(srcRRect, devRRect,
+ SkRect::MakeEmpty(),
+ sigma, xformedSigma,
+ &rrectToDraw, &size,
+ ignored, ignored,
+ ignored, ignored,
+ &ignoredSize, &ignoredSize,
+ &ignored32);
+ if (!ninePatchable) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> mask(find_or_create_rrect_blur_mask(context, rrectToDraw,
+ size, xformedSigma));
+ if (!mask) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrRRectBlurEffect(xformedSigma, devRRect.getBounds(),
+ SkRRectPriv::GetSimpleRadii(devRRect).fX, std::move(mask)));
+ }
+}
+
+@test(d) {
+ SkScalar w = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar r = d->fRandom->nextRangeF(1.f, 9.f);
+    SkScalar sigma = d->fRandom->nextRangeF(1.f, 10.f);
+ SkRRect rrect;
+ rrect.setRectXY(SkRect::MakeWH(w, h), r, r);
+ return GrRRectBlurEffect::Make(d->context(), sigma, sigma, rrect, rrect);
+}
+
+void main() {
+ // warp the fragment position to the appropriate part of the 9patch blur texture
+
+ half2 rectCenter = half2((proxyRect.xy + proxyRect.zw) / 2.0);
+ half2 translatedFragPos = half2(sk_FragCoord.xy - proxyRect.xy);
+ half threshold = cornerRadius + 2.0 * blurRadius;
+ half2 middle = half2(proxyRect.zw - proxyRect.xy - 2.0 * threshold);
+
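+    // Fragments within `threshold` of a proxy-rect edge sample the matching border of the
+    // nine-patch mask; interior fragments are clamped onto its one-texel center band, which
+    // stretches the middle of the blur across the geometry. The mask is (2.0 * threshold + 1.0)
+    // texels on a side.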
+ if (translatedFragPos.x >= threshold && translatedFragPos.x < (middle.x + threshold)) {
+ translatedFragPos.x = threshold;
+ } else if (translatedFragPos.x >= (middle.x + threshold)) {
+ translatedFragPos.x -= middle.x - 1.0;
+ }
+
+ if (translatedFragPos.y > threshold && translatedFragPos.y < (middle.y+threshold)) {
+ translatedFragPos.y = threshold;
+ } else if (translatedFragPos.y >= (middle.y + threshold)) {
+ translatedFragPos.y -= middle.y - 1.0;
+ }
+
+ half2 proxyDims = half2(2.0 * threshold + 1.0);
+ half2 texCoord = translatedFragPos / proxyDims;
+
+ sk_OutColor = sk_InColor * sample(ninePatchSampler, texCoord);
+}
+
+@setData(pdman) {
+ float blurRadiusValue = 3.f * SkScalarCeilToScalar(sigma - 1 / 6.0f);
+ pdman.set1f(blurRadius, blurRadiusValue);
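+    // (The 3.f factor presumably reflects the ~3-sigma support of the Gaussian kernel, and the
+    // ceil(sigma - 1/6) term mirrors the quantization used for the mask's unique key above.)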
+
+ SkRect outset = rect;
+ outset.outset(blurRadiusValue, blurRadiusValue);
+ pdman.set4f(proxyRect, outset.fLeft, outset.fTop, outset.fRight, outset.fBottom);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp
new file mode 100644
index 0000000000..9c9dc31320
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp
@@ -0,0 +1,769 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrRRectEffect.h"
+
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkTLazy.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/effects/GrConvexPolyEffect.h"
+#include "src/gpu/effects/GrOvalEffect.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+// The effects defined here only handle rrect radii >= kRadiusMin.
+static const SkScalar kRadiusMin = SK_ScalarHalf;
+
+//////////////////////////////////////////////////////////////////////////////
+
+class CircularRRectEffect : public GrFragmentProcessor {
+public:
+
+ enum CornerFlags {
+ kTopLeft_CornerFlag = (1 << SkRRect::kUpperLeft_Corner),
+ kTopRight_CornerFlag = (1 << SkRRect::kUpperRight_Corner),
+ kBottomRight_CornerFlag = (1 << SkRRect::kLowerRight_Corner),
+ kBottomLeft_CornerFlag = (1 << SkRRect::kLowerLeft_Corner),
+
+ kLeft_CornerFlags = kTopLeft_CornerFlag | kBottomLeft_CornerFlag,
+ kTop_CornerFlags = kTopLeft_CornerFlag | kTopRight_CornerFlag,
+ kRight_CornerFlags = kTopRight_CornerFlag | kBottomRight_CornerFlag,
+ kBottom_CornerFlags = kBottomLeft_CornerFlag | kBottomRight_CornerFlag,
+
+ kAll_CornerFlags = kTopLeft_CornerFlag | kTopRight_CornerFlag |
+ kBottomLeft_CornerFlag | kBottomRight_CornerFlag,
+
+ kNone_CornerFlags = 0
+ };
+
+    // The flags are used to indicate which corners are circular (unflagged corners are assumed to
+    // be square).
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType,
+ uint32_t circularCornerFlags, const SkRRect&);
+
+ ~CircularRRectEffect() override {}
+
+ const char* name() const override { return "CircularRRect"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ const SkRRect& getRRect() const { return fRRect; }
+
+ uint32_t getCircularCornerFlags() const { return fCircularCornerFlags; }
+
+ GrClipEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ CircularRRectEffect(GrClipEdgeType, uint32_t circularCornerFlags, const SkRRect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ SkRRect fRRect;
+ GrClipEdgeType fEdgeType;
+ uint32_t fCircularCornerFlags;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+std::unique_ptr<GrFragmentProcessor> CircularRRectEffect::Make(GrClipEdgeType edgeType,
+ uint32_t circularCornerFlags,
+ const SkRRect& rrect) {
+ if (GrClipEdgeType::kFillAA != edgeType && GrClipEdgeType::kInverseFillAA != edgeType) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(
+ new CircularRRectEffect(edgeType, circularCornerFlags, rrect));
+}
+
+CircularRRectEffect::CircularRRectEffect(GrClipEdgeType edgeType, uint32_t circularCornerFlags,
+ const SkRRect& rrect)
+ : INHERITED(kCircularRRectEffect_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fRRect(rrect)
+ , fEdgeType(edgeType)
+ , fCircularCornerFlags(circularCornerFlags) {
+}
+
+std::unique_ptr<GrFragmentProcessor> CircularRRectEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new CircularRRectEffect(fEdgeType, fCircularCornerFlags, fRRect));
+}
+
+bool CircularRRectEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const CircularRRectEffect& crre = other.cast<CircularRRectEffect>();
+ // The corner flags are derived from fRRect, so no need to check them.
+ return fEdgeType == crre.fEdgeType && fRRect == crre.fRRect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(CircularRRectEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> CircularRRectEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar r = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ SkRRect rrect;
+ rrect.setRectXY(SkRect::MakeWH(w, h), r, r);
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ GrClipEdgeType et =
+ (GrClipEdgeType)d->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ fp = GrRRectEffect::Make(et, rrect, *d->caps()->shaderCaps());
+ } while (nullptr == fp);
+ return fp;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLCircularRRectEffect : public GrGLSLFragmentProcessor {
+public:
+ GLCircularRRectEffect() = default;
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fInnerRectUniform;
+ GrGLSLProgramDataManager::UniformHandle fRadiusPlusHalfUniform;
+ SkRRect fPrevRRect;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLCircularRRectEffect::emitCode(EmitArgs& args) {
+ const CircularRRectEffect& crre = args.fFp.cast<CircularRRectEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char *rectName;
+ const char *radiusPlusHalfName;
+ // The inner rect is the rrect bounds inset by the radius. Its left, top, right, and bottom
+ // edges correspond to components x, y, z, and w, respectively. When a side of the rrect has
+ // only rectangular corners, that side's value corresponds to the rect edge's value outset by
+ // half a pixel.
+ fInnerRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "innerRect", &rectName);
+ // x is (r + .5) and y is 1/(r + .5)
+ fRadiusPlusHalfUniform = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "radiusPlusHalf", &radiusPlusHalfName);
+
+ // If we're on a device where float != fp32 then the length calculation could overflow.
+ SkString clampedCircleDistance;
+ if (!args.fShaderCaps->floatIs32Bits()) {
+ clampedCircleDistance.printf("saturate(%s.x * (1.0 - length(dxy * %s.y)))",
+ radiusPlusHalfName, radiusPlusHalfName);
+ } else {
+ clampedCircleDistance.printf("saturate(%s.x - length(dxy))", radiusPlusHalfName);
+ }
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+    // At each quarter-circle corner we compute a vector that is the offset of the fragment
+    // position from the circle center. The vector is pinned in x and y to be in the quarter-plane
+    // relevant to that corner. This means that points in the interior near the rrect top edge
+    // will have a vector that points straight up for both the TL and TR corners. Computing an
+    // alpha from this vector at either the TR or TL corner will give the correct result.
+    // Similarly, fragments near the other three edges will get the correct AA. Fragments in the
+    // interior of the rrect will have a (0,0) vector at all four corners. So long as the
+    // radius > 0.5 they will correctly produce an alpha value of 1 at all four corners. We take
+    // the min of all the alphas. The code below is a simplified version of the above that
+    // performs maxs on the vector components before computing distances and alpha values, so
+    // that only one distance computation is needed to determine the min alpha.
+ //
+ // For the cases where one half of the rrect is rectangular we drop one of the x or y
+ // computations, compute a separate rect edge alpha for the rect side, and mul the two computed
+ // alphas together.
+ switch (crre.getCircularCornerFlags()) {
+ case CircularRRectEffect::kAll_CornerFlags:
+ fragBuilder->codeAppendf("float2 dxy0 = %s.xy - sk_FragCoord.xy;", rectName);
+ fragBuilder->codeAppendf("float2 dxy1 = sk_FragCoord.xy - %s.zw;", rectName);
+ fragBuilder->codeAppend("float2 dxy = max(max(dxy0, dxy1), 0.0);");
+ fragBuilder->codeAppendf("half alpha = half(%s);", clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ fragBuilder->codeAppendf("float2 dxy = max(%s.xy - sk_FragCoord.xy, 0.0);",
+ rectName);
+ fragBuilder->codeAppendf("half rightAlpha = half(saturate(%s.z - sk_FragCoord.x));",
+ rectName);
+ fragBuilder->codeAppendf("half bottomAlpha = half(saturate(%s.w - sk_FragCoord.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = bottomAlpha * rightAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ fragBuilder->codeAppendf("float2 dxy = max(float2(sk_FragCoord.x - %s.z, "
+ "%s.y - sk_FragCoord.y), 0.0);",
+ rectName, rectName);
+ fragBuilder->codeAppendf("half leftAlpha = half(saturate(sk_FragCoord.x - %s.x));",
+ rectName);
+ fragBuilder->codeAppendf("half bottomAlpha = half(saturate(%s.w - sk_FragCoord.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = bottomAlpha * leftAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ fragBuilder->codeAppendf("float2 dxy = max(sk_FragCoord.xy - %s.zw, 0.0);",
+ rectName);
+ fragBuilder->codeAppendf("half leftAlpha = half(saturate(sk_FragCoord.x - %s.x));",
+ rectName);
+ fragBuilder->codeAppendf("half topAlpha = half(saturate(sk_FragCoord.y - %s.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = topAlpha * leftAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ fragBuilder->codeAppendf("float2 dxy = max(float2(%s.x - sk_FragCoord.x, "
+ "sk_FragCoord.y - %s.w), 0.0);",
+ rectName, rectName);
+ fragBuilder->codeAppendf("half rightAlpha = half(saturate(%s.z - sk_FragCoord.x));",
+ rectName);
+ fragBuilder->codeAppendf("half topAlpha = half(saturate(sk_FragCoord.y - %s.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = topAlpha * rightAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kLeft_CornerFlags:
+ fragBuilder->codeAppendf("float2 dxy0 = %s.xy - sk_FragCoord.xy;", rectName);
+ fragBuilder->codeAppendf("float dy1 = sk_FragCoord.y - %s.w;", rectName);
+ fragBuilder->codeAppend("float2 dxy = max(float2(dxy0.x, max(dxy0.y, dy1)), 0.0);");
+ fragBuilder->codeAppendf("half rightAlpha = half(saturate(%s.z - sk_FragCoord.x));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = rightAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTop_CornerFlags:
+ fragBuilder->codeAppendf("float2 dxy0 = %s.xy - sk_FragCoord.xy;", rectName);
+ fragBuilder->codeAppendf("float dx1 = sk_FragCoord.x - %s.z;", rectName);
+ fragBuilder->codeAppend("float2 dxy = max(float2(max(dxy0.x, dx1), dxy0.y), 0.0);");
+ fragBuilder->codeAppendf("half bottomAlpha = half(saturate(%s.w - sk_FragCoord.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = bottomAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kRight_CornerFlags:
+ fragBuilder->codeAppendf("float dy0 = %s.y - sk_FragCoord.y;", rectName);
+ fragBuilder->codeAppendf("float2 dxy1 = sk_FragCoord.xy - %s.zw;", rectName);
+ fragBuilder->codeAppend("float2 dxy = max(float2(dxy1.x, max(dy0, dxy1.y)), 0.0);");
+ fragBuilder->codeAppendf("half leftAlpha = half(saturate(sk_FragCoord.x - %s.x));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = leftAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottom_CornerFlags:
+ fragBuilder->codeAppendf("float dx0 = %s.x - sk_FragCoord.x;", rectName);
+ fragBuilder->codeAppendf("float2 dxy1 = sk_FragCoord.xy - %s.zw;", rectName);
+ fragBuilder->codeAppend("float2 dxy = max(float2(max(dx0, dxy1.x), dxy1.y), 0.0);");
+ fragBuilder->codeAppendf("half topAlpha = half(saturate(sk_FragCoord.y - %s.y));",
+ rectName);
+ fragBuilder->codeAppendf("half alpha = topAlpha * half(%s);",
+ clampedCircleDistance.c_str());
+ break;
+ }
+
+ if (GrClipEdgeType::kInverseFillAA == crre.getEdgeType()) {
+ fragBuilder->codeAppend("alpha = 1.0 - alpha;");
+ }
+
+ fragBuilder->codeAppendf("%s = %s * alpha;", args.fOutputColor, args.fInputColor);
+}
+
+void GLCircularRRectEffect::GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const CircularRRectEffect& crre = processor.cast<CircularRRectEffect>();
+ GR_STATIC_ASSERT(kGrClipEdgeTypeCnt <= 8);
+ b->add32((crre.getCircularCornerFlags() << 3) | (int) crre.getEdgeType());
+}
+
+void GLCircularRRectEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ const CircularRRectEffect& crre = processor.cast<CircularRRectEffect>();
+ const SkRRect& rrect = crre.getRRect();
+ if (rrect != fPrevRRect) {
+ SkRect rect = rrect.getBounds();
+ SkScalar radius = 0;
+ switch (crre.getCircularCornerFlags()) {
+ case CircularRRectEffect::kAll_CornerFlags:
+ SkASSERT(SkRRectPriv::IsSimpleCircular(rrect));
+ radius = SkRRectPriv::GetSimpleRadii(rrect).fX;
+ SkASSERT(radius >= kRadiusMin);
+ rect.inset(radius, radius);
+ break;
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight += 0.5f;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ radius = rrect.radii(SkRRect::kUpperRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ radius = rrect.radii(SkRRect::kLowerRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop -= 0.5f;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ radius = rrect.radii(SkRRect::kLowerLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop -= 0.5f;
+ rect.fRight += 0.5f;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kLeft_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight += 0.5f;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kTop_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kRight_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kBottom_CornerFlags:
+ radius = rrect.radii(SkRRect::kLowerLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop -= 0.5f;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ default:
+ SK_ABORT("Should have been one of the above cases.");
+ }
+ pdman.set4f(fInnerRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ radius += 0.5f;
+ pdman.set2f(fRadiusPlusHalfUniform, radius, 1.f / radius);
+ fPrevRRect = rrect;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void CircularRRectEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLCircularRRectEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* CircularRRectEffect::onCreateGLSLInstance() const {
+ return new GLCircularRRectEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class EllipticalRRectEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType, const SkRRect&);
+
+ ~EllipticalRRectEffect() override {}
+
+ const char* name() const override { return "EllipticalRRect"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ const SkRRect& getRRect() const { return fRRect; }
+
+ GrClipEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ EllipticalRRectEffect(GrClipEdgeType, const SkRRect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ SkRRect fRRect;
+ GrClipEdgeType fEdgeType;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+std::unique_ptr<GrFragmentProcessor> EllipticalRRectEffect::Make(GrClipEdgeType edgeType,
+ const SkRRect& rrect) {
+ if (GrClipEdgeType::kFillAA != edgeType && GrClipEdgeType::kInverseFillAA != edgeType) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new EllipticalRRectEffect(edgeType, rrect));
+}
+
+EllipticalRRectEffect::EllipticalRRectEffect(GrClipEdgeType edgeType, const SkRRect& rrect)
+ : INHERITED(kEllipticalRRectEffect_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fRRect(rrect)
+ , fEdgeType(edgeType) {
+}
+
+std::unique_ptr<GrFragmentProcessor> EllipticalRRectEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new EllipticalRRectEffect(fEdgeType, fRRect));
+}
+
+bool EllipticalRRectEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const EllipticalRRectEffect& erre = other.cast<EllipticalRRectEffect>();
+ return fEdgeType == erre.fEdgeType && fRRect == erre.fRRect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(EllipticalRRectEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> EllipticalRRectEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkVector r[4];
+ r[SkRRect::kUpperLeft_Corner].fX = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ // ensure at least one corner really is elliptical
+ do {
+ r[SkRRect::kUpperLeft_Corner].fY = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ } while (r[SkRRect::kUpperLeft_Corner].fY == r[SkRRect::kUpperLeft_Corner].fX);
+
+ SkRRect rrect;
+ if (d->fRandom->nextBool()) {
+ // half the time create a four-radii rrect.
+ r[SkRRect::kLowerRight_Corner].fX = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ r[SkRRect::kLowerRight_Corner].fY = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+
+ r[SkRRect::kUpperRight_Corner].fX = r[SkRRect::kLowerRight_Corner].fX;
+ r[SkRRect::kUpperRight_Corner].fY = r[SkRRect::kUpperLeft_Corner].fY;
+
+ r[SkRRect::kLowerLeft_Corner].fX = r[SkRRect::kUpperLeft_Corner].fX;
+ r[SkRRect::kLowerLeft_Corner].fY = r[SkRRect::kLowerRight_Corner].fY;
+
+ rrect.setRectRadii(SkRect::MakeWH(w, h), r);
+ } else {
+ rrect.setRectXY(SkRect::MakeWH(w, h), r[SkRRect::kUpperLeft_Corner].fX,
+ r[SkRRect::kUpperLeft_Corner].fY);
+ }
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ GrClipEdgeType et = (GrClipEdgeType)d->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ fp = GrRRectEffect::Make(et, rrect, *d->caps()->shaderCaps());
+ } while (nullptr == fp);
+ return fp;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLEllipticalRRectEffect : public GrGLSLFragmentProcessor {
+public:
+ GLEllipticalRRectEffect() = default;
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fInnerRectUniform;
+ GrGLSLProgramDataManager::UniformHandle fInvRadiiSqdUniform;
+ GrGLSLProgramDataManager::UniformHandle fScaleUniform;
+ SkRRect fPrevRRect;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLEllipticalRRectEffect::emitCode(EmitArgs& args) {
+ const EllipticalRRectEffect& erre = args.fFp.cast<EllipticalRRectEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char *rectName;
+ // The inner rect is the rrect bounds inset by the x/y radii
+ fInnerRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "innerRect", &rectName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // At each quarter-ellipse corner we compute a vector that is the offset of the fragment pos
+ // to the ellipse center. The vector is pinned in x and y to be in the quarter-plane relevant
+ // to that corner. This means that points in the interior near the rrect top edge will have
+ // a vector that points straight up for both the TL and TR corners. Computing an
+ // alpha from this vector at either the TR or TL corner will give the correct result. Similarly,
+ // fragments near the other three edges will get the correct AA. Fragments in the interior of
+ // the rrect will have a (0,0) vector at all four corners. So long as the radii > 0.5 they will
+ // correctly produce an alpha value of 1 at all four corners. We take the min of all the alphas.
+ //
+ // The code below is a simplified version of the above that performs maxs on the vector
+ // components before computing distances and alpha values so that only one distance computation
+ // need be computed to determine the min alpha.
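+ // Concretely (our reading of the code below): dxy0 is the offset past the inner rect's
+ // top-left corner and dxy1 the offset past its bottom-right corner, so max(max(dxy0, dxy1), 0)
+ // is zero for interior fragments and nonzero only in the quadrant of the nearest corner.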
+ fragBuilder->codeAppendf("float2 dxy0 = %s.xy - sk_FragCoord.xy;", rectName);
+ fragBuilder->codeAppendf("float2 dxy1 = sk_FragCoord.xy - %s.zw;", rectName);
+
+ // If we're on a device where float != fp32 then we'll do the distance computation in a space
+ // that is normalized by the largest radius. The scale uniform will be scale, 1/scale. The
+ // radii uniform values are already in this normalized space.
+ const char* scaleName = nullptr;
+ if (!args.fShaderCaps->floatIs32Bits()) {
+ fScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType, "scale",
+ &scaleName);
+ }
+
+ // The uniforms with the inv squared radii are highp to prevent underflow.
+ switch (erre.getRRect().getType()) {
+ case SkRRect::kSimple_Type: {
+ const char *invRadiiXYSqdName;
+ fInvRadiiSqdUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat2_GrSLType,
+ "invRadiiXY",
+ &invRadiiXYSqdName);
+ fragBuilder->codeAppend("float2 dxy = max(max(dxy0, dxy1), 0.0);");
+ if (scaleName) {
+ fragBuilder->codeAppendf("dxy *= %s.y;", scaleName);
+ }
+ // Z is the x/y offsets divided by squared radii.
+ fragBuilder->codeAppendf("float2 Z = dxy * %s.xy;", invRadiiXYSqdName);
+ break;
+ }
+ case SkRRect::kNinePatch_Type: {
+ const char *invRadiiLTRBSqdName;
+ fInvRadiiSqdUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType,
+ "invRadiiLTRB",
+ &invRadiiLTRBSqdName);
+ if (scaleName) {
+ fragBuilder->codeAppendf("dxy0 *= %s.y;", scaleName);
+ fragBuilder->codeAppendf("dxy1 *= %s.y;", scaleName);
+ }
+ fragBuilder->codeAppend("float2 dxy = max(max(dxy0, dxy1), 0.0);");
+ // Z is the x/y offsets divided by squared radii. We only care about the (at most) one
+ // corner where both the x and y offsets are positive, hence the maxes. (The inverse
+ // squared radii will always be positive.)
+ fragBuilder->codeAppendf("float2 Z = max(max(dxy0 * %s.xy, dxy1 * %s.zw), 0.0);",
+ invRadiiLTRBSqdName, invRadiiLTRBSqdName);
+
+ break;
+ }
+ default:
+ SK_ABORT("RRect should always be simple or nine-patch.");
+ }
+ // implicit is the evaluation of (x/a)^2 + (y/b)^2 - 1.
+ fragBuilder->codeAppend("half implicit = half(dot(Z, dxy) - 1.0);");
+ // grad_dot is the squared length of the gradient of the implicit.
+ fragBuilder->codeAppend("half grad_dot = half(4.0 * dot(Z, Z));");
+ // avoid calling inversesqrt on zero.
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.0e-4);");
+ fragBuilder->codeAppend("half approx_dist = implicit * half(inversesqrt(grad_dot));");
+ if (scaleName) {
+ fragBuilder->codeAppendf("approx_dist *= %s.x;", scaleName);
+ }
+
+ if (GrClipEdgeType::kFillAA == erre.getEdgeType()) {
+ fragBuilder->codeAppend("half alpha = clamp(0.5 - approx_dist, 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("half alpha = clamp(0.5 + approx_dist, 0.0, 1.0);");
+ }
+
+ fragBuilder->codeAppendf("%s = %s * alpha;", args.fOutputColor, args.fInputColor);
+}
+
+void GLEllipticalRRectEffect::GenKey(const GrProcessor& effect, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const EllipticalRRectEffect& erre = effect.cast<EllipticalRRectEffect>();
+ GR_STATIC_ASSERT((int) GrClipEdgeType::kLast < (1 << 3));
+ b->add32(erre.getRRect().getType() | (int) erre.getEdgeType() << 3);
+}
+
+void GLEllipticalRRectEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& effect) {
+ const EllipticalRRectEffect& erre = effect.cast<EllipticalRRectEffect>();
+ const SkRRect& rrect = erre.getRRect();
+ // If we're using a scale factor to work around precision issues, choose the largest radius
+ // as the scale factor. The inv radii need to be pre-adjusted by the scale factor.
+ if (rrect != fPrevRRect) {
+ SkRect rect = rrect.getBounds();
+ const SkVector& r0 = rrect.radii(SkRRect::kUpperLeft_Corner);
+ SkASSERT(r0.fX >= kRadiusMin);
+ SkASSERT(r0.fY >= kRadiusMin);
+ switch (erre.getRRect().getType()) {
+ case SkRRect::kSimple_Type:
+ rect.inset(r0.fX, r0.fY);
+ if (fScaleUniform.isValid()) {
+ if (r0.fX > r0.fY) {
+ pdman.set2f(fInvRadiiSqdUniform, 1.f, (r0.fX * r0.fX) / (r0.fY * r0.fY));
+ pdman.set2f(fScaleUniform, r0.fX, 1.f / r0.fX);
+ } else {
+ pdman.set2f(fInvRadiiSqdUniform, (r0.fY * r0.fY) / (r0.fX * r0.fX), 1.f);
+ pdman.set2f(fScaleUniform, r0.fY, 1.f / r0.fY);
+ }
+ } else {
+ pdman.set2f(fInvRadiiSqdUniform, 1.f / (r0.fX * r0.fX),
+ 1.f / (r0.fY * r0.fY));
+ }
+ break;
+ case SkRRect::kNinePatch_Type: {
+ const SkVector& r1 = rrect.radii(SkRRect::kLowerRight_Corner);
+ SkASSERT(r1.fX >= kRadiusMin);
+ SkASSERT(r1.fY >= kRadiusMin);
+ rect.fLeft += r0.fX;
+ rect.fTop += r0.fY;
+ rect.fRight -= r1.fX;
+ rect.fBottom -= r1.fY;
+ if (fScaleUniform.isValid()) {
+ float scale = SkTMax(SkTMax(r0.fX, r0.fY), SkTMax(r1.fX, r1.fY));
+ float scaleSqd = scale * scale;
+ pdman.set4f(fInvRadiiSqdUniform, scaleSqd / (r0.fX * r0.fX),
+ scaleSqd / (r0.fY * r0.fY),
+ scaleSqd / (r1.fX * r1.fX),
+ scaleSqd / (r1.fY * r1.fY));
+ pdman.set2f(fScaleUniform, scale, 1.f / scale);
+ } else {
+ pdman.set4f(fInvRadiiSqdUniform, 1.f / (r0.fX * r0.fX),
+ 1.f / (r0.fY * r0.fY),
+ 1.f / (r1.fX * r1.fX),
+ 1.f / (r1.fY * r1.fY));
+ }
+ break;
+ }
+ default:
+ SK_ABORT("RRect should always be simple or nine-patch.");
+ }
+ pdman.set4f(fInnerRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ fPrevRRect = rrect;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void EllipticalRRectEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLEllipticalRRectEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* EllipticalRRectEffect::onCreateGLSLInstance() const {
+ return new GLEllipticalRRectEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor> GrRRectEffect::Make(GrClipEdgeType edgeType,
+ const SkRRect& rrect,
+ const GrShaderCaps& caps) {
+ if (rrect.isRect()) {
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ }
+
+ if (rrect.isOval()) {
+ return GrOvalEffect::Make(edgeType, rrect.getBounds(), caps);
+ }
+
+ if (rrect.isSimple()) {
+ if (SkRRectPriv::GetSimpleRadii(rrect).fX < kRadiusMin ||
+ SkRRectPriv::GetSimpleRadii(rrect).fY < kRadiusMin) {
+ // In this case the corners are extremely close to rectangular and we collapse the
+ // clip to a rectangular clip.
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ }
+ if (SkRRectPriv::GetSimpleRadii(rrect).fX == SkRRectPriv::GetSimpleRadii(rrect).fY) {
+ return CircularRRectEffect::Make(edgeType, CircularRRectEffect::kAll_CornerFlags,
+ rrect);
+ } else {
+ return EllipticalRRectEffect::Make(edgeType, rrect);
+ }
+ }
+
+ if (rrect.isComplex() || rrect.isNinePatch()) {
+ // Check for the "tab" cases - two adjacent circular corners and two square corners.
+ SkScalar circularRadius = 0;
+ uint32_t cornerFlags = 0;
+
+ SkVector radii[4];
+ bool squashedRadii = false;
+ for (int c = 0; c < 4; ++c) {
+ radii[c] = rrect.radii((SkRRect::Corner)c);
+ SkASSERT((0 == radii[c].fX) == (0 == radii[c].fY));
+ if (0 == radii[c].fX) {
+ // The corner is square, so no need to squash or flag as circular.
+ continue;
+ }
+ if (radii[c].fX < kRadiusMin || radii[c].fY < kRadiusMin) {
+ radii[c].set(0, 0);
+ squashedRadii = true;
+ continue;
+ }
+ if (radii[c].fX != radii[c].fY) {
+ cornerFlags = ~0U;
+ break;
+ }
+ if (!cornerFlags) {
+ circularRadius = radii[c].fX;
+ cornerFlags = 1 << c;
+ } else {
+ if (radii[c].fX != circularRadius) {
+ cornerFlags = ~0U;
+ break;
+ }
+ cornerFlags |= 1 << c;
+ }
+ }
+
+ switch (cornerFlags) {
+ case CircularRRectEffect::kAll_CornerFlags:
+ // This rrect should have been caught in the simple case above. Though, it would
+ // be correctly handled in the fallthrough code.
+ SkASSERT(false);
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ case CircularRRectEffect::kLeft_CornerFlags:
+ case CircularRRectEffect::kTop_CornerFlags:
+ case CircularRRectEffect::kRight_CornerFlags:
+ case CircularRRectEffect::kBottom_CornerFlags: {
+ SkTCopyOnFirstWrite<SkRRect> rr(rrect);
+ if (squashedRadii) {
+ rr.writable()->setRectRadii(rrect.getBounds(), radii);
+ }
+ return CircularRRectEffect::Make(edgeType, cornerFlags, *rr);
+ }
+ case CircularRRectEffect::kNone_CornerFlags:
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ default: {
+ if (squashedRadii) {
+ // If we got here then we squashed some but not all the radii to zero. (If all
+ // had been squashed cornerFlags would be 0.) The elliptical effect doesn't
+ // support some rounded and some square corners.
+ return nullptr;
+ }
+ if (rrect.isNinePatch()) {
+ return EllipticalRRectEffect::Make(edgeType, rrect);
+ }
+ return nullptr;
+ }
+ }
+ }
+
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h
new file mode 100644
index 0000000000..426cb8f3c2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRRectEffect_DEFINED
+#define GrRRectEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrFragmentProcessor;
+class GrShaderCaps;
+class GrProcessor;
+class SkRRect;
+
+namespace GrRRectEffect {
+
+/**
+ * Creates an effect that performs anti-aliased clipping against a SkRRect. It doesn't support
+ * all varieties of SkRRect so the caller must check for a nullptr return.
+ */
+std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType, const SkRRect&, const GrShaderCaps&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrRectBlurEffect.fp b/gfx/skia/skia/src/gpu/effects/GrRectBlurEffect.fp
new file mode 100644
index 0000000000..175fa658f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRectBlurEffect.fp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+@header {
+#include <cmath>
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrShaderCaps.h"
+}
+
+in float4 rect;
+
+layout(key) bool highp = abs(rect.x) > 16000 || abs(rect.y) > 16000 ||
+ abs(rect.z) > 16000 || abs(rect.w) > 16000;
+
+layout(when= highp) uniform float4 rectF;
+layout(when=!highp) uniform half4 rectH;
+
+// Texture that is a LUT for integral of normal distribution. The value at x (where x is a texture
+// coord between 0 and 1) is the integral from -inf to (3 * sigma * (1 - 2 * x)). I.e. x = 0 is
+// mapped to +3*sigma and x = 1 to -3*sigma. The flip saves a reversal in the shader.
+in uniform sampler2D integral;
+// Used to produce normalized texture coords for lookups in 'integral'
+in uniform half invSixSigma;
+
+// There is a fast variant of the effect that does 2 texture lookups and a more general one for
+// wider blurs relative to rect sizes that does 4.
+layout(key) in bool isFast;
+
+@constructorParams {
+ GrSamplerState samplerParams
+}
+
+@samplerParams(integral) {
+ samplerParams
+}
+@class {
+static sk_sp<GrTextureProxy> CreateIntegralTexture(GrProxyProvider* proxyProvider,
+ float sixSigma) {
+ // The texture we're producing represents the integral of a normal distribution over a six-sigma
+ // range centered at zero. We want enough resolution so that the linear interpolation done in
+ // texture lookup doesn't introduce noticeable artifacts. We conservatively choose to have 2
+ // texels for each dst pixel.
+ int minWidth = 2 * sk_float_ceil2int(sixSigma);
+ // Bin by powers of 2 with a minimum so we get good profile reuse.
+ int width = SkTMax(SkNextPow2(minWidth), 32);
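+ // For example (illustrative numbers): sigma = 4 gives sixSigma = 24, minWidth = 48, and
+ // width = SkNextPow2(48) = 64; any sigma in roughly (2.7, 5.3] reuses that same profile.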
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
+ builder[0] = width;
+ builder.finish();
+
+ sk_sp<GrTextureProxy> proxy(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin));
+ if (!proxy) {
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(SkImageInfo::MakeA8(width, 1))) {
+ return nullptr;
+ }
+ *bitmap.getAddr8(0, 0) = 255;
+ const float invWidth = 1.f / width;
+ for (int i = 1; i < width - 1; ++i) {
+ float x = (i + 0.5f) * invWidth;
+ x = (-6 * x + 3) * SK_ScalarRoot2Over2;
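+ // x is now (3 - 6t)/sqrt(2) for texture coord t, so the expression below is the
+ // standard normal CDF evaluated at 3 - 6t (in sigma units).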
+ float integral = 0.5f * (std::erf(x) + 1.f);
+ *bitmap.getAddr8(i, 0) = SkToU8(sk_float_round2int(255.f * integral));
+ }
+ *bitmap.getAddr8(width - 1, 0) = 0;
+ bitmap.setImmutable();
+ proxy = proxyProvider->createProxyFromBitmap(bitmap, GrMipMapped::kNo);
+ if (!proxy) {
+ return nullptr;
+ }
+ SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, proxy.get());
+ }
+ return proxy;
+}
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(GrProxyProvider* proxyProvider,
+ const GrShaderCaps& caps,
+ const SkRect& rect, float sigma) {
+ SkASSERT(rect.isSorted());
+ if (!caps.floatIs32Bits()) {
+ // We promote the math that gets us into the Gaussian space to full float when the rect
+ // coords are large. If we don't have full float then fail. We could probably clip the
+ // rect to an outset device bounds instead.
+ if (SkScalarAbs(rect.fLeft) > 16000.f || SkScalarAbs(rect.fTop) > 16000.f ||
+ SkScalarAbs(rect.fRight) > 16000.f || SkScalarAbs(rect.fBottom) > 16000.f) {
+ return nullptr;
+ }
+ }
+
+ const float sixSigma = 6 * sigma;
+ auto integral = CreateIntegralTexture(proxyProvider, sixSigma);
+ if (!integral) {
+ return nullptr;
+ }
+
+ // In the fast variant we think of the midpoint of the integral texture as aligning
+ // with the closest rect edge both in x and y. To simplify texture coord calculation we
+ // inset the rect so that the edge of the inset rect corresponds to t = 0 in the texture.
+ // It actually simplifies things a bit in the !isFast case, too.
+ float threeSigma = sixSigma / 2;
+ SkRect insetRect = {rect.fLeft + threeSigma,
+ rect.fTop + threeSigma,
+ rect.fRight - threeSigma,
+ rect.fBottom - threeSigma};
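+ // With this inset, a fragment on the inset edge samples the LUT at t = 0 (coverage ~1),
+ // one on the original rect's edge at t = 0.5 (coverage 0.5), and one 3 * sigma outside
+ // the original rect at t = 1 (coverage ~0); this is our reading of the fast-path mapping.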
+
+ // In our fast variant we find the nearest horizontal and vertical edges, do a lookup in
+ // the integral texture for each, and multiply the results. When the rect is
+ // less than 6 sigma wide then things aren't so simple and we have to consider both the
+ // left and right edge of the rectangle (and similar in y).
+ bool isFast = insetRect.isSorted();
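+ // insetRect is sorted exactly when the original rect is at least 6 * sigma wide and
+ // tall, i.e. when the blur profiles of opposing edges don't overlap.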
+ // 1 / (6 * sigma) is the domain of the integral texture. We use the inverse to produce
+ // normalized texture coords from frag coord distances.
+ float invSixSigma = 1.f / sixSigma;
+ return std::unique_ptr<GrFragmentProcessor>(new GrRectBlurEffect(insetRect,
+ std::move(integral), invSixSigma, isFast, GrSamplerState::ClampBilerp()));
+ }
+}
+
+void main() {
+ half xCoverage, yCoverage;
+ @if (isFast) {
+ // Take the larger of the signed distances from the frag coord to the left and right
+ // edges, i.e. the signed distance to the nearer edge (positive outside the rect), and
+ // similarly for y.
+ // The integral texture goes "backwards" (from 3*sigma to -3*sigma), so the below
+ // computations align the left edge of the integral texture with the inset rect's edge,
+ // extending outward 6 * sigma from the inset rect.
+ half x, y;
+ @if (highp) {
+ x = max(half(rectF.x - sk_FragCoord.x), half(sk_FragCoord.x - rectF.z));
+ y = max(half(rectF.y - sk_FragCoord.y), half(sk_FragCoord.y - rectF.w));
+ } else {
+ x = max(half(rectH.x - sk_FragCoord.x), half(sk_FragCoord.x - rectH.z));
+ y = max(half(rectH.y - sk_FragCoord.y), half(sk_FragCoord.y - rectH.w));
+ }
+ xCoverage = sample(integral, half2(x * invSixSigma, 0.5)).a;
+ yCoverage = sample(integral, half2(y * invSixSigma, 0.5)).a;
+ } else {
+ // We consider just the x direction here. In practice we compute x and y separately
+ // and multiply them together.
+ // We define our coord system so that the point at which we're evaluating the kernel K,
+ // defined by the normal distribution, is at 0. In this coord system let L be the left
+ // edge and R the right edge of the rectangle.
+ // We can calculate C by integrating K with the half infinite ranges outside the L to R
+ // range and subtracting from 1:
+ // C = 1 - <integral of K from -inf to L> - <integral of K from R to inf>
+ // K is symmetric about x=0 so:
+ // C = 1 - <integral of K from -inf to L> - <integral of K from -inf to -R>
+
+ // The integral texture goes "backwards" (from 3*sigma to -3*sigma), which is factored
+ // into the calculations below, as is the fact that our rect uniform was pre-inset by
+ // 3 sigma from the actual rect being blurred.
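+ // Sanity check (our arithmetic): on the original rect's left edge l = -3 * sigma, so
+ // il = 0.5 and its lookup returns 0.5; with the right edge far away, ir > 1 clamps to
+ // a ~0 sample, giving xCoverage ~0.5, as expected for a blurred step edge.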
+ half l, r, t, b;
+ @if (highp) {
+ l = half(sk_FragCoord.x - rectF.x);
+ r = half(rectF.z - sk_FragCoord.x);
+ t = half(sk_FragCoord.y - rectF.y);
+ b = half(rectF.w - sk_FragCoord.y);
+ } else {
+ l = half(sk_FragCoord.x - rectH.x);
+ r = half(rectH.z - sk_FragCoord.x);
+ t = half(sk_FragCoord.y - rectH.y);
+ b = half(rectH.w - sk_FragCoord.y);
+ }
+ half il = 1 + l * invSixSigma;
+ half ir = 1 + r * invSixSigma;
+ half it = 1 + t * invSixSigma;
+ half ib = 1 + b * invSixSigma;
+ xCoverage = 1 - sample(integral, half2(il, 0.5)).a
+ - sample(integral, half2(ir, 0.5)).a;
+ yCoverage = 1 - sample(integral, half2(it, 0.5)).a
+ - sample(integral, half2(ib, 0.5)).a;
+ }
+ sk_OutColor = sk_InColor * xCoverage * yCoverage;
+}
+
+@setData(pdman) {
+ float r[] {rect.fLeft, rect.fTop, rect.fRight, rect.fBottom};
+ pdman.set4fv(highp ? rectF : rectH, 1, r);
+}
+
+@optimizationFlags { kCompatibleWithCoverageAsAlpha_OptimizationFlag }
+
+@test(data) {
+ float sigma = data->fRandom->nextRangeF(3,8);
+ float width = data->fRandom->nextRangeF(200,300);
+ float height = data->fRandom->nextRangeF(200,300);
+ return GrRectBlurEffect::Make(data->proxyProvider(), *data->caps()->shaderCaps(),
+ SkRect::MakeWH(width, height), sigma);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.cpp
new file mode 100644
index 0000000000..b7e27a6b1f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrSRGBEffect.h"
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrGLSRGBEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrSRGBEffect& srgbe = args.fFp.cast<GrSRGBEffect>();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ SkString srgbFuncName;
+ const GrShaderVar gSrgbArgs[] = {
+ GrShaderVar("x", kHalf_GrSLType),
+ };
+ switch (srgbe.mode()) {
+ case GrSRGBEffect::Mode::kLinearToSRGB:
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "linear_to_srgb",
+ SK_ARRAY_COUNT(gSrgbArgs),
+ gSrgbArgs,
+ "return (x <= 0.0031308) ? (x * 12.92) "
+ ": (1.055 * pow(x, 0.416666667) - 0.055);",
+ &srgbFuncName);
+ break;
+ case GrSRGBEffect::Mode::kSRGBToLinear:
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "srgb_to_linear",
+ SK_ARRAY_COUNT(gSrgbArgs),
+ gSrgbArgs,
+ "return (x <= 0.04045) ? (x / 12.92) "
+ ": pow((x + 0.055) / 1.055, 2.4);",
+ &srgbFuncName);
+ break;
+ }
+
+ // Mali Bifrost uses fp16 for mediump. Making the intermediate color variable highp causes
+ // calculations to be performed with sufficient precision.
+ fragBuilder->codeAppendf("float4 color = %s;", args.fInputColor);
+ if (srgbe.alpha() == GrSRGBEffect::Alpha::kPremul) {
+ fragBuilder->codeAppendf("float nonZeroAlpha = max(color.a, 0.0001);");
+ fragBuilder->codeAppendf("color = float4(color.rgb / nonZeroAlpha, color.a);");
+ }
+ fragBuilder->codeAppendf("color = float4(%s(half(color.r)), %s(half(color.g)), "
+ "%s(half(color.b)), color.a);",
+ srgbFuncName.c_str(),
+ srgbFuncName.c_str(),
+ srgbFuncName.c_str());
+ if (srgbe.alpha() == GrSRGBEffect::Alpha::kPremul) {
+ fragBuilder->codeAppendf("color = float4(color.rgb, 1) * color.a;");
+ }
+ fragBuilder->codeAppendf("%s = half4(color);", args.fOutputColor);
+ }
+
+ static inline void GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrSRGBEffect& srgbe = processor.cast<GrSRGBEffect>();
+ uint32_t key = static_cast<uint32_t>(srgbe.mode()) |
+ (static_cast<uint32_t>(srgbe.alpha()) << 1);
+ b->add32(key);
+ }
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrSRGBEffect::GrSRGBEffect(Mode mode, Alpha alpha)
+ : INHERITED(kGrSRGBEffect_ClassID, kPreservesOpaqueInput_OptimizationFlag |
+ kConstantOutputForConstantInput_OptimizationFlag)
+ , fMode(mode)
+ , fAlpha(alpha)
+{
+}
+
+std::unique_ptr<GrFragmentProcessor> GrSRGBEffect::clone() const { return Make(fMode, fAlpha); }
+
+bool GrSRGBEffect::onIsEqual(const GrFragmentProcessor& s) const {
+ const GrSRGBEffect& other = s.cast<GrSRGBEffect>();
+ return other.fMode == fMode && other.fAlpha == fAlpha;
+}
+
+static inline float srgb_to_linear(float srgb) {
+ return (srgb <= 0.04045f) ? srgb / 12.92f : powf((srgb + 0.055f) / 1.055f, 2.4f);
+}
+static inline float linear_to_srgb(float linear) {
+ return (linear <= 0.0031308f) ? linear * 12.92f : 1.055f * powf(linear, 1.f / 2.4f) - 0.055f;
+}
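+// Sanity check (our arithmetic): srgb_to_linear(0.5) ~= 0.214 and linear_to_srgb(0.214) ~= 0.5,
+// so the two transfer functions round-trip.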
+
+SkPMColor4f GrSRGBEffect::constantOutputForConstantInput(const SkPMColor4f& inColor) const {
+ SkColor4f color = inColor.unpremul();
+ switch (fMode) {
+ case Mode::kLinearToSRGB:
+ color = { linear_to_srgb(color.fR), linear_to_srgb(color.fG), linear_to_srgb(color.fB),
+ color.fA };
+ break;
+ case Mode::kSRGBToLinear:
+ color = { srgb_to_linear(color.fR), srgb_to_linear(color.fG), srgb_to_linear(color.fB),
+ color.fA };
+ break;
+ }
+ return color.premul();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSRGBEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrSRGBEffect::TestCreate(GrProcessorTestData* d) {
+ Mode testMode = static_cast<Mode>(d->fRandom->nextRangeU(0, 1));
+ return GrSRGBEffect::Make(testMode, Alpha::kPremul);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrSRGBEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLSRGBEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrSRGBEffect::onCreateGLSLInstance() const {
+ return new GrGLSRGBEffect;
+}
+
diff --git a/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.h b/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.h
new file mode 100644
index 0000000000..2135a50279
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSRGBEffect.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSRGBEffect_DEFINED
+#define GrSRGBEffect_DEFINED
+
+#include "src/gpu/GrFragmentProcessor.h"
+
+class GrSRGBEffect : public GrFragmentProcessor {
+public:
+ enum class Mode {
+ kLinearToSRGB,
+ kSRGBToLinear,
+ };
+
+ enum class Alpha {
+ kPremul,
+ kOpaque,
+ };
+
+ /**
+ * Creates an effect that applies the sRGB transfer function (or its inverse)
+ */
+ static std::unique_ptr<GrFragmentProcessor> Make(Mode mode, Alpha alpha) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSRGBEffect(mode, alpha));
+ }
+
+ const char* name() const override { return "sRGB"; }
+
+ Mode mode() const { return fMode; }
+ Alpha alpha() const { return fAlpha; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ GrSRGBEffect(Mode mode, Alpha);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override;
+
+ Mode fMode;
+ Alpha fAlpha;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrSaturateProcessor.fp b/gfx/skia/skia/src/gpu/effects/GrSaturateProcessor.fp
new file mode 100644
index 0000000000..137205c790
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSaturateProcessor.fp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+@optimizationFlags {
+ kConstantOutputForConstantInput_OptimizationFlag |
+ kPreservesOpaqueInput_OptimizationFlag
+}
+
+void main() { sk_OutColor = saturate(sk_InColor); }
+
+@class {
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return {SkTPin(input.fR, 0.f, 1.f),
+ SkTPin(input.fG, 0.f, 1.f),
+ SkTPin(input.fB, 0.f, 1.f),
+ SkTPin(input.fA, 0.f, 1.f)};
+ }
+}
+
+@test(d) { return GrSaturateProcessor::Make(); }
diff --git a/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.cpp b/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.cpp
new file mode 100644
index 0000000000..5af096c63d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrShadowGeoProc.h"
+
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+class GrGLSLRRectShadowGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLSLRRectShadowGeoProc() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrRRectShadowGeoProc& rsgp = args.fGP.cast<GrRRectShadowGeoProc>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // emit attributes
+ varyingHandler->emitAttributes(rsgp);
+ fragBuilder->codeAppend("half3 shadowParams;");
+ varyingHandler->addPassThroughAttribute(rsgp.inShadowParams(), "shadowParams");
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(rsgp.inColor(), args.fOutputColor);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, rsgp.inPosition().name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ rsgp.inPosition().asShaderVar(),
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppend("half d = length(shadowParams.xy);");
+ fragBuilder->codeAppend("half distance = shadowParams.z * (1.0 - d);");
+
+ fragBuilder->codeAppend("half factor = 1.0 - clamp(distance, 0.0, 1.0);");
+ fragBuilder->codeAppend("factor = exp(-factor * factor * 4.0) - 0.018;");
+ fragBuilder->codeAppendf("%s = half4(factor);",
+ args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRRectShadowGeoProc::GrRRectShadowGeoProc() : INHERITED(kGrRRectShadowGeoProc_ClassID) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = {"inColor", kUByte4_norm_GrVertexAttribType, kHalf4_GrSLType};
+ fInShadowParams = {"inShadowParams", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+}
+
+GrGLSLPrimitiveProcessor* GrRRectShadowGeoProc::createGLSLInstance(const GrShaderCaps&) const {
+ return new GrGLSLRRectShadowGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrRRectShadowGeoProc);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> GrRRectShadowGeoProc::TestCreate(GrProcessorTestData* d) {
+ return GrRRectShadowGeoProc::Make();
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.h b/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.h
new file mode 100644
index 0000000000..b821a6a505
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrShadowGeoProc.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShadowGeoProc_DEFINED
+#define GrShadowGeoProc_DEFINED
+
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessor.h"
+
+class GrGLRRectShadowGeoProc;
+
+/**
+ * The output color of this effect is a coverage mask for a rrect shadow,
+ * assuming circular corner geometry.
+ */
+class GrRRectShadowGeoProc : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make() {
+ return sk_sp<GrGeometryProcessor>(new GrRRectShadowGeoProc());
+ }
+
+ const char* name() const override { return "RRectShadow"; }
+
+ const Attribute& inPosition() const { return fInPosition; }
+ const Attribute& inColor() const { return fInColor; }
+ const Attribute& inShadowParams() const { return fInShadowParams; }
+ GrColor color() const { return fColor; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {}
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ GrRRectShadowGeoProc();
+
+ GrColor fColor;
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInShadowParams;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.fp b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.fp
new file mode 100644
index 0000000000..5c30cc070f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.fp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in uniform sampler2D image;
+in half4x4 matrix;
+
+@constructorParams {
+ GrColorType srcColorType,
+ GrSamplerState samplerParams
+}
+
+@coordTransform(image) {
+ matrix
+}
+
+@samplerParams(image) {
+ samplerParams
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(std::move(proxy), matrix, srcColorType,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, GrSamplerState::Filter::kNearest)));
+ }
+
+ /* clamp mode */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ GrSamplerState::Filter filter) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(std::move(proxy), matrix, srcColorType,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, filter)));
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const GrSamplerState& p) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(std::move(proxy), matrix, srcColorType, p));
+ }
+}
+
+@optimizationFlags {
+ ModulateForSamplerOptFlags(srcColorType,
+ samplerParams.wrapModeX() == GrSamplerState::WrapMode::kClampToBorder ||
+ samplerParams.wrapModeY() == GrSamplerState::WrapMode::kClampToBorder)
+}
+
+void main() {
+ sk_OutColor = sk_InColor * sample(image, sk_TransformedCoords2D[0]);
+}
+
+@test(testData) {
+ int texIdx = testData->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(testData->fRandom, wrapModes);
+ if (!testData->caps()->npotTextureTileSupport()) {
+ // Performing repeat sampling on npot textures will cause asserts on HW
+ // that lacks support.
+ wrapModes[0] = GrSamplerState::WrapMode::kClamp;
+ wrapModes[1] = GrSamplerState::WrapMode::kClamp;
+ }
+
+ GrSamplerState params(wrapModes, testData->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+
+ const SkMatrix& matrix = GrTest::TestMatrix(testData->fRandom);
+ return GrSimpleTextureEffect::Make(testData->textureProxy(texIdx),
+ testData->textureProxyColorType(texIdx), matrix, params);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrSkSLFP.cpp b/gfx/skia/skia/src/gpu/effects/GrSkSLFP.cpp
new file mode 100644
index 0000000000..9269ebf668
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSkSLFP.cpp
@@ -0,0 +1,573 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrSkSLFP.h"
+
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrContext_Base.h"
+#include "src/gpu/GrBaseContextPriv.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+GrSkSLFPFactory::GrSkSLFPFactory(const char* name, const GrShaderCaps* shaderCaps, const char* sksl,
+ SkSL::Program::Kind kind)
+ : fKind(kind)
+ , fName(name) {
+ SkSL::Program::Settings settings;
+ settings.fCaps = shaderCaps;
+ fBaseProgram = fCompiler.convertProgram(fKind, SkSL::String(sksl), settings);
+ if (fCompiler.errorCount()) {
+ SkDebugf("%s\n", fCompiler.errorText().c_str());
+ }
+ SkASSERT(fBaseProgram);
+ SkASSERT(!fCompiler.errorCount());
+ for (const auto& e : *fBaseProgram) {
+ if (e.fKind == SkSL::ProgramElement::kVar_Kind) {
+ SkSL::VarDeclarations& v = (SkSL::VarDeclarations&) e;
+ for (const auto& varStatement : v.fVars) {
+ const SkSL::Variable& var = *((SkSL::VarDeclaration&) *varStatement).fVar;
+ if ((var.fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) ||
+ (var.fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag)) {
+ fInAndUniformVars.push_back(&var);
+ }
+ // "in uniform" doesn't make sense outside of .fp files
+ SkASSERT((var.fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) == 0 ||
+ (var.fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag) == 0);
+ // "layout(key)" doesn't make sense outside of .fp files; all 'in' variables are
+ // part of the key
+ SkASSERT(!var.fModifiers.fLayout.fKey);
+ }
+ }
+ }
+}
+
+const SkSL::Program* GrSkSLFPFactory::getSpecialization(const SkSL::String& key, const void* inputs,
+ size_t inputSize) {
+ const auto& found = fSpecializations.find(key);
+ if (found != fSpecializations.end()) {
+ return found->second.get();
+ }
+
+ std::unordered_map<SkSL::String, SkSL::Program::Settings::Value> inputMap;
+ size_t offset = 0;
+ for (const auto& v : fInAndUniformVars) {
+ SkSL::String name(v->fName);
+ if (&v->fType == fCompiler.context().fInt_Type.get() ||
+ &v->fType == fCompiler.context().fShort_Type.get()) {
+ offset = SkAlign4(offset);
+ int32_t v = *(int32_t*) (((uint8_t*) inputs) + offset);
+ inputMap.insert(std::make_pair(name, SkSL::Program::Settings::Value(v)));
+ offset += sizeof(int32_t);
+ } else if (&v->fType == fCompiler.context().fFloat_Type.get() ||
+ &v->fType == fCompiler.context().fHalf_Type.get()) {
+ offset = SkAlign4(offset);
+ float v = *(float*) (((uint8_t*) inputs) + offset);
+ inputMap.insert(std::make_pair(name, SkSL::Program::Settings::Value(v)));
+ offset += sizeof(float);
+ } else if (&v->fType == fCompiler.context().fBool_Type.get()) {
+ bool v = *(((bool*) inputs) + offset);
+ inputMap.insert(std::make_pair(name, SkSL::Program::Settings::Value(v)));
+ offset += sizeof(bool);
+ } else if (&v->fType == fCompiler.context().fFloat4_Type.get() ||
+ &v->fType == fCompiler.context().fHalf4_Type.get()) {
+ offset = SkAlign4(offset) + sizeof(float) * 4;
+ } else if (&v->fType == fCompiler.context().fFragmentProcessor_Type.get()) {
+ // do nothing
+ } else {
+ printf("can't handle input var: %s\n", SkSL::String(v->fType.fName).c_str());
+ SkASSERT(false);
+ }
+ }
+
+ std::unique_ptr<SkSL::Program> specialized = fCompiler.specialize(*fBaseProgram, inputMap);
+ bool optimized = fCompiler.optimize(*specialized);
+ if (!optimized) {
+ SkDebugf("%s\n", fCompiler.errorText().c_str());
+ SkASSERT(false);
+ }
+ const SkSL::Program* result = specialized.get();
+ fSpecializations.insert(std::make_pair(key, std::move(specialized)));
+ return result;
+}
+
+static SkSL::Layout::CType get_ctype(const SkSL::Context& context, const SkSL::Variable& v) {
+ SkSL::Layout::CType result = v.fModifiers.fLayout.fCType;
+ if (result == SkSL::Layout::CType::kDefault) {
+ if (&v.fType == context.fFloat_Type.get()) {
+ result = SkSL::Layout::CType::kFloat;
+ } else if (&v.fType == context.fFloat4_Type.get()) {
+ result = SkSL::Layout::CType::kSkRect;
+ } else if (&v.fType == context.fHalf4_Type.get()) {
+ result = SkSL::Layout::CType::kSkPMColor;
+ } else if (&v.fType == context.fInt_Type.get()) {
+ result = SkSL::Layout::CType::kInt32;
+ } else if (&v.fType == context.fBool_Type.get()) {
+ result = SkSL::Layout::CType::kBool;
+ } else {
+ return SkSL::Layout::CType::kDefault;
+ }
+ }
+ return result;
+}
+
+class GrGLSLSkSLFP : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLSkSLFP(const SkSL::Context* context,
+ const std::vector<const SkSL::Variable*>* inAndUniformVars,
+ SkSL::String glsl, std::vector<SkSL::Compiler::FormatArg> formatArgs,
+ std::vector<SkSL::Compiler::GLSLFunction> functions)
+ : fContext(*context)
+ , fInAndUniformVars(*inAndUniformVars)
+ , fGLSL(glsl)
+ , fFormatArgs(std::move(formatArgs))
+ , fFunctions(std::move(functions)) {}
+
+ GrSLType uniformType(const SkSL::Type& type) {
+ if (type == *fContext.fFloat_Type) {
+ return kFloat_GrSLType;
+ } else if (type == *fContext.fHalf_Type) {
+ return kHalf_GrSLType;
+ } else if (type == *fContext.fFloat2_Type) {
+ return kFloat2_GrSLType;
+ } else if (type == *fContext.fHalf2_Type) {
+ return kHalf2_GrSLType;
+ } else if (type == *fContext.fFloat4_Type) {
+ return kFloat4_GrSLType;
+ } else if (type == *fContext.fHalf4_Type) {
+ return kHalf4_GrSLType;
+ } else if (type == *fContext.fFloat4x4_Type) {
+ return kFloat4x4_GrSLType;
+ } else if (type == *fContext.fHalf4x4_Type) {
+ return kHalf4x4_GrSLType;
+ } else if (type == *fContext.fBool_Type) {
+ return kBool_GrSLType;
+ } else if (type == *fContext.fInt_Type) {
+ return kInt_GrSLType;
+ }
+ printf("%s\n", SkSL::String(type.fName).c_str());
+ SK_ABORT("unsupported uniform type");
+ }
+
+ void emitCode(EmitArgs& args) override {
+ for (const auto& v : fInAndUniformVars) {
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag && v->fType !=
+ *fContext.fFragmentProcessor_Type) {
+ fUniformHandles.push_back(args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag,
+ this->uniformType(v->fType),
+ SkSL::String(v->fName).c_str()));
+ }
+ }
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ for (const auto& f : fFunctions) {
+ fFunctionNames.emplace_back();
+ fragBuilder->emitFunction(f.fReturnType,
+ f.fName.c_str(),
+ f.fParameters.size(),
+ f.fParameters.data(),
+ f.fBody.c_str(),
+ &fFunctionNames.back());
+ }
+ std::vector<SkString> childNames;
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ childNames.push_back(SkStringPrintf("_child%d", i));
+ this->invokeChild(i, &childNames[i], args);
+ }
+ int substringStartIndex = 0;
+ int formatArgIndex = 0;
+ SkString coords = args.fTransformedCoords.count()
+ ? fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint)
+ : SkString("sk_FragCoord");
+ for (size_t i = 0; i < fGLSL.length(); ++i) {
+ char c = fGLSL[i];
+ if (c == '%') {
+ fragBuilder->codeAppend(fGLSL.c_str() + substringStartIndex,
+ i - substringStartIndex);
+ ++i;
+ c = fGLSL[i];
+ switch (c) {
+ case 's': {
+ SkSL::Compiler::FormatArg& arg = fFormatArgs[formatArgIndex++];
+ switch (arg.fKind) {
+ case SkSL::Compiler::FormatArg::Kind::kInput:
+ fragBuilder->codeAppend(args.fInputColor);
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kOutput:
+ fragBuilder->codeAppend(args.fOutputColor);
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kCoordX:
+ fragBuilder->codeAppendf("%s.x", coords.c_str());
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kCoordY:
+ fragBuilder->codeAppendf("%s.y", coords.c_str());
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kUniform:
+ fragBuilder->codeAppend(args.fUniformHandler->getUniformCStr(
+ fUniformHandles[arg.fIndex]));
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kChildProcessor:
+ fragBuilder->codeAppend(childNames[arg.fIndex].c_str());
+ break;
+ case SkSL::Compiler::FormatArg::Kind::kFunctionName:
+ fragBuilder->codeAppend(fFunctionNames[arg.fIndex].c_str());
+ break;
+ }
+ break;
+ }
+ default:
+ fragBuilder->codeAppendf("%c", c);
+ }
+ substringStartIndex = i + 1;
+ }
+ }
+ fragBuilder->codeAppend(fGLSL.c_str() + substringStartIndex,
+ fGLSL.length() - substringStartIndex);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ size_t uniformIndex = 0;
+ size_t offset = 0;
+ const GrSkSLFP& outer = _proc.cast<GrSkSLFP>();
+ char* inputs = (char*) outer.fInputs.get();
+ for (const auto& v : outer.fFactory->fInAndUniformVars) {
+ switch (get_ctype(fContext, *v)) {
+ case SkSL::Layout::CType::kSkPMColor: {
+ float f1 = ((uint8_t*) inputs)[offset++] / 255.0;
+ float f2 = ((uint8_t*) inputs)[offset++] / 255.0;
+ float f3 = ((uint8_t*) inputs)[offset++] / 255.0;
+ float f4 = ((uint8_t*) inputs)[offset++] / 255.0;
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag) {
+ pdman.set4f(fUniformHandles[uniformIndex++], f1, f2, f3, f4);
+ }
+ break;
+ }
+ case SkSL::Layout::CType::kSkPMColor4f:
+ case SkSL::Layout::CType::kSkRect: {
+ offset = SkAlign4(offset);
+ float f1 = *(float*) (inputs + offset);
+ offset += sizeof(float);
+ float f2 = *(float*) (inputs + offset);
+ offset += sizeof(float);
+ float f3 = *(float*) (inputs + offset);
+ offset += sizeof(float);
+ float f4 = *(float*) (inputs + offset);
+ offset += sizeof(float);
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag) {
+ pdman.set4f(fUniformHandles[uniformIndex++], f1, f2, f3, f4);
+ }
+ break;
+ }
+ case SkSL::Layout::CType::kInt32: {
+ int32_t i = *(int32_t*) (inputs + offset);
+ offset += sizeof(int32_t);
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag) {
+ pdman.set1i(fUniformHandles[uniformIndex++], i);
+ }
+ break;
+ }
+ case SkSL::Layout::CType::kFloat: {
+ float f = *(float*) (inputs + offset);
+ offset += sizeof(float);
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag) {
+ pdman.set1f(fUniformHandles[uniformIndex++], f);
+ }
+ break;
+ }
+ case SkSL::Layout::CType::kBool:
+ SkASSERT(!(v->fModifiers.fFlags & SkSL::Modifiers::kUniform_Flag));
+ ++offset;
+ break;
+ default:
+ SkASSERT(&v->fType == fContext.fFragmentProcessor_Type.get());
+ }
+ }
+ }
+
+ const SkSL::Context& fContext;
+ const std::vector<const SkSL::Variable*>& fInAndUniformVars;
+ // nearly-finished GLSL; still contains printf-style "%s" format tokens
+ const SkSL::String fGLSL;
+ std::vector<SkSL::Compiler::FormatArg> fFormatArgs;
+ std::vector<SkSL::Compiler::GLSLFunction> fFunctions;
+ std::vector<UniformHandle> fUniformHandles;
+ std::vector<SkString> fFunctionNames;
+};
+
+std::unique_ptr<GrSkSLFP> GrSkSLFP::Make(GrContext_Base* context, int index, const char* name,
+ const char* sksl, const void* inputs,
+ size_t inputSize, SkSL::Program::Kind kind,
+ const SkMatrix* matrix) {
+ return std::unique_ptr<GrSkSLFP>(new GrSkSLFP(context->priv().fpFactoryCache(),
+ context->priv().caps()->shaderCaps(),
+ kind, index, name, sksl, SkString(),
+ inputs, inputSize, matrix));
+}
+
+std::unique_ptr<GrSkSLFP> GrSkSLFP::Make(GrContext_Base* context, int index, const char* name,
+ SkString sksl, const void* inputs, size_t inputSize,
+ SkSL::Program::Kind kind, const SkMatrix* matrix) {
+ return std::unique_ptr<GrSkSLFP>(new GrSkSLFP(context->priv().fpFactoryCache(),
+ context->priv().caps()->shaderCaps(),
+ kind, index, name, nullptr, std::move(sksl),
+ inputs, inputSize, matrix));
+}
+
+GrSkSLFP::GrSkSLFP(sk_sp<GrSkSLFPFactoryCache> factoryCache, const GrShaderCaps* shaderCaps,
+ SkSL::Program::Kind kind, int index, const char* name, const char* sksl,
+ SkString skslString, const void* inputs, size_t inputSize,
+ const SkMatrix* matrix)
+ : INHERITED(kGrSkSLFP_ClassID, kNone_OptimizationFlags)
+ , fFactoryCache(factoryCache)
+ , fShaderCaps(sk_ref_sp(shaderCaps))
+ , fKind(kind)
+ , fIndex(index)
+ , fName(name)
+ , fSkSLString(skslString)
+ , fSkSL(sksl ? sksl : fSkSLString.c_str())
+ , fInputs(new int8_t[inputSize])
+ , fInputSize(inputSize) {
+ if (fInputSize) {
+ memcpy(fInputs.get(), inputs, inputSize);
+ }
+ if (matrix) {
+ fCoordTransform = GrCoordTransform(*matrix);
+ this->addCoordTransform(&fCoordTransform);
+ }
+}
+
+GrSkSLFP::GrSkSLFP(const GrSkSLFP& other)
+ : INHERITED(kGrSkSLFP_ClassID, kNone_OptimizationFlags)
+ , fFactoryCache(other.fFactoryCache)
+ , fShaderCaps(other.fShaderCaps)
+ , fFactory(other.fFactory)
+ , fKind(other.fKind)
+ , fIndex(other.fIndex)
+ , fName(other.fName)
+ , fSkSLString(other.fSkSLString)
+ , fSkSL(other.fSkSL)
+ , fInputs(new int8_t[other.fInputSize])
+ , fInputSize(other.fInputSize) {
+ if (fInputSize) {
+ memcpy(fInputs.get(), other.fInputs.get(), fInputSize);
+ }
+ if (other.numCoordTransforms()) {
+ fCoordTransform = other.fCoordTransform;
+ this->addCoordTransform(&fCoordTransform);
+ }
+}
+
+const char* GrSkSLFP::name() const {
+ return fName;
+}
+
+void GrSkSLFP::createFactory() const {
+ if (!fFactory) {
+ fFactory = fFactoryCache->get(fIndex);
+ if (!fFactory) {
+ fFactory = sk_sp<GrSkSLFPFactory>(new GrSkSLFPFactory(fName, fShaderCaps.get(), fSkSL,
+ fKind));
+ fFactoryCache->set(fIndex, fFactory);
+ }
+ }
+}
+
+void GrSkSLFP::addChild(std::unique_ptr<GrFragmentProcessor> child) {
+ this->registerChildProcessor(std::move(child));
+}
+
+GrGLSLFragmentProcessor* GrSkSLFP::onCreateGLSLInstance() const {
+ this->createFactory();
+ const SkSL::Program* specialized = fFactory->getSpecialization(fKey, fInputs.get(), fInputSize);
+ SkSL::String glsl;
+ std::vector<SkSL::Compiler::FormatArg> formatArgs;
+ std::vector<SkSL::Compiler::GLSLFunction> functions;
+ if (!fFactory->fCompiler.toPipelineStage(*specialized, &glsl, &formatArgs, &functions)) {
+ printf("%s\n", fFactory->fCompiler.errorText().c_str());
+ SkASSERT(false);
+ }
+ return new GrGLSLSkSLFP(specialized->fContext.get(), &fFactory->fInAndUniformVars, glsl,
+ formatArgs, functions);
+}
+
+void GrSkSLFP::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ this->createFactory();
+ b->add32(fIndex);
+ size_t offset = 0;
+ char* inputs = (char*) fInputs.get();
+ const SkSL::Context& context = fFactory->fCompiler.context();
+ for (const auto& v : fFactory->fInAndUniformVars) {
+ if (&v->fType == context.fFragmentProcessor_Type.get()) {
+ continue;
+ }
+ switch (get_ctype(context, *v)) {
+ case SkSL::Layout::CType::kBool:
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) {
+ fKey += inputs[offset];
+ b->add32(inputs[offset]);
+ }
+ ++offset;
+ break;
+ case SkSL::Layout::CType::kInt32: {
+ offset = SkAlign4(offset);
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) {
+ fKey += inputs[offset + 0];
+ fKey += inputs[offset + 1];
+ fKey += inputs[offset + 2];
+ fKey += inputs[offset + 3];
+ b->add32(*(int32_t*) (inputs + offset));
+ }
+ offset += sizeof(int32_t);
+ break;
+ }
+ case SkSL::Layout::CType::kFloat: {
+ offset = SkAlign4(offset);
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) {
+ fKey += inputs[offset + 0];
+ fKey += inputs[offset + 1];
+ fKey += inputs[offset + 2];
+ fKey += inputs[offset + 3];
+ b->add32(*(float*) (inputs + offset));
+ }
+ offset += sizeof(float);
+ break;
+ }
+ case SkSL::Layout::CType::kSkPMColor:
+ case SkSL::Layout::CType::kSkPMColor4f:
+ case SkSL::Layout::CType::kSkRect:
+ if (v->fModifiers.fFlags & SkSL::Modifiers::kIn_Flag) {
+ for (size_t i = 0; i < sizeof(float) * 4; ++i) {
+ fKey += inputs[offset + i];
+ }
+ b->add32(*(int32_t*) (inputs + offset));
+ offset += sizeof(float);
+ b->add32(*(int32_t*) (inputs + offset));
+ offset += sizeof(float);
+ b->add32(*(int32_t*) (inputs + offset));
+ offset += sizeof(float);
+ b->add32(*(int32_t*) (inputs + offset));
+ offset += sizeof(float);
+ } else {
+ offset += sizeof(float) * 4;
+ }
+ break;
+ default:
+ // unsupported input var type
+ printf("%s\n", SkSL::String(v->fType.fName).c_str());
+ SkASSERT(false);
+ }
+ }
+}
+
+bool GrSkSLFP::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrSkSLFP& sk = other.cast<GrSkSLFP>();
+ SkASSERT(fIndex != sk.fIndex || fInputSize == sk.fInputSize);
+ return fIndex == sk.fIndex &&
+ !memcmp(fInputs.get(), sk.fInputs.get(), fInputSize);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrSkSLFP::clone() const {
+ std::unique_ptr<GrSkSLFP> result(new GrSkSLFP(*this));
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ result->registerChildProcessor(this->childProcessor(i).clone());
+ }
+ return std::unique_ptr<GrFragmentProcessor>(result.release());
+}
+
+// We have to do a bit of manual refcounting in the cache methods below. Ideally, we could just
+// define fFactories to contain sk_sp<GrSkSLFPFactory> rather than GrSkSLFPFactory*, but that would
+// require GrContext to include GrSkSLFP, which creates much bigger headaches than a few manual
+// refcounts.
+
+sk_sp<GrSkSLFPFactory> GrSkSLFPFactoryCache::get(int index) {
+ if (index >= (int) fFactories.size()) {
+ return nullptr;
+ }
+ GrSkSLFPFactory* result = fFactories[index];
+ SkSafeRef(result);
+ return sk_sp<GrSkSLFPFactory>(result);
+}
+
+void GrSkSLFPFactoryCache::set(int index, sk_sp<GrSkSLFPFactory> factory) {
+ while (index >= (int) fFactories.size()) {
+ fFactories.emplace_back();
+ }
+ factory->ref();
+ SkASSERT(!fFactories[index]);
+ fFactories[index] = factory.get();
+}
+
+GrSkSLFPFactoryCache::~GrSkSLFPFactoryCache() {
+ for (GrSkSLFPFactory* factory : fFactories) {
+ if (factory) {
+ factory->unref();
+ }
+ }
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSkSLFP);
+
+#if GR_TEST_UTILS
+
+#include "include/effects/SkArithmeticImageFilter.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+
+extern const char* SKSL_ARITHMETIC_SRC;
+extern const char* SKSL_DITHER_SRC;
+extern const char* SKSL_OVERDRAW_SRC;
+
+using Value = SkSL::Program::Settings::Value;
+
+std::unique_ptr<GrFragmentProcessor> GrSkSLFP::TestCreate(GrProcessorTestData* d) {
+ int type = d->fRandom->nextULessThan(3);
+ switch (type) {
+ case 0: {
+ static int ditherIndex = NewIndex();
+ int rangeType = d->fRandom->nextULessThan(3);
+ std::unique_ptr<GrSkSLFP> result = GrSkSLFP::Make(d->context(), ditherIndex, "Dither",
+ SKSL_DITHER_SRC, &rangeType,
+ sizeof(rangeType));
+ return std::unique_ptr<GrFragmentProcessor>(result.release());
+ }
+ case 1: {
+ static int arithmeticIndex = NewIndex();
+ ArithmeticFPInputs inputs;
+ inputs.k[0] = d->fRandom->nextF();
+ inputs.k[1] = d->fRandom->nextF();
+ inputs.k[2] = d->fRandom->nextF();
+ inputs.k[3] = d->fRandom->nextF();
+ inputs.enforcePMColor = d->fRandom->nextBool();
+ std::unique_ptr<GrSkSLFP> result = GrSkSLFP::Make(d->context(), arithmeticIndex,
+ "Arithmetic", SKSL_ARITHMETIC_SRC,
+ &inputs, sizeof(inputs));
+ result->addChild(GrConstColorProcessor::Make(
+ SK_PMColor4fWHITE,
+ GrConstColorProcessor::InputMode::kIgnore));
+ return std::unique_ptr<GrFragmentProcessor>(result.release());
+ }
+ case 2: {
+ static int overdrawIndex = NewIndex();
+ SkPMColor inputs[6];
+ for (int i = 0; i < 6; ++i) {
+ inputs[i] = d->fRandom->nextU();
+ }
+ std::unique_ptr<GrSkSLFP> result = GrSkSLFP::Make(d->context(), overdrawIndex,
+ "Overdraw", SKSL_OVERDRAW_SRC,
+ &inputs, sizeof(inputs));
+ return std::unique_ptr<GrFragmentProcessor>(result.release());
+ }
+ }
+ SK_ABORT("unreachable");
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrSkSLFP.h b/gfx/skia/skia/src/gpu/effects/GrSkSLFP.h
new file mode 100644
index 0000000000..eba33c6112
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSkSLFP.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSkSLFP_DEFINED
+#define GrSkSLFP_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSkSLFPFactoryCache.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLPipelineStageCodeGenerator.h"
+#include <atomic>
+
+#if GR_TEST_UTILS
+#define GR_FP_SRC_STRING const char*
+#else
+#define GR_FP_SRC_STRING static const char*
+#endif
+
+class GrContext_Base;
+class GrSkSLFPFactory;
+
+class GrSkSLFP : public GrFragmentProcessor {
+public:
+ /**
+ * Returns a new unique identifier. Each different SkSL fragment processor should call
+ * NewIndex once, statically, and use this index for all calls to Make.
+ */
+ static int NewIndex() {
+ static std::atomic<int> nextIndex{0};
+ return nextIndex++;
+ }
+
+ /**
+ * Creates a new fragment processor from an SkSL source string and a struct of inputs to the
+ * program. The input struct's type is derived from the 'in' and 'uniform' variables in the SkSL
+ * source, so e.g. the shader:
+ *
+ * in bool dither;
+ * uniform float x;
+ * uniform float y;
+ * ....
+ *
+ * would expect a pointer to a struct set up like:
+ *
+ * struct {
+ * bool dither;
+ * float x;
+ * float y;
+ * };
+ *
+ * While both 'in' and 'uniform' variables go into this struct, the difference between them is
+ * that 'in' variables are statically "baked in" to the generated code, becoming literals,
+ * whereas uniform variables may be changed from invocation to invocation without having to
+ * recompile the shader.
+ *
+ * As the decision of whether to create a new shader or just upload new uniforms all happens
+ * behind the scenes, the difference between the two from an end-user perspective is primarily
+ * in performance: on the one hand, changing the value of an 'in' variable is very expensive
+ * (requiring the compiler to regenerate the code, upload a new shader to the GPU, and so
+ * forth), but on the other hand the compiler can optimize around its value because it is known
+ * at compile time. 'in' variables are therefore suitable for things like flags, where there are
+ * only a few possible values and a known-in-advance value can cause entire chunks of code to
+ * become dead (think static @ifs), while 'uniform's are used for continuous values like colors
+ * and coordinates, where it would be silly to create a separate shader for each possible set of
+ * values. Other than the (significant) performance implications, the only difference between
+ * the two is that 'in' variables can be used in static @if / @switch tests. When in doubt, use
+ * 'uniform'.
+ *
+ * As turning SkSL into GLSL / SPIR-V / etc. is fairly expensive, and the output may differ
+ * based on the inputs, internally the process is divided into two steps: we first parse and
+ * semantically analyze the SkSL into an internal representation, and then "specialize" this
+ * internal representation based on the inputs. The unspecialized internal representation of
+ * the program is cached, so further specializations of the same code are much faster than the
+ * first call.
+ *
+ * This caching is based on the 'index' parameter, which should be derived by statically calling
+ * 'NewIndex()'. Each given SkSL string should have a single, statically defined index
+ * associated with it.
+ */
+ static std::unique_ptr<GrSkSLFP> Make(
+ GrContext_Base* context,
+ int index,
+ const char* name,
+ const char* sksl,
+ const void* inputs,
+ size_t inputSize,
+ SkSL::Program::Kind kind = SkSL::Program::kPipelineStage_Kind,
+ const SkMatrix* matrix = nullptr);
+
+ static std::unique_ptr<GrSkSLFP> Make(
+ GrContext_Base* context,
+ int index,
+ const char* name,
+ SkString sksl,
+ const void* inputs,
+ size_t inputSize,
+ SkSL::Program::Kind kind = SkSL::Program::kPipelineStage_Kind,
+ const SkMatrix* matrix = nullptr);
+
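+    // A minimal usage sketch, modeled on the overdraw case in this effect's test factory in
+    // GrSkSLFP.cpp; SKSL_OVERDRAW_SRC and its six-SkPMColor input layout come from that test,
+    // and 'context' stands for any GrContext_Base*:
+    //
+    //     static int overdrawIndex = GrSkSLFP::NewIndex();  // one index per SkSL string
+    //     SkPMColor inputs[6] = { /* six packed colors */ };
+    //     std::unique_ptr<GrSkSLFP> fp = GrSkSLFP::Make(context, overdrawIndex, "Overdraw",
+    //                                                   SKSL_OVERDRAW_SRC, &inputs,
+    //                                                   sizeof(inputs));
+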
+ const char* name() const override;
+
+ void addChild(std::unique_ptr<GrFragmentProcessor> child);
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ GrSkSLFP(sk_sp<GrSkSLFPFactoryCache> factoryCache, const GrShaderCaps* shaderCaps,
+ SkSL::Program::Kind kind, int fIndex, const char* name, const char* sksl,
+ SkString skslString, const void* inputs, size_t inputSize, const SkMatrix* matrix);
+
+ GrSkSLFP(const GrSkSLFP& other);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void createFactory() const;
+
+ sk_sp<GrSkSLFPFactoryCache> fFactoryCache;
+
+ const sk_sp<GrShaderCaps> fShaderCaps;
+
+ mutable sk_sp<GrSkSLFPFactory> fFactory;
+
+ SkSL::Program::Kind fKind;
+
+ int fIndex;
+
+ const char* fName;
+
+ // For object lifetime purposes, we have fields for the SkSL as both a const char* and a
+ // SkString. The const char* is the one we actually use, but it may point to the SkString's
+ // bytes. Since GrSkSLFPs are frequently created from constant strings, this allows us to
+ // generally avoid the overhead of copying the bytes into an SkString (in which case fSkSLString
+ // is the empty string), while still allowing the GrSkSLFP to manage the string's lifetime when
+ // needed.
+ SkString fSkSLString;
+
+ const char* fSkSL;
+
+ const std::unique_ptr<int8_t[]> fInputs;
+
+ size_t fInputSize;
+
+ GrCoordTransform fCoordTransform;
+
+ mutable SkSL::String fKey;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+
+ friend class GrGLSLSkSLFP;
+
+ friend class GrSkSLFPFactory;
+};
+
+/**
+ * Produces GrFragmentProcessors from SkSL code. As the shader code produced from the SkSL depends
+ * upon the inputs to the SkSL (static ifs, etc.), we first create a factory for a given SkSL
+ * string, then use that to create the actual GrFragmentProcessor.
+ */
+class GrSkSLFPFactory : public SkNVRefCnt<GrSkSLFPFactory> {
+public:
+ /**
+ * Constructs a GrSkSLFPFactory for a given SkSL source string. Creating a factory will
+ * preprocess the SkSL and determine which of its inputs are declared "key" (meaning they cause
+ * the produced shaders to differ), so it is important to reuse the same factory instance for
+ * the same shader in order to avoid repeatedly re-parsing the SkSL.
+ */
+ GrSkSLFPFactory(const char* name, const GrShaderCaps* shaderCaps, const char* sksl,
+ SkSL::Program::Kind kind = SkSL::Program::kPipelineStage_Kind);
+
+ const SkSL::Program* getSpecialization(const SkSL::String& key, const void* inputs,
+ size_t inputSize);
+
+ SkSL::Program::Kind fKind;
+
+ const char* fName;
+
+ SkSL::Compiler fCompiler;
+
+ std::shared_ptr<SkSL::Program> fBaseProgram;
+
+ std::vector<const SkSL::Variable*> fInAndUniformVars;
+
+ std::unordered_map<SkSL::String, std::unique_ptr<const SkSL::Program>> fSpecializations;
+
+ friend class GrSkSLFP;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp
new file mode 100644
index 0000000000..25b5bb3d6b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrTextureDomain.h"
+
+#include "include/gpu/GrTexture.h"
+#include "include/private/SkFloatingPoint.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+#include <utility>
+
+GrTextureDomain::GrTextureDomain(GrTextureProxy* proxy, const SkRect& domain, Mode modeX,
+ Mode modeY, int index)
+ : fModeX(modeX)
+ , fModeY(modeY)
+ , fIndex(index) {
+
+ if (!proxy) {
+ SkASSERT(modeX == kIgnore_Mode && modeY == kIgnore_Mode);
+ return;
+ }
+
+ const SkRect kFullRect = SkRect::MakeIWH(proxy->width(), proxy->height());
+
+    // We don't currently handle domains that are empty or that don't intersect the texture's
+    // rect. It is OK if the domain rect is a line or point, but it should not be inverted.
+ SkASSERT(domain.fLeft <= domain.fRight);
+ SkASSERT(domain.fTop <= domain.fBottom);
+ fDomain.fLeft = SkScalarPin(domain.fLeft, 0.0f, kFullRect.fRight);
+ fDomain.fRight = SkScalarPin(domain.fRight, fDomain.fLeft, kFullRect.fRight);
+ fDomain.fTop = SkScalarPin(domain.fTop, 0.0f, kFullRect.fBottom);
+ fDomain.fBottom = SkScalarPin(domain.fBottom, fDomain.fTop, kFullRect.fBottom);
+ SkASSERT(fDomain.fLeft <= fDomain.fRight);
+ SkASSERT(fDomain.fTop <= fDomain.fBottom);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static SkString clamp_expression(GrTextureDomain::Mode mode, const char* inCoord,
+ const char* coordSwizzle, const char* domain,
+ const char* minSwizzle, const char* maxSwizzle) {
+ SkString clampedExpr;
+ switch(mode) {
+ case GrTextureDomain::kIgnore_Mode:
+ clampedExpr.printf("%s.%s\n", inCoord, coordSwizzle);
+ break;
+ case GrTextureDomain::kDecal_Mode:
+ // The lookup coordinate to use for decal will be clamped just like kClamp_Mode,
+ // it's just that the post-processing will be different, so fall through
+ case GrTextureDomain::kClamp_Mode:
+ clampedExpr.printf("clamp(%s.%s, %s.%s, %s.%s)",
+ inCoord, coordSwizzle, domain, minSwizzle, domain, maxSwizzle);
+ break;
+ case GrTextureDomain::kRepeat_Mode:
+ clampedExpr.printf("mod(%s.%s - %s.%s, %s.%s - %s.%s) + %s.%s",
+ inCoord, coordSwizzle, domain, minSwizzle, domain, maxSwizzle,
+ domain, minSwizzle, domain, minSwizzle);
+ break;
+ default:
+ SkASSERTF(false, "Unknown texture domain mode: %u\n", (uint32_t) mode);
+ break;
+ }
+ return clampedExpr;
+}
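+
+// For illustration, with inCoord "origCoord", coordSwizzle "xy", and a domain uniform named
+// "TexDom" (the name used by sampleTexture below), the kRepeat_Mode case produces:
+//
+//     mod(origCoord.xy - TexDom.xy, TexDom.zw - TexDom.xy) + TexDom.xy
+//
+// which wraps the coordinate into [min, max) on both axes.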
+
+void GrTextureDomain::GLDomain::sampleTexture(GrGLSLShaderBuilder* builder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderCaps* shaderCaps,
+ const GrTextureDomain& textureDomain,
+ const char* outColor,
+ const SkString& inCoords,
+ GrGLSLFragmentProcessor::SamplerHandle sampler,
+ const char* inModulateColor) {
+ SkASSERT(!fHasMode || (textureDomain.modeX() == fModeX && textureDomain.modeY() == fModeY));
+ SkDEBUGCODE(fModeX = textureDomain.modeX();)
+ SkDEBUGCODE(fModeY = textureDomain.modeY();)
+ SkDEBUGCODE(fHasMode = true;)
+
+ if ((textureDomain.modeX() != kIgnore_Mode || textureDomain.modeY() != kIgnore_Mode) &&
+ !fDomainUni.isValid()) {
+ // Must include the domain uniform since at least one axis uses it
+ const char* name;
+ SkString uniName("TexDom");
+ if (textureDomain.fIndex >= 0) {
+ uniName.appendS32(textureDomain.fIndex);
+ }
+ fDomainUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ uniName.c_str(), &name);
+ fDomainName = name;
+ }
+
+ bool decalX = textureDomain.modeX() == kDecal_Mode;
+ bool decalY = textureDomain.modeY() == kDecal_Mode;
+ if ((decalX || decalY) && !fDecalUni.isValid()) {
+ const char* name;
+ SkString uniName("DecalParams");
+ if (textureDomain.fIndex >= 0) {
+ uniName.appendS32(textureDomain.fIndex);
+ }
+ // Half3 since this will hold texture width, height, and then a step function control param
+ fDecalUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf3_GrSLType,
+ uniName.c_str(), &name);
+ fDecalName = name;
+ }
+
+ // Add a block so that we can declare variables
+ GrGLSLShaderBuilder::ShaderBlock block(builder);
+ // Always use a local variable for the input coordinates; often callers pass in an expression
+ // and we want to cache it across all of its references in the code below
+ builder->codeAppendf("float2 origCoord = %s;", inCoords.c_str());
+ builder->codeAppend("float2 clampedCoord = ");
+ if (textureDomain.modeX() != textureDomain.modeY()) {
+ // The wrap modes differ on the two axes, so build up a coordinate that respects each axis'
+ // domain rule independently before sampling the texture.
+ SkString tcX = clamp_expression(textureDomain.modeX(), "origCoord", "x",
+ fDomainName.c_str(), "x", "z");
+ SkString tcY = clamp_expression(textureDomain.modeY(), "origCoord", "y",
+ fDomainName.c_str(), "y", "w");
+ builder->codeAppendf("float2(%s, %s)", tcX.c_str(), tcY.c_str());
+ } else {
+ // Since the x and y axis wrap modes are the same, they can be calculated together using
+ // more efficient vector operations
+ SkString tc = clamp_expression(textureDomain.modeX(), "origCoord", "xy",
+ fDomainName.c_str(), "xy", "zw");
+ builder->codeAppend(tc.c_str());
+ }
+ builder->codeAppend(";");
+
+ // Look up the texture sample at the clamped coordinate location
+ builder->codeAppend("half4 inside = ");
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler, "clampedCoord",
+ kFloat2_GrSLType);
+ builder->codeAppend(";");
+
+ // Apply decal mode's transparency interpolation if needed
+ if (decalX || decalY) {
+        // The decal err is the max absolute difference between the clamped coordinate and the
+        // original pixel coordinate. It is then clamped to 1.f if it's greater than the control
+        // parameter, which simulates kNearest and kBilerp behavior depending on whether that
+        // parameter is 1/2 or 1.
+ if (decalX && decalY) {
+ builder->codeAppendf("half err = max(half(abs(clampedCoord.x - origCoord.x) * %s.x), "
+ "half(abs(clampedCoord.y - origCoord.y) * %s.y));",
+ fDecalName.c_str(), fDecalName.c_str());
+ } else if (decalX) {
+ builder->codeAppendf("half err = half(abs(clampedCoord.x - origCoord.x) * %s.x);",
+ fDecalName.c_str());
+ } else {
+ SkASSERT(decalY);
+ builder->codeAppendf("half err = half(abs(clampedCoord.y - origCoord.y) * %s.y);",
+ fDecalName.c_str());
+ }
+
+        // Apply a transform to the error rate, which lets us simulate nearest or bilerp filtering
+        // in the same shader. When the texture is nearest filtered, the decal uniform's z is set
+        // to 1/2, so this becomes a step function centered at .5 away from the clamped coordinate
+        // (but the domain for decal is inset by .5 so the edge lines up properly). When bilerp,
+        // the decal uniform's z is set to 1 and this becomes a simple linear blend between
+        // texture and transparent.
+ builder->codeAppendf("if (err > %s.z) { err = 1.0; } else if (%s.z < 1) { err = 0.0; }",
+ fDecalName.c_str(), fDecalName.c_str());
+ builder->codeAppendf("%s = mix(inside, half4(0, 0, 0, 0), err);", outColor);
+ } else {
+ // A simple look up
+ builder->codeAppendf("%s = inside;", outColor);
+ }
+}
+
+void GrTextureDomain::GLDomain::setData(const GrGLSLProgramDataManager& pdman,
+ const GrTextureDomain& textureDomain,
+ GrTextureProxy* proxy,
+ const GrSamplerState& sampler) {
+ GrTexture* tex = proxy->peekTexture();
+ SkASSERT(fHasMode && textureDomain.modeX() == fModeX && textureDomain.modeY() == fModeY);
+ if (kIgnore_Mode != textureDomain.modeX() || kIgnore_Mode != textureDomain.modeY()) {
+ bool sendDecalData = textureDomain.modeX() == kDecal_Mode ||
+ textureDomain.modeY() == kDecal_Mode;
+
+ // If the texture is using nearest filtering, then the decal filter weight should step from
+ // 0 (texture) to 1 (transparent) one half pixel away from the domain. When doing any other
+ // form of filtering, the weight should be 1.0 so that it smoothly interpolates between the
+ // texture and transparent.
+ SkScalar decalFilterWeight = sampler.filter() == GrSamplerState::Filter::kNearest ?
+ SK_ScalarHalf : 1.0f;
+ SkScalar wInv, hInv, h;
+ if (proxy->textureType() == GrTextureType::kRectangle) {
+ wInv = hInv = 1.f;
+ h = tex->height();
+
+ // Don't do any scaling by texture size for decal filter rate, it's already in pixels
+ if (sendDecalData) {
+ pdman.set3f(fDecalUni, 1.f, 1.f, decalFilterWeight);
+ }
+ } else {
+ wInv = SK_Scalar1 / tex->width();
+ hInv = SK_Scalar1 / tex->height();
+ h = 1.f;
+
+ if (sendDecalData) {
+ pdman.set3f(fDecalUni, tex->width(), tex->height(), decalFilterWeight);
+ }
+ }
+
+ float values[kPrevDomainCount] = {
+ SkScalarToFloat(textureDomain.domain().fLeft * wInv),
+ SkScalarToFloat(textureDomain.domain().fTop * hInv),
+ SkScalarToFloat(textureDomain.domain().fRight * wInv),
+ SkScalarToFloat(textureDomain.domain().fBottom * hInv)
+ };
+
+ if (proxy->textureType() == GrTextureType::kRectangle) {
+ SkASSERT(values[0] >= 0.0f && values[0] <= proxy->width());
+ SkASSERT(values[1] >= 0.0f && values[1] <= proxy->height());
+ SkASSERT(values[2] >= 0.0f && values[2] <= proxy->width());
+ SkASSERT(values[3] >= 0.0f && values[3] <= proxy->height());
+ } else {
+ SkASSERT(values[0] >= 0.0f && values[0] <= 1.0f);
+ SkASSERT(values[1] >= 0.0f && values[1] <= 1.0f);
+ SkASSERT(values[2] >= 0.0f && values[2] <= 1.0f);
+ SkASSERT(values[3] >= 0.0f && values[3] <= 1.0f);
+ }
+
+ // vertical flip if necessary
+ if (kBottomLeft_GrSurfaceOrigin == proxy->origin()) {
+ values[1] = h - values[1];
+ values[3] = h - values[3];
+
+ // The top and bottom were just flipped, so correct the ordering
+ // of elements so that values = (l, t, r, b).
+ using std::swap;
+ swap(values[1], values[3]);
+ }
+ if (0 != memcmp(values, fPrevDomain, kPrevDomainCount * sizeof(float))) {
+ pdman.set4fv(fDomainUni, 1, values);
+ memcpy(fPrevDomain, values, kPrevDomainCount * sizeof(float));
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor> GrTextureDomainEffect::Make(
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ GrTextureDomain::Mode mode,
+ GrSamplerState::Filter filterMode) {
+ return Make(std::move(proxy), srcColorType, matrix, domain, mode, mode,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, filterMode));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrTextureDomainEffect::Make(
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ GrTextureDomain::Mode modeX,
+ GrTextureDomain::Mode modeY,
+ const GrSamplerState& sampler) {
+    // If both domain modes happen to be ignore, it would be faster to just drop the domain logic
+    // entirely. Technically, we could also use the simple texture effect if the domain modes
+    // agree with the sampler modes and the proxy is the same size as the domain, but it's a lot
+    // easier for calling code to detect these cases and handle them itself.
+ return std::unique_ptr<GrFragmentProcessor>(new GrTextureDomainEffect(
+ std::move(proxy), srcColorType, matrix, domain, modeX, modeY, sampler));
+}
+
+GrTextureDomainEffect::GrTextureDomainEffect(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ GrTextureDomain::Mode modeX,
+ GrTextureDomain::Mode modeY,
+ const GrSamplerState& sampler)
+ : INHERITED(kGrTextureDomainEffect_ClassID,
+ ModulateForSamplerOptFlags(srcColorType,
+ GrTextureDomain::IsDecalSampled(sampler, modeX, modeY)))
+ , fCoordTransform(matrix, proxy.get())
+ , fTextureDomain(proxy.get(), domain, modeX, modeY)
+ , fTextureSampler(std::move(proxy), sampler) {
+ SkASSERT((modeX != GrTextureDomain::kRepeat_Mode && modeY != GrTextureDomain::kRepeat_Mode) ||
+ sampler.filter() == GrSamplerState::Filter::kNearest);
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+GrTextureDomainEffect::GrTextureDomainEffect(const GrTextureDomainEffect& that)
+ : INHERITED(kGrTextureDomainEffect_ClassID, that.optimizationFlags())
+ , fCoordTransform(that.fCoordTransform)
+ , fTextureDomain(that.fTextureDomain)
+ , fTextureSampler(that.fTextureSampler) {
+ this->addCoordTransform(&fCoordTransform);
+ this->setTextureSamplerCnt(1);
+}
+
+void GrTextureDomainEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32(GrTextureDomain::GLDomain::DomainKey(fTextureDomain));
+}
+
+GrGLSLFragmentProcessor* GrTextureDomainEffect::onCreateGLSLInstance() const {
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ const GrTextureDomainEffect& tde = args.fFp.cast<GrTextureDomainEffect>();
+ const GrTextureDomain& domain = tde.fTextureDomain;
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
+ fGLDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ domain,
+ args.fOutputColor,
+ coords2D,
+ args.fTexSamplers[0],
+ args.fInputColor);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& fp) override {
+ const GrTextureDomainEffect& tde = fp.cast<GrTextureDomainEffect>();
+ const GrTextureDomain& domain = tde.fTextureDomain;
+ GrTextureProxy* proxy = tde.textureSampler(0).proxy();
+
+ fGLDomain.setData(pdman, domain, proxy, tde.textureSampler(0).samplerState());
+ }
+
+ private:
+ GrTextureDomain::GLDomain fGLDomain;
+ };
+
+ return new GLSLProcessor;
+}
+
+bool GrTextureDomainEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrTextureDomainEffect& s = sBase.cast<GrTextureDomainEffect>();
+ return this->fTextureDomain == s.fTextureDomain;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrTextureDomainEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrTextureDomainEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+ SkRect domain;
+ domain.fLeft = d->fRandom->nextRangeScalar(0, proxy->width());
+ domain.fRight = d->fRandom->nextRangeScalar(domain.fLeft, proxy->width());
+ domain.fTop = d->fRandom->nextRangeScalar(0, proxy->height());
+ domain.fBottom = d->fRandom->nextRangeScalar(domain.fTop, proxy->height());
+ GrTextureDomain::Mode modeX =
+ (GrTextureDomain::Mode) d->fRandom->nextULessThan(GrTextureDomain::kModeCount);
+ GrTextureDomain::Mode modeY =
+ (GrTextureDomain::Mode) d->fRandom->nextULessThan(GrTextureDomain::kModeCount);
+ const SkMatrix& matrix = GrTest::TestMatrix(d->fRandom);
+ bool bilerp = modeX != GrTextureDomain::kRepeat_Mode && modeY != GrTextureDomain::kRepeat_Mode ?
+ d->fRandom->nextBool() : false;
+ return GrTextureDomainEffect::Make(
+ std::move(proxy),
+ d->textureProxyColorType(texIdx),
+ matrix,
+ domain,
+ modeX,
+ modeY,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, bilerp ?
+ GrSamplerState::Filter::kBilerp : GrSamplerState::Filter::kNearest));
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+std::unique_ptr<GrFragmentProcessor> GrDeviceSpaceTextureDecalFragmentProcessor::Make(
+ sk_sp<GrTextureProxy> proxy, const SkIRect& subset, const SkIPoint& deviceSpaceOffset) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrDeviceSpaceTextureDecalFragmentProcessor(
+ std::move(proxy), subset, deviceSpaceOffset));
+}
+
+GrDeviceSpaceTextureDecalFragmentProcessor::GrDeviceSpaceTextureDecalFragmentProcessor(
+ sk_sp<GrTextureProxy> proxy, const SkIRect& subset, const SkIPoint& deviceSpaceOffset)
+ : INHERITED(kGrDeviceSpaceTextureDecalFragmentProcessor_ClassID,
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fTextureSampler(proxy, GrSamplerState::ClampNearest())
+ , fTextureDomain(proxy.get(),
+ GrTextureDomain::MakeTexelDomain(subset, GrTextureDomain::kDecal_Mode),
+ GrTextureDomain::kDecal_Mode, GrTextureDomain::kDecal_Mode) {
+ this->setTextureSamplerCnt(1);
+ fDeviceSpaceOffset.fX = deviceSpaceOffset.fX - subset.fLeft;
+ fDeviceSpaceOffset.fY = deviceSpaceOffset.fY - subset.fTop;
+}
+
+GrDeviceSpaceTextureDecalFragmentProcessor::GrDeviceSpaceTextureDecalFragmentProcessor(
+ const GrDeviceSpaceTextureDecalFragmentProcessor& that)
+ : INHERITED(kGrDeviceSpaceTextureDecalFragmentProcessor_ClassID,
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , fTextureSampler(that.fTextureSampler)
+ , fTextureDomain(that.fTextureDomain)
+ , fDeviceSpaceOffset(that.fDeviceSpaceOffset) {
+ this->setTextureSamplerCnt(1);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrDeviceSpaceTextureDecalFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrDeviceSpaceTextureDecalFragmentProcessor(*this));
+}
+
+GrGLSLFragmentProcessor* GrDeviceSpaceTextureDecalFragmentProcessor::onCreateGLSLInstance() const {
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ args.fFp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ const char* scaleAndTranslateName;
+ fScaleAndTranslateUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType,
+ "scaleAndTranslate",
+ &scaleAndTranslateName);
+ args.fFragBuilder->codeAppendf("half2 coords = half2(sk_FragCoord.xy * %s.xy + %s.zw);",
+ scaleAndTranslateName, scaleAndTranslateName);
+ fGLDomain.sampleTexture(args.fFragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ dstdfp.fTextureDomain,
+ args.fOutputColor,
+ SkString("coords"),
+ args.fTexSamplers[0],
+ args.fInputColor);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& fp) override {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ fp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ GrTextureProxy* proxy = dstdfp.textureSampler(0).proxy();
+ GrTexture* texture = proxy->peekTexture();
+
+ fGLDomain.setData(pdman, dstdfp.fTextureDomain, proxy,
+ dstdfp.textureSampler(0).samplerState());
+ float iw = 1.f / texture->width();
+ float ih = 1.f / texture->height();
+ float scaleAndTransData[4] = {
+ iw, ih,
+ -dstdfp.fDeviceSpaceOffset.fX * iw, -dstdfp.fDeviceSpaceOffset.fY * ih
+ };
+ if (proxy->origin() == kBottomLeft_GrSurfaceOrigin) {
+ scaleAndTransData[1] = -scaleAndTransData[1];
+ scaleAndTransData[3] = 1 - scaleAndTransData[3];
+ }
+ pdman.set4fv(fScaleAndTranslateUni, 1, scaleAndTransData);
+ }
+
+ private:
+ GrTextureDomain::GLDomain fGLDomain;
+ UniformHandle fScaleAndTranslateUni;
+ };
+
+ return new GLSLProcessor;
+}
+
+bool GrDeviceSpaceTextureDecalFragmentProcessor::onIsEqual(const GrFragmentProcessor& fp) const {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ fp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ return dstdfp.fTextureSampler.proxy()->underlyingUniqueID() ==
+ fTextureSampler.proxy()->underlyingUniqueID() &&
+ dstdfp.fDeviceSpaceOffset == fDeviceSpaceOffset &&
+ dstdfp.fTextureDomain == fTextureDomain;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDeviceSpaceTextureDecalFragmentProcessor);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrDeviceSpaceTextureDecalFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(texIdx);
+ SkIRect subset;
+ subset.fLeft = d->fRandom->nextULessThan(proxy->width() - 1);
+ subset.fRight = d->fRandom->nextRangeU(subset.fLeft, proxy->width());
+ subset.fTop = d->fRandom->nextULessThan(proxy->height() - 1);
+ subset.fBottom = d->fRandom->nextRangeU(subset.fTop, proxy->height());
+ SkIPoint pt;
+ pt.fX = d->fRandom->nextULessThan(2048);
+ pt.fY = d->fRandom->nextULessThan(2048);
+ return GrDeviceSpaceTextureDecalFragmentProcessor::Make(std::move(proxy), subset, pt);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h
new file mode 100644
index 0000000000..96c2d2019b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureDomainEffect_DEFINED
+#define GrTextureDomainEffect_DEFINED
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+class GrGLProgramBuilder;
+class GrGLSLShaderBuilder;
+class GrInvariantOutput;
+class GrGLSLUniformHandler;
+struct SkRect;
+
+/**
+ * Limits a texture's lookup coordinates to a domain. Samples outside the domain are either
+ * clamped to the edge of the domain or result in a half4 of zeros (decal mode). The domain is
+ * clipped to normalized texture coords (the [0,1]x[0,1] square). Bilinear filtering can cause
+ * texels outside the domain to affect the read value unless the caller considers this when
+ * calculating the domain.
+ */
+class GrTextureDomain {
+public:
+ enum Mode {
+ // Ignore the texture domain rectangle.
+ kIgnore_Mode,
+ // Clamp texture coords to the domain rectangle.
+ kClamp_Mode,
+ // Treat the area outside the domain rectangle as fully transparent.
+ kDecal_Mode,
+        // Wrap texture coordinates. NOTE: filtering may not work as expected because bilerp will
+        // read texels outside the domain. We could perform additional texture reads and filter in
+        // the shader, but we do not currently do so, for performance reasons.
+ kRepeat_Mode,
+
+ kLastMode = kRepeat_Mode
+ };
+ static const int kModeCount = kLastMode + 1;
+
+ static const GrTextureDomain& IgnoredDomain() {
+ static const GrTextureDomain gDomain((GrTextureProxy*)nullptr,
+ SkRect::MakeEmpty(), kIgnore_Mode, kIgnore_Mode);
+ return gDomain;
+ }
+
+ /**
+ * @param index Pass a value >= 0 if using multiple texture domains in the same effect.
+ * It is used to keep inserted variables from causing name collisions.
+ */
+ GrTextureDomain(GrTextureProxy*, const SkRect& domain, Mode modeX, Mode modeY, int index = -1);
+
+ GrTextureDomain(const GrTextureDomain&) = default;
+
+ const SkRect& domain() const { return fDomain; }
+ Mode modeX() const { return fModeX; }
+ Mode modeY() const { return fModeY; }
+
+ /*
+ * Computes a domain that bounds all the texels in texelRect, possibly insetting by half a pixel
+ * depending on the mode. The mode is used for both axes.
+ */
+ static const SkRect MakeTexelDomain(const SkIRect& texelRect, Mode mode) {
+ return MakeTexelDomain(texelRect, mode, mode);
+ }
+
+ static const SkRect MakeTexelDomain(const SkIRect& texelRect, Mode modeX, Mode modeY) {
+        // For clamp and decal modes, inset by half a texel.
+ SkScalar insetX = ((modeX == kClamp_Mode || modeX == kDecal_Mode) && texelRect.width() > 0)
+ ? SK_ScalarHalf : 0;
+ SkScalar insetY = ((modeY == kClamp_Mode || modeY == kDecal_Mode) && texelRect.height() > 0)
+ ? SK_ScalarHalf : 0;
+ return SkRect::MakeLTRB(texelRect.fLeft + insetX, texelRect.fTop + insetY,
+ texelRect.fRight - insetX, texelRect.fBottom - insetY);
+ }
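+
+    // For illustration: a texel rect of [0, 0, 8, 8] in kClamp_Mode yields the SkRect
+    // (0.5, 0.5, 7.5, 7.5), so bilerp lookups never filter in texels outside the rect, while
+    // kIgnore_Mode or kRepeat_Mode returns the same rect unchanged as (0, 0, 8, 8).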
+
+ // Convenience to determine if any axis of a texture uses an explicit decal mode or the hardware
+ // clamp to border decal mode.
+ static bool IsDecalSampled(GrSamplerState::WrapMode wrapX, GrSamplerState::WrapMode wrapY,
+ Mode modeX, Mode modeY) {
+ return wrapX == GrSamplerState::WrapMode::kClampToBorder ||
+ wrapY == GrSamplerState::WrapMode::kClampToBorder ||
+ modeX == kDecal_Mode ||
+ modeY == kDecal_Mode;
+ }
+
+ static bool IsDecalSampled(const GrSamplerState::WrapMode wraps[2], Mode modeX, Mode modeY) {
+ return IsDecalSampled(wraps[0], wraps[1], modeX, modeY);
+ }
+
+ static bool IsDecalSampled(const GrSamplerState& sampler, Mode modeX, Mode modeY) {
+ return IsDecalSampled(sampler.wrapModeX(), sampler.wrapModeY(), modeX, modeY);
+ }
+
+ bool operator==(const GrTextureDomain& that) const {
+ return fModeX == that.fModeX && fModeY == that.fModeY &&
+ (kIgnore_Mode == fModeX || (fDomain.fLeft == that.fDomain.fLeft &&
+ fDomain.fRight == that.fDomain.fRight)) &&
+ (kIgnore_Mode == fModeY || (fDomain.fTop == that.fDomain.fTop &&
+ fDomain.fBottom == that.fDomain.fBottom));
+ }
+
+ /**
+ * A GrGLSLFragmentProcessor subclass that corresponds to a GrProcessor subclass that uses
+ * GrTextureDomain should include this helper. It generates the texture domain GLSL, produces
+ * the part of the effect key that reflects the texture domain code, and performs the uniform
+ * uploads necessary for texture domains.
+ */
+ class GLDomain {
+ public:
+ GLDomain() {
+ for (int i = 0; i < kPrevDomainCount; i++) {
+ fPrevDomain[i] = SK_FloatNaN;
+ }
+ }
+
+ /**
+         * Call this from GrGLSLFragmentProcessor::emitCode() to sample the texture with respect
+         * to the domain and mode.
+ *
+         * @param outColor name of half4 variable to hold the sampled color.
+ * @param inCoords name of float2 variable containing the coords to be used with the domain.
+ * It is assumed that this is a variable and not an expression.
+ * @param inModulateColor if non-nullptr the sampled color will be modulated with this
+ * expression before being written to outColor.
+ */
+ void sampleTexture(GrGLSLShaderBuilder* builder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderCaps* shaderCaps,
+ const GrTextureDomain& textureDomain,
+ const char* outColor,
+ const SkString& inCoords,
+ GrGLSLFragmentProcessor::SamplerHandle sampler,
+ const char* inModulateColor = nullptr);
+
+ /**
+ * Call this from GrGLSLFragmentProcessor::setData() to upload uniforms necessary for the
+ * texture domain. The rectangle is automatically adjusted to account for the texture's
+ * origin.
+ */
+ void setData(const GrGLSLProgramDataManager&, const GrTextureDomain&, GrTextureProxy*,
+ const GrSamplerState& sampler);
+
+ enum {
+ kModeBits = 2, // See DomainKey().
+ kDomainKeyBits = 4
+ };
+
+ /**
+         * GrGLSLFragmentProcessor::GenKey() must call this and include the returned value in its
+         * computed key. The returned value will be limited to the lower kDomainKeyBits bits.
+ */
+ static uint32_t DomainKey(const GrTextureDomain& domain) {
+ GR_STATIC_ASSERT(kModeCount <= (1 << kModeBits));
+ return domain.modeX() | (domain.modeY() << kModeBits);
+ }
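+
+        // For illustration: modeX = kClamp_Mode (1) and modeY = kDecal_Mode (2) pack into the
+        // key as 1 | (2 << 2) = 9, which fits in the promised kDomainKeyBits bits.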
+
+ private:
+ static const int kPrevDomainCount = 4;
+ SkDEBUGCODE(Mode fModeX;)
+ SkDEBUGCODE(Mode fModeY;)
+ SkDEBUGCODE(bool fHasMode = false;)
+ GrGLSLProgramDataManager::UniformHandle fDomainUni;
+ SkString fDomainName;
+
+ // Only initialized if the domain has at least one decal axis
+ GrGLSLProgramDataManager::UniformHandle fDecalUni;
+ SkString fDecalName;
+
+ float fPrevDomain[kPrevDomainCount];
+ };
+
+protected:
+ Mode fModeX;
+ Mode fModeY;
+ SkRect fDomain;
+ int fIndex;
+};
+
+/**
+ * A basic texture effect that uses GrTextureDomain.
+ */
+class GrTextureDomainEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ const SkMatrix&,
+ const SkRect& domain,
+ GrTextureDomain::Mode mode,
+ GrSamplerState::Filter filterMode);
+
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ const SkMatrix&,
+ const SkRect& domain,
+ GrTextureDomain::Mode modeX,
+ GrTextureDomain::Mode modeY,
+ const GrSamplerState& sampler);
+
+ const char* name() const override { return "TextureDomain"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTextureDomainEffect(*this));
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Domain: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
+ fTextureDomain.domain().fLeft, fTextureDomain.domain().fTop,
+ fTextureDomain.domain().fRight, fTextureDomain.domain().fBottom);
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+#endif
+
+private:
+ GrCoordTransform fCoordTransform;
+ GrTextureDomain fTextureDomain;
+ TextureSampler fTextureSampler;
+
+ GrTextureDomainEffect(sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ const SkMatrix&,
+ const SkRect& domain,
+ GrTextureDomain::Mode modeX,
+ GrTextureDomain::Mode modeY,
+ const GrSamplerState&);
+
+ explicit GrTextureDomainEffect(const GrTextureDomainEffect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GrDeviceSpaceTextureDecalFragmentProcessor : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy>,
+ const SkIRect& subset,
+ const SkIPoint& deviceSpaceOffset);
+
+ const char* name() const override { return "GrDeviceSpaceTextureDecalFragmentProcessor"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Domain: [L: %.2f, T: %.2f, R: %.2f, B: %.2f] Offset: [%d %d]",
+ fTextureDomain.domain().fLeft, fTextureDomain.domain().fTop,
+ fTextureDomain.domain().fRight, fTextureDomain.domain().fBottom,
+ fDeviceSpaceOffset.fX, fDeviceSpaceOffset.fY);
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+#endif
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ TextureSampler fTextureSampler;
+ GrTextureDomain fTextureDomain;
+ SkIPoint fDeviceSpaceOffset;
+
+ GrDeviceSpaceTextureDecalFragmentProcessor(sk_sp<GrTextureProxy>,
+ const SkIRect&, const SkIPoint&);
+ GrDeviceSpaceTextureDecalFragmentProcessor(const GrDeviceSpaceTextureDecalFragmentProcessor&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ // Since we always use decal mode, there is no need for key data.
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor& fp) const override;
+
+ const TextureSampler& onTextureSampler(int) const override { return fTextureSampler; }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp
new file mode 100644
index 0000000000..d21b4987a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+
+#include "src/core/SkXfermodePriv.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/glsl/GrGLSLBlend.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+
+// Some of the CPU implementations of blend modes differ enough from the GPU implementations that
+// we can't use the CPU implementation to implement constantOutputForConstantInput.
+static inline bool does_cpu_blend_impl_match_gpu(SkBlendMode mode) {
+    // The non-separable modes differ too much. So does SoftLight. ColorBurn differs too much on
+    // our test iOS device (but we just disable it across the board since it may happen on
+    // untested GPUs).
+ return mode <= SkBlendMode::kLastSeparableMode && mode != SkBlendMode::kSoftLight &&
+ mode != SkBlendMode::kColorBurn;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class ComposeTwoFragmentProcessor : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> src,
+ std::unique_ptr<GrFragmentProcessor> dst,
+ SkBlendMode mode) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeTwoFragmentProcessor(std::move(src), std::move(dst), mode));
+ }
+
+ const char* name() const override { return "ComposeTwo"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+
+ str.appendf("Mode: %s", SkBlendMode_Name(fMode));
+
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ str.appendf(" [%s %s]",
+ this->childProcessor(i).name(), this->childProcessor(i).dumpInfo().c_str());
+ }
+ return str;
+ }
+#endif
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ SkBlendMode getMode() const { return fMode; }
+
+private:
+ ComposeTwoFragmentProcessor(std::unique_ptr<GrFragmentProcessor> src,
+ std::unique_ptr<GrFragmentProcessor> dst,
+ SkBlendMode mode)
+ : INHERITED(kComposeTwoFragmentProcessor_ClassID, OptFlags(src.get(), dst.get(), mode))
+ , fMode(mode) {
+ SkDEBUGCODE(int shaderAChildIndex = )this->registerChildProcessor(std::move(src));
+ SkDEBUGCODE(int shaderBChildIndex = )this->registerChildProcessor(std::move(dst));
+ SkASSERT(0 == shaderAChildIndex);
+ SkASSERT(1 == shaderBChildIndex);
+ }
+
+ static OptimizationFlags OptFlags(const GrFragmentProcessor* src,
+ const GrFragmentProcessor* dst, SkBlendMode mode) {
+ OptimizationFlags flags;
+ switch (mode) {
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kDst:
+ SK_ABORT("Should never create clear, src, or dst compose two FP.");
+ flags = kNone_OptimizationFlags;
+ break;
+
+ // Produces opaque if both src and dst are opaque.
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kModulate:
+ flags = src->preservesOpaqueInput() && dst->preservesOpaqueInput()
+ ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+
+ // Produces zero when both are opaque, indeterminate if one is opaque.
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kXor:
+ flags = kNone_OptimizationFlags;
+ break;
+
+ // Is opaque if the dst is opaque.
+ case SkBlendMode::kSrcATop:
+ flags = dst->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+
+            // DstATop is the converse of kSrcATop. Screen is also opaque if the src is opaque.
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kScreen:
+ flags = src->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+
+ // These modes are all opaque if either src or dst is opaque. All the advanced modes
+ // compute alpha as src-over.
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kPlus:
+ case SkBlendMode::kOverlay:
+ case SkBlendMode::kDarken:
+ case SkBlendMode::kLighten:
+ case SkBlendMode::kColorDodge:
+ case SkBlendMode::kColorBurn:
+ case SkBlendMode::kHardLight:
+ case SkBlendMode::kSoftLight:
+ case SkBlendMode::kDifference:
+ case SkBlendMode::kExclusion:
+ case SkBlendMode::kMultiply:
+ case SkBlendMode::kHue:
+ case SkBlendMode::kSaturation:
+ case SkBlendMode::kColor:
+ case SkBlendMode::kLuminosity:
+ flags = src->preservesOpaqueInput() || dst->preservesOpaqueInput()
+ ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+ }
+ if (does_cpu_blend_impl_match_gpu(mode) && src->hasConstantOutputForConstantInput() &&
+ dst->hasConstantOutputForConstantInput()) {
+ flags |= kConstantOutputForConstantInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ b->add32((int)fMode);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const ComposeTwoFragmentProcessor& cs = other.cast<ComposeTwoFragmentProcessor>();
+ return fMode == cs.fMode;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ SkPMColor4f opaqueInput = { input.fR, input.fG, input.fB, 1 };
+ SkPMColor4f src = ConstantOutputForConstantInput(this->childProcessor(0), opaqueInput);
+ SkPMColor4f dst = ConstantOutputForConstantInput(this->childProcessor(1), opaqueInput);
+ SkPMColor4f res = SkBlendMode_Apply(fMode, src, dst);
+ return res * input.fA;
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkBlendMode fMode;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GLComposeTwoFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ComposeTwoFragmentProcessor);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> ComposeTwoFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+ // Create two random frag procs.
+ std::unique_ptr<GrFragmentProcessor> fpA(GrProcessorUnitTest::MakeChildFP(d));
+ std::unique_ptr<GrFragmentProcessor> fpB(GrProcessorUnitTest::MakeChildFP(d));
+
+ SkBlendMode mode;
+ do {
+ mode = static_cast<SkBlendMode>(d->fRandom->nextRangeU(0, (int)SkBlendMode::kLastMode));
+ } while (SkBlendMode::kClear == mode || SkBlendMode::kSrc == mode || SkBlendMode::kDst == mode);
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeTwoFragmentProcessor(std::move(fpA), std::move(fpB), mode));
+}
+#endif
+
+std::unique_ptr<GrFragmentProcessor> ComposeTwoFragmentProcessor::clone() const {
+ auto src = this->childProcessor(0).clone();
+ auto dst = this->childProcessor(1).clone();
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeTwoFragmentProcessor(std::move(src), std::move(dst), fMode));
+}
+
+GrGLSLFragmentProcessor* ComposeTwoFragmentProcessor::onCreateGLSLInstance() const{
+ return new GLComposeTwoFragmentProcessor;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GLComposeTwoFragmentProcessor::emitCode(EmitArgs& args) {
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const ComposeTwoFragmentProcessor& cs = args.fFp.cast<ComposeTwoFragmentProcessor>();
+
+ const char* inputColor = nullptr;
+ if (args.fInputColor) {
+ inputColor = "inputColor";
+ fragBuilder->codeAppendf("half4 inputColor = half4(%s.rgb, 1.0);", args.fInputColor);
+ }
+
+ // declare outputColor and emit the code for each of the two children
+ SkString srcColor("xfer_src");
+ this->invokeChild(0, inputColor, &srcColor, args);
+
+ SkString dstColor("xfer_dst");
+ this->invokeChild(1, inputColor, &dstColor, args);
+
+ // emit blend code
+ SkBlendMode mode = cs.getMode();
+ fragBuilder->codeAppendf("// Compose Xfer Mode: %s\n", SkBlendMode_Name(mode));
+ GrGLSLBlend::AppendMode(fragBuilder,
+ srcColor.c_str(),
+ dstColor.c_str(),
+ args.fOutputColor,
+ mode);
+
+ // re-multiply the output color by the input color's alpha
+ if (args.fInputColor) {
+ fragBuilder->codeAppendf("%s *= %s.a;", args.fOutputColor, args.fInputColor);
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromTwoProcessors(
+ std::unique_ptr<GrFragmentProcessor> src,
+ std::unique_ptr<GrFragmentProcessor> dst,
+ SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kClear:
+ return GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ case SkBlendMode::kSrc:
+ return src;
+ case SkBlendMode::kDst:
+ return dst;
+ default:
+ return ComposeTwoFragmentProcessor::Make(std::move(src), std::move(dst), mode);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class ComposeOneFragmentProcessor : public GrFragmentProcessor {
+public:
+ enum Child {
+ kDst_Child,
+ kSrc_Child,
+ };
+
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp,
+ SkBlendMode mode, Child child) {
+ if (!fp) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeOneFragmentProcessor(std::move(fp), mode, child));
+ }
+
+ const char* name() const override { return "ComposeOne"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+
+ str.appendf("Mode: %s, Child: %s",
+ SkBlendMode_Name(fMode), kDst_Child == fChild ? "Dst" : "Src");
+
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ str.appendf(" [%s %s]",
+ this->childProcessor(i).name(), this->childProcessor(i).dumpInfo().c_str());
+ }
+ return str;
+ }
+#endif
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+ SkBlendMode mode() const { return fMode; }
+
+ Child child() const { return fChild; }
+
+private:
+ OptimizationFlags OptFlags(const GrFragmentProcessor* fp, SkBlendMode mode, Child child) {
+ OptimizationFlags flags;
+ switch (mode) {
+ case SkBlendMode::kClear:
+ SK_ABORT("Should never create clear compose one FP.");
+ flags = kNone_OptimizationFlags;
+ break;
+
+ case SkBlendMode::kSrc:
+ SkASSERT(child == kSrc_Child);
+ flags = fp->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+
+ case SkBlendMode::kDst:
+ SkASSERT(child == kDst_Child);
+ flags = fp->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ break;
+
+ // Produces opaque if both src and dst are opaque. These also will modulate the child's
+ // output by either the input color or alpha. However, if the child is not compatible
+ // with the coverage as alpha then it may produce a color that is not valid premul.
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kModulate:
+ flags = ProcessorOptimizationFlags(fp) &
+ ~kConstantOutputForConstantInput_OptimizationFlag;
+ break;
+
+ // Produces zero when both are opaque, indeterminate if one is opaque.
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kXor:
+ flags = kNone_OptimizationFlags;
+ break;
+
+ // Is opaque if the dst is opaque.
+ case SkBlendMode::kSrcATop:
+ if (child == kDst_Child) {
+ flags = fp->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ } else {
+ flags = kPreservesOpaqueInput_OptimizationFlag;
+ }
+ break;
+
+            // DstATop is the converse of kSrcATop. Screen is also opaque if the src is opaque.
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kScreen:
+ if (child == kSrc_Child) {
+ flags = fp->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags;
+ } else {
+ flags = kPreservesOpaqueInput_OptimizationFlag;
+ }
+ break;
+
+ // These modes are all opaque if either src or dst is opaque. All the advanced modes
+ // compute alpha as src-over.
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kPlus:
+ case SkBlendMode::kOverlay:
+ case SkBlendMode::kDarken:
+ case SkBlendMode::kLighten:
+ case SkBlendMode::kColorDodge:
+ case SkBlendMode::kColorBurn:
+ case SkBlendMode::kHardLight:
+ case SkBlendMode::kSoftLight:
+ case SkBlendMode::kDifference:
+ case SkBlendMode::kExclusion:
+ case SkBlendMode::kMultiply:
+ case SkBlendMode::kHue:
+ case SkBlendMode::kSaturation:
+ case SkBlendMode::kColor:
+ case SkBlendMode::kLuminosity:
+ flags = kPreservesOpaqueInput_OptimizationFlag;
+ break;
+ }
+ if (does_cpu_blend_impl_match_gpu(mode) && fp->hasConstantOutputForConstantInput()) {
+ flags |= kConstantOutputForConstantInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GR_STATIC_ASSERT(((int)SkBlendMode::kLastMode & UINT16_MAX) == (int)SkBlendMode::kLastMode);
+ b->add32((int)fMode | (fChild << 16));
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& that) const override {
+ return fMode == that.cast<ComposeOneFragmentProcessor>().fMode;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& inputColor) const override {
+ SkPMColor4f childColor = ConstantOutputForConstantInput(this->childProcessor(0),
+ SK_PMColor4fWHITE);
+ SkPMColor4f src, dst;
+ if (kSrc_Child == fChild) {
+ src = childColor;
+ dst = inputColor;
+ } else {
+ src = inputColor;
+ dst = childColor;
+ }
+ return SkBlendMode_Apply(fMode, src, dst);
+ }
+
+private:
+ ComposeOneFragmentProcessor(std::unique_ptr<GrFragmentProcessor> fp, SkBlendMode mode,
+ Child child)
+ : INHERITED(kComposeOneFragmentProcessor_ClassID, OptFlags(fp.get(), mode, child))
+ , fMode(mode)
+ , fChild(child) {
+ SkDEBUGCODE(int dstIndex =) this->registerChildProcessor(std::move(fp));
+ SkASSERT(0 == dstIndex);
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkBlendMode fMode;
+ Child fChild;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLComposeOneFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkBlendMode mode = args.fFp.cast<ComposeOneFragmentProcessor>().mode();
+ ComposeOneFragmentProcessor::Child child =
+ args.fFp.cast<ComposeOneFragmentProcessor>().child();
+ SkString childColor("child");
+ this->invokeChild(0, &childColor, args);
+
+ // emit blend code
+ fragBuilder->codeAppendf("// Compose Xfer Mode: %s\n", SkBlendMode_Name(mode));
+ const char* childStr = childColor.c_str();
+ if (ComposeOneFragmentProcessor::kDst_Child == child) {
+ GrGLSLBlend::AppendMode(fragBuilder, args.fInputColor, childStr, args.fOutputColor, mode);
+ } else {
+ GrGLSLBlend::AppendMode(fragBuilder, childStr, args.fInputColor, args.fOutputColor, mode);
+ }
+ }
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ComposeOneFragmentProcessor);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> ComposeOneFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+    // Create one random frag proc.
+    // For now, we'll prevent the child from being a shader with children, to avoid the
+    // possibility of an arbitrarily large tree of procs.
+ std::unique_ptr<GrFragmentProcessor> dst(GrProcessorUnitTest::MakeChildFP(d));
+ SkBlendMode mode;
+ ComposeOneFragmentProcessor::Child child;
+ do {
+ mode = static_cast<SkBlendMode>(d->fRandom->nextRangeU(0, (int)SkBlendMode::kLastMode));
+ child = d->fRandom->nextBool() ? kDst_Child : kSrc_Child;
+ } while (SkBlendMode::kClear == mode || (SkBlendMode::kDst == mode && child == kSrc_Child) ||
+ (SkBlendMode::kSrc == mode && child == kDst_Child));
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeOneFragmentProcessor(std::move(dst), mode, child));
+}
+#endif
+
+GrGLSLFragmentProcessor* ComposeOneFragmentProcessor::onCreateGLSLInstance() const {
+ return new GLComposeOneFragmentProcessor;
+}
+
+std::unique_ptr<GrFragmentProcessor> ComposeOneFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new ComposeOneFragmentProcessor(this->childProcessor(0).clone(), fMode, fChild));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+// It may seem as though when the input FP is the dst and the mode is kDst (or likewise for
+// src/kSrc) these factories could simply return the input FP. However, that doesn't have quite
+// the same effect, since the returned compose FP will replace the FP's input with solid white and
+// ignore the original input. This could be implemented as:
+// RunInSeries(ConstColor(WHITE, kIgnoreInput), inputFP).
+
+std::unique_ptr<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromDstProcessor(
+ std::unique_ptr<GrFragmentProcessor> dst, SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kClear:
+ return GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ case SkBlendMode::kSrc:
+ return nullptr;
+ default:
+ return ComposeOneFragmentProcessor::Make(std::move(dst), mode,
+ ComposeOneFragmentProcessor::kDst_Child);
+ }
+}
+
+std::unique_ptr<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromSrcProcessor(
+ std::unique_ptr<GrFragmentProcessor> src, SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kClear:
+ return GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ case SkBlendMode::kDst:
+ return nullptr;
+ default:
+ return ComposeOneFragmentProcessor::Make(std::move(src), mode,
+ ComposeOneFragmentProcessor::kSrc_Child);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.h
new file mode 100644
index 0000000000..0100339154
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrXfermodeFragmentProcessor_DEFINED
+#define GrXfermodeFragmentProcessor_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkRefCnt.h"
+
+class GrFragmentProcessor;
+
+namespace GrXfermodeFragmentProcessor {
+
+/** The color input to the returned processor is treated as the src and the passed in processor is
+ the dst. */
+std::unique_ptr<GrFragmentProcessor> MakeFromDstProcessor(std::unique_ptr<GrFragmentProcessor> dst,
+ SkBlendMode mode);
+
+/** The color input to the returned processor is treated as the dst and the passed in processor is
+ the src. */
+std::unique_ptr<GrFragmentProcessor> MakeFromSrcProcessor(std::unique_ptr<GrFragmentProcessor> src,
+ SkBlendMode mode);
+
+/** Takes the input color, which is assumed to be unpremultiplied, and passes it as an opaque
+    color to both src and dst. The outputs of src and dst are blended using mode, and the
+    original input's alpha is applied to the blended color to produce a premul output. */
+std::unique_ptr<GrFragmentProcessor> MakeFromTwoProcessors(std::unique_ptr<GrFragmentProcessor> src,
+ std::unique_ptr<GrFragmentProcessor> dst,
+ SkBlendMode mode);
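+
+/** A minimal usage sketch (illustrative; 'srcFP' and 'dstFP' stand for fragment processors built
+    elsewhere):
+
+        auto blended = GrXfermodeFragmentProcessor::MakeFromTwoProcessors(
+                std::move(srcFP), std::move(dstFP), SkBlendMode::kMultiply);
+
+    The degenerate modes are short-circuited: MakeFromTwoProcessors maps kClear to a constant
+    transparent color and kSrc/kDst to the corresponding input, while the one-processor factories
+    return nullptr when the mode ignores the passed-in processor (kSrc for MakeFromDstProcessor,
+    kDst for MakeFromSrcProcessor). */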
+
+}  // namespace GrXfermodeFragmentProcessor
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.cpp
new file mode 100644
index 0000000000..a82aec7076
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.cpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/effects/GrYUVtoRGBEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+
+static const float kJPEGConversionMatrix[16] = {
+ 1.0f, 0.0f, 1.402f, -0.703749f,
+ 1.0f, -0.344136f, -0.714136f, 0.531211f,
+ 1.0f, 1.772f, 0.0f, -0.889475f,
+ 0.0f, 0.0f, 0.0f, 1.0
+};
+
+static const float kRec601ConversionMatrix[16] = {
+ 1.164f, 0.0f, 1.596f, -0.87075f,
+ 1.164f, -0.391f, -0.813f, 0.52925f,
+ 1.164f, 2.018f, 0.0f, -1.08175f,
+ 0.0f, 0.0f, 0.0f, 1.0
+};
+
+static const float kRec709ConversionMatrix[16] = {
+ 1.164f, 0.0f, 1.793f, -0.96925f,
+ 1.164f, -0.213f, -0.533f, 0.30025f,
+ 1.164f, 2.112f, 0.0f, -1.12875f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
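+
+// Sanity check for the layout (each row of four floats holds the (Y, U, V, 1) coefficients for
+// one output channel): for a mid-gray full-range JPEG pixel with Y = 0.5 and U = V = 128/255,
+// the first row gives R = 0.5 + 1.402*(128/255) - 0.703749 ~= 0.5, and the remaining rows
+// likewise give G ~= B ~= 0.5, as expected.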
+
+std::unique_ptr<GrFragmentProcessor> GrYUVtoRGBEffect::Make(const sk_sp<GrTextureProxy> proxies[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkYUVColorSpace yuvColorSpace,
+ GrSamplerState::Filter filterMode,
+ const SkMatrix& localMatrix,
+ const SkRect* domain) {
+ int numPlanes;
+ SkAssertResult(SkYUVAIndex::AreValidIndices(yuvaIndices, &numPlanes));
+
+ const SkISize YSize = proxies[yuvaIndices[SkYUVAIndex::kY_Index].fIndex]->isize();
+
+ GrSamplerState::Filter minimizeFilterMode = GrSamplerState::Filter::kMipMap == filterMode ?
+ GrSamplerState::Filter::kMipMap :
+ GrSamplerState::Filter::kBilerp;
+
+ GrSamplerState::Filter filterModes[4];
+ SkSize scales[4];
+ for (int i = 0; i < numPlanes; ++i) {
+ SkISize size = proxies[i]->isize();
+ scales[i] = SkSize::Make(SkIntToScalar(size.width()) / SkIntToScalar(YSize.width()),
+ SkIntToScalar(size.height()) / SkIntToScalar(YSize.height()));
+ filterModes[i] = (size == YSize) ? filterMode : minimizeFilterMode;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrYUVtoRGBEffect(
+ proxies, scales, filterModes, numPlanes, yuvaIndices, yuvColorSpace, localMatrix,
+ domain));
+}
+
+#ifdef SK_DEBUG
+SkString GrYUVtoRGBEffect::dumpInfo() const {
+ SkString str;
+ for (int i = 0; i < this->numTextureSamplers(); ++i) {
+ str.appendf("%d: %d %d ", i,
+ this->textureSampler(i).proxy()->uniqueID().asUInt(),
+ this->textureSampler(i).proxy()->underlyingUniqueID().asUInt());
+ }
+ str.appendf("\n");
+
+ return str;
+}
+#endif
+
+GrGLSLFragmentProcessor* GrYUVtoRGBEffect::onCreateGLSLInstance() const {
+ class GrGLSLYUVtoRGBEffect : public GrGLSLFragmentProcessor {
+ public:
+ GrGLSLYUVtoRGBEffect() {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrYUVtoRGBEffect& _outer = args.fFp.cast<GrYUVtoRGBEffect>();
+ (void)_outer;
+
+ if (kIdentity_SkYUVColorSpace != _outer.yuvColorSpace()) {
+ fColorSpaceMatrixVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4x4_GrSLType,
+ "colorSpaceMatrix");
+ }
+
+ int numSamplers = args.fTexSamplers.count();
+
+ SkString coords[4];
+ for (int i = 0; i < numSamplers; ++i) {
+ coords[i] = fragBuilder->ensureCoords2D(args.fTransformedCoords[i].fVaryingPoint);
+ }
+
+ for (int i = 0; i < numSamplers; ++i) {
+ SkString sampleVar;
+ sampleVar.printf("tmp%d", i);
+ fragBuilder->codeAppendf("half4 %s;", sampleVar.c_str());
+ fGLDomains[i].sampleTexture(fragBuilder, args.fUniformHandler, args.fShaderCaps,
+ _outer.fDomains[i], sampleVar.c_str(), coords[i], args.fTexSamplers[i]);
+ }
+
+ static const char kChannelToChar[4] = { 'x', 'y', 'z', 'w' };
+
+ fragBuilder->codeAppendf(
+ "half4 yuvOne = half4(tmp%d.%c, tmp%d.%c, tmp%d.%c, 1.0);",
+ _outer.yuvaIndex(0).fIndex, kChannelToChar[(int)_outer.yuvaIndex(0).fChannel],
+ _outer.yuvaIndex(1).fIndex, kChannelToChar[(int)_outer.yuvaIndex(1).fChannel],
+ _outer.yuvaIndex(2).fIndex, kChannelToChar[(int)_outer.yuvaIndex(2).fChannel]);
+
+ if (kIdentity_SkYUVColorSpace != _outer.yuvColorSpace()) {
+ SkASSERT(fColorSpaceMatrixVar.isValid());
+ fragBuilder->codeAppendf(
+ "yuvOne *= %s;", args.fUniformHandler->getUniformCStr(fColorSpaceMatrixVar));
+ }
+
+ if (_outer.yuvaIndex(3).fIndex >= 0) {
+ fragBuilder->codeAppendf(
+ "half a = tmp%d.%c;", _outer.yuvaIndex(3).fIndex,
+ kChannelToChar[(int)_outer.yuvaIndex(3).fChannel]);
+ // premultiply alpha
+ fragBuilder->codeAppend("yuvOne *= a;");
+ } else {
+ fragBuilder->codeAppend("half a = 1.0;");
+ }
+
+ fragBuilder->codeAppendf("%s = half4(yuvOne.xyz, a);", args.fOutputColor);
+ }
+
+ private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrYUVtoRGBEffect& _outer = _proc.cast<GrYUVtoRGBEffect>();
+
+ switch (_outer.yuvColorSpace()) {
+ case kJPEG_SkYUVColorSpace:
+ SkASSERT(fColorSpaceMatrixVar.isValid());
+ pdman.setMatrix4f(fColorSpaceMatrixVar, kJPEGConversionMatrix);
+ break;
+ case kRec601_SkYUVColorSpace:
+ SkASSERT(fColorSpaceMatrixVar.isValid());
+ pdman.setMatrix4f(fColorSpaceMatrixVar, kRec601ConversionMatrix);
+ break;
+ case kRec709_SkYUVColorSpace:
+ SkASSERT(fColorSpaceMatrixVar.isValid());
+ pdman.setMatrix4f(fColorSpaceMatrixVar, kRec709ConversionMatrix);
+ break;
+ case kIdentity_SkYUVColorSpace:
+ break;
+ }
+
+ int numSamplers = _outer.numTextureSamplers();
+ for (int i = 0; i < numSamplers; ++i) {
+ fGLDomains[i].setData(pdman, _outer.fDomains[i],
+ _outer.textureSampler(i).proxy(), _outer.textureSampler(i).samplerState());
+ }
+ }
+
+ UniformHandle fColorSpaceMatrixVar;
+ GrTextureDomain::GLDomain fGLDomains[4];
+ };
+
+ return new GrGLSLYUVtoRGBEffect;
+}
+void GrYUVtoRGBEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ using Domain = GrTextureDomain::GLDomain;
+
+ b->add32(this->numTextureSamplers());
+
+ uint32_t packed = 0;
+ uint32_t domain = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (this->yuvaIndex(i).fIndex < 0) {
+ continue;
+ }
+
+ uint8_t index = this->yuvaIndex(i).fIndex;
+ uint8_t chann = (uint8_t) this->yuvaIndex(i).fChannel;
+
+ SkASSERT(index < 4 && chann < 4);
+
+ packed |= (index | (chann << 2)) << (i * 4);
+
+ domain |= Domain::DomainKey(fDomains[i]) << (i * Domain::kDomainKeyBits);
+ }
+ if (kIdentity_SkYUVColorSpace == this->yuvColorSpace()) {
+ packed |= 0x1 << 16;
+ }
+
+ b->add32(packed);
+ b->add32(domain);
+}
+bool GrYUVtoRGBEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrYUVtoRGBEffect& that = other.cast<GrYUVtoRGBEffect>();
+
+ for (int i = 0; i < 4; ++i) {
+ if (fYUVAIndices[i] != that.fYUVAIndices[i]) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < this->numTextureSamplers(); ++i) {
+ // 'fSamplers' is checked by the base class
+ if (fSamplerTransforms[i] != that.fSamplerTransforms[i]) {
+ return false;
+ }
+ if (!(fDomains[i] == that.fDomains[i])) {
+ return false;
+ }
+ }
+
+ if (fYUVColorSpace != that.fYUVColorSpace) {
+ return false;
+ }
+
+ return true;
+}
+GrYUVtoRGBEffect::GrYUVtoRGBEffect(const GrYUVtoRGBEffect& src)
+ : INHERITED(kGrYUVtoRGBEffect_ClassID, src.optimizationFlags())
+ , fDomains{src.fDomains[0], src.fDomains[1], src.fDomains[2], src.fDomains[3]}
+ , fYUVColorSpace(src.fYUVColorSpace) {
+ int numPlanes = src.numTextureSamplers();
+ for (int i = 0; i < numPlanes; ++i) {
+ fSamplers[i].reset(sk_ref_sp(src.fSamplers[i].proxy()), src.fSamplers[i].samplerState());
+ fSamplerTransforms[i] = src.fSamplerTransforms[i];
+ fSamplerCoordTransforms[i] = src.fSamplerCoordTransforms[i];
+ }
+
+ this->setTextureSamplerCnt(numPlanes);
+ for (int i = 0; i < numPlanes; ++i) {
+ this->addCoordTransform(&fSamplerCoordTransforms[i]);
+ }
+
+ memcpy(fYUVAIndices, src.fYUVAIndices, sizeof(fYUVAIndices));
+}
+std::unique_ptr<GrFragmentProcessor> GrYUVtoRGBEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrYUVtoRGBEffect(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrYUVtoRGBEffect::onTextureSampler(int index) const {
+ SkASSERT(index < this->numTextureSamplers());
+ return fSamplers[index];
+}
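
As a quick sanity check of the conversion constants, this standalone sketch (not from the patch) applies the Rec. 601 matrix above to video-range white (Y=235, U=V=128, normalized to [0,1]), mirroring the shader's "yuvOne *= colorSpaceMatrix" step. It treats the constant array as row-major with the offsets in the fourth column, which is how the values are laid out; the result comes out at roughly (1, 1, 1).

    #include <cstdio>

    static const float kRec601[16] = {
        1.164f,  0.0f,    1.596f, -0.87075f,
        1.164f, -0.391f, -0.813f,  0.52925f,
        1.164f,  2.018f,  0.0f,   -1.08175f,
        0.0f,    0.0f,    0.0f,    1.0f
    };

    int main() {
        // Video-range white: Y=235, U=V=128, normalized to [0,1], plus the 1 for the offset row.
        float yuv1[4] = {235.0f / 255.0f, 128.0f / 255.0f, 128.0f / 255.0f, 1.0f};
        float rgb[4];
        for (int row = 0; row < 4; ++row) {
            rgb[row] = 0.0f;
            for (int col = 0; col < 4; ++col) {
                rgb[row] += kRec601[row * 4 + col] * yuv1[col];
            }
        }
        std::printf("rgb = (%.3f, %.3f, %.3f)\n", rgb[0], rgb[1], rgb[2]);  // ~ (1, 1, 1)
        return 0;
    }
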
diff --git a/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.h b/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.h
new file mode 100644
index 0000000000..338ea14aa8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrYUVtoRGBEffect.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVtoRGBEffect_DEFINED
+#define GrYUVtoRGBEffect_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+
+#include "include/core/SkYUVAIndex.h"
+
+class GrYUVtoRGBEffect : public GrFragmentProcessor {
+public:
+ // The domain supported by this effect is more limited than the general GrTextureDomain due
+ // to the multi-planar, varying resolution images that it has to sample. If 'domain' is provided
+ // it is the Y plane's domain. This will automatically inset for bilinear filtering, and only
+ // the clamp wrap mode is supported.
+ static std::unique_ptr<GrFragmentProcessor> Make(const sk_sp<GrTextureProxy> proxies[],
+ const SkYUVAIndex indices[4],
+ SkYUVColorSpace yuvColorSpace,
+ GrSamplerState::Filter filterMode,
+ const SkMatrix& localMatrix = SkMatrix::I(),
+ const SkRect* domain = nullptr);
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override;
+#endif
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVColorSpace; }
+ const SkYUVAIndex& yuvaIndex(int i) const { return fYUVAIndices[i]; }
+
+ GrYUVtoRGBEffect(const GrYUVtoRGBEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "YUVtoRGBEffect"; }
+
+private:
+ GrYUVtoRGBEffect(const sk_sp<GrTextureProxy> proxies[], const SkSize scales[],
+ const GrSamplerState::Filter filterModes[], int numPlanes,
+ const SkYUVAIndex yuvaIndices[4], SkYUVColorSpace yuvColorSpace,
+ const SkMatrix& localMatrix, const SkRect* domain)
+ : INHERITED(kGrYUVtoRGBEffect_ClassID, kNone_OptimizationFlags)
+ , fDomains{GrTextureDomain::IgnoredDomain(), GrTextureDomain::IgnoredDomain(),
+ GrTextureDomain::IgnoredDomain(), GrTextureDomain::IgnoredDomain()}
+ , fYUVColorSpace(yuvColorSpace) {
+ for (int i = 0; i < numPlanes; ++i) {
+ SkMatrix planeMatrix = SkMatrix::MakeScale(scales[i].width(), scales[i].height());
+ if (domain) {
+ SkASSERT(filterModes[i] != GrSamplerState::Filter::kMipMap);
+
+ SkRect scaledDomain = planeMatrix.mapRect(*domain);
+ if (filterModes[i] != GrSamplerState::Filter::kNearest) {
+ // Inset by half a pixel for bilerp, after scaling to the size of the plane
+ scaledDomain.inset(0.5f, 0.5f);
+ }
+
+ fDomains[i] = GrTextureDomain(proxies[i].get(), scaledDomain,
+ GrTextureDomain::kClamp_Mode, GrTextureDomain::kClamp_Mode, i);
+ }
+
+ planeMatrix.preConcat(localMatrix);
+ fSamplers[i].reset(std::move(proxies[i]),
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, filterModes[i]));
+ fSamplerTransforms[i] = planeMatrix;
+ fSamplerCoordTransforms[i] =
+ GrCoordTransform(fSamplerTransforms[i], fSamplers[i].proxy());
+ }
+
+ this->setTextureSamplerCnt(numPlanes);
+ for (int i = 0; i < numPlanes; ++i) {
+ this->addCoordTransform(&fSamplerCoordTransforms[i]);
+ }
+
+ memcpy(fYUVAIndices, yuvaIndices, sizeof(fYUVAIndices));
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ TextureSampler fSamplers[4];
+ SkMatrix44 fSamplerTransforms[4];
+ GrCoordTransform fSamplerCoordTransforms[4];
+ GrTextureDomain fDomains[4];
+ SkYUVAIndex fYUVAIndices[4];
+ SkYUVColorSpace fYUVColorSpace;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
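
Make() sizes every plane relative to the Y plane and demotes the filter for subsampled planes. A minimal standalone sketch of that computation for 4:2:0 data, with illustrative plane sizes:

    #include <cstdio>

    int main() {
        const int yW = 640, yH = 480;           // Y plane (illustrative)
        const int planeW[3] = {640, 320, 320};  // Y, U, V widths for 4:2:0
        const int planeH[3] = {480, 240, 240};
        for (int i = 0; i < 3; ++i) {
            float sx = (float)planeW[i] / (float)yW;
            float sy = (float)planeH[i] / (float)yH;
            // A plane smaller than Y also forces a minification-friendly filter
            // (bilerp, or mipmap if requested) in Make(); nearest is only kept
            // when the plane matches the Y plane's size.
            std::printf("plane %d: scale = (%.2f, %.2f)\n", i, sx, sy);
        }
        return 0;
    }
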
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.cpp
new file mode 100644
index 0000000000..6d5833e7cd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrAARectEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrAARectEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLAARectEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLAARectEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrAARectEffect& _outer = args.fFp.cast<GrAARectEffect>();
+ (void)_outer;
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto rect = _outer.rect;
+ (void)rect;
+ prevRect = float4(-1.0);
+ rectUniformVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat4_GrSLType, "rectUniform");
+ fragBuilder->codeAppendf(
+ "float4 prevRect = float4(%f, %f, %f, %f);\nhalf alpha;\n@switch (%d) {\n case "
+ "0:\n case 2:\n alpha = half(all(greaterThan(float4(sk_FragCoord.xy, "
+ "%s.zw), float4(%s.xy, sk_FragCoord.xy))) ? 1 : 0);\n break;\n "
+ "default:\n half xSub, ySub;\n xSub = min(half(sk_FragCoord.x - "
+ "%s.x), 0.0);\n xSub += min(half(%s.z - sk_FragCoord.x), 0.0);\n "
+ "ySub = min(half(sk_FragCoord.y - %s.y), 0.0);\n ySub += min(half(%s.w - "
+ "sk_FragCoord.y), 0.0);\n alpha = (1.0 + ",
+ prevRect.left(),
+ prevRect.top(),
+ prevRect.right(),
+ prevRect.bottom(),
+ (int)_outer.edgeType,
+ args.fUniformHandler->getUniformCStr(rectUniformVar),
+ args.fUniformHandler->getUniformCStr(rectUniformVar),
+ args.fUniformHandler->getUniformCStr(rectUniformVar),
+ args.fUniformHandler->getUniformCStr(rectUniformVar),
+ args.fUniformHandler->getUniformCStr(rectUniformVar),
+ args.fUniformHandler->getUniformCStr(rectUniformVar));
+ fragBuilder->codeAppendf(
+ "max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));\n}\n@if (%d == 2 || %d == 3) {\n "
+ "alpha = 1.0 - alpha;\n}\n%s = %s * alpha;\n",
+ (int)_outer.edgeType,
+ (int)_outer.edgeType,
+ args.fOutputColor,
+ args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrAARectEffect& _outer = _proc.cast<GrAARectEffect>();
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto rect = _outer.rect;
+ (void)rect;
+ UniformHandle& rectUniform = rectUniformVar;
+ (void)rectUniform;
+
+ const SkRect& newRect = GrProcessorEdgeTypeIsAA(edgeType) ? rect.makeInset(.5f, .5f) : rect;
+ if (newRect != prevRect) {
+ pdman.set4f(rectUniform, newRect.fLeft, newRect.fTop, newRect.fRight, newRect.fBottom);
+ prevRect = newRect;
+ }
+ }
+ SkRect prevRect = float4(0);
+ UniformHandle rectUniformVar;
+};
+GrGLSLFragmentProcessor* GrAARectEffect::onCreateGLSLInstance() const {
+ return new GrGLSLAARectEffect();
+}
+void GrAARectEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)edgeType);
+}
+bool GrAARectEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrAARectEffect& that = other.cast<GrAARectEffect>();
+ (void)that;
+ if (edgeType != that.edgeType) return false;
+ if (rect != that.rect) return false;
+ return true;
+}
+GrAARectEffect::GrAARectEffect(const GrAARectEffect& src)
+ : INHERITED(kGrAARectEffect_ClassID, src.optimizationFlags())
+ , edgeType(src.edgeType)
+ , rect(src.rect) {}
+std::unique_ptr<GrFragmentProcessor> GrAARectEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrAARectEffect(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrAARectEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrAARectEffect::TestCreate(GrProcessorTestData* d) {
+ SkRect rect = SkRect::MakeLTRB(d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1());
+ std::unique_ptr<GrFragmentProcessor> fp;
+ do {
+ GrClipEdgeType edgeType =
+ static_cast<GrClipEdgeType>(d->fRandom->nextULessThan(kGrClipEdgeTypeCnt));
+
+ fp = GrAARectEffect::Make(edgeType, rect);
+ } while (nullptr == fp);
+ return fp;
+}
+#endif
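
The generated shader's AA branch accumulates per-axis edge deficits (xSub and ySub, each clamped to [-1, 0]) and multiplies them into a coverage value. A CPU transcription of that math (standalone, not from the patch), using an illustrative rect already inset by half a pixel as onSetData does for AA edge types:

    #include <algorithm>
    #include <cstdio>

    // Coverage for a fragment at (px, py) against rect [l, t, r, b].
    static float aaRectCoverage(float px, float py, float l, float t, float r, float b) {
        // 0 when the fragment is at least a pixel inside, negative past an edge.
        float xSub = std::min(px - l, 0.0f) + std::min(r - px, 0.0f);
        float ySub = std::min(py - t, 0.0f) + std::min(b - py, 0.0f);
        return (1.0f + std::max(xSub, -1.0f)) * (1.0f + std::max(ySub, -1.0f));
    }

    int main() {
        // Rect [10, 10, 20, 20] inset by .5 -> [10.5, 10.5, 19.5, 19.5].
        std::printf("center: %.2f\n", aaRectCoverage(15.0f, 15.0f, 10.5f, 10.5f, 19.5f, 19.5f)); // 1.00
        std::printf("edge:   %.2f\n", aaRectCoverage(10.0f, 15.0f, 10.5f, 10.5f, 19.5f, 19.5f)); // 0.50
        std::printf("corner: %.2f\n", aaRectCoverage(10.0f, 10.0f, 10.5f, 10.5f, 19.5f, 19.5f)); // 0.25
        return 0;
    }
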
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.h
new file mode 100644
index 0000000000..15d5fb7594
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrAARectEffect.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrAARectEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrAARectEffect_DEFINED
+#define GrAARectEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrAARectEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, SkRect rect) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrAARectEffect(edgeType, rect));
+ }
+ GrAARectEffect(const GrAARectEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "AARectEffect"; }
+ GrClipEdgeType edgeType;
+ SkRect rect;
+
+private:
+ GrAARectEffect(GrClipEdgeType edgeType, SkRect rect)
+ : INHERITED(kGrAARectEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , edgeType(edgeType)
+ , rect(rect) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.cpp
new file mode 100644
index 0000000000..506cedba5b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrAlphaThresholdFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrAlphaThresholdFragmentProcessor.h"
+
+inline GrFragmentProcessor::OptimizationFlags GrAlphaThresholdFragmentProcessor::optFlags(
+ float outerThreshold) {
+ if (outerThreshold >= 1.0) {
+ return kPreservesOpaqueInput_OptimizationFlag |
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ } else {
+ return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+}
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLAlphaThresholdFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLAlphaThresholdFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrAlphaThresholdFragmentProcessor& _outer =
+ args.fFp.cast<GrAlphaThresholdFragmentProcessor>();
+ (void)_outer;
+ auto innerThreshold = _outer.innerThreshold;
+ (void)innerThreshold;
+ auto outerThreshold = _outer.outerThreshold;
+ (void)outerThreshold;
+ innerThresholdVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "innerThreshold");
+ outerThresholdVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "outerThreshold");
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "half4 color = %s;\nhalf4 mask_color = sample(%s, %s).%s;\nif (mask_color.w < 0.5) "
+ "{\n if (color.w > %s) {\n half scale = %s / color.w;\n color.xyz "
+ "*= scale;\n color.w = %s;\n }\n} else if (color.w < %s) {\n half "
+ "scale = %s / max(0.0010000000474974513, color.w);\n color.xyz *= scale;\n "
+ "color.w = %s;\n}\n%s = color;\n",
+ args.fInputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ args.fUniformHandler->getUniformCStr(outerThresholdVar),
+ args.fUniformHandler->getUniformCStr(outerThresholdVar),
+ args.fUniformHandler->getUniformCStr(outerThresholdVar),
+ args.fUniformHandler->getUniformCStr(innerThresholdVar),
+ args.fUniformHandler->getUniformCStr(innerThresholdVar),
+ args.fUniformHandler->getUniformCStr(innerThresholdVar), args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrAlphaThresholdFragmentProcessor& _outer =
+ _proc.cast<GrAlphaThresholdFragmentProcessor>();
+ {
+ pdman.set1f(innerThresholdVar, (_outer.innerThreshold));
+ pdman.set1f(outerThresholdVar, (_outer.outerThreshold));
+ }
+ }
+ UniformHandle innerThresholdVar;
+ UniformHandle outerThresholdVar;
+};
+GrGLSLFragmentProcessor* GrAlphaThresholdFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLAlphaThresholdFragmentProcessor();
+}
+void GrAlphaThresholdFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrAlphaThresholdFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrAlphaThresholdFragmentProcessor& that = other.cast<GrAlphaThresholdFragmentProcessor>();
+ (void)that;
+ if (mask != that.mask) return false;
+ if (innerThreshold != that.innerThreshold) return false;
+ if (outerThreshold != that.outerThreshold) return false;
+ return true;
+}
+GrAlphaThresholdFragmentProcessor::GrAlphaThresholdFragmentProcessor(
+ const GrAlphaThresholdFragmentProcessor& src)
+ : INHERITED(kGrAlphaThresholdFragmentProcessor_ClassID, src.optimizationFlags())
+ , maskCoordTransform(src.maskCoordTransform)
+ , mask(src.mask)
+ , innerThreshold(src.innerThreshold)
+ , outerThreshold(src.outerThreshold) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&maskCoordTransform);
+}
+std::unique_ptr<GrFragmentProcessor> GrAlphaThresholdFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrAlphaThresholdFragmentProcessor(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrAlphaThresholdFragmentProcessor::onTextureSampler(
+ int index) const {
+ return IthTextureSampler(index, mask);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrAlphaThresholdFragmentProcessor);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrAlphaThresholdFragmentProcessor::TestCreate(
+ GrProcessorTestData* testData) {
+ sk_sp<GrTextureProxy> maskProxy = testData->textureProxy(GrProcessorUnitTest::kAlphaTextureIdx);
+ // Make the inner and outer thresholds be in (0, 1) exclusive and be sorted correctly.
+ float innerThresh = testData->fRandom->nextUScalar1() * .99f + 0.005f;
+ float outerThresh = testData->fRandom->nextUScalar1() * .99f + 0.005f;
+ const int kMaxWidth = 1000;
+ const int kMaxHeight = 1000;
+ uint32_t width = testData->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = testData->fRandom->nextULessThan(kMaxHeight);
+ uint32_t x = testData->fRandom->nextULessThan(kMaxWidth - width);
+ uint32_t y = testData->fRandom->nextULessThan(kMaxHeight - height);
+ SkIRect bounds = SkIRect::MakeXYWH(x, y, width, height);
+ return GrAlphaThresholdFragmentProcessor::Make(std::move(maskProxy), innerThresh, outerThresh,
+ bounds);
+}
+#endif
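
The shader string above encodes a simple rule: outside the mask, cap the color's alpha at outerThreshold; inside the mask, raise it to at least innerThreshold, rescaling RGB in both cases so the color stays premultiplied. A standalone CPU sketch of that rule, with illustrative thresholds:

    #include <algorithm>
    #include <cstdio>

    struct Color { float r, g, b, a; };  // premultiplied

    static Color applyThreshold(Color c, float maskAlpha, float inner, float outer) {
        if (maskAlpha < 0.5f) {
            if (c.a > outer) {                // outside the mask: cap alpha at outer
                float scale = outer / c.a;
                c.r *= scale; c.g *= scale; c.b *= scale;
                c.a = outer;
            }
        } else if (c.a < inner) {             // inside the mask: raise alpha to inner
            float scale = inner / std::max(0.001f, c.a);  // same epsilon as the shader
            c.r *= scale; c.g *= scale; c.b *= scale;
            c.a = inner;
        }
        return c;
    }

    int main() {
        Color out = applyThreshold({0.8f, 0.8f, 0.8f, 0.8f}, /*maskAlpha=*/0.0f,
                                   /*inner=*/0.25f, /*outer=*/0.5f);
        std::printf("capped alpha: %.2f\n", out.a);  // 0.50
        return 0;
    }
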
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.h
new file mode 100644
index 0000000000..730054b567
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrAlphaThresholdFragmentProcessor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrAlphaThresholdFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrAlphaThresholdFragmentProcessor_DEFINED
+#define GrAlphaThresholdFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrAlphaThresholdFragmentProcessor : public GrFragmentProcessor {
+public:
+ inline OptimizationFlags optFlags(float outerThreshold);
+
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> mask,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrAlphaThresholdFragmentProcessor(
+ mask, innerThreshold, outerThreshold, bounds));
+ }
+ GrAlphaThresholdFragmentProcessor(const GrAlphaThresholdFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "AlphaThresholdFragmentProcessor"; }
+ GrCoordTransform maskCoordTransform;
+ TextureSampler mask;
+ float innerThreshold;
+ float outerThreshold;
+
+private:
+ GrAlphaThresholdFragmentProcessor(sk_sp<GrTextureProxy> mask, float innerThreshold,
+ float outerThreshold, const SkIRect& bounds)
+ : INHERITED(kGrAlphaThresholdFragmentProcessor_ClassID, kNone_OptimizationFlags)
+ , maskCoordTransform(
+ SkMatrix::MakeTrans(SkIntToScalar(-bounds.x()), SkIntToScalar(-bounds.y())),
+ mask.get())
+ , mask(std::move(mask))
+ , innerThreshold(innerThreshold)
+ , outerThreshold(outerThreshold) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&maskCoordTransform);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.cpp
new file mode 100644
index 0000000000..f286f0e3ea
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrBlurredEdgeFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrBlurredEdgeFragmentProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLBlurredEdgeFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLBlurredEdgeFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrBlurredEdgeFragmentProcessor& _outer =
+ args.fFp.cast<GrBlurredEdgeFragmentProcessor>();
+ (void)_outer;
+ auto mode = _outer.mode;
+ (void)mode;
+ fragBuilder->codeAppendf(
+ "half factor = 1.0 - %s.w;\n@switch (%d) {\n case 0:\n factor = "
+ "exp((-factor * factor) * 4.0) - 0.017999999225139618;\n break;\n case "
+ "1:\n factor = smoothstep(1.0, 0.0, factor);\n break;\n}\n%s = "
+ "half4(factor);\n",
+ args.fInputColor, (int)_outer.mode, args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrBlurredEdgeFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLBlurredEdgeFragmentProcessor();
+}
+void GrBlurredEdgeFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)mode);
+}
+bool GrBlurredEdgeFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrBlurredEdgeFragmentProcessor& that = other.cast<GrBlurredEdgeFragmentProcessor>();
+ (void)that;
+ if (mode != that.mode) return false;
+ return true;
+}
+GrBlurredEdgeFragmentProcessor::GrBlurredEdgeFragmentProcessor(
+ const GrBlurredEdgeFragmentProcessor& src)
+ : INHERITED(kGrBlurredEdgeFragmentProcessor_ClassID, src.optimizationFlags())
+ , mode(src.mode) {}
+std::unique_ptr<GrFragmentProcessor> GrBlurredEdgeFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrBlurredEdgeFragmentProcessor(*this));
+}
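
The two switch cases in the shader correspond to two falloff curves over factor = 1 - inputAlpha: a Gaussian-like exp(-4*f*f) - 0.018 and smoothstep(1, 0, f). A standalone sketch (not from the patch) that tabulates both on the CPU:

    #include <cmath>
    #include <cstdio>

    // Mode 0 in the shader: Gaussian-shaped falloff, offset so it hits ~0 at f = 1.
    static float gaussianFalloff(float f) { return std::exp(-f * f * 4.0f) - 0.018f; }

    // Mode 1: GLSL smoothstep(1.0, 0.0, f), written out as t*t*(3 - 2t) with t = 1 - f.
    static float smoothStepFalloff(float f) {
        float t = 1.0f - f;
        return t * t * (3.0f - 2.0f * t);
    }

    int main() {
        for (float f = 0.0f; f <= 1.01f; f += 0.25f) {
            std::printf("f=%.2f  gaussian=%.3f  smoothstep=%.3f\n",
                        f, gaussianFalloff(f), smoothStepFalloff(f));
        }
        return 0;
    }
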
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.h
new file mode 100644
index 0000000000..e71eafb56c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrBlurredEdgeFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrBlurredEdgeFragmentProcessor_DEFINED
+#define GrBlurredEdgeFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrBlurredEdgeFragmentProcessor : public GrFragmentProcessor {
+public:
+ enum class Mode { kGaussian = 0, kSmoothStep = 1 };
+ static std::unique_ptr<GrFragmentProcessor> Make(Mode mode) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrBlurredEdgeFragmentProcessor(mode));
+ }
+ GrBlurredEdgeFragmentProcessor(const GrBlurredEdgeFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "BlurredEdgeFragmentProcessor"; }
+ Mode mode;
+
+private:
+ GrBlurredEdgeFragmentProcessor(Mode mode)
+ : INHERITED(kGrBlurredEdgeFragmentProcessor_ClassID, kNone_OptimizationFlags)
+ , mode(mode) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.cpp
new file mode 100644
index 0000000000..ccef347e1b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrCircleBlurFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrCircleBlurFragmentProcessor.h"
+
+#include "src/gpu/GrProxyProvider.h"
+
+// Computes an unnormalized half kernel (right side). Returns the summation of all the half
+// kernel values.
+static float make_unnormalized_half_kernel(float* halfKernel, int halfKernelSize, float sigma) {
+ const float invSigma = 1.f / sigma;
+ const float b = -0.5f * invSigma * invSigma;
+ float tot = 0.0f;
+ // Compute half kernel values at half pixel steps out from the center.
+ float t = 0.5f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ float value = expf(t * t * b);
+ tot += value;
+ halfKernel[i] = value;
+ t += 1.f;
+ }
+ return tot;
+}
+
+// Create a Gaussian half-kernel (right side) and a summed area table given a sigma and number
+// of discrete steps. The half kernel is normalized to sum to 0.5.
+static void make_half_kernel_and_summed_table(float* halfKernel, float* summedHalfKernel,
+ int halfKernelSize, float sigma) {
+ // The half kernel should sum to 0.5 not 1.0.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel, halfKernelSize, sigma);
+ float sum = 0.f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[i] /= tot;
+ sum += halfKernel[i];
+ summedHalfKernel[i] = sum;
+ }
+}
+
+// Applies the 1D half kernel vertically at points along the x axis to a circle centered at the
+// origin with radius circleR.
+void apply_kernel_in_y(float* results, int numSteps, float firstX, float circleR,
+ int halfKernelSize, const float* summedHalfKernelTable) {
+ float x = firstX;
+ for (int i = 0; i < numSteps; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ results[i] = 0;
+ continue;
+ }
+ float y = sqrtf(circleR * circleR - x * x);
+        // In the column at x we exit the circle at +y and -y.
+        // The summed table entry j actually reflects an offset of j + 0.5.
+ y -= 0.5f;
+ int yInt = SkScalarFloorToInt(y);
+ SkASSERT(yInt >= -1);
+ if (y < 0) {
+ results[i] = (y + 0.5f) * summedHalfKernelTable[0];
+ } else if (yInt >= halfKernelSize - 1) {
+ results[i] = 0.5f;
+ } else {
+ float yFrac = y - yInt;
+ results[i] = (1.f - yFrac) * summedHalfKernelTable[yInt] +
+ yFrac * summedHalfKernelTable[yInt + 1];
+ }
+ }
+}
+
+// Apply a Gaussian at point (evalX, 0) to a circle centered at the origin with radius circleR.
+// This relies on having a half kernel computed for the Gaussian and a table of applications of
+// the half kernel in y to columns at (evalX - halfKernelSize, evalX - halfKernelSize + 1, ...,
+// evalX + halfKernelSize - 1) passed in as yKernelEvaluations.
+static uint8_t eval_at(float evalX, float circleR, const float* halfKernel, int halfKernelSize,
+ const float* yKernelEvaluations) {
+ float acc = 0;
+
+ float x = evalX - halfKernelSize;
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i];
+ acc += verticalEval * halfKernel[halfKernelSize - i - 1];
+ }
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i + halfKernelSize];
+ acc += verticalEval * halfKernel[i];
+ }
+ // Since we applied a half kernel in y we multiply acc by 2 (the circle is symmetric about
+ // the x axis).
+ return SkUnitScalarClampToByte(2.f * acc);
+}
+
+// This function creates a profile of a blurred circle. It does this by computing a kernel for
+// half the Gaussian and a matching summed area table. The summed area table is used to compute
+// an array of vertical applications of the half kernel to the circle along the x axis. The
+// table of y evaluations has 2 * k + n entries where k is the size of the half kernel and n is
+// the size of the profile being computed. Then for each of the n profile entries we walk out k
+// steps in each horizontal direction multiplying the corresponding y evaluation by the half
+// kernel entry and sum these values to compute the profile entry.
+static void create_circle_profile(uint8_t* weights, float sigma, float circleR,
+ int profileTextureWidth) {
+ const int numSteps = profileTextureWidth;
+
+ // The full kernel is 6 sigmas wide.
+ int halfKernelSize = SkScalarCeilToInt(6.0f * sigma);
+    // Round up to the next multiple of 2, then divide by 2.
+ halfKernelSize = ((halfKernelSize + 1) & ~1) >> 1;
+
+ // Number of x steps at which to apply kernel in y to cover all the profile samples in x.
+ int numYSteps = numSteps + 2 * halfKernelSize;
+
+ SkAutoTArray<float> bulkAlloc(halfKernelSize + halfKernelSize + numYSteps);
+ float* halfKernel = bulkAlloc.get();
+ float* summedKernel = bulkAlloc.get() + halfKernelSize;
+ float* yEvals = bulkAlloc.get() + 2 * halfKernelSize;
+ make_half_kernel_and_summed_table(halfKernel, summedKernel, halfKernelSize, sigma);
+
+ float firstX = -halfKernelSize + 0.5f;
+ apply_kernel_in_y(yEvals, numYSteps, firstX, circleR, halfKernelSize, summedKernel);
+
+ for (int i = 0; i < numSteps - 1; ++i) {
+ float evalX = i + 0.5f;
+ weights[i] = eval_at(evalX, circleR, halfKernel, halfKernelSize, yEvals + i);
+ }
+ // Ensure the tail of the Gaussian goes to zero.
+ weights[numSteps - 1] = 0;
+}
+
+static void create_half_plane_profile(uint8_t* profile, int profileWidth) {
+ SkASSERT(!(profileWidth & 0x1));
+ // The full kernel is 6 sigmas wide.
+ float sigma = profileWidth / 6.f;
+ int halfKernelSize = profileWidth / 2;
+
+ SkAutoTArray<float> halfKernel(halfKernelSize);
+
+ // The half kernel should sum to 0.5.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel.get(), halfKernelSize, sigma);
+ float sum = 0.f;
+ // Populate the profile from the right edge to the middle.
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[halfKernelSize - i - 1] /= tot;
+ sum += halfKernel[halfKernelSize - i - 1];
+ profile[profileWidth - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Populate the profile from the middle to the left edge (by flipping the half kernel and
+ // continuing the summation).
+ for (int i = 0; i < halfKernelSize; ++i) {
+ sum += halfKernel[i];
+ profile[halfKernelSize - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Ensure tail goes to 0.
+ profile[profileWidth - 1] = 0;
+}
+
+static sk_sp<GrTextureProxy> create_profile_texture(GrProxyProvider* proxyProvider,
+ const SkRect& circle, float sigma,
+ float* solidRadius, float* textureRadius) {
+ float circleR = circle.width() / 2.0f;
+ if (circleR < SK_ScalarNearlyZero) {
+ return nullptr;
+ }
+ // Profile textures are cached by the ratio of sigma to circle radius and by the size of the
+ // profile texture (binned by powers of 2).
+ SkScalar sigmaToCircleRRatio = sigma / circleR;
+    // When sigma is really small this becomes equivalent to convolving a Gaussian with a
+    // half-plane. Similarly, in the extreme high-ratio cases the circle becomes a point WRT the
+    // Gaussian and the profile texture is just a Gaussian evaluation. However, we haven't yet
+ // implemented this latter optimization.
+ sigmaToCircleRRatio = SkTMin(sigmaToCircleRRatio, 8.f);
+ SkFixed sigmaToCircleRRatioFixed;
+ static const SkScalar kHalfPlaneThreshold = 0.1f;
+ bool useHalfPlaneApprox = false;
+ if (sigmaToCircleRRatio <= kHalfPlaneThreshold) {
+ useHalfPlaneApprox = true;
+ sigmaToCircleRRatioFixed = 0;
+ *solidRadius = circleR - 3 * sigma;
+ *textureRadius = 6 * sigma;
+ } else {
+ // Convert to fixed point for the key.
+ sigmaToCircleRRatioFixed = SkScalarToFixed(sigmaToCircleRRatio);
+ // We shave off some bits to reduce the number of unique entries. We could probably
+ // shave off more than we do.
+ sigmaToCircleRRatioFixed &= ~0xff;
+ sigmaToCircleRRatio = SkFixedToScalar(sigmaToCircleRRatioFixed);
+ sigma = circleR * sigmaToCircleRRatio;
+ *solidRadius = 0;
+ *textureRadius = circleR + 3 * sigma;
+ }
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
+ builder[0] = sigmaToCircleRRatioFixed;
+ builder.finish();
+
+ sk_sp<GrTextureProxy> blurProfile = proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin);
+ if (!blurProfile) {
+ static constexpr int kProfileTextureWidth = 512;
+
+ SkBitmap bm;
+ if (!bm.tryAllocPixels(SkImageInfo::MakeA8(kProfileTextureWidth, 1))) {
+ return nullptr;
+ }
+
+ if (useHalfPlaneApprox) {
+ create_half_plane_profile(bm.getAddr8(0, 0), kProfileTextureWidth);
+ } else {
+ // Rescale params to the size of the texture we're creating.
+ SkScalar scale = kProfileTextureWidth / *textureRadius;
+ create_circle_profile(bm.getAddr8(0, 0), sigma * scale, circleR * scale,
+ kProfileTextureWidth);
+ }
+
+ bm.setImmutable();
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bm);
+
+ blurProfile = proxyProvider->createTextureProxy(std::move(image), 1, SkBudgeted::kYes,
+ SkBackingFit::kExact);
+ if (!blurProfile) {
+ return nullptr;
+ }
+
+ SkASSERT(blurProfile->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, blurProfile.get());
+ }
+
+ return blurProfile;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrCircleBlurFragmentProcessor::Make(
+ GrProxyProvider* proxyProvider, const SkRect& circle, float sigma) {
+ float solidRadius;
+ float textureRadius;
+ sk_sp<GrTextureProxy> profile(
+ create_profile_texture(proxyProvider, circle, sigma, &solidRadius, &textureRadius));
+ if (!profile) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleBlurFragmentProcessor(
+ circle, textureRadius, solidRadius, std::move(profile)));
+}
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLCircleBlurFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLCircleBlurFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrCircleBlurFragmentProcessor& _outer =
+ args.fFp.cast<GrCircleBlurFragmentProcessor>();
+ (void)_outer;
+ auto circleRect = _outer.circleRect;
+ (void)circleRect;
+ auto textureRadius = _outer.textureRadius;
+ (void)textureRadius;
+ auto solidRadius = _outer.solidRadius;
+ (void)solidRadius;
+ circleDataVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ "circleData");
+ fragBuilder->codeAppendf(
+ "half2 vec = half2(half((sk_FragCoord.x - float(%s.x)) * float(%s.w)), "
+ "half((sk_FragCoord.y - float(%s.y)) * float(%s.w)));\nhalf dist = length(vec) + "
+ "(0.5 - %s.z) * %s.w;\n%s = %s * sample(%s, float2(half2(dist, 0.5))).%s.w;\n",
+ args.fUniformHandler->getUniformCStr(circleDataVar),
+ args.fUniformHandler->getUniformCStr(circleDataVar),
+ args.fUniformHandler->getUniformCStr(circleDataVar),
+ args.fUniformHandler->getUniformCStr(circleDataVar),
+ args.fUniformHandler->getUniformCStr(circleDataVar),
+ args.fUniformHandler->getUniformCStr(circleDataVar), args.fOutputColor,
+ args.fInputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& data,
+ const GrFragmentProcessor& _proc) override {
+ const GrCircleBlurFragmentProcessor& _outer = _proc.cast<GrCircleBlurFragmentProcessor>();
+ auto circleRect = _outer.circleRect;
+ (void)circleRect;
+ auto textureRadius = _outer.textureRadius;
+ (void)textureRadius;
+ auto solidRadius = _outer.solidRadius;
+ (void)solidRadius;
+ GrSurfaceProxy& blurProfileSamplerProxy = *_outer.textureSampler(0).proxy();
+ GrTexture& blurProfileSampler = *blurProfileSamplerProxy.peekTexture();
+ (void)blurProfileSampler;
+ UniformHandle& circleData = circleDataVar;
+ (void)circleData;
+
+ data.set4f(circleData, circleRect.centerX(), circleRect.centerY(), solidRadius,
+ 1.f / textureRadius);
+ }
+ UniformHandle circleDataVar;
+};
+GrGLSLFragmentProcessor* GrCircleBlurFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLCircleBlurFragmentProcessor();
+}
+void GrCircleBlurFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrCircleBlurFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrCircleBlurFragmentProcessor& that = other.cast<GrCircleBlurFragmentProcessor>();
+ (void)that;
+ if (circleRect != that.circleRect) return false;
+ if (textureRadius != that.textureRadius) return false;
+ if (solidRadius != that.solidRadius) return false;
+ if (blurProfileSampler != that.blurProfileSampler) return false;
+ return true;
+}
+GrCircleBlurFragmentProcessor::GrCircleBlurFragmentProcessor(
+ const GrCircleBlurFragmentProcessor& src)
+ : INHERITED(kGrCircleBlurFragmentProcessor_ClassID, src.optimizationFlags())
+ , circleRect(src.circleRect)
+ , textureRadius(src.textureRadius)
+ , solidRadius(src.solidRadius)
+ , blurProfileSampler(src.blurProfileSampler) {
+ this->setTextureSamplerCnt(1);
+}
+std::unique_ptr<GrFragmentProcessor> GrCircleBlurFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleBlurFragmentProcessor(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrCircleBlurFragmentProcessor::onTextureSampler(
+ int index) const {
+ return IthTextureSampler(index, blurProfileSampler);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrCircleBlurFragmentProcessor);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrCircleBlurFragmentProcessor::TestCreate(
+ GrProcessorTestData* testData) {
+ SkScalar wh = testData->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar sigma = testData->fRandom->nextRangeF(1.f, 10.f);
+ SkRect circle = SkRect::MakeWH(wh, wh);
+ return GrCircleBlurFragmentProcessor::Make(testData->proxyProvider(), circle, sigma);
+}
+#endif
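
The profile construction rests on make_unnormalized_half_kernel plus a normalization step so the half kernel sums to 0.5 (the full, mirrored kernel then sums to 1). A standalone sketch of just that step, using an illustrative sigma:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const float sigma = 2.0f;
        // The full kernel is 6 sigmas wide; round up to a whole number of pixels
        // the same way create_circle_profile does.
        int halfSize = (int)std::ceil(6.0f * sigma);
        halfSize = ((halfSize + 1) & ~1) >> 1;

        std::vector<float> half(halfSize);
        const float b = -0.5f / (sigma * sigma);
        float tot = 0.0f, t = 0.5f;  // sample at half-pixel steps from the center
        for (int i = 0; i < halfSize; ++i, t += 1.0f) {
            half[i] = std::exp(t * t * b);
            tot += half[i];
        }
        float sum = 0.0f;
        for (int i = 0; i < halfSize; ++i) {
            half[i] /= 2.0f * tot;   // normalize the half kernel to sum to 0.5
            sum += half[i];
        }
        std::printf("halfSize=%d, half-kernel sum=%.4f\n", halfSize, sum);  // ~0.5000
        return 0;
    }
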
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h
new file mode 100644
index 0000000000..93d24038a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrCircleBlurFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrCircleBlurFragmentProcessor_DEFINED
+#define GrCircleBlurFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrCircleBlurFragmentProcessor : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrProxyProvider*, const SkRect& circle,
+ float sigma);
+ GrCircleBlurFragmentProcessor(const GrCircleBlurFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "CircleBlurFragmentProcessor"; }
+ SkRect circleRect;
+ float textureRadius;
+ float solidRadius;
+ TextureSampler blurProfileSampler;
+
+private:
+ GrCircleBlurFragmentProcessor(SkRect circleRect, float textureRadius, float solidRadius,
+ sk_sp<GrTextureProxy> blurProfileSampler)
+ : INHERITED(kGrCircleBlurFragmentProcessor_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , circleRect(circleRect)
+ , textureRadius(textureRadius)
+ , solidRadius(solidRadius)
+ , blurProfileSampler(std::move(blurProfileSampler)) {
+ this->setTextureSamplerCnt(1);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.cpp
new file mode 100644
index 0000000000..e616fee504
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrCircleEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrCircleEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLCircleEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLCircleEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrCircleEffect& _outer = args.fFp.cast<GrCircleEffect>();
+ (void)_outer;
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto center = _outer.center;
+ (void)center;
+ auto radius = _outer.radius;
+ (void)radius;
+ prevRadius = -1.0;
+ circleVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "circle");
+ fragBuilder->codeAppendf(
+ "float2 prevCenter;\nfloat prevRadius = %f;\nhalf d;\n@if (%d == 2 || %d == 3) {\n "
+ " d = half((length((%s.xy - sk_FragCoord.xy) * %s.w) - 1.0) * %s.z);\n} else {\n "
+ " d = half((1.0 - length((%s.xy - sk_FragCoord.xy) * %s.w)) * %s.z);\n}\n@if "
+ "((%d == 1 || %d == 3) || %d == 4) {\n %s = %s * clamp(d, 0.0, 1.0);\n} else "
+ "{\n %s = d > 0.5 ? %s : half4(0.0);\n}\n",
+ prevRadius, (int)_outer.edgeType, (int)_outer.edgeType,
+ args.fUniformHandler->getUniformCStr(circleVar),
+ args.fUniformHandler->getUniformCStr(circleVar),
+ args.fUniformHandler->getUniformCStr(circleVar),
+ args.fUniformHandler->getUniformCStr(circleVar),
+ args.fUniformHandler->getUniformCStr(circleVar),
+ args.fUniformHandler->getUniformCStr(circleVar), (int)_outer.edgeType,
+ (int)_outer.edgeType, (int)_outer.edgeType, args.fOutputColor, args.fInputColor,
+ args.fOutputColor, args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrCircleEffect& _outer = _proc.cast<GrCircleEffect>();
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto center = _outer.center;
+ (void)center;
+ auto radius = _outer.radius;
+ (void)radius;
+ UniformHandle& circle = circleVar;
+ (void)circle;
+
+ if (radius != prevRadius || center != prevCenter) {
+ SkScalar effectiveRadius = radius;
+ if (GrProcessorEdgeTypeIsInverseFill((GrClipEdgeType)edgeType)) {
+ effectiveRadius -= 0.5f;
+ // When the radius is 0.5 effectiveRadius is 0 which causes an inf * 0 in the
+ // shader.
+ effectiveRadius = SkTMax(0.001f, effectiveRadius);
+ } else {
+ effectiveRadius += 0.5f;
+ }
+ pdman.set4f(circle, center.fX, center.fY, effectiveRadius,
+ SkScalarInvert(effectiveRadius));
+ prevCenter = center;
+ prevRadius = radius;
+ }
+ }
+ SkPoint prevCenter = float2(0);
+ float prevRadius = 0;
+ UniformHandle circleVar;
+};
+GrGLSLFragmentProcessor* GrCircleEffect::onCreateGLSLInstance() const {
+ return new GrGLSLCircleEffect();
+}
+void GrCircleEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)edgeType);
+}
+bool GrCircleEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrCircleEffect& that = other.cast<GrCircleEffect>();
+ (void)that;
+ if (edgeType != that.edgeType) return false;
+ if (center != that.center) return false;
+ if (radius != that.radius) return false;
+ return true;
+}
+GrCircleEffect::GrCircleEffect(const GrCircleEffect& src)
+ : INHERITED(kGrCircleEffect_ClassID, src.optimizationFlags())
+ , edgeType(src.edgeType)
+ , center(src.center)
+ , radius(src.radius) {}
+std::unique_ptr<GrFragmentProcessor> GrCircleEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleEffect(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrCircleEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrCircleEffect::TestCreate(GrProcessorTestData* testData) {
+ SkPoint center;
+ center.fX = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar radius = testData->fRandom->nextRangeF(1.f, 1000.f);
+ GrClipEdgeType et;
+ do {
+ et = (GrClipEdgeType)testData->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ } while (GrClipEdgeType::kHairlineAA == et);
+ return GrCircleEffect::Make(et, center, radius);
+}
+#endif
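
For regular (non-inverse) AA fills, the shader above reduces to clamp((1 - dist/effR) * effR, 0, 1), i.e. a clamped distance to the half-pixel-outset edge, driven by the (center, effectiveRadius, 1/effectiveRadius) uniform that onSetData uploads. A CPU transcription with illustrative coordinates:

    #include <cmath>
    #include <cstdio>

    static float circleCoverage(float px, float py, float cx, float cy, float radius) {
        float effR = radius + 0.5f;  // half-pixel outset for a regular AA fill
        float dx = cx - px, dy = cy - py;
        // (1 - dist/effR) * effR is effR - dist: the distance to the outset edge.
        float d = (1.0f - std::sqrt(dx * dx + dy * dy) / effR) * effR;
        return d < 0.0f ? 0.0f : (d > 1.0f ? 1.0f : d);  // clamp(d, 0, 1)
    }

    int main() {
        std::printf("center:  %.2f\n", circleCoverage(50.0f, 50.0f, 50.0f, 50.0f, 10.0f)); // 1.00
        std::printf("on rim:  %.2f\n", circleCoverage(60.0f, 50.0f, 50.0f, 50.0f, 10.0f)); // 0.50
        std::printf("outside: %.2f\n", circleCoverage(62.0f, 50.0f, 50.0f, 50.0f, 10.0f)); // 0.00
        return 0;
    }
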
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.h
new file mode 100644
index 0000000000..c43ab987ea
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrCircleEffect.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrCircleEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrCircleEffect_DEFINED
+#define GrCircleEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrCircleEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, SkPoint center,
+ float radius) {
+ // A radius below half causes the implicit insetting done by this processor to become
+ // inverted. We could handle this case by making the processor code more complicated.
+ if (radius < .5f && GrProcessorEdgeTypeIsInverseFill(edgeType)) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrCircleEffect(edgeType, center, radius));
+ }
+ GrCircleEffect(const GrCircleEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "CircleEffect"; }
+ GrClipEdgeType edgeType;
+ SkPoint center;
+ float radius;
+
+private:
+ GrCircleEffect(GrClipEdgeType edgeType, SkPoint center, float radius)
+ : INHERITED(kGrCircleEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , edgeType(edgeType)
+ , center(center)
+ , radius(radius) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.cpp
new file mode 100644
index 0000000000..8afed91062
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrColorMatrixFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrColorMatrixFragmentProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLColorMatrixFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLColorMatrixFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrColorMatrixFragmentProcessor& _outer =
+ args.fFp.cast<GrColorMatrixFragmentProcessor>();
+ (void)_outer;
+ auto m = _outer.m;
+ (void)m;
+ auto v = _outer.v;
+ (void)v;
+ auto unpremulInput = _outer.unpremulInput;
+ (void)unpremulInput;
+ auto clampRGBOutput = _outer.clampRGBOutput;
+ (void)clampRGBOutput;
+ auto premulOutput = _outer.premulOutput;
+ (void)premulOutput;
+ mVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4x4_GrSLType, "m");
+ vVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType, "v");
+ fragBuilder->codeAppendf(
+ "half4 inputColor = %s;\n@if (%s) {\n half nonZeroAlpha = max(inputColor.w, "
+ "9.9999997473787516e-05);\n inputColor = half4(inputColor.xyz / nonZeroAlpha, "
+ "nonZeroAlpha);\n}\n%s = %s * inputColor + %s;\n@if (%s) {\n %s = clamp(%s, "
+ "0.0, 1.0);\n} else {\n %s.w = clamp(%s.w, 0.0, 1.0);\n}\n@if (%s) {\n "
+ "%s.xyz *= %s.w;\n}\n",
+ args.fInputColor, (_outer.unpremulInput ? "true" : "false"), args.fOutputColor,
+ args.fUniformHandler->getUniformCStr(mVar),
+ args.fUniformHandler->getUniformCStr(vVar),
+ (_outer.clampRGBOutput ? "true" : "false"), args.fOutputColor, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor, (_outer.premulOutput ? "true" : "false"),
+ args.fOutputColor, args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrColorMatrixFragmentProcessor& _outer = _proc.cast<GrColorMatrixFragmentProcessor>();
+ {
+ const SkMatrix44& mValue = _outer.m;
+ if (mPrev != (mValue)) {
+ mPrev = mValue;
+ pdman.setSkMatrix44(mVar, mValue);
+ }
+ const SkVector4& vValue = _outer.v;
+ if (vPrev != (vValue)) {
+ vPrev = vValue;
+ pdman.set4fv(vVar, 1, vValue.fData);
+ }
+ }
+ }
+ SkMatrix44 mPrev = SkMatrix44(SkMatrix44::kNaN_Constructor);
+ SkVector4 vPrev = SkVector4(SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN);
+ UniformHandle mVar;
+ UniformHandle vVar;
+};
+GrGLSLFragmentProcessor* GrColorMatrixFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLColorMatrixFragmentProcessor();
+}
+void GrColorMatrixFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)unpremulInput);
+ b->add32((int32_t)clampRGBOutput);
+ b->add32((int32_t)premulOutput);
+}
+bool GrColorMatrixFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrColorMatrixFragmentProcessor& that = other.cast<GrColorMatrixFragmentProcessor>();
+ (void)that;
+ if (m != that.m) return false;
+ if (v != that.v) return false;
+ if (unpremulInput != that.unpremulInput) return false;
+ if (clampRGBOutput != that.clampRGBOutput) return false;
+ if (premulOutput != that.premulOutput) return false;
+ return true;
+}
+GrColorMatrixFragmentProcessor::GrColorMatrixFragmentProcessor(
+ const GrColorMatrixFragmentProcessor& src)
+ : INHERITED(kGrColorMatrixFragmentProcessor_ClassID, src.optimizationFlags())
+ , m(src.m)
+ , v(src.v)
+ , unpremulInput(src.unpremulInput)
+ , clampRGBOutput(src.clampRGBOutput)
+ , premulOutput(src.premulOutput) {}
+std::unique_ptr<GrFragmentProcessor> GrColorMatrixFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorMatrixFragmentProcessor(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrColorMatrixFragmentProcessor);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrColorMatrixFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+ float m[20];
+ for (int i = 0; i < 20; ++i) {
+ m[i] = d->fRandom->nextRangeScalar(-10.f, 10.f);
+ }
+ bool unpremul = d->fRandom->nextBool();
+ bool clampRGB = d->fRandom->nextBool();
+ bool premul = d->fRandom->nextBool();
+ return Make(m, unpremul, clampRGB, premul);
+}
+#endif
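[Annotation — illustrative sketch, not part of the upstream diff] mPrev and vPrev above are deliberately seeded with NaN (SkMatrix44::kNaN_Constructor, SK_MScalarNaN): NaN compares unequal to everything, so the first onSetData() comparison always fails and the uniforms get uploaded once without a separate dirty flag. A standalone sketch of the idiom:

    #include <cmath>
    #include <cstdio>

    struct CachedUniform {
        float prev = NAN;  // NaN != v for every v, so the first set() uploads
        void set(float v) {
            if (prev != v) {
                prev = v;
                std::printf("upload %g\n", v);  // stands in for pdman.set1f()
            }
        }
    };

    int main() {
        CachedUniform u;
        u.set(1.0f);  // uploads
        u.set(1.0f);  // skipped
        u.set(2.0f);  // uploads
        return 0;
    }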
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h
new file mode 100644
index 0000000000..a9609aa3dd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrColorMatrixFragmentProcessor.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrColorMatrixFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrColorMatrixFragmentProcessor_DEFINED
+#define GrColorMatrixFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrColorMatrixFragmentProcessor : public GrFragmentProcessor {
+public:
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ SkColor4f color;
+ if (unpremulInput) {
+ color = input.unpremul();
+ } else {
+ color.fR = input.fR;
+ color.fG = input.fG;
+ color.fB = input.fB;
+ color.fA = input.fA;
+ }
+ m.mapScalars(color.vec());
+ color.fR += v.fData[0];
+ color.fG += v.fData[1];
+ color.fB += v.fData[2];
+ color.fA += v.fData[3];
+ color.fA = SkTPin(color.fA, 0.f, 1.f);
+ if (clampRGBOutput) {
+ color.fR = SkTPin(color.fR, 0.f, 1.f);
+ color.fG = SkTPin(color.fG, 0.f, 1.f);
+ color.fB = SkTPin(color.fB, 0.f, 1.f);
+ }
+ if (premulOutput) {
+ return color.premul();
+ } else {
+ return {color.fR, color.fG, color.fB, color.fA};
+ }
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(const float matrix[20], bool unpremulInput,
+ bool clampRGBOutput, bool premulOutput) {
+ SkMatrix44 m44;
+ m44.set4x4(matrix[0], matrix[5], matrix[10], matrix[15], matrix[1], matrix[6], matrix[11],
+ matrix[16], matrix[2], matrix[7], matrix[12], matrix[17], matrix[3], matrix[8],
+ matrix[13], matrix[18]);
+ auto v4 = SkVector4(matrix[4], matrix[9], matrix[14], matrix[19]);
+ return std::unique_ptr<GrFragmentProcessor>(new GrColorMatrixFragmentProcessor(
+ m44, v4, unpremulInput, clampRGBOutput, premulOutput));
+ }
+ GrColorMatrixFragmentProcessor(const GrColorMatrixFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ColorMatrixFragmentProcessor"; }
+ SkMatrix44 m;
+ SkVector4 v;
+ bool unpremulInput;
+ bool clampRGBOutput;
+ bool premulOutput;
+
+private:
+ GrColorMatrixFragmentProcessor(SkMatrix44 m, SkVector4 v, bool unpremulInput,
+ bool clampRGBOutput, bool premulOutput)
+ : INHERITED(kGrColorMatrixFragmentProcessor_ClassID,
+ (OptimizationFlags)kConstantOutputForConstantInput_OptimizationFlag)
+ , m(m)
+ , v(v)
+ , unpremulInput(unpremulInput)
+ , clampRGBOutput(clampRGBOutput)
+ , premulOutput(premulOutput) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
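[Annotation — illustrative sketch, not part of the upstream diff] The 20 floats form a row-major 4x5 color matrix: one [r g b a bias] row per output channel. Make() above peels column 4 (indices 4, 9, 14, 19) into the vector v and hands the remaining 4x4 block to set4x4() column by column. The same transform on the CPU, mirroring constantOutputForConstantInput():

    #include <array>

    // out[row] = dot(m[5*row .. 5*row+3], rgba) + m[5*row + 4]
    std::array<float, 4> applyColorMatrix(const float m[20],
                                          const std::array<float, 4>& rgba) {
        std::array<float, 4> out{};
        for (int row = 0; row < 4; ++row) {
            const float* w = m + 5 * row;
            out[row] = w[0] * rgba[0] + w[1] * rgba[1] + w[2] * rgba[2] +
                       w[3] * rgba[3] + w[4];  // w[4] is the v component
        }
        return out;
    }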
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.cpp
new file mode 100644
index 0000000000..1af5e938c3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrComposeLerpEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrComposeLerpEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLComposeLerpEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLComposeLerpEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrComposeLerpEffect& _outer = args.fFp.cast<GrComposeLerpEffect>();
+ (void)_outer;
+ auto weight = _outer.weight;
+ (void)weight;
+ weightVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat_GrSLType, "weight");
+ SkString _sample290("_sample290");
+ if (_outer.child1_index >= 0) {
+ this->invokeChild(_outer.child1_index, &_sample290, args);
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", _sample290.c_str());
+ }
+ SkString _sample358("_sample358");
+ if (_outer.child2_index >= 0) {
+ this->invokeChild(_outer.child2_index, &_sample358, args);
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", _sample358.c_str());
+ }
+ fragBuilder->codeAppendf("%s = mix(%s ? %s : %s, %s ? %s : %s, half(%s));\n",
+ args.fOutputColor, _outer.child1_index >= 0 ? "true" : "false",
+ _sample290.c_str(), args.fInputColor,
+ _outer.child2_index >= 0 ? "true" : "false", _sample358.c_str(),
+ args.fInputColor, args.fUniformHandler->getUniformCStr(weightVar));
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrComposeLerpEffect& _outer = _proc.cast<GrComposeLerpEffect>();
+ { pdman.set1f(weightVar, (_outer.weight)); }
+ }
+ UniformHandle weightVar;
+};
+GrGLSLFragmentProcessor* GrComposeLerpEffect::onCreateGLSLInstance() const {
+ return new GrGLSLComposeLerpEffect();
+}
+void GrComposeLerpEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrComposeLerpEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrComposeLerpEffect& that = other.cast<GrComposeLerpEffect>();
+ (void)that;
+ if (weight != that.weight) return false;
+ return true;
+}
+GrComposeLerpEffect::GrComposeLerpEffect(const GrComposeLerpEffect& src)
+ : INHERITED(kGrComposeLerpEffect_ClassID, src.optimizationFlags())
+ , child1_index(src.child1_index)
+ , child2_index(src.child2_index)
+ , weight(src.weight) {
+ if (child1_index >= 0) {
+ this->registerChildProcessor(src.childProcessor(child1_index).clone());
+ }
+ if (child2_index >= 0) {
+ this->registerChildProcessor(src.childProcessor(child2_index).clone());
+ }
+}
+std::unique_ptr<GrFragmentProcessor> GrComposeLerpEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrComposeLerpEffect(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.h
new file mode 100644
index 0000000000..02753b7d45
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpEffect.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrComposeLerpEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrComposeLerpEffect_DEFINED
+#define GrComposeLerpEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrComposeLerpEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> child1,
+ std::unique_ptr<GrFragmentProcessor> child2,
+ float weight) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrComposeLerpEffect(std::move(child1), std::move(child2), weight));
+ }
+ GrComposeLerpEffect(const GrComposeLerpEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ComposeLerpEffect"; }
+ int child1_index = -1;
+ int child2_index = -1;
+ float weight;
+
+private:
+ GrComposeLerpEffect(std::unique_ptr<GrFragmentProcessor> child1,
+ std::unique_ptr<GrFragmentProcessor> child2, float weight)
+ : INHERITED(kGrComposeLerpEffect_ClassID, kNone_OptimizationFlags), weight(weight) {
+ if (child1) {
+ child1_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(child1));
+ }
+ if (child2) {
+ child2_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(child2));
+ }
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
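[Annotation — illustrative sketch, not part of the upstream diff] The emitted shader reduces to a single mix(): each absent child falls back to the input color, and the uniform weight blends the two results. Per channel:

    #include <optional>

    // result = mix(child1 ? child1(in) : in, child2 ? child2(in) : in, w)
    float composeLerp(std::optional<float> c1, std::optional<float> c2,
                      float in, float weight) {
        float a = c1.value_or(in);
        float b = c2.value_or(in);
        return a + (b - a) * weight;  // GLSL mix(a, b, weight)
    }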
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.cpp
new file mode 100644
index 0000000000..8a6bc6defc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrComposeLerpRedEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrComposeLerpRedEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLComposeLerpRedEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLComposeLerpRedEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrComposeLerpRedEffect& _outer = args.fFp.cast<GrComposeLerpRedEffect>();
+ (void)_outer;
+ SkString _sample292("_sample292");
+ if (_outer.child1_index >= 0) {
+ this->invokeChild(_outer.child1_index, &_sample292, args);
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", _sample292.c_str());
+ }
+ SkString _sample360("_sample360");
+ if (_outer.child2_index >= 0) {
+ this->invokeChild(_outer.child2_index, &_sample360, args);
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", _sample360.c_str());
+ }
+ SkString _sample411("_sample411");
+ this->invokeChild(_outer.lerp_index, &_sample411, args);
+ fragBuilder->codeAppendf("%s = mix(%s ? %s : %s, %s ? %s : %s, %s.x);\n", args.fOutputColor,
+ _outer.child1_index >= 0 ? "true" : "false", _sample292.c_str(),
+ args.fInputColor, _outer.child2_index >= 0 ? "true" : "false",
+ _sample360.c_str(), args.fInputColor, _sample411.c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrComposeLerpRedEffect::onCreateGLSLInstance() const {
+ return new GrGLSLComposeLerpRedEffect();
+}
+void GrComposeLerpRedEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrComposeLerpRedEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrComposeLerpRedEffect& that = other.cast<GrComposeLerpRedEffect>();
+ (void)that;
+ return true;
+}
+GrComposeLerpRedEffect::GrComposeLerpRedEffect(const GrComposeLerpRedEffect& src)
+ : INHERITED(kGrComposeLerpRedEffect_ClassID, src.optimizationFlags())
+ , child1_index(src.child1_index)
+ , child2_index(src.child2_index)
+ , lerp_index(src.lerp_index) {
+ if (child1_index >= 0) {
+ this->registerChildProcessor(src.childProcessor(child1_index).clone());
+ }
+ if (child2_index >= 0) {
+ this->registerChildProcessor(src.childProcessor(child2_index).clone());
+ }
+ this->registerChildProcessor(src.childProcessor(lerp_index).clone());
+}
+std::unique_ptr<GrFragmentProcessor> GrComposeLerpRedEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrComposeLerpRedEffect(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.h
new file mode 100644
index 0000000000..d48725fdb7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrComposeLerpRedEffect.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrComposeLerpRedEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrComposeLerpRedEffect_DEFINED
+#define GrComposeLerpRedEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrComposeLerpRedEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> child1,
+ std::unique_ptr<GrFragmentProcessor> child2,
+ std::unique_ptr<GrFragmentProcessor> lerp) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrComposeLerpRedEffect(std::move(child1), std::move(child2), std::move(lerp)));
+ }
+ GrComposeLerpRedEffect(const GrComposeLerpRedEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ComposeLerpRedEffect"; }
+ int child1_index = -1;
+ int child2_index = -1;
+ int lerp_index = -1;
+
+private:
+ GrComposeLerpRedEffect(std::unique_ptr<GrFragmentProcessor> child1,
+ std::unique_ptr<GrFragmentProcessor> child2,
+ std::unique_ptr<GrFragmentProcessor> lerp)
+ : INHERITED(kGrComposeLerpRedEffect_ClassID, kNone_OptimizationFlags) {
+ if (child1) {
+ child1_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(child1));
+ }
+ if (child2) {
+ child2_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(child2));
+ }
+ SkASSERT(lerp);
+ lerp_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(lerp));
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
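[Annotation — illustrative sketch, not part of the upstream diff] This variant is GrComposeLerpEffect with a per-pixel blend factor: the mandatory lerp child is evaluated and its red channel (the .x in the emitted mix()) replaces the uniform weight:

    // result = mix(child1 ? c1 : in, child2 ? c2 : in, lerpChild(in).r)
    float composeLerpRed(float a, float b, float lerpRed) {
        return a + (b - a) * lerpRed;
    }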
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.cpp
new file mode 100644
index 0000000000..c16796deda
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrConfigConversionEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrConfigConversionEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLConfigConversionEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLConfigConversionEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrConfigConversionEffect& _outer = args.fFp.cast<GrConfigConversionEffect>();
+ (void)_outer;
+ auto pmConversion = _outer.pmConversion;
+ (void)pmConversion;
+
+ fragBuilder->forceHighPrecision();
+ fragBuilder->codeAppendf(
+ "%s = floor(%s * 255.0 + 0.5) / 255.0;\n@switch (%d) {\n case 0:\n "
+ "%s.xyz = floor((%s.xyz * %s.w) * 255.0 + 0.5) / 255.0;\n break;\n case "
+ "1:\n %s.xyz = %s.w <= 0.0 ? half3(0.0) : floor((%s.xyz / %s.w) * 255.0 + "
+ "0.5) / 255.0;\n break;\n}\n",
+ args.fOutputColor, args.fInputColor, (int)_outer.pmConversion, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor, args.fOutputColor, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrConfigConversionEffect::onCreateGLSLInstance() const {
+ return new GrGLSLConfigConversionEffect();
+}
+void GrConfigConversionEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)pmConversion);
+}
+bool GrConfigConversionEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrConfigConversionEffect& that = other.cast<GrConfigConversionEffect>();
+ (void)that;
+ if (pmConversion != that.pmConversion) return false;
+ return true;
+}
+GrConfigConversionEffect::GrConfigConversionEffect(const GrConfigConversionEffect& src)
+ : INHERITED(kGrConfigConversionEffect_ClassID, src.optimizationFlags())
+ , pmConversion(src.pmConversion) {}
+std::unique_ptr<GrFragmentProcessor> GrConfigConversionEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrConfigConversionEffect(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConfigConversionEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrConfigConversionEffect::TestCreate(
+ GrProcessorTestData* data) {
+ PMConversion pmConv = static_cast<PMConversion>(
+ data->fRandom->nextULessThan((int)PMConversion::kPMConversionCnt));
+ return std::unique_ptr<GrFragmentProcessor>(new GrConfigConversionEffect(pmConv));
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.h
new file mode 100644
index 0000000000..acbc4cc30f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrConfigConversionEffect.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrConfigConversionEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrConfigConversionEffect_DEFINED
+#define GrConfigConversionEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrConfigConversionEffect : public GrFragmentProcessor {
+public:
+ static bool TestForPreservingPMConversions(GrContext* context) {
+ static constexpr int kSize = 256;
+ static constexpr GrColorType kColorType = GrColorType::kRGBA_8888;
+ SkAutoTMalloc<uint32_t> data(kSize * kSize * 3);
+ uint32_t* srcData = data.get();
+ uint32_t* firstRead = data.get() + kSize * kSize;
+ uint32_t* secondRead = data.get() + 2 * kSize * kSize;
+
+        // Fill with every possible premultiplied (alpha, color-channel) value. There will be
+        // 256-y duplicate values in row y. We set r, g, and b to the same value since they are
+        // handled identically.
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x < kSize; ++x) {
+ uint8_t* color = reinterpret_cast<uint8_t*>(&srcData[kSize * y + x]);
+ color[3] = y;
+ color[2] = SkTMin(x, y);
+ color[1] = SkTMin(x, y);
+ color[0] = SkTMin(x, y);
+ }
+ }
+ memset(firstRead, 0, kSize * kSize * sizeof(uint32_t));
+ memset(secondRead, 0, kSize * kSize * sizeof(uint32_t));
+
+ const SkImageInfo ii =
+ SkImageInfo::Make(kSize, kSize, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
+
+ auto readRTC = context->priv().makeDeferredRenderTargetContext(SkBackingFit::kExact, kSize,
+ kSize, kColorType, nullptr);
+ auto tempRTC = context->priv().makeDeferredRenderTargetContext(SkBackingFit::kExact, kSize,
+ kSize, kColorType, nullptr);
+ if (!readRTC || !readRTC->asTextureProxy() || !tempRTC) {
+ return false;
+ }
+        // Add a discard to appease the Vulkan validation warning about loading uninitialized
+        // data on draw.
+ readRTC->discard();
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ SkPixmap pixmap(ii, srcData, 4 * kSize);
+
+        // This function is only ever called when the GrContext has a GrGpu, since we call
+        // readPixels here. The pixel data is therefore uploaded immediately, so we don't need to
+        // keep it alive in the proxy, and the ReleaseProc can be nullptr.
+ sk_sp<SkImage> image = SkImage::MakeFromRaster(pixmap, nullptr, nullptr);
+ GrColorType dataColorType = SkColorTypeToGrColorType(image->colorType());
+ sk_sp<GrTextureProxy> dataProxy = proxyProvider->createTextureProxy(
+ std::move(image), 1, SkBudgeted::kYes, SkBackingFit::kExact);
+ if (!dataProxy) {
+ return false;
+ }
+
+ static const SkRect kRect = SkRect::MakeIWH(kSize, kSize);
+
+ // We do a PM->UPM draw from dataTex to readTex and read the data. Then we do a UPM->PM draw
+ // from readTex to tempTex followed by a PM->UPM draw to readTex and finally read the data.
+        // We then verify that the two reads produced the same values.
+
+ GrPaint paint1;
+ GrPaint paint2;
+ GrPaint paint3;
+ std::unique_ptr<GrFragmentProcessor> pmToUPM(
+ new GrConfigConversionEffect(PMConversion::kToUnpremul));
+ std::unique_ptr<GrFragmentProcessor> upmToPM(
+ new GrConfigConversionEffect(PMConversion::kToPremul));
+
+ paint1.addColorTextureProcessor(dataProxy, dataColorType, SkMatrix::I());
+ paint1.addColorFragmentProcessor(pmToUPM->clone());
+ paint1.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readRTC->fillRectToRect(GrNoClip(), std::move(paint1), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+ if (!readRTC->readPixels(ii, firstRead, 0, {0, 0})) {
+ return false;
+ }
+
+        // Add a discard to appease the Vulkan validation warning about loading uninitialized
+        // data on draw.
+ tempRTC->discard();
+
+ paint2.addColorTextureProcessor(readRTC->asTextureProxyRef(), kColorType, SkMatrix::I());
+ paint2.addColorFragmentProcessor(std::move(upmToPM));
+ paint2.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ tempRTC->fillRectToRect(GrNoClip(), std::move(paint2), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+
+ paint3.addColorTextureProcessor(tempRTC->asTextureProxyRef(), kColorType, SkMatrix::I());
+ paint3.addColorFragmentProcessor(std::move(pmToUPM));
+ paint3.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readRTC->fillRectToRect(GrNoClip(), std::move(paint3), GrAA::kNo, SkMatrix::I(), kRect,
+ kRect);
+
+ if (!readRTC->readPixels(ii, secondRead, 0, {0, 0})) {
+ return false;
+ }
+
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x <= y; ++x) {
+ if (firstRead[kSize * y + x] != secondRead[kSize * y + x]) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp,
+ PMConversion pmConversion) {
+ if (!fp) {
+ return nullptr;
+ }
+ std::unique_ptr<GrFragmentProcessor> ccFP(new GrConfigConversionEffect(pmConversion));
+ std::unique_ptr<GrFragmentProcessor> fpPipeline[] = {std::move(fp), std::move(ccFP)};
+ return GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+ }
+ GrConfigConversionEffect(const GrConfigConversionEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ConfigConversionEffect"; }
+ PMConversion pmConversion;
+
+private:
+ GrConfigConversionEffect(PMConversion pmConversion)
+ : INHERITED(kGrConfigConversionEffect_ClassID, kNone_OptimizationFlags)
+ , pmConversion(pmConversion) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
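[Annotation — illustrative sketch, not part of the upstream diff] TestForPreservingPMConversions() asks whether the byte-quantized premul -> unpremul -> premul -> unpremul chain reproduces the first unpremul result. The shader's floor(x * 255 + 0.5) / 255 quantization can be modeled on the CPU; the GPU version exists because real hardware may round differently:

    #include <cmath>
    #include <cstdio>

    float q(float x) { return std::floor(x * 255.0f + 0.5f) / 255.0f; }
    float toUnpremul(float c, float a) { return a <= 0.0f ? 0.0f : q(c / a); }
    float toPremul(float c, float a) { return q(c * a); }

    int main() {
        // Mirrors the srcData fill above: every premultiplied pair, with
        // the channel value clamped to alpha.
        int mismatches = 0;
        for (int a = 0; a < 256; ++a) {
            for (int c = 0; c <= a; ++c) {
                float af = a / 255.0f;
                float first = toUnpremul(c / 255.0f, af);
                float second = toUnpremul(toPremul(first, af), af);
                if (first != second) ++mismatches;
            }
        }
        std::printf("round-trip mismatches: %d\n", mismatches);
        return 0;
    }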
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.cpp
new file mode 100644
index 0000000000..8f3079bff3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrConstColorProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrConstColorProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLConstColorProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLConstColorProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrConstColorProcessor& _outer = args.fFp.cast<GrConstColorProcessor>();
+ (void)_outer;
+ auto color = _outer.color;
+ (void)color;
+ auto mode = _outer.mode;
+ (void)mode;
+ colorVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType, "color");
+ fragBuilder->codeAppendf(
+ "@switch (%d) {\n case 0:\n %s = %s;\n break;\n case 1:\n "
+ " %s = %s * %s;\n break;\n case 2:\n %s = %s.w * %s;\n "
+ "break;\n}\n",
+ (int)_outer.mode, args.fOutputColor, args.fUniformHandler->getUniformCStr(colorVar),
+ args.fOutputColor, args.fInputColor, args.fUniformHandler->getUniformCStr(colorVar),
+ args.fOutputColor, args.fInputColor,
+ args.fUniformHandler->getUniformCStr(colorVar));
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrConstColorProcessor& _outer = _proc.cast<GrConstColorProcessor>();
+ {
+ const SkPMColor4f& colorValue = _outer.color;
+ if (colorPrev != colorValue) {
+ colorPrev = colorValue;
+ pdman.set4fv(colorVar, 1, colorValue.vec());
+ }
+ }
+ }
+ SkPMColor4f colorPrev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ UniformHandle colorVar;
+};
+GrGLSLFragmentProcessor* GrConstColorProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLConstColorProcessor();
+}
+void GrConstColorProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)mode);
+}
+bool GrConstColorProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrConstColorProcessor& that = other.cast<GrConstColorProcessor>();
+ (void)that;
+ if (color != that.color) return false;
+ if (mode != that.mode) return false;
+ return true;
+}
+GrConstColorProcessor::GrConstColorProcessor(const GrConstColorProcessor& src)
+ : INHERITED(kGrConstColorProcessor_ClassID, src.optimizationFlags())
+ , color(src.color)
+ , mode(src.mode) {}
+std::unique_ptr<GrFragmentProcessor> GrConstColorProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrConstColorProcessor(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConstColorProcessor);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrConstColorProcessor::TestCreate(GrProcessorTestData* d) {
+ SkPMColor4f color;
+ int colorPicker = d->fRandom->nextULessThan(3);
+ switch (colorPicker) {
+ case 0: {
+ uint32_t a = d->fRandom->nextULessThan(0x100);
+ uint32_t r = d->fRandom->nextULessThan(a + 1);
+ uint32_t g = d->fRandom->nextULessThan(a + 1);
+ uint32_t b = d->fRandom->nextULessThan(a + 1);
+ color = SkPMColor4f::FromBytes_RGBA(GrColorPackRGBA(r, g, b, a));
+ break;
+ }
+ case 1:
+ color = SK_PMColor4fTRANSPARENT;
+ break;
+ case 2:
+ uint32_t c = d->fRandom->nextULessThan(0x100);
+ color = SkPMColor4f::FromBytes_RGBA(c | (c << 8) | (c << 16) | (c << 24));
+ break;
+ }
+ InputMode mode = static_cast<InputMode>(d->fRandom->nextULessThan(kInputModeCnt));
+ return GrConstColorProcessor::Make(color, mode);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.h
new file mode 100644
index 0000000000..df64bce9b7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrConstColorProcessor.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrConstColorProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrConstColorProcessor_DEFINED
+#define GrConstColorProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrConstColorProcessor : public GrFragmentProcessor {
+public:
+    enum class InputMode { kIgnore = 0, kModulateRGBA = 1, kModulateA = 2, kLast = kModulateA };
+
+ static const int kInputModeCnt = (int)InputMode::kLast + 1;
+
+ static OptimizationFlags OptFlags(const SkPMColor4f& color, InputMode mode) {
+ OptimizationFlags flags = kConstantOutputForConstantInput_OptimizationFlag;
+ if (mode != InputMode::kIgnore) {
+ flags |= kCompatibleWithCoverageAsAlpha_OptimizationFlag;
+ }
+ if (color.isOpaque()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ switch (mode) {
+ case InputMode::kIgnore:
+ return color;
+ case InputMode::kModulateA:
+ return color * input.fA;
+ case InputMode::kModulateRGBA:
+ return color * input;
+ }
+ SK_ABORT("Unexpected mode");
+ }
+ static std::unique_ptr<GrFragmentProcessor> Make(SkPMColor4f color, InputMode mode) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrConstColorProcessor(color, mode));
+ }
+ GrConstColorProcessor(const GrConstColorProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ConstColorProcessor"; }
+ SkPMColor4f color;
+ InputMode mode;
+
+private:
+ GrConstColorProcessor(SkPMColor4f color, InputMode mode)
+ : INHERITED(kGrConstColorProcessor_ClassID, (OptimizationFlags)OptFlags(color, mode))
+ , color(color)
+ , mode(mode) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
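[Annotation — illustrative sketch, not part of the upstream diff] A worked example of the three input modes, following constantOutputForConstantInput() above (premultiplied colors throughout):

    #include <array>

    using Color = std::array<float, 4>;  // premultiplied RGBA

    Color constColorOutput(const Color& color, const Color& in, int mode) {
        switch (mode) {
            case 0:  // kIgnore: input discarded entirely
                return color;
            case 2:  // kModulateA: scale by input alpha
                return {color[0] * in[3], color[1] * in[3],
                        color[2] * in[3], color[3] * in[3]};
            default:  // kModulateRGBA (1): componentwise product
                return {color[0] * in[0], color[1] * in[1],
                        color[2] * in[2], color[3] * in[3]};
        }
    }

    // With color = {0.5, 0, 0, 0.5} and in = {0.2, 0.2, 0.2, 0.8}:
    //   kIgnore       -> {0.5, 0, 0, 0.5}
    //   kModulateA    -> {0.4, 0, 0, 0.4}
    //   kModulateRGBA -> {0.1, 0, 0, 0.4}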
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.cpp
new file mode 100644
index 0000000000..b28f9612f1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrEllipseEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrEllipseEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLEllipseEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLEllipseEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrEllipseEffect& _outer = args.fFp.cast<GrEllipseEffect>();
+ (void)_outer;
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto center = _outer.center;
+ (void)center;
+ auto radii = _outer.radii;
+ (void)radii;
+ prevRadii = float2(-1.0);
+ medPrecision = !sk_Caps.floatIs32Bits;
+ ellipseVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "ellipse");
+ if (medPrecision) {
+ scaleVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat2_GrSLType,
+ "scale");
+ }
+ fragBuilder->codeAppendf(
+ "float2 prevCenter;\nfloat2 prevRadii = float2(%f, %f);\nbool medPrecision = "
+ "%s;\nfloat2 d = sk_FragCoord.xy - %s.xy;\n@if (medPrecision) {\n d *= "
+ "%s.y;\n}\nfloat2 Z = d * %s.zw;\nfloat implicit = dot(Z, d) - 1.0;\nfloat "
+ "grad_dot = 4.0 * dot(Z, Z);\n@if (medPrecision) {\n grad_dot = max(grad_dot, "
+ "6.1036000261083245e-05);\n} else {\n grad_dot = max(grad_dot, "
+ "1.1754999560161448e-38);\n}\nfloat approx_dist = implicit * "
+ "inversesqrt(grad_dot);\n@if (medPrecision) {\n approx_dist *= %s.x;\n}\nhalf "
+ "alph",
+ prevRadii.fX, prevRadii.fY, (medPrecision ? "true" : "false"),
+ args.fUniformHandler->getUniformCStr(ellipseVar),
+ scaleVar.isValid() ? args.fUniformHandler->getUniformCStr(scaleVar) : "float2(0)",
+ args.fUniformHandler->getUniformCStr(ellipseVar),
+ scaleVar.isValid() ? args.fUniformHandler->getUniformCStr(scaleVar) : "float2(0)");
+ fragBuilder->codeAppendf(
+ "a;\n@switch (%d) {\n case 0:\n alpha = approx_dist > 0.0 ? 0.0 : 1.0;\n "
+ " break;\n case 1:\n alpha = clamp(0.5 - half(approx_dist), 0.0, "
+ "1.0);\n break;\n case 2:\n alpha = approx_dist > 0.0 ? 1.0 : "
+ "0.0;\n break;\n case 3:\n alpha = clamp(0.5 + half(approx_dist), "
+ "0.0, 1.0);\n break;\n default:\n discard;\n}\n%s = %s * alpha;\n",
+ (int)_outer.edgeType, args.fOutputColor, args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrEllipseEffect& _outer = _proc.cast<GrEllipseEffect>();
+ auto edgeType = _outer.edgeType;
+ (void)edgeType;
+ auto center = _outer.center;
+ (void)center;
+ auto radii = _outer.radii;
+ (void)radii;
+ UniformHandle& ellipse = ellipseVar;
+ (void)ellipse;
+ UniformHandle& scale = scaleVar;
+ (void)scale;
+
+ if (radii != prevRadii || center != prevCenter) {
+ float invRXSqd;
+ float invRYSqd;
+ // If we're using a scale factor to work around precision issues, choose the larger
+ // radius as the scale factor. The inv radii need to be pre-adjusted by the scale
+ // factor.
+ if (scale.isValid()) {
+ if (radii.fX > radii.fY) {
+ invRXSqd = 1.f;
+ invRYSqd = (radii.fX * radii.fX) / (radii.fY * radii.fY);
+ pdman.set2f(scale, radii.fX, 1.f / radii.fX);
+ } else {
+ invRXSqd = (radii.fY * radii.fY) / (radii.fX * radii.fX);
+ invRYSqd = 1.f;
+ pdman.set2f(scale, radii.fY, 1.f / radii.fY);
+ }
+ } else {
+ invRXSqd = 1.f / (radii.fX * radii.fX);
+ invRYSqd = 1.f / (radii.fY * radii.fY);
+ }
+ pdman.set4f(ellipse, center.fX, center.fY, invRXSqd, invRYSqd);
+ prevCenter = center;
+ prevRadii = radii;
+ }
+ }
+ SkPoint prevCenter = float2(0);
+ SkPoint prevRadii = float2(0);
+ bool medPrecision = false;
+ UniformHandle ellipseVar;
+ UniformHandle scaleVar;
+};
+GrGLSLFragmentProcessor* GrEllipseEffect::onCreateGLSLInstance() const {
+ return new GrGLSLEllipseEffect();
+}
+void GrEllipseEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)edgeType);
+}
+bool GrEllipseEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrEllipseEffect& that = other.cast<GrEllipseEffect>();
+ (void)that;
+ if (edgeType != that.edgeType) return false;
+ if (center != that.center) return false;
+ if (radii != that.radii) return false;
+ return true;
+}
+GrEllipseEffect::GrEllipseEffect(const GrEllipseEffect& src)
+ : INHERITED(kGrEllipseEffect_ClassID, src.optimizationFlags())
+ , edgeType(src.edgeType)
+ , center(src.center)
+ , radii(src.radii) {}
+std::unique_ptr<GrFragmentProcessor> GrEllipseEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrEllipseEffect(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrEllipseEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrEllipseEffect::TestCreate(GrProcessorTestData* testData) {
+ SkPoint center;
+ center.fX = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = testData->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar rx = testData->fRandom->nextRangeF(0.f, 1000.f);
+ SkScalar ry = testData->fRandom->nextRangeF(0.f, 1000.f);
+ GrClipEdgeType et;
+ do {
+ et = (GrClipEdgeType)testData->fRandom->nextULessThan(kGrClipEdgeTypeCnt);
+ } while (GrClipEdgeType::kHairlineAA == et);
+ return GrEllipseEffect::Make(et, center, SkPoint::Make(rx, ry),
+ *testData->caps()->shaderCaps());
+}
+#endif
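[Annotation — illustrative sketch, not part of the upstream diff] The emitted shader evaluates a first-order (Sampson-style) approximation of the signed distance to the ellipse: with Z = d * (1/rx^2, 1/ry^2), it computes implicit = dot(Z, d) - 1 and dist ≈ implicit / sqrt(4 * dot(Z, Z)). A standalone CPU transcription, omitting the medium-precision rescaling path handled by scaleVar:

    #include <algorithm>
    #include <cmath>

    // (dx, dy): fragment position relative to the ellipse center.
    float approxEllipseDistance(float dx, float dy, float rx, float ry) {
        float zx = dx / (rx * rx);
        float zy = dy / (ry * ry);
        float implicit = zx * dx + zy * dy - 1.0f;  // < 0 inside, > 0 outside
        float gradDot = 4.0f * (zx * zx + zy * zy);
        gradDot = std::max(gradDot, 1.1754999560161448e-38f);  // shader's guard
        return implicit / std::sqrt(gradDot);  // ~ signed distance in pixels
    }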
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.h
new file mode 100644
index 0000000000..a86427268e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrEllipseEffect.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrEllipseEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrEllipseEffect_DEFINED
+#define GrEllipseEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrShaderCaps.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrEllipseEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrClipEdgeType edgeType, SkPoint center,
+ SkPoint radii, const GrShaderCaps& caps) {
+ // Small radii produce bad results on devices without full float.
+ if (!caps.floatIs32Bits() && (radii.fX < 0.5f || radii.fY < 0.5f)) {
+ return nullptr;
+ }
+        // Very narrow ellipses produce bad results on devices without full float.
+ if (!caps.floatIs32Bits() && (radii.fX > 255 * radii.fY || radii.fY > 255 * radii.fX)) {
+ return nullptr;
+ }
+        // Very large ellipses produce bad results on devices without full float.
+ if (!caps.floatIs32Bits() && (radii.fX > 16384 || radii.fY > 16384)) {
+ return nullptr;
+ }
+ return std::unique_ptr<GrFragmentProcessor>(new GrEllipseEffect(edgeType, center, radii));
+ }
+ GrEllipseEffect(const GrEllipseEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "EllipseEffect"; }
+ GrClipEdgeType edgeType;
+ SkPoint center;
+ SkPoint radii;
+
+private:
+ GrEllipseEffect(GrClipEdgeType edgeType, SkPoint center, SkPoint radii)
+ : INHERITED(kGrEllipseEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , edgeType(edgeType)
+ , center(center)
+ , radii(radii) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.cpp
new file mode 100644
index 0000000000..3c9f24dc6a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrLumaColorFilterEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrLumaColorFilterEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLLumaColorFilterEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLLumaColorFilterEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrLumaColorFilterEffect& _outer = args.fFp.cast<GrLumaColorFilterEffect>();
+ (void)_outer;
+ fragBuilder->codeAppendf(
+ "\nhalf luma = clamp(dot(half3(0.2125999927520752, 0.71520000696182251, "
+ "0.072200000286102295), %s.xyz), 0.0, 1.0);\n%s = half4(0.0, 0.0, 0.0, luma);\n",
+ args.fInputColor, args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrLumaColorFilterEffect::onCreateGLSLInstance() const {
+ return new GrGLSLLumaColorFilterEffect();
+}
+void GrLumaColorFilterEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrLumaColorFilterEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrLumaColorFilterEffect& that = other.cast<GrLumaColorFilterEffect>();
+ (void)that;
+ return true;
+}
+GrLumaColorFilterEffect::GrLumaColorFilterEffect(const GrLumaColorFilterEffect& src)
+ : INHERITED(kGrLumaColorFilterEffect_ClassID, src.optimizationFlags()) {}
+std::unique_ptr<GrFragmentProcessor> GrLumaColorFilterEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrLumaColorFilterEffect(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.h
new file mode 100644
index 0000000000..91939217d1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrLumaColorFilterEffect.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrLumaColorFilterEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrLumaColorFilterEffect_DEFINED
+#define GrLumaColorFilterEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrLumaColorFilterEffect : public GrFragmentProcessor {
+public:
+#include "include/private/SkColorData.h"
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ float luma = SK_ITU_BT709_LUM_COEFF_R * input.fR + SK_ITU_BT709_LUM_COEFF_G * input.fG +
+ SK_ITU_BT709_LUM_COEFF_B * input.fB;
+ return {0, 0, 0, SkTPin(luma, 0.0f, 1.0f)};
+ }
+ static std::unique_ptr<GrFragmentProcessor> Make() {
+ return std::unique_ptr<GrFragmentProcessor>(new GrLumaColorFilterEffect());
+ }
+ GrLumaColorFilterEffect(const GrLumaColorFilterEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "LumaColorFilterEffect"; }
+
+private:
+ GrLumaColorFilterEffect()
+ : INHERITED(kGrLumaColorFilterEffect_ClassID,
+ (OptimizationFlags)kConstantOutputForConstantInput_OptimizationFlag) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
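[Annotation — illustrative sketch, not part of the upstream diff] Both the shader and the constant-input path compute BT.709 luma and return it in the alpha channel only, with RGB forced to zero:

    #include <algorithm>

    // BT.709 luma coefficients, as hard-coded in the shader above;
    // the effect's output color is (0, 0, 0, luma).
    float bt709Luma(float r, float g, float b) {
        float luma = 0.2126f * r + 0.7152f * g + 0.0722f * b;
        return std::clamp(luma, 0.0f, 1.0f);
    }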
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.cpp
new file mode 100644
index 0000000000..4f7a2eaefb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrMagnifierEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrMagnifierEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLMagnifierEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLMagnifierEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrMagnifierEffect& _outer = args.fFp.cast<GrMagnifierEffect>();
+ (void)_outer;
+ auto bounds = _outer.bounds;
+ (void)bounds;
+ auto srcRect = _outer.srcRect;
+ (void)srcRect;
+ auto xInvZoom = _outer.xInvZoom;
+ (void)xInvZoom;
+ auto yInvZoom = _outer.yInvZoom;
+ (void)yInvZoom;
+ auto xInvInset = _outer.xInvInset;
+ (void)xInvInset;
+ auto yInvInset = _outer.yInvInset;
+ (void)yInvInset;
+ boundsUniformVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat4_GrSLType, "boundsUniform");
+ xInvZoomVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat_GrSLType, "xInvZoom");
+ yInvZoomVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat_GrSLType, "yInvZoom");
+ xInvInsetVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat_GrSLType, "xInvInset");
+ yInvInsetVar = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kFloat_GrSLType, "yInvInset");
+ offsetVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType, "offset");
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "float2 coord = %s;\nfloat2 zoom_coord = float2(%s) + coord * float2(%s, "
+ "%s);\nfloat2 delta = (coord - %s.xy) * %s.zw;\ndelta = min(delta, "
+ "float2(half2(1.0, 1.0)) - delta);\ndelta *= float2(%s, %s);\nfloat weight = "
+ "0.0;\nif (delta.x < 2.0 && delta.y < 2.0) {\n delta = float2(half2(2.0, 2.0)) "
+ "- delta;\n float dist = length(delta);\n dist = max(2.0 - dist, 0.0);\n "
+ "weight = min(dist * dist, 1.0);\n} else {\n float2 delta_squared = delta * "
+ "delta;\n weight = min(min(delta_squared.x, delta_square",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ args.fUniformHandler->getUniformCStr(offsetVar),
+ args.fUniformHandler->getUniformCStr(xInvZoomVar),
+ args.fUniformHandler->getUniformCStr(yInvZoomVar),
+ args.fUniformHandler->getUniformCStr(boundsUniformVar),
+ args.fUniformHandler->getUniformCStr(boundsUniformVar),
+ args.fUniformHandler->getUniformCStr(xInvInsetVar),
+ args.fUniformHandler->getUniformCStr(yInvInsetVar));
+ fragBuilder->codeAppendf(
+ "d.y), 1.0);\n}\n%s = sample(%s, mix(coord, zoom_coord, weight)).%s;\n",
+ args.fOutputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrMagnifierEffect& _outer = _proc.cast<GrMagnifierEffect>();
+ {
+ pdman.set1f(xInvZoomVar, (_outer.xInvZoom));
+ pdman.set1f(yInvZoomVar, (_outer.yInvZoom));
+ pdman.set1f(xInvInsetVar, (_outer.xInvInset));
+ pdman.set1f(yInvInsetVar, (_outer.yInvInset));
+ }
+ GrSurfaceProxy& srcProxy = *_outer.textureSampler(0).proxy();
+ GrTexture& src = *srcProxy.peekTexture();
+ (void)src;
+ auto bounds = _outer.bounds;
+ (void)bounds;
+ UniformHandle& boundsUniform = boundsUniformVar;
+ (void)boundsUniform;
+ auto srcRect = _outer.srcRect;
+ (void)srcRect;
+ UniformHandle& xInvZoom = xInvZoomVar;
+ (void)xInvZoom;
+ UniformHandle& yInvZoom = yInvZoomVar;
+ (void)yInvZoom;
+ UniformHandle& xInvInset = xInvInsetVar;
+ (void)xInvInset;
+ UniformHandle& yInvInset = yInvInsetVar;
+ (void)yInvInset;
+ UniformHandle& offset = offsetVar;
+ (void)offset;
+
+ SkScalar invW = 1.0f / src.width();
+ SkScalar invH = 1.0f / src.height();
+
+ {
+ SkScalar y = srcRect.y() * invH;
+ if (srcProxy.origin() != kTopLeft_GrSurfaceOrigin) {
+ y = 1.0f - (srcRect.height() / bounds.height()) - y;
+ }
+
+ pdman.set2f(offset, srcRect.x() * invW, y);
+ }
+
+ {
+ SkScalar y = bounds.y() * invH;
+ SkScalar hSign = 1.f;
+ if (srcProxy.origin() != kTopLeft_GrSurfaceOrigin) {
+ y = 1.0f - bounds.y() * invH;
+ hSign = -1.f;
+ }
+
+ pdman.set4f(boundsUniform,
+ bounds.x() * invW,
+ y,
+ SkIntToScalar(src.width()) / bounds.width(),
+ hSign * SkIntToScalar(src.height()) / bounds.height());
+ }
+ }
+ UniformHandle boundsUniformVar;
+ UniformHandle offsetVar;
+ UniformHandle xInvZoomVar;
+ UniformHandle yInvZoomVar;
+ UniformHandle xInvInsetVar;
+ UniformHandle yInvInsetVar;
+};
+GrGLSLFragmentProcessor* GrMagnifierEffect::onCreateGLSLInstance() const {
+ return new GrGLSLMagnifierEffect();
+}
+void GrMagnifierEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrMagnifierEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrMagnifierEffect& that = other.cast<GrMagnifierEffect>();
+ (void)that;
+ if (src != that.src) return false;
+ if (bounds != that.bounds) return false;
+ if (srcRect != that.srcRect) return false;
+ if (xInvZoom != that.xInvZoom) return false;
+ if (yInvZoom != that.yInvZoom) return false;
+ if (xInvInset != that.xInvInset) return false;
+ if (yInvInset != that.yInvInset) return false;
+ return true;
+}
+GrMagnifierEffect::GrMagnifierEffect(const GrMagnifierEffect& src)
+ : INHERITED(kGrMagnifierEffect_ClassID, src.optimizationFlags())
+ , srcCoordTransform(src.srcCoordTransform)
+ , src(src.src)
+ , bounds(src.bounds)
+ , srcRect(src.srcRect)
+ , xInvZoom(src.xInvZoom)
+ , yInvZoom(src.yInvZoom)
+ , xInvInset(src.xInvInset)
+ , yInvInset(src.yInvInset) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&srcCoordTransform);
+}
+std::unique_ptr<GrFragmentProcessor> GrMagnifierEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMagnifierEffect(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrMagnifierEffect::onTextureSampler(int index) const {
+ return IthTextureSampler(index, src);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMagnifierEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrMagnifierEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrTextureProxy> proxy = d->textureProxy(0);
+ const int kMaxWidth = 200;
+ const int kMaxHeight = 200;
+ const SkScalar kMaxInset = 20.0f;
+ uint32_t width = d->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = d->fRandom->nextULessThan(kMaxHeight);
+ SkScalar inset = d->fRandom->nextRangeScalar(1.0f, kMaxInset);
+
+ SkIRect bounds = SkIRect::MakeWH(SkIntToScalar(kMaxWidth), SkIntToScalar(kMaxHeight));
+ SkRect srcRect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+
+ auto effect = GrMagnifierEffect::Make(std::move(proxy),
+ bounds,
+ srcRect,
+ srcRect.width() / bounds.width(),
+ srcRect.height() / bounds.height(),
+ bounds.width() / inset,
+ bounds.height() / inset);
+ SkASSERT(effect);
+ return effect;
+}
+#endif
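[Annotation — illustrative sketch, not part of the upstream diff] The shader blends each fragment between its original coordinate and a zoomed coordinate. The blend weight is 1 deep inside the bounds, falls off quadratically within two inset-units of an edge, and gets a rounded falloff near corners. The weight in isolation:

    #include <algorithm>
    #include <cmath>

    // ux, uy: distance from the fragment to the nearest vertical/horizontal
    // bounds edge, already scaled by xInvInset/yInvInset as in the shader.
    float lensWeight(float ux, float uy) {
        if (ux < 2.0f && uy < 2.0f) {  // corner region: radial falloff
            float ex = 2.0f - ux, ey = 2.0f - uy;
            float dist = std::max(2.0f - std::sqrt(ex * ex + ey * ey), 0.0f);
            return std::min(dist * dist, 1.0f);
        }
        // edge region: quadratic falloff on the nearer axis
        return std::min(std::min(ux * ux, uy * uy), 1.0f);
    }
    // Final color: sample(src, mix(coord, zoom_coord, lensWeight(ux, uy))).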
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.h
new file mode 100644
index 0000000000..cb0d00524b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrMagnifierEffect.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrMagnifierEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrMagnifierEffect_DEFINED
+#define GrMagnifierEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrMagnifierEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> src, SkIRect bounds,
+ SkRect srcRect, float xInvZoom, float yInvZoom,
+ float xInvInset, float yInvInset) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMagnifierEffect(
+ src, bounds, srcRect, xInvZoom, yInvZoom, xInvInset, yInvInset));
+ }
+ GrMagnifierEffect(const GrMagnifierEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "MagnifierEffect"; }
+ GrCoordTransform srcCoordTransform;
+ TextureSampler src;
+ SkIRect bounds;
+ SkRect srcRect;
+ float xInvZoom;
+ float yInvZoom;
+ float xInvInset;
+ float yInvInset;
+
+private:
+ GrMagnifierEffect(sk_sp<GrTextureProxy> src, SkIRect bounds, SkRect srcRect, float xInvZoom,
+ float yInvZoom, float xInvInset, float yInvInset)
+ : INHERITED(kGrMagnifierEffect_ClassID, kNone_OptimizationFlags)
+ , srcCoordTransform(SkMatrix::I(), src.get())
+ , src(std::move(src))
+ , bounds(bounds)
+ , srcRect(srcRect)
+ , xInvZoom(xInvZoom)
+ , yInvZoom(yInvZoom)
+ , xInvInset(xInvInset)
+ , yInvInset(yInvInset) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&srcCoordTransform);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.cpp
new file mode 100644
index 0000000000..88ca234d2f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrMixerEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrMixerEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLMixerEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLMixerEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrMixerEffect& _outer = args.fFp.cast<GrMixerEffect>();
+ (void)_outer;
+ auto weight = _outer.weight;
+ (void)weight;
+ weightVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "weight");
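+ // Evaluate child fp0 into in0 and, when present, child fp1 into in1; the
+ // emitted shader then mixes the two by the weight uniform.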
+ SkString _input1278 = SkStringPrintf("%s", args.fInputColor);
+ SkString _sample1278("_sample1278");
+ this->invokeChild(_outer.fp0_index, _input1278.c_str(), &_sample1278, args);
+ fragBuilder->codeAppendf("half4 in0 = %s;", _sample1278.c_str());
+ SkString _input1335 = SkStringPrintf("%s", args.fInputColor);
+ SkString _sample1335("_sample1335");
+ if (_outer.fp1_index >= 0) {
+ this->invokeChild(_outer.fp1_index, _input1335.c_str(), &_sample1335, args);
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", _sample1335.c_str());
+ }
+ fragBuilder->codeAppendf("\nhalf4 in1 = %s ? %s : %s;\n%s = mix(in0, in1, %s);\n",
+ _outer.fp1_index >= 0 ? "true" : "false", _sample1335.c_str(),
+ args.fInputColor, args.fOutputColor,
+ args.fUniformHandler->getUniformCStr(weightVar));
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrMixerEffect& _outer = _proc.cast<GrMixerEffect>();
+ { pdman.set1f(weightVar, (_outer.weight)); }
+ }
+ UniformHandle weightVar;
+};
+GrGLSLFragmentProcessor* GrMixerEffect::onCreateGLSLInstance() const {
+ return new GrGLSLMixerEffect();
+}
+void GrMixerEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrMixerEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrMixerEffect& that = other.cast<GrMixerEffect>();
+ (void)that;
+ if (weight != that.weight) return false;
+ return true;
+}
+GrMixerEffect::GrMixerEffect(const GrMixerEffect& src)
+ : INHERITED(kGrMixerEffect_ClassID, src.optimizationFlags())
+ , fp0_index(src.fp0_index)
+ , fp1_index(src.fp1_index)
+ , weight(src.weight) {
+ this->registerChildProcessor(src.childProcessor(fp0_index).clone());
+ if (fp1_index >= 0) {
+ this->registerChildProcessor(src.childProcessor(fp1_index).clone());
+ }
+}
+std::unique_ptr<GrFragmentProcessor> GrMixerEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMixerEffect(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.h
new file mode 100644
index 0000000000..44d345b761
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrMixerEffect.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrMixerEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrMixerEffect_DEFINED
+#define GrMixerEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrMixerEffect : public GrFragmentProcessor {
+public:
+ static OptimizationFlags OptFlags(const std::unique_ptr<GrFragmentProcessor>& fp0,
+ const std::unique_ptr<GrFragmentProcessor>& fp1) {
+ auto flags = ProcessorOptimizationFlags(fp0.get());
+ if (fp1) {
+ flags &= ProcessorOptimizationFlags(fp1.get());
+ }
+ return flags;
+ }
+
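+ // Mirrors the shader's mix(in0, in1, weight): lerp each channel between the
+ // child outputs, treating a missing second child as the input color.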
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ const auto c0 = ConstantOutputForConstantInput(this->childProcessor(0), input),
+ c1 = (this->numChildProcessors() > 1)
+ ? ConstantOutputForConstantInput(this->childProcessor(1), input)
+ : input;
+ return {c0.fR + (c1.fR - c0.fR) * weight, c0.fG + (c1.fG - c0.fG) * weight,
+ c0.fB + (c1.fB - c0.fB) * weight, c0.fA + (c1.fA - c0.fA) * weight};
+ }
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp0,
+ std::unique_ptr<GrFragmentProcessor>
+ fp1,
+ float weight) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMixerEffect(std::move(fp0), std::move(fp1), weight));
+ }
+ GrMixerEffect(const GrMixerEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "MixerEffect"; }
+ int fp0_index = -1;
+ int fp1_index = -1;
+ float weight;
+
+private:
+ GrMixerEffect(std::unique_ptr<GrFragmentProcessor> fp0,
+ std::unique_ptr<GrFragmentProcessor>
+ fp1,
+ float weight)
+ : INHERITED(kGrMixerEffect_ClassID, (OptimizationFlags)OptFlags(fp0, fp1))
+ , weight(weight) {
+ SkASSERT(fp0);
+ fp0_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(fp0));
+ if (fp1) {
+ fp1_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(fp1));
+ }
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.cpp
new file mode 100644
index 0000000000..56a55dc871
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrOverrideInputFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrOverrideInputFragmentProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLOverrideInputFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLOverrideInputFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrOverrideInputFragmentProcessor& _outer =
+ args.fFp.cast<GrOverrideInputFragmentProcessor>();
+ (void)_outer;
+ auto useUniform = _outer.useUniform;
+ (void)useUniform;
+ auto uniformColor = _outer.uniformColor;
+ (void)uniformColor;
+ auto literalColor = _outer.literalColor;
+ (void)literalColor;
+ if (useUniform) {
+ uniformColorVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType, "uniformColor");
+ }
+ fragBuilder->codeAppendf(
+ "half4 constColor;\n@if (%s) {\n constColor = %s;\n} else {\n constColor = "
+ "half4(%f, %f, %f, %f);\n}",
+ (_outer.useUniform ? "true" : "false"),
+ uniformColorVar.isValid() ? args.fUniformHandler->getUniformCStr(uniformColorVar)
+ : "half4(0)",
+ _outer.literalColor.fR, _outer.literalColor.fG, _outer.literalColor.fB,
+ _outer.literalColor.fA);
+ SkString _input1992("constColor");
+ SkString _sample1992("_sample1992");
+ this->invokeChild(_outer.fp_index, _input1992.c_str(), &_sample1992, args);
+ fragBuilder->codeAppendf("\n%s = %s;\n", args.fOutputColor, _sample1992.c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrOverrideInputFragmentProcessor& _outer =
+ _proc.cast<GrOverrideInputFragmentProcessor>();
+ {
+ if (uniformColorVar.isValid()) {
+ pdman.set4fv(uniformColorVar, 1, (_outer.uniformColor).vec());
+ }
+ }
+ }
+ UniformHandle uniformColorVar;
+};
+GrGLSLFragmentProcessor* GrOverrideInputFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLOverrideInputFragmentProcessor();
+}
+void GrOverrideInputFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)useUniform);
+ if (!useUniform) {
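+ // The literal color is compiled into the shader, so it must contribute to
+ // the program key; pack its four float16 channels into two 32-bit words.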
+ uint16_t red = SkFloatToHalf(literalColor.fR);
+ uint16_t green = SkFloatToHalf(literalColor.fG);
+ uint16_t blue = SkFloatToHalf(literalColor.fB);
+ uint16_t alpha = SkFloatToHalf(literalColor.fA);
+ b->add32(((uint32_t)red << 16) | green);
+ b->add32(((uint32_t)blue << 16) | alpha);
+ }
+}
+bool GrOverrideInputFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrOverrideInputFragmentProcessor& that = other.cast<GrOverrideInputFragmentProcessor>();
+ (void)that;
+ if (useUniform != that.useUniform) return false;
+ if (uniformColor != that.uniformColor) return false;
+ if (literalColor != that.literalColor) return false;
+ return true;
+}
+GrOverrideInputFragmentProcessor::GrOverrideInputFragmentProcessor(
+ const GrOverrideInputFragmentProcessor& src)
+ : INHERITED(kGrOverrideInputFragmentProcessor_ClassID, src.optimizationFlags())
+ , fp_index(src.fp_index)
+ , useUniform(src.useUniform)
+ , uniformColor(src.uniformColor)
+ , literalColor(src.literalColor) {
+ this->registerChildProcessor(src.childProcessor(fp_index).clone());
+}
+std::unique_ptr<GrFragmentProcessor> GrOverrideInputFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrOverrideInputFragmentProcessor(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.h
new file mode 100644
index 0000000000..7e4106fd74
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrOverrideInputFragmentProcessor.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrOverrideInputFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrOverrideInputFragmentProcessor_DEFINED
+#define GrOverrideInputFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrOverrideInputFragmentProcessor : public GrFragmentProcessor {
+public:
+ static OptimizationFlags OptFlags(const std::unique_ptr<GrFragmentProcessor>& fp,
+ const SkPMColor4f& color) {
+ auto childFlags = ProcessorOptimizationFlags(fp.get());
+ auto flags = kNone_OptimizationFlags;
+ if (childFlags & kConstantOutputForConstantInput_OptimizationFlag) {
+ flags |= kConstantOutputForConstantInput_OptimizationFlag;
+ }
+ if ((childFlags & kPreservesOpaqueInput_OptimizationFlag) && color.isOpaque()) {
+ flags |= kPreservesOpaqueInput_OptimizationFlag;
+ }
+ return flags;
+ }
+
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return ConstantOutputForConstantInput(this->childProcessor(0), uniformColor);
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> fp,
+ const SkPMColor4f& color,
+ bool useUniform = true) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrOverrideInputFragmentProcessor(std::move(fp), useUniform, color, color));
+ }
+ GrOverrideInputFragmentProcessor(const GrOverrideInputFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "OverrideInputFragmentProcessor"; }
+ int fp_index = -1;
+ bool useUniform;
+ SkPMColor4f uniformColor;
+ SkPMColor4f literalColor;
+
+private:
+ GrOverrideInputFragmentProcessor(std::unique_ptr<GrFragmentProcessor> fp,
+ bool useUniform,
+ SkPMColor4f uniformColor,
+ SkPMColor4f literalColor)
+ : INHERITED(kGrOverrideInputFragmentProcessor_ClassID,
+ (OptimizationFlags)OptFlags(fp, useUniform ? uniformColor : literalColor))
+ , useUniform(useUniform)
+ , uniformColor(uniformColor)
+ , literalColor(literalColor) {
+ SkASSERT(fp);
+ fp_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(fp));
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.cpp
new file mode 100644
index 0000000000..728e6712bd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrPremulInputFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrPremulInputFragmentProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLPremulInputFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLPremulInputFragmentProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrPremulInputFragmentProcessor& _outer =
+ args.fFp.cast<GrPremulInputFragmentProcessor>();
+ (void)_outer;
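+ // Premultiply: copy the input color and scale its rgb by its alpha.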
+ fragBuilder->codeAppendf("%s = %s;\n%s.xyz *= %s.w;\n", args.fOutputColor, args.fInputColor,
+ args.fOutputColor, args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrPremulInputFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLPremulInputFragmentProcessor();
+}
+void GrPremulInputFragmentProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrPremulInputFragmentProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrPremulInputFragmentProcessor& that = other.cast<GrPremulInputFragmentProcessor>();
+ (void)that;
+ return true;
+}
+GrPremulInputFragmentProcessor::GrPremulInputFragmentProcessor(
+ const GrPremulInputFragmentProcessor& src)
+ : INHERITED(kGrPremulInputFragmentProcessor_ClassID, src.optimizationFlags()) {}
+std::unique_ptr<GrFragmentProcessor> GrPremulInputFragmentProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrPremulInputFragmentProcessor(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.h
new file mode 100644
index 0000000000..ccb876787d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrPremulInputFragmentProcessor.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrPremulInputFragmentProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrPremulInputFragmentProcessor_DEFINED
+#define GrPremulInputFragmentProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrPremulInputFragmentProcessor : public GrFragmentProcessor {
+public:
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return SkColor4f{input.fR, input.fG, input.fB, input.fA}.premul();
+ }
+ static std::unique_ptr<GrFragmentProcessor> Make() {
+ return std::unique_ptr<GrFragmentProcessor>(new GrPremulInputFragmentProcessor());
+ }
+ GrPremulInputFragmentProcessor(const GrPremulInputFragmentProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "PremulInputFragmentProcessor"; }
+
+private:
+ GrPremulInputFragmentProcessor()
+ : INHERITED(kGrPremulInputFragmentProcessor_ClassID,
+ (OptimizationFlags)kPreservesOpaqueInput_OptimizationFlag |
+ kConstantOutputForConstantInput_OptimizationFlag) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.cpp
new file mode 100644
index 0000000000..88ab11a8f2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRRectBlurEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrRRectBlurEffect.h"
+
+std::unique_ptr<GrFragmentProcessor> GrRRectBlurEffect::Make(GrRecordingContext* context,
+ float sigma,
+ float xformedSigma,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect) {
+ SkASSERT(!SkRRectPriv::IsCircle(devRRect) &&
+ !devRRect.isRect()); // Should've been caught up-stream
+
+ // TODO: loosen this up
+ if (!SkRRectPriv::IsSimpleCircular(devRRect)) {
+ return nullptr;
+ }
+
+ // Make sure we can successfully ninepatch this rrect -- the blur sigma has to be
+ // sufficiently small relative to both the size of the corner radius and the
+ // width (and height) of the rrect.
+ SkRRect rrectToDraw;
+ SkISize size;
+ SkScalar ignored[kSkBlurRRectMaxDivisions];
+ int ignoredSize;
+ uint32_t ignored32;
+
+ bool ninePatchable = SkComputeBlurredRRectParams(
+ srcRRect, devRRect, SkRect::MakeEmpty(), sigma, xformedSigma, &rrectToDraw, &size,
+ ignored, ignored, ignored, ignored, &ignoredSize, &ignoredSize, &ignored32);
+ if (!ninePatchable) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> mask(
+ find_or_create_rrect_blur_mask(context, rrectToDraw, size, xformedSigma));
+ if (!mask) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrRRectBlurEffect(xformedSigma, devRRect.getBounds(),
+ SkRRectPriv::GetSimpleRadii(devRRect).fX, std::move(mask)));
+}
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLRRectBlurEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLRRectBlurEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrRRectBlurEffect& _outer = args.fFp.cast<GrRRectBlurEffect>();
+ (void)_outer;
+ auto sigma = _outer.sigma;
+ (void)sigma;
+ auto rect = _outer.rect;
+ (void)rect;
+ auto cornerRadius = _outer.cornerRadius;
+ (void)cornerRadius;
+ cornerRadiusVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "cornerRadius");
+ proxyRectVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "proxyRect");
+ blurRadiusVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "blurRadius");
+ fragBuilder->codeAppendf(
+ "\nhalf2 translatedFragPos = half2(sk_FragCoord.xy - %s.xy);\nhalf threshold = %s "
+ "+ 2.0 * %s;\nhalf2 middle = half2((%s.zw - %s.xy) - float(2.0 * threshold));\nif "
+ "(translatedFragPos.x >= threshold && translatedFragPos.x < middle.x + threshold) "
+ "{\n translatedFragPos.x = threshold;\n} else if (translatedFragPos.x >= "
+ "middle.x + threshold) {\n translatedFragPos.x -= middle.x - 1.0;\n}\nif "
+ "(translatedFragPos.y > threshold && translatedFragPos.y < middle.y + threshold) "
+ "{\n translatedFragPos.y = threshold;",
+ args.fUniformHandler->getUniformCStr(proxyRectVar),
+ args.fUniformHandler->getUniformCStr(cornerRadiusVar),
+ args.fUniformHandler->getUniformCStr(blurRadiusVar),
+ args.fUniformHandler->getUniformCStr(proxyRectVar),
+ args.fUniformHandler->getUniformCStr(proxyRectVar));
+ fragBuilder->codeAppendf(
+ "\n} else if (translatedFragPos.y >= middle.y + threshold) {\n "
+ "translatedFragPos.y -= middle.y - 1.0;\n}\nhalf2 proxyDims = half2(2.0 * "
+ "threshold + 1.0);\nhalf2 texCoord = translatedFragPos / proxyDims;\n%s = %s * "
+ "sample(%s, float2(texCoord)).%s;\n",
+ args.fOutputColor, args.fInputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrRRectBlurEffect& _outer = _proc.cast<GrRRectBlurEffect>();
+ { pdman.set1f(cornerRadiusVar, (_outer.cornerRadius)); }
+ auto sigma = _outer.sigma;
+ (void)sigma;
+ auto rect = _outer.rect;
+ (void)rect;
+ UniformHandle& cornerRadius = cornerRadiusVar;
+ (void)cornerRadius;
+ GrSurfaceProxy& ninePatchSamplerProxy = *_outer.textureSampler(0).proxy();
+ GrTexture& ninePatchSampler = *ninePatchSamplerProxy.peekTexture();
+ (void)ninePatchSampler;
+ UniformHandle& proxyRect = proxyRectVar;
+ (void)proxyRect;
+ UniformHandle& blurRadius = blurRadiusVar;
+ (void)blurRadius;
+
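+ // The mask is outset by 3 * sigma on each side; quantize sigma here the
+ // same way as the cached mask's key so the proxy rect matches the texture.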
+ float blurRadiusValue = 3.f * SkScalarCeilToScalar(sigma - 1 / 6.0f);
+ pdman.set1f(blurRadius, blurRadiusValue);
+
+ SkRect outset = rect;
+ outset.outset(blurRadiusValue, blurRadiusValue);
+ pdman.set4f(proxyRect, outset.fLeft, outset.fTop, outset.fRight, outset.fBottom);
+ }
+ UniformHandle proxyRectVar;
+ UniformHandle blurRadiusVar;
+ UniformHandle cornerRadiusVar;
+};
+GrGLSLFragmentProcessor* GrRRectBlurEffect::onCreateGLSLInstance() const {
+ return new GrGLSLRRectBlurEffect();
+}
+void GrRRectBlurEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrRRectBlurEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrRRectBlurEffect& that = other.cast<GrRRectBlurEffect>();
+ (void)that;
+ if (sigma != that.sigma) return false;
+ if (rect != that.rect) return false;
+ if (cornerRadius != that.cornerRadius) return false;
+ if (ninePatchSampler != that.ninePatchSampler) return false;
+ return true;
+}
+GrRRectBlurEffect::GrRRectBlurEffect(const GrRRectBlurEffect& src)
+ : INHERITED(kGrRRectBlurEffect_ClassID, src.optimizationFlags())
+ , sigma(src.sigma)
+ , rect(src.rect)
+ , cornerRadius(src.cornerRadius)
+ , ninePatchSampler(src.ninePatchSampler) {
+ this->setTextureSamplerCnt(1);
+}
+std::unique_ptr<GrFragmentProcessor> GrRRectBlurEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrRRectBlurEffect(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrRRectBlurEffect::onTextureSampler(int index) const {
+ return IthTextureSampler(index, ninePatchSampler);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRRectBlurEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrRRectBlurEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar r = d->fRandom->nextRangeF(1.f, 9.f);
+ SkScalar sigma = d->fRandom->nextRangeF(1.f, 10.f);
+ SkRRect rrect;
+ rrect.setRectXY(SkRect::MakeWH(w, h), r, r);
+ return GrRRectBlurEffect::Make(d->context(), sigma, sigma, rrect, rrect);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.h
new file mode 100644
index 0000000000..436f604800
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrRRectBlurEffect.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRRectBlurEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrRRectBlurEffect_DEFINED
+#define GrRRectBlurEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "include/effects/SkBlurMaskFilter.h"
+#include "include/gpu/GrContext.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkBlurPriv.h"
+#include "src/core/SkGpuBlurUtils.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrStyle.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrRRectBlurEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrTextureProxy> find_or_create_rrect_blur_mask(GrRecordingContext* context,
+ const SkRRect& rrectToDraw,
+ const SkISize& size,
+ float xformedSigma) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 9, "RoundRect Blur Mask");
+ builder[0] = SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
+
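+ // Key the cached mask on the quantized sigma plus all eight corner radii.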
+ int index = 1;
+ for (auto c : {SkRRect::kUpperLeft_Corner, SkRRect::kUpperRight_Corner,
+ SkRRect::kLowerRight_Corner, SkRRect::kLowerLeft_Corner}) {
+ SkASSERT(SkScalarIsInt(rrectToDraw.radii(c).fX) &&
+ SkScalarIsInt(rrectToDraw.radii(c).fY));
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fX);
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fY);
+ }
+ builder.finish();
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ sk_sp<GrTextureProxy> mask(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kBottomLeft_GrSurfaceOrigin));
+ if (!mask) {
+ // TODO: this could be SkBackingFit::kApprox, but:
+ // 1) The texture coords would need to be updated.
+ // 2) We would have to use GrTextureDomain::kClamp_Mode for the GaussianBlur.
+ auto rtc = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, size.fWidth, size.fHeight, GrColorType::kAlpha_8,
+ nullptr);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+
+ rtc->clear(nullptr, SK_PMColor4fTRANSPARENT,
+ GrRenderTargetContext::CanClearFullscreen::kYes);
+ rtc->drawRRect(GrNoClip(), std::move(paint), GrAA::kYes, SkMatrix::I(), rrectToDraw,
+ GrStyle::SimpleFill());
+
+ sk_sp<GrTextureProxy> srcProxy(rtc->asTextureProxyRef());
+ if (!srcProxy) {
+ return nullptr;
+ }
+ auto rtc2 = SkGpuBlurUtils::GaussianBlur(context,
+ std::move(srcProxy),
+ rtc->colorInfo().colorType(),
+ rtc->colorInfo().alphaType(),
+ SkIPoint::Make(0, 0),
+ nullptr,
+ SkIRect::MakeWH(size.fWidth, size.fHeight),
+ SkIRect::EmptyIRect(),
+ xformedSigma,
+ xformedSigma,
+ GrTextureDomain::kIgnore_Mode,
+ SkBackingFit::kExact);
+ if (!rtc2) {
+ return nullptr;
+ }
+
+ mask = rtc2->asTextureProxyRef();
+ if (!mask) {
+ return nullptr;
+ }
+ SkASSERT(mask->origin() == kBottomLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, mask.get());
+ }
+
+ return mask;
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(GrRecordingContext* context,
+ float sigma,
+ float xformedSigma,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect);
+ GrRRectBlurEffect(const GrRRectBlurEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "RRectBlurEffect"; }
+ float sigma;
+ SkRect rect;
+ float cornerRadius;
+ TextureSampler ninePatchSampler;
+
+private:
+ GrRRectBlurEffect(float sigma, SkRect rect, float cornerRadius,
+ sk_sp<GrTextureProxy> ninePatchSampler)
+ : INHERITED(kGrRRectBlurEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , sigma(sigma)
+ , rect(rect)
+ , cornerRadius(cornerRadius)
+ , ninePatchSampler(std::move(ninePatchSampler)) {
+ this->setTextureSamplerCnt(1);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.cpp
new file mode 100644
index 0000000000..186dcfe365
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRectBlurEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrRectBlurEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLRectBlurEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLRectBlurEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrRectBlurEffect& _outer = args.fFp.cast<GrRectBlurEffect>();
+ (void)_outer;
+ auto rect = _outer.rect;
+ (void)rect;
+ auto invSixSigma = _outer.invSixSigma;
+ (void)invSixSigma;
+ auto isFast = _outer.isFast;
+ (void)isFast;
+ highp = ((abs(rect.left()) > 16000.0 || abs(rect.top()) > 16000.0) ||
+ abs(rect.right()) > 16000.0) ||
+ abs(rect.bottom()) > 16000.0;
+ if (highp) {
+ rectFVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "rectF");
+ }
+ if (!highp) {
+ rectHVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ "rectH");
+ }
+ invSixSigmaVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "invSixSigma");
+ fragBuilder->codeAppendf(
+ "/* key */ bool highp = %s;\nhalf xCoverage, yCoverage;\n@if (%s) {\n half x, "
+ "y;\n @if (highp) {\n x = max(half(%s.x - sk_FragCoord.x), "
+ "half(sk_FragCoord.x - %s.z));\n y = max(half(%s.y - sk_FragCoord.y), "
+ "half(sk_FragCoord.y - %s.w));\n } else {\n x = max(half(float(%s.x) - "
+ "sk_FragCoord.x), half(sk_FragCoord.x - float(%s.z)));\n y = "
+ "max(half(float(%s.y) - sk_FragCoord.y), half(sk_FragCoord.y - float(%s.w)));\n "
+ "}\n xCoverage = sample(%s, float2(half2(x * %s, 0.5))).",
+ (highp ? "true" : "false"), (_outer.isFast ? "true" : "false"),
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar));
+ fragBuilder->codeAppendf(
+ "%s.w;\n yCoverage = sample(%s, float2(half2(y * %s, 0.5))).%s.w;\n %s = (%s "
+ "* xCoverage) * yCoverage;\n} else {\n half l, r, t, b;\n @if (highp) {\n "
+ " l = half(sk_FragCoord.x - %s.x);\n r = half(%s.z - sk_FragCoord.x);\n "
+ " t = half(sk_FragCoord.y - %s.y);\n b = half(%s.w - "
+ "sk_FragCoord.y);\n } else {\n l = half(sk_FragCoord.x - float(%s.x));\n "
+ " r = half(float(%s.z) - sk_FragCoord.x);\n t = half(sk_FragCoord.y - "
+ "float(%s.y));\n b = half(float(",
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ args.fOutputColor, args.fInputColor,
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectFVar.isValid() ? args.fUniformHandler->getUniformCStr(rectFVar) : "float4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)");
+ fragBuilder->codeAppendf(
+ "%s.w) - sk_FragCoord.y);\n }\n half il = 1.0 + l * %s;\n half ir = 1.0 + "
+ "r * %s;\n half it = 1.0 + t * %s;\n half ib = 1.0 + b * %s;\n xCoverage "
+ "= (1.0 - sample(%s, float2(half2(il, 0.5))).%s.w) - sample(%s, float2(half2(ir, "
+ "0.5))).%s.w;\n yCoverage = (1.0 - sample(%s, float2(half2(it, 0.5))).%s.w) - "
+ "sample(%s, float2(half2(ib, 0.5))).%s.w;\n}\n%s = (%s * xCoverage) * yCoverage;\n",
+ rectHVar.isValid() ? args.fUniformHandler->getUniformCStr(rectHVar) : "half4(0)",
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar),
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar),
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar),
+ args.fUniformHandler->getUniformCStr(invSixSigmaVar),
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str(),
+ args.fOutputColor, args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrRectBlurEffect& _outer = _proc.cast<GrRectBlurEffect>();
+ { pdman.set1f(invSixSigmaVar, (_outer.invSixSigma)); }
+ auto rect = _outer.rect;
+ (void)rect;
+ UniformHandle& rectF = rectFVar;
+ (void)rectF;
+ UniformHandle& rectH = rectHVar;
+ (void)rectH;
+ GrSurfaceProxy& integralProxy = *_outer.textureSampler(0).proxy();
+ GrTexture& integral = *integralProxy.peekTexture();
+ (void)integral;
+ UniformHandle& invSixSigma = invSixSigmaVar;
+ (void)invSixSigma;
+ auto isFast = _outer.isFast;
+ (void)isFast;
+
+ float r[]{rect.fLeft, rect.fTop, rect.fRight, rect.fBottom};
+ pdman.set4fv(highp ? rectF : rectH, 1, r);
+ }
+ bool highp = false;
+ UniformHandle rectFVar;
+ UniformHandle rectHVar;
+ UniformHandle invSixSigmaVar;
+};
+GrGLSLFragmentProcessor* GrRectBlurEffect::onCreateGLSLInstance() const {
+ return new GrGLSLRectBlurEffect();
+}
+void GrRectBlurEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ bool highp = ((abs(rect.left()) > 16000.0 || abs(rect.top()) > 16000.0) ||
+ abs(rect.right()) > 16000.0) ||
+ abs(rect.bottom()) > 16000.0;
+ b->add32((int32_t)highp);
+ b->add32((int32_t)isFast);
+}
+bool GrRectBlurEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrRectBlurEffect& that = other.cast<GrRectBlurEffect>();
+ (void)that;
+ if (rect != that.rect) return false;
+ if (integral != that.integral) return false;
+ if (invSixSigma != that.invSixSigma) return false;
+ if (isFast != that.isFast) return false;
+ return true;
+}
+GrRectBlurEffect::GrRectBlurEffect(const GrRectBlurEffect& src)
+ : INHERITED(kGrRectBlurEffect_ClassID, src.optimizationFlags())
+ , rect(src.rect)
+ , integral(src.integral)
+ , invSixSigma(src.invSixSigma)
+ , isFast(src.isFast) {
+ this->setTextureSamplerCnt(1);
+}
+std::unique_ptr<GrFragmentProcessor> GrRectBlurEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrRectBlurEffect(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrRectBlurEffect::onTextureSampler(int index) const {
+ return IthTextureSampler(index, integral);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRectBlurEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrRectBlurEffect::TestCreate(GrProcessorTestData* data) {
+ float sigma = data->fRandom->nextRangeF(3, 8);
+ float width = data->fRandom->nextRangeF(200, 300);
+ float height = data->fRandom->nextRangeF(200, 300);
+ return GrRectBlurEffect::Make(data->proxyProvider(), *data->caps()->shaderCaps(),
+ SkRect::MakeWH(width, height), sigma);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.h
new file mode 100644
index 0000000000..b0c86bda1c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrRectBlurEffect.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRectBlurEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrRectBlurEffect_DEFINED
+#define GrRectBlurEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include <cmath>
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkMathPriv.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrShaderCaps.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrRectBlurEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrTextureProxy> CreateIntegralTexture(GrProxyProvider* proxyProvider,
+ float sixSigma) {
+ // The texture we're producing represents the integral of a normal distribution over a
+ // six-sigma range centered at zero. We want enough resolution so that the linear
+ // interpolation done in texture lookup doesn't introduce noticeable artifacts. We
+ // conservatively choose to have 2 texels for each dst pixel.
+ int minWidth = 2 * sk_float_ceil2int(sixSigma);
+ // Bin by powers of 2 with a minimum so we get good profile reuse.
+ int width = SkTMax(SkNextPow2(minWidth), 32);
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
+ builder[0] = width;
+ builder.finish();
+
+ sk_sp<GrTextureProxy> proxy(proxyProvider->findOrCreateProxyByUniqueKey(
+ key, GrColorType::kAlpha_8, kTopLeft_GrSurfaceOrigin));
+ if (!proxy) {
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(SkImageInfo::MakeA8(width, 1))) {
+ return nullptr;
+ }
+ *bitmap.getAddr8(0, 0) = 255;
+ const float invWidth = 1.f / width;
+ for (int i = 1; i < width - 1; ++i) {
+ float x = (i + 0.5f) * invWidth;
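+ // Map the texel center from [0,1] onto z in [3,-3] sigmas, scaled by
+ // 1/sqrt(2) so that 0.5 * (erf(x) + 1) below is the normal CDF Phi(z).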
+ x = (-6 * x + 3) * SK_ScalarRoot2Over2;
+ float integral = 0.5f * (std::erf(x) + 1.f);
+ *bitmap.getAddr8(i, 0) = SkToU8(sk_float_round2int(255.f * integral));
+ }
+ *bitmap.getAddr8(width - 1, 0) = 0;
+ bitmap.setImmutable();
+ proxy = proxyProvider->createProxyFromBitmap(bitmap, GrMipMapped::kNo);
+ if (!proxy) {
+ return nullptr;
+ }
+ SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
+ proxyProvider->assignUniqueKeyToProxy(key, proxy.get());
+ }
+ return proxy;
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(GrProxyProvider* proxyProvider,
+ const GrShaderCaps& caps, const SkRect& rect,
+ float sigma) {
+ SkASSERT(rect.isSorted());
+ if (!caps.floatIs32Bits()) {
+ // We promote the math that gets us into the Gaussian space to full float when the rect
+ // coords are large. If we don't have full float then fail. We could probably clip the
+ // rect to an outset device bounds instead.
+ if (SkScalarAbs(rect.fLeft) > 16000.f || SkScalarAbs(rect.fTop) > 16000.f ||
+ SkScalarAbs(rect.fRight) > 16000.f || SkScalarAbs(rect.fBottom) > 16000.f) {
+ return nullptr;
+ }
+ }
+
+ const float sixSigma = 6 * sigma;
+ auto integral = CreateIntegralTexture(proxyProvider, sixSigma);
+ if (!integral) {
+ return nullptr;
+ }
+
+ // In the fast variant we think of the midpoint of the integral texture as aligning
+ // with the closest rect edge both in x and y. To simplify texture coord calculation we
+ // inset the rect so that the edge of the inset rect corresponds to t = 0 in the texture.
+ // It actually simplifies things a bit in the !isFast case, too.
+ float threeSigma = sixSigma / 2;
+ SkRect insetRect = {rect.fLeft + threeSigma, rect.fTop + threeSigma,
+ rect.fRight - threeSigma, rect.fBottom - threeSigma};
+
+ // In our fast variant we find the nearest horizontal and vertical edges and do a
+ // lookup in the integral texture for each, then multiply the two results. When the
+ // rect is less than 6 sigma wide things aren't so simple and we have to consider
+ // both the left and right edge of the rectangle (and similarly in y).
+ bool isFast = insetRect.isSorted();
+ // 1 / (6 * sigma) is the domain of the integral texture. We use the inverse to produce
+ // normalized texture coords from frag coord distances.
+ float invSixSigma = 1.f / sixSigma;
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrRectBlurEffect(insetRect, std::move(integral), invSixSigma, isFast,
+ GrSamplerState::ClampBilerp()));
+ }
+ GrRectBlurEffect(const GrRectBlurEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "RectBlurEffect"; }
+ SkRect rect;
+ TextureSampler integral;
+ float invSixSigma;
+ bool isFast;
+
+private:
+ GrRectBlurEffect(SkRect rect, sk_sp<GrTextureProxy> integral, float invSixSigma, bool isFast,
+ GrSamplerState samplerParams)
+ : INHERITED(kGrRectBlurEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+ , rect(rect)
+ , integral(std::move(integral), samplerParams)
+ , invSixSigma(invSixSigma)
+ , isFast(isFast) {
+ this->setTextureSamplerCnt(1);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.cpp
new file mode 100644
index 0000000000..6f1734d139
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSaturateProcessor.fp; do not modify.
+ **************************************************************************************************/
+#include "GrSaturateProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLSaturateProcessor : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLSaturateProcessor() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrSaturateProcessor& _outer = args.fFp.cast<GrSaturateProcessor>();
+ (void)_outer;
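+ // Saturate: clamp every channel of the input color to [0, 1].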
+ fragBuilder->codeAppendf("%s = clamp(%s, 0.0, 1.0);\n", args.fOutputColor,
+ args.fInputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrSaturateProcessor::onCreateGLSLInstance() const {
+ return new GrGLSLSaturateProcessor();
+}
+void GrSaturateProcessor::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrSaturateProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrSaturateProcessor& that = other.cast<GrSaturateProcessor>();
+ (void)that;
+ return true;
+}
+GrSaturateProcessor::GrSaturateProcessor(const GrSaturateProcessor& src)
+ : INHERITED(kGrSaturateProcessor_ClassID, src.optimizationFlags()) {}
+std::unique_ptr<GrFragmentProcessor> GrSaturateProcessor::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSaturateProcessor(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSaturateProcessor);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrSaturateProcessor::TestCreate(GrProcessorTestData* d) {
+ return GrSaturateProcessor::Make();
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.h b/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.h
new file mode 100644
index 0000000000..28af190a33
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrSaturateProcessor.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSaturateProcessor.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrSaturateProcessor_DEFINED
+#define GrSaturateProcessor_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrSaturateProcessor : public GrFragmentProcessor {
+public:
+ SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
+ return {SkTPin(input.fR, 0.f, 1.f), SkTPin(input.fG, 0.f, 1.f), SkTPin(input.fB, 0.f, 1.f),
+ SkTPin(input.fA, 0.f, 1.f)};
+ }
+ static std::unique_ptr<GrFragmentProcessor> Make() {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSaturateProcessor());
+ }
+ GrSaturateProcessor(const GrSaturateProcessor& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "SaturateProcessor"; }
+
+private:
+ GrSaturateProcessor()
+ : INHERITED(kGrSaturateProcessor_ClassID,
+ (OptimizationFlags)kConstantOutputForConstantInput_OptimizationFlag |
+ kPreservesOpaqueInput_OptimizationFlag) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.cpp b/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.cpp
new file mode 100644
index 0000000000..dd843b9bad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSimpleTextureEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrSimpleTextureEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLSimpleTextureEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLSimpleTextureEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrSimpleTextureEffect& _outer = args.fFp.cast<GrSimpleTextureEffect>();
+ (void)_outer;
+ auto matrix = _outer.matrix;
+ (void)matrix;
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "%s = %s * sample(%s, %s).%s;\n", args.fOutputColor, args.fInputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrSimpleTextureEffect::onCreateGLSLInstance() const {
+ return new GrGLSLSimpleTextureEffect();
+}
+void GrSimpleTextureEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrSimpleTextureEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrSimpleTextureEffect& that = other.cast<GrSimpleTextureEffect>();
+ (void)that;
+ if (image != that.image) return false;
+ if (matrix != that.matrix) return false;
+ return true;
+}
+GrSimpleTextureEffect::GrSimpleTextureEffect(const GrSimpleTextureEffect& src)
+ : INHERITED(kGrSimpleTextureEffect_ClassID, src.optimizationFlags())
+ , imageCoordTransform(src.imageCoordTransform)
+ , image(src.image)
+ , matrix(src.matrix) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&imageCoordTransform);
+}
+std::unique_ptr<GrFragmentProcessor> GrSimpleTextureEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSimpleTextureEffect(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrSimpleTextureEffect::onTextureSampler(
+ int index) const {
+ return IthTextureSampler(index, image);
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSimpleTextureEffect);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrSimpleTextureEffect::TestCreate(
+ GrProcessorTestData* testData) {
+ int texIdx = testData->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ GrSamplerState::WrapMode wrapModes[2];
+ GrTest::TestWrapModes(testData->fRandom, wrapModes);
+ if (!testData->caps()->npotTextureTileSupport()) {
+ // Performing repeat sampling on npot textures will cause asserts on HW
+ // that lacks support.
+ wrapModes[0] = GrSamplerState::WrapMode::kClamp;
+ wrapModes[1] = GrSamplerState::WrapMode::kClamp;
+ }
+
+ GrSamplerState params(wrapModes, testData->fRandom->nextBool()
+ ? GrSamplerState::Filter::kBilerp
+ : GrSamplerState::Filter::kNearest);
+
+ const SkMatrix& matrix = GrTest::TestMatrix(testData->fRandom);
+ return GrSimpleTextureEffect::Make(testData->textureProxy(texIdx),
+ testData->textureProxyColorType(texIdx), matrix, params);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.h b/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.h
new file mode 100644
index 0000000000..d70dd56773
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/generated/GrSimpleTextureEffect.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSimpleTextureEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrSimpleTextureEffect_DEFINED
+#define GrSimpleTextureEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrSimpleTextureEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(std::move(proxy), matrix, srcColorType,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp,
+ GrSamplerState::Filter::kNearest)));
+ }
+
+ /* clamp mode */
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ GrSamplerState::Filter filter) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSimpleTextureEffect(
+ std::move(proxy), matrix, srcColorType,
+ GrSamplerState(GrSamplerState::WrapMode::kClamp, filter)));
+ }
+
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ const SkMatrix& matrix,
+ const GrSamplerState& p) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(std::move(proxy), matrix, srcColorType, p));
+ }
+ GrSimpleTextureEffect(const GrSimpleTextureEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "SimpleTextureEffect"; }
+ GrCoordTransform imageCoordTransform;
+ TextureSampler image;
+ SkMatrix44 matrix;
+
+private:
+ GrSimpleTextureEffect(sk_sp<GrTextureProxy> image, SkMatrix44 matrix, GrColorType srcColorType,
+ GrSamplerState samplerParams)
+ : INHERITED(kGrSimpleTextureEffect_ClassID,
+ (OptimizationFlags)ModulateForSamplerOptFlags(
+ srcColorType,
+ samplerParams.wrapModeX() ==
+ GrSamplerState::WrapMode::kClampToBorder ||
+ samplerParams.wrapModeY() ==
+ GrSamplerState::WrapMode::kClampToBorder))
+ , imageCoordTransform(matrix, image.get())
+ , image(std::move(image), samplerParams)
+ , matrix(matrix) {
+ this->setTextureSamplerCnt(1);
+ this->addCoordTransform(&imageCoordTransform);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
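+
+// Usage sketch (editorial): the Make() overloads above differ only in how much sampler state the
+// caller specifies; the simplest form samples with clamp wrap modes and nearest filtering:
+//
+//     auto fp = GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, SkMatrix::I());
+//     paint.addColorFragmentProcessor(std::move(fp));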
+#endif
diff --git a/gfx/skia/skia/src/gpu/geometry/GrPathUtils.cpp b/gfx/skia/skia/src/gpu/geometry/GrPathUtils.cpp
new file mode 100644
index 0000000000..0b3dd23f15
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrPathUtils.cpp
@@ -0,0 +1,859 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/geometry/GrPathUtils.h"
+
+#include "include/gpu/GrTypes.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkPointPriv.h"
+
+static const SkScalar gMinCurveTol = 0.0001f;
+
+SkScalar GrPathUtils::scaleToleranceToSrc(SkScalar devTol,
+ const SkMatrix& viewM,
+ const SkRect& pathBounds) {
+    // In order to tessellate the path we get a bound on how much the matrix can
+ // scale when mapping to screen coordinates.
+ SkScalar stretch = viewM.getMaxScale();
+
+ if (stretch < 0) {
+        // Take the worst-case mapRadius among the four corners (less than perfect).
+ for (int i = 0; i < 4; ++i) {
+ SkMatrix mat;
+ mat.setTranslate((i % 2) ? pathBounds.fLeft : pathBounds.fRight,
+ (i < 2) ? pathBounds.fTop : pathBounds.fBottom);
+ mat.postConcat(viewM);
+ stretch = SkMaxScalar(stretch, mat.mapRadius(SK_Scalar1));
+ }
+ }
+ SkScalar srcTol = 0;
+ if (stretch <= 0) {
+ // We have degenerate bounds or some degenerate matrix. Thus we set the tolerance to be the
+ // max of the path pathBounds width and height.
+ srcTol = SkTMax(pathBounds.width(), pathBounds.height());
+ } else {
+ srcTol = devTol / stretch;
+ }
+ if (srcTol < gMinCurveTol) {
+ srcTol = gMinCurveTol;
+ }
+ return srcTol;
+}
+
+uint32_t GrPathUtils::quadraticPointCount(const SkPoint points[], SkScalar tol) {
+ // You should have called scaleToleranceToSrc, which guarantees this
+ SkASSERT(tol >= gMinCurveTol);
+
+ SkScalar d = SkPointPriv::DistanceToLineSegmentBetween(points[1], points[0], points[2]);
+ if (!SkScalarIsFinite(d)) {
+ return kMaxPointsPerCurve;
+ } else if (d <= tol) {
+ return 1;
+ } else {
+        // Each time we subdivide, d should be cut by a factor of 4. So we need to
+        // subdivide x = log4(d/tol) times, and x subdivisions create 2^x points.
+        // Since 2^(log4(n)) == sqrt(n), that point count is sqrt(d/tol).
+ SkScalar divSqrt = SkScalarSqrt(d / tol);
+ if (((SkScalar)SK_MaxS32) <= divSqrt) {
+ return kMaxPointsPerCurve;
+ } else {
+ int temp = SkScalarCeilToInt(divSqrt);
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return SkTMin(pow2, kMaxPointsPerCurve);
+ }
+ }
+}
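+
+// Worked example (editorial, not in the upstream source): for a quad whose control point lies
+// d = 64 units from its chord, at tolerance 1 the math above gives sqrt(64/1) = 8, and
+// GrNextPow2(8) == 8 points are budgeted:
+//
+//     SkPoint pts[3] = {{0, 0}, {32, 64}, {64, 0}};  // control point 64 units above the chord
+//     SkASSERT(GrPathUtils::quadraticPointCount(pts, 1.0f) == 8);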
+
+uint32_t GrPathUtils::generateQuadraticPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (SkPointPriv::DistanceToLineSegmentBetweenSqd(p1, p0, p2)) < tolSqd) {
+ (*points)[0] = p2;
+ *points += 1;
+ return 1;
+ }
+
+ SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ };
+ SkPoint r = { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) };
+
+ pointsLeft >>= 1;
+ uint32_t a = generateQuadraticPoints(p0, q[0], r, tolSqd, points, pointsLeft);
+ uint32_t b = generateQuadraticPoints(r, q[1], p2, tolSqd, points, pointsLeft);
+ return a + b;
+}
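+
+// Usage sketch (editorial): the count/generate pair is intended to be used together -- budget
+// with quadraticPointCount(), then emit into caller-owned storage. Note that
+// generateQuadraticPoints() takes the *squared* tolerance:
+//
+//     uint32_t n = GrPathUtils::quadraticPointCount(pts, tol);
+//     SkAutoSTMalloc<32, SkPoint> storage(n);
+//     SkPoint* cursor = storage.get();
+//     GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2], tol * tol, &cursor, n);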
+
+uint32_t GrPathUtils::cubicPointCount(const SkPoint points[],
+ SkScalar tol) {
+ // You should have called scaleToleranceToSrc, which guarantees this
+ SkASSERT(tol >= gMinCurveTol);
+
+ SkScalar d = SkTMax(
+ SkPointPriv::DistanceToLineSegmentBetweenSqd(points[1], points[0], points[3]),
+ SkPointPriv::DistanceToLineSegmentBetweenSqd(points[2], points[0], points[3]));
+ d = SkScalarSqrt(d);
+ if (!SkScalarIsFinite(d)) {
+ return kMaxPointsPerCurve;
+ } else if (d <= tol) {
+ return 1;
+ } else {
+ SkScalar divSqrt = SkScalarSqrt(d / tol);
+ if (((SkScalar)SK_MaxS32) <= divSqrt) {
+ return kMaxPointsPerCurve;
+ } else {
+            int temp = SkScalarCeilToInt(divSqrt);
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return SkTMin(pow2, kMaxPointsPerCurve);
+ }
+ }
+}
+
+uint32_t GrPathUtils::generateCubicPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (SkPointPriv::DistanceToLineSegmentBetweenSqd(p1, p0, p3) < tolSqd &&
+ SkPointPriv::DistanceToLineSegmentBetweenSqd(p2, p0, p3) < tolSqd)) {
+ (*points)[0] = p3;
+ *points += 1;
+ return 1;
+ }
+ SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ { SkScalarAve(p2.fX, p3.fX), SkScalarAve(p2.fY, p3.fY) }
+ };
+ SkPoint r[] = {
+ { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) },
+ { SkScalarAve(q[1].fX, q[2].fX), SkScalarAve(q[1].fY, q[2].fY) }
+ };
+ SkPoint s = { SkScalarAve(r[0].fX, r[1].fX), SkScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ uint32_t a = generateCubicPoints(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
+ uint32_t b = generateCubicPoints(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+int GrPathUtils::worstCasePointCount(const SkPath& path, int* subpaths, SkScalar tol) {
+ // You should have called scaleToleranceToSrc, which guarantees this
+ SkASSERT(tol >= gMinCurveTol);
+
+ int pointCount = 0;
+ *subpaths = 1;
+
+ bool first = true;
+
+ SkPath::Iter iter(path, false);
+ SkPath::Verb verb;
+
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ pointCount += 1;
+ break;
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ pointCount += quadraticPointCount(quadPts + 2*i, tol);
+ }
+                // Don't fall through and double-count: the conic has already been
+                // counted above via its approximating quads.
+                break;
+            }
+ case SkPath::kQuad_Verb:
+ pointCount += quadraticPointCount(pts, tol);
+ break;
+ case SkPath::kCubic_Verb:
+ pointCount += cubicPointCount(pts, tol);
+ break;
+ case SkPath::kMove_Verb:
+ pointCount += 1;
+ if (!first) {
+ ++(*subpaths);
+ }
+ break;
+ default:
+ break;
+ }
+ first = false;
+ }
+ return pointCount;
+}
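+
+// Usage sketch (editorial; 'viewMatrix' and 'path' are the caller's): the tolerance passed to
+// the counting functions is expected to come from scaleToleranceToSrc(), which maps a
+// device-space tolerance into the path's source space:
+//
+//     SkScalar srcTol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
+//                                                        viewMatrix, path.getBounds());
+//     int subpaths;
+//     int maxPts = GrPathUtils::worstCasePointCount(path, &subpaths, srcTol);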
+
+void GrPathUtils::QuadUVMatrix::set(const SkPoint qPts[3]) {
+ SkMatrix m;
+ // We want M such that M * xy_pt = uv_pt
+ // We know M * control_pts = [0 1/2 1]
+ // [0 0 1]
+ // [1 1 1]
+ // And control_pts = [x0 x1 x2]
+ // [y0 y1 y2]
+ // [1 1 1 ]
+ // We invert the control pt matrix and post concat to both sides to get M.
+ // Using the known form of the control point matrix and the result, we can
+ // optimize and improve precision.
+
+ double x0 = qPts[0].fX;
+ double y0 = qPts[0].fY;
+ double x1 = qPts[1].fX;
+ double y1 = qPts[1].fY;
+ double x2 = qPts[2].fX;
+ double y2 = qPts[2].fY;
+ double det = x0*y1 - y0*x1 + x2*y0 - y2*x0 + x1*y2 - y1*x2;
+
+ if (!sk_float_isfinite(det)
+ || SkScalarNearlyZero((float)det, SK_ScalarNearlyZero * SK_ScalarNearlyZero)) {
+ // The quad is degenerate. Hopefully this is rare. Find the pts that are
+ // farthest apart to compute a line (unless it is really a pt).
+ SkScalar maxD = SkPointPriv::DistanceToSqd(qPts[0], qPts[1]);
+ int maxEdge = 0;
+ SkScalar d = SkPointPriv::DistanceToSqd(qPts[1], qPts[2]);
+ if (d > maxD) {
+ maxD = d;
+ maxEdge = 1;
+ }
+ d = SkPointPriv::DistanceToSqd(qPts[2], qPts[0]);
+ if (d > maxD) {
+ maxD = d;
+ maxEdge = 2;
+ }
+ // We could have a tolerance here, not sure if it would improve anything
+ if (maxD > 0) {
+ // Set the matrix to give (u = 0, v = distance_to_line)
+ SkVector lineVec = qPts[(maxEdge + 1)%3] - qPts[maxEdge];
+ // when looking from the point 0 down the line we want positive
+ // distances to be to the left. This matches the non-degenerate
+ // case.
+ lineVec = SkPointPriv::MakeOrthog(lineVec, SkPointPriv::kLeft_Side);
+ // first row
+ fM[0] = 0;
+ fM[1] = 0;
+ fM[2] = 0;
+ // second row
+ fM[3] = lineVec.fX;
+ fM[4] = lineVec.fY;
+ fM[5] = -lineVec.dot(qPts[maxEdge]);
+ } else {
+ // It's a point. It should cover zero area. Just set the matrix such
+ // that (u, v) will always be far away from the quad.
+ fM[0] = 0; fM[1] = 0; fM[2] = 100.f;
+ fM[3] = 0; fM[4] = 0; fM[5] = 100.f;
+ }
+ } else {
+ double scale = 1.0/det;
+
+ // compute adjugate matrix
+ double a2, a3, a4, a5, a6, a7, a8;
+ a2 = x1*y2-x2*y1;
+
+ a3 = y2-y0;
+ a4 = x0-x2;
+ a5 = x2*y0-x0*y2;
+
+ a6 = y0-y1;
+ a7 = x1-x0;
+ a8 = x0*y1-x1*y0;
+
+ // this performs the uv_pts*adjugate(control_pts) multiply,
+ // then does the scale by 1/det afterwards to improve precision
+ m[SkMatrix::kMScaleX] = (float)((0.5*a3 + a6)*scale);
+ m[SkMatrix::kMSkewX] = (float)((0.5*a4 + a7)*scale);
+ m[SkMatrix::kMTransX] = (float)((0.5*a5 + a8)*scale);
+
+ m[SkMatrix::kMSkewY] = (float)(a6*scale);
+ m[SkMatrix::kMScaleY] = (float)(a7*scale);
+ m[SkMatrix::kMTransY] = (float)(a8*scale);
+
+ // kMPersp0 & kMPersp1 should algebraically be zero
+ m[SkMatrix::kMPersp0] = 0.0f;
+ m[SkMatrix::kMPersp1] = 0.0f;
+ m[SkMatrix::kMPersp2] = (float)((a2 + a5 + a8)*scale);
+
+ // It may not be normalized to have 1.0 in the bottom right
+ float m33 = m.get(SkMatrix::kMPersp2);
+ if (1.f != m33) {
+ m33 = 1.f / m33;
+ fM[0] = m33 * m.get(SkMatrix::kMScaleX);
+ fM[1] = m33 * m.get(SkMatrix::kMSkewX);
+ fM[2] = m33 * m.get(SkMatrix::kMTransX);
+ fM[3] = m33 * m.get(SkMatrix::kMSkewY);
+ fM[4] = m33 * m.get(SkMatrix::kMScaleY);
+ fM[5] = m33 * m.get(SkMatrix::kMTransY);
+ } else {
+ fM[0] = m.get(SkMatrix::kMScaleX);
+ fM[1] = m.get(SkMatrix::kMSkewX);
+ fM[2] = m.get(SkMatrix::kMTransX);
+ fM[3] = m.get(SkMatrix::kMSkewY);
+ fM[4] = m.get(SkMatrix::kMScaleY);
+ fM[5] = m.get(SkMatrix::kMTransY);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// k = (y2 - y0, x0 - x2, x2*y0 - x0*y2)
+// l = (y1 - y0, x0 - x1, x1*y0 - x0*y1) * 2*w
+// m = (y2 - y1, x1 - x2, x2*y1 - x1*y2) * 2*w
+void GrPathUtils::getConicKLM(const SkPoint p[3], const SkScalar weight, SkMatrix* out) {
+ SkMatrix& klm = *out;
+ const SkScalar w2 = 2.f * weight;
+ klm[0] = p[2].fY - p[0].fY;
+ klm[1] = p[0].fX - p[2].fX;
+ klm[2] = p[2].fX * p[0].fY - p[0].fX * p[2].fY;
+
+ klm[3] = w2 * (p[1].fY - p[0].fY);
+ klm[4] = w2 * (p[0].fX - p[1].fX);
+ klm[5] = w2 * (p[1].fX * p[0].fY - p[0].fX * p[1].fY);
+
+ klm[6] = w2 * (p[2].fY - p[1].fY);
+ klm[7] = w2 * (p[1].fX - p[2].fX);
+ klm[8] = w2 * (p[2].fX * p[1].fY - p[1].fX * p[2].fY);
+
+ // scale the max absolute value of coeffs to 10
+ SkScalar scale = 0.f;
+ for (int i = 0; i < 9; ++i) {
+ scale = SkMaxScalar(scale, SkScalarAbs(klm[i]));
+ }
+ SkASSERT(scale > 0.f);
+ scale = 10.f / scale;
+ for (int i = 0; i < 9; ++i) {
+ klm[i] *= scale;
+ }
+}
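+
+// Illustrative sketch (editorial): with K, L, and M in hand, a point (x, y) can be tested
+// against the conic by evaluating the implicit function k^2 - l*m; it is zero on the curve and
+// its sign distinguishes the two sides:
+//
+//     SkScalar k = klm[0]*x + klm[1]*y + klm[2];
+//     SkScalar l = klm[3]*x + klm[4]*y + klm[5];
+//     SkScalar m = klm[6]*x + klm[7]*y + klm[8];
+//     SkScalar side = k*k - l*m;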
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// a is the first control point of the cubic.
+// ab is the vector from a to the second control point.
+// dc is the vector from the fourth to the third control point.
+// d is the fourth control point.
+// p is the candidate quadratic control point.
+// this assumes that the cubic doesn't inflect and is simple
+bool is_point_within_cubic_tangents(const SkPoint& a,
+ const SkVector& ab,
+ const SkVector& dc,
+ const SkPoint& d,
+ SkPathPriv::FirstDirection dir,
+ const SkPoint p) {
+ SkVector ap = p - a;
+ SkScalar apXab = ap.cross(ab);
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ if (apXab > 0) {
+ return false;
+ }
+ } else {
+ SkASSERT(SkPathPriv::kCCW_FirstDirection == dir);
+ if (apXab < 0) {
+ return false;
+ }
+ }
+
+ SkVector dp = p - d;
+ SkScalar dpXdc = dp.cross(dc);
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ if (dpXdc < 0) {
+ return false;
+ }
+ } else {
+ SkASSERT(SkPathPriv::kCCW_FirstDirection == dir);
+ if (dpXdc > 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void convert_noninflect_cubic_to_quads(const SkPoint p[4],
+ SkScalar toleranceSqd,
+ SkTArray<SkPoint, true>* quads,
+ int sublevel = 0,
+ bool preserveFirstTangent = true,
+ bool preserveLastTangent = true) {
+ // Notation: Point a is always p[0]. Point b is p[1] unless p[1] == p[0], in which case it is
+ // p[2]. Point d is always p[3]. Point c is p[2] unless p[2] == p[3], in which case it is p[1].
+ SkVector ab = p[1] - p[0];
+ SkVector dc = p[2] - p[3];
+
+ if (SkPointPriv::LengthSqd(ab) < SK_ScalarNearlyZero) {
+ if (SkPointPriv::LengthSqd(dc) < SK_ScalarNearlyZero) {
+ SkPoint* degQuad = quads->push_back_n(3);
+ degQuad[0] = p[0];
+ degQuad[1] = p[0];
+ degQuad[2] = p[3];
+ return;
+ }
+ ab = p[2] - p[0];
+ }
+ if (SkPointPriv::LengthSqd(dc) < SK_ScalarNearlyZero) {
+ dc = p[1] - p[3];
+ }
+
+ static const SkScalar kLengthScale = 3 * SK_Scalar1 / 2;
+ static const int kMaxSubdivs = 10;
+
+ ab.scale(kLengthScale);
+ dc.scale(kLengthScale);
+
+ // c0 and c1 are extrapolations along vectors ab and dc.
+ SkPoint c0 = p[0] + ab;
+ SkPoint c1 = p[3] + dc;
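+
+    // The 3/2 factor undoes degree elevation: a quadratic with control point q1 elevates to a
+    // cubic with p1 = p0 + (2/3)(q1 - p0), so q1 = p0 + (3/2)ab. Hence c0 and c1 coincide
+    // exactly when the cubic is a degree-elevated quadratic.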
+
+ SkScalar dSqd = sublevel > kMaxSubdivs ? 0 : SkPointPriv::DistanceToSqd(c0, c1);
+ if (dSqd < toleranceSqd) {
+ SkPoint newC;
+ if (preserveFirstTangent == preserveLastTangent) {
+ // We used to force a split when both tangents need to be preserved and c0 != c1.
+ // This introduced a large performance regression for tiny paths for no noticeable
+ // quality improvement. However, we aren't quite fulfilling our contract of guaranteeing
+ // the two tangent vectors and this could introduce a missed pixel in
+ // GrAAHairlinePathRenderer.
+ newC = (c0 + c1) * 0.5f;
+ } else if (preserveFirstTangent) {
+ newC = c0;
+ } else {
+ newC = c1;
+ }
+
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = p[0];
+ pts[1] = newC;
+ pts[2] = p[3];
+ return;
+ }
+ SkPoint choppedPts[7];
+ SkChopCubicAtHalf(p, choppedPts);
+ convert_noninflect_cubic_to_quads(
+ choppedPts + 0, toleranceSqd, quads, sublevel + 1, preserveFirstTangent, false);
+ convert_noninflect_cubic_to_quads(
+ choppedPts + 3, toleranceSqd, quads, sublevel + 1, false, preserveLastTangent);
+}
+
+void convert_noninflect_cubic_to_quads_with_constraint(const SkPoint p[4],
+ SkScalar toleranceSqd,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads,
+ int sublevel = 0) {
+ // Notation: Point a is always p[0]. Point b is p[1] unless p[1] == p[0], in which case it is
+ // p[2]. Point d is always p[3]. Point c is p[2] unless p[2] == p[3], in which case it is p[1].
+
+ SkVector ab = p[1] - p[0];
+ SkVector dc = p[2] - p[3];
+
+ if (SkPointPriv::LengthSqd(ab) < SK_ScalarNearlyZero) {
+ if (SkPointPriv::LengthSqd(dc) < SK_ScalarNearlyZero) {
+ SkPoint* degQuad = quads->push_back_n(3);
+ degQuad[0] = p[0];
+ degQuad[1] = p[0];
+ degQuad[2] = p[3];
+ return;
+ }
+ ab = p[2] - p[0];
+ }
+ if (SkPointPriv::LengthSqd(dc) < SK_ScalarNearlyZero) {
+ dc = p[1] - p[3];
+ }
+
+    // When the ab and dc tangents are degenerate, or nearly parallel with the vector from d to a,
+    // the constraint that the quad point falls between the tangents becomes hard to enforce and we are
+ // likely to hit the max subdivision count. However, in this case the cubic is approaching a
+ // line and the accuracy of the quad point isn't so important. We check if the two middle cubic
+ // control points are very close to the baseline vector. If so then we just pick quadratic
+ // points on the control polygon.
+
+ SkVector da = p[0] - p[3];
+ bool doQuads = SkPointPriv::LengthSqd(dc) < SK_ScalarNearlyZero ||
+ SkPointPriv::LengthSqd(ab) < SK_ScalarNearlyZero;
+ if (!doQuads) {
+ SkScalar invDALengthSqd = SkPointPriv::LengthSqd(da);
+ if (invDALengthSqd > SK_ScalarNearlyZero) {
+ invDALengthSqd = SkScalarInvert(invDALengthSqd);
+ // cross(ab, da)^2/length(da)^2 == sqd distance from b to line from d to a.
+ // same goes for point c using vector cd.
+ SkScalar detABSqd = ab.cross(da);
+ detABSqd = SkScalarSquare(detABSqd);
+ SkScalar detDCSqd = dc.cross(da);
+ detDCSqd = SkScalarSquare(detDCSqd);
+ if (detABSqd * invDALengthSqd < toleranceSqd &&
+ detDCSqd * invDALengthSqd < toleranceSqd) {
+ doQuads = true;
+ }
+ }
+ }
+ if (doQuads) {
+ SkPoint b = p[0] + ab;
+ SkPoint c = p[3] + dc;
+ SkPoint mid = b + c;
+ mid.scale(SK_ScalarHalf);
+ // Insert two quadratics to cover the case when ab points away from d and/or dc
+ // points away from a.
+ if (SkVector::DotProduct(da, dc) < 0 || SkVector::DotProduct(ab, da) > 0) {
+ SkPoint* qpts = quads->push_back_n(6);
+ qpts[0] = p[0];
+ qpts[1] = b;
+ qpts[2] = mid;
+ qpts[3] = mid;
+ qpts[4] = c;
+ qpts[5] = p[3];
+ } else {
+ SkPoint* qpts = quads->push_back_n(3);
+ qpts[0] = p[0];
+ qpts[1] = mid;
+ qpts[2] = p[3];
+ }
+ return;
+ }
+
+ static const SkScalar kLengthScale = 3 * SK_Scalar1 / 2;
+ static const int kMaxSubdivs = 10;
+
+ ab.scale(kLengthScale);
+ dc.scale(kLengthScale);
+
+ // c0 and c1 are extrapolations along vectors ab and dc.
+ SkVector c0 = p[0] + ab;
+ SkVector c1 = p[3] + dc;
+
+ SkScalar dSqd = sublevel > kMaxSubdivs ? 0 : SkPointPriv::DistanceToSqd(c0, c1);
+ if (dSqd < toleranceSqd) {
+ SkPoint cAvg = (c0 + c1) * 0.5f;
+ bool subdivide = false;
+
+ if (!is_point_within_cubic_tangents(p[0], ab, dc, p[3], dir, cAvg)) {
+ // choose a new cAvg that is the intersection of the two tangent lines.
+ ab = SkPointPriv::MakeOrthog(ab);
+ SkScalar z0 = -ab.dot(p[0]);
+ dc = SkPointPriv::MakeOrthog(dc);
+ SkScalar z1 = -dc.dot(p[3]);
+ cAvg.fX = ab.fY * z1 - z0 * dc.fY;
+ cAvg.fY = z0 * dc.fX - ab.fX * z1;
+ SkScalar z = ab.fX * dc.fY - ab.fY * dc.fX;
+ z = SkScalarInvert(z);
+ cAvg.fX *= z;
+ cAvg.fY *= z;
+ if (sublevel <= kMaxSubdivs) {
+ SkScalar d0Sqd = SkPointPriv::DistanceToSqd(c0, cAvg);
+ SkScalar d1Sqd = SkPointPriv::DistanceToSqd(c1, cAvg);
+ // We need to subdivide if d0 + d1 > tolerance but we have the sqd values. We know
+ // the distances and tolerance can't be negative.
+ // (d0 + d1)^2 > toleranceSqd
+ // d0Sqd + 2*d0*d1 + d1Sqd > toleranceSqd
+ SkScalar d0d1 = SkScalarSqrt(d0Sqd * d1Sqd);
+ subdivide = 2 * d0d1 + d0Sqd + d1Sqd > toleranceSqd;
+ }
+ }
+ if (!subdivide) {
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = p[0];
+ pts[1] = cAvg;
+ pts[2] = p[3];
+ return;
+ }
+ }
+ SkPoint choppedPts[7];
+ SkChopCubicAtHalf(p, choppedPts);
+ convert_noninflect_cubic_to_quads_with_constraint(
+ choppedPts + 0, toleranceSqd, dir, quads, sublevel + 1);
+ convert_noninflect_cubic_to_quads_with_constraint(
+ choppedPts + 3, toleranceSqd, dir, quads, sublevel + 1);
+}
+}  // anonymous namespace
+
+void GrPathUtils::convertCubicToQuads(const SkPoint p[4],
+ SkScalar tolScale,
+ SkTArray<SkPoint, true>* quads) {
+ if (!p[0].isFinite() || !p[1].isFinite() || !p[2].isFinite() || !p[3].isFinite()) {
+ return;
+ }
+ if (!SkScalarIsFinite(tolScale)) {
+ return;
+ }
+ SkPoint chopped[10];
+ int count = SkChopCubicAtInflections(p, chopped);
+
+ const SkScalar tolSqd = SkScalarSquare(tolScale);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint* cubic = chopped + 3*i;
+ convert_noninflect_cubic_to_quads(cubic, tolSqd, quads);
+ }
+}
+
+void GrPathUtils::convertCubicToQuadsConstrainToTangents(const SkPoint p[4],
+ SkScalar tolScale,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads) {
+ if (!p[0].isFinite() || !p[1].isFinite() || !p[2].isFinite() || !p[3].isFinite()) {
+ return;
+ }
+ if (!SkScalarIsFinite(tolScale)) {
+ return;
+ }
+ SkPoint chopped[10];
+ int count = SkChopCubicAtInflections(p, chopped);
+
+ const SkScalar tolSqd = SkScalarSquare(tolScale);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint* cubic = chopped + 3*i;
+ convert_noninflect_cubic_to_quads_with_constraint(cubic, tolSqd, dir, quads);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+using ExcludedTerm = GrPathUtils::ExcludedTerm;
+
+ExcludedTerm GrPathUtils::calcCubicInverseTransposePowerBasisMatrix(const SkPoint p[4],
+ SkMatrix* out) {
+ GR_STATIC_ASSERT(SK_SCALAR_IS_FLOAT);
+
+ // First convert the bezier coordinates p[0..3] to power basis coefficients X,Y(,W=[0 0 0 1]).
+ // M3 is the matrix that does this conversion. The homogeneous equation for the cubic becomes:
+ //
+ // | X Y 0 |
+ // C(t,s) = [t^3 t^2*s t*s^2 s^3] * | . . 0 |
+ // | . . 0 |
+ // | . . 1 |
+ //
+ const Sk4f M3[3] = {Sk4f(-1, 3, -3, 1),
+ Sk4f(3, -6, 3, 0),
+ Sk4f(-3, 3, 0, 0)};
+ // 4th col of M3 = Sk4f(1, 0, 0, 0)};
+ Sk4f X(p[3].x(), 0, 0, 0);
+ Sk4f Y(p[3].y(), 0, 0, 0);
+ for (int i = 2; i >= 0; --i) {
+ X += M3[i] * p[i].x();
+ Y += M3[i] * p[i].y();
+ }
+
+ // The matrix is 3x4. In order to invert it, we first need to make it square by throwing out one
+ // of the middle two rows. We toss the row that leaves us with the largest absolute determinant.
+ // Since the right column will be [0 0 1], the respective determinants reduce to x0*y2 - y0*x2
+ // and x0*y1 - y0*x1.
+ SkScalar dets[4];
+ Sk4f D = SkNx_shuffle<0,0,2,1>(X) * SkNx_shuffle<2,1,0,0>(Y);
+ D -= SkNx_shuffle<2,3,0,1>(D);
+ D.store(dets);
+ ExcludedTerm skipTerm = SkScalarAbs(dets[0]) > SkScalarAbs(dets[1]) ?
+ ExcludedTerm::kQuadraticTerm : ExcludedTerm::kLinearTerm;
+ SkScalar det = dets[ExcludedTerm::kQuadraticTerm == skipTerm ? 0 : 1];
+ if (0 == det) {
+ return ExcludedTerm::kNonInvertible;
+ }
+ SkScalar rdet = 1 / det;
+
+ // Compute the inverse-transpose of the power basis matrix with the 'skipRow'th row removed.
+ // Since W=[0 0 0 1], it follows that our corresponding solution will be equal to:
+ //
+ // | y1 -x1 x1*y2 - y1*x2 |
+ // 1/det * | -y0 x0 -x0*y2 + y0*x2 |
+ // | 0 0 det |
+ //
+ SkScalar x[4], y[4], z[4];
+ X.store(x);
+ Y.store(y);
+ (X * SkNx_shuffle<3,3,3,3>(Y) - Y * SkNx_shuffle<3,3,3,3>(X)).store(z);
+
+ int middleRow = ExcludedTerm::kQuadraticTerm == skipTerm ? 2 : 1;
+ out->setAll( y[middleRow] * rdet, -x[middleRow] * rdet, z[middleRow] * rdet,
+ -y[0] * rdet, x[0] * rdet, -z[0] * rdet,
+ 0, 0, 1);
+
+ return skipTerm;
+}
+
+inline static void calc_serp_kcoeffs(SkScalar tl, SkScalar sl, SkScalar tm, SkScalar sm,
+ ExcludedTerm skipTerm, SkScalar outCoeffs[3]) {
+ SkASSERT(ExcludedTerm::kQuadraticTerm == skipTerm || ExcludedTerm::kLinearTerm == skipTerm);
+ outCoeffs[0] = 0;
+ outCoeffs[1] = (ExcludedTerm::kLinearTerm == skipTerm) ? sl*sm : -tl*sm - tm*sl;
+ outCoeffs[2] = tl*tm;
+}
+
+inline static void calc_serp_lmcoeffs(SkScalar t, SkScalar s, ExcludedTerm skipTerm,
+ SkScalar outCoeffs[3]) {
+ SkASSERT(ExcludedTerm::kQuadraticTerm == skipTerm || ExcludedTerm::kLinearTerm == skipTerm);
+ outCoeffs[0] = -s*s*s;
+ outCoeffs[1] = (ExcludedTerm::kLinearTerm == skipTerm) ? 3*s*s*t : -3*s*t*t;
+ outCoeffs[2] = t*t*t;
+}
+
+inline static void calc_loop_kcoeffs(SkScalar td, SkScalar sd, SkScalar te, SkScalar se,
+ SkScalar tdse, SkScalar tesd, ExcludedTerm skipTerm,
+ SkScalar outCoeffs[3]) {
+ SkASSERT(ExcludedTerm::kQuadraticTerm == skipTerm || ExcludedTerm::kLinearTerm == skipTerm);
+ outCoeffs[0] = 0;
+ outCoeffs[1] = (ExcludedTerm::kLinearTerm == skipTerm) ? sd*se : -tdse - tesd;
+ outCoeffs[2] = td*te;
+}
+
+inline static void calc_loop_lmcoeffs(SkScalar t2, SkScalar s2, SkScalar t1, SkScalar s1,
+ SkScalar t2s1, SkScalar t1s2, ExcludedTerm skipTerm,
+ SkScalar outCoeffs[3]) {
+ SkASSERT(ExcludedTerm::kQuadraticTerm == skipTerm || ExcludedTerm::kLinearTerm == skipTerm);
+ outCoeffs[0] = -s2*s2*s1;
+ outCoeffs[1] = (ExcludedTerm::kLinearTerm == skipTerm) ? s2 * (2*t2s1 + t1s2)
+ : -t2 * (t2s1 + 2*t1s2);
+ outCoeffs[2] = t2*t2*t1;
+}
+
+// For the case when a cubic bezier is actually a quadratic. We duplicate k in m so that the
+// implicit becomes:
+//
+// k^3 - l*m == k^3 - l*k == k * (k^2 - l)
+//
+// In the quadratic case we can simply assign fixed values at each control point:
+//
+// | ..K.. | | pts[0] pts[1] pts[2] pts[3] | | 0 1/3 2/3 1 |
+// | ..L.. | * | . . . . | == | 0 0 1/3 1 |
+// | ..K.. | | 1 1 1 1 | | 0 1/3 2/3 1 |
+//
+static void calc_quadratic_klm(const SkPoint pts[4], double d3, SkMatrix* klm) {
+ SkMatrix klmAtPts;
+ klmAtPts.setAll(0, 1.f/3, 1,
+ 0, 0, 1,
+ 0, 1.f/3, 1);
+
+ SkMatrix inversePts;
+ inversePts.setAll(pts[0].x(), pts[1].x(), pts[3].x(),
+ pts[0].y(), pts[1].y(), pts[3].y(),
+ 1, 1, 1);
+ SkAssertResult(inversePts.invert(&inversePts));
+
+ klm->setConcat(klmAtPts, inversePts);
+
+ // If d3 > 0 we need to flip the orientation of our curve
+ // This is done by negating the k and l values
+ if (d3 > 0) {
+ klm->postScale(-1, -1);
+ }
+}
+
+// For the case when a cubic bezier is actually a line. We set K=0, L=1, M=-line, which results in
+// the following implicit:
+//
+// k^3 - l*m == 0^3 - 1*(-line) == -(-line) == line
+//
+static void calc_line_klm(const SkPoint pts[4], SkMatrix* klm) {
+ SkScalar ny = pts[0].x() - pts[3].x();
+ SkScalar nx = pts[3].y() - pts[0].y();
+ SkScalar k = nx * pts[0].x() + ny * pts[0].y();
+ klm->setAll( 0, 0, 0,
+ 0, 0, 1,
+ -nx, -ny, k);
+}
+
+SkCubicType GrPathUtils::getCubicKLM(const SkPoint src[4], SkMatrix* klm, double tt[2],
+ double ss[2]) {
+ double d[4];
+ SkCubicType type = SkClassifyCubic(src, tt, ss, d);
+
+ if (SkCubicType::kLineOrPoint == type) {
+ calc_line_klm(src, klm);
+ return SkCubicType::kLineOrPoint;
+ }
+
+ if (SkCubicType::kQuadratic == type) {
+ calc_quadratic_klm(src, d[3], klm);
+ return SkCubicType::kQuadratic;
+ }
+
+ SkMatrix CIT;
+ ExcludedTerm skipTerm = calcCubicInverseTransposePowerBasisMatrix(src, &CIT);
+ if (ExcludedTerm::kNonInvertible == skipTerm) {
+ // This could technically also happen if the curve were quadratic, but SkClassifyCubic
+ // should have detected that case already with tolerance.
+ calc_line_klm(src, klm);
+ return SkCubicType::kLineOrPoint;
+ }
+
+ const SkScalar t0 = static_cast<SkScalar>(tt[0]), t1 = static_cast<SkScalar>(tt[1]),
+ s0 = static_cast<SkScalar>(ss[0]), s1 = static_cast<SkScalar>(ss[1]);
+
+ SkMatrix klmCoeffs;
+ switch (type) {
+ case SkCubicType::kCuspAtInfinity:
+ SkASSERT(1 == t1 && 0 == s1); // Infinity.
+ // fallthru.
+ case SkCubicType::kLocalCusp:
+ case SkCubicType::kSerpentine:
+ calc_serp_kcoeffs(t0, s0, t1, s1, skipTerm, &klmCoeffs[0]);
+ calc_serp_lmcoeffs(t0, s0, skipTerm, &klmCoeffs[3]);
+ calc_serp_lmcoeffs(t1, s1, skipTerm, &klmCoeffs[6]);
+ break;
+ case SkCubicType::kLoop: {
+ const SkScalar tdse = t0 * s1;
+ const SkScalar tesd = t1 * s0;
+ calc_loop_kcoeffs(t0, s0, t1, s1, tdse, tesd, skipTerm, &klmCoeffs[0]);
+ calc_loop_lmcoeffs(t0, s0, t1, s1, tdse, tesd, skipTerm, &klmCoeffs[3]);
+ calc_loop_lmcoeffs(t1, s1, t0, s0, tesd, tdse, skipTerm, &klmCoeffs[6]);
+ break;
+ }
+ default:
+ SK_ABORT("Unexpected cubic type.");
+ break;
+ }
+
+ klm->setConcat(klmCoeffs, CIT);
+ return type;
+}
+
+int GrPathUtils::chopCubicAtLoopIntersection(const SkPoint src[4], SkPoint dst[10], SkMatrix* klm,
+ int* loopIndex) {
+ SkSTArray<2, SkScalar> chops;
+ *loopIndex = -1;
+
+ double t[2], s[2];
+ if (SkCubicType::kLoop == GrPathUtils::getCubicKLM(src, klm, t, s)) {
+ SkScalar t0 = static_cast<SkScalar>(t[0] / s[0]);
+ SkScalar t1 = static_cast<SkScalar>(t[1] / s[1]);
+ SkASSERT(t0 <= t1); // Technically t0 != t1 in a loop, but there may be FP error.
+
+ if (t0 < 1 && t1 > 0) {
+ *loopIndex = 0;
+ if (t0 > 0) {
+ chops.push_back(t0);
+ *loopIndex = 1;
+ }
+ if (t1 < 1) {
+ chops.push_back(t1);
+ *loopIndex = chops.count() - 1;
+ }
+ }
+ }
+
+ SkChopCubicAt(src, dst, chops.begin(), chops.count());
+ return chops.count() + 1;
+}
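+
+// Usage sketch (editorial; 'pts' is the caller's cubic): chop a loop cubic and flip only the
+// loop section, per the contract described in GrPathUtils.h:
+//
+//     SkPoint chopped[10];
+//     SkMatrix klm;
+//     int loopIdx;
+//     int count = GrPathUtils::chopCubicAtLoopIntersection(pts, chopped, &klm, &loopIdx);
+//     if (loopIdx >= 0) {
+//         klm.postScale(-1, -1);  // negate k and l for the loop section, as the header directs
+//     }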
diff --git a/gfx/skia/skia/src/gpu/geometry/GrPathUtils.h b/gfx/skia/skia/src/gpu/geometry/GrPathUtils.h
new file mode 100644
index 0000000000..f44f2c0276
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrPathUtils.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathUtils_DEFINED
+#define GrPathUtils_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+
+class SkMatrix;
+
+/**
+ * Utilities for evaluating paths.
+ */
+namespace GrPathUtils {
+ // Very small tolerances will be increased to a minimum threshold value, to avoid division
+ // problems in subsequent math.
+ SkScalar scaleToleranceToSrc(SkScalar devTol,
+ const SkMatrix& viewM,
+ const SkRect& pathBounds);
+
+ int worstCasePointCount(const SkPath&,
+ int* subpaths,
+ SkScalar tol);
+
+ uint32_t quadraticPointCount(const SkPoint points[], SkScalar tol);
+
+ uint32_t generateQuadraticPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft);
+
+ uint32_t cubicPointCount(const SkPoint points[], SkScalar tol);
+
+ uint32_t generateCubicPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft);
+
+ // A 2x3 matrix that goes from the 2d space coordinates to UV space where
+ // u^2-v = 0 specifies the quad. The matrix is determined by the control
+ // points of the quadratic.
+ class QuadUVMatrix {
+ public:
+ QuadUVMatrix() {}
+ // Initialize the matrix from the control pts
+ QuadUVMatrix(const SkPoint controlPts[3]) { this->set(controlPts); }
+ void set(const SkPoint controlPts[3]);
+
+ /**
+ * Applies the matrix to vertex positions to compute UV coords.
+ *
+ * vertices is a pointer to the first vertex.
+ * vertexCount is the number of vertices.
+ * stride is the size of each vertex.
+ * uvOffset is the offset of the UV values within each vertex.
+ */
+ void apply(void* vertices, int vertexCount, size_t stride, size_t uvOffset) const {
+ intptr_t xyPtr = reinterpret_cast<intptr_t>(vertices);
+ intptr_t uvPtr = reinterpret_cast<intptr_t>(vertices) + uvOffset;
+ float sx = fM[0];
+ float kx = fM[1];
+ float tx = fM[2];
+ float ky = fM[3];
+ float sy = fM[4];
+ float ty = fM[5];
+ for (int i = 0; i < vertexCount; ++i) {
+ const SkPoint* xy = reinterpret_cast<const SkPoint*>(xyPtr);
+ SkPoint* uv = reinterpret_cast<SkPoint*>(uvPtr);
+ uv->fX = sx * xy->fX + kx * xy->fY + tx;
+ uv->fY = ky * xy->fX + sy * xy->fY + ty;
+ xyPtr += stride;
+ uvPtr += stride;
+ }
+ }
+ private:
+ float fM[6];
+ };
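+
+    // Usage sketch (editorial): given three quad control points, a QuadUVMatrix can stamp UV
+    // coordinates into an interleaved vertex buffer:
+    //
+    //     struct Vertex { SkPoint fPos; SkPoint fUV; };
+    //     GrPathUtils::QuadUVMatrix uvm(quadPts);
+    //     uvm.apply(verts, vertexCount, sizeof(Vertex), offsetof(Vertex, fUV));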
+
+ // Input is 3 control points and a weight for a bezier conic. Calculates the
+ // three linear functionals (K,L,M) that represent the implicit equation of the
+ // conic, k^2 - lm.
+ //
+ // Output: klm holds the linear functionals K,L,M as row vectors:
+ //
+ // | ..K.. | | x | | k |
+ // | ..L.. | * | y | == | l |
+ // | ..M.. | | 1 | | m |
+ //
+ void getConicKLM(const SkPoint p[3], const SkScalar weight, SkMatrix* klm);
+
+ // Converts a cubic into a sequence of quads. If working in device space
+ // use tolScale = 1, otherwise set based on stretchiness of the matrix. The
+    // result is a sequence of 3-point sets, one per quad. This will preserve the starting and
+ // ending tangent vectors (modulo FP precision).
+ void convertCubicToQuads(const SkPoint p[4],
+ SkScalar tolScale,
+ SkTArray<SkPoint, true>* quads);
+
+ // When we approximate a cubic {a,b,c,d} with a quadratic we may have to
+ // ensure that the new control point lies between the lines ab and cd. The
+ // convex path renderer requires this. It starts with a path where all the
+ // control points taken together form a convex polygon. It relies on this
+ // property and the quadratic approximation of cubics step cannot alter it.
+ // This variation enforces this constraint. The cubic must be simple and dir
+ // must specify the orientation of the contour containing the cubic.
+ void convertCubicToQuadsConstrainToTangents(const SkPoint p[4],
+ SkScalar tolScale,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads);
+
+ enum class ExcludedTerm {
+ kNonInvertible,
+ kQuadraticTerm,
+ kLinearTerm
+ };
+
+ // Computes the inverse-transpose of the cubic's power basis matrix, after removing a specific
+ // row of coefficients.
+ //
+ // E.g. if the cubic is defined in power basis form as follows:
+ //
+ // | x3 y3 0 |
+ // C(t,s) = [t^3 t^2*s t*s^2 s^3] * | x2 y2 0 |
+ // | x1 y1 0 |
+ // | x0 y0 1 |
+ //
+ // And the excluded term is "kQuadraticTerm", then the resulting inverse-transpose will be:
+ //
+ // | x3 y3 0 | -1 T
+ // | x1 y1 0 |
+ // | x0 y0 1 |
+ //
+ // (The term to exclude is chosen based on maximizing the resulting matrix determinant.)
+ //
+ // This can be used to find the KLM linear functionals:
+ //
+ // | ..K.. | | ..kcoeffs.. |
+ // | ..L.. | = | ..lcoeffs.. | * inverse_transpose_power_basis_matrix
+ // | ..M.. | | ..mcoeffs.. |
+ //
+ // NOTE: the same term that was excluded here must also be removed from the corresponding column
+ // of the klmcoeffs matrix.
+ //
+ // Returns which row of coefficients was removed, or kNonInvertible if the cubic was degenerate.
+ ExcludedTerm calcCubicInverseTransposePowerBasisMatrix(const SkPoint p[4], SkMatrix* out);
+
+ // Computes the KLM linear functionals for the cubic implicit form. The "right" side of the
+ // curve (when facing in the direction of increasing parameter values) will be the area that
+ // satisfies:
+ //
+ // k^3 < l*m
+ //
+ // Output:
+ //
+ // klm: Holds the linear functionals K,L,M as row vectors:
+ //
+ // | ..K.. | | x | | k |
+ // | ..L.. | * | y | == | l |
+ // | ..M.. | | 1 | | m |
+ //
+ // NOTE: the KLM lines are calculated in the same space as the input control points. If you
+ // transform the points the lines will also need to be transformed. This can be done by mapping
+ // the lines with the inverse-transpose of the matrix used to map the points.
+ //
+ // t[],s[]: These are set to the two homogeneous parameter values at which points the lines L&M
+ // intersect with K (See SkClassifyCubic).
+ //
+ // Returns the cubic's classification.
+ SkCubicType getCubicKLM(const SkPoint src[4], SkMatrix* klm, double t[2], double s[2]);
+
+ // Chops the cubic bezier passed in by src, at the double point (intersection point)
+ // if the curve is a cubic loop. If it is a loop, there will be two parametric values for
+ // the double point: t1 and t2. We chop the cubic at these values if they are between 0 and 1.
+ // Return value:
+ // Value of 3: t1 and t2 are both between (0,1), and dst will contain the three cubics,
+ // dst[0..3], dst[3..6], and dst[6..9] if dst is not nullptr
+ // Value of 2: Only one of t1 and t2 are between (0,1), and dst will contain the two cubics,
+ // dst[0..3] and dst[3..6] if dst is not nullptr
+ // Value of 1: Neither t1 nor t2 are between (0,1), and dst will contain the one original cubic,
+ // src[0..3]
+ //
+ // Output:
+ //
+ // klm: Holds the linear functionals K,L,M as row vectors. (See getCubicKLM().)
+ //
+ // loopIndex: This value will tell the caller which of the chopped sections (if any) are the
+ // actual loop. A value of -1 means there is no loop section. The caller can then use
+ // this value to decide how/if they want to flip the orientation of this section.
+ // The flip should be done by negating the k and l values as follows:
+ //
+ // KLM.postScale(-1, -1)
+ int chopCubicAtLoopIntersection(const SkPoint src[4], SkPoint dst[10], SkMatrix* klm,
+ int* loopIndex);
+
+    // When tessellating curved paths into linear segments, this defines the maximum distance
+    // in screen space by which a segment may deviate from the mathematically correct value.
+ // Above this value, the segment will be subdivided.
+ // This value was chosen to approximate the supersampling accuracy of the raster path (16
+ // samples, or one quarter pixel).
+ static const SkScalar kDefaultTolerance = SkDoubleToScalar(0.25);
+
+ // We guarantee that no quad or cubic will ever produce more than this many points
+ static const int kMaxPointsPerCurve = 1 << 10;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/geometry/GrQuad.cpp b/gfx/skia/skia/src/gpu/geometry/GrQuad.cpp
new file mode 100644
index 0000000000..3df11cafe5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrQuad.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/geometry/GrQuad.h"
+
+#include "include/core/SkMatrix.h"
+
+using V4f = skvx::Vec<4, float>;
+
+static bool aa_affects_rect(float ql, float qt, float qr, float qb) {
+ return !SkScalarIsInt(ql) || !SkScalarIsInt(qr) || !SkScalarIsInt(qt) || !SkScalarIsInt(qb);
+}
+
+static void map_rect_translate_scale(const SkRect& rect, const SkMatrix& m,
+ V4f* xs, V4f* ys) {
+ SkMatrix::TypeMask tm = m.getType();
+ SkASSERT(tm <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask));
+
+ V4f r = V4f::Load(&rect);
+ if (tm > SkMatrix::kIdentity_Mask) {
+ const V4f t{m.getTranslateX(), m.getTranslateY(), m.getTranslateX(), m.getTranslateY()};
+ if (tm <= SkMatrix::kTranslate_Mask) {
+ r += t;
+ } else {
+ const V4f s{m.getScaleX(), m.getScaleY(), m.getScaleX(), m.getScaleY()};
+ r = r * s + t;
+ }
+ }
+ *xs = skvx::shuffle<0, 0, 2, 2>(r);
+ *ys = skvx::shuffle<1, 3, 1, 3>(r);
+}
+
+static void map_quad_general(const V4f& qx, const V4f& qy, const SkMatrix& m,
+ V4f* xs, V4f* ys, V4f* ws) {
+ *xs = mad(m.getScaleX(), qx, mad(m.getSkewX(), qy, m.getTranslateX()));
+ *ys = mad(m.getSkewY(), qx, mad(m.getScaleY(), qy, m.getTranslateY()));
+ if (m.hasPerspective()) {
+ V4f w = mad(m.getPerspX(), qx,
+ mad(m.getPerspY(), qy, m.get(SkMatrix::kMPersp2)));
+ if (ws) {
+ // Output the calculated w coordinates
+ *ws = w;
+ } else {
+ // Apply perspective division immediately
+ V4f iw = 1.f / w;
+ *xs *= iw;
+ *ys *= iw;
+ }
+ } else if (ws) {
+ *ws = 1.f;
+ }
+}
+
+static void map_rect_general(const SkRect& rect, const SkMatrix& matrix,
+ V4f* xs, V4f* ys, V4f* ws) {
+ V4f rx{rect.fLeft, rect.fLeft, rect.fRight, rect.fRight};
+ V4f ry{rect.fTop, rect.fBottom, rect.fTop, rect.fBottom};
+ map_quad_general(rx, ry, matrix, xs, ys, ws);
+}
+
+// Rearranges (top-left, top-right, bottom-right, bottom-left) ordered skQuadPts into xs and ys
+// ordered (top-left, bottom-left, top-right, bottom-right)
+static void rearrange_sk_to_gr_points(const SkPoint skQuadPts[4], V4f* xs, V4f* ys) {
+ *xs = V4f{skQuadPts[0].fX, skQuadPts[3].fX, skQuadPts[1].fX, skQuadPts[2].fX};
+ *ys = V4f{skQuadPts[0].fY, skQuadPts[3].fY, skQuadPts[1].fY, skQuadPts[2].fY};
+}
+
+// If an SkRect is transformed by this matrix, what class of quad is required to represent it.
+static GrQuad::Type quad_type_for_transformed_rect(const SkMatrix& matrix) {
+ if (matrix.rectStaysRect()) {
+ return GrQuad::Type::kAxisAligned;
+ } else if (matrix.preservesRightAngles()) {
+ return GrQuad::Type::kRectilinear;
+ } else if (matrix.hasPerspective()) {
+ return GrQuad::Type::kPerspective;
+ } else {
+ return GrQuad::Type::kGeneral;
+ }
+}
+
+// Perform minimal analysis of 'pts' (which are suitable for MakeFromSkQuad), and determine a
+// quad type that will be as minimally general as possible.
+static GrQuad::Type quad_type_for_points(const SkPoint pts[4], const SkMatrix& matrix) {
+ if (matrix.hasPerspective()) {
+ return GrQuad::Type::kPerspective;
+ }
+ // If 'pts' was formed by SkRect::toQuad() and not transformed further, it is safe to use the
+    // quad type derived from 'matrix'. Otherwise don't waste any more time and assume kGeneral
+    // (the most general 2D quad).
+ if ((pts[0].fX == pts[3].fX && pts[1].fX == pts[2].fX) &&
+ (pts[0].fY == pts[1].fY && pts[2].fY == pts[3].fY)) {
+ return quad_type_for_transformed_rect(matrix);
+ } else {
+ return GrQuad::Type::kGeneral;
+ }
+}
+
+GrQuad GrQuad::MakeFromRect(const SkRect& rect, const SkMatrix& m) {
+ V4f x, y, w;
+ SkMatrix::TypeMask tm = m.getType();
+ Type type;
+ if (tm <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ map_rect_translate_scale(rect, m, &x, &y);
+ w = 1.f;
+ type = Type::kAxisAligned;
+ } else {
+ map_rect_general(rect, m, &x, &y, &w);
+ type = quad_type_for_transformed_rect(m);
+ }
+ return GrQuad(x, y, w, type);
+}
+
+GrQuad GrQuad::MakeFromSkQuad(const SkPoint pts[4], const SkMatrix& matrix) {
+ V4f xs, ys;
+ rearrange_sk_to_gr_points(pts, &xs, &ys);
+ Type type = quad_type_for_points(pts, matrix);
+ if (matrix.isIdentity()) {
+ return GrQuad(xs, ys, 1.f, type);
+ } else {
+ V4f mx, my, mw;
+ map_quad_general(xs, ys, matrix, &mx, &my, &mw);
+ return GrQuad(mx, my, mw, type);
+ }
+}
+
+bool GrQuad::aaHasEffectOnRect() const {
+ SkASSERT(this->quadType() == Type::kAxisAligned);
+ // If rect, ws must all be 1s so no need to divide
+ return aa_affects_rect(fX[0], fY[0], fX[3], fY[3]);
+}
+
+bool GrQuad::asRect(SkRect* rect) const {
+ if (this->quadType() != Type::kAxisAligned) {
+ return false;
+ }
+
+ *rect = this->bounds();
+ // v0 at the geometric top-left is unique amongst axis-aligned vertex orders
+ // (90, 180, 270 rotations or axis flips all move v0).
+ return fX[0] == rect->fLeft && fY[0] == rect->fTop;
+}
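+
+// Usage sketch (editorial): MakeFromRect records how much generality the matrix forces on the
+// quad, which later consumers use to choose cheaper code paths:
+//
+//     GrQuad q = GrQuad::MakeFromRect(SkRect::MakeWH(20, 20), SkMatrix::MakeTrans(10.5f, 0.f));
+//     SkASSERT(q.quadType() == GrQuad::Type::kAxisAligned);
+//     SkASSERT(q.aaHasEffectOnRect());  // the 10.5 offset is fractional, so AA changes coverage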
diff --git a/gfx/skia/skia/src/gpu/geometry/GrQuad.h b/gfx/skia/skia/src/gpu/geometry/GrQuad.h
new file mode 100644
index 0000000000..9131ff6466
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrQuad.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrQuad_DEFINED
+#define GrQuad_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/private/SkVx.h"
+
+/**
+ * GrQuad is a collection of 4 points which can be used to represent an arbitrary quadrilateral. The
+ * points make a triangle strip with CCW triangles (top-left, bottom-left, top-right, bottom-right).
+ */
+class GrQuad {
+public:
+ // Quadrilaterals can be classified in several useful ways that assist AA tessellation and other
+ // analysis when drawing, in particular, knowing if it was originally a rectangle transformed by
+ // certain types of matrices:
+ enum class Type {
+ // The 4 points remain an axis-aligned rectangle; their logical indices may not respect
+        // TL, BL, TR, BR ordering if the transform was a 90 degree rotation or mirror.
+ kAxisAligned,
+ // The 4 points represent a rectangle subjected to a rotation, its corners are right angles.
+ kRectilinear,
+ // Arbitrary 2D quadrilateral; may have been a rectangle transformed with skew or some
+ // clipped polygon. Its w coordinates will all be 1.
+ kGeneral,
+ // Even more general-purpose than kGeneral, this allows the w coordinates to be non-unity.
+ kPerspective,
+ kLast = kPerspective
+ };
+ static const int kTypeCount = static_cast<int>(Type::kLast) + 1;
+
+ GrQuad() = default;
+
+ explicit GrQuad(const SkRect& rect)
+ : fX{rect.fLeft, rect.fLeft, rect.fRight, rect.fRight}
+ , fY{rect.fTop, rect.fBottom, rect.fTop, rect.fBottom}
+ , fW{1.f, 1.f, 1.f, 1.f}
+ , fType(Type::kAxisAligned) {}
+
+ GrQuad(const skvx::Vec<4, float>& xs, const skvx::Vec<4, float>& ys, Type type)
+ : fType(type) {
+ SkASSERT(type != Type::kPerspective);
+ xs.store(fX);
+ ys.store(fY);
+ fW[0] = fW[1] = fW[2] = fW[3] = 1.f;
+ }
+
+ GrQuad(const skvx::Vec<4, float>& xs, const skvx::Vec<4, float>& ys,
+ const skvx::Vec<4, float>& ws, Type type)
+ : fType(type) {
+ xs.store(fX);
+ ys.store(fY);
+ ws.store(fW);
+ }
+
+ // Copy 4 values from each of the arrays into the quad's components
+ GrQuad(const float xs[4], const float ys[4], const float ws[4], Type type)
+ : fType(type) {
+ memcpy(fX, xs, 4 * sizeof(float));
+ memcpy(fY, ys, 4 * sizeof(float));
+ memcpy(fW, ws, 4 * sizeof(float));
+ }
+
+ static GrQuad MakeFromRect(const SkRect&, const SkMatrix&);
+
+ // Creates a GrQuad from the quadrilateral 'pts', transformed by the matrix. The input
+ // points array is arranged as per SkRect::toQuad (top-left, top-right, bottom-right,
+ // bottom-left). The returned instance's point order will still be CCW tri-strip order.
+ static GrQuad MakeFromSkQuad(const SkPoint pts[4], const SkMatrix&);
+
+ GrQuad& operator=(const GrQuad&) = default;
+
+ SkPoint3 point3(int i) const { return {fX[i], fY[i], fW[i]}; }
+
+ SkPoint point(int i) const {
+ if (fType == Type::kPerspective) {
+ return {fX[i] / fW[i], fY[i] / fW[i]};
+ } else {
+ return {fX[i], fY[i]};
+ }
+ }
+
+ SkRect bounds() const {
+ auto x = this->x4f();
+ auto y = this->y4f();
+ if (fType == Type::kPerspective) {
+ auto iw = this->iw4f();
+ x *= iw;
+ y *= iw;
+ }
+
+ return {min(x), min(y), max(x), max(y)};
+ }
+
+ bool isFinite() const {
+        // If any coordinate is infinity or NaN, then multiplying it by 0 will make accum NaN
+ float accum = 0;
+ for (int i = 0; i < 4; ++i) {
+ accum *= fX[i];
+ accum *= fY[i];
+ accum *= fW[i];
+ }
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+ return !SkScalarIsNaN(accum);
+ }
+
+ float x(int i) const { return fX[i]; }
+ float y(int i) const { return fY[i]; }
+ float w(int i) const { return fW[i]; }
+ float iw(int i) const { return sk_ieee_float_divide(1.f, fW[i]); }
+
+ skvx::Vec<4, float> x4f() const { return skvx::Vec<4, float>::Load(fX); }
+ skvx::Vec<4, float> y4f() const { return skvx::Vec<4, float>::Load(fY); }
+ skvx::Vec<4, float> w4f() const { return skvx::Vec<4, float>::Load(fW); }
+ skvx::Vec<4, float> iw4f() const { return 1.f / this->w4f(); }
+
+ Type quadType() const { return fType; }
+
+ bool hasPerspective() const { return fType == Type::kPerspective; }
+
+ // True if anti-aliasing affects this quad. Only valid when quadType == kAxisAligned
+ bool aaHasEffectOnRect() const;
+
+ // True if this quad is axis-aligned and still has its top-left corner at v0. Equivalently,
+ // quad == GrQuad(quad->bounds()). Axis-aligned quads with flips and rotations may exactly
+ // fill their bounds, but their vertex order will not match TL BL TR BR anymore.
+ bool asRect(SkRect* rect) const;
+
+ // The non-const pointers are provided to support modifying a GrQuad in-place, but care must be
+ // taken to keep its quad type aligned with the geometric nature of the new coordinates. This is
+ // no different than using the constructors that accept a quad type.
+ const float* xs() const { return fX; }
+ float* xs() { return fX; }
+ const float* ys() const { return fY; }
+ float* ys() { return fY; }
+ const float* ws() const { return fW; }
+ float* ws() { return fW; }
+
+ void setQuadType(Type newType) { fType = newType; }
+private:
+ template<typename T>
+ friend class GrQuadListBase; // for access to fX, fY, fW
+
+ float fX[4];
+ float fY[4];
+ float fW[4];
+
+ Type fType;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/geometry/GrQuadBuffer.h b/gfx/skia/skia/src/gpu/geometry/GrQuadBuffer.h
new file mode 100644
index 0000000000..dbdc92ccc7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrQuadBuffer.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrQuadBuffer_DEFINED
+#define GrQuadBuffer_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/gpu/geometry/GrQuad.h"
+
+template<typename T>
+class GrQuadBuffer {
+public:
+ GrQuadBuffer()
+ : fCount(0)
+ , fDeviceType(GrQuad::Type::kAxisAligned)
+ , fLocalType(GrQuad::Type::kAxisAligned) {
+ // Pre-allocate space for 1 2D device-space quad, metadata, and header
+ fData.reserve(this->entrySize(fDeviceType, nullptr));
+ }
+
+ // Reserves space for the given number of entries; if 'needsLocals' is true, space will be
+    // reserved for each entry to also have a 2D local quad. The reserved space assumes a 2D
+    // device quad for simplicity. Since this buffer uses a variable-length encoding for quads,
+    // this may over- or under-reserve, but pre-allocating still helps when possible.
+ GrQuadBuffer(int count, bool needsLocals = false)
+ : fCount(0)
+ , fDeviceType(GrQuad::Type::kAxisAligned)
+ , fLocalType(GrQuad::Type::kAxisAligned) {
+ int entrySize = this->entrySize(fDeviceType, needsLocals ? &fLocalType : nullptr);
+ fData.reserve(count * entrySize);
+ }
+
+    // The number of device-space quads (and metadata, and optional local quads) that are in
+    // the buffer.
+ int count() const { return fCount; }
+
+ // The most general type for the device-space quads in this buffer
+ GrQuad::Type deviceQuadType() const { return fDeviceType; }
+
+ // The most general type for the local quads; if no local quads are ever added, this will
+ // return kAxisAligned.
+ GrQuad::Type localQuadType() const { return fLocalType; }
+
+ // Append the given 'deviceQuad' to this buffer, with its associated 'metadata'. If 'localQuad'
+ // is not null, the local coordinates will also be attached to the entry. When an entry
+    // has local coordinates, during iteration Iter::isLocalValid() will return true and its
+    // Iter::localQuad() will be equivalent to the provided local coordinates. If 'localQuad' is
+    // null then Iter::isLocalValid() will report false for the added entry.
+ void append(const GrQuad& deviceQuad, T&& metadata, const GrQuad* localQuad = nullptr);
+
+ // Copies all entries from 'that' to this buffer
+ void concat(const GrQuadBuffer<T>& that);
+
+ // Provides a read-only iterator over a quad buffer, giving access to the device quad, metadata
+ // and optional local quad.
+ class Iter {
+ public:
+ Iter(const GrQuadBuffer<T>* buffer)
+ : fDeviceQuad(SkRect::MakeEmpty())
+ , fLocalQuad(SkRect::MakeEmpty())
+ , fBuffer(buffer)
+ , fCurrentEntry(nullptr)
+ , fNextEntry(buffer->fData.begin()) {
+ SkDEBUGCODE(fExpectedCount = buffer->count();)
+ }
+
+ bool next();
+
+ const T& metadata() const { this->validate(); return *(fBuffer->metadata(fCurrentEntry)); }
+
+ const GrQuad& deviceQuad() const { this->validate(); return fDeviceQuad; }
+
+ // If isLocalValid() returns false, this returns an empty quad (all 0s) so that localQuad()
+ // can be called without triggering any sanitizers, for convenience when some other state
+ // ensures that the quad will eventually not be used.
+ const GrQuad& localQuad() const {
+ this->validate();
+ return fLocalQuad;
+ }
+
+ bool isLocalValid() const {
+ this->validate();
+ return fBuffer->header(fCurrentEntry)->fHasLocals;
+ }
+
+ private:
+ // Quads are stored locally so that calling code doesn't need to re-declare their own quads
+ GrQuad fDeviceQuad;
+ GrQuad fLocalQuad;
+
+ const GrQuadBuffer<T>* fBuffer;
+ // The pointer to the current entry to read metadata/header details from
+ const char* fCurrentEntry;
+ // The pointer to replace fCurrentEntry when next() is called, cached since it is calculated
+ // automatically while unpacking the quad data.
+ const char* fNextEntry;
+
+ SkDEBUGCODE(int fExpectedCount;)
+
+ void validate() const {
+ SkDEBUGCODE(fBuffer->validate(fCurrentEntry, fExpectedCount);)
+ }
+ };
+
+ Iter iterator() const { return Iter(this); }
+
+ // Provides a *mutable* iterator over just the metadata stored in the quad buffer. This skips
+ // unpacking the device and local quads into GrQuads and is intended for use during op
+ // finalization, which may require rewriting state such as color.
+ class MetadataIter {
+ public:
+ MetadataIter(GrQuadBuffer<T>* list)
+ : fBuffer(list)
+ , fCurrentEntry(nullptr) {
+ SkDEBUGCODE(fExpectedCount = list->count();)
+ }
+
+ bool next();
+
+ T& operator*() { this->validate(); return *(fBuffer->metadata(fCurrentEntry)); }
+
+ T* operator->() { this->validate(); return fBuffer->metadata(fCurrentEntry); }
+
+ private:
+ GrQuadBuffer<T>* fBuffer;
+ char* fCurrentEntry;
+
+ SkDEBUGCODE(int fExpectedCount;)
+
+ void validate() const {
+ SkDEBUGCODE(fBuffer->validate(fCurrentEntry, fExpectedCount);)
+ }
+ };
+
+ MetadataIter metadata() { return MetadataIter(this); }
+
+private:
+ struct alignas(int32_t) Header {
+ unsigned fDeviceType : 2;
+ unsigned fLocalType : 2; // Ignore if fHasLocals is false
+ unsigned fHasLocals : 1;
+ // Known value to detect if iteration doesn't properly advance through the buffer
+ SkDEBUGCODE(unsigned fSentinel : 27;)
+ };
+ static_assert(sizeof(Header) == sizeof(int32_t), "Header should be 4 bytes");
+
+ static constexpr unsigned kSentinel = 0xbaffe;
+ static constexpr int kMetaSize = sizeof(Header) + sizeof(T);
+ static constexpr int k2DQuadFloats = 8;
+ static constexpr int k3DQuadFloats = 12;
+
+ // Each logical entry in the buffer is a variable length tuple storing device coordinates,
+ // optional local coordinates, and metadata. An entry always has a header that defines the
+ // quad types of device and local coordinates, and always has metadata of type T. The device
+ // and local quads' data follows as a variable length array of floats:
+ // [ header ] = 4 bytes
+ // [ metadata ] = sizeof(T), assert alignof(T) == 4 so that pointer casts are valid
+ // [ device xs ] = 4 floats = 16 bytes
+ // [ device ys ] = 4 floats
+ // [ device ws ] = 4 floats or 0 floats depending on fDeviceType in header
+ // [ local xs ] = 4 floats or 0 floats depending on fHasLocals in header
+ // [ local ys ] = 4 floats or 0 floats depending on fHasLocals in header
+ // [ local ws ] = 4 floats or 0 floats depending on fHasLocals and fLocalType in header
+ // FIXME (michaelludwig) - Since this is intended only for ops, can we use the arena to
+ // allocate storage for the quad buffer? Since this is forward-iteration only, could also
+ // explore a linked-list structure for concatenating quads when batching ops
+ SkTDArray<char> fData;
+
+ int fCount; // Number of (device, local, metadata) entries
+ GrQuad::Type fDeviceType; // Most general type of all entries
+ GrQuad::Type fLocalType;
+
+ inline int entrySize(GrQuad::Type deviceType, const GrQuad::Type* localType) const {
+ int size = kMetaSize;
+ size += (deviceType == GrQuad::Type::kPerspective ? k3DQuadFloats
+ : k2DQuadFloats) * sizeof(float);
+ if (localType) {
+ size += (*localType == GrQuad::Type::kPerspective ? k3DQuadFloats
+ : k2DQuadFloats) * sizeof(float);
+ }
+ return size;
+ }
+ inline int entrySize(const Header* header) const {
+ if (header->fHasLocals) {
+ GrQuad::Type localType = static_cast<GrQuad::Type>(header->fLocalType);
+ return this->entrySize(static_cast<GrQuad::Type>(header->fDeviceType), &localType);
+ } else {
+ return this->entrySize(static_cast<GrQuad::Type>(header->fDeviceType), nullptr);
+ }
+ }
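+
+    // Worked example (editorial): with sizeof(T) == 16, a 2D device quad with 2D local coords
+    // occupies 4 (header) + 16 (metadata) + 32 (device xs/ys) + 32 (local xs/ys) = 84 bytes,
+    // while a perspective device quad with no locals occupies 4 + 16 + 48 = 68 bytes.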
+
+ // Helpers to access typed sections of the buffer, given the start of an entry
+ inline Header* header(char* entry) {
+ return static_cast<Header*>(static_cast<void*>(entry));
+ }
+ inline const Header* header(const char* entry) const {
+ return static_cast<const Header*>(static_cast<const void*>(entry));
+ }
+
+ inline T* metadata(char* entry) {
+ return static_cast<T*>(static_cast<void*>(entry + sizeof(Header)));
+ }
+ inline const T* metadata(const char* entry) const {
+ return static_cast<const T*>(static_cast<const void*>(entry + sizeof(Header)));
+ }
+
+ inline float* coords(char* entry) {
+ return static_cast<float*>(static_cast<void*>(entry + kMetaSize));
+ }
+ inline const float* coords(const char* entry) const {
+ return static_cast<const float*>(static_cast<const void*>(entry + kMetaSize));
+ }
+
+ // Helpers to convert from coordinates to GrQuad and vice versa, returning pointer to the
+ // next packed quad coordinates.
+ float* packQuad(const GrQuad& quad, float* coords);
+ const float* unpackQuad(GrQuad::Type type, const float* coords, GrQuad* quad) const;
+
+#ifdef SK_DEBUG
+ void validate(const char* entry, int expectedCount) const;
+#endif
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Buffer implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<typename T>
+float* GrQuadBuffer<T>::packQuad(const GrQuad& quad, float* coords) {
+ // Copies all 12 (or 8) floats at once, so requires the 3 arrays to be contiguous
+ // FIXME(michaelludwig) - If this turns out not to be the case, just do 4 copies
+ SkASSERT(quad.xs() + 4 == quad.ys() && quad.xs() + 8 == quad.ws());
+ if (quad.hasPerspective()) {
+ memcpy(coords, quad.xs(), k3DQuadFloats * sizeof(float));
+ return coords + k3DQuadFloats;
+ } else {
+ memcpy(coords, quad.xs(), k2DQuadFloats * sizeof(float));
+ return coords + k2DQuadFloats;
+ }
+}
+
+template<typename T>
+const float* GrQuadBuffer<T>::unpackQuad(GrQuad::Type type, const float* coords, GrQuad* quad) const {
+ SkASSERT(quad->xs() + 4 == quad->ys() && quad->xs() + 8 == quad->ws());
+ if (type == GrQuad::Type::kPerspective) {
+ // Fill in X, Y, and W in one go
+ memcpy(quad->xs(), coords, k3DQuadFloats * sizeof(float));
+ coords = coords + k3DQuadFloats;
+ } else {
+ // Fill in X and Y of the quad, and set W to 1s if needed
+ memcpy(quad->xs(), coords, k2DQuadFloats * sizeof(float));
+ coords = coords + k2DQuadFloats;
+
+ if (quad->quadType() == GrQuad::Type::kPerspective) {
+ // The output quad was previously perspective, so its ws are not 1s
+ static constexpr float kNoPerspectiveWs[4] = {1.f, 1.f, 1.f, 1.f};
+ memcpy(quad->ws(), kNoPerspectiveWs, 4 * sizeof(float));
+ }
+        // Either way, the quad should now have 1s in w
+ SkASSERT(quad->w(0) == 1.f && quad->w(1) == 1.f &&
+ quad->w(2) == 1.f && quad->w(3) == 1.f);
+ }
+
+ quad->setQuadType(type);
+ return coords;
+}
+
+template<typename T>
+void GrQuadBuffer<T>::append(const GrQuad& deviceQuad, T&& metadata, const GrQuad* localQuad) {
+ GrQuad::Type localType = localQuad ? localQuad->quadType() : GrQuad::Type::kAxisAligned;
+ int entrySize = this->entrySize(deviceQuad.quadType(), localQuad ? &localType : nullptr);
+
+ // Fill in the entry, as described in fData's declaration
+ char* entry = fData.append(entrySize);
+ // First the header
+ Header* h = this->header(entry);
+ h->fDeviceType = static_cast<unsigned>(deviceQuad.quadType());
+ h->fHasLocals = static_cast<unsigned>(localQuad != nullptr);
+ h->fLocalType = static_cast<unsigned>(localQuad ? localQuad->quadType()
+ : GrQuad::Type::kAxisAligned);
+ SkDEBUGCODE(h->fSentinel = static_cast<unsigned>(kSentinel);)
+
+ // Second, the fixed-size metadata
+ static_assert(alignof(T) == 4, "Metadata must be 4 byte aligned");
+ *(this->metadata(entry)) = std::move(metadata);
+
+ // Then the variable blocks of x, y, and w float coordinates
+ float* coords = this->coords(entry);
+ coords = this->packQuad(deviceQuad, coords);
+ if (localQuad) {
+ coords = this->packQuad(*localQuad, coords);
+ }
+ SkASSERT((char*)coords - entry == entrySize);
+
+ // Entry complete, update buffer-level state
+ fCount++;
+ if (deviceQuad.quadType() > fDeviceType) {
+ fDeviceType = deviceQuad.quadType();
+ }
+ if (localQuad && localQuad->quadType() > fLocalType) {
+ fLocalType = localQuad->quadType();
+ }
+}
+
+template<typename T>
+void GrQuadBuffer<T>::concat(const GrQuadBuffer<T>& that) {
+ fData.append(that.fData.count(), that.fData.begin());
+ fCount += that.fCount;
+ if (that.fDeviceType > fDeviceType) {
+ fDeviceType = that.fDeviceType;
+ }
+ if (that.fLocalType > fLocalType) {
+ fLocalType = that.fLocalType;
+ }
+}
+
+#ifdef SK_DEBUG
+template<typename T>
+void GrQuadBuffer<T>::validate(const char* entry, int expectedCount) const {
+ // Triggers if accessing before next() is called on an iterator
+ SkASSERT(entry);
+ // Triggers if accessing after next() returns false
+ SkASSERT(entry < fData.end());
+ // Triggers if elements have been added to the buffer while iterating entries
+ SkASSERT(expectedCount == fCount);
+ // Make sure the start of the entry looks like a header
+ SkASSERT(this->header(entry)->fSentinel == kSentinel);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Iterator implementations
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<typename T>
+bool GrQuadBuffer<T>::Iter::next() {
+ SkASSERT(fNextEntry);
+ if (fNextEntry >= fBuffer->fData.end()) {
+ return false;
+ }
+ // There is at least one more entry, so store the current start for metadata access
+ fCurrentEntry = fNextEntry;
+
+ // And then unpack the device and optional local coordinates into fDeviceQuad and fLocalQuad
+ const Header* h = fBuffer->header(fCurrentEntry);
+ const float* coords = fBuffer->coords(fCurrentEntry);
+ coords = fBuffer->unpackQuad(static_cast<GrQuad::Type>(h->fDeviceType), coords, &fDeviceQuad);
+ if (h->fHasLocals) {
+ coords = fBuffer->unpackQuad(static_cast<GrQuad::Type>(h->fLocalType), coords, &fLocalQuad);
+ } else {
+ static const GrQuad kEmptyLocal(SkRect::MakeEmpty());
+ fLocalQuad = kEmptyLocal;
+ }
+ // At this point, coords points to the start of the next entry
+ fNextEntry = static_cast<const char*>(static_cast<const void*>(coords));
+ SkASSERT((fNextEntry - fCurrentEntry) == fBuffer->entrySize(h));
+ return true;
+}
+
+template<typename T>
+bool GrQuadBuffer<T>::MetadataIter::next() {
+ if (fCurrentEntry) {
+ // Advance pointer by entry size
+ if (fCurrentEntry < fBuffer->fData.end()) {
+ const Header* h = fBuffer->header(fCurrentEntry);
+ fCurrentEntry += fBuffer->entrySize(h);
+ }
+ } else {
+ // First call to next
+ fCurrentEntry = fBuffer->fData.begin();
+ }
+    // Nothing else needs doing here; just report whether the updated pointer is valid
+ return fCurrentEntry < fBuffer->fData.end();
+}
+#endif // GrQuadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.cpp b/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.cpp
new file mode 100644
index 0000000000..78edc3ccbb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.cpp
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/geometry/GrQuadUtils.h"
+
+#include "include/core/SkRect.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkVx.h"
+#include "src/gpu/geometry/GrQuad.h"
+
+using V4f = skvx::Vec<4, float>;
+using M4f = skvx::Vec<4, int32_t>;
+
+// Since the local quad may not be type kRect, this uses the opposites for each vertex when
+// interpolating, and calculates new ws in addition to new xs, ys.
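+// For example, alpha == 1 leaves the cropped vertices v0/v1 untouched, while alpha == 0
+// collapses them onto their opposite vertices v2/v3.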
+static void interpolate_local(float alpha, int v0, int v1, int v2, int v3,
+ float lx[4], float ly[4], float lw[4]) {
+ SkASSERT(v0 >= 0 && v0 < 4);
+ SkASSERT(v1 >= 0 && v1 < 4);
+ SkASSERT(v2 >= 0 && v2 < 4);
+ SkASSERT(v3 >= 0 && v3 < 4);
+
+ float beta = 1.f - alpha;
+ lx[v0] = alpha * lx[v0] + beta * lx[v2];
+ ly[v0] = alpha * ly[v0] + beta * ly[v2];
+ lw[v0] = alpha * lw[v0] + beta * lw[v2];
+
+ lx[v1] = alpha * lx[v1] + beta * lx[v3];
+ ly[v1] = alpha * ly[v1] + beta * ly[v3];
+ lw[v1] = alpha * lw[v1] + beta * lw[v3];
+}
+
+// Crops v0 to v1 based on the clipDevRect. v2 is opposite of v0, v3 is opposite of v1.
+// It is written to not modify coordinates if there's no intersection along the edge.
+// Ideally such a non-intersection would have been detected earlier and the entire draw skipped.
+static bool crop_rect_edge(const SkRect& clipDevRect, int v0, int v1, int v2, int v3,
+ float x[4], float y[4], float lx[4], float ly[4], float lw[4]) {
+ SkASSERT(v0 >= 0 && v0 < 4);
+ SkASSERT(v1 >= 0 && v1 < 4);
+ SkASSERT(v2 >= 0 && v2 < 4);
+ SkASSERT(v3 >= 0 && v3 < 4);
+
+ if (SkScalarNearlyEqual(x[v0], x[v1])) {
+ // A vertical edge
+ if (x[v0] < clipDevRect.fLeft && x[v2] >= clipDevRect.fLeft) {
+ // Overlapping with left edge of clipDevRect
+ if (lx) {
+ float alpha = (x[v2] - clipDevRect.fLeft) / (x[v2] - x[v0]);
+ interpolate_local(alpha, v0, v1, v2, v3, lx, ly, lw);
+ }
+ x[v0] = clipDevRect.fLeft;
+ x[v1] = clipDevRect.fLeft;
+ return true;
+ } else if (x[v0] > clipDevRect.fRight && x[v2] <= clipDevRect.fRight) {
+ // Overlapping with right edge of clipDevRect
+ if (lx) {
+ float alpha = (clipDevRect.fRight - x[v2]) / (x[v0] - x[v2]);
+ interpolate_local(alpha, v0, v1, v2, v3, lx, ly, lw);
+ }
+ x[v0] = clipDevRect.fRight;
+ x[v1] = clipDevRect.fRight;
+ return true;
+ }
+ } else {
+ // A horizontal edge
+ SkASSERT(SkScalarNearlyEqual(y[v0], y[v1]));
+ if (y[v0] < clipDevRect.fTop && y[v2] >= clipDevRect.fTop) {
+ // Overlapping with top edge of clipDevRect
+ if (lx) {
+ float alpha = (y[v2] - clipDevRect.fTop) / (y[v2] - y[v0]);
+ interpolate_local(alpha, v0, v1, v2, v3, lx, ly, lw);
+ }
+ y[v0] = clipDevRect.fTop;
+ y[v1] = clipDevRect.fTop;
+ return true;
+ } else if (y[v0] > clipDevRect.fBottom && y[v2] <= clipDevRect.fBottom) {
+ // Overlapping with bottom edge of clipDevRect
+ if (lx) {
+ float alpha = (clipDevRect.fBottom - y[v2]) / (y[v0] - y[v2]);
+ interpolate_local(alpha, v0, v1, v2, v3, lx, ly, lw);
+ }
+ y[v0] = clipDevRect.fBottom;
+ y[v1] = clipDevRect.fBottom;
+ return true;
+ }
+ }
+
+ // No overlap so don't crop it
+ return false;
+}
+
+// Updates x and y to intersect with clipDevRect. lx, ly, and lw are updated appropriately and may
+// be null to skip calculations. Returns bit mask of edges that were clipped.
+static GrQuadAAFlags crop_rect(const SkRect& clipDevRect, float x[4], float y[4],
+ float lx[4], float ly[4], float lw[4]) {
+ GrQuadAAFlags clipEdgeFlags = GrQuadAAFlags::kNone;
+
+ // The quad's left edge may not align with the SkRect notion of left due to 90 degree rotations
+ // or mirrors. So, this processes the logical edges of the quad and clamps it to the 4 sides of
+ // clipDevRect.
+
+ // Quad's left is v0 to v1 (op. v2 and v3)
+ if (crop_rect_edge(clipDevRect, 0, 1, 2, 3, x, y, lx, ly, lw)) {
+ clipEdgeFlags |= GrQuadAAFlags::kLeft;
+ }
+ // Quad's top edge is v0 to v2 (op. v1 and v3)
+ if (crop_rect_edge(clipDevRect, 0, 2, 1, 3, x, y, lx, ly, lw)) {
+ clipEdgeFlags |= GrQuadAAFlags::kTop;
+ }
+ // Quad's right edge is v2 to v3 (op. v0 and v1)
+ if (crop_rect_edge(clipDevRect, 2, 3, 0, 1, x, y, lx, ly, lw)) {
+ clipEdgeFlags |= GrQuadAAFlags::kRight;
+ }
+ // Quad's bottom edge is v1 to v3 (op. v0 and v2)
+ if (crop_rect_edge(clipDevRect, 1, 3, 0, 2, x, y, lx, ly, lw)) {
+ clipEdgeFlags |= GrQuadAAFlags::kBottom;
+ }
+
+ return clipEdgeFlags;
+}
+
+// Similar to crop_rect, but assumes that both the device coordinates and optional local coordinates
+// geometrically match the TL, BL, TR, BR vertex ordering, i.e. axis-aligned but not flipped, etc.
+static GrQuadAAFlags crop_simple_rect(const SkRect& clipDevRect, float x[4], float y[4],
+ float lx[4], float ly[4]) {
+ GrQuadAAFlags clipEdgeFlags = GrQuadAAFlags::kNone;
+
+ // Update local coordinates proportionately to how much the device rect edge was clipped
+ const SkScalar dx = lx ? (lx[2] - lx[0]) / (x[2] - x[0]) : 0.f;
+ const SkScalar dy = ly ? (ly[1] - ly[0]) / (y[1] - y[0]) : 0.f;
+ if (clipDevRect.fLeft > x[0]) {
+ if (lx) {
+ lx[0] += (clipDevRect.fLeft - x[0]) * dx;
+ lx[1] = lx[0];
+ }
+ x[0] = clipDevRect.fLeft;
+ x[1] = clipDevRect.fLeft;
+ clipEdgeFlags |= GrQuadAAFlags::kLeft;
+ }
+ if (clipDevRect.fTop > y[0]) {
+ if (ly) {
+ ly[0] += (clipDevRect.fTop - y[0]) * dy;
+ ly[2] = ly[0];
+ }
+ y[0] = clipDevRect.fTop;
+ y[2] = clipDevRect.fTop;
+ clipEdgeFlags |= GrQuadAAFlags::kTop;
+ }
+ if (clipDevRect.fRight < x[2]) {
+ if (lx) {
+ lx[2] -= (x[2] - clipDevRect.fRight) * dx;
+ lx[3] = lx[2];
+ }
+ x[2] = clipDevRect.fRight;
+ x[3] = clipDevRect.fRight;
+ clipEdgeFlags |= GrQuadAAFlags::kRight;
+ }
+ if (clipDevRect.fBottom < y[1]) {
+ if (ly) {
+ ly[1] -= (y[1] - clipDevRect.fBottom) * dy;
+ ly[3] = ly[1];
+ }
+ y[1] = clipDevRect.fBottom;
+ y[3] = clipDevRect.fBottom;
+ clipEdgeFlags |= GrQuadAAFlags::kBottom;
+ }
+
+ return clipEdgeFlags;
+}
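+
+// Worked example (sketch) for crop_simple_rect: device xs {0, 0, 10, 10} with local
+// xs {0, 0, 1, 1} clipped by fLeft = 2 gives dx = (1 - 0) / (10 - 0) = 0.1, so lx[0] and
+// lx[1] advance by 2 * 0.1 = 0.2.
+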
+// Consistent with GrQuad::asRect()'s return value but requires fewer operations since we don't need
+// to calculate the bounds of the quad.
+static bool is_simple_rect(const GrQuad& quad) {
+ if (quad.quadType() != GrQuad::Type::kAxisAligned) {
+ return false;
+ }
+ // v0 at the geometric top-left is unique, so we only need to compare x[0] < x[2] for left
+ // and y[0] < y[1] for top, but add a little padding to protect against numerical precision
+ // on R90 and R270 transforms tricking this check.
+ return ((quad.x(0) + SK_ScalarNearlyZero) < quad.x(2)) &&
+ ((quad.y(0) + SK_ScalarNearlyZero) < quad.y(1));
+}
+
+// Calculates barycentric coordinates for each point in (testX, testY) in the triangle formed by
+// (x0,y0) - (x1,y1) - (x2, y2) and stores them in u, v, w.
+static void barycentric_coords(float x0, float y0, float x1, float y1, float x2, float y2,
+ const V4f& testX, const V4f& testY,
+ V4f* u, V4f* v, V4f* w) {
+ // Modeled after SkPathOpsQuad::pointInTriangle() but uses float instead of double, is
+ // vectorized and outputs normalized barycentric coordinates instead of inside/outside test
+ float v0x = x2 - x0;
+ float v0y = y2 - y0;
+ float v1x = x1 - x0;
+ float v1y = y1 - y0;
+ V4f v2x = testX - x0;
+ V4f v2y = testY - y0;
+
+ float dot00 = v0x * v0x + v0y * v0y;
+ float dot01 = v0x * v1x + v0y * v1y;
+ V4f dot02 = v0x * v2x + v0y * v2y;
+ float dot11 = v1x * v1x + v1y * v1y;
+ V4f dot12 = v1x * v2x + v1y * v2y;
+ float invDenom = sk_ieee_float_divide(1.f, dot00 * dot11 - dot01 * dot01);
+ *u = (dot11 * dot02 - dot01 * dot12) * invDenom;
+ *v = (dot00 * dot12 - dot01 * dot02) * invDenom;
+ *w = 1.f - *u - *v;
+}
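+
+// Sanity check (sketch): a test point at (x0, y0) yields u = 0, v = 0, w = 1; a point at
+// (x1, y1) yields v = 1; one at (x2, y2) yields u = 1.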
+
+static M4f inside_triangle(const V4f& u, const V4f& v, const V4f& w) {
+ return ((u >= 0.f) & (u <= 1.f)) & ((v >= 0.f) & (v <= 1.f)) & ((w >= 0.f) & (w <= 1.f));
+}
+
+namespace GrQuadUtils {
+
+void ResolveAAType(GrAAType requestedAAType, GrQuadAAFlags requestedEdgeFlags, const GrQuad& quad,
+ GrAAType* outAAType, GrQuadAAFlags* outEdgeFlags) {
+ // Most cases will keep the requested types unchanged
+ *outAAType = requestedAAType;
+ *outEdgeFlags = requestedEdgeFlags;
+
+ switch (requestedAAType) {
+ // When aa type is coverage, disable AA if the edge configuration doesn't actually need it
+ case GrAAType::kCoverage:
+ if (requestedEdgeFlags == GrQuadAAFlags::kNone) {
+ // Turn off anti-aliasing
+ *outAAType = GrAAType::kNone;
+ } else {
+ // For coverage AA, if the quad is a rect and it lines up with pixel boundaries
+ // then overall aa and per-edge aa can be completely disabled
+ if (quad.quadType() == GrQuad::Type::kAxisAligned && !quad.aaHasEffectOnRect()) {
+ *outAAType = GrAAType::kNone;
+ *outEdgeFlags = GrQuadAAFlags::kNone;
+ }
+ }
+ break;
+        // For non-AA or MSAA rendering, override the edge flags since per-edge flags only
+        // make sense when coverage AA is being used.
+ case GrAAType::kNone:
+ *outEdgeFlags = GrQuadAAFlags::kNone;
+ break;
+ case GrAAType::kMSAA:
+ *outEdgeFlags = GrQuadAAFlags::kAll;
+ break;
+ }
+}
+
+bool CropToRect(const SkRect& cropRect, GrAA cropAA, GrQuadAAFlags* edgeFlags, GrQuad* quad,
+ GrQuad* local) {
+ SkASSERT(quad->isFinite());
+
+ if (quad->quadType() == GrQuad::Type::kAxisAligned) {
+ // crop_rect and crop_rect_simple keep the rectangles as rectangles, so the intersection
+ // of the crop and quad can be calculated exactly. Some care must be taken if the quad
+ // is axis-aligned but does not satisfy asRect() due to flips, etc.
+ GrQuadAAFlags clippedEdges;
+ if (local) {
+ if (is_simple_rect(*quad) && is_simple_rect(*local)) {
+ clippedEdges = crop_simple_rect(cropRect, quad->xs(), quad->ys(),
+ local->xs(), local->ys());
+ } else {
+ clippedEdges = crop_rect(cropRect, quad->xs(), quad->ys(),
+ local->xs(), local->ys(), local->ws());
+ }
+ } else {
+ if (is_simple_rect(*quad)) {
+ clippedEdges = crop_simple_rect(cropRect, quad->xs(), quad->ys(), nullptr, nullptr);
+ } else {
+ clippedEdges = crop_rect(cropRect, quad->xs(), quad->ys(),
+ nullptr, nullptr, nullptr);
+ }
+ }
+
+ // Apply the clipped edge updates to the original edge flags
+ if (cropAA == GrAA::kYes) {
+ // Turn on all edges that were clipped
+ *edgeFlags |= clippedEdges;
+ } else {
+ // Turn off all edges that were clipped
+ *edgeFlags &= ~clippedEdges;
+ }
+ return true;
+ }
+
+ if (local) {
+ // FIXME (michaelludwig) Calculate cropped local coordinates when not kAxisAligned
+ return false;
+ }
+
+ V4f devX = quad->x4f();
+ V4f devY = quad->y4f();
+ V4f devIW = quad->iw4f();
+ // Project the 3D coordinates to 2D
+ if (quad->quadType() == GrQuad::Type::kPerspective) {
+ devX *= devIW;
+ devY *= devIW;
+ }
+
+ V4f clipX = {cropRect.fLeft, cropRect.fLeft, cropRect.fRight, cropRect.fRight};
+ V4f clipY = {cropRect.fTop, cropRect.fBottom, cropRect.fTop, cropRect.fBottom};
+
+ // Calculate barycentric coordinates for the 4 rect corners in the 2 triangles that the quad
+ // is tessellated into when drawn.
+ V4f u1, v1, w1;
+ barycentric_coords(devX[0], devY[0], devX[1], devY[1], devX[2], devY[2], clipX, clipY,
+ &u1, &v1, &w1);
+ V4f u2, v2, w2;
+ barycentric_coords(devX[1], devY[1], devX[3], devY[3], devX[2], devY[2], clipX, clipY,
+ &u2, &v2, &w2);
+
+    // clipDevRect is completely inside this quad if each corner is in at least one of the two triangles
+ M4f inTri1 = inside_triangle(u1, v1, w1);
+ M4f inTri2 = inside_triangle(u2, v2, w2);
+ if (all(inTri1 | inTri2)) {
+ // We can crop to exactly the clipDevRect.
+        // FIXME (michaelludwig) - there are other ways to have determined that the quad covers
+        // the clip rect, but the barycentric coords will be useful for deriving local
+        // coordinates in the future
+
+        // Since we are cropped to exactly clipDevRect, we have discarded any perspective and the
+        // type becomes kAxisAligned. If updated locals were requested, they will incorporate perspective.
+ // FIXME (michaelludwig) - once we have local coordinates handled, it may be desirable to
+ // keep the draw as perspective so that the hardware does perspective interpolation instead
+ // of pushing it into a local coord w and having the shader do an extra divide.
+ clipX.store(quad->xs());
+ clipY.store(quad->ys());
+ quad->ws()[0] = 1.f;
+ quad->ws()[1] = 1.f;
+ quad->ws()[2] = 1.f;
+ quad->ws()[3] = 1.f;
+ quad->setQuadType(GrQuad::Type::kAxisAligned);
+
+ // Update the edge flags to match the clip setting since all 4 edges have been clipped
+ *edgeFlags = cropAA == GrAA::kYes ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
+
+ return true;
+ }
+
+ // FIXME (michaelludwig) - use the GrQuadPerEdgeAA tessellation inset/outset math to move
+ // edges to the closest clip corner they are outside of
+
+ return false;
+}
+
+}; // namespace GrQuadUtils
diff --git a/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.h b/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.h
new file mode 100644
index 0000000000..53b53d2522
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrQuadUtils.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrQuadUtils_DEFINED
+#define GrQuadUtils_DEFINED
+
+enum class GrQuadAAFlags;
+enum class GrAA : bool;
+enum class GrAAType : unsigned;
+class GrQuad;
+struct SkRect;
+
+namespace GrQuadUtils {
+
+ // Resolve disagreements between the overall requested AA type and the per-edge quad AA flags.
+ // Both outAAType and outEdgeFlags will be updated.
+ void ResolveAAType(GrAAType requestedAAType, GrQuadAAFlags requestedEdgeFlags,
+                       const GrQuad& quad, GrAAType* outAAType, GrQuadAAFlags* outEdgeFlags);
+
+ /**
+ * Crops quad to the provided device-space axis-aligned rectangle. If the intersection of this
+ * quad (projected) and cropRect results in a quadrilateral, this returns true. If not, this
+ * quad may be updated to be a smaller quad of the same type such that its intersection with
+ * cropRect is visually the same. This function assumes that the 'quad' coordinates are finite.
+ *
+ * The provided edge flags are updated to reflect edges clipped by cropRect (toggling on or off
+ * based on cropAA policy). If provided, the local coordinates will be updated to reflect the
+ * updated device coordinates of this quad.
+ *
+ * 'local' may be null, in which case the new local coordinates will not be calculated. This is
+ * useful when it's known a paint does not require local coordinates. However, neither
+     * 'edgeFlags' nor 'quad' can be null.
+ */
+ bool CropToRect(const SkRect& cropRect, GrAA cropAA, GrQuadAAFlags* edgeFlags, GrQuad* quad,
+ GrQuad* local=nullptr);
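+
+    // A brief usage sketch (the local names are illustrative only):
+    //   GrQuadAAFlags flags = GrQuadAAFlags::kAll;
+    //   bool exact = GrQuadUtils::CropToRect(clipBounds, GrAA::kYes, &flags, &deviceQuad);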
+
+}; // namespace GrQuadUtils
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/geometry/GrRect.h b/gfx/skia/skia/src/gpu/geometry/GrRect.h
new file mode 100644
index 0000000000..9a0bee7d8f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrRect.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRect_DEFINED
+#define GrRect_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+
+struct GrIRect16 {
+ int16_t fLeft, fTop, fRight, fBottom;
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeEmpty() {
+ GrIRect16 r;
+ r.setEmpty();
+ return r;
+ }
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeWH(int16_t w, int16_t h) {
+ GrIRect16 r;
+ r.set(0, 0, w, h);
+ return r;
+ }
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeXYWH(int16_t x, int16_t y, int16_t w, int16_t h) {
+ GrIRect16 r;
+ r.set(x, y, x + w, y + h);
+ return r;
+ }
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT Make(const SkIRect& ir) {
+ GrIRect16 r;
+ r.set(ir);
+ return r;
+ }
+
+ int width() const { return fRight - fLeft; }
+ int height() const { return fBottom - fTop; }
+ int area() const { return this->width() * this->height(); }
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+
+ void setEmpty() { memset(this, 0, sizeof(*this)); }
+
+ void set(int16_t left, int16_t top, int16_t right, int16_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ void set(const SkIRect& r) {
+ fLeft = SkToS16(r.fLeft);
+ fTop = SkToS16(r.fTop);
+ fRight = SkToS16(r.fRight);
+ fBottom = SkToS16(r.fBottom);
+ }
+};
+
+/** Returns true if the rectangles have a nonzero area of overlap. It is assumed that rects can be
+ infinitely small but not "inverted". */
+static inline bool GrRectsOverlap(const SkRect& a, const SkRect& b) {
+ // See skbug.com/6607 about the isFinite() checks.
+ SkASSERT(!a.isFinite() || (a.fLeft <= a.fRight && a.fTop <= a.fBottom));
+ SkASSERT(!b.isFinite() || (b.fLeft <= b.fRight && b.fTop <= b.fBottom));
+ return a.fRight > b.fLeft && a.fBottom > b.fTop && b.fRight > a.fLeft && b.fBottom > a.fTop;
+}
+
+/** Returns true if the rectangles overlap or share an edge or corner. It is assumed that rects can
+ infinitely small but not "inverted". */
+static inline bool GrRectsTouchOrOverlap(const SkRect& a, const SkRect& b) {
+ // See skbug.com/6607 about the isFinite() checks.
+ SkASSERT(!a.isFinite() || (a.fLeft <= a.fRight && a.fTop <= a.fBottom));
+ SkASSERT(!b.isFinite() || (b.fLeft <= b.fRight && b.fTop <= b.fBottom));
+ return a.fRight >= b.fLeft && a.fBottom >= b.fTop && b.fRight >= a.fLeft && b.fBottom >= a.fTop;
+}
+
+/**
+ * Apply the transform from 'inRect' to 'outRect' to each point in 'inPts', storing the mapped point
+ * into the parallel index of 'outPts'.
+ */
+static inline void GrMapRectPoints(const SkRect& inRect, const SkRect& outRect,
+ const SkPoint inPts[], SkPoint outPts[], int ptCount) {
+ SkMatrix rectTransform = SkMatrix::MakeRectToRect(inRect, outRect, SkMatrix::kFill_ScaleToFit);
+ rectTransform.mapPoints(outPts, inPts, ptCount);
+}
+
+/**
+ * Clips srcRect to the bounds of srcSize, and dstPoint (paired with srcRect's width/height)
+ * to the bounds of dstSize. Returns true if the clipped copy region is still non-empty, i.e.
+ * the source and destination regions intersect their respective bounds; returns false
+ * otherwise. The clipped values are returned in clippedSrcRect and clippedDstPoint.
+ */
+static inline bool GrClipSrcRectAndDstPoint(const SkISize& dstSize,
+ const SkISize& srcSize,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint,
+ SkIRect* clippedSrcRect,
+ SkIPoint* clippedDstPoint) {
+ *clippedSrcRect = srcRect;
+ *clippedDstPoint = dstPoint;
+
+ // clip the left edge to src and dst bounds, adjusting dstPoint if necessary
+ if (clippedSrcRect->fLeft < 0) {
+ clippedDstPoint->fX -= clippedSrcRect->fLeft;
+ clippedSrcRect->fLeft = 0;
+ }
+ if (clippedDstPoint->fX < 0) {
+ clippedSrcRect->fLeft -= clippedDstPoint->fX;
+ clippedDstPoint->fX = 0;
+ }
+
+ // clip the top edge to src and dst bounds, adjusting dstPoint if necessary
+ if (clippedSrcRect->fTop < 0) {
+ clippedDstPoint->fY -= clippedSrcRect->fTop;
+ clippedSrcRect->fTop = 0;
+ }
+ if (clippedDstPoint->fY < 0) {
+ clippedSrcRect->fTop -= clippedDstPoint->fY;
+ clippedDstPoint->fY = 0;
+ }
+
+ // clip the right edge to the src and dst bounds.
+ if (clippedSrcRect->fRight > srcSize.width()) {
+ clippedSrcRect->fRight = srcSize.width();
+ }
+ if (clippedDstPoint->fX + clippedSrcRect->width() > dstSize.width()) {
+ clippedSrcRect->fRight = clippedSrcRect->fLeft + dstSize.width() - clippedDstPoint->fX;
+ }
+
+ // clip the bottom edge to the src and dst bounds.
+ if (clippedSrcRect->fBottom > srcSize.height()) {
+ clippedSrcRect->fBottom = srcSize.height();
+ }
+ if (clippedDstPoint->fY + clippedSrcRect->height() > dstSize.height()) {
+ clippedSrcRect->fBottom = clippedSrcRect->fTop + dstSize.height() - clippedDstPoint->fY;
+ }
+
+ // The above clipping steps may have inverted the rect if it didn't intersect either the src or
+ // dst bounds.
+ return !clippedSrcRect->isEmpty();
+}
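+
+// Worked example (sketch): srcRect {-2, 0, 6, 4} with dstPoint {0, 0} clips to
+// clippedSrcRect {0, 0, 6, 4} and clippedDstPoint {2, 0} (given sufficiently large sizes);
+// the left clamp shifts the dst point right by the amount trimmed from the src.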
+#endif
diff --git a/gfx/skia/skia/src/gpu/geometry/GrShape.cpp b/gfx/skia/skia/src/gpu/geometry/GrShape.cpp
new file mode 100644
index 0000000000..4fc3472ffb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrShape.cpp
@@ -0,0 +1,765 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/geometry/GrShape.h"
+
+#include <utility>
+
+GrShape& GrShape::operator=(const GrShape& that) {
+ fStyle = that.fStyle;
+ this->changeType(that.fType, Type::kPath == that.fType ? &that.path() : nullptr);
+ switch (fType) {
+ case Type::kEmpty:
+ break;
+ case Type::kInvertedEmpty:
+ break;
+ case Type::kRRect:
+ fRRectData = that.fRRectData;
+ break;
+ case Type::kArc:
+ fArcData = that.fArcData;
+ break;
+ case Type::kLine:
+ fLineData = that.fLineData;
+ break;
+ case Type::kPath:
+ fPathData.fGenID = that.fPathData.fGenID;
+ break;
+ }
+ fInheritedKey.reset(that.fInheritedKey.count());
+ sk_careful_memcpy(fInheritedKey.get(), that.fInheritedKey.get(),
+ sizeof(uint32_t) * fInheritedKey.count());
+ if (that.fInheritedPathForListeners.isValid()) {
+ fInheritedPathForListeners.set(*that.fInheritedPathForListeners.get());
+ } else {
+ fInheritedPathForListeners.reset();
+ }
+ return *this;
+}
+
+static bool flip_inversion(bool originalIsInverted, GrShape::FillInversion inversion) {
+ switch (inversion) {
+ case GrShape::FillInversion::kPreserve:
+ return false;
+ case GrShape::FillInversion::kFlip:
+ return true;
+ case GrShape::FillInversion::kForceInverted:
+ return !originalIsInverted;
+ case GrShape::FillInversion::kForceNoninverted:
+ return originalIsInverted;
+ }
+ return false;
+}
+
+static bool is_inverted(bool originalIsInverted, GrShape::FillInversion inversion) {
+ switch (inversion) {
+ case GrShape::FillInversion::kPreserve:
+ return originalIsInverted;
+ case GrShape::FillInversion::kFlip:
+ return !originalIsInverted;
+ case GrShape::FillInversion::kForceInverted:
+ return true;
+ case GrShape::FillInversion::kForceNoninverted:
+ return false;
+ }
+ return false;
+}
+
+GrShape GrShape::MakeFilled(const GrShape& original, FillInversion inversion) {
+ if (original.style().isSimpleFill() && !flip_inversion(original.inverseFilled(), inversion)) {
+ // By returning the original rather than falling through we can preserve any inherited style
+ // key. Otherwise, we wipe it out below since the style change invalidates it.
+ return original;
+ }
+ GrShape result;
+ if (original.fInheritedPathForListeners.isValid()) {
+ result.fInheritedPathForListeners.set(*original.fInheritedPathForListeners.get());
+ }
+ switch (original.fType) {
+ case Type::kRRect:
+ result.fType = original.fType;
+ result.fRRectData.fRRect = original.fRRectData.fRRect;
+ result.fRRectData.fDir = kDefaultRRectDir;
+ result.fRRectData.fStart = kDefaultRRectStart;
+ result.fRRectData.fInverted = is_inverted(original.fRRectData.fInverted, inversion);
+ break;
+ case Type::kArc:
+ result.fType = original.fType;
+ result.fArcData.fOval = original.fArcData.fOval;
+ result.fArcData.fStartAngleDegrees = original.fArcData.fStartAngleDegrees;
+ result.fArcData.fSweepAngleDegrees = original.fArcData.fSweepAngleDegrees;
+ result.fArcData.fUseCenter = original.fArcData.fUseCenter;
+ result.fArcData.fInverted = is_inverted(original.fArcData.fInverted, inversion);
+ break;
+ case Type::kLine:
+ // Lines don't fill.
+ if (is_inverted(original.fLineData.fInverted, inversion)) {
+ result.fType = Type::kInvertedEmpty;
+ } else {
+ result.fType = Type::kEmpty;
+ }
+ break;
+ case Type::kEmpty:
+ result.fType = is_inverted(false, inversion) ? Type::kInvertedEmpty : Type::kEmpty;
+ break;
+ case Type::kInvertedEmpty:
+ result.fType = is_inverted(true, inversion) ? Type::kInvertedEmpty : Type::kEmpty;
+ break;
+ case Type::kPath:
+ result.initType(Type::kPath, &original.fPathData.fPath);
+ result.fPathData.fGenID = original.fPathData.fGenID;
+ if (flip_inversion(original.fPathData.fPath.isInverseFillType(), inversion)) {
+ result.fPathData.fPath.toggleInverseFillType();
+ }
+ if (!original.style().isSimpleFill()) {
+ // Going from a non-filled style to fill may allow additional simplifications (e.g.
+ // closing an open rect that wasn't closed in the original shape because it had
+ // stroke style).
+ result.attemptToSimplifyPath();
+ }
+ break;
+ }
+ // We don't copy the inherited key since it can contain path effect information that we just
+ // stripped.
+ return result;
+}
+
+SkRect GrShape::bounds() const {
+    // Bounds where left == right or top == bottom can indicate a line or point shape. We return
+ // inverted bounds for a truly empty shape.
+ static constexpr SkRect kInverted = SkRect::MakeLTRB(1, 1, -1, -1);
+ switch (fType) {
+ case Type::kEmpty:
+ return kInverted;
+ case Type::kInvertedEmpty:
+ return kInverted;
+ case Type::kLine: {
+ SkRect bounds;
+ if (fLineData.fPts[0].fX < fLineData.fPts[1].fX) {
+ bounds.fLeft = fLineData.fPts[0].fX;
+ bounds.fRight = fLineData.fPts[1].fX;
+ } else {
+ bounds.fLeft = fLineData.fPts[1].fX;
+ bounds.fRight = fLineData.fPts[0].fX;
+ }
+ if (fLineData.fPts[0].fY < fLineData.fPts[1].fY) {
+ bounds.fTop = fLineData.fPts[0].fY;
+ bounds.fBottom = fLineData.fPts[1].fY;
+ } else {
+ bounds.fTop = fLineData.fPts[1].fY;
+ bounds.fBottom = fLineData.fPts[0].fY;
+ }
+ return bounds;
+ }
+ case Type::kRRect:
+ return fRRectData.fRRect.getBounds();
+ case Type::kArc:
+ // Could make this less conservative by looking at angles.
+ return fArcData.fOval;
+ case Type::kPath:
+ return this->path().getBounds();
+ }
+ SK_ABORT("Unknown shape type");
+}
+
+SkRect GrShape::styledBounds() const {
+ if (this->isEmpty() && !fStyle.hasNonDashPathEffect()) {
+ return SkRect::MakeEmpty();
+ }
+
+ SkRect bounds;
+ fStyle.adjustBounds(&bounds, this->bounds());
+ return bounds;
+}
+
+// If the path is small enough to be keyed from its data this returns key length, otherwise -1.
+static int path_key_from_data_size(const SkPath& path) {
+ const int verbCnt = path.countVerbs();
+ if (verbCnt > GrShape::kMaxKeyFromDataVerbCnt) {
+ return -1;
+ }
+ const int pointCnt = path.countPoints();
+ const int conicWeightCnt = SkPathPriv::ConicWeightCnt(path);
+
+ GR_STATIC_ASSERT(sizeof(SkPoint) == 2 * sizeof(uint32_t));
+ GR_STATIC_ASSERT(sizeof(SkScalar) == sizeof(uint32_t));
+ // 2 is for the verb cnt and a fill type. Each verb is a byte but we'll pad the verb data out to
+ // a uint32_t length.
+ return 2 + (SkAlign4(verbCnt) >> 2) + 2 * pointCnt + conicWeightCnt;
+}
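+
+// For example (sketch): a path with 3 verbs, 4 points, and no conic weights keys to
+// 2 + (SkAlign4(3) >> 2) + 2 * 4 + 0 = 11 uint32_t values.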
+
+// Writes the path data key into the passed pointer.
+static void write_path_key_from_data(const SkPath& path, uint32_t* origKey) {
+ uint32_t* key = origKey;
+    // The check below should take care of negative values cast to positive.
+ const int verbCnt = path.countVerbs();
+ const int pointCnt = path.countPoints();
+ const int conicWeightCnt = SkPathPriv::ConicWeightCnt(path);
+ SkASSERT(verbCnt <= GrShape::kMaxKeyFromDataVerbCnt);
+ SkASSERT(pointCnt && verbCnt);
+ *key++ = path.getFillType();
+ *key++ = verbCnt;
+ memcpy(key, SkPathPriv::VerbData(path), verbCnt * sizeof(uint8_t));
+ int verbKeySize = SkAlign4(verbCnt);
+    // pad out to uint32_t alignment using a value that will stand out when debugging.
+ uint8_t* pad = reinterpret_cast<uint8_t*>(key)+ verbCnt;
+ memset(pad, 0xDE, verbKeySize - verbCnt);
+ key += verbKeySize >> 2;
+
+ memcpy(key, SkPathPriv::PointData(path), sizeof(SkPoint) * pointCnt);
+ GR_STATIC_ASSERT(sizeof(SkPoint) == 2 * sizeof(uint32_t));
+ key += 2 * pointCnt;
+ sk_careful_memcpy(key, SkPathPriv::ConicWeightData(path), sizeof(SkScalar) * conicWeightCnt);
+ GR_STATIC_ASSERT(sizeof(SkScalar) == sizeof(uint32_t));
+ SkDEBUGCODE(key += conicWeightCnt);
+ SkASSERT(key - origKey == path_key_from_data_size(path));
+}
+
+int GrShape::unstyledKeySize() const {
+ if (fInheritedKey.count()) {
+ return fInheritedKey.count();
+ }
+ switch (fType) {
+ case Type::kEmpty:
+ return 1;
+ case Type::kInvertedEmpty:
+ return 1;
+ case Type::kRRect:
+ SkASSERT(!fInheritedKey.count());
+ GR_STATIC_ASSERT(0 == SkRRect::kSizeInMemory % sizeof(uint32_t));
+ // + 1 for the direction, start index, and inverseness.
+ return SkRRect::kSizeInMemory / sizeof(uint32_t) + 1;
+ case Type::kArc:
+ SkASSERT(!fInheritedKey.count());
+ GR_STATIC_ASSERT(0 == sizeof(fArcData) % sizeof(uint32_t));
+ return sizeof(fArcData) / sizeof(uint32_t);
+ case Type::kLine:
+ GR_STATIC_ASSERT(2 * sizeof(uint32_t) == sizeof(SkPoint));
+ // 4 for the end points and 1 for the inverseness
+ return 5;
+ case Type::kPath: {
+ if (0 == fPathData.fGenID) {
+ return -1;
+ }
+ int dataKeySize = path_key_from_data_size(fPathData.fPath);
+ if (dataKeySize >= 0) {
+ return dataKeySize;
+ }
+ // The key is the path ID and fill type.
+ return 2;
+ }
+ }
+ SK_ABORT("Should never get here.");
+}
+
+void GrShape::writeUnstyledKey(uint32_t* key) const {
+ SkASSERT(this->unstyledKeySize());
+ SkDEBUGCODE(uint32_t* origKey = key;)
+ if (fInheritedKey.count()) {
+ memcpy(key, fInheritedKey.get(), sizeof(uint32_t) * fInheritedKey.count());
+ SkDEBUGCODE(key += fInheritedKey.count();)
+ } else {
+ switch (fType) {
+ case Type::kEmpty:
+ *key++ = 1;
+ break;
+ case Type::kInvertedEmpty:
+ *key++ = 2;
+ break;
+ case Type::kRRect:
+ fRRectData.fRRect.writeToMemory(key);
+ key += SkRRect::kSizeInMemory / sizeof(uint32_t);
+ *key = (fRRectData.fDir == SkPath::kCCW_Direction) ? (1 << 31) : 0;
+ *key |= fRRectData.fInverted ? (1 << 30) : 0;
+ *key++ |= fRRectData.fStart;
+ SkASSERT(fRRectData.fStart < 8);
+ break;
+ case Type::kArc:
+ memcpy(key, &fArcData, sizeof(fArcData));
+ key += sizeof(fArcData) / sizeof(uint32_t);
+ break;
+ case Type::kLine:
+ memcpy(key, fLineData.fPts, 2 * sizeof(SkPoint));
+ key += 4;
+ *key++ = fLineData.fInverted ? 1 : 0;
+ break;
+ case Type::kPath: {
+ SkASSERT(fPathData.fGenID);
+ int dataKeySize = path_key_from_data_size(fPathData.fPath);
+ if (dataKeySize >= 0) {
+ write_path_key_from_data(fPathData.fPath, key);
+ return;
+ }
+ *key++ = fPathData.fGenID;
+ // We could canonicalize the fill rule for paths that don't differentiate between
+ // even/odd or winding fill (e.g. convex).
+ *key++ = this->path().getFillType();
+ break;
+ }
+ }
+ }
+ SkASSERT(key - origKey == this->unstyledKeySize());
+}
+
+void GrShape::setInheritedKey(const GrShape &parent, GrStyle::Apply apply, SkScalar scale) {
+ SkASSERT(!fInheritedKey.count());
+    // If the output shape turns out to be simple, then we will just use its geometric key.
+ if (Type::kPath == fType) {
+ // We want ApplyFullStyle(ApplyPathEffect(shape)) to have the same key as
+ // ApplyFullStyle(shape).
+ // The full key is structured as (geo,path_effect,stroke).
+ // If we do ApplyPathEffect we get geo,path_effect as the inherited key. If we then
+ // do ApplyFullStyle we'll memcpy geo,path_effect into the new inherited key
+ // and then append the style key (which should now be stroke only) at the end.
+ int parentCnt = parent.fInheritedKey.count();
+ bool useParentGeoKey = !parentCnt;
+ if (useParentGeoKey) {
+ parentCnt = parent.unstyledKeySize();
+ if (parentCnt < 0) {
+ // The parent's geometry has no key so we will have no key.
+ fPathData.fGenID = 0;
+ return;
+ }
+ }
+ uint32_t styleKeyFlags = 0;
+ if (parent.knownToBeClosed()) {
+ styleKeyFlags |= GrStyle::kClosed_KeyFlag;
+ }
+ if (parent.asLine(nullptr, nullptr)) {
+ styleKeyFlags |= GrStyle::kNoJoins_KeyFlag;
+ }
+ int styleCnt = GrStyle::KeySize(parent.fStyle, apply, styleKeyFlags);
+ if (styleCnt < 0) {
+ // The style doesn't allow a key, set the path gen ID to 0 so that we fail when
+ // we try to get a key for the shape.
+ fPathData.fGenID = 0;
+ return;
+ }
+ fInheritedKey.reset(parentCnt + styleCnt);
+ if (useParentGeoKey) {
+ // This will be the geo key.
+ parent.writeUnstyledKey(fInheritedKey.get());
+ } else {
+ // This should be (geo,path_effect).
+ memcpy(fInheritedKey.get(), parent.fInheritedKey.get(),
+ parentCnt * sizeof(uint32_t));
+ }
+ // Now turn (geo,path_effect) or (geo) into (geo,path_effect,stroke)
+ GrStyle::WriteKey(fInheritedKey.get() + parentCnt, parent.fStyle, apply, scale,
+ styleKeyFlags);
+ }
+}
+
+const SkPath* GrShape::originalPathForListeners() const {
+ if (fInheritedPathForListeners.isValid()) {
+ return fInheritedPathForListeners.get();
+ } else if (Type::kPath == fType && !fPathData.fPath.isVolatile()) {
+ return &fPathData.fPath;
+ }
+ return nullptr;
+}
+
+void GrShape::addGenIDChangeListener(sk_sp<SkPathRef::GenIDChangeListener> listener) const {
+ if (const auto* lp = this->originalPathForListeners()) {
+ SkPathPriv::AddGenIDChangeListener(*lp, std::move(listener));
+ }
+}
+
+GrShape GrShape::MakeArc(const SkRect& oval, SkScalar startAngleDegrees, SkScalar sweepAngleDegrees,
+ bool useCenter, const GrStyle& style) {
+ GrShape result;
+ result.changeType(Type::kArc);
+ result.fArcData.fOval = oval;
+ result.fArcData.fStartAngleDegrees = startAngleDegrees;
+ result.fArcData.fSweepAngleDegrees = sweepAngleDegrees;
+ result.fArcData.fUseCenter = useCenter;
+ result.fArcData.fInverted = false;
+ result.fStyle = style;
+ result.attemptToSimplifyArc();
+ return result;
+}
+
+GrShape::GrShape(const GrShape& that) : fStyle(that.fStyle) {
+ const SkPath* thatPath = Type::kPath == that.fType ? &that.fPathData.fPath : nullptr;
+ this->initType(that.fType, thatPath);
+ switch (fType) {
+ case Type::kEmpty:
+ break;
+ case Type::kInvertedEmpty:
+ break;
+ case Type::kRRect:
+ fRRectData = that.fRRectData;
+ break;
+ case Type::kArc:
+ fArcData = that.fArcData;
+ break;
+ case Type::kLine:
+ fLineData = that.fLineData;
+ break;
+ case Type::kPath:
+ fPathData.fGenID = that.fPathData.fGenID;
+ break;
+ }
+ fInheritedKey.reset(that.fInheritedKey.count());
+ sk_careful_memcpy(fInheritedKey.get(), that.fInheritedKey.get(),
+ sizeof(uint32_t) * fInheritedKey.count());
+ if (that.fInheritedPathForListeners.isValid()) {
+ fInheritedPathForListeners.set(*that.fInheritedPathForListeners.get());
+ }
+}
+
+GrShape::GrShape(const GrShape& parent, GrStyle::Apply apply, SkScalar scale) {
+ // TODO: Add some quantization of scale for better cache performance here or leave that up
+ // to caller?
+ // TODO: For certain shapes and stroke params we could ignore the scale. (e.g. miter or bevel
+ // stroke of a rect).
+ if (!parent.style().applies() ||
+ (GrStyle::Apply::kPathEffectOnly == apply && !parent.style().pathEffect())) {
+ this->initType(Type::kEmpty);
+ *this = parent;
+ return;
+ }
+
+ SkPathEffect* pe = parent.fStyle.pathEffect();
+ SkTLazy<SkPath> tmpPath;
+ const GrShape* parentForKey = &parent;
+ SkTLazy<GrShape> tmpParent;
+ this->initType(Type::kPath);
+ fPathData.fGenID = 0;
+ if (pe) {
+ const SkPath* srcForPathEffect;
+ if (parent.fType == Type::kPath) {
+ srcForPathEffect = &parent.path();
+ } else {
+ srcForPathEffect = tmpPath.init();
+ parent.asPath(tmpPath.get());
+ }
+ // Should we consider bounds? Would have to include in key, but it'd be nice to know
+ // if the bounds actually modified anything before including in key.
+ SkStrokeRec strokeRec = parent.fStyle.strokeRec();
+ if (!parent.fStyle.applyPathEffectToPath(&this->path(), &strokeRec, *srcForPathEffect,
+ scale)) {
+ tmpParent.init(*srcForPathEffect, GrStyle(strokeRec, nullptr));
+ *this = tmpParent.get()->applyStyle(apply, scale);
+ return;
+ }
+ // A path effect has access to change the res scale but we aren't expecting it to and it
+ // would mess up our key computation.
+ SkASSERT(scale == strokeRec.getResScale());
+ if (GrStyle::Apply::kPathEffectAndStrokeRec == apply && strokeRec.needToApply()) {
+            // The intermediate shape may not be a general path. If we were just applying
+            // the path effect then attemptToReduceFromPath would catch it. This means that
+            // when we subsequently applied the remaining strokeRec we would have a non-path
+            // parent shape that would be used to determine the stroked path's key.
+ // We detect that case here and change parentForKey to a temporary that represents
+ // the simpler shape so that applying both path effect and the strokerec all at
+ // once produces the same key.
+ tmpParent.init(this->path(), GrStyle(strokeRec, nullptr));
+ tmpParent.get()->setInheritedKey(parent, GrStyle::Apply::kPathEffectOnly, scale);
+ if (!tmpPath.isValid()) {
+ tmpPath.init();
+ }
+ tmpParent.get()->asPath(tmpPath.get());
+ SkStrokeRec::InitStyle fillOrHairline;
+ // The parent shape may have simplified away the strokeRec, check for that here.
+ if (tmpParent.get()->style().applies()) {
+ SkAssertResult(tmpParent.get()->style().applyToPath(&this->path(), &fillOrHairline,
+ *tmpPath.get(), scale));
+ } else if (tmpParent.get()->style().isSimpleFill()) {
+ fillOrHairline = SkStrokeRec::kFill_InitStyle;
+ } else {
+ SkASSERT(tmpParent.get()->style().isSimpleHairline());
+ fillOrHairline = SkStrokeRec::kHairline_InitStyle;
+ }
+ fStyle.resetToInitStyle(fillOrHairline);
+ parentForKey = tmpParent.get();
+ } else {
+ fStyle = GrStyle(strokeRec, nullptr);
+ }
+ } else {
+ const SkPath* srcForParentStyle;
+ if (parent.fType == Type::kPath) {
+ srcForParentStyle = &parent.path();
+ } else {
+ srcForParentStyle = tmpPath.init();
+ parent.asPath(tmpPath.get());
+ }
+ SkStrokeRec::InitStyle fillOrHairline;
+ SkASSERT(parent.fStyle.applies());
+ SkASSERT(!parent.fStyle.pathEffect());
+ SkAssertResult(parent.fStyle.applyToPath(&this->path(), &fillOrHairline, *srcForParentStyle,
+ scale));
+ fStyle.resetToInitStyle(fillOrHairline);
+ }
+ if (parent.fInheritedPathForListeners.isValid()) {
+ fInheritedPathForListeners.set(*parent.fInheritedPathForListeners.get());
+ } else if (Type::kPath == parent.fType && !parent.fPathData.fPath.isVolatile()) {
+ fInheritedPathForListeners.set(parent.fPathData.fPath);
+ }
+ this->attemptToSimplifyPath();
+ this->setInheritedKey(*parentForKey, apply, scale);
+}
+
+void GrShape::attemptToSimplifyPath() {
+ SkRect rect;
+ SkRRect rrect;
+ SkPath::Direction rrectDir;
+ unsigned rrectStart;
+ bool inverted = this->path().isInverseFillType();
+ SkPoint pts[2];
+ if (this->path().isEmpty()) {
+        // Dashing ignores inverseness (skbug.com/5421).
+ this->changeType(inverted && !this->style().isDashed() ? Type::kInvertedEmpty
+ : Type::kEmpty);
+ } else if (this->path().isLine(pts)) {
+ this->changeType(Type::kLine);
+ fLineData.fPts[0] = pts[0];
+ fLineData.fPts[1] = pts[1];
+ fLineData.fInverted = inverted;
+ } else if (SkPathPriv::IsRRect(this->path(), &rrect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fDir = rrectDir;
+ fRRectData.fStart = rrectStart;
+ fRRectData.fInverted = inverted;
+ SkASSERT(!fRRectData.fRRect.isEmpty());
+ } else if (SkPathPriv::IsOval(this->path(), &rect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect.setOval(rect);
+ fRRectData.fDir = rrectDir;
+ fRRectData.fInverted = inverted;
+        // convert from oval indexing to rrect indexing.
+ fRRectData.fStart = 2 * rrectStart;
+ } else if (SkPathPriv::IsSimpleClosedRect(this->path(), &rect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ // When there is a path effect we restrict rect detection to the narrower API that
+ // gives us the starting position. Otherwise, we will retry with the more aggressive
+ // isRect().
+ fRRectData.fRRect.setRect(rect);
+ fRRectData.fInverted = inverted;
+ fRRectData.fDir = rrectDir;
+        // convert from rect indexing to rrect indexing.
+ fRRectData.fStart = 2 * rrectStart;
+ } else if (!this->style().hasPathEffect()) {
+ bool closed;
+ if (this->path().isRect(&rect, &closed, nullptr)) {
+ if (closed || this->style().isSimpleFill()) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect.setRect(rect);
+                // Since there is no path effect the dir and start index are immaterial.
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+                // There is no dashing, so we have to preserve inverseness.
+ fRRectData.fInverted = inverted;
+ }
+ }
+ }
+ if (Type::kPath != fType) {
+ fInheritedKey.reset(0);
+ // Whenever we simplify to a non-path, break the chain so we no longer refer to the
+ // original path. This prevents attaching genID listeners to temporary paths created when
+ // drawing simple shapes.
+ fInheritedPathForListeners.reset();
+ if (Type::kRRect == fType) {
+ this->attemptToSimplifyRRect();
+ } else if (Type::kLine == fType) {
+ this->attemptToSimplifyLine();
+ }
+ } else {
+ if (fInheritedKey.count() || this->path().isVolatile()) {
+ fPathData.fGenID = 0;
+ } else {
+ fPathData.fGenID = this->path().getGenerationID();
+ }
+ if (!this->style().hasNonDashPathEffect()) {
+ if (this->style().strokeRec().getStyle() == SkStrokeRec::kStroke_Style ||
+ this->style().strokeRec().getStyle() == SkStrokeRec::kHairline_Style) {
+ // Stroke styles don't differentiate between winding and even/odd.
+ // Moreover, dashing ignores inverseness (skbug.com/5421)
+ bool inverse = !this->style().isDashed() && this->path().isInverseFillType();
+ if (inverse) {
+ this->path().setFillType(kDefaultPathInverseFillType);
+ } else {
+ this->path().setFillType(kDefaultPathFillType);
+ }
+ } else if (this->path().isConvex()) {
+ // There is no distinction between even/odd and non-zero winding count for convex
+ // paths.
+ if (this->path().isInverseFillType()) {
+ this->path().setFillType(kDefaultPathInverseFillType);
+ } else {
+ this->path().setFillType(kDefaultPathFillType);
+ }
+ }
+ }
+ }
+}
+
+void GrShape::attemptToSimplifyRRect() {
+ SkASSERT(Type::kRRect == fType);
+ SkASSERT(!fInheritedKey.count());
+ if (fRRectData.fRRect.isEmpty()) {
+ // An empty filled rrect is equivalent to a filled empty path with inversion preserved.
+ if (fStyle.isSimpleFill()) {
+ fType = fRRectData.fInverted ? Type::kInvertedEmpty : Type::kEmpty;
+ fStyle = GrStyle::SimpleFill();
+ return;
+ }
+        // Dashing a rrect with no width or height is equivalent to filling an empty path.
+ // When skbug.com/7387 is fixed this should be modified or removed as a dashed zero length
+ // line will produce cap geometry if the effect begins in an "on" interval.
+ if (fStyle.isDashed() && !fRRectData.fRRect.width() && !fRRectData.fRRect.height()) {
+ // Dashing ignores the inverseness (currently). skbug.com/5421.
+ fType = Type::kEmpty;
+ fStyle = GrStyle::SimpleFill();
+ return;
+ }
+ }
+ if (!this->style().hasPathEffect()) {
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+ } else if (fStyle.isDashed()) {
+ // Dashing ignores the inverseness (currently). skbug.com/5421
+ fRRectData.fInverted = false;
+ // Possible TODO here: Check whether the dash results in a single arc or line.
+ }
+ // Turn a stroke-and-filled miter rect into a filled rect. TODO: more rrect stroke shortcuts.
+ if (!fStyle.hasPathEffect() &&
+ fStyle.strokeRec().getStyle() == SkStrokeRec::kStrokeAndFill_Style &&
+ fStyle.strokeRec().getJoin() == SkPaint::kMiter_Join &&
+ fStyle.strokeRec().getMiter() >= SK_ScalarSqrt2 &&
+ fRRectData.fRRect.isRect()) {
+ SkScalar r = fStyle.strokeRec().getWidth() / 2;
+ fRRectData.fRRect = SkRRect::MakeRect(fRRectData.fRRect.rect().makeOutset(r, r));
+ fStyle = GrStyle::SimpleFill();
+ }
+}
+
+void GrShape::attemptToSimplifyLine() {
+ SkASSERT(Type::kLine == fType);
+ SkASSERT(!fInheritedKey.count());
+ if (fStyle.isDashed()) {
+ bool allOffsZero = true;
+ for (int i = 1; i < fStyle.dashIntervalCnt() && allOffsZero; i += 2) {
+ allOffsZero = !fStyle.dashIntervals()[i];
+ }
+ if (allOffsZero && this->attemptToSimplifyStrokedLineToRRect()) {
+ return;
+ }
+ // Dashing ignores inverseness.
+ fLineData.fInverted = false;
+ return;
+ } else if (fStyle.hasPathEffect()) {
+ return;
+ }
+ if (fStyle.strokeRec().getStyle() == SkStrokeRec::kStrokeAndFill_Style) {
+ // Make stroke + fill be stroke since the fill is empty.
+ SkStrokeRec rec = fStyle.strokeRec();
+ rec.setStrokeStyle(fStyle.strokeRec().getWidth(), false);
+ fStyle = GrStyle(rec, nullptr);
+ }
+ if (fStyle.isSimpleFill()) {
+ this->changeType(fLineData.fInverted ? Type::kInvertedEmpty : Type::kEmpty);
+ return;
+ }
+ if (fStyle.strokeRec().getStyle() == SkStrokeRec::kStroke_Style &&
+ this->attemptToSimplifyStrokedLineToRRect()) {
+ return;
+ }
+ // Only path effects could care about the order of the points. Otherwise canonicalize
+ // the point order.
+ SkPoint* pts = fLineData.fPts;
+ if (pts[1].fY < pts[0].fY || (pts[1].fY == pts[0].fY && pts[1].fX < pts[0].fX)) {
+ using std::swap;
+ swap(pts[0], pts[1]);
+ }
+}
+
+void GrShape::attemptToSimplifyArc() {
+ SkASSERT(fType == Type::kArc);
+ SkASSERT(!fArcData.fInverted);
+ if (fArcData.fOval.isEmpty() || !fArcData.fSweepAngleDegrees) {
+ this->changeType(Type::kEmpty);
+ return;
+ }
+
+    // Assuming no path effect, a filled, stroked, hairline, or stroke-and-filled arc that traverses
+    // the full circle and doesn't use the center point is an oval, unless it has square or round
+    // caps that may protrude out of the oval. (Round caps can't protrude out of a circle, but we
+    // ignore that for now.)
+ if (fStyle.isSimpleFill() || (!fStyle.pathEffect() && !fArcData.fUseCenter &&
+ fStyle.strokeRec().getCap() == SkPaint::kButt_Cap)) {
+ if (fArcData.fSweepAngleDegrees >= 360.f || fArcData.fSweepAngleDegrees <= -360.f) {
+ auto oval = fArcData.fOval;
+ this->changeType(Type::kRRect);
+ this->fRRectData.fRRect.setOval(oval);
+ this->fRRectData.fDir = kDefaultRRectDir;
+ this->fRRectData.fStart = kDefaultRRectStart;
+ this->fRRectData.fInverted = false;
+ return;
+ }
+ }
+ if (!fStyle.pathEffect()) {
+ // Canonicalize the arc such that the start is always in [0, 360) and the sweep is always
+ // positive.
+ if (fArcData.fSweepAngleDegrees < 0) {
+ fArcData.fStartAngleDegrees = fArcData.fStartAngleDegrees + fArcData.fSweepAngleDegrees;
+ fArcData.fSweepAngleDegrees = -fArcData.fSweepAngleDegrees;
+ }
+ }
+ if (this->fArcData.fStartAngleDegrees < 0 || this->fArcData.fStartAngleDegrees >= 360.f) {
+ this->fArcData.fStartAngleDegrees = SkScalarMod(this->fArcData.fStartAngleDegrees, 360.f);
+ }
+ // Possible TODOs here: Look at whether dash pattern results in a single dash and convert to
+ // non-dashed stroke. Stroke and fill can be fill if circular and no path effect. Just stroke
+ // could as well if the stroke fills the center.
+}
+
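+// Worked example (sketch): a horizontal line from (0, 5) to (10, 5) with a width-4, butt-cap
+// stroke simplifies to the filled rect {0, 3, 10, 7}; a round cap would instead outset by 2 on
+// both axes and produce a round rect with corner radius 2.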
+bool GrShape::attemptToSimplifyStrokedLineToRRect() {
+ SkASSERT(Type::kLine == fType);
+ SkASSERT(fStyle.strokeRec().getStyle() == SkStrokeRec::kStroke_Style);
+
+ SkRect rect;
+ SkVector outset;
+ // If we allowed a rotation angle for rrects we could capture all cases here.
+ if (fLineData.fPts[0].fY == fLineData.fPts[1].fY) {
+ rect.fLeft = SkTMin(fLineData.fPts[0].fX, fLineData.fPts[1].fX);
+ rect.fRight = SkTMax(fLineData.fPts[0].fX, fLineData.fPts[1].fX);
+ rect.fTop = rect.fBottom = fLineData.fPts[0].fY;
+ outset.fY = fStyle.strokeRec().getWidth() / 2.f;
+ outset.fX = SkPaint::kButt_Cap == fStyle.strokeRec().getCap() ? 0.f : outset.fY;
+ } else if (fLineData.fPts[0].fX == fLineData.fPts[1].fX) {
+ rect.fTop = SkTMin(fLineData.fPts[0].fY, fLineData.fPts[1].fY);
+ rect.fBottom = SkTMax(fLineData.fPts[0].fY, fLineData.fPts[1].fY);
+ rect.fLeft = rect.fRight = fLineData.fPts[0].fX;
+ outset.fX = fStyle.strokeRec().getWidth() / 2.f;
+ outset.fY = SkPaint::kButt_Cap == fStyle.strokeRec().getCap() ? 0.f : outset.fX;
+ } else {
+ return false;
+ }
+ rect.outset(outset.fX, outset.fY);
+ if (rect.isEmpty()) {
+ this->changeType(Type::kEmpty);
+ fStyle = GrStyle::SimpleFill();
+ return true;
+ }
+ SkRRect rrect;
+ if (fStyle.strokeRec().getCap() == SkPaint::kRound_Cap) {
+ SkASSERT(outset.fX == outset.fY);
+ rrect = SkRRect::MakeRectXY(rect, outset.fX, outset.fY);
+ } else {
+ rrect = SkRRect::MakeRect(rect);
+ }
+ bool inverted = fLineData.fInverted && !fStyle.hasPathEffect();
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = inverted;
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+ fStyle = GrStyle::SimpleFill();
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/geometry/GrShape.h b/gfx/skia/skia/src/gpu/geometry/GrShape.h
new file mode 100644
index 0000000000..37f38f8844
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/geometry/GrShape.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShape_DEFINED
+#define GrShape_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkTLazy.h"
+#include "src/gpu/GrStyle.h"
+#include <new>
+
+/**
+ * Represents a geometric shape (rrect or path) and the GrStyle that it should be rendered with.
+ * It is possible to apply the style to the GrShape to produce a new GrShape where the geometry
+ * reflects the styling information (e.g. is stroked). It is also possible to apply just the
+ * path effect from the style. In this case the resulting shape will include any remaining
+ * stroking information that is to be applied after the path effect.
+ *
+ * Shapes can produce keys that represent only the geometry information, not the style. Note that
+ * when styling information is applied to produce a new shape then the style has been converted
+ * to geometric information and is included in the new shape's key. When the same style is applied
+ * to two shapes that reflect the same underlying geometry the computed keys of the stylized shapes
+ * will be the same.
+ *
+ * Currently this can only be constructed from a path, rect, or rrect, though it can become a path
+ * by applying style to the geometry. The idea is to expand this to cover most or all of the geometries
+ * that have fast paths in the GPU backend.
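+ *
+ * A brief usage sketch (illustrative only):
+ *   GrShape shape(path, paint);  // simplifies to an rrect/line/etc. when possible
+ *   GrShape stroked = shape.applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, scale);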
+ */
+class GrShape {
+public:
+ // Keys for paths may be extracted from the path data for small paths. Clients aren't supposed
+ // to have to worry about this. This value is exposed for unit tests.
+ static constexpr int kMaxKeyFromDataVerbCnt = 10;
+
+ GrShape() { this->initType(Type::kEmpty); }
+
+ explicit GrShape(const SkPath& path) : GrShape(path, GrStyle::SimpleFill()) {}
+
+ explicit GrShape(const SkRRect& rrect) : GrShape(rrect, GrStyle::SimpleFill()) {}
+
+ explicit GrShape(const SkRect& rect) : GrShape(rect, GrStyle::SimpleFill()) {}
+
+ GrShape(const SkPath& path, const GrStyle& style) : fStyle(style) {
+ this->initType(Type::kPath, &path);
+ this->attemptToSimplifyPath();
+ }
+
+ GrShape(const SkRRect& rrect, const GrStyle& style) : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, style.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRRect& rrect, SkPath::Direction dir, unsigned start, bool inverted,
+ const GrStyle& style)
+ : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = inverted;
+ if (style.pathEffect()) {
+ fRRectData.fDir = dir;
+ fRRectData.fStart = start;
+ if (fRRectData.fRRect.getType() == SkRRect::kRect_Type) {
+ fRRectData.fStart = (fRRectData.fStart + 1) & 0b110;
+ } else if (fRRectData.fRRect.getType() == SkRRect::kOval_Type) {
+ fRRectData.fStart &= 0b110;
+ }
+ } else {
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, false, &fRRectData.fDir);
+ }
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRect& rect, const GrStyle& style) : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRectDirAndStartIndex(rect, style.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkPath& path, const SkPaint& paint) : fStyle(paint) {
+ this->initType(Type::kPath, &path);
+ this->attemptToSimplifyPath();
+ }
+
+ GrShape(const SkRRect& rrect, const SkPaint& paint) : fStyle(paint) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, fStyle.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRect& rect, const SkPaint& paint) : fStyle(paint) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRectDirAndStartIndex(rect, fStyle.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ static GrShape MakeArc(const SkRect& oval, SkScalar startAngleDegrees,
+ SkScalar sweepAngleDegrees, bool useCenter, const GrStyle& style);
+
+ GrShape(const GrShape&);
+ GrShape& operator=(const GrShape& that);
+
+ ~GrShape() { this->changeType(Type::kEmpty); }
+
+ /**
+     * Informs MakeFilled on how to modify the shape's fill rule when making a simple filled
+ * version of the shape.
+ */
+ enum class FillInversion {
+ kPreserve,
+ kFlip,
+ kForceNoninverted,
+ kForceInverted
+ };
+ /**
+ * Makes a filled shape from the pre-styled original shape and optionally modifies whether
+ * the fill is inverted or not. It's important to note that the original shape's geometry
+ * may already have been modified if doing so was neutral with respect to its style
+ * (e.g. filled paths are always closed when stored in a shape and dashed paths are always
+ * made non-inverted since dashing ignores inverseness).
+ */
+ static GrShape MakeFilled(const GrShape& original, FillInversion = FillInversion::kPreserve);
+
+ const GrStyle& style() const { return fStyle; }
+
+ /**
+ * Returns a shape that has either applied the path effect or path effect and stroking
+ * information from this shape's style to its geometry. Scale is used when approximating the
+     * output geometry and typically is computed from the view matrix.
+ */
+ GrShape applyStyle(GrStyle::Apply apply, SkScalar scale) const {
+ return GrShape(*this, apply, scale);
+ }
+
+ bool isRect() const {
+ if (Type::kRRect != fType) {
+ return false;
+ }
+
+ return fRRectData.fRRect.isRect();
+ }
+
+ /** Returns the unstyled geometry as a rrect if possible. */
+ bool asRRect(SkRRect* rrect, SkPath::Direction* dir, unsigned* start, bool* inverted) const {
+ if (Type::kRRect != fType) {
+ return false;
+ }
+ if (rrect) {
+ *rrect = fRRectData.fRRect;
+ }
+ if (dir) {
+ *dir = fRRectData.fDir;
+ }
+ if (start) {
+ *start = fRRectData.fStart;
+ }
+ if (inverted) {
+ *inverted = fRRectData.fInverted;
+ }
+ return true;
+ }
+
+ /**
+ * If the unstyled shape is a straight line segment, returns true and sets pts to the endpoints.
+ * An inverse filled line path is still considered a line.
+ */
+ bool asLine(SkPoint pts[2], bool* inverted) const {
+ if (fType != Type::kLine) {
+ return false;
+ }
+ if (pts) {
+ pts[0] = fLineData.fPts[0];
+ pts[1] = fLineData.fPts[1];
+ }
+ if (inverted) {
+ *inverted = fLineData.fInverted;
+ }
+ return true;
+ }
+
+ /** Returns the unstyled geometry as a path. */
+ void asPath(SkPath* out) const {
+ switch (fType) {
+ case Type::kEmpty:
+ out->reset();
+ break;
+ case Type::kInvertedEmpty:
+ out->reset();
+ out->setFillType(kDefaultPathInverseFillType);
+ break;
+ case Type::kRRect:
+ out->reset();
+ out->addRRect(fRRectData.fRRect, fRRectData.fDir, fRRectData.fStart);
+ // Below matches the fill type that attemptToSimplifyPath uses.
+ if (fRRectData.fInverted) {
+ out->setFillType(kDefaultPathInverseFillType);
+ } else {
+ out->setFillType(kDefaultPathFillType);
+ }
+ break;
+ case Type::kArc:
+ SkPathPriv::CreateDrawArcPath(out, fArcData.fOval, fArcData.fStartAngleDegrees,
+ fArcData.fSweepAngleDegrees, fArcData.fUseCenter,
+ fStyle.isSimpleFill());
+ if (fArcData.fInverted) {
+ out->setFillType(kDefaultPathInverseFillType);
+ } else {
+ out->setFillType(kDefaultPathFillType);
+ }
+ break;
+ case Type::kLine:
+ out->reset();
+ out->moveTo(fLineData.fPts[0]);
+ out->lineTo(fLineData.fPts[1]);
+ if (fLineData.fInverted) {
+ out->setFillType(kDefaultPathInverseFillType);
+ } else {
+ out->setFillType(kDefaultPathFillType);
+ }
+ break;
+ case Type::kPath:
+ *out = this->path();
+ break;
+ }
+ }
+
+ // Can this shape be drawn as a pair of filled nested rectangles?
+ bool asNestedRects(SkRect rects[2]) const {
+ if (Type::kPath != fType) {
+ return false;
+ }
+
+        // TODO: it would be better to store DRRects natively in the shape rather than converting
+        // them to a path and then re-extracting the nested rects.
+ if (this->path().isInverseFillType()) {
+ return false;
+ }
+
+ SkPath::Direction dirs[2];
+ if (!SkPathPriv::IsNestedFillRects(this->path(), rects, dirs)) {
+ return false;
+ }
+
+ if (SkPath::kWinding_FillType == this->path().getFillType() && dirs[0] == dirs[1]) {
+ // The two rects need to be wound opposite to each other
+ return false;
+ }
+
+ // Right now, nested rects where the margin is not the same width
+ // all around do not render correctly
+ const SkScalar* outer = rects[0].asScalars();
+ const SkScalar* inner = rects[1].asScalars();
+
+ bool allEq = true;
+
+ SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
+ bool allGoE1 = margin >= SK_Scalar1;
+
+ for (int i = 1; i < 4; ++i) {
+ SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
+ if (temp < SK_Scalar1) {
+ allGoE1 = false;
+ }
+ if (!SkScalarNearlyEqual(margin, temp)) {
+ allEq = false;
+ }
+ }
+
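+        // E.g., outer = {0, 0, 10, 10} with inner = {2, 2, 8, 8} gives margins
+        // {2, 2, 2, 2}: allEq and allGoE1 both hold, so the pair qualifies.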
+ return allEq || allGoE1;
+ }
+
+ /**
+ * Returns whether the geometry is empty. Note that applying the style could produce a
+ * non-empty shape. It also may have an inverse fill.
+ */
+ bool isEmpty() const { return Type::kEmpty == fType || Type::kInvertedEmpty == fType; }
+
+ /**
+ * Gets the bounds of the geometry without reflecting the shape's styling. This ignores
+ * the inverse fill nature of the geometry.
+ */
+ SkRect bounds() const;
+
+ /**
+ * Gets the bounds of the geometry reflecting the shape's styling (ignoring inverse fill
+ * status).
+ */
+ SkRect styledBounds() const;
+
+ /**
+     * Is this shape known to be convex before styling is applied? An unclosed but otherwise
+     * convex path is considered closed if the styling reflects a fill and not otherwise.
+     * This is because filling closes all contours in the path.
+ */
+ bool knownToBeConvex() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return true;
+ case Type::kInvertedEmpty:
+ return true;
+ case Type::kRRect:
+ return true;
+ case Type::kArc:
+ return SkPathPriv::DrawArcIsConvex(fArcData.fSweepAngleDegrees,
+ SkToBool(fArcData.fUseCenter),
+ fStyle.isSimpleFill());
+ case Type::kLine:
+ return true;
+ case Type::kPath:
+ // SkPath.isConvex() really means "is this path convex were it to be closed" and
+ // thus doesn't give the correct answer for stroked paths, hence we also check
+ // whether the path is either filled or closed. Convex paths may only have one
+                // contour, hence isLastContourClosed() is sufficient for a convex path.
+ return (this->style().isSimpleFill() || this->path().isLastContourClosed()) &&
+ this->path().isConvex();
+ }
+ return false;
+ }
+
+ /**
+     * Does the shape have a known winding direction? Some degenerate convex shapes may not have
+ * a computable direction, but this is not always a requirement for path renderers so it is
+ * kept separate from knownToBeConvex().
+ */
+ bool knownDirection() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return true;
+ case Type::kInvertedEmpty:
+ return true;
+ case Type::kRRect:
+ return true;
+ case Type::kArc:
+ return true;
+ case Type::kLine:
+ return true;
+ case Type::kPath:
+ // Assuming this is called after knownToBeConvex(), this should just be relying on
+ // cached convexity and direction and will be cheap.
+ return !SkPathPriv::CheapIsFirstDirection(this->path(),
+ SkPathPriv::kUnknown_FirstDirection);
+ }
+ return false;
+ }
+
+ /** Is the pre-styled geometry inverse filled? */
+ bool inverseFilled() const {
+ bool ret = false;
+ switch (fType) {
+ case Type::kEmpty:
+ ret = false;
+ break;
+ case Type::kInvertedEmpty:
+ ret = true;
+ break;
+ case Type::kRRect:
+ ret = fRRectData.fInverted;
+ break;
+ case Type::kArc:
+ ret = fArcData.fInverted;
+ break;
+ case Type::kLine:
+ ret = fLineData.fInverted;
+ break;
+ case Type::kPath:
+ ret = this->path().isInverseFillType();
+ break;
+ }
+ // Dashing ignores inverseness. We should have caught this earlier. skbug.com/5421
+ SkASSERT(!(ret && this->style().isDashed()));
+ return ret;
+ }
+
+ /**
+     * Might applying the styling to the geometry produce an inverse fill? The "may" part comes in
+ * because an arbitrary path effect could produce an inverse filled path. In other cases this
+ * can be thought of as "inverseFilledAfterStyling()".
+ */
+ bool mayBeInverseFilledAfterStyling() const {
+ // An arbitrary path effect can produce an arbitrary output path, which may be inverse
+ // filled.
+ if (this->style().hasNonDashPathEffect()) {
+ return true;
+ }
+ return this->inverseFilled();
+ }
+
+ /**
+     * Is it known that the unstyled geometry has no unclosed contours? This means that it will
+ * not have any caps if stroked (modulo the effect of any path effect).
+ */
+ bool knownToBeClosed() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return true;
+ case Type::kInvertedEmpty:
+ return true;
+ case Type::kRRect:
+ return true;
+ case Type::kArc:
+ return fArcData.fUseCenter;
+ case Type::kLine:
+ return false;
+ case Type::kPath:
+ // SkPath doesn't keep track of the closed status of each contour.
+ return SkPathPriv::IsClosedSingleContour(this->path());
+ }
+ return false;
+ }
+
+ uint32_t segmentMask() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return 0;
+ case Type::kInvertedEmpty:
+ return 0;
+ case Type::kRRect:
+ if (fRRectData.fRRect.getType() == SkRRect::kOval_Type) {
+ return SkPath::kConic_SegmentMask;
+ } else if (fRRectData.fRRect.getType() == SkRRect::kRect_Type ||
+ fRRectData.fRRect.getType() == SkRRect::kEmpty_Type) {
+ return SkPath::kLine_SegmentMask;
+ }
+ return SkPath::kLine_SegmentMask | SkPath::kConic_SegmentMask;
+ case Type::kArc:
+ if (fArcData.fUseCenter) {
+ return SkPath::kConic_SegmentMask | SkPath::kLine_SegmentMask;
+ }
+ return SkPath::kConic_SegmentMask;
+ case Type::kLine:
+ return SkPath::kLine_SegmentMask;
+ case Type::kPath:
+ return this->path().getSegmentMasks();
+ }
+ return 0;
+ }
+
+ /**
+ * Gets the size of the key for the shape represented by this GrShape (ignoring its styling).
+ * A negative value is returned if the shape has no key (shouldn't be cached).
+ */
+ int unstyledKeySize() const;
+
+ bool hasUnstyledKey() const { return this->unstyledKeySize() >= 0; }
+
+ /**
+ * Writes unstyledKeySize() bytes into the provided pointer. Assumes that there is enough
+ * space allocated for the key and that unstyledKeySize() does not return a negative value
+ * for this shape.
+ */
+ void writeUnstyledKey(uint32_t* key) const;
+
+ /**
+ * Adds a listener to the *original* path. Typically used to invalidate cached resources when
+     * a path is no longer in use. If the shape started out as something other than a path, this
+ * does nothing.
+ */
+ void addGenIDChangeListener(sk_sp<SkPathRef::GenIDChangeListener>) const;
+
+ /**
+ * Helpers that are only exposed for unit tests, to determine if the shape is a path, and get
+ * the generation ID of the *original* path. This is the path that will receive
+ * GenIDChangeListeners added to this shape.
+ */
+ uint32_t testingOnly_getOriginalGenerationID() const;
+ bool testingOnly_isPath() const;
+ bool testingOnly_isNonVolatilePath() const;
+
+private:
+ enum class Type {
+ kEmpty,
+ kInvertedEmpty,
+ kRRect,
+ kArc,
+ kLine,
+ kPath,
+ };
+
+ void initType(Type type, const SkPath* path = nullptr) {
+ fType = Type::kEmpty;
+ this->changeType(type, path);
+ }
+
+ void changeType(Type type, const SkPath* path = nullptr) {
+ bool wasPath = Type::kPath == fType;
+ fType = type;
+ bool isPath = Type::kPath == type;
+ SkASSERT(!path || isPath);
+ if (wasPath && !isPath) {
+ fPathData.fPath.~SkPath();
+ } else if (!wasPath && isPath) {
+ if (path) {
+ new (&fPathData.fPath) SkPath(*path);
+ } else {
+ new (&fPathData.fPath) SkPath();
+ }
+ } else if (isPath && path) {
+ fPathData.fPath = *path;
+ }
+ // Whether or not we use the path's gen ID is decided in attemptToSimplifyPath.
+ fPathData.fGenID = 0;
+ }
+
+ SkPath& path() {
+ SkASSERT(Type::kPath == fType);
+ return fPathData.fPath;
+ }
+
+ const SkPath& path() const {
+ SkASSERT(Type::kPath == fType);
+ return fPathData.fPath;
+ }
+
+ /** Constructor used by the applyStyle() function */
+ GrShape(const GrShape& parentShape, GrStyle::Apply, SkScalar scale);
+
+ /**
+ * Determines the key we should inherit from the input shape's geometry and style when
+ * we are applying the style to create a new shape.
+ */
+ void setInheritedKey(const GrShape& parentShape, GrStyle::Apply, SkScalar scale);
+
+ void attemptToSimplifyPath();
+ void attemptToSimplifyRRect();
+ void attemptToSimplifyLine();
+ void attemptToSimplifyArc();
+
+ bool attemptToSimplifyStrokedLineToRRect();
+
+ /** Gets the path that gen id listeners should be added to. */
+ const SkPath* originalPathForListeners() const;
+
+ // Defaults to use when there is no distinction between even/odd and winding fills.
+ static constexpr SkPath::FillType kDefaultPathFillType = SkPath::kEvenOdd_FillType;
+ static constexpr SkPath::FillType kDefaultPathInverseFillType =
+ SkPath::kInverseEvenOdd_FillType;
+
+ static constexpr SkPath::Direction kDefaultRRectDir = SkPath::kCW_Direction;
+ static constexpr unsigned kDefaultRRectStart = 0;
+
+ static unsigned DefaultRectDirAndStartIndex(const SkRect& rect, bool hasPathEffect,
+ SkPath::Direction* dir) {
+ *dir = kDefaultRRectDir;
+ // This comes from SkPath's interface. The default for adding a SkRect is counter clockwise
+ // beginning at index 0 (which happens to correspond to rrect index 0 or 7).
+ if (!hasPathEffect) {
+ // It doesn't matter what start we use, just be consistent to avoid redundant keys.
+ return kDefaultRRectStart;
+ }
+ // In SkPath a rect starts at index 0 by default. This is the top left corner. However,
+ // we store rects as rrects. RRects don't preserve the invertedness, but rather sort the
+ // rect edges. Thus, we may need to modify the rrect's start index to account for the sort.
+ bool swapX = rect.fLeft > rect.fRight;
+ bool swapY = rect.fTop > rect.fBottom;
+ if (swapX && swapY) {
+            // 0 becomes start index 2 and times 2 to convert from rect to rrect indices.
+ return 2 * 2;
+ } else if (swapX) {
+ *dir = SkPath::kCCW_Direction;
+            // 0 becomes start index 1 and times 2 to convert from rect to rrect indices.
+ return 2 * 1;
+ } else if (swapY) {
+ *dir = SkPath::kCCW_Direction;
+            // 0 becomes start index 3 and times 2 to convert from rect to rrect indices.
+ return 2 * 3;
+ }
+ return 0;
+ }
+
+ static unsigned DefaultRRectDirAndStartIndex(const SkRRect& rrect, bool hasPathEffect,
+ SkPath::Direction* dir) {
+ // This comes from SkPath's interface. The default for adding a SkRRect to a path is
+ // clockwise beginning at starting index 6.
+ static constexpr unsigned kPathRRectStartIdx = 6;
+ *dir = kDefaultRRectDir;
+ if (!hasPathEffect) {
+ // It doesn't matter what start we use, just be consistent to avoid redundant keys.
+ return kDefaultRRectStart;
+ }
+ return kPathRRectStartIdx;
+ }
+
+ union {
+ struct {
+ SkRRect fRRect;
+ SkPath::Direction fDir;
+ unsigned fStart;
+ bool fInverted;
+ } fRRectData;
+ struct {
+ SkRect fOval;
+ SkScalar fStartAngleDegrees;
+ SkScalar fSweepAngleDegrees;
+ int16_t fUseCenter;
+ int16_t fInverted;
+ } fArcData;
+ struct {
+ SkPath fPath;
+ // Gen ID of the original path (fPath may be modified)
+ int32_t fGenID;
+ } fPathData;
+ struct {
+ SkPoint fPts[2];
+ bool fInverted;
+ } fLineData;
+ };
+ GrStyle fStyle;
+ SkTLazy<SkPath> fInheritedPathForListeners;
+ SkAutoSTArray<8, uint32_t> fInheritedKey;
+ Type fType;
+};
+#endif
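
As the class comment above describes, a GrShape pairs geometry with a GrStyle and can bake
that style into new geometry. A minimal usage sketch of the API declared above; illustrative
only, and it assumes GrStyle::Apply::kPathEffectAndStrokeRec is the apply mode that bakes in
both the path effect and the stroke:

    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);
    paint.setStrokeWidth(4.0f);

    SkRRect rr = SkRRect::MakeRectXY(SkRect::MakeWH(100, 50), 8, 8);
    GrShape shape(rr, paint);                        // geometry plus style from the paint

    SkRRect out;
    if (shape.asRRect(&out, nullptr, nullptr, nullptr)) {
        // The unstyled geometry is still queryable as an rrect.
    }

    // Bake the stroke into the geometry; the scale approximates device-space error
    // and typically comes from the view matrix.
    GrShape stroked = shape.applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, 1.0f);
    // stroked.style().isSimpleFill() is now true and its key reflects the stroking.
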
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLESInterfaceAutogen.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLESInterfaceAutogen.cpp
new file mode 100644
index 0000000000..afaf4d3b42
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLESInterfaceAutogen.cpp
@@ -0,0 +1,509 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * THIS FILE IS AUTOGENERATED
+ * Make edits to tools/gpu/gl/interface/templates.go or they will
+ * be overwritten.
+ */
+
+#include "include/gpu/gl/GrGLAssembleHelpers.h"
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#define GET_PROC(F) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+#define GET_PROC_SUFFIX(F, S) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F #S)
+#define GET_PROC_LOCAL(F) GrGL##F##Fn* F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+
+#define GET_EGL_PROC_SUFFIX(F, S) functions->fEGL##F = (GrEGL##F##Fn*)get(ctx, "egl" #F #S)
+
+#if SK_DISABLE_GL_ES_INTERFACE
+sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get) {
+ return nullptr;
+}
+#else
+sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ if (nullptr == GetString) {
+ return nullptr;
+ }
+
+ const char* verStr = reinterpret_cast<const char*>(GetString(GR_GL_VERSION));
+ GrGLVersion glVer = GrGLGetVersionFromString(verStr);
+
+ if (glVer < GR_GL_VER(2,0)) {
+ return nullptr;
+ }
+
+ GET_PROC_LOCAL(GetIntegerv);
+ GET_PROC_LOCAL(GetStringi);
+ GrEGLQueryStringFn* queryString;
+ GrEGLDisplay display;
+ GrGetEGLQueryAndDisplay(&queryString, &display, ctx, get);
+ GrGLExtensions extensions;
+ if (!extensions.init(kGLES_GrGLStandard, GetString, GetStringi, GetIntegerv, queryString,
+ display)) {
+ return nullptr;
+ }
+
+ sk_sp<GrGLInterface> interface(new GrGLInterface);
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+
+ // Autogenerated content follows
+ GET_PROC(ActiveTexture);
+ GET_PROC(AttachShader);
+ GET_PROC(BindAttribLocation);
+ GET_PROC(BindBuffer);
+ GET_PROC(BindTexture);
+ GET_PROC(BlendColor);
+ GET_PROC(BlendEquation);
+ GET_PROC(BlendFunc);
+ GET_PROC(BufferData);
+ GET_PROC(BufferSubData);
+ GET_PROC(Clear);
+ GET_PROC(ClearColor);
+ GET_PROC(ClearStencil);
+ GET_PROC(ColorMask);
+ GET_PROC(CompileShader);
+ GET_PROC(CompressedTexImage2D);
+ GET_PROC(CompressedTexSubImage2D);
+ GET_PROC(CopyTexSubImage2D);
+ GET_PROC(CreateProgram);
+ GET_PROC(CreateShader);
+ GET_PROC(CullFace);
+ GET_PROC(DeleteBuffers);
+ GET_PROC(DeleteProgram);
+ GET_PROC(DeleteShader);
+ GET_PROC(DeleteTextures);
+ GET_PROC(DepthMask);
+ GET_PROC(Disable);
+ GET_PROC(DisableVertexAttribArray);
+ GET_PROC(DrawArrays);
+ GET_PROC(DrawElements);
+ GET_PROC(Enable);
+ GET_PROC(EnableVertexAttribArray);
+ GET_PROC(Finish);
+ GET_PROC(Flush);
+ GET_PROC(FrontFace);
+ GET_PROC(GenBuffers);
+ GET_PROC(GenTextures);
+ GET_PROC(GetBufferParameteriv);
+ GET_PROC(GetError);
+ GET_PROC(GetIntegerv);
+ GET_PROC(GetProgramInfoLog);
+ GET_PROC(GetProgramiv);
+ GET_PROC(GetShaderInfoLog);
+ GET_PROC(GetShaderiv);
+ GET_PROC(GetString);
+ GET_PROC(GetUniformLocation);
+ GET_PROC(IsTexture);
+ GET_PROC(LineWidth);
+ GET_PROC(LinkProgram);
+ GET_PROC(PixelStorei);
+ GET_PROC(ReadPixels);
+ GET_PROC(Scissor);
+ GET_PROC(ShaderSource);
+ GET_PROC(StencilFunc);
+ GET_PROC(StencilFuncSeparate);
+ GET_PROC(StencilMask);
+ GET_PROC(StencilMaskSeparate);
+ GET_PROC(StencilOp);
+ GET_PROC(StencilOpSeparate);
+ GET_PROC(TexImage2D);
+ GET_PROC(TexParameterf);
+ GET_PROC(TexParameterfv);
+ GET_PROC(TexParameteri);
+ GET_PROC(TexParameteriv);
+ GET_PROC(TexSubImage2D);
+ GET_PROC(Uniform1f);
+ GET_PROC(Uniform1fv);
+ GET_PROC(Uniform1i);
+ GET_PROC(Uniform1iv);
+ GET_PROC(Uniform2f);
+ GET_PROC(Uniform2fv);
+ GET_PROC(Uniform2i);
+ GET_PROC(Uniform2iv);
+ GET_PROC(Uniform3f);
+ GET_PROC(Uniform3fv);
+ GET_PROC(Uniform3i);
+ GET_PROC(Uniform3iv);
+ GET_PROC(Uniform4f);
+ GET_PROC(Uniform4fv);
+ GET_PROC(Uniform4i);
+ GET_PROC(Uniform4iv);
+ GET_PROC(UniformMatrix2fv);
+ GET_PROC(UniformMatrix3fv);
+ GET_PROC(UniformMatrix4fv);
+ GET_PROC(UseProgram);
+ GET_PROC(VertexAttrib1f);
+ GET_PROC(VertexAttrib2fv);
+ GET_PROC(VertexAttrib3fv);
+ GET_PROC(VertexAttrib4fv);
+ GET_PROC(VertexAttribPointer);
+ GET_PROC(Viewport);
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(GetStringi);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BindVertexArray);
+ GET_PROC(DeleteVertexArrays);
+ GET_PROC(GenVertexArrays);
+ } else if (extensions.has("GL_OES_vertex_array_object")) {
+ GET_PROC_SUFFIX(BindVertexArray, OES);
+ GET_PROC_SUFFIX(DeleteVertexArrays, OES);
+ GET_PROC_SUFFIX(GenVertexArrays, OES);
+ }
+
+ if (glVer >= GR_GL_VER(3,0) && extensions.has("GL_EXT_blend_func_extended")) {
+ GET_PROC_SUFFIX(BindFragDataLocation, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0) && extensions.has("GL_EXT_blend_func_extended")) {
+ GET_PROC_SUFFIX(BindFragDataLocationIndexed, EXT);
+ }
+
+ if (extensions.has("GL_KHR_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, KHR);
+ } else if (extensions.has("GL_NV_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, NV);
+ }
+
+ if (extensions.has("GL_EXT_clear_texture")) {
+ GET_PROC_SUFFIX(ClearTexImage, EXT);
+ GET_PROC_SUFFIX(ClearTexSubImage, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ } else if (extensions.has("GL_EXT_draw_instanced")) {
+ GET_PROC_SUFFIX(DrawArraysInstanced, EXT);
+ GET_PROC_SUFFIX(DrawElementsInstanced, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(DrawBuffers);
+ GET_PROC(ReadBuffer);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(DrawArraysIndirect);
+ GET_PROC(DrawElementsIndirect);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(DrawRangeElements);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(GetMultisamplefv);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(GetTexLevelParameteriv);
+ }
+
+ if (extensions.has("GL_EXT_multi_draw_indirect")) {
+ GET_PROC_SUFFIX(MultiDrawArraysIndirect, EXT);
+ GET_PROC_SUFFIX(MultiDrawElementsIndirect, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(TexBuffer);
+ } else if (extensions.has("GL_OES_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBuffer, OES);
+ } else if (extensions.has("GL_EXT_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBuffer, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(TexBufferRange);
+ } else if (extensions.has("GL_OES_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBufferRange, OES);
+ } else if (extensions.has("GL_EXT_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBufferRange, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(TexStorage2D);
+ } else if (extensions.has("GL_EXT_texture_storage")) {
+ GET_PROC_SUFFIX(TexStorage2D, EXT);
+ }
+
+ if (extensions.has("GL_NV_texture_barrier")) {
+ GET_PROC_SUFFIX(TextureBarrier, NV);
+ }
+
+ if (extensions.has("GL_EXT_discard_framebuffer")) {
+ GET_PROC_SUFFIX(DiscardFramebuffer, EXT);
+ }
+
+ if (extensions.has("GL_QCOM_tiled_rendering")) {
+ GET_PROC_SUFFIX(EndTiling, QCOM);
+ GET_PROC_SUFFIX(StartTiling, QCOM);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribDivisor);
+ } else if (extensions.has("GL_EXT_instanced_arrays")) {
+ GET_PROC_SUFFIX(VertexAttribDivisor, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribIPointer);
+ }
+
+ GET_PROC(BindFramebuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(FramebufferTexture2D);
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(RenderbufferStorage);
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BlitFramebuffer);
+ } else if (extensions.has("GL_CHROMIUM_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(BlitFramebuffer, CHROMIUM);
+ } else if (extensions.has("GL_ANGLE_framebuffer_blit")) {
+ GET_PROC_SUFFIX(BlitFramebuffer, ANGLE);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(RenderbufferStorageMultisample);
+ } else if (extensions.has("GL_CHROMIUM_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(RenderbufferStorageMultisample, CHROMIUM);
+ } else if (extensions.has("GL_ANGLE_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(RenderbufferStorageMultisample, ANGLE);
+ }
+
+ if (extensions.has("GL_CHROMIUM_map_sub")) {
+ GET_PROC_SUFFIX(MapBufferSubData, CHROMIUM);
+ GET_PROC_SUFFIX(MapTexSubImage2D, CHROMIUM);
+ GET_PROC_SUFFIX(UnmapBufferSubData, CHROMIUM);
+ GET_PROC_SUFFIX(UnmapTexSubImage2D, CHROMIUM);
+ }
+
+ if (extensions.has("GL_EXT_multisampled_render_to_texture")) {
+ GET_PROC_SUFFIX(FramebufferTexture2DMultisample, EXT);
+ } else if (extensions.has("GL_IMG_multisampled_render_to_texture")) {
+ GET_PROC_SUFFIX(FramebufferTexture2DMultisample, IMG);
+ }
+
+ if (extensions.has("GL_EXT_multisampled_render_to_texture")) {
+        functions->fRenderbufferStorageMultisampleES2EXT = (GrGLRenderbufferStorageMultisampleFn*)get(ctx, "glRenderbufferStorageMultisampleEXT");
+ }
+
+ if (extensions.has("GL_IMG_multisampled_render_to_texture")) {
+        functions->fRenderbufferStorageMultisampleES2EXT = (GrGLRenderbufferStorageMultisampleFn*)get(ctx, "glRenderbufferStorageMultisampleIMG");
+ }
+
+ if (extensions.has("GL_APPLE_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(ResolveMultisampleFramebuffer, APPLE);
+        functions->fRenderbufferStorageMultisampleES2APPLE = (GrGLRenderbufferStorageMultisampleFn*)get(ctx, "glRenderbufferStorageMultisampleAPPLE");
+ }
+
+ if (extensions.has("GL_OES_mapbuffer")) {
+ GET_PROC_SUFFIX(MapBuffer, OES);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(UnmapBuffer);
+ } else if (extensions.has("GL_OES_mapbuffer")) {
+ GET_PROC_SUFFIX(UnmapBuffer, OES);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(FlushMappedBufferRange);
+ GET_PROC(MapBufferRange);
+ } else if (extensions.has("GL_EXT_map_buffer_range")) {
+ GET_PROC_SUFFIX(FlushMappedBufferRange, EXT);
+ GET_PROC_SUFFIX(MapBufferRange, EXT);
+ }
+
+ if (extensions.has("GL_EXT_debug_marker")) {
+ GET_PROC_SUFFIX(InsertEventMarker, EXT);
+ GET_PROC_SUFFIX(PopGroupMarker, EXT);
+ GET_PROC_SUFFIX(PushGroupMarker, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(GetProgramResourceLocation);
+ }
+
+ if (extensions.has("GL_CHROMIUM_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadIdentity, CHROMIUM);
+ GET_PROC_SUFFIX(MatrixLoadf, CHROMIUM);
+ } else if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadIdentity, EXT);
+ GET_PROC_SUFFIX(MatrixLoadf, EXT);
+ }
+
+ if (extensions.has("GL_CHROMIUM_path_rendering")) {
+ GET_PROC_SUFFIX(CoverFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(CoverStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(DeletePaths, CHROMIUM);
+ GET_PROC_SUFFIX(GenPaths, CHROMIUM);
+ GET_PROC_SUFFIX(IsPath, CHROMIUM);
+ GET_PROC_SUFFIX(PathCommands, CHROMIUM);
+ GET_PROC_SUFFIX(PathParameterf, CHROMIUM);
+ GET_PROC_SUFFIX(PathParameteri, CHROMIUM);
+ GET_PROC_SUFFIX(PathStencilFunc, CHROMIUM);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, CHROMIUM);
+ GET_PROC_SUFFIX(StencilFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, CHROMIUM);
+ } else if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(CoverFillPath, NV);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(CoverStrokePath, NV);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(DeletePaths, NV);
+ GET_PROC_SUFFIX(GenPaths, NV);
+ GET_PROC_SUFFIX(IsPath, NV);
+ GET_PROC_SUFFIX(PathCommands, NV);
+ GET_PROC_SUFFIX(PathParameterf, NV);
+ GET_PROC_SUFFIX(PathParameteri, NV);
+ GET_PROC_SUFFIX(PathStencilFunc, NV);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, NV);
+ GET_PROC_SUFFIX(StencilFillPath, NV);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilStrokePath, NV);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, NV);
+ }
+
+ if (extensions.has("GL_CHROMIUM_path_rendering")) {
+ GET_PROC_SUFFIX(BindFragmentInputLocation, CHROMIUM);
+ }
+
+ if (extensions.has("GL_CHROMIUM_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, CHROMIUM);
+ } else if (extensions.has("GL_NV_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, NV);
+ }
+
+ if (extensions.has("GL_KHR_debug")) {
+ GET_PROC_SUFFIX(DebugMessageCallback, KHR);
+ GET_PROC_SUFFIX(DebugMessageControl, KHR);
+ GET_PROC_SUFFIX(DebugMessageInsert, KHR);
+ GET_PROC_SUFFIX(GetDebugMessageLog, KHR);
+ GET_PROC_SUFFIX(ObjectLabel, KHR);
+ GET_PROC_SUFFIX(PopDebugGroup, KHR);
+ GET_PROC_SUFFIX(PushDebugGroup, KHR);
+ }
+
+ if (extensions.has("GL_CHROMIUM_bind_uniform_location")) {
+ GET_PROC_SUFFIX(BindUniformLocation, CHROMIUM);
+ }
+
+ if (extensions.has("GL_EXT_window_rectangles")) {
+ GET_PROC_SUFFIX(WindowRectangles, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ GET_PROC(FenceSync);
+ GET_PROC(IsSync);
+ GET_PROC(WaitSync);
+ } else if (extensions.has("GL_APPLE_sync")) {
+ GET_PROC_SUFFIX(ClientWaitSync, APPLE);
+ GET_PROC_SUFFIX(DeleteSync, APPLE);
+ GET_PROC_SUFFIX(FenceSync, APPLE);
+ GET_PROC_SUFFIX(IsSync, APPLE);
+ GET_PROC_SUFFIX(WaitSync, APPLE);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(GetInternalformativ);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(GetProgramBinary);
+ GET_PROC(ProgramBinary);
+ } else if (extensions.has("GL_OES_get_program_binary")) {
+ GET_PROC_SUFFIX(GetProgramBinary, OES);
+ GET_PROC_SUFFIX(ProgramBinary, OES);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(ProgramParameteri);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BindSampler);
+ GET_PROC(DeleteSamplers);
+ GET_PROC(GenSamplers);
+ GET_PROC(SamplerParameteri);
+ GET_PROC(SamplerParameteriv);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+#if GR_TEST_UTILS
+ GET_PROC(BeginQuery);
+ GET_PROC(DeleteQueries);
+ GET_PROC(EndQuery);
+ GET_PROC(GenQueries);
+ GET_PROC(GetQueryObjectuiv);
+ GET_PROC(GetQueryiv);
+#endif
+ } else if (extensions.has("GL_EXT_occlusion_query_boolean")) {
+#if GR_TEST_UTILS
+ GET_PROC_SUFFIX(BeginQuery, EXT);
+ GET_PROC_SUFFIX(DeleteQueries, EXT);
+ GET_PROC_SUFFIX(EndQuery, EXT);
+ GET_PROC_SUFFIX(GenQueries, EXT);
+ GET_PROC_SUFFIX(GetQueryObjectuiv, EXT);
+ GET_PROC_SUFFIX(GetQueryiv, EXT);
+#endif
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ }
+
+ GET_PROC(GetShaderPrecisionFormat);
+
+
+ // End autogenerated content
+ // TODO(kjlubick): Do we want a feature that removes the extension if it doesn't have
+ // the function? This is common on some low-end GPUs.
+
+ if (extensions.has("GL_KHR_debug")) {
+ // In general we have a policy against removing extension strings when the driver does
+ // not provide function pointers for an advertised extension. However, because there is a
+ // known device that advertises GL_KHR_debug but fails to provide the functions and this is
+        // a debugging-only extension we've made an exception. This also can happen when using
+ // APITRACE.
+ if (!interface->fFunctions.fDebugMessageControl) {
+ extensions.remove("GL_KHR_debug");
+ }
+ }
+ interface->fStandard = kGLES_GrGLStandard;
+ interface->fExtensions.swap(&extensions);
+
+ return interface;
+}
+#endif
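
For reference, the GET_PROC family of macros above only paste and stringize the function
name, so each line resolves exactly one GL symbol; the expansions below are shown for
illustration:

    // GET_PROC(ActiveTexture) expands to:
    functions->fActiveTexture = (GrGLActiveTextureFn*)get(ctx, "glActiveTexture");
    // GET_PROC_SUFFIX(BlitFramebuffer, ANGLE) expands to:
    functions->fBlitFramebuffer = (GrGLBlitFramebufferFn*)get(ctx, "glBlitFramebufferANGLE");
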
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLInterfaceAutogen.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLInterfaceAutogen.cpp
new file mode 100644
index 0000000000..c4c8c6a9f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleGLInterfaceAutogen.cpp
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * THIS FILE IS AUTOGENERATED
+ * Make edits to tools/gpu/gl/interface/templates.go or they will
+ * be overwritten.
+ */
+
+#include "include/gpu/gl/GrGLAssembleHelpers.h"
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#define GET_PROC(F) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+#define GET_PROC_SUFFIX(F, S) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F #S)
+#define GET_PROC_LOCAL(F) GrGL##F##Fn* F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+
+#define GET_EGL_PROC_SUFFIX(F, S) functions->fEGL##F = (GrEGL##F##Fn*)get(ctx, "egl" #F #S)
+
+#if SK_DISABLE_GL_INTERFACE
+sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get) {
+ return nullptr;
+}
+#else
+sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ GET_PROC_LOCAL(GetStringi);
+ GET_PROC_LOCAL(GetIntegerv);
+
+ // GetStringi may be nullptr depending on the GL version.
+ if (nullptr == GetString || nullptr == GetIntegerv) {
+ return nullptr;
+ }
+
+ const char* versionString = (const char*) GetString(GR_GL_VERSION);
+ GrGLVersion glVer = GrGLGetVersionFromString(versionString);
+
+ if (glVer < GR_GL_VER(2,0) || GR_GL_INVALID_VER == glVer) {
+ // This is our minimum for non-ES GL.
+ return nullptr;
+ }
+
+ GrEGLQueryStringFn* queryString;
+ GrEGLDisplay display;
+ GrGetEGLQueryAndDisplay(&queryString, &display, ctx, get);
+ GrGLExtensions extensions;
+ if (!extensions.init(kGL_GrGLStandard, GetString, GetStringi, GetIntegerv, queryString,
+ display)) {
+ return nullptr;
+ }
+
+ sk_sp<GrGLInterface> interface(new GrGLInterface());
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+
+ // Autogenerated content follows
+ GET_PROC(ActiveTexture);
+ GET_PROC(AttachShader);
+ GET_PROC(BindAttribLocation);
+ GET_PROC(BindBuffer);
+ GET_PROC(BindTexture);
+ GET_PROC(BlendColor);
+ GET_PROC(BlendEquation);
+ GET_PROC(BlendFunc);
+ GET_PROC(BufferData);
+ GET_PROC(BufferSubData);
+ GET_PROC(Clear);
+ GET_PROC(ClearColor);
+ GET_PROC(ClearStencil);
+ GET_PROC(ColorMask);
+ GET_PROC(CompileShader);
+ GET_PROC(CompressedTexImage2D);
+ GET_PROC(CompressedTexSubImage2D);
+ GET_PROC(CopyTexSubImage2D);
+ GET_PROC(CreateProgram);
+ GET_PROC(CreateShader);
+ GET_PROC(CullFace);
+ GET_PROC(DeleteBuffers);
+ GET_PROC(DeleteProgram);
+ GET_PROC(DeleteShader);
+ GET_PROC(DeleteTextures);
+ GET_PROC(DepthMask);
+ GET_PROC(Disable);
+ GET_PROC(DisableVertexAttribArray);
+ GET_PROC(DrawArrays);
+ GET_PROC(DrawElements);
+ GET_PROC(Enable);
+ GET_PROC(EnableVertexAttribArray);
+ GET_PROC(Finish);
+ GET_PROC(Flush);
+ GET_PROC(FrontFace);
+ GET_PROC(GenBuffers);
+ GET_PROC(GenTextures);
+ GET_PROC(GetBufferParameteriv);
+ GET_PROC(GetError);
+ GET_PROC(GetIntegerv);
+ GET_PROC(GetProgramInfoLog);
+ GET_PROC(GetProgramiv);
+ GET_PROC(GetShaderInfoLog);
+ GET_PROC(GetShaderiv);
+ GET_PROC(GetString);
+ GET_PROC(GetUniformLocation);
+ GET_PROC(IsTexture);
+ GET_PROC(LineWidth);
+ GET_PROC(LinkProgram);
+ GET_PROC(PixelStorei);
+ GET_PROC(ReadPixels);
+ GET_PROC(Scissor);
+ GET_PROC(ShaderSource);
+ GET_PROC(StencilFunc);
+ GET_PROC(StencilFuncSeparate);
+ GET_PROC(StencilMask);
+ GET_PROC(StencilMaskSeparate);
+ GET_PROC(StencilOp);
+ GET_PROC(StencilOpSeparate);
+ GET_PROC(TexImage2D);
+ GET_PROC(TexParameterf);
+ GET_PROC(TexParameterfv);
+ GET_PROC(TexParameteri);
+ GET_PROC(TexParameteriv);
+ GET_PROC(TexSubImage2D);
+ GET_PROC(Uniform1f);
+ GET_PROC(Uniform1fv);
+ GET_PROC(Uniform1i);
+ GET_PROC(Uniform1iv);
+ GET_PROC(Uniform2f);
+ GET_PROC(Uniform2fv);
+ GET_PROC(Uniform2i);
+ GET_PROC(Uniform2iv);
+ GET_PROC(Uniform3f);
+ GET_PROC(Uniform3fv);
+ GET_PROC(Uniform3i);
+ GET_PROC(Uniform3iv);
+ GET_PROC(Uniform4f);
+ GET_PROC(Uniform4fv);
+ GET_PROC(Uniform4i);
+ GET_PROC(Uniform4iv);
+ GET_PROC(UniformMatrix2fv);
+ GET_PROC(UniformMatrix3fv);
+ GET_PROC(UniformMatrix4fv);
+ GET_PROC(UseProgram);
+ GET_PROC(VertexAttrib1f);
+ GET_PROC(VertexAttrib2fv);
+ GET_PROC(VertexAttrib3fv);
+ GET_PROC(VertexAttrib4fv);
+ GET_PROC(VertexAttribPointer);
+ GET_PROC(Viewport);
+
+ GET_PROC(DrawBuffer);
+ GET_PROC(PolygonMode);
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(GetStringi);
+ }
+
+ GET_PROC(BindVertexArray);
+ GET_PROC(DeleteVertexArrays);
+ GET_PROC(GenVertexArrays);
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BindFragDataLocation);
+ }
+
+ if (glVer >= GR_GL_VER(3,3)) {
+ GET_PROC(BindFragDataLocationIndexed);
+ } else if (extensions.has("GL_ARB_blend_func_extended")) {
+ GET_PROC(BindFragDataLocationIndexed);
+ }
+
+ if (extensions.has("GL_KHR_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, KHR);
+ } else if (extensions.has("GL_NV_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, NV);
+ }
+
+ if (glVer >= GR_GL_VER(4,4)) {
+ GET_PROC(ClearTexImage);
+ GET_PROC(ClearTexSubImage);
+ } else if (extensions.has("GL_ARB_clear_texture")) {
+ GET_PROC(ClearTexImage);
+ GET_PROC(ClearTexSubImage);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ } else if (extensions.has("GL_ARB_draw_instanced")) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ } else if (extensions.has("GL_EXT_draw_instanced")) {
+ GET_PROC_SUFFIX(DrawArraysInstanced, EXT);
+ GET_PROC_SUFFIX(DrawElementsInstanced, EXT);
+ }
+
+ GET_PROC(DrawBuffers);
+ GET_PROC(ReadBuffer);
+
+ if (glVer >= GR_GL_VER(4,0)) {
+ GET_PROC(DrawArraysIndirect);
+ GET_PROC(DrawElementsIndirect);
+ } else if (extensions.has("GL_ARB_draw_indirect")) {
+ GET_PROC(DrawArraysIndirect);
+ GET_PROC(DrawElementsIndirect);
+ }
+
+ GET_PROC(DrawRangeElements);
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(GetMultisamplefv);
+ } else if (extensions.has("GL_ARB_texture_multisample")) {
+ GET_PROC(GetMultisamplefv);
+ }
+
+ GET_PROC(GetTexLevelParameteriv);
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(MultiDrawArraysIndirect);
+ GET_PROC(MultiDrawElementsIndirect);
+ } else if (extensions.has("GL_ARB_multi_draw_indirect")) {
+ GET_PROC(MultiDrawArraysIndirect);
+ GET_PROC(MultiDrawElementsIndirect);
+ }
+
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(TexBuffer);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(TexBufferRange);
+ }
+
+ if (glVer >= GR_GL_VER(4,2)) {
+ GET_PROC(TexStorage2D);
+ } else if (extensions.has("GL_ARB_texture_storage")) {
+ GET_PROC(TexStorage2D);
+ } else if (extensions.has("GL_EXT_texture_storage")) {
+ GET_PROC_SUFFIX(TexStorage2D, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(4,5)) {
+ GET_PROC(TextureBarrier);
+ } else if (extensions.has("GL_ARB_texture_barrier")) {
+ GET_PROC(TextureBarrier);
+ } else if (extensions.has("GL_NV_texture_barrier")) {
+ GET_PROC_SUFFIX(TextureBarrier, NV);
+ }
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(VertexAttribDivisor);
+ } else if (extensions.has("GL_ARB_instanced_arrays")) {
+ GET_PROC(VertexAttribDivisor);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribIPointer);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BindFramebuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(FramebufferTexture2D);
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(RenderbufferStorage);
+ } else if (extensions.has("GL_ARB_framebuffer_object")) {
+ GET_PROC(BindFramebuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(FramebufferTexture2D);
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(RenderbufferStorage);
+ } else if (extensions.has("GL_EXT_framebuffer_object")) {
+ GET_PROC_SUFFIX(BindFramebuffer, EXT);
+ GET_PROC_SUFFIX(BindRenderbuffer, EXT);
+ GET_PROC_SUFFIX(CheckFramebufferStatus, EXT);
+ GET_PROC_SUFFIX(DeleteFramebuffers, EXT);
+ GET_PROC_SUFFIX(DeleteRenderbuffers, EXT);
+ GET_PROC_SUFFIX(FramebufferRenderbuffer, EXT);
+ GET_PROC_SUFFIX(FramebufferTexture2D, EXT);
+ GET_PROC_SUFFIX(GenFramebuffers, EXT);
+ GET_PROC_SUFFIX(GenRenderbuffers, EXT);
+ GET_PROC_SUFFIX(GenerateMipmap, EXT);
+ GET_PROC_SUFFIX(GetFramebufferAttachmentParameteriv, EXT);
+ GET_PROC_SUFFIX(GetRenderbufferParameteriv, EXT);
+ GET_PROC_SUFFIX(RenderbufferStorage, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BlitFramebuffer);
+ } else if (extensions.has("GL_ARB_framebuffer_object")) {
+ GET_PROC(BlitFramebuffer);
+ } else if (extensions.has("GL_EXT_framebuffer_blit")) {
+ GET_PROC_SUFFIX(BlitFramebuffer, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(RenderbufferStorageMultisample);
+ } else if (extensions.has("GL_ARB_framebuffer_object")) {
+ GET_PROC(RenderbufferStorageMultisample);
+ } else if (extensions.has("GL_EXT_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(RenderbufferStorageMultisample, EXT);
+ }
+
+ GET_PROC(MapBuffer);
+
+ GET_PROC(UnmapBuffer);
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(FlushMappedBufferRange);
+ GET_PROC(MapBufferRange);
+ } else if (extensions.has("GL_ARB_map_buffer_range")) {
+ GET_PROC(FlushMappedBufferRange);
+ GET_PROC(MapBufferRange);
+ }
+
+ if (extensions.has("GL_EXT_debug_marker")) {
+ GET_PROC_SUFFIX(InsertEventMarker, EXT);
+ GET_PROC_SUFFIX(PopGroupMarker, EXT);
+ GET_PROC_SUFFIX(PushGroupMarker, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(GetProgramResourceLocation);
+ } else if (extensions.has("GL_ARB_program_interface_query")) {
+ GET_PROC(GetProgramResourceLocation);
+ }
+
+ if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadIdentity, EXT);
+ GET_PROC_SUFFIX(MatrixLoadf, EXT);
+ }
+
+ if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(CoverFillPath, NV);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(CoverStrokePath, NV);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(DeletePaths, NV);
+ GET_PROC_SUFFIX(GenPaths, NV);
+ GET_PROC_SUFFIX(IsPath, NV);
+ GET_PROC_SUFFIX(PathCommands, NV);
+ GET_PROC_SUFFIX(PathParameterf, NV);
+ GET_PROC_SUFFIX(PathParameteri, NV);
+ GET_PROC_SUFFIX(PathStencilFunc, NV);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, NV);
+ GET_PROC_SUFFIX(StencilFillPath, NV);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilStrokePath, NV);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, NV);
+ }
+
+ if (extensions.has("GL_NV_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, NV);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(DebugMessageCallback);
+ GET_PROC(DebugMessageControl);
+ GET_PROC(DebugMessageInsert);
+ GET_PROC(GetDebugMessageLog);
+ GET_PROC(ObjectLabel);
+ GET_PROC(PopDebugGroup);
+ GET_PROC(PushDebugGroup);
+ } else if (extensions.has("GL_KHR_debug")) {
+ GET_PROC(DebugMessageCallback);
+ GET_PROC(DebugMessageControl);
+ GET_PROC(DebugMessageInsert);
+ GET_PROC(GetDebugMessageLog);
+ GET_PROC(ObjectLabel);
+ GET_PROC(PopDebugGroup);
+ GET_PROC(PushDebugGroup);
+ }
+
+ if (extensions.has("GL_EXT_window_rectangles")) {
+ GET_PROC_SUFFIX(WindowRectangles, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ GET_PROC(FenceSync);
+ GET_PROC(IsSync);
+ GET_PROC(WaitSync);
+ } else if (extensions.has("GL_ARB_sync")) {
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ GET_PROC(FenceSync);
+ GET_PROC(IsSync);
+ GET_PROC(WaitSync);
+ }
+
+ if (glVer >= GR_GL_VER(4,2)) {
+ GET_PROC(GetInternalformativ);
+ } else if (extensions.has("GL_ARB_internalformat_query")) {
+ GET_PROC(GetInternalformativ);
+ }
+
+ if (glVer >= GR_GL_VER(4,1)) {
+ GET_PROC(GetProgramBinary);
+ GET_PROC(ProgramBinary);
+ }
+
+ if (glVer >= GR_GL_VER(4,1)) {
+ GET_PROC(ProgramParameteri);
+ }
+
+ if (glVer >= GR_GL_VER(3,2)) {
+ GET_PROC(BindSampler);
+ GET_PROC(DeleteSamplers);
+ GET_PROC(GenSamplers);
+ GET_PROC(SamplerParameteri);
+ GET_PROC(SamplerParameteriv);
+ } else if (extensions.has("GL_ARB_sampler_objects")) {
+ GET_PROC(BindSampler);
+ GET_PROC(DeleteSamplers);
+ GET_PROC(GenSamplers);
+ GET_PROC(SamplerParameteri);
+ GET_PROC(SamplerParameteriv);
+ }
+
+ GET_PROC(GetQueryObjectiv);
+
+#if GR_TEST_UTILS
+ GET_PROC(BeginQuery);
+ GET_PROC(DeleteQueries);
+ GET_PROC(EndQuery);
+ GET_PROC(GenQueries);
+ GET_PROC(GetQueryObjectuiv);
+ GET_PROC(GetQueryiv);
+#endif
+
+ if (glVer >= GR_GL_VER(3,3)) {
+ GET_PROC(GetQueryObjecti64v);
+ GET_PROC(GetQueryObjectui64v);
+ } else if (extensions.has("GL_ARB_timer_query")) {
+ GET_PROC(GetQueryObjecti64v);
+ GET_PROC(GetQueryObjectui64v);
+ } else if (extensions.has("GL_EXT_timer_query")) {
+ GET_PROC_SUFFIX(GetQueryObjecti64v, EXT);
+ GET_PROC_SUFFIX(GetQueryObjectui64v, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(3,3)) {
+ GET_PROC(QueryCounter);
+ } else if (extensions.has("GL_ARB_timer_query")) {
+ GET_PROC(QueryCounter);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(InvalidateBufferData);
+ GET_PROC(InvalidateBufferSubData);
+ GET_PROC(InvalidateTexImage);
+ GET_PROC(InvalidateTexSubImage);
+ } else if (extensions.has("GL_ARB_invalidate_subdata")) {
+ GET_PROC(InvalidateBufferData);
+ GET_PROC(InvalidateBufferSubData);
+ GET_PROC(InvalidateTexImage);
+ GET_PROC(InvalidateTexSubImage);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ } else if (extensions.has("GL_ARB_invalidate_subdata")) {
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ }
+
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(GetShaderPrecisionFormat);
+ } else if (extensions.has("GL_ARB_ES2_compatibility")) {
+ GET_PROC(GetShaderPrecisionFormat);
+ }
+
+
+ // End autogenerated content
+ interface->fStandard = kGL_GrGLStandard;
+ interface->fExtensions.swap(&extensions);
+
+ return interface;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleHelpers.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleHelpers.cpp
new file mode 100644
index 0000000000..5e5163f26a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleHelpers.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLAssembleHelpers.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+void GrGetEGLQueryAndDisplay(GrEGLQueryStringFn** queryString, GrEGLDisplay* display,
+ void* ctx, GrGLGetProc get) {
+ *queryString = (GrEGLQueryStringFn*)get(ctx, "eglQueryString");
+ *display = GR_EGL_NO_DISPLAY;
+ if (*queryString) {
+ GrEGLGetCurrentDisplayFn* getCurrentDisplay =
+ (GrEGLGetCurrentDisplayFn*)get(ctx, "eglGetCurrentDisplay");
+ if (getCurrentDisplay) {
+ *display = getCurrentDisplay();
+ } else {
+ *queryString = nullptr;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp
new file mode 100644
index 0000000000..2a0f774f0e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/gl/GrGLAssembleHelpers.h"
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#define GET_PROC_LOCAL(F) GrGL##F##Fn* F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+
+sk_sp<const GrGLInterface> GrGLMakeAssembledInterface(void *ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ if (nullptr == GetString) {
+ return nullptr;
+ }
+
+ const char* verStr = reinterpret_cast<const char*>(GetString(GR_GL_VERSION));
+ if (nullptr == verStr) {
+ return nullptr;
+ }
+
+ GrGLStandard standard = GrGLGetStandardInUseFromString(verStr);
+    // standard can be unused (optimized away) if SK_ASSUME_GL_ES is set
+ sk_ignore_unused_variable(standard);
+
+ if (GR_IS_GR_GL_ES(standard)) {
+ return GrGLMakeAssembledGLESInterface(ctx, get);
+ } else if (GR_IS_GR_GL(standard)) {
+ return GrGLMakeAssembledGLInterface(ctx, get);
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ return GrGLMakeAssembledWebGLInterface(ctx, get);
+ }
+ return nullptr;
+}
+
+const GrGLInterface* GrGLAssembleInterface(void *ctx, GrGLGetProc get) {
+ return GrGLMakeAssembledInterface(ctx, get).release();
+}
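
GrGLMakeAssembledInterface is the generic entry point: it inspects GL_VERSION to choose the
GL, GLES, or WebGL assembler defined in the files above. A sketch of a caller on an
EGL-based platform; illustrative only, and it assumes a current EGL context whose
eglGetProcAddress can resolve core symbols (true on most, but not all, implementations):

    #include <EGL/egl.h>

    static GrGLFuncPtr egl_get(void* ctx, const char name[]) {
        SkASSERT(nullptr == ctx);
        return reinterpret_cast<GrGLFuncPtr>(eglGetProcAddress(name));
    }

    sk_sp<const GrGLInterface> iface = GrGLMakeAssembledInterface(nullptr, egl_get);
    // Returns nullptr when the version string is unusable or required symbols are missing.
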
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleWebGLInterfaceAutogen.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleWebGLInterfaceAutogen.cpp
new file mode 100644
index 0000000000..9202de8c64
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleWebGLInterfaceAutogen.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * THIS FILE IS AUTOGENERATED
+ * Make edits to tools/gpu/gl/interface/templates.go or they will
+ * be overwritten.
+ */
+
+#include "include/gpu/gl/GrGLAssembleHelpers.h"
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#define GET_PROC(F) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+#define GET_PROC_SUFFIX(F, S) functions->f##F = (GrGL##F##Fn*)get(ctx, "gl" #F #S)
+#define GET_PROC_LOCAL(F) GrGL##F##Fn* F = (GrGL##F##Fn*)get(ctx, "gl" #F)
+
+#define GET_EGL_PROC_SUFFIX(F, S) functions->fEGL##F = (GrEGL##F##Fn*)get(ctx, "egl" #F #S)
+
+#if SK_DISABLE_WEBGL_INTERFACE
+sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get) {
+ return nullptr;
+}
+#else
+sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ if (nullptr == GetString) {
+ return nullptr;
+ }
+
+ const char* verStr = reinterpret_cast<const char*>(GetString(GR_GL_VERSION));
+ GrGLVersion glVer = GrGLGetVersionFromString(verStr);
+
+ if (glVer < GR_GL_VER(1,0)) {
+ return nullptr;
+ }
+
+ GET_PROC_LOCAL(GetIntegerv);
+ GET_PROC_LOCAL(GetStringi);
+ GrEGLQueryStringFn* queryString;
+ GrEGLDisplay display;
+ GrGetEGLQueryAndDisplay(&queryString, &display, ctx, get);
+ GrGLExtensions extensions;
+ if (!extensions.init(kWebGL_GrGLStandard, GetString, GetStringi, GetIntegerv, queryString,
+ display)) {
+ return nullptr;
+ }
+
+ sk_sp<GrGLInterface> interface(new GrGLInterface);
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+
+ // Autogenerated content follows
+ GET_PROC(ActiveTexture);
+ GET_PROC(AttachShader);
+ GET_PROC(BindAttribLocation);
+ GET_PROC(BindBuffer);
+ GET_PROC(BindTexture);
+ GET_PROC(BlendColor);
+ GET_PROC(BlendEquation);
+ GET_PROC(BlendFunc);
+ GET_PROC(BufferData);
+ GET_PROC(BufferSubData);
+ GET_PROC(Clear);
+ GET_PROC(ClearColor);
+ GET_PROC(ClearStencil);
+ GET_PROC(ColorMask);
+ GET_PROC(CompileShader);
+ GET_PROC(CompressedTexImage2D);
+ GET_PROC(CompressedTexSubImage2D);
+ GET_PROC(CopyTexSubImage2D);
+ GET_PROC(CreateProgram);
+ GET_PROC(CreateShader);
+ GET_PROC(CullFace);
+ GET_PROC(DeleteBuffers);
+ GET_PROC(DeleteProgram);
+ GET_PROC(DeleteShader);
+ GET_PROC(DeleteTextures);
+ GET_PROC(DepthMask);
+ GET_PROC(Disable);
+ GET_PROC(DisableVertexAttribArray);
+ GET_PROC(DrawArrays);
+ GET_PROC(DrawElements);
+ GET_PROC(Enable);
+ GET_PROC(EnableVertexAttribArray);
+ GET_PROC(Finish);
+ GET_PROC(Flush);
+ GET_PROC(FrontFace);
+ GET_PROC(GenBuffers);
+ GET_PROC(GenTextures);
+ GET_PROC(GetBufferParameteriv);
+ GET_PROC(GetError);
+ GET_PROC(GetIntegerv);
+ GET_PROC(GetProgramInfoLog);
+ GET_PROC(GetProgramiv);
+ GET_PROC(GetShaderInfoLog);
+ GET_PROC(GetShaderiv);
+ GET_PROC(GetString);
+ GET_PROC(GetUniformLocation);
+ GET_PROC(IsTexture);
+ GET_PROC(LineWidth);
+ GET_PROC(LinkProgram);
+ GET_PROC(PixelStorei);
+ GET_PROC(ReadPixels);
+ GET_PROC(Scissor);
+ GET_PROC(ShaderSource);
+ GET_PROC(StencilFunc);
+ GET_PROC(StencilFuncSeparate);
+ GET_PROC(StencilMask);
+ GET_PROC(StencilMaskSeparate);
+ GET_PROC(StencilOp);
+ GET_PROC(StencilOpSeparate);
+ GET_PROC(TexImage2D);
+ GET_PROC(TexParameterf);
+ GET_PROC(TexParameterfv);
+ GET_PROC(TexParameteri);
+ GET_PROC(TexParameteriv);
+ GET_PROC(TexSubImage2D);
+ GET_PROC(Uniform1f);
+ GET_PROC(Uniform1fv);
+ GET_PROC(Uniform1i);
+ GET_PROC(Uniform1iv);
+ GET_PROC(Uniform2f);
+ GET_PROC(Uniform2fv);
+ GET_PROC(Uniform2i);
+ GET_PROC(Uniform2iv);
+ GET_PROC(Uniform3f);
+ GET_PROC(Uniform3fv);
+ GET_PROC(Uniform3i);
+ GET_PROC(Uniform3iv);
+ GET_PROC(Uniform4f);
+ GET_PROC(Uniform4fv);
+ GET_PROC(Uniform4i);
+ GET_PROC(Uniform4iv);
+ GET_PROC(UniformMatrix2fv);
+ GET_PROC(UniformMatrix3fv);
+ GET_PROC(UniformMatrix4fv);
+ GET_PROC(UseProgram);
+ GET_PROC(VertexAttrib1f);
+ GET_PROC(VertexAttrib2fv);
+ GET_PROC(VertexAttrib3fv);
+ GET_PROC(VertexAttrib4fv);
+ GET_PROC(VertexAttribPointer);
+ GET_PROC(Viewport);
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(GetStringi);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(BindVertexArray);
+ GET_PROC(DeleteVertexArrays);
+ GET_PROC(GenVertexArrays);
+ } else if (extensions.has("GL_OES_vertex_array_object")) {
+ GET_PROC_SUFFIX(BindVertexArray, OES);
+ GET_PROC_SUFFIX(DeleteVertexArrays, OES);
+ GET_PROC_SUFFIX(GenVertexArrays, OES);
+ } else if (extensions.has("OES_vertex_array_object")) {
+ GET_PROC_SUFFIX(BindVertexArray, OES);
+ GET_PROC_SUFFIX(DeleteVertexArrays, OES);
+ GET_PROC_SUFFIX(GenVertexArrays, OES);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(DrawBuffers);
+ GET_PROC(ReadBuffer);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(DrawRangeElements);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(TexStorage2D);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(VertexAttribDivisor);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(VertexAttribIPointer);
+ }
+
+ GET_PROC(BindFramebuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(FramebufferTexture2D);
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(RenderbufferStorage);
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(RenderbufferStorageMultisample);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ GET_PROC(FenceSync);
+ GET_PROC(IsSync);
+ GET_PROC(WaitSync);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(BindSampler);
+ GET_PROC(DeleteSamplers);
+ GET_PROC(GenSamplers);
+ GET_PROC(SamplerParameteri);
+ GET_PROC(SamplerParameteriv);
+ }
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ }
+
+ GET_PROC(GetShaderPrecisionFormat);
+
+
+ // End autogenerated content
+
+ interface->fStandard = kWebGL_GrGLStandard;
+ interface->fExtensions.swap(&extensions);
+
+ return interface;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp
new file mode 100644
index 0000000000..516c10ea34
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/gl/GrGLBuffer.h"
+#include "src/gpu/gl/GrGLGpu.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)
+
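+// When GR_GL_CHECK_ALLOC_WITH_GET_ERROR is enabled, the macros below let buffer
+// allocation detect out-of-memory failures: the error queue is cleared before
+// the alloc, the alloc itself skips the usual per-call error check, and the
+// queue is inspected afterwards so a failed BufferData can be handled gracefully.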
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
+#else
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
+#endif
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ if (gpu->glCaps().transferBufferType() == GrGLCaps::kNone_TransferBufferType &&
+ (GrGpuBufferType::kXferCpuToGpu == intendedType ||
+ GrGpuBufferType::kXferGpuToCpu == intendedType)) {
+ return nullptr;
+ }
+
+ sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
+ if (0 == buffer->bufferID()) {
+ return nullptr;
+ }
+ return buffer;
+}
+
+// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
+// objects are implemented as client-side arrays on tile-deferred architectures.
+#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
+
+inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
+ GrAccessPattern accessPattern) {
+ auto drawUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
+ return DYNAMIC_DRAW_PARAM;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_DRAW;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_DRAW;
+ }
+ SK_ABORT("Unexpected access pattern");
+ };
+
+ auto readUsage = [](GrAccessPattern pattern) {
+ switch (pattern) {
+ case kDynamic_GrAccessPattern:
+ return GR_GL_DYNAMIC_READ;
+ case kStatic_GrAccessPattern:
+ return GR_GL_STATIC_READ;
+ case kStream_GrAccessPattern:
+ return GR_GL_STREAM_READ;
+ }
+ SK_ABORT("Unexpected access pattern");
+ };
+
+ auto usageType = [&drawUsage, &readUsage](GrGpuBufferType type, GrAccessPattern pattern) {
+ switch (type) {
+ case GrGpuBufferType::kVertex:
+ case GrGpuBufferType::kIndex:
+ case GrGpuBufferType::kXferCpuToGpu:
+ return drawUsage(pattern);
+ case GrGpuBufferType::kXferGpuToCpu:
+ return readUsage(pattern);
+ }
+ SK_ABORT("Unexpected gpu buffer type.");
+ };
+
+ return usageType(bufferType, accessPattern);
+}
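+
+// E.g. gr_to_gl_access_pattern(GrGpuBufferType::kVertex, kDynamic_GrAccessPattern)
+// returns DYNAMIC_DRAW_PARAM (GR_GL_STREAM_DRAW), while
+// (GrGpuBufferType::kXferGpuToCpu, kStatic_GrAccessPattern) returns
+// GR_GL_STATIC_READ.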
+
+GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data)
+ : INHERITED(gpu, size, intendedType, accessPattern)
+ , fIntendedType(intendedType)
+ , fBufferID(0)
+ , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
+ , fGLSizeInBytes(0)
+ , fHasAttachedToTexture(false) {
+ GL_CALL(GenBuffers(1, &fBufferID));
+ if (fBufferID) {
+ GrGLenum target = gpu->bindBuffer(fIntendedType, this);
+ CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
+ // make sure driver can allocate memory for this buffer
+ GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
+ (GrGLsizeiptr) size,
+ data,
+ fUsage));
+ if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ } else {
+ fGLSizeInBytes = size;
+ }
+ }
+ VALIDATE();
+ this->registerWithCache(SkBudgeted::kYes);
+ if (!fBufferID) {
+ this->resourcePriv().removeScratchKey();
+ }
+}
+
+inline GrGLGpu* GrGLBuffer::glGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrGLGpu*>(this->getGpu());
+}
+
+inline const GrGLCaps& GrGLBuffer::glCaps() const {
+ return this->glGpu()->glCaps();
+}
+
+void GrGLBuffer::onRelease() {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ if (!this->wasDestroyed()) {
+ VALIDATE();
+ // make sure we've not been abandoned or already released
+ if (fBufferID) {
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ }
+ fMapPtr = nullptr;
+ VALIDATE();
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLBuffer::onAbandon() {
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ fMapPtr = nullptr;
+ VALIDATE();
+ INHERITED::onAbandon();
+}
+
+void GrGLBuffer::onMap() {
+ SkASSERT(fBufferID);
+ SkASSERT(!this->wasDestroyed());
+ VALIDATE();
+ SkASSERT(!this->isMapped());
+
+ // TODO: Make this a function parameter.
+ bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);
+
+ // Handling dirty context is done in the bindBuffer call
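+    // Which GL entry point we use depends on the caps: classic glMapBuffer,
+    // glMapBufferRange (GL 3.0 / ARB_map_buffer_range / ES3 / EXT_map_buffer_range),
+    // or the Chromium command buffer's MapBufferSubData.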
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ return;
+ case GrGLCaps::kMapBuffer_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ if (!readOnly) {
+ // Let driver know it can discard the old data
+ if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+ }
+ }
+ GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ }
+ case GrGLCaps::kMapBufferRange_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+            // Make sure the GL buffer size agrees with the buffer's logical size before mapping.
+ if (fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+ }
+ GrGLbitfield access;
+ if (readOnly) {
+ access = GR_GL_MAP_READ_BIT;
+ } else {
+ access = GR_GL_MAP_WRITE_BIT;
+ if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
+ // TODO: Make this a function parameter.
+ access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
+ }
+ }
+ GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
+ break;
+ }
+ case GrGLCaps::kChromium_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+            // Make sure the GL buffer size agrees with the buffer's logical size before mapping.
+ if (fGLSizeInBytes != this->size()) {
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+ }
+ GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
+ readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ }
+ }
+ fGLSizeInBytes = this->size();
+ VALIDATE();
+}
+
+void GrGLBuffer::onUnmap() {
+ SkASSERT(fBufferID);
+ VALIDATE();
+ SkASSERT(this->isMapped());
+ if (0 == fBufferID) {
+ fMapPtr = nullptr;
+ return;
+ }
+ // bind buffer handles the dirty context
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ SkDEBUGFAIL("Shouldn't get here.");
+ return;
+ case GrGLCaps::kMapBuffer_MapBufferType: // fall through
+ case GrGLCaps::kMapBufferRange_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ GL_CALL(UnmapBuffer(target));
+ break;
+ }
+ case GrGLCaps::kChromium_MapBufferType:
+ this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
+ GL_CALL(UnmapBufferSubData(fMapPtr));
+ break;
+ }
+ fMapPtr = nullptr;
+}
+
+bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ SkASSERT(fBufferID);
+ if (this->wasDestroyed()) {
+ return false;
+ }
+
+ SkASSERT(!this->isMapped());
+ VALIDATE();
+ if (srcSizeInBytes > this->size()) {
+ return false;
+ }
+ SkASSERT(srcSizeInBytes <= this->size());
+ // bindbuffer handles dirty context
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+
+ if (this->glCaps().useBufferDataNullHint()) {
+ if (this->size() == srcSizeInBytes) {
+ GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
+ } else {
+ // Before we call glBufferSubData we give the driver a hint using
+ // glBufferData with nullptr. This makes the old buffer contents
+ // inaccessible to future draws. The GPU may still be processing
+ // draws that reference the old contents. With this hint it can
+ // assign a different allocation for the new contents to avoid
+ // flushing the gpu past draws consuming the old contents.
+ // TODO I think we actually want to try calling bufferData here
+ GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
+ GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
+ }
+ fGLSizeInBytes = this->size();
+ } else {
+ // Note that we're cheating on the size here. Currently no methods
+ // allow a partial update that preserves contents of non-updated
+ // portions of the buffer (map() does a glBufferData(..size, nullptr..))
+ GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
+ fGLSizeInBytes = srcSizeInBytes;
+ }
+ VALIDATE();
+ return true;
+}
+
+void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU32(this->bufferID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
+ buffer_id.c_str());
+}
+
+#ifdef SK_DEBUG
+
+void GrGLBuffer::validate() const {
+ SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
+ SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h
new file mode 100644
index 0000000000..d408e468cd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLBuffer_DEFINED
+#define GrGLBuffer_DEFINED
+
+#include "include/gpu/gl/GrGLTypes.h"
+#include "src/gpu/GrGpuBuffer.h"
+
+class GrGLGpu;
+class GrGLCaps;
+
+class GrGLBuffer : public GrGpuBuffer {
+public:
+ static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data = nullptr);
+
+ ~GrGLBuffer() override {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(0 == fBufferID);
+ }
+
+ GrGLuint bufferID() const { return fBufferID; }
+
+ /**
+ * Returns the actual size of the underlying GL buffer object. In certain cases we may make this
+ * smaller than the size reported by GrGpuBuffer.
+ */
+ size_t glSizeInBytes() const { return fGLSizeInBytes; }
+
+ void setHasAttachedToTexture() { fHasAttachedToTexture = true; }
+ bool hasAttachedToTexture() const { return fHasAttachedToTexture; }
+
+protected:
+ GrGLBuffer(GrGLGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data);
+
+ void onAbandon() override;
+ void onRelease() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ GrGLGpu* glGpu() const;
+ const GrGLCaps& glCaps() const;
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#endif
+
+ GrGpuBufferType fIntendedType;
+ GrGLuint fBufferID;
+ GrGLenum fUsage;
+ size_t fGLSizeInBytes;
+ bool fHasAttachedToTexture;
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp b/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp
new file mode 100644
index 0000000000..3142399c58
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp
@@ -0,0 +1,4257 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrContextOptions.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTSort.h"
+#include "src/gpu/GrRenderTargetProxyPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/gl/GrGLCaps.h"
+#include "src/gpu/gl/GrGLContext.h"
+#include "src/gpu/gl/GrGLRenderTarget.h"
+#include "src/gpu/gl/GrGLTexture.h"
+#include "src/utils/SkJSONWriter.h"
+
+GrGLCaps::GrGLCaps(const GrContextOptions& contextOptions,
+ const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* glInterface) : INHERITED(contextOptions) {
+ fStandard = ctxInfo.standard();
+
+ fStencilFormats.reset();
+ fMSFBOType = kNone_MSFBOType;
+ fInvalidateFBType = kNone_InvalidateFBType;
+ fMapBufferType = kNone_MapBufferType;
+ fTransferBufferType = kNone_TransferBufferType;
+ fMaxFragmentUniformVectors = 0;
+ fPackFlipYSupport = false;
+ fTextureUsageSupport = false;
+ fImagingSupport = false;
+ fVertexArrayObjectSupport = false;
+ fDebugSupport = false;
+ fES2CompatibilitySupport = false;
+ fDrawIndirectSupport = false;
+ fMultiDrawIndirectSupport = false;
+ fBaseInstanceSupport = false;
+ fIsCoreProfile = false;
+ fBindFragDataLocationSupport = false;
+ fRectangleTextureSupport = false;
+ fRGBA8888PixelsOpsAreSlow = false;
+ fPartialFBOReadIsSlow = false;
+ fMipMapLevelAndLodControlSupport = false;
+ fRGBAToBGRAReadbackConversionsAreSlow = false;
+ fUseBufferDataNullHint = false;
+ fDoManualMipmapping = false;
+ fClearToBoundaryValuesIsBroken = false;
+ fClearTextureSupport = false;
+ fDrawArraysBaseVertexIsBroken = false;
+ fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO = false;
+ fUseDrawInsteadOfAllRenderTargetWrites = false;
+ fRequiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines = false;
+ fDetachStencilFromMSAABuffersBeforeReadPixels = false;
+ fDontSetBaseOrMaxLevelForExternalTextures = false;
+ fNeverDisableColorWrites = false;
+ fProgramBinarySupport = false;
+ fProgramParameterSupport = false;
+ fSamplerObjectSupport = false;
+ fTiledRenderingSupport = false;
+ fFBFetchRequiresEnablePerSample = false;
+ fSRGBWriteControl = false;
+
+ fBlitFramebufferFlags = kNoSupport_BlitFramebufferFlag;
+ fMaxInstancesPerDrawWithoutCrashing = 0;
+
+ fShaderCaps.reset(new GrShaderCaps(contextOptions));
+
+ this->init(contextOptions, ctxInfo, glInterface);
+}
+
+void GrGLCaps::init(const GrContextOptions& contextOptions,
+ const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* gli) {
+ GrGLStandard standard = ctxInfo.standard();
+    // standard can be unused (optimized away) if SK_ASSUME_GL_ES is set
+ sk_ignore_unused_variable(standard);
+ GrGLVersion version = ctxInfo.version();
+
+ if (GR_IS_GR_GL(standard)) {
+ GrGLint max;
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max);
+ fMaxFragmentUniformVectors = max / 4;
+ if (version >= GR_GL_VER(3, 2)) {
+ GrGLint profileMask;
+ GR_GL_GetIntegerv(gli, GR_GL_CONTEXT_PROFILE_MASK, &profileMask);
+ fIsCoreProfile = SkToBool(profileMask & GR_GL_CONTEXT_CORE_PROFILE_BIT);
+ }
+ } else if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ &fMaxFragmentUniformVectors);
+ }
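+    // (Desktop GL reports scalar uniform components rather than vec4 slots,
+    // hence the divide-by-four above; ES and WebGL report vectors directly.)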
+
+ if (fDriverBugWorkarounds.max_fragment_uniform_vectors_32) {
+ fMaxFragmentUniformVectors = SkMin32(fMaxFragmentUniformVectors, 32);
+ }
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_VERTEX_ATTRIBS, &fMaxVertexAttributes);
+
+ if (GR_IS_GR_GL(standard)) {
+ fWritePixelsRowBytesSupport = true;
+ fReadPixelsRowBytesSupport = true;
+ fPackFlipYSupport = false;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fWritePixelsRowBytesSupport =
+ version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_unpack_subimage");
+ fReadPixelsRowBytesSupport =
+ version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_NV_pack_subimage");
+ fPackFlipYSupport =
+ ctxInfo.hasExtension("GL_ANGLE_pack_reverse_row_order");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL 2.0 has these
+ fWritePixelsRowBytesSupport = version >= GR_GL_VER(2, 0);
+ fReadPixelsRowBytesSupport = version >= GR_GL_VER(2, 0);
+ }
+ if (fDriverBugWorkarounds.pack_parameters_workaround_with_pack_buffer) {
+ // In some cases drivers handle copying the last row incorrectly
+ // when using GL_PACK_ROW_LENGTH. Chromium handles this by iterating
+ // through every row and conditionally clobbering that value, but
+ // Skia already has a scratch buffer workaround when pack row length
+ // is not supported, so just use that.
+ fReadPixelsRowBytesSupport = false;
+ }
+
+ fTextureUsageSupport = GR_IS_GR_GL_ES(standard) &&
+ ctxInfo.hasExtension("GL_ANGLE_texture_usage");
+
+ if (GR_IS_GR_GL(standard)) {
+ fTextureBarrierSupport = version >= GR_GL_VER(4,5) ||
+ ctxInfo.hasExtension("GL_ARB_texture_barrier") ||
+ ctxInfo.hasExtension("GL_NV_texture_barrier");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fTextureBarrierSupport = ctxInfo.hasExtension("GL_NV_texture_barrier");
+ } // no WebGL support
+
+ if (GR_IS_GR_GL(standard)) {
+ fSampleLocationsSupport = version >= GR_GL_VER(3,2) ||
+ ctxInfo.hasExtension("GL_ARB_texture_multisample");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fSampleLocationsSupport = version >= GR_GL_VER(3,1);
+ } // no WebGL support
+
+ fImagingSupport = GR_IS_GR_GL(standard) &&
+ ctxInfo.hasExtension("GL_ARB_imaging");
+
+ if (((GR_IS_GR_GL(standard) && version >= GR_GL_VER(4,3)) ||
+ (GR_IS_GR_GL_ES(standard) && version >= GR_GL_VER(3,0)) ||
+ ctxInfo.hasExtension("GL_ARB_invalidate_subdata"))) {
+ fInvalidateFBType = kInvalidate_InvalidateFBType;
+ } else if (ctxInfo.hasExtension("GL_EXT_discard_framebuffer")) {
+ fInvalidateFBType = kDiscard_InvalidateFBType;
+ }
+
+ // For future reference on Desktop GL, GL_PRIMITIVE_RESTART_FIXED_INDEX appears in 4.3, and
+ // GL_PRIMITIVE_RESTART (where the client must call glPrimitiveRestartIndex) appears in 3.1.
+ if (GR_IS_GR_GL_ES(standard)) {
+ // Primitive restart can cause a 3x slowdown on Adreno. Enable conservatively.
+ // FIXME: Primitive restart would likely be a win on iOS if we had an enum value for it.
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ fUsePrimitiveRestart = version >= GR_GL_VER(3,0);
+ }
+ }
+
+ if (kARM_GrGLVendor == ctxInfo.vendor() ||
+ kImagination_GrGLVendor == ctxInfo.vendor() ||
+ kQualcomm_GrGLVendor == ctxInfo.vendor() ) {
+ fPreferFullscreenClears = true;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ fVertexArrayObjectSupport = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_vertex_array_object") ||
+ ctxInfo.hasExtension("GL_APPLE_vertex_array_object");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fVertexArrayObjectSupport = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_vertex_array_object");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ fVertexArrayObjectSupport = version >= GR_GL_VER(2, 0) ||
+ ctxInfo.hasExtension("GL_OES_vertex_array_object") ||
+ ctxInfo.hasExtension("OES_vertex_array_object");
+ }
+
+ if (GR_IS_GR_GL(standard) && version >= GR_GL_VER(4,3)) {
+ fDebugSupport = true;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fDebugSupport = ctxInfo.hasExtension("GL_KHR_debug");
+ } // no WebGL support
+
+ if (GR_IS_GR_GL(standard)) {
+ fES2CompatibilitySupport = ctxInfo.hasExtension("GL_ARB_ES2_compatibility");
+ }
+ else if (GR_IS_GR_GL_ES(standard)) {
+ fES2CompatibilitySupport = true;
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ fES2CompatibilitySupport = true;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ fMultisampleDisableSupport = true;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fMultisampleDisableSupport = ctxInfo.hasExtension("GL_EXT_multisample_compatibility");
+ } // no WebGL support
+
+ if (GR_IS_GR_GL(standard)) {
+ // 3.1 has draw_instanced but not instanced_arrays, for the time being we only care about
+ // instanced arrays, but we could make this more granular if we wanted
+ fInstanceAttribSupport =
+ version >= GR_GL_VER(3, 2) ||
+ (ctxInfo.hasExtension("GL_ARB_draw_instanced") &&
+ ctxInfo.hasExtension("GL_ARB_instanced_arrays"));
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fInstanceAttribSupport =
+ version >= GR_GL_VER(3, 0) ||
+ (ctxInfo.hasExtension("GL_EXT_draw_instanced") &&
+ ctxInfo.hasExtension("GL_EXT_instanced_arrays"));
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL 2.0 has DrawArraysInstanced and drawElementsInstanced
+ fInstanceAttribSupport = version >= GR_GL_VER(2, 0);
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 0)) {
+ fBindFragDataLocationSupport = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0) && ctxInfo.hasExtension("GL_EXT_blend_func_extended")) {
+ fBindFragDataLocationSupport = true;
+ }
+ } // no WebGL support
+
+ fBindUniformLocationSupport = ctxInfo.hasExtension("GL_CHROMIUM_bind_uniform_location");
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 1) || ctxInfo.hasExtension("GL_ARB_texture_rectangle") ||
+ ctxInfo.hasExtension("GL_ANGLE_texture_rectangle")) {
+ fRectangleTextureSupport = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (kChromium_GrGLDriver == ctxInfo.driver()) {
+ fRectangleTextureSupport = ctxInfo.hasExtension("GL_ARB_texture_rectangle");
+ } else {
+ // ANGLE will advertise the extension in ES2 contexts but actually using the texture in
+ // a shader requires ES3 shading language.
+ fRectangleTextureSupport = ctxInfo.hasExtension("GL_ANGLE_texture_rectangle") &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration;
+ }
+ } // no WebGL support
+
+ // GrCaps defaults fClampToBorderSupport to true, so disable when unsupported
+ if (GR_IS_GR_GL(standard)) {
+ // Clamp to border added in 1.3
+ if (version < GR_GL_VER(1, 3) && !ctxInfo.hasExtension("GL_ARB_texture_border_clamp")) {
+ fClampToBorderSupport = false;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // GLES didn't have clamp to border until 3.2, but provides several alternative extensions
+ if (version < GR_GL_VER(3, 2) && !ctxInfo.hasExtension("GL_EXT_texture_border_clamp") &&
+ !ctxInfo.hasExtension("GL_NV_texture_border_clamp") &&
+ !ctxInfo.hasExtension("GL_OES_texture_border_clamp")) {
+ fClampToBorderSupport = false;
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL appears to only have REPEAT, CLAMP_TO_EDGE and MIRRORED_REPEAT
+ fClampToBorderSupport = false;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3,3) || ctxInfo.hasExtension("GL_ARB_texture_swizzle")) {
+ this->fShaderCaps->fTextureSwizzleAppliedInShader = false;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3,0)) {
+ this->fShaderCaps->fTextureSwizzleAppliedInShader = false;
+ }
+ } // no WebGL support
+
+ if (GR_IS_GR_GL(standard)) {
+ fMipMapLevelAndLodControlSupport = true;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3,0)) {
+ fMipMapLevelAndLodControlSupport = true;
+ }
+ } // no WebGL support
+
+#ifdef SK_BUILD_FOR_WIN
+ // We're assuming that on Windows Chromium we're using ANGLE.
+ bool isANGLE = kANGLE_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver();
+ // Angle has slow read/write pixel paths for 32bit RGBA (but fast for BGRA).
+ fRGBA8888PixelsOpsAreSlow = isANGLE;
+ // On DX9 ANGLE reading a partial FBO is slow. TODO: Check whether this is still true and
+ // check DX11 ANGLE.
+ fPartialFBOReadIsSlow = isANGLE;
+#endif
+
+ bool isMESA = kMesa_GrGLDriver == ctxInfo.driver();
+ bool isMAC = false;
+#ifdef SK_BUILD_FOR_MAC
+ isMAC = true;
+#endif
+
+    // Both Mesa and macOS drivers have reduced performance when reading back an
+    // RGBA framebuffer as BGRA, or vice versa.
+ fRGBAToBGRAReadbackConversionsAreSlow = isMESA || isMAC;
+
+ // Chrome's command buffer will zero out a buffer if null is passed to glBufferData to
+ // avoid letting an application see uninitialized memory.
+ if (GR_IS_GR_GL(standard) || GR_IS_GR_GL_ES(standard)) {
+ fUseBufferDataNullHint = kChromium_GrGLDriver != ctxInfo.driver();
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL spec explicitly disallows null values.
+ fUseBufferDataNullHint = false;
+ }
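+    // GrGLBuffer keys off this flag: when it is false, onUpdateData() re-specifies
+    // the buffer store with the new data directly instead of orphaning it first.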
+
+ if (GR_IS_GR_GL(standard)) {
+ fClearTextureSupport = (version >= GR_GL_VER(4,4) ||
+ ctxInfo.hasExtension("GL_ARB_clear_texture"));
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fClearTextureSupport = ctxInfo.hasExtension("GL_EXT_clear_texture");
+ } // no WebGL support
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+ fSupportsAHardwareBufferImages = true;
+#endif
+
+ if (GR_IS_GR_GL(standard)) {
+ fSRGBWriteControl = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_sRGB") ||
+ ctxInfo.hasExtension("GL_EXT_framebuffer_sRGB");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // ES through 3.2 requires EXT_srgb_write_control to support toggling
+ // sRGB writing for destinations.
+ fSRGBWriteControl = ctxInfo.hasExtension("GL_EXT_sRGB_write_control");
+ } // No WebGL support
+
+ /**************************************************************************
+ * GrShaderCaps fields
+ **************************************************************************/
+
+ // This must be called after fCoreProfile is set on the GrGLCaps
+ this->initGLSL(ctxInfo, gli);
+ GrShaderCaps* shaderCaps = fShaderCaps.get();
+
+ shaderCaps->fPathRenderingSupport = this->hasPathRenderingSupport(ctxInfo, gli);
+
+ // Enable supported shader-related caps
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fDualSourceBlendingSupport = (version >= GR_GL_VER(3, 3) ||
+ ctxInfo.hasExtension("GL_ARB_blend_func_extended")) &&
+ ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+
+ shaderCaps->fShaderDerivativeSupport = true;
+
+ // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
+ shaderCaps->fGeometryShaderSupport = version >= GR_GL_VER(3, 2) &&
+ ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
+ if (shaderCaps->fGeometryShaderSupport) {
+ if (ctxInfo.glslGeneration() >= k400_GrGLSLGeneration) {
+ shaderCaps->fGSInvocationsSupport = true;
+ } else if (ctxInfo.hasExtension("GL_ARB_gpu_shader5")) {
+ shaderCaps->fGSInvocationsSupport = true;
+ shaderCaps->fGSInvocationsExtensionString = "GL_ARB_gpu_shader5";
+ }
+ }
+
+ shaderCaps->fIntegerSupport = version >= GR_GL_VER(3, 0) &&
+ ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ shaderCaps->fDualSourceBlendingSupport = ctxInfo.hasExtension("GL_EXT_blend_func_extended");
+
+ shaderCaps->fShaderDerivativeSupport = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_standard_derivatives");
+
+ // Mali and early Adreno both have support for geometry shaders, but they appear to be
+ // implemented in software. In practice with ccpr, they are slower than the backup impl that
+ // only uses vertex shaders.
+ if (kARM_GrGLVendor != ctxInfo.vendor() &&
+ kAdreno3xx_GrGLRenderer != ctxInfo.renderer() &&
+ kAdreno4xx_other_GrGLRenderer != ctxInfo.renderer()) {
+
+ if (version >= GR_GL_VER(3,2)) {
+ shaderCaps->fGeometryShaderSupport = true;
+ } else if (ctxInfo.hasExtension("GL_EXT_geometry_shader")) {
+ shaderCaps->fGeometryShaderSupport = true;
+ shaderCaps->fGeometryShaderExtensionString = "GL_EXT_geometry_shader";
+ }
+ shaderCaps->fGSInvocationsSupport = shaderCaps->fGeometryShaderSupport;
+ }
+
+ shaderCaps->fIntegerSupport = version >= GR_GL_VER(3, 0) &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration; // We use this value for GLSL ES 3.0.
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ shaderCaps->fShaderDerivativeSupport = ctxInfo.hasExtension("GL_OES_standard_derivatives") ||
+ ctxInfo.hasExtension("OES_standard_derivatives");
+ }
+
+ // Protect ourselves against tracking huge amounts of texture state.
+ static const uint8_t kMaxSaneSamplers = 32;
+ GrGLint maxSamplers;
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_TEXTURE_IMAGE_UNITS, &maxSamplers);
+ shaderCaps->fMaxFragmentSamplers = SkTMin<GrGLint>(kMaxSaneSamplers, maxSamplers);
+
+ // SGX and Mali GPUs have tiled architectures that have trouble with frequently changing VBOs.
+ // We've measured a performance increase using non-VBO vertex data for dynamic content on these
+ // GPUs. Perhaps we should read the renderer string and limit this decision to specific GPU
+ // families rather than basing it on the vendor alone.
+ // The Chrome command buffer blocks the use of client side buffers (but may emulate VBOs with
+ // them). Client side buffers are not allowed in core profiles.
+ if (GR_IS_GR_GL(standard) || GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.driver() != kChromium_GrGLDriver && !fIsCoreProfile &&
+ (ctxInfo.vendor() == kARM_GrGLVendor || ctxInfo.vendor() == kImagination_GrGLVendor ||
+ ctxInfo.vendor() == kQualcomm_GrGLVendor)) {
+ fPreferClientSideDynamicBuffers = true;
+ }
+ } // No client side arrays in WebGL https://www.khronos.org/registry/webgl/specs/1.0/#6.2
+
+ if (!contextOptions.fAvoidStencilBuffers) {
+ // To reduce surface area, if we avoid stencil buffers, we also disable MSAA.
+ this->initFSAASupport(contextOptions, ctxInfo, gli);
+ this->initStencilSupport(ctxInfo);
+ }
+
+ // Setup blit framebuffer
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object") ||
+ ctxInfo.hasExtension("GL_EXT_framebuffer_blit")) {
+ fBlitFramebufferFlags = 0;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0)) {
+ fBlitFramebufferFlags = kNoFormatConversionForMSAASrc_BlitFramebufferFlag |
+ kNoMSAADst_BlitFramebufferFlag |
+ kRectsMustMatchForMSAASrc_BlitFramebufferFlag;
+ } else if (ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_multisample") ||
+ ctxInfo.hasExtension("GL_ANGLE_framebuffer_blit")) {
+ // The CHROMIUM extension uses the ANGLE version of glBlitFramebuffer and includes its
+ // limitations.
+ fBlitFramebufferFlags = kNoScalingOrMirroring_BlitFramebufferFlag |
+ kResolveMustBeFull_BlitFrambufferFlag |
+ kNoMSAADst_BlitFramebufferFlag |
+ kNoFormatConversion_BlitFramebufferFlag |
+ kRectsMustMatchForMSAASrc_BlitFramebufferFlag;
+ }
+ } // No WebGL 1.0 support for BlitFramebuffer
+
+ this->initBlendEqationSupport(ctxInfo);
+
+ if (GR_IS_GR_GL(standard)) {
+ fMapBufferFlags = kCanMap_MapFlag; // we require VBO support and the desktop VBO
+ // extension includes glMapBuffer.
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_map_buffer_range")) {
+ fMapBufferFlags |= kSubset_MapFlag;
+ fMapBufferType = kMapBufferRange_MapBufferType;
+ } else {
+ fMapBufferType = kMapBuffer_MapBufferType;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // Unextended GLES2 doesn't have any buffer mapping.
+        fMapBufferFlags = kNone_MapFlags;
+ if (ctxInfo.hasExtension("GL_CHROMIUM_map_sub")) {
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
+ fMapBufferType = kChromium_MapBufferType;
+ } else if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_map_buffer_range")) {
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
+ fMapBufferType = kMapBufferRange_MapBufferType;
+ } else if (ctxInfo.hasExtension("GL_OES_mapbuffer")) {
+ fMapBufferFlags = kCanMap_MapFlag;
+ fMapBufferType = kMapBuffer_MapBufferType;
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // explicitly removed https://www.khronos.org/registry/webgl/specs/2.0/#5.14
+        fMapBufferFlags = kNone_MapFlags;
+ }
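+    // When fMapBufferType is left at kNone, GrGLBuffer::onMap() returns without
+    // setting fMapPtr, so uploads go through onUpdateData() instead.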
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(2, 1) || ctxInfo.hasExtension("GL_ARB_pixel_buffer_object") ||
+ ctxInfo.hasExtension("GL_EXT_pixel_buffer_object")) {
+ fTransferBufferSupport = true;
+ fTransferBufferType = kPBO_TransferBufferType;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0) ||
+ (ctxInfo.hasExtension("GL_NV_pixel_buffer_object") &&
+ // GL_EXT_unpack_subimage needed to support subtexture rectangles
+ ctxInfo.hasExtension("GL_EXT_unpack_subimage"))) {
+ fTransferBufferSupport = true;
+ fTransferBufferType = kPBO_TransferBufferType;
+// TODO: get transfer buffers working in Chrome
+// } else if (ctxInfo.hasExtension("GL_CHROMIUM_pixel_transfer_buffer_object")) {
+// fTransferBufferSupport = true;
+// fTransferBufferType = kChromium_TransferBufferType;
+ }
+ } // no WebGL support
+
+ // On many GPUs, map memory is very expensive, so we effectively disable it here by setting the
+ // threshold to the maximum unless the client gives us a hint that map memory is cheap.
+ if (fBufferMapThreshold < 0) {
+#if 0
+ // We think mapping on Chromium will be cheaper once we know ahead of time how much space
+ // we will use for all GrMeshDrawOps. Right now we might wind up mapping a large buffer and
+ // using a small subset.
+ fBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
+#else
+ fBufferMapThreshold = SK_MaxS32;
+#endif
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ fNPOTTextureTileSupport = true;
+ fMipMapSupport = true;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // Unextended ES2 supports NPOT textures with clamp_to_edge and non-mip filters only
+ // ES3 has no limitations.
+ fNPOTTextureTileSupport = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_OES_texture_npot");
+ // ES2 supports MIP mapping for POT textures but our caps don't allow for limited MIP
+        // support. The OES extension or ES 3.0 allows for MIPs on NPOT textures. So, apparently,
+        // does the undocumented GL_IMG_texture_npot extension. This extension does not seem
+        // to allow arbitrary wrap modes, however.
+ fMipMapSupport = fNPOTTextureTileSupport || ctxInfo.hasExtension("GL_IMG_texture_npot");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // Texture access works in the WebGL 2.0 API as in the OpenGL ES 3.0 API
+ fNPOTTextureTileSupport = version >= GR_GL_VER(2,0);
+ // All mipmapping and all wrapping modes are supported for non-power-of-
+ // two images [in WebGL 2.0].
+ fMipMapSupport = fNPOTTextureTileSupport;
+ }
+
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_TEXTURE_SIZE, &fMaxTextureSize);
+
+ if (fDriverBugWorkarounds.max_texture_size_limit_4096) {
+ fMaxTextureSize = SkTMin(fMaxTextureSize, 4096);
+ }
+
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_RENDERBUFFER_SIZE, &fMaxRenderTargetSize);
+ // Our render targets are always created with textures as the color
+ // attachment, hence this min:
+ fMaxRenderTargetSize = SkTMin(fMaxTextureSize, fMaxRenderTargetSize);
+ fMaxPreferredRenderTargetSize = fMaxRenderTargetSize;
+
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ // On Mali G71, RT's above 4k have been observed to incur a performance cost.
+ fMaxPreferredRenderTargetSize = SkTMin(4096, fMaxPreferredRenderTargetSize);
+ }
+
+ fGpuTracingSupport = ctxInfo.hasExtension("GL_EXT_debug_marker");
+
+ // Disable scratch texture reuse on Mali and Adreno devices
+ fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor();
+
+#if 0
+ fReuseScratchBuffers = kARM_GrGLVendor != ctxInfo.vendor() &&
+ kQualcomm_GrGLVendor != ctxInfo.vendor();
+#endif
+
+ if (ctxInfo.hasExtension("GL_EXT_window_rectangles")) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_WINDOW_RECTANGLES, &fMaxWindowRectangles);
+ }
+
+#ifdef SK_BUILD_FOR_WIN
+ // On ANGLE deferring flushes can lead to GPU starvation
+ fPreferVRAMUseOverFlushes = !isANGLE;
+#endif
+
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ // ARM seems to do better with larger quantities of fine triangles, as opposed to using the
+ // sample mask. (At least in our current round rect op.)
+ fPreferTrianglesOverSampleMask = true;
+ }
+
+ if (kChromium_GrGLDriver == ctxInfo.driver()) {
+ fMustClearUploadedBufferData = true;
+ }
+
+ // In a WASM build on Firefox, we see warnings like
+ // WebGL warning: texSubImage2D: This operation requires zeroing texture data. This is slow.
+ // WebGL warning: texSubImage2D: Texture has not been initialized prior to a partial upload,
+ // forcing the browser to clear it. This may be slow.
+ // Setting the initial clear seems to make those warnings go away and offers a substantial
+ // boost in performance in Firefox. Chrome sees a more modest increase.
+ if (GR_IS_GR_WEBGL(standard)) {
+ fShouldInitializeTextures = true;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ // ARB allows mixed size FBO attachments, EXT does not.
+ if (version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object")) {
+ fOversizedStencilSupport = true;
+ } else {
+ SkASSERT(ctxInfo.hasExtension("GL_EXT_framebuffer_object"));
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // ES 3.0 supports mixed size FBO attachments, 2.0 does not.
+ fOversizedStencilSupport = version >= GR_GL_VER(3, 0);
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL 1.0 has some constraints for FBO attachments:
+ // https://www.khronos.org/registry/webgl/specs/1.0/index.html#6.6
+ // These constraints "no longer apply in WebGL 2"
+ fOversizedStencilSupport = version >= GR_GL_VER(2, 0);
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ fDrawIndirectSupport = version >= GR_GL_VER(4,0) ||
+ ctxInfo.hasExtension("GL_ARB_draw_indirect");
+ fBaseInstanceSupport = version >= GR_GL_VER(4,2);
+ fMultiDrawIndirectSupport = version >= GR_GL_VER(4,3) ||
+ (fDrawIndirectSupport &&
+ !fBaseInstanceSupport && // The ARB extension has no base inst.
+ ctxInfo.hasExtension("GL_ARB_multi_draw_indirect"));
+ fDrawRangeElementsSupport = version >= GR_GL_VER(2,0);
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fDrawIndirectSupport = version >= GR_GL_VER(3,1);
+ fMultiDrawIndirectSupport = fDrawIndirectSupport &&
+ ctxInfo.hasExtension("GL_EXT_multi_draw_indirect");
+ fBaseInstanceSupport = fDrawIndirectSupport &&
+ ctxInfo.hasExtension("GL_EXT_base_instance");
+ fDrawRangeElementsSupport = version >= GR_GL_VER(3,0);
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL lacks indirect support, but drawRange was added in WebGL 2.0
+ fDrawRangeElementsSupport = version >= GR_GL_VER(2,0);
+ }
+
+ // TODO: support CHROMIUM_sync_point and maybe KHR_fence_sync
+ if (GR_IS_GR_GL(standard)) {
+ fFenceSyncSupport = (version >= GR_GL_VER(3, 2) || ctxInfo.hasExtension("GL_ARB_sync"));
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fFenceSyncSupport = (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_APPLE_sync"));
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // Only in WebGL 2.0
+ fFenceSyncSupport = version >= GR_GL_VER(2, 0);
+ }
+ // The same objects (GL sync objects) are used to implement GPU/CPU fence syncs and GPU/GPU
+ // semaphores.
+ fSemaphoreSupport = fFenceSyncSupport;
+
+ // Safely moving textures between contexts requires semaphores.
+ fCrossContextTextureSupport = fSemaphoreSupport;
+
+    // Half float vertex attributes require GL3 or ES3.
+    // It can also work with GL_OES_vertex_half_float, but that requires a different enum.
+ if (GR_IS_GR_GL(standard)) {
+ fHalfFloatVertexAttributeSupport = (version >= GR_GL_VER(3, 0));
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fHalfFloatVertexAttributeSupport = (version >= GR_GL_VER(3, 0));
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // This appears to be supported in 2.0, looking at the spec.
+ fHalfFloatVertexAttributeSupport = (version >= GR_GL_VER(2, 0));
+ }
+
+ fDynamicStateArrayGeometryProcessorTextureSupport = true;
+
+ if (GR_IS_GR_GL(standard)) {
+ fProgramBinarySupport = (version >= GR_GL_VER(4, 1));
+ fProgramParameterSupport = (version >= GR_GL_VER(4, 1));
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fProgramBinarySupport =
+ (version >= GR_GL_VER(3, 0)) || ctxInfo.hasExtension("GL_OES_get_program_binary");
+ fProgramParameterSupport = (version >= GR_GL_VER(3, 0));
+ } // Explicitly not supported in WebGL 2.0
+ // https://www.khronos.org/registry/webgl/specs/2.0/#5.4
+ if (fProgramBinarySupport) {
+ GrGLint count;
+ GR_GL_GetIntegerv(gli, GR_GL_NUM_PROGRAM_BINARY_FORMATS, &count);
+ fProgramBinarySupport = count > 0;
+ }
+ if (GR_IS_GR_GL(standard)) {
+ fSamplerObjectSupport =
+ version >= GR_GL_VER(3,3) || ctxInfo.hasExtension("GL_ARB_sampler_objects");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ fSamplerObjectSupport = version >= GR_GL_VER(3,0);
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ fSamplerObjectSupport = version >= GR_GL_VER(2,0);
+ }
+
+ if (GR_IS_GR_GL_ES(standard)) {
+ fTiledRenderingSupport = ctxInfo.hasExtension("GL_QCOM_tiled_rendering");
+ }
+
+ FormatWorkarounds formatWorkarounds;
+
+ if (!contextOptions.fDisableDriverCorrectnessWorkarounds) {
+ this->applyDriverCorrectnessWorkarounds(ctxInfo, contextOptions, shaderCaps,
+ &formatWorkarounds);
+ }
+
+    // Requires that fTextureSwizzleSupport, MSAA support, and ES compatibility
+    // have already been detected.
+ this->initFormatTable(ctxInfo, gli, formatWorkarounds);
+
+ this->applyOptionsOverrides(contextOptions);
+ shaderCaps->applyOptionsOverrides(contextOptions);
+
+ // For now these two are equivalent but we could have dst read in shader via some other method.
+ shaderCaps->fDstReadInShaderSupport = shaderCaps->fFBFetchSupport;
+}
+
+const char* get_glsl_version_decl_string(GrGLStandard standard, GrGLSLGeneration generation,
+ bool isCoreProfile) {
+ if (GR_IS_GR_GL(standard)) {
+ switch (generation) {
+ case k110_GrGLSLGeneration:
+ return "#version 110\n";
+ case k130_GrGLSLGeneration:
+ return "#version 130\n";
+ case k140_GrGLSLGeneration:
+ return "#version 140\n";
+ case k150_GrGLSLGeneration:
+ if (isCoreProfile) {
+ return "#version 150\n";
+ } else {
+ return "#version 150 compatibility\n";
+ }
+ case k330_GrGLSLGeneration:
+ if (isCoreProfile) {
+ return "#version 330\n";
+ } else {
+ return "#version 330 compatibility\n";
+ }
+ case k400_GrGLSLGeneration:
+ if (isCoreProfile) {
+ return "#version 400\n";
+ } else {
+ return "#version 400 compatibility\n";
+ }
+ case k420_GrGLSLGeneration:
+ if (isCoreProfile) {
+ return "#version 420\n";
+ } else {
+ return "#version 420 compatibility\n";
+ }
+ default:
+ break;
+ }
+ } else if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ switch (generation) {
+ case k110_GrGLSLGeneration:
+                // ES2's shader language is based on GLSL 1.20 but is version
+                // 1.00 of the ES shading language.
+ return "#version 100\n";
+ case k330_GrGLSLGeneration:
+ return "#version 300 es\n";
+ case k310es_GrGLSLGeneration:
+ return "#version 310 es\n";
+ case k320es_GrGLSLGeneration:
+ return "#version 320 es\n";
+ default:
+ break;
+ }
+ }
+ return "<no version>";
+}
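+
+// E.g. a GLES context at k330_GrGLSLGeneration gets "#version 300 es\n", while a
+// 3.3 core-profile desktop GL context gets "#version 330\n".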
+
+bool is_float_fp32(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli, GrGLenum precision) {
+ if (GR_IS_GR_GL(ctxInfo.standard()) &&
+ ctxInfo.version() < GR_GL_VER(4,1) &&
+ !ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
+ // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
+ return true;
+ }
+ // glGetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Hopefully the
+ // geometry shaders don't have lower precision than vertex and fragment.
+ for (GrGLenum shader : {GR_GL_FRAGMENT_SHADER, GR_GL_VERTEX_SHADER}) {
+ GrGLint range[2];
+ GrGLint bits;
+ GR_GL_GetShaderPrecisionFormat(gli, shader, precision, range, &bits);
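+        // range[] holds the log2 min/max representable exponents and bits the
+        // mantissa precision; IEEE binary32 reports roughly (127, 127, 23).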
+ if (range[0] < 127 || range[1] < 127 || bits < 23) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrGLCaps::initGLSL(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+ GrGLStandard standard = ctxInfo.standard();
+ GrGLVersion version = ctxInfo.version();
+
+ /**************************************************************************
+ * Caps specific to GrShaderCaps
+ **************************************************************************/
+
+ GrShaderCaps* shaderCaps = fShaderCaps.get();
+ shaderCaps->fGLSLGeneration = ctxInfo.glslGeneration();
+ if (GR_IS_GR_GL_ES(standard)) {
+ // fFBFetchRequiresEnablePerSample is not a shader cap but is initialized below to keep it
+ // with related FB fetch logic.
+ if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
+ shaderCaps->fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
+ shaderCaps->fFBFetchSupport = true;
+ shaderCaps->fFBFetchColorName = "gl_LastFragData[0]";
+ shaderCaps->fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
+ fFBFetchRequiresEnablePerSample = false;
+ } else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
+ // Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know.
+ shaderCaps->fFBFetchNeedsCustomOutput = false;
+ shaderCaps->fFBFetchSupport = true;
+ shaderCaps->fFBFetchColorName = "gl_LastFragData[0]";
+ shaderCaps->fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
+ fFBFetchRequiresEnablePerSample = false;
+ } else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
+ // The arm extension also requires an additional flag which we will set onResetContext.
+ shaderCaps->fFBFetchNeedsCustomOutput = false;
+ shaderCaps->fFBFetchSupport = true;
+ shaderCaps->fFBFetchColorName = "gl_LastFragColorARM";
+ shaderCaps->fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
+ fFBFetchRequiresEnablePerSample = true;
+ }
+ shaderCaps->fUsesPrecisionModifiers = true;
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ shaderCaps->fUsesPrecisionModifiers = true;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fFlatInterpolationSupport = ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ shaderCaps->fFlatInterpolationSupport =
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration; // This is the value for GLSL ES 3.0.
+ } // not sure for WebGL
+
+    // Flat interpolation appears to be slow on Qualcomm GPUs (tested Adreno 405 and 530).
+    // Avoid on ANGLE too, as it inserts a geometry shader into the pipeline for flat interp.
+ shaderCaps->fPreferFlatInterpolation = shaderCaps->fFlatInterpolationSupport &&
+ kQualcomm_GrGLVendor != ctxInfo.vendor() &&
+ kANGLE_GrGLDriver != ctxInfo.driver();
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fNoPerspectiveInterpolationSupport =
+ ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.hasExtension("GL_NV_shader_noperspective_interpolation") &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration /* GLSL ES 3.0 */) {
+ shaderCaps->fNoPerspectiveInterpolationSupport = true;
+ shaderCaps->fNoPerspectiveInterpolationExtensionString =
+ "GL_NV_shader_noperspective_interpolation";
+ }
+ } // Not sure for WebGL
+
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fSampleVariablesSupport = ctxInfo.glslGeneration() >= k400_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.glslGeneration() >= k320es_GrGLSLGeneration) {
+ shaderCaps->fSampleVariablesSupport = true;
+ } else if (ctxInfo.hasExtension("GL_OES_sample_variables")) {
+ shaderCaps->fSampleVariablesSupport = true;
+ shaderCaps->fSampleVariablesExtensionString = "GL_OES_sample_variables";
+ }
+ }
+ shaderCaps->fSampleVariablesStencilSupport = shaderCaps->fSampleVariablesSupport;
+
+ if (kQualcomm_GrGLVendor == ctxInfo.vendor() || kATI_GrGLVendor == ctxInfo.vendor()) {
+ // FIXME: The sample mask round rect op draws nothing on several Adreno and Radeon bots.
+ // Other ops that use sample mask while rendering to stencil seem to work fine. Temporarily
+ // disable sample mask on color buffers while we investigate.
+ // http://skbug.com/8921
+ shaderCaps->fSampleVariablesSupport = false;
+ }
+
+ shaderCaps->fVersionDeclString = get_glsl_version_decl_string(standard,
+ shaderCaps->fGLSLGeneration,
+ fIsCoreProfile);
+
+ if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ if (k110_GrGLSLGeneration == shaderCaps->fGLSLGeneration) {
+ shaderCaps->fShaderDerivativeExtensionString = "GL_OES_standard_derivatives";
+ }
+ } // WebGL might have to check for OES_standard_derivatives
+
+ // Frag Coords Convention support is not part of ES
+ if (GR_IS_GR_GL(standard) &&
+ (ctxInfo.glslGeneration() >= k150_GrGLSLGeneration ||
+ ctxInfo.hasExtension("GL_ARB_fragment_coord_conventions"))) {
+ shaderCaps->fFragCoordConventionsExtensionString = "GL_ARB_fragment_coord_conventions";
+ }
+
+ if (GR_IS_GR_GL_ES(standard)) {
+ shaderCaps->fSecondaryOutputExtensionString = "GL_EXT_blend_func_extended";
+ }
+
+ if (ctxInfo.hasExtension("GL_OES_EGL_image_external")) {
+ if (ctxInfo.glslGeneration() == k110_GrGLSLGeneration) {
+ shaderCaps->fExternalTextureSupport = true;
+ shaderCaps->fExternalTextureExtensionString = "GL_OES_EGL_image_external";
+ } else if (ctxInfo.hasExtension("GL_OES_EGL_image_external_essl3") ||
+ ctxInfo.hasExtension("OES_EGL_image_external_essl3")) {
+ // At least one driver has been found that has this extension without the "GL_" prefix.
+ shaderCaps->fExternalTextureSupport = true;
+ shaderCaps->fExternalTextureExtensionString = "GL_OES_EGL_image_external_essl3";
+ }
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fVertexIDSupport = true;
+ } else if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ // Desktop GLSL 3.30 == ES GLSL 3.00.
+ shaderCaps->fVertexIDSupport = ctxInfo.glslGeneration() >= k330_GrGLSLGeneration;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fFPManipulationSupport = ctxInfo.glslGeneration() >= k400_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ shaderCaps->fFPManipulationSupport = ctxInfo.glslGeneration() >= k310es_GrGLSLGeneration;
+ }
+
+ shaderCaps->fFloatIs32Bits = is_float_fp32(ctxInfo, gli, GR_GL_HIGH_FLOAT);
+ shaderCaps->fHalfIs32Bits = is_float_fp32(ctxInfo, gli, GR_GL_MEDIUM_FLOAT);
+ shaderCaps->fHasLowFragmentPrecision = kMali4xx_GrGLRenderer == ctxInfo.renderer();
+
+ if (GR_IS_GR_GL(standard)) {
+ shaderCaps->fBuiltinFMASupport = ctxInfo.glslGeneration() >= k400_GrGLSLGeneration;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ shaderCaps->fBuiltinFMASupport = ctxInfo.glslGeneration() >= k320es_GrGLSLGeneration;
+ }
+}
+
+bool GrGLCaps::hasPathRenderingSupport(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+ bool hasChromiumPathRendering = ctxInfo.hasExtension("GL_CHROMIUM_path_rendering");
+
+ if (!(ctxInfo.hasExtension("GL_NV_path_rendering") || hasChromiumPathRendering)) {
+ return false;
+ }
+
+ if (GR_IS_GR_GL(ctxInfo.standard())) {
+ if (ctxInfo.version() < GR_GL_VER(4, 3) &&
+ !ctxInfo.hasExtension("GL_ARB_program_interface_query")) {
+ return false;
+ }
+ } else if (GR_IS_GR_GL_ES(ctxInfo.standard())) {
+ if (!hasChromiumPathRendering &&
+ ctxInfo.version() < GR_GL_VER(3, 1)) {
+ return false;
+ }
+ } else if (GR_IS_GR_WEBGL(ctxInfo.standard())) {
+ // No WebGL support
+ return false;
+ }
+ // We only support v1.3+ of GL_NV_path_rendering which allows us to
+ // set individual fragment inputs with ProgramPathFragmentInputGen. The API
+ // additions are detected by checking the existence of the function.
+ // We also use *Then* functions that not all drivers might have. Check
+ // them for consistency.
+ if (!gli->fFunctions.fStencilThenCoverFillPath ||
+ !gli->fFunctions.fStencilThenCoverStrokePath ||
+ !gli->fFunctions.fStencilThenCoverFillPathInstanced ||
+ !gli->fFunctions.fStencilThenCoverStrokePathInstanced ||
+ !gli->fFunctions.fProgramPathFragmentInputGen) {
+ return false;
+ }
+ return true;
+}
+
+void GrGLCaps::initFSAASupport(const GrContextOptions& contextOptions, const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* gli) {
+ // We need dual source blending and the ability to disable multisample in order to support mixed
+ // samples in every corner case.
+ if (fMultisampleDisableSupport && this->shaderCaps()->dualSourceBlendingSupport()) {
+ fMixedSamplesSupport = ctxInfo.hasExtension("GL_NV_framebuffer_mixed_samples") ||
+ ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_mixed_samples");
+ }
+
+ if (GR_IS_GR_GL(ctxInfo.standard())) {
+ if (ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object")) {
+ fMSFBOType = kStandard_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_EXT_framebuffer_multisample") &&
+ ctxInfo.hasExtension("GL_EXT_framebuffer_blit")) {
+ fMSFBOType = kStandard_MSFBOType;
+ }
+ } else if (GR_IS_GR_GL_ES(ctxInfo.standard())) {
+ // We prefer multisampled-render-to-texture extensions over ES3 MSAA because we've observed
+ // ES3 driver bugs on at least one device with a tiled GPU (N10).
+ if (ctxInfo.hasExtension("GL_EXT_multisampled_render_to_texture")) {
+ fMSFBOType = kES_EXT_MsToTexture_MSFBOType;
+ fMSAAResolvesAutomatically = true;
+ } else if (ctxInfo.hasExtension("GL_IMG_multisampled_render_to_texture")) {
+ fMSFBOType = kES_IMG_MsToTexture_MSFBOType;
+ fMSAAResolvesAutomatically = true;
+ } else if (ctxInfo.version() >= GR_GL_VER(3,0)) {
+ fMSFBOType = kStandard_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ fMSFBOType = kStandard_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_ANGLE_framebuffer_multisample")) {
+ fMSFBOType = kStandard_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_APPLE_framebuffer_multisample")) {
+ fMSFBOType = kES_Apple_MSFBOType;
+ }
+ } else if (GR_IS_GR_WEBGL(ctxInfo.standard())) {
+ // No support in WebGL 1, but there is for 2.0
+ if (ctxInfo.version() >= GR_GL_VER(2,0)) {
+ fMSFBOType = kStandard_MSFBOType;
+ } else {
+ fMSFBOType = kNone_MSFBOType;
+ }
+ }
+
+ // We disable MSAA for all Intel GPUs. Before Gen9, performance was very bad. Even with Gen9,
+ // we've seen driver crashes in the wild. We don't have data on Gen11 yet.
+ // chromium:527565, chromium:983926
+ if (kIntel_GrGLVendor == ctxInfo.vendor()) {
+ fMSFBOType = kNone_MSFBOType;
+ }
+}
+
+void GrGLCaps::initBlendEqationSupport(const GrGLContextInfo& ctxInfo) {
+ GrShaderCaps* shaderCaps = static_cast<GrShaderCaps*>(fShaderCaps.get());
+
+ bool layoutQualifierSupport = false;
+ if ((GR_IS_GR_GL(fStandard) && shaderCaps->generation() >= k140_GrGLSLGeneration) ||
+ (GR_IS_GR_GL_ES(fStandard) && shaderCaps->generation() >= k330_GrGLSLGeneration)) {
+ layoutQualifierSupport = true;
+ } else if (GR_IS_GR_WEBGL(fStandard)) {
+ return;
+ }
+
+ if (ctxInfo.hasExtension("GL_NV_blend_equation_advanced_coherent")) {
+ fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kAutomatic_AdvBlendEqInteraction;
+ } else if (ctxInfo.hasExtension("GL_KHR_blend_equation_advanced_coherent") &&
+ layoutQualifierSupport) {
+ fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kGeneralEnable_AdvBlendEqInteraction;
+ } else if (ctxInfo.hasExtension("GL_NV_blend_equation_advanced")) {
+ fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kAutomatic_AdvBlendEqInteraction;
+ } else if (ctxInfo.hasExtension("GL_KHR_blend_equation_advanced") && layoutQualifierSupport) {
+ fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kGeneralEnable_AdvBlendEqInteraction;
+ // TODO: Use kSpecificEnables_AdvBlendEqInteraction if "blend_support_all_equations" is
+ // slow on a particular platform.
+ }
+}
+
+namespace {
+const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
+}
+
+void GrGLCaps::initStencilSupport(const GrGLContextInfo& ctxInfo) {
+
+ // Build up list of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least.
+
+ // these consts are in order of most preferred to least preferred
+ // we don't bother with GL_STENCIL_INDEX1 or GL_DEPTH32F_STENCIL8
+
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = {GR_GL_STENCIL_INDEX8, 8, 8, false},
+ gS16 = {GR_GL_STENCIL_INDEX16, 16, 16, false},
+ gD24S8 = {GR_GL_DEPTH24_STENCIL8, 8, 32, true },
+ gS4 = {GR_GL_STENCIL_INDEX4, 4, 4, false},
+ // gS = {GR_GL_STENCIL_INDEX, kUnknownBitCount, kUnknownBitCount, false},
+ gDS = {GR_GL_DEPTH_STENCIL, kUnknownBitCount, kUnknownBitCount, true };
+
+ if (GR_IS_GR_GL(ctxInfo.standard())) {
+ bool supportsPackedDS =
+ ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_EXT_packed_depth_stencil") ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object");
+
+ // S1 thru S16 formats are in GL 3.0+, EXT_FBO, and ARB_FBO since we
+ // require FBO support we can expect these are legal formats and don't
+ // check. These also all support the unsized GL_STENCIL_INDEX.
+ fStencilFormats.push_back() = gS8;
+ fStencilFormats.push_back() = gS16;
+ if (supportsPackedDS) {
+ fStencilFormats.push_back() = gD24S8;
+ }
+ fStencilFormats.push_back() = gS4;
+ if (supportsPackedDS) {
+ fStencilFormats.push_back() = gDS;
+ }
+ } else if (GR_IS_GR_GL_ES(ctxInfo.standard())) {
+ // ES2 has STENCIL_INDEX8 without extensions but requires extensions
+ // for other formats.
+ // ES doesn't support using the unsized format.
+
+ fStencilFormats.push_back() = gS8;
+ //fStencilFormats.push_back() = gS16;
+ if (ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_OES_packed_depth_stencil")) {
+ fStencilFormats.push_back() = gD24S8;
+ }
+ if (ctxInfo.hasExtension("GL_OES_stencil4")) {
+ fStencilFormats.push_back() = gS4;
+ }
+ } else if (GR_IS_GR_WEBGL(ctxInfo.standard())) {
+ fStencilFormats.push_back() = gS8;
+ if (ctxInfo.version() >= GR_GL_VER(2,0)) {
+ fStencilFormats.push_back() = gD24S8;
+ }
+ }
+}
+
+#ifdef SK_ENABLE_DUMP_GPU
+void GrGLCaps::onDumpJSON(SkJSONWriter* writer) const {
+
+ // We are called by the base class, which has already called beginObject(). We choose to nest
+ // all of our caps information in a named sub-object.
+ writer->beginObject("GL caps");
+
+ writer->beginArray("Stencil Formats");
+
+ for (int i = 0; i < fStencilFormats.count(); ++i) {
+ writer->beginObject(nullptr, false);
+ writer->appendS32("stencil bits", fStencilFormats[i].fStencilBits);
+ writer->appendS32("total bits", fStencilFormats[i].fTotalBits);
+ writer->endObject();
+ }
+
+ writer->endArray();
+
+ static const char* kMSFBOExtStr[] = {
+ "None",
+ "Standard",
+ "Apple",
+ "IMG MS To Texture",
+ "EXT MS To Texture",
+ };
+ GR_STATIC_ASSERT(0 == kNone_MSFBOType);
+ GR_STATIC_ASSERT(1 == kStandard_MSFBOType);
+ GR_STATIC_ASSERT(2 == kES_Apple_MSFBOType);
+ GR_STATIC_ASSERT(3 == kES_IMG_MsToTexture_MSFBOType);
+ GR_STATIC_ASSERT(4 == kES_EXT_MsToTexture_MSFBOType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kMSFBOExtStr) == kLast_MSFBOType + 1);
+
+ static const char* kInvalidateFBTypeStr[] = {
+ "None",
+ "Discard",
+ "Invalidate",
+ };
+ GR_STATIC_ASSERT(0 == kNone_InvalidateFBType);
+ GR_STATIC_ASSERT(1 == kDiscard_InvalidateFBType);
+ GR_STATIC_ASSERT(2 == kInvalidate_InvalidateFBType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kInvalidateFBTypeStr) == kLast_InvalidateFBType + 1);
+
+ static const char* kMapBufferTypeStr[] = {
+ "None",
+ "MapBuffer",
+ "MapBufferRange",
+ "Chromium",
+ };
+ GR_STATIC_ASSERT(0 == kNone_MapBufferType);
+ GR_STATIC_ASSERT(1 == kMapBuffer_MapBufferType);
+ GR_STATIC_ASSERT(2 == kMapBufferRange_MapBufferType);
+ GR_STATIC_ASSERT(3 == kChromium_MapBufferType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kMapBufferTypeStr) == kLast_MapBufferType + 1);
+
+ writer->appendBool("Core Profile", fIsCoreProfile);
+ writer->appendString("MSAA Type", kMSFBOExtStr[fMSFBOType]);
+ writer->appendString("Invalidate FB Type", kInvalidateFBTypeStr[fInvalidateFBType]);
+ writer->appendString("Map Buffer Type", kMapBufferTypeStr[fMapBufferType]);
+ writer->appendS32("Max FS Uniform Vectors", fMaxFragmentUniformVectors);
+ writer->appendBool("Pack Flip Y support", fPackFlipYSupport);
+
+ writer->appendBool("Texture Usage support", fTextureUsageSupport);
+ writer->appendBool("GL_ARB_imaging support", fImagingSupport);
+ writer->appendBool("Vertex array object support", fVertexArrayObjectSupport);
+ writer->appendBool("Debug support", fDebugSupport);
+ writer->appendBool("Draw indirect support", fDrawIndirectSupport);
+ writer->appendBool("Multi draw indirect support", fMultiDrawIndirectSupport);
+ writer->appendBool("Base instance support", fBaseInstanceSupport);
+ writer->appendBool("RGBA 8888 pixel ops are slow", fRGBA8888PixelsOpsAreSlow);
+ writer->appendBool("Partial FBO read is slow", fPartialFBOReadIsSlow);
+ writer->appendBool("Bind uniform location support", fBindUniformLocationSupport);
+ writer->appendBool("Rectangle texture support", fRectangleTextureSupport);
+ writer->appendBool("BGRA to RGBA readback conversions are slow",
+ fRGBAToBGRAReadbackConversionsAreSlow);
+ writer->appendBool("Use buffer data null hint", fUseBufferDataNullHint);
+ writer->appendBool("Clear texture support", fClearTextureSupport);
+ writer->appendBool("Program binary support", fProgramBinarySupport);
+ writer->appendBool("Program parameters support", fProgramParameterSupport);
+ writer->appendBool("Sampler object support", fSamplerObjectSupport);
+ writer->appendBool("Tiled rendering support", fTiledRenderingSupport);
+ writer->appendBool("FB fetch requires enable per sample", fFBFetchRequiresEnablePerSample);
+ writer->appendBool("sRGB Write Control", fSRGBWriteControl);
+
+ writer->appendBool("Intermediate texture for partial updates of unorm textures ever bound to FBOs",
+ fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO);
+ writer->appendBool("Intermediate texture for all updates of textures bound to FBOs",
+ fUseDrawInsteadOfAllRenderTargetWrites);
+ writer->appendBool("Max instances per draw without crashing (or zero)",
+ fMaxInstancesPerDrawWithoutCrashing);
+
+ writer->beginArray("formats");
+
+ for (int i = 0; i < kGrGLFormatCount; ++i) {
+ writer->beginObject(nullptr, false);
+ writer->appendHexU32("flags", fFormatTable[i].fFlags);
+ writer->appendHexU32("f_type", (uint32_t)fFormatTable[i].fFormatType);
+ writer->appendHexU32("c_internal", fFormatTable[i].fCompressedInternalFormat);
+ writer->appendHexU32("i_for_teximage", fFormatTable[i].fInternalFormatForTexImageOrStorage);
+ writer->appendHexU32("i_for_renderbuffer", fFormatTable[i].fInternalFormatForRenderbuffer);
+ writer->appendHexU32("default_ex_format", fFormatTable[i].fDefaultExternalFormat);
+ writer->appendHexU32("default_ex_type", fFormatTable[i].fDefaultExternalType);
+ writer->appendHexU64("bpp", fFormatTable[i].fBytesPerPixel);
+
+ writer->beginArray("surface color types");
+ for (int j = 0; j < fFormatTable[i].fColorTypeInfoCount; ++j) {
+ const auto& ctInfo = fFormatTable[i].fColorTypeInfos[j];
+ writer->beginObject(nullptr, false);
+ writer->appendHexU32("colorType", (uint32_t)ctInfo.fColorType);
+ writer->appendHexU32("flags", ctInfo.fFlags);
+
+ writer->beginArray("data color types");
+ for (int k = 0; k < ctInfo.fExternalIOFormatCount; ++k) {
+ const auto& ioInfo = ctInfo.fExternalIOFormats[k];
+ writer->beginObject(nullptr, false);
+ writer->appendHexU32("colorType", (uint32_t)ioInfo.fColorType);
+ writer->appendHexU32("ex_type", ioInfo.fExternalType);
+ writer->appendHexU32("ex_teximage", ioInfo.fExternalTexImageFormat);
+ writer->appendHexU32("ex_read", ioInfo.fExternalReadFormat);
+ writer->endObject();
+ }
+ writer->endArray();
+ writer->endObject();
+ }
+ writer->endArray();
+ writer->endObject();
+ }
+
+ writer->endArray();
+ writer->endObject();
+}
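+
+// An abbreviated, hypothetical sketch of the JSON shape emitted above (values
+// vary by driver; shown for illustration only):
+//
+//     "GL caps": {
+//         "Stencil Formats": [ { "stencil bits": 8, "total bits": 8 }, ... ],
+//         "MSAA Type": "Standard",
+//         ...
+//         "formats": [ { "flags": "0x...", "surface color types": [ ... ] }, ... ]
+//     }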
+#else
+void GrGLCaps::onDumpJSON(SkJSONWriter* writer) const { }
+#endif
+
+void GrGLCaps::getTexImageExternalFormatAndType(GrGLFormat surfaceFormat, GrGLenum* externalFormat,
+ GrGLenum* externalType) const {
+ const auto& info = this->getFormatInfo(surfaceFormat);
+ *externalType = info.fDefaultExternalType;
+ *externalFormat = info.fDefaultExternalFormat;
+}
+
+void GrGLCaps::getTexSubImageZeroFormatTypeAndBpp(GrGLFormat format, GrGLenum* externalFormat,
+ GrGLenum* externalType, size_t* bpp) const {
+ const auto& info = this->getFormatInfo(format);
+ *externalType = info.fDefaultExternalType;
+ *externalFormat = info.fDefaultExternalFormat;
+ *bpp = info.fBytesPerPixel;
+}
+
+void GrGLCaps::getTexSubImageExternalFormatAndType(GrGLFormat surfaceFormat,
+ GrColorType surfaceColorType,
+ GrColorType memoryColorType,
+ GrGLenum* externalFormat,
+ GrGLenum* externalType) const {
+ this->getExternalFormat(surfaceFormat, surfaceColorType, memoryColorType,
+ kTexImage_ExternalFormatUsage, externalFormat, externalType);
+}
+
+void GrGLCaps::getReadPixelsFormat(GrGLFormat surfaceFormat, GrColorType surfaceColorType,
+ GrColorType memoryColorType, GrGLenum* externalFormat,
+ GrGLenum* externalType) const {
+ this->getExternalFormat(surfaceFormat, surfaceColorType, memoryColorType,
+ kReadPixels_ExternalFormatUsage, externalFormat, externalType);
+}
+
+void GrGLCaps::getExternalFormat(GrGLFormat surfaceFormat, GrColorType surfaceColorType,
+ GrColorType memoryColorType, ExternalFormatUsage usage,
+ GrGLenum* externalFormat, GrGLenum* externalType) const {
+ SkASSERT(externalFormat && externalType);
+ *externalFormat = this->getFormatInfo(surfaceFormat).externalFormat(
+ surfaceColorType, memoryColorType, usage);
+ *externalType = this->getFormatInfo(surfaceFormat).externalType(
+ surfaceColorType, memoryColorType);
+}
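+
+// A minimal usage sketch (hypothetical caller, not from this file): upload code
+// queries the table before issuing glTexSubImage2D, e.g.
+//
+//     GrGLenum extFormat, extType;
+//     caps.getTexSubImageExternalFormatAndType(GrGLFormat::kRGBA8,
+//                                              GrColorType::kRGBA_8888,
+//                                              GrColorType::kRGBA_8888,
+//                                              &extFormat, &extType);
+//     // Typically yields extFormat == GR_GL_RGBA, extType == GR_GL_UNSIGNED_BYTE.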
+
+void GrGLCaps::setStencilFormatIndexForFormat(GrGLFormat format, int index) {
+ SkASSERT(!this->hasStencilFormatBeenDeterminedForFormat(format));
+ this->getFormatInfo(format).fStencilFormatIndex =
+ index < 0 ? FormatInfo::kUnsupported_StencilFormatIndex : index;
+}
+
+void GrGLCaps::setColorTypeFormat(GrColorType colorType, GrGLFormat format) {
+ int idx = static_cast<int>(colorType);
+ SkASSERT(fColorTypeToFormatTable[idx] == GrGLFormat::kUnknown);
+ fColorTypeToFormatTable[idx] = format;
+}
+
+void GrGLCaps::initFormatTable(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli,
+ const FormatWorkarounds& formatWorkarounds) {
+ GrGLStandard standard = ctxInfo.standard();
+ // standard can be unused (optimized away) if SK_ASSUME_GL_ES is set
+ sk_ignore_unused_variable(standard);
+ GrGLVersion version = ctxInfo.version();
+
+ uint32_t nonMSAARenderFlags = FormatInfo::kFBOColorAttachment_Flag;
+ uint32_t msaaRenderFlags = nonMSAARenderFlags;
+ if (kNone_MSFBOType != fMSFBOType) {
+ msaaRenderFlags |= FormatInfo::kFBOColorAttachmentWithMSAA_Flag;
+ }
+
+ bool texStorageSupported = false;
+ if (GR_IS_GR_GL(standard)) {
+ // The EXT version can apply to either GL or GLES.
+ texStorageSupported = version >= GR_GL_VER(4,2) ||
+ ctxInfo.hasExtension("GL_ARB_texture_storage") ||
+ ctxInfo.hasExtension("GL_EXT_texture_storage");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ texStorageSupported = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_EXT_texture_storage");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ texStorageSupported = version >= GR_GL_VER(2,0);
+ }
+ if (fDriverBugWorkarounds.disable_texture_storage) {
+ texStorageSupported = false;
+ }
+#ifdef SK_BUILD_FOR_ANDROID
+ // crbug.com/945506. Telemetry reported a memory usage regression for Android Go Chrome/WebView
+ // when using glTexStorage2D. This appears to affect OOP-R (so not just over command buffer).
+ if (!formatWorkarounds.fDontDisableTexStorageOnAndroid) {
+ texStorageSupported = false;
+ }
+#endif
+
+ // ES 2.0 requires that the internal/external formats match so we can't use sized internal
+ // formats for glTexImage until ES 3.0. TODO: Support sized internal formats in WebGL2.
+ bool texImageSupportsSizedInternalFormat =
+ (GR_IS_GR_GL(standard) || (GR_IS_GR_GL_ES(standard) && version >= GR_GL_VER(3,0)));
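+ // For example (illustrative, not upstream), ES 2.0 requires the unsized base
+ // format and requires <internalFormat> to match <format>:
+ //
+ //     glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
+ //                  GL_RGBA, GL_UNSIGNED_BYTE, pixels);  // ES 2.0
+ //
+ // while GL and ES 3.0+ also accept the sized GL_RGBA8 as <internalFormat>.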
+
+ // for now we don't support floating point MSAA on ES
+ uint32_t fpRenderFlags = (GR_IS_GR_GL(standard)) ? msaaRenderFlags : nonMSAARenderFlags;
+
+ for (int i = 0; i < kGrColorTypeCnt; ++i) {
+ fColorTypeToFormatTable[i] = GrGLFormat::kUnknown;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ GrGLenum halfFloatType = GR_GL_HALF_FLOAT;
+ if ((GR_IS_GR_GL_ES(standard) && version < GR_GL_VER(3, 0)) ||
+ (GR_IS_GR_WEBGL(standard) && version < GR_GL_VER(2, 0))) {
+ halfFloatType = GR_GL_HALF_FLOAT_OES;
+ }
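+ // Note (illustrative, not upstream): GL_HALF_FLOAT (0x140B) is the core enum,
+ // while pre-ES3 and WebGL 1 contexts use the distinct GL_HALF_FLOAT_OES enum
+ // (0x8D61) from GL_OES_texture_half_float.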
+
+ // Format: RGBA8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGBA8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGBA8;
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 4;
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (GR_IS_GR_GL(standard)) {
+ info.fFlags |= msaaRenderFlags;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3,0) || ctxInfo.hasExtension("GL_OES_rgb8_rgba8") ||
+ ctxInfo.hasExtension("GL_ARM_rgba8")) {
+ info.fFlags |= msaaRenderFlags;
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ info.fFlags |= msaaRenderFlags;
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGBA8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGBA8 : GR_GL_RGBA;
+ }
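+
+ // Sketch of the two allocation paths selected above (hypothetical call sites,
+ // not from this file):
+ //
+ //     glTexStorage2D(GL_TEXTURE_2D, mipLevels, GL_RGBA8, w, h);   // immutable
+ //     glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0,
+ //                  GL_RGBA, GL_UNSIGNED_BYTE, nullptr);           // mutable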
+
+ bool supportsBGRAColorType = GR_IS_GR_GL(standard) &&
+ (version >= GR_GL_VER(1, 2) || ctxInfo.hasExtension("GL_EXT_bgra"));
+ info.fColorTypeInfoCount = supportsBGRAColorType ? 3 : 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA8, Surface: kRGBA_8888
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_8888;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRGBA_8888, GrGLFormat::kRGBA8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 1;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA8, Surface: kRGBA_8888, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+
+ // Format: RGBA8, Surface: kBGRA_8888
+ if (supportsBGRAColorType) {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kBGRA_8888;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kBGRA_8888, GrGLFormat::kRGBA8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA8, Surface: kBGRA_8888, Data: kBGRA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kBGRA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_BGRA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGBA8, Surface: kBGRA_8888, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+
+ // Format: RGBA8, Surface: kRGB_888x
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGB_888x;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RGB1();
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 1;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA8, Surface: kRGB_888x, Data: kRGBA_888x
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGB_888x;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+
+ // Format: R8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kR8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_R8;
+ info.fDefaultExternalFormat = GR_GL_RED;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 1;
+ bool r8Support = false;
+ if (GR_IS_GR_GL(standard)) {
+ r8Support = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_rg");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ r8Support = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_texture_rg");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ r8Support = ctxInfo.version() >= GR_GL_VER(2, 0);
+ }
+
+ if (r8Support) {
+ info.fFlags |= FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_R8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_R8 : GR_GL_RED;
+ }
+
+ if (r8Support) {
+ info.fColorTypeInfoCount = 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: R8, Surface: kAlpha_8
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ this->setColorTypeFormat(GrColorType::kAlpha_8, GrGLFormat::kR8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: R8, Surface: kAlpha_8, Data: kAlpha_8
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_8;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_RED;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: R8, Surface: kAlpha_8, Data: kAlpha_8xxx
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_8xxx;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+
+ // Format: R8, Surface: kGray_8
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kGray_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle("rrr1");
+ this->setColorTypeFormat(GrColorType::kGray_8, GrGLFormat::kR8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: R8, Surface: kGray_8, Data: kGray_8
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kGray_8;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_RED;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: R8, Surface: kGray_8, Data: kGray_8xxx
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kGray_8xxx;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: ALPHA8
+ {
+ bool alpha8IsValidForGL = GR_IS_GR_GL(standard) &&
+ (!fIsCoreProfile || version <= GR_GL_VER(3, 0));
+ bool alpha8IsValidForGLES = GR_IS_GR_GL_ES(standard);
+ bool alpha8IsValidForWebGL = GR_IS_GR_WEBGL(standard);
+
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kALPHA8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ // GL_EXT_texture_storage adds GL_ALPHA8 for texture storage. However, ES3 has glTexStorage
+ // but does not have GL_ALPHA8 (and requires a sized internal format for glTexStorage).
+ // WebGL never has GL_ALPHA8.
+ bool alpha8SizedEnumSupported =
+ alpha8IsValidForGL ||
+ (alpha8IsValidForGLES && ctxInfo.hasExtension("GL_EXT_texture_storage"));
+ bool alpha8TexStorageSupported = alpha8SizedEnumSupported && texStorageSupported;
+
+ bool alpha8IsRenderable = false;
+ if (alpha8IsValidForGL) {
+ // Core profile removes ALPHA8 support.
+ // OpenGL 3.0+ (and GL_ARB_framebuffer_object) supports ALPHA8 as renderable.
+ alpha8IsRenderable = ctxInfo.version() >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object");
+ }
+ info.fInternalFormatForRenderbuffer = GR_GL_ALPHA8;
+ info.fDefaultExternalFormat = GR_GL_ALPHA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 1;
+ if (alpha8IsValidForGL || alpha8IsValidForGLES || alpha8IsValidForWebGL) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ }
+ if (alpha8IsRenderable && alpha8IsValidForGL) {
+ // We will use ALPHA8 to create MSAA renderbuffers.
+ SkASSERT(alpha8SizedEnumSupported);
+ info.fFlags |= msaaRenderFlags;
+ }
+ if (alpha8TexStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_ALPHA8;
+ } else {
+ // Even if GL_ALPHA8 is added to ES by GL_EXT_texture_storage it doesn't become legal
+ // for glTexImage2D.
+ if (!GR_IS_GR_GL_ES(standard) && texImageSupportsSizedInternalFormat &&
+ alpha8SizedEnumSupported) {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_ALPHA8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_ALPHA;
+ }
+ }
+
+ if (alpha8IsValidForGL || alpha8IsValidForGLES || alpha8IsValidForWebGL) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: ALPHA8, Surface: kAlpha_8
+ {
+ if (alpha8IsValidForGL || alpha8IsValidForGLES || alpha8IsValidForWebGL) {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag |
+ ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::AAAA();
+ int idx = static_cast<int>(GrColorType::kAlpha_8);
+ if (fColorTypeToFormatTable[idx] == GrGLFormat::kUnknown) {
+ this->setColorTypeFormat(GrColorType::kAlpha_8, GrGLFormat::kALPHA8);
+ }
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: ALPHA8, Surface: kAlpha_8, Data: kAlpha_8
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_8;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_ALPHA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: ALPHA8, Surface: kAlpha_8, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+ }
+
+ // Format: LUMINANCE8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kLUMINANCE8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_LUMINANCE8;
+ info.fDefaultExternalFormat = GR_GL_LUMINANCE;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 1;
+ bool lum8Supported = false;
+ bool lum8SizedFormatSupported = false;
+ if (GR_IS_GR_GL(standard) && !fIsCoreProfile) {
+ lum8Supported = true;
+ lum8SizedFormatSupported = true;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ lum8Supported = true;
+ // Even on ES3 this extension is required to define LUMINANCE8.
+ lum8SizedFormatSupported = ctxInfo.hasExtension("GL_EXT_texture_storage");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ lum8Supported = true;
+ }
+ if (lum8Supported) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ }
+ if (texStorageSupported && lum8SizedFormatSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE8;
+ } else if (texImageSupportsSizedInternalFormat && lum8SizedFormatSupported) {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE;
+ }
+ // We are not enabling attaching to an FBO for LUMINANCE8 mostly because of confusion in the
+ // spec. For GLES it does not seem to ever support LUMINANCE8 being color-renderable. For GL
+ // versions less than 3.0 it is provided by GL_ARB_framebuffer_object. However, the original
+ // version of that extension did not add LUMINANCE8; it was added in a later revision. So
+ // even the presence of that extension does not guarantee support. GL 3.0 and higher (core
+ // or compatibility) do not list LUMINANCE8 as color-renderable (which is strange since the
+ // GL_ARB_framebuffer_object extension was meant to bring 3.0 functionality to lower
+ // versions).
+
+ if (lum8Supported) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: LUMINANCE8, Surface: kGray_8
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kGray_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ int idx = static_cast<int>(GrColorType::kGray_8);
+ if (fColorTypeToFormatTable[idx] == GrGLFormat::kUnknown) {
+ this->setColorTypeFormat(GrColorType::kGray_8, GrGLFormat::kLUMINANCE8);
+ }
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: LUMINANCE8, Surface: kGray_8, Data: kGray_8
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kGray_8;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_LUMINANCE;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: LUMINANCE8, Surface: kGray_8, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: BGRA8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kBGRA8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+
+ // We currently only use the renderbuffer format when allocating msaa renderbuffers, so we
+ // are making decisions here based on that use case. The GL_EXT_texture_format_BGRA8888
+ // extension adds BGRA color renderbuffer support for ES 2.0, but this does not guarantee
+ // support for MSAA renderbuffers. Additionally, the renderable support was added in a later
+ // revision of the extension. So it is possible for older drivers to support the extension
+ // but only an early revision of it without renderable support. We have no way of
+ // distinguishing between the two. The GL_APPLE_texture_format_BGRA8888 does not add support
+ // for BGRA color renderbuffers at all. Ideally, for both cases we would use RGBA8 for our
+ // format for the MSAA buffer. In the GL_EXT_texture_format_BGRA8888 case we could still
+ // make the resolve target BGRA, which would work for glBlitFramebuffer-based resolves
+ // since glBlitFramebuffer just requires that the src and dst be bindable to FBOs.
+ // However, we can't do this in the current world since some devices (e.g. chromium &
+ // angle) require the formats in glBlitFramebuffer to match. We don't have a way to really
+ // check this during resolve since we only have one GrPixelConfig and one GrBackendFormat
+ // that is shared by the GrGLRenderTarget. Once we break those up into different surfaces
+ // we can revisit this change.
+ if (ctxInfo.hasExtension("GL_APPLE_texture_format_BGRA8888")) {
+ info.fInternalFormatForRenderbuffer = GR_GL_RGBA8;
+ } else {
+ info.fInternalFormatForRenderbuffer = GR_GL_BGRA8;
+ }
+
+ info.fDefaultExternalFormat = GR_GL_BGRA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 4;
+
+ GrGLenum bgraTexImageFormat;
+ // If BGRA is supported as an internal format it must always be specified to glTex[Sub]Image
+ // as a base format. Which base format depends on which extension is used.
+ if (ctxInfo.hasExtension("GL_APPLE_texture_format_BGRA8888")) {
+ // GL_APPLE_texture_format_BGRA8888:
+ // ES 2.0: the extension makes BGRA an external format but not an internal format.
+ // ES 3.0: the extension explicitly states GL_BGRA8 is not a valid internal format
+ // for glTexImage (just for glTexStorage).
+ bgraTexImageFormat = GR_GL_RGBA;
+ } else {
+ // GL_EXT_texture_format_BGRA8888:
+ // This extension adds GL_BGRA as an unsized internal format. However, it is
+ // written against ES 2.0 and therefore doesn't define a GL_BGRA8 as ES 2.0 doesn't
+ // have sized internal formats. See later where we check for tex storage BGRA8
+ // support.
+ bgraTexImageFormat = GR_GL_BGRA;
+ }
+
+ // TexStorage requires using a sized internal format and BGRA8 is only supported if we have
+ // the GL_APPLE_texture_format_BGRA8888 extension or if we have GL_EXT_texture_storage and
+ // GL_EXT_texture_format_BGRA8888.
+ bool supportsBGRATexStorage = false;
+
+ if (GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.hasExtension("GL_EXT_texture_format_BGRA8888")) {
+ info.fFlags = FormatInfo::kTexturable_Flag | nonMSAARenderFlags;
+ // GL_EXT_texture_storage has defined interactions with
+ // GL_EXT_texture_format_BGRA8888. However, ES3 supports glTexStorage but
+ // without GL_EXT_texture_storage it does not allow the BGRA8 sized internal format.
+ if (ctxInfo.hasExtension("GL_EXT_texture_storage") &&
+ !formatWorkarounds.fDisableBGRATextureStorageForIntelWindowsES) {
+ supportsBGRATexStorage = true;
+ }
+ } else if (ctxInfo.hasExtension("GL_APPLE_texture_format_BGRA8888")) {
+ // This APPLE extension introduces complexity on ES2. It leaves the internal format
+ // as RGBA, but allows BGRA as the external format. From testing, it appears that
+ // the driver remembers the external format when the texture is created (with
+ // TexImage). If you then try to upload data in the other swizzle (with
+ // TexSubImage), it fails. We could work around this, but it adds even more state
+ // tracking to code that is already too tricky. Instead, we opt not to support BGRA
+ // on ES2 with this extension. This also side-steps some ambiguous interactions with
+ // the texture storage extension.
+ if (version >= GR_GL_VER(3,0)) {
+ // The APPLE extension doesn't explicitly make this renderable, but
+ // internally it appears to use RGBA8, which we'll patch up below.
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ supportsBGRATexStorage = true;
+ }
+ }
+ }
+ if (texStorageSupported && supportsBGRATexStorage) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_BGRA8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage = bgraTexImageFormat;
+ }
+
+ if (SkToBool(info.fFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: BGRA8, Surface: kBGRA_8888
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kBGRA_8888;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kBGRA_8888, GrGLFormat::kBGRA8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: BGRA8, Surface: kBGRA_8888, Data: kBGRA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kBGRA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_BGRA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: BGRA8, Surface: kBGRA_8888, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGB565
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGB565);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGB565;
+ info.fDefaultExternalFormat = GR_GL_RGB;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_SHORT_5_6_5;
+ info.fBytesPerPixel = 2;
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(4, 2) || ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+ // 565 is not a sized internal format on desktop GL. So on desktop with
+ // 565 we always use an unsized internal format to let the system pick
+ // the best sized format to convert the 565 data to. Since TexStorage
+ // only allows sized internal formats we disallow it.
+ //
+ // TODO: As of 4.2, regular GL supports 565. This logic is due for an
+ // update.
+ if (texStorageSupported && GR_IS_GR_GL_ES(standard)) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGB565;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGB565 : GR_GL_RGB;
+ }
+
+ if (SkToBool(info.fFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGB565, Surface: kBGR_565
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kBGR_565;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kBGR_565, GrGLFormat::kRGB565);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGB565, Surface: kBGR_565, Data: kBGR_565
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kBGR_565;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_SHORT_5_6_5;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGB;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGB565, Surface: kBGR_565, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGBA16F
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGBA16F);
+ info.fFormatType = FormatType::kFloat;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGBA16F;
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ info.fDefaultExternalType = halfFloatType;
+ info.fBytesPerPixel = 8;
+ bool rgba16FTextureSupport = false;
+ bool rgba16FRenderTargetSupport = false;
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 0)) {
+ rgba16FTextureSupport = true;
+ rgba16FRenderTargetSupport = true;
+ } else if (ctxInfo.hasExtension("GL_ARB_texture_float")) {
+ rgba16FTextureSupport = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0)) {
+ rgba16FTextureSupport = true;
+ rgba16FRenderTargetSupport =
+ version >= GR_GL_VER(3, 2) ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float");
+ } else if (ctxInfo.hasExtension("GL_OES_texture_half_float") &&
+ ctxInfo.hasExtension("GL_OES_texture_half_float_linear")) {
+ rgba16FTextureSupport = true;
+ rgba16FRenderTargetSupport = ctxInfo.hasExtension("GL_EXT_color_buffer_half_float");
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ if (version >= GR_GL_VER(2, 0)) {
+ rgba16FTextureSupport = true;
+ rgba16FRenderTargetSupport =
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_float");
+ } else if ((ctxInfo.hasExtension("GL_OES_texture_half_float") ||
+ ctxInfo.hasExtension("OES_texture_half_float")) &&
+ (ctxInfo.hasExtension("GL_OES_texture_half_float_linear") ||
+ ctxInfo.hasExtension("OES_texture_half_float_linear"))) {
+ rgba16FTextureSupport = true;
+ // We don't check for EXT_color_buffer_float as it's only defined for WebGL 2.
+ rgba16FRenderTargetSupport =
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_half_float");
+ }
+ }
+
+ if (rgba16FTextureSupport) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (rgba16FRenderTargetSupport) {
+ info.fFlags |= fpRenderFlags;
+ }
+ }
+ if (texStorageSupported && !formatWorkarounds.fDisableRGBA16FTexStorageForCrBug1008003) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGBA16F;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGBA16F : GR_GL_RGBA;
+ }
+
+ if (rgba16FTextureSupport) {
+ uint32_t flags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+
+ info.fColorTypeInfoCount = 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA16F, Surface: kRGBA_F16
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_F16;
+ ctInfo.fFlags = flags;
+ this->setColorTypeFormat(GrColorType::kRGBA_F16, GrGLFormat::kRGBA16F);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA16F, Surface: kRGBA_F16, Data: kRGBA_F16
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F16;
+ ioFormat.fExternalType = halfFloatType;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGBA16F, Surface: kRGBA_F16, Data: kRGBA_F32
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F32;
+ ioFormat.fExternalType = GR_GL_FLOAT;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+
+ // Format: RGBA16F, Surface: kRGBA_F16_Clamped
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_F16_Clamped;
+ ctInfo.fFlags = flags;
+ this->setColorTypeFormat(GrColorType::kRGBA_F16_Clamped, GrGLFormat::kRGBA16F);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA16F, Surface: kRGBA_F16_Clamped, Data: kRGBA_F16_Clamped
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F16_Clamped;
+ ioFormat.fExternalType = halfFloatType;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGBA16F, Surface: kRGBA_F16_Clamped, Data: kRGBA_F32
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F32;
+ ioFormat.fExternalType = GR_GL_FLOAT;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: R16F
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kR16F);
+ info.fFormatType = FormatType::kFloat;
+ info.fInternalFormatForRenderbuffer = GR_GL_R16F;
+ info.fDefaultExternalFormat = GR_GL_RED;
+ info.fDefaultExternalType = halfFloatType;
+ info.fBytesPerPixel = 2;
+ bool r16FTextureSupport = false;
+ bool r16FRenderTargetSupport = false;
+
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_rg")) {
+ r16FTextureSupport = true;
+ r16FRenderTargetSupport = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // It seems possible that a combination of GL_EXT_texture_rg and
+ // GL_EXT_color_buffer_half_float might add this format to ES 2.0 but it is not entirely
+ // clear. The latter mentions interaction but that may only be for renderbuffers as
+ // neither adds the texture format explicitly.
+ // GL_OES_texture_format_half_float makes no reference to RED formats.
+ if (version >= GR_GL_VER(3, 0)) {
+ r16FTextureSupport = true;
+ r16FRenderTargetSupport = version >= GR_GL_VER(3, 2) ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float") ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float");
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ if (version >= GR_GL_VER(2, 0)) {
+ r16FTextureSupport = true;
+ r16FRenderTargetSupport = ctxInfo.hasExtension("GL_EXT_color_buffer_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_float");
+ }
+ }
+
+ if (r16FTextureSupport) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (r16FRenderTargetSupport) {
+ info.fFlags |= fpRenderFlags;
+ }
+ }
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_R16F;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_R16F : GR_GL_RED;
+ }
+
+ if (r16FTextureSupport) {
+ // Format: R16F, Surface: kAlpha_F16
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ this->setColorTypeFormat(GrColorType::kAlpha_F16, GrGLFormat::kR16F);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: R16F, Surface: kAlpha_F16, Data: kAlpha_F16
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_F16;
+ ioFormat.fExternalType = halfFloatType;
+ ioFormat.fExternalTexImageFormat = GR_GL_RED;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: R16F, Surface: kAlpha_F16, Data: kAlpha_F32xxx
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_F32xxx;
+ ioFormat.fExternalType = GR_GL_FLOAT;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: LUMINANCE16F
+ {
+ // NOTE: We disallow lum16f on ES devices if linear filtering modes are not
+ // supported. This is for simplicity, but a more granular approach is possible.
+ bool lum16FSupported = false;
+ bool lum16FSizedFormatSupported = false;
+ if (GR_IS_GR_GL(standard)) {
+ if (!fIsCoreProfile && ctxInfo.hasExtension("GL_ARB_texture_float")) {
+ lum16FSupported = true;
+ lum16FSizedFormatSupported = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.hasExtension("GL_OES_texture_half_float_linear") &&
+ ctxInfo.hasExtension("GL_OES_texture_half_float")) {
+ lum16FSupported = true;
+ // Even on ES3 this extension is required to define LUMINANCE16F.
+ lum16FSizedFormatSupported = ctxInfo.hasExtension("GL_EXT_texture_storage");
+ }
+ } // No WebGL support
+
+ if (formatWorkarounds.fDisableLuminance16F) {
+ lum16FSupported = false;
+ }
+
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kLUMINANCE16F);
+ info.fFormatType = FormatType::kFloat;
+ info.fInternalFormatForRenderbuffer = GR_GL_LUMINANCE16F;
+ info.fDefaultExternalFormat = GR_GL_LUMINANCE;
+ info.fDefaultExternalType = halfFloatType;
+ info.fBytesPerPixel = 2;
+
+ if (lum16FSupported) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+
+ if (texStorageSupported && lum16FSizedFormatSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE16F;
+ } else if (texImageSupportsSizedInternalFormat && lum16FSizedFormatSupported) {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE16F;
+ } else {
+ info.fInternalFormatForTexImageOrStorage = GR_GL_LUMINANCE;
+ }
+
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: LUMINANCE16F, Surface: kAlpha_F16
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+
+ int idx = static_cast<int>(GrColorType::kAlpha_F16);
+ if (fColorTypeToFormatTable[idx] == GrGLFormat::kUnknown) {
+ this->setColorTypeFormat(GrColorType::kAlpha_F16, GrGLFormat::kLUMINANCE16F);
+ }
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: LUMINANCE16F, Surface: kAlpha_F16, Data: kAlpha_F16
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_F16;
+ ioFormat.fExternalType = halfFloatType;
+ ioFormat.fExternalTexImageFormat = GR_GL_LUMINANCE;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: LUMINANCE16F, Surface: kAlpha_F16, Data: kRGBA_F32
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F32;
+ ioFormat.fExternalType = GR_GL_FLOAT;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGB8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGB8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGB8;
+ info.fDefaultExternalFormat = GR_GL_RGB;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 4; // We assume the GPU stores this format 4 byte aligned
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (GR_IS_GR_GL(standard)) {
+ // Even in OpenGL 4.6 GL_RGB8 is required to be color renderable but not required to be
+ // a supported render buffer format. Since we usually use render buffers for MSAA on
+ // non-ES GL we don't support MSAA for GL_RGB8. On 4.2+ we could check using
+ // glGetInternalFormativ(GL_RENDERBUFFER, GL_RGB8, GL_INTERNALFORMAT_SUPPORTED, ...) if
+ // this becomes an issue.
+ // This would probably also work in mixed-samples mode, where there is no MSAA color
+ // buffer, but we don't support that, just for simplicity's sake.
+ info.fFlags |= nonMSAARenderFlags;
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // 3.0 and the extension support this as a render buffer format.
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_OES_rgb8_rgba8")) {
+ info.fFlags |= msaaRenderFlags;
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // WebGL seems to support RGB8
+ info.fFlags |= msaaRenderFlags;
+ }
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGB8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGB8 : GR_GL_RGB;
+ }
+ if (formatWorkarounds.fDisableRGB8ForMali400) {
+ info.fFlags = 0;
+ }
+
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGB8, Surface: kRGB_888x
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGB_888x;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRGB_888x, GrGLFormat::kRGB8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGB8, Surface: kRGB_888x, Data: kRGB_888x
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGB_888x;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ // This is technically the wrong format to use for this color type since the color
+ // type is 4 bytes but the format is 3. However, we don't currently upload data of
+ // this type so the format is only used when creating an empty texture. If we want
+ // to support uploading data we should add an RGB_888 GrColorType. Additionally, on
+ // the FormatInfo we should have a default format to use when we want to create an
+ // empty texture.
+ ioFormat.fExternalTexImageFormat = GR_GL_RGB;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGB8, Surface: kRGB_888x, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+
+ // Format: RG8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRG8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RG8;
+ info.fDefaultExternalFormat = GR_GL_RG;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 2;
+ bool rg8Support = false;
+ if (GR_IS_GR_GL(standard)) {
+ rg8Support = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_rg");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ rg8Support = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_texture_rg");
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ rg8Support = version >= GR_GL_VER(2, 0);
+ }
+ if (rg8Support) {
+ info.fFlags |= FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RG8;
+ }
+ }
+ if (!(info.fFlags & FormatInfo::kUseTexStorage_Flag)) {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RG8 : GR_GL_RG;
+ }
+ if (rg8Support) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RG8, Surface: kRG_88
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_88;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRG_88, GrGLFormat::kRG8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RG8, Surface: kRG_88, Data: kRG_88
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRG_88;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = GR_GL_RG;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RG8, Surface: kRG_88, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGB10_A2
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGB10_A2);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGB10_A2;
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_INT_2_10_10_10_REV;
+ info.fBytesPerPixel = 4;
+ if (GR_IS_GR_GL(standard) ||
+ (GR_IS_GR_GL_ES(standard) && version >= GR_GL_VER(3, 0))) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ } else if (GR_IS_GR_GL_ES(standard) &&
+ ctxInfo.hasExtension("GL_EXT_texture_type_2_10_10_10_REV")) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ } // No WebGL support
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGB10_A2;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGB10_A2 : GR_GL_RGBA;
+ }
+
+ if (SkToBool(info.fFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGB10_A2, Surface: kRGBA_1010102
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_1010102;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRGBA_1010102, GrGLFormat::kRGB10_A2);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGB10_A2, Surface: kRGBA_1010102, Data: kRGBA_1010102
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_1010102;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_INT_2_10_10_10_REV;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGB10_A2, Surface: kRGBA_1010102, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGBA4
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGBA4);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_RGBA4;
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
+ info.fBytesPerPixel = 2;
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(4, 2)) {
+ info.fFlags |= msaaRenderFlags;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ info.fFlags |= msaaRenderFlags;
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ info.fFlags |= msaaRenderFlags;
+ }
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGBA4;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGBA4 : GR_GL_RGBA;
+ }
+
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA4, Surface: kABGR_4444
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kABGR_4444;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kABGR_4444, GrGLFormat::kRGBA4);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: RGBA4, Surface: kABGR_4444, Data: kABGR_4444
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kABGR_4444;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: RGBA4, Surface: kABGR_4444, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+
+ // Format: SRGB8_ALPHA8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kSRGB8_ALPHA8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_SRGB8_ALPHA8;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_BYTE;
+ info.fBytesPerPixel = 4;
+
+ // We may modify the default external format below.
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ bool srgb8Alpha8TexStorageSupported = texStorageSupported;
+ bool srgb8Alpha8TextureSupport = false;
+ bool srgb8Alpha8RenderTargetSupport = false;
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 0)) {
+ srgb8Alpha8TextureSupport = true;
+ srgb8Alpha8RenderTargetSupport = true;
+ } else if (ctxInfo.hasExtension("GL_EXT_texture_sRGB")) {
+ srgb8Alpha8TextureSupport = true;
+ if (ctxInfo.hasExtension("GL_ARB_framebuffer_sRGB") ||
+ ctxInfo.hasExtension("GL_EXT_framebuffer_sRGB")) {
+ srgb8Alpha8RenderTargetSupport = true;
+ }
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_sRGB")) {
+ srgb8Alpha8TextureSupport = true;
+ srgb8Alpha8RenderTargetSupport = true;
+ }
+ if (version < GR_GL_VER(3, 0)) {
+ // ES 2.0 requires that the external format matches the internal format.
+ info.fDefaultExternalFormat = GR_GL_SRGB_ALPHA;
+ // There is no defined interaction between GL_EXT_sRGB and GL_EXT_texture_storage.
+ srgb8Alpha8TexStorageSupported = false;
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ // The sRGB extension should be available on most WebGL 1.0 contexts, although it
+ // sometimes appears under two names.
+ if (version >= GR_GL_VER(2, 0) || ctxInfo.hasExtension("GL_EXT_sRGB") ||
+ ctxInfo.hasExtension("EXT_sRGB")) {
+ srgb8Alpha8TextureSupport = true;
+ srgb8Alpha8RenderTargetSupport = true;
+ }
+ if (version < GR_GL_VER(2, 0)) {
+ // WebGL 1.0 requires that the external format matches the internal format.
+ info.fDefaultExternalFormat = GR_GL_SRGB_ALPHA;
+ // There is no extension to WebGL 1 that adds glTexStorage.
+ SkASSERT(!srgb8Alpha8TexStorageSupported);
+ }
+ }
+
+ if (srgb8Alpha8TextureSupport) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ if (srgb8Alpha8RenderTargetSupport) {
+ info.fFlags |= formatWorkarounds.fDisableSRGBRenderWithMSAAForMacAMD
+ ? nonMSAARenderFlags
+ : msaaRenderFlags;
+ }
+ }
+ if (srgb8Alpha8TexStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_SRGB8_ALPHA8;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_SRGB8_ALPHA8 : GR_GL_SRGB_ALPHA;
+ }
+
+ if (srgb8Alpha8TextureSupport) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: SRGB8_ALPHA8, Surface: kRGBA_8888_SRGB
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_8888_SRGB;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRGBA_8888_SRGB, GrGLFormat::kSRGB8_ALPHA8);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 1;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+
+ // Format: SRGB8_ALPHA8, Surface: kRGBA_8888_SRGB, Data: kRGBA_8888_SRGB
+ {
+ // GL does not do srgb<->rgb conversions when transferring between cpu and gpu.
+ // Thus, the external format is GL_RGBA. See below for note about ES2.0 and
+ // glTex[Sub]Image.
+ GrGLenum texImageExternalFormat = GR_GL_RGBA;
+
+ // OpenGL ES 2.0 + GL_EXT_sRGB allows GL_SRGB_ALPHA to be specified as the
+ // <format> param to Tex(Sub)Image. ES 2.0 requires the <internalFormat> and
+ // <format> params to match. Thus, on ES 2.0 we will use GL_SRGB_ALPHA as the
+ // <format> param. On OpenGL and ES 3.0+ GL_SRGB_ALPHA does not work for the
+ // <format> param to glTexImage.
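+ // As an illustrative sketch (raw GL, not Skia API): an ES 2.0 + GL_EXT_sRGB
+ // upload therefore looks like
+ //   glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB_ALPHA_EXT, w, h, 0,
+ //                GL_SRGB_ALPHA_EXT, GL_UNSIGNED_BYTE, pixels);
+ // with the <internalFormat> and <format> params matching.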
+ if (GR_IS_GR_GL_ES(standard) && version == GR_GL_VER(2,0)) {
+ texImageExternalFormat = GR_GL_SRGB_ALPHA;
+ }
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888_SRGB;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = texImageExternalFormat;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: COMPRESSED_RGB8_ETC2
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kCOMPRESSED_RGB8_ETC2);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_COMPRESSED_RGB8_ETC2;
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(4, 3) || ctxInfo.hasExtension("GL_ARB_ES3_compatibility")) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_compressed_ETC2_RGB8_texture")) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ }
+ } // No WebGL support
+
+ // There are no supported GrColorTypes for this format
+ }
+
+ // Format: COMPRESSED_ETC1_RGB8
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kCOMPRESSED_ETC1_RGB8);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_COMPRESSED_ETC1_RGB8;
+ if (GR_IS_GR_GL_ES(standard)) {
+ if (ctxInfo.hasExtension("GL_OES_compressed_ETC1_RGB8_texture")) {
+ info.fFlags = FormatInfo::kTexturable_Flag;
+ }
+ } // No GL or WebGL support
+
+ // There are no supported GrColorTypes for this format
+ }
+
+ // Format: R16
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kR16);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForRenderbuffer = GR_GL_R16;
+ info.fDefaultExternalFormat = GR_GL_RED;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_SHORT;
+ info.fBytesPerPixel = 2;
+ bool r16Supported = false;
+ if (GR_IS_GR_GL(standard)) {
+ r16Supported = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_rg");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ r16Supported = ctxInfo.hasExtension("GL_EXT_texture_norm16");
+ } // No WebGL support
+
+ if (r16Supported) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_R16;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_R16 : GR_GL_RED;
+ }
+
+ if (r16Supported) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: R16, Surface: kAlpha_16
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ this->setColorTypeFormat(GrColorType::kAlpha_16, GrGLFormat::kR16);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: R16, Surface: kAlpha_16, Data: kAlpha_16
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_16;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_SHORT;
+ ioFormat.fExternalTexImageFormat = GR_GL_RED;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: R16, Surface: kAlpha_16, Data: kAlpha_8xxx
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kAlpha_8xxx;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RG16
+ {
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRG16);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RG16 : GR_GL_RG;
+ info.fInternalFormatForRenderbuffer = GR_GL_RG16;
+ info.fDefaultExternalFormat = GR_GL_RG;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_SHORT;
+ info.fBytesPerPixel = 4;
+ bool rg16Supported = false;
+ if (GR_IS_GR_GL(standard)) {
+ rg16Supported = version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_rg");
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ rg16Supported = ctxInfo.hasExtension("GL_EXT_texture_norm16");
+ } // No WebGL support
+
+ if (rg16Supported) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RG16;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RG16 : GR_GL_RG;
+ }
+
+ if (rg16Supported) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: GR_GL_RG16, Surface: kRG_1616
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_1616;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRG_1616, GrGLFormat::kRG16);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: GR_GL_RG16, Surface: kRG_1616, Data: kRG_1616
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRG_1616;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_SHORT;
+ ioFormat.fExternalTexImageFormat = GR_GL_RG;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: GR_GL_RG16, Surface: kRG_1616, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RGBA16
+ {
+ bool rgba16Support = false;
+ if (GR_IS_GR_GL(standard)) {
+ rgba16Support = version >= GR_GL_VER(3, 0);
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ rgba16Support = ctxInfo.hasExtension("GL_EXT_texture_norm16");
+ } // No WebGL support
+
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRGBA16);
+ info.fFormatType = FormatType::kNormalizedFixedPoint;
+
+ info.fInternalFormatForRenderbuffer = GR_GL_RGBA16;
+ info.fDefaultExternalFormat = GR_GL_RGBA;
+ info.fDefaultExternalType = GR_GL_UNSIGNED_SHORT;
+ info.fBytesPerPixel = 8;
+ if (rgba16Support) {
+ info.fFlags = FormatInfo::kTexturable_Flag | msaaRenderFlags;
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RGBA16;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RGBA16 : GR_GL_RGBA;
+ }
+
+ if (rgba16Support) {
+ // Format: GR_GL_RGBA16, Surface: kRGBA_16161616
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_16161616;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRGBA_16161616, GrGLFormat::kRGBA16);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: GR_GL_RGBA16, Surface: kRGBA_16161616, Data: kRGBA_16161616
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_16161616;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_SHORT;
+ ioFormat.fExternalTexImageFormat = GR_GL_RGBA;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: GR_GL_RGBA16, Surface: kRGBA_16161616, Data: kRGBA_8888
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_8888;
+ ioFormat.fExternalType = GR_GL_UNSIGNED_BYTE;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ // Format: RG16F
+ {
+ bool rg16FTextureSupport = false;
+ bool rg16FRenderTargetSupport = false;
+ if (GR_IS_GR_GL(standard)) {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_float")) {
+ rg16FTextureSupport = true;
+ rg16FRenderTargetSupport = true;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ // It seems possible that a combination of GL_EXT_texture_rg and
+ // GL_EXT_color_buffer_half_float might add this format to ES 2.0 but it is not entirely
+ // clear. The latter mentions interaction but that may only be for renderbuffers as
+ // neither adds the texture format explicitly.
+ // GL_OES_texture_format_half_float makes no reference to RG formats.
+ if (version >= GR_GL_VER(3, 0)) {
+ rg16FTextureSupport = true;
+ rg16FRenderTargetSupport = version >= GR_GL_VER(3, 2) ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float") ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float");
+ }
+ } else if (GR_IS_GR_WEBGL(standard)) {
+ if (version >= GR_GL_VER(2, 0)) {
+ rg16FTextureSupport = true;
+ rg16FRenderTargetSupport = ctxInfo.hasExtension("GL_EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_half_float") ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float") ||
+ ctxInfo.hasExtension("EXT_color_buffer_float");
+ }
+ }
+
+ FormatInfo& info = this->getFormatInfo(GrGLFormat::kRG16F);
+ info.fFormatType = FormatType::kFloat;
+ info.fInternalFormatForRenderbuffer = GR_GL_RG16F;
+ info.fDefaultExternalFormat = GR_GL_RG;
+ info.fDefaultExternalType = halfFloatType;
+ info.fBytesPerPixel = 4;
+ if (rg16FTextureSupport) {
+ info.fFlags |= FormatInfo::kTexturable_Flag;
+ if (rg16FRenderTargetSupport) {
+ info.fFlags |= fpRenderFlags;
+ }
+ }
+
+ if (texStorageSupported) {
+ info.fFlags |= FormatInfo::kUseTexStorage_Flag;
+ info.fInternalFormatForTexImageOrStorage = GR_GL_RG16F;
+ } else {
+ info.fInternalFormatForTexImageOrStorage =
+ texImageSupportsSizedInternalFormat ? GR_GL_RG16F : GR_GL_RG;
+ }
+
+ if (rg16FTextureSupport) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: GR_GL_RG16F, Surface: kRG_F16
+ {
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ this->setColorTypeFormat(GrColorType::kRG_F16, GrGLFormat::kRG16F);
+
+ // External IO ColorTypes:
+ ctInfo.fExternalIOFormatCount = 2;
+ ctInfo.fExternalIOFormats.reset(
+ new ColorTypeInfo::ExternalIOFormats[ctInfo.fExternalIOFormatCount]());
+ int ioIdx = 0;
+ // Format: GR_GL_RG16F, Surface: kRG_F16, Data: kRG_F16
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRG_F16;
+ ioFormat.fExternalType = halfFloatType;
+ ioFormat.fExternalTexImageFormat = GR_GL_RG;
+ ioFormat.fExternalReadFormat = 0;
+ }
+
+ // Format: GR_GL_RG16F, Surface: kRG_F16, Data: kRGBA_F32
+ {
+ auto& ioFormat = ctInfo.fExternalIOFormats[ioIdx++];
+ ioFormat.fColorType = GrColorType::kRGBA_F32;
+ ioFormat.fExternalType = GR_GL_FLOAT;
+ ioFormat.fExternalTexImageFormat = 0;
+ ioFormat.fExternalReadFormat = GR_GL_RGBA;
+ }
+ }
+ }
+ }
+
+ this->setupSampleCounts(ctxInfo, gli);
+
+#ifdef SK_DEBUG
+ for (int i = 0; i < kGrGLFormatCount; ++i) {
+ if (GrGLFormat::kUnknown == static_cast<GrGLFormat>(i)) {
+ continue;
+ }
+ const auto& formatInfo = fFormatTable[i];
+ // Make sure we didn't set FBO-attachable-with-MSAA without also setting FBO-attachable.
+ SkASSERT(!((formatInfo.fFlags & FormatInfo::kFBOColorAttachmentWithMSAA_Flag) &&
+ !(formatInfo.fFlags & FormatInfo::kFBOColorAttachment_Flag)));
+
+ // Make sure we set all the formats' FormatType
+ SkASSERT(formatInfo.fFormatType != FormatType::kUnknown);
+
+ // Make sure if we added a ColorTypeInfo we filled it out
+ for (int j = 0; j < formatInfo.fColorTypeInfoCount; ++j) {
+ const auto& ctInfo = formatInfo.fColorTypeInfos[j];
+ SkASSERT(ctInfo.fColorType != GrColorType::kUnknown);
+ // Seems silly to add a color type if we don't support any flags on it.
+ SkASSERT(ctInfo.fFlags);
+ // Make sure if we added any ExternalIOFormats we filled it out
+ for (int k = 0; k < ctInfo.fExternalIOFormatCount; ++k) {
+ const auto& ioInfo = ctInfo.fExternalIOFormats[k];
+ SkASSERT(ioInfo.fColorType != GrColorType::kUnknown);
+ // Make sure we at least support either reading or tex image.
+ SkASSERT(ioInfo.fExternalReadFormat || ioInfo.fExternalTexImageFormat);
+ }
+ }
+ }
+#endif
+}
+
+void GrGLCaps::setupSampleCounts(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+ GrGLStandard standard = ctxInfo.standard();
+ // standard can be unused (optimized away) if SK_ASSUME_GL_ES is set
+ sk_ignore_unused_variable(standard);
+ GrGLVersion version = ctxInfo.version();
+
+ for (int i = 0; i < kGrGLFormatCount; ++i) {
+ if (FormatInfo::kFBOColorAttachmentWithMSAA_Flag & fFormatTable[i].fFlags) {
+ // We assume that MSAA rendering is supported only if we support non-MSAA rendering.
+ SkASSERT(FormatInfo::kFBOColorAttachment_Flag & fFormatTable[i].fFlags);
+ if ((GR_IS_GR_GL(standard) &&
+ (version >= GR_GL_VER(4,2) ||
+ ctxInfo.hasExtension("GL_ARB_internalformat_query"))) ||
+ (GR_IS_GR_GL_ES(standard) && version >= GR_GL_VER(3,0))) {
+ int count;
+ GrGLFormat grGLFormat = static_cast<GrGLFormat>(i);
+ GrGLenum glFormat = this->getRenderbufferInternalFormat(grGLFormat);
+ GR_GL_GetInternalformativ(gli, GR_GL_RENDERBUFFER, glFormat,
+ GR_GL_NUM_SAMPLE_COUNTS, 1, &count);
+ if (count) {
+ std::unique_ptr<int[]> temp(new int[count]);
+ GR_GL_GetInternalformativ(gli, GR_GL_RENDERBUFFER, glFormat, GR_GL_SAMPLES,
+ count, temp.get());
+ // GL has a concept of MSAA rasterization with a single sample but we do not.
+ if (count && temp[count - 1] == 1) {
+ --count;
+ SkASSERT(!count || temp[count - 1] > 1);
+ }
+ fFormatTable[i].fColorSampleCounts.setCount(count+1);
+ // We initialize our supported values with 1 (no msaa) and reverse the order
+ // returned by GL so that the array is ascending.
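+ // Illustrative example: if GL reports GR_GL_SAMPLES = {8, 4, 2} (descending, as
+ // queried above), the resulting array is {1, 2, 4, 8}.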
+ fFormatTable[i].fColorSampleCounts[0] = 1;
+ for (int j = 0; j < count; ++j) {
+ fFormatTable[i].fColorSampleCounts[j+1] = temp[count - j - 1];
+ }
+ }
+ } else {
+ // Fake out the table using some semi-standard counts up to the max allowed sample
+ // count.
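+ // Illustrative example: if the queried max sample count is 4, the loop below keeps
+ // the first three entries of kDefaultSamples, yielding {1, 2, 4}.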
+ int maxSampleCnt = 1;
+ if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &maxSampleCnt);
+ } else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES, &maxSampleCnt);
+ }
+ // Chrome has a mock GL implementation that returns 0.
+ maxSampleCnt = SkTMax(1, maxSampleCnt);
+
+ static constexpr int kDefaultSamples[] = {1, 2, 4, 8};
+ int count = SK_ARRAY_COUNT(kDefaultSamples);
+ for (; count > 0; --count) {
+ if (kDefaultSamples[count - 1] <= maxSampleCnt) {
+ break;
+ }
+ }
+ if (count > 0) {
+ fFormatTable[i].fColorSampleCounts.append(count, kDefaultSamples);
+ }
+ }
+ } else if (FormatInfo::kFBOColorAttachment_Flag & fFormatTable[i].fFlags) {
+ fFormatTable[i].fColorSampleCounts.setCount(1);
+ fFormatTable[i].fColorSampleCounts[0] = 1;
+ }
+ }
+}
+
+bool GrGLCaps::canCopyTexSubImage(GrGLFormat dstFormat, bool dstHasMSAARenderBuffer,
+ const GrTextureType* dstTypeIfTexture,
+ GrGLFormat srcFormat, bool srcHasMSAARenderBuffer,
+ const GrTextureType* srcTypeIfTexture) const {
+ // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
+ // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
+ // many drivers would allow it to work, but ANGLE does not.
+ if (GR_IS_GR_GL_ES(fStandard) &&
+ (dstFormat == GrGLFormat::kBGRA8 || srcFormat == GrGLFormat::kBGRA8)) {
+ return false;
+ }
+
+ // CopyTexSubImage is invalid or doesn't copy what we want when we have msaa render buffers.
+ if (dstHasMSAARenderBuffer || srcHasMSAARenderBuffer) {
+ return false;
+ }
+
+ // CopyTex(Sub)Image writes to a texture and we have no way of dynamically wrapping a RT in a
+ // texture.
+ if (!dstTypeIfTexture) {
+ return false;
+ }
+
+ // Check that we could wrap the source in an FBO, that the dst is not TEXTURE_EXTERNAL, that no
+ // mirroring is required
+ return this->canFormatBeFBOColorAttachment(srcFormat) &&
+ (!srcTypeIfTexture || *srcTypeIfTexture != GrTextureType::kExternal) &&
+ *dstTypeIfTexture != GrTextureType::kExternal;
+}
+
+bool GrGLCaps::canCopyAsBlit(GrGLFormat dstFormat, int dstSampleCnt,
+ const GrTextureType* dstTypeIfTexture,
+ GrGLFormat srcFormat, int srcSampleCnt,
+ const GrTextureType* srcTypeIfTexture,
+ const SkRect& srcBounds, bool srcBoundsExact,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ auto blitFramebufferFlags = this->blitFramebufferSupportFlags();
+ if (!this->canFormatBeFBOColorAttachment(dstFormat) ||
+ !this->canFormatBeFBOColorAttachment(srcFormat)) {
+ return false;
+ }
+
+ if (dstTypeIfTexture && *dstTypeIfTexture == GrTextureType::kExternal) {
+ return false;
+ }
+ if (srcTypeIfTexture && *srcTypeIfTexture == GrTextureType::kExternal) {
+ return false;
+ }
+
+ if (GrGLCaps::kNoSupport_BlitFramebufferFlag & blitFramebufferFlags) {
+ return false;
+ }
+
+ if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag & blitFramebufferFlags) {
+ if (srcSampleCnt > 1) {
+ if (1 == dstSampleCnt) {
+ return false;
+ }
+ if (SkRect::Make(srcRect) != srcBounds || !srcBoundsExact) {
+ return false;
+ }
+ }
+ }
+
+ if (GrGLCaps::kNoMSAADst_BlitFramebufferFlag & blitFramebufferFlags) {
+ if (dstSampleCnt > 1) {
+ return false;
+ }
+ }
+
+ if (GrGLCaps::kNoFormatConversion_BlitFramebufferFlag & blitFramebufferFlags) {
+ if (srcFormat != dstFormat) {
+ return false;
+ }
+ } else if (GrGLCaps::kNoFormatConversionForMSAASrc_BlitFramebufferFlag & blitFramebufferFlags) {
+ if (srcSampleCnt > 1 && srcFormat != dstFormat) {
+ return false;
+ }
+ }
+
+ if (GrGLCaps::kRectsMustMatchForMSAASrc_BlitFramebufferFlag & blitFramebufferFlags) {
+ if (srcSampleCnt > 1) {
+ if (dstPoint.fX != srcRect.fLeft || dstPoint.fY != srcRect.fTop) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool GrGLCaps::canCopyAsDraw(GrGLFormat dstFormat, bool srcIsTexturable) const {
+ return this->isFormatRenderable(dstFormat, 1) && srcIsTexturable;
+}
+
+static bool has_msaa_render_buffer(const GrSurfaceProxy* surf, const GrGLCaps& glCaps) {
+ const GrRenderTargetProxy* rt = surf->asRenderTargetProxy();
+ if (!rt) {
+ return false;
+ }
+ // An RT has a separate MSAA renderbuffer if:
+ // 1) It's multisampled
+ // 2) We're using an extension with separate MSAA renderbuffers
+ // 3) It's not FBO 0, which is special and always auto-resolves
+ return rt->numSamples() > 1 &&
+ glCaps.usesMSAARenderBuffers() &&
+ !rt->rtPriv().glRTFBOIDIs0();
+}
+
+bool GrGLCaps::onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ int dstSampleCnt = 0;
+ int srcSampleCnt = 0;
+ if (const GrRenderTargetProxy* rtProxy = dst->asRenderTargetProxy()) {
+ dstSampleCnt = rtProxy->numSamples();
+ }
+ if (const GrRenderTargetProxy* rtProxy = src->asRenderTargetProxy()) {
+ srcSampleCnt = rtProxy->numSamples();
+ }
+ SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTargetProxy()));
+ SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTargetProxy()));
+
+ const GrTextureProxy* dstTex = dst->asTextureProxy();
+ const GrTextureProxy* srcTex = src->asTextureProxy();
+
+ GrTextureType dstTexType;
+ GrTextureType* dstTexTypePtr = nullptr;
+ GrTextureType srcTexType;
+ GrTextureType* srcTexTypePtr = nullptr;
+ if (dstTex) {
+ dstTexType = dstTex->textureType();
+ dstTexTypePtr = &dstTexType;
+ }
+ if (srcTex) {
+ srcTexType = srcTex->textureType();
+ srcTexTypePtr = &srcTexType;
+ }
+
+ auto dstFormat = dst->backendFormat().asGLFormat();
+ auto srcFormat = src->backendFormat().asGLFormat();
+ return this->canCopyTexSubImage(dstFormat, has_msaa_render_buffer(dst, *this), dstTexTypePtr,
+ srcFormat, has_msaa_render_buffer(src, *this), srcTexTypePtr) ||
+ this->canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr, srcFormat, srcSampleCnt,
+ srcTexTypePtr, src->getBoundsRect(), src->priv().isExact(), srcRect,
+ dstPoint) ||
+ this->canCopyAsDraw(dstFormat, SkToBool(srcTex));
+}
+
+GrCaps::DstCopyRestrictions GrGLCaps::getDstCopyRestrictions(const GrRenderTargetProxy* src,
+ GrColorType colorType) const {
+ // If the src is a texture, we can implement the blit as a draw assuming the config is
+ // renderable.
+ if (src->asTextureProxy() && !this->isFormatAsColorTypeRenderable(colorType,
+ src->backendFormat())) {
+ return {};
+ }
+
+ if (const auto* texProxy = src->asTextureProxy()) {
+ if (texProxy->textureType() == GrTextureType::kExternal) {
+ // Not supported for FBO blit or CopyTexSubImage. Caller will have to fall back to a
+ // draw (if the source is also a texture).
+ return {};
+ }
+ }
+
+ // We look for opportunities to use CopyTexSubImage, or fbo blit. If neither is possible, we
+ // fall back to creating a render target dst for render-to-texture. This code prefers
+ // CopyTexSubImage to fbo blit and avoids triggering temporary fbo creation. It isn't clear
+ // that avoiding temporary fbo creation is actually optimal.
+ DstCopyRestrictions blitFramebufferRestrictions = {};
+ if (src->numSamples() > 1 &&
+ (this->blitFramebufferSupportFlags() & kResolveMustBeFull_BlitFrambufferFlag)) {
+ blitFramebufferRestrictions.fRectsMustMatch = GrSurfaceProxy::RectsMustMatch::kYes;
+ blitFramebufferRestrictions.fMustCopyWholeSrc = true;
+ // Mirroring causes rects to mismatch later, so don't allow it.
+ } else if (src->numSamples() > 1 && (this->blitFramebufferSupportFlags() &
+ kRectsMustMatchForMSAASrc_BlitFramebufferFlag)) {
+ blitFramebufferRestrictions.fRectsMustMatch = GrSurfaceProxy::RectsMustMatch::kYes;
+ }
+
+ auto srcFormat = src->backendFormat().asGLFormat();
+ // Check for format issues with glCopyTexSubImage2D
+ if (srcFormat == GrGLFormat::kBGRA8) {
+ // glCopyTexSubImage2D doesn't work with this config. If the bgra can be used with fbo blit
+ // then we set up for that, otherwise fail.
+ if (this->canFormatBeFBOColorAttachment(srcFormat)) {
+ return blitFramebufferRestrictions;
+ }
+ // Caller will have to use a draw.
+ return {};
+ }
+
+ {
+ bool srcIsMSAARenderbuffer = src->numSamples() > 1 &&
+ this->usesMSAARenderBuffers();
+ if (srcIsMSAARenderbuffer) {
+ // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO
+ // blit or fail.
+ if (this->canFormatBeFBOColorAttachment(srcFormat)) {
+ return blitFramebufferRestrictions;
+ }
+ // Caller will have to use a draw.
+ return {};
+ }
+ }
+
+ // We'll do a CopyTexSubImage, no restrictions.
+ return {};
+}
+
+void GrGLCaps::applyDriverCorrectnessWorkarounds(const GrGLContextInfo& ctxInfo,
+ const GrContextOptions& contextOptions,
+ GrShaderCaps* shaderCaps,
+ FormatWorkarounds* formatWorkarounds) {
+ // A driver bug on the Nexus 6 causes incorrect dst copies when invalidate is called beforehand.
+ // Thus we are blacklisting this extension for now on Adreno4xx devices.
+ if (kAdreno430_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno4xx_other_GrGLRenderer == ctxInfo.renderer() ||
+ fDriverBugWorkarounds.disable_discard_framebuffer) {
+ fInvalidateFBType = kNone_InvalidateFBType;
+ }
+
+ // glClearTexImage seems to have a bug in NVIDIA drivers that was fixed sometime between
+ // 340.96 and 367.57.
+ if (GR_IS_GR_GL(ctxInfo.standard()) &&
+ ctxInfo.driver() == kNVIDIA_GrGLDriver &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(367, 57, 0)) {
+ fClearTextureSupport = false;
+ }
+
+#ifdef SK_BUILD_FOR_MAC
+ // Radeon MacBooks hit a crash in glReadPixels() when using geometry shaders.
+ // http://skbug.com/8097
+ if (kATI_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fGeometryShaderSupport = false;
+ }
+ // On at least some MacBooks, GLSL 4.0 geometry shaders break if we use invocations.
+ shaderCaps->fGSInvocationsSupport = false;
+#endif
+
+ // Qualcomm driver @103.0 has been observed to crash compiling ccpr geometry
+ // shaders. @127.0 is the earliest verified driver to not crash.
+ if (kQualcomm_GrGLDriver == ctxInfo.driver() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(127, 0, 0)) {
+ shaderCaps->fGeometryShaderSupport = false;
+ }
+
+#if defined(__has_feature)
+#if defined(SK_BUILD_FOR_MAC) && __has_feature(thread_sanitizer)
+ // See skbug.com/7058
+ fMapBufferType = kNone_MapBufferType;
+ fMapBufferFlags = kNone_MapFlags;
+ fTransferBufferSupport = false;
+ fTransferBufferType = kNone_TransferBufferType;
+#endif
+#endif
+
+ // We found that the Galaxy J5 with an Adreno 306 running Android 6.0.1 has a bug where
+ // GL_INVALID_OPERATION is thrown by glDrawArrays when using a buffer that was mapped. The same bug
+ // did not reproduce on a Nexus7 2013 with a 320 running Android M with driver 127.0. It's
+ // unclear whether this really affects a wide range of devices.
+ if (ctxInfo.renderer() == kAdreno3xx_GrGLRenderer &&
+ ctxInfo.driverVersion() > GR_GL_DRIVER_VER(127, 0, 0)) {
+ fMapBufferType = kNone_MapBufferType;
+ fMapBufferFlags = kNone_MapFlags;
+ fTransferBufferSupport = false;
+ fTransferBufferType = kNone_TransferBufferType;
+ }
+
+ // TODO: re-enable for ANGLE
+ if (kANGLE_GrGLDriver == ctxInfo.driver()) {
+ fTransferBufferSupport = false;
+ fTransferBufferType = kNone_TransferBufferType;
+ }
+
+ // Using MIPs on this GPU seems to be a source of trouble.
+ if (kPowerVR54x_GrGLRenderer == ctxInfo.renderer()) {
+ fMipMapSupport = false;
+ }
+
+#ifndef SK_BUILD_FOR_IOS
+ if (kPowerVR54x_GrGLRenderer == ctxInfo.renderer() ||
+ kPowerVRRogue_GrGLRenderer == ctxInfo.renderer() ||
+ (kAdreno3xx_GrGLRenderer == ctxInfo.renderer() &&
+ ctxInfo.driver() != kChromium_GrGLDriver)) {
+ fPerformColorClearsAsDraws = true;
+ }
+#endif
+
+ // A lot of GPUs have trouble with full screen clears (skbug.com/7195)
+ if (kAMDRadeonHD7xxx_GrGLRenderer == ctxInfo.renderer() ||
+ kAMDRadeonR9M4xx_GrGLRenderer == ctxInfo.renderer()) {
+ fPerformColorClearsAsDraws = true;
+ }
+
+#ifdef SK_BUILD_FOR_MAC
+ // crbug.com/768134 - On MacBook Pros, the Intel Iris Pro doesn't always perform
+ // full screen clears
+ // crbug.com/773107 - On MacBook Pros, a wide range of Intel GPUs don't always
+ // perform full screen clears.
+ // Update on 4/4/2018 - This appears to be fixed on driver 10.30.12 on a macOS 10.13.2 on a
+ // Retina MBP Early 2015 with Iris 6100. It is possibly fixed on earlier drivers as well.
+ if (kIntel_GrGLVendor == ctxInfo.vendor() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(10, 30, 12)) {
+ fPerformColorClearsAsDraws = true;
+ }
+ // crbug.com/969609 - NVIDIA on Mac sometimes segfaults during glClear in chrome. It seems
+ // mostly concentrated in 10.13/14, GT 650Ms, driver 12+. But there are instances of older
+ // drivers and GTX 775s, so we'll start with a broader workaround.
+ if (kNVIDIA_GrGLVendor == ctxInfo.vendor()) {
+ fPerformColorClearsAsDraws = true;
+ }
+#endif
+
+ // See crbug.com/755871. This could probably be narrowed to just partial clears as the driver
+ // bug seems to involve clearing too much and not skipping the clear.
+ // See crbug.com/768134. This is also needed for full clears and was seen on an nVidia K620
+ // but only for D3D11 ANGLE.
+ if (GrGLANGLEBackend::kD3D11 == ctxInfo.angleBackend()) {
+ fPerformColorClearsAsDraws = true;
+ }
+
+ if (kAdreno430_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno4xx_other_GrGLRenderer == ctxInfo.renderer()) {
+ // This is known to be fixed sometime between driver 145.0 and 219.0
+ if (ctxInfo.driverVersion() <= GR_GL_DRIVER_VER(219, 0, 0)) {
+ fPerformStencilClearsAsDraws = true;
+ }
+ fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO = true;
+ }
+
+ if (fDriverBugWorkarounds.gl_clear_broken) {
+ fPerformColorClearsAsDraws = true;
+ fPerformStencilClearsAsDraws = true;
+ }
+
+ // This was reproduced on the following configurations:
+ // - A Galaxy J5 (Adreno 306) running Android 6 with driver 140.0
+ // - A Nexus 7 2013 (Adreno 320) running Android 5 with driver 104.0
+ // - A Nexus 7 2013 (Adreno 320) running Android 6 with driver 127.0
+ // - A Nexus 5 (Adreno 330) running Android 6 with driver 127.0
+ // and not produced on:
+ // - A Nexus 7 2013 (Adreno 320) running Android 4 with driver 53.0
+ // The particular lines that get dropped from test images varies across different devices.
+ if (kAdreno3xx_GrGLRenderer == ctxInfo.renderer() &&
+ ctxInfo.driverVersion() > GR_GL_DRIVER_VER(53, 0, 0)) {
+ fRequiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines = true;
+ }
+
+ // This was reproduced on a Pixel 1, but the unit test + config + options that exercise it are
+ // only tested on very specific bots. The driver claims that ReadPixels is an invalid operation
+ // when reading from an auto-resolving MSAA framebuffer that has stencil attached.
+ if (kQualcomm_GrGLDriver == ctxInfo.driver()) {
+ fDetachStencilFromMSAABuffersBeforeReadPixels = true;
+ }
+
+ // TODO: Don't apply this on iOS?
+ if (kPowerVRRogue_GrGLRenderer == ctxInfo.renderer()) {
+ // Our Chromebook with kPowerVRRogue_GrGLRenderer crashes on large instanced draws. The
+ // current minimum number of instances observed to crash is somewhere between 2^14 and 2^15.
+ // Keep the number of instances below 1000, just to be safe.
+ fMaxInstancesPerDrawWithoutCrashing = 999;
+ } else if (fDriverBugWorkarounds.disallow_large_instanced_draw) {
+ fMaxInstancesPerDrawWithoutCrashing = 0x4000000;
+ }
+
+ // Texture uploads sometimes seem to be ignored to textures bound to FBOS on Tegra3.
+ if (kTegra_PreK1_GrGLRenderer == ctxInfo.renderer()) {
+ fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO = true;
+ fUseDrawInsteadOfAllRenderTargetWrites = true;
+ }
+
+#ifdef SK_BUILD_FOR_MAC
+ static constexpr bool isMAC = true;
+#else
+ static constexpr bool isMAC = false;
+#endif
+
+ // We support manual mip-map generation (via iterative downsampling draw calls). This fixes
+ // bugs on some cards/drivers that produce incorrect mip-maps for sRGB textures when using
+ // glGenerateMipmap. Our implementation requires mip-level sampling control. Additionally,
+ // it can be much slower (especially on mobile GPUs), so we opt-in only when necessary:
+ if (fMipMapLevelAndLodControlSupport &&
+ (contextOptions.fDoManualMipmapping ||
+ (kIntel_GrGLVendor == ctxInfo.vendor()) ||
+ (kNVIDIA_GrGLDriver == ctxInfo.driver() && isMAC) ||
+ (kATI_GrGLVendor == ctxInfo.vendor()))) {
+ fDoManualMipmapping = true;
+ }
+
+ // See http://crbug.com/710443
+#ifdef SK_BUILD_FOR_MAC
+ if (kIntelBroadwell_GrGLRenderer == ctxInfo.renderer()) {
+ fClearToBoundaryValuesIsBroken = true;
+ }
+#endif
+ if (kQualcomm_GrGLVendor == ctxInfo.vendor()) {
+ fDrawArraysBaseVertexIsBroken = true;
+ }
+
+ // Currently the extension is advertised but fb fetch is broken on 500 series Adrenos like the
+ // Galaxy S7.
+ // TODO: Once this is fixed we can update the check here to look at a driver version number too.
+ if (kAdreno5xx_GrGLRenderer == ctxInfo.renderer()) {
+ shaderCaps->fFBFetchSupport = false;
+ }
+
+ // On the NexusS and GalaxyNexus, the use of 'any' causes the compilation error "Calls to any
+ // function that may require a gradient calculation inside a conditional block may return
+ // undefined results". This appears to be an issue with the 'any' call since even the simple
+ // "result=black; if (any()) result=white;" code fails to compile. This issue comes into play
+ // from our GrTextureDomain processor.
+ shaderCaps->fCanUseAnyFunctionInShader = kImagination_GrGLVendor != ctxInfo.vendor();
+
+ // Known issue on at least some Intel platforms:
+ // http://code.google.com/p/skia/issues/detail?id=946
+ if (kIntel_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fFragCoordConventionsExtensionString = nullptr;
+ }
+
+ if (kTegra_PreK1_GrGLRenderer == ctxInfo.renderer()) {
+ // The Tegra3 compiler will sometimes never return if we have min(abs(x), 1.0),
+ // so we must do the abs first in a separate expression.
+ shaderCaps->fCanUseMinAndAbsTogether = false;
+
+ // Tegra3 fract() seems to trigger undefined behavior for negative values, so we
+ // must avoid this condition.
+ shaderCaps->fCanUseFractForNegativeValues = false;
+ }
+
+ // On Intel GPUs there is an issue where the driver reads the second argument to atan "- %s.x"
+ // as an int, thus we must use -1.0 * %s.x to work correctly.
+ if (kIntel_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fMustForceNegatedAtanParamToFloat = true;
+ }
+
+ // On some Intel GPUs there is an issue where the driver outputs bogus values in the shader
+ // when floor and abs are called on the same line. Thus we must execute an Op between them to
+ // make sure the compiler doesn't re-inline them even if we break the calls apart.
+ if (kIntel_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fMustDoOpBetweenFloorAndAbs = true;
+ }
+
+ // On Adreno devices with framebuffer fetch support, there is a bug where they always return
+ // the original dst color when reading the outColor even after being written to. By using a
+ // local outColor we can work around this bug.
+ if (shaderCaps->fFBFetchSupport && kQualcomm_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fRequiresLocalOutputColorForFBFetch = true;
+ }
+
+ // Newer Mali GPUs do incorrect static analysis in specific situations: If there is uniform
+ // color, and that uniform contains an opaque color, and the output of the shader is only based
+ // on that uniform plus something un-trackable (like a texture read), the compiler will deduce
+ // that the shader always outputs opaque values. In that case, it appears to remove the shader
+ // based blending code it normally injects, turning SrcOver into Src. To fix this, we always
+ // insert an extra bit of math on the uniform that confuses the compiler just enough...
+ if (kMaliT_GrGLRenderer == ctxInfo.renderer()) {
+ shaderCaps->fMustObfuscateUniformColor = true;
+ }
+#ifdef SK_BUILD_FOR_WIN
+ // Check for ANGLE on Windows, so we can workaround a bug in D3D itself (anglebug.com/2098).
+ //
+ // Basically, if a shader has a construct like:
+ //
+ // float x = someCondition ? someValue : 0;
+ // float2 result = (0 == x) ? float2(x, x)
+ // : float2(2 * x / x, 0);
+ //
+ // ... the compiler will produce an error 'NaN and infinity literals not allowed', even though
+ // we've explicitly guarded the division with a check against zero. This manifests in much
+ // more complex ways in some of our shaders, so we use this caps bit to add an epsilon value
+ // to the denominator of divisions, even when we've added checks that the denominator isn't 0.
+ if (kANGLE_GrGLDriver == ctxInfo.driver() || kChromium_GrGLDriver == ctxInfo.driver()) {
+ shaderCaps->fMustGuardDivisionEvenAfterExplicitZeroCheck = true;
+ }
+#endif
+
+ // We've seen Adreno 3xx devices produce incorrect (flipped) values for gl_FragCoord, in some
+ // (rare) situations. It's sporadic, and mostly on older drivers. Additionally, old Adreno
+ // compilers (see crbug.com/skia/4078) crash when accessing .zw of gl_FragCoord, so just bypass
+ // using gl_FragCoord at all to get around it.
+ if (kAdreno3xx_GrGLRenderer == ctxInfo.renderer()) {
+ shaderCaps->fCanUseFragCoord = false;
+ }
+
+ // gl_FragCoord has an incorrect subpixel offset on legacy Tegra hardware.
+ if (kTegra_PreK1_GrGLRenderer == ctxInfo.renderer()) {
+ shaderCaps->fCanUseFragCoord = false;
+ }
+
+ // On Mali G71, mediump ints don't appear capable of representing every integer beyond +/-2048.
+ // (Are they implemented with fp16?)
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ shaderCaps->fIncompleteShortIntPrecision = true;
+ }
+
+ if (fDriverBugWorkarounds.add_and_true_to_loop_condition) {
+ shaderCaps->fAddAndTrueToLoopCondition = true;
+ }
+
+ if (fDriverBugWorkarounds.unfold_short_circuit_as_ternary_operation) {
+ shaderCaps->fUnfoldShortCircuitAsTernary = true;
+ }
+
+ if (fDriverBugWorkarounds.emulate_abs_int_function) {
+ shaderCaps->fEmulateAbsIntFunction = true;
+ }
+
+ if (fDriverBugWorkarounds.rewrite_do_while_loops) {
+ shaderCaps->fRewriteDoWhileLoops = true;
+ }
+
+ if (fDriverBugWorkarounds.remove_pow_with_constant_exponent) {
+ shaderCaps->fRemovePowWithConstantExponent = true;
+ }
+
+ if (kAdreno3xx_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno4xx_other_GrGLRenderer == ctxInfo.renderer()) {
+ shaderCaps->fMustWriteToFragColor = true;
+ }
+
+ // Disabling advanced blend on various platforms with major known issues. We also block Chrome
+ // for now until its own blacklists can be updated.
+ if (kAdreno430_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno4xx_other_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno5xx_GrGLRenderer == ctxInfo.renderer() ||
+ kIntel_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver()) {
+ fBlendEquationSupport = kBasic_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kNotSupported_AdvBlendEqInteraction;
+ }
+
+ // Non-coherent advanced blend has an issue on NVIDIA pre 337.00.
+ if (kNVIDIA_GrGLDriver == ctxInfo.driver() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(337, 00, 0) &&
+ kAdvanced_BlendEquationSupport == fBlendEquationSupport) {
+ fBlendEquationSupport = kBasic_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kNotSupported_AdvBlendEqInteraction;
+ }
+
+ if (fDriverBugWorkarounds.disable_blend_equation_advanced) {
+ fBlendEquationSupport = kBasic_BlendEquationSupport;
+ shaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kNotSupported_AdvBlendEqInteraction;
+ }
+
+ if (this->advancedBlendEquationSupport()) {
+ if (kNVIDIA_GrGLDriver == ctxInfo.driver() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(355, 00, 0)) {
+ // Blacklist color-dodge and color-burn on pre-355.00 NVIDIA.
+ fAdvBlendEqBlacklist |= (1 << kColorDodge_GrBlendEquation) |
+ (1 << kColorBurn_GrBlendEquation);
+ }
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ // Blacklist color-burn on ARM until the fix is released.
+ fAdvBlendEqBlacklist |= (1 << kColorBurn_GrBlendEquation);
+ }
+ }
+
+ // Workaround NVIDIA bug related to glInvalidateFramebuffer and mixed samples.
+ if (fMultisampleDisableSupport &&
+ this->shaderCaps()->dualSourceBlendingSupport() &&
+ this->shaderCaps()->pathRenderingSupport() &&
+ fMixedSamplesSupport &&
+#if GR_TEST_UTILS
+ (contextOptions.fGpuPathRenderers & GpuPathRenderers::kStencilAndCover) &&
+#endif
+ (kNVIDIA_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver())) {
+ fInvalidateFBType = kNone_InvalidateFBType;
+ }
+
+ // Many ES3 drivers only advertise the ES2 image_external extension, but support the _essl3
+ // extension, and require that it be enabled to work with ESSL3. Other devices require the ES2
+ // extension to be enabled, even when using ESSL3. Enabling both extensions fixes both cases.
+ // skbug.com/7713
+ if (ctxInfo.hasExtension("GL_OES_EGL_image_external") &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration &&
+ !shaderCaps->fExternalTextureSupport) { // i.e. Missing the _essl3 extension
+ shaderCaps->fExternalTextureSupport = true;
+ shaderCaps->fExternalTextureExtensionString = "GL_OES_EGL_image_external";
+ shaderCaps->fSecondExternalTextureExtensionString = "GL_OES_EGL_image_external_essl3";
+ }
+
+#ifdef SK_BUILD_FOR_IOS
+ // iOS drivers appear to implement TexSubImage by creating a staging buffer, and copying
+ // UNPACK_ROW_LENGTH * height bytes. That's unsafe in several scenarios, and the simplest fix
+ // is to just blacklist the feature.
+ // https://github.com/flutter/flutter/issues/16718
+ // https://bugreport.apple.com/web/?problemID=39948888
+ fWritePixelsRowBytesSupport = false;
+#endif
+
+ // CCPR edge AA is busted on Mesa, Sandy Bridge/Valley View (Bay Trail).
+ // http://skbug.com/8162
+ if (kMesa_GrGLDriver == ctxInfo.driver() &&
+ (kIntelSandyBridge_GrGLRenderer == ctxInfo.renderer() ||
+ kIntelIvyBridge_GrGLRenderer == ctxInfo.renderer() ||
+ kIntelValleyView_GrGLRenderer == ctxInfo.renderer())) {
+ fDriverBlacklistCCPR = true;
+ }
+
+ // Temporarily disable the MSAA implementation of CCPR on various platforms while we work out
+ // specific issues.
+ if (kATI_GrGLVendor == ctxInfo.vendor() || // Radeon drops stencil draws that use sample mask.
+ kImagination_GrGLVendor == ctxInfo.vendor() || // PowerVR produces flaky results on Gold.
+ kQualcomm_GrGLVendor == ctxInfo.vendor() /* Pixel2 crashes in nanobench. */) {
+ fDriverBlacklistMSAACCPR = true;
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+ // Older versions of Android have problems with setting GL_TEXTURE_BASE_LEVEL or
+ // GL_TEXTURE_MAX_LEVEL on GL_TEXTURE_EXTERNAL_OES textures. We just leave them as is and hope
+ // the client never changes them either.
+ fDontSetBaseOrMaxLevelForExternalTextures = true;
+#endif
+
+ // PowerVRGX6250 drops every pixel if we modify the sample mask while color writes are disabled.
+ if (kPowerVRRogue_GrGLRenderer == ctxInfo.renderer()) {
+ fNeverDisableColorWrites = true;
+ shaderCaps->fMustWriteToFragColor = true;
+ }
+
+ // It appears that Qualcomm drivers don't actually support
+ // GL_NV_shader_noperspective_interpolation in ES 3.00 or 3.10 shaders, only 3.20.
+ // https://crbug.com/986581
+ if (kQualcomm_GrGLVendor == ctxInfo.vendor() &&
+ k320es_GrGLSLGeneration != ctxInfo.glslGeneration()) {
+ shaderCaps->fNoPerspectiveInterpolationSupport = false;
+ }
+
+ // We disable srgb write control for Adreno4xx devices.
+ // see: https://bug.skia.org/5329
+ if (kAdreno430_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno4xx_other_GrGLRenderer == ctxInfo.renderer()) {
+ fSRGBWriteControl = false;
+ }
+
+ // MacPro devices with AMD cards fail to create MSAA sRGB render buffers.
+#if defined(SK_BUILD_FOR_MAC)
+ formatWorkarounds->fDisableSRGBRenderWithMSAAForMacAMD = kATI_GrGLVendor == ctxInfo.vendor();
+#endif
+
+ // Command buffer fails glTexSubImage2D with type == GL_HALF_FLOAT_OES if a GL_RGBA16F texture
+ // is created with glTexStorage2D. See crbug.com/1008003.
+ formatWorkarounds->fDisableRGBA16FTexStorageForCrBug1008003 =
+ kChromium_GrGLDriver == ctxInfo.driver() && ctxInfo.version() < GR_GL_VER(3, 0);
+
+#if defined(SK_BUILD_FOR_WIN)
+ // On Intel Windows ES contexts it seems that using texture storage with BGRA causes
+ // problems with cross-context SkImages.
+ formatWorkarounds->fDisableBGRATextureStorageForIntelWindowsES =
+ kIntel_GrGLDriver == ctxInfo.driver() && GR_IS_GR_GL_ES(ctxInfo.standard());
+#endif
+
+ // Mali-400 fails ReadPixels tests, mostly with non-0xFF alpha values when read as GL_RGBA8.
+ formatWorkarounds->fDisableRGB8ForMali400 = kMali4xx_GrGLRenderer == ctxInfo.renderer();
+
+ // On the Intel Iris 6100, interacting with LUM16F seems to confuse the driver. After
+ // writing to/reading from a LUM16F texture, reads from/writes to other formats behave
+ // erratically.
+ // All Adrenos claim to support LUM16F but don't appear to actually do so.
+ // The failing devices/gpus were: Nexus5/Adreno330, Nexus5x/Adreno418, Pixel/Adreno530,
+ // Pixel2XL/Adreno540 and Pixel3/Adreno630
+ formatWorkarounds->fDisableLuminance16F = kIntelBroadwell_GrGLRenderer == ctxInfo.renderer() ||
+ ctxInfo.vendor() == kQualcomm_GrGLVendor;
+
+#ifdef SK_BUILD_FOR_ANDROID
+ // We don't usually use glTexStorage() on Android for performance reasons. (crbug.com/945506).
+ // On an NVIDIA Shield TV running Android 7.0 creating a texture with glTexImage2D() with
+ // internal format GL_LUMINANCE8 fails. However, it succeeds with glTexStorage2D().
+ //
+ // Additionally, on the Nexus 9 running Android 6.0.1 formats added by GL_EXT_texture_rg and
+ // GL_EXT_texture_norm16 cause errors if they are created with glTexImage2D() with
+ // an unsized internal format. We wouldn't normally do that but Chrome can limit us
+ // artificially to ES2. (crbug.com/1003481)
+ if (kNVIDIA_GrGLVendor == ctxInfo.vendor()) {
+ formatWorkarounds->fDontDisableTexStorageOnAndroid = true;
+ }
+#endif
+
+ // https://github.com/flutter/flutter/issues/38700
+ if (kAndroidEmulator_GrGLDriver == ctxInfo.driver()) {
+ shaderCaps->fNoDefaultPrecisionForExternalSamplers = true;
+ }
+
+ // http://skbug.com/9491: Nexus5 produces rendering artifacts when we use QCOM_tiled_rendering.
+ if (kAdreno3xx_GrGLRenderer == ctxInfo.renderer()) {
+ fTiledRenderingSupport = false;
+ }
+}
+
+void GrGLCaps::onApplyOptionsOverrides(const GrContextOptions& options) {
+ if (options.fDisableDriverCorrectnessWorkarounds) {
+ SkASSERT(!fDoManualMipmapping);
+ SkASSERT(!fClearToBoundaryValuesIsBroken);
+ SkASSERT(0 == fMaxInstancesPerDrawWithoutCrashing);
+ SkASSERT(!fDrawArraysBaseVertexIsBroken);
+ SkASSERT(!fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO);
+ SkASSERT(!fUseDrawInsteadOfAllRenderTargetWrites);
+ SkASSERT(!fRequiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines);
+ SkASSERT(!fDetachStencilFromMSAABuffersBeforeReadPixels);
+ SkASSERT(!fDontSetBaseOrMaxLevelForExternalTextures);
+ SkASSERT(!fNeverDisableColorWrites);
+ }
+ if (options.fDoManualMipmapping) {
+ fDoManualMipmapping = true;
+ }
+ if (options.fShaderCacheStrategy < GrContextOptions::ShaderCacheStrategy::kBackendBinary) {
+ fProgramBinarySupport = false;
+ }
+}
+
+bool GrGLCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
+ if (fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO) {
+ if (auto tex = static_cast<const GrGLTexture*>(surface->asTexture())) {
+ if (tex->hasBaseLevelBeenBoundToFBO()) {
+ return false;
+ }
+ }
+ }
+ if (auto rt = surface->asRenderTarget()) {
+ if (fUseDrawInsteadOfAllRenderTargetWrites) {
+ return false;
+ }
+ if (rt->numSamples() > 1 && this->usesMSAARenderBuffers()) {
+ return false;
+ }
+ return SkToBool(surface->asTexture());
+ }
+ return true;
+}
+
+GrCaps::SurfaceReadPixelsSupport GrGLCaps::surfaceSupportsReadPixels(
+ const GrSurface* surface) const {
+ if (auto tex = static_cast<const GrGLTexture*>(surface->asTexture())) {
+ // We don't support reading pixels directly from EXTERNAL textures as it would require
+ // binding the texture to a FBO.
+ if (tex->target() == GR_GL_TEXTURE_EXTERNAL) {
+ return SurfaceReadPixelsSupport::kCopyToTexture2D;
+ }
+ }
+ return SurfaceReadPixelsSupport::kSupported;
+}
+
+size_t offset_alignment_for_transfer_buffer(GrGLenum externalType) {
+ // This switch is derived from a table titled "Pixel data type parameter values and the
+ // corresponding GL data types" in the OpenGL spec (Table 8.2 in OpenGL 4.5).
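+ // Illustrative usage sketch: a transfer-buffer read of GR_GL_FLOAT data must use a buffer
+ // offset that is a multiple of sizeof(GrGLfloat) (4), while GR_GL_UNSIGNED_BYTE data only
+ // needs an alignment of 1.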
+ switch (externalType) {
+ case GR_GL_UNSIGNED_BYTE: return sizeof(GrGLubyte);
+ case GR_GL_BYTE: return sizeof(GrGLbyte);
+ case GR_GL_UNSIGNED_SHORT: return sizeof(GrGLushort);
+ case GR_GL_SHORT: return sizeof(GrGLshort);
+ case GR_GL_UNSIGNED_INT: return sizeof(GrGLuint);
+ case GR_GL_INT: return sizeof(GrGLint);
+ case GR_GL_HALF_FLOAT: return sizeof(GrGLhalf);
+ case GR_GL_FLOAT: return sizeof(GrGLfloat);
+ case GR_GL_UNSIGNED_SHORT_5_6_5: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_SHORT_4_4_4_4: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_SHORT_5_5_5_1: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_INT_2_10_10_10_REV: return sizeof(GrGLuint);
+#if 0 // GL types we currently don't use. Here for future reference.
+ case GR_GL_UNSIGNED_BYTE_3_3_2: return sizeof(GrGLubyte);
+ case GR_GL_UNSIGNED_BYTE_2_3_3_REV: return sizeof(GrGLubyte);
+ case GR_GL_UNSIGNED_SHORT_5_6_5_REV: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_SHORT_4_4_4_4_REV: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_SHORT_1_5_5_5_REV: return sizeof(GrGLushort);
+ case GR_GL_UNSIGNED_INT_8_8_8_8: return sizeof(GrGLuint);
+ case GR_GL_UNSIGNED_INT_8_8_8_8_REV: return sizeof(GrGLuint);
+ case GR_GL_UNSIGNED_INT_10_10_10_2: return sizeof(GrGLuint);
+ case GR_GL_UNSIGNED_INT_24_8: return sizeof(GrGLuint);
+ case GR_GL_UNSIGNED_INT_10F_11F_11F_REV: return sizeof(GrGLuint);
+ case GR_GL_UNSIGNED_INT_5_9_9_9_REV: return sizeof(GrGLuint);
+ // This one does not correspond to a GL data type; the spec just says it is 4.
+ case GR_GL_FLOAT_32_UNSIGNED_INT_24_8_REV: return 4;
+#endif
+ default: return 0;
+ }
+}
+
+GrCaps::SupportedRead GrGLCaps::onSupportedReadPixelsColorType(
+ GrColorType srcColorType, const GrBackendFormat& srcBackendFormat,
+ GrColorType dstColorType) const {
+ // We first try to find a supported read pixels GrColorType that matches the requested
+ // dstColorType. If that doesn't exist we will use any valid read pixels GrColorType.
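+ // Hypothetical example: a request to read as kBGRA_8888 from a format whose table only
+ // lists a GL_RGBA external read format would fall back to kRGBA_8888 here, paired with
+ // that entry's transfer offset alignment.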
+ GrCaps::SupportedRead fallbackRead = {GrColorType::kUnknown, 0};
+ const auto& formatInfo = this->getFormatInfo(srcBackendFormat.asGLFormat());
+ bool foundSrcCT = false;
+ for (int i = 0; !foundSrcCT && i < formatInfo.fColorTypeInfoCount; ++i) {
+ if (formatInfo.fColorTypeInfos[i].fColorType == srcColorType) {
+ const ColorTypeInfo& ctInfo = formatInfo.fColorTypeInfos[i];
+ foundSrcCT = true;
+ for (int j = 0; j < ctInfo.fExternalIOFormatCount; ++j) {
+ const auto& ioInfo = ctInfo.fExternalIOFormats[j];
+ if (ioInfo.fExternalReadFormat != 0) {
+ GrGLenum transferOffsetAlignment =
+ offset_alignment_for_transfer_buffer(ioInfo.fExternalType);
+ if (ioInfo.fColorType == dstColorType) {
+ return {dstColorType, transferOffsetAlignment};
+ }
+ // Currently we just pick the first supported format that we find as our
+ // fallback.
+ if (fallbackRead.fColorType == GrColorType::kUnknown) {
+ fallbackRead = {ioInfo.fColorType, transferOffsetAlignment};
+ }
+ }
+ }
+ }
+ }
+ return fallbackRead;
+}
+
+GrCaps::SupportedWrite GrGLCaps::supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const {
+ // We first try to find a supported write pixels GrColorType that matches the data's
+ // srcColorType. If that doesn't exist we will use any supported GrColorType.
+ GrColorType fallbackCT = GrColorType::kUnknown;
+ const auto& formatInfo = this->getFormatInfo(surfaceFormat.asGLFormat());
+ bool foundSurfaceCT = false;
+ for (int i = 0; !foundSurfaceCT && i < formatInfo.fColorTypeInfoCount; ++i) {
+ if (formatInfo.fColorTypeInfos[i].fColorType == surfaceColorType) {
+ const ColorTypeInfo& ctInfo = formatInfo.fColorTypeInfos[i];
+ foundSurfaceCT = true;
+ for (int j = 0; j < ctInfo.fExternalIOFormatCount; ++j) {
+ const auto& ioInfo = ctInfo.fExternalIOFormats[j];
+ if (ioInfo.fExternalTexImageFormat != 0) {
+ if (ioInfo.fColorType == srcColorType) {
+ return {srcColorType, 1};
+ }
+ // Currently we just pick the first supported format that we find as our
+ // fallback.
+ if (fallbackCT == GrColorType::kUnknown) {
+ fallbackCT = ioInfo.fColorType;
+ }
+ }
+ }
+ }
+ }
+ return {fallbackCT, 1};
+}
+
+bool GrGLCaps::onIsWindowRectanglesSupportedForRT(const GrBackendRenderTarget& backendRT) const {
+ GrGLFramebufferInfo fbInfo;
+ SkAssertResult(backendRT.getGLFramebufferInfo(&fbInfo));
+ // Window Rectangles are not supported for FBO 0.
+ return fbInfo.fFBOID != 0;
+}
+
+bool GrGLCaps::isFormatSRGB(const GrBackendFormat& format) const {
+ return format.asGLFormat() == GrGLFormat::kSRGB8_ALPHA8;
+}
+
+bool GrGLCaps::isFormatCompressed(const GrBackendFormat& format,
+ SkImage::CompressionType* compressionType) const {
+ auto fmt = format.asGLFormat();
+
+ SkImage::CompressionType dummyType;
+ SkImage::CompressionType* compressionTypePtr = compressionType ? compressionType : &dummyType;
+
+ switch (fmt) {
+ case GrGLFormat::kCOMPRESSED_RGB8_ETC2: // fall through
+ case GrGLFormat::kCOMPRESSED_ETC1_RGB8:
+ // ETC2 uses the same compression layout as ETC1
+ *compressionTypePtr = SkImage::kETC1_CompressionType;
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GrGLCaps::isFormatTexturableAndUploadable(GrColorType ct,
+ const GrBackendFormat& format) const {
+ auto glFormat = format.asGLFormat();
+ const FormatInfo& info = this->getFormatInfo(glFormat);
+
+ return this->isFormatTexturable(glFormat) &&
+ SkToBool(info.colorTypeFlags(ct) & ColorTypeInfo::kUploadData_Flag);
+}
+
+bool GrGLCaps::isFormatTexturable(const GrBackendFormat& format) const {
+ return this->isFormatTexturable(format.asGLFormat());
+}
+
+bool GrGLCaps::isFormatTexturable(GrGLFormat format) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+ return SkToBool(info.fFlags & FormatInfo::kTexturable_Flag);
+}
+
+bool GrGLCaps::isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount) const {
+ auto f = format.asGLFormat();
+ const FormatInfo& info = this->getFormatInfo(f);
+ if (!SkToBool(info.colorTypeFlags(ct) & ColorTypeInfo::kRenderable_Flag)) {
+ return false;
+ }
+
+ return this->isFormatRenderable(f, sampleCount);
+}
+
+bool GrGLCaps::isFormatRenderable(const GrBackendFormat& format, int sampleCount) const {
+ return this->isFormatRenderable(format.asGLFormat(), sampleCount);
+}
+
+int GrGLCaps::getRenderTargetSampleCount(int requestedCount, GrGLFormat format) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+
+ int count = info.fColorSampleCounts.count();
+ if (!count) {
+ return 0;
+ }
+
+ requestedCount = SkTMax(1, requestedCount);
+ if (1 == requestedCount) {
+ return info.fColorSampleCounts[0] == 1 ? 1 : 0;
+ }
+
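+ // Illustrative example: with fColorSampleCounts = {1, 2, 4, 8}, a requested count of 3
+ // rounds up to 4, while a requested count of 16 finds no entry and returns 0.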
+ for (int i = 0; i < count; ++i) {
+ if (info.fColorSampleCounts[i] >= requestedCount) {
+ int count = info.fColorSampleCounts[i];
+ if (fDriverBugWorkarounds.max_msaa_sample_count_4) {
+ count = SkTMin(count, 4);
+ }
+ return count;
+ }
+ }
+ return 0;
+}
+
+int GrGLCaps::maxRenderTargetSampleCount(GrGLFormat format) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+ const auto& table = info.fColorSampleCounts;
+ if (!table.count()) {
+ return 0;
+ }
+ int count = table[table.count() - 1];
+ if (fDriverBugWorkarounds.max_msaa_sample_count_4) {
+ count = SkTMin(count, 4);
+ }
+ return count;
+}
+
+size_t GrGLCaps::bytesPerPixel(GrGLFormat format) const {
+ return this->getFormatInfo(format).fBytesPerPixel;
+}
+
+size_t GrGLCaps::bytesPerPixel(const GrBackendFormat& format) const {
+ auto glFormat = format.asGLFormat();
+ return this->bytesPerPixel(glFormat);
+}
+
+bool GrGLCaps::canFormatBeFBOColorAttachment(GrGLFormat format) const {
+ return SkToBool(this->getFormatInfo(format).fFlags & FormatInfo::kFBOColorAttachment_Flag);
+}
+
+bool GrGLCaps::isFormatCopyable(const GrBackendFormat& format) const {
+ // In GL we have three ways to be able to copy. CopyTexImage, blit, and draw. CopyTexImage
+ // requires the src to be an FBO attachment, blit requires both src and dst to be FBO
+ // attachments, and draw requires the dst to be an FBO attachment. Thus to copy from and to
+ // the same config, we need that config to be bindable to an FBO.
+ return this->canFormatBeFBOColorAttachment(format.asGLFormat());
+}
+
+bool GrGLCaps::formatSupportsTexStorage(GrGLFormat format) const {
+ return SkToBool(this->getFormatInfo(format).fFlags & FormatInfo::kUseTexStorage_Flag);
+}
+
+static GrPixelConfig validate_sized_format(GrGLFormat format,
+ GrColorType ct,
+ GrGLStandard standard) {
+ switch (ct) {
+ case GrColorType::kUnknown:
+ return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_8:
+ if (format == GrGLFormat::kALPHA8) {
+ return kAlpha_8_as_Alpha_GrPixelConfig;
+ } else if (format == GrGLFormat::kR8) {
+ return kAlpha_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kBGR_565:
+ if (format == GrGLFormat::kRGB565) {
+ return kRGB_565_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kABGR_4444:
+ if (format == GrGLFormat::kRGBA4) {
+ return kRGBA_4444_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888:
+ if (format == GrGLFormat::kRGBA8) {
+ return kRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888_SRGB:
+ if (format == GrGLFormat::kSRGB8_ALPHA8) {
+ return kSRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGB_888x:
+ if (format == GrGLFormat::kRGB8) {
+ return kRGB_888_GrPixelConfig;
+ } else if (format == GrGLFormat::kRGBA8) {
+ return kRGB_888X_GrPixelConfig;
+ } else if (format == GrGLFormat::kCOMPRESSED_RGB8_ETC2 ||
+ format == GrGLFormat::kCOMPRESSED_ETC1_RGB8) {
+ return kRGB_ETC1_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_88:
+ if (format == GrGLFormat::kRG8) {
+ return kRG_88_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kBGRA_8888:
+ if (format == GrGLFormat::kRGBA8) {
+ if (GR_IS_GR_GL(standard)) {
+ return kBGRA_8888_GrPixelConfig;
+ }
+ } else if (format == GrGLFormat::kBGRA8) {
+ if (GR_IS_GR_GL_ES(standard) || GR_IS_GR_WEBGL(standard)) {
+ return kBGRA_8888_GrPixelConfig;
+ }
+ }
+ break;
+ case GrColorType::kRGBA_1010102:
+ if (format == GrGLFormat::kRGB10_A2) {
+ return kRGBA_1010102_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kGray_8:
+ if (format == GrGLFormat::kLUMINANCE8) {
+ return kGray_8_as_Lum_GrPixelConfig;
+ } else if (format == GrGLFormat::kR8) {
+ return kGray_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_F16:
+ if (format == GrGLFormat::kLUMINANCE16F) {
+ return kAlpha_half_as_Lum_GrPixelConfig;
+ } else if (format == GrGLFormat::kR16F) {
+ return kAlpha_half_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16:
+ if (format == GrGLFormat::kRGBA16F) {
+ return kRGBA_half_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16_Clamped:
+ if (format == GrGLFormat::kRGBA16F) {
+ return kRGBA_half_Clamped_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_16:
+ if (format == GrGLFormat::kR16) {
+ return kAlpha_16_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_1616:
+ if (format == GrGLFormat::kRG16) {
+ return kRG_1616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_16161616:
+ if (format == GrGLFormat::kRGBA16) {
+ return kRGBA_16161616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_F16:
+ if (format == GrGLFormat::kRG16F) {
+ return kRG_half_GrPixelConfig;
+ }
+ break;
+
+ // These have no equivalent config:
+ case GrColorType::kRGBA_F32:
+ case GrColorType::kAlpha_8xxx:
+ case GrColorType::kAlpha_F32xxx:
+ case GrColorType::kGray_8xxx:
+ break;
+ }
+
+ SkDebugf("Unknown pixel config 0x%x\n", format);
+ return kUnknown_GrPixelConfig;
+}
+
+bool GrGLCaps::onAreColorTypeAndFormatCompatible(GrColorType ct,
+ const GrBackendFormat& format) const {
+ GrGLFormat glFormat = format.asGLFormat();
+ const auto& info = this->getFormatInfo(glFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ if (info.fColorTypeInfos[i].fColorType == ct) {
+ return true;
+ }
+ }
+ return false;
+}
+
+GrPixelConfig GrGLCaps::onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType ct) const {
+ return validate_sized_format(format.asGLFormat(), ct, fStandard);
+}
+
+GrColorType GrGLCaps::getYUVAColorTypeFromBackendFormat(const GrBackendFormat& format,
+ bool isAlphaChannel) const {
+ switch (format.asGLFormat()) {
+ case GrGLFormat::kLUMINANCE8: // <missing kAlpha_8_as_Lum>/kGray_8_as_Lum_GrPixelConfig
+ case GrGLFormat::kR8: // kAlpha_8_as_Red_GrPixelConfig/kGray_8_as_Red_GrPixelConfig
+ case GrGLFormat::kALPHA8: // kAlpha_8_as_Alpha_GrPixelConfig/<missing kGray_8_as_Alpha>
+ return isAlphaChannel ? GrColorType::kAlpha_8
+ : GrColorType::kGray_8;
+ case GrGLFormat::kRG8: return GrColorType::kRG_88;
+ case GrGLFormat::kRGBA8: return GrColorType::kRGBA_8888;
+ case GrGLFormat::kRGB8: return GrColorType::kRGB_888x;
+ case GrGLFormat::kBGRA8: return GrColorType::kBGRA_8888;
+ case GrGLFormat::kRGB10_A2: return GrColorType::kRGBA_1010102;
+ case GrGLFormat::kLUMINANCE16F: // fall through
+ case GrGLFormat::kR16F: return GrColorType::kAlpha_F16;
+ case GrGLFormat::kR16: return GrColorType::kAlpha_16;
+ case GrGLFormat::kRG16: return GrColorType::kRG_1616;
+ case GrGLFormat::kRGBA16: return GrColorType::kRGBA_16161616;
+ case GrGLFormat::kRG16F: return GrColorType::kRG_F16;
+ default: return GrColorType::kUnknown;
+ }
+
+ SkUNREACHABLE;
+}
+
+GrBackendFormat GrGLCaps::onGetDefaultBackendFormat(GrColorType ct,
+ GrRenderable renderable) const {
+ // TODO: make use of renderable.
+ auto format = this->getFormatFromColorType(ct);
+ if (format == GrGLFormat::kUnknown) {
+ return GrBackendFormat();
+ }
+ return GrBackendFormat::MakeGL(GrGLFormatToEnum(format), GR_GL_TEXTURE_2D);
+}
+
+GrBackendFormat GrGLCaps::getBackendFormatFromCompressionType(
+ SkImage::CompressionType compressionType) const {
+ switch (compressionType) {
+ case SkImage::kETC1_CompressionType:
+ // If ETC2 is available, default to that format.
+ if (this->isFormatTexturable(GrGLFormat::kCOMPRESSED_RGB8_ETC2)) {
+ return GrBackendFormat::MakeGL(GR_GL_COMPRESSED_RGB8_ETC2, GR_GL_TEXTURE_2D);
+ }
+ return GrBackendFormat::MakeGL(GR_GL_COMPRESSED_ETC1_RGB8, GR_GL_TEXTURE_2D);
+ }
+ SK_ABORT("Invalid compression type");
+}
+
+GrSwizzle GrGLCaps::getTextureSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ const auto& info = this->getFormatInfo(format.asGLFormat());
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fTextureSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+GrSwizzle GrGLCaps::getOutputSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ const auto& info = this->getFormatInfo(format.asGLFormat());
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fOutputSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+
+#if GR_TEST_UTILS
+std::vector<GrCaps::TestFormatColorTypeCombination> GrGLCaps::getTestingCombinations() const {
+ std::vector<GrCaps::TestFormatColorTypeCombination> combos = {
+ { GrColorType::kAlpha_8,
+ GrBackendFormat::MakeGL(GR_GL_ALPHA8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kAlpha_8,
+ GrBackendFormat::MakeGL(GR_GL_R8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kBGR_565,
+ GrBackendFormat::MakeGL(GR_GL_RGB565, GR_GL_TEXTURE_2D) },
+ { GrColorType::kABGR_4444,
+ GrBackendFormat::MakeGL(GR_GL_RGBA4, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_8888,
+ GrBackendFormat::MakeGL(GR_GL_RGBA8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_8888_SRGB,
+ GrBackendFormat::MakeGL(GR_GL_SRGB8_ALPHA8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGB_888x,
+ GrBackendFormat::MakeGL(GR_GL_RGBA8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGB_888x,
+ GrBackendFormat::MakeGL(GR_GL_RGB8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGB_888x,
+ GrBackendFormat::MakeGL(GR_GL_COMPRESSED_RGB8_ETC2, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGB_888x,
+ GrBackendFormat::MakeGL(GR_GL_COMPRESSED_ETC1_RGB8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRG_88,
+ GrBackendFormat::MakeGL(GR_GL_RG8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_1010102,
+ GrBackendFormat::MakeGL(GR_GL_RGB10_A2, GR_GL_TEXTURE_2D) },
+ { GrColorType::kGray_8,
+ GrBackendFormat::MakeGL(GR_GL_LUMINANCE8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kGray_8,
+ GrBackendFormat::MakeGL(GR_GL_R8, GR_GL_TEXTURE_2D) },
+ { GrColorType::kAlpha_F16,
+ GrBackendFormat::MakeGL(GR_GL_R16F, GR_GL_TEXTURE_2D) },
+ { GrColorType::kAlpha_F16,
+ GrBackendFormat::MakeGL(GR_GL_LUMINANCE16F, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_F16,
+ GrBackendFormat::MakeGL(GR_GL_RGBA16F, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_F16_Clamped,
+ GrBackendFormat::MakeGL(GR_GL_RGBA16F, GR_GL_TEXTURE_2D) },
+ { GrColorType::kAlpha_16,
+ GrBackendFormat::MakeGL(GR_GL_R16, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRG_1616,
+ GrBackendFormat::MakeGL(GR_GL_RG16, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRGBA_16161616,
+ GrBackendFormat::MakeGL(GR_GL_RGBA16, GR_GL_TEXTURE_2D) },
+ { GrColorType::kRG_F16,
+ GrBackendFormat::MakeGL(GR_GL_RG16F, GR_GL_TEXTURE_2D) },
+ };
+
+ if (GR_IS_GR_GL(fStandard)) {
+ combos.push_back({ GrColorType::kBGRA_8888,
+ GrBackendFormat::MakeGL(GR_GL_RGBA8, GR_GL_TEXTURE_2D) });
+ } else {
+ SkASSERT(GR_IS_GR_GL_ES(fStandard) || GR_IS_GR_WEBGL(fStandard));
+
+ combos.push_back({ GrColorType::kBGRA_8888,
+ GrBackendFormat::MakeGL(GR_GL_BGRA8, GR_GL_TEXTURE_2D) });
+ }
+
+ return combos;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCaps.h b/gfx/skia/skia/src/gpu/gl/GrGLCaps.h
new file mode 100644
index 0000000000..ef10d76815
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCaps.h
@@ -0,0 +1,691 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLCaps_DEFINED
+#define GrGLCaps_DEFINED
+
+#include <functional>
+#include "include/private/GrGLTypesPriv.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTHash.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/gl/GrGLStencilAttachment.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+class GrGLContextInfo;
+class GrGLRenderTarget;
+
+/**
+ * Stores some capabilities of a GL context. Most are determined by the GL
+ * version and the extensions string. It also tracks formats that have passed
+ * the FBO completeness test.
+ */
+class GrGLCaps : public GrCaps {
+public:
+ typedef GrGLStencilAttachment::Format StencilFormat;
+
+ /**
+ * The type of MSAA for FBOs supported. Different extensions have different
+ * semantics of how / when a resolve is performed.
+ */
+ enum MSFBOType {
+ /**
+ * no support for MSAA FBOs
+ */
+ kNone_MSFBOType = 0,
+ /**
+ * OpenGL 3.0+, OpenGL ES 3.0+, GL_ARB_framebuffer_object,
+ * GL_CHROMIUM_framebuffer_multisample, GL_ANGLE_framebuffer_multisample,
+ * or GL_EXT_framebuffer_multisample
+ */
+ kStandard_MSFBOType,
+ /**
+ * GL_APPLE_framebuffer_multisample ES extension
+ */
+ kES_Apple_MSFBOType,
+ /**
+ * GL_IMG_multisampled_render_to_texture. This variation does not have MSAA renderbuffers.
+ * Instead the texture is multisampled when bound to the FBO and then resolved automatically
+ * when read. It also defines an alternate value for GL_MAX_SAMPLES (which we call
+ * GR_GL_MAX_SAMPLES_IMG).
+ */
+ kES_IMG_MsToTexture_MSFBOType,
+ /**
+ * GL_EXT_multisampled_render_to_texture. Same as the IMG one above but uses the standard
+ * GL_MAX_SAMPLES value.
+ */
+ kES_EXT_MsToTexture_MSFBOType,
+
+ kLast_MSFBOType = kES_EXT_MsToTexture_MSFBOType
+ };
+
+ enum BlitFramebufferFlags {
+ kNoSupport_BlitFramebufferFlag = 1 << 0,
+ kNoScalingOrMirroring_BlitFramebufferFlag = 1 << 1,
+ kResolveMustBeFull_BlitFrambufferFlag = 1 << 2,
+ kNoMSAADst_BlitFramebufferFlag = 1 << 3,
+ kNoFormatConversion_BlitFramebufferFlag = 1 << 4,
+ kNoFormatConversionForMSAASrc_BlitFramebufferFlag = 1 << 5,
+ kRectsMustMatchForMSAASrc_BlitFramebufferFlag = 1 << 6,
+ };
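+ // Note: these bits are reported by blitFramebufferSupportFlags() below; e.g. a scaled copy
+ // via glBlitFramebuffer must be avoided when
+ // (blitFramebufferSupportFlags() & kNoScalingOrMirroring_BlitFramebufferFlag) is set.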
+
+ enum InvalidateFBType {
+ kNone_InvalidateFBType,
+ kDiscard_InvalidateFBType, //!< glDiscardFramebuffer()
+ kInvalidate_InvalidateFBType, //!< glInvalidateFramebuffer()
+
+ kLast_InvalidateFBType = kInvalidate_InvalidateFBType
+ };
+
+ enum MapBufferType {
+ kNone_MapBufferType,
+ kMapBuffer_MapBufferType, // glMapBuffer()
+ kMapBufferRange_MapBufferType, // glMapBufferRange()
+ kChromium_MapBufferType, // GL_CHROMIUM_map_sub
+
+ kLast_MapBufferType = kChromium_MapBufferType,
+ };
+
+ enum TransferBufferType {
+ kNone_TransferBufferType,
+ kPBO_TransferBufferType, // ARB_pixel_buffer_object
+ kChromium_TransferBufferType, // CHROMIUM_pixel_transfer_buffer_object
+
+ kLast_TransferBufferType = kChromium_TransferBufferType,
+ };
+
+ /**
+ * Initializes the GrGLCaps to the set of features supported in the current
+ * OpenGL context accessible via ctxInfo.
+ */
+ GrGLCaps(const GrContextOptions& contextOptions, const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* glInterface);
+
+ bool isFormatSRGB(const GrBackendFormat&) const override;
+ bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const override;
+
+ bool isFormatTexturableAndUploadable(GrColorType, const GrBackendFormat&) const override;
+ bool isFormatTexturable(const GrBackendFormat&) const override;
+ bool isFormatTexturable(GrGLFormat) const;
+
+ bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const override;
+ bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const override;
+ bool isFormatRenderable(GrGLFormat format, int sampleCount) const {
+ return sampleCount <= this->maxRenderTargetSampleCount(format);
+ }
+
+ int getRenderTargetSampleCount(int requestedCount,
+ const GrBackendFormat& format) const override {
+ return this->getRenderTargetSampleCount(requestedCount, format.asGLFormat());
+ }
+ int getRenderTargetSampleCount(int requestedCount, GrGLFormat) const;
+
+ int maxRenderTargetSampleCount(const GrBackendFormat& format) const override {
+ return this->maxRenderTargetSampleCount(format.asGLFormat());
+ }
+ int maxRenderTargetSampleCount(GrGLFormat) const;
+
+ size_t bytesPerPixel(GrGLFormat) const;
+ size_t bytesPerPixel(const GrBackendFormat&) const override;
+
+ bool isFormatCopyable(const GrBackendFormat&) const override;
+
+ bool canFormatBeFBOColorAttachment(GrGLFormat) const;
+
+ GrGLFormat getFormatFromColorType(GrColorType colorType) const {
+ int idx = static_cast<int>(colorType);
+ return fColorTypeToFormatTable[idx];
+ }
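+ // (fColorTypeToFormatTable maps each GrColorType to its preferred GrGLFormat; entries are
+ // filled in via setColorTypeFormat(), declared below.)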
+
+ /**
+ * Gets the internal format to use with glTexImage...() and glTexStorage...(). May be sized or
+ * base depending upon the GL. Not applicable to compressed textures.
+ */
+ GrGLenum getTexImageOrStorageInternalFormat(GrGLFormat format) const {
+ return this->getFormatInfo(format).fInternalFormatForTexImageOrStorage;
+ }
+
+ /**
+ * Gets the external format and type to pass to glTexImage2D with nullptr to create an
+ * uninitialized texture. See getTexImageOrStorageInternalFormat() for the internal format.
+ */
+ void getTexImageExternalFormatAndType(GrGLFormat surfaceFormat, GrGLenum* externalFormat,
+ GrGLenum* externalType) const;
+
+ /**
+ * Given a src data color type and a color type interpretation for a texture of a given format
+ * this provides the external GL format and type to use with glTexSubImage2d. The color types
+ * should originate from supportedWritePixelsColorType().
+ */
+ void getTexSubImageExternalFormatAndType(GrGLFormat surfaceFormat, GrColorType surfaceColorType,
+ GrColorType memoryColorType, GrGLenum* externalFormat,
+ GrGLenum* externalType) const;
+
+ /**
+ * Gets the external format, type, and bytes per pixel to use when uploading zeros via
+ * glTexSubImage...() to clear the texture at creation.
+ */
+ void getTexSubImageZeroFormatTypeAndBpp(GrGLFormat format, GrGLenum* externalFormat,
+ GrGLenum* externalType, size_t* bpp) const;
+
+ void getReadPixelsFormat(GrGLFormat surfaceFormat, GrColorType surfaceColorType,
+ GrColorType memoryColorType, GrGLenum* externalFormat,
+ GrGLenum* externalType) const;
+
+ /**
+ * Gets an array of legal stencil formats. These formats are not guaranteed
+ * to be supported by the driver but are legal GLenum names given the GL
+ * version and extensions supported.
+ */
+ const SkTArray<StencilFormat, true>& stencilFormats() const {
+ return fStencilFormats;
+ }
+
+ bool formatSupportsTexStorage(GrGLFormat) const;
+
+ /**
+ * Gets the internal format to use with glRenderbufferStorageMultisample...(). May be sized or
+ * base depending upon the GL. Not applicable to compressed textures.
+ */
+ GrGLenum getRenderbufferInternalFormat(GrGLFormat format) const {
+ return this->getFormatInfo(format).fInternalFormatForRenderbuffer;
+ }
+
+ /**
+ * Gets the default external type to use with glTex[Sub]Image... when the data pointer is null.
+ */
+ GrGLenum getFormatDefaultExternalType(GrGLFormat format) const {
+ return this->getFormatInfo(format).fDefaultExternalType;
+ }
+
+ /**
+ * Has a stencil format index been found for the format (or we've found that no format works).
+ */
+ bool hasStencilFormatBeenDeterminedForFormat(GrGLFormat format) const {
+ return this->getFormatInfo(format).fStencilFormatIndex != FormatInfo::kUnknown_StencilIndex;
+ }
+
+ /**
+ * Gets the stencil format index for the format. This assumes
+ * hasStencilFormatBeenDeterminedForFormat has already been checked. Returns a value < 0 if
+ * no stencil format is supported with the format. Otherwise, the returned index refers to
+ * the array returned by stencilFormats().
+ */
+ int getStencilFormatIndexForFormat(GrGLFormat format) const {
+ SkASSERT(this->hasStencilFormatBeenDeterminedForFormat(format));
+ return this->getFormatInfo(format).fStencilFormatIndex;
+ }
+
+ /**
+ * If index is >= 0 this records an index into stencilFormats() as the best stencil format for
+ * the format. If < 0 it records that the format has no supported stencil format index.
+ */
+ void setStencilFormatIndexForFormat(GrGLFormat, int index);
+
+ /**
+ * Reports the type of MSAA FBO support.
+ */
+ MSFBOType msFBOType() const { return fMSFBOType; }
+
+ /**
+ * Does the preferred MSAA FBO extension have MSAA renderbuffers?
+ */
+ bool usesMSAARenderBuffers() const {
+ return kNone_MSFBOType != fMSFBOType &&
+ kES_IMG_MsToTexture_MSFBOType != fMSFBOType &&
+ kES_EXT_MsToTexture_MSFBOType != fMSFBOType;
+ }
+
+ /**
+ * What functionality is supported by glBlitFramebuffer.
+ */
+ uint32_t blitFramebufferSupportFlags() const { return fBlitFramebufferFlags; }
+
+ /**
+ * Is the MSAA FBO extension one where the texture is multisampled when bound to an FBO and
+ * then implicitly resolved when read.
+ */
+ bool usesImplicitMSAAResolve() const {
+ return kES_IMG_MsToTexture_MSFBOType == fMSFBOType ||
+ kES_EXT_MsToTexture_MSFBOType == fMSFBOType;
+ }
+
+ InvalidateFBType invalidateFBType() const { return fInvalidateFBType; }
+
+ /// What type of buffer mapping is supported?
+ MapBufferType mapBufferType() const { return fMapBufferType; }
+
+ /// What type of transfer buffer is supported?
+ TransferBufferType transferBufferType() const { return fTransferBufferType; }
+
+ /// The maximum number of fragment uniform vectors (GLES has min. 16).
+ int maxFragmentUniformVectors() const { return fMaxFragmentUniformVectors; }
+
+ /// Is there support for GL_PACK_REVERSE_ROW_ORDER?
+ bool packFlipYSupport() const { return fPackFlipYSupport; }
+
+ /// Is there support for the texture parameter GL_TEXTURE_USAGE?
+ bool textureUsageSupport() const { return fTextureUsageSupport; }
+
+ /// Is GL_ARB_IMAGING supported?
+ bool imagingSupport() const { return fImagingSupport; }
+
+ /// Is there support for Vertex Array Objects?
+ bool vertexArrayObjectSupport() const { return fVertexArrayObjectSupport; }
+
+ /// Is there support for GL_KHR_debug?
+ bool debugSupport() const { return fDebugSupport; }
+
+ /// Is there support for ES2 compatibility?
+ bool ES2CompatibilitySupport() const { return fES2CompatibilitySupport; }
+
+ /// Is there support for glDraw*Instanced?
+ bool drawInstancedSupport() const { return fDrawInstancedSupport; }
+
+ /// Is there support for glDraw*Indirect? Note that the baseInstance fields of indirect draw
+ /// commands cannot be used unless we have base instance support.
+ bool drawIndirectSupport() const { return fDrawIndirectSupport; }
+
+ /// Is there support for glMultiDraw*Indirect? Note that the baseInstance fields of indirect
+ /// draw commands cannot be used unless we have base instance support.
+ bool multiDrawIndirectSupport() const { return fMultiDrawIndirectSupport; }
+
+ /// Is there support for glDrawRangeElements?
+ bool drawRangeElementsSupport() const { return fDrawRangeElementsSupport; }
+
+ /// Are the baseInstance fields supported in indirect draw commands?
+ bool baseInstanceSupport() const { return fBaseInstanceSupport; }
+
+ /// Use indices or vertices in CPU arrays rather than VBOs for dynamic content.
+ bool useNonVBOVertexAndIndexDynamicData() const { return fUseNonVBOVertexAndIndexDynamicData; }
+
+ SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override;
+
+ SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const override;
+
+ bool isCoreProfile() const { return fIsCoreProfile; }
+
+ bool bindFragDataLocationSupport() const { return fBindFragDataLocationSupport; }
+
+ bool bindUniformLocationSupport() const { return fBindUniformLocationSupport; }
+
+ /// Are textures with GL_TEXTURE_RECTANGLE type supported?
+ bool rectangleTextureSupport() const { return fRectangleTextureSupport; }
+
+ bool mipMapLevelAndLodControlSupport() const { return fMipMapLevelAndLodControlSupport; }
+
+ bool doManualMipmapping() const { return fDoManualMipmapping; }
+
+ void onDumpJSON(SkJSONWriter*) const override;
+
+ bool rgba8888PixelsOpsAreSlow() const { return fRGBA8888PixelsOpsAreSlow; }
+ bool partialFBOReadIsSlow() const { return fPartialFBOReadIsSlow; }
+ bool rgbaToBgraReadbackConversionsAreSlow() const {
+ return fRGBAToBGRAReadbackConversionsAreSlow;
+ }
+
+ bool useBufferDataNullHint() const { return fUseBufferDataNullHint; }
+
+ // Certain Intel GPUs on Mac fail to clear if the glClearColor is made up of only 1s and 0s.
+ bool clearToBoundaryValuesIsBroken() const { return fClearToBoundaryValuesIsBroken; }
+
+ /// glClearTex(Sub)Image support
+ bool clearTextureSupport() const { return fClearTextureSupport; }
+
+ // Adreno/MSAA drops a draw on the imagefiltersbase GM if the base vertex param to
+ // glDrawArrays is nonzero.
+ // https://bugs.chromium.org/p/skia/issues/detail?id=6650
+ bool drawArraysBaseVertexIsBroken() const { return fDrawArraysBaseVertexIsBroken; }
+
+ // If true then we must use an intermediate surface to perform partial updates to unorm
+ // textures that have ever been bound to an FBO.
+ bool disallowTexSubImageForUnormConfigTexturesEverBoundToFBO() const {
+ return fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO;
+ }
+
+ // Use an intermediate surface to write pixels (full or partial overwrite) into a texture
+ // that is bound to an FBO.
+ bool useDrawInsteadOfAllRenderTargetWrites() const {
+ return fUseDrawInsteadOfAllRenderTargetWrites;
+ }
+
+ // At least some Adreno 3xx drivers draw lines incorrectly after drawing non-lines. Toggling
+ // face culling on and off seems to resolve this.
+ bool requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() const {
+ return fRequiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines;
+ }
+
+ // Some Adreno drivers refuse to ReadPixels from an MSAA buffer that has stencil attached.
+ bool detachStencilFromMSAABuffersBeforeReadPixels() const {
+ return fDetachStencilFromMSAABuffersBeforeReadPixels;
+ }
+
+ // Older Android versions seem to have an issue with setting GL_TEXTURE_BASE_LEVEL or
+ // GL_TEXTURE_MAX_LEVEL for GL_TEXTURE_EXTERNAL_OES textures.
+ bool dontSetBaseOrMaxLevelForExternalTextures() const {
+ return fDontSetBaseOrMaxLevelForExternalTextures;
+ }
+
+ // PowerVRGX6250 drops every pixel if we modify the sample mask while color writes are disabled.
+ bool neverDisableColorWrites() const { return fNeverDisableColorWrites; }
+
+ // Returns the observed maximum number of instances the driver can handle in a single draw call
+ // without crashing, or 'pendingInstanceCount' if this workaround is not necessary.
+ // NOTE: the return value may be larger than pendingInstanceCount.
+ int maxInstancesPerDrawWithoutCrashing(int pendingInstanceCount) const {
+ return (fMaxInstancesPerDrawWithoutCrashing)
+ ? fMaxInstancesPerDrawWithoutCrashing : pendingInstanceCount;
+ }
+
+ bool canCopyTexSubImage(GrGLFormat dstFormat, bool dstHasMSAARenderBuffer,
+ const GrTextureType* dstTypeIfTexture,
+ GrGLFormat srcFormat, bool srcHasMSAARenderBuffer,
+ const GrTextureType* srcTypeIfTexture) const;
+ bool canCopyAsBlit(GrGLFormat dstFormat, int dstSampleCnt,
+ const GrTextureType* dstTypeIfTexture,
+ GrGLFormat srcFormat, int srcSampleCnt,
+ const GrTextureType* srcTypeIfTexture,
+ const SkRect& srcBounds, bool srcBoundsExact,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const;
+ bool canCopyAsDraw(GrGLFormat dstFormat, bool srcIsTexturable) const;
+
+ DstCopyRestrictions getDstCopyRestrictions(const GrRenderTargetProxy* src,
+ GrColorType) const override;
+
+ bool programBinarySupport() const { return fProgramBinarySupport; }
+ bool programParameterSupport() const { return fProgramParameterSupport; }
+
+ bool samplerObjectSupport() const { return fSamplerObjectSupport; }
+
+ bool tiledRenderingSupport() const { return fTiledRenderingSupport; }
+
+ bool fbFetchRequiresEnablePerSample() const { return fFBFetchRequiresEnablePerSample; }
+
+ /// Is there support for enabling/disabling sRGB writes for sRGB-capable color buffers?
+ bool srgbWriteControl() const { return fSRGBWriteControl; }
+
+ GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat&,
+ bool isAlphaChannel) const override;
+
+ GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override;
+
+ GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const override;
+ GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const override;
+
+#if GR_TEST_UTILS
+ GrGLStandard standard() const { return fStandard; }
+
+ std::vector<TestFormatColorTypeCombination> getTestingCombinations() const override;
+#endif
+
+private:
+ enum ExternalFormatUsage {
+ kTexImage_ExternalFormatUsage,
+ kReadPixels_ExternalFormatUsage,
+ };
+ void getExternalFormat(GrGLFormat surfaceFormat, GrColorType surfaceColorType,
+ GrColorType memoryColorType, ExternalFormatUsage usage,
+ GrGLenum* externalFormat, GrGLenum* externalType) const;
+
+ void init(const GrContextOptions&, const GrGLContextInfo&, const GrGLInterface*);
+ void initGLSL(const GrGLContextInfo&, const GrGLInterface*);
+ bool hasPathRenderingSupport(const GrGLContextInfo&, const GrGLInterface*);
+
+ struct FormatWorkarounds {
+ bool fDisableSRGBRenderWithMSAAForMacAMD = false;
+ bool fDisableRGBA16FTexStorageForCrBug1008003 = false;
+ bool fDisableBGRATextureStorageForIntelWindowsES = false;
+ bool fDisableRGB8ForMali400 = false;
+ bool fDisableLuminance16F = false;
+ bool fDontDisableTexStorageOnAndroid = false;
+ };
+
+ void applyDriverCorrectnessWorkarounds(const GrGLContextInfo&, const GrContextOptions&,
+ GrShaderCaps*, FormatWorkarounds*);
+
+ void onApplyOptionsOverrides(const GrContextOptions& options) override;
+
+ bool onIsWindowRectanglesSupportedForRT(const GrBackendRenderTarget&) const override;
+
+ void initFSAASupport(const GrContextOptions& contextOptions, const GrGLContextInfo&,
+ const GrGLInterface*);
+ void initBlendEqationSupport(const GrGLContextInfo&);
+ void initStencilSupport(const GrGLContextInfo&);
+ // This must be called after initFSAASupport().
+ void initFormatTable(const GrGLContextInfo&, const GrGLInterface*, const FormatWorkarounds&);
+ void setupSampleCounts(const GrGLContextInfo&, const GrGLInterface*);
+ bool onSurfaceSupportsWritePixels(const GrSurface*) const override;
+ bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const override;
+ GrBackendFormat onGetDefaultBackendFormat(GrColorType, GrRenderable) const override;
+ GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat&, GrColorType) const override;
+ bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const override;
+
+ SupportedRead onSupportedReadPixelsColorType(GrColorType, const GrBackendFormat&,
+ GrColorType) const override;
+
+ GrGLStandard fStandard;
+
+ SkTArray<StencilFormat, true> fStencilFormats;
+
+ int fMaxFragmentUniformVectors;
+
+ MSFBOType fMSFBOType;
+ InvalidateFBType fInvalidateFBType;
+ MapBufferType fMapBufferType;
+ TransferBufferType fTransferBufferType;
+
+ bool fPackFlipYSupport : 1;
+ bool fTextureUsageSupport : 1;
+ bool fImagingSupport : 1;
+ bool fVertexArrayObjectSupport : 1;
+ bool fDebugSupport : 1;
+ bool fES2CompatibilitySupport : 1;
+ bool fDrawInstancedSupport : 1;
+ bool fDrawIndirectSupport : 1;
+ bool fDrawRangeElementsSupport : 1;
+ bool fMultiDrawIndirectSupport : 1;
+ bool fBaseInstanceSupport : 1;
+ bool fUseNonVBOVertexAndIndexDynamicData : 1;
+ bool fIsCoreProfile : 1;
+ bool fBindFragDataLocationSupport : 1;
+ bool fRGBA8888PixelsOpsAreSlow : 1;
+ bool fPartialFBOReadIsSlow : 1;
+ bool fBindUniformLocationSupport : 1;
+ bool fRectangleTextureSupport : 1;
+ bool fMipMapLevelAndLodControlSupport : 1;
+ bool fRGBAToBGRAReadbackConversionsAreSlow : 1;
+ bool fUseBufferDataNullHint : 1;
+ bool fClearTextureSupport : 1;
+ bool fProgramBinarySupport : 1;
+ bool fProgramParameterSupport : 1;
+ bool fSamplerObjectSupport : 1;
+ bool fTiledRenderingSupport : 1;
+ bool fFBFetchRequiresEnablePerSample : 1;
+ bool fSRGBWriteControl : 1;
+
+ // Driver workarounds
+ bool fDoManualMipmapping : 1;
+ bool fClearToBoundaryValuesIsBroken : 1;
+ bool fDrawArraysBaseVertexIsBroken : 1;
+ bool fDisallowTexSubImageForUnormConfigTexturesEverBoundToFBO : 1;
+ bool fUseDrawInsteadOfAllRenderTargetWrites : 1;
+ bool fRequiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines : 1;
+ bool fDetachStencilFromMSAABuffersBeforeReadPixels : 1;
+ bool fDontSetBaseOrMaxLevelForExternalTextures : 1;
+ bool fNeverDisableColorWrites : 1;
+ int fMaxInstancesPerDrawWithoutCrashing;
+
+ uint32_t fBlitFramebufferFlags;
+
+ struct ReadPixelsFormat {
+ ReadPixelsFormat() : fFormat(0), fType(0) {}
+ GrGLenum fFormat;
+ GrGLenum fType;
+ };
+
+ /** Number type of the components (without considering the number of bits). */
+ enum class FormatType {
+ kUnknown,
+ kNormalizedFixedPoint,
+ kFloat,
+ };
+
+ // ColorTypeInfo for a specific format
+ struct ColorTypeInfo {
+ GrColorType fColorType = GrColorType::kUnknown;
+ enum {
+ kUploadData_Flag = 0x1,
+ // Does Ganesh itself support rendering to this colorType & format pair. Renderability
+ // still additionally depends on if the format can be an FBO color attachment.
+ kRenderable_Flag = 0x2,
+ };
+ uint32_t fFlags = 0;
+
+ GrSwizzle fTextureSwizzle;
+ GrSwizzle fOutputSwizzle;
+
+ struct ExternalIOFormats {
+ GrColorType fColorType = GrColorType::kUnknown;
+
+ /** The external format and type to use when uploading/downloading data of fColorType
+ to/from a texture of a given GrGLFormat and its intended GrColorType. The
+ fExternalTexImageFormat is the format to use for TexImage calls, and the
+ fExternalReadFormat is used when calling ReadPixels. If either is zero, the
+ corresponding operation (TexImage or ReadPixels) is not supported for the combination
+ of format and color types. */
+ GrGLenum fExternalType = 0;
+ GrGLenum fExternalTexImageFormat = 0;
+ GrGLenum fExternalReadFormat = 0;
+ };
+
+ GrGLenum externalFormat(GrColorType externalColorType, ExternalFormatUsage usage) const {
+ for (int i = 0; i < fExternalIOFormatCount; ++i) {
+ if (fExternalIOFormats[i].fColorType == externalColorType) {
+ if (usage == kTexImage_ExternalFormatUsage) {
+ return fExternalIOFormats[i].fExternalTexImageFormat;
+ } else {
+ SkASSERT(usage == kReadPixels_ExternalFormatUsage);
+ return fExternalIOFormats[i].fExternalReadFormat;
+ }
+ }
+ }
+ return 0;
+ }
+
+ GrGLenum externalType(GrColorType externalColorType) const {
+ for (int i = 0; i < fExternalIOFormatCount; ++i) {
+ if (fExternalIOFormats[i].fColorType == externalColorType) {
+ return fExternalIOFormats[i].fExternalType;
+ }
+ }
+ return 0;
+ }
+
+ std::unique_ptr<ExternalIOFormats[]> fExternalIOFormats;
+ int fExternalIOFormatCount = 0;
+ };
+
+ struct FormatInfo {
+ uint32_t colorTypeFlags(GrColorType colorType) const {
+ for (int i = 0; i < fColorTypeInfoCount; ++i) {
+ if (fColorTypeInfos[i].fColorType == colorType) {
+ return fColorTypeInfos[i].fFlags;
+ }
+ }
+ return 0;
+ }
+
+ GrGLenum externalFormat(GrColorType surfaceColorType, GrColorType externalColorType,
+ ExternalFormatUsage usage) const {
+ for (int i = 0; i < fColorTypeInfoCount; ++i) {
+ if (fColorTypeInfos[i].fColorType == surfaceColorType) {
+ return fColorTypeInfos[i].externalFormat(externalColorType, usage);
+ }
+ }
+ return 0;
+ }
+
+ GrGLenum externalType(GrColorType surfaceColorType, GrColorType externalColorType) const {
+ for (int i = 0; i < fColorTypeInfoCount; ++i) {
+ if (fColorTypeInfos[i].fColorType == surfaceColorType) {
+ return fColorTypeInfos[i].externalType(externalColorType);
+ }
+ }
+ return 0;
+ }
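+ // The lookups above chain FormatInfo -> ColorTypeInfo (keyed by the surface color type)
+ // -> ExternalIOFormats (keyed by the external/memory color type); a zero result at any
+ // stage means the combination is unsupported.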
+
+ enum {
+ kTexturable_Flag = 0x1,
+ /** kFBOColorAttachment means that even if the format cannot be a GrRenderTarget, we can
+ still attach it to an FBO for blitting or reading pixels. */
+ kFBOColorAttachment_Flag = 0x2,
+ kFBOColorAttachmentWithMSAA_Flag = 0x4,
+ kUseTexStorage_Flag = 0x8,
+ };
+ uint32_t fFlags = 0;
+
+ FormatType fFormatType = FormatType::kUnknown;
+
+ // Not defined for uncompressed formats. Passed to glCompressedTexImage...
+ GrGLenum fCompressedInternalFormat = 0;
+
+ // Value to use as the "internalformat" argument to glTexImage or glTexStorage. It is
+ // initialized in coordination with the presence/absence of the kUseTexStorage flag. In
+ // other words, it is only guaranteed to be compatible with glTexImage if the flag is not
+ // set, or with glTexStorage if the flag is set.
+ GrGLenum fInternalFormatForTexImageOrStorage = 0;
+
+ // Value to use as the "internalformat" argument to glRenderbufferStorageMultisample...
+ GrGLenum fInternalFormatForRenderbuffer = 0;
+
+ // Default values to use along with fInternalFormatForTexImageOrStorage for the function
+ // glTexImage2D when not providing input data (passing nullptr). Not defined for compressed
+ // formats. Also used to upload zeros to initially clear a texture.
+ GrGLenum fDefaultExternalFormat = 0;
+ GrGLenum fDefaultExternalType = 0;
+
+ // This value is only valid for regular formats. Compressed formats will be 0.
+ GrGLenum fBytesPerPixel = 0;
+
+ enum {
+ // This indicates that a stencil format has not yet been determined for the config.
+ kUnknown_StencilIndex = -1,
+ // This indicates that there is no supported stencil format for the config.
+ kUnsupported_StencilFormatIndex = -2
+ };
+
+ // Index into fStencilFormats.
+ int fStencilFormatIndex = kUnknown_StencilIndex;
+
+ SkTDArray<int> fColorSampleCounts;
+
+ std::unique_ptr<ColorTypeInfo[]> fColorTypeInfos;
+ int fColorTypeInfoCount = 0;
+ };
+
+ FormatInfo fFormatTable[kGrGLFormatCount];
+
+ FormatInfo& getFormatInfo(GrGLFormat format) { return fFormatTable[static_cast<int>(format)]; }
+ const FormatInfo& getFormatInfo(GrGLFormat format) const {
+ return fFormatTable[static_cast<int>(format)];
+ }
+
+ GrGLFormat fColorTypeToFormatTable[kGrColorTypeCnt];
+ void setColorTypeFormat(GrColorType, GrGLFormat);
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp b/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp
new file mode 100644
index 0000000000..14a40704f6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLContext.h"
+#include "src/gpu/gl/GrGLGLSL.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#ifdef SK_BUILD_FOR_ANDROID
+#include <sys/system_properties.h>
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrGLContext> GrGLContext::Make(sk_sp<const GrGLInterface> interface,
+ const GrContextOptions& options) {
+ if (!interface->validate()) {
+ return nullptr;
+ }
+
+ const GrGLubyte* verUByte;
+ GR_GL_CALL_RET(interface.get(), verUByte, GetString(GR_GL_VERSION));
+ const char* ver = reinterpret_cast<const char*>(verUByte);
+
+ const GrGLubyte* rendererUByte;
+ GR_GL_CALL_RET(interface.get(), rendererUByte, GetString(GR_GL_RENDERER));
+ const char* renderer = reinterpret_cast<const char*>(rendererUByte);
+
+ ConstructorArgs args;
+ args.fGLVersion = GrGLGetVersionFromString(ver);
+ if (GR_GL_INVALID_VER == args.fGLVersion) {
+ return nullptr;
+ }
+
+ if (!GrGLGetGLSLGeneration(interface.get(), &args.fGLSLGeneration)) {
+ return nullptr;
+ }
+
+ args.fVendor = GrGLGetVendor(interface.get());
+
+ args.fRenderer = GrGLGetRendererFromStrings(renderer, interface->fExtensions);
+
+ GrGLGetANGLEInfoFromString(renderer, &args.fANGLEBackend, &args.fANGLEVendor,
+ &args.fANGLERenderer);
+
+ /*
+ * Some Qualcomm drivers for the Adreno 3xx series have a horrendous bug. Though they
+ * claim to support GLES 3.00, some perfectly valid GLSL300 shaders will only compile with
+ * #version 100, and will fail to compile with #version 300 es. In the long term, we
+ * need to lock this down to a specific driver version.
+ * ?????/2019 - Qualcomm has fixed this for Android O+ devices (API 26+)
+ * ?????/2015 - This bug is still present in Lollipop pre-mr1
+ * 06/18/2015 - This bug does not affect the Nexus 6 (which has an Adreno 4xx).
+ */
+#ifdef SK_BUILD_FOR_ANDROID
+ if (!options.fDisableDriverCorrectnessWorkarounds &&
+ kAdreno3xx_GrGLRenderer == args.fRenderer) {
+ char androidAPIVersion[PROP_VALUE_MAX];
+ int strLength = __system_property_get("ro.build.version.sdk", androidAPIVersion);
+ if (strLength == 0 || atoi(androidAPIVersion) < 26) {
+ args.fGLSLGeneration = k110_GrGLSLGeneration;
+ }
+ }
+#endif
+
+ // Many ES3 drivers only advertise the ES2 image_external extension, but support the _essl3
+ // extension, and require that it be enabled to work with ESSL3. Other devices require the ES2
+ // extension to be enabled, even when using ESSL3. Some devices appear to only support the ES2
+ extension. As an extreme (optional) solution, we can fall back to using the ES2 shading language
+ // if we want to prioritize external texture support. skbug.com/7713
+ if (GR_IS_GR_GL_ES(interface->fStandard) &&
+ options.fPreferExternalImagesOverES3 &&
+ !options.fDisableDriverCorrectnessWorkarounds &&
+ interface->hasExtension("GL_OES_EGL_image_external") &&
+ args.fGLSLGeneration >= k330_GrGLSLGeneration &&
+ !interface->hasExtension("GL_OES_EGL_image_external_essl3") &&
+ !interface->hasExtension("OES_EGL_image_external_essl3")) {
+ args.fGLSLGeneration = k110_GrGLSLGeneration;
+ }
+
+ GrGLGetDriverInfo(interface->fStandard, args.fVendor, renderer, ver,
+ &args.fDriver, &args.fDriverVersion);
+
+ args.fContextOptions = &options;
+ args.fInterface = std::move(interface);
+
+ return std::unique_ptr<GrGLContext>(new GrGLContext(std::move(args)));
+}
+
+GrGLContext::~GrGLContext() {
+ delete fCompiler;
+}
+
+SkSL::Compiler* GrGLContext::compiler() const {
+ if (!fCompiler) {
+ fCompiler = new SkSL::Compiler();
+ }
+ return fCompiler;
+}
+
+GrGLContextInfo::GrGLContextInfo(ConstructorArgs&& args) {
+ fInterface = std::move(args.fInterface);
+ fGLVersion = args.fGLVersion;
+ fGLSLGeneration = args.fGLSLGeneration;
+ fVendor = args.fVendor;
+ fRenderer = args.fRenderer;
+ fDriver = args.fDriver;
+ fDriverVersion = args.fDriverVersion;
+ fANGLEBackend = args.fANGLEBackend;
+ fANGLEVendor = args.fANGLEVendor;
+ fANGLERenderer = args.fANGLERenderer;
+
+ fGLCaps = sk_make_sp<GrGLCaps>(*args.fContextOptions, *this, fInterface.get());
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLContext.h b/gfx/skia/skia/src/gpu/gl/GrGLContext.h
new file mode 100644
index 0000000000..5ea81781fe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLContext.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLContext_DEFINED
+#define GrGLContext_DEFINED
+
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLCaps.h"
+#include "src/gpu/gl/GrGLUtil.h"
+#include "src/gpu/glsl/GrGLSL.h"
+
+struct GrContextOptions;
+namespace SkSL {
+ class Compiler;
+}
+
+/**
+ * Encapsulates information about an OpenGL context including the OpenGL
+ * version, the GrGLStandard type of the context, and GLSL version.
+ */
+class GrGLContextInfo {
+public:
+ GrGLContextInfo(const GrGLContextInfo&) = delete;
+ GrGLContextInfo& operator=(const GrGLContextInfo&) = delete;
+
+ virtual ~GrGLContextInfo() {}
+
+ GrGLStandard standard() const { return fInterface->fStandard; }
+ GrGLVersion version() const { return fGLVersion; }
+ GrGLSLGeneration glslGeneration() const { return fGLSLGeneration; }
+ GrGLVendor vendor() const { return fVendor; }
+ GrGLRenderer renderer() const { return fRenderer; }
+ GrGLANGLEBackend angleBackend() const { return fANGLEBackend; }
+ GrGLANGLEVendor angleVendor() const { return fANGLEVendor; }
+ GrGLANGLERenderer angleRenderer() const { return fANGLERenderer; }
+ /** What driver is running our GL implementation? This is not necessarily related to the vendor.
+ (e.g. Intel GPU being driven by Mesa) */
+ GrGLDriver driver() const { return fDriver; }
+ GrGLDriverVersion driverVersion() const { return fDriverVersion; }
+ const GrGLCaps* caps() const { return fGLCaps.get(); }
+ GrGLCaps* caps() { return fGLCaps.get(); }
+ bool hasExtension(const char* ext) const {
+ return fInterface->hasExtension(ext);
+ }
+
+ const GrGLExtensions& extensions() const { return fInterface->fExtensions; }
+
+protected:
+ struct ConstructorArgs {
+ sk_sp<const GrGLInterface> fInterface;
+ GrGLVersion fGLVersion;
+ GrGLSLGeneration fGLSLGeneration;
+ GrGLVendor fVendor;
+ GrGLRenderer fRenderer;
+ GrGLDriver fDriver;
+ GrGLDriverVersion fDriverVersion;
+ GrGLANGLEBackend fANGLEBackend;
+ GrGLANGLEVendor fANGLEVendor;
+ GrGLANGLERenderer fANGLERenderer;
+ const GrContextOptions* fContextOptions;
+ };
+
+ GrGLContextInfo(ConstructorArgs&&);
+
+ sk_sp<const GrGLInterface> fInterface;
+ GrGLVersion fGLVersion;
+ GrGLSLGeneration fGLSLGeneration;
+ GrGLVendor fVendor;
+ GrGLRenderer fRenderer;
+ GrGLDriver fDriver;
+ GrGLDriverVersion fDriverVersion;
+ GrGLANGLEBackend fANGLEBackend;
+ GrGLANGLEVendor fANGLEVendor;
+ GrGLANGLERenderer fANGLERenderer;
+ sk_sp<GrGLCaps> fGLCaps;
+};
+
+/**
+ * Extension of GrGLContextInfo that also provides access to GrGLInterface and SkSL::Compiler.
+ */
+class GrGLContext : public GrGLContextInfo {
+public:
+ /**
+ * Creates a GrGLContext from a GrGLInterface and the currently
+ * bound OpenGL context accessible by the GrGLInterface.
+ */
+ static std::unique_ptr<GrGLContext> Make(sk_sp<const GrGLInterface>, const GrContextOptions&);
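+ // A minimal usage sketch (assumes an interface created while a GL context is current,
+ // e.g. via GrGLMakeNativeInterface(); Make() returns nullptr on failure):
+ //
+ // sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
+ // auto ctx = iface ? GrGLContext::Make(std::move(iface), GrContextOptions())
+ // : nullptr;
+ // if (ctx) { const GrGLCaps* caps = ctx->caps(); }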
+
+ const GrGLInterface* interface() const { return fInterface.get(); }
+
+ SkSL::Compiler* compiler() const;
+
+ ~GrGLContext() override;
+
+private:
+ GrGLContext(ConstructorArgs&& args) : INHERITED(std::move(args)), fCompiler(nullptr) {}
+
+ mutable SkSL::Compiler* fCompiler;
+
+ typedef GrGLContextInfo INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLDefines.h b/gfx/skia/skia/src/gpu/gl/GrGLDefines.h
new file mode 100644
index 0000000000..1a6d6c8058
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLDefines.h
@@ -0,0 +1,1112 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLDefines_DEFINED
+#define GrGLDefines_DEFINED
+
+/* Profiles */
+#define GR_GL_CONTEXT_PROFILE_MASK 0x9126
+#define GR_GL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define GR_GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+
+// The following constants consist of the intersection of GL constants
+// exported by GLES 1.0, GLES 2.0, and desktop GL required by the system.
+
+#define GR_GL_DEPTH_BUFFER_BIT 0x00000100
+#define GR_GL_STENCIL_BUFFER_BIT 0x00000400
+#define GR_GL_COLOR_BUFFER_BIT 0x00004000
+
+/* Boolean */
+#define GR_GL_FALSE 0
+#define GR_GL_TRUE 1
+
+/* BeginMode */
+#define GR_GL_POINTS 0x0000
+#define GR_GL_LINES 0x0001
+#define GR_GL_LINE_LOOP 0x0002
+#define GR_GL_LINE_STRIP 0x0003
+#define GR_GL_TRIANGLES 0x0004
+#define GR_GL_TRIANGLE_STRIP 0x0005
+#define GR_GL_TRIANGLE_FAN 0x0006
+
+/* AlphaFunction (not supported in ES20) */
+/* GL_NEVER */
+/* GL_LESS */
+/* GL_EQUAL */
+/* GL_LEQUAL */
+/* GL_GREATER */
+/* GL_NOTEQUAL */
+/* GL_GEQUAL */
+/* GL_ALWAYS */
+
+/* Basic OpenGL blend equations */
+#define GR_GL_FUNC_ADD 0x8006
+#define GR_GL_FUNC_SUBTRACT 0x800A
+#define GR_GL_FUNC_REVERSE_SUBTRACT 0x800B
+
+/* GL_KHR_blend_equation_advanced */
+#define GR_GL_SCREEN 0x9295
+#define GR_GL_OVERLAY 0x9296
+#define GR_GL_DARKEN 0x9297
+#define GR_GL_LIGHTEN 0x9298
+#define GR_GL_COLORDODGE 0x9299
+#define GR_GL_COLORBURN 0x929A
+#define GR_GL_HARDLIGHT 0x929B
+#define GR_GL_SOFTLIGHT 0x929C
+#define GR_GL_DIFFERENCE 0x929E
+#define GR_GL_EXCLUSION 0x92A0
+#define GR_GL_MULTIPLY 0x9294
+#define GR_GL_HSL_HUE 0x92AD
+#define GR_GL_HSL_SATURATION 0x92AE
+#define GR_GL_HSL_COLOR 0x92AF
+#define GR_GL_HSL_LUMINOSITY 0x92B0
+
+/* BlendingFactorDest */
+#define GR_GL_ZERO 0
+#define GR_GL_ONE 1
+#define GR_GL_SRC_COLOR 0x0300
+#define GR_GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GR_GL_SRC_ALPHA 0x0302
+#define GR_GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GR_GL_DST_ALPHA 0x0304
+#define GR_GL_ONE_MINUS_DST_ALPHA 0x0305
+
+/* BlendingFactorSrc */
+/* GL_ZERO */
+/* GL_ONE */
+#define GR_GL_DST_COLOR 0x0306
+#define GR_GL_ONE_MINUS_DST_COLOR 0x0307
+#define GR_GL_SRC_ALPHA_SATURATE 0x0308
+/* GL_SRC_ALPHA */
+/* GL_ONE_MINUS_SRC_ALPHA */
+/* GL_DST_ALPHA */
+/* GL_ONE_MINUS_DST_ALPHA */
+
+/* ExtendedBlendFactors */
+#define GR_GL_SRC1_COLOR 0x88F9
+#define GR_GL_ONE_MINUS_SRC1_COLOR 0x88FA
+/* GL_SRC1_ALPHA */
+#define GR_GL_ONE_MINUS_SRC1_ALPHA 0x88FB
+
+/* Separate Blend Functions */
+#define GR_GL_BLEND_DST_RGB 0x80C8
+#define GR_GL_BLEND_SRC_RGB 0x80C9
+#define GR_GL_BLEND_DST_ALPHA 0x80CA
+#define GR_GL_BLEND_SRC_ALPHA 0x80CB
+#define GR_GL_CONSTANT_COLOR 0x8001
+#define GR_GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GR_GL_CONSTANT_ALPHA 0x8003
+#define GR_GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GR_GL_BLEND_COLOR 0x8005
+
+/* Buffer Objects */
+#define GR_GL_ARRAY_BUFFER 0x8892
+#define GR_GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GR_GL_DRAW_INDIRECT_BUFFER 0x8F3F
+#define GR_GL_TEXTURE_BUFFER 0x8C2A
+#define GR_GL_ARRAY_BUFFER_BINDING 0x8894
+#define GR_GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GR_GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43
+#define GR_GL_PIXEL_PACK_BUFFER 0x88EB
+#define GR_GL_PIXEL_UNPACK_BUFFER 0x88EC
+
+#define GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM 0x78EC
+#define GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM 0x78ED
+
+#define GR_GL_STREAM_DRAW 0x88E0
+#define GR_GL_STREAM_READ 0x88E1
+#define GR_GL_STATIC_DRAW 0x88E4
+#define GR_GL_STATIC_READ 0x88E5
+#define GR_GL_DYNAMIC_DRAW 0x88E8
+#define GR_GL_DYNAMIC_READ 0x88E9
+
+#define GR_GL_BUFFER_SIZE 0x8764
+#define GR_GL_BUFFER_USAGE 0x8765
+
+#define GR_GL_CURRENT_VERTEX_ATTRIB 0x8626
+
+/* CullFaceMode */
+#define GR_GL_FRONT 0x0404
+#define GR_GL_BACK 0x0405
+#define GR_GL_FRONT_AND_BACK 0x0408
+
+/* DepthFunction */
+/* GL_NEVER */
+/* GL_LESS */
+/* GL_EQUAL */
+/* GL_LEQUAL */
+/* GL_GREATER */
+/* GL_NOTEQUAL */
+/* GL_GEQUAL */
+/* GL_ALWAYS */
+
+/* EnableCap */
+#define GR_GL_TEXTURE_NONE 0x0000
+#define GR_GL_TEXTURE_2D 0x0DE1
+#define GR_GL_CULL_FACE 0x0B44
+#define GR_GL_BLEND 0x0BE2
+#define GR_GL_DITHER 0x0BD0
+#define GR_GL_STENCIL_TEST 0x0B90
+#define GR_GL_DEPTH_TEST 0x0B71
+#define GR_GL_SCISSOR_TEST 0x0C11
+#define GR_GL_POLYGON_OFFSET_FILL 0x8037
+#define GR_GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GR_GL_SAMPLE_COVERAGE 0x80A0
+#define GR_GL_POLYGON_SMOOTH 0x0B41
+#define GR_GL_POLYGON_STIPPLE 0x0B42
+#define GR_GL_COLOR_LOGIC_OP 0x0BF2
+#define GR_GL_COLOR_TABLE 0x80D0
+#define GR_GL_INDEX_LOGIC_OP 0x0BF1
+#define GR_GL_VERTEX_PROGRAM_POINT_SIZE 0x8642
+#define GR_GL_LINE_STIPPLE 0x0B24
+#define GR_GL_FRAMEBUFFER_SRGB 0x8DB9
+#define GR_GL_SHADER_PIXEL_LOCAL_STORAGE 0x8F64
+#define GR_GL_SAMPLE_SHADING 0x8C36
+
+/* ErrorCode */
+#define GR_GL_NO_ERROR 0
+#define GR_GL_INVALID_ENUM 0x0500
+#define GR_GL_INVALID_VALUE 0x0501
+#define GR_GL_INVALID_OPERATION 0x0502
+#define GR_GL_OUT_OF_MEMORY 0x0505
+#define GR_GL_CONTEXT_LOST 0x300E // TODO(gman): What value?
+
+/* FrontFaceDirection */
+#define GR_GL_CW 0x0900
+#define GR_GL_CCW 0x0901
+
+/* GetPName */
+#define GR_GL_LINE_WIDTH 0x0B21
+#define GR_GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GR_GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GR_GL_CULL_FACE_MODE 0x0B45
+#define GR_GL_FRONT_FACE 0x0B46
+#define GR_GL_DEPTH_RANGE 0x0B70
+#define GR_GL_DEPTH_WRITEMASK 0x0B72
+#define GR_GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GR_GL_DEPTH_FUNC 0x0B74
+#define GR_GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GR_GL_STENCIL_FUNC 0x0B92
+#define GR_GL_STENCIL_FAIL 0x0B94
+#define GR_GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GR_GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GR_GL_STENCIL_REF 0x0B97
+#define GR_GL_STENCIL_VALUE_MASK 0x0B93
+#define GR_GL_STENCIL_WRITEMASK 0x0B98
+#define GR_GL_STENCIL_BACK_FUNC 0x8800
+#define GR_GL_STENCIL_BACK_FAIL 0x8801
+#define GR_GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GR_GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GR_GL_STENCIL_BACK_REF 0x8CA3
+#define GR_GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GR_GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GR_GL_VIEWPORT 0x0BA2
+#define GR_GL_SCISSOR_BOX 0x0C10
+/* GL_SCISSOR_TEST */
+#define GR_GL_COLOR_CLEAR_VALUE 0x0C22
+#define GR_GL_COLOR_WRITEMASK 0x0C23
+#define GR_GL_UNPACK_ALIGNMENT 0x0CF5
+#define GR_GL_PACK_ALIGNMENT 0x0D05
+#define GR_GL_PACK_REVERSE_ROW_ORDER 0x93A4
+#define GR_GL_MAX_TEXTURE_SIZE 0x0D33
+#define GR_GL_TEXTURE_MIN_LOD 0x813A
+#define GR_GL_TEXTURE_MAX_LOD 0x813B
+#define GR_GL_TEXTURE_BASE_LEVEL 0x813C
+#define GR_GL_TEXTURE_MAX_LEVEL 0x813D
+#define GR_GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GR_GL_SUBPIXEL_BITS 0x0D50
+#define GR_GL_RED_BITS 0x0D52
+#define GR_GL_GREEN_BITS 0x0D53
+#define GR_GL_BLUE_BITS 0x0D54
+#define GR_GL_ALPHA_BITS 0x0D55
+#define GR_GL_DEPTH_BITS 0x0D56
+#define GR_GL_STENCIL_BITS 0x0D57
+#define GR_GL_POLYGON_OFFSET_UNITS 0x2A00
+/* GL_POLYGON_OFFSET_FILL */
+#define GR_GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GR_GL_TEXTURE_BINDING_2D 0x8069
+#define GR_GL_SAMPLE_BUFFERS 0x80A8
+#define GR_GL_SAMPLES 0x80A9
+#define GR_GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GR_GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GR_GL_RENDERBUFFER_COVERAGE_SAMPLES 0x8CAB
+#define GR_GL_RENDERBUFFER_COLOR_SAMPLES 0x8E10
+#define GR_GL_MAX_MULTISAMPLE_COVERAGE_MODES 0x8E11
+#define GR_GL_MULTISAMPLE_COVERAGE_MODES 0x8E12
+#define GR_GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B
+
+/* GetTextureParameter */
+/* GL_TEXTURE_MAG_FILTER */
+/* GL_TEXTURE_MIN_FILTER */
+/* GL_TEXTURE_WRAP_S */
+/* GL_TEXTURE_WRAP_T */
+
+#define GR_GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GR_GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+
+/* Compressed Texture Formats */
+#define GR_GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
+
+#define GR_GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
+#define GR_GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
+#define GR_GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
+#define GR_GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
+
+#define GR_GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG 0x9137
+#define GR_GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG 0x9138
+
+#define GR_GL_COMPRESSED_ETC1_RGB8 0x8D64
+
+#define GR_GL_COMPRESSED_R11_EAC 0x9270
+#define GR_GL_COMPRESSED_SIGNED_R11_EAC 0x9271
+#define GR_GL_COMPRESSED_RG11_EAC 0x9272
+#define GR_GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
+
+#define GR_GL_COMPRESSED_RGB8_ETC2 0x9274
+#define GR_GL_COMPRESSED_SRGB8 0x9275
+#define GR_GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1 0x9276
+#define GR_GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1 0x9277
+#define GR_GL_COMPRESSED_RGBA8_ETC2 0x9278
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ETC2 0x9279
+
+#define GR_GL_COMPRESSED_LUMINANCE_LATC1 0x8C70
+#define GR_GL_COMPRESSED_SIGNED_LUMINANCE_LATC1 0x8C71
+#define GR_GL_COMPRESSED_LUMINANCE_ALPHA_LATC2 0x8C72
+#define GR_GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 0x8C73
+
+#define GR_GL_COMPRESSED_RED_RGTC1 0x8DBB
+#define GR_GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC
+#define GR_GL_COMPRESSED_RED_GREEN_RGTC2 0x8DBD
+#define GR_GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 0x8DBE
+
+#define GR_GL_COMPRESSED_3DC_X 0x87F9
+#define GR_GL_COMPRESSED_3DC_XY 0x87FA
+
+#define GR_GL_COMPRESSED_RGBA_BPTC_UNORM 0x8E8C
+#define GR_GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM 0x8E8D
+#define GR_GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT 0x8E8E
+#define GR_GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT 0x8E8F
+
+#define GR_GL_COMPRESSED_RGBA_ASTC_4x4 0x93B0
+#define GR_GL_COMPRESSED_RGBA_ASTC_5x4 0x93B1
+#define GR_GL_COMPRESSED_RGBA_ASTC_5x5 0x93B2
+#define GR_GL_COMPRESSED_RGBA_ASTC_6x5 0x93B3
+#define GR_GL_COMPRESSED_RGBA_ASTC_6x6 0x93B4
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x5 0x93B5
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x6 0x93B6
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x8 0x93B7
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x5 0x93B8
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x6 0x93B9
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x8 0x93BA
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x10 0x93BB
+#define GR_GL_COMPRESSED_RGBA_ASTC_12x10 0x93BC
+#define GR_GL_COMPRESSED_RGBA_ASTC_12x12 0x93BD
+
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4 0x93D0
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4 0x93D1
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5 0x93D2
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5 0x93D3
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6 0x93D4
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5 0x93D5
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6 0x93D6
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8 0x93D7
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5 0x93D8
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6 0x93D9
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8 0x93DA
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10 0x93DB
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10 0x93DC
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12 0x93DD
+
+/* HintMode */
+#define GR_GL_DONT_CARE 0x1100
+#define GR_GL_FASTEST 0x1101
+#define GR_GL_NICEST 0x1102
+
+/* HintTarget */
+#define GR_GL_GENERATE_MIPMAP_HINT 0x8192
+
+/* DataType */
+#define GR_GL_BYTE 0x1400
+#define GR_GL_UNSIGNED_BYTE 0x1401
+#define GR_GL_SHORT 0x1402
+#define GR_GL_UNSIGNED_SHORT 0x1403
+#define GR_GL_INT 0x1404
+#define GR_GL_UNSIGNED_INT 0x1405
+#define GR_GL_FLOAT 0x1406
+#define GR_GL_HALF_FLOAT 0x140B
+#define GR_GL_FIXED 0x140C
+#define GR_GL_HALF_FLOAT_OES 0x8D61
+
+/* Lighting */
+#define GR_GL_LIGHTING 0x0B50
+#define GR_GL_LIGHT0 0x4000
+#define GR_GL_LIGHT1 0x4001
+#define GR_GL_LIGHT2 0x4002
+#define GR_GL_LIGHT3 0x4003
+#define GR_GL_LIGHT4 0x4004
+#define GR_GL_LIGHT5 0x4005
+#define GR_GL_LIGHT6 0x4006
+#define GR_GL_LIGHT7 0x4007
+#define GR_GL_SPOT_EXPONENT 0x1205
+#define GR_GL_SPOT_CUTOFF 0x1206
+#define GR_GL_CONSTANT_ATTENUATION 0x1207
+#define GR_GL_LINEAR_ATTENUATION 0x1208
+#define GR_GL_QUADRATIC_ATTENUATION 0x1209
+#define GR_GL_AMBIENT 0x1200
+#define GR_GL_DIFFUSE 0x1201
+#define GR_GL_SPECULAR 0x1202
+#define GR_GL_SHININESS 0x1601
+#define GR_GL_EMISSION 0x1600
+#define GR_GL_POSITION 0x1203
+#define GR_GL_SPOT_DIRECTION 0x1204
+#define GR_GL_AMBIENT_AND_DIFFUSE 0x1602
+#define GR_GL_COLOR_INDEXES 0x1603
+#define GR_GL_LIGHT_MODEL_TWO_SIDE 0x0B52
+#define GR_GL_LIGHT_MODEL_LOCAL_VIEWER 0x0B51
+#define GR_GL_LIGHT_MODEL_AMBIENT 0x0B53
+#define GR_GL_FRONT_AND_BACK 0x0408
+#define GR_GL_SHADE_MODEL 0x0B54
+#define GR_GL_FLAT 0x1D00
+#define GR_GL_SMOOTH 0x1D01
+#define GR_GL_COLOR_MATERIAL 0x0B57
+#define GR_GL_COLOR_MATERIAL_FACE 0x0B55
+#define GR_GL_COLOR_MATERIAL_PARAMETER 0x0B56
+#define GR_GL_NORMALIZE 0x0BA1
+
+/* Matrix Mode */
+#define GR_GL_MATRIX_MODE 0x0BA0
+#define GR_GL_MODELVIEW 0x1700
+#define GR_GL_PROJECTION 0x1701
+#define GR_GL_TEXTURE 0x1702
+
+/* multisample */
+#define GR_GL_MULTISAMPLE 0x809D
+#define GR_GL_SAMPLE_POSITION 0x8E50
+
+/* Points */
+#define GR_GL_POINT_SMOOTH 0x0B10
+#define GR_GL_POINT_SIZE 0x0B11
+#define GR_GL_POINT_SIZE_GRANULARITY 0x0B13
+#define GR_GL_POINT_SIZE_RANGE 0x0B12
+
+/* Lines */
+#define GR_GL_LINE_SMOOTH 0x0B20
+#define GR_GL_LINE_STIPPLE 0x0B24
+#define GR_GL_LINE_STIPPLE_PATTERN 0x0B25
+#define GR_GL_LINE_STIPPLE_REPEAT 0x0B26
+#define GR_GL_LINE_WIDTH 0x0B21
+#define GR_GL_LINE_WIDTH_GRANULARITY 0x0B23
+#define GR_GL_LINE_WIDTH_RANGE 0x0B22
+
+/* PolygonMode */
+#define GR_GL_POINT 0x1B00
+#define GR_GL_LINE 0x1B01
+#define GR_GL_FILL 0x1B02
+
+/* Unsized formats */
+#define GR_GL_STENCIL_INDEX 0x1901
+#define GR_GL_DEPTH_COMPONENT 0x1902
+#define GR_GL_DEPTH_STENCIL 0x84F9
+#define GR_GL_RED 0x1903
+#define GR_GL_RED_INTEGER 0x8D94
+#define GR_GL_GREEN 0x1904
+#define GR_GL_BLUE 0x1905
+#define GR_GL_ALPHA 0x1906
+#define GR_GL_LUMINANCE 0x1909
+#define GR_GL_LUMINANCE_ALPHA 0x190A
+#define GR_GL_RG_INTEGER 0x8228
+#define GR_GL_RGB 0x1907
+#define GR_GL_RGB_INTEGER 0x8D98
+#define GR_GL_SRGB 0x8C40
+#define GR_GL_RGBA 0x1908
+#define GR_GL_RG 0x8227
+#define GR_GL_SRGB_ALPHA 0x8C42
+#define GR_GL_RGBA_INTEGER 0x8D99
+#define GR_GL_BGRA 0x80E1
+
+/* Stencil index sized formats */
+#define GR_GL_STENCIL_INDEX4 0x8D47
+#define GR_GL_STENCIL_INDEX8 0x8D48
+#define GR_GL_STENCIL_INDEX16 0x8D49
+
+/* Depth component sized formats */
+#define GR_GL_DEPTH_COMPONENT16 0x81A5
+
+/* Depth stencil sized formats */
+#define GR_GL_DEPTH24_STENCIL8 0x88F0
+
+/* Red sized formats */
+#define GR_GL_R8 0x8229
+#define GR_GL_R16 0x822A
+#define GR_GL_R16F 0x822D
+#define GR_GL_R32F 0x822E
+
+/* Red integer sized formats */
+#define GR_GL_R8I 0x8231
+#define GR_GL_R8UI 0x8232
+#define GR_GL_R16I 0x8233
+#define GR_GL_R16UI 0x8234
+#define GR_GL_R32I 0x8235
+#define GR_GL_R32UI 0x8236
+
+/* Luminance sized formats */
+#define GR_GL_LUMINANCE8 0x8040
+#define GR_GL_LUMINANCE16F 0x881E
+
+/* Alpha sized formats */
+#define GR_GL_ALPHA8 0x803C
+#define GR_GL_ALPHA16 0x803E
+#define GR_GL_ALPHA16F 0x881C
+#define GR_GL_ALPHA32F 0x8816
+
+/* Alpha integer sized formats */
+#define GR_GL_ALPHA8I 0x8D90
+#define GR_GL_ALPHA8UI 0x8D7E
+#define GR_GL_ALPHA16I 0x8D8A
+#define GR_GL_ALPHA16UI 0x8D78
+#define GR_GL_ALPHA32I 0x8D84
+#define GR_GL_ALPHA32UI 0x8D72
+
+/* RG sized formats */
+#define GR_GL_RG8 0x822B
+#define GR_GL_RG16 0x822C
+#define GR_GL_RG16F 0x822F
+#define GR_GL_RG32F 0x8230
+
+/* RG sized integer formats */
+#define GR_GL_RG8I 0x8237
+#define GR_GL_RG8UI 0x8238
+#define GR_GL_RG16I 0x8239
+#define GR_GL_RG16UI 0x823A
+#define GR_GL_RG32I 0x823B
+#define GR_GL_RG32UI 0x823C
+
+/* RGB sized formats */
+#define GR_GL_RGB5 0x8050
+#define GR_GL_RGB565 0x8D62
+#define GR_GL_RGB8 0x8051
+#define GR_GL_SRGB8 0x8C41
+
+/* RGB integer sized formats */
+#define GR_GL_RGB8I 0x8D8F
+#define GR_GL_RGB8UI 0x8D7D
+#define GR_GL_RGB16I 0x8D89
+#define GR_GL_RGB16UI 0x8D77
+#define GR_GL_RGB32I 0x8D83
+#define GR_GL_RGB32UI 0x8D71
+
+/* RGBA sized formats */
+#define GR_GL_RGBA4 0x8056
+#define GR_GL_RGB5_A1 0x8057
+#define GR_GL_RGBA8 0x8058
+#define GR_GL_RGB10_A2 0x8059
+#define GR_GL_SRGB8_ALPHA8 0x8C43
+#define GR_GL_RGBA16F 0x881A
+#define GR_GL_RGBA32F 0x8814
+#define GR_GL_RGBA16 0x805B
+
+/* RGBA integer sized formats */
+#define GR_GL_RGBA8I 0x8D8E
+#define GR_GL_RGBA8UI 0x8D7C
+#define GR_GL_RGBA16I 0x8D88
+#define GR_GL_RGBA16UI 0x8D76
+#define GR_GL_RGBA32I 0x8D82
+#define GR_GL_RGBA32UI 0x8D70
+
+/* BGRA sized formats */
+#define GR_GL_BGRA8 0x93A1
+
+/* PixelType */
+/* GL_UNSIGNED_BYTE */
+#define GR_GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GR_GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GR_GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GR_GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
+
+/* Shaders */
+#define GR_GL_FRAGMENT_SHADER 0x8B30
+#define GR_GL_VERTEX_SHADER 0x8B31
+#define GR_GL_GEOMETRY_SHADER 0x8DD9
+#define GR_GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GR_GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
+#define GR_GL_MAX_VARYING_VECTORS 0x8DFC
+#define GR_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GR_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GR_GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29
+#define GR_GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
+#define GR_GL_SHADER_TYPE 0x8B4F
+#define GR_GL_DELETE_STATUS 0x8B80
+#define GR_GL_LINK_STATUS 0x8B82
+#define GR_GL_VALIDATE_STATUS 0x8B83
+#define GR_GL_ATTACHED_SHADERS 0x8B85
+#define GR_GL_ACTIVE_UNIFORMS 0x8B86
+#define GR_GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GR_GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GR_GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GR_GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GR_GL_CURRENT_PROGRAM 0x8B8D
+#define GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49
+#define GR_GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A
+#define GR_GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE 0x8F63
+
+/* StencilFunction */
+#define GR_GL_NEVER 0x0200
+#define GR_GL_LESS 0x0201
+#define GR_GL_EQUAL 0x0202
+#define GR_GL_LEQUAL 0x0203
+#define GR_GL_GREATER 0x0204
+#define GR_GL_NOTEQUAL 0x0205
+#define GR_GL_GEQUAL 0x0206
+#define GR_GL_ALWAYS 0x0207
+
+/* StencilOp */
+/* GL_ZERO */
+#define GR_GL_KEEP 0x1E00
+#define GR_GL_REPLACE 0x1E01
+#define GR_GL_INCR 0x1E02
+#define GR_GL_DECR 0x1E03
+#define GR_GL_INVERT 0x150A
+#define GR_GL_INCR_WRAP 0x8507
+#define GR_GL_DECR_WRAP 0x8508
+
+/* StringName */
+#define GR_GL_VENDOR 0x1F00
+#define GR_GL_RENDERER 0x1F01
+#define GR_GL_VERSION 0x1F02
+#define GR_GL_EXTENSIONS 0x1F03
+
+/* StringCounts */
+#define GR_GL_NUM_EXTENSIONS 0x821D
+
+/* Pixel Mode / Transfer */
+#define GR_GL_UNPACK_ROW_LENGTH 0x0CF2
+#define GR_GL_PACK_ROW_LENGTH 0x0D02
+
+/* TextureMagFilter */
+#define GR_GL_NEAREST 0x2600
+#define GR_GL_LINEAR 0x2601
+
+/* TextureMinFilter */
+/* GL_NEAREST */
+/* GL_LINEAR */
+#define GR_GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GR_GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GR_GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GR_GL_LINEAR_MIPMAP_LINEAR 0x2703
+
+/* TextureUsage */
+#define GR_GL_FRAMEBUFFER_ATTACHMENT 0x93A3
+
+/* TextureParameterName */
+#define GR_GL_TEXTURE_MAG_FILTER 0x2800
+#define GR_GL_TEXTURE_MIN_FILTER 0x2801
+#define GR_GL_TEXTURE_WRAP_S 0x2802
+#define GR_GL_TEXTURE_WRAP_T 0x2803
+#define GR_GL_TEXTURE_USAGE 0x93A2
+
+/* TextureTarget */
+/* GL_TEXTURE_2D */
+#define GR_GL_TEXTURE 0x1702
+#define GR_GL_TEXTURE_CUBE_MAP 0x8513
+#define GR_GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GR_GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+
+/* TextureUnit */
+#define GR_GL_TEXTURE0 0x84C0
+#define GR_GL_TEXTURE1 0x84C1
+#define GR_GL_TEXTURE2 0x84C2
+#define GR_GL_TEXTURE3 0x84C3
+#define GR_GL_TEXTURE4 0x84C4
+#define GR_GL_TEXTURE5 0x84C5
+#define GR_GL_TEXTURE6 0x84C6
+#define GR_GL_TEXTURE7 0x84C7
+#define GR_GL_TEXTURE8 0x84C8
+#define GR_GL_TEXTURE9 0x84C9
+#define GR_GL_TEXTURE10 0x84CA
+#define GR_GL_TEXTURE11 0x84CB
+#define GR_GL_TEXTURE12 0x84CC
+#define GR_GL_TEXTURE13 0x84CD
+#define GR_GL_TEXTURE14 0x84CE
+#define GR_GL_TEXTURE15 0x84CF
+#define GR_GL_TEXTURE16 0x84D0
+#define GR_GL_TEXTURE17 0x84D1
+#define GR_GL_TEXTURE18 0x84D2
+#define GR_GL_TEXTURE19 0x84D3
+#define GR_GL_TEXTURE20 0x84D4
+#define GR_GL_TEXTURE21 0x84D5
+#define GR_GL_TEXTURE22 0x84D6
+#define GR_GL_TEXTURE23 0x84D7
+#define GR_GL_TEXTURE24 0x84D8
+#define GR_GL_TEXTURE25 0x84D9
+#define GR_GL_TEXTURE26 0x84DA
+#define GR_GL_TEXTURE27 0x84DB
+#define GR_GL_TEXTURE28 0x84DC
+#define GR_GL_TEXTURE29 0x84DD
+#define GR_GL_TEXTURE30 0x84DE
+#define GR_GL_TEXTURE31 0x84DF
+#define GR_GL_ACTIVE_TEXTURE 0x84E0
+#define GR_GL_MAX_TEXTURE_UNITS 0x84E2
+#define GR_GL_MAX_TEXTURE_COORDS 0x8871
+
+/* TextureWrapMode */
+#define GR_GL_REPEAT 0x2901
+#define GR_GL_CLAMP_TO_EDGE 0x812F
+#define GR_GL_MIRRORED_REPEAT 0x8370
+#define GR_GL_CLAMP_TO_BORDER 0x812D
+
+/* Texture Swizzle */
+#define GR_GL_TEXTURE_SWIZZLE_R 0x8E42
+#define GR_GL_TEXTURE_SWIZZLE_G 0x8E43
+#define GR_GL_TEXTURE_SWIZZLE_B 0x8E44
+#define GR_GL_TEXTURE_SWIZZLE_A 0x8E45
+#define GR_GL_TEXTURE_SWIZZLE_RGBA 0x8E46
+
+/* Texture mapping */
+#define GR_GL_TEXTURE_ENV 0x2300
+#define GR_GL_TEXTURE_ENV_MODE 0x2200
+#define GR_GL_TEXTURE_1D 0x0DE0
+/* GL_TEXTURE_2D */
+/* GL_TEXTURE_WRAP_S */
+/* GL_TEXTURE_WRAP_T */
+/* GL_TEXTURE_MAG_FILTER */
+/* GL_TEXTURE_MIN_FILTER */
+#define GR_GL_TEXTURE_ENV_COLOR 0x2201
+#define GR_GL_TEXTURE_GEN_S 0x0C60
+#define GR_GL_TEXTURE_GEN_T 0x0C61
+#define GR_GL_TEXTURE_GEN_R 0x0C62
+#define GR_GL_TEXTURE_GEN_Q 0x0C63
+#define GR_GL_TEXTURE_GEN_MODE 0x2500
+#define GR_GL_TEXTURE_BORDER_COLOR 0x1004
+#define GR_GL_TEXTURE_WIDTH 0x1000
+#define GR_GL_TEXTURE_HEIGHT 0x1001
+#define GR_GL_TEXTURE_BORDER 0x1005
+#define GR_GL_TEXTURE_COMPONENTS 0x1003
+#define GR_GL_TEXTURE_RED_SIZE 0x805C
+#define GR_GL_TEXTURE_GREEN_SIZE 0x805D
+#define GR_GL_TEXTURE_BLUE_SIZE 0x805E
+#define GR_GL_TEXTURE_ALPHA_SIZE 0x805F
+#define GR_GL_TEXTURE_LUMINANCE_SIZE 0x8060
+#define GR_GL_TEXTURE_INTENSITY_SIZE 0x8061
+#define GR_GL_TEXTURE_INTERNAL_FORMAT 0x1003
+/* GL_NEAREST_MIPMAP_NEAREST */
+/* GL_NEAREST_MIPMAP_LINEAR */
+/* GL_LINEAR_MIPMAP_NEAREST */
+/* GL_LINEAR_MIPMAP_LINEAR */
+#define GR_GL_OBJECT_LINEAR 0x2401
+#define GR_GL_OBJECT_PLANE 0x2501
+#define GR_GL_EYE_LINEAR 0x2400
+#define GR_GL_EYE_PLANE 0x2502
+#define GR_GL_SPHERE_MAP 0x2402
+#define GR_GL_DECAL 0x2101
+#define GR_GL_MODULATE 0x2100
+/* GL_NEAREST */
+/* GL_REPEAT */
+#define GR_GL_CLAMP 0x2900
+#define GR_GL_S 0x2000
+#define GR_GL_T 0x2001
+#define GR_GL_R 0x2002
+#define GR_GL_Q 0x2003
+
+/* texture_env_combine */
+#define GR_GL_COMBINE 0x8570
+#define GR_GL_COMBINE_RGB 0x8571
+#define GR_GL_COMBINE_ALPHA 0x8572
+#define GR_GL_SOURCE0_RGB 0x8580
+#define GR_GL_SOURCE1_RGB 0x8581
+#define GR_GL_SOURCE2_RGB 0x8582
+#define GR_GL_SOURCE0_ALPHA 0x8588
+#define GR_GL_SOURCE1_ALPHA 0x8589
+#define GR_GL_SOURCE2_ALPHA 0x858A
+#define GR_GL_OPERAND0_RGB 0x8590
+#define GR_GL_OPERAND1_RGB 0x8591
+#define GR_GL_OPERAND2_RGB 0x8592
+#define GR_GL_OPERAND0_ALPHA 0x8598
+#define GR_GL_OPERAND1_ALPHA 0x8599
+#define GR_GL_OPERAND2_ALPHA 0x859A
+#define GR_GL_RGB_SCALE 0x8573
+#define GR_GL_ADD_SIGNED 0x8574
+#define GR_GL_INTERPOLATE 0x8575
+#define GR_GL_SUBTRACT 0x84E7
+#define GR_GL_CONSTANT 0x8576
+#define GR_GL_PRIMARY_COLOR 0x8577
+#define GR_GL_PREVIOUS 0x8578
+#define GR_GL_SRC0_RGB 0x8580
+#define GR_GL_SRC1_RGB 0x8581
+#define GR_GL_SRC2_RGB 0x8582
+#define GR_GL_SRC0_ALPHA 0x8588
+#define GR_GL_SRC1_ALPHA 0x8589
+#define GR_GL_SRC2_ALPHA 0x858A
+
+/* Uniform Types */
+#define GR_GL_FLOAT_VEC2 0x8B50
+#define GR_GL_FLOAT_VEC3 0x8B51
+#define GR_GL_FLOAT_VEC4 0x8B52
+#define GR_GL_INT_VEC2 0x8B53
+#define GR_GL_INT_VEC3 0x8B54
+#define GR_GL_INT_VEC4 0x8B55
+#define GR_GL_BOOL 0x8B56
+#define GR_GL_BOOL_VEC2 0x8B57
+#define GR_GL_BOOL_VEC3 0x8B58
+#define GR_GL_BOOL_VEC4 0x8B59
+#define GR_GL_FLOAT_MAT2 0x8B5A
+#define GR_GL_FLOAT_MAT3 0x8B5B
+#define GR_GL_FLOAT_MAT4 0x8B5C
+#define GR_GL_SAMPLER_2D 0x8B5E
+#define GR_GL_SAMPLER_CUBE 0x8B60
+
+/* Vertex Arrays */
+#define GR_GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GR_GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GR_GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GR_GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GR_GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GR_GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GR_GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GR_GL_VERTEX_ARRAY 0x8074
+#define GR_GL_NORMAL_ARRAY 0x8075
+#define GR_GL_COLOR_ARRAY 0x8076
+#define GR_GL_SECONDARY_COLOR_ARRAY 0x845E
+#define GR_GL_INDEX_ARRAY 0x8077
+#define GR_GL_TEXTURE_COORD_ARRAY 0x8078
+#define GR_GL_EDGE_FLAG_ARRAY 0x8079
+#define GR_GL_VERTEX_ARRAY_SIZE 0x807A
+#define GR_GL_VERTEX_ARRAY_TYPE 0x807B
+#define GR_GL_VERTEX_ARRAY_STRIDE 0x807C
+#define GR_GL_NORMAL_ARRAY_TYPE 0x807E
+#define GR_GL_NORMAL_ARRAY_STRIDE 0x807F
+#define GR_GL_COLOR_ARRAY_SIZE 0x8081
+#define GR_GL_COLOR_ARRAY_TYPE 0x8082
+#define GR_GL_COLOR_ARRAY_STRIDE 0x8083
+#define GR_GL_INDEX_ARRAY_TYPE 0x8085
+#define GR_GL_INDEX_ARRAY_STRIDE 0x8086
+#define GR_GL_TEXTURE_COORD_ARRAY_SIZE 0x8088
+#define GR_GL_TEXTURE_COORD_ARRAY_TYPE 0x8089
+#define GR_GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A
+#define GR_GL_EDGE_FLAG_ARRAY_STRIDE 0x808C
+#define GR_GL_VERTEX_ARRAY_POINTER 0x808E
+#define GR_GL_NORMAL_ARRAY_POINTER 0x808F
+#define GR_GL_COLOR_ARRAY_POINTER 0x8090
+#define GR_GL_INDEX_ARRAY_POINTER 0x8091
+#define GR_GL_TEXTURE_COORD_ARRAY_POINTER 0x8092
+#define GR_GL_EDGE_FLAG_ARRAY_POINTER 0x8093
+#define GR_GL_V2F 0x2A20
+#define GR_GL_V3F 0x2A21
+#define GR_GL_C4UB_V2F 0x2A22
+#define GR_GL_C4UB_V3F 0x2A23
+#define GR_GL_C3F_V3F 0x2A24
+#define GR_GL_N3F_V3F 0x2A25
+#define GR_GL_C4F_N3F_V3F 0x2A26
+#define GR_GL_T2F_V3F 0x2A27
+#define GR_GL_T4F_V4F 0x2A28
+#define GR_GL_T2F_C4UB_V3F 0x2A29
+#define GR_GL_T2F_C3F_V3F 0x2A2A
+#define GR_GL_T2F_N3F_V3F 0x2A2B
+#define GR_GL_T2F_C4F_N3F_V3F 0x2A2C
+#define GR_GL_T4F_C4F_N3F_V4F 0x2A2D
+#define GR_GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69
+
+/* Buffer Object */
+#define GR_GL_READ_ONLY 0x88B8
+#define GR_GL_WRITE_ONLY 0x88B9
+#define GR_GL_READ_WRITE 0x88BA
+#define GR_GL_BUFFER_MAPPED 0x88BC
+
+#define GR_GL_MAP_READ_BIT 0x0001
+#define GR_GL_MAP_WRITE_BIT 0x0002
+#define GR_GL_MAP_INVALIDATE_RANGE_BIT 0x0004
+#define GR_GL_MAP_INVALIDATE_BUFFER_BIT 0x0008
+#define GR_GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
+#define GR_GL_MAP_UNSYNCHRONIZED_BIT 0x0020
+
+/* Read Format */
+#define GR_GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
+#define GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
+
+/* Shader Source */
+#define GR_GL_COMPILE_STATUS 0x8B81
+#define GR_GL_INFO_LOG_LENGTH 0x8B84
+#define GR_GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GR_GL_SHADER_COMPILER 0x8DFA
+
+/* Shader Binary */
+#define GR_GL_SHADER_BINARY_FORMATS 0x8DF8
+#define GR_GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
+
+/* Program Binary */
+#define GR_GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE
+
+/* Shader Precision-Specified Types */
+#define GR_GL_LOW_FLOAT 0x8DF0
+#define GR_GL_MEDIUM_FLOAT 0x8DF1
+#define GR_GL_HIGH_FLOAT 0x8DF2
+#define GR_GL_LOW_INT 0x8DF3
+#define GR_GL_MEDIUM_INT 0x8DF4
+#define GR_GL_HIGH_INT 0x8DF5
+
+/* Queries */
+#define GR_GL_QUERY_COUNTER_BITS 0x8864
+#define GR_GL_CURRENT_QUERY 0x8865
+#define GR_GL_QUERY_RESULT 0x8866
+#define GR_GL_QUERY_RESULT_AVAILABLE 0x8867
+#define GR_GL_SAMPLES_PASSED 0x8914
+#define GR_GL_ANY_SAMPLES_PASSED 0x8C2F
+#define GR_GL_TIME_ELAPSED 0x88BF
+#define GR_GL_TIMESTAMP 0x8E28
+#define GR_GL_PRIMITIVES_GENERATED 0x8C87
+#define GR_GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88
+
+/* Framebuffer Object */
+#define GR_GL_FRAMEBUFFER 0x8D40
+#define GR_GL_READ_FRAMEBUFFER 0x8CA8
+#define GR_GL_DRAW_FRAMEBUFFER 0x8CA9
+
+#define GR_GL_RENDERBUFFER 0x8D41
+
+#define GR_GL_MAX_SAMPLES 0x8D57
+// GL_IMG_multisampled_render_to_texture uses a different value for GL_MAX_SAMPLES
+#define GR_GL_MAX_SAMPLES_IMG 0x9135
+
+#define GR_GL_RENDERBUFFER_WIDTH 0x8D42
+#define GR_GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GR_GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GR_GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GR_GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GR_GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GR_GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GR_GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GR_GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217
+
+#define GR_GL_COLOR_ATTACHMENT0 0x8CE0
+#define GR_GL_DEPTH_ATTACHMENT 0x8D00
+#define GR_GL_STENCIL_ATTACHMENT 0x8D20
+
+// GL_EXT_discard_framebuffer
+#define GR_GL_COLOR 0x1800
+#define GR_GL_DEPTH 0x1801
+#define GR_GL_STENCIL 0x1802
+
+#define GR_GL_NONE 0
+#define GR_GL_FRAMEBUFFER_DEFAULT 0x8218
+
+#define GR_GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9
+#define GR_GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+
+#define GR_GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GR_GL_RENDERBUFFER_BINDING 0x8CA7
+#define GR_GL_MAX_RENDERBUFFER_SIZE 0x84E8
+
+#define GR_GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+
+/* Path Rendering */
+// commands
+#define GR_GL_CLOSE_PATH 0x00
+#define GR_GL_MOVE_TO 0x02
+#define GR_GL_LINE_TO 0x04
+#define GR_GL_QUADRATIC_CURVE_TO 0x0A
+#define GR_GL_CUBIC_CURVE_TO 0x0C
+#define GR_GL_CONIC_CURVE_TO 0x1A
+
+// path parameters
+#define GR_GL_PATH_STROKE_WIDTH 0x9075
+#define GR_GL_PATH_END_CAPS 0x9076
+#define GR_GL_PATH_JOIN_STYLE 0x9079
+#define GR_GL_PATH_MITER_LIMIT 0x907A
+#define GR_GL_PATH_STROKE_BOUND 0x9086
+
+// fill modes
+#define GR_GL_COUNT_UP 0x9088
+
+// cover mode
+#define GR_GL_BOUNDING_BOX 0x908D
+#define GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES 0x909C
+
+// transform type
+#define GR_GL_TRANSLATE_X 0x908E
+#define GR_GL_TRANSLATE_Y 0x908F
+#define GR_GL_TRANSLATE_2D 0x9090
+#define GR_GL_TRANSPOSE_AFFINE_2D 0x9096
+
+// cap/dash values
+#define GR_GL_SQUARE 0x90A3
+#define GR_GL_ROUND 0x90A4
+
+// join values
+#define GR_GL_BEVEL 0x90A6
+#define GR_GL_MITER_REVERT 0x90A7
+
+// glyph loading values
+#define GR_GL_STANDARD_FONT_FORMAT 0x936C
+#define GR_GL_FONT_GLYPHS_AVAILABLE 0x9368
+
+// NV_path_rendering extension to ARB_program_interface_query:
+// .. corresponds to the set of active input variables used by the fragment
+// shader stage of <program> (if a fragment stage exists).
+#define GR_GL_FRAGMENT_INPUT 0x936D
+
+// NV_path_rendering extension to EXT_direct_state_access:
+// [the matrix functions] must support the PATH_PROJECTION_NV and
+// PATH_MODELVIEW_NV tokens for matrixMode.
+#define GR_GL_PATH_PROJECTION 0x1701
+#define GR_GL_PATH_MODELVIEW 0x1700
+
+/* ARM specific define for MSAA support on framebuffer fetch */
+#define GR_GL_FETCH_PER_SAMPLE 0x8F65
+
+/* GL_KHR_debug */
+#define GR_GL_DEBUG_OUTPUT 0x92E0
+#define GR_GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242
+#define GR_GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002
+#define GR_GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143
+#define GR_GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144
+#define GR_GL_DEBUG_LOGGED_MESSAGES 0x9145
+#define GR_GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243
+#define GR_GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C
+#define GR_GL_DEBUG_GROUP_STACK_DEPTH 0x826D
+#define GR_GL_MAX_LABEL_LENGTH 0x82E8
+#define GR_GL_DEBUG_SOURCE_API 0x8246
+#define GR_GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247
+#define GR_GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248
+#define GR_GL_DEBUG_SOURCE_THIRD_PARTY 0x8249
+#define GR_GL_DEBUG_SOURCE_APPLICATION 0x824A
+#define GR_GL_DEBUG_SOURCE_OTHER 0x824B
+#define GR_GL_DEBUG_TYPE_ERROR 0x824C
+#define GR_GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D
+#define GR_GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E
+#define GR_GL_DEBUG_TYPE_PORTABILITY 0x824F
+#define GR_GL_DEBUG_TYPE_PERFORMANCE 0x8250
+#define GR_GL_DEBUG_TYPE_OTHER 0x8251
+#define GR_GL_DEBUG_TYPE_MARKER 0x8268
+#define GR_GL_DEBUG_TYPE_PUSH_GROUP 0x8269
+#define GR_GL_DEBUG_TYPE_POP_GROUP 0x826A
+#define GR_GL_DEBUG_SEVERITY_HIGH 0x9146
+#define GR_GL_DEBUG_SEVERITY_MEDIUM 0x9147
+#define GR_GL_DEBUG_SEVERITY_LOW 0x9148
+#define GR_GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
+#define GR_GL_STACK_UNDERFLOW 0x0504
+#define GR_GL_STACK_OVERFLOW 0x0503
+#define GR_GL_BUFFER 0x82E0
+#define GR_GL_SHADER 0x82E1
+#define GR_GL_PROGRAM 0x82E2
+#define GR_GL_QUERY 0x82E3
+#define GR_GL_PROGRAM_PIPELINE 0x82E4
+#define GR_GL_SAMPLER 0x82E6
+
+/* GL_OES_EGL_image_external */
+#define GR_GL_TEXTURE_EXTERNAL 0x8D65
+#define GR_GL_TEXTURE_BINDING_EXTERNAL 0x8D67
+
+/* GL_ARB_texture_rectangle or GL_ANGLE_texture_rectangle */
+#define GR_GL_TEXTURE_RECTANGLE 0x84F5
+#define GR_GL_TEXTURE_BINDING_RECTANGLE 0x84F6
+
+/* GL_EXT_window_rectangles */
+#define GR_GL_MAX_WINDOW_RECTANGLES 0x8F14
+#define GR_GL_INCLUSIVE 0x8F10
+#define GR_GL_EXCLUSIVE 0x8F11
+
+/* GL_QCOM_tiled_rendering */
+#define GR_GL_COLOR_BUFFER_BIT0 0x00000001
+#define GR_GL_COLOR_BUFFER_BIT1 0x00000002
+#define GR_GL_COLOR_BUFFER_BIT2 0x00000004
+#define GR_GL_COLOR_BUFFER_BIT3 0x00000008
+#define GR_GL_COLOR_BUFFER_BIT4 0x00000010
+#define GR_GL_COLOR_BUFFER_BIT5 0x00000020
+#define GR_GL_COLOR_BUFFER_BIT6 0x00000040
+#define GR_GL_COLOR_BUFFER_BIT7 0x00000080
+#define GR_GL_DEPTH_BUFFER_BIT0 0x00000100
+#define GR_GL_DEPTH_BUFFER_BIT1 0x00000200
+#define GR_GL_DEPTH_BUFFER_BIT2 0x00000400
+#define GR_GL_DEPTH_BUFFER_BIT3 0x00000800
+#define GR_GL_DEPTH_BUFFER_BIT4 0x00001000
+#define GR_GL_DEPTH_BUFFER_BIT5 0x00002000
+#define GR_GL_DEPTH_BUFFER_BIT6 0x00004000
+#define GR_GL_DEPTH_BUFFER_BIT7 0x00008000
+#define GR_GL_STENCIL_BUFFER_BIT0 0x00010000
+#define GR_GL_STENCIL_BUFFER_BIT1 0x00020000
+#define GR_GL_STENCIL_BUFFER_BIT2 0x00040000
+#define GR_GL_STENCIL_BUFFER_BIT3 0x00080000
+#define GR_GL_STENCIL_BUFFER_BIT4 0x00100000
+#define GR_GL_STENCIL_BUFFER_BIT5 0x00200000
+#define GR_GL_STENCIL_BUFFER_BIT6 0x00400000
+#define GR_GL_STENCIL_BUFFER_BIT7 0x00800000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT0 0x01000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT1 0x02000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT2 0x04000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT3 0x08000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT4 0x10000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT5 0x20000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT6 0x40000000
+#define GR_GL_MULTISAMPLE_BUFFER_BIT7 0x80000000
+
+/* GL_ARB_sync */
+#define GR_GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
+#define GR_GL_ALREADY_SIGNALED 0x911A
+#define GR_GL_TIMEOUT_EXPIRED 0x911B
+#define GR_GL_CONDITION_SATISFIED 0x911C
+#define GR_GL_WAIT_FAILED 0x911D
+#define GR_GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001
+#define GR_GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull
+
+/* GL_EXT_geometry_shader */
+#define GR_GL_LINES_ADJACENCY 0x000A
+
+/* GL_ARB_internalformat_query */
+#define GR_GL_NUM_SAMPLE_COUNTS 0x9380
+
+/* EGL Defines */
+#define GR_EGL_NO_DISPLAY ((GrEGLDisplay)0)
+#define GR_EGL_EXTENSIONS 0x3055
+#define GR_EGL_GL_TEXTURE_2D 0x30B1
+#define GR_EGL_GL_TEXTURE_LEVEL 0x30BC
+#define GR_EGL_IMAGE_PRESERVED 0x30D2
+#define GR_EGL_FALSE 0x0
+#define GR_EGL_TRUE 0x1
+#define GR_EGL_NONE 0x3038
+#define GR_EGL_NO_IMAGE ((GrEGLImage)0)
+
+/* Programs */
+#define GR_GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257
+#define GL_PROGRAM_BINARY_LENGTH 0x8741
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp b/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp
new file mode 100644
index 0000000000..e5a989f95b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "src/gpu/gl/GrGLDefines.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTSort.h"
+#include "src/utils/SkJSONWriter.h"
+
+namespace { // The function below cannot be static because it is used as a template parameter.
+inline bool extension_compare(const SkString& a, const SkString& b) {
+ return strcmp(a.c_str(), b.c_str()) < 0;
+}
+}  // namespace
+
+// Finds the index of ext in strings, or returns a negative result if ext is not found.
+static int find_string(const SkTArray<SkString>& strings, const char ext[]) {
+ if (strings.empty()) {
+ return -1;
+ }
+ SkString extensionStr(ext);
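+    // fStrings is kept sorted (init() sorts it; add()/remove() preserve the order),
+    // so the binary search below is valid.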
+ int idx = SkTSearch<SkString, extension_compare>(&strings.front(),
+ strings.count(),
+ extensionStr,
+ sizeof(SkString));
+ return idx;
+}
+
+GrGLExtensions::GrGLExtensions(const GrGLExtensions& that) {
+ *this = that;
+}
+
+GrGLExtensions& GrGLExtensions::operator=(const GrGLExtensions& that) {
+ if (this != &that) {
+ fStrings = that.fStrings;
+ fInitialized = that.fInitialized;
+ }
+ return *this;
+}
+
+static void eat_space_sep_strings(SkTArray<SkString>* out, const char in[]) {
+ if (!in) {
+ return;
+ }
+ while (true) {
+ // skip over multiple spaces between extensions
+ while (' ' == *in) {
+ ++in;
+ }
+ // quit once we reach the end of the string.
+ if ('\0' == *in) {
+ break;
+ }
+ // we found an extension
+ size_t length = strcspn(in, " ");
+ out->push_back().set(in, length);
+ in += length;
+ }
+}
+
+bool GrGLExtensions::init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringFn> getString,
+ GrGLFunction<GrGLGetStringiFn> getStringi,
+ GrGLFunction<GrGLGetIntegervFn> getIntegerv,
+ GrGLFunction<GrEGLQueryStringFn> queryString,
+ GrEGLDisplay eglDisplay) {
+ fInitialized = false;
+ fStrings.reset();
+
+ if (!getString) {
+ return false;
+ }
+
+ const GrGLubyte* verString = getString(GR_GL_VERSION);
+ GrGLVersion version = GrGLGetVersionFromString((const char*) verString);
+ if (GR_GL_INVALID_VER == version) {
+ return false;
+ }
+
+ bool indexed = false;
+ if (GR_IS_GR_GL(standard) || GR_IS_GR_GL_ES(standard)) {
+ // glGetStringi and indexed extensions were added in version 3.0 of desktop GL and ES.
+ indexed = version >= GR_GL_VER(3, 0);
+ } else if (GR_IS_GR_WEBGL(standard)) {
+        // WebGL (1.0 or 2.0) doesn't natively support glGetStringi, but Emscripten adds it in
+ // https://github.com/emscripten-core/emscripten/issues/3472
+ indexed = version >= GR_GL_VER(2, 0);
+ }
+
+ if (indexed) {
+ if (!getStringi || !getIntegerv) {
+ return false;
+ }
+ GrGLint extensionCnt = 0;
+ getIntegerv(GR_GL_NUM_EXTENSIONS, &extensionCnt);
+ fStrings.push_back_n(extensionCnt);
+ for (int i = 0; i < extensionCnt; ++i) {
+ const char* ext = (const char*) getStringi(GR_GL_EXTENSIONS, i);
+ fStrings[i] = ext;
+ }
+ } else {
+ const char* extensions = (const char*) getString(GR_GL_EXTENSIONS);
+ if (!extensions) {
+ return false;
+ }
+ eat_space_sep_strings(&fStrings, extensions);
+ }
+ if (queryString) {
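+        // Also fold in EGL extensions, which are reported by the display rather than
+        // the GL context.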
+ const char* extensions = queryString(eglDisplay, GR_EGL_EXTENSIONS);
+
+ eat_space_sep_strings(&fStrings, extensions);
+ }
+ if (!fStrings.empty()) {
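+        // Sort the combined list so find_string() can binary search it.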
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+ SkTQSort(&fStrings.front(), &fStrings.back(), cmp);
+ }
+ fInitialized = true;
+ return true;
+}
+
+bool GrGLExtensions::has(const char ext[]) const {
+ SkASSERT(fInitialized);
+ return find_string(fStrings, ext) >= 0;
+}
+
+bool GrGLExtensions::remove(const char ext[]) {
+ SkASSERT(fInitialized);
+ int idx = find_string(fStrings, ext);
+ if (idx < 0) {
+ return false;
+ }
+
+    // This is not terribly efficient but we really only expect this function to be called at
+    // most a handful of times when our test programs start.
+ fStrings.removeShuffle(idx);
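+    // removeShuffle() swapped the last element into idx, so re-sort the tail to restore order.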
+ if (idx != fStrings.count()) {
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+ SkTInsertionSort(&(fStrings.operator[](idx)), &fStrings.back(), cmp);
+ }
+ return true;
+}
+
+void GrGLExtensions::add(const char ext[]) {
+ int idx = find_string(fStrings, ext);
+ if (idx < 0) {
+        // This is not the most efficient approach since we end up looking at all of the
+        // extensions after the add.
+ fStrings.emplace_back(ext);
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+ SkTInsertionSort(&fStrings.front(), &fStrings.back(), cmp);
+ }
+}
+
+#ifdef SK_ENABLE_DUMP_GPU
+void GrGLExtensions::dumpJSON(SkJSONWriter* writer) const {
+ writer->beginArray();
+ for (int i = 0; i < fStrings.count(); ++i) {
+ writer->appendString(fStrings[i].c_str());
+ }
+ writer->endArray();
+}
+#else
+void GrGLExtensions::dumpJSON(SkJSONWriter* writer) const { }
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp
new file mode 100644
index 0000000000..2ad38bc35e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLGLSL.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+bool GrGLGetGLSLGeneration(const GrGLInterface* gl, GrGLSLGeneration* generation) {
+ SkASSERT(generation);
+ GrGLSLVersion ver = GrGLGetGLSLVersion(gl);
+ if (GR_GLSL_INVALID_VER == ver) {
+ return false;
+ }
+
+ // Workaround for a bug on some Adreno 308 devices with Android 9. The driver reports a GL
+ // version of 3.0, and a GLSL version of 3.1. If we use version 310 shaders, the driver reports
+ // that it's not supported. To keep things simple, we pin the GLSL version to the GL version.
+ // Note that GLSL versions have an extra digit on their minor level, so we have to scale up
+ // the GL version's minor revision to get a comparable GLSL version. This logic can easily
+ // create invalid GLSL versions (older GL didn't keep the versions in sync), but the checks
+ // below will further pin the GLSL generation correctly.
+ // https://github.com/flutter/flutter/issues/36130
+ GrGLVersion glVer = GrGLGetVersion(gl);
+ uint32_t glMajor = GR_GL_MAJOR_VER(glVer),
+ glMinor = GR_GL_MINOR_VER(glVer);
+ ver = SkTMin(ver, GR_GLSL_VER(glMajor, 10 * glMinor));
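+    // e.g. a driver reporting GL 3.0 alongside GLSL 3.10 gets pinned to GLSL 3.00 here.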
+
+ if (GR_IS_GR_GL(gl->fStandard)) {
+ SkASSERT(ver >= GR_GLSL_VER(1,10));
+ if (ver >= GR_GLSL_VER(4,20)) {
+ *generation = k420_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(4,00)) {
+ *generation = k400_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,30)) {
+ *generation = k330_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,50)) {
+ *generation = k150_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,40)) {
+ *generation = k140_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,30)) {
+ *generation = k130_GrGLSLGeneration;
+ } else {
+ *generation = k110_GrGLSLGeneration;
+ }
+ return true;
+ } else if (GR_IS_GR_GL_ES(gl->fStandard)) {
+ SkASSERT(ver >= GR_GL_VER(1,00));
+ if (ver >= GR_GLSL_VER(3,20)) {
+ *generation = k320es_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,10)) {
+ *generation = k310es_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,00)) {
+ *generation = k330_GrGLSLGeneration;
+ } else {
+ *generation = k110_GrGLSLGeneration;
+ }
+ return true;
+ } else if (GR_IS_GR_WEBGL(gl->fStandard)) {
+ SkASSERT(ver >= GR_GL_VER(1,0));
+ if (ver >= GR_GLSL_VER(2,0)) {
+ *generation = k330_GrGLSLGeneration; // ES 3.0
+ } else {
+ *generation = k110_GrGLSLGeneration;
+ }
+ return true;
+ }
+ SK_ABORT("Unknown GL Standard");
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h
new file mode 100644
index 0000000000..de86cd1f31
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLGLSL_DEFINED
+#define GrGLGLSL_DEFINED
+
+#include "src/gpu/glsl/GrGLSL.h"
+
+struct GrGLInterface;
+
+/**
+ * Gets the most recent GLSL Generation compatible with the OpenGL context.
+ */
+bool GrGLGetGLSLGeneration(const GrGLInterface* gl, GrGLSLGeneration* generation);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp
new file mode 100644
index 0000000000..937e168db5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp
@@ -0,0 +1,4035 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrCpuBuffer.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/gl/GrGLBuffer.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLOpsRenderPass.h"
+#include "src/gpu/gl/GrGLSemaphore.h"
+#include "src/gpu/gl/GrGLStencilAttachment.h"
+#include "src/gpu/gl/GrGLTextureRenderTarget.h"
+#include "src/gpu/gl/builders/GrGLShaderStringBuilder.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#include <cmath>
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
+
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
+#else
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
+#endif
+
+//#define USE_NSIGHT
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const GrGLenum gXfermodeEquation2Blend[] = {
+ // Basic OpenGL blend equations.
+ GR_GL_FUNC_ADD,
+ GR_GL_FUNC_SUBTRACT,
+ GR_GL_FUNC_REVERSE_SUBTRACT,
+
+ // GL_KHR_blend_equation_advanced.
+ GR_GL_SCREEN,
+ GR_GL_OVERLAY,
+ GR_GL_DARKEN,
+ GR_GL_LIGHTEN,
+ GR_GL_COLORDODGE,
+ GR_GL_COLORBURN,
+ GR_GL_HARDLIGHT,
+ GR_GL_SOFTLIGHT,
+ GR_GL_DIFFERENCE,
+ GR_GL_EXCLUSION,
+ GR_GL_MULTIPLY,
+ GR_GL_HSL_HUE,
+ GR_GL_HSL_SATURATION,
+ GR_GL_HSL_COLOR,
+ GR_GL_HSL_LUMINOSITY,
+
+ // Illegal... needs to map to something.
+ GR_GL_FUNC_ADD,
+};
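+// Verify that each GrBlendEquation value indexes its GL equivalent in the table above.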
+GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
+GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
+GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
+GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
+GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
+GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
+GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
+GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
+GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
+GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
+GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
+GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
+GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
+GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
+GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
+GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
+
+static const GrGLenum gXfermodeCoeff2Blend[] = {
+ GR_GL_ZERO,
+ GR_GL_ONE,
+ GR_GL_SRC_COLOR,
+ GR_GL_ONE_MINUS_SRC_COLOR,
+ GR_GL_DST_COLOR,
+ GR_GL_ONE_MINUS_DST_COLOR,
+ GR_GL_SRC_ALPHA,
+ GR_GL_ONE_MINUS_SRC_ALPHA,
+ GR_GL_DST_ALPHA,
+ GR_GL_ONE_MINUS_DST_ALPHA,
+ GR_GL_CONSTANT_COLOR,
+ GR_GL_ONE_MINUS_CONSTANT_COLOR,
+ GR_GL_CONSTANT_ALPHA,
+ GR_GL_ONE_MINUS_CONSTANT_ALPHA,
+
+ // extended blend coeffs
+ GR_GL_SRC1_COLOR,
+ GR_GL_ONE_MINUS_SRC1_COLOR,
+ GR_GL_SRC1_ALPHA,
+ GR_GL_ONE_MINUS_SRC1_ALPHA,
+
+ // Illegal... needs to map to something.
+ GR_GL_ZERO,
+};
+
+bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+
+ // Illegal.
+ false,
+ };
+ return gCoeffReferencesBlendConst[coeff];
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+    // The assertion for gXfermodeCoeff2Blend has to be in GrGpu scope.
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static int gl_target_to_binding_index(GrGLenum target) {
+ switch (target) {
+ case GR_GL_TEXTURE_2D:
+ return 0;
+ case GR_GL_TEXTURE_RECTANGLE:
+ return 1;
+ case GR_GL_TEXTURE_EXTERNAL:
+ return 2;
+ }
+ SK_ABORT("Unexpected GL texture target.");
+}
+
+GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
+ return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
+}
+
+bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
+ return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
+}
+
+void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
+ int targetIndex = gl_target_to_binding_index(target);
+ fTargetBindings[targetIndex].fBoundResourceID = resourceID;
+ fTargetBindings[targetIndex].fHasBeenModified = true;
+}
+
+void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
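+    // Record a default (invalid) ID so the next bind of a real texture to this target
+    // is not skipped.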
+ this->setBoundID(target, GrGpuResource::UniqueID());
+}
+
+void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
+ for (auto& targetBinding : fTargetBindings) {
+ targetBinding.fBoundResourceID.makeInvalid();
+ if (markUnmodified) {
+ targetBinding.fHasBeenModified = false;
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
+ switch (filter) {
+ case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
+ case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
+ case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR;
+ }
+ SK_ABORT("Unknown filter");
+}
+
+static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) {
+ switch (filter) {
+ case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
+ case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
+ case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR_MIPMAP_LINEAR;
+ }
+ SK_ABORT("Unknown filter");
+}
+
+static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
+ const GrCaps& caps) {
+ switch (wrapMode) {
+ case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
+ case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
+ case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
+ case GrSamplerState::WrapMode::kClampToBorder:
+ // May not be supported but should have been caught earlier
+ SkASSERT(caps.clampToBorderSupport());
+ return GR_GL_CLAMP_TO_BORDER;
+ }
+ SK_ABORT("Unknown wrap mode");
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLGpu::SamplerObjectCache {
+public:
+ SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
+ fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers();
+ fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]);
+ std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
+ std::fill_n(fSamplers, kNumSamplers, 0);
+ }
+
+ ~SamplerObjectCache() {
+ if (!fNumTextureUnits) {
+ // We've already been abandoned.
+ return;
+ }
+ for (GrGLuint sampler : fSamplers) {
+        // The spec states that "zero" values should be silently ignored; however, they still
+        // trigger GL errors on some NVIDIA platforms.
+ if (sampler) {
+ GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(1, &sampler));
+ }
+ }
+ }
+
+ void bindSampler(int unitIdx, const GrSamplerState& state) {
+ int index = StateToIndex(state);
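+        // Lazily create the sampler object the first time this sampler state is used.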
+ if (!fSamplers[index]) {
+ GrGLuint s;
+ GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
+ if (!s) {
+ return;
+ }
+ fSamplers[index] = s;
+ auto minFilter = filter_to_gl_min_filter(state.filter());
+ auto magFilter = filter_to_gl_mag_filter(state.filter());
+ auto wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
+ auto wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
+ GR_GL_CALL(fGpu->glInterface(),
+ SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
+ GR_GL_CALL(fGpu->glInterface(),
+ SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
+ GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
+ GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
+ }
+ if (fHWBoundSamplers[unitIdx] != fSamplers[index]) {
+ GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index]));
+ fHWBoundSamplers[unitIdx] = fSamplers[index];
+ }
+ }
+
+ void invalidateBindings() {
+ // When we have sampler support we always use samplers. So setting these to zero will cause
+ // a rebind on next usage.
+ std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
+ }
+
+ void abandon() {
+ fHWBoundSamplers.reset();
+ fNumTextureUnits = 0;
+ }
+
+ void release() {
+ if (!fNumTextureUnits) {
+ // We've already been abandoned.
+ return;
+ }
+ GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
+ std::fill_n(fSamplers, kNumSamplers, 0);
+ // Deleting a bound sampler implicitly binds sampler 0.
+ std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
+ }
+
+private:
+ static int StateToIndex(const GrSamplerState& state) {
+ int filter = static_cast<int>(state.filter());
+ SkASSERT(filter >= 0 && filter < 3);
+ int wrapX = static_cast<int>(state.wrapModeX());
+ SkASSERT(wrapX >= 0 && wrapX < 4);
+ int wrapY = static_cast<int>(state.wrapModeY());
+ SkASSERT(wrapY >= 0 && wrapY < 4);
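+        // Pack (filter, wrapX, wrapY) into a unique index: 3 filters * 4 * 4 wrap modes
+        // = 48 possible states.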
+ int idx = 16 * filter + 4 * wrapX + wrapY;
+ SkASSERT(idx < kNumSamplers);
+ return idx;
+ }
+
+ GrGLGpu* fGpu;
+ static constexpr int kNumSamplers = 48;
+ std::unique_ptr<GrGLuint[]> fHWBoundSamplers;
+ GrGLuint fSamplers[kNumSamplers];
+ int fNumTextureUnits;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options,
+ GrContext* context) {
+ if (!interface) {
+ interface = GrGLMakeNativeInterface();
+ // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated
+ // to GrGLMakeNativeInterface.
+ if (!interface) {
+ interface = sk_ref_sp(GrGLCreateNativeInterface());
+ }
+ if (!interface) {
+ return nullptr;
+ }
+ }
+#ifdef USE_NSIGHT
+ const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
+#endif
+ auto glContext = GrGLContext::Make(std::move(interface), options);
+ if (!glContext) {
+ return nullptr;
+ }
+ return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context));
+}
+
+GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context)
+ : GrGpu(context)
+ , fGLContext(std::move(ctx))
+ , fProgramCache(new ProgramCache(this))
+ , fHWProgramID(0)
+ , fTempSrcFBOID(0)
+ , fTempDstFBOID(0)
+ , fStencilClearFBOID(0) {
+ SkASSERT(fGLContext);
+ GrGLClearErr(this->glInterface());
+ fCaps = sk_ref_sp(fGLContext->caps());
+
+ fHWTextureUnitBindings.reset(this->numTextureUnits());
+
+ this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
+ if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
+ GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
+ GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
+ } else {
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
+ }
+ for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
+ fHWBufferState[i].invalidate();
+ }
+ GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(fHWBufferState));
+
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
+ fPathRendering.reset(new GrGLPathRendering(this));
+ }
+
+ if (this->glCaps().samplerObjectSupport()) {
+ fSamplerObjectCache.reset(new SamplerObjectCache(this));
+ }
+}
+
+GrGLGpu::~GrGLGpu() {
+ // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
+ // to release the resources held by the objects themselves.
+ fPathRendering.reset();
+ fCopyProgramArrayBuffer.reset();
+ fMipmapProgramArrayBuffer.reset();
+
+ fHWProgram.reset();
+ if (fHWProgramID) {
+ // detach the current program so there is no confusion on OpenGL's part
+ // that we want it to be deleted
+ GL_CALL(UseProgram(0));
+ }
+
+ if (fTempSrcFBOID) {
+ this->deleteFramebuffer(fTempSrcFBOID);
+ }
+ if (fTempDstFBOID) {
+ this->deleteFramebuffer(fTempDstFBOID);
+ }
+ if (fStencilClearFBOID) {
+ this->deleteFramebuffer(fStencilClearFBOID);
+ }
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ if (0 != fCopyPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
+ }
+ }
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ if (0 != fMipmapPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
+ }
+ }
+
+ delete fProgramCache;
+ fSamplerObjectCache.reset();
+}
+
+void GrGLGpu::disconnect(DisconnectType type) {
+ INHERITED::disconnect(type);
+ if (DisconnectType::kCleanup == type) {
+ if (fHWProgramID) {
+ GL_CALL(UseProgram(0));
+ }
+ if (fTempSrcFBOID) {
+ this->deleteFramebuffer(fTempSrcFBOID);
+ }
+ if (fTempDstFBOID) {
+ this->deleteFramebuffer(fTempDstFBOID);
+ }
+ if (fStencilClearFBOID) {
+ this->deleteFramebuffer(fStencilClearFBOID);
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ if (fCopyPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
+ }
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ if (fMipmapPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
+ }
+ }
+
+ if (fSamplerObjectCache) {
+ fSamplerObjectCache->release();
+ }
+ } else {
+ if (fProgramCache) {
+ fProgramCache->abandon();
+ }
+ if (fSamplerObjectCache) {
+ fSamplerObjectCache->abandon();
+ }
+ }
+
+ fHWProgram.reset();
+ delete fProgramCache;
+ fProgramCache = nullptr;
+
+ fHWProgramID = 0;
+ fTempSrcFBOID = 0;
+ fTempDstFBOID = 0;
+ fStencilClearFBOID = 0;
+ fCopyProgramArrayBuffer.reset();
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ fCopyPrograms[i].fProgram = 0;
+ }
+ fMipmapProgramArrayBuffer.reset();
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ fMipmapPrograms[i].fProgram = 0;
+ }
+
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
+ this->glPathRendering()->disconnect(type);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLGpu::onResetContext(uint32_t resetBits) {
+ if (resetBits & kMisc_GrGLBackendState) {
+        // We don't use the depth buffer (z-buffer) at all.
+ GL_CALL(Disable(GR_GL_DEPTH_TEST));
+ GL_CALL(DepthMask(GR_GL_FALSE));
+
+ // We don't use face culling.
+ GL_CALL(Disable(GR_GL_CULL_FACE));
+ // We do use separate stencil. Our algorithms don't care which face is front vs. back so
+ // just set this to the default for self-consistency.
+ GL_CALL(FrontFace(GR_GL_CCW));
+
+ this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
+
+ if (GR_IS_GR_GL(this->glStandard())) {
+#ifndef USE_NSIGHT
+ // Desktop-only state that we never change
+ if (!this->glCaps().isCoreProfile()) {
+ GL_CALL(Disable(GR_GL_POINT_SMOOTH));
+ GL_CALL(Disable(GR_GL_LINE_SMOOTH));
+ GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
+ GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
+ GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
+ GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
+ }
+ // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
+ // core profile. This seems like a bug since the core spec removes any mention of
+ // GL_ARB_imaging.
+ if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
+ GL_CALL(Disable(GR_GL_COLOR_TABLE));
+ }
+ GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
+
+ if (this->caps()->wireframeMode()) {
+ GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
+ } else {
+ GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
+ }
+#endif
+            // Since ES doesn't support glPointSize at all, we always use the VS to
+            // set the point size.
+ GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
+
+ }
+
+ if (GR_IS_GR_GL_ES(this->glStandard()) &&
+ this->glCaps().fbFetchRequiresEnablePerSample()) {
+ // The arm extension requires specifically enabling MSAA fetching per sample.
+            // On some devices this may have a perf hit. Also, multiple render targets are disabled.
+ GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
+ }
+ fHWWriteToColor = kUnknown_TriState;
+ // we only ever use lines in hairline mode
+ GL_CALL(LineWidth(1));
+ GL_CALL(Disable(GR_GL_DITHER));
+
+ fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
+ }
+
+ if (resetBits & kMSAAEnable_GrGLBackendState) {
+ fMSAAEnabled = kUnknown_TriState;
+
+ if (this->caps()->mixedSamplesSupport()) {
+ // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
+ // modulation. This state has no effect when not rendering to a mixed sampled target.
+ GL_CALL(CoverageModulation(GR_GL_RGBA));
+ }
+ }
+
+ fHWActiveTextureUnitIdx = -1; // invalid
+ fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);
+
+ if (resetBits & kTextureBinding_GrGLBackendState) {
+ for (int s = 0; s < this->numTextureUnits(); ++s) {
+ fHWTextureUnitBindings[s].invalidateAllTargets(false);
+ }
+ if (fSamplerObjectCache) {
+ fSamplerObjectCache->invalidateBindings();
+ }
+ }
+
+ if (resetBits & kBlend_GrGLBackendState) {
+ fHWBlendState.invalidate();
+ }
+
+ if (resetBits & kView_GrGLBackendState) {
+ fHWScissorSettings.invalidate();
+ fHWWindowRectsState.invalidate();
+ fHWViewport.invalidate();
+ }
+
+ if (resetBits & kStencil_GrGLBackendState) {
+ fHWStencilSettings.invalidate();
+ fHWStencilTestEnabled = kUnknown_TriState;
+ }
+
+ // Vertex
+ if (resetBits & kVertex_GrGLBackendState) {
+ fHWVertexArrayState.invalidate();
+ this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
+ this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
+ }
+
+ if (resetBits & kRenderTarget_GrGLBackendState) {
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ fHWSRGBFramebuffer = kUnknown_TriState;
+ }
+
+ if (resetBits & kPathRendering_GrGLBackendState) {
+ if (this->caps()->shaderCaps()->pathRenderingSupport()) {
+ this->glPathRendering()->resetContext();
+ }
+ }
+
+ // we assume these values
+ if (resetBits & kPixelStore_GrGLBackendState) {
+ if (this->caps()->writePixelsRowBytesSupport()) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+ if (this->glCaps().readPixelsRowBytesSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+ }
+ if (this->glCaps().packFlipYSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
+ }
+ }
+
+ if (resetBits & kProgram_GrGLBackendState) {
+ fHWProgramID = 0;
+ fHWProgram.reset();
+ }
+ ++fResetTimestampForTextureParameters;
+}
+
+static bool check_backend_texture(const GrBackendTexture& backendTex, const GrColorType colorType,
+ const GrGLCaps& caps, GrGLTexture::Desc* desc,
+ bool skipRectTexSupportCheck = false) {
+ GrGLTextureInfo info;
+ if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) {
+ return false;
+ }
+
+ desc->fSize = {backendTex.width(), backendTex.height()};
+ desc->fTarget = info.fTarget;
+ desc->fID = info.fID;
+ desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
+
+ if (desc->fFormat == GrGLFormat::kUnknown) {
+ return false;
+ }
+ if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) {
+ if (!caps.shaderCaps()->externalTextureSupport()) {
+ return false;
+ }
+ } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) {
+ if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) {
+ return false;
+ }
+ } else if (GR_GL_TEXTURE_2D != desc->fTarget) {
+ return false;
+ }
+ if (backendTex.isProtected()) {
+ // Not supported in GL backend at this time.
+ return false;
+ }
+
+ desc->fConfig = caps.getConfigFromBackendFormat(backendTex.getBackendFormat(), colorType);
+ SkASSERT(desc->fConfig != kUnknown_GrPixelConfig);
+
+ return true;
+}
+
+sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType colorType, GrWrapOwnership ownership,
+ GrWrapCacheable cacheable, GrIOType ioType) {
+ GrGLTexture::Desc desc;
+ if (!check_backend_texture(backendTex, colorType, this->glCaps(), &desc)) {
+ return nullptr;
+ }
+
+ if (kBorrow_GrWrapOwnership == ownership) {
+ desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
+ } else {
+ desc.fOwnership = GrBackendObjectOwnership::kOwned;
+ }
+
+ GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid
+ : GrMipMapsStatus::kNotAllocated;
+
+ auto texture = GrGLTexture::MakeWrapped(this, mipMapsStatus, desc,
+ backendTex.getGLTextureParams(), cacheable, ioType);
+ // We don't know what parameters are already set on wrapped textures.
+ texture->textureParamsModified();
+ return texture;
+}
+
+sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable) {
+ const GrGLCaps& caps = this->glCaps();
+
+ GrGLTexture::Desc desc;
+ if (!check_backend_texture(backendTex, colorType, this->glCaps(), &desc)) {
+ return nullptr;
+ }
+ SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt));
+ SkASSERT(caps.isFormatTexturable(desc.fFormat));
+
+    // We don't support rendering to an EXTERNAL texture.
+ if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) {
+ return nullptr;
+ }
+
+ if (kBorrow_GrWrapOwnership == ownership) {
+ desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
+ } else {
+ desc.fOwnership = GrBackendObjectOwnership::kOwned;
+ }
+
+ sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat);
+ SkASSERT(sampleCnt);
+
+ GrGLRenderTarget::IDs rtIDs;
+ if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) {
+ return nullptr;
+ }
+
+ GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty
+ : GrMipMapsStatus::kNotAllocated;
+
+ sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped(
+ this, sampleCnt, desc, backendTex.getGLTextureParams(), rtIDs, cacheable,
+ mipMapsStatus));
+ texRT->baseLevelWasBoundToFBO();
+ // We don't know what parameters are already set on wrapped textures.
+ texRT->textureParamsModified();
+ return texRT;
+}
+
+sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
+ GrColorType grColorType) {
+ GrGLFramebufferInfo info;
+ if (!backendRT.getGLFramebufferInfo(&info)) {
+ return nullptr;
+ }
+
+ if (backendRT.isProtected()) {
+ // Not supported in GL at this time.
+ return nullptr;
+ }
+
+ const auto format = backendRT.getBackendFormat().asGLFormat();
+ if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) {
+ return nullptr;
+ }
+
+ GrGLRenderTarget::IDs rtIDs;
+ rtIDs.fRTFBOID = info.fFBOID;
+ rtIDs.fMSColorRenderbufferID = 0;
+ rtIDs.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
+ rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendRT.getBackendFormat(),
+ grColorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ const auto size = SkISize::Make(backendRT.width(), backendRT.height());
+ int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format);
+
+ return GrGLRenderTarget::MakeWrapped(this, size, format, config, sampleCount, rtIDs,
+ backendRT.stencilBits());
+}
+
+sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType colorType) {
+ GrGLTexture::Desc desc;
+ // We do not check whether texture rectangle is supported by Skia - if the caller provided us
+ // with a texture rectangle, we assume the necessary support exists.
+ if (!check_backend_texture(tex, colorType, this->glCaps(), &desc, true)) {
+ return nullptr;
+ }
+
+ if (!this->glCaps().isFormatRenderable(desc.fFormat, sampleCnt)) {
+ return nullptr;
+ }
+
+ const int sampleCount = this->glCaps().getRenderTargetSampleCount(sampleCnt, desc.fFormat);
+ GrGLRenderTarget::IDs rtIDs;
+ if (!this->createRenderTargetObjects(desc, sampleCount, &rtIDs)) {
+ return nullptr;
+ }
+ return GrGLRenderTarget::MakeWrapped(this, desc.fSize, desc.fFormat, desc.fConfig, sampleCount,
+ rtIDs, 0);
+}
+
+static bool check_write_and_transfer_input(GrGLTexture* glTex) {
+ if (!glTex) {
+ return false;
+ }
+
+ // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
+ if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) {
+ auto glTex = static_cast<GrGLTexture*>(surface->asTexture());
+
+ if (!check_write_and_transfer_input(glTex)) {
+ return false;
+ }
+
+ this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
+
+ SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
+ return this->uploadTexData(glTex->format(), surfaceColorType, glTex->width(), glTex->height(),
+ glTex->target(), left, top, width, height, srcColorType, texels,
+ mipLevelCount);
+}
+
+bool GrGLGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
+ GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
+
+ // Can't transfer compressed data
+ SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
+
+ if (!check_write_and_transfer_input(glTex)) {
+ return false;
+ }
+
+ static_assert(sizeof(int) == sizeof(int32_t), "");
+ if (width <= 0 || height <= 0) {
+ return false;
+ }
+
+ this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
+
+ SkASSERT(!transferBuffer->isMapped());
+ SkASSERT(!transferBuffer->isCpuBuffer());
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
+ this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
+
+ SkDEBUGCODE(
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
+ SkASSERT(bounds.contains(subRect));
+ )
+
+ size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
+ const size_t trimRowBytes = width * bpp;
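+ // With a pixel unpack buffer bound, the pointer argument to TexSubImage2D below is
+ // interpreted as a byte offset into that buffer rather than as a client memory address.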
+ const void* pixels = (void*)offset;
+
+ bool restoreGLRowLength = false;
+ if (trimRowBytes != rowBytes) {
+ // we should have checked for this support already
+ SkASSERT(this->glCaps().writePixelsRowBytesSupport());
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
+ restoreGLRowLength = true;
+ }
+
+ GrGLFormat textureFormat = glTex->format();
+ // External format and type come from the upload data.
+ GrGLenum externalFormat = 0;
+ GrGLenum externalType = 0;
+ this->glCaps().getTexSubImageExternalFormatAndType(
+ textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType);
+ if (!externalFormat || !externalType) {
+ return false;
+ }
+
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
+ GL_CALL(TexSubImage2D(glTex->target(),
+ 0,
+ left, top,
+ width,
+ height,
+ externalFormat, externalType,
+ pixels));
+
+ if (restoreGLRowLength) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+
+ return true;
+}
+
+bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) {
+ auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer);
+ this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer);
+ auto offsetAsPtr = reinterpret_cast<void*>(offset);
+ return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
+ dstColorType, offsetAsPtr, width);
+}
+
+void GrGLGpu::unbindCpuToGpuXferBuffer() {
+ auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu);
+ if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) {
+ GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
+ xferBufferState->invalidate();
+ }
+}
+
+bool GrGLGpu::uploadTexData(GrGLFormat textureFormat, GrColorType textureColorType, int texWidth,
+ int texHeight, GrGLenum target, int left, int top, int width,
+ int height, GrColorType srcColorType, const GrMipLevel texels[],
+ int mipLevelCount, GrMipMapsStatus* mipMapsStatus) {
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrGLFormatIsCompressed(textureFormat));
+
+ SkASSERT(this->glCaps().isFormatTexturable(textureFormat));
+ SkDEBUGCODE(
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight);
+ SkASSERT(bounds.contains(subRect));
+ )
+ SkASSERT(1 == mipLevelCount ||
+ (0 == left && 0 == top && width == texWidth && height == texHeight));
+
+ this->unbindCpuToGpuXferBuffer();
+
+ const GrGLInterface* interface = this->glInterface();
+ const GrGLCaps& caps = this->glCaps();
+
+ size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ // External format and type come from the upload data.
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+ this->glCaps().getTexSubImageExternalFormatAndType(
+ textureFormat, textureColorType, srcColorType, &externalFormat, &externalType);
+ if (!externalFormat || !externalType) {
+ return false;
+ }
+
+ /*
+ * Our srcData may have extra bytes past each row. If so, we need GL_UNPACK_ROW_LENGTH
+ * to skip those bytes, but GL ES may not let us specify it; the caps check in the loop
+ * below guards that case.
+ */
+ bool restoreGLRowLength = false;
+
+ if (mipMapsStatus) {
+ *mipMapsStatus = (mipLevelCount > 1) ?
+ GrMipMapsStatus::kValid : GrMipMapsStatus::kNotAllocated;
+ }
+
+ GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
+
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (!texels[currentMipLevel].fPixels) {
+ if (mipMapsStatus) {
+ *mipMapsStatus = GrMipMapsStatus::kDirty;
+ }
+ continue;
+ }
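+ // Each successive mip level halves the dimensions, clamped to a minimum of one pixel.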
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ const int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ const int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+ const size_t trimRowBytes = currentWidth * bpp;
+ const size_t rowBytes = texels[currentMipLevel].fRowBytes;
+
+ if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) {
+ GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
+ GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
+ restoreGLRowLength = true;
+ }
+
+ GL_CALL(TexSubImage2D(target, currentMipLevel, left, top, currentWidth, currentHeight,
+ externalFormat, externalType, texels[currentMipLevel].fPixels));
+ }
+ if (restoreGLRowLength) {
+ SkASSERT(caps.writePixelsRowBytesSupport());
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+ return true;
+}
+
+bool GrGLGpu::uploadCompressedTexData(GrGLFormat format,
+ SkImage::CompressionType compressionType,
+ const SkISize& size,
+ GrGLenum target,
+ const void* data) {
+ SkASSERT(format != GrGLFormat::kUnknown);
+ const GrGLCaps& caps = this->glCaps();
+
+ // We only need the internal format for compressed 2D textures.
+ GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format);
+ if (!internalFormat) {
+ return false;
+ }
+
+ bool useTexStorage = caps.formatSupportsTexStorage(format);
+
+ static constexpr int kMipLevelCount = 1;
+
+ // Make sure that the width and height that we pass to OpenGL
+ // is a multiple of the block size.
+ size_t dataSize = GrCompressedDataSize(compressionType, size.width(), size.height());
+
+ if (useTexStorage) {
+ // We never resize or change formats of textures.
+ GL_ALLOC_CALL(
+ this->glInterface(),
+ TexStorage2D(target, kMipLevelCount, internalFormat, size.width(), size.height()));
+ GrGLenum error = CHECK_ALLOC_ERROR(this->glInterface());
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ GL_CALL(CompressedTexSubImage2D(target,
+ 0, // level
+ 0, // left
+ 0, // top
+ size.width(),
+ size.height(),
+ internalFormat,
+ SkToInt(dataSize),
+ data));
+ } else {
+ GL_ALLOC_CALL(this->glInterface(), CompressedTexImage2D(target,
+ 0, // level
+ internalFormat,
+ size.width(),
+ size.height(),
+ 0, // border
+ SkToInt(dataSize),
+ data));
+
+ GrGLenum error = CHECK_ALLOC_ERROR(this->glInterface());
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
+ int sampleCount,
+ GrGLenum format,
+ int width, int height) {
+ CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
+ SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
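+ // Pick the renderbuffer allocation entry point that matches the flavor of MSAA FBO
+ // support reported by the caps (core/EXT, Apple's ES extension, or the ES
+ // multisample-to-texture extensions).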
+ switch (ctx.caps()->msFBOType()) {
+ case GrGLCaps::kStandard_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kES_Apple_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
+ case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kNone_MSFBOType:
+ SK_ABORT("Shouldn't be here if we don't support multisampled renderbuffers.");
+ break;
+ }
+ return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
+}
+
+bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc,
+ int sampleCount,
+ GrGLRenderTarget::IDs* rtIDs) {
+ rtIDs->fMSColorRenderbufferID = 0;
+ rtIDs->fRTFBOID = 0;
+ rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
+ rtIDs->fTexFBOID = 0;
+
+ GrGLenum colorRenderbufferFormat = 0; // suppress warning
+
+ if (desc.fFormat == GrGLFormat::kUnknown) {
+ goto FAILED;
+ }
+
+ if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
+ goto FAILED;
+ }
+
+ GL_CALL(GenFramebuffers(1, &rtIDs->fTexFBOID));
+ if (!rtIDs->fTexFBOID) {
+ goto FAILED;
+ }
+
+ // If we are using multisampling we will create two FBOs. We render to one and then resolve to
+ // the texture bound to the other. The exception is the IMG multisample extension. With this
+ // extension the texture is multisampled when rendered to and then auto-resolves when it is
+ // rendered from.
+ if (sampleCount > 1 && this->glCaps().usesMSAARenderBuffers()) {
+ GL_CALL(GenFramebuffers(1, &rtIDs->fRTFBOID));
+ GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
+ if (!rtIDs->fRTFBOID || !rtIDs->fMSColorRenderbufferID) {
+ goto FAILED;
+ }
+ colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat);
+ } else {
+ rtIDs->fRTFBOID = rtIDs->fTexFBOID;
+ }
+
+ // below here we may bind the FBO
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) {
+ SkASSERT(sampleCount > 1);
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID));
+ if (!renderbuffer_storage_msaa(*fGLContext, sampleCount, colorRenderbufferFormat,
+ desc.fSize.width(), desc.fSize.height())) {
+ goto FAILED;
+ }
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fRTFBOID);
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_RENDERBUFFER,
+ rtIDs->fMSColorRenderbufferID));
+ }
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fTexFBOID);
+
+ if (this->glCaps().usesImplicitMSAAResolve() && sampleCount > 1) {
+ GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ desc.fTarget,
+ desc.fID,
+ 0,
+ sampleCount));
+ } else {
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ desc.fTarget,
+ desc.fID,
+ 0));
+ }
+
+ return true;
+
+FAILED:
+ if (rtIDs->fMSColorRenderbufferID) {
+ GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
+ }
+ if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) {
+ this->deleteFramebuffer(rtIDs->fRTFBOID);
+ }
+ if (rtIDs->fTexFBOID) {
+ this->deleteFramebuffer(rtIDs->fTexFBOID);
+ }
+ return false;
+}
+
+// good to set a break-point here to know when createTexture fails
+static sk_sp<GrTexture> return_null_texture() {
+// SkDEBUGFAIL("null texture");
+ return nullptr;
+}
+
+static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params(
+ const GrGLInterface* interface, GrGLenum target) {
+ // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
+ // drivers have a bug where an FBO won't be complete if it includes a
+ // texture that is not mipmap complete (considering the filter in use).
+ GrGLTextureParameters::SamplerOverriddenState state;
+ state.fMinFilter = GR_GL_NEAREST;
+ state.fMagFilter = GR_GL_NEAREST;
+ state.fWrapS = GR_GL_CLAMP_TO_EDGE;
+ state.fWrapT = GR_GL_CLAMP_TO_EDGE;
+ GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter));
+ GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter));
+ GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS));
+ GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT));
+ return state;
+}
+
+sk_sp<GrTexture> GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ // We don't support protected textures in GL.
+ if (isProtected == GrProtected::kYes) {
+ return nullptr;
+ }
+ SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1);
+
+ SkASSERT(mipLevelCount > 0);
+ GrMipMapsStatus mipMapsStatus =
+ mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
+ GrGLTextureParameters::SamplerOverriddenState initialState;
+ GrGLTexture::Desc texDesc;
+ texDesc.fSize = {desc.fWidth, desc.fHeight};
+ texDesc.fTarget = GR_GL_TEXTURE_2D;
+ texDesc.fFormat = format.asGLFormat();
+ texDesc.fConfig = desc.fConfig;
+ texDesc.fOwnership = GrBackendObjectOwnership::kOwned;
+ SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown);
+ SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat));
+
+ texDesc.fID = this->createTexture2D({desc.fWidth, desc.fHeight}, texDesc.fFormat, renderable,
+ &initialState, mipLevelCount);
+
+ if (!texDesc.fID) {
+ return return_null_texture();
+ }
+
+ sk_sp<GrGLTexture> tex;
+ if (renderable == GrRenderable::kYes) {
+ // unbind the texture from the texture unit before binding it to the frame buffer
+ GL_CALL(BindTexture(texDesc.fTarget, 0));
+ GrGLRenderTarget::IDs rtIDDesc;
+
+ if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) {
+ GL_CALL(DeleteTextures(1, &texDesc.fID));
+ return return_null_texture();
+ }
+ tex = sk_make_sp<GrGLTextureRenderTarget>(
+ this, budgeted, renderTargetSampleCnt, texDesc, rtIDDesc, mipMapsStatus);
+ tex->baseLevelWasBoundToFBO();
+ } else {
+ tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipMapsStatus);
+ }
+ // The non-sampler params are still at their default values.
+ tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
+ fResetTimestampForTextureParameters);
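+ // Clear the requested levels using the cheapest mechanism available: ClearTexImage when
+ // supported, otherwise an FBO color clear, otherwise a TexSubImage2D upload of zeroed
+ // memory.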
+ if (levelClearMask) {
+ GrGLenum externalFormat, externalType;
+ size_t bpp;
+ this->glCaps().getTexSubImageZeroFormatTypeAndBpp(texDesc.fFormat, &externalFormat,
+ &externalType, &bpp);
+ if (this->glCaps().clearTextureSupport()) {
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (levelClearMask & (1U << i)) {
+ GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType,
+ nullptr));
+ }
+ }
+ } else if (this->glCaps().canFormatBeFBOColorAttachment(format.asGLFormat()) &&
+ !this->glCaps().performColorClearsAsDraws()) {
+ this->disableScissor();
+ this->disableWindowRectangles();
+ this->flushColorWrite(true);
+ this->flushClearColor(SK_PMColor4fTRANSPARENT);
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (levelClearMask & (1U << i)) {
+ this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER,
+ kDst_TempFBOTarget);
+ GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
+ this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER);
+ }
+ }
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ } else {
+ std::unique_ptr<char[]> zeros;
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (levelClearMask & (1U << i)) {
+ int levelWidth = SkTMax(1, texDesc.fSize.width() >> i);
+ int levelHeight = SkTMax(1, texDesc.fSize.height() >> i);
+ // Levels only get smaller as we proceed. Once we create a zeroed buffer, use it for all
+ // smaller levels that need clearing.
+ if (!zeros) {
+ size_t size = levelWidth * levelHeight * bpp;
+ zeros.reset(new char[size]());
+ }
+ this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, tex->textureID());
+ GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, i, 0, 0, levelWidth, levelHeight,
+ externalFormat, externalType, zeros.get()));
+ }
+ }
+ }
+ }
+ return tex;
+}
+
+sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(int width, int height,
+ const GrBackendFormat& format,
+ SkImage::CompressionType compression,
+ SkBudgeted budgeted, const void* data) {
+ GrGLTextureParameters::SamplerOverriddenState initialState;
+ GrGLTexture::Desc desc;
+ desc.fSize = {width, height};
+ desc.fTarget = GR_GL_TEXTURE_2D;
+ desc.fConfig = GrCompressionTypePixelConfig(compression);
+ desc.fOwnership = GrBackendObjectOwnership::kOwned;
+ desc.fFormat = format.asGLFormat();
+ desc.fID = this->createCompressedTexture2D(desc.fSize, desc.fFormat, compression, &initialState,
+ data);
+ if (!desc.fID) {
+ return nullptr;
+ }
+ auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, GrMipMapsStatus::kNotAllocated);
+ // The non-sampler params are still at their default values.
+ tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
+ fResetTimestampForTextureParameters);
+ return tex;
+}
+
+namespace {
+
+const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
+
+inline void get_stencil_rb_sizes(const GrGLInterface* gl,
+ GrGLStencilAttachment::Format* format) {
+
+ // we shouldn't ever know one size and not the other
+ SkASSERT((kUnknownBitCount == format->fStencilBits) ==
+ (kUnknownBitCount == format->fTotalBits));
+ if (kUnknownBitCount == format->fStencilBits) {
+ GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+ GR_GL_RENDERBUFFER_STENCIL_SIZE,
+ (GrGLint*)&format->fStencilBits);
+ if (format->fPacked) {
+ GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+ GR_GL_RENDERBUFFER_DEPTH_SIZE,
+ (GrGLint*)&format->fTotalBits);
+ format->fTotalBits += format->fStencilBits;
+ } else {
+ format->fTotalBits = format->fStencilBits;
+ }
+ }
+}
+} // namespace
+
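+// Finds a stencil format from the caps that forms a complete FBO with the given color
+// format. The result is probed once using a small scratch texture and then cached on the
+// caps for later queries.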
+int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) {
+ static const int kSize = 16;
+ SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format));
+
+ if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) {
+ // Default to unsupported, set this if we find a stencil format that works.
+ int firstWorkingStencilFormatIndex = -1;
+
+ GrGLuint colorID =
+ this->createTexture2D({kSize, kSize}, format, GrRenderable::kYes, nullptr, 1);
+ if (!colorID) {
+ return -1;
+ }
+ // unbind the texture from the texture unit before binding it to the frame buffer
+ GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
+
+ // Create Framebuffer
+ GrGLuint fb = 0;
+ GL_CALL(GenFramebuffers(1, &fb));
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D,
+ colorID,
+ 0));
+ GrGLuint sbRBID = 0;
+ GL_CALL(GenRenderbuffers(1, &sbRBID));
+
+ // Look over the stencil formats until we find a compatible one.
+ int stencilFmtCnt = this->glCaps().stencilFormats().count();
+ if (sbRBID) {
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
+ for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
+ const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i];
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
+ sFmt.fInternalFormat,
+ kSize, kSize));
+ if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, sbRBID));
+ if (sFmt.fPacked) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, sbRBID));
+ } else {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+ GrGLenum status;
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
+ firstWorkingStencilFormatIndex = i;
+ break;
+ }
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ if (sFmt.fPacked) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+ }
+ }
+ GL_CALL(DeleteRenderbuffers(1, &sbRBID));
+ }
+ GL_CALL(DeleteTextures(1, &colorID));
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
+ this->deleteFramebuffer(fb);
+ fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex);
+ }
+ return this->glCaps().getStencilFormatIndexForFormat(format);
+}
+
+GrGLuint GrGLGpu::createCompressedTexture2D(
+ const SkISize& size,
+ GrGLFormat format,
+ SkImage::CompressionType compression,
+ GrGLTextureParameters::SamplerOverriddenState* initialState,
+ const void* data) {
+ if (format == GrGLFormat::kUnknown) {
+ return 0;
+ }
+ GrGLuint id = 0;
+ GL_CALL(GenTextures(1, &id));
+ if (!id) {
+ return 0;
+ }
+
+ this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
+
+ *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D);
+
+ if (!this->uploadCompressedTexData(format, compression, size, GR_GL_TEXTURE_2D, data)) {
+ GL_CALL(DeleteTextures(1, &id));
+ return 0;
+ }
+ return id;
+}
+
+GrGLuint GrGLGpu::createTexture2D(const SkISize& size,
+ GrGLFormat format,
+ GrRenderable renderable,
+ GrGLTextureParameters::SamplerOverriddenState* initialState,
+ int mipLevelCount) {
+ SkASSERT(format != GrGLFormat::kUnknown);
+ SkASSERT(!GrGLFormatIsCompressed(format));
+
+ GrGLuint id = 0;
+ GL_CALL(GenTextures(1, &id));
+
+ if (!id) {
+ return 0;
+ }
+
+ this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
+
+ if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) {
+ // provides a hint about how this texture will be used
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT));
+ }
+
+ if (initialState) {
+ *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D);
+ } else {
+ set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D);
+ }
+
+ GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format);
+
+ bool success = false;
+ if (internalFormat) {
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ if (this->glCaps().formatSupportsTexStorage(format)) {
+ GL_ALLOC_CALL(this->glInterface(),
+ TexStorage2D(GR_GL_TEXTURE_2D, SkTMax(mipLevelCount, 1), internalFormat,
+ size.width(), size.height()));
+ success = (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface()));
+ } else {
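+ // Without TexStorage support we allocate each requested level individually with
+ // TexImage2D.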
+ GrGLenum externalFormat, externalType;
+ this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType);
+ GrGLenum error = GR_GL_NO_ERROR;
+ if (externalFormat && externalType) {
+ for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) {
+ const int twoToTheMipLevel = 1 << level;
+ const int currentWidth = SkTMax(1, size.width() / twoToTheMipLevel);
+ const int currentHeight = SkTMax(1, size.height() / twoToTheMipLevel);
+ GL_ALLOC_CALL(
+ this->glInterface(),
+ TexImage2D(GR_GL_TEXTURE_2D, level, internalFormat, currentWidth,
+ currentHeight, 0, externalFormat, externalType, nullptr));
+ error = CHECK_ALLOC_ERROR(this->glInterface());
+ }
+ success = (GR_GL_NO_ERROR == error);
+ }
+ }
+ }
+ if (success) {
+ return id;
+ }
+ GL_CALL(DeleteTextures(1, &id));
+ return 0;
+}
+
+GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(
+ const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ GrGLStencilAttachment::IDDesc sbDesc;
+
+ int sIdx = this->getCompatibleStencilIndex(rt->backendFormat().asGLFormat());
+ if (sIdx < 0) {
+ return nullptr;
+ }
+
+ if (!sbDesc.fRenderbufferID) {
+ GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
+ }
+ if (!sbDesc.fRenderbufferID) {
+ return nullptr;
+ }
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
+ const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ // we do this "if" so that we don't call the multisample
+ // version on a GL that doesn't have an MSAA extension.
+ if (numStencilSamples > 1) {
+ SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
+ numStencilSamples,
+ sFmt.fInternalFormat,
+ width, height));
+ } else {
+ GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
+ sFmt.fInternalFormat,
+ width, height));
+ SkASSERT(GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface()));
+ }
+ fStats.incStencilAttachmentCreates();
+ // After sized formats we attempt an unsized format and take
+ // whatever sizes GL gives us. In that case we query for the size.
+ GrGLStencilAttachment::Format format = sFmt;
+ get_stencil_rb_sizes(this->glInterface(), &format);
+ GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
+ sbDesc,
+ width,
+ height,
+ numStencilSamples,
+ format);
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
+}
+
+void GrGLGpu::flushScissor(const GrScissorState& scissorState, int rtWidth, int rtHeight,
+ GrSurfaceOrigin rtOrigin) {
+ if (scissorState.enabled()) {
+ auto scissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissorState.rect());
+ // if the scissor fully contains the viewport then we fall through and
+ // disable the scissor test.
+ if (!scissor.contains(rtWidth, rtHeight)) {
+ if (fHWScissorSettings.fRect != scissor) {
+ GL_CALL(Scissor(scissor.fX, scissor.fY, scissor.fWidth, scissor.fHeight));
+ fHWScissorSettings.fRect = scissor;
+ }
+ if (kYes_TriState != fHWScissorSettings.fEnabled) {
+ GL_CALL(Enable(GR_GL_SCISSOR_TEST));
+ fHWScissorSettings.fEnabled = kYes_TriState;
+ }
+ return;
+ }
+ }
+
+ // See fall through note above
+ this->disableScissor();
+}
+
+void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
+ const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
+#ifndef USE_NSIGHT
+ typedef GrWindowRectsState::Mode Mode;
+ SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
+ SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
+
+ if (!this->caps()->maxWindowRectangles() ||
+ fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
+ return;
+ }
+
+ // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
+ // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
+ int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
+ SkASSERT(windowState.numWindows() == numWindows);
+
+ GrNativeRect glwindows[GrWindowRectangles::kMaxWindows];
+ const SkIRect* skwindows = windowState.windows().data();
+ for (int i = 0; i < numWindows; ++i) {
+ glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]);
+ }
+
+ GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
+ GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
+
+ fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
+#endif
+}
+
+void GrGLGpu::disableWindowRectangles() {
+#ifndef USE_NSIGHT
+ if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
+ return;
+ }
+ GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
+ fHWWindowRectsState.setDisabled();
+#endif
+}
+
+bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType) {
+
+ sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, renderTarget, programInfo,
+ primitiveType));
+ if (!program) {
+ GrCapsDebugf(this->caps(), "Failed to create program!\n");
+ return false;
+ }
+
+ this->flushProgram(std::move(program));
+
+ // Swizzle the blend to match what the shader will output.
+ this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(),
+ programInfo.pipeline().outputSwizzle());
+
+ fHWProgram->updateUniformsAndTextureBindings(renderTarget, programInfo);
+
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
+ GrStencilSettings stencil;
+ if (programInfo.pipeline().isStencilEnabled()) {
+ // TODO: attach stencil and create settings during render target flush.
+ SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
+ stencil.reset(*programInfo.pipeline().getUserStencil(),
+ programInfo.pipeline().hasStencilClip(),
+ glRT->renderTargetPriv().numStencilBits());
+ }
+ this->flushStencil(stencil, programInfo.origin());
+ if (programInfo.pipeline().isScissorEnabled()) {
+ static constexpr SkIRect kBogusScissor{0, 0, 1, 1};
+ GrScissorState state(programInfo.fixedDynamicState() ? programInfo.fixedScissor()
+ : kBogusScissor);
+ this->flushScissor(state, glRT->width(), glRT->height(), programInfo.origin());
+ } else {
+ this->disableScissor();
+ }
+ this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(),
+ glRT, programInfo.origin());
+ this->flushHWAAState(glRT, programInfo.pipeline().isHWAntialiasState());
+
+ // This must come after textures are flushed because a texture may need
+ // to be msaa-resolved (which will modify bound FBO state).
+ this->flushRenderTarget(glRT);
+
+ return true;
+}
+
+void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
+ if (!program) {
+ fHWProgram.reset();
+ fHWProgramID = 0;
+ return;
+ }
+ SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
+ if (program == fHWProgram) {
+ return;
+ }
+ auto id = program->programID();
+ SkASSERT(id);
+ GL_CALL(UseProgram(id));
+ fHWProgram = std::move(program);
+ fHWProgramID = id;
+}
+
+void GrGLGpu::flushProgram(GrGLuint id) {
+ SkASSERT(id);
+ if (fHWProgramID == id) {
+ SkASSERT(!fHWProgram);
+ return;
+ }
+ fHWProgram.reset();
+ GL_CALL(UseProgram(id));
+ fHWProgramID = id;
+}
+
+void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int baseInstance,
+ GrPrimitiveRestart enablePrimitiveRestart) {
+ SkASSERT((enablePrimitiveRestart == GrPrimitiveRestart::kNo) || indexBuffer);
+
+ GrGLAttribArrayState* attribState;
+ if (indexBuffer) {
+ SkASSERT(indexBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
+ attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
+ } else {
+ attribState = fHWVertexArrayState.bindInternalVertexArray(this);
+ }
+
+ int numAttribs = fHWProgram->numVertexAttributes() + fHWProgram->numInstanceAttributes();
+ attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);
+
+ if (int vertexStride = fHWProgram->vertexStride()) {
+ SkASSERT(vertexBuffer);
+ SkASSERT(vertexBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
+ size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
+ for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
+ const auto& attrib = fHWProgram->vertexAttribute(i);
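+ // A divisor of 0 advances the attribute once per vertex.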
+ static constexpr int kDivisor = 0;
+ attribState->set(this, attrib.fLocation, vertexBuffer, attrib.fCPUType, attrib.fGPUType,
+ vertexStride, bufferOffset + attrib.fOffset, kDivisor);
+ }
+ }
+ if (int instanceStride = fHWProgram->instanceStride()) {
+ SkASSERT(instanceBuffer);
+ SkASSERT(instanceBuffer->isCpuBuffer() ||
+ !static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
+ size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
+ int attribIdx = fHWProgram->numVertexAttributes();
+ for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
+ const auto& attrib = fHWProgram->instanceAttribute(i);
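+ // A divisor of 1 advances the attribute once per instance.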
+ static constexpr int kDivisor = 1;
+ attribState->set(this, attrib.fLocation, instanceBuffer, attrib.fCPUType,
+ attrib.fGPUType, instanceStride, bufferOffset + attrib.fOffset,
+ kDivisor);
+ }
+ }
+}
+
+GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
+ this->handleDirtyContext();
+
+ // Index buffer state is tied to the vertex array.
+ if (GrGpuBufferType::kIndex == type) {
+ this->bindVertexArray(0);
+ }
+
+ auto* bufferState = this->hwBufferState(type);
+ if (buffer->isCpuBuffer()) {
+ if (!bufferState->fBufferZeroKnownBound) {
+ GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
+ bufferState->fBufferZeroKnownBound = true;
+ bufferState->fBoundBufferUniqueID.makeInvalid();
+ }
+ } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
+ bufferState->fBoundBufferUniqueID) {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
+ GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
+ bufferState->fBufferZeroKnownBound = false;
+ bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
+ }
+
+ return bufferState->fGLTarget;
+}
+
+void GrGLGpu::disableScissor() {
+ if (kNo_TriState != fHWScissorSettings.fEnabled) {
+ GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+ fHWScissorSettings.fEnabled = kNo_TriState;
+ }
+}
+
+void GrGLGpu::clear(const GrFixedClip& clip, const SkPMColor4f& color,
+ GrRenderTarget* target, GrSurfaceOrigin origin) {
+ // parent class should never let us get here with no RT
+ SkASSERT(target);
+ SkASSERT(!this->caps()->performColorClearsAsDraws());
+ SkASSERT(!clip.scissorEnabled() || !this->caps()->performPartialClearsAsDraws());
+
+ this->handleDirtyContext();
+
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+
+ if (clip.scissorEnabled()) {
+ this->flushRenderTarget(glRT, origin, clip.scissorRect());
+ } else {
+ this->flushRenderTarget(glRT);
+ }
+ this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin);
+ this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);
+ this->flushColorWrite(true);
+ this->flushClearColor(color);
+ GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
+}
+
+void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) {
+ SkASSERT(!this->caps()->performStencilClearsAsDraws());
+
+ if (!target) {
+ return;
+ }
+
+ // This should only be called internally when we know we have a stencil buffer.
+ SkASSERT(target->renderTargetPriv().getStencilAttachment());
+
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+ this->flushRenderTargetNoColorWrites(glRT);
+
+ this->disableScissor();
+ this->disableWindowRectangles();
+
+ GL_CALL(StencilMask(0xffffffff));
+ GL_CALL(ClearStencil(clearValue));
+ GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+ fHWStencilSettings.invalidate();
+}
+
+static bool use_tiled_rendering(const GrGLCaps& glCaps,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
+ // Only use the tiled rendering extension if we can explicitly clear and discard the stencil.
+ // Otherwise it's faster to just not use it.
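+ // ("Tiled rendering" refers to the StartTiling/EndTiling API, i.e. the
+ // GL_QCOM_tiled_rendering extension.)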
+ return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp &&
+ GrStoreOp::kDiscard == stencilLoadStore.fStoreOp;
+}
+
+void GrGLGpu::beginCommandBuffer(GrRenderTarget* rt, const SkIRect& bounds, GrSurfaceOrigin origin,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
+ SkASSERT(!fIsExecutingCommandBuffer_DebugOnly);
+
+ this->handleDirtyContext();
+
+ auto glRT = static_cast<GrGLRenderTarget*>(rt);
+ this->flushRenderTarget(glRT);
+ SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true);
+
+ if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
+ auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, glRT->height(), bounds);
+ GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp)
+ ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
+ SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering().
+ GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth,
+ nativeBounds.fHeight, preserveMask));
+ }
+
+ GrGLbitfield clearMask = 0;
+ if (GrLoadOp::kClear == colorLoadStore.fLoadOp) {
+ SkASSERT(!this->caps()->performColorClearsAsDraws());
+ this->flushClearColor(colorLoadStore.fClearColor);
+ this->flushColorWrite(true);
+ clearMask |= GR_GL_COLOR_BUFFER_BIT;
+ }
+ if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) {
+ SkASSERT(!this->caps()->performStencilClearsAsDraws());
+ GL_CALL(StencilMask(0xffffffff));
+ GL_CALL(ClearStencil(0));
+ clearMask |= GR_GL_STENCIL_BUFFER_BIT;
+ }
+ if (clearMask) {
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GL_CALL(Clear(clearMask));
+ }
+}
+
+void GrGLGpu::endCommandBuffer(GrRenderTarget* rt,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
+ SkASSERT(fIsExecutingCommandBuffer_DebugOnly);
+
+ this->handleDirtyContext();
+
+ if (rt->uniqueID() != fHWBoundRenderTargetUniqueID) {
+ // The framebuffer binding changed in the middle of a command buffer. We should have already
+ // printed a warning during onFBOChanged.
+ return;
+ }
+
+ if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) {
+ auto glRT = static_cast<GrGLRenderTarget*>(rt);
+
+ SkSTArray<2, GrGLenum> discardAttachments;
+ if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) {
+ discardAttachments.push_back(
+ (0 == glRT->renderFBOID()) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0);
+ }
+ if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) {
+ discardAttachments.push_back(
+ (0 == glRT->renderFBOID()) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT);
+ }
+
+ if (!discardAttachments.empty()) {
+ if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) {
+ GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(),
+ discardAttachments.begin()));
+ } else {
+ SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType());
+ GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(),
+ discardAttachments.begin()));
+ }
+ }
+ }
+
+ if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
+ GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp)
+ ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
+ // Handled by use_tiled_rendering().
+ SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp);
+ GL_CALL(EndTiling(preserveMask));
+ }
+
+ SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false);
+}
+
+void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTarget* target, GrSurfaceOrigin origin) {
+ SkASSERT(target);
+ SkASSERT(!this->caps()->performStencilClearsAsDraws());
+ this->handleDirtyContext();
+
+ GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
+ // this should only be called internally when we know we have a
+ // stencil buffer.
+ SkASSERT(sb);
+ GrGLint stencilBitCount = sb->bits();
+#if 0
+ SkASSERT(stencilBitCount > 0);
+ GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
+#else
+ // we could just clear the clip bit but when we go through
+ // ANGLE a partial stencil mask will cause clears to be
+ // turned into draws. Our contract on GrOpsTask says that
+ // changing the clip between stencil passes may or may not
+ // zero the client's clip bits. So we just clear the whole thing.
+ static const GrGLint clipStencilMask = ~0;
+#endif
+ GrGLint value;
+ if (insideStencilMask) {
+ value = (1 << (stencilBitCount - 1));
+ } else {
+ value = 0;
+ }
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+ this->flushRenderTargetNoColorWrites(glRT);
+
+ this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin);
+ this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);
+
+ GL_CALL(StencilMask((uint32_t) clipStencilMask));
+ GL_CALL(ClearStencil(value));
+ GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+ fHWStencilSettings.invalidate();
+}
+
+bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType,
+ void* offsetOrPtr, int rowWidthInPixels) {
+ SkASSERT(surface);
+
+ auto format = surface->backendFormat().asGLFormat();
+ GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
+ if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) {
+ return false;
+ }
+ GrGLenum externalFormat = 0;
+ GrGLenum externalType = 0;
+ this->glCaps().getReadPixelsFormat(surface->backendFormat().asGLFormat(),
+ surfaceColorType,
+ dstColorType,
+ &externalFormat,
+ &externalType);
+ if (!externalFormat || !externalType) {
+ return false;
+ }
+
+ if (renderTarget) {
+ if (renderTarget->numSamples() <= 1 ||
+ renderTarget->renderFBOID() == renderTarget->textureFBOID()) { // Also catches FBO 0.
+ SkASSERT(!renderTarget->requiresManualMSAAResolve());
+ this->flushRenderTargetNoColorWrites(renderTarget);
+ } else if (GrGLRenderTarget::kUnresolvableFBOID == renderTarget->textureFBOID()) {
+ SkASSERT(!renderTarget->requiresManualMSAAResolve());
+ return false;
+ } else {
+ SkASSERT(renderTarget->requiresManualMSAAResolve());
+ // we don't track the state of the READ FBO ID.
+ this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID());
+ }
+ } else {
+ // Use a temporary FBO.
+ this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ }
+
+ // the read rect is viewport-relative
+ GrNativeRect readRect = {left, top, width, height};
+
+ // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
+ if (rowWidthInPixels != width) {
+ SkASSERT(this->glCaps().readPixelsRowBytesSupport());
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
+ }
+ GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1));
+
+ bool reattachStencil = false;
+ if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() &&
+ renderTarget &&
+ renderTarget->renderTargetPriv().getStencilAttachment() &&
+ renderTarget->numSamples() > 1) {
+ // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached
+ reattachStencil = true;
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+
+ GL_CALL(ReadPixels(readRect.fX, readRect.fY, readRect.fWidth, readRect.fHeight,
+ externalFormat, externalType, offsetOrPtr));
+
+ if (reattachStencil) {
+ GrGLStencilAttachment* stencilAttachment = static_cast<GrGLStencilAttachment*>(
+ renderTarget->renderTargetPriv().getStencilAttachment());
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID()));
+ }
+
+ if (rowWidthInPixels != width) {
+ SkASSERT(this->glCaps().readPixelsRowBytesSupport());
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+ }
+
+ if (!renderTarget) {
+ this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);
+ }
+ return true;
+}
+
+bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) {
+ SkASSERT(surface);
+
+ size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType);
+
+ // GL_PACK_ROW_LENGTH is in terms of pixels not bytes.
+ int rowPixelWidth;
+
+ if (rowBytes == SkToSizeT(width * bytesPerPixel)) {
+ rowPixelWidth = width;
+ } else {
+ SkASSERT(!(rowBytes % bytesPerPixel));
+ rowPixelWidth = rowBytes / bytesPerPixel;
+ }
+ return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
+ dstColorType, buffer, rowPixelWidth);
+}
+
+GrOpsRenderPass* GrGLGpu::getOpsRenderPass(
+ GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ if (!fCachedOpsRenderPass) {
+ fCachedOpsRenderPass.reset(new GrGLOpsRenderPass(this));
+ }
+
+ fCachedOpsRenderPass->set(rt, bounds, origin, colorInfo, stencilInfo);
+ return fCachedOpsRenderPass.get();
+}
+
+void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin,
+ const SkIRect& bounds) {
+ this->flushRenderTargetNoColorWrites(target);
+ this->didWriteToSurface(target, origin, &bounds);
+}
+
+void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target) {
+ this->flushRenderTargetNoColorWrites(target);
+ this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr);
+}
+
+void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target) {
+ SkASSERT(target);
+ GrGpuResource::UniqueID rtID = target->uniqueID();
+ if (fHWBoundRenderTargetUniqueID != rtID) {
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID());
+#ifdef SK_DEBUG
+ // don't do this check in Chromium -- this is causing
+ // lots of repeated command buffer flushes when the compositor is
+ // rendering with Ganesh, which is really slow; even too slow for
+ // Debug mode.
+ if (kChromium_GrGLDriver != this->glContext().driver()) {
+ GrGLenum status;
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+ SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
+ }
+ }
+#endif
+ fHWBoundRenderTargetUniqueID = rtID;
+ this->flushViewport(target->width(), target->height());
+ }
+
+ if (this->glCaps().srgbWriteControl()) {
+ this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat()));
+ }
+}
+
+void GrGLGpu::flushFramebufferSRGB(bool enable) {
+ if (enable && kYes_TriState != fHWSRGBFramebuffer) {
+ GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
+ fHWSRGBFramebuffer = kYes_TriState;
+ } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
+ GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
+ fHWSRGBFramebuffer = kNo_TriState;
+ }
+}
+
+void GrGLGpu::flushViewport(int width, int height) {
+ GrNativeRect viewport = {0, 0, width, height};
+ if (fHWViewport != viewport) {
+ GL_CALL(Viewport(viewport.fX, viewport.fY, viewport.fWidth, viewport.fHeight));
+ fHWViewport = viewport;
+ }
+}
+
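+// Debugging aid: when SWAP_PER_DRAW is enabled, buffers are swapped after every draw so
+// that individual draws can be observed on screen.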
+#define SWAP_PER_DRAW 0
+
+#if SWAP_PER_DRAW
+ #if defined(SK_BUILD_FOR_MAC)
+ #include <AGL/agl.h>
+ #elif defined(SK_BUILD_FOR_WIN)
+ #include <gl/GL.h>
+ void SwapBuf() {
+ DWORD procID = GetCurrentProcessId();
+ HWND hwnd = GetTopWindow(GetDesktopWindow());
+ while(hwnd) {
+ DWORD wndProcID = 0;
+ GetWindowThreadProcessId(hwnd, &wndProcID);
+ if(wndProcID == procID) {
+ SwapBuffers(GetDC(hwnd));
+ }
+ hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
+ }
+ }
+ #endif
+#endif
+
+void GrGLGpu::draw(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ const GrMesh meshes[],
+ int meshCount) {
+ this->handleDirtyContext();
+
+ SkASSERT(meshCount); // guaranteed by GrOpsRenderPass::draw
+
+ GrPrimitiveType primitiveType = meshes[0].primitiveType();
+
+#ifdef SK_DEBUG
+ // kPoints should never be intermingled in with the other primitive types
+ for (int i = 1; i < meshCount; ++i) {
+ if (primitiveType == GrPrimitiveType::kPoints) {
+ SkASSERT(meshes[i].primitiveType() == GrPrimitiveType::kPoints);
+ } else {
+ SkASSERT(meshes[i].primitiveType() != GrPrimitiveType::kPoints);
+ }
+ }
+#endif
+
+ // Passing 'primitiveType' here is a bit misleading. In GL's case it works out, since
+ // GL only cares if it is kPoints or not.
+ if (!this->flushGLState(renderTarget, programInfo, primitiveType)) {
+ return;
+ }
+
+ bool hasDynamicScissors = programInfo.hasDynamicScissors();
+ bool hasDynamicPrimProcTextures = programInfo.hasDynamicPrimProcTextures();
+
+ for (int m = 0; m < meshCount; ++m) {
+ if (auto barrierType = programInfo.pipeline().xferBarrierType(renderTarget->asTexture(),
+ *this->caps())) {
+ this->xferBarrier(renderTarget, barrierType);
+ }
+
+ if (hasDynamicScissors) {
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
+ this->flushScissor(GrScissorState(programInfo.dynamicScissor(m)),
+ glRT->width(), glRT->height(), programInfo.origin());
+ }
+ if (hasDynamicPrimProcTextures) {
+ auto texProxyArray = programInfo.dynamicPrimProcTextures(m);
+ fHWProgram->updatePrimitiveProcessorTextureBindings(programInfo.primProc(),
+ texProxyArray);
+ }
+ if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
+ GrIsPrimTypeLines(meshes[m].primitiveType()) &&
+ !GrIsPrimTypeLines(fLastPrimitiveType)) {
+ GL_CALL(Enable(GR_GL_CULL_FACE));
+ GL_CALL(Disable(GR_GL_CULL_FACE));
+ }
+ meshes[m].sendToGpu(this);
+ fLastPrimitiveType = meshes[m].primitiveType();
+ }
+
+#if SWAP_PER_DRAW
+ glFlush();
+ #if defined(SK_BUILD_FOR_MAC)
+ aglSwapBuffers(aglGetCurrentContext());
+ int set_a_break_pt_here = 9;
+ aglSwapBuffers(aglGetCurrentContext());
+ #elif defined(SK_BUILD_FOR_WIN)
+ SwapBuf();
+ int set_a_break_pt_here = 9;
+ SwapBuf();
+ #endif
+#endif
+}
+
+static GrGLenum gr_primitive_type_to_gl_mode(GrPrimitiveType primitiveType) {
+ switch (primitiveType) {
+ case GrPrimitiveType::kTriangles:
+ return GR_GL_TRIANGLES;
+ case GrPrimitiveType::kTriangleStrip:
+ return GR_GL_TRIANGLE_STRIP;
+ case GrPrimitiveType::kPoints:
+ return GR_GL_POINTS;
+ case GrPrimitiveType::kLines:
+ return GR_GL_LINES;
+ case GrPrimitiveType::kLineStrip:
+ return GR_GL_LINE_STRIP;
+ case GrPrimitiveType::kPath:
+ SK_ABORT("non-mesh-based GrPrimitiveType");
+ return 0;
+ }
+ SK_ABORT("invalid GrPrimitiveType");
+}
+
+void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
+ int vertexCount, int baseVertex) {
+ const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
+ if (this->glCaps().drawArraysBaseVertexIsBroken()) {
+ this->setupGeometry(nullptr, vertexBuffer, baseVertex, nullptr, 0, GrPrimitiveRestart::kNo);
+ GL_CALL(DrawArrays(glPrimType, 0, vertexCount));
+ } else {
+ this->setupGeometry(nullptr, vertexBuffer, 0, nullptr, 0, GrPrimitiveRestart::kNo);
+ GL_CALL(DrawArrays(glPrimType, baseVertex, vertexCount));
+ }
+ fStats.incNumDraws();
+}
+
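+// Computes the "indices" argument for DrawElements: a real pointer when the index data is
+// in a CPU-side buffer, or a byte offset when a GL index buffer will be bound.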
+static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) {
+ size_t baseOffset = baseIndex * sizeof(uint16_t);
+ if (indexBuffer->isCpuBuffer()) {
+ return static_cast<const GrCpuBuffer*>(indexBuffer)->data() + baseOffset;
+ } else {
+ return reinterpret_cast<const GrGLvoid*>(baseOffset);
+ }
+}
+
+void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
+ int indexCount, int baseIndex, uint16_t minIndexValue,
+ uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
+ int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
+ const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
+ const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
+
+ this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);
+
+ if (this->glCaps().drawRangeElementsSupport()) {
+ GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
+ GR_GL_UNSIGNED_SHORT, elementPtr));
+ } else {
+ GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr));
+ }
+ fStats.incNumDraws();
+}
+
+void GrGLGpu::sendInstancedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
+ int vertexCount, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) {
+ GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
+ int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
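+ // Issue the draw in batches; some drivers crash when too many instances are submitted
+ // in a single call.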
+ for (int i = 0; i < instanceCount; i += maxInstances) {
+ this->setupGeometry(nullptr, vertexBuffer, 0, instanceBuffer, baseInstance + i,
+ GrPrimitiveRestart::kNo);
+ GL_CALL(DrawArraysInstanced(glPrimType, baseVertex, vertexCount,
+ SkTMin(instanceCount - i, maxInstances)));
+ fStats.incNumDraws();
+ }
+}
+
+void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, const GrBuffer* vertexBuffer,
+ int baseVertex, const GrBuffer* instanceBuffer,
+ int instanceCount, int baseInstance,
+ GrPrimitiveRestart enablePrimitiveRestart) {
+ const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
+ const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
+ int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
+ for (int i = 0; i < instanceCount; i += maxInstances) {
+ this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i,
+ enablePrimitiveRestart);
+ GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr,
+ SkTMin(instanceCount - i, maxInstances)));
+ fStats.incNumDraws();
+ }
+}
+
+void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO) {
+ // Some extensions automatically resolve the texture when it is read.
+ SkASSERT(this->glCaps().usesMSAARenderBuffers());
+
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
+ SkASSERT(rt->textureFBOID() != rt->renderFBOID());
+ SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0);
+ this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID());
+ this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID());
+
+ // make sure we go through flushRenderTarget() since we've modified
+ // the bound DRAW FBO ID.
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
+ // Apple's extension uses the scissor as the blit bounds.
+ GrScissorState scissorState;
+ scissorState.set(resolveRect);
+ this->flushScissor(scissorState, rt->width(), rt->height(), resolveOrigin);
+ this->disableWindowRectangles();
+ GL_CALL(ResolveMultisampleFramebuffer());
+ } else {
+ int l, b, r, t;
+ if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag &
+ this->glCaps().blitFramebufferSupportFlags()) {
+ l = 0;
+ b = 0;
+ r = target->width();
+ t = target->height();
+ } else {
+ auto rect = GrNativeRect::MakeRelativeTo(
+ resolveOrigin, rt->height(), resolveRect);
+ l = rect.fX;
+ b = rect.fY;
+ r = rect.fX + rect.fWidth;
+ t = rect.fY + rect.fHeight;
+ }
+
+        // BlitFramebuffer respects the scissor, so disable it.
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
+ }
+}
+
+namespace {
+
+GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
+ static const GrGLenum gTable[kGrStencilOpCount] = {
+ GR_GL_KEEP, // kKeep
+ GR_GL_ZERO, // kZero
+ GR_GL_REPLACE, // kReplace
+ GR_GL_INVERT, // kInvert
+ GR_GL_INCR_WRAP, // kIncWrap
+ GR_GL_DECR_WRAP, // kDecWrap
+ GR_GL_INCR, // kIncClamp
+ GR_GL_DECR, // kDecClamp
+ };
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+ GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
+ GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
+ GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
+ GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
+ GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
+ GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
+ GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
+ SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
+ return gTable[(int)op];
+}
+
+void set_gl_stencil(const GrGLInterface* gl,
+ const GrStencilSettings::Face& face,
+ GrGLenum glFace) {
+ GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
+ GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
+ GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
+
+ GrGLint ref = face.fRef;
+ GrGLint mask = face.fTestMask;
+ GrGLint writeMask = face.fWriteMask;
+
+ if (GR_GL_FRONT_AND_BACK == glFace) {
+ // we call the combined func just in case separate stencil is not
+ // supported.
+ GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
+ GR_GL_CALL(gl, StencilMask(writeMask));
+ GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
+ } else {
+ GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
+ GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
+ GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
+ }
+}
+}  // namespace
+
+void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) {
+ if (stencilSettings.isDisabled()) {
+ this->disableStencil();
+ } else if (fHWStencilSettings != stencilSettings ||
+ (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) {
+ if (kYes_TriState != fHWStencilTestEnabled) {
+ GL_CALL(Enable(GR_GL_STENCIL_TEST));
+
+ fHWStencilTestEnabled = kYes_TriState;
+ }
+ if (stencilSettings.isTwoSided()) {
+ set_gl_stencil(this->glInterface(), stencilSettings.front(origin), GR_GL_FRONT);
+ set_gl_stencil(this->glInterface(), stencilSettings.back(origin), GR_GL_BACK);
+ } else {
+ set_gl_stencil(
+ this->glInterface(), stencilSettings.frontAndBack(), GR_GL_FRONT_AND_BACK);
+ }
+ fHWStencilSettings = stencilSettings;
+ fHWStencilOrigin = origin;
+ }
+}
+
+void GrGLGpu::disableStencil() {
+ if (kNo_TriState != fHWStencilTestEnabled) {
+ GL_CALL(Disable(GR_GL_STENCIL_TEST));
+
+ fHWStencilTestEnabled = kNo_TriState;
+ fHWStencilSettings.invalidate();
+ }
+}
+
+void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA) {
+ // rt is only optional if useHWAA is false.
+ SkASSERT(rt || !useHWAA);
+#ifdef SK_DEBUG
+ if (useHWAA && rt->numSamples() <= 1) {
+ SkASSERT(this->caps()->mixedSamplesSupport());
+ SkASSERT(0 != static_cast<GrGLRenderTarget*>(rt)->renderFBOID());
+ SkASSERT(rt->renderTargetPriv().getStencilAttachment());
+ }
+#endif
+
+ if (this->caps()->multisampleDisableSupport()) {
+ if (useHWAA) {
+ if (kYes_TriState != fMSAAEnabled) {
+ GL_CALL(Enable(GR_GL_MULTISAMPLE));
+ fMSAAEnabled = kYes_TriState;
+ }
+ } else {
+ if (kNo_TriState != fMSAAEnabled) {
+ GL_CALL(Disable(GR_GL_MULTISAMPLE));
+ fMSAAEnabled = kNo_TriState;
+ }
+ }
+ }
+}
+
+void GrGLGpu::flushBlendAndColorWrite(
+ const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
+ if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWriteColor) {
+ // We need to work around a driver bug by using a blend state that preserves the dst color,
+ // rather than disabling color writes.
+ GrXferProcessor::BlendInfo preserveDstBlend;
+ preserveDstBlend.fSrcBlend = kZero_GrBlendCoeff;
+ preserveDstBlend.fDstBlend = kOne_GrBlendCoeff;
+ this->flushBlendAndColorWrite(preserveDstBlend, swizzle);
+ return;
+ }
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+
+ // Any optimization to disable blending should have already been applied and
+ // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
+ bool blendOff =
+ ((kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff) ||
+ !blendInfo.fWriteColor;
+
+ if (blendOff) {
+ if (kNo_TriState != fHWBlendState.fEnabled) {
+ GL_CALL(Disable(GR_GL_BLEND));
+
+ // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
+ // https://code.google.com/p/skia/issues/detail?id=3943
+ if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
+ GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
+ SkASSERT(this->caps()->advancedBlendEquationSupport());
+ // Set to any basic blending equation.
+ GrBlendEquation blend_equation = kAdd_GrBlendEquation;
+ GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
+ fHWBlendState.fEquation = blend_equation;
+ }
+
+ fHWBlendState.fEnabled = kNo_TriState;
+ }
+ } else {
+ if (kYes_TriState != fHWBlendState.fEnabled) {
+ GL_CALL(Enable(GR_GL_BLEND));
+
+ fHWBlendState.fEnabled = kYes_TriState;
+ }
+
+ if (fHWBlendState.fEquation != equation) {
+ GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
+ fHWBlendState.fEquation = equation;
+ }
+
+ if (GrBlendEquationIsAdvanced(equation)) {
+ SkASSERT(this->caps()->advancedBlendEquationSupport());
+ // Advanced equations have no other blend state.
+ return;
+ }
+
+ if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
+ GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
+ gXfermodeCoeff2Blend[dstCoeff]));
+ fHWBlendState.fSrcCoeff = srcCoeff;
+ fHWBlendState.fDstCoeff = dstCoeff;
+ }
+
+ if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
+ SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
+ if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
+ GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
+ fHWBlendState.fConstColor = blendConst;
+ fHWBlendState.fConstColorValid = true;
+ }
+ }
+ }
+
+ this->flushColorWrite(blendInfo.fWriteColor);
+}
+
+static void get_gl_swizzle_values(const GrSwizzle& swizzle, GrGLenum glValues[4]) {
+ for (int i = 0; i < 4; ++i) {
+ switch (swizzle[i]) {
+ case 'r': glValues[i] = GR_GL_RED; break;
+ case 'g': glValues[i] = GR_GL_GREEN; break;
+ case 'b': glValues[i] = GR_GL_BLUE; break;
+ case 'a': glValues[i] = GR_GL_ALPHA; break;
+ case '0': glValues[i] = GR_GL_ZERO; break;
+ case '1': glValues[i] = GR_GL_ONE; break;
+ default: SK_ABORT("Unsupported component");
+ }
+ }
+}
+
+void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const GrSwizzle& swizzle,
+ GrGLTexture* texture) {
+ SkASSERT(texture);
+
+#ifdef SK_DEBUG
+ if (!this->caps()->npotTextureTileSupport()) {
+ if (samplerState.isRepeated()) {
+ const int w = texture->width();
+ const int h = texture->height();
+ SkASSERT(SkIsPow2(w) && SkIsPow2(h));
+ }
+ }
+#endif
+
+ GrGpuResource::UniqueID textureID = texture->uniqueID();
+ GrGLenum target = texture->target();
+ if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(BindTexture(target, texture->textureID()));
+ fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID);
+ }
+
+ if (samplerState.filter() == GrSamplerState::Filter::kMipMap) {
+ if (!this->caps()->mipMapSupport() ||
+ texture->texturePriv().mipMapped() == GrMipMapped::kNo) {
+ samplerState.setFilterMode(GrSamplerState::Filter::kBilerp);
+ }
+ }
+
+#ifdef SK_DEBUG
+ // We were supposed to ensure MipMaps were up-to-date before getting here.
+ if (samplerState.filter() == GrSamplerState::Filter::kMipMap) {
+ SkASSERT(!texture->texturePriv().mipMapsAreDirty());
+ }
+#endif
+
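+    // If this texture's parameters were last recorded before the most recent reset of texture
+    // state, the cached values can't be trusted and every parameter must be re-sent.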
+ auto timestamp = texture->parameters()->resetTimestamp();
+ bool setAll = timestamp < fResetTimestampForTextureParameters;
+
+ const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr;
+ GrGLTextureParameters::SamplerOverriddenState newSamplerState;
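+    // When sampler objects are supported we bind one and skip per-texture sampler parameters;
+    // otherwise fall back to TexParameter calls, diffed against the last recorded values.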
+ if (fSamplerObjectCache) {
+ fSamplerObjectCache->bindSampler(unitIdx, samplerState);
+ } else {
+ const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
+ texture->parameters()->samplerOverriddenState();
+ samplerStateToRecord = &newSamplerState;
+
+ newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter());
+ newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
+
+ newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
+ newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
+
+ // These are the OpenGL default values.
+ newSamplerState.fMinLOD = -1000.f;
+ newSamplerState.fMaxLOD = 1000.f;
+
+ if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter));
+ }
+ if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter));
+ }
+ if (this->glCaps().mipMapLevelAndLodControlSupport()) {
+ if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD));
+ }
+ if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD));
+ }
+ }
+ if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS));
+ }
+ if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT));
+ }
+ if (this->glCaps().clampToBorderSupport()) {
+ // Make sure the border color is transparent black (the default)
+ if (setAll || oldSamplerState.fBorderColorInvalid) {
+ this->setTextureUnit(unitIdx);
+ static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
+ GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
+ }
+ }
+ }
+ GrGLTextureParameters::NonsamplerState newNonsamplerState;
+ newNonsamplerState.fBaseMipMapLevel = 0;
+ newNonsamplerState.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();
+
+ const GrGLTextureParameters::NonsamplerState& oldNonsamplerState =
+ texture->parameters()->nonsamplerState();
+ if (!this->caps()->shaderCaps()->textureSwizzleAppliedInShader()) {
+ newNonsamplerState.fSwizzleKey = swizzle.asKey();
+ if (setAll || swizzle.asKey() != oldNonsamplerState.fSwizzleKey) {
+ GrGLenum glValues[4];
+ get_gl_swizzle_values(swizzle, glValues);
+ this->setTextureUnit(unitIdx);
+ if (GR_IS_GR_GL(this->glStandard())) {
+ GR_STATIC_ASSERT(sizeof(glValues[0]) == sizeof(GrGLint));
+ GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
+ reinterpret_cast<const GrGLint*>(glValues)));
+ } else if (GR_IS_GR_GL_ES(this->glStandard())) {
+ // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, glValues[0]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, glValues[1]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, glValues[2]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, glValues[3]));
+ }
+ }
+ }
+ // These are not supported in ES2 contexts
+ if (this->glCaps().mipMapLevelAndLodControlSupport() &&
+ (texture->texturePriv().textureType() != GrTextureType::kExternal ||
+ !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
+ if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
+ newNonsamplerState.fBaseMipMapLevel));
+ }
+ if (newNonsamplerState.fMaxMipMapLevel != oldNonsamplerState.fMaxMipMapLevel) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
+ newNonsamplerState.fMaxMipMapLevel));
+ }
+ }
+ texture->parameters()->set(samplerStateToRecord, newNonsamplerState,
+ fResetTimestampForTextureParameters);
+}
+
+void GrGLGpu::onResetTextureBindings() {
+ static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE,
+ GR_GL_TEXTURE_EXTERNAL};
+ for (int i = 0; i < this->numTextureUnits(); ++i) {
+ this->setTextureUnit(i);
+ for (auto target : kTargets) {
+ if (fHWTextureUnitBindings[i].hasBeenModified(target)) {
+ GL_CALL(BindTexture(target, 0));
+ }
+ }
+ fHWTextureUnitBindings[i].invalidateAllTargets(true);
+ }
+}
+
+void GrGLGpu::flushColorWrite(bool writeColor) {
+ if (!writeColor) {
+ if (kNo_TriState != fHWWriteToColor) {
+ GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
+ GR_GL_FALSE, GR_GL_FALSE));
+ fHWWriteToColor = kNo_TriState;
+ }
+ } else {
+ if (kYes_TriState != fHWWriteToColor) {
+ GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+ fHWWriteToColor = kYes_TriState;
+ }
+ }
+}
+
+void GrGLGpu::flushClearColor(const SkPMColor4f& color) {
+ GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA;
+ if (this->glCaps().clearToBoundaryValuesIsBroken() &&
+ (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
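+        // Nudge the alpha channel just off the 0/1 boundary; the driver mishandles clears to
+        // exact boundary values.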
+ static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
+ static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
+ a = (1 == a) ? safeAlpha1 : safeAlpha0;
+ }
+ if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
+ b != fHWClearColor[2] || a != fHWClearColor[3]) {
+ GL_CALL(ClearColor(r, g, b, a));
+ fHWClearColor[0] = r;
+ fHWClearColor[1] = g;
+ fHWClearColor[2] = b;
+ fHWClearColor[3] = a;
+ }
+}
+
+void GrGLGpu::setTextureUnit(int unit) {
+ SkASSERT(unit >= 0 && unit < this->numTextureUnits());
+ if (unit != fHWActiveTextureUnitIdx) {
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
+ fHWActiveTextureUnitIdx = unit;
+ }
+}
+
+void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) {
+ // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
+ int lastUnitIdx = this->numTextureUnits() - 1;
+ if (lastUnitIdx != fHWActiveTextureUnitIdx) {
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
+ fHWActiveTextureUnitIdx = lastUnitIdx;
+ }
+    // Clear out this binding so that if a GrGLProgram does use this unit it will rebind the
+    // correct texture.
+ fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target);
+ GL_CALL(BindTexture(target, textureID));
+}
+
+// Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
+static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
+ const GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint,
+ const GrGLCaps& caps) {
+ int dstSampleCnt = 0;
+ int srcSampleCnt = 0;
+ if (const GrRenderTarget* rt = dst->asRenderTarget()) {
+ dstSampleCnt = rt->numSamples();
+ }
+ if (const GrRenderTarget* rt = src->asRenderTarget()) {
+ srcSampleCnt = rt->numSamples();
+ }
+ SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
+ SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
+
+ GrGLFormat dstFormat = dst->backendFormat().asGLFormat();
+ GrGLFormat srcFormat = src->backendFormat().asGLFormat();
+
+ const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
+ const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
+
+ GrTextureType dstTexType;
+ GrTextureType* dstTexTypePtr = nullptr;
+ GrTextureType srcTexType;
+ GrTextureType* srcTexTypePtr = nullptr;
+ if (dstTex) {
+ dstTexType = dstTex->texturePriv().textureType();
+ dstTexTypePtr = &dstTexType;
+ }
+ if (srcTex) {
+ srcTexType = srcTex->texturePriv().textureType();
+ srcTexTypePtr = &srcTexType;
+ }
+
+ return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr,
+ srcFormat, srcSampleCnt, srcTexTypePtr,
+ src->getBoundsRect(), true, srcRect, dstPoint);
+}
+
+static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
+ // A RT has a separate MSAA renderbuffer if:
+ // 1) It's multisampled
+ // 2) We're using an extension with separate MSAA renderbuffers
+ // 3) It's not FBO 0, which is special and always auto-resolves
+ return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && rt->renderFBOID() != 0;
+}
+
+static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src,
+ const GrGLCaps& caps) {
+ const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
+ const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
+ const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
+ const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
+
+ bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
+ bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
+
+ GrGLFormat dstFormat = dst->backendFormat().asGLFormat();
+ GrGLFormat srcFormat = src->backendFormat().asGLFormat();
+
+ GrTextureType dstTexType;
+ GrTextureType* dstTexTypePtr = nullptr;
+ GrTextureType srcTexType;
+ GrTextureType* srcTexTypePtr = nullptr;
+ if (dstTex) {
+ dstTexType = dstTex->texturePriv().textureType();
+ dstTexTypePtr = &dstTexType;
+ }
+ if (srcTex) {
+ srcTexType = srcTex->texturePriv().textureType();
+ srcTexTypePtr = &srcTexType;
+ }
+
+ return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr,
+ srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr);
+}
+
+// Binds the surface to fboTarget, creating and binding a temporary FBO when the surface is not a
+// render target or a mip level above the base is requested.
+void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
+ TempFBOTarget tempFBOTarget) {
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
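+    // A render target's own FBO can be used directly for level 0; textures and higher mip levels
+    // must be attached to a temporary FBO instead.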
+ if (!rt || mipLevel > 0) {
+ SkASSERT(surface->asTexture());
+ GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
+ GrGLuint texID = texture->textureID();
+ GrGLenum target = texture->target();
+ GrGLuint* tempFBOID;
+ tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
+
+ if (0 == *tempFBOID) {
+ GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
+ }
+
+ this->bindFramebuffer(fboTarget, *tempFBOID);
+ GR_GL_CALL(
+ this->glInterface(),
+ FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel));
+ if (mipLevel == 0) {
+ texture->baseLevelWasBoundToFBO();
+ }
+ } else {
+ this->bindFramebuffer(fboTarget, rt->renderFBOID());
+ }
+}
+
+void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) {
+    // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets to a
+    // temporary FBO; detach the texture from that FBO here.
+ if (mipLevel > 0 || !surface->asRenderTarget()) {
+ SkASSERT(surface->asTexture());
+ GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
+ GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
+ GR_GL_COLOR_ATTACHMENT0,
+ textureTarget,
+ 0,
+ 0));
+ }
+}
+
+void GrGLGpu::onFBOChanged() {
+ if (this->caps()->workarounds().flush_on_framebuffer_change ||
+ this->caps()->workarounds().restore_scissor_on_fbo_change) {
+ GL_CALL(Flush());
+ }
+#ifdef SK_DEBUG
+ if (fIsExecutingCommandBuffer_DebugOnly) {
+ SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
+ "This will severely hurt performance.\n");
+ }
+#endif
+}
+
+void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(target, fboid));
+ if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
+ fBoundDrawFramebuffer = fboid;
+ }
+
+ if (this->caps()->workarounds().restore_scissor_on_fbo_change) {
+ // The driver forgets the correct scissor when modifying the FBO binding.
+ if (!fHWScissorSettings.fRect.isInvalid()) {
+ const GrNativeRect& r = fHWScissorSettings.fRect;
+ GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight));
+ }
+ }
+
+ this->onFBOChanged();
+}
+
+void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
+ if (fboid == fBoundDrawFramebuffer &&
+ this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
+ // This workaround only applies to deleting currently bound framebuffers
+ // on Adreno 420. Because this is a somewhat rare case, instead of
+        // tracking all the attachments of every framebuffer we just always
+        // unbind all attachments.
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_RENDERBUFFER, 0));
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+
+ GL_CALL(DeleteFramebuffers(1, &fboid));
+
+ // Deleting the currently bound framebuffer rebinds to 0.
+ if (fboid == fBoundDrawFramebuffer) {
+ this->onFBOChanged();
+ }
+}
+
+bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
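+    // Try the available copy paths in order of preference: draw (when dst already has an FBO),
+    // CopyTexSubImage, BlitFramebuffer, and finally draw again as a last resort.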
+ // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
+ // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
+ bool preferCopy = SkToBool(dst->asRenderTarget());
+ auto dstFormat = dst->backendFormat().asGLFormat();
+ if (preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) {
+ if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
+ return true;
+ }
+ }
+
+ if (can_copy_texsubimage(dst, src, this->glCaps())) {
+ this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())) {
+ return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
+ }
+
+ if (!preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) {
+ if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ int progIdx = TextureToCopyProgramIdx(srcTex);
+ const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
+ GrSLType samplerType =
+ GrSLCombinedSamplerTypeForTextureType(srcTex->texturePriv().textureType());
+
+ if (!fCopyProgramArrayBuffer) {
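+        // A unit quad issued as a 4-vertex triangle strip; the vertex shader positions it.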
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
+ kStatic_GrAccessPattern, vdata);
+ }
+ if (!fCopyProgramArrayBuffer) {
+ return false;
+ }
+
+ SkASSERT(!fCopyPrograms[progIdx].fProgram);
+ GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
+ if (!fCopyPrograms[progIdx].fProgram) {
+ return false;
+ }
+
+ GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier);
+ GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::kUniform_TypeModifier);
+ GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::kUniform_TypeModifier);
+ GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier);
+ GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::kOut_TypeModifier);
+
+ SkString vshaderTxt;
+ if (shaderCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
+ vshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ vTexCoord.addModifier("noperspective");
+ }
+
+ aVertex.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uPosXform.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+
+ vshaderTxt.append(
+ "// Copy Program VS\n"
+ "void main() {"
+ " v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);"
+ " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
+ " sk_Position.zw = half2(0, 1);"
+ "}"
+ );
+
+ SkString fshaderTxt;
+ if (shaderCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
+ fshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ }
+ vTexCoord.setTypeModifier(GrShaderVar::kIn_TypeModifier);
+ vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ uTexture.appendDecl(shaderCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ fshaderTxt.appendf(
+ "// Copy Program FS\n"
+ "void main() {"
+ " sk_FragColor = sample(u_texture, v_texCoord);"
+ "}"
+ );
+
+ auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
+ SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size());
+ SkSL::Program::Settings settings;
+ settings.fCaps = shaderCaps;
+ SkSL::String glsl;
+ std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind,
+ sksl, settings, &glsl, errorHandler);
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
+ GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler);
+ SkASSERT(program->fInputs.isEmpty());
+
+ sksl.assign(fshaderTxt.c_str(), fshaderTxt.size());
+ program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl,
+ errorHandler);
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
+ GR_GL_FRAGMENT_SHADER, glsl, &fStats,
+ errorHandler);
+ SkASSERT(program->fInputs.isEmpty());
+
+ GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
+
+ GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
+ GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
+ GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
+
+ GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+bool GrGLGpu::createMipmapProgram(int progIdx) {
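+    // progIdx encodes the parity of the source dimensions: bit 1 set means odd width, bit 0 set
+    // means odd height. Odd dimensions need extra taps to average correctly.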
+ const bool oddWidth = SkToBool(progIdx & 0x2);
+ const bool oddHeight = SkToBool(progIdx & 0x1);
+ const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
+
+ const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
+
+ SkASSERT(!fMipmapPrograms[progIdx].fProgram);
+ GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
+ if (!fMipmapPrograms[progIdx].fProgram) {
+ return false;
+ }
+
+ GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier);
+ GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
+ GrShaderVar vTexCoords[] = {
+ GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
+ GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
+ GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
+ GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
+ };
+ GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType,GrShaderVar::kOut_TypeModifier);
+
+ SkString vshaderTxt;
+ if (shaderCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
+ vshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ vTexCoords[0].addModifier("noperspective");
+ vTexCoords[1].addModifier("noperspective");
+ vTexCoords[2].addModifier("noperspective");
+ vTexCoords[3].addModifier("noperspective");
+ }
+
+ aVertex.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ for (int i = 0; i < numTaps; ++i) {
+ vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ }
+
+ vshaderTxt.append(
+ "// Mipmap Program VS\n"
+ "void main() {"
+ " sk_Position.xy = a_vertex * half2(2, 2) - half2(1, 1);"
+ " sk_Position.zw = half2(0, 1);"
+ );
+
+ // Insert texture coordinate computation:
+ if (oddWidth && oddHeight) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
+ " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
+ " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
+ " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
+ );
+ } else if (oddWidth) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
+ " v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
+ );
+ } else if (oddHeight) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
+ " v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
+ );
+ } else {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy;"
+ );
+ }
+
+ vshaderTxt.append("}");
+
+ SkString fshaderTxt;
+ if (shaderCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
+ fshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ }
+ for (int i = 0; i < numTaps; ++i) {
+ vTexCoords[i].setTypeModifier(GrShaderVar::kIn_TypeModifier);
+ vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ }
+ uTexture.appendDecl(shaderCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ fshaderTxt.append(
+ "// Mipmap Program FS\n"
+ "void main() {"
+ );
+
+ if (oddWidth && oddHeight) {
+ fshaderTxt.append(
+ " sk_FragColor = (sample(u_texture, v_texCoord0) + "
+ " sample(u_texture, v_texCoord1) + "
+ " sample(u_texture, v_texCoord2) + "
+ " sample(u_texture, v_texCoord3)) * 0.25;"
+ );
+ } else if (oddWidth || oddHeight) {
+ fshaderTxt.append(
+ " sk_FragColor = (sample(u_texture, v_texCoord0) + "
+ " sample(u_texture, v_texCoord1)) * 0.5;"
+ );
+ } else {
+ fshaderTxt.append(
+ " sk_FragColor = sample(u_texture, v_texCoord0);"
+ );
+ }
+
+ fshaderTxt.append("}");
+
+ auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
+ SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size());
+ SkSL::Program::Settings settings;
+ settings.fCaps = shaderCaps;
+ SkSL::String glsl;
+ std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind,
+ sksl, settings, &glsl, errorHandler);
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
+ GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler);
+ SkASSERT(program->fInputs.isEmpty());
+
+ sksl.assign(fshaderTxt.c_str(), fshaderTxt.size());
+ program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl,
+ errorHandler);
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
+ GR_GL_FRAGMENT_SHADER, glsl, &fStats,
+ errorHandler);
+ SkASSERT(program->fInputs.isEmpty());
+
+ GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
+
+ GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
+ GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
+ GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
+ GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
+
+ GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ auto* srcTex = static_cast<GrGLTexture*>(src->asTexture());
+    auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
+    auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget());
+ if (!srcTex) {
+ return false;
+ }
+ int progIdx = TextureToCopyProgramIdx(srcTex);
+ if (!dstRT) {
+ SkASSERT(dstTex);
+ if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) {
+ return false;
+ }
+ }
+ if (!fCopyPrograms[progIdx].fProgram) {
+ if (!this->createCopyProgram(srcTex)) {
+ SkDebugf("Failed to create copy program.\n");
+ return false;
+ }
+ }
+ int w = srcRect.width();
+ int h = srcRect.height();
+ // We don't swizzle at all in our copies.
+ this->bindTexture(0, GrSamplerState::ClampNearest(), GrSwizzle::RGBA(), srcTex);
+ this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
+ this->flushViewport(dst->width(), dst->height());
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
+ this->flushProgram(fCopyPrograms[progIdx].fProgram);
+ fHWVertexArrayState.setVertexArrayID(this, 0);
+ GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->enableVertexArrays(this, 1);
+ attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
+ kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
+ // dst rect edges in NDC (-1 to 1)
+ int dw = dst->width();
+ int dh = dst->height();
+ GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
+ GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
+ GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
+ GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
+ GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
+ GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
+ GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
+ GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
+ int sw = src->width();
+ int sh = src->height();
+ if (srcTex->texturePriv().textureType() != GrTextureType::kRectangle) {
+ // src rect edges in normalized texture space (0 to 1)
+ sx0 /= sw;
+ sx1 /= sw;
+ sy0 /= sh;
+ sy1 /= sh;
+ }
+ GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
+ GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
+ sx1 - sx0, sy1 - sy0, sx0, sy0));
+ GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
+ this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA());
+ this->flushHWAAState(nullptr, false);
+ this->disableScissor();
+ this->disableWindowRectangles();
+ this->disableStencil();
+ if (this->glCaps().srgbWriteControl()) {
+ this->flushFramebufferSRGB(true);
+ }
+ GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
+ this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER);
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+ return true;
+}
+
+void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_texsubimage(dst, src, this->glCaps()));
+ this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
+ GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
+ SkASSERT(dstTex);
+ // We modified the bound FBO
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+
+ this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID());
+ GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
+ dstPoint.fX, dstPoint.fY,
+ srcRect.fLeft, srcRect.fTop,
+ srcRect.width(), srcRect.height()));
+ this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER);
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+}
+
+bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps()));
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ if (dst == src) {
+ if (SkIRect::Intersects(dstRect, srcRect)) {
+ return false;
+ }
+ }
+
+ this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget);
+ this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget);
+ // We modified the bound FBO
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+
+    // BlitFramebuffer respects the scissor, so disable it.
+ this->disableScissor();
+ this->disableWindowRectangles();
+
+ GL_CALL(BlitFramebuffer(srcRect.fLeft,
+ srcRect.fTop,
+ srcRect.fRight,
+ srcRect.fBottom,
+ dstRect.fLeft,
+ dstRect.fTop,
+ dstRect.fRight,
+ dstRect.fBottom,
+ GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
+ this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER);
+ this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER);
+
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+ return true;
+}
+
+bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
+ auto glTex = static_cast<GrGLTexture*>(texture);
+ // Mipmaps are only supported on 2D textures:
+ if (GR_GL_TEXTURE_2D != glTex->target()) {
+ return false;
+ }
+ GrGLFormat format = glTex->format();
+ // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
+ // Uses draw calls to do a series of downsample operations to successive mips.
+
+ // The manual approach requires the ability to limit which level we're sampling and that the
+    // destination can be bound to an FBO:
+ if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) {
+ GrGLenum target = glTex->target();
+ this->bindTextureToScratchUnit(target, glTex->textureID());
+ GL_CALL(GenerateMipmap(glTex->target()));
+ return true;
+ }
+
+ int width = texture->width();
+ int height = texture->height();
+ int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
+ SkASSERT(levelCount == texture->texturePriv().maxMipMapLevel() + 1);
+
+ // Create (if necessary), then bind temporary FBO:
+ if (0 == fTempDstFBOID) {
+ GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
+ }
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+
+ // Bind the texture, to get things configured for filtering.
+ // We'll be changing our base level further below:
+ this->setTextureUnit(0);
+ // The mipmap program does not do any swizzling.
+ this->bindTexture(0, GrSamplerState::ClampBilerp(), GrSwizzle::RGBA(), glTex);
+
+ // Vertex data:
+ if (!fMipmapProgramArrayBuffer) {
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
+ kStatic_GrAccessPattern, vdata);
+ }
+ if (!fMipmapProgramArrayBuffer) {
+ return false;
+ }
+
+ fHWVertexArrayState.setVertexArrayID(this, 0);
+
+ GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->enableVertexArrays(this, 1);
+ attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
+ kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
+
+ // Set "simple" state once:
+ this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA());
+ this->flushHWAAState(nullptr, false);
+ this->disableScissor();
+ this->disableWindowRectangles();
+ this->disableStencil();
+
+ // Do all the blits:
+ width = texture->width();
+ height = texture->height();
+
+ for (GrGLint level = 1; level < levelCount; ++level) {
+ // Get and bind the program for this particular downsample (filter shape can vary):
+ int progIdx = TextureSizeToMipmapProgramIdx(width, height);
+ if (!fMipmapPrograms[progIdx].fProgram) {
+ if (!this->createMipmapProgram(progIdx)) {
+ SkDebugf("Failed to create mipmap program.\n");
+ // Invalidate all params to cover base level change in a previous iteration.
+ glTex->textureParamsModified();
+ return false;
+ }
+ }
+ this->flushProgram(fMipmapPrograms[progIdx].fProgram);
+
+ // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
+ const float invWidth = 1.0f / width;
+ const float invHeight = 1.0f / height;
+ GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
+ invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
+ GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
+
+ // Only sample from previous mip
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
+
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
+ glTex->textureID(), level));
+
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+ this->flushViewport(width, height);
+
+ GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
+ }
+
+ // Unbind:
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D, 0, 0));
+
+ // We modified the base level param.
+ GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState();
+ // We drew the 2nd to last level into the last level.
+ nonsamplerState.fBaseMipMapLevel = levelCount - 2;
+ glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
+
+ return true;
+}
+
+void GrGLGpu::querySampleLocations(
+ GrRenderTarget* renderTarget, SkTArray<SkPoint>* sampleLocations) {
+ this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(renderTarget));
+
+ int effectiveSampleCnt;
+ GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, &effectiveSampleCnt);
+ SkASSERT(effectiveSampleCnt >= renderTarget->numSamples());
+
+ sampleLocations->reset(effectiveSampleCnt);
+ for (int i = 0; i < effectiveSampleCnt; ++i) {
+ GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, &(*sampleLocations)[i].fX));
+ }
+}
+
+void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
+ SkASSERT(type);
+ switch (type) {
+ case kTexture_GrXferBarrierType: {
+ GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
+ SkASSERT(glrt->textureFBOID() != 0 && glrt->renderFBOID() != 0);
+ if (glrt->textureFBOID() != glrt->renderFBOID()) {
+ // The render target uses separate storage so no need for glTextureBarrier.
+ // FIXME: The render target will resolve automatically when its texture is bound,
+ // but we could resolve only the bounds that will be read if we do it here instead.
+ return;
+ }
+ SkASSERT(this->caps()->textureBarrierSupport());
+ GL_CALL(TextureBarrier());
+ return;
+ }
+ case kBlend_GrXferBarrierType:
+ SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
+ this->caps()->blendEquationSupport());
+ GL_CALL(BlendBarrier());
+ return;
+ default: break; // placate compiler warnings that kNone not handled
+ }
+}
+
+static GrPixelConfig gl_format_to_pixel_config(GrGLFormat format) {
+ switch (format) {
+ case GrGLFormat::kRGBA8: return kRGBA_8888_GrPixelConfig;
+ case GrGLFormat::kRGB8: return kRGB_888_GrPixelConfig;
+ case GrGLFormat::kRG8: return kRG_88_GrPixelConfig;
+ case GrGLFormat::kBGRA8: return kBGRA_8888_GrPixelConfig;
+ case GrGLFormat::kLUMINANCE8: return kGray_8_GrPixelConfig;
+ case GrGLFormat::kSRGB8_ALPHA8: return kSRGBA_8888_GrPixelConfig;
+ case GrGLFormat::kRGB10_A2: return kRGBA_1010102_GrPixelConfig;
+ case GrGLFormat::kRGB565: return kRGB_565_GrPixelConfig;
+ case GrGLFormat::kRGBA4: return kRGBA_4444_GrPixelConfig;
+ case GrGLFormat::kRGBA16F: return kRGBA_half_GrPixelConfig;
+ case GrGLFormat::kR16: return kAlpha_16_GrPixelConfig;
+ case GrGLFormat::kRG16: return kRG_1616_GrPixelConfig;
+ case GrGLFormat::kRGBA16: return kRGBA_16161616_GrPixelConfig;
+ case GrGLFormat::kRG16F: return kRG_half_GrPixelConfig;
+ case GrGLFormat::kUnknown: return kUnknown_GrPixelConfig;
+
+ // Configs with multiple equivalent formats.
+
+ case GrGLFormat::kR16F: return kAlpha_half_GrPixelConfig;
+ case GrGLFormat::kLUMINANCE16F: return kAlpha_half_GrPixelConfig;
+
+ case GrGLFormat::kALPHA8: return kAlpha_8_GrPixelConfig;
+ case GrGLFormat::kR8: return kAlpha_8_GrPixelConfig;
+
+ case GrGLFormat::kCOMPRESSED_RGB8_ETC2: return kRGB_ETC1_GrPixelConfig;
+ case GrGLFormat::kCOMPRESSED_ETC1_RGB8: return kRGB_ETC1_GrPixelConfig;
+ }
+ SkUNREACHABLE;
+}
+
+GrBackendTexture GrGLGpu::onCreateBackendTexture(int w, int h,
+ const GrBackendFormat& format,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color,
+ GrProtected isProtected) {
+ this->handleDirtyContext();
+
+ SkDEBUGCODE(const GrCaps* caps = this->caps();)
+
+ // GrGpu::createBackendTexture should've ensured these conditions
+ SkASSERT(w >= 1 && w <= caps->maxTextureSize() && h >= 1 && h <= caps->maxTextureSize());
+ SkASSERT(GrGpu::MipMapsAreCorrect(w, h, mipMapped, srcData, numMipLevels));
+ SkASSERT(mipMapped == GrMipMapped::kNo || caps->mipMapSupport());
+
+ GrGLFormat glFormat = format.asGLFormat();
+ if (glFormat == GrGLFormat::kUnknown) {
+ return GrBackendTexture(); // invalid
+ }
+
+ // Compressed formats go through onCreateCompressedBackendTexture
+ SkASSERT(!GrGLFormatIsCompressed(glFormat));
+
+ GrPixelConfig config = gl_format_to_pixel_config(glFormat);
+
+ if (config == kUnknown_GrPixelConfig) {
+ return GrBackendTexture(); // invalid
+ }
+
+ auto textureColorType = GrPixelConfigToColorType(config);
+
+ // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
+ if (!this->caps()->isFormatTexturableAndUploadable(textureColorType, format)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ GrGLTextureInfo info;
+ GrGLTextureParameters::SamplerOverriddenState initialState;
+
+ SkTDArray<GrMipLevel> texels;
+ SkAutoMalloc pixelStorage;
+
+ int mipLevelCount = 1;
+ if (srcData) {
+ mipLevelCount = numMipLevels;
+ texels.append(mipLevelCount);
+ for (int i = 0; i < mipLevelCount; ++i) {
+ texels[i] = { srcData[i].addr(), srcData[i].rowBytes() };
+ }
+ } else if (color) {
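+        // No pixel data was supplied; synthesize it by filling every mip level with the given
+        // solid color.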
+ if (GrMipMapped::kYes == mipMapped) {
+ mipLevelCount = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ texels.append(mipLevelCount);
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+
+ size_t bytesPerPixel = this->glCaps().bytesPerPixel(glFormat);
+
+ size_t totalSize = GrComputeTightCombinedBufferSize(
+ bytesPerPixel, w, h, &individualMipOffsets, mipLevelCount);
+
+ char* tmpPixels = (char*)pixelStorage.reset(totalSize);
+
+ GrFillInData(textureColorType, w, h, individualMipOffsets, tmpPixels, *color);
+ for (int i = 0; i < mipLevelCount; ++i) {
+ size_t offset = individualMipOffsets[i];
+
+ int twoToTheMipLevel = 1 << i;
+ int currentWidth = SkTMax(1, w / twoToTheMipLevel);
+
+ texels[i] = {&(tmpPixels[offset]), currentWidth * bytesPerPixel};
+ }
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = w;
+ desc.fHeight = h;
+ desc.fConfig = config;
+
+ info.fTarget = GR_GL_TEXTURE_2D;
+ info.fFormat = GrGLFormatToEnum(glFormat);
+ info.fID = this->createTexture2D({desc.fWidth, desc.fHeight}, glFormat, renderable,
+ &initialState, SkTMax(1, texels.count()));
+ if (!info.fID) {
+ return GrBackendTexture(); // invalid
+ }
+ auto srcColorType = GrPixelConfigToColorType(desc.fConfig);
+ if (!texels.empty() &&
+ !this->uploadTexData(glFormat, textureColorType, desc.fWidth, desc.fHeight,
+ GR_GL_TEXTURE_2D, 0, 0, desc.fWidth, desc.fHeight, srcColorType,
+ texels.begin(), texels.count())) {
+ GL_CALL(DeleteTextures(1, &info.fID));
+ return GrBackendTexture();
+ }
+
+ // unbind the texture from the texture unit to avoid asserts
+ GL_CALL(BindTexture(info.fTarget, 0));
+
+ auto parameters = sk_make_sp<GrGLTextureParameters>();
+ parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
+ fResetTimestampForTextureParameters);
+
+ return GrBackendTexture(w, h, mipMapped, info, std::move(parameters));
+}
+
+void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) {
+ SkASSERT(GrBackendApi::kOpenGL == tex.backend());
+
+ GrGLTextureInfo info;
+ if (tex.getGLTextureInfo(&info)) {
+ GL_CALL(DeleteTextures(1, &info.fID));
+ }
+}
+
+#if GR_TEST_UTILS
+
+bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(GrBackendApi::kOpenGL == tex.backend());
+
+ GrGLTextureInfo info;
+ if (!tex.getGLTextureInfo(&info)) {
+ return false;
+ }
+
+ GrGLboolean result;
+ GL_CALL_RET(result, IsTexture(info.fID));
+
+ return (GR_GL_TRUE == result);
+}
+
+GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(int w, int h,
+ GrColorType colorType) {
+ if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
+ return GrBackendRenderTarget(); // invalid
+ }
+ this->handleDirtyContext();
+ auto format = this->glCaps().getFormatFromColorType(colorType);
+ if (!this->glCaps().isFormatRenderable(format, 1)) {
+ return {};
+ }
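+    // BGRA8 typically isn't a legal renderbuffer format, so back the color attachment with a
+    // texture in that case.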
+ bool useTexture = format == GrGLFormat::kBGRA8;
+ int sFormatIdx = this->getCompatibleStencilIndex(format);
+ if (sFormatIdx < 0) {
+ return {};
+ }
+ GrGLuint colorID = 0;
+ GrGLuint stencilID = 0;
+ auto deleteIDs = [&] {
+ if (colorID) {
+ if (useTexture) {
+ GL_CALL(DeleteTextures(1, &colorID));
+ } else {
+ GL_CALL(DeleteRenderbuffers(1, &colorID));
+ }
+ }
+ if (stencilID) {
+ GL_CALL(DeleteRenderbuffers(1, &stencilID));
+ }
+ };
+
+ if (useTexture) {
+ GL_CALL(GenTextures(1, &colorID));
+ } else {
+ GL_CALL(GenRenderbuffers(1, &colorID));
+ }
+ GL_CALL(GenRenderbuffers(1, &stencilID));
+ if (!stencilID || !colorID) {
+ deleteIDs();
+ return {};
+ }
+
+ GrGLFramebufferInfo info;
+ info.fFBOID = 0;
+ info.fFormat = GrGLFormatToEnum(format);
+ GL_CALL(GenFramebuffers(1, &info.fFBOID));
+ if (!info.fFBOID) {
+ deleteIDs();
+ return {};
+ }
+
+ this->invalidateBoundRenderTarget();
+
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
+ if (useTexture) {
+ GrGLTextureParameters::SamplerOverriddenState initialState;
+ colorID = this->createTexture2D({w, h}, format, GrRenderable::kYes, &initialState, 1);
+ if (!colorID) {
+ deleteIDs();
+ return {};
+ }
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
+ colorID, 0));
+ } else {
+ GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format);
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
+ GL_ALLOC_CALL(this->glInterface(),
+ RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, w, h));
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_RENDERBUFFER, colorID));
+ }
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
+ auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx].fInternalFormat;
+ GL_ALLOC_CALL(this->glInterface(),
+ RenderbufferStorage(GR_GL_RENDERBUFFER, stencilBufferFormat, w, h));
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER,
+ stencilID));
+ if (this->glCaps().stencilFormats()[sFormatIdx].fPacked) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, stencilID));
+ }
+
+ // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
+    // has this rule that if a renderbuffer/texture is deleted and an FBO other than the current FBO
+ // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
+ // renderbuffers/texture.
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
+ deleteIDs();
+
+ this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
+ GrGLenum status;
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
+ this->deleteFramebuffer(info.fFBOID);
+ return {};
+ }
+ auto stencilBits = SkToInt(this->glCaps().stencilFormats()[sFormatIdx].fStencilBits);
+
+ GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, stencilBits, info);
+ SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat()));
+ return beRT;
+}
+
+void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
+ SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
+ GrGLFramebufferInfo info;
+ if (backendRT.getGLFramebufferInfo(&info)) {
+ if (info.fFBOID) {
+ this->deleteFramebuffer(info.fFBOID);
+ }
+ }
+}
+
+void GrGLGpu::testingOnly_flushGpuAndSync() {
+ GL_CALL(Finish());
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
+ const GrBuffer* ibuf) {
+ GrGLAttribArrayState* attribState;
+
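+    // Core profile contexts have no default vertex array, so lazily create and reuse our own VAO.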
+ if (gpu->glCaps().isCoreProfile()) {
+ if (!fCoreProfileVertexArray) {
+ GrGLuint arrayID;
+ GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
+ int attrCount = gpu->glCaps().maxVertexAttributes();
+ fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
+ }
+ if (ibuf) {
+ attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
+ } else {
+ attribState = fCoreProfileVertexArray->bind(gpu);
+ }
+ } else {
+ if (ibuf) {
+ // bindBuffer implicitly binds VAO 0 when binding an index buffer.
+ gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
+ } else {
+ this->setVertexArrayID(gpu, 0);
+ }
+ int attrCount = gpu->glCaps().maxVertexAttributes();
+ if (fDefaultVertexArrayAttribState.count() != attrCount) {
+ fDefaultVertexArrayAttribState.resize(attrCount);
+ }
+ attribState = &fDefaultVertexArrayAttribState;
+ }
+ return attribState;
+}
+
+void GrGLGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
+ // If we inserted semaphores during the flush, we need to call GLFlush.
+ bool insertedSemaphore = info.fNumSemaphores > 0 && this->caps()->semaphoreSupport();
+ // We call finish if the client told us to sync or if we have a finished proc but don't support
+ // GLsync objects.
+ bool finish = (info.fFlags & kSyncCpu_GrFlushFlag) ||
+ (info.fFinishedProc && !this->caps()->fenceSyncSupport());
+ if (finish) {
+ GL_CALL(Finish());
+ // After a finish everything previously sent to GL is done.
+ for (const auto& cb : fFinishCallbacks) {
+ cb.fCallback(cb.fContext);
+ this->deleteSync(cb.fSync);
+ }
+ fFinishCallbacks.clear();
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ } else {
+ if (info.fFinishedProc) {
+ FinishCallback callback;
+ callback.fCallback = info.fFinishedProc;
+ callback.fContext = info.fFinishedContext;
+ callback.fSync = (GrGLsync)this->insertFence();
+ fFinishCallbacks.push_back(callback);
+ GL_CALL(Flush());
+ } else if (insertedSemaphore) {
+ // Must call flush after semaphores in case they are waited on another GL context.
+ GL_CALL(Flush());
+ }
+ // See if any previously inserted finish procs are good to go.
+ this->checkFinishProcs();
+ }
+}
+
+void GrGLGpu::submit(GrOpsRenderPass* renderPass) {
+    // The GrGLOpsRenderPass doesn't buffer ops, so there is nothing to do here.
+ SkASSERT(fCachedOpsRenderPass.get() == renderPass);
+ fCachedOpsRenderPass->reset();
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() {
+ SkASSERT(this->caps()->fenceSyncSupport());
+ GrGLsync sync;
+ GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
+ GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(GrGLsync));
+ return (GrFence)sync;
+}
+
+bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) {
+ GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0;
+ GrGLenum result;
+ GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout));
+ return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
+}
+
+bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) {
+ return this->waitSync((GrGLsync)fence, timeout, /* flush = */ true);
+}
+
+void GrGLGpu::deleteFence(GrFence fence) const {
+ this->deleteSync((GrGLsync)fence);
+}
+
+sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) {
+ SkASSERT(this->caps()->semaphoreSupport());
+ return GrGLSemaphore::Make(this, isOwned);
+}
+
+sk_sp<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) {
+ SkASSERT(this->caps()->semaphoreSupport());
+ return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership);
+}
+
+void GrGLGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
+ GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
+
+ GrGLsync sync;
+ GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
+ glSem->setSync(sync);
+}
+
+void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
+ GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
+
+ GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
+}
+
+void GrGLGpu::checkFinishProcs() {
+ // Bail after the first unfinished sync since we expect them to signal in the order inserted.
+ while (!fFinishCallbacks.empty() && this->waitSync(fFinishCallbacks.front().fSync,
+ /* timeout = */ 0, /* flush = */ false)) {
+ fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext);
+ this->deleteSync(fFinishCallbacks.front().fSync);
+ fFinishCallbacks.pop_front();
+ }
+}
+
+void GrGLGpu::deleteSync(GrGLsync sync) const {
+ GL_CALL(DeleteSync(sync));
+}
+
+void GrGLGpu::insertEventMarker(const char* msg) {
+ GL_CALL(InsertEventMarker(strlen(msg), msg));
+}
+
+sk_sp<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
+ // Set up a semaphore to be signaled once the data is ready, and flush GL
+ sk_sp<GrSemaphore> semaphore = this->makeSemaphore(true);
+ this->insertSemaphore(semaphore);
+ // We must call flush here to make sure the GrGLsync object gets created and sent to the gpu.
+ GL_CALL(Flush());
+
+ return semaphore;
+}
+
+int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
+ switch (GrSLCombinedSamplerTypeForTextureType(texture->texturePriv().textureType())) {
+ case kTexture2DSampler_GrSLType:
+ return 0;
+ case kTexture2DRectSampler_GrSLType:
+ return 1;
+ case kTextureExternalSampler_GrSLType:
+ return 2;
+ default:
+ SK_ABORT("Unexpected samper type");
+ }
+}
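+
+// The returned index selects the matching entry in fCopyPrograms, e.g. a plain 2D texture
+// is copied with fCopyPrograms[0] and an external (OES) texture with fCopyPrograms[2].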
+
+#ifdef SK_ENABLE_DUMP_GPU
+#include "src/utils/SkJSONWriter.h"
+void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
+ // We are called by the base class, which has already called beginObject(). We choose to nest
+ // all of our caps information in a named sub-object.
+ writer->beginObject("GL GPU");
+
+ const GrGLubyte* str;
+ GL_CALL_RET(str, GetString(GR_GL_VERSION));
+ writer->appendString("GL_VERSION", (const char*)(str));
+ GL_CALL_RET(str, GetString(GR_GL_RENDERER));
+ writer->appendString("GL_RENDERER", (const char*)(str));
+ GL_CALL_RET(str, GetString(GR_GL_VENDOR));
+ writer->appendString("GL_VENDOR", (const char*)(str));
+ GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
+ writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
+
+ writer->appendName("extensions");
+ glInterface()->fExtensions.dumpJSON(writer);
+
+ writer->endObject();
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpu.h b/gfx/skia/skia/src/gpu/gl/GrGLGpu.h
new file mode 100644
index 0000000000..e49f45915e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpu.h
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLGpu_DEFINED
+#define GrGLGpu_DEFINED
+
+#include <list>
+#include "include/core/SkTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkLRUCache.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrNativeRect.h"
+#include "src/gpu/GrWindowRectsState.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/gl/GrGLContext.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+#include "src/gpu/gl/GrGLProgram.h"
+#include "src/gpu/gl/GrGLRenderTarget.h"
+#include "src/gpu/gl/GrGLStencilAttachment.h"
+#include "src/gpu/gl/GrGLTexture.h"
+#include "src/gpu/gl/GrGLVertexArray.h"
+
+class GrGLBuffer;
+class GrGLOpsRenderPass;
+class GrPipeline;
+class GrSwizzle;
+
+class GrGLGpu final : public GrGpu, private GrMesh::SendToGpuImpl {
+public:
+ static sk_sp<GrGpu> Make(sk_sp<const GrGLInterface>, const GrContextOptions&, GrContext*);
+ ~GrGLGpu() override;
+
+ void disconnect(DisconnectType) override;
+
+ const GrGLContext& glContext() const { return *fGLContext; }
+
+ const GrGLInterface* glInterface() const { return fGLContext->interface(); }
+ const GrGLContextInfo& ctxInfo() const { return *fGLContext; }
+ GrGLStandard glStandard() const { return fGLContext->standard(); }
+ GrGLVersion glVersion() const { return fGLContext->version(); }
+ GrGLSLGeneration glslGeneration() const { return fGLContext->glslGeneration(); }
+ const GrGLCaps& glCaps() const { return *fGLContext->caps(); }
+
+ GrGLPathRendering* glPathRendering() {
+ SkASSERT(glCaps().shaderCaps()->pathRenderingSupport());
+ return static_cast<GrGLPathRendering*>(pathRendering());
+ }
+
+ // Used by GrGLProgram to configure OpenGL state.
+ void bindTexture(int unitIdx, GrSamplerState samplerState, const GrSwizzle&, GrGLTexture*);
+
+ // These functions should be used to bind GL objects. They track the GL state and skip redundant
+ // bindings. Making the equivalent glBind calls directly will confuse the state tracking.
+ void bindVertexArray(GrGLuint id) {
+ fHWVertexArrayState.setVertexArrayID(this, id);
+ }
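+
+ // For example (sketch, with a hypothetical 'vaoID'): prefer
+ //     this->bindVertexArray(vaoID);
+ // over a raw GR_GL_CALL(this->glInterface(), BindVertexArray(vaoID)), which would leave
+ // fHWVertexArrayState tracking a stale ID.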
+
+ // These callbacks update state tracking when GL objects are deleted. They are called from
+ // GrGLResource onRelease functions.
+ void notifyVertexArrayDelete(GrGLuint id) {
+ fHWVertexArrayState.notifyVertexArrayDelete(id);
+ }
+
+ // Binds a buffer to the GL target corresponding to 'type', updates internal state tracking, and
+ // returns the GL target the buffer was bound to.
+ // When 'type' is GrGpuBufferType::kIndex, this function will also implicitly bind the default VAO.
+ // If the caller wishes to bind an index buffer to a specific VAO, it can call glBind directly.
+ GrGLenum bindBuffer(GrGpuBufferType type, const GrBuffer*);
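+
+ // A minimal usage sketch (with a hypothetical GrBuffer* 'buf'); the returned target can be
+ // fed straight into subsequent GL calls:
+ //     GrGLenum target = this->bindBuffer(GrGpuBufferType::kVertex, buf);
+ //     GL_CALL(BufferSubData(target, offset, size, data));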
+
+ // The GrGLOpsRenderPass does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the draw call for the corresponding passthrough function
+ // on GrGLOpsRenderPass.
+ void draw(GrRenderTarget*, const GrProgramInfo&, const GrMesh[], int meshCount);
+
+ // GrMesh::SendToGpuImpl methods. These issue the actual GL draw calls.
+ // Marked final as a hint to the compiler to not use virtual dispatch.
+ void sendMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) final;
+
+ void sendIndexedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ GrPrimitiveRestart) final;
+
+ void sendInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex, const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) final;
+
+ void sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart) final;
+
+ // The GrGLOpsRenderPass does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the clear call for the corresponding passthrough function
+ // on GrGLOpsRenderPass.
+ void clear(const GrFixedClip&, const SkPMColor4f&, GrRenderTarget*, GrSurfaceOrigin);
+
+ // The GrGLOpsRenderPass does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the clearStencil call for the corresponding passthrough
+ // function on GrGLOpsRenderPass.
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask,
+ GrRenderTarget*, GrSurfaceOrigin);
+
+ // FIXME (michaelludwig): Can this go away and just use clearStencilClip() + marking the
+ // stencil buffer as not dirty?
+ void clearStencil(GrRenderTarget*, int clearValue);
+
+ void beginCommandBuffer(GrRenderTarget*, const SkIRect& bounds, GrSurfaceOrigin,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore);
+
+ void endCommandBuffer(GrRenderTarget*, const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore);
+
+ GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget*, GrSurfaceOrigin, const SkIRect&,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) override;
+
+ void invalidateBoundRenderTarget() {
+ fHWBoundRenderTargetUniqueID.makeInvalid();
+ }
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(
+ const GrRenderTarget* rt, int width, int height, int numStencilSamples) override;
+ void deleteBackendTexture(const GrBackendTexture&) override;
+
+ bool precompileShader(const SkData& key, const SkData& data) override {
+ return fProgramCache->precompileShader(key, data);
+ }
+
+#if GR_TEST_UTILS
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+
+ GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
+ void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
+
+ const GrGLContext* glContextForTesting() const override { return &this->glContext(); }
+
+ void resetShaderCacheForTesting() const override { fProgramCache->reset(); }
+
+ void testingOnly_flushGpuAndSync() override;
+#endif
+
+ void submit(GrOpsRenderPass* renderPass) override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() override;
+ bool waitFence(GrFence, uint64_t timeout) override;
+ void deleteFence(GrFence) const override;
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) override;
+ void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+
+ void checkFinishProcs() override;
+
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
+
+ void deleteSync(GrGLsync) const;
+
+ void insertEventMarker(const char*);
+
+ void bindFramebuffer(GrGLenum fboTarget, GrGLuint fboid);
+ void deleteFramebuffer(GrGLuint fboid);
+
+private:
+ GrGLGpu(std::unique_ptr<GrGLContext>, GrContext*);
+
+ // GrGpu overrides
+ GrBackendTexture onCreateBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected) override;
+
+ void onResetContext(uint32_t resetBits) override;
+
+ void onResetTextureBindings() override;
+
+ void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) override;
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
+
+ sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) override;
+ sk_sp<GrTexture> onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType compression, SkBudgeted,
+ const void* data) override;
+
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+ const void* data) override;
+
+ sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType, GrWrapOwnership,
+ GrWrapCacheable, GrIOType) override;
+ sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
+ GrColorType, GrWrapOwnership,
+ GrWrapCacheable) override;
+ sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) override;
+ sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt, GrColorType) override;
+
+ // Given a GL format return the index into the stencil format array on GrGLCaps to a
+ // compatible stencil format, or negative if there is no compatible stencil format.
+ int getCompatibleStencilIndex(GrGLFormat format);
+
+ void onFBOChanged();
+
+ // Returns the created texture's ID on success, or zero on failure. The texture's initial
+ // sampler parameters are cached in |initialState|.
+ GrGLuint createTexture2D(const SkISize& size,
+ GrGLFormat format,
+ GrRenderable,
+ GrGLTextureParameters::SamplerOverriddenState* initialState,
+ int mipLevelCount);
+
+ GrGLuint createCompressedTexture2D(const SkISize& size, GrGLFormat format,
+ SkImage::CompressionType compression,
+ GrGLTextureParameters::SamplerOverriddenState* initialState,
+ const void* data);
+
+ bool onReadPixels(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) override;
+
+ bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
+ bool onTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) override;
+ bool readOrTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType,
+ void* offsetOrPtr, int rowWidthInPixels);
+
+ // Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the
+ // PIXEL_UNPACK_BUFFER is unbound.
+ void unbindCpuToGpuXferBuffer();
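+
+ // e.g. (sketch) an upload path looks like:
+ //     this->unbindCpuToGpuXferBuffer();
+ //     GL_CALL(TexSubImage2D(target, level, left, top, width, height, format, type, data));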
+
+ void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO) override;
+
+ bool onRegenerateMipMapLevels(GrTexture*) override;
+
+ bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ // binds texture unit in GL
+ void setTextureUnit(int unitIdx);
+
+ // Flushes the state in the GrProgramInfo to GL. Returns false if the state couldn't be set.
+ bool flushGLState(GrRenderTarget*, const GrProgramInfo&, GrPrimitiveType);
+
+ void flushProgram(sk_sp<GrGLProgram>);
+
+ // Version for programs that aren't GrGLProgram.
+ void flushProgram(GrGLuint);
+
+ // Sets up vertex/instance attribute pointers and strides.
+ void setupGeometry(const GrBuffer* indexBuffer,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int baseInstance,
+ GrPrimitiveRestart);
+
+ void flushBlendAndColorWrite(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle&);
+
+ void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
+
+ bool waitSync(GrGLsync, uint64_t timeout, bool flush);
+
+ bool copySurfaceAsDraw(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+ void copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+ bool copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ static bool BlendCoeffReferencesConstant(GrBlendCoeff coeff);
+
+ class ProgramCache : public ::SkNoncopyable {
+ public:
+ ProgramCache(GrGLGpu* gpu);
+ ~ProgramCache();
+
+ void abandon();
+ void reset();
+ GrGLProgram* refProgram(GrGLGpu*, GrRenderTarget*, const GrProgramInfo&, GrPrimitiveType);
+ bool precompileShader(const SkData& key, const SkData& data);
+
+ private:
+ struct Entry;
+
+ struct DescHash {
+ uint32_t operator()(const GrProgramDesc& desc) const {
+ return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
+ }
+ };
+
+ SkLRUCache<GrProgramDesc, std::unique_ptr<Entry>, DescHash> fMap;
+
+ GrGLGpu* fGpu;
+ };
+
+ void flushColorWrite(bool writeColor);
+ void flushClearColor(const SkPMColor4f&);
+
+ // Flushes the scissor state to GL.
+ void flushScissor(const GrScissorState&, int rtWidth, int rtHeight, GrSurfaceOrigin rtOrigin);
+
+ // disables the scissor
+ void disableScissor();
+
+ void flushWindowRectangles(const GrWindowRectsState&, const GrGLRenderTarget*, GrSurfaceOrigin);
+ void disableWindowRectangles();
+
+ int numTextureUnits() const { return this->caps()->shaderCaps()->maxFragmentSamplers(); }
+
+ // Binds a texture to a target on the "scratch" texture unit to use for texture operations
+ // other than usual draw flow (i.e. a GrGLProgram derived from a GrPipeline used to draw
+ // GrMesh). It ensures that such operations don't negatively interact with draws.
+ // The active texture unit and the binding for 'target' will change.
+ void bindTextureToScratchUnit(GrGLenum target, GrGLint textureID);
+
+ // The passed bounds contains the render target's color values that will subsequently be
+ // written.
+ void flushRenderTarget(GrGLRenderTarget*, GrSurfaceOrigin, const SkIRect& bounds);
+ // This version has an implicit bounds of the entire render target.
+ void flushRenderTarget(GrGLRenderTarget*);
+ // This version can be used when the render target's colors will not be written.
+ void flushRenderTargetNoColorWrites(GrGLRenderTarget*);
+
+ // Need not be called if flushRenderTarget is used.
+ void flushViewport(int width, int height);
+
+ void flushStencil(const GrStencilSettings&, GrSurfaceOrigin);
+ void disableStencil();
+
+ // rt is used only if useHWAA is true.
+ void flushHWAAState(GrRenderTarget* rt, bool useHWAA);
+
+ void flushFramebufferSRGB(bool enable);
+
+ bool uploadTexData(GrGLFormat textureFormat, GrColorType textureColorType, int texWidth,
+ int texHeight, GrGLenum target, int left, int top, int width, int height,
+ GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount,
+ GrMipMapsStatus* mipMapsStatus = nullptr);
+
+ // Helper for onCreateCompressedTexture. Compressed textures are read-only so we only use this
+ // to populate a new texture. Returns false if we failed to create and upload the texture.
+ bool uploadCompressedTexData(GrGLFormat,
+ SkImage::CompressionType,
+ const SkISize& size,
+ GrGLenum target,
+ const void* data);
+
+ bool createRenderTargetObjects(const GrGLTexture::Desc&,
+ int sampleCount,
+ GrGLRenderTarget::IDs*);
+
+ enum TempFBOTarget {
+ kSrc_TempFBOTarget,
+ kDst_TempFBOTarget
+ };
+
+ // Binds a surface as an FBO for copying, reading, or clearing. If the surface already owns an
+ // FBO ID then that ID is bound. If not, the surface is temporarily bound to an FBO and that
+ // FBO is bound. This must be paired with a call to unbindSurfaceFBOForPixelOps().
+ void bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
+ TempFBOTarget tempFBOTarget);
+
+ // Must be called if bindSurfaceFBOForPixelOps was used to bind a surface for copying.
+ void unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget);
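+
+ // Typical pairing (sketch, with a hypothetical 'surface'):
+ //     this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
+ //     // ... ReadPixels / CopyTexSubImage2D / clears against the bound FBO ...
+ //     this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);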
+
+#ifdef SK_ENABLE_DUMP_GPU
+ void onDumpJSON(SkJSONWriter*) const override;
+#endif
+
+ bool createCopyProgram(GrTexture* srcTexture);
+ bool createMipmapProgram(int progIdx);
+
+ std::unique_ptr<GrGLContext> fGLContext;
+
+ // GL program-related state
+ ProgramCache* fProgramCache;
+
+ ///////////////////////////////////////////////////////////////////////////
+ ///@name Caching of GL State
+ ///@{
+ int fHWActiveTextureUnitIdx;
+
+ GrGLuint fHWProgramID;
+ sk_sp<GrGLProgram> fHWProgram;
+
+ enum TriState {
+ kNo_TriState,
+ kYes_TriState,
+ kUnknown_TriState
+ };
+
+ GrGLuint fTempSrcFBOID;
+ GrGLuint fTempDstFBOID;
+
+ GrGLuint fStencilClearFBOID;
+
+ // last scissor / viewport scissor state seen by the GL.
+ struct {
+ TriState fEnabled;
+ GrNativeRect fRect;
+ void invalidate() {
+ fEnabled = kUnknown_TriState;
+ fRect.invalidate();
+ }
+ } fHWScissorSettings;
+
+ class {
+ public:
+ bool valid() const { return kInvalidSurfaceOrigin != fRTOrigin; }
+ void invalidate() { fRTOrigin = kInvalidSurfaceOrigin; }
+ bool knownDisabled() const { return this->valid() && !fWindowState.enabled(); }
+ void setDisabled() {
+ fRTOrigin = kTopLeft_GrSurfaceOrigin;
+ fWindowState.setDisabled();
+ }
+
+ void set(GrSurfaceOrigin rtOrigin, int width, int height,
+ const GrWindowRectsState& windowState) {
+ fRTOrigin = rtOrigin;
+ fWidth = width;
+ fHeight = height;
+ fWindowState = windowState;
+ }
+
+ bool knownEqualTo(GrSurfaceOrigin rtOrigin, int width, int height,
+ const GrWindowRectsState& windowState) const {
+ if (!this->valid()) {
+ return false;
+ }
+ if (fWindowState.numWindows() &&
+ (fRTOrigin != rtOrigin || fWidth != width || fHeight != height)) {
+ return false;
+ }
+ return fWindowState == windowState;
+ }
+
+ private:
+ enum { kInvalidSurfaceOrigin = -1 };
+
+ int fRTOrigin;
+ int fWidth;
+ int fHeight;
+ GrWindowRectsState fWindowState;
+ } fHWWindowRectsState;
+
+ GrNativeRect fHWViewport;
+
+ /**
+ * Tracks vertex attrib array state.
+ */
+ class HWVertexArrayState {
+ public:
+ HWVertexArrayState() : fCoreProfileVertexArray(nullptr) { this->invalidate(); }
+
+ ~HWVertexArrayState() { delete fCoreProfileVertexArray; }
+
+ void invalidate() {
+ fBoundVertexArrayIDIsValid = false;
+ fDefaultVertexArrayAttribState.invalidate();
+ if (fCoreProfileVertexArray) {
+ fCoreProfileVertexArray->invalidateCachedState();
+ }
+ }
+
+ void notifyVertexArrayDelete(GrGLuint id) {
+ if (fBoundVertexArrayIDIsValid && fBoundVertexArrayID == id) {
+ // Does implicit bind to 0
+ fBoundVertexArrayID = 0;
+ }
+ }
+
+ void setVertexArrayID(GrGLGpu* gpu, GrGLuint arrayID) {
+ if (!gpu->glCaps().vertexArrayObjectSupport()) {
+ SkASSERT(0 == arrayID);
+ return;
+ }
+ if (!fBoundVertexArrayIDIsValid || arrayID != fBoundVertexArrayID) {
+ GR_GL_CALL(gpu->glInterface(), BindVertexArray(arrayID));
+ fBoundVertexArrayIDIsValid = true;
+ fBoundVertexArrayID = arrayID;
+ }
+ }
+
+ /**
+ * Binds the vertex array that should be used for internal draws, and returns its attrib
+ * state. This binds the default VAO (ID=zero) unless we are on a core profile, in which
+ * case we use a dummy array instead.
+ *
+ * If an index buffer is provided, it will be bound to the vertex array. Otherwise the
+ * index buffer binding will be left unchanged.
+ *
+ * The returned GrGLAttribArrayState should be used to set vertex attribute arrays.
+ */
+ GrGLAttribArrayState* bindInternalVertexArray(GrGLGpu*, const GrBuffer* ibuff = nullptr);
+
+ private:
+ GrGLuint fBoundVertexArrayID;
+ bool fBoundVertexArrayIDIsValid;
+
+ // We return a non-const pointer to this from bindInternalVertexArray when vertex array 0
+ // is bound. However, this class is internal to GrGLGpu and this object never leaks out of
+ // GrGLGpu.
+ GrGLAttribArrayState fDefaultVertexArrayAttribState;
+
+ // This is used when we're using a core profile.
+ GrGLVertexArray* fCoreProfileVertexArray;
+ } fHWVertexArrayState;
+
+ struct {
+ GrGLenum fGLTarget;
+ GrGpuResource::UniqueID fBoundBufferUniqueID;
+ bool fBufferZeroKnownBound;
+
+ void invalidate() {
+ fBoundBufferUniqueID.makeInvalid();
+ fBufferZeroKnownBound = false;
+ }
+ } fHWBufferState[kGrGpuBufferTypeCount];
+
+ auto* hwBufferState(GrGpuBufferType type) {
+ unsigned typeAsUInt = static_cast<unsigned>(type);
+ SkASSERT(typeAsUInt < SK_ARRAY_COUNT(fHWBufferState));
+ return &fHWBufferState[typeAsUInt];
+ }
+
+ struct {
+ GrBlendEquation fEquation;
+ GrBlendCoeff fSrcCoeff;
+ GrBlendCoeff fDstCoeff;
+ SkPMColor4f fConstColor;
+ bool fConstColorValid;
+ TriState fEnabled;
+
+ void invalidate() {
+ fEquation = kIllegal_GrBlendEquation;
+ fSrcCoeff = kIllegal_GrBlendCoeff;
+ fDstCoeff = kIllegal_GrBlendCoeff;
+ fConstColorValid = false;
+ fEnabled = kUnknown_TriState;
+ }
+ } fHWBlendState;
+
+ TriState fMSAAEnabled;
+
+ GrStencilSettings fHWStencilSettings;
+ GrSurfaceOrigin fHWStencilOrigin;
+ TriState fHWStencilTestEnabled;
+
+ TriState fHWWriteToColor;
+ GrGpuResource::UniqueID fHWBoundRenderTargetUniqueID;
+ TriState fHWSRGBFramebuffer;
+
+ class TextureUnitBindings {
+ public:
+ TextureUnitBindings() = default;
+ TextureUnitBindings(const TextureUnitBindings&) = delete;
+ TextureUnitBindings& operator=(const TextureUnitBindings&) = delete;
+
+ GrGpuResource::UniqueID boundID(GrGLenum target) const;
+ bool hasBeenModified(GrGLenum target) const;
+ void setBoundID(GrGLenum target, GrGpuResource::UniqueID);
+ void invalidateForScratchUse(GrGLenum target);
+ void invalidateAllTargets(bool markUnmodified);
+
+ private:
+ struct TargetBinding {
+ GrGpuResource::UniqueID fBoundResourceID;
+ bool fHasBeenModified = false;
+ };
+ TargetBinding fTargetBindings[3];
+ };
+ SkAutoTArray<TextureUnitBindings> fHWTextureUnitBindings;
+
+ GrGLfloat fHWClearColor[4];
+
+ GrGLuint fBoundDrawFramebuffer = 0;
+
+ /** IDs for copy surface program. (3 sampler types) */
+ struct {
+ GrGLuint fProgram = 0;
+ GrGLint fTextureUniform = 0;
+ GrGLint fTexCoordXformUniform = 0;
+ GrGLint fPosXformUniform = 0;
+ } fCopyPrograms[3];
+ sk_sp<GrGLBuffer> fCopyProgramArrayBuffer;
+
+ /** IDs for texture mipmap program. (4 filter configurations) */
+ struct {
+ GrGLuint fProgram = 0;
+ GrGLint fTextureUniform = 0;
+ GrGLint fTexCoordXformUniform = 0;
+ } fMipmapPrograms[4];
+ sk_sp<GrGLBuffer> fMipmapProgramArrayBuffer;
+
+ static int TextureToCopyProgramIdx(GrTexture* texture);
+
+ static int TextureSizeToMipmapProgramIdx(int width, int height) {
+ const bool wide = (width > 1) && SkToBool(width & 0x1);
+ const bool tall = (height > 1) && SkToBool(height & 0x1);
+ return (wide ? 0x2 : 0x0) | (tall ? 0x1 : 0x0);
+ }
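+
+ // Worked examples: 5x4 has odd width only, so the index is 0x2; 3x7 has both odd, giving
+ // 0x3; any power-of-two size (e.g. 8x8) gives 0x0. The index selects among the four
+ // fMipmapPrograms above.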
+
+ GrPrimitiveType fLastPrimitiveType;
+
+ GrGLTextureParameters::ResetTimestamp fResetTimestampForTextureParameters = 0;
+
+ class SamplerObjectCache;
+ std::unique_ptr<SamplerObjectCache> fSamplerObjectCache;
+
+ std::unique_ptr<GrGLOpsRenderPass> fCachedOpsRenderPass;
+
+ struct FinishCallback {
+ GrGpuFinishedProc fCallback;
+ GrGpuFinishedContext fContext;
+ GrGLsync fSync;
+ };
+ std::list<FinishCallback> fFinishCallbacks;
+
+ SkDEBUGCODE(bool fIsExecutingCommandBuffer_DebugOnly = false);
+
+ friend class GrGLPathRendering; // For accessing setTextureUnit.
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp
new file mode 100644
index 0000000000..0109741edb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLGpu.h"
+
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/gl/builders/GrGLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+
+struct GrGLGpu::ProgramCache::Entry {
+ Entry(sk_sp<GrGLProgram> program)
+ : fProgram(std::move(program)) {}
+
+ Entry(const GrGLPrecompiledProgram& precompiledProgram)
+ : fPrecompiledProgram(precompiledProgram) {}
+
+ sk_sp<GrGLProgram> fProgram;
+ GrGLPrecompiledProgram fPrecompiledProgram;
+};
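+
+// An Entry is in one of two states: precompiled only (fProgram is null while
+// fPrecompiledProgram.fProgramID is non-zero) or fully built (fProgram is non-null).
+// refProgram() below upgrades a precompiled entry to a full GrGLProgram on first use.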
+
+GrGLGpu::ProgramCache::ProgramCache(GrGLGpu* gpu)
+ : fMap(gpu->getContext()->priv().options().fRuntimeProgramCacheSize)
+ , fGpu(gpu) {}
+
+GrGLGpu::ProgramCache::~ProgramCache() {}
+
+void GrGLGpu::ProgramCache::abandon() {
+ fMap.foreach([](std::unique_ptr<Entry>* e) {
+ if ((*e)->fProgram) {
+ (*e)->fProgram->abandon();
+ }
+ });
+
+ this->reset();
+}
+
+void GrGLGpu::ProgramCache::reset() {
+ fMap.reset();
+}
+
+GrGLProgram* GrGLGpu::ProgramCache::refProgram(GrGLGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType) {
+ // TODO: can this be unified between GL, Vk and Mtl?
+ // Get GrGLProgramDesc
+ GrProgramDesc desc;
+ if (!GrProgramDesc::Build(&desc, renderTarget, programInfo, primitiveType, gpu)) {
+ GrCapsDebugf(gpu->caps(), "Failed to build GL program descriptor!\n");
+ return nullptr;
+ }
+
+ std::unique_ptr<Entry>* entry = fMap.find(desc);
+ if (entry && !(*entry)->fProgram) {
+ // We've pre-compiled the GL program, but don't have the GrGLProgram scaffolding
+ const GrGLPrecompiledProgram* precompiledProgram = &((*entry)->fPrecompiledProgram);
+ SkASSERT(precompiledProgram->fProgramID != 0);
+ GrGLProgram* program = GrGLProgramBuilder::CreateProgram(renderTarget, programInfo,
+ &desc, fGpu,
+ precompiledProgram);
+ if (nullptr == program) {
+ // Should we purge the program ID from the cache at this point?
+ SkDEBUGFAIL("Couldn't create program from precompiled program");
+ return nullptr;
+ }
+ (*entry)->fProgram.reset(program);
+ } else if (!entry) {
+ // We have a cache miss
+ GrGLProgram* program = GrGLProgramBuilder::CreateProgram(renderTarget, programInfo,
+ &desc, fGpu);
+ if (nullptr == program) {
+ return nullptr;
+ }
+ entry = fMap.insert(desc, std::unique_ptr<Entry>(new Entry(sk_sp<GrGLProgram>(program))));
+ }
+
+ return SkRef((*entry)->fProgram.get());
+}
+
+bool GrGLGpu::ProgramCache::precompileShader(const SkData& key, const SkData& data) {
+ GrProgramDesc desc;
+ if (!GrProgramDesc::BuildFromData(&desc, key.data(), key.size())) {
+ return false;
+ }
+
+ std::unique_ptr<Entry>* entry = fMap.find(desc);
+ if (entry) {
+ // We've already seen/compiled this shader
+ return true;
+ }
+
+ GrGLPrecompiledProgram precompiledProgram;
+ if (!GrGLProgramBuilder::PrecompileProgram(&precompiledProgram, fGpu, data)) {
+ return false;
+ }
+
+ fMap.insert(desc, std::unique_ptr<Entry>(new Entry(precompiledProgram)));
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLInterfaceAutogen.cpp b/gfx/skia/skia/src/gpu/gl/GrGLInterfaceAutogen.cpp
new file mode 100644
index 0000000000..dd1207e72e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLInterfaceAutogen.cpp
@@ -0,0 +1,742 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * THIS FILE IS AUTOGENERATED
+ * Make edits to tools/gpu/gl/interface/templates.go or they will
+ * be overwritten.
+ */
+
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#include <stdio.h>
+
+GrGLInterface::GrGLInterface() {
+ fStandard = kNone_GrGLStandard;
+}
+
+#define RETURN_FALSE_INTERFACE \
+ SkDEBUGF("%s:%d GrGLInterface::validate() failed.\n", __FILE__, __LINE__); \
+ return false
+
+bool GrGLInterface::validate() const {
+
+ if (kNone_GrGLStandard == fStandard) {
+ RETURN_FALSE_INTERFACE;
+ }
+
+ if (!fExtensions.isInitialized()) {
+ RETURN_FALSE_INTERFACE;
+ }
+
+ GrGLVersion glVer = GrGLGetVersion(this);
+ if (GR_GL_INVALID_VER == glVer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ // Autogenerated content follows
+ if (!fFunctions.fActiveTexture ||
+ !fFunctions.fAttachShader ||
+ !fFunctions.fBindAttribLocation ||
+ !fFunctions.fBindBuffer ||
+ !fFunctions.fBindTexture ||
+ !fFunctions.fBlendColor ||
+ !fFunctions.fBlendEquation ||
+ !fFunctions.fBlendFunc ||
+ !fFunctions.fBufferData ||
+ !fFunctions.fBufferSubData ||
+ !fFunctions.fClear ||
+ !fFunctions.fClearColor ||
+ !fFunctions.fClearStencil ||
+ !fFunctions.fColorMask ||
+ !fFunctions.fCompileShader ||
+ !fFunctions.fCompressedTexImage2D ||
+ !fFunctions.fCompressedTexSubImage2D ||
+ !fFunctions.fCopyTexSubImage2D ||
+ !fFunctions.fCreateProgram ||
+ !fFunctions.fCreateShader ||
+ !fFunctions.fCullFace ||
+ !fFunctions.fDeleteBuffers ||
+ !fFunctions.fDeleteProgram ||
+ !fFunctions.fDeleteShader ||
+ !fFunctions.fDeleteTextures ||
+ !fFunctions.fDepthMask ||
+ !fFunctions.fDisable ||
+ !fFunctions.fDisableVertexAttribArray ||
+ !fFunctions.fDrawArrays ||
+ !fFunctions.fDrawElements ||
+ !fFunctions.fEnable ||
+ !fFunctions.fEnableVertexAttribArray ||
+ !fFunctions.fFinish ||
+ !fFunctions.fFlush ||
+ !fFunctions.fFrontFace ||
+ !fFunctions.fGenBuffers ||
+ !fFunctions.fGenTextures ||
+ !fFunctions.fGetBufferParameteriv ||
+ !fFunctions.fGetError ||
+ !fFunctions.fGetIntegerv ||
+ !fFunctions.fGetProgramInfoLog ||
+ !fFunctions.fGetProgramiv ||
+ !fFunctions.fGetShaderInfoLog ||
+ !fFunctions.fGetShaderiv ||
+ !fFunctions.fGetString ||
+ !fFunctions.fGetUniformLocation ||
+ !fFunctions.fIsTexture ||
+ !fFunctions.fLineWidth ||
+ !fFunctions.fLinkProgram ||
+ !fFunctions.fPixelStorei ||
+ !fFunctions.fReadPixels ||
+ !fFunctions.fScissor ||
+ !fFunctions.fShaderSource ||
+ !fFunctions.fStencilFunc ||
+ !fFunctions.fStencilFuncSeparate ||
+ !fFunctions.fStencilMask ||
+ !fFunctions.fStencilMaskSeparate ||
+ !fFunctions.fStencilOp ||
+ !fFunctions.fStencilOpSeparate ||
+ !fFunctions.fTexImage2D ||
+ !fFunctions.fTexParameterf ||
+ !fFunctions.fTexParameterfv ||
+ !fFunctions.fTexParameteri ||
+ !fFunctions.fTexParameteriv ||
+ !fFunctions.fTexSubImage2D ||
+ !fFunctions.fUniform1f ||
+ !fFunctions.fUniform1fv ||
+ !fFunctions.fUniform1i ||
+ !fFunctions.fUniform1iv ||
+ !fFunctions.fUniform2f ||
+ !fFunctions.fUniform2fv ||
+ !fFunctions.fUniform2i ||
+ !fFunctions.fUniform2iv ||
+ !fFunctions.fUniform3f ||
+ !fFunctions.fUniform3fv ||
+ !fFunctions.fUniform3i ||
+ !fFunctions.fUniform3iv ||
+ !fFunctions.fUniform4f ||
+ !fFunctions.fUniform4fv ||
+ !fFunctions.fUniform4i ||
+ !fFunctions.fUniform4iv ||
+ !fFunctions.fUniformMatrix2fv ||
+ !fFunctions.fUniformMatrix3fv ||
+ !fFunctions.fUniformMatrix4fv ||
+ !fFunctions.fUseProgram ||
+ !fFunctions.fVertexAttrib1f ||
+ !fFunctions.fVertexAttrib2fv ||
+ !fFunctions.fVertexAttrib3fv ||
+ !fFunctions.fVertexAttrib4fv ||
+ !fFunctions.fVertexAttribPointer ||
+ !fFunctions.fViewport) {
+ RETURN_FALSE_INTERFACE;
+ }
+
+ if (GR_IS_GR_GL(fStandard)) {
+ if (!fFunctions.fDrawBuffer ||
+ !fFunctions.fPolygonMode) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fGetStringi) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_OES_vertex_array_object"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0)) ||
+ fExtensions.has("GL_OES_vertex_array_object") ||
+ fExtensions.has("OES_vertex_array_object")))) {
+ if (!fFunctions.fBindVertexArray ||
+ !fFunctions.fDeleteVertexArrays ||
+ !fFunctions.fGenVertexArrays) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0) && fExtensions.has("GL_EXT_blend_func_extended"))))) {
+ if (!fFunctions.fBindFragDataLocation) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,3)) ||
+ fExtensions.has("GL_ARB_blend_func_extended"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0) && fExtensions.has("GL_EXT_blend_func_extended"))))) {
+ if (!fFunctions.fBindFragDataLocationIndexed) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_KHR_blend_equation_advanced") ||
+ fExtensions.has("GL_NV_blend_equation_advanced"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_KHR_blend_equation_advanced") ||
+ fExtensions.has("GL_NV_blend_equation_advanced")))) {
+ if (!fFunctions.fBlendBarrier) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,4)) ||
+ fExtensions.has("GL_ARB_clear_texture"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_clear_texture")))) {
+ // all functions were marked optional or test_only
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,1)) ||
+ fExtensions.has("GL_ARB_draw_instanced") ||
+ fExtensions.has("GL_EXT_draw_instanced"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_EXT_draw_instanced"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fDrawArraysInstanced ||
+ !fFunctions.fDrawElementsInstanced) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fDrawBuffers ||
+ !fFunctions.fReadBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,0)) ||
+ fExtensions.has("GL_ARB_draw_indirect"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,1))))) {
+ if (!fFunctions.fDrawArraysIndirect ||
+ !fFunctions.fDrawElementsIndirect) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fDrawRangeElements) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_ARB_texture_multisample"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,1))))) {
+ if (!fFunctions.fGetMultisamplefv) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,1))))) {
+ if (!fFunctions.fGetTexLevelParameteriv) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_multi_draw_indirect"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_multi_draw_indirect")))) {
+ if (!fFunctions.fMultiDrawArraysIndirect ||
+ !fFunctions.fMultiDrawElementsIndirect) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,1)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_OES_texture_buffer") ||
+ fExtensions.has("GL_EXT_texture_buffer")))) {
+ if (!fFunctions.fTexBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_OES_texture_buffer") ||
+ fExtensions.has("GL_EXT_texture_buffer")))) {
+ if (!fFunctions.fTexBufferRange) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,2)) ||
+ fExtensions.has("GL_ARB_texture_storage") ||
+ fExtensions.has("GL_EXT_texture_storage"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_EXT_texture_storage"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fTexStorage2D) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,5)) ||
+ fExtensions.has("GL_ARB_texture_barrier") ||
+ fExtensions.has("GL_NV_texture_barrier"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_NV_texture_barrier")))) {
+ if (!fFunctions.fTextureBarrier) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_discard_framebuffer")))) {
+ if (!fFunctions.fDiscardFramebuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_QCOM_tiled_rendering")))) {
+ if (!fFunctions.fEndTiling ||
+ !fFunctions.fStartTiling) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_ARB_instanced_arrays"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_EXT_instanced_arrays"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fVertexAttribDivisor) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fVertexAttribIPointer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_ARB_framebuffer_object") ||
+ fExtensions.has("GL_EXT_framebuffer_object"))) ||
+ GR_IS_GR_GL_ES(fStandard) ||
+ GR_IS_GR_WEBGL(fStandard)) {
+ if (!fFunctions.fBindFramebuffer ||
+ !fFunctions.fBindRenderbuffer ||
+ !fFunctions.fCheckFramebufferStatus ||
+ !fFunctions.fDeleteFramebuffers ||
+ !fFunctions.fDeleteRenderbuffers ||
+ !fFunctions.fFramebufferRenderbuffer ||
+ !fFunctions.fFramebufferTexture2D ||
+ !fFunctions.fGenFramebuffers ||
+ !fFunctions.fGenRenderbuffers ||
+ !fFunctions.fGenerateMipmap ||
+ !fFunctions.fGetFramebufferAttachmentParameteriv ||
+ !fFunctions.fGetRenderbufferParameteriv ||
+ !fFunctions.fRenderbufferStorage) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_ARB_framebuffer_object") ||
+ fExtensions.has("GL_EXT_framebuffer_blit"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_CHROMIUM_framebuffer_multisample") ||
+ fExtensions.has("GL_ANGLE_framebuffer_blit")))) {
+ if (!fFunctions.fBlitFramebuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_ARB_framebuffer_object") ||
+ fExtensions.has("GL_EXT_framebuffer_multisample"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_CHROMIUM_framebuffer_multisample") ||
+ fExtensions.has("GL_ANGLE_framebuffer_multisample"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fRenderbufferStorageMultisample) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_map_sub")))) {
+ if (!fFunctions.fMapBufferSubData ||
+ !fFunctions.fMapTexSubImage2D ||
+ !fFunctions.fUnmapBufferSubData ||
+ !fFunctions.fUnmapTexSubImage2D) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_multisampled_render_to_texture") ||
+ fExtensions.has("GL_IMG_multisampled_render_to_texture")))) {
+ if (!fFunctions.fFramebufferTexture2DMultisample) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_multisampled_render_to_texture")))) {
+ if (!fFunctions.fRenderbufferStorageMultisampleES2EXT) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_IMG_multisampled_render_to_texture")))) {
+ if (!fFunctions.fRenderbufferStorageMultisampleES2EXT) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_APPLE_framebuffer_multisample")))) {
+ if (!fFunctions.fResolveMultisampleFramebuffer ||
+ !fFunctions.fRenderbufferStorageMultisampleES2APPLE) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_OES_mapbuffer")))) {
+ if (!fFunctions.fMapBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_OES_mapbuffer")))) {
+ if (!fFunctions.fUnmapBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_ARB_map_buffer_range"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_EXT_map_buffer_range")))) {
+ if (!fFunctions.fFlushMappedBufferRange ||
+ !fFunctions.fMapBufferRange) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_EXT_debug_marker"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_debug_marker")))) {
+ if (!fFunctions.fInsertEventMarker ||
+ !fFunctions.fPopGroupMarker ||
+ !fFunctions.fPushGroupMarker) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_program_interface_query"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,1))))) {
+ if (!fFunctions.fGetProgramResourceLocation) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_NV_path_rendering"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_path_rendering") ||
+ fExtensions.has("GL_NV_path_rendering")))) {
+ if (!fFunctions.fMatrixLoadIdentity ||
+ !fFunctions.fMatrixLoadf) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_NV_path_rendering"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_path_rendering") ||
+ fExtensions.has("GL_NV_path_rendering")))) {
+ if (!fFunctions.fCoverFillPath ||
+ !fFunctions.fCoverFillPathInstanced ||
+ !fFunctions.fCoverStrokePath ||
+ !fFunctions.fCoverStrokePathInstanced ||
+ !fFunctions.fDeletePaths ||
+ !fFunctions.fGenPaths ||
+ !fFunctions.fIsPath ||
+ !fFunctions.fPathCommands ||
+ !fFunctions.fPathParameterf ||
+ !fFunctions.fPathParameteri ||
+ !fFunctions.fPathStencilFunc ||
+ !fFunctions.fStencilFillPath ||
+ !fFunctions.fStencilFillPathInstanced ||
+ !fFunctions.fStencilStrokePath ||
+ !fFunctions.fStencilStrokePathInstanced) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_path_rendering")))) {
+ if (!fFunctions.fBindFragmentInputLocation) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_NV_framebuffer_mixed_samples"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_framebuffer_mixed_samples") ||
+ fExtensions.has("GL_NV_framebuffer_mixed_samples")))) {
+ if (!fFunctions.fCoverageModulation) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_KHR_debug"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_KHR_debug")))) {
+ if (!fFunctions.fDebugMessageCallback ||
+ !fFunctions.fDebugMessageControl ||
+ !fFunctions.fDebugMessageInsert ||
+ !fFunctions.fGetDebugMessageLog ||
+ !fFunctions.fObjectLabel ||
+ !fFunctions.fPopDebugGroup ||
+ !fFunctions.fPushDebugGroup) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_CHROMIUM_bind_uniform_location")))) {
+ if (!fFunctions.fBindUniformLocation) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ fExtensions.has("GL_EXT_window_rectangles"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ fExtensions.has("GL_EXT_window_rectangles")))) {
+ if (!fFunctions.fWindowRectangles) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_ARB_sync"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_APPLE_sync"))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fClientWaitSync ||
+ !fFunctions.fDeleteSync ||
+ !fFunctions.fFenceSync ||
+ !fFunctions.fIsSync ||
+ !fFunctions.fWaitSync) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,2)) ||
+ fExtensions.has("GL_ARB_internalformat_query"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0))))) {
+ if (!fFunctions.fGetInternalformativ) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,1)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_OES_get_program_binary")))) {
+ if (!fFunctions.fGetProgramBinary ||
+ !fFunctions.fProgramBinary) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,1)))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0))))) {
+ if (!fFunctions.fProgramParameteri) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,2)) ||
+ fExtensions.has("GL_ARB_sampler_objects"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fBindSampler ||
+ !fFunctions.fDeleteSamplers ||
+ !fFunctions.fGenSamplers ||
+ !fFunctions.fSamplerParameteri ||
+ !fFunctions.fSamplerParameteriv) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard)) {
+ if (!fFunctions.fGetQueryObjectiv) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (GR_IS_GR_GL(fStandard) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)) ||
+ fExtensions.has("GL_EXT_occlusion_query_boolean")))) {
+#if GR_TEST_UTILS
+ if (!fFunctions.fBeginQuery ||
+ !fFunctions.fDeleteQueries ||
+ !fFunctions.fEndQuery ||
+ !fFunctions.fGenQueries ||
+ !fFunctions.fGetQueryObjectuiv ||
+ !fFunctions.fGetQueryiv) {
+ RETURN_FALSE_INTERFACE;
+ }
+#endif
+ // all functions were marked optional or test_only
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,3)) ||
+ fExtensions.has("GL_ARB_timer_query") ||
+ fExtensions.has("GL_EXT_timer_query")))) {
+ if (!fFunctions.fGetQueryObjecti64v ||
+ !fFunctions.fGetQueryObjectui64v) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(3,3)) ||
+ fExtensions.has("GL_ARB_timer_query")))) {
+ if (!fFunctions.fQueryCounter) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_invalidate_subdata")))) {
+ if (!fFunctions.fInvalidateBufferData ||
+ !fFunctions.fInvalidateBufferSubData ||
+ !fFunctions.fInvalidateTexImage ||
+ !fFunctions.fInvalidateTexSubImage) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_invalidate_subdata"))) ||
+ (GR_IS_GR_GL_ES(fStandard) && (
+ (glVer >= GR_GL_VER(3,0)))) ||
+ (GR_IS_GR_WEBGL(fStandard) && (
+ (glVer >= GR_GL_VER(2,0))))) {
+ if (!fFunctions.fInvalidateFramebuffer ||
+ !fFunctions.fInvalidateSubFramebuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((GR_IS_GR_GL(fStandard) && (
+ (glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_ES2_compatibility"))) ||
+ GR_IS_GR_GL_ES(fStandard) ||
+ GR_IS_GR_WEBGL(fStandard)) {
+ if (!fFunctions.fGetShaderPrecisionFormat) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+
+ // End autogenerated content
+ return true;
+}
+
+#if GR_TEST_UTILS
+
+void GrGLInterface::abandon() const {
+ const_cast<GrGLInterface*>(this)->fFunctions = GrGLInterface::Functions();
+}
+
+#endif // GR_TEST_UTILS
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLMakeNativeInterface_none.cpp b/gfx/skia/skia/src/gpu/gl/GrGLMakeNativeInterface_none.cpp
new file mode 100644
index 0000000000..d4ac8bcfe4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLMakeNativeInterface_none.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLInterface.h"
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() { return nullptr; }
+
+const GrGLInterface* GrGLCreateNativeInterface() { return nullptr; }
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.cpp b/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.cpp
new file mode 100644
index 0000000000..217fdb0a28
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLOpsRenderPass.h"
+
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+
+void GrGLOpsRenderPass::set(GrRenderTarget* rt, const SkIRect& contentBounds,
+ GrSurfaceOrigin origin, const LoadAndStoreInfo& colorInfo,
+ const StencilLoadAndStoreInfo& stencilInfo) {
+ SkASSERT(fGpu);
+ SkASSERT(!fRenderTarget);
+ SkASSERT(fGpu == rt->getContext()->priv().getGpu());
+
+ this->INHERITED::set(rt, origin);
+ fContentBounds = contentBounds;
+ fColorLoadAndStoreInfo = colorInfo;
+ fStencilLoadAndStoreInfo = stencilInfo;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.h b/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.h
new file mode 100644
index 0000000000..42702008d3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLOpsRenderPass.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLOpsRenderPass_DEFINED
+#define GrGLOpsRenderPass_DEFINED
+
+#include "src/gpu/GrOpsRenderPass.h"
+
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLRenderTarget.h"
+
+class GrGLGpu;
+class GrGLRenderTarget;
+
+class GrGLOpsRenderPass : public GrOpsRenderPass {
+/**
+ * We do not actually buffer up draws or do any work in this class for GL. Instead, commands
+ * are immediately sent to the gpu to execute. Thus all the commands in this class are simply
+ * pass-through functions to the corresponding calls in the GrGLGpu class.
+ */
+public:
+ GrGLOpsRenderPass(GrGLGpu* gpu) : fGpu(gpu) {}
+
+ void begin() override {
+ fGpu->beginCommandBuffer(fRenderTarget, fContentBounds, fOrigin, fColorLoadAndStoreInfo,
+ fStencilLoadAndStoreInfo);
+ }
+
+ void end() override {
+ fGpu->endCommandBuffer(fRenderTarget, fColorLoadAndStoreInfo, fStencilLoadAndStoreInfo);
+ }
+
+ void insertEventMarker(const char* msg) override {
+ fGpu->insertEventMarker(msg);
+ }
+
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {
+ state->doUpload(upload);
+ }
+
+ void set(GrRenderTarget*, const SkIRect& contentBounds, GrSurfaceOrigin,
+ const LoadAndStoreInfo&, const StencilLoadAndStoreInfo&);
+
+ void reset() {
+ fRenderTarget = nullptr;
+ }
+
+private:
+ GrGpu* gpu() override { return fGpu; }
+
+ void onDraw(const GrProgramInfo& programInfo, const GrMesh mesh[], int meshCount,
+ const SkRect& bounds) override {
+ fGpu->draw(fRenderTarget, programInfo, mesh, meshCount);
+ }
+
+ void onClear(const GrFixedClip& clip, const SkPMColor4f& color) override {
+ fGpu->clear(clip, color, fRenderTarget, fOrigin);
+ }
+
+ void onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) override {
+ fGpu->clearStencilClip(clip, insideStencilMask, fRenderTarget, fOrigin);
+ }
+
+ GrGLGpu* fGpu;
+ SkIRect fContentBounds;
+ LoadAndStoreInfo fColorLoadAndStoreInfo;
+ StencilLoadAndStoreInfo fStencilLoadAndStoreInfo;
+
+ typedef GrOpsRenderPass INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp b/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp
new file mode 100644
index 0000000000..e001bf93a1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLPath.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+
+namespace {
+inline GrGLubyte verb_to_gl_path_cmd(SkPath::Verb verb) {
+ static const GrGLubyte gTable[] = {
+ GR_GL_MOVE_TO,
+ GR_GL_LINE_TO,
+ GR_GL_QUADRATIC_CURVE_TO,
+ GR_GL_CONIC_CURVE_TO,
+ GR_GL_CUBIC_CURVE_TO,
+ GR_GL_CLOSE_PATH,
+ };
+ GR_STATIC_ASSERT(0 == SkPath::kMove_Verb);
+ GR_STATIC_ASSERT(1 == SkPath::kLine_Verb);
+ GR_STATIC_ASSERT(2 == SkPath::kQuad_Verb);
+ GR_STATIC_ASSERT(3 == SkPath::kConic_Verb);
+ GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
+ GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
+
+ SkASSERT(verb >= 0 && (size_t)verb < SK_ARRAY_COUNT(gTable));
+ return gTable[verb];
+}
+
+#ifdef SK_DEBUG
+inline int num_coords(SkPath::Verb verb) {
+ static const int gTable[] = {
+ 2, // move
+ 2, // line
+ 4, // quad
+ 5, // conic
+ 6, // cubic
+ 0, // close
+ };
+ GR_STATIC_ASSERT(0 == SkPath::kMove_Verb);
+ GR_STATIC_ASSERT(1 == SkPath::kLine_Verb);
+ GR_STATIC_ASSERT(2 == SkPath::kQuad_Verb);
+ GR_STATIC_ASSERT(3 == SkPath::kConic_Verb);
+ GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
+ GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
+
+ SkASSERT(verb >= 0 && (size_t)verb < SK_ARRAY_COUNT(gTable));
+ return gTable[verb];
+}
+#endif
+
+inline GrGLenum join_to_gl_join(SkPaint::Join join) {
+ static GrGLenum gSkJoinsToGrGLJoins[] = {
+ GR_GL_MITER_REVERT,
+ GR_GL_ROUND,
+ GR_GL_BEVEL
+ };
+ GR_STATIC_ASSERT(0 == SkPaint::kMiter_Join);
+ GR_STATIC_ASSERT(1 == SkPaint::kRound_Join);
+ GR_STATIC_ASSERT(2 == SkPaint::kBevel_Join);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gSkJoinsToGrGLJoins) == SkPaint::kJoinCount);
+ return gSkJoinsToGrGLJoins[join];
+}
+
+inline GrGLenum cap_to_gl_cap(SkPaint::Cap cap) {
+ static GrGLenum gSkCapsToGrGLCaps[] = {
+ GR_GL_FLAT,
+ GR_GL_ROUND,
+ GR_GL_SQUARE
+ };
+ GR_STATIC_ASSERT(0 == SkPaint::kButt_Cap);
+ GR_STATIC_ASSERT(1 == SkPaint::kRound_Cap);
+ GR_STATIC_ASSERT(2 == SkPaint::kSquare_Cap);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gSkCapsToGrGLCaps) == SkPaint::kCapCount);
+ return gSkCapsToGrGLCaps[cap];
+}
+
+#ifdef SK_DEBUG
+inline void verify_floats(const float* floats, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(!SkScalarIsNaN(SkFloatToScalar(floats[i])));
+ }
+}
+#endif
+
+inline void points_to_coords(const SkPoint points[], size_t first_point, size_t amount,
+ GrGLfloat coords[]) {
+ for (size_t i = 0; i < amount; ++i) {
+ coords[i * 2] = SkScalarToFloat(points[first_point + i].fX);
+ coords[i * 2 + 1] = SkScalarToFloat(points[first_point + i].fY);
+ }
+}
+
+template<bool checkForDegenerates>
+inline bool init_path_object_for_general_path(GrGLGpu* gpu, GrGLuint pathID,
+ const SkPath& skPath) {
+ SkDEBUGCODE(int numCoords = 0);
+ int verbCnt = skPath.countVerbs();
+ int pointCnt = skPath.countPoints();
+ int minCoordCnt = pointCnt * 2;
+
+ SkSTArray<16, GrGLubyte, true> pathCommands(verbCnt);
+ SkSTArray<16, GrGLfloat, true> pathCoords(minCoordCnt);
+ bool lastVerbWasMove = true; // A path with just "close;" means "moveto(0,0); close;"
+ SkPoint points[4];
+ SkPath::RawIter iter(skPath);
+ SkPath::Verb verb;
+ while ((verb = iter.next(points)) != SkPath::kDone_Verb) {
+ pathCommands.push_back(verb_to_gl_path_cmd(verb));
+ GrGLfloat coords[6];
+ int coordsForVerb;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (checkForDegenerates) {
+ lastVerbWasMove = true;
+ }
+ points_to_coords(points, 0, 1, coords);
+ coordsForVerb = 2;
+ break;
+ case SkPath::kLine_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsLineDegenerate(points[0], points[1], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+
+ points_to_coords(points, 1, 1, coords);
+ coordsForVerb = 2;
+ break;
+ case SkPath::kConic_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsQuadDegenerate(points[0], points[1], points[2], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 2, coords);
+ coords[4] = SkScalarToFloat(iter.conicWeight());
+ coordsForVerb = 5;
+ break;
+ case SkPath::kQuad_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsQuadDegenerate(points[0], points[1], points[2], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 2, coords);
+ coordsForVerb = 4;
+ break;
+ case SkPath::kCubic_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsCubicDegenerate(points[0], points[1], points[2], points[3],
+ true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 3, coords);
+ coordsForVerb = 6;
+ break;
+ case SkPath::kClose_Verb:
+ if (checkForDegenerates) {
+ if (lastVerbWasMove) {
+ // Interpret "move(x,y);close;" as "move(x,y);lineto(x,y);close;",
+ // which produces a degenerate segment.
+ return false;
+ }
+ }
+ continue;
+ default:
+ SkASSERT(false); // Not reached.
+ continue;
+ }
+ SkDEBUGCODE(numCoords += num_coords(verb));
+ SkDEBUGCODE(verify_floats(coords, coordsForVerb));
+ pathCoords.push_back_n(coordsForVerb, coords);
+ }
+ SkASSERT(verbCnt == pathCommands.count());
+ SkASSERT(numCoords == pathCoords.count());
+
+ GR_GL_CALL(gpu->glInterface(),
+ PathCommands(pathID, pathCommands.count(), pathCommands.begin(),
+ pathCoords.count(), GR_GL_FLOAT, pathCoords.begin()));
+ return true;
+}
+
+/*
+ * For now, paths natively support only the winding and even-odd fill types.
+ */
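+// Note: the inverse fill types map to the same GL fill mode; the inverted coverage is
+// presumably resolved by the covering geometry elsewhere in the pipeline.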
+static GrPathRendering::FillType convert_skpath_filltype(SkPath::FillType fill) {
+ switch (fill) {
+ default:
+ SK_ABORT("Incomplete Switch\n");
+ case SkPath::kWinding_FillType:
+ case SkPath::kInverseWinding_FillType:
+ return GrPathRendering::kWinding_FillType;
+ case SkPath::kEvenOdd_FillType:
+ case SkPath::kInverseEvenOdd_FillType:
+ return GrPathRendering::kEvenOdd_FillType;
+ }
+}
+
+} // namespace
+
+bool GrGLPath::InitPathObjectPathDataCheckingDegenerates(GrGLGpu* gpu, GrGLuint pathID,
+ const SkPath& skPath) {
+ return init_path_object_for_general_path<true>(gpu, pathID, skPath);
+}
+
+void GrGLPath::InitPathObjectPathData(GrGLGpu* gpu,
+ GrGLuint pathID,
+ const SkPath& skPath) {
+ SkASSERT(!skPath.isEmpty());
+
+#if 1 // SK_SCALAR_IS_FLOAT
+ // This branch does type punning, converting SkPoint* to GrGLfloat*.
+ if ((skPath.getSegmentMasks() & SkPath::kConic_SegmentMask) == 0) {
+ int verbCnt = skPath.countVerbs();
+ int pointCnt = skPath.countPoints();
+ int coordCnt = pointCnt * 2;
+ SkSTArray<16, GrGLubyte, true> pathCommands(verbCnt);
+ SkSTArray<16, GrGLfloat, true> pathCoords(coordCnt);
+
+ static_assert(sizeof(SkPoint) == sizeof(GrGLfloat) * 2, "sk_point_not_two_floats");
+
+ pathCommands.resize_back(verbCnt);
+ pathCoords.resize_back(coordCnt);
+ skPath.getPoints(reinterpret_cast<SkPoint*>(&pathCoords[0]), pointCnt);
+ skPath.getVerbs(&pathCommands[0], verbCnt);
+
+ SkDEBUGCODE(int verbCoordCnt = 0);
+ for (int i = 0; i < verbCnt; ++i) {
+ SkPath::Verb v = static_cast<SkPath::Verb>(pathCommands[i]);
+ pathCommands[i] = verb_to_gl_path_cmd(v);
+ SkDEBUGCODE(verbCoordCnt += num_coords(v));
+ }
+ SkASSERT(verbCnt == pathCommands.count());
+ SkASSERT(verbCoordCnt == pathCoords.count());
+ SkDEBUGCODE(verify_floats(&pathCoords[0], pathCoords.count()));
+ GR_GL_CALL(gpu->glInterface(), PathCommands(pathID, pathCommands.count(), &pathCommands[0],
+ pathCoords.count(), GR_GL_FLOAT,
+ &pathCoords[0]));
+ return;
+ }
+#endif
+ SkAssertResult(init_path_object_for_general_path<false>(gpu, pathID, skPath));
+}
+
+void GrGLPath::InitPathObjectStroke(GrGLGpu* gpu, GrGLuint pathID, const SkStrokeRec& stroke) {
+ SkASSERT(!stroke.isHairlineStyle());
+ GR_GL_CALL(gpu->glInterface(),
+ PathParameterf(pathID, GR_GL_PATH_STROKE_WIDTH, SkScalarToFloat(stroke.getWidth())));
+ GR_GL_CALL(gpu->glInterface(),
+ PathParameterf(pathID, GR_GL_PATH_MITER_LIMIT, SkScalarToFloat(stroke.getMiter())));
+ GrGLenum join = join_to_gl_join(stroke.getJoin());
+ GR_GL_CALL(gpu->glInterface(), PathParameteri(pathID, GR_GL_PATH_JOIN_STYLE, join));
+ GrGLenum cap = cap_to_gl_cap(stroke.getCap());
+ GR_GL_CALL(gpu->glInterface(), PathParameteri(pathID, GR_GL_PATH_END_CAPS, cap));
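+ // PATH_STROKE_BOUND bounds the stroke approximation error as a fraction of the stroke width
+ // (per NV_path_rendering); 0.02 allows up to 2% error.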
+ GR_GL_CALL(gpu->glInterface(), PathParameterf(pathID, GR_GL_PATH_STROKE_BOUND, 0.02f));
+}
+
+void GrGLPath::InitPathObjectEmptyPath(GrGLGpu* gpu, GrGLuint pathID) {
+ GR_GL_CALL(gpu->glInterface(), PathCommands(pathID, 0, nullptr, 0, GR_GL_FLOAT, nullptr));
+}
+
+GrGLPath::GrGLPath(GrGLGpu* gpu, const SkPath& origSkPath, const GrStyle& style)
+ : INHERITED(gpu, origSkPath, style),
+ fPathID(gpu->glPathRendering()->genPaths(1)) {
+
+ if (origSkPath.isEmpty()) {
+ InitPathObjectEmptyPath(gpu, fPathID);
+ fShouldStroke = false;
+ fShouldFill = false;
+ } else {
+ const SkPath* skPath = &origSkPath;
+ SkTLazy<SkPath> tmpPath;
+ SkStrokeRec stroke(SkStrokeRec::kFill_InitStyle);
+
+ if (style.pathEffect()) {
+ // Skia stroking and NVPR stroking differ with respect to dashing
+ // patterns.
+ // Convert a dashing (or other) path effect to either a stroke or a fill.
+ if (style.applyPathEffectToPath(tmpPath.init(), &stroke, *skPath, SK_Scalar1)) {
+ skPath = tmpPath.get();
+ }
+ } else {
+ stroke = style.strokeRec();
+ }
+
+ bool didInit = false;
+ if (stroke.needToApply() && stroke.getCap() != SkPaint::kButt_Cap) {
+ // Skia stroking and NVPR stroking differ with respect to the end caps
+ // of empty subpaths.
+ // Convert the stroke to a fill if the path contains empty subpaths.
+ didInit = InitPathObjectPathDataCheckingDegenerates(gpu, fPathID, *skPath);
+ if (!didInit) {
+ if (!tmpPath.isValid()) {
+ tmpPath.init();
+ }
+ SkAssertResult(stroke.applyToPath(tmpPath.get(), *skPath));
+ skPath = tmpPath.get();
+ stroke.setFillStyle();
+ }
+ }
+
+ if (!didInit) {
+ InitPathObjectPathData(gpu, fPathID, *skPath);
+ }
+
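+ // A stroke-and-fill style needs both the stencil-fill and stencil-stroke passes.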
+ fShouldStroke = stroke.needToApply();
+ fShouldFill = stroke.isFillStyle() ||
+ stroke.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
+
+ fFillType = convert_skpath_filltype(skPath->getFillType());
+ fBounds = skPath->getBounds();
+ SkScalar radius = stroke.getInflationRadius();
+ fBounds.outset(radius, radius);
+ if (fShouldStroke) {
+ InitPathObjectStroke(gpu, fPathID, stroke);
+ }
+ }
+
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+void GrGLPath::onRelease() {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ if (0 != fPathID) {
+ static_cast<GrGLGpu*>(this->getGpu())->glPathRendering()->deletePaths(fPathID, 1);
+ fPathID = 0;
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLPath::onAbandon() {
+ fPathID = 0;
+
+ INHERITED::onAbandon();
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPath.h b/gfx/skia/skia/src/gpu/gl/GrGLPath.h
new file mode 100644
index 0000000000..905012057c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPath.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLPath_DEFINED
+#define GrGLPath_DEFINED
+
+#include "include/gpu/gl/GrGLTypes.h"
+#include "src/gpu/GrPath.h"
+
+class GrGLGpu;
+class GrStyle;
+
+/**
+ * Currently this represents a path built using GL_NV_path_rendering. If we
+ * support other GL path extensions then this would have to have a type enum
+ * and/or be subclassed.
+ */
+
+class GrGLPath : public GrPath {
+public:
+ static bool InitPathObjectPathDataCheckingDegenerates(GrGLGpu*,
+ GrGLuint pathID,
+ const SkPath&);
+ static void InitPathObjectPathData(GrGLGpu*,
+ GrGLuint pathID,
+ const SkPath&);
+ static void InitPathObjectStroke(GrGLGpu*, GrGLuint pathID, const SkStrokeRec&);
+
+ static void InitPathObjectEmptyPath(GrGLGpu*, GrGLuint pathID);
+
+ GrGLPath(GrGLGpu*, const SkPath&, const GrStyle&);
+ GrGLuint pathID() const { return fPathID; }
+
+ bool shouldStroke() const { return fShouldStroke; }
+ bool shouldFill() const { return fShouldFill; }
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ // TODO: Figure out how to get an approximate size of the path in Gpu memory.
+ size_t onGpuMemorySize() const override { return 100; }
+
+ GrGLuint fPathID;
+ bool fShouldStroke;
+ bool fShouldFill;
+
+ typedef GrPath INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp
new file mode 100644
index 0000000000..abcae33067
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/gl/GrGLPath.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->gpu()->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->gpu()->glInterface(), RET, X)
+
+// Number of paths to allocate per glGenPaths call. The call can be overly slow on the command
+// buffer GL implementation: it has a result value, so waiting for the call to complete is needed.
+static const GrGLsizei kPathIDPreallocationAmount = 65536;
+
+GR_STATIC_ASSERT(0 == GrPathRendering::kNone_PathTransformType);
+GR_STATIC_ASSERT(1 == GrPathRendering::kTranslateX_PathTransformType);
+GR_STATIC_ASSERT(2 == GrPathRendering::kTranslateY_PathTransformType);
+GR_STATIC_ASSERT(3 == GrPathRendering::kTranslate_PathTransformType);
+GR_STATIC_ASSERT(4 == GrPathRendering::kAffine_PathTransformType);
+GR_STATIC_ASSERT(GrPathRendering::kAffine_PathTransformType == GrPathRendering::kLast_PathTransformType);
+
+#ifdef SK_DEBUG
+
+static void verify_floats(const float* floats, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(!SkScalarIsNaN(SkFloatToScalar(floats[i])));
+ }
+}
+#endif
+
+static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
+ switch (op) {
+ default:
+ SK_ABORT("Unexpected path fill.");
+ /* fallthrough */
+ case GrStencilOp::kIncWrap:
+ return GR_GL_COUNT_UP;
+ case GrStencilOp::kInvert:
+ return GR_GL_INVERT;
+ }
+}
+
+GrGLPathRendering::GrGLPathRendering(GrGLGpu* gpu)
+ : GrPathRendering(gpu)
+ , fPreallocatedPathCount(0) {
+ const GrGLInterface* glInterface = gpu->glInterface();
+ fCaps.bindFragmentInputSupport = (bool)glInterface->fFunctions.fBindFragmentInputLocation;
+}
+
+GrGLPathRendering::~GrGLPathRendering() {
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ }
+}
+
+void GrGLPathRendering::disconnect(GrGpu::DisconnectType type) {
+ if (GrGpu::DisconnectType::kCleanup == type) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ }
+ fPreallocatedPathCount = 0;
+}
+
+void GrGLPathRendering::resetContext() {
+ fHWProjectionMatrixState.invalidate();
+ // we don't use the model view matrix.
+ GL_CALL(MatrixLoadIdentity(GR_GL_PATH_MODELVIEW));
+
+ fHWPathStencilSettings.invalidate();
+}
+
+sk_sp<GrPath> GrGLPathRendering::createPath(const SkPath& inPath, const GrStyle& style) {
+ return sk_make_sp<GrGLPath>(this->gpu(), inPath, style);
+}
+
+void GrGLPathRendering::onStencilPath(const StencilPathArgs& args, const GrPath* path) {
+ GrGLGpu* gpu = this->gpu();
+ SkASSERT(gpu->caps()->shaderCaps()->pathRenderingSupport());
+ gpu->flushColorWrite(false);
+
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(args.fProxy->peekRenderTarget());
+ SkISize size = SkISize::Make(rt->width(), rt->height());
+ this->setProjectionMatrix(*args.fViewMatrix, size, args.fProxy->origin());
+ gpu->flushScissor(*args.fScissor, rt->width(), rt->height(), args.fProxy->origin());
+ gpu->flushHWAAState(rt, args.fUseHWAA);
+ gpu->flushRenderTarget(rt);
+
+ const GrGLPath* glPath = static_cast<const GrGLPath*>(path);
+
+ this->flushPathStencilSettings(*args.fStencil);
+
+ GrGLenum fillMode =
+ gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.frontAndBack().fPassOp);
+ GrGLint writeMask = fHWPathStencilSettings.frontAndBack().fWriteMask;
+
+ if (glPath->shouldFill()) {
+ GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
+ }
+ if (glPath->shouldStroke()) {
+ GL_CALL(StencilStrokePath(glPath->pathID(), 0xffff, writeMask));
+ }
+}
+
+void GrGLPathRendering::onDrawPath(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ const GrStencilSettings& stencilPassSettings,
+ const GrPath* path) {
+ if (!this->gpu()->flushGLState(renderTarget, programInfo, GrPrimitiveType::kPath)) {
+ return;
+ }
+
+ const GrGLPath* glPath = static_cast<const GrGLPath*>(path);
+
+ this->flushPathStencilSettings(stencilPassSettings);
+
+ GrGLenum fillMode =
+ gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.frontAndBack().fPassOp);
+ GrGLint writeMask = fHWPathStencilSettings.frontAndBack().fWriteMask;
+
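+ // For stroke-and-fill, stencil the fill first, then stencil-and-cover the stroke so that a
+ // single cover pass draws both.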
+ if (glPath->shouldStroke()) {
+ if (glPath->shouldFill()) {
+ GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
+ }
+ GL_CALL(StencilThenCoverStrokePath(glPath->pathID(), 0xffff, writeMask,
+ GR_GL_BOUNDING_BOX));
+ } else {
+ GL_CALL(StencilThenCoverFillPath(glPath->pathID(), fillMode, writeMask,
+ GR_GL_BOUNDING_BOX));
+ }
+}
+
+void GrGLPathRendering::setProgramPathFragmentInputTransform(GrGLuint program, GrGLint location,
+ GrGLenum genMode, GrGLint components,
+ const SkMatrix& matrix) {
+ float coefficients[3 * 3];
+ SkASSERT(components >= 1 && components <= 3);
+
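+ // Each row of three coefficients generates one output component as a dot product with
+ // (x, y, 1), matching the matrix packing below.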
+ coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
+ coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
+ coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);
+
+ if (components >= 2) {
+ coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
+ coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
+ coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
+ }
+
+ if (components >= 3) {
+ coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
+ coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
+ coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
+ }
+ SkDEBUGCODE(verify_floats(coefficients, components * 3));
+
+ GL_CALL(ProgramPathFragmentInputGen(program, location, genMode, components, coefficients));
+}
+
+void GrGLPathRendering::setProjectionMatrix(const SkMatrix& matrix,
+ const SkISize& renderTargetSize,
+ GrSurfaceOrigin renderTargetOrigin) {
+
+ SkASSERT(this->gpu()->glCaps().shaderCaps()->pathRenderingSupport());
+
+ if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
+ renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
+ matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
+ return;
+ }
+
+ fHWProjectionMatrixState.fViewMatrix = matrix;
+ fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
+ fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;
+
+ float glMatrix[4 * 4];
+ fHWProjectionMatrixState.getRTAdjustedGLMatrix<4>(glMatrix);
+ SkDEBUGCODE(verify_floats(glMatrix, SK_ARRAY_COUNT(glMatrix)));
+ GL_CALL(MatrixLoadf(GR_GL_PATH_PROJECTION, glMatrix));
+}
+
+GrGLuint GrGLPathRendering::genPaths(GrGLsizei range) {
+ SkASSERT(range > 0);
+ GrGLuint firstID;
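+ // Fast path: service the request entirely from the existing preallocated ID range.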
+ if (fPreallocatedPathCount >= range) {
+ firstID = fFirstPreallocatedPathID;
+ fPreallocatedPathCount -= range;
+ fFirstPreallocatedPathID += range;
+ return firstID;
+ }
+ // Allocate range + the amount needed to refill the preallocation. If this succeeds, either
+ // join with the existing preallocation range, or delete the existing range and use the new
+ // (potentially partial) preallocation range.
+ GrGLsizei allocAmount = range + (kPathIDPreallocationAmount - fPreallocatedPathCount);
+ if (allocAmount >= range) {
+ GL_CALL_RET(firstID, GenPaths(allocAmount));
+
+ if (firstID != 0) {
+ if (fPreallocatedPathCount > 0 &&
+ firstID == fFirstPreallocatedPathID + fPreallocatedPathCount) {
+ firstID = fFirstPreallocatedPathID;
+ fPreallocatedPathCount += allocAmount - range;
+ fFirstPreallocatedPathID += range;
+ return firstID;
+ }
+
+ if (allocAmount > range) {
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ }
+ fFirstPreallocatedPathID = firstID + range;
+ fPreallocatedPathCount = allocAmount - range;
+ }
+ // Special case: if allocAmount == range, we have full preallocated range.
+ return firstID;
+ }
+ }
+ // Failed to allocate with preallocation. Remove existing preallocation and try to allocate just
+ // the range.
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ fPreallocatedPathCount = 0;
+ }
+
+ GL_CALL_RET(firstID, GenPaths(range));
+ if (firstID == 0) {
+ SkDebugf("Warning: Failed to allocate path\n");
+ }
+ return firstID;
+}
+
+void GrGLPathRendering::deletePaths(GrGLuint path, GrGLsizei range) {
+ GL_CALL(DeletePaths(path, range));
+}
+
+void GrGLPathRendering::flushPathStencilSettings(const GrStencilSettings& stencilSettings) {
+ SkASSERT(!stencilSettings.isTwoSided());
+ if (fHWPathStencilSettings != stencilSettings) {
+ SkASSERT(stencilSettings.isValid());
+ // Only the func, ref, and mask are set here. The op and write mask are params to the call
+ // that draws the path to the stencil buffer (glStencilFillPath).
+ uint16_t ref = stencilSettings.frontAndBack().fRef;
+ GrStencilTest test = stencilSettings.frontAndBack().fTest;
+ uint16_t testMask = stencilSettings.frontAndBack().fTestMask;
+
+ if (!fHWPathStencilSettings.isValid() ||
+ ref != fHWPathStencilSettings.frontAndBack().fRef ||
+ test != fHWPathStencilSettings.frontAndBack().fTest ||
+ testMask != fHWPathStencilSettings.frontAndBack().fTestMask) {
+ GL_CALL(PathStencilFunc(GrToGLStencilFunc(test), ref, testMask));
+ }
+ fHWPathStencilSettings = stencilSettings;
+ }
+}
+
+inline GrGLGpu* GrGLPathRendering::gpu() {
+ return static_cast<GrGLGpu*>(fGpu);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h
new file mode 100644
index 0000000000..14284b4888
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLPathRendering_DEFINED
+#define GrGLPathRendering_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLTypes.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+
+class GrGLNameAllocator;
+class GrGLGpu;
+class GrStyle;
+
+/**
+ * This class wraps the NV_path_rendering extension and manages its various
+ * API versions. If a method is not present in the GrGLInterface of the GrGLGpu
+ * (because the driver version is old), it tries to provide a backup
+ * implementation. But if a backup implementation is not practical, it marks the
+ * method as not supported.
+ */
+class GrGLPathRendering : public GrPathRendering {
+public:
+ /**
+ * Create a new GrGLPathRendering object from a given GrGLGpu.
+ */
+ GrGLPathRendering(GrGLGpu* gpu);
+ ~GrGLPathRendering() override;
+
+ // GrPathRendering implementations.
+ sk_sp<GrPath> createPath(const SkPath&, const GrStyle&) override;
+
+ /* Called when the 3D context state is unknown. */
+ void resetContext();
+
+ /**
+ * Called when the context either is about to be lost or is lost. DisconnectType indicates
+ * whether GPU resources should be cleaned up or abandoned when this is called.
+ */
+ void disconnect(GrGpu::DisconnectType);
+
+ bool shouldBindFragmentInputs() const {
+ return fCaps.bindFragmentInputSupport;
+ }
+
+ // Functions for "separable shader" texturing support.
+ void setProgramPathFragmentInputTransform(GrGLuint program, GrGLint location,
+ GrGLenum genMode, GrGLint components,
+ const SkMatrix&);
+
+ /* Sets the projection matrix for path rendering */
+ void setProjectionMatrix(const SkMatrix& matrix,
+ const SkISize& renderTargetSize,
+ GrSurfaceOrigin renderTargetOrigin);
+
+ GrGLuint genPaths(GrGLsizei range);
+ GrGLvoid deletePaths(GrGLuint path, GrGLsizei range);
+
+protected:
+ void onStencilPath(const StencilPathArgs&, const GrPath*) override;
+ void onDrawPath(GrRenderTarget*, const GrProgramInfo&, const GrStencilSettings&,
+ const GrPath*) override;
+
+private:
+ /**
+ * Caps used to mark certain functionality as supported or not.
+ */
+ struct Caps {
+ bool bindFragmentInputSupport : 1;
+ };
+
+ void flushPathStencilSettings(const GrStencilSettings&);
+
+ struct MatrixState {
+ SkMatrix fViewMatrix;
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ MatrixState() { this->invalidate(); }
+ void invalidate() {
+ fViewMatrix = SkMatrix::InvalidMatrix();
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin) -1;
+ }
+
+ /**
+ * Gets a matrix that goes from local coordinates to GL normalized device coords.
+ */
+ template<int Size> void getRTAdjustedGLMatrix(float* destMatrix) {
+ SkMatrix combined;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ combined.setAll(SkIntToScalar(2) / fRenderTargetSize.fWidth, 0, -SK_Scalar1,
+ 0, -SkIntToScalar(2) / fRenderTargetSize.fHeight, SK_Scalar1,
+ 0, 0, 1);
+ } else {
+ combined.setAll(SkIntToScalar(2) / fRenderTargetSize.fWidth, 0, -SK_Scalar1,
+ 0, SkIntToScalar(2) / fRenderTargetSize.fHeight, -SK_Scalar1,
+ 0, 0, 1);
+ }
+ combined.preConcat(fViewMatrix);
+ GrGLSLGetMatrix<Size>(destMatrix, combined);
+ }
+ };
+ GrGLGpu* gpu();
+
+ GrGLuint fFirstPreallocatedPathID;
+ GrGLsizei fPreallocatedPathCount;
+ MatrixState fHWProjectionMatrixState;
+ GrStencilSettings fHWPathStencilSettings;
+ Caps fCaps;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp b/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp
new file mode 100644
index 0000000000..74cf5392fa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrPathProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/gl/GrGLBuffer.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLPathRendering.h"
+#include "src/gpu/gl/GrGLProgram.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+#define GL_CALL(X) GR_GL_CALL(fGpu->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(fGpu->glInterface(), R, X)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLProgram::GrGLProgram(
+ GrGLGpu* gpu,
+ const GrGLSLBuiltinUniformHandles& builtinUniforms,
+ GrGLuint programID,
+ const UniformInfoArray& uniforms,
+ const UniformInfoArray& textureSamplers,
+ const VaryingInfoArray& pathProcVaryings,
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+ std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fragmentProcessorCnt,
+ std::unique_ptr<Attribute[]> attributes,
+ int vertexAttributeCnt,
+ int instanceAttributeCnt,
+ int vertexStride,
+ int instanceStride)
+ : fBuiltinUniformHandles(builtinUniforms)
+ , fProgramID(programID)
+ , fPrimitiveProcessor(std::move(geometryProcessor))
+ , fXferProcessor(std::move(xferProcessor))
+ , fFragmentProcessors(std::move(fragmentProcessors))
+ , fFragmentProcessorCnt(fragmentProcessorCnt)
+ , fAttributes(std::move(attributes))
+ , fVertexAttributeCnt(vertexAttributeCnt)
+ , fInstanceAttributeCnt(instanceAttributeCnt)
+ , fVertexStride(vertexStride)
+ , fInstanceStride(instanceStride)
+ , fGpu(gpu)
+ , fProgramDataManager(gpu, programID, uniforms, pathProcVaryings)
+ , fNumTextureSamplers(textureSamplers.count()) {
+ // Assign texture units to sampler uniforms one time up front.
+ GL_CALL(UseProgram(fProgramID));
+ fProgramDataManager.setSamplerUniforms(textureSamplers, 0);
+}
+
+GrGLProgram::~GrGLProgram() {
+ if (fProgramID) {
+ GL_CALL(DeleteProgram(fProgramID));
+ }
+}
+
+void GrGLProgram::abandon() {
+ fProgramID = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLProgram::updateUniformsAndTextureBindings(const GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo) {
+
+ this->setRenderTargetState(renderTarget, programInfo.origin(), programInfo.primProc());
+
+ // We set the textures and uniforms for installed processors in a generic way, but subclasses
+ // of GrGLProgram determine how to set coord transforms.
+
+ // We must bind to texture units in the same order in which we set the uniforms in
+ // GrGLProgramDataManager. That is, we bind textures for processors in this order:
+ // primProc, fragProcs, XP.
+ fPrimitiveProcessor->setData(fProgramDataManager, programInfo.primProc(),
+ GrFragmentProcessor::CoordTransformIter(programInfo.pipeline()));
+ if (programInfo.hasFixedPrimProcTextures()) {
+ this->updatePrimitiveProcessorTextureBindings(programInfo.primProc(),
+ programInfo.fixedPrimProcTextures());
+ }
+ int nextTexSamplerIdx = programInfo.primProc().numTextureSamplers();
+
+ this->setFragmentData(programInfo.pipeline(), &nextTexSamplerIdx);
+
+ const GrXferProcessor& xp = programInfo.pipeline().getXferProcessor();
+ SkIPoint offset;
+ GrTexture* dstTexture = programInfo.pipeline().peekDstTexture(&offset);
+
+ fXferProcessor->setData(fProgramDataManager, xp, dstTexture, offset);
+ if (dstTexture) {
+ fGpu->bindTexture(nextTexSamplerIdx++, GrSamplerState::ClampNearest(),
+ programInfo.pipeline().dstTextureProxy()->textureSwizzle(),
+ static_cast<GrGLTexture*>(dstTexture));
+ }
+ SkASSERT(nextTexSamplerIdx == fNumTextureSamplers);
+}
+
+void GrGLProgram::updatePrimitiveProcessorTextureBindings(const GrPrimitiveProcessor& primProc,
+ const GrTextureProxy* const proxies[]) {
+ for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
+ auto* tex = static_cast<GrGLTexture*>(proxies[i]->peekTexture());
+ fGpu->bindTexture(i, primProc.textureSampler(i).samplerState(),
+ primProc.textureSampler(i).swizzle(), tex);
+ }
+}
+
+void GrGLProgram::setFragmentData(const GrPipeline& pipeline, int* nextTexSamplerIdx) {
+ GrFragmentProcessor::Iter iter(pipeline);
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fProgramDataManager, *fp);
+ for (int i = 0; i < fp->numTextureSamplers(); ++i) {
+ const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
+ fGpu->bindTexture((*nextTexSamplerIdx)++, sampler.samplerState(), sampler.swizzle(),
+ static_cast<GrGLTexture*>(sampler.peekTexture()));
+ }
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+}
+
+void GrGLProgram::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const GrPrimitiveProcessor& primProc) {
+ // Load the RT size uniforms if they are needed
+ if (fBuiltinUniformHandles.fRTWidthUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fWidth != rt->width()) {
+ fProgramDataManager.set1f(fBuiltinUniformHandles.fRTWidthUni, SkIntToScalar(rt->width()));
+ }
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
+ fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
+ }
+
+ // set RT adjustment
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ if (!primProc.isPathRendering()) {
+ if (fRenderTargetState.fRenderTargetOrigin != origin ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = origin;
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+ } else {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>();
+ fGpu->glPathRendering()->setProjectionMatrix(pathProc.viewMatrix(),
+ size, origin);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgram.h b/gfx/skia/skia/src/gpu/gl/GrGLProgram.h
new file mode 100644
index 0000000000..cf2e2aab6e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgram.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLProgram_DEFINED
+#define GrGLProgram_DEFINED
+
+#include "src/gpu/gl/GrGLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrGLSLFragmentProcessor;
+class GrGLSLPrimitiveProcessor;
+class GrGLSLXferProcessor;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrProgramInfo;
+class GrRenderTarget;
+class GrTextureProxy;
+
+/**
+ * This class manages a GPU program and records per-program information. It also records the vertex
+ * and instance attribute layouts that are to be used with the program.
+ */
+class GrGLProgram : public SkRefCnt {
+public:
+ /**
+ * This class has its own Attribute representation because it does not need the name, and we
+ * don't want to worry about copying the name string to memory tied to the lifetime of the
+ * GrGLProgram. Additionally, these store the attribute location.
+ */
+ struct Attribute {
+ GrVertexAttribType fCPUType;
+ GrSLType fGPUType;
+ size_t fOffset;
+ GrGLint fLocation;
+ };
+
+ using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+ using UniformInfoArray = GrGLProgramDataManager::UniformInfoArray;
+ using VaryingInfoArray = GrGLProgramDataManager::VaryingInfoArray;
+
+ /**
+ * The attribute array consists of vertexAttributeCnt + instanceAttributeCnt elements with
+ * the vertex attributes preceding the instance attributes.
+ */
+ GrGLProgram(GrGLGpu*,
+ const GrGLSLBuiltinUniformHandles&,
+ GrGLuint programID,
+ const UniformInfoArray& uniforms,
+ const UniformInfoArray& textureSamplers,
+ const VaryingInfoArray&, // used for NVPR only currently
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+ std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fragmentProcessorCnt,
+ std::unique_ptr<Attribute[]>,
+ int vertexAttributeCnt,
+ int instanceAttributeCnt,
+ int vertexStride,
+ int instanceStride);
+
+ ~GrGLProgram();
+
+ /**
+ * Call to abandon GL objects owned by this program.
+ */
+ void abandon();
+
+ /**
+ * Gets the GL program ID for this program.
+ */
+ GrGLuint programID() const { return fProgramID; }
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to OpenGL normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin) -1;
+ }
+
+ /**
+ * Gets a float4 that adjusts the position from Skia device coords to GL's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous float3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
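+ *
+ * For example, with a top-left origin and a WxH target this yields v = (2/W, -1, 2/H, -1),
+ * so a transformed x of W/2 (with pos.z == 1) maps to 0 in normalized device coords.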
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
+
+ /**
+ * This function uploads uniforms and calls each GrGLSL*Processor's setData. It binds all
+ * fragment processor textures. Primitive processor textures can be bound using this function
+ * or by calling updatePrimitiveProcessorTextureBindings.
+ *
+ * It is the caller's responsibility to ensure the program is bound before calling.
+ */
+ void updateUniformsAndTextureBindings(const GrRenderTarget*, const GrProgramInfo&);
+
+ void updatePrimitiveProcessorTextureBindings(const GrPrimitiveProcessor&,
+ const GrTextureProxy* const[]);
+
+ int vertexStride() const { return fVertexStride; }
+ int instanceStride() const { return fInstanceStride; }
+
+ int numVertexAttributes() const { return fVertexAttributeCnt; }
+ const Attribute& vertexAttribute(int i) const {
+ SkASSERT(i >= 0 && i < fVertexAttributeCnt);
+ return fAttributes[i];
+ }
+
+ int numInstanceAttributes() const { return fInstanceAttributeCnt; }
+ const Attribute& instanceAttribute(int i) const {
+ SkASSERT(i >= 0 && i < fInstanceAttributeCnt);
+ return fAttributes[i + fVertexAttributeCnt];
+ }
+
+private:
+ // A helper to loop over effects, set the transforms (via subclass) and bind textures
+ void setFragmentData(const GrPipeline&, int* nextTexSamplerIdx);
+
+ // Helper for setData() that sets the view matrix and loads the render target height uniform
+ void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin, const GrPrimitiveProcessor&);
+
+ // these reflect the current values of uniforms (GL uniform values travel with program)
+ RenderTargetState fRenderTargetState;
+ GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;
+ GrGLuint fProgramID;
+
+ // the installed effects
+ std::unique_ptr<GrGLSLPrimitiveProcessor> fPrimitiveProcessor;
+ std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
+ int fFragmentProcessorCnt;
+
+ std::unique_ptr<Attribute[]> fAttributes;
+ int fVertexAttributeCnt;
+ int fInstanceAttributeCnt;
+ int fVertexStride;
+ int fInstanceStride;
+
+ GrGLGpu* fGpu;
+ GrGLProgramDataManager fProgramDataManager;
+
+ int fNumTextureSamplers;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp
new file mode 100644
index 0000000000..15d9d36eee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
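+// The upload count must fit within the uniform's declared array count; a count of 1 is also
+// allowed for non-array uniforms.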
+#define ASSERT_ARRAY_UPLOAD_IN_BOUNDS(UNI, COUNT) \
+ SkASSERT((COUNT) <= (UNI).fArrayCount || \
+ (1 == (COUNT) && GrShaderVar::kNonArray == (UNI).fArrayCount))
+
+GrGLProgramDataManager::GrGLProgramDataManager(GrGLGpu* gpu, GrGLuint programID,
+ const UniformInfoArray& uniforms,
+ const VaryingInfoArray& pathProcVaryings)
+ : fGpu(gpu)
+ , fProgramID(programID) {
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const UniformInfo& builderUniform = uniforms[i];
+ SkASSERT(GrShaderVar::kNonArray == builderUniform.fVariable.getArrayCount() ||
+ builderUniform.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = builderUniform.fVariable.getArrayCount();
+ uniform.fType = builderUniform.fVariable.getType();
+ )
+ uniform.fLocation = builderUniform.fLocation;
+ }
+
+ // NVPR programs have separable varyings
+ count = pathProcVaryings.count();
+ fPathProcVaryings.push_back_n(count);
+ for (int i = 0; i < count; i++) {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ PathProcVarying& pathProcVarying = fPathProcVaryings[i];
+ const VaryingInfo& builderPathProcVarying = pathProcVaryings[i];
+ SkASSERT(GrShaderVar::kNonArray == builderPathProcVarying.fVariable.getArrayCount() ||
+ builderPathProcVarying.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ pathProcVarying.fArrayCount = builderPathProcVarying.fVariable.getArrayCount();
+ pathProcVarying.fType = builderPathProcVarying.fVariable.getType();
+ )
+ pathProcVarying.fLocation = builderPathProcVarying.fLocation;
+ }
+}
+
+void GrGLProgramDataManager::setSamplerUniforms(const UniformInfoArray& samplers,
+ int startUnit) const {
+ for (int i = 0; i < samplers.count(); ++i) {
+ const UniformInfo& sampler = samplers[i];
+ SkASSERT(sampler.fVisibility);
+ if (kUnusedUniform != sampler.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(sampler.fLocation, i + startUnit));
+ }
+ }
+}
+
+void GrGLProgramDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(uni.fLocation, i));
+ }
+}
+
+void GrGLProgramDataManager::set1iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1iv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1f(uni.fLocation, v0));
+ }
+}
+
+void GrGLProgramDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ // This assert fires in some instances of the two-pt gradient for its VSParams.
+ // Once the uniform manager is responsible for inserting the duplicate uniform
+ // arrays for the VS and FS driver-bug workaround, this can be enabled.
+ // this->printUni(uni);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1fv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set2i(UniformHandle u, int32_t i0, int32_t i1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2i(uni.fLocation, i0, i1));
+ }
+}
+
+void GrGLProgramDataManager::set2iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2iv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2f(uni.fLocation, v0, v1));
+ }
+}
+
+void GrGLProgramDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2fv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set3i(UniformHandle u, int32_t i0, int32_t i1, int32_t i2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3i(uni.fLocation, i0, i1, i2));
+ }
+}
+
+void GrGLProgramDataManager::set3iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3iv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3f(uni.fLocation, v0, v1, v2));
+ }
+}
+
+void GrGLProgramDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3fv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set4i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2,
+ int32_t i3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4i(uni.fLocation, i0, i1, i2, i3));
+ }
+}
+
+void GrGLProgramDataManager::set4iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4iv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4f(uni.fLocation, v0, v1, v2, v3));
+ }
+}
+
+void GrGLProgramDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4fv(uni.fLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix2fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrGLProgramDataManager::setMatrix3fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrGLProgramDataManager::setMatrix4fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
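+// Forward declaration; specialized below for N = 2, 3, 4 so that setMatrices<N> dispatches to
+// the matching UniformMatrix{2,3,4}fv call.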
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrGLProgramDataManager::setMatrices(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2x2_GrSLType + (N - 2) ||
+ uni.fType == kHalf2x2_GrSLType + (N - 2));
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fLocation) {
+ set_uniform_matrix<N>::set(fGpu->glInterface(), uni.fLocation, arrayCount, matrices);
+ }
+}
+
+template<> struct set_uniform_matrix<2> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix2fv(loc, cnt, false, m));
+ }
+};
+
+template<> struct set_uniform_matrix<3> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix3fv(loc, cnt, false, m));
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix4fv(loc, cnt, false, m));
+ }
+};
+
+void GrGLProgramDataManager::setPathFragmentInputTransform(VaryingHandle u,
+ int components,
+ const SkMatrix& matrix) const {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ const PathProcVarying& fragmentInput = fPathProcVaryings[u.toIndex()];
+
+ SkASSERT((components == 2 && (fragmentInput.fType == kFloat2_GrSLType ||
+ fragmentInput.fType == kHalf2_GrSLType)) ||
+ (components == 3 && (fragmentInput.fType == kFloat3_GrSLType ||
+ fragmentInput.fType == kHalf3_GrSLType)));
+
+ fGpu->glPathRendering()->setProgramPathFragmentInputTransform(fProgramID,
+ fragmentInput.fLocation,
+ GR_GL_OBJECT_LINEAR,
+ components,
+ matrix);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h
new file mode 100644
index 0000000000..3a67fe07cd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLProgramDataManager_DEFINED
+#define GrGLProgramDataManager_DEFINED
+
+#include "include/gpu/gl/GrGLTypes.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+#include "include/private/SkTArray.h"
+
+class GrGLGpu;
+class SkMatrix;
+class GrGLProgram;
+
+/** Manages the resources used by a shader program.
+ * The resources are objects the program uses to communicate with the
+ * application code.
+ */
+class GrGLProgramDataManager : public GrGLSLProgramDataManager {
+public:
+ struct UniformInfo {
+ GrShaderVar fVariable;
+ uint32_t fVisibility;
+ GrGLint fLocation;
+ };
+
+ struct VaryingInfo {
+ GrShaderVar fVariable;
+ GrGLint fLocation;
+ };
+
+ // This uses an allocator rather than an array so that the GrShaderVars don't move in memory
+ // after they are inserted. Users of GrGLShaderBuilder get refs to the vars and ptrs to their
+ // name strings. Otherwise, we'd have to hand out copies.
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+ typedef GrTAllocator<VaryingInfo> VaryingInfoArray;
+
+ GrGLProgramDataManager(GrGLGpu*, GrGLuint programID, const UniformInfoArray&,
+ const VaryingInfoArray&);
+
+ void setSamplerUniforms(const UniformInfoArray& samplers, int startUnit) const;
+
+ /** Functions for uploading uniform values. The variants ending in v can be used to upload to
+ * an array of uniforms. arrayCount must be <= the array count of the uniform.
+ */
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2i(UniformHandle, int32_t, int32_t) const override;
+ void set2iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3i(UniformHandle, int32_t, int32_t, int32_t) const override;
+ void set3iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4i(UniformHandle, int32_t, int32_t, int32_t, int32_t) const override;
+ void set4iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // Matrices are column-major. The first three upload a single matrix; the latter three
+ // upload arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override;
+
+private:
+ enum {
+ kUnusedUniform = -1,
+ };
+
+ struct Uniform {
+ GrGLint fLocation;
+#ifdef SK_DEBUG
+ GrSLType fType;
+ int fArrayCount;
+#endif
+ };
+
+ enum {
+ kUnusedPathProcVarying = -1,
+ };
+ struct PathProcVarying {
+ GrGLint fLocation;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ SkTArray<Uniform, true> fUniforms;
+ SkTArray<PathProcVarying, true> fPathProcVaryings;
+ GrGLGpu* fGpu;
+ GrGLuint fProgramID;
+
+ typedef GrGLSLProgramDataManager INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp
new file mode 100644
index 0000000000..0442945f07
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLRenderTarget.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#define GPUGL static_cast<GrGLGpu*>(this->getGpu())
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+// Because this class is virtually derived from GrSurface, we must explicitly call its constructor.
+// Constructor for wrapped render targets.
+GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu,
+ const SkISize& size,
+ GrGLFormat format,
+ GrPixelConfig config,
+ int sampleCount,
+ const IDs& ids,
+ GrGLStencilAttachment* stencil)
+ : GrSurface(gpu, size, config, GrProtected::kNo)
+ , INHERITED(gpu, size, config, sampleCount, GrProtected::kNo, stencil) {
+ this->setFlags(gpu->glCaps(), ids);
+ this->init(format, ids);
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu,
+ const SkISize& size,
+ GrGLFormat format,
+ GrPixelConfig config,
+ int sampleCount,
+ const IDs& ids)
+ : GrSurface(gpu, size, config, GrProtected::kNo)
+ , INHERITED(gpu, size, config, sampleCount, GrProtected::kNo) {
+ this->setFlags(gpu->glCaps(), ids);
+ this->init(format, ids);
+}
+
+inline void GrGLRenderTarget::setFlags(const GrGLCaps& glCaps, const IDs& idDesc) {
+ if (!idDesc.fRTFBOID) {
+ this->setGLRTFBOIDIs0();
+ }
+}
+
+void GrGLRenderTarget::init(GrGLFormat format, const IDs& idDesc) {
+ fRTFBOID = idDesc.fRTFBOID;
+ fTexFBOID = idDesc.fTexFBOID;
+ fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
+ fRTFBOOwnership = idDesc.fRTFBOOwnership;
+ fRTFormat = format;
+ fNumSamplesOwnedPerPixel = this->totalSamples();
+}
+
+sk_sp<GrGLRenderTarget> GrGLRenderTarget::MakeWrapped(GrGLGpu* gpu,
+ const SkISize& size,
+ GrGLFormat format,
+ GrPixelConfig config,
+ int sampleCount,
+ const IDs& idDesc,
+ int stencilBits) {
+ GrGLStencilAttachment* sb = nullptr;
+ if (stencilBits) {
+ GrGLStencilAttachment::IDDesc sbDesc;
+ // Use a distinct name so the stencil format doesn't shadow the GrGLFormat parameter.
+ GrGLStencilAttachment::Format sbFormat;
+ sbFormat.fInternalFormat = GrGLStencilAttachment::kUnknownInternalFormat;
+ sbFormat.fPacked = false;
+ sbFormat.fStencilBits = stencilBits;
+ sbFormat.fTotalBits = stencilBits;
+ // Ownership of sb is passed to the GrRenderTarget, so it doesn't need to be deleted here.
+ sb = new GrGLStencilAttachment(gpu, sbDesc, size.width(), size.height(), sampleCount,
+ sbFormat);
+ }
+ return sk_sp<GrGLRenderTarget>(
+ new GrGLRenderTarget(gpu, size, format, config, sampleCount, idDesc, sb));
+}
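+
+// Hypothetical usage sketch (not upstream code) for MakeWrapped() above: wrapping a window
+// system's default framebuffer (FBO 0) with 8 stencil bits and no MSAA. A wrapped FBO cannot
+// be resolved by Skia, so fTexFBOID is set to kUnresolvableFBOID:
+//
+//     GrGLRenderTarget::IDs ids;
+//     ids.fRTFBOID = 0;
+//     ids.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
+//     ids.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
+//     ids.fMSColorRenderbufferID = 0;
+//     sk_sp<GrGLRenderTarget> rt = GrGLRenderTarget::MakeWrapped(
+//             gpu, size, format, config, /*sampleCount=*/1, ids, /*stencilBits=*/8);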
+
+GrBackendRenderTarget GrGLRenderTarget::getBackendRenderTarget() const {
+ GrGLFramebufferInfo fbi;
+ fbi.fFBOID = fRTFBOID;
+ fbi.fFormat = GrGLFormatToEnum(this->format());
+ int numStencilBits = 0;
+ if (GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment()) {
+ numStencilBits = stencil->bits();
+ }
+
+ return GrBackendRenderTarget(
+ this->width(), this->height(), this->numSamples(), numStencilBits, fbi);
+}
+
+GrBackendFormat GrGLRenderTarget::backendFormat() const {
+    // We should never have a GrGLRenderTarget (even a texturable one) with a target that is
+    // not texture 2D.
+ return GrBackendFormat::MakeGL(GrGLFormatToEnum(fRTFormat), GR_GL_TEXTURE_2D);
+}
+
+size_t GrGLRenderTarget::onGpuMemorySize() const {
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ fNumSamplesOwnedPerPixel, GrMipMapped::kNo);
+}
+
+bool GrGLRenderTarget::completeStencilAttachment() {
+ GrGLGpu* gpu = this->getGLGpu();
+ const GrGLInterface* interface = gpu->glInterface();
+ GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (nullptr == stencil) {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+#ifdef SK_DEBUG
+ if (kChromium_GrGLDriver != gpu->glContext().driver()) {
+ // This check can cause problems in Chromium if the context has been asynchronously
+ // abandoned (see skbug.com/5200)
+ GrGLenum status;
+ GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
+ }
+#endif
+ return true;
+ } else {
+ const GrGLStencilAttachment* glStencil = static_cast<const GrGLStencilAttachment*>(stencil);
+ GrGLuint rb = glStencil->renderbufferID();
+
+ gpu->invalidateBoundRenderTarget();
+ gpu->bindFramebuffer(GR_GL_FRAMEBUFFER, this->renderFBOID());
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, rb));
+ if (glStencil->format().fPacked) {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, rb));
+ } else {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+
+#ifdef SK_DEBUG
+ if (kChromium_GrGLDriver != gpu->glContext().driver()) {
+ // This check can cause problems in Chromium if the context has been asynchronously
+ // abandoned (see skbug.com/5200)
+ GrGLenum status;
+ GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
+ }
+#endif
+ return true;
+ }
+}
+
+void GrGLRenderTarget::onRelease() {
+ if (GrBackendObjectOwnership::kBorrowed != fRTFBOOwnership) {
+ GrGLGpu* gpu = this->getGLGpu();
+ if (fTexFBOID) {
+ gpu->deleteFramebuffer(fTexFBOID);
+ }
+ if (fRTFBOID && fRTFBOID != fTexFBOID) {
+ gpu->deleteFramebuffer(fRTFBOID);
+ }
+ if (fMSColorRenderbufferID) {
+ GL_CALL(DeleteRenderbuffers(1, &fMSColorRenderbufferID));
+ }
+ }
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ INHERITED::onRelease();
+}
+
+void GrGLRenderTarget::onAbandon() {
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ INHERITED::onAbandon();
+}
+
+GrGLGpu* GrGLRenderTarget::getGLGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrGLGpu*>(this->getGpu());
+}
+
+bool GrGLRenderTarget::canAttemptStencilAttachment() const {
+ if (this->getGpu()->getContext()->priv().caps()->avoidStencilBuffers()) {
+ return false;
+ }
+
+ // Only modify the FBO's attachments if we have created the FBO. Public APIs do not currently
+ // allow for borrowed FBO ownership, so we can safely assume that if an object is owned,
+ // Skia created it.
+ return this->fRTFBOOwnership == GrBackendObjectOwnership::kOwned;
+}
+
+void GrGLRenderTarget::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ // Don't check this->fRefsWrappedObjects, as we might be the base of a GrGLTextureRenderTarget
+ // which is multiply inherited from both ourselves and a texture. In these cases, one part
+ // (texture, rt) may be wrapped, while the other is owned by Skia.
+ bool refsWrappedRenderTargetObjects =
+ this->fRTFBOOwnership == GrBackendObjectOwnership::kBorrowed;
+ if (refsWrappedRenderTargetObjects && !traceMemoryDump->shouldDumpWrappedObjects()) {
+ return;
+ }
+
+ // Don't log the framebuffer, as the framebuffer itself doesn't contribute to meaningful
+ // memory usage. It is always a wrapper around either:
+ // - a texture, which is owned elsewhere, and will be dumped there
+ // - a renderbuffer, which will be dumped below.
+
+ // Log any renderbuffer's contribution to memory.
+ if (fMSColorRenderbufferID) {
+ const GrCaps& caps = *this->getGpu()->caps();
+ size_t size = GrSurface::ComputeSize(caps, this->backendFormat(), this->width(),
+ this->height(), this->msaaSamples(), GrMipMapped::kNo);
+
+ // Due to this resource having both a texture and a renderbuffer component, dump as
+ // skia/gpu_resources/resource_#/renderbuffer
+ SkString resourceName = this->getResourceName();
+ resourceName.append("/renderbuffer");
+
+ this->dumpMemoryStatisticsPriv(traceMemoryDump, resourceName, "RenderTarget", size);
+
+ SkString renderbuffer_id;
+ renderbuffer_id.appendU32(fMSColorRenderbufferID);
+ traceMemoryDump->setMemoryBacking(resourceName.c_str(), "gl_renderbuffer",
+ renderbuffer_id.c_str());
+ }
+}
+
+int GrGLRenderTarget::msaaSamples() const {
+ if (fTexFBOID == kUnresolvableFBOID || fTexFBOID != fRTFBOID) {
+        // If the render target's FBO is external (fTexFBOID == kUnresolvableFBOID), or if the
+        // render FBO is distinct from the texture FBO (fTexFBOID != fRTFBOID), then we use the
+        // provided sample count.
+ return this->numSamples();
+ }
+
+ // When fTexFBOID == fRTFBOID, we either are not using MSAA, or MSAA is auto resolving, so use
+ // 0 for the sample count.
+ return 0;
+}
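+
+// Illustrative summary (not upstream Skia) of the msaaSamples() cases above:
+//     fTexFBOID == kUnresolvableFBOID  (external, unresolvable FBO) -> numSamples()
+//     fTexFBOID != fRTFBOID            (separate MSAA render FBO)   -> numSamples()
+//     fTexFBOID == fRTFBOID            (no MSAA or auto-resolve)    -> 0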
+
+int GrGLRenderTarget::totalSamples() const {
+ int total_samples = this->msaaSamples();
+
+ if (fTexFBOID != kUnresolvableFBOID) {
+ // If we own the resolve buffer then that is one more sample per pixel.
+ total_samples += 1;
+ }
+
+ return total_samples;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h
new file mode 100644
index 0000000000..5349e536db
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLRenderTarget_DEFINED
+#define GrGLRenderTarget_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "src/gpu/GrRenderTarget.h"
+
+class GrGLCaps;
+class GrGLGpu;
+class GrGLStencilAttachment;
+
+class GrGLRenderTarget : public GrRenderTarget {
+public:
+ bool alwaysClearStencil() const override { return 0 == fRTFBOID; }
+
+ // set fTexFBOID to this value to indicate that it is multisampled but
+ // Gr doesn't know how to resolve it.
+ enum { kUnresolvableFBOID = 0 };
+
+ struct IDs {
+ GrGLuint fRTFBOID;
+ GrBackendObjectOwnership fRTFBOOwnership;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+ };
+
+ static sk_sp<GrGLRenderTarget> MakeWrapped(GrGLGpu*,
+ const SkISize&,
+ GrGLFormat,
+ GrPixelConfig,
+ int sampleCount,
+ const IDs&,
+ int stencilBits);
+
+ // The following two functions return the same ID when a texture/render target is not
+ // multisampled, and different IDs when it is multisampled.
+ // FBO ID used to render into
+ GrGLuint renderFBOID() const { return fRTFBOID; }
+ // FBO ID that has texture ID attached.
+ GrGLuint textureFBOID() const { return fTexFBOID; }
+
+ GrBackendRenderTarget getBackendRenderTarget() const override;
+
+ GrBackendFormat backendFormat() const override;
+
+ bool canAttemptStencilAttachment() const override;
+
+ // GrGLRenderTarget overrides dumpMemoryStatistics so it can log its texture and renderbuffer
+    // components separately.
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const override;
+
+ GrGLFormat format() const { return fRTFormat; }
+
+protected:
+ // Constructor for subclasses.
+ GrGLRenderTarget(GrGLGpu*,
+ const SkISize&,
+ GrGLFormat,
+ GrPixelConfig,
+ int sampleCount,
+ const IDs&);
+
+ void init(GrGLFormat, const IDs&);
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ int numSamplesOwnedPerPixel() const { return fNumSamplesOwnedPerPixel; }
+
+private:
+ // Constructor for instances wrapping backend objects.
+ GrGLRenderTarget(GrGLGpu*,
+ const SkISize&,
+ GrGLFormat,
+ GrPixelConfig,
+ int sampleCount,
+ const IDs&,
+ GrGLStencilAttachment*);
+
+ void setFlags(const GrGLCaps&, const IDs&);
+
+ GrGLGpu* getGLGpu() const;
+ bool completeStencilAttachment() override;
+
+ size_t onGpuMemorySize() const override;
+
+ int msaaSamples() const;
+    // The total number of samples, including both MSAA and resolve texture samples.
+ int totalSamples() const;
+
+ GrGLuint fRTFBOID;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+ GrGLFormat fRTFormat;
+
+ GrBackendObjectOwnership fRTFBOOwnership;
+
+ // The RenderTarget needs to be able to report its VRAM footprint even after abandon and
+ // release have potentially zeroed out the IDs (e.g., so the cache can reset itself). Since
+ // the IDs are just required for the computation in totalSamples we cache that result here.
+ int fNumSamplesOwnedPerPixel;
+
+ typedef GrRenderTarget INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.cpp b/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.cpp
new file mode 100644
index 0000000000..d3a224ee1b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLSemaphore.h"
+
+#include "src/gpu/gl/GrGLGpu.h"
+
+GrGLSemaphore::GrGLSemaphore(GrGLGpu* gpu, bool isOwned)
+ : INHERITED(gpu), fSync(0), fIsOwned(isOwned) {
+ isOwned ? this->registerWithCache(SkBudgeted::kNo)
+ : this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+void GrGLSemaphore::onRelease() {
+ if (fSync && fIsOwned) {
+ static_cast<const GrGLGpu*>(this->getGpu())->deleteSync(fSync);
+ }
+ fSync = 0;
+ INHERITED::onRelease();
+}
+
+void GrGLSemaphore::onAbandon() {
+ fSync = 0;
+ INHERITED::onAbandon();
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.h b/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.h
new file mode 100644
index 0000000000..aff3b73248
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLSemaphore.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSemaphore_DEFINED
+#define GrGLSemaphore_DEFINED
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrSemaphore.h"
+
+class GrGLGpu;
+
+class GrGLSemaphore : public GrSemaphore {
+public:
+ static sk_sp<GrGLSemaphore> Make(GrGLGpu* gpu, bool isOwned) {
+ return sk_sp<GrGLSemaphore>(new GrGLSemaphore(gpu, isOwned));
+ }
+
+ static sk_sp<GrGLSemaphore> MakeWrapped(GrGLGpu* gpu,
+ GrGLsync sync,
+ GrWrapOwnership ownership) {
+ auto sema = sk_sp<GrGLSemaphore>(new GrGLSemaphore(gpu,
+ kBorrow_GrWrapOwnership != ownership));
+ sema->setSync(sync);
+ return sema;
+ }
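+
+    // Hypothetical usage sketch (not upstream code): wrapping a fence sync the client created,
+    // without transferring ownership to Skia:
+    //
+    //     GrGLsync sync = ...;  // e.g. from glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)
+    //     sk_sp<GrGLSemaphore> sema =
+    //             GrGLSemaphore::MakeWrapped(gpu, sync, kBorrow_GrWrapOwnership);
+    //     // fIsOwned is false here, so onRelease() will not delete the client's sync.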
+
+ GrGLsync sync() const { return fSync; }
+ void setSync(const GrGLsync& sync) { fSync = sync; }
+
+ GrBackendSemaphore backendSemaphore() const override {
+ GrBackendSemaphore backendSemaphore;
+ backendSemaphore.initGL(fSync);
+ return backendSemaphore;
+ }
+
+private:
+ GrGLSemaphore(GrGLGpu* gpu, bool isOwned);
+
+ void onRelease() override;
+ void onAbandon() override;
+
+ GrGLsync fSync;
+ bool fIsOwned;
+
+ typedef GrSemaphore INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp
new file mode 100644
index 0000000000..4b749c6f3e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLStencilAttachment.h"
+
+size_t GrGLStencilAttachment::onGpuMemorySize() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= this->numSamples();
+ return static_cast<size_t>(size / 8);
+}
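+
+// Worked example (illustrative, not upstream): a 1920x1080 attachment using a packed
+// depth-stencil format with fTotalBits = 32, at 4 samples, occupies
+// 1920 * 1080 * 32 * 4 / 8 = 33,177,600 bytes (~31.6 MiB).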
+
+void GrGLStencilAttachment::onRelease() {
+ if (0 != fRenderbufferID) {
+ GrGLGpu* gpuGL = (GrGLGpu*) this->getGpu();
+ const GrGLInterface* gl = gpuGL->glInterface();
+ GR_GL_CALL(gl, DeleteRenderbuffers(1, &fRenderbufferID));
+ fRenderbufferID = 0;
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLStencilAttachment::onAbandon() {
+ fRenderbufferID = 0;
+
+ INHERITED::onAbandon();
+}
+
+void GrGLStencilAttachment::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString renderbuffer_id;
+ renderbuffer_id.appendU32(this->renderbufferID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_renderbuffer",
+ renderbuffer_id.c_str());
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h
new file mode 100644
index 0000000000..5be4954d05
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLStencilAttachment_DEFINED
+#define GrGLStencilAttachment_DEFINED
+
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/GrStencilAttachment.h"
+
+class GrGLStencilAttachment : public GrStencilAttachment {
+public:
+ static const GrGLenum kUnknownInternalFormat = ~0U;
+ static const GrGLuint kUnknownBitCount = ~0U;
+ struct Format {
+ GrGLenum fInternalFormat;
+ GrGLuint fStencilBits;
+ GrGLuint fTotalBits;
+ bool fPacked;
+ };
+
+ struct IDDesc {
+ IDDesc() : fRenderbufferID(0) {}
+ GrGLuint fRenderbufferID;
+ };
+
+ GrGLStencilAttachment(GrGpu* gpu,
+ const IDDesc& idDesc,
+ int width, int height,
+ int sampleCnt,
+ const Format& format)
+ : GrStencilAttachment(gpu, width, height, format.fStencilBits, sampleCnt)
+ , fFormat(format)
+ , fRenderbufferID(idDesc.fRenderbufferID) {
+ this->registerWithCache(SkBudgeted::kYes);
+ }
+
+ GrGLuint renderbufferID() const {
+ return fRenderbufferID;
+ }
+
+ const Format& format() const { return fFormat; }
+
+protected:
+ // overrides of GrResource
+ void onRelease() override;
+ void onAbandon() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ Format fFormat;
+ // may be zero for external SBs associated with external RTs
+ // (we don't require the client to give us the id, just tell
+ // us how many bits of stencil there are).
+ GrGLuint fRenderbufferID;
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp
new file mode 100644
index 0000000000..a5a1a3708b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLTexture.h"
+
+#define GPUGL static_cast<GrGLGpu*>(this->getGpu())
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+GrTextureType GrGLTexture::TextureTypeFromTarget(GrGLenum target) {
+ switch (target) {
+ case GR_GL_TEXTURE_2D:
+ return GrTextureType::k2D;
+ case GR_GL_TEXTURE_RECTANGLE:
+ return GrTextureType::kRectangle;
+ case GR_GL_TEXTURE_EXTERNAL:
+ return GrTextureType::kExternal;
+ }
+ SK_ABORT("Unexpected texture target");
+}
+
+static inline GrGLenum target_from_texture_type(GrTextureType type) {
+ switch (type) {
+ case GrTextureType::k2D:
+ return GR_GL_TEXTURE_2D;
+ case GrTextureType::kRectangle:
+ return GR_GL_TEXTURE_RECTANGLE;
+ case GrTextureType::kExternal:
+ return GR_GL_TEXTURE_EXTERNAL;
+ default:
+ SK_ABORT("Unexpected texture target");
+ }
+ SK_ABORT("Unexpected texture type");
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, SkBudgeted budgeted, const Desc& desc,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, desc.fSize, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, desc.fSize, desc.fConfig, GrProtected::kNo,
+ TextureTypeFromTarget(desc.fTarget), mipMapsStatus)
+ , fParameters(sk_make_sp<GrGLTextureParameters>()) {
+ this->init(desc);
+ this->registerWithCache(budgeted);
+ if (GrGLFormatIsCompressed(desc.fFormat)) {
+ this->setReadOnly();
+ }
+}
+
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, const Desc& desc, GrMipMapsStatus mipMapsStatus,
+ sk_sp<GrGLTextureParameters> parameters, GrWrapCacheable cacheable,
+ GrIOType ioType)
+ : GrSurface(gpu, desc.fSize, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, desc.fSize, desc.fConfig, GrProtected::kNo,
+ TextureTypeFromTarget(desc.fTarget), mipMapsStatus)
+ , fParameters(std::move(parameters)) {
+ SkASSERT(fParameters);
+ this->init(desc);
+ this->registerWithCacheWrapped(cacheable);
+ if (ioType == kRead_GrIOType) {
+ this->setReadOnly();
+ }
+}
+
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, const Desc& desc, sk_sp<GrGLTextureParameters> parameters,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, desc.fSize, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, desc.fSize, desc.fConfig, GrProtected::kNo,
+ TextureTypeFromTarget(desc.fTarget), mipMapsStatus) {
+ SkASSERT(parameters || desc.fOwnership == GrBackendObjectOwnership::kOwned);
+ fParameters = parameters ? std::move(parameters) : sk_make_sp<GrGLTextureParameters>();
+ this->init(desc);
+}
+
+void GrGLTexture::init(const Desc& desc) {
+ SkASSERT(0 != desc.fID);
+ SkASSERT(GrGLFormat::kUnknown != desc.fFormat);
+ fID = desc.fID;
+ fFormat = desc.fFormat;
+ fTextureIDOwnership = desc.fOwnership;
+}
+
+GrGLenum GrGLTexture::target() const {
+ return target_from_texture_type(this->texturePriv().textureType());
+}
+
+void GrGLTexture::onRelease() {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ if (fID) {
+ if (GrBackendObjectOwnership::kBorrowed != fTextureIDOwnership) {
+ GL_CALL(DeleteTextures(1, &fID));
+ }
+ fID = 0;
+ }
+ INHERITED::onRelease();
+}
+
+void GrGLTexture::onAbandon() {
+ fID = 0;
+ INHERITED::onAbandon();
+}
+
+GrBackendTexture GrGLTexture::getBackendTexture() const {
+ GrGLTextureInfo info;
+ info.fTarget = target_from_texture_type(this->texturePriv().textureType());
+ info.fID = fID;
+ info.fFormat = GrGLFormatToEnum(fFormat);
+ return GrBackendTexture(this->width(), this->height(), this->texturePriv().mipMapped(), info,
+ fParameters);
+}
+
+GrBackendFormat GrGLTexture::backendFormat() const {
+ return GrBackendFormat::MakeGL(GrGLFormatToEnum(fFormat),
+ target_from_texture_type(this->texturePriv().textureType()));
+}
+
+sk_sp<GrGLTexture> GrGLTexture::MakeWrapped(GrGLGpu* gpu,
+ GrMipMapsStatus mipMapsStatus,
+ const Desc& desc,
+ sk_sp<GrGLTextureParameters> parameters,
+ GrWrapCacheable cacheable,
+ GrIOType ioType) {
+ return sk_sp<GrGLTexture>(
+ new GrGLTexture(gpu, desc, mipMapsStatus, std::move(parameters), cacheable, ioType));
+}
+
+bool GrGLTexture::onStealBackendTexture(GrBackendTexture* backendTexture,
+ SkImage::BackendTextureReleaseProc* releaseProc) {
+ *backendTexture = this->getBackendTexture();
+ // Set the release proc to a no-op function. GL doesn't require any special cleanup.
+ *releaseProc = [](GrBackendTexture){};
+
+ // It's important that we only abandon this texture's objects, not subclass objects such as
+ // those held by GrGLTextureRenderTarget. Those objects are not being stolen and need to be
+ // cleaned up by us.
+ this->GrGLTexture::onAbandon();
+ return true;
+}
+
+void GrGLTexture::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ // Don't check this->fRefsWrappedObjects, as we might be the base of a GrGLTextureRenderTarget
+ // which is multiply inherited from both ourselves and a texture. In these cases, one part
+ // (texture, rt) may be wrapped, while the other is owned by Skia.
+ bool refsWrappedTextureObjects =
+ this->fTextureIDOwnership == GrBackendObjectOwnership::kBorrowed;
+ if (refsWrappedTextureObjects && !traceMemoryDump->shouldDumpWrappedObjects()) {
+ return;
+ }
+
+ // Dump as skia/gpu_resources/resource_#/texture, to avoid conflicts in the
+ // GrGLTextureRenderTarget case, where multiple things may dump to the same resource. This
+ // has no downside in the normal case.
+ SkString resourceName = this->getResourceName();
+ resourceName.append("/texture");
+
+ // As we are only dumping our texture memory (not any additional memory tracked by classes
+ // which may inherit from us), specifically call GrGLTexture::gpuMemorySize to avoid
+ // hitting an override.
+ this->dumpMemoryStatisticsPriv(traceMemoryDump, resourceName, "Texture",
+ GrGLTexture::gpuMemorySize());
+
+ SkString texture_id;
+ texture_id.appendU32(this->textureID());
+ traceMemoryDump->setMemoryBacking(resourceName.c_str(), "gl_texture", texture_id.c_str());
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTexture.h b/gfx/skia/skia/src/gpu/gl/GrGLTexture.h
new file mode 100644
index 0000000000..1474ec7aa1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTexture.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLTexture_DEFINED
+#define GrGLTexture_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrGLTypesPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+class GrGLGpu;
+
+class GrGLTexture : public GrTexture {
+public:
+ struct Desc {
+ SkISize fSize = {-1, -1};
+ GrGLenum fTarget = 0;
+ GrGLuint fID = 0;
+ GrGLFormat fFormat = GrGLFormat::kUnknown;
+ GrPixelConfig fConfig = kUnknown_GrPixelConfig;
+ GrBackendObjectOwnership fOwnership = GrBackendObjectOwnership::kOwned;
+ };
+
+ static GrTextureType TextureTypeFromTarget(GrGLenum textureTarget);
+
+ GrGLTexture(GrGLGpu*, SkBudgeted, const Desc&, GrMipMapsStatus);
+
+ ~GrGLTexture() override {}
+
+ GrBackendTexture getBackendTexture() const override;
+
+ GrBackendFormat backendFormat() const override;
+
+ // TODO: Remove once clients are no longer calling this.
+ void textureParamsModified() override { fParameters->invalidate(); }
+
+ GrGLTextureParameters* parameters() { return fParameters.get(); }
+
+ GrGLuint textureID() const { return fID; }
+
+ GrGLenum target() const;
+
+ GrGLFormat format() const { return fFormat; }
+
+ bool hasBaseLevelBeenBoundToFBO() const { return fBaseLevelHasBeenBoundToFBO; }
+ void baseLevelWasBoundToFBO() { fBaseLevelHasBeenBoundToFBO = true; }
+
+ static sk_sp<GrGLTexture> MakeWrapped(GrGLGpu*,
+ GrMipMapsStatus,
+ const Desc&,
+ sk_sp<GrGLTextureParameters>,
+ GrWrapCacheable, GrIOType);
+
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const override;
+
+protected:
+ // Constructor for subclasses.
+ GrGLTexture(GrGLGpu*, const Desc&, sk_sp<GrGLTextureParameters>, GrMipMapsStatus);
+
+ // Constructor for instances wrapping backend objects.
+ GrGLTexture(GrGLGpu*,
+ const Desc&,
+ GrMipMapsStatus,
+ sk_sp<GrGLTextureParameters>,
+ GrWrapCacheable,
+ GrIOType);
+
+ void init(const Desc&);
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) override;
+
+private:
+ sk_sp<GrGLTextureParameters> fParameters;
+ GrGLuint fID;
+ GrGLFormat fFormat;
+ GrBackendObjectOwnership fTextureIDOwnership;
+ bool fBaseLevelHasBeenBoundToFBO = false;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp
new file mode 100644
index 0000000000..38bc4ef4c7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLTextureRenderTarget.h"
+
+GrGLTextureRenderTarget::GrGLTextureRenderTarget(GrGLGpu* gpu,
+ SkBudgeted budgeted,
+ int sampleCount,
+ const GrGLTexture::Desc& texDesc,
+ const GrGLRenderTarget::IDs& rtIDs,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, texDesc.fSize, texDesc.fConfig, GrProtected::kNo)
+ , GrGLTexture(gpu, texDesc, nullptr, mipMapsStatus)
+ , GrGLRenderTarget(gpu, texDesc.fSize, texDesc.fFormat, texDesc.fConfig, sampleCount,
+ rtIDs) {
+ this->registerWithCache(budgeted);
+}
+
+GrGLTextureRenderTarget::GrGLTextureRenderTarget(GrGLGpu* gpu,
+ int sampleCount,
+ const GrGLTexture::Desc& texDesc,
+ sk_sp<GrGLTextureParameters> parameters,
+ const GrGLRenderTarget::IDs& rtIDs,
+ GrWrapCacheable cacheable,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, texDesc.fSize, texDesc.fConfig, GrProtected::kNo)
+ , GrGLTexture(gpu, texDesc, std::move(parameters), mipMapsStatus)
+ , GrGLRenderTarget(gpu, texDesc.fSize, texDesc.fFormat, texDesc.fConfig, sampleCount,
+ rtIDs) {
+ this->registerWithCacheWrapped(cacheable);
+}
+
+void GrGLTextureRenderTarget::dumpMemoryStatistics(
+ SkTraceMemoryDump* traceMemoryDump) const {
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // Delegate to the base classes
+ GrGLRenderTarget::dumpMemoryStatistics(traceMemoryDump);
+ GrGLTexture::dumpMemoryStatistics(traceMemoryDump);
+#else
+ SkString resourceName = this->getResourceName();
+ resourceName.append("/texture_renderbuffer");
+ this->dumpMemoryStatisticsPriv(traceMemoryDump, resourceName, "RenderTarget",
+ this->gpuMemorySize());
+#endif
+}
+
+bool GrGLTextureRenderTarget::canAttemptStencilAttachment() const {
+    // The RT FBO of GrGLTextureRenderTarget is never created from a wrapped FBO, so we only
+    // need to check the caps flag.
+ return !this->getGpu()->getContext()->priv().caps()->avoidStencilBuffers();
+}
+
+sk_sp<GrGLTextureRenderTarget> GrGLTextureRenderTarget::MakeWrapped(
+ GrGLGpu* gpu,
+ int sampleCount,
+ const GrGLTexture::Desc& texDesc,
+ sk_sp<GrGLTextureParameters> parameters,
+ const GrGLRenderTarget::IDs& rtIDs,
+ GrWrapCacheable cacheable,
+ GrMipMapsStatus mipMapsStatus) {
+ return sk_sp<GrGLTextureRenderTarget>(new GrGLTextureRenderTarget(
+ gpu, sampleCount, texDesc, std::move(parameters), rtIDs, cacheable, mipMapsStatus));
+}
+
+size_t GrGLTextureRenderTarget::onGpuMemorySize() const {
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ this->numSamplesOwnedPerPixel(), this->texturePriv().mipMapped());
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h
new file mode 100644
index 0000000000..96f1fefe8a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLTextureRenderTarget_DEFINED
+#define GrGLTextureRenderTarget_DEFINED
+
+#include "src/gpu/gl/GrGLRenderTarget.h"
+#include "src/gpu/gl/GrGLTexture.h"
+
+class GrGLGpu;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrGLTextureRenderTarget : public GrGLTexture, public GrGLRenderTarget {
+public:
+ // We're virtually derived from GrSurface (via both GrGLTexture and GrGLRenderTarget) so its
+ // constructor must be explicitly called.
+ GrGLTextureRenderTarget(GrGLGpu* gpu,
+ SkBudgeted budgeted,
+ int sampleCount,
+ const GrGLTexture::Desc& texDesc,
+ const GrGLRenderTarget::IDs&,
+ GrMipMapsStatus);
+
+ bool canAttemptStencilAttachment() const override;
+
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const override;
+
+ static sk_sp<GrGLTextureRenderTarget> MakeWrapped(GrGLGpu* gpu,
+ int sampleCount,
+ const GrGLTexture::Desc&,
+ sk_sp<GrGLTextureParameters>,
+ const GrGLRenderTarget::IDs&,
+ GrWrapCacheable,
+ GrMipMapsStatus);
+
+ GrBackendFormat backendFormat() const override {
+ // It doesn't matter if we take the texture or render target path, so just pick texture.
+ return GrGLTexture::backendFormat();
+ }
+
+protected:
+ void onAbandon() override {
+ GrGLRenderTarget::onAbandon();
+ GrGLTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrGLRenderTarget::onRelease();
+ GrGLTexture::onRelease();
+ }
+
+private:
+ // Constructor for instances wrapping backend objects.
+ GrGLTextureRenderTarget(GrGLGpu* gpu,
+ int sampleCount,
+ const GrGLTexture::Desc& texDesc,
+ sk_sp<GrGLTextureParameters> parameters,
+ const GrGLRenderTarget::IDs& ids,
+ GrWrapCacheable,
+ GrMipMapsStatus);
+
+ size_t onGpuMemorySize() const override;
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTypesPriv.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTypesPriv.cpp
new file mode 100644
index 0000000000..8c66fa558a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTypesPriv.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkScalar.h"
+#include "include/private/GrGLTypesPriv.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/gl/GrGLDefines.h"
+
+GrGLTextureParameters::SamplerOverriddenState::SamplerOverriddenState()
+ // These are the OpenGL defaults.
+ : fMinFilter(GR_GL_NEAREST_MIPMAP_LINEAR)
+ , fMagFilter(GR_GL_LINEAR)
+ , fWrapS(GR_GL_REPEAT)
+ , fWrapT(GR_GL_REPEAT)
+ , fMinLOD(-1000.f)
+ , fMaxLOD(1000.f)
+ , fBorderColorInvalid(false) {}
+
+void GrGLTextureParameters::SamplerOverriddenState::invalidate() {
+ fMinFilter = ~0U;
+ fMagFilter = ~0U;
+ fWrapS = ~0U;
+ fWrapT = ~0U;
+ fMinLOD = SK_ScalarNaN;
+ fMaxLOD = SK_ScalarNaN;
+ fBorderColorInvalid = true;
+}
+
+GrGLTextureParameters::NonsamplerState::NonsamplerState()
+ // These are the OpenGL defaults.
+ : fSwizzleKey(GrSwizzle::RGBA().asKey()), fBaseMipMapLevel(0), fMaxMipMapLevel(1000) {}
+
+void GrGLTextureParameters::NonsamplerState::invalidate() {
+ fSwizzleKey = ~0U;
+ fBaseMipMapLevel = ~0;
+ fMaxMipMapLevel = ~0;
+}
+
+void GrGLTextureParameters::invalidate() {
+ fSamplerOverriddenState.invalidate();
+ fNonsamplerState.invalidate();
+}
+
+void GrGLTextureParameters::set(const SamplerOverriddenState* samplerState,
+ const NonsamplerState& nonsamplerState,
+ ResetTimestamp currTimestamp) {
+ if (samplerState) {
+ fSamplerOverriddenState = *samplerState;
+ }
+ fNonsamplerState = nonsamplerState;
+ fResetTimestamp = currTimestamp;
+}
+
+void GrGLBackendTextureInfo::assign(const GrGLBackendTextureInfo& that, bool thisIsValid) {
+ fInfo = that.fInfo;
+ SkSafeRef(that.fParams);
+ if (thisIsValid) {
+ SkSafeUnref(fParams);
+ }
+ fParams = that.fParams;
+}
+
+void GrGLBackendTextureInfo::cleanup() { SkSafeUnref(fParams); }
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp
new file mode 100644
index 0000000000..56ed244dbf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLUniformHandler.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/gl/GrGLCaps.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/builders/GrGLProgramBuilder.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), R, X)
+
+bool valid_name(const char* name) {
+ // disallow unknown names that start with "sk_"
+ if (!strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ return !strcmp(name, SkSL::Compiler::RTADJUST_NAME);
+ }
+ return true;
+}
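+
+// For example (assuming GR_NO_MANGLE_PREFIX is "sk_" and SkSL::Compiler::RTADJUST_NAME is
+// "sk_RTAdjust"): valid_name("myUniform") and valid_name("sk_RTAdjust") are true, while
+// valid_name("sk_FragCoord") is false.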
+
+GrGLSLUniformHandler::UniformHandle GrGLUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkASSERT(valid_name(name));
+ SkASSERT(0 != visibility);
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ uni.fVariable.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+    // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to
+    // use the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the
+    // PB exactly what name it wants to use for the uniform view matrix. If we prefix anything,
+    // the names will mismatch. The correct solution is probably to have all GPs that need the
+    // uniform view matrix upload it in their setData() along with their regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0] || !strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ uni.fVisibility = visibility;
+ uni.fLocation = -1;
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
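+
+// Illustrative example (not upstream): adding "viewMatrix" with mangleName = true produces a
+// variable named something like "uviewMatrix_Stage0" (the 'u' prefix plus a mangling suffix),
+// whereas a name that already starts with 'u' or carries the "sk_" no-mangle prefix is left
+// unprefixed so it matches the name the GP expects.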
+
+GrGLSLUniformHandler::SamplerHandle GrGLUniformHandler::addSampler(const GrTextureProxy* texture,
+ const GrSamplerState&,
+ const GrSwizzle& swizzle,
+ const char* name,
+ const GrShaderCaps* shaderCaps) {
+ SkASSERT(name && strlen(name));
+
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+
+ GrTextureType type = texture->textureType();
+
+ UniformInfo& sampler = fSamplers.push_back();
+ sampler.fVariable.setType(GrSLCombinedSamplerTypeForTextureType(type));
+ sampler.fVariable.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ sampler.fVariable.setName(mangleName);
+ sampler.fLocation = -1;
+ sampler.fVisibility = kFragment_GrShaderFlag;
+ if (shaderCaps->textureSwizzleAppliedInShader()) {
+ fSamplerSwizzles.push_back(swizzle);
+ SkASSERT(fSamplers.count() == fSamplerSwizzles.count());
+ }
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrGLUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ if (fUniforms[i].fVisibility & visibility) {
+ fUniforms[i].fVariable.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";");
+ }
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ if (fSamplers[i].fVisibility & visibility) {
+ fSamplers[i].fVariable.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+ }
+}
+
+void GrGLUniformHandler::bindUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
+ if (caps.bindUniformLocationSupport()) {
+ int currUniform = 0;
+ for (int i = 0; i < fUniforms.count(); ++i, ++currUniform) {
+ GL_CALL(BindUniformLocation(programID, currUniform, fUniforms[i].fVariable.c_str()));
+ fUniforms[i].fLocation = currUniform;
+ }
+ for (int i = 0; i < fSamplers.count(); ++i, ++currUniform) {
+ GL_CALL(BindUniformLocation(programID, currUniform, fSamplers[i].fVariable.c_str()));
+ fSamplers[i].fLocation = currUniform;
+ }
+ }
+}
+
+void GrGLUniformHandler::getUniformLocations(GrGLuint programID, const GrGLCaps& caps, bool force) {
+ if (!caps.bindUniformLocationSupport() || force) {
+ int count = fUniforms.count();
+ for (int i = 0; i < count; ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetUniformLocation(programID, fUniforms[i].fVariable.c_str()));
+ fUniforms[i].fLocation = location;
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetUniformLocation(programID, fSamplers[i].fVariable.c_str()));
+ fSamplers[i].fLocation = location;
+ }
+ }
+}
+
+const GrGLGpu* GrGLUniformHandler::glGpu() const {
+ GrGLProgramBuilder* glPB = (GrGLProgramBuilder*) fProgramBuilder;
+ return glPB->gpu();
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h
new file mode 100644
index 0000000000..f577755e3a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLUniformHandler_DEFINED
+#define GrGLUniformHandler_DEFINED
+
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+#include "src/gpu/gl/GrGLProgramDataManager.h"
+
+class GrGLCaps;
+
+class GrGLUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ const GrShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+private:
+ explicit GrGLUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fSamplers(kUniformsPerBlock) {}
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void updateUniformVisibility(UniformHandle u, uint32_t visibility) override {
+ fUniforms[u.toIndex()].fVisibility |= visibility;
+ }
+
+ SamplerHandle addSampler(const GrTextureProxy*, const GrSamplerState&, const GrSwizzle&,
+ const char* name, const GrShaderCaps*) override;
+
+ const char* samplerVariable(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()].fVariable.c_str();
+ }
+
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const override {
+ return fSamplerSwizzles[handle.toIndex()];
+ }
+
+ void appendUniformDecls(GrShaderFlags visibility, SkString*) const override;
+
+ // Manually set uniform locations for all our uniforms.
+ void bindUniformLocations(GrGLuint programID, const GrGLCaps& caps);
+
+    // Updates the locations of the uniforms if we cannot bind uniform locations manually.
+ void getUniformLocations(GrGLuint programID, const GrGLCaps& caps, bool force);
+
+ const GrGLGpu* glGpu() const;
+
+ typedef GrGLProgramDataManager::UniformInfo UniformInfo;
+ typedef GrGLProgramDataManager::UniformInfoArray UniformInfoArray;
+
+ UniformInfoArray fUniforms;
+ UniformInfoArray fSamplers;
+ SkTArray<GrSwizzle> fSamplerSwizzles;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp b/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp
new file mode 100644
index 0000000000..9a79f6da38
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkMatrix.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/gl/GrGLUtil.h"
+#include <stdio.h>
+
+void GrGLClearErr(const GrGLInterface* gl) {
+ while (GR_GL_NO_ERROR != gl->fFunctions.fGetError()) {}
+}
+
+namespace {
+const char *get_error_string(uint32_t err) {
+ switch (err) {
+ case GR_GL_NO_ERROR:
+ return "";
+ case GR_GL_INVALID_ENUM:
+ return "Invalid Enum";
+ case GR_GL_INVALID_VALUE:
+ return "Invalid Value";
+ case GR_GL_INVALID_OPERATION:
+ return "Invalid Operation";
+ case GR_GL_OUT_OF_MEMORY:
+ return "Out of Memory";
+ case GR_GL_CONTEXT_LOST:
+ return "Context Lost";
+ }
+ return "Unknown";
+}
+}
+
+void GrGLCheckErr(const GrGLInterface* gl,
+ const char* location,
+ const char* call) {
+ uint32_t err = GR_GL_GET_ERROR(gl);
+ if (GR_GL_NO_ERROR != err) {
+ SkDebugf("---- glGetError 0x%x(%s)", err, get_error_string(err));
+ if (location) {
+ SkDebugf(" at\n\t%s", location);
+ }
+ if (call) {
+ SkDebugf("\n\t\t%s", call);
+ }
+ SkDebugf("\n");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_GL_LOG_CALLS
+ bool gLogCallsGL = !!(GR_GL_LOG_CALLS_START);
+#endif
+
+#if GR_GL_CHECK_ERROR
+ bool gCheckErrorGL = !!(GR_GL_CHECK_ERROR_START);
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLStandard GrGLGetStandardInUseFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GL version string.");
+ return kNone_GrGLStandard;
+ }
+
+ int major, minor;
+
+ // check for desktop
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return kGL_GrGLStandard;
+ }
+
+ // WebGL might look like "OpenGL ES 2.0 (WebGL 1.0 (OpenGL ES 2.0 Chromium))"
+ int esMajor, esMinor;
+ n = sscanf(versionString, "OpenGL ES %d.%d (WebGL %d.%d", &esMajor, &esMinor, &major, &minor);
+ if (4 == n) {
+ return kWebGL_GrGLStandard;
+ }
+
+ // check for ES 1
+ char profile[2];
+ n = sscanf(versionString, "OpenGL ES-%c%c %d.%d", profile, profile+1, &major, &minor);
+ if (4 == n) {
+ // we no longer support ES1.
+ return kNone_GrGLStandard;
+ }
+
+ // check for ES2
+ n = sscanf(versionString, "OpenGL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return kGLES_GrGLStandard;
+ }
+ return kNone_GrGLStandard;
+}
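+
+// Illustrative examples (hypothetical version strings) of how the checks above classify input:
+//     "4.1 ATI-4.8.101"                                    -> kGL_GrGLStandard
+//     "OpenGL ES 3.0 (WebGL 2.0 (OpenGL ES 3.0 Chromium))" -> kWebGL_GrGLStandard
+//     "OpenGL ES-CM 1.1"                                   -> kNone_GrGLStandard (ES1 dropped)
+//     "OpenGL ES 3.2 v1.r22p0"                             -> kGLES_GrGLStandard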
+
+void GrGLGetDriverInfo(GrGLStandard standard,
+ GrGLVendor vendor,
+ const char* rendererString,
+ const char* versionString,
+ GrGLDriver* outDriver,
+ GrGLDriverVersion* outVersion) {
+ int major, minor, rev, driverMajor, driverMinor, driverPoint;
+
+ *outDriver = kUnknown_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_UNKNOWN_VER;
+ // These null checks are for test GL contexts that return nullptr in their
+ // glGetString implementation.
+ if (!rendererString) {
+ rendererString = "";
+ }
+ if (!versionString) {
+ versionString = "";
+ }
+
+ static const char kChromium[] = "Chromium";
+ char suffix[SK_ARRAY_COUNT(kChromium)];
+ if (0 == strcmp(rendererString, kChromium) ||
+ (3 == sscanf(versionString, "OpenGL ES %d.%d %8s", &major, &minor, suffix) &&
+ 0 == strcmp(kChromium, suffix))) {
+ *outDriver = kChromium_GrGLDriver;
+ return;
+ }
+
+ if (GR_IS_GR_GL(standard)) {
+ if (kNVIDIA_GrGLVendor == vendor) {
+ *outDriver = kNVIDIA_GrGLDriver;
+ int n = sscanf(versionString, "%d.%d.%d NVIDIA %d.%d",
+ &major, &minor, &rev, &driverMajor, &driverMinor);
+ // Some older NVIDIA drivers don't report the driver version.
+ if (5 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ }
+ return;
+ }
+ int n = sscanf(versionString, "%d.%d Mesa %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ if (4 != n) {
+ n = sscanf(versionString, "%d.%d (Core Profile) Mesa %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ }
+ if (4 == n) {
+ *outDriver = kMesa_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ return;
+ }
+ } else if (GR_IS_GR_GL_ES(standard)) {
+ if (kNVIDIA_GrGLVendor == vendor) {
+ *outDriver = kNVIDIA_GrGLDriver;
+ int n = sscanf(versionString, "OpenGL ES %d.%d NVIDIA %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ // Some older NVIDIA drivers don't report the driver version.
+ if (4 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ }
+ return;
+ }
+
+ int n = sscanf(versionString, "OpenGL ES %d.%d Mesa %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ if (4 == n) {
+ *outDriver = kMesa_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ return;
+ }
+ if (0 == strncmp("ANGLE", rendererString, 5)) {
+ *outDriver = kANGLE_GrGLDriver;
+ n = sscanf(versionString, "OpenGL ES %d.%d (ANGLE %d.%d", &major, &minor, &driverMajor,
+ &driverMinor);
+ if (4 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ }
+ return;
+ }
+ }
+
+ if (kGoogle_GrGLVendor == vendor) {
+        // SwiftShader is the only Google vendor at the moment.
+ *outDriver = kSwiftShader_GrGLDriver;
+
+        // SwiftShader has a strange version string: w.x.y.z. We arbitrarily ignore y and treat
+        // w, x, and z as major, minor, and point.
+        // As of this writing, the version is 4.0.0.6.
+ int n = sscanf(versionString, "OpenGL ES %d.%d SwiftShader %d.%d.0.%d", &major, &minor,
+ &driverMajor, &driverMinor, &driverPoint);
+ if (5 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, driverPoint);
+ }
+ return;
+ }
+
+ if (kIntel_GrGLVendor == vendor) {
+ // We presume we're on the Intel driver since it hasn't identified itself as Mesa.
+ *outDriver = kIntel_GrGLDriver;
+
+        // This is how the macOS version strings are structured; they may differ on other OSes.
+ int n = sscanf(versionString, "%d.%d INTEL-%d.%d.%d", &major, &minor, &driverMajor,
+ &driverMinor, &driverPoint);
+ if (5 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, driverPoint);
+ }
+ }
+
+ if (kQualcomm_GrGLVendor == vendor) {
+ *outDriver = kQualcomm_GrGLDriver;
+ int n = sscanf(versionString, "OpenGL ES %d.%d V@%d.%d", &major, &minor, &driverMajor,
+ &driverMinor);
+ if (4 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor, 0);
+ }
+ return;
+ }
+ static constexpr char kEmulatorPrefix[] = "Android Emulator OpenGL ES Translator";
+ if (0 == strncmp(kEmulatorPrefix, rendererString, strlen(kEmulatorPrefix))) {
+ *outDriver = kAndroidEmulator_GrGLDriver;
+ }
+}
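+
+// Illustrative examples (hypothetical version strings) of what this function parses:
+//     GL: "4.5.0 NVIDIA 390.77"   -> kNVIDIA_GrGLDriver,   driver version 390.77.0
+//     GL: "3.0 Mesa 17.3.9"       -> kMesa_GrGLDriver,     driver version 17.3.0
+//     ES: "OpenGL ES 3.2 V@331.0" -> kQualcomm_GrGLDriver, driver version 331.0.0
+// (The NVIDIA and Qualcomm cases also require the corresponding GrGLVendor to have been
+// identified from the vendor string.)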
+
+GrGLVersion GrGLGetVersionFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GL version string.");
+ return GR_GL_INVALID_VER;
+ }
+
+ int major, minor;
+
+ // check for mesa
+ int mesaMajor, mesaMinor;
+ int n = sscanf(versionString, "%d.%d Mesa %d.%d", &major, &minor, &mesaMajor, &mesaMinor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ // WebGL might look like "OpenGL ES 2.0 (WebGL 1.0 (OpenGL ES 2.0 Chromium))"
+ int esMajor, esMinor;
+ n = sscanf(versionString, "OpenGL ES %d.%d (WebGL %d.%d", &esMajor, &esMinor, &major, &minor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ char profile[2];
+ n = sscanf(versionString, "OpenGL ES-%c%c %d.%d", profile, profile+1,
+ &major, &minor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ return GR_GL_INVALID_VER;
+}
+
+GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GLSL version string.");
+ return GR_GLSL_INVALID_VER;
+ }
+
+ int major, minor;
+
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES GLSL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+    // Android hack until the GPU vendor updates their drivers.
+ n = sscanf(versionString, "OpenGL ES GLSL %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+#endif
+
+ return GR_GLSL_INVALID_VER;
+}
+
+GrGLVendor GrGLGetVendorFromString(const char* vendorString) {
+ if (vendorString) {
+ if (0 == strcmp(vendorString, "ARM")) {
+ return kARM_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "Google Inc.")) {
+ return kGoogle_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "Imagination Technologies")) {
+ return kImagination_GrGLVendor;
+ }
+ if (0 == strncmp(vendorString, "Intel ", 6) || 0 == strcmp(vendorString, "Intel")) {
+ return kIntel_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "Qualcomm")) {
+ return kQualcomm_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "NVIDIA Corporation")) {
+ return kNVIDIA_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "ATI Technologies Inc.")) {
+ return kATI_GrGLVendor;
+ }
+ }
+ return kOther_GrGLVendor;
+}
+
+static bool is_renderer_angle(const char* rendererString) {
+ static constexpr char kHeader[] = "ANGLE ";
+ static constexpr size_t kHeaderLength = SK_ARRAY_COUNT(kHeader) - 1;
+ return rendererString && 0 == strncmp(rendererString, kHeader, kHeaderLength);
+}
+
+GrGLRenderer GrGLGetRendererFromStrings(const char* rendererString,
+ const GrGLExtensions& extensions) {
+ if (rendererString) {
+ static const char kTegraStr[] = "NVIDIA Tegra";
+ if (0 == strncmp(rendererString, kTegraStr, SK_ARRAY_COUNT(kTegraStr) - 1)) {
+ // Tegra strings are not very descriptive. We distinguish between the modern and legacy
+ // architectures by the presence of NV_path_rendering.
+ return extensions.has("GL_NV_path_rendering") ? kTegra_GrGLRenderer
+ : kTegra_PreK1_GrGLRenderer;
+ }
+ int lastDigit;
+ int n = sscanf(rendererString, "PowerVR SGX 54%d", &lastDigit);
+ if (1 == n && lastDigit >= 0 && lastDigit <= 9) {
+ return kPowerVR54x_GrGLRenderer;
+ }
+ // certain iOS devices also use PowerVR54x GPUs
+ static const char kAppleA4Str[] = "Apple A4";
+ static const char kAppleA5Str[] = "Apple A5";
+ static const char kAppleA6Str[] = "Apple A6";
+ if (0 == strncmp(rendererString, kAppleA4Str,
+ SK_ARRAY_COUNT(kAppleA4Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA5Str,
+ SK_ARRAY_COUNT(kAppleA5Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA6Str,
+ SK_ARRAY_COUNT(kAppleA6Str)-1)) {
+ return kPowerVR54x_GrGLRenderer;
+ }
+ static const char kPowerVRRogueStr[] = "PowerVR Rogue";
+ static const char kAppleA7Str[] = "Apple A7";
+ static const char kAppleA8Str[] = "Apple A8";
+ if (0 == strncmp(rendererString, kPowerVRRogueStr,
+ SK_ARRAY_COUNT(kPowerVRRogueStr)-1) ||
+ 0 == strncmp(rendererString, kAppleA7Str,
+ SK_ARRAY_COUNT(kAppleA7Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA8Str,
+ SK_ARRAY_COUNT(kAppleA8Str)-1)) {
+ return kPowerVRRogue_GrGLRenderer;
+ }
+ int adrenoNumber;
+ n = sscanf(rendererString, "Adreno (TM) %d", &adrenoNumber);
+ if (1 == n) {
+ if (adrenoNumber >= 300) {
+ if (adrenoNumber < 400) {
+ return kAdreno3xx_GrGLRenderer;
+ }
+ if (adrenoNumber < 500) {
+ return adrenoNumber >= 430
+ ? kAdreno430_GrGLRenderer : kAdreno4xx_other_GrGLRenderer;
+ }
+ if (adrenoNumber < 600) {
+ return kAdreno5xx_GrGLRenderer;
+ }
+ }
+ }
+ if (0 == strcmp("Google SwiftShader", rendererString)) {
+ return kGoogleSwiftShader_GrGLRenderer;
+ }
+
+ if (const char* intelString = strstr(rendererString, "Intel")) {
+ // These generic strings seem to always come from Haswell: Iris 5100 or Iris Pro 5200
+ if (0 == strcmp("Intel Iris OpenGL Engine", intelString) ||
+ 0 == strcmp("Intel Iris Pro OpenGL Engine", intelString)) {
+ return kIntelHaswell_GrGLRenderer;
+ }
+ if (strstr(intelString, "Sandybridge")) {
+ return kIntelSandyBridge_GrGLRenderer;
+ }
+ if (strstr(intelString, "Bay Trail")) {
+ return kIntelValleyView_GrGLRenderer;
+ }
+ // There are many possible intervening strings here:
+ // 'Intel(R)' is a common prefix
+ // 'Iris' may appear, followed by '(R)' or '(TM)'
+ // 'Iris' can then be followed by 'Graphics', 'Pro Graphics', or 'Plus Graphics'
+ // If 'Iris' isn't there, we might have 'HD Graphics' or 'UHD Graphics'
+ //
+ // In all cases, though, we end with 'Graphics ', an optional 'P', and a number,
+ // so just skip to that and handle two cases:
+ if (const char* intelGfxString = strstr(intelString, "Graphics")) {
+ int intelNumber;
+ if (sscanf(intelGfxString, "Graphics %d", &intelNumber) ||
+ sscanf(intelGfxString, "Graphics P%d", &intelNumber)) {
+
+ if (intelNumber == 2000 || intelNumber == 3000) {
+ return kIntelSandyBridge_GrGLRenderer;
+ }
+ if (intelNumber == 2500 || intelNumber == 4000) {
+ return kIntelIvyBridge_GrGLRenderer;
+ }
+ if (intelNumber >= 4200 && intelNumber <= 5200) {
+ return kIntelHaswell_GrGLRenderer;
+ }
+ if (intelNumber >= 400 && intelNumber <= 405) {
+ return kIntelCherryView_GrGLRenderer;
+ }
+ if (intelNumber >= 5300 && intelNumber <= 6300) {
+ return kIntelBroadwell_GrGLRenderer;
+ }
+ if (intelNumber >= 500 && intelNumber <= 505) {
+ return kIntelApolloLake_GrGLRenderer;
+ }
+ if (intelNumber >= 510 && intelNumber <= 580) {
+ return kIntelSkyLake_GrGLRenderer;
+ }
+ if (intelNumber >= 600 && intelNumber <= 605) {
+ return kIntelGeminiLake_GrGLRenderer;
+ }
+ // 610 and 630 are reused from KabyLake to CoffeeLake. The CoffeeLake variants
+ // are "UHD Graphics", while the KabyLake ones are "HD Graphics"
+ if (intelNumber == 610 || intelNumber == 630) {
+ return strstr(intelString, "UHD") ? kIntelCoffeeLake_GrGLRenderer
+ : kIntelKabyLake_GrGLRenderer;
+ }
+ if (intelNumber >= 610 && intelNumber <= 650) {
+ return kIntelKabyLake_GrGLRenderer;
+ }
+ if (intelNumber == 655) {
+ return kIntelCoffeeLake_GrGLRenderer;
+ }
+ if (intelNumber >= 910 && intelNumber <= 950) {
+ return kIntelIceLake_GrGLRenderer;
+ }
+ }
+ }
+ }
+
+ // The AMD string can have a somewhat arbitrary preamble (see skbug.com/7195)
+ if (const char* amdString = strstr(rendererString, "Radeon")) {
+ char amdGeneration, amdTier, amdRevision;
+ n = sscanf(amdString, "Radeon (TM) R9 M%c%c%c",
+ &amdGeneration, &amdTier, &amdRevision);
+ if (3 == n) {
+ if ('4' == amdGeneration) {
+ return kAMDRadeonR9M4xx_GrGLRenderer;
+ }
+ }
+
+ char amd0, amd1, amd2;
+ n = sscanf(amdString, "Radeon HD 7%c%c%c Series", &amd0, &amd1, &amd2);
+ if (3 == n) {
+ return kAMDRadeonHD7xxx_GrGLRenderer;
+ }
+ }
+
+ if (strstr(rendererString, "llvmpipe")) {
+ return kGalliumLLVM_GrGLRenderer;
+ }
+ static const char kMaliTStr[] = "Mali-T";
+ if (0 == strncmp(rendererString, kMaliTStr, SK_ARRAY_COUNT(kMaliTStr) - 1)) {
+ return kMaliT_GrGLRenderer;
+ }
+ int mali400Num;
+ if (1 == sscanf(rendererString, "Mali-%d", &mali400Num) && mali400Num >= 400 &&
+ mali400Num < 500) {
+ return kMali4xx_GrGLRenderer;
+ }
+ if (is_renderer_angle(rendererString)) {
+ return kANGLE_GrGLRenderer;
+ }
+ }
+ return kOther_GrGLRenderer;
+}
+
+void GrGLGetANGLEInfoFromString(const char* rendererString, GrGLANGLEBackend* backend,
+ GrGLANGLEVendor* vendor, GrGLANGLERenderer* renderer) {
+ *backend = GrGLANGLEBackend::kUnknown;
+ *vendor = GrGLANGLEVendor::kUnknown;
+ *renderer = GrGLANGLERenderer::kUnknown;
+ if (!is_renderer_angle(rendererString)) {
+ return;
+ }
+ if (strstr(rendererString, "Intel")) {
+ *vendor = GrGLANGLEVendor::kIntel;
+
+ const char* modelStr;
+ int modelNumber;
+ if ((modelStr = strstr(rendererString, "HD Graphics")) &&
+ (1 == sscanf(modelStr, "HD Graphics %i", &modelNumber) ||
+ 1 == sscanf(modelStr, "HD Graphics P%i", &modelNumber))) {
+ switch (modelNumber) {
+ case 2000:
+ case 3000:
+ *renderer = GrGLANGLERenderer::kSandyBridge;
+ break;
+ case 4000:
+ case 2500:
+ *renderer = GrGLANGLERenderer::kIvyBridge;
+ break;
+ case 510:
+ case 515:
+ case 520:
+ case 530:
+ *renderer = GrGLANGLERenderer::kSkylake;
+ break;
+ }
+ } else if ((modelStr = strstr(rendererString, "Iris")) &&
+ (1 == sscanf(modelStr, "Iris(TM) Graphics %i", &modelNumber) ||
+ 1 == sscanf(modelStr, "Iris(TM) Pro Graphics %i", &modelNumber) ||
+ 1 == sscanf(modelStr, "Iris(TM) Pro Graphics P%i", &modelNumber))) {
+ switch (modelNumber) {
+ case 540:
+ case 550:
+ case 555:
+ case 580:
+ *renderer = GrGLANGLERenderer::kSkylake;
+ break;
+ }
+ }
+ }
+ if (strstr(rendererString, "Direct3D11")) {
+ *backend = GrGLANGLEBackend::kD3D11;
+ } else if (strstr(rendererString, "Direct3D9")) {
+ *backend = GrGLANGLEBackend::kD3D9;
+ } else if (strstr(rendererString, "OpenGL")) {
+ *backend = GrGLANGLEBackend::kOpenGL;
+ }
+}
+
+GrGLVersion GrGLGetVersion(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_VERSION));
+ return GrGLGetVersionFromString((const char*) v);
+}
+
+GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
+ return GrGLGetGLSLVersionFromString((const char*) v);
+}
+
+GrGLVendor GrGLGetVendor(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_VENDOR));
+ return GrGLGetVendorFromString((const char*) v);
+}
+
+GrGLRenderer GrGLGetRenderer(const GrGLInterface* gl) {
+ const GrGLubyte* rendererString;
+ GR_GL_CALL_RET(gl, rendererString, GetString(GR_GL_RENDERER));
+
+ return GrGLGetRendererFromStrings((const char*)rendererString, gl->fExtensions);
+}
+
+GrGLenum GrToGLStencilFunc(GrStencilTest test) {
+ static const GrGLenum gTable[kGrStencilTestCount] = {
+ GR_GL_ALWAYS, // kAlways
+ GR_GL_NEVER, // kNever
+ GR_GL_GREATER, // kGreater
+ GR_GL_GEQUAL, // kGEqual
+ GR_GL_LESS, // kLess
+ GR_GL_LEQUAL, // kLEqual
+ GR_GL_EQUAL, // kEqual
+ GR_GL_NOTEQUAL, // kNotEqual
+ };
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(1 == (int)GrStencilTest::kNever);
+ GR_STATIC_ASSERT(2 == (int)GrStencilTest::kGreater);
+ GR_STATIC_ASSERT(3 == (int)GrStencilTest::kGEqual);
+ GR_STATIC_ASSERT(4 == (int)GrStencilTest::kLess);
+ GR_STATIC_ASSERT(5 == (int)GrStencilTest::kLEqual);
+ GR_STATIC_ASSERT(6 == (int)GrStencilTest::kEqual);
+ GR_STATIC_ASSERT(7 == (int)GrStencilTest::kNotEqual);
+ SkASSERT(test < (GrStencilTest)kGrStencilTestCount);
+
+ return gTable[(int)test];
+}
+
+bool GrGLFormatIsCompressed(GrGLFormat format) {
+ switch (format) {
+ case GrGLFormat::kCOMPRESSED_RGB8_ETC2:
+ case GrGLFormat::kCOMPRESSED_ETC1_RGB8:
+ return true;
+
+ case GrGLFormat::kRGBA8:
+ case GrGLFormat::kR8:
+ case GrGLFormat::kALPHA8:
+ case GrGLFormat::kLUMINANCE8:
+ case GrGLFormat::kBGRA8:
+ case GrGLFormat::kRGB565:
+ case GrGLFormat::kRGBA16F:
+ case GrGLFormat::kR16F:
+ case GrGLFormat::kLUMINANCE16F:
+ case GrGLFormat::kRGB8:
+ case GrGLFormat::kRG8:
+ case GrGLFormat::kRGB10_A2:
+ case GrGLFormat::kRGBA4:
+ case GrGLFormat::kSRGB8_ALPHA8:
+ case GrGLFormat::kR16:
+ case GrGLFormat::kRG16:
+ case GrGLFormat::kRGBA16:
+ case GrGLFormat::kRG16F:
+ case GrGLFormat::kUnknown:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+bool GrGLFormatToCompressionType(GrGLFormat format, SkImage::CompressionType* compressionType) {
+ switch (format) {
+ case GrGLFormat::kCOMPRESSED_RGB8_ETC2:
+ case GrGLFormat::kCOMPRESSED_ETC1_RGB8:
+ *compressionType = SkImage::kETC1_CompressionType;
+ return true;
+
+ case GrGLFormat::kRGBA8:
+ case GrGLFormat::kR8:
+ case GrGLFormat::kALPHA8:
+ case GrGLFormat::kLUMINANCE8:
+ case GrGLFormat::kBGRA8:
+ case GrGLFormat::kRGB565:
+ case GrGLFormat::kRGBA16F:
+ case GrGLFormat::kR16F:
+ case GrGLFormat::kLUMINANCE16F:
+ case GrGLFormat::kRGB8:
+ case GrGLFormat::kRG8:
+ case GrGLFormat::kRGB10_A2:
+ case GrGLFormat::kRGBA4:
+ case GrGLFormat::kSRGB8_ALPHA8:
+ case GrGLFormat::kR16:
+ case GrGLFormat::kRG16:
+ case GrGLFormat::kRGBA16:
+ case GrGLFormat::kRG16F:
+ case GrGLFormat::kUnknown:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUtil.h b/gfx/skia/skia/src/gpu/gl/GrGLUtil.h
new file mode 100644
index 0000000000..35d9ad83bd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUtil.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLUtil_DEFINED
+#define GrGLUtil_DEFINED
+
+#include "include/gpu/gl/GrGLInterface.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/gl/GrGLDefines.h"
+
+class SkMatrix;
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef uint32_t GrGLVersion;
+typedef uint32_t GrGLSLVersion;
+typedef uint64_t GrGLDriverVersion;
+
+#define GR_GL_VER(major, minor) ((static_cast<uint32_t>(major) << 16) | \
+ static_cast<uint32_t>(minor))
+#define GR_GLSL_VER(major, minor) ((static_cast<uint32_t>(major) << 16) | \
+ static_cast<uint32_t>(minor))
+#define GR_GL_DRIVER_VER(major, minor, point) ((static_cast<uint64_t>(major) << 32) | \
+ (static_cast<uint64_t>(minor) << 16) | \
+ static_cast<uint64_t>(point))
+
+#define GR_GL_MAJOR_VER(version) (static_cast<uint32_t>(version) >> 16)
+#define GR_GL_MINOR_VER(version) (static_cast<uint32_t>(version) & 0xFFFF)
+
+#define GR_GL_INVALID_VER GR_GL_VER(0, 0)
+#define GR_GLSL_INVALID_VER GR_GLSL_VER(0, 0)
+#define GR_GL_DRIVER_UNKNOWN_VER GR_GL_DRIVER_VER(0, 0, 0)
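+
+// For example, GR_GL_VER(4, 6) packs to 0x00040006, so GR_GL_MAJOR_VER() and
+// GR_GL_MINOR_VER() recover 4 and 6. Packing major.minor into one integer makes
+// a version comparison a single unsigned compare.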
+
+/**
+ * The Vendor and Renderer enum values are lazily updated as required.
+ */
+enum GrGLVendor {
+ kARM_GrGLVendor,
+ kGoogle_GrGLVendor,
+ kImagination_GrGLVendor,
+ kIntel_GrGLVendor,
+ kQualcomm_GrGLVendor,
+ kNVIDIA_GrGLVendor,
+ kATI_GrGLVendor,
+
+ kOther_GrGLVendor
+};
+
+enum GrGLRenderer {
+ kTegra_PreK1_GrGLRenderer, // Legacy Tegra architecture (pre-K1).
+ kTegra_GrGLRenderer, // Tegra with the same architecture as NVIDIA desktop GPUs (K1+).
+ kPowerVR54x_GrGLRenderer,
+ kPowerVRRogue_GrGLRenderer,
+ kAdreno3xx_GrGLRenderer,
+ kAdreno430_GrGLRenderer,
+ kAdreno4xx_other_GrGLRenderer,
+ kAdreno5xx_GrGLRenderer,
+ kGoogleSwiftShader_GrGLRenderer,
+
+ /** Intel GPU families, ordered by generation **/
+ // 6th gen
+ kIntelSandyBridge_GrGLRenderer,
+
+ // 7th gen
+ kIntelIvyBridge_GrGLRenderer,
+ kIntelValleyView_GrGLRenderer, // aka BayTrail
+ kIntelHaswell_GrGLRenderer,
+
+ // 8th gen
+ kIntelCherryView_GrGLRenderer, // aka Braswell
+ kIntelBroadwell_GrGLRenderer,
+
+ // 9th gen
+ kIntelApolloLake_GrGLRenderer,
+ kIntelSkyLake_GrGLRenderer,
+ kIntelGeminiLake_GrGLRenderer,
+ kIntelKabyLake_GrGLRenderer,
+ kIntelCoffeeLake_GrGLRenderer,
+
+ // 11th gen
+ kIntelIceLake_GrGLRenderer,
+
+ kGalliumLLVM_GrGLRenderer,
+ kMali4xx_GrGLRenderer,
+ /** T-6xx, T-7xx, or T-8xx */
+ kMaliT_GrGLRenderer,
+ kANGLE_GrGLRenderer,
+
+ kAMDRadeonHD7xxx_GrGLRenderer, // AMD Radeon HD 7000 Series
+ kAMDRadeonR9M4xx_GrGLRenderer, // AMD Radeon R9 M400 Series
+
+ kOther_GrGLRenderer
+};
+
+enum GrGLDriver {
+ kMesa_GrGLDriver,
+ kChromium_GrGLDriver,
+ kNVIDIA_GrGLDriver,
+ kIntel_GrGLDriver,
+ kANGLE_GrGLDriver,
+ kSwiftShader_GrGLDriver,
+ kQualcomm_GrGLDriver,
+ kAndroidEmulator_GrGLDriver,
+ kUnknown_GrGLDriver
+};
+
+enum class GrGLANGLEBackend {
+ kUnknown,
+ kD3D9,
+ kD3D11,
+ kOpenGL
+};
+
+enum class GrGLANGLEVendor {
+ kUnknown,
+ kIntel
+};
+
+enum class GrGLANGLERenderer {
+ kUnknown,
+ kSandyBridge,
+ kIvyBridge,
+ kSkylake
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Some drivers want the integer output argument to be zero-initialized on input.
+ */
+#define GR_GL_INIT_ZERO 0
+#define GR_GL_GetIntegerv(gl, e, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetIntegerv(e, p)); \
+ } while (0)
+
+#define GR_GL_GetFramebufferAttachmentParameteriv(gl, t, a, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetFramebufferAttachmentParameteriv(t, a, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetInternalformativ(gl, t, f, n, s, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetInternalformativ(t, f, n, s, p)); \
+ } while (0)
+
+#define GR_GL_GetNamedFramebufferAttachmentParameteriv(gl, fb, a, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetNamedFramebufferAttachmentParameteriv(fb, a, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetRenderbufferParameteriv(gl, t, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetRenderbufferParameteriv(t, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetTexLevelParameteriv(gl, t, l, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetTexLevelParameteriv(t, l, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetShaderPrecisionFormat(gl, st, pt, range, precision) \
+ do { \
+ (range)[0] = GR_GL_INIT_ZERO; \
+ (range)[1] = GR_GL_INIT_ZERO; \
+ (*precision) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetShaderPrecisionFormat(st, pt, range, precision)); \
+ } while (0)
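+
+// Usage sketch: query an integer cap through the zero-initializing wrapper, so a
+// driver that never writes the output still leaves a defined value behind:
+//
+//     GrGLint maxTextureSize;
+//     GR_GL_GetIntegerv(gl, GR_GL_MAX_TEXTURE_SIZE, &maxTextureSize);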
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Helpers for glGetString()
+ */
+
+// these variants assume caller already has a string from glGetString()
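+// e.g. GrGLGetVersionFromString("4.6.0 NVIDIA 440.100") returns GR_GL_VER(4, 6),
+// and an ES context string like "OpenGL ES 3.2 v1.r22p0" yields GR_GL_VER(3, 2).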
+GrGLVersion GrGLGetVersionFromString(const char* versionString);
+GrGLStandard GrGLGetStandardInUseFromString(const char* versionString);
+GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString);
+GrGLVendor GrGLGetVendorFromString(const char* vendorString);
+GrGLRenderer GrGLGetRendererFromStrings(const char* rendererString, const GrGLExtensions&);
+void GrGLGetANGLEInfoFromString(const char* rendererString, GrGLANGLEBackend*,
+ GrGLANGLEVendor*, GrGLANGLERenderer*);
+
+void GrGLGetDriverInfo(GrGLStandard standard,
+ GrGLVendor vendor,
+ const char* rendererString,
+ const char* versionString,
+ GrGLDriver* outDriver,
+ GrGLDriverVersion* outVersion);
+
+// these variants call glGetString()
+GrGLVersion GrGLGetVersion(const GrGLInterface*);
+GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface*);
+GrGLVendor GrGLGetVendor(const GrGLInterface*);
+GrGLRenderer GrGLGetRenderer(const GrGLInterface*);
+
+/**
+ * Helpers for glGetError()
+ */
+
+void GrGLCheckErr(const GrGLInterface* gl,
+ const char* location,
+ const char* call);
+
+void GrGLClearErr(const GrGLInterface* gl);
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Macros for using GrGLInterface to make GL calls
+ */
+
+// internal macro to conditionally call glGetError based on compile-time and
+// run-time flags.
+#if GR_GL_CHECK_ERROR
+ extern bool gCheckErrorGL;
+ #define GR_GL_CHECK_ERROR_IMPL(IFACE, X) \
+ if (gCheckErrorGL) \
+ GrGLCheckErr(IFACE, GR_FILE_AND_LINE_STR, #X)
+#else
+ #define GR_GL_CHECK_ERROR_IMPL(IFACE, X)
+#endif
+
+// internal macro to conditionally log the gl call using SkDebugf based on
+// compile-time and run-time flags.
+#if GR_GL_LOG_CALLS
+ extern bool gLogCallsGL;
+ #define GR_GL_LOG_CALLS_IMPL(X) \
+ if (gLogCallsGL) \
+ SkDebugf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+#else
+ #define GR_GL_LOG_CALLS_IMPL(X)
+#endif
+
+// makes a GL call on the interface and does any error checking and logging
+#define GR_GL_CALL(IFACE, X) \
+ do { \
+ GR_GL_CALL_NOERRCHECK(IFACE, X); \
+ GR_GL_CHECK_ERROR_IMPL(IFACE, X); \
+ } while (false)
+
+// Variant of above that always skips the error check. This is useful when
+// the caller wants to do its own glGetError() call and examine the error value.
+#define GR_GL_CALL_NOERRCHECK(IFACE, X) \
+ do { \
+ (IFACE)->fFunctions.f##X; \
+ GR_GL_LOG_CALLS_IMPL(X); \
+ } while (false)
+
+// same as GR_GL_CALL but stores the return value of the gl call in RET
+#define GR_GL_CALL_RET(IFACE, RET, X) \
+ do { \
+ GR_GL_CALL_RET_NOERRCHECK(IFACE, RET, X); \
+ GR_GL_CHECK_ERROR_IMPL(IFACE, X); \
+ } while (false)
+
+// same as GR_GL_CALL_RET but always skips the error check.
+#define GR_GL_CALL_RET_NOERRCHECK(IFACE, RET, X) \
+ do { \
+ (RET) = (IFACE)->fFunctions.f##X; \
+ GR_GL_LOG_CALLS_IMPL(X); \
+ } while (false)
+
+// call glGetError without doing a redundant error check or logging.
+#define GR_GL_GET_ERROR(IFACE) (IFACE)->fFunctions.fGetError()
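+
+// Usage sketch: GR_GL_CALL(iface, Clear(GR_GL_COLOR_BUFFER_BIT)) expands to
+// iface->fFunctions.fClear(GR_GL_COLOR_BUFFER_BIT), plus whatever error checking
+// and logging GR_GL_CHECK_ERROR and GR_GL_LOG_CALLS enable at compile time.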
+
+static constexpr GrGLFormat GrGLFormatFromGLEnum(GrGLenum glFormat) {
+ switch (glFormat) {
+ case GR_GL_RGBA8: return GrGLFormat::kRGBA8;
+ case GR_GL_R8: return GrGLFormat::kR8;
+ case GR_GL_ALPHA8: return GrGLFormat::kALPHA8;
+ case GR_GL_LUMINANCE8: return GrGLFormat::kLUMINANCE8;
+ case GR_GL_BGRA8: return GrGLFormat::kBGRA8;
+ case GR_GL_RGB565: return GrGLFormat::kRGB565;
+ case GR_GL_RGBA16F: return GrGLFormat::kRGBA16F;
+ case GR_GL_LUMINANCE16F: return GrGLFormat::kLUMINANCE16F;
+ case GR_GL_R16F: return GrGLFormat::kR16F;
+ case GR_GL_RGB8: return GrGLFormat::kRGB8;
+ case GR_GL_RG8: return GrGLFormat::kRG8;
+ case GR_GL_RGB10_A2: return GrGLFormat::kRGB10_A2;
+ case GR_GL_RGBA4: return GrGLFormat::kRGBA4;
+ case GR_GL_SRGB8_ALPHA8: return GrGLFormat::kSRGB8_ALPHA8;
+ case GR_GL_COMPRESSED_RGB8_ETC2: return GrGLFormat::kCOMPRESSED_RGB8_ETC2;
+ case GR_GL_COMPRESSED_ETC1_RGB8: return GrGLFormat::kCOMPRESSED_ETC1_RGB8;
+ case GR_GL_R16: return GrGLFormat::kR16;
+ case GR_GL_RG16: return GrGLFormat::kRG16;
+ case GR_GL_RGBA16: return GrGLFormat::kRGBA16;
+ case GR_GL_RG16F: return GrGLFormat::kRG16F;
+
+ default: return GrGLFormat::kUnknown;
+ }
+}
+
+/** Returns either the sized internal format or compressed internal format of the GrGLFormat. */
+static constexpr GrGLenum GrGLFormatToEnum(GrGLFormat format) {
+ switch (format) {
+ case GrGLFormat::kRGBA8: return GR_GL_RGBA8;
+ case GrGLFormat::kR8: return GR_GL_R8;
+ case GrGLFormat::kALPHA8: return GR_GL_ALPHA8;
+ case GrGLFormat::kLUMINANCE8: return GR_GL_LUMINANCE8;
+ case GrGLFormat::kBGRA8: return GR_GL_BGRA8;
+ case GrGLFormat::kRGB565: return GR_GL_RGB565;
+ case GrGLFormat::kRGBA16F: return GR_GL_RGBA16F;
+ case GrGLFormat::kLUMINANCE16F: return GR_GL_LUMINANCE16F;
+ case GrGLFormat::kR16F: return GR_GL_R16F;
+ case GrGLFormat::kRGB8: return GR_GL_RGB8;
+ case GrGLFormat::kRG8: return GR_GL_RG8;
+ case GrGLFormat::kRGB10_A2: return GR_GL_RGB10_A2;
+ case GrGLFormat::kRGBA4: return GR_GL_RGBA4;
+ case GrGLFormat::kSRGB8_ALPHA8: return GR_GL_SRGB8_ALPHA8;
+ case GrGLFormat::kCOMPRESSED_RGB8_ETC2: return GR_GL_COMPRESSED_RGB8_ETC2;
+ case GrGLFormat::kCOMPRESSED_ETC1_RGB8: return GR_GL_COMPRESSED_ETC1_RGB8;
+ case GrGLFormat::kR16: return GR_GL_R16;
+ case GrGLFormat::kRG16: return GR_GL_RG16;
+ case GrGLFormat::kRGBA16: return GR_GL_RGBA16;
+ case GrGLFormat::kRG16F: return GR_GL_RG16F;
+ case GrGLFormat::kUnknown: return 0;
+ }
+ SkUNREACHABLE;
+}
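+
+// The two constexpr tables above are mutual inverses for the formats Skia knows,
+// so an illustrative check such as
+//     static_assert(GrGLFormatToEnum(GrGLFormatFromGLEnum(GR_GL_RGBA8)) == GR_GL_RGBA8, "");
+// holds, while an unrecognized GLenum collapses to GrGLFormat::kUnknown (which
+// maps back to 0).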
+
+#if GR_TEST_UTILS
+static constexpr const char* GrGLFormatToStr(GrGLenum glFormat) {
+ switch (glFormat) {
+ case GR_GL_RGBA8: return "RGBA8";
+ case GR_GL_R8: return "R8";
+ case GR_GL_ALPHA8: return "ALPHA8";
+ case GR_GL_LUMINANCE8: return "LUMINANCE8";
+ case GR_GL_BGRA8: return "BGRA8";
+ case GR_GL_RGB565: return "RGB565";
+ case GR_GL_RGBA16F: return "RGBA16F";
+ case GR_GL_LUMINANCE16F: return "LUMINANCE16F";
+ case GR_GL_R16F: return "R16F";
+ case GR_GL_RGB8: return "RGB8";
+ case GR_GL_RG8: return "RG8";
+ case GR_GL_RGB10_A2: return "RGB10_A2";
+ case GR_GL_RGBA4: return "RGBA4";
+ case GR_GL_RGBA32F: return "RGBA32F";
+ case GR_GL_SRGB8_ALPHA8: return "SRGB8_ALPHA8";
+ case GR_GL_COMPRESSED_RGB8_ETC2: return "ETC2";
+ case GR_GL_COMPRESSED_ETC1_RGB8: return "ETC1";
+ case GR_GL_R16: return "R16";
+ case GR_GL_RG16: return "RG16";
+ case GR_GL_RGBA16: return "RGBA16";
+ case GR_GL_RG16F: return "RG16F";
+
+ default: return "Unknown";
+ }
+}
+#endif
+
+GrGLenum GrToGLStencilFunc(GrStencilTest test);
+
+/**
+ * Returns true if the format is compressed.
+ */
+bool GrGLFormatIsCompressed(GrGLFormat);
+
+/**
+ * Maps a GrGLFormat into the CompressionType enum if appropriate.
+ */
+bool GrGLFormatToCompressionType(GrGLFormat, SkImage::CompressionType*);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp
new file mode 100644
index 0000000000..76ab1285d1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/GrGLVaryingHandler.h"
+
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/builders/GrGLProgramBuilder.h"
+
+GrGLSLVaryingHandler::VaryingHandle GrGLVaryingHandler::addPathProcessingVarying(
+ const char* name,
+ GrGLSLVarying* v) {
+#ifdef SK_DEBUG
+ GrGLProgramBuilder* glPB = (GrGLProgramBuilder*) fProgramBuilder;
+ // This call is not used for non-NVPR backends.
+ SkASSERT(glPB->gpu()->glCaps().shaderCaps()->pathRenderingSupport() &&
+ fProgramBuilder->fProgramInfo.isNVPR());
+#endif
+ this->addVarying(name, v);
+ auto& varyingInfo = fPathProcVaryingInfos.push_back();
+ varyingInfo.fLocation = fPathProcVaryingInfos.count() - 1;
+ return VaryingHandle(varyingInfo.fLocation);
+}
+
+void GrGLVaryingHandler::onFinalize() {
+ SkASSERT(fPathProcVaryingInfos.empty() || fPathProcVaryingInfos.count() == fFragInputs.count());
+ for (int i = 0; i < fPathProcVaryingInfos.count(); ++i) {
+ fPathProcVaryingInfos[i].fVariable = fFragInputs[i];
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h
new file mode 100644
index 0000000000..da35e66b04
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLVaryingHandler_DEFINED
+#define GrGLVaryingHandler_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/gl/GrGLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrGLVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrGLVaryingHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program),
+ fPathProcVaryingInfos(kVaryingsPerBlock) {}
+
+ // This function is used by the NVPR PathProcessor to add a varying directly into the fragment
+ // shader since there is no vertex shader.
+ VaryingHandle addPathProcessingVarying(const char* name, GrGLSLVarying*);
+
+private:
+ void onFinalize() override;
+
+ GrGLProgramDataManager::VaryingInfoArray fPathProcVaryingInfos;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp
new file mode 100644
index 0000000000..cfac573241
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCpuBuffer.h"
+#include "src/gpu/gl/GrGLBuffer.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLVertexArray.h"
+
+struct AttribLayout {
+ bool fNormalized; // Only used by floating point types.
+ uint8_t fCount;
+ uint16_t fType;
+};
+
+GR_STATIC_ASSERT(4 == sizeof(AttribLayout));
+
+static AttribLayout attrib_layout(GrVertexAttribType type) {
+ switch (type) {
+ case kFloat_GrVertexAttribType:
+ return {false, 1, GR_GL_FLOAT};
+ case kFloat2_GrVertexAttribType:
+ return {false, 2, GR_GL_FLOAT};
+ case kFloat3_GrVertexAttribType:
+ return {false, 3, GR_GL_FLOAT};
+ case kFloat4_GrVertexAttribType:
+ return {false, 4, GR_GL_FLOAT};
+ case kHalf_GrVertexAttribType:
+ return {false, 1, GR_GL_HALF_FLOAT};
+ case kHalf2_GrVertexAttribType:
+ return {false, 2, GR_GL_HALF_FLOAT};
+ case kHalf3_GrVertexAttribType:
+ return {false, 3, GR_GL_HALF_FLOAT};
+ case kHalf4_GrVertexAttribType:
+ return {false, 4, GR_GL_HALF_FLOAT};
+ case kInt2_GrVertexAttribType:
+ return {false, 2, GR_GL_INT};
+ case kInt3_GrVertexAttribType:
+ return {false, 3, GR_GL_INT};
+ case kInt4_GrVertexAttribType:
+ return {false, 4, GR_GL_INT};
+ case kByte_GrVertexAttribType:
+ return {false, 1, GR_GL_BYTE};
+ case kByte2_GrVertexAttribType:
+ return {false, 2, GR_GL_BYTE};
+ case kByte3_GrVertexAttribType:
+ return {false, 3, GR_GL_BYTE};
+ case kByte4_GrVertexAttribType:
+ return {false, 4, GR_GL_BYTE};
+ case kUByte_GrVertexAttribType:
+ return {false, 1, GR_GL_UNSIGNED_BYTE};
+ case kUByte2_GrVertexAttribType:
+ return {false, 2, GR_GL_UNSIGNED_BYTE};
+ case kUByte3_GrVertexAttribType:
+ return {false, 3, GR_GL_UNSIGNED_BYTE};
+ case kUByte4_GrVertexAttribType:
+ return {false, 4, GR_GL_UNSIGNED_BYTE};
+ case kUByte_norm_GrVertexAttribType:
+ return {true, 1, GR_GL_UNSIGNED_BYTE};
+ case kUByte4_norm_GrVertexAttribType:
+ return {true, 4, GR_GL_UNSIGNED_BYTE};
+ case kShort2_GrVertexAttribType:
+ return {false, 2, GR_GL_SHORT};
+ case kShort4_GrVertexAttribType:
+ return {false, 4, GR_GL_SHORT};
+ case kUShort2_GrVertexAttribType:
+ return {false, 2, GR_GL_UNSIGNED_SHORT};
+ case kUShort2_norm_GrVertexAttribType:
+ return {true, 2, GR_GL_UNSIGNED_SHORT};
+ case kInt_GrVertexAttribType:
+ return {false, 1, GR_GL_INT};
+ case kUint_GrVertexAttribType:
+ return {false, 1, GR_GL_UNSIGNED_INT};
+ case kUShort_norm_GrVertexAttribType:
+ return {true, 1, GR_GL_UNSIGNED_SHORT};
+ case kUShort4_norm_GrVertexAttribType:
+ return {true, 4, GR_GL_UNSIGNED_SHORT};
+ }
+ SK_ABORT("Unknown vertex attrib type");
+}
+
+void GrGLAttribArrayState::set(GrGLGpu* gpu,
+ int index,
+ const GrBuffer* vertexBuffer,
+ GrVertexAttribType cpuType,
+ GrSLType gpuType,
+ GrGLsizei stride,
+ size_t offsetInBytes,
+ int divisor) {
+ SkASSERT(index >= 0 && index < fAttribArrayStates.count());
+ SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport());
+ AttribArrayState* array = &fAttribArrayStates[index];
+ const char* offsetAsPtr;
+ bool bufferChanged = false;
+ if (vertexBuffer->isCpuBuffer()) {
+ if (!array->fUsingCpuBuffer) {
+ bufferChanged = true;
+ array->fUsingCpuBuffer = true;
+ }
+ offsetAsPtr = static_cast<const GrCpuBuffer*>(vertexBuffer)->data() + offsetInBytes;
+ } else {
+ auto gpuBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
+ if (array->fUsingCpuBuffer || array->fVertexBufferUniqueID != gpuBuffer->uniqueID()) {
+ bufferChanged = true;
+ array->fVertexBufferUniqueID = gpuBuffer->uniqueID();
+ }
+ offsetAsPtr = reinterpret_cast<const char*>(offsetInBytes);
+ }
+ if (bufferChanged ||
+ array->fCPUType != cpuType ||
+ array->fGPUType != gpuType ||
+ array->fStride != stride ||
+ array->fOffset != offsetAsPtr) {
+ // We always have to call this if we're going to change the array pointer. 'array' is
+ // tracking the last buffer used to setup attrib pointers, not the last buffer bound.
+ // GrGLGpu will avoid redundant binds.
+ gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
+ const AttribLayout& layout = attrib_layout(cpuType);
+ if (GrSLTypeIsFloatType(gpuType)) {
+ GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
+ layout.fCount,
+ layout.fType,
+ layout.fNormalized,
+ stride,
+ offsetAsPtr));
+ } else {
+ SkASSERT(gpu->caps()->shaderCaps()->integerSupport());
+ SkASSERT(!layout.fNormalized);
+ GR_GL_CALL(gpu->glInterface(), VertexAttribIPointer(index,
+ layout.fCount,
+ layout.fType,
+ stride,
+ offsetAsPtr));
+ }
+ array->fCPUType = cpuType;
+ array->fGPUType = gpuType;
+ array->fStride = stride;
+ array->fOffset = offsetAsPtr;
+ }
+ if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) {
+ SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect.
+ GR_GL_CALL(gpu->glInterface(), VertexAttribDivisor(index, divisor));
+ array->fDivisor = divisor;
+ }
+}
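+
+// Usage sketch (hypothetical names): bind attribute 0 as two tightly packed
+// floats per vertex from a GPU-side buffer, non-instanced:
+//
+//     arrays->set(gpu, 0, vertexBuffer, kFloat2_GrVertexAttribType,
+//                 kFloat2_GrSLType, 2 * sizeof(float), 0, /*divisor=*/0);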
+
+void GrGLAttribArrayState::enableVertexArrays(const GrGLGpu* gpu, int enabledCount,
+ GrPrimitiveRestart enablePrimitiveRestart) {
+ SkASSERT(enabledCount <= fAttribArrayStates.count());
+
+ if (!fEnableStateIsValid || enabledCount != fNumEnabledArrays) {
+ int firstIdxToEnable = fEnableStateIsValid ? fNumEnabledArrays : 0;
+ for (int i = firstIdxToEnable; i < enabledCount; ++i) {
+ GR_GL_CALL(gpu->glInterface(), EnableVertexAttribArray(i));
+ }
+
+ int endIdxToDisable = fEnableStateIsValid ? fNumEnabledArrays : fAttribArrayStates.count();
+ for (int i = enabledCount; i < endIdxToDisable; ++i) {
+ GR_GL_CALL(gpu->glInterface(), DisableVertexAttribArray(i));
+ }
+
+ fNumEnabledArrays = enabledCount;
+ }
+
+ SkASSERT(GrPrimitiveRestart::kNo == enablePrimitiveRestart ||
+ gpu->caps()->usePrimitiveRestart());
+
+ if (gpu->caps()->usePrimitiveRestart() &&
+ (!fEnableStateIsValid || enablePrimitiveRestart != fPrimitiveRestartEnabled)) {
+ if (GrPrimitiveRestart::kYes == enablePrimitiveRestart) {
+ GR_GL_CALL(gpu->glInterface(), Enable(GR_GL_PRIMITIVE_RESTART_FIXED_INDEX));
+ } else {
+ GR_GL_CALL(gpu->glInterface(), Disable(GR_GL_PRIMITIVE_RESTART_FIXED_INDEX));
+ }
+
+ fPrimitiveRestartEnabled = enablePrimitiveRestart;
+ }
+
+ fEnableStateIsValid = true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLVertexArray::GrGLVertexArray(GrGLint id, int attribCount)
+ : fID(id)
+ , fAttribArrays(attribCount)
+ , fIndexBufferUniqueID(SK_InvalidUniqueID) {
+}
+
+GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
+ if (0 == fID) {
+ return nullptr;
+ }
+ gpu->bindVertexArray(fID);
+ return &fAttribArrays;
+}
+
+GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
+ GrGLAttribArrayState* state = this->bind(gpu);
+ if (!state) {
+ return nullptr;
+ }
+ if (ibuff->isCpuBuffer()) {
+ GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
+ } else {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
+ if (fIndexBufferUniqueID != glBuffer->uniqueID()) {
+ GR_GL_CALL(gpu->glInterface(),
+ BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, glBuffer->bufferID()));
+ fIndexBufferUniqueID = glBuffer->uniqueID();
+ }
+ }
+ return state;
+}
+
+void GrGLVertexArray::invalidateCachedState() {
+ fAttribArrays.invalidate();
+ fIndexBufferUniqueID.makeInvalid();
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h
new file mode 100644
index 0000000000..a253379938
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLVertexArray_DEFINED
+#define GrGLVertexArray_DEFINED
+
+#include "include/gpu/GrGpuResource.h"
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/gl/GrGLDefines.h"
+
+class GrBuffer;
+class GrGLGpu;
+
+/**
+ * This sets and tracks the vertex attribute array state. It is used internally by GrGLVertexArray
+ * (below) but is separate because it is also used to track the state of vertex array object 0.
+ */
+class GrGLAttribArrayState {
+public:
+ explicit GrGLAttribArrayState(int arrayCount = 0) {
+ this->resize(arrayCount);
+ }
+
+ void resize(int newCount) {
+ fAttribArrayStates.resize_back(newCount);
+ this->invalidate();
+ }
+
+ /**
+ * This function enables and sets vertex attrib state for the specified attrib index. It is
+ * assumed that the GrGLAttribArrayState is tracking the state of the currently bound vertex
+ * array object.
+ */
+ void set(GrGLGpu*,
+ int attribIndex,
+ const GrBuffer* vertexBuffer,
+ GrVertexAttribType cpuType,
+ GrSLType gpuType,
+ GrGLsizei stride,
+ size_t offsetInBytes,
+ int divisor = 0);
+
+ /**
+ * This function enables the first 'enabledCount' vertex arrays and disables the rest.
+ */
+ void enableVertexArrays(const GrGLGpu*, int enabledCount,
+ GrPrimitiveRestart = GrPrimitiveRestart::kNo);
+
+ void invalidate() {
+ int count = fAttribArrayStates.count();
+ for (int i = 0; i < count; ++i) {
+ fAttribArrayStates[i].invalidate();
+ }
+ fEnableStateIsValid = false;
+ }
+
+ /**
+ * The number of attrib arrays that this object is configured to track.
+ */
+ int count() const { return fAttribArrayStates.count(); }
+
+private:
+ static constexpr int kInvalidDivisor = -1;
+
+ /**
+ * Tracks the state of glVertexAttribArray for an attribute index.
+ */
+ struct AttribArrayState {
+ void invalidate() {
+ fVertexBufferUniqueID.makeInvalid();
+ fDivisor = kInvalidDivisor;
+ fUsingCpuBuffer = false;
+ }
+
+ GrGpuResource::UniqueID fVertexBufferUniqueID;
+ bool fUsingCpuBuffer;
+ GrVertexAttribType fCPUType;
+ GrSLType fGPUType;
+ GrGLsizei fStride;
+ const GrGLvoid* fOffset;
+ int fDivisor;
+ };
+
+ SkSTArray<16, AttribArrayState, true> fAttribArrayStates;
+ int fNumEnabledArrays;
+ GrPrimitiveRestart fPrimitiveRestartEnabled;
+ bool fEnableStateIsValid = false;
+};
+
+/**
+ * This class represents an OpenGL vertex array object. It manages the lifetime of the vertex array
+ * and is used to track the state of the vertex array to avoid redundant GL calls.
+ */
+class GrGLVertexArray {
+public:
+ GrGLVertexArray(GrGLint id, int attribCount);
+
+ /**
+ * Binds this vertex array. If the ID has been deleted or abandoned then nullptr is returned.
+ * Otherwise, the GrGLAttribArrayState that is tracking this vertex array's attrib bindings is
+ * returned.
+ */
+ GrGLAttribArrayState* bind(GrGLGpu*);
+
+ /**
+ * This is a version of the above function that also binds an index buffer to the vertex
+ * array object.
+ */
+ GrGLAttribArrayState* bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* indexBuffer);
+
+ GrGLuint arrayID() const { return fID; }
+
+ void invalidateCachedState();
+
+private:
+ GrGLuint fID;
+ GrGLAttribArrayState fAttribArrays;
+ GrGpuResource::UniqueID fIndexBufferUniqueID;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/android/GrGLMakeNativeInterface_android.cpp b/gfx/skia/skia/src/gpu/gl/android/GrGLMakeNativeInterface_android.cpp
new file mode 100644
index 0000000000..5e5d4c2ebe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/android/GrGLMakeNativeInterface_android.cpp
@@ -0,0 +1,7 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "../egl/GrGLMakeNativeInterface_egl.cpp"
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp
new file mode 100644
index 0000000000..3ce3267fdd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp
@@ -0,0 +1,609 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gl/builders/GrGLProgramBuilder.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/core/SkATrace.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkReader32.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkWriter32.h"
+#include "src/gpu/GrAutoLocaleSetter.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrPersistentCacheUtils.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/GrGLProgram.h"
+#include "src/gpu/gl/builders/GrGLShaderStringBuilder.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->gpu()->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(this->gpu()->glInterface(), R, X)
+
+static void cleanup_shaders(GrGLGpu* gpu, const SkTDArray<GrGLuint>& shaderIDs) {
+ for (int i = 0; i < shaderIDs.count(); ++i) {
+ GR_GL_CALL(gpu->glInterface(), DeleteShader(shaderIDs[i]));
+ }
+}
+
+static void cleanup_program(GrGLGpu* gpu, GrGLuint programID,
+ const SkTDArray<GrGLuint>& shaderIDs) {
+ GR_GL_CALL(gpu->glInterface(), DeleteProgram(programID));
+ cleanup_shaders(gpu, shaderIDs);
+}
+
+GrGLProgram* GrGLProgramBuilder::CreateProgram(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrProgramDesc* desc,
+ GrGLGpu* gpu,
+ const GrGLPrecompiledProgram* precompiledProgram) {
+ ATRACE_ANDROID_FRAMEWORK("Shader Compile");
+ GrAutoLocaleSetter als("C");
+
+ // create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc
+ GrGLProgramBuilder builder(gpu, renderTarget, programInfo, desc);
+
+ auto persistentCache = gpu->getContext()->priv().getPersistentCache();
+ if (persistentCache && !precompiledProgram) {
+ sk_sp<SkData> key = SkData::MakeWithoutCopy(desc->asKey(), desc->keyLength());
+ builder.fCached = persistentCache->load(*key);
+ // The eventual goal is to skip emitAndInstallProcs entirely on a cache hit, but it
+ // still does necessary setup in addition to generating the SkSL code, so for now
+ // only the SkSL->GLSL step can be skipped on a cache hit.
+ }
+ if (!builder.emitAndInstallProcs()) {
+ return nullptr;
+ }
+ return builder.finalize(precompiledProgram);
+}
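+
+// In short: CreateProgram() consults the persistent cache up front, always runs
+// emitAndInstallProcs() for processor setup (and SkSL generation on a miss), and
+// leaves compiling, linking, and cache stores to finalize() below.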
+
+/////////////////////////////////////////////////////////////////////////////
+
+GrGLProgramBuilder::GrGLProgramBuilder(GrGLGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrProgramDesc* desc)
+ : INHERITED(renderTarget, programInfo, desc)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this)
+ , fVertexAttributeCnt(0)
+ , fInstanceAttributeCnt(0)
+ , fVertexStride(0)
+ , fInstanceStride(0) {}
+
+const GrCaps* GrGLProgramBuilder::caps() const {
+ return fGpu->caps();
+}
+
+bool GrGLProgramBuilder::compileAndAttachShaders(const SkSL::String& glsl,
+ GrGLuint programId,
+ GrGLenum type,
+ SkTDArray<GrGLuint>* shaderIds,
+ GrContextOptions::ShaderErrorHandler* errHandler) {
+ GrGLGpu* gpu = this->gpu();
+ GrGLuint shaderId = GrGLCompileAndAttachShader(gpu->glContext(),
+ programId,
+ type,
+ glsl,
+ gpu->stats(),
+ errHandler);
+ if (!shaderId) {
+ return false;
+ }
+
+ *shaderIds->append() = shaderId;
+ return true;
+}
+
+void GrGLProgramBuilder::computeCountsAndStrides(GrGLuint programID,
+ const GrPrimitiveProcessor& primProc,
+ bool bindAttribLocations) {
+ fVertexAttributeCnt = primProc.numVertexAttributes();
+ fInstanceAttributeCnt = primProc.numInstanceAttributes();
+ fAttributes.reset(
+ new GrGLProgram::Attribute[fVertexAttributeCnt + fInstanceAttributeCnt]);
+ auto addAttr = [&](int i, const auto& a, size_t* stride) {
+ fAttributes[i].fCPUType = a.cpuType();
+ fAttributes[i].fGPUType = a.gpuType();
+ fAttributes[i].fOffset = *stride;
+ *stride += a.sizeAlign4();
+ fAttributes[i].fLocation = i;
+ if (bindAttribLocations) {
+ GL_CALL(BindAttribLocation(programID, i, a.name()));
+ }
+ };
+ fVertexStride = 0;
+ int i = 0;
+ for (const auto& attr : primProc.vertexAttributes()) {
+ addAttr(i++, attr, &fVertexStride);
+ }
+ SkASSERT(fVertexStride == primProc.vertexStride());
+ fInstanceStride = 0;
+ for (const auto& attr : primProc.instanceAttributes()) {
+ addAttr(i++, attr, &fInstanceStride);
+ }
+ SkASSERT(fInstanceStride == primProc.instanceStride());
+}
+
+void GrGLProgramBuilder::addInputVars(const SkSL::Program::Inputs& inputs) {
+ if (inputs.fRTWidth) {
+ this->addRTWidthUniform(SKSL_RTWIDTH_NAME);
+ }
+ if (inputs.fRTHeight) {
+ this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
+ }
+}
+
+static constexpr SkFourByteTag kSKSL_Tag = SkSetFourByteTag('S', 'K', 'S', 'L');
+static constexpr SkFourByteTag kGLSL_Tag = SkSetFourByteTag('G', 'L', 'S', 'L');
+static constexpr SkFourByteTag kGLPB_Tag = SkSetFourByteTag('G', 'L', 'P', 'B');
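+
+// These tags frame each persistent-cache record, so the load path in finalize()
+// can tell whether an entry holds raw SkSL, generated GLSL, or a driver program
+// binary (GLPB) before deciding how to rebuild the program.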
+
+void GrGLProgramBuilder::storeShaderInCache(const SkSL::Program::Inputs& inputs, GrGLuint programID,
+ const SkSL::String shaders[], bool isSkSL,
+ SkSL::Program::Settings* settings) {
+ if (!this->gpu()->getContext()->priv().getPersistentCache()) {
+ return;
+ }
+ sk_sp<SkData> key = SkData::MakeWithoutCopy(this->desc()->asKey(), this->desc()->keyLength());
+ if (fGpu->glCaps().programBinarySupport()) {
+ // binary cache
+ GrGLsizei length = 0;
+ GL_CALL(GetProgramiv(programID, GR_GL_PROGRAM_BINARY_LENGTH, &length));
+ if (length > 0) {
+ SkWriter32 writer;
+ writer.write32(kGLPB_Tag);
+
+ writer.writePad(&inputs, sizeof(inputs));
+ writer.write32(length);
+
+ void* binary = writer.reservePad(length);
+ GrGLenum binaryFormat;
+ GL_CALL(GetProgramBinary(programID, length, &length, &binaryFormat, binary));
+ writer.write32(binaryFormat);
+
+ auto data = writer.snapshotAsData();
+ this->gpu()->getContext()->priv().getPersistentCache()->store(*key, *data);
+ }
+ } else {
+ // source cache, plus metadata to allow for a complete precompile
+ GrPersistentCacheUtils::ShaderMetadata meta;
+ meta.fSettings = settings;
+ meta.fHasCustomColorOutput = fFS.hasCustomColorOutput();
+ meta.fHasSecondaryColorOutput = fFS.hasSecondaryOutput();
+ for (const auto& attr : this->primitiveProcessor().vertexAttributes()) {
+ meta.fAttributeNames.emplace_back(attr.name());
+ }
+ for (const auto& attr : this->primitiveProcessor().instanceAttributes()) {
+ meta.fAttributeNames.emplace_back(attr.name());
+ }
+
+ auto data = GrPersistentCacheUtils::PackCachedShaders(isSkSL ? kSKSL_Tag : kGLSL_Tag,
+ shaders, &inputs, 1, &meta);
+ this->gpu()->getContext()->priv().getPersistentCache()->store(*key, *data);
+ }
+}
+
+GrGLProgram* GrGLProgramBuilder::finalize(const GrGLPrecompiledProgram* precompiledProgram) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ // verify we can get a program id
+ GrGLuint programID;
+ if (precompiledProgram) {
+ programID = precompiledProgram->fProgramID;
+ } else {
+ GL_CALL_RET(programID, CreateProgram());
+ }
+ if (0 == programID) {
+ return nullptr;
+ }
+
+ if (this->gpu()->glCaps().programBinarySupport() &&
+ this->gpu()->glCaps().programParameterSupport() &&
+ this->gpu()->getContext()->priv().getPersistentCache() &&
+ !precompiledProgram) {
+ GL_CALL(ProgramParameteri(programID, GR_GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GR_GL_TRUE));
+ }
+
+ this->finalizeShaders();
+
+ // compile shaders and bind attributes / uniforms
+ auto errorHandler = this->gpu()->getContext()->priv().getShaderErrorHandler();
+ const GrPrimitiveProcessor& primProc = this->primitiveProcessor();
+ SkSL::Program::Settings settings;
+ settings.fCaps = this->gpu()->glCaps().shaderCaps();
+ settings.fFlipY = this->origin() != kTopLeft_GrSurfaceOrigin;
+ settings.fSharpenTextures =
+ this->gpu()->getContext()->priv().options().fSharpenMipmappedTextures;
+ settings.fFragColorIsInOut = this->fragColorIsInOut();
+
+ SkSL::Program::Inputs inputs;
+ SkTDArray<GrGLuint> shadersToDelete;
+ // Calling GetProgramiv is expensive in Chromium. Assume success in release builds.
+ bool checkLinked = kChromium_GrGLDriver != fGpu->ctxInfo().driver();
+#ifdef SK_DEBUG
+ checkLinked = true;
+#endif
+ bool cached = fCached.get() != nullptr;
+ bool usedProgramBinaries = false;
+ SkSL::String glsl[kGrShaderTypeCount];
+ SkSL::String* sksl[kGrShaderTypeCount] = {
+ &fVS.fCompilerString,
+ &fGS.fCompilerString,
+ &fFS.fCompilerString,
+ };
+ SkSL::String cached_sksl[kGrShaderTypeCount];
+ if (precompiledProgram) {
+ // This is very similar to when we get program binaries. We even set that flag, as it's
+ // used to prevent other compile work later, and to force re-querying uniform locations.
+ this->addInputVars(precompiledProgram->fInputs);
+ this->computeCountsAndStrides(programID, primProc, false);
+ usedProgramBinaries = true;
+ } else if (cached) {
+ SkReader32 reader(fCached->data(), fCached->size());
+ SkFourByteTag shaderType = reader.readU32();
+
+ switch (shaderType) {
+ case kGLPB_Tag: {
+ // Program binary cache hit. We may opt not to use this if we don't trust program
+ // binaries on this driver.
+ if (!fGpu->glCaps().programBinarySupport()) {
+ cached = false;
+ break;
+ }
+ reader.read(&inputs, sizeof(inputs));
+ GrGLsizei length = reader.readInt();
+ const void* binary = reader.skip(length);
+ GrGLenum binaryFormat = reader.readU32();
+ GrGLClearErr(this->gpu()->glInterface());
+ GR_GL_CALL_NOERRCHECK(this->gpu()->glInterface(),
+ ProgramBinary(programID, binaryFormat,
+ const_cast<void*>(binary), length));
+ if (GR_GL_GET_ERROR(this->gpu()->glInterface()) == GR_GL_NO_ERROR) {
+ if (checkLinked) {
+ cached = this->checkLinkStatus(programID, errorHandler, nullptr, nullptr);
+ }
+ if (cached) {
+ this->addInputVars(inputs);
+ this->computeCountsAndStrides(programID, primProc, false);
+ }
+ } else {
+ cached = false;
+ }
+ usedProgramBinaries = cached;
+ break;
+ }
+
+ case kGLSL_Tag:
+ // Source cache hit, we don't need to compile the SkSL->GLSL
+ GrPersistentCacheUtils::UnpackCachedShaders(&reader, glsl, &inputs, 1);
+ break;
+
+ case kSKSL_Tag:
+ // SkSL cache hit, this should only happen in tools overriding the generated SkSL
+ GrPersistentCacheUtils::UnpackCachedShaders(&reader, cached_sksl, &inputs, 1);
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ sksl[i] = &cached_sksl[i];
+ }
+ break;
+ }
+ }
+ if (!usedProgramBinaries) {
+ // Either a cache miss, or we got something other than binaries from the cache
+
+ /*
+ Fragment Shader
+ */
+ if (glsl[kFragment_GrShaderType].empty()) {
+ // Don't have cached GLSL, need to compile SkSL->GLSL
+ if (fFS.fForceHighPrecision) {
+ settings.fForceHighPrecision = true;
+ }
+ std::unique_ptr<SkSL::Program> fs = GrSkSLtoGLSL(gpu()->glContext(),
+ SkSL::Program::kFragment_Kind,
+ *sksl[kFragment_GrShaderType],
+ settings,
+ &glsl[kFragment_GrShaderType],
+ errorHandler);
+ if (!fs) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+ inputs = fs->fInputs;
+ }
+
+ this->addInputVars(inputs);
+ if (!this->compileAndAttachShaders(glsl[kFragment_GrShaderType], programID,
+ GR_GL_FRAGMENT_SHADER, &shadersToDelete, errorHandler)) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+
+ /*
+ Vertex Shader
+ */
+ if (glsl[kVertex_GrShaderType].empty()) {
+ // Don't have cached GLSL, need to compile SkSL->GLSL
+ std::unique_ptr<SkSL::Program> vs = GrSkSLtoGLSL(gpu()->glContext(),
+ SkSL::Program::kVertex_Kind,
+ *sksl[kVertex_GrShaderType],
+ settings,
+ &glsl[kVertex_GrShaderType],
+ errorHandler);
+ if (!vs) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+ }
+ if (!this->compileAndAttachShaders(glsl[kVertex_GrShaderType], programID,
+ GR_GL_VERTEX_SHADER, &shadersToDelete, errorHandler)) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+
+ // This also binds vertex attribute locations. NVPR doesn't really use vertices,
+ // even though it requires a vertex shader in the program.
+ if (!primProc.isPathRendering()) {
+ this->computeCountsAndStrides(programID, primProc, true);
+ }
+
+ /*
+ Geometry Shader
+ */
+ if (primProc.willUseGeoShader()) {
+ if (glsl[kGeometry_GrShaderType].empty()) {
+ // Don't have cached GLSL, need to compile SkSL->GLSL
+ std::unique_ptr<SkSL::Program> gs;
+ gs = GrSkSLtoGLSL(gpu()->glContext(),
+ SkSL::Program::kGeometry_Kind,
+ *sksl[kGeometry_GrShaderType],
+ settings,
+ &glsl[kGeometry_GrShaderType],
+ errorHandler);
+ if (!gs) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+ }
+ if (!this->compileAndAttachShaders(glsl[kGeometry_GrShaderType], programID,
+ GR_GL_GEOMETRY_SHADER, &shadersToDelete,
+ errorHandler)) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+ }
+ this->bindProgramResourceLocations(programID);
+
+ GL_CALL(LinkProgram(programID));
+ if (checkLinked) {
+ if (!this->checkLinkStatus(programID, errorHandler, sksl, glsl)) {
+ cleanup_program(fGpu, programID, shadersToDelete);
+ return nullptr;
+ }
+ }
+ }
+ this->resolveProgramResourceLocations(programID, usedProgramBinaries);
+
+ cleanup_shaders(fGpu, shadersToDelete);
+
+ // With ANGLE, we can't cache path-rendering programs. We use ProgramPathFragmentInputGen,
+ // and ANGLE's deserialized program state doesn't restore enough state to handle that.
+ // The native NVIDIA drivers do, but this is such an edge case that it's easier to just
+ // black-list caching these programs in all cases. See: anglebug.com/3619
+ // We also can't cache SkSL or GLSL if we were given a precompiled program, but there's not
+ // much point in doing so.
+ if (!cached && !primProc.isPathRendering() && !precompiledProgram) {
+ bool isSkSL = false;
+ if (fGpu->getContext()->priv().options().fShaderCacheStrategy ==
+ GrContextOptions::ShaderCacheStrategy::kSkSL) {
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ glsl[i] = GrShaderUtils::PrettyPrint(*sksl[i]);
+ }
+ isSkSL = true;
+ }
+ this->storeShaderInCache(inputs, programID, glsl, isSkSL, &settings);
+ }
+ return this->createProgram(programID);
+}
+
+void GrGLProgramBuilder::bindProgramResourceLocations(GrGLuint programID) {
+ fUniformHandler.bindUniformLocations(programID, fGpu->glCaps());
+
+ const GrGLCaps& caps = this->gpu()->glCaps();
+ if (fFS.hasCustomColorOutput() && caps.bindFragDataLocationSupport()) {
+ GL_CALL(BindFragDataLocation(programID, 0,
+ GrGLSLFragmentShaderBuilder::DeclaredColorOutputName()));
+ }
+ if (fFS.hasSecondaryOutput() && caps.shaderCaps()->mustDeclareFragmentShaderOutput()) {
+ GL_CALL(BindFragDataLocationIndexed(programID, 0, 1,
+ GrGLSLFragmentShaderBuilder::DeclaredSecondaryColorOutputName()));
+ }
+
+ // handle NVPR separable varyings
+ if (!fGpu->glCaps().shaderCaps()->pathRenderingSupport() ||
+ !fGpu->glPathRendering()->shouldBindFragmentInputs()) {
+ return;
+ }
+ int count = fVaryingHandler.fPathProcVaryingInfos.count();
+ for (int i = 0; i < count; ++i) {
+ GL_CALL(BindFragmentInputLocation(programID, i,
+ fVaryingHandler.fPathProcVaryingInfos[i].fVariable.c_str()));
+ fVaryingHandler.fPathProcVaryingInfos[i].fLocation = i;
+ }
+}
+
+bool GrGLProgramBuilder::checkLinkStatus(GrGLuint programID,
+ GrContextOptions::ShaderErrorHandler* errorHandler,
+ SkSL::String* sksl[], const SkSL::String glsl[]) {
+ GrGLint linked = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramiv(programID, GR_GL_LINK_STATUS, &linked));
+ if (!linked) {
+ SkSL::String allShaders;
+ if (sksl) {
+ allShaders.appendf("// Vertex SKSL\n%s\n", sksl[kVertex_GrShaderType]->c_str());
+ if (!sksl[kGeometry_GrShaderType]->empty()) {
+ allShaders.appendf("// Geometry SKSL\n%s\n", sksl[kGeometry_GrShaderType]->c_str());
+ }
+ allShaders.appendf("// Fragment SKSL\n%s\n", sksl[kFragment_GrShaderType]->c_str());
+ }
+ if (glsl) {
+ allShaders.appendf("// Vertex GLSL\n%s\n", glsl[kVertex_GrShaderType].c_str());
+ if (!glsl[kGeometry_GrShaderType].empty()) {
+ allShaders.appendf("// Geometry GLSL\n%s\n", glsl[kGeometry_GrShaderType].c_str());
+ }
+ allShaders.appendf("// Fragment GLSL\n%s\n", glsl[kFragment_GrShaderType].c_str());
+ }
+ GrGLint infoLen = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramiv(programID, GR_GL_INFO_LOG_LENGTH, &infoLen));
+ SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ // Retrieve the length even though we don't need it, to work around a bug in
+ // the Chromium command buffer's param validation.
+ GrGLsizei length = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramInfoLog(programID, infoLen+1, &length, (char*)log.get()));
+ }
+ errorHandler->compileError(allShaders.c_str(), infoLen > 0 ? (const char*)log.get() : "");
+ }
+ return SkToBool(linked);
+}
+
+void GrGLProgramBuilder::resolveProgramResourceLocations(GrGLuint programID, bool force) {
+ fUniformHandler.getUniformLocations(programID, fGpu->glCaps(), force);
+
+ // handle NVPR separable varyings
+ if (!fGpu->glCaps().shaderCaps()->pathRenderingSupport() ||
+ fGpu->glPathRendering()->shouldBindFragmentInputs()) {
+ return;
+ }
+ int count = fVaryingHandler.fPathProcVaryingInfos.count();
+ for (int i = 0; i < count; ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetProgramResourceLocation(
+ programID,
+ GR_GL_FRAGMENT_INPUT,
+ fVaryingHandler.fPathProcVaryingInfos[i].fVariable.c_str()));
+ fVaryingHandler.fPathProcVaryingInfos[i].fLocation = location;
+ }
+}
+
+GrGLProgram* GrGLProgramBuilder::createProgram(GrGLuint programID) {
+ return new GrGLProgram(fGpu,
+ fUniformHandles,
+ programID,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fSamplers,
+ fVaryingHandler.fPathProcVaryingInfos,
+ std::move(fGeometryProcessor),
+ std::move(fXferProcessor),
+ std::move(fFragmentProcessors),
+ fFragmentProcessorCnt,
+ std::move(fAttributes),
+ fVertexAttributeCnt,
+ fInstanceAttributeCnt,
+ fVertexStride,
+ fInstanceStride);
+}
+
+bool GrGLProgramBuilder::PrecompileProgram(GrGLPrecompiledProgram* precompiledProgram,
+ GrGLGpu* gpu,
+ const SkData& cachedData) {
+ SkReader32 reader(cachedData.data(), cachedData.size());
+ SkFourByteTag shaderType = reader.readU32();
+ if (shaderType != kSKSL_Tag) {
+ // TODO: Support GLSL, and maybe even program binaries, too?
+ return false;
+ }
+
+ const GrGLInterface* gl = gpu->glInterface();
+ auto errorHandler = gpu->getContext()->priv().getShaderErrorHandler();
+ GrGLuint programID;
+ GR_GL_CALL_RET(gl, programID, CreateProgram());
+ if (0 == programID) {
+ return false;
+ }
+
+ SkTDArray<GrGLuint> shadersToDelete;
+
+ SkSL::Program::Settings settings;
+ const GrGLCaps& caps = gpu->glCaps();
+ settings.fCaps = caps.shaderCaps();
+ settings.fSharpenTextures = gpu->getContext()->priv().options().fSharpenMipmappedTextures;
+ GrPersistentCacheUtils::ShaderMetadata meta;
+ meta.fSettings = &settings;
+
+ SkSL::String shaders[kGrShaderTypeCount];
+ SkSL::Program::Inputs inputs;
+ GrPersistentCacheUtils::UnpackCachedShaders(&reader, shaders, &inputs, 1, &meta);
+
+ auto compileShader = [&](SkSL::Program::Kind kind, const SkSL::String& sksl, GrGLenum type) {
+ SkSL::String glsl;
+ auto program = GrSkSLtoGLSL(gpu->glContext(), kind, sksl, settings, &glsl, errorHandler);
+ if (!program) {
+ return false;
+ }
+
+ if (GrGLuint shaderID = GrGLCompileAndAttachShader(gpu->glContext(), programID, type, glsl,
+ gpu->stats(), errorHandler)) {
+ shadersToDelete.push_back(shaderID);
+ return true;
+ } else {
+ return false;
+ }
+ };
+
+ if (!compileShader(SkSL::Program::kFragment_Kind,
+ shaders[kFragment_GrShaderType],
+ GR_GL_FRAGMENT_SHADER) ||
+ !compileShader(SkSL::Program::kVertex_Kind,
+ shaders[kVertex_GrShaderType],
+ GR_GL_VERTEX_SHADER) ||
+ (!shaders[kGeometry_GrShaderType].empty() &&
+ !compileShader(SkSL::Program::kGeometry_Kind,
+ shaders[kGeometry_GrShaderType],
+ GR_GL_GEOMETRY_SHADER))) {
+ cleanup_program(gpu, programID, shadersToDelete);
+ return false;
+ }
+
+ for (int i = 0; i < meta.fAttributeNames.count(); ++i) {
+ GR_GL_CALL(gpu->glInterface(), BindAttribLocation(programID, i,
+ meta.fAttributeNames[i].c_str()));
+ }
+
+ if (meta.fHasCustomColorOutput && caps.bindFragDataLocationSupport()) {
+ GR_GL_CALL(gpu->glInterface(), BindFragDataLocation(programID, 0,
+ GrGLSLFragmentShaderBuilder::DeclaredColorOutputName()));
+ }
+ if (meta.fHasSecondaryColorOutput && caps.shaderCaps()->mustDeclareFragmentShaderOutput()) {
+ GR_GL_CALL(gpu->glInterface(), BindFragDataLocationIndexed(programID, 0, 1,
+ GrGLSLFragmentShaderBuilder::DeclaredSecondaryColorOutputName()));
+ }
+
+ GR_GL_CALL(gpu->glInterface(), LinkProgram(programID));
+ GrGLint linked = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gpu->glInterface(), GetProgramiv(programID, GR_GL_LINK_STATUS, &linked));
+ if (!linked) {
+ cleanup_program(gpu, programID, shadersToDelete);
+ return false;
+ }
+
+ cleanup_shaders(gpu, shadersToDelete);
+
+ precompiledProgram->fProgramID = programID;
+ precompiledProgram->fInputs = inputs;
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h
new file mode 100644
index 0000000000..800c64a687
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLProgramBuilder_DEFINED
+#define GrGLProgramBuilder_DEFINED
+
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/gl/GrGLProgram.h"
+#include "src/gpu/gl/GrGLProgramDataManager.h"
+#include "src/gpu/gl/GrGLUniformHandler.h"
+#include "src/gpu/gl/GrGLVaryingHandler.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+class GrFragmentProcessor;
+class GrGLContextInfo;
+class GrProgramDesc;
+class GrGLSLShaderBuilder;
+class GrShaderCaps;
+
+struct GrGLPrecompiledProgram {
+ GrGLPrecompiledProgram(GrGLuint programID = 0,
+ SkSL::Program::Inputs inputs = SkSL::Program::Inputs())
+ : fProgramID(programID)
+ , fInputs(inputs) {}
+
+ GrGLuint fProgramID;
+ SkSL::Program::Inputs fInputs;
+};
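+
+// PrecompileProgram() fills this out from a cached SkSL record; handing the
+// result to CreateProgram() then reuses the already-linked program ID instead of
+// compiling and linking the shaders again.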
+
+class GrGLProgramBuilder : public GrGLSLProgramBuilder {
+public:
+ /** Generates a shader program.
+ *
+ * The program implements what is specified in the stages given as input.
+ * After successful generation, the builder result objects are available
+ * to be used.
+ * This function may modify the GrProgramDesc by setting the surface origin
+ * key to 0 (unspecified) if it turns out the program does not care about
+ * the surface origin.
+ * If a GL program has already been created, the program ID and inputs can
+ * be supplied to skip the shader compilation.
+ * @return true if generation was successful.
+ */
+ static GrGLProgram* CreateProgram(GrRenderTarget*,
+ const GrProgramInfo&,
+ GrProgramDesc*,
+ GrGLGpu*,
+ const GrGLPrecompiledProgram* = nullptr);
+
+ static bool PrecompileProgram(GrGLPrecompiledProgram*, GrGLGpu*, const SkData&);
+
+ const GrCaps* caps() const override;
+
+ GrGLGpu* gpu() const { return fGpu; }
+
+private:
+ GrGLProgramBuilder(GrGLGpu*, GrRenderTarget*, const GrProgramInfo&, GrProgramDesc*);
+
+ void addInputVars(const SkSL::Program::Inputs& inputs);
+ bool compileAndAttachShaders(const SkSL::String& glsl,
+ GrGLuint programId,
+ GrGLenum type,
+ SkTDArray<GrGLuint>* shaderIds,
+ GrContextOptions::ShaderErrorHandler* errorHandler);
+
+ void computeCountsAndStrides(GrGLuint programID, const GrPrimitiveProcessor& primProc,
+ bool bindAttribLocations);
+ void storeShaderInCache(const SkSL::Program::Inputs& inputs, GrGLuint programID,
+ const SkSL::String shaders[], bool isSkSL,
+ SkSL::Program::Settings* settings);
+ GrGLProgram* finalize(const GrGLPrecompiledProgram*);
+ void bindProgramResourceLocations(GrGLuint programID);
+ bool checkLinkStatus(GrGLuint programID, GrContextOptions::ShaderErrorHandler* errorHandler,
+ SkSL::String* sksl[], const SkSL::String glsl[]);
+ void resolveProgramResourceLocations(GrGLuint programID, bool force);
+
+ // Subclasses create different programs
+ GrGLProgram* createProgram(GrGLuint programID);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrGLGpu* fGpu;
+ GrGLVaryingHandler fVaryingHandler;
+ GrGLUniformHandler fUniformHandler;
+
+ std::unique_ptr<GrGLProgram::Attribute[]> fAttributes;
+ int fVertexAttributeCnt;
+ int fInstanceAttributeCnt;
+ size_t fVertexStride;
+ size_t fInstanceStride;
+
+ // Shader pulled from the persistent cache. For the program-binary path, the
+ // data written by storeShaderInCache() is organized as:
+ // SkFourByteTag tag (kGLPB_Tag)
+ // SkSL::Program::Inputs inputs
+ // int binary length
+ // char[] binary
+ // GrGLenum binaryFormat
+ sk_sp<SkData> fCached;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp
new file mode 100644
index 0000000000..53b03eec62
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkAutoMalloc.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/gl/builders/GrGLShaderStringBuilder.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+// Print the source code for all shaders generated.
+static const bool gPrintSKSL = false;
+static const bool gPrintGLSL = false;
+
+static void print_shader_banner(SkSL::Program::Kind programKind) {
+ const char* typeName = "Unknown";
+ switch (programKind) {
+ case SkSL::Program::kVertex_Kind: typeName = "Vertex"; break;
+ case SkSL::Program::kGeometry_Kind: typeName = "Geometry"; break;
+ case SkSL::Program::kFragment_Kind: typeName = "Fragment"; break;
+ default: break;
+ }
+ SkDebugf("---- %s shader ----------------------------------------------------\n", typeName);
+}
+
+std::unique_ptr<SkSL::Program> GrSkSLtoGLSL(const GrGLContext& context,
+ SkSL::Program::Kind programKind,
+ const SkSL::String& sksl,
+ const SkSL::Program::Settings& settings,
+ SkSL::String* glsl,
+ GrContextOptions::ShaderErrorHandler* errorHandler) {
+ SkSL::Compiler* compiler = context.compiler();
+ std::unique_ptr<SkSL::Program> program;
+#ifdef SK_DEBUG
+ SkSL::String src = GrShaderUtils::PrettyPrint(sksl);
+#else
+ const SkSL::String& src = sksl;
+#endif
+ program = compiler->convertProgram(programKind, src, settings);
+ if (!program || !compiler->toGLSL(*program, glsl)) {
+ errorHandler->compileError(src.c_str(), compiler->errorText().c_str());
+ return nullptr;
+ }
+
+ if (gPrintSKSL || gPrintGLSL) {
+ print_shader_banner(programKind);
+ if (gPrintSKSL) {
+ GrShaderUtils::PrintLineByLine("SKSL:", GrShaderUtils::PrettyPrint(sksl));
+ }
+ if (gPrintGLSL) {
+ GrShaderUtils::PrintLineByLine("GLSL:", GrShaderUtils::PrettyPrint(*glsl));
+ }
+ }
+
+ return program;
+}
+
+GrGLuint GrGLCompileAndAttachShader(const GrGLContext& glCtx,
+ GrGLuint programId,
+ GrGLenum type,
+ const SkSL::String& glsl,
+ GrGpu::Stats* stats,
+ GrContextOptions::ShaderErrorHandler* errorHandler) {
+ const GrGLInterface* gli = glCtx.interface();
+
+ // Specify GLSL source to the driver.
+ GrGLuint shaderId;
+ GR_GL_CALL_RET(gli, shaderId, CreateShader(type));
+ if (0 == shaderId) {
+ return 0;
+ }
+ const GrGLchar* source = glsl.c_str();
+ GrGLint sourceLength = glsl.size();
+ GR_GL_CALL(gli, ShaderSource(shaderId, 1, &source, &sourceLength));
+
+ stats->incShaderCompilations();
+ GR_GL_CALL(gli, CompileShader(shaderId));
+
+ // Calling GetShaderiv in Chromium is quite expensive. Assume success in release builds.
+ bool checkCompiled = kChromium_GrGLDriver != glCtx.driver();
+#ifdef SK_DEBUG
+ checkCompiled = true;
+#endif
+ if (checkCompiled) {
+ GrGLint compiled = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_COMPILE_STATUS, &compiled));
+
+ if (!compiled) {
+ GrGLint infoLen = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_INFO_LOG_LENGTH, &infoLen));
+ SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ // Retrieve the length even though we don't need it, to work around a bug in
+ // Chromium's command buffer param validation.
+ GrGLsizei length = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderInfoLog(shaderId, infoLen+1, &length, (char*)log.get()));
+ }
+ errorHandler->compileError(glsl.c_str(), infoLen > 0 ? (const char*)log.get() : "");
+ GR_GL_CALL(gli, DeleteShader(shaderId));
+ return 0;
+ }
+ }
+
+ // Attach the shader, but defer deletion until after we have linked the program.
+ // This works around a bug in the Android emulator's GLES2 wrapper which
+ // will immediately delete the shader object and free its memory even though it's
+ // attached to a program, which then causes glLinkProgram to fail.
+ GR_GL_CALL(gli, AttachShader(programId, shaderId));
+ return shaderId;
+}
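
The compile path above wraps everything in GR_GL_CALL and routes failures to a ShaderErrorHandler. Stripped of those wrappers, the same create/compile/log/delete pattern looks like this in raw OpenGL; a sketch assuming a current GL context, the standard GL headers, and <cstdio>/<string>:

    GLuint compile_shader(GLenum type, const char* source) {
        GLuint id = glCreateShader(type);
        if (id == 0) {
            return 0;
        }
        glShaderSource(id, 1, &source, nullptr);
        glCompileShader(id);

        GLint compiled = GL_FALSE;
        glGetShaderiv(id, GL_COMPILE_STATUS, &compiled);
        if (!compiled) {
            GLint len = 0;
            glGetShaderiv(id, GL_INFO_LOG_LENGTH, &len);
            std::string log(len > 0 ? size_t(len) : 1, '\0');
            glGetShaderInfoLog(id, GLsizei(log.size()), nullptr, &log[0]);
            std::fprintf(stderr, "shader compile failed:\n%s\n", log.c_str());
            glDeleteShader(id);
            return 0;
        }
        return id;  // attach with glAttachShader(); delete only after linking
    }
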
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h
new file mode 100644
index 0000000000..12bebc3cfe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLShaderStringBuilder_DEFINED
+#define GrGLShaderStringBuilder_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/gl/GrGLContext.h"
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+
+std::unique_ptr<SkSL::Program> GrSkSLtoGLSL(const GrGLContext& context,
+ SkSL::Program::Kind programKind,
+ const SkSL::String& sksl,
+ const SkSL::Program::Settings& settings,
+ SkSL::String* glsl,
+ GrContextOptions::ShaderErrorHandler* errorHandler);
+
+GrGLuint GrGLCompileAndAttachShader(const GrGLContext& glCtx,
+ GrGLuint programId,
+ GrGLenum type,
+ const SkSL::String& glsl,
+ GrGpu::Stats*,
+ GrContextOptions::ShaderErrorHandler* errorHandler);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/egl/GrGLMakeNativeInterface_egl.cpp b/gfx/skia/skia/src/gpu/gl/egl/GrGLMakeNativeInterface_egl.cpp
new file mode 100644
index 0000000000..3af6d84ecd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/egl/GrGLMakeNativeInterface_egl.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#include <EGL/egl.h>
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES
+#endif
+#include <GLES2/gl2.h>
+
+static GrGLFuncPtr egl_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(nullptr == ctx);
+ // https://www.khronos.org/registry/EGL/extensions/KHR/EGL_KHR_get_all_proc_addresses.txt
+ // eglGetProcAddress() is not guaranteed to support the querying of non-extension EGL functions.
+ #define M(X) if (0 == strcmp(#X, name)) { return (GrGLFuncPtr) X; }
+ M(eglGetCurrentDisplay)
+ M(eglQueryString)
+ M(glActiveTexture)
+ M(glAttachShader)
+ M(glBindAttribLocation)
+ M(glBindBuffer)
+ M(glBindFramebuffer)
+ M(glBindRenderbuffer)
+ M(glBindTexture)
+ M(glBlendColor)
+ M(glBlendEquation)
+ M(glBlendFunc)
+ M(glBufferData)
+ M(glBufferSubData)
+ M(glCheckFramebufferStatus)
+ M(glClear)
+ M(glClearColor)
+ M(glClearStencil)
+ M(glColorMask)
+ M(glCompileShader)
+ M(glCompressedTexImage2D)
+ M(glCompressedTexSubImage2D)
+ M(glCopyTexSubImage2D)
+ M(glCreateProgram)
+ M(glCreateShader)
+ M(glCullFace)
+ M(glDeleteBuffers)
+ M(glDeleteFramebuffers)
+ M(glDeleteProgram)
+ M(glDeleteRenderbuffers)
+ M(glDeleteShader)
+ M(glDeleteTextures)
+ M(glDepthMask)
+ M(glDisable)
+ M(glDisableVertexAttribArray)
+ M(glDrawArrays)
+ M(glDrawElements)
+ M(glEnable)
+ M(glEnableVertexAttribArray)
+ M(glFinish)
+ M(glFlush)
+ M(glFramebufferRenderbuffer)
+ M(glFramebufferTexture2D)
+ M(glFrontFace)
+ M(glGenBuffers)
+ M(glGenFramebuffers)
+ M(glGenRenderbuffers)
+ M(glGenTextures)
+ M(glGenerateMipmap)
+ M(glGetBufferParameteriv)
+ M(glGetError)
+ M(glGetFramebufferAttachmentParameteriv)
+ M(glGetIntegerv)
+ M(glGetProgramInfoLog)
+ M(glGetProgramiv)
+ M(glGetRenderbufferParameteriv)
+ M(glGetShaderInfoLog)
+ M(glGetShaderPrecisionFormat)
+ M(glGetShaderiv)
+ M(glGetString)
+ M(glGetUniformLocation)
+ M(glIsTexture)
+ M(glLineWidth)
+ M(glLinkProgram)
+ M(glPixelStorei)
+ M(glReadPixels)
+ M(glRenderbufferStorage)
+ M(glScissor)
+ M(glShaderSource)
+ M(glStencilFunc)
+ M(glStencilFuncSeparate)
+ M(glStencilMask)
+ M(glStencilMaskSeparate)
+ M(glStencilOp)
+ M(glStencilOpSeparate)
+ M(glTexImage2D)
+ M(glTexParameterf)
+ M(glTexParameterfv)
+ M(glTexParameteri)
+ M(glTexParameteriv)
+ M(glTexSubImage2D)
+ M(glUniform1f)
+ M(glUniform1fv)
+ M(glUniform1i)
+ M(glUniform1iv)
+ M(glUniform2f)
+ M(glUniform2fv)
+ M(glUniform2i)
+ M(glUniform2iv)
+ M(glUniform3f)
+ M(glUniform3fv)
+ M(glUniform3i)
+ M(glUniform3iv)
+ M(glUniform4f)
+ M(glUniform4fv)
+ M(glUniform4i)
+ M(glUniform4iv)
+ M(glUniformMatrix2fv)
+ M(glUniformMatrix3fv)
+ M(glUniformMatrix4fv)
+ M(glUseProgram)
+ M(glVertexAttrib1f)
+ M(glVertexAttrib2fv)
+ M(glVertexAttrib3fv)
+ M(glVertexAttrib4fv)
+ M(glVertexAttribPointer)
+ M(glViewport)
+ #undef M
+ return eglGetProcAddress(name);
+}
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ return GrGLMakeAssembledInterface(nullptr, egl_get_gl_proc);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
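
All of the GrGLMakeNativeInterface() backends in this patch are consumed the same way; a hedged usage sketch (GrContext::MakeGL is the factory in this Skia snapshot, and the error handling is illustrative):

    // A GL context must already be current on this thread.
    sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
    if (!iface || !iface->validate()) {
        return;  // no current context, or required functions were unresolvable
    }
    sk_sp<GrContext> context = GrContext::MakeGL(std::move(iface));
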
diff --git a/gfx/skia/skia/src/gpu/gl/glfw/GrGLMakeNativeInterface_glfw.cpp b/gfx/skia/skia/src/gpu/gl/glfw/GrGLMakeNativeInterface_glfw.cpp
new file mode 100644
index 0000000000..a2579688d8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/glfw/GrGLMakeNativeInterface_glfw.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#include <GLFW/glfw3.h>
+
+static GrGLFuncPtr glfw_get(void* ctx, const char name[]) {
+ SkASSERT(nullptr == ctx);
+ SkASSERT(glfwGetCurrentContext());
+ return glfwGetProcAddress(name);
+}
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ if (nullptr == glfwGetCurrentContext()) {
+ return nullptr;
+ }
+
+ return GrGLMakeAssembledInterface(nullptr, glfw_get);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
diff --git a/gfx/skia/skia/src/gpu/gl/glx/GrGLMakeNativeInterface_glx.cpp b/gfx/skia/skia/src/gpu/gl/glx/GrGLMakeNativeInterface_glx.cpp
new file mode 100644
index 0000000000..20187eb1d3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/glx/GrGLMakeNativeInterface_glx.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+// Define this to get a prototype for glXGetProcAddress on some systems
+#define GLX_GLXEXT_PROTOTYPES 1
+#include <GL/glx.h>
+
+static GrGLFuncPtr glx_get(void* ctx, const char name[]) {
+ // Avoid calling glXGetProcAddress() for EGL procs.
+ // We don't expect it to ever succeed, but sometimes it returns non-null anyway.
+ if (0 == strncmp(name, "egl", 3)) {
+ return nullptr;
+ }
+
+ SkASSERT(nullptr == ctx);
+ SkASSERT(glXGetCurrentContext());
+ return glXGetProcAddress(reinterpret_cast<const GLubyte*>(name));
+}
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ if (nullptr == glXGetCurrentContext()) {
+ return nullptr;
+ }
+
+ return GrGLMakeAssembledInterface(nullptr, glx_get);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
diff --git a/gfx/skia/skia/src/gpu/gl/iOS/GrGLMakeNativeInterface_iOS.cpp b/gfx/skia/skia/src/gpu/gl/iOS/GrGLMakeNativeInterface_iOS.cpp
new file mode 100644
index 0000000000..10e37d2b84
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/iOS/GrGLMakeNativeInterface_iOS.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include <dlfcn.h>
+
+class GLLoader {
+public:
+ GLLoader() {
+ fLibrary = dlopen(
+ "/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib",
+ RTLD_LAZY);
+ }
+
+ ~GLLoader() {
+ if (fLibrary) {
+ dlclose(fLibrary);
+ }
+ }
+
+ void* handle() const {
+ return nullptr == fLibrary ? RTLD_DEFAULT : fLibrary;
+ }
+
+private:
+ void* fLibrary;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() {}
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ return (GrGLFuncPtr) dlsym(fLoader.handle(), name);
+ }
+
+private:
+ GLLoader fLoader;
+};
+
+static GrGLFuncPtr ios_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ GLProcGetter getter;
+ return GrGLMakeAssembledGLESInterface(&getter, ios_get_gl_proc);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
diff --git a/gfx/skia/skia/src/gpu/gl/mac/GrGLMakeNativeInterface_mac.cpp b/gfx/skia/skia/src/gpu/gl/mac/GrGLMakeNativeInterface_mac.cpp
new file mode 100644
index 0000000000..5a5c9e088a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/mac/GrGLMakeNativeInterface_mac.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC)
+
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+
+#include <dlfcn.h>
+
+class GLLoader {
+public:
+ GLLoader() {
+ fLibrary = dlopen(
+ "/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib",
+ RTLD_LAZY);
+ }
+
+ ~GLLoader() {
+ if (fLibrary) {
+ dlclose(fLibrary);
+ }
+ }
+
+ void* handle() const {
+ return nullptr == fLibrary ? RTLD_DEFAULT : fLibrary;
+ }
+
+private:
+ void* fLibrary;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() {}
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ return (GrGLFuncPtr) dlsym(fLoader.handle(), name);
+ }
+
+private:
+ GLLoader fLoader;
+};
+
+static GrGLFuncPtr mac_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ GLProcGetter getter;
+ return GrGLMakeAssembledGLInterface(&getter, mac_get_gl_proc);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
+
+#endif//defined(SK_BUILD_FOR_MAC)
diff --git a/gfx/skia/skia/src/gpu/gl/win/GrGLMakeNativeInterface_win.cpp b/gfx/skia/skia/src/gpu/gl/win/GrGLMakeNativeInterface_win.cpp
new file mode 100644
index 0000000000..e8d43a76f6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/win/GrGLMakeNativeInterface_win.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/core/SkLeanWindows.h"
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+#include "include/gpu/gl/GrGLInterface.h"
+#include "src/gpu/gl/GrGLUtil.h"
+
+#if defined(_M_ARM64)
+
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() { return nullptr; }
+
+#else
+
+class AutoLibraryUnload {
+public:
+ AutoLibraryUnload(const char* moduleName) {
+ fModule = LoadLibraryA(moduleName);
+ }
+ ~AutoLibraryUnload() {
+ if (fModule) {
+ FreeLibrary(fModule);
+ }
+ }
+ HMODULE get() const { return fModule; }
+
+private:
+ HMODULE fModule;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() : fGLLib("opengl32.dll") {}
+
+ bool isInitialized() const { return SkToBool(fGLLib.get()); }
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ GrGLFuncPtr proc;
+ if ((proc = (GrGLFuncPtr) GetProcAddress(fGLLib.get(), name))) {
+ return proc;
+ }
+ if ((proc = (GrGLFuncPtr) wglGetProcAddress(name))) {
+ return proc;
+ }
+ return nullptr;
+ }
+
+private:
+ AutoLibraryUnload fGLLib;
+};
+
+static GrGLFuncPtr win_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ SkASSERT(wglGetCurrentContext());
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+/*
+ * Windows makes the GL funcs all be __stdcall instead of __cdecl :(
+ * This implementation will only work if GR_GL_FUNCTION_TYPE is __stdcall.
+ * Otherwise, a springboard would be needed that hides the calling convention.
+ */
+sk_sp<const GrGLInterface> GrGLMakeNativeInterface() {
+ if (nullptr == wglGetCurrentContext()) {
+ return nullptr;
+ }
+
+ GLProcGetter getter;
+ if (!getter.isInitialized()) {
+ return nullptr;
+ }
+
+ GrGLGetStringFn* getString = (GrGLGetStringFn*)getter.getProc("glGetString");
+ if (nullptr == getString) {
+ return nullptr;
+ }
+ const char* verStr = reinterpret_cast<const char*>(getString(GR_GL_VERSION));
+ GrGLStandard standard = GrGLGetStandardInUseFromString(verStr);
+
+ if (GR_IS_GR_GL_ES(standard)) {
+ return GrGLMakeAssembledGLESInterface(&getter, win_get_gl_proc);
+ } else if (GR_IS_GR_GL(standard)) {
+ return GrGLMakeAssembledGLInterface(&getter, win_get_gl_proc);
+ }
+ return nullptr;
+}
+
+#endif // ARM64
+
+const GrGLInterface* GrGLCreateNativeInterface() { return GrGLMakeNativeInterface().release(); }
+
+#endif//defined(SK_BUILD_FOR_WIN)
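
One caveat for the wglGetProcAddress fallback above: driver writeups and loader projects commonly report that it can return small sentinel values (1, 2, 3, or -1) rather than null on failure. A defensive variant of getProc, offered as an assumption rather than something this patch does:

    GrGLFuncPtr proc = (GrGLFuncPtr) wglGetProcAddress(name);
    intptr_t v = (intptr_t) proc;
    if (v == 0 || v == 1 || v == 2 || v == 3 || v == -1) {
        proc = nullptr;  // treat known sentinel failures as "not found"
    }
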
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp
new file mode 100644
index 0000000000..afa9159bb0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/glsl/GrGLSL.h"
+
+const char* GrGLSLTypeString(GrSLType t) {
+ switch (t) {
+ case kVoid_GrSLType:
+ return "void";
+ case kHalf_GrSLType:
+ return "half";
+ case kHalf2_GrSLType:
+ return "half2";
+ case kHalf3_GrSLType:
+ return "half3";
+ case kHalf4_GrSLType:
+ return "half4";
+ case kFloat_GrSLType:
+ return "float";
+ case kFloat2_GrSLType:
+ return "float2";
+ case kFloat3_GrSLType:
+ return "float3";
+ case kFloat4_GrSLType:
+ return "float4";
+ case kUint2_GrSLType:
+ return "uint2";
+ case kInt2_GrSLType:
+ return "int2";
+ case kInt3_GrSLType:
+ return "int3";
+ case kInt4_GrSLType:
+ return "int4";
+ case kFloat2x2_GrSLType:
+ return "float2x2";
+ case kFloat3x3_GrSLType:
+ return "float3x3";
+ case kFloat4x4_GrSLType:
+ return "float4x4";
+ case kHalf2x2_GrSLType:
+ return "half2x2";
+ case kHalf3x3_GrSLType:
+ return "half3x3";
+ case kHalf4x4_GrSLType:
+ return "half4x4";
+ case kTexture2DSampler_GrSLType:
+ return "sampler2D";
+ case kTextureExternalSampler_GrSLType:
+ return "samplerExternalOES";
+ case kTexture2DRectSampler_GrSLType:
+ return "sampler2DRect";
+ case kBool_GrSLType:
+ return "bool";
+ case kInt_GrSLType:
+ return "int";
+ case kUint_GrSLType:
+ return "uint";
+ case kShort_GrSLType:
+ return "short";
+ case kShort2_GrSLType:
+ return "short2";
+ case kShort3_GrSLType:
+ return "short3";
+ case kShort4_GrSLType:
+ return "short4";
+ case kUShort_GrSLType:
+ return "ushort";
+ case kUShort2_GrSLType:
+ return "ushort2";
+ case kUShort3_GrSLType:
+ return "ushort3";
+ case kUShort4_GrSLType:
+ return "ushort4";
+ case kByte_GrSLType:
+ return "byte";
+ case kByte2_GrSLType:
+ return "byte2";
+ case kByte3_GrSLType:
+ return "byte3";
+ case kByte4_GrSLType:
+ return "byte4";
+ case kUByte_GrSLType:
+ return "ubyte";
+ case kUByte2_GrSLType:
+ return "ubyte2";
+ case kUByte3_GrSLType:
+ return "ubyte3";
+ case kUByte4_GrSLType:
+ return "ubyte4";
+ case kTexture2D_GrSLType:
+ return "texture2D";
+ case kSampler_GrSLType:
+ return "sampler";
+ }
+ SK_ABORT("Unknown shader var type.");
+}
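
This mapping is typically consumed when splicing declarations into generated source. A one-line illustrative use (SkStringPrintf is Skia's printf-style string helper):

    SkString decl = SkStringPrintf("%s %s;", GrGLSLTypeString(kHalf4_GrSLType), "color");
    // decl == "half4 color;"
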
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSL.h b/gfx/skia/skia/src/gpu/glsl/GrGLSL.h
new file mode 100644
index 0000000000..0bee458b6e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSL.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSL_DEFINED
+#define GrGLSL_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+
+class GrShaderCaps;
+
+// Limited set of GLSL versions we build shaders for. Caller should round
+// down the GLSL version to one of these enums.
+enum GrGLSLGeneration {
+ /**
+ * Desktop GLSL 1.10 and ES2 shading language (based on desktop GLSL 1.20)
+ */
+ k110_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.30
+ */
+ k130_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.40
+ */
+ k140_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.50
+ */
+ k150_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 3.30, and ES GLSL 3.00
+ */
+ k330_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 4.00
+ */
+ k400_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 4.20
+ */
+ k420_GrGLSLGeneration,
+ /**
+ * ES GLSL 3.10 only. TODO: make GLSLCap objects to make this more granular.
+ */
+ k310es_GrGLSLGeneration,
+ /**
+ * ES GLSL 3.20
+ */
+ k320es_GrGLSLGeneration,
+};
+
+const char* GrGLSLTypeString(GrSLType);
+
+#endif
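
The "round down" contract in the header comment can be made concrete. The helper below is a sketch of that mapping, derived only from the pairings stated in the enum's own comments (Skia's real logic lives in caps setup, not here):

    static GrGLSLGeneration round_down_generation(bool isES, int major, int minor) {
        int v = major * 100 + minor * 10;                    // e.g. 3.30 -> 330
        if (isES) {
            if (v >= 320) { return k320es_GrGLSLGeneration; }
            if (v >= 310) { return k310es_GrGLSLGeneration; }
            if (v >= 300) { return k330_GrGLSLGeneration; }  // ES 3.00 pairs with desktop 3.30
            return k110_GrGLSLGeneration;                    // ES2 pairs with the desktop 1.10 tier
        }
        if (v >= 420) { return k420_GrGLSLGeneration; }
        if (v >= 400) { return k400_GrGLSLGeneration; }
        if (v >= 330) { return k330_GrGLSLGeneration; }
        if (v >= 150) { return k150_GrGLSLGeneration; }
        if (v >= 140) { return k140_GrGLSLGeneration; }
        if (v >= 130) { return k130_GrGLSLGeneration; }
        return k110_GrGLSLGeneration;
    }
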
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp
new file mode 100644
index 0000000000..2d1e93c777
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlendModePriv.h"
+#include "src/gpu/glsl/GrGLSLBlend.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// Advanced (non-coeff) blend helpers
+//////////////////////////////////////////////////////////////////////////////
+
+static void hard_light(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst) {
+ static const char kComponents[] = { 'r', 'g', 'b' };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kComponents); ++i) {
+ char component = kComponents[i];
+ fsBuilder->codeAppendf("if (2.0 * %s.%c <= %s.a) {", src, component, src);
+ fsBuilder->codeAppendf("%s.%c = 2.0 * %s.%c * %s.%c;",
+ final, component, src, component, dst, component);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a - 2.0 * (%s.a - %s.%c) * (%s.a - %s.%c);",
+ final, component, src, dst, dst, dst, component, src, src,
+ component);
+ fsBuilder->codeAppend("}");
+ }
+ fsBuilder->codeAppendf("%s.rgb += %s.rgb * (1.0 - %s.a) + %s.rgb * (1.0 - %s.a);",
+ final, src, dst, dst, src);
+}
+
+// Does one component of color-dodge
+static void color_dodge_component(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ const char* divisorGuard = "";
+ const GrShaderCaps* shaderCaps = fsBuilder->getProgramBuilder()->shaderCaps();
+ if (shaderCaps->mustGuardDivisionEvenAfterExplicitZeroCheck()) {
+ divisorGuard = "+ 0.00000001";
+ }
+
+ fsBuilder->codeAppendf("if (0.0 == %s.%c) {", dst, component);
+ fsBuilder->codeAppendf("%s.%c = %s.%c * (1.0 - %s.a);",
+ final, component, src, component, dst);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("half d = %s.a - %s.%c;", src, src, component);
+ fsBuilder->codeAppend("if (0.0 == d) {");
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, dst, src, component, dst, dst, component,
+ src);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("d = min(%s.a, %s.%c * %s.a / (d %s));",
+ dst, dst, component, src, divisorGuard);
+ fsBuilder->codeAppendf("%s.%c = d * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, src, component, dst, dst, component, src);
+ fsBuilder->codeAppend("}");
+ fsBuilder->codeAppend("}");
+}
+
+// Does one component of color-burn
+static void color_burn_component(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ const char* divisorGuard = "";
+ const GrShaderCaps* shaderCaps = fsBuilder->getProgramBuilder()->shaderCaps();
+ if (shaderCaps->mustGuardDivisionEvenAfterExplicitZeroCheck()) {
+ divisorGuard = "+ 0.00000001";
+ }
+
+ fsBuilder->codeAppendf("if (%s.a == %s.%c) {", dst, dst, component);
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, dst, src, component, dst, dst, component,
+ src);
+ fsBuilder->codeAppendf("} else if (0.0 == %s.%c) {", src, component);
+ fsBuilder->codeAppendf("%s.%c = %s.%c * (1.0 - %s.a);",
+ final, component, dst, component, src);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("half d = max(0.0, %s.a - (%s.a - %s.%c) * %s.a / (%s.%c %s));",
+ dst, dst, dst, component, src, src, component, divisorGuard);
+ fsBuilder->codeAppendf("%s.%c = %s.a * d + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, src, component, dst, dst, component, src);
+ fsBuilder->codeAppend("}");
+}
+
+// Does one component of soft-light. Caller should have already checked that dst alpha > 0.
+static void soft_light_component_pos_dst_alpha(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ const char* divisorGuard = "";
+ const GrShaderCaps* shaderCaps = fsBuilder->getProgramBuilder()->shaderCaps();
+ if (shaderCaps->mustGuardDivisionEvenAfterExplicitZeroCheck()) {
+ divisorGuard = "+ 0.00000001";
+ }
+
+ // if (2S < Sa)
+ fsBuilder->codeAppendf("if (2.0 * %s.%c <= %s.a) {", src, component, src);
+ // (D^2 (Sa-2 S))/Da+(1-Da) S+D (-Sa+2 S+1)
+ fsBuilder->codeAppendf("%s.%c = (%s.%c*%s.%c*(%s.a - 2.0*%s.%c)) / (%s.a %s) +"
+ "(1.0 - %s.a) * %s.%c + %s.%c*(-%s.a + 2.0*%s.%c + 1.0);",
+ final, component, dst, component, dst, component, src, src,
+ component, dst, divisorGuard, dst, src, component, dst, component, src,
+ src, component);
+ // else if (4D < Da)
+ fsBuilder->codeAppendf("} else if (4.0 * %s.%c <= %s.a) {",
+ dst, component, dst);
+ fsBuilder->codeAppendf("half DSqd = %s.%c * %s.%c;",
+ dst, component, dst, component);
+ fsBuilder->codeAppendf("half DCub = DSqd * %s.%c;", dst, component);
+ fsBuilder->codeAppendf("half DaSqd = %s.a * %s.a;", dst, dst);
+ fsBuilder->codeAppendf("half DaCub = DaSqd * %s.a;", dst);
+ // (Da^3 (-S)+Da^2 (S-D (3 Sa-6 S-1))+12 Da D^2 (Sa-2 S)-16 D^3 (Sa-2 S))/Da^2
+ fsBuilder->codeAppendf("%s.%c ="
+ "(DaSqd*(%s.%c - %s.%c * (3.0*%s.a - 6.0*%s.%c - 1.0)) +"
+ " 12.0*%s.a*DSqd*(%s.a - 2.0*%s.%c) - 16.0*DCub * (%s.a - 2.0*%s.%c) -"
+ " DaCub*%s.%c) / (DaSqd %s);",
+ final, component, src, component, dst, component,
+ src, src, component, dst, src, src, component, src, src,
+ component, src, component, divisorGuard);
+ fsBuilder->codeAppendf("} else {");
+ // -sqrt(Da * D) (Sa-2 S)-Da S+D (Sa-2 S+1)+S
+ fsBuilder->codeAppendf("%s.%c = %s.%c*(%s.a - 2.0*%s.%c + 1.0) + %s.%c -"
+ " sqrt(%s.a*%s.%c)*(%s.a - 2.0*%s.%c) - %s.a*%s.%c;",
+ final, component, dst, component, src, src, component, src, component,
+ dst, dst, component, src, src, component, dst, src, component);
+ fsBuilder->codeAppendf("}");
+}
+
+// Adds a function that takes two colors and an alpha as input. It produces a color with the
+// hue and saturation of the first color, the luminosity of the second color, and the input
+// alpha. It has this signature:
+// half3 set_luminance(half3 hueSat, half alpha, half3 lumColor).
+static void add_lum_function(GrGLSLFragmentBuilder* fsBuilder, SkString* setLumFunction) {
+ // Emit a helper that gets the luminance of a color.
+ SkString getFunction;
+ GrShaderVar getLumArgs[] = {
+ GrShaderVar("color", kHalf3_GrSLType),
+ };
+ SkString getLumBody("return dot(half3(0.3, 0.59, 0.11), color);");
+ fsBuilder->emitFunction(kHalf_GrSLType,
+ "luminance",
+ SK_ARRAY_COUNT(getLumArgs), getLumArgs,
+ getLumBody.c_str(),
+ &getFunction);
+
+ // Emit the set luminance function.
+ GrShaderVar setLumArgs[] = {
+ GrShaderVar("hueSat", kHalf3_GrSLType),
+ GrShaderVar("alpha", kHalf_GrSLType),
+ GrShaderVar("lumColor", kHalf3_GrSLType),
+ };
+ SkString setLumBody;
+ setLumBody.printf("half outLum = %s(lumColor);", getFunction.c_str());
+ setLumBody.appendf("half3 outColor = outLum - %s(hueSat) + hueSat;", getFunction.c_str());
+ setLumBody.append("half minComp = min(min(outColor.r, outColor.g), outColor.b);"
+ "half maxComp = max(max(outColor.r, outColor.g), outColor.b);"
+ "if (minComp < 0.0 && outLum != minComp) {"
+ "outColor = outLum + ((outColor - half3(outLum, outLum, outLum)) * outLum) /"
+ "(outLum - minComp);"
+ "}"
+ "if (maxComp > alpha && maxComp != outLum) {"
+ "outColor = outLum +"
+ "((outColor - half3(outLum, outLum, outLum)) * (alpha - outLum)) /"
+ "(maxComp - outLum);"
+ "}"
+ "return outColor;");
+ fsBuilder->emitFunction(kHalf3_GrSLType,
+ "set_luminance",
+ SK_ARRAY_COUNT(setLumArgs), setLumArgs,
+ setLumBody.c_str(),
+ setLumFunction);
+}
+
+// Adds a function that creates a color with the hue and luminosity of one input color and
+// the saturation of another color. It will have this signature:
+// half3 set_saturation(half3 hueLumColor, half3 satColor)
+static void add_sat_function(GrGLSLFragmentBuilder* fsBuilder, SkString* setSatFunction) {
+ // Emit a helper that gets the saturation of a color
+ SkString getFunction;
+ GrShaderVar getSatArgs[] = { GrShaderVar("color", kHalf3_GrSLType) };
+ SkString getSatBody;
+ getSatBody.printf("return max(max(color.r, color.g), color.b) - "
+ "min(min(color.r, color.g), color.b);");
+ fsBuilder->emitFunction(kHalf_GrSLType,
+ "saturation",
+ SK_ARRAY_COUNT(getSatArgs), getSatArgs,
+ getSatBody.c_str(),
+ &getFunction);
+
+ // Emit a helper that sets the saturation given sorted input channels. This used
+ // to use inout params for min, mid, and max components but that seems to cause
+ // problems on PowerVR drivers. So instead it returns a half3 where r, g, b are the
+ // adjusted min, mid, and max inputs, respectively.
+ SkString helperFunction;
+ GrShaderVar helperArgs[] = {
+ GrShaderVar("minComp", kHalf_GrSLType),
+ GrShaderVar("midComp", kHalf_GrSLType),
+ GrShaderVar("maxComp", kHalf_GrSLType),
+ GrShaderVar("sat", kHalf_GrSLType),
+ };
+ static const char kHelperBody[] = "if (minComp < maxComp) {"
+ "half3 result;"
+ "result.r = 0.0;"
+ "result.g = sat * (midComp - minComp) / (maxComp - minComp);"
+ "result.b = sat;"
+ "return result;"
+ "} else {"
+ "return half3(0, 0, 0);"
+ "}";
+ fsBuilder->emitFunction(kHalf3_GrSLType,
+ "set_saturation_helper",
+ SK_ARRAY_COUNT(helperArgs), helperArgs,
+ kHelperBody,
+ &helperFunction);
+
+ GrShaderVar setSatArgs[] = {
+ GrShaderVar("hueLumColor", kHalf3_GrSLType),
+ GrShaderVar("satColor", kHalf3_GrSLType),
+ };
+ const char* helpFunc = helperFunction.c_str();
+ SkString setSatBody;
+ setSatBody.appendf("half sat = %s(satColor);"
+ "if (hueLumColor.r <= hueLumColor.g) {"
+ "if (hueLumColor.g <= hueLumColor.b) {"
+ "hueLumColor.rgb = %s(hueLumColor.r, hueLumColor.g, hueLumColor.b, sat);"
+ "} else if (hueLumColor.r <= hueLumColor.b) {"
+ "hueLumColor.rbg = %s(hueLumColor.r, hueLumColor.b, hueLumColor.g, sat);"
+ "} else {"
+ "hueLumColor.brg = %s(hueLumColor.b, hueLumColor.r, hueLumColor.g, sat);"
+ "}"
+ "} else if (hueLumColor.r <= hueLumColor.b) {"
+ "hueLumColor.grb = %s(hueLumColor.g, hueLumColor.r, hueLumColor.b, sat);"
+ "} else if (hueLumColor.g <= hueLumColor.b) {"
+ "hueLumColor.gbr = %s(hueLumColor.g, hueLumColor.b, hueLumColor.r, sat);"
+ "} else {"
+ "hueLumColor.bgr = %s(hueLumColor.b, hueLumColor.g, hueLumColor.r, sat);"
+ "}"
+ "return hueLumColor;",
+ getFunction.c_str(), helpFunc, helpFunc, helpFunc, helpFunc,
+ helpFunc, helpFunc);
+ fsBuilder->emitFunction(kHalf3_GrSLType,
+ "set_saturation",
+ SK_ARRAY_COUNT(setSatArgs), setSatArgs,
+ setSatBody.c_str(),
+ setSatFunction);
+}
+
+static void emit_advanced_xfermode_code(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outputColor,
+ SkBlendMode mode) {
+ SkASSERT(srcColor);
+ SkASSERT(dstColor);
+ SkASSERT(outputColor);
+ // These all perform src-over on the alpha channel.
+ fsBuilder->codeAppendf("%s.a = %s.a + (1.0 - %s.a) * %s.a;",
+ outputColor, srcColor, srcColor, dstColor);
+
+ switch (mode) {
+ case SkBlendMode::kOverlay:
+ // Overlay is Hard-Light with the src and dst reversed
+ hard_light(fsBuilder, outputColor, dstColor, srcColor);
+ break;
+ case SkBlendMode::kDarken:
+ fsBuilder->codeAppendf("%s.rgb = min((1.0 - %s.a) * %s.rgb + %s.rgb, "
+ "(1.0 - %s.a) * %s.rgb + %s.rgb);",
+ outputColor,
+ srcColor, dstColor, srcColor,
+ dstColor, srcColor, dstColor);
+ break;
+ case SkBlendMode::kLighten:
+ fsBuilder->codeAppendf("%s.rgb = max((1.0 - %s.a) * %s.rgb + %s.rgb, "
+ "(1.0 - %s.a) * %s.rgb + %s.rgb);",
+ outputColor,
+ srcColor, dstColor, srcColor,
+ dstColor, srcColor, dstColor);
+ break;
+ case SkBlendMode::kColorDodge:
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ break;
+ case SkBlendMode::kColorBurn:
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ break;
+ case SkBlendMode::kHardLight:
+ hard_light(fsBuilder, outputColor, srcColor, dstColor);
+ break;
+ case SkBlendMode::kSoftLight:
+ fsBuilder->codeAppendf("if (0.0 == %s.a) {", dstColor);
+ fsBuilder->codeAppendf("%s.rgba = %s;", outputColor, srcColor);
+ fsBuilder->codeAppendf("} else {");
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ fsBuilder->codeAppendf("}");
+ break;
+ case SkBlendMode::kDifference:
+ fsBuilder->codeAppendf("%s.rgb = %s.rgb + %s.rgb -"
+ "2.0 * min(%s.rgb * %s.a, %s.rgb * %s.a);",
+ outputColor, srcColor, dstColor, srcColor, dstColor,
+ dstColor, srcColor);
+ break;
+ case SkBlendMode::kExclusion:
+ fsBuilder->codeAppendf("%s.rgb = %s.rgb + %s.rgb - "
+ "2.0 * %s.rgb * %s.rgb;",
+ outputColor, dstColor, srcColor, dstColor, srcColor);
+ break;
+ case SkBlendMode::kMultiply:
+ fsBuilder->codeAppendf("%s.rgb = (1.0 - %s.a) * %s.rgb + "
+ "(1.0 - %s.a) * %s.rgb + "
+ "%s.rgb * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor,
+ srcColor, dstColor);
+ break;
+ case SkBlendMode::kHue: {
+ // SetLum(SetSat(S * Da, Sat(D * Sa)), Sa*Da, D*Sa) + (1 - Sa) * D + (1 - Da) * S
+ SkString setSat, setLum;
+ add_sat_function(fsBuilder, &setSat);
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("half4 dstSrcAlpha = %s * %s.a;",
+ dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s(%s.rgb * %s.a, dstSrcAlpha.rgb),"
+ "dstSrcAlpha.a, dstSrcAlpha.rgb);",
+ outputColor, setLum.c_str(), setSat.c_str(), srcColor,
+ dstColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkBlendMode::kSaturation: {
+ // SetLum(SetSat(D * Sa, Sat(S * Da)), Sa*Da, D*Sa)) + (1 - Sa) * D + (1 - Da) * S
+ SkString setSat, setLum;
+ add_sat_function(fsBuilder, &setSat);
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("half4 dstSrcAlpha = %s * %s.a;",
+ dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s(dstSrcAlpha.rgb, %s.rgb * %s.a),"
+ "dstSrcAlpha.a, dstSrcAlpha.rgb);",
+ outputColor, setLum.c_str(), setSat.c_str(), srcColor,
+ dstColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkBlendMode::kColor: {
+ // SetLum(S * Da, Sa* Da, D * Sa) + (1 - Sa) * D + (1 - Da) * S
+ SkString setLum;
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("half4 srcDstAlpha = %s * %s.a;",
+ srcColor, dstColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(srcDstAlpha.rgb, srcDstAlpha.a, %s.rgb * %s.a);",
+ outputColor, setLum.c_str(), dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkBlendMode::kLuminosity: {
+ // SetLum(D * Sa, Sa* Da, S * Da) + (1 - Sa) * D + (1 - Da) * S
+ SkString setLum;
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("half4 srcDstAlpha = %s * %s.a;",
+ srcColor, dstColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s.rgb * %s.a, srcDstAlpha.a, srcDstAlpha.rgb);",
+ outputColor, setLum.c_str(), dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ default:
+ SK_ABORT("Unknown Custom Xfer mode.");
+ break;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Porter-Duff blend helper
+//////////////////////////////////////////////////////////////////////////////
+
+static bool append_porterduff_term(GrGLSLFragmentBuilder* fsBuilder, SkBlendModeCoeff coeff,
+ const char* colorName, const char* srcColorName,
+ const char* dstColorName, bool hasPrevious) {
+ if (SkBlendModeCoeff::kZero == coeff) {
+ return hasPrevious;
+ } else {
+ if (hasPrevious) {
+ fsBuilder->codeAppend(" + ");
+ }
+ fsBuilder->codeAppendf("%s", colorName);
+ switch (coeff) {
+ case SkBlendModeCoeff::kOne:
+ break;
+ case SkBlendModeCoeff::kSC:
+ fsBuilder->codeAppendf(" * %s", srcColorName);
+ break;
+ case SkBlendModeCoeff::kISC:
+ fsBuilder->codeAppendf(" * (half4(1.0) - %s)", srcColorName);
+ break;
+ case SkBlendModeCoeff::kDC:
+ fsBuilder->codeAppendf(" * %s", dstColorName);
+ break;
+ case SkBlendModeCoeff::kIDC:
+ fsBuilder->codeAppendf(" * (half4(1.0) - %s)", dstColorName);
+ break;
+ case SkBlendModeCoeff::kSA:
+ fsBuilder->codeAppendf(" * %s.a", srcColorName);
+ break;
+ case SkBlendModeCoeff::kISA:
+ fsBuilder->codeAppendf(" * (1.0 - %s.a)", srcColorName);
+ break;
+ case SkBlendModeCoeff::kDA:
+ fsBuilder->codeAppendf(" * %s.a", dstColorName);
+ break;
+ case SkBlendModeCoeff::kIDA:
+ fsBuilder->codeAppendf(" * (1.0 - %s.a)", dstColorName);
+ break;
+ default:
+ SK_ABORT("Unsupported Blend Coeff");
+ }
+ return true;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrGLSLBlend::AppendMode(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor,
+ SkBlendMode mode) {
+
+ SkBlendModeCoeff srcCoeff, dstCoeff;
+ if (SkBlendMode_AsCoeff(mode, &srcCoeff, &dstCoeff)) {
+ // The only coeff mode that can go out of range is plus.
+ bool clamp = mode == SkBlendMode::kPlus;
+
+ fsBuilder->codeAppendf("%s = ", outColor);
+ if (clamp) {
+ fsBuilder->codeAppend("clamp(");
+ }
+ // append src blend
+ bool didAppend = append_porterduff_term(fsBuilder, srcCoeff, srcColor, srcColor, dstColor,
+ false);
+ // append dst blend
+ if (!append_porterduff_term(fsBuilder, dstCoeff, dstColor, srcColor, dstColor, didAppend)) {
+ fsBuilder->codeAppend("half4(0, 0, 0, 0)");
+ }
+ if (clamp) {
+ fsBuilder->codeAppend(", 0, 1);");
+ }
+ fsBuilder->codeAppend(";");
+ } else {
+ emit_advanced_xfermode_code(fsBuilder, srcColor, dstColor, outColor, mode);
+ }
+}
+
+void GrGLSLBlend::AppendRegionOp(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor,
+ SkRegion::Op regionOp) {
+ SkBlendModeCoeff srcCoeff, dstCoeff;
+ switch (regionOp) {
+ case SkRegion::kReplace_Op:
+ srcCoeff = SkBlendModeCoeff::kOne;
+ dstCoeff = SkBlendModeCoeff::kZero;
+ break;
+ case SkRegion::kIntersect_Op:
+ srcCoeff = SkBlendModeCoeff::kDC;
+ dstCoeff = SkBlendModeCoeff::kZero;
+ break;
+ case SkRegion::kUnion_Op:
+ srcCoeff = SkBlendModeCoeff::kOne;
+ dstCoeff = SkBlendModeCoeff::kISC;
+ break;
+ case SkRegion::kXOR_Op:
+ srcCoeff = SkBlendModeCoeff::kIDC;
+ dstCoeff = SkBlendModeCoeff::kISC;
+ break;
+ case SkRegion::kDifference_Op:
+ srcCoeff = SkBlendModeCoeff::kZero;
+ dstCoeff = SkBlendModeCoeff::kISC;
+ break;
+ case SkRegion::kReverseDifference_Op:
+ srcCoeff = SkBlendModeCoeff::kIDC;
+ dstCoeff = SkBlendModeCoeff::kZero;
+ break;
+ default:
+ SK_ABORT("Unsupported Op");
+ // We should never get here, but assign the coeffs anyway to keep the compiler happy.
+ srcCoeff = SkBlendModeCoeff::kZero;
+ dstCoeff = SkBlendModeCoeff::kZero;
+ }
+ fsBuilder->codeAppendf("%s = ", outColor);
+ // append src blend
+ bool didAppend = append_porterduff_term(fsBuilder, srcCoeff, srcColor, srcColor, dstColor,
+ false);
+ // append dst blend
+ if (!append_porterduff_term(fsBuilder, dstCoeff, dstColor, srcColor, dstColor, didAppend)) {
+ fsBuilder->codeAppend("half4(0, 0, 0, 0)");
+ }
+ fsBuilder->codeAppend(";");
+}
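
The string-spliced color-dodge code above is hard to read as math. Below is a scalar C++ transcription of the same three branches for one premultiplied channel, as a reference for checking the emitted GLSL (not part of the patch; needs <algorithm>):

    static float color_dodge_channel(float sc, float sa, float dc, float da) {
        if (dc == 0.0f) {                  // dst channel is zero
            return sc * (1.0f - da);
        }
        float d = sa - sc;
        if (d == 0.0f) {                   // src channel equals src alpha
            return sa * da + sc * (1.0f - da) + dc * (1.0f - sa);
        }
        d = std::min(da, dc * sa / d);     // general case; optional divisor guard dropped
        return d * sa + sc * (1.0f - da) + dc * (1.0f - sa);
    }
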
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h
new file mode 100644
index 0000000000..69b821e158
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLBlend_DEFINED
+#define GrGLBlend_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkRegion.h"
+
+class GrGLSLFragmentBuilder;
+
+namespace GrGLSLBlend {
+ /*
+ * Appends GLSL code to fsBuilder that assigns a specified blend of the srcColor and dstColor
+ * variables to the outColor variable.
+ */
+ void AppendMode(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor, SkBlendMode mode);
+
+ void AppendRegionOp(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor, SkRegion::Op regionOp);
+};
+
+#endif
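
Tracing AppendMode through the coefficient path for the most common case: SkBlendMode::kSrcOver decomposes to the coefficients (kOne, kISA), so the emitted assignment is effectively the line below (reconstructed by hand from append_porterduff_term, not quoted from Skia):

    outColor = srcColor + dstColor * (1.0 - srcColor.a);
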
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h
new file mode 100644
index 0000000000..1e68897b1b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLColorSpaceXformHelper_DEFINED
+#define GrGLSLColorSpaceXformHelper_DEFINED
+
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+/**
+ * Helper class to assist with using GrColorSpaceXform within an FP. This manages all of the
+ * uniforms needed, and can be passed to shader builder functions to automatically generate the
+ * correct color space transformation code.
+ */
+class GrGLSLColorSpaceXformHelper : public SkNoncopyable {
+public:
+ GrGLSLColorSpaceXformHelper() {
+ memset(&fFlags, 0, sizeof(fFlags));
+ }
+
+ void emitCode(GrGLSLUniformHandler* uniformHandler, const GrColorSpaceXform* colorSpaceXform,
+ uint32_t visibility = kFragment_GrShaderFlag) {
+ SkASSERT(uniformHandler);
+ if (colorSpaceXform) {
+ fFlags = colorSpaceXform->fSteps.flags;
+ if (this->applySrcTF()) {
+ fSrcTFVar = uniformHandler->addUniformArray(visibility, kHalf_GrSLType,
+ "SrcTF", kNumTransferFnCoeffs);
+ fSrcTFKind = classify_transfer_fn(colorSpaceXform->fSteps.srcTF);
+ }
+ if (this->applyGamutXform()) {
+ fGamutXformVar = uniformHandler->addUniform(visibility, kHalf3x3_GrSLType,
+ "ColorXform");
+ }
+ if (this->applyDstTF()) {
+ fDstTFVar = uniformHandler->addUniformArray(visibility, kHalf_GrSLType,
+ "DstTF", kNumTransferFnCoeffs);
+ fDstTFKind = classify_transfer_fn(colorSpaceXform->fSteps.dstTFInv);
+ }
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrColorSpaceXform* colorSpaceXform) {
+ if (this->applySrcTF()) {
+ pdman.set1fv(fSrcTFVar, kNumTransferFnCoeffs, &colorSpaceXform->fSteps.srcTF.g);
+ }
+ if (this->applyGamutXform()) {
+ pdman.setMatrix3f(fGamutXformVar, colorSpaceXform->fSteps.src_to_dst_matrix);
+ }
+ if (this->applyDstTF()) {
+ pdman.set1fv(fDstTFVar, kNumTransferFnCoeffs, &colorSpaceXform->fSteps.dstTFInv.g);
+ }
+ }
+
+ bool isNoop() const { return (0 == fFlags.mask()); }
+
+ bool applyUnpremul() const { return fFlags.unpremul; }
+ bool applySrcTF() const { return fFlags.linearize; }
+ bool applyGamutXform() const { return fFlags.gamut_transform; }
+ bool applyDstTF() const { return fFlags.encode; }
+ bool applyPremul() const { return fFlags.premul; }
+
+ TFKind srcTFKind() const { return fSrcTFKind; }
+ TFKind dstTFKind() const { return fDstTFKind; }
+
+ GrGLSLProgramDataManager::UniformHandle srcTFUniform() const { return fSrcTFVar; }
+ GrGLSLProgramDataManager::UniformHandle gamutXformUniform() const { return fGamutXformVar; }
+ GrGLSLProgramDataManager::UniformHandle dstTFUniform() const { return fDstTFVar; }
+
+private:
+ static const int kNumTransferFnCoeffs = 7;
+
+ GrGLSLProgramDataManager::UniformHandle fSrcTFVar;
+ GrGLSLProgramDataManager::UniformHandle fGamutXformVar;
+ GrGLSLProgramDataManager::UniformHandle fDstTFVar;
+ SkColorSpaceXformSteps::Flags fFlags;
+ TFKind fSrcTFKind;
+ TFKind fDstTFKind;
+};
+
+#endif
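
A hedged sketch of the helper's two-phase use inside a GLSL fragment processor; fColorXformHelper is an illustrative member name and getXform() an assumed accessor on the owning processor:

    // In emitCode(): allocate the uniforms and make them available to sampling code.
    fColorXformHelper.emitCode(args.fUniformHandler, fp.getXform());

    // Per draw, in onSetData(): upload the current transfer-fn and gamut values.
    fColorXformHelper.setData(pdman, fp.getXform());
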
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp
new file mode 100644
index 0000000000..e5e97deccf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+void GrGLSLFragmentProcessor::setData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ this->onSetData(pdman, processor);
+}
+
+void GrGLSLFragmentProcessor::invokeChild(int childIndex, const char* inputColor, EmitArgs& args,
+ SkSL::String skslCoords) {
+ while (childIndex >= (int) fFunctionNames.size()) {
+ fFunctionNames.emplace_back();
+ }
+ this->internalInvokeChild(childIndex, inputColor, args.fOutputColor, args, skslCoords);
+}
+
+void GrGLSLFragmentProcessor::writeChildCall(GrGLSLFPFragmentBuilder* fragBuilder, int childIndex,
+ TransformedCoordVars coordVars, const char* inputColor,
+ const char* outputColor, EmitArgs& args,
+ SkSL::String skslCoords) {
+ std::vector<SkString> coordParams;
+ for (int i = 0; i < coordVars.count(); ++i) {
+ coordParams.push_back(fragBuilder->ensureCoords2D(coordVars[i].fVaryingPoint));
+ }
+ // if the fragment processor is invoked with overridden coordinates, it must *always* be invoked
+ // with overridden coords
+ SkASSERT(args.fFp.computeLocalCoordsInVertexShader() == (skslCoords.length() == 0));
+ fragBuilder->codeAppendf("%s = %s(%s", outputColor, fFunctionNames[childIndex].c_str(),
+ inputColor ? inputColor : "half4(1)");
+ if (skslCoords.length()) {
+ fragBuilder->codeAppendf(", %s", skslCoords.c_str());
+ }
+ fragBuilder->codeAppend(");\n");
+}
+
+void GrGLSLFragmentProcessor::invokeChild(int childIndex, const char* inputColor,
+ SkString* outputColor, EmitArgs& args,
+ SkSL::String skslCoords) {
+ SkASSERT(outputColor);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ outputColor->append(fragBuilder->getMangleString());
+ fragBuilder->codeAppendf("half4 %s;", outputColor->c_str());
+ while (childIndex >= (int) fFunctionNames.size()) {
+ fFunctionNames.emplace_back();
+ }
+ if (!args.fFp.computeLocalCoordsInVertexShader() && skslCoords.length() == 0) {
+ skslCoords = "_coords";
+ }
+ if (fFunctionNames[childIndex].size() == 0) {
+ this->internalInvokeChild(childIndex, inputColor, outputColor->c_str(), args, skslCoords);
+ } else {
+ const GrFragmentProcessor& childProc = args.fFp.childProcessor(childIndex);
+
+ TransformedCoordVars coordVars = args.fTransformedCoords.childInputs(childIndex);
+ TextureSamplers textureSamplers = args.fTexSamplers.childInputs(childIndex);
+ EmitArgs childArgs(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ childProc,
+ outputColor->c_str(),
+ "_input",
+ coordVars,
+ textureSamplers);
+ this->writeChildCall(fragBuilder, childIndex, coordVars, inputColor, outputColor->c_str(),
+ childArgs, skslCoords);
+ }
+}
+
+void GrGLSLFragmentProcessor::internalInvokeChild(int childIndex, const char* inputColor,
+ const char* outputColor, EmitArgs& args,
+ SkSL::String skslCoords) {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->onBeforeChildProcEmitCode(); // call first so mangleString is updated
+
+ // Prepare a mangled input color variable if the default is not used,
+ // inputName remains the empty string if no variable is needed.
+ SkString inputName;
+ if (inputColor && strcmp("half4(1.0)", inputColor) != 0 && strcmp("half4(1)", inputColor) != 0) {
+ // The input name is based off of the current mangle string, and
+ // since this is called after onBeforeChildProcEmitCode(), it will be
+ // unique to the child processor (exactly what we want for its input).
+ inputName.appendf("_childInput%s", fragBuilder->getMangleString().c_str());
+ fragBuilder->codeAppendf("half4 %s = %s;", inputName.c_str(), inputColor);
+ }
+
+ const GrFragmentProcessor& childProc = args.fFp.childProcessor(childIndex);
+ TransformedCoordVars coordVars = args.fTransformedCoords.childInputs(childIndex);
+ TextureSamplers textureSamplers = args.fTexSamplers.childInputs(childIndex);
+
+ EmitArgs childArgs(fragBuilder,
+ args.fUniformHandler,
+ args.fShaderCaps,
+ childProc,
+ outputColor,
+ "_input",
+ coordVars,
+ textureSamplers);
+ fFunctionNames[childIndex] = fragBuilder->writeProcessorFunction(
+ this->childProcessor(childIndex),
+ childArgs);
+ this->writeChildCall(fragBuilder, childIndex, coordVars, inputColor, outputColor, childArgs,
+ skslCoords);
+ fragBuilder->onAfterChildProcEmitCode();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrGLSLFragmentProcessor::Iter::next() {
+ if (fFPStack.empty()) {
+ return nullptr;
+ }
+ GrGLSLFragmentProcessor* back = fFPStack.back();
+ fFPStack.pop_back();
+ for (int i = back->numChildProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(back->childProcessor(i));
+ }
+ return back;
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h
new file mode 100644
index 0000000000..ad441d8efc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLFragmentProcessor_DEFINED
+#define GrGLSLFragmentProcessor_DEFINED
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/sksl/SkSLString.h"
+
+class GrProcessor;
+class GrProcessorKeyBuilder;
+class GrGLSLFPBuilder;
+class GrGLSLFPFragmentBuilder;
+
+class GrGLSLFragmentProcessor {
+public:
+ GrGLSLFragmentProcessor() {}
+
+ virtual ~GrGLSLFragmentProcessor() {
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ delete fChildProcessors[i];
+ }
+ }
+
+ using UniformHandle = GrGLSLUniformHandler::UniformHandle;
+ using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
+
+private:
+ /**
+ * This class allows the shader builder to provide each GrGLSLFragmentProcessor with an array of
+ * generated variables where each generated variable corresponds to an element of an array on
+ * the GrFragmentProcessor that generated the GLSLFP. For example, this is used to provide a
+ * variable holding transformed coords for each GrCoordTransform owned by the FP.
+ */
+ template <typename T, int (GrFragmentProcessor::*COUNT)() const>
+ class BuilderInputProvider {
+ public:
+ BuilderInputProvider(const GrFragmentProcessor* fp, const T* ts) : fFP(fp) , fTs(ts) {}
+
+ const T& operator[] (int i) const {
+ SkASSERT(i >= 0 && i < (fFP->*COUNT)());
+ return fTs[i];
+ }
+
+ int count() const { return (fFP->*COUNT)(); }
+
+ BuilderInputProvider childInputs(int childIdx) const {
+ const GrFragmentProcessor* child = &fFP->childProcessor(childIdx);
+ GrFragmentProcessor::Iter iter(fFP);
+ int numToSkip = 0;
+ while (true) {
+ const GrFragmentProcessor* fp = iter.next();
+ if (fp == child) {
+ return BuilderInputProvider(child, fTs + numToSkip);
+ }
+ numToSkip += (fp->*COUNT)();
+ }
+ }
+
+ private:
+ const GrFragmentProcessor* fFP;
+ const T* fTs;
+ };
+
+public:
+ using TransformedCoordVars = BuilderInputProvider<GrGLSLPrimitiveProcessor::TransformVar,
+ &GrFragmentProcessor::numCoordTransforms>;
+ using TextureSamplers =
+ BuilderInputProvider<SamplerHandle, &GrFragmentProcessor::numTextureSamplers>;
+
+ /** Called when the program stage should insert its code into the shaders. The code in each
+ shader will be in its own block ({}) and so locally scoped names will not collide across
+ stages.
+
+ @param fragBuilder Interface used to emit code in the shaders.
+ @param fp The processor that generated this program stage.
+ @param key The key that was computed by GenKey() from the generating
+ GrProcessor.
+ @param outputColor A predefined half4 in the FS in which the stage should place its
+ output color (or coverage).
+ @param inputColor A half4 that holds the input color to the stage in the FS. This may
+ be nullptr, in which case fInputColor is set to "half4(1.0)"
+ (solid white), so it is guaranteed non-null.
+ TODO: Better system for communicating optimization info
+ (e.g. input color is solid white, trans black, known to be opaque,
+ etc.) that allows the processor to communicate back similar known
+ info about its output.
+ @param transformedCoords Fragment shader variables containing the coords computed using
+ each of the GrFragmentProcessor's GrCoordTransforms.
+ @param texSamplers Contains one entry for each TextureSampler of the GrProcessor.
+ These can be passed to the builder to emit texture reads in the
+ generated code.
+ */
+ struct EmitArgs {
+ EmitArgs(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderCaps* caps,
+ const GrFragmentProcessor& fp,
+ const char* outputColor,
+ const char* inputColor,
+ const TransformedCoordVars& transformedCoordVars,
+ const TextureSamplers& textureSamplers)
+ : fFragBuilder(fragBuilder)
+ , fUniformHandler(uniformHandler)
+ , fShaderCaps(caps)
+ , fFp(fp)
+ , fOutputColor(outputColor)
+ , fInputColor(inputColor ? inputColor : "half4(1.0)")
+ , fTransformedCoords(transformedCoordVars)
+ , fTexSamplers(textureSamplers) {}
+ GrGLSLFPFragmentBuilder* fFragBuilder;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrShaderCaps* fShaderCaps;
+ const GrFragmentProcessor& fFp;
+ const char* fOutputColor;
+ const char* fInputColor;
+ const TransformedCoordVars& fTransformedCoords;
+ const TextureSamplers& fTexSamplers;
+ };
+
+ virtual void emitCode(EmitArgs&) = 0;
+
+ // This does not recurse to any attached child processors. Recursing the entire processor tree
+ // is the responsibility of the caller.
+ void setData(const GrGLSLProgramDataManager& pdman, const GrFragmentProcessor& processor);
+
+ int numChildProcessors() const { return fChildProcessors.count(); }
+
+ GrGLSLFragmentProcessor* childProcessor(int index) {
+ return fChildProcessors[index];
+ }
+
+ // Invoke the child with the default input color (solid white)
+ inline void invokeChild(int childIndex, SkString* outputColor, EmitArgs& parentArgs,
+ SkSL::String skslCoords = "") {
+ this->invokeChild(childIndex, nullptr, outputColor, parentArgs, skslCoords);
+ }
+
+ /** Invokes a child proc in its own scope. Pass in the parent's EmitArgs and invokeChild will
+ * automatically extract the coords and samplers of that child and pass them on to the child's
+ * emitCode(). Also, any uniforms or functions emitted by the child will have their names
+ * mangled to prevent redefinitions. The output color name is also mangled, and is therefore an
+ * in/out param. It will be declared in mangled form by invokeChild(). It is legal to pass
+ * nullptr as inputColor, since all fragment processors are required to work without an input
+ * color.
+ */
+ void invokeChild(int childIndex, const char* inputColor, SkString* outputColor,
+ EmitArgs& parentArgs, SkSL::String skslCoords = "");
+
+ // Use the parent's output color to hold child's output, and use the
+ // default input color of solid white
+ inline void invokeChild(int childIndex, EmitArgs& args, SkSL::String skslCoords = "") {
+ // null pointer cast required to disambiguate the function call
+ this->invokeChild(childIndex, (const char*) nullptr, args, skslCoords);
+ }
+
+ /** Variation that uses the parent's output color variable to hold the child's output.*/
+ void invokeChild(int childIndex, const char* inputColor, EmitArgs& parentArgs,
+ SkSL::String skslCoords = "");
+
+ /**
+ * Pre-order traversal of a GLSLFP hierarchy, or of multiple trees with roots in an array of
+ * GLSLFPs. This agrees with the traversal order of GrFragmentProcessor::Iter.
+ */
+ class Iter : public SkNoncopyable {
+ public:
+ explicit Iter(GrGLSLFragmentProcessor* fp) { fFPStack.push_back(fp); }
+ explicit Iter(std::unique_ptr<GrGLSLFragmentProcessor> fps[], int cnt) {
+ for (int i = cnt - 1; i >= 0; --i) {
+ fFPStack.push_back(fps[i].get());
+ }
+ }
+ GrGLSLFragmentProcessor* next();
+
+ private:
+ SkSTArray<4, GrGLSLFragmentProcessor*, true> fFPStack;
+ };
+
+protected:
+ /** A GrGLSLFragmentProcessor instance can be reused with any GrFragmentProcessor that produces
+ the same stage key; this function reads data from a GrFragmentProcessor and uploads any
+ uniform variables required by the shaders created in emitCode(). The GrFragmentProcessor
+ parameter is guaranteed to be of the same type that created this GrGLSLFragmentProcessor and
+ to have an identical processor key as the one that created this GrGLSLFragmentProcessor. */
+ virtual void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) {}
+
+private:
+ void writeChildCall(GrGLSLFPFragmentBuilder* fragBuilder, int childIndex,
+ TransformedCoordVars coordVars, const char* inputColor,
+ const char* outputColor, EmitArgs& args,
+ SkSL::String skslCoords);
+
+ void internalInvokeChild(int, const char*, const char*, EmitArgs&, SkSL::String);
+
+ // one per child; either not present or empty string if not yet emitted
+ SkTArray<SkString> fFunctionNames;
+
+ SkTArray<GrGLSLFragmentProcessor*, true> fChildProcessors;
+
+ friend class GrFragmentProcessor;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp
new file mode 100644
index 0000000000..85dc827f73
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/gl/GrGLGpu.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+const char* GrGLSLFragmentShaderBuilder::kDstColorName = "_dstColor";
+
+static const char* specific_layout_qualifier_name(GrBlendEquation equation) {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+
+ static const char* kLayoutQualifierNames[] = {
+ "blend_support_screen",
+ "blend_support_overlay",
+ "blend_support_darken",
+ "blend_support_lighten",
+ "blend_support_colordodge",
+ "blend_support_colorburn",
+ "blend_support_hardlight",
+ "blend_support_softlight",
+ "blend_support_difference",
+ "blend_support_exclusion",
+ "blend_support_multiply",
+ "blend_support_hsl_hue",
+ "blend_support_hsl_saturation",
+ "blend_support_hsl_color",
+ "blend_support_hsl_luminosity"
+ };
+ return kLayoutQualifierNames[equation - kFirstAdvancedGrBlendEquation];
+
+ GR_STATIC_ASSERT(0 == kScreen_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(1 == kOverlay_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(2 == kDarken_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(3 == kLighten_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(4 == kColorDodge_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(5 == kColorBurn_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(6 == kHardLight_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(7 == kSoftLight_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(8 == kDifference_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(9 == kExclusion_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(10 == kMultiply_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(11 == kHSLHue_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(12 == kHSLSaturation_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(13 == kHSLColor_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(14 == kHSLLuminosity_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ // There's an illegal GrBlendEquation at the end there, hence the -1.
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kLayoutQualifierNames) ==
+ kGrBlendEquationCnt - kFirstAdvancedGrBlendEquation - 1);
+}
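+
+// The returned name becomes a layout qualifier on the fragment outputs; in
+// KHR_blend_equation_advanced GLSL this looks roughly like (illustrative):
+//
+//     layout(blend_support_multiply) out;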
+
+uint8_t GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(GrSurfaceOrigin origin) {
+ SkASSERT(kTopLeft_GrSurfaceOrigin == origin || kBottomLeft_GrSurfaceOrigin == origin);
+ return origin + 1;
+
+ GR_STATIC_ASSERT(0 == kTopLeft_GrSurfaceOrigin);
+ GR_STATIC_ASSERT(1 == kBottomLeft_GrSurfaceOrigin);
+}
+
+GrGLSLFragmentShaderBuilder::GrGLSLFragmentShaderBuilder(GrGLSLProgramBuilder* program)
+ : GrGLSLFragmentBuilder(program) {
+ fSubstageIndices.push_back(0);
+}
+
+SkString GrGLSLFragmentShaderBuilder::ensureCoords2D(const GrShaderVar& coords) {
+ if (kFloat3_GrSLType != coords.getType() && kHalf3_GrSLType != coords.getType()) {
+ SkASSERT(kFloat2_GrSLType == coords.getType() || kHalf2_GrSLType == coords.getType());
+ return coords.getName();
+ }
+
+ SkString coords2D;
+ coords2D.printf("%s_ensure2D", coords.c_str());
+ this->codeAppendf("\tfloat2 %s = %s.xy / %s.z;", coords2D.c_str(), coords.c_str(),
+ coords.c_str());
+ return coords2D;
+}
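+
+// e.g. for a kFloat3 coord var named "tc" (illustrative), the printf above appends:
+//
+//     float2 tc_ensure2D = tc.xy / tc.z;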
+
+const char* GrGLSLFragmentShaderBuilder::sampleOffsets() {
+ SkASSERT(CustomFeatures::kSampleLocations & fProgramBuilder->processorFeatures());
+ SkDEBUGCODE(fUsedProcessorFeaturesThisStage_DebugOnly |= CustomFeatures::kSampleLocations);
+ SkDEBUGCODE(fUsedProcessorFeaturesAllStages_DebugOnly |= CustomFeatures::kSampleLocations);
+ return "_sampleOffsets";
+}
+
+void GrGLSLFragmentShaderBuilder::maskOffMultisampleCoverage(
+ const char* mask, ScopeFlags scopeFlags) {
+ const GrShaderCaps& shaderCaps = *fProgramBuilder->shaderCaps();
+ if (!shaderCaps.sampleVariablesSupport() && !shaderCaps.sampleVariablesStencilSupport()) {
+ SkDEBUGFAIL("Attempted to mask sample coverage without support.");
+ return;
+ }
+ if (const char* extension = shaderCaps.sampleVariablesExtensionString()) {
+ this->addFeature(1 << kSampleVariables_GLSLPrivateFeature, extension);
+ }
+
+ if (!fHasModifiedSampleMask) {
+ fHasModifiedSampleMask = true;
+ if (ScopeFlags::kTopLevel != scopeFlags) {
+ this->codePrependf("gl_SampleMask[0] = ~0;");
+ }
+ if (!(ScopeFlags::kInsideLoop & scopeFlags)) {
+ this->codeAppendf("gl_SampleMask[0] = (%s);", mask);
+ return;
+ }
+ }
+
+ this->codeAppendf("gl_SampleMask[0] &= (%s);", mask);
+}
+
+void GrGLSLFragmentShaderBuilder::applyFnToMultisampleMask(
+ const char* fn, const char* grad, ScopeFlags scopeFlags) {
+ SkASSERT(CustomFeatures::kSampleLocations & fProgramBuilder->processorFeatures());
+ SkDEBUGCODE(fUsedProcessorFeaturesThisStage_DebugOnly |= CustomFeatures::kSampleLocations);
+ SkDEBUGCODE(fUsedProcessorFeaturesAllStages_DebugOnly |= CustomFeatures::kSampleLocations);
+
+ int sampleCnt = fProgramBuilder->effectiveSampleCnt();
+ SkASSERT(sampleCnt > 1);
+
+ this->codeAppendf("{");
+
+ if (!grad) {
+ SkASSERT(fProgramBuilder->shaderCaps()->shaderDerivativeSupport());
+ // In order to use HW derivatives, our neighbors within the same primitive must also be
+ // executing the same code. A per-pixel branch makes this pre-condition impossible to
+ // fulfill.
+ SkASSERT(!(ScopeFlags::kInsidePerPixelBranch & scopeFlags));
+ this->codeAppendf("float2 grad = float2(dFdx(%s), dFdy(%s));", fn, fn);
+ this->codeAppendf("float fnwidth = fwidth(%s);", fn);
+ grad = "grad";
+ } else {
+ this->codeAppendf("float fnwidth = abs(%s.x) + abs(%s.y);", grad, grad);
+ }
+
+ this->codeAppendf("int mask = 0;");
+ this->codeAppendf("if (%s*2 < fnwidth) {", fn); // Are ANY samples inside the implicit fn?
+ this->codeAppendf( "if (%s*-2 >= fnwidth) {", fn); // Are ALL samples inside the implicit?
+ this->codeAppendf( "mask = ~0;");
+ this->codeAppendf( "} else for (int i = 0; i < %i; ++i) {", sampleCnt);
+ this->codeAppendf( "float fnsample = dot(%s, _sampleOffsets[i]) + %s;", grad, fn);
+ this->codeAppendf( "if (fnsample < 0) {");
+ this->codeAppendf( "mask |= (1 << i);");
+ this->codeAppendf( "}");
+ this->codeAppendf( "}");
+ this->codeAppendf("}");
+ this->maskOffMultisampleCoverage("mask", scopeFlags);
+
+ this->codeAppendf("}");
+}
+
+SkString GrGLSLFPFragmentBuilder::writeProcessorFunction(GrGLSLFragmentProcessor* fp,
+ GrGLSLFragmentProcessor::EmitArgs& args) {
+ this->onBeforeChildProcEmitCode();
+ this->nextStage();
+ if (!args.fFp.computeLocalCoordsInVertexShader() && args.fTransformedCoords.count() > 0) {
+ // we currently only support overriding a single coordinate pair
+ SkASSERT(args.fTransformedCoords.count() == 1);
+ const GrGLSLProgramDataManager::UniformHandle& mat =
+ args.fTransformedCoords[0].fUniformMatrix;
+ if (mat.isValid()) {
+ args.fUniformHandler->updateUniformVisibility(mat, kFragment_GrShaderFlag);
+ this->codeAppendf("_coords = (float3(_coords, 1) * %s).xy;\n",
+ args.fTransformedCoords[0].fMatrixCode.c_str());
+ }
+ }
+ this->codeAppendf("half4 %s;\n", args.fOutputColor);
+ fp->emitCode(args);
+ this->codeAppendf("return %s;\n", args.fOutputColor);
+ GrShaderVar params[] = { GrShaderVar(args.fInputColor, kHalf4_GrSLType),
+ GrShaderVar("_coords", kFloat2_GrSLType) };
+ SkString result;
+ this->emitFunction(kHalf4_GrSLType,
+ "stage",
+ args.fFp.computeLocalCoordsInVertexShader() ? 1 : 2,
+ params,
+ this->code().c_str(),
+ &result);
+ this->deleteStage();
+ this->onAfterChildProcEmitCode();
+ return result;
+}
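+
+// The helper emitted above has roughly this GLSL shape (illustrative; real names are
+// stage-mangled, and the _coords parameter is only present when local coords are not
+// computed in the vertex shader):
+//
+//     half4 stage(half4 inColor, float2 _coords) {
+//         half4 output;
+//         /* fragment processor code writes output */
+//         return output;
+//     }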
+
+const char* GrGLSLFragmentShaderBuilder::dstColor() {
+ SkDEBUGCODE(fHasReadDstColorThisStage_DebugOnly = true;)
+
+ const GrShaderCaps* shaderCaps = fProgramBuilder->shaderCaps();
+ if (shaderCaps->fbFetchSupport()) {
+ this->addFeature(1 << kFramebufferFetch_GLSLPrivateFeature,
+ shaderCaps->fbFetchExtensionString());
+
+ // Some versions of this extension string require declaring custom color output on ES 3.0+
+ const char* fbFetchColorName = "sk_LastFragColor";
+ if (shaderCaps->fbFetchNeedsCustomOutput()) {
+ this->enableCustomOutput();
+ fOutputs[fCustomColorOutputIndex].setTypeModifier(GrShaderVar::kInOut_TypeModifier);
+ fbFetchColorName = DeclaredColorOutputName();
+ // Set the dstColor to an intermediate variable so we don't override it with the output
+ this->codeAppendf("half4 %s = %s;", kDstColorName, fbFetchColorName);
+ } else {
+ return fbFetchColorName;
+ }
+ }
+ return kDstColorName;
+}
+
+void GrGLSLFragmentShaderBuilder::enableAdvancedBlendEquationIfNeeded(GrBlendEquation equation) {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+
+ const GrShaderCaps& caps = *fProgramBuilder->shaderCaps();
+ if (!caps.mustEnableAdvBlendEqs()) {
+ return;
+ }
+
+ this->addFeature(1 << kBlendEquationAdvanced_GLSLPrivateFeature,
+ "GL_KHR_blend_equation_advanced");
+ if (caps.mustEnableSpecificAdvBlendEqs()) {
+ this->addLayoutQualifier(specific_layout_qualifier_name(equation), kOut_InterfaceQualifier);
+ } else {
+ this->addLayoutQualifier("blend_support_all_equations", kOut_InterfaceQualifier);
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::enableCustomOutput() {
+ if (!fHasCustomColorOutput) {
+ fHasCustomColorOutput = true;
+ fCustomColorOutputIndex = fOutputs.count();
+ fOutputs.push_back().set(kHalf4_GrSLType, DeclaredColorOutputName(),
+ GrShaderVar::kOut_TypeModifier);
+ fProgramBuilder->finalizeFragmentOutputColor(fOutputs.back());
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::enableSecondaryOutput() {
+ SkASSERT(!fHasSecondaryOutput);
+ fHasSecondaryOutput = true;
+ const GrShaderCaps& caps = *fProgramBuilder->shaderCaps();
+ if (const char* extension = caps.secondaryOutputExtensionString()) {
+ this->addFeature(1 << kBlendFuncExtended_GLSLPrivateFeature, extension);
+ }
+
+    // If the primary output is declared, we must also declare the secondary output
+    // and vice versa, since it is not allowed to mix the built-in gl_FragColor with a custom
+    // output. The condition also coincides with the condition under which GLES SL 2.0
+    // requires the built-in gl_SecondaryFragColorEXT, whereas 3.0 requires a custom output.
+ if (caps.mustDeclareFragmentShaderOutput()) {
+ fOutputs.push_back().set(kHalf4_GrSLType, DeclaredSecondaryColorOutputName(),
+ GrShaderVar::kOut_TypeModifier);
+ fProgramBuilder->finalizeFragmentSecondaryColor(fOutputs.back());
+ }
+}
+
+const char* GrGLSLFragmentShaderBuilder::getPrimaryColorOutputName() const {
+ return fHasCustomColorOutput ? DeclaredColorOutputName() : "sk_FragColor";
+}
+
+bool GrGLSLFragmentShaderBuilder::primaryColorOutputIsInOut() const {
+ return fHasCustomColorOutput &&
+ fOutputs[fCustomColorOutputIndex].getTypeModifier() == GrShaderVar::kInOut_TypeModifier;
+}
+
+void GrGLSLFragmentBuilder::declAppendf(const char* fmt, ...) {
+ va_list argp;
+ va_start(argp, fmt);
+ inputs().appendVAList(fmt, argp);
+ va_end(argp);
+}
+
+const char* GrGLSLFragmentShaderBuilder::getSecondaryColorOutputName() const {
+ if (this->hasSecondaryOutput()) {
+ return (fProgramBuilder->shaderCaps()->mustDeclareFragmentShaderOutput())
+ ? DeclaredSecondaryColorOutputName()
+ : "gl_SecondaryFragColorEXT";
+ }
+ return nullptr;
+}
+
+GrSurfaceOrigin GrGLSLFragmentShaderBuilder::getSurfaceOrigin() const {
+ return fProgramBuilder->origin();
+}
+
+void GrGLSLFragmentShaderBuilder::onFinalize() {
+ SkASSERT(fProgramBuilder->processorFeatures() == fUsedProcessorFeaturesAllStages_DebugOnly);
+
+ if (CustomFeatures::kSampleLocations & fProgramBuilder->processorFeatures()) {
+ const SkTArray<SkPoint>& sampleLocations = fProgramBuilder->getSampleLocations();
+ this->definitions().append("const float2 _sampleOffsets[] = float2[](");
+ for (int i = 0; i < sampleLocations.count(); ++i) {
+ SkPoint offset = sampleLocations[i] - SkPoint::Make(.5f, .5f);
+ if (kBottomLeft_GrSurfaceOrigin == this->getSurfaceOrigin()) {
+ offset.fY = -offset.fY;
+ }
+ this->definitions().appendf("float2(%f, %f)", offset.x(), offset.y());
+ this->definitions().append((i + 1 != sampleLocations.count()) ? ", " : ");");
+ }
+ }
+
+ fProgramBuilder->varyingHandler()->getFragDecls(&this->inputs(), &this->outputs());
+}
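+
+// For a hypothetical 2-sample configuration, the loop above emits something like:
+//
+//     const float2 _sampleOffsets[] = float2[](float2(0.25, 0.25), float2(-0.25, -0.25));
+//
+// where each entry is a sample location relative to pixel center, with Y negated for
+// bottom-left-origin surfaces.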
+
+void GrGLSLFragmentShaderBuilder::onBeforeChildProcEmitCode() {
+ SkASSERT(fSubstageIndices.count() >= 1);
+ fSubstageIndices.push_back(0);
+ // second-to-last value in the fSubstageIndices stack is the index of the child proc
+ // at that level which is currently emitting code.
+ fMangleString.appendf("_c%d", fSubstageIndices[fSubstageIndices.count() - 2]);
+}
+
+void GrGLSLFragmentShaderBuilder::onAfterChildProcEmitCode() {
+ SkASSERT(fSubstageIndices.count() >= 2);
+ fSubstageIndices.pop_back();
+ fSubstageIndices.back()++;
+ int removeAt = fMangleString.findLastOf('_');
+ fMangleString.remove(removeAt, fMangleString.size() - removeAt);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
new file mode 100644
index 0000000000..6d5bfe862c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLFragmentShaderBuilder_DEFINED
+#define GrGLSLFragmentShaderBuilder_DEFINED
+
+#include "src/gpu/GrBlend.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLShaderBuilder.h"
+
+class GrRenderTarget;
+class GrGLSLVarying;
+
+/*
+ * This base class encapsulates the common functionality which all processors use to build fragment
+ * shaders.
+ */
+class GrGLSLFragmentBuilder : public GrGLSLShaderBuilder {
+public:
+ GrGLSLFragmentBuilder(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+ virtual ~GrGLSLFragmentBuilder() {}
+
+ /**
+ * This returns a variable name to access the 2D, perspective correct version of the coords in
+     * the fragment shader. The passed-in coordinates must be a 2- or 3-component float or half
+     * vector. If the coordinates are 3-dimensional, a perspective divide (xy / z) is emitted
+     * into the fragment shader to convert them to 2D.
+ */
+ virtual SkString ensureCoords2D(const GrShaderVar&) = 0;
+
+ // TODO: remove this method.
+ void declAppendf(const char* fmt, ...);
+
+private:
+ typedef GrGLSLShaderBuilder INHERITED;
+};
+
+/*
+ * This class is used by fragment processors to build their fragment code.
+ */
+class GrGLSLFPFragmentBuilder : virtual public GrGLSLFragmentBuilder {
+public:
+ /** Appease the compiler; the derived class initializes GrGLSLFragmentBuilder. */
+ GrGLSLFPFragmentBuilder() : GrGLSLFragmentBuilder(nullptr) {}
+
+ /**
+ * Returns the variable name that holds the array of sample offsets from pixel center to each
+ * sample location. Before this is called, a processor must have advertised that it will use
+ * CustomFeatures::kSampleLocations.
+ */
+ virtual const char* sampleOffsets() = 0;
+
+ enum class ScopeFlags {
+ // Every fragment will always execute this code, and will do it exactly once.
+ kTopLevel = 0,
+ // Either all fragments in a given primitive, or none, will execute this code.
+ kInsidePerPrimitiveBranch = (1 << 0),
+ // Any given fragment may or may not execute this code.
+ kInsidePerPixelBranch = (1 << 1),
+ // This code will be executed more than once.
+ kInsideLoop = (1 << 2)
+ };
+
+ /**
+ * Subtracts multisample coverage by AND-ing the sample mask with the provided "mask".
+ * Sample N corresponds to bit "1 << N".
+ *
+ * If the given scope is "kTopLevel" and the sample mask has not yet been modified, this method
+ * assigns the sample mask in place rather than pre-initializing it to ~0 then AND-ing it.
+ *
+ * Requires MSAA and GLSL support for sample variables.
+ */
+ virtual void maskOffMultisampleCoverage(const char* mask, ScopeFlags) = 0;
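+
+    // e.g. (illustrative) with a precomputed integer sample mask held in a GLSL variable "m":
+    //
+    //     fragBuilder->maskOffMultisampleCoverage("m", ScopeFlags::kTopLevel);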
+
+ /**
+ * Turns off coverage at each sample where the implicit function fn > 0.
+ *
+ * The provided "fn" value represents the implicit function at pixel center. We then approximate
+ * the implicit at each sample by riding the gradient, "grad", linearly from pixel center to
+ * each sample location.
+ *
+ * If "grad" is null, we approximate the gradient using HW derivatives.
+ *
+ * Requires MSAA and GLSL support for sample variables. Also requires HW derivatives if not
+ * providing a gradient.
+ */
+ virtual void applyFnToMultisampleMask(const char* fn, const char* grad, ScopeFlags) = 0;
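+
+    // An illustrative sketch, not upstream code: per-sample coverage for a circle of radius
+    // "r" centered at the origin, where "p" is an assumed pixel-center coordinate and the
+    // analytic gradient of length(p) - r is p / length(p):
+    //
+    //     fragBuilder->codeAppend("float fn = length(p) - r;");
+    //     fragBuilder->codeAppend("float2 grad = p / length(p);");
+    //     fragBuilder->applyFnToMultisampleMask("fn", "grad", ScopeFlags::kTopLevel);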
+
+ /**
+ * Fragment procs with child procs should call these functions before/after calling emitCode
+ * on a child proc.
+ */
+ virtual void onBeforeChildProcEmitCode() = 0;
+ virtual void onAfterChildProcEmitCode() = 0;
+
+ virtual SkString writeProcessorFunction(GrGLSLFragmentProcessor* fp,
+ GrGLSLFragmentProcessor::EmitArgs& args);
+
+ virtual const SkString& getMangleString() const = 0;
+
+ virtual void forceHighPrecision() = 0;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrGLSLFPFragmentBuilder::ScopeFlags);
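+
+// The macro above defines the usual bitwise operators for ScopeFlags, so call sites can
+// combine flags, e.g. (illustrative):
+//
+//     auto flags = GrGLSLFPFragmentBuilder::ScopeFlags::kInsidePerPixelBranch |
+//                  GrGLSLFPFragmentBuilder::ScopeFlags::kInsideLoop;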
+
+/*
+ * This class is used by Xfer processors to build their fragment code.
+ */
+class GrGLSLXPFragmentBuilder : virtual public GrGLSLFragmentBuilder {
+public:
+ /** Appease the compiler; the derived class initializes GrGLSLFragmentBuilder. */
+ GrGLSLXPFragmentBuilder() : GrGLSLFragmentBuilder(nullptr) {}
+
+ virtual bool hasCustomColorOutput() const = 0;
+ virtual bool hasSecondaryOutput() const = 0;
+
+ /** Returns the variable name that holds the color of the destination pixel. This may be nullptr
+ * if no effect advertised that it will read the destination. */
+ virtual const char* dstColor() = 0;
+
+ /** Adds any necessary layout qualifiers in order to legalize the supplied blend equation with
+ this shader. It is only legal to call this method with an advanced blend equation, and only
+ if these equations are supported. */
+ virtual void enableAdvancedBlendEquationIfNeeded(GrBlendEquation) = 0;
+};
+
+/*
+ * This class implements the various fragment builder interfaces.
+ */
+class GrGLSLFragmentShaderBuilder : public GrGLSLFPFragmentBuilder, public GrGLSLXPFragmentBuilder {
+public:
+ /** Returns a nonzero key for a surface's origin. This should only be called if a processor will
+ use the fragment position and/or sample locations. */
+ static uint8_t KeyForSurfaceOrigin(GrSurfaceOrigin);
+
+ GrGLSLFragmentShaderBuilder(GrGLSLProgramBuilder* program);
+
+ // Shared GrGLSLFragmentBuilder interface.
+    SkString ensureCoords2D(const GrShaderVar&) override;
+
+ // GrGLSLFPFragmentBuilder interface.
+ const char* sampleOffsets() override;
+ void maskOffMultisampleCoverage(const char* mask, ScopeFlags) override;
+ void applyFnToMultisampleMask(const char* fn, const char* grad, ScopeFlags) override;
+ const SkString& getMangleString() const override { return fMangleString; }
+ void onBeforeChildProcEmitCode() override;
+ void onAfterChildProcEmitCode() override;
+ void forceHighPrecision() override { fForceHighPrecision = true; }
+
+ // GrGLSLXPFragmentBuilder interface.
+ bool hasCustomColorOutput() const override { return fHasCustomColorOutput; }
+ bool hasSecondaryOutput() const override { return fHasSecondaryOutput; }
+ const char* dstColor() override;
+ void enableAdvancedBlendEquationIfNeeded(GrBlendEquation) override;
+
+private:
+ using CustomFeatures = GrProcessor::CustomFeatures;
+
+    // Private interface used by the friended GrGLProgramBuilder to build a fragment shader.
+ void enableCustomOutput();
+ void enableSecondaryOutput();
+ const char* getPrimaryColorOutputName() const;
+ const char* getSecondaryColorOutputName() const;
+ bool primaryColorOutputIsInOut() const;
+
+#ifdef SK_DEBUG
+ // As GLSLProcessors emit code, there are some conditions we need to verify. We use the below
+ // state to track this. The reset call is called per processor emitted.
+ bool fHasReadDstColorThisStage_DebugOnly = false;
+ CustomFeatures fUsedProcessorFeaturesThisStage_DebugOnly = CustomFeatures::kNone;
+ CustomFeatures fUsedProcessorFeaturesAllStages_DebugOnly = CustomFeatures::kNone;
+
+ void debugOnly_resetPerStageVerification() {
+ fHasReadDstColorThisStage_DebugOnly = false;
+ fUsedProcessorFeaturesThisStage_DebugOnly = CustomFeatures::kNone;
+ }
+#endif
+
+ static const char* DeclaredColorOutputName() { return "sk_FragColor"; }
+ static const char* DeclaredSecondaryColorOutputName() { return "fsSecondaryColorOut"; }
+
+ GrSurfaceOrigin getSurfaceOrigin() const;
+
+ void onFinalize() override;
+
+ static const char* kDstColorName;
+
+ /*
+ * State that tracks which child proc in the proc tree is currently emitting code. This is
+ * used to update the fMangleString, which is used to mangle the names of uniforms and functions
+ * emitted by the proc. fSubstageIndices is a stack: its count indicates how many levels deep
+ * we are in the tree, and its second-to-last value is the index of the child proc at that
+ * level which is currently emitting code. For example, if fSubstageIndices = [3, 1, 2, 0], that
+ * means we're currently emitting code for the base proc's 3rd child's 1st child's 2nd child.
+ */
+ SkTArray<int> fSubstageIndices;
+
+ /*
+ * The mangle string is used to mangle the names of uniforms/functions emitted by the child
+ * procs so no duplicate uniforms/functions appear in the generated shader program. The mangle
+ * string is simply based on fSubstageIndices. For example, if fSubstageIndices = [3, 1, 2, 0],
+ * then the manglestring will be "_c3_c1_c2", and any uniform/function emitted by that proc will
+ * have "_c3_c1_c2" appended to its name, which can be interpreted as "base proc's 3rd child's
+ * 1st child's 2nd child".
+ */
+ SkString fMangleString;
+
+ bool fSetupFragPosition = false;
+ bool fHasCustomColorOutput = false;
+ int fCustomColorOutputIndex = -1;
+ bool fHasSecondaryOutput = false;
+ bool fHasModifiedSampleMask = false;
+ bool fForceHighPrecision = false;
+
+ friend class GrGLSLProgramBuilder;
+ friend class GrGLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp
new file mode 100644
index 0000000000..d99239ebb9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+void GrGLSLGeometryProcessor::emitCode(EmitArgs& args) {
+ GrGPArgs gpArgs;
+ this->onEmitCode(args, &gpArgs);
+
+ GrGLSLVertexBuilder* vBuilder = args.fVertBuilder;
+ if (!args.fGP.willUseGeoShader()) {
+ // Emit the vertex position to the hardware in the normalized window coordinates it expects.
+ SkASSERT(kFloat2_GrSLType == gpArgs.fPositionVar.getType() ||
+ kFloat3_GrSLType == gpArgs.fPositionVar.getType());
+ vBuilder->emitNormalizedSkPosition(gpArgs.fPositionVar.c_str(), args.fRTAdjustName,
+ gpArgs.fPositionVar.getType());
+ if (kFloat2_GrSLType == gpArgs.fPositionVar.getType()) {
+ args.fVaryingHandler->setNoPerspective();
+ }
+ } else {
+ // Since we have a geometry shader, leave the vertex position in Skia device space for now.
+        // The geometry shader will operate in device space, and then convert the final positions to
+ // normalized hardware window coordinates under the hood, once everything else has finished.
+ // The subclass must call setNoPerspective on the varying handler, if applicable.
+ vBuilder->codeAppendf("sk_Position = float4(%s", gpArgs.fPositionVar.c_str());
+ switch (gpArgs.fPositionVar.getType()) {
+ case kFloat_GrSLType:
+ vBuilder->codeAppend(", 0"); // fallthru.
+ case kFloat2_GrSLType:
+ vBuilder->codeAppend(", 0"); // fallthru.
+ case kFloat3_GrSLType:
+ vBuilder->codeAppend(", 1"); // fallthru.
+ case kFloat4_GrSLType:
+ vBuilder->codeAppend(");");
+ break;
+ default:
+ SK_ABORT("Invalid position var type");
+ break;
+ }
+ }
+}
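+
+// The fallthrough switch above pads the position out to a float4, e.g. a kFloat2 position
+// variable "p" yields:
+//
+//     sk_Position = float4(p, 0, 1);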
+
+void GrGLSLGeometryProcessor::emitTransforms(GrGLSLVertexBuilder* vb,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderVar& localCoordsVar,
+ const SkMatrix& localMatrix,
+ FPCoordTransformHandler* handler) {
+ SkASSERT(GrSLTypeIsFloatType(localCoordsVar.getType()));
+ SkASSERT(2 == GrSLTypeVecLength(localCoordsVar.getType()) ||
+ 3 == GrSLTypeVecLength(localCoordsVar.getType()));
+
+ bool threeComponentLocalCoords = 3 == GrSLTypeVecLength(localCoordsVar.getType());
+ SkString localCoords;
+ if (threeComponentLocalCoords) {
+ localCoords = localCoordsVar.getName();
+ } else {
+ localCoords.printf("float3(%s, 1)", localCoordsVar.c_str());
+ }
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = handler->nextCoordTransform()) {
+ SkString strUniName;
+ strUniName.printf("CoordTransformMatrix_%d", i);
+ const char* uniName;
+ fInstalledTransforms.push_back().fHandle = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat3x3_GrSLType,
+ strUniName.c_str(),
+ &uniName).toIndex();
+ GrSLType varyingType = kFloat2_GrSLType;
+ if (localMatrix.hasPerspective() || coordTransform->getMatrix().hasPerspective()
+ || threeComponentLocalCoords) {
+ varyingType = kFloat3_GrSLType;
+ }
+ SkString strVaryingName;
+ strVaryingName.printf("TransformedCoords_%d", i);
+ GrGLSLVarying v(varyingType);
+ if (coordTransform->computeInVertexShader()) {
+ varyingHandler->addVarying(strVaryingName.c_str(), &v);
+
+ if (kFloat2_GrSLType == varyingType) {
+ vb->codeAppendf("%s = (%s * %s).xy;", v.vsOut(), uniName, localCoords.c_str());
+ } else {
+ vb->codeAppendf("%s = %s * %s;", v.vsOut(), uniName, localCoords.c_str());
+ }
+ }
+ handler->specifyCoordsForCurrCoordTransform(SkString(uniName),
+ fInstalledTransforms.back().fHandle,
+ GrShaderVar(SkString(v.fsIn()), varyingType));
+ ++i;
+ }
+}
+
+void GrGLSLGeometryProcessor::setTransformDataHelper(const SkMatrix& localMatrix,
+ const GrGLSLProgramDataManager& pdman,
+ FPCoordTransformIter* transformIter) {
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = transformIter->next()) {
+ const SkMatrix& m = GetTransformMatrix(localMatrix, *coordTransform);
+ if (!fInstalledTransforms[i].fCurrentValue.cheapEqualTo(m)) {
+ pdman.setSkMatrix(fInstalledTransforms[i].fHandle.toIndex(), m);
+ fInstalledTransforms[i].fCurrentValue = m;
+ }
+ ++i;
+ }
+ SkASSERT(i == fInstalledTransforms.count());
+}
+
+void GrGLSLGeometryProcessor::writeOutputPosition(GrGLSLVertexBuilder* vertBuilder,
+ GrGPArgs* gpArgs,
+ const char* posName) {
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "pos2");
+ vertBuilder->codeAppendf("float2 %s = %s;", gpArgs->fPositionVar.c_str(), posName);
+}
+
+void GrGLSLGeometryProcessor::writeOutputPosition(GrGLSLVertexBuilder* vertBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGPArgs* gpArgs,
+ const char* posName,
+ const SkMatrix& mat,
+ UniformHandle* viewMatrixUniform) {
+ if (mat.isIdentity()) {
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "pos2");
+ vertBuilder->codeAppendf("float2 %s = %s;", gpArgs->fPositionVar.c_str(), posName);
+ } else {
+ const char* viewMatrixName;
+ *viewMatrixUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kFloat3x3_GrSLType,
+ "uViewM",
+ &viewMatrixName);
+ if (!mat.hasPerspective()) {
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "pos2");
+ vertBuilder->codeAppendf("float2 %s = (%s * float3(%s, 1)).xy;",
+ gpArgs->fPositionVar.c_str(), viewMatrixName, posName);
+ } else {
+ gpArgs->fPositionVar.set(kFloat3_GrSLType, "pos3");
+ vertBuilder->codeAppendf("float3 %s = %s * float3(%s, 1);",
+ gpArgs->fPositionVar.c_str(), viewMatrixName, posName);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h
new file mode 100644
index 0000000000..eb34412687
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLGeometryProcessor_DEFINED
+#define GrGLSLGeometryProcessor_DEFINED
+
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+
+class GrGLSLGPBuilder;
+
+/**
+ * If a GL effect needs a GrGLFullShaderBuilder* object to emit vertex code, then it must inherit
+ * from this class. Since paths don't have vertices, this class is only meant to be used internally
+ * by Skia, for special cases.
+ */
+class GrGLSLGeometryProcessor : public GrGLSLPrimitiveProcessor {
+public:
+ /* Any general emit code goes in the base class emitCode. Subclasses override onEmitCode */
+ void emitCode(EmitArgs&) final;
+
+protected:
+ // A helper which subclasses can use to upload coord transform matrices in setData().
+ void setTransformDataHelper(const SkMatrix& localMatrix,
+ const GrGLSLProgramDataManager& pdman,
+ FPCoordTransformIter*);
+
+ // Emit transformed local coords from the vertex shader as a uniform matrix and varying per
+ // coord-transform. localCoordsVar must be a 2- or 3-component vector. If it is 3 then it is
+ // assumed to be a 2D homogeneous coordinate.
+ void emitTransforms(GrGLSLVertexBuilder*,
+ GrGLSLVaryingHandler*,
+ GrGLSLUniformHandler*,
+ const GrShaderVar& localCoordsVar,
+ const SkMatrix& localMatrix,
+ FPCoordTransformHandler*);
+
+ // Version of above that assumes identity for the local matrix.
+ void emitTransforms(GrGLSLVertexBuilder* vb,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderVar& localCoordsVar,
+ FPCoordTransformHandler* handler) {
+ this->emitTransforms(vb, varyingHandler, uniformHandler, localCoordsVar, SkMatrix::I(),
+ handler);
+ }
+
+ struct GrGPArgs {
+ // Used to specify the output variable used by the GP to store its device position. It can
+ // either be a float2 or a float3 (in order to handle perspective). The subclass sets this
+ // in its onEmitCode().
+ GrShaderVar fPositionVar;
+ };
+
+ // Helpers for adding code to write the transformed vertex position. The first simple version
+ // just writes a variable named by 'posName' into the position output variable with the
+ // assumption that the position is 2D. The second version transforms the input position by a
+ // view matrix and the output variable is 2D or 3D depending on whether the view matrix is
+ // perspective. Both versions declare the output position variable and will set
+ // GrGPArgs::fPositionVar.
+ void writeOutputPosition(GrGLSLVertexBuilder*, GrGPArgs*, const char* posName);
+ void writeOutputPosition(GrGLSLVertexBuilder*,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGPArgs*,
+ const char* posName,
+ const SkMatrix& mat,
+ UniformHandle* viewMatrixUniform);
+
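+    // An illustrative sketch, not upstream code: a subclass's onEmitCode() might write the
+    // device position from a hypothetical "inPosition" attribute and view matrix like so:
+    //
+    //     void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+    //         this->writeOutputPosition(args.fVertBuilder, args.fUniformHandler, gpArgs,
+    //                                   "inPosition", viewMatrix, &fViewMatrixUniform);
+    //     }
+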
+ static uint32_t ComputePosKey(const SkMatrix& mat) {
+ if (mat.isIdentity()) {
+ return 0x0;
+ } else if (!mat.hasPerspective()) {
+ return 0x01;
+ } else {
+ return 0x02;
+ }
+ }
+
+private:
+ virtual void onEmitCode(EmitArgs&, GrGPArgs*) = 0;
+
+ struct TransformUniform {
+ UniformHandle fHandle;
+ SkMatrix fCurrentValue = SkMatrix::InvalidMatrix();
+ };
+
+ SkTArray<TransformUniform, true> fInstalledTransforms;
+
+ typedef GrGLSLPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp
new file mode 100644
index 0000000000..9d35c44960
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+SkMatrix GrGLSLPrimitiveProcessor::GetTransformMatrix(const SkMatrix& localMatrix,
+ const GrCoordTransform& coordTransform) {
+ SkMatrix combined;
+ combined.setConcat(coordTransform.getMatrix(), localMatrix);
+ if (coordTransform.normalize()) {
+ combined.postIDiv(coordTransform.peekTexture()->width(),
+ coordTransform.peekTexture()->height());
+ }
+
+ if (coordTransform.reverseY()) {
+ if (coordTransform.normalize()) {
+ // combined.postScale(1,-1);
+ // combined.postTranslate(0,1);
+ combined.set(SkMatrix::kMSkewY,
+ combined[SkMatrix::kMPersp0] - combined[SkMatrix::kMSkewY]);
+ combined.set(SkMatrix::kMScaleY,
+ combined[SkMatrix::kMPersp1] - combined[SkMatrix::kMScaleY]);
+ combined.set(SkMatrix::kMTransY,
+ combined[SkMatrix::kMPersp2] - combined[SkMatrix::kMTransY]);
+ } else {
+ // combined.postScale(1, -1);
+            // combined.postTranslate(0, h);  // i.e. translate by the texture height
+ SkScalar h = coordTransform.peekTexture()->height();
+ combined.set(SkMatrix::kMSkewY,
+ h * combined[SkMatrix::kMPersp0] - combined[SkMatrix::kMSkewY]);
+ combined.set(SkMatrix::kMScaleY,
+ h * combined[SkMatrix::kMPersp1] - combined[SkMatrix::kMScaleY]);
+ combined.set(SkMatrix::kMTransY,
+ h * combined[SkMatrix::kMPersp2] - combined[SkMatrix::kMTransY]);
+ }
+ }
+ return combined;
+}
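+
+// A worked note on the in-place math above: the commented-out postScale(1, -1) /
+// postTranslate pair applies y' = h - y on the left of the matrix, so each Y-row entry
+// becomes h * (corresponding perspective-row entry) - (old Y-row entry); the normalized
+// branch is simply the h == 1 case.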
+
+void GrGLSLPrimitiveProcessor::setupUniformColor(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* outputName,
+ UniformHandle* colorUniform) {
+ SkASSERT(colorUniform);
+ const char* stagedLocalVarName;
+ *colorUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType,
+ "Color",
+ &stagedLocalVarName);
+ fragBuilder->codeAppendf("%s = %s;", outputName, stagedLocalVarName);
+ if (fragBuilder->getProgramBuilder()->shaderCaps()->mustObfuscateUniformColor()) {
+ fragBuilder->codeAppendf("%s = max(%s, half4(0, 0, 0, 0));", outputName, outputName);
+ }
+}
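+
+// For an outputName of "outputColor" and a uniform the handler names "uColor" (both
+// illustrative), the code above appends:
+//
+//     outputColor = uColor;
+//
+// plus, on drivers that need the workaround:
+//
+//     outputColor = max(outputColor, half4(0, 0, 0, 0));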
+
+//////////////////////////////////////////////////////////////////////////////
+
+const GrCoordTransform* GrGLSLPrimitiveProcessor::FPCoordTransformHandler::nextCoordTransform() {
+#ifdef SK_DEBUG
+ SkASSERT(nullptr == fCurr || fAddedCoord);
+ fAddedCoord = false;
+ fCurr = fIter.next();
+ return fCurr;
+#else
+ return fIter.next();
+#endif
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h
new file mode 100644
index 0000000000..12fb74f114
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLPrimitiveProcessor_DEFINED
+#define GrGLSLPrimitiveProcessor_DEFINED
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrPrimitiveProcessor;
+class GrGLSLFPFragmentBuilder;
+class GrGLSLGeometryBuilder;
+class GrGLSLGPBuilder;
+class GrGLSLVaryingHandler;
+class GrGLSLVertexBuilder;
+class GrShaderCaps;
+
+class GrGLSLPrimitiveProcessor {
+public:
+ using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+ using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
+ using FPCoordTransformIter = GrFragmentProcessor::CoordTransformIter;
+
+ struct TransformVar {
+ TransformVar() = default;
+
+ TransformVar(SkString matrixCode, UniformHandle uniformMatrix, GrShaderVar varyingPoint)
+ : fMatrixCode(std::move(matrixCode))
+ , fUniformMatrix(uniformMatrix)
+ , fVaryingPoint(varyingPoint) {}
+
+ // a string of SkSL code which resolves to the transformation matrix
+ SkString fMatrixCode;
+ // the variable containing the matrix, if any, otherwise an invalid handle
+ UniformHandle fUniformMatrix;
+ // the transformed coordinate output by the vertex shader and consumed by the fragment
+ // shader
+ GrShaderVar fVaryingPoint;
+ };
+
+
+ virtual ~GrGLSLPrimitiveProcessor() {}
+
+ /**
+ * This class provides access to the GrCoordTransforms across all GrFragmentProcessors in a
+ * GrPipeline. It is also used by the primitive processor to specify the fragment shader
+ * variable that will hold the transformed coords for each GrCoordTransform. It is required that
+ * the primitive processor iterate over each coord transform and insert a shader var result for
+ * each. The GrGLSLFragmentProcessors will reference these variables in their fragment code.
+ */
+ class FPCoordTransformHandler : public SkNoncopyable {
+ public:
+ FPCoordTransformHandler(const GrPipeline& pipeline,
+ SkTArray<TransformVar>* transformedCoordVars)
+ : fIter(pipeline)
+ , fTransformedCoordVars(transformedCoordVars) {}
+
+ ~FPCoordTransformHandler() { SkASSERT(!this->nextCoordTransform());}
+
+ const GrCoordTransform* nextCoordTransform();
+
+ // 'args' are constructor params to GrShaderVar.
+ template<typename... Args>
+ void specifyCoordsForCurrCoordTransform(Args&&... args) {
+ SkASSERT(!fAddedCoord);
+ fTransformedCoordVars->emplace_back(std::forward<Args>(args)...);
+ SkDEBUGCODE(fAddedCoord = true;)
+ }
+
+ private:
+ GrFragmentProcessor::CoordTransformIter fIter;
+ SkDEBUGCODE(bool fAddedCoord = false;)
+ SkDEBUGCODE(const GrCoordTransform* fCurr = nullptr;)
+ SkTArray<TransformVar>* fTransformedCoordVars;
+ };
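+
+    // Typical use by a primitive processor (an illustrative sketch, not upstream code;
+    // "matrixCode", "uniformHandle", and the varying "v" are assumed to exist):
+    //
+    //     while (const GrCoordTransform* ct = handler->nextCoordTransform()) {
+    //         /* emit a varying "v" for ct, then: */
+    //         handler->specifyCoordsForCurrCoordTransform(
+    //                 SkString(matrixCode), uniformHandle,
+    //                 GrShaderVar(SkString(v.fsIn()), kFloat2_GrSLType));
+    //     }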
+
+ struct EmitArgs {
+ EmitArgs(GrGLSLVertexBuilder* vertBuilder,
+ GrGLSLGeometryBuilder* geomBuilder,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderCaps* caps,
+ const GrPrimitiveProcessor& gp,
+ const char* outputColor,
+ const char* outputCoverage,
+ const char* rtAdjustName,
+ const SamplerHandle* texSamplers,
+ FPCoordTransformHandler* transformHandler)
+ : fVertBuilder(vertBuilder)
+ , fGeomBuilder(geomBuilder)
+ , fFragBuilder(fragBuilder)
+ , fVaryingHandler(varyingHandler)
+ , fUniformHandler(uniformHandler)
+ , fShaderCaps(caps)
+ , fGP(gp)
+ , fOutputColor(outputColor)
+ , fOutputCoverage(outputCoverage)
+ , fRTAdjustName(rtAdjustName)
+ , fTexSamplers(texSamplers)
+ , fFPCoordTransformHandler(transformHandler) {}
+ GrGLSLVertexBuilder* fVertBuilder;
+ GrGLSLGeometryBuilder* fGeomBuilder;
+ GrGLSLFPFragmentBuilder* fFragBuilder;
+ GrGLSLVaryingHandler* fVaryingHandler;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrShaderCaps* fShaderCaps;
+ const GrPrimitiveProcessor& fGP;
+ const char* fOutputColor;
+ const char* fOutputCoverage;
+ const char* fRTAdjustName;
+ const SamplerHandle* fTexSamplers;
+ FPCoordTransformHandler* fFPCoordTransformHandler;
+ };
+
+ /**
+ * This is similar to emitCode() in the base class, except it takes a full shader builder.
+ * This allows the effect subclass to emit vertex code.
+ */
+ virtual void emitCode(EmitArgs&) = 0;
+
+ /**
+     * A GrGLSLPrimitiveProcessor instance can be reused with any GrPrimitiveProcessor that
+     * produces the same stage key; this function reads data from a GrPrimitiveProcessor and
+ * uploads any uniform variables required by the shaders created in emitCode(). The
+ * GrPrimitiveProcessor parameter is guaranteed to be of the same type and to have an
+ * identical processor key as the GrPrimitiveProcessor that created this
+ * GrGLSLPrimitiveProcessor.
+ * The subclass may use the transform iterator to perform any setup required for the particular
+ * set of fp transform matrices, such as uploading via uniforms. The iterator will iterate over
+ * the transforms in the same order as the TransformHandler passed to emitCode.
+ */
+ virtual void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) = 0;
+
+ static SkMatrix GetTransformMatrix(const SkMatrix& localMatrix, const GrCoordTransform&);
+
+protected:
+ void setupUniformColor(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* outputName,
+ UniformHandle* colorUniform);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp
new file mode 100644
index 0000000000..8cdf43dfa2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+#include "src/sksl/SkSLCompiler.h"
+
+const int GrGLSLProgramBuilder::kVarsPerBlock = 8;
+
+GrGLSLProgramBuilder::GrGLSLProgramBuilder(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ const GrProgramDesc* desc)
+ : fVS(this)
+ , fGS(this)
+ , fFS(this)
+ , fStageIndex(-1)
+ , fRenderTarget(renderTarget)
+ , fProgramInfo(programInfo)
+ , fDesc(desc)
+ , fGeometryProcessor(nullptr)
+ , fXferProcessor(nullptr)
+ , fNumFragmentSamplers(0) {}
+
+void GrGLSLProgramBuilder::addFeature(GrShaderFlags shaders,
+ uint32_t featureBit,
+ const char* extensionName) {
+ if (shaders & kVertex_GrShaderFlag) {
+ fVS.addFeature(featureBit, extensionName);
+ }
+ if (shaders & kGeometry_GrShaderFlag) {
+ SkASSERT(this->primitiveProcessor().willUseGeoShader());
+ fGS.addFeature(featureBit, extensionName);
+ }
+ if (shaders & kFragment_GrShaderFlag) {
+ fFS.addFeature(featureBit, extensionName);
+ }
+}
+
+bool GrGLSLProgramBuilder::emitAndInstallProcs() {
+ // First we loop over all of the installed processors and collect coord transforms. These will
+    // be sent to the GrGLSLPrimitiveProcessor in its emitCode function.
+ SkString inputColor;
+ SkString inputCoverage;
+ this->emitAndInstallPrimProc(&inputColor, &inputCoverage);
+ this->emitAndInstallFragProcs(&inputColor, &inputCoverage);
+ this->emitAndInstallXferProc(inputColor, inputCoverage);
+
+ return this->checkSamplerCounts();
+}
+
+void GrGLSLProgramBuilder::emitAndInstallPrimProc(SkString* outputColor,
+ SkString* outputCoverage) {
+ const GrPrimitiveProcessor& proc = this->primitiveProcessor();
+
+ // Because all the texture properties must be consistent between all the dynamic and fixed
+ // primProc proxies, we just deal w/ the first set of dynamic proxies or the set of fixed
+ // proxies here.
+ const GrTextureProxy* const* primProcProxies = nullptr;
+ if (fProgramInfo.hasDynamicPrimProcTextures()) {
+ primProcProxies = fProgramInfo.dynamicPrimProcTextures(0);
+ } else if (fProgramInfo.hasFixedPrimProcTextures()) {
+ primProcProxies = fProgramInfo.fixedPrimProcTextures();
+ }
+
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+ this->nameExpression(outputColor, "outputColor");
+ this->nameExpression(outputCoverage, "outputCoverage");
+
+ SkASSERT(!fUniformHandles.fRTAdjustmentUni.isValid());
+ GrShaderFlags rtAdjustVisibility;
+ if (proc.willUseGeoShader()) {
+ rtAdjustVisibility = kGeometry_GrShaderFlag;
+ } else {
+ rtAdjustVisibility = kVertex_GrShaderFlag;
+ }
+ fUniformHandles.fRTAdjustmentUni = this->uniformHandler()->addUniform(
+ rtAdjustVisibility,
+ kFloat4_GrSLType,
+ SkSL::Compiler::RTADJUST_NAME);
+ const char* rtAdjustName =
+ this->uniformHandler()->getUniformCStr(fUniformHandles.fRTAdjustmentUni);
+
+ // Enclose custom code in a block to avoid namespace conflicts
+ SkString openBrace;
+ openBrace.printf("{ // Stage %d, %s\n", fStageIndex, proc.name());
+ fFS.codeAppend(openBrace.c_str());
+ fVS.codeAppendf("// Primitive Processor %s\n", proc.name());
+
+ SkASSERT(!fGeometryProcessor);
+ fGeometryProcessor.reset(proc.createGLSLInstance(*this->shaderCaps()));
+
+ SkAutoSTMalloc<4, SamplerHandle> texSamplers(proc.numTextureSamplers());
+ for (int i = 0; i < proc.numTextureSamplers(); ++i) {
+ SkString name;
+ name.printf("TextureSampler_%d", i);
+ const auto& sampler = proc.textureSampler(i);
+ SkASSERT(sampler.textureType() == primProcProxies[i]->textureType());
+ texSamplers[i] = this->emitSampler(primProcProxies[i],
+ sampler.samplerState(),
+ sampler.swizzle(),
+ name.c_str());
+ }
+
+ GrGLSLPrimitiveProcessor::FPCoordTransformHandler transformHandler(this->pipeline(),
+ &fTransformedCoordVars);
+ GrGLSLGeometryProcessor::EmitArgs args(&fVS,
+ proc.willUseGeoShader() ? &fGS : nullptr,
+ &fFS,
+ this->varyingHandler(),
+ this->uniformHandler(),
+ this->shaderCaps(),
+ proc,
+ outputColor->c_str(),
+ outputCoverage->c_str(),
+ rtAdjustName,
+ texSamplers.get(),
+ &transformHandler);
+ fGeometryProcessor->emitCode(args);
+
+    // We have to check that effects and the code they emit are consistent, i.e. if an effect
+    // asks for dst color, then the emitted code needs to follow suit.
+ SkDEBUGCODE(verify(proc);)
+
+ fFS.codeAppend("}");
+}
+
+void GrGLSLProgramBuilder::emitAndInstallFragProcs(SkString* color, SkString* coverage) {
+ int transformedCoordVarsIdx = 0;
+ SkString** inOut = &color;
+ SkSTArray<8, std::unique_ptr<GrGLSLFragmentProcessor>> glslFragmentProcessors;
+ for (int i = 0; i < this->pipeline().numFragmentProcessors(); ++i) {
+ if (i == this->pipeline().numColorFragmentProcessors()) {
+ inOut = &coverage;
+ }
+ SkString output;
+ const GrFragmentProcessor& fp = this->pipeline().getFragmentProcessor(i);
+ output = this->emitAndInstallFragProc(fp, i, transformedCoordVarsIdx, **inOut, output,
+ &glslFragmentProcessors);
+ GrFragmentProcessor::Iter iter(&fp);
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ transformedCoordVarsIdx += fp->numCoordTransforms();
+ }
+ **inOut = output;
+ }
+ fFragmentProcessorCnt = glslFragmentProcessors.count();
+ fFragmentProcessors.reset(new std::unique_ptr<GrGLSLFragmentProcessor>[fFragmentProcessorCnt]);
+ for (int i = 0; i < fFragmentProcessorCnt; ++i) {
+ fFragmentProcessors[i] = std::move(glslFragmentProcessors[i]);
+ }
+}
+
+// TODO: Processors cannot output zeros because an empty string is treated as all 1s;
+// the fix is to allow effects to take the SkString directly.
+SkString GrGLSLProgramBuilder::emitAndInstallFragProc(
+ const GrFragmentProcessor& fp,
+ int index,
+ int transformedCoordVarsIdx,
+ const SkString& input,
+ SkString output,
+ SkTArray<std::unique_ptr<GrGLSLFragmentProcessor>>* glslFragmentProcessors) {
+ SkASSERT(input.size());
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+ this->nameExpression(&output, "output");
+
+ // Enclose custom code in a block to avoid namespace conflicts
+ SkString openBrace;
+ openBrace.printf("{ // Stage %d, %s\n", fStageIndex, fp.name());
+ fFS.codeAppend(openBrace.c_str());
+
+ GrGLSLFragmentProcessor* fragProc = fp.createGLSLInstance();
+
+ SkSTArray<4, SamplerHandle> texSamplers;
+ GrFragmentProcessor::Iter fpIter(&fp);
+ int samplerIdx = 0;
+ while (const auto* subFP = fpIter.next()) {
+ for (int i = 0; i < subFP->numTextureSamplers(); ++i) {
+ SkString name;
+ name.printf("TextureSampler_%d", samplerIdx++);
+ const auto& sampler = subFP->textureSampler(i);
+ texSamplers.emplace_back(this->emitSampler(sampler.proxy(),
+ sampler.samplerState(),
+ sampler.swizzle(),
+ name.c_str()));
+ }
+ }
+
+ const GrGLSLPrimitiveProcessor::TransformVar* coordVars = fTransformedCoordVars.begin() +
+ transformedCoordVarsIdx;
+ GrGLSLFragmentProcessor::TransformedCoordVars coords(&fp, coordVars);
+ GrGLSLFragmentProcessor::TextureSamplers textureSamplers(&fp, texSamplers.begin());
+ GrGLSLFragmentProcessor::EmitArgs args(&fFS,
+ this->uniformHandler(),
+ this->shaderCaps(),
+ fp,
+ output.c_str(),
+ input.c_str(),
+ coords,
+ textureSamplers);
+
+ fragProc->emitCode(args);
+
+    // We have to check that effects and the code they emit are consistent, i.e. if an effect
+    // asks for dst color, then the emitted code needs to follow suit.
+ SkDEBUGCODE(verify(fp);)
+ glslFragmentProcessors->emplace_back(fragProc);
+
+ fFS.codeAppend("}");
+ return output;
+}
+
+void GrGLSLProgramBuilder::emitAndInstallXferProc(const SkString& colorIn,
+ const SkString& coverageIn) {
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+
+ SkASSERT(!fXferProcessor);
+ const GrXferProcessor& xp = this->pipeline().getXferProcessor();
+ fXferProcessor.reset(xp.createGLSLInstance());
+
+ // Enable dual source secondary output if we have one
+ if (xp.hasSecondaryOutput()) {
+ fFS.enableSecondaryOutput();
+ }
+
+ if (this->shaderCaps()->mustDeclareFragmentShaderOutput()) {
+ fFS.enableCustomOutput();
+ }
+
+ SkString openBrace;
+ openBrace.printf("{ // Xfer Processor: %s\n", xp.name());
+ fFS.codeAppend(openBrace.c_str());
+
+ SamplerHandle dstTextureSamplerHandle;
+ GrSurfaceOrigin dstTextureOrigin = kTopLeft_GrSurfaceOrigin;
+
+ if (GrTextureProxy* dstTextureProxy = this->pipeline().dstTextureProxy()) {
+ // GrProcessor::TextureSampler sampler(dstTexture);
+ const GrSwizzle& swizzle = dstTextureProxy->textureSwizzle();
+ dstTextureSamplerHandle = this->emitSampler(dstTextureProxy, GrSamplerState(),
+ swizzle, "DstTextureSampler");
+ dstTextureOrigin = dstTextureProxy->origin();
+ SkASSERT(dstTextureProxy->textureType() != GrTextureType::kExternal);
+ }
+
+ SkString finalInColor = colorIn.size() ? colorIn : SkString("float4(1)");
+
+ GrGLSLXferProcessor::EmitArgs args(&fFS,
+ this->uniformHandler(),
+ this->shaderCaps(),
+ xp,
+ finalInColor.c_str(),
+ coverageIn.size() ? coverageIn.c_str() : "float4(1)",
+ fFS.getPrimaryColorOutputName(),
+ fFS.getSecondaryColorOutputName(),
+ dstTextureSamplerHandle,
+ dstTextureOrigin,
+ this->pipeline().outputSwizzle());
+ fXferProcessor->emitCode(args);
+
+    // We have to check that effects and the code they emit are consistent, i.e. if an effect
+    // asks for dst color, then the emitted code needs to follow suit.
+ SkDEBUGCODE(verify(xp);)
+ fFS.codeAppend("}");
+}
+
+GrGLSLProgramBuilder::SamplerHandle GrGLSLProgramBuilder::emitSampler(const GrTextureProxy* texture,
+ const GrSamplerState& state,
+ const GrSwizzle& swizzle,
+ const char* name) {
+ ++fNumFragmentSamplers;
+ return this->uniformHandler()->addSampler(texture, state, swizzle, name, this->shaderCaps());
+}
+
+bool GrGLSLProgramBuilder::checkSamplerCounts() {
+ const GrShaderCaps& shaderCaps = *this->shaderCaps();
+ if (fNumFragmentSamplers > shaderCaps.maxFragmentSamplers()) {
+ GrCapsDebugf(this->caps(), "Program would use too many fragment samplers\n");
+ return false;
+ }
+ return true;
+}
+
+#ifdef SK_DEBUG
+void GrGLSLProgramBuilder::verify(const GrPrimitiveProcessor& gp) {
+ SkASSERT(!fFS.fHasReadDstColorThisStage_DebugOnly);
+ SkASSERT(fFS.fUsedProcessorFeaturesThisStage_DebugOnly == gp.requestedFeatures());
+}
+
+void GrGLSLProgramBuilder::verify(const GrFragmentProcessor& fp) {
+ SkASSERT(!fFS.fHasReadDstColorThisStage_DebugOnly);
+ SkASSERT(fFS.fUsedProcessorFeaturesThisStage_DebugOnly == fp.requestedFeatures());
+}
+
+void GrGLSLProgramBuilder::verify(const GrXferProcessor& xp) {
+ SkASSERT(xp.willReadDstColor() == fFS.fHasReadDstColorThisStage_DebugOnly);
+ SkASSERT(fFS.fUsedProcessorFeaturesThisStage_DebugOnly == xp.requestedFeatures());
+}
+#endif
+
+void GrGLSLProgramBuilder::nameVariable(SkString* out, char prefix, const char* name, bool mangle) {
+ if ('\0' == prefix) {
+ *out = name;
+ } else {
+ out->printf("%c%s", prefix, name);
+ }
+ if (mangle) {
+ if (out->endsWith('_')) {
+ // Names containing "__" are reserved.
+ out->append("x");
+ }
+ out->appendf("_Stage%d%s", fStageIndex, fFS.getMangleString().c_str());
+ }
+}
+
+void GrGLSLProgramBuilder::nameExpression(SkString* output, const char* baseName) {
+    // Create a var to hold the stage result. If we already have a valid output name, just use
+    // that; otherwise create a new mangled one. This name is only valid if we are reordering
+    // stages and have to tell the stage exactly where to put its output.
+ SkString outName;
+ if (output->size()) {
+ outName = output->c_str();
+ } else {
+ this->nameVariable(&outName, '\0', baseName);
+ }
+ fFS.codeAppendf("half4 %s;", outName.c_str());
+ *output = outName;
+}
+
+void GrGLSLProgramBuilder::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ this->uniformHandler()->appendUniformDecls(visibility, out);
+}
+
+void GrGLSLProgramBuilder::addRTWidthUniform(const char* name) {
+ SkASSERT(!fUniformHandles.fRTWidthUni.isValid());
+ GrGLSLUniformHandler* uniformHandler = this->uniformHandler();
+ fUniformHandles.fRTWidthUni =
+ uniformHandler->internalAddUniformArray(kFragment_GrShaderFlag, kHalf_GrSLType, name,
+ false, 0, nullptr);
+}
+
+void GrGLSLProgramBuilder::addRTHeightUniform(const char* name) {
+ SkASSERT(!fUniformHandles.fRTHeightUni.isValid());
+ GrGLSLUniformHandler* uniformHandler = this->uniformHandler();
+ fUniformHandles.fRTHeightUni =
+ uniformHandler->internalAddUniformArray(kFragment_GrShaderFlag, kHalf_GrSLType, name,
+ false, 0, nullptr);
+}
+
+void GrGLSLProgramBuilder::finalizeShaders() {
+ this->varyingHandler()->finalize();
+ fVS.finalize(kVertex_GrShaderFlag);
+ if (this->primitiveProcessor().willUseGeoShader()) {
+ SkASSERT(this->shaderCaps()->geometryShaderSupport());
+ fGS.finalize(kGeometry_GrShaderFlag);
+ }
+ fFS.finalize(kFragment_GrShaderFlag);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h
new file mode 100644
index 0000000000..dc63aa82aa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLProgramBuilder_DEFINED
+#define GrGLSLProgramBuilder_DEFINED
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+class GrShaderVar;
+class GrGLSLVaryingHandler;
+class SkString;
+class GrShaderCaps;
+
+class GrGLSLProgramBuilder {
+public:
+ using UniformHandle = GrGLSLUniformHandler::UniformHandle;
+ using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
+
+ virtual ~GrGLSLProgramBuilder() {}
+
+ virtual const GrCaps* caps() const = 0;
+ const GrShaderCaps* shaderCaps() const { return this->caps()->shaderCaps(); }
+
+ int numSamples() const { return fProgramInfo.numSamples(); }
+ GrSurfaceOrigin origin() const { return fProgramInfo.origin(); }
+ const GrPipeline& pipeline() const { return fProgramInfo.pipeline(); }
+ const GrPrimitiveProcessor& primitiveProcessor() const { return fProgramInfo.primProc(); }
+ GrProcessor::CustomFeatures processorFeatures() const {
+ return fProgramInfo.requestedFeatures();
+ }
+ bool snapVerticesToPixelCenters() const {
+ return fProgramInfo.pipeline().snapVerticesToPixelCenters();
+ }
+ // TODO: remove this usage of the descriptor's header
+ bool hasPointSize() const { return fDesc->hasPointSize(); }
+
+ // TODO: stop passing in the renderTarget for just the sampleLocations
+ int effectiveSampleCnt() const {
+ SkASSERT(GrProcessor::CustomFeatures::kSampleLocations & fProgramInfo.requestedFeatures());
+ return fRenderTarget->renderTargetPriv().getSampleLocations().count();
+ }
+ const SkTArray<SkPoint>& getSampleLocations() const {
+ return fRenderTarget->renderTargetPriv().getSampleLocations();
+ }
+
+ const GrProgramDesc* desc() const { return fDesc; }
+
+ void appendUniformDecls(GrShaderFlags visibility, SkString*) const;
+
+ const char* samplerVariable(SamplerHandle handle) const {
+ return this->uniformHandler()->samplerVariable(handle);
+ }
+
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const {
+ if (this->caps()->shaderCaps()->textureSwizzleAppliedInShader()) {
+ return this->uniformHandler()->samplerSwizzle(handle);
+ }
+ return GrSwizzle::RGBA();
+ }
+
+ // Used to add a uniform for the RenderTarget width (used for sk_Width) without mangling
+ // the name of the uniform inside of a stage.
+ void addRTWidthUniform(const char* name);
+
+ // Used to add a uniform for the RenderTarget height (used for sk_Height and frag position)
+ // without mangling the name of the uniform inside of a stage.
+ void addRTHeightUniform(const char* name);
+
+ // Generates a name for a variable. The generated string will be the name prefixed by the prefix
+ // char (unless the prefix is '\0'). It also will mangle the name to be stage-specific unless
+ // explicitly asked not to.
+ void nameVariable(SkString* out, char prefix, const char* name, bool mangle = true);
+
+ virtual GrGLSLUniformHandler* uniformHandler() = 0;
+ virtual const GrGLSLUniformHandler* uniformHandler() const = 0;
+ virtual GrGLSLVaryingHandler* varyingHandler() = 0;
+
+ // Used for backend customization of the output color and secondary color variables from the
+ // fragment processor. Only used if the outputs are explicitly declared in the shaders
+ virtual void finalizeFragmentOutputColor(GrShaderVar& outputColor) {}
+ virtual void finalizeFragmentSecondaryColor(GrShaderVar& outputColor) {}
+
+ // number of each input/output type in a single allocation block, used by many builders
+ static const int kVarsPerBlock;
+
+ GrGLSLVertexBuilder fVS;
+ GrGLSLGeometryBuilder fGS;
+ GrGLSLFragmentShaderBuilder fFS;
+
+ int fStageIndex;
+
+ const GrRenderTarget* fRenderTarget; // TODO: remove this
+ const GrProgramInfo& fProgramInfo;
+
+ const GrProgramDesc* fDesc;
+
+ GrGLSLBuiltinUniformHandles fUniformHandles;
+
+ std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
+ int fFragmentProcessorCnt;
+
+protected:
+ explicit GrGLSLProgramBuilder(GrRenderTarget*, const GrProgramInfo&, const GrProgramDesc*);
+
+ void addFeature(GrShaderFlags shaders, uint32_t featureBit, const char* extensionName);
+
+ bool emitAndInstallProcs();
+
+ void finalizeShaders();
+
+ bool fragColorIsInOut() const { return fFS.primaryColorOutputIsInOut(); }
+
+private:
+ // reset is called by the program creator between each processor's emit code. It increments the
+ // stage offset for variable name mangling, and also ensures verification variables in the
+ // fragment shader are cleared.
+ void reset() {
+ this->addStage();
+ SkDEBUGCODE(fFS.debugOnly_resetPerStageVerification();)
+ }
+ void addStage() { fStageIndex++; }
+
+ class AutoStageAdvance {
+ public:
+ AutoStageAdvance(GrGLSLProgramBuilder* pb)
+ : fPB(pb) {
+ fPB->reset();
+ // Each output to the fragment processor gets its own code section
+ fPB->fFS.nextStage();
+ }
+ ~AutoStageAdvance() {}
+ private:
+ GrGLSLProgramBuilder* fPB;
+ };
+
+ // Generates a possibly mangled name for a stage variable and writes it to the fragment shader.
+ void nameExpression(SkString*, const char* baseName);
+
+ void emitAndInstallPrimProc(SkString* outputColor, SkString* outputCoverage);
+ void emitAndInstallFragProcs(SkString* colorInOut, SkString* coverageInOut);
+ SkString emitAndInstallFragProc(const GrFragmentProcessor&,
+ int index,
+ int transformedCoordVarsIdx,
+ const SkString& input,
+ SkString output,
+ SkTArray<std::unique_ptr<GrGLSLFragmentProcessor>>*);
+ void emitAndInstallXferProc(const SkString& colorIn, const SkString& coverageIn);
+ SamplerHandle emitSampler(const GrTextureProxy*, const GrSamplerState&, const GrSwizzle&,
+ const char* name);
+ bool checkSamplerCounts();
+
+#ifdef SK_DEBUG
+ void verify(const GrPrimitiveProcessor&);
+ void verify(const GrFragmentProcessor&);
+ void verify(const GrXferProcessor&);
+#endif
+
+ // These are used to check that we don't exceed the allowable number of resources in a shader.
+ int fNumFragmentSamplers;
+ SkSTArray<4, GrGLSLPrimitiveProcessor::TransformVar> fTransformedCoordVars;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp
new file mode 100644
index 0000000000..395e325f9a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkMatrix44.h"
+
+void GrGLSLProgramDataManager::setSkMatrix(UniformHandle u, const SkMatrix& matrix) const {
+ float mt[] = {
+ matrix.get(SkMatrix::kMScaleX),
+ matrix.get(SkMatrix::kMSkewY),
+ matrix.get(SkMatrix::kMPersp0),
+ matrix.get(SkMatrix::kMSkewX),
+ matrix.get(SkMatrix::kMScaleY),
+ matrix.get(SkMatrix::kMPersp1),
+ matrix.get(SkMatrix::kMTransX),
+ matrix.get(SkMatrix::kMTransY),
+ matrix.get(SkMatrix::kMPersp2),
+ };
+ this->setMatrix3f(u, mt);
+}
+
+void GrGLSLProgramDataManager::setSkMatrix44(UniformHandle u, const SkMatrix44& matrix) const {
+ float mt[16];
+ matrix.asColMajorf(mt);
+ this->setMatrix4f(u, mt);
+}
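+
+// Editorial sketch, not upstream code: a hypothetical helper showing how a
+// backend program object might use these convenience uploads; `pdm` and
+// `fViewMatrixUni` are assumed names.
+#if 0
+static void upload_view_matrix(const GrGLSLProgramDataManager& pdm,
+ GrGLSLProgramDataManager::UniformHandle fViewMatrixUni,
+ const SkMatrix& viewMatrix) {
+ // Repacks the row-major SkMatrix into the column-major float[9] layout
+ // that setMatrix3f() expects.
+ pdm.setSkMatrix(fViewMatrixUni, viewMatrix);
+}
+#endif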
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h
new file mode 100644
index 0000000000..5db4ae5eca
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLProgramDataManager_DEFINED
+#define GrGLSLProgramDataManager_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/gpu/GrResourceHandle.h"
+
+class SkMatrix;
+class SkMatrix44;
+
+/** Manages the resources used by a shader program.
+ * The resources are objects the program uses to communicate with the
+ * application code.
+ */
+class GrGLSLProgramDataManager : SkNoncopyable {
+public:
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(UniformHandle);
+
+ virtual ~GrGLSLProgramDataManager() {}
+
+ /** Functions for uploading uniform values. The varieties ending in v can be used to upload to an
+ * array of uniforms. arrayCount must be <= the array count of the uniform.
+ */
+ virtual void set1i(UniformHandle, int32_t) const = 0;
+ virtual void set1iv(UniformHandle, int arrayCount, const int v[]) const = 0;
+ virtual void set1f(UniformHandle, float v0) const = 0;
+ virtual void set1fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set2i(UniformHandle, int32_t, int32_t) const = 0;
+ virtual void set2iv(UniformHandle, int arrayCount, const int v[]) const = 0;
+ virtual void set2f(UniformHandle, float, float) const = 0;
+ virtual void set2fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set3i(UniformHandle, int32_t, int32_t, int32_t) const = 0;
+ virtual void set3iv(UniformHandle, int arrayCount, const int v[]) const = 0;
+ virtual void set3f(UniformHandle, float, float, float) const = 0;
+ virtual void set3fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set4i(UniformHandle, int32_t, int32_t, int32_t, int32_t) const = 0;
+ virtual void set4iv(UniformHandle, int arrayCount, const int v[]) const = 0;
+ virtual void set4f(UniformHandle, float, float, float, float) const = 0;
+ virtual void set4fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ // Matrices are column-major; the first three upload a single matrix, the latter three upload
+ // arrayCount matrices into a uniform array.
+ virtual void setMatrix2f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix3f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix4f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+ virtual void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+ virtual void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+
+ // convenience method for uploading a SkMatrix to a 3x3 matrix uniform
+ void setSkMatrix(UniformHandle, const SkMatrix&) const;
+ // convenience method for uploading a SkMatrix to a 4x4 matrix uniform
+ void setSkMatrix44(UniformHandle, const SkMatrix44&) const;
+
+ // for nvpr only
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(VaryingHandle);
+ virtual void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const = 0;
+
+protected:
+ GrGLSLProgramDataManager() {}
+
+private:
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp
new file mode 100644
index 0000000000..7675744bc5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLShaderBuilder.h"
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+
+GrGLSLShaderBuilder::GrGLSLShaderBuilder(GrGLSLProgramBuilder* program)
+ : fProgramBuilder(program)
+ , fInputs(GrGLSLProgramBuilder::kVarsPerBlock)
+ , fOutputs(GrGLSLProgramBuilder::kVarsPerBlock)
+ , fFeaturesAddedMask(0)
+ , fCodeIndex(kCode)
+ , fFinalized(false) {
+ // We push back some empty placeholder strings which will later become our header
+ for (int i = 0; i <= kCode; i++) {
+ fShaderStrings.push_back();
+ }
+
+ this->main() = "void main() {";
+}
+
+void GrGLSLShaderBuilder::declAppend(const GrShaderVar& var) {
+ SkString tempDecl;
+ var.appendDecl(fProgramBuilder->shaderCaps(), &tempDecl);
+ this->codeAppendf("%s;", tempDecl.c_str());
+}
+
+void GrGLSLShaderBuilder::declareGlobal(const GrShaderVar& v) {
+ v.appendDecl(this->getProgramBuilder()->shaderCaps(), &this->definitions());
+ this->definitions().append(";");
+}
+
+void GrGLSLShaderBuilder::emitFunction(GrSLType returnType,
+ const char* name,
+ int argCnt,
+ const GrShaderVar* args,
+ const char* body,
+ SkString* outName) {
+ this->functions().append(GrGLSLTypeString(returnType));
+ fProgramBuilder->nameVariable(outName, '\0', name);
+ this->functions().appendf(" %s", outName->c_str());
+ this->functions().append("(");
+ for (int i = 0; i < argCnt; ++i) {
+ args[i].appendDecl(fProgramBuilder->shaderCaps(), &this->functions());
+ if (i < argCnt - 1) {
+ this->functions().append(", ");
+ }
+ }
+ this->functions().append(") {\n");
+ this->functions().append(body);
+ this->functions().append("}\n\n");
+}
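+
+// Editorial sketch, not upstream code: a processor's emit code could add a
+// helper like this (`fsBuilder` is an assumed GrGLSLShaderBuilder*):
+#if 0
+ const GrShaderVar gArgs[] = { GrShaderVar("x", kHalf_GrSLType) };
+ SkString fnName;
+ fsBuilder->emitFunction(kHalf_GrSLType, "saturate_x", SK_ARRAY_COUNT(gArgs), gArgs,
+ "return clamp(x, 0.0, 1.0);", &fnName);
+ // fnName now holds the mangled function name, usable via %s in codeAppendf().
+#endif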
+
+static inline void append_texture_swizzle(SkString* out, GrSwizzle swizzle) {
+ if (swizzle != GrSwizzle::RGBA()) {
+ out->appendf(".%s", swizzle.c_str());
+ }
+}
+
+void GrGLSLShaderBuilder::appendTextureLookup(SkString* out,
+ SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType) const {
+ const char* sampler = fProgramBuilder->samplerVariable(samplerHandle);
+ out->appendf("sample(%s, %s)", sampler, coordName);
+ append_texture_swizzle(out, fProgramBuilder->samplerSwizzle(samplerHandle));
+}
+
+void GrGLSLShaderBuilder::appendTextureLookup(SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ SkString lookup;
+ this->appendTextureLookup(&lookup, samplerHandle, coordName, varyingType);
+ this->appendColorGamutXform(lookup.c_str(), colorXformHelper);
+}
+
+void GrGLSLShaderBuilder::appendTextureLookupAndModulate(
+ const char* modulation,
+ SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ SkString lookup;
+ this->appendTextureLookup(&lookup, samplerHandle, coordName, varyingType);
+ this->appendColorGamutXform(lookup.c_str(), colorXformHelper);
+ if (modulation) {
+ this->codeAppendf(" * %s", modulation);
+ }
+}
+
+void GrGLSLShaderBuilder::appendColorGamutXform(SkString* out,
+ const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ if (!colorXformHelper || colorXformHelper->isNoop()) {
+ *out = srcColor;
+ return;
+ }
+
+ GrGLSLUniformHandler* uniformHandler = fProgramBuilder->uniformHandler();
+
+ // We define up to three helper functions, to keep things clearer. One for the source transfer
+ // function, one for the (inverse) destination transfer function, and one for the gamut xform.
+ // Any combination of these may be present, although some configurations are much more likely.
+
+ auto emitTFFunc = [=](const char* name, GrGLSLProgramDataManager::UniformHandle uniform,
+ TFKind kind) {
+ const GrShaderVar gTFArgs[] = { GrShaderVar("x", kHalf_GrSLType) };
+ const char* coeffs = uniformHandler->getUniformCStr(uniform);
+ SkString body;
+ // Temporaries to make the evaluation line readable. We always use the sRGBish coefficient
+ // names, so the PQ and HLG math reads a bit confusingly.
+ body.appendf("half G = %s[0];", coeffs);
+ body.appendf("half A = %s[1];", coeffs);
+ body.appendf("half B = %s[2];", coeffs);
+ body.appendf("half C = %s[3];", coeffs);
+ body.appendf("half D = %s[4];", coeffs);
+ body.appendf("half E = %s[5];", coeffs);
+ body.appendf("half F = %s[6];", coeffs);
+ body.append("half s = sign(x);");
+ body.append("x = abs(x);");
+ switch (kind) {
+ case TFKind::sRGBish_TF:
+ body.append("x = (x < D) ? (C * x) + F : pow(A * x + B, G) + E;");
+ break;
+ case TFKind::PQish_TF:
+ body.append("x = pow(max(A + B * pow(x, C), 0) / (D + E * pow(x, C)), F);");
+ break;
+ case TFKind::HLGish_TF:
+ body.append("x = (x*A <= 1) ? pow(x*A, B) : exp((x-E)*C) + D;");
+ break;
+ case TFKind::HLGinvish_TF:
+ body.append("x = (x <= 1) ? A * pow(x, B) : C * log(x - D) + E;");
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ body.append("return s * x;");
+ SkString funcName;
+ this->emitFunction(kHalf_GrSLType, name, SK_ARRAY_COUNT(gTFArgs), gTFArgs, body.c_str(),
+ &funcName);
+ return funcName;
+ };
+
+ SkString srcTFFuncName;
+ if (colorXformHelper->applySrcTF()) {
+ srcTFFuncName = emitTFFunc("src_tf", colorXformHelper->srcTFUniform(),
+ colorXformHelper->srcTFKind());
+ }
+
+ SkString dstTFFuncName;
+ if (colorXformHelper->applyDstTF()) {
+ dstTFFuncName = emitTFFunc("dst_tf", colorXformHelper->dstTFUniform(),
+ colorXformHelper->dstTFKind());
+ }
+
+ SkString gamutXformFuncName;
+ if (colorXformHelper->applyGamutXform()) {
+ const GrShaderVar gGamutXformArgs[] = { GrShaderVar("color", kHalf4_GrSLType) };
+ const char* xform = uniformHandler->getUniformCStr(colorXformHelper->gamutXformUniform());
+ SkString body;
+ body.appendf("color.rgb = (%s * color.rgb);", xform);
+ body.append("return color;");
+ this->emitFunction(kHalf4_GrSLType, "gamut_xform", SK_ARRAY_COUNT(gGamutXformArgs),
+ gGamutXformArgs, body.c_str(), &gamutXformFuncName);
+ }
+
+ // Now define a wrapper function that applies all the intermediate steps
+ {
+ const GrShaderVar gColorXformArgs[] = { GrShaderVar("color", kHalf4_GrSLType) };
+ SkString body;
+ if (colorXformHelper->applyUnpremul()) {
+ body.append("half nonZeroAlpha = max(color.a, 0.0001);");
+ body.append("color = half4(color.rgb / nonZeroAlpha, nonZeroAlpha);");
+ }
+ if (colorXformHelper->applySrcTF()) {
+ body.appendf("color.r = %s(color.r);", srcTFFuncName.c_str());
+ body.appendf("color.g = %s(color.g);", srcTFFuncName.c_str());
+ body.appendf("color.b = %s(color.b);", srcTFFuncName.c_str());
+ }
+ if (colorXformHelper->applyGamutXform()) {
+ body.appendf("color = %s(color);", gamutXformFuncName.c_str());
+ }
+ if (colorXformHelper->applyDstTF()) {
+ body.appendf("color.r = %s(color.r);", dstTFFuncName.c_str());
+ body.appendf("color.g = %s(color.g);", dstTFFuncName.c_str());
+ body.appendf("color.b = %s(color.b);", dstTFFuncName.c_str());
+ }
+ if (colorXformHelper->applyPremul()) {
+ body.append("color.rgb *= color.a;");
+ }
+ body.append("return color;");
+ SkString colorXformFuncName;
+ this->emitFunction(kHalf4_GrSLType, "color_xform", SK_ARRAY_COUNT(gColorXformArgs),
+ gColorXformArgs, body.c_str(), &colorXformFuncName);
+ out->appendf("%s(%s)", colorXformFuncName.c_str(), srcColor);
+ }
+}
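+
+// Editorial note, not upstream code: for a plain sRGB source the seven uploaded
+// coefficients follow the skcms-style parameterization used above,
+// x < D ? C*x + F : pow(A*x + B, G) + E,
+// with G=2.4, A=1/1.055, B=0.055/1.055, C=1/12.92, D=0.04045, E=F=0, so the
+// emitted src_tf() linearizes sRGB-encoded channel values.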
+
+void GrGLSLShaderBuilder::appendColorGamutXform(const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ SkString xform;
+ this->appendColorGamutXform(&xform, srcColor, colorXformHelper);
+ this->codeAppend(xform.c_str());
+}
+
+bool GrGLSLShaderBuilder::addFeature(uint32_t featureBit, const char* extensionName) {
+ if (featureBit & fFeaturesAddedMask) {
+ return false;
+ }
+ this->extensions().appendf("#extension %s: require\n", extensionName);
+ fFeaturesAddedMask |= featureBit;
+ return true;
+}
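+
+// Editorial note, not upstream code: e.g. addFeature(1 << kFramebufferFetch_GLSLPrivateFeature,
+// "GL_EXT_shader_framebuffer_fetch") appends
+// #extension GL_EXT_shader_framebuffer_fetch: require
+// to the extensions() section exactly once; later calls with the same bit return false.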
+
+void GrGLSLShaderBuilder::appendDecls(const VarArray& vars, SkString* out) const {
+ for (int i = 0; i < vars.count(); ++i) {
+ vars[i].appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+}
+
+void GrGLSLShaderBuilder::addLayoutQualifier(const char* param, InterfaceQualifier interface) {
+ SkASSERT(fProgramBuilder->shaderCaps()->generation() >= k330_GrGLSLGeneration ||
+ fProgramBuilder->shaderCaps()->mustEnableAdvBlendEqs());
+ fLayoutParams[interface].push_back() = param;
+}
+
+void GrGLSLShaderBuilder::compileAndAppendLayoutQualifiers() {
+ static const char* interfaceQualifierNames[] = {
+ "in",
+ "out"
+ };
+
+ for (int interface = 0; interface <= kLastInterfaceQualifier; ++interface) {
+ const SkTArray<SkString>& params = fLayoutParams[interface];
+ if (params.empty()) {
+ continue;
+ }
+ this->layoutQualifiers().appendf("layout(%s", params[0].c_str());
+ for (int i = 1; i < params.count(); ++i) {
+ this->layoutQualifiers().appendf(", %s", params[i].c_str());
+ }
+ this->layoutQualifiers().appendf(") %s;\n", interfaceQualifierNames[interface]);
+ }
+
+ GR_STATIC_ASSERT(0 == GrGLSLShaderBuilder::kIn_InterfaceQualifier);
+ GR_STATIC_ASSERT(1 == GrGLSLShaderBuilder::kOut_InterfaceQualifier);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(interfaceQualifierNames) == kLastInterfaceQualifier + 1);
+}
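+
+// Editorial note, not upstream code: with {"triangles", "invocations = 1"} queued
+// for kIn_InterfaceQualifier, the loop above emits
+// layout(triangles, invocations = 1) in;
+// which is exactly what GrGLSLGeometryBuilder::configure() relies on.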
+
+void GrGLSLShaderBuilder::finalize(uint32_t visibility) {
+ SkASSERT(!fFinalized);
+ this->compileAndAppendLayoutQualifiers();
+ SkASSERT(visibility);
+ fProgramBuilder->appendUniformDecls((GrShaderFlags) visibility, &this->uniforms());
+ this->appendDecls(fInputs, &this->inputs());
+ this->appendDecls(fOutputs, &this->outputs());
+ this->onFinalize();
+ // append the 'footer' to code
+ this->code().append("}");
+
+ for (int i = 0; i <= fCodeIndex; i++) {
+ fCompilerString.append(fShaderStrings[i].c_str(), fShaderStrings[i].size());
+ }
+
+ fFinalized = true;
+}
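+
+// Editorial note, not upstream code: after finalize() the compiler string holds
+// the sections in enum order -- extensions, definitions, precision qualifier,
+// layout qualifiers, uniforms, inputs, outputs, functions, main(), then each
+// per-stage code section, closed by the "}" appended above.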
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h
new file mode 100644
index 0000000000..9d3cc48cbc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLShaderBuilder_DEFINED
+#define GrGLSLShaderBuilder_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/sksl/SkSLString.h"
+
+#include <stdarg.h>
+
+class GrGLSLColorSpaceXformHelper;
+
+/**
+ Base class for all shader builders.
+*/
+class GrGLSLShaderBuilder {
+public:
+ GrGLSLShaderBuilder(GrGLSLProgramBuilder* program);
+ virtual ~GrGLSLShaderBuilder() {}
+
+ using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
+
+ /** Appends a 2D texture sample with projection if necessary. coordType must either be Vec2f or
+ Vec3f. The latter is interpreted as projective texture coords. The vec length and swizzle
+ order of the result depend on the GrProcessor::TextureSampler associated with the
+ SamplerHandle.
+ */
+ void appendTextureLookup(SkString* out,
+ SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kHalf2_GrSLType) const;
+
+ /** Version of above that appends the result to the shader code instead.*/
+ void appendTextureLookup(SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kHalf2_GrSLType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper = nullptr);
+
+
+ /** Does the work of appendTextureLookup and modulates the result by modulation. The result is
+ always a half4. modulation and the swizzle specified by SamplerHandle must both be
+ half4 or half. If modulation is "" or nullptr, this function acts as though
+ appendTextureLookup were called. */
+ void appendTextureLookupAndModulate(const char* modulation,
+ SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kHalf2_GrSLType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper = nullptr);
+
+ /** Adds a helper function to facilitate color gamut transformation, and produces code that
+ returns the srcColor transformed into a new gamut (via multiplication by the xform from
+ colorXformHelper). Premultiplied sources are also handled correctly (colorXformHelper
+ determines if the source is premultiplied or not). */
+ void appendColorGamutXform(SkString* out, const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper);
+
+ /** Version of above that appends the result to the shader code instead. */
+ void appendColorGamutXform(const char* srcColor, GrGLSLColorSpaceXformHelper* colorXformHelper);
+
+ /**
+ * Adds a constant declaration to the top of the shader.
+ */
+ void defineConstant(const char* type, const char* name, const char* value) {
+ this->definitions().appendf("const %s %s = %s;\n", type, name, value);
+ }
+
+ void defineConstant(const char* name, int value) {
+ this->definitions().appendf("const int %s = %i;\n", name, value);
+ }
+
+ void defineConstant(const char* name, float value) {
+ this->definitions().appendf("const float %s = %f;\n", name, value);
+ }
+
+ void defineConstantf(const char* type, const char* name, const char* fmt, ...) {
+ this->definitions().appendf("const %s %s = ", type, name);
+ va_list args;
+ va_start(args, fmt);
+ this->definitions().appendVAList(fmt, args);
+ va_end(args);
+ this->definitions().append(";\n");
+ }
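+
+ // Editorial sketch, not upstream code:
+ // this->defineConstant("kPi", 3.14159265f);
+ // this->defineConstantf("half4", "kRed", "half4(%f, 0, 0, 1)", 1.0f);
+ // Both land in the definitions() section, ahead of any function bodies.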
+
+ void declareGlobal(const GrShaderVar&);
+
+ /**
+ * Called by GrGLSLProcessors to add code to one of the shaders.
+ */
+ void codeAppendf(const char format[], ...) SK_PRINTF_LIKE(2, 3) {
+ va_list args;
+ va_start(args, format);
+ this->code().appendVAList(format, args);
+ va_end(args);
+ }
+
+ void codeAppend(const char* str) { this->code().append(str); }
+
+ void codeAppend(const char* str, size_t length) { this->code().append(str, length); }
+
+ void codePrependf(const char format[], ...) SK_PRINTF_LIKE(2, 3) {
+ va_list args;
+ va_start(args, format);
+ this->code().prependVAList(format, args);
+ va_end(args);
+ }
+
+ /**
+ * Appends a variable declaration to one of the shaders
+ */
+ void declAppend(const GrShaderVar& var);
+
+ /** Emits a helper function outside of main() in the fragment shader. */
+ void emitFunction(GrSLType returnType,
+ const char* name,
+ int argCnt,
+ const GrShaderVar* args,
+ const char* body,
+ SkString* outName);
+
+ /*
+ * Combines the various parts of the shader to create a single finalized shader string.
+ */
+ void finalize(uint32_t visibility);
+
+ /*
+ * Get parent builder for adding uniforms
+ */
+ GrGLSLProgramBuilder* getProgramBuilder() { return fProgramBuilder; }
+
+ /**
+ * Helper for beginning and ending a block in the shader code.
+ */
+ class ShaderBlock {
+ public:
+ ShaderBlock(GrGLSLShaderBuilder* builder) : fBuilder(builder) {
+ SkASSERT(builder);
+ fBuilder->codeAppend("{");
+ }
+
+ ~ShaderBlock() {
+ fBuilder->codeAppend("}");
+ }
+ private:
+ GrGLSLShaderBuilder* fBuilder;
+ };
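+
+ // Editorial sketch, not upstream code: ShaderBlock is a scope guard, e.g.
+ // {
+ // ShaderBlock block(builder); // appends "{"
+ // builder->codeAppend("color *= coverage;");
+ // } // destructor appends "}"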
+
+protected:
+ typedef GrTAllocator<GrShaderVar> VarArray;
+ void appendDecls(const VarArray& vars, SkString* out) const;
+
+ /**
+ * Features that should only be enabled internally by the builders.
+ */
+ enum GLSLPrivateFeature {
+ kFragCoordConventions_GLSLPrivateFeature,
+ kBlendEquationAdvanced_GLSLPrivateFeature,
+ kBlendFuncExtended_GLSLPrivateFeature,
+ kFramebufferFetch_GLSLPrivateFeature,
+ kNoPerspectiveInterpolation_GLSLPrivateFeature,
+ kSampleVariables_GLSLPrivateFeature,
+ kLastGLSLPrivateFeature = kSampleVariables_GLSLPrivateFeature
+ };
+
+ /*
+ * A general function which enables an extension in a shader if the feature bit is not present
+ *
+ * @return true if the feature bit was not yet present, false otherwise.
+ */
+ bool addFeature(uint32_t featureBit, const char* extensionName);
+
+ enum InterfaceQualifier {
+ kIn_InterfaceQualifier,
+ kOut_InterfaceQualifier,
+ kLastInterfaceQualifier = kOut_InterfaceQualifier
+ };
+
+ /*
+ * A low level function to build default layout qualifiers.
+ *
+ * e.g. layout(param1, param2, ...) out;
+ *
+ * GLSL allows default layout qualifiers for in, out, and uniform.
+ */
+ void addLayoutQualifier(const char* param, InterfaceQualifier);
+
+ void compileAndAppendLayoutQualifiers();
+
+ void nextStage() {
+ fShaderStrings.push_back();
+ fCodeIndex++;
+ }
+
+ void deleteStage() {
+ fShaderStrings.pop_back();
+ fCodeIndex--;
+ }
+
+ SkString& extensions() { return fShaderStrings[kExtensions]; }
+ SkString& definitions() { return fShaderStrings[kDefinitions]; }
+ SkString& precisionQualifier() { return fShaderStrings[kPrecisionQualifier]; }
+ SkString& layoutQualifiers() { return fShaderStrings[kLayoutQualifiers]; }
+ SkString& uniforms() { return fShaderStrings[kUniforms]; }
+ SkString& inputs() { return fShaderStrings[kInputs]; }
+ SkString& outputs() { return fShaderStrings[kOutputs]; }
+ SkString& functions() { return fShaderStrings[kFunctions]; }
+ SkString& main() { return fShaderStrings[kMain]; }
+ SkString& code() { return fShaderStrings[fCodeIndex]; }
+
+ virtual void onFinalize() = 0;
+
+ enum {
+ kExtensions,
+ kDefinitions,
+ kPrecisionQualifier,
+ kLayoutQualifiers,
+ kUniforms,
+ kInputs,
+ kOutputs,
+ kFunctions,
+ kMain,
+ kCode,
+
+ kPrealloc = kCode + 6, // 6 == Reasonable upper bound on number of processor stages
+ };
+
+ GrGLSLProgramBuilder* fProgramBuilder;
+ SkSL::String fCompilerString;
+ SkSTArray<kPrealloc, SkString> fShaderStrings;
+ SkString fCode;
+ SkString fFunctions;
+ SkString fExtensions;
+
+ VarArray fInputs;
+ VarArray fOutputs;
+ uint32_t fFeaturesAddedMask;
+ SkSTArray<1, SkString> fLayoutParams[kLastInterfaceQualifier + 1];
+ int fCodeIndex;
+ bool fFinalized;
+
+ friend class GrCCCoverageProcessor; // to access code().
+ friend class GrGLSLProgramBuilder;
+ friend class GrGLProgramBuilder;
+ friend class GrDawnProgramBuilder;
+ friend class GrGLSLVaryingHandler; // to access noperspective interpolation feature.
+ friend class GrGLPathProgramBuilder; // to access fInputs.
+ friend class GrVkPipelineStateBuilder;
+ friend class GrMtlPipelineStateBuilder;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h
new file mode 100644
index 0000000000..f27d961a95
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLUniformHandler_DEFINED
+#define GrGLSLUniformHandler_DEFINED
+
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/GrSwizzle.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+// variable names beginning with this prefix will not be mangled
+#define GR_NO_MANGLE_PREFIX "sk_"
+
+class GrGLSLProgramBuilder;
+class GrSamplerState;
+class GrTextureProxy;
+
+// Handles for program uniforms (other than per-effect uniforms)
+struct GrGLSLBuiltinUniformHandles {
+ GrGLSLProgramDataManager::UniformHandle fRTAdjustmentUni;
+ // Render target width, used to implement sk_Width
+ GrGLSLProgramDataManager::UniformHandle fRTWidthUni;
+ // Render target height, used to implement sk_Height and to calculate sk_FragCoord when
+ // origin_upper_left is not supported.
+ GrGLSLProgramDataManager::UniformHandle fRTHeightUni;
+};
+
+class GrGLSLUniformHandler {
+public:
+ virtual ~GrGLSLUniformHandler() {}
+
+ using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(SamplerHandle);
+
+ /** Add a uniform variable to the current program, that has visibility in one or more shaders.
+ visibility is a bitfield of GrShaderFlag values indicating from which shaders the uniform
+ should be accessible. At least one bit must be set. Geometry shader uniforms are not
+ supported at this time. The actual uniform name will be mangled. If outName is not nullptr
+ then it will refer to the final uniform name after return. Use the addUniformArray variant
+ to add an array of uniforms. */
+ UniformHandle addUniform(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ const char** outName = nullptr) {
+ SkASSERT(!GrSLTypeIsCombinedSamplerType(type));
+ return this->addUniformArray(visibility, type, name, 0, outName);
+ }
+
+ UniformHandle addUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ int arrayCount,
+ const char** outName = nullptr) {
+ SkASSERT(!GrSLTypeIsCombinedSamplerType(type));
+ bool mangle = strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX));
+ return this->internalAddUniformArray(visibility, type, name, mangle, arrayCount, outName);
+ }
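+
+ // Editorial sketch, not upstream code:
+ // const char* name;
+ // UniformHandle h = this->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ // "color", &name);
+ // "color" gets a stage-mangled final name (readable via `name`), whereas any
+ // name starting with "sk_" is passed through unmangled per GR_NO_MANGLE_PREFIX.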
+
+ virtual const GrShaderVar& getUniformVariable(UniformHandle u) const = 0;
+
+ /**
+ * 'Or's the visibility parameter with the current uniform visibility.
+ */
+ virtual void updateUniformVisibility(UniformHandle u, uint32_t visibility) = 0;
+
+ /**
+ * Shortcut for getUniformVariable(u).c_str()
+ */
+ virtual const char* getUniformCStr(UniformHandle u) const = 0;
+
+protected:
+ explicit GrGLSLUniformHandler(GrGLSLProgramBuilder* program) : fProgramBuilder(program) {}
+
+ // This is not owned by the class
+ GrGLSLProgramBuilder* fProgramBuilder;
+
+private:
+ virtual const char * samplerVariable(SamplerHandle) const = 0;
+ // Only called if GrShaderCaps::textureSwizzleAppliedInShader() == true.
+ virtual GrSwizzle samplerSwizzle(SamplerHandle) const = 0;
+
+ virtual SamplerHandle addSampler(const GrTextureProxy*, const GrSamplerState&, const GrSwizzle&,
+ const char* name, const GrShaderCaps*) = 0;
+
+ virtual UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) = 0;
+
+ virtual void appendUniformDecls(GrShaderFlags visibility, SkString*) const = 0;
+
+ friend class GrGLSLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp
new file mode 100644
index 0000000000..393947a016
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp
@@ -0,0 +1,52 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/core/SkMatrix.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+
+template<> void GrGLSLGetMatrix<3>(float* dest, const SkMatrix& src) {
+ // Col 0
+ dest[0] = SkScalarToFloat(src[SkMatrix::kMScaleX]);
+ dest[1] = SkScalarToFloat(src[SkMatrix::kMSkewY]);
+ dest[2] = SkScalarToFloat(src[SkMatrix::kMPersp0]);
+
+ // Col 1
+ dest[3] = SkScalarToFloat(src[SkMatrix::kMSkewX]);
+ dest[4] = SkScalarToFloat(src[SkMatrix::kMScaleY]);
+ dest[5] = SkScalarToFloat(src[SkMatrix::kMPersp1]);
+
+ // Col 2
+ dest[6] = SkScalarToFloat(src[SkMatrix::kMTransX]);
+ dest[7] = SkScalarToFloat(src[SkMatrix::kMTransY]);
+ dest[8] = SkScalarToFloat(src[SkMatrix::kMPersp2]);
+}
+
+template<> void GrGLSLGetMatrix<4>(float* dest, const SkMatrix& src) {
+ // Col 0
+ dest[0] = SkScalarToFloat(src[SkMatrix::kMScaleX]);
+ dest[1] = SkScalarToFloat(src[SkMatrix::kMSkewY]);
+ dest[2] = 0;
+ dest[3] = SkScalarToFloat(src[SkMatrix::kMPersp0]);
+
+ // Col 1
+ dest[4] = SkScalarToFloat(src[SkMatrix::kMSkewX]);
+ dest[5] = SkScalarToFloat(src[SkMatrix::kMScaleY]);
+ dest[6] = 0;
+ dest[7] = SkScalarToFloat(src[SkMatrix::kMPersp1]);
+
+ // Col 2
+ dest[8] = 0;
+ dest[9] = 0;
+ dest[10] = 1;
+ dest[11] = 0;
+
+ // Col 3
+ dest[12] = SkScalarToFloat(src[SkMatrix::kMTransX]);
+ dest[13] = SkScalarToFloat(src[SkMatrix::kMTransY]);
+ dest[14] = 0;
+ dest[15] = SkScalarToFloat(src[SkMatrix::kMPersp2]);
+}
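+
+// Editorial sketch, not upstream code: typical pairing with the program data
+// manager (`handle` is an assumed uniform handle):
+#if 0
+static void upload_matrix4(const GrGLSLProgramDataManager& pdm,
+ GrGLSLProgramDataManager::UniformHandle handle,
+ const SkMatrix& m) {
+ float mat[16];
+ GrGLSLGetMatrix<4>(mat, m); // pads the 3x3 SkMatrix into a column-major 4x4
+ pdm.setMatrix4f(handle, mat);
+}
+#endif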
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h
new file mode 100644
index 0000000000..0d2b7e742f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h
@@ -0,0 +1,19 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrGLSLUtil_DEFINED
+#define GrGLSLUtil_DEFINED
+
+class SkMatrix;
+
+/**
+ * Helper for converting SkMatrix to a column-major float array. We assume that all GLSL backends
+ * use a column-major representation for matrices.
+ */
+template<int MatrixSize> void GrGLSLGetMatrix(float* dest, const SkMatrix& src);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp
new file mode 100644
index 0000000000..ce086a9082
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+void GrGLSLVaryingHandler::addPassThroughAttribute(const GrGeometryProcessor::Attribute& input,
+ const char* output,
+ Interpolation interpolation) {
+ SkASSERT(input.isInitialized());
+ SkASSERT(!fProgramBuilder->primitiveProcessor().willUseGeoShader());
+ GrGLSLVarying v(input.gpuType());
+ this->addVarying(input.name(), &v, interpolation);
+ fProgramBuilder->fVS.codeAppendf("%s = %s;", v.vsOut(), input.name());
+ fProgramBuilder->fFS.codeAppendf("%s = %s;", output, v.fsIn());
+}
+
+static bool use_flat_interpolation(GrGLSLVaryingHandler::Interpolation interpolation,
+ const GrShaderCaps& shaderCaps) {
+ switch (interpolation) {
+ using Interpolation = GrGLSLVaryingHandler::Interpolation;
+ case Interpolation::kInterpolated:
+ return false;
+ case Interpolation::kCanBeFlat:
+ SkASSERT(!shaderCaps.preferFlatInterpolation() ||
+ shaderCaps.flatInterpolationSupport());
+ return shaderCaps.preferFlatInterpolation();
+ case Interpolation::kMustBeFlat:
+ SkASSERT(shaderCaps.flatInterpolationSupport());
+ return true;
+ }
+ SK_ABORT("Invalid interpolation");
+}
+
+void GrGLSLVaryingHandler::addVarying(const char* name, GrGLSLVarying* varying,
+ Interpolation interpolation) {
+ SkASSERT(GrSLTypeIsFloatType(varying->type()) || Interpolation::kMustBeFlat == interpolation);
+ bool willUseGeoShader = fProgramBuilder->primitiveProcessor().willUseGeoShader();
+ VaryingInfo& v = fVaryings.push_back();
+
+ SkASSERT(varying);
+ SkASSERT(kVoid_GrSLType != varying->fType);
+ v.fType = varying->fType;
+ v.fIsFlat = use_flat_interpolation(interpolation, *fProgramBuilder->shaderCaps());
+ fProgramBuilder->nameVariable(&v.fVsOut, 'v', name);
+ v.fVisibility = kNone_GrShaderFlags;
+ if (varying->isInVertexShader()) {
+ varying->fVsOut = v.fVsOut.c_str();
+ v.fVisibility |= kVertex_GrShaderFlag;
+ }
+ if (willUseGeoShader) {
+ fProgramBuilder->nameVariable(&v.fGsOut, 'g', name);
+ varying->fGsIn = v.fVsOut.c_str();
+ varying->fGsOut = v.fGsOut.c_str();
+ v.fVisibility |= kGeometry_GrShaderFlag;
+ }
+ if (varying->isInFragmentShader()) {
+ varying->fFsIn = (willUseGeoShader ? v.fGsOut : v.fVsOut).c_str();
+ v.fVisibility |= kFragment_GrShaderFlag;
+ }
+}
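+
+// Editorial sketch, not upstream code: a geometry processor's emit code might
+// wire a varying through by hand (names here are assumed):
+#if 0
+ GrGLSLVarying v(kHalf4_GrSLType);
+ varyingHandler->addVarying("color", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), "inColor");
+ fragBuilder->codeAppendf("%s = %s;", outputColor, v.fsIn());
+#endif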
+
+void GrGLSLVaryingHandler::emitAttributes(const GrGeometryProcessor& gp) {
+ for (const auto& attr : gp.vertexAttributes()) {
+ this->addAttribute(attr.asShaderVar());
+ }
+ for (const auto& attr : gp.instanceAttributes()) {
+ this->addAttribute(attr.asShaderVar());
+ }
+}
+
+void GrGLSLVaryingHandler::addAttribute(const GrShaderVar& var) {
+ SkASSERT(GrShaderVar::kIn_TypeModifier == var.getTypeModifier());
+ for (int j = 0; j < fVertexInputs.count(); ++j) {
+ const GrShaderVar& attr = fVertexInputs[j];
+ // if attribute already added, don't add it again
+ if (attr.getName().equals(var.getName())) {
+ return;
+ }
+ }
+ fVertexInputs.push_back(var);
+}
+
+void GrGLSLVaryingHandler::setNoPerspective() {
+ const GrShaderCaps& caps = *fProgramBuilder->shaderCaps();
+ if (!caps.noperspectiveInterpolationSupport()) {
+ return;
+ }
+ if (const char* extension = caps.noperspectiveInterpolationExtensionString()) {
+ int bit = 1 << GrGLSLFragmentBuilder::kNoPerspectiveInterpolation_GLSLPrivateFeature;
+ fProgramBuilder->fVS.addFeature(bit, extension);
+ if (fProgramBuilder->primitiveProcessor().willUseGeoShader()) {
+ fProgramBuilder->fGS.addFeature(bit, extension);
+ }
+ fProgramBuilder->fFS.addFeature(bit, extension);
+ }
+ fDefaultInterpolationModifier = "noperspective";
+}
+
+void GrGLSLVaryingHandler::finalize() {
+ for (int i = 0; i < fVaryings.count(); ++i) {
+ const VaryingInfo& v = this->fVaryings[i];
+ const char* modifier = v.fIsFlat ? "flat" : fDefaultInterpolationModifier;
+ if (v.fVisibility & kVertex_GrShaderFlag) {
+ fVertexOutputs.push_back().set(v.fType, v.fVsOut, GrShaderVar::kOut_TypeModifier,
+ nullptr, modifier);
+ if (v.fVisibility & kGeometry_GrShaderFlag) {
+ fGeomInputs.push_back().set(v.fType, v.fVsOut, GrShaderVar::kUnsizedArray,
+ GrShaderVar::kIn_TypeModifier, nullptr, modifier);
+ }
+ }
+ if (v.fVisibility & kFragment_GrShaderFlag) {
+ const char* fsIn = v.fVsOut.c_str();
+ if (v.fVisibility & kGeometry_GrShaderFlag) {
+ fGeomOutputs.push_back().set(v.fType, v.fGsOut, GrShaderVar::kOut_TypeModifier,
+ nullptr, modifier);
+ fsIn = v.fGsOut.c_str();
+ }
+ fFragInputs.push_back().set(v.fType, fsIn, GrShaderVar::kIn_TypeModifier, nullptr,
+ modifier);
+ }
+ }
+ this->onFinalize();
+}
+
+void GrGLSLVaryingHandler::appendDecls(const VarArray& vars, SkString* out) const {
+ for (int i = 0; i < vars.count(); ++i) {
+ vars[i].appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";");
+ }
+}
+
+void GrGLSLVaryingHandler::getVertexDecls(SkString* inputDecls, SkString* outputDecls) const {
+ this->appendDecls(fVertexInputs, inputDecls);
+ this->appendDecls(fVertexOutputs, outputDecls);
+}
+
+void GrGLSLVaryingHandler::getGeomDecls(SkString* inputDecls, SkString* outputDecls) const {
+ this->appendDecls(fGeomInputs, inputDecls);
+ this->appendDecls(fGeomOutputs, outputDecls);
+}
+
+void GrGLSLVaryingHandler::getFragDecls(SkString* inputDecls, SkString* outputDecls) const {
+ // We should not have any outputs in the fragment shader when using version 1.10
+ SkASSERT(k110_GrGLSLGeneration != fProgramBuilder->shaderCaps()->generation() ||
+ fFragOutputs.empty());
+ this->appendDecls(fFragInputs, inputDecls);
+ this->appendDecls(fFragOutputs, outputDecls);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h
new file mode 100644
index 0000000000..3294e72a90
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLVarying_DEFINED
+#define GrGLSLVarying_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+class GrGLSLProgramBuilder;
+
+#ifdef SK_DEBUG
+static bool is_matrix(GrSLType type) {
+ switch (type) {
+ case kFloat2x2_GrSLType:
+ case kFloat3x3_GrSLType:
+ case kFloat4x4_GrSLType:
+ case kHalf2x2_GrSLType:
+ case kHalf3x3_GrSLType:
+ case kHalf4x4_GrSLType:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+class GrGLSLVarying {
+public:
+ enum class Scope {
+ kVertToFrag,
+ kVertToGeo,
+ kGeoToFrag
+ };
+
+ GrGLSLVarying() = default;
+ GrGLSLVarying(GrSLType type, Scope scope = Scope::kVertToFrag)
+ : fType(type)
+ , fScope(scope) {
+ // Metal doesn't support varying matrices, so we disallow them everywhere for consistency
+ SkASSERT(!is_matrix(type));
+ }
+
+ void reset(GrSLType type, Scope scope = Scope::kVertToFrag) {
+ // Metal doesn't support varying matrices, so we disallow them everywhere for consistency
+ SkASSERT(!is_matrix(type));
+ *this = GrGLSLVarying();
+ fType = type;
+ fScope = scope;
+ }
+
+ GrSLType type() const { return fType; }
+ Scope scope() const { return fScope; }
+ bool isInVertexShader() const { return Scope::kGeoToFrag != fScope; }
+ bool isInFragmentShader() const { return Scope::kVertToGeo != fScope; }
+
+ const char* vsOut() const { SkASSERT(this->isInVertexShader()); return fVsOut; }
+ const char* gsIn() const { return fGsIn; }
+ const char* gsOut() const { return fGsOut; }
+ const char* fsIn() const { SkASSERT(this->isInFragmentShader()); return fFsIn; }
+
+private:
+ GrSLType fType = kVoid_GrSLType;
+ Scope fScope = Scope::kVertToFrag;
+ const char* fVsOut = nullptr;
+ const char* fGsIn = nullptr;
+ const char* fGsOut = nullptr;
+ const char* fFsIn = nullptr;
+
+ friend class GrGLSLVaryingHandler;
+};
+
+static const int kVaryingsPerBlock = 8;
+
+class GrGLSLVaryingHandler {
+public:
+ explicit GrGLSLVaryingHandler(GrGLSLProgramBuilder* program)
+ : fVaryings(kVaryingsPerBlock)
+ , fVertexInputs(kVaryingsPerBlock)
+ , fVertexOutputs(kVaryingsPerBlock)
+ , fGeomInputs(kVaryingsPerBlock)
+ , fGeomOutputs(kVaryingsPerBlock)
+ , fFragInputs(kVaryingsPerBlock)
+ , fFragOutputs(kVaryingsPerBlock)
+ , fProgramBuilder(program)
+ , fDefaultInterpolationModifier(nullptr) {}
+
+ virtual ~GrGLSLVaryingHandler() {}
+
+ /*
+ * Notifies the varying handler that this shader will never emit geometry in perspective and
+ * therefore does not require perspective-correct interpolation. When supported, this allows
+ * varyings to use the "noperspective" keyword, which means the GPU can use cheaper math for
+ * interpolation.
+ */
+ void setNoPerspective();
+
+ enum class Interpolation {
+ kInterpolated,
+ kCanBeFlat, // Use "flat" if it will be faster.
+ kMustBeFlat // Use "flat" even if it is known to be slow.
+ };
+
+ /*
+ * addVarying allows fine grained control for setting up varyings between stages. Calling this
+ * function will make sure all necessary decls are setup for the client. The client however is
+ * responsible for setting up all shader code (e.g "vOut = vIn;") If you just need to take an
+ * attribute and pass it through to an output value in a fragment shader, use
+ * addPassThroughAttribute.
+ * TODO convert most uses of addVarying to addPassThroughAttribute
+ */
+ void addVarying(const char* name, GrGLSLVarying* varying,
+ Interpolation = Interpolation::kInterpolated);
+
+ /*
+ * The GP can use these calls to pass an attribute through all shaders directly to 'output' in
+ * the fragment shader. Though these calls affect both the vertex shader and fragment shader,
+ * they expect 'output' to be defined in the fragment shader before the call is made. If there
+ * is a geometry shader, we will simply take the value of the varying from the first vertex and
+ * that will be set as the output varying for all emitted vertices.
+ * TODO it might be nicer behavior to have a flag to declare output inside these calls
+ */
+ void addPassThroughAttribute(const GrGeometryProcessor::Attribute&, const char* output,
+ Interpolation = Interpolation::kInterpolated);
+
+ void emitAttributes(const GrGeometryProcessor& gp);
+
+ // This should be called once all attributes and varyings have been added to the
+ // GrGLSLVaryingHandler and before getting/adding any of the declarations to the shaders.
+ void finalize();
+
+ void getVertexDecls(SkString* inputDecls, SkString* outputDecls) const;
+ void getGeomDecls(SkString* inputDecls, SkString* outputDecls) const;
+ void getFragDecls(SkString* inputDecls, SkString* outputDecls) const;
+
+protected:
+ struct VaryingInfo {
+ GrSLType fType;
+ bool fIsFlat;
+ SkString fVsOut;
+ SkString fGsOut;
+ GrShaderFlags fVisibility;
+ };
+
+ typedef GrTAllocator<VaryingInfo> VaryingList;
+ typedef GrTAllocator<GrShaderVar> VarArray;
+ typedef GrGLSLProgramDataManager::VaryingHandle VaryingHandle;
+
+ VaryingList fVaryings;
+ VarArray fVertexInputs;
+ VarArray fVertexOutputs;
+ VarArray fGeomInputs;
+ VarArray fGeomOutputs;
+ VarArray fFragInputs;
+ VarArray fFragOutputs;
+
+ // This is not owned by the class
+ GrGLSLProgramBuilder* fProgramBuilder;
+
+private:
+ void addAttribute(const GrShaderVar& var);
+
+ virtual void onFinalize() = 0;
+
+ // helper function for get*Decls
+ void appendDecls(const VarArray& vars, SkString* out) const;
+
+ const char* fDefaultInterpolationModifier;
+
+ friend class GrGLSLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.cpp
new file mode 100644
index 0000000000..ccec08db8b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+void GrGLSLVertexGeoBuilder::emitNormalizedSkPosition(SkString* out, const char* devPos,
+ const char* rtAdjustName,
+ GrSLType devPosType) {
+ if (this->getProgramBuilder()->snapVerticesToPixelCenters()) {
+ if (kFloat3_GrSLType == devPosType) {
+ const char* p = devPos;
+ out->appendf("{float2 _posTmp = float2(%s.x/%s.z, %s.y/%s.z);", p, p, p, p);
+ } else {
+ SkASSERT(kFloat2_GrSLType == devPosType);
+ out->appendf("{float2 _posTmp = %s;", devPos);
+ }
+ out->appendf("_posTmp = floor(_posTmp) + half2(0.5, 0.5);"
+ "sk_Position = float4(_posTmp, 0, 1);}");
+ } else if (kFloat3_GrSLType == devPosType) {
+ out->appendf("sk_Position = float4(%s.x , %s.y, 0, %s.z);",
+ devPos, devPos, devPos);
+ } else {
+ SkASSERT(kFloat2_GrSLType == devPosType);
+ out->appendf("sk_Position = float4(%s.x , %s.y, 0, 1);",
+ devPos, devPos);
+ }
+}
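+
+// Editorial note, not upstream code: with snapping enabled and a float2 device
+// position "pos", the emitted SkSL is roughly
+// {float2 _posTmp = pos; _posTmp = floor(_posTmp) + half2(0.5, 0.5);
+// sk_Position = float4(_posTmp, 0, 1);}
+// i.e. vertices are pinned to pixel centers before the viewport transform.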
+
+void GrGLSLVertexBuilder::onFinalize() {
+ // We could have the GrGeometryProcessor do this, but it's just easier to have it performed
+ // here. If we ever need to set a variable point size, then we can reinvestigate.
+ if (this->getProgramBuilder()->hasPointSize()) {
+ this->codeAppend("sk_PointSize = 1.0;");
+ }
+ fProgramBuilder->varyingHandler()->getVertexDecls(&this->inputs(), &this->outputs());
+}
+
+static const char* input_type_name(GrGLSLGeometryBuilder::InputType in) {
+ using InputType = GrGLSLGeometryBuilder::InputType;
+ switch (in) {
+ case InputType::kPoints: return "points";
+ case InputType::kLines: return "lines";
+ case InputType::kTriangles: return "triangles";
+ }
+ SK_ABORT("invalid input type");
+}
+
+static const char* output_type_name(GrGLSLGeometryBuilder::OutputType out) {
+ using OutputType = GrGLSLGeometryBuilder::OutputType;
+ switch (out) {
+ case OutputType::kPoints: return "points";
+ case OutputType::kLineStrip: return "line_strip";
+ case OutputType::kTriangleStrip: return "triangle_strip";
+ }
+ SK_ABORT("invalid output type");
+}
+
+void GrGLSLGeometryBuilder::configure(InputType inputType, OutputType outputType, int maxVertices,
+ int numInvocations) {
+ SkASSERT(!this->isConfigured());
+ fNumInvocations = numInvocations;
+ this->addLayoutQualifier(input_type_name(inputType), kIn_InterfaceQualifier);
+ this->addLayoutQualifier(SkStringPrintf("invocations = %i", numInvocations).c_str(),
+ kIn_InterfaceQualifier);
+ this->addLayoutQualifier(output_type_name(outputType), kOut_InterfaceQualifier);
+ this->addLayoutQualifier(SkStringPrintf("max_vertices = %i", maxVertices).c_str(),
+ kOut_InterfaceQualifier);
+}
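+
+// Editorial note, not upstream code: configure(InputType::kTriangles,
+// OutputType::kTriangleStrip, 4) queues layout qualifiers that finalize as
+// layout(triangles, invocations = 1) in;
+// layout(triangle_strip, max_vertices = 4) out;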
+
+void GrGLSLGeometryBuilder::emitVertex(SkString* out, const char* devPos, const char* rtAdjustName,
+ GrSLType devPosType) {
+ this->emitNormalizedSkPosition(out, devPos, rtAdjustName, devPosType);
+ out->append("EmitVertex();");
+}
+
+void GrGLSLGeometryBuilder::endPrimitive() {
+ this->codeAppend("EndPrimitive();");
+}
+
+void GrGLSLGeometryBuilder::onFinalize() {
+ SkASSERT(this->isConfigured());
+ fProgramBuilder->varyingHandler()->getGeomDecls(&this->inputs(), &this->outputs());
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.h
new file mode 100644
index 0000000000..914dbfa837
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexGeoBuilder.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLVertexGeoBuilder_DEFINED
+#define GrGLSLVertexGeoBuilder_DEFINED
+
+#include "src/gpu/glsl/GrGLSLShaderBuilder.h"
+
+/**
+ * Base class for vertex and geometry shader builders. This is the stage that computes input
+ * geometry for the rasterizer.
+ */
+class GrGLSLVertexGeoBuilder : public GrGLSLShaderBuilder {
+protected:
+ GrGLSLVertexGeoBuilder(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ void emitNormalizedSkPosition(const char* devPos, const char* rtAdjustName,
+ GrSLType devPosType = GrSLType::kFloat2_GrSLType) {
+ this->emitNormalizedSkPosition(&this->code(), devPos, rtAdjustName, devPosType);
+ }
+
+ void emitNormalizedSkPosition(SkString* out, const char* devPos, const char* rtAdjustName,
+ GrSLType devPosType = GrSLType::kFloat2_GrSLType);
+
+ friend class GrGLSLGeometryProcessor;
+
+ typedef GrGLSLShaderBuilder INHERITED;
+};
+
+
+class GrGLSLVertexBuilder : public GrGLSLVertexGeoBuilder {
+public:
+ GrGLSLVertexBuilder(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+private:
+ void onFinalize() override;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLVertexGeoBuilder INHERITED;
+};
+
+
+class GrGLSLGeometryBuilder : public GrGLSLVertexGeoBuilder {
+public:
+ GrGLSLGeometryBuilder(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ enum class InputType {
+ kPoints,
+ kLines,
+ kTriangles,
+ };
+
+ enum class OutputType {
+ kPoints,
+ kLineStrip,
+ kTriangleStrip
+ };
+
+ void configure(InputType, OutputType, int maxVertices, int numInvocations = 1);
+ bool isConfigured() const { return fNumInvocations; }
+
+ void emitVertex(const char* devPos, const char* rtAdjustName,
+ GrSLType devPosType = GrSLType::kFloat2_GrSLType) {
+ this->emitVertex(&this->code(), devPos, rtAdjustName, devPosType);
+ }
+ void emitVertex(SkString* out, const char* devPos, const char* rtAdjustName,
+ GrSLType devPosType = GrSLType::kFloat2_GrSLType);
+
+ void endPrimitive();
+
+private:
+ void onFinalize() override;
+
+ int fNumInvocations = 0;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLVertexGeoBuilder INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp
new file mode 100644
index 0000000000..7a04f4bfd2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrXferProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+// This is only called for cases where we are doing LCD coverage and not using in-shader blending.
+// For these cases we assume the src alpha is 1, thus we can just use the max for the alpha
+// coverage since src alpha will always be greater than or equal to dst alpha.
+static void adjust_for_lcd_coverage(GrGLSLXPFragmentBuilder* fragBuilder,
+ const char* srcCoverage,
+ const GrXferProcessor& proc) {
+ if (srcCoverage && proc.isLCD()) {
+ fragBuilder->codeAppendf("%s.a = max(max(%s.r, %s.g), %s.b);",
+ srcCoverage, srcCoverage, srcCoverage, srcCoverage);
+ }
+}
+
+
+void GrGLSLXferProcessor::emitCode(const EmitArgs& args) {
+ if (!args.fXP.willReadDstColor()) {
+ adjust_for_lcd_coverage(args.fXPFragBuilder, args.fInputCoverage, args.fXP);
+ this->emitOutputsForBlendState(args);
+ } else {
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char* dstColor = fragBuilder->dstColor();
+
+ bool needsLocalOutColor = false;
+
+ if (args.fDstTextureSamplerHandle.isValid()) {
+ bool flipY = kBottomLeft_GrSurfaceOrigin == args.fDstTextureOrigin;
+
+ if (args.fInputCoverage) {
+ // We don't think any shaders actually output negative coverage, but just as a
+ // safety check for floating point precision errors we compare with <= here. We just
+ // check the rgb values of the coverage since the alpha may not have been set when
+ // using lcd. If we are using single-channel coverage, alpha will equal rgb
+ // anyway.
+ //
+ // The discard here also helps when batching together text draws that need to read
+ // from a dst copy for blends. Though this only helps the case where the outer
+ // bounding boxes of the letters overlap while the actual glyph parts do not.
+ fragBuilder->codeAppendf("if (all(lessThanEqual(%s.rgb, half3(0)))) {"
+ " discard;"
+ "}", args.fInputCoverage);
+ }
+
+ const char* dstTopLeftName;
+ const char* dstCoordScaleName;
+
+ fDstTopLeftUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf2_GrSLType,
+ "DstTextureUpperLeft",
+ &dstTopLeftName);
+ fDstScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf2_GrSLType,
+ "DstTextureCoordScale",
+ &dstCoordScaleName);
+
+ fragBuilder->codeAppend("// Read color from copy of the destination.\n");
+ fragBuilder->codeAppendf("half2 _dstTexCoord = (half2(sk_FragCoord.xy) - %s) * %s;",
+ dstTopLeftName, dstCoordScaleName);
+
+ if (flipY) {
+ fragBuilder->codeAppend("_dstTexCoord.y = 1.0 - _dstTexCoord.y;");
+ }
+
+ fragBuilder->codeAppendf("half4 %s = ", dstColor);
+ fragBuilder->appendTextureLookup(args.fDstTextureSamplerHandle, "_dstTexCoord",
+ kHalf2_GrSLType);
+ fragBuilder->codeAppend(";");
+ } else {
+ needsLocalOutColor = args.fShaderCaps->requiresLocalOutputColorForFBFetch();
+ }
+
+ const char* outColor = "_localColorOut";
+ if (!needsLocalOutColor) {
+ outColor = args.fOutputPrimary;
+ } else {
+ fragBuilder->codeAppendf("half4 %s;", outColor);
+ }
+
+ this->emitBlendCodeForDstRead(fragBuilder,
+ uniformHandler,
+ args.fInputColor,
+ args.fInputCoverage,
+ dstColor,
+ outColor,
+ args.fOutputSecondary,
+ args.fXP);
+ if (needsLocalOutColor) {
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputPrimary, outColor);
+ }
+ }
+
+ // Swizzle the fragment shader outputs if necessary.
+ this->emitOutputSwizzle(
+ args.fXPFragBuilder, args.fOutputSwizzle, args.fOutputPrimary, args.fOutputSecondary);
+}
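+
+// Editorial note, not upstream code: the dst-copy lookup above maps a fragment
+// at sk_FragCoord.xy into the copied texture's unit space via
+// _dstTexCoord = (sk_FragCoord.xy - DstTextureUpperLeft) * DstTextureCoordScale,
+// where setData() below fills the scale uniform with (1/width, 1/height) of the
+// dst texture.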
+
+void GrGLSLXferProcessor::emitOutputSwizzle(
+ GrGLSLXPFragmentBuilder* x, const GrSwizzle& swizzle, const char* outColor,
+ const char* outColorSecondary) const {
+ if (GrSwizzle::RGBA() != swizzle) {
+ x->codeAppendf("%s = %s.%s;", outColor, outColor, swizzle.c_str());
+ if (outColorSecondary) {
+ x->codeAppendf("%s = %s.%s;", outColorSecondary, outColorSecondary, swizzle.c_str());
+ }
+ }
+}
+
+void GrGLSLXferProcessor::setData(const GrGLSLProgramDataManager& pdm, const GrXferProcessor& xp,
+ const GrTexture* dstTexture, const SkIPoint& dstTextureOffset) {
+ if (dstTexture) {
+ if (fDstTopLeftUni.isValid()) {
+ pdm.set2f(fDstTopLeftUni, static_cast<float>(dstTextureOffset.fX),
+ static_cast<float>(dstTextureOffset.fY));
+ pdm.set2f(fDstScaleUni, 1.f / dstTexture->width(), 1.f / dstTexture->height());
+ } else {
+ SkASSERT(!fDstScaleUni.isValid());
+ }
+ } else {
+ SkASSERT(!fDstTopLeftUni.isValid());
+ SkASSERT(!fDstScaleUni.isValid());
+ }
+ this->onSetData(pdm, xp);
+}
+
+void GrGLSLXferProcessor::DefaultCoverageModulation(GrGLSLXPFragmentBuilder* fragBuilder,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) {
+ if (proc.dstReadUsesMixedSamples()) {
+ if (srcCoverage) {
+ // TODO: Once we are no longer using legacy mesh ops, it will not be possible to even
+ // create a mixed sample with lcd so we can uncomment the below assert. In practice
+ // today this never happens except for GLPrograms test which can make one. skia:6661
+ // SkASSERT(!proc.isLCD());
+ fragBuilder->codeAppendf("%s *= %s;", outColor, srcCoverage);
+ fragBuilder->codeAppendf("%s = %s;", outColorSecondary, srcCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = half4(1.0);", outColorSecondary);
+ }
+ } else if (srcCoverage) {
+ if (proc.isLCD()) {
+ fragBuilder->codeAppendf("half lerpRed = mix(%s.a, %s.a, %s.r);",
+ dstColor, outColor, srcCoverage);
+ fragBuilder->codeAppendf("half lerpBlue = mix(%s.a, %s.a, %s.g);",
+ dstColor, outColor, srcCoverage);
+ fragBuilder->codeAppendf("half lerpGreen = mix(%s.a, %s.a, %s.b);",
+ dstColor, outColor, srcCoverage);
+ }
+ fragBuilder->codeAppendf("%s = %s * %s + (half4(1.0) - %s) * %s;",
+ outColor, srcCoverage, outColor, srcCoverage, dstColor);
+ if (proc.isLCD()) {
+ fragBuilder->codeAppendf("%s.a = max(max(lerpRed, lerpBlue), lerpGreen);", outColor);
+ }
+ }
+}
+
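For reference, the LCD dst-read path in DefaultCoverageModulation above expands to roughly the following fragment snippet, with hypothetical names _cov, _dst, and _out standing in for srcCoverage, dstColor, and outColor (a sketch of the emitted SkSL, not verbatim output):

    half lerpRed   = mix(_dst.a, _out.a, _cov.r);
    half lerpGreen = mix(_dst.a, _out.a, _cov.g);
    half lerpBlue  = mix(_dst.a, _out.a, _cov.b);
    // Per-channel lerp between dst and the blended color, weighted by coverage.
    _out = _cov * _out + (half4(1.0) - _cov) * _dst;
    // Take the strongest per-channel alpha as the output alpha.
    _out.a = max(max(lerpRed, lerpGreen), lerpBlue);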
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h
new file mode 100644
index 0000000000..7c935a1769
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLXferProcessor_DEFINED
+#define GrGLSLXferProcessor_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+class GrXferProcessor;
+class GrGLSLXPBuilder;
+class GrGLSLXPFragmentBuilder;
+class GrShaderCaps;
+class GrTexture;
+
+class GrGLSLXferProcessor {
+public:
+ GrGLSLXferProcessor() {}
+ virtual ~GrGLSLXferProcessor() {}
+
+ using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
+
+ struct EmitArgs {
+ EmitArgs(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderCaps* caps,
+ const GrXferProcessor& xp,
+ const char* inputColor,
+ const char* inputCoverage,
+ const char* outputPrimary,
+ const char* outputSecondary,
+ const SamplerHandle dstTextureSamplerHandle,
+ GrSurfaceOrigin dstTextureOrigin,
+ const GrSwizzle& outputSwizzle)
+ : fXPFragBuilder(fragBuilder)
+ , fUniformHandler(uniformHandler)
+ , fShaderCaps(caps)
+ , fXP(xp)
+ , fInputColor(inputColor ? inputColor : "half4(1.0)")
+ , fInputCoverage(inputCoverage)
+ , fOutputPrimary(outputPrimary)
+ , fOutputSecondary(outputSecondary)
+ , fDstTextureSamplerHandle(dstTextureSamplerHandle)
+ , fDstTextureOrigin(dstTextureOrigin)
+ , fOutputSwizzle(outputSwizzle) {
+ }
+ GrGLSLXPFragmentBuilder* fXPFragBuilder;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrShaderCaps* fShaderCaps;
+ const GrXferProcessor& fXP;
+ const char* fInputColor;
+ const char* fInputCoverage;
+ const char* fOutputPrimary;
+ const char* fOutputSecondary;
+ const SamplerHandle fDstTextureSamplerHandle;
+ GrSurfaceOrigin fDstTextureOrigin;
+ GrSwizzle fOutputSwizzle;
+ };
+    /**
+     * Emits the fragment shader code for this xfer processor. Handles both the
+     * blend-state-only case and the dst-read case by delegating to the private
+     * virtuals below.
+     */
+ void emitCode(const EmitArgs&);
+
+    /** A GrGLSLXferProcessor instance can be reused with any GrXferProcessor that produces
+        the same stage key; this function reads data from a GrXferProcessor and uploads any
+        uniform variables required by the shaders created in emitCode(). The GrXferProcessor
+        parameter is guaranteed to be of the same type that created this GrGLSLXferProcessor and
+        to have an identical processor key as the one that created this GrGLSLXferProcessor. This
+        function calls onSetData on the subclass of GrGLSLXferProcessor.
+    */
+ void setData(const GrGLSLProgramDataManager& pdm, const GrXferProcessor& xp,
+ const GrTexture* dstTexture, const SkIPoint& dstTextureOffset);
+
+protected:
+ static void DefaultCoverageModulation(GrGLSLXPFragmentBuilder* fragBuilder,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc);
+
+private:
+ /**
+ * Called by emitCode() when the XP will not be performing a dst read. This method is
+ * responsible for both blending and coverage. A subclass only needs to implement this method if
+ * it can construct a GrXferProcessor that will not read the dst color.
+ */
+ virtual void emitOutputsForBlendState(const EmitArgs&) {
+ SK_ABORT("emitOutputsForBlendState not implemented.");
+ }
+
+ /**
+ * Called by emitCode() when the XP will perform a dst read. This method only needs to supply
+ * the blending logic. The base class applies coverage. A subclass only needs to implement this
+ * method if it can construct a GrXferProcessor that reads the dst color.
+ */
+ virtual void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder*,
+ GrGLSLUniformHandler*,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor&) {
+ SK_ABORT("emitBlendCodeForDstRead not implemented.");
+ }
+
+ virtual void emitOutputSwizzle(GrGLSLXPFragmentBuilder*,
+ const GrSwizzle&,
+ const char* outColor,
+ const char* outColorSecondary) const;
+
+ virtual void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) = 0;
+
+ GrGLSLProgramDataManager::UniformHandle fDstTopLeftUni;
+ GrGLSLProgramDataManager::UniformHandle fDstScaleUni;
+};
+#endif
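As a usage sketch of the virtuals declared above (hypothetical class, not part of this patch): an XP that never reads the dst only implements emitOutputsForBlendState() and onSetData(), while the dst-read hook keeps its SK_ABORT default.

    // Hypothetical, minimal subclass; the name and blend behavior are illustrative only.
    class GrGLSLSrcOverXP : public GrGLSLXferProcessor {
    private:
        void emitOutputsForBlendState(const EmitArgs& args) override {
            // Rely on fixed-function blending; emit the coverage-modulated src color.
            // (Assumes args.fInputCoverage is non-null in this sketch.)
            args.fXPFragBuilder->codeAppendf("%s = %s * %s;", args.fOutputPrimary,
                                             args.fInputColor, args.fInputCoverage);
        }
        void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {
            // No uniforms to upload in this sketch.
        }
    };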
diff --git a/gfx/skia/skia/src/gpu/gpu_workaround_list.txt b/gfx/skia/skia/src/gpu/gpu_workaround_list.txt
new file mode 100644
index 0000000000..b58ce5d94b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gpu_workaround_list.txt
@@ -0,0 +1,17 @@
+add_and_true_to_loop_condition
+disable_blend_equation_advanced
+disable_discard_framebuffer
+disable_texture_storage
+disallow_large_instanced_draw
+emulate_abs_int_function
+flush_on_framebuffer_change
+gl_clear_broken
+max_fragment_uniform_vectors_32
+max_msaa_sample_count_4
+max_texture_size_limit_4096
+pack_parameters_workaround_with_pack_buffer
+remove_pow_with_constant_exponent
+restore_scissor_on_fbo_change
+rewrite_do_while_loops
+unbind_attachments_on_bound_render_fbo_delete
+unfold_short_circuit_as_ternary_operation
diff --git a/gfx/skia/skia/src/gpu/gradients/GrClampedGradientEffect.fp b/gfx/skia/skia/src/gpu/gradients/GrClampedGradientEffect.fp
new file mode 100644
index 0000000000..69d52f50e0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrClampedGradientEffect.fp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This master effect implements clamping on the layout coordinate and requires specifying the
+// border colors that are used when outside the clamped boundary. Gradients with the
+// SkShader::kClamp_TileMode should use the colors at their first and last stop (after adding dummy
+// stops for t=0,t=1) as the border colors. This will automatically replicate the edge color, even
+// when there is a hard stop.
+//
+// The SkShader::kDecal_TileMode can be produced by specifying transparent black as the border
+// colors, regardless of the gradient's stop colors.
+
+in fragmentProcessor colorizer;
+in fragmentProcessor gradLayout;
+
+layout(ctype=SkPMColor4f, tracked) in uniform half4 leftBorderColor; // t < 0.0
+layout(ctype=SkPMColor4f, tracked) in uniform half4 rightBorderColor; // t > 1.0
+
+layout(key) in bool makePremul;
+// Trust the creator that this matches the color spec of the gradient
+in bool colorsAreOpaque;
+
+void main() {
+ half4 t = sample(gradLayout);
+    // If t.x is below 0, use the left border color without invoking the child processor. If t.x
+    // is above 1, use the right border color. Otherwise, t is in the [0, 1] range assumed by the
+ // colorizer FP, so delegate to the child processor.
+ if (!gradLayout.preservesOpaqueInput && t.y < 0) {
+        // layout has rejected this fragment (rely on sksl to remove this branch if the layout FP
+        // preserves opacity)
+ sk_OutColor = half4(0);
+ } else if (t.x < 0) {
+ sk_OutColor = leftBorderColor;
+ } else if (t.x > 1.0) {
+ sk_OutColor = rightBorderColor;
+ } else {
+ sk_OutColor = sample(colorizer, t);
+ }
+
+ @if(makePremul) {
+ sk_OutColor.xyz *= sk_OutColor.w;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+// If the layout does not preserve opacity, remove the opaque optimization,
+// but otherwise respect the provided color opacity state (which should take
+// into account the opacity of the border colors).
+@optimizationFlags {
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ (colorsAreOpaque && gradLayout->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags)
+}
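To make the border-color contract above concrete, here is a host-side sketch of how a caller would pick the two border colors per tile mode (colors, colorCount, and tileMode are assumed inputs for this sketch; the same mapping appears later in make_gradient in GrGradientShader.cpp):

    SkPMColor4f left, right;
    if (tileMode == SkTileMode::kClamp) {
        left  = colors[0];               // color at t = 0
        right = colors[colorCount - 1];  // color at t = 1
    } else {  // SkTileMode::kDecal
        // Transparent black borders regardless of the stop colors.
        left = right = SK_PMColor4fTRANSPARENT;
    }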
diff --git a/gfx/skia/skia/src/gpu/gradients/GrDualIntervalGradientColorizer.fp b/gfx/skia/skia/src/gpu/gradients/GrDualIntervalGradientColorizer.fp
new file mode 100644
index 0000000000..602b075903
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrDualIntervalGradientColorizer.fp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Models two intervals (so 4 colors) that are connected at a specific threshold point.
+
+// Bias and scale for 0 to threshold
+layout(ctype=SkPMColor4f, tracked) in uniform float4 scale01;
+layout(ctype=SkPMColor4f, tracked) in uniform float4 bias01;
+
+// Bias and scale for threshold to 1
+layout(ctype=SkPMColor4f, tracked) in uniform float4 scale23;
+layout(ctype=SkPMColor4f, tracked) in uniform float4 bias23;
+
+layout(tracked) in uniform half threshold;
+
+void main() {
+ half t = sk_InColor.x;
+
+ float4 scale, bias;
+ if (t < threshold) {
+ scale = scale01;
+ bias = bias01;
+ } else {
+ scale = scale23;
+ bias = bias23;
+ }
+
+ sk_OutColor = half4(t * scale + bias);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkPMColor4f& c0, const SkPMColor4f& c1,
+ const SkPMColor4f& c2, const SkPMColor4f& c3,
+ float threshold);
+}
+
+@cppEnd {
+ std::unique_ptr<GrFragmentProcessor> GrDualIntervalGradientColorizer::Make(
+ const SkPMColor4f& c0, const SkPMColor4f& c1, const SkPMColor4f& c2, const SkPMColor4f& c3, float threshold) {
+ // Derive scale and biases from the 4 colors and threshold
+ auto vc0 = Sk4f::Load(c0.vec());
+ auto vc1 = Sk4f::Load(c1.vec());
+ auto scale01 = (vc1 - vc0) / threshold;
+ // bias01 = c0
+
+ auto vc2 = Sk4f::Load(c2.vec());
+ auto vc3 = Sk4f::Load(c3.vec());
+ auto scale23 = (vc3 - vc2) / (1 - threshold);
+ auto bias23 = vc2 - threshold * scale23;
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrDualIntervalGradientColorizer(
+ { scale01[0], scale01[1], scale01[2], scale01[3] }, c0,
+ { scale23[0], scale23[1], scale23[2], scale23[3] },
+ { bias23[0], bias23[1], bias23[2], bias23[3] }, threshold));
+ }
+}
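A quick sanity check of the scale/bias derivation above: the two linear pieces meet at the threshold. For the first interval, t * scale01 + bias01 at t = threshold gives threshold * (c1 - c0) / threshold + c0 = c1; for the second, t * scale23 + bias23 gives threshold * scale23 + (c2 - threshold * scale23) = c2. So the colorizer reproduces c1 just below the threshold and c2 just above it, matching the shared-stop construction used by make_colorizer's three- and four-color cases.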
diff --git a/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.cpp b/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.cpp
new file mode 100644
index 0000000000..fdbc0fcc11
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/gpu/gradients/GrGradientBitmapCache.h"
+
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTemplates.h"
+
+#include <functional>
+
+struct GrGradientBitmapCache::Entry {
+ Entry* fPrev;
+ Entry* fNext;
+
+ void* fBuffer;
+ size_t fSize;
+ SkBitmap fBitmap;
+
+ Entry(const void* buffer, size_t size, const SkBitmap& bm)
+ : fPrev(nullptr),
+ fNext(nullptr),
+ fBitmap(bm) {
+ fBuffer = sk_malloc_throw(size);
+ fSize = size;
+ memcpy(fBuffer, buffer, size);
+ }
+
+ ~Entry() { sk_free(fBuffer); }
+
+ bool equals(const void* buffer, size_t size) const {
+ return (fSize == size) && !memcmp(fBuffer, buffer, size);
+ }
+};
+
+GrGradientBitmapCache::GrGradientBitmapCache(int max, int res)
+ : fMaxEntries(max)
+ , fResolution(res) {
+ fEntryCount = 0;
+ fHead = fTail = nullptr;
+
+ this->validate();
+}
+
+GrGradientBitmapCache::~GrGradientBitmapCache() {
+ this->validate();
+
+ Entry* entry = fHead;
+ while (entry) {
+ Entry* next = entry->fNext;
+ delete entry;
+ entry = next;
+ }
+}
+
+GrGradientBitmapCache::Entry* GrGradientBitmapCache::release(Entry* entry) const {
+ if (entry->fPrev) {
+ SkASSERT(fHead != entry);
+ entry->fPrev->fNext = entry->fNext;
+ } else {
+ SkASSERT(fHead == entry);
+ fHead = entry->fNext;
+ }
+ if (entry->fNext) {
+ SkASSERT(fTail != entry);
+ entry->fNext->fPrev = entry->fPrev;
+ } else {
+ SkASSERT(fTail == entry);
+ fTail = entry->fPrev;
+ }
+ return entry;
+}
+
+void GrGradientBitmapCache::attachToHead(Entry* entry) const {
+ entry->fPrev = nullptr;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ } else {
+ fTail = entry;
+ }
+ fHead = entry;
+}
+
+bool GrGradientBitmapCache::find(const void* buffer, size_t size, SkBitmap* bm) const {
+ AutoValidate av(this);
+
+ Entry* entry = fHead;
+ while (entry) {
+ if (entry->equals(buffer, size)) {
+ if (bm) {
+ *bm = entry->fBitmap;
+ }
+ // move to the head of our list, so we purge it last
+ this->release(entry);
+ this->attachToHead(entry);
+ return true;
+ }
+ entry = entry->fNext;
+ }
+ return false;
+}
+
+void GrGradientBitmapCache::add(const void* buffer, size_t len, const SkBitmap& bm) {
+ AutoValidate av(this);
+
+ if (fEntryCount == fMaxEntries) {
+ SkASSERT(fTail);
+ delete this->release(fTail);
+ fEntryCount -= 1;
+ }
+
+ Entry* entry = new Entry(buffer, len, bm);
+ this->attachToHead(entry);
+ fEntryCount += 1;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+void GrGradientBitmapCache::fillGradient(const SkPMColor4f* colors, const SkScalar* positions,
+ int count, SkColorType colorType, SkBitmap* bitmap) {
+ SkHalf* pixelsF16 = reinterpret_cast<SkHalf*>(bitmap->getPixels());
+ uint32_t* pixels32 = reinterpret_cast<uint32_t*>(bitmap->getPixels());
+
+ typedef std::function<void(const Sk4f&, int)> pixelWriteFn_t;
+
+ pixelWriteFn_t writeF16Pixel = [&](const Sk4f& x, int index) {
+ Sk4h c = SkFloatToHalf_finite_ftz(x);
+ pixelsF16[4*index+0] = c[0];
+ pixelsF16[4*index+1] = c[1];
+ pixelsF16[4*index+2] = c[2];
+ pixelsF16[4*index+3] = c[3];
+ };
+ pixelWriteFn_t write8888Pixel = [&](const Sk4f& c, int index) {
+ pixels32[index] = Sk4f_toL32(c);
+ };
+
+ pixelWriteFn_t writePixel =
+ (colorType == kRGBA_F16_SkColorType) ? writeF16Pixel : write8888Pixel;
+
+ int prevIndex = 0;
+ for (int i = 1; i < count; i++) {
+        // Historically, stops have been mapped to [0, 256], with 256 then nudged to the next
+        // smaller value, then truncated for the texture index. This seems to produce the best
+        // results for some common distributions, so we preserve the behavior.
+ int nextIndex = SkTMin(positions[i] * fResolution,
+ SkIntToScalar(fResolution - 1));
+
+ if (nextIndex > prevIndex) {
+ Sk4f c0 = Sk4f::Load(colors[i - 1].vec()),
+ c1 = Sk4f::Load(colors[i ].vec());
+
+ Sk4f step = Sk4f(1.0f / static_cast<float>(nextIndex - prevIndex));
+ Sk4f delta = (c1 - c0) * step;
+
+ for (int curIndex = prevIndex; curIndex <= nextIndex; ++curIndex) {
+ writePixel(c0, curIndex);
+ c0 += delta;
+ }
+ }
+ prevIndex = nextIndex;
+ }
+ SkASSERT(prevIndex == fResolution - 1);
+}
+
+void GrGradientBitmapCache::getGradient(const SkPMColor4f* colors, const SkScalar* positions,
+ int count, SkColorType colorType, SkAlphaType alphaType, SkBitmap* bitmap) {
+ // build our key: [numColors + colors[] + positions[] + alphaType + colorType ]
+ static_assert(sizeof(SkPMColor4f) % sizeof(int32_t) == 0, "");
+ const int colorsAsIntCount = count * sizeof(SkPMColor4f) / sizeof(int32_t);
+ int keyCount = 1 + colorsAsIntCount + 1 + 1;
+ if (count > 2) {
+ keyCount += count - 1;
+ }
+
+ SkAutoSTMalloc<64, int32_t> storage(keyCount);
+ int32_t* buffer = storage.get();
+
+ *buffer++ = count;
+ memcpy(buffer, colors, count * sizeof(SkPMColor4f));
+ buffer += colorsAsIntCount;
+ if (count > 2) {
+ for (int i = 1; i < count; i++) {
+ *buffer++ = SkFloat2Bits(positions[i]);
+ }
+ }
+ *buffer++ = static_cast<int32_t>(alphaType);
+ *buffer++ = static_cast<int32_t>(colorType);
+ SkASSERT(buffer - storage.get() == keyCount);
+
+ ///////////////////////////////////
+
+ // acquire lock for checking/adding to cache
+ SkAutoMutexExclusive ama(fMutex);
+ size_t size = keyCount * sizeof(int32_t);
+ if (!this->find(storage.get(), size, bitmap)) {
+ SkImageInfo info = SkImageInfo::Make(fResolution, 1, colorType, alphaType);
+ bitmap->allocPixels(info);
+ GrGradientBitmapCache::fillGradient(colors, positions, count, colorType, bitmap);
+ bitmap->setImmutable();
+ this->add(storage.get(), size, *bitmap);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void GrGradientBitmapCache::validate() const {
+ SkASSERT(fEntryCount >= 0 && fEntryCount <= fMaxEntries);
+
+ if (fEntryCount > 0) {
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(nullptr == fTail->fNext);
+
+ if (fEntryCount == 1) {
+ SkASSERT(fHead == fTail);
+ } else {
+ SkASSERT(fHead != fTail);
+ }
+
+ Entry* entry = fHead;
+ int count = 0;
+ while (entry) {
+ count += 1;
+ entry = entry->fNext;
+ }
+ SkASSERT(count == fEntryCount);
+
+ entry = fTail;
+ while (entry) {
+ count -= 1;
+ entry = entry->fPrev;
+ }
+ SkASSERT(0 == count);
+ } else {
+ SkASSERT(nullptr == fHead);
+ SkASSERT(nullptr == fTail);
+ }
+}
+
+#endif
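A minimal usage sketch of the cache above (values are illustrative): the key is the raw byte buffer built in getGradient, and a lookup hit moves the entry to the head of the list, so the tail is always the least recently used entry and the first to be purged.

    // Two premultiplied stops, evenly spaced; a second call with identical
    // inputs rebuilds the same key and hits the cache.
    GrGradientBitmapCache cache(/*maxEntries=*/32, /*resolution=*/256);
    SkPMColor4f colors[] = {{0, 0, 0, 1}, {1, 1, 1, 1}};
    SkScalar positions[] = {0.0f, 1.0f};
    SkBitmap bitmap;
    cache.getGradient(colors, positions, /*count=*/2,
                      kRGBA_8888_SkColorType, kPremul_SkAlphaType, &bitmap);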
diff --git a/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.h b/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.h
new file mode 100644
index 0000000000..d4c8cef1f4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrGradientBitmapCache.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGradientBitmapCache_DEFINED
+#define GrGradientBitmapCache_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkNoncopyable.h"
+
+class GrGradientBitmapCache : SkNoncopyable {
+public:
+ GrGradientBitmapCache(int maxEntries, int resolution);
+ ~GrGradientBitmapCache();
+
+ // Assumes colors are compatible with the specified alphaType (e.g. if it's premul then colors
+ // are already premultiplied). Thread safe.
+ void getGradient(const SkPMColor4f* colors, const SkScalar* positions, int count,
+ SkColorType colorType, SkAlphaType alphaType, SkBitmap* bitmap);
+
+private:
+ SkMutex fMutex;
+
+ int fEntryCount;
+ const int fMaxEntries;
+ const int fResolution;
+
+ struct Entry;
+ mutable Entry* fHead;
+ mutable Entry* fTail;
+
+ inline Entry* release(Entry*) const;
+ inline void attachToHead(Entry*) const;
+
+ bool find(const void* buffer, size_t len, SkBitmap*) const;
+ void add(const void* buffer, size_t len, const SkBitmap&);
+
+ void fillGradient(const SkPMColor4f* colors, const SkScalar* positions, int count,
+ SkColorType colorType, SkBitmap* bitmap);
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate : SkNoncopyable {
+ public:
+ AutoValidate(const GrGradientBitmapCache* bc) : fBC(bc) { bc->validate(); }
+ ~AutoValidate() { fBC->validate(); }
+ private:
+ const GrGradientBitmapCache* fBC;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/GrGradientShader.cpp b/gfx/skia/skia/src/gpu/gradients/GrGradientShader.cpp
new file mode 100644
index 0000000000..a7946522a3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrGradientShader.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/gradients/GrGradientShader.h"
+
+#include "src/gpu/gradients/generated/GrClampedGradientEffect.h"
+#include "src/gpu/gradients/generated/GrTiledGradientEffect.h"
+
+#include "src/gpu/gradients/generated/GrLinearGradientLayout.h"
+#include "src/gpu/gradients/generated/GrRadialGradientLayout.h"
+#include "src/gpu/gradients/generated/GrSweepGradientLayout.h"
+#include "src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.h"
+
+#include "src/gpu/gradients/GrGradientBitmapCache.h"
+#include "src/gpu/gradients/generated/GrDualIntervalGradientColorizer.h"
+#include "src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.h"
+#include "src/gpu/gradients/generated/GrTextureGradientColorizer.h"
+#include "src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+
+// Intervals smaller than this (that aren't hard stops) on low-precision-only devices force us to
+// use the textured gradient
+static const SkScalar kLowPrecisionIntervalLimit = 0.01f;
+
+// Each cache entry costs 1K or 2K of RAM. Each bitmap will be 1x256 at either 32bpp or 64bpp.
+static const int kMaxNumCachedGradientBitmaps = 32;
+static const int kGradientTextureSize = 256;
+
+// NOTE: signature takes raw pointers to the color/pos arrays and a count to make it easy for
+// make_colorizer to transparently take care of hard stops at the end points of the gradient.
+static std::unique_ptr<GrFragmentProcessor> make_textured_colorizer(const SkPMColor4f* colors,
+ const SkScalar* positions, int count, bool premul, const GrFPArgs& args) {
+ static GrGradientBitmapCache gCache(kMaxNumCachedGradientBitmaps, kGradientTextureSize);
+
+ // Use 8888 or F16, depending on the destination config.
+ // TODO: Use 1010102 for opaque gradients, at least if destination is 1010102?
+ SkColorType colorType = kRGBA_8888_SkColorType;
+ if (GrColorTypeIsWiderThan(args.fDstColorInfo->colorType(), 8)) {
+ auto f16Format = args.fContext->priv().caps()->getDefaultBackendFormat(
+ GrColorType::kRGBA_F16, GrRenderable::kNo);
+ if (f16Format.isValid()) {
+ colorType = kRGBA_F16_SkColorType;
+ }
+ }
+ SkAlphaType alphaType = premul ? kPremul_SkAlphaType : kUnpremul_SkAlphaType;
+
+ SkBitmap bitmap;
+ gCache.getGradient(colors, positions, count, colorType, alphaType, &bitmap);
+ SkASSERT(1 == bitmap.height() && SkIsPow2(bitmap.width()));
+ SkASSERT(bitmap.isImmutable());
+
+ sk_sp<GrTextureProxy> proxy = GrMakeCachedBitmapProxy(
+ args.fContext->priv().proxyProvider(), bitmap);
+ if (proxy == nullptr) {
+ SkDebugf("Gradient won't draw. Could not create texture.");
+ return nullptr;
+ }
+
+ return GrTextureGradientColorizer::Make(std::move(proxy));
+}
+
+// Analyzes the shader's color stops and positions and chooses an appropriate colorizer to represent
+// the gradient.
+static std::unique_ptr<GrFragmentProcessor> make_colorizer(const SkPMColor4f* colors,
+ const SkScalar* positions, int count, bool premul, const GrFPArgs& args) {
+    // If there are hard stops at the beginning or end, the first and/or last color should be
+    // ignored by the colorizer, since it will only be used as a clamped border color. Detecting
+    // and removing these stops up front makes optimizing the remaining color stops simpler.
+
+    // SkGradientShaderBase guarantees that pos[0] == 0 by adding a dummy stop if necessary
+ bool bottomHardStop = SkScalarNearlyEqual(positions[0], positions[1]);
+ // The same is true for pos[end] == 1
+ bool topHardStop = SkScalarNearlyEqual(positions[count - 2], positions[count - 1]);
+
+ int offset = 0;
+ if (bottomHardStop) {
+ offset += 1;
+ count--;
+ }
+ if (topHardStop) {
+ count--;
+ }
+
+ // Two remaining colors means a single interval from 0 to 1
+ // (but it may have originally been a 3 or 4 color gradient with 1-2 hard stops at the ends)
+ if (count == 2) {
+ return GrSingleIntervalGradientColorizer::Make(colors[offset], colors[offset + 1]);
+ }
+
+ // Do an early test for the texture fallback to skip all of the other tests for specific
+ // analytic support of the gradient (and compatibility with the hardware), when it's definitely
+ // impossible to use an analytic solution.
+ bool tryAnalyticColorizer = count <= GrUnrolledBinaryGradientColorizer::kMaxColorCount;
+
+ // The remaining analytic colorizers use scale*t+bias, and the scale/bias values can become
+ // quite large when thresholds are close (but still outside the hardstop limit). If float isn't
+ // 32-bit, output can be incorrect if the thresholds are too close together. However, the
+ // analytic shaders are higher quality, so they can be used with lower precision hardware when
+ // the thresholds are not ill-conditioned.
+ const GrShaderCaps* caps = args.fContext->priv().caps()->shaderCaps();
+ if (!caps->floatIs32Bits() && tryAnalyticColorizer) {
+ // Could run into problems, check if thresholds are close together (with a limit of .01, so
+ // that scales will be less than 100, which leaves 4 decimals of precision on 16-bit).
+ for (int i = offset; i < count - 1; i++) {
+ SkScalar dt = SkScalarAbs(positions[i] - positions[i + 1]);
+ if (dt <= kLowPrecisionIntervalLimit && dt > SK_ScalarNearlyZero) {
+ tryAnalyticColorizer = false;
+ break;
+ }
+ }
+ }
+
+ if (tryAnalyticColorizer) {
+ if (count == 3) {
+ // Must be a dual interval gradient, where the middle point is at offset+1 and the two
+ // intervals share the middle color stop.
+ return GrDualIntervalGradientColorizer::Make(colors[offset], colors[offset + 1],
+ colors[offset + 1], colors[offset + 2],
+ positions[offset + 1]);
+ } else if (count == 4 && SkScalarNearlyEqual(positions[offset + 1],
+ positions[offset + 2])) {
+ // Two separate intervals that join at the same threshold position
+ return GrDualIntervalGradientColorizer::Make(colors[offset], colors[offset + 1],
+ colors[offset + 2], colors[offset + 3],
+ positions[offset + 1]);
+ }
+
+ // The single and dual intervals are a specialized case of the unrolled binary search
+ // colorizer which can analytically render gradients of up to 8 intervals (up to 9 or 16
+ // colors depending on how many hard stops are inserted).
+ std::unique_ptr<GrFragmentProcessor> unrolled = GrUnrolledBinaryGradientColorizer::Make(
+ colors + offset, positions + offset, count);
+ if (unrolled) {
+ return unrolled;
+ }
+ }
+
+ // Otherwise fall back to a rasterized gradient sampled by a texture, which can handle
+ // arbitrary gradients (the only downside being sampling resolution).
+ return make_textured_colorizer(colors + offset, positions + offset, count, premul, args);
+}
+
+// Combines the colorizer and layout with an appropriately configured master effect based on the
+// gradient's tile mode
+static std::unique_ptr<GrFragmentProcessor> make_gradient(const SkGradientShaderBase& shader,
+ const GrFPArgs& args, std::unique_ptr<GrFragmentProcessor> layout) {
+ // No shader is possible if a layout couldn't be created, e.g. a layout-specific Make() returned
+ // null.
+ if (layout == nullptr) {
+ return nullptr;
+ }
+
+ // Convert all colors into destination space and into SkPMColor4fs, and handle
+ // premul issues depending on the interpolation mode
+ bool inputPremul = shader.getGradFlags() & SkGradientShader::kInterpolateColorsInPremul_Flag;
+ bool allOpaque = true;
+ SkAutoSTMalloc<4, SkPMColor4f> colors(shader.fColorCount);
+ SkColor4fXformer xformedColors(shader.fOrigColors4f, shader.fColorCount,
+ shader.fColorSpace.get(), args.fDstColorInfo->colorSpace());
+ for (int i = 0; i < shader.fColorCount; i++) {
+ const SkColor4f& upmColor = xformedColors.fColors[i];
+ colors[i] = inputPremul ? upmColor.premul()
+ : SkPMColor4f{ upmColor.fR, upmColor.fG, upmColor.fB, upmColor.fA };
+ if (allOpaque && !SkScalarNearlyEqual(colors[i].fA, 1.0)) {
+ allOpaque = false;
+ }
+ }
+
+ // SkGradientShader stores positions implicitly when they are evenly spaced, but the getPos()
+ // implementation performs a branch for every position index. Since the shader conversion
+ // requires lots of position tests, calculate all of the positions up front if needed.
+ SkTArray<SkScalar, true> implicitPos;
+ SkScalar* positions;
+ if (shader.fOrigPos) {
+ positions = shader.fOrigPos;
+ } else {
+ implicitPos.reserve(shader.fColorCount);
+ SkScalar posScale = SK_Scalar1 / (shader.fColorCount - 1);
+ for (int i = 0 ; i < shader.fColorCount; i++) {
+ implicitPos.push_back(SkIntToScalar(i) * posScale);
+ }
+ positions = implicitPos.begin();
+ }
+
+ // All gradients are colorized the same way, regardless of layout
+ std::unique_ptr<GrFragmentProcessor> colorizer = make_colorizer(
+ colors.get(), positions, shader.fColorCount, inputPremul, args);
+ if (colorizer == nullptr) {
+ return nullptr;
+ }
+
+    // The master effect has to export premul colors, but under certain conditions it doesn't need
+    // to do anything to achieve that: i.e. it's interpolating already-premul colors (inputPremul)
+    // or all the colors have a = 1, in which case premul is a no-op. Note that this allOpaque
+ // check is more permissive than SkGradientShaderBase's isOpaque(), since we can optimize away
+ // the make-premul op for two point conical gradients (which report false for isOpaque).
+ bool makePremul = !inputPremul && !allOpaque;
+
+ // All tile modes are supported (unless something was added to SkShader)
+ std::unique_ptr<GrFragmentProcessor> master;
+ switch(shader.getTileMode()) {
+ case SkTileMode::kRepeat:
+ master = GrTiledGradientEffect::Make(std::move(colorizer), std::move(layout),
+ /* mirror */ false, makePremul, allOpaque);
+ break;
+ case SkTileMode::kMirror:
+ master = GrTiledGradientEffect::Make(std::move(colorizer), std::move(layout),
+ /* mirror */ true, makePremul, allOpaque);
+ break;
+ case SkTileMode::kClamp:
+ // For the clamped mode, the border colors are the first and last colors, corresponding
+ // to t=0 and t=1, because SkGradientShaderBase enforces that by adding color stops as
+ // appropriate. If there is a hard stop, this grabs the expected outer colors for the
+ // border.
+ master = GrClampedGradientEffect::Make(std::move(colorizer), std::move(layout),
+ colors[0], colors[shader.fColorCount - 1], makePremul, allOpaque);
+ break;
+ case SkTileMode::kDecal:
+ // Even if the gradient colors are opaque, the decal borders are transparent so
+ // disable that optimization
+ master = GrClampedGradientEffect::Make(std::move(colorizer), std::move(layout),
+ SK_PMColor4fTRANSPARENT, SK_PMColor4fTRANSPARENT,
+ makePremul, /* colorsAreOpaque */ false);
+ break;
+ }
+
+ if (master == nullptr) {
+ // Unexpected tile mode
+ return nullptr;
+ }
+ if (args.fInputColorIsOpaque) {
+ return GrFragmentProcessor::OverrideInput(std::move(master), SK_PMColor4fWHITE, false);
+ }
+ return GrFragmentProcessor::MulChildByInputAlpha(std::move(master));
+}
+
+namespace GrGradientShader {
+
+std::unique_ptr<GrFragmentProcessor> MakeLinear(const SkLinearGradient& shader,
+ const GrFPArgs& args) {
+ return make_gradient(shader, args, GrLinearGradientLayout::Make(shader, args));
+}
+
+std::unique_ptr<GrFragmentProcessor> MakeRadial(const SkRadialGradient& shader,
+ const GrFPArgs& args) {
+    return make_gradient(shader, args, GrRadialGradientLayout::Make(shader, args));
+}
+
+std::unique_ptr<GrFragmentProcessor> MakeSweep(const SkSweepGradient& shader,
+ const GrFPArgs& args) {
+    return make_gradient(shader, args, GrSweepGradientLayout::Make(shader, args));
+}
+
+std::unique_ptr<GrFragmentProcessor> MakeConical(const SkTwoPointConicalGradient& shader,
+ const GrFPArgs& args) {
+ return make_gradient(shader, args, GrTwoPointConicalGradientLayout::Make(shader, args));
+}
+
+#if GR_TEST_UTILS
+RandomParams::RandomParams(SkRandom* random) {
+ // Set color count to min of 2 so that we don't trigger the const color optimization and make
+ // a non-gradient processor.
+ fColorCount = random->nextRangeU(2, kMaxRandomGradientColors);
+ fUseColors4f = random->nextBool();
+
+    // if one color, omit stops; otherwise randomly decide whether or not to use them
+ if (fColorCount == 1 || (fColorCount >= 2 && random->nextBool())) {
+ fStops = nullptr;
+ } else {
+ fStops = fStopStorage;
+ }
+
+ // if using SkColor4f, attach a random (possibly null) color space (with linear gamma)
+ if (fUseColors4f) {
+ fColorSpace = GrTest::TestColorSpace(random);
+ }
+
+ SkScalar stop = 0.f;
+ for (int i = 0; i < fColorCount; ++i) {
+ if (fUseColors4f) {
+ fColors4f[i].fR = random->nextUScalar1();
+ fColors4f[i].fG = random->nextUScalar1();
+ fColors4f[i].fB = random->nextUScalar1();
+ fColors4f[i].fA = random->nextUScalar1();
+ } else {
+ fColors[i] = random->nextU();
+ }
+ if (fStops) {
+ fStops[i] = stop;
+ stop = i < fColorCount - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f;
+ }
+ }
+ fTileMode = static_cast<SkTileMode>(random->nextULessThan(kSkTileModeCount));
+}
+#endif
+
+}
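Tracing the selection logic above with a concrete input: a kClamp gradient with colors {A, A, B, C} at positions {0, 0, 0.5, 1} has a bottom hard stop, so offset becomes 1 and count drops to 3. Assuming hardware with full 32-bit floats, make_colorizer then builds a GrDualIntervalGradientColorizer over (A, B, B, C) with threshold 0.5, and the duplicated A at t = 0 survives only as the left border color of the enclosing GrClampedGradientEffect.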
diff --git a/gfx/skia/skia/src/gpu/gradients/GrGradientShader.h b/gfx/skia/skia/src/gpu/gradients/GrGradientShader.h
new file mode 100644
index 0000000000..c8710e7bf4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrGradientShader.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGradientShader_DEFINE
+#define GrGradientShader_DEFINE
+
+#include "src/gpu/GrFPArgs.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+#include "src/shaders/gradients/SkLinearGradient.h"
+#include "src/shaders/gradients/SkRadialGradient.h"
+#include "src/shaders/gradients/SkSweepGradient.h"
+#include "src/shaders/gradients/SkTwoPointConicalGradient.h"
+
+#if GR_TEST_UTILS
+#include "include/utils/SkRandom.h"
+#endif
+
+namespace GrGradientShader {
+ std::unique_ptr<GrFragmentProcessor> MakeLinear(const SkLinearGradient& shader,
+ const GrFPArgs& args);
+
+ std::unique_ptr<GrFragmentProcessor> MakeRadial(const SkRadialGradient& shader,
+ const GrFPArgs& args);
+
+ std::unique_ptr<GrFragmentProcessor> MakeSweep(const SkSweepGradient& shader,
+ const GrFPArgs& args);
+
+ std::unique_ptr<GrFragmentProcessor> MakeConical(const SkTwoPointConicalGradient& shader,
+ const GrFPArgs& args);
+
+#if GR_TEST_UTILS
+ /** Helper struct that stores (and populates) parameters to construct a random gradient.
+ If fUseColors4f is true, then the SkColor4f factory should be called, with fColors4f and
+ fColorSpace. Otherwise, the SkColor factory should be called, with fColors. fColorCount
+ will be the number of color stops in either case, and fColors and fStops can be passed to
+ the gradient factory. (The constructor may decide not to use stops, in which case fStops
+ will be nullptr). */
+ struct RandomParams {
+ static constexpr int kMaxRandomGradientColors = 5;
+
+ // Should be of similar magnitude to the draw area of the tests so that the gradient
+ // sampling is done at an appropriate scale.
+ static constexpr SkScalar kGradientScale = 256.0f;
+
+ RandomParams(SkRandom* r);
+
+ bool fUseColors4f;
+ SkColor fColors[kMaxRandomGradientColors];
+ SkColor4f fColors4f[kMaxRandomGradientColors];
+ sk_sp<SkColorSpace> fColorSpace;
+ SkScalar fStopStorage[kMaxRandomGradientColors];
+ SkTileMode fTileMode;
+ int fColorCount;
+ SkScalar* fStops;
+ };
+#endif
+
+}
+
+#endif // GrGradientShader_DEFINE
diff --git a/gfx/skia/skia/src/gpu/gradients/GrLinearGradientLayout.fp b/gfx/skia/skia/src/gpu/gradients/GrLinearGradientLayout.fp
new file mode 100644
index 0000000000..1b66dc9d6f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrLinearGradientLayout.fp
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in half4x4 gradientMatrix;
+
+@coordTransform {
+ gradientMatrix
+}
+
+void main() {
+    // We add a tiny delta to t. When gradient stops are set up so that a hard stop in a vertically
+    // or horizontally oriented gradient falls exactly at a column or row of pixel centers we can
+    // get slightly different interpolated t values along the column/row. By adding the delta we
+    // will consistently get the color to the "right" of the stop. Of course if the hard stop
+ // falls at X.5 - delta then we still could get inconsistent results, but that is much less
+ // likely. crbug.com/938592
+ // If/when we add filtering of the gradient this can be removed.
+ half t = half(sk_TransformedCoords2D[0].x) + 0.00001;
+ sk_OutColor = half4(t, 1, 0, 0); // y = 1 for always valid
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@header {
+ #include "src/gpu/gradients/GrGradientShader.h"
+ #include "src/shaders/gradients/SkLinearGradient.h"
+}
+
+// The linear gradient never rejects a pixel so it doesn't change opacity
+@optimizationFlags {
+ kPreservesOpaqueInput_OptimizationFlag
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkLinearGradient& gradient,
+ const GrFPArgs& args);
+}
+
+@cppEnd {
+ std::unique_ptr<GrFragmentProcessor> GrLinearGradientLayout::Make(
+ const SkLinearGradient& grad, const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(new GrLinearGradientLayout(matrix));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@test(d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkPoint points[] = {{d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)},
+ {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)}};
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f ?
+ SkGradientShader::MakeLinear(points, params.fColors4f, params.fColorSpace, params.fStops,
+ params.fColorCount, params.fTileMode) :
+ SkGradientShader::MakeLinear(points, params.fColors, params.fStops,
+ params.fColorCount, params.fTileMode);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+ GrAlwaysAssert(fp);
+ return fp;
+}
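The half4(t, 1, 0, 0) convention above is the contract between the layout FPs and the master effects: the x component carries the gradient interpolant, and the y component is a validity flag that GrClampedGradientEffect and GrTiledGradientEffect test with t.y < 0 before invoking the colorizer. The linear, radial, and sweep layouts always emit y = 1; only the two point conical layout produces a negative flag to reject a fragment.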
diff --git a/gfx/skia/skia/src/gpu/gradients/GrRadialGradientLayout.fp b/gfx/skia/skia/src/gpu/gradients/GrRadialGradientLayout.fp
new file mode 100644
index 0000000000..47b53cabd9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrRadialGradientLayout.fp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in half4x4 gradientMatrix;
+
+@coordTransform {
+ gradientMatrix
+}
+
+void main() {
+ half t = half(length(sk_TransformedCoords2D[0]));
+ sk_OutColor = half4(t, 1, 0, 0); // y = 1 for always valid
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@header {
+ #include "src/gpu/gradients/GrGradientShader.h"
+ #include "src/shaders/gradients/SkRadialGradient.h"
+}
+
+// The radial gradient never rejects a pixel so it doesn't change opacity
+@optimizationFlags {
+ kPreservesOpaqueInput_OptimizationFlag
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkRadialGradient& gradient,
+ const GrFPArgs& args);
+}
+
+@cppEnd {
+ std::unique_ptr<GrFragmentProcessor> GrRadialGradientLayout::Make(
+ const SkRadialGradient& grad, const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(new GrRadialGradientLayout(matrix));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@test(d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ std::unique_ptr<GrFragmentProcessor> fp;
+ GrTest::TestAsFPArgs asFPArgs(d);
+ do {
+ GrGradientShader::RandomParams params(d->fRandom);
+ SkPoint center = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkScalar radius = d->fRandom->nextRangeScalar(0.0f, scale);
+ sk_sp<SkShader> shader = params.fUseColors4f
+ ? SkGradientShader::MakeRadial(center, radius, params.fColors4f,
+ params.fColorSpace, params.fStops,
+ params.fColorCount, params.fTileMode)
+ : SkGradientShader::MakeRadial(center, radius, params.fColors,
+ params.fStops, params.fColorCount,
+ params.fTileMode);
+ // Degenerate params can create an Empty (non-null) shader, where fp will be nullptr
+ fp = shader ? as_SB(shader)->asFragmentProcessor(asFPArgs.args()) : nullptr;
+ } while (!fp);
+ return fp;
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/GrSingleIntervalGradientColorizer.fp b/gfx/skia/skia/src/gpu/gradients/GrSingleIntervalGradientColorizer.fp
new file mode 100644
index 0000000000..621bd9731b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrSingleIntervalGradientColorizer.fp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This only supports a 2-color single interval so it is a simple linear interpolation between the
+// two end points based on t. But it serves as a good test for connecting all of the plumbing into a
+// functional gradient shader.
+
+layout(ctype=SkPMColor4f, tracked) in uniform half4 start;
+layout(ctype=SkPMColor4f, tracked) in uniform half4 end;
+
+void main() {
+ half t = sk_InColor.x;
+
+ // Clamping and/or wrapping was already handled by the parent shader so the output color is a
+ // simple lerp.
+ sk_OutColor = (1 - t) * start + t * end;
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/GrSweepGradientLayout.fp b/gfx/skia/skia/src/gpu/gradients/GrSweepGradientLayout.fp
new file mode 100644
index 0000000000..09c1e6098b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrSweepGradientLayout.fp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+in half4x4 gradientMatrix;
+
+layout(tracked) in uniform half bias;
+layout(tracked) in uniform half scale;
+
+@coordTransform {
+ gradientMatrix
+}
+
+void main() {
+    // Some devices incorrectly implement atan2(y,x) as atan(y/x). In actuality it is
+    // atan2(y,x) = 2 * atan(y / (sqrt(x^2 + y^2) + x)). So to work around this we pass in (sqrt(x^2
+    // + y^2) + x) as the second parameter to atan2 in these cases. We let the device handle the
+    // undefined behavior of the second parameter being 0 instead of doing the divide ourselves and
+    // using atan instead.
+ half angle;
+ if (sk_Caps.atan2ImplementedAsAtanYOverX) {
+ angle = half(2 * atan(-sk_TransformedCoords2D[0].y,
+ length(sk_TransformedCoords2D[0]) - sk_TransformedCoords2D[0].x));
+ } else {
+ angle = half(atan(-sk_TransformedCoords2D[0].y, -sk_TransformedCoords2D[0].x));
+ }
+
+    // 0.1591549430918 is 1/(2*pi), used since atan2 returns values in [-pi, pi]
+ half t = (angle * 0.1591549430918 + 0.5 + bias) * scale;
+ sk_OutColor = half4(t, 1, 0, 0); // y = 1 for always valid
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@header {
+ #include "src/gpu/gradients/GrGradientShader.h"
+ #include "src/shaders/gradients/SkSweepGradient.h"
+}
+
+// The sweep gradient never rejects a pixel so it doesn't change opacity
+@optimizationFlags {
+ kPreservesOpaqueInput_OptimizationFlag
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkSweepGradient& gradient,
+ const GrFPArgs& args);
+}
+
+@cppEnd {
+ std::unique_ptr<GrFragmentProcessor> GrSweepGradientLayout::Make(
+ const SkSweepGradient& grad, const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(new GrSweepGradientLayout(
+ matrix, grad.getTBias(), grad.getTScale()));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@test(d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkPoint center = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f ?
+ SkGradientShader::MakeSweep(center.fX, center.fY, params.fColors4f, params.fColorSpace,
+ params.fStops, params.fColorCount) :
+ SkGradientShader::MakeSweep(center.fX, center.fY, params.fColors,
+ params.fStops, params.fColorCount);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+ GrAlwaysAssert(fp);
+ return fp;
+}
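The half-angle identity relied on above can be verified off-GPU; a self-contained C++ sketch (standard library only):

    #include <cmath>
    #include <cstdio>

    int main() {
        // atan2(y, x) == 2 * atan(y / (sqrt(x*x + y*y) + x)) everywhere except
        // on the negative x-axis, where the denominator vanishes.
        double x = 0.3, y = -0.8;
        double lhs = std::atan2(y, x);
        double rhs = 2 * std::atan(y / (std::sqrt(x * x + y * y) + x));
        std::printf("%f %f\n", lhs, rhs);  // prints the same angle twice
        return 0;
    }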
diff --git a/gfx/skia/skia/src/gpu/gradients/GrTextureGradientColorizer.fp b/gfx/skia/skia/src/gpu/gradients/GrTextureGradientColorizer.fp
new file mode 100644
index 0000000000..c3265bcb3a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrTextureGradientColorizer.fp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Should have height = 1px, horizontal axis represents t = 0 to 1
+in uniform sampler2D gradient;
+
+@samplerParams(gradient) {
+ GrSamplerState::ClampBilerp()
+}
+
+void main() {
+ half2 coord = half2(sk_InColor.x, 0.5);
+ sk_OutColor = sample(gradient, coord);
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/GrTiledGradientEffect.fp b/gfx/skia/skia/src/gpu/gradients/GrTiledGradientEffect.fp
new file mode 100644
index 0000000000..aba5b6c74c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrTiledGradientEffect.fp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Provides tiling for the repeat or mirror modes.
+
+in fragmentProcessor colorizer;
+in fragmentProcessor gradLayout;
+
+layout(key) in bool mirror;
+layout(key) in bool makePremul;
+// Trust the creator that this matches the color spec of the gradient
+in bool colorsAreOpaque;
+
+void main() {
+ half4 t = sample(gradLayout);
+
+ if (!gradLayout.preservesOpaqueInput && t.y < 0) {
+        // layout has rejected this fragment (rely on sksl to remove this branch if the layout FP
+        // preserves opacity)
+ sk_OutColor = half4(0);
+ } else {
+ @if(mirror) {
+ half t_1 = t.x - 1;
+ half tiled_t = t_1 - 2 * floor(t_1 * 0.5) - 1;
+ if (sk_Caps.mustDoOpBetweenFloorAndAbs) {
+                // At this point the expected value of tiled_t should be between -1 and 1, so this
+ // clamp has no effect other than to break up the floor and abs calls and make sure
+ // the compiler doesn't merge them back together.
+ tiled_t = clamp(tiled_t, -1, 1);
+ }
+ t.x = abs(tiled_t);
+ } else {
+ // Simple repeat mode
+ t.x = fract(t.x);
+ }
+
+ // t.x has been tiled (repeat or mirrored), but pass through remaining 3 components
+ // unmodified.
+ sk_OutColor = sample(colorizer, t);
+ }
+
+ @if (makePremul) {
+ sk_OutColor.xyz *= sk_OutColor.w;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+// If the layout does not preserve opacity, remove the opaque optimization,
+// but otherwise respect the provided color opacity state.
+@optimizationFlags {
+ kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ (colorsAreOpaque && gradLayout->preservesOpaqueInput() ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags)
+}
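Tracing the mirror arithmetic above with sample values: for t.x = 1.25, t_1 = 0.25 and floor(t_1 * 0.5) = 0, so tiled_t = 0.25 - 0 - 1 = -0.75 and abs() yields 0.75 = 2 - 1.25, the mirrored coordinate. For t.x = 0.25, t_1 = -0.75 and floor(-0.375) = -1, so tiled_t = -0.75 + 2 - 1 = 0.25 and the value passes through unchanged. The expression is a triangle wave with period 2, which is exactly mirror tiling.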
diff --git a/gfx/skia/skia/src/gpu/gradients/GrTwoPointConicalGradientLayout.fp b/gfx/skia/skia/src/gpu/gradients/GrTwoPointConicalGradientLayout.fp
new file mode 100644
index 0000000000..319221359a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrTwoPointConicalGradientLayout.fp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Equivalent to SkTwoPointConicalGradient::Type
+enum class Type {
+ kRadial, kStrip, kFocal
+};
+
+in half4x4 gradientMatrix;
+
+layout(key) in Type type;
+layout(key) in bool isRadiusIncreasing;
+
+// Focal-specific optimizations
+layout(key) in bool isFocalOnCircle;
+layout(key) in bool isWellBehaved;
+layout(key) in bool isSwapped;
+layout(key) in bool isNativelyFocal;
+
+// focalParams is interpreted differently depending on whether the type is focal or degenerate.
+// When degenerate, focalParams = (r0, r0^2), so strips will use .y and kRadial will use .x.
+// When focal, focalParams = (1/r1, focalX = r0/(r0-r1)). The correct parameters are calculated
+// once in Make for each FP.
+layout(tracked) in uniform half2 focalParams;
+
+@coordTransform {
+ gradientMatrix
+}
+
+void main() {
+    // p typed as a float2 is intentional; while a half2 is adequate for most normal cases in the
+    // two point conic gradient's coordinate system, when the gradient is composed with a local
+    // perspective matrix, certain out-of-bounds regions become ill-behaved on mobile devices.
+    // On desktops, they are properly clamped after the fact, but on many Adreno GPUs the
+    // calculations of t and x_t below overflow and produce an incorrect interpolant (which then
+    // renders the wrong border color sporadically). Increasing precision alleviates that issue.
+ float2 p = sk_TransformedCoords2D[0];
+ float t = -1;
+ half v = 1; // validation flag, set to negative to discard fragment later
+
+ @switch(type) {
+ case Type::kStrip: {
+ half r0_2 = focalParams.y;
+ t = r0_2 - p.y * p.y;
+ if (t >= 0) {
+ t = p.x + sqrt(t);
+ } else {
+ v = -1;
+ }
+ }
+ break;
+ case Type::kRadial: {
+ half r0 = focalParams.x;
+ @if(isRadiusIncreasing) {
+ t = length(p) - r0;
+ } else {
+ t = -length(p) - r0;
+ }
+ }
+ break;
+ case Type::kFocal: {
+ half invR1 = focalParams.x;
+ half fx = focalParams.y;
+
+ float x_t = -1;
+ @if (isFocalOnCircle) {
+ x_t = dot(p, p) / p.x;
+ } else if (isWellBehaved) {
+ x_t = length(p) - p.x * invR1;
+ } else {
+ float temp = p.x * p.x - p.y * p.y;
+
+                // Only do sqrt if temp >= 0; this is significantly slower than checking temp >= 0
+                // in the if statement that checks r(t) >= 0. But the GPU may break if we sqrt a
+                // negative float. (Although I haven't observed that on any devices so far, and the
+                // old approach also took the sqrt of a negative value without a check.) If the
+                // performance is really critical, maybe we should just compute the area where temp
+                // and x_t are always valid and drop all these ifs.
+ if (temp >= 0) {
+ @if(isSwapped || !isRadiusIncreasing) {
+ x_t = -sqrt(temp) - p.x * invR1;
+ } else {
+ x_t = sqrt(temp) - p.x * invR1;
+ }
+ }
+ }
+
+ // The final calculation of t from x_t has lots of static optimizations but only do them
+ // when x_t is positive (which can be assumed true if isWellBehaved is true)
+ @if (!isWellBehaved) {
+ // This will still calculate t even though it will be ignored later in the pipeline
+ // to avoid a branch
+ if (x_t <= 0.0) {
+ v = -1;
+ }
+ }
+ @if (isRadiusIncreasing) {
+ @if (isNativelyFocal) {
+ t = x_t;
+ } else {
+ t = x_t + fx;
+ }
+ } else {
+ @if (isNativelyFocal) {
+ t = -x_t;
+ } else {
+ t = -x_t + fx;
+ }
+ }
+
+ @if(isSwapped) {
+ t = 1 - t;
+ }
+ }
+ break;
+ }
+
+ sk_OutColor = half4(half(t), v, 0, 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@header {
+ #include "src/gpu/gradients/GrGradientShader.h"
+ #include "src/shaders/gradients/SkTwoPointConicalGradient.h"
+}
+
+// The 2 point conical gradient can reject a pixel, so it can change opacity
+// even if the input was opaque; disable that optimization
+@optimizationFlags {
+ kNone_OptimizationFlags
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkTwoPointConicalGradient& gradient,
+ const GrFPArgs& args);
+}
+
+@cppEnd {
+ // .fp files do not let you reference outside enum definitions, so we have to explicitly map
+ // between the two compatible enum defs
+ GrTwoPointConicalGradientLayout::Type convert_type(
+ SkTwoPointConicalGradient::Type type) {
+ switch(type) {
+ case SkTwoPointConicalGradient::Type::kRadial:
+ return GrTwoPointConicalGradientLayout::Type::kRadial;
+ case SkTwoPointConicalGradient::Type::kStrip:
+ return GrTwoPointConicalGradientLayout::Type::kStrip;
+ case SkTwoPointConicalGradient::Type::kFocal:
+ return GrTwoPointConicalGradientLayout::Type::kFocal;
+ }
+ SkDEBUGFAIL("Should not be reachable");
+ return GrTwoPointConicalGradientLayout::Type::kRadial;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> GrTwoPointConicalGradientLayout::Make(
+ const SkTwoPointConicalGradient& grad, const GrFPArgs& args) {
+ GrTwoPointConicalGradientLayout::Type grType = convert_type(grad.getType());
+
+ // The focalData struct is only valid if isFocal is true
+ const SkTwoPointConicalGradient::FocalData& focalData = grad.getFocalData();
+ bool isFocal = grType == Type::kFocal;
+
+ // Calculate optimization switches from gradient specification
+ bool isFocalOnCircle = isFocal && focalData.isFocalOnCircle();
+ bool isWellBehaved = isFocal && focalData.isWellBehaved();
+ bool isSwapped = isFocal && focalData.isSwapped();
+ bool isNativelyFocal = isFocal && focalData.isNativelyFocal();
+
+ // Type-specific calculations: isRadiusIncreasing, focalParams, and the gradient matrix.
+ // However, all types start with the total inverse local matrix calculated from the shader
+ // and args
+ bool isRadiusIncreasing;
+ SkPoint focalParams; // really just a 2D tuple
+ SkMatrix matrix;
+
+ // Initialize the base matrix
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+
+ if (isFocal) {
+ isRadiusIncreasing = (1 - focalData.fFocalX) > 0;
+
+ focalParams.set(1.0 / focalData.fR1, focalData.fFocalX);
+
+ matrix.postConcat(grad.getGradientMatrix());
+ } else if (grType == Type::kRadial) {
+ SkScalar dr = grad.getDiffRadius();
+ isRadiusIncreasing = dr >= 0;
+
+ SkScalar r0 = grad.getStartRadius() / dr;
+ focalParams.set(r0, r0 * r0);
+
+
+ // GPU radial matrix is different from the original matrix, since we map the diff radius
+ // to have |dr| = 1, so manually compute the final gradient matrix here.
+
+ // Map center to (0, 0)
+ matrix.postTranslate(-grad.getStartCenter().fX, -grad.getStartCenter().fY);
+
+ // scale |diffRadius| to 1
+ matrix.postScale(1 / dr, 1 / dr);
+ } else { // kStrip
+ isRadiusIncreasing = false; // kStrip doesn't use this flag
+
+ SkScalar r0 = grad.getStartRadius() / grad.getCenterX1();
+ focalParams.set(r0, r0 * r0);
+
+
+ matrix.postConcat(grad.getGradientMatrix());
+ }
+
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrTwoPointConicalGradientLayout(
+ matrix, grType, isRadiusIncreasing, isFocalOnCircle, isWellBehaved,
+ isSwapped, isNativelyFocal, focalParams));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@test(d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkScalar offset = scale / 32.0f;
+
+ SkPoint center1 = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkPoint center2 = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkScalar radius1 = d->fRandom->nextRangeScalar(0.0f, scale);
+ SkScalar radius2 = d->fRandom->nextRangeScalar(0.0f, scale);
+
+ constexpr int kTestTypeMask = (1 << 2) - 1,
+ kTestNativelyFocalBit = (1 << 2),
+ kTestFocalOnCircleBit = (1 << 3),
+ kTestSwappedBit = (1 << 4);
+    // We won't treat isWellBehaved and isRadiusIncreasing specially because they
+    // should have a high probability of being turned on and off as we're getting
+    // random radii and centers.
+
+ int mask = d->fRandom->nextU();
+ int type = mask & kTestTypeMask;
+ if (type == static_cast<int>(Type::kRadial)) {
+ center2 = center1;
+ // Make sure that the radii are different
+ if (SkScalarNearlyZero(radius1 - radius2)) {
+ radius2 += offset;
+ }
+ } else if (type == static_cast<int>(Type::kStrip)) {
+ radius1 = SkTMax(radius1, .1f); // Make sure that the radius is non-zero
+ radius2 = radius1;
+ // Make sure that the centers are different
+ if (SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+ center2.fX += offset;
+ }
+    } else { // kFocal
+ // Make sure that the centers are different
+ if (SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+ center2.fX += offset;
+ }
+
+ if (kTestNativelyFocalBit & mask) {
+ radius1 = 0;
+ }
+ if (kTestFocalOnCircleBit & mask) {
+ radius2 = radius1 + SkPoint::Distance(center1, center2);
+ }
+ if (kTestSwappedBit & mask) {
+ std::swap(radius1, radius2);
+ radius2 = 0;
+ }
+
+ // Make sure that the radii are different
+ if (SkScalarNearlyZero(radius1 - radius2)) {
+ radius2 += offset;
+ }
+ }
+
+ if (SkScalarNearlyZero(radius1 - radius2) &&
+ SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+        radius2 += offset; // make sure that we're not degenerate
+ }
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f ?
+ SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ params.fColors4f, params.fColorSpace, params.fStops,
+ params.fColorCount, params.fTileMode) :
+ SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ params.fColors, params.fStops,
+ params.fColorCount, params.fTileMode);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+
+ GrAlwaysAssert(fp);
+ return fp;
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/GrUnrolledBinaryGradientColorizer.fp b/gfx/skia/skia/src/gpu/gradients/GrUnrolledBinaryGradientColorizer.fp
new file mode 100644
index 0000000000..d5d7053498
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/GrUnrolledBinaryGradientColorizer.fp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Unrolled gradient code supporting up to 8 intervals that produces code
+// targeting a specific interval count.
+
+// Assumed to be between 1 and 8.
+layout(key) in int intervalCount;
+
+// With the current hard stop detection threshold of 0.00024, the maximum scale and bias values
+// will be on the order of 4k (since they divide by dt). That is well outside the precision
+// capabilities of half floats, which can lead to inaccurate gradient calculations, so these
+// uniforms are declared as float4 rather than half4.
+layout(ctype=SkPMColor4f) in uniform float4 scale0_1;
+layout(ctype=SkPMColor4f, when=intervalCount > 1) in uniform float4 scale2_3;
+layout(ctype=SkPMColor4f, when=intervalCount > 2) in uniform float4 scale4_5;
+layout(ctype=SkPMColor4f, when=intervalCount > 3) in uniform float4 scale6_7;
+layout(ctype=SkPMColor4f, when=intervalCount > 4) in uniform float4 scale8_9;
+layout(ctype=SkPMColor4f, when=intervalCount > 5) in uniform float4 scale10_11;
+layout(ctype=SkPMColor4f, when=intervalCount > 6) in uniform float4 scale12_13;
+layout(ctype=SkPMColor4f, when=intervalCount > 7) in uniform float4 scale14_15;
+
+layout(ctype=SkPMColor4f) in uniform float4 bias0_1;
+layout(ctype=SkPMColor4f, when=intervalCount > 1) in uniform float4 bias2_3;
+layout(ctype=SkPMColor4f, when=intervalCount > 2) in uniform float4 bias4_5;
+layout(ctype=SkPMColor4f, when=intervalCount > 3) in uniform float4 bias6_7;
+layout(ctype=SkPMColor4f, when=intervalCount > 4) in uniform float4 bias8_9;
+layout(ctype=SkPMColor4f, when=intervalCount > 5) in uniform float4 bias10_11;
+layout(ctype=SkPMColor4f, when=intervalCount > 6) in uniform float4 bias12_13;
+layout(ctype=SkPMColor4f, when=intervalCount > 7) in uniform float4 bias14_15;
+
+// The 7 threshold positions that define the boundaries of the 8 intervals (excluding t = 0 and
+// t = 1) are packed into two half4's instead of having up to 7 separate scalar uniforms. For low
+// interval counts, the extra components are ignored in the shader, but the uniform simplification
+// is worth it. It is assumed thresholds are provided in increasing value, mapped as:
+// - thresholds1_7.x = boundary between (0,1) and (2,3) -> 1_2
+// - .y = boundary between (2,3) and (4,5) -> 3_4
+// - .z = boundary between (4,5) and (6,7) -> 5_6
+// - .w = boundary between (6,7) and (8,9) -> 7_8
+// - thresholds9_13.x = boundary between (8,9) and (10,11) -> 9_10
+// - .y = boundary between (10,11) and (12,13) -> 11_12
+// - .z = boundary between (12,13) and (14,15) -> 13_14
+// - .w = unused
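+//
+// For example, with intervalCount == 3 the binary search in main() only ever
+// reads thresholds1_7.x and thresholds1_7.y; the remaining components are still
+// uploaded but never compared against.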
+in uniform half4 thresholds1_7;
+in uniform half4 thresholds9_13;
+
+void main() {
+ half t = sk_InColor.x;
+
+ float4 scale, bias;
+ // Explicit binary search for the proper interval that t falls within. The interval count
+ // checks are converted into constant expressions in the C++ generated SkSL, which are then
+ // optimized to the minimal number of branches for the specific interval count.
+
+ // thresholds1_7.w is mid point for intervals (0,7) and (8,15)
+ if (intervalCount <= 4 || t < thresholds1_7.w) {
+ // thresholds1_7.y is mid point for intervals (0,3) and (4,7)
+ if (intervalCount <= 2 || t < thresholds1_7.y) {
+ // thresholds1_7.x is mid point for intervals (0,1) and (2,3)
+ if (intervalCount <= 1 || t < thresholds1_7.x) {
+ scale = scale0_1;
+ bias = bias0_1;
+ } else {
+ scale = scale2_3;
+ bias = bias2_3;
+ }
+ } else {
+ // thresholds1_7.z is mid point for intervals (4,5) and (6,7)
+ if (intervalCount <= 3 || t < thresholds1_7.z) {
+ scale = scale4_5;
+ bias = bias4_5;
+ } else {
+ scale = scale6_7;
+ bias = bias6_7;
+ }
+ }
+ } else {
+ // thresholds9_13.y is mid point for intervals (8,11) and (12,15)
+ if (intervalCount <= 6 || t < thresholds9_13.y) {
+ // thresholds9_13.x is mid point for intervals (8,9) and (10,11)
+ if (intervalCount <= 5 || t < thresholds9_13.x) {
+ // interval 8-9
+ scale = scale8_9;
+ bias = bias8_9;
+ } else {
+ // interval 10-11
+ scale = scale10_11;
+ bias = bias10_11;
+ }
+ } else {
+ // thresholds9_13.z is mid point for intervals (12,13) and (14,15)
+ if (intervalCount <= 7 || t < thresholds9_13.z) {
+ // interval 12-13
+ scale = scale12_13;
+ bias = bias12_13;
+ } else {
+ // interval 14-15
+ scale = scale14_15;
+ bias = bias14_15;
+ }
+ }
+ }
+
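+    // The interpolation below runs at float precision and is only narrowed to
+    // half at the end, per the precision note on the scale/bias uniforms above.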
+ sk_OutColor = half4(t * scale + bias);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+@class {
+ static const int kMaxColorCount = 16;
+}
+
+@make {
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkPMColor4f* colors,
+ const SkScalar* positions,
+ int count);
+}
+
+@cppEnd {
+ static const int kMaxIntervals = 8;
+ std::unique_ptr<GrFragmentProcessor> GrUnrolledBinaryGradientColorizer::Make(
+ const SkPMColor4f* colors, const SkScalar* positions, int count) {
+ // Depending on how the positions resolve into hard stops or regular stops, the number of
+ // intervals specified by the number of colors/positions can change. For instance, a plain
+ // 3 color gradient is two intervals, but a 4 color gradient with a hard stop is also
+ // two intervals. At the most extreme end, an 8 interval gradient made entirely of hard
+ // stops has 16 colors.
+
+ if (count > kMaxColorCount) {
+ // Definitely cannot represent this gradient configuration
+ return nullptr;
+ }
+
+ // The raster implementation also uses scales and biases, but since they must be calculated
+ // after the dst color space is applied, it limits our ability to cache their values.
+ SkPMColor4f scales[kMaxIntervals];
+ SkPMColor4f biases[kMaxIntervals];
+ SkScalar thresholds[kMaxIntervals];
+
+ int intervalCount = 0;
+
+ for (int i = 0; i < count - 1; i++) {
+ if (intervalCount >= kMaxIntervals) {
+ // Already reached kMaxIntervals, and haven't run out of color stops so this
+ // gradient cannot be represented by this shader.
+ return nullptr;
+ }
+
+ SkScalar t0 = positions[i];
+ SkScalar t1 = positions[i + 1];
+ SkScalar dt = t1 - t0;
+ // If the interval is empty, skip to the next interval. This will automatically create
+ // distinct hard stop intervals as needed. It also protects against malformed gradients
+ // that have repeated hard stops at the very beginning that are effectively unreachable.
+ if (SkScalarNearlyZero(dt)) {
+ continue;
+ }
+
+ auto c0 = Sk4f::Load(colors[i].vec());
+ auto c1 = Sk4f::Load(colors[i + 1].vec());
+
+ auto scale = (c1 - c0) / dt;
+ auto bias = c0 - t0 * scale;
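+            // e.g. stops at t0 = 0.25 and t1 = 0.75 with a 0 -> 1 channel give
+            // scale = (1 - 0) / 0.5 = 2 and bias = 0 - 0.25 * 2 = -0.5, so that
+            // channel evaluates as 2t - 0.5 across the interval.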
+
+ scale.store(scales + intervalCount);
+ bias.store(biases + intervalCount);
+ thresholds[intervalCount] = t1;
+ intervalCount++;
+ }
+
+ // For isEqual to make sense, set the unused values to something consistent
+ for (int i = intervalCount; i < kMaxIntervals; i++) {
+ scales[i] = SK_PMColor4fTRANSPARENT;
+ biases[i] = SK_PMColor4fTRANSPARENT;
+ thresholds[i] = 0.0;
+ }
+
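+        // The two SkRects below are just convenient 4-float carriers for the
+        // half4 threshold uniforms declared in the .fp file.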
+ return std::unique_ptr<GrFragmentProcessor>(new GrUnrolledBinaryGradientColorizer(
+ intervalCount, scales[0], scales[1], scales[2], scales[3], scales[4], scales[5],
+ scales[6], scales[7], biases[0], biases[1], biases[2], biases[3], biases[4],
+ biases[5], biases[6], biases[7],
+ SkRect::MakeLTRB(thresholds[0], thresholds[1], thresholds[2], thresholds[3]),
+ SkRect::MakeLTRB(thresholds[4], thresholds[5], thresholds[6], 0.0)));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/README.md b/gfx/skia/skia/src/gpu/gradients/README.md
new file mode 100644
index 0000000000..72ed05a2c9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/README.md
@@ -0,0 +1,71 @@
+Gradients on the GPU
+====================
+
+Gradients can be thought of, at a very high level, as three pieces:
+
+1. A color interpolator that is one dimensional, returning a color for an input
+   within the range [0.0, 1.0]. This abstracts away the definition of specific
+   color stops and how to wrap, tile, or clamp out-of-bounds inputs. A color
+   interpolator will be named GrYGradientColorizer.
+2. A layout that converts from 2D geometry/position to the one dimensional
+ domain of the color interpolator. This is how a linear or radial gradient
+ distinguishes itself. When designing a new gradient, this is the component
+ that you have to implement. A layout will generally be named
+ GrXGradientLayout
+3. A master effect that composes the layout and color interpolator together. It
+ is also responsible for implementing the clamping behavior that can be
+ abstracted away from both the layout and colorization.
+
+
+GrClampedGradientEffect handles clamped and decal tile modes, while
+GrTiledGradientEffect implements repeat and mirror tile modes. The
+GrClampedGradientEffect requires border colors to be specified outside of its
+colorizer child, but these border colors may be defined by the gradient color
+stops. Both of these master effects delegate calculating a t interpolant to a
+child processor, perform their respective tile mode operations, and possibly
+convert the tiled t value (guaranteed to be in [0, 1]) into an output
+color using their child colorizer processor.
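+
+As a rough sketch (hand-written for illustration rather than the actual factory
+logic in GrGradientShader; grad, args, startColor, and endColor are assumed
+inputs), composing a clamped linear gradient from these pieces looks like:
+
+```cpp
+// Layout: maps 2D local coordinates to a 1D interpolant t in sk_OutColor.x.
+std::unique_ptr<GrFragmentProcessor> layout =
+        GrLinearGradientLayout::Make(grad, args);
+
+// Colorizer: maps the tiled t in sk_InColor.x to a color.
+std::unique_ptr<GrFragmentProcessor> colorizer =
+        GrSingleIntervalGradientColorizer::Make(startColor, endColor);
+
+// Master effect: applies clamp/decal tiling and border colors around its children.
+std::unique_ptr<GrFragmentProcessor> fp = GrClampedGradientEffect::Make(
+        std::move(colorizer), std::move(layout),
+        /*leftBorderColor=*/startColor, /*rightBorderColor=*/endColor,
+        /*makePremul=*/true, /*colorsAreOpaque=*/false);
+```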
+
+Because of how child processors are currently defined, where they have a single
+half4 input and a single half4 output, there is a type mismatch between the 1D
+t value and the 4D inputs/outputs of the layout and colorizer processors. For
+now, the master effect assumes an untiled t is output in sk_OutColor.x by the
+layout and it tiles solely off of that value.
+
+However, layouts can output a negative value in the y component to invalidate
+the gradient location (currently only the two point conical gradient does this).
+When invalidated, the master effect outputs transparent black and does not
+invoke the child colorizer. Other than this condition, any values in y, z, or w
+are passed into the colorizer unmodified. The colorizer should assume that the
+valid tiled t value is in sk_InColor.x and can safely ignore y, z, and w.
+
+Currently there are color interpolators (colorizers) for analytic color cases
+(evaluated directly on the GPU) and for sampling a generated texture map.
+
+GrGradientShader provides static factory functions to create
+GrFragmentProcessor graphs that reproduce a particular SkGradientShader.
+
+Optimization Flags
+==================
+
+At an abstract level, gradient shaders are compatible with coverage as alpha
+and, under certain conditions, preserve opacity when the inputs are opaque. To
+reduce the amount of duplicate code and boilerplate, these optimization
+decisions are implemented in the master effects and not in the colorizers. It
+is assumed that all colorizer FPs will be compatible with coverage as alpha and
+will preserve opacity if input colors are opaque. Since this is assumed by the
+master effects, they do not need to report these optimizations or check input
+opacity (this does mean that if the colorizers are used independently of the
+master effect shader, the reported flags might not be optimal, but since
+that is unlikely, this convention really simplifies the colorizer
+implementations).
+
+Unlike colorizers, which do not need to report any optimization flags, layout
+FPs should report opacity-preserving optimizations because they can impact the
+opacity of a pixel outside of how the gradient would otherwise color it.
+Layouts that potentially reject pixels (i.e. could output a negative y value)
+must not report kPreservesOpaqueInput_OptimizationFlag. Layouts that never
+reject a pixel should report kPreservesOpaqueInput_OptimizationFlag since the
+master effects can optimize away checking if the layout rejects a pixel.
+
+
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.cpp
new file mode 100644
index 0000000000..268fc6f9f8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrClampedGradientEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrClampedGradientEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLClampedGradientEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLClampedGradientEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrClampedGradientEffect& _outer = args.fFp.cast<GrClampedGradientEffect>();
+ (void)_outer;
+ auto leftBorderColor = _outer.leftBorderColor;
+ (void)leftBorderColor;
+ auto rightBorderColor = _outer.rightBorderColor;
+ (void)rightBorderColor;
+ auto makePremul = _outer.makePremul;
+ (void)makePremul;
+ auto colorsAreOpaque = _outer.colorsAreOpaque;
+ (void)colorsAreOpaque;
+ leftBorderColorVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType, "leftBorderColor");
+ rightBorderColorVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType, "rightBorderColor");
+ SkString _sample1099("_sample1099");
+ this->invokeChild(_outer.gradLayout_index, &_sample1099, args);
+ fragBuilder->codeAppendf(
+ "half4 t = %s;\nif (!%s && t.y < 0.0) {\n %s = half4(0.0);\n} else if (t.x < "
+ "0.0) {\n %s = %s;\n} else if (t.x > 1.0) {\n %s = %s;\n} else {",
+ _sample1099.c_str(),
+ (_outer.childProcessor(_outer.gradLayout_index).preservesOpaqueInput() ? "true"
+ : "false"),
+ args.fOutputColor, args.fOutputColor,
+ args.fUniformHandler->getUniformCStr(leftBorderColorVar), args.fOutputColor,
+ args.fUniformHandler->getUniformCStr(rightBorderColorVar));
+ SkString _input1767("t");
+ SkString _sample1767("_sample1767");
+ this->invokeChild(_outer.colorizer_index, _input1767.c_str(), &_sample1767, args);
+ fragBuilder->codeAppendf("\n %s = %s;\n}\n@if (%s) {\n %s.xyz *= %s.w;\n}\n",
+ args.fOutputColor, _sample1767.c_str(),
+ (_outer.makePremul ? "true" : "false"), args.fOutputColor,
+ args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrClampedGradientEffect& _outer = _proc.cast<GrClampedGradientEffect>();
+ {
+ const SkPMColor4f& leftBorderColorValue = _outer.leftBorderColor;
+ if (leftBorderColorPrev != leftBorderColorValue) {
+ leftBorderColorPrev = leftBorderColorValue;
+ pdman.set4fv(leftBorderColorVar, 1, leftBorderColorValue.vec());
+ }
+ const SkPMColor4f& rightBorderColorValue = _outer.rightBorderColor;
+ if (rightBorderColorPrev != rightBorderColorValue) {
+ rightBorderColorPrev = rightBorderColorValue;
+ pdman.set4fv(rightBorderColorVar, 1, rightBorderColorValue.vec());
+ }
+ }
+ }
+ SkPMColor4f leftBorderColorPrev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkPMColor4f rightBorderColorPrev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ UniformHandle leftBorderColorVar;
+ UniformHandle rightBorderColorVar;
+};
+GrGLSLFragmentProcessor* GrClampedGradientEffect::onCreateGLSLInstance() const {
+ return new GrGLSLClampedGradientEffect();
+}
+void GrClampedGradientEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)makePremul);
+}
+bool GrClampedGradientEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrClampedGradientEffect& that = other.cast<GrClampedGradientEffect>();
+ (void)that;
+ if (leftBorderColor != that.leftBorderColor) return false;
+ if (rightBorderColor != that.rightBorderColor) return false;
+ if (makePremul != that.makePremul) return false;
+ if (colorsAreOpaque != that.colorsAreOpaque) return false;
+ return true;
+}
+GrClampedGradientEffect::GrClampedGradientEffect(const GrClampedGradientEffect& src)
+ : INHERITED(kGrClampedGradientEffect_ClassID, src.optimizationFlags())
+ , colorizer_index(src.colorizer_index)
+ , gradLayout_index(src.gradLayout_index)
+ , leftBorderColor(src.leftBorderColor)
+ , rightBorderColor(src.rightBorderColor)
+ , makePremul(src.makePremul)
+ , colorsAreOpaque(src.colorsAreOpaque) {
+ this->registerChildProcessor(src.childProcessor(colorizer_index).clone());
+ this->registerChildProcessor(src.childProcessor(gradLayout_index).clone());
+}
+std::unique_ptr<GrFragmentProcessor> GrClampedGradientEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrClampedGradientEffect(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.h b/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.h
new file mode 100644
index 0000000000..115fe4a8a2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrClampedGradientEffect.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrClampedGradientEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrClampedGradientEffect_DEFINED
+#define GrClampedGradientEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrClampedGradientEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor> colorizer,
+ std::unique_ptr<GrFragmentProcessor> gradLayout, SkPMColor4f leftBorderColor,
+ SkPMColor4f rightBorderColor, bool makePremul, bool colorsAreOpaque) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrClampedGradientEffect(
+ std::move(colorizer), std::move(gradLayout), leftBorderColor, rightBorderColor,
+ makePremul, colorsAreOpaque));
+ }
+ GrClampedGradientEffect(const GrClampedGradientEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "ClampedGradientEffect"; }
+ int colorizer_index = -1;
+ int gradLayout_index = -1;
+ SkPMColor4f leftBorderColor;
+ SkPMColor4f rightBorderColor;
+ bool makePremul;
+ bool colorsAreOpaque;
+
+private:
+ GrClampedGradientEffect(std::unique_ptr<GrFragmentProcessor> colorizer,
+ std::unique_ptr<GrFragmentProcessor> gradLayout,
+ SkPMColor4f leftBorderColor, SkPMColor4f rightBorderColor,
+ bool makePremul, bool colorsAreOpaque)
+ : INHERITED(kGrClampedGradientEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ (colorsAreOpaque && gradLayout->preservesOpaqueInput()
+ ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags))
+ , leftBorderColor(leftBorderColor)
+ , rightBorderColor(rightBorderColor)
+ , makePremul(makePremul)
+ , colorsAreOpaque(colorsAreOpaque) {
+ SkASSERT(colorizer);
+ colorizer_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(colorizer));
+ SkASSERT(gradLayout);
+ gradLayout_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(gradLayout));
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.cpp
new file mode 100644
index 0000000000..b7ce502d3f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrDualIntervalGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#include "GrDualIntervalGradientColorizer.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLDualIntervalGradientColorizer : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLDualIntervalGradientColorizer() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrDualIntervalGradientColorizer& _outer =
+ args.fFp.cast<GrDualIntervalGradientColorizer>();
+ (void)_outer;
+ auto scale01 = _outer.scale01;
+ (void)scale01;
+ auto bias01 = _outer.bias01;
+ (void)bias01;
+ auto scale23 = _outer.scale23;
+ (void)scale23;
+ auto bias23 = _outer.bias23;
+ (void)bias23;
+ auto threshold = _outer.threshold;
+ (void)threshold;
+ scale01Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale01");
+ bias01Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias01");
+ scale23Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale23");
+ bias23Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias23");
+ thresholdVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType,
+ "threshold");
+ fragBuilder->codeAppendf(
+ "half t = %s.x;\nfloat4 scale, bias;\nif (t < %s) {\n scale = %s;\n bias = "
+ "%s;\n} else {\n scale = %s;\n bias = %s;\n}\n%s = half4(float(t) * scale + "
+ "bias);\n",
+ args.fInputColor, args.fUniformHandler->getUniformCStr(thresholdVar),
+ args.fUniformHandler->getUniformCStr(scale01Var),
+ args.fUniformHandler->getUniformCStr(bias01Var),
+ args.fUniformHandler->getUniformCStr(scale23Var),
+ args.fUniformHandler->getUniformCStr(bias23Var), args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrDualIntervalGradientColorizer& _outer =
+ _proc.cast<GrDualIntervalGradientColorizer>();
+ {
+ const SkPMColor4f& scale01Value = _outer.scale01;
+ if (scale01Prev != scale01Value) {
+ scale01Prev = scale01Value;
+ pdman.set4fv(scale01Var, 1, scale01Value.vec());
+ }
+ const SkPMColor4f& bias01Value = _outer.bias01;
+ if (bias01Prev != bias01Value) {
+ bias01Prev = bias01Value;
+ pdman.set4fv(bias01Var, 1, bias01Value.vec());
+ }
+ const SkPMColor4f& scale23Value = _outer.scale23;
+ if (scale23Prev != scale23Value) {
+ scale23Prev = scale23Value;
+ pdman.set4fv(scale23Var, 1, scale23Value.vec());
+ }
+ const SkPMColor4f& bias23Value = _outer.bias23;
+ if (bias23Prev != bias23Value) {
+ bias23Prev = bias23Value;
+ pdman.set4fv(bias23Var, 1, bias23Value.vec());
+ }
+ float thresholdValue = _outer.threshold;
+ if (thresholdPrev != thresholdValue) {
+ thresholdPrev = thresholdValue;
+ pdman.set1f(thresholdVar, thresholdValue);
+ }
+ }
+ }
+ SkPMColor4f scale01Prev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkPMColor4f bias01Prev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkPMColor4f scale23Prev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkPMColor4f bias23Prev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ float thresholdPrev = SK_FloatNaN;
+ UniformHandle scale01Var;
+ UniformHandle bias01Var;
+ UniformHandle scale23Var;
+ UniformHandle bias23Var;
+ UniformHandle thresholdVar;
+};
+GrGLSLFragmentProcessor* GrDualIntervalGradientColorizer::onCreateGLSLInstance() const {
+ return new GrGLSLDualIntervalGradientColorizer();
+}
+void GrDualIntervalGradientColorizer::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrDualIntervalGradientColorizer::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrDualIntervalGradientColorizer& that = other.cast<GrDualIntervalGradientColorizer>();
+ (void)that;
+ if (scale01 != that.scale01) return false;
+ if (bias01 != that.bias01) return false;
+ if (scale23 != that.scale23) return false;
+ if (bias23 != that.bias23) return false;
+ if (threshold != that.threshold) return false;
+ return true;
+}
+GrDualIntervalGradientColorizer::GrDualIntervalGradientColorizer(
+ const GrDualIntervalGradientColorizer& src)
+ : INHERITED(kGrDualIntervalGradientColorizer_ClassID, src.optimizationFlags())
+ , scale01(src.scale01)
+ , bias01(src.bias01)
+ , scale23(src.scale23)
+ , bias23(src.bias23)
+ , threshold(src.threshold) {}
+std::unique_ptr<GrFragmentProcessor> GrDualIntervalGradientColorizer::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrDualIntervalGradientColorizer(*this));
+}
+
+std::unique_ptr<GrFragmentProcessor> GrDualIntervalGradientColorizer::Make(const SkPMColor4f& c0,
+ const SkPMColor4f& c1,
+ const SkPMColor4f& c2,
+ const SkPMColor4f& c3,
+ float threshold) {
+ // Derive scale and biases from the 4 colors and threshold
+ auto vc0 = Sk4f::Load(c0.vec());
+ auto vc1 = Sk4f::Load(c1.vec());
+ auto scale01 = (vc1 - vc0) / threshold;
+ // bias01 = c0
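+    // (t0 is 0 for the first interval, so bias01 = c0 - 0 * scale01 = c0 and c0
+    // is passed through directly in the constructor call below)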
+
+ auto vc2 = Sk4f::Load(c2.vec());
+ auto vc3 = Sk4f::Load(c3.vec());
+ auto scale23 = (vc3 - vc2) / (1 - threshold);
+ auto bias23 = vc2 - threshold * scale23;
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrDualIntervalGradientColorizer(
+ {scale01[0], scale01[1], scale01[2], scale01[3]}, c0,
+ {scale23[0], scale23[1], scale23[2], scale23[3]},
+ {bias23[0], bias23[1], bias23[2], bias23[3]}, threshold));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.h b/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.h
new file mode 100644
index 0000000000..7c3e3b8b7b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrDualIntervalGradientColorizer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrDualIntervalGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrDualIntervalGradientColorizer_DEFINED
+#define GrDualIntervalGradientColorizer_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrDualIntervalGradientColorizer : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkPMColor4f& c0, const SkPMColor4f& c1,
+ const SkPMColor4f& c2, const SkPMColor4f& c3,
+ float threshold);
+ GrDualIntervalGradientColorizer(const GrDualIntervalGradientColorizer& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "DualIntervalGradientColorizer"; }
+ SkPMColor4f scale01;
+ SkPMColor4f bias01;
+ SkPMColor4f scale23;
+ SkPMColor4f bias23;
+ float threshold;
+
+private:
+ GrDualIntervalGradientColorizer(SkPMColor4f scale01, SkPMColor4f bias01, SkPMColor4f scale23,
+ SkPMColor4f bias23, float threshold)
+ : INHERITED(kGrDualIntervalGradientColorizer_ClassID, kNone_OptimizationFlags)
+ , scale01(scale01)
+ , bias01(bias01)
+ , scale23(scale23)
+ , bias23(bias23)
+ , threshold(threshold) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.cpp
new file mode 100644
index 0000000000..a20b0e5676
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrLinearGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#include "GrLinearGradientLayout.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLLinearGradientLayout : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLLinearGradientLayout() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrLinearGradientLayout& _outer = args.fFp.cast<GrLinearGradientLayout>();
+ (void)_outer;
+ auto gradientMatrix = _outer.gradientMatrix;
+ (void)gradientMatrix;
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "half t = half(%s.x) + 9.9999997473787516e-06;\n%s = half4(t, 1.0, 0.0, 0.0);\n",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrLinearGradientLayout::onCreateGLSLInstance() const {
+ return new GrGLSLLinearGradientLayout();
+}
+void GrLinearGradientLayout::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrLinearGradientLayout::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrLinearGradientLayout& that = other.cast<GrLinearGradientLayout>();
+ (void)that;
+ if (gradientMatrix != that.gradientMatrix) return false;
+ return true;
+}
+GrLinearGradientLayout::GrLinearGradientLayout(const GrLinearGradientLayout& src)
+ : INHERITED(kGrLinearGradientLayout_ClassID, src.optimizationFlags())
+ , fCoordTransform0(src.fCoordTransform0)
+ , gradientMatrix(src.gradientMatrix) {
+ this->addCoordTransform(&fCoordTransform0);
+}
+std::unique_ptr<GrFragmentProcessor> GrLinearGradientLayout::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrLinearGradientLayout(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrLinearGradientLayout);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrLinearGradientLayout::TestCreate(GrProcessorTestData* d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkPoint points[] = {
+ {d->fRandom->nextRangeScalar(0.0f, scale), d->fRandom->nextRangeScalar(0.0f, scale)},
+ {d->fRandom->nextRangeScalar(0.0f, scale), d->fRandom->nextRangeScalar(0.0f, scale)}};
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f
+ ? SkGradientShader::MakeLinear(points, params.fColors4f,
+ params.fColorSpace, params.fStops,
+ params.fColorCount, params.fTileMode)
+ : SkGradientShader::MakeLinear(points, params.fColors, params.fStops,
+ params.fColorCount, params.fTileMode);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+ GrAlwaysAssert(fp);
+ return fp;
+}
+#endif
+
+std::unique_ptr<GrFragmentProcessor> GrLinearGradientLayout::Make(const SkLinearGradient& grad,
+ const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(new GrLinearGradientLayout(matrix));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.h b/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.h
new file mode 100644
index 0000000000..1f711d977b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrLinearGradientLayout.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrLinearGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrLinearGradientLayout_DEFINED
+#define GrLinearGradientLayout_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/gradients/GrGradientShader.h"
+#include "src/shaders/gradients/SkLinearGradient.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrLinearGradientLayout : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkLinearGradient& gradient,
+ const GrFPArgs& args);
+ GrLinearGradientLayout(const GrLinearGradientLayout& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "LinearGradientLayout"; }
+ GrCoordTransform fCoordTransform0;
+ SkMatrix44 gradientMatrix;
+
+private:
+ GrLinearGradientLayout(SkMatrix44 gradientMatrix)
+ : INHERITED(kGrLinearGradientLayout_ClassID,
+ (OptimizationFlags)kPreservesOpaqueInput_OptimizationFlag)
+ , fCoordTransform0(gradientMatrix)
+ , gradientMatrix(gradientMatrix) {
+ this->addCoordTransform(&fCoordTransform0);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.cpp
new file mode 100644
index 0000000000..f1639438ac
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRadialGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#include "GrRadialGradientLayout.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLRadialGradientLayout : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLRadialGradientLayout() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrRadialGradientLayout& _outer = args.fFp.cast<GrRadialGradientLayout>();
+ (void)_outer;
+ auto gradientMatrix = _outer.gradientMatrix;
+ (void)gradientMatrix;
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf("half t = half(length(%s));\n%s = half4(t, 1.0, 0.0, 0.0);\n",
+ _outer.computeLocalCoordsInVertexShader()
+ ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrRadialGradientLayout::onCreateGLSLInstance() const {
+ return new GrGLSLRadialGradientLayout();
+}
+void GrRadialGradientLayout::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrRadialGradientLayout::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrRadialGradientLayout& that = other.cast<GrRadialGradientLayout>();
+ (void)that;
+ if (gradientMatrix != that.gradientMatrix) return false;
+ return true;
+}
+GrRadialGradientLayout::GrRadialGradientLayout(const GrRadialGradientLayout& src)
+ : INHERITED(kGrRadialGradientLayout_ClassID, src.optimizationFlags())
+ , fCoordTransform0(src.fCoordTransform0)
+ , gradientMatrix(src.gradientMatrix) {
+ this->addCoordTransform(&fCoordTransform0);
+}
+std::unique_ptr<GrFragmentProcessor> GrRadialGradientLayout::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrRadialGradientLayout(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRadialGradientLayout);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrRadialGradientLayout::TestCreate(GrProcessorTestData* d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ std::unique_ptr<GrFragmentProcessor> fp;
+ GrTest::TestAsFPArgs asFPArgs(d);
+ do {
+ GrGradientShader::RandomParams params(d->fRandom);
+ SkPoint center = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkScalar radius = d->fRandom->nextRangeScalar(0.0f, scale);
+ sk_sp<SkShader> shader =
+ params.fUseColors4f
+ ? SkGradientShader::MakeRadial(center, radius, params.fColors4f,
+ params.fColorSpace, params.fStops,
+ params.fColorCount, params.fTileMode)
+ : SkGradientShader::MakeRadial(center, radius, params.fColors,
+ params.fStops, params.fColorCount,
+ params.fTileMode);
+ // Degenerate params can create an Empty (non-null) shader, where fp will be nullptr
+ fp = shader ? as_SB(shader)->asFragmentProcessor(asFPArgs.args()) : nullptr;
+ } while (!fp);
+ return fp;
+}
+#endif
+
+std::unique_ptr<GrFragmentProcessor> GrRadialGradientLayout::Make(const SkRadialGradient& grad,
+ const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(new GrRadialGradientLayout(matrix));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.h b/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.h
new file mode 100644
index 0000000000..17a1b49ea9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrRadialGradientLayout.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrRadialGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrRadialGradientLayout_DEFINED
+#define GrRadialGradientLayout_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/gradients/GrGradientShader.h"
+#include "src/shaders/gradients/SkRadialGradient.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrRadialGradientLayout : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkRadialGradient& gradient,
+ const GrFPArgs& args);
+ GrRadialGradientLayout(const GrRadialGradientLayout& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "RadialGradientLayout"; }
+ GrCoordTransform fCoordTransform0;
+ SkMatrix44 gradientMatrix;
+
+private:
+ GrRadialGradientLayout(SkMatrix44 gradientMatrix)
+ : INHERITED(kGrRadialGradientLayout_ClassID,
+ (OptimizationFlags)kPreservesOpaqueInput_OptimizationFlag)
+ , fCoordTransform0(gradientMatrix)
+ , gradientMatrix(gradientMatrix) {
+ this->addCoordTransform(&fCoordTransform0);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.cpp
new file mode 100644
index 0000000000..f4a3397c19
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSingleIntervalGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#include "GrSingleIntervalGradientColorizer.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLSingleIntervalGradientColorizer : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLSingleIntervalGradientColorizer() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrSingleIntervalGradientColorizer& _outer =
+ args.fFp.cast<GrSingleIntervalGradientColorizer>();
+ (void)_outer;
+ auto start = _outer.start;
+ (void)start;
+ auto end = _outer.end;
+ (void)end;
+ startVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType, "start");
+ endVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType, "end");
+ fragBuilder->codeAppendf("half t = %s.x;\n%s = (1.0 - t) * %s + t * %s;\n",
+ args.fInputColor, args.fOutputColor,
+ args.fUniformHandler->getUniformCStr(startVar),
+ args.fUniformHandler->getUniformCStr(endVar));
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrSingleIntervalGradientColorizer& _outer =
+ _proc.cast<GrSingleIntervalGradientColorizer>();
+ {
+ const SkPMColor4f& startValue = _outer.start;
+ if (startPrev != startValue) {
+ startPrev = startValue;
+ pdman.set4fv(startVar, 1, startValue.vec());
+ }
+ const SkPMColor4f& endValue = _outer.end;
+ if (endPrev != endValue) {
+ endPrev = endValue;
+ pdman.set4fv(endVar, 1, endValue.vec());
+ }
+ }
+ }
+ SkPMColor4f startPrev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkPMColor4f endPrev = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ UniformHandle startVar;
+ UniformHandle endVar;
+};
+GrGLSLFragmentProcessor* GrSingleIntervalGradientColorizer::onCreateGLSLInstance() const {
+ return new GrGLSLSingleIntervalGradientColorizer();
+}
+void GrSingleIntervalGradientColorizer::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrSingleIntervalGradientColorizer::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrSingleIntervalGradientColorizer& that = other.cast<GrSingleIntervalGradientColorizer>();
+ (void)that;
+ if (start != that.start) return false;
+ if (end != that.end) return false;
+ return true;
+}
+GrSingleIntervalGradientColorizer::GrSingleIntervalGradientColorizer(
+ const GrSingleIntervalGradientColorizer& src)
+ : INHERITED(kGrSingleIntervalGradientColorizer_ClassID, src.optimizationFlags())
+ , start(src.start)
+ , end(src.end) {}
+std::unique_ptr<GrFragmentProcessor> GrSingleIntervalGradientColorizer::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSingleIntervalGradientColorizer(*this));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.h b/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.h
new file mode 100644
index 0000000000..f7b49dbd44
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrSingleIntervalGradientColorizer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSingleIntervalGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrSingleIntervalGradientColorizer_DEFINED
+#define GrSingleIntervalGradientColorizer_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrSingleIntervalGradientColorizer : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(SkPMColor4f start, SkPMColor4f end) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSingleIntervalGradientColorizer(start, end));
+ }
+ GrSingleIntervalGradientColorizer(const GrSingleIntervalGradientColorizer& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "SingleIntervalGradientColorizer"; }
+ SkPMColor4f start;
+ SkPMColor4f end;
+
+private:
+ GrSingleIntervalGradientColorizer(SkPMColor4f start, SkPMColor4f end)
+ : INHERITED(kGrSingleIntervalGradientColorizer_ClassID, kNone_OptimizationFlags)
+ , start(start)
+ , end(end) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.cpp
new file mode 100644
index 0000000000..6abcbb1780
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSweepGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#include "GrSweepGradientLayout.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLSweepGradientLayout : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLSweepGradientLayout() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrSweepGradientLayout& _outer = args.fFp.cast<GrSweepGradientLayout>();
+ (void)_outer;
+ auto gradientMatrix = _outer.gradientMatrix;
+ (void)gradientMatrix;
+ auto bias = _outer.bias;
+ (void)bias;
+ auto scale = _outer.scale;
+ (void)scale;
+ biasVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "bias");
+ scaleVar =
+ args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "scale");
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "half angle;\nif (sk_Caps.atan2ImplementedAsAtanYOverX) {\n angle = half(2.0 * "
+ "atan(-%s.y, length(%s) - %s.x));\n} else {\n angle = half(atan(-%s.y, "
+ "-%s.x));\n}\nhalf t = ((angle * 0.15915493667125702 + 0.5) + %s) * %s;\n%s = "
+ "half4(t, 1.0, 0.0, 0.0);\n",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ args.fUniformHandler->getUniformCStr(biasVar),
+ args.fUniformHandler->getUniformCStr(scaleVar), args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrSweepGradientLayout& _outer = _proc.cast<GrSweepGradientLayout>();
+ {
+ float biasValue = _outer.bias;
+ if (biasPrev != biasValue) {
+ biasPrev = biasValue;
+ pdman.set1f(biasVar, biasValue);
+ }
+ float scaleValue = _outer.scale;
+ if (scalePrev != scaleValue) {
+ scalePrev = scaleValue;
+ pdman.set1f(scaleVar, scaleValue);
+ }
+ }
+ }
+ float biasPrev = SK_FloatNaN;
+ float scalePrev = SK_FloatNaN;
+ UniformHandle biasVar;
+ UniformHandle scaleVar;
+};
+GrGLSLFragmentProcessor* GrSweepGradientLayout::onCreateGLSLInstance() const {
+ return new GrGLSLSweepGradientLayout();
+}
+void GrSweepGradientLayout::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrSweepGradientLayout::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrSweepGradientLayout& that = other.cast<GrSweepGradientLayout>();
+ (void)that;
+ if (gradientMatrix != that.gradientMatrix) return false;
+ if (bias != that.bias) return false;
+ if (scale != that.scale) return false;
+ return true;
+}
+GrSweepGradientLayout::GrSweepGradientLayout(const GrSweepGradientLayout& src)
+ : INHERITED(kGrSweepGradientLayout_ClassID, src.optimizationFlags())
+ , fCoordTransform0(src.fCoordTransform0)
+ , gradientMatrix(src.gradientMatrix)
+ , bias(src.bias)
+ , scale(src.scale) {
+ this->addCoordTransform(&fCoordTransform0);
+}
+std::unique_ptr<GrFragmentProcessor> GrSweepGradientLayout::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrSweepGradientLayout(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSweepGradientLayout);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrSweepGradientLayout::TestCreate(GrProcessorTestData* d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkPoint center = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f
+ ? SkGradientShader::MakeSweep(center.fX, center.fY, params.fColors4f,
+ params.fColorSpace, params.fStops,
+ params.fColorCount)
+ : SkGradientShader::MakeSweep(center.fX, center.fY, params.fColors,
+ params.fStops, params.fColorCount);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+ GrAlwaysAssert(fp);
+ return fp;
+}
+#endif
+
+std::unique_ptr<GrFragmentProcessor> GrSweepGradientLayout::Make(const SkSweepGradient& grad,
+ const GrFPArgs& args) {
+ SkMatrix matrix;
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(grad.getGradientMatrix());
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrSweepGradientLayout(matrix, grad.getTBias(), grad.getTScale()));
+}
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.h b/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.h
new file mode 100644
index 0000000000..77036635c4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrSweepGradientLayout.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrSweepGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrSweepGradientLayout_DEFINED
+#define GrSweepGradientLayout_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/gradients/GrGradientShader.h"
+#include "src/shaders/gradients/SkSweepGradient.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrSweepGradientLayout : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkSweepGradient& gradient,
+ const GrFPArgs& args);
+ GrSweepGradientLayout(const GrSweepGradientLayout& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "SweepGradientLayout"; }
+ GrCoordTransform fCoordTransform0;
+ SkMatrix44 gradientMatrix;
+ float bias;
+ float scale;
+
+private:
+ GrSweepGradientLayout(SkMatrix44 gradientMatrix, float bias, float scale)
+ : INHERITED(kGrSweepGradientLayout_ClassID,
+ (OptimizationFlags)kPreservesOpaqueInput_OptimizationFlag)
+ , fCoordTransform0(gradientMatrix)
+ , gradientMatrix(gradientMatrix)
+ , bias(bias)
+ , scale(scale) {
+ this->addCoordTransform(&fCoordTransform0);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.cpp
new file mode 100644
index 0000000000..466b407cf7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTextureGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#include "GrTextureGradientColorizer.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLTextureGradientColorizer : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLTextureGradientColorizer() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrTextureGradientColorizer& _outer = args.fFp.cast<GrTextureGradientColorizer>();
+ (void)_outer;
+ fragBuilder->codeAppendf(
+ "half2 coord = half2(%s.x, 0.5);\n%s = sample(%s, float2(coord)).%s;\n",
+ args.fInputColor, args.fOutputColor,
+ fragBuilder->getProgramBuilder()->samplerVariable(args.fTexSamplers[0]),
+ fragBuilder->getProgramBuilder()->samplerSwizzle(args.fTexSamplers[0]).c_str());
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrTextureGradientColorizer::onCreateGLSLInstance() const {
+ return new GrGLSLTextureGradientColorizer();
+}
+void GrTextureGradientColorizer::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {}
+bool GrTextureGradientColorizer::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrTextureGradientColorizer& that = other.cast<GrTextureGradientColorizer>();
+ (void)that;
+ if (gradient != that.gradient) return false;
+ return true;
+}
+GrTextureGradientColorizer::GrTextureGradientColorizer(const GrTextureGradientColorizer& src)
+ : INHERITED(kGrTextureGradientColorizer_ClassID, src.optimizationFlags())
+ , gradient(src.gradient) {
+ this->setTextureSamplerCnt(1);
+}
+std::unique_ptr<GrFragmentProcessor> GrTextureGradientColorizer::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTextureGradientColorizer(*this));
+}
+const GrFragmentProcessor::TextureSampler& GrTextureGradientColorizer::onTextureSampler(
+ int index) const {
+ return IthTextureSampler(index, gradient);
+}
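
The emitted shader reads t from the input color's x channel and samples the gradient ramp texture at (t, 0.5), relying on the clamped bilinear sampler declared in the header below. A CPU analogue of that lookup, with ramp and width standing in for the texture (both names are hypothetical):

    #include <algorithm>
    #include <cmath>

    struct Color4f { float r, g, b, a; };

    // Sketch: clamp t to [0, 1], convert to texel-center coordinates, and
    // linearly blend the two nearest texels of a width x 1 gradient ramp,
    // approximating what ClampBilerp sampling does on the GPU.
    static Color4f sampleRamp(const Color4f* ramp, int width, float t) {
        t = std::min(std::max(t, 0.0f), 1.0f);
        float x = t * width - 0.5f;
        int i0 = std::max(0, (int)std::floor(x));
        int i1 = std::min(width - 1, i0 + 1);
        float f = std::min(std::max(x - (float)i0, 0.0f), 1.0f);
        const Color4f& a = ramp[i0];
        const Color4f& b = ramp[i1];
        return { a.r + f * (b.r - a.r), a.g + f * (b.g - a.g),
                 a.b + f * (b.b - a.b), a.a + f * (b.a - a.a) };
    }
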
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.h b/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.h
new file mode 100644
index 0000000000..e15558edde
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTextureGradientColorizer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTextureGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrTextureGradientColorizer_DEFINED
+#define GrTextureGradientColorizer_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrTextureGradientColorizer : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(sk_sp<GrTextureProxy> gradient) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTextureGradientColorizer(gradient));
+ }
+ GrTextureGradientColorizer(const GrTextureGradientColorizer& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "TextureGradientColorizer"; }
+ TextureSampler gradient;
+
+private:
+ GrTextureGradientColorizer(sk_sp<GrTextureProxy> gradient)
+ : INHERITED(kGrTextureGradientColorizer_ClassID, kNone_OptimizationFlags)
+ , gradient(std::move(gradient), GrSamplerState::ClampBilerp()) {
+ this->setTextureSamplerCnt(1);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ const TextureSampler& onTextureSampler(int) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.cpp
new file mode 100644
index 0000000000..9d0637feb6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTiledGradientEffect.fp; do not modify.
+ **************************************************************************************************/
+#include "GrTiledGradientEffect.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLTiledGradientEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLTiledGradientEffect() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrTiledGradientEffect& _outer = args.fFp.cast<GrTiledGradientEffect>();
+ (void)_outer;
+ auto mirror = _outer.mirror;
+ (void)mirror;
+ auto makePremul = _outer.makePremul;
+ (void)makePremul;
+ auto colorsAreOpaque = _outer.colorsAreOpaque;
+ (void)colorsAreOpaque;
+ SkString _sample453("_sample453");
+ this->invokeChild(_outer.gradLayout_index, &_sample453, args);
+ fragBuilder->codeAppendf(
+ "half4 t = %s;\nif (!%s && t.y < 0.0) {\n %s = half4(0.0);\n} else {\n @if "
+ "(%s) {\n half t_1 = t.x - 1.0;\n half tiled_t = (t_1 - 2.0 * "
+ "floor(t_1 * 0.5)) - 1.0;\n if (sk_Caps.mustDoOpBetweenFloorAndAbs) {\n "
+ " tiled_t = clamp(tiled_t, -1.0, 1.0);\n }\n t.x = "
+ "abs(tiled_t);\n } else {\n t.x = fract(t.x);\n }",
+ _sample453.c_str(),
+ (_outer.childProcessor(_outer.gradLayout_index).preservesOpaqueInput() ? "true"
+ : "false"),
+ args.fOutputColor, (_outer.mirror ? "true" : "false"));
+ SkString _input1464("t");
+ SkString _sample1464("_sample1464");
+ this->invokeChild(_outer.colorizer_index, _input1464.c_str(), &_sample1464, args);
+ fragBuilder->codeAppendf("\n %s = %s;\n}\n@if (%s) {\n %s.xyz *= %s.w;\n}\n",
+ args.fOutputColor, _sample1464.c_str(),
+ (_outer.makePremul ? "true" : "false"), args.fOutputColor,
+ args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {}
+};
+GrGLSLFragmentProcessor* GrTiledGradientEffect::onCreateGLSLInstance() const {
+ return new GrGLSLTiledGradientEffect();
+}
+void GrTiledGradientEffect::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)mirror);
+ b->add32((int32_t)makePremul);
+}
+bool GrTiledGradientEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrTiledGradientEffect& that = other.cast<GrTiledGradientEffect>();
+ (void)that;
+ if (mirror != that.mirror) return false;
+ if (makePremul != that.makePremul) return false;
+ if (colorsAreOpaque != that.colorsAreOpaque) return false;
+ return true;
+}
+GrTiledGradientEffect::GrTiledGradientEffect(const GrTiledGradientEffect& src)
+ : INHERITED(kGrTiledGradientEffect_ClassID, src.optimizationFlags())
+ , colorizer_index(src.colorizer_index)
+ , gradLayout_index(src.gradLayout_index)
+ , mirror(src.mirror)
+ , makePremul(src.makePremul)
+ , colorsAreOpaque(src.colorsAreOpaque) {
+ this->registerChildProcessor(src.childProcessor(colorizer_index).clone());
+ this->registerChildProcessor(src.childProcessor(gradLayout_index).clone());
+}
+std::unique_ptr<GrFragmentProcessor> GrTiledGradientEffect::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTiledGradientEffect(*this));
+}
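
The SkSL above tiles the layout's t before invoking the colorizer: repeat mode keeps fract(t), while mirror mode shifts t by 1, wraps it onto [-1, 1] with period 2, and folds it with abs() so alternate repetitions run backward. The same arithmetic on the CPU (the mustDoOpBetweenFloorAndAbs clamp is a driver workaround and is a no-op here):

    #include <cmath>

    // Sketch: the tiling math from the generated shader. For t = 1.3,
    // mirror yields 0.7 (running backward); for t = 2.3 it yields 0.3
    // (forward again); repeat maps both to 0.3.
    static float tileT(float t, bool mirror) {
        if (mirror) {
            float t1 = t - 1.0f;
            float tiled = (t1 - 2.0f * std::floor(t1 * 0.5f)) - 1.0f;  // [-1, 1]
            return std::fabs(tiled);
        }
        return t - std::floor(t);  // fract(t), in [0, 1)
    }
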
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.h b/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.h
new file mode 100644
index 0000000000..8942804513
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTiledGradientEffect.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTiledGradientEffect.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrTiledGradientEffect_DEFINED
+#define GrTiledGradientEffect_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrTiledGradientEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor> colorizer,
+ std::unique_ptr<GrFragmentProcessor> gradLayout, bool mirror, bool makePremul,
+ bool colorsAreOpaque) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTiledGradientEffect(
+ std::move(colorizer), std::move(gradLayout), mirror, makePremul, colorsAreOpaque));
+ }
+ GrTiledGradientEffect(const GrTiledGradientEffect& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "TiledGradientEffect"; }
+ int colorizer_index = -1;
+ int gradLayout_index = -1;
+ bool mirror;
+ bool makePremul;
+ bool colorsAreOpaque;
+
+private:
+ GrTiledGradientEffect(std::unique_ptr<GrFragmentProcessor> colorizer,
+ std::unique_ptr<GrFragmentProcessor> gradLayout, bool mirror,
+ bool makePremul, bool colorsAreOpaque)
+ : INHERITED(kGrTiledGradientEffect_ClassID,
+ (OptimizationFlags)kCompatibleWithCoverageAsAlpha_OptimizationFlag |
+ (colorsAreOpaque && gradLayout->preservesOpaqueInput()
+ ? kPreservesOpaqueInput_OptimizationFlag
+ : kNone_OptimizationFlags))
+ , mirror(mirror)
+ , makePremul(makePremul)
+ , colorsAreOpaque(colorsAreOpaque) {
+ SkASSERT(colorizer);
+ colorizer_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(colorizer));
+ SkASSERT(gradLayout);
+ gradLayout_index = this->numChildProcessors();
+ this->registerChildProcessor(std::move(gradLayout));
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
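
The private constructor above folds child properties into its optimization flags: coverage-as-alpha compatibility is unconditional, while opaque-input preservation requires both opaque colors and a layout child that keeps its output in range. A small sketch of that folding, with illustrative bit values rather than Skia's real flag constants:

    // Sketch: derive the two flags the constructor combines. The bit
    // positions are made up for illustration; only the logic matters.
    static unsigned tiledGradientFlags(bool colorsAreOpaque,
                                       bool layoutPreservesOpaque) {
        const unsigned kCoverageAsAlpha = 1u << 0;
        const unsigned kPreservesOpaque = 1u << 1;
        unsigned flags = kCoverageAsAlpha;
        if (colorsAreOpaque && layoutPreservesOpaque) {
            flags |= kPreservesOpaque;
        }
        return flags;
    }
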
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.cpp
new file mode 100644
index 0000000000..804a42c6d8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTwoPointConicalGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#include "GrTwoPointConicalGradientLayout.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLTwoPointConicalGradientLayout : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLTwoPointConicalGradientLayout() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrTwoPointConicalGradientLayout& _outer =
+ args.fFp.cast<GrTwoPointConicalGradientLayout>();
+ (void)_outer;
+ auto gradientMatrix = _outer.gradientMatrix;
+ (void)gradientMatrix;
+ auto type = _outer.type;
+ (void)type;
+ auto isRadiusIncreasing = _outer.isRadiusIncreasing;
+ (void)isRadiusIncreasing;
+ auto isFocalOnCircle = _outer.isFocalOnCircle;
+ (void)isFocalOnCircle;
+ auto isWellBehaved = _outer.isWellBehaved;
+ (void)isWellBehaved;
+ auto isSwapped = _outer.isSwapped;
+ (void)isSwapped;
+ auto isNativelyFocal = _outer.isNativelyFocal;
+ (void)isNativelyFocal;
+ auto focalParams = _outer.focalParams;
+ (void)focalParams;
+ focalParamsVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "focalParams");
+ SkString sk_TransformedCoords2D_0 =
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+ fragBuilder->codeAppendf(
+ "float2 p = %s;\nfloat t = -1.0;\nhalf v = 1.0;\n@switch (%d) {\n case 1:\n "
+ " {\n half r0_2 = %s.y;\n t = float(r0_2) - p.y * p.y;\n "
+ " if (t >= 0.0) {\n t = p.x + sqrt(t);\n } else "
+ "{\n v = -1.0;\n }\n }\n break;\n case "
+ "0:\n {\n half r0 = %s.x;\n @if (%s) {\n "
+ " t = length(p) - float(r0);\n } else {\n t = "
+ "-length(p) - float(r0);\n ",
+ _outer.computeLocalCoordsInVertexShader() ? sk_TransformedCoords2D_0.c_str()
+ : "_coords",
+ (int)_outer.type, args.fUniformHandler->getUniformCStr(focalParamsVar),
+ args.fUniformHandler->getUniformCStr(focalParamsVar),
+ (_outer.isRadiusIncreasing ? "true" : "false"));
+ fragBuilder->codeAppendf(
+ " }\n }\n break;\n case 2:\n {\n half invR1 "
+ "= %s.x;\n half fx = %s.y;\n float x_t = -1.0;\n "
+ "@if (%s) {\n x_t = dot(p, p) / p.x;\n } else if (%s) "
+ "{\n x_t = length(p) - p.x * float(invR1);\n } else {\n "
+ " float temp = p.x * p.x - p.y * p.y;\n if (temp >= "
+ "0.0) {\n @if (%s || !%s) {\n x_t = "
+ "-sqrt(temp) - p.x * float(invR1)",
+ args.fUniformHandler->getUniformCStr(focalParamsVar),
+ args.fUniformHandler->getUniformCStr(focalParamsVar),
+ (_outer.isFocalOnCircle ? "true" : "false"),
+ (_outer.isWellBehaved ? "true" : "false"), (_outer.isSwapped ? "true" : "false"),
+ (_outer.isRadiusIncreasing ? "true" : "false"));
+ fragBuilder->codeAppendf(
+ ";\n } else {\n x_t = sqrt(temp) - p.x * "
+ "float(invR1);\n }\n }\n }\n "
+ " @if (!%s) {\n if (x_t <= 0.0) {\n v = -1.0;\n "
+ " }\n }\n @if (%s) {\n @if (%s) "
+ "{\n t = x_t;\n } else {\n t "
+ "= x_t + float(fx);\n }\n } else {\n @if "
+ "(%s) {\n ",
+ (_outer.isWellBehaved ? "true" : "false"),
+ (_outer.isRadiusIncreasing ? "true" : "false"),
+ (_outer.isNativelyFocal ? "true" : "false"),
+ (_outer.isNativelyFocal ? "true" : "false"));
+ fragBuilder->codeAppendf(
+ " t = -x_t;\n } else {\n t = -x_t + "
+ "float(fx);\n }\n }\n @if (%s) {\n "
+ " t = 1.0 - t;\n }\n }\n break;\n}\n%s = "
+ "half4(half(t), v, 0.0, 0.0);\n",
+ (_outer.isSwapped ? "true" : "false"), args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrTwoPointConicalGradientLayout& _outer =
+ _proc.cast<GrTwoPointConicalGradientLayout>();
+ {
+ const SkPoint& focalParamsValue = _outer.focalParams;
+ if (focalParamsPrev != focalParamsValue) {
+ focalParamsPrev = focalParamsValue;
+ pdman.set2f(focalParamsVar, focalParamsValue.fX, focalParamsValue.fY);
+ }
+ }
+ }
+ SkPoint focalParamsPrev = SkPoint::Make(SK_FloatNaN, SK_FloatNaN);
+ UniformHandle focalParamsVar;
+};
+GrGLSLFragmentProcessor* GrTwoPointConicalGradientLayout::onCreateGLSLInstance() const {
+ return new GrGLSLTwoPointConicalGradientLayout();
+}
+void GrTwoPointConicalGradientLayout::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)type);
+ b->add32((int32_t)isRadiusIncreasing);
+ b->add32((int32_t)isFocalOnCircle);
+ b->add32((int32_t)isWellBehaved);
+ b->add32((int32_t)isSwapped);
+ b->add32((int32_t)isNativelyFocal);
+}
+bool GrTwoPointConicalGradientLayout::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrTwoPointConicalGradientLayout& that = other.cast<GrTwoPointConicalGradientLayout>();
+ (void)that;
+ if (gradientMatrix != that.gradientMatrix) return false;
+ if (type != that.type) return false;
+ if (isRadiusIncreasing != that.isRadiusIncreasing) return false;
+ if (isFocalOnCircle != that.isFocalOnCircle) return false;
+ if (isWellBehaved != that.isWellBehaved) return false;
+ if (isSwapped != that.isSwapped) return false;
+ if (isNativelyFocal != that.isNativelyFocal) return false;
+ if (focalParams != that.focalParams) return false;
+ return true;
+}
+GrTwoPointConicalGradientLayout::GrTwoPointConicalGradientLayout(
+ const GrTwoPointConicalGradientLayout& src)
+ : INHERITED(kGrTwoPointConicalGradientLayout_ClassID, src.optimizationFlags())
+ , fCoordTransform0(src.fCoordTransform0)
+ , gradientMatrix(src.gradientMatrix)
+ , type(src.type)
+ , isRadiusIncreasing(src.isRadiusIncreasing)
+ , isFocalOnCircle(src.isFocalOnCircle)
+ , isWellBehaved(src.isWellBehaved)
+ , isSwapped(src.isSwapped)
+ , isNativelyFocal(src.isNativelyFocal)
+ , focalParams(src.focalParams) {
+ this->addCoordTransform(&fCoordTransform0);
+}
+std::unique_ptr<GrFragmentProcessor> GrTwoPointConicalGradientLayout::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrTwoPointConicalGradientLayout(*this));
+}
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrTwoPointConicalGradientLayout);
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrTwoPointConicalGradientLayout::TestCreate(
+ GrProcessorTestData* d) {
+ SkScalar scale = GrGradientShader::RandomParams::kGradientScale;
+ SkScalar offset = scale / 32.0f;
+
+ SkPoint center1 = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkPoint center2 = {d->fRandom->nextRangeScalar(0.0f, scale),
+ d->fRandom->nextRangeScalar(0.0f, scale)};
+ SkScalar radius1 = d->fRandom->nextRangeScalar(0.0f, scale);
+ SkScalar radius2 = d->fRandom->nextRangeScalar(0.0f, scale);
+
+ constexpr int kTestTypeMask = (1 << 2) - 1, kTestNativelyFocalBit = (1 << 2),
+ kTestFocalOnCircleBit = (1 << 3), kTestSwappedBit = (1 << 4);
+    // We won't treat isWellBehaved and isRadiusIncreasing specially because they
+    // should have a high probability of being toggled on and off as we pick random
+    // radii and centers.
+
+ int mask = d->fRandom->nextU();
+ int type = mask & kTestTypeMask;
+ if (type == static_cast<int>(Type::kRadial)) {
+ center2 = center1;
+ // Make sure that the radii are different
+ if (SkScalarNearlyZero(radius1 - radius2)) {
+ radius2 += offset;
+ }
+ } else if (type == static_cast<int>(Type::kStrip)) {
+ radius1 = SkTMax(radius1, .1f); // Make sure that the radius is non-zero
+ radius2 = radius1;
+ // Make sure that the centers are different
+ if (SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+ center2.fX += offset;
+ }
+    } else { // Type::kFocal
+ // Make sure that the centers are different
+ if (SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+ center2.fX += offset;
+ }
+
+ if (kTestNativelyFocalBit & mask) {
+ radius1 = 0;
+ }
+ if (kTestFocalOnCircleBit & mask) {
+ radius2 = radius1 + SkPoint::Distance(center1, center2);
+ }
+ if (kTestSwappedBit & mask) {
+ std::swap(radius1, radius2);
+ radius2 = 0;
+ }
+
+ // Make sure that the radii are different
+ if (SkScalarNearlyZero(radius1 - radius2)) {
+ radius2 += offset;
+ }
+ }
+
+ if (SkScalarNearlyZero(radius1 - radius2) &&
+ SkScalarNearlyZero(SkPoint::Distance(center1, center2))) {
+        radius2 += offset;  // make sure that we're not degenerate
+ }
+
+ GrGradientShader::RandomParams params(d->fRandom);
+ auto shader = params.fUseColors4f
+ ? SkGradientShader::MakeTwoPointConical(
+ center1, radius1, center2, radius2, params.fColors4f,
+ params.fColorSpace, params.fStops, params.fColorCount,
+ params.fTileMode)
+ : SkGradientShader::MakeTwoPointConical(
+ center1, radius1, center2, radius2, params.fColors,
+ params.fStops, params.fColorCount, params.fTileMode);
+ GrTest::TestAsFPArgs asFPArgs(d);
+ std::unique_ptr<GrFragmentProcessor> fp = as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+
+ GrAlwaysAssert(fp);
+ return fp;
+}
+#endif
+
+// .fp files do not let you reference outside enum definitions, so we have to explicitly map
+// between the two compatible enum definitions.
+GrTwoPointConicalGradientLayout::Type convert_type(SkTwoPointConicalGradient::Type type) {
+ switch (type) {
+ case SkTwoPointConicalGradient::Type::kRadial:
+ return GrTwoPointConicalGradientLayout::Type::kRadial;
+ case SkTwoPointConicalGradient::Type::kStrip:
+ return GrTwoPointConicalGradientLayout::Type::kStrip;
+ case SkTwoPointConicalGradient::Type::kFocal:
+ return GrTwoPointConicalGradientLayout::Type::kFocal;
+ }
+ SkDEBUGFAIL("Should not be reachable");
+ return GrTwoPointConicalGradientLayout::Type::kRadial;
+}
+
+std::unique_ptr<GrFragmentProcessor> GrTwoPointConicalGradientLayout::Make(
+ const SkTwoPointConicalGradient& grad, const GrFPArgs& args) {
+ GrTwoPointConicalGradientLayout::Type grType = convert_type(grad.getType());
+
+ // The focalData struct is only valid if isFocal is true
+ const SkTwoPointConicalGradient::FocalData& focalData = grad.getFocalData();
+ bool isFocal = grType == Type::kFocal;
+
+ // Calculate optimization switches from gradient specification
+ bool isFocalOnCircle = isFocal && focalData.isFocalOnCircle();
+ bool isWellBehaved = isFocal && focalData.isWellBehaved();
+ bool isSwapped = isFocal && focalData.isSwapped();
+ bool isNativelyFocal = isFocal && focalData.isNativelyFocal();
+
+    // Type-specific calculations: isRadiusIncreasing, focalParams, and the gradient matrix.
+    // However, all types start with the total inverse local matrix calculated from the shader
+    // and args.
+ bool isRadiusIncreasing;
+ SkPoint focalParams; // really just a 2D tuple
+ SkMatrix matrix;
+
+ // Initialize the base matrix
+ if (!grad.totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+
+ if (isFocal) {
+ isRadiusIncreasing = (1 - focalData.fFocalX) > 0;
+
+ focalParams.set(1.0 / focalData.fR1, focalData.fFocalX);
+
+ matrix.postConcat(grad.getGradientMatrix());
+ } else if (grType == Type::kRadial) {
+ SkScalar dr = grad.getDiffRadius();
+ isRadiusIncreasing = dr >= 0;
+
+ SkScalar r0 = grad.getStartRadius() / dr;
+ focalParams.set(r0, r0 * r0);
+
+ // GPU radial matrix is different from the original matrix, since we map the diff radius
+ // to have |dr| = 1, so manually compute the final gradient matrix here.
+
+ // Map center to (0, 0)
+ matrix.postTranslate(-grad.getStartCenter().fX, -grad.getStartCenter().fY);
+
+ // scale |diffRadius| to 1
+ matrix.postScale(1 / dr, 1 / dr);
+ } else { // kStrip
+ isRadiusIncreasing = false; // kStrip doesn't use this flag
+
+ SkScalar r0 = grad.getStartRadius() / grad.getCenterX1();
+ focalParams.set(r0, r0 * r0);
+
+ matrix.postConcat(grad.getGradientMatrix());
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrTwoPointConicalGradientLayout(
+ matrix, grType, isRadiusIncreasing, isFocalOnCircle, isWellBehaved, isSwapped,
+ isNativelyFocal, focalParams));
+}
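
In the kRadial branch of Make(), the geometry is renormalized so the shader only ever sees |dr| = 1: the start center moves to the origin, the plane is scaled by 1/dr, and focalParams carries r0 = startRadius/dr. The shader's case 0 then reduces to a length computation; a CPU sketch of that evaluation (helper name illustrative):

    #include <cmath>

    // Sketch: normalized radial-gradient interpolant for a point already
    // mapped by the matrix built above. The two signs correspond to the
    // @if (isRadiusIncreasing) branch in the emitted SkSL.
    static float radialT(float px, float py, float r0, bool radiusIncreasing) {
        float len = std::sqrt(px * px + py * py);
        return radiusIncreasing ? len - r0 : -len - r0;
    }
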
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.h b/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.h
new file mode 100644
index 0000000000..878b8d2c54
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrTwoPointConicalGradientLayout.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrTwoPointConicalGradientLayout.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrTwoPointConicalGradientLayout_DEFINED
+#define GrTwoPointConicalGradientLayout_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/gradients/GrGradientShader.h"
+#include "src/shaders/gradients/SkTwoPointConicalGradient.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrTwoPointConicalGradientLayout : public GrFragmentProcessor {
+public:
+ enum class Type { kFocal = 2, kRadial = 0, kStrip = 1 };
+
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkTwoPointConicalGradient& gradient,
+ const GrFPArgs& args);
+ GrTwoPointConicalGradientLayout(const GrTwoPointConicalGradientLayout& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "TwoPointConicalGradientLayout"; }
+ GrCoordTransform fCoordTransform0;
+ SkMatrix44 gradientMatrix;
+ Type type;
+ bool isRadiusIncreasing;
+ bool isFocalOnCircle;
+ bool isWellBehaved;
+ bool isSwapped;
+ bool isNativelyFocal;
+ SkPoint focalParams;
+
+private:
+ GrTwoPointConicalGradientLayout(SkMatrix44 gradientMatrix, Type type, bool isRadiusIncreasing,
+ bool isFocalOnCircle, bool isWellBehaved, bool isSwapped,
+ bool isNativelyFocal, SkPoint focalParams)
+ : INHERITED(kGrTwoPointConicalGradientLayout_ClassID,
+ (OptimizationFlags)kNone_OptimizationFlags)
+ , fCoordTransform0(gradientMatrix)
+ , gradientMatrix(gradientMatrix)
+ , type(type)
+ , isRadiusIncreasing(isRadiusIncreasing)
+ , isFocalOnCircle(isFocalOnCircle)
+ , isWellBehaved(isWellBehaved)
+ , isSwapped(isSwapped)
+ , isNativelyFocal(isNativelyFocal)
+ , focalParams(focalParams) {
+ this->addCoordTransform(&fCoordTransform0);
+ }
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.cpp b/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.cpp
new file mode 100644
index 0000000000..3cf31b7c7d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.cpp
@@ -0,0 +1,381 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrUnrolledBinaryGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#include "GrUnrolledBinaryGradientColorizer.h"
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/sksl/SkSLCPP.h"
+#include "src/sksl/SkSLUtil.h"
+class GrGLSLUnrolledBinaryGradientColorizer : public GrGLSLFragmentProcessor {
+public:
+ GrGLSLUnrolledBinaryGradientColorizer() {}
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrUnrolledBinaryGradientColorizer& _outer =
+ args.fFp.cast<GrUnrolledBinaryGradientColorizer>();
+ (void)_outer;
+ auto intervalCount = _outer.intervalCount;
+ (void)intervalCount;
+ auto scale0_1 = _outer.scale0_1;
+ (void)scale0_1;
+ auto scale2_3 = _outer.scale2_3;
+ (void)scale2_3;
+ auto scale4_5 = _outer.scale4_5;
+ (void)scale4_5;
+ auto scale6_7 = _outer.scale6_7;
+ (void)scale6_7;
+ auto scale8_9 = _outer.scale8_9;
+ (void)scale8_9;
+ auto scale10_11 = _outer.scale10_11;
+ (void)scale10_11;
+ auto scale12_13 = _outer.scale12_13;
+ (void)scale12_13;
+ auto scale14_15 = _outer.scale14_15;
+ (void)scale14_15;
+ auto bias0_1 = _outer.bias0_1;
+ (void)bias0_1;
+ auto bias2_3 = _outer.bias2_3;
+ (void)bias2_3;
+ auto bias4_5 = _outer.bias4_5;
+ (void)bias4_5;
+ auto bias6_7 = _outer.bias6_7;
+ (void)bias6_7;
+ auto bias8_9 = _outer.bias8_9;
+ (void)bias8_9;
+ auto bias10_11 = _outer.bias10_11;
+ (void)bias10_11;
+ auto bias12_13 = _outer.bias12_13;
+ (void)bias12_13;
+ auto bias14_15 = _outer.bias14_15;
+ (void)bias14_15;
+ auto thresholds1_7 = _outer.thresholds1_7;
+ (void)thresholds1_7;
+ auto thresholds9_13 = _outer.thresholds9_13;
+ (void)thresholds9_13;
+ scale0_1Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale0_1");
+ if (intervalCount > 1) {
+ scale2_3Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale2_3");
+ }
+ if (intervalCount > 2) {
+ scale4_5Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale4_5");
+ }
+ if (intervalCount > 3) {
+ scale6_7Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale6_7");
+ }
+ if (intervalCount > 4) {
+ scale8_9Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "scale8_9");
+ }
+ if (intervalCount > 5) {
+ scale10_11Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "scale10_11");
+ }
+ if (intervalCount > 6) {
+ scale12_13Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "scale12_13");
+ }
+ if (intervalCount > 7) {
+ scale14_15Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "scale14_15");
+ }
+ bias0_1Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias0_1");
+ if (intervalCount > 1) {
+ bias2_3Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias2_3");
+ }
+ if (intervalCount > 2) {
+ bias4_5Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias4_5");
+ }
+ if (intervalCount > 3) {
+ bias6_7Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias6_7");
+ }
+ if (intervalCount > 4) {
+ bias8_9Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+ "bias8_9");
+ }
+ if (intervalCount > 5) {
+ bias10_11Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "bias10_11");
+ }
+ if (intervalCount > 6) {
+ bias12_13Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "bias12_13");
+ }
+ if (intervalCount > 7) {
+ bias14_15Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat4_GrSLType, "bias14_15");
+ }
+ thresholds1_7Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, kHalf4_GrSLType,
+ "thresholds1_7");
+ thresholds9_13Var = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kHalf4_GrSLType, "thresholds9_13");
+ fragBuilder->codeAppendf(
+ "half t = %s.x;\nfloat4 scale, bias;\nif (%d <= 4 || t < %s.w) {\n if (%d <= 2 "
+ "|| t < %s.y) {\n if (%d <= 1 || t < %s.x) {\n scale = %s;\n "
+ " bias = %s;\n } else {\n scale = %s;\n bias = "
+ "%s;\n }\n } else {\n if (%d <= 3 || t < %s.z) {\n "
+ "scale = %s;\n bias = %s;\n } else {\n scale = %s;\n "
+ " bias = %s;\n }\n }\n} else {\n if (%d <= 6 || t < %s.y) "
+ "{\n if (%d <= 5 || t <",
+ args.fInputColor, _outer.intervalCount,
+ args.fUniformHandler->getUniformCStr(thresholds1_7Var), _outer.intervalCount,
+ args.fUniformHandler->getUniformCStr(thresholds1_7Var), _outer.intervalCount,
+ args.fUniformHandler->getUniformCStr(thresholds1_7Var),
+ args.fUniformHandler->getUniformCStr(scale0_1Var),
+ args.fUniformHandler->getUniformCStr(bias0_1Var),
+ scale2_3Var.isValid() ? args.fUniformHandler->getUniformCStr(scale2_3Var)
+ : "float4(0)",
+ bias2_3Var.isValid() ? args.fUniformHandler->getUniformCStr(bias2_3Var)
+ : "float4(0)",
+ _outer.intervalCount, args.fUniformHandler->getUniformCStr(thresholds1_7Var),
+ scale4_5Var.isValid() ? args.fUniformHandler->getUniformCStr(scale4_5Var)
+ : "float4(0)",
+ bias4_5Var.isValid() ? args.fUniformHandler->getUniformCStr(bias4_5Var)
+ : "float4(0)",
+ scale6_7Var.isValid() ? args.fUniformHandler->getUniformCStr(scale6_7Var)
+ : "float4(0)",
+ bias6_7Var.isValid() ? args.fUniformHandler->getUniformCStr(bias6_7Var)
+ : "float4(0)",
+ _outer.intervalCount, args.fUniformHandler->getUniformCStr(thresholds9_13Var),
+ _outer.intervalCount);
+ fragBuilder->codeAppendf(
+ " %s.x) {\n scale = %s;\n bias = %s;\n } else {\n "
+ " scale = %s;\n bias = %s;\n }\n } else {\n if "
+ "(%d <= 7 || t < %s.z) {\n scale = %s;\n bias = %s;\n "
+ "} else {\n scale = %s;\n bias = %s;\n }\n "
+ "}\n}\n%s = half4(float(t) * scale + bias);\n",
+ args.fUniformHandler->getUniformCStr(thresholds9_13Var),
+ scale8_9Var.isValid() ? args.fUniformHandler->getUniformCStr(scale8_9Var)
+ : "float4(0)",
+ bias8_9Var.isValid() ? args.fUniformHandler->getUniformCStr(bias8_9Var)
+ : "float4(0)",
+ scale10_11Var.isValid() ? args.fUniformHandler->getUniformCStr(scale10_11Var)
+ : "float4(0)",
+ bias10_11Var.isValid() ? args.fUniformHandler->getUniformCStr(bias10_11Var)
+ : "float4(0)",
+ _outer.intervalCount, args.fUniformHandler->getUniformCStr(thresholds9_13Var),
+ scale12_13Var.isValid() ? args.fUniformHandler->getUniformCStr(scale12_13Var)
+ : "float4(0)",
+ bias12_13Var.isValid() ? args.fUniformHandler->getUniformCStr(bias12_13Var)
+ : "float4(0)",
+ scale14_15Var.isValid() ? args.fUniformHandler->getUniformCStr(scale14_15Var)
+ : "float4(0)",
+ bias14_15Var.isValid() ? args.fUniformHandler->getUniformCStr(bias14_15Var)
+ : "float4(0)",
+ args.fOutputColor);
+ }
+
+private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& _proc) override {
+ const GrUnrolledBinaryGradientColorizer& _outer =
+ _proc.cast<GrUnrolledBinaryGradientColorizer>();
+ {
+ pdman.set4fv(scale0_1Var, 1, (_outer.scale0_1).vec());
+ if (scale2_3Var.isValid()) {
+ pdman.set4fv(scale2_3Var, 1, (_outer.scale2_3).vec());
+ }
+ if (scale4_5Var.isValid()) {
+ pdman.set4fv(scale4_5Var, 1, (_outer.scale4_5).vec());
+ }
+ if (scale6_7Var.isValid()) {
+ pdman.set4fv(scale6_7Var, 1, (_outer.scale6_7).vec());
+ }
+ if (scale8_9Var.isValid()) {
+ pdman.set4fv(scale8_9Var, 1, (_outer.scale8_9).vec());
+ }
+ if (scale10_11Var.isValid()) {
+ pdman.set4fv(scale10_11Var, 1, (_outer.scale10_11).vec());
+ }
+ if (scale12_13Var.isValid()) {
+ pdman.set4fv(scale12_13Var, 1, (_outer.scale12_13).vec());
+ }
+ if (scale14_15Var.isValid()) {
+ pdman.set4fv(scale14_15Var, 1, (_outer.scale14_15).vec());
+ }
+ pdman.set4fv(bias0_1Var, 1, (_outer.bias0_1).vec());
+ if (bias2_3Var.isValid()) {
+ pdman.set4fv(bias2_3Var, 1, (_outer.bias2_3).vec());
+ }
+ if (bias4_5Var.isValid()) {
+ pdman.set4fv(bias4_5Var, 1, (_outer.bias4_5).vec());
+ }
+ if (bias6_7Var.isValid()) {
+ pdman.set4fv(bias6_7Var, 1, (_outer.bias6_7).vec());
+ }
+ if (bias8_9Var.isValid()) {
+ pdman.set4fv(bias8_9Var, 1, (_outer.bias8_9).vec());
+ }
+ if (bias10_11Var.isValid()) {
+ pdman.set4fv(bias10_11Var, 1, (_outer.bias10_11).vec());
+ }
+ if (bias12_13Var.isValid()) {
+ pdman.set4fv(bias12_13Var, 1, (_outer.bias12_13).vec());
+ }
+ if (bias14_15Var.isValid()) {
+ pdman.set4fv(bias14_15Var, 1, (_outer.bias14_15).vec());
+ }
+ pdman.set4fv(thresholds1_7Var, 1,
+ reinterpret_cast<const float*>(&(_outer.thresholds1_7)));
+ pdman.set4fv(thresholds9_13Var, 1,
+ reinterpret_cast<const float*>(&(_outer.thresholds9_13)));
+ }
+ }
+ UniformHandle scale0_1Var;
+ UniformHandle scale2_3Var;
+ UniformHandle scale4_5Var;
+ UniformHandle scale6_7Var;
+ UniformHandle scale8_9Var;
+ UniformHandle scale10_11Var;
+ UniformHandle scale12_13Var;
+ UniformHandle scale14_15Var;
+ UniformHandle bias0_1Var;
+ UniformHandle bias2_3Var;
+ UniformHandle bias4_5Var;
+ UniformHandle bias6_7Var;
+ UniformHandle bias8_9Var;
+ UniformHandle bias10_11Var;
+ UniformHandle bias12_13Var;
+ UniformHandle bias14_15Var;
+ UniformHandle thresholds1_7Var;
+ UniformHandle thresholds9_13Var;
+};
+GrGLSLFragmentProcessor* GrUnrolledBinaryGradientColorizer::onCreateGLSLInstance() const {
+ return new GrGLSLUnrolledBinaryGradientColorizer();
+}
+void GrUnrolledBinaryGradientColorizer::onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32((int32_t)intervalCount);
+}
+bool GrUnrolledBinaryGradientColorizer::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrUnrolledBinaryGradientColorizer& that = other.cast<GrUnrolledBinaryGradientColorizer>();
+ (void)that;
+ if (intervalCount != that.intervalCount) return false;
+ if (scale0_1 != that.scale0_1) return false;
+ if (scale2_3 != that.scale2_3) return false;
+ if (scale4_5 != that.scale4_5) return false;
+ if (scale6_7 != that.scale6_7) return false;
+ if (scale8_9 != that.scale8_9) return false;
+ if (scale10_11 != that.scale10_11) return false;
+ if (scale12_13 != that.scale12_13) return false;
+ if (scale14_15 != that.scale14_15) return false;
+ if (bias0_1 != that.bias0_1) return false;
+ if (bias2_3 != that.bias2_3) return false;
+ if (bias4_5 != that.bias4_5) return false;
+ if (bias6_7 != that.bias6_7) return false;
+ if (bias8_9 != that.bias8_9) return false;
+ if (bias10_11 != that.bias10_11) return false;
+ if (bias12_13 != that.bias12_13) return false;
+ if (bias14_15 != that.bias14_15) return false;
+ if (thresholds1_7 != that.thresholds1_7) return false;
+ if (thresholds9_13 != that.thresholds9_13) return false;
+ return true;
+}
+GrUnrolledBinaryGradientColorizer::GrUnrolledBinaryGradientColorizer(
+ const GrUnrolledBinaryGradientColorizer& src)
+ : INHERITED(kGrUnrolledBinaryGradientColorizer_ClassID, src.optimizationFlags())
+ , intervalCount(src.intervalCount)
+ , scale0_1(src.scale0_1)
+ , scale2_3(src.scale2_3)
+ , scale4_5(src.scale4_5)
+ , scale6_7(src.scale6_7)
+ , scale8_9(src.scale8_9)
+ , scale10_11(src.scale10_11)
+ , scale12_13(src.scale12_13)
+ , scale14_15(src.scale14_15)
+ , bias0_1(src.bias0_1)
+ , bias2_3(src.bias2_3)
+ , bias4_5(src.bias4_5)
+ , bias6_7(src.bias6_7)
+ , bias8_9(src.bias8_9)
+ , bias10_11(src.bias10_11)
+ , bias12_13(src.bias12_13)
+ , bias14_15(src.bias14_15)
+ , thresholds1_7(src.thresholds1_7)
+ , thresholds9_13(src.thresholds9_13) {}
+std::unique_ptr<GrFragmentProcessor> GrUnrolledBinaryGradientColorizer::clone() const {
+ return std::unique_ptr<GrFragmentProcessor>(new GrUnrolledBinaryGradientColorizer(*this));
+}
+
+static const int kMaxIntervals = 8;
+std::unique_ptr<GrFragmentProcessor> GrUnrolledBinaryGradientColorizer::Make(
+ const SkPMColor4f* colors, const SkScalar* positions, int count) {
+    // Depending on how the positions resolve into hard stops or regular stops, the number of
+    // intervals specified by the number of colors/positions can change. For instance, a plain
+    // 3-color gradient is two intervals, but a 4-color gradient with a hard stop is also
+    // two intervals. At the most extreme end, an 8-interval gradient made entirely of hard
+    // stops has 16 colors.
+
+ if (count > kMaxColorCount) {
+ // Definitely cannot represent this gradient configuration
+ return nullptr;
+ }
+
+ // The raster implementation also uses scales and biases, but since they must be calculated
+ // after the dst color space is applied, it limits our ability to cache their values.
+ SkPMColor4f scales[kMaxIntervals];
+ SkPMColor4f biases[kMaxIntervals];
+ SkScalar thresholds[kMaxIntervals];
+
+ int intervalCount = 0;
+
+ for (int i = 0; i < count - 1; i++) {
+ if (intervalCount >= kMaxIntervals) {
+ // Already reached kMaxIntervals, and haven't run out of color stops so this
+ // gradient cannot be represented by this shader.
+ return nullptr;
+ }
+
+ SkScalar t0 = positions[i];
+ SkScalar t1 = positions[i + 1];
+ SkScalar dt = t1 - t0;
+ // If the interval is empty, skip to the next interval. This will automatically create
+ // distinct hard stop intervals as needed. It also protects against malformed gradients
+ // that have repeated hard stops at the very beginning that are effectively unreachable.
+ if (SkScalarNearlyZero(dt)) {
+ continue;
+ }
+
+ auto c0 = Sk4f::Load(colors[i].vec());
+ auto c1 = Sk4f::Load(colors[i + 1].vec());
+
+ auto scale = (c1 - c0) / dt;
+ auto bias = c0 - t0 * scale;
+
+ scale.store(scales + intervalCount);
+ bias.store(biases + intervalCount);
+ thresholds[intervalCount] = t1;
+ intervalCount++;
+ }
+
+ // For isEqual to make sense, set the unused values to something consistent
+ for (int i = intervalCount; i < kMaxIntervals; i++) {
+ scales[i] = SK_PMColor4fTRANSPARENT;
+ biases[i] = SK_PMColor4fTRANSPARENT;
+ thresholds[i] = 0.0;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new GrUnrolledBinaryGradientColorizer(
+ intervalCount, scales[0], scales[1], scales[2], scales[3], scales[4], scales[5],
+ scales[6], scales[7], biases[0], biases[1], biases[2], biases[3], biases[4], biases[5],
+ biases[6], biases[7],
+ SkRect::MakeLTRB(thresholds[0], thresholds[1], thresholds[2], thresholds[3]),
+ SkRect::MakeLTRB(thresholds[4], thresholds[5], thresholds[6], 0.0)));
+}
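
Each interval i stores color(t) = t * scale_i + bias_i, with scale_i = (c1 - c0) / dt and bias_i = c0 - t0 * scale_i, so evaluation is a threshold search followed by a single multiply-add. A CPU reference of the same scheme, using plain arrays where the processor packs uniforms (all names here are illustrative):

    struct Color4f { float r, g, b, a; };

    // Sketch: evaluate the colorizer on the CPU. thresholds[i] is the upper
    // end of interval i (the t1 captured in the loop above); the shader
    // performs the same comparisons, unrolled as nested ifs.
    static Color4f colorize(const Color4f* scales, const Color4f* biases,
                            const float* thresholds, int intervalCount, float t) {
        int lo = 0, hi = intervalCount - 1;
        while (lo < hi) {  // binary search: first interval with t below its threshold
            int mid = (lo + hi) / 2;
            if (t < thresholds[mid]) { hi = mid; } else { lo = mid + 1; }
        }
        const Color4f& s = scales[lo];
        const Color4f& b = biases[lo];
        return { t * s.r + b.r, t * s.g + b.g, t * s.b + b.b, t * s.a + b.a };
    }
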
diff --git a/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h b/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h
new file mode 100644
index 0000000000..04a878bc40
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**************************************************************************************************
+ *** This file was autogenerated from GrUnrolledBinaryGradientColorizer.fp; do not modify.
+ **************************************************************************************************/
+#ifndef GrUnrolledBinaryGradientColorizer_DEFINED
+#define GrUnrolledBinaryGradientColorizer_DEFINED
+#include "include/core/SkTypes.h"
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+class GrUnrolledBinaryGradientColorizer : public GrFragmentProcessor {
+public:
+ static const int kMaxColorCount = 16;
+
+ static std::unique_ptr<GrFragmentProcessor> Make(const SkPMColor4f* colors,
+ const SkScalar* positions,
+ int count);
+ GrUnrolledBinaryGradientColorizer(const GrUnrolledBinaryGradientColorizer& src);
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+ const char* name() const override { return "UnrolledBinaryGradientColorizer"; }
+ int32_t intervalCount;
+ SkPMColor4f scale0_1;
+ SkPMColor4f scale2_3;
+ SkPMColor4f scale4_5;
+ SkPMColor4f scale6_7;
+ SkPMColor4f scale8_9;
+ SkPMColor4f scale10_11;
+ SkPMColor4f scale12_13;
+ SkPMColor4f scale14_15;
+ SkPMColor4f bias0_1;
+ SkPMColor4f bias2_3;
+ SkPMColor4f bias4_5;
+ SkPMColor4f bias6_7;
+ SkPMColor4f bias8_9;
+ SkPMColor4f bias10_11;
+ SkPMColor4f bias12_13;
+ SkPMColor4f bias14_15;
+ SkRect thresholds1_7;
+ SkRect thresholds9_13;
+
+private:
+ GrUnrolledBinaryGradientColorizer(int32_t intervalCount,
+ SkPMColor4f scale0_1,
+ SkPMColor4f scale2_3,
+ SkPMColor4f scale4_5,
+ SkPMColor4f scale6_7,
+ SkPMColor4f scale8_9,
+ SkPMColor4f scale10_11,
+ SkPMColor4f scale12_13,
+ SkPMColor4f scale14_15,
+ SkPMColor4f bias0_1,
+ SkPMColor4f bias2_3,
+ SkPMColor4f bias4_5,
+ SkPMColor4f bias6_7,
+ SkPMColor4f bias8_9,
+ SkPMColor4f bias10_11,
+ SkPMColor4f bias12_13,
+ SkPMColor4f bias14_15,
+ SkRect thresholds1_7,
+ SkRect thresholds9_13)
+ : INHERITED(kGrUnrolledBinaryGradientColorizer_ClassID, kNone_OptimizationFlags)
+ , intervalCount(intervalCount)
+ , scale0_1(scale0_1)
+ , scale2_3(scale2_3)
+ , scale4_5(scale4_5)
+ , scale6_7(scale6_7)
+ , scale8_9(scale8_9)
+ , scale10_11(scale10_11)
+ , scale12_13(scale12_13)
+ , scale14_15(scale14_15)
+ , bias0_1(bias0_1)
+ , bias2_3(bias2_3)
+ , bias4_5(bias4_5)
+ , bias6_7(bias6_7)
+ , bias8_9(bias8_9)
+ , bias10_11(bias10_11)
+ , bias12_13(bias12_13)
+ , bias14_15(bias14_15)
+ , thresholds1_7(thresholds1_7)
+ , thresholds9_13(thresholds9_13) {}
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockBuffer.h b/gfx/skia/skia/src/gpu/mock/GrMockBuffer.h
new file mode 100644
index 0000000000..17b56a0f02
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockBuffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockBuffer_DEFINED
+#define GrMockBuffer_DEFINED
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/mock/GrMockGpu.h"
+
+class GrMockBuffer : public GrGpuBuffer {
+public:
+ GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
+ GrAccessPattern accessPattern)
+ : INHERITED(gpu, sizeInBytes, type, accessPattern) {
+ this->registerWithCache(SkBudgeted::kYes);
+ }
+
+private:
+ void onMap() override {
+ if (GrCaps::kNone_MapFlags != this->getGpu()->caps()->mapBufferFlags()) {
+ fMapPtr = sk_malloc_throw(this->size());
+ }
+ }
+ void onUnmap() override { sk_free(fMapPtr); }
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override { return true; }
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
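
onMap() hands back scratch heap memory whenever the mock caps advertise mapping, and onUnmap() frees it; nothing is ever uploaded, which is all the unit tests need. The contract in miniature, outside Skia's class hierarchy (a sketch, not the real GrGpuBuffer interface):

    #include <cstddef>
    #include <cstdlib>

    // Sketch: a mock buffer's map/unmap round trip. The mock GPU has no
    // backing store, so map() just allocates CPU scratch space.
    struct MockBuffer {
        size_t size = 0;
        void*  mapPtr = nullptr;
        void*  map()   { mapPtr = std::malloc(size); return mapPtr; }
        void   unmap() { std::free(mapPtr); mapPtr = nullptr; }
    };
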
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockCaps.cpp b/gfx/skia/skia/src/gpu/mock/GrMockCaps.cpp
new file mode 100644
index 0000000000..8d798faf3b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockCaps.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mock/GrMockCaps.h"
+
+#if GR_TEST_UTILS
+std::vector<GrCaps::TestFormatColorTypeCombination> GrMockCaps::getTestingCombinations() const {
+ // TODO: need to add compressed formats to this list
+ std::vector<GrCaps::TestFormatColorTypeCombination> combos = {
+ { GrColorType::kAlpha_8, GrBackendFormat::MakeMock(GrColorType::kAlpha_8) },
+ { GrColorType::kBGR_565, GrBackendFormat::MakeMock(GrColorType::kBGR_565) },
+ { GrColorType::kABGR_4444, GrBackendFormat::MakeMock(GrColorType::kABGR_4444) },
+ { GrColorType::kRGBA_8888, GrBackendFormat::MakeMock(GrColorType::kRGBA_8888) },
+ { GrColorType::kRGBA_8888_SRGB, GrBackendFormat::MakeMock(GrColorType::kRGBA_8888_SRGB) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeMock(GrColorType::kRGB_888x) },
+ { GrColorType::kRG_88, GrBackendFormat::MakeMock(GrColorType::kRG_88) },
+ { GrColorType::kBGRA_8888, GrBackendFormat::MakeMock(GrColorType::kBGRA_8888) },
+ { GrColorType::kRGBA_1010102, GrBackendFormat::MakeMock(GrColorType::kRGBA_1010102) },
+ { GrColorType::kGray_8, GrBackendFormat::MakeMock(GrColorType::kGray_8) },
+ { GrColorType::kAlpha_F16, GrBackendFormat::MakeMock(GrColorType::kAlpha_F16) },
+ { GrColorType::kRGBA_F16, GrBackendFormat::MakeMock(GrColorType::kRGBA_F16) },
+        { GrColorType::kRGBA_F16_Clamped, GrBackendFormat::MakeMock(GrColorType::kRGBA_F16_Clamped) },
+ { GrColorType::kAlpha_16, GrBackendFormat::MakeMock(GrColorType::kAlpha_16) },
+ { GrColorType::kRG_1616, GrBackendFormat::MakeMock(GrColorType::kRG_1616) },
+ { GrColorType::kRGBA_16161616, GrBackendFormat::MakeMock(GrColorType::kRGBA_16161616) },
+ { GrColorType::kRG_F16, GrBackendFormat::MakeMock(GrColorType::kRG_F16) },
+ };
+
+#ifdef SK_DEBUG
+ for (auto combo : combos) {
+ SkASSERT(this->onAreColorTypeAndFormatCompatible(combo.fColorType, combo.fFormat));
+ }
+#endif
+
+ return combos;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockCaps.h b/gfx/skia/skia/src/gpu/mock/GrMockCaps.h
new file mode 100644
index 0000000000..457236c238
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockCaps.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockCaps_DEFINED
+#define GrMockCaps_DEFINED
+
+#include "include/gpu/mock/GrMockTypes.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/SkGr.h"
+
+class GrMockCaps : public GrCaps {
+public:
+ GrMockCaps(const GrContextOptions& contextOptions, const GrMockOptions& options)
+ : INHERITED(contextOptions), fOptions(options) {
+ fMipMapSupport = options.fMipMapSupport;
+ fInstanceAttribSupport = options.fInstanceAttribSupport;
+ fHalfFloatVertexAttributeSupport = options.fHalfFloatVertexAttributeSupport;
+ fMapBufferFlags = options.fMapBufferFlags;
+ fBufferMapThreshold = SK_MaxS32; // Overridable in GrContextOptions.
+ fMaxTextureSize = options.fMaxTextureSize;
+ fMaxRenderTargetSize = SkTMin(options.fMaxRenderTargetSize, fMaxTextureSize);
+ fMaxPreferredRenderTargetSize = fMaxRenderTargetSize;
+ fMaxVertexAttributes = options.fMaxVertexAttributes;
+ fSampleLocationsSupport = true;
+
+ fShaderCaps.reset(new GrShaderCaps(contextOptions));
+ fShaderCaps->fGeometryShaderSupport = options.fGeometryShaderSupport;
+ fShaderCaps->fIntegerSupport = options.fIntegerSupport;
+ fShaderCaps->fFlatInterpolationSupport = options.fFlatInterpolationSupport;
+ fShaderCaps->fMaxFragmentSamplers = options.fMaxFragmentSamplers;
+ fShaderCaps->fShaderDerivativeSupport = options.fShaderDerivativeSupport;
+ fShaderCaps->fDualSourceBlendingSupport = options.fDualSourceBlendingSupport;
+ fShaderCaps->fSampleVariablesSupport = true;
+ fShaderCaps->fSampleVariablesStencilSupport = true;
+
+ this->applyOptionsOverrides(contextOptions);
+ }
+
+ bool isFormatSRGB(const GrBackendFormat& format) const override {
+ auto ct = format.asMockColorType();
+ return GrGetColorTypeDesc(ct).encoding() == GrColorTypeEncoding::kSRGBUnorm;
+ }
+
+ // Mock caps doesn't support any compressed formats right now
+ bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const override {
+ return false;
+ }
+
+ bool isFormatTexturableAndUploadable(GrColorType,
+ const GrBackendFormat& format) const override {
+ return this->isFormatTexturable(format);
+ }
+ bool isFormatTexturable(const GrBackendFormat& format) const override {
+ auto index = static_cast<int>(format.asMockColorType());
+ return fOptions.fConfigOptions[index].fTexturable;
+ }
+
+ bool isFormatCopyable(const GrBackendFormat& format) const override {
+ return false;
+ }
+
+ bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const override {
+ // Currently we don't allow RGB_888X to be renderable because we don't have a way to
+ // handle blends that reference dst alpha when the values in the dst alpha channel are
+ // uninitialized.
+ if (ct == GrColorType::kRGB_888x) {
+ return false;
+ }
+ return this->isFormatRenderable(format, sampleCount);
+ }
+
+ bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const override {
+ return sampleCount <= this->maxRenderTargetSampleCount(format.asMockColorType());
+ }
+
+ int getRenderTargetSampleCount(int requestCount, GrColorType ct) const {
+ requestCount = SkTMax(requestCount, 1);
+
+ switch (fOptions.fConfigOptions[(int)ct].fRenderability) {
+ case GrMockOptions::ConfigOptions::Renderability::kNo:
+ return 0;
+ case GrMockOptions::ConfigOptions::Renderability::kNonMSAA:
+ return requestCount > 1 ? 0 : 1;
+ case GrMockOptions::ConfigOptions::Renderability::kMSAA:
+ return requestCount > kMaxSampleCnt ? 0 : GrNextPow2(requestCount);
+ }
+ return 0;
+ }
+
+ int getRenderTargetSampleCount(int requestCount,
+ const GrBackendFormat& format) const override {
+ return this->getRenderTargetSampleCount(requestCount, format.asMockColorType());
+ }
+
+ int maxRenderTargetSampleCount(GrColorType ct) const {
+ switch (fOptions.fConfigOptions[(int)ct].fRenderability) {
+ case GrMockOptions::ConfigOptions::Renderability::kNo:
+ return 0;
+ case GrMockOptions::ConfigOptions::Renderability::kNonMSAA:
+ return 1;
+ case GrMockOptions::ConfigOptions::Renderability::kMSAA:
+ return kMaxSampleCnt;
+ }
+ return 0;
+ }
+
+ int maxRenderTargetSampleCount(const GrBackendFormat& format) const override {
+ return this->maxRenderTargetSampleCount(format.asMockColorType());
+ }
+
+ size_t bytesPerPixel(const GrBackendFormat& format) const override {
+ return GrColorTypeBytesPerPixel(format.asMockColorType());
+ }
+
+ SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const override {
+ return {surfaceColorType, 1};
+ }
+
+ SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override {
+ return SurfaceReadPixelsSupport::kSupported;
+ }
+
+ GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat& format,
+ bool isAlphaChannel) const override {
+ return format.asMockColorType();
+ }
+
+ GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override {
+ return {};
+ }
+
+ GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const override {
+ return GrSwizzle();
+ }
+ GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const override {
+ return GrSwizzle();
+ }
+
+#if GR_TEST_UTILS
+ std::vector<GrCaps::TestFormatColorTypeCombination> getTestingCombinations() const override;
+#endif
+
+private:
+ bool onSurfaceSupportsWritePixels(const GrSurface*) const override { return true; }
+ bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const override {
+ return true;
+ }
+ GrBackendFormat onGetDefaultBackendFormat(GrColorType ct, GrRenderable) const override {
+ return GrBackendFormat::MakeMock(ct);
+ }
+
+ GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType) const override {
+ return GrColorTypeToPixelConfig(format.asMockColorType());
+ }
+
+ bool onAreColorTypeAndFormatCompatible(GrColorType ct,
+ const GrBackendFormat& format) const override {
+ if (ct == GrColorType::kUnknown) {
+ return false;
+ }
+
+ return ct == format.asMockColorType();
+ }
+
+ SupportedRead onSupportedReadPixelsColorType(GrColorType srcColorType, const GrBackendFormat&,
+ GrColorType) const override {
+ return SupportedRead{srcColorType, 1};
+ }
+
+ static const int kMaxSampleCnt = 16;
+
+ GrMockOptions fOptions;
+ typedef GrCaps INHERITED;
+};
+
+#endif
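
getRenderTargetSampleCount() answers what MSAA count a request would actually get: 0 for non-renderable configs, 1 for non-MSAA configs when at most one sample is requested, and otherwise the request rounded up to the next power of two, capped at kMaxSampleCnt (16). The same policy as a standalone function, with GrNextPow2 replaced by a plain loop:

    // Sketch: the mock sample-count policy. Request 3 -> 4, 16 -> 16,
    // 17 -> 0 (unsupported); request 2 on a non-MSAA config -> 0.
    enum class Renderability { kNo, kNonMSAA, kMSAA };

    static int mockSampleCount(int requested, Renderability r,
                               int maxSamples = 16) {
        if (requested < 1) requested = 1;
        switch (r) {
            case Renderability::kNo:      return 0;
            case Renderability::kNonMSAA: return requested > 1 ? 0 : 1;
            case Renderability::kMSAA: {
                if (requested > maxSamples) return 0;
                int pow2 = 1;
                while (pow2 < requested) pow2 <<= 1;  // next power of two
                return pow2;
            }
        }
        return 0;
    }
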
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockGpu.cpp b/gfx/skia/skia/src/gpu/mock/GrMockGpu.cpp
new file mode 100644
index 0000000000..0825d86ecb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockGpu.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mock/GrMockBuffer.h"
+#include "src/gpu/mock/GrMockCaps.h"
+#include "src/gpu/mock/GrMockGpu.h"
+#include "src/gpu/mock/GrMockOpsRenderPass.h"
+#include "src/gpu/mock/GrMockStencilAttachment.h"
+#include "src/gpu/mock/GrMockTexture.h"
+#include <atomic>
+
+int GrMockGpu::NextInternalTextureID() {
+ static std::atomic<int> nextID{1};
+ int id;
+ do {
+ id = nextID.fetch_add(1);
+ } while (0 == id); // Reserve 0 for an invalid ID.
+ return id;
+}
+
+int GrMockGpu::NextExternalTextureID() {
+ // We use negative ints for the "testing only external textures" so they can easily be
+ // identified when debugging.
+ static std::atomic<int> nextID{-1};
+ return nextID--;
+}
+
+int GrMockGpu::NextInternalRenderTargetID() {
+ // We start off with large numbers to differentiate from texture IDs, even though they're
+ // technically in a different space.
+ static std::atomic<int> nextID{SK_MaxS32};
+ return nextID--;
+}
+
+int GrMockGpu::NextExternalRenderTargetID() {
+ // We use large negative ints for the "testing only external render targets" so they can easily
+ // be identified when debugging.
+ static std::atomic<int> nextID{SK_MinS32};
+ return nextID++;
+}
+
+sk_sp<GrGpu> GrMockGpu::Make(const GrMockOptions* mockOptions,
+ const GrContextOptions& contextOptions, GrContext* context) {
+ static const GrMockOptions kDefaultOptions = GrMockOptions();
+ if (!mockOptions) {
+ mockOptions = &kDefaultOptions;
+ }
+ return sk_sp<GrGpu>(new GrMockGpu(context, *mockOptions, contextOptions));
+}
+
+GrOpsRenderPass* GrMockGpu::getOpsRenderPass(
+ GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ return new GrMockOpsRenderPass(this, rt, origin, colorInfo);
+}
+
+void GrMockGpu::submit(GrOpsRenderPass* renderPass) {
+ for (int i = 0; i < static_cast<GrMockOpsRenderPass*>(renderPass)->numDraws(); ++i) {
+ fStats.incNumDraws();
+ }
+ delete renderPass;
+}
+
+GrMockGpu::GrMockGpu(GrContext* context, const GrMockOptions& options,
+ const GrContextOptions& contextOptions)
+ : INHERITED(context)
+ , fMockOptions(options) {
+ fCaps.reset(new GrMockCaps(contextOptions, options));
+}
+
+void GrMockGpu::querySampleLocations(GrRenderTarget* rt, SkTArray<SkPoint>* sampleLocations) {
+ sampleLocations->reset();
+ int numRemainingSamples = rt->numSamples();
+ while (numRemainingSamples > 0) {
+ // Use standard D3D sample locations.
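+        // Each pass appends the pattern for the next power-of-two sample count; for counts
+        // above 16, the 16-sample pattern repeats until enough locations have been emitted.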
+ switch (numRemainingSamples) {
+ case 0:
+ case 1:
+ sampleLocations->push_back().set(.5, .5);
+ break;
+ case 2:
+ sampleLocations->push_back().set(.75, .75);
+ sampleLocations->push_back().set(.25, .25);
+ break;
+ case 3:
+ case 4:
+ sampleLocations->push_back().set(.375, .125);
+ sampleLocations->push_back().set(.875, .375);
+ sampleLocations->push_back().set(.125, .625);
+ sampleLocations->push_back().set(.625, .875);
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ sampleLocations->push_back().set(.5625, .3125);
+ sampleLocations->push_back().set(.4375, .6875);
+ sampleLocations->push_back().set(.8125, .5625);
+ sampleLocations->push_back().set(.3125, .1875);
+ sampleLocations->push_back().set(.1875, .8125);
+ sampleLocations->push_back().set(.0625, .4375);
+ sampleLocations->push_back().set(.6875, .4375);
+ sampleLocations->push_back().set(.4375, .0625);
+ break;
+ default:
+ sampleLocations->push_back().set(.5625, .5625);
+ sampleLocations->push_back().set(.4375, .3125);
+ sampleLocations->push_back().set(.3125, .6250);
+ sampleLocations->push_back().set(.2500, .4375);
+ sampleLocations->push_back().set(.1875, .3750);
+ sampleLocations->push_back().set(.6250, .8125);
+ sampleLocations->push_back().set(.8125, .6875);
+ sampleLocations->push_back().set(.6875, .1875);
+ sampleLocations->push_back().set(.3750, .8750);
+ sampleLocations->push_back().set(.5000, .0625);
+ sampleLocations->push_back().set(.2500, .1250);
+ sampleLocations->push_back().set(.1250, .2500);
+ sampleLocations->push_back().set(.0000, .5000);
+ sampleLocations->push_back().set(.4375, .2500);
+ sampleLocations->push_back().set(.8750, .4375);
+ sampleLocations->push_back().set(.0625, .0000);
+ break;
+ }
+ numRemainingSamples = rt->numSamples() - sampleLocations->count();
+ }
+}
+
+sk_sp<GrTexture> GrMockGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ if (fMockOptions.fFailTextureAllocations) {
+ return nullptr;
+ }
+
+ GrColorType ct = format.asMockColorType();
+ SkASSERT(ct != GrColorType::kUnknown);
+
+ GrMipMapsStatus mipMapsStatus =
+ mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
+ GrMockTextureInfo texInfo(ct, NextInternalTextureID());
+ if (renderable == GrRenderable::kYes) {
+ GrMockRenderTargetInfo rtInfo(ct, NextInternalRenderTargetID());
+ return sk_sp<GrTexture>(new GrMockTextureRenderTarget(this, budgeted, desc,
+ renderTargetSampleCnt, isProtected,
+ mipMapsStatus, texInfo, rtInfo));
+ }
+ return sk_sp<GrTexture>(
+ new GrMockTexture(this, budgeted, desc, isProtected, mipMapsStatus, texInfo));
+}
+
+sk_sp<GrTexture> GrMockGpu::onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType compressionType,
+ SkBudgeted budgeted, const void* data) {
+ return nullptr;
+}
+
+sk_sp<GrTexture> GrMockGpu::onWrapBackendTexture(const GrBackendTexture& tex, GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable wrapType, GrIOType ioType) {
+ GrMockTextureInfo texInfo;
+ SkAssertResult(tex.getMockTextureInfo(&texInfo));
+
+ SkASSERT(colorType == texInfo.fColorType);
+ GrSurfaceDesc desc;
+ desc.fWidth = tex.width();
+ desc.fHeight = tex.height();
+ desc.fConfig = texInfo.pixelConfig();
+
+ GrMipMapsStatus mipMapsStatus = tex.hasMipMaps() ? GrMipMapsStatus::kValid
+ : GrMipMapsStatus::kNotAllocated;
+ auto isProtected = GrProtected(tex.isProtected());
+ return sk_sp<GrTexture>(
+ new GrMockTexture(this, desc, isProtected, mipMapsStatus, texInfo, wrapType, ioType));
+}
+
+sk_sp<GrTexture> GrMockGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable) {
+ GrMockTextureInfo texInfo;
+ SkAssertResult(tex.getMockTextureInfo(&texInfo));
+
+ SkASSERT(colorType == texInfo.fColorType);
+ GrSurfaceDesc desc;
+ desc.fWidth = tex.width();
+ desc.fHeight = tex.height();
+ desc.fConfig = texInfo.pixelConfig();
+
+ GrMipMapsStatus mipMapsStatus =
+ tex.hasMipMaps() ? GrMipMapsStatus::kValid : GrMipMapsStatus::kNotAllocated;
+
+ // The client gave us the texture ID but we supply the render target ID.
+ GrMockRenderTargetInfo rtInfo(texInfo.fColorType, NextInternalRenderTargetID());
+
+ auto isProtected = GrProtected(tex.isProtected());
+ return sk_sp<GrTexture>(new GrMockTextureRenderTarget(
+ this, desc, sampleCnt, isProtected, mipMapsStatus, texInfo, rtInfo, cacheable));
+}
+
+sk_sp<GrRenderTarget> GrMockGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt,
+ GrColorType colorType) {
+ GrMockRenderTargetInfo info;
+ SkAssertResult(rt.getMockRenderTargetInfo(&info));
+
+ SkASSERT(colorType == info.colorType());
+ GrSurfaceDesc desc;
+ desc.fWidth = rt.width();
+ desc.fHeight = rt.height();
+ desc.fConfig = info.pixelConfig();
+
+ auto isProtected = GrProtected(rt.isProtected());
+ return sk_sp<GrRenderTarget>(new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc,
+ rt.sampleCnt(), isProtected, info));
+}
+
+sk_sp<GrRenderTarget> GrMockGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType colorType) {
+ GrMockTextureInfo texInfo;
+ SkAssertResult(tex.getMockTextureInfo(&texInfo));
+
+ SkASSERT(colorType == texInfo.fColorType);
+ GrSurfaceDesc desc;
+ desc.fWidth = tex.width();
+ desc.fHeight = tex.height();
+ desc.fConfig = texInfo.pixelConfig();
+
+ // The client gave us the texture ID but we supply the render target ID.
+ GrMockRenderTargetInfo rtInfo(texInfo.fColorType, NextInternalRenderTargetID());
+
+ auto isProtected = GrProtected(tex.isProtected());
+ return sk_sp<GrRenderTarget>(new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc,
+ sampleCnt, isProtected, rtInfo));
+}
+
+sk_sp<GrGpuBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
+ GrAccessPattern accessPattern, const void*) {
+ return sk_sp<GrGpuBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern));
+}
+
+GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(
+ const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
+ SkASSERT(numStencilSamples == rt->numSamples());
+ static constexpr int kBits = 8;
+ fStats.incStencilAttachmentCreates();
+ return new GrMockStencilAttachment(this, width, height, kBits, rt->numSamples());
+}
+
+GrBackendTexture GrMockGpu::onCreateBackendTexture(int w, int h,
+ const GrBackendFormat& format,
+ GrMipMapped mipMapped,
+ GrRenderable /* renderable */,
+ const SkPixmap /*srcData*/[],
+ int /*numMipLevels*/,
+ const SkColor4f* /* color */,
+ GrProtected /* isProtected */) {
+ auto colorType = format.asMockColorType();
+ if (!this->caps()->isFormatTexturable(format)) {
+ return GrBackendTexture(); // invalid
+ }
+
+ GrMockTextureInfo info(colorType, NextExternalTextureID());
+
+ fOutstandingTestingOnlyTextureIDs.add(info.fID);
+ return GrBackendTexture(w, h, mipMapped, info);
+}
+
+void GrMockGpu::deleteBackendTexture(const GrBackendTexture& tex) {
+ SkASSERT(GrBackendApi::kMock == tex.backend());
+
+ GrMockTextureInfo info;
+ if (tex.getMockTextureInfo(&info)) {
+ fOutstandingTestingOnlyTextureIDs.remove(info.fID);
+ }
+}
+
+#if GR_TEST_UTILS
+bool GrMockGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(GrBackendApi::kMock == tex.backend());
+
+ GrMockTextureInfo info;
+ if (!tex.getMockTextureInfo(&info)) {
+ return false;
+ }
+
+ return fOutstandingTestingOnlyTextureIDs.contains(info.fID);
+}
+
+GrBackendRenderTarget GrMockGpu::createTestingOnlyBackendRenderTarget(int w, int h,
+ GrColorType colorType) {
+ GrMockRenderTargetInfo info(colorType, NextExternalRenderTargetID());
+ static constexpr int kSampleCnt = 1;
+ static constexpr int kStencilBits = 8;
+ return GrBackendRenderTarget(w, h, kSampleCnt, kStencilBits, info);
+}
+
+void GrMockGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) {}
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockGpu.h b/gfx/skia/skia/src/gpu/mock/GrMockGpu.h
new file mode 100644
index 0000000000..bbafae0270
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockGpu.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockGpu_DEFINED
+#define GrMockGpu_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "include/private/SkTHash.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrSemaphore.h"
+
+class GrMockOpsRenderPass;
+struct GrMockOptions;
+class GrPipeline;
+
+class GrMockGpu : public GrGpu {
+public:
+ static sk_sp<GrGpu> Make(const GrMockOptions*, const GrContextOptions&, GrContext*);
+
+ ~GrMockGpu() override {}
+
+ GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget*, GrSurfaceOrigin, const SkIRect&,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() override { return 0; }
+ bool waitFence(GrFence, uint64_t) override { return true; }
+ void deleteFence(GrFence) const override {}
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override {
+ return nullptr;
+ }
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) override { return nullptr; }
+ void insertSemaphore(sk_sp<GrSemaphore> semaphore) override {}
+ void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
+
+ void submit(GrOpsRenderPass* renderPass) override;
+
+ void checkFinishProcs() override {}
+
+private:
+ GrMockGpu(GrContext* context, const GrMockOptions&, const GrContextOptions&);
+
+ void onResetContext(uint32_t resetBits) override {}
+
+ void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>* sampleLocations) override;
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&,
+ const GrBackendFormat&,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) override;
+
+ sk_sp<GrTexture> onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) override;
+
+ sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType, GrWrapOwnership,
+ GrWrapCacheable, GrIOType) override;
+
+ sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType,
+ GrWrapOwnership,
+ GrWrapCacheable) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt, GrColorType) override;
+
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern,
+ const void*) override;
+
+ bool onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) override {
+ return true;
+ }
+
+ bool onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) override {
+ return true;
+ }
+
+ bool onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override {
+ return true;
+ }
+ bool onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) override {
+ return true;
+ }
+ bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override {
+ return true;
+ }
+
+ bool onRegenerateMipMapLevels(GrTexture*) override { return true; }
+
+ void onResolveRenderTarget(GrRenderTarget* target, const SkIRect&, GrSurfaceOrigin,
+ ForExternalIO) override {}
+
+ void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
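+        // The mock performs no real GPU work, so any finished proc can fire immediately.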
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ }
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(
+ const GrRenderTarget*, int width, int height, int numStencilSamples) override;
+ GrBackendTexture onCreateBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected) override;
+ void deleteBackendTexture(const GrBackendTexture&) override;
+
+#if GR_TEST_UTILS
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+
+ GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
+ void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
+
+ void testingOnly_flushGpuAndSync() override {}
+#endif
+
+ const GrMockOptions fMockOptions;
+
+ static int NextInternalTextureID();
+ static int NextExternalTextureID();
+ static int NextInternalRenderTargetID();
+ static int NextExternalRenderTargetID();
+
+ SkTHashSet<int> fOutstandingTestingOnlyTextureIDs;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockOpsRenderPass.h b/gfx/skia/skia/src/gpu/mock/GrMockOpsRenderPass.h
new file mode 100644
index 0000000000..659dfbaa41
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockOpsRenderPass.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockOpsRenderPass_DEFINED
+#define GrMockOpsRenderPass_DEFINED
+
+#include "src/gpu/GrOpsRenderPass.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/mock/GrMockGpu.h"
+
+class GrMockOpsRenderPass : public GrOpsRenderPass {
+public:
+ GrMockOpsRenderPass(GrMockGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ LoadAndStoreInfo colorInfo)
+ : INHERITED(rt, origin)
+ , fGpu(gpu)
+ , fColorLoadOp(colorInfo.fLoadOp) {
+ }
+
+ GrGpu* gpu() override { return fGpu; }
+ void inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&) override {}
+ void insertEventMarker(const char*) override {}
+ void begin() override {
+ if (GrLoadOp::kClear == fColorLoadOp) {
+ this->markRenderTargetDirty();
+ }
+ }
+ void end() override {}
+
+ int numDraws() const { return fNumDraws; }
+
+private:
+ void onDraw(const GrProgramInfo&, const GrMesh[], int meshCount,
+ const SkRect& bounds) override {
+ this->markRenderTargetDirty();
+ ++fNumDraws;
+ }
+ void onClear(const GrFixedClip&, const SkPMColor4f&) override {
+ this->markRenderTargetDirty();
+ }
+ void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) override {}
+
+ void markRenderTargetDirty() {
+ if (auto* tex = fRenderTarget->asTexture()) {
+ tex->texturePriv().markMipMapsDirty();
+ }
+ }
+
+ GrMockGpu* fGpu;
+ GrLoadOp fColorLoadOp;
+ int fNumDraws = 0;
+
+ typedef GrOpsRenderPass INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockStencilAttachment.h b/gfx/skia/skia/src/gpu/mock/GrMockStencilAttachment.h
new file mode 100644
index 0000000000..a05894447a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockStencilAttachment.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockStencilAttachment_DEFINED
+#define GrMockStencilAttachment_DEFINED
+
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/mock/GrMockGpu.h"
+
+class GrMockStencilAttachment : public GrStencilAttachment {
+public:
+ GrMockStencilAttachment(GrMockGpu* gpu, int width, int height, int bits, int sampleCnt)
+ : INHERITED(gpu, width, height, bits, sampleCnt) {
+ this->registerWithCache(SkBudgeted::kYes);
+ }
+
+private:
+ size_t onGpuMemorySize() const override {
+        // Stencil bits converted to bytes per pixel (at least one byte).
+        return SkTMax(1, (int)(this->bits() / 8)) * this->width() * this->height();
+ }
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockTexture.h b/gfx/skia/skia/src/gpu/mock/GrMockTexture.h
new file mode 100644
index 0000000000..21d0634fe3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockTexture.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrMockTexture_DEFINED
+#define GrMockTexture_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "include/gpu/mock/GrMockTypes.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/mock/GrMockGpu.h"
+
+class GrMockTexture : public GrTexture {
+public:
+ GrMockTexture(GrMockGpu* gpu, SkBudgeted budgeted, const GrSurfaceDesc& desc,
+ GrProtected isProtected, GrMipMapsStatus mipMapsStatus,
+ const GrMockTextureInfo& info)
+ : GrMockTexture(gpu, desc, isProtected, mipMapsStatus, info) {
+ this->registerWithCache(budgeted);
+ }
+
+ GrMockTexture(GrMockGpu* gpu, const GrSurfaceDesc& desc, GrProtected isProtected,
+ GrMipMapsStatus mipMapsStatus, const GrMockTextureInfo& info,
+ GrWrapCacheable cacheable, GrIOType ioType)
+ : GrMockTexture(gpu, desc, isProtected, mipMapsStatus, info) {
+ if (ioType == kRead_GrIOType) {
+ this->setReadOnly();
+ }
+ this->registerWithCacheWrapped(cacheable);
+ }
+
+ ~GrMockTexture() override {}
+
+ GrBackendTexture getBackendTexture() const override {
+ return GrBackendTexture(this->width(), this->height(), this->texturePriv().mipMapped(),
+ fInfo);
+ }
+
+ GrBackendFormat backendFormat() const override {
+ return fInfo.getBackendFormat();
+ }
+
+ void textureParamsModified() override {}
+
+protected:
+ // constructor for subclasses
+ GrMockTexture(GrMockGpu* gpu, const GrSurfaceDesc& desc, GrProtected isProtected,
+ GrMipMapsStatus mipMapsStatus, const GrMockTextureInfo& info)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected,
+ GrTextureType::k2D, mipMapsStatus)
+ , fInfo(info) {}
+
+ void onRelease() override {
+ INHERITED::onRelease();
+ }
+
+ void onAbandon() override {
+ INHERITED::onAbandon();
+ }
+
+ bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) override {
+ return false;
+ }
+
+private:
+ GrMockTextureInfo fInfo;
+
+ typedef GrTexture INHERITED;
+};
+
+class GrMockRenderTarget : public GrRenderTarget {
+public:
+ GrMockRenderTarget(GrMockGpu* gpu, SkBudgeted budgeted, const GrSurfaceDesc& desc,
+ int sampleCnt, GrProtected isProtected, const GrMockRenderTargetInfo& info)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, isProtected)
+ , fInfo(info) {
+ this->registerWithCache(budgeted);
+ }
+
+ enum Wrapped { kWrapped };
+ GrMockRenderTarget(GrMockGpu* gpu, Wrapped, const GrSurfaceDesc& desc, int sampleCnt,
+ GrProtected isProtected, const GrMockRenderTargetInfo& info)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, isProtected)
+ , fInfo(info) {
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+ }
+
+ bool canAttemptStencilAttachment() const override { return true; }
+ bool completeStencilAttachment() override { return true; }
+
+ size_t onGpuMemorySize() const override {
+ int numColorSamples = this->numSamples();
+ if (numColorSamples > 1) {
+ // Add one to account for the resolve buffer.
+ ++numColorSamples;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, GrMipMapped::kNo);
+ }
+
+ GrBackendRenderTarget getBackendRenderTarget() const override {
+ int numStencilBits = 0;
+ if (GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment()) {
+ numStencilBits = stencil->bits();
+ }
+ return {this->width(), this->height(), this->numSamples(), numStencilBits, fInfo};
+ }
+
+ GrBackendFormat backendFormat() const override {
+ return fInfo.getBackendFormat();
+ }
+
+protected:
+ // constructor for subclasses
+ GrMockRenderTarget(GrMockGpu* gpu, const GrSurfaceDesc& desc, int sampleCnt,
+ GrProtected isProtected, const GrMockRenderTargetInfo& info)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, isProtected)
+ , fInfo(info) {}
+
+private:
+ GrMockRenderTargetInfo fInfo;
+
+ typedef GrRenderTarget INHERITED;
+};
+
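+// GrMockTexture and GrMockRenderTarget share GrSurface as a virtual base, which is why each
+// constructor below initializes GrSurface directly.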
+class GrMockTextureRenderTarget : public GrMockTexture, public GrMockRenderTarget {
+public:
+ // Internally created.
+ GrMockTextureRenderTarget(GrMockGpu* gpu, SkBudgeted budgeted, const GrSurfaceDesc& desc,
+ int sampleCnt, GrProtected isProtected, GrMipMapsStatus mipMapsStatus,
+ const GrMockTextureInfo& texInfo,
+ const GrMockRenderTargetInfo& rtInfo)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , GrMockTexture(gpu, desc, isProtected, mipMapsStatus, texInfo)
+ , GrMockRenderTarget(gpu, desc, sampleCnt, isProtected, rtInfo) {
+ this->registerWithCache(budgeted);
+ }
+
+ // Renderable wrapped backend texture.
+ GrMockTextureRenderTarget(GrMockGpu* gpu, const GrSurfaceDesc& desc, int sampleCnt,
+ GrProtected isProtected, GrMipMapsStatus mipMapsStatus,
+ const GrMockTextureInfo& texInfo,
+                              const GrMockRenderTargetInfo& rtInfo, GrWrapCacheable cacheable)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, isProtected)
+ , GrMockTexture(gpu, desc, isProtected, mipMapsStatus, texInfo)
+ , GrMockRenderTarget(gpu, desc, sampleCnt, isProtected, rtInfo) {
+        this->registerWithCacheWrapped(cacheable);
+ }
+
+ GrTexture* asTexture() override { return this; }
+ GrRenderTarget* asRenderTarget() override { return this; }
+ const GrTexture* asTexture() const override { return this; }
+ const GrRenderTarget* asRenderTarget() const override { return this; }
+
+ GrBackendFormat backendFormat() const override {
+ return GrMockTexture::backendFormat();
+ }
+
+protected:
+    // This avoids an 'inherits via dominance' warning on MSVC.
+ void willRemoveLastRef() override { GrTexture::willRemoveLastRef(); }
+
+private:
+ void onAbandon() override {
+ GrRenderTarget::onAbandon();
+ GrMockTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrRenderTarget::onRelease();
+ GrMockTexture::onRelease();
+ }
+
+ size_t onGpuMemorySize() const override {
+ int numColorSamples = this->numSamples();
+ if (numColorSamples > 1) {
+ // Add one to account for the resolve buffer.
+ ++numColorSamples;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, this->texturePriv().mipMapped());
+ }
+
+    // This avoids an 'inherits via dominance' warning on MSVC.
+ void computeScratchKey(GrScratchKey* key) const override { GrTexture::computeScratchKey(key); }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mock/GrMockTypes.cpp b/gfx/skia/skia/src/gpu/mock/GrMockTypes.cpp
new file mode 100644
index 0000000000..836fe8a1d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mock/GrMockTypes.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/mock/GrMockTypes.h"
+
+#include "include/gpu/GrBackendSurface.h"
+
+GrBackendFormat GrMockRenderTargetInfo::getBackendFormat() const {
+ return GrBackendFormat::MakeMock(fColorType);
+}
+
+GrBackendFormat GrMockTextureInfo::getBackendFormat() const {
+ return GrBackendFormat::MakeMock(fColorType);
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.h b/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.h
new file mode 100644
index 0000000000..7f11d788d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlBuffer_DEFINED
+#define GrMtlBuffer_DEFINED
+
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/mtl/GrMtlUniformHandler.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlCaps;
+class GrMtlGpu;
+
+class GrMtlBuffer: public GrGpuBuffer {
+public:
+ static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern, const void* data = nullptr);
+
+ ~GrMtlBuffer() override;
+
+ id<MTLBuffer> mtlBuffer() const { return fMtlBuffer; }
+ size_t offset() const { return fOffset; }
+ void bind(); // for initial binding of XferGpuToCpu buffers
+
+protected:
+ GrMtlBuffer(GrMtlGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern);
+
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrMtlGpu* mtlGpu() const;
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ void internalMap(size_t sizeInBytes);
+ void internalUnmap(size_t sizeInBytes);
+
+#ifdef SK_DEBUG
+ void validate() const;
+#endif
+
+ bool fIsDynamic;
+ id<MTLBuffer> fMtlBuffer;
+ size_t fOffset; // offset into shared buffer for dynamic buffers
+ id<MTLBuffer> fMappedBuffer; // buffer used by static buffers for uploads
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.mm
new file mode 100644
index 0000000000..fe585fcf50
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlBuffer.mm
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/mtl/GrMtlBuffer.h"
+#include "src/gpu/mtl/GrMtlCommandBuffer.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ sk_sp<GrMtlBuffer> buffer(new GrMtlBuffer(gpu, size, intendedType, accessPattern));
+ if (data && !buffer->onUpdateData(data, size)) {
+ return nullptr;
+ }
+ return buffer;
+}
+
+GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
+ GrAccessPattern accessPattern)
+ : INHERITED(gpu, size, intendedType, accessPattern)
+ , fIsDynamic(accessPattern != kStatic_GrAccessPattern)
+ , fOffset(0) {
+ // In most cases, we'll allocate dynamic buffers when we map them, below.
+ if (!fIsDynamic) {
+ NSUInteger options = 0;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ options |= MTLResourceStorageModePrivate;
+ }
+ fMtlBuffer = size == 0 ? nil :
+ [gpu->device() newBufferWithLength: size
+ options: options];
+ }
+ this->registerWithCache(SkBudgeted::kYes);
+ VALIDATE();
+}
+
+GrMtlBuffer::~GrMtlBuffer() {
+ SkASSERT(fMtlBuffer == nil);
+ SkASSERT(fMappedBuffer == nil);
+ SkASSERT(fMapPtr == nullptr);
+}
+
+void GrMtlBuffer::bind() {
+ SkASSERT(fIsDynamic && GrGpuBufferType::kXferGpuToCpu == this->intendedType());
+ fMtlBuffer = this->mtlGpu()->resourceProvider().getDynamicBuffer(this->size(), &fOffset);
+}
+
+bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
+ if (!fIsDynamic) {
+ if (fMtlBuffer == nil) {
+ return false;
+ }
+ if (srcInBytes > fMtlBuffer.length) {
+ return false;
+ }
+ }
+ VALIDATE();
+
+ this->internalMap(srcInBytes);
+ if (fMapPtr == nil) {
+ return false;
+ }
+ SkASSERT(fMappedBuffer);
+ if (!fIsDynamic) {
+ SkASSERT(srcInBytes == fMappedBuffer.length);
+ }
+ memcpy(fMapPtr, src, srcInBytes);
+ this->internalUnmap(srcInBytes);
+
+ VALIDATE();
+ return true;
+}
+
+inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrMtlGpu*>(this->getGpu());
+}
+
+void GrMtlBuffer::onAbandon() {
+ fMtlBuffer = nil;
+ fMappedBuffer = nil;
+ fMapPtr = nullptr;
+ VALIDATE();
+ INHERITED::onAbandon();
+}
+
+void GrMtlBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ VALIDATE();
+ fMtlBuffer = nil;
+ fMappedBuffer = nil;
+ fMapPtr = nullptr;
+ VALIDATE();
+ }
+ INHERITED::onRelease();
+}
+
+void GrMtlBuffer::internalMap(size_t sizeInBytes) {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ VALIDATE();
+ SkASSERT(!this->isMapped());
+ if (fIsDynamic) {
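+        // Dynamic buffers suballocate from the resource provider's shared dynamic buffer;
+        // kXferGpuToCpu buffers already received theirs via bind().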
+ if (GrGpuBufferType::kXferGpuToCpu != this->intendedType()) {
+ fMtlBuffer = this->mtlGpu()->resourceProvider().getDynamicBuffer(sizeInBytes, &fOffset);
+ }
+ fMappedBuffer = fMtlBuffer;
+ fMapPtr = static_cast<char*>(fMtlBuffer.contents) + fOffset;
+ } else {
+ SkASSERT(fMtlBuffer);
+ SkASSERT(fMappedBuffer == nil);
+ NSUInteger options = 0;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ options |= MTLResourceStorageModeShared;
+ }
+ fMappedBuffer =
+ [this->mtlGpu()->device() newBufferWithLength: sizeInBytes
+ options: options];
+ fMapPtr = fMappedBuffer.contents;
+ }
+ VALIDATE();
+}
+
+void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
+ SkASSERT(fMtlBuffer);
+ if (this->wasDestroyed()) {
+ return;
+ }
+ VALIDATE();
+ SkASSERT(this->isMapped());
+ if (fMtlBuffer == nil) {
+ fMappedBuffer = nil;
+ fMapPtr = nullptr;
+ return;
+ }
+ if (fIsDynamic) {
+#ifdef SK_BUILD_FOR_MAC
+ // TODO: need to make sure offset and size have valid alignments.
+ [fMtlBuffer didModifyRange: NSMakeRange(fOffset, sizeInBytes)];
+#endif
+ } else {
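+        // Static buffers live in private storage, so blit the shared staging buffer's
+        // contents into place.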
+ GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
+ id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
+ [blitCmdEncoder copyFromBuffer: fMappedBuffer
+ sourceOffset: 0
+ toBuffer: fMtlBuffer
+ destinationOffset: 0
+ size: sizeInBytes];
+ }
+ fMappedBuffer = nil;
+ fMapPtr = nullptr;
+}
+
+void GrMtlBuffer::onMap() {
+ this->internalMap(this->size());
+}
+
+void GrMtlBuffer::onUnmap() {
+ this->internalUnmap(this->size());
+}
+
+#ifdef SK_DEBUG
+void GrMtlBuffer::validate() const {
+ SkASSERT(fMtlBuffer == nil ||
+ this->intendedType() == GrGpuBufferType::kVertex ||
+ this->intendedType() == GrGpuBufferType::kIndex ||
+ this->intendedType() == GrGpuBufferType::kXferCpuToGpu ||
+ this->intendedType() == GrGpuBufferType::kXferGpuToCpu);
+ SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
+ fMappedBuffer.length <= fMtlBuffer.length);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.h b/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.h
new file mode 100644
index 0000000000..6ffd3c3225
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlCaps_DEFINED
+#define GrMtlCaps_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/mtl/GrMtlStencilAttachment.h"
+
+#import <Metal/Metal.h>
+
+class GrShaderCaps;
+
+/**
+ * Stores some capabilities of the Metal backend.
+ */
+class GrMtlCaps : public GrCaps {
+public:
+ typedef GrMtlStencilAttachment::Format StencilFormat;
+
+ GrMtlCaps(const GrContextOptions& contextOptions, id<MTLDevice> device,
+ MTLFeatureSet featureSet);
+
+ bool isFormatSRGB(const GrBackendFormat&) const override;
+ bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const override;
+
+ bool isFormatTexturableAndUploadable(GrColorType, const GrBackendFormat&) const override;
+ bool isFormatTexturable(const GrBackendFormat&) const override;
+ bool isFormatTexturable(MTLPixelFormat) const;
+
+ bool isFormatCopyable(const GrBackendFormat&) const override { return true; }
+
+ bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const override;
+ bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const override;
+ bool isFormatRenderable(MTLPixelFormat, int sampleCount) const;
+
+ int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat&) const override;
+ int getRenderTargetSampleCount(int requestedCount, MTLPixelFormat) const;
+
+ int maxRenderTargetSampleCount(const GrBackendFormat&) const override;
+ int maxRenderTargetSampleCount(MTLPixelFormat) const;
+
+ size_t bytesPerPixel(const GrBackendFormat&) const override;
+ size_t bytesPerPixel(MTLPixelFormat) const;
+
+ SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const override;
+
+ SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override {
+ return SurfaceReadPixelsSupport::kSupported;
+ }
+
+ /**
+     * Returns a supported and most preferred stencil format to use in draws.
+ */
+ const StencilFormat& preferredStencilFormat() const {
+ return fPreferredStencilFormat;
+ }
+
+ bool canCopyAsBlit(MTLPixelFormat dstFormat, int dstSampleCount, MTLPixelFormat srcFormat,
+ int srcSampleCount, const SkIRect& srcRect, const SkIPoint& dstPoint,
+ bool areDstSrcSameObj) const;
+
+ bool canCopyAsResolve(GrSurface* dst, int dstSampleCount, GrSurface* src, int srcSampleCount,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const;
+
+ GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat&,
+ bool isAlphaChannel) const override;
+
+ GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override;
+
+ MTLPixelFormat getFormatFromColorType(GrColorType colorType) const {
+ int idx = static_cast<int>(colorType);
+ return fColorTypeToFormatTable[idx];
+ }
+
+ GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const override;
+ GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const override;
+
+#if GR_TEST_UTILS
+ std::vector<TestFormatColorTypeCombination> getTestingCombinations() const override;
+#endif
+
+private:
+ void initFeatureSet(MTLFeatureSet featureSet);
+
+ void initStencilFormat(const id<MTLDevice> device);
+
+ void initGrCaps(const id<MTLDevice> device);
+ void initShaderCaps();
+
+ void initFormatTable();
+
+ bool onSurfaceSupportsWritePixels(const GrSurface*) const override;
+ bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const override;
+ GrBackendFormat onGetDefaultBackendFormat(GrColorType, GrRenderable) const override;
+ GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat&, GrColorType) const override;
+ bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const override;
+
+ SupportedRead onSupportedReadPixelsColorType(GrColorType, const GrBackendFormat&,
+ GrColorType) const override;
+
+ // ColorTypeInfo for a specific format
+ struct ColorTypeInfo {
+ GrColorType fColorType = GrColorType::kUnknown;
+ enum {
+ kUploadData_Flag = 0x1,
+            // Does Ganesh itself support rendering to this colorType & format pair? Renderability
+            // still additionally depends on whether the format itself is renderable.
+ kRenderable_Flag = 0x2,
+ };
+ uint32_t fFlags = 0;
+
+ GrSwizzle fTextureSwizzle;
+ GrSwizzle fOutputSwizzle;
+ };
+
+ struct FormatInfo {
+ uint32_t colorTypeFlags(GrColorType colorType) const {
+ for (int i = 0; i < fColorTypeInfoCount; ++i) {
+ if (fColorTypeInfos[i].fColorType == colorType) {
+ return fColorTypeInfos[i].fFlags;
+ }
+ }
+ return 0;
+ }
+
+ enum {
+ kTexturable_Flag = 0x1,
+ kRenderable_Flag = 0x2, // Color attachment and blendable
+ kMSAA_Flag = 0x4,
+ kResolve_Flag = 0x8,
+ };
+ static const uint16_t kAllFlags = kTexturable_Flag | kRenderable_Flag |
+ kMSAA_Flag | kResolve_Flag;
+
+ uint16_t fFlags = 0;
+
+ // This value is only valid for regular formats. Compressed formats will be 0.
+ size_t fBytesPerPixel = 0;
+
+ std::unique_ptr<ColorTypeInfo[]> fColorTypeInfos;
+ int fColorTypeInfoCount = 0;
+ };
+#ifdef SK_BUILD_FOR_IOS
+ static constexpr size_t kNumMtlFormats = 17;
+#else
+ static constexpr size_t kNumMtlFormats = 14;
+#endif
+ static size_t GetFormatIndex(MTLPixelFormat);
+ FormatInfo fFormatTable[kNumMtlFormats];
+
+ const FormatInfo& getFormatInfo(const MTLPixelFormat pixelFormat) const {
+ size_t index = GetFormatIndex(pixelFormat);
+ return fFormatTable[index];
+ }
+
+ MTLPixelFormat fColorTypeToFormatTable[kGrColorTypeCnt];
+ void setColorType(GrColorType, std::initializer_list<MTLPixelFormat> formats);
+
+ enum class Platform {
+ kMac,
+ kIOS
+ };
+ bool isMac() { return Platform::kMac == fPlatform; }
+ bool isIOS() { return Platform::kIOS == fPlatform; }
+
+ Platform fPlatform;
+ int fFamilyGroup;
+ int fVersion;
+
+ SkTDArray<int> fSampleCounts;
+
+ StencilFormat fPreferredStencilFormat;
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.mm
new file mode 100644
index 0000000000..7511f26366
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlCaps.mm
@@ -0,0 +1,1114 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlCaps.h"
+
+#include "include/core/SkRect.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlCaps::GrMtlCaps(const GrContextOptions& contextOptions, const id<MTLDevice> device,
+ MTLFeatureSet featureSet)
+ : INHERITED(contextOptions) {
+ fShaderCaps.reset(new GrShaderCaps(contextOptions));
+
+ this->initFeatureSet(featureSet);
+ this->initGrCaps(device);
+ this->initShaderCaps();
+ this->initFormatTable();
+ this->initStencilFormat(device);
+
+ this->applyOptionsOverrides(contextOptions);
+ fShaderCaps->applyOptionsOverrides(contextOptions);
+
+    // The following is disabled due to the unfinished Metal backend, not because Metal itself
+    // doesn't support it.
+ fCrossContextTextureSupport = false; // GrMtlGpu::prepareTextureForCrossContextUsage() not impl
+}
+
+void GrMtlCaps::initFeatureSet(MTLFeatureSet featureSet) {
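+    // Check feature sets from newest to oldest so the most capable supported set wins.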
+ // Mac OSX
+#ifdef SK_BUILD_FOR_MAC
+ if (@available(macOS 10.12, *)) {
+ if (MTLFeatureSet_OSX_GPUFamily1_v2 == featureSet) {
+ fPlatform = Platform::kMac;
+ fFamilyGroup = 1;
+ fVersion = 2;
+ return;
+ }
+ }
+ if (MTLFeatureSet_OSX_GPUFamily1_v1 == featureSet) {
+ fPlatform = Platform::kMac;
+ fFamilyGroup = 1;
+ fVersion = 1;
+ return;
+ }
+#endif
+
+ // iOS Family group 3
+#ifdef SK_BUILD_FOR_IOS
+ if (@available(iOS 10.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily3_v2 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 3;
+ fVersion = 2;
+ return;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily3_v1 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 3;
+ fVersion = 1;
+ return;
+ }
+ }
+
+ // iOS Family group 2
+ if (@available(iOS 10.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily2_v3 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 2;
+ fVersion = 3;
+ return;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily2_v2 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 2;
+ fVersion = 2;
+ return;
+ }
+ }
+ if (MTLFeatureSet_iOS_GPUFamily2_v1 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 2;
+ fVersion = 1;
+ return;
+ }
+
+ // iOS Family group 1
+ if (@available(iOS 10.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily1_v3 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 1;
+ fVersion = 3;
+ return;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if (MTLFeatureSet_iOS_GPUFamily1_v2 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 1;
+ fVersion = 2;
+ return;
+ }
+ }
+ if (MTLFeatureSet_iOS_GPUFamily1_v1 == featureSet) {
+ fPlatform = Platform::kIOS;
+ fFamilyGroup = 1;
+ fVersion = 1;
+ return;
+ }
+#endif
+ // No supported feature sets were found
+ SK_ABORT("Requested an unsupported feature set");
+}
+
+bool GrMtlCaps::canCopyAsBlit(MTLPixelFormat dstFormat, int dstSampleCount,
+ MTLPixelFormat srcFormat, int srcSampleCount,
+ const SkIRect& srcRect, const SkIPoint& dstPoint,
+ bool areDstSrcSameObj) const {
+ if (!dstFormat || dstFormat != srcFormat) {
+ return false;
+ }
+ if ((dstSampleCount > 1 || srcSampleCount > 1) && (dstSampleCount != srcSampleCount)) {
+ return false;
+ }
+ if (areDstSrcSameObj) {
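+        // Copying within the same texture is only allowed when the src and dst regions
+        // don't overlap.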
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.x(), dstPoint.y(),
+ srcRect.width(), srcRect.height());
+ if (dstRect.intersect(srcRect)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GrMtlCaps::canCopyAsResolve(GrSurface* dst, int dstSampleCount,
+ GrSurface* src, int srcSampleCount,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ if (dst == src) {
+ return false;
+ }
+ if (dst->backendFormat() != src->backendFormat()) {
+ return false;
+ }
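+    // A resolve requires a multisampled render-target source and a single-sample destination.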
+ if (dstSampleCount > 1 || srcSampleCount == 1 || !src->asRenderTarget()) {
+ return false;
+ }
+
+ // TODO: Support copying subrectangles
+ if (dstPoint != SkIPoint::Make(0, 0)) {
+ return false;
+ }
+ if (srcRect != SkIRect::MakeXYWH(0, 0, src->width(), src->height())) {
+ return false;
+ }
+
+ return true;
+}
+
+bool GrMtlCaps::onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ int dstSampleCnt = 0;
+ int srcSampleCnt = 0;
+ if (const GrRenderTargetProxy* rtProxy = dst->asRenderTargetProxy()) {
+ dstSampleCnt = rtProxy->numSamples();
+ }
+ if (const GrRenderTargetProxy* rtProxy = src->asRenderTargetProxy()) {
+ srcSampleCnt = rtProxy->numSamples();
+ }
+ SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTargetProxy()));
+ SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTargetProxy()));
+
+ return this->canCopyAsBlit(GrBackendFormatAsMTLPixelFormat(dst->backendFormat()), dstSampleCnt,
+ GrBackendFormatAsMTLPixelFormat(src->backendFormat()), srcSampleCnt,
+ srcRect, dstPoint, dst == src);
+}
+
+void GrMtlCaps::initGrCaps(const id<MTLDevice> device) {
+ // Max vertex attribs is the same on all devices
+ fMaxVertexAttributes = 31;
+
+ // Metal does not support scissor + clear
+ fPerformPartialClearsAsDraws = true;
+
+ // We always copy in/out of a transfer buffer so it's trivial to support row bytes.
+ fReadPixelsRowBytesSupport = true;
+ fWritePixelsRowBytesSupport = true;
+
+ // RenderTarget and Texture size
+ if (this->isMac()) {
+ fMaxRenderTargetSize = 16384;
+ } else {
+ if (3 == fFamilyGroup) {
+ fMaxRenderTargetSize = 16384;
+ } else {
+ // Family group 1 and 2 support 8192 for version 2 and above, 4096 for v1
+ if (1 == fVersion) {
+ fMaxRenderTargetSize = 4096;
+ } else {
+ fMaxRenderTargetSize = 8192;
+ }
+ }
+ }
+ fMaxPreferredRenderTargetSize = fMaxRenderTargetSize;
+ fMaxTextureSize = fMaxRenderTargetSize;
+
+ // Init sample counts. All devices support 1 (i.e. 0 in skia).
+ fSampleCounts.push_back(1);
+ if (@available(iOS 9.0, *)) {
+ for (auto sampleCnt : {2, 4, 8}) {
+ if ([device supportsTextureSampleCount:sampleCnt]) {
+ fSampleCounts.push_back(sampleCnt);
+ }
+ }
+ }
+
+ // Clamp to border is supported on Mac 10.12 and higher. It is not supported on iOS.
+ fClampToBorderSupport = false;
+#ifdef SK_BUILD_FOR_MAC
+ if (@available(macOS 10.12, *)) {
+ fClampToBorderSupport = true;
+ }
+#endif
+
+    // Start with the assumption that there's no reason not to map small buffers.
+ fBufferMapThreshold = 0;
+
+ // Buffers are always fully mapped.
+ fMapBufferFlags = kCanMap_MapFlag | kAsyncRead_MapFlag;
+
+ fOversizedStencilSupport = true;
+
+ fMipMapSupport = true; // always available in Metal
+ fNPOTTextureTileSupport = true; // always available in Metal
+
+    fReuseScratchTextures = true; // Assuming this is okay.
+
+ fTransferBufferSupport = true;
+
+ fTextureBarrierSupport = false; // Need to figure out if we can do this
+
+ fSampleLocationsSupport = false;
+ fMultisampleDisableSupport = false;
+
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ if (this->isMac() || 3 == fFamilyGroup) {
+ fInstanceAttribSupport = true;
+ }
+ }
+
+ fMixedSamplesSupport = false;
+ fGpuTracingSupport = false;
+
+ fFenceSyncSupport = true;
+ bool supportsMTLEvent = false;
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ supportsMTLEvent = true;
+ }
+ fSemaphoreSupport = supportsMTLEvent;
+
+ fCrossContextTextureSupport = false;
+ fHalfFloatVertexAttributeSupport = true;
+}
+
+static bool format_is_srgb(MTLPixelFormat format) {
+ switch (format) {
+ case MTLPixelFormatRGBA8Unorm_sRGB:
+ case MTLPixelFormatBGRA8Unorm_sRGB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GrMtlCaps::isFormatSRGB(const GrBackendFormat& format) const {
+ return format_is_srgb(GrBackendFormatAsMTLPixelFormat(format));
+}
+
+bool GrMtlCaps::isFormatCompressed(const GrBackendFormat& format,
+ SkImage::CompressionType* compressionType) const {
+#ifdef SK_BUILD_FOR_MAC
+ return false;
+#else
+ SkImage::CompressionType dummyType;
+ SkImage::CompressionType* compressionTypePtr = compressionType ? compressionType : &dummyType;
+
+ switch (GrBackendFormatAsMTLPixelFormat(format)) {
+ case MTLPixelFormatETC2_RGB8:
+ // ETC2 uses the same compression layout as ETC1
+ *compressionTypePtr = SkImage::kETC1_CompressionType;
+ return true;
+ default:
+ return false;
+ }
+#endif
+}
+
+bool GrMtlCaps::isFormatTexturableAndUploadable(GrColorType ct,
+ const GrBackendFormat& format) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+
+ uint32_t ctFlags = this->getFormatInfo(mtlFormat).colorTypeFlags(ct);
+ return this->isFormatTexturable(mtlFormat) &&
+ SkToBool(ctFlags & ColorTypeInfo::kUploadData_Flag);
+}
+
+bool GrMtlCaps::isFormatTexturable(const GrBackendFormat& format) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ return this->isFormatTexturable(mtlFormat);
+}
+
+bool GrMtlCaps::isFormatTexturable(MTLPixelFormat format) const {
+ const FormatInfo& formatInfo = this->getFormatInfo(format);
+    return SkToBool(FormatInfo::kTexturable_Flag & formatInfo.fFlags);
+}
+
+bool GrMtlCaps::isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount) const {
+ if (!this->isFormatRenderable(format, sampleCount)) {
+ return false;
+ }
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ SkASSERT(mtlFormat != MTLPixelFormatInvalid);
+ const auto& info = this->getFormatInfo(mtlFormat);
+ if (!SkToBool(info.colorTypeFlags(ct) & ColorTypeInfo::kRenderable_Flag)) {
+ return false;
+ }
+ return true;
+}
+
+bool GrMtlCaps::isFormatRenderable(const GrBackendFormat& format, int sampleCount) const {
+ return this->isFormatRenderable(GrBackendFormatAsMTLPixelFormat(format), sampleCount);
+}
+
+bool GrMtlCaps::isFormatRenderable(MTLPixelFormat format, int sampleCount) const {
+ return sampleCount <= this->maxRenderTargetSampleCount(format);
+}
+
+int GrMtlCaps::maxRenderTargetSampleCount(const GrBackendFormat& format) const {
+ return this->maxRenderTargetSampleCount(GrBackendFormatAsMTLPixelFormat(format));
+}
+
+int GrMtlCaps::maxRenderTargetSampleCount(MTLPixelFormat format) const {
+ const FormatInfo& formatInfo = this->getFormatInfo(format);
+ if (formatInfo.fFlags & FormatInfo::kMSAA_Flag) {
+ return fSampleCounts[fSampleCounts.count() - 1];
+ } else if (formatInfo.fFlags & FormatInfo::kRenderable_Flag) {
+ return 1;
+ }
+ return 0;
+}
+
+int GrMtlCaps::getRenderTargetSampleCount(int requestedCount,
+ const GrBackendFormat& format) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+
+ return this->getRenderTargetSampleCount(requestedCount, mtlFormat);
+}
+
+int GrMtlCaps::getRenderTargetSampleCount(int requestedCount, MTLPixelFormat format) const {
+ requestedCount = SkTMax(requestedCount, 1);
+ const FormatInfo& formatInfo = this->getFormatInfo(format);
+ if (!(formatInfo.fFlags & FormatInfo::kRenderable_Flag)) {
+ return 0;
+ }
+ if (formatInfo.fFlags & FormatInfo::kMSAA_Flag) {
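+        // Return the smallest supported sample count that satisfies the request.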
+ int count = fSampleCounts.count();
+ for (int i = 0; i < count; ++i) {
+ if (fSampleCounts[i] >= requestedCount) {
+ return fSampleCounts[i];
+ }
+ }
+ }
+ return 1 == requestedCount ? 1 : 0;
+}
+
+size_t GrMtlCaps::bytesPerPixel(const GrBackendFormat& format) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ return this->bytesPerPixel(mtlFormat);
+}
+
+size_t GrMtlCaps::bytesPerPixel(MTLPixelFormat format) const {
+ return this->getFormatInfo(format).fBytesPerPixel;
+}
+
+void GrMtlCaps::initShaderCaps() {
+ GrShaderCaps* shaderCaps = fShaderCaps.get();
+
+ // Setting this true with the assumption that this cap will eventually mean we support varying
+ // precisions and not just via modifiers.
+ shaderCaps->fUsesPrecisionModifiers = true;
+ shaderCaps->fFlatInterpolationSupport = true;
+    // We haven't yet tested that using flat attributes performs well.
+ shaderCaps->fPreferFlatInterpolation = true;
+
+ shaderCaps->fShaderDerivativeSupport = true;
+ shaderCaps->fGeometryShaderSupport = false;
+
+ if (@available(macOS 10.12, iOS 11.0, *)) {
+ shaderCaps->fDualSourceBlendingSupport = true;
+ } else {
+ shaderCaps->fDualSourceBlendingSupport = false;
+ }
+
+ // TODO: Re-enable this once skbug:8720 is fixed. Will also need to remove asserts in
+ // GrMtlPipelineStateBuilder which assert we aren't using this feature.
+#if 0
+ if (this->isIOS()) {
+ shaderCaps->fFBFetchSupport = true;
+ shaderCaps->fFBFetchNeedsCustomOutput = true; // ??
+ shaderCaps->fFBFetchColorName = ""; // Somehow add [[color(0)]] to arguments to frag shader
+ }
+#endif
+ shaderCaps->fDstReadInShaderSupport = shaderCaps->fFBFetchSupport;
+
+ shaderCaps->fIntegerSupport = true;
+ shaderCaps->fVertexIDSupport = false;
+
+    // Metal uses IEEE floats and half floats, so we assume those values here.
+ shaderCaps->fFloatIs32Bits = true;
+ shaderCaps->fHalfIs32Bits = false;
+
+ shaderCaps->fMaxFragmentSamplers = 16;
+}
+
+// These are all the valid MTLPixelFormats that we support in Skia. They are roughly ordered
+// from most to least frequently used, to improve lookup times in arrays.
+static constexpr MTLPixelFormat kMtlFormats[] = {
+ MTLPixelFormatRGBA8Unorm,
+ MTLPixelFormatR8Unorm,
+ MTLPixelFormatA8Unorm,
+ MTLPixelFormatBGRA8Unorm,
+#ifdef SK_BUILD_FOR_IOS
+ MTLPixelFormatB5G6R5Unorm,
+#endif
+ MTLPixelFormatRGBA16Float,
+ MTLPixelFormatR16Float,
+ MTLPixelFormatRG8Unorm,
+ MTLPixelFormatRGB10A2Unorm,
+#ifdef SK_BUILD_FOR_IOS
+ MTLPixelFormatABGR4Unorm,
+#endif
+ MTLPixelFormatRGBA8Unorm_sRGB,
+ MTLPixelFormatR16Unorm,
+ MTLPixelFormatRG16Unorm,
+#ifdef SK_BUILD_FOR_IOS
+ MTLPixelFormatETC2_RGB8,
+#endif
+ MTLPixelFormatRGBA16Unorm,
+ MTLPixelFormatRG16Float,
+
+ MTLPixelFormatInvalid,
+};
+
+void GrMtlCaps::setColorType(GrColorType colorType, std::initializer_list<MTLPixelFormat> formats) {
+#ifdef SK_DEBUG
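+    // Verify that the formats list includes every format whose table entry advertises
+    // support for this color type.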
+ for (size_t i = 0; i < kNumMtlFormats; ++i) {
+ const auto& formatInfo = fFormatTable[i];
+ for (int j = 0; j < formatInfo.fColorTypeInfoCount; ++j) {
+ const auto& ctInfo = formatInfo.fColorTypeInfos[j];
+ if (ctInfo.fColorType == colorType) {
+ bool found = false;
+ for (auto it = formats.begin(); it != formats.end(); ++it) {
+ if (kMtlFormats[i] == *it) {
+ found = true;
+ }
+ }
+ SkASSERT(found);
+ }
+ }
+ }
+#endif
+ int idx = static_cast<int>(colorType);
+ for (auto it = formats.begin(); it != formats.end(); ++it) {
+ const auto& info = this->getFormatInfo(*it);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ if (info.fColorTypeInfos[i].fColorType == colorType) {
+ fColorTypeToFormatTable[idx] = *it;
+ return;
+ }
+ }
+ }
+}
+
+size_t GrMtlCaps::GetFormatIndex(MTLPixelFormat pixelFormat) {
+ static_assert(SK_ARRAY_COUNT(kMtlFormats) == GrMtlCaps::kNumMtlFormats,
+ "Size of kMtlFormats array must match static value in header");
+ for (size_t i = 0; i < GrMtlCaps::kNumMtlFormats; ++i) {
+ if (kMtlFormats[i] == pixelFormat) {
+ return i;
+ }
+ }
+ SK_ABORT("Invalid MTLPixelFormat");
+}
+
+void GrMtlCaps::initFormatTable() {
+ FormatInfo* info;
+
+ // Format: R8Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatR8Unorm)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 1;
+ info->fColorTypeInfoCount = 2;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: R8Unorm, Surface: kAlpha_8
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ // Format: R8Unorm, Surface: kGray_8
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kGray_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle("rrr1");
+ }
+ }
+
+ // Format: A8Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatA8Unorm)];
+ info->fFlags = FormatInfo::kTexturable_Flag;
+ info->fBytesPerPixel = 1;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: A8Unorm, Surface: kAlpha_8
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_8;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::AAAA();
+ }
+ }
+
+#ifdef SK_BUILD_FOR_IOS
+ // Format: B5G6R5Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatB5G6R5Unorm)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 2;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: B5G6R5Unorm, Surface: kBGR_565
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kBGR_565;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: ABGR4Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatABGR4Unorm)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 2;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: ABGR4Unorm, Surface: kABGR_4444
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kABGR_4444;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+#endif
+
+ // Format: RGBA8Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRGBA8Unorm)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 2;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA8Unorm, Surface: kRGBA_8888
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_8888;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ // Format: RGBA8Unorm, Surface: kRGB_888x
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGB_888x;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RGB1();
+ }
+ }
+
+ // Format: RG8Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRG8Unorm)];
+ info->fFlags = FormatInfo::kTexturable_Flag;
+ info->fBytesPerPixel = 2;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RG8Unorm, Surface: kRG_88
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_88;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: BGRA8Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatBGRA8Unorm)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: BGRA8Unorm, Surface: kBGRA_8888
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kBGRA_8888;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: RGBA8Unorm_sRGB
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRGBA8Unorm_sRGB)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA8Unorm_sRGB, Surface: kRGBA_8888_SRGB
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_8888_SRGB;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: RGB10A2Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRGB10A2Unorm)];
+ if (this->isMac() || fFamilyGroup >= 3) {
+ info->fFlags = FormatInfo::kAllFlags;
+ } else {
+ info->fFlags = FormatInfo::kTexturable_Flag;
+ }
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGB10A2Unorm, Surface: kRGBA_1010102
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_1010102;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: R16Float
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatR16Float)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 2;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: R16Float, Surface: kAlpha_F16
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ }
+
+ // Format: RGBA16Float
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRGBA16Float)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 8;
+ info->fColorTypeInfoCount = 2;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA16Float, Surface: kRGBA_F16
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ // Format: RGBA16Float, Surface: kRGBA_F16_Clamped
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_F16_Clamped;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: R16Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatR16Unorm)];
+ if (this->isMac()) {
+ info->fFlags = FormatInfo::kAllFlags;
+ } else {
+ info->fFlags = FormatInfo::kTexturable_Flag | FormatInfo::kRenderable_Flag;
+ }
+ info->fBytesPerPixel = 2;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: R16Unorm, Surface: kAlpha_16
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kAlpha_16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ }
+
+ // Format: RG16Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRG16Unorm)];
+ if (this->isMac()) {
+ info->fFlags = FormatInfo::kAllFlags;
+ } else {
+ info->fFlags = FormatInfo::kTexturable_Flag | FormatInfo::kRenderable_Flag;
+ }
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RG16Unorm, Surface: kRG_1616
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_1616;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+#ifdef SK_BUILD_FOR_IOS
+ // ETC2_RGB8
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatETC2_RGB8)];
+ info->fFlags = FormatInfo::kTexturable_Flag;
+    // No supported colorTypes
+#endif
+
+ // Format: RGBA16Unorm
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRGBA16Unorm)];
+ if (this->isMac()) {
+ info->fFlags = FormatInfo::kAllFlags;
+ } else {
+ info->fFlags = FormatInfo::kTexturable_Flag | FormatInfo::kRenderable_Flag;
+ }
+ info->fBytesPerPixel = 8;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RGBA16Unorm, Surface: kRGBA_16161616
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRGBA_16161616;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ // Format: RG16Float
+ {
+ info = &fFormatTable[GetFormatIndex(MTLPixelFormatRG16Float)];
+ info->fFlags = FormatInfo::kAllFlags;
+ info->fBytesPerPixel = 4;
+ info->fColorTypeInfoCount = 1;
+ info->fColorTypeInfos.reset(new ColorTypeInfo[info->fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: RG16Float, Surface: kRG_F16
+ {
+ auto& ctInfo = info->fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = GrColorType::kRG_F16;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Map GrColorTypes (used for creating GrSurfaces) to MTLPixelFormats. The order in which the
+ // formats are passed into the setColorType function indicates the priority in selecting which
+    // format we use for a given GrColorType.
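+    // For example, kAlpha_8 lists MTLPixelFormatR8Unorm before MTLPixelFormatA8Unorm below, so
+    // R8Unorm is preferred for kAlpha_8 when both formats are available.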
+
+ std::fill_n(fColorTypeToFormatTable, kGrColorTypeCnt, MTLPixelFormatInvalid);
+
+ this->setColorType(GrColorType::kAlpha_8, { MTLPixelFormatR8Unorm,
+ MTLPixelFormatA8Unorm });
+#ifdef SK_BUILD_FOR_IOS
+ this->setColorType(GrColorType::kBGR_565, { MTLPixelFormatB5G6R5Unorm });
+ this->setColorType(GrColorType::kABGR_4444, { MTLPixelFormatABGR4Unorm });
+#endif
+ this->setColorType(GrColorType::kRGBA_8888, { MTLPixelFormatRGBA8Unorm });
+ this->setColorType(GrColorType::kRGBA_8888_SRGB, { MTLPixelFormatRGBA8Unorm_sRGB });
+ this->setColorType(GrColorType::kRGB_888x, { MTLPixelFormatRGBA8Unorm });
+ this->setColorType(GrColorType::kRG_88, { MTLPixelFormatRG8Unorm });
+ this->setColorType(GrColorType::kBGRA_8888, { MTLPixelFormatBGRA8Unorm });
+ this->setColorType(GrColorType::kRGBA_1010102, { MTLPixelFormatRGB10A2Unorm });
+ this->setColorType(GrColorType::kGray_8, { MTLPixelFormatR8Unorm });
+ this->setColorType(GrColorType::kAlpha_F16, { MTLPixelFormatR16Float });
+ this->setColorType(GrColorType::kRGBA_F16, { MTLPixelFormatRGBA16Float });
+ this->setColorType(GrColorType::kRGBA_F16_Clamped, { MTLPixelFormatRGBA16Float });
+ this->setColorType(GrColorType::kAlpha_16, { MTLPixelFormatR16Unorm });
+ this->setColorType(GrColorType::kRG_1616, { MTLPixelFormatRG16Unorm });
+ this->setColorType(GrColorType::kRGBA_16161616, { MTLPixelFormatRGBA16Unorm });
+ this->setColorType(GrColorType::kRG_F16, { MTLPixelFormatRG16Float });
+}
+
+void GrMtlCaps::initStencilFormat(id<MTLDevice> physDev) {
+ fPreferredStencilFormat = StencilFormat{ MTLPixelFormatStencil8, 8, 8, true };
+}
+
+bool GrMtlCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
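+    // Render targets accept direct pixel writes only if single-sampled and backed by a texture;
+    // all other surfaces do.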
+ if (auto rt = surface->asRenderTarget()) {
+ return rt->numSamples() <= 1 && SkToBool(surface->asTexture());
+ }
+ return true;
+}
+
+static constexpr GrPixelConfig validate_sized_format(GrMTLPixelFormat grFormat, GrColorType ct) {
+ MTLPixelFormat format = static_cast<MTLPixelFormat>(grFormat);
+ switch (ct) {
+ case GrColorType::kUnknown:
+ return kUnknown_GrPixelConfig;
+ case GrColorType::kAlpha_8:
+ if (MTLPixelFormatA8Unorm == format) {
+ return kAlpha_8_as_Alpha_GrPixelConfig;
+ } else if (MTLPixelFormatR8Unorm == format) {
+ return kAlpha_8_as_Red_GrPixelConfig;
+ }
+ break;
+#ifdef SK_BUILD_FOR_MAC
+ case GrColorType::kBGR_565:
+ case GrColorType::kABGR_4444:
+ return kUnknown_GrPixelConfig;
+#else
+ case GrColorType::kBGR_565:
+ if (MTLPixelFormatB5G6R5Unorm == format) {
+ return kRGB_565_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kABGR_4444:
+ if (MTLPixelFormatABGR4Unorm == format) {
+ return kRGBA_4444_GrPixelConfig;
+ }
+ break;
+#endif
+ case GrColorType::kRGBA_8888:
+ if (MTLPixelFormatRGBA8Unorm == format) {
+ return kRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888_SRGB:
+ if (MTLPixelFormatRGBA8Unorm_sRGB == format) {
+ return kSRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGB_888x:
+ if (MTLPixelFormatRGBA8Unorm == format) {
+ return kRGB_888X_GrPixelConfig;
+ }
+#ifdef SK_BUILD_FOR_IOS
+ else if (MTLPixelFormatETC2_RGB8 == format) {
+ return kRGB_ETC1_GrPixelConfig;
+ }
+#endif
+ break;
+ case GrColorType::kRG_88:
+ if (MTLPixelFormatRG8Unorm == format) {
+ return kRG_88_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kBGRA_8888:
+ if (MTLPixelFormatBGRA8Unorm == format) {
+ return kBGRA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_1010102:
+ if (MTLPixelFormatRGB10A2Unorm == format) {
+ return kRGBA_1010102_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kGray_8:
+ if (MTLPixelFormatR8Unorm == format) {
+ return kGray_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_F16:
+ if (MTLPixelFormatR16Float == format) {
+ return kAlpha_half_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16:
+ if (MTLPixelFormatRGBA16Float == format) {
+ return kRGBA_half_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16_Clamped:
+ if (MTLPixelFormatRGBA16Float == format) {
+ return kRGBA_half_Clamped_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_16:
+ if (MTLPixelFormatR16Unorm == format) {
+ return kAlpha_16_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_1616:
+ if (MTLPixelFormatRG16Unorm == format) {
+ return kRG_1616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_16161616:
+ if (MTLPixelFormatRGBA16Unorm == format) {
+ return kRGBA_16161616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_F16:
+ if (MTLPixelFormatRG16Float == format) {
+ return kRG_half_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F32:
+ case GrColorType::kAlpha_8xxx:
+ case GrColorType::kAlpha_F32xxx:
+ case GrColorType::kGray_8xxx:
+ return kUnknown_GrPixelConfig;
+ }
+ SkUNREACHABLE;
+}
+
+bool GrMtlCaps::onAreColorTypeAndFormatCompatible(GrColorType ct,
+ const GrBackendFormat& format) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ const auto& info = this->getFormatInfo(mtlFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ if (info.fColorTypeInfos[i].fColorType == ct) {
+ return true;
+ }
+ }
+ return false;
+}
+
+GrPixelConfig GrMtlCaps::onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType ct) const {
+ return validate_sized_format(GrBackendFormatAsMTLPixelFormat(format), ct);
+}
+
+GrColorType GrMtlCaps::getYUVAColorTypeFromBackendFormat(const GrBackendFormat& format,
+ bool isAlphaChannel) const {
+ switch (GrBackendFormatAsMTLPixelFormat(format)) {
+ case MTLPixelFormatA8Unorm: // fall through
+ case MTLPixelFormatR8Unorm: return isAlphaChannel ? GrColorType::kAlpha_8
+ : GrColorType::kGray_8;
+ case MTLPixelFormatRG8Unorm: return GrColorType::kRG_88;
+ case MTLPixelFormatRGBA8Unorm: return GrColorType::kRGBA_8888;
+ case MTLPixelFormatBGRA8Unorm: return GrColorType::kBGRA_8888;
+ case MTLPixelFormatRGB10A2Unorm: return GrColorType::kRGBA_1010102;
+ case MTLPixelFormatR16Unorm: return GrColorType::kAlpha_16;
+ case MTLPixelFormatR16Float: return GrColorType::kAlpha_F16;
+ case MTLPixelFormatRG16Unorm: return GrColorType::kRG_1616;
+ case MTLPixelFormatRGBA16Unorm: return GrColorType::kRGBA_16161616;
+ case MTLPixelFormatRG16Float: return GrColorType::kRG_F16;
+ default: return GrColorType::kUnknown;
+ }
+}
+
+GrBackendFormat GrMtlCaps::onGetDefaultBackendFormat(GrColorType ct,
+ GrRenderable renderable) const {
+ MTLPixelFormat format = this->getFormatFromColorType(ct);
+ if (!format) {
+ return GrBackendFormat();
+ }
+ return GrBackendFormat::MakeMtl(format);
+}
+
+GrBackendFormat GrMtlCaps::getBackendFormatFromCompressionType(
+ SkImage::CompressionType compressionType) const {
+ switch (compressionType) {
+ case SkImage::kETC1_CompressionType:
+#ifdef SK_BUILD_FOR_MAC
+ return {};
+#else
+ return GrBackendFormat::MakeMtl(MTLPixelFormatETC2_RGB8);
+#endif
+ }
+ SK_ABORT("Invalid compression type");
+}
+
+GrSwizzle GrMtlCaps::getTextureSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ SkASSERT(mtlFormat != MTLPixelFormatInvalid);
+ const auto& info = this->getFormatInfo(mtlFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fTextureSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+GrSwizzle GrMtlCaps::getOutputSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ SkASSERT(mtlFormat != MTLPixelFormatInvalid);
+ const auto& info = this->getFormatInfo(mtlFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fOutputSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+
+GrCaps::SupportedWrite GrMtlCaps::supportedWritePixelsColorType(
+ GrColorType surfaceColorType, const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const {
+    // Metal requires the destination offset for copyFromTexture to be a multiple of the texture's
+    // pixel size.
+ size_t offsetAlignment = GrColorTypeBytesPerPixel(surfaceColorType);
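+    // For example, a kRGBA_8888 destination (4 bytes per pixel) requires 4-byte-aligned offsets.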
+
+ const auto& info = this->getFormatInfo(GrBackendFormatAsMTLPixelFormat(surfaceFormat));
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == surfaceColorType) {
+ return {surfaceColorType, offsetAlignment};
+ }
+ }
+ return {GrColorType::kUnknown, 0};
+}
+
+GrCaps::SupportedRead GrMtlCaps::onSupportedReadPixelsColorType(
+ GrColorType srcColorType, const GrBackendFormat& srcBackendFormat,
+ GrColorType dstColorType) const {
+ MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(srcBackendFormat);
+
+    // Metal requires the destination offset for copyFromTexture to be a multiple of the texture's
+    // pixel size.
+ size_t offsetAlignment = GrColorTypeBytesPerPixel(srcColorType);
+
+ const auto& info = this->getFormatInfo(mtlFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == srcColorType) {
+ return {srcColorType, offsetAlignment};
+ }
+ }
+ return {GrColorType::kUnknown, 0};
+}
+
+#if GR_TEST_UTILS
+std::vector<GrCaps::TestFormatColorTypeCombination> GrMtlCaps::getTestingCombinations() const {
+ std::vector<GrCaps::TestFormatColorTypeCombination> combos = {
+ { GrColorType::kAlpha_8, GrBackendFormat::MakeMtl(MTLPixelFormatA8Unorm) },
+ { GrColorType::kAlpha_8, GrBackendFormat::MakeMtl(MTLPixelFormatR8Unorm) },
+#ifdef SK_BUILD_FOR_IOS
+ { GrColorType::kBGR_565, GrBackendFormat::MakeMtl(MTLPixelFormatB5G6R5Unorm) },
+ { GrColorType::kABGR_4444, GrBackendFormat::MakeMtl(MTLPixelFormatABGR4Unorm) },
+#endif
+ { GrColorType::kRGBA_8888, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA8Unorm) },
+ { GrColorType::kRGBA_8888_SRGB, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA8Unorm_sRGB) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA8Unorm) },
+#ifdef SK_BUILD_FOR_IOS
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeMtl(MTLPixelFormatETC2_RGB8) },
+#endif
+ { GrColorType::kRG_88, GrBackendFormat::MakeMtl(MTLPixelFormatRG8Unorm) },
+ { GrColorType::kBGRA_8888, GrBackendFormat::MakeMtl(MTLPixelFormatBGRA8Unorm) },
+ { GrColorType::kRGBA_1010102, GrBackendFormat::MakeMtl(MTLPixelFormatRGB10A2Unorm) },
+ { GrColorType::kGray_8, GrBackendFormat::MakeMtl(MTLPixelFormatR8Unorm) },
+ { GrColorType::kAlpha_F16, GrBackendFormat::MakeMtl(MTLPixelFormatR16Float) },
+ { GrColorType::kRGBA_F16, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA16Float) },
+ { GrColorType::kRGBA_F16_Clamped, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA16Float) },
+ { GrColorType::kAlpha_16, GrBackendFormat::MakeMtl(MTLPixelFormatR16Unorm) },
+ { GrColorType::kRG_1616, GrBackendFormat::MakeMtl(MTLPixelFormatRG16Unorm) },
+ { GrColorType::kRGBA_16161616, GrBackendFormat::MakeMtl(MTLPixelFormatRGBA16Unorm) },
+ { GrColorType::kRG_F16, GrBackendFormat::MakeMtl(MTLPixelFormatRG16Float) },
+ };
+
+ return combos;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.h b/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.h
new file mode 100644
index 0000000000..6626c7d266
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlCommandBuffer_DEFINED
+#define GrMtlCommandBuffer_DEFINED
+
+#import <Metal/Metal.h>
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+class GrMtlGpu;
+class GrMtlPipelineState;
+class GrMtlOpsRenderPass;
+
+class GrMtlCommandBuffer {
+public:
+ static GrMtlCommandBuffer* Create(id<MTLCommandQueue> queue);
+ ~GrMtlCommandBuffer();
+
+ void commit(bool waitUntilCompleted);
+
+ id<MTLBlitCommandEncoder> getBlitCommandEncoder();
+ id<MTLRenderCommandEncoder> getRenderCommandEncoder(MTLRenderPassDescriptor*,
+ const GrMtlPipelineState*,
+ GrMtlOpsRenderPass* opsRenderPass);
+
+ void addCompletedHandler(MTLCommandBufferHandler block) {
+ [fCmdBuffer addCompletedHandler:block];
+ }
+
+ void encodeSignalEvent(id<MTLEvent>, uint64_t value) API_AVAILABLE(macos(10.14), ios(12.0));
+ void encodeWaitForEvent(id<MTLEvent>, uint64_t value) API_AVAILABLE(macos(10.14), ios(12.0));
+
+private:
+ GrMtlCommandBuffer(id<MTLCommandBuffer> cmdBuffer)
+ : fCmdBuffer(cmdBuffer)
+ , fPreviousRenderPassDescriptor(nil) {}
+
+ void endAllEncoding();
+
+ id<MTLCommandBuffer> fCmdBuffer;
+ id<MTLBlitCommandEncoder> fActiveBlitCommandEncoder;
+ id<MTLRenderCommandEncoder> fActiveRenderCommandEncoder;
+ MTLRenderPassDescriptor* fPreviousRenderPassDescriptor;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.mm
new file mode 100644
index 0000000000..85b53281f5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlCommandBuffer.mm
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlCommandBuffer.h"
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
+#include "src/gpu/mtl/GrMtlPipelineState.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlCommandBuffer* GrMtlCommandBuffer::Create(id<MTLCommandQueue> queue) {
+    id<MTLCommandBuffer> mtlCommandBuffer = [queue commandBuffer];
+ if (nil == mtlCommandBuffer) {
+ return nullptr;
+ }
+
+ mtlCommandBuffer.label = @"GrMtlCommandBuffer::Create";
+
+ return new GrMtlCommandBuffer(mtlCommandBuffer);
+}
+
+GrMtlCommandBuffer::~GrMtlCommandBuffer() {
+ this->endAllEncoding();
+ fCmdBuffer = nil;
+}
+
+id<MTLBlitCommandEncoder> GrMtlCommandBuffer::getBlitCommandEncoder() {
+ if (nil != fActiveRenderCommandEncoder) {
+ [fActiveRenderCommandEncoder endEncoding];
+ fActiveRenderCommandEncoder = nil;
+ }
+
+ if (nil == fActiveBlitCommandEncoder) {
+ fActiveBlitCommandEncoder = [fCmdBuffer blitCommandEncoder];
+ }
+ fPreviousRenderPassDescriptor = nil;
+
+ return fActiveBlitCommandEncoder;
+}
+
+static bool compatible(const MTLRenderPassAttachmentDescriptor* first,
+ const MTLRenderPassAttachmentDescriptor* second,
+ const GrMtlPipelineState* pipelineState) {
+ // Check to see if the previous descriptor is compatible with the new one.
+ // They are compatible if:
+ // * they share the same rendertargets
+ // * the first's store actions are either Store or DontCare
+ // * the second's load actions are either Load or DontCare
+ // * the second doesn't sample from any rendertargets in the first
+ bool renderTargetsMatch = (first.texture == second.texture);
+ bool storeActionsValid = first.storeAction == MTLStoreActionStore ||
+ first.storeAction == MTLStoreActionDontCare;
+ bool loadActionsValid = second.loadAction == MTLLoadActionLoad ||
+ second.loadAction == MTLLoadActionDontCare;
+ bool secondDoesntSampleFirst = (!pipelineState ||
+ pipelineState->doesntSampleAttachment(first)) &&
+ second.storeAction != MTLStoreActionMultisampleResolve;
+
+ return renderTargetsMatch &&
+ (nil == first.texture ||
+ (storeActionsValid && loadActionsValid && secondDoesntSampleFirst));
+}
+
+id<MTLRenderCommandEncoder> GrMtlCommandBuffer::getRenderCommandEncoder(
+ MTLRenderPassDescriptor* descriptor, const GrMtlPipelineState* pipelineState,
+ GrMtlOpsRenderPass* opsRenderPass) {
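+    // If the new pass is compatible with the previous one, reuse the active render command
+    // encoder rather than ending it and beginning a new one.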
+ if (nil != fPreviousRenderPassDescriptor) {
+ if (compatible(fPreviousRenderPassDescriptor.colorAttachments[0],
+ descriptor.colorAttachments[0], pipelineState) &&
+ compatible(fPreviousRenderPassDescriptor.stencilAttachment,
+ descriptor.stencilAttachment, pipelineState)) {
+ return fActiveRenderCommandEncoder;
+ }
+ }
+
+ this->endAllEncoding();
+ fActiveRenderCommandEncoder = [fCmdBuffer renderCommandEncoderWithDescriptor:descriptor];
+ if (opsRenderPass) {
+ opsRenderPass->initRenderState(fActiveRenderCommandEncoder);
+ }
+ fPreviousRenderPassDescriptor = descriptor;
+
+ return fActiveRenderCommandEncoder;
+}
+
+void GrMtlCommandBuffer::commit(bool waitUntilCompleted) {
+ this->endAllEncoding();
+ [fCmdBuffer commit];
+ if (waitUntilCompleted) {
+ [fCmdBuffer waitUntilCompleted];
+ }
+
+ if (MTLCommandBufferStatusError == fCmdBuffer.status) {
+ NSString* description = fCmdBuffer.error.localizedDescription;
+ const char* errorString = [description UTF8String];
+ SkDebugf("Error submitting command buffer: %s\n", errorString);
+ }
+
+ fCmdBuffer = nil;
+}
+
+void GrMtlCommandBuffer::endAllEncoding() {
+ if (nil != fActiveRenderCommandEncoder) {
+ [fActiveRenderCommandEncoder endEncoding];
+ fActiveRenderCommandEncoder = nil;
+ fPreviousRenderPassDescriptor = nil;
+ }
+ if (nil != fActiveBlitCommandEncoder) {
+ [fActiveBlitCommandEncoder endEncoding];
+ fActiveBlitCommandEncoder = nil;
+ }
+}
+
+void GrMtlCommandBuffer::encodeSignalEvent(id<MTLEvent> event, uint64_t eventValue) {
+ SkASSERT(fCmdBuffer);
+ this->endAllEncoding(); // ensure we don't have any active command encoders
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ [fCmdBuffer encodeSignalEvent:event value:eventValue];
+ }
+}
+
+void GrMtlCommandBuffer::encodeWaitForEvent(id<MTLEvent> event, uint64_t eventValue) {
+ SkASSERT(fCmdBuffer);
+ this->endAllEncoding(); // ensure we don't have any active command encoders
+    // TODO: confirm whether ending all encoding here is required; it most likely is
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ [fCmdBuffer encodeWaitForEvent:event value:eventValue];
+ }
+}
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlCppUtil.h b/gfx/skia/skia/src/gpu/mtl/GrMtlCppUtil.h
new file mode 100644
index 0000000000..47c6eb50c1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlCppUtil.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlCppUtil_DEFINED
+#define GrMtlCppUtil_DEFINED
+
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+// Utilities that can be used from cpp files (rather than .mm).
+
+GrMTLPixelFormat GrGetMTLPixelFormatFromMtlTextureInfo(const GrMtlTextureInfo&);
+
+#if GR_TEST_UTILS
+const char* GrMtlFormatToStr(GrMTLPixelFormat mtlFormat);
+bool GrMtlFormatIsBGRA(GrMTLPixelFormat mtlFormat);
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.h b/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.h
new file mode 100644
index 0000000000..0f61af877e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlDepthStencil_DEFINED
+#define GrMtlDepthStencil_DEFINED
+
+#import <Metal/Metal.h>
+
+#include "include/gpu/GrTypes.h"
+#include "src/core/SkOpts.h"
+#include <atomic>
+
+class GrMtlGpu;
+class GrStencilSettings;
+
+// A wrapper for an MTLDepthStencilState object with caching support.
+class GrMtlDepthStencil : public SkRefCnt {
+public:
+ static GrMtlDepthStencil* Create(const GrMtlGpu*, const GrStencilSettings&, GrSurfaceOrigin);
+
+ ~GrMtlDepthStencil() { fMtlDepthStencilState = nil; }
+
+ id<MTLDepthStencilState> mtlDepthStencil() const { return fMtlDepthStencilState; }
+
+ struct Key {
+ struct Face {
+ uint32_t fReadMask;
+ uint32_t fWriteMask;
+ uint32_t fOps;
+ };
+ Face fFront;
+ Face fBack;
+
+ bool operator==(const Key& that) const {
+ return this->fFront.fReadMask == that.fFront.fReadMask &&
+ this->fFront.fWriteMask == that.fFront.fWriteMask &&
+ this->fFront.fOps == that.fFront.fOps &&
+ this->fBack.fReadMask == that.fBack.fReadMask &&
+ this->fBack.fWriteMask == that.fBack.fWriteMask &&
+ this->fBack.fOps == that.fBack.fOps;
+ }
+ };
+
+    // Helpers for hashing GrMtlDepthStencil
+ static Key GenerateKey(const GrStencilSettings&, GrSurfaceOrigin);
+
+ static const Key& GetKey(const GrMtlDepthStencil& depthStencil) { return depthStencil.fKey; }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+
+private:
+ GrMtlDepthStencil(id<MTLDepthStencilState> mtlDepthStencilState, Key key)
+ : fMtlDepthStencilState(mtlDepthStencilState)
+ , fKey(key) {}
+
+ id<MTLDepthStencilState> fMtlDepthStencilState;
+ Key fKey;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.mm
new file mode 100644
index 0000000000..7434772531
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlDepthStencil.mm
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/mtl/GrMtlDepthStencil.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+MTLStencilOperation skia_stencil_op_to_mtl(GrStencilOp op) {
+ switch (op) {
+ case GrStencilOp::kKeep:
+ return MTLStencilOperationKeep;
+ case GrStencilOp::kZero:
+ return MTLStencilOperationZero;
+ case GrStencilOp::kReplace:
+ return MTLStencilOperationReplace;
+ case GrStencilOp::kInvert:
+ return MTLStencilOperationInvert;
+ case GrStencilOp::kIncWrap:
+ return MTLStencilOperationIncrementWrap;
+ case GrStencilOp::kDecWrap:
+ return MTLStencilOperationDecrementWrap;
+ case GrStencilOp::kIncClamp:
+ return MTLStencilOperationIncrementClamp;
+ case GrStencilOp::kDecClamp:
+ return MTLStencilOperationDecrementClamp;
+ }
+}
+
+MTLStencilDescriptor* skia_stencil_to_mtl(GrStencilSettings::Face face) {
+ MTLStencilDescriptor* result = [[MTLStencilDescriptor alloc] init];
+ switch (face.fTest) {
+ case GrStencilTest::kAlways:
+ result.stencilCompareFunction = MTLCompareFunctionAlways;
+ break;
+ case GrStencilTest::kNever:
+ result.stencilCompareFunction = MTLCompareFunctionNever;
+ break;
+ case GrStencilTest::kGreater:
+ result.stencilCompareFunction = MTLCompareFunctionGreater;
+ break;
+ case GrStencilTest::kGEqual:
+ result.stencilCompareFunction = MTLCompareFunctionGreaterEqual;
+ break;
+ case GrStencilTest::kLess:
+ result.stencilCompareFunction = MTLCompareFunctionLess;
+ break;
+ case GrStencilTest::kLEqual:
+ result.stencilCompareFunction = MTLCompareFunctionLessEqual;
+ break;
+ case GrStencilTest::kEqual:
+ result.stencilCompareFunction = MTLCompareFunctionEqual;
+ break;
+ case GrStencilTest::kNotEqual:
+ result.stencilCompareFunction = MTLCompareFunctionNotEqual;
+ break;
+ }
+ result.readMask = face.fTestMask;
+ result.writeMask = face.fWriteMask;
+ result.depthStencilPassOperation = skia_stencil_op_to_mtl(face.fPassOp);
+ result.stencilFailureOperation = skia_stencil_op_to_mtl(face.fFailOp);
+ return result;
+}
+
+GrMtlDepthStencil* GrMtlDepthStencil::Create(const GrMtlGpu* gpu,
+ const GrStencilSettings& stencil,
+ GrSurfaceOrigin origin) {
+ MTLDepthStencilDescriptor* desc = [[MTLDepthStencilDescriptor alloc] init];
+ if (!stencil.isDisabled()) {
+ if (stencil.isTwoSided()) {
+ desc.frontFaceStencil = skia_stencil_to_mtl(stencil.front(origin));
+ desc.backFaceStencil = skia_stencil_to_mtl(stencil.back(origin));
+        } else {
+ desc.frontFaceStencil = skia_stencil_to_mtl(stencil.frontAndBack());
+ desc.backFaceStencil = desc.frontFaceStencil;
+ }
+ }
+
+ return new GrMtlDepthStencil([gpu->device() newDepthStencilStateWithDescriptor: desc],
+ GenerateKey(stencil, origin));
+}
+
+void skia_stencil_to_key(GrStencilSettings::Face face, GrMtlDepthStencil::Key::Face* faceKey) {
+ const int kPassOpShift = 3;
+ const int kFailOpShift = 6;
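+    // fOps packs three 3-bit fields: the stencil test in bits 0-2, the pass op in bits 3-5, and
+    // the fail op in bits 6-8. The asserts below guarantee each value fits in 3 bits.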
+
+ faceKey->fReadMask = face.fTestMask;
+ faceKey->fWriteMask = face.fWriteMask;
+
+ SkASSERT(static_cast<int>(face.fTest) <= 7);
+ faceKey->fOps = static_cast<uint32_t>(face.fTest);
+
+ SkASSERT(static_cast<int>(face.fPassOp) <= 7);
+ faceKey->fOps |= (static_cast<uint32_t>(face.fPassOp) << kPassOpShift);
+
+ SkASSERT(static_cast<int>(face.fFailOp) <= 7);
+ faceKey->fOps |= (static_cast<uint32_t>(face.fFailOp) << kFailOpShift);
+}
+
+GrMtlDepthStencil::Key GrMtlDepthStencil::GenerateKey(const GrStencilSettings& stencil,
+ GrSurfaceOrigin origin) {
+ Key depthStencilKey;
+
+ if (stencil.isDisabled()) {
+ memset(&depthStencilKey, 0, sizeof(Key));
+ } else {
+ if (stencil.isTwoSided()) {
+ skia_stencil_to_key(stencil.front(origin), &depthStencilKey.fFront);
+ skia_stencil_to_key(stencil.back(origin), &depthStencilKey.fBack);
+        } else {
+ skia_stencil_to_key(stencil.frontAndBack(), &depthStencilKey.fFront);
+ memcpy(&depthStencilKey.fBack, &depthStencilKey.fFront, sizeof(Key::Face));
+ }
+ }
+
+ return depthStencilKey;
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.h b/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.h
new file mode 100644
index 0000000000..7fd9134afd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlGpu_DEFINED
+#define GrMtlGpu_DEFINED
+
+#include <list>
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrSemaphore.h"
+
+#include "src/gpu/mtl/GrMtlCaps.h"
+#include "src/gpu/mtl/GrMtlResourceProvider.h"
+#include "src/gpu/mtl/GrMtlStencilAttachment.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlOpsRenderPass;
+class GrMtlTexture;
+class GrSemaphore;
+struct GrMtlBackendContext;
+class GrMtlCommandBuffer;
+
+namespace SkSL {
+ class Compiler;
+}
+
+class GrMtlGpu : public GrGpu {
+public:
+ static sk_sp<GrGpu> Make(GrContext* context, const GrContextOptions& options,
+ id<MTLDevice> device, id<MTLCommandQueue> queue);
+ ~GrMtlGpu() override;
+
+ void disconnect(DisconnectType) override;
+
+ const GrMtlCaps& mtlCaps() const { return *fMtlCaps.get(); }
+
+ id<MTLDevice> device() const { return fDevice; }
+
+ GrMtlResourceProvider& resourceProvider() { return fResourceProvider; }
+
+ GrMtlCommandBuffer* commandBuffer();
+
+ enum SyncQueue {
+ kForce_SyncQueue,
+ kSkip_SyncQueue
+ };
+
+ // Commits the current command buffer to the queue and then creates a new command buffer. If
+ // sync is set to kForce_SyncQueue, the function will wait for all work in the committed
+ // command buffer to finish before returning.
+ void submitCommandBuffer(SyncQueue sync);
+
+ void deleteBackendTexture(const GrBackendTexture&) override;
+
+#if GR_TEST_UTILS
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+
+ GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
+ void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
+
+ void testingOnly_flushGpuAndSync() override;
+#endif
+
+ void copySurfaceAsResolve(GrSurface* dst, GrSurface* src);
+
+ void copySurfaceAsBlit(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget*, GrSurfaceOrigin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) override;
+
+ SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
+
+ void submit(GrOpsRenderPass* renderPass) override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() override;
+ bool waitFence(GrFence, uint64_t) override;
+ void deleteFence(GrFence) const override;
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) override;
+ void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void checkFinishProcs() override;
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
+
+    // Once the Metal backend actually uses indirect command buffers, this function will do what
+    // its name says. For now, every command is encoded directly into the primary command buffer,
+    // so all this function does is mark that a render target has been drawn to.
+ void submitIndirectCommandBuffer(GrSurface* surface, GrSurfaceOrigin origin,
+ const SkIRect* bounds) {
+ this->didWriteToSurface(surface, origin, bounds);
+ }
+
+private:
+ GrMtlGpu(GrContext* context, const GrContextOptions& options,
+ id<MTLDevice> device, id<MTLCommandQueue> queue, MTLFeatureSet featureSet);
+
+ void destroyResources();
+
+ void onResetContext(uint32_t resetBits) override {}
+
+ void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) override {
+ SkASSERT(!this->caps()->sampleLocationsSupport());
+ SK_ABORT("Sample locations not yet implemented for Metal.");
+ }
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ GrBackendTexture onCreateBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected) override;
+
+ sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) override;
+ sk_sp<GrTexture> onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) override;
+
+ sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType,
+ GrWrapOwnership, GrWrapCacheable, GrIOType) override;
+
+ sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
+ GrColorType, GrWrapOwnership,
+ GrWrapCacheable) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType) override;
+
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern,
+ const void*) override;
+
+ bool onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType, void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ const GrMipLevel[], int mipLevelCount,
+ bool prepForTexSampling) override;
+
+ bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType, GrGpuBuffer*,
+ size_t offset, size_t rowBytes) override;
+ bool onTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer*, size_t offset) override;
+
+ bool onRegenerateMipMapLevels(GrTexture*) override;
+
+ void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO) override;
+
+ void resolveTexture(id<MTLTexture> colorTexture, id<MTLTexture> resolveTexture);
+
+ void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override;
+
+ // Function that uploads data onto textures with private storage mode (GPU access only).
+ bool uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const GrMipLevel texels[], int mipLevels);
+ // Function that fills texture levels with transparent black based on levelMask.
+ bool clearTexture(GrMtlTexture*, GrColorType, uint32_t levelMask);
+ bool readOrTransferPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType dstColorType, id<MTLBuffer> transferBuffer, size_t offset,
+ size_t imageBytes, size_t rowBytes);
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(
+ const GrRenderTarget*, int width, int height, int numStencilSamples) override;
+
+ bool createMtlTextureForBackendSurface(MTLPixelFormat,
+ int w, int h, bool texturable,
+ bool renderable, GrMipMapped,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrMtlTextureInfo*);
+
+#if GR_TEST_UTILS
+ void testingOnly_startCapture() override;
+ void testingOnly_endCapture() override;
+#endif
+
+ sk_sp<GrMtlCaps> fMtlCaps;
+
+ id<MTLDevice> fDevice;
+ id<MTLCommandQueue> fQueue;
+
+ GrMtlCommandBuffer* fCmdBuffer;
+
+ std::unique_ptr<SkSL::Compiler> fCompiler;
+
+ GrMtlResourceProvider fResourceProvider;
+
+ // For FenceSync
+ id<MTLSharedEvent> fSharedEvent API_AVAILABLE(macos(10.14), ios(12.0));
+ MTLSharedEventListener* fSharedEventListener API_AVAILABLE(macos(10.14), ios(12.0));
+ uint64_t fLatestEvent;
+
+ bool fDisconnected;
+
+ struct FinishCallback {
+ GrGpuFinishedProc fCallback;
+ GrGpuFinishedContext fContext;
+ GrFence fFence;
+ };
+ std::list<FinishCallback> fFinishCallbacks;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.mm
new file mode 100644
index 0000000000..8e102306ae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlGpu.mm
@@ -0,0 +1,1375 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/mtl/GrMtlBuffer.h"
+#include "src/gpu/mtl/GrMtlCommandBuffer.h"
+#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
+#include "src/gpu/mtl/GrMtlSemaphore.h"
+#include "src/gpu/mtl/GrMtlTexture.h"
+#include "src/gpu/mtl/GrMtlTextureRenderTarget.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#import <simd/simd.h>
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+static bool get_feature_set(id<MTLDevice> device, MTLFeatureSet* featureSet) {
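+    // Feature sets are checked from newest to oldest so the most capable supported set is chosen.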
+    // macOS
+#ifdef SK_BUILD_FOR_MAC
+ if (@available(macOS 10.12, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_OSX_GPUFamily1_v2]) {
+ *featureSet = MTLFeatureSet_OSX_GPUFamily1_v2;
+ return true;
+ }
+ }
+ if ([device supportsFeatureSet:MTLFeatureSet_OSX_GPUFamily1_v1]) {
+ *featureSet = MTLFeatureSet_OSX_GPUFamily1_v1;
+ return true;
+ }
+#endif
+
+ // iOS Family group 3
+#ifdef SK_BUILD_FOR_IOS
+ if (@available(iOS 10.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily3_v2;
+ return true;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily3_v1;
+ return true;
+ }
+ }
+
+ // iOS Family group 2
+ if (@available(iOS 10.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v3]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily2_v3;
+ return true;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v2]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily2_v2;
+ return true;
+ }
+ }
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily2_v1;
+ return true;
+ }
+
+ // iOS Family group 1
+ if (@available(iOS 10.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v3]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily1_v3;
+ return true;
+ }
+ }
+ if (@available(iOS 9.0, *)) {
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v2]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily1_v2;
+ return true;
+ }
+ }
+ if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+ *featureSet = MTLFeatureSet_iOS_GPUFamily1_v1;
+ return true;
+ }
+#endif
+ // No supported feature sets were found
+ return false;
+}
+
+sk_sp<GrGpu> GrMtlGpu::Make(GrContext* context, const GrContextOptions& options,
+ id<MTLDevice> device, id<MTLCommandQueue> queue) {
+ if (!device || !queue) {
+ return nullptr;
+ }
+ MTLFeatureSet featureSet;
+ if (!get_feature_set(device, &featureSet)) {
+ return nullptr;
+ }
+ return sk_sp<GrGpu>(new GrMtlGpu(context, options, device, queue, featureSet));
+}
+
+GrMtlGpu::GrMtlGpu(GrContext* context, const GrContextOptions& options,
+ id<MTLDevice> device, id<MTLCommandQueue> queue, MTLFeatureSet featureSet)
+ : INHERITED(context)
+ , fDevice(device)
+ , fQueue(queue)
+ , fCmdBuffer(nullptr)
+ , fCompiler(new SkSL::Compiler())
+ , fResourceProvider(this)
+ , fDisconnected(false) {
+ fMtlCaps.reset(new GrMtlCaps(options, fDevice, featureSet));
+ fCaps = fMtlCaps;
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ if (fMtlCaps->fenceSyncSupport()) {
+ fSharedEvent = [fDevice newSharedEvent];
+ dispatch_queue_t dispatchQ = dispatch_queue_create("MTLFenceSync", NULL);
+ fSharedEventListener = [[MTLSharedEventListener alloc] initWithDispatchQueue:dispatchQ];
+ fLatestEvent = 0;
+ }
+ }
+}
+
+GrMtlGpu::~GrMtlGpu() {
+ if (!fDisconnected) {
+ this->destroyResources();
+ }
+}
+
+void GrMtlGpu::disconnect(DisconnectType type) {
+ INHERITED::disconnect(type);
+
+ if (DisconnectType::kCleanup == type) {
+ this->destroyResources();
+ } else {
+ delete fCmdBuffer;
+ fCmdBuffer = nullptr;
+
+ fResourceProvider.destroyResources();
+
+ fQueue = nil;
+ fDevice = nil;
+
+ fDisconnected = true;
+ }
+}
+
+void GrMtlGpu::destroyResources() {
+ // Will implicitly delete the command buffer
+ this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
+ fResourceProvider.destroyResources();
+
+ fQueue = nil;
+ fDevice = nil;
+}
+
+GrOpsRenderPass* GrMtlGpu::getOpsRenderPass(
+ GrRenderTarget* renderTarget, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ return new GrMtlOpsRenderPass(this, renderTarget, origin, colorInfo, stencilInfo);
+}
+
+void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
+ GrMtlOpsRenderPass* mtlRenderPass = reinterpret_cast<GrMtlOpsRenderPass*>(renderPass);
+ mtlRenderPass->submit();
+ delete renderPass;
+}
+
+GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
+ if (!fCmdBuffer) {
+ fCmdBuffer = GrMtlCommandBuffer::Create(fQueue);
+ }
+ return fCmdBuffer;
+}
+
+void GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
+ if (fCmdBuffer) {
+ fResourceProvider.addBufferCompletionHandler(fCmdBuffer);
+ fCmdBuffer->commit(SyncQueue::kForce_SyncQueue == sync);
+ delete fCmdBuffer;
+ fCmdBuffer = nullptr;
+ }
+}
+
+void GrMtlGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess,
+ const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
+ bool forceSync = SkToBool(info.fFlags & kSyncCpu_GrFlushFlag) ||
+ (info.fFinishedProc && !this->mtlCaps().fenceSyncSupport());
+ // TODO: do we care about info.fSemaphore?
+ if (forceSync) {
+ this->submitCommandBuffer(kForce_SyncQueue);
+ // After a forced sync everything previously sent to the GPU is done.
+ for (const auto& cb : fFinishCallbacks) {
+ cb.fCallback(cb.fContext);
+ this->deleteFence(cb.fFence);
+ }
+ fFinishCallbacks.clear();
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ } else {
+ if (info.fFinishedProc) {
+ FinishCallback callback;
+ callback.fCallback = info.fFinishedProc;
+ callback.fContext = info.fFinishedContext;
+ callback.fFence = this->insertFence();
+ fFinishCallbacks.push_back(callback);
+ }
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ }
+}
+
+void GrMtlGpu::checkFinishProcs() {
+ // Bail after the first unfinished sync since we expect they signal in the order inserted.
+ while (!fFinishCallbacks.empty() && this->waitFence(fFinishCallbacks.front().fFence,
+ /* timeout = */ 0)) {
+ fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext);
+ this->deleteFence(fFinishCallbacks.front().fFence);
+ fFinishCallbacks.pop_front();
+ }
+}
+
+sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
+ GrAccessPattern accessPattern, const void* data) {
+ return GrMtlBuffer::Make(this, size, type, accessPattern, data);
+}
+
+static bool check_max_blit_width(int widthInPixels) {
+ if (widthInPixels > 32767) {
+ SkASSERT(false); // surfaces should not be this wide anyway
+ return false;
+ }
+ return true;
+}
+
+bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const GrMipLevel texels[],
+ int mipLevelCount) {
+ SkASSERT(this->caps()->isFormatTexturable(tex->backendFormat()));
+ // The assumption is either that we have no mipmaps, or that our rect is the entire texture
+ SkASSERT(1 == mipLevelCount ||
+ (0 == left && 0 == top && width == tex->width() && height == tex->height()));
+
+ // We assume that if the texture has mip levels, we either upload to all the levels or just the
+ // first.
+ SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));
+
+ if (!check_max_blit_width(width)) {
+ return false;
+ }
+ if (width == 0 || height == 0) {
+ return false;
+ }
+ if (GrPixelConfigToColorType(tex->config()) != dataColorType) {
+ return false;
+ }
+
+ id<MTLTexture> mtlTexture = tex->mtlTexture();
+ SkASSERT(mtlTexture);
+ // Either upload only the first miplevel or all miplevels
+ SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);
+
+ if (1 == mipLevelCount && !texels[0].fPixels) {
+ return true; // no data to upload
+ }
+
+ for (int i = 0; i < mipLevelCount; ++i) {
+ // We do not allow any gaps in the mip data
+ if (!texels[i].fPixels) {
+ return false;
+ }
+ }
+
+ size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+ size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bpp, width, height,
+ &individualMipOffsets,
+ mipLevelCount);
+ SkASSERT(combinedBufferSize);
+
+ size_t bufferOffset;
+ id<MTLBuffer> transferBuffer = this->resourceProvider().getDynamicBuffer(combinedBufferSize,
+ &bufferOffset);
+ if (!transferBuffer) {
+ return false;
+ }
+ char* buffer = (char*) transferBuffer.contents + bufferOffset;
+
+ int currentWidth = width;
+ int currentHeight = height;
+ int layerHeight = tex->height();
+ MTLOrigin origin = MTLOriginMake(left, top, 0);
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (texels[currentMipLevel].fPixels) {
+ SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
+ const size_t trimRowBytes = currentWidth * bpp;
+ const size_t rowBytes = texels[currentMipLevel].fRowBytes;
+
+ // copy data into the buffer, skipping any trailing bytes
+ char* dst = buffer + individualMipOffsets[currentMipLevel];
+ const char* src = (const char*)texels[currentMipLevel].fPixels;
+ SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
+
+ [blitCmdEncoder copyFromBuffer: transferBuffer
+ sourceOffset: bufferOffset + individualMipOffsets[currentMipLevel]
+ sourceBytesPerRow: trimRowBytes
+ sourceBytesPerImage: trimRowBytes*currentHeight
+ sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
+ toTexture: mtlTexture
+ destinationSlice: 0
+ destinationLevel: currentMipLevel
+ destinationOrigin: origin];
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ layerHeight = currentHeight;
+ }
+#ifdef SK_BUILD_FOR_MAC
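+    // On macOS, didModifyRange flushes the CPU writes on a managed-storage buffer to the GPU.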
+ [transferBuffer didModifyRange: NSMakeRange(bufferOffset, combinedBufferSize)];
+#endif
+
+ if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
+ tex->texturePriv().markMipMapsDirty();
+ }
+
+ return true;
+}
+
+bool GrMtlGpu::clearTexture(GrMtlTexture* tex, GrColorType dataColorType, uint32_t levelMask) {
+ SkASSERT(this->caps()->isFormatTexturableAndUploadable(dataColorType, tex->backendFormat()));
+
+ if (!levelMask) {
+ return true;
+ }
+
+ id<MTLTexture> mtlTexture = tex->mtlTexture();
+ SkASSERT(mtlTexture);
+ // Either upload only the first miplevel or all miplevels
+ int mipLevelCount = (int)mtlTexture.mipmapLevelCount;
+
+ // TODO: implement some way of reusing transfer buffers?
+ size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+ size_t combinedBufferSize = 0;
+ int currentWidth = tex->width();
+ int currentHeight = tex->height();
+
+    // The alignment must be at least 4 bytes and a multiple of the config's bytes per pixel. This
+    // works under the assumption that a config's bytes per pixel is always a power of 2.
+ // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
+ // penalty?
+ SkASSERT((bpp & (bpp - 1)) == 0);
+ const size_t alignmentMask = 0x3 | (bpp - 1);
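+    // e.g. bpp == 4 gives a mask of 0x3 (4-byte alignment); bpp == 8 gives 0x7.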
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (levelMask & (1 << currentMipLevel)) {
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ const size_t alignmentDiff = combinedBufferSize & alignmentMask;
+ if (alignmentDiff != 0) {
+ combinedBufferSize += alignmentMask - alignmentDiff + 1;
+ }
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+ SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());
+
+ // TODO: Create GrMtlTransferBuffer
+ NSUInteger options = 0;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ options |= MTLResourceStorageModePrivate;
+ }
+ id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: combinedBufferSize
+ options: options];
+ if (nil == transferBuffer) {
+ return false;
+ }
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ // clear the buffer to transparent black
+ NSRange clearRange;
+ clearRange.location = 0;
+ clearRange.length = combinedBufferSize;
+ [blitCmdEncoder fillBuffer: transferBuffer
+ range: clearRange
+ value: 0];
+
+ // now copy buffer to texture
+ currentWidth = tex->width();
+ currentHeight = tex->height();
+ MTLOrigin origin = MTLOriginMake(0, 0, 0);
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (levelMask & (1 << currentMipLevel)) {
+ const size_t rowBytes = currentWidth * bpp;
+
+ [blitCmdEncoder copyFromBuffer: transferBuffer
+ sourceOffset: individualMipOffsets[currentMipLevel]
+ sourceBytesPerRow: rowBytes
+ sourceBytesPerImage: rowBytes * currentHeight
+ sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
+ toTexture: mtlTexture
+ destinationSlice: 0
+ destinationLevel: currentMipLevel
+ destinationOrigin: origin];
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
+ tex->texturePriv().markMipMapsDirty();
+ }
+
+ return true;
+}
+
+GrStencilAttachment* GrMtlGpu::createStencilAttachmentForRenderTarget(
+ const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
+ SkASSERT(numStencilSamples == rt->numSamples());
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numSamples();
+
+ const GrMtlCaps::StencilFormat& sFmt = this->mtlCaps().preferredStencilFormat();
+
+ GrMtlStencilAttachment* stencil(GrMtlStencilAttachment::Create(this,
+ width,
+ height,
+ samples,
+ sFmt));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+sk_sp<GrTexture> GrMtlGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ // We don't support protected textures in Metal.
+ if (isProtected == GrProtected::kYes) {
+ return nullptr;
+ }
+ SkASSERT(mipLevelCount > 0);
+
+ MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
+ SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
+ SkASSERT(!this->caps()->isFormatCompressed(format));
+
+ sk_sp<GrMtlTexture> tex;
+    // This TexDesc refers to the texture that will be read by the client. Thus even if MSAA is
+ // requested, this TexDesc describes the resolved texture. Therefore we always have samples
+ // set to 1.
+ MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
+ texDesc.textureType = MTLTextureType2D;
+ texDesc.pixelFormat = mtlPixelFormat;
+ texDesc.width = desc.fWidth;
+ texDesc.height = desc.fHeight;
+ texDesc.depth = 1;
+ texDesc.mipmapLevelCount = mipLevelCount;
+ texDesc.sampleCount = 1;
+ texDesc.arrayLength = 1;
+    // Make all textures have private, GPU-only access. We can use transfer buffers or textures
+ // to copy to them.
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ texDesc.storageMode = MTLStorageModePrivate;
+ texDesc.usage = MTLTextureUsageShaderRead;
+ texDesc.usage |= (renderable == GrRenderable::kYes) ? MTLTextureUsageRenderTarget : 0;
+ }
+
+ GrMipMapsStatus mipMapsStatus =
+ mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
+ if (renderable == GrRenderable::kYes) {
+ tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(this, budgeted,
+ desc, renderTargetSampleCnt,
+ texDesc, mipMapsStatus);
+ } else {
+ tex = GrMtlTexture::MakeNewTexture(this, budgeted, desc, texDesc, mipMapsStatus);
+ }
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (levelClearMask) {
+ auto colorType = GrPixelConfigToColorType(desc.fConfig);
+ this->clearTexture(tex.get(), colorType, levelClearMask);
+ }
+
+ return tex;
+}
+
+sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(int width, int height,
+ const GrBackendFormat& format,
+ SkImage::CompressionType compressionType,
+ SkBudgeted budgeted, const void* data) {
+ SkASSERT(this->caps()->isFormatTexturable(format));
+ SkASSERT(data);
+
+ if (!check_max_blit_width(width)) {
+ return nullptr;
+ }
+
+ MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
+
+ // This TexDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this TexDesc describes the resolved texture. Therefore we always have samples
+ // set to 1.
+ // Compressed textures with MIP levels or multiple samples are not supported as of now.
+ MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
+ texDesc.textureType = MTLTextureType2D;
+ texDesc.pixelFormat = mtlPixelFormat;
+ texDesc.width = width;
+ texDesc.height = height;
+ texDesc.depth = 1;
+ texDesc.mipmapLevelCount = 1;
+ texDesc.sampleCount = 1;
+ texDesc.arrayLength = 1;
+    // Make all textures have private, GPU-only access. We can use transfer buffers or textures
+    // to copy to them.
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ texDesc.storageMode = MTLStorageModePrivate;
+ texDesc.usage = MTLTextureUsageShaderRead;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fConfig = GrCompressionTypePixelConfig(compressionType);
+ desc.fWidth = width;
+ desc.fHeight = height;
+ auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, desc, texDesc,
+ GrMipMapsStatus::kNotAllocated);
+ if (!tex) {
+ return nullptr;
+ }
+
+ // Upload to texture
+ id<MTLTexture> mtlTexture = tex->mtlTexture();
+ SkASSERT(mtlTexture);
+
+ SkImage::CompressionType textureCompressionType;
+ if (!GrMtlFormatToCompressionType(mtlTexture.pixelFormat, &textureCompressionType) ||
+ textureCompressionType != compressionType) {
+ return nullptr;
+ }
+
+ size_t dataSize = GrCompressedDataSize(compressionType, width, height);
+ SkASSERT(dataSize);
+
+ size_t bufferOffset;
+ id<MTLBuffer> transferBuffer = this->resourceProvider().getDynamicBuffer(dataSize,
+ &bufferOffset);
+ if (!transferBuffer) {
+ return nullptr;
+ }
+ char* buffer = (char*) transferBuffer.contents + bufferOffset;
+
+ MTLOrigin origin = MTLOriginMake(0, 0, 0);
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ const size_t rowBytes = GrCompressedRowBytes(compressionType, width);
+
+ // copy data into the buffer, skipping any trailing bytes
+ memcpy(buffer, data, dataSize);
+ [blitCmdEncoder copyFromBuffer: transferBuffer
+ sourceOffset: bufferOffset
+ sourceBytesPerRow: rowBytes
+ sourceBytesPerImage: dataSize
+ sourceSize: MTLSizeMake(width, height, 1)
+ toTexture: mtlTexture
+ destinationSlice: 0
+ destinationLevel: 0
+ destinationOrigin: origin];
+#ifdef SK_BUILD_FOR_MAC
+ [transferBuffer didModifyRange: NSMakeRange(bufferOffset, dataSize)];
+#endif
+
+ return tex;
+}
+
+static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
+ GrMtlTextureInfo textureInfo;
+ if (!backendTex.getMtlTextureInfo(&textureInfo)) {
+ return nil;
+ }
+ return GrGetMTLTexture(textureInfo.fTexture.get());
+}
+
+static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
+ GrMtlTextureInfo textureInfo;
+ if (!backendRT.getMtlTextureInfo(&textureInfo)) {
+ return nil;
+ }
+ return GrGetMTLTexture(textureInfo.fTexture.get());
+}
+
+static inline void init_surface_desc(GrSurfaceDesc* surfaceDesc, id<MTLTexture> mtlTexture,
+ GrRenderable renderable, GrPixelConfig config) {
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ if (renderable == GrRenderable::kYes) {
+ SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
+ }
+ }
+ surfaceDesc->fWidth = mtlTexture.width;
+ surfaceDesc->fHeight = mtlTexture.height;
+ surfaceDesc->fConfig = config;
+}
+
+sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType grColorType,
+ GrWrapOwnership,
+ GrWrapCacheable cacheable, GrIOType ioType) {
+ id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
+ if (!mtlTexture) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ grColorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ init_surface_desc(&surfDesc, mtlTexture, GrRenderable::kNo, config);
+
+ return GrMtlTexture::MakeWrappedTexture(this, surfDesc, mtlTexture, cacheable, ioType);
+}
+
+sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership,
+ GrWrapCacheable cacheable) {
+ id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
+ if (!mtlTexture) {
+ return nullptr;
+ }
+
+ const GrMtlCaps& caps = this->mtlCaps();
+
+ MTLPixelFormat format = mtlTexture.pixelFormat;
+ if (!caps.isFormatRenderable(format, sampleCnt)) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = caps.getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ colorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ init_surface_desc(&surfDesc, mtlTexture, GrRenderable::kYes, config);
+
+ sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
+ SkASSERT(sampleCnt);
+
+ return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(this, surfDesc, sampleCnt,
+ mtlTexture, cacheable);
+}
+
+sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
+ GrColorType grColorType) {
+ // TODO: Revisit this when the Metal backend is completed. It may support MSAA render targets.
+ if (backendRT.sampleCnt() > 1) {
+ return nullptr;
+ }
+ id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
+ if (!mtlTexture) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendRT.getBackendFormat(),
+ grColorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ init_surface_desc(&surfDesc, mtlTexture, GrRenderable::kYes, config);
+
+ return GrMtlRenderTarget::MakeWrappedRenderTarget(this, surfDesc, backendRT.sampleCnt(),
+ mtlTexture);
+}
+
+sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendTextureAsRenderTarget(
+ const GrBackendTexture& backendTex, int sampleCnt, GrColorType grColorType) {
+ id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
+ if (!mtlTexture) {
+ return nullptr;
+ }
+
+ MTLPixelFormat format = mtlTexture.pixelFormat;
+ if (!this->mtlCaps().isFormatRenderable(format, sampleCnt)) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ grColorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ init_surface_desc(&surfDesc, mtlTexture, GrRenderable::kYes, config);
+ sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
+ if (!sampleCnt) {
+ return nullptr;
+ }
+
+ return GrMtlRenderTarget::MakeWrappedRenderTarget(this, surfDesc, sampleCnt, mtlTexture);
+}
+
+bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
+ GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
+ id<MTLTexture> mtlTexture = grMtlTexture->mtlTexture();
+
+ // Automatic mipmap generation is only supported by color-renderable formats
+ if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
+        // We have pixel configs marked as texturable-only that use RGBA8 as the internal format
+ MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
+ return false;
+ }
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
+
+ return true;
+}
+
+static GrPixelConfig mtl_format_to_pixelconfig(MTLPixelFormat format) {
+    switch (format) {
+ case MTLPixelFormatA8Unorm: return kAlpha_8_GrPixelConfig;
+ case MTLPixelFormatR8Unorm: return kAlpha_8_GrPixelConfig;
+
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatB5G6R5Unorm: return kRGB_565_GrPixelConfig;
+ case MTLPixelFormatABGR4Unorm: return kRGBA_4444_GrPixelConfig;
+#endif
+ case MTLPixelFormatRGBA8Unorm: return kRGBA_8888_GrPixelConfig;
+ case MTLPixelFormatRGBA8Unorm_sRGB: return kSRGBA_8888_GrPixelConfig;
+
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatETC2_RGB8: return kRGB_ETC1_GrPixelConfig;
+#endif
+ case MTLPixelFormatRG8Unorm: return kRG_88_GrPixelConfig;
+ case MTLPixelFormatBGRA8Unorm: return kBGRA_8888_GrPixelConfig;
+ case MTLPixelFormatRGB10A2Unorm: return kRGBA_1010102_GrPixelConfig;
+ case MTLPixelFormatR16Float: return kAlpha_half_GrPixelConfig;
+ case MTLPixelFormatRGBA16Float: return kRGBA_half_GrPixelConfig;
+ case MTLPixelFormatR16Unorm: return kAlpha_16_GrPixelConfig;
+ case MTLPixelFormatRG16Unorm: return kRG_1616_GrPixelConfig;
+ case MTLPixelFormatRGBA16Unorm: return kRGBA_16161616_GrPixelConfig;
+ case MTLPixelFormatRG16Float: return kRG_half_GrPixelConfig;
+ default: return kUnknown_GrPixelConfig;
+ }
+
+ SkUNREACHABLE;
+}
+
+void copy_src_data(char* dst, size_t bytesPerPixel, const SkTArray<size_t>& individualMipOffsets,
+ const SkPixmap srcData[], int numMipLevels, size_t bufferSize) {
+ SkASSERT(srcData && numMipLevels);
+ SkASSERT(individualMipOffsets.count() == numMipLevels);
+
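+    // Each level is copied as a tightly packed rectangle at its offset in
+    // individualMipOffsets: e.g. a 4x4 RGBA8 base level with two mips copies
+    // rows of 16, 8 and 4 bytes respectively.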
+ for (int level = 0; level < numMipLevels; ++level) {
+ const size_t trimRB = srcData[level].width() * bytesPerPixel;
+ SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= bufferSize);
+ SkRectMemcpy(dst + individualMipOffsets[level], trimRB,
+ srcData[level].addr(), srcData[level].rowBytes(),
+ trimRB, srcData[level].height());
+ }
+}
+
+bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat format,
+ int w, int h, bool texturable,
+ bool renderable, GrMipMapped mipMapped,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrMtlTextureInfo* info) {
+ SkASSERT(texturable || renderable);
+ if (!texturable) {
+ SkASSERT(GrMipMapped::kNo == mipMapped);
+ SkASSERT(!srcData && !numMipLevels);
+ }
+
+#ifdef SK_BUILD_FOR_IOS
+ // Compressed formats go through onCreateCompressedBackendTexture
+ SkASSERT(!GrMtlFormatIsCompressed(format));
+#endif
+
+ if (texturable && !fMtlCaps->isFormatTexturable(format)) {
+ return false;
+ }
+ if (renderable && !fMtlCaps->isFormatRenderable(format, 1)) {
+ return false;
+ }
+
+ if (!check_max_blit_width(w)) {
+ return false;
+ }
+
+ int mipLevelCount = 1;
+ if (srcData) {
+ SkASSERT(numMipLevels > 0);
+ mipLevelCount = numMipLevels;
+ } else if (GrMipMapped::kYes == mipMapped) {
+ mipLevelCount = SkMipMap::ComputeLevelCount(w, h) + 1;
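+        // e.g. a 16x16 texture gives ComputeLevelCount = 4, so five levels in all:
+        // 16, 8, 4, 2 and 1.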
+ }
+
+    bool mipmapped = (GrMipMapped::kYes == mipMapped);
+ MTLTextureDescriptor* desc =
+ [MTLTextureDescriptor texture2DDescriptorWithPixelFormat: format
+ width: w
+ height: h
+ mipmapped: mipmapped];
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ desc.storageMode = MTLStorageModePrivate;
+ desc.usage = texturable ? MTLTextureUsageShaderRead : 0;
+ desc.usage |= renderable ? MTLTextureUsageRenderTarget : 0;
+ }
+ id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
+
+ if (!srcData && !color) {
+ info->fTexture.reset(GrRetainPtrFromId(testTexture));
+
+ return true;
+ }
+
+ // Create the transfer buffer
+ size_t bytesPerPixel = fMtlCaps->bytesPerPixel(format);
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+ size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerPixel, w, h,
+ &individualMipOffsets,
+ mipLevelCount);
+
+ NSUInteger options = 0; // TODO: consider other options here
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+#ifdef SK_BUILD_FOR_MAC
+ options |= MTLResourceStorageModeManaged;
+#else
+ options |= MTLResourceStorageModeShared;
+#endif
+ }
+
+ id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: combinedBufferSize
+ options: options];
+ if (nil == transferBuffer) {
+ return false;
+ }
+
+ char* buffer = (char*) transferBuffer.contents;
+
+ // Fill buffer with data
+ if (srcData) {
+ copy_src_data(buffer, bytesPerPixel, individualMipOffsets,
+ srcData, numMipLevels, combinedBufferSize);
+ } else if (color) {
+ GrPixelConfig config = mtl_format_to_pixelconfig(format);
+ auto colorType = GrPixelConfigToColorType(config);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+ GrFillInData(colorType, w, h, individualMipOffsets, buffer, *color);
+ }
+
+ // Transfer buffer contents to texture
+ int currentWidth = w;
+ int currentHeight = h;
+ MTLOrigin origin = MTLOriginMake(0, 0, 0);
+
+ id<MTLCommandBuffer> cmdBuffer = [fQueue commandBuffer];
+ id<MTLBlitCommandEncoder> blitCmdEncoder = [cmdBuffer blitCommandEncoder];
+
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ size_t trimRowBytes = currentWidth * bytesPerPixel;
+ size_t levelSize = trimRowBytes*currentHeight;
+
+ // TODO: can this all be done in one go?
+ [blitCmdEncoder copyFromBuffer: transferBuffer
+ sourceOffset: individualMipOffsets[currentMipLevel]
+ sourceBytesPerRow: trimRowBytes
+ sourceBytesPerImage: levelSize
+ sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
+ toTexture: testTexture
+ destinationSlice: 0
+ destinationLevel: currentMipLevel
+ destinationOrigin: origin];
+
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+#ifdef SK_BUILD_FOR_MAC
+ [transferBuffer didModifyRange: NSMakeRange(0, combinedBufferSize)];
+#endif
+
+ [blitCmdEncoder endEncoding];
+ [cmdBuffer commit];
+ [cmdBuffer waitUntilCompleted];
+ transferBuffer = nil;
+
+ info->fTexture.reset(GrRetainPtrFromId(testTexture));
+
+ return true;
+}
+
+GrBackendTexture GrMtlGpu::onCreateBackendTexture(int w, int h,
+ const GrBackendFormat& format,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color,
+ GrProtected isProtected) {
+ SkDEBUGCODE(const GrMtlCaps& caps = this->mtlCaps();)
+
+ // GrGpu::createBackendTexture should've ensured these conditions
+ SkASSERT(w >= 1 && w <= caps.maxTextureSize() && h >= 1 && h <= caps.maxTextureSize());
+ SkASSERT(GrGpu::MipMapsAreCorrect(w, h, mipMapped, srcData, numMipLevels));
+ SkASSERT(mipMapped == GrMipMapped::kNo || caps.mipMapSupport());
+
+ const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
+ GrMtlTextureInfo info;
+ if (!this->createMtlTextureForBackendSurface(mtlFormat,
+ w, h, true,
+ GrRenderable::kYes == renderable, mipMapped,
+ srcData, numMipLevels, color, &info)) {
+ return {};
+ }
+
+ GrBackendTexture backendTex(w, h, mipMapped, info);
+ return backendTex;
+}
+
+void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
+ SkASSERT(GrBackendApi::kMetal == tex.backend());
+ // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
+}
+
+#if GR_TEST_UTILS
+bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(GrBackendApi::kMetal == tex.backend());
+
+ GrMtlTextureInfo info;
+ if (!tex.getMtlTextureInfo(&info)) {
+ return false;
+ }
+ id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
+ if (!mtlTexture) {
+ return false;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ return mtlTexture.usage & MTLTextureUsageShaderRead;
+ } else {
+ return true; // best we can do
+ }
+}
+
+GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(int w, int h, GrColorType ct) {
+ if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
+ return GrBackendRenderTarget();
+ }
+
+ GrPixelConfig config = GrColorTypeToPixelConfig(ct);
+
+ MTLPixelFormat format;
+ if (!GrPixelConfigToMTLFormat(config, &format)) {
+ return GrBackendRenderTarget();
+ }
+
+ GrMtlTextureInfo info;
+ if (!this->createMtlTextureForBackendSurface(format, w, h, false, true,
+ GrMipMapped::kNo, nullptr, 0, nullptr, &info)) {
+ return {};
+ }
+
+ GrBackendRenderTarget backendRT(w, h, 1, info);
+ return backendRT;
+}
+
+void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
+ SkASSERT(GrBackendApi::kMetal == rt.backend());
+
+ GrMtlTextureInfo info;
+ if (rt.getMtlTextureInfo(&info)) {
+ this->testingOnly_flushGpuAndSync();
+ // Nothing else to do here, will get cleaned up when the GrBackendRenderTarget
+ // is deleted.
+ }
+}
+
+void GrMtlGpu::testingOnly_flushGpuAndSync() {
+ this->submitCommandBuffer(kForce_SyncQueue);
+}
+#endif // GR_TEST_UTILS
+
+static int get_surface_sample_cnt(GrSurface* surf) {
+ if (const GrRenderTarget* rt = surf->asRenderTarget()) {
+ return rt->numSamples();
+ }
+ return 0;
+}
+
+void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
+ // TODO: Add support for subrectangles
+ GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
+ GrRenderTarget* dstRT = dst->asRenderTarget();
+ id<MTLTexture> dstTexture;
+ if (dstRT) {
+ GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
+ dstTexture = mtlRT->mtlColorTexture();
+ } else {
+ SkASSERT(dst->asTexture());
+ dstTexture = static_cast<GrMtlTexture*>(dst->asTexture())->mtlTexture();
+ }
+
+ this->resolveTexture(dstTexture, srcRT->mtlColorTexture());
+}
+
+void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ id<MTLTexture> dstTex = GrGetMTLTextureFromSurface(dst);
+ id<MTLTexture> srcTex = GrGetMTLTextureFromSurface(src);
+
+#ifdef SK_DEBUG
+ int dstSampleCnt = get_surface_sample_cnt(dst);
+ int srcSampleCnt = get_surface_sample_cnt(src);
+ SkASSERT(this->mtlCaps().canCopyAsBlit(dstTex.pixelFormat, dstSampleCnt, srcTex.pixelFormat,
+ srcSampleCnt, srcRect, dstPoint, dst == src));
+#endif
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ [blitCmdEncoder copyFromTexture: srcTex
+ sourceSlice: 0
+ sourceLevel: 0
+ sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
+ sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
+ toTexture: dstTex
+ destinationSlice: 0
+ destinationLevel: 0
+ destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
+}
+
+bool GrMtlGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(!src->isProtected() && !dst->isProtected());
+
+ MTLPixelFormat dstFormat = GrBackendFormatAsMTLPixelFormat(dst->backendFormat());
+ MTLPixelFormat srcFormat = GrBackendFormatAsMTLPixelFormat(src->backendFormat());
+
+ int dstSampleCnt = get_surface_sample_cnt(dst);
+ int srcSampleCnt = get_surface_sample_cnt(src);
+
+ bool success = false;
+ if (this->mtlCaps().canCopyAsResolve(dst, dstSampleCnt, src, srcSampleCnt, srcRect, dstPoint)) {
+ this->copySurfaceAsResolve(dst, src);
+ success = true;
+ } else if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
+ srcRect, dstPoint, dst == src)) {
+ this->copySurfaceAsBlit(dst, src, srcRect, dstPoint);
+ success = true;
+ }
+ if (success) {
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.x(), dstPoint.y(),
+ srcRect.width(), srcRect.height());
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+ }
+ return success;
+}
+
+bool GrMtlGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) {
+ GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
+    // TODO: In principle we should be able to support pure render targets as well, but
+    // until we find a use case we'll only support texture render targets.
+ if (!mtlTexture) {
+ return false;
+ }
+ if (!mipLevelCount) {
+ return false;
+ }
+#ifdef SK_DEBUG
+ for (int i = 0; i < mipLevelCount; i++) {
+ SkASSERT(texels[i].fPixels);
+ }
+#endif
+ return this->uploadToTexture(mtlTexture, left, top, width, height, srcColorType, texels,
+ mipLevelCount);
+}
+
+bool GrMtlGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) {
+ SkASSERT(surface);
+
+ if (surfaceColorType != dstColorType) {
+ return false;
+ }
+
+ int bpp = GrColorTypeBytesPerPixel(dstColorType);
+ size_t transBufferRowBytes = bpp * width;
+ size_t transBufferImageBytes = transBufferRowBytes * height;
+
+ // TODO: implement some way of reusing buffers instead of making a new one every time.
+ NSUInteger options = 0;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+#ifdef SK_BUILD_FOR_MAC
+ options |= MTLResourceStorageModeManaged;
+#else
+ options |= MTLResourceStorageModeShared;
+#endif
+ }
+
+ id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: transBufferImageBytes
+ options: options];
+
+ if (!this->readOrTransferPixels(surface, left, top, width, height, dstColorType, transferBuffer,
+ 0, transBufferImageBytes, transBufferRowBytes)) {
+ return false;
+ }
+ this->submitCommandBuffer(kForce_SyncQueue);
+
+ const void* mappedMemory = transferBuffer.contents;
+
+ SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, transBufferRowBytes, height);
+
+ return true;
+}
+
+bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
+ SkASSERT(texture);
+ SkASSERT(transferBuffer);
+ if (textureColorType != bufferColorType ||
+ GrPixelConfigToColorType(texture->config()) != bufferColorType) {
+ return false;
+ }
+
+ GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
+ id<MTLTexture> mtlTexture = grMtlTexture->mtlTexture();
+ SkASSERT(mtlTexture);
+
+ GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer);
+ id<MTLBuffer> mtlBuffer = grMtlBuffer->mtlBuffer();
+ SkASSERT(mtlBuffer);
+
+ size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
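+    // Metal requires the transfer offset to be aligned to the size of a pixel.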
+ if (offset % bpp) {
+ return false;
+ }
+ MTLOrigin origin = MTLOriginMake(left, top, 0);
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ [blitCmdEncoder copyFromBuffer: mtlBuffer
+ sourceOffset: offset + grMtlBuffer->offset()
+ sourceBytesPerRow: rowBytes
+ sourceBytesPerImage: rowBytes*height
+ sourceSize: MTLSizeMake(width, height, 1)
+ toTexture: mtlTexture
+ destinationSlice: 0
+ destinationLevel: 0
+ destinationOrigin: origin];
+
+ return true;
+}
+
+bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) {
+ SkASSERT(surface);
+ SkASSERT(transferBuffer);
+
+ if (surfaceColorType != bufferColorType) {
+ return false;
+ }
+
+ // Metal only supports offsets that are aligned to a pixel.
+ int bpp = GrColorTypeBytesPerPixel(bufferColorType);
+ if (offset % bpp) {
+ return false;
+ }
+
+ GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer);
+ grMtlBuffer->bind();
+
+ size_t transBufferRowBytes = bpp * width;
+ size_t transBufferImageBytes = transBufferRowBytes * height;
+
+ return this->readOrTransferPixels(surface, left, top, width, height, bufferColorType,
+ grMtlBuffer->mtlBuffer(), offset + grMtlBuffer->offset(),
+ transBufferImageBytes, transBufferRowBytes);
+}
+
+bool GrMtlGpu::readOrTransferPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType dstColorType, id<MTLBuffer> transferBuffer,
+ size_t offset, size_t imageBytes, size_t rowBytes) {
+ if (!check_max_blit_width(width)) {
+ return false;
+ }
+ if (GrPixelConfigToColorType(surface->config()) != dstColorType) {
+ return false;
+ }
+
+ id<MTLTexture> mtlTexture;
+ if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
+ if (rt->numSamples() > 1) {
+ SkASSERT(rt->requiresManualMSAAResolve()); // msaa-render-to-texture not yet supported.
+ mtlTexture = rt->mtlResolveTexture();
+ } else {
+ SkASSERT(!rt->requiresManualMSAAResolve());
+ mtlTexture = rt->mtlColorTexture();
+ }
+ } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
+ mtlTexture = texture->mtlTexture();
+ }
+ if (!mtlTexture) {
+ return false;
+ }
+
+ id<MTLBlitCommandEncoder> blitCmdEncoder = this->commandBuffer()->getBlitCommandEncoder();
+ [blitCmdEncoder copyFromTexture: mtlTexture
+ sourceSlice: 0
+ sourceLevel: 0
+ sourceOrigin: MTLOriginMake(left, top, 0)
+ sourceSize: MTLSizeMake(width, height, 1)
+ toBuffer: transferBuffer
+ destinationOffset: offset
+ destinationBytesPerRow: rowBytes
+ destinationBytesPerImage: imageBytes];
+#ifdef SK_BUILD_FOR_MAC
+ // Sync GPU data back to the CPU
+ [blitCmdEncoder synchronizeResource: transferBuffer];
+#endif
+
+ return true;
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrMtlGpu::insertFence() {
+ GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ ++fLatestEvent;
+ cmdBuffer->encodeSignalEvent(fSharedEvent, fLatestEvent);
+
+ return fLatestEvent;
+ }
+ // If MTLSharedEvent isn't available, we create a semaphore and signal it
+ // within the current command buffer's completion handler.
+ dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+ cmdBuffer->addCompletedHandler(^(id <MTLCommandBuffer>commandBuffer) {
+ dispatch_semaphore_signal(semaphore);
+ });
+
+ const void* cfFence = (__bridge_retained const void*) semaphore;
+ return (GrFence) cfFence;
+}
+
+bool GrMtlGpu::waitFence(GrFence fence, uint64_t timeout) {
+ dispatch_semaphore_t semaphore;
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ semaphore = dispatch_semaphore_create(0);
+
+ // Add listener for this particular value or greater
+ __block dispatch_semaphore_t block_sema = semaphore;
+ [fSharedEvent notifyListener: fSharedEventListener
+ atValue: fence
+ block: ^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
+ dispatch_semaphore_signal(block_sema);
+ }];
+
+ } else {
+ const void* cfFence = (const void*) fence;
+ semaphore = (__bridge dispatch_semaphore_t)cfFence;
+ }
+ long result = dispatch_semaphore_wait(semaphore, timeout);
+
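+    // dispatch_semaphore_wait returns zero on success and non-zero if the timeout expired.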
+ return !result;
+}
+
+void GrMtlGpu::deleteFence(GrFence fence) const {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ // nothing to delete
+ } else {
+ const void* cfFence = (const void*) fence;
+ // In this case it's easier to release in CoreFoundation than depend on ARC
+ CFRelease(cfFence);
+ }
+}
+
+sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrMtlGpu::makeSemaphore(bool isOwned) {
+ SkASSERT(this->caps()->semaphoreSupport());
+ return GrMtlSemaphore::Make(this, isOwned);
+}
+
+sk_sp<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) {
+ SkASSERT(this->caps()->semaphoreSupport());
+ return GrMtlSemaphore::MakeWrapped(this, semaphore.mtlSemaphore(), semaphore.mtlValue(),
+ ownership);
+}
+
+void GrMtlGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore.get());
+
+ this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
+ }
+}
+
+void GrMtlGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore.get());
+
+ this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
+ }
+}
+
+void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&, GrSurfaceOrigin,
+ ForExternalIO forExternalIO) {
+ this->resolveTexture(static_cast<GrMtlRenderTarget*>(target)->mtlResolveTexture(),
+ static_cast<GrMtlRenderTarget*>(target)->mtlColorTexture());
+
+ if (ForExternalIO::kYes == forExternalIO) {
+ // This resolve is called when we are preparing an msaa surface for external I/O. It is
+ // called after flushing, so we need to make sure we submit the command buffer after
+ // doing the resolve so that the resolve actually happens.
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ }
+}
+
+void GrMtlGpu::resolveTexture(id<MTLTexture> resolveTexture, id<MTLTexture> colorTexture) {
+ auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
+ renderPassDesc.colorAttachments[0].texture = colorTexture;
+ renderPassDesc.colorAttachments[0].slice = 0;
+ renderPassDesc.colorAttachments[0].level = 0;
+ renderPassDesc.colorAttachments[0].resolveTexture = resolveTexture;
+    renderPassDesc.colorAttachments[0].resolveSlice = 0;
+    renderPassDesc.colorAttachments[0].resolveLevel = 0;
+ renderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
+ renderPassDesc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
+
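+    // Encoding an otherwise-empty pass whose store action is MTLStoreActionMultisampleResolve
+    // performs the resolve when the render command encoder ends; no draw calls are needed.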
+ id<MTLRenderCommandEncoder> cmdEncoder =
+ this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
+ SkASSERT(nil != cmdEncoder);
+ cmdEncoder.label = @"resolveTexture";
+}
+
+#if GR_TEST_UTILS
+void GrMtlGpu::testingOnly_startCapture() {
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ // TODO: add Metal 3 interface as well
+ MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
+ [captureManager startCaptureWithDevice: fDevice];
+ }
+}
+
+void GrMtlGpu::testingOnly_endCapture() {
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
+ [captureManager stopCapture];
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.h b/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.h
new file mode 100644
index 0000000000..702bf29d80
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlOpsRenderPass_DEFINED
+#define GrMtlOpsRenderPass_DEFINED
+
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#import <Metal/Metal.h>
+
+typedef uint32_t GrColor;
+class GrMtlBuffer;
+class GrMtlPipelineState;
+class GrMtlRenderTarget;
+
+class GrMtlOpsRenderPass : public GrOpsRenderPass, private GrMesh::SendToGpuImpl {
+public:
+ GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo);
+
+ ~GrMtlOpsRenderPass() override;
+
+ void begin() override {}
+ void end() override {}
+
+ void insertEventMarker(const char* msg) override {}
+
+ void initRenderState(id<MTLRenderCommandEncoder>);
+
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {
+ // TODO: this could be more efficient
+ state->doUpload(upload);
+ }
+ void submit();
+
+private:
+ GrGpu* gpu() override { return fGpu; }
+
+ GrMtlPipelineState* prepareDrawState(const GrProgramInfo&, GrPrimitiveType);
+
+ void onDraw(const GrProgramInfo& programInfo,
+ const GrMesh mesh[],
+ int meshCount,
+ const SkRect& bounds) override;
+
+ void onClear(const GrFixedClip& clip, const SkPMColor4f& color) override;
+
+ void onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) override;
+
+ void setupRenderPass(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo);
+
+ void bindGeometry(const GrBuffer* vertexBuffer, size_t vertexOffset,
+ const GrBuffer* instanceBuffer);
+
+ // GrMesh::SendToGpuImpl methods. These issue the actual Metal draw commands.
+ // Marked final as a hint to the compiler to not use virtual dispatch.
+ void sendMeshToGpu(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) final;
+
+ void sendIndexedMeshToGpu(GrPrimitiveType primType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, uint16_t /*minIndexValue*/, uint16_t /*maxIndexValue*/,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ GrPrimitiveRestart restart) final;
+
+ void sendInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex, const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) final;
+
+ void sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart) final;
+
+ void setVertexBuffer(id<MTLRenderCommandEncoder>, const GrMtlBuffer*, size_t offset,
+ size_t index);
+ void resetBufferBindings();
+ void precreateCmdEncoder();
+
+ GrMtlGpu* fGpu;
+
+ id<MTLRenderCommandEncoder> fActiveRenderCmdEncoder;
+ MTLRenderPassDescriptor* fRenderPassDesc;
+ SkRect fBounds;
+ size_t fCurrentVertexStride;
+
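+    // Slots for the uniform buffer bindings (indices 0 through kLastUniformBinding) plus the
+    // vertex and instance buffers bound in bindGeometry(), hence the + 3.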
+ static constexpr size_t kNumBindings = GrMtlUniformHandler::kLastUniformBinding + 3;
+ struct {
+ id<MTLBuffer> fBuffer;
+ size_t fOffset;
+ } fBufferBindings[kNumBindings];
+
+ typedef GrOpsRenderPass INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.mm
new file mode 100644
index 0000000000..f587492f77
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlOpsRenderPass.mm
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
+
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/mtl/GrMtlCommandBuffer.h"
+#include "src/gpu/mtl/GrMtlPipelineState.h"
+#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
+#include "src/gpu/mtl/GrMtlRenderTarget.h"
+#include "src/gpu/mtl/GrMtlTexture.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
+ : INHERITED(rt, origin)
+ , fGpu(gpu) {
+ this->setupRenderPass(colorInfo, stencilInfo);
+}
+
+GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
+ SkASSERT(nil == fActiveRenderCmdEncoder);
+}
+
+void GrMtlOpsRenderPass::precreateCmdEncoder() {
+ // For clears, we may not have an associated draw. So we prepare a cmdEncoder that
+ // will be submitted whether there's a draw or not.
+ SkASSERT(nil == fActiveRenderCmdEncoder);
+
+ SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
+ fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
+ SkASSERT(nil != cmdEncoder);
+}
+
+void GrMtlOpsRenderPass::submit() {
+ if (!fRenderTarget) {
+ return;
+ }
+ SkIRect iBounds;
+ fBounds.roundOut(&iBounds);
+ fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
+}
+
+GrMtlPipelineState* GrMtlOpsRenderPass::prepareDrawState(const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType) {
+ // TODO: resolve textures and regenerate mipmaps as needed
+
+ GrMtlPipelineState* pipelineState =
+ fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget,
+ programInfo,
+ primitiveType);
+ if (!pipelineState) {
+ return nullptr;
+ }
+
+ pipelineState->setData(fRenderTarget, programInfo);
+ fCurrentVertexStride = programInfo.primProc().vertexStride();
+
+ return pipelineState;
+}
+
+void GrMtlOpsRenderPass::onDraw(const GrProgramInfo& programInfo,
+ const GrMesh meshes[],
+ int meshCount,
+ const SkRect& bounds) {
+
+ SkASSERT(meshCount); // guaranteed by GrOpsRenderPass::draw
+
+ GrPrimitiveType primitiveType = meshes[0].primitiveType();
+ GrMtlPipelineState* pipelineState = this->prepareDrawState(programInfo, primitiveType);
+ if (!pipelineState) {
+ return;
+ }
+
+ SkASSERT(nil == fActiveRenderCmdEncoder);
+ fActiveRenderCmdEncoder = fGpu->commandBuffer()->getRenderCommandEncoder(
+ fRenderPassDesc, pipelineState, this);
+ SkASSERT(fActiveRenderCmdEncoder);
+
+ [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
+ pipelineState->setDrawState(fActiveRenderCmdEncoder,
+ programInfo.pipeline().outputSwizzle(),
+ programInfo.pipeline().getXferProcessor());
+
+ bool hasDynamicScissors = programInfo.hasDynamicScissors();
+
+ if (!programInfo.pipeline().isScissorEnabled()) {
+ GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
+ fRenderTarget, fOrigin,
+ SkIRect::MakeWH(fRenderTarget->width(),
+ fRenderTarget->height()));
+ } else if (!hasDynamicScissors) {
+ SkASSERT(programInfo.hasFixedScissor());
+
+ GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
+ fRenderTarget, fOrigin,
+ programInfo.fixedScissor());
+ }
+
+ for (int i = 0; i < meshCount; ++i) {
+ const GrMesh& mesh = meshes[i];
+ SkASSERT(nil != fActiveRenderCmdEncoder);
+ if (mesh.primitiveType() != primitiveType) {
+ SkDEBUGCODE(pipelineState = nullptr);
+ primitiveType = mesh.primitiveType();
+ pipelineState = this->prepareDrawState(programInfo, primitiveType);
+ if (!pipelineState) {
+ return;
+ }
+
+ [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
+ pipelineState->setDrawState(fActiveRenderCmdEncoder,
+ programInfo.pipeline().outputSwizzle(),
+ programInfo.pipeline().getXferProcessor());
+ }
+
+ if (hasDynamicScissors) {
+ GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder, fRenderTarget,
+ fOrigin,
+ programInfo.dynamicScissor(i));
+ }
+
+ mesh.sendToGpu(this);
+ }
+
+ fActiveRenderCmdEncoder = nil;
+ fBounds.join(bounds);
+}
+
+void GrMtlOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
+    // We should never end up here, since in Metal all clears should be done either as draws or
+    // as load ops. If we hit this assert then we missed a chance to set a load op at the
+    // GrRenderTargetContext level.
+ SkASSERT(false);
+}
+
+void GrMtlOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ SkASSERT(!clip.hasWindowRectangles());
+
+ GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
+ // this should only be called internally when we know we have a
+ // stencil buffer.
+ SkASSERT(sb);
+ int stencilBitCount = sb->bits();
+
+ // The contract with the callers does not guarantee that we preserve all bits in the stencil
+ // during this clear. Thus we will clear the entire stencil to the desired value.
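+    // e.g. with an 8-bit stencil buffer this clears to 0x80 inside the stencil mask
+    // and to 0 outside it.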
+ if (insideStencilMask) {
+ fRenderPassDesc.stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
+ } else {
+ fRenderPassDesc.stencilAttachment.clearStencil = 0;
+ }
+
+ fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
+ this->precreateCmdEncoder();
+ fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
+}
+
+void GrMtlOpsRenderPass::initRenderState(id<MTLRenderCommandEncoder> encoder) {
+ [encoder pushDebugGroup:@"initRenderState"];
+ [encoder setFrontFacingWinding:MTLWindingCounterClockwise];
+ // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
+ // the drawable used to generate the renderCommandEncoder -- but just in case.
+ MTLViewport viewport = { 0.0, 0.0,
+ (double) fRenderTarget->width(), (double) fRenderTarget->height(),
+ 0.0, 1.0 };
+ [encoder setViewport:viewport];
+ this->resetBufferBindings();
+ [encoder popDebugGroup];
+}
+
+void GrMtlOpsRenderPass::setupRenderPass(
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
+ const static MTLLoadAction mtlLoadAction[] {
+ MTLLoadActionLoad,
+ MTLLoadActionClear,
+ MTLLoadActionDontCare
+ };
+ GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
+ GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
+ GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
+ SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
+ SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);
+
+ const static MTLStoreAction mtlStoreAction[] {
+ MTLStoreActionStore,
+ MTLStoreActionDontCare
+ };
+ GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
+ GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
+ SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
+ SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);
+
+ auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
+ renderPassDesc.colorAttachments[0].texture =
+ static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlColorTexture();
+ renderPassDesc.colorAttachments[0].slice = 0;
+ renderPassDesc.colorAttachments[0].level = 0;
+ const SkPMColor4f& clearColor = colorInfo.fClearColor;
+ renderPassDesc.colorAttachments[0].clearColor =
+ MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
+ renderPassDesc.colorAttachments[0].loadAction =
+ mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
+ renderPassDesc.colorAttachments[0].storeAction =
+ mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
+
+ const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>(
+ fRenderTarget->renderTargetPriv().getStencilAttachment());
+ if (stencil) {
+ renderPassDesc.stencilAttachment.texture = stencil->stencilView();
+ }
+ renderPassDesc.stencilAttachment.clearStencil = 0;
+ renderPassDesc.stencilAttachment.loadAction =
+ mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
+ renderPassDesc.stencilAttachment.storeAction =
+ mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];
+
+ fRenderPassDesc = renderPassDesc;
+
+ // Manage initial clears
+ if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
+ fBounds = SkRect::MakeWH(fRenderTarget->width(),
+ fRenderTarget->height());
+ this->precreateCmdEncoder();
+ if (colorInfo.fLoadOp == GrLoadOp::kClear) {
+ fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
+ }
+ if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
+ fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
+ }
+ } else {
+ fBounds.setEmpty();
+ }
+}
+
+static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
+ const static MTLPrimitiveType mtlPrimitiveType[] {
+ MTLPrimitiveTypeTriangle,
+ MTLPrimitiveTypeTriangleStrip,
+ MTLPrimitiveTypePoint,
+ MTLPrimitiveTypeLine,
+ MTLPrimitiveTypeLineStrip
+ };
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangles == 0);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangleStrip == 1);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kPoints == 2);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kLines == 3);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kLineStrip == 4);
+
+ SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
+ return mtlPrimitiveType[static_cast<int>(primitiveType)];
+}
+
+void GrMtlOpsRenderPass::bindGeometry(const GrBuffer* vertexBuffer,
+ size_t vertexOffset,
+ const GrBuffer* instanceBuffer) {
+ size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
+ if (vertexBuffer) {
+ SkASSERT(!vertexBuffer->isCpuBuffer());
+ SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
+
+ const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer);
+ this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, vertexOffset, bufferIndex++);
+ }
+ if (instanceBuffer) {
+ SkASSERT(!instanceBuffer->isCpuBuffer());
+ SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
+
+ const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer);
+ this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, 0, bufferIndex++);
+ }
+}
+
+void GrMtlOpsRenderPass::sendMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex) {
+ this->bindGeometry(vertexBuffer, 0, nullptr);
+
+ [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
+ vertexStart:baseVertex
+ vertexCount:vertexCount];
+}
+
+void GrMtlOpsRenderPass::sendIndexedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ uint16_t /*minIndexValue*/,
+ uint16_t /*maxIndexValue*/,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ GrPrimitiveRestart restart) {
+ this->bindGeometry(vertexBuffer, fCurrentVertexStride*baseVertex, nullptr);
+
+ id<MTLBuffer> mtlIndexBuffer = nil;
+ if (indexBuffer) {
+ SkASSERT(!indexBuffer->isCpuBuffer());
+ SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
+
+ mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
+ SkASSERT(mtlIndexBuffer);
+ }
+
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
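+    // The index buffer may be suballocated within a shared GrMtlBuffer, so fold its base
+    // offset into the index offset.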
+ size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
+ sizeof(uint16_t) * baseIndex;
+ [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
+ indexCount:indexCount
+ indexType:MTLIndexTypeUInt16
+ indexBuffer:mtlIndexBuffer
+ indexBufferOffset:indexOffset];
+ fGpu->stats()->incNumDraws();
+}
+
+void GrMtlOpsRenderPass::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
+ this->bindGeometry(vertexBuffer, 0, instanceBuffer);
+
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
+ vertexStart:baseVertex
+ vertexCount:vertexCount
+ instanceCount:instanceCount
+ baseInstance:baseInstance];
+ } else {
+ SkASSERT(false);
+ }
+}
+
+void GrMtlOpsRenderPass::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance,
+ GrPrimitiveRestart restart) {
+ this->bindGeometry(vertexBuffer, 0, instanceBuffer);
+
+ id<MTLBuffer> mtlIndexBuffer = nil;
+ if (indexBuffer) {
+ SkASSERT(!indexBuffer->isCpuBuffer());
+ SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
+
+ mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
+ SkASSERT(mtlIndexBuffer);
+ }
+
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
+ size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
+ sizeof(uint16_t) * baseIndex;
+
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
+ indexCount:indexCount
+ indexType:MTLIndexTypeUInt16
+ indexBuffer:mtlIndexBuffer
+ indexBufferOffset:indexOffset
+ instanceCount:instanceCount
+ baseVertex:baseVertex
+ baseInstance:baseInstance];
+ } else {
+ SkASSERT(false);
+ }
+ fGpu->stats()->incNumDraws();
+}
+
+void GrMtlOpsRenderPass::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
+ const GrMtlBuffer* buffer,
+ size_t vertexOffset,
+ size_t index) {
+    SkASSERT(index < kNumBindings);
+ id<MTLBuffer> mtlVertexBuffer = buffer->mtlBuffer();
+ SkASSERT(mtlVertexBuffer);
+ // Apple recommends using setVertexBufferOffset: when changing the offset
+ // for a currently bound vertex buffer, rather than setVertexBuffer:
+ size_t offset = buffer->offset() + vertexOffset;
+ if (fBufferBindings[index].fBuffer != mtlVertexBuffer) {
+ [encoder setVertexBuffer: mtlVertexBuffer
+ offset: offset
+ atIndex: index];
+ fBufferBindings[index].fBuffer = mtlVertexBuffer;
+ fBufferBindings[index].fOffset = offset;
+ } else if (fBufferBindings[index].fOffset != offset) {
+ if (@available(macOS 10.11, iOS 8.3, *)) {
+ [encoder setVertexBufferOffset: offset
+ atIndex: index];
+ } else {
+ [encoder setVertexBuffer: mtlVertexBuffer
+ offset: offset
+ atIndex: index];
+ }
+ fBufferBindings[index].fOffset = offset;
+ }
+}
+
+void GrMtlOpsRenderPass::resetBufferBindings() {
+ for (size_t i = 0; i < kNumBindings; ++i) {
+ fBufferBindings[i].fBuffer = nil;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.h b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.h
new file mode 100644
index 0000000000..e71aee2611
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlPipelineState_DEFINED
+#define GrMtlPipelineState_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/mtl/GrMtlBuffer.h"
+#include "src/gpu/mtl/GrMtlPipelineStateDataManager.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlGpu;
+class GrMtlPipelineStateDataManager;
+class GrMtlSampler;
+class GrMtlTexture;
+class GrPipeline;
+
+/**
+ * Wraps a MTLRenderPipelineState object and also contains more info about the pipeline as needed
+ * by Ganesh
+ */
+class GrMtlPipelineState {
+public:
+ using UniformInfoArray = GrMtlPipelineStateDataManager::UniformInfoArray;
+ using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+
+ GrMtlPipelineState(
+ GrMtlGpu* gpu,
+ id<MTLRenderPipelineState> pipelineState,
+ MTLPixelFormat pixelFormat,
+ const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t uniformBufferSize,
+ uint32_t numSamplers,
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+            std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fFragmentProcessorCnt);
+
+ id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState; }
+
+ void setData(const GrRenderTarget*, const GrProgramInfo&);
+
+ void setDrawState(id<MTLRenderCommandEncoder>, const GrSwizzle& outputSwizzle,
+ const GrXferProcessor&);
+
+ static void SetDynamicScissorRectState(id<MTLRenderCommandEncoder> renderCmdEncoder,
+ const GrRenderTarget* renderTarget,
+ GrSurfaceOrigin rtOrigin,
+ SkIRect scissorRect);
+
+ bool doesntSampleAttachment(const MTLRenderPassAttachmentDescriptor*) const;
+
+private:
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to Metal normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin)-1;
+ }
+
+ /**
+         * Gets a float4 that adjusts the position from Skia device coords to Metal's normalized
+ * device coords. Assuming the transformed position, pos, is a homogeneous float3, the vec,
+ * v, is applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
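+         * For example, for a 100x50 bottom-left-origin target, v = (0.02, -1, -0.04, 1),
+         * so the centered position pos = (50, 25, 1) maps to (0, 0) in normalized device coords.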
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
+
+ void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);
+
+ void bind(id<MTLRenderCommandEncoder>);
+
+ void setBlendConstants(id<MTLRenderCommandEncoder>, const GrSwizzle&, const GrXferProcessor&);
+
+ void setDepthStencilState(id<MTLRenderCommandEncoder> renderCmdEncoder);
+
+ struct SamplerBindings {
+ GrMtlSampler* fSampler;
+ id<MTLTexture> fTexture;
+
+ SamplerBindings(const GrSamplerState& state, GrTexture* texture, GrMtlGpu*);
+ };
+
+ GrMtlGpu* fGpu;
+ id<MTLRenderPipelineState> fPipelineState;
+ MTLPixelFormat fPixelFormat;
+
+ RenderTargetState fRenderTargetState;
+ GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;
+
+ GrStencilSettings fStencil;
+
+ int fNumSamplers;
+ SkTArray<SamplerBindings> fSamplerBindings;
+
+ std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
+ int fFragmentProcessorCnt;
+
+ GrMtlPipelineStateDataManager fDataManager;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.mm
new file mode 100644
index 0000000000..83af603731
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineState.mm
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlPipelineState.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+#include "src/gpu/mtl/GrMtlBuffer.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlTexture.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlPipelineState::SamplerBindings::SamplerBindings(const GrSamplerState& state,
+ GrTexture* texture,
+ GrMtlGpu* gpu)
+ : fTexture(static_cast<GrMtlTexture*>(texture)->mtlTexture()) {
+ fSampler = gpu->resourceProvider().findOrCreateCompatibleSampler(state);
+}
+
+GrMtlPipelineState::GrMtlPipelineState(
+ GrMtlGpu* gpu,
+ id<MTLRenderPipelineState> pipelineState,
+ MTLPixelFormat pixelFormat,
+ const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t uniformBufferSize,
+ uint32_t numSamplers,
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+ std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fragmentProcessorCnt)
+ : fGpu(gpu)
+ , fPipelineState(pipelineState)
+ , fPixelFormat(pixelFormat)
+ , fBuiltinUniformHandles(builtinUniformHandles)
+ , fNumSamplers(numSamplers)
+ , fGeometryProcessor(std::move(geometryProcessor))
+ , fXferProcessor(std::move(xferProcessor))
+ , fFragmentProcessors(std::move(fragmentProcessors))
+ , fFragmentProcessorCnt(fragmentProcessorCnt)
+ , fDataManager(uniforms, uniformBufferSize) {
+ (void) fPixelFormat; // Suppress unused-var warning.
+}
+
+void GrMtlPipelineState::setData(const GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo) {
+
+ // Note: the Metal backend currently only supports fixed primProc textures
+ SkASSERT(!programInfo.hasDynamicPrimProcTextures());
+ auto proxies = programInfo.hasFixedPrimProcTextures() ? programInfo.fixedPrimProcTextures()
+ : nullptr;
+
+ this->setRenderTargetState(renderTarget, programInfo.origin());
+ fGeometryProcessor->setData(fDataManager, programInfo.primProc(),
+ GrFragmentProcessor::CoordTransformIter(programInfo.pipeline()));
+ fSamplerBindings.reset();
+ for (int i = 0; i < programInfo.primProc().numTextureSamplers(); ++i) {
+ const auto& sampler = programInfo.primProc().textureSampler(i);
+ auto texture = static_cast<GrMtlTexture*>(proxies[i]->peekTexture());
+ fSamplerBindings.emplace_back(sampler.samplerState(), texture, fGpu);
+ }
+
+ GrFragmentProcessor::Iter iter(programInfo.pipeline());
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fDataManager, *fp);
+ for (int i = 0; i < fp->numTextureSamplers(); ++i) {
+ const auto& sampler = fp->textureSampler(i);
+ fSamplerBindings.emplace_back(sampler.samplerState(), sampler.peekTexture(), fGpu);
+ }
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+
+ {
+ SkIPoint offset;
+ GrTexture* dstTexture = programInfo.pipeline().peekDstTexture(&offset);
+
+ fXferProcessor->setData(fDataManager, programInfo.pipeline().getXferProcessor(),
+ dstTexture, offset);
+ }
+
+ if (GrTextureProxy* dstTextureProxy = programInfo.pipeline().dstTextureProxy()) {
+ fSamplerBindings.emplace_back(GrSamplerState::ClampNearest(),
+ dstTextureProxy->peekTexture(),
+ fGpu);
+ }
+
+ SkASSERT(fNumSamplers == fSamplerBindings.count());
+ fDataManager.resetDirtyBits();
+
+ if (programInfo.pipeline().isStencilEnabled()) {
+ SkASSERT(renderTarget->renderTargetPriv().getStencilAttachment());
+ fStencil.reset(*programInfo.pipeline().getUserStencil(),
+ programInfo.pipeline().hasStencilClip(),
+ renderTarget->renderTargetPriv().numStencilBits());
+ }
+}
+
+void GrMtlPipelineState::setDrawState(id<MTLRenderCommandEncoder> renderCmdEncoder,
+ const GrSwizzle& outputSwizzle,
+ const GrXferProcessor& xferProcessor) {
+ [renderCmdEncoder pushDebugGroup:@"setDrawState"];
+ this->bind(renderCmdEncoder);
+ this->setBlendConstants(renderCmdEncoder, outputSwizzle, xferProcessor);
+ this->setDepthStencilState(renderCmdEncoder);
+ [renderCmdEncoder popDebugGroup];
+}
+
+void GrMtlPipelineState::bind(id<MTLRenderCommandEncoder> renderCmdEncoder) {
+ fDataManager.uploadAndBindUniformBuffers(fGpu, renderCmdEncoder);
+
+ SkASSERT(fNumSamplers == fSamplerBindings.count());
+ for (int index = 0; index < fNumSamplers; ++index) {
+ [renderCmdEncoder setFragmentTexture: fSamplerBindings[index].fTexture
+ atIndex: index];
+ [renderCmdEncoder setFragmentSamplerState: fSamplerBindings[index].fSampler->mtlSampler()
+ atIndex: index];
+ }
+}
+
+void GrMtlPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
+ fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
+ }
+
+ // set RT adjustment
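+    // (the vertex shader uses this adjustment vector to map Skia device space into
+    // Metal's normalized device coordinates)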
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != origin ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = origin;
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kConstC_GrBlendCoeff:
+ case kIConstC_GrBlendCoeff:
+ case kConstA_GrBlendCoeff:
+ case kIConstA_GrBlendCoeff:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void GrMtlPipelineState::setBlendConstants(id<MTLRenderCommandEncoder> renderCmdEncoder,
+ const GrSwizzle& swizzle,
+ const GrXferProcessor& xferProcessor) {
+ if (!renderCmdEncoder) {
+ return;
+ }
+
+ const GrXferProcessor::BlendInfo& blendInfo = xferProcessor.getBlendInfo();
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
+ // Swizzle the blend to match what the shader will output.
+ SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
+
+ [renderCmdEncoder setBlendColorRed: blendConst.fR
+ green: blendConst.fG
+ blue: blendConst.fB
+ alpha: blendConst.fA];
+ }
+}
+
+void GrMtlPipelineState::setDepthStencilState(id<MTLRenderCommandEncoder> renderCmdEncoder) {
+ const GrSurfaceOrigin& origin = fRenderTargetState.fRenderTargetOrigin;
+ GrMtlDepthStencil* state =
+ fGpu->resourceProvider().findOrCreateCompatibleDepthStencilState(fStencil, origin);
+ if (!fStencil.isDisabled()) {
+ if (fStencil.isTwoSided()) {
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ [renderCmdEncoder setStencilFrontReferenceValue:fStencil.front(origin).fRef
+ backReferenceValue:fStencil.back(origin).fRef];
+ } else {
+                // Two-sided stencil is not supported on older versions of iOS.
+ // TODO: Find a way to recover from this
+ SkASSERT(false);
+ }
+ } else {
+ [renderCmdEncoder setStencilReferenceValue:fStencil.frontAndBack().fRef];
+ }
+ }
+ [renderCmdEncoder setDepthStencilState:state->mtlDepthStencil()];
+}
+
+void GrMtlPipelineState::SetDynamicScissorRectState(id<MTLRenderCommandEncoder> renderCmdEncoder,
+ const GrRenderTarget* renderTarget,
+ GrSurfaceOrigin rtOrigin,
+ SkIRect scissorRect) {
+ if (!scissorRect.intersect(SkIRect::MakeWH(renderTarget->width(), renderTarget->height()))) {
+ scissorRect.setEmpty();
+ }
+
+ MTLScissorRect scissor;
+ scissor.x = scissorRect.fLeft;
+ scissor.width = scissorRect.width();
+ if (kTopLeft_GrSurfaceOrigin == rtOrigin) {
+ scissor.y = scissorRect.fTop;
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == rtOrigin);
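+        // Metal's scissor origin is top-left, so flip y for bottom-left-origin targets.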
+ scissor.y = renderTarget->height() - scissorRect.fBottom;
+ }
+ scissor.height = scissorRect.height();
+
+ SkASSERT(scissor.x >= 0);
+ SkASSERT(scissor.y >= 0);
+
+ [renderCmdEncoder setScissorRect: scissor];
+}
+
+bool GrMtlPipelineState::doesntSampleAttachment(
+ const MTLRenderPassAttachmentDescriptor* attachment) const {
+ for (int i = 0; i < fSamplerBindings.count(); ++i) {
+ if (attachment.texture == fSamplerBindings[i].fTexture) {
+ return false;
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.h b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.h
new file mode 100644
index 0000000000..fda1e6583d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlPipelineStateBuilder_DEFINED
+#define GrMtlPipelineStateBuilder_DEFINED
+
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/mtl/GrMtlUniformHandler.h"
+#include "src/gpu/mtl/GrMtlVaryingHandler.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#import <Metal/Metal.h>
+
+class GrProgramInfo;
+class GrMtlGpu;
+class GrMtlPipelineState;
+
+class GrMtlPipelineStateBuilder : public GrGLSLProgramBuilder {
+public:
+ /**
+     * For Metal we want to cache the entire pipeline so it can be reused across draws. The
+     * Desc here holds all the information needed to differentiate one pipeline from another.
+ *
+     * The GrProgramDesc contains all the information needed to create the actual shaders for the
+ * pipeline.
+ *
+     * For Metal we extend the GrProgramDesc to include the rest of the state on the
+     * pipeline, such as blending information and the primitive type. The pipeline is
+     * immutable, so any remaining dynamic state is set via the MtlRenderCmdEncoder.
+ */
+ class Desc : public GrProgramDesc {
+ public:
+ static bool Build(Desc*, GrRenderTarget*,
+ const GrProgramInfo&, GrPrimitiveType, GrMtlGpu* gpu);
+
+ size_t shaderKeyLength() const { return fShaderKeyLength; }
+
+ private:
+ size_t fShaderKeyLength;
+
+ typedef GrProgramDesc INHERITED;
+ };
+
+ /** Generates a pipeline state.
+ *
+     * The returned GrMtlPipelineState implements the state specified by the GrPipeline and
+     * GrPrimitiveProcessor inputs. After successful generation, the builder's result objects are
+ * available to be used. This function may modify the program key by setting the surface origin
+ * key to 0 (unspecified) if it turns out the program does not care about the surface origin.
+ * @return true if generation was successful.
+ */
+ static GrMtlPipelineState* CreatePipelineState(GrMtlGpu*,
+ GrRenderTarget*,
+ const GrProgramInfo&,
+ Desc*);
+
+private:
+ GrMtlPipelineStateBuilder(GrMtlGpu*, GrRenderTarget*, const GrProgramInfo&, GrProgramDesc*);
+
+ GrMtlPipelineState* finalize(GrRenderTarget*, const GrProgramInfo&, Desc*);
+
+ const GrCaps* caps() const override;
+
+ void finalizeFragmentOutputColor(GrShaderVar& outputColor) override;
+
+ void finalizeFragmentSecondaryColor(GrShaderVar& outputColor) override;
+
+ id<MTLLibrary> createMtlShaderLibrary(const GrGLSLShaderBuilder& builder,
+ SkSL::Program::Kind kind,
+ const SkSL::Program::Settings& settings,
+ GrProgramDesc* desc);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrMtlGpu* fGpu;
+ GrMtlUniformHandler fUniformHandler;
+ GrMtlVaryingHandler fVaryingHandler;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
new file mode 100644
index 0000000000..090c41b9de
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlPipelineState.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#include "src/gpu/GrRenderTargetPriv.h"
+
+#import <simd/simd.h>
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlPipelineState* GrMtlPipelineStateBuilder::CreatePipelineState(GrMtlGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ Desc* desc) {
+ GrMtlPipelineStateBuilder builder(gpu, renderTarget, programInfo, desc);
+
+ if (!builder.emitAndInstallProcs()) {
+ return nullptr;
+ }
+ return builder.finalize(renderTarget, programInfo, desc);
+}
+
+GrMtlPipelineStateBuilder::GrMtlPipelineStateBuilder(GrMtlGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrProgramDesc* desc)
+ : INHERITED(renderTarget, programInfo, desc)
+ , fGpu(gpu)
+ , fUniformHandler(this)
+ , fVaryingHandler(this) {
+}
+
+const GrCaps* GrMtlPipelineStateBuilder::caps() const {
+ return fGpu->caps();
+}
+
+void GrMtlPipelineStateBuilder::finalizeFragmentOutputColor(GrShaderVar& outputColor) {
+ outputColor.addLayoutQualifier("location = 0, index = 0");
+}
+
+void GrMtlPipelineStateBuilder::finalizeFragmentSecondaryColor(GrShaderVar& outputColor) {
+ outputColor.addLayoutQualifier("location = 0, index = 1");
+}
+
+id<MTLLibrary> GrMtlPipelineStateBuilder::createMtlShaderLibrary(
+ const GrGLSLShaderBuilder& builder,
+ SkSL::Program::Kind kind,
+ const SkSL::Program::Settings& settings,
+ GrProgramDesc* desc) {
+ SkSL::Program::Inputs inputs;
+ id<MTLLibrary> shaderLibrary = GrCompileMtlShaderLibrary(fGpu, builder.fCompilerString.c_str(),
+ kind, settings, &inputs);
+ if (shaderLibrary == nil) {
+ return nil;
+ }
+ if (inputs.fRTHeight) {
+ this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
+ }
+ return shaderLibrary;
+}
+
+static inline MTLVertexFormat attribute_type_to_mtlformat(GrVertexAttribType type) {
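+    // Formats introduced in macOS 10.13 / iOS 11 (the single-channel Char/UChar/Half
+    // variants and UShortNormalized) fall back to MTLVertexFormatInvalid on older OS
+    // versions; the vertex-descriptor code asserts that the returned format is valid.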
+ switch (type) {
+ case kFloat_GrVertexAttribType:
+ return MTLVertexFormatFloat;
+ case kFloat2_GrVertexAttribType:
+ return MTLVertexFormatFloat2;
+ case kFloat3_GrVertexAttribType:
+ return MTLVertexFormatFloat3;
+ case kFloat4_GrVertexAttribType:
+ return MTLVertexFormatFloat4;
+ case kHalf_GrVertexAttribType:
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ return MTLVertexFormatHalf;
+ } else {
+ return MTLVertexFormatInvalid;
+ }
+ case kHalf2_GrVertexAttribType:
+ return MTLVertexFormatHalf2;
+ case kHalf3_GrVertexAttribType:
+ return MTLVertexFormatHalf3;
+ case kHalf4_GrVertexAttribType:
+ return MTLVertexFormatHalf4;
+ case kInt2_GrVertexAttribType:
+ return MTLVertexFormatInt2;
+ case kInt3_GrVertexAttribType:
+ return MTLVertexFormatInt3;
+ case kInt4_GrVertexAttribType:
+ return MTLVertexFormatInt4;
+ case kByte_GrVertexAttribType:
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ return MTLVertexFormatChar;
+ } else {
+ return MTLVertexFormatInvalid;
+ }
+ case kByte2_GrVertexAttribType:
+ return MTLVertexFormatChar2;
+ case kByte3_GrVertexAttribType:
+ return MTLVertexFormatChar3;
+ case kByte4_GrVertexAttribType:
+ return MTLVertexFormatChar4;
+ case kUByte_GrVertexAttribType:
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ return MTLVertexFormatUChar;
+ } else {
+ return MTLVertexFormatInvalid;
+ }
+ case kUByte2_GrVertexAttribType:
+ return MTLVertexFormatUChar2;
+ case kUByte3_GrVertexAttribType:
+ return MTLVertexFormatUChar3;
+ case kUByte4_GrVertexAttribType:
+ return MTLVertexFormatUChar4;
+ case kUByte_norm_GrVertexAttribType:
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ return MTLVertexFormatUCharNormalized;
+ } else {
+ return MTLVertexFormatInvalid;
+ }
+ case kUByte4_norm_GrVertexAttribType:
+ return MTLVertexFormatUChar4Normalized;
+ case kShort2_GrVertexAttribType:
+ return MTLVertexFormatShort2;
+ case kShort4_GrVertexAttribType:
+ return MTLVertexFormatShort4;
+ case kUShort2_GrVertexAttribType:
+ return MTLVertexFormatUShort2;
+ case kUShort2_norm_GrVertexAttribType:
+ return MTLVertexFormatUShort2Normalized;
+ case kInt_GrVertexAttribType:
+ return MTLVertexFormatInt;
+ case kUint_GrVertexAttribType:
+ return MTLVertexFormatUInt;
+ case kUShort_norm_GrVertexAttribType:
+ if (@available(macOS 10.13, iOS 11.0, *)) {
+ return MTLVertexFormatUShortNormalized;
+ } else {
+ return MTLVertexFormatInvalid;
+ }
+ case kUShort4_norm_GrVertexAttribType:
+ return MTLVertexFormatUShort4Normalized;
+ }
+ SK_ABORT("Unknown vertex attribute type");
+}
+
+static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor& primProc) {
+ uint32_t vertexBinding = 0, instanceBinding = 0;
+
+ int nextBinding = GrMtlUniformHandler::kLastUniformBinding + 1;
+ if (primProc.hasVertexAttributes()) {
+ vertexBinding = nextBinding++;
+ }
+
+ if (primProc.hasInstanceAttributes()) {
+ instanceBinding = nextBinding;
+ }
+
+ auto vertexDescriptor = [[MTLVertexDescriptor alloc] init];
+ int attributeIndex = 0;
+
+ int vertexAttributeCount = primProc.numVertexAttributes();
+ size_t vertexAttributeOffset = 0;
+ for (const auto& attribute : primProc.vertexAttributes()) {
+ MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
+ mtlAttribute.format = attribute_type_to_mtlformat(attribute.cpuType());
+ SkASSERT(MTLVertexFormatInvalid != mtlAttribute.format);
+ mtlAttribute.offset = vertexAttributeOffset;
+ mtlAttribute.bufferIndex = vertexBinding;
+
+ vertexAttributeOffset += attribute.sizeAlign4();
+ attributeIndex++;
+ }
+ SkASSERT(vertexAttributeOffset == primProc.vertexStride());
+
+ if (vertexAttributeCount) {
+ MTLVertexBufferLayoutDescriptor* vertexBufferLayout =
+ vertexDescriptor.layouts[vertexBinding];
+ vertexBufferLayout.stepFunction = MTLVertexStepFunctionPerVertex;
+ vertexBufferLayout.stepRate = 1;
+ vertexBufferLayout.stride = vertexAttributeOffset;
+ }
+
+ int instanceAttributeCount = primProc.numInstanceAttributes();
+ size_t instanceAttributeOffset = 0;
+ for (const auto& attribute : primProc.instanceAttributes()) {
+ MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
+ mtlAttribute.format = attribute_type_to_mtlformat(attribute.cpuType());
+ mtlAttribute.offset = instanceAttributeOffset;
+ mtlAttribute.bufferIndex = instanceBinding;
+
+ instanceAttributeOffset += attribute.sizeAlign4();
+ attributeIndex++;
+ }
+ SkASSERT(instanceAttributeOffset == primProc.instanceStride());
+
+ if (instanceAttributeCount) {
+ MTLVertexBufferLayoutDescriptor* instanceBufferLayout =
+ vertexDescriptor.layouts[instanceBinding];
+ instanceBufferLayout.stepFunction = MTLVertexStepFunctionPerInstance;
+ instanceBufferLayout.stepRate = 1;
+ instanceBufferLayout.stride = instanceAttributeOffset;
+ }
+ return vertexDescriptor;
+}
+
+static MTLBlendFactor blend_coeff_to_mtl_blend(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kZero_GrBlendCoeff:
+ return MTLBlendFactorZero;
+ case kOne_GrBlendCoeff:
+ return MTLBlendFactorOne;
+ case kSC_GrBlendCoeff:
+ return MTLBlendFactorSourceColor;
+ case kISC_GrBlendCoeff:
+ return MTLBlendFactorOneMinusSourceColor;
+ case kDC_GrBlendCoeff:
+ return MTLBlendFactorDestinationColor;
+ case kIDC_GrBlendCoeff:
+ return MTLBlendFactorOneMinusDestinationColor;
+ case kSA_GrBlendCoeff:
+ return MTLBlendFactorSourceAlpha;
+ case kISA_GrBlendCoeff:
+ return MTLBlendFactorOneMinusSourceAlpha;
+ case kDA_GrBlendCoeff:
+ return MTLBlendFactorDestinationAlpha;
+ case kIDA_GrBlendCoeff:
+ return MTLBlendFactorOneMinusDestinationAlpha;
+ case kConstC_GrBlendCoeff:
+ return MTLBlendFactorBlendColor;
+ case kIConstC_GrBlendCoeff:
+ return MTLBlendFactorOneMinusBlendColor;
+ case kConstA_GrBlendCoeff:
+ return MTLBlendFactorBlendAlpha;
+ case kIConstA_GrBlendCoeff:
+ return MTLBlendFactorOneMinusBlendAlpha;
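+        // The Source1 (dual-source) blend factors below require macOS 10.12 / iOS 11.
+        // On older OS versions we fall back to Zero, on the assumption that caps keep
+        // dual-source blending from being used there.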
+ case kS2C_GrBlendCoeff:
+ if (@available(macOS 10.12, iOS 11.0, *)) {
+ return MTLBlendFactorSource1Color;
+ } else {
+ return MTLBlendFactorZero;
+ }
+ case kIS2C_GrBlendCoeff:
+ if (@available(macOS 10.12, iOS 11.0, *)) {
+ return MTLBlendFactorOneMinusSource1Color;
+ } else {
+ return MTLBlendFactorZero;
+ }
+ case kS2A_GrBlendCoeff:
+ if (@available(macOS 10.12, iOS 11.0, *)) {
+ return MTLBlendFactorSource1Alpha;
+ } else {
+ return MTLBlendFactorZero;
+ }
+ case kIS2A_GrBlendCoeff:
+ if (@available(macOS 10.12, iOS 11.0, *)) {
+ return MTLBlendFactorOneMinusSource1Alpha;
+ } else {
+ return MTLBlendFactorZero;
+ }
+ case kIllegal_GrBlendCoeff:
+ return MTLBlendFactorZero;
+ }
+
+ SK_ABORT("Unknown blend coefficient");
+}
+
+static MTLBlendOperation blend_equation_to_mtl_blend_op(GrBlendEquation equation) {
+ static const MTLBlendOperation gTable[] = {
+ MTLBlendOperationAdd, // kAdd_GrBlendEquation
+ MTLBlendOperationSubtract, // kSubtract_GrBlendEquation
+ MTLBlendOperationReverseSubtract, // kReverseSubtract_GrBlendEquation
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+ GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+
+ SkASSERT((unsigned)equation < kGrBlendEquationCnt);
+ return gTable[equation];
+}
+
+static MTLRenderPipelineColorAttachmentDescriptor* create_color_attachment(
+ GrPixelConfig config, const GrPipeline& pipeline) {
+ auto mtlColorAttachment = [[MTLRenderPipelineColorAttachmentDescriptor alloc] init];
+
+ // pixel format
+ MTLPixelFormat format;
+ SkAssertResult(GrPixelConfigToMTLFormat(config, &format));
+ mtlColorAttachment.pixelFormat = format;
+
+ // blending
+ const GrXferProcessor::BlendInfo& blendInfo = pipeline.getXferProcessor().getBlendInfo();
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
+
+ mtlColorAttachment.blendingEnabled = !blendOff;
+ if (!blendOff) {
+ mtlColorAttachment.sourceRGBBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
+ mtlColorAttachment.destinationRGBBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
+ mtlColorAttachment.rgbBlendOperation = blend_equation_to_mtl_blend_op(equation);
+ mtlColorAttachment.sourceAlphaBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
+ mtlColorAttachment.destinationAlphaBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
+ mtlColorAttachment.alphaBlendOperation = blend_equation_to_mtl_blend_op(equation);
+ }
+
+ if (!blendInfo.fWriteColor) {
+ mtlColorAttachment.writeMask = MTLColorWriteMaskNone;
+ } else {
+ mtlColorAttachment.writeMask = MTLColorWriteMaskAll;
+ }
+ return mtlColorAttachment;
+}
+
+uint32_t buffer_size(uint32_t offset, uint32_t maxAlignment) {
+ // Metal expects the buffer to be padded at the end according to the alignment
+ // of the largest element in the buffer.
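+    // For example (illustrative values): if maxAlignment is an alignment mask such as
+    // 0xF for 16-byte alignment, an offset of 20 yields offsetDiff = 4, padding of
+    // 15 - 4 + 1 = 12, and a padded size of 32.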
+ uint32_t offsetDiff = offset & maxAlignment;
+ if (offsetDiff != 0) {
+ offsetDiff = maxAlignment - offsetDiff + 1;
+ }
+ return offset + offsetDiff;
+}
+
+GrMtlPipelineState* GrMtlPipelineStateBuilder::finalize(GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ Desc* desc) {
+ auto pipelineDescriptor = [MTLRenderPipelineDescriptor new];
+
+ fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ this->finalizeShaders();
+
+ SkSL::Program::Settings settings;
+ settings.fCaps = this->caps()->shaderCaps();
+ settings.fFlipY = this->origin() != kTopLeft_GrSurfaceOrigin;
+ settings.fSharpenTextures = fGpu->getContext()->priv().options().fSharpenMipmappedTextures;
+ SkASSERT(!this->fragColorIsInOut());
+
+ // TODO: Store shaders in cache
+ id<MTLLibrary> vertexLibrary = nil;
+ id<MTLLibrary> fragmentLibrary = nil;
+ vertexLibrary = this->createMtlShaderLibrary(fVS,
+ SkSL::Program::kVertex_Kind,
+ settings,
+ desc);
+ fragmentLibrary = this->createMtlShaderLibrary(fFS,
+ SkSL::Program::kFragment_Kind,
+ settings,
+ desc);
+ SkASSERT(!this->primitiveProcessor().willUseGeoShader());
+
+ if (!vertexLibrary || !fragmentLibrary) {
+ return nullptr;
+ }
+
+ id<MTLFunction> vertexFunction = [vertexLibrary newFunctionWithName: @"vertexMain"];
+ id<MTLFunction> fragmentFunction = [fragmentLibrary newFunctionWithName: @"fragmentMain"];
+
+ if (vertexFunction == nil) {
+ SkDebugf("Couldn't find vertexMain() in library\n");
+ return nullptr;
+ }
+ if (fragmentFunction == nil) {
+ SkDebugf("Couldn't find fragmentMain() in library\n");
+ return nullptr;
+ }
+
+ pipelineDescriptor.vertexFunction = vertexFunction;
+ pipelineDescriptor.fragmentFunction = fragmentFunction;
+ pipelineDescriptor.vertexDescriptor = create_vertex_descriptor(programInfo.primProc());
+ pipelineDescriptor.colorAttachments[0] = create_color_attachment(renderTarget->config(),
+ programInfo.pipeline());
+ pipelineDescriptor.sampleCount = renderTarget->numSamples();
+ bool hasStencilAttachment = SkToBool(renderTarget->renderTargetPriv().getStencilAttachment());
+ GrMtlCaps* mtlCaps = (GrMtlCaps*)this->caps();
+ pipelineDescriptor.stencilAttachmentPixelFormat =
+ hasStencilAttachment ? mtlCaps->preferredStencilFormat().fInternalFormat
+ : MTLPixelFormatInvalid;
+
+ SkASSERT(pipelineDescriptor.vertexFunction);
+ SkASSERT(pipelineDescriptor.fragmentFunction);
+ SkASSERT(pipelineDescriptor.vertexDescriptor);
+ SkASSERT(pipelineDescriptor.colorAttachments[0]);
+
+#if defined(SK_BUILD_FOR_MAC) && defined(GR_USE_COMPLETION_HANDLER)
+ bool timedout;
+ id<MTLRenderPipelineState> pipelineState = GrMtlNewRenderPipelineStateWithDescriptor(
+ fGpu->device(), pipelineDescriptor, &timedout);
+ if (timedout) {
+ // try a second time
+ pipelineState = GrMtlNewRenderPipelineStateWithDescriptor(
+ fGpu->device(), pipelineDescriptor, &timedout);
+ }
+ if (!pipelineState) {
+ return nullptr;
+ }
+#else
+ NSError* error = nil;
+ id<MTLRenderPipelineState> pipelineState =
+ [fGpu->device() newRenderPipelineStateWithDescriptor: pipelineDescriptor
+ error: &error];
+ if (error) {
+ SkDebugf("Error creating pipeline: %s\n",
+ [[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
+ return nullptr;
+ }
+#endif
+
+ uint32_t bufferSize = buffer_size(fUniformHandler.fCurrentUBOOffset,
+ fUniformHandler.fCurrentUBOMaxAlignment);
+ return new GrMtlPipelineState(fGpu,
+ pipelineState,
+ pipelineDescriptor.colorAttachments[0].pixelFormat,
+ fUniformHandles,
+ fUniformHandler.fUniforms,
+ bufferSize,
+ (uint32_t)fUniformHandler.numSamplers(),
+ std::move(fGeometryProcessor),
+ std::move(fXferProcessor),
+ std::move(fFragmentProcessors),
+ fFragmentProcessorCnt);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrMtlPipelineStateBuilder::Desc::Build(Desc* desc,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ GrMtlGpu* gpu) {
+ if (!GrProgramDesc::Build(desc, renderTarget, programInfo, primitiveType, gpu)) {
+ return false;
+ }
+
+ GrProcessorKeyBuilder b(&desc->key());
+
+ int keyLength = desc->key().count();
+ SkASSERT(0 == (keyLength % 4));
+ desc->fShaderKeyLength = SkToU32(keyLength);
+
+ b.add32(renderTarget->config());
+ b.add32(renderTarget->numSamples());
+ bool hasStencilAttachment = SkToBool(renderTarget->renderTargetPriv().getStencilAttachment());
+ b.add32(hasStencilAttachment ? gpu->mtlCaps().preferredStencilFormat().fInternalFormat
+ : MTLPixelFormatInvalid);
+ b.add32((uint32_t)programInfo.pipeline().isStencilEnabled());
+ // Stencil samples don't seem to be tracked in the MTLRenderPipeline
+
+ b.add32(programInfo.pipeline().getBlendInfoKey());
+
+ b.add32((uint32_t)primitiveType);
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.h b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.h
new file mode 100644
index 0000000000..b3a3dac7c6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlPipelineStateDataManager_DEFINED
+#define GrMtlPipelineStateDataManager_DEFINED
+
+#include "src/core/SkAutoMalloc.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/mtl/GrMtlUniformHandler.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlBuffer;
+class GrMtlGpu;
+
+class GrMtlPipelineStateDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrMtlUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrMtlPipelineStateDataManager(const UniformInfoArray&,
+ uint32_t uniformSize);
+
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2i(UniformHandle, int32_t, int32_t) const override;
+ void set2iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3i(UniformHandle, int32_t, int32_t, int32_t) const override;
+ void set3iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4i(UniformHandle, int32_t, int32_t, int32_t, int32_t) const override;
+ void set4iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+    // Matrices are column-major. The setMatrix*f variants upload a single matrix; the
+    // setMatrix*fv variants upload arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SK_ABORT("Only supported in NVPR, which is not in Metal");
+ }
+
+ void uploadAndBindUniformBuffers(GrMtlGpu* gpu,
+ id<MTLRenderCommandEncoder> renderCmdEncoder) const;
+ void resetDirtyBits();
+
+private:
+ struct Uniform {
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ void* getBufferPtrAndMarkDirty(const Uniform& uni) const;
+
+ uint32_t fUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fUniformData;
+ mutable bool fUniformsDirty;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.mm
new file mode 100644
index 0000000000..1f2af241ad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlPipelineStateDataManager.mm
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlPipelineStateDataManager.h"
+
+#include "src/gpu/mtl/GrMtlBuffer.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlPipelineStateDataManager::GrMtlPipelineStateDataManager(const UniformInfoArray& uniforms,
+ uint32_t uniformSize)
+ : fUniformSize(uniformSize)
+ , fUniformsDirty(false) {
+ fUniformData.reset(uniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+    // We must add uniforms in the same order as the UniformInfoArray so that UniformHandles
+    // already owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrMtlUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkASSERT(GrShaderVar::kNonArray == uniformInfo.fVariable.getArrayCount() ||
+ uniformInfo.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
+ uniform.fType = uniformInfo.fVariable.getType();
+ );
+ uniform.fOffset = uniformInfo.fUBOffset;
+ }
+}
+
+void* GrMtlPipelineStateDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
+ fUniformsDirty = true;
+ return static_cast<char*>(fUniformData.get())+uni.fOffset;
+}
+
+void GrMtlPipelineStateDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, &i, sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set1iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrMtlPipelineStateDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set2i(UniformHandle u, int32_t i0, int32_t i1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[2] = { i0, i1 };
+ memcpy(buffer, v, 2 * sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set2iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ memcpy(buffer, v, arrayCount*sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 2 * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set3i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[3] = { i0, i1, i2 };
+ memcpy(buffer, v, 3 * sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set3iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrMtlPipelineStateDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrMtlPipelineStateDataManager::set4i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2,
+ int32_t i3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[4] = { i0, i1, i2, i3 };
+ memcpy(buffer, v, 4 * sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set4iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(int32_t));
+}
+
+void GrMtlPipelineStateDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
+void GrMtlPipelineStateDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrMtlPipelineStateDataManager::setMatrix2fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrMtlPipelineStateDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrMtlPipelineStateDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrMtlPipelineStateDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrMtlPipelineStateDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrMtlPipelineStateDataManager::setMatrices(
+ UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2x2_GrSLType + (N - 2) ||
+ uni.fType == kHalf2x2_GrSLType + (N - 2));
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ fUniformsDirty = true;
+ set_uniform_matrix<N>::set(fUniformData.get(), uni.fOffset, arrayCount, matrices);
+}
+
+template<> struct set_uniform_matrix<2> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ memcpy(buffer, matrices, count * 4 * sizeof(float));
+ }
+};
+
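+// In the uniform buffer layout each column of a 3x3 matrix is padded out to four
+// floats (16 bytes), so the <3> specialization copies three floats per column and
+// advances the destination pointer by four.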
+template<> struct set_uniform_matrix<3> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ for (int i = 0; i < count; ++i) {
+ const float* matrix = &matrices[3 * 3 * i];
+ for (int j = 0; j < 3; ++j) {
+ memcpy(buffer, &matrix[j * 3], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+ }
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ memcpy(buffer, matrices, count * 16 * sizeof(float));
+ }
+};
+
+void GrMtlPipelineStateDataManager::uploadAndBindUniformBuffers(
+ GrMtlGpu* gpu,
+ id<MTLRenderCommandEncoder> renderCmdEncoder) const {
+ if (fUniformSize && fUniformsDirty) {
+ SkASSERT(fUniformSize < 4*1024);
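+        // set*Bytes copies the data inline without a separate MTLBuffer allocation,
+        // but is limited to 4KB, which the assert above guarantees.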
+ if (@available(macOS 10.11, iOS 8.3, *)) {
+ [renderCmdEncoder setVertexBytes: fUniformData.get()
+ length: fUniformSize
+ atIndex: GrMtlUniformHandler::kUniformBinding];
+ [renderCmdEncoder setFragmentBytes: fUniformData.get()
+ length: fUniformSize
+ atIndex: GrMtlUniformHandler::kUniformBinding];
+ } else {
+ size_t bufferOffset;
+ id<MTLBuffer> uniformBuffer = gpu->resourceProvider().getDynamicBuffer(
+ fUniformSize, &bufferOffset);
+ SkASSERT(uniformBuffer);
+ char* bufferData = (char*) uniformBuffer.contents + bufferOffset;
+ memcpy(bufferData, fUniformData.get(), fUniformSize);
+ [renderCmdEncoder setVertexBuffer: uniformBuffer
+ offset: bufferOffset
+ atIndex: GrMtlUniformHandler::kUniformBinding];
+ [renderCmdEncoder setFragmentBuffer: uniformBuffer
+ offset: bufferOffset
+ atIndex: GrMtlUniformHandler::kUniformBinding];
+ }
+ fUniformsDirty = false;
+ }
+}
+
+void GrMtlPipelineStateDataManager::resetDirtyBits() {
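+    // Despite the name, this marks the uniform data dirty so the next bind re-uploads
+    // it for the newly set pipeline state.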
+ fUniformsDirty = true;
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.h b/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.h
new file mode 100644
index 0000000000..4ae34d75ae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlRenderTarget_DEFINED
+#define GrMtlRenderTarget_DEFINED
+
+#include "src/gpu/GrRenderTarget.h"
+
+#include "include/gpu/GrBackendSurface.h"
+#include "src/gpu/GrGpu.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlGpu;
+
+class GrMtlRenderTarget: public GrRenderTarget {
+public:
+ static sk_sp<GrMtlRenderTarget> MakeWrappedRenderTarget(GrMtlGpu*,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ id<MTLTexture>);
+
+ ~GrMtlRenderTarget() override;
+
+ bool canAttemptStencilAttachment() const override {
+ return true;
+ }
+
+ id<MTLTexture> mtlColorTexture() const { return fColorTexture; }
+ id<MTLTexture> mtlResolveTexture() const { return fResolveTexture; }
+
+ GrBackendRenderTarget getBackendRenderTarget() const override;
+
+ GrBackendFormat backendFormat() const override;
+
+protected:
+ GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture);
+
+ GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture);
+
+ GrMtlGpu* getMtlGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ int numColorSamples = this->numSamples();
+ // TODO: When used as render targets certain formats may actually have a larger size than
+ // the base format size. Check to make sure we are reporting the correct value here.
+        // The plus one accounts for the resolve texture, or for the RT itself when not using MSAA.
+ if (numColorSamples > 1) {
+ ++numColorSamples;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, GrMipMapped::kNo);
+ }
+
+ id<MTLTexture> fColorTexture;
+ id<MTLTexture> fResolveTexture;
+
+private:
+ // Extra param to disambiguate from constructor used by subclasses.
+ enum Wrapped { kWrapped };
+ GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ Wrapped);
+ GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ Wrapped);
+
+ bool completeStencilAttachment() override;
+
+ typedef GrRenderTarget INHERITED;
+};
+
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.mm
new file mode 100644
index 0000000000..b01066fec9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlRenderTarget.mm
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlRenderTarget.h"
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+// Called for wrapped non-texture render targets.
+GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ Wrapped)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrRenderTarget(
+ gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, GrProtected::kNo)
+ , fColorTexture(colorTexture)
+ , fResolveTexture(resolveTexture) {
+ SkASSERT(sampleCnt > 1);
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ Wrapped)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, 1, GrProtected::kNo)
+ , fColorTexture(colorTexture)
+ , fResolveTexture(nil) {
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+// Called by subclass constructors.
+GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrRenderTarget(
+ gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, GrProtected::kNo)
+ , fColorTexture(colorTexture)
+ , fResolveTexture(resolveTexture) {
+ SkASSERT(sampleCnt > 1);
+}
+
+GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, 1, GrProtected::kNo)
+ , fColorTexture(colorTexture)
+ , fResolveTexture(nil) {}
+
+sk_sp<GrMtlRenderTarget> GrMtlRenderTarget::MakeWrappedRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> texture) {
+ SkASSERT(nil != texture);
+ SkASSERT(1 == texture.mipmapLevelCount);
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT(MTLTextureUsageRenderTarget & texture.usage);
+ }
+
+ GrMtlRenderTarget* mtlRT;
+ if (sampleCnt > 1) {
+ MTLPixelFormat format;
+ if (!GrPixelConfigToMTLFormat(desc.fConfig, &format)) {
+ return nullptr;
+ }
+ MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
+ texDesc.textureType = MTLTextureType2DMultisample;
+ texDesc.pixelFormat = format;
+ texDesc.width = desc.fWidth;
+ texDesc.height = desc.fHeight;
+ texDesc.depth = 1;
+ texDesc.mipmapLevelCount = 1;
+ texDesc.sampleCount = sampleCnt;
+ texDesc.arrayLength = 1;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ texDesc.storageMode = MTLStorageModePrivate;
+ texDesc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
+ }
+
+ id<MTLTexture> colorTexture = [gpu->device() newTextureWithDescriptor:texDesc];
+ if (!colorTexture) {
+ return nullptr;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
+ }
+ mtlRT = new GrMtlRenderTarget(gpu, desc, sampleCnt, colorTexture, texture, kWrapped);
+ } else {
+ mtlRT = new GrMtlRenderTarget(gpu, desc, texture, kWrapped);
+ }
+
+ return sk_sp<GrMtlRenderTarget>(mtlRT);
+}
+
+GrMtlRenderTarget::~GrMtlRenderTarget() {
+ SkASSERT(nil == fColorTexture);
+ SkASSERT(nil == fResolveTexture);
+}
+
+GrBackendRenderTarget GrMtlRenderTarget::getBackendRenderTarget() const {
+ GrMtlTextureInfo info;
+ info.fTexture.reset(GrRetainPtrFromId(fColorTexture));
+ return GrBackendRenderTarget(this->width(), this->height(), fColorTexture.sampleCount, info);
+}
+
+GrBackendFormat GrMtlRenderTarget::backendFormat() const {
+ return GrBackendFormat::MakeMtl(fColorTexture.pixelFormat);
+}
+
+GrMtlGpu* GrMtlRenderTarget::getMtlGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrMtlGpu*>(this->getGpu());
+}
+
+void GrMtlRenderTarget::onAbandon() {
+ fColorTexture = nil;
+ fResolveTexture = nil;
+ INHERITED::onAbandon();
+}
+
+void GrMtlRenderTarget::onRelease() {
+ fColorTexture = nil;
+ fResolveTexture = nil;
+ INHERITED::onRelease();
+}
+
+bool GrMtlRenderTarget::completeStencilAttachment() {
+ return true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.h b/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.h
new file mode 100644
index 0000000000..0b8d2194e5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+*/
+
+#ifndef GrMtlResourceProvider_DEFINED
+#define GrMtlResourceProvider_DEFINED
+
+#include "include/private/SkSpinlock.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkLRUCache.h"
+#include "src/gpu/mtl/GrMtlDepthStencil.h"
+#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
+#include "src/gpu/mtl/GrMtlSampler.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlGpu;
+class GrMtlCommandBuffer;
+
+class GrMtlResourceProvider {
+public:
+ GrMtlResourceProvider(GrMtlGpu* gpu);
+
+ GrMtlPipelineState* findOrCreateCompatiblePipelineState(GrRenderTarget*,
+ const GrProgramInfo&,
+ GrPrimitiveType);
+
+ // Finds or creates a compatible MTLDepthStencilState based on the GrStencilSettings.
+ GrMtlDepthStencil* findOrCreateCompatibleDepthStencilState(const GrStencilSettings&,
+ GrSurfaceOrigin);
+
+ // Finds or creates a compatible MTLSamplerState based on the GrSamplerState.
+ GrMtlSampler* findOrCreateCompatibleSampler(const GrSamplerState&);
+
+ id<MTLBuffer> getDynamicBuffer(size_t size, size_t* offset);
+ void addBufferCompletionHandler(GrMtlCommandBuffer* cmdBuffer);
+
+ // Destroy any cached resources. To be called before releasing the MtlDevice.
+ void destroyResources();
+
+private:
+#ifdef SK_DEBUG
+#define GR_PIPELINE_STATE_CACHE_STATS
+#endif
+
+ class PipelineStateCache : public ::SkNoncopyable {
+ public:
+ PipelineStateCache(GrMtlGpu* gpu);
+ ~PipelineStateCache();
+
+ void release();
+ GrMtlPipelineState* refPipelineState(GrRenderTarget*, const GrProgramInfo&,
+ GrPrimitiveType);
+
+ private:
+ struct Entry;
+
+ struct DescHash {
+ uint32_t operator()(const GrProgramDesc& desc) const {
+ return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
+ }
+ };
+
+ SkLRUCache<const GrMtlPipelineStateBuilder::Desc, std::unique_ptr<Entry>, DescHash> fMap;
+
+ GrMtlGpu* fGpu;
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ int fTotalRequests;
+ int fCacheMisses;
+#endif
+ };
+
+ // Buffer allocator
+ class BufferSuballocator : public SkRefCnt {
+ public:
+ BufferSuballocator(id<MTLDevice> device, size_t size);
+ ~BufferSuballocator() {
+ fBuffer = nil;
+ fTotalSize = 0;
+ }
+
+ id<MTLBuffer> getAllocation(size_t size, size_t* offset);
+ void addCompletionHandler(GrMtlCommandBuffer* cmdBuffer);
+ size_t size() { return fTotalSize; }
+
+ private:
+ id<MTLBuffer> fBuffer;
+ size_t fTotalSize;
+ size_t fHead SK_GUARDED_BY(fMutex); // where we start allocating
+ size_t fTail SK_GUARDED_BY(fMutex); // where we start deallocating
+ SkSpinlock fMutex;
+ };
+ static constexpr size_t kBufferSuballocatorStartSize = 1024*1024;
+
+ GrMtlGpu* fGpu;
+
+ // Cache of GrMtlPipelineStates
+ std::unique_ptr<PipelineStateCache> fPipelineStateCache;
+
+ SkTDynamicHash<GrMtlSampler, GrMtlSampler::Key> fSamplers;
+ SkTDynamicHash<GrMtlDepthStencil, GrMtlDepthStencil::Key> fDepthStencilStates;
+
+ // This is ref-counted because we might delete the GrContext before the command buffer
+ // finishes. The completion handler will retain a reference to this so it won't get
+ // deleted along with the GrContext.
+ sk_sp<BufferSuballocator> fBufferSuballocator;
+ size_t fBufferSuballocatorMaxSize;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.mm
new file mode 100644
index 0000000000..9774d2677f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlResourceProvider.mm
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlResourceProvider.h"
+
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/mtl/GrMtlCommandBuffer.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlPipelineState.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#include "src/sksl/SkSLCompiler.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlResourceProvider::GrMtlResourceProvider(GrMtlGpu* gpu)
+ : fGpu(gpu) {
+ fPipelineStateCache.reset(new PipelineStateCache(gpu));
+ fBufferSuballocator.reset(new BufferSuballocator(gpu->device(), kBufferSuballocatorStartSize));
+ // TODO: maxBufferLength seems like a reasonable metric to determine fBufferSuballocatorMaxSize
+ // but may need tuning. Might also need a GrContextOption to let the client set this.
+#ifdef SK_BUILD_FOR_MAC
+ int64_t maxBufferLength = 1024*1024*1024;
+#else
+ int64_t maxBufferLength = 256*1024*1024;
+#endif
+ if (@available(iOS 12, macOS 10.14, *)) {
+ maxBufferLength = gpu->device().maxBufferLength;
+ }
+ fBufferSuballocatorMaxSize = maxBufferLength/16;
+}
+
+GrMtlPipelineState* GrMtlResourceProvider::findOrCreateCompatiblePipelineState(
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType) {
+ return fPipelineStateCache->refPipelineState(renderTarget, programInfo, primitiveType);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrMtlDepthStencil* GrMtlResourceProvider::findOrCreateCompatibleDepthStencilState(
+ const GrStencilSettings& stencil, GrSurfaceOrigin origin) {
+ GrMtlDepthStencil* depthStencilState;
+ GrMtlDepthStencil::Key key = GrMtlDepthStencil::GenerateKey(stencil, origin);
+ depthStencilState = fDepthStencilStates.find(key);
+ if (!depthStencilState) {
+ depthStencilState = GrMtlDepthStencil::Create(fGpu, stencil, origin);
+ fDepthStencilStates.add(depthStencilState);
+ }
+ SkASSERT(depthStencilState);
+ return depthStencilState;
+}
+
+GrMtlSampler* GrMtlResourceProvider::findOrCreateCompatibleSampler(const GrSamplerState& params) {
+ GrMtlSampler* sampler;
+ sampler = fSamplers.find(GrMtlSampler::GenerateKey(params));
+ if (!sampler) {
+ sampler = GrMtlSampler::Create(fGpu, params);
+ fSamplers.add(sampler);
+ }
+ SkASSERT(sampler);
+ return sampler;
+}
+
+void GrMtlResourceProvider::destroyResources() {
+ // Iterate through all stored GrMtlSamplers and unref them before resetting the hash.
+ SkTDynamicHash<GrMtlSampler, GrMtlSampler::Key>::Iter samplerIter(&fSamplers);
+ for (; !samplerIter.done(); ++samplerIter) {
+ (*samplerIter).unref();
+ }
+ fSamplers.reset();
+
+ // Iterate through all stored GrMtlDepthStencils and unref them before resetting the hash.
+ SkTDynamicHash<GrMtlDepthStencil, GrMtlDepthStencil::Key>::Iter dsIter(&fDepthStencilStates);
+ for (; !dsIter.done(); ++dsIter) {
+ (*dsIter).unref();
+ }
+ fDepthStencilStates.reset();
+
+ fPipelineStateCache->release();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+// Display pipeline state cache usage
+static const bool c_DisplayMtlPipelineCache{false};
+#endif
+
+struct GrMtlResourceProvider::PipelineStateCache::Entry {
+ Entry(GrMtlGpu* gpu, GrMtlPipelineState* pipelineState)
+ : fGpu(gpu)
+ , fPipelineState(pipelineState) {}
+
+ GrMtlGpu* fGpu;
+ std::unique_ptr<GrMtlPipelineState> fPipelineState;
+};
+
+GrMtlResourceProvider::PipelineStateCache::PipelineStateCache(GrMtlGpu* gpu)
+ : fMap(gpu->getContext()->priv().options().fRuntimeProgramCacheSize)
+ , fGpu(gpu)
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ , fTotalRequests(0)
+ , fCacheMisses(0)
+#endif
+{}
+
+GrMtlResourceProvider::PipelineStateCache::~PipelineStateCache() {
+ SkASSERT(0 == fMap.count());
+ // dump stats
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ if (c_DisplayMtlPipelineCache) {
+ SkDebugf("--- Pipeline State Cache ---\n");
+ SkDebugf("Total requests: %d\n", fTotalRequests);
+ SkDebugf("Cache misses: %d\n", fCacheMisses);
+ SkDebugf("Cache miss %%: %f\n", (fTotalRequests > 0) ?
+ 100.f * fCacheMisses / fTotalRequests :
+ 0.f);
+ SkDebugf("---------------------\n");
+ }
+#endif
+}
+
+void GrMtlResourceProvider::PipelineStateCache::release() {
+ fMap.reset();
+}
+
+GrMtlPipelineState* GrMtlResourceProvider::PipelineStateCache::refPipelineState(
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primType) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fTotalRequests;
+#endif
+
+ // TODO: unify GL, VK and Mtl
+ // Get GrMtlProgramDesc
+ GrMtlPipelineStateBuilder::Desc desc;
+ if (!GrMtlPipelineStateBuilder::Desc::Build(&desc, renderTarget, programInfo, primType, fGpu)) {
+ GrCapsDebugf(fGpu->caps(), "Failed to build mtl program descriptor!\n");
+ return nullptr;
+ }
+
+ std::unique_ptr<Entry>* entry = fMap.find(desc);
+ if (!entry) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fCacheMisses;
+#endif
+ GrMtlPipelineState* pipelineState(GrMtlPipelineStateBuilder::CreatePipelineState(
+ fGpu, renderTarget, programInfo, &desc));
+ if (!pipelineState) {
+ return nullptr;
+ }
+ entry = fMap.insert(desc, std::unique_ptr<Entry>(new Entry(fGpu, pipelineState)));
+ return (*entry)->fPipelineState.get();
+ }
+ return (*entry)->fPipelineState.get();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+static id<MTLBuffer> alloc_dynamic_buffer(id<MTLDevice> device, size_t size) {
+ NSUInteger options = 0;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+#ifdef SK_BUILD_FOR_MAC
+ options |= MTLResourceStorageModeManaged;
+#else
+ options |= MTLResourceStorageModeShared;
+#endif
+ }
+ return [device newBufferWithLength: size
+ options: options];
+}
+
+// The idea here is that we create a ring buffer which is used for all dynamic allocations
+// below a certain size. When a dynamic GrMtlBuffer is mapped, it grabs a portion of this
+// buffer and uses it. On a subsequent map it will grab a different portion of the buffer.
+// This prevents the buffer from overwriting itself before it's submitted to the command
+// stream.
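+//
+// A small worked example of the wrap-around arithmetic (hypothetical numbers):
+// with fTotalSize == 64, head == 80 and tail == 48 mod down to modHead == 16
+// and modTail == 48. Since modHead < modTail, the free region is the middle
+// span [16, 48), so a 16-byte request lands at offset 16 and head advances to
+// 96 (already 16-byte aligned).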
+
+GrMtlResourceProvider::BufferSuballocator::BufferSuballocator(id<MTLDevice> device, size_t size)
+ : fBuffer(alloc_dynamic_buffer(device, size))
+ , fTotalSize(size)
+ , fHead(0)
+ , fTail(0) {
+ // We increment fHead and fTail without bound and let overflow handle any wrapping.
+ // Because of this, size needs to be a power of two.
+ SkASSERT(SkIsPow2(size));
+}
+
+id<MTLBuffer> GrMtlResourceProvider::BufferSuballocator::getAllocation(size_t size,
+ size_t* offset) {
+ // capture current state locally (because fTail could be overwritten by the completion handler)
+ size_t head, tail;
+ SkAutoSpinlock lock(fMutex);
+ head = fHead;
+ tail = fTail;
+
+ // The head and tail indices increment without bound, wrapping with overflow,
+ // so we need to mod them down to the actual bounds of the allocation to determine
+ // which blocks are available.
+ size_t modHead = head & (fTotalSize - 1);
+ size_t modTail = tail & (fTotalSize - 1);
+
+ bool full = (head != tail && modHead == modTail);
+
+ // We don't want large allocations to eat up this buffer, so we allocate them separately.
+ if (full || size > fTotalSize/2) {
+ return nil;
+ }
+
+ // case 1: free space lies at the beginning and/or the end of the buffer
+ if (modHead >= modTail) {
+ // check for room at the end
+ if (fTotalSize - modHead < size) {
+ // no room at the end, check the beginning
+ if (modTail < size) {
+ // no room at the beginning
+ return nil;
+ }
+ // we are going to allocate from the beginning, adjust head to '0' position
+ head += fTotalSize - modHead;
+ modHead = 0;
+ }
+ // case 2: free space lies in the middle of the buffer, check for room there
+ } else if (modTail - modHead < size) {
+ // no room in the middle
+ return nil;
+ }
+
+ *offset = modHead;
+ // We're not sure what the usage of the next allocation will be --
+ // to be safe we'll use 16 byte alignment.
+ fHead = GrSizeAlignUp(head + size, 16);
+ return fBuffer;
+}
+
+void GrMtlResourceProvider::BufferSuballocator::addCompletionHandler(
+ GrMtlCommandBuffer* cmdBuffer) {
+ this->ref();
+ SkAutoSpinlock lock(fMutex);
+ size_t newTail = fHead;
+ cmdBuffer->addCompletedHandler(^(id <MTLCommandBuffer>commandBuffer) {
+ // Make sure SkAutoSpinlock goes out of scope before
+ // the BufferSuballocator is potentially deleted.
+ {
+ SkAutoSpinlock lock(fMutex);
+ fTail = newTail;
+ }
+ this->unref();
+ });
+}
+
+id<MTLBuffer> GrMtlResourceProvider::getDynamicBuffer(size_t size, size_t* offset) {
+ id<MTLBuffer> buffer = fBufferSuballocator->getAllocation(size, offset);
+ if (buffer) {
+ return buffer;
+ }
+
+ // Try to grow allocation (old allocation will age out).
+ // We grow up to a maximum size, and only grow if the requested allocation will
+ // fit into half of the new buffer (to prevent very large transient buffers forcing
+ // growth when they'll never fit anyway).
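+ // For example (hypothetical sizes): against a full 2 MB suballocator, a
+ // 1.5 MB request fails getAllocation above (it exceeds half the buffer) but
+ // triggers growth to 4 MB here, since 1.5 MB fits in the new half; an 8 MB
+ // request would skip growth and fall through to a one-off buffer below.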
+ if (fBufferSuballocator->size() < fBufferSuballocatorMaxSize &&
+ size <= fBufferSuballocator->size()) {
+ fBufferSuballocator.reset(new BufferSuballocator(fGpu->device(),
+ 2*fBufferSuballocator->size()));
+ id<MTLBuffer> buffer = fBufferSuballocator->getAllocation(size, offset);
+ if (buffer) {
+ return buffer;
+ }
+ }
+
+ *offset = 0;
+ return alloc_dynamic_buffer(fGpu->device(), size);
+}
+
+void GrMtlResourceProvider::addBufferCompletionHandler(GrMtlCommandBuffer* cmdBuffer) {
+ fBufferSuballocator->addCompletionHandler(cmdBuffer);
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.h b/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.h
new file mode 100644
index 0000000000..50d9df9b46
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlSampler_DEFINED
+#define GrMtlSampler_DEFINED
+
+#import <Metal/Metal.h>
+
+#include "src/core/SkOpts.h"
+#include <atomic>
+
+class GrSamplerState;
+class GrMtlGpu;
+
+// A wrapper for a MTLSamplerState object with caching support.
+class GrMtlSampler : public SkRefCnt {
+public:
+ static GrMtlSampler* Create(const GrMtlGpu* gpu, const GrSamplerState&);
+ ~GrMtlSampler() { fMtlSamplerState = nil; }
+
+ id<MTLSamplerState> mtlSampler() const { return fMtlSamplerState; }
+
+ typedef uint32_t Key;
+
+ // Helpers for hashing GrMtlSampler
+ static Key GenerateKey(const GrSamplerState&);
+
+ static const Key& GetKey(const GrMtlSampler& sampler) { return sampler.fKey; }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+
+private:
+ GrMtlSampler(id<MTLSamplerState> mtlSamplerState, Key key)
+ : fMtlSamplerState(mtlSamplerState)
+ , fKey(key) {}
+
+ id<MTLSamplerState> fMtlSamplerState;
+ Key fKey;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.mm
new file mode 100644
index 0000000000..2d7344eff7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlSampler.mm
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlSampler.h"
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+static inline MTLSamplerAddressMode wrap_mode_to_mtl_sampler_address(
+ GrSamplerState::WrapMode wrapMode, const GrCaps& caps) {
+ switch (wrapMode) {
+ case GrSamplerState::WrapMode::kClamp:
+ return MTLSamplerAddressModeClampToEdge;
+ case GrSamplerState::WrapMode::kRepeat:
+ return MTLSamplerAddressModeRepeat;
+ case GrSamplerState::WrapMode::kMirrorRepeat:
+ return MTLSamplerAddressModeMirrorRepeat;
+ case GrSamplerState::WrapMode::kClampToBorder:
+ // Must guard the reference to the clamp to border address mode by macro since iOS or
+ // older macOS builds will fail if it's referenced, even if other code makes sure it's
+ // never used.
+#ifdef SK_BUILD_FOR_MAC
+ if (@available(macOS 10.12, *)) {
+ SkASSERT(caps.clampToBorderSupport());
+ return MTLSamplerAddressModeClampToBorderColor;
+ } else
+#endif
+ {
+ SkASSERT(false);
+ return MTLSamplerAddressModeClampToEdge;
+ }
+ }
+ SK_ABORT("Unknown wrap mode.");
+}
+
+GrMtlSampler* GrMtlSampler::Create(const GrMtlGpu* gpu, const GrSamplerState& samplerState) {
+ static MTLSamplerMinMagFilter mtlMinMagFilterModes[] = {
+ MTLSamplerMinMagFilterNearest,
+ MTLSamplerMinMagFilterLinear,
+ MTLSamplerMinMagFilterLinear
+ };
+
+ GR_STATIC_ASSERT((int)GrSamplerState::Filter::kNearest == 0);
+ GR_STATIC_ASSERT((int)GrSamplerState::Filter::kBilerp == 1);
+ GR_STATIC_ASSERT((int)GrSamplerState::Filter::kMipMap == 2);
+
+ auto samplerDesc = [[MTLSamplerDescriptor alloc] init];
+ samplerDesc.rAddressMode = MTLSamplerAddressModeClampToEdge;
+ samplerDesc.sAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeX(),
+ gpu->mtlCaps());
+ samplerDesc.tAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeY(),
+ gpu->mtlCaps());
+ samplerDesc.magFilter = mtlMinMagFilterModes[static_cast<int>(samplerState.filter())];
+ samplerDesc.minFilter = mtlMinMagFilterModes[static_cast<int>(samplerState.filter())];
+ samplerDesc.mipFilter = MTLSamplerMipFilterLinear;
+ samplerDesc.lodMinClamp = 0.0f;
+ bool useMipMaps = GrSamplerState::Filter::kMipMap == samplerState.filter();
+ samplerDesc.lodMaxClamp = !useMipMaps ? 0.0f : 10000.0f;
+ samplerDesc.maxAnisotropy = 1.0f;
+ samplerDesc.normalizedCoordinates = true;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ samplerDesc.compareFunction = MTLCompareFunctionNever;
+ }
+
+ return new GrMtlSampler([gpu->device() newSamplerStateWithDescriptor: samplerDesc],
+ GenerateKey(samplerState));
+}
+
+GrMtlSampler::Key GrMtlSampler::GenerateKey(const GrSamplerState& samplerState) {
+ return GrSamplerState::GenerateKey(samplerState);
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.h b/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.h
new file mode 100644
index 0000000000..5a5ad9ce26
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlSemaphore_DEFINED
+#define GrMtlSemaphore_DEFINED
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#include <Metal/Metal.h>
+
+class GrMtlGpu;
+
+class GrMtlSemaphore : public GrSemaphore {
+public:
+ static sk_sp<GrMtlSemaphore> Make(GrMtlGpu* gpu, bool isOwned);
+
+ static sk_sp<GrMtlSemaphore> MakeWrapped(GrMtlGpu* gpu,
+ GrMTLHandle event,
+ uint64_t value,
+ GrWrapOwnership ownership);
+
+ id<MTLEvent> event() const API_AVAILABLE(macos(10.14), ios(12.0)) { return fEvent; }
+ uint64_t value() const { return fValue; }
+
+ GrBackendSemaphore backendSemaphore() const override;
+
+private:
+ GrMtlSemaphore(GrMtlGpu* gpu, id<MTLEvent> event,
+ uint64_t value, bool isOwned) API_AVAILABLE(macos(10.14), ios(12.0));
+
+ void onRelease() override;
+ void onAbandon() override;
+
+ id<MTLEvent> fEvent API_AVAILABLE(macos(10.14), ios(12.0));
+ uint64_t fValue;
+
+ typedef GrSemaphore INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.mm
new file mode 100644
index 0000000000..3eb31ac503
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlSemaphore.mm
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlSemaphore.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+sk_sp<GrMtlSemaphore> GrMtlSemaphore::Make(GrMtlGpu* gpu, bool isOwned) {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ id<MTLEvent> event = [gpu->device() newEvent];
+ uint64_t value = 1; // seems like a reasonable starting point
+ return sk_sp<GrMtlSemaphore>(new GrMtlSemaphore(gpu, event, value, isOwned));
+ } else {
+ return nullptr;
+ }
+}
+
+sk_sp<GrMtlSemaphore> GrMtlSemaphore::MakeWrapped(GrMtlGpu* gpu,
+ GrMTLHandle event,
+ uint64_t value,
+ GrWrapOwnership ownership) {
+ // The GrMtlSemaphore will have strong ownership at this point.
+ // The GrMTLHandle will subsequently only have weak ownership.
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ id<MTLEvent> mtlEvent = (__bridge_transfer id<MTLEvent>)event;
+ auto sema = sk_sp<GrMtlSemaphore>(new GrMtlSemaphore(gpu, mtlEvent, value,
+ kBorrow_GrWrapOwnership != ownership));
+ return sema;
+ } else {
+ return nullptr;
+ }
+}
+
+GrMtlSemaphore::GrMtlSemaphore(GrMtlGpu* gpu, id<MTLEvent> event, uint64_t value, bool isOwned)
+ : INHERITED(gpu), fEvent(event), fValue(value) {
+ isOwned ? this->registerWithCache(SkBudgeted::kNo)
+ : this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+void GrMtlSemaphore::onRelease() {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ fEvent = nil;
+ }
+ INHERITED::onRelease();
+}
+
+void GrMtlSemaphore::onAbandon() {
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ fEvent = nil;
+ }
+ INHERITED::onAbandon();
+}
+
+GrBackendSemaphore GrMtlSemaphore::backendSemaphore() const {
+ GrBackendSemaphore backendSemaphore;
+ // The GrMtlSemaphore and the GrBackendSemaphore will have strong ownership at this point.
+ // Whoever uses the GrBackendSemaphore will subsequently steal this ref (see MakeWrapped, above).
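+ // (Sketch of the intended round trip, assuming the handle is later
+ // re-wrapped: the __bridge_retained below adds a reference that the
+ // __bridge_transfer in MakeWrapped consumes, keeping the MTLEvent's retain
+ // count balanced.)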
+ if (@available(macOS 10.14, iOS 12.0, *)) {
+ GrMTLHandle handle = (__bridge_retained GrMTLHandle)(fEvent);
+ backendSemaphore.initMetal(handle, fValue);
+ }
+ return backendSemaphore;
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.h b/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.h
new file mode 100644
index 0000000000..5387b06532
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.h
@@ -0,0 +1,54 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrMtlStencil_DEFINED
+#define GrMtlStencil_DEFINED
+
+#include "src/gpu/GrStencilAttachment.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlImageView;
+class GrMtlGpu;
+
+class GrMtlStencilAttachment : public GrStencilAttachment {
+public:
+ struct Format {
+ MTLPixelFormat fInternalFormat;
+ int fStencilBits;
+ int fTotalBits;
+ bool fPacked;
+ };
+
+ static GrMtlStencilAttachment* Create(GrMtlGpu* gpu, int width, int height,
+ int sampleCnt, const Format& format);
+
+ ~GrMtlStencilAttachment() override;
+
+ MTLPixelFormat mtlFormat() const { return fFormat.fInternalFormat; }
+
+ id<MTLTexture> stencilView() const { return fStencilView; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrMtlStencilAttachment(GrMtlGpu* gpu,
+ const Format& format,
+ const id<MTLTexture> stencilView);
+
+ GrMtlGpu* getMtlGpu() const;
+
+ Format fFormat;
+
+ id<MTLTexture> fStencilView;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.mm
new file mode 100644
index 0000000000..01ed2c245f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlStencilAttachment.mm
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlStencilAttachment::GrMtlStencilAttachment(GrMtlGpu* gpu,
+ const Format& format,
+ const id<MTLTexture> stencilView)
+ : GrStencilAttachment(gpu, stencilView.width, stencilView.height, format.fStencilBits,
+ stencilView.sampleCount)
+ , fFormat(format)
+ , fStencilView(stencilView) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrMtlStencilAttachment* GrMtlStencilAttachment::Create(GrMtlGpu* gpu,
+ int width,
+ int height,
+ int sampleCnt,
+ const Format& format) {
+ MTLTextureDescriptor* desc =
+ [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:format.fInternalFormat
+ width:width
+ height:height
+ mipmapped:NO];
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ desc.storageMode = MTLStorageModePrivate;
+ desc.usage = MTLTextureUsageRenderTarget;
+ }
+ desc.sampleCount = sampleCnt;
+ if (sampleCnt > 1) {
+ desc.textureType = MTLTextureType2DMultisample;
+ }
+ return new GrMtlStencilAttachment(gpu, format, [gpu->device() newTextureWithDescriptor:desc]);
+}
+
+GrMtlStencilAttachment::~GrMtlStencilAttachment() {
+ // should have been released or abandoned first
+ SkASSERT(!fStencilView);
+}
+
+size_t GrMtlStencilAttachment::onGpuMemorySize() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= this->numSamples();
+ return static_cast<size_t>(size / 8);
+}
+
+void GrMtlStencilAttachment::onRelease() {
+ fStencilView = nullptr;
+ GrStencilAttachment::onRelease();
+}
+
+void GrMtlStencilAttachment::onAbandon() {
+ fStencilView = nullptr;
+ GrStencilAttachment::onAbandon();
+}
+
+GrMtlGpu* GrMtlStencilAttachment::getMtlGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrMtlGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.h b/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.h
new file mode 100644
index 0000000000..c81aef0ebb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTexture_DEFINED
+#define GrMtlTexture_DEFINED
+
+#include "include/gpu/GrTexture.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlGpu;
+
+class GrMtlTexture : public GrTexture {
+public:
+ static sk_sp<GrMtlTexture> MakeNewTexture(GrMtlGpu*, SkBudgeted budgeted,
+ const GrSurfaceDesc&,
+ MTLTextureDescriptor*,
+ GrMipMapsStatus);
+
+ static sk_sp<GrMtlTexture> MakeWrappedTexture(GrMtlGpu*, const GrSurfaceDesc&, id<MTLTexture>,
+ GrWrapCacheable, GrIOType);
+
+ ~GrMtlTexture() override;
+
+ id<MTLTexture> mtlTexture() const { return fTexture; }
+
+ GrBackendTexture getBackendTexture() const override;
+
+ GrBackendFormat backendFormat() const override;
+
+ void textureParamsModified() override {}
+
+ bool reallocForMipmap(GrMtlGpu* gpu, uint32_t mipLevels);
+
+protected:
+ GrMtlTexture(GrMtlGpu*, const GrSurfaceDesc&, id<MTLTexture>, GrMipMapsStatus);
+
+ GrMtlGpu* getMtlGpu() const;
+
+ void onAbandon() override {
+ fTexture = nil;
+ INHERITED::onAbandon();
+ }
+ void onRelease() override {
+ fTexture = nil;
+ INHERITED::onRelease();
+ }
+
+ bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) override {
+ return false;
+ }
+
+private:
+ enum Wrapped { kWrapped };
+
+ GrMtlTexture(GrMtlGpu*, SkBudgeted, const GrSurfaceDesc&, id<MTLTexture>,
+ GrMipMapsStatus);
+
+ GrMtlTexture(GrMtlGpu*, Wrapped, const GrSurfaceDesc&, id<MTLTexture>, GrMipMapsStatus,
+ GrWrapCacheable, GrIOType);
+
+ id<MTLTexture> fTexture;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.mm
new file mode 100644
index 0000000000..be9736fced
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTexture.mm
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlTexture.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> texture,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo,
+ GrTextureType::k2D, mipMapsStatus)
+ , fTexture(texture) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == texture.mipmapLevelCount));
+ this->registerWithCache(budgeted);
+ if (GrMtlFormatIsCompressed(texture.pixelFormat)) {
+ this->setReadOnly();
+ }
+}
+
+GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
+ Wrapped,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> texture,
+ GrMipMapsStatus mipMapsStatus,
+ GrWrapCacheable cacheable,
+ GrIOType ioType)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo,
+ GrTextureType::k2D, mipMapsStatus)
+ , fTexture(texture) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == texture.mipmapLevelCount));
+ if (ioType == kRead_GrIOType) {
+ this->setReadOnly();
+ }
+ this->registerWithCacheWrapped(cacheable);
+}
+
+GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> texture,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo,
+ GrTextureType::k2D, mipMapsStatus)
+ , fTexture(texture) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == texture.mipmapLevelCount));
+}
+
+sk_sp<GrMtlTexture> GrMtlTexture::MakeNewTexture(GrMtlGpu* gpu, SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ MTLTextureDescriptor* texDesc,
+ GrMipMapsStatus mipMapsStatus) {
+ id<MTLTexture> texture = [gpu->device() newTextureWithDescriptor:texDesc];
+ if (!texture) {
+ return nullptr;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT(MTLTextureUsageShaderRead & texture.usage);
+ }
+ return sk_sp<GrMtlTexture>(new GrMtlTexture(gpu, budgeted, desc, texture, mipMapsStatus));
+}
+
+sk_sp<GrMtlTexture> GrMtlTexture::MakeWrappedTexture(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> texture,
+ GrWrapCacheable cacheable,
+ GrIOType ioType) {
+ SkASSERT(nil != texture);
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT(MTLTextureUsageShaderRead & texture.usage);
+ }
+ GrMipMapsStatus mipMapsStatus = texture.mipmapLevelCount > 1 ? GrMipMapsStatus::kValid
+ : GrMipMapsStatus::kNotAllocated;
+ return sk_sp<GrMtlTexture>(new GrMtlTexture(gpu, kWrapped, desc, texture, mipMapsStatus,
+ cacheable, ioType));
+}
+
+GrMtlTexture::~GrMtlTexture() {
+ SkASSERT(nil == fTexture);
+}
+
+GrMtlGpu* GrMtlTexture::getMtlGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrMtlGpu*>(this->getGpu());
+}
+
+GrBackendTexture GrMtlTexture::getBackendTexture() const {
+ GrMipMapped mipMapped = fTexture.mipmapLevelCount > 1 ? GrMipMapped::kYes
+ : GrMipMapped::kNo;
+ GrMtlTextureInfo info;
+ info.fTexture.reset(GrRetainPtrFromId(fTexture));
+ return GrBackendTexture(this->width(), this->height(), mipMapped, info);
+}
+
+GrBackendFormat GrMtlTexture::backendFormat() const {
+ return GrBackendFormat::MakeMtl(fTexture.pixelFormat);
+}
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.h b/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.h
new file mode 100644
index 0000000000..18656e118f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTextureRenderTarget_DEFINED
+#define GrMtlTextureRenderTarget_DEFINED
+
+#include "src/gpu/mtl/GrMtlRenderTarget.h"
+#include "src/gpu/mtl/GrMtlTexture.h"
+
+class GrMtlTextureRenderTarget: public GrMtlTexture, public GrMtlRenderTarget {
+public:
+ static sk_sp<GrMtlTextureRenderTarget> MakeNewTextureRenderTarget(GrMtlGpu*,
+ SkBudgeted,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ MTLTextureDescriptor*,
+ GrMipMapsStatus);
+
+ static sk_sp<GrMtlTextureRenderTarget> MakeWrappedTextureRenderTarget(GrMtlGpu*,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ id<MTLTexture>,
+ GrWrapCacheable);
+ GrBackendFormat backendFormat() const override {
+ return GrMtlTexture::backendFormat();
+ }
+
+protected:
+ void onAbandon() override {
+ GrMtlRenderTarget::onAbandon();
+ GrMtlTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrMtlRenderTarget::onRelease();
+ GrMtlTexture::onRelease();
+ }
+
+private:
+ GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ GrMipMapsStatus);
+
+ GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ GrMipMapsStatus);
+
+ GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ GrMipMapsStatus,
+ GrWrapCacheable cacheable);
+
+ GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ GrMipMapsStatus,
+ GrWrapCacheable cacheable);
+
+ size_t onGpuMemorySize() const override {
+ // TODO: When used as render targets certain formats may actually have a larger size than
+ // the base format size. Check to make sure we are reporting the correct value here.
+ // The plus 1 is to account for the resolve texture (with no msaa, the single count is the RT itself).
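+ // For example, numSamples() == 4 reports 5 sample layers below: 4 for the
+ // MSAA color texture plus 1 for the resolve texture.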
+ int numColorSamples = this->numSamples();
+ if (numColorSamples > 1) {
+ ++numColorSamples;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, GrMipMapped::kNo);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.mm
new file mode 100644
index 0000000000..9bdaa82ac7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTextureRenderTarget.mm
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlTextureRenderTarget.h"
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrMtlTexture(gpu, desc, resolveTexture, mipMapsStatus)
+ , GrMtlRenderTarget(gpu, desc, sampleCnt, colorTexture, resolveTexture) {
+ this->registerWithCache(budgeted);
+}
+
+GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrMtlTexture(gpu, desc, colorTexture, mipMapsStatus)
+ , GrMtlRenderTarget(gpu, desc, colorTexture) {
+ this->registerWithCache(budgeted);
+}
+
+GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> colorTexture,
+ id<MTLTexture> resolveTexture,
+ GrMipMapsStatus mipMapsStatus,
+ GrWrapCacheable cacheable)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrMtlTexture(gpu, desc, resolveTexture, mipMapsStatus)
+ , GrMtlRenderTarget(gpu, desc, sampleCnt, colorTexture, resolveTexture) {
+ this->registerWithCacheWrapped(cacheable);
+}
+
+GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ id<MTLTexture> colorTexture,
+ GrMipMapsStatus mipMapsStatus,
+ GrWrapCacheable cacheable)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, GrProtected::kNo)
+ , GrMtlTexture(gpu, desc, colorTexture, mipMapsStatus)
+ , GrMtlRenderTarget(gpu, desc, colorTexture) {
+ this->registerWithCacheWrapped(cacheable);
+}
+
+static id<MTLTexture> create_msaa_texture(GrMtlGpu* gpu, const GrSurfaceDesc& desc, int sampleCnt) {
+ MTLPixelFormat format;
+ if (!GrPixelConfigToMTLFormat(desc.fConfig, &format)) {
+ return nullptr;
+ }
+ MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
+ texDesc.textureType = MTLTextureType2DMultisample;
+ texDesc.pixelFormat = format;
+ texDesc.width = desc.fWidth;
+ texDesc.height = desc.fHeight;
+ texDesc.depth = 1;
+ texDesc.mipmapLevelCount = 1;
+ texDesc.sampleCount = sampleCnt;
+ texDesc.arrayLength = 1;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ texDesc.storageMode = MTLStorageModePrivate;
+ texDesc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
+ }
+
+ return [gpu->device() newTextureWithDescriptor:texDesc];
+}
+
+sk_sp<GrMtlTextureRenderTarget> GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
+ GrMtlGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ MTLTextureDescriptor* texDesc,
+ GrMipMapsStatus mipMapsStatus) {
+ id<MTLTexture> texture = [gpu->device() newTextureWithDescriptor:texDesc];
+ if (!texture) {
+ return nullptr;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & texture.usage);
+ }
+
+ if (sampleCnt > 1) {
+ id<MTLTexture> colorTexture = create_msaa_texture(gpu, desc, sampleCnt);
+ if (!colorTexture) {
+ return nullptr;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
+ }
+ return sk_sp<GrMtlTextureRenderTarget>(new GrMtlTextureRenderTarget(
+ gpu, budgeted, desc, sampleCnt, colorTexture, texture, mipMapsStatus));
+ } else {
+ return sk_sp<GrMtlTextureRenderTarget>(
+ new GrMtlTextureRenderTarget(gpu, budgeted, desc, texture, mipMapsStatus));
+ }
+}
+
+sk_sp<GrMtlTextureRenderTarget> GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
+ GrMtlGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ id<MTLTexture> texture,
+ GrWrapCacheable cacheable) {
+ SkASSERT(nil != texture);
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & texture.usage);
+ }
+ GrMipMapsStatus mipMapsStatus = texture.mipmapLevelCount > 1
+ ? GrMipMapsStatus::kDirty
+ : GrMipMapsStatus::kNotAllocated;
+ if (sampleCnt > 1) {
+ id<MTLTexture> colorTexture = create_msaa_texture(gpu, desc, sampleCnt);
+ if (!colorTexture) {
+ return nullptr;
+ }
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
+ }
+ return sk_sp<GrMtlTextureRenderTarget>(new GrMtlTextureRenderTarget(
+ gpu, desc, sampleCnt, colorTexture, texture, mipMapsStatus, cacheable));
+ } else {
+ return sk_sp<GrMtlTextureRenderTarget>(
+ new GrMtlTextureRenderTarget(gpu, desc, texture, mipMapsStatus, cacheable));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.h b/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.h
new file mode 100644
index 0000000000..ac0cd61e9e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTrampoline_DEFINED
+#define GrMtlTrampoline_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrTypes.h"
+
+class GrContext;
+class GrGpu;
+struct GrContextOptions;
+
+/*
+ * This class is used to hold functions which trampoline from the Ganesh cpp code to the GrMtl
+ * objective-c files.
+ */
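+//
+// (Usage sketch, assuming a caller holding ARC-managed Metal objects: it would
+// pass them as (__bridge void*)mtlDevice and (__bridge void*)mtlQueue, matching
+// the __bridge casts back to id<MTLDevice> and id<MTLCommandQueue> in
+// GrMtlTrampoline.mm.)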
+class GrMtlTrampoline {
+public:
+ static sk_sp<GrGpu> MakeGpu(GrContext*, const GrContextOptions&, void* device, void* queue);
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.mm
new file mode 100644
index 0000000000..85f67b061c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlTrampoline.mm
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlTrampoline.h"
+
+#include "src/gpu/mtl/GrMtlGpu.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+sk_sp<GrGpu> GrMtlTrampoline::MakeGpu(GrContext* context,
+ const GrContextOptions& options,
+ void* device,
+ void* queue) {
+ return GrMtlGpu::Make(context,
+ options,
+ (__bridge id<MTLDevice>)device,
+ (__bridge id<MTLCommandQueue>)queue);
+}
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.h b/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.h
new file mode 100644
index 0000000000..bcf15a05f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.h
@@ -0,0 +1,99 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrMtlUniformHandler_DEFINED
+#define GrMtlUniformHandler_DEFINED
+
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+// TODO: this class is basically copied and pasted from GrVkUniformHandler so that we can have
+// some shaders working. The SkSL Metal code generator was written to work with GLSL generated for
+// the Ganesh Vulkan backend, so it should all work. There might be better ways to do things in
+// Metal and/or some Vulkan GLSLisms left in.
+class GrMtlUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ enum {
+ kUniformBinding = 0,
+ kLastUniformBinding = kUniformBinding,
+ };
+
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ struct UniformInfo {
+ GrShaderVar fVariable;
+ uint32_t fVisibility;
+ uint32_t fUBOffset;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ const GrShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+private:
+ explicit GrMtlUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fSamplers(kUniformsPerBlock)
+ , fCurrentUBOOffset(0)
+ , fCurrentUBOMaxAlignment(0x0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void updateUniformVisibility(UniformHandle u, uint32_t visibility) override {
+ fUniforms[u.toIndex()].fVisibility |= visibility;
+ }
+
+ SamplerHandle addSampler(const GrTextureProxy*,
+ const GrSamplerState&,
+ const GrSwizzle&,
+ const char* name,
+ const GrShaderCaps*) override;
+
+ int numSamplers() const { return fSamplers.count(); }
+ const char* samplerVariable(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()].fVariable.c_str();
+ }
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const override {
+ return fSamplerSwizzles[handle.toIndex()];
+ }
+ uint32_t samplerVisibility(SamplerHandle handle) const {
+ return fSamplers[handle.toIndex()].fVisibility;
+ }
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+ UniformInfoArray fUniforms;
+ UniformInfoArray fSamplers;
+ SkTArray<GrSwizzle> fSamplerSwizzles;
+
+ uint32_t fCurrentUBOOffset;
+ uint32_t fCurrentUBOMaxAlignment;
+
+ friend class GrMtlPipelineStateBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.mm
new file mode 100644
index 0000000000..89e1263eaf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlUniformHandler.mm
@@ -0,0 +1,310 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/gpu/GrTexture.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/mtl/GrMtlUniformHandler.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+// TODO: this class is basically copied and pasted from GrVkUniformHandler so that we can have
+// some shaders working. The SkSL Metal code generator was written to work with GLSL generated for
+// the Ganesh Vulkan backend, so it should all work. There might be better ways to do things in
+// Metal and/or some Vulkan GLSLisms left in.
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
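+//
+// Worked example (hypothetical offset): a float2 has 8-byte alignment, so its
+// mask is 0x7; an offset of 20 gives 20 & 0x7 == 4, i.e. 4 bytes past the last
+// 8-byte boundary, so 4 bytes of padding bring it to an aligned offset of 24.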
+static uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType: // fall through
+ case kUByte_GrSLType:
+ return 0x0;
+ case kByte2_GrSLType: // fall through
+ case kUByte2_GrSLType:
+ return 0x1;
+ case kByte3_GrSLType: // fall through
+ case kByte4_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ return 0x3;
+ case kShort_GrSLType: // fall through
+ case kUShort_GrSLType:
+ return 0x1;
+ case kShort2_GrSLType: // fall through
+ case kUShort2_GrSLType:
+ return 0x3;
+ case kShort3_GrSLType: // fall through
+ case kShort4_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ return 0x7;
+ case kInt_GrSLType:
+ case kUint_GrSLType:
+ return 0x3;
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return 0x3;
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 0x7;
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 0xF;
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 0xF;
+ case kUint2_GrSLType:
+ return 0x7;
+ case kInt2_GrSLType:
+ return 0x7;
+ case kInt3_GrSLType:
+ return 0xF;
+ case kInt4_GrSLType:
+ return 0xF;
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ return 0x7;
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 0xF;
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 0xF;
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+/** Returns the size in bytes taken up in Metal buffers for GrSLTypes. */
+static inline uint32_t grsltype_to_mtl_size(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType:
+ return sizeof(int8_t);
+ case kByte2_GrSLType:
+ return 2 * sizeof(int8_t);
+ case kByte3_GrSLType:
+ return 4 * sizeof(int8_t);
+ case kByte4_GrSLType:
+ return 4 * sizeof(int8_t);
+ case kUByte_GrSLType:
+ return sizeof(uint8_t);
+ case kUByte2_GrSLType:
+ return 2 * sizeof(uint8_t);
+ case kUByte3_GrSLType:
+ return 4 * sizeof(uint8_t);
+ case kUByte4_GrSLType:
+ return 4 * sizeof(uint8_t);
+ case kShort_GrSLType:
+ return sizeof(int16_t);
+ case kShort2_GrSLType:
+ return 2 * sizeof(int16_t);
+ case kShort3_GrSLType:
+ return 4 * sizeof(int16_t);
+ case kShort4_GrSLType:
+ return 4 * sizeof(int16_t);
+ case kUShort_GrSLType:
+ return sizeof(uint16_t);
+ case kUShort2_GrSLType:
+ return 2 * sizeof(uint16_t);
+ case kUShort3_GrSLType:
+ return 4 * sizeof(uint16_t);
+ case kUShort4_GrSLType:
+ return 4 * sizeof(uint16_t);
+ case kInt_GrSLType:
+ return sizeof(int32_t);
+ case kUint_GrSLType:
+ return sizeof(int32_t);
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return sizeof(float);
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 2 * sizeof(float);
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 4 * sizeof(float);
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 4 * sizeof(float);
+ case kUint2_GrSLType:
+ return 2 * sizeof(uint32_t);
+ case kInt2_GrSLType:
+ return 2 * sizeof(int32_t);
+ case kInt3_GrSLType:
+ return 4 * sizeof(int32_t);
+ case kInt4_GrSLType:
+ return 4 * sizeof(int32_t);
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ return 4 * sizeof(float);
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 12 * sizeof(float);
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 16 * sizeof(float);
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
+// taking into consideration all alignment requirements. The uniformOffset is set to the offset for
+// the new uniform, and currentOffset is updated to be the offset to the end of the new uniform.
+static void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ uint32_t* maxAlignment,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ if (alignmentMask > *maxAlignment) {
+ *maxAlignment = alignmentMask;
+ }
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ if (arrayCount) {
+ *currentOffset = *uniformOffset + grsltype_to_mtl_size(type) * arrayCount;
+ } else {
+ *currentOffset = *uniformOffset + grsltype_to_mtl_size(type);
+ }
+}
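+
+// Continuing the worked example above: a (non-array) float2 arriving with
+// *currentOffset == 20 gets *uniformOffset == 24, and *currentOffset then
+// advances to 24 + 8 == 32.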
+
+GrGLSLUniformHandler::UniformHandle GrMtlUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ // TODO: this is a bit hacky, let's think of a better way. Basically we need to be able to use
+ // the uniform view matrix name in the GP, and the GP is immutable so it has to tell the PB
+ // exactly what name it wants to use for the uniform view matrix. If we prefix anything, then
+ // the names won't match. I think the correct solution is for all GPs which need the uniform
+ // view matrix to upload it in their setData along with their regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0] || !strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ uni.fVisibility = kFragment_GrShaderFlag | kVertex_GrShaderFlag;
+ // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrShaderVar::kNone_TypeModifier);
+
+ get_ubo_aligned_offset(&uni.fUBOffset, &fCurrentUBOOffset, &fCurrentUBOMaxAlignment, type,
+ arrayCount);
+
+ SkString layoutQualifier;
+ layoutQualifier.appendf("offset=%d", uni.fUBOffset);
+ uni.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+GrGLSLUniformHandler::SamplerHandle GrMtlUniformHandler::addSampler(const GrTextureProxy* texture,
+ const GrSamplerState&,
+ const GrSwizzle& swizzle,
+ const char* name,
+ const GrShaderCaps* caps) {
+ SkASSERT(name && strlen(name));
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+
+ GrTextureType type = texture->textureType();
+
+ UniformInfo& info = fSamplers.push_back();
+ info.fVariable.setType(GrSLCombinedSamplerTypeForTextureType(type));
+ info.fVariable.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ info.fVariable.setName(mangleName);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("binding=%d", fSamplers.count() - 1);
+ info.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+ info.fVisibility = kFragment_GrShaderFlag;
+ info.fUBOffset = 0;
+ SkASSERT(caps->textureSwizzleAppliedInShader());
+ fSamplerSwizzles.push_back(swizzle);
+ SkASSERT(fSamplerSwizzles.count() == fSamplers.count());
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrMtlUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ const UniformInfo& sampler = fSamplers[i];
+ SkASSERT(sampler.fVariable.getType() == kTexture2DSampler_GrSLType);
+ if (visibility == sampler.fVisibility) {
+ sampler.fVariable.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+ }
+
+#ifdef SK_DEBUG
+ bool firstOffsetCheck = false;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (!firstOffsetCheck) {
+ // Check to make sure we are starting our offset at 0 so the offset qualifier we
+ // set on each variable in the uniform block is valid.
+ SkASSERT(0 == localUniform.fUBOffset);
+ firstOffsetCheck = true;
+ }
+ }
+#endif
+
+ SkString uniformsString;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility & localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ localUniform.fVariable.appendDecl(fProgramBuilder->shaderCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ }
+ }
+ }
+
+ if (!uniformsString.isEmpty()) {
+ out->appendf("layout (binding=%d) uniform uniformBuffer\n{\n", kUniformBinding);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.h b/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.h
new file mode 100644
index 0000000000..171b87d7ce
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlUtil_DEFINED
+#define GrMtlUtil_DEFINED
+
+#import <Metal/Metal.h>
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+#if defined(SK_BUILD_FOR_MAC)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED < 101400
+#error Must use at least 10.14 SDK to build Metal backend for macOS
+#endif
+#else
+#if __IPHONE_OS_VERSION_MAX_ALLOWED < 120000 && __TV_OS_VERSION_MAX_ALLOWED < 120000
+#error Must use at least 12.0 SDK to build Metal backend for iOS
+#endif
+#endif
+
+class GrMtlGpu;
+class GrSurface;
+
+/**
+ * Returns the Metal texture format for the given GrPixelConfig
+ */
+bool GrPixelConfigToMTLFormat(GrPixelConfig config, MTLPixelFormat* format);
+
+/**
+ * Returns an id<MTLTexture> to the MTLTexture pointed at by the const void*.
+ */
+SK_ALWAYS_INLINE id<MTLTexture> GrGetMTLTexture(const void* mtlTexture) {
+ return (__bridge id<MTLTexture>)mtlTexture;
+}
+
+/**
+ * Returns a const void* to whatever the id object is pointing to.
+ */
+SK_ALWAYS_INLINE const void* GrGetPtrFromId(id idObject) {
+ return (__bridge const void*)idObject;
+}
+
+/**
+ * Returns a const void* to whatever the id object is pointing to.
+ * Will call CFRetain on the object.
+ */
+SK_ALWAYS_INLINE const void* GrRetainPtrFromId(id idObject) {
+ return (__bridge_retained const void*)idObject;
+}
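+
+// (Pairing sketch, not from this header: a pointer produced by
+// GrRetainPtrFromId() owns a reference, so the receiver must eventually
+// balance it with a __bridge_transfer cast or CFRelease; see, e.g., the
+// backendSemaphore()/MakeWrapped pair in GrMtlSemaphore.)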
+
+/**
+ * Returns a MTLTextureDescriptor which describes the MTLTexture. Useful when creating a duplicate
+ * MTLTexture without the same storage allocation.
+ */
+MTLTextureDescriptor* GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture);
+
+/**
+ * Returns a compiled MTLLibrary created from MSL code generated by SkSLC
+ */
+id<MTLLibrary> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
+ const char* shaderString,
+ SkSL::Program::Kind kind,
+ const SkSL::Program::Settings& settings,
+ SkSL::Program::Inputs* outInputs);
+
+/**
+ * Replacement for newLibraryWithSource:options:error that has a timeout.
+ */
+id<MTLLibrary> GrMtlNewLibraryWithSource(id<MTLDevice>, NSString* mslCode,
+ MTLCompileOptions*, bool* timedout);
+
+/**
+ * Replacement for newRenderPipelineStateWithDescriptor:error that has a timeout.
+ */
+id<MTLRenderPipelineState> GrMtlNewRenderPipelineStateWithDescriptor(
+ id<MTLDevice>, MTLRenderPipelineDescriptor*, bool* timedout);
+
+/**
+ * Returns a MTLTexture corresponding to the GrSurface.
+ */
+id<MTLTexture> GrGetMTLTextureFromSurface(GrSurface* surface);
+
+static inline MTLPixelFormat GrBackendFormatAsMTLPixelFormat(const GrBackendFormat& format) {
+ return static_cast<MTLPixelFormat>(format.asMtlFormat());
+}
+
+/**
+ * Returns true if the format is compressed.
+ */
+bool GrMtlFormatIsCompressed(MTLPixelFormat mtlFormat);
+
+/**
+ * Maps a MTLPixelFormat into the CompressionType enum if applicable.
+ */
+bool GrMtlFormatToCompressionType(MTLPixelFormat mtlFormat,
+ SkImage::CompressionType* compressionType);
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.mm
new file mode 100644
index 0000000000..eccf435074
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlUtil.mm
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlUtil.h"
+
+#include "include/gpu/GrSurface.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/mtl/GrMtlGpu.h"
+#include "src/gpu/mtl/GrMtlRenderTarget.h"
+#include "src/gpu/mtl/GrMtlTexture.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#import <Metal/Metal.h>
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+#define PRINT_MSL 0 // print out the MSL code generated
+
+bool GrPixelConfigToMTLFormat(GrPixelConfig config, MTLPixelFormat* format) {
+ MTLPixelFormat dontCare;
+ if (!format) {
+ format = &dontCare;
+ }
+
+ switch (config) {
+ case kUnknown_GrPixelConfig:
+ return false;
+ case kRGBA_8888_GrPixelConfig:
+ *format = MTLPixelFormatRGBA8Unorm;
+ return true;
+ case kRGB_888_GrPixelConfig:
+ *format = MTLPixelFormatRGBA8Unorm;
+ return true;
+ case kRGB_888X_GrPixelConfig:
+ *format = MTLPixelFormatRGBA8Unorm;
+ return true;
+ case kRG_88_GrPixelConfig:
+ *format = MTLPixelFormatRG8Unorm;
+ return true;
+ case kBGRA_8888_GrPixelConfig:
+ *format = MTLPixelFormatBGRA8Unorm;
+ return true;
+ case kSRGBA_8888_GrPixelConfig:
+ *format = MTLPixelFormatRGBA8Unorm_sRGB;
+ return true;
+ case kRGBA_1010102_GrPixelConfig:
+ *format = MTLPixelFormatRGB10A2Unorm;
+ return true;
+ case kRGB_565_GrPixelConfig:
+#ifdef SK_BUILD_FOR_IOS
+ *format = MTLPixelFormatB5G6R5Unorm;
+ return true;
+#else
+ return false;
+#endif
+ case kRGBA_4444_GrPixelConfig:
+#ifdef SK_BUILD_FOR_IOS
+ *format = MTLPixelFormatABGR4Unorm;
+ return true;
+#else
+ return false;
+#endif
+ case kAlpha_8_GrPixelConfig: // fall through
+ case kAlpha_8_as_Red_GrPixelConfig:
+ *format = MTLPixelFormatR8Unorm;
+ return true;
+ case kAlpha_8_as_Alpha_GrPixelConfig:
+ *format = MTLPixelFormatA8Unorm;
+ return true;
+ case kGray_8_GrPixelConfig: // fall through
+ case kGray_8_as_Red_GrPixelConfig:
+ *format = MTLPixelFormatR8Unorm;
+ return true;
+ case kGray_8_as_Lum_GrPixelConfig:
+ return false;
+ case kRGBA_half_GrPixelConfig:
+ *format = MTLPixelFormatRGBA16Float;
+ return true;
+ case kRGBA_half_Clamped_GrPixelConfig:
+ *format = MTLPixelFormatRGBA16Float;
+ return true;
+ case kAlpha_half_GrPixelConfig: // fall through
+ case kAlpha_half_as_Red_GrPixelConfig:
+ *format = MTLPixelFormatR16Float;
+ return true;
+ case kAlpha_half_as_Lum_GrPixelConfig:
+ return false;
+ case kRGB_ETC1_GrPixelConfig:
+#ifdef SK_BUILD_FOR_IOS
+ *format = MTLPixelFormatETC2_RGB8;
+ return true;
+#else
+ return false;
+#endif
+ case kAlpha_16_GrPixelConfig:
+ *format = MTLPixelFormatR16Unorm;
+ return true;
+ case kRG_1616_GrPixelConfig:
+ *format = MTLPixelFormatRG16Unorm;
+ return true;
+ case kRGBA_16161616_GrPixelConfig:
+ *format = MTLPixelFormatRGBA16Unorm;
+ return true;
+ case kRG_half_GrPixelConfig:
+ *format = MTLPixelFormatRG16Float;
+ return true;
+ }
+ SK_ABORT("Unexpected config");
+}
+
+MTLTextureDescriptor* GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture) {
+ MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
+ texDesc.textureType = mtlTexture.textureType;
+ texDesc.pixelFormat = mtlTexture.pixelFormat;
+ texDesc.width = mtlTexture.width;
+ texDesc.height = mtlTexture.height;
+ texDesc.depth = mtlTexture.depth;
+ texDesc.mipmapLevelCount = mtlTexture.mipmapLevelCount;
+ texDesc.arrayLength = mtlTexture.arrayLength;
+ texDesc.sampleCount = mtlTexture.sampleCount;
+ if (@available(macOS 10.11, iOS 9.0, *)) {
+ texDesc.usage = mtlTexture.usage;
+ }
+ return texDesc;
+}
+
+#if PRINT_MSL
+void print_msl(const char* source) {
+ SkTArray<SkString> lines;
+ SkStrSplit(source, "\n", kStrict_SkStrSplitMode, &lines);
+ for (int i = 0; i < lines.count(); i++) {
+ SkString& line = lines[i];
+ line.prependf("%4i\t", i + 1);
+ SkDebugf("%s\n", line.c_str());
+ }
+}
+#endif
+
+id<MTLLibrary> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
+ const char* shaderString,
+ SkSL::Program::Kind kind,
+ const SkSL::Program::Settings& settings,
+ SkSL::Program::Inputs* outInputs) {
+ std::unique_ptr<SkSL::Program> program =
+ gpu->shaderCompiler()->convertProgram(kind,
+ SkSL::String(shaderString),
+ settings);
+
+ if (!program) {
+ SkDebugf("SkSL error:\n%s\n", gpu->shaderCompiler()->errorText().c_str());
+ SkASSERT(false);
+ return nil;
+ }
+
+ *outInputs = program->fInputs;
+ SkSL::String code;
+ if (!gpu->shaderCompiler()->toMetal(*program, &code)) {
+ SkDebugf("%s\n", gpu->shaderCompiler()->errorText().c_str());
+ SkASSERT(false);
+ return nil;
+ }
+ NSString* mtlCode = [[NSString alloc] initWithCString: code.c_str()
+ encoding: NSASCIIStringEncoding];
+#if PRINT_MSL
+ print_msl([mtlCode cStringUsingEncoding: NSASCIIStringEncoding]);
+#endif
+
+ MTLCompileOptions* defaultOptions = [[MTLCompileOptions alloc] init];
+#if defined(SK_BUILD_FOR_MAC) && defined(GR_USE_COMPLETION_HANDLER)
+ bool timedout;
+ id<MTLLibrary> compiledLibrary = GrMtlNewLibraryWithSource(gpu->device(), mtlCode,
+ defaultOptions, &timedout);
+ if (timedout) {
+ // try again
+ compiledLibrary = GrMtlNewLibraryWithSource(gpu->device(), mtlCode,
+ defaultOptions, &timedout);
+ }
+#else
+ NSError* error = nil;
+ id<MTLLibrary> compiledLibrary = [gpu->device() newLibraryWithSource: mtlCode
+ options: defaultOptions
+ error: &error];
+ if (error) {
+ SkDebugf("Error compiling MSL shader: %s\n%s\n",
+ code.c_str(),
+ [[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
+ return nil;
+ }
+#endif
+ return compiledLibrary;
+}
+
+id<MTLLibrary> GrMtlNewLibraryWithSource(id<MTLDevice> device, NSString* mslCode,
+ MTLCompileOptions* options, bool* timedout) {
+ dispatch_semaphore_t compilerSemaphore = dispatch_semaphore_create(0);
+
+ __block dispatch_semaphore_t semaphore = compilerSemaphore;
+ __block id<MTLLibrary> compiledLibrary;
+ [device newLibraryWithSource: mslCode
+ options: options
+ completionHandler:
+ ^(id<MTLLibrary> library, NSError* error) {
+ if (error) {
+ SkDebugf("Error compiling MSL shader: %s\n%s\n",
+ mslCode,
+ [[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
+ }
+ compiledLibrary = library;
+ dispatch_semaphore_signal(semaphore);
+ }
+ ];
+
+    // Wait 100 ms for the compiler (dispatch_time() takes its delta in nanoseconds)
+    if (dispatch_semaphore_wait(compilerSemaphore,
+                                dispatch_time(DISPATCH_TIME_NOW, 100 * NSEC_PER_MSEC))) {
+ SkDebugf("Timeout compiling MSL shader\n");
+ *timedout = true;
+ return nil;
+ }
+
+ *timedout = false;
+ return compiledLibrary;
+}
+
+id<MTLRenderPipelineState> GrMtlNewRenderPipelineStateWithDescriptor(
+ id<MTLDevice> device, MTLRenderPipelineDescriptor* pipelineDescriptor, bool* timedout) {
+ dispatch_semaphore_t pipelineSemaphore = dispatch_semaphore_create(0);
+
+ __block dispatch_semaphore_t semaphore = pipelineSemaphore;
+ __block id<MTLRenderPipelineState> pipelineState;
+ [device newRenderPipelineStateWithDescriptor: pipelineDescriptor
+ completionHandler:
+ ^(id<MTLRenderPipelineState> state, NSError* error) {
+ if (error) {
+ SkDebugf("Error creating pipeline: %s\n",
+ [[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
+ }
+ pipelineState = state;
+ dispatch_semaphore_signal(semaphore);
+ }
+ ];
+
+    // Wait 500 ms for pipeline creation (dispatch_time() takes its delta in nanoseconds)
+    if (dispatch_semaphore_wait(pipelineSemaphore,
+                                dispatch_time(DISPATCH_TIME_NOW, 500 * NSEC_PER_MSEC))) {
+ SkDebugf("Timeout creating pipeline.\n");
+ *timedout = true;
+ return nil;
+ }
+
+ *timedout = false;
+ return pipelineState;
+}
+
+id<MTLTexture> GrGetMTLTextureFromSurface(GrSurface* surface) {
+ id<MTLTexture> mtlTexture = nil;
+
+ GrMtlRenderTarget* renderTarget = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget());
+ GrMtlTexture* texture;
+ if (renderTarget) {
+        // We should not be using this for multisampled render targets
+ if (renderTarget->numSamples() > 1) {
+ SkASSERT(false);
+ return nil;
+ }
+ mtlTexture = renderTarget->mtlColorTexture();
+ } else {
+ texture = static_cast<GrMtlTexture*>(surface->asTexture());
+ if (texture) {
+ mtlTexture = texture->mtlTexture();
+ }
+ }
+ return mtlTexture;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// CPP Utils
+
+GrMTLPixelFormat GrGetMTLPixelFormatFromMtlTextureInfo(const GrMtlTextureInfo& info) {
+ id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
+ return static_cast<GrMTLPixelFormat>(mtlTexture.pixelFormat);
+}
+
+bool GrMtlFormatIsCompressed(MTLPixelFormat mtlFormat) {
+ switch (mtlFormat) {
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatETC2_RGB8:
+ return true;
+#endif
+ default:
+ return false;
+ }
+}
+
+bool GrMtlFormatToCompressionType(MTLPixelFormat mtlFormat,
+ SkImage::CompressionType* compressionType) {
+ switch (mtlFormat) {
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatETC2_RGB8:
+ *compressionType = SkImage::kETC1_CompressionType;
+ return true;
+#endif
+ default:
+ return false;
+ }
+}
+
+#if GR_TEST_UTILS
+bool GrMtlFormatIsBGRA(GrMTLPixelFormat mtlFormat) {
+ return mtlFormat == MTLPixelFormatBGRA8Unorm;
+}
+
+const char* GrMtlFormatToStr(GrMTLPixelFormat mtlFormat) {
+ switch (mtlFormat) {
+ case MTLPixelFormatInvalid: return "Invalid";
+ case MTLPixelFormatRGBA8Unorm: return "RGBA8Unorm";
+ case MTLPixelFormatR8Unorm: return "R8Unorm";
+ case MTLPixelFormatA8Unorm: return "A8Unorm";
+ case MTLPixelFormatBGRA8Unorm: return "BGRA8Unorm";
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatB5G6R5Unorm: return "B5G6R5Unorm";
+#endif
+ case MTLPixelFormatRGBA16Float: return "RGBA16Float";
+ case MTLPixelFormatR16Float: return "R16Float";
+ case MTLPixelFormatRG8Unorm: return "RG8Unorm";
+ case MTLPixelFormatRGB10A2Unorm: return "RGB10A2Unorm";
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatABGR4Unorm: return "ABGR4Unorm";
+#endif
+ case MTLPixelFormatRGBA8Unorm_sRGB: return "RGBA8Unorm_sRGB";
+ case MTLPixelFormatR16Unorm: return "R16Unorm";
+ case MTLPixelFormatRG16Unorm: return "RG16Unorm";
+#ifdef SK_BUILD_FOR_IOS
+ case MTLPixelFormatETC2_RGB8: return "ETC2_RGB8";
+#endif
+ case MTLPixelFormatRGBA16Unorm: return "RGBA16Unorm";
+ case MTLPixelFormatRG16Float: return "RG16Float";
+
+ default: return "Unknown";
+ }
+}
+
+#endif
+
+
+
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.h b/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.h
new file mode 100644
index 0000000000..80cd584836
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrMtlVaryingHandler_DEFINED
+#define GrMtlVaryingHandler_DEFINED
+
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrMtlVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrMtlVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrMtlPipelineStateBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.mm b/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.mm
new file mode 100644
index 0000000000..c01e987d1d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/mtl/GrMtlVaryingHandler.mm
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/mtl/GrMtlVaryingHandler.h"
+
+#if !__has_feature(objc_arc)
+#error This file must be compiled with ARC. Use the -fobjc-arc flag.
+#endif
+
+static void finalize_helper(GrMtlVaryingHandler::VarArray& vars) {
+ int locationIndex;
+ int componentCount = 0;
+ for (locationIndex = 0; locationIndex < vars.count(); locationIndex++) {
+ GrShaderVar& var = vars[locationIndex];
+ // Metal only allows scalars (including bool and char) and vectors as varyings
+ SkASSERT(GrSLTypeVecLength(var.getType()) != -1);
+ componentCount += GrSLTypeVecLength(var.getType());
+
+ SkString location;
+ location.appendf("location = %d", locationIndex);
+ var.addLayoutQualifier(location.c_str());
+ }
+    // The max number of inputs is 60 for iOS and 32 for macOS. The max number of components is 60
+    // for iOS and 128 for macOS. To be conservative, we assert that we have at most 32 varyings
+    // and at most 60 components across all varyings. If we hit these asserts, we can implement a
+    // function in GrMtlCaps to be less conservative.
+ SkASSERT(locationIndex <= 32);
+ SkASSERT(componentCount <= 60);
+}
+
+void GrMtlVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.cpp
new file mode 100644
index 0000000000..8906d29a5e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.cpp
@@ -0,0 +1,880 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+#include "src/gpu/ops/GrAAConvexPathRenderer.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
+}
+
+struct Segment {
+ enum {
+ // These enum values are assumed in member functions below.
+ kLine = 0,
+ kQuad = 1,
+ } fType;
+
+    // a line uses one pt, a quad uses two pts
+ SkPoint fPts[2];
+ // normal to edge ending at each pt
+ SkVector fNorms[2];
+    // If the corner where the previous segment meets this segment is
+    // sharp, fMid is a normalized bisector facing outward.
+ SkVector fMid;
+
+ int countPoints() {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fType + 1;
+ }
+ const SkPoint& endPt() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fPts[fType];
+ }
+ const SkPoint& endNorm() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fNorms[fType];
+ }
+};
+
+typedef SkTArray<Segment, true> SegmentArray;
+
+static bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
+ SkScalar area = 0;
+ SkPoint center = {0, 0};
+ int count = segments.count();
+ SkPoint p0 = {0, 0};
+ if (count > 2) {
+ // We translate the polygon so that the first point is at the origin.
+ // This avoids some precision issues with small area polygons far away
+ // from the origin.
+ p0 = segments[0].endPt();
+ SkPoint pi;
+ SkPoint pj;
+ // the first and last iteration of the below loop would compute
+ // zeros since the starting / ending point is (0,0). So instead we start
+ // at i=1 and make the last iteration i=count-2.
+ pj = segments[1].endPt() - p0;
+ for (int i = 1; i < count - 1; ++i) {
+ pi = pj;
+ pj = segments[i + 1].endPt() - p0;
+
+ SkScalar t = SkPoint::CrossProduct(pi, pj);
+ area += t;
+ center.fX += (pi.fX + pj.fX) * t;
+ center.fY += (pi.fY + pj.fY) * t;
+ }
+ }
+
+ // If the poly has no area then we instead return the average of
+ // its points.
+ if (SkScalarNearlyZero(area)) {
+ SkPoint avg;
+ avg.set(0, 0);
+ for (int i = 0; i < count; ++i) {
+ const SkPoint& pt = segments[i].endPt();
+ avg.fX += pt.fX;
+ avg.fY += pt.fY;
+ }
+ SkScalar denom = SK_Scalar1 / count;
+ avg.scale(denom);
+ *c = avg;
+ } else {
+ area *= 3;
+ area = SkScalarInvert(area);
+ center.scale(area);
+ // undo the translate of p0 to the origin.
+ *c = center + p0;
+ }
+ return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
+}
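+
+// For reference, center_of_mass() implements the standard polygon-centroid
+// identity: with the points translated so p0 sits at the origin, twice the signed
+// area is 2A = sum cross(p_i, p_i+1), and the centroid is
+//     C = p0 + (1 / (6A)) * sum (p_i + p_i+1) * cross(p_i, p_i+1).
+// Hence the "area *= 3" before inverting: 'area' holds 2A and 'center' holds the
+// unscaled sum, so scaling by 1 / (3 * 2A) = 1 / (6A) yields the centroid.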
+
+static bool compute_vectors(SegmentArray* segments,
+ SkPoint* fanPt,
+ SkPathPriv::FirstDirection dir,
+ int* vCount,
+ int* iCount) {
+ if (!center_of_mass(*segments, fanPt)) {
+ return false;
+ }
+ int count = segments->count();
+
+ // Make the normals point towards the outside
+ SkPointPriv::Side normSide;
+ if (dir == SkPathPriv::kCCW_FirstDirection) {
+ normSide = SkPointPriv::kRight_Side;
+ } else {
+ normSide = SkPointPriv::kLeft_Side;
+ }
+
+ int64_t vCount64 = 0;
+ int64_t iCount64 = 0;
+ // compute normals at all points
+ for (int a = 0; a < count; ++a) {
+ Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+
+ const SkPoint* prevPt = &sega.endPt();
+ int n = segb.countPoints();
+ for (int p = 0; p < n; ++p) {
+ segb.fNorms[p] = segb.fPts[p] - *prevPt;
+ segb.fNorms[p].normalize();
+ segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
+ prevPt = &segb.fPts[p];
+ }
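+        // Tally the geometry this segment will emit in create_vertices(): a line
+        // edge uses 5 verts / 9 indices and a quad edge 6 verts / 12 indices
+        // (each count includes the optional interior-fan triangle); the corner
+        // wedges tallied in the next loop add 4 verts / 6 indices apiece.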
+ if (Segment::kLine == segb.fType) {
+ vCount64 += 5;
+ iCount64 += 9;
+ } else {
+ vCount64 += 6;
+ iCount64 += 12;
+ }
+ }
+
+ // compute mid-vectors where segments meet. TODO: Detect shallow corners
+ // and leave out the wedges and close gaps by stitching segments together.
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+ segb.fMid = segb.fNorms[0] + sega.endNorm();
+ segb.fMid.normalize();
+ // corner wedges
+ vCount64 += 4;
+ iCount64 += 6;
+ }
+ if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
+ return false;
+ }
+ *vCount = vCount64;
+ *iCount = iCount64;
+ return true;
+}
+
+struct DegenerateTestData {
+ DegenerateTestData() { fStage = kInitial; }
+ bool isDegenerate() const { return kNonDegenerate != fStage; }
+ enum {
+ kInitial,
+ kPoint,
+ kLine,
+ kNonDegenerate
+ } fStage;
+ SkPoint fFirstPoint;
+ SkVector fLineNormal;
+ SkScalar fLineC;
+};
+
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = kClose * kClose;
+
+static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
+ switch (data->fStage) {
+ case DegenerateTestData::kInitial:
+ data->fFirstPoint = pt;
+ data->fStage = DegenerateTestData::kPoint;
+ break;
+ case DegenerateTestData::kPoint:
+ if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
+ data->fLineNormal = pt - data->fFirstPoint;
+ data->fLineNormal.normalize();
+ data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
+ data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
+ data->fStage = DegenerateTestData::kLine;
+ }
+ break;
+ case DegenerateTestData::kLine:
+ if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
+ data->fStage = DegenerateTestData::kNonDegenerate;
+ }
+ case DegenerateTestData::kNonDegenerate:
+ break;
+ default:
+ SK_ABORT("Unexpected degenerate test stage.");
+ }
+}
+
+static inline bool get_direction(const SkPath& path, const SkMatrix& m,
+ SkPathPriv::FirstDirection* dir) {
+ // At this point, we've already returned true from canDraw(), which checked that the path's
+ // direction could be determined, so this should just be fetching the cached direction.
+ // However, if perspective is involved, we're operating on a transformed path, which may no
+ // longer have a computable direction.
+ if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
+ return false;
+ }
+
+ // check whether m reverses the orientation
+ SkASSERT(!m.hasPerspective());
+ SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
+ m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
+ if (det2x2 < 0) {
+ *dir = SkPathPriv::OppositeFirstDirection(*dir);
+ }
+
+ return true;
+}
+
+static inline void add_line_to_segment(const SkPoint& pt,
+ SegmentArray* segments) {
+ segments->push_back();
+ segments->back().fType = Segment::kLine;
+ segments->back().fPts[0] = pt;
+}
+
+static inline void add_quad_segment(const SkPoint pts[3],
+ SegmentArray* segments) {
+ if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
+ if (pts[0] != pts[2]) {
+ add_line_to_segment(pts[2], segments);
+ }
+ } else {
+ segments->push_back();
+ segments->back().fType = Segment::kQuad;
+ segments->back().fPts[0] = pts[1];
+ segments->back().fPts[1] = pts[2];
+ }
+}
+
+static inline void add_cubic_segments(const SkPoint pts[4],
+ SkPathPriv::FirstDirection dir,
+ SegmentArray* segments) {
+ SkSTArray<15, SkPoint, true> quads;
+ GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
+ int count = quads.count();
+ for (int q = 0; q < count; q += 3) {
+ add_quad_segment(&quads[q], segments);
+ }
+}
+
+static bool get_segments(const SkPath& path,
+ const SkMatrix& m,
+ SegmentArray* segments,
+ SkPoint* fanPt,
+ int* vCount,
+ int* iCount) {
+ SkPath::Iter iter(path, true);
+    // This renderer over-emphasizes very thin path regions. We use the distance
+    // from the sample to the path to compute coverage. Every pixel intersected
+    // by the path will be hit, and the maximum distance is sqrt(2)/2, so we never
+    // notice that the sample may be close to a very thin area of the path and
+    // thus should be nearly transparent. This is particularly egregious for
+    // degenerate line paths, so we detect paths that are very close to a line
+    // (zero area) and draw nothing.
+ DegenerateTestData degenerateData;
+ SkPathPriv::FirstDirection dir;
+ if (!get_direction(path, m, &dir)) {
+ return false;
+ }
+
+ for (;;) {
+ SkPoint pts[4];
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ m.mapPoints(pts, 1);
+ update_degenerate_test(&degenerateData, pts[0]);
+ break;
+ case SkPath::kLine_Verb: {
+ if (!SkPathPriv::AllPointsEq(pts, 2)) {
+ m.mapPoints(&pts[1], 1);
+ update_degenerate_test(&degenerateData, pts[1]);
+ add_line_to_segment(pts[1], segments);
+ }
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ if (!SkPathPriv::AllPointsEq(pts, 3)) {
+ m.mapPoints(pts, 3);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ add_quad_segment(pts, segments);
+ }
+ break;
+ case SkPath::kConic_Verb: {
+ if (!SkPathPriv::AllPointsEq(pts, 3)) {
+ m.mapPoints(pts, 3);
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
+ update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
+ add_quad_segment(quadPts + 2*i, segments);
+ }
+ }
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ if (!SkPathPriv::AllPointsEq(pts, 4)) {
+ m.mapPoints(pts, 4);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ update_degenerate_test(&degenerateData, pts[3]);
+ add_cubic_segments(pts, dir, segments);
+ }
+ break;
+ }
+ case SkPath::kDone_Verb:
+ if (degenerateData.isDegenerate()) {
+ return false;
+ } else {
+ return compute_vectors(segments, fanPt, dir, vCount, iCount);
+ }
+ default:
+ break;
+ }
+ }
+}
+
+struct Draw {
+ Draw() : fVertexCnt(0), fIndexCnt(0) {}
+ int fVertexCnt;
+ int fIndexCnt;
+};
+
+typedef SkTArray<Draw, true> DrawArray;
+
+static void create_vertices(const SegmentArray& segments,
+ const SkPoint& fanPt,
+ const GrVertexColor& color,
+ DrawArray* draws,
+ GrVertexWriter& verts,
+ uint16_t* idxs,
+ size_t vertexStride) {
+ Draw* draw = &draws->push_back();
+ // alias just to make vert/index assignments easier to read.
+ int* v = &draw->fVertexCnt;
+ int* i = &draw->fIndexCnt;
+ const size_t uvOffset = sizeof(SkPoint) + color.size();
+
+ int count = segments.count();
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = segments[a];
+ int b = (a + 1) % count;
+ const Segment& segb = segments[b];
+
+ // Check whether adding the verts for this segment to the current draw would cause index
+ // values to overflow.
+ int vCount = 4;
+ if (Segment::kLine == segb.fType) {
+ vCount += 5;
+ } else {
+ vCount += 6;
+ }
+ if (draw->fVertexCnt + vCount > (1 << 16)) {
+ idxs += *i;
+ draw = &draws->push_back();
+ v = &draw->fVertexCnt;
+ i = &draw->fIndexCnt;
+ }
+
+ const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
+
+ // FIXME: These tris are inset in the 1 unit arc around the corner
+ SkPoint p0 = sega.endPt();
+ // Position, Color, UV, D0, D1
+ verts.write(p0, color, SkPoint{0, 0}, negOneDists);
+ verts.write(p0 + sega.endNorm(), color, SkPoint{0, -SK_Scalar1}, negOneDists);
+ verts.write(p0 + segb.fMid, color, SkPoint{0, -SK_Scalar1}, negOneDists);
+ verts.write(p0 + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
+
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+ idxs[*i + 3] = *v + 0;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *v += 4;
+ *i += 6;
+
+ if (Segment::kLine == segb.fType) {
+ // we draw the line edge as a degenerate quad (u is 0, v is the
+ // signed distance to the edge)
+ SkPoint v1Pos = sega.endPt();
+ SkPoint v2Pos = segb.fPts[0];
+ SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);
+
+ verts.write(fanPt, color, SkPoint{0, dist}, negOneDists);
+ verts.write(v1Pos, color, SkPoint{0, 0}, negOneDists);
+ verts.write(v2Pos, color, SkPoint{0, 0}, negOneDists);
+ verts.write(v1Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
+ verts.write(v2Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *i += 6;
+
+ // Draw the interior fan if it exists.
+ // TODO: Detect and combine colinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 5;
+ } else {
+ void* quadVertsBegin = verts.fPtr;
+
+ SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
+
+ SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
+ SkScalar c1 = segb.fNorms[1].dot(qpts[2]);
+ GrVertexWriter::Skip<SkPoint> skipUVs;
+
+ verts.write(fanPt,
+ color, skipUVs,
+ -segb.fNorms[0].dot(fanPt) + c0,
+ -segb.fNorms[1].dot(fanPt) + c1);
+
+ verts.write(qpts[0],
+ color, skipUVs,
+ 0.0f,
+ -segb.fNorms[1].dot(qpts[0]) + c1);
+
+ verts.write(qpts[2],
+ color, skipUVs,
+ -segb.fNorms[0].dot(qpts[2]) + c0,
+ 0.0f);
+
+ verts.write(qpts[0] + segb.fNorms[0],
+ color, skipUVs,
+ -SK_ScalarMax/100,
+ -SK_ScalarMax/100);
+
+ verts.write(qpts[2] + segb.fNorms[1],
+ color, skipUVs,
+ -SK_ScalarMax/100,
+ -SK_ScalarMax/100);
+
+ SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
+ midVec.normalize();
+
+ verts.write(qpts[1] + midVec,
+ color, skipUVs,
+ -SK_ScalarMax/100,
+ -SK_ScalarMax/100);
+
+ GrPathUtils::QuadUVMatrix toUV(qpts);
+ toUV.apply(quadVertsBegin, 6, vertexStride, uvOffset);
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ idxs[*i + 6] = *v + 5;
+ idxs[*i + 7] = *v + 3;
+ idxs[*i + 8] = *v + 4;
+
+ *i += 9;
+
+ // Draw the interior fan if it exists.
+ // TODO: Detect and combine colinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 6;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
+ * two components of the vertex attribute. Coverage is based on signed
+ * distance with negative being inside, positive outside. The edge is specified in
+ * window space (y-down). If either the third or fourth component of the interpolated
+ * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
+ * attempt to trim to a portion of the infinite quad.
+ * Requires shader derivative instruction support.
+ */
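+
+/*
+ * A sketch of the coverage math the fragment shader below implements: for the
+ * implicit curve f(u,v) = u^2 - v, the chain rule gives the screen-space gradient
+ *     grad f = (2u*du/dx - dv/dx, 2u*du/dy - dv/dy),
+ * and f / |grad f| is a first-order estimate of the signed pixel distance to the
+ * curve, which is remapped to coverage via saturate(0.5 - f / |grad f|).
+ */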
+
+class QuadEdgeEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(const SkMatrix& localMatrix, bool usesLocalCoords,
+ bool wideColor) {
+ return sk_sp<GrGeometryProcessor>(
+ new QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor));
+ }
+
+ ~QuadEdgeEffect() override {}
+
+ const char* name() const override { return "QuadEdge"; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(qe);
+
+ GrGLSLVarying v(kHalf4_GrSLType);
+ varyingHandler->addVarying("QuadEdge", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());
+
+ // Setup pass through color
+ varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ qe.fInPosition.asShaderVar(),
+ qe.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppendf("half edgeAlpha;");
+
+ // keep the derivative instructions outside the conditional
+ fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
+ fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
+            // Today we know z and w are in device space; we could instead use derivatives.
+ fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
+ v.fsIn());
+            fragBuilder->codeAppendf("} else {");
+ fragBuilder->codeAppendf("half2 gF = half2(2.0*%s.x*duvdx.x - duvdx.y,"
+ " 2.0*%s.x*duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
+ v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = "
+ "saturate(0.5 - edgeAlpha / length(gF));}");
+
+ fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
+ b->add32(SkToBool(qee.fUsesLocalCoords && qee.fLocalMatrix.hasPerspective()));
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
+ this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
+ : INHERITED(kQuadEdgeEffect_ClassID)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kHalf4_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+ }
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInQuadEdge;
+
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
+ // Doesn't work without derivative instructions.
+ return d->caps()->shaderCaps()->shaderDerivativeSupport()
+ ? QuadEdgeEffect::Make(GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool(),
+ d->fRandom->nextBool())
+ : nullptr;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPathRenderer::CanDrawPath
+GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // This check requires convexity and known direction, since the direction is used to build
+ // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
+ if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
+ (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
+ !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
+ args.fShape->knownDirection()) {
+ return CanDrawPath::kYes;
+ }
+ return CanDrawPath::kNo;
+}
+
+namespace {
+
+class AAConvexPathOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrUserStencilSettings* stencilSettings) {
+ return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
+ stencilSettings);
+ }
+
+ AAConvexPathOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkPath& path,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
+ fPaths.emplace_back(PathData{viewMatrix, path, color});
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsHairline::kNo);
+ }
+
+ const char* name() const override { return "AAConvexPathOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Count: %d\n", fPaths.count());
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, &fPaths.back().fColor, &fWideColor);
+ }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ int instanceCount = fPaths.count();
+
+ SkMatrix invert;
+ if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
+ return;
+ }
+
+ // Setup GrGeometryProcessor
+ sk_sp<GrGeometryProcessor> quadProcessor(
+ QuadEdgeEffect::Make(invert, fHelper.usesLocalCoords(), fWideColor));
+ const size_t kVertexStride = quadProcessor->vertexStride();
+
+ // TODO generate all segments for all paths and use one vertex buffer
+ for (int i = 0; i < instanceCount; i++) {
+ const PathData& args = fPaths[i];
+
+            // If the matrix has perspective we transform the path up front, relying
+            // on the fact that SkPath::transform subdivides based on perspective.
+            // Otherwise, we apply the view matrix when copying points into the
+            // segment representation.
+ const SkMatrix* viewMatrix = &args.fViewMatrix;
+
+ // We avoid initializing the path unless we have to
+ const SkPath* pathPtr = &args.fPath;
+ SkTLazy<SkPath> tmpPath;
+ if (viewMatrix->hasPerspective()) {
+ SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
+ tmpPathPtr->setIsVolatile(true);
+ tmpPathPtr->transform(*viewMatrix);
+ viewMatrix = &SkMatrix::I();
+ pathPtr = tmpPathPtr;
+ }
+
+ int vertexCount;
+ int indexCount;
+ enum {
+ kPreallocSegmentCnt = 512 / sizeof(Segment),
+ kPreallocDrawCnt = 4,
+ };
+ SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
+ SkPoint fanPt;
+
+ if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
+ &indexCount)) {
+ continue;
+ }
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+
+ GrVertexWriter verts{target->makeVertexSpace(kVertexStride, vertexCount,
+ &vertexBuffer, &firstVertex)};
+
+ if (!verts.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex;
+
+ uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ SkSTArray<kPreallocDrawCnt, Draw, true> draws;
+ GrVertexColor color(args.fColor, fWideColor);
+ create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
+
+ GrMesh* meshes = target->allocMeshes(draws.count());
+ for (int j = 0; j < draws.count(); ++j) {
+ const Draw& draw = draws[j];
+ meshes[j].setPrimitiveType(GrPrimitiveType::kTriangles);
+ meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
+ draw.fVertexCnt - 1, GrPrimitiveRestart::kNo);
+ meshes[j].setVertexData(vertexBuffer, firstVertex);
+ firstIndex += draw.fIndexCnt;
+ firstVertex += draw.fVertexCnt;
+ }
+ target->recordDraw(quadProcessor, meshes, draws.count());
+ }
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ AAConvexPathOp* that = t->cast<AAConvexPathOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+ if (fHelper.usesLocalCoords() &&
+ !fPaths[0].fViewMatrix.cheapEqualTo(that->fPaths[0].fViewMatrix)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct PathData {
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkPMColor4f fColor;
+ };
+
+ Helper fHelper;
+ SkSTArray<1, PathData, true> fPaths;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrAAConvexPathRenderer::onDrawPath");
+ SkASSERT(args.fRenderTargetContext->numSamples() <= 1);
+ SkASSERT(!args.fShape->isEmpty());
+
+ SkPath path;
+ args.fShape->asPath(&path);
+
+ std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
+ *args.fViewMatrix,
+ path, args.fUserStencilSettings);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPathConvex(random);
+ const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
+ return AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path, stencilSettings);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.h
new file mode 100644
index 0000000000..b6b01dc725
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAConvexPathRenderer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexPathRenderer_DEFINED
+#define GrAAConvexPathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrAAConvexPathRenderer : public GrPathRenderer {
+public:
+ GrAAConvexPathRenderer();
+
+private:
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.cpp b/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.cpp
new file mode 100644
index 0000000000..9952491b64
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.cpp
@@ -0,0 +1,1108 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkString.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/ops/GrAAConvexTessellator.h"
+
+// Next steps:
+// add an interactive sample app slide
+// add debug check that all points are suitably far apart
+// test more degenerate cases
+
+// The tolerance for fusing vertices and eliminating colinear lines (in device space).
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = kClose * kClose;
+
+// tessellation tolerance values, in device space pixels
+static const SkScalar kQuadTolerance = 0.2f;
+static const SkScalar kCubicTolerance = 0.2f;
+static const SkScalar kConicTolerance = 0.25f;
+
+// dot product below which we use a round cap between curve segments
+static const SkScalar kRoundCapThreshold = 0.8f;
+
+// dot product above which we consider two adjacent curves to be part of the "same" curve
+static const SkScalar kCurveConnectionThreshold = 0.8f;
+
+static bool intersect(const SkPoint& p0, const SkPoint& n0,
+ const SkPoint& p1, const SkPoint& n1,
+ SkScalar* t) {
+ const SkPoint v = p1 - p0;
+ SkScalar perpDot = n0.fX * n1.fY - n0.fY * n1.fX;
+ if (SkScalarNearlyZero(perpDot)) {
+ return false;
+ }
+ *t = (v.fX * n1.fY - v.fY * n1.fX) / perpDot;
+ SkASSERT(SkScalarIsFinite(*t));
+ return true;
+}
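+
+// Derivation for intersect(): solving p0 + t*n0 == p1 + s*n1 for t, eliminate s by
+// taking the 2D cross product of both sides with n1:
+//     t = cross(p1 - p0, n1) / cross(n0, n1),
+// which is the expression above with v = p1 - p0 and perpDot = cross(n0, n1).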
+
+// This is a special case version of intersect where we have the vector
+// perpendicular to the second line rather than the vector parallel to it.
+static SkScalar perp_intersect(const SkPoint& p0, const SkPoint& n0,
+ const SkPoint& p1, const SkPoint& perp) {
+ const SkPoint v = p1 - p0;
+ SkScalar perpDot = n0.dot(perp);
+ return v.dot(perp) / perpDot;
+}
+
+static bool duplicate_pt(const SkPoint& p0, const SkPoint& p1) {
+ SkScalar distSq = SkPointPriv::DistanceToSqd(p0, p1);
+ return distSq < kCloseSqd;
+}
+
+static bool points_are_colinear_and_b_is_middle(const SkPoint& a, const SkPoint& b,
+ const SkPoint& c) {
+ // 'area' is twice the area of the triangle with corners a, b, and c.
+ SkScalar area = a.fX * (b.fY - c.fY) + b.fX * (c.fY - a.fY) + c.fX * (a.fY - b.fY);
+ if (SkScalarAbs(area) >= 2 * kCloseSqd) {
+ return false;
+ }
+ return (a - b).dot(b - c) >= 0;
+}
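+
+// Notes on the test above: 'area' expands to the 2D cross product
+// cross(b - a, c - a), i.e. twice the signed area of triangle abc, so a small
+// magnitude means the points are nearly colinear. The dot-product check then
+// confirms that b is the middle point: (a - b) and (b - c) point the same way
+// exactly when b lies between a and c.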
+
+int GrAAConvexTessellator::addPt(const SkPoint& pt,
+ SkScalar depth,
+ SkScalar coverage,
+ bool movable,
+ CurveState curve) {
+ SkASSERT(pt.isFinite());
+ this->validate();
+
+ int index = fPts.count();
+ *fPts.push() = pt;
+ *fCoverages.push() = coverage;
+ *fMovable.push() = movable;
+ *fCurveState.push() = curve;
+
+ this->validate();
+ return index;
+}
+
+void GrAAConvexTessellator::popLastPt() {
+ this->validate();
+
+ fPts.pop();
+ fCoverages.pop();
+ fMovable.pop();
+ fCurveState.pop();
+
+ this->validate();
+}
+
+void GrAAConvexTessellator::popFirstPtShuffle() {
+ this->validate();
+
+ fPts.removeShuffle(0);
+ fCoverages.removeShuffle(0);
+ fMovable.removeShuffle(0);
+ fCurveState.removeShuffle(0);
+
+ this->validate();
+}
+
+void GrAAConvexTessellator::updatePt(int index,
+ const SkPoint& pt,
+ SkScalar depth,
+ SkScalar coverage) {
+ this->validate();
+ SkASSERT(fMovable[index]);
+
+ fPts[index] = pt;
+ fCoverages[index] = coverage;
+}
+
+void GrAAConvexTessellator::addTri(int i0, int i1, int i2) {
+ if (i0 == i1 || i1 == i2 || i2 == i0) {
+ return;
+ }
+
+ *fIndices.push() = i0;
+ *fIndices.push() = i1;
+ *fIndices.push() = i2;
+}
+
+void GrAAConvexTessellator::rewind() {
+ fPts.rewind();
+ fCoverages.rewind();
+ fMovable.rewind();
+ fIndices.rewind();
+ fNorms.rewind();
+ fCurveState.rewind();
+ fInitialRing.rewind();
+ fCandidateVerts.rewind();
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ fRings.rewind(); // TODO: leak in this case!
+#else
+ fRings[0].rewind();
+ fRings[1].rewind();
+#endif
+}
+
+void GrAAConvexTessellator::computeNormals() {
+ auto normalToVector = [this](SkVector v) {
+ SkVector n = SkPointPriv::MakeOrthog(v, fSide);
+ SkAssertResult(n.normalize());
+ SkASSERT(SkScalarNearlyEqual(1.0f, n.length()));
+ return n;
+ };
+
+ // Check the cross product of the final trio
+ fNorms.append(fPts.count());
+ fNorms[0] = fPts[1] - fPts[0];
+ fNorms.top() = fPts[0] - fPts.top();
+ SkScalar cross = SkPoint::CrossProduct(fNorms[0], fNorms.top());
+ fSide = (cross > 0.0f) ? SkPointPriv::kRight_Side : SkPointPriv::kLeft_Side;
+ fNorms[0] = normalToVector(fNorms[0]);
+ for (int cur = 1; cur < fNorms.count() - 1; ++cur) {
+ fNorms[cur] = normalToVector(fPts[cur + 1] - fPts[cur]);
+ }
+ fNorms.top() = normalToVector(fNorms.top());
+}
+
+void GrAAConvexTessellator::computeBisectors() {
+ fBisectors.setCount(fNorms.count());
+
+ int prev = fBisectors.count() - 1;
+ for (int cur = 0; cur < fBisectors.count(); prev = cur, ++cur) {
+ fBisectors[cur] = fNorms[cur] + fNorms[prev];
+ if (!fBisectors[cur].normalize()) {
+ fBisectors[cur] = SkPointPriv::MakeOrthog(fNorms[cur], (SkPointPriv::Side)-fSide) +
+ SkPointPriv::MakeOrthog(fNorms[prev], fSide);
+ SkAssertResult(fBisectors[cur].normalize());
+ } else {
+ fBisectors[cur].negate(); // make the bisector face in
+ }
+ if (fCurveState[prev] == kIndeterminate_CurveState) {
+ if (fCurveState[cur] == kSharp_CurveState) {
+ fCurveState[prev] = kSharp_CurveState;
+ } else {
+ if (SkScalarAbs(fNorms[cur].dot(fNorms[prev])) > kCurveConnectionThreshold) {
+ fCurveState[prev] = kCurve_CurveState;
+ fCurveState[cur] = kCurve_CurveState;
+ } else {
+ fCurveState[prev] = kSharp_CurveState;
+ fCurveState[cur] = kSharp_CurveState;
+ }
+ }
+ }
+
+ SkASSERT(SkScalarNearlyEqual(1.0f, fBisectors[cur].length()));
+ }
+}
+
+// Create as many rings as we need to (up to a predefined limit) to reach the specified target
+// depth. If we are in fill mode, the final ring will automatically be fanned.
+bool GrAAConvexTessellator::createInsetRings(Ring& previousRing, SkScalar initialDepth,
+ SkScalar initialCoverage, SkScalar targetDepth,
+ SkScalar targetCoverage, Ring** finalRing) {
+ static const int kMaxNumRings = 8;
+
+ if (previousRing.numPts() < 3) {
+ return false;
+ }
+ Ring* currentRing = &previousRing;
+ int i;
+ for (i = 0; i < kMaxNumRings; ++i) {
+ Ring* nextRing = this->getNextRing(currentRing);
+ SkASSERT(nextRing != currentRing);
+
+ bool done = this->createInsetRing(*currentRing, nextRing, initialDepth, initialCoverage,
+ targetDepth, targetCoverage, i == 0);
+ currentRing = nextRing;
+ if (done) {
+ break;
+ }
+ currentRing->init(*this);
+ }
+
+ if (kMaxNumRings == i) {
+ // Bail if we've exceeded the amount of time we want to throw at this.
+ this->terminate(*currentRing);
+ return false;
+ }
+ bool done = currentRing->numPts() >= 3;
+ if (done) {
+ currentRing->init(*this);
+ }
+ *finalRing = currentRing;
+ return done;
+}
+
+// The general idea here is to, conceptually, start with the original polygon and slide
+// the vertices along the bisectors until the first intersection. At that
+// point two of the edges collapse and the process repeats on the new polygon.
+// The polygon state is captured in the Ring class while the GrAAConvexTessellator
+// controls the iteration. The CandidateVerts holds the formative points for the
+// next ring.
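+//
+// One inset step, roughly (a sketch of createInsetRing() below):
+//   1) for each adjacent vertex pair, intersect the rays p_i + t*bisector_i and
+//      p_j + t*bisector_j, keeping the intersection of minimum depth;
+//   2) clamp that depth to the target depth for this ring;
+//   3) move every vertex along its bisector to that depth, fusing points that
+//      land (nearly) on top of one another, to form the next, smaller ring.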
+bool GrAAConvexTessellator::tessellate(const SkMatrix& m, const SkPath& path) {
+ if (!this->extractFromPath(m, path)) {
+ return false;
+ }
+
+ SkScalar coverage = 1.0f;
+ SkScalar scaleFactor = 0.0f;
+
+ if (SkStrokeRec::kStrokeAndFill_Style == fStyle) {
+ SkASSERT(m.isSimilarity());
+ scaleFactor = m.getMaxScale(); // x and y scale are the same
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring outerStrokeAndAARing;
+ this->createOuterRing(fInitialRing,
+ effectiveStrokeWidth / 2 + kAntialiasingRadius, 0.0,
+ &outerStrokeAndAARing);
+
+ // discard all the triangles added between the originating ring and the new outer ring
+ fIndices.rewind();
+
+ outerStrokeAndAARing.init(*this);
+
+ outerStrokeAndAARing.makeOriginalRing();
+
+ // Add the outer stroke ring's normals to the originating ring's normals
+ // so it can also act as an originating ring
+ fNorms.setCount(fNorms.count() + outerStrokeAndAARing.numPts());
+ for (int i = 0; i < outerStrokeAndAARing.numPts(); ++i) {
+ SkASSERT(outerStrokeAndAARing.index(i) < fNorms.count());
+ fNorms[outerStrokeAndAARing.index(i)] = outerStrokeAndAARing.norm(i);
+ }
+
+ // the bisectors are only needed for the computation of the outer ring
+ fBisectors.rewind();
+
+ Ring* insetAARing;
+ this->createInsetRings(outerStrokeAndAARing,
+ 0.0f, 0.0f, 2*kAntialiasingRadius, 1.0f,
+ &insetAARing);
+
+ SkDEBUGCODE(this->validate();)
+ return true;
+ }
+
+ if (SkStrokeRec::kStroke_Style == fStyle) {
+ SkASSERT(fStrokeWidth >= 0.0f);
+ SkASSERT(m.isSimilarity());
+ scaleFactor = m.getMaxScale(); // x and y scale are the same
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring outerStrokeRing;
+ this->createOuterRing(fInitialRing, effectiveStrokeWidth / 2 - kAntialiasingRadius,
+ coverage, &outerStrokeRing);
+ outerStrokeRing.init(*this);
+ Ring outerAARing;
+ this->createOuterRing(outerStrokeRing, kAntialiasingRadius * 2, 0.0f, &outerAARing);
+ } else {
+ Ring outerAARing;
+ this->createOuterRing(fInitialRing, kAntialiasingRadius, 0.0f, &outerAARing);
+ }
+
+ // the bisectors are only needed for the computation of the outer ring
+ fBisectors.rewind();
+ if (SkStrokeRec::kStroke_Style == fStyle && fInitialRing.numPts() > 2) {
+ SkASSERT(fStrokeWidth >= 0.0f);
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring* insetStrokeRing;
+ SkScalar strokeDepth = effectiveStrokeWidth / 2 - kAntialiasingRadius;
+ if (this->createInsetRings(fInitialRing, 0.0f, coverage, strokeDepth, coverage,
+ &insetStrokeRing)) {
+ Ring* insetAARing;
+ this->createInsetRings(*insetStrokeRing, strokeDepth, coverage, strokeDepth +
+ kAntialiasingRadius * 2, 0.0f, &insetAARing);
+ }
+ } else {
+ Ring* insetAARing;
+ this->createInsetRings(fInitialRing, 0.0f, 0.5f, kAntialiasingRadius, 1.0f, &insetAARing);
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+SkScalar GrAAConvexTessellator::computeDepthFromEdge(int edgeIdx, const SkPoint& p) const {
+ SkASSERT(edgeIdx < fNorms.count());
+
+ SkPoint v = p - fPts[edgeIdx];
+ SkScalar depth = -fNorms[edgeIdx].dot(v);
+ return depth;
+}
+
+// Find a point that is 'desiredDepth' away from the 'edgeIdx'-th edge and lies
+// along the 'bisector' from the 'startIdx'-th point.
+bool GrAAConvexTessellator::computePtAlongBisector(int startIdx,
+ const SkVector& bisector,
+ int edgeIdx,
+ SkScalar desiredDepth,
+ SkPoint* result) const {
+ const SkPoint& norm = fNorms[edgeIdx];
+
+ // First find the point where the edge and the bisector intersect
+ SkPoint newP;
+
+ SkScalar t = perp_intersect(fPts[startIdx], bisector, fPts[edgeIdx], norm);
+ if (SkScalarNearlyEqual(t, 0.0f)) {
+ // the start point was one of the original ring points
+ SkASSERT(startIdx < fPts.count());
+ newP = fPts[startIdx];
+ } else if (t < 0.0f) {
+ newP = bisector;
+ newP.scale(t);
+ newP += fPts[startIdx];
+ } else {
+ return false;
+ }
+
+ // Then offset along the bisector from that point the correct distance
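+    // (moving unit distance along the bisector changes the depth from the edge by
+    // -(bisector . norm), so the required parameter is t = -desiredDepth / dot)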
+ SkScalar dot = bisector.dot(norm);
+ t = -desiredDepth / dot;
+ *result = bisector;
+ result->scale(t);
+ *result += newP;
+
+ return true;
+}
+
+bool GrAAConvexTessellator::extractFromPath(const SkMatrix& m, const SkPath& path) {
+ SkASSERT(SkPath::kConvex_Convexity == path.getConvexity());
+
+ SkRect bounds = path.getBounds();
+ m.mapRect(&bounds);
+ if (!bounds.isFinite()) {
+ // We could do something smarter here like clip the path based on the bounds of the dst.
+ // We'd have to be careful about strokes to ensure we don't draw something wrong.
+ return false;
+ }
+
+ // Outer ring: 3*numPts
+ // Middle ring: numPts
+ // Presumptive inner ring: numPts
+ this->reservePts(5*path.countPoints());
+ // Outer ring: 12*numPts
+ // Middle ring: 0
+ // Presumptive inner ring: 6*numPts + 6
+ fIndices.setReserve(18*path.countPoints() + 6);
+
+ // TODO: is there a faster way to extract the points from the path? Perhaps
+ // get all the points via a new entry point, transform them all in bulk
+ // and then walk them to find duplicates?
+ SkPathEdgeIter iter(path);
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine:
+ if (!SkPathPriv::AllPointsEq(e.fPts, 2)) {
+ this->lineTo(m, e.fPts[1], kSharp_CurveState);
+ }
+ break;
+ case SkPathEdgeIter::Edge::kQuad:
+ if (!SkPathPriv::AllPointsEq(e.fPts, 3)) {
+ this->quadTo(m, e.fPts);
+ }
+ break;
+ case SkPathEdgeIter::Edge::kCubic:
+ if (!SkPathPriv::AllPointsEq(e.fPts, 4)) {
+ this->cubicTo(m, e.fPts);
+ }
+ break;
+ case SkPathEdgeIter::Edge::kConic:
+ if (!SkPathPriv::AllPointsEq(e.fPts, 3)) {
+ this->conicTo(m, e.fPts, iter.conicWeight());
+ }
+ break;
+ }
+ }
+
+ if (this->numPts() < 2) {
+ return false;
+ }
+
+ // check if last point is a duplicate of the first point. If so, remove it.
+ if (duplicate_pt(fPts[this->numPts()-1], fPts[0])) {
+ this->popLastPt();
+ }
+
+ // Remove any lingering colinear points where the path wraps around
+ bool noRemovalsToDo = false;
+ while (!noRemovalsToDo && this->numPts() >= 3) {
+ if (points_are_colinear_and_b_is_middle(fPts[fPts.count() - 2], fPts.top(), fPts[0])) {
+ this->popLastPt();
+ } else if (points_are_colinear_and_b_is_middle(fPts.top(), fPts[0], fPts[1])) {
+ this->popFirstPtShuffle();
+ } else {
+ noRemovalsToDo = true;
+ }
+ }
+
+ // Compute the normals and bisectors.
+ SkASSERT(fNorms.empty());
+ if (this->numPts() >= 3) {
+ this->computeNormals();
+ this->computeBisectors();
+ } else if (this->numPts() == 2) {
+ // We've got two points, so we're degenerate.
+ if (fStyle == SkStrokeRec::kFill_Style) {
+ // it's a fill, so we don't need to worry about degenerate paths
+ return false;
+ }
+ // For stroking, we still need to process the degenerate path, so fix it up
+ fSide = SkPointPriv::kLeft_Side;
+
+ fNorms.append(2);
+ fNorms[0] = SkPointPriv::MakeOrthog(fPts[1] - fPts[0], fSide);
+ fNorms[0].normalize();
+ fNorms[1] = -fNorms[0];
+ SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[0].length()));
+ // we won't actually use the bisectors, so just push zeroes
+ fBisectors.push_back(SkPoint::Make(0.0, 0.0));
+ fBisectors.push_back(SkPoint::Make(0.0, 0.0));
+ } else {
+ return false;
+ }
+
+ fCandidateVerts.setReserve(this->numPts());
+ fInitialRing.setReserve(this->numPts());
+ for (int i = 0; i < this->numPts(); ++i) {
+ fInitialRing.addIdx(i, i);
+ }
+ fInitialRing.init(fNorms, fBisectors);
+
+ this->validate();
+ return true;
+}
+
+GrAAConvexTessellator::Ring* GrAAConvexTessellator::getNextRing(Ring* lastRing) {
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ Ring* ring = *fRings.push() = new Ring;
+ ring->setReserve(fInitialRing.numPts());
+ ring->rewind();
+ return ring;
+#else
+ // Flip flop back and forth between fRings[0] & fRings[1]
+ int nextRing = (lastRing == &fRings[0]) ? 1 : 0;
+ fRings[nextRing].setReserve(fInitialRing.numPts());
+ fRings[nextRing].rewind();
+ return &fRings[nextRing];
+#endif
+}
+
+void GrAAConvexTessellator::fanRing(const Ring& ring) {
+ // fan out from point 0
+ int startIdx = ring.index(0);
+ for (int cur = ring.numPts() - 2; cur >= 0; --cur) {
+ this->addTri(startIdx, ring.index(cur), ring.index(cur + 1));
+ }
+}
+
+void GrAAConvexTessellator::createOuterRing(const Ring& previousRing, SkScalar outset,
+ SkScalar coverage, Ring* nextRing) {
+ const int numPts = previousRing.numPts();
+ if (numPts == 0) {
+ return;
+ }
+
+ int prev = numPts - 1;
+ int lastPerpIdx = -1, firstPerpIdx = -1;
+
+ const SkScalar outsetSq = outset * outset;
+ SkScalar miterLimitSq = outset * fMiterLimit;
+ miterLimitSq = miterLimitSq * miterLimitSq;
+ for (int cur = 0; cur < numPts; ++cur) {
+ int originalIdx = previousRing.index(cur);
+ // For each vertex of the original polygon we add at least two points to the
+ // outset polygon - one extending perpendicular to each impinging edge. Connecting these
+ // two points yields a bevel join. We need one additional point for a mitered join, and
+ // a round join requires one or more points depending upon curvature.
+
+ // The perpendicular point for the last edge
+ SkPoint normal1 = previousRing.norm(prev);
+ SkPoint perp1 = normal1;
+ perp1.scale(outset);
+ perp1 += this->point(originalIdx);
+
+ // The perpendicular point for the next edge.
+ SkPoint normal2 = previousRing.norm(cur);
+ SkPoint perp2 = normal2;
+ perp2.scale(outset);
+ perp2 += fPts[originalIdx];
+
+ CurveState curve = fCurveState[originalIdx];
+
+ // We know it isn't a duplicate of the prior point (since it and this
+ // one are just perpendicular offsets from the non-merged polygon points)
+ int perp1Idx = this->addPt(perp1, -outset, coverage, false, curve);
+ nextRing->addIdx(perp1Idx, originalIdx);
+
+ int perp2Idx;
+ // For very shallow angles all the corner points could fuse.
+ if (duplicate_pt(perp2, this->point(perp1Idx))) {
+ perp2Idx = perp1Idx;
+ } else {
+ perp2Idx = this->addPt(perp2, -outset, coverage, false, curve);
+ }
+
+ if (perp2Idx != perp1Idx) {
+ if (curve == kCurve_CurveState) {
+ // bevel or round depending upon curvature
+ SkScalar dotProd = normal1.dot(normal2);
+ if (dotProd < kRoundCapThreshold) {
+ // Currently we "round" by creating a single extra point, which produces
+ // good results for common cases. For thick strokes with high curvature, we will
+ // need to add more points; for the time being we simply fall back to software
+ // rendering for thick strokes.
+ SkPoint miter = previousRing.bisector(cur);
+ miter.setLength(-outset);
+ miter += fPts[originalIdx];
+
+ // For very shallow angles all the corner points could fuse
+ if (!duplicate_pt(miter, this->point(perp1Idx))) {
+ int miterIdx;
+ miterIdx = this->addPt(miter, -outset, coverage, false, kSharp_CurveState);
+ nextRing->addIdx(miterIdx, originalIdx);
+ // The two triangles for the corner
+ this->addTri(originalIdx, perp1Idx, miterIdx);
+ this->addTri(originalIdx, miterIdx, perp2Idx);
+ }
+ } else {
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ }
+ } else {
+ switch (fJoin) {
+ case SkPaint::Join::kMiter_Join: {
+ // The bisector outset point
+ SkPoint miter = previousRing.bisector(cur);
+ SkScalar dotProd = normal1.dot(normal2);
+ // The max is because this could go slightly negative if precision causes
+ // us to become slightly concave.
+ SkScalar sinHalfAngleSq = SkTMax(SkScalarHalf(SK_Scalar1 + dotProd), 0.f);
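+                        // With phi the angle between the two normals,
+                        // (1 + cos(phi)) / 2 = cos^2(phi/2) = sin^2(theta/2) for
+                        // the interior edge angle theta = pi - phi, so the classic
+                        // miter length outset / sin(theta/2) squares to the
+                        // quotient below.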
+ SkScalar lengthSq = sk_ieee_float_divide(outsetSq, sinHalfAngleSq);
+ if (lengthSq > miterLimitSq) {
+ // just bevel it
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ break;
+ }
+ miter.setLength(-SkScalarSqrt(lengthSq));
+ miter += fPts[originalIdx];
+
+ // For very shallow angles all the corner points could fuse
+ if (!duplicate_pt(miter, this->point(perp1Idx))) {
+ int miterIdx;
+ miterIdx = this->addPt(miter, -outset, coverage, false,
+ kSharp_CurveState);
+ nextRing->addIdx(miterIdx, originalIdx);
+ // The two triangles for the corner
+ this->addTri(originalIdx, perp1Idx, miterIdx);
+ this->addTri(originalIdx, miterIdx, perp2Idx);
+ } else {
+ // ignore the miter point as it's so close to perp1/perp2 and simply
+ // bevel.
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ }
+ break;
+ }
+ case SkPaint::Join::kBevel_Join:
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ break;
+ default:
+ // kRound_Join is unsupported for now. GrAALinearizingConvexPathRenderer is
+ // only willing to draw mitered or beveled, so we should never get here.
+ SkASSERT(false);
+ }
+ }
+
+ nextRing->addIdx(perp2Idx, originalIdx);
+ }
+
+ if (0 == cur) {
+ // Store the index of the first perpendicular point to finish up
+ firstPerpIdx = perp1Idx;
+ SkASSERT(-1 == lastPerpIdx);
+ } else {
+ // The triangles for the previous edge
+ int prevIdx = previousRing.index(prev);
+ this->addTri(prevIdx, perp1Idx, originalIdx);
+ this->addTri(prevIdx, lastPerpIdx, perp1Idx);
+ }
+
+ // Track the last perpendicular outset point so we can construct the
+ // trailing edge triangles.
+ lastPerpIdx = perp2Idx;
+ prev = cur;
+ }
+
+ // pick up the final edge rect
+ int lastIdx = previousRing.index(numPts - 1);
+ this->addTri(lastIdx, firstPerpIdx, previousRing.index(0));
+ this->addTri(lastIdx, lastPerpIdx, firstPerpIdx);
+
+ this->validate();
+}
+
+// Something went wrong in the creation of the next ring. If we're filling the shape, just go ahead
+// and fan it.
+void GrAAConvexTessellator::terminate(const Ring& ring) {
+ if (fStyle != SkStrokeRec::kStroke_Style && ring.numPts() > 0) {
+ this->fanRing(ring);
+ }
+}
+
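+// compute_coverage() linearly maps depth in [initialDepth, targetDepth] onto
+// coverage in [initialCoverage, targetCoverage] and clamps the result to at most 1.
+// For example, with initialDepth = 0, initialCoverage = 0, targetDepth = 2 and
+// targetCoverage = 1, a point at depth 1 receives coverage 0.5.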
+static SkScalar compute_coverage(SkScalar depth, SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage) {
+ if (SkScalarNearlyEqual(initialDepth, targetDepth)) {
+ return targetCoverage;
+ }
+ SkScalar result = (depth - initialDepth) / (targetDepth - initialDepth) *
+ (targetCoverage - initialCoverage) + initialCoverage;
+ return SkScalarClampMax(result, 1.0f);
+}
+
+// return true when processing is complete
+bool GrAAConvexTessellator::createInsetRing(const Ring& lastRing, Ring* nextRing,
+ SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage,
+ bool forceNew) {
+ bool done = false;
+
+ fCandidateVerts.rewind();
+
+ // Loop through all the points in the ring and find the intersection with the smallest depth
+ SkScalar minDist = SK_ScalarMax, minT = 0.0f;
+ int minEdgeIdx = -1;
+
+ for (int cur = 0; cur < lastRing.numPts(); ++cur) {
+ int next = (cur + 1) % lastRing.numPts();
+
+ SkScalar t;
+ bool result = intersect(this->point(lastRing.index(cur)), lastRing.bisector(cur),
+ this->point(lastRing.index(next)), lastRing.bisector(next),
+ &t);
+ // The bisectors may be parallel (!result) or the previous ring may have become slightly
+ // concave due to accumulated error (t <= 0).
+ if (!result || t <= 0) {
+ continue;
+ }
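+        // Depth reached at the intersection: the bisector points inward while
+        // the edge normal points outward, so norm.dot(bisector) is negative;
+        // scaling by t and negating yields a positive inset distance.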
+ SkScalar dist = -t * lastRing.norm(cur).dot(lastRing.bisector(cur));
+
+ if (minDist > dist) {
+ minDist = dist;
+ minT = t;
+ minEdgeIdx = cur;
+ }
+ }
+
+ if (minEdgeIdx == -1) {
+ return false;
+ }
+ SkPoint newPt = lastRing.bisector(minEdgeIdx);
+ newPt.scale(minT);
+ newPt += this->point(lastRing.index(minEdgeIdx));
+
+ SkScalar depth = this->computeDepthFromEdge(lastRing.origEdgeID(minEdgeIdx), newPt);
+ if (depth >= targetDepth) {
+ // None of the bisectors intersect before reaching the desired depth.
+ // Just step them all to the desired depth
+ depth = targetDepth;
+ done = true;
+ }
+
+    // 'dst' stores, for each point in the last ring, the index of the point it
+    // maps to in the next ring.
+ SkTDArray<int> dst;
+ dst.setCount(lastRing.numPts());
+
+    // Create the first point (which is compared against no prior point)
+ if (!this->computePtAlongBisector(lastRing.index(0),
+ lastRing.bisector(0),
+ lastRing.origEdgeID(0),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ dst[0] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(0), lastRing.origEdgeID(0),
+ !this->movable(lastRing.index(0)));
+
+    // Handle the middle points (each compared only against the prior point)
+ for (int cur = 1; cur < lastRing.numPts()-1; ++cur) {
+ if (!this->computePtAlongBisector(lastRing.index(cur),
+ lastRing.bisector(cur),
+ lastRing.origEdgeID(cur),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ if (!duplicate_pt(newPt, fCandidateVerts.lastPoint())) {
+ dst[cur] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(cur), lastRing.origEdgeID(cur),
+ !this->movable(lastRing.index(cur)));
+ } else {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ }
+ }
+
+ // Check on the last point (handling the wrap around)
+ int cur = lastRing.numPts()-1;
+ if (!this->computePtAlongBisector(lastRing.index(cur),
+ lastRing.bisector(cur),
+ lastRing.origEdgeID(cur),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ bool dupPrev = duplicate_pt(newPt, fCandidateVerts.lastPoint());
+ bool dupNext = duplicate_pt(newPt, fCandidateVerts.firstPoint());
+
+ if (!dupPrev && !dupNext) {
+ dst[cur] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(cur), lastRing.origEdgeID(cur),
+ !this->movable(lastRing.index(cur)));
+ } else if (dupPrev && !dupNext) {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ } else if (!dupPrev && dupNext) {
+ dst[cur] = fCandidateVerts.fuseWithNext();
+ } else {
+ bool dupPrevVsNext = duplicate_pt(fCandidateVerts.firstPoint(), fCandidateVerts.lastPoint());
+
+ if (!dupPrevVsNext) {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ } else {
+ const int fused = fCandidateVerts.fuseWithBoth();
+ dst[cur] = fused;
+ const int targetIdx = dst[cur - 1];
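+            // Any earlier points that had already fused into the popped candidate
+            // must be redirected to the surviving (first) index as well.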
+ for (int i = cur - 1; i >= 0 && dst[i] == targetIdx; i--) {
+ dst[i] = fused;
+ }
+ }
+ }
+
+ // Fold the new ring's points into the global pool
+ for (int i = 0; i < fCandidateVerts.numPts(); ++i) {
+ int newIdx;
+ if (fCandidateVerts.needsToBeNew(i) || forceNew) {
+ // if the originating index is still valid then this point wasn't
+ // fused (and is thus movable)
+ SkScalar coverage = compute_coverage(depth, initialDepth, initialCoverage,
+ targetDepth, targetCoverage);
+ newIdx = this->addPt(fCandidateVerts.point(i), depth, coverage,
+ fCandidateVerts.originatingIdx(i) != -1, kSharp_CurveState);
+ } else {
+ SkASSERT(fCandidateVerts.originatingIdx(i) != -1);
+ this->updatePt(fCandidateVerts.originatingIdx(i), fCandidateVerts.point(i), depth,
+ targetCoverage);
+ newIdx = fCandidateVerts.originatingIdx(i);
+ }
+
+ nextRing->addIdx(newIdx, fCandidateVerts.origEdge(i));
+ }
+
+ // 'dst' currently has indices into the ring. Remap these to be indices
+ // into the global pool since the triangulation operates in that space.
+ for (int i = 0; i < dst.count(); ++i) {
+ dst[i] = nextRing->index(dst[i]);
+ }
+
+ for (int i = 0; i < lastRing.numPts(); ++i) {
+ int next = (i + 1) % lastRing.numPts();
+
+ this->addTri(lastRing.index(i), lastRing.index(next), dst[next]);
+ this->addTri(lastRing.index(i), dst[next], dst[i]);
+ }
+
+ if (done && fStyle != SkStrokeRec::kStroke_Style) {
+ // fill or stroke-and-fill
+ this->fanRing(*nextRing);
+ }
+
+ if (nextRing->numPts() < 3) {
+ done = true;
+ }
+ return done;
+}
+
+void GrAAConvexTessellator::validate() const {
+ SkASSERT(fPts.count() == fMovable.count());
+ SkASSERT(fPts.count() == fCoverages.count());
+ SkASSERT(fPts.count() == fCurveState.count());
+ SkASSERT(0 == (fIndices.count() % 3));
+ SkASSERT(!fBisectors.count() || fBisectors.count() == fNorms.count());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+void GrAAConvexTessellator::Ring::init(const GrAAConvexTessellator& tess) {
+ this->computeNormals(tess);
+ this->computeBisectors(tess);
+}
+
+void GrAAConvexTessellator::Ring::init(const SkTDArray<SkVector>& norms,
+ const SkTDArray<SkVector>& bisectors) {
+ for (int i = 0; i < fPts.count(); ++i) {
+ fPts[i].fNorm = norms[i];
+ fPts[i].fBisector = bisectors[i];
+ }
+}
+
+// Compute the outward facing normal at each vertex.
+void GrAAConvexTessellator::Ring::computeNormals(const GrAAConvexTessellator& tess) {
+ for (int cur = 0; cur < fPts.count(); ++cur) {
+ int next = (cur + 1) % fPts.count();
+
+ fPts[cur].fNorm = tess.point(fPts[next].fIndex) - tess.point(fPts[cur].fIndex);
+ SkPoint::Normalize(&fPts[cur].fNorm);
+ fPts[cur].fNorm = SkPointPriv::MakeOrthog(fPts[cur].fNorm, tess.side());
+ }
+}
+
+void GrAAConvexTessellator::Ring::computeBisectors(const GrAAConvexTessellator& tess) {
+ int prev = fPts.count() - 1;
+ for (int cur = 0; cur < fPts.count(); prev = cur, ++cur) {
+ fPts[cur].fBisector = fPts[cur].fNorm + fPts[prev].fNorm;
+ if (!fPts[cur].fBisector.normalize()) {
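+            // The adjacent normals are nearly opposite (a ~180 degree turn), so
+            // their sum vanishes. Rebuild the bisector from vectors orthogonal to
+            // each normal instead, which still bisects the turn.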
+ fPts[cur].fBisector =
+ SkPointPriv::MakeOrthog(fPts[cur].fNorm, (SkPointPriv::Side)-tess.side()) +
+ SkPointPriv::MakeOrthog(fPts[prev].fNorm, tess.side());
+ SkAssertResult(fPts[cur].fBisector.normalize());
+ } else {
+ fPts[cur].fBisector.negate(); // make the bisector face in
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DEBUG
+// Is this ring convex?
+bool GrAAConvexTessellator::Ring::isConvex(const GrAAConvexTessellator& tess) const {
+ if (fPts.count() < 3) {
+ return true;
+ }
+
+ SkPoint prev = tess.point(fPts[0].fIndex) - tess.point(fPts.top().fIndex);
+ SkPoint cur = tess.point(fPts[1].fIndex) - tess.point(fPts[0].fIndex);
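+    // Despite the "Dot" names, these are 2D cross products of successive edge
+    // vectors; a consistent sign at every corner implies convexity.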
+ SkScalar minDot = prev.fX * cur.fY - prev.fY * cur.fX;
+ SkScalar maxDot = minDot;
+
+ prev = cur;
+ for (int i = 1; i < fPts.count(); ++i) {
+ int next = (i + 1) % fPts.count();
+
+ cur = tess.point(fPts[next].fIndex) - tess.point(fPts[i].fIndex);
+ SkScalar dot = prev.fX * cur.fY - prev.fY * cur.fX;
+
+ minDot = SkMinScalar(minDot, dot);
+ maxDot = SkMaxScalar(maxDot, dot);
+
+ prev = cur;
+ }
+
+ if (SkScalarNearlyEqual(maxDot, 0.0f, 0.005f)) {
+ maxDot = 0;
+ }
+ if (SkScalarNearlyEqual(minDot, 0.0f, 0.005f)) {
+ minDot = 0;
+ }
+ return (maxDot >= 0.0f) == (minDot >= 0.0f);
+}
+
+#endif
+
+void GrAAConvexTessellator::lineTo(const SkPoint& p, CurveState curve) {
+ if (this->numPts() > 0 && duplicate_pt(p, this->lastPoint())) {
+ return;
+ }
+
+ if (this->numPts() >= 2 &&
+ points_are_colinear_and_b_is_middle(fPts[fPts.count() - 2], fPts.top(), p)) {
+ // The old last point is on the line from the second to last to the new point
+ this->popLastPt();
+ // double-check that the new last point is not a duplicate of the new point. In an ideal
+ // world this wouldn't be necessary (since it's only possible for non-convex paths), but
+ // floating point precision issues mean it can actually happen on paths that were
+ // determined to be convex.
+ if (duplicate_pt(p, this->lastPoint())) {
+ return;
+ }
+ }
+ SkScalar initialRingCoverage = (SkStrokeRec::kFill_Style == fStyle) ? 0.5f : 1.0f;
+ this->addPt(p, 0.0f, initialRingCoverage, false, curve);
+}
+
+void GrAAConvexTessellator::lineTo(const SkMatrix& m, const SkPoint& p, CurveState curve) {
+ this->lineTo(m.mapXY(p.fX, p.fY), curve);
+}
+
+void GrAAConvexTessellator::quadTo(const SkPoint pts[3]) {
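+    // Flatten the quad into a polyline within kQuadTolerance. Interior points are
+    // tagged kCurve_CurveState; the final point is kIndeterminate_CurveState since
+    // the curvature of whatever verb follows is not yet known.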
+ int maxCount = GrPathUtils::quadraticPointCount(pts, kQuadTolerance);
+ fPointBuffer.setCount(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+ kQuadTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count - 1; i++) {
+ this->lineTo(fPointBuffer[i], kCurve_CurveState);
+ }
+ this->lineTo(fPointBuffer[count - 1], kIndeterminate_CurveState);
+}
+
+void GrAAConvexTessellator::quadTo(const SkMatrix& m, const SkPoint srcPts[3]) {
+ SkPoint pts[3];
+ m.mapPoints(pts, srcPts, 3);
+ this->quadTo(pts);
+}
+
+void GrAAConvexTessellator::cubicTo(const SkMatrix& m, const SkPoint srcPts[4]) {
+ SkPoint pts[4];
+ m.mapPoints(pts, srcPts, 4);
+ int maxCount = GrPathUtils::cubicPointCount(pts, kCubicTolerance);
+ fPointBuffer.setCount(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+ kCubicTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count - 1; i++) {
+ this->lineTo(fPointBuffer[i], kCurve_CurveState);
+ }
+ this->lineTo(fPointBuffer[count - 1], kIndeterminate_CurveState);
+}
+
+// include down here to avoid compilation errors caused by "-" overload in SkGeometry.h
+#include "src/core/SkGeometry.h"
+
+void GrAAConvexTessellator::conicTo(const SkMatrix& m, const SkPoint srcPts[3], SkScalar w) {
+ SkPoint pts[3];
+ m.mapPoints(pts, srcPts, 3);
+ SkAutoConicToQuads quadder;
+ const SkPoint* quads = quadder.computeQuads(pts, w, kConicTolerance);
+ SkPoint lastPoint = *(quads++);
+ int count = quadder.countQuads();
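+    // computeQuads returns 1 + 2 * countQuads() points, with adjacent quads
+    // sharing an endpoint. Walk them pairwise, snapping the final endpoint to
+    // the conic's exact endpoint to avoid accumulated error.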
+ for (int i = 0; i < count; ++i) {
+ SkPoint quadPts[3];
+ quadPts[0] = lastPoint;
+ quadPts[1] = quads[0];
+ quadPts[2] = i == count - 1 ? pts[2] : quads[1];
+ this->quadTo(quadPts);
+ lastPoint = quadPts[2];
+ quads += 2;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+static const SkScalar kPointRadius = 0.02f;
+static const SkScalar kArrowStrokeWidth = 0.0f;
+static const SkScalar kArrowLength = 0.2f;
+static const SkScalar kEdgeTextSize = 0.1f;
+static const SkScalar kPointTextSize = 0.02f;
+
+static void draw_point(SkCanvas* canvas, const SkPoint& p, SkScalar paramValue, bool stroke) {
+ SkPaint paint;
+ SkASSERT(paramValue <= 1.0f);
+ int gs = int(255*paramValue);
+ paint.setARGB(255, gs, gs, gs);
+
+ canvas->drawCircle(p.fX, p.fY, kPointRadius, paint);
+
+ if (stroke) {
+ SkPaint stroke;
+ stroke.setColor(SK_ColorYELLOW);
+ stroke.setStyle(SkPaint::kStroke_Style);
+ stroke.setStrokeWidth(kPointRadius/3.0f);
+ canvas->drawCircle(p.fX, p.fY, kPointRadius, stroke);
+ }
+}
+
+static void draw_line(SkCanvas* canvas, const SkPoint& p0, const SkPoint& p1, SkColor color) {
+ SkPaint p;
+ p.setColor(color);
+
+ canvas->drawLine(p0.fX, p0.fY, p1.fX, p1.fY, p);
+}
+
+static void draw_arrow(SkCanvas*canvas, const SkPoint& p, const SkPoint &n,
+ SkScalar len, SkColor color) {
+ SkPaint paint;
+ paint.setColor(color);
+ paint.setStrokeWidth(kArrowStrokeWidth);
+ paint.setStyle(SkPaint::kStroke_Style);
+
+ canvas->drawLine(p.fX, p.fY,
+ p.fX + len * n.fX, p.fY + len * n.fY,
+ paint);
+}
+
+void GrAAConvexTessellator::Ring::draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const {
+ SkPaint paint;
+ paint.setTextSize(kEdgeTextSize);
+
+ for (int cur = 0; cur < fPts.count(); ++cur) {
+ int next = (cur + 1) % fPts.count();
+
+ draw_line(canvas,
+ tess.point(fPts[cur].fIndex),
+ tess.point(fPts[next].fIndex),
+ SK_ColorGREEN);
+
+ SkPoint mid = tess.point(fPts[cur].fIndex) + tess.point(fPts[next].fIndex);
+ mid.scale(0.5f);
+
+ if (fPts.count()) {
+ draw_arrow(canvas, mid, fPts[cur].fNorm, kArrowLength, SK_ColorRED);
+ mid.fX += (kArrowLength/2) * fPts[cur].fNorm.fX;
+ mid.fY += (kArrowLength/2) * fPts[cur].fNorm.fY;
+ }
+
+ SkString num;
+ num.printf("%d", this->origEdgeID(cur));
+ canvas->drawString(num, mid.fX, mid.fY, paint);
+
+ if (fPts.count()) {
+ draw_arrow(canvas, tess.point(fPts[cur].fIndex), fPts[cur].fBisector,
+ kArrowLength, SK_ColorBLUE);
+ }
+ }
+}
+
+void GrAAConvexTessellator::draw(SkCanvas* canvas) const {
+ for (int i = 0; i < fIndices.count(); i += 3) {
+        SkASSERT(fIndices[i] < this->numPts());
+        SkASSERT(fIndices[i+1] < this->numPts());
+        SkASSERT(fIndices[i+2] < this->numPts());
+
+ draw_line(canvas,
+ this->point(this->fIndices[i]), this->point(this->fIndices[i+1]),
+ SK_ColorBLACK);
+ draw_line(canvas,
+ this->point(this->fIndices[i+1]), this->point(this->fIndices[i+2]),
+ SK_ColorBLACK);
+ draw_line(canvas,
+ this->point(this->fIndices[i+2]), this->point(this->fIndices[i]),
+ SK_ColorBLACK);
+ }
+
+ fInitialRing.draw(canvas, *this);
+ for (int i = 0; i < fRings.count(); ++i) {
+ fRings[i]->draw(canvas, *this);
+ }
+
+ for (int i = 0; i < this->numPts(); ++i) {
+ draw_point(canvas,
+ this->point(i), 0.5f + (this->depth(i)/(2 * kAntialiasingRadius)),
+ !this->movable(i));
+
+ SkPaint paint;
+ paint.setTextSize(kPointTextSize);
+ if (this->depth(i) <= -kAntialiasingRadius) {
+ paint.setColor(SK_ColorWHITE);
+ }
+
+ SkString num;
+ num.printf("%d", i);
+ canvas->drawString(num,
+ this->point(i).fX, this->point(i).fY+(kPointRadius/2.0f),
+ paint);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.h b/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.h
new file mode 100644
index 0000000000..b4edd0c8a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAConvexTessellator.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexTessellator_DEFINED
+#define GrAAConvexTessellator_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkPointPriv.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPath;
+
+//#define GR_AA_CONVEX_TESSELLATOR_VIZ 1
+
+// The device-space distance by which we inset/outset points to create the soft antialiased edge.
+static const SkScalar kAntialiasingRadius = 0.5f;
+
+class GrAAConvexTessellator;
+
+// The GrAAConvexTessellator holds the global pool of points and the triangulation
+// that connects them. It also drives the tessellation process.
+// The outward-facing normals of the original polygon are stored (in 'fNorms') to
+// service computeDepthFromEdge requests.
+class GrAAConvexTessellator {
+public:
+ GrAAConvexTessellator(SkStrokeRec::Style style = SkStrokeRec::kFill_Style,
+ SkScalar strokeWidth = -1.0f,
+ SkPaint::Join join = SkPaint::Join::kBevel_Join,
+ SkScalar miterLimit = 0.0f)
+ : fSide(SkPointPriv::kOn_Side)
+ , fStrokeWidth(strokeWidth)
+ , fStyle(style)
+ , fJoin(join)
+ , fMiterLimit(miterLimit) {
+ }
+
+ SkPointPriv::Side side() const { return fSide; }
+
+ bool tessellate(const SkMatrix& m, const SkPath& path);
+
+    // The following accessors should only be called after tessellate() to extract the result
+ int numPts() const { return fPts.count(); }
+ int numIndices() const { return fIndices.count(); }
+
+ const SkPoint& lastPoint() const { return fPts.top(); }
+ const SkPoint& point(int index) const { return fPts[index]; }
+ int index(int index) const { return fIndices[index]; }
+ SkScalar coverage(int index) const { return fCoverages[index]; }
+
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ void draw(SkCanvas* canvas) const;
+#endif
+
+ // The tessellator can be reused for multiple paths by rewinding in between
+ void rewind();
+
+private:
+ // CandidateVerts holds the vertices for the next ring while they are
+ // being generated. Its main function is to de-dup the points.
+ class CandidateVerts {
+ public:
+ void setReserve(int numPts) { fPts.setReserve(numPts); }
+ void rewind() { fPts.rewind(); }
+
+ int numPts() const { return fPts.count(); }
+
+ const SkPoint& lastPoint() const { return fPts.top().fPt; }
+ const SkPoint& firstPoint() const { return fPts[0].fPt; }
+ const SkPoint& point(int index) const { return fPts[index].fPt; }
+
+ int originatingIdx(int index) const { return fPts[index].fOriginatingIdx; }
+ int origEdge(int index) const { return fPts[index].fOrigEdgeId; }
+ bool needsToBeNew(int index) const { return fPts[index].fNeedsToBeNew; }
+
+ int addNewPt(const SkPoint& newPt, int originatingIdx, int origEdge, bool needsToBeNew) {
+ struct PointData* pt = fPts.push();
+ pt->fPt = newPt;
+ pt->fOrigEdgeId = origEdge;
+ pt->fOriginatingIdx = originatingIdx;
+ pt->fNeedsToBeNew = needsToBeNew;
+ return fPts.count() - 1;
+ }
+
+ int fuseWithPrior(int origEdgeId) {
+ fPts.top().fOrigEdgeId = origEdgeId;
+ fPts.top().fOriginatingIdx = -1;
+ fPts.top().fNeedsToBeNew = true;
+ return fPts.count() - 1;
+ }
+
+ int fuseWithNext() {
+ fPts[0].fOriginatingIdx = -1;
+ fPts[0].fNeedsToBeNew = true;
+ return 0;
+ }
+
+ int fuseWithBoth() {
+ if (fPts.count() > 1) {
+ fPts.pop();
+ }
+
+ fPts[0].fOriginatingIdx = -1;
+ fPts[0].fNeedsToBeNew = true;
+ return 0;
+ }
+
+ private:
+ struct PointData {
+ SkPoint fPt;
+ int fOriginatingIdx;
+ int fOrigEdgeId;
+ bool fNeedsToBeNew;
+ };
+
+ SkTDArray<struct PointData> fPts;
+ };
+
+ // The Ring holds a set of indices into the global pool that together define
+ // a single polygon inset.
+ class Ring {
+ public:
+ void setReserve(int numPts) { fPts.setReserve(numPts); }
+ void rewind() { fPts.rewind(); }
+
+ int numPts() const { return fPts.count(); }
+
+ void addIdx(int index, int origEdgeId) {
+ struct PointData* pt = fPts.push();
+ pt->fIndex = index;
+ pt->fOrigEdgeId = origEdgeId;
+ }
+
+ // Upgrade this ring so that it can behave like an originating ring
+ void makeOriginalRing() {
+ for (int i = 0; i < fPts.count(); ++i) {
+ fPts[i].fOrigEdgeId = fPts[i].fIndex;
+ }
+ }
+
+ // init should be called after all the indices have been added (via addIdx)
+ void init(const GrAAConvexTessellator& tess);
+ void init(const SkTDArray<SkVector>& norms, const SkTDArray<SkVector>& bisectors);
+
+ const SkPoint& norm(int index) const { return fPts[index].fNorm; }
+ const SkPoint& bisector(int index) const { return fPts[index].fBisector; }
+ int index(int index) const { return fPts[index].fIndex; }
+ int origEdgeID(int index) const { return fPts[index].fOrigEdgeId; }
+ void setOrigEdgeId(int index, int id) { fPts[index].fOrigEdgeId = id; }
+
+ #if GR_AA_CONVEX_TESSELLATOR_VIZ
+ void draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const;
+ #endif
+
+ private:
+        void computeNormals(const GrAAConvexTessellator& tess);
+ void computeBisectors(const GrAAConvexTessellator& tess);
+
+ SkDEBUGCODE(bool isConvex(const GrAAConvexTessellator& tess) const;)
+
+ struct PointData {
+ SkPoint fNorm;
+ SkPoint fBisector;
+ int fIndex;
+ int fOrigEdgeId;
+ };
+
+ SkTDArray<PointData> fPts;
+ };
+
+ // Represents whether a given point is within a curve. A point is inside a curve only if it is
+ // an interior point within a quad, cubic, or conic, or if it is the endpoint of a quad, cubic,
+ // or conic with another curve meeting it at (more or less) the same angle.
+ enum CurveState {
+ // point is a sharp vertex
+ kSharp_CurveState,
+ // endpoint of a curve with the other side's curvature not yet determined
+ kIndeterminate_CurveState,
+ // point is in the interior of a curve
+ kCurve_CurveState
+ };
+
+ bool movable(int index) const { return fMovable[index]; }
+
+ // Movable points are those that can be slid along their bisector.
+ // Basically, a point is immovable if it is part of the original
+ // polygon or it results from the fusing of two bisectors.
+ int addPt(const SkPoint& pt, SkScalar depth, SkScalar coverage, bool movable, CurveState curve);
+ void popLastPt();
+ void popFirstPtShuffle();
+
+ void updatePt(int index, const SkPoint& pt, SkScalar depth, SkScalar coverage);
+
+ void addTri(int i0, int i1, int i2);
+
+ void reservePts(int count) {
+ fPts.setReserve(count);
+ fCoverages.setReserve(count);
+ fMovable.setReserve(count);
+ }
+
+ SkScalar computeDepthFromEdge(int edgeIdx, const SkPoint& p) const;
+
+ bool computePtAlongBisector(int startIdx, const SkPoint& bisector,
+ int edgeIdx, SkScalar desiredDepth,
+ SkPoint* result) const;
+
+ void lineTo(const SkPoint& p, CurveState curve);
+
+ void lineTo(const SkMatrix& m, const SkPoint& p, CurveState curve);
+
+ void quadTo(const SkPoint pts[3]);
+
+ void quadTo(const SkMatrix& m, const SkPoint pts[3]);
+
+ void cubicTo(const SkMatrix& m, const SkPoint pts[4]);
+
+ void conicTo(const SkMatrix& m, const SkPoint pts[3], SkScalar w);
+
+ void terminate(const Ring& lastRing);
+
+ // return false on failure/degenerate path
+ bool extractFromPath(const SkMatrix& m, const SkPath& path);
+ void computeBisectors();
+ void computeNormals();
+
+ void fanRing(const Ring& ring);
+
+ Ring* getNextRing(Ring* lastRing);
+
+ void createOuterRing(const Ring& previousRing, SkScalar outset, SkScalar coverage,
+ Ring* nextRing);
+
+ bool createInsetRings(Ring& previousRing, SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage, Ring** finalRing);
+
+ bool createInsetRing(const Ring& lastRing, Ring* nextRing,
+ SkScalar initialDepth, SkScalar initialCoverage, SkScalar targetDepth,
+ SkScalar targetCoverage, bool forceNew);
+
+ void validate() const;
+
+ // fPts, fCoverages, fMovable & fCurveState should always have the same # of elements
+ SkTDArray<SkPoint> fPts;
+ SkTDArray<SkScalar> fCoverages;
+ // movable points are those that can be slid further along their bisector
+ SkTDArray<bool> fMovable;
+ // Tracks whether a given point is interior to a curve. Such points are
+ // assumed to have shallow curvature.
+ SkTDArray<CurveState> fCurveState;
+
+ // The outward facing normals for the original polygon
+ SkTDArray<SkVector> fNorms;
+ // The inward facing bisector at each point in the original polygon. Only
+ // needed for exterior ring creation and then handed off to the initial ring.
+ SkTDArray<SkVector> fBisectors;
+
+ SkPointPriv::Side fSide; // winding of the original polygon
+
+ // The triangulation of the points
+ SkTDArray<int> fIndices;
+
+ Ring fInitialRing;
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ // When visualizing save all the rings
+ SkTDArray<Ring*> fRings;
+#else
+ Ring fRings[2];
+#endif
+ CandidateVerts fCandidateVerts;
+
+ // the stroke width is only used for stroke or stroke-and-fill styles
+ SkScalar fStrokeWidth;
+ SkStrokeRec::Style fStyle;
+
+ SkPaint::Join fJoin;
+
+ SkScalar fMiterLimit;
+
+ SkTDArray<SkPoint> fPointBuffer;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.cpp
new file mode 100644
index 0000000000..f78c6722b4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.cpp
@@ -0,0 +1,1106 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPoint3.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkStroke.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrBuffer.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/effects/GrBezierEffect.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrAAHairLinePathRenderer.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+#define PREALLOC_PTARRAY(N) SkSTArray<(N), SkPoint, true>
+
+// Quadratics are rendered as 5-sided polys in order to bound the
+// AA stroke around the center-curve. See comments in push_quad_index_buffer and
+// bloat_quad. Quadratics and conics share an index buffer.
+
+// lines are rendered as:
+// *______________*
+// |\ -_______ /|
+// | \ \ / |
+// | *--------* |
+// | / ______/ \ |
+// */_-__________\*
+// This uses 6 vertices and 18 indices (6 triangles).
+
+// Each quadratic is rendered as a five sided polygon. This poly bounds
+// the quadratic's bounding triangle but has been expanded so that the
+// 1-pixel wide area around the curve is inside the poly.
+// If a,b,c are the original control points then the poly a0,b0,c0,c1,a1
+// that is rendered would look like this:
+// b0
+// b
+//
+// a0 c0
+// a c
+// a1 c1
+// Each is drawn as three triangles ((a0,a1,b0), (b0,c1,c0), (a1,c1,b0))
+// specified by these 9 indices:
+static const uint16_t kQuadIdxBufPattern[] = {
+ 0, 1, 2,
+ 2, 4, 3,
+ 1, 4, 2
+};
+
+static const int kIdxsPerQuad = SK_ARRAY_COUNT(kQuadIdxBufPattern);
+static const int kQuadNumVertices = 5;
+static const int kQuadsNumInIdxBuffer = 256;
+GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+
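+// The 9-index pattern above is repeated kQuadsNumInIdxBuffer times (each copy
+// offset by kQuadNumVertices) and the resulting buffer is cached under the
+// unique key, so all ops share one allocation.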
+static sk_sp<const GrBuffer> get_quads_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
+ gQuadsIndexBufferKey);
+}
+
+
+// Each line segment is rendered as two quads and two triangles.
+// p0 and p1 have alpha = 1 while all other points have alpha = 0.
+// The four external points are offset 1 pixel perpendicular to the
+// line and half a pixel parallel to the line.
+//
+// p4 p5
+// p0 p1
+// p2 p3
+//
+// Each is drawn as six triangles specified by these 18 indices:
+
+static const uint16_t kLineSegIdxBufPattern[] = {
+ 0, 1, 3,
+ 0, 3, 2,
+ 0, 4, 5,
+ 0, 5, 1,
+ 0, 2, 4,
+ 1, 5, 3
+};
+
+static const int kIdxsPerLineSeg = SK_ARRAY_COUNT(kLineSegIdxBufPattern);
+static const int kLineSegNumVertices = 6;
+static const int kLineSegsNumInIdxBuffer = 256;
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+
+static sk_sp<const GrBuffer> get_lines_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
+ gLinesIndexBufferKey);
+}
+
+// Considerably cheaper than calling logf (timed on a Z600 / VC2010).
+static int get_float_exp(float x) {
+ GR_STATIC_ASSERT(sizeof(int) == sizeof(float));
+#ifdef SK_DEBUG
+ static bool tested;
+ if (!tested) {
+ tested = true;
+ SkASSERT(get_float_exp(0.25f) == -2);
+ SkASSERT(get_float_exp(0.3f) == -2);
+ SkASSERT(get_float_exp(0.5f) == -1);
+ SkASSERT(get_float_exp(1.f) == 0);
+ SkASSERT(get_float_exp(2.f) == 1);
+ SkASSERT(get_float_exp(2.5f) == 1);
+ SkASSERT(get_float_exp(8.f) == 3);
+ SkASSERT(get_float_exp(100.f) == 6);
+ SkASSERT(get_float_exp(1000.f) == 9);
+ SkASSERT(get_float_exp(1024.f) == 10);
+ SkASSERT(get_float_exp(3000000.f) == 21);
+ }
+#endif
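+    // Pull the biased exponent field straight out of the IEEE-754 bit pattern:
+    // bits 23..30 of a single-precision float hold (exponent + 127).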
+ const int* iptr = (const int*)&x;
+ return (((*iptr) & 0x7f800000) >> 23) - 127;
+}
+
+// Uses the max curvature function for quads to estimate
+// where to chop the conic. If the max curvature is not
+// found along the curve segment it will return 1 and
+// dst[0] is the original conic. If it returns 2, dst[0]
+// and dst[1] are the two new conics.
+static int split_conic(const SkPoint src[3], SkConic dst[2], const SkScalar weight) {
+ SkScalar t = SkFindQuadMaxCurvature(src);
+ if (t == 0 || t == 1) {
+ if (dst) {
+ dst[0].set(src, weight);
+ }
+ return 1;
+ } else {
+ if (dst) {
+ SkConic conic;
+ conic.set(src, weight);
+ if (!conic.chopAt(t, dst)) {
+ dst[0].set(src, weight);
+ return 1;
+ }
+ }
+ return 2;
+ }
+}
+
+// Calls split_conic on the entire conic and then once more on each subsection.
+// Most cases will result in either 1 conic (chop point is not within t range)
+// or 3 conics (split once and then one subsection is split again).
+static int chop_conic(const SkPoint src[3], SkConic dst[4], const SkScalar weight) {
+ SkConic dstTemp[2];
+ int conicCnt = split_conic(src, dstTemp, weight);
+ if (2 == conicCnt) {
+ int conicCnt2 = split_conic(dstTemp[0].fPts, dst, dstTemp[0].fW);
+ conicCnt = conicCnt2 + split_conic(dstTemp[1].fPts, &dst[conicCnt2], dstTemp[1].fW);
+ } else {
+ dst[0] = dstTemp[0];
+ }
+ return conicCnt;
+}
+
+// Returns 1 if the quad/conic is degenerate or close to it; in that case the
+// path should be approximated with lines.
+// Otherwise returns 0.
+static int is_degen_quad_or_conic(const SkPoint p[3], SkScalar* dsqd) {
+ static const SkScalar gDegenerateToLineTol = GrPathUtils::kDefaultTolerance;
+ static const SkScalar gDegenerateToLineTolSqd =
+ gDegenerateToLineTol * gDegenerateToLineTol;
+
+ if (SkPointPriv::DistanceToSqd(p[0], p[1]) < gDegenerateToLineTolSqd ||
+ SkPointPriv::DistanceToSqd(p[1], p[2]) < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+
+ *dsqd = SkPointPriv::DistanceToLineBetweenSqd(p[1], p[0], p[2]);
+ if (*dsqd < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+
+ if (SkPointPriv::DistanceToLineBetweenSqd(p[2], p[1], p[0]) < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+ return 0;
+}
+
+static int is_degen_quad_or_conic(const SkPoint p[3]) {
+ SkScalar dsqd;
+ return is_degen_quad_or_conic(p, &dsqd);
+}
+
+// We subdivide the quads to avoid huge overfill.
+// If this returns -1 the quad should be drawn as lines instead.
+static int num_quad_subdivs(const SkPoint p[3]) {
+ SkScalar dsqd;
+ if (is_degen_quad_or_conic(p, &dsqd)) {
+ return -1;
+ }
+
+    // Tolerance of the triangle height in pixels, tuned on a Windows
+    // Quadro FX 380 / Z600. This trades off overfill vs. CPU time spent on
+    // vertices; the sweet spot may differ when done on the GPU (geometry or
+    // tessellation shaders).
+ static const SkScalar gSubdivTol = 175 * SK_Scalar1;
+
+ if (dsqd <= gSubdivTol * gSubdivTol) {
+ return 0;
+ } else {
+ static const int kMaxSub = 4;
+        // Subdividing the quad reduces d by a factor of 4, so we want
+        //     x = log4(d/tol) = log4(d*d/(tol*tol))/2 = log2(d*d/(tol*tol))/4.
+        // The float exponent below gives log2(d*d/(tol*tol)) directly, +1 since
+        // we're ignoring the mantissa contribution; this conservative
+        // overestimate of x is then clamped to kMaxSub.
+ int log = get_float_exp(dsqd/(gSubdivTol*gSubdivTol)) + 1;
+ log = SkTMin(SkTMax(0, log), kMaxSub);
+ return log;
+ }
+}
+
+/**
+ * Generates the lines and quads to be rendered. Lines are always recorded in
+ * device space. We will do a device space bloat to account for the 1-pixel
+ * thickness.
+ * Quads are recorded in device space unless m contains perspective; when it
+ * does, they are recorded in src space. We do this because we will subdivide
+ * large quads to reduce over-fill. This subdivision has to be performed before
+ * applying the perspective matrix.
+ */
+static int gather_lines_and_quads(const SkPath& path,
+ const SkMatrix& m,
+ const SkIRect& devClipBounds,
+ SkScalar capLength,
+ bool convertConicsToQuads,
+ GrAAHairLinePathRenderer::PtArray* lines,
+ GrAAHairLinePathRenderer::PtArray* quads,
+ GrAAHairLinePathRenderer::PtArray* conics,
+ GrAAHairLinePathRenderer::IntArray* quadSubdivCnts,
+ GrAAHairLinePathRenderer::FloatArray* conicWeights) {
+ SkPath::Iter iter(path, false);
+
+ int totalQuadCount = 0;
+ SkRect bounds;
+ SkIRect ibounds;
+
+ bool persp = m.hasPerspective();
+
+ // Whenever a degenerate, zero-length contour is encountered, this code will insert a
+ // 'capLength' x-aligned line segment. Since this is rendering hairlines it is hoped this will
+ // suffice for AA square & circle capping.
+ int verbsInContour = 0; // Does not count moves
+ bool seenZeroLengthVerb = false;
+ SkPoint zeroVerbPt;
+
+ // Adds a quad that has already been chopped to the list and checks for quads that are close to
+ // lines. Also does a bounding box check. It takes points that are in src space and device
+ // space. The src points are only required if the view matrix has perspective.
+ auto addChoppedQuad = [&](const SkPoint srcPts[3], const SkPoint devPts[4],
+ bool isContourStart) {
+ SkRect bounds;
+ SkIRect ibounds;
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+        // We only need the src-space pts when in perspective.
+ SkASSERT(srcPts || !persp);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ int subdiv = num_quad_subdivs(devPts);
+ SkASSERT(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ if (isContourStart && pts[0] == pts[1] && pts[2] == pts[3]) {
+ seenZeroLengthVerb = true;
+ zeroVerbPt = pts[0];
+ }
+ } else {
+ // when in perspective keep quads in src space
+ const SkPoint* qPts = persp ? srcPts : devPts;
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = qPts[0];
+ pts[1] = qPts[1];
+ pts[2] = qPts[2];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ };
+
+ // Applies the view matrix to quad src points and calls the above helper.
+ auto addSrcChoppedQuad = [&](const SkPoint srcSpaceQuadPts[3], bool isContourStart) {
+ SkPoint devPts[3];
+ m.mapPoints(devPts, srcSpaceQuadPts, 3);
+ addChoppedQuad(srcSpaceQuadPts, devPts, isContourStart);
+ };
+
+ for (;;) {
+ SkPoint pathPts[4];
+ SkPath::Verb verb = iter.next(pathPts);
+ switch (verb) {
+ case SkPath::kConic_Verb:
+ if (convertConicsToQuads) {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pathPts, weight, 0.25f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ addSrcChoppedQuad(quadPts + 2 * i, !verbsInContour && 0 == i);
+ }
+ } else {
+ SkConic dst[4];
+ // We chop the conics to create tighter clipping to hide error
+ // that appears near max curvature of very thin conics. Thin
+ // hyperbolas with high weight still show error.
+ int conicCnt = chop_conic(pathPts, dst, iter.conicWeight());
+ for (int i = 0; i < conicCnt; ++i) {
+ SkPoint devPts[4];
+ SkPoint* chopPnts = dst[i].fPts;
+ m.mapPoints(devPts, chopPnts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ if (is_degen_quad_or_conic(devPts)) {
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ if (verbsInContour == 0 && i == 0 && pts[0] == pts[1] &&
+ pts[2] == pts[3]) {
+ seenZeroLengthVerb = true;
+ zeroVerbPt = pts[0];
+ }
+ } else {
+ // when in perspective keep conics in src space
+ SkPoint* cPts = persp ? chopPnts : devPts;
+ SkPoint* pts = conics->push_back_n(3);
+ pts[0] = cPts[0];
+ pts[1] = cPts[1];
+ pts[2] = cPts[2];
+ conicWeights->push_back() = dst[i].fW;
+ }
+ }
+ }
+ }
+ verbsInContour++;
+ break;
+ case SkPath::kMove_Verb:
+ // New contour (and last one was unclosed). If it was just a zero length drawing
+ // operation, and we're supposed to draw caps, then add a tiny line.
+ if (seenZeroLengthVerb && verbsInContour == 1 && capLength > 0) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = SkPoint::Make(zeroVerbPt.fX - capLength, zeroVerbPt.fY);
+ pts[1] = SkPoint::Make(zeroVerbPt.fX + capLength, zeroVerbPt.fY);
+ }
+ verbsInContour = 0;
+ seenZeroLengthVerb = false;
+ break;
+ case SkPath::kLine_Verb: {
+ SkPoint devPts[2];
+ m.mapPoints(devPts, pathPts, 2);
+ bounds.setBounds(devPts, 2);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ if (verbsInContour == 0 && pts[0] == pts[1]) {
+ seenZeroLengthVerb = true;
+ zeroVerbPt = pts[0];
+ }
+ }
+ verbsInContour++;
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ SkPoint choppedPts[5];
+ // Chopping the quad helps when the quad is either degenerate or nearly degenerate.
+ // When it is degenerate it allows the approximation with lines to work since the
+                // chop point (if there is one) will be at the parabola's vertex. In the nearly
+                // degenerate case the QuadUVMatrix computed for the points is almost singular,
+                // which can cause rendering artifacts.
+ int n = SkChopQuadAtMaxCurvature(pathPts, choppedPts);
+ for (int i = 0; i < n; ++i) {
+ addSrcChoppedQuad(choppedPts + i * 2, !verbsInContour && 0 == i);
+ }
+ verbsInContour++;
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ SkPoint devPts[4];
+ m.mapPoints(devPts, pathPts, 4);
+ bounds.setBounds(devPts, 4);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ PREALLOC_PTARRAY(32) q;
+ // We convert cubics to quadratics (for now).
+                // In perspective we have to do the conversion in src space.
+ if (persp) {
+ SkScalar tolScale =
+ GrPathUtils::scaleToleranceToSrc(SK_Scalar1, m, path.getBounds());
+ GrPathUtils::convertCubicToQuads(pathPts, tolScale, &q);
+ } else {
+ GrPathUtils::convertCubicToQuads(devPts, SK_Scalar1, &q);
+ }
+ for (int i = 0; i < q.count(); i += 3) {
+ if (persp) {
+ addSrcChoppedQuad(&q[i], !verbsInContour && 0 == i);
+ } else {
+ addChoppedQuad(nullptr, &q[i], !verbsInContour && 0 == i);
+ }
+ }
+ }
+ verbsInContour++;
+ break;
+ }
+ case SkPath::kClose_Verb:
+ // Contour is closed, so we don't need to grow the starting line, unless it's
+ // *just* a zero length subpath. (SVG Spec 11.4, 'stroke').
+ if (capLength > 0) {
+ if (seenZeroLengthVerb && verbsInContour == 1) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = SkPoint::Make(zeroVerbPt.fX - capLength, zeroVerbPt.fY);
+ pts[1] = SkPoint::Make(zeroVerbPt.fX + capLength, zeroVerbPt.fY);
+ } else if (verbsInContour == 0) {
+ // Contour was (moveTo, close). Add a line.
+ SkPoint devPts[2];
+ m.mapPoints(devPts, pathPts, 1);
+ devPts[1] = devPts[0];
+ bounds.setBounds(devPts, 2);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = SkPoint::Make(devPts[0].fX - capLength, devPts[0].fY);
+ pts[1] = SkPoint::Make(devPts[1].fX + capLength, devPts[1].fY);
+ }
+ }
+ }
+ break;
+ case SkPath::kDone_Verb:
+ if (seenZeroLengthVerb && verbsInContour == 1 && capLength > 0) {
+ // Path ended with a dangling (moveTo, line|quad|etc). If the final verb is
+ // degenerate, we need to draw a line.
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = SkPoint::Make(zeroVerbPt.fX - capLength, zeroVerbPt.fY);
+ pts[1] = SkPoint::Make(zeroVerbPt.fX + capLength, zeroVerbPt.fY);
+ }
+ return totalQuadCount;
+ }
+ }
+}
+
+struct LineVertex {
+ SkPoint fPos;
+ float fCoverage;
+};
+
+struct BezierVertex {
+ SkPoint fPos;
+ union {
+ struct {
+ SkScalar fKLM[3];
+ } fConic;
+ SkVector fQuadCoord;
+ struct {
+ SkScalar fBogus[4];
+ };
+ };
+};
+
+GR_STATIC_ASSERT(sizeof(BezierVertex) == 3 * sizeof(SkPoint));
+
+static void intersect_lines(const SkPoint& ptA, const SkVector& normA,
+ const SkPoint& ptB, const SkVector& normB,
+ SkPoint* result) {
+
+ SkScalar lineAW = -normA.dot(ptA);
+ SkScalar lineBW = -normB.dot(ptB);
+
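+    // Each line is held in implicit form N . P + W = 0 with N a unit normal;
+    // the intersection solves the resulting 2x2 linear system by Cramer's rule,
+    // with wInv the reciprocal of its determinant.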
+ SkScalar wInv = normA.fX * normB.fY - normA.fY * normB.fX;
+ wInv = SkScalarInvert(wInv);
+ if (!SkScalarIsFinite(wInv)) {
+ // lines are parallel, pick the point in between
+ *result = (ptA + ptB)*SK_ScalarHalf;
+ *result += normA;
+ } else {
+ result->fX = normA.fY * lineBW - lineAW * normB.fY;
+ result->fX *= wInv;
+
+ result->fY = lineAW * normB.fX - normA.fX * lineBW;
+ result->fY *= wInv;
+ }
+}
+
+static void set_uv_quad(const SkPoint qpts[3], BezierVertex verts[kQuadNumVertices]) {
+ // this should be in the src space, not dev coords, when we have perspective
+ GrPathUtils::QuadUVMatrix DevToUV(qpts);
+ DevToUV.apply(verts, kQuadNumVertices, sizeof(BezierVertex), sizeof(SkPoint));
+}
+
+static void bloat_quad(const SkPoint qpts[3], const SkMatrix* toDevice,
+ const SkMatrix* toSrc, BezierVertex verts[kQuadNumVertices]) {
+ SkASSERT(!toDevice == !toSrc);
+ // original quad is specified by tri a,b,c
+ SkPoint a = qpts[0];
+ SkPoint b = qpts[1];
+ SkPoint c = qpts[2];
+
+ if (toDevice) {
+ toDevice->mapPoints(&a, 1);
+ toDevice->mapPoints(&b, 1);
+ toDevice->mapPoints(&c, 1);
+ }
+    // make a new poly where we replace a and c by 1-pixel-wide edges orthogonal
+    // to edges ab and bc:
+ //
+ // before | after
+ // | b0
+ // b |
+ // |
+ // | a0 c0
+ // a c | a1 c1
+ //
+ // edges a0->b0 and b0->c0 are parallel to original edges a->b and b->c,
+ // respectively.
+ BezierVertex& a0 = verts[0];
+ BezierVertex& a1 = verts[1];
+ BezierVertex& b0 = verts[2];
+ BezierVertex& c0 = verts[3];
+ BezierVertex& c1 = verts[4];
+
+ SkVector ab = b;
+ ab -= a;
+ SkVector ac = c;
+ ac -= a;
+ SkVector cb = b;
+ cb -= c;
+
+    // After the transform we might have a line; try to do something reasonable.
+ if (toDevice && SkPointPriv::LengthSqd(ab) <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ ab = cb;
+ }
+ if (toDevice && SkPointPriv::LengthSqd(cb) <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ cb = ab;
+ }
+
+ // We should have already handled degenerates
+ SkASSERT(toDevice || (ab.length() > 0 && cb.length() > 0));
+
+ ab.normalize();
+ SkVector abN = SkPointPriv::MakeOrthog(ab, SkPointPriv::kLeft_Side);
+ if (abN.dot(ac) > 0) {
+ abN.negate();
+ }
+
+ cb.normalize();
+ SkVector cbN = SkPointPriv::MakeOrthog(cb, SkPointPriv::kLeft_Side);
+ if (cbN.dot(ac) < 0) {
+ cbN.negate();
+ }
+
+ a0.fPos = a;
+ a0.fPos += abN;
+ a1.fPos = a;
+ a1.fPos -= abN;
+
+ if (toDevice && SkPointPriv::LengthSqd(ac) <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ c = b;
+ }
+ c0.fPos = c;
+ c0.fPos += cbN;
+ c1.fPos = c;
+ c1.fPos -= cbN;
+
+ intersect_lines(a0.fPos, abN, c0.fPos, cbN, &b0.fPos);
+
+ if (toSrc) {
+ SkMatrixPriv::MapPointsWithStride(*toSrc, &verts[0].fPos, sizeof(BezierVertex),
+ kQuadNumVertices);
+ }
+}
+
+// Equations based off of Loop-Blinn Quadratic GPU Rendering
+// Input Parametric:
+// P(t) = (P0*(1-t)^2 + 2*w*P1*t*(1-t) + P2*t^2) / ((1-t)^2 + 2*w*t*(1-t) + t^2)
+// Output Implicit:
+// f(x, y, w) = f(P) = K^2 - LM
+// K = dot(k, P), L = dot(l, P), M = dot(m, P)
+// k, l, m are calculated in function GrPathUtils::getConicKLM
+static void set_conic_coeffs(const SkPoint p[3], BezierVertex verts[kQuadNumVertices],
+ const SkScalar weight) {
+ SkMatrix klm;
+
+ GrPathUtils::getConicKLM(p, weight, &klm);
+
+ for (int i = 0; i < kQuadNumVertices; ++i) {
+ const SkPoint3 pt3 = {verts[i].fPos.x(), verts[i].fPos.y(), 1.f};
+        klm.mapHomogeneousPoints((SkPoint3*) verts[i].fConic.fKLM, &pt3, 1);
+ }
+}
+
+static void add_conics(const SkPoint p[3],
+ const SkScalar weight,
+ const SkMatrix* toDevice,
+ const SkMatrix* toSrc,
+ BezierVertex** vert) {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ set_conic_coeffs(p, *vert, weight);
+ *vert += kQuadNumVertices;
+}
+
+static void add_quads(const SkPoint p[3],
+ int subdiv,
+ const SkMatrix* toDevice,
+ const SkMatrix* toSrc,
+ BezierVertex** vert) {
+ SkASSERT(subdiv >= 0);
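+    // Each level of recursion halves the quad (SkChopQuadAtHalf writes two quads
+    // into newP, sharing newP[2]), so a subdivision count of n emits 2^n quads.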
+ if (subdiv) {
+ SkPoint newP[5];
+ SkChopQuadAtHalf(p, newP);
+ add_quads(newP + 0, subdiv-1, toDevice, toSrc, vert);
+ add_quads(newP + 2, subdiv-1, toDevice, toSrc, vert);
+ } else {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ set_uv_quad(p, *vert);
+ *vert += kQuadNumVertices;
+ }
+}
+
+static void add_line(const SkPoint p[2],
+ const SkMatrix* toSrc,
+ uint8_t coverage,
+ LineVertex** vert) {
+ const SkPoint& a = p[0];
+ const SkPoint& b = p[1];
+
+ SkVector ortho, vec = b;
+ vec -= a;
+
+ SkScalar lengthSqd = SkPointPriv::LengthSqd(vec);
+
+ if (vec.setLength(SK_ScalarHalf)) {
+ // Create a vector orthogonal to 'vec' and of unit length
+ ortho.fX = 2.0f * vec.fY;
+ ortho.fY = -2.0f * vec.fX;
+
+ float floatCoverage = GrNormalizeByteToFloat(coverage);
+
+ if (lengthSqd >= 1.0f) {
+ // Relative to points a and b:
+ // The inner vertices are inset half a pixel along the line a,b
+ (*vert)[0].fPos = a + vec;
+ (*vert)[0].fCoverage = floatCoverage;
+ (*vert)[1].fPos = b - vec;
+ (*vert)[1].fCoverage = floatCoverage;
+ } else {
+ // The inner vertices are inset a distance of length(a,b) from the outer edge of
+ // geometry. For the "a" inset this is the same as insetting from b by half a pixel.
+ // The coverage is then modulated by the length. This gives us the correct
+ // coverage for rects shorter than a pixel as they get translated subpixel amounts
+ // inside of a pixel.
+ SkScalar length = SkScalarSqrt(lengthSqd);
+ (*vert)[0].fPos = b - vec;
+ (*vert)[0].fCoverage = floatCoverage * length;
+ (*vert)[1].fPos = a + vec;
+ (*vert)[1].fCoverage = floatCoverage * length;
+ }
+ // Relative to points a and b:
+ // The outer vertices are outset half a pixel along the line a,b and then a whole pixel
+ // orthogonally.
+ (*vert)[2].fPos = a - vec + ortho;
+ (*vert)[2].fCoverage = 0;
+ (*vert)[3].fPos = b + vec + ortho;
+ (*vert)[3].fCoverage = 0;
+ (*vert)[4].fPos = a - vec - ortho;
+ (*vert)[4].fCoverage = 0;
+ (*vert)[5].fPos = b + vec - ortho;
+ (*vert)[5].fCoverage = 0;
+
+ if (toSrc) {
+ SkMatrixPriv::MapPointsWithStride(*toSrc, &(*vert)->fPos, sizeof(LineVertex),
+ kLineSegNumVertices);
+ }
+ } else {
+ // just make it degenerate and likely offscreen
+ for (int i = 0; i < kLineSegNumVertices; ++i) {
+ (*vert)[i].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ }
+ }
+
+ *vert += kLineSegNumVertices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPathRenderer::CanDrawPath
+GrAAHairLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (GrAAType::kCoverage != args.fAAType) {
+ return CanDrawPath::kNo;
+ }
+
+ if (!IsStrokeHairlineOrEquivalent(args.fShape->style(), *args.fViewMatrix, nullptr)) {
+ return CanDrawPath::kNo;
+ }
+
+ // We don't currently handle dashing in this class though perhaps we should.
+ if (args.fShape->style().pathEffect()) {
+ return CanDrawPath::kNo;
+ }
+
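+    // Paths containing only line segments don't need shader derivatives; quads
+    // and conics are evaluated in the shader via the Bezier effects, which do.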
+ if (SkPath::kLine_SegmentMask == args.fShape->segmentMask() ||
+ args.fCaps->shaderCaps()->shaderDerivativeSupport()) {
+ return CanDrawPath::kYes;
+ }
+
+ return CanDrawPath::kNo;
+}
+
+template <class VertexType>
+bool check_bounds(const SkMatrix& viewMatrix, const SkRect& devBounds, void* vertices, int vCount)
+{
+ SkRect tolDevBounds = devBounds;
+ // The bounds ought to be tight, but in perspective the below code runs the verts
+ // through the view matrix to get back to dev coords, which can introduce imprecision.
+ if (viewMatrix.hasPerspective()) {
+ tolDevBounds.outset(SK_Scalar1 / 1000, SK_Scalar1 / 1000);
+ } else {
+ // Non-persp matrices cause this path renderer to draw in device space.
+ SkASSERT(viewMatrix.isIdentity());
+ }
+ SkRect actualBounds;
+
+ VertexType* verts = reinterpret_cast<VertexType*>(vertices);
+ bool first = true;
+ for (int i = 0; i < vCount; ++i) {
+ SkPoint pos = verts[i].fPos;
+        // This is a hack to work around the fact that we move some degenerate segments offscreen.
+ if (SK_ScalarMax == pos.fX) {
+ continue;
+ }
+ viewMatrix.mapPoints(&pos, 1);
+ if (first) {
+ actualBounds.setLTRB(pos.fX, pos.fY, pos.fX, pos.fY);
+ first = false;
+ } else {
+ SkRectPriv::GrowToInclude(&actualBounds, pos);
+ }
+ }
+ if (!first) {
+ return tolDevBounds.contains(actualBounds);
+ }
+
+ return true;
+}
+
+namespace {
+
+class AAHairlineOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style,
+ const SkIRect& devClipBounds,
+ const GrUserStencilSettings* stencilSettings) {
+ SkScalar hairlineCoverage;
+ uint8_t newCoverage = 0xff;
+ if (GrPathRenderer::IsStrokeHairlineOrEquivalent(style, viewMatrix, &hairlineCoverage)) {
+ newCoverage = SkScalarRoundToInt(hairlineCoverage * 0xff);
+ }
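+        // Callers are expected to pass a hairline or hairline-equivalent style
+        // (see onCanDrawPath), so hairlineCoverage is assumed to be set above.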
+
+ const SkStrokeRec& stroke = style.strokeRec();
+ SkScalar capLength = SkPaint::kButt_Cap != stroke.getCap() ? hairlineCoverage * 0.5f : 0.0f;
+
+ return Helper::FactoryHelper<AAHairlineOp>(context, std::move(paint), newCoverage,
+ viewMatrix, path,
+ devClipBounds, capLength, stencilSettings);
+ }
+
+ AAHairlineOp(const Helper::MakeArgs& helperArgs,
+ const SkPMColor4f& color,
+ uint8_t coverage,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ SkIRect devClipBounds,
+ SkScalar capLength,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage, stencilSettings)
+ , fColor(color)
+ , fCoverage(coverage) {
+ fPaths.emplace_back(PathData{viewMatrix, path, devClipBounds, capLength});
+
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsHairline::kYes);
+ }
+
+ const char* name() const override { return "AAHairlineOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Color: 0x%08x Coverage: 0x%02x, Count: %d\n", fColor.toBytes_RGBA(),
+ fCoverage, fPaths.count());
+ string += INHERITED::dumpInfo();
+ string += fHelper.dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ // This Op uses uniform (not vertex) color, so doesn't need to track wide color.
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, &fColor,
+ nullptr);
+ }
+
+private:
+ void onPrepareDraws(Target*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ AAHairlineOp* that = t->cast<AAHairlineOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->viewMatrix().hasPerspective() != that->viewMatrix().hasPerspective()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // We go to identity if we don't have perspective
+ if (this->viewMatrix().hasPerspective() &&
+ !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+        // TODO: we could combine hairlines of the same color with a kind of bulk
+        // method, but we haven't implemented this yet.
+        // TODO: investigate going to vertex color and coverage?
+ if (this->coverage() != that->coverage()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->color() != that->color()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
+ return CombineResult::kMerged;
+ }
+
+ const SkPMColor4f& color() const { return fColor; }
+ uint8_t coverage() const { return fCoverage; }
+ const SkMatrix& viewMatrix() const { return fPaths[0].fViewMatrix; }
+
+ struct PathData {
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkIRect fDevClipBounds;
+ SkScalar fCapLength;
+ };
+
+ SkSTArray<1, PathData, true> fPaths;
+ Helper fHelper;
+ SkPMColor4f fColor;
+ uint8_t fCoverage;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+void AAHairlineOp::onPrepareDraws(Target* target) {
+    // Set up the view matrix and local matrix for the GrGeometryProcessor.
+ SkMatrix invert;
+ if (!this->viewMatrix().invert(&invert)) {
+ return;
+ }
+
+ // we will transform to identity space if the viewmatrix does not have perspective
+ bool hasPerspective = this->viewMatrix().hasPerspective();
+ const SkMatrix* geometryProcessorViewM = &SkMatrix::I();
+ const SkMatrix* geometryProcessorLocalM = &invert;
+ const SkMatrix* toDevice = nullptr;
+ const SkMatrix* toSrc = nullptr;
+ if (hasPerspective) {
+ geometryProcessorViewM = &this->viewMatrix();
+ geometryProcessorLocalM = &SkMatrix::I();
+ toDevice = &this->viewMatrix();
+ toSrc = &invert;
+ }
+
+ // This is hand inlined for maximum performance.
+ PREALLOC_PTARRAY(128) lines;
+ PREALLOC_PTARRAY(128) quads;
+ PREALLOC_PTARRAY(128) conics;
+ IntArray qSubdivs;
+ FloatArray cWeights;
+ int quadCount = 0;
+
+ int instanceCount = fPaths.count();
+ bool convertConicsToQuads = !target->caps().shaderCaps()->floatIs32Bits();
+ for (int i = 0; i < instanceCount; i++) {
+ const PathData& args = fPaths[i];
+ quadCount += gather_lines_and_quads(args.fPath, args.fViewMatrix, args.fDevClipBounds,
+ args.fCapLength, convertConicsToQuads, &lines, &quads,
+ &conics, &qSubdivs, &cWeights);
+ }
+
+ int lineCount = lines.count() / 2;
+ int conicCount = conics.count() / 3;
+ int quadAndConicCount = conicCount + quadCount;
+
+ static constexpr int kMaxLines = SK_MaxS32 / kLineSegNumVertices;
+ static constexpr int kMaxQuadsAndConics = SK_MaxS32 / kQuadNumVertices;
+ if (lineCount > kMaxLines || quadAndConicCount > kMaxQuadsAndConics) {
+ return;
+ }
+
+ // do lines first
+ if (lineCount) {
+ sk_sp<GrGeometryProcessor> lineGP;
+ {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(this->color());
+ LocalCoords localCoords(fHelper.usesLocalCoords() ? LocalCoords::kUsePosition_Type
+ : LocalCoords::kUnused_Type);
+ localCoords.fMatrix = geometryProcessorLocalM;
+ lineGP = GrDefaultGeoProcFactory::Make(target->caps().shaderCaps(),
+ color, Coverage::kAttribute_Type, localCoords,
+ *geometryProcessorViewM);
+ }
+
+ sk_sp<const GrBuffer> linesIndexBuffer = get_lines_index_buffer(target->resourceProvider());
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+
+ SkASSERT(sizeof(LineVertex) == lineGP->vertexStride());
+ int vertexCount = kLineSegNumVertices * lineCount;
+ LineVertex* verts = reinterpret_cast<LineVertex*>(target->makeVertexSpace(
+ sizeof(LineVertex), vertexCount, &vertexBuffer, &firstVertex));
+
+        if (!verts || !linesIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < lineCount; ++i) {
+ add_line(&lines[2*i], toSrc, this->coverage(), &verts);
+ }
+
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(std::move(linesIndexBuffer), kIdxsPerLineSeg, kLineSegNumVertices,
+ lineCount, kLineSegsNumInIdxBuffer);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(lineGP), mesh);
+ }
+
+ if (quadCount || conicCount) {
+ sk_sp<GrGeometryProcessor> quadGP(GrQuadEffect::Make(this->color(),
+ *geometryProcessorViewM,
+ GrClipEdgeType::kHairlineAA,
+ target->caps(),
+ *geometryProcessorLocalM,
+ fHelper.usesLocalCoords(),
+ this->coverage()));
+
+ sk_sp<GrGeometryProcessor> conicGP(GrConicEffect::Make(this->color(),
+ *geometryProcessorViewM,
+ GrClipEdgeType::kHairlineAA,
+ target->caps(),
+ *geometryProcessorLocalM,
+ fHelper.usesLocalCoords(),
+ this->coverage()));
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+
+ sk_sp<const GrBuffer> quadsIndexBuffer = get_quads_index_buffer(target->resourceProvider());
+
+ SkASSERT(sizeof(BezierVertex) == quadGP->vertexStride());
+ SkASSERT(sizeof(BezierVertex) == conicGP->vertexStride());
+ int vertexCount = kQuadNumVertices * quadAndConicCount;
+ void* vertices = target->makeVertexSpace(sizeof(BezierVertex), vertexCount, &vertexBuffer,
+ &firstVertex);
+
+ if (!vertices || !quadsIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+        // Set up the vertices.
+ BezierVertex* bezVerts = reinterpret_cast<BezierVertex*>(vertices);
+
+ int unsubdivQuadCnt = quads.count() / 3;
+ for (int i = 0; i < unsubdivQuadCnt; ++i) {
+ SkASSERT(qSubdivs[i] >= 0);
+ add_quads(&quads[3*i], qSubdivs[i], toDevice, toSrc, &bezVerts);
+ }
+
+ // Start Conics
+ for (int i = 0; i < conicCount; ++i) {
+ add_conics(&conics[3*i], cWeights[i], toDevice, toSrc, &bezVerts);
+ }
+
+ if (quadCount > 0) {
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(quadsIndexBuffer, kIdxsPerQuad, kQuadNumVertices, quadCount,
+ kQuadsNumInIdxBuffer);
+ mesh->setVertexData(vertexBuffer, firstVertex);
+ target->recordDraw(std::move(quadGP), mesh);
+ firstVertex += quadCount * kQuadNumVertices;
+ }
+
+ if (conicCount > 0) {
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(std::move(quadsIndexBuffer), kIdxsPerQuad, kQuadNumVertices,
+ conicCount, kQuadsNumInIdxBuffer);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(conicGP), mesh);
+ }
+ }
+}
+
+void AAHairlineOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+}
+
+bool GrAAHairLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrAAHairlinePathRenderer::onDrawPath");
+ SkASSERT(args.fRenderTargetContext->numSamples() <= 1);
+
+ SkIRect devClipBounds;
+ args.fClip->getConservativeBounds(args.fRenderTargetContext->width(),
+ args.fRenderTargetContext->height(),
+ &devClipBounds);
+ SkPath path;
+ args.fShape->asPath(&path);
+ std::unique_ptr<GrDrawOp> op =
+ AAHairlineOp::Make(args.fContext, std::move(args.fPaint), *args.fViewMatrix, path,
+ args.fShape->style(), devClipBounds, args.fUserStencilSettings);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(AAHairlineOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ SkPath path = GrTest::TestPath(random);
+ SkIRect devClipBounds;
+ devClipBounds.setEmpty();
+ return AAHairlineOp::Make(context, std::move(paint), viewMatrix, path,
+ GrStyle::SimpleHairline(), devClipBounds,
+ GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.h
new file mode 100644
index 0000000000..af78faa937
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAAHairLinePathRenderer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAHairLinePathRenderer_DEFINED
+#define GrAAHairLinePathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrAAHairLinePathRenderer : public GrPathRenderer {
+public:
+ GrAAHairLinePathRenderer() {}
+
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+private:
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.cpp
new file mode 100644
index 0000000000..d0cbb41013
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/ops/GrAAConvexTessellator.h"
+#include "src/gpu/ops/GrAALinearizingConvexPathRenderer.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+static const int DEFAULT_BUFFER_SIZE = 100;
+
+// The thicker the stroke, the harder it is to produce high-quality results using tessellation. For
+// the time being, we simply drop back to software rendering above this stroke width.
+static const SkScalar kMaxStrokeWidth = 20.0;
+
+GrAALinearizingConvexPathRenderer::GrAALinearizingConvexPathRenderer() {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPathRenderer::CanDrawPath
+GrAALinearizingConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (GrAAType::kCoverage != args.fAAType) {
+ return CanDrawPath::kNo;
+ }
+ if (!args.fShape->knownToBeConvex()) {
+ return CanDrawPath::kNo;
+ }
+ if (args.fShape->style().pathEffect()) {
+ return CanDrawPath::kNo;
+ }
+ if (args.fShape->inverseFilled()) {
+ return CanDrawPath::kNo;
+ }
+ if (args.fShape->bounds().width() <= 0 && args.fShape->bounds().height() <= 0) {
+ // Stroked zero length lines should draw, but this PR doesn't handle that case
+ return CanDrawPath::kNo;
+ }
+ const SkStrokeRec& stroke = args.fShape->style().strokeRec();
+
+ if (stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kStrokeAndFill_Style) {
+ if (!args.fViewMatrix->isSimilarity()) {
+ return CanDrawPath::kNo;
+ }
+ SkScalar strokeWidth = args.fViewMatrix->getMaxScale() * stroke.getWidth();
+ if (strokeWidth < 1.0f && stroke.getStyle() == SkStrokeRec::kStroke_Style) {
+ return CanDrawPath::kNo;
+ }
+ if (strokeWidth > kMaxStrokeWidth ||
+ !args.fShape->knownToBeClosed() ||
+ stroke.getJoin() == SkPaint::Join::kRound_Join) {
+ return CanDrawPath::kNo;
+ }
+ return CanDrawPath::kYes;
+ }
+ if (stroke.getStyle() != SkStrokeRec::kFill_Style) {
+ return CanDrawPath::kNo;
+ }
+ return CanDrawPath::kYes;
+}
+
+// extract the result vertices and indices from the GrAAConvexTessellator
+static void extract_verts(const GrAAConvexTessellator& tess,
+ void* vertData,
+ const GrVertexColor& color,
+ uint16_t firstIndex,
+ uint16_t* idxs) {
+ GrVertexWriter verts{vertData};
+ for (int i = 0; i < tess.numPts(); ++i) {
+ verts.write(tess.point(i), color, tess.coverage(i));
+ }
+
+ for (int i = 0; i < tess.numIndices(); ++i) {
+ idxs[i] = tess.index(i) + firstIndex;
+ }
+}
+
+static sk_sp<GrGeometryProcessor> create_lines_only_gp(const GrShaderCaps* shaderCaps,
+ bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool wideColor) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Coverage::Type coverageType =
+ tweakAlphaForCoverage ? Coverage::kAttributeTweakAlpha_Type : Coverage::kAttribute_Type;
+ LocalCoords::Type localCoordsType =
+ usesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
+ Color::Type colorType =
+ wideColor ? Color::kPremulWideColorAttribute_Type : Color::kPremulGrColorAttribute_Type;
+
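+ // The tessellator emits device-space positions, hence the GP is built for device space;
+ // the view matrix is passed so local coordinates can still be derived when they are
+ // required (an assumption about MakeForDeviceSpace's contract, not spelled out here).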
+ return MakeForDeviceSpace(shaderCaps, colorType, coverageType, localCoordsType, viewMatrix);
+}
+
+namespace {
+
+class AAFlatteningConvexPathOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ SkScalar strokeWidth,
+ SkStrokeRec::Style style,
+ SkPaint::Join join,
+ SkScalar miterLimit,
+ const GrUserStencilSettings* stencilSettings) {
+ return Helper::FactoryHelper<AAFlatteningConvexPathOp>(context, std::move(paint),
+ viewMatrix, path,
+ strokeWidth, style, join, miterLimit,
+ stencilSettings);
+ }
+
+ AAFlatteningConvexPathOp(const Helper::MakeArgs& helperArgs,
+ const SkPMColor4f& color,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ SkScalar strokeWidth,
+ SkStrokeRec::Style style,
+ SkPaint::Join join,
+ SkScalar miterLimit,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
+ fPaths.emplace_back(
+ PathData{color, viewMatrix, path, strokeWidth, style, join, miterLimit});
+
+ // compute bounds
+ SkRect bounds = path.getBounds();
+ SkScalar w = strokeWidth;
+ if (w > 0) {
+ w /= 2;
+ SkScalar maxScale = viewMatrix.getMaxScale();
+ // We should not have a perspective matrix, thus we should have a valid scale.
+ SkASSERT(maxScale != -1);
+ if (SkPaint::kMiter_Join == join && w * maxScale > 1.f) {
+ w *= miterLimit;
+ }
+ bounds.outset(w, w);
+ }
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kYes, IsHairline::kNo);
+ }
+
+ const char* name() const override { return "AAFlatteningConvexPathOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (const auto& path : fPaths) {
+ string.appendf(
+ "Color: 0x%08x, StrokeWidth: %.2f, Style: %d, Join: %d, "
+ "MiterLimit: %.2f\n",
+ path.fColor.toBytes_RGBA(), path.fStrokeWidth, path.fStyle, path.fJoin,
+ path.fMiterLimit);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, &fPaths.back().fColor, &fWideColor);
+ }
+
+private:
+ void recordDraw(Target* target, sk_sp<const GrGeometryProcessor> gp, int vertexCount,
+ size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) const {
+ if (vertexCount == 0 || indexCount == 0) {
+ return;
+ }
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+ &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ memcpy(verts, vertices, vertexCount * vertexStride);
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex;
+ uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ memcpy(idxs, indices, indexCount * sizeof(uint16_t));
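+ // setIndexed takes the min/max vertex values referenced (0 .. vertexCount - 1) so the
+ // backend can bound the draw without scanning the index data.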
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexed(std::move(indexBuffer), indexCount, firstIndex, 0, vertexCount - 1,
+ GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onPrepareDraws(Target* target) override {
+ // Setup GrGeometryProcessor
+ sk_sp<GrGeometryProcessor> gp(create_lines_only_gp(target->caps().shaderCaps(),
+ fHelper.compatibleWithCoverageAsAlpha(),
+ this->viewMatrix(),
+ fHelper.usesLocalCoords(),
+ fWideColor));
+ if (!gp) {
+ SkDebugf("Couldn't create a GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->vertexStride();
+ int instanceCount = fPaths.count();
+
+ int64_t vertexCount = 0;
+ int64_t indexCount = 0;
+ int64_t maxVertices = DEFAULT_BUFFER_SIZE;
+ int64_t maxIndices = DEFAULT_BUFFER_SIZE;
+ uint8_t* vertices = (uint8_t*) sk_malloc_throw(maxVertices * vertexStride);
+ uint16_t* indices = (uint16_t*) sk_malloc_throw(maxIndices * sizeof(uint16_t));
+ for (int i = 0; i < instanceCount; i++) {
+ const PathData& args = fPaths[i];
+ GrAAConvexTessellator tess(args.fStyle, args.fStrokeWidth,
+ args.fJoin, args.fMiterLimit);
+
+ if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
+ continue;
+ }
+
+ int currentVertices = tess.numPts();
+ if (vertexCount + currentVertices > static_cast<int>(UINT16_MAX)) {
+ // if we added the current instance, we would overflow the indices we can store in a
+ // uint16_t. Draw what we've got so far and reset.
+ this->recordDraw(
+ target, gp, vertexCount, vertexStride, vertices, indexCount, indices);
+ vertexCount = 0;
+ indexCount = 0;
+ }
+ if (vertexCount + currentVertices > maxVertices) {
+ maxVertices = SkTMax(vertexCount + currentVertices, maxVertices * 2);
+ if (maxVertices * vertexStride > SK_MaxS32) {
+ sk_free(vertices);
+ sk_free(indices);
+ return;
+ }
+ vertices = (uint8_t*) sk_realloc_throw(vertices, maxVertices * vertexStride);
+ }
+ int currentIndices = tess.numIndices();
+ if (indexCount + currentIndices > maxIndices) {
+ maxIndices = SkTMax(indexCount + currentIndices, maxIndices * 2);
+ if (maxIndices * sizeof(uint16_t) > SK_MaxS32) {
+ sk_free(vertices);
+ sk_free(indices);
+ return;
+ }
+ indices = (uint16_t*) sk_realloc_throw(indices, maxIndices * sizeof(uint16_t));
+ }
+
+ extract_verts(tess, vertices + vertexStride * vertexCount,
+ GrVertexColor(args.fColor, fWideColor), vertexCount,
+ indices + indexCount);
+ vertexCount += currentVertices;
+ indexCount += currentIndices;
+ }
+ if (vertexCount <= SK_MaxS32 && indexCount <= SK_MaxS32) {
+ this->recordDraw(target, std::move(gp), vertexCount, vertexStride, vertices, indexCount,
+ indices);
+ }
+ sk_free(vertices);
+ sk_free(indices);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ AAFlatteningConvexPathOp* that = t->cast<AAFlatteningConvexPathOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ const SkMatrix& viewMatrix() const { return fPaths[0].fViewMatrix; }
+
+ struct PathData {
+ SkPMColor4f fColor;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkScalar fStrokeWidth;
+ SkStrokeRec::Style fStyle;
+ SkPaint::Join fJoin;
+ SkScalar fMiterLimit;
+ };
+
+ SkSTArray<1, PathData, true> fPaths;
+ Helper fHelper;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+bool GrAALinearizingConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrAALinearizingConvexPathRenderer::onDrawPath");
+ SkASSERT(args.fRenderTargetContext->numSamples() <= 1);
+ SkASSERT(!args.fShape->isEmpty());
+ SkASSERT(!args.fShape->style().pathEffect());
+
+ SkPath path;
+ args.fShape->asPath(&path);
+ bool fill = args.fShape->style().isSimpleFill();
+ const SkStrokeRec& stroke = args.fShape->style().strokeRec();
+ SkScalar strokeWidth = fill ? -1.0f : stroke.getWidth();
+ SkPaint::Join join = fill ? SkPaint::Join::kMiter_Join : stroke.getJoin();
+ SkScalar miterLimit = stroke.getMiter();
+
+ std::unique_ptr<GrDrawOp> op = AAFlatteningConvexPathOp::Make(
+ args.fContext, std::move(args.fPaint), *args.fViewMatrix, path, strokeWidth,
+ stroke.getStyle(), join, miterLimit, args.fUserStencilSettings);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(AAFlatteningConvexPathOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
+ SkPath path = GrTest::TestPathConvex(random);
+
+ SkStrokeRec::Style styles[3] = { SkStrokeRec::kFill_Style,
+ SkStrokeRec::kStroke_Style,
+ SkStrokeRec::kStrokeAndFill_Style };
+
+ SkStrokeRec::Style style = styles[random->nextU() % 3];
+
+ SkScalar strokeWidth = -1.f;
+ SkPaint::Join join = SkPaint::kMiter_Join;
+ SkScalar miterLimit = 0.5f;
+
+ if (SkStrokeRec::kFill_Style != style) {
+ strokeWidth = random->nextRangeF(1.0f, 10.0f);
+ if (random->nextBool()) {
+ join = SkPaint::kMiter_Join;
+ } else {
+ join = SkPaint::kBevel_Join;
+ }
+ miterLimit = random->nextRangeF(0.5f, 2.0f);
+ }
+ const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
+ return AAFlatteningConvexPathOp::Make(context, std::move(paint), viewMatrix, path, strokeWidth,
+ style, join, miterLimit, stencilSettings);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.h
new file mode 100644
index 0000000000..b86f6fc273
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAALinearizingConvexPathRenderer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAALinearizingConvexPathRenderer_DEFINED
+#define GrAALinearizingConvexPathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrAALinearizingConvexPathRenderer : public GrPathRenderer {
+public:
+ GrAALinearizingConvexPathRenderer();
+
+private:
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.cpp b/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.cpp
new file mode 100644
index 0000000000..bcf4670395
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.cpp
@@ -0,0 +1,576 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrAtlasTextOp.h"
+
+#include "include/core/SkPoint3.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/effects/GrBitmapTextGeoProc.h"
+#include "src/gpu/effects/GrDistanceFieldGeoProc.h"
+#include "src/gpu/text/GrAtlasManager.h"
+#include "src/gpu/text/GrStrikeCache.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrAtlasTextOp> GrAtlasTextOp::MakeBitmap(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrMaskFormat maskFormat,
+ int glyphCount,
+ bool needsTransform) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ std::unique_ptr<GrAtlasTextOp> op = pool->allocate<GrAtlasTextOp>(std::move(paint));
+
+ switch (maskFormat) {
+ case kA8_GrMaskFormat:
+ op->fMaskType = kGrayscaleCoverageMask_MaskType;
+ break;
+ case kA565_GrMaskFormat:
+ op->fMaskType = kLCDCoverageMask_MaskType;
+ break;
+ case kARGB_GrMaskFormat:
+ op->fMaskType = kColorBitmapMask_MaskType;
+ break;
+ }
+ op->fNumGlyphs = glyphCount;
+ op->fGeoCount = 1;
+ op->fLuminanceColor = 0;
+ op->fNeedsGlyphTransform = needsTransform;
+ return op;
+ }
+
+std::unique_ptr<GrAtlasTextOp> GrAtlasTextOp::MakeDistanceField(
+ GrRecordingContext* context,
+ GrPaint&& paint,
+ int glyphCount,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ bool useGammaCorrectDistanceTable,
+ SkColor luminanceColor,
+ const SkSurfaceProps& props,
+ bool isAntiAliased,
+ bool useLCD) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ std::unique_ptr<GrAtlasTextOp> op = pool->allocate<GrAtlasTextOp>(std::move(paint));
+
+ bool isBGR = SkPixelGeometryIsBGR(props.pixelGeometry());
+ bool isLCD = useLCD && SkPixelGeometryIsH(props.pixelGeometry());
+ op->fMaskType = !isAntiAliased ? kAliasedDistanceField_MaskType
+ : isLCD ? (isBGR ? kLCDBGRDistanceField_MaskType
+ : kLCDDistanceField_MaskType)
+ : kGrayscaleDistanceField_MaskType;
+ op->fDistanceAdjustTable.reset(SkRef(distanceAdjustTable));
+ op->fUseGammaCorrectDistanceTable = useGammaCorrectDistanceTable;
+ op->fLuminanceColor = luminanceColor;
+ op->fNumGlyphs = glyphCount;
+ op->fGeoCount = 1;
+ return op;
+ }
+
+static const int kDistanceAdjustLumShift = 5;
+
+void GrAtlasTextOp::init() {
+ const Geometry& geo = fGeoData[0];
+ if (this->usesDistanceFields()) {
+ bool isLCD = this->isLCD();
+
+ const SkMatrix& viewMatrix = geo.fViewMatrix;
+
+ fDFGPFlags = viewMatrix.isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ fDFGPFlags |= viewMatrix.isScaleTranslate() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ fDFGPFlags |= viewMatrix.hasPerspective() ? kPerspective_DistanceFieldEffectFlag : 0;
+ fDFGPFlags |= fUseGammaCorrectDistanceTable ? kGammaCorrect_DistanceFieldEffectFlag : 0;
+ fDFGPFlags |= (kAliasedDistanceField_MaskType == fMaskType)
+ ? kAliased_DistanceFieldEffectFlag
+ : 0;
+
+ if (isLCD) {
+ fDFGPFlags |= kUseLCD_DistanceFieldEffectFlag;
+ fDFGPFlags |=
+ (kLCDBGRDistanceField_MaskType == fMaskType) ? kBGR_DistanceFieldEffectFlag : 0;
+ }
+
+ fNeedsGlyphTransform = true;
+ }
+
+ SkRect bounds;
+ geo.fBlob->computeSubRunBounds(&bounds, geo.fRun, geo.fSubRun, geo.fViewMatrix, geo.fX, geo.fY,
+ fNeedsGlyphTransform);
+ // We don't have tight bounds on the glyph paths in device space. For the purposes of bounds
+ // we treat this as a set of non-AA rects rendered with a texture.
+ this->setBounds(bounds, HasAABloat::kNo, IsHairline::kNo);
+}
+
+void GrAtlasTextOp::visitProxies(const VisitProxyFunc& func) const {
+ fProcessors.visitProxies(func);
+}
+
+#ifdef SK_DEBUG
+SkString GrAtlasTextOp::dumpInfo() const {
+ SkString str;
+
+ for (int i = 0; i < fGeoCount; ++i) {
+ str.appendf("%d: Color: 0x%08x Trans: %.2f,%.2f Runs: %d\n",
+ i,
+ fGeoData[i].fColor.toBytes_RGBA(),
+ fGeoData[i].fX,
+ fGeoData[i].fY,
+ fGeoData[i].fBlob->runCountLimit());
+ }
+
+ str += fProcessors.dumpProcessors();
+ str += INHERITED::dumpInfo();
+ return str;
+}
+#endif
+
+GrDrawOp::FixedFunctionFlags GrAtlasTextOp::fixedFunctionFlags() const {
+ return FixedFunctionFlags::kNone;
+}
+
+GrProcessorSet::Analysis GrAtlasTextOp::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ GrProcessorAnalysisCoverage coverage;
+ GrProcessorAnalysisColor color;
+ if (kColorBitmapMask_MaskType == fMaskType) {
+ color.setToUnknown();
+ } else {
+ color.setToConstant(this->color());
+ }
+ switch (fMaskType) {
+ case kGrayscaleCoverageMask_MaskType:
+ case kAliasedDistanceField_MaskType:
+ case kGrayscaleDistanceField_MaskType:
+ coverage = GrProcessorAnalysisCoverage::kSingleChannel;
+ break;
+ case kLCDCoverageMask_MaskType:
+ case kLCDDistanceField_MaskType:
+ case kLCDBGRDistanceField_MaskType:
+ coverage = GrProcessorAnalysisCoverage::kLCD;
+ break;
+ case kColorBitmapMask_MaskType:
+ coverage = GrProcessorAnalysisCoverage::kNone;
+ break;
+ }
+ auto analysis = fProcessors.finalize(
+ color, coverage, clip, &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps,
+ clampType, &fGeoData[0].fColor);
+ fUsesLocalCoords = analysis.usesLocalCoords();
+ return analysis;
+}
+
+static void clip_quads(const SkIRect& clipRect, char* currVertex, const char* blobVertices,
+ size_t vertexStride, int glyphCount) {
+ for (int i = 0; i < glyphCount; ++i) {
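+ // Each glyph quad spans 4 consecutive vertices; vertex 0 carries the left/top corner and
+ // vertex 3 the right/bottom corner, which is why RB is read at an offset of 3 * vertexStride.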
+ const SkPoint* blobPositionLT = reinterpret_cast<const SkPoint*>(blobVertices);
+ const SkPoint* blobPositionRB =
+ reinterpret_cast<const SkPoint*>(blobVertices + 3 * vertexStride);
+
+ // positions for bitmap glyphs are pixel boundary aligned
+ SkIRect positionRect = SkIRect::MakeLTRB(SkScalarRoundToInt(blobPositionLT->fX),
+ SkScalarRoundToInt(blobPositionLT->fY),
+ SkScalarRoundToInt(blobPositionRB->fX),
+ SkScalarRoundToInt(blobPositionRB->fY));
+ if (clipRect.contains(positionRect)) {
+ memcpy(currVertex, blobVertices, 4 * vertexStride);
+ currVertex += 4 * vertexStride;
+ } else {
+ // Pull out some more data that we'll need.
+ // In the LCD case the color will be garbage, but we'll overwrite it with the texcoords
+ // and it avoids a lot of conditionals.
+ auto color = *reinterpret_cast<const SkColor*>(blobVertices + sizeof(SkPoint));
+ size_t coordOffset = vertexStride - 2*sizeof(uint16_t);
+ auto* blobCoordsLT = reinterpret_cast<const uint16_t*>(blobVertices + coordOffset);
+ auto* blobCoordsRB = reinterpret_cast<const uint16_t*>(blobVertices + 3 * vertexStride +
+ coordOffset);
+ // Pull out the texel coordinates and texture index bits
+ uint16_t coordsRectL = blobCoordsLT[0] >> 1;
+ uint16_t coordsRectT = blobCoordsLT[1] >> 1;
+ uint16_t coordsRectR = blobCoordsRB[0] >> 1;
+ uint16_t coordsRectB = blobCoordsRB[1] >> 1;
+ uint16_t pageIndexX = blobCoordsLT[0] & 0x1;
+ uint16_t pageIndexY = blobCoordsLT[1] & 0x1;
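+ // The low bit of each packed coordinate carries the atlas page index; e.g. a packed value
+ // of 0b1011 unpacks to texel coordinate 0b101 (5) with a page-index bit of 1.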
+
+ int positionRectWidth = positionRect.width();
+ int positionRectHeight = positionRect.height();
+ SkASSERT(positionRectWidth == (coordsRectR - coordsRectL));
+ SkASSERT(positionRectHeight == (coordsRectB - coordsRectT));
+
+ // Clip position and texCoords to the clipRect
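+ // Each delta is clamped to [0, rect dimension], so an edge that is fully clipped away
+ // collapses the rect instead of inverting it, and the texel coords shift in lockstep to
+ // preserve the 1:1 position-to-texel mapping asserted above.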
+ unsigned int delta;
+ delta = SkTMin(SkTMax(clipRect.fLeft - positionRect.fLeft, 0), positionRectWidth);
+ coordsRectL += delta;
+ positionRect.fLeft += delta;
+
+ delta = SkTMin(SkTMax(clipRect.fTop - positionRect.fTop, 0), positionRectHeight);
+ coordsRectT += delta;
+ positionRect.fTop += delta;
+
+ delta = SkTMin(SkTMax(positionRect.fRight - clipRect.fRight, 0), positionRectWidth);
+ coordsRectR -= delta;
+ positionRect.fRight -= delta;
+
+ delta = SkTMin(SkTMax(positionRect.fBottom - clipRect.fBottom, 0), positionRectHeight);
+ coordsRectB -= delta;
+ positionRect.fBottom -= delta;
+
+ // Repack texel coordinates and index
+ coordsRectL = coordsRectL << 1 | pageIndexX;
+ coordsRectT = coordsRectT << 1 | pageIndexY;
+ coordsRectR = coordsRectR << 1 | pageIndexX;
+ coordsRectB = coordsRectB << 1 | pageIndexY;
+
+ // Set new positions and coords
+ SkPoint* currPosition = reinterpret_cast<SkPoint*>(currVertex);
+ currPosition->fX = positionRect.fLeft;
+ currPosition->fY = positionRect.fTop;
+ *(reinterpret_cast<SkColor*>(currVertex + sizeof(SkPoint))) = color;
+ uint16_t* currCoords = reinterpret_cast<uint16_t*>(currVertex + coordOffset);
+ currCoords[0] = coordsRectL;
+ currCoords[1] = coordsRectT;
+ currVertex += vertexStride;
+
+ currPosition = reinterpret_cast<SkPoint*>(currVertex);
+ currPosition->fX = positionRect.fLeft;
+ currPosition->fY = positionRect.fBottom;
+ *(reinterpret_cast<SkColor*>(currVertex + sizeof(SkPoint))) = color;
+ currCoords = reinterpret_cast<uint16_t*>(currVertex + coordOffset);
+ currCoords[0] = coordsRectL;
+ currCoords[1] = coordsRectB;
+ currVertex += vertexStride;
+
+ currPosition = reinterpret_cast<SkPoint*>(currVertex);
+ currPosition->fX = positionRect.fRight;
+ currPosition->fY = positionRect.fTop;
+ *(reinterpret_cast<SkColor*>(currVertex + sizeof(SkPoint))) = color;
+ currCoords = reinterpret_cast<uint16_t*>(currVertex + coordOffset);
+ currCoords[0] = coordsRectR;
+ currCoords[1] = coordsRectT;
+ currVertex += vertexStride;
+
+ currPosition = reinterpret_cast<SkPoint*>(currVertex);
+ currPosition->fX = positionRect.fRight;
+ currPosition->fY = positionRect.fBottom;
+ *(reinterpret_cast<SkColor*>(currVertex + sizeof(SkPoint))) = color;
+ currCoords = reinterpret_cast<uint16_t*>(currVertex + coordOffset);
+ currCoords[0] = coordsRectR;
+ currCoords[1] = coordsRectB;
+ currVertex += vertexStride;
+ }
+
+ blobVertices += 4 * vertexStride;
+ }
+}
+
+void GrAtlasTextOp::onPrepareDraws(Target* target) {
+ auto resourceProvider = target->resourceProvider();
+
+ // If we have RGB, then we won't have any SkShaders, so there is no need to use a local matrix.
+ // TODO actually only invert if we don't have RGBA
+ SkMatrix localMatrix;
+ if (this->usesLocalCoords() && !fGeoData[0].fViewMatrix.invert(&localMatrix)) {
+ return;
+ }
+
+ GrAtlasManager* atlasManager = target->atlasManager();
+ GrStrikeCache* glyphCache = target->glyphCache();
+
+ GrMaskFormat maskFormat = this->maskFormat();
+
+ unsigned int numActiveProxies;
+ const sk_sp<GrTextureProxy>* proxies = atlasManager->getProxies(maskFormat, &numActiveProxies);
+ if (!proxies) {
+ SkDebugf("Could not allocate backing texture for atlas\n");
+ return;
+ }
+ SkASSERT(proxies[0]);
+
+ static constexpr int kMaxTextures = GrBitmapTextGeoProc::kMaxTextures;
+ GR_STATIC_ASSERT(GrDistanceFieldA8TextGeoProc::kMaxTextures == kMaxTextures);
+ GR_STATIC_ASSERT(GrDistanceFieldLCDTextGeoProc::kMaxTextures == kMaxTextures);
+
+ auto fixedDynamicState = target->makeFixedDynamicState(kMaxTextures);
+ for (unsigned i = 0; i < numActiveProxies; ++i) {
+ fixedDynamicState->fPrimitiveProcessorTextures[i] = proxies[i].get();
+ // This op does not know its atlas proxies when it is added to a GrOpsTask, so the proxies
+ // don't get added during the visitProxies call. Thus we add them here.
+ target->sampledProxyArray()->push_back(proxies[i].get());
+ }
+
+ FlushInfo flushInfo;
+ flushInfo.fFixedDynamicState = fixedDynamicState;
+
+ bool vmPerspective = fGeoData[0].fViewMatrix.hasPerspective();
+ if (this->usesDistanceFields()) {
+ flushInfo.fGeometryProcessor = this->setupDfProcessor(*target->caps().shaderCaps(),
+ proxies, numActiveProxies);
+ } else {
+ GrSamplerState samplerState = fNeedsGlyphTransform ? GrSamplerState::ClampBilerp()
+ : GrSamplerState::ClampNearest();
+ flushInfo.fGeometryProcessor = GrBitmapTextGeoProc::Make(
+ *target->caps().shaderCaps(), this->color(), false, proxies, numActiveProxies,
+ samplerState, maskFormat, localMatrix, vmPerspective);
+ }
+
+ flushInfo.fGlyphsToFlush = 0;
+ size_t vertexStride = flushInfo.fGeometryProcessor->vertexStride();
+
+ int glyphCount = this->numGlyphs();
+
+ void* vertices = target->makeVertexSpace(vertexStride, glyphCount * kVerticesPerGlyph,
+ &flushInfo.fVertexBuffer, &flushInfo.fVertexOffset);
+ flushInfo.fIndexBuffer = resourceProvider->refQuadIndexBuffer();
+ if (!vertices || !flushInfo.fVertexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ char* currVertex = reinterpret_cast<char*>(vertices);
+
+ SkExclusiveStrikePtr autoGlyphCache;
+ // each of these is a SubRun
+ for (int i = 0; i < fGeoCount; i++) {
+ const Geometry& args = fGeoData[i];
+ Blob* blob = args.fBlob;
+ // TODO4F: Preserve float colors
+ GrTextBlob::VertexRegenerator regenerator(
+ resourceProvider, blob, args.fRun, args.fSubRun, args.fViewMatrix, args.fX, args.fY,
+ args.fColor.toBytes_RGBA(), target->deferredUploadTarget(), glyphCache,
+ atlasManager, &autoGlyphCache);
+ bool done = false;
+ while (!done) {
+ GrTextBlob::VertexRegenerator::Result result;
+ if (!regenerator.regenerate(&result)) {
+ break;
+ }
+ done = result.fFinished;
+
+ // Copy regenerated vertices from the blob to our vertex buffer.
+ size_t vertexBytes = result.fGlyphsRegenerated * kVerticesPerGlyph * vertexStride;
+ if (args.fClipRect.isEmpty()) {
+ memcpy(currVertex, result.fFirstVertex, vertexBytes);
+ } else {
+ SkASSERT(!vmPerspective);
+ clip_quads(args.fClipRect, currVertex, result.fFirstVertex, vertexStride,
+ result.fGlyphsRegenerated);
+ }
+ if (fNeedsGlyphTransform && !args.fViewMatrix.isIdentity()) {
+ // We always apply the distance field view matrix transformation after copying, rather
+ // than at blob vertex generation time, as handling successive arbitrary
+ // transformations would be complicated and would accumulate error.
+ if (args.fViewMatrix.hasPerspective()) {
+ auto* pos = reinterpret_cast<SkPoint3*>(currVertex);
+ SkMatrixPriv::MapHomogeneousPointsWithStride(
+ args.fViewMatrix, pos, vertexStride, pos, vertexStride,
+ result.fGlyphsRegenerated * kVerticesPerGlyph);
+ } else {
+ auto* pos = reinterpret_cast<SkPoint*>(currVertex);
+ SkMatrixPriv::MapPointsWithStride(
+ args.fViewMatrix, pos, vertexStride,
+ result.fGlyphsRegenerated * kVerticesPerGlyph);
+ }
+ }
+ flushInfo.fGlyphsToFlush += result.fGlyphsRegenerated;
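+ // A partial result means the regenerator could not fit every remaining glyph in the
+ // atlas this pass; flush the glyphs gathered so far so atlas pages can be recycled
+ // before regenerating the rest.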
+ if (!result.fFinished) {
+ this->flush(target, &flushInfo);
+ }
+ currVertex += vertexBytes;
+ }
+ }
+ this->flush(target, &flushInfo);
+}
+
+void GrAtlasTextOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ this, chainBounds, std::move(fProcessors), GrPipeline::InputFlags::kNone);
+}
+
+void GrAtlasTextOp::flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) const {
+ if (!flushInfo->fGlyphsToFlush) {
+ return;
+ }
+
+ auto atlasManager = target->atlasManager();
+
+ GrGeometryProcessor* gp = flushInfo->fGeometryProcessor.get();
+ GrMaskFormat maskFormat = this->maskFormat();
+
+ unsigned int numActiveProxies;
+ const sk_sp<GrTextureProxy>* proxies = atlasManager->getProxies(maskFormat, &numActiveProxies);
+ SkASSERT(proxies);
+ if (gp->numTextureSamplers() != (int) numActiveProxies) {
+ // During preparation the number of atlas pages has increased.
+ // Update the proxies used in the GP to match.
+ for (unsigned i = gp->numTextureSamplers(); i < numActiveProxies; ++i) {
+ flushInfo->fFixedDynamicState->fPrimitiveProcessorTextures[i] = proxies[i].get();
+ // This op does not know its atlas proxies when it is added to a GrOpsTask, so the
+ // proxies don't get added during the visitProxies call. Thus we add them here.
+ target->sampledProxyArray()->push_back(proxies[i].get());
+ }
+ if (this->usesDistanceFields()) {
+ if (this->isLCD()) {
+ reinterpret_cast<GrDistanceFieldLCDTextGeoProc*>(gp)->addNewProxies(
+ proxies, numActiveProxies, GrSamplerState::ClampBilerp());
+ } else {
+ reinterpret_cast<GrDistanceFieldA8TextGeoProc*>(gp)->addNewProxies(
+ proxies, numActiveProxies, GrSamplerState::ClampBilerp());
+ }
+ } else {
+ GrSamplerState samplerState = fNeedsGlyphTransform ? GrSamplerState::ClampBilerp()
+ : GrSamplerState::ClampNearest();
+ reinterpret_cast<GrBitmapTextGeoProc*>(gp)->addNewProxies(proxies, numActiveProxies,
+ samplerState);
+ }
+ }
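+ // The quad index buffer holds 6 indices (two triangles) per glyph, so this is the number
+ // of complete glyph quads a single patterned draw can cover.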
+ int maxGlyphsPerDraw = static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerGlyph, kVerticesPerGlyph,
+ flushInfo->fGlyphsToFlush, maxGlyphsPerDraw);
+ mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset);
+ target->recordDraw(
+ flushInfo->fGeometryProcessor, mesh, 1, flushInfo->fFixedDynamicState, nullptr);
+ flushInfo->fVertexOffset += kVerticesPerGlyph * flushInfo->fGlyphsToFlush;
+ flushInfo->fGlyphsToFlush = 0;
+}
+
+GrOp::CombineResult GrAtlasTextOp::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
+ GrAtlasTextOp* that = t->cast<GrAtlasTextOp>();
+ if (fProcessors != that->fProcessors) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fMaskType != that->fMaskType) {
+ return CombineResult::kCannotCombine;
+ }
+
+ const SkMatrix& thisFirstMatrix = fGeoData[0].fViewMatrix;
+ const SkMatrix& thatFirstMatrix = that->fGeoData[0].fViewMatrix;
+
+ if (this->usesLocalCoords() && !thisFirstMatrix.cheapEqualTo(thatFirstMatrix)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fNeedsGlyphTransform != that->fNeedsGlyphTransform) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fNeedsGlyphTransform &&
+ (thisFirstMatrix.hasPerspective() != thatFirstMatrix.hasPerspective())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->usesDistanceFields()) {
+ if (fDFGPFlags != that->fDFGPFlags) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fLuminanceColor != that->fLuminanceColor) {
+ return CombineResult::kCannotCombine;
+ }
+ } else {
+ if (kColorBitmapMask_MaskType == fMaskType && this->color() != that->color()) {
+ return CombineResult::kCannotCombine;
+ }
+ }
+
+ // Keep the batch vertex buffer size below 32K so we don't have to create a special one.
+ // We use the largest possible vertex size for this check.
+ static const int kVertexSize = sizeof(SkPoint) + sizeof(SkColor) + 2 * sizeof(uint16_t);
+ static const int kMaxGlyphs = 32768 / (kVerticesPerGlyph * kVertexSize);
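+ // With four vertices per glyph quad and a 16-byte vertex (8-byte SkPoint + 4-byte SkColor
+ // + two uint16_t texture coords), this works out to 512 glyphs per op.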
+ if (this->fNumGlyphs + that->fNumGlyphs > kMaxGlyphs) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fNumGlyphs += that->numGlyphs();
+
+ // Reallocate space for geo data if necessary and then import that geo's data.
+ int newGeoCount = that->fGeoCount + fGeoCount;
+
+ // We reallocate at a rate of 1.5x to try to get better total memory usage
+ if (newGeoCount > fGeoDataAllocSize) {
+ int newAllocSize = fGeoDataAllocSize + fGeoDataAllocSize / 2;
+ while (newAllocSize < newGeoCount) {
+ newAllocSize += newAllocSize / 2;
+ }
+ fGeoData.realloc(newAllocSize);
+ fGeoDataAllocSize = newAllocSize;
+ }
+
+ // We steal the ref on the blobs from the other AtlasTextOp and set its count to 0 so that
+ // it doesn't try to unref them.
+ memcpy(&fGeoData[fGeoCount], that->fGeoData.get(), that->fGeoCount * sizeof(Geometry));
+#ifdef SK_DEBUG
+ for (int i = 0; i < that->fGeoCount; ++i) {
+ that->fGeoData.get()[i].fBlob = (Blob*)0x1;
+ }
+#endif
+ that->fGeoCount = 0;
+ fGeoCount = newGeoCount;
+
+ return CombineResult::kMerged;
+}
+
+// TODO trying to figure out why lcd is so whack
+// (see comments in GrTextContext::ComputeCanonicalColor)
+sk_sp<GrGeometryProcessor> GrAtlasTextOp::setupDfProcessor(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ unsigned int numActiveProxies) const {
+ bool isLCD = this->isLCD();
+
+ SkMatrix localMatrix = SkMatrix::I();
+ if (this->usesLocalCoords()) {
+ // If this fails we'll just use I().
+ bool result = fGeoData[0].fViewMatrix.invert(&localMatrix);
+ (void)result;
+ }
+
+ // see if we need to create a new effect
+ if (isLCD) {
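+ // Each LCD channel gets its own distance adjustment, looked up from the top three bits
+ // (kDistanceAdjustLumShift == 5) of the corresponding channel of the luminance color.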
+ float redCorrection = fDistanceAdjustTable->getAdjustment(
+ SkColorGetR(fLuminanceColor) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ float greenCorrection = fDistanceAdjustTable->getAdjustment(
+ SkColorGetG(fLuminanceColor) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ float blueCorrection = fDistanceAdjustTable->getAdjustment(
+ SkColorGetB(fLuminanceColor) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust widthAdjust =
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust::Make(
+ redCorrection, greenCorrection, blueCorrection);
+ return GrDistanceFieldLCDTextGeoProc::Make(caps, proxies, numActiveProxies,
+ GrSamplerState::ClampBilerp(), widthAdjust,
+ fDFGPFlags, localMatrix);
+ } else {
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float correction = 0;
+ if (kAliasedDistanceField_MaskType != fMaskType) {
+ U8CPU lum = SkColorSpaceLuminance::computeLuminance(SK_GAMMA_EXPONENT,
+ fLuminanceColor);
+ correction = fDistanceAdjustTable->getAdjustment(lum >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ }
+ return GrDistanceFieldA8TextGeoProc::Make(caps, proxies, numActiveProxies,
+ GrSamplerState::ClampBilerp(),
+ correction, fDFGPFlags, localMatrix);
+#else
+ return GrDistanceFieldA8TextGeoProc::Make(caps, proxies, numActiveProxies,
+ GrSamplerState::ClampBilerp(),
+ fDFGPFlags, localMatrix);
+#endif
+ }
+}
+
diff --git a/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.h b/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.h
new file mode 100644
index 0000000000..6b821e605e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrAtlasTextOp.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasTextOp_DEFINED
+#define GrAtlasTextOp_DEFINED
+
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/text/GrDistanceFieldAdjustTable.h"
+#include "src/gpu/text/GrTextBlob.h"
+
+class GrRecordingContext;
+class SkAtlasTextTarget;
+
+class GrAtlasTextOp final : public GrMeshDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ ~GrAtlasTextOp() override {
+ for (int i = 0; i < fGeoCount; i++) {
+ fGeoData[i].fBlob->unref();
+ }
+ }
+
+ static const int kVerticesPerGlyph = GrTextBlob::kVerticesPerGlyph;
+ static const int kIndicesPerGlyph = 6;
+
+ typedef GrTextBlob Blob;
+ struct Geometry {
+ SkMatrix fViewMatrix;
+ SkIRect fClipRect;
+ Blob* fBlob;
+ SkScalar fX;
+ SkScalar fY;
+ uint16_t fRun;
+ uint16_t fSubRun;
+ SkPMColor4f fColor;
+ };
+
+ static std::unique_ptr<GrAtlasTextOp> MakeBitmap(GrRecordingContext*,
+ GrPaint&&,
+ GrMaskFormat,
+ int glyphCount,
+ bool needsTransform);
+
+ static std::unique_ptr<GrAtlasTextOp> MakeDistanceField(
+ GrRecordingContext*,
+ GrPaint&&,
+ int glyphCount,
+ const GrDistanceFieldAdjustTable*,
+ bool useGammaCorrectDistanceTable,
+ SkColor luminanceColor,
+ const SkSurfaceProps&,
+ bool isAntiAliased,
+ bool useLCD);
+
+ // To avoid even the initial copy of the struct, we have a getter for the first item which
+ // is used to seed the op with its initial geometry. After seeding, the client should call
+ // init() so the op can initialize itself.
+ Geometry& geometry() { return fGeoData[0]; }
+
+ /** Called after this->geometry() has been configured. */
+ void init();
+
+ const char* name() const override { return "AtlasTextOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override;
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override;
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override;
+
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override;
+
+ enum MaskType {
+ kGrayscaleCoverageMask_MaskType,
+ kLCDCoverageMask_MaskType,
+ kColorBitmapMask_MaskType,
+ kAliasedDistanceField_MaskType,
+ kGrayscaleDistanceField_MaskType,
+ kLCDDistanceField_MaskType,
+ kLCDBGRDistanceField_MaskType,
+ };
+
+ MaskType maskType() const { return fMaskType; }
+
+ void finalizeForTextTarget(uint32_t color, const GrCaps&);
+ void executeForTextTarget(SkAtlasTextTarget*);
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ // The minimum number of Geometry we will try to allocate.
+ static constexpr auto kMinGeometryAllocated = 12;
+
+ GrAtlasTextOp(GrPaint&& paint)
+ : INHERITED(ClassID())
+ , fGeoDataAllocSize(kMinGeometryAllocated)
+ , fProcessors(std::move(paint)) {}
+
+ struct FlushInfo {
+ sk_sp<const GrBuffer> fVertexBuffer;
+ sk_sp<const GrBuffer> fIndexBuffer;
+ sk_sp<GrGeometryProcessor> fGeometryProcessor;
+ GrPipeline::FixedDynamicState* fFixedDynamicState;
+ int fGlyphsToFlush;
+ int fVertexOffset;
+ };
+
+ void onPrepareDraws(Target*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ GrMaskFormat maskFormat() const {
+ switch (fMaskType) {
+ case kLCDCoverageMask_MaskType:
+ return kA565_GrMaskFormat;
+ case kColorBitmapMask_MaskType:
+ return kARGB_GrMaskFormat;
+ case kGrayscaleCoverageMask_MaskType:
+ case kAliasedDistanceField_MaskType:
+ case kGrayscaleDistanceField_MaskType:
+ case kLCDDistanceField_MaskType:
+ case kLCDBGRDistanceField_MaskType:
+ return kA8_GrMaskFormat;
+ }
+ return kA8_GrMaskFormat; // suppress warning
+ }
+
+ bool usesDistanceFields() const {
+ return kAliasedDistanceField_MaskType == fMaskType ||
+ kGrayscaleDistanceField_MaskType == fMaskType ||
+ kLCDDistanceField_MaskType == fMaskType ||
+ kLCDBGRDistanceField_MaskType == fMaskType;
+ }
+
+ bool isLCD() const {
+ return kLCDCoverageMask_MaskType == fMaskType ||
+ kLCDDistanceField_MaskType == fMaskType ||
+ kLCDBGRDistanceField_MaskType == fMaskType;
+ }
+
+ inline void flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) const;
+
+ const SkPMColor4f& color() const { SkASSERT(fGeoCount > 0); return fGeoData[0].fColor; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ int numGlyphs() const { return fNumGlyphs; }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
+
+ sk_sp<GrGeometryProcessor> setupDfProcessor(const GrShaderCaps& caps,
+ const sk_sp<GrTextureProxy>* proxies,
+ unsigned int numActiveProxies) const;
+
+ SkAutoSTMalloc<kMinGeometryAllocated, Geometry> fGeoData;
+ int fGeoDataAllocSize;
+ GrProcessorSet fProcessors;
+ struct {
+ uint32_t fUsesLocalCoords : 1;
+ uint32_t fUseGammaCorrectDistanceTable : 1;
+ uint32_t fNeedsGlyphTransform : 1;
+ };
+ int fGeoCount;
+ int fNumGlyphs;
+ MaskType fMaskType;
+ // Distance field properties
+ sk_sp<const GrDistanceFieldAdjustTable> fDistanceAdjustTable;
+ SkColor fLuminanceColor;
+ uint32_t fDFGPFlags = 0;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrClearOp.cpp b/gfx/skia/skia/src/gpu/ops/GrClearOp.cpp
new file mode 100644
index 0000000000..b6dcab3570
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrClearOp.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrClearOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+std::unique_ptr<GrClearOp> GrClearOp::Make(GrRecordingContext* context,
+ const GrFixedClip& clip,
+ const SkPMColor4f& color,
+ GrSurfaceProxy* dstProxy) {
+ const SkIRect rect = SkIRect::MakeWH(dstProxy->width(), dstProxy->height());
+ if (clip.scissorEnabled() && !SkIRect::Intersects(clip.scissorRect(), rect)) {
+ return nullptr;
+ }
+
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrClearOp>(clip, color, dstProxy);
+}
+
+std::unique_ptr<GrClearOp> GrClearOp::Make(GrRecordingContext* context,
+ const SkIRect& rect,
+ const SkPMColor4f& color,
+ bool fullScreen) {
+ SkASSERT(fullScreen || !rect.isEmpty());
+
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrClearOp>(rect, color, fullScreen);
+}
+
+GrClearOp::GrClearOp(const GrFixedClip& clip, const SkPMColor4f& color, GrSurfaceProxy* proxy)
+ : INHERITED(ClassID())
+ , fClip(clip)
+ , fColor(color) {
+ const SkIRect rtRect = SkIRect::MakeWH(proxy->width(), proxy->height());
+ if (fClip.scissorEnabled()) {
+ // Don't let scissors extend outside the RT. This may improve op combining.
+ if (!fClip.intersect(rtRect)) {
+ SkASSERT(0); // should be caught upstream
+ fClip = GrFixedClip(SkIRect::MakeEmpty());
+ }
+
+ if (GrProxyProvider::IsFunctionallyExact(proxy) && fClip.scissorRect() == rtRect) {
+ fClip.disableScissor();
+ }
+ }
+ this->setBounds(SkRect::Make(fClip.scissorEnabled() ? fClip.scissorRect() : rtRect),
+ HasAABloat::kNo, IsHairline::kNo);
+}
+
+void GrClearOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ SkASSERT(state->opsRenderPass());
+ state->opsRenderPass()->clear(fClip, fColor);
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrClearOp.h b/gfx/skia/skia/src/gpu/ops/GrClearOp.h
new file mode 100644
index 0000000000..c64bcc7157
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrClearOp.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClearOp_DEFINED
+#define GrClearOp_DEFINED
+
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrOpFlushState;
+class GrRecordingContext;
+
+class GrClearOp final : public GrOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrClearOp> Make(GrRecordingContext* context,
+ const GrFixedClip& clip,
+ const SkPMColor4f& color,
+ GrSurfaceProxy* dstProxy);
+
+ static std::unique_ptr<GrClearOp> Make(GrRecordingContext* context,
+ const SkIRect& rect,
+ const SkPMColor4f& color,
+ bool fullScreen);
+
+ const char* name() const override { return "Clear"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.append(INHERITED::dumpInfo());
+ string.appendf("Scissor [ ");
+ if (fClip.scissorEnabled()) {
+ const SkIRect& r = fClip.scissorRect();
+ string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ } else {
+ string.append("disabled");
+ }
+ string.appendf("], Color: 0x%08x\n", fColor.toBytes_RGBA());
+ return string;
+ }
+#endif
+
+ const SkPMColor4f& color() const { return fColor; }
+ void setColor(const SkPMColor4f& color) { fColor = color; }
+
+private:
+ friend class GrOpMemoryPool; // for ctors
+
+ GrClearOp(const GrFixedClip& clip, const SkPMColor4f& color, GrSurfaceProxy* proxy);
+
+ GrClearOp(const SkIRect& rect, const SkPMColor4f& color, bool fullScreen)
+ : INHERITED(ClassID())
+ , fClip(GrFixedClip(rect))
+ , fColor(color) {
+
+ if (fullScreen) {
+ fClip.disableScissor();
+ }
+ this->setBounds(SkRect::Make(rect), HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ // This could be much more complicated. Currently we look at cases where the new clear
+ // contains the old clear, or when the new clear is a subset of the old clear and is the
+ // same color.
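+ // In other words: a later clear that covers this one wins outright (we adopt its clip
+ // and color), while a smaller later clear merges only when the colors already match.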
+ GrClearOp* cb = t->cast<GrClearOp>();
+ if (fClip.windowRectsState() != cb->fClip.windowRectsState()) {
+ return CombineResult::kCannotCombine;
+ }
+ if (cb->contains(this)) {
+ fClip = cb->fClip;
+ fColor = cb->fColor;
+ return CombineResult::kMerged;
+ } else if (cb->fColor == fColor && this->contains(cb)) {
+ return CombineResult::kMerged;
+ }
+ return CombineResult::kCannotCombine;
+ }
+
+ bool contains(const GrClearOp* that) const {
+ // The constructor ensures that scissor gets disabled on any clip that fills the entire RT.
+ return !fClip.scissorEnabled() ||
+ (that->fClip.scissorEnabled() &&
+ fClip.scissorRect().contains(that->fClip.scissorRect()));
+ }
+
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState* state, const SkRect& chainBounds) override;
+
+ GrFixedClip fClip;
+ SkPMColor4f fColor;
+
+ typedef GrOp INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.cpp b/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.cpp
new file mode 100644
index 0000000000..7ca4063b96
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrClearStencilClipOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+std::unique_ptr<GrOp> GrClearStencilClipOp::Make(GrRecordingContext* context,
+ const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTargetProxy* proxy) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrClearStencilClipOp>(clip, insideStencilMask, proxy);
+}
+
+void GrClearStencilClipOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ SkASSERT(state->opsRenderPass());
+ state->opsRenderPass()->clearStencilClip(fClip, fInsideStencilMask);
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.h b/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.h
new file mode 100644
index 0000000000..0a4a7403f6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrClearStencilClipOp.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClearStencilClipOp_DEFINED
+#define GrClearStencilClipOp_DEFINED
+
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrOpFlushState;
+class GrRecordingContext;
+
+class GrClearStencilClipOp final : public GrOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrOp> Make(GrRecordingContext* context,
+ const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTargetProxy* proxy);
+
+ const char* name() const override { return "ClearStencilClip"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string("Scissor [");
+ if (fClip.scissorEnabled()) {
+ const SkIRect& r = fClip.scissorRect();
+ string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ } else {
+ string.append("disabled");
+ }
+ string.appendf("], insideMask: %s\n", fInsideStencilMask ? "true" : "false");
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+#endif
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ GrClearStencilClipOp(const GrFixedClip& clip, bool insideStencilMask,
+ GrRenderTargetProxy* proxy)
+ : INHERITED(ClassID())
+ , fClip(clip)
+ , fInsideStencilMask(insideStencilMask) {
+ const SkRect& bounds = fClip.scissorEnabled()
+ ? SkRect::Make(fClip.scissorRect())
+ : SkRect::MakeIWH(proxy->width(), proxy->height());
+ this->setBounds(bounds, HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ const GrFixedClip fClip;
+ const bool fInsideStencilMask;
+
+ typedef GrOp INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.cpp
new file mode 100644
index 0000000000..f15309ccb0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrDashLinePathRenderer.h"
+#include "src/gpu/ops/GrDashOp.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+
+GrPathRenderer::CanDrawPath
+GrDashLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ SkPoint pts[2];
+ bool inverted;
+ if (args.fShape->style().isDashed() && args.fShape->asLine(pts, &inverted)) {
+ // We should never have an inverse dashed case.
+ SkASSERT(!inverted);
+ if (!GrDashOp::CanDrawDashLine(pts, args.fShape->style(), *args.fViewMatrix)) {
+ return CanDrawPath::kNo;
+ }
+ return CanDrawPath::kYes;
+ }
+ return CanDrawPath::kNo;
+}
+
+bool GrDashLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrDashLinePathRenderer::onDrawPath");
+ GrDashOp::AAMode aaMode;
+ switch (args.fAAType) {
+ case GrAAType::kNone:
+ aaMode = GrDashOp::AAMode::kNone;
+ break;
+ case GrAAType::kMSAA:
+ // In this mode we will use AA between dashes but the outer border uses MSAA. Otherwise,
+ // we can wind up with external edges antialiased and internal edges unantialiased.
+ aaMode = GrDashOp::AAMode::kCoverageWithMSAA;
+ break;
+ case GrAAType::kCoverage:
+ aaMode = GrDashOp::AAMode::kCoverage;
+ break;
+ }
+ SkPoint pts[2];
+ SkAssertResult(args.fShape->asLine(pts, nullptr));
+ std::unique_ptr<GrDrawOp> op =
+ GrDashOp::MakeDashLineOp(args.fContext, std::move(args.fPaint), *args.fViewMatrix, pts,
+ aaMode, args.fShape->style(), args.fUserStencilSettings);
+ if (!op) {
+ return false;
+ }
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.h
new file mode 100644
index 0000000000..44342420ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDashLinePathRenderer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDashLinePathRenderer_DEFINED
+#define GrDashLinePathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrGpu;
+
+class GrDashLinePathRenderer : public GrPathRenderer {
+private:
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return kNoSupport_StencilSupport;
+ }
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ sk_sp<GrGpu> fGpu;
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDashOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDashOp.cpp
new file mode 100644
index 0000000000..ceba8e6d06
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDashOp.cpp
@@ -0,0 +1,1279 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPointPriv.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+#include "src/gpu/ops/GrDashOp.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+
+using AAMode = GrDashOp::AAMode;
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns whether or not the GPU can fast-path the dash line effect.
+bool GrDashOp::CanDrawDashLine(const SkPoint pts[2], const GrStyle& style,
+ const SkMatrix& viewMatrix) {
+ // Pts must be either horizontal or vertical in src space
+ if (pts[0].fX != pts[1].fX && pts[0].fY != pts[1].fY) {
+ return false;
+ }
+
+    // We may be able to relax this to include skew. For now we cannot handle perspective
+    // because of the non-uniform scaling involved in bloating a rect.
+ if (!viewMatrix.preservesRightAngles()) {
+ return false;
+ }
+
+ if (!style.isDashed() || 2 != style.dashIntervalCnt()) {
+ return false;
+ }
+
+ const SkScalar* intervals = style.dashIntervals();
+ if (0 == intervals[0] && 0 == intervals[1]) {
+ return false;
+ }
+
+ SkPaint::Cap cap = style.strokeRec().getCap();
+ if (SkPaint::kRound_Cap == cap) {
+        // Currently we don't support round caps unless the on interval is zero.
+ if (intervals[0] != 0.f) {
+ return false;
+ }
+        // If the width of the circle caps is greater than the off interval we will pick up
+        // unwanted segments of circles at the start and end of the dash line.
+ if (style.strokeRec().getWidth() > intervals[1]) {
+ return false;
+ }
+ }
+
+ return true;
+}
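+// Illustrative example (editorial note, not upstream code): an axis-aligned line with a
+// two-value dash pattern qualifies for the fast path, while a diagonal one does not:
+//
+//   SkPoint horiz[2] = {{0, 10}, {100, 10}};   // same fY -> horizontal in src space
+//   SkPoint diag[2]  = {{0, 0},  {100, 100}};  // neither fX nor fY match -> rejected
+//   // CanDrawDashLine(horiz, style, SkMatrix::I()) -> true for a butt-cap two-interval dash
+//   // CanDrawDashLine(diag,  style, SkMatrix::I()) -> false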
+
+static void calc_dash_scaling(SkScalar* parallelScale, SkScalar* perpScale,
+ const SkMatrix& viewMatrix, const SkPoint pts[2]) {
+ SkVector vecSrc = pts[1] - pts[0];
+ if (pts[1] == pts[0]) {
+ vecSrc.set(1.0, 0.0);
+ }
+ SkScalar magSrc = vecSrc.length();
+ SkScalar invSrc = magSrc ? SkScalarInvert(magSrc) : 0;
+ vecSrc.scale(invSrc);
+
+ SkVector vecSrcPerp;
+ SkPointPriv::RotateCW(vecSrc, &vecSrcPerp);
+ viewMatrix.mapVectors(&vecSrc, 1);
+ viewMatrix.mapVectors(&vecSrcPerp, 1);
+
+ // parallelScale tells how much to scale along the line parallel to the dash line
+ // perpScale tells how much to scale in the direction perpendicular to the dash line
+ *parallelScale = vecSrc.length();
+ *perpScale = vecSrcPerp.length();
+}
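+// For example (editorial note): with viewMatrix = scale(2, 3) and a horizontal line, the
+// mapped vecSrc has length 2 and the mapped perpendicular has length 3, so
+// parallelScale == 2 (later applied to the dash intervals) and perpScale == 3 (later
+// applied to the stroke width).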
+
+// Calculates the rotation needed to align pts with the x axis such that pts[0].fX < pts[1].fX.
+// Stores the rotation matrix in rotMatrix, and the mapped points in ptsRot.
+static void align_to_x_axis(const SkPoint pts[2], SkMatrix* rotMatrix, SkPoint ptsRot[2] = nullptr) {
+ SkVector vec = pts[1] - pts[0];
+ if (pts[1] == pts[0]) {
+ vec.set(1.0, 0.0);
+ }
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ rotMatrix->setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
+ if (ptsRot) {
+ rotMatrix->mapPoints(ptsRot, pts, 2);
+ // correction for numerical issues if map doesn't make ptsRot exactly horizontal
+ ptsRot[1].fY = pts[0].fY;
+ }
+}
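+// For example (editorial note): for the vertical segment pts = {(0, 0), (0, 5)}, this
+// computes a -90 degree rotation about (0, 0), giving ptsRot = {(0, 0), (5, 0)}.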
+
+// Assumes phase < sum of all intervals
+static SkScalar calc_start_adjustment(const SkScalar intervals[2], SkScalar phase) {
+ SkASSERT(phase < intervals[0] + intervals[1]);
+ if (phase >= intervals[0] && phase != 0) {
+ SkScalar srcIntervalLen = intervals[0] + intervals[1];
+ return srcIntervalLen - phase;
+ }
+ return 0;
+}
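+// For example (editorial note): with intervals {on = 2, off = 3} and phase = 4, the start
+// lands inside the off interval, so this returns 5 - 4 = 1, advancing the start to the
+// beginning of the next on interval.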
+
+static SkScalar calc_end_adjustment(const SkScalar intervals[2], const SkPoint pts[2],
+ SkScalar phase, SkScalar* endingInt) {
+ if (pts[1].fX <= pts[0].fX) {
+ return 0;
+ }
+ SkScalar srcIntervalLen = intervals[0] + intervals[1];
+ SkScalar totalLen = pts[1].fX - pts[0].fX;
+ SkScalar temp = totalLen / srcIntervalLen;
+ SkScalar numFullIntervals = SkScalarFloorToScalar(temp);
+ *endingInt = totalLen - numFullIntervals * srcIntervalLen + phase;
+ temp = *endingInt / srcIntervalLen;
+ *endingInt = *endingInt - SkScalarFloorToScalar(temp) * srcIntervalLen;
+ if (0 == *endingInt) {
+ *endingInt = srcIntervalLen;
+ }
+ if (*endingInt > intervals[0]) {
+ return *endingInt - intervals[0];
+ }
+ return 0;
+}
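+// For example (editorial note): with intervals {on = 2, off = 3}, phase = 0, and a line of
+// length 13, the trailing partial interval is endingInt = 3 (2 on + 1 off), so this
+// returns 3 - 2 = 1 and the end is trimmed back to the end of the last on interval.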
+
+enum DashCap {
+ kRound_DashCap,
+ kNonRound_DashCap,
+};
+
+static void setup_dashed_rect(const SkRect& rect, GrVertexWriter& vertices, const SkMatrix& matrix,
+ SkScalar offset, SkScalar bloatX, SkScalar bloatY, SkScalar len,
+ SkScalar stroke, SkScalar startInterval, SkScalar endInterval,
+ SkScalar strokeWidth, DashCap cap) {
+ SkScalar intervalLength = startInterval + endInterval;
+ SkRect dashRect = { offset - bloatX, -stroke - bloatY,
+ offset + len + bloatX, stroke + bloatY };
+
+ if (kRound_DashCap == cap) {
+ SkScalar radius = SkScalarHalf(strokeWidth) - 0.5f;
+ SkScalar centerX = SkScalarHalf(endInterval);
+
+ vertices.writeQuad(GrQuad::MakeFromRect(rect, matrix),
+ GrVertexWriter::TriStripFromRect(dashRect),
+ intervalLength,
+ radius,
+ centerX);
+ } else {
+ SkASSERT(kNonRound_DashCap == cap);
+ SkScalar halfOffLen = SkScalarHalf(endInterval);
+ SkScalar halfStroke = SkScalarHalf(strokeWidth);
+ SkRect rectParam;
+ rectParam.setLTRB(halfOffLen + 0.5f, -halfStroke + 0.5f,
+ halfOffLen + startInterval - 0.5f, halfStroke - 0.5f);
+
+ vertices.writeQuad(GrQuad::MakeFromRect(rect, matrix),
+ GrVertexWriter::TriStripFromRect(dashRect),
+ intervalLength,
+ rectParam);
+ }
+}
+
+/**
+ * A GrGeometryProcessor that renders a dashed line.
+ * This GrGeometryProcessor is meant for dashed lines that only have a single on/off interval pair.
+ * Bounding geometry is rendered and the effect computes coverage based on the fragment's
+ * position relative to the dashed line.
+ */
+static sk_sp<GrGeometryProcessor> make_dash_gp(const SkPMColor4f&,
+ AAMode aaMode,
+ DashCap cap,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+class DashOp final : public GrMeshDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ struct LineData {
+ SkMatrix fViewMatrix;
+ SkMatrix fSrcRotInv;
+ SkPoint fPtsRot[2];
+ SkScalar fSrcStrokeWidth;
+ SkScalar fPhase;
+ SkScalar fIntervals[2];
+ SkScalar fParallelScale;
+ SkScalar fPerpendicularScale;
+ };
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const LineData& geometry,
+ SkPaint::Cap cap,
+ AAMode aaMode, bool fullDash,
+ const GrUserStencilSettings* stencilSettings) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<DashOp>(std::move(paint), geometry, cap,
+ aaMode, fullDash, stencilSettings);
+ }
+
+ const char* name() const override { return "DashOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fProcessorSet.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (const auto& geo : fLines) {
+ string.appendf("Pt0: [%.2f, %.2f], Pt1: [%.2f, %.2f], Width: %.2f, Ival0: %.2f, "
+ "Ival1 : %.2f, Phase: %.2f\n",
+ geo.fPtsRot[0].fX, geo.fPtsRot[0].fY,
+ geo.fPtsRot[1].fX, geo.fPtsRot[1].fY,
+ geo.fSrcStrokeWidth,
+ geo.fIntervals[0],
+ geo.fIntervals[1],
+ geo.fPhase);
+ }
+ string += fProcessorSet.dumpProcessors();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ FixedFunctionFlags flags = FixedFunctionFlags::kNone;
+ if (AAMode::kCoverageWithMSAA == fAAMode) {
+ flags |= FixedFunctionFlags::kUsesHWAA;
+ }
+ if (fStencilSettings != &GrUserStencilSettings::kUnused) {
+ flags |= FixedFunctionFlags::kUsesStencil;
+ }
+ return flags;
+ }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ GrProcessorAnalysisCoverage coverage;
+ if (AAMode::kNone == fAAMode && !clip->numClipCoverageFragmentProcessors()) {
+ coverage = GrProcessorAnalysisCoverage::kNone;
+ } else {
+ coverage = GrProcessorAnalysisCoverage::kSingleChannel;
+ }
+ auto analysis = fProcessorSet.finalize(
+ fColor, coverage, clip, fStencilSettings, hasMixedSampledCoverage, caps, clampType,
+ &fColor);
+ fUsesLocalCoords = analysis.usesLocalCoords();
+ return analysis;
+ }
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ DashOp(GrPaint&& paint, const LineData& geometry, SkPaint::Cap cap, AAMode aaMode,
+ bool fullDash, const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID())
+ , fColor(paint.getColor4f())
+ , fFullDash(fullDash)
+ , fCap(cap)
+ , fAAMode(aaMode)
+ , fProcessorSet(std::move(paint))
+ , fStencilSettings(stencilSettings) {
+ fLines.push_back(geometry);
+
+ // compute bounds
+ SkScalar halfStrokeWidth = 0.5f * geometry.fSrcStrokeWidth;
+ SkScalar xBloat = SkPaint::kButt_Cap == cap ? 0 : halfStrokeWidth;
+ SkRect bounds;
+ bounds.set(geometry.fPtsRot[0], geometry.fPtsRot[1]);
+ bounds.outset(xBloat, halfStrokeWidth);
+
+ // Note, we actually create the combined matrix here, and save the work
+ SkMatrix& combinedMatrix = fLines[0].fSrcRotInv;
+ combinedMatrix.postConcat(geometry.fViewMatrix);
+
+ IsHairline zeroArea = geometry.fSrcStrokeWidth ? IsHairline::kNo
+ : IsHairline::kYes;
+        HasAABloat aaBloat = (aaMode == AAMode::kNone) ? HasAABloat::kNo : HasAABloat::kYes;
+ this->setTransformedBounds(bounds, combinedMatrix, aaBloat, zeroArea);
+ }
+
+ struct DashDraw {
+ DashDraw(const LineData& geo) {
+ memcpy(fPtsRot, geo.fPtsRot, sizeof(geo.fPtsRot));
+ memcpy(fIntervals, geo.fIntervals, sizeof(geo.fIntervals));
+ fPhase = geo.fPhase;
+ }
+ SkPoint fPtsRot[2];
+ SkScalar fIntervals[2];
+ SkScalar fPhase;
+ SkScalar fStartOffset;
+ SkScalar fStrokeWidth;
+ SkScalar fLineLength;
+ SkScalar fHalfDevStroke;
+ SkScalar fDevBloatX;
+ SkScalar fDevBloatY;
+ bool fLineDone;
+ bool fHasStartRect;
+ bool fHasEndRect;
+ };
+
+ void onPrepareDraws(Target* target) override {
+ int instanceCount = fLines.count();
+ SkPaint::Cap cap = this->cap();
+ bool isRoundCap = SkPaint::kRound_Cap == cap;
+ DashCap capType = isRoundCap ? kRound_DashCap : kNonRound_DashCap;
+
+ sk_sp<GrGeometryProcessor> gp;
+ if (this->fullDash()) {
+ gp = make_dash_gp(this->color(), this->aaMode(), capType, this->viewMatrix(),
+ fUsesLocalCoords);
+ } else {
+ // Set up the vertex data for the line and start/end dashes
+ using namespace GrDefaultGeoProcFactory;
+ Color color(this->color());
+ LocalCoords::Type localCoordsType =
+ fUsesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
+ gp = MakeForDeviceSpace(target->caps().shaderCaps(),
+ color,
+ Coverage::kSolid_Type,
+ localCoordsType,
+ this->viewMatrix());
+ }
+
+ if (!gp) {
+ SkDebugf("Could not create GrGeometryProcessor\n");
+ return;
+ }
+
+ // useAA here means Edge AA or MSAA
+ bool useAA = this->aaMode() != AAMode::kNone;
+ bool fullDash = this->fullDash();
+
+        // We do two passes over all of the dashes. First we set up the start, end, and bounds
+        // rectangles. We preserve all of this work in the rects / draws arrays below. Then we
+        // iterate again over these decomposed dashes to generate vertices.
+ static const int kNumStackDashes = 128;
+ SkSTArray<kNumStackDashes, SkRect, true> rects;
+ SkSTArray<kNumStackDashes, DashDraw, true> draws;
+
+ int totalRectCount = 0;
+ int rectOffset = 0;
+ rects.push_back_n(3 * instanceCount);
+ for (int i = 0; i < instanceCount; i++) {
+ const LineData& args = fLines[i];
+
+ DashDraw& draw = draws.push_back(args);
+
+ bool hasCap = SkPaint::kButt_Cap != cap;
+
+ // We always want to at least stroke out half a pixel on each side in device space
+ // so 0.5f / perpScale gives us this min in src space
+ SkScalar halfSrcStroke =
+ SkMaxScalar(args.fSrcStrokeWidth * 0.5f, 0.5f / args.fPerpendicularScale);
+
+ SkScalar strokeAdj;
+ if (!hasCap) {
+ strokeAdj = 0.f;
+ } else {
+ strokeAdj = halfSrcStroke;
+ }
+
+ SkScalar startAdj = 0;
+
+ bool lineDone = false;
+
+            // To simplify the algorithm, we always push back rects for the start and end rects.
+            // Otherwise we'd have to track start / end rects for each individual geometry.
+ SkRect& bounds = rects[rectOffset++];
+ SkRect& startRect = rects[rectOffset++];
+ SkRect& endRect = rects[rectOffset++];
+
+ bool hasStartRect = false;
+            // If we are using AA, check to see if we are drawing a partial dash at the start. If
+            // so, draw it separately here and adjust our start point accordingly.
+ if (useAA) {
+ if (draw.fPhase > 0 && draw.fPhase < draw.fIntervals[0]) {
+ SkPoint startPts[2];
+ startPts[0] = draw.fPtsRot[0];
+ startPts[1].fY = startPts[0].fY;
+ startPts[1].fX = SkMinScalar(startPts[0].fX + draw.fIntervals[0] - draw.fPhase,
+ draw.fPtsRot[1].fX);
+ startRect.setBounds(startPts, 2);
+ startRect.outset(strokeAdj, halfSrcStroke);
+
+ hasStartRect = true;
+ startAdj = draw.fIntervals[0] + draw.fIntervals[1] - draw.fPhase;
+ }
+ }
+
+ // adjustments for start and end of bounding rect so we only draw dash intervals
+ // contained in the original line segment.
+ startAdj += calc_start_adjustment(draw.fIntervals, draw.fPhase);
+ if (startAdj != 0) {
+ draw.fPtsRot[0].fX += startAdj;
+ draw.fPhase = 0;
+ }
+ SkScalar endingInterval = 0;
+ SkScalar endAdj = calc_end_adjustment(draw.fIntervals, draw.fPtsRot, draw.fPhase,
+ &endingInterval);
+ draw.fPtsRot[1].fX -= endAdj;
+ if (draw.fPtsRot[0].fX >= draw.fPtsRot[1].fX) {
+ lineDone = true;
+ }
+
+ bool hasEndRect = false;
+            // If we are using AA, check to see if we are drawing a partial dash at the end. If
+            // so, draw it separately here and adjust our end point accordingly.
+ if (useAA && !lineDone) {
+ // If we adjusted the end then we will not be drawing a partial dash at the end.
+ // If we didn't adjust the end point then we just need to make sure the ending
+ // dash isn't a full dash
+ if (0 == endAdj && endingInterval != draw.fIntervals[0]) {
+ SkPoint endPts[2];
+ endPts[1] = draw.fPtsRot[1];
+ endPts[0].fY = endPts[1].fY;
+ endPts[0].fX = endPts[1].fX - endingInterval;
+
+ endRect.setBounds(endPts, 2);
+ endRect.outset(strokeAdj, halfSrcStroke);
+
+ hasEndRect = true;
+ endAdj = endingInterval + draw.fIntervals[1];
+
+ draw.fPtsRot[1].fX -= endAdj;
+ if (draw.fPtsRot[0].fX >= draw.fPtsRot[1].fX) {
+ lineDone = true;
+ }
+ }
+ }
+
+ if (draw.fPtsRot[0].fX == draw.fPtsRot[1].fX &&
+ (0 != endAdj || 0 == startAdj) &&
+ hasCap) {
+ // At this point the fPtsRot[0]/[1] represent the start and end of the inner rect of
+ // dashes that we want to draw. The only way they can be equal is if the on interval
+ // is zero (or an edge case if the end of line ends at a full off interval, but this
+ // is handled as well). Thus if the on interval is zero then we need to draw a cap
+                // at this position if the stroke has caps. The spec says we only draw this point
+                // if the point lies in [start of line, end of line). Thus we check if we are at
+                // the end (but not the start), and if so we don't draw the cap.
+ lineDone = false;
+ }
+
+ if (startAdj != 0) {
+ draw.fPhase = 0;
+ }
+
+ // Change the dashing info from src space into device space
+ SkScalar* devIntervals = draw.fIntervals;
+ devIntervals[0] = draw.fIntervals[0] * args.fParallelScale;
+ devIntervals[1] = draw.fIntervals[1] * args.fParallelScale;
+ SkScalar devPhase = draw.fPhase * args.fParallelScale;
+ SkScalar strokeWidth = args.fSrcStrokeWidth * args.fPerpendicularScale;
+
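+            // Editorial note: hairline (zero-width) strokes, and sub-pixel strokes when AA is
+            // enabled, are widened to a minimum of one device pixel here.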
+ if ((strokeWidth < 1.f && useAA) || 0.f == strokeWidth) {
+ strokeWidth = 1.f;
+ }
+
+ SkScalar halfDevStroke = strokeWidth * 0.5f;
+
+ if (SkPaint::kSquare_Cap == cap) {
+ // add cap to on interval and remove from off interval
+ devIntervals[0] += strokeWidth;
+ devIntervals[1] -= strokeWidth;
+ }
+ SkScalar startOffset = devIntervals[1] * 0.5f + devPhase;
+
+ // For EdgeAA, we bloat in X & Y for both square and round caps.
+ // For MSAA, we don't bloat at all for square caps, and bloat in Y only for round caps.
+ SkScalar devBloatX = this->aaMode() == AAMode::kCoverage ? 0.5f : 0.0f;
+ SkScalar devBloatY;
+ if (SkPaint::kRound_Cap == cap && this->aaMode() == AAMode::kCoverageWithMSAA) {
+ devBloatY = 0.5f;
+ } else {
+ devBloatY = devBloatX;
+ }
+
+ SkScalar bloatX = devBloatX / args.fParallelScale;
+ SkScalar bloatY = devBloatY / args.fPerpendicularScale;
+
+ if (devIntervals[1] <= 0.f && useAA) {
+                // Case when we end up drawing a solid AA rect.
+                // Reset the start rect to draw this single solid rect,
+                // but this requires uploading new interval values so we can mimic
+                // one giant dash.
+ draw.fPtsRot[0].fX -= hasStartRect ? startAdj : 0;
+ draw.fPtsRot[1].fX += hasEndRect ? endAdj : 0;
+ startRect.setBounds(draw.fPtsRot, 2);
+ startRect.outset(strokeAdj, halfSrcStroke);
+ hasStartRect = true;
+ hasEndRect = false;
+ lineDone = true;
+
+ SkPoint devicePts[2];
+ args.fViewMatrix.mapPoints(devicePts, draw.fPtsRot, 2);
+ SkScalar lineLength = SkPoint::Distance(devicePts[0], devicePts[1]);
+ if (hasCap) {
+ lineLength += 2.f * halfDevStroke;
+ }
+ devIntervals[0] = lineLength;
+ }
+
+ totalRectCount += !lineDone ? 1 : 0;
+ totalRectCount += hasStartRect ? 1 : 0;
+ totalRectCount += hasEndRect ? 1 : 0;
+
+ if (SkPaint::kRound_Cap == cap && 0 != args.fSrcStrokeWidth) {
+ // need to adjust this for round caps to correctly set the dashPos attrib on
+ // vertices
+ startOffset -= halfDevStroke;
+ }
+
+ if (!lineDone) {
+ SkPoint devicePts[2];
+ args.fViewMatrix.mapPoints(devicePts, draw.fPtsRot, 2);
+ draw.fLineLength = SkPoint::Distance(devicePts[0], devicePts[1]);
+ if (hasCap) {
+ draw.fLineLength += 2.f * halfDevStroke;
+ }
+
+ bounds.setLTRB(draw.fPtsRot[0].fX, draw.fPtsRot[0].fY,
+ draw.fPtsRot[1].fX, draw.fPtsRot[1].fY);
+ bounds.outset(bloatX + strokeAdj, bloatY + halfSrcStroke);
+ }
+
+ if (hasStartRect) {
+ SkASSERT(useAA); // so that we know bloatX and bloatY have been set
+ startRect.outset(bloatX, bloatY);
+ }
+
+ if (hasEndRect) {
+ SkASSERT(useAA); // so that we know bloatX and bloatY have been set
+ endRect.outset(bloatX, bloatY);
+ }
+
+ draw.fStartOffset = startOffset;
+ draw.fDevBloatX = devBloatX;
+ draw.fDevBloatY = devBloatY;
+ draw.fHalfDevStroke = halfDevStroke;
+ draw.fStrokeWidth = strokeWidth;
+ draw.fHasStartRect = hasStartRect;
+ draw.fLineDone = lineDone;
+ draw.fHasEndRect = hasEndRect;
+ }
+
+ if (!totalRectCount) {
+ return;
+ }
+
+ QuadHelper helper(target, gp->vertexStride(), totalRectCount);
+ GrVertexWriter vertices{ helper.vertices() };
+ if (!vertices.fPtr) {
+ return;
+ }
+
+ int rectIndex = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const LineData& geom = fLines[i];
+
+ if (!draws[i].fLineDone) {
+ if (fullDash) {
+ setup_dashed_rect(
+ rects[rectIndex], vertices, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX, draws[i].fDevBloatY,
+ draws[i].fLineLength, draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth, capType);
+ } else {
+ vertices.writeQuad(GrQuad::MakeFromRect(rects[rectIndex], geom.fSrcRotInv));
+ }
+ }
+ rectIndex++;
+
+ if (draws[i].fHasStartRect) {
+ if (fullDash) {
+ setup_dashed_rect(
+ rects[rectIndex], vertices, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX, draws[i].fDevBloatY,
+ draws[i].fIntervals[0], draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth, capType);
+ } else {
+ vertices.writeQuad(GrQuad::MakeFromRect(rects[rectIndex], geom.fSrcRotInv));
+ }
+ }
+ rectIndex++;
+
+ if (draws[i].fHasEndRect) {
+ if (fullDash) {
+ setup_dashed_rect(
+ rects[rectIndex], vertices, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX, draws[i].fDevBloatY,
+ draws[i].fIntervals[0], draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth, capType);
+ } else {
+ vertices.writeQuad(GrQuad::MakeFromRect(rects[rectIndex], geom.fSrcRotInv));
+ }
+ }
+ rectIndex++;
+ }
+ helper.recordDraw(target, std::move(gp));
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ auto pipelineFlags = GrPipeline::InputFlags::kNone;
+ if (AAMode::kCoverageWithMSAA == fAAMode) {
+ pipelineFlags |= GrPipeline::InputFlags::kHWAntialias;
+ }
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ this, chainBounds, std::move(fProcessorSet), pipelineFlags, fStencilSettings);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ DashOp* that = t->cast<DashOp>();
+ if (fProcessorSet != that->fProcessorSet) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->aaMode() != that->aaMode()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->fullDash() != that->fullDash()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->cap() != that->cap()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // TODO vertex color
+ if (this->color() != that->color()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fUsesLocalCoords && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fLines.push_back_n(that->fLines.count(), that->fLines.begin());
+ return CombineResult::kMerged;
+ }
+
+ const SkPMColor4f& color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fLines[0].fViewMatrix; }
+ AAMode aaMode() const { return fAAMode; }
+ bool fullDash() const { return fFullDash; }
+ SkPaint::Cap cap() const { return fCap; }
+
+ static const int kVertsPerDash = 4;
+ static const int kIndicesPerDash = 6;
+
+ SkSTArray<1, LineData, true> fLines;
+ SkPMColor4f fColor;
+ bool fUsesLocalCoords : 1;
+ bool fFullDash : 1;
+    // We use 3 bits for this 3-value enum because MSVC makes the underlying types signed.
+ SkPaint::Cap fCap : 3;
+ AAMode fAAMode;
+ GrProcessorSet fProcessorSet;
+ const GrUserStencilSettings* fStencilSettings;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+std::unique_ptr<GrDrawOp> GrDashOp::MakeDashLineOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkPoint pts[2],
+ AAMode aaMode,
+ const GrStyle& style,
+ const GrUserStencilSettings* stencilSettings) {
+ SkASSERT(GrDashOp::CanDrawDashLine(pts, style, viewMatrix));
+ const SkScalar* intervals = style.dashIntervals();
+ SkScalar phase = style.dashPhase();
+
+ SkPaint::Cap cap = style.strokeRec().getCap();
+
+ DashOp::LineData lineData;
+ lineData.fSrcStrokeWidth = style.strokeRec().getWidth();
+
+    // the phase should be normalized to lie in [0, sum of all intervals)
+ SkASSERT(phase >= 0 && phase < intervals[0] + intervals[1]);
+
+ // Rotate the src pts so they are aligned horizontally with pts[0].fX < pts[1].fX
+ if (pts[0].fY != pts[1].fY || pts[0].fX > pts[1].fX) {
+ SkMatrix rotMatrix;
+ align_to_x_axis(pts, &rotMatrix, lineData.fPtsRot);
+ if (!rotMatrix.invert(&lineData.fSrcRotInv)) {
+ SkDebugf("Failed to create invertible rotation matrix!\n");
+ return nullptr;
+ }
+ } else {
+ lineData.fSrcRotInv.reset();
+ memcpy(lineData.fPtsRot, pts, 2 * sizeof(SkPoint));
+ }
+
+ // Scale corrections of intervals and stroke from view matrix
+ calc_dash_scaling(&lineData.fParallelScale, &lineData.fPerpendicularScale, viewMatrix,
+ lineData.fPtsRot);
+ if (SkScalarNearlyZero(lineData.fParallelScale) ||
+ SkScalarNearlyZero(lineData.fPerpendicularScale)) {
+ return nullptr;
+ }
+
+ SkScalar offInterval = intervals[1] * lineData.fParallelScale;
+ SkScalar strokeWidth = lineData.fSrcStrokeWidth * lineData.fPerpendicularScale;
+
+ if (SkPaint::kSquare_Cap == cap && 0 != lineData.fSrcStrokeWidth) {
+        // add cap to on interval and remove from off interval
+ offInterval -= strokeWidth;
+ }
+
+    // TODO we can do a real rect call if not using full dash (i.e. no off interval, not using AA)
+ bool fullDash = offInterval > 0.f || aaMode != AAMode::kNone;
+
+ lineData.fViewMatrix = viewMatrix;
+ lineData.fPhase = phase;
+ lineData.fIntervals[0] = intervals[0];
+ lineData.fIntervals[1] = intervals[1];
+
+ return DashOp::Make(context, std::move(paint), lineData, cap, aaMode, fullDash,
+ stencilSettings);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingCircleEffect;
+
+/*
+ * This effect will draw a dotted line (defined as a dashed line with round caps and no on
+ * interval). The radius of the dots is given by the strokeWidth and the spacing by the DashInfo.
+ * Both of the previous two parameters are in device space. This effect also requires the setting
+ * of a float2 vertex attribute for the four corners of the bounding rect. This attribute is the
+ * "dash position" of each vertex. In other words it is the vertex coords (in device space) if we
+ * transform the line to be horizontal, with the start of line at the origin then shifted to the
+ * right by half the off interval. The line then goes in the positive x direction.
+ */
+class DashingCircleEffect : public GrGeometryProcessor {
+public:
+ typedef SkPathEffect::DashInfo DashInfo;
+
+ static sk_sp<GrGeometryProcessor> Make(const SkPMColor4f&,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ const char* name() const override { return "DashingCircleEffect"; }
+
+ AAMode aaMode() const { return fAAMode; }
+
+ const SkPMColor4f& color() const { return fColor; }
+
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ DashingCircleEffect(const SkPMColor4f&, AAMode aaMode, const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ SkPMColor4f fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ AAMode fAAMode;
+
+ Attribute fInPosition;
+ Attribute fInDashParams;
+ Attribute fInCircleParams;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ friend class GLDashingCircleEffect;
+ typedef GrGeometryProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingCircleEffect : public GrGLSLGeometryProcessor {
+public:
+ GLDashingCircleEffect();
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) override;
+private:
+ UniformHandle fParamUniform;
+ UniformHandle fColorUniform;
+ SkPMColor4f fColor;
+ SkScalar fPrevRadius;
+ SkScalar fPrevCenterX;
+ SkScalar fPrevIntervalLength;
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GLDashingCircleEffect::GLDashingCircleEffect() {
+ fColor = SK_PMColor4fILLEGAL;
+ fPrevRadius = SK_ScalarMin;
+ fPrevCenterX = SK_ScalarMin;
+ fPrevIntervalLength = SK_ScalarMax;
+}
+
+void GLDashingCircleEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const DashingCircleEffect& dce = args.fGP.cast<DashingCircleEffect>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dce);
+
+ // XY are dashPos, Z is dashInterval
+ GrGLSLVarying dashParams(kHalf3_GrSLType);
+ varyingHandler->addVarying("DashParam", &dashParams);
+ vertBuilder->codeAppendf("%s = %s;", dashParams.vsOut(), dce.fInDashParams.name());
+
+    // x refers to circle radius - 0.5, y refers to circle's center x coord
+ GrGLSLVarying circleParams(kHalf2_GrSLType);
+ varyingHandler->addVarying("CircleParams", &circleParams);
+ vertBuilder->codeAppendf("%s = %s;", circleParams.vsOut(), dce.fInCircleParams.name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, dce.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ dce.fInPosition.asShaderVar(),
+ dce.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // transforms all points so that we can compare them to our test circle
+ fragBuilder->codeAppendf("half xShifted = half(%s.x - floor(%s.x / %s.z) * %s.z);",
+ dashParams.fsIn(), dashParams.fsIn(), dashParams.fsIn(),
+ dashParams.fsIn());
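+    // Editorial note: the generated SkSL computes xShifted = mod(dashPos.x, intervalLength),
+    // folding every dash period onto the first one before the circle test below.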
+ fragBuilder->codeAppendf("half2 fragPosShifted = half2(xShifted, half(%s.y));",
+ dashParams.fsIn());
+ fragBuilder->codeAppendf("half2 center = half2(%s.y, 0.0);", circleParams.fsIn());
+ fragBuilder->codeAppend("half dist = length(center - fragPosShifted);");
+ if (dce.aaMode() != AAMode::kNone) {
+ fragBuilder->codeAppendf("half diff = dist - %s.x;", circleParams.fsIn());
+ fragBuilder->codeAppend("diff = 1.0 - diff;");
+ fragBuilder->codeAppend("half alpha = saturate(diff);");
+ } else {
+ fragBuilder->codeAppendf("half alpha = 1.0;");
+ fragBuilder->codeAppendf("alpha *= dist < %s.x + 0.5 ? 1.0 : 0.0;", circleParams.fsIn());
+ }
+ fragBuilder->codeAppendf("%s = half4(alpha);", args.fOutputCoverage);
+}
+
+void GLDashingCircleEffect::setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) {
+ const DashingCircleEffect& dce = processor.cast<DashingCircleEffect>();
+ if (dce.color() != fColor) {
+ pdman.set4fv(fColorUniform, 1, dce.color().vec());
+ fColor = dce.color();
+ }
+ this->setTransformDataHelper(dce.localMatrix(), pdman, &transformIter);
+}
+
+void GLDashingCircleEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DashingCircleEffect& dce = gp.cast<DashingCircleEffect>();
+ uint32_t key = 0;
+ key |= dce.usesLocalCoords() && dce.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= static_cast<uint32_t>(dce.aaMode()) << 1;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGeometryProcessor> DashingCircleEffect::Make(const SkPMColor4f& color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new DashingCircleEffect(color, aaMode, localMatrix, usesLocalCoords));
+}
+
+void DashingCircleEffect::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLDashingCircleEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* DashingCircleEffect::createGLSLInstance(const GrShaderCaps&) const {
+ return new GLDashingCircleEffect();
+}
+
+DashingCircleEffect::DashingCircleEffect(const SkPMColor4f& color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : INHERITED(kDashingCircleEffect_ClassID)
+ , fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fAAMode(aaMode) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInDashParams = {"inDashParams", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ fInCircleParams = {"inCircleParams", kFloat2_GrVertexAttribType, kHalf2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+}
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DashingCircleEffect);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> DashingCircleEffect::TestCreate(GrProcessorTestData* d) {
+ AAMode aaMode = static_cast<AAMode>(d->fRandom->nextULessThan(GrDashOp::kAAModeCnt));
+ return DashingCircleEffect::Make(SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ aaMode, GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingLineEffect;
+
+/*
+ * This effect will draw a dashed line. The width of the dash is given by the strokeWidth and the
+ * length and spacing by the DashInfo. Both of the previous two parameters are in device space.
+ * This effect also requires the setting of a float2 vertex attribute for the four corners of the
+ * bounding rect. This attribute is the "dash position" of each vertex. In other words it is the
+ * vertex coords (in device space) if we transform the line to be horizontal, with the start of
+ * line at the origin then shifted to the right by half the off interval. The line then goes in the
+ * positive x direction.
+ */
+class DashingLineEffect : public GrGeometryProcessor {
+public:
+ typedef SkPathEffect::DashInfo DashInfo;
+
+ static sk_sp<GrGeometryProcessor> Make(const SkPMColor4f&,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ const char* name() const override { return "DashingEffect"; }
+
+ AAMode aaMode() const { return fAAMode; }
+
+ const SkPMColor4f& color() const { return fColor; }
+
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ DashingLineEffect(const SkPMColor4f&, AAMode aaMode, const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ SkPMColor4f fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ AAMode fAAMode;
+
+ Attribute fInPosition;
+ Attribute fInDashParams;
+ Attribute fInRect;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ friend class GLDashingLineEffect;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingLineEffect : public GrGLSLGeometryProcessor {
+public:
+ GLDashingLineEffect();
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& iter) override;
+
+private:
+ SkPMColor4f fColor;
+ UniformHandle fColorUniform;
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GLDashingLineEffect::GLDashingLineEffect() : fColor(SK_PMColor4fILLEGAL) {}
+
+void GLDashingLineEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const DashingLineEffect& de = args.fGP.cast<DashingLineEffect>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(de);
+
+ // XY refers to dashPos, Z is the dash interval length
+ GrGLSLVarying inDashParams(kFloat3_GrSLType);
+ varyingHandler->addVarying("DashParams", &inDashParams);
+ vertBuilder->codeAppendf("%s = %s;", inDashParams.vsOut(), de.fInDashParams.name());
+
+ // The rect uniform's xyzw refer to (left + 0.5, top + 0.5, right - 0.5, bottom - 0.5),
+ // respectively.
+ GrGLSLVarying inRectParams(kFloat4_GrSLType);
+ varyingHandler->addVarying("RectParams", &inRectParams);
+ vertBuilder->codeAppendf("%s = %s;", inRectParams.vsOut(), de.fInRect.name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, de.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ de.fInPosition.asShaderVar(),
+ de.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // transforms all points so that we can compare them to our test rect
+ fragBuilder->codeAppendf("half xShifted = half(%s.x - floor(%s.x / %s.z) * %s.z);",
+ inDashParams.fsIn(), inDashParams.fsIn(), inDashParams.fsIn(),
+ inDashParams.fsIn());
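+    // Editorial note: as in the circle effect, xShifted = mod(dashPos.x, intervalLength),
+    // folding every dash period onto the first one before the rect test below.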
+ fragBuilder->codeAppendf("half2 fragPosShifted = half2(xShifted, half(%s.y));",
+ inDashParams.fsIn());
+ if (de.aaMode() == AAMode::kCoverage) {
+ // The amount of coverage removed in x and y by the edges is computed as a pair of negative
+ // numbers, xSub and ySub.
+ fragBuilder->codeAppend("half xSub, ySub;");
+ fragBuilder->codeAppendf("xSub = half(min(fragPosShifted.x - %s.x, 0.0));",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("xSub += half(min(%s.z - fragPosShifted.x, 0.0));",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("ySub = half(min(fragPosShifted.y - %s.y, 0.0));",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("ySub += half(min(%s.w - fragPosShifted.y, 0.0));",
+ inRectParams.fsIn());
+ // Now compute coverage in x and y and multiply them to get the fraction of the pixel
+ // covered.
+ fragBuilder->codeAppendf(
+ "half alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));");
+ } else if (de.aaMode() == AAMode::kCoverageWithMSAA) {
+ // For MSAA, we don't modulate the alpha by the Y distance, since MSAA coverage will handle
+        // AA on the top and bottom edges. The shader is only responsible for intra-dash alpha.
+ fragBuilder->codeAppend("half xSub;");
+ fragBuilder->codeAppendf("xSub = half(min(fragPosShifted.x - %s.x, 0.0));",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("xSub += half(min(%s.z - fragPosShifted.x, 0.0));",
+ inRectParams.fsIn());
+ // Now compute coverage in x to get the fraction of the pixel covered.
+ fragBuilder->codeAppendf("half alpha = (1.0 + max(xSub, -1.0));");
+ } else {
+ // Assuming the bounding geometry is tight so no need to check y values
+ fragBuilder->codeAppendf("half alpha = 1.0;");
+ fragBuilder->codeAppendf("alpha *= (fragPosShifted.x - %s.x) > -0.5 ? 1.0 : 0.0;",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("alpha *= (%s.z - fragPosShifted.x) >= -0.5 ? 1.0 : 0.0;",
+ inRectParams.fsIn());
+ }
+ fragBuilder->codeAppendf("%s = half4(alpha);", args.fOutputCoverage);
+}
+
+void GLDashingLineEffect::setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) {
+ const DashingLineEffect& de = processor.cast<DashingLineEffect>();
+ if (de.color() != fColor) {
+ pdman.set4fv(fColorUniform, 1, de.color().vec());
+ fColor = de.color();
+ }
+ this->setTransformDataHelper(de.localMatrix(), pdman, &transformIter);
+}
+
+void GLDashingLineEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DashingLineEffect& de = gp.cast<DashingLineEffect>();
+ uint32_t key = 0;
+ key |= de.usesLocalCoords() && de.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= static_cast<int>(de.aaMode()) << 8;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGeometryProcessor> DashingLineEffect::Make(const SkPMColor4f& color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new DashingLineEffect(color, aaMode, localMatrix, usesLocalCoords));
+}
+
+void DashingLineEffect::getGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLDashingLineEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* DashingLineEffect::createGLSLInstance(const GrShaderCaps&) const {
+ return new GLDashingLineEffect();
+}
+
+DashingLineEffect::DashingLineEffect(const SkPMColor4f& color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : INHERITED(kDashingLineEffect_ClassID)
+ , fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fAAMode(aaMode) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInDashParams = {"inDashParams", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ fInRect = {"inRect", kFloat4_GrVertexAttribType, kHalf4_GrSLType};
+ this->setVertexAttributes(&fInPosition, 3);
+}
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DashingLineEffect);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> DashingLineEffect::TestCreate(GrProcessorTestData* d) {
+ AAMode aaMode = static_cast<AAMode>(d->fRandom->nextULessThan(GrDashOp::kAAModeCnt));
+ return DashingLineEffect::Make(SkPMColor4f::FromBytes_RGBA(GrRandomColor(d->fRandom)),
+ aaMode, GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+}
+
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<GrGeometryProcessor> make_dash_gp(const SkPMColor4f& color,
+ AAMode aaMode,
+ DashCap cap,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords) {
+ SkMatrix invert;
+ if (usesLocalCoords && !viewMatrix.invert(&invert)) {
+ SkDebugf("Failed to invert\n");
+ return nullptr;
+ }
+
+ switch (cap) {
+ case kRound_DashCap:
+ return DashingCircleEffect::Make(color, aaMode, invert, usesLocalCoords);
+ case kNonRound_DashCap:
+ return DashingLineEffect::Make(color, aaMode, invert, usesLocalCoords);
+ }
+ return nullptr;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(DashOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
+ AAMode aaMode;
+ do {
+ aaMode = static_cast<AAMode>(random->nextULessThan(GrDashOp::kAAModeCnt));
+ } while (AAMode::kCoverageWithMSAA == aaMode && numSamples <= 1);
+
+ // We can only dash either horizontal or vertical lines
+ SkPoint pts[2];
+ if (random->nextBool()) {
+ // vertical
+ pts[0].fX = 1.f;
+ pts[0].fY = random->nextF() * 10.f;
+ pts[1].fX = 1.f;
+ pts[1].fY = random->nextF() * 10.f;
+ } else {
+ // horizontal
+ pts[0].fX = random->nextF() * 10.f;
+ pts[0].fY = 1.f;
+ pts[1].fX = random->nextF() * 10.f;
+ pts[1].fY = 1.f;
+ }
+
+ // pick random cap
+ SkPaint::Cap cap = SkPaint::Cap(random->nextULessThan(SkPaint::kCapCount));
+
+ SkScalar intervals[2];
+
+ // We can only dash with the following intervals
+ enum Intervals {
+        kOpenOpen_Intervals,
+ kOpenClose_Intervals,
+ kCloseOpen_Intervals,
+ };
+
+ Intervals intervalType = SkPaint::kRound_Cap == cap ?
+ kOpenClose_Intervals :
+ Intervals(random->nextULessThan(kCloseOpen_Intervals + 1));
+ static const SkScalar kIntervalMin = 0.1f;
+    static const SkScalar kIntervalMinCircles = 1.f; // Must be >= the stroke width
+ static const SkScalar kIntervalMax = 10.f;
+ switch (intervalType) {
+ case kOpenOpen_Intervals:
+ intervals[0] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ intervals[1] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ break;
+ case kOpenClose_Intervals: {
+ intervals[0] = 0.f;
+ SkScalar min = SkPaint::kRound_Cap == cap ? kIntervalMinCircles : kIntervalMin;
+ intervals[1] = random->nextRangeScalar(min, kIntervalMax);
+ break;
+ }
+ case kCloseOpen_Intervals:
+ intervals[0] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ intervals[1] = 0.f;
+ break;
+ }
+
+    // phase is in [0, sum(i0, i1))
+ SkScalar phase = random->nextRangeScalar(0, intervals[0] + intervals[1]);
+
+ SkPaint p;
+ p.setStyle(SkPaint::kStroke_Style);
+ p.setStrokeWidth(SkIntToScalar(1));
+ p.setStrokeCap(cap);
+ p.setPathEffect(GrTest::TestDashPathEffect::Make(intervals, 2, phase));
+
+ GrStyle style(p);
+
+ return GrDashOp::MakeDashLineOp(context, std::move(paint), viewMatrix, pts, aaMode, style,
+ GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDashOp.h b/gfx/skia/skia/src/gpu/ops/GrDashOp.h
new file mode 100644
index 0000000000..db16a713fa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDashOp.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDashOp_DEFINED
+#define GrDashOp_DEFINED
+
+#include "include/core/SkPathEffect.h"
+#include "include/gpu/GrTypes.h"
+
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+class GrStyle;
+struct GrUserStencilSettings;
+
+namespace GrDashOp {
+enum class AAMode {
+ kNone,
+ kCoverage,
+ kCoverageWithMSAA,
+};
+static const int kAAModeCnt = static_cast<int>(AAMode::kCoverageWithMSAA) + 1;
+
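+// Typical usage (editorial sketch, mirroring GrDashLinePathRenderer): gate on
+// CanDrawDashLine() before building the op, e.g.
+//
+//   if (GrDashOp::CanDrawDashLine(pts, shape.style(), viewMatrix)) {
+//       auto op = GrDashOp::MakeDashLineOp(context, std::move(paint), viewMatrix, pts,
+//                                          aaMode, shape.style(), stencilSettings);
+//   }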
+std::unique_ptr<GrDrawOp> MakeDashLineOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ const SkPoint pts[2],
+ AAMode,
+ const GrStyle& style,
+ const GrUserStencilSettings*);
+bool CanDrawDashLine(const SkPoint pts[2], const GrStyle& style, const SkMatrix& viewMatrix);
+}  // namespace GrDashOp
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.cpp
new file mode 100644
index 0000000000..8c255cd45c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrDebugMarkerOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+std::unique_ptr<GrOp> GrDebugMarkerOp::Make(GrRecordingContext* context,
+ GrRenderTargetProxy* proxy,
+ const SkString& str) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrDebugMarkerOp>(proxy, str);
+}
+
+void GrDebugMarkerOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ //SkDebugf("%s\n", fStr.c_str());
+ if (state->caps().gpuTracingSupport()) {
+ state->opsRenderPass()->insertEventMarker(fStr.c_str());
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.h b/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.h
new file mode 100644
index 0000000000..fd7eb0cbdd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDebugMarkerOp.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDebugMarkerOp_DEFINED
+#define GrDebugMarkerOp_DEFINED
+
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrOpFlushState;
+class GrRecordingContext;
+
+class GrDebugMarkerOp final : public GrOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrOp> Make(GrRecordingContext*,
+ GrRenderTargetProxy*,
+ const SkString&);
+
+ const char* name() const override { return "DebugMarker"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+#endif
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ GrDebugMarkerOp(GrRenderTargetProxy* proxy, const SkString& str)
+ : INHERITED(ClassID())
+ , fStr(str) {
+ // Make this cover the whole screen so it can't be reordered around
+ this->makeFullScreen(proxy);
+ }
+
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ SkString fStr;
+
+ typedef GrOp INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.cpp
new file mode 100644
index 0000000000..535b3eee42
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.cpp
@@ -0,0 +1,714 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrDefaultPathRenderer.h"
+
+#include "include/core/SkString.h"
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrSurfaceContextPriv.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+GrDefaultPathRenderer::GrDefaultPathRenderer() {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers for drawPath
+
+#define STENCIL_OFF 0 // Always disable stencil (even when needed)
+
+static inline bool single_pass_shape(const GrShape& shape) {
+#if STENCIL_OFF
+ return true;
+#else
+ // Inverse fill is always two pass.
+ if (shape.inverseFilled()) {
+ return false;
+ }
+ // This path renderer only accepts simple fill paths or stroke paths that are either hairline
+ // or have a stroke width small enough to treat as hairline. Hairline paths are always single
+ // pass. Filled paths are single pass if they're convex.
+ if (shape.style().isSimpleFill()) {
+ return shape.knownToBeConvex();
+ }
+ return true;
+#endif
+}
+
+GrPathRenderer::StencilSupport
+GrDefaultPathRenderer::onGetStencilSupport(const GrShape& shape) const {
+ if (single_pass_shape(shape)) {
+ return GrPathRenderer::kNoRestriction_StencilSupport;
+ } else {
+ return GrPathRenderer::kStencilOnly_StencilSupport;
+ }
+}
+
+namespace {
+
+class PathGeoBuilder {
+public:
+ PathGeoBuilder(GrPrimitiveType primitiveType, GrMeshDrawOp::Target* target,
+ sk_sp<const GrGeometryProcessor> geometryProcessor)
+ : fPrimitiveType(primitiveType)
+ , fTarget(target)
+ , fVertexStride(sizeof(SkPoint))
+ , fGeometryProcessor(std::move(geometryProcessor))
+ , fFirstIndex(0)
+ , fIndicesInChunk(0)
+ , fIndices(nullptr) {
+ this->allocNewBuffers();
+ }
+
+ ~PathGeoBuilder() {
+ this->emitMeshAndPutBackReserve();
+ }
+
+ /**
+ * Path verbs
+ */
+ void moveTo(const SkPoint& p) {
+ needSpace(1);
+
+ fSubpathIndexStart = this->currentIndex();
+ *(fCurVert++) = p;
+ }
+
+ void addLine(const SkPoint& p) {
+ needSpace(1, this->indexScale());
+
+ if (this->isIndexed()) {
+ uint16_t prevIdx = this->currentIndex() - 1;
+ appendCountourEdgeIndices(prevIdx);
+ }
+ *(fCurVert++) = p;
+ }
+
+ void addQuad(const SkPoint pts[], SkScalar srcSpaceTolSqd, SkScalar srcSpaceTol) {
+ this->needSpace(GrPathUtils::kMaxPointsPerCurve,
+ GrPathUtils::kMaxPointsPerCurve * this->indexScale());
+
+ // First pt of quad is the pt we ended on in previous step
+ uint16_t firstQPtIdx = this->currentIndex() - 1;
+ uint16_t numPts = (uint16_t)GrPathUtils::generateQuadraticPoints(
+ pts[0], pts[1], pts[2], srcSpaceTolSqd, &fCurVert,
+ GrPathUtils::quadraticPointCount(pts, srcSpaceTol));
+ if (this->isIndexed()) {
+ for (uint16_t i = 0; i < numPts; ++i) {
+ appendCountourEdgeIndices(firstQPtIdx + i);
+ }
+ }
+ }
+
+ void addConic(SkScalar weight, const SkPoint pts[], SkScalar srcSpaceTolSqd,
+ SkScalar srcSpaceTol) {
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, srcSpaceTol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ this->addQuad(quadPts + i * 2, srcSpaceTolSqd, srcSpaceTol);
+ }
+ }
+
+ void addCubic(const SkPoint pts[], SkScalar srcSpaceTolSqd, SkScalar srcSpaceTol) {
+ this->needSpace(GrPathUtils::kMaxPointsPerCurve,
+ GrPathUtils::kMaxPointsPerCurve * this->indexScale());
+
+ // First pt of cubic is the pt we ended on in previous step
+ uint16_t firstCPtIdx = this->currentIndex() - 1;
+ uint16_t numPts = (uint16_t) GrPathUtils::generateCubicPoints(
+ pts[0], pts[1], pts[2], pts[3], srcSpaceTolSqd, &fCurVert,
+ GrPathUtils::cubicPointCount(pts, srcSpaceTol));
+ if (this->isIndexed()) {
+ for (uint16_t i = 0; i < numPts; ++i) {
+ appendCountourEdgeIndices(firstCPtIdx + i);
+ }
+ }
+ }
+
+ void addPath(const SkPath& path, SkScalar srcSpaceTol) {
+ SkScalar srcSpaceTolSqd = srcSpaceTol * srcSpaceTol;
+
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+
+ bool done = false;
+ while (!done) {
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ this->moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ this->addLine(pts[1]);
+ break;
+ case SkPath::kConic_Verb:
+ this->addConic(iter.conicWeight(), pts, srcSpaceTolSqd, srcSpaceTol);
+ break;
+ case SkPath::kQuad_Verb:
+ this->addQuad(pts, srcSpaceTolSqd, srcSpaceTol);
+ break;
+ case SkPath::kCubic_Verb:
+ this->addCubic(pts, srcSpaceTolSqd, srcSpaceTol);
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ }
+ }
+ }
+
+ static bool PathHasMultipleSubpaths(const SkPath& path) {
+ bool first = true;
+
+ SkPath::Iter iter(path, false);
+ SkPath::Verb verb;
+
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (SkPath::kMove_Verb == verb && !first) {
+ return true;
+ }
+ first = false;
+ }
+ return false;
+ }
+
+private:
+ /**
+ * Derived properties
+ * TODO: Cache some of these for better performance, rather than re-computing?
+ */
+ bool isIndexed() const {
+ return GrPrimitiveType::kLines == fPrimitiveType ||
+ GrPrimitiveType::kTriangles == fPrimitiveType;
+ }
+ bool isHairline() const {
+ return GrPrimitiveType::kLines == fPrimitiveType ||
+ GrPrimitiveType::kLineStrip == fPrimitiveType;
+ }
+ int indexScale() const {
+ switch (fPrimitiveType) {
+ case GrPrimitiveType::kLines:
+ return 2;
+ case GrPrimitiveType::kTriangles:
+ return 3;
+ default:
+ return 0;
+ }
+ }
+
+ uint16_t currentIndex() const { return fCurVert - fVertices; }
+
+ // Allocate vertex and (possibly) index buffers
+ void allocNewBuffers() {
+ // Ensure that we always get enough verts for a worst-case quad/cubic, plus leftover points
+ // from previous mesh piece (up to two verts to continue fanning). If we can't get that
+ // many, ask for a much larger number. This needs to be fairly big to handle quads/cubics,
+ // which have a worst-case of 1k points.
+ static const int kMinVerticesPerChunk = GrPathUtils::kMaxPointsPerCurve + 2;
+ static const int kFallbackVerticesPerChunk = 16384;
+
+ fVertices = static_cast<SkPoint*>(fTarget->makeVertexSpaceAtLeast(fVertexStride,
+ kMinVerticesPerChunk,
+ kFallbackVerticesPerChunk,
+ &fVertexBuffer,
+ &fFirstVertex,
+ &fVerticesInChunk));
+
+ if (this->isIndexed()) {
+ // Similar to above: Ensure we get enough indices for one worst-case quad/cubic.
+ // No extra indices are needed for stitching, though. If we can't get that many, ask
+ // for enough to match our large vertex request.
+ const int kMinIndicesPerChunk = GrPathUtils::kMaxPointsPerCurve * this->indexScale();
+ const int kFallbackIndicesPerChunk = kFallbackVerticesPerChunk * this->indexScale();
+
+ fIndices = fTarget->makeIndexSpaceAtLeast(kMinIndicesPerChunk, kFallbackIndicesPerChunk,
+ &fIndexBuffer, &fFirstIndex,
+ &fIndicesInChunk);
+ }
+
+ fCurVert = fVertices;
+ fCurIdx = fIndices;
+ fSubpathIndexStart = 0;
+ }
+
+ void appendCountourEdgeIndices(uint16_t edgeV0Idx) {
+        // When drawing lines we're appending line segments along the contour. When applying the
+ // other fill rules we're drawing triangle fans around the start of the current (sub)path.
+ if (!this->isHairline()) {
+ *(fCurIdx++) = fSubpathIndexStart;
+ }
+ *(fCurIdx++) = edgeV0Idx;
+ *(fCurIdx++) = edgeV0Idx + 1;
+ }
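+    // Editorial example: with fSubpathIndexStart == 0 and edgeV0Idx == 3 this appends
+    // (0, 3, 4) for a triangle fan, or just (3, 4) in the hairline (line segment) case.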
+
+ // Emits a single draw with all accumulated vertex/index data
+ void emitMeshAndPutBackReserve() {
+ int vertexCount = fCurVert - fVertices;
+ int indexCount = fCurIdx - fIndices;
+ SkASSERT(vertexCount <= fVerticesInChunk);
+ SkASSERT(indexCount <= fIndicesInChunk);
+
+ if (this->isIndexed() ? SkToBool(indexCount) : SkToBool(vertexCount)) {
+ GrMesh* mesh = fTarget->allocMesh(fPrimitiveType);
+ if (!this->isIndexed()) {
+ mesh->setNonIndexedNonInstanced(vertexCount);
+ } else {
+ mesh->setIndexed(std::move(fIndexBuffer), indexCount, fFirstIndex, 0,
+ vertexCount - 1, GrPrimitiveRestart::kNo);
+ }
+ mesh->setVertexData(std::move(fVertexBuffer), fFirstVertex);
+ fTarget->recordDraw(fGeometryProcessor, mesh);
+ }
+
+ fTarget->putBackIndices((size_t)(fIndicesInChunk - indexCount));
+ fTarget->putBackVertices((size_t)(fVerticesInChunk - vertexCount), fVertexStride);
+ }
+
+ void needSpace(int vertsNeeded, int indicesNeeded = 0) {
+ if (fCurVert + vertsNeeded > fVertices + fVerticesInChunk ||
+ fCurIdx + indicesNeeded > fIndices + fIndicesInChunk) {
+ // We are about to run out of space (possibly)
+
+ // To maintain continuity, we need to remember one or two points from the current mesh.
+ // Lines only need the last point, fills need the first point from the current contour.
+ // We always grab both here, and append the ones we need at the end of this process.
+ SkPoint lastPt = *(fCurVert - 1);
+ SkASSERT(fSubpathIndexStart < fVerticesInChunk);
+ SkPoint subpathStartPt = fVertices[fSubpathIndexStart];
+
+ // Draw the mesh we've accumulated, and put back any unused space
+ this->emitMeshAndPutBackReserve();
+
+ // Get new buffers
+ this->allocNewBuffers();
+
+ // Append copies of the points we saved so the two meshes will weld properly
+ if (!this->isHairline()) {
+ *(fCurVert++) = subpathStartPt;
+ }
+ *(fCurVert++) = lastPt;
+ }
+ }
+
+ GrPrimitiveType fPrimitiveType;
+ GrMeshDrawOp::Target* fTarget;
+ size_t fVertexStride;
+ sk_sp<const GrGeometryProcessor> fGeometryProcessor;
+
+ sk_sp<const GrBuffer> fVertexBuffer;
+ int fFirstVertex;
+ int fVerticesInChunk;
+ SkPoint* fVertices;
+ SkPoint* fCurVert;
+
+ sk_sp<const GrBuffer> fIndexBuffer;
+ int fFirstIndex;
+ int fIndicesInChunk;
+ uint16_t* fIndices;
+ uint16_t* fCurIdx;
+ uint16_t fSubpathIndexStart;
+};
+
+class DefaultPathOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkPath& path,
+ SkScalar tolerance,
+ uint8_t coverage,
+ const SkMatrix& viewMatrix,
+ bool isHairline,
+ GrAAType aaType,
+ const SkRect& devBounds,
+ const GrUserStencilSettings* stencilSettings) {
+ return Helper::FactoryHelper<DefaultPathOp>(context, std::move(paint), path, tolerance,
+ coverage, viewMatrix, isHairline, aaType,
+ devBounds, stencilSettings);
+ }
+
+ const char* name() const override { return "DefaultPathOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Color: 0x%08x Count: %d\n", fColor.toBytes_RGBA(), fPaths.count());
+ for (const auto& path : fPaths) {
+ string.appendf("Tolerance: %.2f\n", path.fTolerance);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ DefaultPathOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color, const SkPath& path,
+ SkScalar tolerance, uint8_t coverage, const SkMatrix& viewMatrix, bool isHairline,
+ GrAAType aaType, const SkRect& devBounds,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, aaType, stencilSettings)
+ , fColor(color)
+ , fCoverage(coverage)
+ , fViewMatrix(viewMatrix)
+ , fIsHairline(isHairline) {
+ fPaths.emplace_back(PathData{path, tolerance});
+
+        HasAABloat aaBloat = (aaType == GrAAType::kNone) ? HasAABloat::kNo : HasAABloat::kYes;
+ this->setBounds(devBounds, aaBloat,
+ isHairline ? IsHairline::kYes : IsHairline::kNo);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ GrProcessorAnalysisCoverage gpCoverage =
+ this->coverage() == 0xFF ? GrProcessorAnalysisCoverage::kNone
+ : GrProcessorAnalysisCoverage::kSingleChannel;
+ // This Op uses uniform (not vertex) color, so doesn't need to track wide color.
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, gpCoverage, &fColor, nullptr);
+ }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(this->color());
+ Coverage coverage(this->coverage());
+ LocalCoords localCoords(fHelper.usesLocalCoords() ? LocalCoords::kUsePosition_Type
+ : LocalCoords::kUnused_Type);
+ gp = GrDefaultGeoProcFactory::Make(target->caps().shaderCaps(),
+ color,
+ coverage,
+ localCoords,
+ this->viewMatrix());
+ }
+
+ SkASSERT(gp->vertexStride() == sizeof(SkPoint));
+
+ int instanceCount = fPaths.count();
+
+ // We avoid indices when we have a single hairline contour.
+ bool isIndexed = !this->isHairline() || instanceCount > 1 ||
+ PathGeoBuilder::PathHasMultipleSubpaths(fPaths[0].fPath);
+
+ // determine primitiveType
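+        // (hairline, indexed -> kLines; hairline, non-indexed -> kLineStrip;
+        //  fill -> kTriangles, drawn as indexed fans)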
+ GrPrimitiveType primitiveType;
+ if (this->isHairline()) {
+ primitiveType = isIndexed ? GrPrimitiveType::kLines : GrPrimitiveType::kLineStrip;
+ } else {
+ primitiveType = GrPrimitiveType::kTriangles;
+ }
+ PathGeoBuilder pathGeoBuilder(primitiveType, target, std::move(gp));
+
+ // fill buffers
+ for (int i = 0; i < instanceCount; i++) {
+ const PathData& args = fPaths[i];
+ pathGeoBuilder.addPath(args.fPath, args.fTolerance);
+ }
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ DefaultPathOp* that = t->cast<DefaultPathOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->color() != that->color()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->coverage() != that->coverage()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->isHairline() != that->isHairline()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
+ return CombineResult::kMerged;
+ }
+
+ const SkPMColor4f& color() const { return fColor; }
+ uint8_t coverage() const { return fCoverage; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool isHairline() const { return fIsHairline; }
+
+ struct PathData {
+ SkPath fPath;
+ SkScalar fTolerance;
+ };
+
+ SkSTArray<1, PathData, true> fPaths;
+ Helper fHelper;
+ SkPMColor4f fColor;
+ uint8_t fCoverage;
+ SkMatrix fViewMatrix;
+ bool fIsHairline;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+bool GrDefaultPathRenderer::internalDrawPath(GrRenderTargetContext* renderTargetContext,
+ GrPaint&& paint,
+ GrAAType aaType,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape,
+ bool stencilOnly) {
+ auto context = renderTargetContext->surfPriv().getContext();
+
+ SkASSERT(GrAAType::kCoverage != aaType);
+ SkPath path;
+ shape.asPath(&path);
+
+ SkScalar hairlineCoverage;
+ uint8_t newCoverage = 0xff;
+ bool isHairline = false;
+ if (IsStrokeHairlineOrEquivalent(shape.style(), viewMatrix, &hairlineCoverage)) {
+ newCoverage = SkScalarRoundToInt(hairlineCoverage * 0xff);
+ isHairline = true;
+ } else {
+ SkASSERT(shape.style().isSimpleFill());
+ }
+
+ int passCount = 0;
+ const GrUserStencilSettings* passes[2];
+ bool reverse = false;
+ bool lastPassIsBounds;
+
+ if (isHairline) {
+ passCount = 1;
+ if (stencilOnly) {
+ passes[0] = &gDirectToStencil;
+ } else {
+ passes[0] = &userStencilSettings;
+ }
+ lastPassIsBounds = false;
+ } else {
+ if (single_pass_shape(shape)) {
+ passCount = 1;
+ if (stencilOnly) {
+ passes[0] = &gDirectToStencil;
+ } else {
+ passes[0] = &userStencilSettings;
+ }
+ lastPassIsBounds = false;
+ } else {
+ switch (path.getFillType()) {
+ case SkPath::kInverseEvenOdd_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kEvenOdd_FillType:
+ passes[0] = &gEOStencilPass;
+ if (stencilOnly) {
+ passCount = 1;
+ lastPassIsBounds = false;
+ } else {
+ passCount = 2;
+ lastPassIsBounds = true;
+ if (reverse) {
+ passes[1] = &gInvEOColorPass;
+ } else {
+ passes[1] = &gEOColorPass;
+ }
+ }
+ break;
+
+ case SkPath::kInverseWinding_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kWinding_FillType:
+ passes[0] = &gWindStencilPass;
+ passCount = 2;
+ if (stencilOnly) {
+ lastPassIsBounds = false;
+ --passCount;
+ } else {
+ lastPassIsBounds = true;
+ if (reverse) {
+ passes[passCount-1] = &gInvWindColorPass;
+ } else {
+ passes[passCount-1] = &gWindColorPass;
+ }
+ }
+ break;
+ default:
+                    SkDEBUGFAIL("Unknown path fill type!");
+ return false;
+ }
+ }
+ }
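+
+    // Summary of the switch above: even-odd and winding fills first run a stencil pass
+    // (gEOStencilPass / gWindStencilPass) and then cover the bounds with a color pass
+    // (gEOColorPass / gWindColorPass, or the gInv* variants for inverse fills); stencilOnly
+    // skips the color pass.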
+
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ SkScalar srcSpaceTol = GrPathUtils::scaleToleranceToSrc(tol, viewMatrix, path.getBounds());
+
+ SkRect devBounds;
+ GetPathDevBounds(path,
+ renderTargetContext->asRenderTargetProxy()->worstCaseWidth(),
+ renderTargetContext->asRenderTargetProxy()->worstCaseHeight(),
+ viewMatrix, &devBounds);
+
+ for (int p = 0; p < passCount; ++p) {
+ if (lastPassIsBounds && (p == passCount-1)) {
+ SkRect bounds;
+ SkMatrix localMatrix = SkMatrix::I();
+ if (reverse) {
+ // draw over the dev bounds (which will be the whole dst surface for inv fill).
+ bounds = devBounds;
+ SkMatrix vmi;
+ // mapRect through persp matrix may not be correct
+ if (!viewMatrix.hasPerspective() && viewMatrix.invert(&vmi)) {
+ vmi.mapRect(&bounds);
+ } else {
+ if (!viewMatrix.invert(&localMatrix)) {
+ return false;
+ }
+ }
+ } else {
+ bounds = path.getBounds();
+ }
+ const SkMatrix& viewM = (reverse && viewMatrix.hasPerspective()) ? SkMatrix::I() :
+ viewMatrix;
+ // This is a non-coverage aa rect op since we assert aaType != kCoverage at the start
+ assert_alive(paint);
+ renderTargetContext->priv().stencilRect(clip, passes[p], std::move(paint),
+ GrAA(aaType == GrAAType::kMSAA), viewM, bounds, &localMatrix);
+ } else {
+ bool stencilPass = stencilOnly || passCount > 1;
+ std::unique_ptr<GrDrawOp> op;
+ if (stencilPass) {
+ GrPaint stencilPaint;
+ stencilPaint.setXPFactory(GrDisableColorXPFactory::Get());
+ op = DefaultPathOp::Make(context, std::move(stencilPaint), path, srcSpaceTol,
+ newCoverage, viewMatrix, isHairline, aaType, devBounds,
+ passes[p]);
+ } else {
+ assert_alive(paint);
+ op = DefaultPathOp::Make(context, std::move(paint), path, srcSpaceTol, newCoverage,
+ viewMatrix, isHairline, aaType, devBounds, passes[p]);
+ }
+ renderTargetContext->addDrawOp(clip, std::move(op));
+ }
+ }
+ return true;
+}
+
+GrPathRenderer::CanDrawPath
+GrDefaultPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ bool isHairline = IsStrokeHairlineOrEquivalent(
+ args.fShape->style(), *args.fViewMatrix, nullptr);
+ // If we aren't a single_pass_shape or hairline, we require stencil buffers.
+ if (!(single_pass_shape(*args.fShape) || isHairline) &&
+ (args.fCaps->avoidStencilBuffers() || args.fTargetIsWrappedVkSecondaryCB)) {
+ return CanDrawPath::kNo;
+ }
+ // If antialiasing is required, we only support MSAA.
+ if (GrAAType::kNone != args.fAAType && GrAAType::kMSAA != args.fAAType) {
+ return CanDrawPath::kNo;
+ }
+ // This can draw any path with any simple fill style.
+ if (!args.fShape->style().isSimpleFill() && !isHairline) {
+ return CanDrawPath::kNo;
+ }
+ // This is the fallback renderer for when a path is too complicated for the others to draw.
+ return CanDrawPath::kAsBackup;
+}
+
+bool GrDefaultPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrDefaultPathRenderer::onDrawPath");
+ GrAAType aaType = (GrAAType::kNone != args.fAAType) ? GrAAType::kMSAA : GrAAType::kNone;
+
+ return this->internalDrawPath(
+ args.fRenderTargetContext, std::move(args.fPaint), aaType, *args.fUserStencilSettings,
+ *args.fClip, *args.fViewMatrix, *args.fShape, false);
+}
+
+void GrDefaultPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrDefaultPathRenderer::onStencilPath");
+ SkASSERT(!args.fShape->inverseFilled());
+
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Get());
+
+ auto aaType = (GrAA::kYes == args.fDoStencilMSAA) ? GrAAType::kMSAA : GrAAType::kNone;
+
+ this->internalDrawPath(
+ args.fRenderTargetContext, std::move(paint), aaType, GrUserStencilSettings::kUnused,
+ *args.fClip, *args.fViewMatrix, *args.fShape, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(DefaultPathOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+
+ // For now just hairlines because the other types of draws require two ops.
+    // TODO: we should figure out a way to combine the stencil and cover steps into one op.
+ GrStyle style(SkStrokeRec::kHairline_InitStyle);
+ SkPath path = GrTest::TestPath(random);
+
+ // Compute srcSpaceTol
+ SkRect bounds = path.getBounds();
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ SkScalar srcSpaceTol = GrPathUtils::scaleToleranceToSrc(tol, viewMatrix, bounds);
+
+ viewMatrix.mapRect(&bounds);
+ uint8_t coverage = GrRandomCoverage(random);
+ GrAAType aaType = GrAAType::kNone;
+ if (numSamples > 1 && random->nextBool()) {
+ aaType = GrAAType::kMSAA;
+ }
+ return DefaultPathOp::Make(context, std::move(paint), path, srcSpaceTol, coverage, viewMatrix,
+ true, aaType, bounds, GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.h
new file mode 100644
index 0000000000..871a8c64f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDefaultPathRenderer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDefaultPathRenderer_DEFINED
+#define GrDefaultPathRenderer_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/ops/GrPathStencilSettings.h"
+
+/**
+ * Subclass that renders the path using the stencil buffer to resolve fill rules
+ * (e.g. winding, even-odd)
+ */
+class GrDefaultPathRenderer : public GrPathRenderer {
+public:
+ GrDefaultPathRenderer();
+
+private:
+ StencilSupport onGetStencilSupport(const GrShape&) const override;
+
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ void onStencilPath(const StencilPathArgs&) override;
+
+ bool internalDrawPath(GrRenderTargetContext*,
+ GrPaint&&,
+ GrAAType,
+ const GrUserStencilSettings&,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape&,
+ bool stencilOnly);
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.cpp
new file mode 100644
index 0000000000..417e2b4d17
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.cpp
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrDrawAtlasOp.h"
+
+#include "include/core/SkRSXform.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/utils/SkRandom.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+namespace {
+
+class DrawAtlasOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ DrawAtlasOp(const Helper::MakeArgs&, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, GrAAType, int spriteCount, const SkRSXform* xforms,
+ const SkRect* rects, const SkColor* colors);
+
+ const char* name() const override { return "DrawAtlasOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override;
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override;
+
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override;
+
+private:
+ void onPrepareDraws(Target*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ const SkPMColor4f& color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool hasColors() const { return fHasColors; }
+ int quadCount() const { return fQuadCount; }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps&) override;
+
+ struct Geometry {
+ SkPMColor4f fColor;
+ SkTArray<uint8_t, true> fVerts;
+ };
+
+ SkSTArray<1, Geometry, true> fGeoData;
+ Helper fHelper;
+ SkMatrix fViewMatrix;
+ SkPMColor4f fColor;
+ int fQuadCount;
+ bool fHasColors;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+static sk_sp<GrGeometryProcessor> make_gp(const GrShaderCaps* shaderCaps,
+ bool hasColors,
+ const SkPMColor4f& color,
+ const SkMatrix& viewMatrix) {
+ using namespace GrDefaultGeoProcFactory;
+ Color gpColor(color);
+ if (hasColors) {
+ gpColor.fType = Color::kPremulGrColorAttribute_Type;
+ }
+
+ return GrDefaultGeoProcFactory::Make(shaderCaps, gpColor, Coverage::kSolid_Type,
+ LocalCoords::kHasExplicit_Type, viewMatrix);
+}
+
+DrawAtlasOp::DrawAtlasOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, GrAAType aaType, int spriteCount,
+ const SkRSXform* xforms, const SkRect* rects, const SkColor* colors)
+ : INHERITED(ClassID()), fHelper(helperArgs, aaType), fColor(color) {
+ SkASSERT(xforms);
+ SkASSERT(rects);
+
+ fViewMatrix = viewMatrix;
+ Geometry& installedGeo = fGeoData.push_back();
+ installedGeo.fColor = color;
+
+ // Figure out stride and offsets
+ // Order within the vertex is: position [color] texCoord
+ size_t texOffset = sizeof(SkPoint);
+ size_t vertexStride = 2 * sizeof(SkPoint);
+ fHasColors = SkToBool(colors);
+ if (colors) {
+ texOffset += sizeof(GrColor);
+ vertexStride += sizeof(GrColor);
+ }
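+    // Resulting per-vertex layout (SkPoint is two floats, GrColor is 4 bytes):
+    //   without colors: [position][texCoord]          stride = 16, texOffset = 8
+    //   with colors:    [position][color][texCoord]   stride = 20, texOffset = 12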
+
+ // Compute buffer size and alloc buffer
+ fQuadCount = spriteCount;
+ int allocSize = static_cast<int>(4 * vertexStride * spriteCount);
+ installedGeo.fVerts.reset(allocSize);
+ uint8_t* currVertex = installedGeo.fVerts.begin();
+
+ SkRect bounds = SkRectPriv::MakeLargestInverted();
+ // TODO4F: Preserve float colors
+ int paintAlpha = GrColorUnpackA(installedGeo.fColor.toBytes_RGBA());
+ for (int spriteIndex = 0; spriteIndex < spriteCount; ++spriteIndex) {
+ // Transform rect
+ SkPoint strip[4];
+ const SkRect& currRect = rects[spriteIndex];
+ xforms[spriteIndex].toTriStrip(currRect.width(), currRect.height(), strip);
+
+ // Copy colors if necessary
+ if (colors) {
+ // convert to GrColor
+ SkColor color = colors[spriteIndex];
+ if (paintAlpha != 255) {
+ color = SkColorSetA(color, SkMulDiv255Round(SkColorGetA(color), paintAlpha));
+ }
+ GrColor grColor = SkColorToPremulGrColor(color);
+
+ *(reinterpret_cast<GrColor*>(currVertex + sizeof(SkPoint))) = grColor;
+ *(reinterpret_cast<GrColor*>(currVertex + vertexStride + sizeof(SkPoint))) = grColor;
+ *(reinterpret_cast<GrColor*>(currVertex + 2 * vertexStride + sizeof(SkPoint))) =
+ grColor;
+ *(reinterpret_cast<GrColor*>(currVertex + 3 * vertexStride + sizeof(SkPoint))) =
+ grColor;
+ }
+
+ // Copy position and uv to verts
+ *(reinterpret_cast<SkPoint*>(currVertex)) = strip[0];
+ *(reinterpret_cast<SkPoint*>(currVertex + texOffset)) =
+ SkPoint::Make(currRect.fLeft, currRect.fTop);
+ SkRectPriv::GrowToInclude(&bounds, strip[0]);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = strip[1];
+ *(reinterpret_cast<SkPoint*>(currVertex + texOffset)) =
+ SkPoint::Make(currRect.fLeft, currRect.fBottom);
+ SkRectPriv::GrowToInclude(&bounds, strip[1]);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = strip[2];
+ *(reinterpret_cast<SkPoint*>(currVertex + texOffset)) =
+ SkPoint::Make(currRect.fRight, currRect.fTop);
+ SkRectPriv::GrowToInclude(&bounds, strip[2]);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = strip[3];
+ *(reinterpret_cast<SkPoint*>(currVertex + texOffset)) =
+ SkPoint::Make(currRect.fRight, currRect.fBottom);
+ SkRectPriv::GrowToInclude(&bounds, strip[3]);
+ currVertex += vertexStride;
+ }
+
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsHairline::kNo);
+}
+
+#ifdef SK_DEBUG
+SkString DrawAtlasOp::dumpInfo() const {
+ SkString string;
+ for (const auto& geo : fGeoData) {
+ string.appendf("Color: 0x%08x, Quads: %d\n", geo.fColor.toBytes_RGBA(),
+ geo.fVerts.count() / 4);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+}
+#endif
+
+void DrawAtlasOp::onPrepareDraws(Target* target) {
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(make_gp(target->caps().shaderCaps(),
+ this->hasColors(),
+ this->color(),
+ this->viewMatrix()));
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->vertexStride();
+
+ int numQuads = this->quadCount();
+ QuadHelper helper(target, vertexStride, numQuads);
+ void* verts = helper.vertices();
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ uint8_t* vertPtr = reinterpret_cast<uint8_t*>(verts);
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ size_t allocSize = args.fVerts.count();
+ memcpy(vertPtr, args.fVerts.begin(), allocSize);
+ vertPtr += allocSize;
+ }
+ helper.recordDraw(target, std::move(gp));
+}
+
+void DrawAtlasOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+}
+
+GrOp::CombineResult DrawAtlasOp::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
+ DrawAtlasOp* that = t->cast<DrawAtlasOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+    // We currently use a uniform view matrix for this op.
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->hasColors() != that->hasColors()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!this->hasColors() && this->color() != that->color()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ fQuadCount += that->quadCount();
+
+ return CombineResult::kMerged;
+}
+
+GrDrawOp::FixedFunctionFlags DrawAtlasOp::fixedFunctionFlags() const {
+ return fHelper.fixedFunctionFlags();
+}
+
+GrProcessorSet::Analysis DrawAtlasOp::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ GrProcessorAnalysisColor gpColor;
+ if (this->hasColors()) {
+ gpColor.setToUnknown();
+ } else {
+ gpColor.setToConstant(fColor);
+ }
+ auto result = fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kNone, &gpColor);
+ if (gpColor.isConstant(&fColor)) {
+ fHasColors = false;
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+std::unique_ptr<GrDrawOp> GrDrawAtlasOp::Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ GrAAType aaType,
+ int spriteCount,
+ const SkRSXform* xforms,
+ const SkRect* rects,
+ const SkColor* colors) {
+ return GrSimpleMeshDrawOpHelper::FactoryHelper<DrawAtlasOp>(context, std::move(paint),
+ viewMatrix, aaType,
+ spriteCount, xforms,
+ rects, colors);
+}
+
+#if GR_TEST_UTILS
+
+static SkRSXform random_xform(SkRandom* random) {
+ static const SkScalar kMinExtent = -100.f;
+ static const SkScalar kMaxExtent = 100.f;
+ static const SkScalar kMinScale = 0.1f;
+ static const SkScalar kMaxScale = 100.f;
+ static const SkScalar kMinRotate = -SK_ScalarPI;
+ static const SkScalar kMaxRotate = SK_ScalarPI;
+
+ SkRSXform xform = SkRSXform::MakeFromRadians(random->nextRangeScalar(kMinScale, kMaxScale),
+ random->nextRangeScalar(kMinRotate, kMaxRotate),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent));
+ return xform;
+}
+
+static SkRect random_texRect(SkRandom* random) {
+ static const SkScalar kMinCoord = 0.0f;
+ static const SkScalar kMaxCoord = 1024.f;
+
+ SkRect texRect = SkRect::MakeLTRB(random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord));
+ texRect.sort();
+ return texRect;
+}
+
+static void randomize_params(uint32_t count, SkRandom* random, SkTArray<SkRSXform>* xforms,
+ SkTArray<SkRect>* texRects, SkTArray<GrColor>* colors,
+ bool hasColors) {
+ for (uint32_t v = 0; v < count; v++) {
+ xforms->push_back(random_xform(random));
+ texRects->push_back(random_texRect(random));
+ if (hasColors) {
+ colors->push_back(GrRandomColor(random));
+ }
+ }
+}
+
+GR_DRAW_OP_TEST_DEFINE(DrawAtlasOp) {
+ uint32_t spriteCount = random->nextRangeU(1, 100);
+
+ SkTArray<SkRSXform> xforms(spriteCount);
+ SkTArray<SkRect> texRects(spriteCount);
+ SkTArray<GrColor> colors;
+
+ bool hasColors = random->nextBool();
+
+ randomize_params(spriteCount, random, &xforms, &texRects, &colors, hasColors);
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrAAType aaType = GrAAType::kNone;
+ if (numSamples > 1 && random->nextBool()) {
+ aaType = GrAAType::kMSAA;
+ }
+
+ return GrDrawAtlasOp::Make(context, std::move(paint), viewMatrix, aaType, spriteCount,
+ xforms.begin(), texRects.begin(),
+ hasColors ? colors.begin() : nullptr);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.h b/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.h
new file mode 100644
index 0000000000..425dba41b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawAtlasOp.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawAtlasOp_DEFINED
+#define GrDrawAtlasOp_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+class SkMatrix;
+
+namespace GrDrawAtlasOp {
+ std::unique_ptr<GrDrawOp> Make(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ GrAAType,
+ int spriteCount,
+ const SkRSXform* xforms,
+ const SkRect* rects,
+ const SkColor* colors);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawOp.h b/gfx/skia/skia/src/gpu/ops/GrDrawOp.h
new file mode 100644
index 0000000000..a9e97520f5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawOp.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawOp_DEFINED
+#define GrDrawOp_DEFINED
+
+#include <functional>
+#include "src/gpu/GrDeferredUpload.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrAppliedClip;
+
+/**
+ * Base class for GrOps that draw. These ops can draw into an op list's GrRenderTarget.
+ */
+class GrDrawOp : public GrOp {
+public:
+ GrDrawOp(uint32_t classID) : INHERITED(classID) {}
+
+ /**
+ * This information is required to determine how to compute a GrAppliedClip from a GrClip for
+ * this op.
+ */
+ enum class FixedFunctionFlags : uint32_t {
+ kNone = 0x0,
+        /** Indicates that the op will enable MSAA or mixed samples rendering. */
+ kUsesHWAA = 0x1,
+        /** Indicates that the op reads and/or writes the stencil buffer. */
+ kUsesStencil = 0x2,
+ };
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(FixedFunctionFlags);
+ virtual FixedFunctionFlags fixedFunctionFlags() const = 0;
+
+ /**
+ * This is called after the GrAppliedClip has been computed and just prior to recording the op
+ * or combining it with a previously recorded op. The op should convert any proxies or resources
+ * it owns to "pending io" status so that resource allocation can be more optimal. Additionally,
+ * at this time the op must report whether a copy of the destination (or destination texture
+ * itself) needs to be provided to the GrXferProcessor when this op executes.
+ */
+ virtual GrProcessorSet::Analysis finalize(
+ const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType) = 0;
+
+#ifdef SK_DEBUG
+ bool fAddDrawOpCalled = false;
+
+ void validate() const override {
+ SkASSERT(fAddDrawOpCalled);
+ }
+#endif
+
+private:
+ typedef GrOp INHERITED;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrDrawOp::FixedFunctionFlags);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.cpp
new file mode 100644
index 0000000000..526ebc03d5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkTemplates.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/ops/GrDrawPathOp.h"
+
+static constexpr GrUserStencilSettings kCoverPass{
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+};
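+
+// I.e. the cover pass draws wherever the stencil value is not equal to zero and zeroes the
+// stencil as it passes (kZero on pass, kKeep on fail), leaving the buffer clean for the next
+// path.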
+
+GrDrawPathOpBase::GrDrawPathOpBase(uint32_t classID, const SkMatrix& viewMatrix, GrPaint&& paint,
+ GrPathRendering::FillType fill, GrAA aa)
+ : INHERITED(classID)
+ , fViewMatrix(viewMatrix)
+ , fInputColor(paint.getColor4f())
+ , fFillType(fill)
+ , fDoAA(GrAA::kYes == aa)
+ , fProcessorSet(std::move(paint)) {}
+
+#ifdef SK_DEBUG
+SkString GrDrawPathOp::dumpInfo() const {
+ SkString string;
+ string.printf("PATH: 0x%p", fPath.get());
+ string.append(INHERITED::dumpInfo());
+ return string;
+}
+#endif
+
+GrPipeline::InitArgs GrDrawPathOpBase::pipelineInitArgs(const GrOpFlushState& state) {
+ GrPipeline::InitArgs args;
+ if (fDoAA) {
+ args.fInputFlags |= GrPipeline::InputFlags::kHWAntialias;
+ }
+ args.fUserStencil = &kCoverPass;
+ args.fCaps = &state.caps();
+ args.fDstProxy = state.drawOpArgs().dstProxy();
+ args.fOutputSwizzle = state.drawOpArgs().outputSwizzle();
+ return args;
+}
+
+const GrProcessorSet::Analysis& GrDrawPathOpBase::doProcessorAnalysis(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ fAnalysis = fProcessorSet.finalize(
+ fInputColor, GrProcessorAnalysisCoverage::kNone, clip, &kCoverPass,
+ hasMixedSampledCoverage, caps, clampType, &fInputColor);
+ return fAnalysis;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void init_stencil_pass_settings(const GrOpFlushState& flushState,
+ GrPathRendering::FillType fillType, GrStencilSettings* stencil) {
+ const GrAppliedClip* appliedClip = flushState.drawOpArgs().appliedClip();
+ bool stencilClip = appliedClip && appliedClip->hasStencilClip();
+ stencil->reset(GrPathRendering::GetStencilPassSettings(fillType), stencilClip,
+ flushState.drawOpArgs().renderTarget()->renderTargetPriv().numStencilBits());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrDrawOp> GrDrawPathOp::Make(GrRecordingContext* context,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint,
+ GrAA aa,
+ sk_sp<const GrPath> path) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrDrawPathOp>(viewMatrix, std::move(paint), aa, std::move(path));
+}
+
+void GrDrawPathOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ GrAppliedClip appliedClip = state->detachAppliedClip();
+ GrPipeline::FixedDynamicState fixedDynamicState(appliedClip.scissorState().rect());
+ GrPipeline pipeline(this->pipelineInitArgs(*state), this->detachProcessors(),
+ std::move(appliedClip));
+ sk_sp<GrPathProcessor> pathProc(GrPathProcessor::Create(this->color(), this->viewMatrix()));
+
+ GrProgramInfo programInfo(state->drawOpArgs().numSamples(),
+ state->drawOpArgs().origin(),
+ pipeline,
+ *pathProc,
+ &fixedDynamicState,
+ nullptr, 0);
+
+ GrStencilSettings stencil;
+ init_stencil_pass_settings(*state, this->fillType(), &stencil);
+ state->gpu()->pathRendering()->drawPath(state->drawOpArgs().renderTarget(),
+ programInfo, stencil, fPath.get());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+inline void pre_translate_transform_values(const float* xforms,
+ GrPathRendering::PathTransformType type, int count,
+ SkScalar x, SkScalar y, float* dst) {
+ if (0 == x && 0 == y) {
+ memcpy(dst, xforms, count * GrPathRendering::PathTransformSize(type) * sizeof(float));
+ return;
+ }
+ switch (type) {
+ case GrPathRendering::kNone_PathTransformType:
+ SK_ABORT("Cannot pre-translate kNone_PathTransformType.");
+ break;
+ case GrPathRendering::kTranslateX_PathTransformType:
+ SkASSERT(0 == y);
+ for (int i = 0; i < count; i++) {
+ dst[i] = xforms[i] + x;
+ }
+ break;
+ case GrPathRendering::kTranslateY_PathTransformType:
+ SkASSERT(0 == x);
+ for (int i = 0; i < count; i++) {
+ dst[i] = xforms[i] + y;
+ }
+ break;
+ case GrPathRendering::kTranslate_PathTransformType:
+ for (int i = 0; i < 2 * count; i += 2) {
+ dst[i] = xforms[i] + x;
+ dst[i + 1] = xforms[i + 1] + y;
+ }
+ break;
+ case GrPathRendering::kAffine_PathTransformType:
+ for (int i = 0; i < 6 * count; i += 6) {
+ dst[i] = xforms[i];
+ dst[i + 1] = xforms[i + 1];
+ dst[i + 2] = xforms[i] * x + xforms[i + 1] * y + xforms[i + 2];
+ dst[i + 3] = xforms[i + 3];
+ dst[i + 4] = xforms[i + 4];
+ dst[i + 5] = xforms[i + 3] * x + xforms[i + 4] * y + xforms[i + 5];
+ }
+ break;
+ default:
+ SK_ABORT("Unknown transform type.");
+ break;
+ }
+}
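+
+// Worked example for the affine case: a transform stored as {a, b, tx, c, d, ty} maps
+// (px, py) to (a*px + b*py + tx, c*px + d*py + ty), so pre-translating by (x, y) folds the
+// offset into the constants: tx' = a*x + b*y + tx and ty' = c*x + d*y + ty, matching the
+// dst[i + 2] and dst[i + 5] lines above.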
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.h b/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.h
new file mode 100644
index 0000000000..f40c2b96b9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawPathOp.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawPathOp_DEFINED
+#define GrDrawPathOp_DEFINED
+
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrPath.h"
+#include "src/gpu/GrPathProcessor.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrProcessorSet.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrPaint;
+class GrRecordingContext;
+
+class GrDrawPathOpBase : public GrDrawOp {
+protected:
+ GrDrawPathOpBase(uint32_t classID, const SkMatrix& viewMatrix, GrPaint&&,
+ GrPathRendering::FillType, GrAA);
+
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ return (fDoAA)
+ ? FixedFunctionFlags::kUsesHWAA | FixedFunctionFlags::kUsesStencil
+ : FixedFunctionFlags::kUsesStencil;
+ }
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return this->doProcessorAnalysis(caps, clip, hasMixedSampledCoverage, clampType);
+ }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fProcessorSet.visitProxies(func);
+ }
+
+protected:
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkPMColor4f& color() const { return fInputColor; }
+ GrPathRendering::FillType fillType() const { return fFillType; }
+ const GrProcessorSet& processors() const { return fProcessorSet; }
+ GrProcessorSet detachProcessors() { return std::move(fProcessorSet); }
+ inline GrPipeline::InitArgs pipelineInitArgs(const GrOpFlushState&);
+ const GrProcessorSet::Analysis& doProcessorAnalysis(
+ const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType);
+ const GrProcessorSet::Analysis& processorAnalysis() const {
+ SkASSERT(fAnalysis.isInitialized());
+ return fAnalysis;
+ }
+
+private:
+ void onPrepare(GrOpFlushState*) final {}
+
+ SkMatrix fViewMatrix;
+ SkPMColor4f fInputColor;
+ GrProcessorSet::Analysis fAnalysis;
+ GrPathRendering::FillType fFillType;
+ bool fDoAA;
+ GrProcessorSet fProcessorSet;
+
+ typedef GrDrawOp INHERITED;
+};
+
+class GrDrawPathOp final : public GrDrawPathOpBase {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(
+ GrRecordingContext*, const SkMatrix& viewMatrix, GrPaint&&, GrAA, sk_sp<const GrPath>);
+
+ const char* name() const override { return "DrawPath"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override;
+#endif
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ GrDrawPathOp(const SkMatrix& viewMatrix, GrPaint&& paint, GrAA aa, sk_sp<const GrPath> path)
+ : GrDrawPathOpBase(
+ ClassID(), viewMatrix, std::move(paint), path->getFillType(), aa)
+ , fPath(std::move(path)) {
+ this->setTransformedBounds(fPath->getBounds(), viewMatrix, HasAABloat::kNo,
+ IsHairline::kNo);
+ }
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ sk_sp<const GrPath> fPath;
+
+ typedef GrDrawPathOpBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.cpp
new file mode 100644
index 0000000000..4b311b5207
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.cpp
@@ -0,0 +1,708 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/ops/GrDrawVerticesOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+namespace {
+
+class DrawVerticesOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ DrawVerticesOp(const Helper::MakeArgs&, const SkPMColor4f&, sk_sp<SkVertices>,
+ const SkVertices::Bone bones[], int boneCount, GrPrimitiveType, GrAAType,
+ sk_sp<GrColorSpaceXform>, const SkMatrix& viewMatrix);
+
+ const char* name() const override { return "DrawVerticesOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override;
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override;
+
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override;
+
+private:
+ enum class ColorArrayType {
+ kPremulGrColor,
+ kSkColor,
+ };
+
+ void onPrepareDraws(Target*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ void drawVolatile(Target*);
+ void drawNonVolatile(Target*);
+
+ void fillBuffers(bool hasColorAttribute,
+ bool hasLocalCoordsAttribute,
+ size_t vertexStride,
+ void* verts,
+ uint16_t* indices) const;
+
+ void drawVertices(Target*,
+ sk_sp<const GrGeometryProcessor>,
+ sk_sp<const GrBuffer> vertexBuffer,
+ int firstVertex,
+ sk_sp<const GrBuffer> indexBuffer,
+ int firstIndex);
+
+ sk_sp<GrGeometryProcessor> makeGP(const GrShaderCaps* shaderCaps,
+ bool* hasColorAttribute,
+ bool* hasLocalCoordAttribute) const;
+
+ GrPrimitiveType primitiveType() const { return fPrimitiveType; }
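+    // Only list-style primitives can merge: appending one strip's (or fan's) vertices after
+    // another's would create spurious primitives bridging the two meshes.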
+ bool combinablePrimitive() const {
+ return GrPrimitiveType::kTriangles == fPrimitiveType ||
+ GrPrimitiveType::kLines == fPrimitiveType ||
+ GrPrimitiveType::kPoints == fPrimitiveType;
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps&) override;
+
+ struct Mesh {
+ SkPMColor4f fColor; // Used if this->hasPerVertexColors() is false.
+ sk_sp<SkVertices> fVertices;
+ SkMatrix fViewMatrix;
+ bool fIgnoreTexCoords;
+ bool fIgnoreColors;
+
+ bool hasExplicitLocalCoords() const {
+ return fVertices->hasTexCoords() && !fIgnoreTexCoords;
+ }
+
+ bool hasPerVertexColors() const {
+ return fVertices->hasColors() && !fIgnoreColors;
+ }
+ };
+
+ bool isIndexed() const {
+ // Consistency enforced in onCombineIfPossible.
+ return fMeshes[0].fVertices->hasIndices();
+ }
+
+ bool requiresPerVertexColors() const {
+ return SkToBool(kRequiresPerVertexColors_Flag & fFlags);
+ }
+
+ bool anyMeshHasExplicitLocalCoords() const {
+ return SkToBool(kAnyMeshHasExplicitLocalCoords_Flag & fFlags);
+ }
+
+ bool hasMultipleViewMatrices() const {
+ return SkToBool(kHasMultipleViewMatrices_Flag & fFlags);
+ }
+
+ enum Flags {
+ kRequiresPerVertexColors_Flag = 0x1,
+ kAnyMeshHasExplicitLocalCoords_Flag = 0x2,
+ kHasMultipleViewMatrices_Flag = 0x4,
+ };
+
+ Helper fHelper;
+ SkSTArray<1, Mesh, true> fMeshes;
+ // GrPrimitiveType is more expressive than fVertices.mode() so it is used instead and we ignore
+ // the SkVertices mode (though fPrimitiveType may have been inferred from it).
+ GrPrimitiveType fPrimitiveType;
+ uint32_t fFlags;
+ int fVertexCount;
+ int fIndexCount;
+ ColorArrayType fColorArrayType;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+DrawVerticesOp::DrawVerticesOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ sk_sp<SkVertices> vertices, const SkVertices::Bone bones[],
+ int boneCount, GrPrimitiveType primitiveType, GrAAType aaType,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& viewMatrix)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, aaType)
+ , fPrimitiveType(primitiveType)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ SkASSERT(vertices);
+
+ fVertexCount = vertices->vertexCount();
+ fIndexCount = vertices->indexCount();
+ fColorArrayType = vertices->hasColors() ? ColorArrayType::kSkColor
+ : ColorArrayType::kPremulGrColor;
+
+ Mesh& mesh = fMeshes.push_back();
+ mesh.fColor = color;
+ mesh.fViewMatrix = viewMatrix;
+ mesh.fVertices = std::move(vertices);
+ mesh.fIgnoreTexCoords = false;
+ mesh.fIgnoreColors = false;
+
+ if (mesh.fVertices->hasBones() && bones) {
+ // Perform the transformations on the CPU instead of the GPU.
+ mesh.fVertices = mesh.fVertices->applyBones(bones, boneCount);
+ } else {
+ SkASSERT(!bones || boneCount == 1);
+ }
+
+ fFlags = 0;
+ if (mesh.hasPerVertexColors()) {
+ fFlags |= kRequiresPerVertexColors_Flag;
+ }
+ if (mesh.hasExplicitLocalCoords()) {
+ fFlags |= kAnyMeshHasExplicitLocalCoords_Flag;
+ }
+
+ // Special case for meshes with a world transform but no bone weights.
+    // These are treated as normal vertex draws without bones.
+ if (!mesh.fVertices->hasBones() && boneCount == 1) {
+ SkMatrix worldTransform;
+ worldTransform.setAffine(bones[0].values);
+ mesh.fViewMatrix.preConcat(worldTransform);
+ }
+
+ IsHairline zeroArea;
+ if (GrIsPrimTypeLines(primitiveType) || GrPrimitiveType::kPoints == primitiveType) {
+ zeroArea = IsHairline::kYes;
+ } else {
+ zeroArea = IsHairline::kNo;
+ }
+
+ this->setTransformedBounds(mesh.fVertices->bounds(),
+ mesh.fViewMatrix,
+ HasAABloat::kNo,
+ zeroArea);
+}
+
+#ifdef SK_DEBUG
+SkString DrawVerticesOp::dumpInfo() const {
+ SkString string;
+ string.appendf("PrimType: %d, MeshCount %d, VCount: %d, ICount: %d\n", (int)fPrimitiveType,
+ fMeshes.count(), fVertexCount, fIndexCount);
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+}
+#endif
+
+GrDrawOp::FixedFunctionFlags DrawVerticesOp::fixedFunctionFlags() const {
+ return fHelper.fixedFunctionFlags();
+}
+
+GrProcessorSet::Analysis DrawVerticesOp::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ GrProcessorAnalysisColor gpColor;
+ if (this->requiresPerVertexColors()) {
+ gpColor.setToUnknown();
+ } else {
+ gpColor.setToConstant(fMeshes.front().fColor);
+ }
+ auto result = fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kNone, &gpColor);
+ if (gpColor.isConstant(&fMeshes.front().fColor)) {
+ fMeshes.front().fIgnoreColors = true;
+ fFlags &= ~kRequiresPerVertexColors_Flag;
+ fColorArrayType = ColorArrayType::kPremulGrColor;
+ }
+ if (!fHelper.usesLocalCoords()) {
+ fMeshes[0].fIgnoreTexCoords = true;
+ fFlags &= ~kAnyMeshHasExplicitLocalCoords_Flag;
+ }
+ return result;
+}
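+
+// Note that finalize() only ever removes vertex attributes: a provably constant output color
+// drops per-vertex colors, and unused local coords drop tex coords, so the geometry processor
+// later built in makeGP() carries no attributes the shaders would ignore.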
+
+sk_sp<GrGeometryProcessor> DrawVerticesOp::makeGP(const GrShaderCaps* shaderCaps,
+ bool* hasColorAttribute,
+ bool* hasLocalCoordAttribute) const {
+ using namespace GrDefaultGeoProcFactory;
+ LocalCoords::Type localCoordsType;
+ if (fHelper.usesLocalCoords()) {
+ // If we have multiple view matrices we will transform the positions into device space. We
+ // must then also provide untransformed positions as local coords.
+ if (this->anyMeshHasExplicitLocalCoords() || this->hasMultipleViewMatrices()) {
+ *hasLocalCoordAttribute = true;
+ localCoordsType = LocalCoords::kHasExplicit_Type;
+ } else {
+ *hasLocalCoordAttribute = false;
+ localCoordsType = LocalCoords::kUsePosition_Type;
+ }
+ } else {
+ localCoordsType = LocalCoords::kUnused_Type;
+ *hasLocalCoordAttribute = false;
+ }
+
+ Color color(fMeshes[0].fColor);
+ if (this->requiresPerVertexColors()) {
+ if (fColorArrayType == ColorArrayType::kPremulGrColor) {
+ color.fType = Color::kPremulGrColorAttribute_Type;
+ } else {
+ color.fType = Color::kUnpremulSkColorAttribute_Type;
+ color.fColorSpaceXform = fColorSpaceXform;
+ }
+ *hasColorAttribute = true;
+ } else {
+ *hasColorAttribute = false;
+ }
+
+ const SkMatrix& vm = this->hasMultipleViewMatrices() ? SkMatrix::I() : fMeshes[0].fViewMatrix;
+
+ return GrDefaultGeoProcFactory::Make(shaderCaps,
+ color,
+ Coverage::kSolid_Type,
+ localCoordsType,
+ vm);
+}
+
+void DrawVerticesOp::onPrepareDraws(Target* target) {
+ bool hasMapBufferSupport = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
+ if (fMeshes[0].fVertices->isVolatile() || !hasMapBufferSupport) {
+ this->drawVolatile(target);
+ } else {
+ this->drawNonVolatile(target);
+ }
+}
+
+void DrawVerticesOp::drawVolatile(Target* target) {
+ bool hasColorAttribute;
+ bool hasLocalCoordsAttribute;
+ sk_sp<GrGeometryProcessor> gp = this->makeGP(target->caps().shaderCaps(),
+ &hasColorAttribute,
+ &hasLocalCoordsAttribute);
+
+ // Allocate buffers.
+ size_t vertexStride = gp->vertexStride();
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex = 0;
+ void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex = 0;
+ uint16_t* indices = nullptr;
+ if (this->isIndexed()) {
+ indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ }
+
+ // Fill the buffers.
+ this->fillBuffers(hasColorAttribute,
+ hasLocalCoordsAttribute,
+ vertexStride,
+ verts,
+ indices);
+
+ // Draw the vertices.
+ this->drawVertices(target, std::move(gp), std::move(vertexBuffer), firstVertex, indexBuffer,
+ firstIndex);
+}
+
+void DrawVerticesOp::drawNonVolatile(Target* target) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+
+ bool hasColorAttribute;
+ bool hasLocalCoordsAttribute;
+ sk_sp<GrGeometryProcessor> gp = this->makeGP(target->caps().shaderCaps(),
+ &hasColorAttribute,
+ &hasLocalCoordsAttribute);
+
+ SkASSERT(fMeshes.count() == 1); // Non-volatile meshes should never combine.
+
+ // Get the resource provider.
+ GrResourceProvider* rp = target->resourceProvider();
+
+ // Generate keys for the buffers.
+ GrUniqueKey vertexKey, indexKey;
+ GrUniqueKey::Builder vertexKeyBuilder(&vertexKey, kDomain, 2);
+ GrUniqueKey::Builder indexKeyBuilder(&indexKey, kDomain, 2);
+ vertexKeyBuilder[0] = indexKeyBuilder[0] = fMeshes[0].fVertices->uniqueID();
+ vertexKeyBuilder[1] = 0;
+ indexKeyBuilder[1] = 1;
+ vertexKeyBuilder.finish();
+ indexKeyBuilder.finish();
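+    // Both keys share the SkVertices uniqueID in slot 0; slot 1 distinguishes the vertex
+    // buffer (0) from the index buffer (1).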
+
+ // Try to grab data from the cache.
+ sk_sp<GrGpuBuffer> vertexBuffer = rp->findByUniqueKey<GrGpuBuffer>(vertexKey);
+ sk_sp<GrGpuBuffer> indexBuffer =
+ this->isIndexed() ? rp->findByUniqueKey<GrGpuBuffer>(indexKey) : nullptr;
+
+ // Draw using the cached buffers if possible.
+ if (vertexBuffer && (!this->isIndexed() || indexBuffer)) {
+ this->drawVertices(target, std::move(gp), std::move(vertexBuffer), 0,
+ std::move(indexBuffer), 0);
+ return;
+ }
+
+ // Allocate vertex buffer.
+ size_t vertexStride = gp->vertexStride();
+ vertexBuffer = rp->createBuffer(
+ fVertexCount * vertexStride, GrGpuBufferType::kVertex, kStatic_GrAccessPattern);
+ void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ // Allocate index buffer.
+ uint16_t* indices = nullptr;
+ if (this->isIndexed()) {
+ indexBuffer = rp->createBuffer(
+ fIndexCount * sizeof(uint16_t), GrGpuBufferType::kIndex, kStatic_GrAccessPattern);
+ indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ }
+
+ // Fill the buffers.
+ this->fillBuffers(hasColorAttribute,
+ hasLocalCoordsAttribute,
+ vertexStride,
+ verts,
+ indices);
+
+ // Unmap the buffers.
+ vertexBuffer->unmap();
+ if (indexBuffer) {
+ indexBuffer->unmap();
+ }
+
+ // Cache the buffers.
+ rp->assignUniqueKeyToResource(vertexKey, vertexBuffer.get());
+ rp->assignUniqueKeyToResource(indexKey, indexBuffer.get());
+
+ // Draw the vertices.
+ this->drawVertices(target, std::move(gp), std::move(vertexBuffer), 0, std::move(indexBuffer),
+ 0);
+}
+
+void DrawVerticesOp::fillBuffers(bool hasColorAttribute,
+ bool hasLocalCoordsAttribute,
+ size_t vertexStride,
+ void* verts,
+ uint16_t* indices) const {
+ int instanceCount = fMeshes.count();
+
+ // Copy data into the buffers.
+ int vertexOffset = 0;
+ // We have a fast case below for uploading the vertex data when the matrix is translate
+ // only and there are colors but not local coords.
+ bool fastAttrs = hasColorAttribute && !hasLocalCoordsAttribute;
+ for (int i = 0; i < instanceCount; i++) {
+ // Get each mesh.
+ const Mesh& mesh = fMeshes[i];
+
+ // Copy data into the index buffer.
+ if (indices) {
+ int indexCount = mesh.fVertices->indexCount();
+ for (int j = 0; j < indexCount; ++j) {
+ *indices++ = mesh.fVertices->indices()[j] + vertexOffset;
+ }
+ }
+
+ // Copy data into the vertex buffer.
+ int vertexCount = mesh.fVertices->vertexCount();
+ const SkPoint* positions = mesh.fVertices->positions();
+ const SkColor* colors = mesh.fVertices->colors();
+ const SkPoint* localCoords = mesh.fVertices->texCoords();
+ bool fastMesh = (!this->hasMultipleViewMatrices() ||
+ mesh.fViewMatrix.getType() <= SkMatrix::kTranslate_Mask) &&
+ mesh.hasPerVertexColors();
+ if (fastAttrs && fastMesh) {
+ // Fast case.
+ struct V {
+ SkPoint fPos;
+ uint32_t fColor;
+ };
+ SkASSERT(sizeof(V) == vertexStride);
+ V* v = (V*)verts;
+ Sk2f t(0, 0);
+ if (this->hasMultipleViewMatrices()) {
+ t = Sk2f(mesh.fViewMatrix.getTranslateX(), mesh.fViewMatrix.getTranslateY());
+ }
+ for (int j = 0; j < vertexCount; ++j) {
+ Sk2f p = Sk2f::Load(positions++) + t;
+ p.store(&v[j].fPos);
+ v[j].fColor = colors[j];
+ }
+ verts = v + vertexCount;
+ } else {
+ // Normal case.
+ static constexpr size_t kColorOffset = sizeof(SkPoint);
+ size_t offset = kColorOffset;
+ if (hasColorAttribute) {
+ offset += sizeof(uint32_t);
+ }
+ size_t localCoordOffset = offset;
+ if (hasLocalCoordsAttribute) {
+ offset += sizeof(SkPoint);
+ }
+
+ // TODO4F: Preserve float colors
+ GrColor color = mesh.fColor.toBytes_RGBA();
+
+ for (int j = 0; j < vertexCount; ++j) {
+ if (this->hasMultipleViewMatrices()) {
+ mesh.fViewMatrix.mapPoints(((SkPoint*)verts), &positions[j], 1);
+ } else {
+ *((SkPoint*)verts) = positions[j];
+ }
+ if (hasColorAttribute) {
+ if (mesh.hasPerVertexColors()) {
+ *(uint32_t*)((intptr_t)verts + kColorOffset) = colors[j];
+ } else {
+ *(uint32_t*)((intptr_t)verts + kColorOffset) = color;
+ }
+ }
+ if (hasLocalCoordsAttribute) {
+ if (mesh.hasExplicitLocalCoords()) {
+ *(SkPoint*)((intptr_t)verts + localCoordOffset) = localCoords[j];
+ } else {
+ *(SkPoint*)((intptr_t)verts + localCoordOffset) = positions[j];
+ }
+ }
+ verts = (void*)((intptr_t)verts + vertexStride);
+ }
+ }
+ vertexOffset += vertexCount;
+ }
+}
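+
+// Layout written by fillBuffers, matching the offsets computed in the normal case above:
+//   [SkPoint position][uint32_t color, if hasColorAttribute][SkPoint localCoord, if
+//   hasLocalCoordsAttribute], repeated every vertexStride bytes.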
+
+void DrawVerticesOp::drawVertices(Target* target,
+ sk_sp<const GrGeometryProcessor> gp,
+ sk_sp<const GrBuffer> vertexBuffer,
+ int firstVertex,
+ sk_sp<const GrBuffer> indexBuffer,
+ int firstIndex) {
+ GrMesh* mesh = target->allocMesh(this->primitiveType());
+ if (this->isIndexed()) {
+ mesh->setIndexed(std::move(indexBuffer), fIndexCount, firstIndex, 0, fVertexCount - 1,
+ GrPrimitiveRestart::kNo);
+ } else {
+ mesh->setNonIndexedNonInstanced(fVertexCount);
+ }
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+}
+
+void DrawVerticesOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+}
+
+GrOp::CombineResult DrawVerticesOp::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
+ DrawVerticesOp* that = t->cast<DrawVerticesOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+    // Non-volatile meshes cannot batch: if a non-volatile mesh were combined with another mesh,
+    // then the next time it is drawn its cached vertex buffer would still contain the other
+    // mesh's data, which is not guaranteed to still be valid.
+ if (!this->fMeshes[0].fVertices->isVolatile() || !that->fMeshes[0].fVertices->isVolatile()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!this->combinablePrimitive() || this->primitiveType() != that->primitiveType()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fMeshes[0].fVertices->hasIndices() != that->fMeshes[0].fVertices->hasIndices()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fColorArrayType != that->fColorArrayType) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fVertexCount + that->fVertexCount > SkTo<int>(UINT16_MAX)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // NOTE: For SkColor vertex colors, the source color space is always sRGB, and the destination
+ // gamut is determined by the render target context. A mis-match should be impossible.
+ SkASSERT(GrColorSpaceXform::Equals(fColorSpaceXform.get(), that->fColorSpaceXform.get()));
+
+    // If either op required explicit local coords or per-vertex colors, the combined mesh does
+    // too. The same applies to multiple view matrices.
+ fFlags |= that->fFlags;
+
+ if (!this->requiresPerVertexColors() && this->fMeshes[0].fColor != that->fMeshes[0].fColor) {
+ fFlags |= kRequiresPerVertexColors_Flag;
+ }
+ // Check whether we are about to acquire a mesh with a different view matrix.
+ if (!this->hasMultipleViewMatrices() &&
+ !this->fMeshes[0].fViewMatrix.cheapEqualTo(that->fMeshes[0].fViewMatrix)) {
+ fFlags |= kHasMultipleViewMatrices_Flag;
+ }
+
+ fMeshes.push_back_n(that->fMeshes.count(), that->fMeshes.begin());
+ fVertexCount += that->fVertexCount;
+ fIndexCount += that->fIndexCount;
+
+ return CombineResult::kMerged;
+}
+
+} // anonymous namespace
+
+std::unique_ptr<GrDrawOp> GrDrawVerticesOp::Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ sk_sp<SkVertices> vertices,
+ const SkVertices::Bone bones[],
+ int boneCount,
+ const SkMatrix& viewMatrix,
+ GrAAType aaType,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrPrimitiveType* overridePrimType) {
+ SkASSERT(vertices);
+ GrPrimitiveType primType = overridePrimType ? *overridePrimType
+ : SkVertexModeToGrPrimitiveType(vertices->mode());
+ return GrSimpleMeshDrawOpHelper::FactoryHelper<DrawVerticesOp>(context, std::move(paint),
+ std::move(vertices),
+ bones, boneCount,
+ primType, aaType,
+ std::move(colorSpaceXform),
+ viewMatrix);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+#include "src/gpu/GrDrawOpTest.h"
+
+static uint32_t seed_vertices(GrPrimitiveType type) {
+ switch (type) {
+ case GrPrimitiveType::kTriangles:
+ case GrPrimitiveType::kTriangleStrip:
+ return 3;
+ case GrPrimitiveType::kPoints:
+ return 1;
+ case GrPrimitiveType::kLines:
+ case GrPrimitiveType::kLineStrip:
+ return 2;
+ case GrPrimitiveType::kPath:
+ SkASSERT(0);
+ return 0;
+ }
+ SK_ABORT("Incomplete switch\n");
+}
+
+static uint32_t primitive_vertices(GrPrimitiveType type) {
+ switch (type) {
+ case GrPrimitiveType::kTriangles:
+ return 3;
+ case GrPrimitiveType::kLines:
+ return 2;
+ case GrPrimitiveType::kTriangleStrip:
+ case GrPrimitiveType::kPoints:
+ case GrPrimitiveType::kLineStrip:
+ return 1;
+ case GrPrimitiveType::kPath:
+ SkASSERT(0);
+ return 0;
+ }
+ SK_ABORT("Incomplete switch\n");
+}
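+
+// Together these give the vertex-count formula used below:
+//   vertexCount = seed_vertices(type) + (primitiveCount - 1) * primitive_vertices(type)
+// e.g. a 5-primitive kTriangleStrip needs 3 + 4 * 1 = 7 vertices.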
+
+static SkPoint random_point(SkRandom* random, SkScalar min, SkScalar max) {
+ SkPoint p;
+ p.fX = random->nextRangeScalar(min, max);
+ p.fY = random->nextRangeScalar(min, max);
+ return p;
+}
+
+static void randomize_params(size_t count, size_t maxVertex, SkScalar min, SkScalar max,
+ SkRandom* random, SkTArray<SkPoint>* positions,
+ SkTArray<SkPoint>* texCoords, bool hasTexCoords,
+ SkTArray<uint32_t>* colors, bool hasColors,
+ SkTArray<uint16_t>* indices, bool hasIndices) {
+ for (uint32_t v = 0; v < count; v++) {
+ positions->push_back(random_point(random, min, max));
+ if (hasTexCoords) {
+ texCoords->push_back(random_point(random, min, max));
+ }
+ if (hasColors) {
+ colors->push_back(GrRandomColor(random));
+ }
+ if (hasIndices) {
+ SkASSERT(maxVertex <= UINT16_MAX);
+ indices->push_back(random->nextULessThan((uint16_t)maxVertex));
+ }
+ }
+}
+
+GR_DRAW_OP_TEST_DEFINE(DrawVerticesOp) {
+ GrPrimitiveType type;
+ do {
+ type = GrPrimitiveType(random->nextULessThan(kNumGrPrimitiveTypes));
+ } while (type == GrPrimitiveType::kPath);
+
+ uint32_t primitiveCount = random->nextRangeU(1, 100);
+
+    // TODO: make 'sensible' index buffers.
+ SkTArray<SkPoint> positions;
+ SkTArray<SkPoint> texCoords;
+ SkTArray<uint32_t> colors;
+ SkTArray<uint16_t> indices;
+
+ bool hasTexCoords = random->nextBool();
+ bool hasIndices = random->nextBool();
+ bool hasColors = random->nextBool();
+
+ uint32_t vertexCount = seed_vertices(type) + (primitiveCount - 1) * primitive_vertices(type);
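+    // Spot check of the formula above: kTriangleStrip seeds 3 vertices and adds 1 per
+    // extra primitive, so N primitives use N + 2 vertices; kTriangles uses 3N, kLines 2N.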
+
+ static const SkScalar kMinVertExtent = -100.f;
+ static const SkScalar kMaxVertExtent = 100.f;
+ randomize_params(seed_vertices(type), vertexCount, kMinVertExtent, kMaxVertExtent, random,
+ &positions, &texCoords, hasTexCoords, &colors, hasColors, &indices,
+ hasIndices);
+
+ for (uint32_t i = 1; i < primitiveCount; i++) {
+ randomize_params(primitive_vertices(type), vertexCount, kMinVertExtent, kMaxVertExtent,
+ random, &positions, &texCoords, hasTexCoords, &colors, hasColors, &indices,
+ hasIndices);
+ }
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrTest::TestColorXform(random);
+
+ static constexpr SkVertices::VertexMode kIgnoredMode = SkVertices::kTriangles_VertexMode;
+ sk_sp<SkVertices> vertices = SkVertices::MakeCopy(kIgnoredMode, vertexCount, positions.begin(),
+ texCoords.begin(), colors.begin(),
+ hasIndices ? indices.count() : 0,
+ indices.begin());
+ GrAAType aaType = GrAAType::kNone;
+ if (numSamples > 1 && random->nextBool()) {
+ aaType = GrAAType::kMSAA;
+ }
+ return GrDrawVerticesOp::Make(context, std::move(paint), std::move(vertices), nullptr, 0,
+ viewMatrix, aaType, std::move(colorSpaceXform), &type);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.h b/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.h
new file mode 100644
index 0000000000..2f146b8d91
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawVerticesOp.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawVerticesOp_DEFINED
+#define GrDrawVerticesOp_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkVertices.h"
+#include "include/private/GrTypesPriv.h"
+
+class GrColorSpaceXform;
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+
+namespace GrDrawVerticesOp {
+
+ /**
+ * Draw a SkVertices. The GrPaint param's color is used if the vertices lack per-vertex color.
+ * If the vertices lack local coords then the vertex positions are used as local coords. The
+ * primitive type drawn is derived from the SkVertices object, unless overridePrimType is
+ * specified.
+ */
+ std::unique_ptr<GrDrawOp> Make(GrRecordingContext*,
+ GrPaint&&,
+ sk_sp<SkVertices>,
+ const SkVertices::Bone bones[],
+ int boneCount,
+ const SkMatrix& viewMatrix,
+ GrAAType,
+ sk_sp<GrColorSpaceXform>,
+ GrPrimitiveType* overridePrimType = nullptr);
+} // namespace GrDrawVerticesOp
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawableOp.cpp b/gfx/skia/skia/src/gpu/ops/GrDrawableOp.cpp
new file mode 100644
index 0000000000..d50c26b0b5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawableOp.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrDrawableOp.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+std::unique_ptr<GrDrawableOp> GrDrawableOp::Make(
+ GrRecordingContext* context, std::unique_ptr<SkDrawable::GpuDrawHandler> drawable,
+ const SkRect& bounds) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+ return pool->allocate<GrDrawableOp>(std::move(drawable), bounds);
+}
+
+GrDrawableOp::GrDrawableOp(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable,
+ const SkRect& bounds)
+ : INHERITED(ClassID())
+ , fDrawable(std::move(drawable)) {
+ this->setBounds(bounds, HasAABloat::kNo, IsHairline::kNo);
+}
+
+void GrDrawableOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ SkASSERT(state->opsRenderPass());
+ state->opsRenderPass()->executeDrawable(std::move(fDrawable));
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrDrawableOp.h b/gfx/skia/skia/src/gpu/ops/GrDrawableOp.h
new file mode 100644
index 0000000000..b5b71a70d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrDrawableOp.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawableOp_DEFINED
+#define GrDrawableOp_DEFINED
+
+#include "src/gpu/ops/GrOp.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkMatrix.h"
+#include "src/gpu/GrSemaphore.h"
+
+class GrRecordingContext;
+
+class GrDrawableOp final : public GrOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawableOp> Make(GrRecordingContext*,
+ std::unique_ptr<SkDrawable::GpuDrawHandler> drawable,
+ const SkRect& bounds);
+
+ const char* name() const override { return "Drawable"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ return INHERITED::dumpInfo();
+ }
+#endif
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ GrDrawableOp(std::unique_ptr<SkDrawable::GpuDrawHandler>, const SkRect& bounds);
+
+ CombineResult onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
+ return CombineResult::kCannotCombine;
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ std::unique_ptr<SkDrawable::GpuDrawHandler> fDrawable;
+
+ typedef GrOp INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.cpp b/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.cpp
new file mode 100644
index 0000000000..6b3c023ff6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.cpp
@@ -0,0 +1,826 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrFillRRectOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+// Hardware derivatives are not always accurate enough for highly elliptical corners. This method
+// checks to make sure the corners will still all look good if we use HW derivatives.
+static bool can_use_hw_derivatives_with_coverage(
+ const GrShaderCaps&, const SkMatrix&, const SkRRect&);
+
+std::unique_ptr<GrFillRRectOp> GrFillRRectOp::Make(
+ GrRecordingContext* ctx, GrAAType aaType, const SkMatrix& viewMatrix, const SkRRect& rrect,
+ const GrCaps& caps, GrPaint&& paint) {
+ if (!caps.instanceAttribSupport()) {
+ return nullptr;
+ }
+
+ Flags flags = Flags::kNone;
+ if (GrAAType::kCoverage == aaType) {
+ // TODO: Support perspective in a follow-on CL. This shouldn't be difficult, since we
+ // already use HW derivatives. The only trick will be adjusting the AA outset to account for
+ // perspective. (i.e., outset = 0.5 * z.)
+ if (viewMatrix.hasPerspective()) {
+ return nullptr;
+ }
+ if (can_use_hw_derivatives_with_coverage(*caps.shaderCaps(), viewMatrix, rrect)) {
+ // HW derivatives (more specifically, fwidth()) are consistently faster on all platforms
+ // in coverage mode. We use them as long as the approximation will be accurate enough.
+ flags |= Flags::kUseHWDerivatives;
+ }
+ } else {
+ if (GrAAType::kMSAA == aaType) {
+ if (!caps.sampleLocationsSupport() || !caps.shaderCaps()->sampleVariablesSupport()) {
+ return nullptr;
+ }
+ }
+ if (viewMatrix.hasPerspective()) {
+ // HW derivatives are consistently slower on all platforms in sample mask mode. We
+ // therefore only use them when there is perspective, since then we can't interpolate
+ // the symbolic screen-space gradient.
+ flags |= Flags::kUseHWDerivatives | Flags::kHasPerspective;
+ }
+ }
+
+ // Produce a matrix that draws the round rect from normalized [-1, -1, +1, +1] space.
+ float l = rrect.rect().left(), r = rrect.rect().right(),
+ t = rrect.rect().top(), b = rrect.rect().bottom();
+ SkMatrix m;
+ // Unmap the normalized rect [-1, -1, +1, +1] back to [l, t, r, b].
+ m.setScaleTranslate((r - l)/2, (b - t)/2, (l + r)/2, (t + b)/2);
+ // Map to device space.
+ m.postConcat(viewMatrix);
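+    // Spot check: normalized x == -1 maps to (r - l)/2 * -1 + (l + r)/2 == l, and
+    // x == +1 maps to r (likewise t and b in y), before the device-space concat.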
+
+ SkRect devBounds;
+ if (!(flags & Flags::kHasPerspective)) {
+ // Since m is an affine matrix that maps the rect [-1, -1, +1, +1] into the shape's
+ // device-space quad, it's quite simple to find the bounding rectangle:
+ devBounds = SkRect::MakeXYWH(m.getTranslateX(), m.getTranslateY(), 0, 0);
+ devBounds.outset(SkScalarAbs(m.getScaleX()) + SkScalarAbs(m.getSkewX()),
+ SkScalarAbs(m.getSkewY()) + SkScalarAbs(m.getScaleY()));
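+        // (For x,y in [-1,+1], |a*x + b*y| peaks at |a| + |b| at a corner, so these
+        // outsets are exactly the half-extents of the mapped parallelogram.)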
+ } else {
+ viewMatrix.mapRect(&devBounds, rrect.rect());
+ }
+
+ if (GrAAType::kMSAA == aaType && caps.preferTrianglesOverSampleMask()) {
+ // We are on a platform that prefers fine triangles instead of using the sample mask. See if
+ // the round rect is large enough that it will be faster for us to send it off to the
+ // default path renderer instead. The 200x200 threshold was arrived at using the
+ // "shapes_rrect" benchmark on an ARM Galaxy S9.
+ if (devBounds.height() * devBounds.width() > 200 * 200) {
+ return nullptr;
+ }
+ }
+
+ GrOpMemoryPool* pool = ctx->priv().opMemoryPool();
+ return pool->allocate<GrFillRRectOp>(aaType, rrect, flags, m, std::move(paint), devBounds);
+}
+
+GrFillRRectOp::GrFillRRectOp(
+ GrAAType aaType, const SkRRect& rrect, Flags flags,
+ const SkMatrix& totalShapeMatrix, GrPaint&& paint, const SkRect& devBounds)
+ : GrDrawOp(ClassID())
+ , fAAType(aaType)
+ , fOriginalColor(paint.getColor4f())
+ , fLocalRect(rrect.rect())
+ , fFlags(flags)
+ , fProcessors(std::move(paint)) {
+ SkASSERT((fFlags & Flags::kHasPerspective) == totalShapeMatrix.hasPerspective());
+ this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsHairline::kNo);
+
+ // Write the matrix attribs.
+ const SkMatrix& m = totalShapeMatrix;
+ if (!(fFlags & Flags::kHasPerspective)) {
+ // Affine 2D transformation (float2x2 plus float2 translate).
+ SkASSERT(!m.hasPerspective());
+ this->writeInstanceData(m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY());
+ this->writeInstanceData(m.getTranslateX(), m.getTranslateY());
+ } else {
+ // Perspective float3x3 transformation matrix.
+ SkASSERT(m.hasPerspective());
+ m.get9(this->appendInstanceData<float>(9));
+ }
+
+ // Convert the radii to [-1, -1, +1, +1] space and write their attribs.
+ Sk4f radiiX, radiiY;
+ Sk4f::Load2(SkRRectPriv::GetRadiiArray(rrect), &radiiX, &radiiY);
+ (radiiX * (2/rrect.width())).store(this->appendInstanceData<float>(4));
+ (radiiY * (2/rrect.height())).store(this->appendInstanceData<float>(4));
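+    // Multiplying by 2/width (and 2/height) re-expresses each radius as a fraction of
+    // the rect's half-extent, matching the normalized [-1, +1] space used above.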
+
+ // We will write the color and local rect attribs during finalize().
+}
+
+GrProcessorSet::Analysis GrFillRRectOp::finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) {
+ SkASSERT(1 == fInstanceCount);
+
+ SkPMColor4f overrideColor;
+ const GrProcessorSet::Analysis& analysis = fProcessors.finalize(
+ fOriginalColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
+ &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps, clampType,
+ &overrideColor);
+
+ // Finish writing the instance attribs.
+ SkPMColor4f finalColor = analysis.inputColorIsOverridden() ? overrideColor : fOriginalColor;
+ if (!SkPMColor4fFitsInBytes(finalColor)) {
+ fFlags |= Flags::kWideColor;
+ uint32_t halfColor[2];
+ SkFloatToHalf_finite_ftz(Sk4f::Load(finalColor.vec())).store(&halfColor);
+ this->writeInstanceData(halfColor[0], halfColor[1]);
+ } else {
+ this->writeInstanceData(finalColor.toBytes_RGBA());
+ }
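+    // Note the wide-color path writes four halfs (8 bytes) versus 4 bytes of RGBA;
+    // fInstanceStride, captured below once all attribs are written, absorbs the
+    // difference.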
+
+ if (analysis.usesLocalCoords()) {
+ this->writeInstanceData(fLocalRect);
+ fFlags |= Flags::kHasLocalCoords;
+ }
+ fInstanceStride = fInstanceData.count();
+
+ return analysis;
+}
+
+GrDrawOp::CombineResult GrFillRRectOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
+ const auto& that = *op->cast<GrFillRRectOp>();
+ if (fFlags != that.fFlags || fProcessors != that.fProcessors ||
+ fInstanceData.count() > std::numeric_limits<int>::max() - that.fInstanceData.count()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fInstanceData.push_back_n(that.fInstanceData.count(), that.fInstanceData.begin());
+ fInstanceCount += that.fInstanceCount;
+ SkASSERT(fInstanceStride == that.fInstanceStride);
+ return CombineResult::kMerged;
+}
+
+class GrFillRRectOp::Processor : public GrGeometryProcessor {
+public:
+ Processor(GrAAType aaType, Flags flags)
+ : GrGeometryProcessor(kGrFillRRectOp_Processor_ClassID)
+ , fAAType(aaType)
+ , fFlags(flags) {
+ int numVertexAttribs = (GrAAType::kCoverage == fAAType) ? 3 : 2;
+ this->setVertexAttributes(kVertexAttribs, numVertexAttribs);
+
+ if (!(flags & Flags::kHasPerspective)) {
+ // Affine 2D transformation (float2x2 plus float2 translate).
+ fInstanceAttribs.emplace_back("skew", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ fInstanceAttribs.emplace_back(
+ "translate", kFloat2_GrVertexAttribType, kFloat2_GrSLType);
+ } else {
+ // Perspective float3x3 transformation matrix.
+ fInstanceAttribs.emplace_back("persp_x", kFloat3_GrVertexAttribType, kFloat3_GrSLType);
+ fInstanceAttribs.emplace_back("persp_y", kFloat3_GrVertexAttribType, kFloat3_GrSLType);
+ fInstanceAttribs.emplace_back("persp_z", kFloat3_GrVertexAttribType, kFloat3_GrSLType);
+ }
+ fInstanceAttribs.emplace_back("radii_x", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ fInstanceAttribs.emplace_back("radii_y", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ fColorAttrib = &fInstanceAttribs.push_back(
+ MakeColorAttribute("color", (flags & Flags::kWideColor)));
+ if (fFlags & Flags::kHasLocalCoords) {
+ fInstanceAttribs.emplace_back(
+ "local_rect", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ }
+ this->setInstanceAttributes(fInstanceAttribs.begin(), fInstanceAttribs.count());
+
+ if (GrAAType::kMSAA == fAAType) {
+ this->setWillUseCustomFeature(CustomFeatures::kSampleLocations);
+ }
+ }
+
+ const char* name() const override { return "GrFillRRectOp::Processor"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ b->add32(((uint32_t)fFlags << 16) | (uint32_t)fAAType);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+private:
+ static constexpr Attribute kVertexAttribs[] = {
+ {"radii_selector", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ {"corner_and_radius_outsets", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
+ // Coverage only.
+ {"aa_bloat_and_coverage", kFloat4_GrVertexAttribType, kFloat4_GrSLType}};
+
+ const GrAAType fAAType;
+ const Flags fFlags;
+
+ SkSTArray<6, Attribute> fInstanceAttribs;
+ const Attribute* fColorAttrib;
+
+ class CoverageImpl;
+ class MSAAImpl;
+};
+
+constexpr GrPrimitiveProcessor::Attribute GrFillRRectOp::Processor::kVertexAttribs[];
+
+// Our coverage geometry consists of an inset octagon with solid coverage, surrounded by linear
+// coverage ramps on the horizontal and vertical edges, and "arc coverage" pieces on the diagonal
+// edges. The Vertex struct tells the shader where to place its vertex within a normalized
+// ([l, t, r, b] = [-1, -1, +1, +1]) space, and how to calculate coverage. See onEmitCode.
+struct CoverageVertex {
+ std::array<float, 4> fRadiiSelector;
+ std::array<float, 2> fCorner;
+ std::array<float, 2> fRadiusOutset;
+ std::array<float, 2> fAABloatDirection;
+ float fCoverage;
+ float fIsLinearCoverage;
+};
+
+// This is the offset (when multiplied by radii) from the corners of a bounding box to the vertices
+// of its inscribed octagon. We draw the outside portion of arcs with quarter-octagons rather than
+// rectangles.
+static constexpr float kOctoOffset = 1/(1 + SK_ScalarRoot2Over2);
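+// (1/(1 + sqrt(2)/2) simplifies to 2 - sqrt(2) ~= 0.586: cutting a square of side 2 at
+// that distance from each corner yields a regular octagon, since the diagonal edge
+// c*sqrt(2) then equals the remaining straight edge 2 - 2c.)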
+
+static constexpr CoverageVertex kCoverageVertexData[] = {
+ // Left inset edge.
+ {{{0,0,0,1}}, {{-1,+1}}, {{0,-1}}, {{+1,0}}, 1, 1},
+ {{{1,0,0,0}}, {{-1,-1}}, {{0,+1}}, {{+1,0}}, 1, 1},
+
+ // Top inset edge.
+ {{{1,0,0,0}}, {{-1,-1}}, {{+1,0}}, {{0,+1}}, 1, 1},
+ {{{0,1,0,0}}, {{+1,-1}}, {{-1,0}}, {{0,+1}}, 1, 1},
+
+ // Right inset edge.
+ {{{0,1,0,0}}, {{+1,-1}}, {{0,+1}}, {{-1,0}}, 1, 1},
+ {{{0,0,1,0}}, {{+1,+1}}, {{0,-1}}, {{-1,0}}, 1, 1},
+
+ // Bottom inset edge.
+ {{{0,0,1,0}}, {{+1,+1}}, {{-1,0}}, {{0,-1}}, 1, 1},
+ {{{0,0,0,1}}, {{-1,+1}}, {{+1,0}}, {{0,-1}}, 1, 1},
+
+
+ // Left outset edge.
+ {{{0,0,0,1}}, {{-1,+1}}, {{0,-1}}, {{-1,0}}, 0, 1},
+ {{{1,0,0,0}}, {{-1,-1}}, {{0,+1}}, {{-1,0}}, 0, 1},
+
+ // Top outset edge.
+ {{{1,0,0,0}}, {{-1,-1}}, {{+1,0}}, {{0,-1}}, 0, 1},
+ {{{0,1,0,0}}, {{+1,-1}}, {{-1,0}}, {{0,-1}}, 0, 1},
+
+ // Right outset edge.
+ {{{0,1,0,0}}, {{+1,-1}}, {{0,+1}}, {{+1,0}}, 0, 1},
+ {{{0,0,1,0}}, {{+1,+1}}, {{0,-1}}, {{+1,0}}, 0, 1},
+
+ // Bottom outset edge.
+ {{{0,0,1,0}}, {{+1,+1}}, {{-1,0}}, {{0,+1}}, 0, 1},
+ {{{0,0,0,1}}, {{-1,+1}}, {{+1,0}}, {{0,+1}}, 0, 1},
+
+
+ // Top-left corner.
+ {{{1,0,0,0}}, {{-1,-1}}, {{ 0,+1}}, {{-1, 0}}, 0, 0},
+ {{{1,0,0,0}}, {{-1,-1}}, {{ 0,+1}}, {{+1, 0}}, 1, 0},
+ {{{1,0,0,0}}, {{-1,-1}}, {{+1, 0}}, {{ 0,+1}}, 1, 0},
+ {{{1,0,0,0}}, {{-1,-1}}, {{+1, 0}}, {{ 0,-1}}, 0, 0},
+ {{{1,0,0,0}}, {{-1,-1}}, {{+kOctoOffset,0}}, {{-1,-1}}, 0, 0},
+ {{{1,0,0,0}}, {{-1,-1}}, {{0,+kOctoOffset}}, {{-1,-1}}, 0, 0},
+
+ // Top-right corner.
+ {{{0,1,0,0}}, {{+1,-1}}, {{-1, 0}}, {{ 0,-1}}, 0, 0},
+ {{{0,1,0,0}}, {{+1,-1}}, {{-1, 0}}, {{ 0,+1}}, 1, 0},
+ {{{0,1,0,0}}, {{+1,-1}}, {{ 0,+1}}, {{-1, 0}}, 1, 0},
+ {{{0,1,0,0}}, {{+1,-1}}, {{ 0,+1}}, {{+1, 0}}, 0, 0},
+ {{{0,1,0,0}}, {{+1,-1}}, {{0,+kOctoOffset}}, {{+1,-1}}, 0, 0},
+ {{{0,1,0,0}}, {{+1,-1}}, {{-kOctoOffset,0}}, {{+1,-1}}, 0, 0},
+
+ // Bottom-right corner.
+ {{{0,0,1,0}}, {{+1,+1}}, {{ 0,-1}}, {{+1, 0}}, 0, 0},
+ {{{0,0,1,0}}, {{+1,+1}}, {{ 0,-1}}, {{-1, 0}}, 1, 0},
+ {{{0,0,1,0}}, {{+1,+1}}, {{-1, 0}}, {{ 0,-1}}, 1, 0},
+ {{{0,0,1,0}}, {{+1,+1}}, {{-1, 0}}, {{ 0,+1}}, 0, 0},
+ {{{0,0,1,0}}, {{+1,+1}}, {{-kOctoOffset,0}}, {{+1,+1}}, 0, 0},
+ {{{0,0,1,0}}, {{+1,+1}}, {{0,-kOctoOffset}}, {{+1,+1}}, 0, 0},
+
+ // Bottom-left corner.
+ {{{0,0,0,1}}, {{-1,+1}}, {{+1, 0}}, {{ 0,+1}}, 0, 0},
+ {{{0,0,0,1}}, {{-1,+1}}, {{+1, 0}}, {{ 0,-1}}, 1, 0},
+ {{{0,0,0,1}}, {{-1,+1}}, {{ 0,-1}}, {{+1, 0}}, 1, 0},
+ {{{0,0,0,1}}, {{-1,+1}}, {{ 0,-1}}, {{-1, 0}}, 0, 0},
+ {{{0,0,0,1}}, {{-1,+1}}, {{0,-kOctoOffset}}, {{-1,+1}}, 0, 0},
+ {{{0,0,0,1}}, {{-1,+1}}, {{+kOctoOffset,0}}, {{-1,+1}}, 0, 0}};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gCoverageVertexBufferKey);
+
+static constexpr uint16_t kCoverageIndexData[] = {
+ // Inset octagon (solid coverage).
+ 0, 1, 7,
+ 1, 2, 7,
+ 7, 2, 6,
+ 2, 3, 6,
+ 6, 3, 5,
+ 3, 4, 5,
+
+ // AA borders (linear coverage).
+ 0, 1, 8, 1, 9, 8,
+ 2, 3, 10, 3, 11, 10,
+ 4, 5, 12, 5, 13, 12,
+ 6, 7, 14, 7, 15, 14,
+
+ // Top-left arc.
+ 16, 17, 21,
+ 17, 21, 18,
+ 21, 18, 20,
+ 18, 20, 19,
+
+ // Top-right arc.
+ 22, 23, 27,
+ 23, 27, 24,
+ 27, 24, 26,
+ 24, 26, 25,
+
+ // Bottom-right arc.
+ 28, 29, 33,
+ 29, 33, 30,
+ 33, 30, 32,
+ 30, 32, 31,
+
+ // Bottom-left arc.
+ 34, 35, 39,
+ 35, 39, 36,
+ 39, 36, 38,
+ 36, 38, 37};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gCoverageIndexBufferKey);
+
+
+// Our MSAA geometry consists of an inset octagon with full sample mask coverage, circumscribed
+// by a larger octagon that modifies the sample mask for the arc at each corresponding corner.
+struct MSAAVertex {
+ std::array<float, 4> fRadiiSelector;
+ std::array<float, 2> fCorner;
+ std::array<float, 2> fRadiusOutset;
+};
+
+static constexpr MSAAVertex kMSAAVertexData[] = {
+ // Left edge. (Negative radii selector indicates this is not an arc section.)
+ {{{0,0,0,-1}}, {{-1,+1}}, {{0,-1}}},
+ {{{-1,0,0,0}}, {{-1,-1}}, {{0,+1}}},
+
+ // Top edge.
+ {{{-1,0,0,0}}, {{-1,-1}}, {{+1,0}}},
+ {{{0,-1,0,0}}, {{+1,-1}}, {{-1,0}}},
+
+ // Right edge.
+ {{{0,-1,0,0}}, {{+1,-1}}, {{0,+1}}},
+ {{{0,0,-1,0}}, {{+1,+1}}, {{0,-1}}},
+
+ // Bottom edge.
+ {{{0,0,-1,0}}, {{+1,+1}}, {{-1,0}}},
+ {{{0,0,0,-1}}, {{-1,+1}}, {{+1,0}}},
+
+ // Top-left corner.
+ {{{1,0,0,0}}, {{-1,-1}}, {{0,+1}}},
+ {{{1,0,0,0}}, {{-1,-1}}, {{0,+kOctoOffset}}},
+ {{{1,0,0,0}}, {{-1,-1}}, {{+1,0}}},
+ {{{1,0,0,0}}, {{-1,-1}}, {{+kOctoOffset,0}}},
+
+ // Top-right corner.
+ {{{0,1,0,0}}, {{+1,-1}}, {{-1,0}}},
+ {{{0,1,0,0}}, {{+1,-1}}, {{-kOctoOffset,0}}},
+ {{{0,1,0,0}}, {{+1,-1}}, {{0,+1}}},
+ {{{0,1,0,0}}, {{+1,-1}}, {{0,+kOctoOffset}}},
+
+ // Bottom-right corner.
+ {{{0,0,1,0}}, {{+1,+1}}, {{0,-1}}},
+ {{{0,0,1,0}}, {{+1,+1}}, {{0,-kOctoOffset}}},
+ {{{0,0,1,0}}, {{+1,+1}}, {{-1,0}}},
+ {{{0,0,1,0}}, {{+1,+1}}, {{-kOctoOffset,0}}},
+
+ // Bottom-left corner.
+ {{{0,0,0,1}}, {{-1,+1}}, {{+1,0}}},
+ {{{0,0,0,1}}, {{-1,+1}}, {{+kOctoOffset,0}}},
+ {{{0,0,0,1}}, {{-1,+1}}, {{0,-1}}},
+ {{{0,0,0,1}}, {{-1,+1}}, {{0,-kOctoOffset}}}};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gMSAAVertexBufferKey);
+
+static constexpr uint16_t kMSAAIndexData[] = {
+ // Inset octagon. (Full sample mask.)
+ 0, 1, 2,
+ 0, 2, 3,
+ 0, 3, 6,
+ 3, 4, 5,
+ 3, 5, 6,
+ 6, 7, 0,
+
+ // Top-left arc. (Sample mask is set to the arc.)
+ 8, 9, 10,
+ 9, 11, 10,
+
+ // Top-right arc.
+ 12, 13, 14,
+ 13, 15, 14,
+
+ // Bottom-right arc.
+ 16, 17, 18,
+ 17, 19, 18,
+
+ // Bottom-left arc.
+ 20, 21, 22,
+ 21, 23, 22};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gMSAAIndexBufferKey);
+
+void GrFillRRectOp::onPrepare(GrOpFlushState* flushState) {
+ if (void* instanceData = flushState->makeVertexSpace(fInstanceStride, fInstanceCount,
+ &fInstanceBuffer, &fBaseInstance)) {
+ SkASSERT(fInstanceStride * fInstanceCount == fInstanceData.count());
+ memcpy(instanceData, fInstanceData.begin(), fInstanceData.count());
+ }
+
+ if (GrAAType::kCoverage == fAAType) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gCoverageIndexBufferKey);
+
+ fIndexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kCoverageIndexData), kCoverageIndexData,
+ gCoverageIndexBufferKey);
+
+ GR_DEFINE_STATIC_UNIQUE_KEY(gCoverageVertexBufferKey);
+
+ fVertexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kCoverageVertexData), kCoverageVertexData,
+ gCoverageVertexBufferKey);
+
+ fIndexCount = SK_ARRAY_COUNT(kCoverageIndexData);
+ } else {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gMSAAIndexBufferKey);
+
+ fIndexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kIndex, sizeof(kMSAAIndexData), kMSAAIndexData,
+ gMSAAIndexBufferKey);
+
+ GR_DEFINE_STATIC_UNIQUE_KEY(gMSAAVertexBufferKey);
+
+ fVertexBuffer = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kMSAAVertexData), kMSAAVertexData,
+ gMSAAVertexBufferKey);
+
+ fIndexCount = SK_ARRAY_COUNT(kMSAAIndexData);
+ }
+}
+
+class GrFillRRectOp::Processor::CoverageImpl : public GrGLSLGeometryProcessor {
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const auto& proc = args.fGP.cast<Processor>();
+ bool useHWDerivatives = (proc.fFlags & Flags::kUseHWDerivatives);
+
+ SkASSERT(proc.vertexStride() == sizeof(CoverageVertex));
+
+ GrGLSLVaryingHandler* varyings = args.fVaryingHandler;
+ varyings->emitAttributes(proc);
+ varyings->addPassThroughAttribute(*proc.fColorAttrib, args.fOutputColor,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+
+ // Emit the vertex shader.
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+
+ // Unpack vertex attribs.
+ v->codeAppend("float2 corner = corner_and_radius_outsets.xy;");
+ v->codeAppend("float2 radius_outset = corner_and_radius_outsets.zw;");
+ v->codeAppend("float2 aa_bloat_direction = aa_bloat_and_coverage.xy;");
+ v->codeAppend("float coverage = aa_bloat_and_coverage.z;");
+ v->codeAppend("float is_linear_coverage = aa_bloat_and_coverage.w;");
+
+ // Find the amount to bloat each edge for AA (in source space).
+ v->codeAppend("float2 pixellength = inversesqrt("
+ "float2(dot(skew.xz, skew.xz), dot(skew.yw, skew.yw)));");
+ v->codeAppend("float4 normalized_axis_dirs = skew * pixellength.xyxy;");
+ v->codeAppend("float2 axiswidths = (abs(normalized_axis_dirs.xy) + "
+ "abs(normalized_axis_dirs.zw));");
+ v->codeAppend("float2 aa_bloatradius = axiswidths * pixellength * .5;");
+
+ // Identify our radii.
+ v->codeAppend("float4 radii_and_neighbors = radii_selector"
+ "* float4x4(radii_x, radii_y, radii_x.yxwz, radii_y.wzyx);");
+ v->codeAppend("float2 radii = radii_and_neighbors.xy;");
+ v->codeAppend("float2 neighbor_radii = radii_and_neighbors.zw;");
+
+ v->codeAppend("if (any(greaterThan(aa_bloatradius, float2(1)))) {");
+                       // The rrect is narrower than an AA coverage ramp. We can't draw as-is
+                       // or else opposite AA borders will overlap. Instead, fudge the size up
+                       // to the width of a coverage ramp, and then reduce total coverage to
+                       // make the rect appear thinner.
+ v->codeAppend( "corner = max(abs(corner), aa_bloatradius) * sign(corner);");
+ v->codeAppend( "coverage /= max(aa_bloatradius.x, 1) * max(aa_bloatradius.y, 1);");
+ // Set radii to zero to ensure we take the "linear coverage" codepath.
+ // (The "coverage" variable only has effect in the linear codepath.)
+ v->codeAppend( "radii = float2(0);");
+ v->codeAppend("}");
+
+ v->codeAppend("if (any(lessThan(radii, aa_bloatradius * 1.25))) {");
+ // The radii are very small. Demote this arc to a sharp 90 degree corner.
+ v->codeAppend( "radii = aa_bloatradius;");
+ // Snap octagon vertices to the corner of the bounding box.
+ v->codeAppend( "radius_outset = floor(abs(radius_outset)) * radius_outset;");
+ v->codeAppend( "is_linear_coverage = 1;");
+ v->codeAppend("} else {");
+ // Don't let radii get smaller than a pixel.
+ v->codeAppend( "radii = clamp(radii, pixellength, 2 - pixellength);");
+ v->codeAppend( "neighbor_radii = clamp(neighbor_radii, pixellength, 2 - pixellength);");
+ // Don't let neighboring radii get closer together than 1/16 pixel.
+ v->codeAppend( "float2 spacing = 2 - radii - neighbor_radii;");
+ v->codeAppend( "float2 extra_pad = max(pixellength * .0625 - spacing, float2(0));");
+ v->codeAppend( "radii -= extra_pad * .5;");
+ v->codeAppend("}");
+
+ // Find our vertex position, adjusted for radii and bloated for AA. Our rect is drawn in
+ // normalized [-1,-1,+1,+1] space.
+ v->codeAppend("float2 aa_outset = aa_bloat_direction.xy * aa_bloatradius;");
+ v->codeAppend("float2 vertexpos = corner + radius_outset * radii + aa_outset;");
+
+ // Emit transforms.
+ GrShaderVar localCoord("", kFloat2_GrSLType);
+ if (proc.fFlags & Flags::kHasLocalCoords) {
+ v->codeAppend("float2 localcoord = (local_rect.xy * (1 - vertexpos) + "
+ "local_rect.zw * (1 + vertexpos)) * .5;");
+ localCoord.set(kFloat2_GrSLType, "localcoord");
+ }
+ this->emitTransforms(v, varyings, args.fUniformHandler, localCoord,
+ args.fFPCoordTransformHandler);
+
+ // Transform to device space.
+ SkASSERT(!(proc.fFlags & Flags::kHasPerspective));
+ v->codeAppend("float2x2 skewmatrix = float2x2(skew.xy, skew.zw);");
+ v->codeAppend("float2 devcoord = vertexpos * skewmatrix + translate;");
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "devcoord");
+
+ // Setup interpolants for coverage.
+ GrGLSLVarying arcCoord(useHWDerivatives ? kFloat2_GrSLType : kFloat4_GrSLType);
+ varyings->addVarying("arccoord", &arcCoord);
+ v->codeAppend("if (0 != is_linear_coverage) {");
+ // We are a non-corner piece: Set x=0 to indicate built-in coverage, and
+ // interpolate linear coverage across y.
+ v->codeAppendf( "%s.xy = float2(0, coverage);", arcCoord.vsOut());
+ v->codeAppend("} else {");
+ // Find the normalized arc coordinates for our corner ellipse.
+ // (i.e., the coordinate system where x^2 + y^2 == 1).
+ v->codeAppend( "float2 arccoord = 1 - abs(radius_outset) + aa_outset/radii * corner;");
+ // We are a corner piece: Interpolate the arc coordinates for coverage.
+                       // Emit x+1 to ensure no pixel in the arc has an x value of 0 (since x=0
+ // instructs the fragment shader to use linear coverage).
+ v->codeAppendf( "%s.xy = float2(arccoord.x+1, arccoord.y);", arcCoord.vsOut());
+ if (!useHWDerivatives) {
+ // The gradient is order-1: Interpolate it across arccoord.zw.
+ v->codeAppendf("float2x2 derivatives = inverse(skewmatrix);");
+ v->codeAppendf("%s.zw = derivatives * (arccoord/radii * 2);", arcCoord.vsOut());
+ }
+ v->codeAppend("}");
+
+ // Emit the fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+
+ f->codeAppendf("float x_plus_1=%s.x, y=%s.y;", arcCoord.fsIn(), arcCoord.fsIn());
+ f->codeAppendf("half coverage;");
+ f->codeAppendf("if (0 == x_plus_1) {");
+ f->codeAppendf( "coverage = half(y);"); // We are a non-arc pixel (linear coverage).
+ f->codeAppendf("} else {");
+ f->codeAppendf( "float fn = x_plus_1 * (x_plus_1 - 2);"); // fn = (x+1)*(x-1) = x^2-1
+ f->codeAppendf( "fn = fma(y,y, fn);"); // fn = x^2 + y^2 - 1
+ if (useHWDerivatives) {
+ f->codeAppendf("float fnwidth = fwidth(fn);");
+ } else {
+ // The gradient is interpolated across arccoord.zw.
+ f->codeAppendf("float gx=%s.z, gy=%s.w;", arcCoord.fsIn(), arcCoord.fsIn());
+ f->codeAppendf("float fnwidth = abs(gx) + abs(gy);");
+ }
+ f->codeAppendf( "half d = half(fn/fnwidth);");
+ f->codeAppendf( "coverage = clamp(.5 - d, 0, 1);");
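+        // (d approximates the signed distance to the ellipse in pixels, since fnwidth is
+        // roughly |grad fn| over one pixel; .5 - d then ramps coverage across ~1 pixel.)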
+ f->codeAppendf("}");
+ f->codeAppendf("%s = half4(coverage);", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+};
+
+
+class GrFillRRectOp::Processor::MSAAImpl : public GrGLSLGeometryProcessor {
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const auto& proc = args.fGP.cast<Processor>();
+ bool useHWDerivatives = (proc.fFlags & Flags::kUseHWDerivatives);
+ bool hasPerspective = (proc.fFlags & Flags::kHasPerspective);
+ bool hasLocalCoords = (proc.fFlags & Flags::kHasLocalCoords);
+ SkASSERT(useHWDerivatives == hasPerspective);
+
+ SkASSERT(proc.vertexStride() == sizeof(MSAAVertex));
+
+ // Emit the vertex shader.
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+
+ GrGLSLVaryingHandler* varyings = args.fVaryingHandler;
+ varyings->emitAttributes(proc);
+ varyings->addPassThroughAttribute(*proc.fColorAttrib, args.fOutputColor,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+
+ // Unpack vertex attribs.
+ v->codeAppendf("float2 corner = corner_and_radius_outsets.xy;");
+ v->codeAppendf("float2 radius_outset = corner_and_radius_outsets.zw;");
+
+ // Identify our radii.
+ v->codeAppend("float2 radii;");
+ v->codeAppend("radii.x = dot(radii_selector, radii_x);");
+ v->codeAppend("radii.y = dot(radii_selector, radii_y);");
+ v->codeAppendf("bool is_arc_section = (radii.x > 0);");
+ v->codeAppendf("radii = abs(radii);");
+
+ // Find our vertex position, adjusted for radii. Our rect is drawn in normalized
+ // [-1,-1,+1,+1] space.
+ v->codeAppend("float2 vertexpos = corner + radius_outset * radii;");
+
+ // Emit transforms.
+ GrShaderVar localCoord("", kFloat2_GrSLType);
+ if (hasLocalCoords) {
+ v->codeAppend("float2 localcoord = (local_rect.xy * (1 - vertexpos) + "
+ "local_rect.zw * (1 + vertexpos)) * .5;");
+ localCoord.set(kFloat2_GrSLType, "localcoord");
+ }
+ this->emitTransforms(v, varyings, args.fUniformHandler, localCoord,
+ args.fFPCoordTransformHandler);
+
+ // Transform to device space.
+ if (!hasPerspective) {
+ v->codeAppend("float2x2 skewmatrix = float2x2(skew.xy, skew.zw);");
+ v->codeAppend("float2 devcoord = vertexpos * skewmatrix + translate;");
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "devcoord");
+ } else {
+ v->codeAppend("float3x3 persp_matrix = float3x3(persp_x, persp_y, persp_z);");
+ v->codeAppend("float3 devcoord = float3(vertexpos, 1) * persp_matrix;");
+ gpArgs->fPositionVar.set(kFloat3_GrSLType, "devcoord");
+ }
+
+ // Determine normalized arc coordinates for the implicit function.
+ GrGLSLVarying arcCoord((useHWDerivatives) ? kFloat2_GrSLType : kFloat4_GrSLType);
+ varyings->addVarying("arccoord", &arcCoord);
+ v->codeAppendf("if (is_arc_section) {");
+ v->codeAppendf( "%s.xy = 1 - abs(radius_outset);", arcCoord.vsOut());
+ if (!useHWDerivatives) {
+ // The gradient is order-1: Interpolate it across arccoord.zw.
+ // This doesn't work with perspective.
+ SkASSERT(!hasPerspective);
+ v->codeAppendf("float2x2 derivatives = inverse(skewmatrix);");
+ v->codeAppendf("%s.zw = derivatives * (%s.xy/radii * corner * 2);",
+ arcCoord.vsOut(), arcCoord.vsOut());
+ }
+ v->codeAppendf("} else {");
+ if (useHWDerivatives) {
+ v->codeAppendf("%s = float2(0);", arcCoord.vsOut());
+ } else {
+ v->codeAppendf("%s = float4(0);", arcCoord.vsOut());
+ }
+ v->codeAppendf("}");
+
+ // Emit the fragment shader.
+ GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+
+ f->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+
+ // If x,y == 0, then we are drawing a triangle that does not track an arc.
+ f->codeAppendf("if (float2(0) != %s.xy) {", arcCoord.fsIn());
+ f->codeAppendf( "float fn = dot(%s.xy, %s.xy) - 1;", arcCoord.fsIn(), arcCoord.fsIn());
+ if (GrAAType::kMSAA == proc.fAAType) {
+ using ScopeFlags = GrGLSLFPFragmentBuilder::ScopeFlags;
+ if (!useHWDerivatives) {
+ f->codeAppendf("float2 grad = %s.zw;", arcCoord.fsIn());
+ f->applyFnToMultisampleMask("fn", "grad", ScopeFlags::kInsidePerPrimitiveBranch);
+ } else {
+ f->applyFnToMultisampleMask("fn", nullptr, ScopeFlags::kInsidePerPrimitiveBranch);
+ }
+ } else {
+ f->codeAppendf("if (fn > 0) {");
+ f->codeAppendf( "%s = half4(0);", args.fOutputCoverage);
+ f->codeAppendf("}");
+ }
+ f->codeAppendf("}");
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+};
+
+GrGLSLPrimitiveProcessor* GrFillRRectOp::Processor::createGLSLInstance(
+ const GrShaderCaps&) const {
+ if (GrAAType::kCoverage != fAAType) {
+ return new MSAAImpl();
+ }
+ return new CoverageImpl();
+}
+
+void GrFillRRectOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ if (!fInstanceBuffer || !fIndexBuffer || !fVertexBuffer) {
+ return; // Setup failed.
+ }
+
+ Processor* proc = flushState->allocator()->make<Processor>(fAAType, fFlags);
+ SkASSERT(proc->instanceStride() == (size_t)fInstanceStride);
+
+ GrPipeline::InitArgs initArgs;
+ if (GrAAType::kMSAA == fAAType) {
+ initArgs.fInputFlags = GrPipeline::InputFlags::kHWAntialias;
+ }
+ initArgs.fCaps = &flushState->caps();
+ initArgs.fDstProxy = flushState->drawOpArgs().dstProxy();
+ initArgs.fOutputSwizzle = flushState->drawOpArgs().outputSwizzle();
+ auto clip = flushState->detachAppliedClip();
+ GrPipeline::FixedDynamicState* fixedDynamicState =
+ flushState->allocator()->make<GrPipeline::FixedDynamicState>(clip.scissorState().rect());
+ GrPipeline* pipeline = flushState->allocator()->make<GrPipeline>(initArgs,
+ std::move(fProcessors),
+ std::move(clip));
+
+ GrProgramInfo programInfo(flushState->drawOpArgs().numSamples(),
+ flushState->drawOpArgs().origin(),
+ *pipeline,
+ *proc,
+ fixedDynamicState,
+ nullptr, 0);
+
+ GrMesh* mesh = flushState->allocator()->make<GrMesh>(GrPrimitiveType::kTriangles);
+ mesh->setIndexedInstanced(
+ std::move(fIndexBuffer), fIndexCount, std::move(fInstanceBuffer), fInstanceCount,
+ fBaseInstance, GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(fVertexBuffer));
+ flushState->opsRenderPass()->draw(programInfo, mesh, 1, this->bounds());
+ fIndexCount = 0;
+}
+
+// Will the given corner look good if we use HW derivatives?
+static bool can_use_hw_derivatives_with_coverage(const Sk2f& devScale, const Sk2f& cornerRadii) {
+ Sk2f devRadii = devScale * cornerRadii;
+ if (devRadii[1] < devRadii[0]) {
+ devRadii = SkNx_shuffle<1,0>(devRadii);
+ }
+ float minDevRadius = SkTMax(devRadii[0], 1.f); // Shader clamps radius at a minimum of 1.
+    // Will the gradient be smooth enough for this corner to look ok if we use hardware
+    // derivatives? This threshold was arrived at subjectively on an NVIDIA chip.
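+    // (Example: with devScale == 1, radii of (0.5, 10) clamp minDevRadius to 1, and
+    // 1 * 1 * 5 > 10 fails, so such a highly elliptical corner avoids HW derivatives.)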
+ return minDevRadius * minDevRadius * 5 > devRadii[1];
+}
+
+static bool can_use_hw_derivatives_with_coverage(
+ const Sk2f& devScale, const SkVector& cornerRadii) {
+ return can_use_hw_derivatives_with_coverage(devScale, Sk2f::Load(&cornerRadii));
+}
+
+// Will the given round rect look good if we use HW derivatives?
+static bool can_use_hw_derivatives_with_coverage(
+ const GrShaderCaps& shaderCaps, const SkMatrix& viewMatrix, const SkRRect& rrect) {
+ if (!shaderCaps.shaderDerivativeSupport()) {
+ return false;
+ }
+
+ Sk2f x = Sk2f(viewMatrix.getScaleX(), viewMatrix.getSkewX());
+ Sk2f y = Sk2f(viewMatrix.getSkewY(), viewMatrix.getScaleY());
+ Sk2f devScale = (x*x + y*y).sqrt();
+ switch (rrect.getType()) {
+ case SkRRect::kEmpty_Type:
+ case SkRRect::kRect_Type:
+ return true;
+
+ case SkRRect::kOval_Type:
+ case SkRRect::kSimple_Type:
+ return can_use_hw_derivatives_with_coverage(devScale, rrect.getSimpleRadii());
+
+ case SkRRect::kNinePatch_Type: {
+ Sk2f r0 = Sk2f::Load(SkRRectPriv::GetRadiiArray(rrect));
+ Sk2f r1 = Sk2f::Load(SkRRectPriv::GetRadiiArray(rrect) + 2);
+ Sk2f minRadii = Sk2f::Min(r0, r1);
+ Sk2f maxRadii = Sk2f::Max(r0, r1);
+ return can_use_hw_derivatives_with_coverage(devScale, Sk2f(minRadii[0], maxRadii[1])) &&
+ can_use_hw_derivatives_with_coverage(devScale, Sk2f(maxRadii[0], minRadii[1]));
+ }
+
+ case SkRRect::kComplex_Type: {
+ for (int i = 0; i < 4; ++i) {
+ auto corner = static_cast<SkRRect::Corner>(i);
+ if (!can_use_hw_derivatives_with_coverage(devScale, rrect.radii(corner))) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+ SK_ABORT("Invalid round rect type.");
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.h b/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.h
new file mode 100644
index 0000000000..c3e759947a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrFillRRectOp.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFillRRectOp_DEFINED
+#define GrFillRRectOp_DEFINED
+
+#include "src/gpu/ops/GrDrawOp.h"
+
+class GrRecordingContext;
+
+class GrFillRRectOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrFillRRectOp> Make(
+ GrRecordingContext*, GrAAType, const SkMatrix& viewMatrix, const SkRRect&,
+ const GrCaps&, GrPaint&&);
+
+ const char* name() const override { return "GrFillRRectOp"; }
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ return (GrAAType::kMSAA == fAAType)
+ ? FixedFunctionFlags::kUsesHWAA
+ : FixedFunctionFlags::kNone;
+ }
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override;
+ CombineResult onCombineIfPossible(GrOp*, const GrCaps&) override;
+ void visitProxies(const VisitProxyFunc& fn) const override {
+ fProcessors.visitProxies(fn);
+ }
+ void onPrepare(GrOpFlushState*) override;
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+private:
+ enum class Flags {
+ kNone = 0,
+ kUseHWDerivatives = 1 << 0,
+ kHasPerspective = 1 << 1,
+ kHasLocalCoords = 1 << 2,
+ kWideColor = 1 << 3
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(Flags);
+
+ class Processor;
+
+ GrFillRRectOp(GrAAType, const SkRRect&, Flags, const SkMatrix& totalShapeMatrix,
+ GrPaint&&, const SkRect& devBounds);
+
+ // These methods are used to append data of various POD types to our internal array of instance
+ // data. The actual layout of the instance buffer can vary from Op to Op.
+ template <typename T> inline T* appendInstanceData(int count) {
+ static_assert(std::is_pod<T>::value, "");
+ static_assert(4 == alignof(T), "");
+ return reinterpret_cast<T*>(fInstanceData.push_back_n(sizeof(T) * count));
+ }
+
+ template <typename T, typename... Args>
+ inline void writeInstanceData(const T& val, const Args&... remainder) {
+ memcpy(this->appendInstanceData<T>(1), &val, sizeof(T));
+ this->writeInstanceData(remainder...);
+ }
+
+ void writeInstanceData() {} // Halt condition.
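+    // E.g. writeInstanceData(scaleX, skewX) memcpy's scaleX, recurses on (skewX),
+    // memcpy's skewX, then recurses on () and stops at the empty overload above.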
+
+ const GrAAType fAAType;
+ const SkPMColor4f fOriginalColor;
+ const SkRect fLocalRect;
+ Flags fFlags;
+ GrProcessorSet fProcessors;
+
+ SkSTArray<sizeof(float) * 16 * 4, char, /*MEM_MOVE=*/ true> fInstanceData;
+ int fInstanceCount = 1;
+ int fInstanceStride = 0;
+
+ sk_sp<const GrBuffer> fInstanceBuffer;
+ sk_sp<const GrBuffer> fVertexBuffer;
+ sk_sp<const GrBuffer> fIndexBuffer;
+ int fBaseInstance;
+ int fIndexCount = 0;
+
+ friend class GrOpMemoryPool;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrFillRRectOp::Flags)
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrFillRectOp.cpp b/gfx/skia/skia/src/gpu/ops/GrFillRectOp.cpp
new file mode 100644
index 0000000000..560adc0a97
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrFillRectOp.cpp
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrFillRectOp.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrPaint.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/geometry/GrQuadBuffer.h"
+#include "src/gpu/geometry/GrQuadUtils.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrQuadPerEdgeAA.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+namespace {
+
+using VertexSpec = GrQuadPerEdgeAA::VertexSpec;
+using ColorType = GrQuadPerEdgeAA::ColorType;
+
+#ifdef SK_DEBUG
+static SkString dump_quad_info(int index, const GrQuad& deviceQuad,
+ const GrQuad& localQuad, const SkPMColor4f& color,
+ GrQuadAAFlags aaFlags) {
+ SkString str;
+ str.appendf("%d: Color: [%.2f, %.2f, %.2f, %.2f], Edge AA: l%u_t%u_r%u_b%u, \n"
+                "  device quad: [(%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), "
+ "(%.2f, %.2f, %.2f)],\n"
+                "  local quad: [(%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), "
+ "(%.2f, %.2f, %.2f)]\n",
+ index, color.fR, color.fG, color.fB, color.fA,
+ (uint32_t) (aaFlags & GrQuadAAFlags::kLeft),
+ (uint32_t) (aaFlags & GrQuadAAFlags::kTop),
+ (uint32_t) (aaFlags & GrQuadAAFlags::kRight),
+ (uint32_t) (aaFlags & GrQuadAAFlags::kBottom),
+ deviceQuad.x(0), deviceQuad.y(0), deviceQuad.w(0),
+ deviceQuad.x(1), deviceQuad.y(1), deviceQuad.w(1),
+ deviceQuad.x(2), deviceQuad.y(2), deviceQuad.w(2),
+ deviceQuad.x(3), deviceQuad.y(3), deviceQuad.w(3),
+ localQuad.x(0), localQuad.y(0), localQuad.w(0),
+ localQuad.x(1), localQuad.y(1), localQuad.w(1),
+ localQuad.x(2), localQuad.y(2), localQuad.w(2),
+ localQuad.x(3), localQuad.y(3), localQuad.w(3));
+ return str;
+}
+#endif
+
+class FillRectOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ GrQuadAAFlags edgeAA,
+ const GrUserStencilSettings* stencilSettings,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad) {
+ // Clean up deviations between aaType and edgeAA
+ GrQuadUtils::ResolveAAType(aaType, edgeAA, deviceQuad, &aaType, &edgeAA);
+ return Helper::FactoryHelper<FillRectOp>(context, std::move(paint), aaType, edgeAA,
+ stencilSettings, deviceQuad, localQuad);
+ }
+
+ // aaType is passed to Helper in the initializer list, so incongruities between aaType and
+ // edgeFlags must be resolved prior to calling this constructor.
+ FillRectOp(Helper::MakeArgs args, SkPMColor4f paintColor, GrAAType aaType,
+ GrQuadAAFlags edgeFlags, const GrUserStencilSettings* stencil,
+ const GrQuad& deviceQuad, const GrQuad& localQuad)
+ : INHERITED(ClassID())
+ , fHelper(args, aaType, stencil)
+ , fQuads(1, !fHelper.isTrivial()) {
+ // Conservatively keep track of the local coordinates; it may be that the paint doesn't
+ // need them after analysis is finished. If the paint is known to be solid up front they
+ // can be skipped entirely.
+ fQuads.append(deviceQuad, { paintColor, edgeFlags },
+ fHelper.isTrivial() ? nullptr : &localQuad);
+ this->setBounds(deviceQuad.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
+ IsHairline::kNo);
+ }
+
+ const char* name() const override { return "FillRectOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ return fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# draws: %u\n", fQuads.count());
+ str.appendf("Device quad type: %u, local quad type: %u\n",
+ (uint32_t) fQuads.deviceQuadType(), (uint32_t) fQuads.localQuadType());
+ str += fHelper.dumpInfo();
+ int i = 0;
+ auto iter = fQuads.iterator();
+        while (iter.next()) {
+ const ColorAndAA& info = iter.metadata();
+ str += dump_quad_info(i, iter.deviceQuad(), iter.localQuad(),
+ info.fColor, info.fAAFlags);
+ i++;
+ }
+ str += INHERITED::dumpInfo();
+ return str;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ // Initialize aggregate color analysis with the first quad's color (which always exists)
+ auto iter = fQuads.metadata();
+ SkAssertResult(iter.next());
+ GrProcessorAnalysisColor quadColors(iter->fColor);
+ // Then combine the colors of any additional quads (e.g. from MakeSet)
+        while (iter.next()) {
+ quadColors = GrProcessorAnalysisColor::Combine(quadColors, iter->fColor);
+ if (quadColors.isUnknown()) {
+ // No point in accumulating additional starting colors, combining cannot make it
+ // less unknown.
+ break;
+ }
+ }
+
+ // If the AA type is coverage, it will be a single value per pixel; if it's not coverage AA
+ // then the coverage is always 1.0, so specify kNone for more optimal blending.
+ GrProcessorAnalysisCoverage coverage = fHelper.aaType() == GrAAType::kCoverage ?
+ GrProcessorAnalysisCoverage::kSingleChannel :
+ GrProcessorAnalysisCoverage::kNone;
+ auto result = fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, coverage, &quadColors);
+ // If there is a constant color after analysis, that means all of the quads should be set
+ // to the same color (even if they started out with different colors).
+ iter = fQuads.metadata();
+ SkPMColor4f colorOverride;
+ if (quadColors.isConstant(&colorOverride)) {
+ fColorType = GrQuadPerEdgeAA::MinColorType(colorOverride, clampType, caps);
+            while (iter.next()) {
+ iter->fColor = colorOverride;
+ }
+ } else {
+ // Otherwise compute the color type needed as the max over all quads.
+ fColorType = ColorType::kNone;
+            while (iter.next()) {
+ fColorType = SkTMax(fColorType,
+ GrQuadPerEdgeAA::MinColorType(iter->fColor, clampType, caps));
+ }
+ }
+ // Most SkShaders' FPs multiply their calculated color by the paint color or alpha. We want
+ // to use ColorType::kNone to optimize out that multiply. However, if there are no color
+        // FPs then we're really writing a special shader for white rectangles and not saving any
+        // multiplies. So in that case use bytes to avoid the extra shader (and possibly work around
+ // an ANGLE issue: crbug.com/942565).
+ if (fColorType == ColorType::kNone && !result.hasColorFragmentProcessor()) {
+ fColorType = ColorType::kByte;
+ }
+
+ return result;
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ // Since the AA type of the whole primitive is kept consistent with the per edge AA flags
+ // the helper's fixed function flags are appropriate.
+ return fHelper.fixedFunctionFlags();
+ }
+
+ DEFINE_OP_CLASS_ID
+
+private:
+ // For GrFillRectOp::MakeSet's use of addQuad
+ friend std::unique_ptr<GrDrawOp> GrFillRectOp::MakeSet(
+ GrRecordingContext*,
+ GrPaint&&,
+ GrAAType, const SkMatrix& viewMatrix,
+ const GrRenderTargetContext::QuadSetEntry quads[], int quadCount,
+ const GrUserStencilSettings*);
+
+ void onPrepareDraws(Target* target) override {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ using Domain = GrQuadPerEdgeAA::Domain;
+ static constexpr SkRect kEmptyDomain = SkRect::MakeEmpty();
+
+ VertexSpec vertexSpec(fQuads.deviceQuadType(), fColorType, fQuads.localQuadType(),
+ fHelper.usesLocalCoords(), Domain::kNo, fHelper.aaType(),
+ fHelper.compatibleWithCoverageAsAlpha());
+ // Make sure that if the op thought it was a solid color, the vertex spec does not use
+ // local coords.
+ SkASSERT(!fHelper.isTrivial() || !fHelper.usesLocalCoords());
+
+ sk_sp<GrGeometryProcessor> gp = GrQuadPerEdgeAA::MakeProcessor(vertexSpec);
+ size_t vertexSize = gp->vertexStride();
+
+ sk_sp<const GrBuffer> vbuffer;
+ int vertexOffsetInBuffer = 0;
+
+ // Fill the allocated vertex data
+ void* vdata = target->makeVertexSpace(
+ vertexSize, fQuads.count() * vertexSpec.verticesPerQuad(),
+ &vbuffer, &vertexOffsetInBuffer);
+ if (!vdata) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ // vertices pointer advances through vdata based on Tessellate's return value
+ void* vertices = vdata;
+ auto iter = fQuads.iterator();
+        while (iter.next()) {
+ // All entries should have local coords, or no entries should have local coords,
+ // matching !helper.isTrivial() (which is more conservative than helper.usesLocalCoords)
+ SkASSERT(iter.isLocalValid() != fHelper.isTrivial());
+ auto info = iter.metadata();
+ vertices = GrQuadPerEdgeAA::Tessellate(vertices, vertexSpec, iter.deviceQuad(),
+ info.fColor, iter.localQuad(), kEmptyDomain, info.fAAFlags);
+ }
+
+ // Configure the mesh for the vertex data
+ GrMesh* mesh = target->allocMeshes(1);
+ if (!GrQuadPerEdgeAA::ConfigureMeshIndices(target, mesh, vertexSpec, fQuads.count())) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ mesh->setVertexData(std::move(vbuffer), vertexOffsetInBuffer);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ const auto* that = t->cast<FillRectOp>();
+
+ if ((fHelper.aaType() == GrAAType::kCoverage ||
+ that->fHelper.aaType() == GrAAType::kCoverage) &&
+ fQuads.count() + that->fQuads.count() > GrQuadPerEdgeAA::kNumAAQuadsInIndexBuffer) {
+ // This limit on batch size seems to help on Adreno devices
+ return CombineResult::kCannotCombine;
+ }
+
+        // Unlike most users of the draw op helper, this op can merge non-AA (kNone) and
+        // coverage-AA draw ops together, so pass true as the last argument.
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds(), true)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // If the paints were compatible, the trivial/solid-color state should be the same
+ SkASSERT(fHelper.isTrivial() == that->fHelper.isTrivial());
+
+        // If the processor sets are compatible, the two ops are always compatible; this op
+        // just needs to adjust its state to the more general quad and aa types of the two
+        // ops and then concatenate the per-quad data.
+ fColorType = SkTMax(fColorType, that->fColorType);
+
+ // The helper stores the aa type, but isCompatible(with true arg) allows the two ops' aa
+ // types to be none and coverage, in which case this op's aa type must be lifted to coverage
+ // so that quads with no aa edges can be batched with quads that have some/all edges aa'ed.
+ if (fHelper.aaType() == GrAAType::kNone && that->fHelper.aaType() == GrAAType::kCoverage) {
+ fHelper.setAAType(GrAAType::kCoverage);
+ }
+
+ fQuads.concat(that->fQuads);
+ return CombineResult::kMerged;
+ }
+
+ // Similar to onCombineIfPossible, but adds a quad assuming its op would have been compatible.
+ // But since it's avoiding the op list management, it must update the op's bounds. This is only
+    // used with quad sets, which use the same view matrix for each quad, so this assumes that the
+ // device quad type of the new quad is the same as the op's.
+ void addQuad(const GrQuad& deviceQuad, const GrQuad& localQuad,
+ const SkPMColor4f& color, GrQuadAAFlags edgeAA, GrAAType aaType) {
+ // The new quad's aa type should be the same as the first quad's or none, except when the
+ // first quad's aa type was already downgraded to none, in which case the stored type must
+        // be lifted back to the requested type.
+ if (aaType != fHelper.aaType()) {
+ if (aaType != GrAAType::kNone) {
+ // Original quad was downgraded to non-aa, lift back up to this quad's required type
+ SkASSERT(fHelper.aaType() == GrAAType::kNone);
+ fHelper.setAAType(aaType);
+ }
+ // else the new quad could have been downgraded but the other quads can't be, so don't
+ // reset the op's accumulated aa type.
+ }
+
+ // Update the bounds and add the quad to this op's storage
+ SkRect newBounds = this->bounds();
+ newBounds.joinPossiblyEmptyRect(deviceQuad.bounds());
+ this->setBounds(newBounds, HasAABloat(fHelper.aaType() == GrAAType::kCoverage),
+ IsHairline::kNo);
+ fQuads.append(deviceQuad, { color, edgeAA }, fHelper.isTrivial() ? nullptr : &localQuad);
+ }
+
+ struct ColorAndAA {
+ SkPMColor4f fColor;
+ GrQuadAAFlags fAAFlags;
+ };
+
+ Helper fHelper;
+ GrQuadBuffer<ColorAndAA> fQuads;
+
+ ColorType fColorType;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+namespace GrFillRectOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ GrQuadAAFlags aaFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const GrUserStencilSettings* stencil) {
+ return FillRectOp::Make(context, std::move(paint), aaType, aaFlags, stencil,
+ deviceQuad, localQuad);
+}
+
+std::unique_ptr<GrDrawOp> MakeNonAARect(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& view,
+ const SkRect& rect,
+ const GrUserStencilSettings* stencil) {
+ return FillRectOp::Make(context, std::move(paint), GrAAType::kNone, GrQuadAAFlags::kNone,
+ stencil, GrQuad::MakeFromRect(rect, view), GrQuad(rect));
+}
+
+std::unique_ptr<GrDrawOp> MakeSet(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ const SkMatrix& viewMatrix,
+ const GrRenderTargetContext::QuadSetEntry quads[],
+ int cnt,
+ const GrUserStencilSettings* stencilSettings) {
+ // First make a draw op for the first quad in the set
+ SkASSERT(cnt > 0);
+
+ paint.setColor4f(quads[0].fColor);
+ std::unique_ptr<GrDrawOp> op = FillRectOp::Make(context, std::move(paint), aaType,
+ quads[0].fAAFlags, stencilSettings,
+ GrQuad::MakeFromRect(quads[0].fRect, viewMatrix),
+ GrQuad::MakeFromRect(quads[0].fRect, quads[0].fLocalMatrix));
+ auto* fillRects = op->cast<FillRectOp>();
+
+ // Accumulate remaining quads similar to onCombineIfPossible() without creating an op
+ for (int i = 1; i < cnt; ++i) {
+ GrQuad deviceQuad = GrQuad::MakeFromRect(quads[i].fRect, viewMatrix);
+
+ GrAAType resolvedAA;
+ GrQuadAAFlags resolvedEdgeFlags;
+ GrQuadUtils::ResolveAAType(aaType, quads[i].fAAFlags, deviceQuad,
+ &resolvedAA, &resolvedEdgeFlags);
+
+ fillRects->addQuad(deviceQuad,
+ GrQuad::MakeFromRect(quads[i].fRect, quads[i].fLocalMatrix),
+                           quads[i].fColor, resolvedEdgeFlags, resolvedAA);
+ }
+
+ return op;
+}
+
+} // namespace GrFillRectOp
+
+#if GR_TEST_UTILS
+
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/SkGr.h"
+
+GR_DRAW_OP_TEST_DEFINE(FillRectOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkRect rect = GrTest::TestRect(random);
+
+ GrAAType aaType = GrAAType::kNone;
+ if (random->nextBool()) {
+ aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
+ }
+ const GrUserStencilSettings* stencil = random->nextBool() ? nullptr
+ : GrGetRandomStencil(random, context);
+
+ GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
+
+ if (random->nextBool()) {
+ if (random->nextBool()) {
+ if (random->nextBool()) {
+ // Local matrix with a set op
+ uint32_t extraQuadCt = random->nextRangeU(1, 4);
+ SkTArray<GrRenderTargetContext::QuadSetEntry> quads(extraQuadCt + 1);
+ quads.push_back(
+ {rect, SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU())),
+ GrTest::TestMatrixInvertible(random), aaFlags});
+ for (uint32_t i = 0; i < extraQuadCt; ++i) {
+ GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
+
+ quads.push_back(
+ {GrTest::TestRect(random),
+ SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU())),
+ GrTest::TestMatrixInvertible(random), aaFlags});
+ }
+
+ return GrFillRectOp::MakeSet(context, std::move(paint), aaType, viewMatrix,
+ quads.begin(), quads.count(), stencil);
+ } else {
+ // Single local matrix
+ SkMatrix localMatrix = GrTest::TestMatrixInvertible(random);
+ return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
+ GrQuad::MakeFromRect(rect, viewMatrix),
+ GrQuad::MakeFromRect(rect, localMatrix), stencil);
+ }
+ } else {
+ // Pass local rect directly
+ SkRect localRect = GrTest::TestRect(random);
+ return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
+ GrQuad::MakeFromRect(rect, viewMatrix),
+ GrQuad(localRect), stencil);
+ }
+ } else {
+ // The simplest constructor
+ return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
+ GrQuad::MakeFromRect(rect, viewMatrix),
+ GrQuad(rect), stencil);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrFillRectOp.h b/gfx/skia/skia/src/gpu/ops/GrFillRectOp.h
new file mode 100644
index 0000000000..768ed8bc78
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrFillRectOp.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFillRectOp_DEFINED
+#define GrFillRectOp_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+
+class GrDrawOp;
+class GrPaint;
+class GrQuad;
+class GrRecordingContext;
+struct GrUserStencilSettings;
+class SkMatrix;
+struct SkRect;
+
+/**
+ * A set of factory functions for drawing filled rectangles either coverage-antialiased, or
+ * non-antialiased. The non-antialiased ops can be used with MSAA. As with other GrDrawOp factories,
+ * the GrPaint is only consumed by these methods if a valid op is returned. If null is returned then
+ * the paint is unmodified and may still be used.
+ */
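+// Illustrative sketch (hypothetical caller, not part of the upstream header): since
+// the paint is consumed only when a valid op is returned, a caller can recover and
+// reuse the paint when a factory fails:
+//
+//   std::unique_ptr<GrDrawOp> op = GrFillRectOp::Make(context, std::move(paint),
+//                                                     aaType, aaFlags,
+//                                                     deviceQuad, localQuad);
+//   if (!op) {
+//       // 'paint' was not consumed and may still drive a fallback drawing path.
+//   }
+//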
+namespace GrFillRectOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ GrQuadAAFlags aaFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const GrUserStencilSettings* stencil = nullptr);
+
+// Utility function to create a non-AA rect transformed by view. This is used commonly enough in
+// testing and GMs that manage ops without going through GrRTC that it's worth the convenience.
+std::unique_ptr<GrDrawOp> MakeNonAARect(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& view,
+ const SkRect& rect,
+ const GrUserStencilSettings* stencil = nullptr);
+
+// Bulk API for drawing quads with a single op
+// TODO(michaelludwig) - remove if the bulk API is not useful for SkiaRenderer
+std::unique_ptr<GrDrawOp> MakeSet(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ const SkMatrix& viewMatrix,
+ const GrRenderTargetContext::QuadSetEntry quads[],
+ int quadCount,
+ const GrUserStencilSettings* stencil = nullptr);
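+
+// Illustrative MakeSet call (a sketch; the rects, colors, and matrices are
+// hypothetical):
+//
+//   GrRenderTargetContext::QuadSetEntry entries[2] = {
+//       {rectA, colorA, SkMatrix::I(), GrQuadAAFlags::kAll},
+//       {rectB, colorB, SkMatrix::I(), GrQuadAAFlags::kNone},
+//   };
+//   auto op = GrFillRectOp::MakeSet(context, std::move(paint), GrAAType::kCoverage,
+//                                   viewMatrix, entries, 2);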
+
+} // namespace GrFillRectOp
+
+#endif // GrFillRectOp_DEFINED
diff --git a/gfx/skia/skia/src/gpu/ops/GrLatticeOp.cpp b/gfx/skia/skia/src/gpu/ops/GrLatticeOp.cpp
new file mode 100644
index 0000000000..adcfe8c0b8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrLatticeOp.cpp
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrResourceProviderPriv.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/ops/GrLatticeOp.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+namespace {
+
+class LatticeGP : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrGpu* gpu,
+ const GrTextureProxy* proxy,
+ sk_sp<GrColorSpaceXform> csxf,
+ GrSamplerState::Filter filter,
+ bool wideColor) {
+ return sk_sp<GrGeometryProcessor>(
+ new LatticeGP(gpu, proxy, std::move(csxf), filter, wideColor));
+ }
+
+ const char* name() const override { return "LatticeGP"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ b->add32(GrColorSpaceXform::XformKey(fColorSpaceXform.get()));
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps& caps) const override {
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+ const auto& latticeGP = proc.cast<LatticeGP>();
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ fColorSpaceXformHelper.setData(pdman, latticeGP.fColorSpaceXform.get());
+ }
+
+ private:
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ using Interpolation = GrGLSLVaryingHandler::Interpolation;
+ const auto& latticeGP = args.fGP.cast<LatticeGP>();
+ fColorSpaceXformHelper.emitCode(args.fUniformHandler,
+ latticeGP.fColorSpaceXform.get());
+
+ args.fVaryingHandler->emitAttributes(latticeGP);
+ this->writeOutputPosition(args.fVertBuilder, gpArgs, latticeGP.fInPosition.name());
+ this->emitTransforms(args.fVertBuilder,
+ args.fVaryingHandler,
+ args.fUniformHandler,
+ latticeGP.fInTextureCoords.asShaderVar(),
+ args.fFPCoordTransformHandler);
+ args.fFragBuilder->codeAppend("float2 textureCoords;");
+ args.fVaryingHandler->addPassThroughAttribute(latticeGP.fInTextureCoords,
+ "textureCoords");
+ args.fFragBuilder->codeAppend("float4 textureDomain;");
+ args.fVaryingHandler->addPassThroughAttribute(
+ latticeGP.fInTextureDomain, "textureDomain", Interpolation::kCanBeFlat);
+ args.fVaryingHandler->addPassThroughAttribute(latticeGP.fInColor,
+ args.fOutputColor,
+ Interpolation::kCanBeFlat);
+ args.fFragBuilder->codeAppendf("%s = ", args.fOutputColor);
+ args.fFragBuilder->appendTextureLookupAndModulate(
+ args.fOutputColor,
+ args.fTexSamplers[0],
+ "clamp(textureCoords, textureDomain.xy, textureDomain.zw)",
+ kFloat2_GrSLType,
+ &fColorSpaceXformHelper);
+ args.fFragBuilder->codeAppend(";");
+ args.fFragBuilder->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ }
+ GrGLSLColorSpaceXformHelper fColorSpaceXformHelper;
+ };
+ return new GLSLProcessor;
+ }
+
+private:
+ LatticeGP(GrGpu* gpu, const GrTextureProxy* proxy, sk_sp<GrColorSpaceXform> csxf,
+ GrSamplerState::Filter filter, bool wideColor)
+ : INHERITED(kLatticeGP_ClassID), fColorSpaceXform(std::move(csxf)) {
+
+ GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
+ filter);
+ uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(samplerState,
+ proxy->backendFormat());
+
+ fSampler.reset(proxy->textureType(), samplerState, proxy->textureSwizzle(),
+ extraSamplerKey);
+ this->setTextureSamplerCnt(1);
+ fInPosition = {"position", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInTextureCoords = {"textureCoords", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInTextureDomain = {"textureDomain", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ fInColor = MakeColorAttribute("color", wideColor);
+ this->setVertexAttributes(&fInPosition, 4);
+ }
+
+ const TextureSampler& onTextureSampler(int) const override { return fSampler; }
+
+ Attribute fInPosition;
+ Attribute fInTextureCoords;
+ Attribute fInTextureDomain;
+ Attribute fInColor;
+
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+ TextureSampler fSampler;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+class NonAALatticeOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static const int kVertsPerRect = 4;
+ static const int kIndicesPerRect = 6;
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> colorSpaceXForm,
+ GrSamplerState::Filter filter,
+ std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst) {
+ SkASSERT(proxy);
+ return Helper::FactoryHelper<NonAALatticeOp>(context, std::move(paint), viewMatrix,
+ std::move(proxy), srcColorType,
+ std::move(colorSpaceXForm), filter,
+ std::move(iter), dst);
+ }
+
+ NonAALatticeOp(Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType, sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrSamplerState::Filter filter, std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kNone)
+ , fProxy(std::move(proxy))
+ , fSrcColorType(srcColorType)
+ , fColorSpaceXform(std::move(colorSpaceXform))
+ , fFilter(filter) {
+ Patch& patch = fPatches.push_back();
+ patch.fViewMatrix = viewMatrix;
+ patch.fColor = color;
+ patch.fIter = std::move(iter);
+ patch.fDst = dst;
+
+ // setup bounds
+ this->setTransformedBounds(patch.fDst, viewMatrix, HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ const char* name() const override { return "NonAALatticeOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ bool mipped = (GrSamplerState::Filter::kMipMap == fFilter);
+ func(fProxy.get(), GrMipMapped(mipped));
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+
+ for (int i = 0; i < fPatches.count(); ++i) {
+ str.appendf("%d: Color: 0x%08x Dst [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", i,
+ fPatches[i].fColor.toBytes_RGBA(), fPatches[i].fDst.fLeft,
+ fPatches[i].fDst.fTop, fPatches[i].fDst.fRight, fPatches[i].fDst.fBottom);
+ }
+
+ str += fHelper.dumpInfo();
+ str += INHERITED::dumpInfo();
+ return str;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ auto opaque = fPatches[0].fColor.isOpaque() && !GrColorTypeHasAlpha(fSrcColorType)
+ ? GrProcessorAnalysisColor::Opaque::kYes
+ : GrProcessorAnalysisColor::Opaque::kNo;
+ auto analysisColor = GrProcessorAnalysisColor(opaque);
+ auto result = fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, GrProcessorAnalysisCoverage::kNone,
+ &analysisColor);
+ analysisColor.isConstant(&fPatches[0].fColor);
+ fWideColor = SkPMColor4fNeedsWideColor(fPatches[0].fColor, clampType, caps);
+ return result;
+ }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ GrGpu* gpu = target->resourceProvider()->priv().gpu();
+ auto gp = LatticeGP::Make(gpu, fProxy.get(), fColorSpaceXform, fFilter, fWideColor);
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ int patchCnt = fPatches.count();
+ int numRects = 0;
+ for (int i = 0; i < patchCnt; i++) {
+ numRects += fPatches[i].fIter->numRectsToDraw();
+ }
+
+ if (!numRects) {
+ return;
+ }
+
+ const size_t kVertexStride = gp->vertexStride();
+ sk_sp<const GrBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer();
+ if (!indexBuffer) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ PatternHelper helper(target, GrPrimitiveType::kTriangles, kVertexStride,
+ std::move(indexBuffer), kVertsPerRect, kIndicesPerRect, numRects);
+ GrVertexWriter vertices{helper.vertices()};
+ if (!vertices.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < patchCnt; i++) {
+ const Patch& patch = fPatches[i];
+
+ GrVertexColor patchColor(patch.fColor, fWideColor);
+
+ // Apply the view matrix here if it is scale-translate. Otherwise, we need to
+ // wait until we've created the dst rects.
+ bool isScaleTranslate = patch.fViewMatrix.isScaleTranslate();
+ if (isScaleTranslate) {
+ patch.fIter->mapDstScaleTranslate(patch.fViewMatrix);
+ }
+
+ SkIRect srcR;
+ SkRect dstR;
+ SkPoint* patchPositions = reinterpret_cast<SkPoint*>(vertices.fPtr);
+ Sk4f scales(1.f / fProxy->width(), 1.f / fProxy->height(),
+ 1.f / fProxy->width(), 1.f / fProxy->height());
+ static const Sk4f kDomainOffsets(0.5f, 0.5f, -0.5f, -0.5f);
+ static const Sk4f kFlipOffsets(0.f, 1.f, 0.f, 1.f);
+ static const Sk4f kFlipMuls(1.f, -1.f, 1.f, -1.f);
+ while (patch.fIter->next(&srcR, &dstR)) {
+ Sk4f coords(SkIntToScalar(srcR.fLeft), SkIntToScalar(srcR.fTop),
+ SkIntToScalar(srcR.fRight), SkIntToScalar(srcR.fBottom));
+ Sk4f domain = coords + kDomainOffsets;
+ coords *= scales;
+ domain *= scales;
+ if (fProxy->origin() == kBottomLeft_GrSurfaceOrigin) {
+ coords = kFlipMuls * coords + kFlipOffsets;
+ domain = SkNx_shuffle<0, 3, 2, 1>(kFlipMuls * domain + kFlipOffsets);
+ }
+ SkRect texDomain;
+ SkRect texCoords;
+ domain.store(&texDomain);
+ coords.store(&texCoords);
+
+ vertices.writeQuad(GrVertexWriter::TriStripFromRect(dstR),
+ GrVertexWriter::TriStripFromRect(texCoords),
+ texDomain,
+ patchColor);
+ }
+
+ // If we didn't handle it above, apply the matrix here.
+ if (!isScaleTranslate) {
+ SkMatrixPriv::MapPointsWithStride(patch.fViewMatrix, patchPositions, kVertexStride,
+ kVertsPerRect * patch.fIter->numRectsToDraw());
+ }
+ }
+ auto fixedDynamicState = target->makeFixedDynamicState(1);
+ fixedDynamicState->fPrimitiveProcessorTextures[0] = fProxy.get();
+ helper.recordDraw(target, std::move(gp), fixedDynamicState);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ NonAALatticeOp* that = t->cast<NonAALatticeOp>();
+ if (fProxy != that->fProxy) {
+ return CombineResult::kCannotCombine;
+ }
+ if (fFilter != that->fFilter) {
+ return CombineResult::kCannotCombine;
+ }
+        // Merging requires equal color space transforms; differing xforms cannot combine.
+        if (!GrColorSpaceXform::Equals(fColorSpaceXform.get(), that->fColorSpaceXform.get())) {
+ return CombineResult::kCannotCombine;
+ }
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fPatches.move_back_n(that->fPatches.count(), that->fPatches.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct Patch {
+ SkMatrix fViewMatrix;
+ std::unique_ptr<SkLatticeIter> fIter;
+ SkRect fDst;
+ SkPMColor4f fColor;
+ };
+
+ Helper fHelper;
+ SkSTArray<1, Patch, true> fPatches;
+ sk_sp<GrTextureProxy> fProxy;
+ GrColorType fSrcColorType;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+ GrSamplerState::Filter fFilter;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+namespace GrLatticeOp {
+std::unique_ptr<GrDrawOp> MakeNonAA(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrSamplerState::Filter filter,
+ std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst) {
+ return NonAALatticeOp::Make(context, std::move(paint), viewMatrix, std::move(proxy),
+ srcColorType, std::move(colorSpaceXform), filter, std::move(iter),
+ dst);
+}
+} // namespace GrLatticeOp
+
+#if GR_TEST_UTILS
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+/** Randomly divides subset into count divs. */
+static void init_random_divs(int divs[], int count, int subsetStart, int subsetStop,
+ SkRandom* random) {
+ // Rules for lattice divs: Must be strictly increasing and in the range
+ // [subsetStart, subsetStop).
+ // Not terribly efficient alg for generating random divs:
+ // 1) Start with minimum legal pixels between each div.
+ // 2) Randomly assign the remaining pixels of the subset to divs.
+ // 3) Convert from pixel counts to div offsets.
+
+ // 1) Initially each divs[i] represents the number of pixels between
+ // div i-1 and i. The initial div is allowed to be at subsetStart. There
+ // must be one pixel spacing between subsequent divs.
+ divs[0] = 0;
+ for (int i = 1; i < count; ++i) {
+ divs[i] = 1;
+ }
+    // 2) Randomly assign the remaining subset pixels to the intervals between divs.
+ int subsetLength = subsetStop - subsetStart;
+ for (int i = 0; i < subsetLength - count; ++i) {
+ // +1 because count divs means count+1 intervals.
+ int entry = random->nextULessThan(count + 1);
+        // We don't have an entry to store the count after the last div.
+ if (entry < count) {
+ divs[entry]++;
+ }
+ }
+ // 3) Now convert the counts between divs to pixel indices, incorporating the subset's offset.
+ int offset = subsetStart;
+ for (int i = 0; i < count; ++i) {
+ divs[i] += offset;
+ offset = divs[i];
+ }
+}
+
+GR_DRAW_OP_TEST_DEFINE(NonAALatticeOp) {
+ SkCanvas::Lattice lattice;
+ // We loop because our random lattice code can produce an invalid lattice in the case where
+ // there is a single div separator in both x and y and both are aligned with the left and top
+ // edge of the image subset, respectively.
+ std::unique_ptr<int[]> xdivs;
+ std::unique_ptr<int[]> ydivs;
+ std::unique_ptr<SkCanvas::Lattice::RectType[]> flags;
+ std::unique_ptr<SkColor[]> colors;
+ SkIRect subset;
+ GrSurfaceDesc desc;
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fWidth = random->nextRangeU(1, 1000);
+ desc.fHeight = random->nextRangeU(1, 1000);
+ GrSurfaceOrigin origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin
+ : kBottomLeft_GrSurfaceOrigin;
+ const GrBackendFormat format =
+ context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
+ GrRenderable::kNo);
+ auto proxy = context->priv().proxyProvider()->createProxy(format,
+ desc,
+ GrRenderable::kNo,
+ 1,
+ origin,
+ GrMipMapped::kNo,
+ SkBackingFit::kExact,
+ SkBudgeted::kYes,
+ GrProtected::kNo);
+
+ do {
+ if (random->nextBool()) {
+ subset.fLeft = random->nextULessThan(desc.fWidth);
+ subset.fRight = random->nextRangeU(subset.fLeft + 1, desc.fWidth);
+ subset.fTop = random->nextULessThan(desc.fHeight);
+ subset.fBottom = random->nextRangeU(subset.fTop + 1, desc.fHeight);
+ } else {
+ subset.setXYWH(0, 0, desc.fWidth, desc.fHeight);
+ }
+ // SkCanvas::Lattice allows bounds to be null. However, SkCanvas creates a temp Lattice with
+ // a non-null bounds before creating a SkLatticeIter since SkLatticeIter requires a bounds.
+ lattice.fBounds = &subset;
+ lattice.fXCount = random->nextRangeU(1, subset.width());
+ lattice.fYCount = random->nextRangeU(1, subset.height());
+ xdivs.reset(new int[lattice.fXCount]);
+ ydivs.reset(new int[lattice.fYCount]);
+ init_random_divs(xdivs.get(), lattice.fXCount, subset.fLeft, subset.fRight, random);
+ init_random_divs(ydivs.get(), lattice.fYCount, subset.fTop, subset.fBottom, random);
+ lattice.fXDivs = xdivs.get();
+ lattice.fYDivs = ydivs.get();
+ bool hasFlags = random->nextBool();
+ if (hasFlags) {
+ int n = (lattice.fXCount + 1) * (lattice.fYCount + 1);
+ flags.reset(new SkCanvas::Lattice::RectType[n]);
+ colors.reset(new SkColor[n]);
+ for (int i = 0; i < n; ++i) {
+ flags[i] = random->nextBool() ? SkCanvas::Lattice::kTransparent
+ : SkCanvas::Lattice::kDefault;
+ }
+ lattice.fRectTypes = flags.get();
+ lattice.fColors = colors.get();
+ } else {
+ lattice.fRectTypes = nullptr;
+ lattice.fColors = nullptr;
+ }
+ } while (!SkLatticeIter::Valid(desc.fWidth, desc.fHeight, lattice));
+ SkRect dst;
+ dst.fLeft = random->nextRangeScalar(-2000.5f, 1000.f);
+ dst.fTop = random->nextRangeScalar(-2000.5f, 1000.f);
+ dst.fRight = dst.fLeft + random->nextRangeScalar(0.5f, 1000.f);
+ dst.fBottom = dst.fTop + random->nextRangeScalar(0.5f, 1000.f);
+ std::unique_ptr<SkLatticeIter> iter(new SkLatticeIter(lattice, dst));
+ SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
+ auto csxf = GrTest::TestColorXform(random);
+ GrSamplerState::Filter filter =
+ random->nextBool() ? GrSamplerState::Filter::kNearest : GrSamplerState::Filter::kBilerp;
+ return NonAALatticeOp::Make(context, std::move(paint), viewMatrix, std::move(proxy),
+ GrColorType::kRGBA_8888, std::move(csxf), filter, std::move(iter),
+ dst);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrLatticeOp.h b/gfx/skia/skia/src/gpu/ops/GrLatticeOp.h
new file mode 100644
index 0000000000..1ea57258a7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrLatticeOp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrLatticeOp_DEFINED
+#define GrLatticeOp_DEFINED
+
+#include <memory>
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrSamplerState.h"
+
+class GrColorSpaceXform;
+class GrDrawOp;
+class GrPaint;
+class SkLatticeIter;
+class GrRecordingContext;
+class GrTextureProxy;
+class SkMatrix;
+struct SkRect;
+
+namespace GrLatticeOp {
+std::unique_ptr<GrDrawOp> MakeNonAA(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform>,
+ GrSamplerState::Filter,
+ std::unique_ptr<SkLatticeIter>,
+ const SkRect& dst);
+} // namespace GrLatticeOp
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.cpp b/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.cpp
new file mode 100644
index 0000000000..0ba8e43ce7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrMeshDrawOp.h"
+
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrResourceProvider.h"
+
+GrMeshDrawOp::GrMeshDrawOp(uint32_t classID) : INHERITED(classID) {}
+
+void GrMeshDrawOp::onPrepare(GrOpFlushState* state) { this->onPrepareDraws(state); }
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrMeshDrawOp::PatternHelper::PatternHelper(Target* target, GrPrimitiveType primitiveType,
+ size_t vertexStride, sk_sp<const GrBuffer> indexBuffer,
+ int verticesPerRepetition, int indicesPerRepetition,
+ int repeatCount) {
+ this->init(target, primitiveType, vertexStride, std::move(indexBuffer), verticesPerRepetition,
+ indicesPerRepetition, repeatCount);
+}
+
+void GrMeshDrawOp::PatternHelper::init(Target* target, GrPrimitiveType primitiveType,
+ size_t vertexStride, sk_sp<const GrBuffer> indexBuffer,
+ int verticesPerRepetition, int indicesPerRepetition,
+ int repeatCount) {
+ SkASSERT(target);
+ if (!indexBuffer) {
+ return;
+ }
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+ int vertexCount = verticesPerRepetition * repeatCount;
+ fVertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
+ if (!fVertices) {
+ SkDebugf("Vertices could not be allocated for patterned rendering.");
+ return;
+ }
+ SkASSERT(vertexBuffer);
+ size_t ibSize = indexBuffer->size();
+ int maxRepetitions = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerRepetition));
+ fMesh = target->allocMesh(primitiveType);
+ fMesh->setIndexedPatterned(std::move(indexBuffer), indicesPerRepetition, verticesPerRepetition,
+ repeatCount, maxRepetitions);
+ fMesh->setVertexData(std::move(vertexBuffer), firstVertex);
+}
+
+void GrMeshDrawOp::PatternHelper::recordDraw(
+ Target* target, sk_sp<const GrGeometryProcessor> gp) const {
+ target->recordDraw(std::move(gp), fMesh);
+}
+
+void GrMeshDrawOp::PatternHelper::recordDraw(
+ Target* target, sk_sp<const GrGeometryProcessor> gp,
+ const GrPipeline::FixedDynamicState* fixedDynamicState) const {
+ target->recordDraw(std::move(gp), fMesh, 1, fixedDynamicState, nullptr);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrMeshDrawOp::QuadHelper::QuadHelper(Target* target, size_t vertexStride, int quadsToDraw) {
+ sk_sp<const GrGpuBuffer> quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
+ if (!quadIndexBuffer) {
+ SkDebugf("Could not get quad index buffer.");
+ return;
+ }
+ this->init(target, GrPrimitiveType::kTriangles, vertexStride, std::move(quadIndexBuffer),
+ kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrPipeline::DynamicStateArrays* GrMeshDrawOp::Target::allocDynamicStateArrays(
+ int numMeshes, int numPrimitiveProcessorTextures, bool allocScissors) {
+ auto result = this->allocator()->make<GrPipeline::DynamicStateArrays>();
+ if (allocScissors) {
+ result->fScissorRects = this->allocator()->makeArray<SkIRect>(numMeshes);
+ }
+ if (numPrimitiveProcessorTextures) {
+ result->fPrimitiveProcessorTextures =
+ this->allocator()->makeArrayDefault<GrTextureProxy*>(
+ numPrimitiveProcessorTextures * numMeshes);
+ }
+ return result;
+}
+
+GrPipeline::FixedDynamicState* GrMeshDrawOp::Target::makeFixedDynamicState(
+ int numPrimProcTextures) {
+ const GrAppliedClip* clip = this->appliedClip();
+ if ((clip && clip->scissorState().enabled()) || numPrimProcTextures) {
+ const SkIRect& scissor = (clip) ? clip->scissorState().rect() : SkIRect::MakeEmpty();
+ auto fixedDynamicState =
+ this->allocator()->make<GrPipeline::FixedDynamicState>(scissor);
+ if (numPrimProcTextures) {
+ fixedDynamicState->fPrimitiveProcessorTextures =
+ this->allocator()->makeArrayDefault<GrTextureProxy*>(numPrimProcTextures);
+ }
+ return fixedDynamicState;
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.h b/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.h
new file mode 100644
index 0000000000..6bdb63768a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrMeshDrawOp.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMeshDrawOp_DEFINED
+#define GrMeshDrawOp_DEFINED
+
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/ops/GrDrawOp.h"
+#include <type_traits>
+
+class GrAtlasManager;
+class GrCaps;
+class GrStrikeCache;
+class GrOpFlushState;
+
+/**
+ * Base class for mesh-drawing GrDrawOps.
+ */
+class GrMeshDrawOp : public GrDrawOp {
+public:
+ /** Abstract interface that represents a destination for a GrMeshDrawOp. */
+ class Target;
+
+protected:
+ GrMeshDrawOp(uint32_t classID);
+
+ /** Helper for rendering repeating meshes using a patterned index buffer. This class creates the
+ space for the vertices and flushes the draws to the GrMeshDrawOp::Target. */
+ class PatternHelper {
+ public:
+ PatternHelper(Target*, GrPrimitiveType, size_t vertexStride,
+ sk_sp<const GrBuffer> indexBuffer, int verticesPerRepetition,
+ int indicesPerRepetition, int repeatCount);
+
+ /** Called to issue draws to the GrMeshDrawOp::Target.*/
+ void recordDraw(Target*, sk_sp<const GrGeometryProcessor>) const;
+ void recordDraw(Target*, sk_sp<const GrGeometryProcessor>,
+ const GrPipeline::FixedDynamicState*) const;
+
+ void* vertices() const { return fVertices; }
+
+ protected:
+ PatternHelper() = default;
+ void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer> indexBuffer,
+ int verticesPerRepetition, int indicesPerRepetition, int repeatCount);
+
+ private:
+ void* fVertices = nullptr;
+ GrMesh* fMesh = nullptr;
+ };
+
+ static const int kVerticesPerQuad = 4;
+ static const int kIndicesPerQuad = 6;
+
+    /** A specialization of PatternHelper for quad rendering. */
+ class QuadHelper : private PatternHelper {
+ public:
+ QuadHelper() = delete;
+ QuadHelper(Target* target, size_t vertexStride, int quadsToDraw);
+
+ using PatternHelper::recordDraw;
+ using PatternHelper::vertices;
+
+ private:
+ typedef PatternHelper INHERITED;
+ };
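+
+    // Illustrative use of QuadHelper from a subclass's onPrepareDraws() (a sketch;
+    // 'gp', 'vertexStride', and 'quadCount' are hypothetical -- GrLatticeOp.cpp in
+    // this patch is a real caller of the PatternHelper equivalent):
+    //
+    //   QuadHelper helper(target, vertexStride, quadCount);
+    //   if (void* verts = helper.vertices()) {
+    //       // ... write quadCount * kVerticesPerQuad vertices at 'verts' ...
+    //       helper.recordDraw(target, std::move(gp));
+    //   }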
+
+private:
+ void onPrePrepare(GrRecordingContext* context) final { this->onPrePrepareDraws(context); }
+ void onPrepare(GrOpFlushState* state) final;
+
+    // Only GrTextureOp currently overrides this virtual
+ virtual void onPrePrepareDraws(GrRecordingContext*) {}
+
+ virtual void onPrepareDraws(Target*) = 0;
+ typedef GrDrawOp INHERITED;
+};
+
+class GrMeshDrawOp::Target {
+public:
+ virtual ~Target() {}
+
+ /** Adds a draw of a mesh. */
+ virtual void recordDraw(
+ sk_sp<const GrGeometryProcessor>, const GrMesh[], int meshCnt,
+ const GrPipeline::FixedDynamicState*, const GrPipeline::DynamicStateArrays*) = 0;
+
+ /**
+ * Helper for drawing GrMesh(es) with zero primProc textures and no dynamic state besides the
+ * scissor clip.
+ */
+ void recordDraw(sk_sp<const GrGeometryProcessor> gp, const GrMesh meshes[], int meshCnt = 1) {
+ static constexpr int kZeroPrimProcTextures = 0;
+ auto fixedDynamicState = this->makeFixedDynamicState(kZeroPrimProcTextures);
+ this->recordDraw(std::move(gp), meshes, meshCnt, fixedDynamicState, nullptr);
+ }
+
+ /**
+ * Makes space for vertex data. The returned pointer is the location where vertex data
+     * should be written. On return, the out-parameters identify the buffer that will hold the
+     * data and the offset into that buffer (in 'vertexSize' units) where the data will be placed.
+ */
+ virtual void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
+ int* startVertex) = 0;
+
+ /**
+ * Makes space for index data. The returned pointer is the location where index data
+     * should be written. On return, the out-parameters identify the buffer that will hold the
+     * data and the offset into that buffer (in uint16_t units) where the data will be placed.
+ */
+ virtual uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) = 0;
+
+ /**
+ * This is similar to makeVertexSpace. It allows the caller to use up to 'actualVertexCount'
+ * vertices in the returned pointer, which may exceed 'minVertexCount'.
+ * 'fallbackVertexCount' is the maximum number of vertices that should be allocated if a new
+ * buffer is allocated on behalf of this request.
+ */
+ virtual void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
+ int fallbackVertexCount, sk_sp<const GrBuffer>*,
+ int* startVertex, int* actualVertexCount) = 0;
+
+ /**
+ * This is similar to makeIndexSpace. It allows the caller to use up to 'actualIndexCount'
+ * indices in the returned pointer, which may exceed 'minIndexCount'.
+ * 'fallbackIndexCount' is the maximum number of indices that should be allocated if a new
+ * buffer is allocated on behalf of this request.
+ */
+ virtual uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
+ sk_sp<const GrBuffer>*, int* startIndex,
+ int* actualIndexCount) = 0;
+
+ /** Helpers for ops which over-allocate and then return excess data to the pool. */
+ virtual void putBackIndices(int indices) = 0;
+ virtual void putBackVertices(int vertices, size_t vertexStride) = 0;
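+
+    // Illustrative over-allocation pattern (a sketch; 'target' is a Target* and
+    // 'stride', 'minCount', 'fallbackCount', and 'written' are hypothetical):
+    //
+    //   sk_sp<const GrBuffer> buffer;
+    //   int firstVertex, actualCount;
+    //   void* verts = target->makeVertexSpaceAtLeast(stride, minCount, fallbackCount,
+    //                                                &buffer, &firstVertex, &actualCount);
+    //   // ... write 'written' vertices, where written <= actualCount ...
+    //   target->putBackVertices(actualCount - written, stride);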
+
+ GrMesh* allocMesh(GrPrimitiveType primitiveType) {
+ return this->allocator()->make<GrMesh>(primitiveType);
+ }
+
+ GrMesh* allocMeshes(int n) { return this->allocator()->makeArray<GrMesh>(n); }
+
+ GrPipeline::DynamicStateArrays* allocDynamicStateArrays(int numMeshes,
+ int numPrimitiveProcessorTextures,
+ bool allocScissors);
+
+ GrPipeline::FixedDynamicState* makeFixedDynamicState(int numPrimitiveProcessorTextures);
+
+ virtual GrRenderTargetProxy* proxy() const = 0;
+
+ virtual const GrAppliedClip* appliedClip() = 0;
+ virtual GrAppliedClip detachAppliedClip() = 0;
+
+ virtual const GrXferProcessor::DstProxy& dstProxy() const = 0;
+
+ virtual GrResourceProvider* resourceProvider() const = 0;
+ uint32_t contextUniqueID() const { return this->resourceProvider()->contextUniqueID(); }
+
+ virtual GrStrikeCache* glyphCache() const = 0;
+ virtual GrAtlasManager* atlasManager() const = 0;
+
+ // This should be called during onPrepare of a GrOp. The caller should add any proxies to the
+ // array it will use that it did not access during a call to visitProxies. This is usually the
+ // case for atlases.
+ virtual SkTArray<GrTextureProxy*, true>* sampledProxyArray() = 0;
+
+ virtual const GrCaps& caps() const = 0;
+
+ virtual GrDeferredUploadTarget* deferredUploadTarget() = 0;
+
+private:
+ virtual SkArenaAlloc* allocator() = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrOp.cpp b/gfx/skia/skia/src/gpu/ops/GrOp.cpp
new file mode 100644
index 0000000000..30e2e44896
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrOp.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrOp.h"
+
+std::atomic<uint32_t> GrOp::gCurrOpClassID {GrOp::kIllegalOpID + 1};
+std::atomic<uint32_t> GrOp::gCurrOpUniqueID{GrOp::kIllegalOpID + 1};
+
+#ifdef SK_DEBUG
+void* GrOp::operator new(size_t size) {
+ // All GrOp-derived class should be allocated in a GrMemoryPool
+ SkASSERT(0);
+ return ::operator new(size);
+}
+
+void GrOp::operator delete(void* target) {
+ // All GrOp-derived class should be released from their owning GrMemoryPool
+ SkASSERT(0);
+ ::operator delete(target);
+}
+#endif
+
+GrOp::GrOp(uint32_t classID) : fClassID(classID) {
+ SkASSERT(classID == SkToU32(fClassID));
+ SkASSERT(classID);
+ SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
+}
+
+GrOp::CombineResult GrOp::combineIfPossible(GrOp* that, const GrCaps& caps) {
+ SkASSERT(this != that);
+ if (this->classID() != that->classID()) {
+ return CombineResult::kCannotCombine;
+ }
+ auto result = this->onCombineIfPossible(that, caps);
+ if (result == CombineResult::kMerged) {
+ this->joinBounds(*that);
+ }
+ return result;
+}
+
+void GrOp::chainConcat(std::unique_ptr<GrOp> next) {
+ SkASSERT(next);
+ SkASSERT(this->classID() == next->classID());
+ SkASSERT(this->isChainTail());
+ SkASSERT(next->isChainHead());
+ fNextInChain = std::move(next);
+ fNextInChain->fPrevInChain = this;
+}
+
+std::unique_ptr<GrOp> GrOp::cutChain() {
+ if (fNextInChain) {
+ fNextInChain->fPrevInChain = nullptr;
+ return std::move(fNextInChain);
+ }
+ return nullptr;
+}
+
+#ifdef SK_DEBUG
+void GrOp::validateChain(GrOp* expectedTail) const {
+ SkASSERT(this->isChainHead());
+ uint32_t classID = this->classID();
+ const GrOp* op = this;
+ while (op) {
+ SkASSERT(op == this || (op->prevInChain() && op->prevInChain()->nextInChain() == op));
+ SkASSERT(classID == op->classID());
+ if (op->nextInChain()) {
+ SkASSERT(op->nextInChain()->prevInChain() == op);
+ SkASSERT(op != expectedTail);
+ } else {
+ SkASSERT(!expectedTail || op == expectedTail);
+ }
+ op = op->nextInChain();
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrOp.h b/gfx/skia/skia/src/gpu/ops/GrOp.h
new file mode 100644
index 0000000000..ac738f9af5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrOp.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOp_DEFINED
+#define GrOp_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/gpu/GrGpuResource.h"
+#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/GrTracing.h"
+#include "src/gpu/GrXferProcessor.h"
+#include <atomic>
+#include <new>
+
+class GrCaps;
+class GrOpFlushState;
+class GrOpsRenderPass;
+
+/**
+ * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
+ * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
+ * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
+ * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
+ * and minimize state changes.
+ *
+ * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
+ * one takes on the union of the data and the other is left empty. The merged op becomes responsible
+ * for drawing the data from both the original ops. When ops are chained each op maintains its own
+ * data but they are linked in a list and the head op becomes responsible for executing the work for
+ * the chain.
+ *
+ * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
+ * it must be the case that any op that can chain with A will either merge or chain with any op
+ * that can chain to B.
+ *
+ * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
+ * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
+ * in turn depend upon the clip.
+ */
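+// A minimal sketch of the merge half of that contract for a hypothetical subclass
+// (not part of this header):
+//
+//   GrOp::CombineResult MyOp::onCombineIfPossible(GrOp* t, const GrCaps&) {
+//       MyOp* that = t->cast<MyOp>();  // matching classID is checked by the caller
+//       if (fColor != that->fColor) {
+//           return CombineResult::kCannotCombine;
+//       }
+//       fGeometry.concat(std::move(that->fGeometry));  // union of both ops' work
+//       return CombineResult::kMerged;  // 'that' is dropped without being flushed
+//   }
+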
+#define GR_OP_SPEW 0
+#if GR_OP_SPEW
+ #define GrOP_SPEW(code) code
+ #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
+#else
+ #define GrOP_SPEW(code)
+ #define GrOP_INFO(...)
+#endif
+
+// Print out op information at flush time
+#define GR_FLUSH_TIME_OP_SPEW 0
+
+// A helper macro to generate a class static id
+#define DEFINE_OP_CLASS_ID \
+ static uint32_t ClassID() { \
+ static uint32_t kClassID = GenOpClassID(); \
+ return kClassID; \
+ }
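+
+// Illustrative use in a hypothetical op (the macro supplies the ClassID() consumed
+// by GrOp::cast<> and combineIfPossible()):
+//
+//   class MyOp final : public GrOp {
+//   public:
+//       DEFINE_OP_CLASS_ID
+//       MyOp() : INHERITED(ClassID()) {}
+//   private:
+//       typedef GrOp INHERITED;
+//   };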
+
+class GrOp : private SkNoncopyable {
+public:
+ virtual ~GrOp() = default;
+
+ virtual const char* name() const = 0;
+
+ using VisitProxyFunc = std::function<void(GrTextureProxy*, GrMipMapped)>;
+
+ virtual void visitProxies(const VisitProxyFunc&) const {
+ // This default implementation assumes the op has no proxies
+ }
+
+ enum class CombineResult {
+ /**
+ * The op that combineIfPossible was called on now represents its own work plus that of
+ * the passed op. The passed op should be destroyed without being flushed. Currently it
+         * is not legal to merge an op passed to combineIfPossible() if the passed op is already in a
+ * chain (though the op on which combineIfPossible() was called may be).
+ */
+ kMerged,
+ /**
+         * The caller *may* (but is not required to) chain these ops together. If they are chained
+ * then prepare() and execute() will be called on the head op but not the other ops in the
+ * chain. The head op will prepare and execute on behalf of all the ops in the chain.
+ */
+ kMayChain,
+ /**
+ * The ops cannot be combined.
+ */
+ kCannotCombine
+ };
+
+ CombineResult combineIfPossible(GrOp* that, const GrCaps& caps);
+
+ const SkRect& bounds() const {
+ SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
+ return fBounds;
+ }
+
+ void setClippedBounds(const SkRect& clippedBounds) {
+ fBounds = clippedBounds;
+ // The clipped bounds already incorporate any effect of the bounds flags.
+ fBoundsFlags = 0;
+ }
+
+ bool hasAABloat() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
+ }
+
+ bool hasZeroArea() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
+ }
+
+#ifdef SK_DEBUG
+ // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+#endif
+
+ /**
+ * Helper for safely down-casting to a GrOp subclass
+ */
+ template <typename T> const T& cast() const {
+ SkASSERT(T::ClassID() == this->classID());
+ return *static_cast<const T*>(this);
+ }
+
+ template <typename T> T* cast() {
+ SkASSERT(T::ClassID() == this->classID());
+ return static_cast<T*>(this);
+ }
+
+ uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
+
+ // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
+ uint32_t uniqueID() const {
+ if (kIllegalOpID == fUniqueID) {
+ fUniqueID = GenOpID();
+ }
+ return fUniqueID;
+ }
+
+ /**
+ * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
+ * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
+ * ahead of time and when it has not been called).
+ */
+ void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }
+
+ /**
+ * Called prior to executing. The op should perform any resource creation or data transfers
+ * necessary before execute() is called.
+ */
+ void prepare(GrOpFlushState* state) { this->onPrepare(state); }
+
+ /** Issues the op's commands to GrGpu. */
+ void execute(GrOpFlushState* state, const SkRect& chainBounds) {
+ TRACE_EVENT0("skia.gpu", name());
+ this->onExecute(state, chainBounds);
+ }
+
+ /** Used for spewing information about ops when debugging. */
+#ifdef SK_DEBUG
+ virtual SkString dumpInfo() const {
+ SkString string;
+ string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ return string;
+ }
+#else
+ SkString dumpInfo() const { return SkString("<Op information unavailable>"); }
+#endif
+
+ /**
+ * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
+ * subclass. E.g.:
+ * for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
+ * // ...
+ * }
+ */
+ template <typename OpSubclass = GrOp> class ChainRange {
+ private:
+ class Iter {
+ public:
+ explicit Iter(const OpSubclass* head) : fCurr(head) {}
+ inline Iter& operator++() {
+ return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
+ }
+ const OpSubclass& operator*() const { return *fCurr; }
+ bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
+
+ private:
+ const OpSubclass* fCurr;
+ };
+ const OpSubclass* fHead;
+
+ public:
+ explicit ChainRange(const OpSubclass* head) : fHead(head) {}
+ Iter begin() { return Iter(fHead); }
+ Iter end() { return Iter(nullptr); }
+ };
+
+ /**
+ * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
+ * must be of the same subclass.
+ */
+ void chainConcat(std::unique_ptr<GrOp>);
+ /** Returns true if this is the head of a chain (including a length 1 chain). */
+ bool isChainHead() const { return !fPrevInChain; }
+ /** Returns true if this is the tail of a chain (including a length 1 chain). */
+ bool isChainTail() const { return !fNextInChain; }
+ /** The next op in the chain. */
+ GrOp* nextInChain() const { return fNextInChain.get(); }
+ /** The previous op in the chain. */
+ GrOp* prevInChain() const { return fPrevInChain; }
+ /**
+ * Cuts the chain after this op. The returned op is the op that was previously next in the
+ * chain or null if this was already a tail.
+ */
+ std::unique_ptr<GrOp> cutChain();
+ SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);
+
+#ifdef SK_DEBUG
+ virtual void validate() const {}
+#endif
+
+protected:
+ GrOp(uint32_t classID);
+
+ /**
+ * Indicates that the op will produce geometry that extends beyond its bounds for the
+ * purpose of ensuring that the fragment shader runs on partially covered pixels for
+ * non-MSAA antialiasing.
+ */
+ enum class HasAABloat : bool {
+ kNo = false,
+ kYes = true
+ };
+ /**
+     * Indicates that the geometry being drawn is a hairline stroke. A point that is drawn in device
+ * space is also considered a hairline.
+ */
+ enum class IsHairline : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
+ fBounds = newBounds;
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
+ void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
+ HasAABloat aabloat, IsHairline zeroArea) {
+ m.mapRect(&fBounds, srcBounds);
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
+ void makeFullScreen(GrSurfaceProxy* proxy) {
+ this->setBounds(SkRect::MakeIWH(proxy->width(), proxy->height()),
+ HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
+
+private:
+ void joinBounds(const GrOp& that) {
+ if (that.hasAABloat()) {
+ fBoundsFlags |= kAABloat_BoundsFlag;
+ }
+ if (that.hasZeroArea()) {
+ fBoundsFlags |= kZeroArea_BoundsFlag;
+ }
+        fBounds.joinPossiblyEmptyRect(that.fBounds);
+ }
+
+ virtual CombineResult onCombineIfPossible(GrOp*, const GrCaps&) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // Only GrMeshDrawOp currently overrides this virtual
+ virtual void onPrePrepare(GrRecordingContext*) {}
+ virtual void onPrepare(GrOpFlushState*) = 0;
+ // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
+ // Otherwise, this op's bounds.
+ virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
+
+ static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
+ uint32_t id = (*idCounter)++;
+ if (id == 0) {
+ SK_ABORT("This should never wrap as it should only be called once for each GrOp "
+ "subclass.");
+ }
+ return id;
+ }
+
+ void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
+ fBoundsFlags = 0;
+ fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
+ fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
+ }
+
+ enum {
+ kIllegalOpID = 0,
+ };
+
+ enum BoundsFlags {
+ kAABloat_BoundsFlag = 0x1,
+ kZeroArea_BoundsFlag = 0x2,
+ SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
+ };
+
+ std::unique_ptr<GrOp> fNextInChain;
+ GrOp* fPrevInChain = nullptr;
+ const uint16_t fClassID;
+ uint16_t fBoundsFlags;
+
+ static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
+ mutable uint32_t fUniqueID = SK_InvalidUniqueID;
+ SkRect fBounds;
+
+ static std::atomic<uint32_t> gCurrOpUniqueID;
+ static std::atomic<uint32_t> gCurrOpClassID;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.cpp b/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.cpp
new file mode 100644
index 0000000000..b32758150f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.cpp
@@ -0,0 +1,3266 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/glsl/GrGLSLUtil.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrOvalOpFactory.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+#include <utility>
+
+namespace {
+
+static inline bool circle_stays_circle(const SkMatrix& m) { return m.isSimilarity(); }
+
+// Produces TriStrip vertex data for an origin-centered rectangle from [-x, -y] to [x, y]
+static inline GrVertexWriter::TriStrip<float> origin_centered_tri_strip(float x, float y) {
+ return GrVertexWriter::TriStrip<float>{ -x, -y, x, y };
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for a circle. It
+ * operates in a space normalized by the circle radius (outer radius in the case of a stroke)
+ * with origin at the circle center. Three vertex attributes are used:
+ * vec2f : position in device space of the bounding geometry vertices
+ * vec4ub: color
+ * vec4f : (p.xy, outerRad, innerRad)
+ * p is the position in the normalized space.
+ * outerRad is the outerRadius in device space.
+ * innerRad is the innerRadius in normalized space (ignored if not stroking).
+ * Additional clip planes are supported for rendering circular arcs. The additional planes are
+ * either intersected or unioned together. Up to three planes are supported (an initial plane,
+ * a plane intersected with the initial plane, and a plane unioned with the first two). Only two
+ * are useful for any given arc, but having all three in one instance allows combining different
+ * types of arcs.
+ * Round caps for stroking are allowed as well. The caps are specified as two circle center points
+ * in the same space as p.xy.
+ */
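+// In that normalized space, the coverage computed by the fragment shader below
+// reduces to (a sketch of the math; names are informal):
+//
+//   d        = length(p.xy)                      // distance from the circle center
+//   outer    = saturate(outerRad * (1 - d))      // fade across the outer edge
+//   inner    = stroked ? saturate(outerRad * (d - innerRad)) : 1
+//   coverage = outer * inner                     // before any clip-plane terms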
+
+class CircleGeometryProcessor : public GrGeometryProcessor {
+public:
+ CircleGeometryProcessor(bool stroke, bool clipPlane, bool isectPlane, bool unionPlane,
+ bool roundCaps, bool wideColor, const SkMatrix& localMatrix)
+ : INHERITED(kCircleGeometryProcessor_ClassID)
+ , fLocalMatrix(localMatrix)
+ , fStroke(stroke) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ fInCircleEdge = {"inCircleEdge", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+
+ if (clipPlane) {
+ fInClipPlane = {"inClipPlane", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ }
+ if (isectPlane) {
+ fInIsectPlane = {"inIsectPlane", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ }
+ if (unionPlane) {
+ fInUnionPlane = {"inUnionPlane", kFloat3_GrVertexAttribType, kHalf3_GrSLType};
+ }
+ if (roundCaps) {
+ SkASSERT(stroke);
+ SkASSERT(clipPlane);
+ fInRoundCapCenters =
+ {"inRoundCapCenters", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ }
+ this->setVertexAttributes(&fInPosition, 7);
+ }
+
+ ~CircleGeometryProcessor() override {}
+
+ const char* name() const override { return "CircleEdge"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const CircleGeometryProcessor& cgp = args.fGP.cast<CircleGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // emit attributes
+ varyingHandler->emitAttributes(cgp);
+ fragBuilder->codeAppend("float4 circleEdge;");
+ varyingHandler->addPassThroughAttribute(cgp.fInCircleEdge, "circleEdge");
+ if (cgp.fInClipPlane.isInitialized()) {
+ fragBuilder->codeAppend("half3 clipPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInClipPlane, "clipPlane");
+ }
+ if (cgp.fInIsectPlane.isInitialized()) {
+ fragBuilder->codeAppend("half3 isectPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInIsectPlane, "isectPlane");
+ }
+ if (cgp.fInUnionPlane.isInitialized()) {
+ SkASSERT(cgp.fInClipPlane.isInitialized());
+ fragBuilder->codeAppend("half3 unionPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInUnionPlane, "unionPlane");
+ }
+ GrGLSLVarying capRadius(kFloat_GrSLType);
+ if (cgp.fInRoundCapCenters.isInitialized()) {
+ fragBuilder->codeAppend("float4 roundCapCenters;");
+ varyingHandler->addPassThroughAttribute(cgp.fInRoundCapCenters, "roundCapCenters");
+ varyingHandler->addVarying("capRadius", &capRadius,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ // This is the cap radius in normalized space where the outer radius is 1 and
+                // circleEdge.w is the normalized inner radius.
+ vertBuilder->codeAppendf("%s = (1.0 - %s.w) / 2.0;", capRadius.vsOut(),
+ cgp.fInCircleEdge.name());
+ }
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(cgp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, cgp.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ cgp.fInPosition.asShaderVar(),
+ cgp.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppend("float d = length(circleEdge.xy);");
+ fragBuilder->codeAppend("half distanceToOuterEdge = half(circleEdge.z * (1.0 - d));");
+ fragBuilder->codeAppend("half edgeAlpha = saturate(distanceToOuterEdge);");
+ if (cgp.fStroke) {
+ fragBuilder->codeAppend(
+ "half distanceToInnerEdge = half(circleEdge.z * (d - circleEdge.w));");
+ fragBuilder->codeAppend("half innerAlpha = saturate(distanceToInnerEdge);");
+ fragBuilder->codeAppend("edgeAlpha *= innerAlpha;");
+ }
+
+ if (cgp.fInClipPlane.isInitialized()) {
+ fragBuilder->codeAppend(
+ "half clip = half(saturate(circleEdge.z * dot(circleEdge.xy, "
+ "clipPlane.xy) + clipPlane.z));");
+ if (cgp.fInIsectPlane.isInitialized()) {
+ fragBuilder->codeAppend(
+ "clip *= half(saturate(circleEdge.z * dot(circleEdge.xy, "
+ "isectPlane.xy) + isectPlane.z));");
+ }
+ if (cgp.fInUnionPlane.isInitialized()) {
+ fragBuilder->codeAppend(
+ "clip = saturate(clip + half(saturate(circleEdge.z * dot(circleEdge.xy,"
+ " unionPlane.xy) + unionPlane.z)));");
+ }
+ fragBuilder->codeAppend("edgeAlpha *= clip;");
+ if (cgp.fInRoundCapCenters.isInitialized()) {
+ // We compute coverage of the round caps as circles at the butt caps produced
+ // by the clip planes. The inverse of the clip planes is applied so that there
+ // is no double counting.
+ fragBuilder->codeAppendf(
+ "half dcap1 = half(circleEdge.z * (%s - length(circleEdge.xy - "
+ " roundCapCenters.xy)));"
+ "half dcap2 = half(circleEdge.z * (%s - length(circleEdge.xy - "
+ " roundCapCenters.zw)));"
+ "half capAlpha = (1 - clip) * (max(dcap1, 0) + max(dcap2, 0));"
+ "edgeAlpha = min(edgeAlpha + capAlpha, 1.0);",
+ capRadius.fsIn(), capRadius.fsIn());
+ }
+ }
+ fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const CircleGeometryProcessor& cgp = gp.cast<CircleGeometryProcessor>();
+ uint16_t key;
+ key = cgp.fStroke ? 0x01 : 0x0;
+ key |= cgp.fLocalMatrix.hasPerspective() ? 0x02 : 0x0;
+ key |= cgp.fInClipPlane.isInitialized() ? 0x04 : 0x0;
+ key |= cgp.fInIsectPlane.isInitialized() ? 0x08 : 0x0;
+ key |= cgp.fInUnionPlane.isInitialized() ? 0x10 : 0x0;
+ key |= cgp.fInRoundCapCenters.isInitialized() ? 0x20 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(primProc.cast<CircleGeometryProcessor>().fLocalMatrix,
+ pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ SkMatrix fLocalMatrix;
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInCircleEdge;
+ // Optional attributes.
+ Attribute fInClipPlane;
+ Attribute fInIsectPlane;
+ Attribute fInUnionPlane;
+ Attribute fInRoundCapCenters;
+
+ bool fStroke;
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(CircleGeometryProcessor);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> CircleGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ bool stroke = d->fRandom->nextBool();
+ bool roundCaps = stroke ? d->fRandom->nextBool() : false;
+ bool wideColor = d->fRandom->nextBool();
+ bool clipPlane = d->fRandom->nextBool();
+ bool isectPlane = d->fRandom->nextBool();
+ bool unionPlane = d->fRandom->nextBool();
+ const SkMatrix& matrix = GrTest::TestMatrix(d->fRandom);
+ return sk_sp<GrGeometryProcessor>(new CircleGeometryProcessor(
+ stroke, clipPlane, isectPlane, unionPlane, roundCaps, wideColor, matrix));
+}
+#endif
+
+class ButtCapDashedCircleGeometryProcessor : public GrGeometryProcessor {
+public:
+ ButtCapDashedCircleGeometryProcessor(bool wideColor, const SkMatrix& localMatrix)
+ : INHERITED(kButtCapStrokedCircleGeometryProcessor_ClassID), fLocalMatrix(localMatrix) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ fInCircleEdge = {"inCircleEdge", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ fInDashParams = {"inDashParams", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ this->setVertexAttributes(&fInPosition, 4);
+ }
+
+ ~ButtCapDashedCircleGeometryProcessor() override {}
+
+ const char* name() const override { return "ButtCapDashedCircleGeometryProcessor"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const ButtCapDashedCircleGeometryProcessor& bcscgp =
+ args.fGP.cast<ButtCapDashedCircleGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // emit attributes
+ varyingHandler->emitAttributes(bcscgp);
+ fragBuilder->codeAppend("float4 circleEdge;");
+ varyingHandler->addPassThroughAttribute(bcscgp.fInCircleEdge, "circleEdge");
+
+ fragBuilder->codeAppend("float4 dashParams;");
+ varyingHandler->addPassThroughAttribute(
+ bcscgp.fInDashParams, "dashParams",
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ GrGLSLVarying wrapDashes(kHalf4_GrSLType);
+ varyingHandler->addVarying("wrapDashes", &wrapDashes,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ GrGLSLVarying lastIntervalLength(kHalf_GrSLType);
+ varyingHandler->addVarying("lastIntervalLength", &lastIntervalLength,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ vertBuilder->codeAppendf("float4 dashParams = %s;", bcscgp.fInDashParams.name());
+ // Our fragment shader works in on/off intervals as specified by dashParams.xy:
+ // x = length of on interval, y = length of on + off.
+ // There are two other parameters in dashParams.zw:
+ // z = start angle in radians, w = phase offset in radians in range -y/2..y/2.
+ // Each interval has a "corresponding" dash which may be shifted partially or
+ // fully out of its interval by the phase. So there may be up to two "visual"
+ // dashes in an interval.
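+            // (Illustrative numbers: with on = 0.5, period = 1.25, phase = 0.4, interval 0
+            // covers angles [0, 1.25) and shows the tail of its own dash over [0, 0.1) plus
+            // the head of the next interval's dash starting at 0.85.)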
+ // When computing coverage in an interval we look at three dashes. These are the
+ // "corresponding" dashes from the current, previous, and next intervals. Any of these
+            // may be phase-shifted into our interval; even when phase=0 they may be within half a
+            // pixel of a pixel center in the interval.
+ // When in the first interval we need to check the dash from the last interval. And
+ // similarly when in the last interval we need to check the dash from the first
+            // interval. When 2pi is not perfectly divisible by dashParams.y this is a boundary
+            // case.
+ // We compute the dash begin/end angles in the vertex shader and apply them in the
+ // fragment shader when we detect we're in the first/last interval.
+ vertBuilder->codeAppend(R"(
+ // The two boundary dash intervals are stored in wrapDashes.xy and .zw and fed
+ // to the fragment shader as a varying.
+ float4 wrapDashes;
+ half lastIntervalLength = mod(6.28318530718, half(dashParams.y));
+                // 2pi may happen to be perfectly divisible by the interval period.
+ if (0 == lastIntervalLength) {
+ lastIntervalLength = half(dashParams.y);
+ }
+ // Let 'l' be the last interval before reaching 2 pi.
+                // Based on the phase, determine whether the (l-1)th, l-th, or (l+1)th interval's
+ // "corresponding" dash appears in the l-th interval and is closest to the 0-th
+ // interval.
+ half offset = 0;
+ if (-dashParams.w >= lastIntervalLength) {
+ offset = half(-dashParams.y);
+ } else if (dashParams.w > dashParams.y - lastIntervalLength) {
+ offset = half(dashParams.y);
+ }
+ wrapDashes.x = -lastIntervalLength + offset - dashParams.w;
+                // The end of this dash may be beyond 2 pi and therefore clipped. Hence the
+ // min.
+ wrapDashes.y = min(wrapDashes.x + dashParams.x, 0);
+
+                // Based on the phase, determine whether the -1st, 0th, or 1st interval's
+ // "corresponding" dash appears in the 0th interval and is closest to l.
+ offset = 0;
+ if (dashParams.w >= dashParams.x) {
+ offset = half(dashParams.y);
+ } else if (-dashParams.w > dashParams.y - dashParams.x) {
+ offset = half(-dashParams.y);
+ }
+ wrapDashes.z = lastIntervalLength + offset - dashParams.w;
+ wrapDashes.w = wrapDashes.z + dashParams.x;
+ // The start of the dash we're considering may be clipped by the start of the
+ // circle.
+ wrapDashes.z = max(wrapDashes.z, lastIntervalLength);
+ )");
+ vertBuilder->codeAppendf("%s = half4(wrapDashes);", wrapDashes.vsOut());
+ vertBuilder->codeAppendf("%s = lastIntervalLength;", lastIntervalLength.vsOut());
+ fragBuilder->codeAppendf("half4 wrapDashes = %s;", wrapDashes.fsIn());
+ fragBuilder->codeAppendf("half lastIntervalLength = %s;", lastIntervalLength.fsIn());
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(
+ bcscgp.fInColor, args.fOutputColor,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, bcscgp.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ bcscgp.fInPosition.asShaderVar(),
+ bcscgp.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+ GrShaderVar fnArgs[] = {
+ GrShaderVar("angleToEdge", kFloat_GrSLType),
+ GrShaderVar("diameter", kFloat_GrSLType),
+ };
+ SkString fnName;
+ fragBuilder->emitFunction(kFloat_GrSLType, "coverage_from_dash_edge",
+ SK_ARRAY_COUNT(fnArgs), fnArgs, R"(
+ float linearDist;
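+                    // Convert the angular distance to the dash edge into an approximate
+                    // linear distance via the chord length: chord = diameter * sin(angle / 2).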
+ angleToEdge = clamp(angleToEdge, -3.1415, 3.1415);
+ linearDist = diameter * sin(angleToEdge / 2);
+ return saturate(linearDist + 0.5);
+ )",
+ &fnName);
+ fragBuilder->codeAppend(R"(
+ float d = length(circleEdge.xy) * circleEdge.z;
+
+ // Compute coverage from outer/inner edges of the stroke.
+ half distanceToOuterEdge = half(circleEdge.z - d);
+ half edgeAlpha = saturate(distanceToOuterEdge);
+ half distanceToInnerEdge = half(d - circleEdge.z * circleEdge.w);
+ half innerAlpha = saturate(distanceToInnerEdge);
+ edgeAlpha *= innerAlpha;
+
+ half angleFromStart = half(atan(circleEdge.y, circleEdge.x) - dashParams.z);
+ angleFromStart = mod(angleFromStart, 6.28318530718);
+ float x = mod(angleFromStart, dashParams.y);
+ // Convert the radial distance from center to pixel into a diameter.
+ d *= 2;
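+                    // currDash/nextDash/prevDash below hold the [start, end) angles of the
+                    // "corresponding" dashes of this interval and its neighbors, expressed
+                    // relative to the start of the current interval (the same space as x).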
+ half2 currDash = half2(half(-dashParams.w), half(dashParams.x) -
+ half(dashParams.w));
+ half2 nextDash = half2(half(dashParams.y) - half(dashParams.w),
+ half(dashParams.y) + half(dashParams.x) -
+ half(dashParams.w));
+ half2 prevDash = half2(half(-dashParams.y) - half(dashParams.w),
+ half(-dashParams.y) + half(dashParams.x) -
+ half(dashParams.w));
+ half dashAlpha = 0;
+ )");
+ fragBuilder->codeAppendf(R"(
+ if (angleFromStart - x + dashParams.y >= 6.28318530718) {
+ dashAlpha += half(%s(x - wrapDashes.z, d) * %s(wrapDashes.w - x, d));
+ currDash.y = min(currDash.y, lastIntervalLength);
+ if (nextDash.x >= lastIntervalLength) {
+ // The next dash is outside the 0..2pi range, throw it away
+ nextDash.xy = half2(1000);
+ } else {
+ // Clip the end of the next dash to the end of the circle
+ nextDash.y = min(nextDash.y, lastIntervalLength);
+ }
+ }
+ )", fnName.c_str(), fnName.c_str());
+ fragBuilder->codeAppendf(R"(
+ if (angleFromStart - x - dashParams.y < -0.01) {
+ dashAlpha += half(%s(x - wrapDashes.x, d) * %s(wrapDashes.y - x, d));
+ currDash.x = max(currDash.x, 0);
+ if (prevDash.y <= 0) {
+ // The previous dash is outside the 0..2pi range, throw it away
+ prevDash.xy = half2(1000);
+ } else {
+                        // Clip the start of the previous dash to the start of the circle
+ prevDash.x = max(prevDash.x, 0);
+ }
+ }
+ )", fnName.c_str(), fnName.c_str());
+ fragBuilder->codeAppendf(R"(
+ dashAlpha += half(%s(x - currDash.x, d) * %s(currDash.y - x, d));
+ dashAlpha += half(%s(x - nextDash.x, d) * %s(nextDash.y - x, d));
+ dashAlpha += half(%s(x - prevDash.x, d) * %s(prevDash.y - x, d));
+ dashAlpha = min(dashAlpha, 1);
+ edgeAlpha *= dashAlpha;
+ )", fnName.c_str(), fnName.c_str(), fnName.c_str(), fnName.c_str(), fnName.c_str(),
+ fnName.c_str());
+ fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const ButtCapDashedCircleGeometryProcessor& bcscgp =
+ gp.cast<ButtCapDashedCircleGeometryProcessor>();
+ b->add32(bcscgp.fLocalMatrix.hasPerspective());
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(
+ primProc.cast<ButtCapDashedCircleGeometryProcessor>().fLocalMatrix, pdman,
+ &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ SkMatrix fLocalMatrix;
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInCircleEdge;
+ Attribute fInDashParams;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(ButtCapDashedCircleGeometryProcessor);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> ButtCapDashedCircleGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ bool wideColor = d->fRandom->nextBool();
+ const SkMatrix& matrix = GrTest::TestMatrix(d->fRandom);
+ return sk_sp<GrGeometryProcessor>(new ButtCapDashedCircleGeometryProcessor(wideColor, matrix));
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for an axis-aligned
+ * ellipse, specified as a 2D offset from center, and the reciprocals of the outer and inner radii,
+ * in both x and y directions.
+ *
+ * We are using an implicit function of x^2/a^2 + y^2/b^2 - 1 = 0.
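+ *
+ * Coverage comes from the standard first-order distance approximation d = f(x,y)/|grad f|,
+ * mapped through saturate(0.5 - d); in the shader below that is the test * invlen term.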
+ */
+
+class EllipseGeometryProcessor : public GrGeometryProcessor {
+public:
+ EllipseGeometryProcessor(bool stroke, bool wideColor, bool useScale,
+ const SkMatrix& localMatrix)
+ : INHERITED(kEllipseGeometryProcessor_ClassID)
+ , fLocalMatrix(localMatrix)
+ , fStroke(stroke)
+ , fUseScale(useScale) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ if (useScale) {
+ fInEllipseOffset = {"inEllipseOffset", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else {
+ fInEllipseOffset = {"inEllipseOffset", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ }
+ fInEllipseRadii = {"inEllipseRadii", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ this->setVertexAttributes(&fInPosition, 4);
+ }
+
+ ~EllipseGeometryProcessor() override {}
+
+ const char* name() const override { return "EllipseEdge"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const EllipseGeometryProcessor& egp = args.fGP.cast<EllipseGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(egp);
+
+ GrSLType offsetType = egp.fUseScale ? kFloat3_GrSLType : kFloat2_GrSLType;
+ GrGLSLVarying ellipseOffsets(offsetType);
+ varyingHandler->addVarying("EllipseOffsets", &ellipseOffsets);
+ vertBuilder->codeAppendf("%s = %s;", ellipseOffsets.vsOut(),
+ egp.fInEllipseOffset.name());
+
+ GrGLSLVarying ellipseRadii(kFloat4_GrSLType);
+ varyingHandler->addVarying("EllipseRadii", &ellipseRadii);
+ vertBuilder->codeAppendf("%s = %s;", ellipseRadii.vsOut(), egp.fInEllipseRadii.name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(egp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder, gpArgs, egp.fInPosition.name());
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ egp.fInPosition.asShaderVar(),
+ egp.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+ // For stroked ellipses, we use the full ellipse equation (x^2/a^2 + y^2/b^2 = 1)
+ // to compute both the edges because we need two separate test equations for
+ // the single offset.
+ // For filled ellipses we can use a unit circle equation (x^2 + y^2 = 1), and warp
+ // the distance by the gradient, non-uniformly scaled by the inverse of the
+ // ellipse size.
+
+ // On medium precision devices, we scale the denominator of the distance equation
+ // before taking the inverse square root to minimize the chance that we're dividing
+ // by zero, then we scale the result back.
+
+ // for outer curve
+ fragBuilder->codeAppendf("float2 offset = %s.xy;", ellipseOffsets.fsIn());
+ if (egp.fStroke) {
+ fragBuilder->codeAppendf("offset *= %s.xy;", ellipseRadii.fsIn());
+ }
+ fragBuilder->codeAppend("float test = dot(offset, offset) - 1.0;");
+ if (egp.fUseScale) {
+ fragBuilder->codeAppendf("float2 grad = 2.0*offset*(%s.z*%s.xy);",
+ ellipseOffsets.fsIn(), ellipseRadii.fsIn());
+ } else {
+ fragBuilder->codeAppendf("float2 grad = 2.0*offset*%s.xy;", ellipseRadii.fsIn());
+ }
+ fragBuilder->codeAppend("float grad_dot = dot(grad, grad);");
+
+ // avoid calling inversesqrt on zero.
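+            // (1.1755e-38 is roughly the smallest normalized float; 6.1036e-5 is roughly the
+            // smallest normalized half, for devices that evaluate floats at lower precision.)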
+ if (args.fShaderCaps->floatIs32Bits()) {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.1755e-38);");
+ } else {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 6.1036e-5);");
+ }
+ if (egp.fUseScale) {
+ fragBuilder->codeAppendf("float invlen = %s.z*inversesqrt(grad_dot);",
+ ellipseOffsets.fsIn());
+ } else {
+ fragBuilder->codeAppend("float invlen = inversesqrt(grad_dot);");
+ }
+ fragBuilder->codeAppend("float edgeAlpha = saturate(0.5-test*invlen);");
+
+ // for inner curve
+ if (egp.fStroke) {
+ fragBuilder->codeAppendf("offset = %s.xy*%s.zw;", ellipseOffsets.fsIn(),
+ ellipseRadii.fsIn());
+ fragBuilder->codeAppend("test = dot(offset, offset) - 1.0;");
+ if (egp.fUseScale) {
+ fragBuilder->codeAppendf("grad = 2.0*offset*(%s.z*%s.zw);",
+ ellipseOffsets.fsIn(), ellipseRadii.fsIn());
+ } else {
+ fragBuilder->codeAppendf("grad = 2.0*offset*%s.zw;", ellipseRadii.fsIn());
+ }
+ fragBuilder->codeAppend("grad_dot = dot(grad, grad);");
+ if (!args.fShaderCaps->floatIs32Bits()) {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 6.1036e-5);");
+ }
+ if (egp.fUseScale) {
+ fragBuilder->codeAppendf("invlen = %s.z*inversesqrt(grad_dot);",
+ ellipseOffsets.fsIn());
+ } else {
+ fragBuilder->codeAppend("invlen = inversesqrt(grad_dot);");
+ }
+ fragBuilder->codeAppend("edgeAlpha *= saturate(0.5+test*invlen);");
+ }
+
+ fragBuilder->codeAppendf("%s = half4(half(edgeAlpha));", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const EllipseGeometryProcessor& egp = gp.cast<EllipseGeometryProcessor>();
+ uint16_t key = egp.fStroke ? 0x1 : 0x0;
+ key |= egp.fLocalMatrix.hasPerspective() ? 0x2 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const EllipseGeometryProcessor& egp = primProc.cast<EllipseGeometryProcessor>();
+ this->setTransformDataHelper(egp.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInEllipseOffset;
+ Attribute fInEllipseRadii;
+
+ SkMatrix fLocalMatrix;
+ bool fStroke;
+ bool fUseScale;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(EllipseGeometryProcessor);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> EllipseGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(
+ new EllipseGeometryProcessor(d->fRandom->nextBool(), d->fRandom->nextBool(),
+ d->fRandom->nextBool(), GrTest::TestMatrix(d->fRandom)));
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for an ellipse,
+ * specified as a 2D offset from center for both the outer and inner paths (if stroked). The
+ * implicit equation used is for a unit circle (x^2 + y^2 - 1 = 0) and the edge is corrected
+ * by using differentials.
+ *
+ * The result is device-independent and can be used with any affine matrix.
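+ *
+ * (The gradient of the implicit function is computed per pixel with dFdx/dFdy rather than
+ * from precomputed reciprocal radii; this is what lets the result work under any affine
+ * view matrix.)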
+ */
+
+enum class DIEllipseStyle { kStroke = 0, kHairline, kFill };
+
+class DIEllipseGeometryProcessor : public GrGeometryProcessor {
+public:
+ DIEllipseGeometryProcessor(bool wideColor, bool useScale, const SkMatrix& viewMatrix,
+ DIEllipseStyle style)
+ : INHERITED(kDIEllipseGeometryProcessor_ClassID)
+ , fViewMatrix(viewMatrix)
+ , fUseScale(useScale)
+ , fStyle(style) {
+ fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fInColor = MakeColorAttribute("inColor", wideColor);
+ if (useScale) {
+ fInEllipseOffsets0 = {"inEllipseOffsets0", kFloat3_GrVertexAttribType,
+ kFloat3_GrSLType};
+ } else {
+ fInEllipseOffsets0 = {"inEllipseOffsets0", kFloat2_GrVertexAttribType,
+ kFloat2_GrSLType};
+ }
+ fInEllipseOffsets1 = {"inEllipseOffsets1", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ this->setVertexAttributes(&fInPosition, 4);
+ }
+
+ ~DIEllipseGeometryProcessor() override {}
+
+ const char* name() const override { return "DIEllipseEdge"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() : fViewMatrix(SkMatrix::InvalidMatrix()) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const DIEllipseGeometryProcessor& diegp = args.fGP.cast<DIEllipseGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(diegp);
+
+ GrSLType offsetType = (diegp.fUseScale) ? kFloat3_GrSLType : kFloat2_GrSLType;
+ GrGLSLVarying offsets0(offsetType);
+ varyingHandler->addVarying("EllipseOffsets0", &offsets0);
+ vertBuilder->codeAppendf("%s = %s;", offsets0.vsOut(), diegp.fInEllipseOffsets0.name());
+
+ GrGLSLVarying offsets1(kFloat2_GrSLType);
+ varyingHandler->addVarying("EllipseOffsets1", &offsets1);
+ vertBuilder->codeAppendf("%s = %s;", offsets1.vsOut(), diegp.fInEllipseOffsets1.name());
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ varyingHandler->addPassThroughAttribute(diegp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->writeOutputPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ diegp.fInPosition.name(),
+ diegp.fViewMatrix,
+ &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ diegp.fInPosition.asShaderVar(),
+ args.fFPCoordTransformHandler);
+
+ // for outer curve
+ fragBuilder->codeAppendf("float2 scaledOffset = %s.xy;", offsets0.fsIn());
+ fragBuilder->codeAppend("float test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("float2 duvdx = dFdx(%s.xy);", offsets0.fsIn());
+ fragBuilder->codeAppendf("float2 duvdy = dFdy(%s.xy);", offsets0.fsIn());
+ fragBuilder->codeAppendf(
+ "float2 grad = float2(%s.x*duvdx.x + %s.y*duvdx.y,"
+ " %s.x*duvdy.x + %s.y*duvdy.y);",
+ offsets0.fsIn(), offsets0.fsIn(), offsets0.fsIn(), offsets0.fsIn());
+ if (diegp.fUseScale) {
+ fragBuilder->codeAppendf("grad *= %s.z;", offsets0.fsIn());
+ }
+
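+            // The implicit function's gradient carries a factor of 2 that was not folded into
+            // grad above, so it appears squared here as the 4.0 in grad_dot.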
+ fragBuilder->codeAppend("float grad_dot = 4.0*dot(grad, grad);");
+ // avoid calling inversesqrt on zero.
+ if (args.fShaderCaps->floatIs32Bits()) {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.1755e-38);");
+ } else {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 6.1036e-5);");
+ }
+ fragBuilder->codeAppend("float invlen = inversesqrt(grad_dot);");
+ if (diegp.fUseScale) {
+ fragBuilder->codeAppendf("invlen *= %s.z;", offsets0.fsIn());
+ }
+ if (DIEllipseStyle::kHairline == diegp.fStyle) {
+ // can probably do this with one step
+ fragBuilder->codeAppend("float edgeAlpha = saturate(1.0-test*invlen);");
+ fragBuilder->codeAppend("edgeAlpha *= saturate(1.0+test*invlen);");
+ } else {
+ fragBuilder->codeAppend("float edgeAlpha = saturate(0.5-test*invlen);");
+ }
+
+ // for inner curve
+ if (DIEllipseStyle::kStroke == diegp.fStyle) {
+ fragBuilder->codeAppendf("scaledOffset = %s.xy;", offsets1.fsIn());
+ fragBuilder->codeAppend("test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("duvdx = float2(dFdx(%s));", offsets1.fsIn());
+ fragBuilder->codeAppendf("duvdy = float2(dFdy(%s));", offsets1.fsIn());
+ fragBuilder->codeAppendf(
+ "grad = float2(%s.x*duvdx.x + %s.y*duvdx.y,"
+ " %s.x*duvdy.x + %s.y*duvdy.y);",
+ offsets1.fsIn(), offsets1.fsIn(), offsets1.fsIn(), offsets1.fsIn());
+ if (diegp.fUseScale) {
+ fragBuilder->codeAppendf("grad *= %s.z;", offsets0.fsIn());
+ }
+ fragBuilder->codeAppend("grad_dot = 4.0*dot(grad, grad);");
+ if (!args.fShaderCaps->floatIs32Bits()) {
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 6.1036e-5);");
+ }
+ fragBuilder->codeAppend("invlen = inversesqrt(grad_dot);");
+ if (diegp.fUseScale) {
+ fragBuilder->codeAppendf("invlen *= %s.z;", offsets0.fsIn());
+ }
+ fragBuilder->codeAppend("edgeAlpha *= saturate(0.5+test*invlen);");
+ }
+
+ fragBuilder->codeAppendf("%s = half4(half(edgeAlpha));", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DIEllipseGeometryProcessor& diegp = gp.cast<DIEllipseGeometryProcessor>();
+ uint16_t key = static_cast<uint16_t>(diegp.fStyle);
+ key |= ComputePosKey(diegp.fViewMatrix) << 10;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const DIEllipseGeometryProcessor& diegp = gp.cast<DIEllipseGeometryProcessor>();
+
+ if (!diegp.fViewMatrix.isIdentity() && !fViewMatrix.cheapEqualTo(diegp.fViewMatrix)) {
+ fViewMatrix = diegp.fViewMatrix;
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ private:
+ SkMatrix fViewMatrix;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ Attribute fInPosition;
+ Attribute fInColor;
+ Attribute fInEllipseOffsets0;
+ Attribute fInEllipseOffsets1;
+
+ SkMatrix fViewMatrix;
+ bool fUseScale;
+ DIEllipseStyle fStyle;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DIEllipseGeometryProcessor);
+
+#if GR_TEST_UTILS
+sk_sp<GrGeometryProcessor> DIEllipseGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(new DIEllipseGeometryProcessor(
+ d->fRandom->nextBool(), d->fRandom->nextBool(), GrTest::TestMatrix(d->fRandom),
+ (DIEllipseStyle)(d->fRandom->nextRangeU(0, 2))));
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// We have two possible cases for geometry for a circle:
+
+// In the case of a normal fill, we draw geometry for the circle as an octagon.
+static const uint16_t gFillCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 8, 1, 2, 8,
+ 2, 3, 8, 3, 4, 8,
+ 4, 5, 8, 5, 6, 8,
+ 6, 7, 8, 7, 0, 8
+ // clang-format on
+};
+
+// For stroked circles, we use two nested octagons.
+static const uint16_t gStrokeCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 9, 0, 9, 8,
+ 1, 2, 10, 1, 10, 9,
+ 2, 3, 11, 2, 11, 10,
+ 3, 4, 12, 3, 12, 11,
+ 4, 5, 13, 4, 13, 12,
+ 5, 6, 14, 5, 14, 13,
+ 6, 7, 15, 6, 15, 14,
+ 7, 0, 8, 7, 8, 15,
+ // clang-format on
+};
+
+// Normalized geometry for octagons that circumscribe and lie on a circle:
+
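+// kOctOffset = tan(pi/8): the outer octagon's edges are tangent to the unit circle (it
+// circumscribes it), while the inner octagon's vertices lie on the unit circle.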
+static constexpr SkScalar kOctOffset = 0.41421356237f; // sqrt(2) - 1
+static constexpr SkPoint kOctagonOuter[] = {
+ SkPoint::Make(-kOctOffset, -1),
+ SkPoint::Make( kOctOffset, -1),
+ SkPoint::Make( 1, -kOctOffset),
+ SkPoint::Make( 1, kOctOffset),
+ SkPoint::Make( kOctOffset, 1),
+ SkPoint::Make(-kOctOffset, 1),
+ SkPoint::Make(-1, kOctOffset),
+ SkPoint::Make(-1, -kOctOffset),
+};
+
+// cosine and sine of pi/8
+static constexpr SkScalar kCosPi8 = 0.923879533f;
+static constexpr SkScalar kSinPi8 = 0.382683432f;
+static constexpr SkPoint kOctagonInner[] = {
+ SkPoint::Make(-kSinPi8, -kCosPi8),
+ SkPoint::Make( kSinPi8, -kCosPi8),
+ SkPoint::Make( kCosPi8, -kSinPi8),
+ SkPoint::Make( kCosPi8, kSinPi8),
+ SkPoint::Make( kSinPi8, kCosPi8),
+ SkPoint::Make(-kSinPi8, kCosPi8),
+ SkPoint::Make(-kCosPi8, kSinPi8),
+ SkPoint::Make(-kCosPi8, -kSinPi8),
+};
+
+static const int kIndicesPerFillCircle = SK_ARRAY_COUNT(gFillCircleIndices);
+static const int kIndicesPerStrokeCircle = SK_ARRAY_COUNT(gStrokeCircleIndices);
+static const int kVertsPerStrokeCircle = 16;
+static const int kVertsPerFillCircle = 9;
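+
+// The fill geometry fans the 8 outer octagon vertices (0..7) around a 9th center vertex (8);
+// the stroke geometry stitches the outer octagon (0..7) to the inner octagon (8..15).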
+
+static int circle_type_to_vert_count(bool stroked) {
+ return stroked ? kVertsPerStrokeCircle : kVertsPerFillCircle;
+}
+
+static int circle_type_to_index_count(bool stroked) {
+ return stroked ? kIndicesPerStrokeCircle : kIndicesPerFillCircle;
+}
+
+static const uint16_t* circle_type_to_indices(bool stroked) {
+ return stroked ? gStrokeCircleIndices : gFillCircleIndices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class CircleOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ /** Optional extra params to render a partial arc rather than a full circle. */
+ struct ArcParams {
+ SkScalar fStartAngleRadians;
+ SkScalar fSweepAngleRadians;
+ bool fUseCenter;
+ };
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ SkPoint center,
+ SkScalar radius,
+ const GrStyle& style,
+ const ArcParams* arcParams = nullptr) {
+ SkASSERT(circle_stays_circle(viewMatrix));
+ if (style.hasPathEffect()) {
+ return nullptr;
+ }
+ const SkStrokeRec& stroke = style.strokeRec();
+ SkStrokeRec::Style recStyle = stroke.getStyle();
+ if (arcParams) {
+ // Arc support depends on the style.
+ switch (recStyle) {
+ case SkStrokeRec::kStrokeAndFill_Style:
+ // This produces a strange result that this op doesn't implement.
+ return nullptr;
+ case SkStrokeRec::kFill_Style:
+ // This supports all fills.
+ break;
+ case SkStrokeRec::kStroke_Style:
+ // Strokes that don't use the center point are supported with butt and round
+ // caps.
+ if (arcParams->fUseCenter || stroke.getCap() == SkPaint::kSquare_Cap) {
+ return nullptr;
+ }
+ break;
+ case SkStrokeRec::kHairline_Style:
+ // Hairline only supports butt cap. Round caps could be emulated by slightly
+ // extending the angle range if we ever care to.
+ if (arcParams->fUseCenter || stroke.getCap() != SkPaint::kButt_Cap) {
+ return nullptr;
+ }
+ break;
+ }
+ }
+ return Helper::FactoryHelper<CircleOp>(context, std::move(paint), viewMatrix, center,
+ radius, style, arcParams);
+ }
+
+ CircleOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, SkPoint center, SkScalar radius, const GrStyle& style,
+ const ArcParams* arcParams)
+ : GrMeshDrawOp(ClassID()), fHelper(helperArgs, GrAAType::kCoverage) {
+ const SkStrokeRec& stroke = style.strokeRec();
+ SkStrokeRec::Style recStyle = stroke.getStyle();
+
+ fRoundCaps = false;
+
+ viewMatrix.mapPoints(&center, 1);
+ radius = viewMatrix.mapRadius(radius);
+ SkScalar strokeWidth = viewMatrix.mapRadius(stroke.getWidth());
+
+ bool isStrokeOnly =
+ SkStrokeRec::kStroke_Style == recStyle || SkStrokeRec::kHairline_Style == recStyle;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == recStyle;
+
+ SkScalar innerRadius = -SK_ScalarHalf;
+ SkScalar outerRadius = radius;
+ SkScalar halfWidth = 0;
+ if (hasStroke) {
+ if (SkScalarNearlyZero(strokeWidth)) {
+ halfWidth = SK_ScalarHalf;
+ } else {
+ halfWidth = SkScalarHalf(strokeWidth);
+ }
+
+ outerRadius += halfWidth;
+ if (isStrokeOnly) {
+ innerRadius = radius - halfWidth;
+ }
+ }
+
+        // The radii are outset for two reasons. First, it allows the shader to perform a simpler
+        // computation because the computed alpha is zero, rather than 50%, at the radius.
+        // Second, the outer radius is used to compute the verts of the bounding box that is
+        // rendered, and the outset ensures the box will cover all pixels partially covered by
+        // the circle.
+ outerRadius += SK_ScalarHalf;
+ innerRadius -= SK_ScalarHalf;
+ bool stroked = isStrokeOnly && innerRadius > 0.0f;
+ fViewMatrixIfUsingLocalCoords = viewMatrix;
+
+ // This makes every point fully inside the intersection plane.
+ static constexpr SkScalar kUnusedIsectPlane[] = {0.f, 0.f, 1.f};
+ // This makes every point fully outside the union plane.
+ static constexpr SkScalar kUnusedUnionPlane[] = {0.f, 0.f, 0.f};
+ static constexpr SkPoint kUnusedRoundCaps[] = {{1e10f, 1e10f}, {1e10f, 1e10f}};
+ SkRect devBounds = SkRect::MakeLTRB(center.fX - outerRadius, center.fY - outerRadius,
+ center.fX + outerRadius, center.fY + outerRadius);
+ if (arcParams) {
+ // The shader operates in a space where the circle is translated to be centered at the
+ // origin. Here we compute points on the unit circle at the starting and ending angles.
+ SkPoint startPoint, stopPoint;
+ startPoint.fY = SkScalarSin(arcParams->fStartAngleRadians);
+ startPoint.fX = SkScalarCos(arcParams->fStartAngleRadians);
+ SkScalar endAngle = arcParams->fStartAngleRadians + arcParams->fSweepAngleRadians;
+ stopPoint.fY = SkScalarSin(endAngle);
+ stopPoint.fX = SkScalarCos(endAngle);
+
+ // Adjust the start and end points based on the view matrix (to handle rotated arcs)
+ startPoint = viewMatrix.mapVector(startPoint.fX, startPoint.fY);
+ stopPoint = viewMatrix.mapVector(stopPoint.fX, stopPoint.fY);
+ startPoint.normalize();
+ stopPoint.normalize();
+
+            // If the matrix includes a reflection (negative scale on exactly one axis) we need
+            // to swap our start and end points.
+ if ((viewMatrix.getScaleX() < 0) != (viewMatrix.getScaleY() < 0)) {
+ using std::swap;
+ swap(startPoint, stopPoint);
+ }
+
+ fRoundCaps = style.strokeRec().getWidth() > 0 &&
+ style.strokeRec().getCap() == SkPaint::kRound_Cap;
+ SkPoint roundCaps[2];
+ if (fRoundCaps) {
+ // Compute the cap center points in the normalized space.
+ SkScalar midRadius = (innerRadius + outerRadius) / (2 * outerRadius);
+ roundCaps[0] = startPoint * midRadius;
+ roundCaps[1] = stopPoint * midRadius;
+ } else {
+ roundCaps[0] = kUnusedRoundCaps[0];
+ roundCaps[1] = kUnusedRoundCaps[1];
+ }
+
+            // Like a fill without useCenter, a butt-cap stroke can be implemented by clipping
+            // against radial lines. We treat round caps the same way, but tack on coverage of
+            // circles at the centers of the butt caps.
+            // However, in both cases we have to be careful about the half-circle case. There the
+            // two radial lines are equal and so that edge gets clipped twice. Since the shared
+            // edge goes through the center we fall back on the !useCenter case.
+ auto absSweep = SkScalarAbs(arcParams->fSweepAngleRadians);
+ bool useCenter = (arcParams->fUseCenter || isStrokeOnly) &&
+ !SkScalarNearlyEqual(absSweep, SK_ScalarPI);
+ if (useCenter) {
+ SkVector norm0 = {startPoint.fY, -startPoint.fX};
+ SkVector norm1 = {stopPoint.fY, -stopPoint.fX};
+ // This ensures that norm0 is always the clockwise plane, and norm1 is CCW.
+ if (arcParams->fSweepAngleRadians < 0) {
+ std::swap(norm0, norm1);
+ }
+ norm0.negate();
+ fClipPlane = true;
+ if (absSweep > SK_ScalarPI) {
+ fCircles.emplace_back(Circle{
+ color,
+ innerRadius,
+ outerRadius,
+ {norm0.fX, norm0.fY, 0.5f},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {norm1.fX, norm1.fY, 0.5f},
+ {roundCaps[0], roundCaps[1]},
+ devBounds,
+ stroked});
+ fClipPlaneIsect = false;
+ fClipPlaneUnion = true;
+ } else {
+ fCircles.emplace_back(Circle{
+ color,
+ innerRadius,
+ outerRadius,
+ {norm0.fX, norm0.fY, 0.5f},
+ {norm1.fX, norm1.fY, 0.5f},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ {roundCaps[0], roundCaps[1]},
+ devBounds,
+ stroked});
+ fClipPlaneIsect = true;
+ fClipPlaneUnion = false;
+ }
+ } else {
+ // We clip to a secant of the original circle.
+ startPoint.scale(radius);
+ stopPoint.scale(radius);
+ SkVector norm = {startPoint.fY - stopPoint.fY, stopPoint.fX - startPoint.fX};
+ norm.normalize();
+ if (arcParams->fSweepAngleRadians > 0) {
+ norm.negate();
+ }
+ SkScalar d = -norm.dot(startPoint) + 0.5f;
+
+ fCircles.emplace_back(
+ Circle{color,
+ innerRadius,
+ outerRadius,
+ {norm.fX, norm.fY, d},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ {roundCaps[0], roundCaps[1]},
+ devBounds,
+ stroked});
+ fClipPlane = true;
+ fClipPlaneIsect = false;
+ fClipPlaneUnion = false;
+ }
+ } else {
+ fCircles.emplace_back(
+ Circle{color,
+ innerRadius,
+ outerRadius,
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ {kUnusedRoundCaps[0], kUnusedRoundCaps[1]},
+ devBounds,
+ stroked});
+ fClipPlane = false;
+ fClipPlaneIsect = false;
+ fClipPlaneUnion = false;
+ }
+        // Use the original radius and stroke radius for the bounds so that they do not include
+        // the AA bloat.
+ radius += halfWidth;
+ this->setBounds(
+ {center.fX - radius, center.fY - radius, center.fX + radius, center.fY + radius},
+ HasAABloat::kYes, IsHairline::kNo);
+ fVertCount = circle_type_to_vert_count(stroked);
+ fIndexCount = circle_type_to_index_count(stroked);
+ fAllFill = !stroked;
+ }
+
+ const char* name() const override { return "CircleOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fCircles.count(); ++i) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "InnerRad: %.2f, OuterRad: %.2f\n",
+ fCircles[i].fColor.toBytes_RGBA(), fCircles[i].fDevBounds.fLeft,
+ fCircles[i].fDevBounds.fTop, fCircles[i].fDevBounds.fRight,
+ fCircles[i].fDevBounds.fBottom, fCircles[i].fInnerRadius,
+ fCircles[i].fOuterRadius);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ SkPMColor4f* color = &fCircles.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(new CircleGeometryProcessor(
+ !fAllFill, fClipPlane, fClipPlaneIsect, fClipPlaneUnion, fRoundCaps, fWideColor,
+ localMatrix));
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+ GrVertexWriter vertices{target->makeVertexSpace(gp->vertexStride(), fVertCount,
+ &vertexBuffer, &firstVertex)};
+ if (!vertices.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer = nullptr;
+ int firstIndex = 0;
+ uint16_t* indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ int currStartVertex = 0;
+ for (const auto& circle : fCircles) {
+ SkScalar innerRadius = circle.fInnerRadius;
+ SkScalar outerRadius = circle.fOuterRadius;
+ GrVertexColor color(circle.fColor, fWideColor);
+ const SkRect& bounds = circle.fDevBounds;
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ innerRadius = innerRadius / outerRadius;
+ SkPoint radii = { outerRadius, innerRadius };
+
+ SkPoint center = SkPoint::Make(bounds.centerX(), bounds.centerY());
+ SkScalar halfWidth = 0.5f * bounds.width();
+
+ SkVector geoClipPlane = { 0, 0 };
+ SkScalar offsetClipDist = SK_Scalar1;
+ if (!circle.fStroked && fClipPlane && fClipPlaneIsect &&
+ (circle.fClipPlane[0] * circle.fIsectPlane[0] +
+ circle.fClipPlane[1] * circle.fIsectPlane[1]) < 0.0f) {
+ // Acute arc. Clip the vertices to the perpendicular half-plane. We've constructed
+                // fClipPlane to be clockwise and fIsectPlane to be CCW, so we can rotate them
+ // each 90 degrees to point "out", then average them. We back off by 1/2 pixel so
+ // the AA can extend just past the center of the circle.
+ geoClipPlane.set(circle.fClipPlane[1] - circle.fIsectPlane[1],
+ circle.fIsectPlane[0] - circle.fClipPlane[0]);
+ SkAssertResult(geoClipPlane.normalize());
+ offsetClipDist = 0.5f / halfWidth;
+ }
+
+ for (int i = 0; i < 8; ++i) {
+ // This clips the normalized offset to the half-plane we computed above. Then we
+ // compute the vertex position from this.
+ SkScalar dist = SkTMin(kOctagonOuter[i].dot(geoClipPlane) + offsetClipDist, 0.0f);
+ SkVector offset = kOctagonOuter[i] - geoClipPlane * dist;
+ vertices.write(center + offset * halfWidth,
+ color,
+ offset,
+ radii);
+ if (fClipPlane) {
+ vertices.write(circle.fClipPlane);
+ }
+ if (fClipPlaneIsect) {
+ vertices.write(circle.fIsectPlane);
+ }
+ if (fClipPlaneUnion) {
+ vertices.write(circle.fUnionPlane);
+ }
+ if (fRoundCaps) {
+ vertices.write(circle.fRoundCapCenters);
+ }
+ }
+
+ if (circle.fStroked) {
+ // compute the inner ring
+
+ for (int i = 0; i < 8; ++i) {
+ vertices.write(center + kOctagonInner[i] * circle.fInnerRadius,
+ color,
+ kOctagonInner[i] * innerRadius,
+ radii);
+ if (fClipPlane) {
+ vertices.write(circle.fClipPlane);
+ }
+ if (fClipPlaneIsect) {
+ vertices.write(circle.fIsectPlane);
+ }
+ if (fClipPlaneUnion) {
+ vertices.write(circle.fUnionPlane);
+ }
+ if (fRoundCaps) {
+ vertices.write(circle.fRoundCapCenters);
+ }
+ }
+ } else {
+ // filled
+ vertices.write(center, color, SkPoint::Make(0, 0), radii);
+ if (fClipPlane) {
+ vertices.write(circle.fClipPlane);
+ }
+ if (fClipPlaneIsect) {
+ vertices.write(circle.fIsectPlane);
+ }
+ if (fClipPlaneUnion) {
+ vertices.write(circle.fUnionPlane);
+ }
+ if (fRoundCaps) {
+ vertices.write(circle.fRoundCapCenters);
+ }
+ }
+
+ const uint16_t* primIndices = circle_type_to_indices(circle.fStroked);
+ const int primIndexCount = circle_type_to_index_count(circle.fStroked);
+ for (int i = 0; i < primIndexCount; ++i) {
+ *indices++ = primIndices[i] + currStartVertex;
+ }
+
+ currStartVertex += circle_type_to_vert_count(circle.fStroked);
+ }
+
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexed(std::move(indexBuffer), fIndexCount, firstIndex, 0, fVertCount - 1,
+ GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ CircleOp* that = t->cast<CircleOp>();
+
+        // can only represent 65536 unique vertices with 16-bit indices
+ if (fVertCount + that->fVertCount > 65536) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() &&
+ !fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // Because we've set up the ops that don't use the planes with noop values
+ // we can just accumulate used planes by later ops.
+ fClipPlane |= that->fClipPlane;
+ fClipPlaneIsect |= that->fClipPlaneIsect;
+ fClipPlaneUnion |= that->fClipPlaneUnion;
+ fRoundCaps |= that->fRoundCaps;
+ fWideColor |= that->fWideColor;
+
+ fCircles.push_back_n(that->fCircles.count(), that->fCircles.begin());
+ fVertCount += that->fVertCount;
+ fIndexCount += that->fIndexCount;
+ fAllFill = fAllFill && that->fAllFill;
+ return CombineResult::kMerged;
+ }
+
+ struct Circle {
+ SkPMColor4f fColor;
+ SkScalar fInnerRadius;
+ SkScalar fOuterRadius;
+ SkScalar fClipPlane[3];
+ SkScalar fIsectPlane[3];
+ SkScalar fUnionPlane[3];
+ SkPoint fRoundCapCenters[2];
+ SkRect fDevBounds;
+ bool fStroked;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ Helper fHelper;
+ SkSTArray<1, Circle, true> fCircles;
+ int fVertCount;
+ int fIndexCount;
+ bool fAllFill;
+ bool fClipPlane;
+ bool fClipPlaneIsect;
+ bool fClipPlaneUnion;
+ bool fRoundCaps;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+class ButtCapDashedCircleOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ SkPoint center,
+ SkScalar radius,
+ SkScalar strokeWidth,
+ SkScalar startAngle,
+ SkScalar onAngle,
+ SkScalar offAngle,
+ SkScalar phaseAngle) {
+ SkASSERT(circle_stays_circle(viewMatrix));
+ SkASSERT(strokeWidth < 2 * radius);
+ return Helper::FactoryHelper<ButtCapDashedCircleOp>(context, std::move(paint), viewMatrix,
+ center, radius, strokeWidth, startAngle,
+ onAngle, offAngle, phaseAngle);
+ }
+
+ ButtCapDashedCircleOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, SkPoint center, SkScalar radius,
+ SkScalar strokeWidth, SkScalar startAngle, SkScalar onAngle,
+ SkScalar offAngle, SkScalar phaseAngle)
+ : GrMeshDrawOp(ClassID()), fHelper(helperArgs, GrAAType::kCoverage) {
+ SkASSERT(circle_stays_circle(viewMatrix));
+ viewMatrix.mapPoints(&center, 1);
+ radius = viewMatrix.mapRadius(radius);
+ strokeWidth = viewMatrix.mapRadius(strokeWidth);
+
+ // Determine the angle where the circle starts in device space and whether its orientation
+ // has been reversed.
+ SkVector start;
+ bool reflection;
+ if (!startAngle) {
+ start = {1, 0};
+ } else {
+ start.fY = SkScalarSin(startAngle);
+ start.fX = SkScalarCos(startAngle);
+ }
+ viewMatrix.mapVectors(&start, 1);
+ startAngle = SkScalarATan2(start.fY, start.fX);
+ reflection = (viewMatrix.getScaleX() * viewMatrix.getScaleY() -
+ viewMatrix.getSkewX() * viewMatrix.getSkewY()) < 0;
+
+ auto totalAngle = onAngle + offAngle;
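+        // Wrap the phase offset into [-totalAngle/2, totalAngle/2), the range the geometry
+        // processor's dashParams.w is documented to expect.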
+ phaseAngle = SkScalarMod(phaseAngle + totalAngle / 2, totalAngle) - totalAngle / 2;
+
+ SkScalar halfWidth = 0;
+ if (SkScalarNearlyZero(strokeWidth)) {
+ halfWidth = SK_ScalarHalf;
+ } else {
+ halfWidth = SkScalarHalf(strokeWidth);
+ }
+
+ SkScalar outerRadius = radius + halfWidth;
+ SkScalar innerRadius = radius - halfWidth;
+
+        // The radii are outset for two reasons. First, it allows the shader to perform a simpler
+        // computation because the computed alpha is zero, rather than 50%, at the radius.
+        // Second, the outer radius is used to compute the verts of the bounding box that is
+        // rendered, and the outset ensures the box will cover all pixels partially covered by
+        // the circle.
+ outerRadius += SK_ScalarHalf;
+ innerRadius -= SK_ScalarHalf;
+ fViewMatrixIfUsingLocalCoords = viewMatrix;
+
+ SkRect devBounds = SkRect::MakeLTRB(center.fX - outerRadius, center.fY - outerRadius,
+ center.fX + outerRadius, center.fY + outerRadius);
+
+ // We store whether there is a reflection as a negative total angle.
+ if (reflection) {
+ totalAngle = -totalAngle;
+ }
+ fCircles.push_back(Circle{
+ color,
+ outerRadius,
+ innerRadius,
+ onAngle,
+ totalAngle,
+ startAngle,
+ phaseAngle,
+ devBounds
+ });
+        // Use the original radius and stroke radius for the bounds so that they do not include
+        // the AA bloat.
+ radius += halfWidth;
+ this->setBounds(
+ {center.fX - radius, center.fY - radius, center.fX + radius, center.fY + radius},
+ HasAABloat::kYes, IsHairline::kNo);
+ fVertCount = circle_type_to_vert_count(true);
+ fIndexCount = circle_type_to_index_count(true);
+ }
+
+ const char* name() const override { return "ButtCappedDashedCircleOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fCircles.count(); ++i) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "InnerRad: %.2f, OuterRad: %.2f, OnAngle: %.2f, TotalAngle: %.2f, "
+ "Phase: %.2f\n",
+ fCircles[i].fColor.toBytes_RGBA(), fCircles[i].fDevBounds.fLeft,
+ fCircles[i].fDevBounds.fTop, fCircles[i].fDevBounds.fRight,
+ fCircles[i].fDevBounds.fBottom, fCircles[i].fInnerRadius,
+ fCircles[i].fOuterRadius, fCircles[i].fOnAngle, fCircles[i].fTotalAngle,
+ fCircles[i].fPhaseAngle);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ SkPMColor4f* color = &fCircles.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(new ButtCapDashedCircleGeometryProcessor(fWideColor,
+ localMatrix));
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+ GrVertexWriter vertices{target->makeVertexSpace(gp->vertexStride(), fVertCount,
+ &vertexBuffer, &firstVertex)};
+ if (!vertices.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex = 0;
+ uint16_t* indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ int currStartVertex = 0;
+ for (const auto& circle : fCircles) {
+ // The inner radius in the vertex data must be specified in normalized space so that
+ // length() can be called with smaller values to avoid precision issues with half
+ // floats.
+ auto normInnerRadius = circle.fInnerRadius / circle.fOuterRadius;
+ const SkRect& bounds = circle.fDevBounds;
+ bool reflect = false;
+ struct { float onAngle, totalAngle, startAngle, phaseAngle; } dashParams = {
+ circle.fOnAngle, circle.fTotalAngle, circle.fStartAngle, circle.fPhaseAngle
+ };
+ if (dashParams.totalAngle < 0) {
+ reflect = true;
+ dashParams.totalAngle = -dashParams.totalAngle;
+ dashParams.startAngle = -dashParams.startAngle;
+ }
+
+ GrVertexColor color(circle.fColor, fWideColor);
+
+            // The bounding geometry for the circle is composed of an outer bounding octagon and
+            // an inner inscribed octagon.
+
+ // Compute the vertices of the outer octagon.
+ SkPoint center = SkPoint::Make(bounds.centerX(), bounds.centerY());
+ SkScalar halfWidth = 0.5f * bounds.width();
+
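+            // A view-matrix reflection was encoded above as a negative total angle; apply it
+            // here by flipping the y of the normalized octagon offsets fed to the shader so
+            // angles keep a consistent winding.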
+ auto reflectY = [=](const SkPoint& p) {
+ return SkPoint{ p.fX, reflect ? -p.fY : p.fY };
+ };
+
+ for (int i = 0; i < 8; ++i) {
+ vertices.write(center + kOctagonOuter[i] * halfWidth,
+ color,
+ reflectY(kOctagonOuter[i]),
+ circle.fOuterRadius,
+ normInnerRadius,
+ dashParams);
+ }
+
+ // Compute the vertices of the inner octagon.
+ for (int i = 0; i < 8; ++i) {
+ vertices.write(center + kOctagonInner[i] * circle.fInnerRadius,
+ color,
+ reflectY(kOctagonInner[i]) * normInnerRadius,
+ circle.fOuterRadius,
+ normInnerRadius,
+ dashParams);
+ }
+
+ const uint16_t* primIndices = circle_type_to_indices(true);
+ const int primIndexCount = circle_type_to_index_count(true);
+ for (int i = 0; i < primIndexCount; ++i) {
+ *indices++ = primIndices[i] + currStartVertex;
+ }
+
+ currStartVertex += circle_type_to_vert_count(true);
+ }
+
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexed(std::move(indexBuffer), fIndexCount, firstIndex, 0, fVertCount - 1,
+ GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ ButtCapDashedCircleOp* that = t->cast<ButtCapDashedCircleOp>();
+
+        // can only represent 65536 unique vertices with 16-bit indices
+ if (fVertCount + that->fVertCount > 65536) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() &&
+ !fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fCircles.push_back_n(that->fCircles.count(), that->fCircles.begin());
+ fVertCount += that->fVertCount;
+ fIndexCount += that->fIndexCount;
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct Circle {
+ SkPMColor4f fColor;
+ SkScalar fOuterRadius;
+ SkScalar fInnerRadius;
+ SkScalar fOnAngle;
+ SkScalar fTotalAngle;
+ SkScalar fStartAngle;
+ SkScalar fPhaseAngle;
+ SkRect fDevBounds;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ Helper fHelper;
+ SkSTArray<1, Circle, true> fCircles;
+ int fVertCount;
+ int fIndexCount;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class EllipseOp : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+ struct DeviceSpaceParams {
+ SkPoint fCenter;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ };
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& ellipse,
+ const SkStrokeRec& stroke) {
+ DeviceSpaceParams params;
+ // do any matrix crunching before we reset the draw state for device coords
+ params.fCenter = SkPoint::Make(ellipse.centerX(), ellipse.centerY());
+ viewMatrix.mapPoints(&params.fCenter, 1);
+ SkScalar ellipseXRadius = SkScalarHalf(ellipse.width());
+ SkScalar ellipseYRadius = SkScalarHalf(ellipse.height());
+ params.fXRadius = SkScalarAbs(viewMatrix[SkMatrix::kMScaleX] * ellipseXRadius +
+ viewMatrix[SkMatrix::kMSkewX] * ellipseYRadius);
+ params.fYRadius = SkScalarAbs(viewMatrix[SkMatrix::kMSkewY] * ellipseXRadius +
+ viewMatrix[SkMatrix::kMScaleY] * ellipseYRadius);
+
+ // do (potentially) anisotropic mapping of stroke
+ SkVector scaledStroke;
+ SkScalar strokeWidth = stroke.getWidth();
+ scaledStroke.fX = SkScalarAbs(
+ strokeWidth * (viewMatrix[SkMatrix::kMScaleX] + viewMatrix[SkMatrix::kMSkewY]));
+ scaledStroke.fY = SkScalarAbs(
+ strokeWidth * (viewMatrix[SkMatrix::kMSkewX] + viewMatrix[SkMatrix::kMScaleY]));
+
+ SkStrokeRec::Style style = stroke.getStyle();
+ bool isStrokeOnly =
+ SkStrokeRec::kStroke_Style == style || SkStrokeRec::kHairline_Style == style;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == style;
+
+ params.fInnerXRadius = 0;
+ params.fInnerYRadius = 0;
+ if (hasStroke) {
+ if (SkScalarNearlyZero(scaledStroke.length())) {
+ scaledStroke.set(SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ scaledStroke.scale(SK_ScalarHalf);
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (scaledStroke.length() > SK_ScalarHalf &&
+ (0.5f * params.fXRadius > params.fYRadius ||
+ 0.5f * params.fYRadius > params.fXRadius)) {
+ return nullptr;
+ }
+
+            // we don't handle it if the curvature of the stroke is less than the curvature
+            // of the ellipse
+ if (scaledStroke.fX * (params.fXRadius * params.fYRadius) <
+ (scaledStroke.fY * scaledStroke.fY) * params.fXRadius ||
+ scaledStroke.fY * (params.fXRadius * params.fXRadius) <
+ (scaledStroke.fX * scaledStroke.fX) * params.fYRadius) {
+ return nullptr;
+ }
+
+            // this is legit only if the matrix is scale & translation (which should be the
+            // case at the moment)
+ if (isStrokeOnly) {
+ params.fInnerXRadius = params.fXRadius - scaledStroke.fX;
+ params.fInnerYRadius = params.fYRadius - scaledStroke.fY;
+ }
+
+ params.fXRadius += scaledStroke.fX;
+ params.fYRadius += scaledStroke.fY;
+ }
+
+ // For large ovals with low precision floats, we fall back to the path renderer.
+ // To compute the AA at the edge we divide by the gradient, which is clamped to a
+ // minimum value to avoid divides by zero. With large ovals and low precision this
+ // leads to blurring at the edge of the oval.
+ const SkScalar kMaxOvalRadius = 16384;
+ if (!context->priv().caps()->shaderCaps()->floatIs32Bits() &&
+ (params.fXRadius >= kMaxOvalRadius || params.fYRadius >= kMaxOvalRadius)) {
+ return nullptr;
+ }
+
+ return Helper::FactoryHelper<EllipseOp>(context, std::move(paint), viewMatrix,
+ params, stroke);
+ }
+
+ EllipseOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const DeviceSpaceParams& params,
+ const SkStrokeRec& stroke)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage)
+ , fUseScale(false) {
+ SkStrokeRec::Style style = stroke.getStyle();
+ bool isStrokeOnly =
+ SkStrokeRec::kStroke_Style == style || SkStrokeRec::kHairline_Style == style;
+
+ fEllipses.emplace_back(Ellipse{color, params.fXRadius, params.fYRadius,
+ params.fInnerXRadius, params.fInnerYRadius,
+ SkRect::MakeLTRB(params.fCenter.fX - params.fXRadius,
+ params.fCenter.fY - params.fYRadius,
+ params.fCenter.fX + params.fXRadius,
+ params.fCenter.fY + params.fYRadius)});
+
+ this->setBounds(fEllipses.back().fDevBounds, HasAABloat::kYes, IsHairline::kNo);
+
+ // Outset bounds to include half-pixel width antialiasing.
+ fEllipses[0].fDevBounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ fStroked = isStrokeOnly && params.fInnerXRadius > 0 && params.fInnerYRadius > 0;
+ fViewMatrixIfUsingLocalCoords = viewMatrix;
+ }
+
+ const char* name() const override { return "EllipseOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Stroked: %d\n", fStroked);
+ for (const auto& geo : fEllipses) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], "
+ "XRad: %.2f, YRad: %.2f, InnerXRad: %.2f, InnerYRad: %.2f\n",
+ geo.fColor.toBytes_RGBA(), geo.fDevBounds.fLeft, geo.fDevBounds.fTop,
+ geo.fDevBounds.fRight, geo.fDevBounds.fBottom, geo.fXRadius, geo.fYRadius,
+ geo.fInnerXRadius, geo.fInnerYRadius);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ fUseScale = !caps.shaderCaps()->floatIs32Bits() &&
+ !caps.shaderCaps()->hasLowFragmentPrecision();
+ SkPMColor4f* color = &fEllipses.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(new EllipseGeometryProcessor(fStroked, fWideColor, fUseScale,
+ localMatrix));
+ QuadHelper helper(target, gp->vertexStride(), fEllipses.count());
+ GrVertexWriter verts{helper.vertices()};
+ if (!verts.fPtr) {
+ return;
+ }
+
+ for (const auto& ellipse : fEllipses) {
+ GrVertexColor color(ellipse.fColor, fWideColor);
+ SkScalar xRadius = ellipse.fXRadius;
+ SkScalar yRadius = ellipse.fYRadius;
+
+ // Compute the reciprocals of the radii here to save time in the shader
+ struct { float xOuter, yOuter, xInner, yInner; } invRadii = {
+ SkScalarInvert(xRadius),
+ SkScalarInvert(yRadius),
+ SkScalarInvert(ellipse.fInnerXRadius),
+ SkScalarInvert(ellipse.fInnerYRadius)
+ };
+ SkScalar xMaxOffset = xRadius + SK_ScalarHalf;
+ SkScalar yMaxOffset = yRadius + SK_ScalarHalf;
+
+ if (!fStroked) {
+ // For filled ellipses we map a unit circle in the vertex attributes rather than
+ // computing an ellipse and modifying that distance, so we normalize to 1
+ xMaxOffset /= xRadius;
+ yMaxOffset /= yRadius;
+ }
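+            // (Illustrative: for xRadius = 20 the filled case gives
+            // xMaxOffset = 20.5 / 20 = 1.025, i.e. the half-pixel AA ramp extends
+            // 2.5% past the unit circle.)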
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ verts.writeQuad(GrVertexWriter::TriStripFromRect(ellipse.fDevBounds),
+ color,
+ origin_centered_tri_strip(xMaxOffset, yMaxOffset),
+ GrVertexWriter::If(fUseScale, SkTMax(xRadius, yRadius)),
+ invRadii);
+ }
+ helper.recordDraw(target, std::move(gp));
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ EllipseOp* that = t->cast<EllipseOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fStroked != that->fStroked) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() &&
+ !fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fEllipses.push_back_n(that->fEllipses.count(), that->fEllipses.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct Ellipse {
+ SkPMColor4f fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkRect fDevBounds;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ Helper fHelper;
+ bool fStroked;
+ bool fWideColor;
+ bool fUseScale;
+ SkSTArray<1, Ellipse, true> fEllipses;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+class DIEllipseOp : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+ struct DeviceSpaceParams {
+ SkPoint fCenter;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ DIEllipseStyle fStyle;
+ };
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& ellipse,
+ const SkStrokeRec& stroke) {
+ DeviceSpaceParams params;
+ params.fCenter = SkPoint::Make(ellipse.centerX(), ellipse.centerY());
+ params.fXRadius = SkScalarHalf(ellipse.width());
+ params.fYRadius = SkScalarHalf(ellipse.height());
+
+ SkStrokeRec::Style style = stroke.getStyle();
+ params.fStyle = (SkStrokeRec::kStroke_Style == style)
+ ? DIEllipseStyle::kStroke
+ : (SkStrokeRec::kHairline_Style == style)
+ ? DIEllipseStyle::kHairline
+ : DIEllipseStyle::kFill;
+
+ params.fInnerXRadius = 0;
+ params.fInnerYRadius = 0;
+ if (SkStrokeRec::kFill_Style != style && SkStrokeRec::kHairline_Style != style) {
+ SkScalar strokeWidth = stroke.getWidth();
+
+ if (SkScalarNearlyZero(strokeWidth)) {
+ strokeWidth = SK_ScalarHalf;
+ } else {
+ strokeWidth *= SK_ScalarHalf;
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (strokeWidth > SK_ScalarHalf &&
+ (SK_ScalarHalf * params.fXRadius > params.fYRadius ||
+ SK_ScalarHalf * params.fYRadius > params.fXRadius)) {
+ return nullptr;
+ }
+
+            // we don't handle it if the curvature of the stroke is less than that of the ellipse
+ if (strokeWidth * (params.fYRadius * params.fYRadius) <
+ (strokeWidth * strokeWidth) * params.fXRadius) {
+ return nullptr;
+ }
+ if (strokeWidth * (params.fXRadius * params.fXRadius) <
+ (strokeWidth * strokeWidth) * params.fYRadius) {
+ return nullptr;
+ }
+
+ // set inner radius (if needed)
+ if (SkStrokeRec::kStroke_Style == style) {
+ params.fInnerXRadius = params.fXRadius - strokeWidth;
+ params.fInnerYRadius = params.fYRadius - strokeWidth;
+ }
+
+ params.fXRadius += strokeWidth;
+ params.fYRadius += strokeWidth;
+ }
+
+ // For large ovals with low precision floats, we fall back to the path renderer.
+ // To compute the AA at the edge we divide by the gradient, which is clamped to a
+ // minimum value to avoid divides by zero. With large ovals and low precision this
+ // leads to blurring at the edge of the oval.
+ const SkScalar kMaxOvalRadius = 16384;
+ if (!context->priv().caps()->shaderCaps()->floatIs32Bits() &&
+ (params.fXRadius >= kMaxOvalRadius || params.fYRadius >= kMaxOvalRadius)) {
+ return nullptr;
+ }
+
+ if (DIEllipseStyle::kStroke == params.fStyle &&
+ (params.fInnerXRadius <= 0 || params.fInnerYRadius <= 0)) {
+ params.fStyle = DIEllipseStyle::kFill;
+ }
+ return Helper::FactoryHelper<DIEllipseOp>(context, std::move(paint), params, viewMatrix);
+ }
+
+ DIEllipseOp(Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const DeviceSpaceParams& params, const SkMatrix& viewMatrix)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage)
+ , fUseScale(false) {
+ // This expands the outer rect so that after CTM we end up with a half-pixel border
+ SkScalar a = viewMatrix[SkMatrix::kMScaleX];
+ SkScalar b = viewMatrix[SkMatrix::kMSkewX];
+ SkScalar c = viewMatrix[SkMatrix::kMSkewY];
+ SkScalar d = viewMatrix[SkMatrix::kMScaleY];
+ SkScalar geoDx = SK_ScalarHalf / SkScalarSqrt(a * a + c * c);
+ SkScalar geoDy = SK_ScalarHalf / SkScalarSqrt(b * b + d * d);
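+        // (Illustrative: geoDx/geoDy live in pre-transform space. For a uniform
+        // scale-by-2 matrix, a = 2 and c = 0, so geoDx = 0.25, which the CTM maps to
+        // half a pixel in device space.)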
+
+ fEllipses.emplace_back(
+ Ellipse{viewMatrix, color, params.fXRadius, params.fYRadius, params.fInnerXRadius,
+ params.fInnerYRadius, geoDx, geoDy, params.fStyle,
+ SkRect::MakeLTRB(params.fCenter.fX - params.fXRadius - geoDx,
+ params.fCenter.fY - params.fYRadius - geoDy,
+ params.fCenter.fX + params.fXRadius + geoDx,
+ params.fCenter.fY + params.fYRadius + geoDy)});
+ this->setTransformedBounds(fEllipses[0].fBounds, viewMatrix, HasAABloat::kYes,
+ IsHairline::kNo);
+ }
+
+ const char* name() const override { return "DIEllipseOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (const auto& geo : fEllipses) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], XRad: %.2f, "
+ "YRad: %.2f, InnerXRad: %.2f, InnerYRad: %.2f, GeoDX: %.2f, "
+ "GeoDY: %.2f\n",
+ geo.fColor.toBytes_RGBA(), geo.fBounds.fLeft, geo.fBounds.fTop,
+ geo.fBounds.fRight, geo.fBounds.fBottom, geo.fXRadius, geo.fYRadius,
+ geo.fInnerXRadius, geo.fInnerYRadius, geo.fGeoDx, geo.fGeoDy);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ fUseScale = !caps.shaderCaps()->floatIs32Bits() &&
+ !caps.shaderCaps()->hasLowFragmentPrecision();
+ SkPMColor4f* color = &fEllipses.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(
+ new DIEllipseGeometryProcessor(fWideColor, fUseScale, this->viewMatrix(),
+ this->style()));
+
+ QuadHelper helper(target, gp->vertexStride(), fEllipses.count());
+ GrVertexWriter verts{helper.vertices()};
+ if (!verts.fPtr) {
+ return;
+ }
+
+ for (const auto& ellipse : fEllipses) {
+ GrVertexColor color(ellipse.fColor, fWideColor);
+ SkScalar xRadius = ellipse.fXRadius;
+ SkScalar yRadius = ellipse.fYRadius;
+
+ // This adjusts the "radius" to include the half-pixel border
+ SkScalar offsetDx = ellipse.fGeoDx / xRadius;
+ SkScalar offsetDy = ellipse.fGeoDy / yRadius;
+
+ // By default, constructed so that inner offset is (0, 0) for all points
+ SkScalar innerRatioX = -offsetDx;
+ SkScalar innerRatioY = -offsetDy;
+
+ // ... unless we're stroked
+ if (DIEllipseStyle::kStroke == this->style()) {
+ innerRatioX = xRadius / ellipse.fInnerXRadius;
+ innerRatioY = yRadius / ellipse.fInnerYRadius;
+ }
+
+ verts.writeQuad(GrVertexWriter::TriStripFromRect(ellipse.fBounds),
+ color,
+ origin_centered_tri_strip(1.0f + offsetDx, 1.0f + offsetDy),
+ GrVertexWriter::If(fUseScale, SkTMax(xRadius, yRadius)),
+ origin_centered_tri_strip(innerRatioX + offsetDx,
+ innerRatioY + offsetDy));
+ }
+ helper.recordDraw(target, std::move(gp));
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ DIEllipseOp* that = t->cast<DIEllipseOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->style() != that->style()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // TODO rewrite to allow positioning on CPU
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fEllipses.push_back_n(that->fEllipses.count(), that->fEllipses.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ const SkMatrix& viewMatrix() const { return fEllipses[0].fViewMatrix; }
+ DIEllipseStyle style() const { return fEllipses[0].fStyle; }
+
+ struct Ellipse {
+ SkMatrix fViewMatrix;
+ SkPMColor4f fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkScalar fGeoDx;
+ SkScalar fGeoDy;
+ DIEllipseStyle fStyle;
+ SkRect fBounds;
+ };
+
+ Helper fHelper;
+ bool fWideColor;
+ bool fUseScale;
+ SkSTArray<1, Ellipse, true> fEllipses;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// We have three possible cases for geometry for a roundrect.
+//
+// In the case of a normal fill or a stroke, we draw the roundrect as a 9-patch:
+// ____________
+// |_|________|_|
+// | | | |
+// | | | |
+// | | | |
+// |_|________|_|
+// |_|________|_|
+//
+// For strokes, we don't draw the center quad.
+//
+// For circular roundrects, in the case where the stroke width is greater than twice
+// the corner radius (overstroke), we add additional geometry to mark out the rectangle
+// in the center. The shared vertices are duplicated so we can set a different outer radius
+// for the fill calculation.
+// ____________
+// |_|________|_|
+// | |\ ____ /| |
+// | | | | | |
+// | | |____| | |
+// |_|/______\|_|
+// |_|________|_|
+//
+// We don't draw the center quad from the fill rect in this case.
+//
+// For filled rrects that need to provide a distance vector we reuse the overstroke
+// geometry but make the inner rect degenerate (either a point or a horizontal or
+// vertical line).
+
+static const uint16_t gOverstrokeRRectIndices[] = {
+ // clang-format off
+ // overstroke quads
+ // we place this at the beginning so that we can skip these indices when rendering normally
+ 16, 17, 19, 16, 19, 18,
+ 19, 17, 23, 19, 23, 21,
+ 21, 23, 22, 21, 22, 20,
+ 22, 16, 18, 22, 18, 20,
+
+ // corners
+ 0, 1, 5, 0, 5, 4,
+ 2, 3, 7, 2, 7, 6,
+ 8, 9, 13, 8, 13, 12,
+ 10, 11, 15, 10, 15, 14,
+
+ // edges
+ 1, 2, 6, 1, 6, 5,
+ 4, 5, 9, 4, 9, 8,
+ 6, 7, 11, 6, 11, 10,
+ 9, 10, 14, 9, 14, 13,
+
+ // center
+ // we place this at the end so that we can ignore these indices when not rendering as filled
+ 5, 6, 10, 5, 10, 9,
+ // clang-format on
+};
+
+// fill and standard stroke indices skip the overstroke "ring"
+static const uint16_t* gStandardRRectIndices = gOverstrokeRRectIndices + 6 * 4;
+
+// overstroke count is arraysize minus the center indices
+static const int kIndicesPerOverstrokeRRect = SK_ARRAY_COUNT(gOverstrokeRRectIndices) - 6;
+// fill count skips overstroke indices and includes center
+static const int kIndicesPerFillRRect = kIndicesPerOverstrokeRRect - 6 * 4 + 6;
+// stroke count is fill count minus center indices
+static const int kIndicesPerStrokeRRect = kIndicesPerFillRRect - 6;
+static const int kVertsPerStandardRRect = 16;
+static const int kVertsPerOverstrokeRRect = 24;
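+
+// For concreteness: gOverstrokeRRectIndices holds 78 entries (4 overstroke quads,
+// 4 corners, and 4 edges at 6 indices each, plus the 6 center indices), so
+// kIndicesPerOverstrokeRRect = 78 - 6 = 72, kIndicesPerFillRRect = 72 - 24 + 6 = 54,
+// and kIndicesPerStrokeRRect = 54 - 6 = 48.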
+
+enum RRectType {
+ kFill_RRectType,
+ kStroke_RRectType,
+ kOverstroke_RRectType,
+};
+
+static int rrect_type_to_vert_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ case kStroke_RRectType:
+ return kVertsPerStandardRRect;
+ case kOverstroke_RRectType:
+ return kVertsPerOverstrokeRRect;
+ }
+ SK_ABORT("Invalid type");
+}
+
+static int rrect_type_to_index_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ return kIndicesPerFillRRect;
+ case kStroke_RRectType:
+ return kIndicesPerStrokeRRect;
+ case kOverstroke_RRectType:
+ return kIndicesPerOverstrokeRRect;
+ }
+ SK_ABORT("Invalid type");
+}
+
+static const uint16_t* rrect_type_to_indices(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ case kStroke_RRectType:
+ return gStandardRRectIndices;
+ case kOverstroke_RRectType:
+ return gOverstrokeRRectIndices;
+ }
+ SK_ABORT("Invalid type");
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// For distance computations in the interior of filled rrects we:
+//
+// add an interior degenerate (point or line) rect
+// each vertex of that rect gets -outerRad as its radius
+// this makes the computation of the distance to the outer edge be negative
+// negative values are caught and then handled differently in the GP's onEmitCode
+// each vertex is also given the normalized x & y distance from the interior rect's edge
+// the GP takes the min of those depths +1 to get the normalized distance to the outer edge
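+//
+// As a rough illustration: with outerRad = 10, each degenerate-rect vertex stores a
+// radius of -10, so its distance-to-outer-edge computation comes out negative and the
+// GP instead takes min(xDepth, yDepth) + 1 as the normalized distance to the outer edge.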
+
+class CircularRRectOp : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ // A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
+ // whether the rrect is only stroked or stroked and filled.
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& devRect,
+ float devRadius,
+ float devStrokeWidth,
+ bool strokeOnly) {
+ return Helper::FactoryHelper<CircularRRectOp>(context, std::move(paint), viewMatrix,
+ devRect, devRadius,
+ devStrokeWidth, strokeOnly);
+ }
+ CircularRRectOp(Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkRect& devRect, float devRadius,
+ float devStrokeWidth, bool strokeOnly)
+ : INHERITED(ClassID())
+ , fViewMatrixIfUsingLocalCoords(viewMatrix)
+ , fHelper(helperArgs, GrAAType::kCoverage) {
+ SkRect bounds = devRect;
+ SkASSERT(!(devStrokeWidth <= 0 && strokeOnly));
+ SkScalar innerRadius = 0.0f;
+ SkScalar outerRadius = devRadius;
+ SkScalar halfWidth = 0;
+ RRectType type = kFill_RRectType;
+ if (devStrokeWidth > 0) {
+ if (SkScalarNearlyZero(devStrokeWidth)) {
+ halfWidth = SK_ScalarHalf;
+ } else {
+ halfWidth = SkScalarHalf(devStrokeWidth);
+ }
+
+ if (strokeOnly) {
+ // Outset stroke by 1/4 pixel
+ devStrokeWidth += 0.25f;
+                // If the stroke is greater than the width or height, this is still a fill;
+                // otherwise we compute stroke params
+ if (devStrokeWidth <= devRect.width() && devStrokeWidth <= devRect.height()) {
+ innerRadius = devRadius - halfWidth;
+ type = (innerRadius >= 0) ? kStroke_RRectType : kOverstroke_RRectType;
+ }
+ }
+ outerRadius += halfWidth;
+ bounds.outset(halfWidth, halfWidth);
+ }
+
+        // The radii are outset for two reasons. First, it allows the shader to perform a
+        // simpler computation because the computed alpha is zero, rather than 50%, at the radius.
+        // Second, the outer radius is used to compute the verts of the bounding box that is
+        // rendered, and the outset ensures the box will cover all pixels partially covered by
+        // the rrect corners.
+ outerRadius += SK_ScalarHalf;
+ innerRadius -= SK_ScalarHalf;
+
+ this->setBounds(bounds, HasAABloat::kYes, IsHairline::kNo);
+
+ // Expand the rect for aa to generate correct vertices.
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ fRRects.emplace_back(RRect{color, innerRadius, outerRadius, bounds, type});
+ fVertCount = rrect_type_to_vert_count(type);
+ fIndexCount = rrect_type_to_index_count(type);
+ fAllFill = (kFill_RRectType == type);
+ }
+
+ const char* name() const override { return "CircularRRectOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fRRects.count(); ++i) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "InnerRad: %.2f, OuterRad: %.2f\n",
+ fRRects[i].fColor.toBytes_RGBA(), fRRects[i].fDevBounds.fLeft,
+ fRRects[i].fDevBounds.fTop, fRRects[i].fDevBounds.fRight,
+ fRRects[i].fDevBounds.fBottom, fRRects[i].fInnerRadius,
+ fRRects[i].fOuterRadius);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ SkPMColor4f* color = &fRRects.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ static void FillInOverstrokeVerts(GrVertexWriter& verts, const SkRect& bounds, SkScalar smInset,
+ SkScalar bigInset, SkScalar xOffset, SkScalar outerRadius,
+ SkScalar innerRadius, const GrVertexColor& color) {
+ SkASSERT(smInset < bigInset);
+
+ // TL
+ verts.write(bounds.fLeft + smInset, bounds.fTop + smInset,
+ color,
+ xOffset, 0.0f,
+ outerRadius, innerRadius);
+
+ // TR
+ verts.write(bounds.fRight - smInset, bounds.fTop + smInset,
+ color,
+ xOffset, 0.0f,
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fLeft + bigInset, bounds.fTop + bigInset,
+ color,
+ 0.0f, 0.0f,
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fRight - bigInset, bounds.fTop + bigInset,
+ color,
+ 0.0f, 0.0f,
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fLeft + bigInset, bounds.fBottom - bigInset,
+ color,
+ 0.0f, 0.0f,
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fRight - bigInset, bounds.fBottom - bigInset,
+ color,
+ 0.0f, 0.0f,
+ outerRadius, innerRadius);
+
+ // BL
+ verts.write(bounds.fLeft + smInset, bounds.fBottom - smInset,
+ color,
+ xOffset, 0.0f,
+ outerRadius, innerRadius);
+
+ // BR
+ verts.write(bounds.fRight - smInset, bounds.fBottom - smInset,
+ color,
+ xOffset, 0.0f,
+ outerRadius, innerRadius);
+ }
+
+ void onPrepareDraws(Target* target) override {
+ // Invert the view matrix as a local matrix (if any other processors require coords).
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(
+ new CircleGeometryProcessor(!fAllFill, false, false, false, false, fWideColor,
+ localMatrix));
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+
+ GrVertexWriter verts{target->makeVertexSpace(gp->vertexStride(), fVertCount,
+ &vertexBuffer, &firstVertex)};
+ if (!verts.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex = 0;
+ uint16_t* indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ int currStartVertex = 0;
+ for (const auto& rrect : fRRects) {
+ GrVertexColor color(rrect.fColor, fWideColor);
+ SkScalar outerRadius = rrect.fOuterRadius;
+ const SkRect& bounds = rrect.fDevBounds;
+
+ SkScalar yCoords[4] = {bounds.fTop, bounds.fTop + outerRadius,
+ bounds.fBottom - outerRadius, bounds.fBottom};
+
+ SkScalar yOuterRadii[4] = {-1, 0, 0, 1};
+ // The inner radius in the vertex data must be specified in normalized space.
+ // For fills, specifying -1/outerRadius guarantees an alpha of 1.0 at the inner radius.
+ SkScalar innerRadius = rrect.fType != kFill_RRectType
+ ? rrect.fInnerRadius / rrect.fOuterRadius
+ : -1.0f / rrect.fOuterRadius;
+ for (int i = 0; i < 4; ++i) {
+ verts.write(bounds.fLeft, yCoords[i],
+ color,
+ -1.0f, yOuterRadii[i],
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fLeft + outerRadius, yCoords[i],
+ color,
+ 0.0f, yOuterRadii[i],
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fRight - outerRadius, yCoords[i],
+ color,
+ 0.0f, yOuterRadii[i],
+ outerRadius, innerRadius);
+
+ verts.write(bounds.fRight, yCoords[i],
+ color,
+ 1.0f, yOuterRadii[i],
+ outerRadius, innerRadius);
+ }
+ // Add the additional vertices for overstroked rrects.
+ // Effectively this is an additional stroked rrect, with its
+ // outer radius = outerRadius - innerRadius, and inner radius = 0.
+ // This will give us correct AA in the center and the correct
+ // distance to the outer edge.
+ //
+ // Also, the outer offset is a constant vector pointing to the right, which
+ // guarantees that the distance value along the outer rectangle is constant.
+ if (kOverstroke_RRectType == rrect.fType) {
+ SkASSERT(rrect.fInnerRadius <= 0.0f);
+
+ SkScalar overstrokeOuterRadius = outerRadius - rrect.fInnerRadius;
+ // this is the normalized distance from the outer rectangle of this
+ // geometry to the outer edge
+ SkScalar maxOffset = -rrect.fInnerRadius / overstrokeOuterRadius;
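+                // e.g., innerRadius = -2 with outerRadius = 5 gives an overstroke outer
+                // radius of 7 and maxOffset = 2/7.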
+
+ FillInOverstrokeVerts(verts, bounds, outerRadius, overstrokeOuterRadius, maxOffset,
+ overstrokeOuterRadius, 0.0f, color);
+ }
+
+ const uint16_t* primIndices = rrect_type_to_indices(rrect.fType);
+ const int primIndexCount = rrect_type_to_index_count(rrect.fType);
+ for (int i = 0; i < primIndexCount; ++i) {
+ *indices++ = primIndices[i] + currStartVertex;
+ }
+
+ currStartVertex += rrect_type_to_vert_count(rrect.fType);
+ }
+
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexed(std::move(indexBuffer), fIndexCount, firstIndex, 0, fVertCount - 1,
+ GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ CircularRRectOp* that = t->cast<CircularRRectOp>();
+
+        // can only represent 65536 unique vertices with 16-bit indices
+ if (fVertCount + that->fVertCount > 65536) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() &&
+ !fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fRRects.push_back_n(that->fRRects.count(), that->fRRects.begin());
+ fVertCount += that->fVertCount;
+ fIndexCount += that->fIndexCount;
+ fAllFill = fAllFill && that->fAllFill;
+ fWideColor = fWideColor || that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct RRect {
+ SkPMColor4f fColor;
+ SkScalar fInnerRadius;
+ SkScalar fOuterRadius;
+ SkRect fDevBounds;
+ RRectType fType;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ Helper fHelper;
+ int fVertCount;
+ int fIndexCount;
+ bool fAllFill;
+ bool fWideColor;
+ SkSTArray<1, RRect, true> fRRects;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+static const int kNumRRectsInIndexBuffer = 256;
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
+GR_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
+static sk_sp<const GrBuffer> get_rrect_index_buffer(RRectType type,
+ GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
+ switch (type) {
+ case kFill_RRectType:
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ gStandardRRectIndices, kIndicesPerFillRRect, kNumRRectsInIndexBuffer,
+ kVertsPerStandardRRect, gRRectOnlyIndexBufferKey);
+ case kStroke_RRectType:
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ gStandardRRectIndices, kIndicesPerStrokeRRect, kNumRRectsInIndexBuffer,
+ kVertsPerStandardRRect, gStrokeRRectOnlyIndexBufferKey);
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
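+
+// Note: findOrCreatePatternedIndexBuffer repeats the base pattern
+// kNumRRectsInIndexBuffer (256) times, offsetting each repetition by
+// kVertsPerStandardRRect (16) vertices, so a single cached index buffer can draw up
+// to 256 rrects in one call.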
+
+class EllipticalRRectOp : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+    // devStrokeWidths values <= 0 indicate a fill only. Otherwise, strokeOnly indicates
+    // whether the rrect is only stroked or stroked and filled.
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& devRect,
+ float devXRadius,
+ float devYRadius,
+ SkVector devStrokeWidths,
+ bool strokeOnly) {
+ SkASSERT(devXRadius >= 0.5);
+ SkASSERT(devYRadius >= 0.5);
+ SkASSERT((devStrokeWidths.fX > 0) == (devStrokeWidths.fY > 0));
+ SkASSERT(!(strokeOnly && devStrokeWidths.fX <= 0));
+ if (devStrokeWidths.fX > 0) {
+ if (SkScalarNearlyZero(devStrokeWidths.length())) {
+ devStrokeWidths.set(SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ devStrokeWidths.scale(SK_ScalarHalf);
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (devStrokeWidths.length() > SK_ScalarHalf &&
+ (SK_ScalarHalf * devXRadius > devYRadius ||
+ SK_ScalarHalf * devYRadius > devXRadius)) {
+ return nullptr;
+ }
+
+            // we don't handle it if the curvature of the stroke is less than that of the ellipse
+ if (devStrokeWidths.fX * (devYRadius * devYRadius) <
+ (devStrokeWidths.fY * devStrokeWidths.fY) * devXRadius) {
+ return nullptr;
+ }
+ if (devStrokeWidths.fY * (devXRadius * devXRadius) <
+ (devStrokeWidths.fX * devStrokeWidths.fX) * devYRadius) {
+ return nullptr;
+ }
+ }
+ return Helper::FactoryHelper<EllipticalRRectOp>(context, std::move(paint),
+ viewMatrix, devRect,
+ devXRadius, devYRadius, devStrokeWidths,
+ strokeOnly);
+ }
+
+ EllipticalRRectOp(Helper::MakeArgs helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkRect& devRect, float devXRadius,
+ float devYRadius, SkVector devStrokeHalfWidths, bool strokeOnly)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage)
+ , fUseScale(false) {
+ SkScalar innerXRadius = 0.0f;
+ SkScalar innerYRadius = 0.0f;
+ SkRect bounds = devRect;
+ bool stroked = false;
+ if (devStrokeHalfWidths.fX > 0) {
+            // This is valid only for scale & translation matrices (which should be the case at the moment)
+ if (strokeOnly) {
+ innerXRadius = devXRadius - devStrokeHalfWidths.fX;
+ innerYRadius = devYRadius - devStrokeHalfWidths.fY;
+ stroked = (innerXRadius >= 0 && innerYRadius >= 0);
+ }
+
+ devXRadius += devStrokeHalfWidths.fX;
+ devYRadius += devStrokeHalfWidths.fY;
+ bounds.outset(devStrokeHalfWidths.fX, devStrokeHalfWidths.fY);
+ }
+
+ fStroked = stroked;
+ fViewMatrixIfUsingLocalCoords = viewMatrix;
+ this->setBounds(bounds, HasAABloat::kYes, IsHairline::kNo);
+ // Expand the rect for aa in order to generate the correct vertices.
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+ fRRects.emplace_back(
+ RRect{color, devXRadius, devYRadius, innerXRadius, innerYRadius, bounds});
+ }
+
+ const char* name() const override { return "EllipticalRRectOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Stroked: %d\n", fStroked);
+ for (const auto& geo : fRRects) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], "
+ "XRad: %.2f, YRad: %.2f, InnerXRad: %.2f, InnerYRad: %.2f\n",
+ geo.fColor.toBytes_RGBA(), geo.fDevBounds.fLeft, geo.fDevBounds.fTop,
+ geo.fDevBounds.fRight, geo.fDevBounds.fBottom, geo.fXRadius, geo.fYRadius,
+ geo.fInnerXRadius, geo.fInnerYRadius);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ fUseScale = !caps.shaderCaps()->floatIs32Bits();
+ SkPMColor4f* color = &fRRects.front().fColor;
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, color,
+ &fWideColor);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(new EllipseGeometryProcessor(fStroked, fWideColor, fUseScale,
+ localMatrix));
+
+ // drop out the middle quad if we're stroked
+ int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerFillRRect;
+ sk_sp<const GrBuffer> indexBuffer = get_rrect_index_buffer(
+ fStroked ? kStroke_RRectType : kFill_RRectType, target->resourceProvider());
+
+ if (!indexBuffer) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ PatternHelper helper(target, GrPrimitiveType::kTriangles, gp->vertexStride(),
+ std::move(indexBuffer), kVertsPerStandardRRect, indicesPerInstance,
+ fRRects.count());
+ GrVertexWriter verts{helper.vertices()};
+ if (!verts.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (const auto& rrect : fRRects) {
+ GrVertexColor color(rrect.fColor, fWideColor);
+ // Compute the reciprocals of the radii here to save time in the shader
+ float reciprocalRadii[4] = {
+ SkScalarInvert(rrect.fXRadius),
+ SkScalarInvert(rrect.fYRadius),
+ SkScalarInvert(rrect.fInnerXRadius),
+ SkScalarInvert(rrect.fInnerYRadius)
+ };
+
+ // Extend the radii out half a pixel to antialias.
+ SkScalar xOuterRadius = rrect.fXRadius + SK_ScalarHalf;
+ SkScalar yOuterRadius = rrect.fYRadius + SK_ScalarHalf;
+
+ SkScalar xMaxOffset = xOuterRadius;
+ SkScalar yMaxOffset = yOuterRadius;
+ if (!fStroked) {
+ // For filled rrects we map a unit circle in the vertex attributes rather than
+ // computing an ellipse and modifying that distance, so we normalize to 1.
+ xMaxOffset /= rrect.fXRadius;
+ yMaxOffset /= rrect.fYRadius;
+ }
+
+ const SkRect& bounds = rrect.fDevBounds;
+
+ SkScalar yCoords[4] = {bounds.fTop, bounds.fTop + yOuterRadius,
+ bounds.fBottom - yOuterRadius, bounds.fBottom};
+ SkScalar yOuterOffsets[4] = {yMaxOffset,
+ SK_ScalarNearlyZero, // we're using inversesqrt() in
+ // shader, so can't be exactly 0
+ SK_ScalarNearlyZero, yMaxOffset};
+
+ auto maybeScale = GrVertexWriter::If(fUseScale, SkTMax(rrect.fXRadius, rrect.fYRadius));
+ for (int i = 0; i < 4; ++i) {
+ verts.write(bounds.fLeft, yCoords[i],
+ color,
+ xMaxOffset, yOuterOffsets[i],
+ maybeScale,
+ reciprocalRadii);
+
+ verts.write(bounds.fLeft + xOuterRadius, yCoords[i],
+ color,
+ SK_ScalarNearlyZero, yOuterOffsets[i],
+ maybeScale,
+ reciprocalRadii);
+
+ verts.write(bounds.fRight - xOuterRadius, yCoords[i],
+ color,
+ SK_ScalarNearlyZero, yOuterOffsets[i],
+ maybeScale,
+ reciprocalRadii);
+
+ verts.write(bounds.fRight, yCoords[i],
+ color,
+ xMaxOffset, yOuterOffsets[i],
+ maybeScale,
+ reciprocalRadii);
+ }
+ }
+ helper.recordDraw(target, std::move(gp));
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ EllipticalRRectOp* that = t->cast<EllipticalRRectOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fStroked != that->fStroked) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fHelper.usesLocalCoords() &&
+ !fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fRRects.push_back_n(that->fRRects.count(), that->fRRects.begin());
+ fWideColor = fWideColor || that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct RRect {
+ SkPMColor4f fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkRect fDevBounds;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ Helper fHelper;
+ bool fStroked;
+ bool fWideColor;
+ bool fUseScale;
+ SkSTArray<1, RRect, true> fRRects;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+std::unique_ptr<GrDrawOp> GrOvalOpFactory::MakeCircularRRectOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps) {
+ SkASSERT(viewMatrix.rectStaysRect());
+ SkASSERT(viewMatrix.isSimilarity());
+ SkASSERT(rrect.isSimple());
+ SkASSERT(!rrect.isOval());
+ SkASSERT(SkRRectPriv::GetSimpleRadii(rrect).fX == SkRRectPriv::GetSimpleRadii(rrect).fY);
+
+ // RRect ops only handle simple, but not too simple, rrects.
+ // Do any matrix crunching before we reset the draw state for device coords.
+ const SkRect& rrectBounds = rrect.getBounds();
+ SkRect bounds;
+ viewMatrix.mapRect(&bounds, rrectBounds);
+
+ SkScalar radius = SkRRectPriv::GetSimpleRadii(rrect).fX;
+ SkScalar scaledRadius = SkScalarAbs(radius * (viewMatrix[SkMatrix::kMScaleX] +
+ viewMatrix[SkMatrix::kMSkewY]));
+
+ // Do mapping of stroke. Use -1 to indicate fill-only draws.
+ SkScalar scaledStroke = -1;
+ SkScalar strokeWidth = stroke.getWidth();
+ SkStrokeRec::Style style = stroke.getStyle();
+
+ bool isStrokeOnly =
+ SkStrokeRec::kStroke_Style == style || SkStrokeRec::kHairline_Style == style;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == style;
+
+ if (hasStroke) {
+ if (SkStrokeRec::kHairline_Style == style) {
+ scaledStroke = SK_Scalar1;
+ } else {
+            scaledStroke = SkScalarAbs(
+                    strokeWidth * (viewMatrix[SkMatrix::kMScaleX] + viewMatrix[SkMatrix::kMSkewY]));
+ }
+ }
+
+ // The way the effect interpolates the offset-to-ellipse/circle-center attribute only works on
+ // the interior of the rrect if the radii are >= 0.5. Otherwise, the inner rect of the nine-
+ // patch will have fractional coverage. This only matters when the interior is actually filled.
+ // We could consider falling back to rect rendering here, since a tiny radius is
+ // indistinguishable from a square corner.
+ if (!isStrokeOnly && SK_ScalarHalf > scaledRadius) {
+ return nullptr;
+ }
+
+ return CircularRRectOp::Make(context, std::move(paint), viewMatrix, bounds, scaledRadius,
+ scaledStroke, isStrokeOnly);
+}
+
+static std::unique_ptr<GrDrawOp> make_rrect_op(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke) {
+ SkASSERT(viewMatrix.rectStaysRect());
+ SkASSERT(rrect.isSimple());
+ SkASSERT(!rrect.isOval());
+
+ // RRect ops only handle simple, but not too simple, rrects.
+ // Do any matrix crunching before we reset the draw state for device coords.
+ const SkRect& rrectBounds = rrect.getBounds();
+ SkRect bounds;
+ viewMatrix.mapRect(&bounds, rrectBounds);
+
+ SkVector radii = SkRRectPriv::GetSimpleRadii(rrect);
+ SkScalar xRadius = SkScalarAbs(viewMatrix[SkMatrix::kMScaleX] * radii.fX +
+ viewMatrix[SkMatrix::kMSkewY] * radii.fY);
+ SkScalar yRadius = SkScalarAbs(viewMatrix[SkMatrix::kMSkewX] * radii.fX +
+ viewMatrix[SkMatrix::kMScaleY] * radii.fY);
+
+ SkStrokeRec::Style style = stroke.getStyle();
+
+ // Do (potentially) anisotropic mapping of stroke. Use -1s to indicate fill-only draws.
+ SkVector scaledStroke = {-1, -1};
+ SkScalar strokeWidth = stroke.getWidth();
+
+ bool isStrokeOnly =
+ SkStrokeRec::kStroke_Style == style || SkStrokeRec::kHairline_Style == style;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == style;
+
+ if (hasStroke) {
+ if (SkStrokeRec::kHairline_Style == style) {
+ scaledStroke.set(1, 1);
+ } else {
+ scaledStroke.fX = SkScalarAbs(
+ strokeWidth * (viewMatrix[SkMatrix::kMScaleX] + viewMatrix[SkMatrix::kMSkewY]));
+ scaledStroke.fY = SkScalarAbs(
+ strokeWidth * (viewMatrix[SkMatrix::kMSkewX] + viewMatrix[SkMatrix::kMScaleY]));
+ }
+
+        // if half of the stroke width is greater than the radius, we don't handle that right now
+ if ((SK_ScalarHalf * scaledStroke.fX > xRadius ||
+ SK_ScalarHalf * scaledStroke.fY > yRadius)) {
+ return nullptr;
+ }
+ }
+
+ // The matrix may have a rotation by an odd multiple of 90 degrees.
+ if (viewMatrix.getScaleX() == 0) {
+ std::swap(xRadius, yRadius);
+ std::swap(scaledStroke.fX, scaledStroke.fY);
+ }
+
+ // The way the effect interpolates the offset-to-ellipse/circle-center attribute only works on
+ // the interior of the rrect if the radii are >= 0.5. Otherwise, the inner rect of the nine-
+ // patch will have fractional coverage. This only matters when the interior is actually filled.
+ // We could consider falling back to rect rendering here, since a tiny radius is
+ // indistinguishable from a square corner.
+ if (!isStrokeOnly && (SK_ScalarHalf > xRadius || SK_ScalarHalf > yRadius)) {
+ return nullptr;
+ }
+
+    // use the elliptical rrect op (circular-corner rrects are routed to MakeCircularRRectOp by callers)
+ return EllipticalRRectOp::Make(context, std::move(paint), viewMatrix, bounds,
+ xRadius, yRadius, scaledStroke, isStrokeOnly);
+}
+
+std::unique_ptr<GrDrawOp> GrOvalOpFactory::MakeRRectOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps) {
+ if (rrect.isOval()) {
+ return MakeOvalOp(context, std::move(paint), viewMatrix, rrect.getBounds(),
+ GrStyle(stroke, nullptr), shaderCaps);
+ }
+
+ if (!viewMatrix.rectStaysRect() || !rrect.isSimple()) {
+ return nullptr;
+ }
+
+ return make_rrect_op(context, std::move(paint), viewMatrix, rrect, stroke);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrDrawOp> GrOvalOpFactory::MakeCircleOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style,
+ const GrShaderCaps* shaderCaps) {
+ SkScalar width = oval.width();
+ SkASSERT(width > SK_ScalarNearlyZero && SkScalarNearlyEqual(width, oval.height()) &&
+ circle_stays_circle(viewMatrix));
+
+ auto r = width / 2.f;
+ SkPoint center = { oval.centerX(), oval.centerY() };
+ if (style.hasNonDashPathEffect()) {
+ return nullptr;
+ } else if (style.isDashed()) {
+ if (style.strokeRec().getCap() != SkPaint::kButt_Cap ||
+ style.dashIntervalCnt() != 2 || style.strokeRec().getWidth() >= width) {
+ return nullptr;
+ }
+ auto onInterval = style.dashIntervals()[0];
+ auto offInterval = style.dashIntervals()[1];
+ if (offInterval == 0) {
+ GrStyle strokeStyle(style.strokeRec(), nullptr);
+ return MakeOvalOp(context, std::move(paint), viewMatrix, oval,
+ strokeStyle, shaderCaps);
+ } else if (onInterval == 0) {
+ // There is nothing to draw but we have no way to indicate that here.
+ return nullptr;
+ }
+ auto angularOnInterval = onInterval / r;
+ auto angularOffInterval = offInterval / r;
+ auto phaseAngle = style.dashPhase() / r;
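+        // Arc length s = r * angle, so dividing the pixel intervals by r converts them
+        // to radians; e.g., a 5px on-interval on a circle of radius 10 spans 0.5 radians.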
+ // Currently this function doesn't accept ovals with different start angles, though
+ // it could.
+ static const SkScalar kStartAngle = 0.f;
+ return ButtCapDashedCircleOp::Make(context, std::move(paint), viewMatrix, center, r,
+ style.strokeRec().getWidth(), kStartAngle,
+ angularOnInterval, angularOffInterval, phaseAngle);
+ }
+ return CircleOp::Make(context, std::move(paint), viewMatrix, center, r, style);
+}
+
+std::unique_ptr<GrDrawOp> GrOvalOpFactory::MakeOvalOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style,
+ const GrShaderCaps* shaderCaps) {
+ // we can draw circles
+ SkScalar width = oval.width();
+ if (width > SK_ScalarNearlyZero && SkScalarNearlyEqual(width, oval.height()) &&
+ circle_stays_circle(viewMatrix)) {
+ return MakeCircleOp(context, std::move(paint), viewMatrix, oval, style, shaderCaps);
+ }
+
+ if (style.pathEffect()) {
+ return nullptr;
+ }
+
+ // prefer the device space ellipse op for batchability
+ if (viewMatrix.rectStaysRect()) {
+ return EllipseOp::Make(context, std::move(paint), viewMatrix, oval, style.strokeRec());
+ }
+
+ // Otherwise, if we have shader derivative support, render as device-independent
+ if (shaderCaps->shaderDerivativeSupport()) {
+ SkScalar a = viewMatrix[SkMatrix::kMScaleX];
+ SkScalar b = viewMatrix[SkMatrix::kMSkewX];
+ SkScalar c = viewMatrix[SkMatrix::kMSkewY];
+ SkScalar d = viewMatrix[SkMatrix::kMScaleY];
+ // Check for near-degenerate matrix
+ if (a*a + c*c > SK_ScalarNearlyZero && b*b + d*d > SK_ScalarNearlyZero) {
+ return DIEllipseOp::Make(context, std::move(paint), viewMatrix, oval,
+ style.strokeRec());
+ }
+ }
+
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrDrawOp> GrOvalOpFactory::MakeArcOp(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const GrStyle& style,
+ const GrShaderCaps* shaderCaps) {
+ SkASSERT(!oval.isEmpty());
+ SkASSERT(sweepAngle);
+ SkScalar width = oval.width();
+ if (SkScalarAbs(sweepAngle) >= 360.f) {
+ return nullptr;
+ }
+ if (!SkScalarNearlyEqual(width, oval.height()) || !circle_stays_circle(viewMatrix)) {
+ return nullptr;
+ }
+ SkPoint center = {oval.centerX(), oval.centerY()};
+ CircleOp::ArcParams arcParams = {SkDegreesToRadians(startAngle), SkDegreesToRadians(sweepAngle),
+ useCenter};
+ return CircleOp::Make(context, std::move(paint), viewMatrix,
+ center, width / 2.f, style, &arcParams);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(CircleOp) {
+ do {
+ SkScalar rotate = random->nextSScalar1() * 360.f;
+ SkScalar translateX = random->nextSScalar1() * 1000.f;
+ SkScalar translateY = random->nextSScalar1() * 1000.f;
+ SkScalar scale;
+ do {
+ scale = random->nextSScalar1() * 100.f;
+ } while (scale == 0);
+ SkMatrix viewMatrix;
+ viewMatrix.setRotate(rotate);
+ viewMatrix.postTranslate(translateX, translateY);
+ viewMatrix.postScale(scale, scale);
+ SkRect circle = GrTest::TestSquare(random);
+ SkPoint center = {circle.centerX(), circle.centerY()};
+ SkScalar radius = circle.width() / 2.f;
+ SkStrokeRec stroke = GrTest::TestStrokeRec(random);
+ CircleOp::ArcParams arcParamsTmp;
+ const CircleOp::ArcParams* arcParams = nullptr;
+ if (random->nextBool()) {
+ arcParamsTmp.fStartAngleRadians = random->nextSScalar1() * SK_ScalarPI * 2;
+ arcParamsTmp.fSweepAngleRadians = random->nextSScalar1() * SK_ScalarPI * 2 - .01f;
+ arcParamsTmp.fUseCenter = random->nextBool();
+ arcParams = &arcParamsTmp;
+ }
+ std::unique_ptr<GrDrawOp> op = CircleOp::Make(context, std::move(paint), viewMatrix,
+ center, radius,
+ GrStyle(stroke, nullptr), arcParams);
+ if (op) {
+ return op;
+ }
+ assert_alive(paint);
+ } while (true);
+}
+
+GR_DRAW_OP_TEST_DEFINE(ButtCapDashedCircleOp) {
+ SkScalar rotate = random->nextSScalar1() * 360.f;
+ SkScalar translateX = random->nextSScalar1() * 1000.f;
+ SkScalar translateY = random->nextSScalar1() * 1000.f;
+ SkScalar scale;
+ do {
+ scale = random->nextSScalar1() * 100.f;
+ } while (scale == 0);
+ SkMatrix viewMatrix;
+ viewMatrix.setRotate(rotate);
+ viewMatrix.postTranslate(translateX, translateY);
+ viewMatrix.postScale(scale, scale);
+ SkRect circle = GrTest::TestSquare(random);
+ SkPoint center = {circle.centerX(), circle.centerY()};
+ SkScalar radius = circle.width() / 2.f;
+ SkScalar strokeWidth = random->nextRangeScalar(0.001f * radius, 1.8f * radius);
+ SkScalar onAngle = random->nextRangeScalar(0.01f, 1000.f);
+ SkScalar offAngle = random->nextRangeScalar(0.01f, 1000.f);
+ SkScalar startAngle = random->nextRangeScalar(-1000.f, 1000.f);
+ SkScalar phase = random->nextRangeScalar(-1000.f, 1000.f);
+ return ButtCapDashedCircleOp::Make(context, std::move(paint), viewMatrix,
+ center, radius, strokeWidth,
+ startAngle, onAngle, offAngle, phase);
+}
+
+GR_DRAW_OP_TEST_DEFINE(EllipseOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixRectStaysRect(random);
+ SkRect ellipse = GrTest::TestSquare(random);
+ return EllipseOp::Make(context, std::move(paint), viewMatrix, ellipse,
+ GrTest::TestStrokeRec(random));
+}
+
+GR_DRAW_OP_TEST_DEFINE(DIEllipseOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ SkRect ellipse = GrTest::TestSquare(random);
+ return DIEllipseOp::Make(context, std::move(paint), viewMatrix, ellipse,
+ GrTest::TestStrokeRec(random));
+}
+
+GR_DRAW_OP_TEST_DEFINE(CircularRRectOp) {
+ do {
+ SkScalar rotate = random->nextSScalar1() * 360.f;
+ SkScalar translateX = random->nextSScalar1() * 1000.f;
+ SkScalar translateY = random->nextSScalar1() * 1000.f;
+ SkScalar scale;
+ do {
+ scale = random->nextSScalar1() * 100.f;
+ } while (scale == 0);
+ SkMatrix viewMatrix;
+ viewMatrix.setRotate(rotate);
+ viewMatrix.postTranslate(translateX, translateY);
+ viewMatrix.postScale(scale, scale);
+ SkRect rect = GrTest::TestRect(random);
+ SkScalar radius = random->nextRangeF(0.1f, 10.f);
+ SkRRect rrect = SkRRect::MakeRectXY(rect, radius, radius);
+ if (rrect.isOval()) {
+ continue;
+ }
+ std::unique_ptr<GrDrawOp> op =
+ GrOvalOpFactory::MakeCircularRRectOp(context, std::move(paint), viewMatrix, rrect,
+ GrTest::TestStrokeRec(random), nullptr);
+ if (op) {
+ return op;
+ }
+ assert_alive(paint);
+ } while (true);
+}
+
+GR_DRAW_OP_TEST_DEFINE(RRectOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixRectStaysRect(random);
+ const SkRRect& rrect = GrTest::TestRRectSimple(random);
+ return make_rrect_op(context, std::move(paint), viewMatrix, rrect,
+ GrTest::TestStrokeRec(random));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.h b/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.h
new file mode 100644
index 0000000000..de25fa18d3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrOvalOpFactory.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOvalOpFactory_DEFINED
+#define GrOvalOpFactory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/gpu/GrColor.h"
+
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+class GrShaderCaps;
+class GrStyle;
+class SkMatrix;
+struct SkRect;
+class SkRRect;
+class SkStrokeRec;
+
+/*
+ * This class wraps helper functions that draw ovals, rrects, and arcs (filled & stroked)
+ */
+class GrOvalOpFactory {
+public:
+ static std::unique_ptr<GrDrawOp> MakeCircleOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix&,
+ const SkRect& oval,
+ const GrStyle& style,
+ const GrShaderCaps*);
+
+ static std::unique_ptr<GrDrawOp> MakeOvalOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix&,
+ const SkRect& oval,
+ const GrStyle& style,
+ const GrShaderCaps*);
+
+ static std::unique_ptr<GrDrawOp> MakeCircularRRectOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix&,
+ const SkRRect&,
+ const SkStrokeRec&,
+ const GrShaderCaps*);
+
+ static std::unique_ptr<GrDrawOp> MakeRRectOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix&,
+ const SkRRect&,
+ const SkStrokeRec&,
+ const GrShaderCaps*);
+
+ static std::unique_ptr<GrDrawOp> MakeArcOp(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix&,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle&,
+ const GrShaderCaps*);
+};
+
+#endif // GrOvalOpFactory_DEFINED
diff --git a/gfx/skia/skia/src/gpu/ops/GrPathStencilSettings.h b/gfx/skia/skia/src/gpu/ops/GrPathStencilSettings.h
new file mode 100644
index 0000000000..8d221736ab
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrPathStencilSettings.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathStencilSettings_DEFINED
+#define GrPathStencilSettings_DEFINED
+
+#include "src/gpu/GrUserStencilSettings.h"
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil rules for paths
+
+////// Even/Odd
+
+static constexpr GrUserStencilSettings gEOStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
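+
+// (Each covered sample inverts its stencil bits, so after this pass the stencil is
+// nonzero exactly where the path covers a pixel an odd number of times, i.e. the
+// even/odd fill rule.)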
+
+// ok not to check clip b/c stencil pass only wrote inside clip
+static constexpr GrUserStencilSettings gEOColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+// have to check clip b/c outside clip will always be zero.
+static constexpr GrUserStencilSettings gInvEOColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+////// Winding
+
+static constexpr GrUserStencilSettings gWindStencilPass(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0xffff, 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip, GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff, 0xffff,
+ GrUserStencilOp::kIncWrap, GrUserStencilOp::kDecWrap,
+ GrUserStencilOp::kKeep, GrUserStencilOp::kKeep,
+ 0xffff, 0xffff>()
+);
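+
+// (Front faces increment the stencil with wrap and back faces decrement it, so the
+// stencil accumulates the winding number; any nonzero value is inside under the
+// winding fill rule.)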
+
+static constexpr GrUserStencilSettings gWindColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kLessIfInClip, // "0 < stencil" is equivalent to "0 != stencil".
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvWindColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+////// Normal render to stencil
+
+// Sometimes the default path renderer can draw a path directly to the stencil
+// buffer without having to first resolve the interior / exterior.
+static constexpr GrUserStencilSettings gDirectToStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kIncMaybeClamp,
+ 0xffff>()
+);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.cpp b/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.cpp
new file mode 100644
index 0000000000..27bd00eec0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.cpp
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrQuadPerEdgeAA.h"
+
+#include "include/private/SkNx.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLColorSpaceXformHelper.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLPrimitiveProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+#define AI SK_ALWAYS_INLINE
+
+namespace {
+
+// Helper data types since there is a lot of information that needs to be passed around to
+// avoid recalculation in the different procedures for tessellating an AA quad.
+
+using V4f = skvx::Vec<4, float>;
+using M4f = skvx::Vec<4, int32_t>;
+
+struct Vertices {
+ // X, Y, and W coordinates in device space. If not perspective, w should be set to 1.f
+ V4f fX, fY, fW;
+    // U, V, and R coordinates representing the local quad. Ignored depending on fUVRCount (0, 2, or 3).
+ V4f fU, fV, fR;
+ int fUVRCount;
+};
+
+struct QuadMetadata {
+ // Normalized edge vectors of the device space quad, ordered L, B, T, R (i.e. nextCCW(x) - x).
+ V4f fDX, fDY;
+ // 1 / edge length of the device space quad
+ V4f fInvLengths;
+    // Edge mask (set to all 1s if aaFlags is kAll); otherwise 1.f if the edge was AA, 0.f if non-AA.
+ V4f fMask;
+};
+
+struct Edges {
+ // a * x + b * y + c = 0; positive distance is inside the quad; ordered LBTR.
+ V4f fA, fB, fC;
+ // Whether or not the edge normals had to be flipped to preserve positive distance on the inside
+ bool fFlipped;
+};
+
+static constexpr float kTolerance = 1e-2f;
+// True/false bit masks for initializing an M4f
+static constexpr int32_t kTrue = ~0;
+static constexpr int32_t kFalse = 0;
+
+static AI V4f fma(const V4f& f, const V4f& m, const V4f& a) {
+ return mad(f, m, a);
+}
+
+// These rotate the points/edge values either clockwise or counterclockwise assuming tri strip
+// order.
+static AI V4f nextCW(const V4f& v) {
+ return skvx::shuffle<2, 0, 3, 1>(v);
+}
+
+static AI V4f nextCCW(const V4f& v) {
+ return skvx::shuffle<1, 3, 0, 2>(v);
+}
+
+// Replaces zero-length 'bad' edge vectors with the reversed opposite edge vector.
+// e3 may be null if only 2D edges need to be corrected for.
+static AI void correct_bad_edges(const M4f& bad, V4f* e1, V4f* e2, V4f* e3) {
+ if (any(bad)) {
+ // Want opposite edges, L B T R -> R T B L but with flipped sign to preserve winding
+ *e1 = if_then_else(bad, -skvx::shuffle<3, 2, 1, 0>(*e1), *e1);
+ *e2 = if_then_else(bad, -skvx::shuffle<3, 2, 1, 0>(*e2), *e2);
+ if (e3) {
+ *e3 = if_then_else(bad, -skvx::shuffle<3, 2, 1, 0>(*e3), *e3);
+ }
+ }
+}
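+// (For example, if points 0 and 1 of a quad coincide, the left edge (index 0) is
+// zero-length and gets replaced by the negated right edge (index 3).)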
+
+// Replace 'bad' coordinates by rotating CCW to get the next point. c3 may be null for 2D points.
+static AI void correct_bad_coords(const M4f& bad, V4f* c1, V4f* c2, V4f* c3) {
+ if (any(bad)) {
+ *c1 = if_then_else(bad, nextCCW(*c1), *c1);
+ *c2 = if_then_else(bad, nextCCW(*c2), *c2);
+ if (c3) {
+ *c3 = if_then_else(bad, nextCCW(*c3), *c3);
+ }
+ }
+}
+
+static AI QuadMetadata get_metadata(const Vertices& vertices, GrQuadAAFlags aaFlags) {
+ V4f dx = nextCCW(vertices.fX) - vertices.fX;
+ V4f dy = nextCCW(vertices.fY) - vertices.fY;
+ V4f invLengths = rsqrt(fma(dx, dx, dy * dy));
+
+ V4f mask = aaFlags == GrQuadAAFlags::kAll ? V4f(1.f) :
+ V4f{(GrQuadAAFlags::kLeft & aaFlags) ? 1.f : 0.f,
+ (GrQuadAAFlags::kBottom & aaFlags) ? 1.f : 0.f,
+ (GrQuadAAFlags::kTop & aaFlags) ? 1.f : 0.f,
+ (GrQuadAAFlags::kRight & aaFlags) ? 1.f : 0.f};
+ return { dx * invLengths, dy * invLengths, invLengths, mask };
+}
+
+static AI Edges get_edge_equations(const QuadMetadata& metadata, const Vertices& vertices) {
+ V4f dx = metadata.fDX;
+ V4f dy = metadata.fDY;
+ // Correct for bad edges by copying adjacent edge information into the bad component
+ correct_bad_edges(metadata.fInvLengths >= 1.f / kTolerance, &dx, &dy, nullptr);
+
+ V4f c = fma(dx, vertices.fY, -dy * vertices.fX);
+ // Make sure normals point into the shape
+ V4f test = fma(dy, nextCW(vertices.fX), fma(-dx, nextCW(vertices.fY), c));
+ if (any(test < -kTolerance)) {
+ return {-dy, dx, -c, true};
+ } else {
+ return {dy, -dx, c, false};
+ }
+}
+
+// Sets 'outset' to the magnitude of outset/inset to adjust each corner of a quad given the
+// edge angles and lengths. If the quad is too small, has empty edges, or angles that are too
+// sharp, false is returned and the degenerate slow-path should be used.
+static bool get_optimized_outset(const QuadMetadata& metadata, bool rectilinear, V4f* outset) {
+ if (rectilinear) {
+ *outset = 0.5f;
+ // Stay in the fast path as long as all edges are at least a pixel long (so 1/len <= 1)
+ return all(metadata.fInvLengths <= 1.f);
+ }
+
+ if (any(metadata.fInvLengths >= 1.f / kTolerance)) {
+ // Have an empty edge from a degenerate quad, so there's no hope
+ return false;
+ }
+
+    // The distance the point needs to move is 1/(2*sin(theta)), where theta is the angle between
+    // the two edges at that point. cos(theta) is equal to dot(dxy, nextCW(dxy)).
+ V4f cosTheta = fma(metadata.fDX, nextCW(metadata.fDX), metadata.fDY * nextCW(metadata.fDY));
+    // If the angle between edges is too shallow, go through the degenerate path, since otherwise
+    // adding and subtracting very large vectors in almost opposite directions leads to float errors
+ if (any(abs(cosTheta) >= 0.9f)) {
+ return false;
+ }
+    *outset = 0.5f * rsqrt(1.f - cosTheta * cosTheta); // 1/(2*sin(theta))
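+    // Sanity check: for a rectangle, theta = pi/2, so cosTheta = 0 and outset = 0.5,
+    // matching the rectilinear fast path above.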
+
+ // When outsetting or insetting, the current edge's AA adds to the length:
+ // cos(pi - theta)/(2 sin(theta)) + cos(pi - ccw(theta))/(2 sin(ccw(theta)))
+ // Moving an adjacent edge updates the length by 1/(2 sin(theta|ccw(theta)))
+ V4f halfTanTheta = -cosTheta * (*outset); // cos(pi - theta) = -cos(theta)
+ V4f edgeAdjust = metadata.fMask * (halfTanTheta + nextCCW(halfTanTheta)) +
+ nextCCW(metadata.fMask) * nextCCW(*outset) +
+ nextCW(metadata.fMask) * (*outset);
+ // If either outsetting (plus edgeAdjust) or insetting (minus edgeAdjust) make edgeLen negative
+ // then use the slow path
+ V4f threshold = 0.1f - (1.f / metadata.fInvLengths);
+ return all(edgeAdjust > threshold) && all(edgeAdjust < -threshold);
+}
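+
+// Sanity check of the outset formula (illustrative): for a rectangle the edges
+// meet at theta = pi/2, so cosTheta == 0 and 0.5 * rsqrt(1 - cos^2(theta))
+// reduces to 0.5 -- the same half-pixel outset used by the rectilinear path.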
+
+// Ignores the quad's fW; use outset_projected_vertices if it's known to need 3D.
+static AI void outset_vertices(const V4f& outset, const QuadMetadata& metadata, Vertices* quad) {
+ // The mask is rotated compared to the outsets and edge vectors, since if the edge is "on"
+ // both its points need to be moved along their other edge vectors.
+ auto maskedOutset = -outset * nextCW(metadata.fMask);
+ auto maskedOutsetCW = outset * metadata.fMask;
+ // x = x + outset * mask * nextCW(xdiff) - outset * nextCW(mask) * xdiff
+ quad->fX += fma(maskedOutsetCW, nextCW(metadata.fDX), maskedOutset * metadata.fDX);
+ quad->fY += fma(maskedOutsetCW, nextCW(metadata.fDY), maskedOutset * metadata.fDY);
+ if (quad->fUVRCount > 0) {
+ // We want to extend the texture coords by the same proportion as the positions.
+ maskedOutset *= metadata.fInvLengths;
+ maskedOutsetCW *= nextCW(metadata.fInvLengths);
+ V4f du = nextCCW(quad->fU) - quad->fU;
+ V4f dv = nextCCW(quad->fV) - quad->fV;
+ quad->fU += fma(maskedOutsetCW, nextCW(du), maskedOutset * du);
+ quad->fV += fma(maskedOutsetCW, nextCW(dv), maskedOutset * dv);
+ if (quad->fUVRCount == 3) {
+ V4f dr = nextCCW(quad->fR) - quad->fR;
+ quad->fR += fma(maskedOutsetCW, nextCW(dr), maskedOutset * dr);
+ }
+ }
+}
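+
+// Worked example (illustrative): for an axis-aligned quad with all edges AA'd,
+// fMask == {1,1,1,1} and outset == 0.5, so each corner of the outer quad is
+// displaced 0.5px along both of its adjacent edge axes, i.e. diagonally
+// outward; the matching inset call with -outset moves corners inward instead.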
+
+// Updates (x,y,w) to be at (x2d,y2d) once projected. Updates (u,v,r) to match if provided.
+// Gracefully handles 2D content if *w holds all 1s.
+static void outset_projected_vertices(const V4f& x2d, const V4f& y2d,
+ GrQuadAAFlags aaFlags, Vertices* quad) {
+ // Left to right, in device space, for each point
+ V4f e1x = skvx::shuffle<2, 3, 2, 3>(quad->fX) - skvx::shuffle<0, 1, 0, 1>(quad->fX);
+ V4f e1y = skvx::shuffle<2, 3, 2, 3>(quad->fY) - skvx::shuffle<0, 1, 0, 1>(quad->fY);
+ V4f e1w = skvx::shuffle<2, 3, 2, 3>(quad->fW) - skvx::shuffle<0, 1, 0, 1>(quad->fW);
+ correct_bad_edges(fma(e1x, e1x, e1y * e1y) < kTolerance * kTolerance, &e1x, &e1y, &e1w);
+
+ // Top to bottom, in device space, for each point
+ V4f e2x = skvx::shuffle<1, 1, 3, 3>(quad->fX) - skvx::shuffle<0, 0, 2, 2>(quad->fX);
+ V4f e2y = skvx::shuffle<1, 1, 3, 3>(quad->fY) - skvx::shuffle<0, 0, 2, 2>(quad->fY);
+ V4f e2w = skvx::shuffle<1, 1, 3, 3>(quad->fW) - skvx::shuffle<0, 0, 2, 2>(quad->fW);
+ correct_bad_edges(fma(e2x, e2x, e2y * e2y) < kTolerance * kTolerance, &e2x, &e2y, &e2w);
+
+ // Can only move along e1 and e2 to reach the new 2D point, so we have
+ // x2d = (x + a*e1x + b*e2x) / (w + a*e1w + b*e2w) and
+ // y2d = (y + a*e1y + b*e2y) / (w + a*e1w + b*e2w) for some a, b
+ // This can be rewritten to a*c1x + b*c2x + c3x = 0; a * c1y + b*c2y + c3y = 0, where
+ // the cNx and cNy coefficients are:
+ V4f c1x = e1w * x2d - e1x;
+ V4f c1y = e1w * y2d - e1y;
+ V4f c2x = e2w * x2d - e2x;
+ V4f c2y = e2w * y2d - e2y;
+ V4f c3x = quad->fW * x2d - quad->fX;
+ V4f c3y = quad->fW * y2d - quad->fY;
+
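+ // The kAll case below is Cramer's rule applied to the 2x2 system
+ //   [c1x c2x] [a]   [-c3x]
+ //   [c1y c2y] [b] = [-c3y]
+ // whose determinant is denom = c1x*c2y - c2x*c1y.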
+ // Solve for a and b
+ V4f a, b, denom;
+ if (aaFlags == GrQuadAAFlags::kAll) {
+ // When every edge is outset/inset, each corner can use both edge vectors
+ denom = c1x * c2y - c2x * c1y;
+ a = (c2x * c3y - c3x * c2y) / denom;
+ b = (c3x * c1y - c1x * c3y) / denom;
+ } else {
+ // Force a or b to be 0 if that edge cannot be used due to non-AA
+ M4f aMask = M4f{(aaFlags & GrQuadAAFlags::kLeft) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kLeft) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kRight) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kRight) ? kTrue : kFalse};
+ M4f bMask = M4f{(aaFlags & GrQuadAAFlags::kTop) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kBottom) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kTop) ? kTrue : kFalse,
+ (aaFlags & GrQuadAAFlags::kBottom) ? kTrue : kFalse};
+
+ // When aMask[i]&bMask[i], then a[i], b[i], denom[i] match the kAll case.
+ // When aMask[i]&!bMask[i], then b[i] = 0, a[i] = -c3x/c1x or -c3y/c1y, using better denom
+ // When !aMask[i]&bMask[i], then a[i] = 0, b[i] = -c3x/c2x or -c3y/c2y, ""
+ // When !aMask[i]&!bMask[i], then both a[i] = 0 and b[i] = 0
+ M4f useC1x = abs(c1x) > abs(c1y);
+ M4f useC2x = abs(c2x) > abs(c2y);
+
+ denom = if_then_else(aMask,
+ if_then_else(bMask,
+ c1x * c2y - c2x * c1y, /* A & B */
+ if_then_else(useC1x, c1x, c1y)), /* A & !B */
+ if_then_else(bMask,
+ if_then_else(useC2x, c2x, c2y), /* !A & B */
+ V4f(1.f))); /* !A & !B */
+
+ a = if_then_else(aMask,
+ if_then_else(bMask,
+ c2x * c3y - c3x * c2y, /* A & B */
+ if_then_else(useC1x, -c3x, -c3y)), /* A & !B */
+ V4f(0.f)) / denom; /* !A */
+ b = if_then_else(bMask,
+ if_then_else(aMask,
+ c3x * c1y - c1x * c3y, /* A & B */
+ if_then_else(useC2x, -c3x, -c3y)), /* !A & B */
+ V4f(0.f)) / denom; /* !B */
+ }
+
+ V4f newW = quad->fW + a * e1w + b * e2w;
+ // If newW < 0, scale a and b such that the point reaches the infinity plane instead of crossing
+ // This breaks orthogonality of inset/outsets, but GPUs don't handle negative Ws well so this
+ // is far less visually disturbing (likely not noticeable since it's at extreme perspective).
+ // The alternative correction (multiply xyw by -1) has the disadvantage of changing how local
+ // coordinates would be interpolated.
+ static const float kMinW = 1e-6f;
+ if (any(newW < 0.f)) {
+ V4f scale = if_then_else(newW < kMinW, (kMinW - quad->fW) / (newW - quad->fW), V4f(1.f));
+ a *= scale;
+ b *= scale;
+ }
+
+ quad->fX += a * e1x + b * e2x;
+ quad->fY += a * e1y + b * e2y;
+ quad->fW += a * e1w + b * e2w;
+ correct_bad_coords(abs(denom) < kTolerance, &quad->fX, &quad->fY, &quad->fW);
+
+ if (quad->fUVRCount > 0) {
+ // Calculate R here so it can be corrected with U and V in case it's needed later
+ V4f e1u = skvx::shuffle<2, 3, 2, 3>(quad->fU) - skvx::shuffle<0, 1, 0, 1>(quad->fU);
+ V4f e1v = skvx::shuffle<2, 3, 2, 3>(quad->fV) - skvx::shuffle<0, 1, 0, 1>(quad->fV);
+ V4f e1r = skvx::shuffle<2, 3, 2, 3>(quad->fR) - skvx::shuffle<0, 1, 0, 1>(quad->fR);
+ correct_bad_edges(fma(e1u, e1u, e1v * e1v) < kTolerance * kTolerance, &e1u, &e1v, &e1r);
+
+ V4f e2u = skvx::shuffle<1, 1, 3, 3>(quad->fU) - skvx::shuffle<0, 0, 2, 2>(quad->fU);
+ V4f e2v = skvx::shuffle<1, 1, 3, 3>(quad->fV) - skvx::shuffle<0, 0, 2, 2>(quad->fV);
+ V4f e2r = skvx::shuffle<1, 1, 3, 3>(quad->fR) - skvx::shuffle<0, 0, 2, 2>(quad->fR);
+ correct_bad_edges(fma(e2u, e2u, e2v * e2v) < kTolerance * kTolerance, &e2u, &e2v, &e2r);
+
+ quad->fU += a * e1u + b * e2u;
+ quad->fV += a * e1v + b * e2v;
+ if (quad->fUVRCount == 3) {
+ quad->fR += a * e1r + b * e2r;
+ correct_bad_coords(abs(denom) < kTolerance, &quad->fU, &quad->fV, &quad->fR);
+ } else {
+ correct_bad_coords(abs(denom) < kTolerance, &quad->fU, &quad->fV, nullptr);
+ }
+ }
+}
+
+static V4f degenerate_coverage(const V4f& px, const V4f& py, const Edges& edges) {
+ // Calculate distance of the 4 inset points (px, py) to the 4 edges
+ V4f d0 = fma(edges.fA[0], px, fma(edges.fB[0], py, edges.fC[0]));
+ V4f d1 = fma(edges.fA[1], px, fma(edges.fB[1], py, edges.fC[1]));
+ V4f d2 = fma(edges.fA[2], px, fma(edges.fB[2], py, edges.fC[2]));
+ V4f d3 = fma(edges.fA[3], px, fma(edges.fB[3], py, edges.fC[3]));
+
+ // For each point, pretend that there's a rectangle that touches e0 and e3 on the horizontal
+ // axis, so its width is "approximately" d0 + d3, and it touches e1 and e2 on the vertical axis
+ // so its height is d1 + d2. Pin each of these dimensions to [0, 1] and approximate the coverage
+ // at each point as clamp(d0+d3, 0, 1) x clamp(d1+d2, 0, 1). For rectilinear quads this is an
+ // accurate calculation of its area clipped to an aligned pixel. For arbitrary quads it is not
+ // mathematically accurate but qualitatively provides a stable value proportional to the size of
+ // the shape.
+ V4f w = max(0.f, min(1.f, d0 + d3));
+ V4f h = max(0.f, min(1.f, d1 + d2));
+ return w * h;
+}
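+
+// Worked example (illustrative): if an inset point lands 0.3px inside both the
+// left (e0) and right (e3) edges and 0.2px inside both the bottom (e1) and top
+// (e2) edges, then d0 + d3 == 0.6 and d1 + d2 == 0.4, so the approximated
+// coverage for that vertex is 0.6 * 0.4 == 0.24.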
+
+// Outsets or insets xs/ys in place. To be used when the interior is very small, edges are near
+// parallel, or edges are very short/zero-length. Returns coverage for each vertex.
+// Requires (dx, dy) to already be fixed for empty edges.
+static V4f compute_degenerate_quad(GrQuadAAFlags aaFlags, const V4f& mask, const Edges& edges,
+ bool outset, Vertices* quad) {
+ // Move the edge 1/2 pixel in or out depending on 'outset'.
+ V4f oc = edges.fC + mask * (outset ? 0.5f : -0.5f);
+
+ // There are 6 points that we care about to determine the final shape of the polygon, which
+ // are the intersections between (e0,e2), (e1,e0), (e2,e3), (e3,e1) (corresponding to the
+ // 4 corners), and (e1, e2), (e0, e3) (representing the intersections of opposite edges).
+ V4f denom = edges.fA * nextCW(edges.fB) - edges.fB * nextCW(edges.fA);
+ V4f px = (edges.fB * nextCW(oc) - oc * nextCW(edges.fB)) / denom;
+ V4f py = (oc * nextCW(edges.fA) - edges.fA * nextCW(oc)) / denom;
+ correct_bad_coords(abs(denom) < kTolerance, &px, &py, nullptr);
+
+ // Calculate the signed distances from these 4 corners to the other two edges that did not
+ // define the intersection. So p(0) is compared to e3,e1, p(1) to e3,e2 , p(2) to e0,e1, and
+ // p(3) to e0,e2
+ V4f dists1 = px * skvx::shuffle<3, 3, 0, 0>(edges.fA) +
+ py * skvx::shuffle<3, 3, 0, 0>(edges.fB) +
+ skvx::shuffle<3, 3, 0, 0>(oc);
+ V4f dists2 = px * skvx::shuffle<1, 2, 1, 2>(edges.fA) +
+ py * skvx::shuffle<1, 2, 1, 2>(edges.fB) +
+ skvx::shuffle<1, 2, 1, 2>(oc);
+
+ // If all the distances are >= 0, the 4 corners form a valid quadrilateral, so use them as
+ // the 4 points. If any point is on the wrong side of both edges, the interior has collapsed
+ // and we need to use a central point to represent it. If all four points are only on the
+ // wrong side of 1 edge, one edge has crossed over another and we use a line to represent it.
+ // Otherwise, use a triangle that replaces the bad points with the intersections of
+ // (e1, e2) or (e0, e3) as needed.
+ M4f d1v0 = dists1 < kTolerance;
+ M4f d2v0 = dists2 < kTolerance;
+ M4f d1And2 = d1v0 & d2v0;
+ M4f d1Or2 = d1v0 | d2v0;
+
+ V4f coverage;
+ if (!any(d1Or2)) {
+ // Every dists1 and dists2 >= kTolerance so it's not degenerate, use all 4 corners as-is
+ // and use full coverage
+ coverage = 1.f;
+ } else if (any(d1And2)) {
+ // A point failed against two edges, so reduce the shape to a single point, which we take as
+ // the center of the original quad to ensure it is contained in the intended geometry. Since
+ // it has collapsed, we know the shape cannot cover a pixel so update the coverage.
+ SkPoint center = {0.25f * (quad->fX[0] + quad->fX[1] + quad->fX[2] + quad->fX[3]),
+ 0.25f * (quad->fY[0] + quad->fY[1] + quad->fY[2] + quad->fY[3])};
+ px = center.fX;
+ py = center.fY;
+ coverage = degenerate_coverage(px, py, edges);
+ } else if (all(d1Or2)) {
+ // Degenerates to a line. Compare p[2] and p[3] to edge 0. If they are on the wrong side,
+ // that means edge 0 and 3 crossed, and otherwise edge 1 and 2 crossed.
+ if (dists1[2] < kTolerance && dists1[3] < kTolerance) {
+ // Edges 0 and 3 have crossed over, so make the line from average of (p0,p2) and (p1,p3)
+ px = 0.5f * (skvx::shuffle<0, 1, 0, 1>(px) + skvx::shuffle<2, 3, 2, 3>(px));
+ py = 0.5f * (skvx::shuffle<0, 1, 0, 1>(py) + skvx::shuffle<2, 3, 2, 3>(py));
+ } else {
+ // Edges 1 and 2 have crossed over, so make the line from average of (p0,p1) and (p2,p3)
+ px = 0.5f * (skvx::shuffle<0, 0, 2, 2>(px) + skvx::shuffle<1, 1, 3, 3>(px));
+ py = 0.5f * (skvx::shuffle<0, 0, 2, 2>(py) + skvx::shuffle<1, 1, 3, 3>(py));
+ }
+ coverage = degenerate_coverage(px, py, edges);
+ } else {
+ // This turns into a triangle. Replace corners as needed with the intersections between
+ // (e0,e3) and (e1,e2), which must now be calculated
+ using V2f = skvx::Vec<2, float>;
+ V2f eDenom = skvx::shuffle<0, 1>(edges.fA) * skvx::shuffle<3, 2>(edges.fB) -
+ skvx::shuffle<0, 1>(edges.fB) * skvx::shuffle<3, 2>(edges.fA);
+ V2f ex = (skvx::shuffle<0, 1>(edges.fB) * skvx::shuffle<3, 2>(oc) -
+ skvx::shuffle<0, 1>(oc) * skvx::shuffle<3, 2>(edges.fB)) / eDenom;
+ V2f ey = (skvx::shuffle<0, 1>(oc) * skvx::shuffle<3, 2>(edges.fA) -
+ skvx::shuffle<0, 1>(edges.fA) * skvx::shuffle<3, 2>(oc)) / eDenom;
+
+ if (SkScalarAbs(eDenom[0]) > kTolerance) {
+ px = if_then_else(d1v0, V4f(ex[0]), px);
+ py = if_then_else(d1v0, V4f(ey[0]), py);
+ }
+ if (SkScalarAbs(eDenom[1]) > kTolerance) {
+ px = if_then_else(d2v0, V4f(ex[1]), px);
+ py = if_then_else(d2v0, V4f(ey[1]), py);
+ }
+
+ coverage = 1.f;
+ }
+
+ outset_projected_vertices(px, py, aaFlags, quad);
+ return coverage;
+}
+
+// Computes the vertices for the two nested quads used to create AA edges. The original single quad
+// should be duplicated as input in 'inner' and 'outer', and the resulting quad frame will be
+// stored in-place on return. Returns per-vertex coverage for the inner vertices.
+static V4f compute_nested_quad_vertices(GrQuadAAFlags aaFlags, bool rectilinear,
+ Vertices* inner, Vertices* outer, SkRect* domain) {
+ SkASSERT(inner->fUVRCount == 0 || inner->fUVRCount == 2 || inner->fUVRCount == 3);
+ SkASSERT(outer->fUVRCount == inner->fUVRCount);
+
+ QuadMetadata metadata = get_metadata(*inner, aaFlags);
+
+ // Calculate the domain before updating the vertices; it's only used when not rectilinear.
+ if (!rectilinear) {
+ SkASSERT(domain);
+ // The domain is the bounding box of the quad, outset by 0.5. Don't worry about edge masks
+ // since the FP only applies the domain on the exterior triangles, which are degenerate for
+ // non-AA edges.
+ domain->fLeft = min(outer->fX) - 0.5f;
+ domain->fRight = max(outer->fX) + 0.5f;
+ domain->fTop = min(outer->fY) - 0.5f;
+ domain->fBottom = max(outer->fY) + 0.5f;
+ }
+
+ // When outsetting, we want the new edge to be .5px away from the old line, which means the
+ // corners may need to be adjusted by more than .5px if the matrix had shear. This adjustment
+ // is only computed if there are no empty edges, and it may signal going through the slow path.
+ V4f outset = 0.5f;
+ if (get_optimized_outset(metadata, rectilinear, &outset)) {
+ // Since it's not subpixel, outsetting and insetting are trivial vector additions.
+ outset_vertices(outset, metadata, outer);
+ outset_vertices(-outset, metadata, inner);
+ return 1.f;
+ }
+
+ // Only compute edge equations once since they are the same for inner and outer quads
+ Edges edges = get_edge_equations(metadata, *inner);
+
+ // Calculate both outset and inset, returning the coverage reported for the inset, since the
+ // outset always has 0.0f coverage.
+ compute_degenerate_quad(aaFlags, metadata.fMask, edges, true, outer);
+ return compute_degenerate_quad(aaFlags, metadata.fMask, edges, false, inner);
+}
+
+// Generalizes compute_nested_quad_vertices to extrapolate local coords such that after perspective
+// division of the device coordinates, the original local coordinate value is at the original
+// un-outset device position.
+static V4f compute_nested_persp_quad_vertices(const GrQuadAAFlags aaFlags, Vertices* inner,
+ Vertices* outer, SkRect* domain) {
+ SkASSERT(inner->fUVRCount == 0 || inner->fUVRCount == 2 || inner->fUVRCount == 3);
+ SkASSERT(outer->fUVRCount == inner->fUVRCount);
+
+ // Calculate the projected 2D quad and use it to form projected inner/outer quads
+ V4f iw = 1.0f / inner->fW;
+ V4f x2d = inner->fX * iw;
+ V4f y2d = inner->fY * iw;
+
+ Vertices inner2D = { x2d, y2d, /*w*/ 1.f, 0.f, 0.f, 0.f, 0 }; // No uvr outsetting in 2D
+ Vertices outer2D = inner2D;
+
+ V4f coverage = compute_nested_quad_vertices(
+ aaFlags, /* rect */ false, &inner2D, &outer2D, domain);
+
+ // Now map from the 2D inset/outset back to 3D and update the local coordinates as well
+ outset_projected_vertices(inner2D.fX, inner2D.fY, aaFlags, inner);
+ outset_projected_vertices(outer2D.fX, outer2D.fY, aaFlags, outer);
+
+ return coverage;
+}
+
+// Writes four vertices in triangle strip order, including the additional data for local
+// coordinates, geometry + texture domains, color, and coverage as needed to satisfy the vertex spec
+static void write_quad(GrVertexWriter* vb, const GrQuadPerEdgeAA::VertexSpec& spec,
+ GrQuadPerEdgeAA::CoverageMode mode, const V4f& coverage, SkPMColor4f color4f,
+ const SkRect& geomDomain, const SkRect& texDomain, const Vertices& quad) {
+ static constexpr auto If = GrVertexWriter::If<float>;
+
+ for (int i = 0; i < 4; ++i) {
+ // save position, this is a float2 or float3 or float4 depending on the combination of
+ // perspective and coverage mode.
+ vb->write(quad.fX[i], quad.fY[i],
+ If(spec.deviceQuadType() == GrQuad::Type::kPerspective, quad.fW[i]),
+ If(mode == GrQuadPerEdgeAA::CoverageMode::kWithPosition, coverage[i]));
+
+ // save color
+ if (spec.hasVertexColors()) {
+ bool wide = spec.colorType() == GrQuadPerEdgeAA::ColorType::kHalf;
+ vb->write(GrVertexColor(
+ color4f * (mode == GrQuadPerEdgeAA::CoverageMode::kWithColor ? coverage[i] : 1.f),
+ wide));
+ }
+
+ // save local position
+ if (spec.hasLocalCoords()) {
+ vb->write(quad.fU[i], quad.fV[i],
+ If(spec.localQuadType() == GrQuad::Type::kPerspective, quad.fR[i]));
+ }
+
+ // save the geometry domain
+ if (spec.requiresGeometryDomain()) {
+ vb->write(geomDomain);
+ }
+
+ // save the texture domain
+ if (spec.hasDomain()) {
+ vb->write(texDomain);
+ }
+ }
+}
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
+
+static const int kVertsPerAAFillRect = 8;
+static const int kIndicesPerAAFillRect = 30;
+
+static sk_sp<const GrGpuBuffer> get_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
+
+ // clang-format off
+ static const uint16_t gFillAARectIdx[] = {
+ 0, 1, 2, 1, 3, 2,
+ 0, 4, 1, 4, 5, 1,
+ 0, 6, 4, 0, 2, 6,
+ 2, 3, 6, 3, 7, 6,
+ 1, 5, 3, 3, 5, 7,
+ };
+ // clang-format on
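+ // The 30 indices above form 10 triangles: the first two fill the inner quad
+ // (vertices 0-3, which Tessellate() writes first), and the remaining eight
+ // form the AA frame connecting it to the outer quad (vertices 4-7).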
+
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gFillAARectIdx) == kIndicesPerAAFillRect);
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ gFillAARectIdx, kIndicesPerAAFillRect, GrQuadPerEdgeAA::kNumAAQuadsInIndexBuffer,
+ kVertsPerAAFillRect, gAAFillRectIndexBufferKey);
+}
+
+} // anonymous namespace
+
+namespace GrQuadPerEdgeAA {
+
+// This is a more elaborate version of SkPMColor4fNeedsWideColor that allows "no color" for white
+ColorType MinColorType(SkPMColor4f color, GrClampType clampType, const GrCaps& caps) {
+ if (color == SK_PMColor4fWHITE) {
+ return ColorType::kNone;
+ } else {
+ return SkPMColor4fNeedsWideColor(color, clampType, caps) ? ColorType::kHalf
+ : ColorType::kByte;
+ }
+}
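+
+// Example (illustrative): SK_PMColor4fWHITE maps to kNone, while an opaque,
+// byte-representable color such as {1, 0, 0, 1} typically maps to kByte;
+// kHalf is only chosen when SkPMColor4fNeedsWideColor reports that the color
+// needs wide storage for the given clamp type and caps.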
+
+////////////////// Tessellate Implementation
+
+void* Tessellate(void* vertices, const VertexSpec& spec, const GrQuad& deviceQuad,
+ const SkPMColor4f& color4f, const GrQuad& localQuad, const SkRect& domain,
+ GrQuadAAFlags aaFlags) {
+ SkASSERT(deviceQuad.quadType() <= spec.deviceQuadType());
+ SkASSERT(!spec.hasLocalCoords() || localQuad.quadType() <= spec.localQuadType());
+
+ GrQuadPerEdgeAA::CoverageMode mode = spec.coverageMode();
+
+ // Load position data into V4fs (always x, y, and load w to avoid branching down the road)
+ Vertices outer;
+ outer.fX = deviceQuad.x4f();
+ outer.fY = deviceQuad.y4f();
+ outer.fW = deviceQuad.w4f(); // Guaranteed to be 1f if it's not perspective
+
+ // Load local position data into V4fs (either none, just u,v or all three)
+ outer.fUVRCount = spec.localDimensionality();
+ if (spec.hasLocalCoords()) {
+ outer.fU = localQuad.x4f();
+ outer.fV = localQuad.y4f();
+ outer.fR = localQuad.w4f(); // Will be ignored if the local quad type isn't perspective
+ }
+
+ GrVertexWriter vb{vertices};
+ if (spec.usesCoverageAA()) {
+ SkASSERT(mode == CoverageMode::kWithPosition || mode == CoverageMode::kWithColor);
+ // Must calculate two new quads, one outset and one inset by .5 in projected device space,
+ // so duplicate the original quad to serve as the inner quad
+ Vertices inner = outer;
+
+ SkRect geomDomain;
+ V4f maxCoverage = 1.f;
+ if (spec.deviceQuadType() == GrQuad::Type::kPerspective) {
+ // For perspective, send quads with all edges non-AA through the tessellation to ensure
+ // their corners are processed the same as adjacent quads. This approach relies on
+ // solving edge equations to reconstruct corners, which can create seams if an inner
+ // fully non-AA quad is not similarly processed.
+ maxCoverage = compute_nested_persp_quad_vertices(aaFlags, &inner, &outer, &geomDomain);
+ } else if (aaFlags != GrQuadAAFlags::kNone) {
+ // In 2D, the simpler corner math does not cause issues with seaming against non-AA
+ // inner quads.
+ maxCoverage = compute_nested_quad_vertices(
+ aaFlags, spec.deviceQuadType() <= GrQuad::Type::kRectilinear, &inner, &outer,
+ &geomDomain);
+ } else if (spec.requiresGeometryDomain()) {
+ // The quad itself wouldn't need a geometric domain, but the batch does, so set the
+ // domain to the bounds of the X/Y coords. Since it's non-AA, this won't actually be
+ // evaluated by the shader, but make sure not to upload uninitialized data.
+ geomDomain.fLeft = min(outer.fX);
+ geomDomain.fRight = max(outer.fX);
+ geomDomain.fTop = min(outer.fY);
+ geomDomain.fBottom = max(outer.fY);
+ }
+
+ // Write two quads for inner and outer; the inner quad carries the computed max coverage
+ // and the outer quad carries zero coverage
+ write_quad(&vb, spec, mode, maxCoverage, color4f, geomDomain, domain, inner);
+ write_quad(&vb, spec, mode, 0.f, color4f, geomDomain, domain, outer);
+ } else {
+ // No outsetting needed, just write a single quad with full coverage
+ SkASSERT(mode == CoverageMode::kNone && !spec.requiresGeometryDomain());
+ write_quad(&vb, spec, mode, 1.f, color4f, SkRect::MakeEmpty(), domain, outer);
+ }
+
+ return vb.fPtr;
+}
+
+bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const VertexSpec& spec,
+ int quadCount) {
+ if (spec.usesCoverageAA()) {
+ // AA quads use 8 vertices, basically nested rectangles
+ sk_sp<const GrGpuBuffer> ibuffer = get_index_buffer(target->resourceProvider());
+ if (!ibuffer) {
+ return false;
+ }
+
+ mesh->setPrimitiveType(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(std::move(ibuffer), kIndicesPerAAFillRect, kVertsPerAAFillRect,
+ quadCount, kNumAAQuadsInIndexBuffer);
+ } else {
+ // Non-AA quads use 4 vertices, and regular triangle strip layout
+ if (quadCount > 1) {
+ sk_sp<const GrGpuBuffer> ibuffer = target->resourceProvider()->refQuadIndexBuffer();
+ if (!ibuffer) {
+ return false;
+ }
+
+ mesh->setPrimitiveType(GrPrimitiveType::kTriangles);
+ mesh->setIndexedPatterned(std::move(ibuffer), 6, 4, quadCount,
+ GrResourceProvider::QuadCountOfQuadBuffer());
+ } else {
+ mesh->setPrimitiveType(GrPrimitiveType::kTriangleStrip);
+ mesh->setNonIndexedNonInstanced(4);
+ }
+ }
+
+ return true;
+}
+
+////////////////// VertexSpec Implementation
+
+int VertexSpec::deviceDimensionality() const {
+ return this->deviceQuadType() == GrQuad::Type::kPerspective ? 3 : 2;
+}
+
+int VertexSpec::localDimensionality() const {
+ return fHasLocalCoords ? (this->localQuadType() == GrQuad::Type::kPerspective ? 3 : 2) : 0;
+}
+
+CoverageMode VertexSpec::coverageMode() const {
+ if (this->usesCoverageAA()) {
+ if (this->compatibleWithCoverageAsAlpha() && this->hasVertexColors() &&
+ !this->requiresGeometryDomain()) {
+ // Using a geometric domain acts as a second source of coverage and folding
+ // the original coverage into color makes it impossible to apply the color's
+ // alpha to the geometric domain's coverage when the original shape is clipped.
+ return CoverageMode::kWithColor;
+ } else {
+ return CoverageMode::kWithPosition;
+ }
+ } else {
+ return CoverageMode::kNone;
+ }
+}
+
+// This needs to stay in sync w/ QuadPerEdgeAAGeometryProcessor::initializeAttrs
+size_t VertexSpec::vertexSize() const {
+ bool needsPerspective = (this->deviceDimensionality() == 3);
+ CoverageMode coverageMode = this->coverageMode();
+
+ size_t count = 0;
+
+ if (coverageMode == CoverageMode::kWithPosition) {
+ if (needsPerspective) {
+ count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+ } else {
+ count += GrVertexAttribTypeSize(kFloat2_GrVertexAttribType) +
+ GrVertexAttribTypeSize(kFloat_GrVertexAttribType);
+ }
+ } else {
+ if (needsPerspective) {
+ count += GrVertexAttribTypeSize(kFloat3_GrVertexAttribType);
+ } else {
+ count += GrVertexAttribTypeSize(kFloat2_GrVertexAttribType);
+ }
+ }
+
+ if (this->requiresGeometryDomain()) {
+ count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+ }
+
+ count += this->localDimensionality() * GrVertexAttribTypeSize(kFloat_GrVertexAttribType);
+
+ if (ColorType::kByte == this->colorType()) {
+ count += GrVertexAttribTypeSize(kUByte4_norm_GrVertexAttribType);
+ } else if (ColorType::kHalf == this->colorType()) {
+ count += GrVertexAttribTypeSize(kHalf4_GrVertexAttribType);
+ }
+
+ if (this->hasDomain()) {
+ count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+ }
+
+ return count;
+}
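+
+// Worked size example (illustrative): a non-perspective quad using
+// CoverageMode::kWithPosition with 2D local coords, byte colors, and no
+// domains totals float2 + float (12 bytes) + 2 floats (8) + ubyte4 (4)
+// = 24 bytes per vertex.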
+
+////////////////// Geometry Processor Implementation
+
+class QuadPerEdgeAAGeometryProcessor : public GrGeometryProcessor {
+public:
+ using Saturate = GrTextureOp::Saturate;
+
+ static sk_sp<GrGeometryProcessor> Make(const VertexSpec& spec) {
+ return sk_sp<QuadPerEdgeAAGeometryProcessor>(new QuadPerEdgeAAGeometryProcessor(spec));
+ }
+
+ static sk_sp<GrGeometryProcessor> Make(const VertexSpec& vertexSpec, const GrShaderCaps& caps,
+ GrTextureType textureType,
+ const GrSamplerState& samplerState,
+ const GrSwizzle& swizzle, uint32_t extraSamplerKey,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform,
+ Saturate saturate) {
+ return sk_sp<QuadPerEdgeAAGeometryProcessor>(new QuadPerEdgeAAGeometryProcessor(
+ vertexSpec, caps, textureType, samplerState, swizzle, extraSamplerKey,
+ std::move(textureColorSpaceXform), saturate));
+ }
+
+ const char* name() const override { return "QuadPerEdgeAAGeometryProcessor"; }
+
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ // texturing, device-dimensions are single bit flags
+ uint32_t x = (fTexDomain.isInitialized() ? 0 : 0x1)
+ | (fSampler.isInitialized() ? 0 : 0x2)
+ | (fNeedsPerspective ? 0 : 0x4)
+ | (fSaturate == Saturate::kNo ? 0 : 0x8);
+ // local coords require 2 bits (3 choices), 00 for none, 01 for 2d, 10 for 3d
+ if (fLocalCoord.isInitialized()) {
+ x |= kFloat3_GrVertexAttribType == fLocalCoord.cpuType() ? 0x10 : 0x20;
+ }
+ // similar for colors, 00 for none, 01 for bytes, 10 for half-floats
+ if (fColor.isInitialized()) {
+ x |= kUByte4_norm_GrVertexAttribType == fColor.cpuType() ? 0x40 : 0x80;
+ }
+ // and coverage mode, 00 for none, 01 for withposition, 10 for withcolor, 11 for
+ // position+geomdomain
+ SkASSERT(!fGeomDomain.isInitialized() || fCoverageMode == CoverageMode::kWithPosition);
+ if (fCoverageMode != CoverageMode::kNone) {
+ x |= fGeomDomain.isInitialized()
+ ? 0x300
+ : (CoverageMode::kWithPosition == fCoverageMode ? 0x100 : 0x200);
+ }
+
+ b->add32(GrColorSpaceXform::XformKey(fTextureColorSpaceXform.get()));
+ b->add32(x);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps& caps) const override {
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+ const auto& gp = proc.cast<QuadPerEdgeAAGeometryProcessor>();
+ if (gp.fLocalCoord.isInitialized()) {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+ fTextureColorSpaceXformHelper.setData(pdman, gp.fTextureColorSpaceXform.get());
+ }
+
+ private:
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ using Interpolation = GrGLSLVaryingHandler::Interpolation;
+
+ const auto& gp = args.fGP.cast<QuadPerEdgeAAGeometryProcessor>();
+ fTextureColorSpaceXformHelper.emitCode(args.fUniformHandler,
+ gp.fTextureColorSpaceXform.get());
+
+ args.fVaryingHandler->emitAttributes(gp);
+
+ if (gp.fCoverageMode == CoverageMode::kWithPosition) {
+ // Strip last channel from the vertex attribute to remove coverage and get the
+ // actual position
+ if (gp.fNeedsPerspective) {
+ args.fVertBuilder->codeAppendf("float3 position = %s.xyz;",
+ gp.fPosition.name());
+ } else {
+ args.fVertBuilder->codeAppendf("float2 position = %s.xy;",
+ gp.fPosition.name());
+ }
+ gpArgs->fPositionVar = {"position",
+ gp.fNeedsPerspective ? kFloat3_GrSLType
+ : kFloat2_GrSLType,
+ GrShaderVar::kNone_TypeModifier};
+ } else {
+ // No coverage to eliminate
+ gpArgs->fPositionVar = gp.fPosition.asShaderVar();
+ }
+
+ // Handle local coordinates if they exist
+ if (gp.fLocalCoord.isInitialized()) {
+ // NOTE: If the only usage of local coordinates is for the inline texture fetch
+ // before FPs, then there are no registered FPCoordTransforms and this ends up
+ // emitting nothing, so there isn't a duplication of local coordinates
+ this->emitTransforms(args.fVertBuilder,
+ args.fVaryingHandler,
+ args.fUniformHandler,
+ gp.fLocalCoord.asShaderVar(),
+ args.fFPCoordTransformHandler);
+ }
+
+ // Solid color before any texturing gets modulated in
+ if (gp.fColor.isInitialized()) {
+ SkASSERT(gp.fCoverageMode != CoverageMode::kWithColor || !gp.fNeedsPerspective);
+ // The color cannot be flat if the varying coverage has been modulated into it
+ args.fVaryingHandler->addPassThroughAttribute(gp.fColor, args.fOutputColor,
+ gp.fCoverageMode == CoverageMode::kWithColor ?
+ Interpolation::kInterpolated : Interpolation::kCanBeFlat);
+ } else {
+ // Output color must be initialized to something
+ args.fFragBuilder->codeAppendf("%s = half4(1);", args.fOutputColor);
+ }
+
+ // If there is a texture, must also handle texture coordinates and reading from
+ // the texture in the fragment shader before continuing to fragment processors.
+ if (gp.fSampler.isInitialized()) {
+ // Texture coordinates clamped by the domain on the fragment shader; if the GP
+ // has a texture, it's guaranteed to have local coordinates
+ args.fFragBuilder->codeAppend("float2 texCoord;");
+ if (gp.fLocalCoord.cpuType() == kFloat3_GrVertexAttribType) {
+ // Can't do a pass through since we need to perform perspective division
+ GrGLSLVarying v(gp.fLocalCoord.gpuType());
+ args.fVaryingHandler->addVarying(gp.fLocalCoord.name(), &v);
+ args.fVertBuilder->codeAppendf("%s = %s;",
+ v.vsOut(), gp.fLocalCoord.name());
+ args.fFragBuilder->codeAppendf("texCoord = %s.xy / %s.z;",
+ v.fsIn(), v.fsIn());
+ } else {
+ args.fVaryingHandler->addPassThroughAttribute(gp.fLocalCoord, "texCoord");
+ }
+
+ // Clamp the now 2D localCoordName variable by the domain if it is provided
+ if (gp.fTexDomain.isInitialized()) {
+ args.fFragBuilder->codeAppend("float4 domain;");
+ args.fVaryingHandler->addPassThroughAttribute(gp.fTexDomain, "domain",
+ Interpolation::kCanBeFlat);
+ args.fFragBuilder->codeAppend(
+ "texCoord = clamp(texCoord, domain.xy, domain.zw);");
+ }
+
+ // Now modulate the starting output color by the texture lookup
+ args.fFragBuilder->codeAppendf("%s = ", args.fOutputColor);
+ args.fFragBuilder->appendTextureLookupAndModulate(
+ args.fOutputColor, args.fTexSamplers[0], "texCoord", kFloat2_GrSLType,
+ &fTextureColorSpaceXformHelper);
+ args.fFragBuilder->codeAppend(";");
+ if (gp.fSaturate == Saturate::kYes) {
+ args.fFragBuilder->codeAppendf("%s = saturate(%s);",
+ args.fOutputColor, args.fOutputColor);
+ }
+ } else {
+ // Saturate is only intended for use with a proxy to account for the fact
+ // that GrTextureOp skips SkPaint conversion, which normally handles this.
+ SkASSERT(gp.fSaturate == Saturate::kNo);
+ }
+
+ // And lastly, output the coverage calculation code
+ if (gp.fCoverageMode == CoverageMode::kWithPosition) {
+ GrGLSLVarying coverage(kFloat_GrSLType);
+ args.fVaryingHandler->addVarying("coverage", &coverage);
+ if (gp.fNeedsPerspective) {
+ // Multiply by "W" in the vertex shader, then by 1/w (sk_FragCoord.w) in
+ // the fragment shader to get screen-space linear coverage.
+ args.fVertBuilder->codeAppendf("%s = %s.w * %s.z;",
+ coverage.vsOut(), gp.fPosition.name(),
+ gp.fPosition.name());
+ args.fFragBuilder->codeAppendf("float coverage = %s * sk_FragCoord.w;",
+ coverage.fsIn());
+ } else {
+ args.fVertBuilder->codeAppendf("%s = %s;",
+ coverage.vsOut(), gp.fCoverage.name());
+ args.fFragBuilder->codeAppendf("float coverage = %s;", coverage.fsIn());
+ }
+
+ if (gp.fGeomDomain.isInitialized()) {
+ // Calculate distance from sk_FragCoord to the 4 edges of the domain
+ // and clamp them to (0, 1). Use the minimum of these and the original
+ // coverage. This only has to be done in the exterior triangles, the
+ // interior of the quad geometry can never be clipped by the domain box.
+ args.fFragBuilder->codeAppend("float4 geoDomain;");
+ args.fVaryingHandler->addPassThroughAttribute(gp.fGeomDomain, "geoDomain",
+ Interpolation::kCanBeFlat);
+ args.fFragBuilder->codeAppend(
+ "if (coverage < 0.5) {"
+ " float4 dists4 = clamp(float4(1, 1, -1, -1) * "
+ "(sk_FragCoord.xyxy - geoDomain), 0, 1);"
+ " float2 dists2 = dists4.xy * dists4.zw;"
+ " coverage = min(coverage, dists2.x * dists2.y);"
+ "}");
+ }
+
+ args.fFragBuilder->codeAppendf("%s = half4(half(coverage));",
+ args.fOutputCoverage);
+ } else {
+ // Set coverage to 1, since it's either non-AA or the coverage was already
+ // folded into the output color
+ SkASSERT(!gp.fGeomDomain.isInitialized());
+ args.fFragBuilder->codeAppendf("%s = half4(1);", args.fOutputCoverage);
+ }
+ }
+ GrGLSLColorSpaceXformHelper fTextureColorSpaceXformHelper;
+ };
+ return new GLSLProcessor;
+ }
+
+private:
+ QuadPerEdgeAAGeometryProcessor(const VertexSpec& spec)
+ : INHERITED(kQuadPerEdgeAAGeometryProcessor_ClassID)
+ , fTextureColorSpaceXform(nullptr) {
+ SkASSERT(!spec.hasDomain());
+ this->initializeAttrs(spec);
+ this->setTextureSamplerCnt(0);
+ }
+
+ QuadPerEdgeAAGeometryProcessor(const VertexSpec& spec,
+ const GrShaderCaps& caps,
+ GrTextureType textureType,
+ const GrSamplerState& samplerState,
+ const GrSwizzle& swizzle,
+ uint32_t extraSamplerKey,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform,
+ Saturate saturate)
+ : INHERITED(kQuadPerEdgeAAGeometryProcessor_ClassID)
+ , fSaturate(saturate)
+ , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
+ , fSampler(textureType, samplerState, swizzle, extraSamplerKey) {
+ SkASSERT(spec.hasLocalCoords());
+ this->initializeAttrs(spec);
+ this->setTextureSamplerCnt(1);
+ }
+
+ // This needs to stay in sync w/ VertexSpec::vertexSize
+ void initializeAttrs(const VertexSpec& spec) {
+ fNeedsPerspective = spec.deviceDimensionality() == 3;
+ fCoverageMode = spec.coverageMode();
+
+ if (fCoverageMode == CoverageMode::kWithPosition) {
+ if (fNeedsPerspective) {
+ fPosition = {"positionWithCoverage", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ } else {
+ fPosition = {"position", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ fCoverage = {"coverage", kFloat_GrVertexAttribType, kFloat_GrSLType};
+ }
+ } else {
+ if (fNeedsPerspective) {
+ fPosition = {"position", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else {
+ fPosition = {"position", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ }
+ }
+
+ // Need a geometry domain when the quads are AA and not rectilinear, since their AA
+ // outsetting can go beyond a half pixel.
+ if (spec.requiresGeometryDomain()) {
+ fGeomDomain = {"geomDomain", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ }
+
+ int localDim = spec.localDimensionality();
+ if (localDim == 3) {
+ fLocalCoord = {"localCoord", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
+ } else if (localDim == 2) {
+ fLocalCoord = {"localCoord", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
+ } // else localDim == 0 and attribute remains uninitialized
+
+ if (ColorType::kByte == spec.colorType()) {
+ fColor = {"color", kUByte4_norm_GrVertexAttribType, kHalf4_GrSLType};
+ } else if (ColorType::kHalf == spec.colorType()) {
+ fColor = {"color", kHalf4_GrVertexAttribType, kHalf4_GrSLType};
+ }
+
+ if (spec.hasDomain()) {
+ fTexDomain = {"texDomain", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
+ }
+
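+ // The count of 6 covers the six Attribute members declared below, in order;
+ // attributes left uninitialized for this spec are skipped when the vertex
+ // layout is computed.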
+ this->setVertexAttributes(&fPosition, 6);
+ }
+
+ const TextureSampler& onTextureSampler(int) const override { return fSampler; }
+
+ Attribute fPosition; // May contain coverage as last channel
+ Attribute fCoverage; // Used for non-perspective position to avoid Intel Metal issues
+ Attribute fColor; // May have coverage modulated in if the FPs support it
+ Attribute fLocalCoord;
+ Attribute fGeomDomain; // Screen-space bounding box on geometry+aa outset
+ Attribute fTexDomain; // Texture-space bounding box on local coords
+
+ // The positions attribute may have coverage built into it, so float3 is an ambiguous type
+ // and may mean 2d with coverage, or 3d with no coverage
+ bool fNeedsPerspective;
+ // Should saturate() be called on the color? Only relevant when created with a texture.
+ Saturate fSaturate = Saturate::kNo;
+ CoverageMode fCoverageMode;
+
+ // Color space will be null and fSampler.isInitialized() returns false when the GP is configured
+ // to skip texturing.
+ sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
+ TextureSampler fSampler;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+sk_sp<GrGeometryProcessor> MakeProcessor(const VertexSpec& spec) {
+ return QuadPerEdgeAAGeometryProcessor::Make(spec);
+}
+
+sk_sp<GrGeometryProcessor> MakeTexturedProcessor(const VertexSpec& spec, const GrShaderCaps& caps,
+ GrTextureType textureType,
+ const GrSamplerState& samplerState,
+ const GrSwizzle& swizzle, uint32_t extraSamplerKey,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform,
+ Saturate saturate) {
+ return QuadPerEdgeAAGeometryProcessor::Make(spec, caps, textureType, samplerState, swizzle,
+ extraSamplerKey, std::move(textureColorSpaceXform),
+ saturate);
+}
+
+} // namespace GrQuadPerEdgeAA
diff --git a/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.h b/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.h
new file mode 100644
index 0000000000..06bb315efc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrQuadPerEdgeAA.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrQuadPerEdgeAA_DEFINED
+#define GrQuadPerEdgeAA_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrTextureOp.h"
+
+class GrCaps;
+class GrColorSpaceXform;
+class GrShaderCaps;
+
+namespace GrQuadPerEdgeAA {
+ using Saturate = GrTextureOp::Saturate;
+
+ enum class CoverageMode { kNone, kWithPosition, kWithColor };
+ enum class Domain : bool { kNo = false, kYes = true };
+ enum class ColorType { kNone, kByte, kHalf, kLast = kHalf };
+ static const int kColorTypeCount = static_cast<int>(ColorType::kLast) + 1;
+
+ // Gets the minimum ColorType that can represent a color.
+ ColorType MinColorType(SkPMColor4f, GrClampType, const GrCaps&);
+
+ // Specifies the vertex configuration for an op that renders per-edge AA quads. The vertex
+ // order (when enabled) is device position, color, local position, domain, aa edge equations.
+ // This order matches the constructor argument order of VertexSpec and is the order that
+ // GPAttributes maintains. If hasLocalCoords is false, then the local quad type can be ignored.
+ struct VertexSpec {
+ public:
+ VertexSpec(GrQuad::Type deviceQuadType, ColorType colorType, GrQuad::Type localQuadType,
+ bool hasLocalCoords, Domain domain, GrAAType aa, bool coverageAsAlpha)
+ : fDeviceQuadType(static_cast<unsigned>(deviceQuadType))
+ , fLocalQuadType(static_cast<unsigned>(localQuadType))
+ , fHasLocalCoords(hasLocalCoords)
+ , fColorType(static_cast<unsigned>(colorType))
+ , fHasDomain(static_cast<unsigned>(domain))
+ , fUsesCoverageAA(aa == GrAAType::kCoverage)
+ , fCompatibleWithCoverageAsAlpha(coverageAsAlpha)
+ , fRequiresGeometryDomain(aa == GrAAType::kCoverage &&
+ deviceQuadType > GrQuad::Type::kRectilinear) { }
+
+ GrQuad::Type deviceQuadType() const { return static_cast<GrQuad::Type>(fDeviceQuadType); }
+ GrQuad::Type localQuadType() const { return static_cast<GrQuad::Type>(fLocalQuadType); }
+ bool hasLocalCoords() const { return fHasLocalCoords; }
+ ColorType colorType() const { return static_cast<ColorType>(fColorType); }
+ bool hasVertexColors() const { return ColorType::kNone != this->colorType(); }
+ bool hasDomain() const { return fHasDomain; }
+ bool usesCoverageAA() const { return fUsesCoverageAA; }
+ bool compatibleWithCoverageAsAlpha() const { return fCompatibleWithCoverageAsAlpha; }
+ bool requiresGeometryDomain() const { return fRequiresGeometryDomain; }
+ // Will always be 2 or 3
+ int deviceDimensionality() const;
+ // Will always be 0 if hasLocalCoords is false, otherwise will be 2 or 3
+ int localDimensionality() const;
+
+ int verticesPerQuad() const { return fUsesCoverageAA ? 8 : 4; }
+
+ CoverageMode coverageMode() const;
+ size_t vertexSize() const;
+
+ private:
+ static_assert(GrQuad::kTypeCount <= 4, "GrQuad::Type doesn't fit in 2 bits");
+ static_assert(kColorTypeCount <= 4, "Color doesn't fit in 2 bits");
+
+ unsigned fDeviceQuadType: 2;
+ unsigned fLocalQuadType: 2;
+ unsigned fHasLocalCoords: 1;
+ unsigned fColorType : 2;
+ unsigned fHasDomain: 1;
+ unsigned fUsesCoverageAA: 1;
+ unsigned fCompatibleWithCoverageAsAlpha: 1;
+ // The geometry domain serves to clip off pixels touched by quads with sharp corners that
+ // would otherwise exceed the miter limit for the AA-outset geometry.
+ unsigned fRequiresGeometryDomain: 1;
+ };
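+
+ // Illustrative usage (hypothetical values): a coverage-AA, non-perspective
+ // quad with byte colors, 2D local coords, and no texture domain:
+ //   VertexSpec spec(GrQuad::Type::kAxisAligned, ColorType::kByte,
+ //                   GrQuad::Type::kAxisAligned, /*hasLocalCoords=*/true,
+ //                   Domain::kNo, GrAAType::kCoverage,
+ //                   /*coverageAsAlpha=*/false);
+ //   // spec.verticesPerQuad() == 8, spec.deviceDimensionality() == 2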
+
+ sk_sp<GrGeometryProcessor> MakeProcessor(const VertexSpec& spec);
+
+ sk_sp<GrGeometryProcessor> MakeTexturedProcessor(
+ const VertexSpec& spec, const GrShaderCaps& caps, GrTextureType textureType,
+ const GrSamplerState& samplerState, const GrSwizzle& swizzle, uint32_t extraSamplerKey,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform, Saturate saturate);
+
+ // Fill vertices with the vertex data needed to represent the given quad. The device position,
+ // local coords, vertex color, domain, and edge coefficients will be written and/or computed
+ // based on the configuration in the vertex spec; if that attribute is disabled in the spec,
+ // then its corresponding function argument is ignored.
+ //
+ // Tessellation is based on the quad type of the vertex spec, not the provided GrQuad's
+ // so that all quads in a batch are tessellated the same.
+ //
+ // Returns the advanced pointer in vertices.
+ void* Tessellate(void* vertices, const VertexSpec& spec, const GrQuad& deviceQuad,
+ const SkPMColor4f& color, const GrQuad& localQuad, const SkRect& domain,
+ GrQuadAAFlags aa);
+
+ // The mesh will have its index data configured to meet the expectations of the Tessellate()
+ // function, but the calling code must handle filling a vertex buffer via Tessellate() and
+ // then assigning it to the configured mesh.
+ //
+ // Returns false if the index data could not be allocated.
+ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const VertexSpec& spec,
+ int quadCount);
+
+ static constexpr int kNumAAQuadsInIndexBuffer = 512;
+
+} // namespace GrQuadPerEdgeAA
+
+#endif // GrQuadPerEdgeAA_DEFINED
diff --git a/gfx/skia/skia/src/gpu/ops/GrRegionOp.cpp b/gfx/skia/skia/src/gpu/ops/GrRegionOp.cpp
new file mode 100644
index 0000000000..6d48a339fa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrRegionOp.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrRegionOp.h"
+
+#include "include/core/SkRegion.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+static const int kVertsPerInstance = 4;
+static const int kIndicesPerInstance = 6;
+
+static sk_sp<GrGeometryProcessor> make_gp(const GrShaderCaps* shaderCaps,
+ const SkMatrix& viewMatrix,
+ bool wideColor) {
+ using namespace GrDefaultGeoProcFactory;
+ Color::Type colorType =
+ wideColor ? Color::kPremulWideColorAttribute_Type : Color::kPremulGrColorAttribute_Type;
+ return GrDefaultGeoProcFactory::Make(shaderCaps, colorType, Coverage::kSolid_Type,
+ LocalCoords::kUsePosition_Type, viewMatrix);
+}
+
+namespace {
+
+class RegionOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ GrAAType aaType,
+ const GrUserStencilSettings* stencilSettings = nullptr) {
+ return Helper::FactoryHelper<RegionOp>(context, std::move(paint), viewMatrix, region,
+ aaType, stencilSettings);
+ }
+
+ RegionOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkRegion& region, GrAAType aaType,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, aaType, stencilSettings)
+ , fViewMatrix(viewMatrix) {
+ RegionInfo& info = fRegions.push_back();
+ info.fColor = color;
+ info.fRegion = region;
+
+ SkRect bounds = SkRect::Make(region.getBounds());
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ const char* name() const override { return "GrRegionOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# combined: %d\n", fRegions.count());
+ for (int i = 0; i < fRegions.count(); ++i) {
+ const RegionInfo& info = fRegions[i];
+ str.appendf("%d: Color: 0x%08x, Region with %d rects\n", i, info.fColor.toBytes_RGBA(),
+ info.fRegion.computeRegionComplexity());
+ }
+ str += fHelper.dumpInfo();
+ str += INHERITED::dumpInfo();
+ return str;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, GrProcessorAnalysisCoverage::kNone,
+ &fRegions[0].fColor, &fWideColor);
+ }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ sk_sp<GrGeometryProcessor> gp = make_gp(target->caps().shaderCaps(), fViewMatrix,
+ fWideColor);
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ int numRegions = fRegions.count();
+ int numRects = 0;
+ for (int i = 0; i < numRegions; i++) {
+ numRects += fRegions[i].fRegion.computeRegionComplexity();
+ }
+
+ if (!numRects) {
+ return;
+ }
+ sk_sp<const GrGpuBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer();
+ if (!indexBuffer) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ PatternHelper helper(target, GrPrimitiveType::kTriangles, gp->vertexStride(),
+ std::move(indexBuffer), kVertsPerInstance, kIndicesPerInstance,
+ numRects);
+ GrVertexWriter vertices{helper.vertices()};
+ if (!vertices.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < numRegions; i++) {
+ GrVertexColor color(fRegions[i].fColor, fWideColor);
+ SkRegion::Iterator iter(fRegions[i].fRegion);
+ while (!iter.done()) {
+ SkRect rect = SkRect::Make(iter.rect());
+ vertices.writeQuad(GrVertexWriter::TriStripFromRect(rect), color);
+ iter.next();
+ }
+ }
+ helper.recordDraw(target, std::move(gp));
+ }
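+
+ // Sizing note (illustrative): each region rect becomes one patterned quad,
+ // so a region decomposed into 3 rects emits 3 * 4 = 12 vertices drawn with
+ // 3 * 6 = 18 indices from the shared quad index buffer.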
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ RegionOp* that = t->cast<RegionOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fViewMatrix != that->fViewMatrix) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fRegions.push_back_n(that->fRegions.count(), that->fRegions.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ struct RegionInfo {
+ SkPMColor4f fColor;
+ SkRegion fRegion;
+ };
+
+ Helper fHelper;
+ SkMatrix fViewMatrix;
+ SkSTArray<1, RegionInfo, true> fRegions;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+namespace GrRegionOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ GrAAType aaType,
+ const GrUserStencilSettings* stencilSettings) {
+ if (aaType != GrAAType::kNone && aaType != GrAAType::kMSAA) {
+ return nullptr;
+ }
+ return RegionOp::Make(context, std::move(paint), viewMatrix, region, aaType, stencilSettings);
+}
+}  // namespace GrRegionOp
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(RegionOp) {
+ SkRegion region;
+ int n = random->nextULessThan(200);
+ for (int i = 0; i < n; ++i) {
+ SkIPoint center;
+ center.fX = random->nextULessThan(1000);
+ center.fY = random->nextULessThan(1000);
+ int w = random->nextRangeU(10, 1000);
+ int h = random->nextRangeU(10, 1000);
+ SkIRect rect = {center.fX - w / 2, center.fY - h / 2, center.fX + w / 2, center.fY + h / 2};
+ SkRegion::Op op;
+ if (i == 0) {
+ op = SkRegion::kReplace_Op;
+ } else {
+ // Pick an op other than replace.
+ GR_STATIC_ASSERT(SkRegion::kLastOp == SkRegion::kReplace_Op);
+ op = (SkRegion::Op)random->nextULessThan(SkRegion::kLastOp);
+ }
+ region.op(rect, op);
+ }
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrAAType aaType = GrAAType::kNone;
+ if (numSamples > 1 && random->nextBool()) {
+ aaType = GrAAType::kMSAA;
+ }
+ return RegionOp::Make(context, std::move(paint), viewMatrix, region, aaType,
+ GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrRegionOp.h b/gfx/skia/skia/src/gpu/ops/GrRegionOp.h
new file mode 100644
index 0000000000..d7dc17c313
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrRegionOp.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRegionOp_DEFINED
+#define GrRegionOp_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+
+class GrDrawOp;
+class GrRecordingContext;
+class SkMatrix;
+class SkRegion;
+class GrPaint;
+struct GrUserStencilSettings;
+
+namespace GrRegionOp {
+/** GrAAType must be kNone or kMSAA. */
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext*,
+ GrPaint&&,
+ const SkMatrix& viewMatrix,
+ const SkRegion&,
+ GrAAType,
+ const GrUserStencilSettings* stencilSettings = nullptr);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.cpp b/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.cpp
new file mode 100644
index 0000000000..546933bb8a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.cpp
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrShadowRRectOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/effects/GrShadowGeoProc.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// Circle Data
+//
+// We have two possible cases for geometry for a circle:
+
+// In the case of a normal fill, we draw geometry for the circle as an octagon.
+static const uint16_t gFillCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 8, 1, 2, 8,
+ 2, 3, 8, 3, 4, 8,
+ 4, 5, 8, 5, 6, 8,
+ 6, 7, 8, 7, 0, 8,
+ // clang-format on
+};
+
+// For stroked circles, we use two nested octagons.
+static const uint16_t gStrokeCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 9, 0, 9, 8,
+ 1, 2, 10, 1, 10, 9,
+ 2, 3, 11, 2, 11, 10,
+ 3, 4, 12, 3, 12, 11,
+ 4, 5, 13, 4, 13, 12,
+ 5, 6, 14, 5, 14, 13,
+ 6, 7, 15, 6, 15, 14,
+ 7, 0, 8, 7, 8, 15,
+ // clang-format on
+};
+
+static const int kIndicesPerFillCircle = SK_ARRAY_COUNT(gFillCircleIndices);
+static const int kIndicesPerStrokeCircle = SK_ARRAY_COUNT(gStrokeCircleIndices);
+static const int kVertsPerStrokeCircle = 16;
+static const int kVertsPerFillCircle = 9;
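+
+// Consistency check of the counts above: the fill case draws 8 triangles
+// (24 indices) fanning around the shared center vertex (index 8) of a
+// 9-vertex octagon, while the stroke case draws 16 triangles (48 indices)
+// bridging two nested 8-vertex octagons.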
+
+static int circle_type_to_vert_count(bool stroked) {
+ return stroked ? kVertsPerStrokeCircle : kVertsPerFillCircle;
+}
+
+static int circle_type_to_index_count(bool stroked) {
+ return stroked ? kIndicesPerStrokeCircle : kIndicesPerFillCircle;
+}
+
+static const uint16_t* circle_type_to_indices(bool stroked) {
+ return stroked ? gStrokeCircleIndices : gFillCircleIndices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// RoundRect Data
+//
+// The geometry for a shadow roundrect is similar to a 9-patch:
+// ____________
+// |_|________|_|
+// | | | |
+// | | | |
+// | | | |
+// |_|________|_|
+// |_|________|_|
+//
+// However, each corner is rendered as a fan rather than a simple quad, as below. (The diagram
+// shows the upper part of the upper left corner. The bottom triangle would similarly be split
+// into two triangles.)
+// ________
+// |\ \ |
+// | \ \ |
+// | \\ |
+// | \|
+// --------
+//
+// The center of the fan handles the curve of the corner. For roundrects where the stroke width
+// is greater than the corner radius, the outer triangles blend from the curve to the straight
+// sides. Otherwise these triangles will be degenerate.
+//
+// In the case where the stroke width is greater than the corner radius and the
+// blur radius (overstroke), we add additional geometry to mark out the rectangle in the center.
+// This rectangle extends the coverage values of the center edges of the 9-patch.
+// ____________
+// |_|________|_|
+// | |\ ____ /| |
+// | | | | | |
+// | | |____| | |
+// |_|/______\|_|
+// |_|________|_|
+//
+// For filled rrects we reuse the stroke geometry but add an additional quad to the center.
+
+static const uint16_t gRRectIndices[] = {
+ // clang-format off
+ // overstroke quads
+ // we place this at the beginning so that we can skip these indices when rendering as filled
+ 0, 6, 25, 0, 25, 24,
+ 6, 18, 27, 6, 27, 25,
+ 18, 12, 26, 18, 26, 27,
+ 12, 0, 24, 12, 24, 26,
+
+ // corners
+ 0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5,
+ 6, 11, 10, 6, 10, 9, 6, 9, 8, 6, 8, 7,
+ 12, 17, 16, 12, 16, 15, 12, 15, 14, 12, 14, 13,
+ 18, 19, 20, 18, 20, 21, 18, 21, 22, 18, 22, 23,
+
+ // edges
+ 0, 5, 11, 0, 11, 6,
+ 6, 7, 19, 6, 19, 18,
+ 18, 23, 17, 18, 17, 12,
+ 12, 13, 1, 12, 1, 0,
+
+ // fill quad
+ // we place this at the end so that we can skip these indices when rendering as stroked
+ 0, 6, 18, 0, 18, 12,
+ // clang-format on
+};
+
+// overstroke count
+static const int kIndicesPerOverstrokeRRect = SK_ARRAY_COUNT(gRRectIndices) - 6;
+// simple stroke count skips overstroke indices
+static const int kIndicesPerStrokeRRect = kIndicesPerOverstrokeRRect - 6*4;
+// fill count adds final quad to stroke count
+static const int kIndicesPerFillRRect = kIndicesPerStrokeRRect + 6;
+static const int kVertsPerStrokeRRect = 24;
+static const int kVertsPerOverstrokeRRect = 28;
+static const int kVertsPerFillRRect = 24;
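+
+// Arithmetic for the counts above: gRRectIndices holds 102 entries, so
+// overstroke = 102 - 6 = 96 (everything except the final fill quad),
+// stroke = 96 - 24 = 72 (also skipping the four leading overstroke quads),
+// and fill = 72 + 6 = 78 (the stroke set plus the center quad, with
+// rrect_type_to_indices() starting past the overstroke block).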
+
+enum RRectType {
+ kFill_RRectType,
+ kStroke_RRectType,
+ kOverstroke_RRectType,
+};
+
+static int rrect_type_to_vert_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ return kVertsPerFillRRect;
+ case kStroke_RRectType:
+ return kVertsPerStrokeRRect;
+ case kOverstroke_RRectType:
+ return kVertsPerOverstrokeRRect;
+ }
+ SK_ABORT("Invalid type");
+}
+
+static int rrect_type_to_index_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ return kIndicesPerFillRRect;
+ case kStroke_RRectType:
+ return kIndicesPerStrokeRRect;
+ case kOverstroke_RRectType:
+ return kIndicesPerOverstrokeRRect;
+ }
+ SK_ABORT("Invalid type");
+}
+
+static const uint16_t* rrect_type_to_indices(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ case kStroke_RRectType:
+ return gRRectIndices + 6*4;
+ case kOverstroke_RRectType:
+ return gRRectIndices;
+ }
+ SK_ABORT("Invalid type");
+}
+
+///////////////////////////////////////////////////////////////////////////////
+namespace {
+
+class ShadowCircularRRectOp final : public GrMeshDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ // An insetWidth greater than half the rect's width or height indicates a simple fill.
+ ShadowCircularRRectOp(GrColor color, const SkRect& devRect,
+ float devRadius, bool isCircle, float blurRadius, float insetWidth)
+ : INHERITED(ClassID()) {
+ SkRect bounds = devRect;
+ SkASSERT(insetWidth > 0);
+ SkScalar innerRadius = 0.0f;
+ SkScalar outerRadius = devRadius;
+ SkScalar umbraInset;
+
+ RRectType type = kFill_RRectType;
+ if (isCircle) {
+ umbraInset = 0;
+ } else {
+ umbraInset = SkTMax(outerRadius, blurRadius);
+ }
+
+ // If the stroke is greater than the width or height, this is still a fill;
+ // otherwise we compute stroke params.
+ if (isCircle) {
+ innerRadius = devRadius - insetWidth;
+ type = innerRadius > 0 ? kStroke_RRectType : kFill_RRectType;
+ } else {
+ if (insetWidth <= 0.5f*SkTMin(devRect.width(), devRect.height())) {
+ // We don't worry about a real inner radius, we just need to know if we
+ // need to create overstroke vertices.
+ innerRadius = SkTMax(insetWidth - umbraInset, 0.0f);
+ type = innerRadius > 0 ? kOverstroke_RRectType : kStroke_RRectType;
+ }
+ }
+
+ this->setBounds(bounds, HasAABloat::kNo, IsHairline::kNo);
+
+ fGeoData.emplace_back(Geometry{color, outerRadius, umbraInset, innerRadius,
+ blurRadius, bounds, type, isCircle});
+ if (isCircle) {
+ fVertCount = circle_type_to_vert_count(kStroke_RRectType == type);
+ fIndexCount = circle_type_to_index_count(kStroke_RRectType == type);
+ } else {
+ fVertCount = rrect_type_to_vert_count(type);
+ fIndexCount = rrect_type_to_index_count(type);
+ }
+ }
+
+ const char* name() const override { return "ShadowCircularRRectOp"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fGeoData.count(); ++i) {
+ string.appendf(
+ "Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "OuterRad: %.2f, Umbra: %.2f, InnerRad: %.2f, BlurRad: %.2f\n",
+ fGeoData[i].fColor, fGeoData[i].fDevBounds.fLeft, fGeoData[i].fDevBounds.fTop,
+ fGeoData[i].fDevBounds.fRight, fGeoData[i].fDevBounds.fBottom,
+ fGeoData[i].fOuterRadius, fGeoData[i].fUmbraInset,
+ fGeoData[i].fInnerRadius, fGeoData[i].fBlurRadius);
+ }
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
+ bool hasMixedSampledCoverage, GrClampType) override {
+ return GrProcessorSet::EmptySetAnalysis();
+ }
+
+private:
+ struct Geometry {
+ GrColor fColor;
+ SkScalar fOuterRadius;
+ SkScalar fUmbraInset;
+ SkScalar fInnerRadius;
+ SkScalar fBlurRadius;
+ SkRect fDevBounds;
+ RRectType fType;
+ bool fIsCircle;
+ };
+
+ struct CircleVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fOffset;
+ SkScalar fDistanceCorrection;
+ };
+
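+ // Writes the 8 vertices of an octagon circumscribing the unit circle,
+ // followed by either an 8-vertex inner ring (stroked) or a single center
+ // vertex (filled).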
+ void fillInCircleVerts(const Geometry& args, bool isStroked, CircleVertex** verts) const {
+
+ GrColor color = args.fColor;
+ SkScalar outerRadius = args.fOuterRadius;
+ SkScalar innerRadius = args.fInnerRadius;
+ SkScalar blurRadius = args.fBlurRadius;
+ SkScalar distanceCorrection = outerRadius / blurRadius;
+
+ const SkRect& bounds = args.fDevBounds;
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ innerRadius = innerRadius / outerRadius;
+
+ SkPoint center = SkPoint::Make(bounds.centerX(), bounds.centerY());
+ SkScalar halfWidth = 0.5f * bounds.width();
+ // tan(pi/8) == sqrt(2) - 1: the eight offsets below are the vertices of a
+ // regular octagon circumscribing the unit circle.
+ SkScalar octOffset = 0.41421356237f;
+
+ (*verts)->fPos = center + SkPoint::Make(-octOffset * halfWidth, -halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-octOffset, -1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(octOffset * halfWidth, -halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(octOffset, -1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(halfWidth, -octOffset * halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(1, -octOffset);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(halfWidth, octOffset * halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(1, octOffset);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(octOffset * halfWidth, halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(octOffset, 1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-octOffset * halfWidth, halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-octOffset, 1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-halfWidth, octOffset * halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-1, octOffset);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-halfWidth, -octOffset * halfWidth);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-1, -octOffset);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ if (isStroked) {
+ // compute the inner ring
+
+ // cosine and sine of pi/8
+ SkScalar c = 0.9238795325f;
+ SkScalar s = 0.382683432f;
+ SkScalar r = args.fInnerRadius;
+
+ (*verts)->fPos = center + SkPoint::Make(-s * r, -c * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-s * innerRadius, -c * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(s * r, -c * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(s * innerRadius, -c * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(c * r, -s * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(c * innerRadius, -s * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(c * r, s * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(c * innerRadius, s * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(s * r, c * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(s * innerRadius, c * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-s * r, c * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-s * innerRadius, c * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-c * r, s * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-c * innerRadius, s * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = center + SkPoint::Make(-c * r, -s * r);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(-c * innerRadius, -s * innerRadius);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+ } else {
+ // filled
+ (*verts)->fPos = center;
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+ }
+ }
+
+ void fillInRRectVerts(const Geometry& args, CircleVertex** verts) const {
+ GrColor color = args.fColor;
+ SkScalar outerRadius = args.fOuterRadius;
+
+ const SkRect& bounds = args.fDevBounds;
+
+ SkScalar umbraInset = args.fUmbraInset;
+ SkScalar minDim = 0.5f*SkTMin(bounds.width(), bounds.height());
+ if (umbraInset > minDim) {
+ umbraInset = minDim;
+ }
+
+ SkScalar xInner[4] = { bounds.fLeft + umbraInset, bounds.fRight - umbraInset,
+ bounds.fLeft + umbraInset, bounds.fRight - umbraInset };
+ SkScalar xMid[4] = { bounds.fLeft + outerRadius, bounds.fRight - outerRadius,
+ bounds.fLeft + outerRadius, bounds.fRight - outerRadius };
+ SkScalar xOuter[4] = { bounds.fLeft, bounds.fRight,
+ bounds.fLeft, bounds.fRight };
+ SkScalar yInner[4] = { bounds.fTop + umbraInset, bounds.fTop + umbraInset,
+ bounds.fBottom - umbraInset, bounds.fBottom - umbraInset };
+ SkScalar yMid[4] = { bounds.fTop + outerRadius, bounds.fTop + outerRadius,
+ bounds.fBottom - outerRadius, bounds.fBottom - outerRadius };
+ SkScalar yOuter[4] = { bounds.fTop, bounds.fTop,
+ bounds.fBottom, bounds.fBottom };
+
+ SkScalar blurRadius = args.fBlurRadius;
+
+ // In the case where we have to inset more for the umbra, our two triangles in the
+ // corner get skewed to a diamond rather than a square. To correct for that,
+ // we also skew the vectors we send to the shader that help define the circle.
+ // By doing so, we end up with a quarter circle in the corner rather than the
+ // elliptical curve.
+
+ // This is a bit magical, but it gives us the correct results at extrema:
+ // a) umbraInset == outerRadius produces an orthogonal vector
+ // b) outerRadius == 0 produces a diagonal vector
+ // And visually the corner looks correct.
+ SkVector outerVec = SkVector::Make(outerRadius - umbraInset, -outerRadius - umbraInset);
+ outerVec.normalize();
+ // We want the circle edge to fall fractionally along the diagonal at
+ // (sqrt(2)*(umbraInset - outerRadius) + outerRadius) / (sqrt(2)*umbraInset)
+ //
+ // Setting the components of the diagonal offset to the following value will give us that.
+ SkScalar diagVal = umbraInset / (SK_ScalarSqrt2*(outerRadius - umbraInset) - outerRadius);
+ SkVector diagVec = SkVector::Make(diagVal, diagVal);
+ SkScalar distanceCorrection = umbraInset / blurRadius;
+
+ // build corner by corner
+ for (int i = 0; i < 4; ++i) {
+ // inner point
+ (*verts)->fPos = SkPoint::Make(xInner[i], yInner[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkVector::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ // outer points
+ (*verts)->fPos = SkPoint::Make(xOuter[i], yInner[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkVector::Make(0, -1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = SkPoint::Make(xOuter[i], yMid[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = outerVec;
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = SkPoint::Make(xOuter[i], yOuter[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = diagVec;
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = SkPoint::Make(xMid[i], yOuter[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = outerVec;
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ (*verts)->fPos = SkPoint::Make(xInner[i], yOuter[i]);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkVector::Make(0, -1);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+ }
+
+ // Add the additional vertices for overstroked rrects.
+ // Effectively this is an additional stroked rrect, with its
+ // parameters equal to those in the center of the 9-patch. This will
+ // give constant values across this inner ring.
+ if (kOverstroke_RRectType == args.fType) {
+ SkASSERT(args.fInnerRadius > 0.0f);
+
+ SkScalar inset = umbraInset + args.fInnerRadius;
+
+ // TL
+ (*verts)->fPos = SkPoint::Make(bounds.fLeft + inset, bounds.fTop + inset);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ // TR
+ (*verts)->fPos = SkPoint::Make(bounds.fRight - inset, bounds.fTop + inset);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ // BL
+ (*verts)->fPos = SkPoint::Make(bounds.fLeft + inset, bounds.fBottom - inset);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+
+ // BR
+ (*verts)->fPos = SkPoint::Make(bounds.fRight - inset, bounds.fBottom - inset);
+ (*verts)->fColor = color;
+ (*verts)->fOffset = SkPoint::Make(0, 0);
+ (*verts)->fDistanceCorrection = distanceCorrection;
+ (*verts)++;
+ }
+
+ }
+
+ void onPrepareDraws(Target* target) override {
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp = GrRRectShadowGeoProc::Make();
+
+ int instanceCount = fGeoData.count();
+ SkASSERT(sizeof(CircleVertex) == gp->vertexStride());
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+ CircleVertex* verts = (CircleVertex*)target->makeVertexSpace(
+ sizeof(CircleVertex), fVertCount, &vertexBuffer, &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ sk_sp<const GrBuffer> indexBuffer;
+ int firstIndex = 0;
+ uint16_t* indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ int currStartVertex = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ if (args.fIsCircle) {
+ bool isStroked = SkToBool(kStroke_RRectType == args.fType);
+ this->fillInCircleVerts(args, isStroked, &verts);
+
+ const uint16_t* primIndices = circle_type_to_indices(isStroked);
+ const int primIndexCount = circle_type_to_index_count(isStroked);
+ for (int j = 0; j < primIndexCount; ++j) {
+ *indices++ = primIndices[j] + currStartVertex;
+ }
+
+ currStartVertex += circle_type_to_vert_count(isStroked);
+
+ } else {
+ this->fillInRRectVerts(args, &verts);
+
+ const uint16_t* primIndices = rrect_type_to_indices(args.fType);
+ const int primIndexCount = rrect_type_to_index_count(args.fType);
+ for (int j = 0; j < primIndexCount; ++j) {
+ *indices++ = primIndices[j] + currStartVertex;
+ }
+
+ currStartVertex += rrect_type_to_vert_count(args.fType);
+ }
+ }
+
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ mesh->setIndexed(std::move(indexBuffer), fIndexCount, firstIndex, 0, fVertCount - 1,
+ GrPrimitiveRestart::kNo);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ this, chainBounds, GrProcessorSet::MakeEmptySet());
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ ShadowCircularRRectOp* that = t->cast<ShadowCircularRRectOp>();
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ fVertCount += that->fVertCount;
+ fIndexCount += that->fIndexCount;
+ return CombineResult::kMerged;
+ }
+
+ SkSTArray<1, Geometry, true> fGeoData;
+ int fVertCount;
+ int fIndexCount;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace GrShadowRRectOp {
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ SkScalar blurWidth,
+ SkScalar insetWidth) {
+ // Shadow rrect ops only handle simple circular rrects.
+ SkASSERT(viewMatrix.isSimilarity() && SkRRectPriv::EqualRadii(rrect));
+
+ // Do any matrix crunching before we reset the draw state for device coords.
+ const SkRect& rrectBounds = rrect.getBounds();
+ SkRect bounds;
+ viewMatrix.mapRect(&bounds, rrectBounds);
+
+ // Map radius and inset. As the matrix is a similarity matrix, this should be isotropic.
+ SkScalar radius = SkRRectPriv::GetSimpleRadii(rrect).fX;
+ SkScalar matrixFactor = viewMatrix[SkMatrix::kMScaleX] + viewMatrix[SkMatrix::kMSkewX];
+ SkScalar scaledRadius = SkScalarAbs(radius*matrixFactor);
+ SkScalar scaledInsetWidth = SkScalarAbs(insetWidth*matrixFactor);
+
+ if (scaledInsetWidth <= 0) {
+ return nullptr;
+ }
+
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<ShadowCircularRRectOp>(color, bounds,
+ scaledRadius,
+ rrect.isOval(),
+ blurWidth,
+ scaledInsetWidth);
+}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(ShadowRRectOp) {
+ // create a similarity matrix
+ SkScalar rotate = random->nextSScalar1() * 360.f;
+ SkScalar translateX = random->nextSScalar1() * 1000.f;
+ SkScalar translateY = random->nextSScalar1() * 1000.f;
+ SkScalar scale;
+ do {
+ scale = random->nextSScalar1() * 100.f;
+ } while (scale == 0);
+ SkMatrix viewMatrix;
+ viewMatrix.setRotate(rotate);
+ viewMatrix.postTranslate(translateX, translateY);
+ viewMatrix.postScale(scale, scale);
+ SkScalar insetWidth = random->nextSScalar1() * 72.f;
+ SkScalar blurWidth = random->nextSScalar1() * 72.f;
+ bool isCircle = random->nextBool();
+ // This op doesn't use a full GrPaint, just a color.
+ GrColor color = paint.getColor4f().toBytes_RGBA();
+ if (isCircle) {
+ SkRect circle = GrTest::TestSquare(random);
+ SkRRect rrect = SkRRect::MakeOval(circle);
+ return GrShadowRRectOp::Make(context, color, viewMatrix, rrect, blurWidth, insetWidth);
+ } else {
+ SkRRect rrect;
+ do {
+ // This may return a rrect with elliptical corners, which we don't support.
+ rrect = GrTest::TestRRectSimple(random);
+ } while (!SkRRectPriv::IsSimpleCircular(rrect));
+ return GrShadowRRectOp::Make(context, color, viewMatrix, rrect, blurWidth, insetWidth);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.h b/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.h
new file mode 100644
index 0000000000..c4d313dd69
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrShadowRRectOp.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShadowRRectOp_DEFINED
+#define GrShadowRRectOp_DEFINED
+
+#include <memory>
+#include "src/gpu/GrColor.h"
+
+class GrDrawOp;
+class GrRecordingContext;
+
+class SkMatrix;
+class SkRRect;
+
+namespace GrShadowRRectOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext*,
+ GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRRect&,
+ SkScalar blurWidth,
+ SkScalar insetWidth);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp b/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
new file mode 100644
index 0000000000..3c10b8bfd4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrProcessorSet.h"
+#include "src/gpu/GrUserStencilSettings.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/geometry/GrRect.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+GrSimpleMeshDrawOpHelper::GrSimpleMeshDrawOpHelper(const MakeArgs& args, GrAAType aaType,
+ InputFlags inputFlags)
+ : fProcessors(args.fProcessorSet)
+ , fPipelineFlags((GrPipeline::InputFlags)inputFlags)
+ , fAAType((int)aaType)
+ , fUsesLocalCoords(false)
+ , fCompatibleWithCoverageAsAlpha(false) {
+ SkDEBUGCODE(fDidAnalysis = false);
+ SkDEBUGCODE(fMadePipeline = false);
+ if (GrAATypeIsHW(aaType)) {
+ fPipelineFlags |= GrPipeline::InputFlags::kHWAntialias;
+ }
+}
+
+GrSimpleMeshDrawOpHelper::~GrSimpleMeshDrawOpHelper() {
+ if (fProcessors) {
+ fProcessors->~GrProcessorSet();
+ }
+}
+
+GrDrawOp::FixedFunctionFlags GrSimpleMeshDrawOpHelper::fixedFunctionFlags() const {
+ return GrAATypeIsHW((this->aaType())) ? GrDrawOp::FixedFunctionFlags::kUsesHWAA
+ : GrDrawOp::FixedFunctionFlags::kNone;
+}
+
+static bool none_as_coverage_aa_compatible(GrAAType aa1, GrAAType aa2) {
+ return (aa1 == GrAAType::kNone && aa2 == GrAAType::kCoverage) ||
+ (aa1 == GrAAType::kCoverage && aa2 == GrAAType::kNone);
+}
+
+bool GrSimpleMeshDrawOpHelper::isCompatible(const GrSimpleMeshDrawOpHelper& that,
+ const GrCaps& caps, const SkRect& thisBounds,
+ const SkRect& thatBounds, bool noneAsCoverageAA) const {
+ if (SkToBool(fProcessors) != SkToBool(that.fProcessors)) {
+ return false;
+ }
+ if (fProcessors) {
+ if (*fProcessors != *that.fProcessors) {
+ return false;
+ }
+ }
+ bool result = fPipelineFlags == that.fPipelineFlags && (fAAType == that.fAAType ||
+ (noneAsCoverageAA && none_as_coverage_aa_compatible(this->aaType(), that.aaType())));
+ SkASSERT(!result || fCompatibleWithCoverageAsAlpha == that.fCompatibleWithCoverageAsAlpha);
+ SkASSERT(!result || fUsesLocalCoords == that.fUsesLocalCoords);
+ return result;
+}
+
+GrProcessorSet::Analysis GrSimpleMeshDrawOpHelper::finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType, GrProcessorAnalysisCoverage geometryCoverage,
+ SkPMColor4f* geometryColor, bool* wideColor) {
+ GrProcessorAnalysisColor color = *geometryColor;
+ auto result = this->finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, geometryCoverage, &color);
+ color.isConstant(geometryColor);
+ if (wideColor) {
+ *wideColor = SkPMColor4fNeedsWideColor(*geometryColor, clampType, caps);
+ }
+ return result;
+}
+
+GrProcessorSet::Analysis GrSimpleMeshDrawOpHelper::finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip* clip, const GrUserStencilSettings* userStencil,
+ bool hasMixedSampledCoverage, GrClampType clampType,
+ GrProcessorAnalysisCoverage geometryCoverage, GrProcessorAnalysisColor* geometryColor) {
+ SkDEBUGCODE(fDidAnalysis = true);
+ GrProcessorSet::Analysis analysis;
+ if (fProcessors) {
+ GrProcessorAnalysisCoverage coverage = geometryCoverage;
+ if (GrProcessorAnalysisCoverage::kNone == coverage) {
+ coverage = clip->numClipCoverageFragmentProcessors()
+ ? GrProcessorAnalysisCoverage::kSingleChannel
+ : GrProcessorAnalysisCoverage::kNone;
+ }
+ SkPMColor4f overrideColor;
+ analysis = fProcessors->finalize(*geometryColor, coverage, clip, userStencil,
+ hasMixedSampledCoverage, caps, clampType, &overrideColor);
+ if (analysis.inputColorIsOverridden()) {
+ *geometryColor = overrideColor;
+ }
+ } else {
+ analysis = GrProcessorSet::EmptySetAnalysis();
+ }
+ fUsesLocalCoords = analysis.usesLocalCoords();
+ fCompatibleWithCoverageAsAlpha = analysis.isCompatibleWithCoverageAsAlpha();
+ return analysis;
+}
+
+void GrSimpleMeshDrawOpHelper::executeDrawsAndUploads(
+ const GrOp* op, GrOpFlushState* flushState, const SkRect& chainBounds) {
+ if (fProcessors) {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ op, chainBounds, std::move(*fProcessors), fPipelineFlags);
+ } else {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ op, chainBounds, GrProcessorSet::MakeEmptySet(), fPipelineFlags);
+ }
+}
+
+#ifdef SK_DEBUG
+static void dump_pipeline_flags(GrPipeline::InputFlags flags, SkString* result) {
+ if (GrPipeline::InputFlags::kNone != flags) {
+ if (flags & GrPipeline::InputFlags::kSnapVerticesToPixelCenters) {
+ result->append("Snap vertices to pixel center.\n");
+ }
+ if (flags & GrPipeline::InputFlags::kHWAntialias) {
+ result->append("HW Antialiasing enabled.\n");
+ }
+ return;
+ }
+ result->append("No pipeline flags\n");
+}
+
+SkString GrSimpleMeshDrawOpHelper::dumpInfo() const {
+ const GrProcessorSet& processors = fProcessors ? *fProcessors : GrProcessorSet::EmptySet();
+ SkString result = processors.dumpProcessors();
+ result.append("AA Type: ");
+ switch (this->aaType()) {
+ case GrAAType::kNone:
+ result.append("none\n");
+ break;
+ case GrAAType::kCoverage:
+ result.append("coverage\n");
+ break;
+ case GrAAType::kMSAA:
+ result.append("msaa\n");
+ break;
+ }
+ dump_pipeline_flags(fPipelineFlags, &result);
+ return result;
+}
+#endif
+
+GrSimpleMeshDrawOpHelperWithStencil::GrSimpleMeshDrawOpHelperWithStencil(
+ const MakeArgs& args, GrAAType aaType, const GrUserStencilSettings* stencilSettings,
+ InputFlags inputFlags)
+ : INHERITED(args, aaType, inputFlags)
+ , fStencilSettings(stencilSettings ? stencilSettings : &GrUserStencilSettings::kUnused) {}
+
+GrDrawOp::FixedFunctionFlags GrSimpleMeshDrawOpHelperWithStencil::fixedFunctionFlags() const {
+ GrDrawOp::FixedFunctionFlags flags = INHERITED::fixedFunctionFlags();
+ if (fStencilSettings != &GrUserStencilSettings::kUnused) {
+ flags |= GrDrawOp::FixedFunctionFlags::kUsesStencil;
+ }
+ return flags;
+}
+
+GrProcessorSet::Analysis GrSimpleMeshDrawOpHelperWithStencil::finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType, GrProcessorAnalysisCoverage geometryCoverage,
+ SkPMColor4f* geometryColor, bool* wideColor) {
+ GrProcessorAnalysisColor color = *geometryColor;
+ auto result = this->finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, geometryCoverage, &color);
+ color.isConstant(geometryColor);
+ if (wideColor) {
+ *wideColor = SkPMColor4fNeedsWideColor(*geometryColor, clampType, caps);
+ }
+ return result;
+}
+
+bool GrSimpleMeshDrawOpHelperWithStencil::isCompatible(
+ const GrSimpleMeshDrawOpHelperWithStencil& that, const GrCaps& caps,
+ const SkRect& thisBounds, const SkRect& thatBounds, bool noneAsCoverageAA) const {
+ return INHERITED::isCompatible(that, caps, thisBounds, thatBounds, noneAsCoverageAA) &&
+ fStencilSettings == that.fStencilSettings;
+}
+
+void GrSimpleMeshDrawOpHelperWithStencil::executeDrawsAndUploads(
+ const GrOp* op, GrOpFlushState* flushState, const SkRect& chainBounds) {
+ if (fProcessors) {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ op, chainBounds, std::move(*fProcessors), fPipelineFlags, fStencilSettings);
+ } else {
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ op, chainBounds, GrProcessorSet::MakeEmptySet(), fPipelineFlags, fStencilSettings);
+ }
+}
+
+#ifdef SK_DEBUG
+SkString GrSimpleMeshDrawOpHelperWithStencil::dumpInfo() const {
+ SkString result = INHERITED::dumpInfo();
+ result.appendf("Stencil settings: %s\n", (fStencilSettings ? "yes" : "no"));
+ return result;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.h b/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.h
new file mode 100644
index 0000000000..35bf3cc86f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrSimpleMeshDrawOpHelper.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSimpleMeshDrawOpHelper_DEFINED
+#define GrSimpleMeshDrawOpHelper_DEFINED
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include <new>
+
+struct SkRect;
+
+/**
+ * This class can be used to help implement simple mesh draw ops. It reduces the amount of
+ * boilerplate code to type and also provides a mechanism for optionally allocating space for a
+ * GrProcessorSet based on a GrPaint. It is intended to be used by ops that construct a single
+ * GrPipeline for a uniform primitive color and a GrPaint.
+ */
+class GrSimpleMeshDrawOpHelper {
+public:
+ struct MakeArgs;
+
+ /**
+ * This can be used by an Op class to perform allocation and initialization such that a
+ * GrProcessorSet (if required) is allocated as part of the same allocation as the Op
+ * instance. It requires that Op implement a constructor of the form:
+ * Op(MakeArgs, SkPMColor4f, OpArgs...)
+ * which is public or made accessible via 'friend'.
+ */
+ template <typename Op, typename... OpArgs>
+ static std::unique_ptr<GrDrawOp> FactoryHelper(GrRecordingContext*, GrPaint&&, OpArgs...);
+
+ // Here we allow callers to specify a subset of the GrPipeline::InputFlags upon creation.
+ enum class InputFlags : uint8_t {
+ kNone = 0,
+ kSnapVerticesToPixelCenters = (uint8_t)GrPipeline::InputFlags::kSnapVerticesToPixelCenters,
+ };
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(InputFlags);
+
+ GrSimpleMeshDrawOpHelper(const MakeArgs&, GrAAType, InputFlags = InputFlags::kNone);
+ ~GrSimpleMeshDrawOpHelper();
+
+ GrSimpleMeshDrawOpHelper() = delete;
+ GrSimpleMeshDrawOpHelper(const GrSimpleMeshDrawOpHelper&) = delete;
+ GrSimpleMeshDrawOpHelper& operator=(const GrSimpleMeshDrawOpHelper&) = delete;
+
+ GrDrawOp::FixedFunctionFlags fixedFunctionFlags() const;
+
+ // noneAACompatibleWithCoverage should be set to true if the op can properly render a non-AA
+ // primitive merged into a coverage-based op.
+ bool isCompatible(const GrSimpleMeshDrawOpHelper& that, const GrCaps&, const SkRect& thisBounds,
+ const SkRect& thatBounds, bool noneAACompatibleWithCoverage = false) const;
+
+ /**
+ * Finalizes the processor set and determines whether the destination must be provided
+ * to the fragment shader as a texture for blending.
+ *
+ * @param geometryCoverage Describes the coverage output of the op's geometry processor
+ * @param geometryColor An in/out param. As input this informs processor analysis about the
+ * color the op expects to output from its geometry processor. As output
+ * this may be set to a known color in which case the op must output this
+ * color from its geometry processor instead.
+ */
+ GrProcessorSet::Analysis finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType, GrProcessorAnalysisCoverage geometryCoverage,
+ GrProcessorAnalysisColor* geometryColor) {
+ return this->finalizeProcessors(
+ caps, clip, &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, clampType,
+ geometryCoverage, geometryColor);
+ }
+
+ /**
+ * Version of above that can be used by ops that have a constant color geometry processor
+ * output. The op passes this color as 'geometryColor' and after return if 'geometryColor' has
+ * changed the op must override its geometry processor color output with the new color.
+ */
+ GrProcessorSet::Analysis finalizeProcessors(
+ const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType,
+ GrProcessorAnalysisCoverage geometryCoverage, SkPMColor4f* geometryColor,
+ bool* wideColor);
+
+ bool isTrivial() const {
+ return fProcessors == nullptr;
+ }
+
+ bool usesLocalCoords() const {
+ SkASSERT(fDidAnalysis);
+ return fUsesLocalCoords;
+ }
+
+ bool compatibleWithCoverageAsAlpha() const { return fCompatibleWithCoverageAsAlpha; }
+
+ struct MakeArgs {
+ private:
+ MakeArgs() = default;
+
+ GrProcessorSet* fProcessorSet;
+
+ friend class GrSimpleMeshDrawOpHelper;
+ };
+
+ void visitProxies(const GrOp::VisitProxyFunc& func) const {
+ if (fProcessors) {
+ fProcessors->visitProxies(func);
+ }
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const;
+#endif
+ GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
+
+ void setAAType(GrAAType aaType) {
+ fAAType = static_cast<unsigned>(aaType);
+ }
+
+ void executeDrawsAndUploads(const GrOp*, GrOpFlushState*, const SkRect& chainBounds);
+
+protected:
+ GrPipeline::InputFlags pipelineFlags() const { return fPipelineFlags; }
+
+ GrProcessorSet::Analysis finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip*, const GrUserStencilSettings*,
+ bool hasMixedSampledCoverage, GrClampType, GrProcessorAnalysisCoverage geometryCoverage,
+ GrProcessorAnalysisColor* geometryColor);
+
+ GrProcessorSet* fProcessors;
+ GrPipeline::InputFlags fPipelineFlags;
+ unsigned fAAType : 2;
+ unsigned fUsesLocalCoords : 1;
+ unsigned fCompatibleWithCoverageAsAlpha : 1;
+ SkDEBUGCODE(unsigned fMadePipeline : 1;)
+ SkDEBUGCODE(unsigned fDidAnalysis : 1;)
+};
+
+/**
+ * This class extends GrSimpleMeshDrawOpHelper to support an optional GrUserStencilSettings. This
+ * uses private inheritance because it non-virtually overrides methods in the base class and should
+ * never be used with a GrSimpleMeshDrawOpHelper pointer or reference.
+ */
+class GrSimpleMeshDrawOpHelperWithStencil : private GrSimpleMeshDrawOpHelper {
+public:
+ using MakeArgs = GrSimpleMeshDrawOpHelper::MakeArgs;
+ using InputFlags = GrSimpleMeshDrawOpHelper::InputFlags;
+
+ using GrSimpleMeshDrawOpHelper::visitProxies;
+
+ // using declarations can't be templated, so this is a pass-through function instead.
+ template <typename Op, typename... OpArgs>
+ static std::unique_ptr<GrDrawOp> FactoryHelper(GrRecordingContext* context, GrPaint&& paint,
+ OpArgs... opArgs) {
+ return GrSimpleMeshDrawOpHelper::FactoryHelper<Op, OpArgs...>(
+ context, std::move(paint), std::forward<OpArgs>(opArgs)...);
+ }
+
+ GrSimpleMeshDrawOpHelperWithStencil(const MakeArgs&, GrAAType, const GrUserStencilSettings*,
+ InputFlags = InputFlags::kNone);
+
+ GrDrawOp::FixedFunctionFlags fixedFunctionFlags() const;
+
+ GrProcessorSet::Analysis finalizeProcessors(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType, GrProcessorAnalysisCoverage geometryCoverage,
+ GrProcessorAnalysisColor* geometryColor) {
+ return this->INHERITED::finalizeProcessors(
+ caps, clip, fStencilSettings, hasMixedSampledCoverage, clampType, geometryCoverage,
+ geometryColor);
+ }
+
+ GrProcessorSet::Analysis finalizeProcessors(
+ const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType,
+ GrProcessorAnalysisCoverage geometryCoverage, SkPMColor4f* geometryColor, bool*
+ wideColor);
+
+ using GrSimpleMeshDrawOpHelper::aaType;
+ using GrSimpleMeshDrawOpHelper::setAAType;
+ using GrSimpleMeshDrawOpHelper::isTrivial;
+ using GrSimpleMeshDrawOpHelper::usesLocalCoords;
+ using GrSimpleMeshDrawOpHelper::compatibleWithCoverageAsAlpha;
+
+ bool isCompatible(const GrSimpleMeshDrawOpHelperWithStencil& that, const GrCaps&,
+ const SkRect& thisBounds, const SkRect& thatBounds,
+ bool noneAACompatibleWithCoverage = false) const;
+
+ void executeDrawsAndUploads(const GrOp*, GrOpFlushState*, const SkRect& chainBounds);
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const;
+#endif
+
+private:
+ const GrUserStencilSettings* fStencilSettings;
+ typedef GrSimpleMeshDrawOpHelper INHERITED;
+};
+
+template <typename Op, typename... OpArgs>
+std::unique_ptr<GrDrawOp> GrSimpleMeshDrawOpHelper::FactoryHelper(GrRecordingContext* context,
+ GrPaint&& paint,
+ OpArgs... opArgs) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ MakeArgs makeArgs;
+
+ if (paint.isTrivial()) {
+ makeArgs.fProcessorSet = nullptr;
+ return pool->allocate<Op>(makeArgs, paint.getColor4f(), std::forward<OpArgs>(opArgs)...);
+ } else {
+ char* mem = (char*) pool->allocate(sizeof(Op) + sizeof(GrProcessorSet));
+ char* setMem = mem + sizeof(Op);
+ auto color = paint.getColor4f();
+ makeArgs.fProcessorSet = new (setMem) GrProcessorSet(std::move(paint));
+ return std::unique_ptr<GrDrawOp>(new (mem) Op(makeArgs, color,
+ std::forward<OpArgs>(opArgs)...));
+ }
+}
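+
+// A minimal usage sketch (MyOp and its extra arguments are hypothetical, not
+// part of this patch): an op's factory typically forwards to FactoryHelper,
+//
+//     std::unique_ptr<GrDrawOp> op = GrSimpleMeshDrawOpHelper::FactoryHelper<MyOp>(
+//             context, std::move(paint), viewMatrix, rect);
+//
+// which pools the allocation (placing the GrProcessorSet in the same block for
+// a non-trivial paint) and invokes
+// MyOp(MakeArgs, const SkPMColor4f&, const SkMatrix&, const SkRect&).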
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrSimpleMeshDrawOpHelper::InputFlags)
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.cpp
new file mode 100644
index 0000000000..b65cb423ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.cpp
@@ -0,0 +1,999 @@
+/*
+ * Copyright 2014 Google Inc.
+ * Copyright 2017 ARM Ltd.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrSmallPathRenderer.h"
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrBuffer.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDistanceFieldGenFromVector.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/effects/GrBitmapTextGeoProc.h"
+#include "src/gpu/effects/GrDistanceFieldGeoProc.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+#define ATLAS_TEXTURE_WIDTH 2048
+#define ATLAS_TEXTURE_HEIGHT 2048
+#define PLOT_WIDTH 512
+#define PLOT_HEIGHT 256
+
+#define NUM_PLOTS_X (ATLAS_TEXTURE_WIDTH / PLOT_WIDTH)
+#define NUM_PLOTS_Y (ATLAS_TEXTURE_HEIGHT / PLOT_HEIGHT)
+
+#ifdef DF_PATH_TRACKING
+static int g_NumCachedShapes = 0;
+static int g_NumFreedShapes = 0;
+#endif
+
+// mip levels
+static const SkScalar kIdealMinMIP = 12;
+static const SkScalar kMaxMIP = 162;
+
+static const SkScalar kMaxDim = 73;
+static const SkScalar kMinSize = SK_ScalarHalf;
+static const SkScalar kMaxSize = 2*kMaxMIP;
+
+class ShapeDataKey {
+public:
+ ShapeDataKey() {}
+ ShapeDataKey(const ShapeDataKey& that) { *this = that; }
+ ShapeDataKey(const GrShape& shape, uint32_t dim) { this->set(shape, dim); }
+ ShapeDataKey(const GrShape& shape, const SkMatrix& ctm) { this->set(shape, ctm); }
+
+ ShapeDataKey& operator=(const ShapeDataKey& that) {
+ fKey.reset(that.fKey.count());
+ memcpy(fKey.get(), that.fKey.get(), fKey.count() * sizeof(uint32_t));
+ return *this;
+ }
+
+ // for SDF paths
+ void set(const GrShape& shape, uint32_t dim) {
+ // Shapes' keys are for their pre-style geometry, but by now we shouldn't have any
+ // relevant styling information.
+ SkASSERT(shape.style().isSimpleFill());
+ SkASSERT(shape.hasUnstyledKey());
+ int shapeKeySize = shape.unstyledKeySize();
+ fKey.reset(1 + shapeKeySize);
+ fKey[0] = dim;
+ shape.writeUnstyledKey(&fKey[1]);
+ }
+
+ // for bitmap paths
+ void set(const GrShape& shape, const SkMatrix& ctm) {
+ // Shapes' keys are for their pre-style geometry, but by now we shouldn't have any
+ // relevant styling information.
+ SkASSERT(shape.style().isSimpleFill());
+ SkASSERT(shape.hasUnstyledKey());
+ // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
+ SkScalar sx = ctm.get(SkMatrix::kMScaleX);
+ SkScalar sy = ctm.get(SkMatrix::kMScaleY);
+ SkScalar kx = ctm.get(SkMatrix::kMSkewX);
+ SkScalar ky = ctm.get(SkMatrix::kMSkewY);
+ SkScalar tx = ctm.get(SkMatrix::kMTransX);
+ SkScalar ty = ctm.get(SkMatrix::kMTransY);
+ // Allow 8 bits each in x and y of subpixel positioning.
+ tx -= SkScalarFloorToScalar(tx);
+ ty -= SkScalarFloorToScalar(ty);
+ SkFixed fracX = SkScalarToFixed(tx) & 0x0000FF00;
+ SkFixed fracY = SkScalarToFixed(ty) & 0x0000FF00;
+ int shapeKeySize = shape.unstyledKeySize();
+ fKey.reset(5 + shapeKeySize);
+ fKey[0] = SkFloat2Bits(sx);
+ fKey[1] = SkFloat2Bits(sy);
+ fKey[2] = SkFloat2Bits(kx);
+ fKey[3] = SkFloat2Bits(ky);
+ fKey[4] = fracX | (fracY >> 8);
+ shape.writeUnstyledKey(&fKey[5]);
+ }
+
+ bool operator==(const ShapeDataKey& that) const {
+ return fKey.count() == that.fKey.count() &&
+ 0 == memcmp(fKey.get(), that.fKey.get(), sizeof(uint32_t) * fKey.count());
+ }
+
+ int count32() const { return fKey.count(); }
+ const uint32_t* data() const { return fKey.get(); }
+
+private:
+ // The key is composed of the GrShape's key, and either the dimensions of the DF
+ // generated for the path (32x32 max, 64x64 max, 128x128 max) if an SDF image or
+ // the matrix for the path with only fractional translation.
+ SkAutoSTArray<24, uint32_t> fKey;
+};
+
+class ShapeData {
+public:
+ ShapeDataKey fKey;
+ GrDrawOpAtlas::AtlasID fID;
+ SkRect fBounds;
+ GrIRect16 fTextureCoords;
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(ShapeData);
+
+ static inline const ShapeDataKey& GetKey(const ShapeData& data) {
+ return data.fKey;
+ }
+
+ static inline uint32_t Hash(const ShapeDataKey& key) {
+ return SkOpts::hash(key.data(), sizeof(uint32_t) * key.count32());
+ }
+};
+
+
+
+// Callback to clear out internal path cache when eviction occurs
+void GrSmallPathRenderer::HandleEviction(GrDrawOpAtlas::AtlasID id, void* pr) {
+ GrSmallPathRenderer* dfpr = (GrSmallPathRenderer*)pr;
+ // remove any paths that use this plot
+ ShapeDataList::Iter iter;
+ iter.init(dfpr->fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ if (id == shapeData->fID) {
+ dfpr->fShapeCache.remove(shapeData->fKey);
+ dfpr->fShapeList.remove(shapeData);
+ delete shapeData;
+#ifdef DF_PATH_TRACKING
+ ++g_NumFreedShapes;
+#endif
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrSmallPathRenderer::GrSmallPathRenderer() : fAtlas(nullptr) {}
+
+GrSmallPathRenderer::~GrSmallPathRenderer() {
+ ShapeDataList::Iter iter;
+ iter.init(fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ delete shapeData;
+ }
+
+#ifdef DF_PATH_TRACKING
+ SkDebugf("Cached shapes: %d, freed shapes: %d\n", g_NumCachedShapes, g_NumFreedShapes);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrPathRenderer::CanDrawPath GrSmallPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (!args.fCaps->shaderCaps()->shaderDerivativeSupport()) {
+ return CanDrawPath::kNo;
+ }
+ // If the shape has no key then we won't get any reuse.
+ if (!args.fShape->hasUnstyledKey()) {
+ return CanDrawPath::kNo;
+ }
+ // This only supports filled paths, however, the caller may apply the style to make a filled
+ // path and try again.
+ if (!args.fShape->style().isSimpleFill()) {
+ return CanDrawPath::kNo;
+ }
+ // This does non-inverse coverage-based antialiased fills.
+ if (GrAAType::kCoverage != args.fAAType) {
+ return CanDrawPath::kNo;
+ }
+ // TODO: Support inverse fill
+ if (args.fShape->inverseFilled()) {
+ return CanDrawPath::kNo;
+ }
+
+ // Only support paths with bounds within kMaxDim by kMaxDim,
+ // scaled to have bounds within kMaxSize by kMaxSize.
+ // The goal is to accelerate rendering of lots of small paths that may be scaling.
+ SkScalar scaleFactors[2] = { 1, 1 };
+ if (!args.fViewMatrix->hasPerspective() && !args.fViewMatrix->getMinMaxScales(scaleFactors)) {
+ return CanDrawPath::kNo;
+ }
+ SkRect bounds = args.fShape->styledBounds();
+ SkScalar minDim = SkMinScalar(bounds.width(), bounds.height());
+ SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+ SkScalar minSize = minDim * SkScalarAbs(scaleFactors[0]);
+ SkScalar maxSize = maxDim * SkScalarAbs(scaleFactors[1]);
+ if (maxDim > kMaxDim || kMinSize > minSize || maxSize > kMaxSize) {
+ return CanDrawPath::kNo;
+ }
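+ // For example, a 50x20 path under a uniform scale of 2 is accepted:
+ // maxDim == 50 <= kMaxDim (73), minSize == 40 >= kMinSize (0.5), and
+ // maxSize == 100 <= kMaxSize (2*kMaxMIP == 324).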
+
+ return CanDrawPath::kYes;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// padding around path bounds to allow for antialiased pixels
+static const SkScalar kAntiAliasPad = 1.0f;
+
+class GrSmallPathRenderer::SmallPathOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ using ShapeCache = SkTDynamicHash<ShapeData, ShapeDataKey>;
+ using ShapeDataList = GrSmallPathRenderer::ShapeDataList;
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ GrDrawOpAtlas* atlas,
+ ShapeCache* shapeCache,
+ ShapeDataList* shapeList,
+ bool gammaCorrect,
+ const GrUserStencilSettings* stencilSettings) {
+ return Helper::FactoryHelper<SmallPathOp>(context, std::move(paint), shape, viewMatrix,
+ atlas, shapeCache, shapeList, gammaCorrect,
+ stencilSettings);
+ }
+
+ SmallPathOp(Helper::MakeArgs helperArgs, const SkPMColor4f& color, const GrShape& shape,
+ const SkMatrix& viewMatrix, GrDrawOpAtlas* atlas, ShapeCache* shapeCache,
+ ShapeDataList* shapeList, bool gammaCorrect,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
+ SkASSERT(shape.hasUnstyledKey());
+ // Compute bounds
+ this->setTransformedBounds(shape.bounds(), viewMatrix, HasAABloat::kYes, IsHairline::kNo);
+
+#if defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ fUsesDistanceField = true;
+#else
+ // only use distance fields on desktop and Android framework to save space in the atlas
+ fUsesDistanceField = this->bounds().width() > kMaxMIP || this->bounds().height() > kMaxMIP;
+#endif
+ // always use distance fields if in perspective
+ fUsesDistanceField = fUsesDistanceField || viewMatrix.hasPerspective();
+
+ fShapes.emplace_back(Entry{color, shape, viewMatrix});
+
+ fAtlas = atlas;
+ fShapeCache = shapeCache;
+ fShapeList = shapeList;
+ fGammaCorrect = gammaCorrect;
+ }
+
+ const char* name() const override { return "SmallPathOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+
+ const sk_sp<GrTextureProxy>* proxies = fAtlas->getProxies();
+ for (uint32_t i = 0; i < fAtlas->numActivePages(); ++i) {
+ SkASSERT(proxies[i]);
+ func(proxies[i].get(), GrMipMapped::kNo);
+ }
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (const auto& geo : fShapes) {
+ string.appendf("Color: 0x%08x\n", geo.fColor.toBytes_RGBA());
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, &fShapes.front().fColor, &fWideColor);
+ }
+
+private:
+ struct FlushInfo {
+ sk_sp<const GrBuffer> fVertexBuffer;
+ sk_sp<const GrBuffer> fIndexBuffer;
+ sk_sp<GrGeometryProcessor> fGeometryProcessor;
+ GrPipeline::FixedDynamicState* fFixedDynamicState;
+ int fVertexOffset;
+ int fInstancesToFlush;
+ };
+
+ void onPrepareDraws(Target* target) override {
+ int instanceCount = fShapes.count();
+
+ static constexpr int kMaxTextures = GrDistanceFieldPathGeoProc::kMaxTextures;
+ GR_STATIC_ASSERT(GrBitmapTextGeoProc::kMaxTextures == kMaxTextures);
+
+ FlushInfo flushInfo;
+ flushInfo.fFixedDynamicState = target->makeFixedDynamicState(kMaxTextures);
+ int numActiveProxies = fAtlas->numActivePages();
+ const auto proxies = fAtlas->getProxies();
+ for (int i = 0; i < numActiveProxies; ++i) {
+ // This op does not know its atlas proxies when it is added to a GrOpsTask, so the
+ // proxies don't get added during the visitProxies call. Thus we add them here.
+ flushInfo.fFixedDynamicState->fPrimitiveProcessorTextures[i] = proxies[i].get();
+ target->sampledProxyArray()->push_back(proxies[i].get());
+ }
+
+ // Setup GrGeometryProcessor
+ const SkMatrix& ctm = fShapes[0].fViewMatrix;
+ if (fUsesDistanceField) {
+ uint32_t flags = 0;
+ // Still need to key off of ctm to pick the right shader for the transformed quad
+ flags |= ctm.isScaleTranslate() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ flags |= ctm.isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ flags |= fGammaCorrect ? kGammaCorrect_DistanceFieldEffectFlag : 0;
+
+ const SkMatrix* matrix;
+ SkMatrix invert;
+ if (ctm.hasPerspective()) {
+ matrix = &ctm;
+ } else if (fHelper.usesLocalCoords()) {
+ if (!ctm.invert(&invert)) {
+ return;
+ }
+ matrix = &invert;
+ } else {
+ matrix = &SkMatrix::I();
+ }
+ flushInfo.fGeometryProcessor = GrDistanceFieldPathGeoProc::Make(
+ *target->caps().shaderCaps(), *matrix, fWideColor, fAtlas->getProxies(),
+ fAtlas->numActivePages(), GrSamplerState::ClampBilerp(), flags);
+ } else {
+ SkMatrix invert;
+ if (fHelper.usesLocalCoords()) {
+ if (!ctm.invert(&invert)) {
+ return;
+ }
+ }
+
+ flushInfo.fGeometryProcessor = GrBitmapTextGeoProc::Make(
+ *target->caps().shaderCaps(), this->color(), fWideColor, fAtlas->getProxies(),
+ fAtlas->numActivePages(), GrSamplerState::ClampNearest(), kA8_GrMaskFormat,
+ invert, false);
+ }
+
+ // allocate vertices
+ const size_t kVertexStride = flushInfo.fGeometryProcessor->vertexStride();
+
+ // We need to make sure we don't overflow a 32-bit int when we request space in the
+ // makeVertexSpace call below.
+ if (instanceCount > SK_MaxS32 / kVerticesPerQuad) {
+ return;
+ }
+ GrVertexWriter vertices{target->makeVertexSpace(kVertexStride,
+ kVerticesPerQuad * instanceCount,
+ &flushInfo.fVertexBuffer,
+ &flushInfo.fVertexOffset)};
+ flushInfo.fIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
+ if (!vertices.fPtr || !flushInfo.fIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ flushInfo.fInstancesToFlush = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Entry& args = fShapes[i];
+
+ ShapeData* shapeData;
+ if (fUsesDistanceField) {
+ // get mip level
+ SkScalar maxScale;
+ const SkRect& bounds = args.fShape.bounds();
+ if (args.fViewMatrix.hasPerspective()) {
+ // approximate the scale since we can't get it from the matrix
+ SkRect xformedBounds;
+ args.fViewMatrix.mapRect(&xformedBounds, bounds);
+ maxScale = SkScalarAbs(SkTMax(xformedBounds.width() / bounds.width(),
+ xformedBounds.height() / bounds.height()));
+ } else {
+ maxScale = SkScalarAbs(args.fViewMatrix.getMaxScale());
+ }
+ SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+ // We try to create the DF at a 2^n scaled path resolution (1/2, 1, 2, 4, etc.)
+ // In the majority of cases this will yield a crisper rendering.
+ SkScalar mipScale = 1.0f;
+ // Our mipscale is the maxScale clamped to the next highest power of 2
+ if (maxScale <= SK_ScalarHalf) {
+ SkScalar log = SkScalarFloorToScalar(SkScalarLog2(SkScalarInvert(maxScale)));
+ mipScale = SkScalarPow(2, -log);
+ } else if (maxScale > SK_Scalar1) {
+ SkScalar log = SkScalarCeilToScalar(SkScalarLog2(maxScale));
+ mipScale = SkScalarPow(2, log);
+ }
+ SkASSERT(maxScale <= mipScale);
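+ // E.g., maxScale == 0.3 gives log == floor(log2(1/0.3)) == 1 and
+ // mipScale == 0.5; maxScale == 3 gives log == ceil(log2(3)) == 2 and
+ // mipScale == 4.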
+
+ SkScalar mipSize = mipScale*SkScalarAbs(maxDim);
+ // For sizes less than kIdealMinMIP we want to use as large a distance field as we can
+ // so we can preserve as much detail as possible. However, we can't scale down more
+ // than a 1/4 of the size without artifacts. So the idea is that we pick the mipsize
+ // just bigger than the ideal, and then scale down until we are no more than 4x the
+ // original mipsize.
+ if (mipSize < kIdealMinMIP) {
+ SkScalar newMipSize = mipSize;
+ do {
+ newMipSize *= 2;
+ } while (newMipSize < kIdealMinMIP);
+ while (newMipSize > 4 * mipSize) {
+ newMipSize *= 0.25f;
+ }
+ mipSize = newMipSize;
+ }
+ SkScalar desiredDimension = SkTMin(mipSize, kMaxMIP);
+
+ // check to see if df path is cached
+ ShapeDataKey key(args.fShape, SkScalarCeilToInt(desiredDimension));
+ shapeData = fShapeCache->find(key);
+ if (nullptr == shapeData || !fAtlas->hasID(shapeData->fID)) {
+ // Remove the stale cache entry
+ if (shapeData) {
+ fShapeCache->remove(shapeData->fKey);
+ fShapeList->remove(shapeData);
+ delete shapeData;
+ }
+ SkScalar scale = desiredDimension / maxDim;
+
+ shapeData = new ShapeData;
+ if (!this->addDFPathToAtlas(target,
+ &flushInfo,
+ fAtlas,
+ shapeData,
+ args.fShape,
+ SkScalarCeilToInt(desiredDimension),
+ scale)) {
+ delete shapeData;
+ continue;
+ }
+ }
+ } else {
+ // check to see if bitmap path is cached
+ ShapeDataKey key(args.fShape, args.fViewMatrix);
+ shapeData = fShapeCache->find(key);
+ if (nullptr == shapeData || !fAtlas->hasID(shapeData->fID)) {
+ // Remove the stale cache entry
+ if (shapeData) {
+ fShapeCache->remove(shapeData->fKey);
+ fShapeList->remove(shapeData);
+ delete shapeData;
+ }
+
+ shapeData = new ShapeData;
+ if (!this->addBMPathToAtlas(target,
+ &flushInfo,
+ fAtlas,
+ shapeData,
+ args.fShape,
+ args.fViewMatrix)) {
+ delete shapeData;
+ continue;
+ }
+ }
+ }
+
+ auto uploadTarget = target->deferredUploadTarget();
+ fAtlas->setLastUseToken(shapeData->fID, uploadTarget->tokenTracker()->nextDrawToken());
+
+ this->writePathVertices(fAtlas, vertices, GrVertexColor(args.fColor, fWideColor),
+ args.fViewMatrix, shapeData);
+ flushInfo.fInstancesToFlush++;
+ }
+
+ this->flush(target, &flushInfo);
+ }
+
+ bool addToAtlas(GrMeshDrawOp::Target* target, FlushInfo* flushInfo, GrDrawOpAtlas* atlas,
+ int width, int height, const void* image,
+ GrDrawOpAtlas::AtlasID* id, SkIPoint16* atlasLocation) const {
+ auto resourceProvider = target->resourceProvider();
+ auto uploadTarget = target->deferredUploadTarget();
+
+ GrDrawOpAtlas::ErrorCode code = atlas->addToAtlas(resourceProvider, id,
+ uploadTarget, width, height,
+ image, atlasLocation);
+ if (GrDrawOpAtlas::ErrorCode::kError == code) {
+ return false;
+ }
+
+ if (GrDrawOpAtlas::ErrorCode::kTryAgain == code) {
+ this->flush(target, flushInfo);
+
+ code = atlas->addToAtlas(resourceProvider, id, uploadTarget, width, height,
+ image, atlasLocation);
+ }
+
+ return GrDrawOpAtlas::ErrorCode::kSucceeded == code;
+ }
+
+ bool addDFPathToAtlas(GrMeshDrawOp::Target* target, FlushInfo* flushInfo,
+ GrDrawOpAtlas* atlas, ShapeData* shapeData, const GrShape& shape,
+ uint32_t dimension, SkScalar scale) const {
+
+ const SkRect& bounds = shape.bounds();
+
+ // generate bounding rect for bitmap draw
+ SkRect scaledBounds = bounds;
+ // scale to mip level size
+ scaledBounds.fLeft *= scale;
+ scaledBounds.fTop *= scale;
+ scaledBounds.fRight *= scale;
+ scaledBounds.fBottom *= scale;
+ // subtract out integer portion of origin
+ // (SDF created will be placed with fractional offset burnt in)
+ SkScalar dx = SkScalarFloorToScalar(scaledBounds.fLeft);
+ SkScalar dy = SkScalarFloorToScalar(scaledBounds.fTop);
+ scaledBounds.offset(-dx, -dy);
+ // get integer boundary
+ SkIRect devPathBounds;
+ scaledBounds.roundOut(&devPathBounds);
+ // pad to allow room for antialiasing
+ const int intPad = SkScalarCeilToInt(kAntiAliasPad);
+ // place devBounds at origin
+ int width = devPathBounds.width() + 2*intPad;
+ int height = devPathBounds.height() + 2*intPad;
+ devPathBounds = SkIRect::MakeWH(width, height);
+ SkScalar translateX = intPad - dx;
+ SkScalar translateY = intPad - dy;
+
+ // draw path to bitmap
+ SkMatrix drawMatrix;
+ drawMatrix.setScale(scale, scale);
+ drawMatrix.postTranslate(translateX, translateY);
+
+ SkASSERT(devPathBounds.fLeft == 0);
+ SkASSERT(devPathBounds.fTop == 0);
+ SkASSERT(devPathBounds.width() > 0);
+ SkASSERT(devPathBounds.height() > 0);
+
+ // setup signed distance field storage
+ SkIRect dfBounds = devPathBounds.makeOutset(SK_DistanceFieldPad, SK_DistanceFieldPad);
+ width = dfBounds.width();
+ height = dfBounds.height();
+ // TODO We should really generate this directly into the plot somehow
+ SkAutoSMalloc<1024> dfStorage(width * height * sizeof(unsigned char));
+
+ SkPath path;
+ shape.asPath(&path);
+#ifndef SK_USE_LEGACY_DISTANCE_FIELDS
+ // Generate signed distance field directly from SkPath
+ bool succeed = GrGenerateDistanceFieldFromPath((unsigned char*)dfStorage.get(),
+ path, drawMatrix,
+ width, height, width * sizeof(unsigned char));
+ if (!succeed) {
+#endif
+ // setup bitmap backing
+ SkAutoPixmapStorage dst;
+ if (!dst.tryAlloc(SkImageInfo::MakeA8(devPathBounds.width(),
+ devPathBounds.height()))) {
+ return false;
+ }
+ sk_bzero(dst.writable_addr(), dst.computeByteSize());
+
+ // rasterize path
+ SkPaint paint;
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setAntiAlias(true);
+
+ SkDraw draw;
+
+ SkRasterClip rasterClip;
+ rasterClip.setRect(devPathBounds);
+ draw.fRC = &rasterClip;
+ draw.fMatrix = &drawMatrix;
+ draw.fDst = dst;
+
+ draw.drawPathCoverage(path, paint);
+
+ // Generate signed distance field
+ SkGenerateDistanceFieldFromA8Image((unsigned char*)dfStorage.get(),
+ (const unsigned char*)dst.addr(),
+ dst.width(), dst.height(), dst.rowBytes());
+#ifndef SK_USE_LEGACY_DISTANCE_FIELDS
+ }
+#endif
+
+ // add to atlas
+ SkIPoint16 atlasLocation;
+ GrDrawOpAtlas::AtlasID id;
+
+ if (!this->addToAtlas(target, flushInfo, atlas,
+ width, height, dfStorage.get(), &id, &atlasLocation)) {
+ return false;
+ }
+
+ // add to cache
+ shapeData->fKey.set(shape, dimension);
+ shapeData->fID = id;
+
+ shapeData->fBounds = SkRect::Make(devPathBounds);
+ shapeData->fBounds.offset(-translateX, -translateY);
+ shapeData->fBounds.fLeft /= scale;
+ shapeData->fBounds.fTop /= scale;
+ shapeData->fBounds.fRight /= scale;
+ shapeData->fBounds.fBottom /= scale;
+
+ // We pack the 2bit page index in the low bit of the u and v texture coords
+ uint16_t pageIndex = GrDrawOpAtlas::GetPageIndexFromID(id);
+ SkASSERT(pageIndex < 4);
+ uint16_t uBit = (pageIndex >> 1) & 0x1;
+ uint16_t vBit = pageIndex & 0x1;
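+ // E.g., pageIndex 2 (binary 10) gives uBit == 1 and vBit == 0; a consumer
+ // can strip the low bit of each doubled coord to recover both the texel
+ // position and the page index.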
+ shapeData->fTextureCoords.set((atlasLocation.fX+SK_DistanceFieldPad) << 1 | uBit,
+ (atlasLocation.fY+SK_DistanceFieldPad) << 1 | vBit,
+ (atlasLocation.fX+SK_DistanceFieldPad+
+ devPathBounds.width()) << 1 | uBit,
+ (atlasLocation.fY+SK_DistanceFieldPad+
+ devPathBounds.height()) << 1 | vBit);
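+ // (A consumer can then recover the page as ((u & 1) << 1) | (v & 1) and the
+ // texel coordinate as value >> 1; this decode is a sketch, not code from
+ // this file.)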
+
+ fShapeCache->add(shapeData);
+ fShapeList->addToTail(shapeData);
+#ifdef DF_PATH_TRACKING
+ ++g_NumCachedPaths;
+#endif
+ return true;
+ }
+
+ bool addBMPathToAtlas(GrMeshDrawOp::Target* target, FlushInfo* flushInfo,
+ GrDrawOpAtlas* atlas, ShapeData* shapeData, const GrShape& shape,
+ const SkMatrix& ctm) const {
+ const SkRect& bounds = shape.bounds();
+ if (bounds.isEmpty()) {
+ return false;
+ }
+ SkMatrix drawMatrix(ctm);
+ SkScalar tx = ctm.getTranslateX();
+ SkScalar ty = ctm.getTranslateY();
+ tx -= SkScalarFloorToScalar(tx);
+ ty -= SkScalarFloorToScalar(ty);
+ drawMatrix.set(SkMatrix::kMTransX, tx);
+ drawMatrix.set(SkMatrix::kMTransY, ty);
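+ // Keep only the fractional part of the translation: the cached bitmap then
+ // serves every integer translation of this shape/CTM, and writePathVertices
+ // re-applies the floored integer offset at draw time.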
+ SkRect shapeDevBounds;
+ drawMatrix.mapRect(&shapeDevBounds, bounds);
+ SkScalar dx = SkScalarFloorToScalar(shapeDevBounds.fLeft);
+ SkScalar dy = SkScalarFloorToScalar(shapeDevBounds.fTop);
+
+ // get integer boundary
+ SkIRect devPathBounds;
+ shapeDevBounds.roundOut(&devPathBounds);
+ // pad to allow room for antialiasing
+ const int intPad = SkScalarCeilToInt(kAntiAliasPad);
+ // place devBounds at origin
+ int width = devPathBounds.width() + 2 * intPad;
+ int height = devPathBounds.height() + 2 * intPad;
+ devPathBounds = SkIRect::MakeWH(width, height);
+ SkScalar translateX = intPad - dx;
+ SkScalar translateY = intPad - dy;
+
+ SkASSERT(devPathBounds.fLeft == 0);
+ SkASSERT(devPathBounds.fTop == 0);
+ SkASSERT(devPathBounds.width() > 0);
+ SkASSERT(devPathBounds.height() > 0);
+
+ SkPath path;
+ shape.asPath(&path);
+ // setup bitmap backing
+ SkAutoPixmapStorage dst;
+ if (!dst.tryAlloc(SkImageInfo::MakeA8(devPathBounds.width(),
+ devPathBounds.height()))) {
+ return false;
+ }
+ sk_bzero(dst.writable_addr(), dst.computeByteSize());
+
+ // rasterize path
+ SkPaint paint;
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setAntiAlias(true);
+
+ SkDraw draw;
+
+ SkRasterClip rasterClip;
+ rasterClip.setRect(devPathBounds);
+ draw.fRC = &rasterClip;
+ drawMatrix.postTranslate(translateX, translateY);
+ draw.fMatrix = &drawMatrix;
+ draw.fDst = dst;
+
+ draw.drawPathCoverage(path, paint);
+
+ // add to atlas
+ SkIPoint16 atlasLocation;
+ GrDrawOpAtlas::AtlasID id;
+
+ if (!this->addToAtlas(target, flushInfo, atlas,
+ dst.width(), dst.height(), dst.addr(), &id, &atlasLocation)) {
+ return false;
+ }
+
+ // add to cache
+ shapeData->fKey.set(shape, ctm);
+ shapeData->fID = id;
+
+ shapeData->fBounds = SkRect::Make(devPathBounds);
+ shapeData->fBounds.offset(-translateX, -translateY);
+
+ // We pack the 2-bit page index into the low bits of the u and v texture
+ // coords (one bit in each)
+ uint16_t pageIndex = GrDrawOpAtlas::GetPageIndexFromID(id);
+ SkASSERT(pageIndex < 4);
+ uint16_t uBit = (pageIndex >> 1) & 0x1;
+ uint16_t vBit = pageIndex & 0x1;
+ shapeData->fTextureCoords.set(atlasLocation.fX << 1 | uBit, atlasLocation.fY << 1 | vBit,
+ (atlasLocation.fX+width) << 1 | uBit,
+ (atlasLocation.fY+height) << 1 | vBit);
+
+ fShapeCache->add(shapeData);
+ fShapeList->addToTail(shapeData);
+#ifdef DF_PATH_TRACKING
+ ++g_NumCachedPaths;
+#endif
+ return true;
+ }
+
+ void writePathVertices(GrDrawOpAtlas* atlas,
+ GrVertexWriter& vertices,
+ const GrVertexColor& color,
+ const SkMatrix& ctm,
+ const ShapeData* shapeData) const {
+ SkRect translatedBounds(shapeData->fBounds);
+ if (!fUsesDistanceField) {
+ translatedBounds.offset(SkScalarFloorToScalar(ctm.get(SkMatrix::kMTransX)),
+ SkScalarFloorToScalar(ctm.get(SkMatrix::kMTransY)));
+ }
+
+ // set up texture coordinates
+ GrVertexWriter::TriStrip<uint16_t> texCoords{
+ (uint16_t)shapeData->fTextureCoords.fLeft,
+ (uint16_t)shapeData->fTextureCoords.fTop,
+ (uint16_t)shapeData->fTextureCoords.fRight,
+ (uint16_t)shapeData->fTextureCoords.fBottom
+ };
+
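+ // With distance fields and no perspective we can fold the CTM into the quad
+ // here on the CPU. Otherwise the rect is written untransformed: the bitmap
+ // case was already offset into device space above, and in the perspective
+ // case the matrix is presumably applied by the geometry processor.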
+ if (fUsesDistanceField && !ctm.hasPerspective()) {
+ vertices.writeQuad(GrQuad::MakeFromRect(translatedBounds, ctm),
+ color,
+ texCoords);
+ } else {
+ vertices.writeQuad(GrVertexWriter::TriStripFromRect(translatedBounds),
+ color,
+ texCoords);
+ }
+ }
+
+ void flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) const {
+ GrGeometryProcessor* gp = flushInfo->fGeometryProcessor.get();
+ int numAtlasTextures = SkToInt(fAtlas->numActivePages());
+ auto proxies = fAtlas->getProxies();
+ if (gp->numTextureSamplers() != numAtlasTextures) {
+ for (int i = gp->numTextureSamplers(); i < numAtlasTextures; ++i) {
+ flushInfo->fFixedDynamicState->fPrimitiveProcessorTextures[i] = proxies[i].get();
+ // This op does not know its atlas proxies when it is added to a GrOpsTask, so the
+ // proxies don't get added during the visitProxies call. Thus we add them here.
+ target->sampledProxyArray()->push_back(proxies[i].get());
+ }
+ // During preparation the number of atlas pages has increased.
+ // Update the proxies used in the GP to match.
+ if (fUsesDistanceField) {
+ reinterpret_cast<GrDistanceFieldPathGeoProc*>(gp)->addNewProxies(
+ fAtlas->getProxies(), fAtlas->numActivePages(), GrSamplerState::ClampBilerp());
+ } else {
+ reinterpret_cast<GrBitmapTextGeoProc*>(gp)->addNewProxies(
+ fAtlas->getProxies(), fAtlas->numActivePages(), GrSamplerState::ClampNearest());
+ }
+ }
+
+ if (flushInfo->fInstancesToFlush) {
+ GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
+ int maxInstancesPerDraw =
+ static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
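+ // The index buffer holds a repeating 6-index (two-triangle) pattern per
+ // quad, so its size in uint16_t units divided by 6 bounds the number of
+ // quads one patterned draw can cover.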
+ mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerQuad, kVerticesPerQuad,
+ flushInfo->fInstancesToFlush, maxInstancesPerDraw);
+ mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset);
+ target->recordDraw(
+ flushInfo->fGeometryProcessor, mesh, 1, flushInfo->fFixedDynamicState, nullptr);
+ flushInfo->fVertexOffset += kVerticesPerQuad * flushInfo->fInstancesToFlush;
+ flushInfo->fInstancesToFlush = 0;
+ }
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ const SkPMColor4f& color() const { return fShapes[0].fColor; }
+ bool usesDistanceField() const { return fUsesDistanceField; }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ SmallPathOp* that = t->cast<SmallPathOp>();
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ if (this->usesDistanceField() != that->usesDistanceField()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ const SkMatrix& thisCtm = this->fShapes[0].fViewMatrix;
+ const SkMatrix& thatCtm = that->fShapes[0].fViewMatrix;
+
+ if (thisCtm.hasPerspective() != thatCtm.hasPerspective()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // We can position on the CPU unless we're in perspective,
+ // but we also need to make sure the local matrices are identical.
+ if ((thisCtm.hasPerspective() || fHelper.usesLocalCoords()) &&
+ !thisCtm.cheapEqualTo(thatCtm)) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // Depending on the ctm we may have a different shader for SDF paths
+ if (this->usesDistanceField()) {
+ if (thisCtm.isScaleTranslate() != thatCtm.isScaleTranslate() ||
+ thisCtm.isSimilarity() != thatCtm.isSimilarity()) {
+ return CombineResult::kCannotCombine;
+ }
+ }
+
+ fShapes.push_back_n(that->fShapes.count(), that->fShapes.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+ }
+
+ bool fUsesDistanceField;
+
+ struct Entry {
+ SkPMColor4f fColor;
+ GrShape fShape;
+ SkMatrix fViewMatrix;
+ };
+
+ SkSTArray<1, Entry> fShapes;
+ Helper fHelper;
+ GrDrawOpAtlas* fAtlas;
+ ShapeCache* fShapeCache;
+ ShapeDataList* fShapeList;
+ bool fGammaCorrect;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+bool GrSmallPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrSmallPathRenderer::onDrawPath");
+
+ // we've already bailed on inverse filled paths, so this is safe
+ SkASSERT(!args.fShape->isEmpty());
+ SkASSERT(args.fShape->hasUnstyledKey());
+ if (!fAtlas) {
+ const GrBackendFormat format = args.fContext->priv().caps()->getDefaultBackendFormat(
+ GrColorType::kAlpha_8, GrRenderable::kNo);
+ fAtlas = GrDrawOpAtlas::Make(args.fContext->priv().proxyProvider(),
+ format,
+ GrColorType::kAlpha_8,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ PLOT_WIDTH, PLOT_HEIGHT,
+ GrDrawOpAtlas::AllowMultitexturing::kYes,
+ &GrSmallPathRenderer::HandleEviction,
+ (void*)this);
+ if (!fAtlas) {
+ return false;
+ }
+ }
+
+ std::unique_ptr<GrDrawOp> op = SmallPathOp::Make(
+ args.fContext, std::move(args.fPaint), *args.fShape, *args.fViewMatrix, fAtlas.get(),
+ &fShapeCache, &fShapeList, args.fGammaCorrect, args.fUserStencilSettings);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+struct GrSmallPathRenderer::PathTestStruct {
+ PathTestStruct() : fContextID(SK_InvalidGenID), fAtlas(nullptr) {}
+ ~PathTestStruct() { this->reset(); }
+
+ void reset() {
+ ShapeDataList::Iter iter;
+ iter.init(fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ fShapeList.remove(shapeData);
+ delete shapeData;
+ }
+ fAtlas = nullptr;
+ fShapeCache.reset();
+ }
+
+ static void HandleEviction(GrDrawOpAtlas::AtlasID id, void* pr) {
+ PathTestStruct* dfpr = (PathTestStruct*)pr;
+ // remove any paths that use this plot
+ ShapeDataList::Iter iter;
+ iter.init(dfpr->fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ if (id == shapeData->fID) {
+ dfpr->fShapeCache.remove(shapeData->fKey);
+ dfpr->fShapeList.remove(shapeData);
+ delete shapeData;
+ }
+ }
+ }
+
+ uint32_t fContextID;
+ std::unique_ptr<GrDrawOpAtlas> fAtlas;
+ ShapeCache fShapeCache;
+ ShapeDataList fShapeList;
+};
+
+std::unique_ptr<GrDrawOp> GrSmallPathRenderer::createOp_TestingOnly(
+ GrRecordingContext* context,
+ GrPaint&& paint,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ GrDrawOpAtlas* atlas,
+ ShapeCache* shapeCache,
+ ShapeDataList* shapeList,
+ bool gammaCorrect,
+ const GrUserStencilSettings* stencil) {
+
+ return GrSmallPathRenderer::SmallPathOp::Make(context, std::move(paint), shape, viewMatrix,
+ atlas, shapeCache, shapeList, gammaCorrect,
+ stencil);
+
+}
+
+GR_DRAW_OP_TEST_DEFINE(SmallPathOp) {
+ using PathTestStruct = GrSmallPathRenderer::PathTestStruct;
+ static PathTestStruct gTestStruct;
+
+ if (context->priv().contextID() != gTestStruct.fContextID) {
+ gTestStruct.fContextID = context->priv().contextID();
+ gTestStruct.reset();
+ const GrBackendFormat format = context->priv().caps()->getDefaultBackendFormat(
+ GrColorType::kAlpha_8, GrRenderable::kNo);
+ gTestStruct.fAtlas = GrDrawOpAtlas::Make(context->priv().proxyProvider(),
+ format, GrColorType::kAlpha_8,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ PLOT_WIDTH, PLOT_HEIGHT,
+ GrDrawOpAtlas::AllowMultitexturing::kYes,
+ &PathTestStruct::HandleEviction,
+ (void*)&gTestStruct);
+ }
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ bool gammaCorrect = random->nextBool();
+
+ // This path renderer only allows fill styles.
+ GrShape shape(GrTest::TestPath(random), GrStyle::SimpleFill());
+ return GrSmallPathRenderer::createOp_TestingOnly(
+ context,
+ std::move(paint), shape, viewMatrix,
+ gTestStruct.fAtlas.get(),
+ &gTestStruct.fShapeCache,
+ &gTestStruct.fShapeList,
+ gammaCorrect,
+ GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.h
new file mode 100644
index 0000000000..a916fc1d89
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrSmallPathRenderer.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSmallPathRenderer_DEFINED
+#define GrSmallPathRenderer_DEFINED
+
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/geometry/GrRect.h"
+#include "src/gpu/geometry/GrShape.h"
+
+#include "src/core/SkOpts.h"
+#include "src/core/SkTDynamicHash.h"
+
+class GrRecordingContext;
+
+class ShapeData;
+class ShapeDataKey;
+
+class GrSmallPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
+public:
+ GrSmallPathRenderer();
+ ~GrSmallPathRenderer() override;
+
+ // GrOnFlushCallbackObject overrides
+ //
+ // Note: because this class is associated with a path renderer we want it to be removed from
+ // the list of active OnFlushCallbackObjects in a freeGpuResources call (i.e., we accept the
+ // default retainOnFreeGpuResources implementation).
+
+ void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t*, int) override {
+ if (fAtlas) {
+ fAtlas->instantiate(onFlushRP);
+ }
+ }
+
+ void postFlush(GrDeferredUploadToken startTokenForNextFlush,
+ const uint32_t* /*opsTaskIDs*/, int /*numOpsTaskIDs*/) override {
+ if (fAtlas) {
+ fAtlas->compact(startTokenForNextFlush);
+ }
+ }
+
+ using ShapeCache = SkTDynamicHash<ShapeData, ShapeDataKey>;
+ typedef SkTInternalLList<ShapeData> ShapeDataList;
+
+ static std::unique_ptr<GrDrawOp> createOp_TestingOnly(GrRecordingContext*,
+ GrPaint&&,
+ const GrShape&,
+ const SkMatrix& viewMatrix,
+ GrDrawOpAtlas* atlas,
+ ShapeCache*,
+ ShapeDataList*,
+ bool gammaCorrect,
+ const GrUserStencilSettings*);
+ struct PathTestStruct;
+
+private:
+ class SmallPathOp;
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ static void HandleEviction(GrDrawOpAtlas::AtlasID, void*);
+
+ std::unique_ptr<GrDrawOpAtlas> fAtlas;
+ ShapeCache fShapeCache;
+ ShapeDataList fShapeList;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.cpp
new file mode 100644
index 0000000000..c05b955d8e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrPath.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrStencilClip.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrDrawPathOp.h"
+#include "src/gpu/ops/GrStencilAndCoverPathRenderer.h"
+#include "src/gpu/ops/GrStencilPathOp.h"
+
+GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrResourceProvider* resourceProvider,
+ const GrCaps& caps) {
+ if (caps.shaderCaps()->pathRenderingSupport() && !caps.avoidStencilBuffers()) {
+ return new GrStencilAndCoverPathRenderer(resourceProvider);
+ } else {
+ return nullptr;
+ }
+}
+
+GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrResourceProvider* resourceProvider)
+ : fResourceProvider(resourceProvider) {
+}
+
+GrPathRenderer::CanDrawPath
+GrStencilAndCoverPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ SkASSERT(!args.fTargetIsWrappedVkSecondaryCB);
+ // GrPath doesn't support hairline paths. An arbitrary path effect could produce a hairline
+ // path.
+ if (args.fShape->style().strokeRec().isHairlineStyle() ||
+ args.fShape->style().hasNonDashPathEffect() ||
+ args.fHasUserStencilSettings) {
+ return CanDrawPath::kNo;
+ }
+ if (GrAAType::kCoverage == args.fAAType && !args.fProxy->canUseMixedSamples(*args.fCaps)) {
+ // We rely on a mixed sampled stencil buffer to implement coverage AA.
+ return CanDrawPath::kNo;
+ }
+ return CanDrawPath::kYes;
+}
+
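+// Find or create a GrPath for this shape. Non-volatile shapes are cached in
+// the resource provider under a unique key, so repeated draws of the same
+// geometry share one GPU path object.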
+static sk_sp<GrPath> get_gr_path(GrResourceProvider* resourceProvider, const GrShape& shape) {
+ GrUniqueKey key;
+ bool isVolatile;
+ GrPath::ComputeKey(shape, &key, &isVolatile);
+ sk_sp<GrPath> path;
+ if (!isVolatile) {
+ path = resourceProvider->findByUniqueKey<GrPath>(key);
+ }
+ if (!path) {
+ SkPath skPath;
+ shape.asPath(&skPath);
+ path = resourceProvider->createPath(skPath, shape.style());
+ if (!isVolatile) {
+ resourceProvider->assignUniqueKeyToResource(key, path.get());
+ }
+ } else {
+#ifdef SK_DEBUG
+ SkPath skPath;
+ shape.asPath(&skPath);
+ SkASSERT(path->isEqualTo(skPath, shape.style()));
+#endif
+ }
+ return path;
+}
+
+void GrStencilAndCoverPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrStencilAndCoverPathRenderer::onStencilPath");
+ sk_sp<GrPath> p(get_gr_path(fResourceProvider, *args.fShape));
+ args.fRenderTargetContext->priv().stencilPath(
+ *args.fClip, args.fDoStencilMSAA, *args.fViewMatrix, std::move(p));
+}
+
+bool GrStencilAndCoverPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrStencilAndCoverPathRenderer::onDrawPath");
+ SkASSERT(!args.fShape->style().strokeRec().isHairlineStyle());
+
+ const SkMatrix& viewMatrix = *args.fViewMatrix;
+
+ bool doStencilMSAA = GrAAType::kNone != args.fAAType;
+
+ sk_sp<GrPath> path(get_gr_path(fResourceProvider, *args.fShape));
+
+ if (args.fShape->inverseFilled()) {
+ SkMatrix vmi;
+ if (!viewMatrix.invert(&vmi)) {
+ return true;
+ }
+
+ SkRect devBounds = SkRect::MakeIWH(args.fRenderTargetContext->width(),
+ args.fRenderTargetContext->height()); // Inverse fill.
+
+ // fake inverse with a stencil and cover
+ GrAppliedClip appliedClip;
+ if (!args.fClip->apply(
+ args.fContext, args.fRenderTargetContext, doStencilMSAA, true, &appliedClip,
+ &devBounds)) {
+ return true;
+ }
+ GrStencilClip stencilClip(appliedClip.stencilStackID());
+ if (appliedClip.scissorState().enabled()) {
+ stencilClip.fixedClip().setScissor(appliedClip.scissorState().rect());
+ }
+ if (appliedClip.windowRectsState().enabled()) {
+ stencilClip.fixedClip().setWindowRectangles(appliedClip.windowRectsState().windows(),
+ appliedClip.windowRectsState().mode());
+ }
+ // Just ignore the analytic FPs (if any) during the stencil pass. They will still clip the
+ // final draw and it is meaningless to multiply by coverage when drawing to stencil.
+ args.fRenderTargetContext->priv().stencilPath(
+ stencilClip, GrAA(doStencilMSAA), viewMatrix, std::move(path));
+
+ {
+ static constexpr GrUserStencilSettings kInvertedCoverPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ // We know our rect will hit pixels outside the clip and the user bits will
+ // be 0 outside the clip. So we can't just fill where the user bits are 0. We
+ // also need to check that the clip bit is set.
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+ );
+
+ SkRect coverBounds;
+ // mapRect through a perspective matrix may not be correct
+ if (!viewMatrix.hasPerspective()) {
+ vmi.mapRect(&coverBounds, devBounds);
+ // Theoretically we could set bloat = 0, but we leave it in to guard against
+ // matrix inversion precision error.
+ SkScalar bloat = viewMatrix.getMaxScale() * SK_ScalarHalf;
+ coverBounds.outset(bloat, bloat);
+ } else {
+ coverBounds = devBounds;
+ }
+ const SkMatrix& coverMatrix = !viewMatrix.hasPerspective() ? viewMatrix : SkMatrix::I();
+ const SkMatrix& localMatrix = !viewMatrix.hasPerspective() ? SkMatrix::I() : vmi;
+
+ // We have to suppress enabling MSAA for mixed samples or we will get seams due to
+ // coverage modulation along the edge where two triangles making up the rect meet.
+ GrAA doStencilMSAA = GrAA::kNo;
+ if (GrAAType::kMSAA == args.fAAType) {
+ doStencilMSAA = GrAA::kYes;
+ }
+ args.fRenderTargetContext->priv().stencilRect(
+ *args.fClip, &kInvertedCoverPass, std::move(args.fPaint), doStencilMSAA,
+ coverMatrix, coverBounds, &localMatrix);
+ }
+ } else {
+ std::unique_ptr<GrDrawOp> op = GrDrawPathOp::Make(
+ args.fContext, viewMatrix, std::move(args.fPaint), GrAA(doStencilMSAA),
+ std::move(path));
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.h
new file mode 100644
index 0000000000..755a1b63b7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStencilAndCoverPathRenderer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuiltInPathRenderer_DEFINED
+#define GrBuiltInPathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+class GrGpu;
+class GrResourceProvider;
+
+/**
+ * Uses GrGpu::stencilPath followed by a cover rectangle. This subclass doesn't apply AA; it relies
+ * on the target having MSAA if AA is desired.
+ */
+class GrStencilAndCoverPathRenderer : public GrPathRenderer {
+public:
+
+ static GrPathRenderer* Create(GrResourceProvider*, const GrCaps&);
+
+
+private:
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kStencilOnly_StencilSupport;
+ }
+
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ void onStencilPath(const StencilPathArgs&) override;
+
+ GrStencilAndCoverPathRenderer(GrResourceProvider*);
+
+ GrResourceProvider* fResourceProvider;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.cpp b/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.cpp
new file mode 100644
index 0000000000..1cfd8ff04a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrStencilPathOp.h"
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+
+std::unique_ptr<GrOp> GrStencilPathOp::Make(GrRecordingContext* context,
+ const SkMatrix& viewMatrix,
+ bool useHWAA,
+ bool hasStencilClip,
+ const GrScissorState& scissor,
+ sk_sp<const GrPath> path) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ return pool->allocate<GrStencilPathOp>(viewMatrix, useHWAA,
+ hasStencilClip, scissor, std::move(path));
+}
+
+void GrStencilPathOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
+ GrRenderTarget* rt = state->drawOpArgs().renderTarget();
+ SkASSERT(rt);
+
+ int numStencilBits = rt->renderTargetPriv().numStencilBits();
+ GrStencilSettings stencil(GrPathRendering::GetStencilPassSettings(fPath->getFillType()),
+ fHasStencilClip, numStencilBits);
+
+ GrPathRendering::StencilPathArgs args(fUseHWAA, state->drawOpArgs().proxy(),
+ &fViewMatrix, &fScissor, &stencil);
+ state->gpu()->pathRendering()->stencilPath(args, fPath.get());
+}
diff --git a/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.h b/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.h
new file mode 100644
index 0000000000..2d4f37fee3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStencilPathOp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStencilPathOp_DEFINED
+#define GrStencilPathOp_DEFINED
+
+#include "src/gpu/GrPath.h"
+#include "src/gpu/GrPathRendering.h"
+#include "src/gpu/GrScissorState.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/ops/GrOp.h"
+
+class GrOpFlushState;
+class GrRecordingContext;
+
+class GrStencilPathOp final : public GrOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrOp> Make(GrRecordingContext* context,
+ const SkMatrix& viewMatrix,
+ bool useHWAA,
+ bool hasStencilClip,
+ const GrScissorState& scissor,
+ sk_sp<const GrPath> path);
+
+ const char* name() const override { return "StencilPathOp"; }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.printf("Path: 0x%p, AA: %d", fPath.get(), fUseHWAA);
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+#endif
+
+private:
+ friend class GrOpMemoryPool; // for ctor
+
+ GrStencilPathOp(const SkMatrix& viewMatrix,
+ bool useHWAA,
+ bool hasStencilClip,
+ const GrScissorState& scissor,
+ sk_sp<const GrPath> path)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix)
+ , fUseHWAA(useHWAA)
+ , fHasStencilClip(hasStencilClip)
+ , fScissor(scissor)
+ , fPath(std::move(path)) {
+ this->setBounds(fPath->getBounds(), HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ SkMatrix fViewMatrix;
+ bool fUseHWAA;
+ bool fHasStencilClip;
+ GrScissorState fScissor;
+ sk_sp<const GrPath> fPath;
+
+ typedef GrOp INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.cpp b/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.cpp
new file mode 100644
index 0000000000..439a7c7dcc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.cpp
@@ -0,0 +1,830 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrStrokeRectOp.h"
+
+#include "include/core/SkStrokeRec.h"
+#include "include/private/GrResourceKey.h"
+#include "include/utils/SkRandom.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/ops/GrFillRectOp.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+namespace {
+
+// We support all hairlines, bevels, and miters, but not round joins. Also, check whether the miter
+// limit makes a miter join effectively beveled. If the miter is effectively beveled, it is only
+// supported when using an AA stroke.
+inline static bool allowed_stroke(const SkStrokeRec& stroke, GrAA aa, bool* isMiter) {
+ SkASSERT(stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kHairline_Style);
+ // For hairlines, make bevel and round joins appear the same as mitered ones.
+ if (!stroke.getWidth()) {
+ *isMiter = true;
+ return true;
+ }
+ if (stroke.getJoin() == SkPaint::kBevel_Join) {
+ *isMiter = false;
+ return aa == GrAA::kYes; // bevel only supported with AA
+ }
+ if (stroke.getJoin() == SkPaint::kMiter_Join) {
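+ // For an axis-aligned rect every join is 90 degrees, where the miter
+ // ratio is 1 / sin(45 deg) = sqrt(2); a limit below that bevels the corners.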
+ *isMiter = stroke.getMiter() >= SK_ScalarSqrt2;
+ // Supported under non-AA only if it remains mitered
+ return aa == GrAA::kYes || *isMiter;
+ }
+ return false;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Non-AA Stroking
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/* Create a triangle strip that strokes the specified rect. There are 8
+ unique vertices, but we repeat the last 2 to close the strip. Alternatively we
+ could use an index array and send only 8 verts, though it is not clear that
+ would be faster.
+ */
+static void init_nonaa_stroke_rect_strip(SkPoint verts[10], const SkRect& rect, SkScalar width) {
+ const SkScalar rad = SkScalarHalf(width);
+
+ verts[0].set(rect.fLeft + rad, rect.fTop + rad);
+ verts[1].set(rect.fLeft - rad, rect.fTop - rad);
+ verts[2].set(rect.fRight - rad, rect.fTop + rad);
+ verts[3].set(rect.fRight + rad, rect.fTop - rad);
+ verts[4].set(rect.fRight - rad, rect.fBottom - rad);
+ verts[5].set(rect.fRight + rad, rect.fBottom + rad);
+ verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
+ verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
+ verts[8] = verts[0];
+ verts[9] = verts[1];
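+ // (The strip visits the four corners clockwise as (inner, outer) pairs:
+ // TL, TR, BR, BL, then repeats the TL pair to close the loop.)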
+
+ // TODO: we should be catching this higher up the call stack and just draw a single
+ // non-AA rect
+ if (2*rad >= rect.width()) {
+ verts[0].fX = verts[2].fX = verts[4].fX = verts[6].fX = verts[8].fX = rect.centerX();
+ }
+ if (2*rad >= rect.height()) {
+ verts[0].fY = verts[2].fY = verts[4].fY = verts[6].fY = verts[8].fY = rect.centerY();
+ }
+}
+
+class NonAAStrokeRectOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ const char* name() const override { return "NonAAStrokeRectOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf(
+ "Color: 0x%08x, Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], "
+ "StrokeWidth: %.2f\n",
+ fColor.toBytes_RGBA(), fRect.fLeft, fRect.fTop, fRect.fRight, fRect.fBottom,
+ fStrokeWidth);
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke,
+ GrAAType aaType) {
+ bool isMiter;
+ if (!allowed_stroke(stroke, GrAA::kNo, &isMiter)) {
+ return nullptr;
+ }
+ Helper::InputFlags inputFlags = Helper::InputFlags::kNone;
+ // Depending on sub-pixel coordinates and the particular GPU, we may lose a corner of
+ // hairline rects. We jam all the vertices to pixel centers to avoid this, but not
+ // when MSAA is enabled because it can cause ugly artifacts.
+ if (stroke.getStyle() == SkStrokeRec::kHairline_Style && aaType != GrAAType::kMSAA) {
+ inputFlags |= Helper::InputFlags::kSnapVerticesToPixelCenters;
+ }
+ return Helper::FactoryHelper<NonAAStrokeRectOp>(context, std::move(paint), inputFlags,
+ viewMatrix, rect,
+ stroke, aaType);
+ }
+
+ NonAAStrokeRectOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ Helper::InputFlags inputFlags, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkStrokeRec& stroke, GrAAType aaType)
+ : INHERITED(ClassID()), fHelper(helperArgs, aaType, inputFlags) {
+ fColor = color;
+ fViewMatrix = viewMatrix;
+ fRect = rect;
+ // Sort the rect for hairlines
+ fRect.sort();
+ fStrokeWidth = stroke.getWidth();
+
+ SkScalar rad = SkScalarHalf(fStrokeWidth);
+ SkRect bounds = rect;
+ bounds.outset(rad, rad);
+
+ // If our caller snaps to pixel centers then we have to round out the bounds
+ if (inputFlags & Helper::InputFlags::kSnapVerticesToPixelCenters) {
+ SkASSERT(!fStrokeWidth || aaType == GrAAType::kNone);
+ viewMatrix.mapRect(&bounds);
+ // We want to be consistent with how we snap non-aa lines. To match what we do in
+ // GrGLSLVertexShaderBuilder, we first floor all the vertex values and then add half a
+ // pixel to force us to pixel centers.
+ bounds.setLTRB(SkScalarFloorToScalar(bounds.fLeft),
+ SkScalarFloorToScalar(bounds.fTop),
+ SkScalarFloorToScalar(bounds.fRight),
+ SkScalarFloorToScalar(bounds.fBottom));
+ bounds.offset(0.5f, 0.5f);
+ this->setBounds(bounds, HasAABloat::kNo, IsHairline::kNo);
+ } else {
+ HasAABloat aaBloat = (aaType == GrAAType::kNone) ? HasAABloat ::kNo : HasAABloat::kYes;
+ this->setTransformedBounds(bounds, fViewMatrix, aaBloat,
+ fStrokeWidth ? IsHairline::kNo : IsHairline::kYes);
+ }
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ // This Op uses uniform (not vertex) color, so doesn't need to track wide color.
+ return fHelper.finalizeProcessors(caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kNone, &fColor, nullptr);
+ }
+
+private:
+ void onPrepareDraws(Target* target) override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(fColor);
+ LocalCoords::Type localCoordsType = fHelper.usesLocalCoords()
+ ? LocalCoords::kUsePosition_Type
+ : LocalCoords::kUnused_Type;
+ gp = GrDefaultGeoProcFactory::Make(target->caps().shaderCaps(), color,
+ Coverage::kSolid_Type, localCoordsType,
+ fViewMatrix);
+ }
+
+ size_t kVertexStride = gp->vertexStride();
+ int vertexCount = kVertsPerHairlineRect;
+ if (fStrokeWidth > 0) {
+ vertexCount = kVertsPerStrokeRect;
+ }
+
+ sk_sp<const GrBuffer> vertexBuffer;
+ int firstVertex;
+
+ void* verts =
+ target->makeVertexSpace(kVertexStride, vertexCount, &vertexBuffer, &firstVertex);
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ SkPoint* vertex = reinterpret_cast<SkPoint*>(verts);
+
+ GrPrimitiveType primType;
+ if (fStrokeWidth > 0) {
+ primType = GrPrimitiveType::kTriangleStrip;
+ init_nonaa_stroke_rect_strip(vertex, fRect, fStrokeWidth);
+ } else {
+ // hairline
+ primType = GrPrimitiveType::kLineStrip;
+ vertex[0].set(fRect.fLeft, fRect.fTop);
+ vertex[1].set(fRect.fRight, fRect.fTop);
+ vertex[2].set(fRect.fRight, fRect.fBottom);
+ vertex[3].set(fRect.fLeft, fRect.fBottom);
+ vertex[4].set(fRect.fLeft, fRect.fTop);
+ }
+
+ GrMesh* mesh = target->allocMesh(primType);
+ mesh->setNonIndexedNonInstanced(vertexCount);
+ mesh->setVertexData(std::move(vertexBuffer), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ // TODO: override onCombineIfPossible
+
+ Helper fHelper;
+ SkPMColor4f fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ SkScalar fStrokeWidth;
+
+ const static int kVertsPerHairlineRect = 5;
+ const static int kVertsPerStrokeRect = 10;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// AA Stroking
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
+GR_DECLARE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
+
+static void compute_aa_rects(SkRect* devOutside, SkRect* devOutsideAssist, SkRect* devInside,
+ bool* isDegenerate, const SkMatrix& viewMatrix, const SkRect& rect,
+ SkScalar strokeWidth, bool miterStroke) {
+ SkRect devRect;
+ viewMatrix.mapRect(&devRect, rect);
+
+ SkVector devStrokeSize;
+ if (strokeWidth > 0) {
+ devStrokeSize.set(strokeWidth, strokeWidth);
+ viewMatrix.mapVectors(&devStrokeSize, 1);
+ devStrokeSize.setAbs(devStrokeSize);
+ } else {
+ devStrokeSize.set(SK_Scalar1, SK_Scalar1);
+ }
+
+ const SkScalar dx = devStrokeSize.fX;
+ const SkScalar dy = devStrokeSize.fY;
+ const SkScalar rx = SkScalarHalf(dx);
+ const SkScalar ry = SkScalarHalf(dy);
+
+ *devOutside = devRect;
+ *devOutsideAssist = devRect;
+ *devInside = devRect;
+
+ devOutside->outset(rx, ry);
+ devInside->inset(rx, ry);
+
+ // If we have a degenerate stroking rect (i.e., the stroke is larger than the inner rect)
+ // then we make a degenerate inside rect to avoid double hitting. We will also jam all of
+ // the points together when we render these rects.
+ SkScalar spare;
+ {
+ SkScalar w = devRect.width() - dx;
+ SkScalar h = devRect.height() - dy;
+ spare = SkTMin(w, h);
+ }
+
+ *isDegenerate = spare <= 0;
+ if (*isDegenerate) {
+ devInside->fLeft = devInside->fRight = devRect.centerX();
+ devInside->fTop = devInside->fBottom = devRect.centerY();
+ }
+
+ // For a bevel stroke, use two SkRect instances (devOutside and devOutsideAssist)
+ // to draw the outside of the octagon, because there are 8 vertices on the outer
+ // edge while the inner edge has 4, the same as a miter stroke.
+ if (!miterStroke) {
+ devOutside->inset(0, ry);
+ devOutsideAssist->outset(0, ry);
+ }
+}
+
+static sk_sp<GrGeometryProcessor> create_aa_stroke_rect_gp(const GrShaderCaps* shaderCaps,
+ bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool wideColor) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Coverage::Type coverageType =
+ tweakAlphaForCoverage ? Coverage::kSolid_Type : Coverage::kAttribute_Type;
+ LocalCoords::Type localCoordsType =
+ usesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
+ Color::Type colorType =
+ wideColor ? Color::kPremulWideColorAttribute_Type: Color::kPremulGrColorAttribute_Type;
+
+ return MakeForDeviceSpace(shaderCaps, colorType, coverageType, localCoordsType, viewMatrix);
+}
+
+class AAStrokeRectOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelper;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& devOutside,
+ const SkRect& devInside) {
+ return Helper::FactoryHelper<AAStrokeRectOp>(context, std::move(paint), viewMatrix,
+ devOutside, devInside);
+ }
+
+ AAStrokeRectOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkRect& devOutside, const SkRect& devInside)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage)
+ , fViewMatrix(viewMatrix) {
+ SkASSERT(!devOutside.isEmpty());
+ SkASSERT(!devInside.isEmpty());
+
+ fRects.emplace_back(RectInfo{color, devOutside, devOutside, devInside, false});
+ this->setBounds(devOutside, HasAABloat::kYes, IsHairline::kNo);
+ fMiterStroke = true;
+ }
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke) {
+ bool isMiter;
+ if (!allowed_stroke(stroke, GrAA::kYes, &isMiter)) {
+ return nullptr;
+ }
+ return Helper::FactoryHelper<AAStrokeRectOp>(context, std::move(paint), viewMatrix, rect,
+ stroke, isMiter);
+ }
+
+ AAStrokeRectOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
+ const SkMatrix& viewMatrix, const SkRect& rect, const SkStrokeRec& stroke,
+ bool isMiter)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, GrAAType::kCoverage)
+ , fViewMatrix(viewMatrix) {
+ fMiterStroke = isMiter;
+ RectInfo& info = fRects.push_back();
+ compute_aa_rects(&info.fDevOutside, &info.fDevOutsideAssist, &info.fDevInside,
+ &info.fDegenerate, viewMatrix, rect, stroke.getWidth(), isMiter);
+ info.fColor = color;
+ if (isMiter) {
+ this->setBounds(info.fDevOutside, HasAABloat::kYes, IsHairline::kNo);
+ } else {
+ // The outer polygon of the bevel stroke is an octagon specified by the points of a
+ // pair of overlapping rectangles where one is wide and the other is narrow.
+ SkRect bounds = info.fDevOutside;
+ bounds.joinPossiblyEmptyRect(info.fDevOutsideAssist);
+ this->setBounds(bounds, HasAABloat::kYes, IsHairline::kNo);
+ }
+ }
+
+ const char* name() const override { return "AAStrokeRect"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ for (const auto& info : fRects) {
+ string.appendf(
+ "Color: 0x%08x, ORect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], "
+ "AssistORect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], "
+ "IRect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], Degen: %d",
+ info.fColor.toBytes_RGBA(), info.fDevOutside.fLeft, info.fDevOutside.fTop,
+ info.fDevOutside.fRight, info.fDevOutside.fBottom, info.fDevOutsideAssist.fLeft,
+ info.fDevOutsideAssist.fTop, info.fDevOutsideAssist.fRight,
+ info.fDevOutsideAssist.fBottom, info.fDevInside.fLeft, info.fDevInside.fTop,
+ info.fDevInside.fRight, info.fDevInside.fBottom, info.fDegenerate);
+ }
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType,
+ GrProcessorAnalysisCoverage::kSingleChannel, &fRects.back().fColor, &fWideColor);
+ }
+
+private:
+ void onPrepareDraws(Target*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+ static const int kMiterIndexCnt = 3 * 24;
+ static const int kMiterVertexCnt = 16;
+ static const int kNumMiterRectsInIndexBuffer = 256;
+
+ static const int kBevelIndexCnt = 48 + 36 + 24;
+ static const int kBevelVertexCnt = 24;
+ static const int kNumBevelRectsInIndexBuffer = 256;
+
+ static sk_sp<const GrGpuBuffer> GetIndexBuffer(GrResourceProvider*, bool miterStroke);
+
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool miterStroke() const { return fMiterStroke; }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps&) override;
+
+ void generateAAStrokeRectGeometry(GrVertexWriter& vertices,
+ const SkPMColor4f& color,
+ bool wideColor,
+ const SkRect& devOutside,
+ const SkRect& devOutsideAssist,
+ const SkRect& devInside,
+ bool miterStroke,
+ bool degenerate,
+ bool tweakAlphaForCoverage) const;
+
+ // TODO support AA rotated stroke rects by copying around view matrices
+ struct RectInfo {
+ SkPMColor4f fColor;
+ SkRect fDevOutside;
+ SkRect fDevOutsideAssist;
+ SkRect fDevInside;
+ bool fDegenerate;
+ };
+
+ Helper fHelper;
+ SkSTArray<1, RectInfo, true> fRects;
+ SkMatrix fViewMatrix;
+ bool fMiterStroke;
+ bool fWideColor;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+void AAStrokeRectOp::onPrepareDraws(Target* target) {
+ sk_sp<GrGeometryProcessor> gp(create_aa_stroke_rect_gp(target->caps().shaderCaps(),
+ fHelper.compatibleWithCoverageAsAlpha(),
+ this->viewMatrix(),
+ fHelper.usesLocalCoords(),
+ fWideColor));
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ int innerVertexNum = 4;
+ int outerVertexNum = this->miterStroke() ? 4 : 8;
+ int verticesPerInstance = (outerVertexNum + innerVertexNum) * 2;
+ int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
+ int instanceCount = fRects.count();
+
+ sk_sp<const GrGpuBuffer> indexBuffer =
+ GetIndexBuffer(target->resourceProvider(), this->miterStroke());
+ if (!indexBuffer) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ PatternHelper helper(target, GrPrimitiveType::kTriangles, gp->vertexStride(),
+ std::move(indexBuffer), verticesPerInstance, indicesPerInstance,
+ instanceCount);
+ GrVertexWriter vertices{ helper.vertices() };
+ if (!vertices.fPtr) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const RectInfo& info = fRects[i];
+ this->generateAAStrokeRectGeometry(vertices,
+ info.fColor,
+ fWideColor,
+ info.fDevOutside,
+ info.fDevOutsideAssist,
+ info.fDevInside,
+ fMiterStroke,
+ info.fDegenerate,
+ fHelper.compatibleWithCoverageAsAlpha());
+ }
+ helper.recordDraw(target, std::move(gp));
+}
+
+void AAStrokeRectOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+}
+
+sk_sp<const GrGpuBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider,
+ bool miterStroke) {
+ if (miterStroke) {
+ // clang-format off
+ static const uint16_t gMiterIndices[] = {
+ 0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
+ 1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
+ 2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
+ 3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,
+
+ 0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
+ 1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
+ 2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
+ 3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,
+
+ 0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
+ 1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
+ 2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
+ 3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
+ };
+ // clang-format on
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gMiterIndices) == kMiterIndexCnt);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ gMiterIndices, kMiterIndexCnt, kNumMiterRectsInIndexBuffer, kMiterVertexCnt,
+ gMiterIndexBufferKey);
+ } else {
+ /**
+ * As in the miter stroke, index = a + b, where a is the current index and b is
+ * the shift from the first index. The index layout:
+ * outer AA line: 0~3, 4~7
+ * outer edge: 8~11, 12~15
+ * inner edge: 16~19
+ * inner AA line: 20~23
+ * Following comes a bevel-stroke rect and its indices:
+ *
+ * 4 7
+ * *********************************
+ * * ______________________________ *
+ * * / 12 15 \ *
+ * * / \ *
+ * 0 * |8 16_____________________19 11 | * 3
+ * * | | | | *
+ * * | | **************** | | *
+ * * | | * 20 23 * | | *
+ * * | | * * | | *
+ * * | | * 21 22 * | | *
+ * * | | **************** | | *
+ * * | |____________________| | *
+ * 1 * |9 17 18 10| * 2
+ * * \ / *
+ * * \13 __________________________14/ *
+ * * *
+ * **********************************
+ * 5 6
+ */
+ // clang-format off
+ static const uint16_t gBevelIndices[] = {
+ // Draw outer AA, from outer AA line to outer edge, shift is 0.
+ 0 + 0, 1 + 0, 9 + 0, 9 + 0, 8 + 0, 0 + 0,
+ 1 + 0, 5 + 0, 13 + 0, 13 + 0, 9 + 0, 1 + 0,
+ 5 + 0, 6 + 0, 14 + 0, 14 + 0, 13 + 0, 5 + 0,
+ 6 + 0, 2 + 0, 10 + 0, 10 + 0, 14 + 0, 6 + 0,
+ 2 + 0, 3 + 0, 11 + 0, 11 + 0, 10 + 0, 2 + 0,
+ 3 + 0, 7 + 0, 15 + 0, 15 + 0, 11 + 0, 3 + 0,
+ 7 + 0, 4 + 0, 12 + 0, 12 + 0, 15 + 0, 7 + 0,
+ 4 + 0, 0 + 0, 8 + 0, 8 + 0, 12 + 0, 4 + 0,
+
+ // Draw the stroke, from outer edge to inner edge, shift is 8.
+ 0 + 8, 1 + 8, 9 + 8, 9 + 8, 8 + 8, 0 + 8,
+ 1 + 8, 5 + 8, 9 + 8,
+ 5 + 8, 6 + 8, 10 + 8, 10 + 8, 9 + 8, 5 + 8,
+ 6 + 8, 2 + 8, 10 + 8,
+ 2 + 8, 3 + 8, 11 + 8, 11 + 8, 10 + 8, 2 + 8,
+ 3 + 8, 7 + 8, 11 + 8,
+ 7 + 8, 4 + 8, 8 + 8, 8 + 8, 11 + 8, 7 + 8,
+ 4 + 8, 0 + 8, 8 + 8,
+
+ // Draw the inner AA, from inner edge to inner AA line, shift is 16.
+ 0 + 16, 1 + 16, 5 + 16, 5 + 16, 4 + 16, 0 + 16,
+ 1 + 16, 2 + 16, 6 + 16, 6 + 16, 5 + 16, 1 + 16,
+ 2 + 16, 3 + 16, 7 + 16, 7 + 16, 6 + 16, 2 + 16,
+ 3 + 16, 0 + 16, 4 + 16, 4 + 16, 7 + 16, 3 + 16,
+ };
+ // clang-format on
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gBevelIndices) == kBevelIndexCnt);
+
+ GR_DEFINE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
+ return resourceProvider->findOrCreatePatternedIndexBuffer(
+ gBevelIndices, kBevelIndexCnt, kNumBevelRectsInIndexBuffer, kBevelVertexCnt,
+ gBevelIndexBufferKey);
+ }
+}
+
+GrOp::CombineResult AAStrokeRectOp::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
+ AAStrokeRectOp* that = t->cast<AAStrokeRectOp>();
+
+ if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // TODO combine across miterstroke changes
+ if (this->miterStroke() != that->miterStroke()) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // We apply the view matrix to the rect points on the CPU. However, if the pipeline uses
+ // local coords then we won't be able to combine. TODO: Upload local coords as an attribute.
+ if (fHelper.usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return CombineResult::kCannotCombine;
+ }
+
+ fRects.push_back_n(that->fRects.count(), that->fRects.begin());
+ fWideColor |= that->fWideColor;
+ return CombineResult::kMerged;
+}
+
+static void setup_scale(int* scale, SkScalar inset) {
+ if (inset < SK_ScalarHalf) {
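+ // Map a sub-half-pixel inset to a byte coverage that rises from 0 to 255 as
+ // inset goes from 0 to 1/2: 512 * inset / (inset + 1/2) stays below 256 on
+ // that range, so the floored result fits in a byte. (Roughly, this models
+ // the reduced peak coverage of a stroke thinner than a pixel.)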
+ *scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
+ SkASSERT(*scale >= 0 && *scale <= 255);
+ } else {
+ *scale = 0xff;
+ }
+}
+
+void AAStrokeRectOp::generateAAStrokeRectGeometry(GrVertexWriter& vertices,
+ const SkPMColor4f& color,
+ bool wideColor,
+ const SkRect& devOutside,
+ const SkRect& devOutsideAssist,
+ const SkRect& devInside,
+ bool miterStroke,
+ bool degenerate,
+ bool tweakAlphaForCoverage) const {
+ // We create vertices for four nested rectangles. There are two ramps from 0 to full
+ // coverage, one on the exterior of the stroke and the other on the interior.
+
+ // TODO: this only really works if the X & Y margins are the same all around
+ // the rect (or if they are all >= 1.0).
+ SkScalar inset;
+ if (!degenerate) {
+ inset = SkMinScalar(SK_Scalar1, devOutside.fRight - devInside.fRight);
+ inset = SkMinScalar(inset, devInside.fLeft - devOutside.fLeft);
+ inset = SkMinScalar(inset, devInside.fTop - devOutside.fTop);
+ if (miterStroke) {
+ inset = SK_ScalarHalf * SkMinScalar(inset, devOutside.fBottom - devInside.fBottom);
+ } else {
+ inset = SK_ScalarHalf *
+ SkMinScalar(inset, devOutsideAssist.fBottom - devInside.fBottom);
+ }
+ SkASSERT(inset >= 0);
+ } else {
+ // TODO use real devRect here
+ inset = SkMinScalar(devOutside.width(), SK_Scalar1);
+ inset = SK_ScalarHalf *
+ SkMinScalar(inset, SkTMax(devOutside.height(), devOutsideAssist.height()));
+ }
+
+ auto inset_fan = [](const SkRect& r, SkScalar dx, SkScalar dy) {
+ return GrVertexWriter::TriFanFromRect(r.makeInset(dx, dy));
+ };
+
+ auto maybe_coverage = [tweakAlphaForCoverage](float coverage) {
+ return GrVertexWriter::If(!tweakAlphaForCoverage, coverage);
+ };
+
+ GrVertexColor outerColor(tweakAlphaForCoverage ? SK_PMColor4fTRANSPARENT : color, wideColor);
+
+ // Outermost rect
+ vertices.writeQuad(inset_fan(devOutside, -SK_ScalarHalf, -SK_ScalarHalf),
+ outerColor,
+ maybe_coverage(0.0f));
+
+ if (!miterStroke) {
+ // Second outermost
+ vertices.writeQuad(inset_fan(devOutsideAssist, -SK_ScalarHalf, -SK_ScalarHalf),
+ outerColor,
+ maybe_coverage(0.0f));
+ }
+
+ // scale is the coverage for the inner two rects.
+ int scale;
+ setup_scale(&scale, inset);
+
+ float innerCoverage = GrNormalizeByteToFloat(scale);
+ SkPMColor4f scaledColor = color * innerCoverage;
+ GrVertexColor innerColor(tweakAlphaForCoverage ? scaledColor : color, wideColor);
+
+ // Inner rect
+ vertices.writeQuad(inset_fan(devOutside, inset, inset),
+ innerColor,
+ maybe_coverage(innerCoverage));
+
+ if (!miterStroke) {
+ // Second inner
+ vertices.writeQuad(inset_fan(devOutsideAssist, inset, inset),
+ innerColor,
+ maybe_coverage(innerCoverage));
+ }
+
+ if (!degenerate) {
+ vertices.writeQuad(inset_fan(devInside, -inset, -inset),
+ innerColor,
+ maybe_coverage(innerCoverage));
+
+ // The innermost rect has 0 coverage...
+ vertices.writeQuad(inset_fan(devInside, SK_ScalarHalf, SK_ScalarHalf),
+ outerColor,
+ maybe_coverage(0.0f));
+ } else {
+ // When the interior rect has become degenerate we smoosh to a single point
+ SkASSERT(devInside.fLeft == devInside.fRight && devInside.fTop == devInside.fBottom);
+
+ vertices.writeQuad(GrVertexWriter::TriFanFromRect(devInside),
+ innerColor,
+ maybe_coverage(innerCoverage));
+
+ // ... unless we are degenerate, in which case we must apply the scaled coverage
+ vertices.writeQuad(GrVertexWriter::TriFanFromRect(devInside),
+ innerColor,
+ maybe_coverage(innerCoverage));
+ }
+}
+
+} // anonymous namespace
+
+namespace GrStrokeRectOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke) {
+ if (aaType == GrAAType::kCoverage) {
+ // The AA op only supports axis-aligned rectangles
+ if (!viewMatrix.rectStaysRect()) {
+ return nullptr;
+ }
+ return AAStrokeRectOp::Make(context, std::move(paint), viewMatrix, rect, stroke);
+ } else {
+ return NonAAStrokeRectOp::Make(context, std::move(paint), viewMatrix, rect, stroke, aaType);
+ }
+}
+
+std::unique_ptr<GrDrawOp> MakeNested(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect rects[2]) {
+ SkASSERT(viewMatrix.rectStaysRect());
+ SkASSERT(!rects[0].isEmpty() && !rects[1].isEmpty());
+
+ SkRect devOutside, devInside;
+ viewMatrix.mapRect(&devOutside, rects[0]);
+ viewMatrix.mapRect(&devInside, rects[1]);
+ if (devInside.isEmpty()) {
+ if (devOutside.isEmpty()) {
+ return nullptr;
+ }
+ return GrFillRectOp::Make(context, std::move(paint), GrAAType::kCoverage,
+ GrQuadAAFlags::kAll,
+ GrQuad::MakeFromRect(rects[0], viewMatrix),
+ GrQuad(rects[0]));
+ }
+
+ return AAStrokeRectOp::Make(context, std::move(paint), viewMatrix, devOutside, devInside);
+}
+
+} // namespace GrStrokeRectOp
+
+#if GR_TEST_UTILS
+
+#include "src/gpu/GrDrawOpTest.h"
+
+GR_DRAW_OP_TEST_DEFINE(NonAAStrokeRectOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkScalar strokeWidth = random->nextBool() ? 0.0f : 2.0f;
+ SkPaint strokePaint;
+ strokePaint.setStrokeWidth(strokeWidth);
+ strokePaint.setStyle(SkPaint::kStroke_Style);
+ strokePaint.setStrokeJoin(SkPaint::kMiter_Join);
+ SkStrokeRec strokeRec(strokePaint);
+ GrAAType aaType = GrAAType::kNone;
+ if (numSamples > 1) {
+ aaType = random->nextBool() ? GrAAType::kMSAA : GrAAType::kNone;
+ }
+ return NonAAStrokeRectOp::Make(context, std::move(paint), viewMatrix, rect, strokeRec, aaType);
+}
+
+GR_DRAW_OP_TEST_DEFINE(AAStrokeRectOp) {
+ bool miterStroke = random->nextBool();
+
+ // Create either an empty rect or a non-empty rect.
+ SkRect rect =
+ random->nextBool() ? SkRect::MakeXYWH(10, 10, 50, 40) : SkRect::MakeXYWH(6, 7, 0, 0);
+ SkScalar minDim = SkMinScalar(rect.width(), rect.height());
+ SkScalar strokeWidth = random->nextUScalar1() * minDim;
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ rec.setStrokeStyle(strokeWidth);
+ rec.setStrokeParams(SkPaint::kButt_Cap,
+ miterStroke ? SkPaint::kMiter_Join : SkPaint::kBevel_Join, 1.f);
+ SkMatrix matrix = GrTest::TestMatrixRectStaysRect(random);
+ return AAStrokeRectOp::Make(context, std::move(paint), matrix, rect, rec);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.h b/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.h
new file mode 100644
index 0000000000..6f319cc17d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrStrokeRectOp.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStrokeRectOp_DEFINED
+#define GrStrokeRectOp_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+
+class GrDrawOp;
+class GrPaint;
+class GrRecordingContext;
+class SkMatrix;
+struct SkRect;
+class SkStrokeRec;
+
+/**
+ * A set of factory functions for drawing stroked rectangles either coverage-antialiased, or
+ * non-antialiased. The non-antialiased ops can be used with MSAA. As with other GrDrawOp factories,
+ * the GrPaint is only consumed by these methods if a valid op is returned. If null is returned then
+ * the paint is unmodified and may still be used.
+ */
+namespace GrStrokeRectOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ GrAAType aaType,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke);
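+
+// A sketch of a typical call site (names here are illustrative, not part of
+// this header):
+//   SkStrokeRec stroke(SkStrokeRec::kHairline_InitStyle);
+//   if (auto op = GrStrokeRectOp::Make(context, std::move(paint),
+//                                      GrAAType::kCoverage, viewMatrix,
+//                                      rect, stroke)) {
+//       renderTargetContext->addDrawOp(clip, std::move(op));
+//   }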
+
+// rects[0] == outer rectangle, rects[1] == inner rectangle. Null return means there is nothing to
+// draw rather than failure. The area between the rectangles will be filled by the paint, and it
+// will be anti-aliased with coverage AA. viewMatrix.rectStaysRect() must be true.
+std::unique_ptr<GrDrawOp> MakeNested(GrRecordingContext* context,
+ GrPaint&& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect rects[2]);
+
+} // namespace GrStrokeRectOp
+
+#endif // GrStrokeRectOp_DEFINED
diff --git a/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.cpp b/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.cpp
new file mode 100644
index 0000000000..8ec223d604
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.cpp
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrTessellatingPathRenderer.h"
+#include <stdio.h>
+#include "src/core/SkGeometry.h"
+#include "src/gpu/GrAuditTrail.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrDefaultGeoProcFactory.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrResourceCache.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/GrTessellator.h"
+#include "src/gpu/geometry/GrPathUtils.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
+
+#ifndef GR_AA_TESSELLATOR_MAX_VERB_COUNT
+#define GR_AA_TESSELLATOR_MAX_VERB_COUNT 10
+#endif
+
+/*
+ * This path renderer tessellates the path into triangles using GrTessellator, uploads the
+ * triangles to a vertex buffer, and renders them with a single draw call. It can do screenspace
+ * antialiasing with a one-pixel coverage ramp.
+ */
+namespace {
+
+struct TessInfo {
+ SkScalar fTolerance;
+ int fCount;
+};
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class PathInvalidator : public SkPathRef::GenIDChangeListener {
+public:
+ PathInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
+ : fMsg(key, contextUniqueID) {}
+
+private:
+ GrUniqueKeyInvalidatedMessage fMsg;
+
+ void onChange() override {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
+ }
+};
+
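+// Returns true if previously cached vertex data can be reused at tolerance 'tol'. A stored
+// tolerance of 0 marks a fully linear tessellation (no curves), which stays valid at any
+// tolerance; otherwise the cached data is reused only if it was built at a tolerance finer than
+// three times the one now required.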
+bool cache_match(GrGpuBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
+ if (!vertexBuffer) {
+ return false;
+ }
+ const SkData* data = vertexBuffer->getUniqueKey().getCustomData();
+ SkASSERT(data);
+ const TessInfo* info = static_cast<const TessInfo*>(data->data());
+ if (info->fTolerance == 0 || info->fTolerance < 3.0f * tol) {
+ *actualCount = info->fCount;
+ return true;
+ }
+ return false;
+}
+
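+// Allocates vertices in a static GPU buffer that outlives the flush, so the tessellation can be
+// cached and reused across draws (used by the non-AA path in draw() below). When the buffer
+// cannot be mapped, it stages the data in CPU memory and uploads it with updateData().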
+class StaticVertexAllocator : public GrTessellator::VertexAllocator {
+public:
+ StaticVertexAllocator(size_t stride, GrResourceProvider* resourceProvider, bool canMapVB)
+ : VertexAllocator(stride)
+ , fResourceProvider(resourceProvider)
+ , fCanMapVB(canMapVB)
+ , fVertices(nullptr) {
+ }
+ void* lock(int vertexCount) override {
+ size_t size = vertexCount * stride();
+ fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
+ kStatic_GrAccessPattern);
+ if (!fVertexBuffer.get()) {
+ return nullptr;
+ }
+ if (fCanMapVB) {
+ fVertices = fVertexBuffer->map();
+ } else {
+ fVertices = sk_malloc_throw(vertexCount * stride());
+ }
+ return fVertices;
+ }
+ void unlock(int actualCount) override {
+ if (fCanMapVB) {
+ fVertexBuffer->unmap();
+ } else {
+ fVertexBuffer->updateData(fVertices, actualCount * stride());
+ sk_free(fVertices);
+ }
+ fVertices = nullptr;
+ }
+ sk_sp<GrGpuBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); }
+
+private:
+ sk_sp<GrGpuBuffer> fVertexBuffer;
+ GrResourceProvider* fResourceProvider;
+ bool fCanMapVB;
+ void* fVertices;
+};
+
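+// Allocates vertices out of the draw target's per-flush vertex space. Nothing is cached, so this
+// is used by the coverage-AA path in drawAA(), whose tessellation depends on the view matrix.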
+class DynamicVertexAllocator : public GrTessellator::VertexAllocator {
+public:
+ DynamicVertexAllocator(size_t stride, GrMeshDrawOp::Target* target)
+ : VertexAllocator(stride)
+ , fTarget(target)
+ , fVertexBuffer(nullptr)
+ , fVertices(nullptr) {}
+ void* lock(int vertexCount) override {
+ fVertexCount = vertexCount;
+ fVertices = fTarget->makeVertexSpace(stride(), vertexCount, &fVertexBuffer, &fFirstVertex);
+ return fVertices;
+ }
+ void unlock(int actualCount) override {
+ fTarget->putBackVertices(fVertexCount - actualCount, stride());
+ fVertices = nullptr;
+ }
+ sk_sp<const GrBuffer> detachVertexBuffer() const { return std::move(fVertexBuffer); }
+ int firstVertex() const { return fFirstVertex; }
+
+private:
+ GrMeshDrawOp::Target* fTarget;
+ sk_sp<const GrBuffer> fVertexBuffer;
+ int fVertexCount;
+ int fFirstVertex;
+ void* fVertices;
+};
+
+} // namespace
+
+GrTessellatingPathRenderer::GrTessellatingPathRenderer()
+ : fMaxVerbCount(GR_AA_TESSELLATOR_MAX_VERB_COUNT) {
+}
+
+GrPathRenderer::CanDrawPath
+GrTessellatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // This path renderer can draw fill styles, and can do screenspace antialiasing via a
+ // one-pixel coverage ramp. It can do convex and concave paths, but we'll leave the convex
+ // ones to simpler algorithms. We pass on paths that have styles, though they may come back
+ // around after applying the styling information to the geometry to create a filled path.
+ if (!args.fShape->style().isSimpleFill() || args.fShape->knownToBeConvex()) {
+ return CanDrawPath::kNo;
+ }
+ switch (args.fAAType) {
+ case GrAAType::kNone:
+ case GrAAType::kMSAA:
+ // Prefer MSAA when any antialiasing is requested. In the non-analytic-AA case, we
+ // skip paths that don't have a key since the real advantage of this path renderer
+ // comes from caching the tessellated geometry.
+ if (!args.fShape->hasUnstyledKey()) {
+ return CanDrawPath::kNo;
+ }
+ break;
+ case GrAAType::kCoverage:
+ // Use analytic AA if we don't have MSAA. In this case, we do not cache, so we accept
+ // paths without keys.
+ SkPath path;
+ args.fShape->asPath(&path);
+ if (path.countVerbs() > fMaxVerbCount) {
+ return CanDrawPath::kNo;
+ }
+ break;
+ }
+ return CanDrawPath::kYes;
+}
+
+namespace {
+
+class TessellatingPathOp final : public GrMeshDrawOp {
+private:
+ using Helper = GrSimpleMeshDrawOpHelperWithStencil;
+
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ GrPaint&& paint,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ SkIRect devClipBounds,
+ GrAAType aaType,
+ const GrUserStencilSettings* stencilSettings) {
+ return Helper::FactoryHelper<TessellatingPathOp>(context, std::move(paint), shape,
+ viewMatrix, devClipBounds,
+ aaType, stencilSettings);
+ }
+
+ const char* name() const override { return "TessellatingPathOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fHelper.visitProxies(func);
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("Color 0x%08x, aa: %d\n", fColor.toBytes_RGBA(), fAntiAlias);
+ string += fHelper.dumpInfo();
+ string += INHERITED::dumpInfo();
+ return string;
+ }
+#endif
+
+ TessellatingPathOp(Helper::MakeArgs helperArgs,
+ const SkPMColor4f& color,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ GrAAType aaType,
+ const GrUserStencilSettings* stencilSettings)
+ : INHERITED(ClassID())
+ , fHelper(helperArgs, aaType, stencilSettings)
+ , fColor(color)
+ , fShape(shape)
+ , fViewMatrix(viewMatrix)
+ , fDevClipBounds(devClipBounds)
+ , fAntiAlias(GrAAType::kCoverage == aaType) {
+ SkRect devBounds;
+ viewMatrix.mapRect(&devBounds, shape.bounds());
+ if (shape.inverseFilled()) {
+ // Because the clip bounds are used to add a contour for inverse fills, they must also
+ // include the path bounds.
+ devBounds.join(SkRect::Make(fDevClipBounds));
+ }
+ this->setBounds(devBounds, HasAABloat::kNo, IsHairline::kNo);
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ GrProcessorAnalysisCoverage coverage = fAntiAlias
+ ? GrProcessorAnalysisCoverage::kSingleChannel
+ : GrProcessorAnalysisCoverage::kNone;
+ // This Op uses uniform (not vertex) color, so doesn't need to track wide color.
+ return fHelper.finalizeProcessors(
+ caps, clip, hasMixedSampledCoverage, clampType, coverage, &fColor, nullptr);
+ }
+
+private:
+ SkPath getPath() const {
+ SkASSERT(!fShape.style().applies());
+ SkPath path;
+ fShape.asPath(&path);
+ return path;
+ }
+
+ void draw(Target* target, sk_sp<const GrGeometryProcessor> gp, size_t vertexStride) {
+ SkASSERT(!fAntiAlias);
+ GrResourceProvider* rp = target->resourceProvider();
+ bool inverseFill = fShape.inverseFilled();
+ // Construct a cache key from the shape's unstyled key and, for inverse fills, the device
+ // clip bounds.
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ static constexpr int kClipBoundsCnt = sizeof(fDevClipBounds) / sizeof(uint32_t);
+ int shapeKeyDataCnt = fShape.unstyledKeySize();
+ SkASSERT(shapeKeyDataCnt >= 0);
+ GrUniqueKey::Builder builder(&key, kDomain, shapeKeyDataCnt + kClipBoundsCnt, "Path");
+ fShape.writeUnstyledKey(&builder[0]);
+ // For inverse fills, the tessellation is dependent on clip bounds.
+ if (inverseFill) {
+ memcpy(&builder[shapeKeyDataCnt], &fDevClipBounds, sizeof(fDevClipBounds));
+ } else {
+ memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds));
+ }
+ builder.finish();
+ sk_sp<GrGpuBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrGpuBuffer>(key));
+ int actualCount;
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds());
+ if (cache_match(cachedVertexBuffer.get(), tol, &actualCount)) {
+ this->drawVertices(target, std::move(gp), std::move(cachedVertexBuffer), 0,
+ actualCount);
+ return;
+ }
+
+ SkRect clipBounds = SkRect::Make(fDevClipBounds);
+
+ SkMatrix vmi;
+ if (!fViewMatrix.invert(&vmi)) {
+ return;
+ }
+ vmi.mapRect(&clipBounds);
+ bool isLinear;
+ bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
+ StaticVertexAllocator allocator(vertexStride, rp, canMapVB);
+ int count = GrTessellator::PathToTriangles(getPath(), tol, clipBounds, &allocator, false,
+ &isLinear);
+ if (count == 0) {
+ return;
+ }
+ sk_sp<GrGpuBuffer> vb = allocator.detachVertexBuffer();
+ TessInfo info;
+ info.fTolerance = isLinear ? 0 : tol;
+ info.fCount = count;
+ fShape.addGenIDChangeListener(sk_make_sp<PathInvalidator>(key, target->contextUniqueID()));
+ key.setCustomData(SkData::MakeWithCopy(&info, sizeof(info)));
+ rp->assignUniqueKeyToResource(key, vb.get());
+
+ this->drawVertices(target, std::move(gp), std::move(vb), 0, count);
+ }
+
+ void drawAA(Target* target, sk_sp<const GrGeometryProcessor> gp, size_t vertexStride) {
+ SkASSERT(fAntiAlias);
+ SkPath path = getPath();
+ if (path.isEmpty()) {
+ return;
+ }
+ SkRect clipBounds = SkRect::Make(fDevClipBounds);
+ path.transform(fViewMatrix);
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ bool isLinear;
+ DynamicVertexAllocator allocator(vertexStride, target);
+ int count = GrTessellator::PathToTriangles(path, tol, clipBounds, &allocator, true,
+ &isLinear);
+ if (count == 0) {
+ return;
+ }
+ this->drawVertices(target, std::move(gp), allocator.detachVertexBuffer(),
+ allocator.firstVertex(), count);
+ }
+
+ void onPrepareDraws(Target* target) override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(fColor);
+ LocalCoords::Type localCoordsType = fHelper.usesLocalCoords()
+ ? LocalCoords::kUsePosition_Type
+ : LocalCoords::kUnused_Type;
+ Coverage::Type coverageType;
+ if (fAntiAlias) {
+ if (fHelper.compatibleWithCoverageAsAlpha()) {
+ coverageType = Coverage::kAttributeTweakAlpha_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ } else {
+ coverageType = Coverage::kSolid_Type;
+ }
+ if (fAntiAlias) {
+ gp = GrDefaultGeoProcFactory::MakeForDeviceSpace(target->caps().shaderCaps(),
+ color, coverageType,
+ localCoordsType, fViewMatrix);
+ } else {
+ gp = GrDefaultGeoProcFactory::Make(target->caps().shaderCaps(),
+ color, coverageType, localCoordsType,
+ fViewMatrix);
+ }
+ }
+ if (!gp.get()) {
+ return;
+ }
+ size_t vertexStride = gp->vertexStride();
+ if (fAntiAlias) {
+ this->drawAA(target, std::move(gp), vertexStride);
+ } else {
+ this->draw(target, std::move(gp), vertexStride);
+ }
+ }
+
+ void drawVertices(Target* target, sk_sp<const GrGeometryProcessor> gp, sk_sp<const GrBuffer> vb,
+ int firstVertex, int count) {
+ GrMesh* mesh = target->allocMesh(TESSELLATOR_WIREFRAME ? GrPrimitiveType::kLines
+ : GrPrimitiveType::kTriangles);
+ mesh->setNonIndexedNonInstanced(count);
+ mesh->setVertexData(std::move(vb), firstVertex);
+ target->recordDraw(std::move(gp), mesh);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
+ }
+
+ Helper fHelper;
+ SkPMColor4f fColor;
+ GrShape fShape;
+ SkMatrix fViewMatrix;
+ SkIRect fDevClipBounds;
+ bool fAntiAlias;
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+bool GrTessellatingPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
+ "GrTessellatingPathRenderer::onDrawPath");
+ SkIRect clipBoundsI;
+ args.fClip->getConservativeBounds(args.fRenderTargetContext->width(),
+ args.fRenderTargetContext->height(),
+ &clipBoundsI);
+ std::unique_ptr<GrDrawOp> op = TessellatingPathOp::Make(
+ args.fContext, std::move(args.fPaint), *args.fShape, *args.fViewMatrix, clipBoundsI,
+ args.fAAType, args.fUserStencilSettings);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+GR_DRAW_OP_TEST_DEFINE(TesselatingPathOp) {
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPath(random);
+ SkIRect devClipBounds = SkIRect::MakeLTRB(
+ random->nextU(), random->nextU(), random->nextU(), random->nextU());
+ devClipBounds.sort();
+ static constexpr GrAAType kAATypes[] = {GrAAType::kNone, GrAAType::kMSAA, GrAAType::kCoverage};
+ GrAAType aaType;
+ do {
+ aaType = kAATypes[random->nextULessThan(SK_ARRAY_COUNT(kAATypes))];
+ } while(GrAAType::kMSAA == aaType && numSamples <= 1);
+ GrStyle style;
+ do {
+ GrTest::TestStyle(random, &style);
+ } while (!style.isSimpleFill());
+ GrShape shape(path, style);
+ return TessellatingPathOp::Make(context, std::move(paint), shape, viewMatrix, devClipBounds,
+ aaType, GrGetRandomStencil(random, context));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.h b/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.h
new file mode 100644
index 0000000000..2ebcd79a2f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrTessellatingPathRenderer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTessellatingPathRenderer_DEFINED
+#define GrTessellatingPathRenderer_DEFINED
+
+#include "src/gpu/GrPathRenderer.h"
+
+/**
+ * Subclass that renders the path by tessellating it into screen-space triangles plus
+ * extra one-pixel geometry for AA.
+ */
+class GrTessellatingPathRenderer : public GrPathRenderer {
+public:
+ GrTessellatingPathRenderer();
+#if GR_TEST_UTILS
+ void setMaxVerbCount(int maxVerbCount) { fMaxVerbCount = maxVerbCount; }
+#endif
+
+private:
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ bool onDrawPath(const DrawPathArgs&) override;
+ int fMaxVerbCount;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrTextureOp.cpp b/gfx/skia/skia/src/gpu/ops/GrTextureOp.cpp
new file mode 100644
index 0000000000..bd4673e26a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrTextureOp.cpp
@@ -0,0 +1,817 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <new>
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkFloatingPoint.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkRectPriv.h"
+#include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawOpTest.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrResourceProviderPriv.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrTextureDomain.h"
+#include "src/gpu/effects/generated/GrSaturateProcessor.h"
+#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/geometry/GrQuadBuffer.h"
+#include "src/gpu/geometry/GrQuadUtils.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/ops/GrFillRectOp.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/ops/GrQuadPerEdgeAA.h"
+#include "src/gpu/ops/GrTextureOp.h"
+
+namespace {
+
+using Domain = GrQuadPerEdgeAA::Domain;
+using VertexSpec = GrQuadPerEdgeAA::VertexSpec;
+using ColorType = GrQuadPerEdgeAA::ColorType;
+
+// Extracts the lengths of the vertical and horizontal edges of an axis-aligned quad. "width" is
+// the edge between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and
+// v3).
+static SkSize axis_aligned_quad_size(const GrQuad& quad) {
+ SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
+ // Simplification of the regular edge-length equation; since the quad is axis-aligned,
+ // the sqrt can be avoided.
+ float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
+ float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
+ return {dw, dh};
+}
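+// Worked example (illustrative; relies on the v0/v1/v2 edge convention stated above): for the
+// axis-aligned quad of SkRect::MakeLTRB(10, 20, 40, 60), dw = |40 - 10| + |20 - 20| = 30 and
+// dh = |10 - 10| + |60 - 20| = 40, giving {30, 40}.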
+
+static bool filter_has_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
+ // If not axis-aligned in src or dst, then always say it has an effect
+ if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
+ dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
+ return true;
+ }
+
+ SkRect srcRect;
+ SkRect dstRect;
+ if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
+ // Disable filtering when there is no scaling (width and height are the same), and the
+ // top-left corners have the same fraction (so src and dst snap to the pixel grid
+ // identically).
+ SkASSERT(srcRect.isSorted());
+ return srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
+ SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
+ SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
+ } else {
+ // Although the quads are axis-aligned, the local coordinate system is transformed such
+ // that fractionally-aligned sample centers will not align with the device coordinate
+ // system. So only disable filtering when the edges are the same length and the 0th
+ // vertices of both srcQuad and dstQuad are integer-aligned.
+ if (SkScalarIsInt(srcQuad.x(0)) && SkScalarIsInt(srcQuad.y(0)) &&
+ SkScalarIsInt(dstQuad.x(0)) && SkScalarIsInt(dstQuad.y(0))) {
+ // Extract edge lengths
+ SkSize srcSize = axis_aligned_quad_size(srcQuad);
+ SkSize dstSize = axis_aligned_quad_size(dstQuad);
+ return srcSize.fWidth != dstSize.fWidth || srcSize.fHeight != dstSize.fHeight;
+ } else {
+ return true;
+ }
+ }
+}
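+// For example (illustrative): drawing a 50x50 src rect to a 50x50 dst rect whose top-left corners
+// share the same fractional offset returns false, which lets GrTextureOp::Make() drop the filter
+// to kNearest.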
+
+// If normalizing the domain, pass 1/width, 1/height, and 1 for iw, ih, and h. Otherwise pass
+// 1, 1, and the texture height.
+static void compute_domain(Domain domain, GrSamplerState::Filter filter, GrSurfaceOrigin origin,
+ const SkRect& domainRect, float iw, float ih, float h, SkRect* out) {
+ static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
+ if (domain == Domain::kNo) {
+ // Either the quad has no domain constraint and is batched with a domain constrained op
+ // (in which case we want a domain that doesn't restrict normalized tex coords), or the
+ // entire op doesn't use the domain, in which case the returned value is ignored.
+ *out = kLargeRect;
+ return;
+ }
+
+ auto ltrb = Sk4f::Load(&domainRect);
+ if (filter == GrSamplerState::Filter::kBilerp) {
+ auto rblt = SkNx_shuffle<2, 3, 0, 1>(ltrb);
+ auto whwh = (rblt - ltrb).abs();
+ auto c = (rblt + ltrb) * 0.5f;
+ static const Sk4f kOffsets = {0.5f, 0.5f, -0.5f, -0.5f};
+ ltrb = (whwh < 1.f).thenElse(c, ltrb + kOffsets);
+ }
+ ltrb *= Sk4f(iw, ih, iw, ih);
+ if (origin == kBottomLeft_GrSurfaceOrigin) {
+ static const Sk4f kMul = {1.f, -1.f, 1.f, -1.f};
+ const Sk4f kAdd = {0.f, h, 0.f, h};
+ ltrb = SkNx_shuffle<0, 3, 2, 1>(kMul * ltrb + kAdd);
+ }
+
+ ltrb.store(out);
+}
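+// Illustrative calls under the convention above (sizes assumed): a normalized 256x128 texture
+// would pass iw = 1/256.f, ih = 1/128.f, h = 1.f, while a kRectangle texture sampled in texel
+// space would pass iw = 1.f, ih = 1.f, h = 128.f; see tess() for the live call.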
+
+// Normalizes logical src coords and corrects for origin
+static void compute_src_quad(GrSurfaceOrigin origin, const GrQuad& srcQuad,
+ float iw, float ih, float h, GrQuad* out) {
+ // The src quad should not have any perspective
+ SkASSERT(!srcQuad.hasPerspective() && !out->hasPerspective());
+ skvx::Vec<4, float> xs = srcQuad.x4f() * iw;
+ skvx::Vec<4, float> ys = srcQuad.y4f() * ih;
+ if (origin == kBottomLeft_GrSurfaceOrigin) {
+ ys = h - ys;
+ }
+ xs.store(out->xs());
+ ys.store(out->ys());
+ out->setQuadType(srcQuad.quadType());
+}
+
+/**
+ * Op that implements GrTextureOp::Make. It draws textured quads. Each quad can modulate the
+ * texture by a color. The blend with the destination is always src-over. Edge AA is applied per
+ * quad as indicated by its GrQuadAAFlags.
+ */
+class TextureOp final : public GrMeshDrawOp {
+public:
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> proxy,
+ sk_sp<GrColorSpaceXform> textureXform,
+ GrSamplerState::Filter filter,
+ const SkPMColor4f& color,
+ GrTextureOp::Saturate saturate,
+ GrAAType aaType,
+ GrQuadAAFlags aaFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const SkRect* domain) {
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+ return pool->allocate<TextureOp>(std::move(proxy), std::move(textureXform), filter, color,
+ saturate, aaType, aaFlags, deviceQuad, localQuad, domain);
+ }
+ static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ const GrRenderTargetContext::TextureSetEntry set[],
+ int cnt,
+ GrSamplerState::Filter filter,
+ GrTextureOp::Saturate saturate,
+ GrAAType aaType,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
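+ // TextureOp is allocated with a variable-length tail: the fProxies[1] member declares the
+ // first slot, so only (cnt - 1) additional Proxy entries are needed.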
+ size_t size = sizeof(TextureOp) + sizeof(Proxy) * (cnt - 1);
+ GrOpMemoryPool* pool = context->priv().opMemoryPool();
+ void* mem = pool->allocate(size);
+ return std::unique_ptr<GrDrawOp>(new (mem) TextureOp(set, cnt, filter, saturate, aaType,
+ constraint, viewMatrix,
+ std::move(textureColorSpaceXform)));
+ }
+
+ ~TextureOp() override {
+ for (unsigned p = 0; p < fProxyCnt; ++p) {
+ fProxies[p].fProxy->unref();
+ }
+ }
+
+ const char* name() const override { return "TextureOp"; }
+
+ void visitProxies(const VisitProxyFunc& func) const override {
+ for (unsigned p = 0; p < fProxyCnt; ++p) {
+ bool mipped = (GrSamplerState::Filter::kMipMap == this->filter());
+ func(fProxies[p].fProxy, GrMipMapped(mipped));
+ }
+ }
+
+#ifdef SK_DEBUG
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# draws: %d\n", fQuads.count());
+ auto iter = fQuads.iterator();
+ for (unsigned p = 0; p < fProxyCnt; ++p) {
+ str.appendf("Proxy ID: %d, Filter: %d\n", fProxies[p].fProxy->uniqueID().asUInt(),
+ static_cast<int>(fFilter));
+ int i = 0;
+ while(i < fProxies[p].fQuadCnt && iter.next()) {
+ const GrQuad& quad = iter.deviceQuad();
+ const GrQuad& uv = iter.localQuad();
+ const ColorDomainAndAA& info = iter.metadata();
+ str.appendf(
+ "%d: Color: 0x%08x, Domain(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
+ " UVs [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
+ " Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
+ i, info.fColor.toBytes_RGBA(), info.fHasDomain, info.fDomainRect.fLeft,
+ info.fDomainRect.fTop, info.fDomainRect.fRight, info.fDomainRect.fBottom,
+ uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
+ uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY,
+ quad.point(0).fX, quad.point(0).fY, quad.point(1).fX, quad.point(1).fY,
+ quad.point(2).fX, quad.point(2).fY, quad.point(3).fX, quad.point(3).fY);
+
+ i++;
+ }
+ }
+ str += INHERITED::dumpInfo();
+ return str;
+ }
+#endif
+
+ GrProcessorSet::Analysis finalize(
+ const GrCaps& caps, const GrAppliedClip*, bool hasMixedSampledCoverage,
+ GrClampType clampType) override {
+ fColorType = static_cast<unsigned>(ColorType::kNone);
+ auto iter = fQuads.metadata();
+ while(iter.next()) {
+ auto colorType = GrQuadPerEdgeAA::MinColorType(iter->fColor, clampType, caps);
+ fColorType = SkTMax(fColorType, static_cast<unsigned>(colorType));
+ }
+ return GrProcessorSet::EmptySetAnalysis();
+ }
+
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ return this->aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
+ : FixedFunctionFlags::kNone;
+ }
+
+ DEFINE_OP_CLASS_ID
+
+private:
+ friend class ::GrOpMemoryPool;
+
+ struct ColorDomainAndAA {
+ ColorDomainAndAA(const SkPMColor4f& color, const SkRect* domainRect, GrQuadAAFlags aaFlags)
+ : fColor(color)
+ , fDomainRect(domainRect ? *domainRect : SkRect::MakeEmpty())
+ , fHasDomain(static_cast<unsigned>(domainRect ? Domain::kYes : Domain::kNo))
+ , fAAFlags(static_cast<unsigned>(aaFlags)) {
+ SkASSERT(fAAFlags == static_cast<unsigned>(aaFlags));
+ }
+
+ SkPMColor4f fColor;
+ SkRect fDomainRect;
+ unsigned fHasDomain : 1;
+ unsigned fAAFlags : 4;
+
+ Domain domain() const { return Domain(fHasDomain); }
+ GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
+ };
+ struct Proxy {
+ GrTextureProxy* fProxy;
+ int fQuadCnt;
+ };
+
+ // dstQuad should be the geometry transformed by the view matrix. If domainRect
+ // is not null, it will be used to apply the strict src rect constraint.
+ TextureOp(sk_sp<GrTextureProxy> proxy,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform,
+ GrSamplerState::Filter filter,
+ const SkPMColor4f& color,
+ GrTextureOp::Saturate saturate,
+ GrAAType aaType,
+ GrQuadAAFlags aaFlags,
+ const GrQuad& dstQuad,
+ const GrQuad& srcQuad,
+ const SkRect* domainRect)
+ : INHERITED(ClassID())
+ , fQuads(1, true /* includes locals */)
+ , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
+ , fSaturate(static_cast<unsigned>(saturate))
+ , fFilter(static_cast<unsigned>(filter))
+ , fPrePrepared(false) {
+ // Clean up disparities between the overall aa type and edge configuration and apply
+ // optimizations based on the rect and matrix when appropriate
+ GrQuadUtils::ResolveAAType(aaType, aaFlags, dstQuad, &aaType, &aaFlags);
+ fAAType = static_cast<unsigned>(aaType);
+
+ // We expect our caller to have already caught this optimization.
+ SkASSERT(!domainRect || !domainRect->contains(proxy->getWorstCaseBoundsRect()));
+
+ // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
+ // If we don't have (or determined we don't need) coverage AA then we can skip using a
+ // domain.
+ if (domainRect && this->filter() == GrSamplerState::Filter::kNearest &&
+ aaType != GrAAType::kCoverage) {
+ domainRect = nullptr;
+ }
+
+ fQuads.append(dstQuad, {color, domainRect, aaFlags}, &srcQuad);
+
+ fProxyCnt = 1;
+ fProxies[0] = {proxy.release(), 1};
+ this->setBounds(dstQuad.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
+ IsHairline::kNo);
+ fDomain = static_cast<unsigned>(domainRect != nullptr);
+ }
+ TextureOp(const GrRenderTargetContext::TextureSetEntry set[],
+ int cnt,
+ GrSamplerState::Filter filter,
+ GrTextureOp::Saturate saturate,
+ GrAAType aaType,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform)
+ : INHERITED(ClassID())
+ , fQuads(cnt, true /* includes locals */)
+ , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
+ , fSaturate(static_cast<unsigned>(saturate))
+ , fFilter(static_cast<unsigned>(filter))
+ , fPrePrepared(false) {
+ fProxyCnt = SkToUInt(cnt);
+ SkRect bounds = SkRectPriv::MakeLargestInverted();
+ GrAAType overallAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
+ bool mustFilter = false;
+ bool allOpaque = true;
+ Domain netDomain = Domain::kNo;
+ for (unsigned p = 0; p < fProxyCnt; ++p) {
+ fProxies[p].fProxy = SkRef(set[p].fProxy.get());
+ fProxies[p].fQuadCnt = 1;
+ SkASSERT(fProxies[p].fProxy->textureType() == fProxies[0].fProxy->textureType());
+ SkASSERT(fProxies[p].fProxy->config() == fProxies[0].fProxy->config());
+
+ SkMatrix ctm = viewMatrix;
+ if (set[p].fPreViewMatrix) {
+ ctm.preConcat(*set[p].fPreViewMatrix);
+ }
+
+ // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
+ // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
+ GrQuad quad, srcQuad;
+ if (set[p].fDstClipQuad) {
+ quad = GrQuad::MakeFromSkQuad(set[p].fDstClipQuad, ctm);
+
+ SkPoint srcPts[4];
+ GrMapRectPoints(set[p].fDstRect, set[p].fSrcRect, set[p].fDstClipQuad, srcPts, 4);
+ srcQuad = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
+ } else {
+ quad = GrQuad::MakeFromRect(set[p].fDstRect, ctm);
+ srcQuad = GrQuad(set[p].fSrcRect);
+ }
+
+ if (!mustFilter && this->filter() != GrSamplerState::Filter::kNearest) {
+ mustFilter = filter_has_effect(srcQuad, quad);
+ }
+
+ bounds.joinPossiblyEmptyRect(quad.bounds());
+ GrQuadAAFlags aaFlags;
+ // Don't update the overall aaType yet; it might be inappropriate for some of the quads
+ GrAAType aaForQuad;
+ GrQuadUtils::ResolveAAType(aaType, set[p].fAAFlags, quad, &aaForQuad, &aaFlags);
+ // ResolveAAType sets aaForQuad to either aaType or kNone; it never switches between
+ // AA methods.
+ SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
+ if (overallAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
+ overallAAType = aaType;
+ }
+
+ // Calculate metadata for the entry
+ const SkRect* domainForQuad = nullptr;
+ if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
+ // Check (briefly) if the strict constraint is needed for this set entry
+ if (!set[p].fSrcRect.contains(fProxies[p].fProxy->getWorstCaseBoundsRect()) &&
+ (mustFilter || aaForQuad == GrAAType::kCoverage)) {
+ // Can't rely on hardware clamping and the draw will access outer texels
+ // for AA and/or bilerp
+ netDomain = Domain::kYes;
+ domainForQuad = &set[p].fSrcRect;
+ }
+ }
+ float alpha = SkTPin(set[p].fAlpha, 0.f, 1.f);
+ allOpaque &= (1.f == alpha);
+ SkPMColor4f color{alpha, alpha, alpha, alpha};
+ fQuads.append(quad, {color, domainForQuad, aaFlags}, &srcQuad);
+ }
+ fAAType = static_cast<unsigned>(overallAAType);
+ if (!mustFilter) {
+ fFilter = static_cast<unsigned>(GrSamplerState::Filter::kNearest);
+ }
+ this->setBounds(bounds, HasAABloat(this->aaType() == GrAAType::kCoverage),
+ IsHairline::kNo);
+ fDomain = static_cast<unsigned>(netDomain);
+ }
+
+ void tess(void* v, const VertexSpec& spec, const GrTextureProxy* proxy,
+ GrQuadBuffer<ColorDomainAndAA>::Iter* iter, int cnt) const {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ auto origin = proxy->origin();
+ const auto* texture = proxy->peekTexture();
+ float iw, ih, h;
+ if (proxy->textureType() == GrTextureType::kRectangle) {
+ iw = ih = 1.f;
+ h = texture->height();
+ } else {
+ iw = 1.f / texture->width();
+ ih = 1.f / texture->height();
+ h = 1.f;
+ }
+
+ int i = 0;
+ // Explicit ctor ensures ws are 1s, which compute_src_quad requires
+ GrQuad srcQuad(SkRect::MakeEmpty());
+ SkRect domain;
+ while(i < cnt && iter->next()) {
+ SkASSERT(iter->isLocalValid());
+ const ColorDomainAndAA& info = iter->metadata();
+ // Must correct the texture coordinates and domain now that the real texture size
+ // is known
+ compute_src_quad(origin, iter->localQuad(), iw, ih, h, &srcQuad);
+ compute_domain(info.domain(), this->filter(), origin, info.fDomainRect, iw, ih, h,
+ &domain);
+ v = GrQuadPerEdgeAA::Tessellate(v, spec, iter->deviceQuad(), info.fColor, srcQuad,
+ domain, info.aaFlags());
+ i++;
+ }
+ }
+
+ void onPrePrepareDraws(GrRecordingContext* context) override {
+ SkASSERT(!fPrePrepared);
+ // Pull forward the tessellation of the quads to here
+
+ //GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
+ fPrePrepared = true;
+ }
+
+#ifdef SK_DEBUG
+ void validate() const override {
+ auto textureType = fProxies[0].fProxy->textureType();
+ const GrSwizzle& swizzle = fProxies[0].fProxy->textureSwizzle();
+ GrAAType aaType = this->aaType();
+
+ for (const auto& op : ChainRange<TextureOp>(this)) {
+ for (unsigned p = 0; p < op.fProxyCnt; ++p) {
+ auto* proxy = op.fProxies[p].fProxy;
+ SkASSERT(proxy);
+ SkASSERT(proxy->textureType() == textureType);
+ SkASSERT(proxy->textureSwizzle() == swizzle);
+ }
+
+ // Each individual op must be a single aaType. kCoverage and kNone ops can chain
+ // together but kMSAA ones do not.
+ if (aaType == GrAAType::kCoverage || aaType == GrAAType::kNone) {
+ SkASSERT(op.aaType() == GrAAType::kCoverage || op.aaType() == GrAAType::kNone);
+ } else {
+ SkASSERT(aaType == GrAAType::kMSAA && op.aaType() == GrAAType::kMSAA);
+ }
+ }
+ }
+#endif
+
+ VertexSpec characterize(int* numProxies, int* numTotalQuads) const {
+ GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
+ ColorType colorType = ColorType::kNone;
+ GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
+ Domain domain = Domain::kNo;
+ GrAAType overallAAType = this->aaType();
+
+ *numProxies = 0;
+ *numTotalQuads = 0;
+
+ for (const auto& op : ChainRange<TextureOp>(this)) {
+ if (op.fQuads.deviceQuadType() > quadType) {
+ quadType = op.fQuads.deviceQuadType();
+ }
+ if (op.fQuads.localQuadType() > srcQuadType) {
+ srcQuadType = op.fQuads.localQuadType();
+ }
+ if (op.fDomain) {
+ domain = Domain::kYes;
+ }
+ colorType = SkTMax(colorType, static_cast<ColorType>(op.fColorType));
+ *numProxies += op.fProxyCnt;
+ for (unsigned p = 0; p < op.fProxyCnt; ++p) {
+ *numTotalQuads += op.fProxies[p].fQuadCnt;
+ }
+ if (op.aaType() == GrAAType::kCoverage) {
+ overallAAType = GrAAType::kCoverage;
+ }
+ }
+
+ return VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true, domain,
+ overallAAType, /* alpha as coverage */ true);
+ }
+
+ // onPrePrepareDraws may or may not have been called at this point
+ void onPrepareDraws(Target* target) override {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+
+ SkDEBUGCODE(this->validate();)
+
+ int numProxies, numTotalQuads;
+
+ const VertexSpec vertexSpec = this->characterize(&numProxies, &numTotalQuads);
+
+ // We'll use a dynamic state array for the GP textures when there are multiple ops.
+ // Otherwise, we use fixed dynamic state to specify the single op's proxy.
+ GrPipeline::DynamicStateArrays* dynamicStateArrays = nullptr;
+ GrPipeline::FixedDynamicState* fixedDynamicState;
+ if (numProxies > 1) {
+ dynamicStateArrays = target->allocDynamicStateArrays(numProxies, 1, false);
+ fixedDynamicState = target->makeFixedDynamicState(0);
+ } else {
+ fixedDynamicState = target->makeFixedDynamicState(1);
+ fixedDynamicState->fPrimitiveProcessorTextures[0] = fProxies[0].fProxy;
+ }
+
+ size_t vertexSize = vertexSpec.vertexSize();
+
+ GrMesh* meshes = target->allocMeshes(numProxies);
+ sk_sp<const GrBuffer> vbuffer;
+ int vertexOffsetInBuffer = 0;
+ int numQuadVerticesLeft = numTotalQuads * vertexSpec.verticesPerQuad();
+ int numAllocatedVertices = 0;
+ void* vdata = nullptr;
+
+ int m = 0;
+ for (const auto& op : ChainRange<TextureOp>(this)) {
+ auto iter = op.fQuads.iterator();
+ for (unsigned p = 0; p < op.fProxyCnt; ++p) {
+ int quadCnt = op.fProxies[p].fQuadCnt;
+ auto* proxy = op.fProxies[p].fProxy;
+ int meshVertexCnt = quadCnt * vertexSpec.verticesPerQuad();
+ if (numAllocatedVertices < meshVertexCnt) {
+ vdata = target->makeVertexSpaceAtLeast(
+ vertexSize, meshVertexCnt, numQuadVerticesLeft, &vbuffer,
+ &vertexOffsetInBuffer, &numAllocatedVertices);
+ SkASSERT(numAllocatedVertices <= numQuadVerticesLeft);
+ if (!vdata) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ }
+ SkASSERT(numAllocatedVertices >= meshVertexCnt);
+
+ op.tess(vdata, vertexSpec, proxy, &iter, quadCnt);
+
+ if (!GrQuadPerEdgeAA::ConfigureMeshIndices(target, &(meshes[m]), vertexSpec,
+ quadCnt)) {
+ SkDebugf("Could not allocate indices");
+ return;
+ }
+ meshes[m].setVertexData(vbuffer, vertexOffsetInBuffer);
+ if (dynamicStateArrays) {
+ dynamicStateArrays->fPrimitiveProcessorTextures[m] = proxy;
+ }
+ ++m;
+ numAllocatedVertices -= meshVertexCnt;
+ numQuadVerticesLeft -= meshVertexCnt;
+ vertexOffsetInBuffer += meshVertexCnt;
+ vdata = reinterpret_cast<char*>(vdata) + vertexSize * meshVertexCnt;
+ }
+ // If quad counts per proxy were calculated correctly, the entire iterator should have
+ // been consumed.
+ SkASSERT(!iter.next());
+ }
+ SkASSERT(!numQuadVerticesLeft);
+ SkASSERT(!numAllocatedVertices);
+
+ sk_sp<GrGeometryProcessor> gp;
+
+ {
+ auto textureType = fProxies[0].fProxy->textureType();
+ const GrSwizzle& swizzle = fProxies[0].fProxy->textureSwizzle();
+
+ GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
+ this->filter());
+
+ auto saturate = static_cast<GrTextureOp::Saturate>(fSaturate);
+
+ GrGpu* gpu = target->resourceProvider()->priv().gpu();
+ uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(
+ samplerState, fProxies[0].fProxy->backendFormat());
+
+ gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
+ vertexSpec, *target->caps().shaderCaps(), textureType, samplerState, swizzle,
+ extraSamplerKey, std::move(fTextureColorSpaceXform), saturate);
+
+ SkASSERT(vertexSize == gp->vertexStride());
+ }
+
+ target->recordDraw(
+ std::move(gp), meshes, numProxies, fixedDynamicState, dynamicStateArrays);
+ }
+
+ void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
+ auto pipelineFlags = (GrAAType::kMSAA == this->aaType())
+ ? GrPipeline::InputFlags::kHWAntialias
+ : GrPipeline::InputFlags::kNone;
+ flushState->executeDrawsAndUploadsForMeshDrawOp(
+ this, chainBounds, GrProcessorSet::MakeEmptySet(), pipelineFlags);
+ }
+
+ CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ const auto* that = t->cast<TextureOp>();
+
+ if (fPrePrepared || that->fPrePrepared) {
+ // This should never happen (since only DDL-recorded ops should be prePrepared)
+ // but, in any case, we should never combine ops that have been prePrepared.
+ return CombineResult::kCannotCombine;
+ }
+
+ if (fDomain != that->fDomain) {
+ // It is technically possible to combine operations across domain modes, but performance
+ // testing suggests it's better to make more draw calls where some take advantage of
+ // the more optimal shader path without coordinate clamping.
+ return CombineResult::kCannotCombine;
+ }
+ if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
+ that->fTextureColorSpaceXform.get())) {
+ return CombineResult::kCannotCombine;
+ }
+ bool upgradeToCoverageAAOnMerge = false;
+ if (this->aaType() != that->aaType()) {
+ if (!((this->aaType() == GrAAType::kCoverage && that->aaType() == GrAAType::kNone) ||
+ (that->aaType() == GrAAType::kCoverage && this->aaType() == GrAAType::kNone))) {
+ return CombineResult::kCannotCombine;
+ }
+ upgradeToCoverageAAOnMerge = true;
+ }
+ if (fSaturate != that->fSaturate) {
+ return CombineResult::kCannotCombine;
+ }
+ if (fFilter != that->fFilter) {
+ return CombineResult::kCannotCombine;
+ }
+ auto thisProxy = fProxies[0].fProxy;
+ auto thatProxy = that->fProxies[0].fProxy;
+ if (fProxyCnt > 1 || that->fProxyCnt > 1 ||
+ thisProxy->uniqueID() != thatProxy->uniqueID()) {
+ // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
+ if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
+ caps.dynamicStateArrayGeometryProcessorTextureSupport()) {
+ return CombineResult::kMayChain;
+ }
+ return CombineResult::kCannotCombine;
+ }
+
+ fDomain |= that->fDomain;
+ fColorType = SkTMax(fColorType, that->fColorType);
+ if (upgradeToCoverageAAOnMerge) {
+ fAAType = static_cast<unsigned>(GrAAType::kCoverage);
+ }
+
+ // Concatenate quad lists together
+ fQuads.concat(that->fQuads);
+ fProxies[0].fQuadCnt += that->fQuads.count();
+
+ return CombineResult::kMerged;
+ }
+
+ GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
+ GrSamplerState::Filter filter() const { return static_cast<GrSamplerState::Filter>(fFilter); }
+
+ GrQuadBuffer<ColorDomainAndAA> fQuads;
+ sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
+ unsigned fSaturate : 1;
+ unsigned fFilter : 2;
+ unsigned fAAType : 2;
+ unsigned fDomain : 1;
+ unsigned fColorType : 2;
+ GR_STATIC_ASSERT(GrQuadPerEdgeAA::kColorTypeCount <= 4);
+ unsigned fPrePrepared : 1;
+ unsigned fProxyCnt : 32 - 7;
+ Proxy fProxies[1];
+
+ static_assert(GrQuad::kTypeCount <= 4, "GrQuad::Type does not fit in 2 bits");
+
+ typedef GrMeshDrawOp INHERITED;
+};
+
+} // anonymous namespace
+
+namespace GrTextureOp {
+
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
+ sk_sp<GrTextureProxy> proxy,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform> textureXform,
+ GrSamplerState::Filter filter,
+ const SkPMColor4f& color,
+ Saturate saturate,
+ SkBlendMode blendMode,
+ GrAAType aaType,
+ GrQuadAAFlags aaFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const SkRect* domain) {
+ // Apply optimizations that are valid whether the draw ends up using TextureOp or GrFillRectOp
+ if (domain && domain->contains(proxy->getWorstCaseBoundsRect())) {
+ // No need for a shader-based domain if hardware clamping achieves the same effect
+ domain = nullptr;
+ }
+
+ if (filter != GrSamplerState::Filter::kNearest && !filter_has_effect(localQuad, deviceQuad)) {
+ filter = GrSamplerState::Filter::kNearest;
+ }
+
+ if (blendMode == SkBlendMode::kSrcOver) {
+ return TextureOp::Make(context, std::move(proxy), std::move(textureXform), filter, color,
+ saturate, aaType, aaFlags, deviceQuad, localQuad, domain);
+ } else {
+ // Emulate complex blending using GrFillRectOp
+ GrPaint paint;
+ paint.setColor4f(color);
+ paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+ if (domain) {
+ // Update domain to match what GrTextureOp computes during tessellation, using top-left
+ // as the origin so that it doesn't depend on final texture size (which the FP handles
+ // later, as well as accounting for the true origin).
+ SkRect correctedDomain;
+ compute_domain(Domain::kYes, filter, kTopLeft_GrSurfaceOrigin, *domain,
+ 1.f, 1.f, proxy->height(), &correctedDomain);
+ fp = GrTextureDomainEffect::Make(std::move(proxy), srcColorType, SkMatrix::I(),
+ correctedDomain, GrTextureDomain::kClamp_Mode, filter);
+ } else {
+ fp = GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, SkMatrix::I(), filter);
+ }
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
+ paint.addColorFragmentProcessor(std::move(fp));
+ if (saturate == GrTextureOp::Saturate::kYes) {
+ paint.addColorFragmentProcessor(GrSaturateProcessor::Make());
+ }
+
+ return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
+ deviceQuad, localQuad);
+ }
+}
+
+std::unique_ptr<GrDrawOp> MakeSet(GrRecordingContext* context,
+ const GrRenderTargetContext::TextureSetEntry set[],
+ int cnt,
+ GrSamplerState::Filter filter,
+ Saturate saturate,
+ GrAAType aaType,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
+ return TextureOp::Make(context, set, cnt, filter, saturate, aaType, constraint, viewMatrix,
+ std::move(textureColorSpaceXform));
+}
+
+} // namespace GrTextureOp
+
+#if GR_TEST_UTILS
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+
+GR_DRAW_OP_TEST_DEFINE(TextureOp) {
+ GrSurfaceDesc desc;
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fHeight = random->nextULessThan(90) + 10;
+ desc.fWidth = random->nextULessThan(90) + 10;
+ auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
+ GrMipMapped mipMapped = random->nextBool() ? GrMipMapped::kYes : GrMipMapped::kNo;
+ SkBackingFit fit = SkBackingFit::kExact;
+ if (mipMapped == GrMipMapped::kNo) {
+ fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
+ }
+ const GrBackendFormat format =
+ context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
+ GrRenderable::kNo);
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
+ format, desc, GrRenderable::kNo, 1, origin, mipMapped, fit, SkBudgeted::kNo,
+ GrProtected::kNo, GrInternalSurfaceFlags::kNone);
+
+ SkRect rect = GrTest::TestRect(random);
+ SkRect srcRect;
+ srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
+ srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
+ srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
+ srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
+ SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
+ SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
+ GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
+ static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
+ while (mipMapped == GrMipMapped::kNo && filter == GrSamplerState::Filter::kMipMap) {
+ filter = (GrSamplerState::Filter)random->nextULessThan(
+ static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
+ }
+ auto texXform = GrTest::TestColorXform(random);
+ GrAAType aaType = GrAAType::kNone;
+ if (random->nextBool()) {
+ aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
+ }
+ GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
+ aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
+ bool useDomain = random->nextBool();
+ auto saturate = random->nextBool() ? GrTextureOp::Saturate::kYes : GrTextureOp::Saturate::kNo;
+ return GrTextureOp::Make(context, std::move(proxy), GrColorType::kRGBA_8888,
+ std::move(texXform), filter, color, saturate, SkBlendMode::kSrcOver,
+ aaType, aaFlags, GrQuad::MakeFromRect(rect, viewMatrix),
+ GrQuad(srcRect), useDomain ? &srcRect : nullptr);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/ops/GrTextureOp.h b/gfx/skia/skia/src/gpu/ops/GrTextureOp.h
new file mode 100644
index 0000000000..da7a6b4eb1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/ops/GrTextureOp.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrTextureOp_DEFINED
+#define GrTextureOp_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSamplerState.h"
+
+class GrColorSpaceXform;
+class GrDrawOp;
+class GrTextureProxy;
+struct SkRect;
+class SkMatrix;
+
+namespace GrTextureOp {
+
+/**
+ * Controls whether saturate() is called after the texture is color-converted to ensure all
+ * color values are in 0..1 range.
+ */
+enum class Saturate : bool { kNo = false, kYes = true };
+
+/**
+ * Creates an op that draws a sub-quadrilateral of a texture. The passed color is modulated by the
+ * texture's color. 'deviceQuad' specifies the device-space coordinates to draw, using 'localQuad'
+ * to map into the proxy's texture space. If non-null, 'domain' represents the boundary for the
+ * strict src rect constraint. If GrAAType is kCoverage then AA is applied to the edges
+ * indicated by GrQuadAAFlags. Otherwise, GrQuadAAFlags is ignored.
+ *
+ * This is functionally very similar to GrFillRectOp::Make, except that the GrPaint has been
+ * deconstructed into the texture, filter, modulating color, and blend mode. When the blend mode
+ * is not src-over, this will return a GrFillRectOp with a paint that samples the proxy.
+ */
+std::unique_ptr<GrDrawOp> Make(GrRecordingContext*,
+ sk_sp<GrTextureProxy>,
+ GrColorType srcColorType,
+ sk_sp<GrColorSpaceXform>,
+ GrSamplerState::Filter,
+ const SkPMColor4f&,
+ Saturate,
+ SkBlendMode,
+ GrAAType,
+ GrQuadAAFlags,
+ const GrQuad& deviceQuad,
+ const GrQuad& localQuad,
+ const SkRect* domain = nullptr);
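+
+// A minimal usage sketch (illustrative only, not upstream documentation; 'rtc', 'clip', and the
+// quads are assumed to exist at the call site):
+//
+//   auto op = GrTextureOp::Make(context, std::move(proxy), GrColorType::kRGBA_8888,
+//                               std::move(xform), GrSamplerState::Filter::kBilerp,
+//                               {1, 1, 1, 1}, GrTextureOp::Saturate::kNo, SkBlendMode::kSrcOver,
+//                               GrAAType::kCoverage, GrQuadAAFlags::kAll,
+//                               deviceQuad, localQuad);
+//   rtc->addDrawOp(clip, std::move(op));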
+
+// Unlike the single-proxy factory, this only supports src-over blending.
+std::unique_ptr<GrDrawOp> MakeSet(GrRecordingContext*,
+ const GrRenderTargetContext::TextureSetEntry[],
+ int cnt,
+ GrSamplerState::Filter,
+ Saturate,
+ GrAAType,
+ SkCanvas::SrcRectConstraint,
+ const SkMatrix& viewMatrix,
+ sk_sp<GrColorSpaceXform> textureXform);
+
+}  // namespace GrTextureOp
+#endif // GrTextureOp_DEFINED
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasManager.cpp b/gfx/skia/skia/src/gpu/text/GrAtlasManager.cpp
new file mode 100644
index 0000000000..29e32165e4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasManager.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/text/GrAtlasManager.h"
+
+#include "src/gpu/GrGlyph.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/text/GrStrikeCache.h"
+
+GrAtlasManager::GrAtlasManager(GrProxyProvider* proxyProvider, GrStrikeCache* glyphCache,
+ size_t maxTextureBytes,
+ GrDrawOpAtlas::AllowMultitexturing allowMultitexturing)
+ : fAllowMultitexturing{allowMultitexturing}
+ , fProxyProvider{proxyProvider}
+ , fCaps{fProxyProvider->refCaps()}
+ , fGlyphCache{glyphCache}
+ , fAtlasConfig{fCaps->maxTextureSize(), maxTextureBytes} { }
+
+GrAtlasManager::~GrAtlasManager() = default;
+
+static GrColorType mask_format_to_gr_color_type(GrMaskFormat format) {
+ switch (format) {
+ case kA8_GrMaskFormat:
+ return GrColorType::kAlpha_8;
+ case kA565_GrMaskFormat:
+ return GrColorType::kBGR_565;
+ case kARGB_GrMaskFormat:
+ return GrColorType::kRGBA_8888;
+ default:
+ SkDEBUGFAIL("unsupported GrMaskFormat");
+ return GrColorType::kAlpha_8;
+ }
+}
+
+void GrAtlasManager::freeAll() {
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ fAtlases[i] = nullptr;
+ }
+}
+
+bool GrAtlasManager::hasGlyph(GrGlyph* glyph) {
+ SkASSERT(glyph);
+ return this->getAtlas(glyph->fMaskFormat)->hasID(glyph->fID);
+}
+
+// Add the glyph image to the texture atlas that matches the given mask format.
+GrDrawOpAtlas::ErrorCode GrAtlasManager::addToAtlas(
+ GrResourceProvider* resourceProvider,
+ GrStrikeCache* glyphCache,
+ GrTextStrike* strike, GrDrawOpAtlas::AtlasID* id,
+ GrDeferredUploadTarget* target, GrMaskFormat format,
+ int width, int height, const void* image, SkIPoint16* loc) {
+ glyphCache->setStrikeToPreserve(strike);
+ return this->getAtlas(format)->addToAtlas(resourceProvider, id, target, width, height,
+ image, loc);
+}
+
+void GrAtlasManager::addGlyphToBulkAndSetUseToken(GrDrawOpAtlas::BulkUseTokenUpdater* updater,
+ GrGlyph* glyph,
+ GrDeferredUploadToken token) {
+ SkASSERT(glyph);
+ if (updater->add(glyph->fID)) {
+ this->getAtlas(glyph->fMaskFormat)->setLastUseToken(glyph->fID, token);
+ }
+}
+
+#ifdef SK_DEBUG
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrSurfaceContext.h"
+#include "src/gpu/GrSurfaceProxy.h"
+#include "src/gpu/GrTextureProxy.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkStream.h"
+#include <stdio.h>
+
+/**
+ * Write the contents of the surface proxy to a PNG. Returns true if successful.
+ * @param filename Full path to desired file
+ */
+static bool save_pixels(GrContext* context, GrSurfaceProxy* sProxy, GrColorType colorType,
+ const char* filename) {
+ if (!sProxy) {
+ return false;
+ }
+
+ SkImageInfo ii = SkImageInfo::Make(sProxy->width(), sProxy->height(),
+ kRGBA_8888_SkColorType, kPremul_SkAlphaType);
+ SkBitmap bm;
+ if (!bm.tryAllocPixels(ii)) {
+ return false;
+ }
+
+ auto sContext = context->priv().makeWrappedSurfaceContext(sk_ref_sp(sProxy), colorType,
+ kUnknown_SkAlphaType);
+ if (!sContext || !sContext->asTextureProxy()) {
+ return false;
+ }
+
+ bool result = sContext->readPixels(ii, bm.getPixels(), bm.rowBytes(), {0, 0});
+ if (!result) {
+ SkDebugf("------ failed to read pixels for %s\n", filename);
+ return false;
+ }
+
+ // remove any previous version of this file
+ remove(filename);
+
+ SkFILEWStream file(filename);
+ if (!file.isValid()) {
+ SkDebugf("------ failed to create file: %s\n", filename);
+ remove(filename); // remove any partial file
+ return false;
+ }
+
+ if (!SkEncodeImage(&file, bm, SkEncodedImageFormat::kPNG, 100)) {
+ SkDebugf("------ failed to encode %s\n", filename);
+ remove(filename); // remove any partial file
+ return false;
+ }
+
+ return true;
+}
+
+void GrAtlasManager::dump(GrContext* context) const {
+ static int gDumpCount = 0;
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ if (fAtlases[i]) {
+ const sk_sp<GrTextureProxy>* proxies = fAtlases[i]->getProxies();
+ for (uint32_t pageIdx = 0; pageIdx < fAtlases[i]->numActivePages(); ++pageIdx) {
+ SkASSERT(proxies[pageIdx]);
+ SkString filename;
+#ifdef SK_BUILD_FOR_ANDROID
+ filename.printf("/sdcard/fontcache_%d%d%d.png", gDumpCount, i, pageIdx);
+#else
+ filename.printf("fontcache_%d%d%d.png", gDumpCount, i, pageIdx);
+#endif
+ auto ct = mask_format_to_gr_color_type(AtlasIndexToMaskFormat(i));
+ save_pixels(context, proxies[pageIdx].get(), ct, filename.c_str());
+ }
+ }
+ }
+ ++gDumpCount;
+}
+#endif
+
+void GrAtlasManager::setAtlasSizesToMinimum_ForTesting() {
+ // Delete any old atlases.
+ // This should be safe to do as long as we are not in the middle of a flush.
+ for (int i = 0; i < kMaskFormatCount; i++) {
+ fAtlases[i] = nullptr;
+ }
+
+ // Set all the atlas sizes to 1x1 plot each.
+ new (&fAtlasConfig) GrDrawOpAtlasConfig{};
+}
+
+bool GrAtlasManager::initAtlas(GrMaskFormat format) {
+ int index = MaskFormatToAtlasIndex(format);
+ if (fAtlases[index] == nullptr) {
+ GrColorType grColorType = mask_format_to_gr_color_type(format);
+ SkISize atlasDimensions = fAtlasConfig.atlasDimensions(format);
+ SkISize plotDimensions = fAtlasConfig.plotDimensions(format);
+
+ const GrBackendFormat backendFormat = fCaps->getDefaultBackendFormat(grColorType,
+ GrRenderable::kNo);
+
+ fAtlases[index] = GrDrawOpAtlas::Make(
+ fProxyProvider, backendFormat, grColorType,
+ atlasDimensions.width(), atlasDimensions.height(),
+ plotDimensions.width(), plotDimensions.height(),
+ fAllowMultitexturing, &GrStrikeCache::HandleEviction, fGlyphCache);
+ if (!fAtlases[index]) {
+ return false;
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasManager.h b/gfx/skia/skia/src/gpu/text/GrAtlasManager.h
new file mode 100644
index 0000000000..1799d8d6b1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasManager.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasManager_DEFINED
+#define GrAtlasManager_DEFINED
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrProxyProvider.h"
+
+struct GrGlyph;
+class GrTextStrike;
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+/** The GrAtlasManager manages the lifetime of and access to GrDrawOpAtlases.
+ * It is only available at flush and only via the GrOpFlushState.
+ *
+ * This implies that all of the advanced atlasManager functionality (i.e.,
+ * adding glyphs to the atlas) is only available at flush time.
+ */
+class GrAtlasManager : public GrOnFlushCallbackObject {
+public:
+ GrAtlasManager(GrProxyProvider*, GrStrikeCache*,
+ size_t maxTextureBytes, GrDrawOpAtlas::AllowMultitexturing);
+ ~GrAtlasManager() override;
+
+ // Change an expected 565 mask format to 8888 if 565 is not supported (will happen when using
+ // Metal on macOS). The actual conversion of the data is handled in get_packed_glyph_image() in
+ // GrStrikeCache.cpp
+ GrMaskFormat resolveMaskFormat(GrMaskFormat format) const {
+ if (kA565_GrMaskFormat == format &&
+ !fProxyProvider->caps()->getDefaultBackendFormat(GrColorType::kBGR_565,
+ GrRenderable::kNo).isValid()) {
+ format = kARGB_GrMaskFormat;
+ }
+ return format;
+ }
+
+ // If getProxies returns nullptr, the client must not use any other GrStrikeCache functions
+ // that rely on the atlas. This function *must* be called first, before any of those
+ // functions. Note that we can have proxies available but none active (i.e., none
+ // instantiated).
+ const sk_sp<GrTextureProxy>* getProxies(GrMaskFormat format, unsigned int* numActiveProxies) {
+ format = this->resolveMaskFormat(format);
+ if (this->initAtlas(format)) {
+ *numActiveProxies = this->getAtlas(format)->numActivePages();
+ return this->getAtlas(format)->getProxies();
+ }
+ *numActiveProxies = 0;
+ return nullptr;
+ }
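+
+ // Illustrative call sequence for the protocol above (names assumed):
+ //   unsigned numActive;
+ //   if (atlasManager->getProxies(maskFormat, &numActive)) {
+ //       // atlas-dependent functions are now safe to use for 'maskFormat'
+ //   }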
+
+ void freeAll();
+
+ bool hasGlyph(GrGlyph* glyph);
+
+ // To ensure the GrDrawOpAtlas does not evict the glyph mask from its texture backing store,
+ // the client must pass in the current op token along with the GrGlyph.
+ // A BulkUseTokenUpdater is used to manage bulk last-use-token updating in the atlas.
+ // For convenience, this function will also set the use token for the current glyph if
+ // required. NOTE: the bulk uploader is only valid if the subrun has a valid atlasGeneration.
+ void addGlyphToBulkAndSetUseToken(GrDrawOpAtlas::BulkUseTokenUpdater*, GrGlyph*,
+ GrDeferredUploadToken);
+
+ void setUseTokenBulk(const GrDrawOpAtlas::BulkUseTokenUpdater& updater,
+ GrDeferredUploadToken token,
+ GrMaskFormat format) {
+ this->getAtlas(format)->setLastUseTokenBulk(updater, token);
+ }
+
+    // Add to the texture atlas that matches this format.
+ GrDrawOpAtlas::ErrorCode addToAtlas(
+ GrResourceProvider*, GrStrikeCache*, GrTextStrike*,
+ GrDrawOpAtlas::AtlasID*, GrDeferredUploadTarget*, GrMaskFormat,
+ int width, int height, const void* image, SkIPoint16* loc);
+
+ // Some clients may wish to verify the integrity of the texture backing store of the
+ // GrDrawOpAtlas. The atlasGeneration returned below is a monotonically increasing number which
+ // changes every time something is removed from the texture backing store.
+ uint64_t atlasGeneration(GrMaskFormat format) const {
+ return this->getAtlas(format)->atlasGeneration();
+ }
+
+ // GrOnFlushCallbackObject overrides
+
+ void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t*, int) override {
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ if (fAtlases[i]) {
+ fAtlases[i]->instantiate(onFlushRP);
+ }
+ }
+ }
+
+ void postFlush(GrDeferredUploadToken startTokenForNextFlush,
+ const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ if (fAtlases[i]) {
+ fAtlases[i]->compact(startTokenForNextFlush);
+ }
+ }
+ }
+
+ // The AtlasGlyph cache always survives freeGpuResources so we want it to remain in the active
+ // OnFlushCallbackObject list
+ bool retainOnFreeGpuResources() override { return true; }
+
+ ///////////////////////////////////////////////////////////////////////////
+    // Functions intended for debug use only
+#ifdef SK_DEBUG
+ void dump(GrContext* context) const;
+#endif
+
+ void setAtlasSizesToMinimum_ForTesting();
+ void setMaxPages_TestingOnly(uint32_t maxPages);
+
+private:
+ bool initAtlas(GrMaskFormat);
+
+ // There is a 1:1 mapping between GrMaskFormats and atlas indices
+ static int MaskFormatToAtlasIndex(GrMaskFormat format) { return static_cast<int>(format); }
+ static GrMaskFormat AtlasIndexToMaskFormat(int idx) { return static_cast<GrMaskFormat>(idx); }
+
+ GrDrawOpAtlas* getAtlas(GrMaskFormat format) const {
+ format = this->resolveMaskFormat(format);
+ int atlasIndex = MaskFormatToAtlasIndex(format);
+ SkASSERT(fAtlases[atlasIndex]);
+ return fAtlases[atlasIndex].get();
+ }
+
+ GrDrawOpAtlas::AllowMultitexturing fAllowMultitexturing;
+ std::unique_ptr<GrDrawOpAtlas> fAtlases[kMaskFormatCount];
+ GrProxyProvider* fProxyProvider;
+ sk_sp<const GrCaps> fCaps;
+ GrStrikeCache* fGlyphCache;
+ GrDrawOpAtlasConfig fAtlasConfig;
+
+ typedef GrOnFlushCallbackObject INHERITED;
+};
+
+#endif // GrAtlasManager_DEFINED
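A rough sketch of the getProxies() calling contract described above; this assumes
flush-time context, and atlasManager, fmt, and useProxy() are illustrative stand-ins:

    unsigned int numActiveProxies = 0;
    const sk_sp<GrTextureProxy>* proxies = atlasManager->getProxies(fmt, &numActiveProxies);
    if (!proxies) {
        return;  // atlas init failed: no further atlas-dependent calls allowed
    }
    // Proxies may exist but be uninstantiated; only numActiveProxies pages are live.
    for (unsigned int i = 0; i < numActiveProxies; ++i) {
        useProxy(proxies[i]);  // hypothetical consumer
    }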
diff --git a/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp
new file mode 100644
index 0000000000..a760d6ef6e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/text/GrDistanceFieldAdjustTable.h"
+
+#include "src/core/SkScalerContext.h"
+
+SkDEBUGCODE(static const int kExpectedDistanceAdjustTableSize = 8;)
+
+SkScalar* build_distance_adjust_table(SkScalar paintGamma, SkScalar deviceGamma) {
+    // This is used for an approximation of the mask gamma hack, used by raster and bitmap
+    // text. The mask gamma hack is based on guessing what the blend color is going to
+    // be, and adjusting the mask so that, when run through the linear blend, it will
+    // produce the value closest to the desired result. However, in practice this means
+    // that the 'adjusted' mask is just increasing or decreasing the coverage of
+    // the mask depending on what we think it will blit against. For black (on
+    // assumed white) this means that coverages are decreased (on a curve). For white (on
+    // assumed black) this means that coverages are increased (on a curve). At
+    // middle (perceptual) gray (which could be blit against anything) the coverages
+    // remain the same.
+ //
+ // The idea here is that instead of determining the initial (real) coverage and
+ // then adjusting that coverage, we determine an adjusted coverage directly by
+ // essentially manipulating the geometry (in this case, the distance to the glyph
+ // edge). So for black (on assumed white) this thins a bit; for white (on
+ // assumed black) this fake bolds the geometry a bit.
+ //
+    // The distance adjustment is calculated by determining the actual coverage value which,
+    // when fed into the mask gamma table, gives us an 'adjusted coverage' value of 0.5. This
+    // actual coverage value (assuming it's between 0 and 1) corresponds to a distance from the
+    // actual edge. So by subtracting this distance adjustment and computing without the
+    // coverage adjustment we should get 0.5 coverage at the same point.
+ //
+ // This has several implications:
+ // For non-gray lcd smoothed text, each subpixel essentially is using a
+ // slightly different geometry.
+ //
+ // For black (on assumed white) this may not cover some pixels which were
+ // previously covered; however those pixels would have been only slightly
+ // covered and that slight coverage would have been decreased anyway. Also, some pixels
+ // which were previously fully covered may no longer be fully covered.
+ //
+ // For white (on assumed black) this may cover some pixels which weren't
+ // previously covered at all.
+
+ int width, height;
+ size_t size;
+
+#ifdef SK_GAMMA_CONTRAST
+ SkScalar contrast = SK_GAMMA_CONTRAST;
+#else
+ SkScalar contrast = 0.5f;
+#endif
+
+ size = SkScalerContext::GetGammaLUTSize(contrast, paintGamma, deviceGamma,
+ &width, &height);
+
+ SkASSERT(kExpectedDistanceAdjustTableSize == height);
+ SkScalar* table = new SkScalar[height];
+
+ SkAutoTArray<uint8_t> data((int)size);
+ if (!SkScalerContext::GetGammaLUTData(contrast, paintGamma, deviceGamma, data.get())) {
+ // if no valid data is available simply do no adjustment
+ for (int row = 0; row < height; ++row) {
+ table[row] = 0;
+ }
+ return table;
+ }
+
+ // find the inverse points where we cross 0.5
+ // binsearch might be better, but we only need to do this once on creation
+ for (int row = 0; row < height; ++row) {
+ uint8_t* rowPtr = data.get() + row*width;
+ for (int col = 0; col < width - 1; ++col) {
+ if (rowPtr[col] <= 127 && rowPtr[col + 1] >= 128) {
+ // compute point where a mask value will give us a result of 0.5
+ float interp = (127.5f - rowPtr[col]) / (rowPtr[col + 1] - rowPtr[col]);
+ float borderAlpha = (col + interp) / 255.f;
+
+ // compute t value for that alpha
+ // this is an approximate inverse for smoothstep()
+ float t = borderAlpha*(borderAlpha*(4.0f*borderAlpha - 6.0f) + 5.0f) / 3.0f;
+
+ // compute distance which gives us that t value
+ const float kDistanceFieldAAFactor = 0.65f; // should match SK_DistanceFieldAAFactor
+ float d = 2.0f*kDistanceFieldAAFactor*t - kDistanceFieldAAFactor;
+
+ table[row] = d;
+ break;
+ }
+ }
+ }
+
+ return table;
+}
+
+void GrDistanceFieldAdjustTable::buildDistanceAdjustTables() {
+ fTable = build_distance_adjust_table(SK_GAMMA_EXPONENT, SK_GAMMA_EXPONENT);
+ fGammaCorrectTable = build_distance_adjust_table(SK_Scalar1, SK_Scalar1);
+}
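A standalone check of the smoothstep-inverse arithmetic in
build_distance_adjust_table(), assuming the same 0.65 AA factor: a row whose 50%
coverage crossing sits exactly on the glyph edge should need no adjustment.

    #include <cstdio>

    int main() {
        const float kAAFactor = 0.65f;  // mirrors kDistanceFieldAAFactor
        float borderAlpha = 0.5f;       // mask crosses 0.5 exactly at the edge
        float t = borderAlpha * (borderAlpha * (4.0f * borderAlpha - 6.0f) + 5.0f) / 3.0f;
        float d = 2.0f * kAAFactor * t - kAAFactor;
        std::printf("t = %g, d = %g\n", t, d);  // prints t = 0.5, d = 0
    }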
diff --git a/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h
new file mode 100644
index 0000000000..3b217b9780
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDistanceFieldAdjustTable_DEFINED
+#define GrDistanceFieldAdjustTable_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+// Distance field text needs this table to compute a value for use in the fragment shader.
+// Because the GrTextContext can go out of scope before the final flush, this needs to be
+// refcnted and malloced
+struct GrDistanceFieldAdjustTable : public SkNVRefCnt<GrDistanceFieldAdjustTable> {
+ GrDistanceFieldAdjustTable() { this->buildDistanceAdjustTables(); }
+ ~GrDistanceFieldAdjustTable() {
+ delete[] fTable;
+ delete[] fGammaCorrectTable;
+ }
+
+ const SkScalar& getAdjustment(int i, bool useGammaCorrectTable) const {
+ return useGammaCorrectTable ? fGammaCorrectTable[i] : fTable[i];
+ }
+
+private:
+ void buildDistanceAdjustTables();
+
+ SkScalar* fTable;
+ SkScalar* fGammaCorrectTable;
+};
+
+#endif
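A minimal usage sketch of the table above (GPU text context assumed; in real use
the row index comes from the gamma LUT row for the draw's luminance, not a literal):

    sk_sp<GrDistanceFieldAdjustTable> table(new GrDistanceFieldAdjustTable);
    int row = 3;  // assumed luminance bucket
    SkScalar adjust = table->getAdjustment(row, /*useGammaCorrectTable=*/false);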
diff --git a/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.cpp b/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.cpp
new file mode 100644
index 0000000000..7bcaed3fac
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeMath.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/gpu/text/GrSDFMaskFilter.h"
+
+class GrSDFMaskFilterImpl : public SkMaskFilterBase {
+public:
+ GrSDFMaskFilterImpl();
+
+    // overrides from SkMaskFilterBase
+    SkMask::Format getFormat() const override;
+    bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+                    SkIPoint* margin) const override;
+
+ void computeFastBounds(const SkRect&, SkRect*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(GrSDFMaskFilterImpl)
+
+ typedef SkMaskFilter INHERITED;
+ friend void gr_register_sdf_maskfilter_createproc();
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrSDFMaskFilterImpl::GrSDFMaskFilterImpl() {}
+
+SkMask::Format GrSDFMaskFilterImpl::getFormat() const {
+ return SkMask::kSDF_Format;
+}
+
+bool GrSDFMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format
+ && src.fFormat != SkMask::kBW_Format
+ && src.fFormat != SkMask::kLCD16_Format) {
+ return false;
+ }
+
+ *dst = SkMask::PrepareDestination(SK_DistanceFieldPad, SK_DistanceFieldPad, src);
+ dst->fFormat = SkMask::kSDF_Format;
+
+ if (margin) {
+ margin->set(SK_DistanceFieldPad, SK_DistanceFieldPad);
+ }
+
+ if (src.fImage == nullptr) {
+ return true;
+ }
+ if (dst->fImage == nullptr) {
+ dst->fBounds.setEmpty();
+ return false;
+ }
+
+ if (src.fFormat == SkMask::kA8_Format) {
+ return SkGenerateDistanceFieldFromA8Image(dst->fImage, src.fImage,
+ src.fBounds.width(), src.fBounds.height(),
+ src.fRowBytes);
+ } else if (src.fFormat == SkMask::kLCD16_Format) {
+ return SkGenerateDistanceFieldFromLCD16Mask(dst->fImage, src.fImage,
+ src.fBounds.width(), src.fBounds.height(),
+ src.fRowBytes);
+ } else {
+ return SkGenerateDistanceFieldFromBWImage(dst->fImage, src.fImage,
+ src.fBounds.width(), src.fBounds.height(),
+ src.fRowBytes);
+ }
+}
+
+void GrSDFMaskFilterImpl::computeFastBounds(const SkRect& src,
+ SkRect* dst) const {
+ dst->setLTRB(src.fLeft - SK_DistanceFieldPad, src.fTop - SK_DistanceFieldPad,
+ src.fRight + SK_DistanceFieldPad, src.fBottom + SK_DistanceFieldPad);
+}
+
+sk_sp<SkFlattenable> GrSDFMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ return GrSDFMaskFilter::Make();
+}
+
+void gr_register_sdf_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(GrSDFMaskFilterImpl); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkMaskFilter> GrSDFMaskFilter::Make() {
+ return sk_sp<SkMaskFilter>(new GrSDFMaskFilterImpl());
+}
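A sketch of the filterMask() contract above, from a hypothetical caller that
already holds a populated A8 SkMask 'src' (internal Skia headers assumed):

    SkMask dst;
    SkIPoint margin;
    sk_sp<SkMaskFilter> sdf = GrSDFMaskFilter::Make();
    if (as_MFB(sdf)->filterMask(&dst, src, SkMatrix::I(), &margin)) {
        // dst.fFormat is now SkMask::kSDF_Format, the bounds grew by
        // SK_DistanceFieldPad on each side, and margin == (pad, pad).
        SkMask::FreeImage(dst.fImage);
    }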
diff --git a/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.h b/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.h
new file mode 100644
index 0000000000..6dfbaf062b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrSDFMaskFilter.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSDFMaskFilter_DEFINED
+#define GrSDFMaskFilter_DEFINED
+
+#include "include/core/SkMaskFilter.h"
+
+/** \class GrSDFMaskFilter
+
+ This mask filter converts an alpha mask to a signed distance field representation
+*/
+class GrSDFMaskFilter : public SkMaskFilter {
+public:
+ static sk_sp<SkMaskFilter> Make();
+};
+
+extern void gr_register_sdf_maskfilter_createproc();
+
+#endif
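A minimal usage sketch: the filter is attached to a paint like any other
SkMaskFilter, after registering the flattenable (internal GPU-text API; this is
an illustration, not the only supported setup):

    gr_register_sdf_maskfilter_createproc();       // make it (de)serializable
    SkPaint paint;
    paint.setMaskFilter(GrSDFMaskFilter::Make());  // masks now convert to kSDF_Format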
diff --git a/gfx/skia/skia/src/gpu/text/GrStrikeCache.cpp b/gfx/skia/skia/src/gpu/text/GrStrikeCache.cpp
new file mode 100644
index 0000000000..f445c65986
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrStrikeCache.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrDistanceFieldGenFromVector.h"
+#include "src/gpu/text/GrAtlasManager.h"
+#include "src/gpu/text/GrStrikeCache.h"
+
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkDistanceFieldGen.h"
+
+GrStrikeCache::GrStrikeCache(const GrCaps* caps, size_t maxTextureBytes)
+ : fPreserveStrike(nullptr)
+ , f565Masks(SkMasks::CreateMasks({0xF800, 0x07E0, 0x001F, 0},
+ GrMaskFormatBytesPerPixel(kA565_GrMaskFormat))) { }
+
+GrStrikeCache::~GrStrikeCache() {
+ StrikeHash::Iter iter(&fCache);
+ while (!iter.done()) {
+ (*iter).fIsAbandoned = true;
+ (*iter).unref();
+ ++iter;
+ }
+}
+
+void GrStrikeCache::freeAll() {
+ StrikeHash::Iter iter(&fCache);
+ while (!iter.done()) {
+ (*iter).fIsAbandoned = true;
+ (*iter).unref();
+ ++iter;
+ }
+ fCache.rewind();
+}
+
+void GrStrikeCache::HandleEviction(GrDrawOpAtlas::AtlasID id, void* ptr) {
+ GrStrikeCache* grStrikeCache = reinterpret_cast<GrStrikeCache*>(ptr);
+
+ StrikeHash::Iter iter(&grStrikeCache->fCache);
+ for (; !iter.done(); ++iter) {
+ GrTextStrike* strike = &*iter;
+ strike->removeID(id);
+
+ // clear out any empty strikes. We will preserve the strike whose call to addToAtlas
+ // triggered the eviction
+ if (strike != grStrikeCache->fPreserveStrike && 0 == strike->fAtlasedGlyphs) {
+ grStrikeCache->fCache.remove(GrTextStrike::GetKey(*strike));
+ strike->fIsAbandoned = true;
+ strike->unref();
+ }
+ }
+}
+
+// expands each bit in a bitmask to 0 or ~0 of type INT_TYPE. Used to expand a BW glyph mask to
+// A8, RGB565, or RGBA8888.
+template <typename INT_TYPE>
+static void expand_bits(INT_TYPE* dst,
+ const uint8_t* src,
+ int width,
+ int height,
+ int dstRowBytes,
+ int srcRowBytes) {
+ for (int i = 0; i < height; ++i) {
+ int rowWritesLeft = width;
+ const uint8_t* s = src;
+ INT_TYPE* d = dst;
+ while (rowWritesLeft > 0) {
+ unsigned mask = *s++;
+ for (int i = 7; i >= 0 && rowWritesLeft; --i, --rowWritesLeft) {
+ *d++ = (mask & (1 << i)) ? (INT_TYPE)(~0UL) : 0;
+ }
+ }
+ dst = reinterpret_cast<INT_TYPE*>(reinterpret_cast<intptr_t>(dst) + dstRowBytes);
+ src += srcRowBytes;
+ }
+}
+
+static bool get_packed_glyph_image(SkStrike* cache, SkGlyph* glyph, int width,
+ int height, int dstRB, GrMaskFormat expectedMaskFormat,
+ void* dst, const SkMasks& masks) {
+ SkASSERT(glyph->width() == width);
+ SkASSERT(glyph->height() == height);
+ const void* src = cache->prepareImage(glyph);
+ if (src == nullptr) {
+ return false;
+ }
+
+ // Convert if the glyph uses a 565 mask format since it is using LCD text rendering but the
+ // expected format is 8888 (will happen on macOS with Metal since that combination does not
+ // support 565).
+ if (kA565_GrMaskFormat == GrGlyph::FormatFromSkGlyph(glyph->maskFormat()) &&
+ kARGB_GrMaskFormat == expectedMaskFormat) {
+ const int a565Bpp = GrMaskFormatBytesPerPixel(kA565_GrMaskFormat);
+ const int argbBpp = GrMaskFormatBytesPerPixel(kARGB_GrMaskFormat);
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ uint16_t color565 = 0;
+ memcpy(&color565, src, a565Bpp);
+ uint32_t colorRGBA = GrColorPackRGBA(masks.getRed(color565),
+ masks.getGreen(color565),
+ masks.getBlue(color565),
+ 0xFF);
+ memcpy(dst, &colorRGBA, argbBpp);
+ src = (char*)src + a565Bpp;
+ dst = (char*)dst + argbBpp;
+ }
+ }
+ return true;
+ }
+
+ // crbug:510931
+ // Retrieving the image from the cache can actually change the mask format. This case is very
+ // uncommon so for now we just draw a clear box for these glyphs.
+ if (GrGlyph::FormatFromSkGlyph(glyph->maskFormat()) != expectedMaskFormat) {
+ const int bpp = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+ for (int y = 0; y < height; y++) {
+ sk_bzero(dst, width * bpp);
+ dst = (char*)dst + dstRB;
+ }
+ return true;
+ }
+
+ int srcRB = glyph->rowBytes();
+    // The Windows font host sometimes has BW glyphs in a non-BW strike. So it is important here to
+ // check the glyph's format, not the strike's format, and to be able to convert to any of the
+ // GrMaskFormats.
+ if (glyph->maskFormat() == SkMask::kBW_Format) {
+ // expand bits to our mask type
+ const uint8_t* bits = reinterpret_cast<const uint8_t*>(src);
+ switch (expectedMaskFormat) {
+ case kA8_GrMaskFormat:{
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(dst);
+ expand_bits(bytes, bits, width, height, dstRB, srcRB);
+ break;
+ }
+ case kA565_GrMaskFormat: {
+ uint16_t* rgb565 = reinterpret_cast<uint16_t*>(dst);
+ expand_bits(rgb565, bits, width, height, dstRB, srcRB);
+ break;
+ }
+ default:
+ SK_ABORT("Invalid GrMaskFormat");
+ }
+ } else if (srcRB == dstRB) {
+ memcpy(dst, src, dstRB * height);
+ } else {
+        const int bpp = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+        for (int y = 0; y < height; y++) {
+            memcpy(dst, src, width * bpp);
+ src = (const char*)src + srcRB;
+ dst = (char*)dst + dstRB;
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+    The text strike is specific to a given font/style/matrix setup, which is
+    represented by the SkDescriptor key the strike is constructed with.
+
+    We map a 32-bit glyph ID to a GrGlyph record, which in turn points to an
+    atlas and a position within that texture.
+ */
+
+GrTextStrike::GrTextStrike(const SkDescriptor& key)
+ : fFontScalerKey(key) {}
+
+void GrTextStrike::removeID(GrDrawOpAtlas::AtlasID id) {
+ SkTDynamicHash<GrGlyph, SkPackedGlyphID>::Iter iter(&fCache);
+ while (!iter.done()) {
+ if (id == (*iter).fID) {
+ (*iter).fID = GrDrawOpAtlas::kInvalidAtlasID;
+ fAtlasedGlyphs--;
+ SkASSERT(fAtlasedGlyphs >= 0);
+ }
+ ++iter;
+ }
+}
+
+GrDrawOpAtlas::ErrorCode GrTextStrike::addGlyphToAtlas(
+ GrResourceProvider* resourceProvider,
+ GrDeferredUploadTarget* target,
+ GrStrikeCache* glyphCache,
+ GrAtlasManager* fullAtlasManager,
+ GrGlyph* glyph,
+ SkStrike* skStrikeCache,
+ GrMaskFormat expectedMaskFormat,
+ bool isScaledGlyph) {
+ SkASSERT(glyph);
+ SkASSERT(skStrikeCache);
+ SkASSERT(fCache.find(glyph->fPackedID));
+
+ expectedMaskFormat = fullAtlasManager->resolveMaskFormat(expectedMaskFormat);
+ int bytesPerPixel = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+ int width = glyph->width();
+ int height = glyph->height();
+ int rowBytes = width * bytesPerPixel;
+
+ size_t size = glyph->fBounds.area() * bytesPerPixel;
+ bool isSDFGlyph = GrGlyph::kDistance_MaskStyle == glyph->maskStyle();
+ bool addPad = isScaledGlyph && !isSDFGlyph;
+ if (addPad) {
+ width += 2;
+ rowBytes += 2*bytesPerPixel;
+ size += 2 * rowBytes;
+ height += 2;
+ size += 2 * (height + 2) * bytesPerPixel;
+ }
+ SkAutoSMalloc<1024> storage(size);
+
+ SkGlyph* skGlyph = skStrikeCache->glyph(glyph->fPackedID);
+ void* dataPtr = storage.get();
+ if (addPad) {
+ sk_bzero(dataPtr, size);
+ dataPtr = (char*)(dataPtr) + rowBytes + bytesPerPixel;
+ }
+ if (!get_packed_glyph_image(skStrikeCache, skGlyph, glyph->width(), glyph->height(),
+ rowBytes, expectedMaskFormat,
+ dataPtr, glyphCache->getMasks())) {
+ return GrDrawOpAtlas::ErrorCode::kError;
+ }
+
+ GrDrawOpAtlas::ErrorCode result = fullAtlasManager->addToAtlas(
+ resourceProvider, glyphCache, this,
+ &glyph->fID, target, expectedMaskFormat,
+ width, height,
+ storage.get(), &glyph->fAtlasLocation);
+ if (GrDrawOpAtlas::ErrorCode::kSucceeded == result) {
+ if (addPad) {
+ glyph->fAtlasLocation.fX += 1;
+ glyph->fAtlasLocation.fY += 1;
+ }
+ SkASSERT(GrDrawOpAtlas::kInvalidAtlasID != glyph->fID);
+ fAtlasedGlyphs++;
+ }
+ return result;
+}
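A standalone demo of the expand_bits() idea above: one BW source byte expands to
eight A8 destination pixels of 0x00 or 0xFF.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint8_t srcRow[] = { 0xB0 };  // bits 1011'0000
        uint8_t dstRow[8];
        for (int i = 7, j = 0; i >= 0; --i, ++j) {
            dstRow[j] = (srcRow[0] & (1 << i)) ? 0xFF : 0x00;
        }
        for (uint8_t v : dstRow) {
            std::printf("%02X ", v);  // FF 00 FF FF 00 00 00 00
        }
        std::printf("\n");
    }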
diff --git a/gfx/skia/skia/src/gpu/text/GrStrikeCache.h b/gfx/skia/skia/src/gpu/text/GrStrikeCache.h
new file mode 100644
index 0000000000..365e0a7868
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrStrikeCache.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStrikeCache_DEFINED
+#define GrStrikeCache_DEFINED
+
+#include "src/codec/SkMasks.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/GrGlyph.h"
+
+class GrAtlasManager;
+class GrGpu;
+class GrStrikeCache;
+
+/**
+ * The GrTextStrike manages a pool of CPU backing memory for GrGlyphs. This backing memory
+ * is indexed by a PackedID and SkStrike. The SkStrike is what actually creates the mask.
+ * The GrTextStrike may outlive the generating SkStrike. However, it retains a copy
+ * of its SkDescriptor as a key to access (or regenerate) the SkStrike. GrTextStrikes are
+ * created by and owned by a GrStrikeCache.
+ */
+class GrTextStrike : public SkNVRefCnt<GrTextStrike> {
+public:
+ GrTextStrike(const SkDescriptor& fontScalerKey);
+
+ GrGlyph* getGlyph(const SkGlyph& skGlyph) {
+ GrGlyph* grGlyph = fCache.find(skGlyph.getPackedID());
+ if (grGlyph == nullptr) {
+ grGlyph = fAlloc.make<GrGlyph>(skGlyph);
+ fCache.add(grGlyph);
+ }
+ return grGlyph;
+ }
+
+ // This variant of the above function is called by GrAtlasTextOp. At this point, it is possible
+    // that the mask format of the glyph differs from what we expect. In these cases we will just
+ // draw a clear square.
+ // skbug:4143 crbug:510931
+ GrGlyph* getGlyph(SkPackedGlyphID packed, SkStrike* skStrike) {
+ GrGlyph* grGlyph = fCache.find(packed);
+ if (grGlyph == nullptr) {
+            // We could return this to the caller, but in practice it adds code complexity for
+            // potentially little benefit (i.e., if the glyph is not in our font cache, then it's
+            // not in the atlas and we're going to be doing a texture upload anyway).
+ grGlyph = fAlloc.make<GrGlyph>(*skStrike->glyph(packed));
+ fCache.add(grGlyph);
+ }
+ return grGlyph;
+ }
+
+ // returns true if glyph successfully added to texture atlas, false otherwise. If the glyph's
+ // mask format has changed, then addGlyphToAtlas will draw a clear box. This will almost never
+ // happen.
+ // TODO we can handle some of these cases if we really want to, but the long term solution is to
+ // get the actual glyph image itself when we get the glyph metrics.
+ GrDrawOpAtlas::ErrorCode addGlyphToAtlas(GrResourceProvider*, GrDeferredUploadTarget*,
+ GrStrikeCache*, GrAtlasManager*, GrGlyph*,
+ SkStrike*, GrMaskFormat expectedMaskFormat,
+ bool isScaledGlyph);
+
+ // testing
+ int countGlyphs() const { return fCache.count(); }
+
+ // remove any references to this plot
+ void removeID(GrDrawOpAtlas::AtlasID);
+
+ // If a TextStrike is abandoned by the cache, then the caller must get a new strike
+ bool isAbandoned() const { return fIsAbandoned; }
+
+ static const SkDescriptor& GetKey(const GrTextStrike& strike) {
+ return *strike.fFontScalerKey.getDesc();
+ }
+
+ static uint32_t Hash(const SkDescriptor& desc) { return desc.getChecksum(); }
+
+private:
+ SkTDynamicHash<GrGlyph, SkPackedGlyphID> fCache;
+ SkAutoDescriptor fFontScalerKey;
+ SkArenaAlloc fAlloc{512};
+
+ int fAtlasedGlyphs{0};
+ bool fIsAbandoned{false};
+
+ friend class GrStrikeCache;
+};
+
+/**
+ * GrStrikeCache manages strikes, which are indexed by SkDescriptor. These strikes can then be
+ * used to generate individual glyph masks.
+ */
+class GrStrikeCache {
+public:
+ GrStrikeCache(const GrCaps* caps, size_t maxTextureBytes);
+ ~GrStrikeCache();
+
+ void setStrikeToPreserve(GrTextStrike* strike) { fPreserveStrike = strike; }
+
+ // The user of the cache may hold a long-lived ref to the returned strike. However, actions by
+ // another client of the cache may cause the strike to be purged while it is still reffed.
+    // Therefore, the caller must check GrTextStrike::isAbandoned() if there have been other
+ // interactions with the cache since the strike was received.
+ sk_sp<GrTextStrike> getStrike(const SkDescriptor& desc) {
+ sk_sp<GrTextStrike> strike = sk_ref_sp(fCache.find(desc));
+ if (!strike) {
+ strike = this->generateStrike(desc);
+ }
+ return strike;
+ }
+
+ const SkMasks& getMasks() const { return *f565Masks; }
+
+ void freeAll();
+
+ static void HandleEviction(GrDrawOpAtlas::AtlasID, void*);
+
+private:
+ sk_sp<GrTextStrike> generateStrike(const SkDescriptor& desc) {
+        // 'fCache' gets the construction ref
+ sk_sp<GrTextStrike> strike = sk_ref_sp(new GrTextStrike(desc));
+ fCache.add(strike.get());
+ return strike;
+ }
+
+ using StrikeHash = SkTDynamicHash<GrTextStrike, SkDescriptor>;
+
+ StrikeHash fCache;
+ GrTextStrike* fPreserveStrike;
+ std::unique_ptr<const SkMasks> f565Masks;
+};
+
+#endif // GrStrikeCache_DEFINED
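A sketch of the lookup contract described above; flush-time context, and
grStrikeCache, desc, and doOtherCacheWork() are assumptions for illustration:

    sk_sp<GrTextStrike> strike = grStrikeCache->getStrike(desc);
    doOtherCacheWork(grStrikeCache);  // may purge 'strike' behind our back
    if (strike->isAbandoned()) {
        strike = grStrikeCache->getStrike(desc);  // re-fetch a live strike
    }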
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlob.cpp b/gfx/skia/skia/src/gpu/text/GrTextBlob.cpp
new file mode 100644
index 0000000000..095c6fa531
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlob.cpp
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/gpu/GrContext.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/gpu/GrBlurUtils.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrStyle.h"
+#include "src/gpu/geometry/GrShape.h"
+#include "src/gpu/ops/GrAtlasTextOp.h"
+#include "src/gpu/text/GrTextBlob.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+#include <new>
+
+template <size_t N> static size_t sk_align(size_t s) {
+ return ((s + (N-1)) / N) * N;
+}
+
+sk_sp<GrTextBlob> GrTextBlob::Make(int glyphCount,
+ int runCount,
+ GrColor color,
+ GrStrikeCache* strikeCache) {
+ // We allocate size for the GrTextBlob itself, plus size for the vertices array,
+ // and size for the glyphIds array.
+ size_t verticesCount = glyphCount * kVerticesPerGlyph * kMaxVASize;
+
+ size_t blobStart = 0;
+ size_t vertex = sk_align<alignof(char)> (blobStart + sizeof(GrTextBlob) * 1);
+ size_t glyphs = sk_align<alignof(GrGlyph*)> (vertex + sizeof(char) * verticesCount);
+ size_t runs = sk_align<alignof(GrTextBlob::Run)>(glyphs + sizeof(GrGlyph*) * glyphCount);
+ size_t size = (runs + sizeof(GrTextBlob::Run) * runCount);
+
+ void* allocation = ::operator new (size);
+
+ if (CACHE_SANITY_CHECK) {
+ sk_bzero(allocation, size);
+ }
+
+ sk_sp<GrTextBlob> blob{new (allocation) GrTextBlob{strikeCache}};
+ blob->fSize = size;
+
+ // setup offsets for vertices / glyphs
+ blob->fVertices = SkTAddOffset<char>(blob.get(), vertex);
+ blob->fGlyphs = SkTAddOffset<GrGlyph*>(blob.get(), glyphs);
+ blob->fRuns = SkTAddOffset<GrTextBlob::Run>(blob.get(), runs);
+
+ // Initialize runs
+ for (int i = 0; i < runCount; i++) {
+ new (&blob->fRuns[i]) GrTextBlob::Run{blob.get(), color};
+ }
+ blob->fRunCountLimit = runCount;
+ return blob;
+}
+
+void GrTextBlob::Run::setupFont(const SkStrikeSpec& strikeSpec) {
+
+ if (fFallbackStrikeSpec != nullptr) {
+ *fFallbackStrikeSpec = strikeSpec;
+ } else {
+ fStrikeSpec = strikeSpec;
+ }
+}
+
+void GrTextBlob::Run::appendPathGlyph(const SkPath& path, SkPoint position,
+ SkScalar scale, bool preTransformed) {
+ fPathGlyphs.push_back(PathGlyph(path, position.x(), position.y(), scale, preTransformed));
+}
+
+bool GrTextBlob::mustRegenerate(const SkPaint& paint, bool anyRunHasSubpixelPosition,
+ const SkMaskFilterBase::BlurRec& blurRec,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+    // If we have LCD text then our canonical color will be set to transparent; in this case we
+    // have to regenerate the blob on any color change.
+    // We use the grPaint to get any color filter effects.
+ if (fKey.fCanonicalColor == SK_ColorTRANSPARENT &&
+ fLuminanceColor != SkPaintPriv::ComputeLuminanceColor(paint)) {
+ return true;
+ }
+
+ if (fInitialViewMatrix.hasPerspective() != viewMatrix.hasPerspective()) {
+ return true;
+ }
+
+ /** This could be relaxed for blobs with only distance field glyphs. */
+ if (fInitialViewMatrix.hasPerspective() && !fInitialViewMatrix.cheapEqualTo(viewMatrix)) {
+ return true;
+ }
+
+ // We only cache one masked version
+ if (fKey.fHasBlur &&
+ (fBlurRec.fSigma != blurRec.fSigma || fBlurRec.fStyle != blurRec.fStyle)) {
+ return true;
+ }
+
+ // Similarly, we only cache one version for each style
+ if (fKey.fStyle != SkPaint::kFill_Style &&
+ (fStrokeInfo.fFrameWidth != paint.getStrokeWidth() ||
+ fStrokeInfo.fMiterLimit != paint.getStrokeMiter() ||
+ fStrokeInfo.fJoin != paint.getStrokeJoin())) {
+ return true;
+ }
+
+ // Mixed blobs must be regenerated. We could probably figure out a way to do integer scrolls
+ // for mixed blobs if this becomes an issue.
+ if (this->hasBitmap() && this->hasDistanceField()) {
+        // If the view matrices are identical we can reuse in all cases.
+ if (fInitialViewMatrix.cheapEqualTo(viewMatrix) && x == fInitialX && y == fInitialY) {
+ return false;
+ }
+ return true;
+ }
+
+ if (this->hasBitmap()) {
+ if (fInitialViewMatrix.getScaleX() != viewMatrix.getScaleX() ||
+ fInitialViewMatrix.getScaleY() != viewMatrix.getScaleY() ||
+ fInitialViewMatrix.getSkewX() != viewMatrix.getSkewX() ||
+ fInitialViewMatrix.getSkewY() != viewMatrix.getSkewY()) {
+ return true;
+ }
+
+ // TODO(herb): this is not needed for full pixel glyph choice, but is needed to adjust
+ // the quads properly. Devise a system that regenerates the quads from original data
+ // using the transform to allow this to be used in general.
+
+ // We can update the positions in the text blob without regenerating the whole
+ // blob, but only for integer translations.
+ // This cool bit of math will determine the necessary translation to apply to the
+ // already generated vertex coordinates to move them to the correct position.
+ // Figure out the translation in view space given a translation in source space.
+ SkScalar transX = viewMatrix.getTranslateX() +
+ viewMatrix.getScaleX() * (x - fInitialX) +
+ viewMatrix.getSkewX() * (y - fInitialY) -
+ fInitialViewMatrix.getTranslateX();
+ SkScalar transY = viewMatrix.getTranslateY() +
+ viewMatrix.getSkewY() * (x - fInitialX) +
+ viewMatrix.getScaleY() * (y - fInitialY) -
+ fInitialViewMatrix.getTranslateY();
+ if (!SkScalarIsInt(transX) || !SkScalarIsInt(transY)) {
+ return true;
+ }
+ } else if (this->hasDistanceField()) {
+ // A scale outside of [blob.fMaxMinScale, blob.fMinMaxScale] would result in a different
+ // distance field being generated, so we have to regenerate in those cases
+ SkScalar newMaxScale = viewMatrix.getMaxScale();
+ SkScalar oldMaxScale = fInitialViewMatrix.getMaxScale();
+ SkScalar scaleAdjust = newMaxScale / oldMaxScale;
+ if (scaleAdjust < fMaxMinScale || scaleAdjust > fMinMaxScale) {
+ return true;
+ }
+ }
+
+    // It is possible that a blob has neither distance-field nor bitmap text. This is the case
+    // when all of the runs inside the blob are drawn as paths. In that case, we always regenerate
+    // the blob at flush time anyway, so there is no need to regenerate explicitly.
+ return false;
+}
+
+inline std::unique_ptr<GrAtlasTextOp> GrTextBlob::makeOp(
+ const SubRun& info, int glyphCount, uint16_t run, uint16_t subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y, const SkIRect& clipRect,
+ const SkPaint& paint, const SkPMColor4f& filteredColor, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable, GrTextTarget* target) {
+ GrMaskFormat format = info.maskFormat();
+
+ GrPaint grPaint;
+ target->makeGrPaint(info.maskFormat(), paint, viewMatrix, &grPaint);
+ std::unique_ptr<GrAtlasTextOp> op;
+ if (info.drawAsDistanceFields()) {
+ // TODO: Can we be even smarter based on the dest transfer function?
+ op = GrAtlasTextOp::MakeDistanceField(
+ target->getContext(), std::move(grPaint), glyphCount, distanceAdjustTable,
+ target->colorInfo().isLinearlyBlended(), SkPaintPriv::ComputeLuminanceColor(paint),
+ props, info.isAntiAliased(), info.hasUseLCDText());
+ } else {
+ op = GrAtlasTextOp::MakeBitmap(target->getContext(), std::move(grPaint), format, glyphCount,
+ info.needsTransform());
+ }
+ GrAtlasTextOp::Geometry& geometry = op->geometry();
+ geometry.fViewMatrix = viewMatrix;
+ geometry.fClipRect = clipRect;
+ geometry.fBlob = SkRef(this);
+ geometry.fRun = run;
+ geometry.fSubRun = subRun;
+ geometry.fColor = info.maskFormat() == kARGB_GrMaskFormat ? SK_PMColor4fWHITE : filteredColor;
+ geometry.fX = x;
+ geometry.fY = y;
+ op->init();
+ return op;
+}
+
+static void calculate_translation(bool applyVM,
+ const SkMatrix& newViewMatrix, SkScalar newX, SkScalar newY,
+ const SkMatrix& currentViewMatrix, SkScalar currentX,
+ SkScalar currentY, SkScalar* transX, SkScalar* transY) {
+ if (applyVM) {
+ *transX = newViewMatrix.getTranslateX() +
+ newViewMatrix.getScaleX() * (newX - currentX) +
+ newViewMatrix.getSkewX() * (newY - currentY) -
+ currentViewMatrix.getTranslateX();
+
+ *transY = newViewMatrix.getTranslateY() +
+ newViewMatrix.getSkewY() * (newX - currentX) +
+ newViewMatrix.getScaleY() * (newY - currentY) -
+ currentViewMatrix.getTranslateY();
+ } else {
+ *transX = newX - currentX;
+ *transY = newY - currentY;
+ }
+}
+
+void GrTextBlob::flush(GrTextTarget* target, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& paint, const SkPMColor4f& filteredColor, const GrClip& clip,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+
+ // GrTextBlob::makeOp only takes uint16_t values for run and subRun indices.
+ // Encountering something larger than this is highly unlikely, so we'll just not draw it.
+ int lastRun = SkTMin(fRunCountLimit, (1 << 16)) - 1;
+ // For each run in the GrTextBlob we're going to churn through all the glyphs.
+ // Each run is broken into a path part and a Mask / DFT / ARGB part.
+ for (int runIndex = 0; runIndex <= lastRun; runIndex++) {
+
+ Run& run = fRuns[runIndex];
+
+ // first flush any path glyphs
+ if (run.fPathGlyphs.count()) {
+ SkPaint runPaint{paint};
+ runPaint.setAntiAlias(run.fAntiAlias);
+
+ for (int i = 0; i < run.fPathGlyphs.count(); i++) {
+ GrTextBlob::Run::PathGlyph& pathGlyph = run.fPathGlyphs[i];
+
+ SkMatrix ctm;
+ const SkPath* path = &pathGlyph.fPath;
+
+                    // tmpPath must be in the same scope as GrShape shape below.
+ SkTLazy<SkPath> tmpPath;
+
+ // The glyph positions and glyph outlines are either in device space or in source
+ // space based on fPreTransformed.
+ if (!pathGlyph.fPreTransformed) {
+ // Positions and outlines are in source space.
+
+ ctm = viewMatrix;
+
+ SkMatrix pathMatrix = SkMatrix::MakeScale(pathGlyph.fScale, pathGlyph.fScale);
+
+ // The origin for the blob may have changed, so figure out the delta.
+ SkVector originShift = SkPoint{x, y} - SkPoint{fInitialX, fInitialY};
+
+ // Shift the original glyph location in source space to the position of the new
+ // blob.
+ pathMatrix.postTranslate(originShift.x() + pathGlyph.fX,
+ originShift.y() + pathGlyph.fY);
+
+ // If there are shaders, blurs or styles, the path must be scaled into source
+ // space independently of the CTM. This allows the CTM to be correct for the
+ // different effects.
+ GrStyle style(runPaint);
+ bool scalePath = runPaint.getShader()
+ || style.applies()
+ || runPaint.getMaskFilter();
+ if (!scalePath) {
+ // Scale can be applied to CTM -- no effects.
+
+ ctm.preConcat(pathMatrix);
+ } else {
+ // Scale the outline into source space.
+
+                        // Transform the path from the normalized outline to source space. This
+ // way the CTM will remain the same so it can be used by the effects.
+ SkPath* sourceOutline = tmpPath.init();
+ path->transform(pathMatrix, sourceOutline);
+ sourceOutline->setIsVolatile(true);
+ path = sourceOutline;
+ }
+
+ } else {
+ // Positions and outlines are in device space.
+
+ SkPoint originalOrigin = {fInitialX, fInitialY};
+ fInitialViewMatrix.mapPoints(&originalOrigin, 1);
+
+ SkPoint newOrigin = {x, y};
+ viewMatrix.mapPoints(&newOrigin, 1);
+
+ // The origin shift in device space.
+ SkPoint originShift = newOrigin - originalOrigin;
+
+ // Shift the original glyph location in device space to the position of the
+ // new blob.
+ ctm = SkMatrix::MakeTrans(originShift.x() + pathGlyph.fX,
+ originShift.y() + pathGlyph.fY);
+ }
+
+ // TODO: we are losing the mutability of the path here
+ GrShape shape(*path, paint);
+
+ target->drawShape(clip, runPaint, ctm, shape);
+ }
+ }
+
+ // then flush each subrun, if any
+ if (!run.fInitialized) {
+ continue;
+ }
+
+ int lastSubRun = SkTMin(run.fSubRunInfo.count(), 1 << 16) - 1;
+ for (int subRun = 0; subRun <= lastSubRun; subRun++) {
+ const SubRun& info = run.fSubRunInfo[subRun];
+ int glyphCount = info.glyphCount();
+ if (0 == glyphCount) {
+ continue;
+ }
+
+ bool skipClip = false;
+ bool submitOp = true;
+ SkIRect clipRect = SkIRect::MakeEmpty();
+ SkRect rtBounds = SkRect::MakeWH(target->width(), target->height());
+ SkRRect clipRRect;
+ GrAA aa;
+ // We can clip geometrically if we're not using SDFs or transformed glyphs,
+ // and we have an axis-aligned rectangular non-AA clip
+ if (!info.drawAsDistanceFields() && !info.needsTransform() &&
+ clip.isRRect(rtBounds, &clipRRect, &aa) &&
+ clipRRect.isRect() && GrAA::kNo == aa) {
+ skipClip = true;
+ // We only need to do clipping work if the subrun isn't contained by the clip
+ SkRect subRunBounds;
+ this->computeSubRunBounds(&subRunBounds, runIndex, subRun, viewMatrix, x, y,
+ false);
+ if (!clipRRect.getBounds().contains(subRunBounds)) {
+ // If the subrun is completely outside, don't add an op for it
+ if (!clipRRect.getBounds().intersects(subRunBounds)) {
+ submitOp = false;
+                        } else {
+ clipRRect.getBounds().round(&clipRect);
+ }
+ }
+ }
+
+ if (submitOp) {
+ auto op = this->makeOp(info, glyphCount, runIndex, subRun, viewMatrix, x, y,
+ clipRect, paint, filteredColor, props, distanceAdjustTable,
+ target);
+ if (op) {
+ if (skipClip) {
+ target->addDrawOp(GrNoClip(), std::move(op));
+                    } else {
+ target->addDrawOp(clip, std::move(op));
+ }
+ }
+ }
+ }
+
+ }
+}
+
+std::unique_ptr<GrDrawOp> GrTextBlob::test_makeOp(
+ int glyphCount, uint16_t run, uint16_t subRun, const SkMatrix& viewMatrix,
+ SkScalar x, SkScalar y, const SkPaint& paint, const SkPMColor4f& filteredColor,
+ const SkSurfaceProps& props, const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ GrTextTarget* target) {
+ const GrTextBlob::SubRun& info = fRuns[run].fSubRunInfo[subRun];
+ SkIRect emptyRect = SkIRect::MakeEmpty();
+ return this->makeOp(info, glyphCount, run, subRun, viewMatrix, x, y, emptyRect,
+ paint, filteredColor, props, distanceAdjustTable, target);
+}
+
+void GrTextBlob::AssertEqual(const GrTextBlob& l, const GrTextBlob& r) {
+ SkASSERT_RELEASE(l.fSize == r.fSize);
+
+ SkASSERT_RELEASE(l.fBlurRec.fSigma == r.fBlurRec.fSigma);
+ SkASSERT_RELEASE(l.fBlurRec.fStyle == r.fBlurRec.fStyle);
+
+ SkASSERT_RELEASE(l.fStrokeInfo.fFrameWidth == r.fStrokeInfo.fFrameWidth);
+ SkASSERT_RELEASE(l.fStrokeInfo.fMiterLimit == r.fStrokeInfo.fMiterLimit);
+ SkASSERT_RELEASE(l.fStrokeInfo.fJoin == r.fStrokeInfo.fJoin);
+
+ SkASSERT_RELEASE(l.fKey == r.fKey);
+ //SkASSERT_RELEASE(l.fPaintColor == r.fPaintColor); // Colors might not actually be identical
+ SkASSERT_RELEASE(l.fMaxMinScale == r.fMaxMinScale);
+ SkASSERT_RELEASE(l.fMinMaxScale == r.fMinMaxScale);
+ SkASSERT_RELEASE(l.fTextType == r.fTextType);
+
+ SkASSERT_RELEASE(l.fRunCountLimit == r.fRunCountLimit);
+ for (int i = 0; i < l.fRunCountLimit; i++) {
+ const Run& lRun = l.fRuns[i];
+ const Run& rRun = r.fRuns[i];
+
+ SkASSERT_RELEASE(lRun.fStrikeSpec.descriptor() == rRun.fStrikeSpec.descriptor());
+
+ // color can be changed
+ //SkASSERT(lRun.fColor == rRun.fColor);
+ SkASSERT_RELEASE(lRun.fInitialized == rRun.fInitialized);
+
+ SkASSERT_RELEASE(lRun.fSubRunInfo.count() == rRun.fSubRunInfo.count());
+        for (int j = 0; j < lRun.fSubRunInfo.count(); j++) {
+ const SubRun& lSubRun = lRun.fSubRunInfo[j];
+ const SubRun& rSubRun = rRun.fSubRunInfo[j];
+
+ // TODO we can do this check, but we have to apply the VM to the old vertex bounds
+ //SkASSERT_RELEASE(lSubRun.vertexBounds() == rSubRun.vertexBounds());
+
+ if (lSubRun.strike()) {
+ SkASSERT_RELEASE(rSubRun.strike());
+ SkASSERT_RELEASE(GrTextStrike::GetKey(*lSubRun.strike()) ==
+ GrTextStrike::GetKey(*rSubRun.strike()));
+
+ } else {
+ SkASSERT_RELEASE(!rSubRun.strike());
+ }
+
+ SkASSERT_RELEASE(lSubRun.vertexStartIndex() == rSubRun.vertexStartIndex());
+ SkASSERT_RELEASE(lSubRun.vertexEndIndex() == rSubRun.vertexEndIndex());
+ SkASSERT_RELEASE(lSubRun.glyphStartIndex() == rSubRun.glyphStartIndex());
+ SkASSERT_RELEASE(lSubRun.glyphEndIndex() == rSubRun.glyphEndIndex());
+ SkASSERT_RELEASE(lSubRun.maskFormat() == rSubRun.maskFormat());
+ SkASSERT_RELEASE(lSubRun.drawAsDistanceFields() == rSubRun.drawAsDistanceFields());
+ SkASSERT_RELEASE(lSubRun.hasUseLCDText() == rSubRun.hasUseLCDText());
+ }
+
+ SkASSERT_RELEASE(lRun.fPathGlyphs.count() == rRun.fPathGlyphs.count());
+ for (int i = 0; i < lRun.fPathGlyphs.count(); i++) {
+ const Run::PathGlyph& lPathGlyph = lRun.fPathGlyphs[i];
+ const Run::PathGlyph& rPathGlyph = rRun.fPathGlyphs[i];
+
+ SkASSERT_RELEASE(lPathGlyph.fPath == rPathGlyph.fPath);
+ // We can't assert that these have the same translations
+ }
+ }
+}
+
+void GrTextBlob::SubRun::computeTranslation(const SkMatrix& viewMatrix,
+ SkScalar x, SkScalar y, SkScalar* transX,
+ SkScalar* transY) {
+    // Don't apply the view matrix translation for distance-field or fallback subruns.
+ calculate_translation(!this->drawAsDistanceFields() && !this->isFallback(), viewMatrix,
+ x, y, fCurrentViewMatrix, fX, fY, transX, transY);
+ fCurrentViewMatrix = viewMatrix;
+ fX = x;
+ fY = y;
+}
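A standalone check of the reuse arithmetic in mustRegenerate() and
calculate_translation(): with an unchanged 2x-scale view matrix, moving the blob
origin by (3, 5) in source space shifts the cached device-space vertices by
(6, 10): integers, so the blob can be reused without regeneration.

    #include <cmath>
    #include <cstdio>

    int main() {
        float scaleX = 2.0f, skewX = 0.0f, translateX = 0.0f;  // new matrix == old matrix
        float x = 3.0f, y = 5.0f, initialX = 0.0f, initialY = 0.0f;
        float transX = translateX + scaleX * (x - initialX)
                     + skewX * (y - initialY) - translateX;    // == 6
        bool reusable = (transX == std::floor(transX));
        std::printf("transX = %g, reusable = %d\n", transX, reusable);
    }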
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlob.h b/gfx/skia/skia/src/gpu/text/GrTextBlob.h
new file mode 100644
index 0000000000..ddea88d15f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlob.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextBlob_DEFINED
+#define GrTextBlob_DEFINED
+
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrDrawOpAtlas.h"
+#include "src/gpu/text/GrStrikeCache.h"
+#include "src/gpu/text/GrTextContext.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+class GrAtlasManager;
+struct GrDistanceFieldAdjustTable;
+struct GrGlyph;
+
+class SkTextBlob;
+class SkTextBlobRunIterator;
+
+// With this flag enabled, the GrTextContext will, as a sanity check, regenerate every blob
+// that comes in to verify the integrity of its cache
+#define CACHE_SANITY_CHECK 0
+
+/*
+ * A GrTextBlob contains a fully processed SkTextBlob, suitable for nearly immediate drawing
+ * on the GPU. These are initially created with valid positions and colors, but invalid
+ * texture coordinates. The GrTextBlob itself has a few Blob-wide properties, and also
+ * consists of a number of runs. Runs inside a blob are flushed individually so they can be
+ * reordered.
+ *
+ * The only thing (aside from a memcpy) required to flush a GrTextBlob is to ensure that
+ * the GrAtlas will not evict anything the Blob needs.
+ *
+ * Note: This struct should really be named GrCachedAtlasTextBlob, but that is too verbose.
+ *
+ * *WARNING* If you add new fields to this struct, then you may need to update AssertEqual
+ */
+class GrTextBlob : public SkNVRefCnt<GrTextBlob>, public SkGlyphRunPainterInterface {
+ struct Run;
+public:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrTextBlob);
+
+ class VertexRegenerator;
+
+ void generateFromGlyphRunList(const GrShaderCaps& shaderCaps,
+ const GrTextContext::Options& options,
+ const SkPaint& paint,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const SkGlyphRunList& glyphRunList,
+ SkGlyphRunListPainter* glyphPainter);
+
+ static sk_sp<GrTextBlob> Make(
+ int glyphCount,
+ int runCount,
+ GrColor color,
+ GrStrikeCache* strikeCache);
+
+ /**
+ * We currently force regeneration of a blob if old or new matrix differ in having perspective.
+ * If we ever change that then the key must contain the perspectiveness when there are distance
+ * fields as perspective distance field use 3 component vertex positions and non-perspective
+ * uses 2.
+ */
+ struct Key {
+ Key() {
+ sk_bzero(this, sizeof(Key));
+ }
+ uint32_t fUniqueID;
+ // Color may affect the gamma of the mask we generate, but in a fairly limited way.
+        // Each color is assigned to one of a fixed number of buckets based on its
+ // luminance. For each luminance bucket there is a "canonical color" that
+ // represents the bucket. This functionality is currently only supported for A8
+ SkColor fCanonicalColor;
+ SkPaint::Style fStyle;
+ SkPixelGeometry fPixelGeometry;
+ bool fHasBlur;
+ uint32_t fScalerContextFlags;
+
+ bool operator==(const Key& other) const {
+ return 0 == memcmp(this, &other, sizeof(Key));
+ }
+ };
+
+ void setupKey(const GrTextBlob::Key& key,
+ const SkMaskFilterBase::BlurRec& blurRec,
+ const SkPaint& paint) {
+ fKey = key;
+ if (key.fHasBlur) {
+ fBlurRec = blurRec;
+ }
+ if (key.fStyle != SkPaint::kFill_Style) {
+ fStrokeInfo.fFrameWidth = paint.getStrokeWidth();
+ fStrokeInfo.fMiterLimit = paint.getStrokeMiter();
+ fStrokeInfo.fJoin = paint.getStrokeJoin();
+ }
+ }
+
+ static const Key& GetKey(const GrTextBlob& blob) {
+ return blob.fKey;
+ }
+
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(&key, sizeof(Key));
+ }
+
+ void operator delete(void* p) {
+ ::operator delete(p);
+ }
+
+ void* operator new(size_t) {
+ SK_ABORT("All blobs are created by placement new.");
+ }
+
+ void* operator new(size_t, void* p) { return p; }
+
+ bool hasDistanceField() const { return SkToBool(fTextType & kHasDistanceField_TextType); }
+ bool hasBitmap() const { return SkToBool(fTextType & kHasBitmap_TextType); }
+ void setHasDistanceField() { fTextType |= kHasDistanceField_TextType; }
+ void setHasBitmap() { fTextType |= kHasBitmap_TextType; }
+
+ int runCountLimit() const { return fRunCountLimit; }
+
+ Run* pushBackRun() {
+ SkASSERT(fRunCount < fRunCountLimit);
+
+        // If there is more than one run, connect up the subruns.
+ if (fRunCount > 0) {
+ SubRun& newRun = fRuns[fRunCount].fSubRunInfo.back();
+ SubRun& lastRun = fRuns[fRunCount - 1].fSubRunInfo.back();
+ newRun.setAsSuccessor(lastRun);
+ }
+
+ fRunCount++;
+ return this->currentRun();
+ }
+
+ void setMinAndMaxScale(SkScalar scaledMin, SkScalar scaledMax) {
+ // we init fMaxMinScale and fMinMaxScale in the constructor
+ fMaxMinScale = SkMaxScalar(scaledMin, fMaxMinScale);
+ fMinMaxScale = SkMinScalar(scaledMax, fMinMaxScale);
+ }
+
+ static size_t GetVertexStride(GrMaskFormat maskFormat, bool hasWCoord) {
+ switch (maskFormat) {
+ case kA8_GrMaskFormat:
+ return hasWCoord ? kGrayTextDFPerspectiveVASize : kGrayTextVASize;
+ case kARGB_GrMaskFormat:
+ return hasWCoord ? kColorTextPerspectiveVASize : kColorTextVASize;
+ default:
+ SkASSERT(!hasWCoord);
+ return kLCDTextVASize;
+ }
+ }
+
+ bool mustRegenerate(const SkPaint&, bool, const SkMaskFilterBase::BlurRec& blurRec,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y);
+
+ void flush(GrTextTarget*, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& paint, const SkPMColor4f& filteredColor, const GrClip& clip,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y);
+
+ void computeSubRunBounds(SkRect* outBounds, int runIndex, int subRunIndex,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ bool needsGlyphTransform) {
+        // We don't yet position distance field text on the CPU, so we have to map the vertex bounds
+ // into device space.
+ // We handle vertex bounds differently for distance field text and bitmap text because
+ // the vertex bounds of bitmap text are in device space. If we are flushing multiple runs
+ // from one blob then we are going to pay the price here of mapping the rect for each run.
+ const Run& run = fRuns[runIndex];
+ const SubRun& subRun = run.fSubRunInfo[subRunIndex];
+ *outBounds = subRun.vertexBounds();
+ if (needsGlyphTransform) {
+ // Distance field text is positioned with the (X,Y) as part of the glyph position,
+ // and currently the view matrix is applied on the GPU
+ outBounds->offset(x - fInitialX, y - fInitialY);
+ viewMatrix.mapRect(outBounds);
+ } else {
+ // Bitmap text is fully positioned on the CPU, and offset by an (X,Y) translate in
+ // device space.
+ SkMatrix boundsMatrix = fInitialViewMatrixInverse;
+
+ boundsMatrix.postTranslate(-fInitialX, -fInitialY);
+
+ boundsMatrix.postTranslate(x, y);
+
+ boundsMatrix.postConcat(viewMatrix);
+ boundsMatrix.mapRect(outBounds);
+
+ // Due to floating point numerical inaccuracies, we have to round out here
+ outBounds->roundOut(outBounds);
+ }
+ }
+
+ // position + local coord
+ static const size_t kColorTextVASize = sizeof(SkPoint) + sizeof(SkIPoint16);
+ static const size_t kColorTextPerspectiveVASize = sizeof(SkPoint3) + sizeof(SkIPoint16);
+ static const size_t kGrayTextVASize = sizeof(SkPoint) + sizeof(GrColor) + sizeof(SkIPoint16);
+ static const size_t kGrayTextDFPerspectiveVASize =
+ sizeof(SkPoint3) + sizeof(GrColor) + sizeof(SkIPoint16);
+ static const size_t kLCDTextVASize = kGrayTextVASize;
+ static const size_t kMaxVASize = kGrayTextDFPerspectiveVASize;
+ static const int kVerticesPerGlyph = 4;
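+    // For reference, a rough sketch of the sizes above on a typical build (assuming
+    // 8-byte SkPoint, 12-byte SkPoint3, 4-byte GrColor, 4-byte SkIPoint16):
+    // color text = 12, color perspective = 16, gray/LCD = 16, and gray DF
+    // perspective (the max) = 20 bytes per vertex.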
+
+ static void AssertEqual(const GrTextBlob&, const GrTextBlob&);
+
+ // The color here is the GrPaint color, and it is used to determine whether we
+ // have to regenerate LCD text blobs.
+ // We use this color vs the SkPaint color because it has the colorfilter applied.
+ void initReusableBlob(SkColor luminanceColor, const SkMatrix& viewMatrix,
+ SkScalar x, SkScalar y) {
+ fLuminanceColor = luminanceColor;
+ this->setupViewMatrix(viewMatrix, x, y);
+ }
+
+ void initThrowawayBlob(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ this->setupViewMatrix(viewMatrix, x, y);
+ }
+
+ const Key& key() const { return fKey; }
+
+ size_t size() const { return fSize; }
+
+ ~GrTextBlob() override {
+ for (int i = 0; i < fRunCountLimit; i++) {
+ fRuns[i].~Run();
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////
+ // Internal test methods
+ std::unique_ptr<GrDrawOp> test_makeOp(int glyphCount, uint16_t run, uint16_t subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ const SkPaint& paint, const SkPMColor4f& filteredColor,
+ const SkSurfaceProps&, const GrDistanceFieldAdjustTable*,
+ GrTextTarget*);
+
+private:
+ GrTextBlob(GrStrikeCache* strikeCache) : fStrikeCache{strikeCache} { }
+
+ // This function will only be called when we are generating a blob from scratch. We record the
+    // initial view matrix and initial offsets (x, y), because we record vertex bounds relative to
+ // these numbers. When blobs are reused with new matrices, we need to return to model space so
+ // we can update the vertex bounds appropriately.
+ void setupViewMatrix(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ fInitialViewMatrix = viewMatrix;
+ if (!viewMatrix.invert(&fInitialViewMatrixInverse)) {
+ fInitialViewMatrixInverse = SkMatrix::I();
+ }
+ fInitialX = x;
+ fInitialY = y;
+
+ // make sure all initial subruns have the correct VM and X/Y applied
+ for (int i = 0; i < fRunCountLimit; i++) {
+ fRuns[i].fSubRunInfo[0].init(fInitialViewMatrix, x, y);
+ }
+ }
+
+ class SubRun {
+ public:
+ SubRun(Run* run, const SkStrikeSpec& strikeSpec, GrColor color)
+ : fColor{color}
+ , fRun{run}
+ , fStrikeSpec{strikeSpec} {}
+
+ // When used with emplace_back, this constructs a SubRun from the last SubRun in an array.
+ //SubRun(SkSTArray<1, SubRun>* subRunList)
+ // : fColor{subRunList->fromBack(1).fColor} { }
+
+ void appendGlyph(GrGlyph* glyph, SkRect dstRect);
+
+ // TODO when this object is more internal, drop the privacy
+ void resetBulkUseToken() { fBulkUseToken.reset(); }
+ GrDrawOpAtlas::BulkUseTokenUpdater* bulkUseToken() { return &fBulkUseToken; }
+ void setStrike(sk_sp<GrTextStrike> strike) { fStrike = std::move(strike); }
+ GrTextStrike* strike() const { return fStrike.get(); }
+ sk_sp<GrTextStrike> refStrike() const { return fStrike; }
+
+ void setAtlasGeneration(uint64_t atlasGeneration) { fAtlasGeneration = atlasGeneration;}
+ uint64_t atlasGeneration() const { return fAtlasGeneration; }
+
+ size_t byteCount() const { return fVertexEndIndex - fVertexStartIndex; }
+ size_t vertexStartIndex() const { return fVertexStartIndex; }
+ size_t vertexEndIndex() const { return fVertexEndIndex; }
+
+ uint32_t glyphCount() const { return fGlyphEndIndex - fGlyphStartIndex; }
+ uint32_t glyphStartIndex() const { return fGlyphStartIndex; }
+ uint32_t glyphEndIndex() const { return fGlyphEndIndex; }
+ void setColor(GrColor color) { fColor = color; }
+ GrColor color() const { return fColor; }
+ void setMaskFormat(GrMaskFormat format) { fMaskFormat = format; }
+ GrMaskFormat maskFormat() const { return fMaskFormat; }
+
+ void setAsSuccessor(const SubRun& prev) {
+ fGlyphStartIndex = prev.glyphEndIndex();
+ fGlyphEndIndex = fGlyphStartIndex;
+
+ fVertexStartIndex = prev.vertexEndIndex();
+ fVertexEndIndex = fVertexStartIndex;
+
+ // copy over viewmatrix settings
+ this->init(prev.fCurrentViewMatrix, prev.fX, prev.fY);
+ }
+
+ const SkRect& vertexBounds() const { return fVertexBounds; }
+ void joinGlyphBounds(const SkRect& glyphBounds) {
+ fVertexBounds.joinNonEmptyArg(glyphBounds);
+ }
+
+ void init(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ fCurrentViewMatrix = viewMatrix;
+ fX = x;
+ fY = y;
+ }
+
+ // This function assumes the translation will be applied before it is called again
+ void computeTranslation(const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ SkScalar* transX, SkScalar* transY);
+
+ // df properties
+ void setDrawAsDistanceFields() { fFlags.drawAsSdf = true; }
+ bool drawAsDistanceFields() const { return fFlags.drawAsSdf; }
+ void setUseLCDText(bool useLCDText) { fFlags.useLCDText = useLCDText; }
+ bool hasUseLCDText() const { return fFlags.useLCDText; }
+ void setAntiAliased(bool antiAliased) { fFlags.antiAliased = antiAliased; }
+ bool isAntiAliased() const { return fFlags.antiAliased; }
+ void setHasWCoord(bool hasW) { fFlags.hasWCoord = hasW; }
+ bool hasWCoord() const { return fFlags.hasWCoord; }
+ void setNeedsTransform(bool needsTransform) { fFlags.needsTransform = needsTransform; }
+ bool needsTransform() const { return fFlags.needsTransform; }
+ void setFallback() { fFlags.argbFallback = true; }
+ bool isFallback() { return fFlags.argbFallback; }
+
+ const SkStrikeSpec& strikeSpec() const { return fStrikeSpec; }
+
+ private:
+ GrDrawOpAtlas::BulkUseTokenUpdater fBulkUseToken;
+ sk_sp<GrTextStrike> fStrike;
+ SkMatrix fCurrentViewMatrix;
+ SkRect fVertexBounds = SkRectPriv::MakeLargestInverted();
+ uint64_t fAtlasGeneration{GrDrawOpAtlas::kInvalidAtlasGeneration};
+ size_t fVertexStartIndex{0};
+ size_t fVertexEndIndex{0};
+ uint32_t fGlyphStartIndex{0};
+ uint32_t fGlyphEndIndex{0};
+ SkScalar fX;
+ SkScalar fY;
+ GrColor fColor{GrColor_ILLEGAL};
+ GrMaskFormat fMaskFormat{kA8_GrMaskFormat};
+ struct {
+ bool drawAsSdf:1;
+ bool useLCDText:1;
+ bool antiAliased:1;
+ bool hasWCoord:1;
+ bool needsTransform:1;
+ bool argbFallback:1;
+ } fFlags{false, false, false, false, false, false};
+ Run* const fRun;
+ const SkStrikeSpec& fStrikeSpec;
+    }; // SubRun
+
+ /*
+ * Each Run inside of the blob can have its texture coordinates regenerated if required.
+ * To determine if regeneration is necessary, fAtlasGeneration is used. If there have been
+ * any evictions inside of the atlas, then we will simply regenerate Runs. We could track
+ * this at a more fine grained level, but its not clear if this is worth it, as evictions
+ * should be fairly rare.
+ *
+ * One additional point, each run can contain glyphs with any of the three mask formats.
+ * We call these SubRuns. Because a subrun must be a contiguous range, we have to create
+ * a new subrun each time the mask format changes in a run. In theory, a run can have as
+ * many SubRuns as it has glyphs, ie if a run alternates between color emoji and A8. In
+ * practice, the vast majority of runs have only a single subrun.
+ *
+ * Finally, for runs where the entire thing is too large for the GrTextContext to
+ * handle, we have a bit to mark the run as flushable via rendering as paths or as scaled
+ * glyphs. It would be a bit expensive to figure out ahead of time whether or not a run
+ * can flush in this manner, so we always allocate vertices for the run, regardless of
+ * whether or not it is too large. The benefit of this strategy is that we can always reuse
+ * a blob allocation regardless of viewmatrix changes. We could store positions for these
+ * glyphs, however, it's not clear if this is a win because we'd still have to either go to the
+ * glyph cache to get the path at flush time, or hold onto the path in the cache, which
+ * would greatly increase the memory of these cached items.
+ */
+ struct Run {
+ explicit Run(GrTextBlob* blob, GrColor color)
+ : fBlob{blob}, fColor{color} {
+ // To ensure we always have one subrun, we push back a fresh subrun here
+ fSubRunInfo.emplace_back(this, fStrikeSpec, color);
+ }
+
+ // Sets the last subrun of this run to use w values.
+ void setSubRunHasW(bool hasWCoord) {
+ SubRun& subRun = this->fSubRunInfo.back();
+ subRun.setHasWCoord(hasWCoord);
+ }
+
+ // inits the override descriptor on the current run. All following subruns must use this
+ // descriptor
+ SubRun* initARGBFallback() {
+ fFallbackStrikeSpec.reset(new SkStrikeSpec{});
+ // Push back a new subrun to fill and set the override descriptor
+ SubRun* subRun = this->pushBackSubRun(*fFallbackStrikeSpec, fColor);
+ subRun->setMaskFormat(kARGB_GrMaskFormat);
+ subRun->setFallback();
+ return subRun;
+ }
+
+ // Appends a glyph to the blob as a path only.
+ void appendPathGlyph(
+ const SkPath& path, SkPoint position, SkScalar scale, bool preTransformed);
+
+ // Append a glyph to the sub run, taking care to switch to a new subrun if needed.
+ void switchSubRunIfNeededAndAppendGlyph(GrGlyph* glyph,
+ const sk_sp<GrTextStrike>& strike,
+ const SkRect& destRect,
+ bool needsTransform);
+
+ // Used when the glyph in the cache has the CTM already applied, therefore no transform
+ // is needed during rendering.
+ void appendDeviceSpaceGlyph(const sk_sp<GrTextStrike>& strike,
+ const SkGlyph& skGlyph,
+ SkPoint origin);
+
+ // The glyph is oriented upright in the cache and needs to be transformed onto the screen.
+ void appendSourceSpaceGlyph(const sk_sp<GrTextStrike>& strike,
+ const SkGlyph& skGlyph,
+ SkPoint origin,
+ SkScalar textScale);
+
+ void setupFont(const SkStrikeSpec& strikeSpec);
+
+ void setRunFontAntiAlias(bool aa) {
+ fAntiAlias = aa;
+ }
+
+ // Sets the last subrun of this run to use distance field text.
+ void setSubRunHasDistanceFields(bool hasLCD, bool isAntiAlias, bool hasWCoord) {
+ SubRun& subRun = fSubRunInfo.back();
+ subRun.setUseLCDText(hasLCD);
+ subRun.setAntiAliased(isAntiAlias);
+ subRun.setDrawAsDistanceFields();
+ subRun.setHasWCoord(hasWCoord);
+ }
+
+ SubRun* pushBackSubRun(const SkStrikeSpec& desc, GrColor color) {
+ SubRun& newSubRun = fSubRunInfo.emplace_back(this, desc, color);
+
+ const SubRun& prevSubRun = fSubRunInfo.fromBack(1);
+
+ // Forward glyph / vertex information to seed the new sub run
+ newSubRun.setAsSuccessor(prevSubRun);
+ return &newSubRun;
+ }
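+
+ // Illustrative sketch (not upstream code): pushBackSubRun() seeds each new
+ // SubRun from its predecessor via setAsSuccessor(), so a freshly pushed
+ // subrun starts out empty exactly where the previous one ended:
+ //
+ // SubRun* next = run->pushBackSubRun(strikeSpec, color);
+ // SkASSERT(next->glyphStartIndex() == next->glyphEndIndex());
+ // SkASSERT(next->vertexStartIndex() == next->vertexEndIndex());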
+
+ // Any glyphs that can't be rendered with the base or override descriptor
+ // are rendered as paths
+ struct PathGlyph {
+ PathGlyph(const SkPath& path, SkScalar x, SkScalar y, SkScalar scale, bool preXformed)
+ : fPath(path)
+ , fX(x)
+ , fY(y)
+ , fScale(scale)
+ , fPreTransformed(preXformed) {}
+ SkPath fPath;
+ SkScalar fX;
+ SkScalar fY;
+ SkScalar fScale;
+ bool fPreTransformed;
+ };
+
+ SkSTArray<1, SubRun> fSubRunInfo;
+ SkStrikeSpec fStrikeSpec;
+
+ // Distance field text cannot draw color emoji, and so has to fall back. However,
+ // though the distance field text and the color emoji may share the same run, they
+ // will have different descriptors. If fFallbackStrikeSpec is non-null, then it
+ // will be used in place of the run's descriptor to regen texture coords.
+ std::unique_ptr<SkStrikeSpec> fFallbackStrikeSpec;
+
+ SkTArray<PathGlyph> fPathGlyphs;
+
+ bool fAntiAlias{false}; // needed mainly for rendering paths
+ bool fInitialized{false};
+
+ GrTextBlob* const fBlob;
+ GrColor fColor;
+ }; // Run
+
+ std::unique_ptr<GrAtlasTextOp> makeOp(
+ const SubRun& info, int glyphCount, uint16_t run, uint16_t subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y, const SkIRect& clipRect,
+ const SkPaint& paint, const SkPMColor4f& filteredColor, const SkSurfaceProps&,
+ const GrDistanceFieldAdjustTable*, GrTextTarget*);
+
+ // currentRun, startRun, and the process* calls are all used by the SkGlyphRunPainter, and
+ // live in SkGlyphRunPainter.cpp file.
+ Run* currentRun();
+
+ void startRun(const SkGlyphRun& glyphRun, bool useSDFT) override;
+
+ void processDeviceMasks(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) override;
+
+ void processSourcePaths(SkSpan<const SkGlyphPos> paths,
+ const SkStrikeSpec& strikeSpec) override;
+
+ void processDevicePaths(SkSpan<const SkGlyphPos> paths) override;
+
+ void processSourceSDFT(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ const SkFont& runFont,
+ SkScalar minScale,
+ SkScalar maxScale,
+ bool hasWCoord) override;
+
+ void processSourceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec,
+ bool hasW) override;
+
+ void processDeviceFallback(SkSpan<const SkGlyphPos> masks,
+ const SkStrikeSpec& strikeSpec) override;
+
+ struct StrokeInfo {
+ SkScalar fFrameWidth;
+ SkScalar fMiterLimit;
+ SkPaint::Join fJoin;
+ };
+
+ enum TextType {
+ kHasDistanceField_TextType = 0x1,
+ kHasBitmap_TextType = 0x2,
+ };
+
+ // all glyph / vertex offsets are into these pools.
+ char* fVertices;
+ GrGlyph** fGlyphs;
+ Run* fRuns;
+
+ // Lifetime: The GrStrikeCache is owned by and has the same lifetime as the GrRecordingContext.
+ // The GrRecordingContext also owns the GrTextBlob cache which owns this GrTextBlob.
+ GrStrikeCache* const fStrikeCache;
+ SkMaskFilterBase::BlurRec fBlurRec;
+ StrokeInfo fStrokeInfo;
+ Key fKey;
+ SkMatrix fInitialViewMatrix;
+ SkMatrix fInitialViewMatrixInverse;
+ size_t fSize;
+ SkColor fLuminanceColor;
+ SkScalar fInitialX;
+ SkScalar fInitialY;
+
+ // We can reuse distance field text, but only if the new viewmatrix would not result in
+ // a mip change. Because there can be multiple runs in a blob, we track the overall
+ // maximum minimum scale and minimum maximum scale we can support before we need to regen.
+ SkScalar fMaxMinScale{-SK_ScalarMax};
+ SkScalar fMinMaxScale{SK_ScalarMax};
+ int fRunCount{0};
+ int fRunCountLimit;
+ uint8_t fTextType{0};
+};
+
+/**
+ * Used to produce vertices for a subrun of a blob. The vertices are cached in the blob itself.
+ * This is invoked each time a sub run is drawn. It regenerates the vertex data as required either
+ * because of changes to the atlas or because of different draw parameters (e.g. color change). In
+ * rare cases the draw may have to be interrupted and flushed in the middle of the sub run in order
+ * to free up atlas space. Thus, this generator is stateful and should be invoked in a loop until
+ * the entire sub run has been completed.
+ */
+class GrTextBlob::VertexRegenerator {
+public:
+ /**
+ * Consecutive VertexRegenerators often use the same SkGlyphCache. If the same instance of
+ * SkAutoGlyphCache is reused then it can save the cost of multiple detach/attach operations of
+ * SkGlyphCache.
+ */
+ VertexRegenerator(GrResourceProvider*, GrTextBlob*, int runIdx, int subRunIdx,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y, GrColor color,
+ GrDeferredUploadTarget*, GrStrikeCache*, GrAtlasManager*,
+ SkExclusiveStrikePtr*);
+
+ struct Result {
+ /**
+ * Was regenerate() able to draw all the glyphs from the sub run? If not flush all glyph
+ * draws and call regenerate() again.
+ */
+ bool fFinished = true;
+
+ /**
+ * How many glyphs were regenerated. Will be equal to the sub run's glyph count if
+ * fFinished is true.
+ */
+ int fGlyphsRegenerated = 0;
+
+ /**
+ * Pointer where the caller finds the first regenerated vertex.
+ */
+ const char* fFirstVertex;
+ };
+
+ bool regenerate(Result*);
+
+private:
+ bool doRegen(Result*, bool regenPos, bool regenCol, bool regenTexCoords, bool regenGlyphs);
+
+ GrResourceProvider* fResourceProvider;
+ const SkMatrix& fViewMatrix;
+ GrTextBlob* fBlob;
+ GrDeferredUploadTarget* fUploadTarget;
+ GrStrikeCache* fGlyphCache;
+ GrAtlasManager* fFullAtlasManager;
+ SkExclusiveStrikePtr* fLazyStrike;
+ SubRun* fSubRun;
+ GrColor fColor;
+ SkScalar fTransX;
+ SkScalar fTransY;
+
+ uint32_t fRegenFlags = 0;
+ int fCurrGlyph = 0;
+ bool fBrokenRun = false;
+};
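+
+// A minimal usage sketch (illustrative, not upstream code). The regenerator is
+// stateful, so the class comment above implies a loop of roughly this shape,
+// where flushGlyphDraws() is a hypothetical stand-in for whatever the caller
+// does to submit draws and free atlas space:
+//
+// GrTextBlob::VertexRegenerator::Result result;
+// do {
+// if (!regenerator.regenerate(&result)) {
+// break; // unrecoverable failure; drop the draw
+// }
+// // ... record a draw for result.fGlyphsRegenerated glyphs starting at
+// // result.fFirstVertex ...
+// if (!result.fFinished) {
+// flushGlyphDraws(); // hypothetical: flush, freeing atlas space
+// }
+// } while (!result.fFinished);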
+
+#endif // GrTextBlob_DEFINED
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp
new file mode 100644
index 0000000000..c3a6860231
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/text/GrTextBlobCache.h"
+
+DECLARE_SKMESSAGEBUS_MESSAGE(GrTextBlobCache::PurgeBlobMessage)
+
+static inline bool SkShouldPostMessageToBus(
+ const GrTextBlobCache::PurgeBlobMessage& msg, uint32_t msgBusUniqueID) {
+ return msg.fContextID == msgBusUniqueID;
+}
+
+GrTextBlobCache::~GrTextBlobCache() {
+ this->freeAll();
+}
+
+void GrTextBlobCache::freeAll() {
+ fBlobIDCache.foreach([this](uint32_t, BlobIDCacheEntry* entry) {
+ for (const auto& blob : entry->fBlobs) {
+ fBlobList.remove(blob.get());
+ }
+ });
+
+ fBlobIDCache.reset();
+
+ fCurrentSize = 0;
+
+ // There should be no allocations in the memory pool at this point
+ SkASSERT(fBlobList.isEmpty());
+}
+
+void GrTextBlobCache::PostPurgeBlobMessage(uint32_t blobID, uint32_t cacheID) {
+ SkASSERT(blobID != SK_InvalidGenID);
+ SkMessageBus<PurgeBlobMessage>::Post(PurgeBlobMessage(blobID, cacheID));
+}
+
+void GrTextBlobCache::purgeStaleBlobs() {
+ SkTArray<PurgeBlobMessage> msgs;
+ fPurgeBlobInbox.poll(&msgs);
+
+ for (const auto& msg : msgs) {
+ auto* idEntry = fBlobIDCache.find(msg.fBlobID);
+ if (!idEntry) {
+ // no cache entries for id
+ continue;
+ }
+
+ // remove all blob entries from the LRU list
+ for (const auto& blob : idEntry->fBlobs) {
+ fCurrentSize -= blob->size();
+ fBlobList.remove(blob.get());
+ }
+
+ // drop the idEntry itself (unrefs all blobs)
+ fBlobIDCache.remove(msg.fBlobID);
+ }
+}
+
+void GrTextBlobCache::checkPurge(GrTextBlob* blob) {
+ // First, purge all stale blob IDs.
+ this->purgeStaleBlobs();
+
+ // If we are still over budget, then unref until we are below budget again
+ if (fCurrentSize > fSizeBudget) {
+ BitmapBlobList::Iter iter;
+ iter.init(fBlobList, BitmapBlobList::Iter::kTail_IterStart);
+ GrTextBlob* lruBlob = nullptr;
+ while (fCurrentSize > fSizeBudget && (lruBlob = iter.get()) && lruBlob != blob) {
+ // Backup the iterator before removing and unrefing the blob
+ iter.prev();
+
+ this->remove(lruBlob);
+ }
+
+ // If we break out of the loop with lruBlob == blob, then we haven't purged enough;
+ // use the callback and try to free some more. If we are still over budget after this,
+ // then this single textblob is over our budget.
+ if (blob && lruBlob == blob) {
+ (*fCallback)(fData);
+ }
+
+#ifdef SPEW_BUDGET_MESSAGE
+ if (fCurrentSize > fSizeBudget) {
+ SkDebugf("Single textblob is larger than our whole budget");
+ }
+#endif
+ }
+}
+
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h
new file mode 100644
index 0000000000..5479112085
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextBlobCache_DEFINED
+#define GrTextBlobCache_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/gpu/text/GrTextBlob.h"
+
+class GrTextBlobCache {
+public:
+ /**
+ * The callback function used by the cache when it is still over budget after a purge. The
+ * passed in 'data' is the same 'data' handed to setOverbudgetCallback.
+ */
+ typedef void (*PFOverBudgetCB)(void* data);
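+
+ // Illustrative sketch of a matching callback (names are hypothetical):
+ //
+ // static void onOverBudget(void* data) {
+ // // e.g. flush pending draws so that checkPurge() can evict the
+ // // blobs they were keeping alive
+ // static_cast<MyFlusher*>(data)->flush();
+ // }
+ //
+ // GrTextBlobCache cache{onOverBudget, &myFlusher, contextUniqueID};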
+
+ GrTextBlobCache(PFOverBudgetCB cb, void* data, uint32_t uniqueID)
+ : fCallback(cb)
+ , fData(data)
+ , fSizeBudget(kDefaultBudget)
+ , fUniqueID(uniqueID)
+ , fPurgeBlobInbox(uniqueID) {
+ SkASSERT(cb && data);
+ }
+ ~GrTextBlobCache();
+
+ sk_sp<GrTextBlob> makeBlob(const SkGlyphRunList& glyphRunList,
+ GrColor color,
+ GrStrikeCache* strikeCache) {
+ return GrTextBlob::Make(
+ glyphRunList.totalGlyphCount(), glyphRunList.size(), color, strikeCache);
+ }
+
+ sk_sp<GrTextBlob> makeCachedBlob(const SkGlyphRunList& glyphRunList,
+ const GrTextBlob::Key& key,
+ const SkMaskFilterBase::BlurRec& blurRec,
+ const SkPaint& paint,
+ GrColor color,
+ GrStrikeCache* strikeCache) {
+ sk_sp<GrTextBlob> cacheBlob(makeBlob(glyphRunList, color, strikeCache));
+ cacheBlob->setupKey(key, blurRec, paint);
+ this->add(cacheBlob);
+ glyphRunList.temporaryShuntBlobNotifyAddedToCache(fUniqueID);
+ return cacheBlob;
+ }
+
+ sk_sp<GrTextBlob> find(const GrTextBlob::Key& key) const {
+ const auto* idEntry = fBlobIDCache.find(key.fUniqueID);
+ return idEntry ? idEntry->find(key) : nullptr;
+ }
+
+ void remove(GrTextBlob* blob) {
+ auto id = GrTextBlob::GetKey(*blob).fUniqueID;
+ auto* idEntry = fBlobIDCache.find(id);
+ SkASSERT(idEntry);
+
+ fCurrentSize -= blob->size();
+ fBlobList.remove(blob);
+ idEntry->removeBlob(blob);
+ if (idEntry->fBlobs.empty()) {
+ fBlobIDCache.remove(id);
+ }
+ }
+
+ void makeMRU(GrTextBlob* blob) {
+ if (fBlobList.head() == blob) {
+ return;
+ }
+
+ fBlobList.remove(blob);
+ fBlobList.addToHead(blob);
+ }
+
+ void freeAll();
+
+ // TODO move to SkTextBlob
+ static void BlobGlyphCount(int* glyphCount, int* runCount, const SkTextBlob* blob) {
+ SkTextBlobRunIterator itCounter(blob);
+ for (; !itCounter.done(); itCounter.next(), (*runCount)++) {
+ *glyphCount += itCounter.glyphCount();
+ }
+ }
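+
+ // Illustrative usage (not upstream code): the counters are accumulated, not
+ // reset, so callers are expected to zero-initialize them first:
+ //
+ // int glyphCount = 0, runCount = 0;
+ // GrTextBlobCache::BlobGlyphCount(&glyphCount, &runCount, blob);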
+
+ void setBudget(size_t budget) {
+ fSizeBudget = budget;
+ this->checkPurge();
+ }
+
+ struct PurgeBlobMessage {
+ PurgeBlobMessage(uint32_t blobID, uint32_t contextUniqueID)
+ : fBlobID(blobID), fContextID(contextUniqueID) {}
+
+ uint32_t fBlobID;
+ uint32_t fContextID;
+ };
+
+ static void PostPurgeBlobMessage(uint32_t blobID, uint32_t cacheID);
+
+ void purgeStaleBlobs();
+
+ size_t usedBytes() const { return fCurrentSize; }
+
+private:
+ using BitmapBlobList = SkTInternalLList<GrTextBlob>;
+
+ struct BlobIDCacheEntry {
+ BlobIDCacheEntry() : fID(SK_InvalidGenID) {}
+ explicit BlobIDCacheEntry(uint32_t id) : fID(id) {}
+
+ static uint32_t GetKey(const BlobIDCacheEntry& entry) {
+ return entry.fID;
+ }
+
+ void addBlob(sk_sp<GrTextBlob> blob) {
+ SkASSERT(blob);
+ SkASSERT(GrTextBlob::GetKey(*blob).fUniqueID == fID);
+ SkASSERT(!this->find(GrTextBlob::GetKey(*blob)));
+
+ fBlobs.emplace_back(std::move(blob));
+ }
+
+ void removeBlob(GrTextBlob* blob) {
+ SkASSERT(blob);
+ SkASSERT(GrTextBlob::GetKey(*blob).fUniqueID == fID);
+
+ auto index = this->findBlobIndex(GrTextBlob::GetKey(*blob));
+ SkASSERT(index >= 0);
+
+ fBlobs.removeShuffle(index);
+ }
+
+ sk_sp<GrTextBlob> find(const GrTextBlob::Key& key) const {
+ auto index = this->findBlobIndex(key);
+ return index < 0 ? nullptr : fBlobs[index];
+ }
+
+ int findBlobIndex(const GrTextBlob::Key& key) const {
+ for (int i = 0; i < fBlobs.count(); ++i) {
+ if (GrTextBlob::GetKey(*fBlobs[i]) == key) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ uint32_t fID;
+ // Current clients don't generate multiple GrAtlasTextBlobs per SkTextBlob, so an array w/
+ // linear search is acceptable. If usage changes, we should re-evaluate this structure.
+ SkSTArray<1, sk_sp<GrTextBlob>, true> fBlobs;
+ };
+
+ void add(sk_sp<GrTextBlob> blob) {
+ auto id = GrTextBlob::GetKey(*blob).fUniqueID;
+ auto* idEntry = fBlobIDCache.find(id);
+ if (!idEntry) {
+ idEntry = fBlobIDCache.set(id, BlobIDCacheEntry(id));
+ }
+
+ // Safe to retain a raw ptr temporarily here, because the cache will hold a ref.
+ GrTextBlob* rawBlobPtr = blob.get();
+ fBlobList.addToHead(rawBlobPtr);
+ fCurrentSize += blob->size();
+ idEntry->addBlob(std::move(blob));
+
+ this->checkPurge(rawBlobPtr);
+ }
+
+ void checkPurge(GrTextBlob* blob = nullptr);
+
+ static const int kMinGrowthSize = 1 << 16;
+ static const int kDefaultBudget = 1 << 22;
+ BitmapBlobList fBlobList;
+ SkTHashMap<uint32_t, BlobIDCacheEntry> fBlobIDCache;
+ PFOverBudgetCB fCallback;
+ void* fData;
+ size_t fSizeBudget;
+ size_t fCurrentSize{0};
+ uint32_t fUniqueID; // unique id to use for messaging
+ SkMessageBus<PurgeBlobMessage>::Inbox fPurgeBlobInbox;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlobVertexRegenerator.cpp b/gfx/skia/skia/src/gpu/text/GrTextBlobVertexRegenerator.cpp
new file mode 100644
index 0000000000..976beaf3d4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlobVertexRegenerator.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/gpu/ops/GrAtlasTextOp.h"
+#include "src/gpu/text/GrAtlasManager.h"
+#include "src/gpu/text/GrTextBlob.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+enum RegenMask {
+ kNoRegen = 0x0,
+ kRegenPos = 0x1,
+ kRegenCol = 0x2,
+ kRegenTex = 0x4,
+ kRegenGlyph = 0x8,
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void regen_positions(char* vertex, size_t vertexStride, SkScalar transX, SkScalar transY) {
+ SkPoint* point = reinterpret_cast<SkPoint*>(vertex);
+ for (int i = 0; i < 4; ++i) {
+ point->fX += transX;
+ point->fY += transY;
+ point = SkTAddOffset<SkPoint>(point, vertexStride);
+ }
+}
+
+static void regen_colors(char* vertex, size_t vertexStride, GrColor color) {
+ // This is a bit wonky, but sometimes we have LCD text, in which case we won't have color
+ // vertices, hence the color offset is computed from the end of the vertex:
+ // vertexStride - sizeof(SkIPoint16) - sizeof(GrColor)
+ size_t colorOffset = vertexStride - sizeof(SkIPoint16) - sizeof(GrColor);
+ GrColor* vcolor = reinterpret_cast<GrColor*>(vertex + colorOffset);
+ for (int i = 0; i < 4; ++i) {
+ *vcolor = color;
+ vcolor = SkTAddOffset<GrColor>(vcolor, vertexStride);
+ }
+}
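+
+// Illustrative note (an assumption drawn from the offset arithmetic above, not
+// upstream documentation): regen_colors() above and regen_texcoords() below
+// rely on a per-vertex layout whose tail is fixed:
+//
+// [ position ... | GrColor (absent for LCD text) | SkIPoint16 tex coords ]
+//
+// Computing offsets from the end of the vertex therefore works whether or not
+// the color attribute is present.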
+
+static void regen_texcoords(char* vertex, size_t vertexStride, const GrGlyph* glyph,
+ bool useDistanceFields) {
+ // This is a bit wonky, but sometimes we have LCD text, in which case we won't have color
+ // vertices, hence vertexStride - sizeof(SkIPoint16)
+ size_t texCoordOffset = vertexStride - sizeof(SkIPoint16);
+
+ uint16_t u0, v0, u1, v1;
+ SkASSERT(glyph);
+ int width = glyph->fBounds.width();
+ int height = glyph->fBounds.height();
+
+ if (useDistanceFields) {
+ u0 = glyph->fAtlasLocation.fX + SK_DistanceFieldInset;
+ v0 = glyph->fAtlasLocation.fY + SK_DistanceFieldInset;
+ u1 = u0 + width - 2 * SK_DistanceFieldInset;
+ v1 = v0 + height - 2 * SK_DistanceFieldInset;
+ } else {
+ u0 = glyph->fAtlasLocation.fX;
+ v0 = glyph->fAtlasLocation.fY;
+ u1 = u0 + width;
+ v1 = v0 + height;
+ }
+ // We pack the 2-bit page index into the low bits of the u and v texture coords
+ uint32_t pageIndex = glyph->pageIndex();
+ SkASSERT(pageIndex < 4);
+ uint16_t uBit = (pageIndex >> 1) & 0x1;
+ uint16_t vBit = pageIndex & 0x1;
+ u0 <<= 1;
+ u0 |= uBit;
+ v0 <<= 1;
+ v0 |= vBit;
+ u1 <<= 1;
+ u1 |= uBit;
+ v1 <<= 1;
+ v1 |= vBit;
+
+ uint16_t* textureCoords = reinterpret_cast<uint16_t*>(vertex + texCoordOffset);
+ textureCoords[0] = u0;
+ textureCoords[1] = v0;
+ textureCoords = SkTAddOffset<uint16_t>(textureCoords, vertexStride);
+ textureCoords[0] = u0;
+ textureCoords[1] = v1;
+ textureCoords = SkTAddOffset<uint16_t>(textureCoords, vertexStride);
+ textureCoords[0] = u1;
+ textureCoords[1] = v0;
+ textureCoords = SkTAddOffset<uint16_t>(textureCoords, vertexStride);
+ textureCoords[0] = u1;
+ textureCoords[1] = v1;
+
+#ifdef DISPLAY_PAGE_INDEX
+ // Enable this to visualize the page from which each glyph is being drawn.
+ // Green Red Magenta Cyan -> 0 1 2 3; Black -> error
+ GrColor hackColor;
+ switch (pageIndex) {
+ case 0:
+ hackColor = GrColorPackRGBA(0, 255, 0, 255);
+ break;
+ case 1:
+ hackColor = GrColorPackRGBA(255, 0, 0, 255);
+ break;
+ case 2:
+ hackColor = GrColorPackRGBA(255, 0, 255, 255);
+ break;
+ case 3:
+ hackColor = GrColorPackRGBA(0, 255, 255, 255);
+ break;
+ default:
+ hackColor = GrColorPackRGBA(0, 0, 0, 255);
+ break;
+ }
+ regen_colors(vertex, vertexStride, hackColor);
+#endif
+}
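+
+// A minimal sketch (illustrative, not part of Skia) showing how the page index
+// packed above could be recovered from a texture coordinate pair, assuming the
+// same one-bit-per-axis packing as regen_texcoords():
+#if 0 // illustrative only
+static uint32_t unpack_page_index(uint16_t u, uint16_t v) {
+ // The high bit of the 2-bit page index lives in u's low bit, the low bit
+ // in v's low bit.
+ return ((u & 0x1) << 1) | (v & 0x1);
+}
+#endif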
+
+GrTextBlob::VertexRegenerator::VertexRegenerator(GrResourceProvider* resourceProvider,
+ GrTextBlob* blob,
+ int runIdx, int subRunIdx,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ GrColor color,
+ GrDeferredUploadTarget* uploadTarget,
+ GrStrikeCache* glyphCache,
+ GrAtlasManager* fullAtlasManager,
+ SkExclusiveStrikePtr* lazyStrike)
+ : fResourceProvider(resourceProvider)
+ , fViewMatrix(viewMatrix)
+ , fBlob(blob)
+ , fUploadTarget(uploadTarget)
+ , fGlyphCache(glyphCache)
+ , fFullAtlasManager(fullAtlasManager)
+ , fLazyStrike(lazyStrike)
+ , fSubRun(&blob->fRuns[runIdx].fSubRunInfo[subRunIdx])
+ , fColor(color) {
+ // Compute translation if any
+ fSubRun->computeTranslation(fViewMatrix, x, y, &fTransX, &fTransY);
+
+ // Because the GrStrikeCache may evict the strike a blob depends on using for
+ // generating its texture coords, we have to track whether or not the strike has
+ // been abandoned. If it hasn't been abandoned, then we can use the GrGlyph*s as is;
+ // otherwise we have to get the new strike, and use that to get the correct glyphs.
+ // Because we do not have the packed ids, and thus can't look up our glyphs in the
+ // new strike, we instead keep our ref to the old strike and use the packed ids from
+ // it. These ids will still be valid as long as we hold the ref. When we are done
+ // updating our cache of the GrGlyph*s, we drop our ref on the old strike.
+ if (fSubRun->strike()->isAbandoned()) {
+ fRegenFlags |= kRegenGlyph;
+ fRegenFlags |= kRegenTex;
+ }
+ if (kARGB_GrMaskFormat != fSubRun->maskFormat() && fSubRun->color() != color) {
+ fRegenFlags |= kRegenCol;
+ }
+ if (0.f != fTransX || 0.f != fTransY) {
+ fRegenFlags |= kRegenPos;
+ }
+}
+
+bool GrTextBlob::VertexRegenerator::doRegen(GrTextBlob::VertexRegenerator::Result* result,
+ bool regenPos, bool regenCol, bool regenTexCoords,
+ bool regenGlyphs) {
+ SkASSERT(!regenGlyphs || regenTexCoords);
+ sk_sp<GrTextStrike> strike;
+ if (regenTexCoords) {
+ fSubRun->resetBulkUseToken();
+
+ const SkStrikeSpec& strikeSpec = fSubRun->strikeSpec();
+
+ if (!*fLazyStrike || (*fLazyStrike)->getDescriptor() != strikeSpec.descriptor()) {
+ *fLazyStrike =
+ strikeSpec.findOrCreateExclusiveStrike(SkStrikeCache::GlobalStrikeCache());
+ }
+
+ if (regenGlyphs) {
+ strike = strikeSpec.findOrCreateGrStrike(fGlyphCache);
+ } else {
+ strike = fSubRun->refStrike();
+ }
+ }
+
+ bool hasW = fSubRun->hasWCoord();
+ auto vertexStride = GetVertexStride(fSubRun->maskFormat(), hasW);
+ char* currVertex = fBlob->fVertices + fSubRun->vertexStartIndex() +
+ fCurrGlyph * kVerticesPerGlyph * vertexStride;
+ result->fFirstVertex = currVertex;
+
+ for (int glyphIdx = fCurrGlyph; glyphIdx < (int)fSubRun->glyphCount(); glyphIdx++) {
+ GrGlyph* glyph = nullptr;
+ if (regenTexCoords) {
+ size_t glyphOffset = glyphIdx + fSubRun->glyphStartIndex();
+
+ if (regenGlyphs) {
+ // Get the id from the old glyph, and use the new strike to lookup
+ // the glyph.
+ SkPackedGlyphID id = fBlob->fGlyphs[glyphOffset]->fPackedID;
+ fBlob->fGlyphs[glyphOffset] = strike->getGlyph(id, fLazyStrike->get());
+ SkASSERT(id == fBlob->fGlyphs[glyphOffset]->fPackedID);
+ }
+ glyph = fBlob->fGlyphs[glyphOffset];
+ SkASSERT(glyph && glyph->fMaskFormat == fSubRun->maskFormat());
+
+ if (!fFullAtlasManager->hasGlyph(glyph)) {
+ GrDrawOpAtlas::ErrorCode code;
+ code = strike->addGlyphToAtlas(fResourceProvider, fUploadTarget, fGlyphCache,
+ fFullAtlasManager, glyph,
+ fLazyStrike->get(), fSubRun->maskFormat(),
+ fSubRun->needsTransform());
+ if (GrDrawOpAtlas::ErrorCode::kError == code) {
+ // Something horrible has happened - drop the op
+ return false;
+ } else if (GrDrawOpAtlas::ErrorCode::kTryAgain == code) {
+ fBrokenRun = glyphIdx > 0;
+ result->fFinished = false;
+ return true;
+ }
+ }
+ auto tokenTracker = fUploadTarget->tokenTracker();
+ fFullAtlasManager->addGlyphToBulkAndSetUseToken(fSubRun->bulkUseToken(), glyph,
+ tokenTracker->nextDrawToken());
+ }
+
+ if (regenPos) {
+ regen_positions(currVertex, vertexStride, fTransX, fTransY);
+ }
+ if (regenCol) {
+ regen_colors(currVertex, vertexStride, fColor);
+ }
+ if (regenTexCoords) {
+ regen_texcoords(currVertex, vertexStride, glyph, fSubRun->drawAsDistanceFields());
+ }
+
+ currVertex += vertexStride * GrAtlasTextOp::kVerticesPerGlyph;
+ ++result->fGlyphsRegenerated;
+ ++fCurrGlyph;
+ }
+
+ // We may have changed the color so update it here
+ fSubRun->setColor(fColor);
+ if (regenTexCoords) {
+ if (regenGlyphs) {
+ fSubRun->setStrike(std::move(strike));
+ }
+ fSubRun->setAtlasGeneration(fBrokenRun
+ ? GrDrawOpAtlas::kInvalidAtlasGeneration
+ : fFullAtlasManager->atlasGeneration(fSubRun->maskFormat()));
+ } else {
+ // For the non-texCoords case we need to ensure that we update the associated use tokens
+ fFullAtlasManager->setUseTokenBulk(*fSubRun->bulkUseToken(),
+ fUploadTarget->tokenTracker()->nextDrawToken(),
+ fSubRun->maskFormat());
+ }
+ return true;
+}
+
+bool GrTextBlob::VertexRegenerator::regenerate(GrTextBlob::VertexRegenerator::Result* result) {
+ uint64_t currentAtlasGen = fFullAtlasManager->atlasGeneration(fSubRun->maskFormat());
+ // If regenerate() is called multiple times then the atlas gen may have changed. So we check
+ // this each time.
+ if (fSubRun->atlasGeneration() != currentAtlasGen) {
+ fRegenFlags |= kRegenTex;
+ }
+
+ if (fRegenFlags) {
+ return this->doRegen(result,
+ fRegenFlags & kRegenPos,
+ fRegenFlags & kRegenCol,
+ fRegenFlags & kRegenTex,
+ fRegenFlags & kRegenGlyph);
+ } else {
+ bool hasW = fSubRun->hasWCoord();
+ auto vertexStride = GetVertexStride(fSubRun->maskFormat(), hasW);
+ result->fFinished = true;
+ result->fGlyphsRegenerated = fSubRun->glyphCount() - fCurrGlyph;
+ result->fFirstVertex = fBlob->fVertices + fSubRun->vertexStartIndex() +
+ fCurrGlyph * kVerticesPerGlyph * vertexStride;
+ fCurrGlyph = fSubRun->glyphCount();
+
+ // set use tokens for all of the glyphs in our subrun. This is only valid if we
+ // have a valid atlas generation
+ fFullAtlasManager->setUseTokenBulk(*fSubRun->bulkUseToken(),
+ fUploadTarget->tokenTracker()->nextDrawToken(),
+ fSubRun->maskFormat());
+ return true;
+ }
+ SK_ABORT("Should not get here");
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrTextContext.cpp b/gfx/skia/skia/src/gpu/text/GrTextContext.cpp
new file mode 100644
index 0000000000..f1ecef43b6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextContext.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/text/GrTextContext.h"
+
+#include "include/core/SkGraphics.h"
+#include "include/gpu/GrContext.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkDrawProcs.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/ops/GrMeshDrawOp.h"
+#include "src/gpu/text/GrSDFMaskFilter.h"
+#include "src/gpu/text/GrTextBlobCache.h"
+
+// DF sizes and thresholds for usage of the small and medium sizes. For example, above
+// kSmallDFFontLimit we will use the medium size. The large size is used up until the size at
+// which we switch over to drawing as paths as controlled by Options.
+static const int kSmallDFFontSize = 32;
+static const int kSmallDFFontLimit = 32;
+static const int kMediumDFFontSize = 72;
+static const int kMediumDFFontLimit = 72;
+static const int kLargeDFFontSize = 162;
+
+static const int kDefaultMinDistanceFieldFontSize = 18;
+#ifdef SK_BUILD_FOR_ANDROID
+static const int kDefaultMaxDistanceFieldFontSize = 384;
+#else
+static const int kDefaultMaxDistanceFieldFontSize = 2 * kLargeDFFontSize;
+#endif
+
+GrTextContext::GrTextContext(const Options& options)
+ : fDistanceAdjustTable(new GrDistanceFieldAdjustTable), fOptions(options) {
+ SanitizeOptions(&fOptions);
+}
+
+std::unique_ptr<GrTextContext> GrTextContext::Make(const Options& options) {
+ return std::unique_ptr<GrTextContext>(new GrTextContext(options));
+}
+
+SkColor GrTextContext::ComputeCanonicalColor(const SkPaint& paint, bool lcd) {
+ SkColor canonicalColor = SkPaintPriv::ComputeLuminanceColor(paint);
+ if (lcd) {
+ // This is the correct computation, but there are tons of cases where LCD can be overridden.
+ // For now we just regenerate if any run in a textblob has LCD.
+ // TODO figure out where all of these overrides are and see if we can incorporate that logic
+ // at a higher level *OR* use sRGB
+ SkASSERT(false);
+ //canonicalColor = SkMaskGamma::CanonicalColor(canonicalColor);
+ } else {
+ // A8 can have mixed BMP text, but it shouldn't matter because BMP text won't have
+ // gamma-corrected masks anyway, nor color.
+ U8CPU lum = SkComputeLuminance(SkColorGetR(canonicalColor),
+ SkColorGetG(canonicalColor),
+ SkColorGetB(canonicalColor));
+ // reduce to our finite number of bits
+ canonicalColor = SkMaskGamma::CanonicalColor(SkColorSetRGB(lum, lum, lum));
+ }
+ return canonicalColor;
+}
+
+SkScalerContextFlags GrTextContext::ComputeScalerContextFlags(const GrColorInfo& colorInfo) {
+ // If we're doing linear blending, then we can disable the gamma hacks.
+ // Otherwise, leave them on. In either case, we still want the contrast boost:
+ // TODO: Can we be even smarter about mask gamma based on the dest transfer function?
+ if (colorInfo.isLinearlyBlended()) {
+ return SkScalerContextFlags::kBoostContrast;
+ } else {
+ return SkScalerContextFlags::kFakeGammaAndBoostContrast;
+ }
+}
+
+void GrTextContext::SanitizeOptions(Options* options) {
+ if (options->fMaxDistanceFieldFontSize < 0.f) {
+ options->fMaxDistanceFieldFontSize = kDefaultMaxDistanceFieldFontSize;
+ }
+ if (options->fMinDistanceFieldFontSize < 0.f) {
+ options->fMinDistanceFieldFontSize = kDefaultMinDistanceFieldFontSize;
+ }
+}
+
+bool GrTextContext::CanDrawAsDistanceFields(const SkPaint& paint, const SkFont& font,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ bool contextSupportsDistanceFieldText,
+ const Options& options) {
+ // mask filters modify alpha, which doesn't translate well to distance
+ if (paint.getMaskFilter() || !contextSupportsDistanceFieldText) {
+ return false;
+ }
+
+ // TODO: add some stroking support
+ if (paint.getStyle() != SkPaint::kFill_Style) {
+ return false;
+ }
+
+ if (viewMatrix.hasPerspective()) {
+ if (!options.fDistanceFieldVerticesAlwaysHaveW) {
+ return false;
+ }
+ } else {
+ SkScalar maxScale = viewMatrix.getMaxScale();
+ SkScalar scaledTextSize = maxScale * font.getSize();
+ // Hinted text looks far better at small resolutions, and scaling up beyond 2x yields
+ // undesirable artifacts.
+ if (scaledTextSize < options.fMinDistanceFieldFontSize ||
+ scaledTextSize > options.fMaxDistanceFieldFontSize) {
+ return false;
+ }
+
+ bool useDFT = props.isUseDeviceIndependentFonts();
+#if SK_FORCE_DISTANCE_FIELD_TEXT
+ useDFT = true;
+#endif
+
+ if (!useDFT && scaledTextSize < kLargeDFFontSize) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static SkScalar scaled_text_size(const SkScalar textSize, const SkMatrix& viewMatrix) {
+ SkScalar scaledTextSize = textSize;
+
+ if (viewMatrix.hasPerspective()) {
+ // for perspective, we simply force to the medium size
+ // TODO: compute a size based on approximate screen area
+ scaledTextSize = kMediumDFFontLimit;
+ } else {
+ SkScalar maxScale = viewMatrix.getMaxScale();
+ // if we have non-unity scale, we need to choose our base text size
+ // based on the SkPaint's text size multiplied by the max scale factor
+ // TODO: do we need to do this if we're scaling down (i.e. maxScale < 1)?
+ if (maxScale > 0 && !SkScalarNearlyEqual(maxScale, SK_Scalar1)) {
+ scaledTextSize *= maxScale;
+ }
+ }
+
+ return scaledTextSize;
+}
+
+SkFont GrTextContext::InitDistanceFieldFont(const SkFont& font,
+ const SkMatrix& viewMatrix,
+ const Options& options,
+ SkScalar* textRatio) {
+ SkScalar textSize = font.getSize();
+ SkScalar scaledTextSize = scaled_text_size(textSize, viewMatrix);
+
+ SkFont dfFont{font};
+
+ if (scaledTextSize <= kSmallDFFontLimit) {
+ *textRatio = textSize / kSmallDFFontSize;
+ dfFont.setSize(SkIntToScalar(kSmallDFFontSize));
+ } else if (scaledTextSize <= kMediumDFFontLimit) {
+ *textRatio = textSize / kMediumDFFontSize;
+ dfFont.setSize(SkIntToScalar(kMediumDFFontSize));
+ } else {
+ *textRatio = textSize / kLargeDFFontSize;
+ dfFont.setSize(SkIntToScalar(kLargeDFFontSize));
+ }
+
+ dfFont.setEdging(SkFont::Edging::kAntiAlias);
+ dfFont.setForceAutoHinting(false);
+ dfFont.setHinting(SkFontHinting::kNormal);
+
+ // The sub-pixel position will always happen when transforming to the screen.
+ dfFont.setSubpixel(false);
+ return dfFont;
+}
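+
+// Worked example (illustrative): a 48pt font at unit scale has a
+// scaledTextSize of 48, which lands in the medium bucket, so the distance
+// fields are generated at kMediumDFFontSize (72) and *textRatio is
+// 48 / 72 ~= 0.667; the 72px glyphs map back to the requested 48pt size via
+// that ratio at draw time.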
+
+std::pair<SkScalar, SkScalar> GrTextContext::InitDistanceFieldMinMaxScale(
+ SkScalar textSize,
+ const SkMatrix& viewMatrix,
+ const GrTextContext::Options& options) {
+
+ SkScalar scaledTextSize = scaled_text_size(textSize, viewMatrix);
+
+ // We have three sizes of distance field text, and within each size 'bucket' there is a floor
+ // and ceiling. A scale outside of this range would require regenerating the distance fields
+ SkScalar dfMaskScaleFloor;
+ SkScalar dfMaskScaleCeil;
+ if (scaledTextSize <= kSmallDFFontLimit) {
+ dfMaskScaleFloor = options.fMinDistanceFieldFontSize;
+ dfMaskScaleCeil = kSmallDFFontLimit;
+ } else if (scaledTextSize <= kMediumDFFontLimit) {
+ dfMaskScaleFloor = kSmallDFFontLimit;
+ dfMaskScaleCeil = kMediumDFFontLimit;
+ } else {
+ dfMaskScaleFloor = kMediumDFFontLimit;
+ dfMaskScaleCeil = options.fMaxDistanceFieldFontSize;
+ }
+
+ // Because there can be multiple runs in the blob, we want the overall maxMinScale and
+ // minMaxScale to make regeneration decisions. Specifically, we want the maximum minimum scale
+ // we can tolerate before we'd drop to a lower mip size, and the minimum maximum scale we can
+ // tolerate before we'd have to move to a larger mip size. When we actually test these values
+ // we look at the delta in scale between the new viewmatrix and the old viewmatrix, and test
+ // against these values to decide if we can reuse or not (i.e., will a given scale change our
+ // mip level).
+ SkASSERT(dfMaskScaleFloor <= scaledTextSize && scaledTextSize <= dfMaskScaleCeil);
+
+ return std::make_pair(dfMaskScaleFloor / scaledTextSize, dfMaskScaleCeil / scaledTextSize);
+}
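+
+// Worked example (illustrative): for a scaledTextSize of 48 the medium bucket
+// applies, so dfMaskScaleFloor == kSmallDFFontLimit (32) and
+// dfMaskScaleCeil == kMediumDFFontLimit (72), and the function returns
+// (32/48, 72/48) == (~0.667, 1.5). A later view matrix whose scale delta stays
+// inside that range can reuse the cached distance fields without a mip change.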
+
+SkPaint GrTextContext::InitDistanceFieldPaint(const SkPaint& paint) {
+ SkPaint dfPaint{paint};
+ dfPaint.setMaskFilter(GrSDFMaskFilter::Make());
+ return dfPaint;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+
+#include "src/gpu/GrRenderTargetContext.h"
+
+GR_DRAW_OP_TEST_DEFINE(GrAtlasTextOp) {
+ static uint32_t gContextID = SK_InvalidGenID;
+ static std::unique_ptr<GrTextContext> gTextContext;
+ static SkSurfaceProps gSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
+
+ if (context->priv().contextID() != gContextID) {
+ gContextID = context->priv().contextID();
+ gTextContext = GrTextContext::Make(GrTextContext::Options());
+ }
+
+ // Set up a dummy SkPaint / SkFont / GrRenderTargetContext
+ auto rtc = context->priv().makeDeferredRenderTargetContext(SkBackingFit::kApprox, 1024, 1024,
+ GrColorType::kRGBA_8888, nullptr);
+
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+
+ SkPaint skPaint;
+ skPaint.setColor(random->nextU());
+
+ SkFont font;
+ if (random->nextBool()) {
+ font.setEdging(SkFont::Edging::kSubpixelAntiAlias);
+ } else {
+ font.setEdging(random->nextBool() ? SkFont::Edging::kAntiAlias : SkFont::Edging::kAlias);
+ }
+ font.setSubpixel(random->nextBool());
+
+ const char* text = "The quick brown fox jumps over the lazy dog.";
+
+ // create some random x/y offsets, including negative offsets
+ static const int kMaxTrans = 1024;
+ int xPos = (random->nextU() % 2) * 2 - 1;
+ int yPos = (random->nextU() % 2) * 2 - 1;
+ int xInt = (random->nextU() % kMaxTrans) * xPos;
+ int yInt = (random->nextU() % kMaxTrans) * yPos;
+
+ return gTextContext->createOp_TestingOnly(context, gTextContext.get(), rtc.get(),
+ skPaint, font, viewMatrix, text, xInt, yInt);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrTextContext.h b/gfx/skia/skia/src/gpu/text/GrTextContext.h
new file mode 100644
index 0000000000..1cb973445c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextContext.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextContext_DEFINED
+#define GrTextContext_DEFINED
+
+#include "src/core/SkGlyphRun.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/text/GrDistanceFieldAdjustTable.h"
+#include "src/gpu/text/GrTextTarget.h"
+
+#if GR_TEST_UTILS
+#include "src/gpu/GrDrawOpTest.h"
+#endif
+
+class GrDrawOp;
+class GrRecordingContext;
+class GrTextBlobCache;
+class SkGlyph;
+class GrTextBlob;
+
+/*
+ * Renders text using some kind of atlas, i.e. bitmap text or distance field text.
+ */
+class GrTextContext {
+public:
+ struct Options {
+ /**
+ * Below this size (in device space) distance field text will not be used. Negative means
+ * use a default value.
+ */
+ SkScalar fMinDistanceFieldFontSize = -1.f;
+ /**
+ * Above this size (in device space) distance field text will not be used and glyphs will
+ * be rendered from outline as individual paths. Negative means use a default value.
+ */
+ SkScalar fMaxDistanceFieldFontSize = -1.f;
+ /** Forces all distance field vertices to use 3 components, not just when in perspective. */
+ bool fDistanceFieldVerticesAlwaysHaveW = false;
+ };
+
+ static std::unique_ptr<GrTextContext> Make(const Options& options);
+
+ void drawGlyphRunList(GrRecordingContext*, GrTextTarget*, const GrClip&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&, const SkGlyphRunList&);
+
+ std::unique_ptr<GrDrawOp> createOp_TestingOnly(GrRecordingContext*,
+ GrTextContext*,
+ GrRenderTargetContext*,
+ const SkPaint&, const SkFont&,
+ const SkMatrix& viewMatrix,
+ const char* text,
+ int x,
+ int y);
+
+ static void SanitizeOptions(Options* options);
+ static bool CanDrawAsDistanceFields(const SkPaint&, const SkFont&, const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ bool contextSupportsDistanceFieldText,
+ const Options& options);
+
+ static SkFont InitDistanceFieldFont(const SkFont& font,
+ const SkMatrix& viewMatrix,
+ const Options& options,
+ SkScalar* textRatio);
+
+ static SkPaint InitDistanceFieldPaint(const SkPaint& paint);
+
+ static std::pair<SkScalar, SkScalar> InitDistanceFieldMinMaxScale(SkScalar textSize,
+ const SkMatrix& viewMatrix,
+ const Options& options);
+
+private:
+ GrTextContext(const Options& options);
+
+ // Computes a canonical color for the text blob key from the paint's luminance color.
+ static SkColor ComputeCanonicalColor(const SkPaint&, bool lcd);
+ // Determines if we need to use fake gamma (and contrast boost):
+ static SkScalerContextFlags ComputeScalerContextFlags(const GrColorInfo&);
+
+ const GrDistanceFieldAdjustTable* dfAdjustTable() const { return fDistanceAdjustTable.get(); }
+
+ sk_sp<const GrDistanceFieldAdjustTable> fDistanceAdjustTable;
+
+ Options fOptions;
+
+#if GR_TEST_UTILS
+ static const SkScalerContextFlags kTextBlobOpScalerContextFlags =
+ SkScalerContextFlags::kFakeGammaAndBoostContrast;
+ GR_DRAW_OP_TEST_FRIEND(GrAtlasTextOp);
+#endif
+};
+
+#endif // GrTextContext_DEFINED
diff --git a/gfx/skia/skia/src/gpu/text/GrTextTarget.h b/gfx/skia/skia/src/gpu/text/GrTextTarget.h
new file mode 100644
index 0000000000..8498d717c2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextTarget.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextTarget_DEFINED
+#define GrTextTarget_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "src/gpu/GrColorInfo.h"
+
+class GrAtlasTextOp;
+class GrClip;
+class GrPaint;
+class GrRecordingContext;
+class GrShape;
+class SkGlyphRunListPainter;
+class SkMatrix;
+struct SkIRect;
+
+class GrTextTarget {
+public:
+ virtual ~GrTextTarget() = default;
+
+ int width() const { return fWidth; }
+
+ int height() const { return fHeight; }
+
+ const GrColorInfo& colorInfo() const { return fColorInfo; }
+
+ virtual void addDrawOp(const GrClip&, std::unique_ptr<GrAtlasTextOp> op) = 0;
+
+ virtual void drawShape(const GrClip&, const SkPaint&,
+ const SkMatrix& viewMatrix, const GrShape&) = 0;
+
+ virtual void makeGrPaint(GrMaskFormat, const SkPaint&, const SkMatrix& viewMatrix,
+ GrPaint*) = 0;
+
+ virtual GrRecordingContext* getContext() = 0;
+
+ virtual SkGlyphRunListPainter* glyphPainter() = 0;
+
+protected:
+ GrTextTarget(int width, int height, const GrColorInfo& colorInfo)
+ : fWidth(width), fHeight(height), fColorInfo(colorInfo) {
+ SkASSERT(kPremul_SkAlphaType == colorInfo.alphaType());
+ }
+
+private:
+ int fWidth;
+ int fHeight;
+ const GrColorInfo& fColorInfo;
+};
+#endif // GrTextTarget_DEFINED
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.cpp b/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
new file mode 100644
index 0000000000..65577bcba8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
+
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice,
+ VkDevice device,
+ sk_sp<const GrVkInterface> interface)
+ : fAllocator(VK_NULL_HANDLE)
+ , fInterface(std::move(interface))
+ , fDevice(device) {
+#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = fInterface->fFunctions.f##NAME
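+// For example, GR_COPY_FUNCTION(MapMemory) expands to:
+// functions.vkMapMemory = fInterface->fFunctions.fMapMemory;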
+
+ VmaVulkanFunctions functions;
+ GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
+ GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
+ GR_COPY_FUNCTION(AllocateMemory);
+ GR_COPY_FUNCTION(FreeMemory);
+ GR_COPY_FUNCTION(MapMemory);
+ GR_COPY_FUNCTION(UnmapMemory);
+ GR_COPY_FUNCTION(BindBufferMemory);
+ GR_COPY_FUNCTION(BindImageMemory);
+ GR_COPY_FUNCTION(GetBufferMemoryRequirements);
+ GR_COPY_FUNCTION(GetImageMemoryRequirements);
+ GR_COPY_FUNCTION(CreateBuffer);
+ GR_COPY_FUNCTION(DestroyBuffer);
+ GR_COPY_FUNCTION(CreateImage);
+ GR_COPY_FUNCTION(DestroyImage);
+
+ // Skia currently doesn't support VK_KHR_dedicated_allocation
+ functions.vkGetBufferMemoryRequirements2KHR = nullptr;
+ functions.vkGetImageMemoryRequirements2KHR = nullptr;
+
+ VmaAllocatorCreateInfo info;
+ info.flags = 0;
+ info.physicalDevice = physicalDevice;
+ info.device = device;
+ // 4MB was picked for the size here by looking at memory usage of Android apps and runs of DM.
+ // It seems to be a good compromise of not wasting unused allocated space and not making too
+ // many small allocations. The AMD allocator will start making blocks at 1/8 the max size and
+ // builds up block size as needed before capping at the max set here.
+ info.preferredLargeHeapBlockSize = 4*1024*1024;
+ info.pAllocationCallbacks = nullptr;
+ info.pDeviceMemoryCallbacks = nullptr;
+ info.frameInUseCount = 0;
+ info.pHeapSizeLimit = nullptr;
+ info.pVulkanFunctions = &functions;
+
+ vmaCreateAllocator(&info, &fAllocator);
+}
+
+GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
+ vmaDestroyAllocator(fAllocator);
+ fAllocator = VK_NULL_HANDLE;
+}
+
+bool GrVkAMDMemoryAllocator::allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
+ GrVkBackendMemory* backendMemory) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ VmaAllocationCreateInfo info;
+ info.flags = 0;
+ info.usage = VMA_MEMORY_USAGE_UNKNOWN;
+ info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ info.preferredFlags = 0;
+ info.memoryTypeBits = 0;
+ info.pool = VK_NULL_HANDLE;
+ info.pUserData = nullptr;
+
+ if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
+ info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+ }
+
+ if (AllocationPropertyFlags::kLazyAllocation & flags) {
+ info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+ }
+
+ if (AllocationPropertyFlags::kProtected & flags) {
+ info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
+ }
+
+ VmaAllocation allocation;
+ VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
+ if (VK_SUCCESS != result) {
+ return false;
+ }
+ *backendMemory = (GrVkBackendMemory)allocation;
+ return true;
+}
+
+bool GrVkAMDMemoryAllocator::allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backendMemory) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ VmaAllocationCreateInfo info;
+ info.flags = 0;
+ info.usage = VMA_MEMORY_USAGE_UNKNOWN;
+ info.memoryTypeBits = 0;
+ info.pool = VK_NULL_HANDLE;
+ info.pUserData = nullptr;
+
+ switch (usage) {
+ case BufferUsage::kGpuOnly:
+ info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ info.preferredFlags = 0;
+ break;
+ case BufferUsage::kCpuOnly:
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ case BufferUsage::kCpuWritesGpuReads:
+ // First attempt requires that the memory also be cached
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ break;
+ case BufferUsage::kGpuWritesCpuReads:
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ }
+
+ if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
+ info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+ }
+
+ if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
+ info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+ }
+
+ if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
+ SkASSERT(BufferUsage::kGpuOnly != usage);
+ info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
+ }
+
+ VmaAllocation allocation;
+ VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
+ if (VK_SUCCESS != result) {
+ if (usage == BufferUsage::kCpuWritesGpuReads) {
+ // We try again, but this time drop the requirement that the memory be cached
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
+ }
+ }
+ if (VK_SUCCESS != result) {
+ return false;
+ }
+
+ *backendMemory = (GrVkBackendMemory)allocation;
+ return true;
+}
+
+void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ vmaFreeMemory(fAllocator, allocation);
+}
+
+void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
+ GrVkAlloc* alloc) const {
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ VmaAllocationInfo vmaInfo;
+ vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);
+
+ VkMemoryPropertyFlags memFlags;
+ vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);
+
+ uint32_t flags = 0;
+ if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
+ flags |= GrVkAlloc::kMappable_Flag;
+ }
+ if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
+ flags |= GrVkAlloc::kNoncoherent_Flag;
+ }
+
+ alloc->fMemory = vmaInfo.deviceMemory;
+ alloc->fOffset = vmaInfo.offset;
+ alloc->fSize = vmaInfo.size;
+ alloc->fFlags = flags;
+ alloc->fBackendMemory = memoryHandle;
+
+ // TODO: Remove this hack once the AMD allocator is able to handle the alignment of noncoherent
+ // memory itself.
+ if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
+ // This is a hack to say that the allocation size is actually larger than it is. This is to
+ // make sure when we are flushing and invalidating noncoherent memory we have a size that is
+ // aligned to the nonCoherentAtomSize. This is safe for three reasons. First, the total size
+ // of the VkDeviceMemory we allocate will always be a multiple of the max possible alignment
+ // (currently 256). Second, all sub allocations are aligned with an offset of 256. And
+ // finally, the allocator we are using always maps the entire VkDeviceMemory, so the range
+ // we'll be flushing/invalidating will be mapped. So our new fake allocation size will
+ // always fit into the VkDeviceMemory, will never push it into another suballocation, and
+ // will always be mapped when map is called.
+ const VkPhysicalDeviceProperties* devProps;
+ vmaGetPhysicalDeviceProperties(fAllocator, &devProps);
+ VkDeviceSize alignment = devProps->limits.nonCoherentAtomSize;
+
+ alloc->fSize = (alloc->fSize + alignment - 1) & ~(alignment - 1);
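+ // Worked example (illustrative): with a nonCoherentAtomSize of 256, a
+ // 1000-byte allocation reports fSize == (1000 + 255) & ~255 == 1024.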
+ }
+}
+
+void* GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ void* mapPtr;
+ vmaMapMemory(fAllocator, allocation, &mapPtr);
+ return mapPtr;
+}
+
+void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ vmaUnmapMemory(fAllocator, allocation);
+}
+
+void GrVkAMDMemoryAllocator::flushMappedMemory(const GrVkBackendMemory& memoryHandle,
+ VkDeviceSize offset, VkDeviceSize size) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ GrVkAlloc info;
+ this->getAllocInfo(memoryHandle, &info);
+
+ if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
+ // We need to store the nonCoherentAtomSize for non-coherent flush/invalidate alignment.
+ const VkPhysicalDeviceProperties* physDevProps;
+ vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
+ VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;
+
+ VkMappedMemoryRange mappedMemoryRange;
+ GrVkMemory::GetNonCoherentMappedMemoryRange(info, offset, size, alignment,
+ &mappedMemoryRange);
+ GR_VK_CALL(fInterface, FlushMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
+ }
+}
+
+void GrVkAMDMemoryAllocator::invalidateMappedMemory(const GrVkBackendMemory& memoryHandle,
+ VkDeviceSize offset, VkDeviceSize size) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ GrVkAlloc info;
+ this->getAllocInfo(memoryHandle, &info);
+
+ if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
+ // We need to store the nonCoherentAtomSize for non-coherent flush/invalidate alignment.
+ const VkPhysicalDeviceProperties* physDevProps;
+ vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
+ VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;
+
+ VkMappedMemoryRange mappedMemoryRange;
+ GrVkMemory::GetNonCoherentMappedMemoryRange(info, offset, size, alignment,
+ &mappedMemoryRange);
+ GR_VK_CALL(fInterface, InvalidateMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
+ }
+}
+
+uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
+ VmaStats stats;
+ vmaCalculateStats(fAllocator, &stats);
+ return stats.total.usedBytes;
+}
+
+uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
+ VmaStats stats;
+ vmaCalculateStats(fAllocator, &stats);
+ return stats.total.usedBytes + stats.total.unusedBytes;
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.h b/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.h
new file mode 100644
index 0000000000..5d93dea63b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkAMDMemoryAllocator.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkAMDMemoryAllocator_DEFINED
+#define GrVkAMDMemoryAllocator_DEFINED
+
+
+#include "include/gpu/vk/GrVkMemoryAllocator.h"
+
+#include "GrVulkanMemoryAllocator.h"
+
+struct GrVkInterface;
+
+class GrVkAMDMemoryAllocator : public GrVkMemoryAllocator {
+public:
+ GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice, VkDevice device,
+ sk_sp<const GrVkInterface> interface);
+
+ ~GrVkAMDMemoryAllocator() override;
+
+ bool allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags, GrVkBackendMemory*) override;
+
+ bool allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
+ AllocationPropertyFlags flags, GrVkBackendMemory*) override;
+
+ void freeMemory(const GrVkBackendMemory&) override;
+
+ void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const override;
+
+ void* mapMemory(const GrVkBackendMemory&) override;
+ void unmapMemory(const GrVkBackendMemory&) override;
+
+ void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) override;
+ void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) override;
+
+ uint64_t totalUsedMemory() const override;
+ uint64_t totalAllocatedMemory() const override;
+
+private:
+ VmaAllocator fAllocator;
+
+ // If a future version of the AMD allocator has helper functions for flushing and invalidating
+ // memory, then we won't need to save the GrVkInterface here, since we won't need to make
+ // direct Vulkan calls.
+ sk_sp<const GrVkInterface> fInterface;
+ VkDevice fDevice;
+
+ typedef GrVkMemoryAllocator INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp
new file mode 100644
index 0000000000..833dfdb5dc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkBuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkTransferBuffer.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
+ VkBuffer buffer;
+ GrVkAlloc alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = desc.fSizeInBytes;
+ switch (desc.fType) {
+ case kVertex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case kIndex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case kUniform_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case kCopyRead_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ break;
+ case kCopyWrite_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ break;
+ case kTexel_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
+ break;
+ }
+ if (!desc.fDynamic) {
+ bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ }
+
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ desc.fType,
+ desc.fDynamic,
+ &alloc)) {
+ return nullptr;
+ }
+
+ const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
+ return nullptr;
+ }
+
+ return resource;
+}
+
+void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+                                  VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const {
+ VkBufferMemoryBarrier bufferMemoryBarrier = {
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
+ nullptr, // pNext
+ srcAccessMask, // srcAccessMask
+        dstAccessMask,                           // dstAccessMask
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ this->buffer(), // buffer
+ 0, // offset
+ fDesc.fSizeInBytes, // size
+ };
+
+ // TODO: restrict to area of buffer we're interested in
+ gpu->addBufferMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
+ &bufferMemoryBarrier);
+}
+
+void GrVkBuffer::Resource::freeGPUData(GrVkGpu* gpu) const {
+ SkASSERT(fBuffer);
+ SkASSERT(fAlloc.fMemory);
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
+}
+
+void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
+ VALIDATE();
+ fResource->recycle(const_cast<GrVkGpu*>(gpu));
+ fResource = nullptr;
+ if (!fDesc.fDynamic) {
+ delete[] (unsigned char*)fMapPtr;
+ }
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
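+// vkAbandon() mirrors vkRelease() but calls unrefAndAbandon(), which drops the resource without
+// freeing its GPU memory (used when the owning context is abandoned and the device may be gone).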
+void GrVkBuffer::vkAbandon() {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ if (!fDesc.fDynamic) {
+ delete[] (unsigned char*)fMapPtr;
+ }
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
+ switch (type) {
+ case GrVkBuffer::kIndex_Type:
+ return VK_ACCESS_INDEX_READ_BIT;
+ case GrVkBuffer::kVertex_Type:
+ return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ default:
+            // This helper is only called for static buffers, so we should only ever see index or
+            // vertex buffer types.
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
+ VALIDATE();
+ SkASSERT(!this->vkIsMapped());
+
+ if (!fResource->unique()) {
+ if (fDesc.fDynamic) {
+ // in use by the command buffer, so we need to create a new one
+ fResource->recycle(gpu);
+ fResource = this->createResource(gpu, fDesc);
+ if (createdNewBuffer) {
+ *createdNewBuffer = true;
+ }
+ } else {
+ SkASSERT(fMapPtr);
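+            // A static buffer still in flight: emit a barrier so prior vertex/index reads
+            // finish before the transfer write that internalUnmap() will issue.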
+ this->addMemoryBarrier(gpu,
+ buffer_type_to_access_flags(fDesc.fType),
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+ }
+ }
+
+ if (fDesc.fDynamic) {
+ const GrVkAlloc& alloc = this->alloc();
+ SkASSERT(alloc.fSize > 0);
+ SkASSERT(alloc.fSize >= size);
+ SkASSERT(0 == fOffset);
+
+ fMapPtr = GrVkMemory::MapAlloc(gpu, alloc);
+ } else {
+ if (!fMapPtr) {
+ fMapPtr = new unsigned char[this->size()];
+ }
+ }
+
+ VALIDATE();
+}
+
+void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
+ SkASSERT(src);
+ // We should never call this method in protected contexts.
+ SkASSERT(!gpu->protectedContext());
+    // The Vulkan API restricts vkCmdUpdateBuffer to updates that are at most 65536 bytes and
+    // whose size is 4-byte aligned.
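+    // ((size & 0x3) == 0) is the 4-byte-alignment test; avoidUpdateBuffers() is a driver
+    // workaround (set in GrVkCaps for e.g. Qualcomm) where a staging copy is faster anyway.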
+ if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
+ gpu->updateBuffer(this, src, this->offset(), size);
+ } else {
+ sk_sp<GrVkTransferBuffer> transferBuffer =
+ GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type);
+ if (!transferBuffer) {
+ return;
+ }
+
+ char* buffer = (char*) transferBuffer->map();
+        memcpy(buffer, src, size);
+ transferBuffer->unmap();
+
+ gpu->copyBuffer(transferBuffer.get(), this, 0, this->offset(), size);
+ }
+ this->addMemoryBarrier(gpu,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ buffer_type_to_access_flags(fDesc.fType),
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+}
+
+void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
+ VALIDATE();
+ SkASSERT(this->vkIsMapped());
+
+ if (fDesc.fDynamic) {
+ const GrVkAlloc& alloc = this->alloc();
+ SkASSERT(alloc.fSize > 0);
+ SkASSERT(alloc.fSize >= size);
+ // We currently don't use fOffset
+ SkASSERT(0 == fOffset);
+
+ GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
+ GrVkMemory::UnmapAlloc(gpu, alloc);
+ fMapPtr = nullptr;
+ } else {
+ SkASSERT(fMapPtr);
+ this->copyCpuDataToGpuBuffer(gpu, fMapPtr, size);
+ }
+}
+
+bool GrVkBuffer::vkIsMapped() const {
+ VALIDATE();
+ return SkToBool(fMapPtr);
+}
+
+bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
+ if (srcSizeInBytes > fDesc.fSizeInBytes) {
+ return false;
+ }
+
+ if (fDesc.fDynamic) {
+ this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
+ if (!fMapPtr) {
+ return false;
+ }
+
+ memcpy(fMapPtr, src, srcSizeInBytes);
+ this->internalUnmap(gpu, srcSizeInBytes);
+ } else {
+ this->copyCpuDataToGpuBuffer(gpu, src, srcSizeInBytes);
+ }
+
+
+ return true;
+}
+
+void GrVkBuffer::validate() const {
+ SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
+ || kTexel_Type == fDesc.fType || kCopyRead_Type == fDesc.fType
+ || kCopyWrite_Type == fDesc.fType || kUniform_Type == fDesc.fType);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h
new file mode 100644
index 0000000000..e82613d0d2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBuffer_DEFINED
+#define GrVkBuffer_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkGpu;
+
+/**
+ * This class serves as the base of GrVk*Buffer classes. It was written to avoid code
+ * duplication in those classes.
+ */
+class GrVkBuffer : public SkNoncopyable {
+public:
+ virtual ~GrVkBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fResource);
+ delete [] (unsigned char*)fMapPtr;
+ }
+
+ VkBuffer buffer() const { return fResource->fBuffer; }
+ const GrVkAlloc& alloc() const { return fResource->fAlloc; }
+ const GrVkRecycledResource* resource() const { return fResource; }
+ size_t size() const { return fDesc.fSizeInBytes; }
+ VkDeviceSize offset() const { return fOffset; }
+
+ void addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const;
+
+ enum Type {
+ kVertex_Type,
+ kIndex_Type,
+ kUniform_Type,
+ kTexel_Type,
+ kCopyRead_Type,
+ kCopyWrite_Type,
+ };
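+    // Each Type selects the VkBufferUsageFlagBits applied in Create() (e.g. kVertex_Type maps to
+    // VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); non-dynamic buffers additionally get TRANSFER_DST.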
+
+protected:
+ struct Desc {
+ size_t fSizeInBytes;
+ Type fType; // vertex buffer, index buffer, etc.
+ bool fDynamic;
+ };
+
+ class Resource : public GrVkRecycledResource {
+ public:
+ Resource(VkBuffer buf, const GrVkAlloc& alloc, Type type)
+ : INHERITED(), fBuffer(buf), fAlloc(alloc), fType(type) {}
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkBuffer: %d (%d refs)\n", fBuffer, this->getRefCnt());
+ }
+#endif
+ VkBuffer fBuffer;
+ GrVkAlloc fAlloc;
+ Type fType;
+
+ private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ void onRecycle(GrVkGpu* gpu) const override { this->unref(gpu); }
+
+ typedef GrVkRecycledResource INHERITED;
+ };
+
+ // convenience routine for raw buffer creation
+ static const Resource* Create(const GrVkGpu* gpu,
+ const Desc& descriptor);
+
+ GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
+ : fDesc(desc), fResource(resource), fOffset(0), fMapPtr(nullptr) {
+ }
+
+ void* vkMap(GrVkGpu* gpu) {
+ this->internalMap(gpu, fDesc.fSizeInBytes);
+ return fMapPtr;
+ }
+ void vkUnmap(GrVkGpu* gpu) { this->internalUnmap(gpu, this->size()); }
+
+    // If the caller passes in a non-null createdNewBuffer, this function will set the bool to
+    // true if it creates a new VkBuffer to upload the data to.
+ bool vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer = nullptr);
+
+ void vkAbandon();
+ void vkRelease(const GrVkGpu* gpu);
+
+private:
+ virtual const Resource* createResource(GrVkGpu* gpu,
+ const Desc& descriptor) {
+ return Create(gpu, descriptor);
+ }
+
+ void internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer = nullptr);
+ void internalUnmap(GrVkGpu* gpu, size_t size);
+ void copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* srcData, size_t size);
+
+ void validate() const;
+ bool vkIsMapped() const;
+
+ Desc fDesc;
+ const Resource* fResource;
+ VkDeviceSize fOffset;
+ void* fMapPtr;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBufferView.cpp b/gfx/skia/skia/src/gpu/vk/GrVkBufferView.cpp
new file mode 100644
index 0000000000..f4ed9ea821
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBufferView.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkBufferView.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+const GrVkBufferView* GrVkBufferView::Create(const GrVkGpu* gpu, VkBuffer buffer, VkFormat format,
+ VkDeviceSize offset, VkDeviceSize range) {
+ VkBufferView bufferView;
+
+ // Create the VkBufferView
+ VkBufferViewCreateInfo viewInfo;
+ memset(&viewInfo, 0, sizeof(VkBufferViewCreateInfo));
+ viewInfo.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+ viewInfo.pNext = nullptr;
+ viewInfo.flags = 0;
+ viewInfo.buffer = buffer;
+ viewInfo.format = format;
+ viewInfo.offset = offset;
+ viewInfo.range = range;
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateBufferView(gpu->device(), &viewInfo,
+ nullptr, &bufferView));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkBufferView(bufferView);
+}
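+
+// Usage sketch (illustrative, not from this file): bind a texel buffer by creating a view and
+// passing view->bufferView() into a descriptor write; unref(gpu) the view when done, e.g.
+//     const GrVkBufferView* view = GrVkBufferView::Create(
+//             gpu, buffer, VK_FORMAT_R8G8B8A8_UNORM, 0, VK_WHOLE_SIZE);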
+
+void GrVkBufferView::freeGPUData(GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyBufferView(gpu->device(), fBufferView, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBufferView.h b/gfx/skia/skia/src/gpu/vk/GrVkBufferView.h
new file mode 100644
index 0000000000..e6adf6b9a9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBufferView.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBufferView_DEFINED
+#define GrVkBufferView_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkBufferView : public GrVkResource {
+public:
+ static const GrVkBufferView* Create(const GrVkGpu* gpu, VkBuffer buffer, VkFormat format,
+ VkDeviceSize offset, VkDeviceSize range);
+
+ VkBufferView bufferView() const { return fBufferView; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkBufferView: %d (%d refs)\n", fBufferView, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkBufferView(VkBufferView bufferView) : INHERITED(), fBufferView(bufferView) {}
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkBufferView fBufferView;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp
new file mode 100644
index 0000000000..36e2436a85
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp
@@ -0,0 +1,1747 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/vk/GrVkBackendContext.h"
+#include "include/gpu/vk/GrVkExtensions.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrUtil.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/vk/GrVkCaps.h"
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/gpu/vk/GrVkTexture.h"
+#include "src/gpu/vk/GrVkUniformHandler.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#ifdef SK_BUILD_FOR_ANDROID
+#include <sys/system_properties.h>
+#endif
+
+GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
+ uint32_t instanceVersion, uint32_t physicalDeviceVersion,
+ const GrVkExtensions& extensions, GrProtected isProtected)
+ : INHERITED(contextOptions) {
+ /**************************************************************************
+ * GrCaps fields
+ **************************************************************************/
+ fMipMapSupport = true; // always available in Vulkan
+ fNPOTTextureTileSupport = true; // always available in Vulkan
+ fReuseScratchTextures = true; //TODO: figure this out
+ fGpuTracingSupport = false; //TODO: figure this out
+ fOversizedStencilSupport = false; //TODO: figure this out
+ fInstanceAttribSupport = true;
+
+ fSemaphoreSupport = true; // always available in Vulkan
+ fFenceSyncSupport = true; // always available in Vulkan
+ fCrossContextTextureSupport = true;
+ fHalfFloatVertexAttributeSupport = true;
+
+ // We always copy in/out of a transfer buffer so it's trivial to support row bytes.
+ fReadPixelsRowBytesSupport = true;
+ fWritePixelsRowBytesSupport = true;
+
+ fTransferBufferSupport = true;
+
+ fMaxRenderTargetSize = 4096; // minimum required by spec
+ fMaxTextureSize = 4096; // minimum required by spec
+
+ fDynamicStateArrayGeometryProcessorTextureSupport = true;
+
+ fShaderCaps.reset(new GrShaderCaps(contextOptions));
+
+ this->init(contextOptions, vkInterface, physDev, features, physicalDeviceVersion, extensions,
+ isProtected);
+}
+
+namespace {
+/**
+ * This comes from section 37.1.6 of the Vulkan spec. Format is
+ * (<bits>|<tag>)_<block_size>_<texels_per_block>.
+ */
+enum class FormatCompatibilityClass {
+ k8_1_1,
+ k16_2_1,
+ k24_3_1,
+ k32_4_1,
+ k64_8_1,
+ kETC2_RGB_8_16,
+};
+} // anonymous namespace
+
+static FormatCompatibilityClass format_compatibility_class(VkFormat format) {
+ switch (format) {
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ case VK_FORMAT_R16G16_UNORM:
+ case VK_FORMAT_R16G16_SFLOAT:
+ return FormatCompatibilityClass::k32_4_1;
+
+ case VK_FORMAT_R8_UNORM:
+ return FormatCompatibilityClass::k8_1_1;
+
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ case VK_FORMAT_R16_SFLOAT:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+ case VK_FORMAT_R16_UNORM:
+ return FormatCompatibilityClass::k16_2_1;
+
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ return FormatCompatibilityClass::k64_8_1;
+
+ case VK_FORMAT_R8G8B8_UNORM:
+ return FormatCompatibilityClass::k24_3_1;
+
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ return FormatCompatibilityClass::kETC2_RGB_8_16;
+
+ default:
+ SK_ABORT("Unsupported VkFormat");
+ }
+}
+
+bool GrVkCaps::canCopyImage(VkFormat dstFormat, int dstSampleCnt, bool dstHasYcbcr,
+ VkFormat srcFormat, int srcSampleCnt, bool srcHasYcbcr) const {
+ if ((dstSampleCnt > 1 || srcSampleCnt > 1) && dstSampleCnt != srcSampleCnt) {
+ return false;
+ }
+
+ if (dstHasYcbcr || srcHasYcbcr) {
+ return false;
+ }
+
+ // We require that all Vulkan GrSurfaces have been created with transfer_dst and transfer_src
+ // as image usage flags.
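+    // vkCmdCopyImage also requires the two formats to be size-compatible (same bits per texel
+    // block), which is exactly what format_compatibility_class() models.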
+ return format_compatibility_class(srcFormat) == format_compatibility_class(dstFormat);
+}
+
+bool GrVkCaps::canCopyAsBlit(VkFormat dstFormat, int dstSampleCnt, bool dstIsLinear,
+ bool dstHasYcbcr, VkFormat srcFormat, int srcSampleCnt,
+ bool srcIsLinear, bool srcHasYcbcr) const {
+ // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
+ // as image usage flags.
+ if (!this->formatCanBeDstofBlit(dstFormat, dstIsLinear) ||
+ !this->formatCanBeSrcofBlit(srcFormat, srcIsLinear)) {
+ return false;
+ }
+
+    // We cannot blit images that are multisampled. We will need to figure out if we can blit
+    // the resolved MSAA image though.
+ if (dstSampleCnt > 1 || srcSampleCnt > 1) {
+ return false;
+ }
+
+ if (dstHasYcbcr || srcHasYcbcr) {
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkCaps::canCopyAsResolve(VkFormat dstFormat, int dstSampleCnt, bool dstHasYcbcr,
+ VkFormat srcFormat, int srcSampleCnt, bool srcHasYcbcr) const {
+ // The src surface must be multisampled.
+ if (srcSampleCnt <= 1) {
+ return false;
+ }
+
+ // The dst must not be multisampled.
+ if (dstSampleCnt > 1) {
+ return false;
+ }
+
+ // Surfaces must have the same format.
+ if (srcFormat != dstFormat) {
+ return false;
+ }
+
+ if (dstHasYcbcr || srcHasYcbcr) {
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkCaps::onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const {
+ if (src->isProtected() && !dst->isProtected()) {
+ return false;
+ }
+
+    // TODO: Figure out a way to track if we've wrapped a linear texture in a proxy (e.g. a
+    // PromiseImage, which won't get instantiated right away). Does this need something similar to
+    // the tracking of external or rectangle textures in GL? For now we don't create linear
+    // textures internally, and I don't believe anyone is wrapping them.
+ bool srcIsLinear = false;
+ bool dstIsLinear = false;
+
+ int dstSampleCnt = 0;
+ int srcSampleCnt = 0;
+ if (const GrRenderTargetProxy* rtProxy = dst->asRenderTargetProxy()) {
+ // Copying to or from render targets that wrap a secondary command buffer is not allowed
+ // since they would require us to know the VkImage, which we don't have, as well as need us
+ // to stop and start the VkRenderPass which we don't have access to.
+ if (rtProxy->wrapsVkSecondaryCB()) {
+ return false;
+ }
+ dstSampleCnt = rtProxy->numSamples();
+ }
+ if (const GrRenderTargetProxy* rtProxy = src->asRenderTargetProxy()) {
+ // Copying to or from render targets that wrap a secondary command buffer is not allowed
+ // since they would require us to know the VkImage, which we don't have, as well as need us
+ // to stop and start the VkRenderPass which we don't have access to.
+ if (rtProxy->wrapsVkSecondaryCB()) {
+ return false;
+ }
+ srcSampleCnt = rtProxy->numSamples();
+ }
+ SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTargetProxy()));
+ SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTargetProxy()));
+
+ bool dstHasYcbcr = false;
+ if (auto ycbcr = dst->backendFormat().getVkYcbcrConversionInfo()) {
+ if (ycbcr->isValid()) {
+ dstHasYcbcr = true;
+ }
+ }
+
+ bool srcHasYcbcr = false;
+ if (auto ycbcr = src->backendFormat().getVkYcbcrConversionInfo()) {
+ if (ycbcr->isValid()) {
+ srcHasYcbcr = true;
+ }
+ }
+
+ VkFormat dstFormat, srcFormat;
+ SkAssertResult(dst->backendFormat().asVkFormat(&dstFormat));
+ SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
+
+ return this->canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcHasYcbcr) ||
+ this->canCopyAsBlit(dstFormat, dstSampleCnt, dstIsLinear, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcIsLinear, srcHasYcbcr) ||
+ this->canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcHasYcbcr);
+}
+
+template<typename T> T* get_extension_feature_struct(const VkPhysicalDeviceFeatures2& features,
+ VkStructureType type) {
+ // All Vulkan structs that could be part of the features chain will start with the
+ // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
+ // so we can get access to the pNext for the next struct.
+ struct CommonVulkanHeader {
+ VkStructureType sType;
+ void* pNext;
+ };
+
+ void* pNext = features.pNext;
+ while (pNext) {
+ CommonVulkanHeader* header = static_cast<CommonVulkanHeader*>(pNext);
+ if (header->sType == type) {
+ return static_cast<T*>(pNext);
+ }
+ pNext = header->pNext;
+ }
+ return nullptr;
+}
+
+void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev, const VkPhysicalDeviceFeatures2& features,
+ uint32_t physicalDeviceVersion, const GrVkExtensions& extensions,
+ GrProtected isProtected) {
+ VkPhysicalDeviceProperties properties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
+
+ VkPhysicalDeviceMemoryProperties memoryProperties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceMemoryProperties(physDev, &memoryProperties));
+
+ SkASSERT(physicalDeviceVersion <= properties.apiVersion);
+
+ if (extensions.hasExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, 1)) {
+ fSupportsSwapchain = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
+ fSupportsPhysicalDeviceProperties2 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1)) {
+ fSupportsMemoryRequirements2 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, 1)) {
+ fSupportsBindMemory2 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_MAINTENANCE1_EXTENSION_NAME, 1)) {
+ fSupportsMaintenance1 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_MAINTENANCE2_EXTENSION_NAME, 1)) {
+ fSupportsMaintenance2 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions.hasExtension(VK_KHR_MAINTENANCE3_EXTENSION_NAME, 1)) {
+ fSupportsMaintenance3 = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ (extensions.hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
+ this->supportsMemoryRequirements2())) {
+ fSupportsDedicatedAllocation = true;
+ }
+
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ (extensions.hasExtension(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, 1) &&
+ this->supportsPhysicalDeviceProperties2() &&
+ extensions.hasExtension(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, 1) &&
+ this->supportsDedicatedAllocation())) {
+ fSupportsExternalMemory = true;
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+    // Currently Adreno devices do not support the QUEUE_FAMILY_FOREIGN extension, so until they
+    // do we don't explicitly require it here, even though the spec says it is required.
+ if (extensions.hasExtension(
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME, 2) &&
+ /* extensions.hasExtension(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME, 1) &&*/
+ this->supportsExternalMemory() &&
+ this->supportsBindMemory2()) {
+ fSupportsAndroidHWBExternalMemory = true;
+ fSupportsAHardwareBufferImages = true;
+ }
+#endif
+
+ auto ycbcrFeatures =
+ get_extension_feature_struct<VkPhysicalDeviceSamplerYcbcrConversionFeatures>(
+ features, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES);
+ if (ycbcrFeatures && ycbcrFeatures->samplerYcbcrConversion &&
+ (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ (extensions.hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1) &&
+ this->supportsMaintenance1() && this->supportsBindMemory2() &&
+ this->supportsMemoryRequirements2() && this->supportsPhysicalDeviceProperties2()))) {
+ fSupportsYcbcrConversion = true;
+ }
+
+ // We always push back the default GrVkYcbcrConversionInfo so that the case of no conversion
+ // will return a key of 0.
+ fYcbcrInfos.push_back(GrVkYcbcrConversionInfo());
+
+ if ((isProtected == GrProtected::kYes) &&
+ (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0))) {
+ fSupportsProtectedMemory = true;
+ fAvoidUpdateBuffers = true;
+ fShouldAlwaysUseDedicatedImageMemory = true;
+ }
+
+ this->initGrCaps(vkInterface, physDev, properties, memoryProperties, features, extensions);
+ this->initShaderCaps(properties, features);
+
+ if (kQualcomm_VkVendor == properties.vendorID) {
+ // A "clear" load for the CCPR atlas runs faster on QC than a "discard" load followed by a
+ // scissored clear.
+ // On NVIDIA and Intel, the discard load followed by clear is faster.
+ // TODO: Evaluate on ARM, Imagination, and ATI.
+ fPreferFullscreenClears = true;
+ }
+
+ if (kQualcomm_VkVendor == properties.vendorID || kARM_VkVendor == properties.vendorID) {
+        // On Qualcomm and ARM, mapping a gpu buffer and doing both reads and writes to it is
+        // slow. Thus for index and vertex buffers we force the use of a cpu-side buffer and then
+        // copy the whole buffer up to the gpu.
+ fBufferMapThreshold = SK_MaxS32;
+ }
+
+ if (kQualcomm_VkVendor == properties.vendorID) {
+ // On Qualcomm it looks like using vkCmdUpdateBuffer is slower than using a transfer buffer
+ // even for small sizes.
+ fAvoidUpdateBuffers = true;
+ }
+
+ if (kARM_VkVendor == properties.vendorID) {
+ // ARM seems to do better with more fine triangles as opposed to using the sample mask.
+ // (At least in our current round rect op.)
+ fPreferTrianglesOverSampleMask = true;
+ }
+
+ this->initFormatTable(vkInterface, physDev, properties);
+ this->initStencilFormat(vkInterface, physDev);
+
+ if (!contextOptions.fDisableDriverCorrectnessWorkarounds) {
+ this->applyDriverCorrectnessWorkarounds(properties);
+ }
+
+ this->applyOptionsOverrides(contextOptions);
+ fShaderCaps->applyOptionsOverrides(contextOptions);
+}
+
+void GrVkCaps::applyDriverCorrectnessWorkarounds(const VkPhysicalDeviceProperties& properties) {
+ if (kQualcomm_VkVendor == properties.vendorID) {
+ fMustDoCopiesFromOrigin = true;
+        // The transfer-buffer path doesn't support this workaround, so disable transfer buffers
+        // on these devices.
+ fTransferBufferSupport = false;
+ }
+
+#if defined(SK_BUILD_FOR_WIN)
+ if (kNvidia_VkVendor == properties.vendorID || kIntel_VkVendor == properties.vendorID) {
+ fMustSleepOnTearDown = true;
+ }
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (kImagination_VkVendor == properties.vendorID) {
+ fMustSleepOnTearDown = true;
+ }
+#endif
+
+#if defined(SK_BUILD_FOR_ANDROID)
+ // Protected memory features have problems in Android P and earlier.
+ if (fSupportsProtectedMemory && (kQualcomm_VkVendor == properties.vendorID)) {
+ char androidAPIVersion[PROP_VALUE_MAX];
+ int strLength = __system_property_get("ro.build.version.sdk", androidAPIVersion);
+ if (strLength == 0 || atoi(androidAPIVersion) <= 28) {
+ fSupportsProtectedMemory = false;
+ }
+ }
+#endif
+
+    // On the Mali-based Galaxy S7 we see lots of rendering issues when we suballocate VkImages.
+ if (kARM_VkVendor == properties.vendorID) {
+ fShouldAlwaysUseDedicatedImageMemory = true;
+ }
+
+    // On the Mali-based Galaxy S7 and S9 we see lots of rendering issues with image filters
+    // dropping out when using only primary command buffers.
+ if (kARM_VkVendor == properties.vendorID) {
+ fPreferPrimaryOverSecondaryCommandBuffers = false;
+ }
+
+ // On various devices, when calling vkCmdClearAttachments on a primary command buffer, it
+ // corrupts the bound buffers on the command buffer. As a workaround we invalidate our knowledge
+ // of bound buffers so that we will rebind them on the next draw.
+ if (kQualcomm_VkVendor == properties.vendorID || kAMD_VkVendor == properties.vendorID) {
+ fMustInvalidatePrimaryCmdBufferStateAfterClearAttachments = true;
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // GrCaps workarounds
+ ////////////////////////////////////////////////////////////////////////////
+
+ if (kARM_VkVendor == properties.vendorID) {
+ fInstanceAttribSupport = false;
+ fAvoidWritePixelsFastPath = true; // bugs.skia.org/8064
+ }
+
+ // AMD advertises support for MAX_UINT vertex input attributes, but in reality only supports 32.
+ if (kAMD_VkVendor == properties.vendorID) {
+ fMaxVertexAttributes = SkTMin(fMaxVertexAttributes, 32);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // GrShaderCaps workarounds
+ ////////////////////////////////////////////////////////////////////////////
+
+ if (kImagination_VkVendor == properties.vendorID) {
+ fShaderCaps->fAtan2ImplementedAsAtanYOverX = true;
+ }
+}
+
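+// Returns the largest sample count N such that every power-of-two count from 2 up to N is
+// supported, or 0 if 2x sampling is unavailable (1x is always required, per the assert below).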
+int get_max_sample_count(VkSampleCountFlags flags) {
+ SkASSERT(flags & VK_SAMPLE_COUNT_1_BIT);
+ if (!(flags & VK_SAMPLE_COUNT_2_BIT)) {
+ return 0;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_4_BIT)) {
+ return 2;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_8_BIT)) {
+ return 4;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_16_BIT)) {
+ return 8;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_32_BIT)) {
+ return 16;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_64_BIT)) {
+ return 32;
+ }
+ return 64;
+}
+
+void GrVkCaps::initGrCaps(const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev,
+ const VkPhysicalDeviceProperties& properties,
+ const VkPhysicalDeviceMemoryProperties& memoryProperties,
+ const VkPhysicalDeviceFeatures2& features,
+ const GrVkExtensions& extensions) {
+    // Some GPUs, like AMD's, report MAX_INT supported vertex attributes. In general there is no
+    // need for us ever to support that many, and it makes tests that exercise all the vertex
+    // attribs time out looping over that many. For now we'll cap this at 64 and can raise it if
+    // we ever find the need.
+ static const uint32_t kMaxVertexAttributes = 64;
+ fMaxVertexAttributes = SkTMin(properties.limits.maxVertexInputAttributes, kMaxVertexAttributes);
+
+    // We could actually query and get a max size for each config; however, maxImageDimension2D
+    // will give the minimum max size across all configs. So for simplicity we will use that for
+    // now.
+ fMaxRenderTargetSize = SkTMin(properties.limits.maxImageDimension2D, (uint32_t)INT_MAX);
+ fMaxTextureSize = SkTMin(properties.limits.maxImageDimension2D, (uint32_t)INT_MAX);
+ if (fDriverBugWorkarounds.max_texture_size_limit_4096) {
+ fMaxTextureSize = SkTMin(fMaxTextureSize, 4096);
+ }
+ // Our render targets are always created with textures as the color
+ // attachment, hence this min:
+ fMaxRenderTargetSize = SkTMin(fMaxTextureSize, fMaxRenderTargetSize);
+
+ // TODO: check if RT's larger than 4k incur a performance cost on ARM.
+ fMaxPreferredRenderTargetSize = fMaxRenderTargetSize;
+
+    // Since we will always map in the end to upload the data, we might as well just map from
+    // the get-go. There is no hard data to suggest this is faster or slower.
+ fBufferMapThreshold = 0;
+
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag | kAsyncRead_MapFlag;
+
+ fOversizedStencilSupport = true;
+
+ if (extensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2) &&
+ this->supportsPhysicalDeviceProperties2()) {
+
+ VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT blendProps;
+ blendProps.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT;
+ blendProps.pNext = nullptr;
+
+ VkPhysicalDeviceProperties2 props;
+ props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+ props.pNext = &blendProps;
+
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties2(physDev, &props));
+
+ if (blendProps.advancedBlendAllOperations == VK_TRUE) {
+ fShaderCaps->fAdvBlendEqInteraction = GrShaderCaps::kAutomatic_AdvBlendEqInteraction;
+
+ auto blendFeatures =
+ get_extension_feature_struct<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT>(
+ features,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT);
+ if (blendFeatures && blendFeatures->advancedBlendCoherentOperations == VK_TRUE) {
+ fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
+ } else {
+                // TODO: Currently non-coherent blends are not supported in our Vulkan backend.
+                // They require us to support self-dependencies in our render passes.
+ // fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+ }
+ }
+ }
+}
+
+void GrVkCaps::initShaderCaps(const VkPhysicalDeviceProperties& properties,
+ const VkPhysicalDeviceFeatures2& features) {
+ GrShaderCaps* shaderCaps = fShaderCaps.get();
+ shaderCaps->fVersionDeclString = "#version 330\n";
+
+    // Vulkan is based on ES 3.0, so the following should all be supported
+ shaderCaps->fUsesPrecisionModifiers = true;
+ shaderCaps->fFlatInterpolationSupport = true;
+ // Flat interpolation appears to be slow on Qualcomm GPUs. This was tested in GL and is assumed
+ // to be true with Vulkan as well.
+ shaderCaps->fPreferFlatInterpolation = kQualcomm_VkVendor != properties.vendorID;
+
+ // GrShaderCaps
+
+ shaderCaps->fShaderDerivativeSupport = true;
+
+ // FIXME: http://skbug.com/7733: Disable geometry shaders until Intel/Radeon GMs draw correctly.
+ // shaderCaps->fGeometryShaderSupport =
+ // shaderCaps->fGSInvocationsSupport = features.features.geometryShader;
+
+ shaderCaps->fDualSourceBlendingSupport = features.features.dualSrcBlend;
+
+ shaderCaps->fIntegerSupport = true;
+ shaderCaps->fVertexIDSupport = true;
+ shaderCaps->fFPManipulationSupport = true;
+
+ // Assume the minimum precisions mandated by the SPIR-V spec.
+ shaderCaps->fFloatIs32Bits = true;
+ shaderCaps->fHalfIs32Bits = false;
+
+ shaderCaps->fMaxFragmentSamplers = SkTMin(
+ SkTMin(properties.limits.maxPerStageDescriptorSampledImages,
+ properties.limits.maxPerStageDescriptorSamplers),
+ (uint32_t)INT_MAX);
+}
+
+bool stencil_format_supported(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ VkFormat format) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ return SkToBool(VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT & props.optimalTilingFeatures);
+}
+
+void GrVkCaps::initStencilFormat(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // List of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least. We are guaranteed to have either
+ // VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT. VK_FORMAT_D32_SFLOAT_S8_UINT
+ // can optionally have 24 unused bits at the end so we assume the total bits is 64.
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = { VK_FORMAT_S8_UINT, 8, 8, false },
+ gD24S8 = { VK_FORMAT_D24_UNORM_S8_UINT, 8, 32, true },
+ gD32S8 = { VK_FORMAT_D32_SFLOAT_S8_UINT, 8, 64, true };
+
+ if (stencil_format_supported(interface, physDev, VK_FORMAT_S8_UINT)) {
+ fPreferredStencilFormat = gS8;
+ } else if (stencil_format_supported(interface, physDev, VK_FORMAT_D24_UNORM_S8_UINT)) {
+ fPreferredStencilFormat = gD24S8;
+ } else {
+ SkASSERT(stencil_format_supported(interface, physDev, VK_FORMAT_D32_SFLOAT_S8_UINT));
+ fPreferredStencilFormat = gD32S8;
+ }
+}
+
+static bool format_is_srgb(VkFormat format) {
+ SkASSERT(GrVkFormatIsSupported(format));
+
+ switch (format) {
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// These are all the valid VkFormats that we support in Skia. They are roughly ordered from most
+// frequently used to least to improve lookup times in arrays.
+static constexpr VkFormat kVkFormats[] = {
+ VK_FORMAT_R8G8B8A8_UNORM,
+ VK_FORMAT_R8_UNORM,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ VK_FORMAT_R16_SFLOAT,
+ VK_FORMAT_R8G8B8_UNORM,
+ VK_FORMAT_R8G8_UNORM,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ VK_FORMAT_R16_UNORM,
+ VK_FORMAT_R16G16_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ VK_FORMAT_R16G16_SFLOAT,
+};
+
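+// Records the VkFormat used to back colorType when creating surfaces. The formats list is
+// ordered from most to least preferred; the first listed format that supports the color type
+// wins. The SK_DEBUG block verifies that every non-wrapped-only format supporting the color
+// type appears in the list.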
+void GrVkCaps::setColorType(GrColorType colorType, std::initializer_list<VkFormat> formats) {
+#ifdef SK_DEBUG
+ for (size_t i = 0; i < kNumVkFormats; ++i) {
+ const auto& formatInfo = fFormatTable[i];
+ for (int j = 0; j < formatInfo.fColorTypeInfoCount; ++j) {
+ const auto& ctInfo = formatInfo.fColorTypeInfos[j];
+ if (ctInfo.fColorType == colorType &&
+ !SkToBool(ctInfo.fFlags & ColorTypeInfo::kWrappedOnly_Flag)) {
+ bool found = false;
+ for (auto it = formats.begin(); it != formats.end(); ++it) {
+ if (kVkFormats[i] == *it) {
+ found = true;
+ }
+ }
+ SkASSERT(found);
+ }
+ }
+ }
+#endif
+ int idx = static_cast<int>(colorType);
+ for (auto it = formats.begin(); it != formats.end(); ++it) {
+ const auto& info = this->getFormatInfo(*it);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ if (info.fColorTypeInfos[i].fColorType == colorType) {
+ fColorTypeToFormatTable[idx] = *it;
+ return;
+ }
+ }
+ }
+}
+
+const GrVkCaps::FormatInfo& GrVkCaps::getFormatInfo(VkFormat format) const {
+ GrVkCaps* nonConstThis = const_cast<GrVkCaps*>(this);
+ return nonConstThis->getFormatInfo(format);
+}
+
+GrVkCaps::FormatInfo& GrVkCaps::getFormatInfo(VkFormat format) {
+ static_assert(SK_ARRAY_COUNT(kVkFormats) == GrVkCaps::kNumVkFormats,
+ "Size of VkFormats array must match static value in header");
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kVkFormats); ++i) {
+ if (kVkFormats[i] == format) {
+ return fFormatTable[i];
+ }
+ }
+ static FormatInfo kInvalidFormat;
+ return kInvalidFormat;
+}
+
+void GrVkCaps::initFormatTable(const GrVkInterface* interface, VkPhysicalDevice physDev,
+ const VkPhysicalDeviceProperties& properties) {
+ static_assert(SK_ARRAY_COUNT(kVkFormats) == GrVkCaps::kNumVkFormats,
+ "Size of VkFormats array must match static value in header");
+
+ std::fill_n(fColorTypeToFormatTable, kGrColorTypeCnt, VK_FORMAT_UNDEFINED);
+
+ // Go through all the formats and init their support surface and data GrColorTypes.
+ // Format: VK_FORMAT_R8G8B8A8_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R8G8B8A8_UNORM, Surface: kRGBA_8888
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_8888;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ // Format: VK_FORMAT_R8G8B8A8_UNORM, Surface: kRGB_888x
+ {
+ constexpr GrColorType ct = GrColorType::kRGB_888x;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RGB1();
+ }
+ }
+ }
+
+ // Format: VK_FORMAT_R8_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R8_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 1;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R8_UNORM, Surface: kAlpha_8
+ {
+ constexpr GrColorType ct = GrColorType::kAlpha_8;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ // Format: VK_FORMAT_R8_UNORM, Surface: kGray_8
+ {
+ constexpr GrColorType ct = GrColorType::kGray_8;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle("rrr1");
+ }
+ }
+ }
+ // Format: VK_FORMAT_B8G8R8A8_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_B8G8R8A8_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_B8G8R8A8_UNORM, Surface: kBGRA_8888
+ {
+ constexpr GrColorType ct = GrColorType::kBGRA_8888;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R5G6B5_UNORM_PACK16
+ {
+ constexpr VkFormat format = VK_FORMAT_R5G6B5_UNORM_PACK16;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R5G6B5_UNORM_PACK16, Surface: kBGR_565
+ {
+ constexpr GrColorType ct = GrColorType::kBGR_565;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16G16B16A16_SFLOAT
+ {
+ constexpr VkFormat format = VK_FORMAT_R16G16B16A16_SFLOAT;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 8;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 2;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16G16B16A16_SFLOAT, Surface: GrColorType::kRGBA_F16
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_F16;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ // Format: VK_FORMAT_R16G16B16A16_SFLOAT, Surface: GrColorType::kRGBA_F16_Clamped
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_F16_Clamped;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16_SFLOAT
+ {
+ constexpr VkFormat format = VK_FORMAT_R16_SFLOAT;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16_SFLOAT, Surface: kAlpha_F16
+ {
+ constexpr GrColorType ct = GrColorType::kAlpha_F16;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ }
+ }
+ // Format: VK_FORMAT_R8G8B8_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R8G8B8_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 3;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R8G8B8_UNORM, Surface: kRGB_888x
+ {
+ constexpr GrColorType ct = GrColorType::kRGB_888x;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R8G8_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R8G8_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R8G8_UNORM, Surface: kRG_88
+ {
+ constexpr GrColorType ct = GrColorType::kRG_88;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_A2B10G10R10_UNORM_PACK32
+ {
+ constexpr VkFormat format = VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_A2B10G10R10_UNORM_PACK32, Surface: kRGBA_1010102
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_1010102;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_B4G4R4A4_UNORM_PACK16
+ {
+ constexpr VkFormat format = VK_FORMAT_B4G4R4A4_UNORM_PACK16;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_B4G4R4A4_UNORM_PACK16, Surface: kABGR_4444
+ {
+ constexpr GrColorType ct = GrColorType::kABGR_4444;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::BGRA();
+ ctInfo.fOutputSwizzle = GrSwizzle::BGRA();
+ }
+ }
+ }
+ // Format: VK_FORMAT_R4G4B4A4_UNORM_PACK16
+ {
+ constexpr VkFormat format = VK_FORMAT_R4G4B4A4_UNORM_PACK16;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R4G4B4A4_UNORM_PACK16, Surface: kABGR_4444
+ {
+ constexpr GrColorType ct = GrColorType::kABGR_4444;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R8G8B8A8_SRGB
+ {
+ constexpr VkFormat format = VK_FORMAT_R8G8B8A8_SRGB;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R8G8B8A8_SRGB, Surface: kRGBA_8888_SRGB
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_8888_SRGB;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R16_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 2;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16_UNORM, Surface: kAlpha_16
+ {
+ constexpr GrColorType ct = GrColorType::kAlpha_16;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ ctInfo.fTextureSwizzle = GrSwizzle::RRRR();
+ ctInfo.fOutputSwizzle = GrSwizzle::AAAA();
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16G16_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R16G16_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16G16_UNORM, Surface: kRG_1616
+ {
+ constexpr GrColorType ct = GrColorType::kRG_1616;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16G16B16A16_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_R16G16B16A16_UNORM;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 8;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16G16B16A16_UNORM, Surface: kRGBA_16161616
+ {
+ constexpr GrColorType ct = GrColorType::kRGBA_16161616;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_R16G16_SFLOAT
+ {
+ constexpr VkFormat format = VK_FORMAT_R16G16_SFLOAT;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 4;
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_R16G16_SFLOAT, Surface: kRG_F16
+ {
+ constexpr GrColorType ct = GrColorType::kRG_F16;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kRenderable_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
+ auto& info = this->getFormatInfo(format);
+        // Currently we are just overestimating this value to be used in gpu size calculations,
+        // even though the actual size is probably less. We should instead treat planar formats
+        // similarly to compressed textures that go through their own special query for
+        // calculating size.
+ info.fBytesPerPixel = 3;
+ if (fSupportsYcbcrConversion) {
+ info.init(interface, physDev, properties, format);
+ }
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, Surface: kRGB_888x
+ {
+ constexpr GrColorType ct = GrColorType::kRGB_888x;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kWrappedOnly_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_G8_B8R8_2PLANE_420_UNORM
+ {
+ constexpr VkFormat format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+ auto& info = this->getFormatInfo(format);
+        // Currently we are just overestimating this value to be used in gpu size calculations,
+        // even though the actual size is probably less. We should instead treat planar formats
+        // similarly to compressed textures that go through their own special query for
+        // calculating size.
+ info.fBytesPerPixel = 3;
+ if (fSupportsYcbcrConversion) {
+ info.init(interface, physDev, properties, format);
+ }
+ if (SkToBool(info.fOptimalFlags & FormatInfo::kTexturable_Flag)) {
+ info.fColorTypeInfoCount = 1;
+ info.fColorTypeInfos.reset(new ColorTypeInfo[info.fColorTypeInfoCount]());
+ int ctIdx = 0;
+ // Format: VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, Surface: kRGB_888x
+ {
+ constexpr GrColorType ct = GrColorType::kRGB_888x;
+ auto& ctInfo = info.fColorTypeInfos[ctIdx++];
+ ctInfo.fColorType = ct;
+ ctInfo.fFlags = ColorTypeInfo::kUploadData_Flag | ColorTypeInfo::kWrappedOnly_Flag;
+ }
+ }
+ }
+ // Format: VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK
+ {
+ constexpr VkFormat format = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ auto& info = this->getFormatInfo(format);
+ info.init(interface, physDev, properties, format);
+ info.fBytesPerPixel = 0;
+ // No supported GrColorTypes.
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+    // Map GrColorTypes (used for creating GrSurfaces) to VkFormats. The order in which the formats
+    // are passed into the setColorType function indicates the priority in selecting which format
+    // we use for a given GrColorType.
+
+ this->setColorType(GrColorType::kAlpha_8, { VK_FORMAT_R8_UNORM });
+ this->setColorType(GrColorType::kBGR_565, { VK_FORMAT_R5G6B5_UNORM_PACK16 });
+ this->setColorType(GrColorType::kABGR_4444, { VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16 });
+ this->setColorType(GrColorType::kRGBA_8888, { VK_FORMAT_R8G8B8A8_UNORM });
+ this->setColorType(GrColorType::kRGBA_8888_SRGB, { VK_FORMAT_R8G8B8A8_SRGB });
+ this->setColorType(GrColorType::kRGB_888x, { VK_FORMAT_R8G8B8_UNORM,
+ VK_FORMAT_R8G8B8A8_UNORM });
+ this->setColorType(GrColorType::kRG_88, { VK_FORMAT_R8G8_UNORM });
+ this->setColorType(GrColorType::kBGRA_8888, { VK_FORMAT_B8G8R8A8_UNORM });
+ this->setColorType(GrColorType::kRGBA_1010102, { VK_FORMAT_A2B10G10R10_UNORM_PACK32 });
+ this->setColorType(GrColorType::kGray_8, { VK_FORMAT_R8_UNORM });
+ this->setColorType(GrColorType::kAlpha_F16, { VK_FORMAT_R16_SFLOAT });
+ this->setColorType(GrColorType::kRGBA_F16, { VK_FORMAT_R16G16B16A16_SFLOAT });
+ this->setColorType(GrColorType::kRGBA_F16_Clamped, { VK_FORMAT_R16G16B16A16_SFLOAT });
+ this->setColorType(GrColorType::kAlpha_16, { VK_FORMAT_R16_UNORM });
+ this->setColorType(GrColorType::kRG_1616, { VK_FORMAT_R16G16_UNORM });
+ this->setColorType(GrColorType::kRGBA_16161616, { VK_FORMAT_R16G16B16A16_UNORM });
+ this->setColorType(GrColorType::kRG_F16, { VK_FORMAT_R16G16_SFLOAT });
+}
+
+void GrVkCaps::FormatInfo::InitFormatFlags(VkFormatFeatureFlags vkFlags, uint16_t* flags) {
+ if (SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & vkFlags) &&
+ SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT & vkFlags)) {
+ *flags = *flags | kTexturable_Flag;
+
+ // Ganesh assumes that all renderable surfaces are also texturable
+ if (SkToBool(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT & vkFlags)) {
+ *flags = *flags | kRenderable_Flag;
+ }
+ }
+
+ if (SkToBool(VK_FORMAT_FEATURE_BLIT_SRC_BIT & vkFlags)) {
+ *flags = *flags | kBlitSrc_Flag;
+ }
+
+ if (SkToBool(VK_FORMAT_FEATURE_BLIT_DST_BIT & vkFlags)) {
+ *flags = *flags | kBlitDst_Flag;
+ }
+}
+
+void GrVkCaps::FormatInfo::initSampleCounts(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ const VkPhysicalDeviceProperties& physProps,
+ VkFormat format) {
+ VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ VkImageFormatProperties properties;
+ GR_VK_CALL(interface, GetPhysicalDeviceImageFormatProperties(physDev,
+ format,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TILING_OPTIMAL,
+ usage,
+ 0, // createFlags
+ &properties));
+ VkSampleCountFlags flags = properties.sampleCounts;
+ if (flags & VK_SAMPLE_COUNT_1_BIT) {
+ fColorSampleCounts.push_back(1);
+ }
+ if (kImagination_VkVendor == physProps.vendorID) {
+        // MSAA does not work on Imagination GPUs.
+ return;
+ }
+ if (kIntel_VkVendor == physProps.vendorID) {
+        // MSAA doesn't work well on Intel GPUs (chromium:527565, chromium:983926).
+ return;
+ }
+ if (flags & VK_SAMPLE_COUNT_2_BIT) {
+ fColorSampleCounts.push_back(2);
+ }
+ if (flags & VK_SAMPLE_COUNT_4_BIT) {
+ fColorSampleCounts.push_back(4);
+ }
+ if (flags & VK_SAMPLE_COUNT_8_BIT) {
+ fColorSampleCounts.push_back(8);
+ }
+ if (flags & VK_SAMPLE_COUNT_16_BIT) {
+ fColorSampleCounts.push_back(16);
+ }
+ if (flags & VK_SAMPLE_COUNT_32_BIT) {
+ fColorSampleCounts.push_back(32);
+ }
+ if (flags & VK_SAMPLE_COUNT_64_BIT) {
+ fColorSampleCounts.push_back(64);
+ }
+}
+
+void GrVkCaps::FormatInfo::init(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ const VkPhysicalDeviceProperties& properties,
+ VkFormat format) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ InitFormatFlags(props.linearTilingFeatures, &fLinearFlags);
+ InitFormatFlags(props.optimalTilingFeatures, &fOptimalFlags);
+ if (fOptimalFlags & kRenderable_Flag) {
+ this->initSampleCounts(interface, physDev, properties, format);
+ }
+}
+
+// For many checks in caps we need to know whether the GrBackendFormat is external or not. If it
+// is external, the VkFormat will be VK_FORMAT_UNDEFINED, which our various format capability
+// checks do not handle.
+static bool backend_format_is_external(const GrBackendFormat& format) {
+ const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
+ SkASSERT(ycbcrInfo);
+
+    // All external formats have a valid ycbcrInfo used for sampling and a non-zero external
+    // format.
+    if (ycbcrInfo->isValid() && ycbcrInfo->fExternalFormat != 0) {
+#ifdef SK_DEBUG
+        VkFormat vkFormat;
+        SkAssertResult(format.asVkFormat(&vkFormat));
+        SkASSERT(vkFormat == VK_FORMAT_UNDEFINED);
+#endif
+ return true;
+ }
+ return false;
+}
+
+bool GrVkCaps::isFormatSRGB(const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ if (backend_format_is_external(format)) {
+ return false;
+ }
+
+ return format_is_srgb(vkFormat);
+}
+
+bool GrVkCaps::isFormatCompressed(const GrBackendFormat& format,
+ SkImage::CompressionType* compressionType) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ SkImage::CompressionType dummyType;
+ SkImage::CompressionType* compressionTypePtr = compressionType ? compressionType : &dummyType;
+
+ switch (vkFormat) {
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ // ETC2 uses the same compression layout as ETC1
+ *compressionTypePtr = SkImage::kETC1_CompressionType;
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GrVkCaps::isFormatTexturableAndUploadable(GrColorType ct,
+ const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+
+ uint32_t ctFlags = this->getFormatInfo(vkFormat).colorTypeFlags(ct);
+ return this->isVkFormatTexturable(vkFormat) &&
+ SkToBool(ctFlags & ColorTypeInfo::kUploadData_Flag);
+}
+
+bool GrVkCaps::isFormatTexturable(const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ if (backend_format_is_external(format)) {
+ // We can always texture from an external format (assuming we have the ycbcr conversion
+ // info which we require to be passed in).
+ return true;
+ }
+ return this->isVkFormatTexturable(vkFormat);
+}
+
+bool GrVkCaps::isVkFormatTexturable(VkFormat format) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+ return SkToBool(FormatInfo::kTexturable_Flag & info.fOptimalFlags);
+}
+
+bool GrVkCaps::isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount) const {
+ if (!this->isFormatRenderable(format, sampleCount)) {
+ return false;
+ }
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ const auto& info = this->getFormatInfo(vkFormat);
+ if (!SkToBool(info.colorTypeFlags(ct) & ColorTypeInfo::kRenderable_Flag)) {
+ return false;
+ }
+ return true;
+}
+
+bool GrVkCaps::isFormatRenderable(const GrBackendFormat& format, int sampleCount) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ return this->isFormatRenderable(vkFormat, sampleCount);
+}
+
+bool GrVkCaps::isFormatRenderable(VkFormat format, int sampleCount) const {
+ return sampleCount <= this->maxRenderTargetSampleCount(format);
+}
+
+int GrVkCaps::getRenderTargetSampleCount(int requestedCount,
+ const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return 0;
+ }
+
+ return this->getRenderTargetSampleCount(requestedCount, vkFormat);
+}
+
+int GrVkCaps::getRenderTargetSampleCount(int requestedCount, VkFormat format) const {
+ requestedCount = SkTMax(1, requestedCount);
+
+ const FormatInfo& info = this->getFormatInfo(format);
+
+ int count = info.fColorSampleCounts.count();
+
+ if (!count) {
+ return 0;
+ }
+
+ if (1 == requestedCount) {
+ SkASSERT(info.fColorSampleCounts.count() && info.fColorSampleCounts[0] == 1);
+ return 1;
+ }
+
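+    // initSampleCounts() fills fColorSampleCounts in increasing order (1, 2, 4, ...), so the
+    // first entry that is >= requestedCount is the smallest supported sample count.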
+ for (int i = 0; i < count; ++i) {
+ if (info.fColorSampleCounts[i] >= requestedCount) {
+ return info.fColorSampleCounts[i];
+ }
+ }
+ return 0;
+}
+
+int GrVkCaps::maxRenderTargetSampleCount(const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return 0;
+ }
+ return this->maxRenderTargetSampleCount(vkFormat);
+}
+
+int GrVkCaps::maxRenderTargetSampleCount(VkFormat format) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+
+ const auto& table = info.fColorSampleCounts;
+ if (!table.count()) {
+ return 0;
+ }
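+    // The counts were recorded in increasing order, so the last entry is the maximum.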
+ return table[table.count() - 1];
+}
+
+size_t GrVkCaps::bytesPerPixel(const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return 0;
+ }
+ return this->bytesPerPixel(vkFormat);
+}
+
+size_t GrVkCaps::bytesPerPixel(VkFormat format) const {
+ return this->getFormatInfo(format).fBytesPerPixel;
+}
+
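+// Returns the smallest multiple of 4 that is also a multiple of v, i.e. lcm(v, 4). Worked
+// examples: align_to_4(1) == 4, align_to_4(2) == 4, align_to_4(3) == 12, align_to_4(4) == 4.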
+static inline size_t align_to_4(size_t v) {
+ switch (v & 0b11) {
+ // v is already a multiple of 4.
+ case 0: return v;
+ // v is a multiple of 2 but not 4.
+ case 2: return 2 * v;
+ // v is not a multiple of 2.
+ default: return 4 * v;
+ }
+}
+
+GrCaps::SupportedWrite GrVkCaps::supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const {
+ VkFormat vkFormat;
+ if (!surfaceFormat.asVkFormat(&vkFormat)) {
+ return {GrColorType::kUnknown, 0};
+ }
+
+ // We don't support the ability to upload to external formats or formats that require a ycbcr
+ // sampler. In general these types of formats are only used for sampling in a shader.
+ if (backend_format_is_external(surfaceFormat) || GrVkFormatNeedsYcbcrSampler(vkFormat)) {
+ return {GrColorType::kUnknown, 0};
+ }
+
+    // The VkBufferImageCopy::bufferOffset field must be a multiple of both 4 and the texel size.
+ size_t offsetAlignment = align_to_4(this->bytesPerPixel(vkFormat));
+
+ const auto& info = this->getFormatInfo(vkFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == surfaceColorType) {
+ return {surfaceColorType, offsetAlignment};
+ }
+ }
+ return {GrColorType::kUnknown, 0};
+}
+
+GrCaps::SurfaceReadPixelsSupport GrVkCaps::surfaceSupportsReadPixels(
+ const GrSurface* surface) const {
+ if (surface->isProtected()) {
+ return SurfaceReadPixelsSupport::kUnsupported;
+ }
+ if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
+ // We can't directly read from a VkImage that has a ycbcr sampler.
+ if (tex->ycbcrConversionInfo().isValid()) {
+ return SurfaceReadPixelsSupport::kCopyToTexture2D;
+ }
+ // We can't directly read from a compressed format
+ SkImage::CompressionType compressionType;
+ if (GrVkFormatToCompressionType(tex->imageFormat(), &compressionType)) {
+ return SurfaceReadPixelsSupport::kCopyToTexture2D;
+ }
+ }
+ return SurfaceReadPixelsSupport::kSupported;
+}
+
+bool GrVkCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
+ if (auto rt = surface->asRenderTarget()) {
+ return rt->numSamples() <= 1 && SkToBool(surface->asTexture());
+ }
+ // We can't write to a texture that has a ycbcr sampler.
+ if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
+ // We can't directly read from a VkImage that has a ycbcr sampler.
+ if (tex->ycbcrConversionInfo().isValid()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GrVkCaps::onAreColorTypeAndFormatCompatible(GrColorType ct,
+ const GrBackendFormat& format) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return false;
+ }
+ const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
+ SkASSERT(ycbcrInfo);
+
+ if (ycbcrInfo->isValid() && !GrVkFormatNeedsYcbcrSampler(vkFormat)) {
+ // Format may be undefined for external images, which are required to have YCbCr conversion.
+ if (VK_FORMAT_UNDEFINED == vkFormat && ycbcrInfo->fExternalFormat != 0) {
+ return true;
+ }
+ return false;
+ }
+
+ const auto& info = this->getFormatInfo(vkFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ if (info.fColorTypeInfos[i].fColorType == ct) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static GrPixelConfig validate_image_info(VkFormat format, GrColorType ct, bool hasYcbcrConversion) {
+ if (hasYcbcrConversion) {
+ if (GrVkFormatNeedsYcbcrSampler(format)) {
+ return kRGB_888X_GrPixelConfig;
+ }
+
+ // Format may be undefined for external images, which are required to have YCbCr conversion.
+ if (VK_FORMAT_UNDEFINED == format) {
+ // We don't actually care what the color type or config are since we won't use those
+ // values for external textures. However, for read pixels we will draw to a non ycbcr
+ // texture of this config so we set RGBA here for that.
+ return kRGBA_8888_GrPixelConfig;
+ }
+
+ return kUnknown_GrPixelConfig;
+ }
+
+ if (VK_FORMAT_UNDEFINED == format) {
+ return kUnknown_GrPixelConfig;
+ }
+
+ switch (ct) {
+ case GrColorType::kUnknown:
+ break;
+ case GrColorType::kAlpha_8:
+ if (VK_FORMAT_R8_UNORM == format) {
+ return kAlpha_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kBGR_565:
+ if (VK_FORMAT_R5G6B5_UNORM_PACK16 == format) {
+ return kRGB_565_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kABGR_4444:
+ if (VK_FORMAT_B4G4R4A4_UNORM_PACK16 == format ||
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16 == format) {
+ return kRGBA_4444_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888:
+ if (VK_FORMAT_R8G8B8A8_UNORM == format) {
+ return kRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_8888_SRGB:
+ if (VK_FORMAT_R8G8B8A8_SRGB == format) {
+ return kSRGBA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGB_888x:
+ if (VK_FORMAT_R8G8B8_UNORM == format) {
+ return kRGB_888_GrPixelConfig;
+ } else if (VK_FORMAT_R8G8B8A8_UNORM == format) {
+ return kRGB_888X_GrPixelConfig;
+ } else if (VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK == format) {
+ return kRGB_ETC1_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_88:
+ if (VK_FORMAT_R8G8_UNORM == format) {
+ return kRG_88_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kBGRA_8888:
+ if (VK_FORMAT_B8G8R8A8_UNORM == format) {
+ return kBGRA_8888_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_1010102:
+ if (VK_FORMAT_A2B10G10R10_UNORM_PACK32 == format) {
+ return kRGBA_1010102_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kGray_8:
+ if (VK_FORMAT_R8_UNORM == format) {
+ return kGray_8_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_F16:
+ if (VK_FORMAT_R16_SFLOAT == format) {
+ return kAlpha_half_as_Red_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16:
+ if (VK_FORMAT_R16G16B16A16_SFLOAT == format) {
+ return kRGBA_half_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_F16_Clamped:
+ if (VK_FORMAT_R16G16B16A16_SFLOAT == format) {
+ return kRGBA_half_Clamped_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kAlpha_16:
+ if (VK_FORMAT_R16_UNORM == format) {
+ return kAlpha_16_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_1616:
+ if (VK_FORMAT_R16G16_UNORM == format) {
+ return kRG_1616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRGBA_16161616:
+ if (VK_FORMAT_R16G16B16A16_UNORM == format) {
+ return kRGBA_16161616_GrPixelConfig;
+ }
+ break;
+ case GrColorType::kRG_F16:
+ if (VK_FORMAT_R16G16_SFLOAT == format) {
+ return kRG_half_GrPixelConfig;
+ }
+ break;
+ // These have no equivalent:
+ case GrColorType::kRGBA_F32:
+ case GrColorType::kAlpha_8xxx:
+ case GrColorType::kAlpha_F32xxx:
+ case GrColorType::kGray_8xxx:
+ break;
+ }
+
+ return kUnknown_GrPixelConfig;
+}
+
+GrPixelConfig GrVkCaps::onGetConfigFromBackendFormat(const GrBackendFormat& format,
+ GrColorType ct) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return kUnknown_GrPixelConfig;
+ }
+ const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
+ SkASSERT(ycbcrInfo);
+ return validate_image_info(vkFormat, ct, ycbcrInfo->isValid());
+}
+
+GrColorType GrVkCaps::getYUVAColorTypeFromBackendFormat(const GrBackendFormat& format,
+ bool isAlphaChannel) const {
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ return GrColorType::kUnknown;
+ }
+
+ switch (vkFormat) {
+ case VK_FORMAT_R8_UNORM: return isAlphaChannel ? GrColorType::kAlpha_8
+ : GrColorType::kGray_8;
+ case VK_FORMAT_R8G8B8A8_UNORM: return GrColorType::kRGBA_8888;
+ case VK_FORMAT_R8G8B8_UNORM: return GrColorType::kRGB_888x;
+ case VK_FORMAT_R8G8_UNORM: return GrColorType::kRG_88;
+ case VK_FORMAT_B8G8R8A8_UNORM: return GrColorType::kBGRA_8888;
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return GrColorType::kRGBA_1010102;
+ case VK_FORMAT_R16_UNORM: return GrColorType::kAlpha_16;
+ case VK_FORMAT_R16_SFLOAT: return GrColorType::kAlpha_F16;
+ case VK_FORMAT_R16G16_UNORM: return GrColorType::kRG_1616;
+ case VK_FORMAT_R16G16B16A16_UNORM: return GrColorType::kRGBA_16161616;
+ case VK_FORMAT_R16G16_SFLOAT: return GrColorType::kRG_F16;
+ default: return GrColorType::kUnknown;
+ }
+
+ SkUNREACHABLE;
+}
+
+GrBackendFormat GrVkCaps::onGetDefaultBackendFormat(GrColorType ct,
+ GrRenderable renderable) const {
+ VkFormat format = this->getFormatFromColorType(ct);
+ if (format == VK_FORMAT_UNDEFINED) {
+ return GrBackendFormat();
+ }
+ return GrBackendFormat::MakeVk(format);
+}
+
+GrBackendFormat GrVkCaps::getBackendFormatFromCompressionType(
+ SkImage::CompressionType compressionType) const {
+ switch (compressionType) {
+ case SkImage::kETC1_CompressionType:
+ return GrBackendFormat::MakeVk(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK);
+ }
+ SK_ABORT("Invalid compression type");
+}
+
+GrSwizzle GrVkCaps::getTextureSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ VkFormat vkFormat;
+ SkAssertResult(format.asVkFormat(&vkFormat));
+ const auto& info = this->getFormatInfo(vkFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fTextureSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+
+GrSwizzle GrVkCaps::getOutputSwizzle(const GrBackendFormat& format, GrColorType colorType) const {
+ VkFormat vkFormat;
+ SkAssertResult(format.asVkFormat(&vkFormat));
+ const auto& info = this->getFormatInfo(vkFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == colorType) {
+ return ctInfo.fOutputSwizzle;
+ }
+ }
+ return GrSwizzle::RGBA();
+}
+
+GrCaps::SupportedRead GrVkCaps::onSupportedReadPixelsColorType(
+ GrColorType srcColorType, const GrBackendFormat& srcBackendFormat,
+ GrColorType dstColorType) const {
+ VkFormat vkFormat;
+ if (!srcBackendFormat.asVkFormat(&vkFormat)) {
+ return {GrColorType::kUnknown, 0};
+ }
+
+ if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
+ return {GrColorType::kUnknown, 0};
+ }
+
+    // The VkBufferImageCopy::bufferOffset field must be a multiple of both 4 and the texel size.
+ size_t offsetAlignment = align_to_4(this->bytesPerPixel(vkFormat));
+
+ const auto& info = this->getFormatInfo(vkFormat);
+ for (int i = 0; i < info.fColorTypeInfoCount; ++i) {
+ const auto& ctInfo = info.fColorTypeInfos[i];
+ if (ctInfo.fColorType == srcColorType) {
+ return {srcColorType, offsetAlignment};
+ }
+ }
+ return {GrColorType::kUnknown, 0};
+}
+
+int GrVkCaps::getFragmentUniformBinding() const {
+ return GrVkUniformHandler::kUniformBinding;
+}
+
+int GrVkCaps::getFragmentUniformSet() const {
+ return GrVkUniformHandler::kUniformBufferDescSet;
+}
+
+#if GR_TEST_UTILS
+std::vector<GrCaps::TestFormatColorTypeCombination> GrVkCaps::getTestingCombinations() const {
+ std::vector<GrCaps::TestFormatColorTypeCombination> combos = {
+ { GrColorType::kAlpha_8, GrBackendFormat::MakeVk(VK_FORMAT_R8_UNORM) },
+ { GrColorType::kBGR_565, GrBackendFormat::MakeVk(VK_FORMAT_R5G6B5_UNORM_PACK16) },
+ { GrColorType::kABGR_4444, GrBackendFormat::MakeVk(VK_FORMAT_R4G4B4A4_UNORM_PACK16)},
+ { GrColorType::kABGR_4444, GrBackendFormat::MakeVk(VK_FORMAT_B4G4R4A4_UNORM_PACK16)},
+ { GrColorType::kRGBA_8888, GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_UNORM) },
+ { GrColorType::kRGBA_8888_SRGB, GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_SRGB) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_UNORM) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8_UNORM) },
+ { GrColorType::kRGB_888x, GrBackendFormat::MakeVk(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK)},
+ { GrColorType::kRG_88, GrBackendFormat::MakeVk(VK_FORMAT_R8G8_UNORM) },
+ { GrColorType::kBGRA_8888, GrBackendFormat::MakeVk(VK_FORMAT_B8G8R8A8_UNORM) },
+ { GrColorType::kRGBA_1010102, GrBackendFormat::MakeVk(VK_FORMAT_A2B10G10R10_UNORM_PACK32)},
+ { GrColorType::kGray_8, GrBackendFormat::MakeVk(VK_FORMAT_R8_UNORM) },
+ { GrColorType::kAlpha_F16, GrBackendFormat::MakeVk(VK_FORMAT_R16_SFLOAT) },
+ { GrColorType::kRGBA_F16, GrBackendFormat::MakeVk(VK_FORMAT_R16G16B16A16_SFLOAT) },
+ { GrColorType::kRGBA_F16_Clamped, GrBackendFormat::MakeVk(VK_FORMAT_R16G16B16A16_SFLOAT) },
+ { GrColorType::kAlpha_16, GrBackendFormat::MakeVk(VK_FORMAT_R16_UNORM) },
+ { GrColorType::kRG_1616, GrBackendFormat::MakeVk(VK_FORMAT_R16G16_UNORM) },
+ { GrColorType::kRGBA_16161616, GrBackendFormat::MakeVk(VK_FORMAT_R16G16B16A16_UNORM) },
+ { GrColorType::kRG_F16, GrBackendFormat::MakeVk(VK_FORMAT_R16G16_SFLOAT) },
+ };
+
+ return combos;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCaps.h b/gfx/skia/skia/src/gpu/vk/GrVkCaps.h
new file mode 100644
index 0000000000..0b90035d60
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCaps.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCaps_DEFINED
+#define GrVkCaps_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/vk/GrVkStencilAttachment.h"
+
+class GrShaderCaps;
+class GrVkExtensions;
+struct GrVkInterface;
+
+/**
+ * Stores some capabilities of a Vk backend.
+ */
+class GrVkCaps : public GrCaps {
+public:
+ typedef GrVkStencilAttachment::Format StencilFormat;
+
+ /**
+ * Creates a GrVkCaps that is set such that nothing is supported. The init function should
+ * be called to fill out the caps.
+ */
+ GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device, const VkPhysicalDeviceFeatures2& features,
+ uint32_t instanceVersion, uint32_t physicalDeviceVersion,
+ const GrVkExtensions& extensions, GrProtected isProtected = GrProtected::kNo);
+
+ bool isFormatSRGB(const GrBackendFormat&) const override;
+ bool isFormatCompressed(const GrBackendFormat&,
+ SkImage::CompressionType* compressionType = nullptr) const override;
+
+ bool isFormatTexturableAndUploadable(GrColorType, const GrBackendFormat&) const override;
+ bool isFormatTexturable(const GrBackendFormat&) const override;
+ bool isVkFormatTexturable(VkFormat) const;
+
+ bool isFormatCopyable(const GrBackendFormat&) const override { return true; }
+
+ bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
+ int sampleCount = 1) const override;
+ bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const override;
+ bool isFormatRenderable(VkFormat, int sampleCount) const;
+
+ int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat&) const override;
+ int getRenderTargetSampleCount(int requestedCount, VkFormat) const;
+
+ int maxRenderTargetSampleCount(const GrBackendFormat&) const override;
+ int maxRenderTargetSampleCount(VkFormat format) const;
+
+ size_t bytesPerPixel(const GrBackendFormat&) const override;
+ size_t bytesPerPixel(VkFormat format) const;
+
+ SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
+ const GrBackendFormat& surfaceFormat,
+ GrColorType srcColorType) const override;
+
+ SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override;
+
+ bool isVkFormatTexturableLinearly(VkFormat format) const {
+ return SkToBool(FormatInfo::kTexturable_Flag & this->getFormatInfo(format).fLinearFlags);
+ }
+
+ bool formatCanBeDstofBlit(VkFormat format, bool linearTiled) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+ const uint16_t& flags = linearTiled ? info.fLinearFlags : info.fOptimalFlags;
+ return SkToBool(FormatInfo::kBlitDst_Flag & flags);
+ }
+
+ bool formatCanBeSrcofBlit(VkFormat format, bool linearTiled) const {
+ const FormatInfo& info = this->getFormatInfo(format);
+ const uint16_t& flags = linearTiled ? info.fLinearFlags : info.fOptimalFlags;
+ return SkToBool(FormatInfo::kBlitSrc_Flag & flags);
+ }
+
+    // Adreno Vulkan drivers do not respect the imageOffset parameter, at least in
+    // copyImageToBuffer. This flag says that we must always start the copy from the origin.
+ bool mustDoCopiesFromOrigin() const {
+ return fMustDoCopiesFromOrigin;
+ }
+
+ // Sometimes calls to QueueWaitIdle return before actually signalling the fences
+ // on the command buffers even though they have completed. This causes an assert to fire when
+ // destroying the command buffers. Therefore we add a sleep to make sure the fence signals.
+ bool mustSleepOnTearDown() const {
+ return fMustSleepOnTearDown;
+ }
+
+ // Returns true if we should always make dedicated allocations for VkImages.
+ bool shouldAlwaysUseDedicatedImageMemory() const {
+ return fShouldAlwaysUseDedicatedImageMemory;
+ }
+
+ // Always use a transfer buffer instead of vkCmdUpdateBuffer to upload data to a VkBuffer.
+ bool avoidUpdateBuffers() const {
+ return fAvoidUpdateBuffers;
+ }
+
+ /**
+ * Returns both a supported and most preferred stencil format to use in draws.
+ */
+ const StencilFormat& preferredStencilFormat() const {
+ return fPreferredStencilFormat;
+ }
+
+    // Returns whether the device supports VK_KHR_swapchain. Internally Skia never uses any of the
+ // swapchain functions, but we may need to transition to and from the
+ // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR image layout, so we must know whether that layout is
+ // supported.
+ bool supportsSwapchain() const { return fSupportsSwapchain; }
+
+ // Returns whether the device supports the ability to extend VkPhysicalDeviceProperties struct.
+ bool supportsPhysicalDeviceProperties2() const { return fSupportsPhysicalDeviceProperties2; }
+ // Returns whether the device supports the ability to extend VkMemoryRequirements struct.
+ bool supportsMemoryRequirements2() const { return fSupportsMemoryRequirements2; }
+
+ // Returns whether the device supports the ability to extend the vkBindMemory call.
+ bool supportsBindMemory2() const { return fSupportsBindMemory2; }
+
+    // Returns whether the device supports the various API maintenance fixes to Vulkan 1.0. In
+    // Vulkan 1.1 all of these maintenance fixes are part of the core spec.
+ bool supportsMaintenance1() const { return fSupportsMaintenance1; }
+ bool supportsMaintenance2() const { return fSupportsMaintenance2; }
+ bool supportsMaintenance3() const { return fSupportsMaintenance3; }
+
+    // Returns true if the device supports passing in a flag to say an allocation is dedicated to
+    // a single resource. For some devices this allows them to return more optimized memory,
+    // knowing they will never need to suballocate amongst multiple objects.
+ bool supportsDedicatedAllocation() const { return fSupportsDedicatedAllocation; }
+
+ // Returns true if the device supports importing of external memory into Vulkan memory.
+ bool supportsExternalMemory() const { return fSupportsExternalMemory; }
+ // Returns true if the device supports importing Android hardware buffers into Vulkan memory.
+ bool supportsAndroidHWBExternalMemory() const { return fSupportsAndroidHWBExternalMemory; }
+
+    // Returns true if the device supports ycbcr conversion for samplers.
+ bool supportsYcbcrConversion() const { return fSupportsYcbcrConversion; }
+
+ // Returns true if the device supports protected memory.
+ bool supportsProtectedMemory() const { return fSupportsProtectedMemory; }
+
+ // Returns whether we prefer to record draws directly into a primary command buffer.
+ bool preferPrimaryOverSecondaryCommandBuffers() const {
+ return fPreferPrimaryOverSecondaryCommandBuffers;
+ }
+
+ bool mustInvalidatePrimaryCmdBufferStateAfterClearAttachments() const {
+ return fMustInvalidatePrimaryCmdBufferStateAfterClearAttachments;
+ }
+
+    /**
+     * Helpers used by canCopySurface. In all cases, a SampleCnt parameter of zero means the
+     * surface is not a render target; otherwise it is the number of samples in the render
+     * target.
+     */
+    bool canCopyImage(VkFormat dstFormat, int dstSampleCnt, bool dstHasYcbcr,
+                      VkFormat srcFormat, int srcSampleCnt, bool srcHasYcbcr) const;
+
+    bool canCopyAsBlit(VkFormat dstFormat, int dstSampleCnt, bool dstIsLinear, bool dstHasYcbcr,
+                       VkFormat srcFormat, int srcSampleCnt, bool srcIsLinear,
+                       bool srcHasYcbcr) const;
+
+    bool canCopyAsResolve(VkFormat dstFormat, int dstSampleCnt, bool dstHasYcbcr,
+                          VkFormat srcFormat, int srcSampleCnt, bool srcHasYcbcr) const;
+
+ GrColorType getYUVAColorTypeFromBackendFormat(const GrBackendFormat&,
+ bool isAlphaChannel) const override;
+
+ GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override;
+
+ VkFormat getFormatFromColorType(GrColorType colorType) const {
+ int idx = static_cast<int>(colorType);
+ return fColorTypeToFormatTable[idx];
+ }
+
+ GrSwizzle getTextureSwizzle(const GrBackendFormat&, GrColorType) const override;
+ GrSwizzle getOutputSwizzle(const GrBackendFormat&, GrColorType) const override;
+
+ int getFragmentUniformBinding() const;
+ int getFragmentUniformSet() const;
+
+#if GR_TEST_UTILS
+ std::vector<TestFormatColorTypeCombination> getTestingCombinations() const override;
+#endif
+
+private:
+ enum VkVendor {
+ kAMD_VkVendor = 4098,
+ kARM_VkVendor = 5045,
+ kImagination_VkVendor = 4112,
+ kIntel_VkVendor = 32902,
+ kNvidia_VkVendor = 4318,
+ kQualcomm_VkVendor = 20803,
+ };
+
+ void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device, const VkPhysicalDeviceFeatures2&,
+ uint32_t physicalDeviceVersion, const GrVkExtensions&, GrProtected isProtected);
+ void initGrCaps(const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev,
+ const VkPhysicalDeviceProperties&,
+ const VkPhysicalDeviceMemoryProperties&,
+ const VkPhysicalDeviceFeatures2&,
+ const GrVkExtensions&);
+ void initShaderCaps(const VkPhysicalDeviceProperties&, const VkPhysicalDeviceFeatures2&);
+
+ void initFormatTable(const GrVkInterface*, VkPhysicalDevice, const VkPhysicalDeviceProperties&);
+ void initStencilFormat(const GrVkInterface* iface, VkPhysicalDevice physDev);
+
+ void applyDriverCorrectnessWorkarounds(const VkPhysicalDeviceProperties&);
+
+ bool onSurfaceSupportsWritePixels(const GrSurface*) const override;
+ bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) const override;
+ GrBackendFormat onGetDefaultBackendFormat(GrColorType, GrRenderable) const override;
+
+ GrPixelConfig onGetConfigFromBackendFormat(const GrBackendFormat&, GrColorType) const override;
+ bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const override;
+
+ SupportedRead onSupportedReadPixelsColorType(GrColorType, const GrBackendFormat&,
+ GrColorType) const override;
+
+ // ColorTypeInfo for a specific format
+ struct ColorTypeInfo {
+ GrColorType fColorType = GrColorType::kUnknown;
+ enum {
+ kUploadData_Flag = 0x1,
+            // Does Ganesh itself support rendering to this colorType & format pair? Renderability
+            // additionally depends on whether the format itself is renderable.
+ kRenderable_Flag = 0x2,
+ // Indicates that this colorType is supported only if we are wrapping a texture with
+ // the given format and colorType. We do not allow creation with this pair.
+ kWrappedOnly_Flag = 0x4,
+ };
+ uint32_t fFlags = 0;
+
+ GrSwizzle fTextureSwizzle;
+ GrSwizzle fOutputSwizzle;
+ };
+
+ struct FormatInfo {
+ uint32_t colorTypeFlags(GrColorType colorType) const {
+ for (int i = 0; i < fColorTypeInfoCount; ++i) {
+ if (fColorTypeInfos[i].fColorType == colorType) {
+ return fColorTypeInfos[i].fFlags;
+ }
+ }
+ return 0;
+ }
+
+ void init(const GrVkInterface*, VkPhysicalDevice, const VkPhysicalDeviceProperties&,
+ VkFormat);
+ static void InitFormatFlags(VkFormatFeatureFlags, uint16_t* flags);
+ void initSampleCounts(const GrVkInterface*, VkPhysicalDevice,
+ const VkPhysicalDeviceProperties&, VkFormat);
+
+ enum {
+ kTexturable_Flag = 0x1,
+ kRenderable_Flag = 0x2,
+ kBlitSrc_Flag = 0x4,
+ kBlitDst_Flag = 0x8,
+ };
+
+ uint16_t fOptimalFlags = 0;
+ uint16_t fLinearFlags = 0;
+
+ SkTDArray<int> fColorSampleCounts;
+ // This value is only valid for regular formats. Compressed formats will be 0.
+ size_t fBytesPerPixel = 0;
+
+ std::unique_ptr<ColorTypeInfo[]> fColorTypeInfos;
+ int fColorTypeInfoCount = 0;
+ };
+ static const size_t kNumVkFormats = 19;
+ FormatInfo fFormatTable[kNumVkFormats];
+
+ FormatInfo& getFormatInfo(VkFormat);
+ const FormatInfo& getFormatInfo(VkFormat) const;
+
+ VkFormat fColorTypeToFormatTable[kGrColorTypeCnt];
+ void setColorType(GrColorType, std::initializer_list<VkFormat> formats);
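+    // A sketch of the intent (the definition lives in GrVkCaps.cpp): for each color type,
+    // setColorType() is expected to record the first VkFormat in |formats| that the device
+    // supports, which onGetDefaultBackendFormat() then hands back out.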
+
+ StencilFormat fPreferredStencilFormat;
+
+ SkSTArray<1, GrVkYcbcrConversionInfo> fYcbcrInfos;
+
+ bool fMustDoCopiesFromOrigin = false;
+ bool fMustSleepOnTearDown = false;
+ bool fShouldAlwaysUseDedicatedImageMemory = false;
+
+ bool fAvoidUpdateBuffers = false;
+
+ bool fSupportsSwapchain = false;
+
+ bool fSupportsPhysicalDeviceProperties2 = false;
+ bool fSupportsMemoryRequirements2 = false;
+ bool fSupportsBindMemory2 = false;
+ bool fSupportsMaintenance1 = false;
+ bool fSupportsMaintenance2 = false;
+ bool fSupportsMaintenance3 = false;
+
+ bool fSupportsDedicatedAllocation = false;
+ bool fSupportsExternalMemory = false;
+ bool fSupportsAndroidHWBExternalMemory = false;
+
+ bool fSupportsYcbcrConversion = false;
+
+ bool fSupportsProtectedMemory = false;
+
+ bool fPreferPrimaryOverSecondaryCommandBuffers = true;
+ bool fMustInvalidatePrimaryCmdBufferStateAfterClearAttachments = false;
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp
new file mode 100644
index 0000000000..5f95ccfe88
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -0,0 +1,942 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+
+#include "include/core/SkRect.h"
+#include "src/gpu/vk/GrVkCommandPool.h"
+#include "src/gpu/vk/GrVkFramebuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImage.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkIndexBuffer.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkTransferBuffer.h"
+#include "src/gpu/vk/GrVkUtil.h"
+#include "src/gpu/vk/GrVkVertexBuffer.h"
+
+void GrVkCommandBuffer::invalidateState() {
+ for (auto& boundInputBuffer : fBoundInputBuffers) {
+ boundInputBuffer = VK_NULL_HANDLE;
+ }
+ fBoundIndexBuffer = VK_NULL_HANDLE;
+
+ memset(&fCachedViewport, 0, sizeof(VkViewport));
+    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0
+
+ memset(&fCachedScissor, 0, sizeof(VkRect2D));
+    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid
+
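+    // Blend constants are colors, so the values Skia sets stay in [0, 1]; -1 can therefore never
+    // match and forces the next setBlendConstants() call through to the driver.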
+ for (int i = 0; i < 4; ++i) {
+ fCachedBlendConstant[i] = -1.0;
+ }
+}
+
+void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu) const {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(!fIsActive);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedResources[i]->unref(gpu);
+ }
+
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
+ }
+
+ if (!this->isWrapped()) {
+ GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), fCmdPool->vkCommandPool(),
+ 1, &fCmdBuffer));
+ }
+
+ this->onFreeGPUData(gpu);
+}
+
+void GrVkCommandBuffer::abandonGPUData() const {
+ SkDEBUGCODE(fResourcesReleased = true;)
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedResources[i]->unrefAndAbandon();
+ }
+
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
+ // We don't recycle resources when abandoning them.
+ fTrackedRecycledResources[i]->unrefAndAbandon();
+ }
+
+ this->onAbandonGPUData();
+}
+
+void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkDEBUGCODE(fResourcesReleased = true;)
+ SkASSERT(!fIsActive);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedResources[i]->unref(gpu);
+ }
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
+ }
+
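+    // rewind() keeps the arrays' allocations for reuse; every kNumRewindResetsBeforeFullReset
+    // resets we do a full reset() instead so that a one-off spike in tracked resources does not
+    // pin that memory for the life of the command buffer.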
+ if (++fNumResets > kNumRewindResetsBeforeFullReset) {
+ fTrackedResources.reset();
+ fTrackedRecycledResources.reset();
+ fTrackedResources.setReserve(kInitialTrackedResourcesCount);
+ fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
+ fNumResets = 0;
+ } else {
+ fTrackedResources.rewind();
+ fTrackedRecycledResources.rewind();
+ }
+
+ this->invalidateState();
+
+ this->onReleaseResources(gpu);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CommandBuffer commands
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
+ const GrVkResource* resource,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) {
+ SkASSERT(!this->isWrapped());
+ SkASSERT(fIsActive);
+ // For images we can have barriers inside of render passes but they require us to add more
+ // support in subpasses which need self dependencies to have barriers inside them. Also, we can
+ // never have buffer barriers inside of a render pass. For now we will just assert that we are
+ // not in a render pass.
+ SkASSERT(!fActiveRenderPass);
+
+ if (barrierType == kBufferMemory_BarrierType) {
+ const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+ fBufferBarriers.push_back(*barrierPtr);
+ } else {
+ SkASSERT(barrierType == kImageMemory_BarrierType);
+ const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+        // We need to check if we are adding a pipeline barrier that covers part of the same
+        // subresource range as a barrier already in the current batch. If it does, we must submit
+        // the first batch because the Vulkan spec does not define a specific ordering for barriers
+        // submitted in the same batch.
+        // TODO: See if we can gain anything by merging barriers together instead of submitting
+        // the old ones.
+ for (int i = 0; i < fImageBarriers.count(); ++i) {
+ VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
+ if (barrierPtr->image == currentBarrier.image) {
+ const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
+ const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
+ SkASSERT(newRange.aspectMask == oldRange.aspectMask);
+ SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
+ SkASSERT(newRange.layerCount == oldRange.layerCount);
+ uint32_t newStart = newRange.baseMipLevel;
+ uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
+ uint32_t oldStart = oldRange.baseMipLevel;
+ uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
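+                    // The closed ranges [newStart, newEnd] and [oldStart, oldEnd] overlap
+                    // exactly when max(newStart, oldStart) <= min(newEnd, oldEnd).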
+ if (SkTMax(newStart, oldStart) <= SkTMin(newEnd, oldEnd)) {
+ this->submitPipelineBarriers(gpu);
+ break;
+ }
+ }
+ }
+ fImageBarriers.push_back(*barrierPtr);
+ }
+ fBarriersByRegion |= byRegion;
+
+ fSrcStageMask = fSrcStageMask | srcStageMask;
+ fDstStageMask = fDstStageMask | dstStageMask;
+
+ fHasWork = true;
+ if (resource) {
+ this->addResource(resource);
+ }
+}
+
+void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+
+ // Currently we never submit a pipeline barrier without at least one memory barrier.
+ if (fBufferBarriers.count() || fImageBarriers.count()) {
+ // For images we can have barriers inside of render passes but they require us to add more
+ // support in subpasses which need self dependencies to have barriers inside them. Also, we
+ // can never have buffer barriers inside of a render pass. For now we will just assert that
+ // we are not in a render pass.
+ SkASSERT(!fActiveRenderPass);
+ SkASSERT(!this->isWrapped());
+ SkASSERT(fSrcStageMask && fDstStageMask);
+
+ VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
+ fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
+ fBufferBarriers.count(), fBufferBarriers.begin(),
+ fImageBarriers.count(), fImageBarriers.begin()));
+ fBufferBarriers.reset();
+ fImageBarriers.reset();
+ fBarriersByRegion = false;
+ fSrcStageMask = 0;
+ fDstStageMask = 0;
+ }
+ SkASSERT(!fBufferBarriers.count());
+ SkASSERT(!fImageBarriers.count());
+ SkASSERT(!fBarriersByRegion);
+ SkASSERT(!fSrcStageMask);
+ SkASSERT(!fDstStageMask);
+}
+
+
+void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
+ const GrVkVertexBuffer* vbuffer) {
+ VkBuffer vkBuffer = vbuffer->buffer();
+ SkASSERT(VK_NULL_HANDLE != vkBuffer);
+ SkASSERT(binding < kMaxInputBuffers);
+ // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
+ // to know if we can skip binding or not.
+ if (vkBuffer != fBoundInputBuffers[binding]) {
+ VkDeviceSize offset = vbuffer->offset();
+ GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
+ binding,
+ 1,
+ &vkBuffer,
+ &offset));
+ fBoundInputBuffers[binding] = vkBuffer;
+ this->addResource(vbuffer->resource());
+ }
+}
+
+void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer) {
+ VkBuffer vkBuffer = ibuffer->buffer();
+ SkASSERT(VK_NULL_HANDLE != vkBuffer);
+ // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
+ // to know if we can skip binding or not.
+ if (vkBuffer != fBoundIndexBuffer) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
+ vkBuffer,
+ ibuffer->offset(),
+ VK_INDEX_TYPE_UINT16));
+ fBoundIndexBuffer = vkBuffer;
+ this->addResource(ibuffer->resource());
+ }
+}
+
+void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(numAttachments > 0);
+ SkASSERT(numRects > 0);
+
+ this->addingWork(gpu);
+
+#ifdef SK_DEBUG
+ for (int i = 0; i < numAttachments; ++i) {
+ if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ uint32_t testIndex;
+ SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
+ SkASSERT(testIndex == attachments[i].colorAttachment);
+ }
+ }
+#endif
+ GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
+ numAttachments,
+ attachments,
+ numRects,
+ clearRects));
+ if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
+ this->invalidateState();
+ }
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkPipelineState* pipelineState,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ layout,
+ firstSet,
+ setCount,
+ descriptorSets,
+ dynamicOffsetCount,
+ dynamicOffsets));
+}
+
+void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->pipeline()));
+ this->addResource(pipeline);
+}
+
+void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ this->addingWork(gpu);
+ GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
+ indexCount,
+ instanceCount,
+ firstIndex,
+ vertexOffset,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ this->addingWork(gpu);
+ GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* viewports) {
+ SkASSERT(fIsActive);
+ SkASSERT(1 == viewportCount);
+ if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
+ firstViewport,
+ viewportCount,
+ viewports));
+ fCachedViewport = viewports[0];
+ }
+}
+
+void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* scissors) {
+ SkASSERT(fIsActive);
+ SkASSERT(1 == scissorCount);
+ if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
+ firstScissor,
+ scissorCount,
+ scissors));
+ fCachedScissor = scissors[0];
+ }
+}
+
+void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
+ const float blendConstants[4]) {
+ SkASSERT(fIsActive);
+ if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
+ memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
+ }
+}
+
+void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
+ this->submitPipelineBarriers(gpu);
+ fHasWork = true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PrimaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
+GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
+ // Should have ended any render pass we're in the middle of
+ SkASSERT(!fActiveRenderPass);
+}
+
+GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
+ GrVkCommandPool* cmdPool) {
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ cmdPool->vkCommandPool(), // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkPrimaryCommandBuffer(cmdBuffer, cmdPool);
+}
+
+void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ fIsActive = true;
+}
+
+void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+
+ this->submitPipelineBarriers(gpu);
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ this->invalidateState();
+ fIsActive = false;
+ fHasWork = false;
+}
+
+void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const VkClearValue clearValues[],
+ const GrVkRenderTarget& target,
+ const SkIRect& bounds,
+ bool forSecondaryCB) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ SkASSERT(renderPass->isCompatible(target));
+
+ this->addingWork(gpu);
+
+ VkRenderPassBeginInfo beginInfo;
+ VkRect2D renderArea;
+ renderArea.offset = { bounds.fLeft , bounds.fTop };
+ renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };
+
+ memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
+ beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo.pNext = nullptr;
+ beginInfo.renderPass = renderPass->vkRenderPass();
+ beginInfo.framebuffer = target.framebuffer()->framebuffer();
+ beginInfo.renderArea = renderArea;
+ beginInfo.clearValueCount = renderPass->clearValueCount();
+ beginInfo.pClearValues = clearValues;
+
+ VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
+ : VK_SUBPASS_CONTENTS_INLINE;
+
+ GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
+ fActiveRenderPass = renderPass;
+ this->addResource(renderPass);
+ target.addResources(*this);
+}
+
+void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ this->addingWork(gpu);
+ GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
+ fActiveRenderPass = nullptr;
+}
+
+void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
+ std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
+    // The Vulkan spec only allows a secondary command buffer to be executed on a primary command
+    // buffer if the command pools they were allocated from were created with the same queue
+    // family. However, we currently always create them from the same pool.
+ SkASSERT(buffer->commandPool() == fCmdPool);
+ SkASSERT(fIsActive);
+ SkASSERT(!buffer->fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));
+
+ this->addingWork(gpu);
+
+ GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
+ fSecondaryCommandBuffers.push_back(std::move(buffer));
+ // When executing a secondary command buffer all state (besides render pass state) becomes
+ // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
+ this->invalidateState();
+}
+
+static void submit_to_queue(const GrVkInterface* interface,
+ VkQueue queue,
+ VkFence fence,
+ uint32_t waitCount,
+ const VkSemaphore* waitSemaphores,
+ const VkPipelineStageFlags* waitStages,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* commandBuffers,
+ uint32_t signalCount,
+ const VkSemaphore* signalSemaphores,
+ GrProtected protectedContext) {
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (protectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
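+    // Note: when protectedContext is kYes, submitInfo.pNext below points at protectedSubmitInfo,
+    // so that struct must stay alive until QueueSubmit returns.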
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
+ submitInfo.waitSemaphoreCount = waitCount;
+ submitInfo.pWaitSemaphores = waitSemaphores;
+ submitInfo.pWaitDstStageMask = waitStages;
+ submitInfo.commandBufferCount = commandBufferCount;
+ submitInfo.pCommandBuffers = commandBuffers;
+ submitInfo.signalSemaphoreCount = signalCount;
+ submitInfo.pSignalSemaphores = signalSemaphores;
+ GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
+}
+
+void GrVkPrimaryCommandBuffer::submitToQueue(
+ const GrVkGpu* gpu,
+ VkQueue queue,
+ GrVkGpu::SyncQueue sync,
+ SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
+ SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
+ SkASSERT(!fIsActive);
+
+ VkResult err;
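+    // The submit fence is created lazily on first use and then reused; ResetFences() returns it
+    // to the unsignaled state before each subsequent submit.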
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
+ &fSubmitFence));
+ SkASSERT(!err);
+ } else {
+ GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
+ }
+
+ int signalCount = signalSemaphores.count();
+ int waitCount = waitSemaphores.count();
+
+ if (0 == signalCount && 0 == waitCount) {
+        // This command buffer has no dependent semaphores, so we can submit it to the queue
+        // directly.
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr, 1,
+ &fCmdBuffer, 0, nullptr,
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
+ } else {
+ SkTArray<VkSemaphore> vkSignalSems(signalCount);
+ for (int i = 0; i < signalCount; ++i) {
+ if (signalSemaphores[i]->shouldSignal()) {
+ this->addResource(signalSemaphores[i]);
+ vkSignalSems.push_back(signalSemaphores[i]->semaphore());
+ }
+ }
+
+ SkTArray<VkSemaphore> vkWaitSems(waitCount);
+ SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
+ for (int i = 0; i < waitCount; ++i) {
+ if (waitSemaphores[i]->shouldWait()) {
+ this->addResource(waitSemaphores[i]);
+ vkWaitSems.push_back(waitSemaphores[i]->semaphore());
+ vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+ }
+ }
+ submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, vkWaitSems.count(),
+ vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
+ vkSignalSems.count(), vkSignalSems.begin(),
+ gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
+ for (int i = 0; i < signalCount; ++i) {
+ signalSemaphores[i]->markAsSignaled();
+ }
+ for (int i = 0; i < waitCount; ++i) {
+ waitSemaphores[i]->markAsWaited();
+ }
+ }
+
+ if (GrVkGpu::kForce_SyncQueue == sync) {
+ err = GR_VK_CALL(gpu->vkInterface(),
+ WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SK_ABORT("failing");
+ }
+ SkASSERT(!err);
+
+ fFinishedProcs.reset();
+
+ // Destroy the fence
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ fSubmitFence = VK_NULL_HANDLE;
+ }
+}
+
+bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ return true;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
+ switch (err) {
+ case VK_SUCCESS:
+ return true;
+
+ case VK_NOT_READY:
+ return false;
+
+ default:
+ SkDebugf("Error getting fence status: %d\n", err);
+ SK_ABORT("failing");
+ break;
+ }
+
+ return false;
+}
+
+void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
+ fFinishedProcs.push_back(std::move(finishedProc));
+}
+
+void GrVkPrimaryCommandBuffer::onReleaseResources(GrVkGpu* gpu) {
+ for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
+ fSecondaryCommandBuffers[i]->releaseResources(gpu);
+ }
+ fFinishedProcs.reset();
+}
+
+void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkGpu* gpu) {
+ for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
+ SkASSERT(fSecondaryCommandBuffers[i]->commandPool() == fCmdPool);
+ fSecondaryCommandBuffers[i].release()->recycle(gpu);
+ }
+ fSecondaryCommandBuffers.reset();
+}
+
+void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(srcImage->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
+ srcImage->image(),
+ srcLayout,
+ dstImage->image(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
+ const GrVkResource* srcResource,
+ VkImage srcImage,
+ VkImageLayout srcLayout,
+ const GrVkResource* dstResource,
+ VkImage dstImage,
+ VkImageLayout dstLayout,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(srcResource);
+ this->addResource(dstResource);
+ GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
+ srcImage,
+ srcLayout,
+ dstImage,
+ dstLayout,
+ blitRegionCount,
+ blitRegions,
+ filter));
+}
+
+void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter) {
+ this->blitImage(gpu,
+ srcImage.resource(),
+ srcImage.image(),
+ srcImage.currentLayout(),
+ dstImage.resource(),
+ dstImage.image(),
+ dstImage.currentLayout(),
+ blitRegionCount,
+ blitRegions,
+ filter);
+}
+
+
+void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(srcImage->resource());
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
+ srcImage->image(),
+ srcLayout,
+ dstBuffer->buffer(),
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(srcBuffer->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
+ srcBuffer->buffer(),
+ dstImage->image(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+
+void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
+ GrVkBuffer* srcBuffer,
+ GrVkBuffer* dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* regions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+#ifdef SK_DEBUG
+ for (uint32_t i = 0; i < regionCount; ++i) {
+ const VkBufferCopy& region = regions[i];
+ SkASSERT(region.size > 0);
+ SkASSERT(region.srcOffset < srcBuffer->size());
+ SkASSERT(region.dstOffset < dstBuffer->size());
+ SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
+ SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
+ }
+#endif
+ this->addResource(srcBuffer->resource());
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
+ srcBuffer->buffer(),
+ dstBuffer->buffer(),
+ regionCount,
+ regions));
+}
+
+void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
+ GrVkBuffer* dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* data) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ SkASSERT(0 == (dstOffset & 0x03)); // four byte aligned
+ // TODO: handle larger transfer sizes
+ SkASSERT(dataSize <= 65536);
+ SkASSERT(0 == (dataSize & 0x03)); // four byte aligned
+ this->addingWork(gpu);
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
+ dstBuffer->buffer(),
+ dstOffset,
+ dataSize,
+ (const uint32_t*) data));
+}
+
+void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
+ image->image(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearDepthStencilValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addingWork(gpu);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
+ image->image(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t regionCount,
+ const VkImageResolve* regions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+
+ this->addingWork(gpu);
+ this->addResource(srcImage.resource());
+ this->addResource(dstImage.resource());
+
+ GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
+ srcImage.image(),
+ srcImage.currentLayout(),
+ dstImage.image(),
+ dstImage.currentLayout(),
+ regionCount,
+ regions));
+}
+
+void GrVkPrimaryCommandBuffer::onFreeGPUData(GrVkGpu* gpu) const {
+ SkASSERT(!fActiveRenderPass);
+ // Destroy the fence, if any
+ if (VK_NULL_HANDLE != fSubmitFence) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ }
+ for (const auto& buffer : fSecondaryCommandBuffers) {
+ buffer->freeGPUData(gpu);
+ }
+}
+
+void GrVkPrimaryCommandBuffer::onAbandonGPUData() const {
+ SkASSERT(!fActiveRenderPass);
+ for (const auto& buffer : fSecondaryCommandBuffers) {
+ buffer->abandonGPUData();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SecondaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
+ GrVkCommandPool* cmdPool) {
+ SkASSERT(cmdPool);
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ cmdPool->vkCommandPool(), // commandPool
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkSecondaryCommandBuffer(cmdBuffer, cmdPool);
+}
+
+GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
+ return new GrVkSecondaryCommandBuffer(cmdBuffer, nullptr);
+}
+
+void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
+ const GrVkRenderPass* compatibleRenderPass) {
+ SkASSERT(!fIsActive);
+ SkASSERT(compatibleRenderPass);
+ fActiveRenderPass = compatibleRenderPass;
+
+ if (!this->isWrapped()) {
+ VkCommandBufferInheritanceInfo inheritanceInfo;
+ memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
+ inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
+ inheritanceInfo.pNext = nullptr;
+ inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
+ inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
+ inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
+ inheritanceInfo.occlusionQueryEnable = false;
+ inheritanceInfo.queryFlags = 0;
+ inheritanceInfo.pipelineStatistics = 0;
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ }
+ fIsActive = true;
+}
+
+void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ if (!this->isWrapped()) {
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ }
+ this->invalidateState();
+ fIsActive = false;
+ fHasWork = false;
+}
+
+void GrVkSecondaryCommandBuffer::recycle(GrVkGpu* gpu) {
+ if (this->isWrapped()) {
+ this->freeGPUData(gpu);
+ delete this;
+ } else {
+ fCmdPool->recycleSecondaryCommandBuffer(this);
+ }
+}
+
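recycle() above takes one of two routes: a wrapped buffer (created around an externally supplied VkCommandBuffer, so its fCmdPool is null) is freed and deleted on the spot, while a pool-owned buffer is handed back to its GrVkCommandPool's free list. A minimal standalone sketch of that dispatch, with hypothetical Buffer/Pool types standing in for the Skia classes:

#include <memory>
#include <vector>

struct Pool;

struct Buffer {
    explicit Buffer(Pool* p) : pool(p) {}
    Pool* pool;  // nullptr means the buffer wraps an external handle
    bool isWrapped() const { return pool == nullptr; }
    void recycle();
};

struct Pool {
    std::vector<std::unique_ptr<Buffer>> available;  // recycled, ready for reuse
    void recycleBuffer(Buffer* b) { available.emplace_back(b); }
};

void Buffer::recycle() {
    if (this->isWrapped()) {
        delete this;                // wrapped: nothing owns us, free immediately
    } else {
        pool->recycleBuffer(this);  // pooled: hand ownership back for reuse
    }
}

int main() {
    Pool pool;
    (new Buffer(&pool))->recycle();    // returns to pool.available
    (new Buffer(nullptr))->recycle();  // wrapped: freed immediately
    return static_cast<int>(pool.available.size());  // 1
}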
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h
new file mode 100644
index 0000000000..ec35c124f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h
@@ -0,0 +1,347 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkCommandBuffer_DEFINED
+#define GrVkCommandBuffer_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/vk/GrVkSemaphore.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+class GrVkBuffer;
+class GrVkFramebuffer;
+class GrVkIndexBuffer;
+class GrVkImage;
+class GrVkPipeline;
+class GrVkPipelineState;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkTransferBuffer;
+class GrVkVertexBuffer;
+
+class GrVkCommandBuffer {
+public:
+ virtual ~GrVkCommandBuffer() {}
+
+ void invalidateState();
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer commands
+ ////////////////////////////////////////////////////////////////////////////
+ enum BarrierType {
+ kBufferMemory_BarrierType,
+ kImageMemory_BarrierType
+ };
+
+ void pipelineBarrier(const GrVkGpu* gpu,
+ const GrVkResource* resource,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier);
+
+ void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, const GrVkVertexBuffer* vbuffer);
+
+ void bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer);
+
+ void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);
+
+ void bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkPipelineState*,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+
+ GrVkCommandPool* commandPool() { return fCmdPool; }
+
+ void setViewport(const GrVkGpu* gpu,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* viewports);
+
+ void setScissor(const GrVkGpu* gpu,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* scissors);
+
+ void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);
+
+ // Commands that only work inside of a render pass
+ void clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects);
+
+ void drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance);
+
+ void draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance);
+
+ // Adds a ref-counted resource that will be tracked and released when this command buffer
+ // finishes execution.
+ void addResource(const GrVkResource* resource) {
+ resource->ref();
+ resource->notifyAddedToCommandBuffer();
+ fTrackedResources.append(1, &resource);
+ }
+
+ // Adds a ref-counted resource that will be tracked and released when this command buffer
+ // finishes execution. When released, it will signal that the resource can be recycled for reuse.
+ void addRecycledResource(const GrVkRecycledResource* resource) {
+ resource->ref();
+ resource->notifyAddedToCommandBuffer();
+ fTrackedRecycledResources.append(1, &resource);
+ }
+
+ void releaseResources(GrVkGpu* gpu);
+
+ void freeGPUData(GrVkGpu* gpu) const;
+ void abandonGPUData() const;
+
+ bool hasWork() const { return fHasWork; }
+
+protected:
+ GrVkCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool,
+ const GrVkRenderPass* rp = nullptr)
+ : fIsActive(false)
+ , fActiveRenderPass(rp)
+ , fCmdBuffer(cmdBuffer)
+ , fCmdPool(cmdPool)
+ , fNumResets(0) {
+ fTrackedResources.setReserve(kInitialTrackedResourcesCount);
+ fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
+ this->invalidateState();
+ }
+
+ bool isWrapped() const { return fCmdPool == nullptr; }
+
+ void addingWork(const GrVkGpu* gpu);
+
+ void submitPipelineBarriers(const GrVkGpu* gpu);
+
+ SkTDArray<const GrVkResource*> fTrackedResources;
+ SkTDArray<const GrVkRecycledResource*> fTrackedRecycledResources;
+
+ // Tracks whether we are in the middle of a command buffer's begin/end calls and thus can add
+ // new commands to the buffer.
+ bool fIsActive;
+ bool fHasWork = false;
+
+ // Stores a pointer to the currently active render pass (i.e. begin has been called but not
+ // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
+ // the render pass.
+ const GrVkRenderPass* fActiveRenderPass;
+
+ VkCommandBuffer fCmdBuffer;
+
+ // Raw pointer, not refcounted. The command pool controls the command buffer's lifespan, so
+ // it's guaranteed to outlive us.
+ GrVkCommandPool* fCmdPool;
+
+private:
+ static const int kInitialTrackedResourcesCount = 32;
+
+ virtual void onReleaseResources(GrVkGpu* gpu) {}
+ virtual void onFreeGPUData(GrVkGpu* gpu) const = 0;
+ virtual void onAbandonGPUData() const = 0;
+
+ static constexpr uint32_t kMaxInputBuffers = 2;
+
+ VkBuffer fBoundInputBuffers[kMaxInputBuffers];
+ VkBuffer fBoundIndexBuffer;
+
+ // When resetting the command buffer, we remove the tracked resources from their arrays. We
+ // prefer not to free all the memory every time, so usually we just rewind. However, to avoid
+ // the arrays growing to their maximum size, after a number of resets we do a full reset of the
+ // tracked resource arrays.
+ static const int kNumRewindResetsBeforeFullReset = 8;
+ int fNumResets;
+
+ // Cached values used for dynamic state updates
+ VkViewport fCachedViewport;
+ VkRect2D fCachedScissor;
+ float fCachedBlendConstant[4];
+
+#ifdef SK_DEBUG
+ mutable bool fResourcesReleased = false;
+#endif
+ // Tracking of memory barriers so that we can submit them all in a batch together.
+ SkSTArray<4, VkBufferMemoryBarrier> fBufferBarriers;
+ SkSTArray<1, VkImageMemoryBarrier> fImageBarriers;
+ bool fBarriersByRegion = false;
+ VkPipelineStageFlags fSrcStageMask = 0;
+ VkPipelineStageFlags fDstStageMask = 0;
+};
+
+class GrVkSecondaryCommandBuffer;
+
+class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
+public:
+ ~GrVkPrimaryCommandBuffer() override;
+
+ static GrVkPrimaryCommandBuffer* Create(const GrVkGpu* gpu, GrVkCommandPool* cmdPool);
+
+ void begin(const GrVkGpu* gpu);
+ void end(GrVkGpu* gpu);
+
+ // Begins the render pass on this command buffer. The framebuffer from the GrVkRenderTarget
+ // will be used in the render pass.
+ void beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const VkClearValue clearValues[],
+ const GrVkRenderTarget& target,
+ const SkIRect& bounds,
+ bool forSecondaryCB);
+ void endRenderPass(const GrVkGpu* gpu);
+
+ // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
+ // currently inside a render pass that is compatible with the one used to create the
+ // SecondaryCommandBuffer.
+ void executeCommands(const GrVkGpu* gpu,
+ std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);
+
+ // Commands that only work outside of a render pass
+ void clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void clearDepthStencilImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearDepthStencilValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions);
+
+ void blitImage(const GrVkGpu* gpu,
+ const GrVkResource* srcResource,
+ VkImage srcImage,
+ VkImageLayout srcLayout,
+ const GrVkResource* dstResource,
+ VkImage dstImage,
+ VkImageLayout dstLayout,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter);
+
+ void blitImage(const GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter);
+
+ void copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void copyBuffer(GrVkGpu* gpu,
+ GrVkBuffer* srcBuffer,
+ GrVkBuffer* dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* regions);
+
+ void updateBuffer(GrVkGpu* gpu,
+ GrVkBuffer* dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* data);
+
+ void resolveImage(GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t regionCount,
+ const VkImageResolve* regions);
+
+ void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync,
+ SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
+ SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);
+ bool finished(const GrVkGpu* gpu);
+
+ void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);
+
+ void recycleSecondaryCommandBuffers(GrVkGpu* gpu);
+
+private:
+ explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool)
+ : INHERITED(cmdBuffer, cmdPool)
+ , fSubmitFence(VK_NULL_HANDLE) {}
+
+ void onFreeGPUData(GrVkGpu* gpu) const override;
+
+ void onAbandonGPUData() const override;
+
+ void onReleaseResources(GrVkGpu* gpu) override;
+
+ SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fSecondaryCommandBuffers;
+ VkFence fSubmitFence;
+ SkTArray<sk_sp<GrRefCntedCallback>> fFinishedProcs;
+
+ typedef GrVkCommandBuffer INHERITED;
+};
+
+class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
+public:
+ static GrVkSecondaryCommandBuffer* Create(const GrVkGpu* gpu, GrVkCommandPool* cmdPool);
+ // Used for wrapping an external secondary command buffer.
+ static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB);
+
+ void begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
+ const GrVkRenderPass* compatibleRenderPass);
+ void end(GrVkGpu* gpu);
+
+ void recycle(GrVkGpu* gpu);
+
+ VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }
+
+private:
+ explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool)
+ : INHERITED(cmdBuffer, cmdPool) {}
+
+ void onFreeGPUData(GrVkGpu* gpu) const override {}
+
+ void onAbandonGPUData() const override {}
+
+ friend class GrVkPrimaryCommandBuffer;
+
+ typedef GrVkCommandBuffer INHERITED;
+};
+
+#endif
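addResource()/addRecycledResource() above implement a simple lifetime contract: the command buffer takes a ref on everything recorded into it and drops those refs only in releaseResources(), once the GPU has finished executing the buffer. A minimal standalone sketch of that pattern, with a hypothetical Resource/CommandBuffer pair in place of GrVkResource and GrVkCommandBuffer:

#include <cassert>
#include <vector>

class Resource {
public:
    virtual ~Resource() = default;
    void ref() { ++fRefCnt; }
    void unref() {
        assert(fRefCnt > 0);
        if (--fRefCnt == 0) delete this;
    }
private:
    int fRefCnt = 1;  // born with one ref held by the creator
};

class CommandBuffer {
public:
    // Takes a ref so the resource outlives GPU execution of this buffer.
    void addResource(Resource* r) {
        r->ref();
        fTracked.push_back(r);
    }
    // Called once the GPU is done with the buffer: drop every ref taken above.
    void releaseResources() {
        for (Resource* r : fTracked) r->unref();
        fTracked.clear();
    }
private:
    std::vector<Resource*> fTracked;
};

int main() {
    Resource* r = new Resource;
    CommandBuffer cb;
    cb.addResource(r);      // refcount 2: creator + command buffer
    r->unref();             // creator drops its ref; r stays alive
    cb.releaseResources();  // last ref dropped; r is deleted here
}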
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.cpp
new file mode 100644
index 0000000000..5dc5a94fdc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkCommandPool.h"
+
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+
+GrVkCommandPool* GrVkCommandPool::Create(const GrVkGpu* gpu) {
+ VkCommandPoolCreateFlags cmdPoolCreateFlags =
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ if (gpu->protectedContext()) {
+ cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
+ }
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ cmdPoolCreateFlags, // CmdPoolCreateFlags
+ gpu->queueIndex(), // queueFamilyIndex
+ };
+ VkCommandPool pool;
+ GR_VK_CALL_ERRCHECK(
+ gpu->vkInterface(),
+ CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+ return new GrVkCommandPool(gpu, pool);
+}
+
+GrVkCommandPool::GrVkCommandPool(const GrVkGpu* gpu, VkCommandPool commandPool)
+ : fCommandPool(commandPool) {
+ fPrimaryCommandBuffer.reset(GrVkPrimaryCommandBuffer::Create(gpu, this));
+}
+
+std::unique_ptr<GrVkSecondaryCommandBuffer> GrVkCommandPool::findOrCreateSecondaryCommandBuffer(
+ GrVkGpu* gpu) {
+ std::unique_ptr<GrVkSecondaryCommandBuffer> result;
+ if (fAvailableSecondaryBuffers.count()) {
+ result = std::move(fAvailableSecondaryBuffers.back());
+ fAvailableSecondaryBuffers.pop_back();
+ } else {
+ result.reset(GrVkSecondaryCommandBuffer::Create(gpu, this));
+ }
+ return result;
+}
+
+void GrVkCommandPool::recycleSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* buffer) {
+ SkASSERT(buffer->commandPool() == this);
+ std::unique_ptr<GrVkSecondaryCommandBuffer> scb(buffer);
+ fAvailableSecondaryBuffers.push_back(std::move(scb));
+}
+
+void GrVkCommandPool::close() {
+ fOpen = false;
+}
+
+void GrVkCommandPool::reset(GrVkGpu* gpu) {
+ SkASSERT(!fOpen);
+ fOpen = true;
+ fPrimaryCommandBuffer->recycleSecondaryCommandBuffers(gpu);
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetCommandPool(gpu->device(), fCommandPool, 0));
+}
+
+void GrVkCommandPool::releaseResources(GrVkGpu* gpu) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(!fOpen);
+ fPrimaryCommandBuffer->releaseResources(gpu);
+}
+
+void GrVkCommandPool::abandonGPUData() const {
+ fPrimaryCommandBuffer->abandonGPUData();
+ for (const auto& buffer : fAvailableSecondaryBuffers) {
+ buffer->abandonGPUData();
+ }
+}
+
+void GrVkCommandPool::freeGPUData(GrVkGpu* gpu) const {
+ fPrimaryCommandBuffer->freeGPUData(gpu);
+ for (const auto& buffer : fAvailableSecondaryBuffers) {
+ buffer->freeGPUData(gpu);
+ }
+ if (fCommandPool != VK_NULL_HANDLE) {
+ GR_VK_CALL(gpu->vkInterface(),
+ DestroyCommandPool(gpu->device(), fCommandPool, nullptr));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.h b/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.h
new file mode 100644
index 0000000000..fd44d62e94
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandPool.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCommandPool_DEFINED
+#define GrVkCommandPool_DEFINED
+
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+
+class GrVkPrimaryCommandBuffer;
+class GrVkSecondaryCommandBuffer;
+class GrVkGpu;
+
+class GrVkCommandPool : public GrVkResource {
+public:
+ static GrVkCommandPool* Create(const GrVkGpu* gpu);
+
+ VkCommandPool vkCommandPool() const {
+ return fCommandPool;
+ }
+
+ void reset(GrVkGpu* gpu);
+
+ void releaseResources(GrVkGpu* gpu);
+
+ GrVkPrimaryCommandBuffer* getPrimaryCommandBuffer() { return fPrimaryCommandBuffer.get(); }
+
+ std::unique_ptr<GrVkSecondaryCommandBuffer> findOrCreateSecondaryCommandBuffer(GrVkGpu* gpu);
+
+ void recycleSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* buffer);
+
+ // Marks that we are finished with this command pool; it is not legal to continue creating or
+ // writing to command buffers in a closed pool.
+ void close();
+
+ // Returns true if close() has not been called.
+ bool isOpen() const { return fOpen; }
+
+#ifdef SK_DEBUG
+ void dumpInfo() const override {
+ SkDebugf("GrVkCommandPool: %p (%d refs)\n", fCommandPool, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkCommandPool() = delete;
+
+ GrVkCommandPool(const GrVkGpu* gpu, VkCommandPool commandPool);
+
+ void abandonGPUData() const override;
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ bool fOpen = true;
+
+ VkCommandPool fCommandPool;
+
+ std::unique_ptr<GrVkPrimaryCommandBuffer> fPrimaryCommandBuffer;
+
+ // Array of available secondary command buffers that are not in flight
+ SkSTArray<4, std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fAvailableSecondaryBuffers;
+};
+
+#endif
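findOrCreateSecondaryCommandBuffer() above pops a recycled buffer from fAvailableSecondaryBuffers when one exists and only allocates otherwise; recycleSecondaryCommandBuffer() pushes finished buffers back. A minimal standalone sketch of that free-list pattern, with hypothetical names:

#include <memory>
#include <vector>

struct SecondaryCB {};  // stand-in for GrVkSecondaryCommandBuffer

class CommandPool {
public:
    std::unique_ptr<SecondaryCB> findOrCreate() {
        if (!fAvailable.empty()) {
            auto cb = std::move(fAvailable.back());  // reuse a recycled buffer
            fAvailable.pop_back();
            return cb;
        }
        return std::make_unique<SecondaryCB>();      // none available: allocate
    }
    void recycle(std::unique_ptr<SecondaryCB> cb) {
        fAvailable.push_back(std::move(cb));         // park it for the next user
    }
private:
    std::vector<std::unique_ptr<SecondaryCB>> fAvailable;
};

int main() {
    CommandPool pool;
    auto a = pool.findOrCreate();  // allocates a fresh buffer
    pool.recycle(std::move(a));
    auto b = pool.findOrCreate();  // reuses the recycled buffer
}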
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp
new file mode 100644
index 0000000000..16350bd3c8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp
@@ -0,0 +1,51 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+
+#include "include/private/SkTemplates.h"
+#include "src/gpu/vk/GrVkGpu.h"
+
+
+GrVkDescriptorPool::GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count)
+ : INHERITED()
+ , fType (type)
+ , fCount(count) {
+ VkDescriptorPoolSize poolSize;
+ memset(&poolSize, 0, sizeof(VkDescriptorPoolSize));
+ poolSize.descriptorCount = count;
+ poolSize.type = type;
+
+ VkDescriptorPoolCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkDescriptorPoolCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ // This is a conservative overestimate, since each set may contain more than one descriptor.
+ createInfo.maxSets = count;
+ createInfo.poolSizeCount = 1;
+ createInfo.pPoolSizes = &poolSize;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorPool(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fDescPool));
+}
+
+bool GrVkDescriptorPool::isCompatible(VkDescriptorType type, uint32_t count) const {
+ return fType == type && count <= fCount;
+}
+
+void GrVkDescriptorPool::reset(const GrVkGpu* gpu) {
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetDescriptorPool(gpu->device(), fDescPool, 0));
+}
+
+void GrVkDescriptorPool::freeGPUData(GrVkGpu* gpu) const {
+ // Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
+ // allocated from the pool.
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h
new file mode 100644
index 0000000000..db90e61a09
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h
@@ -0,0 +1,50 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorPool_DEFINED
+#define GrVkDescriptorPool_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkGpu;
+
+/**
+ * We require that all descriptor sets are of a single descriptor type. We also use a pool to
+ * make only one type of descriptor set. Thus a single VkDescriptorPool will only allocate space
+ * for one type of descriptor.
+ */
+class GrVkDescriptorPool : public GrVkResource {
+public:
+ GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
+
+ VkDescriptorPool descPool() const { return fDescPool; }
+
+ void reset(const GrVkGpu* gpu);
+
+ // Returns whether or not this descriptor pool could be used, assuming it gets fully reset and
+ // is not in use by another draw, to support the requested type and count.
+ bool isCompatible(VkDescriptorType type, uint32_t count) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkDescriptorPool: %d, type %d (%d refs)\n", fDescPool, fType,
+ this->getRefCnt());
+ }
+#endif
+
+private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkDescriptorType fType;
+ uint32_t fCount;
+ VkDescriptorPool fDescPool;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
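isCompatible() above is the reuse test applied when looking for a cached pool: the descriptor type must match exactly, and the requested count must fit within the capacity the pool was created with. A trivial standalone sketch of that predicate, with hypothetical names:

#include <cstdint>

enum class DescType { kUniformBuffer, kCombinedImageSampler };

struct CachedPool {
    DescType type;
    uint32_t count;  // descriptor capacity the pool was created with
    // Reusable only for the same descriptor type and a count that fits.
    bool isCompatible(DescType t, uint32_t c) const {
        return type == t && c <= count;
    }
};

int main() {
    CachedPool pool{DescType::kCombinedImageSampler, 64};
    bool yes = pool.isCompatible(DescType::kCombinedImageSampler, 32);  // true
    bool no  = pool.isCompatible(DescType::kUniformBuffer, 32);         // false
    return (yes && !no) ? 0 : 1;
}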
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp
new file mode 100644
index 0000000000..9de9a6778b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp
@@ -0,0 +1,34 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkDescriptorSet.h"
+
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+
+GrVkDescriptorSet::GrVkDescriptorSet(VkDescriptorSet descSet,
+ GrVkDescriptorPool* pool,
+ GrVkDescriptorSetManager::Handle handle)
+ : fDescSet(descSet)
+ , fPool(pool)
+ , fHandle(handle) {
+ fPool->ref();
+}
+
+void GrVkDescriptorSet::freeGPUData(GrVkGpu* gpu) const {
+ fPool->unref(gpu);
+}
+
+void GrVkDescriptorSet::onRecycle(GrVkGpu* gpu) const {
+ gpu->resourceProvider().recycleDescriptorSet(this, fHandle);
+}
+
+void GrVkDescriptorSet::abandonGPUData() const {
+ fPool->unrefAndAbandon();
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h
new file mode 100644
index 0000000000..d909511c45
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h
@@ -0,0 +1,44 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorSet_DEFINED
+#define GrVkDescriptorSet_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkDescriptorSetManager.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkDescriptorPool;
+class GrVkGpu;
+
+class GrVkDescriptorSet : public GrVkRecycledResource {
+public:
+ GrVkDescriptorSet(VkDescriptorSet descSet,
+ GrVkDescriptorPool* pool,
+ GrVkDescriptorSetManager::Handle handle);
+
+ ~GrVkDescriptorSet() override {}
+
+ VkDescriptorSet descriptorSet() const { return fDescSet; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkDescriptorSet: %d (%d refs)\n", fDescSet, this->getRefCnt());
+ }
+#endif
+
+private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+ void abandonGPUData() const override;
+ void onRecycle(GrVkGpu* gpu) const override;
+
+ VkDescriptorSet fDescSet;
+ SkDEBUGCODE(mutable) GrVkDescriptorPool* fPool;
+ GrVkDescriptorSetManager::Handle fHandle;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp
new file mode 100644
index 0000000000..5869ae6911
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp
@@ -0,0 +1,336 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkDescriptorSetManager.h"
+
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+#include "src/gpu/vk/GrVkDescriptorSet.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUniformHandler.h"
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
+ SkSTArray<1, uint32_t> visibilities;
+ uint32_t stages = kVertex_GrShaderFlag | kFragment_GrShaderFlag;
+ if (gpu->vkCaps().shaderCaps()->geometryShaderSupport()) {
+ stages |= kGeometry_GrShaderFlag;
+ }
+ visibilities.push_back(stages);
+
+ SkTArray<const GrVkSampler*> samplers;
+ return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities,
+ samplers);
+}
+
+GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
+ GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
+ SkSTArray<4, uint32_t> visibilities;
+ SkSTArray<4, const GrVkSampler*> immutableSamplers;
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
+ for (int i = 0 ; i < uniformHandler.numSamplers(); ++i) {
+ visibilities.push_back(uniformHandler.samplerVisibility(i));
+ immutableSamplers.push_back(uniformHandler.immutableSampler(i));
+ }
+ return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+}
+
+GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
+ GrVkGpu* gpu, VkDescriptorType type, const SkTArray<uint32_t>& visibilities) {
+ SkSTArray<4, const GrVkSampler*> immutableSamplers;
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
+ for (int i = 0 ; i < visibilities.count(); ++i) {
+ immutableSamplers.push_back(nullptr);
+ }
+ return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+}
+
+GrVkDescriptorSetManager::GrVkDescriptorSetManager(
+ GrVkGpu* gpu, VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities,
+ const SkTArray<const GrVkSampler*>& immutableSamplers)
+ : fPoolManager(type, gpu, visibilities, immutableSamplers) {
+#ifdef SK_DEBUG
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ SkASSERT(visibilities.count() == immutableSamplers.count());
+ } else {
+ SkASSERT(immutableSamplers.count() == 0);
+ }
+#endif
+ for (int i = 0; i < visibilities.count(); ++i) {
+ fBindingVisibilities.push_back(visibilities[i]);
+ }
+ for (int i = 0; i < immutableSamplers.count(); ++i) {
+ const GrVkSampler* sampler = immutableSamplers[i];
+ if (sampler) {
+ sampler->ref();
+ }
+ fImmutableSamplers.push_back(sampler);
+ }
+}
+
+const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
+ const Handle& handle) {
+ const GrVkDescriptorSet* ds = nullptr;
+ int count = fFreeSets.count();
+ if (count > 0) {
+ ds = fFreeSets[count - 1];
+ fFreeSets.removeShuffle(count - 1);
+ } else {
+ VkDescriptorSet vkDS;
+ fPoolManager.getNewDescriptorSet(gpu, &vkDS);
+
+ ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
+ }
+ SkASSERT(ds);
+ return ds;
+}
+
+void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
+ SkASSERT(descSet);
+ fFreeSets.push_back(descSet);
+}
+
+void GrVkDescriptorSetManager::release(GrVkGpu* gpu) {
+ fPoolManager.freeGPUResources(gpu);
+
+ for (int i = 0; i < fFreeSets.count(); ++i) {
+ fFreeSets[i]->unref(gpu);
+ }
+ fFreeSets.reset();
+
+ for (int i = 0; i < fImmutableSamplers.count(); ++i) {
+ if (fImmutableSamplers[i]) {
+ fImmutableSamplers[i]->unref(gpu);
+ }
+ }
+ fImmutableSamplers.reset();
+}
+
+void GrVkDescriptorSetManager::abandon() {
+ fPoolManager.abandonGPUResources();
+
+ for (int i = 0; i < fFreeSets.count(); ++i) {
+ fFreeSets[i]->unrefAndAbandon();
+ }
+ fFreeSets.reset();
+
+ for (int i = 0; i < fImmutableSamplers.count(); ++i) {
+ if (fImmutableSamplers[i]) {
+ fImmutableSamplers[i]->unrefAndAbandon();
+ }
+ }
+ fImmutableSamplers.reset();
+}
+
+bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
+ const GrVkUniformHandler* uniHandler) const {
+ SkASSERT(uniHandler);
+ if (type != fPoolManager.fDescType) {
+ return false;
+ }
+
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
+ if (fBindingVisibilities.count() != uniHandler->numSamplers()) {
+ return false;
+ }
+ for (int i = 0; i < uniHandler->numSamplers(); ++i) {
+ if (uniHandler->samplerVisibility(i) != fBindingVisibilities[i] ||
+ uniHandler->immutableSampler(i) != fImmutableSamplers[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities) const {
+ if (type != fPoolManager.fDescType) {
+ return false;
+ }
+
+ if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
+ if (fBindingVisibilities.count() != visibilities.count()) {
+ return false;
+ }
+ for (int i = 0; i < visibilities.count(); ++i) {
+ if (visibilities[i] != fBindingVisibilities[i] || fImmutableSamplers[i] != nullptr) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+ VkShaderStageFlags flags = 0;
+
+ if (visibility & kVertex_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+ if (visibility & kGeometry_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+ }
+ if (visibility & kFragment_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ return flags;
+}
+
+GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
+ VkDescriptorType type,
+ GrVkGpu* gpu,
+ const SkTArray<uint32_t>& visibilities,
+ const SkTArray<const GrVkSampler*>& immutableSamplers)
+ : fDescType(type)
+ , fCurrentDescriptorCount(0)
+ , fPool(nullptr) {
+
+
+ if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
+ uint32_t numBindings = visibilities.count();
+ std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
+ new VkDescriptorSetLayoutBinding[numBindings]);
+ for (uint32_t i = 0; i < numBindings; ++i) {
+ uint32_t visibility = visibilities[i];
+ dsSamplerBindings[i].binding = i;
+ dsSamplerBindings[i].descriptorType = type;
+ dsSamplerBindings[i].descriptorCount = 1;
+ dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
+ if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
+ if (immutableSamplers[i]) {
+ dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
+ } else {
+ dsSamplerBindings[i].pImmutableSamplers = nullptr;
+ }
+ }
+ }
+
+ VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+ memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsSamplerLayoutCreateInfo.pNext = nullptr;
+ dsSamplerLayoutCreateInfo.flags = 0;
+ dsSamplerLayoutCreateInfo.bindingCount = numBindings;
+ // Setting to nullptr fixes an error in the param checker validation layer. Even though
+ // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
+ // null.
+ dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+ // skia:8713
+ __lsan::ScopedDisabler lsanDisabler;
+#endif
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
+ CreateDescriptorSetLayout(gpu->device(),
+ &dsSamplerLayoutCreateInfo,
+ nullptr,
+ &fDescLayout));
+ fDescCountPerSet = visibilities.count();
+ } else {
+ SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
+ GR_STATIC_ASSERT(1 == kUniformDescPerSet);
+ SkASSERT(kUniformDescPerSet == visibilities.count());
+ // Create Uniform Buffer Descriptor
+ VkDescriptorSetLayoutBinding dsUniBinding;
+ memset(&dsUniBinding, 0, sizeof(dsUniBinding));
+ dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
+ dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBinding.descriptorCount = 1;
+ dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
+ dsUniBinding.pImmutableSamplers = nullptr;
+
+ VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
+ memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ uniformLayoutCreateInfo.pNext = nullptr;
+ uniformLayoutCreateInfo.flags = 0;
+ uniformLayoutCreateInfo.bindingCount = 1;
+ uniformLayoutCreateInfo.pBindings = &dsUniBinding;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+ // skia:8713
+ __lsan::ScopedDisabler lsanDisabler;
+#endif
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorSetLayout(gpu->device(),
+ &uniformLayoutCreateInfo,
+ nullptr,
+ &fDescLayout));
+ fDescCountPerSet = kUniformDescPerSet;
+ }
+
+ SkASSERT(fDescCountPerSet < kStartNumDescriptors);
+ fMaxDescriptors = kStartNumDescriptors;
+ SkASSERT(fMaxDescriptors > 0);
+ this->getNewPool(gpu);
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
+ if (fPool) {
+ fPool->unref(gpu);
+ uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
+ if (newPoolSize < kMaxDescriptors) {
+ fMaxDescriptors = newPoolSize;
+ } else {
+ fMaxDescriptors = kMaxDescriptors;
+ }
+
+ }
+ fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
+ fMaxDescriptors);
+ SkASSERT(fPool);
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+ VkDescriptorSet* ds) {
+ if (!fMaxDescriptors) {
+ return;
+ }
+ fCurrentDescriptorCount += fDescCountPerSet;
+ if (fCurrentDescriptorCount > fMaxDescriptors) {
+ this->getNewPool(gpu);
+ fCurrentDescriptorCount = fDescCountPerSet;
+ }
+
+ VkDescriptorSetAllocateInfo dsAllocateInfo;
+ memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+ dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsAllocateInfo.pNext = nullptr;
+ dsAllocateInfo.descriptorPool = fPool->descPool();
+ dsAllocateInfo.descriptorSetCount = 1;
+ dsAllocateInfo.pSetLayouts = &fDescLayout;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
+ &dsAllocateInfo,
+ ds));
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
+ if (fDescLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
+ nullptr));
+ fDescLayout = VK_NULL_HANDLE;
+ }
+
+ if (fPool) {
+ fPool->unref(gpu);
+ fPool = nullptr;
+ }
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::abandonGPUResources() {
+ fDescLayout = VK_NULL_HANDLE;
+ if (fPool) {
+ fPool->unrefAndAbandon();
+ fPool = nullptr;
+ }
+}
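getNewPool() above grows the descriptor budget geometrically: each replacement pool is about 1.5x the previous one (fMaxDescriptors + ((fMaxDescriptors + 1) >> 1)), clamped at a hard cap. A minimal sketch that prints the resulting capacity sequence, assuming the constants kStartNumDescriptors = 16 and kMaxDescriptors = 1024 from the accompanying header:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t kStart = 16, kMax = 1024;
    uint32_t max = kStart;
    for (int pool = 0; pool < 12; ++pool) {
        std::printf("pool %d: capacity %u\n", pool, max);
        uint32_t next = max + ((max + 1) >> 1);  // grow by ~1.5x
        max = (next < kMax) ? next : kMax;       // clamp at the hard cap
    }
    // Prints: 16, 24, 36, 54, 81, 122, 183, 275, 413, 620, 930, 1024
}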
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h
new file mode 100644
index 0000000000..767ca33ec3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h
@@ -0,0 +1,97 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorSetManager_DEFINED
+#define GrVkDescriptorSetManager_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/GrResourceHandle.h"
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+#include "src/gpu/vk/GrVkSampler.h"
+
+class GrVkDescriptorSet;
+class GrVkGpu;
+class GrVkUniformHandler;
+
+/**
+ * This class handles the allocation of descriptor sets for a given VkDescriptorSetLayout. It will
+ * try to reuse previously allocated descriptor sets if they are no longer in use by other objects.
+ */
+class GrVkDescriptorSetManager {
+public:
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(Handle);
+
+ static GrVkDescriptorSetManager* CreateUniformManager(GrVkGpu* gpu);
+ static GrVkDescriptorSetManager* CreateSamplerManager(GrVkGpu* gpu, VkDescriptorType type,
+ const GrVkUniformHandler&);
+ static GrVkDescriptorSetManager* CreateSamplerManager(GrVkGpu* gpu, VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities);
+
+ ~GrVkDescriptorSetManager() {}
+
+ void abandon();
+ void release(GrVkGpu* gpu);
+
+ VkDescriptorSetLayout layout() const { return fPoolManager.fDescLayout; }
+
+ const GrVkDescriptorSet* getDescriptorSet(GrVkGpu* gpu, const Handle& handle);
+
+ void recycleDescriptorSet(const GrVkDescriptorSet*);
+
+ bool isCompatible(VkDescriptorType type, const GrVkUniformHandler*) const;
+ bool isCompatible(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities) const;
+
+private:
+ struct DescriptorPoolManager {
+ DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
+ const SkTArray<uint32_t>& visibilities,
+ const SkTArray<const GrVkSampler*>& immutableSamplers);
+
+
+ ~DescriptorPoolManager() {
+ SkASSERT(!fDescLayout);
+ SkASSERT(!fPool);
+ }
+
+ void getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
+
+ void freeGPUResources(GrVkGpu* gpu);
+ void abandonGPUResources();
+
+ VkDescriptorSetLayout fDescLayout;
+ VkDescriptorType fDescType;
+ uint32_t fDescCountPerSet;
+ uint32_t fMaxDescriptors;
+ uint32_t fCurrentDescriptorCount;
+ GrVkDescriptorPool* fPool;
+
+ private:
+ enum {
+ kUniformDescPerSet = 1,
+ kMaxDescriptors = 1024,
+ kStartNumDescriptors = 16, // must be less than kMaxDescriptors
+ };
+
+ void getNewPool(GrVkGpu* gpu);
+ };
+
+ GrVkDescriptorSetManager(GrVkGpu* gpu,
+ VkDescriptorType,
+ const SkTArray<uint32_t>& visibilities,
+ const SkTArray<const GrVkSampler*>& immutableSamplers);
+
+
+ DescriptorPoolManager fPoolManager;
+ SkTArray<const GrVkDescriptorSet*, true> fFreeSets;
+ SkSTArray<4, uint32_t> fBindingVisibilities;
+ SkSTArray<4, const GrVkSampler*> fImmutableSamplers;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp
new file mode 100644
index 0000000000..b9990e67b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/vk/GrVkExtensions.h"
+
+// Can remove this once we get rid of the extension flags.
+#include "include/gpu/vk/GrVkBackendContext.h"
+
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTSort.h"
+
+// Finds the index of ext in infos, or returns a negative result if ext is not found.
+static int find_info(const SkTArray<GrVkExtensions::Info>& infos, const char ext[]) {
+ if (infos.empty()) {
+ return -1;
+ }
+ SkString extensionStr(ext);
+ GrVkExtensions::Info::Less less;
+ int idx = SkTSearch<GrVkExtensions::Info, SkString, GrVkExtensions::Info::Less>(
+ &infos.front(), infos.count(), extensionStr, sizeof(GrVkExtensions::Info),
+ less);
+ return idx;
+}
+
+namespace { // This cannot be static because it is used as a template parameter.
+inline bool extension_compare(const GrVkExtensions::Info& a, const GrVkExtensions::Info& b) {
+ return strcmp(a.fName.c_str(), b.fName.c_str()) < 0;
+}
+}
+
+void GrVkExtensions::init(GrVkGetProc getProc,
+ VkInstance instance,
+ VkPhysicalDevice physDev,
+ uint32_t instanceExtensionCount,
+ const char* const* instanceExtensions,
+ uint32_t deviceExtensionCount,
+ const char* const* deviceExtensions) {
+ SkTLessFunctionToFunctorAdaptor<GrVkExtensions::Info, extension_compare> cmp;
+
+ for (uint32_t i = 0; i < instanceExtensionCount; ++i) {
+ const char* extension = instanceExtensions[i];
+ // if not already in the list, add it
+ if (find_info(fExtensions, extension) < 0) {
+ fExtensions.push_back() = Info(extension);
+ SkTQSort(&fExtensions.front(), &fExtensions.back(), cmp);
+ }
+ }
+ for (uint32_t i = 0; i < deviceExtensionCount; ++i) {
+ const char* extension = deviceExtensions[i];
+ // if not already in the list, add it
+ if (find_info(fExtensions, extension) < 0) {
+ fExtensions.push_back() = Info(extension);
+ SkTQSort(&fExtensions.front(), &fExtensions.back(), cmp);
+ }
+ }
+ this->getSpecVersions(getProc, instance, physDev);
+}
+
+#define GET_PROC(F, inst) \
+ PFN_vk##F grVk##F = (PFN_vk ## F) getProc("vk" #F, inst, VK_NULL_HANDLE)
+
+void GrVkExtensions::getSpecVersions(GrVkGetProc getProc, VkInstance instance,
+ VkPhysicalDevice physDevice) {
+ // We grab all the extensions for the VkInstance and VkDevice so we can look up what spec
+ // version each of the supported extensions is. We do not grab the extensions for layers
+ // because we don't know what layers the client has enabled and in general we don't do anything
+ // special for those extensions.
+
+ if (instance == VK_NULL_HANDLE) {
+ return;
+ }
+ GET_PROC(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
+ SkASSERT(grVkEnumerateInstanceExtensionProperties);
+
+ VkResult res;
+ // instance extensions
+ uint32_t extensionCount = 0;
+ res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return;
+ }
+ VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
+ res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ int idx = find_info(fExtensions, extensions[i].extensionName);
+ if (idx >= 0) {
+ fExtensions[idx].fSpecVersion = extensions[i].specVersion;
+ }
+ }
+ delete[] extensions;
+
+ if (physDevice == VK_NULL_HANDLE) {
+ return;
+ }
+ GET_PROC(EnumerateDeviceExtensionProperties, instance);
+ SkASSERT(grVkEnumerateDeviceExtensionProperties);
+
+ // device extensions
+ extensionCount = 0;
+ res = grVkEnumerateDeviceExtensionProperties(physDevice, nullptr, &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return;
+ }
+ extensions = new VkExtensionProperties[extensionCount];
+ res = grVkEnumerateDeviceExtensionProperties(physDevice, nullptr, &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ int idx = find_info(fExtensions, extensions[i].extensionName);
+ if (idx >= 0) {
+ fExtensions[idx].fSpecVersion = extensions[i].specVersion;
+ }
+ }
+ delete[] extensions;
+}
+
+bool GrVkExtensions::hasExtension(const char ext[], uint32_t minVersion) const {
+ int idx = find_info(fExtensions, ext);
+ return idx >= 0 && fExtensions[idx].fSpecVersion >= minVersion;
+}
+
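hasExtension() above does a binary search (SkTSearch) over the name-sorted Info array and then checks the recorded spec version against the requested minimum. A standalone sketch of the same lookup using std::lower_bound, with a hypothetical ExtInfo in place of GrVkExtensions::Info:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct ExtInfo {
    std::string name;
    uint32_t specVersion = 0;
};

// Binary search over a name-sorted list, then compare the spec version.
static bool hasExtension(const std::vector<ExtInfo>& sorted,  // sorted by name
                         const char* ext, uint32_t minVersion) {
    auto it = std::lower_bound(sorted.begin(), sorted.end(), ext,
                               [](const ExtInfo& a, const char* b) {
                                   return std::strcmp(a.name.c_str(), b) < 0;
                               });
    return it != sorted.end() && it->name == ext && it->specVersion >= minVersion;
}

int main() {
    // Hypothetical data; the list must already be sorted by name.
    std::vector<ExtInfo> exts = {{"VK_KHR_surface", 25}, {"VK_KHR_swapchain", 70}};
    return hasExtension(exts, "VK_KHR_swapchain", 68) ? 0 : 1;  // found, version ok
}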
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp
new file mode 100644
index 0000000000..a06d792d94
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp
@@ -0,0 +1,57 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkFramebuffer.h"
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+
+GrVkFramebuffer* GrVkFramebuffer::Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* stencilAttachment) {
+ // At the very least we need a renderPass and a colorAttachment
+ SkASSERT(renderPass);
+ SkASSERT(colorAttachment);
+
+ VkImageView attachments[3];
+ attachments[0] = colorAttachment->imageView();
+ int numAttachments = 1;
+ if (stencilAttachment) {
+ attachments[numAttachments++] = stencilAttachment->imageView();
+ }
+
+ VkFramebufferCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFramebufferCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.renderPass = renderPass->vkRenderPass();
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments;
+ createInfo.width = width;
+ createInfo.height = height;
+ createInfo.layers = 1;
+
+ VkFramebuffer framebuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateFramebuffer(gpu->device(),
+ &createInfo,
+ nullptr,
+ &framebuffer));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkFramebuffer(framebuffer);
+}
+
+void GrVkFramebuffer::freeGPUData(GrVkGpu* gpu) const {
+ SkASSERT(fFramebuffer);
+ GR_VK_CALL(gpu->vkInterface(), DestroyFramebuffer(gpu->device(), fFramebuffer, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h
new file mode 100644
index 0000000000..1a97d5762a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h
@@ -0,0 +1,48 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkFramebuffer_DEFINED
+#define GrVkFramebuffer_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkGpu;
+class GrVkImageView;
+class GrVkRenderPass;
+
+class GrVkFramebuffer : public GrVkResource {
+public:
+ static GrVkFramebuffer* Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* stencilAttachment);
+
+ VkFramebuffer framebuffer() const { return fFramebuffer; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkFramebuffer: %d (%d refs)\n", fFramebuffer, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkFramebuffer(VkFramebuffer framebuffer) : INHERITED(), fFramebuffer(framebuffer) {}
+
+ GrVkFramebuffer(const GrVkFramebuffer&);
+ GrVkFramebuffer& operator=(const GrVkFramebuffer&);
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkFramebuffer fFramebuffer;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp b/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp
new file mode 100644
index 0000000000..d9baea4d25
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp
@@ -0,0 +1,2558 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkGpu.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrGpuResourceCacheAccess.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrNativeRect.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/SkGpuDevice.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkCommandPool.h"
+#include "src/gpu/vk/GrVkImage.h"
+#include "src/gpu/vk/GrVkIndexBuffer.h"
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkOpsRenderPass.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+#include "src/gpu/vk/GrVkSemaphore.h"
+#include "src/gpu/vk/GrVkTexture.h"
+#include "src/gpu/vk/GrVkTextureRenderTarget.h"
+#include "src/gpu/vk/GrVkTransferBuffer.h"
+#include "src/gpu/vk/GrVkVertexBuffer.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkSurface_Gpu.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#include "include/gpu/vk/GrVkExtensions.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+#include <utility>
+
+#if !defined(SK_BUILD_FOR_WIN)
+#include <unistd.h>
+#endif // !defined(SK_BUILD_FOR_WIN)
+
+#if defined(SK_BUILD_FOR_WIN) && defined(SK_DEBUG)
+#include "src/core/SkLeanWindows.h"
+#endif
+
+#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
+#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
+#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
+
+sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
+ const GrContextOptions& options, GrContext* context) {
+ if (backendContext.fInstance == VK_NULL_HANDLE ||
+ backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
+ backendContext.fDevice == VK_NULL_HANDLE ||
+ backendContext.fQueue == VK_NULL_HANDLE) {
+ return nullptr;
+ }
+ if (!backendContext.fGetProc) {
+ return nullptr;
+ }
+
+ PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
+ reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
+ backendContext.fGetProc("vkEnumerateInstanceVersion",
+ VK_NULL_HANDLE, VK_NULL_HANDLE));
+ uint32_t instanceVersion = 0;
+ if (!localEnumerateInstanceVersion) {
+ instanceVersion = VK_MAKE_VERSION(1, 0, 0);
+ } else {
+ VkResult err = localEnumerateInstanceVersion(&instanceVersion);
+ if (err) {
+ SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
+ return nullptr;
+ }
+ }
+
+ PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
+ backendContext.fGetProc("vkGetPhysicalDeviceProperties",
+ backendContext.fInstance,
+ VK_NULL_HANDLE));
+
+ if (!localGetPhysicalDeviceProperties) {
+ return nullptr;
+ }
+ VkPhysicalDeviceProperties physDeviceProperties;
+ localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
+ uint32_t physDevVersion = physDeviceProperties.apiVersion;
+
+ uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
+ : instanceVersion;
+
+ instanceVersion = SkTMin(instanceVersion, apiVersion);
+ physDevVersion = SkTMin(physDevVersion, apiVersion);
+
+ sk_sp<const GrVkInterface> interface;
+
+ if (backendContext.fVkExtensions) {
+ interface.reset(new GrVkInterface(backendContext.fGetProc,
+ backendContext.fInstance,
+ backendContext.fDevice,
+ instanceVersion,
+ physDevVersion,
+ backendContext.fVkExtensions));
+ if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
+ return nullptr;
+ }
+ } else {
+ GrVkExtensions extensions;
+ // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
+ // need to know if this is enabled to know if we can transition to a present layout when
+ // flushing a surface.
+ if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
+ const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ extensions.init(backendContext.fGetProc, backendContext.fInstance,
+ backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
+ }
+ interface.reset(new GrVkInterface(backendContext.fGetProc,
+ backendContext.fInstance,
+ backendContext.fDevice,
+ instanceVersion,
+ physDevVersion,
+ &extensions));
+ if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
+ return nullptr;
+ }
+ }
+
+ sk_sp<GrVkGpu> vkGpu(new GrVkGpu(context, options, backendContext, interface,
+ instanceVersion, physDevVersion));
+ if (backendContext.fProtectedContext == GrProtected::kYes &&
+ !vkGpu->vkCaps().supportsProtectedMemory()) {
+ return nullptr;
+ }
+ return vkGpu;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
+ const GrVkBackendContext& backendContext, sk_sp<const GrVkInterface> interface,
+ uint32_t instanceVersion, uint32_t physicalDeviceVersion)
+ : INHERITED(context)
+ , fInterface(std::move(interface))
+ , fMemoryAllocator(backendContext.fMemoryAllocator)
+ , fInstance(backendContext.fInstance)
+ , fPhysicalDevice(backendContext.fPhysicalDevice)
+ , fDevice(backendContext.fDevice)
+ , fQueue(backendContext.fQueue)
+ , fQueueIndex(backendContext.fGraphicsQueueIndex)
+ , fResourceProvider(this)
+ , fDisconnected(false)
+ , fProtectedContext(backendContext.fProtectedContext) {
+ SkASSERT(!backendContext.fOwnsInstanceAndDevice);
+
+ if (!fMemoryAllocator) {
+ // We were not given a memory allocator at creation
+ fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice,
+ fDevice, fInterface));
+ }
+
+ fCompiler = new SkSL::Compiler();
+
+ if (backendContext.fDeviceFeatures2) {
+ fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
+ *backendContext.fDeviceFeatures2, instanceVersion,
+ physicalDeviceVersion,
+ *backendContext.fVkExtensions, fProtectedContext));
+ } else if (backendContext.fDeviceFeatures) {
+ VkPhysicalDeviceFeatures2 features2;
+ features2.pNext = nullptr;
+ features2.features = *backendContext.fDeviceFeatures;
+ fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
+ features2, instanceVersion, physicalDeviceVersion,
+ *backendContext.fVkExtensions, fProtectedContext));
+ } else {
+ VkPhysicalDeviceFeatures2 features;
+ memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
+ features.pNext = nullptr;
+ if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
+ features.features.geometryShader = true;
+ }
+ if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
+ features.features.dualSrcBlend = true;
+ }
+ if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
+ features.features.sampleRateShading = true;
+ }
+ GrVkExtensions extensions;
+ // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
+ // need to know if this is enabled to know if we can transition to a present layout when
+ // flushing a surface.
+ if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
+ const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ extensions.init(backendContext.fGetProc, backendContext.fInstance,
+ backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
+ }
+ fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
+ features, instanceVersion, physicalDeviceVersion, extensions,
+ fProtectedContext));
+ }
+ fCaps.reset(SkRef(fVkCaps.get()));
+
+ VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
+ VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
+
+ fResourceProvider.init();
+
+ fCmdPool = fResourceProvider.findOrCreateCommandPool();
+ fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->begin(this);
+}
+
+void GrVkGpu::destroyResources() {
+ if (fCmdPool) {
+ fCmdPool->getPrimaryCommandBuffer()->end(this);
+ fCmdPool->close();
+ }
+
+ // wait for all commands to finish
+ VkResult res = VK_CALL(QueueWaitIdle(fQueue));
+
+ // On Windows, calls to QueueWaitIdle sometimes return before the fences on the command
+ // buffers are actually signalled, even though the buffers have completed. This causes an
+ // assert to fire when destroying the command buffers. Currently this only seems to happen on
+ // Windows, so we add a sleep to make sure the fence signals.
+#ifdef SK_DEBUG
+ if (this->vkCaps().mustSleepOnTearDown()) {
+#if defined(SK_BUILD_FOR_WIN)
+ Sleep(10); // In milliseconds
+#else
+ sleep(1); // In seconds
+#endif
+ }
+#endif
+
+#ifdef SK_DEBUG
+ SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
+#endif
+
+ if (fCmdPool) {
+ fCmdPool->unref(this);
+ fCmdPool = nullptr;
+ }
+
+ for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
+ fSemaphoresToWaitOn[i]->unref(this);
+ }
+ fSemaphoresToWaitOn.reset();
+
+ for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
+ fSemaphoresToSignal[i]->unref(this);
+ }
+ fSemaphoresToSignal.reset();
+
+ // must call this just before we destroy the command pool and VkDevice
+ fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);
+
+ fMemoryAllocator.reset();
+
+ fQueue = VK_NULL_HANDLE;
+ fDevice = VK_NULL_HANDLE;
+ fInstance = VK_NULL_HANDLE;
+}
+
+GrVkGpu::~GrVkGpu() {
+ if (!fDisconnected) {
+ this->destroyResources();
+ }
+ delete fCompiler;
+}
+
+
+void GrVkGpu::disconnect(DisconnectType type) {
+ INHERITED::disconnect(type);
+ if (!fDisconnected) {
+ if (DisconnectType::kCleanup == type) {
+ this->destroyResources();
+ } else {
+ if (fCmdPool) {
+ fCmdPool->unrefAndAbandon();
+ fCmdPool = nullptr;
+ }
+ for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
+ fSemaphoresToWaitOn[i]->unrefAndAbandon();
+ }
+ for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
+ fSemaphoresToSignal[i]->unrefAndAbandon();
+ }
+
+ // must call this just before we destroy the command pool and VkDevice
+ fResourceProvider.abandonResources();
+
+ fMemoryAllocator.reset();
+ }
+ fSemaphoresToWaitOn.reset();
+ fSemaphoresToSignal.reset();
+ fCurrentCmdBuffer = nullptr;
+ fDisconnected = true;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrOpsRenderPass* GrVkGpu::getOpsRenderPass(
+ GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ if (!fCachedOpsRenderPass) {
+ fCachedOpsRenderPass.reset(new GrVkOpsRenderPass(this));
+ }
+
+ fCachedOpsRenderPass->set(rt, origin, bounds, colorInfo, stencilInfo, sampledProxies);
+ return fCachedOpsRenderPass.get();
+}
+
+void GrVkGpu::submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(fCurrentCmdBuffer);
+ SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
+
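+ // If the command buffer recorded no work, no sync was forced, and there is nothing to wait on
+ // or signal, skip the submit entirely; just recycle finished command buffers and register any
+ // finishedProc.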
+ if (!fCurrentCmdBuffer->hasWork() && kForce_SyncQueue != sync &&
+ !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
+ SkASSERT(fDrawables.empty());
+ fResourceProvider.checkCommandBuffers();
+ if (finishedProc) {
+ fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
+ }
+ return;
+ }
+
+ fCurrentCmdBuffer->end(this);
+ fCmdPool->close();
+ fCurrentCmdBuffer->submitToQueue(this, fQueue, sync, fSemaphoresToSignal, fSemaphoresToWaitOn);
+
+ if (finishedProc) {
+ // Make sure this is called after closing the current command pool
+ fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
+ }
+
+ // We must delete any drawables that have been waiting until submit for us to destroy them.
+ fDrawables.reset();
+
+ for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
+ fSemaphoresToWaitOn[i]->unref(this);
+ }
+ fSemaphoresToWaitOn.reset();
+ for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
+ fSemaphoresToSignal[i]->unref(this);
+ }
+ fSemaphoresToSignal.reset();
+
+ // Release old command pool and create a new one
+ fCmdPool->unref(this);
+ fResourceProvider.checkCommandBuffers();
+ fCmdPool = fResourceProvider.findOrCreateCommandPool();
+ fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
+ fCurrentCmdBuffer->begin(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
+ GrAccessPattern accessPattern, const void* data) {
+ sk_sp<GrGpuBuffer> buff;
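+ // Dynamic vertex/index buffers are backed by host-visible memory so they can be mapped and
+ // updated directly; static ones generally live in device-local memory and are updated via
+ // staging copies. Transfer buffers use the copy-read (CPU to GPU) and copy-write (GPU to CPU)
+ // staging types.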
+ switch (type) {
+ case GrGpuBufferType::kVertex:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ buff = GrVkVertexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
+ break;
+ case GrGpuBufferType::kIndex:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ buff = GrVkIndexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
+ break;
+ case GrGpuBufferType::kXferCpuToGpu:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStream_GrAccessPattern == accessPattern);
+ buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type);
+ break;
+ case GrGpuBufferType::kXferGpuToCpu:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStream_GrAccessPattern == accessPattern);
+ buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type);
+ break;
+ default:
+ SK_ABORT("Unknown buffer type.");
+ }
+ if (data && buff) {
+ buff->updateData(data, size);
+ }
+ return buff;
+}
+
+bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!vkTex) {
+ return false;
+ }
+
+ // Make sure we have at least the base level
+ if (!mipLevelCount || !texels[0].fPixels) {
+ return false;
+ }
+
+ SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
+ bool success = false;
+ bool linearTiling = vkTex->isLinearTiled();
+ if (linearTiling) {
+ if (mipLevelCount > 1) {
+ SkDebugf("Can't upload mipmap data to linear tiled texture");
+ return false;
+ }
+ if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
+ // Need to change the layout to general in order to perform a host write
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_GENERAL,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ this->submitCommandBuffer(kForce_SyncQueue);
+ }
+ success = this->uploadTexDataLinear(vkTex, left, top, width, height, srcColorType,
+ texels[0].fPixels, texels[0].fRowBytes);
+ } else {
+ SkASSERT(mipLevelCount <= vkTex->texturePriv().maxMipMapLevel() + 1);
+ success = this->uploadTexDataOptimal(vkTex, left, top, width, height, srcColorType, texels,
+ mipLevelCount);
+ }
+
+ if (prepForTexSampling) {
+ vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ false);
+ }
+
+ return success;
+}
+
+bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t bufferOffset,
+ size_t rowBytes) {
+ // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
+ if ((bufferOffset & 0x3) || (bufferOffset % GrColorTypeBytesPerPixel(bufferColorType))) {
+ return false;
+ }
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
+ if (!vkTex) {
+ return false;
+ }
+
+ // Can't transfer compressed data
+ SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
+
+ GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
+ if (!vkBuffer) {
+ return false;
+ }
+
+ SkDEBUGCODE(
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
+ SkASSERT(bounds.contains(subRect));
+ )
+ size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
+
+ // Set up copy region
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = bufferOffset;
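+ // Note: bufferRowLength is specified in texels, not bytes, and a bufferImageHeight of 0 means
+ // the rows are tightly packed according to imageExtent.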
+ region.bufferRowLength = (uint32_t)(rowBytes/bpp);
+ region.bufferImageHeight = 0;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { left, top, 0 };
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ // Change layout of our target so it can be copied to
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ vkBuffer,
+ vkTex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &region);
+
+ vkTex->texturePriv().markMipMapsDirty();
+ return true;
+}
+
+bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) {
+ SkASSERT(surface);
+ SkASSERT(transferBuffer);
+ if (fProtectedContext == GrProtected::kYes) {
+ return false;
+ }
+
+ GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
+
+ GrVkImage* srcImage;
+ if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
+ // Reading from render targets that wrap a secondary command buffer is not allowed since
+ // it would require us to know the VkImage, which we don't have, and would require us to
+ // stop and restart the VkRenderPass, which we don't have access to.
+ if (rt->wrapsSecondaryCommandBuffer()) {
+ return false;
+ }
+ srcImage = rt;
+ } else {
+ srcImage = static_cast<GrVkTexture*>(surface->asTexture());
+ }
+
+ // Set up copy region
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = offset;
+ region.bufferRowLength = width;
+ region.bufferImageHeight = 0;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { left, top, 0 };
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ srcImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ fCurrentCmdBuffer->copyImageToBuffer(this, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ vkBuffer, 1, &region);
+
+ // Make sure the copy to buffer has finished.
+ vkBuffer->addMemoryBarrier(this,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ return true;
+}
+
+void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(dst);
+ SkASSERT(src && src->numSamples() > 1 && src->msaaImage());
+
+ VkImageResolve resolveInfo;
+ resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
+ resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
+ resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
+ resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
+ resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
+
+ GrVkImage* dstImage;
+ GrRenderTarget* dstRT = dst->asRenderTarget();
+ if (dstRT) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
+ dstImage = vkRT;
+ } else {
+ SkASSERT(dst->asTexture());
+ dstImage = static_cast<GrVkTexture*>(dst->asTexture());
+ }
+ dstImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ src->msaaImage()->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
+}
+
+void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO forExternalIO) {
+ SkASSERT(target->numSamples() > 1);
+ GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
+ SkASSERT(rt->msaaImage());
+
+ auto nativeResolveRect = GrNativeRect::MakeRelativeTo(
+ resolveOrigin, target->height(), resolveRect);
+ this->resolveImage(target, rt, nativeResolveRect.asSkIRect(),
+ SkIPoint::Make(nativeResolveRect.fX, nativeResolveRect.fY));
+
+ if (ForExternalIO::kYes == forExternalIO) {
+ // This resolve is called when we are preparing an msaa surface for external I/O. It is
+ // called after flushing, so we need to make sure we submit the command buffer after doing
+ // the resolve so that the resolve actually happens.
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ }
+}
+
+bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const void* data, size_t rowBytes) {
+ SkASSERT(data);
+ SkASSERT(tex->isLinearTiled());
+
+ SkDEBUGCODE(
+ SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
+ SkASSERT(bounds.contains(subRect));
+ )
+ size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
+ size_t trimRowBytes = width * bpp;
+
+ SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
+ VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ tex->image(),
+ &subres,
+ &layout));
+
+ const GrVkAlloc& alloc = tex->alloc();
+ if (VK_NULL_HANDLE == alloc.fMemory) {
+ return false;
+ }
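+ // Compute where (left, top) lands within the mapped linear image using the row pitch the
+ // driver reported for this subresource.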
+ VkDeviceSize offset = top * layout.rowPitch + left * bpp;
+ VkDeviceSize size = height*layout.rowPitch;
+ SkASSERT(size + offset <= alloc.fSize);
+ void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
+ if (!mapPtr) {
+ return false;
+ }
+ mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
+
+ SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
+ height);
+
+ GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
+ GrVkMemory::UnmapAlloc(this, alloc);
+
+ return true;
+}
+
+bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType dataColorType, const GrMipLevel texels[],
+ int mipLevelCount) {
+ SkASSERT(!tex->isLinearTiled());
+ // The assumption is either that we have no mipmaps, or that our rect is the entire texture
+ SkASSERT(1 == mipLevelCount ||
+ (0 == left && 0 == top && width == tex->width() && height == tex->height()));
+
+ // We assume that if the texture has mip levels, we either upload to all the levels or just the
+ // first.
+ SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ if (GrPixelConfigToColorType(tex->config()) != dataColorType) {
+ return false;
+ }
+
+ // For RGB_888x src data we upload it first to an RGBA texture and then copy it to the
+ // dst RGB texture. Thus we do not upload mip levels in that case.
+ if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
+ SkASSERT(tex->config() == kRGB_888_GrPixelConfig);
+ // First check that we'll be able to do the copy to the R8G8B8 image in the end via a
+ // blit or draw.
+ if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM, tex->isLinearTiled()) &&
+ !this->vkCaps().isFormatRenderable(VK_FORMAT_R8G8B8_UNORM, 1)) {
+ return false;
+ }
+ mipLevelCount = 1;
+ }
+
+ SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
+ size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
+
+ // texels is const, but we may need to adjust the fPixels ptr based on the copyRect or
+ // fRowBytes. Because of this we need to make a non-const shallow copy of texels.
+ SkAutoTMalloc<GrMipLevel> texelsShallowCopy;
+
+ texelsShallowCopy.reset(mipLevelCount);
+ memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+ individualMipOffsets.push_back(0);
+ size_t combinedBufferSize = width * bpp * height;
+ int currentWidth = width;
+ int currentHeight = height;
+ if (!texelsShallowCopy[0].fPixels) {
+ combinedBufferSize = 0;
+ }
+
+ // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
+ // config. This relies on the assumption that the bytes per pixel of a config is always a
+ // power of 2.
+ SkASSERT((bpp & (bpp - 1)) == 0);
+ const size_t alignmentMask = 0x3 | (bpp - 1);
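+ // e.g. a bpp of 1, 2, or 4 yields a mask of 0x3 (4-byte alignment) and a bpp of 8 yields 0x7
+ // (8-byte alignment). The rounding below bumps combinedBufferSize up to the next multiple of
+ // (alignmentMask + 1).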
+ for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+
+ if (texelsShallowCopy[currentMipLevel].fPixels) {
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ const size_t alignmentDiff = combinedBufferSize & alignmentMask;
+ if (alignmentDiff != 0) {
+ combinedBufferSize += alignmentMask - alignmentDiff + 1;
+ }
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ } else {
+ individualMipOffsets.push_back(0);
+ }
+ }
+ if (0 == combinedBufferSize) {
+ // We don't actually have any data to upload so just return success
+ return true;
+ }
+
+ // allocate buffer to hold our mip data
+ sk_sp<GrVkTransferBuffer> transferBuffer =
+ GrVkTransferBuffer::Make(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
+ if (!transferBuffer) {
+ return false;
+ }
+
+ int uploadLeft = left;
+ int uploadTop = top;
+ GrVkTexture* uploadTexture = tex;
+ // For uploading RGB_888x data to an R8G8B8_UNORM texture we must first upload the data to an
+ // R8G8B8A8_UNORM image and then copy it.
+ sk_sp<GrVkTexture> copyTexture;
+ if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
+ bool dstHasYcbcr = tex->ycbcrConversionInfo().isValid();
+ if (!this->vkCaps().canCopyAsBlit(tex->imageFormat(), 1, false, dstHasYcbcr,
+ VK_FORMAT_R8G8B8A8_UNORM, 1, false, false)) {
+ return false;
+ }
+ GrSurfaceDesc surfDesc;
+ surfDesc.fWidth = width;
+ surfDesc.fHeight = height;
+ surfDesc.fConfig = kRGBA_8888_GrPixelConfig;
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ copyTexture = GrVkTexture::MakeNewTexture(this, SkBudgeted::kYes, surfDesc, imageDesc,
+ GrMipMapsStatus::kNotAllocated);
+ if (!copyTexture) {
+ return false;
+ }
+
+ uploadTexture = copyTexture.get();
+ uploadLeft = 0;
+ uploadTop = 0;
+ }
+
+ char* buffer = (char*) transferBuffer->map();
+ SkTArray<VkBufferImageCopy> regions(mipLevelCount);
+
+ currentWidth = width;
+ currentHeight = height;
+ int layerHeight = uploadTexture->height();
+ for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ if (texelsShallowCopy[currentMipLevel].fPixels) {
+ SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
+ const size_t trimRowBytes = currentWidth * bpp;
+ const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
+
+ // copy data into the buffer, skipping the trailing bytes
+ char* dst = buffer + individualMipOffsets[currentMipLevel];
+ const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
+ SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
+
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
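+ // bufferOffset must account for this transfer buffer's own offset within its backing
+ // VkBuffer allocation, hence the transferBuffer->offset() term below.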
+ region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
+ region.imageOffset = {uploadLeft, uploadTop, 0};
+ region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+ }
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ layerHeight = currentHeight;
+ }
+
+ // no need to flush non-coherent memory, unmap will do that for us
+ transferBuffer->unmap();
+
+ // Change layout of our target so it can be copied to
+ uploadTexture->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer.get(),
+ uploadTexture,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ regions.count(),
+ regions.begin());
+
+ // If we copied the data into a temporary image first, copy that image into our main texture
+ // now.
+ if (copyTexture.get()) {
+ SkASSERT(dataColorType == GrColorType::kRGB_888x);
+ SkAssertResult(this->copySurface(tex, copyTexture.get(), SkIRect::MakeWH(width, height),
+ SkIPoint::Make(left, top)));
+ }
+ if (1 == mipLevelCount) {
+ tex->texturePriv().markMipMapsDirty();
+ }
+
+ return true;
+}
+
+// It's probably possible to roll this into uploadTexDataOptimal,
+// but for now it's easier to maintain as a separate entity.
+bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
+ SkImage::CompressionType compressionType, const void* data) {
+ SkASSERT(data);
+ SkASSERT(!tex->isLinearTiled());
+ // For now the assumption is that our rect is the entire texture.
+ // Compressed textures are read-only so this should be a reasonable assumption.
+ SkASSERT(0 == left && 0 == top && width == tex->width() && height == tex->height());
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ SkImage::CompressionType textureCompressionType;
+ if (!GrVkFormatToCompressionType(tex->imageFormat(), &textureCompressionType) ||
+ textureCompressionType != compressionType) {
+ return false;
+ }
+
+ SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
+
+ size_t dataSize = GrCompressedDataSize(compressionType, width, height);
+
+ // allocate buffer to hold our mip data
+ sk_sp<GrVkTransferBuffer> transferBuffer =
+ GrVkTransferBuffer::Make(this, dataSize, GrVkBuffer::kCopyRead_Type);
+ if (!transferBuffer) {
+ return false;
+ }
+
+ int uploadLeft = left;
+ int uploadTop = top;
+ GrVkTexture* uploadTexture = tex;
+
+ char* buffer = (char*)transferBuffer->map();
+
+ memcpy(buffer, data, dataSize);
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = transferBuffer->offset();
+ region.bufferRowLength = width;
+ region.bufferImageHeight = height;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { uploadLeft, uploadTop, 0 };
+ region.imageExtent = { SkToU32(width), SkToU32(height), 1 };
+
+ // no need to flush non-coherent memory, unmap will do that for us
+ transferBuffer->unmap();
+
+ // Change layout of our target so it can be copied to
+ uploadTexture->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer.get(),
+ uploadTexture,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &region);
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ const GrBackendFormat& format,
+ GrRenderable renderable,
+ int renderTargetSampleCnt,
+ SkBudgeted budgeted,
+ GrProtected isProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) {
+ VkFormat pixelFormat;
+ SkAssertResult(format.asVkFormat(&pixelFormat));
+ SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (renderable == GrRenderable::kYes) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on
+ // every texture since we do not know whether or not we will be using this texture in some
+ // copy. This also assumes, as is the current case, that all render targets in Vulkan are also
+ // textures. If we change this practice of setting both bits, we must make sure to set the
+ // destination bit if we are uploading srcData to the texture.
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if MSAA is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
+ // set to 1.
+ SkASSERT(mipLevelCount > 0);
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = desc.fWidth;
+ imageDesc.fHeight = desc.fHeight;
+ imageDesc.fLevels = mipLevelCount;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fIsProtected = isProtected;
+
+ GrMipMapsStatus mipMapsStatus =
+ mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
+
+ sk_sp<GrVkTexture> tex;
+ if (renderable == GrRenderable::kYes) {
+ tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
+ this, budgeted, desc, renderTargetSampleCnt, imageDesc, mipMapsStatus);
+ } else {
+ tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc, mipMapsStatus);
+ }
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (levelClearMask) {
+ SkSTArray<1, VkImageSubresourceRange> ranges;
+ bool inRange = false;
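+ // Coalesce runs of consecutive set bits in levelClearMask into single
+ // VkImageSubresourceRanges so we can clear them with as few ranges as possible.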
+ for (uint32_t i = 0; i < tex->mipLevels(); ++i) {
+ if (levelClearMask & (1U << i)) {
+ if (inRange) {
+ ranges.back().levelCount++;
+ } else {
+ auto& range = ranges.push_back();
+ range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ range.baseArrayLayer = 0;
+ range.baseMipLevel = i;
+ range.layerCount = 1;
+ range.levelCount = 1;
+ inRange = true;
+ }
+ } else if (inRange) {
+ inRange = false;
+ }
+ }
+ SkASSERT(!ranges.empty());
+ static constexpr VkClearColorValue kZeroClearColor = {};
+ tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
+ this->currentCommandBuffer()->clearColorImage(this, tex.get(), &kZeroClearColor,
+ ranges.count(), ranges.begin());
+ }
+ return tex;
+}
+
+sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(int width, int height,
+ const GrBackendFormat& format,
+ SkImage::CompressionType compressionType,
+ SkBudgeted budgeted, const void* data) {
+ VkFormat pixelFormat;
+ if (!format.asVkFormat(&pixelFormat)) {
+ return nullptr;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on
+ // every texture since we do not know whether or not we will be using this texture in some
+ // copy. This also assumes, as is the current case, that all render targets in Vulkan are also
+ // textures. If we change this practice of setting both bits, we must make sure to set the
+ // destination bit if we are uploading srcData to the texture.
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ // Compressed textures with MIP levels or multiple samples are not supported as of now.
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fIsProtected = GrProtected::kNo;
+
+ GrSurfaceDesc desc;
+ desc.fConfig = GrCompressionTypePixelConfig(compressionType);
+ desc.fWidth = width;
+ desc.fHeight = height;
+ auto tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc,
+ GrMipMapsStatus::kNotAllocated);
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (!this->uploadTexDataCompressed(tex.get(), 0, 0, desc.fWidth, desc.fHeight, compressionType,
+ data)) {
+ return nullptr;
+ }
+
+ return tex;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
+ VkDeviceSize dstOffset, VkDeviceSize size) {
+ VkBufferCopy copyRegion;
+ copyRegion.srcOffset = srcOffset;
+ copyRegion.dstOffset = dstOffset;
+ copyRegion.size = size;
+ fCurrentCmdBuffer->copyBuffer(this, srcBuffer, dstBuffer, 1, &copyRegion);
+}
+
+bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
+ VkDeviceSize offset, VkDeviceSize size) {
+ // Update the buffer
+ fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static bool check_image_info(const GrVkCaps& caps,
+ const GrVkImageInfo& info,
+ GrColorType colorType,
+ bool needsAllocation) {
+ if (VK_NULL_HANDLE == info.fImage) {
+ return false;
+ }
+
+ if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
+ return false;
+ }
+
+ if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
+ return false;
+ }
+
+ if (info.fYcbcrConversionInfo.isValid()) {
+ if (!caps.supportsYcbcrConversion()) {
+ return false;
+ }
+ if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
+ return true;
+ }
+ }
+
+ SkASSERT(GrVkFormatColorTypePairIsValid(info.fFormat, colorType));
+ return true;
+}
+
+static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
+ if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
+ return true;
+ }
+ if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
+ if (!caps.isVkFormatTexturable(info.fFormat)) {
+ return false;
+ }
+ } else {
+ SkASSERT(info.fImageTiling == VK_IMAGE_TILING_LINEAR);
+ if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, int sampleCnt) {
+ if (!caps.isFormatRenderable(info.fFormat, sampleCnt)) {
+ return false;
+ }
+ return true;
+}
+
+sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
+ GrColorType colorType, GrWrapOwnership ownership,
+ GrWrapCacheable cacheable, GrIOType ioType) {
+ GrVkImageInfo imageInfo;
+ if (!backendTex.getVkImageInfo(&imageInfo)) {
+ return nullptr;
+ }
+
+ if (!check_image_info(this->vkCaps(), imageInfo, colorType,
+ kAdopt_GrWrapOwnership == ownership)) {
+ return nullptr;
+ }
+ if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
+ return nullptr;
+ }
+
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ colorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ surfDesc.fWidth = backendTex.width();
+ surfDesc.fHeight = backendTex.height();
+ surfDesc.fConfig = config;
+
+ sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
+ SkASSERT(layout);
+ return GrVkTexture::MakeWrappedTexture(this, surfDesc, ownership, cacheable, ioType, imageInfo,
+ std::move(layout));
+}
+
+sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership ownership,
+ GrWrapCacheable cacheable) {
+ GrVkImageInfo imageInfo;
+ if (!backendTex.getVkImageInfo(&imageInfo)) {
+ return nullptr;
+ }
+
+ if (!check_image_info(this->vkCaps(), imageInfo, colorType,
+ kAdopt_GrWrapOwnership == ownership)) {
+ return nullptr;
+ }
+ if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
+ return nullptr;
+ }
+ if (!check_rt_image_info(this->vkCaps(), imageInfo, sampleCnt)) {
+ return nullptr;
+ }
+
+ if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
+ colorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc surfDesc;
+ surfDesc.fWidth = backendTex.width();
+ surfDesc.fHeight = backendTex.height();
+ surfDesc.fConfig = config;
+ sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
+
+ sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
+ SkASSERT(layout);
+
+ return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
+ this, surfDesc, sampleCnt, ownership, cacheable, imageInfo, std::move(layout));
+}
+
+sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
+ GrColorType colorType) {
+ // Currently the Vulkan backend does not support wrapping of MSAA render targets directly. In
+ // general this is not an issue since swapchain images in Vulkan are never multisampled. Thus if
+ // you want a multisampled RT it is best to wrap the swapchain images and then let Skia handle
+ // creating and owning the MSAA images.
+ if (backendRT.sampleCnt() > 1) {
+ return nullptr;
+ }
+
+ GrVkImageInfo info;
+ if (!backendRT.getVkImageInfo(&info)) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendRT.getBackendFormat(),
+ colorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ if (!check_image_info(this->vkCaps(), info, colorType, false)) {
+ return nullptr;
+ }
+ if (!check_rt_image_info(this->vkCaps(), info, backendRT.sampleCnt())) {
+ return nullptr;
+ }
+
+ if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = backendRT.width();
+ desc.fHeight = backendRT.height();
+ desc.fConfig = config;
+
+ sk_sp<GrVkImageLayout> layout = backendRT.getGrVkImageLayout();
+
+ sk_sp<GrVkRenderTarget> tgt =
+ GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, 1, info, std::move(layout));
+
+ // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
+ SkASSERT(!backendRT.stencilBits());
+ if (tgt) {
+ SkASSERT(tgt->canAttemptStencilAttachment());
+ }
+
+ return tgt;
+}
+
+sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
+ int sampleCnt,
+ GrColorType grColorType) {
+
+ GrVkImageInfo imageInfo;
+ if (!tex.getVkImageInfo(&imageInfo)) {
+ return nullptr;
+ }
+ if (!check_image_info(this->vkCaps(), imageInfo, grColorType, false)) {
+ return nullptr;
+ }
+ if (!check_rt_image_info(this->vkCaps(), imageInfo, sampleCnt)) {
+ return nullptr;
+ }
+
+ if (tex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
+ return nullptr;
+ }
+
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(tex.getBackendFormat(),
+ grColorType);
+ SkASSERT(kUnknown_GrPixelConfig != config);
+
+ GrSurfaceDesc desc;
+ desc.fWidth = tex.width();
+ desc.fHeight = tex.height();
+ desc.fConfig = config;
+
+ sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
+ if (!sampleCnt) {
+ return nullptr;
+ }
+
+ sk_sp<GrVkImageLayout> layout = tex.getGrVkImageLayout();
+ SkASSERT(layout);
+
+ return GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, sampleCnt, imageInfo,
+ std::move(layout));
+}
+
+sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
+ const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
+ int maxSize = this->caps()->maxTextureSize();
+ if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
+ return nullptr;
+ }
+
+ GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
+ if (!backendFormat.isValid()) {
+ return nullptr;
+ }
+ int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
+ if (!sampleCnt) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(imageInfo.colorType());
+ GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendFormat, grColorType);
+ if (config == kUnknown_GrPixelConfig) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fWidth = imageInfo.width();
+ desc.fHeight = imageInfo.height();
+ desc.fConfig = config;
+
+ return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, desc, vkInfo);
+}
+
+bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
+ auto* vkTex = static_cast<GrVkTexture*>(tex);
+ // don't do anything for linearly tiled textures (can't have mipmaps)
+ if (vkTex->isLinearTiled()) {
+ SkDebugf("Trying to create mipmap for linear tiled texture");
+ return false;
+ }
+ SkASSERT(tex->texturePriv().textureType() == GrTextureType::k2D);
+
+ // determine if we can blit to and from this format
+ const GrVkCaps& caps = this->vkCaps();
+ if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
+ !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
+ !caps.mipMapSupport()) {
+ return false;
+ }
+
+ int width = tex->width();
+ int height = tex->height();
+ VkImageBlit blitRegion;
+ memset(&blitRegion, 0, sizeof(VkImageBlit));
+
+ // SkMipMap doesn't include the base level in the level count so we have to add 1
+ uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
+ SkASSERT(levelCount == vkTex->mipLevels());
+
+ // change layout of the layers so we can write to them.
+ vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, false);
+
+ // setup memory barrier
+ SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ nullptr, // pNext
+ VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
+ VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ vkTex->image(), // image
+ {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
+ };
+
+ // Blit the miplevels
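+ // Each iteration transitions level (mipLevel - 1) to TRANSFER_SRC_OPTIMAL via the barrier
+ // above and then blits it, downscaled, into level mipLevel.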
+ uint32_t mipLevel = 1;
+ while (mipLevel < levelCount) {
+ int prevWidth = width;
+ int prevHeight = height;
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+
+ imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
+ this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
+
+ blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
+ blitRegion.srcOffsets[0] = { 0, 0, 0 };
+ blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
+ blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
+ blitRegion.dstOffsets[0] = { 0, 0, 0 };
+ blitRegion.dstOffsets[1] = { width, height, 1 };
+ fCurrentCmdBuffer->blitImage(this,
+ vkTex->resource(),
+ vkTex->image(),
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ vkTex->resource(),
+ vkTex->image(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &blitRegion,
+ VK_FILTER_LINEAR);
+ ++mipLevel;
+ }
+ if (levelCount > 1) {
+ // This barrier logically is not needed, but it changes the final level to the same layout
+ // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
+ // layouts and future layout changes easier. The alternative here would be to track layout
+ // and memory accesses per layer, which doesn't seem worth it.
+ imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
+ this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
+ vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(
+ const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
+ SkASSERT(numStencilSamples == rt->numSamples());
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numSamples();
+
+ const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferredStencilFormat();
+
+ GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
+ width,
+ height,
+ samples,
+ sFmt));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool copy_src_data(GrVkGpu* gpu, const GrVkAlloc& alloc, VkFormat vkFormat,
+ const SkTArray<size_t>& individualMipOffsets,
+ const SkPixmap srcData[], int numMipLevels) {
+ SkASSERT(srcData && numMipLevels);
+ SkASSERT(!GrVkFormatIsCompressed(vkFormat));
+ SkASSERT(individualMipOffsets.count() == numMipLevels);
+
+ char* mapPtr = (char*) GrVkMemory::MapAlloc(gpu, alloc);
+ if (!mapPtr) {
+ return false;
+ }
+ size_t bytesPerPixel = gpu->vkCaps().bytesPerPixel(vkFormat);
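+ // Copy each level row by row so any CPU-side row padding is trimmed to the tight pitch the
+ // buffer layout expects.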
+
+ for (int level = 0; level < numMipLevels; ++level) {
+ const size_t trimRB = srcData[level].width() * bytesPerPixel;
+ SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= alloc.fSize);
+
+ SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
+ srcData[level].addr(), srcData[level].rowBytes(),
+ trimRB, srcData[level].height());
+ }
+
+ GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, alloc.fSize);
+ GrVkMemory::UnmapAlloc(gpu, alloc);
+ return true;
+}
+
+static void set_image_layout(const GrVkInterface* vkInterface, VkCommandBuffer cmdBuffer,
+ GrVkImageInfo* info, VkImageLayout newLayout, uint32_t mipLevels,
+ VkAccessFlags dstAccessMask, VkPipelineStageFlagBits dstStageMask) {
+ VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(info->fImageLayout);
+ VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(
+ info->fImageLayout);
+
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = srcAccessMask;
+ barrier.dstAccessMask = dstAccessMask;
+ barrier.oldLayout = info->fImageLayout;
+ barrier.newLayout = newLayout;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = info->fImage;
+ barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
+ GR_VK_CALL(vkInterface, CmdPipelineBarrier(
+ cmdBuffer,
+ srcStageMask,
+ dstStageMask,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+ info->fImageLayout = newLayout;
+}
+
+bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat, int w, int h, bool texturable,
+ bool renderable, GrMipMapped mipMapped,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrVkImageInfo* info,
+ GrProtected isProtected) {
+ SkASSERT(texturable || renderable);
+ if (!texturable) {
+ SkASSERT(GrMipMapped::kNo == mipMapped);
+ SkASSERT(!srcData && !numMipLevels);
+ }
+
+ // Compressed formats go through onCreateCompressedBackendTexture
+ SkASSERT(!GrVkFormatIsCompressed(vkFormat));
+
+ if (fProtectedContext != isProtected) {
+ return false;
+ }
+
+ if (texturable && !fVkCaps->isVkFormatTexturable(vkFormat)) {
+ return false;
+ }
+
+ if (renderable && !fVkCaps->isFormatRenderable(vkFormat, 1)) {
+ return false;
+ }
+
+ VkImageUsageFlags usageFlags = 0;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (texturable) {
+ usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+ if (renderable) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ // Figure out the number of mip levels.
+ uint32_t mipLevelCount = 1;
+ if (srcData) {
+ SkASSERT(numMipLevels > 0);
+ mipLevelCount = numMipLevels;
+ } else if (GrMipMapped::kYes == mipMapped) {
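+ // ComputeLevelCount does not include the base level, hence the +1.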
+ mipLevelCount = SkMipMap::ComputeLevelCount(w, h) + 1;
+ }
+
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = vkFormat;
+ imageDesc.fWidth = w;
+ imageDesc.fHeight = h;
+ imageDesc.fLevels = mipLevelCount;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ imageDesc.fIsProtected = fProtectedContext;
+
+ if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
+ SkDebugf("Failed to init image info\n");
+ return false;
+ }
+
+ if (!srcData && !color) {
+ return true;
+ }
+
+ // We need to declare these early so that we can delete them at the end outside of
+ // the if block.
+ GrVkAlloc bufferAlloc;
+ VkBuffer buffer = VK_NULL_HANDLE;
+
+ VkResult err;
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ fCmdPool->vkCommandPool(), // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+ if (err) {
+ GrVkImage::DestroyImageInfo(this, info);
+ return false;
+ }
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+ SkASSERT(!err);
+
+ // Set image layout and add barrier
+ set_image_layout(this->vkInterface(), cmdBuffer, info, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ mipLevelCount, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+
+ if (srcData) {
+ size_t bytesPerPixel = fVkCaps->bytesPerPixel(vkFormat);
+ SkASSERT(w && h);
+
+ SkTArray<size_t> individualMipOffsets(mipLevelCount);
+
+ size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerPixel, w, h,
+ &individualMipOffsets,
+ mipLevelCount);
+
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = fProtectedContext == GrProtected::kYes ? VK_BUFFER_CREATE_PROTECTED_BIT : 0;
+ bufInfo.size = combinedBufferSize;
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+ err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
+
+ if (err) {
+ GrVkImage::DestroyImageInfo(this, info);
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+ return false;
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type, true,
+ &bufferAlloc)) {
+ GrVkImage::DestroyImageInfo(this, info);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+ return false;
+ }
+
+ bool result = copy_src_data(this, bufferAlloc, vkFormat, individualMipOffsets,
+ srcData, numMipLevels);
+ if (!result) {
+ GrVkImage::DestroyImageInfo(this, info);
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(EndCommandBuffer(cmdBuffer));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+ return false;
+ }
+
+ SkTArray<VkBufferImageCopy> regions(mipLevelCount);
+
+ int currentWidth = w;
+ int currentHeight = h;
+ for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
+ // Submit copy command
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
+ region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1};
+ region.imageOffset = {0, 0, 0};
+ region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
+ currentWidth = SkTMax(1, currentWidth / 2);
+ currentHeight = SkTMax(1, currentHeight / 2);
+ }
+
+ VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, info->fImage, info->fImageLayout,
+ regions.count(), regions.begin()));
+ } else {
+ SkASSERT(color);
+ VkClearColorValue vkColor;
+ // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
+ // uint32 union members in those cases.
+ vkColor.float32[0] = color->fR;
+ vkColor.float32[1] = color->fG;
+ vkColor.float32[2] = color->fB;
+ vkColor.float32[3] = color->fA;
+ VkImageSubresourceRange range;
+ range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ range.baseArrayLayer = 0;
+ range.baseMipLevel = 0;
+ range.layerCount = 1;
+ range.levelCount = mipLevelCount;
+ VK_CALL(CmdClearColorImage(cmdBuffer, info->fImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ &vkColor, 1, &range));
+ }
+
+ if (!srcData && renderable) {
+ SkASSERT(color);
+
+ // Change image layout to color-attachment-optimal since if we use this texture as a
+ // borrowed texture within Ganesh we are probably going to render to it
+ set_image_layout(this->vkInterface(), cmdBuffer, info,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, mipLevelCount,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
+ } else if (texturable) {
+ // Change image layout to shader read since if we use this texture as a borrowed
+ // texture within Ganesh we require that its layout be set to that
+ set_image_layout(this->vkInterface(), cmdBuffer, info,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, mipLevelCount,
+ VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
+ }
+
+ // End CommandBuffer
+ err = VK_CALL(EndCommandBuffer(cmdBuffer));
+ SkASSERT(!err);
+
+ // Create Fence for queue
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ fenceInfo.pNext = nullptr;
+ fenceInfo.flags = 0;
+ VkFence fence = VK_NULL_HANDLE;
+
+ err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+ SkASSERT(!err);
+
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (fProtectedContext == GrProtected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = fProtectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitDstStageMask = 0;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &cmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
+ SkASSERT(!err);
+
+ err = VK_CALL(WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ GrVkImage::DestroyImageInfo(this, info);
+ if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ }
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+ VK_CALL(DestroyFence(this->device(), fence, nullptr));
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SK_ABORT("failing");
+ }
+ SkASSERT(!err);
+
+ // Clean up transfer resources
+ if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ }
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+ VK_CALL(DestroyFence(this->device(), fence, nullptr));
+
+ return true;
+}
+
+GrBackendTexture GrVkGpu::onCreateBackendTexture(int w, int h,
+ const GrBackendFormat& format,
+ GrMipMapped mipMapped,
+ GrRenderable renderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected isProtected) {
+ this->handleDirtyContext();
+
+ const GrVkCaps& caps = this->vkCaps();
+
+ // GrGpu::createBackendTexture should've ensured these conditions
+ SkASSERT(w >= 1 && w <= caps.maxTextureSize() && h >= 1 && h <= caps.maxTextureSize());
+ SkASSERT(GrGpu::MipMapsAreCorrect(w, h, mipMapped, srcData, numMipLevels));
+ SkASSERT(mipMapped == GrMipMapped::kNo || caps.mipMapSupport());
+
+ if (fProtectedContext != isProtected) {
+ return GrBackendTexture();
+ }
+
+ VkFormat vkFormat;
+ if (!format.asVkFormat(&vkFormat)) {
+ SkDebugf("Could net get vkformat\n");
+ return GrBackendTexture();
+ }
+
+ // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
+ if (!caps.isVkFormatTexturable(vkFormat)) {
+ SkDebugf("Config is not texturable\n");
+ return GrBackendTexture();
+ }
+
+ if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
+ SkDebugf("Can't create BackendTexture that requires Ycbcb sampler.\n");
+ return GrBackendTexture();
+ }
+
+ GrVkImageInfo info;
+ if (!this->createVkImageForBackendSurface(vkFormat, w, h, true,
+ GrRenderable::kYes == renderable, mipMapped,
+ srcData, numMipLevels, color, &info, isProtected)) {
+ SkDebugf("Failed to create testing only image\n");
+ return GrBackendTexture();
+ }
+
+ return GrBackendTexture(w, h, info);
+}
+
+void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
+ SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
+
+ GrVkImageInfo info;
+ if (tex.getVkImageInfo(&info)) {
+ GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
+ }
+}
+
+#if GR_TEST_UTILS
+bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
+ SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
+
+ GrVkImageInfo backend;
+ if (!tex.getVkImageInfo(&backend)) {
+ return false;
+ }
+
+ if (backend.fImage && backend.fAlloc.fMemory) {
+ VkMemoryRequirements req;
+ memset(&req, 0, sizeof(req));
+ GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
+ backend.fImage,
+ &req));
+ // TODO: find a better check
+ // This will probably fail with a different driver
+ return (req.size > 0) && (req.size <= 8192 * 8192);
+ }
+
+ return false;
+}
+
+GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(int w, int h, GrColorType ct) {
+ this->handleDirtyContext();
+
+ if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
+ return GrBackendRenderTarget();
+ }
+
+ VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
+
+ GrVkImageInfo info;
+ if (!this->createVkImageForBackendSurface(vkFormat, w, h, false, true, GrMipMapped::kNo,
+ nullptr, 0, &SkColors::kTransparent, &info,
+ GrProtected::kNo)) {
+ return {};
+ }
+
+ return GrBackendRenderTarget(w, h, 1, 0, info);
+}
+
+void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
+ SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
+
+ GrVkImageInfo info;
+ if (rt.getVkImageInfo(&info)) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
+ }
+}
+
+void GrVkGpu::testingOnly_flushGpuAndSync() {
+ this->submitCommandBuffer(kForce_SyncQueue);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::addBufferMemoryBarrier(const GrVkResource* resource,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ SkASSERT(resource);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ resource,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kBufferMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ SkASSERT(resource);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ resource,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kImageMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
+ SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
+ const GrPrepareForExternalIORequests& externalRequests) {
+ SkASSERT(n >= 0);
+ SkASSERT(!n || proxies);
+ // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
+    // not affect what we do here.
+ if (n && access == SkSurface::BackendSurfaceAccess::kPresent) {
+ GrVkImage* image;
+ for (int i = 0; i < n; ++i) {
+ SkASSERT(proxies[i]->isInstantiated());
+ if (GrTexture* tex = proxies[i]->peekTexture()) {
+ image = static_cast<GrVkTexture*>(tex);
+ } else {
+ GrRenderTarget* rt = proxies[i]->peekRenderTarget();
+ SkASSERT(rt);
+ image = static_cast<GrVkRenderTarget*>(rt);
+ }
+ image->prepareForPresent(this);
+ }
+ }
+
+ // Handle requests for preparing for external IO
+ for (int i = 0; i < externalRequests.fNumImages; ++i) {
+ SkImage* image = externalRequests.fImages[i];
+ if (!image->isTextureBacked()) {
+ continue;
+ }
+ SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image));
+ sk_sp<GrTextureProxy> proxy = gpuImage->asTextureProxyRef(this->getContext());
+ SkASSERT(proxy);
+
+ if (!proxy->isInstantiated()) {
+ auto resourceProvider = this->getContext()->priv().resourceProvider();
+ if (!proxy->instantiate(resourceProvider)) {
+ continue;
+ }
+ }
+
+ GrTexture* tex = proxy->peekTexture();
+ if (!tex) {
+ continue;
+ }
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
+ vkTex->prepareForExternal(this);
+ }
+ for (int i = 0; i < externalRequests.fNumSurfaces; ++i) {
+ SkSurface* surface = externalRequests.fSurfaces[i];
+ if (!surface->getCanvas()->getGrContext()) {
+ continue;
+ }
+ SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface);
+ auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
+ sk_sp<GrRenderTargetProxy> proxy = rtc->asRenderTargetProxyRef();
+ if (!proxy->isInstantiated()) {
+ auto resourceProvider = this->getContext()->priv().resourceProvider();
+ if (!proxy->instantiate(resourceProvider)) {
+ continue;
+ }
+ }
+
+ GrRenderTarget* rt = proxy->peekRenderTarget();
+ SkASSERT(rt);
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ if (externalRequests.fPrepareSurfaceForPresent &&
+ externalRequests.fPrepareSurfaceForPresent[i]) {
+ vkRT->prepareForPresent(this);
+ } else {
+ vkRT->prepareForExternal(this);
+ }
+ }
+
+ if (info.fFlags & kSyncCpu_GrFlushFlag) {
+ this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc, info.fFinishedContext);
+ } else {
+ this->submitCommandBuffer(kSkip_SyncQueue, info.fFinishedProc, info.fFinishedContext);
+ }
+}
+
+static int get_surface_sample_cnt(GrSurface* surf) {
+ if (const GrRenderTarget* rt = surf->asRenderTarget()) {
+ return rt->numSamples();
+ }
+ return 0;
+}
+
+void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
+ GrVkImage* srcImage, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+#ifdef SK_DEBUG
+ int dstSampleCnt = get_surface_sample_cnt(dst);
+ int srcSampleCnt = get_surface_sample_cnt(src);
+ bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
+ bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
+ VkFormat dstFormat = dstImage->imageFormat();
+ VkFormat srcFormat;
+    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
+ SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcHasYcbcr));
+#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
+
+    // These flags are for flushing/invalidating caches; for the dst image it doesn't matter
+    // whether the cache is flushed since it is only being written to.
+ dstImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ srcImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ VkImageCopy copyRegion;
+ memset(&copyRegion, 0, sizeof(VkImageCopy));
+ copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
+ copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
+ copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
+
+ fCurrentCmdBuffer->copyImage(this,
+ srcImage,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &copyRegion);
+
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+}
+
+void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
+ GrVkImage* srcImage, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+#ifdef SK_DEBUG
+ int dstSampleCnt = get_surface_sample_cnt(dst);
+ int srcSampleCnt = get_surface_sample_cnt(src);
+ bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
+ bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
+ VkFormat dstFormat = dstImage->imageFormat();
+ VkFormat srcFormat;
+    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
+ SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
+ dstHasYcbcr, srcFormat, srcSampleCnt,
+ srcImage->isLinearTiled(), srcHasYcbcr));
+
+#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
+
+ dstImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ srcImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+    // Both rects are already in device space, so no flipping is required here.
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
+ srcRect.height());
+
+ VkImageBlit blitRegion;
+ memset(&blitRegion, 0, sizeof(VkImageBlit));
+ blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
+ blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
+ blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
+ blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
+
+ fCurrentCmdBuffer->blitImage(this,
+ *srcImage,
+ *dstImage,
+ 1,
+ &blitRegion,
+ VK_FILTER_NEAREST); // We never scale so any filter works here
+
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+}
+
+void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return;
+ }
+ GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
+ this->resolveImage(dst, srcRT, srcRect, dstPoint);
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ // The rect is already in device space so we pass in kTopLeft so no flip is done.
+ this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
+}
+
+bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+#ifdef SK_DEBUG
+ if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
+ SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
+ }
+ if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
+ SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
+ }
+#endif
+ if (src->isProtected() && !dst->isProtected()) {
+ SkDebugf("Can't copy from protected memory to non-protected");
+ return false;
+ }
+
+ int dstSampleCnt = get_surface_sample_cnt(dst);
+ int srcSampleCnt = get_surface_sample_cnt(src);
+
+ GrVkImage* dstImage;
+ GrVkImage* srcImage;
+ GrRenderTarget* dstRT = dst->asRenderTarget();
+ if (dstRT) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
+ if (vkRT->wrapsSecondaryCommandBuffer()) {
+ return false;
+ }
+ dstImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
+ } else {
+ SkASSERT(dst->asTexture());
+ dstImage = static_cast<GrVkTexture*>(dst->asTexture());
+ }
+ GrRenderTarget* srcRT = src->asRenderTarget();
+ if (srcRT) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
+ srcImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
+ } else {
+ SkASSERT(src->asTexture());
+ srcImage = static_cast<GrVkTexture*>(src->asTexture());
+ }
+
+ VkFormat dstFormat = dstImage->imageFormat();
+ VkFormat srcFormat = srcImage->imageFormat();
+
+ bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
+ bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
+
+ if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcHasYcbcr)) {
+ this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
+ srcFormat, srcSampleCnt, srcHasYcbcr)) {
+ this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
+ return true;
+ }
+
+ if (this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
+ dstHasYcbcr, srcFormat, srcSampleCnt,
+ srcImage->isLinearTiled(), srcHasYcbcr)) {
+ this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) {
+ if (surface->isProtected()) {
+ return false;
+ }
+
+ if (surfaceColorType != dstColorType) {
+ return false;
+ }
+
+ GrVkImage* image = nullptr;
+ GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
+ if (rt) {
+ // Reading from render targets that wrap a secondary command buffer is not allowed since
+ // it would require us to know the VkImage, which we don't have, as well as need us to
+ // stop and start the VkRenderPass which we don't have access to.
+ if (rt->wrapsSecondaryCommandBuffer()) {
+ return false;
+ }
+ image = rt;
+ } else {
+ image = static_cast<GrVkTexture*>(surface->asTexture());
+ }
+
+ if (!image) {
+ return false;
+ }
+
+    // Skia's RGB_888x color type, which we map to the Vulkan R8G8B8_UNORM, expects the data to be
+    // 32 bits per pixel, but the Vulkan format is only 24. So we first copy the surface into an
+    // R8G8B8A8 image and then do the read pixels from that.
+ sk_sp<GrVkTextureRenderTarget> copySurface;
+ if (dstColorType == GrColorType::kRGB_888x && image->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
+ int srcSampleCount = 0;
+ if (rt) {
+ srcSampleCount = rt->numSamples();
+ }
+ bool srcHasYcbcr = image->ycbcrConversionInfo().isValid();
+ if (!this->vkCaps().canCopyAsBlit(VK_FORMAT_R8G8B8A8_UNORM, 1, false, false,
+ image->imageFormat(), srcSampleCount,
+ image->isLinearTiled(), srcHasYcbcr)) {
+ return false;
+ }
+
+ // Make a new surface that is RGBA to copy the RGB surface into.
+ GrSurfaceDesc surfDesc;
+ surfDesc.fWidth = width;
+ surfDesc.fHeight = height;
+ surfDesc.fConfig = kRGBA_8888_GrPixelConfig;
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ copySurface = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
+ this, SkBudgeted::kYes, surfDesc, 1, imageDesc, GrMipMapsStatus::kNotAllocated);
+ if (!copySurface) {
+ return false;
+ }
+
+ SkIRect srcRect = SkIRect::MakeXYWH(left, top, width, height);
+ SkAssertResult(this->copySurface(copySurface.get(), surface, srcRect, SkIPoint::Make(0,0)));
+
+ top = 0;
+ left = 0;
+ dstColorType = GrColorType::kRGBA_8888;
+ image = copySurface.get();
+ }
+
+    // Change the layout of our target so it can be used as a copy source.
+ image->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
+ size_t tightRowBytes = bpp * width;
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+
+ bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
+ if (copyFromOrigin) {
+ region.imageOffset = { 0, 0, 0 };
+ region.imageExtent = { (uint32_t)(left + width), (uint32_t)(top + height), 1 };
+ } else {
+ VkOffset3D offset = { left, top, 0 };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+ }
+
+ size_t transBufferRowBytes = bpp * region.imageExtent.width;
+ size_t imageRows = region.imageExtent.height;
+ auto transferBuffer = sk_sp<GrVkTransferBuffer>(
+ static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
+ GrGpuBufferType::kXferGpuToCpu,
+ kStream_GrAccessPattern)
+ .release()));
+
+ // Copy the image to a buffer so we can map it to cpu memory
+ region.bufferOffset = transferBuffer->offset();
+ region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
+ region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+
+ fCurrentCmdBuffer->copyImageToBuffer(this,
+ image,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ transferBuffer.get(),
+ 1,
+ &region);
+
+ // make sure the copy to buffer has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+
+ // We need to submit the current command buffer to the Queue and make sure it finishes before
+ // we can copy the data out of the buffer.
+ this->submitCommandBuffer(kForce_SyncQueue);
+ void* mappedMemory = transferBuffer->map();
+ const GrVkAlloc& transAlloc = transferBuffer->alloc();
+ GrVkMemory::InvalidateMappedAlloc(this, transAlloc, 0, transAlloc.fSize);
+
+ if (copyFromOrigin) {
+ uint32_t skipRows = region.imageExtent.height - height;
+ mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
+ }
+
+ SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);
+
+ transferBuffer->unmap();
+ return true;
+}
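+
+// A minimal sketch of how the RGB_888x path above is exercised (assuming the caller reaches
+// this through GrGpu::readPixels and the surface really is VK_FORMAT_R8G8B8_UNORM); the
+// surface is first blitted into a scratch RGBA render target and the buffer copy then reads
+// from that copy:
+//
+//   std::vector<uint32_t> pixels(width * height);
+//   bool ok = gpu->readPixels(surface, 0, 0, width, height,
+//                             GrColorType::kRGB_888x, GrColorType::kRGB_888x,
+//                             pixels.data(), width * sizeof(uint32_t));
+//   // On this path, dstColorType is internally promoted to kRGBA_8888 for the copy.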
+
+// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
+// of the granularity. The width must also be a multiple of the granularity or equal to the width
+// of the entire attachment. The same requirements apply to the y and height components.
+void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
+ const VkExtent2D& granularity, int maxWidth, int maxHeight) {
+ // Adjust Width
+ if ((0 != granularity.width && 1 != granularity.width)) {
+        // Start with the right side of the rect so we know if we end up going past the maxWidth.
+ int rightAdj = srcBounds.fRight % granularity.width;
+ if (rightAdj != 0) {
+ rightAdj = granularity.width - rightAdj;
+ }
+ dstBounds->fRight = srcBounds.fRight + rightAdj;
+ if (dstBounds->fRight > maxWidth) {
+ dstBounds->fRight = maxWidth;
+ dstBounds->fLeft = 0;
+ } else {
+ dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
+ }
+ } else {
+ dstBounds->fLeft = srcBounds.fLeft;
+ dstBounds->fRight = srcBounds.fRight;
+ }
+
+ // Adjust height
+ if ((0 != granularity.height && 1 != granularity.height)) {
+        // Start with the bottom side of the rect so we know if we end up going past the maxHeight.
+ int bottomAdj = srcBounds.fBottom % granularity.height;
+ if (bottomAdj != 0) {
+ bottomAdj = granularity.height - bottomAdj;
+ }
+ dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
+ if (dstBounds->fBottom > maxHeight) {
+ dstBounds->fBottom = maxHeight;
+ dstBounds->fTop = 0;
+ } else {
+ dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
+ }
+ } else {
+ dstBounds->fTop = srcBounds.fTop;
+ dstBounds->fBottom = srcBounds.fBottom;
+ }
+}
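+
+// A worked example of the snapping above (a sketch, assuming a 64x64 render-area
+// granularity and a 1000x800 attachment):
+//
+//   SkIRect dst;
+//   adjust_bounds_to_granularity(&dst, SkIRect::MakeLTRB(70, 10, 130, 90),
+//                                {64, 64}, 1000, 800);
+//   // dst == {64, 0, 192, 128}: each edge snaps outward to a multiple of 64
+//   // and is clamped to the attachment dimensions.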
+
+void GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
+ const VkClearValue* colorClear,
+ GrVkRenderTarget* target, GrSurfaceOrigin origin,
+ const SkIRect& bounds, bool forSecondaryCB) {
+    SkASSERT(!target->wrapsSecondaryCommandBuffer());
+ auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, target->height(), bounds);
+
+ // The bounds we use for the render pass should be of the granularity supported
+ // by the device.
+ const VkExtent2D& granularity = renderPass->granularity();
+ SkIRect adjustedBounds;
+ if ((0 != granularity.width && 1 != granularity.width) ||
+ (0 != granularity.height && 1 != granularity.height)) {
+ adjust_bounds_to_granularity(&adjustedBounds, nativeBounds.asSkIRect(), granularity,
+ target->width(), target->height());
+ } else {
+ adjustedBounds = nativeBounds.asSkIRect();
+ }
+
+#ifdef SK_DEBUG
+ uint32_t index;
+ bool result = renderPass->colorAttachmentIndex(&index);
+ SkASSERT(result && 0 == index);
+ result = renderPass->stencilAttachmentIndex(&index);
+ if (result) {
+ SkASSERT(1 == index);
+ }
+#endif
+ VkClearValue clears[2];
+ clears[0].color = colorClear->color;
+ clears[1].depthStencil.depth = 0.0f;
+ clears[1].depthStencil.stencil = 0;
+
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, clears, *target, adjustedBounds,
+ forSecondaryCB);
+}
+
+void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
+ const SkIRect& bounds) {
+ fCurrentCmdBuffer->endRenderPass(this);
+ this->didWriteToSurface(target, origin, &bounds);
+}
+
+void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
+ fCurrentCmdBuffer->executeCommands(this, std::move(buffer));
+}
+
+void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
+ SkASSERT(fCachedOpsRenderPass.get() == renderPass);
+
+ fCachedOpsRenderPass->submit();
+ fCachedOpsRenderPass->reset();
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
+ VkFenceCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ VkFence fence = VK_NULL_HANDLE;
+
+ VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
+ VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));
+
+ GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
+ return (GrFence)fence;
+}
+
+bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
+ SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
+
+ VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
+ return (VK_SUCCESS == result);
+}
+
+void GrVkGpu::deleteFence(GrFence fence) const {
+ VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
+}
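+
+// A minimal sketch of the fence API above; GrFence wraps a VkFence, so the usual
+// pattern is insert -> wait -> delete:
+//
+//   GrFence fence = gpu->insertFence();   // empty submit that signals the fence
+//   if (gpu->waitFence(fence, /*timeoutNs=*/1000000)) {
+//       // all work submitted to the queue before the fence has completed
+//   }
+//   gpu->deleteFence(fence);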
+
+sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
+ return GrVkSemaphore::Make(this, isOwned);
+}
+
+sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) {
+ return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
+}
+
+void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
+ GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
+
+ GrVkSemaphore::Resource* resource = vkSem->getResource();
+ if (resource->shouldSignal()) {
+ resource->ref();
+ fSemaphoresToSignal.push_back(resource);
+ }
+}
+
+void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
+ GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
+
+ GrVkSemaphore::Resource* resource = vkSem->getResource();
+ if (resource->shouldWait()) {
+ resource->ref();
+ fSemaphoresToWaitOn.push_back(resource);
+ }
+}
+
+sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
+ SkASSERT(texture);
+ GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
+ vkTexture->setImageLayout(this,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ false);
+ this->submitCommandBuffer(kSkip_SyncQueue);
+
+ // The image layout change serves as a barrier, so no semaphore is needed.
+ // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
+ // thread safe so that only the first thread that tries to use the semaphore actually submits
+    // it. This would additionally require thread safety in command buffer submissions to
+ // queues in general.
+ return nullptr;
+}
+
+void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
+ fDrawables.emplace_back(std::move(drawable));
+}
+
+uint32_t GrVkGpu::getExtraSamplerKeyForProgram(const GrSamplerState& samplerState,
+ const GrBackendFormat& format) {
+ const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
+ SkASSERT(ycbcrInfo);
+ if (!ycbcrInfo->isValid()) {
+ return 0;
+ }
+
+ const GrVkSampler* sampler = this->resourceProvider().findOrCreateCompatibleSampler(
+ samplerState, *ycbcrInfo);
+
+ uint32_t result = sampler->uniqueID();
+
+ sampler->unref(this);
+
+ return result;
+}
+
+void GrVkGpu::storeVkPipelineCacheData() {
+ if (this->getContext()->priv().getPersistentCache()) {
+ this->resourceProvider().storePipelineCacheData();
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpu.h b/gfx/skia/skia/src/gpu/vk/GrVkGpu.h
new file mode 100644
index 0000000000..cdbb31c6c2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpu.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkGpu_DEFINED
+#define GrVkGpu_DEFINED
+
+#include "include/gpu/vk/GrVkBackendContext.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/vk/GrVkCaps.h"
+#include "src/gpu/vk/GrVkIndexBuffer.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+#include "src/gpu/vk/GrVkSemaphore.h"
+#include "src/gpu/vk/GrVkUtil.h"
+#include "src/gpu/vk/GrVkVertexBuffer.h"
+
+class GrPipeline;
+
+class GrVkBufferImpl;
+class GrVkCommandPool;
+class GrVkMemoryAllocator;
+class GrVkPipeline;
+class GrVkPipelineState;
+class GrVkPrimaryCommandBuffer;
+class GrVkOpsRenderPass;
+class GrVkRenderPass;
+class GrVkSecondaryCommandBuffer;
+class GrVkTexture;
+struct GrVkInterface;
+
+namespace SkSL {
+ class Compiler;
+}
+
+class GrVkGpu : public GrGpu {
+public:
+ static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*);
+
+ ~GrVkGpu() override;
+
+ void disconnect(DisconnectType) override;
+
+ const GrVkInterface* vkInterface() const { return fInterface.get(); }
+ const GrVkCaps& vkCaps() const { return *fVkCaps; }
+
+ GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
+
+ VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
+ VkDevice device() const { return fDevice; }
+ VkQueue queue() const { return fQueue; }
+ uint32_t queueIndex() const { return fQueueIndex; }
+ GrVkCommandPool* cmdPool() const { return fCmdPool; }
+ const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
+ return fPhysDevProps;
+ }
+ const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
+ return fPhysDevMemProps;
+ }
+ bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }
+
+ GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
+
+ GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }
+
+ enum SyncQueue {
+ kForce_SyncQueue,
+ kSkip_SyncQueue
+ };
+
+ void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) override {
+ SkASSERT(!this->caps()->sampleLocationsSupport());
+ SK_ABORT("Sample locations not yet implemented for Vulkan.");
+ }
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ void deleteBackendTexture(const GrBackendTexture&) override;
+#if GR_TEST_UTILS
+ bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
+
+ GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
+ void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
+
+ void testingOnly_flushGpuAndSync() override;
+
+ void resetShaderCacheForTesting() const override {
+ fResourceProvider.resetShaderCacheForTesting();
+ }
+#endif
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(
+ const GrRenderTarget*, int width, int height, int numStencilSamples) override;
+
+ GrOpsRenderPass* getOpsRenderPass(
+ GrRenderTarget*, GrSurfaceOrigin, const SkIRect&,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) override;
+
+ void addBufferMemoryBarrier(const GrVkResource*,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const;
+ void addImageMemoryBarrier(const GrVkResource*,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const;
+
+ SkSL::Compiler* shaderCompiler() const {
+ return fCompiler;
+ }
+
+ bool onRegenerateMipMapLevels(GrTexture* tex) override;
+
+ void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
+ GrSurfaceOrigin resolveOrigin, ForExternalIO) override;
+
+ void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);
+
+ void submit(GrOpsRenderPass*) override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() override;
+ bool waitFence(GrFence, uint64_t timeout) override;
+ void deleteFence(GrFence) const override;
+
+ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
+ sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
+ GrResourceProvider::SemaphoreWrapType wrapType,
+ GrWrapOwnership ownership) override;
+ void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+
+ // These match the definitions in SkDrawable, from whence they came
+ typedef void* SubmitContext;
+ typedef void (*SubmitProc)(SubmitContext submitContext);
+
+ // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
+ // command buffer to the gpu.
+ void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
+
+ void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
+
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
+
+ void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
+ VkDeviceSize dstOffset, VkDeviceSize size);
+ bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
+
+ uint32_t getExtraSamplerKeyForProgram(const GrSamplerState&,
+ const GrBackendFormat& format) override;
+
+ enum PersistentCacheKeyType : uint32_t {
+ kShader_PersistentCacheKeyType = 0,
+ kPipelineCache_PersistentCacheKeyType = 1,
+ };
+
+ void storeVkPipelineCacheData() override;
+
+ void beginRenderPass(const GrVkRenderPass*,
+ const VkClearValue* colorClear,
+ GrVkRenderTarget*, GrSurfaceOrigin,
+ const SkIRect& bounds, bool forSecondaryCB);
+ void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);
+
+private:
+ GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext&,
+ sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion);
+
+ void onResetContext(uint32_t resetBits) override {}
+
+ void destroyResources();
+
+ GrBackendTexture onCreateBackendTexture(int w, int h, const GrBackendFormat&,
+ GrMipMapped, GrRenderable,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrProtected) override;
+ sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&,
+ const GrBackendFormat& format,
+ GrRenderable,
+ int renderTargetSampleCnt,
+ SkBudgeted,
+ GrProtected,
+ int mipLevelCount,
+ uint32_t levelClearMask) override;
+ sk_sp<GrTexture> onCreateCompressedTexture(int width, int height, const GrBackendFormat&,
+ SkImage::CompressionType, SkBudgeted,
+ const void* data) override;
+
+ sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrColorType, GrWrapOwnership,
+ GrWrapCacheable, GrIOType) override;
+ sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
+ int sampleCnt,
+ GrColorType colorType,
+ GrWrapOwnership,
+ GrWrapCacheable) override;
+ sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
+ GrColorType) override;
+
+ sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
+ int sampleCnt, GrColorType) override;
+
+ sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
+ const GrVkDrawableInfo&) override;
+
+ sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
+ const void* data) override;
+
+ bool onReadPixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType srcColorType,
+ const GrMipLevel texels[], int mipLevelCount,
+ bool prepForTexSampling) override;
+
+ bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
+ GrColorType textureColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
+ bool onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
+ GrColorType surfaceColorType, GrColorType bufferColorType,
+ GrGpuBuffer* transferBuffer, size_t offset) override;
+
+ bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ void onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
+ const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
+
+ // Ends and submits the current command buffer to the queue and then creates a new command
+ // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
+ // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
+    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
+ // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
+ // wait semaphores to the submission of this command buffer.
+ void submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
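+    // For example, a forced sync with a completion callback might look like this
+    // (a sketch; MyFinishedProc and myContext are hypothetical):
+    //
+    //   this->submitCommandBuffer(kForce_SyncQueue, MyFinishedProc, myContext);
+    //   // returns only after the queue has drained all submitted work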
+
+ void copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
+ GrVkImage* srcImage, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage, GrVkImage* srcImage,
+ const SkIRect& srcRect, const SkIPoint& dstPoint);
+
+ void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ // helpers for onCreateTexture and writeTexturePixels
+ bool uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType colorType, const void* data, size_t rowBytes);
+ bool uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
+ GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
+ bool uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
+ SkImage::CompressionType, const void* data);
+ void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ bool createVkImageForBackendSurface(VkFormat vkFormat, int w, int h, bool texturable,
+ bool renderable, GrMipMapped mipMapped,
+ const SkPixmap srcData[], int numMipLevels,
+ const SkColor4f* color, GrVkImageInfo* info,
+ GrProtected isProtected);
+
+ sk_sp<const GrVkInterface> fInterface;
+ sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
+ sk_sp<GrVkCaps> fVkCaps;
+
+ VkInstance fInstance;
+ VkPhysicalDevice fPhysicalDevice;
+ VkDevice fDevice;
+ VkQueue fQueue; // Must be Graphics queue
+ uint32_t fQueueIndex;
+
+ // Created by GrVkGpu
+ GrVkResourceProvider fResourceProvider;
+
+ GrVkCommandPool* fCmdPool;
+
+ // just a raw pointer; object's lifespan is managed by fCmdPool
+ GrVkPrimaryCommandBuffer* fCurrentCmdBuffer;
+
+ SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
+ SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;
+
+ SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;
+
+ VkPhysicalDeviceProperties fPhysDevProps;
+ VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
+
+    // Compiler used for compiling SkSL into SPIR-V. We only want to create the compiler once since
+ // there is significant overhead to the first compile of any compiler.
+ SkSL::Compiler* fCompiler;
+
+ // We need a bool to track whether or not we've already disconnected all the gpu resources from
+    // the Vulkan context.
+ bool fDisconnected;
+
+ GrProtected fProtectedContext;
+
+ std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp b/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp
new file mode 100644
index 0000000000..2b05d00e41
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImage.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkTexture.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
+ return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
+    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+ // color attachment or depth/stencil writes). So we will ignore the
+ // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+ // TODO: Add check for linear here so we are not always adding host to general, and we should
+ // only be in preinitialized if we are linear
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
+ // There are no writes that need to be made available
+ flags = 0;
+ }
+ return flags;
+}
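+
+// A small sketch of how the two helpers above pair up when filling in the source
+// half of a barrier (assuming a transition out of color-attachment use):
+//
+//   VkImageLayout oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+//   VkAccessFlags srcAccess = GrVkImage::LayoutToSrcAccessMask(oldLayout);
+//   // == VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
+//   VkPipelineStageFlags srcStage = GrVkImage::LayoutToPipelineSrcStageFlags(oldLayout);
+//   // == VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT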
+
+VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
+ switch (format) {
+ case VK_FORMAT_S8_UINT:
+ return VK_IMAGE_ASPECT_STENCIL_BIT;
+ case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+ default:
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+}
+
+void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion, bool releaseFamilyQueue) {
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
+ VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
+ VkImageLayout currentLayout = this->currentLayout();
+
+ if (releaseFamilyQueue && fInfo.fCurrentQueueFamily == fInitialQueueFamily &&
+ newLayout == currentLayout) {
+        // We never transferred the image to this queue and we are releasing it, so don't do anything.
+ return;
+ }
+
+    // If the old and new layouts are the same and the layout is read-only, there is no need
+ // to put in a barrier unless we also need to switch queues.
+ if (newLayout == currentLayout && !releaseFamilyQueue &&
+ (fInfo.fCurrentQueueFamily == VK_QUEUE_FAMILY_IGNORED ||
+ fInfo.fCurrentQueueFamily == gpu->queueIndex()) &&
+ (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
+ return;
+ }
+
+ VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
+ VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);
+
+ VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
+
+ uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
+ gpu->queueIndex() != fInfo.fCurrentQueueFamily) {
+        // The image is still owned by its original queue family, so we need to transfer it into
+        // ours.
+ SkASSERT(!releaseFamilyQueue);
+ SkASSERT(fInfo.fCurrentQueueFamily == fInitialQueueFamily);
+
+ srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
+ dstQueueFamilyIndex = gpu->queueIndex();
+ fInfo.fCurrentQueueFamily = gpu->queueIndex();
+ } else if (releaseFamilyQueue) {
+ // We are releasing the image so we must transfer the image back to its original queue
+ // family.
+ srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
+ dstQueueFamilyIndex = fInitialQueueFamily;
+ fInfo.fCurrentQueueFamily = fInitialQueueFamily;
+ }
+
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ nullptr, // pNext
+ srcAccessMask, // srcAccessMask
+ dstAccessMask, // dstAccessMask
+ currentLayout, // oldLayout
+ newLayout, // newLayout
+ srcQueueFamilyIndex, // srcQueueFamilyIndex
+ dstQueueFamilyIndex, // dstQueueFamilyIndex
+ fInfo.fImage, // image
+ { aspectFlags, 0, fInfo.fLevelCount, 0, 1 } // subresourceRange
+ };
+
+ gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
+ &imageMemoryBarrier);
+
+ this->updateImageLayout(newLayout);
+}
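+
+// A typical call, preparing an image to be sampled in a fragment shader (a sketch
+// mirroring the flags GrVkGpu uses for cross-context textures):
+//
+//   image->setImageLayout(gpu,
+//                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+//                         VK_ACCESS_SHADER_READ_BIT,
+//                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+//                         false);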
+
+bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
+ if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
+ return false;
+ }
+ if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
+ return false;
+ }
+ VkImage image = VK_NULL_HANDLE;
+ GrVkAlloc alloc;
+
+ bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
+ VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
+ return false;
+ }
+
+ SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
+ VK_SAMPLE_COUNT_1_BIT == vkSamples);
+
+ VkImageCreateFlags createflags = 0;
+ if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
+ createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
+ }
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ nullptr, // pNext
+ createflags, // VkImageCreateFlags
+ imageDesc.fImageType, // VkImageType
+ imageDesc.fFormat, // VkFormat
+ { imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
+ imageDesc.fLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageDesc.fImageTiling, // VkImageTiling
+ imageDesc.fUsageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
+ &image));
+
+ if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
+ return false;
+ }
+
+ info->fImage = image;
+ info->fAlloc = alloc;
+ info->fImageTiling = imageDesc.fImageTiling;
+ info->fImageLayout = initialLayout;
+ info->fFormat = imageDesc.fFormat;
+ info->fLevelCount = imageDesc.fLevels;
+ info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
+ info->fProtected =
+ (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
+ return true;
+}
+
+void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
+ bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
+ GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
+}
+
+GrVkImage::~GrVkImage() {
+ // should have been released or abandoned first
+ SkASSERT(!fResource);
+}
+
+void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
+ VkImageLayout layout = this->currentLayout();
+ if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
+ fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
+ if (gpu->vkCaps().supportsSwapchain()) {
+ layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ }
+ }
+ this->setImageLayout(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false, true);
+}
+
+void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
+ this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
+ true);
+}
+
+void GrVkImage::releaseImage(GrVkGpu* gpu) {
+ if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
+ // The Vulkan spec is vague on what to put for the dstStageMask here. The spec for image
+ // memory barrier says the dstStageMask must not be zero. However, in the spec when it talks
+        // about queue family transfers it says the dstStageMask is ignored and should be set to
+ // zero. Assuming it really is ignored we set it to VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT here
+ // since it makes the Vulkan validation layers happy.
+ this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ false, true);
+ }
+ if (fResource) {
+ fResource->removeOwningTexture();
+ fResource->unref(gpu);
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::abandonImage() {
+ if (fResource) {
+ fResource->removeOwningTexture();
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
+ SkASSERT(fResource);
+ // Forward the release proc on to GrVkImage::Resource
+ fResource->setRelease(std::move(releaseHelper));
+}
+
+void GrVkImage::Resource::freeGPUData(GrVkGpu* gpu) const {
+ this->invokeReleaseProc();
+ VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
+ bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
+ GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
+}
+
+void GrVkImage::Resource::addIdleProc(GrVkTexture* owningTexture,
+ sk_sp<GrRefCntedCallback> idleProc) const {
+ SkASSERT(!fOwningTexture || fOwningTexture == owningTexture);
+ fOwningTexture = owningTexture;
+ fIdleProcs.push_back(std::move(idleProc));
+}
+
+int GrVkImage::Resource::idleProcCnt() const { return fIdleProcs.count(); }
+
+sk_sp<GrRefCntedCallback> GrVkImage::Resource::idleProc(int i) const { return fIdleProcs[i]; }
+
+void GrVkImage::Resource::resetIdleProcs() const { fIdleProcs.reset(); }
+
+void GrVkImage::Resource::removeOwningTexture() const { fOwningTexture = nullptr; }
+
+void GrVkImage::Resource::notifyAddedToCommandBuffer() const { ++fNumCommandBufferOwners; }
+
+void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
+ SkASSERT(fNumCommandBufferOwners);
+ if (--fNumCommandBufferOwners || !fIdleProcs.count()) {
+ return;
+ }
+ if (fOwningTexture) {
+ if (fOwningTexture->resourcePriv().hasRef()) {
+ // Wait for the texture to become idle in the cache to call the procs.
+ return;
+ }
+ fOwningTexture->callIdleProcsOnBehalfOfResource();
+ } else {
+ fIdleProcs.reset();
+ }
+}
+
+void GrVkImage::BorrowedResource::freeGPUData(GrVkGpu* gpu) const {
+ this->invokeReleaseProc();
+}
+
+void GrVkImage::BorrowedResource::abandonGPUData() const {
+ this->invokeReleaseProc();
+}
+
+#if GR_TEST_UTILS
+void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
+ fInfo.fCurrentQueueFamily = gpu->queueIndex();
+}
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImage.h b/gfx/skia/skia/src/gpu/vk/GrVkImage.h
new file mode 100644
index 0000000000..8f448639c6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImage.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkImage_DEFINED
+#define GrVkImage_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrTexture.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/vk/GrVkImageLayout.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkGpu;
+class GrVkTexture;
+
+class GrVkImage : SkNoncopyable {
+private:
+ class Resource;
+
+public:
+ GrVkImage(const GrVkImageInfo& info, sk_sp<GrVkImageLayout> layout,
+ GrBackendObjectOwnership ownership, bool forSecondaryCB = false)
+ : fInfo(info)
+ , fInitialQueueFamily(info.fCurrentQueueFamily)
+ , fLayout(std::move(layout))
+ , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
+ SkASSERT(fLayout->getImageLayout() == fInfo.fImageLayout);
+ if (forSecondaryCB) {
+ fResource = nullptr;
+ } else if (fIsBorrowed) {
+ fResource = new BorrowedResource(info.fImage, info.fAlloc, info.fImageTiling);
+ } else {
+ SkASSERT(VK_NULL_HANDLE != info.fAlloc.fMemory);
+ fResource = new Resource(info.fImage, info.fAlloc, info.fImageTiling);
+ }
+ }
+ virtual ~GrVkImage();
+
+ VkImage image() const {
+ // Should only be called when we have a real fResource object, i.e. never when being used as
+ // a RT in an external secondary command buffer.
+ SkASSERT(fResource);
+ return fInfo.fImage;
+ }
+ const GrVkAlloc& alloc() const {
+ // Should only be called when we have a real fResource object, i.e. never when being used as
+ // a RT in an external secondary command buffer.
+ SkASSERT(fResource);
+ return fInfo.fAlloc;
+ }
+ VkFormat imageFormat() const { return fInfo.fFormat; }
+ GrBackendFormat getBackendFormat() const {
+ if (fResource && this->ycbcrConversionInfo().isValid()) {
+ SkASSERT(this->imageFormat() == this->ycbcrConversionInfo().fFormat);
+ return GrBackendFormat::MakeVk(this->ycbcrConversionInfo());
+ }
+ SkASSERT(this->imageFormat() != VK_FORMAT_UNDEFINED);
+ return GrBackendFormat::MakeVk(this->imageFormat());
+ }
+ uint32_t mipLevels() const { return fInfo.fLevelCount; }
+ const GrVkYcbcrConversionInfo& ycbcrConversionInfo() const {
+ // Should only be called when we have a real fResource object, i.e. never when being used as
+ // a RT in an external secondary command buffer.
+ SkASSERT(fResource);
+ return fInfo.fYcbcrConversionInfo;
+ }
+ const Resource* resource() const {
+ SkASSERT(fResource);
+ return fResource;
+ }
+ bool isLinearTiled() const {
+ // Should only be called when we have a real fResource object, i.e. never when being used as
+ // a RT in an external secondary command buffer.
+ SkASSERT(fResource);
+ return SkToBool(VK_IMAGE_TILING_LINEAR == fInfo.fImageTiling);
+ }
+ bool isBorrowed() const { return fIsBorrowed; }
+
+ sk_sp<GrVkImageLayout> grVkImageLayout() const { return fLayout; }
+
+ VkImageLayout currentLayout() const {
+ return fLayout->getImageLayout();
+ }
+
+ void setImageLayout(const GrVkGpu* gpu,
+ VkImageLayout newLayout,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ bool releaseFamilyQueue = false);
+
+ // Returns the image to its original queue family and changes the layout to present if the queue
+ // family is not external or foreign.
+ void prepareForPresent(GrVkGpu* gpu);
+
+ // Returns the image to its original queue family
+ void prepareForExternal(GrVkGpu* gpu);
+
+ // This simply updates our tracking of the image layout and does not actually do any gpu work.
+ // This is only used for mip map generation where we are manually changing the layouts as we
+ // blit each layer, and then at the end need to update our tracking.
+ void updateImageLayout(VkImageLayout newLayout) {
+ // Should only be called when we have a real fResource object, i.e. never when being used as
+ // a RT in an external secondary command buffer.
+ SkASSERT(fResource);
+ fLayout->setImageLayout(newLayout);
+ }
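+    // For example, mipmap generation blits level by level with hand-written
+    // barriers and then reconciles the tracked layout once at the end (a sketch):
+    //
+    //   // ... per-level vkCmdBlitImage calls with explicit barriers ...
+    //   tex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);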
+
+ struct ImageDesc {
+ VkImageType fImageType;
+ VkFormat fFormat;
+ uint32_t fWidth;
+ uint32_t fHeight;
+ uint32_t fLevels;
+ uint32_t fSamples;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fUsageFlags;
+ VkFlags fMemProps;
+ GrProtected fIsProtected;
+
+ ImageDesc()
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ , fIsProtected(GrProtected::kNo) {}
+ };
+
+ static bool InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo*);
+ // Destroys the internal VkImage and VkDeviceMemory in the GrVkImageInfo
+ static void DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo*);
+
+    // These match the definitions in SkImage, from whence they came
+ typedef void* ReleaseCtx;
+ typedef void (*ReleaseProc)(ReleaseCtx);
+
+ void setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper);
+
+ // Helpers to use for setting the layout of the VkImage
+ static VkPipelineStageFlags LayoutToPipelineSrcStageFlags(const VkImageLayout layout);
+ static VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+
+#if GR_TEST_UTILS
+ void setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu);
+#endif
+
+protected:
+ void releaseImage(GrVkGpu* gpu);
+ void abandonImage();
+ bool hasResource() const { return fResource; }
+
+ GrVkImageInfo fInfo;
+ uint32_t fInitialQueueFamily;
+ sk_sp<GrVkImageLayout> fLayout;
+ bool fIsBorrowed;
+
+private:
+ class Resource : public GrVkResource {
+ public:
+ Resource()
+ : fImage(VK_NULL_HANDLE) {
+ fAlloc.fMemory = VK_NULL_HANDLE;
+ fAlloc.fOffset = 0;
+ }
+
+ Resource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling)
+ : fImage(image)
+ , fAlloc(alloc)
+ , fImageTiling(tiling) {}
+
+ ~Resource() override {
+ SkASSERT(!fReleaseHelper);
+ }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkImage: %d (%d refs)\n", fImage, this->getRefCnt());
+ }
+#endif
+ void setRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
+ fReleaseHelper = std::move(releaseHelper);
+ }
+
+ /**
+ * These are used to coordinate calling the "finished" idle procs between the GrVkTexture
+ * and the Resource. If the GrVkTexture becomes purgeable and if there are no command
+ * buffers referring to the Resource then it calls the procs. Otherwise, the Resource calls
+ * them when the last command buffer reference goes away and the GrVkTexture is purgeable.
+ */
+ void addIdleProc(GrVkTexture*, sk_sp<GrRefCntedCallback>) const;
+ int idleProcCnt() const;
+ sk_sp<GrRefCntedCallback> idleProc(int) const;
+ void resetIdleProcs() const;
+ void removeOwningTexture() const;
+
+ /**
+ * We track how many outstanding references this Resource has in command buffers and
+ * when the count reaches zero we call the idle proc.
+ */
+ void notifyAddedToCommandBuffer() const override;
+ void notifyRemovedFromCommandBuffer() const override;
+ bool isOwnedByCommandBuffer() const { return fNumCommandBufferOwners > 0; }
+
+ protected:
+ mutable sk_sp<GrRefCntedCallback> fReleaseHelper;
+
+ void invokeReleaseProc() const {
+ if (fReleaseHelper) {
+ // Depending on the ref count of fReleaseHelper this may or may not actually trigger
+ // the ReleaseProc to be called.
+ fReleaseHelper.reset();
+ }
+ }
+
+ private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+ void abandonGPUData() const override {
+ this->invokeReleaseProc();
+ SkASSERT(!fReleaseHelper);
+ }
+
+ VkImage fImage;
+ GrVkAlloc fAlloc;
+ VkImageTiling fImageTiling;
+ mutable int fNumCommandBufferOwners = 0;
+ mutable SkTArray<sk_sp<GrRefCntedCallback>> fIdleProcs;
+ mutable GrVkTexture* fOwningTexture = nullptr;
+
+ typedef GrVkResource INHERITED;
+ };
+
+ // for wrapped textures
+ class BorrowedResource : public Resource {
+ public:
+ BorrowedResource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling)
+ : Resource(image, alloc, tiling) {
+ }
+ private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+ void abandonGPUData() const override;
+ };
+
+ Resource* fResource;
+
+ friend class GrVkRenderTarget;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImageLayout.h b/gfx/skia/skia/src/gpu/vk/GrVkImageLayout.h
new file mode 100644
index 0000000000..154c21fb36
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImageLayout.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkImageLayout_DEFINED
+#define GrVkImageLayout_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrVkImageLayout : public SkRefCnt {
+public:
+ GrVkImageLayout(VkImageLayout layout) : fLayout(layout) {}
+
+ void setImageLayout(VkImageLayout layout) {
+ // Defaulting to use std::memory_order_seq_cst
+ fLayout.store(layout);
+ }
+
+ VkImageLayout getImageLayout() const {
+ // Defaulting to use std::memory_order_seq_cst
+ return fLayout.load();
+ }
+
+private:
+ std::atomic<VkImageLayout> fLayout;
+};
+
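+// Editor's note: an illustrative sketch, not part of the upstream sources. Because
+// the layout is stored in a seq_cst atomic, owners of the same VkImage can share a
+// single GrVkImageLayout and observe each other's transitions safely:
+//
+//   sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(VK_IMAGE_LAYOUT_UNDEFINED));
+//   layout->setImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+//   SkASSERT(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout->getImageLayout());
+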
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp b/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp
new file mode 100644
index 0000000000..ce7b1c5458
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp
@@ -0,0 +1,77 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+const GrVkImageView* GrVkImageView::Create(GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType, uint32_t miplevels,
+ const GrVkYcbcrConversionInfo& ycbcrInfo) {
+
+ void* pNext = nullptr;
+ VkSamplerYcbcrConversionInfo conversionInfo;
+ GrVkSamplerYcbcrConversion* ycbcrConversion = nullptr;
+
+ if (ycbcrInfo.isValid()) {
+ SkASSERT(gpu->vkCaps().supportsYcbcrConversion() && format == ycbcrInfo.fFormat);
+
+ ycbcrConversion =
+ gpu->resourceProvider().findOrCreateCompatibleSamplerYcbcrConversion(ycbcrInfo);
+ if (!ycbcrConversion) {
+ return nullptr;
+ }
+
+ conversionInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
+ conversionInfo.pNext = nullptr;
+ conversionInfo.conversion = ycbcrConversion->ycbcrConversion();
+ pNext = &conversionInfo;
+ }
+
+ VkImageView imageView;
+ // Create the VkImageView
+ VkImageViewCreateInfo viewInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
+ pNext, // pNext
+ 0, // flags
+ image, // image
+ VK_IMAGE_VIEW_TYPE_2D, // viewType
+ format, // format
+ { VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY }, // components
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, miplevels, 0, 1 }, // subresourceRange
+ };
+ if (kStencil_Type == viewType) {
+ viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateImageView(gpu->device(), &viewInfo,
+ nullptr, &imageView));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkImageView(imageView, ycbcrConversion);
+}
+
+void GrVkImageView::freeGPUData(GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
+
+ if (fYcbcrConversion) {
+ fYcbcrConversion->unref(gpu);
+ }
+}
+
+void GrVkImageView::abandonGPUData() const {
+ if (fYcbcrConversion) {
+ fYcbcrConversion->unrefAndAbandon();
+ }
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImageView.h b/gfx/skia/skia/src/gpu/vk/GrVkImageView.h
new file mode 100644
index 0000000000..12f3b21820
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImageView.h
@@ -0,0 +1,53 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkImageView_DEFINED
+#define GrVkImageView_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrVkSamplerYcbcrConversion;
+struct GrVkYcbcrConversionInfo;
+
+class GrVkImageView : public GrVkResource {
+public:
+ enum Type {
+ kColor_Type,
+ kStencil_Type
+ };
+
+ static const GrVkImageView* Create(GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType, uint32_t miplevels,
+ const GrVkYcbcrConversionInfo& ycbcrInfo);
+
+ VkImageView imageView() const { return fImageView; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkImageView: %d (%d refs)\n", fImageView, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkImageView(VkImageView imageView, GrVkSamplerYcbcrConversion* ycbcrConversion)
+ : INHERITED(), fImageView(imageView), fYcbcrConversion(ycbcrConversion) {}
+
+ GrVkImageView(const GrVkImageView&);
+ GrVkImageView& operator=(const GrVkImageView&);
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+ void abandonGPUData() const override;
+
+ VkImageView fImageView;
+ GrVkSamplerYcbcrConversion* fYcbcrConversion;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp
new file mode 100644
index 0000000000..c8dfd94ffd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkIndexBuffer.h"
+
+GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, GrGpuBufferType::kIndex,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+sk_sp<GrVkIndexBuffer> GrVkIndexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
+ desc.fType = GrVkBuffer::kIndex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkIndexBuffer* buffer = new GrVkIndexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return sk_sp<GrVkIndexBuffer>(buffer);
+}
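+
+// Editor's sketch of a hypothetical call site (not upstream code): index buffers
+// are created through the Make() factory above rather than the constructor, e.g.
+//
+//   sk_sp<GrVkIndexBuffer> ib = GrVkIndexBuffer::Make(gpu, 6 * sizeof(uint16_t),
+//                                                     /*dynamic=*/true);
+//   if (ib) {
+//       ib->updateData(indices, 6 * sizeof(uint16_t));  // inherited from GrGpuBuffer
+//   }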
+
+void GrVkIndexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkIndexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkIndexBuffer::onMap() { this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu()); }
+
+void GrVkIndexBuffer::onUnmap() { this->vkUnmap(this->getVkGpu()); }
+
+bool GrVkIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+}
+
+GrVkGpu* GrVkIndexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h
new file mode 100644
index 0000000000..bc29d2b1a7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkIndexBuffer_DEFINED
+#define GrVkIndexBuffer_DEFINED
+
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/vk/GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkIndexBuffer : public GrGpuBuffer, public GrVkBuffer {
+public:
+ static sk_sp<GrVkIndexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp b/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp
new file mode 100644
index 0000000000..acb7bcb82c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/vk/GrVkBackendContext.h"
+#include "include/gpu/vk/GrVkExtensions.h"
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#define ACQUIRE_PROC(name, instance, device) \
+ fFunctions.f##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))
+
+#define ACQUIRE_PROC_SUFFIX(name, suffix, instance, device) \
+ fFunctions.f##name = \
+ reinterpret_cast<PFN_vk##name##suffix>(getProc("vk" #name #suffix, instance, device))
+
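+// Editor's note: as an example of the macros above, the call
+// ACQUIRE_PROC(CreateDevice, instance, VK_NULL_HANDLE) expands to
+//
+//   fFunctions.fCreateDevice = reinterpret_cast<PFN_vkCreateDevice>(
+//           getProc("vkCreateDevice", instance, VK_NULL_HANDLE));
+//
+// so every entry point below is resolved through the single getProc callback.
+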
+GrVkInterface::GrVkInterface(GrVkGetProc getProc,
+ VkInstance instance,
+ VkDevice device,
+ uint32_t instanceVersion,
+ uint32_t physicalDeviceVersion,
+ const GrVkExtensions* extensions) {
+ if (getProc == nullptr) {
+ return;
+ }
+ // Global/Loader Procs.
+ ACQUIRE_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
+ ACQUIRE_PROC(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
+ ACQUIRE_PROC(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
+
+ // Instance Procs.
+ ACQUIRE_PROC(EnumeratePhysicalDevices, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceFeatures, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceFormatProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceImageFormatProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceSparseImageFormatProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(DestroyInstance, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(CreateDevice, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(DestroyDevice, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(EnumerateDeviceExtensionProperties, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(EnumerateDeviceLayerProperties, instance, VK_NULL_HANDLE);
+
+ // Device Procs.
+ ACQUIRE_PROC(GetDeviceQueue, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(QueueSubmit, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(QueueWaitIdle, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DeviceWaitIdle, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(AllocateMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(FreeMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(MapMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(UnmapMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(FlushMappedMemoryRanges, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(InvalidateMappedMemoryRanges, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetDeviceMemoryCommitment, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(BindBufferMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(BindImageMemory, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetBufferMemoryRequirements, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetImageMemoryRequirements, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetImageSparseMemoryRequirements, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(QueueBindSparse, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateFence, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyFence, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(ResetFences, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetFenceStatus, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(WaitForFences, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateSemaphore, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroySemaphore, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetEventStatus, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(SetEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(ResetEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateQueryPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyQueryPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetQueryPoolResults, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateBufferView, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyBufferView, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetImageSubresourceLayout, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateImageView, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyImageView, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateShaderModule, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyShaderModule, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreatePipelineCache, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyPipelineCache, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetPipelineCacheData, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(MergePipelineCaches, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateGraphicsPipelines, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateComputePipelines, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyPipeline, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreatePipelineLayout, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyPipelineLayout, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateSampler, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroySampler, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateDescriptorSetLayout, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyDescriptorSetLayout, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateDescriptorPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyDescriptorPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(ResetDescriptorPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(AllocateDescriptorSets, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(FreeDescriptorSets, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(UpdateDescriptorSets, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateFramebuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyFramebuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateRenderPass, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyRenderPass, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetRenderAreaGranularity, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CreateCommandPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroyCommandPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(ResetCommandPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(AllocateCommandBuffers, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(FreeCommandBuffers, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(BeginCommandBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(EndCommandBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(ResetCommandBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBindPipeline, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetViewport, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetScissor, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetLineWidth, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetDepthBias, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetBlendConstants, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetDepthBounds, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetStencilCompareMask, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetStencilWriteMask, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetStencilReference, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBindDescriptorSets, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBindIndexBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBindVertexBuffers, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDraw, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDrawIndexed, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDrawIndirect, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDrawIndexedIndirect, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDispatch, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdDispatchIndirect, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdCopyBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdCopyImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBlitImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdCopyBufferToImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdCopyImageToBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdUpdateBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdFillBuffer, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdClearColorImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdClearDepthStencilImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdClearAttachments, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdResolveImage, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdSetEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdResetEvent, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdWaitEvents, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdPipelineBarrier, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBeginQuery, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdEndQuery, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdResetQueryPool, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdWriteTimestamp, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdCopyQueryPoolResults, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdPushConstants, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdBeginRenderPass, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdNextSubpass, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdEndRenderPass, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(CmdExecuteCommands, VK_NULL_HANDLE, device);
+
+ // Functions for VK_KHR_get_physical_device_properties2
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(GetPhysicalDeviceFeatures2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceProperties2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceFormatProperties2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceImageFormatProperties2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties2, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC(GetPhysicalDeviceSparseImageFormatProperties2, instance, VK_NULL_HANDLE);
+ } else if (extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
+ 1)) {
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceFeatures2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceProperties2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceFormatProperties2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceImageFormatProperties2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceQueueFamilyProperties2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceMemoryProperties2, KHR, instance, VK_NULL_HANDLE);
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceSparseImageFormatProperties2, KHR, instance,
+ VK_NULL_HANDLE);
+ }
+
+ // Functions for VK_KHR_get_memory_requirements2
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(GetImageMemoryRequirements2, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetBufferMemoryRequirements2, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(GetImageSparseMemoryRequirements2, VK_NULL_HANDLE, device);
+ } else if (extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(GetImageMemoryRequirements2, KHR, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC_SUFFIX(GetBufferMemoryRequirements2, KHR, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC_SUFFIX(GetImageSparseMemoryRequirements2, KHR, VK_NULL_HANDLE, device);
+ }
+
+ // Functions for VK_KHR_bind_memory2
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(BindBufferMemory2, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(BindImageMemory2, VK_NULL_HANDLE, device);
+ } else if (extensions->hasExtension(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(BindBufferMemory2, KHR, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC_SUFFIX(BindImageMemory2, KHR, VK_NULL_HANDLE, device);
+ }
+
+ // Functions for VK_KHR_maintenance1 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(TrimCommandPool, VK_NULL_HANDLE, device);
+ } else if (extensions->hasExtension(VK_KHR_MAINTENANCE1_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(TrimCommandPool, KHR, VK_NULL_HANDLE, device);
+ }
+
+ // Functions for VK_KHR_maintenance3 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(GetDescriptorSetLayoutSupport, VK_NULL_HANDLE, device);
+ } else if (extensions->hasExtension(VK_KHR_MAINTENANCE3_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(GetDescriptorSetLayoutSupport, KHR, VK_NULL_HANDLE, device);
+ }
+
+ // Functions for VK_KHR_external_memory_capabilities
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(GetPhysicalDeviceExternalBufferProperties, instance, VK_NULL_HANDLE);
+ } else if (extensions->hasExtension(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(GetPhysicalDeviceExternalBufferProperties, KHR, instance,
+ VK_NULL_HANDLE);
+ }
+
+ // Functions for VK_KHR_sampler_ycbcr_conversion
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ ACQUIRE_PROC(CreateSamplerYcbcrConversion, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC(DestroySamplerYcbcrConversion, VK_NULL_HANDLE, device);
+ } else if (extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
+ ACQUIRE_PROC_SUFFIX(CreateSamplerYcbcrConversion, KHR, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC_SUFFIX(DestroySamplerYcbcrConversion, KHR, VK_NULL_HANDLE, device);
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+ // Functions for VK_ANDROID_external_memory_android_hardware_buffer
+ if (extensions->hasExtension(
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME, 2)) {
+ ACQUIRE_PROC_SUFFIX(GetAndroidHardwareBufferProperties, ANDROID, VK_NULL_HANDLE, device);
+ ACQUIRE_PROC_SUFFIX(GetMemoryAndroidHardwareBuffer, ANDROID, VK_NULL_HANDLE, device);
+ }
+#endif
+
+}
+
+#ifdef SK_DEBUG
+ static int kIsDebug = 1;
+#else
+ static int kIsDebug = 0;
+#endif
+
+#define RETURN_FALSE_INTERFACE \
+ if (kIsDebug) { SkDebugf("%s:%d GrVkInterface::validate() failed.\n", __FILE__, __LINE__); } \
+ return false;
+
+bool GrVkInterface::validate(uint32_t instanceVersion, uint32_t physicalDeviceVersion,
+ const GrVkExtensions* extensions) const {
+ // functions that are always required
+ if (nullptr == fFunctions.fCreateInstance ||
+ nullptr == fFunctions.fDestroyInstance ||
+ nullptr == fFunctions.fEnumeratePhysicalDevices ||
+ nullptr == fFunctions.fGetPhysicalDeviceFeatures ||
+ nullptr == fFunctions.fGetPhysicalDeviceFormatProperties ||
+ nullptr == fFunctions.fGetPhysicalDeviceImageFormatProperties ||
+ nullptr == fFunctions.fGetPhysicalDeviceProperties ||
+ nullptr == fFunctions.fGetPhysicalDeviceQueueFamilyProperties ||
+ nullptr == fFunctions.fGetPhysicalDeviceMemoryProperties ||
+ nullptr == fFunctions.fCreateDevice ||
+ nullptr == fFunctions.fDestroyDevice ||
+ nullptr == fFunctions.fEnumerateInstanceExtensionProperties ||
+ nullptr == fFunctions.fEnumerateDeviceExtensionProperties ||
+ nullptr == fFunctions.fEnumerateInstanceLayerProperties ||
+ nullptr == fFunctions.fEnumerateDeviceLayerProperties ||
+ nullptr == fFunctions.fGetDeviceQueue ||
+ nullptr == fFunctions.fQueueSubmit ||
+ nullptr == fFunctions.fQueueWaitIdle ||
+ nullptr == fFunctions.fDeviceWaitIdle ||
+ nullptr == fFunctions.fAllocateMemory ||
+ nullptr == fFunctions.fFreeMemory ||
+ nullptr == fFunctions.fMapMemory ||
+ nullptr == fFunctions.fUnmapMemory ||
+ nullptr == fFunctions.fFlushMappedMemoryRanges ||
+ nullptr == fFunctions.fInvalidateMappedMemoryRanges ||
+ nullptr == fFunctions.fGetDeviceMemoryCommitment ||
+ nullptr == fFunctions.fBindBufferMemory ||
+ nullptr == fFunctions.fBindImageMemory ||
+ nullptr == fFunctions.fGetBufferMemoryRequirements ||
+ nullptr == fFunctions.fGetImageMemoryRequirements ||
+ nullptr == fFunctions.fGetImageSparseMemoryRequirements ||
+ nullptr == fFunctions.fGetPhysicalDeviceSparseImageFormatProperties ||
+ nullptr == fFunctions.fQueueBindSparse ||
+ nullptr == fFunctions.fCreateFence ||
+ nullptr == fFunctions.fDestroyFence ||
+ nullptr == fFunctions.fResetFences ||
+ nullptr == fFunctions.fGetFenceStatus ||
+ nullptr == fFunctions.fWaitForFences ||
+ nullptr == fFunctions.fCreateSemaphore ||
+ nullptr == fFunctions.fDestroySemaphore ||
+ nullptr == fFunctions.fCreateEvent ||
+ nullptr == fFunctions.fDestroyEvent ||
+ nullptr == fFunctions.fGetEventStatus ||
+ nullptr == fFunctions.fSetEvent ||
+ nullptr == fFunctions.fResetEvent ||
+ nullptr == fFunctions.fCreateQueryPool ||
+ nullptr == fFunctions.fDestroyQueryPool ||
+ nullptr == fFunctions.fGetQueryPoolResults ||
+ nullptr == fFunctions.fCreateBuffer ||
+ nullptr == fFunctions.fDestroyBuffer ||
+ nullptr == fFunctions.fCreateBufferView ||
+ nullptr == fFunctions.fDestroyBufferView ||
+ nullptr == fFunctions.fCreateImage ||
+ nullptr == fFunctions.fDestroyImage ||
+ nullptr == fFunctions.fGetImageSubresourceLayout ||
+ nullptr == fFunctions.fCreateImageView ||
+ nullptr == fFunctions.fDestroyImageView ||
+ nullptr == fFunctions.fCreateShaderModule ||
+ nullptr == fFunctions.fDestroyShaderModule ||
+ nullptr == fFunctions.fCreatePipelineCache ||
+ nullptr == fFunctions.fDestroyPipelineCache ||
+ nullptr == fFunctions.fGetPipelineCacheData ||
+ nullptr == fFunctions.fMergePipelineCaches ||
+ nullptr == fFunctions.fCreateGraphicsPipelines ||
+ nullptr == fFunctions.fCreateComputePipelines ||
+ nullptr == fFunctions.fDestroyPipeline ||
+ nullptr == fFunctions.fCreatePipelineLayout ||
+ nullptr == fFunctions.fDestroyPipelineLayout ||
+ nullptr == fFunctions.fCreateSampler ||
+ nullptr == fFunctions.fDestroySampler ||
+ nullptr == fFunctions.fCreateDescriptorSetLayout ||
+ nullptr == fFunctions.fDestroyDescriptorSetLayout ||
+ nullptr == fFunctions.fCreateDescriptorPool ||
+ nullptr == fFunctions.fDestroyDescriptorPool ||
+ nullptr == fFunctions.fResetDescriptorPool ||
+ nullptr == fFunctions.fAllocateDescriptorSets ||
+ nullptr == fFunctions.fFreeDescriptorSets ||
+ nullptr == fFunctions.fUpdateDescriptorSets ||
+ nullptr == fFunctions.fCreateFramebuffer ||
+ nullptr == fFunctions.fDestroyFramebuffer ||
+ nullptr == fFunctions.fCreateRenderPass ||
+ nullptr == fFunctions.fDestroyRenderPass ||
+ nullptr == fFunctions.fGetRenderAreaGranularity ||
+ nullptr == fFunctions.fCreateCommandPool ||
+ nullptr == fFunctions.fDestroyCommandPool ||
+ nullptr == fFunctions.fResetCommandPool ||
+ nullptr == fFunctions.fAllocateCommandBuffers ||
+ nullptr == fFunctions.fFreeCommandBuffers ||
+ nullptr == fFunctions.fBeginCommandBuffer ||
+ nullptr == fFunctions.fEndCommandBuffer ||
+ nullptr == fFunctions.fResetCommandBuffer ||
+ nullptr == fFunctions.fCmdBindPipeline ||
+ nullptr == fFunctions.fCmdSetViewport ||
+ nullptr == fFunctions.fCmdSetScissor ||
+ nullptr == fFunctions.fCmdSetLineWidth ||
+ nullptr == fFunctions.fCmdSetDepthBias ||
+ nullptr == fFunctions.fCmdSetBlendConstants ||
+ nullptr == fFunctions.fCmdSetDepthBounds ||
+ nullptr == fFunctions.fCmdSetStencilCompareMask ||
+ nullptr == fFunctions.fCmdSetStencilWriteMask ||
+ nullptr == fFunctions.fCmdSetStencilReference ||
+ nullptr == fFunctions.fCmdBindDescriptorSets ||
+ nullptr == fFunctions.fCmdBindIndexBuffer ||
+ nullptr == fFunctions.fCmdBindVertexBuffers ||
+ nullptr == fFunctions.fCmdDraw ||
+ nullptr == fFunctions.fCmdDrawIndexed ||
+ nullptr == fFunctions.fCmdDrawIndirect ||
+ nullptr == fFunctions.fCmdDrawIndexedIndirect ||
+ nullptr == fFunctions.fCmdDispatch ||
+ nullptr == fFunctions.fCmdDispatchIndirect ||
+ nullptr == fFunctions.fCmdCopyBuffer ||
+ nullptr == fFunctions.fCmdCopyImage ||
+ nullptr == fFunctions.fCmdBlitImage ||
+ nullptr == fFunctions.fCmdCopyBufferToImage ||
+ nullptr == fFunctions.fCmdCopyImageToBuffer ||
+ nullptr == fFunctions.fCmdUpdateBuffer ||
+ nullptr == fFunctions.fCmdFillBuffer ||
+ nullptr == fFunctions.fCmdClearColorImage ||
+ nullptr == fFunctions.fCmdClearDepthStencilImage ||
+ nullptr == fFunctions.fCmdClearAttachments ||
+ nullptr == fFunctions.fCmdResolveImage ||
+ nullptr == fFunctions.fCmdSetEvent ||
+ nullptr == fFunctions.fCmdResetEvent ||
+ nullptr == fFunctions.fCmdWaitEvents ||
+ nullptr == fFunctions.fCmdPipelineBarrier ||
+ nullptr == fFunctions.fCmdBeginQuery ||
+ nullptr == fFunctions.fCmdEndQuery ||
+ nullptr == fFunctions.fCmdResetQueryPool ||
+ nullptr == fFunctions.fCmdWriteTimestamp ||
+ nullptr == fFunctions.fCmdCopyQueryPoolResults ||
+ nullptr == fFunctions.fCmdPushConstants ||
+ nullptr == fFunctions.fCmdBeginRenderPass ||
+ nullptr == fFunctions.fCmdNextSubpass ||
+ nullptr == fFunctions.fCmdEndRenderPass ||
+ nullptr == fFunctions.fCmdExecuteCommands) {
+ RETURN_FALSE_INTERFACE
+ }
+
+ // Functions for VK_KHR_get_physical_device_properties2 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fGetPhysicalDeviceFeatures2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceProperties2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceFormatProperties2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceImageFormatProperties2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceQueueFamilyProperties2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceMemoryProperties2 ||
+ nullptr == fFunctions.fGetPhysicalDeviceSparseImageFormatProperties2) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_get_memory_requirements2 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fGetImageMemoryRequirements2 ||
+ nullptr == fFunctions.fGetBufferMemoryRequirements2 ||
+ nullptr == fFunctions.fGetImageSparseMemoryRequirements2) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_bind_memory2
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fBindBufferMemory2 ||
+ nullptr == fFunctions.fBindImageMemory2) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_maintenance1 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_MAINTENANCE1_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fTrimCommandPool) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_maintenance3 or vulkan 1.1
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_MAINTENANCE3_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fGetDescriptorSetLayoutSupport) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_external_memory_capabilities
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fGetPhysicalDeviceExternalBufferProperties) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Functions for VK_KHR_sampler_ycbcr_conversion
+ if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
+ extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
+ if (nullptr == fFunctions.fCreateSamplerYcbcrConversion ||
+ nullptr == fFunctions.fDestroySamplerYcbcrConversion) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+ // Functions for VK_ANDROID_external_memory_android_hardware_buffer
+ if (extensions->hasExtension(
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME, 2)) {
+ if (nullptr == fFunctions.fGetAndroidHardwareBufferProperties ||
+ nullptr == fFunctions.fGetMemoryAndroidHardwareBuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+#endif
+
+ return true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkInterface.h b/gfx/skia/skia/src/gpu/vk/GrVkInterface.h
new file mode 100644
index 0000000000..53360ba9c4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkInterface.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkInterface_DEFINED
+#define GrVkInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#include "include/gpu/vk/GrVkBackendContext.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrVkExtensions;
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * GrContext uses the following interface to make all calls into Vulkan. When a
+ * GrContext is created it is given a GrVkInterface. All functions that should be
+ * available based on the Vulkan version must be non-NULL or GrContext creation
+ * will fail. This can be tested with the validate() method.
+ */
+struct GrVkInterface : public SkRefCnt {
+private:
+ // simple wrapper class that exists only to initialize a pointer to NULL
+ template <typename FNPTR_TYPE> class VkPtr {
+ public:
+ VkPtr() : fPtr(NULL) {}
+ VkPtr operator=(FNPTR_TYPE ptr) { fPtr = ptr; return *this; }
+ operator FNPTR_TYPE() const { return fPtr; }
+ private:
+ FNPTR_TYPE fPtr;
+ };
+
+ typedef SkRefCnt INHERITED;
+
+public:
+ GrVkInterface(GrVkGetProc getProc,
+ VkInstance instance,
+ VkDevice device,
+ uint32_t instanceVersion,
+ uint32_t physicalDeviceVersion,
+ const GrVkExtensions*);
+
+    // Validates that the GrVkInterface supports its advertised standard, i.e. that the necessary
+    // function pointers have been initialized for the given Vulkan version.
+ bool validate(uint32_t instanceVersion, uint32_t physicalDeviceVersion,
+ const GrVkExtensions*) const;
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ VkPtr<PFN_vkCreateInstance> fCreateInstance;
+ VkPtr<PFN_vkDestroyInstance> fDestroyInstance;
+ VkPtr<PFN_vkEnumeratePhysicalDevices> fEnumeratePhysicalDevices;
+ VkPtr<PFN_vkGetPhysicalDeviceFeatures> fGetPhysicalDeviceFeatures;
+ VkPtr<PFN_vkGetPhysicalDeviceFormatProperties> fGetPhysicalDeviceFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceImageFormatProperties> fGetPhysicalDeviceImageFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceProperties> fGetPhysicalDeviceProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceQueueFamilyProperties> fGetPhysicalDeviceQueueFamilyProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceMemoryProperties> fGetPhysicalDeviceMemoryProperties;
+ VkPtr<PFN_vkCreateDevice> fCreateDevice;
+ VkPtr<PFN_vkDestroyDevice> fDestroyDevice;
+ VkPtr<PFN_vkEnumerateInstanceExtensionProperties> fEnumerateInstanceExtensionProperties;
+ VkPtr<PFN_vkEnumerateDeviceExtensionProperties> fEnumerateDeviceExtensionProperties;
+ VkPtr<PFN_vkEnumerateInstanceLayerProperties> fEnumerateInstanceLayerProperties;
+ VkPtr<PFN_vkEnumerateDeviceLayerProperties> fEnumerateDeviceLayerProperties;
+ VkPtr<PFN_vkGetDeviceQueue> fGetDeviceQueue;
+ VkPtr<PFN_vkQueueSubmit> fQueueSubmit;
+ VkPtr<PFN_vkQueueWaitIdle> fQueueWaitIdle;
+ VkPtr<PFN_vkDeviceWaitIdle> fDeviceWaitIdle;
+ VkPtr<PFN_vkAllocateMemory> fAllocateMemory;
+ VkPtr<PFN_vkFreeMemory> fFreeMemory;
+ VkPtr<PFN_vkMapMemory> fMapMemory;
+ VkPtr<PFN_vkUnmapMemory> fUnmapMemory;
+ VkPtr<PFN_vkFlushMappedMemoryRanges> fFlushMappedMemoryRanges;
+ VkPtr<PFN_vkInvalidateMappedMemoryRanges> fInvalidateMappedMemoryRanges;
+ VkPtr<PFN_vkGetDeviceMemoryCommitment> fGetDeviceMemoryCommitment;
+ VkPtr<PFN_vkBindBufferMemory> fBindBufferMemory;
+ VkPtr<PFN_vkBindImageMemory> fBindImageMemory;
+ VkPtr<PFN_vkGetBufferMemoryRequirements> fGetBufferMemoryRequirements;
+ VkPtr<PFN_vkGetImageMemoryRequirements> fGetImageMemoryRequirements;
+ VkPtr<PFN_vkGetImageSparseMemoryRequirements> fGetImageSparseMemoryRequirements;
+ VkPtr<PFN_vkGetPhysicalDeviceSparseImageFormatProperties> fGetPhysicalDeviceSparseImageFormatProperties;
+ VkPtr<PFN_vkQueueBindSparse> fQueueBindSparse;
+ VkPtr<PFN_vkCreateFence> fCreateFence;
+ VkPtr<PFN_vkDestroyFence> fDestroyFence;
+ VkPtr<PFN_vkResetFences> fResetFences;
+ VkPtr<PFN_vkGetFenceStatus> fGetFenceStatus;
+ VkPtr<PFN_vkWaitForFences> fWaitForFences;
+ VkPtr<PFN_vkCreateSemaphore> fCreateSemaphore;
+ VkPtr<PFN_vkDestroySemaphore> fDestroySemaphore;
+ VkPtr<PFN_vkCreateEvent> fCreateEvent;
+ VkPtr<PFN_vkDestroyEvent> fDestroyEvent;
+ VkPtr<PFN_vkGetEventStatus> fGetEventStatus;
+ VkPtr<PFN_vkSetEvent> fSetEvent;
+ VkPtr<PFN_vkResetEvent> fResetEvent;
+ VkPtr<PFN_vkCreateQueryPool> fCreateQueryPool;
+ VkPtr<PFN_vkDestroyQueryPool> fDestroyQueryPool;
+ VkPtr<PFN_vkGetQueryPoolResults> fGetQueryPoolResults;
+ VkPtr<PFN_vkCreateBuffer> fCreateBuffer;
+ VkPtr<PFN_vkDestroyBuffer> fDestroyBuffer;
+ VkPtr<PFN_vkCreateBufferView> fCreateBufferView;
+ VkPtr<PFN_vkDestroyBufferView> fDestroyBufferView;
+ VkPtr<PFN_vkCreateImage> fCreateImage;
+ VkPtr<PFN_vkDestroyImage> fDestroyImage;
+ VkPtr<PFN_vkGetImageSubresourceLayout> fGetImageSubresourceLayout;
+ VkPtr<PFN_vkCreateImageView> fCreateImageView;
+ VkPtr<PFN_vkDestroyImageView> fDestroyImageView;
+ VkPtr<PFN_vkCreateShaderModule> fCreateShaderModule;
+ VkPtr<PFN_vkDestroyShaderModule> fDestroyShaderModule;
+ VkPtr<PFN_vkCreatePipelineCache> fCreatePipelineCache;
+ VkPtr<PFN_vkDestroyPipelineCache> fDestroyPipelineCache;
+ VkPtr<PFN_vkGetPipelineCacheData> fGetPipelineCacheData;
+ VkPtr<PFN_vkMergePipelineCaches> fMergePipelineCaches;
+ VkPtr<PFN_vkCreateGraphicsPipelines> fCreateGraphicsPipelines;
+ VkPtr<PFN_vkCreateComputePipelines> fCreateComputePipelines;
+ VkPtr<PFN_vkDestroyPipeline> fDestroyPipeline;
+ VkPtr<PFN_vkCreatePipelineLayout> fCreatePipelineLayout;
+ VkPtr<PFN_vkDestroyPipelineLayout> fDestroyPipelineLayout;
+ VkPtr<PFN_vkCreateSampler> fCreateSampler;
+ VkPtr<PFN_vkDestroySampler> fDestroySampler;
+ VkPtr<PFN_vkCreateDescriptorSetLayout> fCreateDescriptorSetLayout;
+ VkPtr<PFN_vkDestroyDescriptorSetLayout> fDestroyDescriptorSetLayout;
+ VkPtr<PFN_vkCreateDescriptorPool> fCreateDescriptorPool;
+ VkPtr<PFN_vkDestroyDescriptorPool> fDestroyDescriptorPool;
+ VkPtr<PFN_vkResetDescriptorPool> fResetDescriptorPool;
+ VkPtr<PFN_vkAllocateDescriptorSets> fAllocateDescriptorSets;
+ VkPtr<PFN_vkFreeDescriptorSets> fFreeDescriptorSets;
+ VkPtr<PFN_vkUpdateDescriptorSets> fUpdateDescriptorSets;
+ VkPtr<PFN_vkCreateFramebuffer> fCreateFramebuffer;
+ VkPtr<PFN_vkDestroyFramebuffer> fDestroyFramebuffer;
+ VkPtr<PFN_vkCreateRenderPass> fCreateRenderPass;
+ VkPtr<PFN_vkDestroyRenderPass> fDestroyRenderPass;
+ VkPtr<PFN_vkGetRenderAreaGranularity> fGetRenderAreaGranularity;
+ VkPtr<PFN_vkCreateCommandPool> fCreateCommandPool;
+ VkPtr<PFN_vkDestroyCommandPool> fDestroyCommandPool;
+ VkPtr<PFN_vkResetCommandPool> fResetCommandPool;
+ VkPtr<PFN_vkAllocateCommandBuffers> fAllocateCommandBuffers;
+ VkPtr<PFN_vkFreeCommandBuffers> fFreeCommandBuffers;
+ VkPtr<PFN_vkBeginCommandBuffer> fBeginCommandBuffer;
+ VkPtr<PFN_vkEndCommandBuffer> fEndCommandBuffer;
+ VkPtr<PFN_vkResetCommandBuffer> fResetCommandBuffer;
+ VkPtr<PFN_vkCmdBindPipeline> fCmdBindPipeline;
+ VkPtr<PFN_vkCmdSetViewport> fCmdSetViewport;
+ VkPtr<PFN_vkCmdSetScissor> fCmdSetScissor;
+ VkPtr<PFN_vkCmdSetLineWidth> fCmdSetLineWidth;
+ VkPtr<PFN_vkCmdSetDepthBias> fCmdSetDepthBias;
+ VkPtr<PFN_vkCmdSetBlendConstants> fCmdSetBlendConstants;
+ VkPtr<PFN_vkCmdSetDepthBounds> fCmdSetDepthBounds;
+ VkPtr<PFN_vkCmdSetStencilCompareMask> fCmdSetStencilCompareMask;
+ VkPtr<PFN_vkCmdSetStencilWriteMask> fCmdSetStencilWriteMask;
+ VkPtr<PFN_vkCmdSetStencilReference> fCmdSetStencilReference;
+ VkPtr<PFN_vkCmdBindDescriptorSets> fCmdBindDescriptorSets;
+ VkPtr<PFN_vkCmdBindIndexBuffer> fCmdBindIndexBuffer;
+ VkPtr<PFN_vkCmdBindVertexBuffers> fCmdBindVertexBuffers;
+ VkPtr<PFN_vkCmdDraw> fCmdDraw;
+ VkPtr<PFN_vkCmdDrawIndexed> fCmdDrawIndexed;
+ VkPtr<PFN_vkCmdDrawIndirect> fCmdDrawIndirect;
+ VkPtr<PFN_vkCmdDrawIndexedIndirect> fCmdDrawIndexedIndirect;
+ VkPtr<PFN_vkCmdDispatch> fCmdDispatch;
+ VkPtr<PFN_vkCmdDispatchIndirect> fCmdDispatchIndirect;
+ VkPtr<PFN_vkCmdCopyBuffer> fCmdCopyBuffer;
+ VkPtr<PFN_vkCmdCopyImage> fCmdCopyImage;
+ VkPtr<PFN_vkCmdBlitImage> fCmdBlitImage;
+ VkPtr<PFN_vkCmdCopyBufferToImage> fCmdCopyBufferToImage;
+ VkPtr<PFN_vkCmdCopyImageToBuffer> fCmdCopyImageToBuffer;
+ VkPtr<PFN_vkCmdUpdateBuffer> fCmdUpdateBuffer;
+ VkPtr<PFN_vkCmdFillBuffer> fCmdFillBuffer;
+ VkPtr<PFN_vkCmdClearColorImage> fCmdClearColorImage;
+ VkPtr<PFN_vkCmdClearDepthStencilImage> fCmdClearDepthStencilImage;
+ VkPtr<PFN_vkCmdClearAttachments> fCmdClearAttachments;
+ VkPtr<PFN_vkCmdResolveImage> fCmdResolveImage;
+ VkPtr<PFN_vkCmdSetEvent> fCmdSetEvent;
+ VkPtr<PFN_vkCmdResetEvent> fCmdResetEvent;
+ VkPtr<PFN_vkCmdWaitEvents> fCmdWaitEvents;
+ VkPtr<PFN_vkCmdPipelineBarrier> fCmdPipelineBarrier;
+ VkPtr<PFN_vkCmdBeginQuery> fCmdBeginQuery;
+ VkPtr<PFN_vkCmdEndQuery> fCmdEndQuery;
+ VkPtr<PFN_vkCmdResetQueryPool> fCmdResetQueryPool;
+ VkPtr<PFN_vkCmdWriteTimestamp> fCmdWriteTimestamp;
+ VkPtr<PFN_vkCmdCopyQueryPoolResults> fCmdCopyQueryPoolResults;
+ VkPtr<PFN_vkCmdPushConstants> fCmdPushConstants;
+ VkPtr<PFN_vkCmdBeginRenderPass> fCmdBeginRenderPass;
+ VkPtr<PFN_vkCmdNextSubpass> fCmdNextSubpass;
+ VkPtr<PFN_vkCmdEndRenderPass> fCmdEndRenderPass;
+ VkPtr<PFN_vkCmdExecuteCommands> fCmdExecuteCommands;
+
+ // Functions for VK_KHR_get_physical_device_properties2 or vulkan 1.1
+ VkPtr<PFN_vkGetPhysicalDeviceFeatures2> fGetPhysicalDeviceFeatures2;
+ VkPtr<PFN_vkGetPhysicalDeviceProperties2> fGetPhysicalDeviceProperties2;
+ VkPtr<PFN_vkGetPhysicalDeviceFormatProperties2> fGetPhysicalDeviceFormatProperties2;
+ VkPtr<PFN_vkGetPhysicalDeviceImageFormatProperties2> fGetPhysicalDeviceImageFormatProperties2;
+ VkPtr<PFN_vkGetPhysicalDeviceQueueFamilyProperties2> fGetPhysicalDeviceQueueFamilyProperties2;
+ VkPtr<PFN_vkGetPhysicalDeviceMemoryProperties2> fGetPhysicalDeviceMemoryProperties2;
+ VkPtr<PFN_vkGetPhysicalDeviceSparseImageFormatProperties2> fGetPhysicalDeviceSparseImageFormatProperties2;
+
+ // Functions for VK_KHR_get_memory_requirements2 or vulkan 1.1
+ VkPtr<PFN_vkGetImageMemoryRequirements2> fGetImageMemoryRequirements2;
+ VkPtr<PFN_vkGetBufferMemoryRequirements2> fGetBufferMemoryRequirements2;
+ VkPtr<PFN_vkGetImageSparseMemoryRequirements2> fGetImageSparseMemoryRequirements2;
+
+    // Functions for VK_KHR_bind_memory2
+ VkPtr<PFN_vkBindBufferMemory2> fBindBufferMemory2;
+ VkPtr<PFN_vkBindImageMemory2> fBindImageMemory2;
+
+ // Functions for VK_KHR_maintenance1 or vulkan 1.1
+ VkPtr<PFN_vkTrimCommandPool> fTrimCommandPool;
+
+ // Functions for VK_KHR_maintenance3 or vulkan 1.1
+ VkPtr<PFN_vkGetDescriptorSetLayoutSupport> fGetDescriptorSetLayoutSupport;
+
+ // Functions for VK_KHR_external_memory_capabilities
+ VkPtr<PFN_vkGetPhysicalDeviceExternalBufferProperties> fGetPhysicalDeviceExternalBufferProperties;
+
+ // Functions for YCBCRConversion
+ VkPtr<PFN_vkCreateSamplerYcbcrConversion> fCreateSamplerYcbcrConversion;
+ VkPtr<PFN_vkDestroySamplerYcbcrConversion> fDestroySamplerYcbcrConversion;
+
+#ifdef SK_BUILD_FOR_ANDROID
+ // Functions for VK_ANDROID_external_memory_android_hardware_buffer
+ VkPtr<PFN_vkGetAndroidHardwareBufferPropertiesANDROID> fGetAndroidHardwareBufferProperties;
+ VkPtr<PFN_vkGetMemoryAndroidHardwareBufferANDROID> fGetMemoryAndroidHardwareBuffer;
+#endif
+
+ } fFunctions;
+};
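+
+// Editor's sketch of typical construction and validation; the handles, versions,
+// and extensions here are placeholders supplied by the embedder (e.g. via
+// GrVkBackendContext):
+//
+//   sk_sp<GrVkInterface> iface(new GrVkInterface(getProc, instance, device,
+//                                                instanceVersion, physicalDeviceVersion,
+//                                                &extensions));
+//   if (!iface->validate(instanceVersion, physicalDeviceVersion, &extensions)) {
+//       return nullptr;  // a required entry point was not resolved
+//   }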
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp b/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp
new file mode 100644
index 0000000000..ed14a643ec
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp
@@ -0,0 +1,230 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkMemory.h"
+
+#include "include/gpu/vk/GrVkMemoryAllocator.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
+using BufferUsage = GrVkMemoryAllocator::BufferUsage;
+
+static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
+ switch (type) {
+ case GrVkBuffer::kVertex_Type: // fall through
+ case GrVkBuffer::kIndex_Type: // fall through
+ case GrVkBuffer::kTexel_Type:
+ return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
+ case GrVkBuffer::kUniform_Type:
+ SkASSERT(dynamic);
+ return BufferUsage::kCpuWritesGpuReads;
+ case GrVkBuffer::kCopyRead_Type: // fall through
+ case GrVkBuffer::kCopyWrite_Type:
+ return BufferUsage::kCpuOnly;
+ }
+ SK_ABORT("Invalid GrVkBuffer::Type");
+}
+
+bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ GrVkBuffer::Type type,
+ bool dynamic,
+ GrVkAlloc* alloc) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ GrVkBackendMemory memory = 0;
+
+ GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);
+
+ AllocationPropertyFlags propFlags;
+ if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
+        // In general it is fine (and often better) to keep buffers persistently mapped.
+        // TODO: AMD's guide for the VulkanMemoryAllocator suggests two cases where keeping a
+        // buffer mapped can hurt. The first is running on Windows 7 or 8 (Windows 10 is fine);
+        // by the time Vulkan ships widely, machines older than Windows 10 should be increasingly
+        // rare. The second is running on an AMD card while using the special GPU-local,
+        // host-mappable memory. We generally don't pick that memory, however, since we've found
+        // it slower than the cached host-visible memory. If we ever need to special-case either
+        // issue, we can add checks for it here.
+ propFlags = AllocationPropertyFlags::kPersistentlyMapped;
+ } else {
+ propFlags = AllocationPropertyFlags::kNone;
+ }
+
+ if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
+ return false;
+ }
+ allocator->getAllocInfo(memory, alloc);
+
+ // Bind buffer
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
+ alloc->fMemory,
+ alloc->fOffset));
+ if (err) {
+ FreeBufferMemory(gpu, type, *alloc);
+ return false;
+ }
+
+ return true;
+}
+
+void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
+ const GrVkAlloc& alloc) {
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ allocator->freeMemory(alloc.fBackendMemory);
+ } else {
+ GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
+ }
+}
+
+const VkDeviceSize kMaxSmallImageSize = 256 * 1024;
+
+bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ bool linearTiling,
+ GrVkAlloc* alloc) {
+ SkASSERT(!linearTiling);
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ GrVkBackendMemory memory = 0;
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));
+
+ AllocationPropertyFlags propFlags;
+ if (memReqs.size > kMaxSmallImageSize ||
+ gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
+ propFlags = AllocationPropertyFlags::kDedicatedAllocation;
+ } else {
+ propFlags = AllocationPropertyFlags::kNone;
+ }
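+    // Editor's note, a worked example: a 512x512 RGBA8 image needs at least
+    // 512 * 512 * 4 = 1,048,576 bytes, which exceeds kMaxSmallImageSize (256 * 1024),
+    // so it takes the dedicated-allocation branch above.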
+
+ if (gpu->protectedContext()) {
+ propFlags |= AllocationPropertyFlags::kProtected;
+ }
+
+ if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
+ return false;
+ }
+ allocator->getAllocInfo(memory, alloc);
+
+    // Bind image
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
+ alloc->fMemory, alloc->fOffset));
+ if (err) {
+ FreeImageMemory(gpu, linearTiling, *alloc);
+ return false;
+ }
+
+ return true;
+}
+
+void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
+ const GrVkAlloc& alloc) {
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ allocator->freeMemory(alloc.fBackendMemory);
+ } else {
+ GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
+ }
+}
+
+void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+ SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
+#ifdef SK_DEBUG
+ if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ SkASSERT(0 == (alloc.fOffset & (alignment-1)));
+ SkASSERT(0 == (alloc.fSize & (alignment-1)));
+ }
+#endif
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ return allocator->mapMemory(alloc.fBackendMemory);
+ }
+
+ void* mapPtr;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
+ alloc.fOffset,
+ alloc.fSize, 0, &mapPtr));
+ if (err) {
+ mapPtr = nullptr;
+ }
+ return mapPtr;
+}
+
+void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ allocator->unmapMemory(alloc.fBackendMemory);
+ } else {
+ GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
+ }
+}
+
+void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size, VkDeviceSize alignment,
+ VkMappedMemoryRange* range) {
+ SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
+ offset = offset + alloc.fOffset;
+    VkDeviceSize offsetDiff = offset & (alignment - 1);
+ offset = offset - offsetDiff;
+ size = (size + alignment - 1) & ~(alignment - 1);
+#ifdef SK_DEBUG
+ SkASSERT(offset >= alloc.fOffset);
+ SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+ SkASSERT(0 == (offset & (alignment-1)));
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)));
+#endif
+
+ memset(range, 0, sizeof(VkMappedMemoryRange));
+ range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ range->memory = alloc.fMemory;
+ range->offset = offset;
+ range->size = size;
+}
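+
+// Editor's note, a worked example of the rounding above: with alloc.fOffset = 0,
+// offset = 10, size = 20, and alignment (the device's nonCoherentAtomSize) = 16,
+// the absolute offset 10 rounds down to 0 (offsetDiff = 10) and the size rounds up
+// to 32, so the range [0, 32) covers the written bytes [10, 30). This assumes
+// alloc.fSize >= 32 so the debug asserts hold.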
+
+void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size) {
+ if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+ SkASSERT(offset == 0);
+ SkASSERT(size <= alloc.fSize);
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
+ } else {
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ VkMappedMemoryRange mappedMemoryRange;
+ GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
+ &mappedMemoryRange);
+ GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
+ &mappedMemoryRange));
+ }
+ }
+}
+
+void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
+ VkDeviceSize offset, VkDeviceSize size) {
+ if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+ SkASSERT(offset == 0);
+ SkASSERT(size <= alloc.fSize);
+ if (alloc.fBackendMemory) {
+ GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+ allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
+ } else {
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ VkMappedMemoryRange mappedMemoryRange;
+ GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
+ &mappedMemoryRange);
+ GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
+ &mappedMemoryRange));
+ }
+ }
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkMemory.h b/gfx/skia/skia/src/gpu/vk/GrVkMemory.h
new file mode 100644
index 0000000000..7244da053c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkMemory.h
@@ -0,0 +1,56 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkMemory_DEFINED
+#define GrVkMemory_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/gpu/vk/GrVkBuffer.h"
+
+class GrVkGpu;
+
+namespace GrVkMemory {
+ /**
+ * Allocates vulkan device memory and binds it to the gpu's device for the given object.
+ * Returns true if allocation succeeded.
+ */
+ bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ GrVkBuffer::Type type,
+ bool dynamic,
+ GrVkAlloc* alloc);
+ void FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type, const GrVkAlloc& alloc);
+
+ bool AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ bool linearTiling,
+ GrVkAlloc* alloc);
+ void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);
+
+ // Maps the entire GrVkAlloc and returns a pointer to the start of the allocation. Underneath
+ // the hood, we may map more than the range of the GrVkAlloc (e.g. the entire VkDeviceMemory),
+ // but the pointer returned will always be to the start of the GrVkAlloc. The caller should also
+ // never assume more than the GrVkAlloc block has been mapped.
+ void* MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+ void UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+
+ // For the Flush and Invalidate calls, the offset should be relative to the GrVkAlloc. Thus this
+ // will often be 0. The client does not need to make sure the offset and size are aligned to the
+ // nonCoherentAtomSize, the internal calls will handle that.
+ void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
+ void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
+
+ // Helper for aligning and setting VkMappedMemoryRange for flushing/invalidating noncoherent
+ // memory.
+ void GetNonCoherentMappedMemoryRange(const GrVkAlloc&, VkDeviceSize offset, VkDeviceSize size,
+ VkDeviceSize alignment, VkMappedMemoryRange*);
+}
+
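+// Editor's sketch of the typical CPU-write sequence these helpers support (gpu,
+// alloc, src, offset, and size are placeholders for the caller's values):
+//
+//   void* ptr = GrVkMemory::MapAlloc(gpu, alloc);
+//   if (ptr) {
+//       // offset is relative to the start of the GrVkAlloc, per the comment above
+//       memcpy(static_cast<char*>(ptr) + offset, src, size);
+//       GrVkMemory::FlushMappedAlloc(gpu, alloc, offset, size);
+//       GrVkMemory::UnmapAlloc(gpu, alloc);
+//   }
+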
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.cpp b/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.cpp
new file mode 100644
index 0000000000..c58c412409
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.cpp
@@ -0,0 +1,662 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkOpsRenderPass.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkRect.h"
+#include "include/gpu/GrBackendDrawableInfo.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrFixedClip.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkCommandPool.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+#include "src/gpu/vk/GrVkSemaphore.h"
+#include "src/gpu/vk/GrVkTexture.h"
+
+/////////////////////////////////////////////////////////////////////////////
+
+void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
+ VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
+ switch (loadOpIn) {
+ case GrLoadOp::kLoad:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ break;
+ case GrLoadOp::kClear:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ break;
+ case GrLoadOp::kDiscard:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ break;
+ default:
+ SK_ABORT("Invalid LoadOp");
+ *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ }
+
+ switch (storeOpIn) {
+ case GrStoreOp::kStore:
+ *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ break;
+ case GrStoreOp::kDiscard:
+ *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ break;
+ default:
+ SK_ABORT("Invalid StoreOp");
+ *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ }
+}
+
+GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}
+
+void GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkPMColor4f& clearColor) {
+
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
+ &loadOp, &storeOp);
+ GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);
+
+ get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
+ &loadOp, &storeOp);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+ GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;
+
+ // Change layout of our render target so it can be used as the color attachment.
+ // TODO: If we know that we will never be blending or loading the attachment we could drop the
+ // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
+ targetImage->setImageLayout(fGpu,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ false);
+
+ // If we are using a stencil attachment we also need to update its layout
+ if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
+ GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+ // We need the write and read access bits since we may load and store the stencil.
+ // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
+ // wait there.
+ vkStencil->setImageLayout(fGpu,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
+ false);
+ }
+
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle = vkRT->compatibleRenderPassHandle();
+ if (rpHandle.isValid()) {
+ fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
+ vkColorOps,
+ vkStencilOps);
+ }
+ SkASSERT(fCurrentRenderPass);
+
+ VkClearValue vkClearColor;
+ vkClearColor.color.float32[0] = clearColor[0];
+ vkClearColor.color.float32[1] = clearColor[1];
+ vkClearColor.color.float32[2] = clearColor[2];
+ vkClearColor.color.float32[3] = clearColor[3];
+
+ if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
+ fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
+ fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->framebuffer(), fCurrentRenderPass);
+ }
+
+ fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
+ SkToBool(fCurrentSecondaryCommandBuffer));
+}
+
+void GrVkOpsRenderPass::initWrapped() {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+ SkASSERT(vkRT->wrapsSecondaryCommandBuffer());
+ fCurrentRenderPass = vkRT->externalRenderPass();
+ SkASSERT(fCurrentRenderPass);
+ fCurrentRenderPass->ref();
+
+ fCurrentSecondaryCommandBuffer.reset(
+ GrVkSecondaryCommandBuffer::Create(vkRT->getExternalSecondaryCommandBuffer()));
+ fCurrentSecondaryCommandBuffer->begin(fGpu, nullptr, fCurrentRenderPass);
+}
+
+GrVkOpsRenderPass::~GrVkOpsRenderPass() {
+ this->reset();
+}
+
+GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }
+
+GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
+ if (fCurrentSecondaryCommandBuffer) {
+ return fCurrentSecondaryCommandBuffer.get();
+ }
+ return fGpu->currentCommandBuffer();
+}
+
+void GrVkOpsRenderPass::end() {
+ if (fCurrentSecondaryCommandBuffer) {
+ fCurrentSecondaryCommandBuffer->end(fGpu);
+ }
+}
+
+void GrVkOpsRenderPass::submit() {
+ if (!fRenderTarget) {
+ return;
+ }
+
+ // We don't want to actually submit the secondary command buffer if it is wrapped.
+ if (this->wrapsSecondaryCommandBuffer()) {
+ return;
+ }
+
+ if (fCurrentSecondaryCommandBuffer) {
+ fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
+ }
+ fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
+}
+
+void GrVkOpsRenderPass::set(GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies) {
+ SkASSERT(!fRenderTarget);
+ SkASSERT(fGpu == rt->getContext()->priv().getGpu());
+
+#ifdef SK_DEBUG
+ fIsActive = true;
+#endif
+
+ this->INHERITED::set(rt, origin);
+
+ for (int i = 0; i < sampledProxies.count(); ++i) {
+ if (sampledProxies[i]->isInstantiated()) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
+ SkASSERT(vkTex);
+ vkTex->setImageLayout(
+ fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
+ }
+ }
+
+ SkASSERT(bounds.isEmpty() || SkIRect::MakeWH(rt->width(), rt->height()).contains(bounds));
+ fBounds = bounds;
+
+ if (this->wrapsSecondaryCommandBuffer()) {
+ this->initWrapped();
+ return;
+ }
+
+ this->init(colorInfo, stencilInfo, colorInfo.fClearColor);
+}
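+// Note on set() above: render targets that wrap an external secondary command buffer skip init()
+// entirely and take the initWrapped() path, which reuses the externally provided render pass.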
+
+void GrVkOpsRenderPass::reset() {
+ if (fCurrentSecondaryCommandBuffer) {
+ fCurrentSecondaryCommandBuffer.release()->recycle(fGpu);
+ }
+ if (fCurrentRenderPass) {
+ fCurrentRenderPass->unref(fGpu);
+ fCurrentRenderPass = nullptr;
+ }
+ fCurrentCBIsEmpty = true;
+
+ fRenderTarget = nullptr;
+
+#ifdef SK_DEBUG
+ fIsActive = false;
+#endif
+}
+
+bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+ return vkRT->wrapsSecondaryCommandBuffer();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkOpsRenderPass::insertEventMarker(const char* msg) {
+ // TODO: does Vulkan have a correlate?
+}
+
+void GrVkOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ SkASSERT(!clip.hasWindowRectangles());
+
+ GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
+    // This should only be called internally when we know we have a stencil buffer.
+ SkASSERT(sb);
+ int stencilBitCount = sb->bits();
+
+ // The contract with the callers does not guarantee that we preserve all bits in the stencil
+ // during this clear. Thus we will clear the entire stencil to the desired value.
+
+ VkClearDepthStencilValue vkStencilColor;
+ memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
+ if (insideStencilMask) {
+ vkStencilColor.stencil = (1 << (stencilBitCount - 1));
+ } else {
+ vkStencilColor.stencil = 0;
+ }
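+    // For example, with an 8-bit stencil buffer this clears to 0x80 (1 << 7) inside the mask
+    // and to 0 outside it.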
+
+ VkClearRect clearRect;
+ // Flip rect if necessary
+ SkIRect vkRect;
+ if (!clip.scissorEnabled()) {
+ vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
+ } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
+ vkRect = clip.scissorRect();
+ } else {
+ const SkIRect& scissor = clip.scissorRect();
+ vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
+ scissor.fRight, fRenderTarget->height() - scissor.fTop);
+ }
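+    // For example, on a 100-pixel-tall bottom-left-origin target, a Skia scissor spanning
+    // rows 10..20 maps to Vulkan rows 80..90.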
+
+ clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
+ clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
+
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ uint32_t stencilIndex;
+ SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ attachment.colorAttachment = 0; // this value shouldn't matter
+ attachment.clearValue.depthStencil = vkStencilColor;
+
+ this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ fCurrentCBIsEmpty = false;
+}
+
+void GrVkOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
+ // parent class should never let us get here with no RT
+ SkASSERT(!clip.hasWindowRectangles());
+
+ VkClearColorValue vkColor = {{color.fR, color.fG, color.fB, color.fA}};
+
+    // If we end up in a situation where we are calling clear without a scissor, then in general it
+ // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
+ // there are situations where higher up we couldn't discard the previous ops and set a clear
+ // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
+ // TODO: Make the waitOp a RenderTask instead so we can clear out the GrOpsTask for a clear. We
+ // can then reenable this assert assuming we can't get messed up by a waitOp.
+ //SkASSERT(!fCurrentCBIsEmpty || clip.scissorEnabled());
+
+ // We always do a sub rect clear with clearAttachments since we are inside a render pass
+ VkClearRect clearRect;
+ // Flip rect if necessary
+ SkIRect vkRect;
+ if (!clip.scissorEnabled()) {
+ vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
+ } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
+ vkRect = clip.scissorRect();
+ } else {
+ const SkIRect& scissor = clip.scissorRect();
+ vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
+ scissor.fRight, fRenderTarget->height() - scissor.fTop);
+ }
+ clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
+ clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ uint32_t colorIndex;
+ SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachment.colorAttachment = colorIndex;
+ attachment.clearValue.color = vkColor;
+
+ this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ fCurrentCBIsEmpty = false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
+
+ GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_STORE_OP_STORE);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_STORE_OP_STORE);
+
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
+ vkRT->compatibleRenderPassHandle();
+ SkASSERT(fCurrentRenderPass);
+ fCurrentRenderPass->unref(fGpu);
+ if (rpHandle.isValid()) {
+ fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
+ vkColorOps,
+ vkStencilOps);
+ }
+ SkASSERT(fCurrentRenderPass);
+
+ VkClearValue vkClearColor;
+ memset(&vkClearColor, 0, sizeof(VkClearValue));
+
+ if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
+ mustUseSecondaryCommandBuffer) {
+ fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
+ fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->framebuffer(), fCurrentRenderPass);
+ }
+
+ // We use the same fBounds as the whole GrVkOpsRenderPass since we have no way of tracking the
+ // bounds in GrOpsTask for parts before and after inline uploads separately.
+ fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
+ SkToBool(fCurrentSecondaryCommandBuffer));
+}
+
+void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
+ if (fCurrentSecondaryCommandBuffer) {
+ fCurrentSecondaryCommandBuffer->end(fGpu);
+ fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
+ }
+ fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
+
+ // We pass in true here to signal that after the upload we need to set the upload textures
+ // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
+ state->doUpload(upload, true);
+
+ this->addAdditionalRenderPass(false);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkOpsRenderPass::bindGeometry(const GrGpuBuffer* indexBuffer,
+ const GrGpuBuffer* vertexBuffer,
+ const GrGpuBuffer* instanceBuffer) {
+ GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
+ // There is no need to put any memory barriers to make sure host writes have finished here.
+ // When a command buffer is submitted to a queue, there is an implicit memory barrier that
+ // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
+ // an active RenderPass.
+
+ // Here our vertex and instance inputs need to match the same 0-based bindings they were
+ // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
+ uint32_t binding = 0;
+
+ if (vertexBuffer) {
+ SkASSERT(!vertexBuffer->isMapped());
+
+ currCmdBuf->bindInputBuffer(fGpu, binding++,
+ static_cast<const GrVkVertexBuffer*>(vertexBuffer));
+ }
+
+ if (instanceBuffer) {
+ SkASSERT(!instanceBuffer->isMapped());
+
+ currCmdBuf->bindInputBuffer(fGpu, binding++,
+ static_cast<const GrVkVertexBuffer*>(instanceBuffer));
+ }
+ if (indexBuffer) {
+ SkASSERT(!indexBuffer->isMapped());
+
+ currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
+ }
+}
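+// Note on bindGeometry() above: with both buffers present, binding 0 receives the vertex
+// buffer and binding 1 the instance buffer, matching the order in which GrVkPipeline's
+// setup_vertex_input_state() emits the VkVertexInputBindingDescriptions.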
+
+GrVkPipelineState* GrVkOpsRenderPass::prepareDrawState(
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ const SkIRect& renderPassScissorRect) {
+ GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
+ SkASSERT(fCurrentRenderPass);
+
+ VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
+
+ GrVkPipelineState* pipelineState =
+ fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget,
+ programInfo,
+ primitiveType,
+ compatibleRenderPass);
+ if (!pipelineState) {
+        return nullptr;
+ }
+
+ pipelineState->bindPipeline(fGpu, currentCB);
+
+ // Both the 'programInfo' and this renderPass have an origin. Since they come from the
+ // same place (i.e., the target renderTargetProxy) that had best agree.
+ SkASSERT(programInfo.origin() == fOrigin);
+
+ pipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB);
+
+ // Check whether we need to bind textures between each GrMesh. If not we can bind them all now.
+ if (!programInfo.hasDynamicPrimProcTextures()) {
+ auto proxies = programInfo.hasFixedPrimProcTextures() ? programInfo.fixedPrimProcTextures()
+ : nullptr;
+ pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
+ proxies, currentCB);
+ }
+
+ if (!programInfo.pipeline().isScissorEnabled()) {
+ GrVkPipeline::SetDynamicScissorRectState(fGpu, currentCB, fRenderTarget, fOrigin,
+ renderPassScissorRect);
+ } else if (!programInfo.hasDynamicScissors()) {
+ SkASSERT(programInfo.hasFixedScissor());
+
+ SkIRect combinedScissorRect;
+ if (!combinedScissorRect.intersect(renderPassScissorRect, programInfo.fixedScissor())) {
+ combinedScissorRect = SkIRect::MakeEmpty();
+ }
+ GrVkPipeline::SetDynamicScissorRectState(fGpu, currentCB, fRenderTarget, fOrigin,
+ combinedScissorRect);
+ }
+ GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, fRenderTarget);
+ GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
+ programInfo.pipeline().outputSwizzle(),
+ programInfo.pipeline().getXferProcessor());
+
+ return pipelineState;
+}
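+// Note on prepareDrawState() above: when the pipeline carries a fixed scissor, the rect that is
+// actually programmed is the intersection of the render-pass scissor and the fixed scissor,
+// collapsing to an empty rect when the two do not overlap.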
+
+#ifdef SK_DEBUG
+void check_sampled_texture(GrTexture* tex, GrRenderTarget* rt, GrVkGpu* gpu) {
+ SkASSERT(!tex->isProtected() || (rt->isProtected() && gpu->protectedContext()));
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
+ SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+}
+#endif
+
+void GrVkOpsRenderPass::onDraw(const GrProgramInfo& programInfo,
+ const GrMesh meshes[], int meshCount,
+ const SkRect& bounds) {
+
+ SkASSERT(meshCount); // guaranteed by GrOpsRenderPass::draw
+
+#ifdef SK_DEBUG
+ if (programInfo.hasDynamicPrimProcTextures()) {
+ for (int m = 0; m < meshCount; ++m) {
+ auto dynamicPrimProcTextures = programInfo.dynamicPrimProcTextures(m);
+
+ for (int s = 0; s < programInfo.primProc().numTextureSamplers(); ++s) {
+ auto texture = dynamicPrimProcTextures[s]->peekTexture();
+ check_sampled_texture(texture, fRenderTarget, fGpu);
+ }
+ }
+ } else if (programInfo.hasFixedPrimProcTextures()) {
+ auto fixedPrimProcTextures = programInfo.fixedPrimProcTextures();
+
+ for (int s = 0; s < programInfo.primProc().numTextureSamplers(); ++s) {
+ auto texture = fixedPrimProcTextures[s]->peekTexture();
+ check_sampled_texture(texture, fRenderTarget, fGpu);
+ }
+ }
+
+ GrFragmentProcessor::Iter iter(programInfo.pipeline());
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ for (int i = 0; i < fp->numTextureSamplers(); ++i) {
+ const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
+ check_sampled_texture(sampler.peekTexture(), fRenderTarget, fGpu);
+ }
+ }
+ if (GrTexture* dstTexture = programInfo.pipeline().peekDstTexture()) {
+ check_sampled_texture(dstTexture, fRenderTarget, fGpu);
+ }
+
+ // Both the 'programInfo' and this renderPass have an origin. Since they come from the
+ // same place (i.e., the target renderTargetProxy) that had best agree.
+ SkASSERT(programInfo.origin() == fOrigin);
+#endif
+
+ SkRect scissorRect = SkRect::Make(fBounds);
+ SkIRect renderPassScissorRect = SkIRect::MakeEmpty();
+ if (scissorRect.intersect(bounds)) {
+ scissorRect.roundOut(&renderPassScissorRect);
+ }
+
+ GrPrimitiveType primitiveType = meshes[0].primitiveType();
+ GrVkPipelineState* pipelineState = this->prepareDrawState(programInfo, primitiveType,
+ renderPassScissorRect);
+ if (!pipelineState) {
+ return;
+ }
+
+ bool hasDynamicScissors = programInfo.hasDynamicScissors();
+ bool hasDynamicTextures = programInfo.hasDynamicPrimProcTextures();
+
+ for (int i = 0; i < meshCount; ++i) {
+ const GrMesh& mesh = meshes[i];
+ if (mesh.primitiveType() != primitiveType) {
+ SkDEBUGCODE(pipelineState = nullptr);
+ primitiveType = mesh.primitiveType();
+ pipelineState = this->prepareDrawState(programInfo, primitiveType,
+ renderPassScissorRect);
+ if (!pipelineState) {
+ return;
+ }
+ }
+
+ if (hasDynamicScissors) {
+ SkIRect combinedScissorRect;
+ if (!combinedScissorRect.intersect(renderPassScissorRect,
+ programInfo.dynamicScissor(i))) {
+ combinedScissorRect = SkIRect::MakeEmpty();
+ }
+ GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
+ fRenderTarget, fOrigin,
+ combinedScissorRect);
+ }
+ if (hasDynamicTextures) {
+ auto meshProxies = programInfo.dynamicPrimProcTextures(i);
+ pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
+ meshProxies, this->currentCommandBuffer());
+ }
+ SkASSERT(pipelineState);
+ mesh.sendToGpu(this);
+ }
+
+ fCurrentCBIsEmpty = false;
+}
+
+void GrVkOpsRenderPass::sendInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
+ SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
+ SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
+ auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
+ auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
+ this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer);
+ this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
+ fGpu->stats()->incNumDraws();
+}
+
+void GrVkOpsRenderPass::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance,
+ GrPrimitiveRestart restart) {
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
+ SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
+ SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
+ SkASSERT(!indexBuffer->isCpuBuffer());
+    auto gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer);
+ auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
+ auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
+    this->bindGeometry(gpuIndexBuffer, gpuVertexBuffer, gpuInstanceBuffer);
+ this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
+ baseIndex, baseVertex, baseInstance);
+ fGpu->stats()->incNumDraws();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkOpsRenderPass::executeDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
+ GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(fRenderTarget);
+
+ GrVkImage* targetImage = target->msaaImage() ? target->msaaImage() : target;
+
+ VkRect2D bounds;
+ bounds.offset = { 0, 0 };
+ bounds.extent = { 0, 0 };
+
+ if (!fCurrentSecondaryCommandBuffer) {
+ fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
+ this->addAdditionalRenderPass(true);
+ }
+ SkASSERT(fCurrentSecondaryCommandBuffer);
+
+ GrVkDrawableInfo vkInfo;
+ vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
+ vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
+ SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
+ vkInfo.fFormat = targetImage->imageFormat();
+ vkInfo.fDrawBounds = &bounds;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ vkInfo.fImage = targetImage->image();
+#else
+ vkInfo.fImage = VK_NULL_HANDLE;
+#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+ GrBackendDrawableInfo info(vkInfo);
+
+ // After we draw into the command buffer via the drawable, cached state we have may be invalid.
+ this->currentCommandBuffer()->invalidateState();
+ // Also assume that the drawable produced output.
+ fCurrentCBIsEmpty = false;
+
+ drawable->draw(info);
+ fGpu->addDrawable(std::move(drawable));
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.h b/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.h
new file mode 100644
index 0000000000..65d6b5b464
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkOpsRenderPass.h
@@ -0,0 +1,131 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkOpsRenderPass_DEFINED
+#define GrVkOpsRenderPass_DEFINED
+
+#include "src/gpu/GrOpsRenderPass.h"
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrMesh.h"
+#include "src/gpu/GrTRecorder.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+
+class GrVkGpu;
+class GrVkImage;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkSecondaryCommandBuffer;
+
+class GrVkOpsRenderPass : public GrOpsRenderPass, private GrMesh::SendToGpuImpl {
+public:
+ GrVkOpsRenderPass(GrVkGpu*);
+
+ ~GrVkOpsRenderPass() override;
+
+ void begin() override { }
+ void end() override;
+
+ void insertEventMarker(const char*) override;
+
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
+
+ void executeDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler>) override;
+
+ void set(GrRenderTarget*, GrSurfaceOrigin, const SkIRect& bounds,
+ const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkTArray<GrTextureProxy*, true>& sampledProxies);
+ void reset();
+
+ void submit();
+
+#ifdef SK_DEBUG
+ bool isActive() const { return fIsActive; }
+#endif
+
+private:
+ void init(const GrOpsRenderPass::LoadAndStoreInfo&,
+ const GrOpsRenderPass::StencilLoadAndStoreInfo&,
+ const SkPMColor4f& clearColor);
+
+ // Called instead of init when we are drawing to a render target that already wraps a secondary
+ // command buffer.
+ void initWrapped();
+
+ bool wrapsSecondaryCommandBuffer() const;
+
+ GrGpu* gpu() override;
+
+ GrVkCommandBuffer* currentCommandBuffer();
+
+ // Bind vertex and index buffers
+ void bindGeometry(const GrGpuBuffer* indexBuffer,
+ const GrGpuBuffer* vertexBuffer,
+ const GrGpuBuffer* instanceBuffer);
+
+ GrVkPipelineState* prepareDrawState(const GrProgramInfo&, GrPrimitiveType,
+ const SkIRect& renderPassScissorRect);
+
+ void onDraw(const GrProgramInfo&, const GrMesh[], int meshCount,
+ const SkRect& bounds) override;
+
+ // GrMesh::SendToGpuImpl methods. These issue the actual Vulkan draw commands.
+ // Marked final as a hint to the compiler to not use virtual dispatch.
+ void sendMeshToGpu(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) final {
+ this->sendInstancedMeshToGpu(primType, vertexBuffer, vertexCount, baseVertex, nullptr, 1,
+ 0);
+ }
+
+ void sendIndexedMeshToGpu(GrPrimitiveType primType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, uint16_t /*minIndexValue*/, uint16_t /*maxIndexValue*/,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ GrPrimitiveRestart restart) final {
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
+ this->sendIndexedInstancedMeshToGpu(primType, indexBuffer, indexCount, baseIndex,
+ vertexBuffer, baseVertex, nullptr, 1, 0,
+ GrPrimitiveRestart::kNo);
+ }
+
+ void sendInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex, const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) final;
+
+ void sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart) final;
+
+ void onClear(const GrFixedClip&, const SkPMColor4f& color) override;
+
+ void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) override;
+
+ void addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer);
+
+ std::unique_ptr<GrVkSecondaryCommandBuffer> fCurrentSecondaryCommandBuffer;
+ const GrVkRenderPass* fCurrentRenderPass;
+ bool fCurrentCBIsEmpty = true;
+ SkIRect fBounds;
+ GrVkGpu* fGpu;
+
+#ifdef SK_DEBUG
+ // When we are actively recording into the GrVkOpsRenderPass we set this flag to true. This
+ // then allows us to assert that we never submit a primary command buffer to the queue while in
+ // a recording state. This is needed since when we submit to the queue we change command pools
+    // and may trigger the old one to be reset, but a recording GrVkOpsRenderPass may still have
+    // an outstanding secondary command buffer allocated from that pool that we'll try to access
+    // after the pool has been reset.
+ bool fIsActive = false;
+#endif
+
+ typedef GrOpsRenderPass INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp
new file mode 100644
index 0000000000..6868ff5bab
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp
@@ -0,0 +1,642 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+static inline VkFormat attrib_type_to_vkformat(GrVertexAttribType type) {
+ switch (type) {
+ case kFloat_GrVertexAttribType:
+ return VK_FORMAT_R32_SFLOAT;
+ case kFloat2_GrVertexAttribType:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case kFloat3_GrVertexAttribType:
+ return VK_FORMAT_R32G32B32_SFLOAT;
+ case kFloat4_GrVertexAttribType:
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case kHalf_GrVertexAttribType:
+ return VK_FORMAT_R16_SFLOAT;
+ case kHalf2_GrVertexAttribType:
+ return VK_FORMAT_R16G16_SFLOAT;
+ case kHalf3_GrVertexAttribType:
+ return VK_FORMAT_R16G16B16_SFLOAT;
+ case kHalf4_GrVertexAttribType:
+ return VK_FORMAT_R16G16B16A16_SFLOAT;
+ case kInt2_GrVertexAttribType:
+ return VK_FORMAT_R32G32_SINT;
+ case kInt3_GrVertexAttribType:
+ return VK_FORMAT_R32G32B32_SINT;
+ case kInt4_GrVertexAttribType:
+ return VK_FORMAT_R32G32B32A32_SINT;
+ case kByte_GrVertexAttribType:
+ return VK_FORMAT_R8_SINT;
+ case kByte2_GrVertexAttribType:
+ return VK_FORMAT_R8G8_SINT;
+ case kByte3_GrVertexAttribType:
+ return VK_FORMAT_R8G8B8_SINT;
+ case kByte4_GrVertexAttribType:
+ return VK_FORMAT_R8G8B8A8_SINT;
+ case kUByte_GrVertexAttribType:
+ return VK_FORMAT_R8_UINT;
+ case kUByte2_GrVertexAttribType:
+ return VK_FORMAT_R8G8_UINT;
+ case kUByte3_GrVertexAttribType:
+ return VK_FORMAT_R8G8B8_UINT;
+ case kUByte4_GrVertexAttribType:
+ return VK_FORMAT_R8G8B8A8_UINT;
+ case kUByte_norm_GrVertexAttribType:
+ return VK_FORMAT_R8_UNORM;
+ case kUByte4_norm_GrVertexAttribType:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case kShort2_GrVertexAttribType:
+ return VK_FORMAT_R16G16_SINT;
+ case kShort4_GrVertexAttribType:
+ return VK_FORMAT_R16G16B16A16_SINT;
+ case kUShort2_GrVertexAttribType:
+ return VK_FORMAT_R16G16_UINT;
+ case kUShort2_norm_GrVertexAttribType:
+ return VK_FORMAT_R16G16_UNORM;
+ case kInt_GrVertexAttribType:
+ return VK_FORMAT_R32_SINT;
+ case kUint_GrVertexAttribType:
+ return VK_FORMAT_R32_UINT;
+ case kUShort_norm_GrVertexAttribType:
+ return VK_FORMAT_R16_UNORM;
+ case kUShort4_norm_GrVertexAttribType:
+ return VK_FORMAT_R16G16B16A16_UNORM;
+ }
+ SK_ABORT("Unknown vertex attrib type");
+}
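+// Note on attrib_type_to_vkformat() above: e.g. a kFloat2 position attribute becomes
+// VK_FORMAT_R32G32_SFLOAT and is typically read as a vec2 at the matching location in the
+// vertex shader.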
+
+static void setup_vertex_input_state(const GrPrimitiveProcessor& primProc,
+ VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
+ SkSTArray<2, VkVertexInputBindingDescription, true>* bindingDescs,
+ VkVertexInputAttributeDescription* attributeDesc) {
+ uint32_t vertexBinding = 0, instanceBinding = 0;
+
+ int nextBinding = bindingDescs->count();
+ if (primProc.hasVertexAttributes()) {
+ vertexBinding = nextBinding++;
+ }
+
+ if (primProc.hasInstanceAttributes()) {
+ instanceBinding = nextBinding;
+ }
+
+ // setup attribute descriptions
+ int vaCount = primProc.numVertexAttributes();
+ int attribIndex = 0;
+ size_t vertexAttributeOffset = 0;
+ for (const auto& attrib : primProc.vertexAttributes()) {
+ VkVertexInputAttributeDescription& vkAttrib = attributeDesc[attribIndex];
+ vkAttrib.location = attribIndex++; // for now assume location = attribIndex
+ vkAttrib.binding = vertexBinding;
+ vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
+ vkAttrib.offset = vertexAttributeOffset;
+ vertexAttributeOffset += attrib.sizeAlign4();
+ }
+ SkASSERT(vertexAttributeOffset == primProc.vertexStride());
+
+ int iaCount = primProc.numInstanceAttributes();
+ size_t instanceAttributeOffset = 0;
+ for (const auto& attrib : primProc.instanceAttributes()) {
+ VkVertexInputAttributeDescription& vkAttrib = attributeDesc[attribIndex];
+ vkAttrib.location = attribIndex++; // for now assume location = attribIndex
+ vkAttrib.binding = instanceBinding;
+ vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
+ vkAttrib.offset = instanceAttributeOffset;
+ instanceAttributeOffset += attrib.sizeAlign4();
+ }
+ SkASSERT(instanceAttributeOffset == primProc.instanceStride());
+
+ if (primProc.hasVertexAttributes()) {
+ bindingDescs->push_back() = {
+ vertexBinding,
+ (uint32_t) vertexAttributeOffset,
+ VK_VERTEX_INPUT_RATE_VERTEX
+ };
+ }
+ if (primProc.hasInstanceAttributes()) {
+ bindingDescs->push_back() = {
+ instanceBinding,
+ (uint32_t) instanceAttributeOffset,
+ VK_VERTEX_INPUT_RATE_INSTANCE
+ };
+ }
+
+ memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
+ vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertexInputInfo->pNext = nullptr;
+ vertexInputInfo->flags = 0;
+ vertexInputInfo->vertexBindingDescriptionCount = bindingDescs->count();
+ vertexInputInfo->pVertexBindingDescriptions = bindingDescs->begin();
+ vertexInputInfo->vertexAttributeDescriptionCount = vaCount + iaCount;
+ vertexInputInfo->pVertexAttributeDescriptions = attributeDesc;
+}
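+// Example of the state built above: for a primitive processor with one float2 vertex attribute
+// and one float4 instance attribute, this yields binding 0 = {stride 8, per-vertex} and
+// binding 1 = {stride 16, per-instance}, with attribute locations 0 and 1 respectively.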
+
+static VkPrimitiveTopology gr_primitive_type_to_vk_topology(GrPrimitiveType primitiveType) {
+ switch (primitiveType) {
+ case GrPrimitiveType::kTriangles:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ case GrPrimitiveType::kTriangleStrip:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+ case GrPrimitiveType::kPoints:
+ return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+ case GrPrimitiveType::kLines:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+ case GrPrimitiveType::kLineStrip:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+ case GrPrimitiveType::kPath:
+ SK_ABORT("Unsupported primitive type");
+ }
+ SK_ABORT("invalid GrPrimitiveType");
+}
+
+static void setup_input_assembly_state(GrPrimitiveType primitiveType,
+ VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
+ memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+ inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ inputAssemblyInfo->pNext = nullptr;
+ inputAssemblyInfo->flags = 0;
+    inputAssemblyInfo->primitiveRestartEnable = VK_FALSE;
+ inputAssemblyInfo->topology = gr_primitive_type_to_vk_topology(primitiveType);
+}
+
+
+static VkStencilOp stencil_op_to_vk_stencil_op(GrStencilOp op) {
+ static const VkStencilOp gTable[] = {
+ VK_STENCIL_OP_KEEP, // kKeep
+ VK_STENCIL_OP_ZERO, // kZero
+ VK_STENCIL_OP_REPLACE, // kReplace
+ VK_STENCIL_OP_INVERT, // kInvert
+ VK_STENCIL_OP_INCREMENT_AND_WRAP, // kIncWrap
+ VK_STENCIL_OP_DECREMENT_AND_WRAP, // kDecWrap
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP, // kIncClamp
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP, // kDecClamp
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrStencilOpCount);
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+ GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
+ GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
+ GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
+ GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
+ GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
+ GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
+ GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
+ SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
+ return gTable[(int)op];
+}
+
+static VkCompareOp stencil_func_to_vk_compare_op(GrStencilTest test) {
+ static const VkCompareOp gTable[] = {
+ VK_COMPARE_OP_ALWAYS, // kAlways
+ VK_COMPARE_OP_NEVER, // kNever
+ VK_COMPARE_OP_GREATER, // kGreater
+ VK_COMPARE_OP_GREATER_OR_EQUAL, // kGEqual
+ VK_COMPARE_OP_LESS, // kLess
+ VK_COMPARE_OP_LESS_OR_EQUAL, // kLEqual
+ VK_COMPARE_OP_EQUAL, // kEqual
+ VK_COMPARE_OP_NOT_EQUAL, // kNotEqual
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrStencilTestCount);
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(1 == (int)GrStencilTest::kNever);
+ GR_STATIC_ASSERT(2 == (int)GrStencilTest::kGreater);
+ GR_STATIC_ASSERT(3 == (int)GrStencilTest::kGEqual);
+ GR_STATIC_ASSERT(4 == (int)GrStencilTest::kLess);
+ GR_STATIC_ASSERT(5 == (int)GrStencilTest::kLEqual);
+ GR_STATIC_ASSERT(6 == (int)GrStencilTest::kEqual);
+ GR_STATIC_ASSERT(7 == (int)GrStencilTest::kNotEqual);
+ SkASSERT(test < (GrStencilTest)kGrStencilTestCount);
+
+ return gTable[(int)test];
+}
+
+static void setup_stencil_op_state(
+ VkStencilOpState* opState, const GrStencilSettings::Face& stencilFace) {
+ opState->failOp = stencil_op_to_vk_stencil_op(stencilFace.fFailOp);
+ opState->passOp = stencil_op_to_vk_stencil_op(stencilFace.fPassOp);
+ opState->depthFailOp = opState->failOp;
+ opState->compareOp = stencil_func_to_vk_compare_op(stencilFace.fTest);
+ opState->compareMask = stencilFace.fTestMask;
+ opState->writeMask = stencilFace.fWriteMask;
+ opState->reference = stencilFace.fRef;
+}
+
+static void setup_depth_stencil_state(
+ const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin,
+ VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
+ memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
+ stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ stencilInfo->pNext = nullptr;
+ stencilInfo->flags = 0;
+ // set depth testing defaults
+ stencilInfo->depthTestEnable = VK_FALSE;
+ stencilInfo->depthWriteEnable = VK_FALSE;
+ stencilInfo->depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ stencilInfo->depthBoundsTestEnable = VK_FALSE;
+ stencilInfo->stencilTestEnable = !stencilSettings.isDisabled();
+ if (!stencilSettings.isDisabled()) {
+ if (!stencilSettings.isTwoSided()) {
+ setup_stencil_op_state(&stencilInfo->front, stencilSettings.frontAndBack());
+ stencilInfo->back = stencilInfo->front;
+ } else {
+ setup_stencil_op_state(&stencilInfo->front, stencilSettings.front(origin));
+ setup_stencil_op_state(&stencilInfo->back, stencilSettings.back(origin));
+ }
+ }
+ stencilInfo->minDepthBounds = 0.0f;
+ stencilInfo->maxDepthBounds = 1.0f;
+}
+
+static void setup_viewport_scissor_state(VkPipelineViewportStateCreateInfo* viewportInfo) {
+ memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
+ viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewportInfo->pNext = nullptr;
+ viewportInfo->flags = 0;
+
+ viewportInfo->viewportCount = 1;
+ viewportInfo->pViewports = nullptr; // This is set dynamically
+
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = nullptr; // This is set dynamically
+
+ SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
+}
+
+static void setup_multisample_state(const GrProgramInfo& programInfo,
+ const GrCaps* caps,
+ VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
+ memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
+ multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampleInfo->pNext = nullptr;
+ multisampleInfo->flags = 0;
+ SkAssertResult(GrSampleCountToVkSampleCount(programInfo.numSamples(),
+ &multisampleInfo->rasterizationSamples));
+ multisampleInfo->sampleShadingEnable = VK_FALSE;
+ multisampleInfo->minSampleShading = 0.0f;
+ multisampleInfo->pSampleMask = nullptr;
+ multisampleInfo->alphaToCoverageEnable = VK_FALSE;
+ multisampleInfo->alphaToOneEnable = VK_FALSE;
+}
+
+static VkBlendFactor blend_coeff_to_vk_blend(GrBlendCoeff coeff) {
+ static const VkBlendFactor gTable[] = {
+ VK_BLEND_FACTOR_ZERO, // kZero_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE, // kOne_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_COLOR, // kSC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, // kISC_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_COLOR, // kDC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, // kIDC_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_ALPHA, // kSA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, // kISA_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_ALPHA, // kDA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, // kIDA_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_COLOR, // kConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, // kIConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_ALPHA, // kConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, // kIConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_COLOR, // kS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, // kIS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_ALPHA, // kS2A_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, // kIS2A_GrBlendCoeff
+ VK_BLEND_FACTOR_ZERO, // kIllegal_GrBlendCoeff
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrBlendCoeffCnt);
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+ SkASSERT((unsigned)coeff < kGrBlendCoeffCnt);
+ return gTable[coeff];
+}
+
+
+static VkBlendOp blend_equation_to_vk_blend_op(GrBlendEquation equation) {
+ static const VkBlendOp gTable[] = {
+ // Basic blend ops
+ VK_BLEND_OP_ADD,
+ VK_BLEND_OP_SUBTRACT,
+ VK_BLEND_OP_REVERSE_SUBTRACT,
+
+ // Advanced blend ops
+ VK_BLEND_OP_SCREEN_EXT,
+ VK_BLEND_OP_OVERLAY_EXT,
+ VK_BLEND_OP_DARKEN_EXT,
+ VK_BLEND_OP_LIGHTEN_EXT,
+ VK_BLEND_OP_COLORDODGE_EXT,
+ VK_BLEND_OP_COLORBURN_EXT,
+ VK_BLEND_OP_HARDLIGHT_EXT,
+ VK_BLEND_OP_SOFTLIGHT_EXT,
+ VK_BLEND_OP_DIFFERENCE_EXT,
+ VK_BLEND_OP_EXCLUSION_EXT,
+ VK_BLEND_OP_MULTIPLY_EXT,
+ VK_BLEND_OP_HSL_HUE_EXT,
+ VK_BLEND_OP_HSL_SATURATION_EXT,
+ VK_BLEND_OP_HSL_COLOR_EXT,
+ VK_BLEND_OP_HSL_LUMINOSITY_EXT,
+
+ // Illegal.
+ VK_BLEND_OP_ADD,
+ };
+ GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+ GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
+ GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
+ GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
+ GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
+ GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
+ GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
+ GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
+ GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
+ GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
+ GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
+ GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
+ GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
+ GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
+ GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
+ GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrBlendEquationCnt);
+
+    SkASSERT((unsigned)equation < kGrBlendEquationCnt);
+ return gTable[equation];
+}
+
+static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+
+ // Illegal
+ false,
+ };
+    GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+    // Individual enum asserts already made in blend_coeff_to_vk_blend.
+    return gCoeffReferencesBlendConst[coeff];
+}
+
+static void setup_color_blend_state(const GrPipeline& pipeline,
+ VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
+ VkPipelineColorBlendAttachmentState* attachmentState) {
+ const GrXferProcessor::BlendInfo& blendInfo = pipeline.getXferProcessor().getBlendInfo();
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
+
+ memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
+ attachmentState->blendEnable = !blendOff;
+ if (!blendOff) {
+ attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
+ attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
+ }
+
+ if (!blendInfo.fWriteColor) {
+ attachmentState->colorWriteMask = 0;
+ } else {
+ attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+ }
+
+ memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
+ colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlendInfo->pNext = nullptr;
+ colorBlendInfo->flags = 0;
+ colorBlendInfo->logicOpEnable = VK_FALSE;
+ colorBlendInfo->attachmentCount = 1;
+ colorBlendInfo->pAttachments = attachmentState;
+ // colorBlendInfo->blendConstants is set dynamically
+}
+
+static void setup_raster_state(const GrPipeline& pipeline,
+ const GrCaps* caps,
+ VkPipelineRasterizationStateCreateInfo* rasterInfo) {
+ memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
+ rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterInfo->pNext = nullptr;
+ rasterInfo->flags = 0;
+ rasterInfo->depthClampEnable = VK_FALSE;
+ rasterInfo->rasterizerDiscardEnable = VK_FALSE;
+ rasterInfo->polygonMode = caps->wireframeMode() ? VK_POLYGON_MODE_LINE
+ : VK_POLYGON_MODE_FILL;
+ rasterInfo->cullMode = VK_CULL_MODE_NONE;
+ rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterInfo->depthBiasEnable = VK_FALSE;
+ rasterInfo->depthBiasConstantFactor = 0.0f;
+ rasterInfo->depthBiasClamp = 0.0f;
+ rasterInfo->depthBiasSlopeFactor = 0.0f;
+ rasterInfo->lineWidth = 1.0f;
+}
+
+static void setup_dynamic_state(VkPipelineDynamicStateCreateInfo* dynamicInfo,
+ VkDynamicState* dynamicStates) {
+ memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
+ dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+    dynamicInfo->pNext = nullptr;
+ dynamicInfo->flags = 0;
+ dynamicStates[0] = VK_DYNAMIC_STATE_VIEWPORT;
+ dynamicStates[1] = VK_DYNAMIC_STATE_SCISSOR;
+ dynamicStates[2] = VK_DYNAMIC_STATE_BLEND_CONSTANTS;
+ dynamicInfo->dynamicStateCount = 3;
+ dynamicInfo->pDynamicStates = dynamicStates;
+}
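+// The three dynamic states registered above are supplied per draw by SetDynamicViewportState,
+// SetDynamicScissorRectState, and SetDynamicBlendConstantState further down in this file.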
+
+GrVkPipeline* GrVkPipeline::Create(
+ GrVkGpu* gpu,
+ const GrProgramInfo& programInfo,
+ const GrStencilSettings& stencil,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo, int shaderStageCount,
+ GrPrimitiveType primitiveType, VkRenderPass compatibleRenderPass, VkPipelineLayout layout,
+ VkPipelineCache cache) {
+ VkPipelineVertexInputStateCreateInfo vertexInputInfo;
+ SkSTArray<2, VkVertexInputBindingDescription, true> bindingDescs;
+ SkSTArray<16, VkVertexInputAttributeDescription> attributeDesc;
+ int totalAttributeCnt = programInfo.primProc().numVertexAttributes() +
+ programInfo.primProc().numInstanceAttributes();
+ SkASSERT(totalAttributeCnt <= gpu->vkCaps().maxVertexAttributes());
+ VkVertexInputAttributeDescription* pAttribs = attributeDesc.push_back_n(totalAttributeCnt);
+ setup_vertex_input_state(programInfo.primProc(), &vertexInputInfo, &bindingDescs, pAttribs);
+
+ VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
+ setup_input_assembly_state(primitiveType, &inputAssemblyInfo);
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
+ setup_depth_stencil_state(stencil, programInfo.origin(), &depthStencilInfo);
+
+ VkPipelineViewportStateCreateInfo viewportInfo;
+ setup_viewport_scissor_state(&viewportInfo);
+
+ VkPipelineMultisampleStateCreateInfo multisampleInfo;
+ setup_multisample_state(programInfo, gpu->caps(), &multisampleInfo);
+
+ // We will only have one color attachment per pipeline.
+ VkPipelineColorBlendAttachmentState attachmentStates[1];
+ VkPipelineColorBlendStateCreateInfo colorBlendInfo;
+ setup_color_blend_state(programInfo.pipeline(), &colorBlendInfo, attachmentStates);
+
+ VkPipelineRasterizationStateCreateInfo rasterInfo;
+ setup_raster_state(programInfo.pipeline(), gpu->caps(), &rasterInfo);
+
+ VkDynamicState dynamicStates[3];
+ VkPipelineDynamicStateCreateInfo dynamicInfo;
+ setup_dynamic_state(&dynamicInfo, dynamicStates);
+
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo;
+ memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
+ pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipelineCreateInfo.pNext = nullptr;
+ pipelineCreateInfo.flags = 0;
+ pipelineCreateInfo.stageCount = shaderStageCount;
+ pipelineCreateInfo.pStages = shaderStageInfo;
+ pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
+ pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
+ pipelineCreateInfo.pTessellationState = nullptr;
+ pipelineCreateInfo.pViewportState = &viewportInfo;
+ pipelineCreateInfo.pRasterizationState = &rasterInfo;
+ pipelineCreateInfo.pMultisampleState = &multisampleInfo;
+ pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
+ pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
+ pipelineCreateInfo.pDynamicState = &dynamicInfo;
+ pipelineCreateInfo.layout = layout;
+ pipelineCreateInfo.renderPass = compatibleRenderPass;
+ pipelineCreateInfo.subpass = 0;
+ pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
+ pipelineCreateInfo.basePipelineIndex = -1;
+
+ VkPipeline vkPipeline;
+ VkResult err;
+ {
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+ // skia:8712
+ __lsan::ScopedDisabler lsanDisabler;
+#endif
+ err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
+ cache, 1,
+ &pipelineCreateInfo,
+ nullptr, &vkPipeline));
+ }
+ if (err) {
+ SkDebugf("Failed to create pipeline. Error: %d\n", err);
+ return nullptr;
+ }
+
+ return new GrVkPipeline(vkPipeline, layout);
+}
+
+void GrVkPipeline::freeGPUData(GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(), fPipelineLayout, nullptr));
+}
+
+void GrVkPipeline::SetDynamicScissorRectState(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrRenderTarget* renderTarget,
+ GrSurfaceOrigin rtOrigin,
+ const SkIRect& scissorRect) {
+ SkASSERT(scissorRect.isEmpty() ||
+ SkIRect::MakeWH(renderTarget->width(), renderTarget->height()).contains(scissorRect));
+
+ VkRect2D scissor;
+ scissor.offset.x = scissorRect.fLeft;
+ scissor.extent.width = scissorRect.width();
+ if (kTopLeft_GrSurfaceOrigin == rtOrigin) {
+ scissor.offset.y = scissorRect.fTop;
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == rtOrigin);
+ scissor.offset.y = renderTarget->height() - scissorRect.fBottom;
+ }
+ scissor.extent.height = scissorRect.height();
+
+ SkASSERT(scissor.offset.x >= 0);
+ SkASSERT(scissor.offset.y >= 0);
+ cmdBuffer->setScissor(gpu, 0, 1, &scissor);
+}
+
+void GrVkPipeline::SetDynamicViewportState(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrRenderTarget* renderTarget) {
+ // We always use one viewport the size of the RT
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+ viewport.width = SkIntToScalar(renderTarget->width());
+ viewport.height = SkIntToScalar(renderTarget->height());
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ cmdBuffer->setViewport(gpu, 0, 1, &viewport);
+}
+
+void GrVkPipeline::SetDynamicBlendConstantState(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrSwizzle& swizzle,
+ const GrXferProcessor& xferProcessor) {
+ const GrXferProcessor::BlendInfo& blendInfo = xferProcessor.getBlendInfo();
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ float floatColors[4];
+ if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
+ // Swizzle the blend to match what the shader will output.
+ SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
+ floatColors[0] = blendConst.fR;
+ floatColors[1] = blendConst.fG;
+ floatColors[2] = blendConst.fB;
+ floatColors[3] = blendConst.fA;
+ } else {
+ memset(floatColors, 0, 4 * sizeof(float));
+ }
+ cmdBuffer->setBlendConstants(gpu, floatColors);
+}
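+
+// Note on SetDynamicBlendConstantState() above: the uploaded constants only affect draws whose
+// pipeline uses a VK_BLEND_FACTOR_CONSTANT_* factor, i.e. when blend_coeff_refs_constant()
+// returns true for the src or dst coefficient; otherwise zeros are uploaded and ignored.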
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h
new file mode 100644
index 0000000000..ce33a49d72
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h
@@ -0,0 +1,66 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipeline_DEFINED
+#define GrVkPipeline_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrRenderTarget;
+class GrXferProcessor;
+class GrStencilSettings;
+class GrVkCommandBuffer;
+class GrVkGpu;
+class GrVkRenderPass;
+struct SkIRect;
+
+class GrVkPipeline : public GrVkResource {
+public:
+ static GrVkPipeline* Create(GrVkGpu*,
+ const GrProgramInfo&,
+ const GrStencilSettings&,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass,
+ VkPipelineLayout layout,
+ VkPipelineCache cache);
+
+ VkPipeline pipeline() const { return fPipeline; }
+ VkPipelineLayout layout() const { return fPipelineLayout; }
+
+ static void SetDynamicScissorRectState(GrVkGpu*, GrVkCommandBuffer*, const GrRenderTarget*,
+ GrSurfaceOrigin, const SkIRect& scissorRect);
+ static void SetDynamicViewportState(GrVkGpu*, GrVkCommandBuffer*, const GrRenderTarget*);
+ static void SetDynamicBlendConstantState(GrVkGpu*, GrVkCommandBuffer*,
+ const GrSwizzle& outputSwizzle,
+ const GrXferProcessor&);
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkPipeline: %d (%d refs)\n", fPipeline, this->getRefCnt());
+ }
+#endif
+
+protected:
+ GrVkPipeline(VkPipeline pipeline, VkPipelineLayout layout)
+ : INHERITED(), fPipeline(pipeline), fPipelineLayout(layout) {}
+
+ VkPipeline fPipeline;
+ VkPipelineLayout fPipelineLayout;
+
+private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp
new file mode 100644
index 0000000000..b48f15550e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp
@@ -0,0 +1,337 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/gpu/GrContext.h"
+#include "src/core/SkMipMap.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLXferProcessor.h"
+#include "src/gpu/vk/GrVkBufferView.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+#include "src/gpu/vk/GrVkDescriptorSet.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkMemory.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkSampler.h"
+#include "src/gpu/vk/GrVkTexture.h"
+#include "src/gpu/vk/GrVkUniformBuffer.h"
+
+GrVkPipelineState::GrVkPipelineState(
+ GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ const GrVkDescriptorSetManager::Handle& samplerDSHandle,
+ const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t uniformSize,
+ const UniformInfoArray& samplers,
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+ std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fragmentProcessorCnt)
+ : fPipeline(pipeline)
+ , fUniformDescriptorSet(nullptr)
+ , fSamplerDescriptorSet(nullptr)
+ , fSamplerDSHandle(samplerDSHandle)
+ , fBuiltinUniformHandles(builtinUniformHandles)
+ , fGeometryProcessor(std::move(geometryProcessor))
+ , fXferProcessor(std::move(xferProcessor))
+ , fFragmentProcessors(std::move(fragmentProcessors))
+ , fFragmentProcessorCnt(fragmentProcessorCnt)
+ , fDataManager(uniforms, uniformSize) {
+ fDescriptorSets[0] = VK_NULL_HANDLE;
+ fDescriptorSets[1] = VK_NULL_HANDLE;
+ fDescriptorSets[2] = VK_NULL_HANDLE;
+
+ fUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, uniformSize));
+
+ fNumSamplers = samplers.count();
+
+ for (int i = 0; i < fNumSamplers; ++i) {
+ // We store the immutable samplers here and take ownership of the ref from the
+        // GrVkUniformHandler.
+ fImmutableSamplers.push_back(samplers[i].fImmutableSampler);
+ }
+}
+
+GrVkPipelineState::~GrVkPipelineState() {
+ // Must have freed all GPU resources before this is destroyed
+ SkASSERT(!fPipeline);
+}
+
+void GrVkPipelineState::freeGPUResources(GrVkGpu* gpu) {
+ if (fPipeline) {
+ fPipeline->unref(gpu);
+ fPipeline = nullptr;
+ }
+
+ if (fUniformBuffer) {
+ fUniformBuffer->release(gpu);
+ fUniformBuffer.reset();
+ }
+
+ if (fUniformDescriptorSet) {
+        fUniformDescriptorSet->recycle(gpu);
+ fUniformDescriptorSet = nullptr;
+ }
+
+ if (fSamplerDescriptorSet) {
+        fSamplerDescriptorSet->recycle(gpu);
+ fSamplerDescriptorSet = nullptr;
+ }
+}
+
+void GrVkPipelineState::abandonGPUResources() {
+ if (fPipeline) {
+ fPipeline->unrefAndAbandon();
+ fPipeline = nullptr;
+ }
+
+ if (fUniformBuffer) {
+ fUniformBuffer->abandon();
+ fUniformBuffer.reset();
+ }
+
+ if (fUniformDescriptorSet) {
+ fUniformDescriptorSet->unrefAndAbandon();
+ fUniformDescriptorSet = nullptr;
+ }
+
+ if (fSamplerDescriptorSet) {
+ fSamplerDescriptorSet->unrefAndAbandon();
+ fSamplerDescriptorSet = nullptr;
+ }
+}
+
+void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
+ const GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrVkCommandBuffer* commandBuffer) {
+ this->setRenderTargetState(renderTarget, programInfo.origin());
+
+ fGeometryProcessor->setData(fDataManager, programInfo.primProc(),
+ GrFragmentProcessor::CoordTransformIter(programInfo.pipeline()));
+ GrFragmentProcessor::Iter iter(programInfo.pipeline());
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fDataManager, *fp);
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+
+ {
+ SkIPoint offset;
+ GrTexture* dstTexture = programInfo.pipeline().peekDstTexture(&offset);
+
+ fXferProcessor->setData(fDataManager, programInfo.pipeline().getXferProcessor(),
+ dstTexture, offset);
+ }
+
+ // Get new descriptor set
+ if (fUniformBuffer) {
+ int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
+ if (fDataManager.uploadUniformBuffers(gpu, fUniformBuffer.get()) ||
+ !fUniformDescriptorSet) {
+ if (fUniformDescriptorSet) {
+ fUniformDescriptorSet->recycle(gpu);
+ }
+ fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
+ fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
+ this->writeUniformBuffers(gpu);
+ }
+ commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), uniformDSIdx, 1,
+ &fDescriptorSets[uniformDSIdx], 0, nullptr);
+ if (fUniformDescriptorSet) {
+ commandBuffer->addRecycledResource(fUniformDescriptorSet);
+ }
+ if (fUniformBuffer) {
+ commandBuffer->addRecycledResource(fUniformBuffer->resource());
+ }
+ }
+}
+
+void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrTextureProxy* const primProcTextures[],
+ GrVkCommandBuffer* commandBuffer) {
+ SkASSERT(primProcTextures || !primProc.numTextureSamplers());
+
+ struct SamplerBindings {
+ GrSamplerState fState;
+ GrVkTexture* fTexture;
+ };
+ SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
+ int currTextureBinding = 0;
+
+ fGeometryProcessor->setData(fDataManager, primProc,
+ GrFragmentProcessor::CoordTransformIter(pipeline));
+ for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
+ const auto& sampler = primProc.textureSampler(i);
+ auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
+ samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
+ }
+
+ GrFragmentProcessor::Iter iter(pipeline);
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ for (int i = 0; i < fp->numTextureSamplers(); ++i) {
+ const auto& sampler = fp->textureSampler(i);
+ samplerBindings[currTextureBinding++] =
+ {sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
+ }
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+
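+ // If the pipeline reads its destination (dst-read blending), the dst texture copy is
+ // bound in the last sampler slot with clamped nearest sampling.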
+ if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
+ samplerBindings[currTextureBinding++] = {
+ GrSamplerState::ClampNearest(),
+ static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
+ }
+
+ // Get new descriptor set
+ SkASSERT(fNumSamplers == currTextureBinding);
+ if (fNumSamplers) {
+ if (fSamplerDescriptorSet) {
+ fSamplerDescriptorSet->recycle(gpu);
+ }
+ fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
+ int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
+ fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
+ for (int i = 0; i < fNumSamplers; ++i) {
+ const GrSamplerState& state = samplerBindings[i].fState;
+ GrVkTexture* texture = samplerBindings[i].fTexture;
+
+ const GrVkImageView* textureView = texture->textureView();
+ const GrVkSampler* sampler = nullptr;
+ if (fImmutableSamplers[i]) {
+ sampler = fImmutableSamplers[i];
+ } else {
+ sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
+ state, texture->ycbcrConversionInfo());
+ }
+ SkASSERT(sampler);
+
+ VkDescriptorImageInfo imageInfo;
+ memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
+ imageInfo.sampler = sampler->sampler();
+ imageInfo.imageView = textureView->imageView();
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeInfo;
+ memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
+ writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeInfo.pNext = nullptr;
+ writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
+ writeInfo.dstBinding = i;
+ writeInfo.dstArrayElement = 0;
+ writeInfo.descriptorCount = 1;
+ writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ writeInfo.pImageInfo = &imageInfo;
+ writeInfo.pBufferInfo = nullptr;
+ writeInfo.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(),
+ UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
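+ // addResource() takes its own ref on the sampler, so we drop the ref returned by
+ // findOrCreateCompatibleSampler(); immutable samplers stay owned by fImmutableSamplers.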
+ commandBuffer->addResource(sampler);
+ if (!fImmutableSamplers[i]) {
+ sampler->unref(gpu);
+ }
+ commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
+ commandBuffer->addResource(samplerBindings[i].fTexture->resource());
+ }
+
+ commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), samplerDSIdx, 1,
+ &fDescriptorSets[samplerDSIdx], 0, nullptr);
+ commandBuffer->addRecycledResource(fSamplerDescriptorSet);
+ }
+}
+
+void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
+ VkDescriptorBufferInfo* bufferInfo,
+ const GrVkUniformBuffer* buffer,
+ VkDescriptorSet descriptorSet) {
+
+ memset(bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ bufferInfo->buffer = buffer->buffer();
+ bufferInfo->offset = buffer->offset();
+ bufferInfo->range = buffer->size();
+
+ memset(descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
+ descriptorWrite->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrite->pNext = nullptr;
+ descriptorWrite->dstSet = descriptorSet;
+ descriptorWrite->dstBinding = GrVkUniformHandler::kUniformBinding;
+ descriptorWrite->dstArrayElement = 0;
+ descriptorWrite->descriptorCount = 1;
+ descriptorWrite->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrite->pImageInfo = nullptr;
+ descriptorWrite->pBufferInfo = bufferInfo;
+ descriptorWrite->pTexelBufferView = nullptr;
+}
+
+void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
+ VkWriteDescriptorSet descriptorWrites[3];
+ VkDescriptorBufferInfo bufferInfos[3];
+
+ uint32_t writeCount = 0;
+
+ if (fUniformBuffer.get()) {
+ set_uniform_descriptor_writes(&descriptorWrites[writeCount],
+ &bufferInfos[writeCount],
+ fUniformBuffer.get(),
+ fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
+ ++writeCount;
+ }
+
+ if (writeCount) {
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ writeCount,
+ descriptorWrites,
+ 0, nullptr));
+ }
+}
+
+void GrVkPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {
+
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
+ fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
+ }
+
+ // set RT adjustment
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != origin ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = origin;
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+void GrVkPipelineState::bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
+ commandBuffer->bindPipeline(gpu, fPipeline);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h
new file mode 100644
index 0000000000..331bb01f01
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkPipelineState_DEFINED
+#define GrVkPipelineState_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/vk/GrVkDescriptorSetManager.h"
+#include "src/gpu/vk/GrVkPipelineStateDataManager.h"
+
+class GrPipeline;
+class GrStencilSettings;
+class GrVkBufferView;
+class GrVkCommandBuffer;
+class GrVkDescriptorPool;
+class GrVkDescriptorSet;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkPipeline;
+class GrVkSampler;
+class GrVkTexture;
+class GrVkUniformBuffer;
+
+/**
+ * This class holds onto a GrVkPipeline object that we use for draws. Besides storing the actual
+ * GrVkPipeline object, this class is also responsible for handling all uniforms, descriptors, samplers,
+ * and other similar objects that are used along with the VkPipeline in the draw. This includes both
+ * allocating and freeing these objects, as well as updating their values.
+ */
+class GrVkPipelineState : public SkRefCnt {
+public:
+ using UniformInfoArray = GrVkPipelineStateDataManager::UniformInfoArray;
+ using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
+
+ GrVkPipelineState(
+ GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ const GrVkDescriptorSetManager::Handle& samplerDSHandle,
+ const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t uniformSize,
+ const UniformInfoArray& samplers,
+ std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
+ std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
+ int fragmentProcessorCnt);
+
+ ~GrVkPipelineState();
+
+ void setAndBindUniforms(GrVkGpu*, const GrRenderTarget*, const GrProgramInfo&,
+ GrVkCommandBuffer*);
+ /**
+ * This must be called after setAndBindUniforms() since that function invalidates texture
+ * bindings.
+ */
+ void setAndBindTextures(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&,
+ const GrTextureProxy* const primitiveProcessorTextures[],
+ GrVkCommandBuffer*);
+
+ void bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer);
+
+ void addUniformResources(GrVkCommandBuffer&, GrVkSampler*[], GrVkTexture*[], int numTextures);
+
+ void freeGPUResources(GrVkGpu* gpu);
+
+ void abandonGPUResources();
+
+private:
+ void writeUniformBuffers(const GrVkGpu* gpu);
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to Vulkan normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin)-1;
+ }
+
+ /**
+ * Gets a float4 that adjusts the position from Skia device coords to Vulkan's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous float3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
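+
+ // Worked example (illustrative): for a 100x50 top-left-origin render target,
+ // getRTAdjustmentVec() yields v = {2/100, -1, 2/50, -1}, so with homogeneous z = 1 a
+ // device-space x of 0 maps to -1 and x of 100 maps to +1; a bottom-left origin negates
+ // the y terms, flipping that axis.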
+
+ // Helper for setAndBindUniforms() that loads the RT adjustment and RT height uniforms
+ void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);
+
+ // GrVkResources
+ GrVkPipeline* fPipeline;
+
+ // The DescriptorSets need to survive until the gpu has finished all draws that use them.
+ // However, they will only be freed by the descriptor pool, so simply keeping the
+ // descriptor pool alive through the draw keeps the descriptor sets alive as well. Thus we do
+ // not need GrVkResource versions of VkDescriptorSet. We hold on to these in the
+ // GrVkPipelineState since we update the descriptor sets and bind them at separate times.
+ VkDescriptorSet fDescriptorSets[3];
+
+ const GrVkDescriptorSet* fUniformDescriptorSet;
+ const GrVkDescriptorSet* fSamplerDescriptorSet;
+
+ const GrVkDescriptorSetManager::Handle fSamplerDSHandle;
+
+ SkSTArray<4, const GrVkSampler*> fImmutableSamplers;
+
+ std::unique_ptr<GrVkUniformBuffer> fUniformBuffer;
+
+ // Tracks the current render target uniforms stored in the uniform buffer.
+ RenderTargetState fRenderTargetState;
+ GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;
+
+ // Processors in the GrVkPipelineState
+ std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
+ std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
+ int fFragmentProcessorCnt;
+
+ GrVkPipelineStateDataManager fDataManager;
+
+ int fNumSamplers;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp
new file mode 100644
index 0000000000..4d595e7eca
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp
@@ -0,0 +1,353 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrPersistentCacheUtils.h"
+#include "src/gpu/GrShaderCaps.h"
+#include "src/gpu/GrShaderUtils.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/vk/GrVkDescriptorSetManager.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+
+typedef size_t shader_size;
+
+GrVkPipelineState* GrVkPipelineStateBuilder::CreatePipelineState(
+ GrVkGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ const GrStencilSettings& stencil,
+ GrPrimitiveType primitiveType,
+ Desc* desc,
+ VkRenderPass compatibleRenderPass) {
+ // create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc
+ GrVkPipelineStateBuilder builder(gpu, renderTarget, programInfo, desc);
+
+ if (!builder.emitAndInstallProcs()) {
+ return nullptr;
+ }
+
+ return builder.finalize(stencil, primitiveType, compatibleRenderPass, desc);
+}
+
+GrVkPipelineStateBuilder::GrVkPipelineStateBuilder(GrVkGpu* gpu,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrProgramDesc* desc)
+ : INHERITED(renderTarget, programInfo, desc)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {}
+
+const GrCaps* GrVkPipelineStateBuilder::caps() const {
+ return fGpu->caps();
+}
+
+void GrVkPipelineStateBuilder::finalizeFragmentOutputColor(GrShaderVar& outputColor) {
+ outputColor.addLayoutQualifier("location = 0, index = 0");
+}
+
+void GrVkPipelineStateBuilder::finalizeFragmentSecondaryColor(GrShaderVar& outputColor) {
+ outputColor.addLayoutQualifier("location = 0, index = 1");
+}
+
+bool GrVkPipelineStateBuilder::createVkShaderModule(VkShaderStageFlagBits stage,
+ const SkSL::String& sksl,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ const SkSL::Program::Settings& settings,
+ Desc* desc,
+ SkSL::String* outSPIRV,
+ SkSL::Program::Inputs* outInputs) {
+ if (!GrCompileVkShaderModule(fGpu, sksl, stage, shaderModule,
+ stageInfo, settings, outSPIRV, outInputs)) {
+ return false;
+ }
+ if (outInputs->fRTHeight) {
+ this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
+ }
+ return true;
+}
+
+bool GrVkPipelineStateBuilder::installVkShaderModule(VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ SkSL::String spirv,
+ SkSL::Program::Inputs inputs) {
+ if (!GrInstallVkShaderModule(fGpu, spirv, stage, shaderModule, stageInfo)) {
+ return false;
+ }
+ if (inputs.fRTHeight) {
+ this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
+ }
+ return true;
+}
+
+static constexpr SkFourByteTag kSPIRV_Tag = SkSetFourByteTag('S', 'P', 'R', 'V');
+static constexpr SkFourByteTag kSKSL_Tag = SkSetFourByteTag('S', 'K', 'S', 'L');
+
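+
+ // A cached blob is a 4-byte tag (kSPIRV_Tag or kSKSL_Tag) followed by the shaders and
+ // inputs packed by GrPersistentCacheUtils; finalize() reads the tag to choose a path.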
+int GrVkPipelineStateBuilder::loadShadersFromCache(SkReader32* cached,
+ VkShaderModule outShaderModules[],
+ VkPipelineShaderStageCreateInfo* outStageInfo) {
+ SkSL::String shaders[kGrShaderTypeCount];
+ SkSL::Program::Inputs inputs[kGrShaderTypeCount];
+
+ GrPersistentCacheUtils::UnpackCachedShaders(cached, shaders, inputs, kGrShaderTypeCount);
+
+ SkAssertResult(this->installVkShaderModule(VK_SHADER_STAGE_VERTEX_BIT,
+ fVS,
+ &outShaderModules[kVertex_GrShaderType],
+ &outStageInfo[0],
+ shaders[kVertex_GrShaderType],
+ inputs[kVertex_GrShaderType]));
+
+ SkAssertResult(this->installVkShaderModule(VK_SHADER_STAGE_FRAGMENT_BIT,
+ fFS,
+ &outShaderModules[kFragment_GrShaderType],
+ &outStageInfo[1],
+ shaders[kFragment_GrShaderType],
+ inputs[kFragment_GrShaderType]));
+
+ if (!shaders[kGeometry_GrShaderType].empty()) {
+ SkAssertResult(this->installVkShaderModule(VK_SHADER_STAGE_GEOMETRY_BIT,
+ fGS,
+ &outShaderModules[kGeometry_GrShaderType],
+ &outStageInfo[2],
+ shaders[kGeometry_GrShaderType],
+ inputs[kGeometry_GrShaderType]));
+ return 3;
+ } else {
+ return 2;
+ }
+}
+
+void GrVkPipelineStateBuilder::storeShadersInCache(const SkSL::String shaders[],
+ const SkSL::Program::Inputs inputs[],
+ bool isSkSL) {
+ const Desc* desc = static_cast<const Desc*>(this->desc());
+ // Here we shear off the Vk-specific portion of the Desc in order to create the
+ // persistent key. This is because Vk only caches the SPIR-V code, not the fully compiled
+ // program, and that only depends on the base GrProgramDesc data.
+ sk_sp<SkData> key = SkData::MakeWithoutCopy(desc->asKey(), desc->shaderKeyLength());
+ sk_sp<SkData> data = GrPersistentCacheUtils::PackCachedShaders(isSkSL ? kSKSL_Tag : kSPIRV_Tag,
+ shaders,
+ inputs, kGrShaderTypeCount);
+ this->gpu()->getContext()->priv().getPersistentCache()->store(*key, *data);
+}
+
+GrVkPipelineState* GrVkPipelineStateBuilder::finalize(const GrStencilSettings& stencil,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass,
+ Desc* desc) {
+ VkDescriptorSetLayout dsLayout[2];
+ VkPipelineLayout pipelineLayout;
+ VkShaderModule shaderModules[kGrShaderTypeCount] = { VK_NULL_HANDLE,
+ VK_NULL_HANDLE,
+ VK_NULL_HANDLE };
+
+ GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
+ // These layouts are not owned by the PipelineStateBuilder and thus should not be destroyed
+ dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = resourceProvider.getUniformDSLayout();
+
+ GrVkDescriptorSetManager::Handle samplerDSHandle;
+ resourceProvider.getSamplerDescriptorSetHandle(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ fUniformHandler, &samplerDSHandle);
+ dsLayout[GrVkUniformHandler::kSamplerDescSet] =
+ resourceProvider.getSamplerDSLayout(samplerDSHandle);
+
+ // Create the VkPipelineLayout
+ VkPipelineLayoutCreateInfo layoutCreateInfo;
+ memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
+ layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ layoutCreateInfo.pNext = nullptr;
+ layoutCreateInfo.flags = 0;
+ layoutCreateInfo.setLayoutCount = 2;
+ layoutCreateInfo.pSetLayouts = dsLayout;
+ layoutCreateInfo.pushConstantRangeCount = 0;
+ layoutCreateInfo.pPushConstantRanges = nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
+ &layoutCreateInfo,
+ nullptr,
+ &pipelineLayout));
+
+ // We need to enable the following extensions so that the compiler can correctly make SPIR-V
+ // from our GLSL shaders.
+ fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ this->finalizeShaders();
+
+ VkPipelineShaderStageCreateInfo shaderStageInfo[3];
+ SkSL::Program::Settings settings;
+ settings.fCaps = this->caps()->shaderCaps();
+ settings.fVkCaps = &this->gpu()->vkCaps();
+ settings.fFlipY = this->origin() != kTopLeft_GrSurfaceOrigin;
+ settings.fSharpenTextures =
+ this->gpu()->getContext()->priv().options().fSharpenMipmappedTextures;
+ settings.fRTHeightOffset = fUniformHandler.getRTHeightOffset();
+ SkASSERT(!this->fragColorIsInOut());
+
+ sk_sp<SkData> cached;
+ SkReader32 reader;
+ SkFourByteTag shaderType = 0;
+ auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
+ if (persistentCache) {
+ // Here we shear off the Vk-specific portion of the Desc in order to create the
+ // persistent key. This is because Vk only caches the SPIR-V code, not the fully compiled
+ // program, and that only depends on the base GrProgramDesc data.
+ sk_sp<SkData> key = SkData::MakeWithoutCopy(desc->asKey(), desc->shaderKeyLength());
+ cached = persistentCache->load(*key);
+ if (cached) {
+ reader.setMemory(cached->data(), cached->size());
+ shaderType = reader.readU32();
+ }
+ }
+
+ int numShaderStages = 0;
+ if (kSPIRV_Tag == shaderType) {
+ numShaderStages = this->loadShadersFromCache(&reader, shaderModules, shaderStageInfo);
+ } else {
+ numShaderStages = 2; // We always have at least vertex and fragment stages.
+ SkSL::String shaders[kGrShaderTypeCount];
+ SkSL::Program::Inputs inputs[kGrShaderTypeCount];
+
+ SkSL::String* sksl[kGrShaderTypeCount] = {
+ &fVS.fCompilerString,
+ &fGS.fCompilerString,
+ &fFS.fCompilerString,
+ };
+ SkSL::String cached_sksl[kGrShaderTypeCount];
+ if (kSKSL_Tag == shaderType) {
+ GrPersistentCacheUtils::UnpackCachedShaders(&reader, cached_sksl, inputs,
+ kGrShaderTypeCount);
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ sksl[i] = &cached_sksl[i];
+ }
+ }
+
+ bool success = this->createVkShaderModule(VK_SHADER_STAGE_VERTEX_BIT,
+ *sksl[kVertex_GrShaderType],
+ &shaderModules[kVertex_GrShaderType],
+ &shaderStageInfo[0],
+ settings,
+ desc,
+ &shaders[kVertex_GrShaderType],
+ &inputs[kVertex_GrShaderType]);
+
+ success = success && this->createVkShaderModule(VK_SHADER_STAGE_FRAGMENT_BIT,
+ *sksl[kFragment_GrShaderType],
+ &shaderModules[kFragment_GrShaderType],
+ &shaderStageInfo[1],
+ settings,
+ desc,
+ &shaders[kFragment_GrShaderType],
+ &inputs[kFragment_GrShaderType]);
+
+ if (this->primitiveProcessor().willUseGeoShader()) {
+ success = success && this->createVkShaderModule(VK_SHADER_STAGE_GEOMETRY_BIT,
+ *sksl[kGeometry_GrShaderType],
+ &shaderModules[kGeometry_GrShaderType],
+ &shaderStageInfo[2],
+ settings,
+ desc,
+ &shaders[kGeometry_GrShaderType],
+ &inputs[kGeometry_GrShaderType]);
+ ++numShaderStages;
+ }
+
+ if (!success) {
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ if (shaderModules[i]) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(),
+ shaderModules[i], nullptr));
+ }
+ }
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
+ nullptr));
+ return nullptr;
+ }
+
+ if (persistentCache && !cached) {
+ bool isSkSL = false;
+ if (fGpu->getContext()->priv().options().fShaderCacheStrategy ==
+ GrContextOptions::ShaderCacheStrategy::kSkSL) {
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ shaders[i] = GrShaderUtils::PrettyPrint(*sksl[i]);
+ }
+ isSkSL = true;
+ }
+ this->storeShadersInCache(shaders, inputs, isSkSL);
+ }
+ }
+ GrVkPipeline* pipeline = resourceProvider.createPipeline(fProgramInfo, stencil,
+ shaderStageInfo, numShaderStages, primitiveType, compatibleRenderPass, pipelineLayout);
+ for (int i = 0; i < kGrShaderTypeCount; ++i) {
+ // This if check should not be needed since calling destroy on a VK_NULL_HANDLE is allowed.
+ // However, doing so crashes certain drivers (e.g. NVIDIA).
+ if (shaderModules[i]) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), shaderModules[i],
+ nullptr));
+ }
+ }
+
+ if (!pipeline) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
+ nullptr));
+ return nullptr;
+ }
+
+ return new GrVkPipelineState(fGpu,
+ pipeline,
+ samplerDSHandle,
+ fUniformHandles,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fCurrentUBOOffset,
+ fUniformHandler.fSamplers,
+ std::move(fGeometryProcessor),
+ std::move(fXferProcessor),
+ std::move(fFragmentProcessors),
+ fFragmentProcessorCnt);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrVkPipelineStateBuilder::Desc::Build(Desc* desc,
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ const GrStencilSettings& stencil,
+ GrPrimitiveType primitiveType,
+ GrVkGpu* gpu) {
+ if (!GrProgramDesc::Build(desc, renderTarget, programInfo, primitiveType, gpu)) {
+ return false;
+ }
+
+ GrProcessorKeyBuilder b(&desc->key());
+
+ b.add32(GrVkGpu::kShader_PersistentCacheKeyType);
+ int keyLength = desc->key().count();
+ SkASSERT(0 == (keyLength % 4));
+ desc->fShaderKeyLength = SkToU32(keyLength);
+
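+ // Everything appended below (render pass, stencil, blend, primitive type) is
+ // Vk-pipeline-specific; fShaderKeyLength above marks the shader-only prefix that is
+ // used as the persistent cache key.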
+ GrVkRenderTarget* vkRT = (GrVkRenderTarget*)renderTarget;
+ vkRT->simpleRenderPass()->genKey(&b);
+
+ stencil.genKey(&b);
+
+ b.add32(programInfo.pipeline().getBlendInfoKey());
+
+ b.add32((uint32_t)primitiveType);
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h
new file mode 100644
index 0000000000..0ba7fc5001
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h
@@ -0,0 +1,121 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipelineStateBuilder_DEFINED
+#define GrVkPipelineStateBuilder_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrProgramDesc.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkUniformHandler.h"
+#include "src/gpu/vk/GrVkVaryingHandler.h"
+#include "src/sksl/SkSLCompiler.h"
+
+class GrVkGpu;
+class GrVkRenderPass;
+class SkReader32;
+
+class GrVkPipelineStateBuilder : public GrGLSLProgramBuilder {
+public:
+ /**
+ * For Vulkan we want to cache the entire VkPipeline for reuse across draws. The Desc here holds all
+ * the information needed to differentiate one pipeline from another.
+ *
+ * The GrProgramDesc contains all the information needed to create the actual shaders for the
+ * pipeline.
+ *
+ * For Vulkan we need to add to the GrProgramDesc to include the rest of the state on the
+ * pipeline. This includes stencil settings, blending information, render pass format, draw face
+ * information, and primitive type. Note that some state is set dynamically on the pipeline for
+ * each draw and thus is not included in this descriptor. This includes the viewport, scissor,
+ * and blend constant.
+ */
+ class Desc : public GrProgramDesc {
+ public:
+ static bool Build(Desc*,
+ GrRenderTarget*,
+ const GrProgramInfo&,
+ const GrStencilSettings&,
+ GrPrimitiveType primitiveType,
+ GrVkGpu* gpu);
+
+ size_t shaderKeyLength() const { return fShaderKeyLength; }
+
+ private:
+ size_t fShaderKeyLength;
+
+ typedef GrProgramDesc INHERITED;
+ };
+
+ /** Generates a pipeline state.
+ *
+ * The GrVkPipelineState implements what is specified in the GrPipeline and GrPrimitiveProcessor
+ * as input. After successful generation, the builder result objects are available to be used.
+ * This function may modify the program key by setting the surface origin key to 0 (unspecified)
+ * if it turns out the program does not care about the surface origin.
+ * @return the new pipeline state, or nullptr on failure.
+ */
+ static GrVkPipelineState* CreatePipelineState(GrVkGpu*,
+ GrRenderTarget*,
+ const GrProgramInfo&,
+ const GrStencilSettings&,
+ GrPrimitiveType,
+ Desc*,
+ VkRenderPass compatibleRenderPass);
+
+ const GrCaps* caps() const override;
+
+ GrVkGpu* gpu() const { return fGpu; }
+
+ void finalizeFragmentOutputColor(GrShaderVar& outputColor) override;
+ void finalizeFragmentSecondaryColor(GrShaderVar& outputColor) override;
+
+private:
+ GrVkPipelineStateBuilder(GrVkGpu*, GrRenderTarget*, const GrProgramInfo&, GrProgramDesc*);
+
+ GrVkPipelineState* finalize(const GrStencilSettings&,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass,
+ Desc*);
+
+ // returns number of shader stages
+ int loadShadersFromCache(SkReader32* cached, VkShaderModule outShaderModules[],
+ VkPipelineShaderStageCreateInfo* outStageInfo);
+
+ void storeShadersInCache(const SkSL::String shaders[], const SkSL::Program::Inputs inputs[],
+ bool isSkSL);
+
+ bool createVkShaderModule(VkShaderStageFlagBits stage,
+ const SkSL::String& sksl,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ const SkSL::Program::Settings& settings,
+ Desc* desc,
+ SkSL::String* outSPIRV,
+ SkSL::Program::Inputs* outInputs);
+
+ bool installVkShaderModule(VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ SkSL::String spirv,
+ SkSL::Program::Inputs inputs);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrVkGpu* fGpu;
+ GrVkVaryingHandler fVaryingHandler;
+ GrVkUniformHandler fUniformHandler;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp
new file mode 100644
index 0000000000..da6a69f3f3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/gpu/GrContextOptions.h"
+#include "src/core/SkOpts.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrStencilSettings.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipelineState.h"
+#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+// Display pipeline state cache usage
+static const bool c_DisplayVkPipelineCache{false};
+#endif
+
+struct GrVkResourceProvider::PipelineStateCache::Entry {
+ Entry(GrVkGpu* gpu, GrVkPipelineState* pipelineState)
+ : fGpu(gpu)
+ , fPipelineState(pipelineState) {}
+
+ ~Entry() {
+ if (fPipelineState) {
+ fPipelineState->freeGPUResources(fGpu);
+ }
+ }
+
+ GrVkGpu* fGpu;
+ std::unique_ptr<GrVkPipelineState> fPipelineState;
+};
+
+GrVkResourceProvider::PipelineStateCache::PipelineStateCache(GrVkGpu* gpu)
+ : fMap(gpu->getContext()->priv().options().fRuntimeProgramCacheSize)
+ , fGpu(gpu)
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ , fTotalRequests(0)
+ , fCacheMisses(0)
+#endif
+{}
+
+GrVkResourceProvider::PipelineStateCache::~PipelineStateCache() {
+ SkASSERT(0 == fMap.count());
+ // dump stats
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ if (c_DisplayVkPipelineCache) {
+ SkDebugf("--- Pipeline State Cache ---\n");
+ SkDebugf("Total requests: %d\n", fTotalRequests);
+ SkDebugf("Cache misses: %d\n", fCacheMisses);
+ SkDebugf("Cache miss %%: %f\n", (fTotalRequests > 0) ?
+ 100.f * fCacheMisses / fTotalRequests :
+ 0.f);
+ SkDebugf("---------------------\n");
+ }
+#endif
+}
+
+void GrVkResourceProvider::PipelineStateCache::abandon() {
+ fMap.foreach([](std::unique_ptr<Entry>* e) {
+ (*e)->fPipelineState->abandonGPUResources();
+ (*e)->fPipelineState = nullptr;
+ });
+ fMap.reset();
+}
+
+void GrVkResourceProvider::PipelineStateCache::release() {
+ fMap.reset();
+}
+
+GrVkPipelineState* GrVkResourceProvider::PipelineStateCache::refPipelineState(
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fTotalRequests;
+#endif
+ GrStencilSettings stencil;
+ if (programInfo.pipeline().isStencilEnabled()) {
+ // TODO: attach stencil and create settings during render target flush.
+ SkASSERT(renderTarget->renderTargetPriv().getStencilAttachment());
+ stencil.reset(*programInfo.pipeline().getUserStencil(),
+ programInfo.pipeline().hasStencilClip(),
+ renderTarget->renderTargetPriv().numStencilBits());
+ }
+
+ // TODO: can this be unified between GL, Vk and Mtl?
+ // Get GrVkProgramDesc
+ GrVkPipelineStateBuilder::Desc desc;
+ if (!GrVkPipelineStateBuilder::Desc::Build(&desc, renderTarget, programInfo, stencil,
+ primitiveType, fGpu)) {
+ GrCapsDebugf(fGpu->caps(), "Failed to build vk program descriptor!\n");
+ return nullptr;
+ }
+
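+ // fMap is an LRU cache (sized by GrContextOptions::fRuntimeProgramCacheSize) keyed by
+ // the full Vk pipeline descriptor; on a miss we build a new entry and insert it.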
+ std::unique_ptr<Entry>* entry = fMap.find(desc);
+ if (!entry) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fCacheMisses;
+#endif
+ GrVkPipelineState* pipelineState(GrVkPipelineStateBuilder::CreatePipelineState(
+ fGpu, renderTarget, programInfo,
+ stencil, primitiveType, &desc, compatibleRenderPass));
+ if (!pipelineState) {
+ return nullptr;
+ }
+ entry = fMap.insert(desc, std::unique_ptr<Entry>(new Entry(fGpu, pipelineState)));
+ return (*entry)->fPipelineState.get();
+ }
+ return (*entry)->fPipelineState.get();
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp
new file mode 100644
index 0000000000..cbef926177
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp
@@ -0,0 +1,346 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkPipelineStateDataManager.h"
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUniformBuffer.h"
+
+GrVkPipelineStateDataManager::GrVkPipelineStateDataManager(const UniformInfoArray& uniforms,
+ uint32_t uniformSize)
+ : fUniformSize(uniformSize)
+ , fUniformsDirty(false) {
+ fUniformData.reset(uniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ // We must add uniforms in the same order as the UniformInfoArray so that UniformHandles
+ // already owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrVkUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkASSERT(GrShaderVar::kNonArray == uniformInfo.fVariable.getArrayCount() ||
+ uniformInfo.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
+ uniform.fType = uniformInfo.fVariable.getType();
+ )
+
+ uniform.fOffset = uniformInfo.fUBOffset;
+ }
+}
+
+void* GrVkPipelineStateDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
+ fUniformsDirty = true;
+ return static_cast<char*>(fUniformData.get())+uni.fOffset;
+}
+
+void GrVkPipelineStateDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, &i, sizeof(int32_t));
+}
+
+void GrVkPipelineStateDataManager::set1iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
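+ // Each array element occupies a full 16-byte slot (std140-style array stride), hence
+ // the 4*sizeof(int32_t) advance per element below; the same pattern applies to the
+ // other setXiv/setXfv methods in this file.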
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrVkPipelineStateDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set2i(UniformHandle u, int32_t i0, int32_t i1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[2] = { i0, i1 };
+ memcpy(buffer, v, 2 * sizeof(int32_t));
+}
+
+void GrVkPipelineStateDataManager::set2iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[2 * i];
+ memcpy(buffer, curVec, 2 * sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrVkPipelineStateDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[2 * i];
+ memcpy(buffer, curVec, 2 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set3i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[3] = { i0, i1, i2 };
+ memcpy(buffer, v, 3 * sizeof(int32_t));
+}
+
+void GrVkPipelineStateDataManager::set3iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrVkPipelineStateDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set4i(UniformHandle u,
+ int32_t i0,
+ int32_t i1,
+ int32_t i2,
+ int32_t i3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ int32_t v[4] = { i0, i1, i2, i3 };
+ memcpy(buffer, v, 4 * sizeof(int32_t));
+}
+
+void GrVkPipelineStateDataManager::set4iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[4 * i];
+ memcpy(buffer, curVec, 4 * sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrVkPipelineStateDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix2fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrVkPipelineStateDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrVkPipelineStateDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrVkPipelineStateDataManager::setMatrices(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat2x2_GrSLType + (N - 2) ||
+ uni.fType == kHalf2x2_GrSLType + (N - 2));
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = fUniformData.get();
+ fUniformsDirty = true;
+
+ set_uniform_matrix<N>::set(buffer, uni.fOffset, arrayCount, matrices);
+}
+
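+ // Matrix uniforms are written column by column: for N in {2, 3}, each N-float column is
+ // padded out to a 16-byte slot (std140-style), while 4x4 matrices are already tightly
+ // packed and can be copied with a single memcpy (the specialization below).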
+template<int N> struct set_uniform_matrix {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ for (int i = 0; i < count; ++i) {
+ const float* matrix = &matrices[N * N * i];
+ for (int j = 0; j < N; ++j) {
+ memcpy(buffer, &matrix[j * N], N * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+ }
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ memcpy(buffer, matrices, count * 16 * sizeof(float));
+ }
+};
+
+bool GrVkPipelineStateDataManager::uploadUniformBuffers(GrVkGpu* gpu,
+ GrVkUniformBuffer* buffer) const {
+ bool updatedBuffer = false;
+ if (buffer && fUniformsDirty) {
+ SkAssertResult(buffer->updateData(gpu, fUniformData.get(),
+ fUniformSize, &updatedBuffer));
+ fUniformsDirty = false;
+ }
+
+ return updatedBuffer;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h
new file mode 100644
index 0000000000..91d50924b6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h
@@ -0,0 +1,85 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipelineStateDataManager_DEFINED
+#define GrVkPipelineStateDataManager_DEFINED
+
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/gpu/vk/GrVkUniformHandler.h"
+
+class GrVkGpu;
+class GrVkUniformBuffer;
+
+class GrVkPipelineStateDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrVkUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrVkPipelineStateDataManager(const UniformInfoArray&,
+ uint32_t uniformSize);
+
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2i(UniformHandle, int32_t, int32_t) const override;
+ void set2iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3i(UniformHandle, int32_t, int32_t, int32_t) const override;
+ void set3iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4i(UniformHandle, int32_t, int32_t, int32_t, int32_t) const override;
+ void set4iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // Matrices are column-major; the first three upload a single matrix, the latter three upload
+ // arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SK_ABORT("Only supported in NVPR, which is not in vulkan");
+ }
+
+ // Returns true if the uniform buffer needed to create a new underlying VkBuffer object in
+ // order to upload data. If true is returned, this is a signal to the caller that they will
+ // need to update the descriptor set that is using this buffer.
+ bool uploadUniformBuffers(GrVkGpu* gpu,
+ GrVkUniformBuffer* buffer) const;
+private:
+ struct Uniform {
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ void* getBufferPtrAndMarkDirty(const Uniform& uni) const;
+
+ uint32_t fUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fUniformData;
+ mutable bool fUniformsDirty;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp
new file mode 100644
index 0000000000..d73ae6149a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp
@@ -0,0 +1,266 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkRenderPass.h"
+
+#include "src/gpu/GrProcessor.h"
+#include "src/gpu/vk/GrVkFramebuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+typedef GrVkRenderPass::AttachmentsDescriptor::AttachmentDesc AttachmentDesc;
+
+void setup_vk_attachment_description(VkAttachmentDescription* attachment,
+ const AttachmentDesc& desc,
+ VkImageLayout layout) {
+ attachment->flags = 0;
+ attachment->format = desc.fFormat;
+ SkAssertResult(GrSampleCountToVkSampleCount(desc.fSamples, &attachment->samples));
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ attachment->loadOp = desc.fLoadStoreOps.fLoadOp;
+ attachment->storeOp = desc.fLoadStoreOps.fStoreOp;
+ attachment->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachment->stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ break;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ attachment->loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachment->storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachment->stencilLoadOp = desc.fLoadStoreOps.fLoadOp;
+ attachment->stencilStoreOp = desc.fLoadStoreOps.fStoreOp;
+ break;
+ default:
+ SK_ABORT("Unexpected attachment layout");
+ }
+
+ attachment->initialLayout = layout;
+ attachment->finalLayout = layout;
+}
+
+void GrVkRenderPass::initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target) {
+ static const GrVkRenderPass::LoadStoreOps kBasicLoadStoreOps(VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_STORE_OP_STORE);
+
+ this->init(gpu, target, kBasicLoadStoreOps, kBasicLoadStoreOps);
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ uint32_t numAttachments = fAttachmentsDescriptor.fAttachmentCount;
+ // Attachment descriptions to be set on the render pass
+ SkTArray<VkAttachmentDescription> attachments(numAttachments);
+ attachments.reset(numAttachments);
+ memset(attachments.begin(), 0, numAttachments * sizeof(VkAttachmentDescription));
+
+ // Refs to attachments on the render pass (as described by the VkAttachmentDescriptions above)
+ // that are used by the subpass.
+ VkAttachmentReference colorRef;
+ VkAttachmentReference stencilRef;
+ uint32_t currentAttachment = 0;
+
+ // Go through each of the attachment types (color, stencil) and set the necessary fields
+ // on the various Vk structs.
+ VkSubpassDescription subpassDesc;
+ memset(&subpassDesc, 0, sizeof(VkSubpassDescription));
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ subpassDesc.pResolveAttachments = nullptr;
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ // set up color attachment
+ fAttachmentsDescriptor.fColor.fLoadStoreOps = colorOp;
+ setup_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fColor,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ colorRef.attachment = currentAttachment++;
+ colorRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.colorAttachmentCount = 1;
+
+ if (VK_ATTACHMENT_LOAD_OP_CLEAR == colorOp.fLoadOp) {
+ fClearValueCount = colorRef.attachment + 1;
+ }
+ } else {
+ // I don't think there should ever be a time where we don't have a color attachment
+ SkASSERT(false);
+ colorRef.attachment = VK_ATTACHMENT_UNUSED;
+ colorRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ subpassDesc.colorAttachmentCount = 0;
+ }
+ subpassDesc.pColorAttachments = &colorRef;
+
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ // set up stencil attachment
+ fAttachmentsDescriptor.fStencil.fLoadStoreOps = stencilOp;
+ setup_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fStencil,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ stencilRef.attachment = currentAttachment++;
+ stencilRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ if (VK_ATTACHMENT_LOAD_OP_CLEAR == stencilOp.fLoadOp) {
+ fClearValueCount = SkTMax(fClearValueCount, stencilRef.attachment + 1);
+ }
+ } else {
+ stencilRef.attachment = VK_ATTACHMENT_UNUSED;
+ stencilRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ subpassDesc.pDepthStencilAttachment = &stencilRef;
+
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ SkASSERT(numAttachments == currentAttachment);
+
+ // Create the VkRenderPass compatible with the attachment descriptions above
+ VkRenderPassCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkRenderPassCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments.begin();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateRenderPass(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fRenderPass));
+
+ // Get granularity for this render pass
+ GR_VK_CALL(gpu->vkInterface(), GetRenderAreaGranularity(gpu->device(),
+ fRenderPass,
+ &fGranularity));
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const GrVkRenderPass& compatibleRenderPass,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ fAttachmentFlags = compatibleRenderPass.fAttachmentFlags;
+ fAttachmentsDescriptor = compatibleRenderPass.fAttachmentsDescriptor;
+ this->init(gpu, colorOp, stencilOp);
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const GrVkRenderTarget& target,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ // Get attachment information from render target. This includes which attachments the render
+ // target has (color, stencil) and the attachments format and sample count.
+ target.getAttachmentsDescriptor(&fAttachmentsDescriptor, &fAttachmentFlags);
+ this->init(gpu, colorOp, stencilOp);
+}
+
+void GrVkRenderPass::freeGPUData(GrVkGpu* gpu) const {
+ if (!(fAttachmentFlags & kExternal_AttachmentFlag)) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
+ }
+}
+
+bool GrVkRenderPass::colorAttachmentIndex(uint32_t* index) const {
+ *index = fColorAttachmentIndex;
+ if ((fAttachmentFlags & kColor_AttachmentFlag) ||
+ (fAttachmentFlags & kExternal_AttachmentFlag)) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that the stencil attachment always comes after the color and
+// resolve attachments.
+bool GrVkRenderPass::stencilAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+bool GrVkRenderPass::isCompatible(const AttachmentsDescriptor& desc,
+ const AttachmentFlags& flags) const {
+ SkASSERT(!(fAttachmentFlags & kExternal_AttachmentFlag));
+ if (flags != fAttachmentFlags) {
+ return false;
+ }
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (!fAttachmentsDescriptor.fColor.isCompatible(desc.fColor)) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (!fAttachmentsDescriptor.fStencil.isCompatible(desc.fStencil)) {
+ return false;
+ }
+ }
+
+ return true;
+}
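+
+// Illustrative note (not part of the original source): this mirrors Vulkan's
+// render pass compatibility rules, where only attachment formats and sample
+// counts matter. Two passes over the same target that differ only in their
+// load/store ops are therefore compatible, e.g.
+//
+//   GrVkRenderPass::LoadStoreOps clear(VK_ATTACHMENT_LOAD_OP_CLEAR,
+//                                      VK_ATTACHMENT_STORE_OP_STORE);
+//   GrVkRenderPass::LoadStoreOps load(VK_ATTACHMENT_LOAD_OP_LOAD,
+//                                     VK_ATTACHMENT_STORE_OP_STORE);
+//   GrVkRenderPass a, b;
+//   a.init(gpu, target, clear, clear);
+//   b.init(gpu, target, load, load);
+//   SkASSERT(a.isCompatible(b));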
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderTarget& target) const {
+ SkASSERT(!(fAttachmentFlags & kExternal_AttachmentFlag));
+ AttachmentsDescriptor desc;
+ AttachmentFlags flags;
+ target.getAttachmentsDescriptor(&desc, &flags);
+
+ return this->isCompatible(desc, flags);
+}
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderPass& renderPass) const {
+ SkASSERT(!(fAttachmentFlags & kExternal_AttachmentFlag));
+ return this->isCompatible(renderPass.fAttachmentsDescriptor, renderPass.fAttachmentFlags);
+}
+
+bool GrVkRenderPass::isCompatibleExternalRP(VkRenderPass renderPass) const {
+ SkASSERT(fAttachmentFlags & kExternal_AttachmentFlag);
+ return fRenderPass == renderPass;
+}
+
+bool GrVkRenderPass::equalLoadStoreOps(const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps) const {
+ SkASSERT(!(fAttachmentFlags & kExternal_AttachmentFlag));
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fColor.fLoadStoreOps != colorOps) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fStencil.fLoadStoreOps != stencilOps) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrVkRenderPass::genKey(GrProcessorKeyBuilder* b) const {
+ b->add32(fAttachmentFlags);
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ b->add32(fAttachmentsDescriptor.fColor.fFormat);
+ b->add32(fAttachmentsDescriptor.fColor.fSamples);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ b->add32(fAttachmentsDescriptor.fStencil.fFormat);
+ b->add32(fAttachmentsDescriptor.fStencil.fSamples);
+ }
+ if (fAttachmentFlags & kExternal_AttachmentFlag) {
+ SkASSERT(!(fAttachmentFlags & ~kExternal_AttachmentFlag));
+ uint64_t handle = (uint64_t)fRenderPass;
+ b->add32((uint32_t)(handle & 0xFFFFFFFF));
+ b->add32((uint32_t)(handle>>32));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h
new file mode 100644
index 0000000000..725b1962cd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h
@@ -0,0 +1,158 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkRenderPass_DEFINED
+#define GrVkRenderPass_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrProcessorKeyBuilder;
+class GrVkGpu;
+class GrVkRenderTarget;
+
+class GrVkRenderPass : public GrVkResource {
+public:
+ GrVkRenderPass() : INHERITED(), fRenderPass(VK_NULL_HANDLE), fClearValueCount(0) {}
+
+ // Used when importing an external render pass. In this case we must be told the color
+ // attachment index explicitly.
+ explicit GrVkRenderPass(VkRenderPass renderPass, uint32_t colorAttachmentIndex)
+ : INHERITED()
+ , fRenderPass(renderPass)
+ , fAttachmentFlags(kExternal_AttachmentFlag)
+ , fClearValueCount(0)
+ , fColorAttachmentIndex(colorAttachmentIndex) {}
+
+ struct LoadStoreOps {
+ VkAttachmentLoadOp fLoadOp;
+ VkAttachmentStoreOp fStoreOp;
+
+ LoadStoreOps(VkAttachmentLoadOp loadOp, VkAttachmentStoreOp storeOp)
+ : fLoadOp(loadOp)
+ , fStoreOp(storeOp) {}
+
+ bool operator==(const LoadStoreOps& right) const {
+ return fLoadOp == right.fLoadOp && fStoreOp == right.fStoreOp;
+ }
+
+ bool operator!=(const LoadStoreOps& right) const {
+ return !(*this == right);
+ }
+ };
+
+ void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+ void init(const GrVkGpu* gpu,
+ const GrVkRenderTarget& target,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp);
+
+ void init(const GrVkGpu* gpu,
+ const GrVkRenderPass& compatibleRenderPass,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp);
+
+ struct AttachmentsDescriptor {
+ struct AttachmentDesc {
+ VkFormat fFormat;
+ int fSamples;
+ LoadStoreOps fLoadStoreOps;
+
+ AttachmentDesc()
+ : fFormat(VK_FORMAT_UNDEFINED)
+ , fSamples(0)
+ , fLoadStoreOps(VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE) {}
+ bool operator==(const AttachmentDesc& right) const {
+ return (fFormat == right.fFormat &&
+ fSamples == right.fSamples &&
+ fLoadStoreOps == right.fLoadStoreOps);
+ }
+ bool operator!=(const AttachmentDesc& right) const {
+ return !(*this == right);
+ }
+ bool isCompatible(const AttachmentDesc& desc) const {
+ return (fFormat == desc.fFormat && fSamples == desc.fSamples);
+ }
+ };
+ AttachmentDesc fColor;
+ AttachmentDesc fStencil;
+ uint32_t fAttachmentCount;
+ };
+
+ enum AttachmentFlags {
+ kColor_AttachmentFlag = 0x1,
+ kStencil_AttachmentFlag = 0x2,
+ // The external attachment flag signals that this render pass is imported from an external
+ // client. Since we don't know every attachment on the render pass, we don't set any of the
+ // specific attachment flags when using external. However, the external render pass must
+ // at least have a color attachment.
+ kExternal_AttachmentFlag = 0x4,
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(AttachmentFlags);
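+
+ // Illustrative use of the bitfield operators declared above (not part of the
+ // original source): AttachmentFlags combine and test like a plain bitmask:
+ //
+ //   AttachmentFlags flags = kColor_AttachmentFlag | kStencil_AttachmentFlag;
+ //   if (flags & kStencil_AttachmentFlag) { /* the pass has a stencil attachment */ }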
+
+ // The following return the index into the render pass attachment array for the given
+ // attachment. If the render pass does not have the given attachment, they return false and the
+ // index value should not be used.
+ bool colorAttachmentIndex(uint32_t* index) const;
+ bool stencilAttachmentIndex(uint32_t* index) const;
+
+ // Returns whether or not the structure of a RenderTarget matches that of the VkRenderPass in
+ // this object. Specifically this compares that the number of attachments, format of
+ // attachments, and sample counts are all the same. This function is used in the creation of
+ // basic RenderPasses that can be used when creating a VkFrameBuffer object.
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ bool isCompatible(const GrVkRenderPass& renderPass) const;
+
+ bool isCompatibleExternalRP(VkRenderPass) const;
+
+ bool equalLoadStoreOps(const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps) const;
+
+ VkRenderPass vkRenderPass() const { return fRenderPass; }
+
+ const VkExtent2D& granularity() const { return fGranularity; }
+
+ // Returns the number of clear values needed to begin this render pass. Currently this will
+ // only ever be 0 or 1, since we only ever clear the color attachment.
+ uint32_t clearValueCount() const { return fClearValueCount; }
+
+ void genKey(GrProcessorKeyBuilder* b) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkRenderPass: %d (%d refs)\n", fRenderPass, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkRenderPass(const GrVkRenderPass&);
+
+ void init(const GrVkGpu* gpu,
+ const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps);
+
+ bool isCompatible(const AttachmentsDescriptor&, const AttachmentFlags&) const;
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkRenderPass fRenderPass;
+ AttachmentFlags fAttachmentFlags;
+ AttachmentsDescriptor fAttachmentsDescriptor;
+ VkExtent2D fGranularity;
+ uint32_t fClearValueCount;
+ // For internally created render passes we assume the color attachment index is always 0.
+ uint32_t fColorAttachmentIndex = 0;
+
+ typedef GrVkResource INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrVkRenderPass::AttachmentFlags);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp
new file mode 100644
index 0000000000..d7d53fb3b0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp
@@ -0,0 +1,401 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkRenderTarget.h"
+
+#include "include/gpu/GrBackendSurface.h"
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkFramebuffer.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), GrBackendObjectOwnership::kBorrowed)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, info.fProtected)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(new GrVkImage(msaaInfo, std::move(msaaLayout),
+ GrBackendObjectOwnership::kOwned))
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(info.fProtected == msaaInfo.fProtected);
+ SkASSERT(sampleCnt > 1);
+ this->createFramebuffer(gpu);
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrBackendObjectOwnership ownership)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), ownership)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, sampleCnt, info.fProtected)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(
+ new GrVkImage(msaaInfo, std::move(msaaLayout), GrBackendObjectOwnership::kOwned))
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(info.fProtected == msaaInfo.fProtected);
+ SkASSERT(sampleCnt > 1);
+ this->createFramebuffer(gpu);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), GrBackendObjectOwnership::kBorrowed)
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, 1, info.fProtected)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ this->createFramebuffer(gpu);
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* colorAttachmentView,
+ GrBackendObjectOwnership ownership)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), ownership)
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, 1, info.fProtected)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ this->createFramebuffer(gpu);
+}
+
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkRenderPass* renderPass,
+ VkCommandBuffer secondaryCommandBuffer)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), GrBackendObjectOwnership::kBorrowed, true)
+ , GrRenderTarget(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, 1, info.fProtected)
+ , fColorAttachmentView(nullptr)
+ , fMSAAImage(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(renderPass)
+ , fSecondaryCommandBuffer(secondaryCommandBuffer) {
+ SkASSERT(fSecondaryCommandBuffer != VK_NULL_HANDLE);
+ this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+sk_sp<GrVkRenderTarget> GrVkRenderTarget::MakeWrappedRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout) {
+ SkASSERT(VK_NULL_HANDLE != info.fImage);
+
+ SkASSERT(1 == info.fLevelCount);
+ VkFormat pixelFormat = info.fFormat;
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ GrVkImageInfo msInfo;
+ sk_sp<GrVkImageLayout> msLayout;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (sampleCnt > 1) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = sampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ msImageDesc.fIsProtected = info.fProtected;
+
+ if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &msInfo)) {
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msInfo.fImage;
+
+ // Create Resolve attachment view
+ resolveAttachmentView = GrVkImageView::Create(gpu, info.fImage, pixelFormat,
+ GrVkImageView::kColor_Type, 1,
+ GrVkYcbcrConversionInfo());
+ if (!resolveAttachmentView) {
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ return nullptr;
+ }
+ msLayout.reset(new GrVkImageLayout(msInfo.fImageLayout));
+ } else {
+ // Set color attachment image
+ colorImage = info.fImage;
+ }
+
+ // Get color attachment view
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type, 1,
+ GrVkYcbcrConversionInfo());
+ if (!colorAttachmentView) {
+ if (sampleCnt > 1) {
+ resolveAttachmentView->unref(gpu);
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ }
+ return nullptr;
+ }
+
+ GrVkRenderTarget* vkRT;
+ if (sampleCnt > 1) {
+ vkRT = new GrVkRenderTarget(gpu, desc, sampleCnt, info, std::move(layout), msInfo,
+ std::move(msLayout), colorAttachmentView,
+ resolveAttachmentView);
+ } else {
+ vkRT = new GrVkRenderTarget(gpu, desc, info, std::move(layout), colorAttachmentView);
+ }
+
+ return sk_sp<GrVkRenderTarget>(vkRT);
+}
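+
+// Illustrative call site (not part of the original source); the image, format,
+// and layout values below are assumptions about a typical wrapped target:
+//
+//   GrVkImageInfo info;
+//   info.fImage       = externalVkImage;  // hypothetical externally created image
+//   info.fFormat      = VK_FORMAT_B8G8R8A8_UNORM;
+//   info.fLevelCount  = 1;
+//   info.fImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+//   sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(info.fImageLayout));
+//   sk_sp<GrVkRenderTarget> rt =
+//       GrVkRenderTarget::MakeWrappedRenderTarget(gpu, desc, /*sampleCnt=*/1,
+//                                                 info, std::move(layout));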
+
+sk_sp<GrVkRenderTarget> GrVkRenderTarget::MakeSecondaryCBRenderTarget(
+ GrVkGpu* gpu, const GrSurfaceDesc& desc, const GrVkDrawableInfo& vkInfo) {
+ // We only set the few properties of the GrVkImageInfo that we know, like layout and format.
+ // The others we keep at their default "null" values.
+ GrVkImageInfo info;
+ info.fImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ info.fFormat = vkInfo.fFormat;
+
+ sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL));
+
+ const GrVkRenderPass* rp =
+ gpu->resourceProvider().findCompatibleExternalRenderPass(vkInfo.fCompatibleRenderPass,
+ vkInfo.fColorAttachmentIndex);
+ if (!rp) {
+ return nullptr;
+ }
+
+ if (vkInfo.fSecondaryCommandBuffer == VK_NULL_HANDLE) {
+ return nullptr;
+ }
+
+ GrVkRenderTarget* vkRT = new GrVkRenderTarget(gpu, desc, info, std::move(layout), rp,
+ vkInfo.fSecondaryCommandBuffer);
+
+ return sk_sp<GrVkRenderTarget>(vkRT);
+}
+
+bool GrVkRenderTarget::completeStencilAttachment() {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ this->createFramebuffer(this->getVkGpu());
+ return true;
+}
+
+void GrVkRenderTarget::createFramebuffer(GrVkGpu* gpu) {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ }
+
+ // Vulkan requires a compatible render pass before we can create our framebuffer, so we grab a
+ // (cached) basic render pass here, used only for framebuffer creation.
+ fCachedSimpleRenderPass =
+ gpu->resourceProvider().findCompatibleRenderPass(*this, &fCompatibleRPHandle);
+
+ // Stencil attachment view is stored in the base RT stencil attachment
+ const GrVkImageView* stencilView = this->stencilAttachmentView();
+ fFramebuffer = GrVkFramebuffer::Create(gpu, this->width(), this->height(),
+ fCachedSimpleRenderPass, fColorAttachmentView,
+ stencilView);
+ SkASSERT(fFramebuffer);
+}
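+
+// Editorial note (not part of the original source): completeStencilAttachment()
+// above funnels back into createFramebuffer(), so attaching a stencil buffer
+// after the fact rebuilds both the cached simple render pass and the
+// framebuffer so they pick up the new stencil view.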
+
+void GrVkRenderTarget::getAttachmentsDescriptor(
+ GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* attachmentFlags) const {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ desc->fColor.fFormat = this->imageFormat();
+ desc->fColor.fSamples = this->numSamples();
+ *attachmentFlags = GrVkRenderPass::kColor_AttachmentFlag;
+ uint32_t attachmentCount = 1;
+
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ desc->fStencil.fFormat = vkStencil->vkFormat();
+ desc->fStencil.fSamples = vkStencil->numSamples();
+ // Currently in Vulkan, stencil and color attachments must all have the same number of samples.
+ SkASSERT(desc->fColor.fSamples == desc->fStencil.fSamples);
+ *attachmentFlags |= GrVkRenderPass::kStencil_AttachmentFlag;
+ ++attachmentCount;
+ }
+ desc->fAttachmentCount = attachmentCount;
+}
+
+GrVkRenderTarget::~GrVkRenderTarget() {
+ // Either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fMSAAImage);
+ SkASSERT(!fResolveAttachmentView);
+ SkASSERT(!fColorAttachmentView);
+ SkASSERT(!fFramebuffer);
+ SkASSERT(!fCachedSimpleRenderPass);
+}
+
+void GrVkRenderTarget::addResources(GrVkCommandBuffer& commandBuffer) const {
+ commandBuffer.addResource(this->framebuffer());
+ commandBuffer.addResource(this->colorAttachmentView());
+ commandBuffer.addResource(this->msaaImageResource() ? this->msaaImageResource()
+ : this->resource());
+ if (this->stencilImageResource()) {
+ commandBuffer.addResource(this->stencilImageResource());
+ commandBuffer.addResource(this->stencilAttachmentView());
+ }
+}
+
+void GrVkRenderTarget::releaseInternalObjects() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ if (fMSAAImage) {
+ fMSAAImage->releaseImage(gpu);
+ fMSAAImage.reset();
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unref(gpu);
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unref(gpu);
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::abandonInternalObjects() {
+ if (fMSAAImage) {
+ fMSAAImage->abandonImage();
+ fMSAAImage.reset();
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unrefAndAbandon();
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unrefAndAbandon();
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unrefAndAbandon();
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unrefAndAbandon();
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::onRelease() {
+ this->releaseInternalObjects();
+ this->releaseImage(this->getVkGpu());
+ GrRenderTarget::onRelease();
+}
+
+void GrVkRenderTarget::onAbandon() {
+ this->abandonInternalObjects();
+ this->abandonImage();
+ GrRenderTarget::onAbandon();
+}
+
+GrBackendRenderTarget GrVkRenderTarget::getBackendRenderTarget() const {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ return GrBackendRenderTarget(this->width(), this->height(), this->numSamples(), fInfo,
+ this->grVkImageLayout());
+}
+
+const GrVkResource* GrVkRenderTarget::stencilImageResource() const {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->imageResource();
+ }
+
+ return nullptr;
+}
+
+const GrVkImageView* GrVkRenderTarget::stencilAttachmentView() const {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->stencilView();
+ }
+
+ return nullptr;
+}
+
+GrVkGpu* GrVkRenderTarget::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h
new file mode 100644
index 0000000000..09bb3d6a35
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkRenderTarget_DEFINED
+#define GrVkRenderTarget_DEFINED
+
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/vk/GrVkImage.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkResourceProvider.h"
+
+class GrVkCommandBuffer;
+class GrVkFramebuffer;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkSecondaryCommandBuffer;
+class GrVkStencilAttachment;
+
+struct GrVkImageInfo;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkRenderTarget: public GrRenderTarget, public virtual GrVkImage {
+public:
+ static sk_sp<GrVkRenderTarget> MakeWrappedRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ int sampleCnt, const GrVkImageInfo&,
+ sk_sp<GrVkImageLayout>);
+
+ static sk_sp<GrVkRenderTarget> MakeSecondaryCBRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ const GrVkDrawableInfo& vkInfo);
+
+ ~GrVkRenderTarget() override;
+
+ GrBackendFormat backendFormat() const override { return this->getBackendFormat(); }
+
+ const GrVkFramebuffer* framebuffer() const { return fFramebuffer; }
+ const GrVkImageView* colorAttachmentView() const { return fColorAttachmentView; }
+ const GrVkResource* msaaImageResource() const {
+ if (fMSAAImage) {
+ return fMSAAImage->fResource;
+ }
+ return nullptr;
+ }
+ GrVkImage* msaaImage() { return fMSAAImage.get(); }
+ const GrVkImageView* resolveAttachmentView() const { return fResolveAttachmentView; }
+ const GrVkResource* stencilImageResource() const;
+ const GrVkImageView* stencilAttachmentView() const;
+
+ const GrVkRenderPass* simpleRenderPass() const { return fCachedSimpleRenderPass; }
+ GrVkResourceProvider::CompatibleRPHandle compatibleRenderPassHandle() const {
+ SkASSERT(!this->wrapsSecondaryCommandBuffer());
+ return fCompatibleRPHandle;
+ }
+ const GrVkRenderPass* externalRenderPass() const {
+ SkASSERT(this->wrapsSecondaryCommandBuffer());
+ // We use the cached simple render pass to hold the external render pass.
+ return fCachedSimpleRenderPass;
+ }
+
+ bool wrapsSecondaryCommandBuffer() const { return fSecondaryCommandBuffer != VK_NULL_HANDLE; }
+ VkCommandBuffer getExternalSecondaryCommandBuffer() const {
+ return fSecondaryCommandBuffer;
+ }
+
+ bool canAttemptStencilAttachment() const override {
+ // We don't know the status of the stencil attachment for wrapped external secondary command
+ // buffers, so we just assume we don't have one.
+ return !this->wrapsSecondaryCommandBuffer();
+ }
+
+ GrBackendRenderTarget getBackendRenderTarget() const override;
+
+ void getAttachmentsDescriptor(GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* flags) const;
+
+ void addResources(GrVkCommandBuffer& commandBuffer) const;
+
+protected:
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrBackendObjectOwnership);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* colorAttachmentView,
+ GrBackendObjectOwnership);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ int numColorSamples = this->numSamples();
+ if (numColorSamples > 1) {
+ // Add one to account for the resolved VkImage.
+ numColorSamples += 1;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, GrMipMapped::kNo);
+ }
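+
+ // Worked example (illustrative, not part of the original source): a 256x256
+ // RGBA8 target with 4 samples counts 4 + 1 = 5 color samples, so roughly
+ // 256 * 256 * 4 bytes * 5 = ~1.25 MiB, ignoring alignment and mipmaps.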
+
+ void createFramebuffer(GrVkGpu* gpu);
+
+ const GrVkImageView* fColorAttachmentView;
+ std::unique_ptr<GrVkImage> fMSAAImage;
+ const GrVkImageView* fResolveAttachmentView;
+
+private:
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* colorAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkRenderPass* renderPass,
+ VkCommandBuffer secondaryCommandBuffer);
+
+ bool completeStencilAttachment() override;
+
+ // In Vulkan we call the release proc after we are finished with the underlying
+ // GrVkImage::Resource object (which occurs after the GPU has finished all work on it).
+ void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {
+ // Forward the release proc on to GrVkImage
+ this->setResourceRelease(std::move(releaseHelper));
+ }
+
+ void releaseInternalObjects();
+ void abandonInternalObjects();
+
+ const GrVkFramebuffer* fFramebuffer;
+
+ // This is a cached pointer to a simple render pass. The render target should unref it
+ // once it is done with it.
+ const GrVkRenderPass* fCachedSimpleRenderPass;
+ // This is a handle to be used to quickly get compatible GrVkRenderPasses for this render target.
+ GrVkResourceProvider::CompatibleRPHandle fCompatibleRPHandle;
+
+ // If this render target wraps an external VkCommandBuffer, then this handle will be that
+ // VkCommandBuffer and not VK_NULL_HANDLE. In this case the render target will not be backed by
+ // an actual VkImage and will thus be limited in terms of what it can be used for.
+ VkCommandBuffer fSecondaryCommandBuffer = VK_NULL_HANDLE;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResource.h b/gfx/skia/skia/src/gpu/vk/GrVkResource.h
new file mode 100644
index 0000000000..7b9949ba1b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResource.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkResource_DEFINED
+#define GrVkResource_DEFINED
+
+#include "include/private/SkTHash.h"
+#include "include/utils/SkRandom.h"
+#include <atomic>
+
+class GrVkGpu;
+
+// uncomment to enable tracing of resource refs
+#ifdef SK_DEBUG
+#define SK_TRACE_VK_RESOURCES
+#endif
+
+/** \class GrVkResource
+
+ GrVkResource is the base class for Vulkan resources that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+
+ This is nearly identical to SkRefCntBase. The exceptions are that unref()
+ takes a GrVkGpu, and any derived classes must implement freeGPUData() and
+ possibly abandonGPUData().
+*/
+
+class GrVkResource : SkNoncopyable {
+public:
+ // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
+#ifdef SK_TRACE_VK_RESOURCES
+ struct Hash {
+ uint32_t operator()(const GrVkResource* const& r) const {
+ SkASSERT(r);
+ return r->fKey;
+ }
+ };
+
+ class Trace {
+ public:
+ ~Trace() {
+ fHashSet.foreach([](const GrVkResource* r) {
+ r->dumpInfo();
+ });
+ SkASSERT(0 == fHashSet.count());
+ }
+
+ void add(const GrVkResource* r) {
+ fHashSet.add(r);
+ }
+
+ void remove(const GrVkResource* r) {
+ fHashSet.remove(r);
+ }
+
+ private:
+ SkTHashSet<const GrVkResource*, GrVkResource::Hash> fHashSet;
+ };
+
+ static std::atomic<uint32_t> fKeyCounter;
+#endif
+
+ /** Default construct, initializing the reference count to 1.
+ */
+ GrVkResource() : fRefCnt(1) {
+#ifdef SK_TRACE_VK_RESOURCES
+ fKey = fKeyCounter.fetch_add(+1, std::memory_order_relaxed);
+ GetTrace()->add(this);
+#endif
+ }
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~GrVkResource() {
+#ifdef SK_DEBUG
+ auto count = this->getRefCnt();
+ SkASSERTF(count == 1, "fRefCnt was %d", count);
+ fRefCnt.store(0); // illegal value, to catch us if we reuse after delete
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const { return fRefCnt.load(); }
+#endif
+
+ /** May return true if the caller is the only owner.
+ * Ensures that all previous owners' actions are complete.
+ */
+ bool unique() const {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return 1 == fRefCnt.load(std::memory_order_acquire);
+ }
+
+ /** Increment the reference count.
+ Must be balanced by a call to unref() or unrefAndAbandon().
+ */
+ void ref() const {
+ // No barrier required.
+ SkDEBUGCODE(int prevRefCount = )fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ SkASSERT(prevRefCount >= 1); // fetch_add returns the pre-increment value
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ Any GPU data associated with this resource will be freed before it's deleted.
+ */
+ void unref(GrVkGpu* gpu) const {
+ SkASSERT(gpu);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ int prevRefCount = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+ SkASSERT(prevRefCount >= 1); // fetch_add returns the pre-decrement value
+ if (prevRefCount == 1) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose(gpu);
+ }
+ }
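+
+ // Illustrative usage (not part of the original source): every ref() is
+ // balanced by an unref(gpu) so the Vulkan handles can be destroyed once the
+ // count reaches zero:
+ //
+ //   resource->ref();       // second owner takes a reference
+ //   ...                    // both owners use the resource
+ //   resource->unref(gpu);  // second owner done
+ //   resource->unref(gpu);  // last unref frees GPU data and deletes this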
+
+ /** Unref without freeing GPU data. Used only when we're abandoning the resource */
+ void unrefAndAbandon() const {
+ SkASSERT(this->getRefCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ int prevRefCount = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+ SkASSERT(prevRefCount >= 1); // fetch_add returns the pre-decrement value
+ if (prevRefCount == 1) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+ // Called every time this resource is added to a command buffer.
+ virtual void notifyAddedToCommandBuffer() const {}
+ // Called every time this resource is removed from a command buffer (typically because
+ // the command buffer finished execution on the GPU but also when the command buffer
+ // is abandoned.)
+ virtual void notifyRemovedFromCommandBuffer() const {}
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(this->getRefCnt() > 0);
+ }
+#endif
+
+#ifdef SK_TRACE_VK_RESOURCES
+ /** Output a human-readable dump of this resource's information
+ */
+ virtual void dumpInfo() const = 0;
+#endif
+
+private:
+#ifdef SK_TRACE_VK_RESOURCES
+ static Trace* GetTrace() {
+ static Trace kTrace;
+ return &kTrace;
+ }
+#endif
+
+ /** Must be implemented by any subclasses.
+ * Deletes any Vk data associated with this resource.
+ */
+ virtual void freeGPUData(GrVkGpu* gpu) const = 0;
+
+ /**
+ * Called from unrefAndAbandon. Resources should do any necessary cleanup without freeing
+ * underlying Vk objects. This must be overridden by subclasses that themselves store
+ * GrVkResources, since those resources will need to be unrefed.
+ */
+ virtual void abandonGPUData() const {}
+
+ /**
+ * Called when the ref count goes to 0. Will free Vk resources.
+ */
+ void internal_dispose(GrVkGpu* gpu) const {
+ this->freeGPUData(gpu);
+#ifdef SK_TRACE_VK_RESOURCES
+ GetTrace()->remove(this);
+#endif
+
+#ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1);
+#endif
+ delete this;
+ }
+
+ /**
+ * Like internal_dispose(), but without freeing Vk resources. Used when we've lost the context.
+ */
+ void internal_dispose() const {
+ this->abandonGPUData();
+#ifdef SK_TRACE_VK_RESOURCES
+ GetTrace()->remove(this);
+#endif
+
+#ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1);
+#endif
+ delete this;
+ }
+
+ mutable std::atomic<int32_t> fRefCnt;
+#ifdef SK_TRACE_VK_RESOURCES
+ uint32_t fKey;
+#endif
+
+ typedef SkNoncopyable INHERITED;
+};
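+
+// A minimal subclass sketch (illustrative only; MyVkHandle and DestroyHandle
+// are hypothetical): a resource owning a single Vulkan handle implements
+// freeGPUData() to destroy it, plus dumpInfo() for SK_TRACE_VK_RESOURCES builds.
+//
+//   class GrVkMyResource : public GrVkResource {
+//   public:
+//       explicit GrVkMyResource(MyVkHandle handle) : fHandle(handle) {}
+//   #ifdef SK_TRACE_VK_RESOURCES
+//       void dumpInfo() const override {
+//           SkDebugf("GrVkMyResource (%d refs)\n", this->getRefCnt());
+//       }
+//   #endif
+//   private:
+//       void freeGPUData(GrVkGpu* gpu) const override {
+//           GR_VK_CALL(gpu->vkInterface(), DestroyHandle(gpu->device(), fHandle, nullptr));
+//       }
+//       MyVkHandle fHandle;
+//   };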
+
+// This subclass allows for recycling
+class GrVkRecycledResource : public GrVkResource {
+public:
+ // When recycle is called and there is only one ref left on the resource, we will signal that
+ // the resource can be recycled for reuse. If the subclass (or whoever is managing this
+ // resource) decides not to recycle the object, it is their responsibility to call unref on it.
+ void recycle(GrVkGpu* gpu) const {
+ if (this->unique()) {
+ this->onRecycle(gpu);
+ } else {
+ this->unref(gpu);
+ }
+ }
+
+private:
+ virtual void onRecycle(GrVkGpu* gpu) const = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp
new file mode 100644
index 0000000000..5f21dbd3fd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp
@@ -0,0 +1,585 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkResourceProvider.h"
+
+#include "src/core/SkTaskGroup.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/vk/GrVkCommandBuffer.h"
+#include "src/gpu/vk/GrVkCommandPool.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipeline.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkUniformBuffer.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#ifdef SK_TRACE_VK_RESOURCES
+std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
+#endif
+
+GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
+ : fGpu(gpu)
+ , fPipelineCache(VK_NULL_HANDLE) {
+ fPipelineStateCache = new PipelineStateCache(gpu);
+}
+
+GrVkResourceProvider::~GrVkResourceProvider() {
+ SkASSERT(0 == fRenderPassArray.count());
+ SkASSERT(0 == fExternalRenderPasses.count());
+ SkASSERT(VK_NULL_HANDLE == fPipelineCache);
+ delete fPipelineStateCache;
+}
+
+VkPipelineCache GrVkResourceProvider::pipelineCache() {
+ if (fPipelineCache == VK_NULL_HANDLE) {
+ VkPipelineCacheCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+
+ auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
+ sk_sp<SkData> cached;
+ if (persistentCache) {
+ uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
+ sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
+ cached = persistentCache->load(*keyData);
+ }
+ bool usedCached = false;
+ if (cached) {
+ uint32_t* cacheHeader = (uint32_t*)cached->data();
+ if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
+ // For version one of the header, the total header size is 16 bytes plus
+ // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
+ // the breakdown of these bytes.
+ SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
+ const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
+ const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
+ if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
+ !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
+ createInfo.initialDataSize = cached->size();
+ createInfo.pInitialData = cached->data();
+ usedCached = true;
+ }
+ }
+ }
+ if (!usedCached) {
+ createInfo.initialDataSize = 0;
+ createInfo.pInitialData = nullptr;
+ }
+ VkResult result = GR_VK_CALL(fGpu->vkInterface(),
+ CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
+ &fPipelineCache));
+ SkASSERT(VK_SUCCESS == result);
+ if (VK_SUCCESS != result) {
+ fPipelineCache = VK_NULL_HANDLE;
+ }
+ }
+ return fPipelineCache;
+}
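+
+// For reference (a summary of the Vulkan spec's pipeline cache section, not
+// part of the original source): the version-one header that the uint32_t
+// indexing above decodes is laid out as
+//
+//   offset  0: uint32_t length        == 16 + VK_UUID_SIZE
+//   offset  4: uint32_t headerVersion == VK_PIPELINE_CACHE_HEADER_VERSION_ONE
+//   offset  8: uint32_t vendorID
+//   offset 12: uint32_t deviceID
+//   offset 16: uint8_t  pipelineCacheUUID[VK_UUID_SIZE]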
+
+void GrVkResourceProvider::init() {
+ // Init uniform descriptor objects
+ GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
+ fDescriptorSetManagers.emplace_back(dsm);
+ SkASSERT(1 == fDescriptorSetManagers.count());
+ fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
+}
+
+GrVkPipeline* GrVkResourceProvider::createPipeline(const GrProgramInfo& programInfo,
+ const GrStencilSettings& stencil,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass,
+ VkPipelineLayout layout) {
+ return GrVkPipeline::Create(fGpu, programInfo, stencil, shaderStageInfo,
+ shaderStageCount, primitiveType, compatibleRenderPass,
+ layout, this->pipelineCache());
+}
+
+// To create framebuffers, we first need to create a simple RenderPass that is
+// only used for framebuffer creation. When we actually render, we will create
+// RenderPasses as needed that are compatible with the framebuffer.
+const GrVkRenderPass*
+GrVkResourceProvider::findCompatibleRenderPass(const GrVkRenderTarget& target,
+ CompatibleRPHandle* compatibleHandle) {
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ if (fRenderPassArray[i].isCompatible(target)) {
+ const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
+ renderPass->ref();
+ if (compatibleHandle) {
+ *compatibleHandle = CompatibleRPHandle(i);
+ }
+ return renderPass;
+ }
+ }
+
+ const GrVkRenderPass* renderPass =
+ fRenderPassArray.emplace_back(fGpu, target).getCompatibleRenderPass();
+ renderPass->ref();
+
+ if (compatibleHandle) {
+ *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
+ }
+ return renderPass;
+}
+
+const GrVkRenderPass*
+GrVkResourceProvider::findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle) {
+ SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
+ int index = compatibleHandle.toIndex();
+ const GrVkRenderPass* renderPass = fRenderPassArray[index].getCompatibleRenderPass();
+ renderPass->ref();
+ return renderPass;
+}
+
+const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
+ VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
+ for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
+ if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
+ fExternalRenderPasses[i]->ref();
+#ifdef SK_DEBUG
+ uint32_t cachedColorIndex;
+ SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
+ SkASSERT(cachedColorIndex == colorAttachmentIndex);
+#endif
+ return fExternalRenderPasses[i];
+ }
+ }
+
+ const GrVkRenderPass* newRenderPass = new GrVkRenderPass(renderPass, colorAttachmentIndex);
+ fExternalRenderPasses.push_back(newRenderPass);
+ newRenderPass->ref();
+ return newRenderPass;
+}
+
+const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
+ const GrVkRenderTarget& target,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps,
+ CompatibleRPHandle* compatibleHandle) {
+ GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
+ GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
+ : &tempRPHandle;
+ *pRPHandle = target.compatibleRenderPassHandle();
+
+ // This will get us the handle to (and possibly create) the compatible set for the specific
+ // GrVkRenderPass we are looking for.
+ this->findCompatibleRenderPass(target, compatibleHandle);
+ return this->findRenderPass(*pRPHandle, colorOps, stencilOps);
+}
+
+const GrVkRenderPass*
+GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps) {
+ SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
+ CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
+ const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
+ colorOps,
+ stencilOps);
+ renderPass->ref();
+ return renderPass;
+}
+
+GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
+ VkDescriptorType type, uint32_t count) {
+ return new GrVkDescriptorPool(fGpu, type, count);
+}
+
+GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
+ const GrSamplerState& params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
+ GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
+ if (!sampler) {
+ sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
+ if (!sampler) {
+ return nullptr;
+ }
+ fSamplers.add(sampler);
+ }
+ SkASSERT(sampler);
+ sampler->ref();
+ return sampler;
+}
+
+GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
+ const GrVkYcbcrConversionInfo& ycbcrInfo) {
+ GrVkSamplerYcbcrConversion* ycbcrConversion =
+ fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
+ if (!ycbcrConversion) {
+ ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
+ if (!ycbcrConversion) {
+ return nullptr;
+ }
+ fYcbcrConversions.add(ycbcrConversion);
+ }
+ SkASSERT(ycbcrConversion);
+ ycbcrConversion->ref();
+ return ycbcrConversion;
+}
+
+GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
+ GrRenderTarget* renderTarget,
+ const GrProgramInfo& programInfo,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass) {
+ return fPipelineStateCache->refPipelineState(renderTarget, programInfo,
+ primitiveType, compatibleRenderPass);
+}
+
+void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
+ const GrVkUniformHandler& uniformHandler,
+ GrVkDescriptorSetManager::Handle* handle) {
+ SkASSERT(handle);
+ SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
+ *handle = GrVkDescriptorSetManager::Handle(i);
+ return;
+ }
+ }
+
+ GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
+ uniformHandler);
+ fDescriptorSetManagers.emplace_back(dsm);
+ *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
+}
+
+void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities,
+ GrVkDescriptorSetManager::Handle* handle) {
+ SkASSERT(handle);
+ SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ if (fDescriptorSetManagers[i]->isCompatible(type, visibilities)) {
+ *handle = GrVkDescriptorSetManager::Handle(i);
+ return;
+ }
+ }
+
+ GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
+ visibilities);
+ fDescriptorSetManagers.emplace_back(dsm);
+ *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
+}
+
+VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
+ SkASSERT(fUniformDSHandle.isValid());
+ return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
+}
+
+VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
+ const GrVkDescriptorSetManager::Handle& handle) const {
+ SkASSERT(handle.isValid());
+ return fDescriptorSetManagers[handle.toIndex()]->layout();
+}
+
+const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
+ SkASSERT(fUniformDSHandle.isValid());
+ return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
+ fUniformDSHandle);
+}
+
+const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
+ const GrVkDescriptorSetManager::Handle& handle) {
+ SkASSERT(handle.isValid());
+ return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
+}
+
+void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
+ const GrVkDescriptorSetManager::Handle& handle) {
+ SkASSERT(descSet);
+ SkASSERT(handle.isValid());
+ int managerIdx = handle.toIndex();
+ SkASSERT(managerIdx < fDescriptorSetManagers.count());
+ fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
+}
+
+GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
+ std::unique_lock<std::recursive_mutex> lock(fBackgroundMutex);
+ GrVkCommandPool* result;
+ if (fAvailableCommandPools.count()) {
+ result = fAvailableCommandPools.back();
+ fAvailableCommandPools.pop_back();
+ } else {
+ result = GrVkCommandPool::Create(fGpu);
+ }
+ SkASSERT(result->unique());
+ SkDEBUGCODE(
+ for (const GrVkCommandPool* pool : fActiveCommandPools) {
+ SkASSERT(pool != result);
+ }
+ for (const GrVkCommandPool* pool : fAvailableCommandPools) {
+ SkASSERT(pool != result);
+ }
+ )
+ fActiveCommandPools.push_back(result);
+ result->ref();
+ return result;
+}
+
+void GrVkResourceProvider::checkCommandBuffers() {
+ for (int i = fActiveCommandPools.count() - 1; i >= 0; --i) {
+ GrVkCommandPool* pool = fActiveCommandPools[i];
+ if (!pool->isOpen()) {
+ GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
+ if (buffer->finished(fGpu)) {
+ fActiveCommandPools.removeShuffle(i);
+ this->backgroundReset(pool);
+ }
+ }
+ }
+}
+
+void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
+ GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext) {
+ sk_sp<GrRefCntedCallback> procRef(new GrRefCntedCallback(finishedProc, finishedContext));
+ for (int i = 0; i < fActiveCommandPools.count(); ++i) {
+ GrVkCommandPool* pool = fActiveCommandPools[i];
+ if (!pool->isOpen()) {
+ GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
+ buffer->addFinishedProc(procRef);
+ }
+ }
+}
+
+const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
+ const GrVkResource* resource = nullptr;
+ int count = fAvailableUniformBufferResources.count();
+ if (count > 0) {
+ resource = fAvailableUniformBufferResources[count - 1];
+ fAvailableUniformBufferResources.removeShuffle(count - 1);
+ } else {
+ resource = GrVkUniformBuffer::CreateResource(fGpu, GrVkUniformBuffer::kStandardSize);
+ }
+ return resource;
+}
+
+void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
+ fAvailableUniformBufferResources.push_back(resource);
+}
+
+void GrVkResourceProvider::destroyResources(bool deviceLost) {
+ SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
+ if (taskGroup) {
+ taskGroup->wait();
+ }
+
+ // loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ fRenderPassArray[i].releaseResources(fGpu);
+ }
+ fRenderPassArray.reset();
+
+ for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
+ fExternalRenderPasses[i]->unref(fGpu);
+ }
+ fExternalRenderPasses.reset();
+
+ // Iterate through all stored GrVkSamplers and unref them before resetting the hash.
+ for (decltype(fSamplers)::Iter iter(&fSamplers); !iter.done(); ++iter) {
+ (*iter).unref(fGpu);
+ }
+ fSamplers.reset();
+
+ for (decltype(fYcbcrConversions)::Iter iter(&fYcbcrConversions); !iter.done(); ++iter) {
+ (*iter).unref(fGpu);
+ }
+ fYcbcrConversions.reset();
+
+ fPipelineStateCache->release();
+
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
+ fPipelineCache = VK_NULL_HANDLE;
+
+ for (GrVkCommandPool* pool : fActiveCommandPools) {
+ SkASSERT(pool->unique());
+ pool->unref(fGpu);
+ }
+ fActiveCommandPools.reset();
+
+ for (GrVkCommandPool* pool : fAvailableCommandPools) {
+ SkASSERT(pool->unique());
+ pool->unref(fGpu);
+ }
+ fAvailableCommandPools.reset();
+
+ // We must release/destroy all command buffers and pipeline states before releasing the
+ // GrVkDescriptorSetManagers
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ fDescriptorSetManagers[i]->release(fGpu);
+ }
+ fDescriptorSetManagers.reset();
+
+ // release our uniform buffers
+ for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
+ SkASSERT(fAvailableUniformBufferResources[i]->unique());
+ fAvailableUniformBufferResources[i]->unref(fGpu);
+ }
+ fAvailableUniformBufferResources.reset();
+}
+
+void GrVkResourceProvider::abandonResources() {
+ SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
+ if (taskGroup) {
+ taskGroup->wait();
+ }
+
+ // Abandon all command pools
+ for (int i = 0; i < fActiveCommandPools.count(); ++i) {
+ SkASSERT(fActiveCommandPools[i]->unique());
+ fActiveCommandPools[i]->unrefAndAbandon();
+ }
+ fActiveCommandPools.reset();
+ for (int i = 0; i < fAvailableCommandPools.count(); ++i) {
+ SkASSERT(fAvailableCommandPools[i]->unique());
+ fAvailableCommandPools[i]->unrefAndAbandon();
+ }
+ fAvailableCommandPools.reset();
+
+ // loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ fRenderPassArray[i].abandonResources();
+ }
+ fRenderPassArray.reset();
+
+ for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
+ fExternalRenderPasses[i]->unrefAndAbandon();
+ }
+ fExternalRenderPasses.reset();
+
+ // Iterate through all stored GrVkSamplers and unrefAndAbandon them before resetting the hash.
+ SkTDynamicHash<GrVkSampler, GrVkSampler::Key>::Iter iter(&fSamplers);
+ for (; !iter.done(); ++iter) {
+ (*iter).unrefAndAbandon();
+ }
+ fSamplers.reset();
+
+ for (decltype(fYcbcrConversions)::Iter iter(&fYcbcrConversions); !iter.done(); ++iter) {
+ (*iter).unrefAndAbandon();
+ }
+ fYcbcrConversions.reset();
+
+ fPipelineStateCache->abandon();
+
+ fPipelineCache = VK_NULL_HANDLE;
+
+ // We must abandon all command buffers and pipeline states before abandoning the
+ // GrVkDescriptorSetManagers
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ fDescriptorSetManagers[i]->abandon();
+ }
+ fDescriptorSetManagers.reset();
+
+ // release our uniform buffers
+ for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
+ SkASSERT(fAvailableUniformBufferResources[i]->unique());
+ fAvailableUniformBufferResources[i]->unrefAndAbandon();
+ }
+ fAvailableUniformBufferResources.reset();
+}
+
+void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(pool->unique());
+ pool->releaseResources(fGpu);
+ SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
+ if (taskGroup) {
+ taskGroup->add([this, pool]() {
+ this->reset(pool);
+ });
+ } else {
+ this->reset(pool);
+ }
+}
+
+void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
+ TRACE_EVENT0("skia.gpu", TRACE_FUNC);
+ SkASSERT(pool->unique());
+ pool->reset(fGpu);
+ std::unique_lock<std::recursive_mutex> providerLock(fBackgroundMutex);
+ fAvailableCommandPools.push_back(pool);
+}
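+
+// Editorial note (not part of the original source): backgroundReset() hands the
+// pool to the context's SkTaskGroup when one exists, so the pool reset happens
+// off the main thread; fBackgroundMutex guards fAvailableCommandPools against
+// that worker racing findOrCreateCommandPool().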
+
+void GrVkResourceProvider::storePipelineCacheData() {
+ size_t dataSize = 0;
+ VkResult result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
+ this->pipelineCache(),
+ &dataSize, nullptr));
+ SkASSERT(result == VK_SUCCESS);
+
+ std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);
+
+ result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
+ this->pipelineCache(),
+ &dataSize,
+ (void*)data.get()));
+ SkASSERT(result == VK_SUCCESS);
+
+ uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
+ sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
+
+ fGpu->getContext()->priv().getPersistentCache()->store(
+ *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize));
+}
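+
+// Editorial note (not part of the original source): the two GetPipelineCacheData
+// calls above follow the standard Vulkan size-query idiom; the first call, with
+// null pData, reports the required size, and the second fills the buffer.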
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(
+ const GrVkGpu* gpu,
+ const GrVkRenderTarget& target)
+ : fLastReturnedIndex(0) {
+ fRenderPasses.emplace_back(new GrVkRenderPass());
+ fRenderPasses[0]->initSimple(gpu, target);
+}
+
+bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
+ const GrVkRenderTarget& target) const {
+ // The first GrVkRenderPass should always exist, since we create the basic load/store
+ // render pass on creation.
+ SkASSERT(fRenderPasses[0]);
+ return fRenderPasses[0]->isCompatible(target);
+}
+
+GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
+ const GrVkGpu* gpu,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps) {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
+ if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, stencilOps)) {
+ fLastReturnedIndex = idx;
+ return fRenderPasses[idx];
+ }
+ }
+ GrVkRenderPass* renderPass = fRenderPasses.emplace_back(new GrVkRenderPass());
+ renderPass->init(gpu, *this->getCompatibleRenderPass(), colorOps, stencilOps);
+ fLastReturnedIndex = fRenderPasses.count() - 1;
+ return renderPass;
+}
+
+void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources(GrVkGpu* gpu) {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ if (fRenderPasses[i]) {
+ fRenderPasses[i]->unref(gpu);
+ fRenderPasses[i] = nullptr;
+ }
+ }
+}
+
+void GrVkResourceProvider::CompatibleRenderPassSet::abandonResources() {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ if (fRenderPasses[i]) {
+ fRenderPasses[i]->unrefAndAbandon();
+ fRenderPasses[i] = nullptr;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h
new file mode 100644
index 0000000000..44755d7a01
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h
@@ -0,0 +1,284 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkResourceProvider_DEFINED
+#define GrVkResourceProvider_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkLRUCache.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/gpu/GrResourceHandle.h"
+#include "src/gpu/vk/GrVkDescriptorPool.h"
+#include "src/gpu/vk/GrVkDescriptorSetManager.h"
+#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
+#include "src/gpu/vk/GrVkRenderPass.h"
+#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/vk/GrVkSampler.h"
+#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#include <mutex>
+#include <thread>
+
+class GrVkCommandPool;
+class GrVkGpu;
+class GrVkPipeline;
+class GrVkPipelineState;
+class GrVkPrimaryCommandBuffer;
+class GrVkRenderTarget;
+class GrVkSecondaryCommandBuffer;
+class GrVkUniformHandler;
+
+class GrVkResourceProvider {
+public:
+ GrVkResourceProvider(GrVkGpu* gpu);
+ ~GrVkResourceProvider();
+
+ // Set up any initial vk objects
+ void init();
+
+ GrVkPipeline* createPipeline(const GrProgramInfo&,
+ const GrStencilSettings& stencil,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ VkRenderPass compatibleRenderPass,
+ VkPipelineLayout layout);
+
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(CompatibleRPHandle);
+
+ // Finds or creates a simple render pass that matches the target, increments the refcount,
+ // and returns it. The caller can optionally pass in a pointer to a CompatibleRPHandle. If this
+ // is non-null, it will be set to a handle that can be used in the future to quickly return a
+ // compatible GrVkRenderPass without needing to inspect a GrVkRenderTarget.
+ const GrVkRenderPass* findCompatibleRenderPass(const GrVkRenderTarget& target,
+ CompatibleRPHandle* compatibleHandle = nullptr);
+ // The CompatibleRPHandle must be a valid handle previously set by a call to
+ // findCompatibleRenderPass(GrVkRenderTarget&, CompatibleRPHandle*).
+ const GrVkRenderPass* findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle);
+
+ const GrVkRenderPass* findCompatibleExternalRenderPass(VkRenderPass,
+ uint32_t colorAttachmentIndex);
+
+ // Finds or creates a render pass that matches the target and LoadStoreOps, increments the
+ // refcount, and returns it. The caller can optionally pass in a pointer to a CompatibleRPHandle.
+ // If this is non-null, it will be set to a handle that can be used in the future to quickly
+ // return a GrVkRenderPass without needing to inspect a GrVkRenderTarget.
+ const GrVkRenderPass* findRenderPass(const GrVkRenderTarget& target,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps,
+ CompatibleRPHandle* compatibleHandle = nullptr);
+
+ // The CompatibleRPHandle must be a valid handle previously set by a call to findRenderPass or
+ // findCompatibleRenderPass.
+ const GrVkRenderPass* findRenderPass(const CompatibleRPHandle& compatibleHandle,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps);
+
+ GrVkCommandPool* findOrCreateCommandPool();
+
+ void checkCommandBuffers();
+
+ // We must add the finishedProc to all active command buffers since we may have flushed work
+ // that the client cares about before they explicitly called flush and the GPU may reorder
+ // command execution. So we make sure all previously submitted work finishes before we call the
+ // finishedProc.
+ void addFinishedProcToActiveCommandBuffers(GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ // Finds or creates a compatible GrVkDescriptorPool for the requested type and count.
+ // The refcount is incremented and a pointer returned.
+ // TODO: Currently this will just create a descriptor pool without holding onto a ref itself,
+ // so we do not reuse them. Reuse requires knowing whether another draw is currently using
+ // the GrVkDescriptorPool, the ability to reset pools, and the ability to purge pools out
+ // of our cache of GrVkDescriptorPools.
+ GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(VkDescriptorType type, uint32_t count);
+
+ // Finds or creates a compatible GrVkSampler based on the GrSamplerState and
+ // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
+ GrVkSampler* findOrCreateCompatibleSampler(const GrSamplerState&,
+ const GrVkYcbcrConversionInfo& ycbcrInfo);
+
+ // Finds or creates a compatible GrVkSamplerYcbcrConversion based on the GrSamplerState and
+ // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
+ GrVkSamplerYcbcrConversion* findOrCreateCompatibleSamplerYcbcrConversion(
+ const GrVkYcbcrConversionInfo& ycbcrInfo);
+
+ GrVkPipelineState* findOrCreateCompatiblePipelineState(
+ GrRenderTarget*,
+ const GrProgramInfo&,
+ GrPrimitiveType,
+ VkRenderPass compatibleRenderPass);
+
+ void getSamplerDescriptorSetHandle(VkDescriptorType type,
+ const GrVkUniformHandler&,
+ GrVkDescriptorSetManager::Handle* handle);
+ void getSamplerDescriptorSetHandle(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities,
+ GrVkDescriptorSetManager::Handle* handle);
+
+ // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does not
+ // own the VkDescriptorSetLayout and thus should not delete it. This function should be used
+ // when the caller needs the layout to create a VkPipelineLayout.
+ VkDescriptorSetLayout getUniformDSLayout() const;
+
+ // Returns the compatible VkDescriptorSetLayout to use for a specific sampler handle. The caller
+ // does not own the VkDescriptorSetLayout and thus should not delete it. This function should be
+ // used when the caller needs the layout to create a VkPipelineLayout.
+ VkDescriptorSetLayout getSamplerDSLayout(const GrVkDescriptorSetManager::Handle&) const;
+
+ // Returns a GrVkDescriptorSet that can be used for uniform buffers. The GrVkDescriptorSet
+ // is already reffed for the caller.
+ const GrVkDescriptorSet* getUniformDescriptorSet();
+
+ // Returns a GrVkDescriptorSet that can be used for sampler descriptors that are compatible with
+ // the GrVkDescriptorSetManager::Handle passed in. The GrVkDescriptorSet is already reffed for
+ // the caller.
+ const GrVkDescriptorSet* getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle&);
+
+
+ // Signals that the descriptor set passed in, which is compatible with the passed in handle,
+ // can be reused by the next allocation request.
+ void recycleDescriptorSet(const GrVkDescriptorSet* descSet,
+ const GrVkDescriptorSetManager::Handle&);
+
+ // Creates or finds free uniform buffer resources of size GrVkUniformBuffer::kStandardSize.
+ // Anything larger will need to be created and released by the client.
+ const GrVkResource* findOrCreateStandardUniformBufferResource();
+
+ // Signals that the resource passed to it (which should be a uniform buffer resource)
+ // can be reused by the next uniform buffer resource request.
+ void recycleStandardUniformBufferResource(const GrVkResource*);
+
+ void storePipelineCacheData();
+
+ // Destroy any cached resources. To be called before destroying the VkDevice.
+ // The assumption is that all queues are idle and all command buffers are finished.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ // If deviceLost is true, then resources will not be checked to see if they've finished
+ // before deleting (see section 4.2.4 of the Vulkan spec).
+ void destroyResources(bool deviceLost);
+
+ // Abandon any cached resources. To be used when the context/VkDevice is lost.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void abandonResources();
+
+ void backgroundReset(GrVkCommandPool* pool);
+
+ void reset(GrVkCommandPool* pool);
+
+#if GR_TEST_UTILS
+ void resetShaderCacheForTesting() const { fPipelineStateCache->release(); }
+#endif
+
+private:
+
+#ifdef SK_DEBUG
+#define GR_PIPELINE_STATE_CACHE_STATS
+#endif
+
+ class PipelineStateCache : public ::SkNoncopyable {
+ public:
+ PipelineStateCache(GrVkGpu* gpu);
+ ~PipelineStateCache();
+
+ void abandon();
+ void release();
+ GrVkPipelineState* refPipelineState(GrRenderTarget*,
+ const GrProgramInfo&,
+ GrPrimitiveType,
+ VkRenderPass compatibleRenderPass);
+
+ private:
+ struct Entry;
+
+ struct DescHash {
+ uint32_t operator()(const GrProgramDesc& desc) const {
+ return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
+ }
+ };
+
+ SkLRUCache<const GrVkPipelineStateBuilder::Desc, std::unique_ptr<Entry>, DescHash> fMap;
+
+ GrVkGpu* fGpu;
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ int fTotalRequests;
+ int fCacheMisses;
+#endif
+ };
+
+ class CompatibleRenderPassSet {
+ public:
+ // This will always construct the basic load store render pass (all attachments load and
+ // store their data) so that there is at least one compatible VkRenderPass that can be used
+ // with this set.
+ CompatibleRenderPassSet(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ GrVkRenderPass* getCompatibleRenderPass() const {
+ // The first GrVkRenderPass should always exist since we create the basic load store
+ // render pass on creation.
+ SkASSERT(fRenderPasses[0]);
+ return fRenderPasses[0];
+ }
+
+ GrVkRenderPass* getRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps);
+
+ void releaseResources(GrVkGpu* gpu);
+ void abandonResources();
+
+ private:
+ SkSTArray<4, GrVkRenderPass*> fRenderPasses;
+ int fLastReturnedIndex;
+ };
+
+ VkPipelineCache pipelineCache();
+
+ GrVkGpu* fGpu;
+
+ // Central cache for creating pipelines
+ VkPipelineCache fPipelineCache;
+
+ SkSTArray<4, CompatibleRenderPassSet> fRenderPassArray;
+
+ SkTArray<const GrVkRenderPass*> fExternalRenderPasses;
+
+ // Array of command pools that we are waiting on
+ SkSTArray<4, GrVkCommandPool*, true> fActiveCommandPools;
+
+ // Array of available command pools that are not in flight
+ SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools;
+
+ // Array of available uniform buffer resources
+ SkSTArray<16, const GrVkResource*, true> fAvailableUniformBufferResources;
+
+ // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
+ // GrVkPipelineStates
+ SkTDynamicHash<GrVkSampler, GrVkSampler::Key> fSamplers;
+
+ // Stores GrVkSamplerYcbcrConversion objects that we've already created so we can reuse them.
+ SkTDynamicHash<GrVkSamplerYcbcrConversion, GrVkSamplerYcbcrConversion::Key> fYcbcrConversions;
+
+ // Cache of GrVkPipelineStates
+ PipelineStateCache* fPipelineStateCache;
+
+ SkSTArray<4, std::unique_ptr<GrVkDescriptorSetManager>> fDescriptorSetManagers;
+
+ GrVkDescriptorSetManager::Handle fUniformDSHandle;
+
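+ // Guards fAvailableCommandPools, which reset() may push to from a task-group thread via
+ // backgroundReset().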
+ std::recursive_mutex fBackgroundMutex;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp
new file mode 100644
index 0000000000..5dc3544f00
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp
@@ -0,0 +1,132 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkSampler.h"
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
+
+static inline VkSamplerAddressMode wrap_mode_to_vk_sampler_address(
+ GrSamplerState::WrapMode wrapMode) {
+ switch (wrapMode) {
+ case GrSamplerState::WrapMode::kClamp:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ case GrSamplerState::WrapMode::kRepeat:
+ return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case GrSamplerState::WrapMode::kMirrorRepeat:
+ return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case GrSamplerState::WrapMode::kClampToBorder:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ }
+ SK_ABORT("Unknown wrap mode.");
+}
+
+GrVkSampler* GrVkSampler::Create(GrVkGpu* gpu, const GrSamplerState& samplerState,
+ const GrVkYcbcrConversionInfo& ycbcrInfo) {
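+ // These tables are indexed by GrSamplerState::Filter (kNearest, kLinear, kMipMap); mip
+ // filtering itself is expressed through mipmapMode and the LOD clamp set up below.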
+ static VkFilter vkMinFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+ static VkFilter vkMagFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+
+ VkSamplerCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkSamplerCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.magFilter = vkMagFilterModes[static_cast<int>(samplerState.filter())];
+ createInfo.minFilter = vkMinFilterModes[static_cast<int>(samplerState.filter())];
+ createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ createInfo.addressModeU = wrap_mode_to_vk_sampler_address(samplerState.wrapModeX());
+ createInfo.addressModeV = wrap_mode_to_vk_sampler_address(samplerState.wrapModeY());
+ createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; // Shouldn't matter
+ createInfo.mipLodBias = 0.0f;
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1.0f;
+ createInfo.compareEnable = VK_FALSE;
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ // Vulkan doesn't have a direct mapping of GL's nearest or linear filters for minFilter since
+ // there is always a mipmapMode. To get the same effect as GL we can set minLod = maxLod = 0.0.
+ // This works since our min and mag filters are the same (this forces us to use mag on the 0
+ // level mip). If the filters weren't the same we could set min = 0 and max = 0.25 to force
+ // the minFilter on mip level 0.
+ createInfo.minLod = 0.0f;
+ bool useMipMaps = GrSamplerState::Filter::kMipMap == samplerState.filter();
+ createInfo.maxLod = !useMipMaps ? 0.0f : 10000.0f;
+ createInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ VkSamplerYcbcrConversionInfo conversionInfo;
+ GrVkSamplerYcbcrConversion* ycbcrConversion = nullptr;
+ if (ycbcrInfo.isValid()) {
+ SkASSERT(gpu->vkCaps().supportsYcbcrConversion());
+
+ ycbcrConversion =
+ gpu->resourceProvider().findOrCreateCompatibleSamplerYcbcrConversion(ycbcrInfo);
+ if (!ycbcrConversion) {
+ return nullptr;
+ }
+
+ conversionInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
+ conversionInfo.pNext = nullptr;
+ conversionInfo.conversion = ycbcrConversion->ycbcrConversion();
+
+ createInfo.pNext = &conversionInfo;
+
+ VkFormatFeatureFlags flags = ycbcrInfo.fFormatFeatures;
+ if (!SkToBool(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT)) {
+ createInfo.magFilter = VK_FILTER_NEAREST;
+ createInfo.minFilter = VK_FILTER_NEAREST;
+ } else if (
+ !(flags &
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT)) {
+ createInfo.magFilter = ycbcrInfo.fChromaFilter;
+ createInfo.minFilter = ycbcrInfo.fChromaFilter;
+ }
+
+ // Required values when using ycbcr conversion
+ createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+ }
+
+ VkSampler sampler;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateSampler(gpu->device(),
+ &createInfo,
+ nullptr,
+ &sampler));
+
+ return new GrVkSampler(sampler, ycbcrConversion, GenerateKey(samplerState, ycbcrInfo));
+}
+
+void GrVkSampler::freeGPUData(GrVkGpu* gpu) const {
+ SkASSERT(fSampler);
+ GR_VK_CALL(gpu->vkInterface(), DestroySampler(gpu->device(), fSampler, nullptr));
+ if (fYcbcrConversion) {
+ fYcbcrConversion->unref(gpu);
+ }
+}
+
+void GrVkSampler::abandonGPUData() const {
+ if (fYcbcrConversion) {
+ fYcbcrConversion->unrefAndAbandon();
+ }
+}
+
+GrVkSampler::Key GrVkSampler::GenerateKey(const GrSamplerState& samplerState,
+ const GrVkYcbcrConversionInfo& ycbcrInfo) {
+ return { GrSamplerState::GenerateKey(samplerState),
+ GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo) };
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSampler.h b/gfx/skia/skia/src/gpu/vk/GrVkSampler.h
new file mode 100644
index 0000000000..c0069c6710
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSampler.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSampler_DEFINED
+#define GrVkSampler_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/core/SkOpts.h"
+#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
+#include <atomic>
+
+class GrSamplerState;
+class GrVkGpu;
+
+class GrVkSampler : public GrVkResource {
+public:
+ static GrVkSampler* Create(GrVkGpu* gpu, const GrSamplerState&, const GrVkYcbcrConversionInfo&);
+
+ VkSampler sampler() const { return fSampler; }
+ const VkSampler* samplerPtr() const { return &fSampler; }
+
+ struct Key {
+ Key(uint8_t samplerKey, const GrVkSamplerYcbcrConversion::Key& ycbcrKey) {
+ // We must memset here since GrVkSamplerYcbcrConversion::Key holds a 64 bit value, which may
+ // force alignment padding to occur in the middle of the Key struct. The padding must be
+ // zeroed so that the whole struct can be hashed byte-wise.
+ memset(this, 0, sizeof(Key));
+ fSamplerKey = samplerKey;
+ fYcbcrKey = ycbcrKey;
+ }
+ uint8_t fSamplerKey;
+ GrVkSamplerYcbcrConversion::Key fYcbcrKey;
+
+ bool operator==(const Key& that) const {
+ return this->fSamplerKey == that.fSamplerKey &&
+ this->fYcbcrKey == that.fYcbcrKey;
+ }
+ };
+
+ // Helpers for hashing GrVkSampler
+ static Key GenerateKey(const GrSamplerState&, const GrVkYcbcrConversionInfo&);
+
+ static const Key& GetKey(const GrVkSampler& sampler) { return sampler.fKey; }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkSampler: %d (%d refs)\n", fSampler, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkSampler(VkSampler sampler, GrVkSamplerYcbcrConversion* ycbcrConversion, Key key)
+ : INHERITED()
+ , fSampler(sampler)
+ , fYcbcrConversion(ycbcrConversion)
+ , fKey(key)
+ , fUniqueID(GenID()) {}
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+ void abandonGPUData() const override;
+
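+ // Atomically hands out unique IDs, skipping SK_InvalidUniqueID if the counter wraps.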
+ static uint32_t GenID() {
+ static std::atomic<uint32_t> nextID{1};
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidUniqueID);
+ return id;
+ }
+
+ VkSampler fSampler;
+ GrVkSamplerYcbcrConversion* fYcbcrConversion;
+ Key fKey;
+ uint32_t fUniqueID;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp b/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
new file mode 100644
index 0000000000..f33f5216dc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
+
+#include "src/gpu/vk/GrVkGpu.h"
+
+GrVkSamplerYcbcrConversion* GrVkSamplerYcbcrConversion::Create(
+ const GrVkGpu* gpu, const GrVkYcbcrConversionInfo& info) {
+ if (!gpu->vkCaps().supportsYcbcrConversion()) {
+ return nullptr;
+ }
+
+#ifdef SK_DEBUG
+ const VkFormatFeatureFlags& featureFlags = info.fFormatFeatures;
+ if (info.fXChromaOffset == VK_CHROMA_LOCATION_MIDPOINT ||
+ info.fYChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
+ SkASSERT(featureFlags & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT);
+ }
+ if (info.fXChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN ||
+ info.fYChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
+ SkASSERT(featureFlags & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT);
+ }
+ if (info.fChromaFilter == VK_FILTER_LINEAR) {
+ SkASSERT(featureFlags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT);
+ }
+ if (info.fForceExplicitReconstruction) {
+ SkASSERT(featureFlags &
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT);
+ }
+#endif
+
+
+ VkSamplerYcbcrConversionCreateInfo ycbcrCreateInfo;
+ ycbcrCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
+ ycbcrCreateInfo.pNext = nullptr;
+ ycbcrCreateInfo.format = info.fFormat;
+ ycbcrCreateInfo.ycbcrModel = info.fYcbcrModel;
+ ycbcrCreateInfo.ycbcrRange = info.fYcbcrRange;
+
+ // The components field is ignored for external format conversions; for all other formats the
+ // identity swizzle is used. It can be added to GrVkYcbcrConversionInfo if necessary.
+ ycbcrCreateInfo.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
+ ycbcrCreateInfo.xChromaOffset = info.fXChromaOffset;
+ ycbcrCreateInfo.yChromaOffset = info.fYChromaOffset;
+ ycbcrCreateInfo.chromaFilter = info.fChromaFilter;
+ ycbcrCreateInfo.forceExplicitReconstruction = info.fForceExplicitReconstruction;
+
+#ifdef SK_BUILD_FOR_ANDROID
+ VkExternalFormatANDROID externalFormat;
+ if (info.fExternalFormat) {
+ // Format must not be specified for external images.
+ SkASSERT(info.fFormat == VK_FORMAT_UNDEFINED);
+ externalFormat.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
+ externalFormat.pNext = nullptr;
+ externalFormat.externalFormat = info.fExternalFormat;
+ ycbcrCreateInfo.pNext = &externalFormat;
+ }
+#else
+ // External images are supported only on Android.
+ SkASSERT(!info.fExternalFormat);
+#endif
+
+ if (!info.fExternalFormat) {
+ SkASSERT(info.fFormat != VK_FORMAT_UNDEFINED);
+ }
+
+ // Initialize to VK_NULL_HANDLE so a failed create call is detected by the check below.
+ VkSamplerYcbcrConversion conversion = VK_NULL_HANDLE;
+ GR_VK_CALL(gpu->vkInterface(), CreateSamplerYcbcrConversion(gpu->device(), &ycbcrCreateInfo,
+ nullptr, &conversion));
+ if (conversion == VK_NULL_HANDLE) {
+ return nullptr;
+ }
+
+ return new GrVkSamplerYcbcrConversion(conversion, GenerateKey(info));
+}
+
+void GrVkSamplerYcbcrConversion::freeGPUData(GrVkGpu* gpu) const {
+ SkASSERT(fYcbcrConversion);
+ GR_VK_CALL(gpu->vkInterface(), DestroySamplerYcbcrConversion(gpu->device(), fYcbcrConversion,
+ nullptr));
+}
+
+GrVkSamplerYcbcrConversion::Key GrVkSamplerYcbcrConversion::GenerateKey(
+ const GrVkYcbcrConversionInfo& ycbcrInfo) {
+ SkASSERT(static_cast<int>(ycbcrInfo.fYcbcrModel) <= 7);
+ static const int kRangeShift = 3;
+ SkASSERT(static_cast<int>(ycbcrInfo.fYcbcrRange) <= 1);
+ static const int kXChromaOffsetShift = kRangeShift + 1;
+ SkASSERT(static_cast<int>(ycbcrInfo.fXChromaOffset) <= 1);
+ static const int kYChromaOffsetShift = kXChromaOffsetShift + 1;
+ SkASSERT(static_cast<int>(ycbcrInfo.fYChromaOffset) <= 1);
+ static const int kChromaFilterShift = kYChromaOffsetShift + 1;
+ SkASSERT(static_cast<int>(ycbcrInfo.fChromaFilter) <= 1);
+ static const int kReconShift = kChromaFilterShift + 1;
+ SkASSERT(static_cast<int>(ycbcrInfo.fForceExplicitReconstruction) <= 1);
+ GR_STATIC_ASSERT(kReconShift <= 7);
+
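+ // Resulting 8-bit key layout, low bits to high:
+ // [0..2] ycbcrModel, [3] ycbcrRange, [4] xChromaOffset,
+ // [5] yChromaOffset, [6] chromaFilter, [7] forceExplicitReconstruction.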
+ uint8_t ycbcrKey = static_cast<uint8_t>(ycbcrInfo.fYcbcrModel);
+ ycbcrKey |= (static_cast<uint8_t>(ycbcrInfo.fYcbcrRange) << kRangeShift);
+ ycbcrKey |= (static_cast<uint8_t>(ycbcrInfo.fXChromaOffset) << kXChromaOffsetShift);
+ ycbcrKey |= (static_cast<uint8_t>(ycbcrInfo.fYChromaOffset) << kYChromaOffsetShift);
+ ycbcrKey |= (static_cast<uint8_t>(ycbcrInfo.fChromaFilter) << kChromaFilterShift);
+ ycbcrKey |= (static_cast<uint8_t>(ycbcrInfo.fForceExplicitReconstruction) << kReconShift);
+
+ return Key{ycbcrInfo.fFormat, ycbcrInfo.fExternalFormat, ycbcrKey};
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.h b/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.h
new file mode 100644
index 0000000000..cf7a2c5995
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSamplerYcbcrConversion.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSamplerYcbcrConverison_DEFINED
+#define GrVkSamplerYcbcrConverison_DEFINED
+
+#include "src/gpu/vk/GrVkResource.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/core/SkOpts.h"
+
+class GrVkGpu;
+
+class GrVkSamplerYcbcrConversion : public GrVkResource {
+public:
+ static GrVkSamplerYcbcrConversion* Create(const GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
+
+ VkSamplerYcbcrConversion ycbcrConversion() const { return fYcbcrConversion; }
+
+ struct Key {
+ Key() : fVkFormat(VK_FORMAT_UNDEFINED), fExternalFormat(0), fConversionKey(0) {}
+ Key(VkFormat vkFormat, uint64_t externalFormat, uint8_t conversionKey) {
+ memset(this, 0, sizeof(Key));
+ fVkFormat = vkFormat;
+ fExternalFormat = externalFormat;
+ fConversionKey = conversionKey;
+ }
+
+ VkFormat fVkFormat;
+ uint64_t fExternalFormat;
+ uint8_t fConversionKey;
+
+ bool operator==(const Key& that) const {
+ return this->fVkFormat == that.fVkFormat &&
+ this->fExternalFormat == that.fExternalFormat &&
+ this->fConversionKey == that.fConversionKey;
+ }
+ };
+
+ // Helpers for hashing GrVkSamplerYcbcrConversion
+ static Key GenerateKey(const GrVkYcbcrConversionInfo& ycbcrInfo);
+
+ static const Key& GetKey(const GrVkSamplerYcbcrConversion& ycbcrConversion) {
+ return ycbcrConversion.fKey;
+ }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkSamplerYcbcrConversion: %d (%d refs)\n", fYcbcrConversion, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkSamplerYcbcrConversion(VkSamplerYcbcrConversion ycbcrConversion, Key key)
+ : INHERITED()
+ , fYcbcrConversion(ycbcrConversion)
+ , fKey(key) {}
+
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkSamplerYcbcrConversion fYcbcrConversion;
+ Key fKey;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp b/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp
new file mode 100644
index 0000000000..8f71f3ec5a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkSecondaryCBDrawContext.h"
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/SkGpuDevice.h"
+
+sk_sp<GrVkSecondaryCBDrawContext> GrVkSecondaryCBDrawContext::Make(GrContext* ctx,
+ const SkImageInfo& imageInfo,
+ const GrVkDrawableInfo& vkInfo,
+ const SkSurfaceProps* props) {
+ if (!ctx) {
+ return nullptr;
+ }
+
+ if (ctx->backend() != GrBackendApi::kVulkan) {
+ return nullptr;
+ }
+
+ auto rtc = ctx->priv().makeVulkanSecondaryCBRenderTargetContext(imageInfo, vkInfo, props);
+ if (!rtc) {
+ return nullptr;
+ }
+ SkASSERT(rtc->asSurfaceProxy()->isInstantiated());
+
+ sk_sp<SkGpuDevice> device(
+ SkGpuDevice::Make(ctx, std::move(rtc), SkGpuDevice::kUninit_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_sp<GrVkSecondaryCBDrawContext>(new GrVkSecondaryCBDrawContext(std::move(device),
+ props));
+}
+
+GrVkSecondaryCBDrawContext::GrVkSecondaryCBDrawContext(sk_sp<SkGpuDevice> device,
+ const SkSurfaceProps* props)
+ : fDevice(device)
+ , fProps(SkSurfacePropsCopyOrDefault(props)) {}
+
+GrVkSecondaryCBDrawContext::~GrVkSecondaryCBDrawContext() {
+ SkASSERT(!fDevice);
+ SkASSERT(!fCachedCanvas.get());
+}
+
+SkCanvas* GrVkSecondaryCBDrawContext::getCanvas() {
+ if (!fCachedCanvas) {
+ fCachedCanvas = std::unique_ptr<SkCanvas>(new SkCanvas(fDevice));
+ }
+ return fCachedCanvas.get();
+}
+
+void GrVkSecondaryCBDrawContext::flush() {
+ fDevice->flush();
+}
+
+bool GrVkSecondaryCBDrawContext::wait(int numSemaphores,
+ const GrBackendSemaphore waitSemaphores[]) {
+ return fDevice->wait(numSemaphores, waitSemaphores);
+}
+
+void GrVkSecondaryCBDrawContext::releaseResources() {
+ fCachedCanvas.reset();
+ fDevice.reset();
+}
+
+bool GrVkSecondaryCBDrawContext::characterize(SkSurfaceCharacterization* characterization) const {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ size_t maxResourceBytes = ctx->getResourceCacheLimit();
+
+ // We currently don't support textured GrVkSecondaryCBDrawContexts.
+ SkASSERT(!rtc->asTextureProxy());
+
+ SkColorType ct = GrColorTypeToSkColorType(rtc->colorInfo().colorType());
+ if (ct == kUnknown_SkColorType) {
+ return false;
+ }
+
+ SkImageInfo ii = SkImageInfo::Make(rtc->width(), rtc->height(), ct, kPremul_SkAlphaType,
+ rtc->colorInfo().refColorSpace());
+
+ GrBackendFormat format = rtc->asRenderTargetProxy()->backendFormat();
+
+ characterization->set(ctx->threadSafeProxy(), maxResourceBytes, ii, format,
+ rtc->origin(), rtc->numSamples(),
+ SkSurfaceCharacterization::Textureable(false),
+ SkSurfaceCharacterization::MipMapped(false),
+ SkSurfaceCharacterization::UsesGLFBO0(false),
+ SkSurfaceCharacterization::VulkanSecondaryCBCompatible(true),
+ GrProtected(rtc->asRenderTargetProxy()->isProtected()),
+ this->props());
+
+ return true;
+}
+
+bool GrVkSecondaryCBDrawContext::isCompatible(
+ const SkSurfaceCharacterization& characterization) const {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ if (!characterization.isValid()) {
+ return false;
+ }
+
+ if (!characterization.vulkanSecondaryCBCompatible()) {
+ return false;
+ }
+
+ // As long as the current state in the context allows for greater or equal resources,
+ // we allow the DDL to be replayed.
+ // DDL TODO: should we just remove the resource check and ignore the cache limits on playback?
+ size_t maxResourceBytes = ctx->getResourceCacheLimit();
+
+ if (characterization.isTextureable()) {
+ // We don't support textureable DDL when rendering to a GrVkSecondaryCBDrawContext.
+ return false;
+ }
+
+ if (characterization.usesGLFBO0()) {
+ return false;
+ }
+
+ SkColorType rtColorType = GrColorTypeToSkColorType(rtc->colorInfo().colorType());
+ if (rtColorType == kUnknown_SkColorType) {
+ return false;
+ }
+
+ GrBackendFormat rtcFormat = rtc->asRenderTargetProxy()->backendFormat();
+ GrProtected isProtected = GrProtected(rtc->asRenderTargetProxy()->isProtected());
+
+ return characterization.contextInfo() && characterization.contextInfo()->priv().matches(ctx) &&
+ characterization.cacheMaxResourceBytes() <= maxResourceBytes &&
+ characterization.origin() == rtc->origin() &&
+ characterization.backendFormat() == rtcFormat &&
+ characterization.width() == rtc->width() &&
+ characterization.height() == rtc->height() &&
+ characterization.colorType() == rtColorType &&
+ characterization.sampleCount() == rtc->numSamples() &&
+ SkColorSpace::Equals(characterization.colorSpace(), rtc->colorInfo().colorSpace()) &&
+ characterization.isProtected() == isProtected &&
+ characterization.surfaceProps() == rtc->surfaceProps();
+}
+
+bool GrVkSecondaryCBDrawContext::draw(SkDeferredDisplayList* ddl) {
+ if (!ddl || !this->isCompatible(ddl->characterization())) {
+ return false;
+ }
+
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ ctx->priv().copyRenderTasksFromDDL(ddl, rtc->asRenderTargetProxy());
+ return true;
+}
+
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.h b/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.h
new file mode 100644
index 0000000000..b37f6a6093
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSecondaryCBDrawContext.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSecondaryCBDrawContext_DEFINED
+#define GrVkSecondaryCBDrawContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+
+class GrBackendSemaphore;
+class GrContext;
+struct GrVkDrawableInfo;
+class SkCanvas;
+class SkDeferredDisplayList;
+class SkGpuDevice;
+struct SkImageInfo;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+
+/**
+ * This is a private header intended to be used only inside of Chromium. Chromium must reach in
+ * and include it explicitly, since it is not part of Skia's public include directory.
+ */
+
+/**
+ * This class is used to draw into an external Vulkan secondary command buffer that is imported
+ * by the client. The secondary command buffer that gets imported must already have had begin called
+ * on it with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT. Thus any draws to the imported
+ * command buffer cannot require changing the render pass. This requirement means that certain types
+ * of draws will not be supported when using a GrVkSecondaryCBDrawContext. This includes:
+ *   - Draws that require a dst copy for blending will be dropped.
+ *   - Text draws will be dropped (these may require intermediate uploads of text data).
+ *   - Reading and writing pixels will not work.
+ *   - Any other draw that requires a copy will fail (this includes using backdrop filter with
+ *     save layer).
+ * Stenciling is also disabled, but that should not restrict any actual draws from working.
+ *
+ * While using a GrVkSecondaryCBDrawContext, the client can also draw into normal SkSurfaces and
+ * then draw those SkSurfaces (as SkImages) into the GrVkSecondaryCBDrawContext. If any of the
+ * previously mentioned unsupported draws are needed by the client, they can draw them into an
+ * offscreen surface, and then draw that into the GrVkSecondaryCBDrawContext.
+ *
+ * After all drawing to the GrVkSecondaryCBDrawContext has been done, the client must call flush()
+ * on the GrVkSecondaryCBDrawContext to actually fill in the secondary VkCommandBuffer with the
+ * draws.
+ *
+ * Additionally, the client must keep the GrVkSecondaryCBDrawContext alive until the secondary
+ * VkCommandBuffer has been submitted and all work finished on the GPU. Before deleting the
+ * GrVkSecondaryCBDrawContext, the client must call releaseResources() so that Skia can clean up
+ * any internal objects that were created for the draws into the secondary command buffer.
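+ *
+ * A minimal usage sketch (hypothetical client code; creating the GrVkDrawableInfo and
+ * submitting the secondary command buffer are the client's responsibility and are omitted):
+ *
+ *   sk_sp<GrVkSecondaryCBDrawContext> drawContext =
+ *           GrVkSecondaryCBDrawContext::Make(grContext, imageInfo, vkDrawableInfo, &props);
+ *   SkCanvas* canvas = drawContext->getCanvas();
+ *   canvas->drawRect(rect, paint);     // record draws
+ *   drawContext->flush();              // fill in the secondary VkCommandBuffer
+ *   // ... client submits the secondary command buffer and waits for the GPU ...
+ *   drawContext->releaseResources();   // only after the GPU work has finished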
+ */
+class SK_API GrVkSecondaryCBDrawContext : public SkRefCnt {
+public:
+ static sk_sp<GrVkSecondaryCBDrawContext> Make(GrContext*, const SkImageInfo&,
+ const GrVkDrawableInfo&,
+ const SkSurfaceProps* props);
+
+ ~GrVkSecondaryCBDrawContext() override;
+
+ SkCanvas* getCanvas();
+
+ // Records all the draws to the imported secondary command buffer and submits any dependent
+ // offscreen draws to the GPU.
+ void flush();
+
+ /** Inserts a list of GPU semaphores that Skia will have the driver wait on before executing
+ commands for this secondary CB. The wait semaphores will get added to the VkCommandBuffer
+ owned by this GrContext when flush() is called, and not the command buffer which the
+ Secondary CB is from. This will guarantee that the driver waits on the semaphores before
+ the secondary command buffer gets executed. Skia will take ownership of the underlying
+ semaphores and delete them once they have been signaled and waited on. If this call returns
+ false, then the GPU back-end will not wait on any passed in semaphores, and the client will
+ still own the semaphores.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[]);
+
+ // This call will release all resources held by the draw context. The client must call
+ // releaseResources() before deleting the drawing context. However, the resources also include
+ // any Vulkan resources that were created and used for draws. Therefore the client must only
+ // call releaseResources() after submitting the secondary command buffer, and waiting for it to
+ // finish on the GPU. If it is called earlier, some Vulkan objects may be deleted while they
+ // are still in use by the GPU.
+ void releaseResources();
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ // TODO: Fill out these calls to support DDL
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+ bool draw(SkDeferredDisplayList* deferredDisplayList);
+
+private:
+ explicit GrVkSecondaryCBDrawContext(sk_sp<SkGpuDevice>, const SkSurfaceProps*);
+
+ bool isCompatible(const SkSurfaceCharacterization& characterization) const;
+
+ sk_sp<SkGpuDevice> fDevice;
+ std::unique_ptr<SkCanvas> fCachedCanvas;
+ const SkSurfaceProps fProps;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.cpp b/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.cpp
new file mode 100644
index 0000000000..dd7796ee88
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkSemaphore.h"
+
+#include "include/gpu/GrBackendSemaphore.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+// Windows wants to define this as CreateSemaphoreA or CreateSemaphoreW
+#undef CreateSemaphore
+#endif
+
+sk_sp<GrVkSemaphore> GrVkSemaphore::Make(GrVkGpu* gpu, bool isOwned) {
+ VkSemaphoreCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkSemaphoreCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
+ CreateSemaphore(gpu->device(), &createInfo, nullptr, &semaphore));
+
+ return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore, false, false, isOwned));
+}
+
+sk_sp<GrVkSemaphore> GrVkSemaphore::MakeWrapped(GrVkGpu* gpu,
+ VkSemaphore semaphore,
+ WrapType wrapType,
+ GrWrapOwnership ownership) {
+ if (VK_NULL_HANDLE == semaphore) {
+ return nullptr;
+ }
+ bool prohibitSignal = WrapType::kWillWait == wrapType;
+ bool prohibitWait = WrapType::kWillSignal == wrapType;
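+ // Marking the prohibited operation as "already submitted" keeps Skia from signaling a
+ // semaphore the client imported for waiting, or waiting on one imported for signaling.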
+ return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore, prohibitSignal, prohibitWait,
+ kBorrow_GrWrapOwnership != ownership));
+}
+
+GrVkSemaphore::GrVkSemaphore(GrVkGpu* gpu, VkSemaphore semaphore, bool prohibitSignal,
+ bool prohibitWait, bool isOwned)
+ : INHERITED(gpu) {
+ fResource = new Resource(semaphore, prohibitSignal, prohibitWait, isOwned);
+ isOwned ? this->registerWithCache(SkBudgeted::kNo)
+ : this->registerWithCacheWrapped(GrWrapCacheable::kNo);
+}
+
+void GrVkSemaphore::onRelease() {
+ if (fResource) {
+ fResource->unref(static_cast<GrVkGpu*>(this->getGpu()));
+ fResource = nullptr;
+ }
+ INHERITED::onRelease();
+}
+
+void GrVkSemaphore::onAbandon() {
+ if (fResource) {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ }
+ INHERITED::onAbandon();
+}
+
+void GrVkSemaphore::Resource::freeGPUData(GrVkGpu* gpu) const {
+ if (fIsOwned) {
+ GR_VK_CALL(gpu->vkInterface(),
+ DestroySemaphore(gpu->device(), fSemaphore, nullptr));
+ }
+}
+
+GrBackendSemaphore GrVkSemaphore::backendSemaphore() const {
+ GrBackendSemaphore backendSemaphore;
+ backendSemaphore.initVulkan(fResource->semaphore());
+ return backendSemaphore;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.h b/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.h
new file mode 100644
index 0000000000..0c73aa1191
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSemaphore.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSemaphore_DEFINED
+#define GrVkSemaphore_DEFINED
+
+#include "src/gpu/GrSemaphore.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/vk/GrVkResource.h"
+
+class GrBackendSemaphore;
+class GrVkGpu;
+
+class GrVkSemaphore : public GrSemaphore {
+public:
+ static sk_sp<GrVkSemaphore> Make(GrVkGpu* gpu, bool isOwned);
+
+ using WrapType = GrResourceProvider::SemaphoreWrapType;
+
+ static sk_sp<GrVkSemaphore> MakeWrapped(GrVkGpu* gpu,
+ VkSemaphore semaphore,
+ WrapType wrapType,
+ GrWrapOwnership);
+
+ GrBackendSemaphore backendSemaphore() const override;
+
+ class Resource : public GrVkResource {
+ public:
+ Resource(VkSemaphore semaphore, bool prohibitSignal, bool prohibitWait, bool isOwned)
+ : INHERITED()
+ , fSemaphore(semaphore)
+ , fHasBeenSubmittedToQueueForSignal(prohibitSignal)
+ , fHasBeenSubmittedToQueueForWait(prohibitWait)
+ , fIsOwned(isOwned) {}
+
+ ~Resource() override {}
+
+ VkSemaphore semaphore() const { return fSemaphore; }
+
+ bool shouldSignal() const {
+ return !fHasBeenSubmittedToQueueForSignal;
+ }
+ bool shouldWait() const {
+ return !fHasBeenSubmittedToQueueForWait;
+ }
+
+ void markAsSignaled() {
+ fHasBeenSubmittedToQueueForSignal = true;
+ }
+ void markAsWaited() {
+ fHasBeenSubmittedToQueueForWait = true;
+ }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkSemaphore: %d (%d refs)\n", fSemaphore, this->getRefCnt());
+ }
+#endif
+ private:
+ void freeGPUData(GrVkGpu* gpu) const override;
+
+ VkSemaphore fSemaphore;
+ bool fHasBeenSubmittedToQueueForSignal;
+ bool fHasBeenSubmittedToQueueForWait;
+ bool fIsOwned;
+
+ typedef GrVkResource INHERITED;
+ };
+
+ Resource* getResource() { return fResource; }
+
+private:
+ GrVkSemaphore(GrVkGpu* gpu, VkSemaphore semaphore, bool prohibitSignal, bool prohibitWait,
+ bool isOwned);
+
+ void onRelease() override;
+ void onAbandon() override;
+
+ Resource* fResource;
+
+ typedef GrSemaphore INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp
new file mode 100644
index 0000000000..58c92d8c4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImage.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkStencilAttachment.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkStencilAttachment::GrVkStencilAttachment(GrVkGpu* gpu,
+ const Format& format,
+ const GrVkImage::ImageDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* stencilView)
+ : GrStencilAttachment(gpu, desc.fWidth, desc.fHeight, format.fStencilBits, desc.fSamples)
+ , GrVkImage(info, std::move(layout), GrBackendObjectOwnership::kOwned)
+ , fFormat(format)
+ , fStencilView(stencilView) {
+ this->registerWithCache(SkBudgeted::kYes);
+ stencilView->ref();
+}
+
+GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
+ int width,
+ int height,
+ int sampleCnt,
+ const Format& format) {
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = format.fInternalFormat;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = sampleCnt;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, info.fImage,
+ format.fInternalFormat,
+ GrVkImageView::kStencil_Type, 1,
+ GrVkYcbcrConversionInfo());
+ if (!imageView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return nullptr;
+ }
+
+ sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(info.fImageLayout));
+ GrVkStencilAttachment* stencil = new GrVkStencilAttachment(gpu, format, imageDesc,
+ info, std::move(layout), imageView);
+ imageView->unref(gpu);
+
+ return stencil;
+}
+
+GrVkStencilAttachment::~GrVkStencilAttachment() {
+ // should have been released or abandoned first
+ SkASSERT(!fStencilView);
+}
+
+size_t GrVkStencilAttachment::onGpuMemorySize() const {
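+ // Approximate GPU footprint: width * height * bits-per-pixel * sample count, converted
+ // from bits to bytes.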
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= this->numSamples();
+ return static_cast<size_t>(size / 8);
+}
+
+void GrVkStencilAttachment::onRelease() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ this->releaseImage(gpu);
+
+ fStencilView->unref(gpu);
+ fStencilView = nullptr;
+ GrStencilAttachment::onRelease();
+}
+
+void GrVkStencilAttachment::onAbandon() {
+ this->abandonImage();
+ fStencilView->unrefAndAbandon();
+ fStencilView = nullptr;
+ GrStencilAttachment::onAbandon();
+}
+
+GrVkGpu* GrVkStencilAttachment::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h
new file mode 100644
index 0000000000..568e0e5c93
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h
@@ -0,0 +1,58 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkStencil_DEFINED
+#define GrVkStencil_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/vk/GrVkImage.h"
+
+class GrVkImageView;
+class GrVkGpu;
+
+class GrVkStencilAttachment : public GrStencilAttachment, public GrVkImage {
+public:
+ struct Format {
+ VkFormat fInternalFormat;
+ int fStencilBits;
+ int fTotalBits;
+ bool fPacked;
+ };
+
+ static GrVkStencilAttachment* Create(GrVkGpu* gpu, int width, int height,
+ int sampleCnt, const Format& format);
+
+ ~GrVkStencilAttachment() override;
+
+ const GrVkResource* imageResource() const { return this->resource(); }
+ const GrVkImageView* stencilView() const { return fStencilView; }
+
+ VkFormat vkFormat() const { return fFormat.fInternalFormat; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrVkStencilAttachment(GrVkGpu* gpu,
+ const Format& format,
+ const GrVkImage::ImageDesc&,
+ const GrVkImageInfo&,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* stencilView);
+
+ GrVkGpu* getVkGpu() const;
+
+ Format fFormat;
+
+ const GrVkImageView* fStencilView;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp
new file mode 100644
index 0000000000..e162b34b99
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkTexture.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkTextureRenderTarget.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* view,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), GrBackendObjectOwnership::kOwned)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected,
+ GrTextureType::k2D, mipMapsStatus)
+ , fTextureView(view) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == info.fLevelCount));
+ // We don't support creating external GrVkTextures
+ SkASSERT(!info.fYcbcrConversionInfo.isValid() || !info.fYcbcrConversionInfo.fExternalFormat);
+ this->registerWithCache(budgeted);
+ if (GrVkFormatIsCompressed(info.fFormat)) {
+ this->setReadOnly();
+ }
+}
+
+GrVkTexture::GrVkTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc, const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout, const GrVkImageView* view,
+ GrMipMapsStatus mipMapsStatus, GrBackendObjectOwnership ownership,
+ GrWrapCacheable cacheable, GrIOType ioType, bool isExternal)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, std::move(layout), ownership)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected,
+ isExternal ? GrTextureType::kExternal : GrTextureType::k2D, mipMapsStatus)
+ , fTextureView(view) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == info.fLevelCount));
+ if (ioType == kRead_GrIOType) {
+ this->setReadOnly();
+ }
+ this->registerWithCacheWrapped(cacheable);
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* view,
+ GrMipMapsStatus mipMapsStatus,
+ GrBackendObjectOwnership ownership)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, layout, ownership)
+ , INHERITED(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected,
+ GrTextureType::k2D, mipMapsStatus)
+ , fTextureView(view) {
+ SkASSERT((GrMipMapsStatus::kNotAllocated == mipMapsStatus) == (1 == info.fLevelCount));
+ // Since this ctor is only called from GrVkTextureRenderTarget, we can't have a ycbcr conversion
+ // since we don't support that on render targets.
+ SkASSERT(!info.fYcbcrConversionInfo.isValid());
+}
+
+sk_sp<GrVkTexture> GrVkTexture::MakeNewTexture(GrVkGpu* gpu, SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImage::ImageDesc& imageDesc,
+ GrMipMapsStatus mipMapsStatus) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(
+ gpu, info.fImage, info.fFormat, GrVkImageView::kColor_Type, info.fLevelCount,
+ info.fYcbcrConversionInfo);
+ if (!imageView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return nullptr;
+ }
+ sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(info.fImageLayout));
+
+ return sk_sp<GrVkTexture>(new GrVkTexture(gpu, budgeted, desc, info, std::move(layout),
+ imageView, mipMapsStatus));
+}
+
+sk_sp<GrVkTexture> GrVkTexture::MakeWrappedTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrWrapOwnership wrapOwnership,
+ GrWrapCacheable cacheable,
+ GrIOType ioType,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout) {
+ // Adopted textures require both image and allocation because we're responsible for freeing
+ SkASSERT(VK_NULL_HANDLE != info.fImage &&
+ (kBorrow_GrWrapOwnership == wrapOwnership || VK_NULL_HANDLE != info.fAlloc.fMemory));
+
+ const GrVkImageView* imageView = GrVkImageView::Create(
+ gpu, info.fImage, info.fFormat, GrVkImageView::kColor_Type, info.fLevelCount,
+ info.fYcbcrConversionInfo);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ GrMipMapsStatus mipMapsStatus = info.fLevelCount > 1 ? GrMipMapsStatus::kValid
+ : GrMipMapsStatus::kNotAllocated;
+
+ GrBackendObjectOwnership ownership = kBorrow_GrWrapOwnership == wrapOwnership
+ ? GrBackendObjectOwnership::kBorrowed : GrBackendObjectOwnership::kOwned;
+ bool isExternal = info.fYcbcrConversionInfo.isValid() &&
+ (info.fYcbcrConversionInfo.fExternalFormat != 0);
+ return sk_sp<GrVkTexture>(new GrVkTexture(gpu, desc, info, std::move(layout), imageView,
+ mipMapsStatus, ownership, cacheable, ioType,
+ isExternal));
+}
+
+GrVkTexture::~GrVkTexture() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fTextureView);
+}
+
+void GrVkTexture::onRelease() {
+ // We're about to be severed from our GrVkResource. If there are "finish" idle procs we have to
+ // decide who will handle them. If the resource is still tied to a command buffer we let it
+ // handle them. Otherwise, we handle them.
+ if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+ this->removeFinishIdleProcs();
+ }
+
+ // we create this and don't hand it off, so we should always destroy it
+ if (fTextureView) {
+ fTextureView->unref(this->getVkGpu());
+ fTextureView = nullptr;
+ }
+
+ this->releaseImage(this->getVkGpu());
+
+ INHERITED::onRelease();
+}
+
+void GrVkTexture::onAbandon() {
+ // We're about to be severed from our GrVkResource. If there are "finish" idle procs we have to
+ // decide who will handle them. If the resource is still tied to a command buffer we let it
+ // handle them. Otherwise, we handle them.
+ if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+ this->removeFinishIdleProcs();
+ }
+
+ // we create this and don't hand it off, so we should always destroy it
+ if (fTextureView) {
+ fTextureView->unrefAndAbandon();
+ fTextureView = nullptr;
+ }
+
+ this->abandonImage();
+ INHERITED::onAbandon();
+}
+
+GrBackendTexture GrVkTexture::getBackendTexture() const {
+ return GrBackendTexture(this->width(), this->height(), fInfo, this->grVkImageLayout());
+}
+
+GrVkGpu* GrVkTexture::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
+const GrVkImageView* GrVkTexture::textureView() {
+ return fTextureView;
+}
+
+void GrVkTexture::addIdleProc(sk_sp<GrRefCntedCallback> idleProc, IdleState type) {
+ INHERITED::addIdleProc(idleProc, type);
+ if (type == IdleState::kFinished) {
+ if (auto* resource = this->resource()) {
+ resource->addIdleProc(this, std::move(idleProc));
+ }
+ }
+}
+
+void GrVkTexture::callIdleProcsOnBehalfOfResource() {
+ // If we got here then the resource is being removed from its last command buffer and the
+ // texture is idle in the cache. Any kFlush idle procs should already have been called. So
+ // the texture and resource should have the same set of procs.
+ SkASSERT(this->resource());
+ SkASSERT(this->resource()->idleProcCnt() == fIdleProcs.count());
+#ifdef SK_DEBUG
+ for (int i = 0; i < fIdleProcs.count(); ++i) {
+ SkASSERT(fIdleProcs[i] == this->resource()->idleProc(i));
+ }
+#endif
+ fIdleProcs.reset();
+ this->resource()->resetIdleProcs();
+}
+
+void GrVkTexture::willRemoveLastRef() {
+ if (!fIdleProcs.count()) {
+ return;
+ }
+ // This is called when the GrTexture is purgeable. However, we need to check whether the
+ // Resource is still owned by any command buffers. If it is then it will call the proc.
+ auto* resource = this->hasResource() ? this->resource() : nullptr;
+ bool callFinishProcs = !resource || !resource->isOwnedByCommandBuffer();
+ if (callFinishProcs) {
+ // Everything must go!
+ fIdleProcs.reset();
+ // resource may be null here (callFinishProcs is also true when there is no resource), so
+ // guard the dereference.
+ if (resource) {
+ resource->resetIdleProcs();
+ }
+ } else {
+ // The procs that should be called on flush but not finish are those that are owned
+ // by the GrVkTexture and not the Resource. We do this by copying the resource's array
+ // and thereby dropping refs to procs we own but the resource does not.
+ SkASSERT(resource);
+ fIdleProcs.reset(resource->idleProcCnt());
+ for (int i = 0; i < fIdleProcs.count(); ++i) {
+ fIdleProcs[i] = resource->idleProc(i);
+ }
+ }
+}
+
+void GrVkTexture::removeFinishIdleProcs() {
+ // This should only be called by onRelease/onAbandon when we have already checked for a
+ // resource.
+ const auto* resource = this->resource();
+ SkASSERT(resource);
+ SkSTArray<4, sk_sp<GrRefCntedCallback>> procsToKeep;
+ int resourceIdx = 0;
+ // The idle procs that are common between the GrVkTexture and its Resource should be found in
+ // the same order.
+ for (int i = 0; i < fIdleProcs.count(); ++i) {
+ if (fIdleProcs[i] == resource->idleProc(resourceIdx)) {
+ ++resourceIdx;
+ } else {
+ procsToKeep.push_back(fIdleProcs[i]);
+ }
+ }
+ SkASSERT(resourceIdx == resource->idleProcCnt());
+ fIdleProcs = procsToKeep;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTexture.h b/gfx/skia/skia/src/gpu/vk/GrVkTexture.h
new file mode 100644
index 0000000000..6d90826227
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTexture.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTexture_DEFINED
+#define GrVkTexture_DEFINED
+
+#include "include/gpu/GrTexture.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkImage.h"
+
+class GrVkGpu;
+class GrVkImageView;
+struct GrVkImageInfo;
+
+class GrVkTexture : public GrTexture, public virtual GrVkImage {
+public:
+ static sk_sp<GrVkTexture> MakeNewTexture(GrVkGpu*,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc&,
+ const GrVkImage::ImageDesc&,
+ GrMipMapsStatus);
+
+ static sk_sp<GrVkTexture> MakeWrappedTexture(GrVkGpu*, const GrSurfaceDesc&, GrWrapOwnership,
+ GrWrapCacheable, GrIOType, const GrVkImageInfo&,
+ sk_sp<GrVkImageLayout>);
+
+ ~GrVkTexture() override;
+
+ GrBackendTexture getBackendTexture() const override;
+
+ GrBackendFormat backendFormat() const override { return this->getBackendFormat(); }
+
+ void textureParamsModified() override {}
+
+ const GrVkImageView* textureView();
+
+ void addIdleProc(sk_sp<GrRefCntedCallback>, IdleState) override;
+ void callIdleProcsOnBehalfOfResource();
+
+protected:
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, const GrVkImageInfo&, sk_sp<GrVkImageLayout>,
+ const GrVkImageView*, GrMipMapsStatus, GrBackendObjectOwnership);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) override {
+ return false;
+ }
+
+ void willRemoveLastRef() override;
+
+private:
+ GrVkTexture(GrVkGpu*, SkBudgeted, const GrSurfaceDesc&, const GrVkImageInfo&,
+ sk_sp<GrVkImageLayout> layout, const GrVkImageView* imageView,
+ GrMipMapsStatus);
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, const GrVkImageInfo&, sk_sp<GrVkImageLayout>,
+ const GrVkImageView*, GrMipMapsStatus, GrBackendObjectOwnership, GrWrapCacheable,
+ GrIOType, bool isExternal);
+
+ // In Vulkan we call the release proc after we are finished with the underlying
+ // GrVkImage::Resource object (which occurs after the GPU has finished all work on it).
+ void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {
+ // Forward the release proc on to GrVkImage
+ this->setResourceRelease(std::move(releaseHelper));
+ }
+
+ void removeFinishIdleProcs();
+
+ const GrVkImageView* fTextureView;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp
new file mode 100644
index 0000000000..d89822da3b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkTextureRenderTarget.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkImageView.h"
+#include "src/gpu/vk/GrVkUtil.h"
+
+#include "src/core/SkMipMap.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkTextureRenderTarget::GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, layout, GrBackendObjectOwnership::kOwned)
+ , GrVkTexture(gpu, desc, info, layout, texView, mipMapsStatus,
+ GrBackendObjectOwnership::kOwned)
+ , GrVkRenderTarget(gpu, desc, sampleCnt, info, layout, msaaInfo, std::move(msaaLayout),
+ colorAttachmentView, resolveAttachmentView,
+ GrBackendObjectOwnership::kOwned) {
+ SkASSERT(info.fProtected == msaaInfo.fProtected);
+ this->registerWithCache(budgeted);
+}
+
+GrVkTextureRenderTarget::GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView,
+ GrMipMapsStatus mipMapsStatus)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, layout, GrBackendObjectOwnership::kOwned)
+ , GrVkTexture(gpu, desc, info, layout, texView, mipMapsStatus,
+ GrBackendObjectOwnership::kOwned)
+ , GrVkRenderTarget(gpu, desc, info, layout, colorAttachmentView,
+ GrBackendObjectOwnership::kOwned) {
+ this->registerWithCache(budgeted);
+}
+
+GrVkTextureRenderTarget::GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrMipMapsStatus mipMapsStatus,
+ GrBackendObjectOwnership ownership,
+ GrWrapCacheable cacheable)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, layout, ownership)
+ , GrVkTexture(gpu, desc, info, layout, texView, mipMapsStatus, ownership)
+ , GrVkRenderTarget(gpu, desc, sampleCnt, info, layout, msaaInfo, std::move(msaaLayout),
+ colorAttachmentView, resolveAttachmentView, ownership) {
+ SkASSERT(info.fProtected == msaaInfo.fProtected);
+ this->registerWithCacheWrapped(cacheable);
+}
+
+GrVkTextureRenderTarget::GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView,
+ GrMipMapsStatus mipMapsStatus,
+ GrBackendObjectOwnership ownership,
+ GrWrapCacheable cacheable)
+ : GrSurface(gpu, {desc.fWidth, desc.fHeight}, desc.fConfig, info.fProtected)
+ , GrVkImage(info, layout, ownership)
+ , GrVkTexture(gpu, desc, info, layout, texView, mipMapsStatus, ownership)
+ , GrVkRenderTarget(gpu, desc, info, layout, colorAttachmentView, ownership) {
+ this->registerWithCacheWrapped(cacheable);
+}
+
+namespace {
+struct Views {
+ const GrVkImageView* imageView = nullptr;
+ const GrVkImageView* colorAttachmentView = nullptr;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ GrVkImageInfo msInfo;
+ sk_sp<GrVkImageLayout> msLayout;
+};
+} // anonymous namespace
+
+static Views create_views(GrVkGpu* gpu, const GrSurfaceDesc& desc, int sampleCnt,
+ const GrVkImageInfo& info) {
+ VkImage image = info.fImage;
+ // Create the texture ImageView
+ Views views;
+ views.imageView = GrVkImageView::Create(gpu, image, info.fFormat, GrVkImageView::kColor_Type,
+ info.fLevelCount, info.fYcbcrConversionInfo);
+ if (!views.imageView) {
+ return {};
+ }
+
+ VkFormat pixelFormat = info.fFormat;
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ if (sampleCnt > 1) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = sampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &views.msInfo)) {
+ views.imageView->unref(gpu);
+ return {};
+ }
+
+ // Set color attachment image
+ colorImage = views.msInfo.fImage;
+
+ // Create resolve attachment view.
+ views.resolveAttachmentView =
+ GrVkImageView::Create(gpu, image, pixelFormat, GrVkImageView::kColor_Type,
+ info.fLevelCount, GrVkYcbcrConversionInfo());
+ if (!views.resolveAttachmentView) {
+ GrVkImage::DestroyImageInfo(gpu, &views.msInfo);
+ views.imageView->unref(gpu);
+ return {};
+ }
+ views.msLayout.reset(new GrVkImageLayout(views.msInfo.fImageLayout));
+ } else {
+ // Set color attachment image
+ colorImage = info.fImage;
+ }
+
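+ // Note: with MSAA the render pass draws into the multisample image and resolves into the
+ // original image, so the resolve attachment view above wraps `image` while the color
+ // attachment view below wraps the multisample image.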
+ views.colorAttachmentView = GrVkImageView::Create(
+ gpu, colorImage, pixelFormat, GrVkImageView::kColor_Type, 1, GrVkYcbcrConversionInfo());
+ if (!views.colorAttachmentView) {
+ if (sampleCnt > 1) {
+ views.resolveAttachmentView->unref(gpu);
+ GrVkImage::DestroyImageInfo(gpu, &views.msInfo);
+ }
+ views.imageView->unref(gpu);
+ return {};
+ }
+ return views;
+}
+
+sk_sp<GrVkTextureRenderTarget> GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
+ GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImage::ImageDesc& imageDesc,
+ GrMipMapsStatus mipMapsStatus) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+ sk_sp<GrVkImageLayout> layout(new GrVkImageLayout(info.fImageLayout));
+
+ Views views = create_views(gpu, desc, sampleCnt, info);
+ if (!views.colorAttachmentView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return nullptr;
+ }
+ if (sampleCnt > 1) {
+ return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
+ gpu, budgeted, desc, sampleCnt, info, std::move(layout), views.imageView,
+ views.msInfo, std::move(views.msLayout), views.colorAttachmentView,
+ views.resolveAttachmentView, mipMapsStatus));
+ } else {
+ return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
+ gpu, budgeted, desc, info, std::move(layout), views.imageView,
+ views.colorAttachmentView, mipMapsStatus));
+ }
+}
+
+sk_sp<GrVkTextureRenderTarget> GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
+ GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ GrWrapOwnership wrapOwnership,
+ GrWrapCacheable cacheable,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout) {
+ // Adopted textures require both image and allocation because we're responsible for freeing
+ // them.
+ SkASSERT(VK_NULL_HANDLE != info.fImage &&
+ (kBorrow_GrWrapOwnership == wrapOwnership || VK_NULL_HANDLE != info.fAlloc.fMemory));
+
+ GrMipMapsStatus mipMapsStatus = info.fLevelCount > 1 ? GrMipMapsStatus::kDirty
+ : GrMipMapsStatus::kNotAllocated;
+
+ GrBackendObjectOwnership ownership = kBorrow_GrWrapOwnership == wrapOwnership
+ ? GrBackendObjectOwnership::kBorrowed : GrBackendObjectOwnership::kOwned;
+ Views views = create_views(gpu, desc, sampleCnt, info);
+ if (!views.colorAttachmentView) {
+ return nullptr;
+ }
+ if (sampleCnt > 1) {
+ return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
+ gpu, desc, sampleCnt, info, std::move(layout), views.imageView, views.msInfo,
+ std::move(views.msLayout), views.colorAttachmentView, views.resolveAttachmentView,
+ mipMapsStatus, ownership, cacheable));
+ } else {
+ return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
+ gpu, desc, info, std::move(layout), views.imageView, views.colorAttachmentView,
+ mipMapsStatus, ownership, cacheable));
+ }
+}
+
+size_t GrVkTextureRenderTarget::onGpuMemorySize() const {
+ int numColorSamples = this->numSamples();
+ if (numColorSamples > 1) {
+ // Add one to account for the resolve VkImage.
+ ++numColorSamples;
+ }
+ const GrCaps& caps = *this->getGpu()->caps();
+ return GrSurface::ComputeSize(caps, this->backendFormat(), this->width(), this->height(),
+ numColorSamples, // TODO: is this still correct?
+ this->texturePriv().mipMapped());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h
new file mode 100644
index 0000000000..077143195e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkTextureRenderTarget_DEFINED
+#define GrVkTextureRenderTarget_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkRenderTarget.h"
+#include "src/gpu/vk/GrVkTexture.h"
+
+class GrVkGpu;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkImageView;
+struct GrVkImageInfo;
+
+class GrVkTextureRenderTarget : public GrVkTexture, public GrVkRenderTarget {
+public:
+ static sk_sp<GrVkTextureRenderTarget> MakeNewTextureRenderTarget(GrVkGpu*, SkBudgeted,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ const GrVkImage::ImageDesc&,
+ GrMipMapsStatus);
+
+ static sk_sp<GrVkTextureRenderTarget> MakeWrappedTextureRenderTarget(GrVkGpu*,
+ const GrSurfaceDesc&,
+ int sampleCnt,
+ GrWrapOwnership,
+ GrWrapCacheable,
+ const GrVkImageInfo&,
+ sk_sp<GrVkImageLayout>);
+
+ GrBackendFormat backendFormat() const override { return this->getBackendFormat(); }
+
+protected:
+ void onAbandon() override {
+ // In order to correctly handle calling texture idle procs, GrVkTexture must go first.
+ GrVkTexture::onAbandon();
+ GrVkRenderTarget::onAbandon();
+ }
+
+ void onRelease() override {
+ // In order to correctly handle calling texture idle procs, GrVkTexture must go first.
+ GrVkTexture::onRelease();
+ GrVkRenderTarget::onRelease();
+ }
+
+private:
+ // MSAA, not-wrapped
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrMipMapsStatus);
+
+ // non-MSAA, not-wrapped
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView,
+ GrMipMapsStatus);
+
+ // MSAA, wrapped
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ int sampleCnt,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ sk_sp<GrVkImageLayout> msaaLayout,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrMipMapsStatus,
+ GrBackendObjectOwnership,
+ GrWrapCacheable);
+
+ // non-MSAA, wrapped
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ sk_sp<GrVkImageLayout> layout,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView,
+ GrMipMapsStatus,
+ GrBackendObjectOwnership,
+ GrWrapCacheable);
+
+ // Accounts for the texture's memory and any MSAA color image's memory.
+ size_t onGpuMemorySize() const override;
+
+ // In Vulkan we call the release proc after we are finished with the underlying
+ // GrVkImage::Resource object (which occurs after the GPU has finished all work on it).
+ void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {
+ // Forward the release proc on to GrVkImage
+ this->setResourceRelease(std::move(releaseHelper));
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp
new file mode 100644
index 0000000000..38b00e11a7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp
@@ -0,0 +1,61 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkTransferBuffer.h"
+
+sk_sp<GrVkTransferBuffer> GrVkTransferBuffer::Make(GrVkGpu* gpu, size_t size,
+ GrVkBuffer::Type type) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = true;
+ SkASSERT(GrVkBuffer::kCopyRead_Type == type || GrVkBuffer::kCopyWrite_Type == type);
+ desc.fType = type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkTransferBuffer* buffer = new GrVkTransferBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return sk_sp<GrVkTransferBuffer>(buffer);
+}
+
+GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes,
+ kCopyRead_Type == desc.fType ? GrGpuBufferType::kXferCpuToGpu
+ : GrGpuBufferType::kXferGpuToCpu,
+ kStream_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+void GrVkTransferBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTransferBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU64((uint64_t)this->buffer());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "vk_buffer",
+ buffer_id.c_str());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h
new file mode 100644
index 0000000000..210cf228d9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h
@@ -0,0 +1,47 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkTransferBuffer_DEFINED
+#define GrVkTransferBuffer_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/vk/GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkTransferBuffer : public GrGpuBuffer, public GrVkBuffer {
+public:
+ static sk_sp<GrVkTransferBuffer> Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+ void onMap() override { this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu()); }
+
+ void onUnmap() override { this->vkUnmap(this->getVkGpu()); }
+
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override {
+ SK_ABORT("Not implemented for transfer buffers.");
+ }
+
+ GrVkGpu* getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+ }
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTypesPriv.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTypesPriv.cpp
new file mode 100644
index 0000000000..6f5d0b27f2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTypesPriv.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/GrVkTypesPriv.h"
+
+#include "src/gpu/vk/GrVkImageLayout.h"
+
+void GrVkBackendSurfaceInfo::cleanup() {
+ SkSafeUnref(fLayout);
+ fLayout = nullptr;
+}
+
+void GrVkBackendSurfaceInfo::assign(const GrVkBackendSurfaceInfo& that, bool isThisValid) {
+ fImageInfo = that.fImageInfo;
+ GrVkImageLayout* oldLayout = fLayout;
+ fLayout = SkSafeRef(that.fLayout);
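+ // Only unref the old layout if this object previously held a valid one; otherwise fLayout
+ // may be uninitialized memory.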
+ if (isThisValid) {
+ SkSafeUnref(oldLayout);
+ }
+}
+
+void GrVkBackendSurfaceInfo::setImageLayout(VkImageLayout layout) {
+ SkASSERT(fLayout);
+ fLayout->setImageLayout(layout);
+}
+
+sk_sp<GrVkImageLayout> GrVkBackendSurfaceInfo::getGrVkImageLayout() const {
+ SkASSERT(fLayout);
+ return sk_ref_sp(fLayout);
+}
+
+GrVkImageInfo GrVkBackendSurfaceInfo::snapImageInfo() const {
+ return GrVkImageInfo(fImageInfo, fLayout->getImageLayout());
+}
+
+#if GR_TEST_UTILS
+bool GrVkBackendSurfaceInfo::operator==(const GrVkBackendSurfaceInfo& that) const {
+ GrVkImageInfo cpyInfoThis = fImageInfo;
+ GrVkImageInfo cpyInfoThat = that.fImageInfo;
+ // We don't care about the fImageLayout here since we require they use the same
+ // GrVkImageLayout.
+ cpyInfoThis.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ cpyInfoThat.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ return cpyInfoThis == cpyInfoThat && fLayout == that.fLayout;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp
new file mode 100644
index 0000000000..d5a4f9d86b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp
@@ -0,0 +1,103 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkUniformBuffer.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size) {
+ if (0 == size) {
+ return nullptr;
+ }
+ const GrVkResource* resource = nullptr;
+ if (size <= GrVkUniformBuffer::kStandardSize) {
+ resource = gpu->resourceProvider().findOrCreateStandardUniformBufferResource();
+ } else {
+ resource = CreateResource(gpu, size);
+ }
+ if (!resource) {
+ return nullptr;
+ }
+
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = true;
+ desc.fType = GrVkBuffer::kUniform_Type;
+ desc.fSizeInBytes = size;
+ GrVkUniformBuffer* buffer = new GrVkUniformBuffer(gpu, desc,
+ (const GrVkUniformBuffer::Resource*) resource);
+ if (!buffer) {
+ // This will destroy anything we got from the resource provider,
+ // but it avoids a conditional.
+ resource->unref(gpu);
+ }
+ return buffer;
+}
+
+// We implement our own creation function for this special buffer resource type
+const GrVkResource* GrVkUniformBuffer::CreateResource(GrVkGpu* gpu, size_t size) {
+ if (0 == size) {
+ return nullptr;
+ }
+
+ VkBuffer buffer;
+ GrVkAlloc alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = size;
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ kUniform_Type,
+ true, // dynamic
+ &alloc)) {
+ return nullptr;
+ }
+
+ const GrVkResource* resource = new GrVkUniformBuffer::Resource(buffer, alloc);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, kUniform_Type, alloc);
+ return nullptr;
+ }
+
+ return resource;
+}
+
+const GrVkBuffer::Resource* GrVkUniformBuffer::createResource(GrVkGpu* gpu,
+ const GrVkBuffer::Desc& descriptor) {
+ const GrVkResource* vkResource;
+ if (descriptor.fSizeInBytes <= GrVkUniformBuffer::kStandardSize) {
+ GrVkResourceProvider& provider = gpu->resourceProvider();
+ vkResource = provider.findOrCreateStandardUniformBufferResource();
+ } else {
+ vkResource = CreateResource(gpu, descriptor.fSizeInBytes);
+ }
+ return (const GrVkBuffer::Resource*) vkResource;
+}
+
+void GrVkUniformBuffer::Resource::onRecycle(GrVkGpu* gpu) const {
+ if (fAlloc.fSize <= GrVkUniformBuffer::kStandardSize) {
+ gpu->resourceProvider().recycleStandardUniformBufferResource(this);
+ } else {
+ this->unref(gpu);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h
new file mode 100644
index 0000000000..9a3ad3cdae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h
@@ -0,0 +1,59 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformBuffer_DEFINED
+#define GrVkUniformBuffer_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/vk/GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkUniformBuffer : public GrVkBuffer {
+
+public:
+ static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size);
+ static const GrVkResource* CreateResource(GrVkGpu* gpu, size_t size);
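+ // Uniform buffers of size kStandardSize or less share recycled resources from the
+ // resource provider; larger buffers get a dedicated resource (see Create above).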
+ static const size_t kStandardSize = 256;
+
+ void* map(GrVkGpu* gpu) {
+ return this->vkMap(gpu);
+ }
+ void unmap(GrVkGpu* gpu) {
+ this->vkUnmap(gpu);
+ }
+ // The output variable createdNewBuffer is set to true if a new VkBuffer had to be created
+ // in order to upload the data.
+ bool updateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
+ return this->vkUpdateData(gpu, src, srcSizeInBytes, createdNewBuffer);
+ }
+ void release(const GrVkGpu* gpu) { this->vkRelease(gpu); }
+ void abandon() { this->vkAbandon(); }
+
+private:
+ class Resource : public GrVkBuffer::Resource {
+ public:
+ Resource(VkBuffer buf, const GrVkAlloc& alloc)
+ : INHERITED(buf, alloc, kUniform_Type) {}
+
+ void onRecycle(GrVkGpu* gpu) const override;
+
+ typedef GrVkBuffer::Resource INHERITED;
+ };
+
+ const GrVkBuffer::Resource* createResource(GrVkGpu* gpu,
+ const GrVkBuffer::Desc& descriptor) override;
+
+ GrVkUniformBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkUniformBuffer::Resource* resource)
+ : INHERITED(desc, resource) {}
+
+ typedef GrVkBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp
new file mode 100644
index 0000000000..6df62ce6a9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp
@@ -0,0 +1,341 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/gpu/vk/GrVkUniformHandler.h"
+
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
+#include "src/gpu/vk/GrVkTexture.h"
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned; any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2, and the mask is always (alignment - 1).
+// These masks give correct alignments for the std430 block layout. They may also be used for
+// std140, but then any array type must additionally be aligned to 16 bytes (i.e. use a mask of
+// 0xF).
+// The alignments are designated in the Vulkan spec, section 14.5.4 "Offset and Stride Assignment".
+// https://www.khronos.org/registry/vulkan/specs/1.0-wsi_extensions/html/vkspec.html#interfaces-resources-layout
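+//
+// Illustrative example: kFloat2_GrSLType has mask 0x7 (8-byte alignment). With a current offset
+// of 20, 20 & 0x7 == 4, so the uniform is placed at the next aligned offset, 20 + (8 - 4) == 24.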
+static uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType: // fall through
+ case kUByte_GrSLType:
+ return 0x0;
+ case kByte2_GrSLType: // fall through
+ case kUByte2_GrSLType:
+ return 0x1;
+ case kByte3_GrSLType: // fall through
+ case kByte4_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ return 0x3;
+ case kShort_GrSLType: // fall through
+ case kUShort_GrSLType:
+ return 0x1;
+ case kShort2_GrSLType: // fall through
+ case kUShort2_GrSLType:
+ return 0x3;
+ case kShort3_GrSLType: // fall through
+ case kShort4_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ return 0x7;
+ case kInt_GrSLType:
+ case kUint_GrSLType:
+ return 0x3;
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return 0x3;
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 0x7;
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 0xF;
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 0xF;
+ case kUint2_GrSLType:
+ return 0x7;
+ case kInt2_GrSLType:
+ return 0x7;
+ case kInt3_GrSLType:
+ return 0xF;
+ case kInt4_GrSLType:
+ return 0xF;
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ return 0x7;
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 0xF;
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 0xF;
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+/** Returns the size in bytes taken up in Vulkan buffers for GrSLTypes. */
+static inline uint32_t grsltype_to_vk_size(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType:
+ return sizeof(int8_t);
+ case kByte2_GrSLType:
+ return 2 * sizeof(int8_t);
+ case kByte3_GrSLType:
+ return 3 * sizeof(int8_t);
+ case kByte4_GrSLType:
+ return 4 * sizeof(int8_t);
+ case kUByte_GrSLType:
+ return sizeof(uint8_t);
+ case kUByte2_GrSLType:
+ return 2 * sizeof(uint8_t);
+ case kUByte3_GrSLType:
+ return 3 * sizeof(uint8_t);
+ case kUByte4_GrSLType:
+ return 4 * sizeof(uint8_t);
+ case kShort_GrSLType:
+ return sizeof(int16_t);
+ case kShort2_GrSLType:
+ return 2 * sizeof(int16_t);
+ case kShort3_GrSLType:
+ return 3 * sizeof(int16_t);
+ case kShort4_GrSLType:
+ return 4 * sizeof(int16_t);
+ case kUShort_GrSLType:
+ return sizeof(uint16_t);
+ case kUShort2_GrSLType:
+ return 2 * sizeof(uint16_t);
+ case kUShort3_GrSLType:
+ return 3 * sizeof(uint16_t);
+ case kUShort4_GrSLType:
+ return 4 * sizeof(uint16_t);
+ case kInt_GrSLType:
+ return sizeof(int32_t);
+ case kUint_GrSLType:
+ return sizeof(int32_t);
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return sizeof(float);
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 2 * sizeof(float);
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 3 * sizeof(float);
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 4 * sizeof(float);
+ case kUint2_GrSLType:
+ return 2 * sizeof(uint32_t);
+ case kInt2_GrSLType:
+ return 2 * sizeof(int32_t);
+ case kInt3_GrSLType:
+ return 3 * sizeof(int32_t);
+ case kInt4_GrSLType:
+ return 4 * sizeof(int32_t);
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ // TODO: this will be 4 * sizeof(float) under std430.
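+ // (Under std140, each column of a 2x2 matrix is padded to vec4 alignment: 2 columns
+ // of 16 bytes each.)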
+ return 8 * sizeof(float);
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 12 * sizeof(float);
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 16 * sizeof(float);
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ case kSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+}
+
+
+// Given the current offset into the UBO, calculate the offset for the uniform we're trying to
+// add, taking all alignment requirements into consideration. uniformOffset is set to the offset
+// of the new uniform, and currentOffset is updated to the offset just past the end of the new
+// uniform.
+static void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ // We want to use the std140 layout here, so we must make arrays align to 16 bytes.
+ if (arrayCount || type == kFloat2x2_GrSLType) {
+ alignmentMask = 0xF;
+ }
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ if (arrayCount) {
+ uint32_t elementSize = SkTMax<uint32_t>(16, grsltype_to_vk_size(type));
+ SkASSERT(0 == (elementSize & 0xF));
+ *currentOffset = *uniformOffset + elementSize * arrayCount;
+ } else {
+ *currentOffset = *uniformOffset + grsltype_to_vk_size(type);
+ }
+}
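+// Illustrative example: starting from currentOffset == 4, adding a kFloat3_GrSLType (mask 0xF,
+// size 12) yields uniformOffset == 16 and currentOffset == 28; a following kFloat_GrSLType then
+// lands at offset 28 with no padding.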
+
+GrVkUniformHandler::~GrVkUniformHandler() {
+ GrVkGpu* gpu = static_cast<GrVkPipelineStateBuilder*>(fProgramBuilder)->gpu();
+ for (decltype(fSamplers)::Iter iter(&fSamplers); iter.next();) {
+ if (iter->fImmutableSampler) {
+ iter->fImmutableSampler->unref(gpu);
+ iter->fImmutableSampler = nullptr;
+ }
+ }
+}
+
+GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkASSERT(GrSLTypeIsFloatType(type));
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to use
+ // the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the PB
+ // exactly what name it wants to use for the uniform view matrix. If we prefix anything, the
+ // names will mismatch. The correct solution is probably for all GPs that need the uniform view
+ // matrix to upload it in their setData along with the regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0] || !strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ uni.fVisibility = visibility;
+ // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrShaderVar::kNone_TypeModifier);
+
+ get_ubo_aligned_offset(&uni.fUBOffset, &fCurrentUBOOffset, type, arrayCount);
+
+ SkString layoutQualifier;
+ layoutQualifier.appendf("offset=%d", uni.fUBOffset);
+ uni.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+GrGLSLUniformHandler::SamplerHandle GrVkUniformHandler::addSampler(const GrTextureProxy* texture,
+ const GrSamplerState& state,
+ const GrSwizzle& swizzle,
+ const char* name,
+ const GrShaderCaps* shaderCaps) {
+ SkASSERT(name && strlen(name));
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+
+ GrTextureType type = texture->textureType();
+
+ UniformInfo& info = fSamplers.push_back();
+ info.fVariable.setType(GrSLCombinedSamplerTypeForTextureType(type));
+ info.fVariable.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ info.fVariable.setName(mangleName);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("set=%d, binding=%d", kSamplerDescSet, fSamplers.count() - 1);
+ info.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+ info.fVisibility = kFragment_GrShaderFlag;
+ info.fUBOffset = 0;
+
+ // Check if we are dealing with an external texture and store the needed information if so.
+ auto ycbcrInfo = texture->backendFormat().getVkYcbcrConversionInfo();
+ if (ycbcrInfo && ycbcrInfo->isValid()) {
+ GrVkGpu* gpu = static_cast<GrVkPipelineStateBuilder*>(fProgramBuilder)->gpu();
+ info.fImmutableSampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
+ state, *ycbcrInfo);
+ SkASSERT(info.fImmutableSampler);
+ }
+
+ SkASSERT(shaderCaps->textureSwizzleAppliedInShader());
+ fSamplerSwizzles.push_back(swizzle);
+ SkASSERT(fSamplerSwizzles.count() == fSamplers.count());
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ const UniformInfo& sampler = fSamplers[i];
+ SkASSERT(sampler.fVariable.getType() == kTexture2DSampler_GrSLType ||
+ sampler.fVariable.getType() == kTextureExternalSampler_GrSLType);
+ if (visibility == sampler.fVisibility) {
+ sampler.fVariable.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+ }
+
+#ifdef SK_DEBUG
+ bool firstOffsetCheck = false;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (!firstOffsetCheck) {
+ // Check to make sure we are starting our offset at 0 so the offset qualifier we
+ // set on each variable in the uniform block is valid.
+ SkASSERT(0 == localUniform.fUBOffset);
+ firstOffsetCheck = true;
+ }
+ }
+#endif
+
+ SkString uniformsString;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility & localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ localUniform.fVariable.appendDecl(fProgramBuilder->shaderCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ }
+ }
+ }
+
+ if (!uniformsString.isEmpty()) {
+ out->appendf("layout (set=%d, binding=%d) uniform uniformBuffer\n{\n",
+ kUniformBufferDescSet, kUniformBinding);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
+
+uint32_t GrVkUniformHandler::getRTHeightOffset() const {
+ uint32_t result;
+ uint32_t currentOffset = fCurrentUBOOffset;
+ get_ubo_aligned_offset(&result, &currentOffset, kFloat_GrSLType, 0);
+ return result;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h
new file mode 100644
index 0000000000..76f82f03db
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h
@@ -0,0 +1,120 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformHandler_DEFINED
+#define GrVkUniformHandler_DEFINED
+
+#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrAllocator.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/GrShaderVar.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#include "src/gpu/vk/GrVkSampler.h"
+
+class GrVkUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ enum {
+ /**
+ * Binding a descriptor set invalidates all higher index descriptor sets. We must bind
+ * in the order of this enumeration. Samplers are after Uniforms because GrOps can specify
+ * GP textures as dynamic state, meaning they get rebound for each GrMesh in a draw while
+ * uniforms are bound once before all the draws.
+ */
+ kUniformBufferDescSet = 0,
+ kSamplerDescSet = 1,
+ };
+ enum {
+ kUniformBinding = 0
+ };
+
+ struct UniformInfo {
+ GrShaderVar fVariable;
+ uint32_t fVisibility;
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ uint32_t fUBOffset;
+ // fImmutableSampler is used for sampling an image with a ycbcr conversion.
+ const GrVkSampler* fImmutableSampler = nullptr;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ ~GrVkUniformHandler() override;
+
+ const GrShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+ /**
+ * Returns the offset that the RTHeight synthetic uniform should use if it needs to be created.
+ */
+ uint32_t getRTHeightOffset() const;
+
+private:
+ explicit GrVkUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fSamplers(kUniformsPerBlock)
+ , fCurrentUBOOffset(0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void updateUniformVisibility(UniformHandle u, uint32_t visibility) override {
+ fUniforms[u.toIndex()].fVisibility |= visibility;
+ }
+
+ SamplerHandle addSampler(const GrTextureProxy*,
+ const GrSamplerState&,
+ const GrSwizzle&,
+ const char* name,
+ const GrShaderCaps*) override;
+
+ int numSamplers() const { return fSamplers.count(); }
+ const char* samplerVariable(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()].fVariable.c_str();
+ }
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const override {
+ return fSamplerSwizzles[handle.toIndex()];
+ }
+ uint32_t samplerVisibility(SamplerHandle handle) const {
+ return fSamplers[handle.toIndex()].fVisibility;
+ }
+
+ const GrVkSampler* immutableSampler(UniformHandle u) const {
+ return fSamplers[u.toIndex()].fImmutableSampler;
+ }
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+
+ UniformInfoArray fUniforms;
+ UniformInfoArray fSamplers;
+ SkTArray<GrSwizzle> fSamplerSwizzles;
+
+ uint32_t fCurrentUBOOffset;
+
+ friend class GrVkPipelineStateBuilder;
+ friend class GrVkDescriptorSetManager;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp
new file mode 100644
index 0000000000..a49dcea8ae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkUtil.h"
+
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#ifdef SK_DEBUG
+bool GrVkFormatColorTypePairIsValid(VkFormat format, GrColorType colorType) {
+ switch (format) {
+ case VK_FORMAT_R8G8B8A8_UNORM: return GrColorType::kRGBA_8888 == colorType ||
+ GrColorType::kRGB_888x == colorType;
+ case VK_FORMAT_B8G8R8A8_UNORM: return GrColorType::kBGRA_8888 == colorType;
+ case VK_FORMAT_R8G8B8A8_SRGB: return GrColorType::kRGBA_8888_SRGB == colorType;
+ case VK_FORMAT_R8G8B8_UNORM: return GrColorType::kRGB_888x == colorType;
+ case VK_FORMAT_R8G8_UNORM: return GrColorType::kRG_88 == colorType;
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return GrColorType::kRGBA_1010102 == colorType;
+ case VK_FORMAT_R5G6B5_UNORM_PACK16: return GrColorType::kBGR_565 == colorType;
+ // R4G4B4A4 is not required to be supported so we actually
+ // store RGBA_4444 data as B4G4R4A4.
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return GrColorType::kABGR_4444 == colorType;
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return GrColorType::kABGR_4444 == colorType;
+ case VK_FORMAT_R8_UNORM: return GrColorType::kAlpha_8 == colorType ||
+ GrColorType::kGray_8 == colorType;
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return GrColorType::kRGBA_F16 == colorType ||
+ GrColorType::kRGBA_F16_Clamped == colorType;
+ case VK_FORMAT_R16_SFLOAT: return GrColorType::kAlpha_F16 == colorType;
+ case VK_FORMAT_R16_UNORM: return GrColorType::kAlpha_16 == colorType;
+ case VK_FORMAT_R16G16_UNORM: return GrColorType::kRG_1616 == colorType;
+ case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: return GrColorType::kRGB_888x == colorType;
+ case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM: return GrColorType::kRGB_888x == colorType;
+ case VK_FORMAT_R16G16B16A16_UNORM: return GrColorType::kRGBA_16161616 == colorType;
+ case VK_FORMAT_R16G16_SFLOAT: return GrColorType::kRG_F16 == colorType;
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return GrColorType::kRGB_888x == colorType;
+ default: return false;
+ }
+
+ SkUNREACHABLE;
+}
+#endif
+
+bool GrVkFormatIsSupported(VkFormat format) {
+ switch (format) {
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ case VK_FORMAT_R8G8B8_UNORM:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+ case VK_FORMAT_R8_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ case VK_FORMAT_R16_SFLOAT:
+ case VK_FORMAT_R16_UNORM:
+ case VK_FORMAT_R16G16_UNORM:
+ case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ case VK_FORMAT_R16G16_SFLOAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GrVkFormatNeedsYcbcrSampler(VkFormat format) {
+ return format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM ||
+ format == VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
+}
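+// (These are multi-planar YUV formats, which Vulkan only allows to be sampled through a
+// VkSamplerYcbcrConversion.)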
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
+ SkASSERT(samples >= 1);
+ switch (samples) {
+ case 1:
+ *vkSamples = VK_SAMPLE_COUNT_1_BIT;
+ return true;
+ case 2:
+ *vkSamples = VK_SAMPLE_COUNT_2_BIT;
+ return true;
+ case 4:
+ *vkSamples = VK_SAMPLE_COUNT_4_BIT;
+ return true;
+ case 8:
+ *vkSamples = VK_SAMPLE_COUNT_8_BIT;
+ return true;
+ case 16:
+ *vkSamples = VK_SAMPLE_COUNT_16_BIT;
+ return true;
+ case 32:
+ *vkSamples = VK_SAMPLE_COUNT_32_BIT;
+ return true;
+ case 64:
+ *vkSamples = VK_SAMPLE_COUNT_64_BIT;
+ return true;
+ default:
+ return false;
+ }
+}
+
+SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
+ if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
+ return SkSL::Program::kVertex_Kind;
+ }
+ if (VK_SHADER_STAGE_GEOMETRY_BIT == stage) {
+ return SkSL::Program::kGeometry_Kind;
+ }
+ SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
+ return SkSL::Program::kFragment_Kind;
+}
+
+bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+ const SkSL::String& shaderString,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ const SkSL::Program::Settings& settings,
+ SkSL::String* outSPIRV,
+ SkSL::Program::Inputs* outInputs) {
+ auto errorHandler = gpu->getContext()->priv().getShaderErrorHandler();
+ std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
+ vk_shader_stage_to_skiasl_kind(stage), shaderString, settings);
+ if (!program) {
+ errorHandler->compileError(shaderString.c_str(),
+ gpu->shaderCompiler()->errorText().c_str());
+ return false;
+ }
+ *outInputs = program->fInputs;
+ if (!gpu->shaderCompiler()->toSPIRV(*program, outSPIRV)) {
+ errorHandler->compileError(shaderString.c_str(),
+ gpu->shaderCompiler()->errorText().c_str());
+ return false;
+ }
+
+ return GrInstallVkShaderModule(gpu, *outSPIRV, stage, shaderModule, stageInfo);
+}
+
+bool GrInstallVkShaderModule(const GrVkGpu* gpu,
+ const SkSL::String& spirv,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo) {
+ VkShaderModuleCreateInfo moduleCreateInfo;
+ memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
+ moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ moduleCreateInfo.pNext = nullptr;
+ moduleCreateInfo.flags = 0;
+ moduleCreateInfo.codeSize = spirv.size();
+ moduleCreateInfo.pCode = (const uint32_t*)spirv.c_str();
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
+ &moduleCreateInfo,
+ nullptr,
+ shaderModule));
+ if (err) {
+ return false;
+ }
+
+ memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
+ stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stageInfo->pNext = nullptr;
+ stageInfo->flags = 0;
+ stageInfo->stage = stage;
+ stageInfo->module = *shaderModule;
+ stageInfo->pName = "main";
+ stageInfo->pSpecializationInfo = nullptr;
+
+ return true;
+}
+
+bool GrVkFormatIsCompressed(VkFormat vkFormat) {
+ switch (vkFormat) {
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GrVkFormatToCompressionType(VkFormat vkFormat, SkImage::CompressionType* compressionType) {
+ switch (vkFormat) {
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ *compressionType = SkImage::kETC1_CompressionType;
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUtil.h b/gfx/skia/skia/src/gpu/vk/GrVkUtil.h
new file mode 100644
index 0000000000..b34dfbca0c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUtil.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkUtil_DEFINED
+#define GrVkUtil_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/SkMacros.h"
+#include "src/gpu/GrColor.h"
+#include "src/gpu/GrDataUtils.h"
+#include "src/gpu/vk/GrVkInterface.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+class GrVkGpu;
+
+// makes a Vk call on the interface
+#define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X
+// same as GR_VK_CALL but checks for success
+#ifdef SK_DEBUG
+#define GR_VK_CALL_ERRCHECK(IFACE, X) \
+ VkResult SK_MACRO_APPEND_LINE(ret) = GR_VK_CALL(IFACE, X); \
+ SkASSERT(VK_SUCCESS == SK_MACRO_APPEND_LINE(ret))
+#else
+#define GR_VK_CALL_ERRCHECK(IFACE, X) (void) GR_VK_CALL(IFACE, X)
+#endif
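+// Example usage (mirroring the VK_CALL wrappers defined in the .cpp files):
+// GR_VK_CALL(gpu->vkInterface(), DestroyBuffer(gpu->device(), buffer, nullptr));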
+
+bool GrVkFormatIsSupported(VkFormat);
+
+bool GrVkFormatNeedsYcbcrSampler(VkFormat format);
+
+#ifdef SK_DEBUG
+/**
+ * Returns true if the passed in VkFormat and GrColorType are compatible with each other.
+ */
+bool GrVkFormatColorTypePairIsValid(VkFormat, GrColorType);
+#endif
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);
+
+bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+ const SkSL::String& shaderString,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo,
+ const SkSL::Program::Settings& settings,
+ SkSL::String* outSPIRV,
+ SkSL::Program::Inputs* outInputs);
+
+bool GrInstallVkShaderModule(const GrVkGpu* gpu,
+ const SkSL::String& spirv,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo);
+
+/**
+ * Returns true if the format is compressed.
+ */
+bool GrVkFormatIsCompressed(VkFormat);
+
+/**
+ * Maps a vk format into the CompressionType enum if applicable.
+ */
+bool GrVkFormatToCompressionType(VkFormat vkFormat, SkImage::CompressionType* compressionType);
+
+#if GR_TEST_UTILS
+static constexpr const char* GrVkFormatToStr(VkFormat vkFormat) {
+ switch (vkFormat) {
+ case VK_FORMAT_R8G8B8A8_UNORM: return "R8G8B8A8_UNORM";
+ case VK_FORMAT_R8_UNORM: return "R8_UNORM";
+ case VK_FORMAT_B8G8R8A8_UNORM: return "B8G8R8A8_UNORM";
+ case VK_FORMAT_R5G6B5_UNORM_PACK16: return "R5G6B5_UNORM_PACK16";
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return "R16G16B16A16_SFLOAT";
+ case VK_FORMAT_R16_SFLOAT: return "R16_SFLOAT";
+ case VK_FORMAT_R8G8B8_UNORM: return "R8G8B8_UNORM";
+ case VK_FORMAT_R8G8_UNORM: return "R8G8_UNORM";
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return "A2B10G10R10_UNORM_PACK32";
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return "B4G4R4A4_UNORM_PACK16";
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return "R4G4B4A4_UNORM_PACK16";
+ case VK_FORMAT_R32G32B32A32_SFLOAT: return "R32G32B32A32_SFLOAT";
+ case VK_FORMAT_R8G8B8A8_SRGB: return "R8G8B8A8_SRGB";
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return "ETC2_R8G8B8_UNORM_BLOCK";
+ case VK_FORMAT_R16_UNORM: return "R16_UNORM";
+ case VK_FORMAT_R16G16_UNORM: return "R16G16_UNORM";
+ case VK_FORMAT_R16G16B16A16_UNORM: return "R16G16B16A16_UNORM";
+ case VK_FORMAT_R16G16_SFLOAT: return "R16G16_SFLOAT";
+
+ default: return "Unknown";
+ }
+}
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp
new file mode 100644
index 0000000000..5fe2f45373
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkVaryingHandler.h"
+
+/** Returns the number of locations taken up by a given GrSLType. We assume that all
+ scalar values are 32 bits. */
+static inline int grsltype_to_location_size(GrSLType type) {
+ switch(type) {
+ case kVoid_GrSLType:
+ return 0;
+ case kFloat_GrSLType: // fall through
+ case kHalf_GrSLType:
+ return 1;
+ case kFloat2_GrSLType: // fall through
+ case kHalf2_GrSLType:
+ return 1;
+ case kFloat3_GrSLType:
+ case kHalf3_GrSLType:
+ return 1;
+ case kFloat4_GrSLType:
+ case kHalf4_GrSLType:
+ return 1;
+ case kUint2_GrSLType:
+ return 1;
+ case kInt2_GrSLType:
+ case kShort2_GrSLType:
+ case kUShort2_GrSLType:
+ case kByte2_GrSLType:
+ case kUByte2_GrSLType:
+ return 1;
+ case kInt3_GrSLType:
+ case kShort3_GrSLType:
+ case kUShort3_GrSLType:
+ case kByte3_GrSLType:
+ case kUByte3_GrSLType:
+ return 1;
+ case kInt4_GrSLType:
+ case kShort4_GrSLType:
+ case kUShort4_GrSLType:
+ case kByte4_GrSLType:
+ case kUByte4_GrSLType:
+ return 1;
+ case kFloat2x2_GrSLType:
+ case kHalf2x2_GrSLType:
+ return 2;
+ case kFloat3x3_GrSLType:
+ case kHalf3x3_GrSLType:
+ return 3;
+ case kFloat4x4_GrSLType:
+ case kHalf4x4_GrSLType:
+ return 4;
+ case kTexture2DSampler_GrSLType:
+ case kSampler_GrSLType:
+ case kTexture2D_GrSLType:
+ return 0;
+ case kTextureExternalSampler_GrSLType:
+ return 0;
+ case kTexture2DRectSampler_GrSLType:
+ return 0;
+ case kBool_GrSLType:
+ return 1;
+ case kInt_GrSLType: // fall through
+ case kShort_GrSLType:
+ case kByte_GrSLType:
+ return 1;
+ case kUint_GrSLType: // fall through
+ case kUShort_GrSLType:
+ case kUByte_GrSLType:
+ return 1;
+ }
+ SK_ABORT("Unexpected type");
+}
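+// For example, a half4 varying occupies one location and a float3x3 occupies three;
+// finalize_helper below multiplies by the array count for array varyings.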
+
+static void finalize_helper(GrVkVaryingHandler::VarArray& vars) {
+ int locationIndex = 0;
+ for (int i = 0; i < vars.count(); ++i) {
+ GrShaderVar& var = vars[i];
+ SkString location;
+ location.appendf("location = %d", locationIndex);
+ var.addLayoutQualifier(location.c_str());
+
+ int elementSize = grsltype_to_location_size(var.getType());
+ SkASSERT(elementSize > 0);
+ int numElements = 1;
+ if (var.isArray() && !var.isUnsizedArray()) {
+ numElements = var.getArrayCount();
+ }
+ SkASSERT(numElements > 0);
+ locationIndex += elementSize * numElements;
+ }
+ // Vulkan requires at least 64 locations to be supported for both vertex output and fragment
+ // input. If we ever hit this assert, then we'll need to add a cap to actually check the
+ // supported input and output values and adjust our supported shaders based on those values.
+ SkASSERT(locationIndex <= 64);
+}
+
+void GrVkVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h
new file mode 100644
index 0000000000..523ab6a021
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkVaryingHandler_DEFINED
+#define GrVkVaryingHandler_DEFINED
+
+#include "src/gpu/glsl/GrGLSLVarying.h"
+
+class GrVkVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrVkVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrVkPipelineStateBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp
new file mode 100644
index 0000000000..50cfc347f4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/vk/GrVkGpu.h"
+#include "src/gpu/vk/GrVkVertexBuffer.h"
+
+GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, GrGpuBufferType::kVertex,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+sk_sp<GrVkVertexBuffer> GrVkVertexBuffer::Make(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = gpu->protectedContext() ? true : dynamic;
+ desc.fType = GrVkBuffer::kVertex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkVertexBuffer* buffer = new GrVkVertexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return sk_sp<GrVkVertexBuffer>(buffer);
+}
+
+void GrVkVertexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkVertexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkVertexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
+ }
+}
+
+void GrVkVertexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkVertexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h
new file mode 100644
index 0000000000..8ddc460568
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkVertexBuffer_DEFINED
+#define GrVkVertexBuffer_DEFINED
+
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/vk/GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkVertexBuffer : public GrGpuBuffer, public GrVkBuffer {
+public:
+ static sk_sp<GrVkVertexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrGpuBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage.cpp b/gfx/skia/skia/src/image/SkImage.cpp
new file mode 100644
index 0000000000..30179c7e7f
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage.cpp
@@ -0,0 +1,535 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurface.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkNextID.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkReadPixelsRec.h"
+#include "src/shaders/SkImageShader.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "src/image/SkImage_Gpu.h"
+#endif
+#include "include/gpu/GrBackendSurface.h"
+
+SkImage::SkImage(const SkImageInfo& info, uint32_t uniqueID)
+ : fInfo(info)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID) {
+ SkASSERT(info.width() > 0);
+ SkASSERT(info.height() > 0);
+}
+
+bool SkImage::peekPixels(SkPixmap* pm) const {
+ SkPixmap tmp;
+ if (!pm) {
+ pm = &tmp;
+ }
+ return as_IB(this)->onPeekPixels(pm);
+}
+
+bool SkImage::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, int srcX,
+ int srcY, CachingHint chint) const {
+ return as_IB(this)->onReadPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY, chint);
+}
+
+bool SkImage::scalePixels(const SkPixmap& dst, SkFilterQuality quality, CachingHint chint) const {
+ if (this->width() == dst.width() && this->height() == dst.height()) {
+ return this->readPixels(dst, 0, 0, chint);
+ }
+
+ // Idea: If/when SkImageGenerator supports a native-scaling API (where the generator itself
+ // can scale more efficiently) we should take advantage of it here.
+ //
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(&bm, chint)) {
+ SkPixmap pmap;
+        // Note: By calling the pixmap scaler, we never cache the final result, so the chint
+        // is (currently) only applied to the getROPixels call. If we are ever asked to also
+        // cache the final (scaled) result, that logic would be added here.
+ //
+ return bm.peekPixels(&pmap) && pmap.scalePixels(dst, quality);
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkColorType SkImage::colorType() const { return fInfo.colorType(); }
+
+SkAlphaType SkImage::alphaType() const { return fInfo.alphaType(); }
+
+SkColorSpace* SkImage::colorSpace() const { return fInfo.colorSpace(); }
+
+sk_sp<SkColorSpace> SkImage::refColorSpace() const { return fInfo.refColorSpace(); }
+
+sk_sp<SkShader> SkImage::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)), tmx, tmy, localMatrix);
+}
+
+sk_sp<SkData> SkImage::encodeToData(SkEncodedImageFormat type, int quality) const {
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(&bm)) {
+ return SkEncodeBitmap(bm, type, quality);
+ }
+ return nullptr;
+}
+
+sk_sp<SkData> SkImage::encodeToData() const {
+ if (auto encoded = this->refEncodedData()) {
+ return encoded;
+ }
+
+ return this->encodeToData(SkEncodedImageFormat::kPNG, 100);
+}
+
+sk_sp<SkData> SkImage::refEncodedData() const {
+ return sk_sp<SkData>(as_IB(this)->onRefEncoded());
+}
+
+sk_sp<SkImage> SkImage::MakeFromEncoded(sk_sp<SkData> encoded, const SkIRect* subset) {
+ if (nullptr == encoded || 0 == encoded->size()) {
+ return nullptr;
+ }
+ return SkImage::MakeFromGenerator(SkImageGenerator::MakeFromEncoded(std::move(encoded)),
+ subset);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::makeSubset(const SkIRect& subset) const {
+ if (subset.isEmpty()) {
+ return nullptr;
+ }
+
+ const SkIRect bounds = SkIRect::MakeWH(this->width(), this->height());
+ if (!bounds.contains(subset)) {
+ return nullptr;
+ }
+
+    // Optimization: return self if the subset == our bounds.
+ if (bounds == subset) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ // CONTEXT TODO: propagate the context parameter to the top-level API
+#if SK_SUPPORT_GPU
+ return as_IB(this)->onMakeSubset(as_IB(this)->context(), subset);
+#else
+ return as_IB(this)->onMakeSubset(nullptr, subset);
+#endif
+}
+
+#if SK_SUPPORT_GPU
+
+GrTexture* SkImage::getTexture() const {
+ return as_IB(this)->onGetTexture();
+}
+
+bool SkImage::isTextureBacked() const { return as_IB(this)->onIsTextureBacked(); }
+
+GrBackendTexture SkImage::getBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ return as_IB(this)->onGetBackendTexture(flushPendingGrContextIO, origin);
+}
+
+bool SkImage::isValid(GrContext* context) const {
+ if (context && context->abandoned()) {
+ return false;
+ }
+ return as_IB(this)->onIsValid(context);
+}
+
+GrSemaphoresSubmitted SkImage::flush(GrContext* context, const GrFlushInfo& flushInfo) {
+ return as_IB(this)->onFlush(context, flushInfo);
+}
+
+void SkImage::flush(GrContext* context) { as_IB(this)->onFlush(context, {}); }
+
+#else
+
+GrTexture* SkImage::getTexture() const { return nullptr; }
+
+bool SkImage::isTextureBacked() const { return false; }
+
+GrBackendTexture SkImage::getBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ return GrBackendTexture(); // invalid
+}
+
+bool SkImage::isValid(GrContext* context) const {
+ if (context) {
+ return false;
+ }
+ return as_IB(this)->onIsValid(context);
+}
+
+GrSemaphoresSubmitted SkImage::flush(GrContext*, const GrFlushInfo&) {
+ return GrSemaphoresSubmitted::kNo;
+}
+
+void SkImage::flush(GrContext*) {}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkImage_Base::SkImage_Base(const SkImageInfo& info, uint32_t uniqueID)
+ : INHERITED(info, uniqueID), fAddedToRasterCache(false) {}
+
+SkImage_Base::~SkImage_Base() {
+ if (fAddedToRasterCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+}
+
+GrBackendTexture SkImage_Base::onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ return GrBackendTexture(); // invalid
+}
+
+bool SkImage::readPixels(const SkPixmap& pmap, int srcX, int srcY, CachingHint chint) const {
+ return this->readPixels(pmap.info(), pmap.writable_addr(), pmap.rowBytes(), srcX, srcY, chint);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeFromBitmap(const SkBitmap& bm) {
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ return SkMakeImageFromRasterBitmap(bm, kIfMutable_SkCopyPixelsMode);
+}
+
+bool SkImage::asLegacyBitmap(SkBitmap* bitmap, LegacyBitmapMode) const {
+ return as_IB(this)->onAsLegacyBitmap(bitmap);
+}
+
+sk_sp<SkCachedData> SkImage_Base::getPlanes(SkYUVASizeInfo*, SkYUVAIndex[4],
+ SkYUVColorSpace*, const void*[4]) {
+ return nullptr;
+}
+
+bool SkImage_Base::onAsLegacyBitmap(SkBitmap* bitmap) const {
+    // As the base class, all we can do is make a copy (regardless of mode).
+    // Subclasses that can do better should override this.
+ SkImageInfo info = fInfo.makeColorType(kN32_SkColorType).makeColorSpace(nullptr);
+ if (!bitmap->tryAllocPixels(info)) {
+ return false;
+ }
+ if (!this->readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(), 0, 0)) {
+ bitmap->reset();
+ return false;
+ }
+
+ bitmap->setImmutable();
+ return true;
+}
+
+sk_sp<SkImage> SkImage::MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace) {
+ return MakeFromGenerator(SkImageGenerator::MakeFromPicture(dimensions, std::move(picture),
+ matrix, paint, bitDepth,
+ std::move(colorSpace)));
+}
+
+sk_sp<SkImage> SkImage::makeWithFilter(const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const {
+ GrContext* context = as_IB(this)->context();
+
+ return this->makeWithFilter(context, filter, subset, clipBounds, outSubset, offset);
+}
+
+sk_sp<SkImage> SkImage::makeWithFilter(GrContext* grContext,
+ const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const {
+ if (!filter || !outSubset || !offset || !this->bounds().contains(subset)) {
+ return nullptr;
+ }
+ sk_sp<SkSpecialImage> srcSpecialImage =
+#if SK_SUPPORT_GPU
+ SkSpecialImage::MakeFromImage(grContext, subset, sk_ref_sp(const_cast<SkImage*>(this)));
+#else
+ SkSpecialImage::MakeFromImage(nullptr, subset, sk_ref_sp(const_cast<SkImage*>(this)));
+#endif
+ if (!srcSpecialImage) {
+ return nullptr;
+ }
+
+ sk_sp<SkImageFilterCache> cache(
+ SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize));
+
+ // The filters operate in the local space of the src image, where (0,0) corresponds to the
+ // subset's top left corner. But the clip bounds and any crop rects on the filters are in the
+    // original coordinate system, so configure the CTM to correct the crop rects and explicitly
+    // adjust the clip bounds (since they are assumed to already be in image space).
+ SkImageFilter_Base::Context context(SkMatrix::MakeTrans(-subset.x(), -subset.y()),
+ clipBounds.makeOffset(-subset.topLeft()),
+ cache.get(), fInfo.colorType(), fInfo.colorSpace(),
+ srcSpecialImage.get());
+
+ sk_sp<SkSpecialImage> result = as_IFB(filter)->filterImage(context).imageAndOffset(offset);
+ if (!result) {
+ return nullptr;
+ }
+
+ // The output image and offset are relative to the subset rectangle, so the offset needs to
+    // be shifted to put it in the correct spot with respect to the original coordinate system.
+ offset->fX += subset.x();
+ offset->fY += subset.y();
+
+    // Final clip against the exact clipBounds (the clip provided in the context gets adjusted
+    // to account for pixel-moving filters, so it doesn't always exactly match when finished).
+    // The clipBounds are translated into the clippedDstRect coordinate space; including
+    // result->subset() in that translation ensures that the result image's pixel origin does
+    // not affect the outcome.
+ SkIRect dstRect = result->subset();
+ SkIRect clippedDstRect = dstRect;
+ if (!clippedDstRect.intersect(clipBounds.makeOffset(result->subset().topLeft() - *offset))) {
+ return nullptr;
+ }
+
+ // Adjust the geometric offset if the top-left corner moved as well
+ offset->fX += (clippedDstRect.x() - dstRect.x());
+ offset->fY += (clippedDstRect.y() - dstRect.y());
+ *outSubset = clippedDstRect;
+ return result->asImage();
+}
+
+bool SkImage::isLazyGenerated() const {
+ return as_IB(this)->onIsLazyGenerated();
+}
+
+bool SkImage::isAlphaOnly() const { return SkColorTypeIsAlphaOnly(fInfo.colorType()); }
+
+sk_sp<SkImage> SkImage::makeColorSpace(sk_sp<SkColorSpace> target) const {
+ if (!target) {
+ return nullptr;
+ }
+
+ // No need to create a new image if:
+ // (1) The color spaces are equal.
+    // (2) The image is alpha-only (e.g. kAlpha_8), so color conversion doesn't apply.
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ if (SkColorSpace::Equals(colorSpace, target.get()) || this->isAlphaOnly()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ // CONTEXT TODO: propagate the context parameter to the top-level API
+#if SK_SUPPORT_GPU
+ return as_IB(this)->onMakeColorTypeAndColorSpace(as_IB(this)->context(),
+#else
+ return as_IB(this)->onMakeColorTypeAndColorSpace(nullptr,
+#endif
+ this->colorType(), std::move(target));
+}
+
+sk_sp<SkImage> SkImage::makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace) const {
+ if (kUnknown_SkColorType == targetColorType || !targetColorSpace) {
+ return nullptr;
+ }
+
+ SkColorType colorType = this->colorType();
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ if (colorType == targetColorType &&
+ (SkColorSpace::Equals(colorSpace, targetColorSpace.get()) || this->isAlphaOnly())) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ // CONTEXT TODO: propagate the context parameter to the top-level API
+#if SK_SUPPORT_GPU
+ return as_IB(this)->onMakeColorTypeAndColorSpace(as_IB(this)->context(),
+#else
+ return as_IB(this)->onMakeColorTypeAndColorSpace(nullptr,
+#endif
+ targetColorType, std::move(targetColorSpace));
+}
+
+sk_sp<SkImage> SkImage::reinterpretColorSpace(sk_sp<SkColorSpace> target) const {
+ if (!target) {
+ return nullptr;
+ }
+
+ // No need to create a new image if:
+ // (1) The color spaces are equal.
+    // (2) The image is alpha-only (e.g. kAlpha_8), so color conversion doesn't apply.
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ if (SkColorSpace::Equals(colorSpace, target.get()) || this->isAlphaOnly()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ return as_IB(this)->onReinterpretColorSpace(std::move(target));
+}
+
+sk_sp<SkImage> SkImage::makeNonTextureImage() const {
+ if (!this->isTextureBacked()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ return this->makeRasterImage();
+}
+
+sk_sp<SkImage> SkImage::makeRasterImage() const {
+ SkPixmap pm;
+ if (this->peekPixels(&pm)) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ const size_t rowBytes = fInfo.minRowBytes();
+ size_t size = fInfo.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
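+    // Read into an info with no color space so no color conversion is performed; the
+    // original fInfo (with its color space) is reattached by MakeRasterData below.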
+ pm = {fInfo.makeColorSpace(nullptr), data->writable_data(), fInfo.minRowBytes()};
+ if (!this->readPixels(pm, 0, 0)) {
+ return nullptr;
+ }
+
+ return SkImage::MakeRasterData(fInfo, std::move(data), rowBytes);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#if !SK_SUPPORT_GPU
+
+sk_sp<SkImage> SkImage::DecodeToTexture(GrContext*, const void*, size_t, const SkIRect*) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromTexture(GrContext* ctx,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs,
+ TextureReleaseProc releaseP, ReleaseContext releaseC) {
+ return nullptr;
+}
+
+bool SkImage::MakeBackendTextureFromSkImage(GrContext*,
+ sk_sp<SkImage>,
+ GrBackendTexture*,
+ BackendTextureReleaseProc*) {
+ return false;
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrContext* ctx,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVATexturesCopy(GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVATexturesCopyWithExternalBackend(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace space,
+ const GrBackendTexture[3],
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopyWithExternalBackend(
+ GrContext* context, SkYUVColorSpace yuvColorSpace, const GrBackendTexture yuvTextures[3],
+ GrSurfaceOrigin surfaceOrigin, const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> colorSpace) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace space,
+ const GrBackendTexture[2],
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::makeTextureImage(GrContext*, GrMipMapped mipMapped) const {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopyWithExternalBackend(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture nv12Textures[2],
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ return nullptr;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_pinAsTexture(const SkImage* image, GrContext* ctx) {
+ SkASSERT(image);
+ SkASSERT(ctx);
+ return as_IB(image)->onPinAsTexture(ctx);
+}
+
+void SkImage_unpinAsTexture(const SkImage* image, GrContext* ctx) {
+ SkASSERT(image);
+ SkASSERT(ctx);
+ as_IB(image)->onUnpinAsTexture(ctx);
+}
+
+SkIRect SkImage_getSubset(const SkImage* image) {
+ SkASSERT(image);
+ return as_IB(image)->onGetSubset();
+}
diff --git a/gfx/skia/skia/src/image/SkImage_Base.h b/gfx/skia/skia/src/image/SkImage_Base.h
new file mode 100644
index 0000000000..1c01aa1a70
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Base.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Base_DEFINED
+#define SkImage_Base_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkSurface.h"
+#include <atomic>
+
+#if SK_SUPPORT_GPU
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrRecordingContext;
+class GrTexture;
+#endif
+
+#include <new>
+
+class GrSamplerState;
+class SkCachedData;
+struct SkYUVASizeInfo;
+
+enum {
+ kNeedNewImageUniqueID = 0
+};
+
+class SkImage_Base : public SkImage {
+public:
+ virtual ~SkImage_Base();
+
+ virtual SkIRect onGetSubset() const {
+ return { 0, 0, this->width(), this->height() };
+ }
+
+ virtual bool onPeekPixels(SkPixmap*) const { return false; }
+
+ virtual const SkBitmap* onPeekBitmap() const { return nullptr; }
+
+ virtual bool onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const = 0;
+
+ virtual GrContext* context() const { return nullptr; }
+
+#if SK_SUPPORT_GPU
+ virtual GrSemaphoresSubmitted onFlush(GrContext* context, const GrFlushInfo&) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+    // Return the proxy if this image is backed by a single proxy. For YUVA images, this
+    // will return nullptr unless the YUVA planes have been converted to RGBA, in which case
+    // that single backing proxy will be returned.
+ virtual GrTextureProxy* peekProxy() const { return nullptr; }
+ virtual sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*) const { return nullptr; }
+ virtual sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*, const GrSamplerState&,
+ SkScalar scaleAdjust[2]) const = 0;
+ virtual sk_sp<GrTextureProxy> refPinnedTextureProxy(GrRecordingContext*,
+ uint32_t* uniqueID) const {
+ return nullptr;
+ }
+ virtual bool isYUVA() const { return false; }
+ virtual GrTexture* onGetTexture() const { return nullptr; }
+#endif
+ virtual GrBackendTexture onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const;
+
+    // Return a read-only copy of the pixels. We promise not to modify them,
+    // only to inspect (or encode) them.
+ virtual bool getROPixels(SkBitmap*, CachingHint = kAllow_CachingHint) const = 0;
+
+ virtual sk_sp<SkImage> onMakeSubset(GrRecordingContext*, const SkIRect&) const = 0;
+
+ virtual sk_sp<SkCachedData> getPlanes(SkYUVASizeInfo*, SkYUVAIndex[4],
+ SkYUVColorSpace*, const void* planes[4]);
+ virtual sk_sp<SkData> onRefEncoded() const { return nullptr; }
+
+ virtual bool onAsLegacyBitmap(SkBitmap*) const;
+
+ // True for picture-backed and codec-backed
+ virtual bool onIsLazyGenerated() const { return false; }
+
+ // True for images instantiated in GPU memory
+ virtual bool onIsTextureBacked() const { return false; }
+
+    // Call when this image is part of the key to a resource cache entry. This allows the cache
+    // to know automatically that those entries can be purged when this SkImage is deleted.
+ virtual void notifyAddedToRasterCache() const {
+ fAddedToRasterCache.store(true);
+ }
+
+ virtual bool onIsValid(GrContext*) const = 0;
+
+ virtual bool onPinAsTexture(GrContext*) const { return false; }
+ virtual void onUnpinAsTexture(GrContext*) const {}
+
+ virtual sk_sp<SkImage> onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType, sk_sp<SkColorSpace>) const = 0;
+
+ virtual sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const = 0;
+
+protected:
+ SkImage_Base(const SkImageInfo& info, uint32_t uniqueID);
+
+private:
+ // Set true by caches when they cache content that's derived from the current pixels.
+ mutable std::atomic<bool> fAddedToRasterCache;
+
+ typedef SkImage INHERITED;
+};
+
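+// Every SkImage is in fact an SkImage_Base, so these downcasts are always safe.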
+static inline SkImage_Base* as_IB(SkImage* image) {
+ return static_cast<SkImage_Base*>(image);
+}
+
+static inline SkImage_Base* as_IB(const sk_sp<SkImage>& image) {
+ return static_cast<SkImage_Base*>(image.get());
+}
+
+static inline const SkImage_Base* as_IB(const SkImage* image) {
+ return static_cast<const SkImage_Base*>(image);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.cpp b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
new file mode 100644
index 0000000000..30254e5bb5
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
@@ -0,0 +1,681 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <cstddef>
+#include <cstring>
+#include <type_traits>
+
+#include "include/core/SkCanvas.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkScopeExit.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/gpu/GrAHardwareBufferImageGenerator.h"
+#include "src/gpu/GrAHardwareBufferUtils.h"
+#include "src/gpu/GrBackendTextureImageGenerator.h"
+#include "src/gpu/GrBitmapTextureMaker.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrImageTextureMaker.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrSemaphore.h"
+#include "src/gpu/GrSurfacePriv.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/GrTextureContext.h"
+#include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/GrTextureProxy.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/gl/GrGLTexture.h"
+#include "src/image/SkImage_Gpu.h"
+
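+// Derives an SkColorType from the proxy's pixel config, falling back to
+// kUnknown_SkColorType when the config has no direct SkColorType equivalent.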
+static SkColorType proxy_color_type(GrTextureProxy* proxy) {
+ SkColorType colorType;
+ if (!GrPixelConfigToColorType(proxy->config(), &colorType)) {
+ colorType = kUnknown_SkColorType;
+ }
+ return colorType;
+}
+
+SkImage_Gpu::SkImage_Gpu(sk_sp<GrContext> context, uint32_t uniqueID, SkAlphaType at,
+ sk_sp<GrTextureProxy> proxy, sk_sp<SkColorSpace> colorSpace)
+ : INHERITED(std::move(context), proxy->worstCaseWidth(), proxy->worstCaseHeight(), uniqueID,
+ proxy_color_type(proxy.get()), at, colorSpace)
+ , fProxy(std::move(proxy)) {}
+
+SkImage_Gpu::~SkImage_Gpu() {}
+
+GrSemaphoresSubmitted SkImage_Gpu::onFlush(GrContext* context, const GrFlushInfo& info) {
+ if (!context || !fContext->priv().matches(context) || fContext->abandoned()) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ GrSurfaceProxy* p[1] = {fProxy.get()};
+ return context->priv().flushSurfaces(p, 1, info);
+}
+
+sk_sp<SkImage> SkImage_Gpu::onMakeColorTypeAndColorSpace(GrRecordingContext* context,
+ SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS) const {
+ if (!context || !fContext->priv().matches(context)) {
+ return nullptr;
+ }
+
+ auto xform = GrColorSpaceXformEffect::Make(this->colorSpace(), this->alphaType(),
+ targetCS.get(), this->alphaType());
+ SkASSERT(xform || targetCT != this->colorType());
+
+ sk_sp<GrTextureProxy> proxy = this->asTextureProxyRef(context);
+
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContextWithFallback(
+ SkBackingFit::kExact, this->width(), this->height(), SkColorTypeToGrColorType(targetCT),
+ nullptr);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
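+    // Draw the source texture into the new target with kSrc blending, applying the color
+    // space transform (when one is needed) as a fragment processor.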
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.addColorTextureProcessor(std::move(proxy), SkColorTypeToGrColorType(this->colorType()),
+ SkMatrix::I());
+ if (xform) {
+ paint.addColorFragmentProcessor(std::move(xform));
+ }
+
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
+ SkRect::MakeIWH(this->width(), this->height()));
+ if (!renderTargetContext->asTextureProxy()) {
+ return nullptr;
+ }
+
+    // MDB: this call is okay because we know 'renderTargetContext' was exact
+ return sk_make_sp<SkImage_Gpu>(fContext, kNeedNewImageUniqueID, this->alphaType(),
+ renderTargetContext->asTextureProxyRef(), std::move(targetCS));
+}
+
+sk_sp<SkImage> SkImage_Gpu::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ return sk_make_sp<SkImage_Gpu>(fContext, kNeedNewImageUniqueID, this->alphaType(), fProxy,
+ std::move(newCS));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkImage> new_wrapped_texture_common(GrContext* ctx,
+ const GrBackendTexture& backendTex,
+ GrColorType colorType, GrSurfaceOrigin origin,
+ SkAlphaType at, sk_sp<SkColorSpace> colorSpace,
+ GrWrapOwnership ownership,
+ SkImage::TextureReleaseProc releaseProc,
+ SkImage::ReleaseContext releaseCtx) {
+ if (!backendTex.isValid() || backendTex.width() <= 0 || backendTex.height() <= 0) {
+ return nullptr;
+ }
+
+ GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy =
+ proxyProvider->wrapBackendTexture(backendTex, colorType, origin, ownership,
+ GrWrapCacheable::kNo, kRead_GrIOType,
+ releaseProc, releaseCtx);
+ if (!proxy) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(ctx), kNeedNewImageUniqueID, at, std::move(proxy),
+ std::move(colorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromTexture(GrContext* ctx,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs,
+ TextureReleaseProc releaseP, ReleaseContext releaseC) {
+ if (!ctx) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = ctx->priv().caps();
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(caps, ct, tex.getBackendFormat());
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ if (!SkImage_GpuBase::ValidateBackendTexture(caps, tex, grColorType, ct, at, cs)) {
+ return nullptr;
+ }
+
+ return new_wrapped_texture_common(ctx, tex, grColorType, origin, at, std::move(cs),
+ kBorrow_GrWrapOwnership, releaseP, releaseC);
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrContext* ctx,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ if (!ctx || !ctx->priv().resourceProvider()) {
+        // We have a DDL context, and adopted textures are not supported for DDLs.
+ return nullptr;
+ }
+
+ const GrCaps* caps = ctx->priv().caps();
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(caps, ct, tex.getBackendFormat());
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ if (!SkImage_GpuBase::ValidateBackendTexture(caps, tex, grColorType, ct, at, cs)) {
+ return nullptr;
+ }
+
+ return new_wrapped_texture_common(ctx, tex, grColorType, origin, at, std::move(cs),
+ kAdopt_GrWrapOwnership, nullptr, nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeFromCompressed(GrContext* context, sk_sp<SkData> data,
+ int width, int height, CompressionType type) {
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createCompressedTextureProxy(
+ width, height, SkBudgeted::kYes, type, std::move(data));
+
+ if (!proxy) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(context), kNeedNewImageUniqueID, kOpaque_SkAlphaType,
+ std::move(proxy), nullptr);
+}
+
+sk_sp<SkImage> SkImage_Gpu::ConvertYUVATexturesToRGB(GrContext* ctx, SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4], SkISize size,
+ GrSurfaceOrigin origin,
+ GrRenderTargetContext* renderTargetContext) {
+ SkASSERT(renderTargetContext);
+
+ int numTextures;
+ if (!SkYUVAIndex::AreValidIndices(yuvaIndices, &numTextures)) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> tempTextureProxies[4];
+ if (!SkImage_GpuBase::MakeTempTextureProxies(ctx, yuvaTextures, numTextures, yuvaIndices,
+ origin, tempTextureProxies)) {
+ return nullptr;
+ }
+
+ const SkRect rect = SkRect::MakeIWH(size.width(), size.height());
+ if (!RenderYUVAToRGBA(ctx, renderTargetContext, rect, yuvColorSpace, nullptr,
+ tempTextureProxies, yuvaIndices)) {
+ return nullptr;
+ }
+
+ SkAlphaType at = GetAlphaTypeFromYUVAIndices(yuvaIndices);
+    // MDB: this call is okay because we know 'renderTargetContext' was exact
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(ctx), kNeedNewImageUniqueID, at,
+ renderTargetContext->asTextureProxyRef(),
+ renderTargetContext->colorInfo().refColorSpace());
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVATexturesCopy(GrContext* ctx,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ const int width = imageSize.width();
+ const int height = imageSize.height();
+
+    // We need to create a render target to draw into for the YUV->RGB conversion.
+ auto renderTargetContext = ctx->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kExact, width, height, GrColorType::kRGBA_8888,
+ std::move(imageColorSpace), 1, GrMipMapped::kNo, imageOrigin);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ return SkImage_Gpu::ConvertYUVATexturesToRGB(ctx, yuvColorSpace, yuvaTextures, yuvaIndices,
+ imageSize, imageOrigin, renderTargetContext.get());
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVATexturesCopyWithExternalBackend(
+ GrContext* ctx,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ const GrCaps* caps = ctx->priv().caps();
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(caps, kRGBA_8888_SkColorType,
+ backendTexture.getBackendFormat());
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ SkAlphaType at = SkImage_GpuBase::GetAlphaTypeFromYUVAIndices(yuvaIndices);
+ if (!SkImage_Gpu::ValidateBackendTexture(caps, backendTexture, grColorType,
+ kRGBA_8888_SkColorType, at, nullptr)) {
+ return nullptr;
+ }
+
+    // We need to create a render target that wraps the external texture to draw
+    // into for the YUV->RGB conversion.
+ auto renderTargetContext = ctx->priv().makeBackendTextureRenderTargetContext(
+ backendTexture, imageOrigin, 1, grColorType, std::move(imageColorSpace), nullptr,
+ textureReleaseProc, releaseContext);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ return SkImage_Gpu::ConvertYUVATexturesToRGB(ctx, yuvColorSpace, yuvaTextures, yuvaIndices,
+ imageSize, imageOrigin, renderTargetContext.get());
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvTextures[3],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+    // TODO: SkImageSourceChannel input is being ignored right now. Set it up correctly in the future.
+ SkYUVAIndex yuvaIndices[4] = {
+ SkYUVAIndex{0, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kR},
+ SkYUVAIndex{2, SkColorChannel::kR},
+ SkYUVAIndex{-1, SkColorChannel::kA}};
+ SkISize size{yuvTextures[0].width(), yuvTextures[0].height()};
+ return SkImage_Gpu::MakeFromYUVATexturesCopy(ctx, yuvColorSpace, yuvTextures, yuvaIndices,
+ size, imageOrigin, std::move(imageColorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopyWithExternalBackend(
+ GrContext* ctx, SkYUVColorSpace yuvColorSpace, const GrBackendTexture yuvTextures[3],
+ GrSurfaceOrigin imageOrigin, const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ SkYUVAIndex yuvaIndices[4] = {
+ SkYUVAIndex{0, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kR},
+ SkYUVAIndex{2, SkColorChannel::kR},
+ SkYUVAIndex{-1, SkColorChannel::kA}};
+ SkISize size{yuvTextures[0].width(), yuvTextures[0].height()};
+ return SkImage_Gpu::MakeFromYUVATexturesCopyWithExternalBackend(
+ ctx, yuvColorSpace, yuvTextures, yuvaIndices, size, imageOrigin, backendTexture,
+ std::move(imageColorSpace), nullptr, nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture nv12Textures[2],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+    // TODO: SkImageSourceChannel input is being ignored right now. Set it up correctly in the future.
+ SkYUVAIndex yuvaIndices[4] = {
+ SkYUVAIndex{0, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kG},
+ SkYUVAIndex{-1, SkColorChannel::kA}};
+ SkISize size{nv12Textures[0].width(), nv12Textures[0].height()};
+ return SkImage_Gpu::MakeFromYUVATexturesCopy(ctx, yuvColorSpace, nv12Textures, yuvaIndices,
+ size, imageOrigin, std::move(imageColorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopyWithExternalBackend(
+ GrContext* ctx,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture nv12Textures[2],
+ GrSurfaceOrigin imageOrigin,
+ const GrBackendTexture& backendTexture,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ SkYUVAIndex yuvaIndices[4] = {
+ SkYUVAIndex{0, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kR},
+ SkYUVAIndex{1, SkColorChannel::kG},
+ SkYUVAIndex{-1, SkColorChannel::kA}};
+ SkISize size{nv12Textures[0].width(), nv12Textures[0].height()};
+ return SkImage_Gpu::MakeFromYUVATexturesCopyWithExternalBackend(
+ ctx, yuvColorSpace, nv12Textures, yuvaIndices, size, imageOrigin, backendTexture,
+ std::move(imageColorSpace), textureReleaseProc, releaseContext);
+}
+
+static sk_sp<SkImage> create_image_from_producer(GrContext* context, GrTextureProducer* producer,
+ SkAlphaType at, uint32_t id,
+ GrMipMapped mipMapped) {
+ sk_sp<GrTextureProxy> proxy(producer->refTextureProxy(mipMapped));
+ if (!proxy) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(context), id, at, std::move(proxy),
+ sk_ref_sp(producer->colorSpace()));
+}
+
+sk_sp<SkImage> SkImage::makeTextureImage(GrContext* context, GrMipMapped mipMapped) const {
+ if (!context) {
+ return nullptr;
+ }
+
+ if (this->isTextureBacked()) {
+ if (!as_IB(this)->context()->priv().matches(context)) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = as_IB(this)->asTextureProxyRef(context);
+ SkASSERT(proxy);
+ if (GrMipMapped::kNo == mipMapped || proxy->mipMapped() == mipMapped) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ GrTextureAdjuster adjuster(context, std::move(proxy),
+ SkColorTypeToGrColorType(this->colorType()), this->alphaType(),
+ this->uniqueID(), this->colorSpace());
+ return create_image_from_producer(context, &adjuster, this->alphaType(),
+ this->uniqueID(), mipMapped);
+ }
+
+ if (this->isLazyGenerated()) {
+ GrImageTextureMaker maker(context, this, kDisallow_CachingHint);
+ return create_image_from_producer(context, &maker, this->alphaType(),
+ this->uniqueID(), mipMapped);
+ }
+
+ if (const SkBitmap* bmp = as_IB(this)->onPeekBitmap()) {
+ GrBitmapTextureMaker maker(context, *bmp);
+ return create_image_from_producer(context, &maker, this->alphaType(),
+ this->uniqueID(), mipMapped);
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage_Gpu::MakePromiseTexture(GrContext* context,
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion version) {
+    // The contract here is that if 'textureDoneProc' is passed in, it should always be called,
+    // even if creation of the SkImage fails. Once we call MakePromiseImageLazyProxy it takes
+    // responsibility for calling the done proc.
+ if (!textureDoneProc) {
+ return nullptr;
+ }
+ SkScopeExit callDone([textureDoneProc, textureContext]() { textureDoneProc(textureContext); });
+
+ SkImageInfo info = SkImageInfo::Make(width, height, colorType, alphaType, colorSpace);
+ if (!SkImageInfoIsValid(info)) {
+ return nullptr;
+ }
+
+ if (!context) {
+ return nullptr;
+ }
+
+ if (width <= 0 || height <= 0) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(context->priv().caps(),
+ colorType,
+ backendFormat);
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
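+    // Disarm the scope exit: from here on, MakePromiseImageLazyProxy is responsible for
+    // calling textureDoneProc.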
+ callDone.clear();
+ auto proxy = MakePromiseImageLazyProxy(context, width, height, origin,
+ grColorType, backendFormat,
+ mipMapped, textureFulfillProc, textureReleaseProc,
+ textureDoneProc, textureContext, version);
+ if (!proxy) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(context), kNeedNewImageUniqueID, alphaType,
+ std::move(proxy), std::move(colorSpace));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeCrossContextFromPixmap(GrContext* context,
+ const SkPixmap& originalPixmap, bool buildMips,
+ bool limitToMaxTextureSize) {
+ // Some backends or drivers don't support (safely) moving resources between contexts
+ if (!context || !context->priv().caps()->crossContextTextureSupport()) {
+ return SkImage::MakeRasterCopy(originalPixmap);
+ }
+
+    // If we don't have access to the resource provider and GPU (i.e. in a DDL context) we will
+    // not be able to make everything needed for a GPU CrossContext image. Thus we return a
+    // raster copy instead.
+ if (!context->priv().resourceProvider()) {
+ return SkImage::MakeRasterCopy(originalPixmap);
+ }
+
+ // If non-power-of-two mipmapping isn't supported, ignore the client's request
+ if (!context->priv().caps()->mipMapSupport()) {
+ buildMips = false;
+ }
+
+ const SkPixmap* pixmap = &originalPixmap;
+ SkAutoPixmapStorage resized;
+ int maxTextureSize = context->priv().caps()->maxTextureSize();
+ int maxDim = SkTMax(originalPixmap.width(), originalPixmap.height());
+ if (limitToMaxTextureSize && maxDim > maxTextureSize) {
+ float scale = static_cast<float>(maxTextureSize) / maxDim;
+ int newWidth = SkTMin(static_cast<int>(originalPixmap.width() * scale), maxTextureSize);
+ int newHeight = SkTMin(static_cast<int>(originalPixmap.height() * scale), maxTextureSize);
+ SkImageInfo info = originalPixmap.info().makeWH(newWidth, newHeight);
+ if (!resized.tryAlloc(info) || !originalPixmap.scalePixels(resized, kLow_SkFilterQuality)) {
+ return nullptr;
+ }
+ pixmap = &resized;
+ }
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ // Turn the pixmap into a GrTextureProxy
+ SkBitmap bmp;
+ bmp.installPixels(*pixmap);
+ GrMipMapped mipMapped = buildMips ? GrMipMapped::kYes : GrMipMapped::kNo;
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createProxyFromBitmap(bmp, mipMapped);
+ if (!proxy) {
+ return SkImage::MakeRasterCopy(*pixmap);
+ }
+
+ sk_sp<GrTexture> texture = sk_ref_sp(proxy->peekTexture());
+
+ // Flush any writes or uploads
+ context->priv().flushSurface(proxy.get());
+ GrGpu* gpu = context->priv().getGpu();
+
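+    // The semaphore lets the receiving context wait until this context's pending work on
+    // the texture has completed before sampling it.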
+ sk_sp<GrSemaphore> sema = gpu->prepareTextureForCrossContextUsage(texture.get());
+
+ auto gen = GrBackendTextureImageGenerator::Make(std::move(texture), proxy->origin(),
+ std::move(sema), pixmap->colorType(),
+ pixmap->alphaType(),
+ pixmap->info().refColorSpace());
+ return SkImage::MakeFromGenerator(std::move(gen));
+}
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+sk_sp<SkImage> SkImage::MakeFromAHardwareBuffer(AHardwareBuffer* graphicBuffer, SkAlphaType at,
+ sk_sp<SkColorSpace> cs,
+ GrSurfaceOrigin surfaceOrigin) {
+ auto gen = GrAHardwareBufferImageGenerator::Make(graphicBuffer, at, cs, surfaceOrigin);
+ return SkImage::MakeFromGenerator(std::move(gen));
+}
+
+sk_sp<SkImage> SkImage::MakeFromAHardwareBufferWithData(GrContext* context,
+ const SkPixmap& pixmap,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin surfaceOrigin) {
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(hardwareBuffer, &bufferDesc);
+
+ if (!SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE)) {
+ return nullptr;
+ }
+
+ GrBackendFormat backendFormat = GrAHardwareBufferUtils::GetBackendFormat(context,
+ hardwareBuffer,
+ bufferDesc.format,
+ true);
+
+ if (!backendFormat.isValid()) {
+ return nullptr;
+ }
+
+ GrAHardwareBufferUtils::DeleteImageProc deleteImageProc = nullptr;
+ GrAHardwareBufferUtils::UpdateImageProc updateImageProc = nullptr;
+ GrAHardwareBufferUtils::TexImageCtx deleteImageCtx = nullptr;
+
+ GrBackendTexture backendTexture =
+ GrAHardwareBufferUtils::MakeBackendTexture(context, hardwareBuffer,
+ bufferDesc.width, bufferDesc.height,
+ &deleteImageProc, &updateImageProc,
+ &deleteImageCtx, false, backendFormat, true);
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+ SkASSERT(deleteImageProc);
+
+ SkColorType colorType =
+ GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(bufferDesc.format);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ if (!proxyProvider) {
+ deleteImageProc(deleteImageCtx);
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy =
+ proxyProvider->wrapBackendTexture(backendTexture, grColorType, surfaceOrigin,
+ kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
+ kRW_GrIOType, deleteImageProc, deleteImageCtx);
+ if (!proxy) {
+ deleteImageProc(deleteImageCtx);
+ return nullptr;
+ }
+
+ sk_sp<SkColorSpace> cs = pixmap.refColorSpace();
+ SkAlphaType at = pixmap.alphaType();
+
+ sk_sp<SkImage> image = sk_make_sp<SkImage_Gpu>(sk_ref_sp(context), kNeedNewImageUniqueID, at,
+ proxy, cs);
+ if (!image) {
+ return nullptr;
+ }
+
+ GrDrawingManager* drawingManager = context->priv().drawingManager();
+ if (!drawingManager) {
+ return nullptr;
+ }
+
+ auto texContext =
+ drawingManager->makeTextureContext(proxy, SkColorTypeToGrColorType(pixmap.colorType()),
+ pixmap.alphaType(), cs);
+ if (!texContext) {
+ return nullptr;
+ }
+
+ SkImageInfo srcInfo = SkImageInfo::Make(bufferDesc.width, bufferDesc.height, colorType, at,
+ std::move(cs));
+ texContext->writePixels(srcInfo, pixmap.addr(0, 0), pixmap.rowBytes(), {0, 0});
+
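+    // Flush with kSyncCpu so the pixel upload has completed before the image is returned.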
+ GrFlushInfo info;
+ info.fFlags = kSyncCpu_GrFlushFlag;
+ GrSurfaceProxy* p[1] = {proxy.get()};
+ drawingManager->flush(p, 1, SkSurface::BackendSurfaceAccess::kNoAccess, info,
+ GrPrepareForExternalIORequests());
+
+ return image;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage::MakeBackendTextureFromSkImage(GrContext* ctx,
+ sk_sp<SkImage> image,
+ GrBackendTexture* backendTexture,
+ BackendTextureReleaseProc* releaseProc) {
+ if (!image || !ctx || !backendTexture || !releaseProc) {
+ return false;
+ }
+
+ // Ensure we have a texture backed image.
+ if (!image->isTextureBacked()) {
+ image = image->makeTextureImage(ctx);
+ if (!image) {
+ return false;
+ }
+ }
+ GrTexture* texture = image->getTexture();
+ if (!texture) {
+ // In context-loss cases, we may not have a texture.
+ return false;
+ }
+
+ // If the image's context doesn't match the provided context, fail.
+ if (texture->getContext() != ctx) {
+ return false;
+ }
+
+ // Flush any pending IO on the texture.
+ ctx->priv().flushSurface(as_IB(image)->peekProxy());
+
+ // We must make a copy of the image if the image is not unique, if the GrTexture owned by the
+ // image is not unique, or if the texture wraps an external object.
+ if (!image->unique() || !texture->unique() ||
+ texture->resourcePriv().refsWrappedObjects()) {
+ // onMakeSubset will always copy the image.
+ image = as_IB(image)->onMakeSubset(ctx, image->bounds());
+ if (!image) {
+ return false;
+ }
+
+ texture = image->getTexture();
+ if (!texture) {
+ return false;
+ }
+
+ // Flush to ensure that the copy is completed before we return the texture.
+ ctx->priv().flushSurface(as_IB(image)->peekProxy());
+ }
+
+ SkASSERT(!texture->resourcePriv().refsWrappedObjects());
+ SkASSERT(texture->unique());
+ SkASSERT(image->unique());
+
+ // Take a reference to the GrTexture and release the image.
+ sk_sp<GrTexture> textureRef(SkSafeRef(texture));
+ image = nullptr;
+
+ // Steal the backend texture from the GrTexture, releasing the GrTexture in the process.
+ return GrTexture::StealBackendTexture(std::move(textureRef), backendTexture, releaseProc);
+}
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.h b/gfx/skia/skia/src/image/SkImage_Gpu.h
new file mode 100644
index 0000000000..71f3b14f4a
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Gpu_DEFINED
+#define SkImage_Gpu_DEFINED
+
+#include "include/gpu/GrContext.h"
+#include "src/core/SkImagePriv.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrSurfaceProxyPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/image/SkImage_GpuBase.h"
+
+class GrTexture;
+
+class SkBitmap;
+struct SkYUVAIndex;
+
+class SkImage_Gpu : public SkImage_GpuBase {
+public:
+ SkImage_Gpu(sk_sp<GrContext>, uint32_t uniqueID, SkAlphaType, sk_sp<GrTextureProxy>,
+ sk_sp<SkColorSpace>);
+ ~SkImage_Gpu() override;
+
+ GrSemaphoresSubmitted onFlush(GrContext*, const GrFlushInfo&) override;
+
+ GrTextureProxy* peekProxy() const override {
+ return fProxy.get();
+ }
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*) const override {
+ return fProxy;
+ }
+
+ bool onIsTextureBacked() const override { return SkToBool(fProxy.get()); }
+
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType, sk_sp<SkColorSpace>) const final;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ /**
+ * This is the implementation of SkDeferredDisplayListRecorder::makePromiseImage.
+ */
+ static sk_sp<SkImage> MakePromiseTexture(GrContext* context,
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipMapped mipMapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion);
+
+ static sk_sp<SkImage> ConvertYUVATexturesToRGB(GrContext*, SkYUVColorSpace yuvColorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize, GrSurfaceOrigin imageOrigin,
+ GrRenderTargetContext*);
+
+private:
+ sk_sp<GrTextureProxy> fProxy;
+
+ typedef SkImage_GpuBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_GpuBase.cpp b/gfx/skia/skia/src/image/SkImage_GpuBase.cpp
new file mode 100644
index 0000000000..58d0eb8617
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuBase.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPromiseImageTexture.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkTLList.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrImageInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/effects/GrYUVtoRGBEffect.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkImage_GpuBase.h"
+#include "src/image/SkReadPixelsRec.h"
+
+SkImage_GpuBase::SkImage_GpuBase(sk_sp<GrContext> context, int width, int height, uint32_t uniqueID,
+ SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs)
+ : INHERITED(SkImageInfo::Make(width, height, ct, at, std::move(cs)), uniqueID)
+ , fContext(std::move(context)) {}
+
+SkImage_GpuBase::~SkImage_GpuBase() {}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GR_TEST_UTILS
+void SkImage_GpuBase::resetContext(sk_sp<GrContext> newContext) {
+ SkASSERT(fContext->priv().matches(newContext.get()));
+ fContext = newContext;
+}
+#endif
+
+bool SkImage_GpuBase::ValidateBackendTexture(const GrCaps* caps, const GrBackendTexture& tex,
+ GrColorType grCT, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ if (!tex.isValid()) {
+ return false;
+ }
+ // TODO: Create a SkImageColorInfo struct for color, alpha, and color space so we don't need to
+ // create a fake image info here.
+ SkImageInfo info = SkImageInfo::Make(1, 1, ct, at, cs);
+ if (!SkImageInfoIsValid(info)) {
+ return false;
+ }
+ GrBackendFormat backendFormat = tex.getBackendFormat();
+ if (!backendFormat.isValid()) {
+ return false;
+ }
+
+ return caps->areColorTypeAndFormatCompatible(grCT, backendFormat);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_GpuBase::getROPixels(SkBitmap* dst, CachingHint chint) const {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ // DDL TODO: buffer up the readback so it occurs when the DDL is drawn?
+ return false;
+ }
+
+ const auto desc = SkBitmapCacheDesc::Make(this);
+ if (SkBitmapCache::Find(desc, dst)) {
+ SkASSERT(dst->isImmutable());
+ SkASSERT(dst->getPixels());
+ return true;
+ }
+
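+    // With caching allowed, read into pixels owned by the bitmap cache; otherwise read
+    // directly into pixels owned by the caller's bitmap.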
+ SkBitmapCache::RecPtr rec = nullptr;
+ SkPixmap pmap;
+ if (kAllow_CachingHint == chint) {
+ rec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
+ if (!rec) {
+ return false;
+ }
+ } else {
+ if (!dst->tryAllocPixels(this->imageInfo()) || !dst->peekPixels(&pmap)) {
+ return false;
+ }
+ }
+
+ sk_sp<GrTextureProxy> texProxy = this->asTextureProxyRef(direct);
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(fContext->priv().caps(),
+ this->colorType(),
+ texProxy->backendFormat());
+
+ auto sContext = direct->priv().makeWrappedSurfaceContext(
+ std::move(texProxy), grColorType, this->alphaType(), this->refColorSpace());
+ if (!sContext) {
+ return false;
+ }
+
+ if (!sContext->readPixels(pmap.info(), pmap.writable_addr(), pmap.rowBytes(), {0, 0})) {
+ return false;
+ }
+
+ if (rec) {
+ SkBitmapCache::Add(std::move(rec), dst);
+ this->notifyAddedToRasterCache();
+ }
+ return true;
+}
+
+sk_sp<SkImage> SkImage_GpuBase::onMakeSubset(GrRecordingContext* context,
+ const SkIRect& subset) const {
+ if (!context || !fContext->priv().matches(context)) {
+ return nullptr;
+ }
+
+ sk_sp<GrSurfaceProxy> proxy = this->asTextureProxyRef(context);
+ GrColorType srcColorType = SkColorTypeToGrColorType(this->colorType());
+
+ sk_sp<GrTextureProxy> copyProxy = GrSurfaceProxy::Copy(
+ context, proxy.get(), srcColorType, GrMipMapped::kNo, subset, SkBackingFit::kExact,
+ proxy->isBudgeted());
+
+ if (!copyProxy) {
+ return nullptr;
+ }
+
+    // MDB: this call is okay because we know the copy above was made with SkBackingFit::kExact
+ return sk_make_sp<SkImage_Gpu>(fContext, kNeedNewImageUniqueID, this->alphaType(),
+ std::move(copyProxy), this->refColorSpace());
+}
+
+bool SkImage_GpuBase::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int srcX, int srcY, CachingHint) const {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ // DDL TODO: buffer up the readback so it occurs when the DDL is drawn?
+ return false;
+ }
+
+ if (!SkImageInfoValidConversion(dstInfo, this->imageInfo())) {
+ return false;
+ }
+
+ sk_sp<GrTextureProxy> texProxy = this->asTextureProxyRef(direct);
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(fContext->priv().caps(),
+ this->colorType(),
+ texProxy->backendFormat());
+
+ auto sContext = direct->priv().makeWrappedSurfaceContext(
+ std::move(texProxy), grColorType, this->alphaType(), this->refColorSpace());
+ if (!sContext) {
+ return false;
+ }
+
+ return sContext->readPixels(dstInfo, dstPixels, dstRB, {srcX, srcY});
+}
+
+sk_sp<GrTextureProxy> SkImage_GpuBase::asTextureProxyRef(GrRecordingContext* context,
+ const GrSamplerState& params,
+ SkScalar scaleAdjust[2]) const {
+ if (!context || !fContext->priv().matches(context)) {
+ SkASSERT(0);
+ return nullptr;
+ }
+
+ GrTextureAdjuster adjuster(fContext.get(), this->asTextureProxyRef(context),
+ SkColorTypeToGrColorType(this->colorType()), this->alphaType(),
+ this->uniqueID(), this->colorSpace());
+ return adjuster.refTextureProxyForParams(params, scaleAdjust);
+}
+
+GrBackendTexture SkImage_GpuBase::onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ // This image was created with a DDL context and cannot be instantiated.
+ return GrBackendTexture(); // invalid
+ }
+
+ sk_sp<GrTextureProxy> proxy = this->asTextureProxyRef(direct);
+ SkASSERT(proxy);
+
+ if (!proxy->isInstantiated()) {
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ if (!proxy->instantiate(resourceProvider)) {
+ return GrBackendTexture(); // invalid
+ }
+ }
+
+ GrTexture* texture = proxy->peekTexture();
+ if (texture) {
+ if (flushPendingGrContextIO) {
+ direct->priv().flushSurface(proxy.get());
+ }
+ if (origin) {
+ *origin = proxy->origin();
+ }
+ return texture->getBackendTexture();
+ }
+ return GrBackendTexture(); // invalid
+}
+
+GrTexture* SkImage_GpuBase::onGetTexture() const {
+ GrTextureProxy* proxy = this->peekProxy();
+ if (proxy && proxy->isInstantiated()) {
+ return proxy->peekTexture();
+ }
+
+ auto direct = fContext->priv().asDirectContext();
+ if (!direct) {
+ // This image was created with a DDL context and cannot be instantiated.
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxyRef = this->asTextureProxyRef(direct);
+ SkASSERT(proxyRef && !proxyRef->isInstantiated());
+
+ if (!proxyRef->instantiate(direct->priv().resourceProvider())) {
+ return nullptr;
+ }
+
+ return proxyRef->peekTexture();
+}
+
+bool SkImage_GpuBase::onIsValid(GrContext* context) const {
+ // The base class has already checked that context isn't abandoned (if it's not nullptr)
+ if (fContext->priv().abandoned()) {
+ return false;
+ }
+
+ if (context && !fContext->priv().matches(context)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool SkImage_GpuBase::MakeTempTextureProxies(GrContext* ctx, const GrBackendTexture yuvaTextures[],
+ int numTextures, const SkYUVAIndex yuvaIndices[4],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<GrTextureProxy> tempTextureProxies[4]) {
+ GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();
+ const GrCaps* caps = ctx->priv().caps();
+
+ for (int textureIndex = 0; textureIndex < numTextures; ++textureIndex) {
+ GrBackendFormat backendFormat = yuvaTextures[textureIndex].getBackendFormat();
+ if (!backendFormat.isValid()) {
+ return false;
+ }
+
+ GrColorType grColorType = caps->getYUVAColorTypeFromBackendFormat(
+ backendFormat,
+ yuvaIndices[3].fIndex == textureIndex);
+ if (GrColorType::kUnknown == grColorType) {
+ return false;
+ }
+
+ SkASSERT(yuvaTextures[textureIndex].isValid());
+
+ tempTextureProxies[textureIndex] = proxyProvider->wrapBackendTexture(
+ yuvaTextures[textureIndex], grColorType, imageOrigin, kBorrow_GrWrapOwnership,
+ GrWrapCacheable::kNo, kRead_GrIOType);
+ if (!tempTextureProxies[textureIndex]) {
+ return false;
+ }
+
+ // Check that each texture contains the channel data for the corresponding YUVA index
+ auto componentFlags = GrColorTypeComponentFlags(grColorType);
+ for (int yuvaIndex = 0; yuvaIndex < SkYUVAIndex::kIndexCount; ++yuvaIndex) {
+ if (yuvaIndices[yuvaIndex].fIndex == textureIndex) {
+ switch (yuvaIndices[yuvaIndex].fChannel) {
+ case SkColorChannel::kR:
+ // TODO: Chrome needs to be patched before this can be
+ // enforced.
+// if (!(kRed_SkColorTypeComponentFlag & componentFlags)) {
+// return false;
+// }
+ break;
+ case SkColorChannel::kG:
+ if (!(kGreen_SkColorTypeComponentFlag & componentFlags)) {
+ return false;
+ }
+ break;
+ case SkColorChannel::kB:
+ if (!(kBlue_SkColorTypeComponentFlag & componentFlags)) {
+ return false;
+ }
+ break;
+ case SkColorChannel::kA:
+ if (!(kAlpha_SkColorTypeComponentFlag & componentFlags)) {
+ return false;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return true;
+}
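+ // (Illustrative sketch, not from the upstream sources: for a two-plane NV12 image the
+ // indices validated above would typically look like
+ //
+ //   yuvaIndices[0] = { 0, SkColorChannel::kR};  // Y from plane 0's red channel
+ //   yuvaIndices[1] = { 1, SkColorChannel::kR};  // U from plane 1's red channel
+ //   yuvaIndices[2] = { 1, SkColorChannel::kG};  // V from plane 1's green channel
+ //   yuvaIndices[3] = {-1, SkColorChannel::kR};  // no alpha plane
+ //
+ // so the channel loop checks that plane 1's color type really carries R and G data.)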
+
+bool SkImage_GpuBase::RenderYUVAToRGBA(GrContext* ctx, GrRenderTargetContext* renderTargetContext,
+ const SkRect& rect, SkYUVColorSpace yuvColorSpace,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const sk_sp<GrTextureProxy> proxies[4],
+ const SkYUVAIndex yuvaIndices[4]) {
+ SkASSERT(renderTargetContext);
+ if (!renderTargetContext->asSurfaceProxy()) {
+ return false;
+ }
+
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ auto fp = GrYUVtoRGBEffect::Make(proxies, yuvaIndices, yuvColorSpace,
+ GrSamplerState::Filter::kNearest);
+ if (colorSpaceXform) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(colorSpaceXform));
+ }
+ paint.addColorFragmentProcessor(std::move(fp));
+
+ renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(), rect);
+ return true;
+}
+
+sk_sp<GrTextureProxy> SkImage_GpuBase::MakePromiseImageLazyProxy(
+ GrContext* context, int width, int height, GrSurfaceOrigin origin, GrColorType colorType,
+ GrBackendFormat backendFormat, GrMipMapped mipMapped,
+ PromiseImageTextureFulfillProc fulfillProc,
+ PromiseImageTextureReleaseProc releaseProc,
+ PromiseImageTextureDoneProc doneProc,
+ PromiseImageTextureContext textureContext,
+ PromiseImageApiVersion version) {
+ SkASSERT(context);
+ SkASSERT(width > 0 && height > 0);
+ SkASSERT(doneProc);
+ SkASSERT(colorType != GrColorType::kUnknown);
+
+ if (!fulfillProc || !releaseProc) {
+ doneProc(textureContext);
+ return nullptr;
+ }
+
+ if (mipMapped == GrMipMapped::kYes &&
+ GrTextureTypeHasRestrictedSampling(backendFormat.textureType())) {
+ // It is invalid to have a GL_TEXTURE_EXTERNAL or GL_TEXTURE_RECTANGLE and have mips as
+ // well.
+ doneProc(textureContext);
+ return nullptr;
+ }
+
+ /**
+ * This class is the lazy instantiation callback for promise images. It manages calling the
+ * client's Fulfill, Release, and Done procs. It attempts to reuse a GrTexture instance in
+ * cases where the client provides the same SkPromiseImageTexture as Fulfill results for
+ * multiple SkImages. The created GrTexture is given a key based on a unique ID associated with
+ * the SkPromiseImageTexture.
+ *
+ * The GrTexture idle proc mechanism is used to call the Release and Done procs. We use this
+ * instead of the GrSurface release proc because the GrTexture is cached and therefore may
+ * outlive the proxy into which this callback is installed.
+ *
+ * A key invalidation message is installed on the SkPromiseImageTexture so that the GrTexture
+ * is deleted once it can no longer be used to instantiate a proxy.
+ */
+ class PromiseLazyInstantiateCallback {
+ public:
+ PromiseLazyInstantiateCallback(PromiseImageTextureFulfillProc fulfillProc,
+ PromiseImageTextureReleaseProc releaseProc,
+ PromiseImageTextureDoneProc doneProc,
+ PromiseImageTextureContext context,
+ GrColorType colorType,
+ PromiseImageApiVersion version)
+ : fFulfillProc(fulfillProc)
+ , fReleaseProc(releaseProc)
+ , fColorType(colorType)
+ , fVersion(version) {
+ fDoneCallback = sk_make_sp<GrRefCntedCallback>(doneProc, context);
+ }
+ PromiseLazyInstantiateCallback(PromiseLazyInstantiateCallback&&) = default;
+ PromiseLazyInstantiateCallback(const PromiseLazyInstantiateCallback&) {
+ // Because we get wrapped in std::function we must be copyable. But we should never
+ // be copied.
+ SkASSERT(false);
+ }
+ PromiseLazyInstantiateCallback& operator=(PromiseLazyInstantiateCallback&&) = default;
+ PromiseLazyInstantiateCallback& operator=(const PromiseLazyInstantiateCallback&) {
+ SkASSERT(false);
+ return *this;
+ }
+
+ ~PromiseLazyInstantiateCallback() {
+ // Our destructor can run on any thread. We trigger the unref of fTexture by message.
+ // This unreffed texture pointer is a real problem! When the context has been
+ // abandoned, the GrTexture pointed to by this pointer is deleted! Due to virtual
+ // inheritance any manipulation of this pointer at that point will cause a crash.
+ // For now we "work around" the problem by just passing it, untouched, into the
+ // message bus, but this is very fragile.
+ // In the future the GrSurface class hierarchy refactoring should eliminate this
+ // difficulty by removing the virtual inheritance.
+ if (fTexture) {
+ SkMessageBus<GrTextureFreedMessage>::Post({fTexture, fTextureContextID});
+ }
+ }
+
+ GrSurfaceProxy::LazyCallbackResult operator()(GrResourceProvider* resourceProvider) {
+ // We use the unique key in a way that is unrelated to the SkImage-based key that the
+ // proxy may receive, hence kUnsynced.
+ static constexpr auto kKeySyncMode =
+ GrSurfaceProxy::LazyInstantiationKeyMode::kUnsynced;
+
+ // In order to make the SkImage "thread safe" we rely on holding an extra ref to the
+ // texture in the callback and signalling the unref via a message to the resource cache.
+ // We need to extend the callback's lifetime to that of the proxy.
+ static constexpr auto kReleaseCallbackOnInstantiation = false;
+
+ // Our proxy is getting instantiated for the second+ time. We are only allowed to call
+ // Fulfill once. So return our cached result.
+ if (fTexture) {
+ return {sk_ref_sp(fTexture), kReleaseCallbackOnInstantiation, kKeySyncMode};
+ } else if (fColorType == GrColorType::kUnknown) {
+ // We've already called fulfill and it failed. Our contract says that we should only
+ // call each callback once.
+ return {};
+ }
+ SkASSERT(fDoneCallback);
+ PromiseImageTextureContext textureContext = fDoneCallback->context();
+ sk_sp<SkPromiseImageTexture> promiseTexture = fFulfillProc(textureContext);
+ // From here on out our contract is that the release proc must be called, even if
+ // the return from fulfill was invalid or we fail for some other reason.
+ auto releaseCallback = sk_make_sp<GrRefCntedCallback>(fReleaseProc, textureContext);
+ if (!promiseTexture) {
+ // This records that we have failed.
+ fColorType = GrColorType::kUnknown;
+ return {};
+ }
+
+ const GrBackendTexture& backendTexture = promiseTexture->backendTexture();
+ if (!backendTexture.isValid()) {
+ return {};
+ }
+
+ sk_sp<GrTexture> tex;
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 2, "promise");
+ builder[0] = promiseTexture->uniqueID();
+ builder[1] = (uint32_t) fColorType;
+ builder.finish();
+ // A texture with this key may already exist from a different instance of this lazy
+ // callback. This could happen if the client fulfills a promise image with a texture
+ // that was previously used to fulfill a different promise image.
+ if (auto surf = resourceProvider->findByUniqueKey<GrSurface>(key)) {
+ tex = sk_ref_sp(surf->asTexture());
+ SkASSERT(tex);
+ } else {
+ if ((tex = resourceProvider->wrapBackendTexture(
+ backendTexture, fColorType, kBorrow_GrWrapOwnership,
+ GrWrapCacheable::kYes, kRead_GrIOType))) {
+ tex->resourcePriv().setUniqueKey(key);
+ } else {
+ return {};
+ }
+ }
+ auto releaseIdleState = fVersion == PromiseImageApiVersion::kLegacy
+ ? GrTexture::IdleState::kFinished
+ : GrTexture::IdleState::kFlushed;
+ tex->addIdleProc(std::move(releaseCallback), releaseIdleState);
+ tex->addIdleProc(std::move(fDoneCallback), GrTexture::IdleState::kFinished);
+ promiseTexture->addKeyToInvalidate(tex->getContext()->priv().contextID(), key);
+ fTexture = tex.get();
+ // We need to hold on to the GrTexture in case our proxy gets reinstantiated. However,
+ // we can't unref in our destructor because we may be on another thread then. So we
+ // let the cache know it is waiting on an unref message. We will send that message from
+ // our destructor.
+ GrContext* context = fTexture->getContext();
+ context->priv().getResourceCache()->insertDelayedTextureUnref(fTexture);
+ fTextureContextID = context->priv().contextID();
+ return {std::move(tex), kReleaseCallbackOnInstantiation, kKeySyncMode};
+ }
+
+ private:
+ PromiseImageTextureFulfillProc fFulfillProc;
+ PromiseImageTextureReleaseProc fReleaseProc;
+ sk_sp<GrRefCntedCallback> fDoneCallback;
+ GrTexture* fTexture = nullptr;
+ uint32_t fTextureContextID = SK_InvalidUniqueID;
+ GrColorType fColorType;
+ PromiseImageApiVersion fVersion;
+ } callback(fulfillProc, releaseProc, doneProc, textureContext, colorType, version);
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+
+ GrPixelConfig config = context->priv().caps()->getConfigFromBackendFormat(
+ backendFormat,
+ colorType);
+
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = config;
+
+ // Ganesh assumes that, when wrapping a mipmapped backend texture from a client, its
+ // mipmaps are fully fleshed out.
+ GrMipMapsStatus mipMapsStatus = (GrMipMapped::kYes == mipMapped)
+ ? GrMipMapsStatus::kValid : GrMipMapsStatus::kNotAllocated;
+
+ // We pass kReadOnly here since we should treat content of the client's texture as immutable.
+ // The promise API provides no way for the client to indicate that the texture is protected.
+ return proxyProvider->createLazyProxy(
+ std::move(callback), backendFormat, desc, GrRenderable::kNo, 1, origin, mipMapped,
+ mipMapsStatus, GrInternalSurfaceFlags::kReadOnly, SkBackingFit::kExact, SkBudgeted::kNo,
+ GrProtected::kNo, GrSurfaceProxy::UseAllocator::kYes);
+}
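+ // (Illustrative sketch of the client-side contract, not part of the upstream file;
+ // MyTextureSource is hypothetical and the signatures are paraphrased. Fulfill is called
+ // at most once, Release when the texture's pending GPU work is flushed or finished
+ // (depending on the PromiseImageApiVersion), and Done exactly once:
+ //
+ //   sk_sp<SkPromiseImageTexture> fulfill(void* ctx) {
+ //       return static_cast<MyTextureSource*>(ctx)->texture();
+ //   }
+ //   void release(void* ctx) { /* the GPU is done sampling the texture */ }
+ //   void done(void* ctx)    { delete static_cast<MyTextureSource*>(ctx); }
+ // )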
diff --git a/gfx/skia/skia/src/image/SkImage_GpuBase.h b/gfx/skia/skia/src/image/SkImage_GpuBase.h
new file mode 100644
index 0000000000..c78f7993f4
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuBase.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_GpuBase_DEFINED
+#define SkImage_GpuBase_DEFINED
+
+#include "include/core/SkDeferredDisplayListRecorder.h"
+#include "include/core/SkYUVAIndex.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/private/GrTypesPriv.h"
+#include "src/image/SkImage_Base.h"
+
+class GrColorSpaceXform;
+class SkColorSpace;
+
+class SkImage_GpuBase : public SkImage_Base {
+public:
+ SkImage_GpuBase(sk_sp<GrContext>, int width, int height, uint32_t uniqueID, SkColorType,
+ SkAlphaType, sk_sp<SkColorSpace>);
+ ~SkImage_GpuBase() override;
+
+ GrContext* context() const final { return fContext.get(); }
+
+ bool getROPixels(SkBitmap*, CachingHint) const final;
+ sk_sp<SkImage> onMakeSubset(GrRecordingContext*, const SkIRect& subset) const final;
+
+ bool onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int srcX, int srcY, CachingHint) const override;
+
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext* context) const override {
+ // we shouldn't end up calling this
+ SkASSERT(false);
+ return this->INHERITED::asTextureProxyRef(context);
+ }
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*, const GrSamplerState&,
+ SkScalar scaleAdjust[2]) const final;
+
+ sk_sp<GrTextureProxy> refPinnedTextureProxy(GrRecordingContext* context,
+ uint32_t* uniqueID) const final {
+ *uniqueID = this->uniqueID();
+ return this->asTextureProxyRef(context);
+ }
+
+ GrBackendTexture onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const final;
+
+ GrTexture* onGetTexture() const final;
+
+ bool onIsValid(GrContext*) const final;
+
+#if GR_TEST_UTILS
+ void resetContext(sk_sp<GrContext> newContext);
+#endif
+
+ static bool ValidateBackendTexture(const GrCaps*, const GrBackendTexture& tex,
+ GrColorType grCT, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs);
+ static bool MakeTempTextureProxies(GrContext* ctx, const GrBackendTexture yuvaTextures[],
+ int numTextures, const SkYUVAIndex [4],
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<GrTextureProxy> tempTextureProxies[4]);
+
+ static SkAlphaType GetAlphaTypeFromYUVAIndices(const SkYUVAIndex yuvaIndices[4]) {
+ return -1 != yuvaIndices[SkYUVAIndex::kA_Index].fIndex ? kPremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ }
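+ // (Illustrative note, not from the upstream header: for a three-plane YUV image with no
+ // alpha, yuvaIndices[SkYUVAIndex::kA_Index].fIndex is -1 and this returns
+ // kOpaque_SkAlphaType; any valid alpha index yields kPremul_SkAlphaType.)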
+
+ using PromiseImageTextureContext = SkDeferredDisplayListRecorder::PromiseImageTextureContext;
+ using PromiseImageTextureFulfillProc =
+ SkDeferredDisplayListRecorder::PromiseImageTextureFulfillProc;
+ using PromiseImageTextureReleaseProc =
+ SkDeferredDisplayListRecorder::PromiseImageTextureReleaseProc;
+ using PromiseImageTextureDoneProc = SkDeferredDisplayListRecorder::PromiseImageTextureDoneProc;
+
+protected:
+ using PromiseImageApiVersion = SkDeferredDisplayListRecorder::PromiseImageApiVersion;
+ // Helper for making a lazy proxy for a promise image. The PromiseDoneProc will be called
+ // immediately if this function fails. Otherwise, it is installed in the proxy along with
+ // the TextureFulfillProc and TextureReleaseProc. PromiseDoneProc must not be null.
+ static sk_sp<GrTextureProxy> MakePromiseImageLazyProxy(
+ GrContext*, int width, int height, GrSurfaceOrigin, GrColorType, GrBackendFormat,
+ GrMipMapped, PromiseImageTextureFulfillProc, PromiseImageTextureReleaseProc,
+ PromiseImageTextureDoneProc, PromiseImageTextureContext, PromiseImageApiVersion);
+
+ static bool RenderYUVAToRGBA(GrContext* ctx, GrRenderTargetContext* renderTargetContext,
+ const SkRect& rect, SkYUVColorSpace yuvColorSpace,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const sk_sp<GrTextureProxy> proxies[4],
+ const SkYUVAIndex yuvaIndices[4]);
+
+ sk_sp<GrContext> fContext;
+
+private:
+ typedef SkImage_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp b/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp
new file mode 100644
index 0000000000..03a7cacd70
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <cstddef>
+#include <cstring>
+#include <type_traits>
+
+#include "include/core/SkYUVASizeInfo.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkMipMap.h"
+#include "src/core/SkScopeExit.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrTextureProducer.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrYUVtoRGBEffect.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkImage_GpuYUVA.h"
+
+static constexpr auto kAssumedColorType = kRGBA_8888_SkColorType;
+
+SkImage_GpuYUVA::SkImage_GpuYUVA(sk_sp<GrContext> context, int width, int height, uint32_t uniqueID,
+ SkYUVColorSpace colorSpace, sk_sp<GrTextureProxy> proxies[],
+ GrColorType proxyColorTypes[], int numProxies,
+ const SkYUVAIndex yuvaIndices[4], GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace)
+ : INHERITED(std::move(context), width, height, uniqueID, kAssumedColorType,
+ // If an alpha channel is present we always switch to kPremul. This is because,
+ // although the planar data is always un-premul, the final interleaved RGB image
+ // is/would-be premul.
+ GetAlphaTypeFromYUVAIndices(yuvaIndices), std::move(imageColorSpace))
+ , fNumProxies(numProxies)
+ , fYUVColorSpace(colorSpace)
+ , fOrigin(origin) {
+ // The caller should have done this work, just verifying
+ SkDEBUGCODE(int textureCount;)
+ SkASSERT(SkYUVAIndex::AreValidIndices(yuvaIndices, &textureCount));
+ SkASSERT(textureCount == fNumProxies);
+
+ for (int i = 0; i < numProxies; ++i) {
+ fProxies[i] = std::move(proxies[i]);
+ fProxyColorTypes[i] = proxyColorTypes[i];
+ }
+ memcpy(fYUVAIndices, yuvaIndices, 4*sizeof(SkYUVAIndex));
+}
+
+// For onMakeColorSpace()
+SkImage_GpuYUVA::SkImage_GpuYUVA(const SkImage_GpuYUVA* image, sk_sp<SkColorSpace> targetCS)
+ : INHERITED(image->fContext, image->width(), image->height(), kNeedNewImageUniqueID,
+ kAssumedColorType,
+ // If an alpha channel is present we always switch to kPremul. This is because,
+ // although the planar data is always un-premul, the final interleaved RGB image
+ // is/would-be premul.
+ GetAlphaTypeFromYUVAIndices(image->fYUVAIndices), std::move(targetCS))
+ , fNumProxies(image->fNumProxies)
+ , fYUVColorSpace(image->fYUVColorSpace)
+ , fOrigin(image->fOrigin)
+ // Since null fFromColorSpace means no GrColorSpaceXform, we turn a null
+ // image->refColorSpace() into an explicit SRGB.
+ , fFromColorSpace(image->colorSpace() ? image->refColorSpace() : SkColorSpace::MakeSRGB()) {
+ // The caller should have done this work, just verifying
+ SkDEBUGCODE(int textureCount;)
+ SkASSERT(SkYUVAIndex::AreValidIndices(image->fYUVAIndices, &textureCount));
+ SkASSERT(textureCount == fNumProxies);
+
+ if (image->fRGBProxy) {
+ fRGBProxy = image->fRGBProxy; // we ref in this case, not move
+ } else {
+ for (int i = 0; i < fNumProxies; ++i) {
+ fProxies[i] = image->fProxies[i]; // we ref in this case, not move
+ fProxyColorTypes[i] = image->fProxyColorTypes[i];
+ }
+ }
+ memcpy(fYUVAIndices, image->fYUVAIndices, 4 * sizeof(SkYUVAIndex));
+}
+
+SkImage_GpuYUVA::~SkImage_GpuYUVA() {}
+
+bool SkImage_GpuYUVA::setupMipmapsForPlanes(GrRecordingContext* context) const {
+ // We shouldn't get here if the planes were already flattened to RGBA.
+ SkASSERT(fProxies[0] && !fRGBProxy);
+ if (!context || !fContext->priv().matches(context)) {
+ return false;
+ }
+
+ for (int i = 0; i < fNumProxies; ++i) {
+ GrTextureProducer::CopyParams copyParams;
+ int mipCount = SkMipMap::ComputeLevelCount(fProxies[i]->width(), fProxies[i]->height());
+ if (mipCount && GrGpu::IsACopyNeededForMips(fContext->priv().caps(),
+ fProxies[i].get(),
+ GrSamplerState::Filter::kMipMap,
+ &copyParams)) {
+ auto mippedProxy = GrCopyBaseMipMapToTextureProxy(context, fProxies[i].get(),
+ fProxyColorTypes[i]);
+ if (!mippedProxy) {
+ return false;
+ }
+ fProxies[i] = mippedProxy;
+ }
+ }
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrSemaphoresSubmitted SkImage_GpuYUVA::onFlush(GrContext* context, const GrFlushInfo& info) {
+ if (!context || !fContext->priv().matches(context) || fContext->abandoned()) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ GrSurfaceProxy* proxies[4] = {fProxies[0].get(), fProxies[1].get(),
+ fProxies[2].get(), fProxies[3].get()};
+ int numProxies = fNumProxies;
+ if (fRGBProxy) {
+ // Either we've already flushed the flattening draw or the flattening is unflushed. In the
+ // latter case it should still be ok to just pass fRGBProxy because it in turn depends on
+ // the planar proxies and will cause all of their work to flush as well.
+ proxies[0] = fRGBProxy.get();
+ numProxies = 1;
+ }
+ return context->priv().flushSurfaces(proxies, numProxies, info);
+}
+
+GrTextureProxy* SkImage_GpuYUVA::peekProxy() const {
+ return fRGBProxy.get();
+}
+
+sk_sp<GrTextureProxy> SkImage_GpuYUVA::asTextureProxyRef(GrRecordingContext* context) const {
+ if (fRGBProxy) {
+ return fRGBProxy;
+ }
+
+ if (!context || !fContext->priv().matches(context)) {
+ return nullptr;
+ }
+
+ // Needs to create a render target in order to draw to it for the yuv->rgb conversion.
+ auto renderTargetContext = context->priv().makeDeferredRenderTargetContext(
+ SkBackingFit::kExact, this->width(), this->height(), GrColorType::kRGBA_8888,
+ this->refColorSpace(), 1, GrMipMapped::kNo, fOrigin);
+ if (!renderTargetContext) {
+ return nullptr;
+ }
+
+ sk_sp<GrColorSpaceXform> colorSpaceXform;
+ if (fFromColorSpace) {
+ colorSpaceXform = GrColorSpaceXform::Make(fFromColorSpace.get(), this->alphaType(),
+ this->colorSpace(), this->alphaType());
+ }
+ const SkRect rect = SkRect::MakeIWH(this->width(), this->height());
+ if (!RenderYUVAToRGBA(fContext.get(), renderTargetContext.get(), rect, fYUVColorSpace,
+ std::move(colorSpaceXform), fProxies, fYUVAIndices)) {
+ return nullptr;
+ }
+
+ fRGBProxy = renderTargetContext->asTextureProxyRef();
+ for (auto& p : fProxies) {
+ p.reset();
+ }
+ return fRGBProxy;
+}
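+ // (Illustrative note, not part of the upstream file: flattening is one-way. After the
+ // first successful asTextureProxyRef() call the planar fProxies are released and every
+ // later request is served from the cached fRGBProxy, e.g.
+ //
+ //   auto rgb1 = yuvaImage->asTextureProxyRef(ctx);  // draws YUVA -> RGBA once
+ //   auto rgb2 = yuvaImage->asTextureProxyRef(ctx);  // returns the cached fRGBProxy
+ // )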
+
+sk_sp<GrTextureProxy> SkImage_GpuYUVA::asMippedTextureProxyRef(GrRecordingContext* context) const {
+ if (!context || !fContext->priv().matches(context)) {
+ return nullptr;
+ }
+
+ // If the proxy is invalid, or it already has mip levels, just return it.
+ auto proxy = this->asTextureProxyRef(context);
+ if (!proxy || GrMipMapped::kYes == fRGBProxy->mipMapped()) {
+ return proxy;
+ }
+
+ // need to generate mips for the proxy
+ GrColorType srcColorType = SkColorTypeToGrColorType(this->colorType());
+ if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(context, proxy.get(), srcColorType)) {
+ fRGBProxy = mippedProxy;
+ return mippedProxy;
+ }
+
+ // failed to generate mips
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage_GpuYUVA::onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType,
+ sk_sp<SkColorSpace> targetCS) const {
+ // We explicitly ignore color type changes, for now.
+
+ // we may need a mutex here but for now we expect usage to be in a single thread
+ if (fOnMakeColorSpaceTarget &&
+ SkColorSpace::Equals(targetCS.get(), fOnMakeColorSpaceTarget.get())) {
+ return fOnMakeColorSpaceResult;
+ }
+ sk_sp<SkImage> result = sk_sp<SkImage>(new SkImage_GpuYUVA(this, targetCS));
+ if (result) {
+ fOnMakeColorSpaceTarget = targetCS;
+ fOnMakeColorSpaceResult = result;
+ }
+ return result;
+}
+
+sk_sp<SkImage> SkImage_GpuYUVA::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ return sk_make_sp<SkImage_GpuYUVA>(fContext, this->width(), this->height(),
+ kNeedNewImageUniqueID, fYUVColorSpace, fProxies,
+ fProxyColorTypes, fNumProxies, fYUVAIndices, fOrigin,
+ std::move(newCS));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeFromYUVATextures(GrContext* ctx,
+ SkYUVColorSpace colorSpace,
+ const GrBackendTexture yuvaTextures[],
+ const SkYUVAIndex yuvaIndices[4],
+ SkISize imageSize,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ int numTextures;
+ if (!SkYUVAIndex::AreValidIndices(yuvaIndices, &numTextures)) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> tempTextureProxies[4];
+ if (!SkImage_GpuBase::MakeTempTextureProxies(ctx, yuvaTextures, numTextures, yuvaIndices,
+ imageOrigin, tempTextureProxies)) {
+ return nullptr;
+ }
+ GrColorType proxyColorTypes[4];
+ for (int i = 0; i < numTextures; ++i) {
+ proxyColorTypes[i] = ctx->priv().caps()->getYUVAColorTypeFromBackendFormat(
+ yuvaTextures[i].getBackendFormat(), yuvaIndices[3].fIndex == i);
+ }
+
+ return sk_make_sp<SkImage_GpuYUVA>(sk_ref_sp(ctx), imageSize.width(), imageSize.height(),
+ kNeedNewImageUniqueID, colorSpace, tempTextureProxies,
+ proxyColorTypes, numTextures, yuvaIndices, imageOrigin,
+ imageColorSpace);
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVAPixmaps(
+ GrContext* context, SkYUVColorSpace yuvColorSpace, const SkPixmap yuvaPixmaps[],
+ const SkYUVAIndex yuvaIndices[4], SkISize imageSize, GrSurfaceOrigin imageOrigin,
+ bool buildMips, bool limitToMaxTextureSize, sk_sp<SkColorSpace> imageColorSpace) {
+ if (!context) {
+ return nullptr; // until we implement this for the raster backend
+ }
+
+ int numPixmaps;
+ if (!SkYUVAIndex::AreValidIndices(yuvaIndices, &numPixmaps)) {
+ return nullptr;
+ }
+
+ if (!context->priv().caps()->mipMapSupport()) {
+ buildMips = false;
+ }
+
+ // Make proxies
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ sk_sp<GrTextureProxy> tempTextureProxies[4];
+ GrColorType proxyColorTypes[4];
+ for (int i = 0; i < numPixmaps; ++i) {
+ const SkPixmap* pixmap = &yuvaPixmaps[i];
+ SkAutoPixmapStorage resized;
+ int maxTextureSize = context->priv().caps()->maxTextureSize();
+ int maxDim = SkTMax(yuvaPixmaps[i].width(), yuvaPixmaps[i].height());
+ if (limitToMaxTextureSize && maxDim > maxTextureSize) {
+ float scale = static_cast<float>(maxTextureSize) / maxDim;
+ int newWidth = SkTMin(static_cast<int>(yuvaPixmaps[i].width() * scale),
+ maxTextureSize);
+ int newHeight = SkTMin(static_cast<int>(yuvaPixmaps[i].height() * scale),
+ maxTextureSize);
+ SkImageInfo info = yuvaPixmaps[i].info().makeWH(newWidth, newHeight);
+ if (!resized.tryAlloc(info) ||
+ !yuvaPixmaps[i].scalePixels(resized, kLow_SkFilterQuality)) {
+ return nullptr;
+ }
+ pixmap = &resized;
+ }
+ // Turn the pixmap into a GrTextureProxy
+ SkBitmap bmp;
+ bmp.installPixels(*pixmap);
+ GrMipMapped mipMapped = buildMips ? GrMipMapped::kYes : GrMipMapped::kNo;
+ tempTextureProxies[i] = proxyProvider->createProxyFromBitmap(bmp, mipMapped);
+ if (!tempTextureProxies[i]) {
+ return nullptr;
+ }
+ proxyColorTypes[i] = SkColorTypeToGrColorType(bmp.colorType());
+ }
+
+ return sk_make_sp<SkImage_GpuYUVA>(sk_ref_sp(context), imageSize.width(), imageSize.height(),
+ kNeedNewImageUniqueID, yuvColorSpace, tempTextureProxies,
+ proxyColorTypes, numPixmaps, yuvaIndices, imageOrigin,
+ imageColorSpace);
+}
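+ // (Worked example of the limitToMaxTextureSize path above, not part of the upstream
+ // file: with maxTextureSize = 4096 and an 8192x4320 Y plane, maxDim = 8192 and
+ // scale = 4096 / 8192 = 0.5, so the plane is resampled to 4096x2160 with
+ // kLow_SkFilterQuality before being turned into a proxy; each plane is scaled by the
+ // same per-plane rule.)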
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+sk_sp<SkImage> SkImage_GpuYUVA::MakePromiseYUVATexture(
+ GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendFormat yuvaFormats[],
+ const SkISize yuvaSizes[],
+ const SkYUVAIndex yuvaIndices[4],
+ int imageWidth,
+ int imageHeight,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc promiseDoneProc,
+ PromiseImageTextureContext textureContexts[],
+ PromiseImageApiVersion version) {
+ int numTextures;
+ bool valid = SkYUVAIndex::AreValidIndices(yuvaIndices, &numTextures);
+
+ // The contract here is that if 'promiseDoneProc' is passed in it should always be called,
+ // even if creation of the SkImage fails. Once we call MakePromiseImageLazyProxy it takes
+ // responsibility for calling the done proc.
+ if (!promiseDoneProc) {
+ return nullptr;
+ }
+ int proxiesCreated = 0;
+ SkScopeExit callDone([promiseDoneProc, textureContexts, numTextures, &proxiesCreated]() {
+ for (int i = proxiesCreated; i < numTextures; ++i) {
+ promiseDoneProc(textureContexts[i]);
+ }
+ });
+
+ if (!valid) {
+ return nullptr;
+ }
+
+ if (!context) {
+ return nullptr;
+ }
+
+ if (imageWidth <= 0 || imageHeight <= 0) {
+ return nullptr;
+ }
+
+ SkAlphaType at = (-1 != yuvaIndices[SkYUVAIndex::kA_Index].fIndex) ? kPremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ SkImageInfo info = SkImageInfo::Make(imageWidth, imageHeight, kAssumedColorType,
+ at, imageColorSpace);
+ if (!SkImageInfoIsValid(info)) {
+ return nullptr;
+ }
+
+ // verify sizes with expected texture count
+ for (int i = 0; i < numTextures; ++i) {
+ if (yuvaSizes[i].isEmpty()) {
+ return nullptr;
+ }
+ }
+ for (int i = numTextures; i < SkYUVASizeInfo::kMaxCount; ++i) {
+ if (!yuvaSizes[i].isEmpty()) {
+ return nullptr;
+ }
+ }
+
+ // Get lazy proxies
+ sk_sp<GrTextureProxy> proxies[4];
+ GrColorType proxyColorTypes[4];
+ for (int texIdx = 0; texIdx < numTextures; ++texIdx) {
+ GrColorType colorType = context->priv().caps()->getYUVAColorTypeFromBackendFormat(
+ yuvaFormats[texIdx],
+ yuvaIndices[3].fIndex == texIdx);
+ if (GrColorType::kUnknown == colorType) {
+ return nullptr;
+ }
+
+ proxies[texIdx] = MakePromiseImageLazyProxy(
+ context, yuvaSizes[texIdx].width(), yuvaSizes[texIdx].height(), imageOrigin,
+ colorType, yuvaFormats[texIdx], GrMipMapped::kNo, textureFulfillProc,
+ textureReleaseProc, promiseDoneProc, textureContexts[texIdx], version);
+ ++proxiesCreated;
+ if (!proxies[texIdx]) {
+ return nullptr;
+ }
+ proxyColorTypes[texIdx] = colorType;
+ }
+
+ return sk_make_sp<SkImage_GpuYUVA>(sk_ref_sp(context), imageWidth, imageHeight,
+ kNeedNewImageUniqueID, yuvColorSpace, proxies,
+ proxyColorTypes, numTextures, yuvaIndices, imageOrigin,
+ std::move(imageColorSpace));
+}
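+ // (Illustrative note, not part of the upstream file: the SkScopeExit above implements
+ // the done-proc accounting. Suppose numTextures == 3 and proxy creation fails for
+ // texIdx == 1: MakePromiseImageLazyProxy has already consumed contexts 0 and 1 (it
+ // either installed the done proc in a proxy or called it on failure), proxiesCreated
+ // is 2, and callDone fires promiseDoneProc(textureContexts[2]) for the untouched
+ // context on the early return.)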
diff --git a/gfx/skia/skia/src/image/SkImage_GpuYUVA.h b/gfx/skia/skia/src/image/SkImage_GpuYUVA.h
new file mode 100644
index 0000000000..eac04e4f25
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuYUVA.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_GpuYUVA_DEFINED
+#define SkImage_GpuYUVA_DEFINED
+
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "src/core/SkCachedData.h"
+#include "src/image/SkImage_GpuBase.h"
+
+class GrTexture;
+struct SkYUVASizeInfo;
+
+// Wraps the 3 or 4 planes of a YUVA image for consumption by the GPU.
+// Initially any direct rendering will be done by passing the individual planes to a shader.
+// Once any method requests a flattened image (e.g., onReadPixels), the flattened RGB
+// proxy will be stored and used for any future rendering.
+class SkImage_GpuYUVA : public SkImage_GpuBase {
+public:
+ friend class GrYUVAImageTextureMaker;
+
+ SkImage_GpuYUVA(sk_sp<GrContext>, int width, int height, uint32_t uniqueID, SkYUVColorSpace,
+ sk_sp<GrTextureProxy> proxies[], GrColorType proxyColorTypes[], int numProxies,
+ const SkYUVAIndex[4], GrSurfaceOrigin, sk_sp<SkColorSpace>);
+ ~SkImage_GpuYUVA() override;
+
+ GrSemaphoresSubmitted onFlush(GrContext*, const GrFlushInfo&) override;
+
+ // This returns the single backing proxy if the YUV channels have already been flattened but
+ // nullptr if they have not.
+ GrTextureProxy* peekProxy() const override;
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*) const override;
+
+ virtual bool onIsTextureBacked() const override { return fProxies[0] || fRGBProxy; }
+
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType, sk_sp<SkColorSpace>) const final;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ virtual bool isYUVA() const override { return true; }
+
+ bool setupMipmapsForPlanes(GrRecordingContext*) const;
+
+ // Returns a ref-ed texture proxy with miplevels
+ sk_sp<GrTextureProxy> asMippedTextureProxyRef(GrRecordingContext*) const;
+
+#if GR_TEST_UTILS
+ bool testingOnly_IsFlattened() const {
+ // We should only have the flattened proxy or the planar proxies at one point in time.
+ SkASSERT(SkToBool(fRGBProxy) != SkToBool(fProxies[0]));
+ return SkToBool(fRGBProxy);
+ }
+#endif
+
+ /**
+ * This is the implementation of SkDeferredDisplayListRecorder::makeYUVAPromiseTexture.
+ */
+ static sk_sp<SkImage> MakePromiseYUVATexture(GrContext* context,
+ SkYUVColorSpace yuvColorSpace,
+ const GrBackendFormat yuvaFormats[],
+ const SkISize yuvaSizes[],
+ const SkYUVAIndex yuvaIndices[4],
+ int width,
+ int height,
+ GrSurfaceOrigin imageOrigin,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureDoneProc textureDoneProc,
+ PromiseImageTextureContext textureContexts[],
+ PromiseImageApiVersion);
+
+private:
+ SkImage_GpuYUVA(const SkImage_GpuYUVA* image, sk_sp<SkColorSpace>);
+
+ // This array will usually only be sparsely populated.
+ // The actual non-null fields are dictated by the 'fYUVAIndices' indices
+ mutable sk_sp<GrTextureProxy> fProxies[4];
+ mutable GrColorType fProxyColorTypes[4];
+ int fNumProxies;
+ SkYUVAIndex fYUVAIndices[4];
+ const SkYUVColorSpace fYUVColorSpace;
+ GrSurfaceOrigin fOrigin;
+ // If this is non-null then the planar data should be converted from fFromColorSpace to
+ // this->colorSpace(). Otherwise we assume the planar data (post YUV->RGB conversion) is already
+ // in this->colorSpace().
+ const sk_sp<SkColorSpace> fFromColorSpace;
+
+ // Repeated calls to onMakeColorSpace will result in a proliferation of unique IDs and
+ // SkImage_GpuYUVA instances. Cache the result of the last successful onMakeColorSpace call.
+ mutable sk_sp<SkColorSpace> fOnMakeColorSpaceTarget;
+ mutable sk_sp<SkImage> fOnMakeColorSpaceResult;
+
+ // This is only allocated when the image needs to be flattened rather than
+ // using the separate YUVA planes. From then on we will only use
+ // the RGBProxy.
+ mutable sk_sp<GrTextureProxy> fRGBProxy;
+ typedef SkImage_GpuBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Lazy.cpp b/gfx/skia/skia/src/image/SkImage_Lazy.cpp
new file mode 100644
index 0000000000..022ba40f45
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Lazy.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_Lazy.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkNextID.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "include/private/GrResourceKey.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrImageTextureMaker.h"
+#include "src/gpu/GrProxyProvider.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrSamplerState.h"
+#include "src/gpu/GrYUVProvider.h"
+#include "src/gpu/SkGr.h"
+#endif
+
+// Ref-counted tuple(SkImageGenerator, SkMutex) which allows sharing one generator among N images
+class SharedGenerator final : public SkNVRefCnt<SharedGenerator> {
+public:
+ static sk_sp<SharedGenerator> Make(std::unique_ptr<SkImageGenerator> gen) {
+ return gen ? sk_sp<SharedGenerator>(new SharedGenerator(std::move(gen))) : nullptr;
+ }
+
+ // This is thread safe. It is a const field set in the constructor.
+ const SkImageInfo& getInfo() { return fGenerator->getInfo(); }
+
+private:
+ explicit SharedGenerator(std::unique_ptr<SkImageGenerator> gen)
+ : fGenerator(std::move(gen)) {
+ SkASSERT(fGenerator);
+ }
+
+ friend class ScopedGenerator;
+ friend class SkImage_Lazy;
+
+ std::unique_ptr<SkImageGenerator> fGenerator;
+ SkMutex fMutex;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkImage_Lazy::Validator::Validator(sk_sp<SharedGenerator> gen, const SkIRect* subset,
+ const SkColorType* colorType, sk_sp<SkColorSpace> colorSpace)
+ : fSharedGenerator(std::move(gen)) {
+ if (!fSharedGenerator) {
+ return;
+ }
+
+ // The following generator accessors are safe without acquiring the mutex (const getters).
+ // TODO: refactor to use a ScopedGenerator instead, for clarity.
+ const SkImageInfo& info = fSharedGenerator->fGenerator->getInfo();
+ if (info.isEmpty()) {
+ fSharedGenerator.reset();
+ return;
+ }
+
+ fUniqueID = fSharedGenerator->fGenerator->uniqueID();
+ const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height());
+ if (subset) {
+ if (!bounds.contains(*subset)) {
+ fSharedGenerator.reset();
+ return;
+ }
+ if (*subset != bounds) {
+ // we need a different uniqueID since we really are a subset of the raw generator
+ fUniqueID = SkNextID::ImageID();
+ }
+ } else {
+ subset = &bounds;
+ }
+
+ fInfo = info.makeDimensions(subset->size());
+ fOrigin = SkIPoint::Make(subset->x(), subset->y());
+ if (colorType || colorSpace) {
+ if (colorType) {
+ fInfo = fInfo.makeColorType(*colorType);
+ }
+ if (colorSpace) {
+ fInfo = fInfo.makeColorSpace(colorSpace);
+ }
+ fUniqueID = SkNextID::ImageID();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Helper for exclusive access to a shared generator.
+class SkImage_Lazy::ScopedGenerator {
+public:
+ ScopedGenerator(const sk_sp<SharedGenerator>& gen)
+ : fSharedGenerator(gen)
+ , fAutoAquire(gen->fMutex) {}
+
+ SkImageGenerator* operator->() const {
+ fSharedGenerator->fMutex.assertHeld();
+ return fSharedGenerator->fGenerator.get();
+ }
+
+ operator SkImageGenerator*() const {
+ fSharedGenerator->fMutex.assertHeld();
+ return fSharedGenerator->fGenerator.get();
+ }
+
+private:
+ const sk_sp<SharedGenerator>& fSharedGenerator;
+ SkAutoMutexExclusive fAutoAquire;
+};
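+ // (Usage sketch, not part of the upstream file: this is the locking pattern used
+ // throughout this file to serialize access to the shared generator.
+ //
+ //   ScopedGenerator generator(fSharedGenerator);      // acquires fMutex
+ //   sk_sp<SkData> data = generator->refEncodedData(); // safe: mutex is held
+ //                                                     // released at scope exit
+ // )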
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkImage_Lazy::SkImage_Lazy(Validator* validator)
+ : INHERITED(validator->fInfo, validator->fUniqueID)
+ , fSharedGenerator(std::move(validator->fSharedGenerator))
+ , fOrigin(validator->fOrigin) {
+ SkASSERT(fSharedGenerator);
+ fUniqueID = validator->fUniqueID;
+}
+
+SkImage_Lazy::~SkImage_Lazy() {
+#if SK_SUPPORT_GPU
+ for (int i = 0; i < fUniqueKeyInvalidatedMessages.count(); ++i) {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(*fUniqueKeyInvalidatedMessages[i]);
+ }
+ fUniqueKeyInvalidatedMessages.deleteAll();
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool generate_pixels(SkImageGenerator* gen, const SkPixmap& pmap, int originX, int originY) {
+ const int genW = gen->getInfo().width();
+ const int genH = gen->getInfo().height();
+ const SkIRect srcR = SkIRect::MakeWH(genW, genH);
+ const SkIRect dstR = SkIRect::MakeXYWH(originX, originY, pmap.width(), pmap.height());
+ if (!srcR.contains(dstR)) {
+ return false;
+ }
+
+ // If they are requesting a subset, we have to have a temp allocation for the full image,
+ // and then copy the subset into their allocation.
+ SkBitmap full;
+ SkPixmap fullPM;
+ const SkPixmap* dstPM = &pmap;
+ if (srcR != dstR) {
+ if (!full.tryAllocPixels(pmap.info().makeWH(genW, genH))) {
+ return false;
+ }
+ if (!full.peekPixels(&fullPM)) {
+ return false;
+ }
+ dstPM = &fullPM;
+ }
+
+ if (!gen->getPixels(dstPM->info(), dstPM->writable_addr(), dstPM->rowBytes())) {
+ return false;
+ }
+
+ if (srcR != dstR) {
+ if (!full.readPixels(pmap, originX, originY)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkImage_Lazy::getROPixels(SkBitmap* bitmap, SkImage::CachingHint chint) const {
+ auto check_output_bitmap = [bitmap]() {
+ SkASSERT(bitmap->isImmutable());
+ SkASSERT(bitmap->getPixels());
+ (void)bitmap;
+ };
+
+ auto desc = SkBitmapCacheDesc::Make(this);
+ if (SkBitmapCache::Find(desc, bitmap)) {
+ check_output_bitmap();
+ return true;
+ }
+
+ if (SkImage::kAllow_CachingHint == chint) {
+ SkPixmap pmap;
+ SkBitmapCache::RecPtr cacheRec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
+ if (!cacheRec ||
+ !generate_pixels(ScopedGenerator(fSharedGenerator), pmap,
+ fOrigin.x(), fOrigin.y())) {
+ return false;
+ }
+ SkBitmapCache::Add(std::move(cacheRec), bitmap);
+ this->notifyAddedToRasterCache();
+ } else {
+ if (!bitmap->tryAllocPixels(this->imageInfo()) ||
+ !generate_pixels(ScopedGenerator(fSharedGenerator), bitmap->pixmap(), fOrigin.x(),
+ fOrigin.y())) {
+ return false;
+ }
+ bitmap->setImmutable();
+ }
+
+ check_output_bitmap();
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_Lazy::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int srcX, int srcY, CachingHint chint) const {
+ SkBitmap bm;
+ if (this->getROPixels(&bm, chint)) {
+ return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
+ }
+ return false;
+}
+
+sk_sp<SkData> SkImage_Lazy::onRefEncoded() const {
+ ScopedGenerator generator(fSharedGenerator);
+ return generator->refEncodedData();
+}
+
+bool SkImage_Lazy::onIsValid(GrContext* context) const {
+ ScopedGenerator generator(fSharedGenerator);
+ return generator->isValid(context);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTextureProxy> SkImage_Lazy::asTextureProxyRef(GrRecordingContext* context,
+ const GrSamplerState& params,
+ SkScalar scaleAdjust[2]) const {
+ if (!context) {
+ return nullptr;
+ }
+
+ GrImageTextureMaker textureMaker(context, this, kAllow_CachingHint);
+ return textureMaker.refTextureProxyForParams(params, scaleAdjust);
+}
+#endif
+
+sk_sp<SkImage> SkImage_Lazy::onMakeSubset(GrRecordingContext* context,
+ const SkIRect& subset) const {
+ SkASSERT(this->bounds().contains(subset));
+ SkASSERT(this->bounds() != subset);
+
+ const SkIRect generatorSubset = subset.makeOffset(fOrigin);
+ const SkColorType colorType = this->colorType();
+ Validator validator(fSharedGenerator, &generatorSubset, &colorType, this->refColorSpace());
+ return validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
+}
+
+sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS) const {
+ SkAutoMutexExclusive autoAquire(fOnMakeColorTypeAndSpaceMutex);
+ if (fOnMakeColorTypeAndSpaceResult &&
+ targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
+ SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
+ return fOnMakeColorTypeAndSpaceResult;
+ }
+ const SkIRect generatorSubset =
+ SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), this->width(), this->height());
+ Validator validator(fSharedGenerator, &generatorSubset, &targetCT, targetCS);
+ sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
+ if (result) {
+ fOnMakeColorTypeAndSpaceResult = result;
+ }
+ return result;
+}
+
+sk_sp<SkImage> SkImage_Lazy::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ // TODO: The correct thing is to clone the generator, and modify its color space. That's hard,
+ // because we don't have a clone method, and generator is public (and derived-from by clients).
+ // So do the simple/inefficient thing here, and fall back to raster when this is called.
+
+ // We allocate the bitmap with the new color space, then generate the image using the original.
+ SkBitmap bitmap;
+ if (bitmap.tryAllocPixels(this->imageInfo().makeColorSpace(std::move(newCS)))) {
+ SkPixmap pixmap = bitmap.pixmap();
+ pixmap.setColorSpace(this->refColorSpace());
+ if (generate_pixels(ScopedGenerator(fSharedGenerator), pixmap, fOrigin.x(), fOrigin.y())) {
+ bitmap.setImmutable();
+ return SkImage::MakeFromBitmap(bitmap);
+ }
+ }
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromGenerator(std::unique_ptr<SkImageGenerator> generator,
+ const SkIRect* subset) {
+ SkImage_Lazy::Validator
+ validator(SharedGenerator::Make(std::move(generator)), subset, nullptr, nullptr);
+
+ return validator ? sk_make_sp<SkImage_Lazy>(&validator) : nullptr;
+}
+
+sk_sp<SkImage> SkImage::DecodeToRaster(const void* encoded, size_t length, const SkIRect* subset) {
+ // The generator will not outlive this function, so we can wrap the encoded data without copying it.
+ auto gen = SkImageGenerator::MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length));
+ if (!gen) {
+ return nullptr;
+ }
+ SkImageInfo info = gen->getInfo();
+ if (info.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIPoint origin = {0, 0};
+ if (subset) {
+ if (!SkIRect::MakeWH(info.width(), info.height()).contains(*subset)) {
+ return nullptr;
+ }
+ info = info.makeDimensions(subset->size());
+ origin = {subset->x(), subset->y()};
+ }
+
+ size_t rb = info.minRowBytes();
+ if (rb == 0) {
+ return nullptr; // rb was too big
+ }
+ size_t size = info.computeByteSize(rb);
+ if (size == SIZE_MAX) {
+ return nullptr;
+ }
+ auto data = SkData::MakeUninitialized(size);
+
+ SkPixmap pmap(info, data->writable_data(), rb);
+ if (!generate_pixels(gen.get(), pmap, origin.x(), origin.y())) {
+ return nullptr;
+ }
+
+ return SkImage::MakeRasterData(info, data, rb);
+}
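+ // (Illustrative use of DecodeToRaster, not part of the upstream file; the file name is
+ // hypothetical:
+ //
+ //   sk_sp<SkData> png = SkData::MakeFromFileName("image.png");
+ //   SkIRect crop = SkIRect::MakeWH(64, 64);
+ //   sk_sp<SkImage> img = SkImage::DecodeToRaster(png->data(), png->size(), &crop);
+ //
+ // Because the generator does not outlive the call, the encoded bytes are wrapped
+ // without being copied.)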
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+void SkImage_Lazy::makeCacheKeyFromOrigKey(const GrUniqueKey& origKey,
+ GrUniqueKey* cacheKey) const {
+ SkASSERT(!cacheKey->isValid());
+ if (origKey.isValid()) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(cacheKey, origKey, kDomain, 0, "Image");
+ }
+}
+
+class Generator_GrYUVProvider : public GrYUVProvider {
+public:
+ Generator_GrYUVProvider(SkImageGenerator* gen) : fGen(gen) {}
+
+private:
+ uint32_t onGetID() const override { return fGen->uniqueID(); }
+ bool onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* colorSpace) const override {
+ return fGen->queryYUVA8(sizeInfo, yuvaIndices, colorSpace);
+ }
+ bool onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
+ const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ void* planes[]) override {
+ return fGen->getYUVA8Planes(sizeInfo, yuvaIndices, planes);
+ }
+
+ SkImageGenerator* fGen;
+
+ typedef GrYUVProvider INHERITED;
+};
+
+static void set_key_on_proxy(GrProxyProvider* proxyProvider,
+ GrTextureProxy* proxy, GrTextureProxy* originalProxy,
+ const GrUniqueKey& key) {
+ if (key.isValid()) {
+ if (originalProxy && originalProxy->getUniqueKey().isValid()) {
+ SkASSERT(originalProxy->getUniqueKey() == key);
+ SkASSERT(GrMipMapped::kYes == proxy->mipMapped() &&
+ GrMipMapped::kNo == originalProxy->mipMapped());
+ // If we had an originalProxy with a valid key, that means there already is a proxy in
+ // the cache which matches the key, but it does not have mip levels and we require them.
+ // Thus we must remove the unique key from that proxy.
+ proxyProvider->removeUniqueKeyFromProxy(originalProxy);
+ }
+ proxyProvider->assignUniqueKeyToProxy(key, proxy);
+ }
+}
+
+sk_sp<SkCachedData> SkImage_Lazy::getPlanes(SkYUVASizeInfo* yuvaSizeInfo,
+ SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
+ SkYUVColorSpace* yuvColorSpace,
+ const void* planes[SkYUVASizeInfo::kMaxCount]) {
+ ScopedGenerator generator(fSharedGenerator);
+ Generator_GrYUVProvider provider(generator);
+
+ sk_sp<SkCachedData> data = provider.getPlanes(yuvaSizeInfo, yuvaIndices, yuvColorSpace, planes);
+ if (!data) {
+ return nullptr;
+ }
+
+ return data;
+}
+
+
+/*
+ * We have 4 ways to try to return a texture (in sorted order)
+ *
+ * 1. Check the cache for a pre-existing one
+ * 2. Ask the generator to natively create one
+ * 3. Ask the generator to return YUV planes, which the GPU can convert
+ * 4. Ask the generator to return RGB(A) data, which the GPU can convert
+ */
+sk_sp<GrTextureProxy> SkImage_Lazy::lockTextureProxy(
+ GrRecordingContext* ctx,
+ const GrUniqueKey& origKey,
+ SkImage::CachingHint chint,
+ bool willBeMipped,
+ GrTextureMaker::AllowedTexGenType genType) const {
+ // Values representing the various texture lock paths we can take. Used for logging the path
+ // taken to a histogram.
+ enum LockTexturePath {
+ kFailure_LockTexturePath,
+ kPreExisting_LockTexturePath,
+ kNative_LockTexturePath,
+ kCompressed_LockTexturePath, // Deprecated
+ kYUV_LockTexturePath,
+ kRGBA_LockTexturePath,
+ };
+
+ enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };
+
+ // Build our texture key.
+ // Even though some proxies created here may have a specific origin and use that origin, we do
+ // not include that in the key. Since SkImages are meant to be immutable, a given SkImage will
+ // always have an associated proxy that is always one origin or the other. It never can change
+ // origins. Thus we don't need to include that info in the key itself.
+ GrUniqueKey key;
+ this->makeCacheKeyFromOrigKey(origKey, &key);
+
+ GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy;
+
+ // 1. Check the cache for a pre-existing one
+ if (key.isValid()) {
+ auto ct = SkColorTypeToGrColorType(this->colorType());
+ proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, ct, kTopLeft_GrSurfaceOrigin);
+ if (proxy) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
+ kLockTexturePathCount);
+ if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
+ return proxy;
+ }
+ }
+ }
+
+ // 2. Ask the generator to natively create one
+ if (!proxy) {
+ ScopedGenerator generator(fSharedGenerator);
+ if (GrTextureMaker::AllowedTexGenType::kCheap == genType &&
+ SkImageGenerator::TexGenType::kCheap != generator->onCanGenerateTexture()) {
+ return nullptr;
+ }
+ if ((proxy = generator->generateTexture(ctx, this->imageInfo(), fOrigin, willBeMipped))) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
+ kLockTexturePathCount);
+ set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
+ if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
+ *fUniqueKeyInvalidatedMessages.append() =
+ new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
+ return proxy;
+ }
+ }
+ }
+
+ // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
+ // the texture we fall through here and have the CPU generate the mip maps for us.
+ if (!proxy && !willBeMipped && !ctx->priv().options().fDisableGpuYUVConversion) {
+ const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(this->imageInfo());
+
+ SkColorType colorType = this->colorType();
+
+ ScopedGenerator generator(fSharedGenerator);
+ Generator_GrYUVProvider provider(generator);
+
+ // The pixels in the texture will be in the generator's color space.
+ // If onMakeColorTypeAndColorSpace has been called then this will not match this image's
+ // color space. To correct this, apply a color space conversion from the generator's color
+ // space to this image's color space.
+ SkColorSpace* generatorColorSpace = fSharedGenerator->fGenerator->getInfo().colorSpace();
+ SkColorSpace* thisColorSpace = this->colorSpace();
+
+ // TODO: Update to create the mipped surface in the YUV generator and draw the base
+ // layer directly into the mipped surface.
+ proxy = provider.refAsTextureProxy(ctx, desc, SkColorTypeToGrColorType(colorType),
+ generatorColorSpace, thisColorSpace);
+ if (proxy) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
+ kLockTexturePathCount);
+ set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
+ *fUniqueKeyInvalidatedMessages.append() =
+ new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
+ return proxy;
+ }
+ }
+
+ // 4. Ask the generator to return RGB(A) data, which the GPU can convert
+ SkBitmap bitmap;
+ if (!proxy && this->getROPixels(&bitmap, chint)) {
+ proxy = proxyProvider->createProxyFromBitmap(bitmap, willBeMipped ? GrMipMapped::kYes
+ : GrMipMapped::kNo);
+ if (proxy && (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped())) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
+ kLockTexturePathCount);
+ set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
+ *fUniqueKeyInvalidatedMessages.append() =
+ new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
+ return proxy;
+ }
+ }
+
+ if (proxy) {
+ // We need a mipped proxy, but we either found a proxy earlier that wasn't mipped, generated
+ // a native non-mipped proxy, or generated a non-mipped YUV proxy. Thus we generate a new
+ // mipped surface and copy the original proxy into the base layer. We will then let the gpu
+ // generate the rest of the mips.
+ SkASSERT(willBeMipped);
+ SkASSERT(GrMipMapped::kNo == proxy->mipMapped());
+ *fUniqueKeyInvalidatedMessages.append() =
+ new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
+ GrColorType srcColorType = SkColorTypeToGrColorType(this->colorType());
+ if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(ctx, proxy.get(), srcColorType)) {
+ set_key_on_proxy(proxyProvider, mippedProxy.get(), proxy.get(), key);
+ return mippedProxy;
+ }
+ // We failed to make a mipped proxy with the base copied into it. This could have
+ // been from failure to make the proxy or failure to do the copy. Thus we will fall
+ // back to just using the non mipped proxy; See skbug.com/7094.
+ return proxy;
+ }
+
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
+ kLockTexturePathCount);
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::DecodeToTexture(GrContext* ctx, const void* encoded, size_t length,
+ const SkIRect* subset) {
+ // img will not survive this function, so we don't need to copy/own the encoded data.
+ auto img = MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length), subset);
+ if (!img) {
+ return nullptr;
+ }
+ return img->makeTextureImage(ctx);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Lazy.h b/gfx/skia/skia/src/image/SkImage_Lazy.h
new file mode 100644
index 0000000000..ca094b3d1f
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Lazy.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Lazy_DEFINED
+#define SkImage_Lazy_DEFINED
+
+#include "include/private/SkMutex.h"
+#include "src/image/SkImage_Base.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrTextureMaker.h"
+#endif
+
+class SharedGenerator;
+
+class SkImage_Lazy : public SkImage_Base {
+public:
+ struct Validator {
+ Validator(sk_sp<SharedGenerator>, const SkIRect* subset, const SkColorType* colorType,
+ sk_sp<SkColorSpace> colorSpace);
+
+ MOZ_IMPLICIT operator bool() const { return fSharedGenerator.get(); }
+
+ sk_sp<SharedGenerator> fSharedGenerator;
+ SkImageInfo fInfo;
+ SkIPoint fOrigin;
+ sk_sp<SkColorSpace> fColorSpace;
+ uint32_t fUniqueID;
+ };
+
+ SkImage_Lazy(Validator* validator);
+ ~SkImage_Lazy() override;
+
+ SkIRect onGetSubset() const override {
+ return SkIRect::MakeXYWH(fOrigin.fX, fOrigin.fY, this->width(), this->height());
+ }
+
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int srcX, int srcY,
+ CachingHint) const override;
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*,
+ const GrSamplerState&,
+ SkScalar scaleAdjust[2]) const override;
+ sk_sp<SkCachedData> getPlanes(SkYUVASizeInfo*, SkYUVAIndex[4],
+ SkYUVColorSpace*, const void* planes[4]) override;
+#endif
+ sk_sp<SkData> onRefEncoded() const override;
+ sk_sp<SkImage> onMakeSubset(GrRecordingContext*, const SkIRect&) const override;
+ bool getROPixels(SkBitmap*, CachingHint) const override;
+ bool onIsLazyGenerated() const override { return true; }
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType, sk_sp<SkColorSpace>) const override;
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ bool onIsValid(GrContext*) const override;
+
+#if SK_SUPPORT_GPU
+ // Returns the texture proxy. If we're going to generate and cache the texture, we should use
+ // the passed-in key (if the key is valid). If genType is AllowedTexGenType::kCheap and the
+ // texture is not trivial to construct, returns nullptr.
+ sk_sp<GrTextureProxy> lockTextureProxy(GrRecordingContext*,
+ const GrUniqueKey& key,
+ SkImage::CachingHint,
+ bool willBeMipped,
+ GrTextureMaker::AllowedTexGenType genType) const;
+
+ void makeCacheKeyFromOrigKey(const GrUniqueKey& origKey, GrUniqueKey* cacheKey) const;
+#endif
+
+private:
+ class ScopedGenerator;
+
+ // Note that this->imageInfo() is not necessarily the info from the generator. It may be
+ // cropped by onMakeSubset and its color type/space may be changed by
+ // onMakeColorTypeAndColorSpace.
+ sk_sp<SharedGenerator> fSharedGenerator;
+ const SkIPoint fOrigin;
+
+ uint32_t fUniqueID;
+
+ // Repeated calls to onMakeColorTypeAndColorSpace will result in a proliferation of unique IDs
+ // and SkImage_Lazy instances. Cache the result of the last successful call.
+ mutable SkMutex fOnMakeColorTypeAndSpaceMutex;
+ mutable sk_sp<SkImage> fOnMakeColorTypeAndSpaceResult;
+
+#if SK_SUPPORT_GPU
+ // When the SkImage_Lazy goes away, we will iterate over all the unique keys we've used and
+ // send messages to the GrContexts to say the unique keys are no longer valid. The GrContexts
+ // can then release the resources connected with those unique keys from their caches.
+ mutable SkTDArray<GrUniqueKeyInvalidatedMessage*> fUniqueKeyInvalidatedMessages;
+#endif
+
+ typedef SkImage_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Raster.cpp b/gfx/skia/skia/src/image/SkImage_Raster.cpp
new file mode 100644
index 0000000000..5992af798d
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Raster.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkSurface.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/codec/SkColorTable.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkTLazy.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkBitmapProcShader.h"
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrTextureAdjuster.h"
+#include "src/gpu/SkGr.h"
+#endif
+
+// fixes https://bug.skia.org/5096
+static bool is_not_subset(const SkBitmap& bm) {
+ SkASSERT(bm.pixelRef());
+ SkISize dim = SkISize::Make(bm.pixelRef()->width(), bm.pixelRef()->height());
+ SkASSERT(dim != bm.dimensions() || bm.pixelRefOrigin().isZero());
+ return dim == bm.dimensions();
+}
+
+class SkImage_Raster : public SkImage_Base {
+public:
+ static bool ValidArgs(const SkImageInfo& info, size_t rowBytes, size_t* minSize) {
+ const int maxDimension = SK_MaxS32 >> 2;
+
+ if (info.width() <= 0 || info.height() <= 0) {
+ return false;
+ }
+ if (info.width() > maxDimension || info.height() > maxDimension) {
+ return false;
+ }
+ if ((unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType) {
+ return false;
+ }
+ if ((unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType) {
+ return false;
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+ if (!info.validRowBytes(rowBytes)) {
+ return false;
+ }
+
+ size_t size = info.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return false;
+ }
+
+ if (minSize) {
+ *minSize = size;
+ }
+ return true;
+ }
+
+ SkImage_Raster(const SkImageInfo&, sk_sp<SkData>, size_t rb,
+ uint32_t id = kNeedNewImageUniqueID);
+ ~SkImage_Raster() override;
+
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int srcX, int srcY, CachingHint) const override;
+ bool onPeekPixels(SkPixmap*) const override;
+ const SkBitmap* onPeekBitmap() const override { return &fBitmap; }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> asTextureProxyRef(GrRecordingContext*, const GrSamplerState&,
+ SkScalar scaleAdjust[2]) const override;
+#endif
+
+ bool getROPixels(SkBitmap*, CachingHint) const override;
+ sk_sp<SkImage> onMakeSubset(GrRecordingContext*, const SkIRect&) const override;
+
+ SkPixelRef* getPixelRef() const { return fBitmap.pixelRef(); }
+
+ bool onAsLegacyBitmap(SkBitmap*) const override;
+
+ SkImage_Raster(const SkBitmap& bm, bool bitmapMayBeMutable = false)
+ : INHERITED(bm.info(),
+ is_not_subset(bm) ? bm.getGenerationID() : (uint32_t)kNeedNewImageUniqueID)
+ , fBitmap(bm) {
+ SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable());
+ }
+
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType, sk_sp<SkColorSpace>) const override;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const override;
+
+ bool onIsValid(GrContext* context) const override { return true; }
+ void notifyAddedToRasterCache() const override {
+ // We explicitly DON'T want to call INHERITED::notifyAddedToRasterCache. That ties the
+ // lifetime of derived/cached resources to the image. In this case, we only want cached
+ // data (e.g. mips) tied to the lifetime of the underlying pixelRef.
+ SkASSERT(fBitmap.pixelRef());
+ fBitmap.pixelRef()->notifyAddedToCache();
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> refPinnedTextureProxy(GrRecordingContext*,
+ uint32_t* uniqueID) const override;
+ bool onPinAsTexture(GrContext*) const override;
+ void onUnpinAsTexture(GrContext*) const override;
+#endif
+
+private:
+ SkBitmap fBitmap;
+
+#if SK_SUPPORT_GPU
+ mutable sk_sp<GrTextureProxy> fPinnedProxy;
+ mutable int32_t fPinnedCount = 0;
+ mutable uint32_t fPinnedUniqueID = 0;
+#endif
+
+ typedef SkImage_Base INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void release_data(void* addr, void* context) {
+ SkData* data = static_cast<SkData*>(context);
+ data->unref();
+}
+
+SkImage_Raster::SkImage_Raster(const SkImageInfo& info, sk_sp<SkData> data, size_t rowBytes,
+ uint32_t id)
+ : INHERITED(info, id) {
+ void* addr = const_cast<void*>(data->data());
+
+ fBitmap.installPixels(info, addr, rowBytes, release_data, data.release());
+ fBitmap.setImmutable();
+}
+
+SkImage_Raster::~SkImage_Raster() {
+#if SK_SUPPORT_GPU
+ SkASSERT(nullptr == fPinnedProxy.get()); // want the caller to have manually unpinned
+#endif
+}
+
+bool SkImage_Raster::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const {
+ SkBitmap shallowCopy(fBitmap);
+ return shallowCopy.readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY);
+}
+
+bool SkImage_Raster::onPeekPixels(SkPixmap* pm) const {
+ return fBitmap.peekPixels(pm);
+}
+
+bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const {
+ *dst = fBitmap;
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTextureProxy> SkImage_Raster::asTextureProxyRef(GrRecordingContext* context,
+ const GrSamplerState& params,
+ SkScalar scaleAdjust[2]) const {
+ if (!context) {
+ return nullptr;
+ }
+
+ uint32_t uniqueID;
+ sk_sp<GrTextureProxy> tex = this->refPinnedTextureProxy(context, &uniqueID);
+ if (tex) {
+ GrTextureAdjuster adjuster(context, fPinnedProxy,
+ SkColorTypeToGrColorType(fBitmap.colorType()),
+ fBitmap.alphaType(), fPinnedUniqueID, fBitmap.colorSpace());
+ return adjuster.refTextureProxyForParams(params, scaleAdjust);
+ }
+
+ return GrRefCachedBitmapTextureProxy(context, fBitmap, params, scaleAdjust);
+}
+#endif
+
+#if SK_SUPPORT_GPU
+
+sk_sp<GrTextureProxy> SkImage_Raster::refPinnedTextureProxy(GrRecordingContext*,
+ uint32_t* uniqueID) const {
+ if (fPinnedProxy) {
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+ *uniqueID = fPinnedUniqueID;
+ return fPinnedProxy;
+ }
+ return nullptr;
+}
+
+bool SkImage_Raster::onPinAsTexture(GrContext* ctx) const {
+ if (fPinnedProxy) {
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+ } else {
+ SkASSERT(fPinnedCount == 0);
+ SkASSERT(fPinnedUniqueID == 0);
+ fPinnedProxy = GrRefCachedBitmapTextureProxy(ctx, fBitmap, GrSamplerState::ClampNearest(),
+ nullptr);
+ if (!fPinnedProxy) {
+ return false;
+ }
+ fPinnedUniqueID = fBitmap.getGenerationID();
+ }
+ // Note: we only increment if the texture was successfully pinned
+ ++fPinnedCount;
+ return true;
+}
+
+void SkImage_Raster::onUnpinAsTexture(GrContext* ctx) const {
+ // Note: we always decrement, even if fPinnedProxy is null
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+
+ if (0 == --fPinnedCount) {
+ fPinnedProxy.reset(nullptr);
+ fPinnedUniqueID = 0;
+ }
+}
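+// A sketch of the intended pairing (assuming the SkImage_pinAsTexture /
+// SkImage_unpinAsTexture helpers from SkImagePriv; the caller shape is illustrative):
+//   if (SkImage_pinAsTexture(img, ctx)) {
+//       // ... repeated draws that reuse the cached texture ...
+//       SkImage_unpinAsTexture(img, ctx);   // must balance every successful pin
+//   }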
+#endif
+
+sk_sp<SkImage> SkImage_Raster::onMakeSubset(GrRecordingContext*, const SkIRect& subset) const {
+ SkImageInfo info = fBitmap.info().makeDimensions(subset.size());
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ void* dst = bitmap.getPixels();
+ void* src = fBitmap.getAddr(subset.x(), subset.y());
+ if (!dst || !src) {
+ SkDEBUGFAIL("SkImage_Raster::onMakeSubset with nullptr src or dst");
+ return nullptr;
+ }
+
+ SkRectMemcpy(dst, bitmap.rowBytes(), src, fBitmap.rowBytes(), bitmap.rowBytes(),
+ subset.height());
+
+ bitmap.setImmutable();
+ return MakeFromBitmap(bitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> MakeRasterCopyPriv(const SkPixmap& pmap, uint32_t id) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(pmap.info(), pmap.rowBytes(), &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ // Here we actually make a copy of the caller's pixel data
+ sk_sp<SkData> data(SkData::MakeWithCopy(pmap.addr(), size));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes(), id);
+}
+
+sk_sp<SkImage> SkImage::MakeRasterCopy(const SkPixmap& pmap) {
+ return MakeRasterCopyPriv(pmap, kNeedNewImageUniqueID);
+}
+
+sk_sp<SkImage> SkImage::MakeRasterData(const SkImageInfo& info, sk_sp<SkData> data,
+ size_t rowBytes) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(info, rowBytes, &size) || !data) {
+ return nullptr;
+ }
+
+ // did they give us enough data?
+ if (data->size() < size) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkImage_Raster>(info, std::move(data), rowBytes);
+}
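+// Example (a sketch; `w` and `h` are assumed dimensions): building an image
+// from tightly packed N32 pixels.
+//   SkImageInfo ii = SkImageInfo::MakeN32Premul(w, h);
+//   sk_sp<SkData> px = SkData::MakeUninitialized(ii.computeMinByteSize());
+//   // ... fill px->writable_data() ...
+//   sk_sp<SkImage> img = SkImage::MakeRasterData(ii, px, ii.minRowBytes());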
+
+sk_sp<SkImage> SkImage::MakeFromRaster(const SkPixmap& pmap, RasterReleaseProc proc,
+ ReleaseContext ctx) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(pmap.info(), pmap.rowBytes(), &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data(SkData::MakeWithProc(pmap.addr(), size, proc, ctx));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes());
+}
+
+sk_sp<SkImage> SkMakeImageFromRasterBitmapPriv(const SkBitmap& bm, SkCopyPixelsMode cpm,
+ uint32_t idForCopy) {
+ if (kAlways_SkCopyPixelsMode == cpm || (!bm.isImmutable() && kNever_SkCopyPixelsMode != cpm)) {
+ SkPixmap pmap;
+ if (bm.peekPixels(&pmap)) {
+ return MakeRasterCopyPriv(pmap, idForCopy);
+ } else {
+ return sk_sp<SkImage>();
+ }
+ }
+
+ return sk_make_sp<SkImage_Raster>(bm, kNever_SkCopyPixelsMode == cpm);
+}
+
+sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap& bm, SkCopyPixelsMode cpm) {
+ if (!SkImageInfoIsValid(bm.info()) || bm.rowBytes() < bm.info().minRowBytes()) {
+ return nullptr;
+ }
+
+ return SkMakeImageFromRasterBitmapPriv(bm, cpm, kNeedNewImageUniqueID);
+}
+
+const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* image) {
+ return ((const SkImage_Raster*)image)->getPixelRef();
+}
+
+bool SkImage_Raster::onAsLegacyBitmap(SkBitmap* bitmap) const {
+ // When we're a snapshot from a surface, our bitmap may not be marked immutable
+ // even though logically it always is; in that case we can't physically share our
+ // pixelRef, since the caller might call setImmutable() themselves
+ // (thus changing our state).
+ if (fBitmap.isImmutable()) {
+ SkIPoint origin = fBitmap.pixelRefOrigin();
+ bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes());
+ bitmap->setPixelRef(sk_ref_sp(fBitmap.pixelRef()), origin.x(), origin.y());
+ return true;
+ }
+ return this->INHERITED::onAsLegacyBitmap(bitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage_Raster::onMakeColorTypeAndColorSpace(GrRecordingContext*,
+ SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS) const {
+ SkPixmap src;
+ SkAssertResult(fBitmap.peekPixels(&src));
+
+ SkBitmap dst;
+ dst.allocPixels(fBitmap.info().makeColorType(targetCT).makeColorSpace(targetCS));
+
+ SkAssertResult(dst.writePixels(src));
+ dst.setImmutable();
+ return SkImage::MakeFromBitmap(dst);
+}
+
+sk_sp<SkImage> SkImage_Raster::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ // TODO: If our bitmap is immutable, then we could theoretically create another image sharing
+ // our pixelRef. That doesn't work (without more invasive logic), because the image gets its
+ // gen ID from the bitmap, which gets it from the pixelRef.
+ SkPixmap pixmap = fBitmap.pixmap();
+ pixmap.setColorSpace(std::move(newCS));
+ return SkImage::MakeRasterCopy(pixmap);
+}
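+// Usage sketch (assuming the public SkImage::reinterpretColorSpace wrapper
+// routes here for raster images):
+//   sk_sp<SkImage> tagged = img->reinterpretColorSpace(SkColorSpace::MakeSRGB());
+// As the code above shows, the pixels are copied unmodified; only the
+// color-space tag changes.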
diff --git a/gfx/skia/skia/src/image/SkReadPixelsRec.h b/gfx/skia/skia/src/image/SkReadPixelsRec.h
new file mode 100644
index 0000000000..dd88d6c1c1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkReadPixelsRec.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadPixelsRec_DEFINED
+#define SkReadPixelsRec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+
+/**
+ * Helper class to package and trim the parameters passed to readPixels()
+ */
+struct SkReadPixelsRec {
+ SkReadPixelsRec(const SkImageInfo& info, void* pixels, size_t rowBytes, int x, int y)
+ : fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fInfo(info)
+ , fX(x)
+ , fY(y)
+ {}
+
+ SkReadPixelsRec(const SkPixmap& pm, int x, int y)
+ : fPixels(pm.writable_addr())
+ , fRowBytes(pm.rowBytes())
+ , fInfo(pm.info())
+ , fX(x)
+ , fY(y)
+ {}
+
+ void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+ int fX;
+ int fY;
+
+ /*
+ * On true, trim() may have modified its fields (except fRowBytes) to make the
+ * rec a legal subset of the specified src width/height.
+ *
+ * On false, the rec is left unchanged, indicating that it does not overlap src,
+ * or is not valid (e.g. bad fInfo) for readPixels().
+ */
+ bool trim(int srcWidth, int srcHeight);
+};
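+
+// Typical caller shape (a sketch; `src` is an assumed source with width/height):
+//   SkReadPixelsRec rec(dstInfo, dstPixels, dstRowBytes, x, y);
+//   if (!rec.trim(src.width(), src.height())) {
+//       return false;   // no overlap, or an invalid request
+//   }
+//   // rec.fInfo, rec.fPixels, rec.fX, rec.fY now describe the clipped read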
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface.cpp b/gfx/skia/skia/src/image/SkSurface.cpp
new file mode 100644
index 0000000000..a04982dc51
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface.cpp
@@ -0,0 +1,643 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <atomic>
+#include <cmath>
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFontLCDConfig.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/image/SkSurface_Base.h"
+
+static SkPixelGeometry compute_default_geometry() {
+ SkFontLCDConfig::LCDOrder order = SkFontLCDConfig::GetSubpixelOrder();
+ if (SkFontLCDConfig::kNONE_LCDOrder == order) {
+ return kUnknown_SkPixelGeometry;
+ } else {
+ // Bit0 is RGB(0), BGR(1)
+ // Bit1 is H(0), V(1)
+ const SkPixelGeometry gGeo[] = {
+ kRGB_H_SkPixelGeometry,
+ kBGR_H_SkPixelGeometry,
+ kRGB_V_SkPixelGeometry,
+ kBGR_V_SkPixelGeometry,
+ };
+ int index = 0;
+ if (SkFontLCDConfig::kBGR_LCDOrder == order) {
+ index |= 1;
+ }
+ if (SkFontLCDConfig::kVertical_LCDOrientation == SkFontLCDConfig::GetSubpixelOrientation()){
+ index |= 2;
+ }
+ return gGeo[index];
+ }
+}
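+// Worked example: kBGR_LCDOrder sets bit 0 and a vertical orientation sets
+// bit 1, so BGR + vertical yields index 3, i.e. kBGR_V_SkPixelGeometry.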
+
+SkSurfaceProps::SkSurfaceProps() : fFlags(0), fPixelGeometry(kUnknown_SkPixelGeometry) {}
+
+SkSurfaceProps::SkSurfaceProps(InitType) : fFlags(0), fPixelGeometry(compute_default_geometry()) {}
+
+SkSurfaceProps::SkSurfaceProps(uint32_t flags, InitType)
+ : fFlags(flags)
+ , fPixelGeometry(compute_default_geometry())
+{}
+
+SkSurfaceProps::SkSurfaceProps(uint32_t flags, SkPixelGeometry pg)
+ : fFlags(flags), fPixelGeometry(pg)
+{}
+
+SkSurfaceProps::SkSurfaceProps(const SkSurfaceProps& other)
+ : fFlags(other.fFlags)
+ , fPixelGeometry(other.fPixelGeometry)
+{}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSurface_Base::SkSurface_Base(int width, int height, const SkSurfaceProps* props)
+ : INHERITED(width, height, props) {
+}
+
+SkSurface_Base::SkSurface_Base(const SkImageInfo& info, const SkSurfaceProps* props)
+ : INHERITED(info, props) {
+}
+
+SkSurface_Base::~SkSurface_Base() {
+ // in case the canvas outlives us, null its pointer back to this surface
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(nullptr);
+ }
+}
+
+GrBackendTexture SkSurface_Base::onGetBackendTexture(BackendHandleAccess) {
+ return GrBackendTexture(); // invalid
+}
+
+GrBackendRenderTarget SkSurface_Base::onGetBackendRenderTarget(BackendHandleAccess) {
+ return GrBackendRenderTarget(); // invalid
+}
+
+bool SkSurface_Base::onReplaceBackendTexture(const GrBackendTexture&,
+ GrSurfaceOrigin,
+ TextureReleaseProc,
+ ReleaseContext) {
+ return false;
+}
+
+void SkSurface_Base::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) {
+ auto image = this->makeImageSnapshot();
+ if (image) {
+ canvas->drawImage(image, x, y, paint);
+ }
+}
+
+void SkSurface_Base::onAsyncRescaleAndReadPixels(const SkImageInfo& info, const SkIRect& srcRect,
+ SkSurface::RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ SkSurface::ReadPixelsCallback callback,
+ SkSurface::ReadPixelsContext context) {
+ int srcW = srcRect.width();
+ int srcH = srcRect.height();
+ float sx = (float)info.width() / srcW;
+ float sy = (float)info.height() / srcH;
+ // How many bilerp/bicubic steps to do in X and Y. + means upscaling, - means downscaling.
+ int stepsX;
+ int stepsY;
+ if (rescaleQuality > kNone_SkFilterQuality) {
+ stepsX = static_cast<int>((sx > 1.f) ? std::ceil(std::log2f(sx))
+ : std::floor(std::log2f(sx)));
+ stepsY = static_cast<int>((sy > 1.f) ? std::ceil(std::log2f(sy))
+ : std::floor(std::log2f(sy)));
+ } else {
+ stepsX = sx != 1.f;
+ stepsY = sy != 1.f;
+ }
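+ // For example, rescaling 100 -> 400 in X gives sx == 4 and stepsX == 2 (two
+ // doublings), while 400 -> 100 gives sx == 0.25 and stepsX == -2 (two halvings).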
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ if (stepsX < 0 || stepsY < 0) {
+ // Don't trigger MIP generation. We don't currently have a way to trigger bicubic for
+ // downscaling draws.
+ rescaleQuality = std::min(rescaleQuality, kLow_SkFilterQuality);
+ }
+ paint.setFilterQuality(rescaleQuality);
+ sk_sp<SkSurface> src(SkRef(this));
+ int srcX = srcRect.fLeft;
+ int srcY = srcRect.fTop;
+ SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
+ // Assume we should ignore the rescale linear request if the surface has no color space since
+ // it's unclear how we'd linearize from an unknown color space.
+ if (rescaleGamma == SkSurface::RescaleGamma::kLinear &&
+ this->getCanvas()->imageInfo().colorSpace() &&
+ !this->getCanvas()->imageInfo().colorSpace()->gammaIsLinear()) {
+ auto cs = this->getCanvas()->imageInfo().colorSpace()->makeLinearGamma();
+ // Promote to F16 color type to preserve precision.
+ auto ii = SkImageInfo::Make(srcW, srcH, kRGBA_F16_SkColorType,
+ this->getCanvas()->imageInfo().alphaType(), std::move(cs));
+ auto linearSurf = this->makeSurface(ii);
+ if (!linearSurf) {
+ // Maybe F16 isn't supported? Try again with original color type.
+ ii = ii.makeColorType(this->getCanvas()->imageInfo().colorType());
+ linearSurf = this->makeSurface(ii);
+ if (!linearSurf) {
+ callback(context, nullptr);
+ return;
+ }
+ }
+ this->draw(linearSurf->getCanvas(), -srcX, -srcY, &paint);
+ src = std::move(linearSurf);
+ srcX = 0;
+ srcY = 0;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+ while (stepsX || stepsY) {
+ int nextW = info.width();
+ int nextH = info.height();
+ if (stepsX < 0) {
+ nextW = info.width() << (-stepsX - 1);
+ stepsX++;
+ } else if (stepsX != 0) {
+ if (stepsX > 1) {
+ nextW = srcW * 2;
+ }
+ --stepsX;
+ }
+ if (stepsY < 0) {
+ nextH = info.height() << (-stepsY - 1);
+ stepsY++;
+ } else if (stepsY != 0) {
+ if (stepsY > 1) {
+ nextH = srcH * 2;
+ }
+ --stepsY;
+ }
+ auto ii = src->getCanvas()->imageInfo().makeWH(nextW, nextH);
+ if (!stepsX && !stepsY) {
+ // Might as well fold conversion to final info in the last step.
+ ii = info;
+ }
+ auto next = this->makeSurface(ii);
+ if (!next) {
+ callback(context, nullptr);
+ return;
+ }
+ next->getCanvas()->drawImageRect(
+ src->makeImageSnapshot(), SkIRect::MakeXYWH(srcX, srcY, srcW, srcH),
+ SkRect::MakeWH((float)nextW, (float)nextH), &paint, constraint);
+ src = std::move(next);
+ srcX = srcY = 0;
+ srcW = nextW;
+ srcH = nextH;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+
+ size_t rowBytes = info.minRowBytes();
+ std::unique_ptr<char[]> data(new char[info.height() * rowBytes]);
+ SkPixmap pm(info, data.get(), rowBytes);
+ if (src->readPixels(pm, srcX, srcY)) {
+ class Result : public AsyncReadResult {
+ public:
+ Result(std::unique_ptr<const char[]> data, size_t rowBytes)
+ : fData(std::move(data)), fRowBytes(rowBytes) {}
+ int count() const override { return 1; }
+ const void* data(int i) const override { return fData.get(); }
+ size_t rowBytes(int i) const override { return fRowBytes; }
+
+ private:
+ std::unique_ptr<const char[]> fData;
+ size_t fRowBytes;
+ };
+ callback(context, skstd::make_unique<Result>(std::move(data), rowBytes));
+ } else {
+ callback(context, nullptr);
+ }
+}
+
+void SkSurface_Base::onAsyncRescaleAndReadPixelsYUV420(
+ SkYUVColorSpace yuvColorSpace, sk_sp<SkColorSpace> dstColorSpace, const SkIRect& srcRect,
+ const SkISize& dstSize, RescaleGamma rescaleGamma, SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback, ReadPixelsContext context) {
+ // TODO: Call non-YUV asyncRescaleAndReadPixels and then make our callback convert to YUV and
+ // call client's callback.
+ callback(context, nullptr);
+}
+
+bool SkSurface_Base::outstandingImageSnapshot() const {
+ return fCachedImage && !fCachedImage->unique();
+}
+
+void SkSurface_Base::aboutToDraw(ContentChangeMode mode) {
+ this->dirtyGenerationID();
+
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+
+ if (fCachedImage) {
+ // the surface may need to fork its backend, if it's sharing it with
+ // the cached image. Note: we only do this if there is an outstanding owner
+ // on the image (besides us).
+ bool unique = fCachedImage->unique();
+ if (!unique) {
+ this->onCopyOnWrite(mode);
+ }
+
+ // regardless of copy-on-write, we must drop our cached image now, so
+ // that the next request will get our new contents.
+ fCachedImage.reset();
+
+ if (unique) {
+ // Our content isn't held by any image now, so we can consider that content mutable.
+ // Raster surfaces need to be told it's safe to consider its pixels mutable again.
+ // We make this call after the reset() above so the subclass can assert there are no images.
+ this->onRestoreBackingMutability();
+ }
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->onDiscard();
+ }
+}
+
+uint32_t SkSurface_Base::newGenerationID() {
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ static std::atomic<uint32_t> nextID{1};
+ return nextID++;
+}
+
+static SkSurface_Base* asSB(SkSurface* surface) {
+ return static_cast<SkSurface_Base*>(surface);
+}
+
+static const SkSurface_Base* asConstSB(const SkSurface* surface) {
+ return static_cast<const SkSurface_Base*>(surface);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSurface::SkSurface(int width, int height, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(width), fHeight(height)
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+SkSurface::SkSurface(const SkImageInfo& info, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(info.width()), fHeight(info.height())
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+SkImageInfo SkSurface::imageInfo() {
+ // TODO: do we need to go through canvas for this?
+ return this->getCanvas()->imageInfo();
+}
+
+uint32_t SkSurface::generationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = asSB(this)->newGenerationID();
+ }
+ return fGenerationID;
+}
+
+void SkSurface::notifyContentWillChange(ContentChangeMode mode) {
+ asSB(this)->aboutToDraw(mode);
+}
+
+SkCanvas* SkSurface::getCanvas() {
+ return asSB(this)->getCachedCanvas();
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot() {
+ return asSB(this)->refCachedImage();
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot(const SkIRect& srcBounds) {
+ const SkIRect surfBounds = { 0, 0, fWidth, fHeight };
+ SkIRect bounds = srcBounds;
+ if (!bounds.intersect(surfBounds)) {
+ return nullptr;
+ }
+ SkASSERT(!bounds.isEmpty());
+ if (bounds == surfBounds) {
+ return this->makeImageSnapshot();
+ } else {
+ return asSB(this)->onNewImageSnapshot(&bounds);
+ }
+}
+
+sk_sp<SkSurface> SkSurface::makeSurface(const SkImageInfo& info) {
+ return asSB(this)->onNewSurface(info);
+}
+
+sk_sp<SkSurface> SkSurface::makeSurface(int width, int height) {
+ return this->makeSurface(this->imageInfo().makeWH(width, height));
+}
+
+void SkSurface::draw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ return asSB(this)->onDraw(canvas, x, y, paint);
+}
+
+bool SkSurface::peekPixels(SkPixmap* pmap) {
+ return this->getCanvas()->peekPixels(pmap);
+}
+
+bool SkSurface::readPixels(const SkPixmap& pm, int srcX, int srcY) {
+ return this->getCanvas()->readPixels(pm, srcX, srcY);
+}
+
+bool SkSurface::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) {
+ return this->readPixels({dstInfo, dstPixels, dstRowBytes}, srcX, srcY);
+}
+
+bool SkSurface::readPixels(const SkBitmap& bitmap, int srcX, int srcY) {
+ SkPixmap pm;
+ return bitmap.peekPixels(&pm) && this->readPixels(pm, srcX, srcY);
+}
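+// Example (sketch): reading the full surface back into a freshly allocated
+// bitmap (`surface` is assumed to be in scope).
+//   SkBitmap bm;
+//   bm.allocPixels(surface->imageInfo());
+//   surface->readPixels(bm, 0, 0);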
+
+// Stuff to keep the legacy async readback APIs working on top of the new implementation.
+namespace {
+struct BridgeContext {
+ SkSurface::ReadPixelsContext fClientContext;
+ SkSurface::LegacyReadPixelsCallback* fClientCallback;
+};
+struct BridgeContextYUV420 {
+ SkSurface::ReadPixelsContext fClientContext;
+ SkSurface::LegacyReadPixelsCallbackYUV420* fClientCallback;
+};
+} // anonymous namespace
+
+static void bridge_callback(SkSurface::ReadPixelsContext context,
+ std::unique_ptr<const SkSurface::AsyncReadResult> result) {
+ auto bridgeContext = static_cast<const BridgeContext*>(context);
+ if (!result || result->count() != 1) {
+ bridgeContext->fClientCallback(bridgeContext->fClientContext, nullptr, 0);
+ } else {
+ bridgeContext->fClientCallback(bridgeContext->fClientContext, result->data(0),
+ result->rowBytes(0));
+ }
+ delete bridgeContext;
+}
+
+static void bridge_callback_yuv420(SkSurface::ReadPixelsContext context,
+ std::unique_ptr<const SkSurface::AsyncReadResult> result) {
+ auto bridgeContext = static_cast<const BridgeContextYUV420*>(context);
+ if (!result || result->count() != 3) {
+ bridgeContext->fClientCallback(bridgeContext->fClientContext, nullptr, 0);
+ } else {
+ const void* data[] = {result->data(0), result->data(1), result->data(2)};
+ size_t rowBytes[] = {result->rowBytes(0), result->rowBytes(1), result->rowBytes(2)};
+ bridgeContext->fClientCallback(bridgeContext->fClientContext, data, rowBytes);
+ }
+ delete bridgeContext;
+}
+
+void SkSurface::asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ LegacyReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) ||
+ !SkImageInfoIsValid(info)) {
+ callback(context, nullptr, 0);
+ return;
+ }
+
+ auto bridgeContext = new BridgeContext{context, callback};
+ asSB(this)->onAsyncRescaleAndReadPixels(info, srcRect, rescaleGamma, rescaleQuality,
+ bridge_callback, bridgeContext);
+}
+
+void SkSurface::asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) ||
+ !SkImageInfoIsValid(info)) {
+ callback(context, nullptr);
+ return;
+ }
+ asSB(this)->onAsyncRescaleAndReadPixels(
+ info, srcRect, rescaleGamma, rescaleQuality, callback, context);
+}
+
+void SkSurface::asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ int dstW, int dstH,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ LegacyReadPixelsCallbackYUV420 callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) || (dstW & 0b1) ||
+ (dstH & 0b1)) {
+ callback(context, nullptr, nullptr);
+ return;
+ }
+ auto bridgeContext = new BridgeContextYUV420{context, callback};
+ asSB(this)->onAsyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace), srcRect,
+ {dstW, dstH},
+ rescaleGamma,
+ rescaleQuality,
+ bridge_callback_yuv420,
+ bridgeContext);
+}
+
+void SkSurface::asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) || dstSize.isZero() ||
+ (dstSize.width() & 0b1) || (dstSize.height() & 0b1)) {
+ callback(context, nullptr);
+ return;
+ }
+ asSB(this)->onAsyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleQuality,
+ callback,
+ context);
+}
+
+void SkSurface::writePixels(const SkPixmap& pmap, int x, int y) {
+ if (pmap.addr() == nullptr || pmap.width() <= 0 || pmap.height() <= 0) {
+ return;
+ }
+
+ const SkIRect srcR = SkIRect::MakeXYWH(x, y, pmap.width(), pmap.height());
+ const SkIRect dstR = SkIRect::MakeWH(this->width(), this->height());
+ if (SkIRect::Intersects(srcR, dstR)) {
+ ContentChangeMode mode = kRetain_ContentChangeMode;
+ if (srcR.contains(dstR)) {
+ mode = kDiscard_ContentChangeMode;
+ }
+ asSB(this)->aboutToDraw(mode);
+ asSB(this)->onWritePixels(pmap, x, y);
+ }
+}
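+// For instance, a pixmap that covers the entire surface takes the kDiscard path
+// above (srcR contains dstR), so the old contents need not be preserved; a
+// partial write uses kRetain so the existing pixels survive the copy-on-write.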
+
+void SkSurface::writePixels(const SkBitmap& src, int x, int y) {
+ SkPixmap pm;
+ if (src.peekPixels(&pm)) {
+ this->writePixels(pm, x, y);
+ }
+}
+
+GrBackendTexture SkSurface::getBackendTexture(BackendHandleAccess access) {
+ return asSB(this)->onGetBackendTexture(access);
+}
+
+GrBackendRenderTarget SkSurface::getBackendRenderTarget(BackendHandleAccess access) {
+ return asSB(this)->onGetBackendRenderTarget(access);
+}
+
+bool SkSurface::replaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ return asSB(this)->onReplaceBackendTexture(backendTexture, origin, textureReleaseProc,
+ releaseContext);
+}
+
+void SkSurface::flush() {
+ this->flush(BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+}
+
+GrSemaphoresSubmitted SkSurface::flush(BackendSurfaceAccess access, const GrFlushInfo& flushInfo) {
+ return asSB(this)->onFlush(access, flushInfo);
+}
+
+GrSemaphoresSubmitted SkSurface::flush(BackendSurfaceAccess access, GrFlushFlags flags,
+ int numSemaphores, GrBackendSemaphore signalSemaphores[],
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext) {
+ GrFlushInfo info;
+ info.fFlags = flags;
+ info.fNumSemaphores = numSemaphores;
+ info.fSignalSemaphores = signalSemaphores;
+ info.fFinishedProc = finishedProc;
+ info.fFinishedContext = finishedContext;
+ return this->flush(access, info);
+}
+
+GrSemaphoresSubmitted SkSurface::flush(BackendSurfaceAccess access, FlushFlags flags,
+ int numSemaphores, GrBackendSemaphore signalSemaphores[]) {
+ GrFlushFlags grFlags = flags == kSyncCpu_FlushFlag ? kSyncCpu_GrFlushFlag : kNone_GrFlushFlags;
+ GrFlushInfo info;
+ info.fFlags = grFlags;
+ info.fNumSemaphores = numSemaphores;
+ info.fSignalSemaphores = signalSemaphores;
+ return this->flush(access, info);
+}
+
+GrSemaphoresSubmitted SkSurface::flushAndSignalSemaphores(int numSemaphores,
+ GrBackendSemaphore signalSemaphores[]) {
+ GrFlushInfo info;
+ info.fNumSemaphores = numSemaphores;
+ info.fSignalSemaphores = signalSemaphores;
+ return this->flush(BackendSurfaceAccess::kNoAccess, info);
+}
+
+bool SkSurface::wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores) {
+ return asSB(this)->onWait(numSemaphores, waitSemaphores);
+}
+
+bool SkSurface::characterize(SkSurfaceCharacterization* characterization) const {
+ return asConstSB(this)->onCharacterize(characterization);
+}
+
+bool SkSurface::isCompatible(const SkSurfaceCharacterization& characterization) const {
+ return asConstSB(this)->onIsCompatible(characterization);
+}
+
+bool SkSurface::draw(SkDeferredDisplayList* ddl) {
+ return asSB(this)->onDraw(ddl);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+#include "include/utils/SkNoDrawCanvas.h"
+
+class SkNullSurface : public SkSurface_Base {
+public:
+ SkNullSurface(int width, int height) : SkSurface_Base(width, height, nullptr) {}
+
+protected:
+ SkCanvas* onNewCanvas() override {
+ return new SkNoDrawCanvas(this->width(), this->height());
+ }
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo& info) override {
+ return MakeNull(info.width(), info.height());
+ }
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subsetOrNull) override { return nullptr; }
+ void onWritePixels(const SkPixmap&, int x, int y) override {}
+ void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) override {}
+ void onCopyOnWrite(ContentChangeMode) override {}
+};
+
+sk_sp<SkSurface> SkSurface::MakeNull(int width, int height) {
+ if (width < 1 || height < 1) {
+ return nullptr;
+ }
+ return sk_sp<SkSurface>(new SkNullSurface(width, height));
+}
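+// Sketch of one use for a null surface (an assumption about intent, not stated
+// in this patch): exercising canvas-level code without rasterizing anything.
+//   auto surf = SkSurface::MakeNull(640, 480);
+//   surf->getCanvas()->drawRect(SkRect::MakeWH(10, 10), SkPaint());  // no-op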
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#if !SK_SUPPORT_GPU
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrContext*, SkBudgeted, const SkImageInfo&, int,
+ GrSurfaceOrigin, const SkSurfaceProps*, bool) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrRecordingContext*, const SkSurfaceCharacterization&,
+ SkBudgeted) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrContext*, const GrBackendTexture&,
+ GrSurfaceOrigin origin, int sampleCnt,
+ SkColorType, sk_sp<SkColorSpace>,
+ const SkSurfaceProps*,
+ TextureReleaseProc, ReleaseContext) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendRenderTarget(GrContext*,
+ const GrBackendRenderTarget&,
+ GrSurfaceOrigin origin,
+ SkColorType,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*,
+ RenderTargetReleaseProc, ReleaseContext) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTextureAsRenderTarget(GrContext*,
+ const GrBackendTexture&,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*) {
+ return nullptr;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Base.h b/gfx/skia/skia/src/image/SkSurface_Base.h
new file mode 100644
index 0000000000..7bff9a87e9
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Base.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Base_DEFINED
+#define SkSurface_Base_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkSurface.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkSurfacePriv.h"
+
+class SkSurface_Base : public SkSurface {
+public:
+ SkSurface_Base(int width, int height, const SkSurfaceProps*);
+ SkSurface_Base(const SkImageInfo&, const SkSurfaceProps*);
+ virtual ~SkSurface_Base();
+
+ virtual GrBackendTexture onGetBackendTexture(BackendHandleAccess);
+ virtual GrBackendRenderTarget onGetBackendRenderTarget(BackendHandleAccess);
+ virtual bool onReplaceBackendTexture(const GrBackendTexture&,
+ GrSurfaceOrigin,
+ TextureReleaseProc,
+ ReleaseContext);
+ /**
+ * Allocate a canvas that will draw into this surface. We will cache this
+ * canvas, to return the same object to the caller multiple times. We
+ * take ownership, and will destroy the canvas when we go out of
+ * scope.
+ */
+ virtual SkCanvas* onNewCanvas() = 0;
+
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo&) = 0;
+
+ /**
+ * Allocate an SkImage that represents the current contents of the surface.
+ * This needs to be able to outlive the surface itself (if need be), and
+ * must faithfully represent the current contents, even if the surface
+ * is changed after this is called (e.g. it is drawn to via its canvas).
+ *
+ * If a subset is specified, the impl must make a copy rather than rely on
+ * copy-on-write.
+ */
+ virtual sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset = nullptr) { return nullptr; }
+
+ virtual void onWritePixels(const SkPixmap&, int x, int y) = 0;
+
+ /**
+ * Default implementation does a rescale/read and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixels(const SkImageInfo&,
+ const SkIRect& srcRect,
+ RescaleGamma,
+ SkFilterQuality,
+ ReadPixelsCallback,
+ ReadPixelsContext);
+ /**
+ * Default implementation does a rescale/read/yuv conversion and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma,
+ SkFilterQuality,
+ ReadPixelsCallback,
+ ReadPixelsContext);
+
+ /**
+ * Default implementation:
+ *
+ * image = this->newImageSnapshot();
+ * if (image) {
+ * image->draw(canvas, ...);
+ * image->unref();
+ * }
+ */
+ virtual void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*);
+
+ /**
+ * Called as a performance hint when the Surface is allowed to make its contents
+ * undefined.
+ */
+ virtual void onDiscard() {}
+
+ /**
+ * If the surface is about to change, we call this so that our subclass
+ * can optionally fork their backend (copy-on-write) in case it was
+ * being shared with the cachedImage.
+ */
+ virtual void onCopyOnWrite(ContentChangeMode) = 0;
+
+ /**
+ * Signal the surface to remind its backing store that it's mutable again.
+ * Called only when we _didn't_ copy-on-write; we assume the copies start mutable.
+ */
+ virtual void onRestoreBackingMutability() {}
+
+ /**
+ * Issue any pending surface IO to the current backend 3D API and resolve any surface MSAA.
+ * Inserts the requested number of semaphores for the GPU to signal when work is complete on
+ * the GPU and initializes the array of GrBackendSemaphores with the signaled semaphores.
+ */
+ virtual GrSemaphoresSubmitted onFlush(BackendSurfaceAccess access, const GrFlushInfo&) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ /**
+ * Causes the current backend 3D API to wait on the passed-in semaphores before executing new
+ * commands on the GPU. Any previously submitted commands will not be blocked by these
+ * semaphores.
+ */
+ virtual bool onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores) {
+ return false;
+ }
+
+ virtual bool onCharacterize(SkSurfaceCharacterization*) const { return false; }
+ virtual bool onIsCompatible(const SkSurfaceCharacterization&) const { return false; }
+ virtual bool onDraw(const SkDeferredDisplayList*) { return false; }
+
+ inline SkCanvas* getCachedCanvas();
+ inline sk_sp<SkImage> refCachedImage();
+
+ bool hasCachedImage() const { return fCachedImage != nullptr; }
+
+ // called by SkSurface to compute a new genID
+ uint32_t newGenerationID();
+
+private:
+ std::unique_ptr<SkCanvas> fCachedCanvas;
+ sk_sp<SkImage> fCachedImage;
+
+ void aboutToDraw(ContentChangeMode mode);
+
+ // Returns true if there is an outstanding image-snapshot, indicating that a call to aboutToDraw
+ // would trigger a copy-on-write.
+ bool outstandingImageSnapshot() const;
+
+ friend class SkCanvas;
+ friend class SkSurface;
+
+ typedef SkSurface INHERITED;
+};
+
+SkCanvas* SkSurface_Base::getCachedCanvas() {
+ if (nullptr == fCachedCanvas) {
+ fCachedCanvas = std::unique_ptr<SkCanvas>(this->onNewCanvas());
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(this);
+ }
+ }
+ return fCachedCanvas.get();
+}
+
+sk_sp<SkImage> SkSurface_Base::refCachedImage() {
+ if (fCachedImage) {
+ return fCachedImage;
+ }
+
+ fCachedImage = this->onNewImageSnapshot();
+
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ return fCachedImage;
+}
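+
+// Note (illustrative): once refCachedImage() has handed out fCachedImage, a
+// subsequent draw to the surface sees !fCachedImage->unique() in aboutToDraw()
+// and forks the backing store via onCopyOnWrite().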
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.cpp b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
new file mode 100644
index 0000000000..623c54eab0
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
@@ -0,0 +1,741 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrTexture.h"
+#include "include/private/GrRecordingContext.h"
+#include "include/private/SkDeferredDisplayList.h"
+#include "src/core/SkImagePriv.h"
+#include "src/gpu/GrAHardwareBufferUtils.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrContextPriv.h"
+#include "src/gpu/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrRenderTarget.h"
+#include "src/gpu/GrRenderTargetContextPriv.h"
+#include "src/gpu/GrRenderTargetProxyPriv.h"
+#include "src/gpu/SkGpuDevice.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkSurface_Base.h"
+#include "src/image/SkSurface_Gpu.h"
+
+#if SK_SUPPORT_GPU
+
+SkSurface_Gpu::SkSurface_Gpu(sk_sp<SkGpuDevice> device)
+ : INHERITED(device->width(), device->height(), &device->surfaceProps())
+ , fDevice(std::move(device)) {
+ SkASSERT(fDevice->accessRenderTargetContext()->asSurfaceProxy()->priv().isExact());
+}
+
+SkSurface_Gpu::~SkSurface_Gpu() {
+}
+
+static GrRenderTarget* prepare_rt_for_external_access(SkSurface_Gpu* surface,
+ SkSurface::BackendHandleAccess access) {
+ switch (access) {
+ case SkSurface::kFlushRead_BackendHandleAccess:
+ break;
+ case SkSurface::kFlushWrite_BackendHandleAccess:
+ case SkSurface::kDiscardWrite_BackendHandleAccess:
+ // for now we don't special-case on Discard, but we may in the future.
+ surface->notifyContentWillChange(SkSurface::kRetain_ContentChangeMode);
+ break;
+ }
+
+ // Grab the render target *after* firing notifications, as it may get switched if CoW kicks in.
+ surface->getDevice()->flush(SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+ GrRenderTargetContext* rtc = surface->getDevice()->accessRenderTargetContext();
+ return rtc->accessRenderTarget();
+}
+
+GrBackendTexture SkSurface_Gpu::onGetBackendTexture(BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ if (!rt) {
+ return GrBackendTexture(); // invalid
+ }
+ GrTexture* texture = rt->asTexture();
+ if (texture) {
+ return texture->getBackendTexture();
+ }
+ return GrBackendTexture(); // invalid
+}
+
+GrBackendRenderTarget SkSurface_Gpu::onGetBackendRenderTarget(BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ if (!rt) {
+ return GrBackendRenderTarget(); // invalid
+ }
+
+ return rt->getBackendRenderTarget();
+}
+
+SkCanvas* SkSurface_Gpu::onNewCanvas() { return new SkCanvas(fDevice); }
+
+sk_sp<SkSurface> SkSurface_Gpu::onNewSurface(const SkImageInfo& info) {
+ int sampleCount = fDevice->accessRenderTargetContext()->numSamples();
+ GrSurfaceOrigin origin = fDevice->accessRenderTargetContext()->origin();
+ // TODO: Make caller specify this (change virtual signature of onNewSurface).
+ static const SkBudgeted kBudgeted = SkBudgeted::kNo;
+ return SkSurface::MakeRenderTarget(fDevice->context(), kBudgeted, info, sampleCount,
+ origin, &this->props());
+}
+
+sk_sp<SkImage> SkSurface_Gpu::onNewImageSnapshot(const SkIRect* subset) {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ if (!rtc) {
+ return nullptr;
+ }
+
+ GrContext* ctx = fDevice->context();
+
+ if (!rtc->asSurfaceProxy()) {
+ return nullptr;
+ }
+
+ SkBudgeted budgeted = rtc->asSurfaceProxy()->isBudgeted();
+
+ sk_sp<GrTextureProxy> srcProxy = rtc->asTextureProxyRef();
+
+ if (subset) {
+ srcProxy = GrSurfaceProxy::Copy(ctx, rtc->asSurfaceProxy(), rtc->colorInfo().colorType(),
+ rtc->mipMapped(), *subset, SkBackingFit::kExact, budgeted);
+ } else if (!srcProxy || rtc->priv().refsWrappedObjects()) {
+ // If the original render target is a buffer originally created by the client, then we don't
+ // want to ever retarget the SkSurface at another buffer we create. Force a copy now to avoid
+ // copy-on-write.
+ SkASSERT(rtc->origin() == rtc->asSurfaceProxy()->origin());
+
+ srcProxy = GrSurfaceProxy::Copy(ctx, rtc->asSurfaceProxy(), rtc->colorInfo().colorType(),
+ rtc->mipMapped(), SkBackingFit::kExact, budgeted);
+ }
+
+ const SkImageInfo info = fDevice->imageInfo();
+ sk_sp<SkImage> image;
+ if (srcProxy) {
+ // The renderTargetContext coming out of SkGpuDevice should always be exact and the
+ // above copy creates a kExact surfaceContext.
+ SkASSERT(srcProxy->priv().isExact());
+ image = sk_make_sp<SkImage_Gpu>(sk_ref_sp(ctx), kNeedNewImageUniqueID, info.alphaType(),
+ std::move(srcProxy), info.refColorSpace());
+ }
+ return image;
+}
+
+void SkSurface_Gpu::onWritePixels(const SkPixmap& src, int x, int y) {
+ fDevice->writePixels(src, x, y);
+}
+
+void SkSurface_Gpu::onAsyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ auto* rtc = this->fDevice->accessRenderTargetContext();
+ rtc->asyncRescaleAndReadPixels(info, srcRect, rescaleGamma, rescaleQuality, callback, context);
+}
+
+void SkSurface_Gpu::onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ auto* rtc = this->fDevice->accessRenderTargetContext();
+ rtc->asyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleQuality,
+ callback,
+ context);
+}
+
+// Create a new render target and, if necessary, copy the contents of the old
+// render target into it. Note that this flushes the SkGpuDevice but
+// doesn't force an OpenGL flush.
+void SkSurface_Gpu::onCopyOnWrite(ContentChangeMode mode) {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+
+ // are we sharing our backing proxy with the image? Note this call should never create a new
+ // image because onCopyOnWrite is only called when there is a cached image.
+ sk_sp<SkImage> image(this->refCachedImage());
+ SkASSERT(image);
+
+ GrSurfaceProxy* imageProxy = ((SkImage_Base*) image.get())->peekProxy();
+ SkASSERT(imageProxy);
+
+ if (rtc->asSurfaceProxy()->underlyingUniqueID() == imageProxy->underlyingUniqueID()) {
+ fDevice->replaceRenderTargetContext(SkSurface::kRetain_ContentChangeMode == mode);
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->SkSurface_Gpu::onDiscard();
+ }
+}
+
+void SkSurface_Gpu::onDiscard() {
+ fDevice->accessRenderTargetContext()->discard();
+}
+
+GrSemaphoresSubmitted SkSurface_Gpu::onFlush(BackendSurfaceAccess access,
+ const GrFlushInfo& info) {
+ return fDevice->flush(access, info);
+}
+
+bool SkSurface_Gpu::onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores) {
+ return fDevice->wait(numSemaphores, waitSemaphores);
+}
+
+bool SkSurface_Gpu::onCharacterize(SkSurfaceCharacterization* characterization) const {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ size_t maxResourceBytes = ctx->getResourceCacheLimit();
+
+ bool mipmapped = rtc->asTextureProxy() ? GrMipMapped::kYes == rtc->asTextureProxy()->mipMapped()
+ : false;
+
+ SkColorType ct = GrColorTypeToSkColorType(rtc->colorInfo().colorType());
+ if (ct == kUnknown_SkColorType) {
+ return false;
+ }
+
+ bool usesGLFBO0 = rtc->asRenderTargetProxy()->rtPriv().glRTFBOIDIs0();
+ // We should never get in the situation where we have a texture render target that is also
+ // backed by FBO 0.
+ SkASSERT(!usesGLFBO0 || !SkToBool(rtc->asTextureProxy()));
+
+ SkImageInfo ii = SkImageInfo::Make(rtc->width(), rtc->height(), ct, kPremul_SkAlphaType,
+ rtc->colorInfo().refColorSpace());
+
+ GrBackendFormat format = rtc->asSurfaceProxy()->backendFormat();
+
+ characterization->set(ctx->threadSafeProxy(), maxResourceBytes, ii, format,
+ rtc->origin(), rtc->numSamples(),
+ SkSurfaceCharacterization::Textureable(SkToBool(rtc->asTextureProxy())),
+ SkSurfaceCharacterization::MipMapped(mipmapped),
+ SkSurfaceCharacterization::UsesGLFBO0(usesGLFBO0),
+ SkSurfaceCharacterization::VulkanSecondaryCBCompatible(false),
+ GrProtected(rtc->asRenderTargetProxy()->isProtected()),
+ this->props());
+ return true;
+}
+
+void SkSurface_Gpu::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) {
+ // If the dst is also GPU we try to not force a new image snapshot (by calling the base class
+ // onDraw) since that may not always perform the copy-on-write optimization.
+ auto tryDraw = [&] {
+ SkASSERT(fDevice->context()->priv().asDirectContext());
+ GrContext* context = fDevice->context();
+ GrContext* canvasContext = canvas->getGrContext();
+ if (!canvasContext) {
+ return false;
+ }
+ if (!canvasContext->priv().asDirectContext() ||
+ canvasContext->priv().contextID() != context->priv().contextID()) {
+ return false;
+ }
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ if (!rtc) {
+ return false;
+ }
+ sk_sp<GrTextureProxy> srcProxy = rtc->asTextureProxyRef();
+ if (!srcProxy) {
+ return false;
+ }
+ // Possibly we could skip making an image here if SkGpuDevice exposed a lower level way
+ // of drawing a texture proxy.
+ const SkImageInfo info = fDevice->imageInfo();
+ sk_sp<SkImage> image;
+ image = sk_make_sp<SkImage_Gpu>(sk_ref_sp(context), kNeedNewImageUniqueID, info.alphaType(),
+ std::move(srcProxy), info.refColorSpace());
+ canvas->drawImage(image, x, y, paint);
+ return true;
+ };
+ if (!tryDraw()) {
+ INHERITED::onDraw(canvas, x, y, paint);
+ }
+}
+
+bool SkSurface_Gpu::onIsCompatible(const SkSurfaceCharacterization& characterization) const {
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ if (!characterization.isValid()) {
+ return false;
+ }
+
+ if (characterization.vulkanSecondaryCBCompatible()) {
+ return false;
+ }
+
+ // As long as the current state of the context allows for greater or equal resources,
+ // we allow the DDL to be replayed.
+ // DDL TODO: should we just remove the resource check and ignore the cache limits on playback?
+ size_t maxResourceBytes = ctx->getResourceCacheLimit();
+
+ if (characterization.isTextureable()) {
+ if (!rtc->asTextureProxy()) {
+ // If the characterization was textureable we require the replay dest to also be
+ // textureable. If the characterized surface wasn't textureable we allow the replay
+ // dest to be textureable.
+ return false;
+ }
+
+ if (characterization.isMipMapped() &&
+ GrMipMapped::kNo == rtc->asTextureProxy()->mipMapped()) {
+ // Fail if the DDL's surface was mipmapped but the replay surface is not.
+ // Allow drawing to proceed if the DDL was not mipmapped but the replay surface is.
+ return false;
+ }
+ }
+
+ if (characterization.usesGLFBO0() != rtc->asRenderTargetProxy()->rtPriv().glRTFBOIDIs0()) {
+ return false;
+ }
+
+ SkColorType rtcColorType = GrColorTypeToSkColorType(rtc->colorInfo().colorType());
+ if (rtcColorType == kUnknown_SkColorType) {
+ return false;
+ }
+
+ GrProtected isProtected = GrProtected(rtc->asSurfaceProxy()->isProtected());
+
+ return characterization.contextInfo() && characterization.contextInfo()->priv().matches(ctx) &&
+ characterization.cacheMaxResourceBytes() <= maxResourceBytes &&
+ characterization.origin() == rtc->origin() &&
+ characterization.backendFormat() == rtc->asSurfaceProxy()->backendFormat() &&
+ characterization.width() == rtc->width() && characterization.height() == rtc->height() &&
+ characterization.colorType() == rtcColorType &&
+ characterization.sampleCount() == rtc->numSamples() &&
+ SkColorSpace::Equals(characterization.colorSpace(), rtc->colorInfo().colorSpace()) &&
+ characterization.isProtected() == isProtected &&
+ characterization.surfaceProps() == rtc->surfaceProps();
+}
+
+bool SkSurface_Gpu::onDraw(const SkDeferredDisplayList* ddl) {
+ if (!ddl || !this->isCompatible(ddl->characterization())) {
+ return false;
+ }
+
+ GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
+ GrContext* ctx = fDevice->context();
+
+ ctx->priv().copyRenderTasksFromDDL(ddl, rtc->asRenderTargetProxy());
+ return true;
+}
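+// Sketch of the DDL flow this replay supports (recorder usage assumed from the
+// public API, not shown in this patch):
+//   SkDeferredDisplayListRecorder recorder(characterization);
+//   // ... record draws on recorder.getCanvas(), possibly on another thread ...
+//   surface->draw(recorder.detach().get());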
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrRecordingContext* context,
+ const SkSurfaceCharacterization& c,
+ SkBudgeted budgeted) {
+ if (!context || !c.isValid()) {
+ return nullptr;
+ }
+
+ if (c.usesGLFBO0()) {
+ // If we are making the surface we will never use FBO0.
+ return nullptr;
+ }
+
+ if (c.vulkanSecondaryCBCompatible()) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(c.colorType());
+
+ auto rtc = context->priv().makeDeferredRenderTargetContext(SkBackingFit::kExact,
+ c.width(),
+ c.height(),
+ grColorType,
+ c.refColorSpace(),
+ c.sampleCount(),
+ GrMipMapped(c.isMipMapped()),
+ c.origin(),
+ &c.surfaceProps(),
+ budgeted,
+ c.isProtected());
+ if (!rtc) {
+ return nullptr;
+ }
+
+ // CONTEXT TODO: remove this use of 'backdoor' to create an SkGpuDevice
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(context->priv().backdoor(), std::move(rtc),
+ SkGpuDevice::kClear_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+
+ sk_sp<SkSurface> result = sk_make_sp<SkSurface_Gpu>(std::move(device));
+#ifdef SK_DEBUG
+ if (result) {
+ SkASSERT(result->isCompatible(c));
+ }
+#endif
+
+ return result;
+}
+
+static bool validate_backend_texture(const GrCaps* caps, const GrBackendTexture& tex,
+ int sampleCnt, GrColorType grCT,
+ bool texturable) {
+ if (!tex.isValid()) {
+ return false;
+ }
+
+ GrBackendFormat backendFormat = tex.getBackendFormat();
+ if (!backendFormat.isValid()) {
+ return false;
+ }
+
+ if (!caps->areColorTypeAndFormatCompatible(grCT, backendFormat)) {
+ return false;
+ }
+
+ if (!caps->isFormatAsColorTypeRenderable(grCT, backendFormat, sampleCnt)) {
+ return false;
+ }
+
+ if (texturable && !caps->isFormatTexturable(backendFormat)) {
+ return false;
+ }
+
+ return true;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrContext* context,
+ const SkSurfaceCharacterization& c,
+ const GrBackendTexture& backendTexture,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ if (!context || !c.isValid()) {
+ return nullptr;
+ }
+
+ if (c.usesGLFBO0()) {
+ // If we are making the surface we will never use FBO0.
+ return nullptr;
+ }
+
+ if (!c.isCompatible(backendTexture)) {
+ return nullptr;
+ }
+
+ GrColorType grCT = SkColorTypeAndFormatToGrColorType(context->priv().caps(), c.colorType(),
+ backendTexture.getBackendFormat());
+ if (grCT == GrColorType::kUnknown) {
+ return nullptr;
+ }
+
+ if (!validate_backend_texture(context->priv().caps(), backendTexture,
+ c.sampleCount(), grCT, true)) {
+ return nullptr;
+ }
+
+ auto rtc = context->priv().makeBackendTextureRenderTargetContext(
+ backendTexture, c.origin(), c.sampleCount(), grCT, c.refColorSpace(), &c.surfaceProps(),
+ textureReleaseProc, releaseContext);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ auto device = SkGpuDevice::Make(context, std::move(rtc), SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return nullptr;
+ }
+
+ sk_sp<SkSurface> result = sk_make_sp<SkSurface_Gpu>(std::move(device));
+#ifdef SK_DEBUG
+ if (result) {
+ SkASSERT(result->isCompatible(c));
+ }
+#endif
+
+ return result;
+}
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrContext* ctx, SkBudgeted budgeted,
+ const SkImageInfo& info, int sampleCount,
+ GrSurfaceOrigin origin, const SkSurfaceProps* props,
+ bool shouldCreateWithMips) {
+ if (!ctx) {
+ return nullptr;
+ }
+ sampleCount = SkTMax(1, sampleCount);
+ GrMipMapped mipMapped = shouldCreateWithMips ? GrMipMapped::kYes : GrMipMapped::kNo;
+
+ if (!ctx->priv().caps()->mipMapSupport()) {
+ mipMapped = GrMipMapped::kNo;
+ }
+
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(
+ ctx, budgeted, info, sampleCount, origin, props, mipMapped,
+ SkGpuDevice::kClear_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface_Gpu::MakeWrappedRenderTarget(
+ GrContext* context, std::unique_ptr<GrRenderTargetContext> rtc) {
+ if (!context) {
+ return nullptr;
+ }
+
+ auto device = SkGpuDevice::Make(context, std::move(rtc), SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrContext* context, const GrBackendTexture& tex,
+ GrSurfaceOrigin origin, int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkSurface::TextureReleaseProc textureReleaseProc,
+ SkSurface::ReleaseContext releaseContext) {
+ if (!context) {
+ return nullptr;
+ }
+ sampleCnt = SkTMax(1, sampleCnt);
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(context->priv().caps(), colorType,
+ tex.getBackendFormat());
+ if (grColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+
+ if (!validate_backend_texture(context->priv().caps(), tex, sampleCnt, grColorType, true)) {
+ return nullptr;
+ }
+
+ auto rtc = context->priv().makeBackendTextureRenderTargetContext(
+ tex, origin, sampleCnt, grColorType, std::move(colorSpace), props, textureReleaseProc,
+ releaseContext);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ auto device = SkGpuDevice::Make(context, std::move(rtc), SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+bool SkSurface_Gpu::onReplaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin, TextureReleaseProc releaseProc,
+ ReleaseContext releaseContext) {
+ auto context = this->fDevice->context();
+ if (context->abandoned()) {
+ return false;
+ }
+ if (!backendTexture.isValid()) {
+ return false;
+ }
+ if (backendTexture.width() != this->width() || backendTexture.height() != this->height()) {
+ return false;
+ }
+ auto* oldRTC = fDevice->accessRenderTargetContext();
+ auto oldProxy = sk_ref_sp(oldRTC->asTextureProxy());
+ if (!oldProxy) {
+ return false;
+ }
+ auto* oldTexture = oldProxy->peekTexture();
+ if (!oldTexture) {
+ return false;
+ }
+ if (!oldTexture->resourcePriv().refsWrappedObjects()) {
+ return false;
+ }
+ if (oldTexture->backendFormat() != backendTexture.getBackendFormat()) {
+ return false;
+ }
+ if (oldTexture->getBackendTexture().isSameTexture(backendTexture)) {
+ return false;
+ }
+ SkASSERT(oldTexture->asRenderTarget());
+ int sampleCnt = oldTexture->asRenderTarget()->numSamples();
+ GrColorType grColorType = SkColorTypeToGrColorType(this->getCanvas()->imageInfo().colorType());
+ auto colorSpace = sk_ref_sp(oldRTC->colorInfo().colorSpace());
+ if (!validate_backend_texture(context->priv().caps(), backendTexture,
+ sampleCnt, grColorType, true)) {
+ return false;
+ }
+ auto rtc =
+ context->priv().makeBackendTextureRenderTargetContext(backendTexture,
+ origin,
+ sampleCnt,
+ oldRTC->colorInfo().colorType(),
+ std::move(colorSpace),
+ &this->props(),
+ releaseProc,
+ releaseContext);
+ if (!rtc) {
+ return false;
+ }
+ fDevice->replaceRenderTargetContext(std::move(rtc), true);
+ return true;
+}
+
+bool validate_backend_render_target(const GrCaps* caps, const GrBackendRenderTarget& rt,
+ GrColorType grCT) {
+ if (!caps->areColorTypeAndFormatCompatible(grCT, rt.getBackendFormat())) {
+ return false;
+ }
+
+ if (!caps->isFormatAsColorTypeRenderable(grCT, rt.getBackendFormat(), rt.sampleCnt())) {
+ return false;
+ }
+ return true;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendRenderTarget(GrContext* context,
+ const GrBackendRenderTarget& rt,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkSurface::RenderTargetReleaseProc relProc,
+ SkSurface::ReleaseContext releaseContext) {
+ if (!context) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(context->priv().caps(), colorType,
+ rt.getBackendFormat());
+ if (grColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+
+ if (!validate_backend_render_target(context->priv().caps(), rt, grColorType)) {
+ return nullptr;
+ }
+
+ auto rtc = context->priv().makeBackendRenderTargetRenderTargetContext(
+ rt, origin, grColorType, std::move(colorSpace), props, relProc, releaseContext);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ auto device = SkGpuDevice::Make(context, std::move(rtc), SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTextureAsRenderTarget(GrContext* context,
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ if (!context) {
+ return nullptr;
+ }
+
+ sampleCnt = SkTMax(1, sampleCnt);
+ GrColorType grColorType = SkColorTypeAndFormatToGrColorType(context->priv().caps(), colorType,
+ tex.getBackendFormat());
+ if (grColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+ if (!validate_backend_texture(context->priv().caps(), tex, sampleCnt, grColorType, false)) {
+ return nullptr;
+ }
+
+ auto rtc = context->priv().makeBackendTextureAsRenderTargetRenderTargetContext(
+ tex, origin, sampleCnt, grColorType, std::move(colorSpace), props);
+ if (!rtc) {
+ return nullptr;
+ }
+
+ auto device = SkGpuDevice::Make(context, std::move(rtc), SkGpuDevice::kUninit_InitContents);
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+sk_sp<SkSurface> SkSurface::MakeFromAHardwareBuffer(GrContext* context,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(hardwareBuffer, &bufferDesc);
+
+ if (!SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT)) {
+ return nullptr;
+ }
+
+ bool isTextureable = SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE);
+ bool isProtectedContent = SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT);
+
+ // We currently don't support protected content
+ if (isProtectedContent) {
+ SkDebugf("We currently don't support protected content on android\n");
+ return nullptr;
+ }
+
+ GrBackendFormat backendFormat = GrAHardwareBufferUtils::GetBackendFormat(context,
+ hardwareBuffer,
+ bufferDesc.format,
+ true);
+ if (!backendFormat.isValid()) {
+ return nullptr;
+ }
+
+ if (isTextureable) {
+ GrAHardwareBufferUtils::DeleteImageProc deleteImageProc = nullptr;
+ GrAHardwareBufferUtils::UpdateImageProc updateImageProc = nullptr;
+ GrAHardwareBufferUtils::TexImageCtx deleteImageCtx = nullptr;
+
+ GrBackendTexture backendTexture =
+ GrAHardwareBufferUtils::MakeBackendTexture(context, hardwareBuffer,
+ bufferDesc.width, bufferDesc.height,
+ &deleteImageProc, &updateImageProc,
+ &deleteImageCtx, isProtectedContent,
+ backendFormat, true);
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+
+ SkColorType colorType =
+ GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(bufferDesc.format);
+
+ sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(context, backendTexture,
+ origin, 0, colorType, std::move(colorSpace), surfaceProps, deleteImageProc,
+ deleteImageCtx);
+
+ if (!surface) {
+ SkASSERT(deleteImageProc);
+ deleteImageProc(deleteImageCtx);
+ }
+ return surface;
+ } else {
+ return nullptr;
+ }
+}
+#endif
+
+#endif
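The isCompatible()/onDraw() pair above is the replay side of Skia's deferred display list (DDL) machinery: a DDL is recorded against an SkSurfaceCharacterization and replays only onto a surface whose characterization still matches. A minimal sketch of that round trip, assuming an already-created GPU-backed surface (the names are illustrative; the API is the one this revision ships):

    #include "include/core/SkCanvas.h"
    #include "include/core/SkDeferredDisplayListRecorder.h"
    #include "include/core/SkSurface.h"
    #include "include/core/SkSurfaceCharacterization.h"
    #include <memory>

    static void record_and_replay(sk_sp<SkSurface> surface) {
        SkSurfaceCharacterization characterization;
        if (!surface->characterize(&characterization)) {
            return;  // e.g. raster surfaces cannot be characterized
        }
        SkDeferredDisplayListRecorder recorder(characterization);
        SkCanvas* canvas = recorder.getCanvas();  // recording may happen on another thread
        canvas->clear(SK_ColorBLUE);
        std::unique_ptr<SkDeferredDisplayList> ddl = recorder.detach();
        // SkSurface::draw() lands in onDraw() above, which rejects the DDL
        // via isCompatible() if the characterizations have diverged.
        surface->draw(ddl.get());
    }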
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.h b/gfx/skia/skia/src/image/SkSurface_Gpu.h
new file mode 100644
index 0000000000..e27e7e5df1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Gpu_DEFINED
+#define SkSurface_Gpu_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "src/image/SkSurface_Base.h"
+
+#if SK_SUPPORT_GPU
+
+class GrBackendFormat;
+class SkGpuDevice;
+
+class SkSurface_Gpu : public SkSurface_Base {
+public:
+ SkSurface_Gpu(sk_sp<SkGpuDevice>);
+ ~SkSurface_Gpu() override;
+
+ // This is an internal-only factory
+ static sk_sp<SkSurface> MakeWrappedRenderTarget(GrContext*,
+ std::unique_ptr<GrRenderTargetContext>);
+
+ GrBackendTexture onGetBackendTexture(BackendHandleAccess) override;
+ GrBackendRenderTarget onGetBackendRenderTarget(BackendHandleAccess) override;
+ bool onReplaceBackendTexture(const GrBackendTexture&, GrSurfaceOrigin, TextureReleaseProc,
+ ReleaseContext) override;
+
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset) override;
+ void onWritePixels(const SkPixmap&, int x, int y) override;
+ void onAsyncRescaleAndReadPixels(const SkImageInfo& info, const SkIRect& srcRect,
+ RescaleGamma rescaleGamma, SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) override;
+ void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ SkFilterQuality rescaleQuality,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) override;
+
+ void onCopyOnWrite(ContentChangeMode) override;
+ void onDiscard() override;
+ GrSemaphoresSubmitted onFlush(BackendSurfaceAccess access, const GrFlushInfo& info) override;
+ bool onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores) override;
+ bool onCharacterize(SkSurfaceCharacterization*) const override;
+ bool onIsCompatible(const SkSurfaceCharacterization&) const override;
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) override;
+ bool onDraw(const SkDeferredDisplayList*) override;
+
+ SkGpuDevice* getDevice() { return fDevice.get(); }
+
+private:
+ sk_sp<SkGpuDevice> fDevice;
+
+ typedef SkSurface_Base INHERITED;
+};
+
+#endif // SK_SUPPORT_GPU
+
+#endif // SkSurface_Gpu_DEFINED
diff --git a/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm b/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm
new file mode 100644
index 0000000000..dc410886f1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSurface.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContext.h"
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+#if SK_SUPPORT_GPU
+
+#ifdef SK_METAL
+#import <Metal/Metal.h>
+#import <QuartzCore/CAMetalLayer.h>
+
+sk_sp<SkSurface> SkSurface::MakeFromCAMetalLayer(GrContext* context,
+ GrMTLHandle layer,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ GrMTLHandle* drawable) {
+ // TODO: Apple recommends grabbing the drawable (which we're implicitly doing here)
+ // for as little time as possible. I'm not sure it matters for our test apps, but
+ // you can get better throughput by doing any offscreen renders, texture uploads, or
+    // other non-dependent tasks before grabbing the drawable.
+ CAMetalLayer* metalLayer = (__bridge CAMetalLayer*)layer;
+ id<CAMetalDrawable> currentDrawable = [metalLayer nextDrawable];
+
+ GrMtlTextureInfo fbInfo;
+ fbInfo.fTexture.retain((__bridge const void*)(currentDrawable.texture));
+
+ CGSize size = [metalLayer drawableSize];
+ sk_sp<SkSurface> surface;
+ if (sampleCnt <= 1) {
+ GrBackendRenderTarget backendRT(size.width,
+ size.height,
+ sampleCnt,
+ fbInfo);
+
+ surface = SkSurface::MakeFromBackendRenderTarget(context, backendRT, origin, colorType,
+ colorSpace, surfaceProps);
+ } else {
+ GrBackendTexture backendTexture(size.width,
+ size.height,
+ GrMipMapped::kNo,
+ fbInfo);
+
+ surface = SkSurface::MakeFromBackendTexture(context, backendTexture, origin, sampleCnt,
+ colorType, colorSpace, surfaceProps);
+ }
+ *drawable = (__bridge_retained GrMTLHandle) currentDrawable;
+
+ return surface;
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Raster.cpp b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
new file mode 100644
index 0000000000..a086165ea4
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMallocPixelRef.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkImagePriv.h"
+#include "src/image/SkSurface_Base.h"
+
+class SkSurface_Raster : public SkSurface_Base {
+public:
+ SkSurface_Raster(const SkImageInfo&, void*, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps*);
+ SkSurface_Raster(const SkImageInfo& info, sk_sp<SkPixelRef>, const SkSurfaceProps*);
+
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset) override;
+ void onWritePixels(const SkPixmap&, int x, int y) override;
+ void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) override;
+ void onCopyOnWrite(ContentChangeMode) override;
+ void onRestoreBackingMutability() override;
+
+private:
+ SkBitmap fBitmap;
+ size_t fRowBytes;
+ bool fWeOwnThePixels;
+
+ typedef SkSurface_Base INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkSurfaceValidateRasterInfo(const SkImageInfo& info, size_t rowBytes) {
+ if (!SkImageInfoIsValid(info)) {
+ return false;
+ }
+
+ if (info.colorType() == kR8G8_unorm_SkColorType ||
+ info.colorType() == kR16G16_unorm_SkColorType ||
+ info.colorType() == kR16G16_float_SkColorType ||
+ info.colorType() == kA16_unorm_SkColorType ||
+ info.colorType() == kA16_float_SkColorType ||
+ info.colorType() == kR16G16B16A16_unorm_SkColorType) {
+ return false;
+ }
+
+ if (kIgnoreRowBytesValue == rowBytes) {
+ return true;
+ }
+
+ int shift = info.shiftPerPixel();
+
+ uint64_t minRB = (uint64_t)info.width() << shift;
+ if (minRB > rowBytes) {
+ return false;
+ }
+
+ size_t alignedRowBytes = rowBytes >> shift << shift;
+ if (alignedRowBytes != rowBytes) {
+ return false;
+ }
+
+ uint64_t size = sk_64_mul(info.height(), rowBytes);
+ static const size_t kMaxTotalSize = SK_MaxS32;
+ if (size > kMaxTotalSize) {
+ return false;
+ }
+
+ return true;
+}
+
+SkSurface_Raster::SkSurface_Raster(const SkImageInfo& info, void* pixels, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props)
+ : INHERITED(info, props)
+{
+ fBitmap.installPixels(info, pixels, rb, releaseProc, context);
+ fRowBytes = 0; // don't need to track the rowbytes
+ fWeOwnThePixels = false; // We are "Direct"
+}
+
+SkSurface_Raster::SkSurface_Raster(const SkImageInfo& info, sk_sp<SkPixelRef> pr,
+ const SkSurfaceProps* props)
+ : INHERITED(pr->width(), pr->height(), props)
+{
+ fBitmap.setInfo(info, pr->rowBytes());
+ fRowBytes = pr->rowBytes(); // we track this, so that subsequent re-allocs will match
+ fBitmap.setPixelRef(std::move(pr), 0, 0);
+ fWeOwnThePixels = true;
+}
+
+SkCanvas* SkSurface_Raster::onNewCanvas() { return new SkCanvas(fBitmap, this->props()); }
+
+sk_sp<SkSurface> SkSurface_Raster::onNewSurface(const SkImageInfo& info) {
+ return SkSurface::MakeRaster(info, &this->props());
+}
+
+void SkSurface_Raster::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ canvas->drawBitmap(fBitmap, x, y, paint);
+}
+
+sk_sp<SkImage> SkSurface_Raster::onNewImageSnapshot(const SkIRect* subset) {
+ if (subset) {
+ SkASSERT(SkIRect::MakeWH(fBitmap.width(), fBitmap.height()).contains(*subset));
+ SkBitmap dst;
+ dst.allocPixels(fBitmap.info().makeDimensions(subset->size()));
+ SkAssertResult(fBitmap.readPixels(dst.pixmap(), subset->left(), subset->top()));
+        dst.setImmutable(); // important: immutability lets MakeFromBitmap share, not copy, the buffer
+ return SkImage::MakeFromBitmap(dst);
+ }
+
+ SkCopyPixelsMode cpm = kIfMutable_SkCopyPixelsMode;
+ if (fWeOwnThePixels) {
+ // SkImage_raster requires these pixels are immutable for its full lifetime.
+ // We'll undo this via onRestoreBackingMutability() if we can avoid the COW.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->setTemporarilyImmutable();
+ }
+ } else {
+ cpm = kAlways_SkCopyPixelsMode;
+ }
+
+ // Our pixels are in memory, so read access on the snapshot SkImage could be cheap.
+ // Lock the shared pixel ref to ensure peekPixels() is usable.
+ return SkMakeImageFromRasterBitmap(fBitmap, cpm);
+}
+
+void SkSurface_Raster::onWritePixels(const SkPixmap& src, int x, int y) {
+ fBitmap.writePixels(src, x, y);
+}
+
+void SkSurface_Raster::onRestoreBackingMutability() {
+ SkASSERT(!this->hasCachedImage()); // Shouldn't be any snapshots out there.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->restoreMutability();
+ }
+}
+
+void SkSurface_Raster::onCopyOnWrite(ContentChangeMode mode) {
+ // are we sharing pixelrefs with the image?
+ sk_sp<SkImage> cached(this->refCachedImage());
+ SkASSERT(cached);
+ if (SkBitmapImageGetPixelRef(cached.get()) == fBitmap.pixelRef()) {
+ SkASSERT(fWeOwnThePixels);
+ if (kDiscard_ContentChangeMode == mode) {
+ fBitmap.allocPixels();
+ } else {
+ SkBitmap prev(fBitmap);
+ fBitmap.allocPixels();
+ SkASSERT(prev.info() == fBitmap.info());
+ SkASSERT(prev.rowBytes() == fBitmap.rowBytes());
+ memcpy(fBitmap.getPixels(), prev.getPixels(), fBitmap.computeByteSize());
+ }
+ SkASSERT(fBitmap.rowBytes() == fRowBytes); // be sure we always use the same value
+
+        // Now fBitmap is a deep copy of itself (and therefore different from
+        // what is being used by the image). Next we update the canvas to use
+        // this as its backend, so we can't modify the image's pixels anymore.
+ SkASSERT(this->getCachedCanvas());
+ this->getCachedCanvas()->getDevice()->replaceBitmapBackendForRasterSurface(fBitmap);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirectReleaseProc(const SkImageInfo& info, void* pixels,
+ size_t rb, void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props) {
+ if (nullptr == releaseProc) {
+ context = nullptr;
+ }
+ if (!SkSurfaceValidateRasterInfo(info, rb)) {
+ return nullptr;
+ }
+ if (nullptr == pixels) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Raster>(info, pixels, rb, releaseProc, context, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirect(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ return MakeRasterDirectReleaseProc(info, pixels, rowBytes, nullptr, nullptr, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRaster(const SkImageInfo& info, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ if (!SkSurfaceValidateRasterInfo(info)) {
+ return nullptr;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, rowBytes);
+ if (!pr) {
+ return nullptr;
+ }
+ if (rowBytes) {
+ SkASSERT(pr->rowBytes() == rowBytes);
+ }
+ return sk_make_sp<SkSurface_Raster>(info, std::move(pr), props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRasterN32Premul(int width, int height,
+ const SkSurfaceProps* surfaceProps) {
+ return MakeRaster(SkImageInfo::MakeN32Premul(width, height), surfaceProps);
+}
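SkSurfaceValidateRasterInfo() above gates every raster factory. A sketch of the caller-owned-pixels path it validates (dimensions are illustrative):

    #include "include/core/SkCanvas.h"
    #include "include/core/SkSurface.h"
    #include <vector>

    static void draw_into_caller_buffer() {
        const int width = 256, height = 256;  // illustrative
        SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
        std::vector<uint32_t> pixels(width * height);
        // rowBytes must be at least width << shiftPerPixel and a whole multiple
        // of the pixel size -- exactly the checks SkSurfaceValidateRasterInfo makes.
        sk_sp<SkSurface> surface = SkSurface::MakeRasterDirect(
                info, pixels.data(), width * sizeof(uint32_t));
        if (surface) {
            surface->getCanvas()->clear(SK_ColorWHITE);  // writes directly into 'pixels'
        }
    }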
diff --git a/gfx/skia/skia/src/images/SkImageEncoder.cpp b/gfx/skia/skia/src/images/SkImageEncoder.cpp
new file mode 100644
index 0000000000..101b34f505
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkImageEncoder.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/encode/SkJpegEncoder.h"
+#include "include/encode/SkPngEncoder.h"
+#include "include/encode/SkWebpEncoder.h"
+#include "src/images/SkImageEncoderPriv.h"
+
+#ifndef SK_HAS_JPEG_LIBRARY
+bool SkJpegEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
+std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream*, const SkPixmap&, const Options&) {
+ return nullptr;
+}
+#endif
+
+#ifndef SK_HAS_PNG_LIBRARY
+bool SkPngEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
+std::unique_ptr<SkEncoder> SkPngEncoder::Make(SkWStream*, const SkPixmap&, const Options&) {
+ return nullptr;
+}
+#endif
+
+#ifndef SK_HAS_WEBP_LIBRARY
+bool SkWebpEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
+#endif
+
+bool SkEncodeImage(SkWStream* dst, const SkPixmap& src,
+ SkEncodedImageFormat format, int quality) {
+ #ifdef SK_USE_CG_ENCODER
+ (void)quality;
+ return SkEncodeImageWithCG(dst, src, format);
+ #elif SK_USE_WIC_ENCODER
+ return SkEncodeImageWithWIC(dst, src, format, quality);
+ #else
+ switch(format) {
+ case SkEncodedImageFormat::kJPEG: {
+ SkJpegEncoder::Options opts;
+ opts.fQuality = quality;
+ return SkJpegEncoder::Encode(dst, src, opts);
+ }
+ case SkEncodedImageFormat::kPNG: {
+ SkPngEncoder::Options opts;
+ return SkPngEncoder::Encode(dst, src, opts);
+ }
+ case SkEncodedImageFormat::kWEBP: {
+ SkWebpEncoder::Options opts;
+ if (quality == 100) {
+ opts.fCompression = SkWebpEncoder::Compression::kLossless;
+ // Note: SkEncodeImage treats 0 quality as the lowest quality
+ // (greatest compression) and 100 as the highest quality (least
+ // compression). For kLossy, this matches libwebp's
+ // interpretation, so it is passed directly to libwebp. But
+ // with kLossless, libwebp always creates the highest quality
+ // image. In this case, fQuality is reinterpreted as how much
+ // effort (time) to put into making a smaller file. This API
+ // does not provide a way to specify this value (though it can
+ // be specified by using SkWebpEncoder::Encode) so we have to
+ // pick one arbitrarily. This value matches that chosen by
+                // blink::ImageEncoder::ComputeWebpOptions as well as
+                // WebPConfigInit.
+ opts.fQuality = 75;
+ } else {
+ opts.fCompression = SkWebpEncoder::Compression::kLossy;
+ opts.fQuality = quality;
+ }
+ return SkWebpEncoder::Encode(dst, src, opts);
+ }
+ default:
+ return false;
+ }
+ #endif
+}
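The kWEBP branch above can only reach lossless mode with the effort value fixed at 75. As its comment notes, a caller who wants lossless with a different effort goes through SkWebpEncoder::Encode directly; a sketch:

    #include "include/core/SkStream.h"
    #include "include/encode/SkWebpEncoder.h"

    static bool save_lossless_webp(SkWStream* dst, const SkPixmap& src) {
        SkWebpEncoder::Options opts;
        opts.fCompression = SkWebpEncoder::Compression::kLossless;
        opts.fQuality = 50;  // for kLossless this is encode effort, not visual quality
        return SkWebpEncoder::Encode(dst, src, opts);
    }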
+
+bool SkEncoder::encodeRows(int numRows) {
+ SkASSERT(numRows > 0 && fCurrRow < fSrc.height());
+ if (numRows <= 0 || fCurrRow >= fSrc.height()) {
+ return false;
+ }
+
+ if (fCurrRow + numRows > fSrc.height()) {
+ numRows = fSrc.height() - fCurrRow;
+ }
+
+ if (!this->onEncodeRows(numRows)) {
+ // If we fail, short circuit any future calls.
+ fCurrRow = fSrc.height();
+ return false;
+ }
+
+ return true;
+}
+
+sk_sp<SkData> SkEncodePixmap(const SkPixmap& src, SkEncodedImageFormat format, int quality) {
+ SkDynamicMemoryWStream stream;
+ return SkEncodeImage(&stream, src, format, quality) ? stream.detachAsData() : nullptr;
+}
+
+sk_sp<SkData> SkEncodeBitmap(const SkBitmap& src, SkEncodedImageFormat format, int quality) {
+ SkPixmap pixmap;
+ return src.peekPixels(&pixmap) ? SkEncodePixmap(pixmap, format, quality) : nullptr;
+}
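SkEncodePixmap()/SkEncodeBitmap() are the one-shot entry points, while encodeRows() above supports chunked encoding. A sketch of both, assuming a readable SkBitmap:

    #include "include/core/SkBitmap.h"
    #include "include/core/SkImageEncoder.h"
    #include "include/core/SkStream.h"
    #include "include/encode/SkPngEncoder.h"

    static sk_sp<SkData> encode_two_ways(const SkBitmap& bm) {
        // One-shot: peeks the pixels and dispatches through SkEncodeImage().
        sk_sp<SkData> png = SkEncodeBitmap(bm, SkEncodedImageFormat::kPNG, 100);

        // Chunked: 16 rows per call; encodeRows() clamps the last chunk to the
        // remaining height, as implemented above.
        SkPixmap pixmap;
        SkDynamicMemoryWStream stream;
        if (bm.peekPixels(&pixmap)) {
            auto encoder = SkPngEncoder::Make(&stream, pixmap, SkPngEncoder::Options());
            for (int row = 0; encoder && row < pixmap.height(); row += 16) {
                if (!encoder->encodeRows(16)) {
                    break;  // a failed call short-circuits all future ones
                }
            }
        }
        return png;
    }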
diff --git a/gfx/skia/skia/src/images/SkImageEncoderFns.h b/gfx/skia/skia/src/images/SkImageEncoderFns.h
new file mode 100644
index 0000000000..42b9dd7c79
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkImageEncoderFns.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoderFns_DEFINED
+#define SkImageEncoderFns_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkICC.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/third_party/skcms/skcms.h"
+
+typedef void (*transform_scanline_proc)(char* dst, const char* src, int width, int bpp);
+
+static inline void transform_scanline_memcpy(char* dst, const char* src, int width, int bpp) {
+ memcpy(dst, src, width * bpp);
+}
+
+static inline void transform_scanline_A8_to_GrayAlpha(char* dst, const char* src, int width, int) {
+ for (int i = 0; i < width; i++) {
+ *dst++ = 0;
+ *dst++ = *src++;
+ }
+}
+
+
+static void skcms(char* dst, const char* src, int n,
+ skcms_PixelFormat srcFmt, skcms_AlphaFormat srcAlpha,
+ skcms_PixelFormat dstFmt, skcms_AlphaFormat dstAlpha) {
+ SkAssertResult(skcms_Transform(src, srcFmt, srcAlpha, nullptr,
+ dst, dstFmt, dstAlpha, nullptr, n));
+}
+
+static inline void transform_scanline_gray(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_G_8, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_565(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGR_565, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_RGBX(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_BGRX(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_444(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_ABGR_4444, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_rgbA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_bgrA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_to_premul_legacy(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded);
+}
+
+static inline void transform_scanline_BGRA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_4444(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_ABGR_4444, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_101010x(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_1010102(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_1010102_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_to_8888(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_premul_to_8888(char* dst,
+ const char* src,
+ int width,
+ int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_to_premul_8888(char* dst,
+ const char* src,
+ int width,
+ int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded);
+}
+
+static inline void transform_scanline_F32(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_ffff, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F32_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_ffff, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline sk_sp<SkData> icc_from_color_space(const SkImageInfo& info) {
+ SkColorSpace* cs = info.colorSpace();
+ if (!cs) {
+ return nullptr;
+ }
+
+ skcms_TransferFunction fn;
+ skcms_Matrix3x3 toXYZD50;
+ if (cs->isNumericalTransferFn(&fn) && cs->toXYZD50(&toXYZD50)) {
+ return SkWriteICCProfile(fn, toXYZD50);
+ }
+ return nullptr;
+}
+
+#endif // SkImageEncoderFns_DEFINED
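Each helper above rewrites one scanline into the layout an encoder expects. A sketch of driving one directly; the pixel values are illustrative and follow the R,G,B,A byte order the skcms formats name:

    #include "src/images/SkImageEncoderFns.h"

    static void unpremultiply_one_row() {
        constexpr int kWidth = 4;
        // Premultiplied RGBA bytes (each channel <= alpha), values illustrative.
        uint8_t premul[kWidth * 4] = {
            0x40, 0x40, 0x40, 0x80,   // mid-gray at 50% alpha, premultiplied
            0x00, 0xFF, 0x00, 0xFF,   // opaque green
            0x00, 0x00, 0x00, 0x00,   // fully transparent
            0x00, 0x00, 0x80, 0x80,   // blue at 50% alpha, premultiplied
        };
        uint8_t unpremul[kWidth * 4] = {};
        transform_scanline_rgbA(reinterpret_cast<char*>(unpremul),
                                reinterpret_cast<const char*>(premul),
                                kWidth, /*bpp=*/4);
    }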
diff --git a/gfx/skia/skia/src/images/SkImageEncoderPriv.h b/gfx/skia/skia/src/images/SkImageEncoderPriv.h
new file mode 100644
index 0000000000..67ede545b9
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkImageEncoderPriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoderPriv_DEFINED
+#define SkImageEncoderPriv_DEFINED
+
+#include "include/core/SkImageEncoder.h"
+#include "include/private/SkImageInfoPriv.h"
+
+static inline bool SkPixmapIsValid(const SkPixmap& src) {
+ if (!SkImageInfoIsValid(src.info())) {
+ return false;
+ }
+
+ if (!src.addr() || src.rowBytes() < src.info().minRowBytes()) {
+ return false;
+ }
+
+ return true;
+}
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ bool SkEncodeImageWithCG(SkWStream*, const SkPixmap&, SkEncodedImageFormat);
+#else
+ #define SkEncodeImageWithCG(...) false
+#endif
+
+#ifdef SK_BUILD_FOR_WIN
+ bool SkEncodeImageWithWIC(SkWStream*, const SkPixmap&, SkEncodedImageFormat, int quality);
+#else
+ #define SkEncodeImageWithWIC(...) false
+#endif
+
+#endif // SkImageEncoderPriv_DEFINED
diff --git a/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp b/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp
new file mode 100644
index 0000000000..4536bb4ff6
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/images/SkJPEGWriteUtility.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void sk_init_destination(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+}
+
+static boolean sk_empty_output_buffer(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+// if (!dest->fStream->write(dest->fBuffer, skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer))
+ if (!dest->fStream->write(dest->fBuffer,
+ skjpeg_destination_mgr::kBufferSize)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return FALSE;
+ }
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+ return TRUE;
+}
+
+static void sk_term_destination (j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ size_t size = skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer;
+ if (size > 0) {
+ if (!dest->fStream->write(dest->fBuffer, size)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return;
+ }
+ }
+ dest->fStream->flush();
+}
+
+skjpeg_destination_mgr::skjpeg_destination_mgr(SkWStream* stream)
+ : fStream(stream) {
+ this->init_destination = sk_init_destination;
+ this->empty_output_buffer = sk_empty_output_buffer;
+ this->term_destination = sk_term_destination;
+}
+
+void skjpeg_error_exit(j_common_ptr cinfo) {
+ skjpeg_error_mgr* error = (skjpeg_error_mgr*)cinfo->err;
+
+ (*error->output_message) (cinfo);
+
+ /* Let the memory manager delete any temp files before we die */
+ jpeg_destroy(cinfo);
+
+ if (error->fJmpBufStack.empty()) {
+ SK_ABORT("JPEG error with no jmp_buf set.");
+ }
+ longjmp(*error->fJmpBufStack.back(), -1);
+}
diff --git a/gfx/skia/skia/src/images/SkJPEGWriteUtility.h b/gfx/skia/skia/src/images/SkJPEGWriteUtility.h
new file mode 100644
index 0000000000..356b08674b
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJPEGWriteUtility.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegUtility_DEFINED
+#define SkJpegUtility_DEFINED
+
+#include "include/core/SkStream.h"
+#include "src/codec/SkJpegPriv.h"
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+#include <setjmp.h>
+
+void SK_API skjpeg_error_exit(j_common_ptr cinfo);
+
+/////////////////////////////////////////////////////////////////////////////
+/* Our destination struct for directing compressed output bytes to our stream
+ * object.
+ */
+struct SK_API skjpeg_destination_mgr : jpeg_destination_mgr {
+ skjpeg_destination_mgr(SkWStream* stream);
+
+ SkWStream* fStream;
+
+ enum {
+ kBufferSize = 1024
+ };
+ uint8_t fBuffer[kBufferSize];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/images/SkJpegEncoder.cpp b/gfx/skia/skia/src/images/SkJpegEncoder.cpp
new file mode 100644
index 0000000000..144876d8fc
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJpegEncoder.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/images/SkImageEncoderPriv.h"
+
+#ifdef SK_HAS_JPEG_LIBRARY
+
+#include "include/core/SkStream.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkMSAN.h"
+#include "src/images/SkImageEncoderFns.h"
+#include "src/images/SkJPEGWriteUtility.h"
+
+#include <stdio.h>
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+class SkJpegEncoderMgr final : SkNoncopyable {
+public:
+
+ /*
+     * Create the encode manager.
+     * Does not take ownership of the stream.
+ */
+ static std::unique_ptr<SkJpegEncoderMgr> Make(SkWStream* stream) {
+ return std::unique_ptr<SkJpegEncoderMgr>(new SkJpegEncoderMgr(stream));
+ }
+
+ bool setParams(const SkImageInfo& srcInfo, const SkJpegEncoder::Options& options);
+
+ jpeg_compress_struct* cinfo() { return &fCInfo; }
+
+ skjpeg_error_mgr* errorMgr() { return &fErrMgr; }
+
+ transform_scanline_proc proc() const { return fProc; }
+
+ ~SkJpegEncoderMgr() {
+ jpeg_destroy_compress(&fCInfo);
+ }
+
+private:
+
+ SkJpegEncoderMgr(SkWStream* stream)
+ : fDstMgr(stream)
+ , fProc(nullptr)
+ {
+ fCInfo.err = jpeg_std_error(&fErrMgr);
+ fErrMgr.error_exit = skjpeg_error_exit;
+ jpeg_create_compress(&fCInfo);
+ fCInfo.dest = &fDstMgr;
+ }
+
+ jpeg_compress_struct fCInfo;
+ skjpeg_error_mgr fErrMgr;
+ skjpeg_destination_mgr fDstMgr;
+ transform_scanline_proc fProc;
+};
+
+bool SkJpegEncoderMgr::setParams(const SkImageInfo& srcInfo, const SkJpegEncoder::Options& options)
+{
+ auto chooseProc8888 = [&]() {
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType() &&
+ options.fAlphaOption == SkJpegEncoder::AlphaOption::kBlendOnBlack) {
+ return transform_scanline_to_premul_legacy;
+ }
+ return (transform_scanline_proc) nullptr;
+ };
+
+ J_COLOR_SPACE jpegColorType = JCS_EXT_RGBA;
+ int numComponents = 0;
+ switch (srcInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ fProc = chooseProc8888();
+ jpegColorType = JCS_EXT_RGBA;
+ numComponents = 4;
+ break;
+ case kBGRA_8888_SkColorType:
+ fProc = chooseProc8888();
+ jpegColorType = JCS_EXT_BGRA;
+ numComponents = 4;
+ break;
+ case kRGB_565_SkColorType:
+ fProc = transform_scanline_565;
+ jpegColorType = JCS_RGB;
+ numComponents = 3;
+ break;
+ case kARGB_4444_SkColorType:
+ if (SkJpegEncoder::AlphaOption::kBlendOnBlack == options.fAlphaOption) {
+ return false;
+ }
+
+ fProc = transform_scanline_444;
+ jpegColorType = JCS_RGB;
+ numComponents = 3;
+ break;
+ case kGray_8_SkColorType:
+ SkASSERT(srcInfo.isOpaque());
+ jpegColorType = JCS_GRAYSCALE;
+ numComponents = 1;
+ break;
+ case kRGBA_F16_SkColorType:
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType() &&
+ options.fAlphaOption == SkJpegEncoder::AlphaOption::kBlendOnBlack) {
+ fProc = transform_scanline_F16_to_premul_8888;
+ } else {
+ fProc = transform_scanline_F16_to_8888;
+ }
+ jpegColorType = JCS_EXT_RGBA;
+ numComponents = 4;
+ break;
+ default:
+ return false;
+ }
+
+ fCInfo.image_width = srcInfo.width();
+ fCInfo.image_height = srcInfo.height();
+ fCInfo.in_color_space = jpegColorType;
+ fCInfo.input_components = numComponents;
+ jpeg_set_defaults(&fCInfo);
+
+ if (kGray_8_SkColorType != srcInfo.colorType()) {
+ switch (options.fDownsample) {
+ case SkJpegEncoder::Downsample::k420:
+ SkASSERT(2 == fCInfo.comp_info[0].h_samp_factor);
+ SkASSERT(2 == fCInfo.comp_info[0].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].v_samp_factor);
+ break;
+ case SkJpegEncoder::Downsample::k422:
+ fCInfo.comp_info[0].h_samp_factor = 2;
+ fCInfo.comp_info[0].v_samp_factor = 1;
+ fCInfo.comp_info[1].h_samp_factor = 1;
+ fCInfo.comp_info[1].v_samp_factor = 1;
+ fCInfo.comp_info[2].h_samp_factor = 1;
+ fCInfo.comp_info[2].v_samp_factor = 1;
+ break;
+ case SkJpegEncoder::Downsample::k444:
+ fCInfo.comp_info[0].h_samp_factor = 1;
+ fCInfo.comp_info[0].v_samp_factor = 1;
+ fCInfo.comp_info[1].h_samp_factor = 1;
+ fCInfo.comp_info[1].v_samp_factor = 1;
+ fCInfo.comp_info[2].h_samp_factor = 1;
+ fCInfo.comp_info[2].v_samp_factor = 1;
+ break;
+ }
+ }
+
+ // Tells libjpeg-turbo to compute optimal Huffman coding tables
+ // for the image. This improves compression at the cost of
+ // slower encode performance.
+ fCInfo.optimize_coding = TRUE;
+ return true;
+}
+
+std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options) {
+ if (!SkPixmapIsValid(src)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkJpegEncoderMgr> encoderMgr = SkJpegEncoderMgr::Make(dst);
+
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(encoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->setParams(src.info(), options)) {
+ return nullptr;
+ }
+
+ jpeg_set_quality(encoderMgr->cinfo(), options.fQuality, TRUE);
+ jpeg_start_compress(encoderMgr->cinfo(), TRUE);
+
+ sk_sp<SkData> icc = icc_from_color_space(src.info());
+ if (icc) {
+ // Create a contiguous block of memory with the icc signature followed by the profile.
+ sk_sp<SkData> markerData =
+ SkData::MakeUninitialized(kICCMarkerHeaderSize + icc->size());
+ uint8_t* ptr = (uint8_t*) markerData->writable_data();
+ memcpy(ptr, kICCSig, sizeof(kICCSig));
+ ptr += sizeof(kICCSig);
+ *ptr++ = 1; // This is the first marker.
+ *ptr++ = 1; // Out of one total markers.
+ memcpy(ptr, icc->data(), icc->size());
+
+ jpeg_write_marker(encoderMgr->cinfo(), kICCMarker, markerData->bytes(), markerData->size());
+ }
+
+ return std::unique_ptr<SkJpegEncoder>(new SkJpegEncoder(std::move(encoderMgr), src));
+}
+
+SkJpegEncoder::SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr> encoderMgr, const SkPixmap& src)
+ : INHERITED(src, encoderMgr->proc() ? encoderMgr->cinfo()->input_components*src.width() : 0)
+ , fEncoderMgr(std::move(encoderMgr))
+{}
+
+SkJpegEncoder::~SkJpegEncoder() {}
+
+bool SkJpegEncoder::onEncodeRows(int numRows) {
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fEncoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return false;
+ }
+
+ const size_t srcBytes = SkColorTypeBytesPerPixel(fSrc.colorType()) * fSrc.width();
+ const size_t jpegSrcBytes = fEncoderMgr->cinfo()->input_components * fSrc.width();
+
+ const void* srcRow = fSrc.addr(0, fCurrRow);
+ for (int i = 0; i < numRows; i++) {
+ JSAMPLE* jpegSrcRow = (JSAMPLE*) srcRow;
+ if (fEncoderMgr->proc()) {
+ sk_msan_assert_initialized(srcRow, SkTAddOffset<const void>(srcRow, srcBytes));
+ fEncoderMgr->proc()((char*)fStorage.get(),
+ (const char*)srcRow,
+ fSrc.width(),
+ fEncoderMgr->cinfo()->input_components);
+ jpegSrcRow = fStorage.get();
+ sk_msan_assert_initialized(jpegSrcRow,
+ SkTAddOffset<const void>(jpegSrcRow, jpegSrcBytes));
+ } else {
+            // Same assertion as above; duplicating it makes it possible to tell
+            // from an MSAN report whether a transform proc was in use.
+ sk_msan_assert_initialized(jpegSrcRow,
+ SkTAddOffset<const void>(jpegSrcRow, jpegSrcBytes));
+ }
+
+ jpeg_write_scanlines(fEncoderMgr->cinfo(), &jpegSrcRow, 1);
+ srcRow = SkTAddOffset<const void>(srcRow, fSrc.rowBytes());
+ }
+
+ fCurrRow += numRows;
+ if (fCurrRow == fSrc.height()) {
+ jpeg_finish_compress(fEncoderMgr->cinfo());
+ }
+
+ return true;
+}
+
+bool SkJpegEncoder::Encode(SkWStream* dst, const SkPixmap& src, const Options& options) {
+ auto encoder = SkJpegEncoder::Make(dst, src, options);
+ return encoder.get() && encoder->encodeRows(src.height());
+}
+
+#endif
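Make() above stitches together the error manager, the destination manager, and the scanline transform; Encode() is the convenience wrapper most callers want. A sketch of tuning the options (values illustrative):

    #include "include/core/SkStream.h"
    #include "include/encode/SkJpegEncoder.h"

    static bool save_jpeg(SkWStream* dst, const SkPixmap& src) {
        SkJpegEncoder::Options opts;
        opts.fQuality = 90;                                  // 0..100
        opts.fDownsample = SkJpegEncoder::Downsample::k422;  // chroma subsampling
        opts.fAlphaOption = SkJpegEncoder::AlphaOption::kIgnore;
        return SkJpegEncoder::Encode(dst, src, opts);
    }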
diff --git a/gfx/skia/skia/src/images/SkPngEncoder.cpp b/gfx/skia/skia/src/images/SkPngEncoder.cpp
new file mode 100644
index 0000000000..5a3c9e6957
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkPngEncoder.cpp
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/images/SkImageEncoderPriv.h"
+
+#ifdef SK_HAS_PNG_LIBRARY
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/encode/SkPngEncoder.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "src/codec/SkColorTable.h"
+#include "src/codec/SkPngPriv.h"
+#include "src/core/SkMSAN.h"
+#include "src/images/SkImageEncoderFns.h"
+#include <vector>
+
+#include "png.h"
+
+static_assert(PNG_FILTER_NONE == (int)SkPngEncoder::FilterFlag::kNone, "Skia libpng filter err.");
+static_assert(PNG_FILTER_SUB == (int)SkPngEncoder::FilterFlag::kSub, "Skia libpng filter err.");
+static_assert(PNG_FILTER_UP == (int)SkPngEncoder::FilterFlag::kUp, "Skia libpng filter err.");
+static_assert(PNG_FILTER_AVG == (int)SkPngEncoder::FilterFlag::kAvg, "Skia libpng filter err.");
+static_assert(PNG_FILTER_PAETH == (int)SkPngEncoder::FilterFlag::kPaeth, "Skia libpng filter err.");
+static_assert(PNG_ALL_FILTERS == (int)SkPngEncoder::FilterFlag::kAll, "Skia libpng filter err.");
+
+static constexpr bool kSuppressPngEncodeWarnings = true;
+
+static void sk_error_fn(png_structp png_ptr, png_const_charp msg) {
+ if (!kSuppressPngEncodeWarnings) {
+ SkDebugf("libpng encode error: %s\n", msg);
+ }
+
+ longjmp(png_jmpbuf(png_ptr), 1);
+}
+
+static void sk_write_fn(png_structp png_ptr, png_bytep data, png_size_t len) {
+ SkWStream* stream = (SkWStream*)png_get_io_ptr(png_ptr);
+ if (!stream->write(data, len)) {
+ png_error(png_ptr, "sk_write_fn cannot write to stream");
+ }
+}
+
+class SkPngEncoderMgr final : SkNoncopyable {
+public:
+
+ /*
+     * Create the encode manager.
+     * Does not take ownership of the stream.
+ */
+ static std::unique_ptr<SkPngEncoderMgr> Make(SkWStream* stream);
+
+ bool setHeader(const SkImageInfo& srcInfo, const SkPngEncoder::Options& options);
+ bool setColorSpace(const SkImageInfo& info);
+ bool writeInfo(const SkImageInfo& srcInfo);
+ void chooseProc(const SkImageInfo& srcInfo);
+
+ png_structp pngPtr() { return fPngPtr; }
+ png_infop infoPtr() { return fInfoPtr; }
+ int pngBytesPerPixel() const { return fPngBytesPerPixel; }
+ transform_scanline_proc proc() const { return fProc; }
+
+ ~SkPngEncoderMgr() {
+ png_destroy_write_struct(&fPngPtr, &fInfoPtr);
+ }
+
+private:
+
+ SkPngEncoderMgr(png_structp pngPtr, png_infop infoPtr)
+ : fPngPtr(pngPtr)
+ , fInfoPtr(infoPtr)
+ {}
+
+ png_structp fPngPtr;
+ png_infop fInfoPtr;
+ int fPngBytesPerPixel;
+ transform_scanline_proc fProc;
+};
+
+std::unique_ptr<SkPngEncoderMgr> SkPngEncoderMgr::Make(SkWStream* stream) {
+ png_structp pngPtr =
+ png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, sk_error_fn, nullptr);
+ if (!pngPtr) {
+ return nullptr;
+ }
+
+ png_infop infoPtr = png_create_info_struct(pngPtr);
+ if (!infoPtr) {
+ png_destroy_write_struct(&pngPtr, nullptr);
+ return nullptr;
+ }
+
+ png_set_write_fn(pngPtr, (void*)stream, sk_write_fn, nullptr);
+ return std::unique_ptr<SkPngEncoderMgr>(new SkPngEncoderMgr(pngPtr, infoPtr));
+}
+
+bool SkPngEncoderMgr::setHeader(const SkImageInfo& srcInfo, const SkPngEncoder::Options& options) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ int pngColorType;
+ png_color_8 sigBit;
+ int bitDepth = 8;
+ switch (srcInfo.colorType()) {
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F32_SkColorType:
+ sigBit.red = 16;
+ sigBit.green = 16;
+ sigBit.blue = 16;
+ sigBit.alpha = 16;
+ bitDepth = 16;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = 8;
+ break;
+ case kGray_8_SkColorType:
+ sigBit.gray = 8;
+ pngColorType = PNG_COLOR_TYPE_GRAY;
+ fPngBytesPerPixel = 1;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ sigBit.red = 8;
+ sigBit.green = 8;
+ sigBit.blue = 8;
+ sigBit.alpha = 8;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = srcInfo.isOpaque() ? 3 : 4;
+ break;
+ case kRGB_888x_SkColorType:
+ sigBit.red = 8;
+ sigBit.green = 8;
+ sigBit.blue = 8;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 3;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kARGB_4444_SkColorType:
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType()) {
+ return false;
+ }
+
+ sigBit.red = 4;
+ sigBit.green = 4;
+ sigBit.blue = 4;
+ sigBit.alpha = 4;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = srcInfo.isOpaque() ? 3 : 4;
+ break;
+ case kRGB_565_SkColorType:
+ sigBit.red = 5;
+ sigBit.green = 6;
+ sigBit.blue = 5;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 3;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kAlpha_8_SkColorType: // store as gray+alpha, but ignore gray
+ sigBit.gray = kGraySigBit_GrayAlphaIsJustAlpha;
+ sigBit.alpha = 8;
+ pngColorType = PNG_COLOR_TYPE_GRAY_ALPHA;
+ fPngBytesPerPixel = 2;
+ break;
+ case kRGBA_1010102_SkColorType:
+ bitDepth = 16;
+ sigBit.red = 10;
+ sigBit.green = 10;
+ sigBit.blue = 10;
+ sigBit.alpha = 2;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = 8;
+ break;
+ case kRGB_101010x_SkColorType:
+ bitDepth = 16;
+ sigBit.red = 10;
+ sigBit.green = 10;
+ sigBit.blue = 10;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 6;
+ break;
+ default:
+ return false;
+ }
+
+ png_set_IHDR(fPngPtr, fInfoPtr, srcInfo.width(), srcInfo.height(),
+ bitDepth, pngColorType,
+ PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+ png_set_sBIT(fPngPtr, fInfoPtr, &sigBit);
+
+ int filters = (int)options.fFilterFlags & (int)SkPngEncoder::FilterFlag::kAll;
+ SkASSERT(filters == (int)options.fFilterFlags);
+ png_set_filter(fPngPtr, PNG_FILTER_TYPE_BASE, filters);
+
+ int zlibLevel = SkTMin(SkTMax(0, options.fZLibLevel), 9);
+ SkASSERT(zlibLevel == options.fZLibLevel);
+ png_set_compression_level(fPngPtr, zlibLevel);
+
+ // Set comments in tEXt chunk
+ const sk_sp<SkDataTable>& comments = options.fComments;
+ if (comments != nullptr) {
+ std::vector<png_text> png_texts(comments->count());
+ std::vector<SkString> clippedKeys;
+ for (int i = 0; i < comments->count() / 2; ++i) {
+ const char* keyword;
+ const char* originalKeyword = comments->atStr(2 * i);
+ const char* text = comments->atStr(2 * i + 1);
+ if (strlen(originalKeyword) <= PNG_KEYWORD_MAX_LENGTH) {
+ keyword = originalKeyword;
+ } else {
+ SkDEBUGFAILF("PNG tEXt keyword should be no longer than %d.",
+ PNG_KEYWORD_MAX_LENGTH);
+ clippedKeys.emplace_back(originalKeyword, PNG_KEYWORD_MAX_LENGTH);
+ keyword = clippedKeys.back().c_str();
+ }
+ // It seems safe to convert png_const_charp to png_charp for key/text,
+ // and we don't have to provide text_length and other fields as we're providing
+ // 0-terminated c_str with PNG_TEXT_COMPRESSION_NONE (no compression, no itxt).
+ png_texts[i].compression = PNG_TEXT_COMPRESSION_NONE;
+ png_texts[i].key = (png_charp)keyword;
+ png_texts[i].text = (png_charp)text;
+ }
+ png_set_text(fPngPtr, fInfoPtr, png_texts.data(), png_texts.size());
+ }
+
+ return true;
+}
+
+static transform_scanline_proc choose_proc(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kUnknown_SkColorType:
+ break;
+
+ case kRGBA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_RGBX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_memcpy;
+ case kPremul_SkAlphaType:
+ return transform_scanline_rgbA;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kBGRA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_BGRX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_BGRA;
+ case kPremul_SkAlphaType:
+ return transform_scanline_bgrA;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGB_565_SkColorType:
+ return transform_scanline_565;
+ case kRGB_888x_SkColorType:
+ return transform_scanline_RGBX;
+ case kARGB_4444_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_444;
+ case kPremul_SkAlphaType:
+ return transform_scanline_4444;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kGray_8_SkColorType:
+ return transform_scanline_memcpy;
+
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_F16;
+ case kPremul_SkAlphaType:
+ return transform_scanline_F16_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGBA_F32_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_F32;
+ case kPremul_SkAlphaType:
+ return transform_scanline_F32_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGBA_1010102_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_1010102;
+ case kPremul_SkAlphaType:
+ return transform_scanline_1010102_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGB_101010x_SkColorType:
+ return transform_scanline_101010x;
+ case kAlpha_8_SkColorType:
+ return transform_scanline_A8_to_GrayAlpha;
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kA16_float_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ return nullptr;
+ }
+ SkASSERT(false);
+ return nullptr;
+}
+
+static void set_icc(png_structp png_ptr, png_infop info_ptr, const SkImageInfo& info) {
+ sk_sp<SkData> icc = icc_from_color_space(info);
+ if (!icc) {
+ return;
+ }
+
+#if PNG_LIBPNG_VER_MAJOR > 1 || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 5)
+ const char* name = "Skia";
+ png_const_bytep iccPtr = icc->bytes();
+#else
+ SkString str("Skia");
+ char* name = str.writable_str();
+ png_charp iccPtr = (png_charp) icc->writable_data();
+#endif
+ png_set_iCCP(png_ptr, info_ptr, name, 0, iccPtr, icc->size());
+}
+
+bool SkPngEncoderMgr::setColorSpace(const SkImageInfo& info) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ if (info.colorSpace() && info.colorSpace()->isSRGB()) {
+ png_set_sRGB(fPngPtr, fInfoPtr, PNG_sRGB_INTENT_PERCEPTUAL);
+ } else {
+ set_icc(fPngPtr, fInfoPtr, info);
+ }
+
+ return true;
+}
+
+bool SkPngEncoderMgr::writeInfo(const SkImageInfo& srcInfo) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ png_write_info(fPngPtr, fInfoPtr);
+ if (kRGBA_F16_SkColorType == srcInfo.colorType() &&
+ kOpaque_SkAlphaType == srcInfo.alphaType())
+ {
+ // For kOpaque, kRGBA_F16, we will keep the row as RGBA and tell libpng
+ // to skip the alpha channel.
+ png_set_filler(fPngPtr, 0, PNG_FILLER_AFTER);
+ }
+
+ return true;
+}
+
+void SkPngEncoderMgr::chooseProc(const SkImageInfo& srcInfo) {
+ fProc = choose_proc(srcInfo);
+}
+
+std::unique_ptr<SkEncoder> SkPngEncoder::Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options) {
+ if (!SkPixmapIsValid(src)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkPngEncoderMgr> encoderMgr = SkPngEncoderMgr::Make(dst);
+ if (!encoderMgr) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->setHeader(src.info(), options)) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->setColorSpace(src.info())) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->writeInfo(src.info())) {
+ return nullptr;
+ }
+
+ encoderMgr->chooseProc(src.info());
+
+ return std::unique_ptr<SkPngEncoder>(new SkPngEncoder(std::move(encoderMgr), src));
+}
+
+SkPngEncoder::SkPngEncoder(std::unique_ptr<SkPngEncoderMgr> encoderMgr, const SkPixmap& src)
+ : INHERITED(src, encoderMgr->pngBytesPerPixel() * src.width())
+ , fEncoderMgr(std::move(encoderMgr))
+{}
+
+SkPngEncoder::~SkPngEncoder() {}
+
+bool SkPngEncoder::onEncodeRows(int numRows) {
+ if (setjmp(png_jmpbuf(fEncoderMgr->pngPtr()))) {
+ return false;
+ }
+
+ const void* srcRow = fSrc.addr(0, fCurrRow);
+ for (int y = 0; y < numRows; y++) {
+ sk_msan_assert_initialized(srcRow,
+ (const uint8_t*)srcRow + (fSrc.width() << fSrc.shiftPerPixel()));
+ fEncoderMgr->proc()((char*)fStorage.get(),
+ (const char*)srcRow,
+ fSrc.width(),
+ SkColorTypeBytesPerPixel(fSrc.colorType()));
+
+ png_bytep rowPtr = (png_bytep) fStorage.get();
+ png_write_rows(fEncoderMgr->pngPtr(), &rowPtr, 1);
+ srcRow = SkTAddOffset<const void>(srcRow, fSrc.rowBytes());
+ }
+
+ fCurrRow += numRows;
+ if (fCurrRow == fSrc.height()) {
+ png_write_end(fEncoderMgr->pngPtr(), fEncoderMgr->infoPtr());
+ }
+
+ return true;
+}
+
+bool SkPngEncoder::Encode(SkWStream* dst, const SkPixmap& src, const Options& options) {
+ auto encoder = SkPngEncoder::Make(dst, src, options);
+ return encoder.get() && encoder->encodeRows(src.height());
+}
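+
+// A minimal usage sketch (not part of the upstream sources): assuming an
+// already-populated SkPixmap `src`, a caller could one-shot encode with the
+// default Options (all PNG row filters, zlib level 6):
+//
+//   SkDynamicMemoryWStream out;
+//   SkPngEncoder::Options opts;
+//   if (SkPngEncoder::Encode(&out, src, opts)) {
+//       sk_sp<SkData> png = out.detachAsData();   // the encoded .png bytes
+//   }
+//
+// Alternatively, Make() returns the encoder itself, so callers can emit the
+// image in slices with SkEncoder::encodeRows(n) until src.height() rows are
+// written.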
+
+#endif
diff --git a/gfx/skia/skia/src/images/SkWebpEncoder.cpp b/gfx/skia/skia/src/images/SkWebpEncoder.cpp
new file mode 100644
index 0000000000..32801cb341
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkWebpEncoder.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2010, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/images/SkImageEncoderPriv.h"
+
+#ifdef SK_HAS_WEBP_LIBRARY
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/encode/SkWebpEncoder.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "src/images/SkImageEncoderFns.h"
+#include "src/utils/SkUTF.h"
+
+// A WebP encoder only, built on top of (a subset of) libwebp.
+// For more information on the WebP image format and the libwebp library, see:
+// http://code.google.com/speed/webp/
+// http://www.webmproject.org/code/#libwebp_webp_image_decoder_library
+// http://review.webmproject.org/gitweb?p=libwebp.git
+
+#include <stdio.h>
+extern "C" {
+// If libwebp is moved out of the Skia source tree, the path for the webp
+// headers must be updated accordingly. Here, we enforce using the local copy
+// in the webp sub-directory.
+#include "webp/encode.h"
+#include "webp/mux.h"
+}
+
+static transform_scanline_proc choose_proc(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_RGBX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_memcpy;
+ case kPremul_SkAlphaType:
+ return transform_scanline_rgbA;
+ default:
+ return nullptr;
+ }
+ case kBGRA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_BGRX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_BGRA;
+ case kPremul_SkAlphaType:
+ return transform_scanline_bgrA;
+ default:
+ return nullptr;
+ }
+ case kRGB_565_SkColorType:
+ if (!info.isOpaque()) {
+ return nullptr;
+ }
+
+ return transform_scanline_565;
+ case kARGB_4444_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_444;
+ case kPremul_SkAlphaType:
+ return transform_scanline_4444;
+ default:
+ return nullptr;
+ }
+ case kGray_8_SkColorType:
+ return transform_scanline_gray;
+ case kRGBA_F16_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_F16_to_8888;
+ case kPremul_SkAlphaType:
+ return transform_scanline_F16_premul_to_8888;
+ default:
+ return nullptr;
+ }
+ default:
+ return nullptr;
+ }
+}
+
+static int stream_writer(const uint8_t* data, size_t data_size,
+ const WebPPicture* const picture) {
+ SkWStream* const stream = (SkWStream*)picture->custom_ptr;
+ return stream->write(data, data_size) ? 1 : 0;
+}
+
+bool SkWebpEncoder::Encode(SkWStream* stream, const SkPixmap& pixmap, const Options& opts) {
+ if (!SkPixmapIsValid(pixmap)) {
+ return false;
+ }
+
+ const transform_scanline_proc proc = choose_proc(pixmap.info());
+ if (!proc) {
+ return false;
+ }
+
+ int bpp;
+ if (kRGBA_F16_SkColorType == pixmap.colorType()) {
+ bpp = 4;
+ } else {
+ bpp = pixmap.isOpaque() ? 3 : 4;
+ }
+
+ if (nullptr == pixmap.addr()) {
+ return false;
+ }
+
+ WebPConfig webp_config;
+ if (!WebPConfigPreset(&webp_config, WEBP_PRESET_DEFAULT, opts.fQuality)) {
+ return false;
+ }
+
+ WebPPicture pic;
+ WebPPictureInit(&pic);
+ SkAutoTCallVProc<WebPPicture, WebPPictureFree> autoPic(&pic);
+ pic.width = pixmap.width();
+ pic.height = pixmap.height();
+ pic.writer = stream_writer;
+
+ // Set compression, method, and pixel format.
+ // libwebp recommends using BGRA for lossless and YUV for lossy.
+ // The choices of |webp_config.method| currently just match Chrome's defaults. We
+ // could potentially expose this decision to the client.
+ if (Compression::kLossy == opts.fCompression) {
+ webp_config.lossless = 0;
+#ifndef SK_WEBP_ENCODER_USE_DEFAULT_METHOD
+ webp_config.method = 3;
+#endif
+ pic.use_argb = 0;
+ } else {
+ webp_config.lossless = 1;
+ webp_config.method = 0;
+ pic.use_argb = 1;
+ }
+
+ // If there is no need to embed an ICC profile, we write directly to the input stream.
+ // Otherwise, we will first encode to |tmp| and use a mux to add the ICC chunk. libwebp
+ // forces us to have an encoded image before we can add a profile.
+ sk_sp<SkData> icc = icc_from_color_space(pixmap.info());
+ SkDynamicMemoryWStream tmp;
+ pic.custom_ptr = icc ? (void*)&tmp : (void*)stream;
+
+ const uint8_t* src = (uint8_t*)pixmap.addr();
+ const int rgbStride = pic.width * bpp;
+ const size_t rowBytes = pixmap.rowBytes();
+
+    // Convert each scanline of the source image (whatever its color type)
+    // to the RGB/RGBA layout that libwebp imports.
+ std::unique_ptr<uint8_t[]> rgb(new uint8_t[rgbStride * pic.height]);
+ for (int y = 0; y < pic.height; ++y) {
+ proc((char*) &rgb[y * rgbStride],
+ (const char*) &src[y * rowBytes],
+ pic.width,
+ bpp);
+ }
+
+ auto importProc = WebPPictureImportRGB;
+ if (3 != bpp) {
+ if (pixmap.isOpaque()) {
+ importProc = WebPPictureImportRGBX;
+ } else {
+ importProc = WebPPictureImportRGBA;
+ }
+ }
+
+ if (!importProc(&pic, &rgb[0], rgbStride)) {
+ return false;
+ }
+
+ if (!WebPEncode(&webp_config, &pic)) {
+ return false;
+ }
+
+ if (icc) {
+ sk_sp<SkData> encodedData = tmp.detachAsData();
+ WebPData encoded = { encodedData->bytes(), encodedData->size() };
+ WebPData iccChunk = { icc->bytes(), icc->size() };
+
+ SkAutoTCallVProc<WebPMux, WebPMuxDelete> mux(WebPMuxNew());
+ if (WEBP_MUX_OK != WebPMuxSetImage(mux, &encoded, 0)) {
+ return false;
+ }
+
+ if (WEBP_MUX_OK != WebPMuxSetChunk(mux, "ICCP", &iccChunk, 0)) {
+ return false;
+ }
+
+ WebPData assembled;
+ if (WEBP_MUX_OK != WebPMuxAssemble(mux, &assembled)) {
+ return false;
+ }
+
+ stream->write(assembled.bytes, assembled.size);
+ WebPDataClear(&assembled);
+ }
+
+ return true;
+}
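+
+// A minimal usage sketch (not part of the upstream sources): a caller-side
+// view of the one-shot API above, assuming a populated SkPixmap `src`:
+//
+//   SkDynamicMemoryWStream out;
+//   SkWebpEncoder::Options opts;
+//   opts.fCompression = SkWebpEncoder::Compression::kLossy;
+//   opts.fQuality = 80;   // 0..100, forwarded to WebPConfigPreset()
+//   if (SkWebpEncoder::Encode(&out, src, opts)) {
+//       sk_sp<SkData> webp = out.detachAsData();
+//   }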
+
+#endif
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
new file mode 100644
index 0000000000..bf6df9fb41
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkDiscardableMemory.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTInternalLList.h"
+#include "src/lazy/SkDiscardableMemoryPool.h"
+
+// Note:
+// A PoolDiscardableMemory is memory that is counted in a pool.
+// A DiscardableMemoryPool is a pool of PoolDiscardableMemorys.
+
+namespace {
+
+class PoolDiscardableMemory;
+
+/**
+ *  The concrete implementation of SkDiscardableMemoryPool; non-global
+ *  instances can be used by unit tests to verify that the pool works.
+ */
+class DiscardableMemoryPool : public SkDiscardableMemoryPool {
+public:
+ DiscardableMemoryPool(size_t budget);
+ ~DiscardableMemoryPool() override;
+
+ std::unique_ptr<SkDiscardableMemory> make(size_t bytes);
+ SkDiscardableMemory* create(size_t bytes) override {
+ return this->make(bytes).release(); // TODO: change API
+ }
+
+ size_t getRAMUsed() override;
+ void setRAMBudget(size_t budget) override;
+ size_t getRAMBudget() override { return fBudget; }
+
+ /** purges all unlocked DMs */
+ void dumpPool() override;
+
+ #if SK_LAZY_CACHE_STATS // Defined in SkDiscardableMemoryPool.h
+ int getCacheHits() override { return fCacheHits; }
+ int getCacheMisses() override { return fCacheMisses; }
+ void resetCacheHitsAndMisses() override {
+ fCacheHits = fCacheMisses = 0;
+ }
+ int fCacheHits;
+ int fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+
+private:
+ SkMutex fMutex;
+ size_t fBudget;
+ size_t fUsed;
+ SkTInternalLList<PoolDiscardableMemory> fList;
+
+ /** Function called to free memory if needed */
+ void dumpDownTo(size_t budget);
+    /** called by PoolDiscardableMemory upon destruction */
+    void removeFromPool(PoolDiscardableMemory* dm);
+    /** called by PoolDiscardableMemory::lock() */
+    bool lock(PoolDiscardableMemory* dm);
+    /** called by PoolDiscardableMemory::unlock() */
+ void unlock(PoolDiscardableMemory* dm);
+
+ friend class PoolDiscardableMemory;
+
+ typedef SkDiscardableMemory::Factory INHERITED;
+};
+
+/**
+ * A PoolDiscardableMemory is a SkDiscardableMemory that relies on
+ * a DiscardableMemoryPool object to manage the memory.
+ */
+class PoolDiscardableMemory : public SkDiscardableMemory {
+public:
+ PoolDiscardableMemory(sk_sp<DiscardableMemoryPool> pool, SkAutoFree pointer, size_t bytes);
+ ~PoolDiscardableMemory() override;
+ bool lock() override;
+ void* data() override;
+ void unlock() override;
+ friend class DiscardableMemoryPool;
+private:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(PoolDiscardableMemory);
+ sk_sp<DiscardableMemoryPool> fPool;
+ bool fLocked;
+ SkAutoFree fPointer;
+ const size_t fBytes;
+};
+
+PoolDiscardableMemory::PoolDiscardableMemory(sk_sp<DiscardableMemoryPool> pool,
+ SkAutoFree pointer,
+ size_t bytes)
+ : fPool(std::move(pool)), fLocked(true), fPointer(std::move(pointer)), fBytes(bytes) {
+ SkASSERT(fPool != nullptr);
+ SkASSERT(fPointer != nullptr);
+ SkASSERT(fBytes > 0);
+}
+
+PoolDiscardableMemory::~PoolDiscardableMemory() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ fPool->removeFromPool(this);
+}
+
+bool PoolDiscardableMemory::lock() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ return fPool->lock(this);
+}
+
+void* PoolDiscardableMemory::data() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ return fPointer.get();
+}
+
+void PoolDiscardableMemory::unlock() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ fPool->unlock(this);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DiscardableMemoryPool::DiscardableMemoryPool(size_t budget)
+ : fBudget(budget)
+ , fUsed(0) {
+ #if SK_LAZY_CACHE_STATS
+ fCacheHits = 0;
+ fCacheMisses = 0;
+ #endif // SK_LAZY_CACHE_STATS
+}
+DiscardableMemoryPool::~DiscardableMemoryPool() {
+ // PoolDiscardableMemory objects that belong to this pool are
+ // always deleted before deleting this pool since each one has a
+ // ref to the pool.
+ SkASSERT(fList.isEmpty());
+}
+
+void DiscardableMemoryPool::dumpDownTo(size_t budget) {
+ fMutex.assertHeld();
+ if (fUsed <= budget) {
+ return;
+ }
+ using Iter = SkTInternalLList<PoolDiscardableMemory>::Iter;
+ Iter iter;
+ PoolDiscardableMemory* cur = iter.init(fList, Iter::kTail_IterStart);
+ while ((fUsed > budget) && (cur)) {
+ if (!cur->fLocked) {
+ PoolDiscardableMemory* dm = cur;
+ SkASSERT(dm->fPointer != nullptr);
+ dm->fPointer = nullptr;
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ cur = iter.prev();
+            // Purged DMs are taken out of the list. This saves time
+            // looking them up. Purged DMs are NOT deleted.
+ fList.remove(dm);
+ } else {
+ cur = iter.prev();
+ }
+ }
+}
+
+std::unique_ptr<SkDiscardableMemory> DiscardableMemoryPool::make(size_t bytes) {
+ SkAutoFree addr(sk_malloc_canfail(bytes));
+ if (nullptr == addr) {
+ return nullptr;
+ }
+ auto dm = skstd::make_unique<PoolDiscardableMemory>(sk_ref_sp(this), std::move(addr), bytes);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ fList.addToHead(dm.get());
+ fUsed += bytes;
+ this->dumpDownTo(fBudget);
+ return dm;
+}
+
+void DiscardableMemoryPool::removeFromPool(PoolDiscardableMemory* dm) {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ // This is called by dm's destructor.
+ if (dm->fPointer != nullptr) {
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ fList.remove(dm);
+ } else {
+ SkASSERT(!fList.isInList(dm));
+ }
+}
+
+bool DiscardableMemoryPool::lock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ if (nullptr == dm->fPointer) {
+ // May have been purged while waiting for lock.
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+ return false;
+ }
+ dm->fLocked = true;
+ fList.remove(dm);
+ fList.addToHead(dm);
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheHits;
+ #endif // SK_LAZY_CACHE_STATS
+ return true;
+}
+
+void DiscardableMemoryPool::unlock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ dm->fLocked = false;
+ this->dumpDownTo(fBudget);
+}
+
+size_t DiscardableMemoryPool::getRAMUsed() {
+ return fUsed;
+}
+void DiscardableMemoryPool::setRAMBudget(size_t budget) {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ fBudget = budget;
+ this->dumpDownTo(fBudget);
+}
+void DiscardableMemoryPool::dumpPool() {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ this->dumpDownTo(0);
+}
+
+} // namespace
+
+sk_sp<SkDiscardableMemoryPool> SkDiscardableMemoryPool::Make(size_t size) {
+ return sk_make_sp<DiscardableMemoryPool>(size);
+}
+
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool() {
+ // Intentionally leak this global pool.
+ static SkDiscardableMemoryPool* global =
+ new DiscardableMemoryPool(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE);
+ return global;
+}
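+
+// A sketch of the lock/unlock contract (not part of the upstream sources),
+// as a hypothetical caller with a small private pool would see it:
+//
+//   sk_sp<SkDiscardableMemoryPool> pool = SkDiscardableMemoryPool::Make(1 << 20);
+//   std::unique_ptr<SkDiscardableMemory> dm(pool->create(4096)); // born locked
+//   memset(dm->data(), 0, 4096);  // safe while locked
+//   dm->unlock();                 // now purgeable if the pool exceeds budget
+//   if (dm->lock()) {             // may fail: the pool may have purged it
+//       /* ... use dm->data() ... */
+//       dm->unlock();
+//   }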
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
new file mode 100644
index 0000000000..46519e3a18
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemoryPool_DEFINED
+#define SkDiscardableMemoryPool_DEFINED
+
+#include "include/private/SkMutex.h"
+#include "src/core/SkDiscardableMemory.h"
+
+#ifndef SK_LAZY_CACHE_STATS
+ #ifdef SK_DEBUG
+ #define SK_LAZY_CACHE_STATS 1
+ #else
+ #define SK_LAZY_CACHE_STATS 0
+ #endif
+#endif
+
+/**
+ * An implementation of Discardable Memory that manages a fixed-size
+ * budget of memory. When the allocated memory exceeds this size,
+ * unlocked blocks of memory are purged. If all memory is locked, it
+ * can exceed the memory-use budget.
+ */
+class SkDiscardableMemoryPool : public SkDiscardableMemory::Factory {
+public:
+ virtual ~SkDiscardableMemoryPool() { }
+
+ virtual size_t getRAMUsed() = 0;
+ virtual void setRAMBudget(size_t budget) = 0;
+ virtual size_t getRAMBudget() = 0;
+
+ /** purges all unlocked DMs */
+ virtual void dumpPool() = 0;
+
+ #if SK_LAZY_CACHE_STATS
+ /**
+ * These two values are a count of the number of successful and
+ * failed calls to SkDiscardableMemory::lock() for all DMs managed
+ * by this pool.
+ */
+ virtual int getCacheHits() = 0;
+ virtual int getCacheMisses() = 0;
+ virtual void resetCacheHitsAndMisses() = 0;
+ #endif
+
+ /**
+ * This non-global pool can be used for unit tests to verify that
+ * the pool works.
+ */
+ static sk_sp<SkDiscardableMemoryPool> Make(size_t size);
+};
+
+/**
+ * Returns (and creates if needed) a threadsafe global
+ * SkDiscardableMemoryPool.
+ */
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool();
+
+#if !defined(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE)
+#define SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE (128 * 1024 * 1024)
+#endif
+
+#endif // SkDiscardableMemoryPool_DEFINED
diff --git a/gfx/skia/skia/src/opts/Sk4px_NEON.h b/gfx/skia/skia/src/opts/Sk4px_NEON.h
new file mode 100644
index 0000000000..81e3f60d23
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_NEON.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+namespace { // NOLINT(google-build-namespaces)
+
+inline Sk4px::Wide Sk4px::widen() const {
+ return Sk16h(vmovl_u8(vget_low_u8 (this->fVec)),
+ vmovl_u8(vget_high_u8(this->fVec)));
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)),
+ vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ const Sk4px::Wide o(other); // Should be no code, but allows us to access fLo, fHi.
+ return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),
+ vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as ((x+128) + ((x+128)>>8)) >> 8. The 'r' (rounding) in each instruction supplies one of the +128s.
+ return Sk16b(vcombine_u8(vraddhn_u16(this->fLo.fVec, vrshrq_n_u16(this->fLo.fVec, 8)),
+ vraddhn_u16(this->fHi.fVec, vrshrq_n_u16(this->fHi.fVec, 8))));
+}
+
+inline Sk4px Sk4px::alphas() const {
+ auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT); // ___3 ___2 ___1 ___0
+ return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101)); // 3333 2222 1111 0000
+}
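+
+// Aside (not part of the upstream sources): the 0x01010101 multiply above is a
+// byte splat. With the alpha A in the low byte of a 32-bit lane (other bytes
+// zero after the shift),
+//   A * 0x01010101 == A | (A << 8) | (A << 16) | (A << 24)
+// with no carries, since A <= 0xFF (e.g. 0x37 * 0x01010101 == 0x37373737).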
+
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ uint8x16_t a8 = vdupq_n_u8(0); // ____ ____ ____ ____
+ a8 = vld1q_lane_u8(a+0, a8, 0); // ____ ____ ____ ___0
+ a8 = vld1q_lane_u8(a+1, a8, 4); // ____ ____ ___1 ___0
+ a8 = vld1q_lane_u8(a+2, a8, 8); // ____ ___2 ___1 ___0
+ a8 = vld1q_lane_u8(a+3, a8, 12); // ___3 ___2 ___1 ___0
+ auto a32 = (uint32x4_t)a8; //
+ return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // 3333 2222 1111 0000
+}
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ uint8x16_t a8 = vdupq_n_u8(0); // ____ ____ ____ ____
+ a8 = vld1q_lane_u8(a+0, a8, 0); // ____ ____ ____ ___0
+ a8 = vld1q_lane_u8(a+1, a8, 4); // ____ ____ ___1 ___0
+ auto a32 = (uint32x4_t)a8; //
+ return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // ____ ____ 1111 0000
+}
+
+} // namespace
+
diff --git a/gfx/skia/skia/src/opts/Sk4px_SSE2.h b/gfx/skia/skia/src/opts/Sk4px_SSE2.h
new file mode 100644
index 0000000000..6ce6dd5698
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_SSE2.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+namespace { // NOLINT(google-build-namespaces)
+
+inline Sk4px::Wide Sk4px::widen() const {
+ return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
+ _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return this->widen() * Sk4px(other).widen();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ Sk4px::Wide r = (*this + other) >> 8;
+ return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+ // (x + 127) / 255 == ((x+128) * 257)>>16,
+ // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+ const __m128i _128 = _mm_set1_epi16(128),
+ _257 = _mm_set1_epi16(257);
+ return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+ _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
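+
+// Worked check of the identity above (not part of the upstream sources), in
+// scalar form at x = 200 * 255, a typical premul product:
+//
+//   int x = 200 * 255;                          // 51000
+//   int a = (x + 128 + ((x + 128) >> 8)) >> 8;  // 200
+//   int b = ((x + 128) * 257) >> 16;            // 200, the same value
+//
+// Both expressions compute round(x / 255.0) exactly for all x in [0, 255*255].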
+
+// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
+// These are safe on x86, often with no speed penalty.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
+ __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+ return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
+ }
+
+ inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ uint32_t as;
+ memcpy(&as, a, 4);
+ __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
+ return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
+ }
+#else
+ inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
+ // We exploit that A >= rgb for any premul pixel.
+ __m128i as = fVec; // 3xxx 2xxx 1xxx 0xxx
+ as = _mm_max_epu8(as, _mm_srli_epi32(as, 8)); // 33xx 22xx 11xx 00xx
+ as = _mm_max_epu8(as, _mm_srli_epi32(as, 16)); // 3333 2222 1111 0000
+ return Sk16b(as);
+ }
+
+ inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ __m128i as;
+ memcpy(&as, a, 4); // ____ ____ ____ 3210
+ as = _mm_unpacklo_epi8 (as, as); // ____ ____ 3322 1100
+ as = _mm_unpacklo_epi16(as, as); // 3333 2222 1111 0000
+ return Sk16b(as);
+ }
+#endif
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ uint16_t alphas;
+ memcpy(&alphas, a, 2);
+ uint32_t alphas_and_two_zeros = alphas; // Aa -> Aa00
+
+ return Load4Alphas((const SkAlpha*)&alphas_and_two_zeros);
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/opts/Sk4px_none.h b/gfx/skia/skia/src/opts/Sk4px_none.h
new file mode 100644
index 0000000000..41e8168bbb
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_none.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkUtils.h"
+
+namespace { // NOLINT(google-build-namespaces)
+
+inline Sk4px::Wide Sk4px::widen() const {
+ return Sk16h((*this)[ 0], (*this)[ 1], (*this)[ 2], (*this)[ 3],
+ (*this)[ 4], (*this)[ 5], (*this)[ 6], (*this)[ 7],
+ (*this)[ 8], (*this)[ 9], (*this)[10], (*this)[11],
+ (*this)[12], (*this)[13], (*this)[14], (*this)[15]);
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return this->widen() * Sk4px(other).widen();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ Sk4px::Wide r = (*this + other) >> 8;
+ return Sk16b(r[ 0], r[ 1], r[ 2], r[ 3],
+ r[ 4], r[ 5], r[ 6], r[ 7],
+ r[ 8], r[ 9], r[10], r[11],
+ r[12], r[13], r[14], r[15]);
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
+inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+ return Sk16b((*this)[ 3], (*this)[ 3], (*this)[ 3], (*this)[ 3],
+ (*this)[ 7], (*this)[ 7], (*this)[ 7], (*this)[ 7],
+ (*this)[11], (*this)[11], (*this)[11], (*this)[11],
+ (*this)[15], (*this)[15], (*this)[15], (*this)[15]);
+}
+
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ return Sk16b(a[0], a[0], a[0], a[0],
+ a[1], a[1], a[1], a[1],
+ a[2], a[2], a[2], a[2],
+ a[3], a[3], a[3], a[3]);
+}
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ return Sk16b(a[0], a[0], a[0], a[0],
+ a[1], a[1], a[1], a[1],
+ 0,0,0,0,
+ 0,0,0,0);
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/opts/SkBitmapFilter_opts.h b/gfx/skia/skia/src/opts/SkBitmapFilter_opts.h
new file mode 100644
index 0000000000..240b21caf9
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapFilter_opts.h
@@ -0,0 +1,927 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapFilter_opts_DEFINED
+#define SkBitmapFilter_opts_DEFINED
+
+#include "src/core/SkConvolver.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+
+ static SK_ALWAYS_INLINE void AccumRemainder(const unsigned char* pixelsLeft,
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues, __m128i& accum, int r) {
+ int remainder[4] = {0};
+ for (int i = 0; i < r; i++) {
+ SkConvolutionFilter1D::ConvolutionFixed coeff = filterValues[i];
+ remainder[0] += coeff * pixelsLeft[i * 4 + 0];
+ remainder[1] += coeff * pixelsLeft[i * 4 + 1];
+ remainder[2] += coeff * pixelsLeft[i * 4 + 2];
+ remainder[3] += coeff * pixelsLeft[i * 4 + 3];
+ }
+ __m128i t = _mm_setr_epi32(remainder[0], remainder[1], remainder[2], remainder[3]);
+ accum = _mm_add_epi32(accum, t);
+ }
+
+ // Convolves horizontally along a single row. The row data is given in
+ // |srcData| and continues for the numValues() of the filter.
+ void convolve_horizontally(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow,
+ bool /*hasAlpha*/) {
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ // Get the filter that determines the current output pixel.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filterLength| pixels (4 bytes each) after this.
+ const unsigned char* rowToFilter = &srcData[filterOffset * 4];
+
+ __m128i zero = _mm_setzero_si128();
+ __m128i accum = _mm_setzero_si128();
+
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filterX = 0; filterX < filterLength >> 2; filterX++) {
+ // Load 4 coefficients => duplicate 1st and 2nd of them for all channels.
+ __m128i coeff, coeff16;
+ // [16] xx xx xx xx c3 c2 c1 c0
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filterValues));
+ // [16] xx xx xx xx c1 c1 c0 c0
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+ // [16] c1 c1 c1 c1 c0 c0 c0 c0
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+
+ // Load four pixels => unpack the first two pixels to 16 bits =>
+ // multiply with coefficients => accumulate the convolution result.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src8 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(rowToFilter));
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0*c0 b0*c0 g0*c0 r0*c0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ // [32] a1*c1 b1*c1 g1*c1 r1*c1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+
+ // Duplicate 3rd and 4th coefficients for all channels =>
+ // unpack the 3rd and 4th pixels to 16 bits => multiply with coefficients
+ // => accumulate the convolution results.
+ // [16] xx xx xx xx c3 c3 c2 c2
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ // [16] c3 c3 c3 c3 c2 c2 c2 c2
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+ // [16] a3 g3 b3 r3 a2 g2 b2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2*c2 b2*c2 g2*c2 r2*c2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ // [32] a3*c3 b3*c3 g3*c3 r3*c3
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+
+ // Advance the pixel and coefficients pointers.
+ rowToFilter += 16;
+ filterValues += 4;
+ }
+
+        // When |filterLength| is not divisible by 4, we accumulate the last
+        // one to three coefficients one at a time.
+ int r = filterLength & 3;
+ if (r) {
+ int remainderOffset = (filterOffset + filterLength - r) * 4;
+ AccumRemainder(srcData + remainderOffset, filterValues, accum, r);
+ }
+
+ // Shift right for fixed point implementation.
+ accum = _mm_srai_epi32(accum, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ accum = _mm_packs_epi32(accum, zero);
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ accum = _mm_packus_epi16(accum, zero);
+
+ // Store the pixel value of 32 bits.
+ *(reinterpret_cast<int*>(outRow)) = _mm_cvtsi128_si32(accum);
+ outRow += 4;
+ }
+ }
+
+ // Convolves horizontally along four rows. The row data is given in
+ // |srcData| and continues for the numValues() of the filter.
+    // The algorithm is almost the same as |convolve_horizontally|. Please
+ // refer to that function for detailed comments.
+ void convolve_4_rows_horizontally(const unsigned char* srcData[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow[4],
+ size_t outRowBytes) {
+ SkDEBUGCODE(const unsigned char* out_row_0_start = outRow[0];)
+
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ __m128i zero = _mm_setzero_si128();
+
+ // four pixels in a column per iteration.
+ __m128i accum0 = _mm_setzero_si128();
+ __m128i accum1 = _mm_setzero_si128();
+ __m128i accum2 = _mm_setzero_si128();
+ __m128i accum3 = _mm_setzero_si128();
+
+ int start = filterOffset * 4;
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filterX = 0; filterX < (filterLength >> 2); filterX++) {
+ __m128i coeff, coeff16lo, coeff16hi;
+ // [16] xx xx xx xx c3 c2 c1 c0
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filterValues));
+ // [16] xx xx xx xx c1 c1 c0 c0
+ coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+ // [16] c1 c1 c1 c1 c0 c0 c0 c0
+ coeff16lo = _mm_unpacklo_epi16(coeff16lo, coeff16lo);
+ // [16] xx xx xx xx c3 c3 c2 c2
+ coeff16hi = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ // [16] c3 c3 c3 c3 c2 c2 c2 c2
+ coeff16hi = _mm_unpacklo_epi16(coeff16hi, coeff16hi);
+
+ __m128i src8, src16, mul_hi, mul_lo, t;
+
+#define ITERATION(src, accum) \
+ src8 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)); \
+ src16 = _mm_unpacklo_epi8(src8, zero); \
+ mul_hi = _mm_mulhi_epi16(src16, coeff16lo); \
+ mul_lo = _mm_mullo_epi16(src16, coeff16lo); \
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ src16 = _mm_unpackhi_epi8(src8, zero); \
+ mul_hi = _mm_mulhi_epi16(src16, coeff16hi); \
+ mul_lo = _mm_mullo_epi16(src16, coeff16hi); \
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t)
+
+ ITERATION(srcData[0] + start, accum0);
+ ITERATION(srcData[1] + start, accum1);
+ ITERATION(srcData[2] + start, accum2);
+ ITERATION(srcData[3] + start, accum3);
+
+ start += 16;
+ filterValues += 4;
+ }
+
+ int r = filterLength & 3;
+ if (r) {
+ int remainderOffset = (filterOffset + filterLength - r) * 4;
+ AccumRemainder(srcData[0] + remainderOffset, filterValues, accum0, r);
+ AccumRemainder(srcData[1] + remainderOffset, filterValues, accum1, r);
+ AccumRemainder(srcData[2] + remainderOffset, filterValues, accum2, r);
+ AccumRemainder(srcData[3] + remainderOffset, filterValues, accum3, r);
+ }
+
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum0 = _mm_packs_epi32(accum0, zero);
+ accum0 = _mm_packus_epi16(accum0, zero);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_packs_epi32(accum1, zero);
+ accum1 = _mm_packus_epi16(accum1, zero);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_packs_epi32(accum2, zero);
+ accum2 = _mm_packus_epi16(accum2, zero);
+ accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);
+ accum3 = _mm_packs_epi32(accum3, zero);
+ accum3 = _mm_packus_epi16(accum3, zero);
+
+ // We seem to be running off the edge here (chromium:491660).
+ SkASSERT(((size_t)outRow[0] - (size_t)out_row_0_start) < outRowBytes);
+
+ *(reinterpret_cast<int*>(outRow[0])) = _mm_cvtsi128_si32(accum0);
+ *(reinterpret_cast<int*>(outRow[1])) = _mm_cvtsi128_si32(accum1);
+ *(reinterpret_cast<int*>(outRow[2])) = _mm_cvtsi128_si32(accum2);
+ *(reinterpret_cast<int*>(outRow[3])) = _mm_cvtsi128_si32(accum3);
+
+ outRow[0] += 4;
+ outRow[1] += 4;
+ outRow[2] += 4;
+ outRow[3] += 4;
+ }
+ }
+
+ // Does vertical convolution to produce one output row. The filter values and
+ // length are given in the first two parameters. These are applied to each
+ // of the rows pointed to in the |sourceDataRows| array, with each row
+ // being |pixelWidth| wide.
+ //
+ // The output must have room for |pixelWidth * 4| bytes.
+ template<bool hasAlpha>
+ void ConvolveVertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow) {
+ // Output four pixels per iteration (16 bytes).
+ int width = pixelWidth & ~3;
+ __m128i zero = _mm_setzero_si128();
+ for (int outX = 0; outX < width; outX += 4) {
+ // Accumulated result for each pixel. 32 bits per RGBA channel.
+ __m128i accum0 = _mm_setzero_si128();
+ __m128i accum1 = _mm_setzero_si128();
+ __m128i accum2 = _mm_setzero_si128();
+ __m128i accum3 = _mm_setzero_si128();
+
+ // Convolve with one filter coefficient per iteration.
+ for (int filterY = 0; filterY < filterLength; filterY++) {
+
+ // Duplicate the filter coefficient 8 times.
+ // [16] cj cj cj cj cj cj cj cj
+ __m128i coeff16 = _mm_set1_epi16(filterValues[filterY]);
+
+ // Load four pixels (16 bytes) together.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ const __m128i* src = reinterpret_cast<const __m128i*>(
+ &sourceDataRows[filterY][outX << 2]);
+ __m128i src8 = _mm_loadu_si128(src);
+
+                // Unpack 1st and 2nd pixels from 8 bits to 16 bits for each channel =>
+ // multiply with current coefficient => accumulate the result.
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0 b0 g0 r0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum0 = _mm_add_epi32(accum0, t);
+ // [32] a1 b1 g1 r1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum1 = _mm_add_epi32(accum1, t);
+
+                // Unpack 3rd and 4th pixels from 8 bits to 16 bits for each channel =>
+ // multiply with current coefficient => accumulate the result.
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2 b2 g2 r2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum2 = _mm_add_epi32(accum2, t);
+ // [32] a3 b3 g3 r3
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum3 = _mm_add_epi32(accum3, t);
+ }
+
+ // Shift right for fixed point implementation.
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packs_epi32(accum0, accum1);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ accum2 = _mm_packs_epi32(accum2, accum3);
+
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packus_epi16(accum0, accum2);
+
+ if (hasAlpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ __m128i a = _mm_srli_epi32(accum0, 8);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ __m128i b = _mm_max_epu8(a, accum0); // Max of r and g.
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = _mm_srli_epi32(accum0, 16);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = _mm_max_epu8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = _mm_slli_epi32(b, 24);
+
+                // Make sure the alpha channel value is at least as large as the
+                // maximum value of the color channels.
+ accum0 = _mm_max_epu8(b, accum0);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ __m128i mask = _mm_set1_epi32(0xff000000);
+ accum0 = _mm_or_si128(accum0, mask);
+ }
+
+ // Store the convolution result (16 bytes) and advance the pixel pointers.
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(outRow), accum0);
+ outRow += 16;
+ }
+
+        // When the output width is not divisible by 4, we need to store the
+        // remaining pixels (at most three, 4 bytes each) one at a time; a
+        // fourth pixel is never needed.
+ int r = pixelWidth & 3;
+ if (r) {
+ __m128i accum0 = _mm_setzero_si128();
+ __m128i accum1 = _mm_setzero_si128();
+ __m128i accum2 = _mm_setzero_si128();
+ for (int filterY = 0; filterY < filterLength; ++filterY) {
+ __m128i coeff16 = _mm_set1_epi16(filterValues[filterY]);
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ const __m128i* src = reinterpret_cast<const __m128i*>(
+ &sourceDataRows[filterY][width << 2]);
+ __m128i src8 = _mm_loadu_si128(src);
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0 b0 g0 r0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum0 = _mm_add_epi32(accum0, t);
+ // [32] a1 b1 g1 r1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum1 = _mm_add_epi32(accum1, t);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2 b2 g2 r2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum2 = _mm_add_epi32(accum2, t);
+ }
+
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packs_epi32(accum0, accum1);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ accum2 = _mm_packs_epi32(accum2, zero);
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packus_epi16(accum0, accum2);
+ if (hasAlpha) {
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ __m128i a = _mm_srli_epi32(accum0, 8);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ __m128i b = _mm_max_epu8(a, accum0); // Max of r and g.
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = _mm_srli_epi32(accum0, 16);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = _mm_max_epu8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = _mm_slli_epi32(b, 24);
+ accum0 = _mm_max_epu8(b, accum0);
+ } else {
+ __m128i mask = _mm_set1_epi32(0xff000000);
+ accum0 = _mm_or_si128(accum0, mask);
+ }
+
+ for (int i = 0; i < r; i++) {
+ *(reinterpret_cast<int*>(outRow)) = _mm_cvtsi128_si32(accum0);
+ accum0 = _mm_srli_si128(accum0, 4);
+ outRow += 4;
+ }
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+
+ static SK_ALWAYS_INLINE void AccumRemainder(const unsigned char* pixelsLeft,
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues, int32x4_t& accum, int r) {
+ int remainder[4] = {0};
+ for (int i = 0; i < r; i++) {
+ SkConvolutionFilter1D::ConvolutionFixed coeff = filterValues[i];
+ remainder[0] += coeff * pixelsLeft[i * 4 + 0];
+ remainder[1] += coeff * pixelsLeft[i * 4 + 1];
+ remainder[2] += coeff * pixelsLeft[i * 4 + 2];
+ remainder[3] += coeff * pixelsLeft[i * 4 + 3];
+ }
+ int32x4_t t = {remainder[0], remainder[1], remainder[2], remainder[3]};
+ accum += t;
+ }
+
+ // Convolves horizontally along a single row. The row data is given in
+ // |srcData| and continues for the numValues() of the filter.
+ void convolve_horizontally(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow,
+ bool /*hasAlpha*/) {
+ // Loop over each pixel on this row in the output image.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ uint8x8_t coeff_mask0 = vcreate_u8(0x0100010001000100);
+ uint8x8_t coeff_mask1 = vcreate_u8(0x0302030203020302);
+ uint8x8_t coeff_mask2 = vcreate_u8(0x0504050405040504);
+ uint8x8_t coeff_mask3 = vcreate_u8(0x0706070607060706);
+ // Get the filter that determines the current output pixel.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filterLength| pixels (4 bytes each) after this.
+ const unsigned char* rowToFilter = &srcData[filterOffset * 4];
+
+ // Apply the filter to the row to get the destination pixel in |accum|.
+ int32x4_t accum = vdupq_n_s32(0);
+ for (int filterX = 0; filterX < filterLength >> 2; filterX++) {
+ // Load 4 coefficients
+ int16x4_t coeffs, coeff0, coeff1, coeff2, coeff3;
+ coeffs = vld1_s16(filterValues);
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask2));
+ coeff3 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask3));
+
+ // Load pixels and calc
+ uint8x16_t pixels = vld1q_u8(rowToFilter);
+ int16x8_t p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels)));
+ int16x8_t p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels)));
+
+ int16x4_t p0_src = vget_low_s16(p01_16);
+ int16x4_t p1_src = vget_high_s16(p01_16);
+ int16x4_t p2_src = vget_low_s16(p23_16);
+ int16x4_t p3_src = vget_high_s16(p23_16);
+
+ int32x4_t p0 = vmull_s16(p0_src, coeff0);
+ int32x4_t p1 = vmull_s16(p1_src, coeff1);
+ int32x4_t p2 = vmull_s16(p2_src, coeff2);
+ int32x4_t p3 = vmull_s16(p3_src, coeff3);
+
+ accum += p0;
+ accum += p1;
+ accum += p2;
+ accum += p3;
+
+ // Advance the pointers
+ rowToFilter += 16;
+ filterValues += 4;
+ }
+
+ int r = filterLength & 3;
+ if (r) {
+ int remainder_offset = (filterOffset + filterLength - r) * 4;
+ AccumRemainder(srcData + remainder_offset, filterValues, accum, r);
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of fractional part.
+ accum = vshrq_n_s32(accum, SkConvolutionFilter1D::kShiftBits);
+
+ // Pack and store the new pixel.
+ int16x4_t accum16 = vqmovn_s32(accum);
+ uint8x8_t accum8 = vqmovun_s16(vcombine_s16(accum16, accum16));
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow), vreinterpret_u32_u8(accum8), 0);
+ outRow += 4;
+ }
+ }
+
+ // Convolves horizontally along four rows. The row data is given in
+ // |srcData| and continues for the numValues() of the filter.
+    // The algorithm is almost the same as |convolve_horizontally|. Please
+ // refer to that function for detailed comments.
+ void convolve_4_rows_horizontally(const unsigned char* srcData[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow[4],
+ size_t outRowBytes) {
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // four pixels in a column per iteration.
+ int32x4_t accum0 = vdupq_n_s32(0);
+ int32x4_t accum1 = vdupq_n_s32(0);
+ int32x4_t accum2 = vdupq_n_s32(0);
+ int32x4_t accum3 = vdupq_n_s32(0);
+
+ uint8x8_t coeff_mask0 = vcreate_u8(0x0100010001000100);
+ uint8x8_t coeff_mask1 = vcreate_u8(0x0302030203020302);
+ uint8x8_t coeff_mask2 = vcreate_u8(0x0504050405040504);
+ uint8x8_t coeff_mask3 = vcreate_u8(0x0706070607060706);
+
+ int start = filterOffset * 4;
+
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filterX = 0; filterX < (filterLength >> 2); filterX++) {
+ int16x4_t coeffs, coeff0, coeff1, coeff2, coeff3;
+
+ coeffs = vld1_s16(filterValues);
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask2));
+ coeff3 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask3));
+
+ uint8x16_t pixels;
+ int16x8_t p01_16, p23_16;
+ int32x4_t p0, p1, p2, p3;
+
+#define ITERATION(src, accum) \
+ pixels = vld1q_u8(src); \
+ p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels))); \
+ p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels))); \
+ p0 = vmull_s16(vget_low_s16(p01_16), coeff0); \
+ p1 = vmull_s16(vget_high_s16(p01_16), coeff1); \
+ p2 = vmull_s16(vget_low_s16(p23_16), coeff2); \
+ p3 = vmull_s16(vget_high_s16(p23_16), coeff3); \
+ accum += p0; \
+ accum += p1; \
+ accum += p2; \
+ accum += p3
+
+ ITERATION(srcData[0] + start, accum0);
+ ITERATION(srcData[1] + start, accum1);
+ ITERATION(srcData[2] + start, accum2);
+ ITERATION(srcData[3] + start, accum3);
+
+ start += 16;
+ filterValues += 4;
+ }
+
+ int r = filterLength & 3;
+ if (r) {
+ int remainder_offset = (filterOffset + filterLength - r) * 4;
+ AccumRemainder(srcData[0] + remainder_offset, filterValues, accum0, r);
+ AccumRemainder(srcData[1] + remainder_offset, filterValues, accum1, r);
+ AccumRemainder(srcData[2] + remainder_offset, filterValues, accum2, r);
+ AccumRemainder(srcData[3] + remainder_offset, filterValues, accum3, r);
+ }
+
+ int16x4_t accum16;
+ uint8x8_t res0, res1, res2, res3;
+
+#define PACK_RESULT(accum, res) \
+ accum = vshrq_n_s32(accum, SkConvolutionFilter1D::kShiftBits); \
+ accum16 = vqmovn_s32(accum); \
+ res = vqmovun_s16(vcombine_s16(accum16, accum16));
+
+ PACK_RESULT(accum0, res0);
+ PACK_RESULT(accum1, res1);
+ PACK_RESULT(accum2, res2);
+ PACK_RESULT(accum3, res3);
+
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[0]), vreinterpret_u32_u8(res0), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[1]), vreinterpret_u32_u8(res1), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[2]), vreinterpret_u32_u8(res2), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[3]), vreinterpret_u32_u8(res3), 0);
+ outRow[0] += 4;
+ outRow[1] += 4;
+ outRow[2] += 4;
+ outRow[3] += 4;
+ }
+ }
+
+
+ // Does vertical convolution to produce one output row. The filter values and
+ // length are given in the first two parameters. These are applied to each
+ // of the rows pointed to in the |sourceDataRows| array, with each row
+ // being |pixelWidth| wide.
+ //
+ // The output must have room for |pixelWidth * 4| bytes.
+ template<bool hasAlpha>
+ void ConvolveVertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow) {
+ int width = pixelWidth & ~3;
+
+ // Output four pixels per iteration (16 bytes).
+ for (int outX = 0; outX < width; outX += 4) {
+
+ // Accumulated result for each pixel. 32 bits per RGBA channel.
+ int32x4_t accum0 = vdupq_n_s32(0);
+ int32x4_t accum1 = vdupq_n_s32(0);
+ int32x4_t accum2 = vdupq_n_s32(0);
+ int32x4_t accum3 = vdupq_n_s32(0);
+
+ // Convolve with one filter coefficient per iteration.
+ for (int filterY = 0; filterY < filterLength; filterY++) {
+
+ // Duplicate the filter coefficient 4 times.
+ // [16] cj cj cj cj
+ int16x4_t coeff16 = vdup_n_s16(filterValues[filterY]);
+
+ // Load four pixels (16 bytes) together.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][outX << 2]);
+
+ int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));
+ int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));
+ int16x4_t src16_0 = vget_low_s16(src16_01);
+ int16x4_t src16_1 = vget_high_s16(src16_01);
+ int16x4_t src16_2 = vget_low_s16(src16_23);
+ int16x4_t src16_3 = vget_high_s16(src16_23);
+
+ accum0 += vmull_s16(src16_0, coeff16);
+ accum1 += vmull_s16(src16_1, coeff16);
+ accum2 += vmull_s16(src16_2, coeff16);
+ accum3 += vmull_s16(src16_3, coeff16);
+ }
+
+ // Shift right for fixed point implementation.
+ accum0 = vshrq_n_s32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = vshrq_n_s32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = vshrq_n_s32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum3 = vshrq_n_s32(accum3, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ int16x8_t accum16_0 = vcombine_s16(vqmovn_s32(accum0), vqmovn_s32(accum1));
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ int16x8_t accum16_1 = vcombine_s16(vqmovn_s32(accum2), vqmovn_s32(accum3));
+
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t accum8 = vcombine_u8(vqmovun_s16(accum16_0), vqmovun_s16(accum16_1));
+
+ if (hasAlpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ uint8x16_t a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 8));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ uint8x16_t b = vmaxq_u8(a, accum8); // Max of r and g
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 16));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = vmaxq_u8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = vreinterpretq_u8_u32(vshlq_n_u32(vreinterpretq_u32_u8(b), 24));
+
+                // Make sure the alpha channel value is at least as large as the
+                // maximum value of the color channels.
+ accum8 = vmaxq_u8(b, accum8);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ accum8 = vreinterpretq_u8_u32(vreinterpretq_u32_u8(accum8) | vdupq_n_u32(0xFF000000));
+ }
+
+ // Store the convolution result (16 bytes) and advance the pixel pointers.
+ vst1q_u8(outRow, accum8);
+ outRow += 16;
+ }
+
+ // Process the leftovers when the width of the output is not divisible
+ // by 4, that is at most 3 pixels.
+ int r = pixelWidth & 3;
+ if (r) {
+
+ int32x4_t accum0 = vdupq_n_s32(0);
+ int32x4_t accum1 = vdupq_n_s32(0);
+ int32x4_t accum2 = vdupq_n_s32(0);
+
+ for (int filterY = 0; filterY < filterLength; ++filterY) {
+ int16x4_t coeff16 = vdup_n_s16(filterValues[filterY]);
+
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][width << 2]);
+
+ int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));
+ int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));
+ int16x4_t src16_0 = vget_low_s16(src16_01);
+ int16x4_t src16_1 = vget_high_s16(src16_01);
+ int16x4_t src16_2 = vget_low_s16(src16_23);
+
+ accum0 += vmull_s16(src16_0, coeff16);
+ accum1 += vmull_s16(src16_1, coeff16);
+ accum2 += vmull_s16(src16_2, coeff16);
+ }
+
+ accum0 = vshrq_n_s32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = vshrq_n_s32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = vshrq_n_s32(accum2, SkConvolutionFilter1D::kShiftBits);
+
+ int16x8_t accum16_0 = vcombine_s16(vqmovn_s32(accum0), vqmovn_s32(accum1));
+ int16x8_t accum16_1 = vcombine_s16(vqmovn_s32(accum2), vqmovn_s32(accum2));
+
+ uint8x16_t accum8 = vcombine_u8(vqmovun_s16(accum16_0), vqmovun_s16(accum16_1));
+
+ if (hasAlpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ uint8x16_t a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 8));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ uint8x16_t b = vmaxq_u8(a, accum8); // Max of r and g
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 16));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = vmaxq_u8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = vreinterpretq_u8_u32(vshlq_n_u32(vreinterpretq_u32_u8(b), 24));
+
+                // Make sure the alpha channel value is at least as large as the
+                // maximum value of the color channels.
+ accum8 = vmaxq_u8(b, accum8);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ accum8 = vreinterpretq_u8_u32(vreinterpretq_u32_u8(accum8) | vdupq_n_u32(0xFF000000));
+ }
+
+ switch(r) {
+ case 1:
+ vst1q_lane_u32(reinterpret_cast<uint32_t*>(outRow), vreinterpretq_u32_u8(accum8), 0);
+ break;
+ case 2:
+ vst1_u32(reinterpret_cast<uint32_t*>(outRow),
+ vreinterpret_u32_u8(vget_low_u8(accum8)));
+ break;
+ case 3:
+ vst1_u32(reinterpret_cast<uint32_t*>(outRow),
+ vreinterpret_u32_u8(vget_low_u8(accum8)));
+ vst1q_lane_u32(reinterpret_cast<uint32_t*>(outRow+8), vreinterpretq_u32_u8(accum8), 2);
+ break;
+ }
+ }
+ }
+
+#else
+
+ // Converts the argument to an 8-bit unsigned value by clamping to the range
+ // 0-255.
+ inline unsigned char ClampTo8(int a) {
+ if (static_cast<unsigned>(a) < 256) {
+ return a; // Avoid the extra check in the common case.
+ }
+ if (a < 0) {
+ return 0;
+ }
+ return 255;
+ }
+
+ // Convolves horizontally along a single row. The row data is given in
+ // |srcData| and continues for the numValues() of the filter.
+ template<bool hasAlpha>
+ void ConvolveHorizontally(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow) {
+ // Loop over each pixel on this row in the output image.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ // Get the filter that determines the current output pixel.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filterLength| pixels (4 bytes each) after this.
+ const unsigned char* rowToFilter = &srcData[filterOffset * 4];
+
+ // Apply the filter to the row to get the destination pixel in |accum|.
+ int accum[4] = {0};
+ for (int filterX = 0; filterX < filterLength; filterX++) {
+ SkConvolutionFilter1D::ConvolutionFixed curFilter = filterValues[filterX];
+ accum[0] += curFilter * rowToFilter[filterX * 4 + 0];
+ accum[1] += curFilter * rowToFilter[filterX * 4 + 1];
+ accum[2] += curFilter * rowToFilter[filterX * 4 + 2];
+ if (hasAlpha) {
+ accum[3] += curFilter * rowToFilter[filterX * 4 + 3];
+ }
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of fractional part.
+ accum[0] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[1] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[2] >>= SkConvolutionFilter1D::kShiftBits;
+ if (hasAlpha) {
+ accum[3] >>= SkConvolutionFilter1D::kShiftBits;
+ }
+
+ // Store the new pixel.
+ outRow[outX * 4 + 0] = ClampTo8(accum[0]);
+ outRow[outX * 4 + 1] = ClampTo8(accum[1]);
+ outRow[outX * 4 + 2] = ClampTo8(accum[2]);
+ if (hasAlpha) {
+ outRow[outX * 4 + 3] = ClampTo8(accum[3]);
+ }
+ }
+ }
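+
+    // Note (not from the upstream sources): in equation form, each output
+    // pixel computed above is, per channel c:
+    //
+    //   out[x].c = ClampTo8((sum over k of filter[k] * src[offset + k].c)
+    //                       >> kShiftBits)
+    //
+    // where filter[] holds fixed-point weights with kShiftBits fractional
+    // bits, so a normalized kernel sums to (1 << kShiftBits).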
+
+ // Does vertical convolution to produce one output row. The filter values and
+ // length are given in the first two parameters. These are applied to each
+ // of the rows pointed to in the |sourceDataRows| array, with each row
+ // being |pixelWidth| wide.
+ //
+ // The output must have room for |pixelWidth * 4| bytes.
+ template<bool hasAlpha>
+ void ConvolveVertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow) {
+ // We go through each column in the output and do a vertical convolution,
+ // generating one output pixel each time.
+ for (int outX = 0; outX < pixelWidth; outX++) {
+ // Compute the number of bytes over in each row that the current column
+ // we're convolving starts at. The pixel will cover the next 4 bytes.
+ int byteOffset = outX * 4;
+
+ // Apply the filter to one column of pixels.
+ int accum[4] = {0};
+ for (int filterY = 0; filterY < filterLength; filterY++) {
+ SkConvolutionFilter1D::ConvolutionFixed curFilter = filterValues[filterY];
+ accum[0] += curFilter * sourceDataRows[filterY][byteOffset + 0];
+ accum[1] += curFilter * sourceDataRows[filterY][byteOffset + 1];
+ accum[2] += curFilter * sourceDataRows[filterY][byteOffset + 2];
+ if (hasAlpha) {
+ accum[3] += curFilter * sourceDataRows[filterY][byteOffset + 3];
+ }
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of precision.
+ accum[0] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[1] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[2] >>= SkConvolutionFilter1D::kShiftBits;
+ if (hasAlpha) {
+ accum[3] >>= SkConvolutionFilter1D::kShiftBits;
+ }
+
+ // Store the new pixel.
+ outRow[byteOffset + 0] = ClampTo8(accum[0]);
+ outRow[byteOffset + 1] = ClampTo8(accum[1]);
+ outRow[byteOffset + 2] = ClampTo8(accum[2]);
+ if (hasAlpha) {
+ unsigned char alpha = ClampTo8(accum[3]);
+
+ // Make sure the alpha channel doesn't come out smaller than any of the
+        // color channels. We use premultiplied alpha channels, so this should
+ // never happen, but rounding errors will cause this from time to time.
+ // These "impossible" colors will cause overflows (and hence random pixel
+ // values) when the resulting bitmap is drawn to the screen.
+ //
+ // We only need to do this when generating the final output row (here).
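+        // For example (hypothetical values), red could round to 200 while
+        // alpha rounds to 198; bumping alpha up to 200 restores the
+        // premultiplied invariant color <= alpha.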
+ int maxColorChannel = SkTMax(outRow[byteOffset + 0],
+ SkTMax(outRow[byteOffset + 1],
+ outRow[byteOffset + 2]));
+ if (alpha < maxColorChannel) {
+ outRow[byteOffset + 3] = maxColorChannel;
+ } else {
+ outRow[byteOffset + 3] = alpha;
+ }
+ } else {
+        // No alpha channel; the image is opaque.
+ outRow[byteOffset + 3] = 0xff;
+ }
+ }
+ }
+
+ void convolve_horizontally(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow,
+ bool hasAlpha) {
+ if (hasAlpha) {
+ ConvolveHorizontally<true>(srcData, filter, outRow);
+ } else {
+ ConvolveHorizontally<false>(srcData, filter, outRow);
+ }
+ }
+
+ void (*convolve_4_rows_horizontally)(const unsigned char* srcData[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow[4],
+ size_t outRowBytes)
+ = nullptr;
+
+
+#endif
+
+ void convolve_vertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow,
+ bool hasAlpha) {
+ if (hasAlpha) {
+ ConvolveVertically<true>(filterValues, filterLength, sourceDataRows,
+ pixelWidth, outRow);
+ } else {
+ ConvolveVertically<false>(filterValues, filterLength, sourceDataRows,
+ pixelWidth, outRow);
+ }
+ }
+
+} // namespace SK_OPTS_NS
+
+#endif//SkBitmapFilter_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h b/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h
new file mode 100644
index 0000000000..85313bf99d
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_opts_DEFINED
+#define SkBitmapProcState_opts_DEFINED
+
+#include "include/private/SkVx.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkMSAN.h"
+
+// SkBitmapProcState optimized Shader, Sample, or Matrix procs.
+//
+// Only S32_alpha_D32_filter_DX exploits instructions beyond
+// our common baseline SSE2/NEON instruction sets, so that's
+// all that lives here.
+//
+// The rest are scattershot at the moment but I want to get them
+// all migrated to be normal code inside SkBitmapProcState.cpp.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+// This same basic packing scheme is used throughout the file.
+template <typename U32, typename Out>
+static void decode_packed_coordinates_and_weight(U32 packed, Out* v0, Out* v1, Out* w) {
+ *v0 = (packed >> 18); // Integer coordinate x0 or y0.
+ *v1 = (packed & 0x3fff); // Integer coordinate x1 or y1.
+ *w = (packed >> 14) & 0xf; // Lerp weight for v1; weight for v0 is 16-w.
+}
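+// For illustration, packing x0=5, w=3, x1=6 as (x0<<18)|(w<<14)|x1 gives
+// 0x14C006; decoding recovers v0 = 5 (>>18), v1 = 6 (&0x3fff), and w = 3
+// ((>>14)&0xf), so v1 carries weight 3 and v0 the complementary 16-3 = 13.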
+
+#if 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ /*not static*/ inline
+ void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ // In a _DX variant only X varies; all samples share y0/y1 coordinates and wy weight.
+ int y0, y1, wy;
+ decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
+
+ const uint32_t* row0 = s.fPixmap.addr32(0,y0);
+ const uint32_t* row1 = s.fPixmap.addr32(0,y1);
+
+ auto bilerp = [&](skvx::Vec<8,uint32_t> packed_x_coordinates) -> skvx::Vec<8,uint32_t> {
+ // Decode up to 8 output pixels' x-coordinates and weights.
+ skvx::Vec<8,uint32_t> x0,x1,wx;
+ decode_packed_coordinates_and_weight(packed_x_coordinates, &x0, &x1, &wx);
+
+ // Splat wx to each color channel.
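+            // e.g. a lane holding wx == 0x0000000B becomes 0x0B0B0B0B, one
+            // copy of the x-weight per 8-bit color channel.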
+ wx = (wx << 0)
+ | (wx << 8)
+ | (wx << 16)
+ | (wx << 24);
+
+ auto gather = [](const uint32_t* ptr, skvx::Vec<8,uint32_t> ix) {
+ #if 1
+ // Drop into AVX2 intrinsics for vpgatherdd.
+ return skvx::bit_pun<skvx::Vec<8,uint32_t>>(
+ _mm256_i32gather_epi32((const int*)ptr, skvx::bit_pun<__m256i>(ix), 4));
+ #else
+ // Portable version... sometimes I don't trust vpgatherdd.
+ return skvx::Vec<8,uint32_t>{
+ ptr[ix[0]], ptr[ix[1]], ptr[ix[2]], ptr[ix[3]],
+ ptr[ix[4]], ptr[ix[5]], ptr[ix[6]], ptr[ix[7]],
+ };
+ #endif
+ };
+
+ // Gather the 32 32-bit pixels that we'll bilerp into our 8 output pixels.
+ skvx::Vec<8,uint32_t> tl = gather(row0, x0), tr = gather(row0, x1),
+ bl = gather(row1, x0), br = gather(row1, x1);
+
+ #if 1
+ // We'll use _mm256_maddubs_epi16() to lerp much like in the SSSE3 code.
+ auto lerp_x = [&](skvx::Vec<8,uint32_t> L, skvx::Vec<8,uint32_t> R) {
+ __m256i l = skvx::bit_pun<__m256i>(L),
+ r = skvx::bit_pun<__m256i>(R),
+ wr = skvx::bit_pun<__m256i>(wx),
+ wl = _mm256_sub_epi8(_mm256_set1_epi8(16), wr);
+
+ // Interlace l,r bytewise and line them up with their weights, then lerp.
+ __m256i lo = _mm256_maddubs_epi16(_mm256_unpacklo_epi8( l, r),
+ _mm256_unpacklo_epi8(wl,wr));
+ __m256i hi = _mm256_maddubs_epi16(_mm256_unpackhi_epi8( l, r),
+ _mm256_unpackhi_epi8(wl,wr));
+
+ // Those _mm256_unpack??_epi8() calls left us in a bit of an odd order:
+ //
+ // if l = a b c d | e f g h
+ // and r = A B C D | E F G H
+ //
+ // then lo = a A b B | e E f F (low half of each input)
+ // and hi = c C d D | g G h H (high half of each input)
+ //
+ // To get everything back in original order we need to transpose that.
+ __m256i abcd = _mm256_permute2x128_si256(lo, hi, 0x20),
+ efgh = _mm256_permute2x128_si256(lo, hi, 0x31);
+
+ return skvx::join(skvx::bit_pun<skvx::Vec<16,uint16_t>>(abcd),
+ skvx::bit_pun<skvx::Vec<16,uint16_t>>(efgh));
+ };
+
+ skvx::Vec<32, uint16_t> top = lerp_x(tl, tr),
+ bot = lerp_x(bl, br),
+ sum = 16*top + (bot-top)*wy;
+ #else
+ // Treat 32-bit pixels as 4 8-bit values, and expand to 16-bit for room to multiply.
+ auto to_16x4 = [](auto v) -> skvx::Vec<32, uint16_t> {
+ return skvx::cast<uint16_t>(skvx::bit_pun<skvx::Vec<32, uint8_t>>(v));
+ };
+
+ // Sum up weighted sample pixels. The naive, redundant math would be,
+ //
+ // sum = tl * (16-wy) * (16-wx)
+ // + bl * ( wy) * (16-wx)
+ // + tr * (16-wy) * ( wx)
+ // + br * ( wy) * ( wx)
+ //
+ // But we refactor to eliminate a bunch of those common factors.
+ auto lerp = [](auto lo, auto hi, auto w) {
+ return 16*lo + (hi-lo)*w;
+ };
+ skvx::Vec<32, uint16_t> sum = lerp(lerp(to_16x4(tl), to_16x4(bl), wy),
+ lerp(to_16x4(tr), to_16x4(br), wy), to_16x4(wx));
+ #endif
+
+ // Get back to [0,255] by dividing by maximum weight 16x16 = 256.
+ sum >>= 8;
+
+ // Scale by [0,256] alpha.
+ sum *= s.fAlphaScale;
+ sum >>= 8;
+
+ // Pack back to 8-bit channels, undoing to_16x4().
+ return skvx::bit_pun<skvx::Vec<8,uint32_t>>(skvx::cast<uint8_t>(sum));
+ };
+
+ while (count >= 8) {
+ bilerp(skvx::Vec<8,uint32_t>::Load(xy)).store(colors);
+ xy += 8;
+ colors += 8;
+ count -= 8;
+ }
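+        // Tail: comparing count against {0,...,7} below builds a per-lane
+        // mask, e.g. count == 3 gives [~0,~0,~0,0,0,0,0,0], so the masked
+        // load and store touch only the first three coordinates and pixels.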
+ if (count > 0) {
+ __m256i active = skvx::bit_pun<__m256i>( count > skvx::Vec<8,int>{0,1,2,3, 4,5,6,7} ),
+ coords = _mm256_maskload_epi32((const int*)xy, active),
+ pixels;
+
+ bilerp(skvx::bit_pun<skvx::Vec<8,uint32_t>>(coords)).store(&pixels);
+ _mm256_maskstore_epi32((int*)colors, active, pixels);
+
+ sk_msan_mark_initialized(colors, colors+count,
+ "MSAN still doesn't understand AVX2 mask loads and stores.");
+ }
+ }
+
+#elif 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+ /*not static*/ inline
+ void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ // interpolate_in_x() is the crux of the SSSE3 implementation,
+ // interpolating in X for up to two output pixels (A and B) using _mm_maddubs_epi16().
+ auto interpolate_in_x = [](uint32_t A0, uint32_t A1,
+ uint32_t B0, uint32_t B1,
+ __m128i interlaced_x_weights) {
+ // _mm_maddubs_epi16() is a little idiosyncratic, but great as the core of a lerp.
+ //
+ // It takes two arguments interlaced byte-wise:
+ // - first arg: [ l,r, ... 7 more pairs of unsigned 8-bit values ...]
+ // - second arg: [ w,W, ... 7 more pairs of signed 8-bit values ...]
+ // and returns 8 signed 16-bit values: [ l*w + r*W, ... 7 more ... ].
+ //
+ // That's why we go to all this trouble to make interlaced_x_weights,
+ // and here we're about to interlace A0 with A1 and B0 with B1 to match.
+ //
+ // Our interlaced_x_weights are all in [0,16], and so we need not worry about
+ // the signedness of that input nor about the signedness of the output.
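+            //
+            // For example, with wx == 4 the interlaced pair is (12, 4), and
+            // lerping channel values l=10, r=20 gives 10*12 + 20*4 == 200,
+            // i.e. the X-lerped value still scaled by 16 for the Y pass.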
+
+ __m128i interlaced_A = _mm_unpacklo_epi8(_mm_cvtsi32_si128(A0), _mm_cvtsi32_si128(A1)),
+ interlaced_B = _mm_unpacklo_epi8(_mm_cvtsi32_si128(B0), _mm_cvtsi32_si128(B1));
+
+ return _mm_maddubs_epi16(_mm_unpacklo_epi64(interlaced_A, interlaced_B),
+ interlaced_x_weights);
+ };
+
+ // Interpolate {A0..A3} --> output pixel A, and {B0..B3} --> output pixel B.
+ // Returns two pixels, with each color channel in a 16-bit lane of the __m128i.
+ auto interpolate_in_x_and_y = [&](uint32_t A0, uint32_t A1,
+ uint32_t A2, uint32_t A3,
+ uint32_t B0, uint32_t B1,
+ uint32_t B2, uint32_t B3,
+ __m128i interlaced_x_weights,
+ int wy) {
+ // Interpolate each row in X, leaving 16-bit lanes scaled by interlaced_x_weights.
+ __m128i top = interpolate_in_x(A0,A1, B0,B1, interlaced_x_weights),
+ bot = interpolate_in_x(A2,A3, B2,B3, interlaced_x_weights);
+
+ // Interpolate in Y. As in the SSE2 code, we calculate top*(16-wy) + bot*wy
+ // as 16*top + (bot-top)*wy to save a multiply.
+ __m128i px = _mm_add_epi16(_mm_slli_epi16(top, 4),
+ _mm_mullo_epi16(_mm_sub_epi16(bot, top),
+ _mm_set1_epi16(wy)));
+
+ // Scale down by total max weight 16x16 = 256.
+ px = _mm_srli_epi16(px, 8);
+
+ // Scale by alpha if needed.
+ if (s.fAlphaScale < 256) {
+ px = _mm_srli_epi16(_mm_mullo_epi16(px, _mm_set1_epi16(s.fAlphaScale)), 8);
+ }
+ return px;
+ };
+
+ // We're in _DX mode here, so we're only varying in X.
+ // That means the first entry of xy is our constant pair of Y coordinates and weight in Y.
+ // All the other entries in xy will be pairs of X coordinates and the X weight.
+ int y0, y1, wy;
+ decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
+
+ auto row0 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes()),
+ row1 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes());
+
+ while (count >= 4) {
+ // We can really get going, loading 4 X-pairs at a time to produce 4 output pixels.
+ int x0[4],
+ x1[4];
+ __m128i wx;
+
+ // decode_packed_coordinates_and_weight(), 4x.
+ __m128i packed = _mm_loadu_si128((const __m128i*)xy);
+ _mm_storeu_si128((__m128i*)x0, _mm_srli_epi32(packed, 18));
+ _mm_storeu_si128((__m128i*)x1, _mm_and_si128 (packed, _mm_set1_epi32(0x3fff)));
+ wx = _mm_and_si128(_mm_srli_epi32(packed, 14), _mm_set1_epi32(0xf)); // [0,15]
+
+ // Splat each x weight 4x (for each color channel) as wr for pixels on the right at x1,
+ // and sixteen minus that as wl for pixels on the left at x0.
+ __m128i wr = _mm_shuffle_epi8(wx, _mm_setr_epi8(0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12)),
+ wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
+
+ // We need to interlace wl and wr for _mm_maddubs_epi16().
+ __m128i interlaced_x_weights_AB = _mm_unpacklo_epi8(wl,wr),
+ interlaced_x_weights_CD = _mm_unpackhi_epi8(wl,wr);
+
+ enum { A,B,C,D };
+
+ // interpolate_in_x_and_y() can produce two output pixels (A and B) at a time
+ // from eight input pixels {A0..A3} and {B0..B3}, arranged in a 2x2 grid for each.
+ __m128i AB = interpolate_in_x_and_y(row0[x0[A]], row0[x1[A]],
+ row1[x0[A]], row1[x1[A]],
+ row0[x0[B]], row0[x1[B]],
+ row1[x0[B]], row1[x1[B]],
+ interlaced_x_weights_AB, wy);
+
+ // Once more with the other half of the x-weights for two more pixels C,D.
+ __m128i CD = interpolate_in_x_and_y(row0[x0[C]], row0[x1[C]],
+ row1[x0[C]], row1[x1[C]],
+ row0[x0[D]], row0[x1[D]],
+ row1[x0[D]], row1[x1[D]],
+ interlaced_x_weights_CD, wy);
+
+ // Scale by alpha, pack back together to 8-bit lanes, and write out four pixels!
+ _mm_storeu_si128((__m128i*)colors, _mm_packus_epi16(AB, CD));
+ xy += 4;
+ colors += 4;
+ count -= 4;
+ }
+
+ while (count --> 0) {
+ // This is exactly the same flow as the count >= 4 loop above, but writing one pixel.
+ int x0, x1, wx;
+ decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
+
+ // As above, splat out wx four times as wr, and sixteen minus that as wl.
+ __m128i wr = _mm_set1_epi8(wx), // This splats it out 16 times, but that's fine.
+ wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
+
+ __m128i interlaced_x_weights = _mm_unpacklo_epi8(wl, wr);
+
+ __m128i A = interpolate_in_x_and_y(row0[x0], row0[x1],
+ row1[x0], row1[x1],
+ 0, 0,
+ 0, 0,
+ interlaced_x_weights, wy);
+
+ *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(A, _mm_setzero_si128()));
+ }
+ }
+
+
+#elif 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+
+ /*not static*/ inline
+ void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ int y0, y1, wy;
+ decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
+
+ auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
+ row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );
+
+ // We'll put one pixel in the low 4 16-bit lanes to line up with wy,
+ // and another in the upper 4 16-bit lanes to line up with 16 - wy.
+ const __m128i allY = _mm_unpacklo_epi64(_mm_set1_epi16( wy), // Bottom pixel goes here.
+ _mm_set1_epi16(16-wy)); // Top pixel goes here.
+
+ while (count --> 0) {
+ int x0, x1, wx;
+ decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
+
+ // Load the 4 pixels we're interpolating, in this grid:
+ // | tl tr |
+ // | bl br |
+ const __m128i tl = _mm_cvtsi32_si128(row0[x0]), tr = _mm_cvtsi32_si128(row0[x1]),
+ bl = _mm_cvtsi32_si128(row1[x0]), br = _mm_cvtsi32_si128(row1[x1]);
+
+ // We want to calculate a sum of 4 pixels weighted in two directions:
+ //
+ // sum = tl * (16-wy) * (16-wx)
+ // + bl * ( wy) * (16-wx)
+ // + tr * (16-wy) * ( wx)
+ // + br * ( wy) * ( wx)
+ //
+ // (Notice top --> 16-wy, bottom --> wy, left --> 16-wx, right --> wx.)
+ //
+ // We've already prepared allY as a vector containing [wy, 16-wy] as a way
+ // to apply those y-direction weights. So we'll start on the x-direction
+ // first, grouping into left and right halves, lined up with allY:
+ //
+ // L = [bl, tl]
+ // R = [br, tr]
+ //
+ // sum = horizontalSum( allY * (L*(16-wx) + R*wx) )
+ //
+ // Rewriting that one more step, we can replace a multiply with a shift:
+ //
+ // sum = horizontalSum( allY * (16*L + (R-L)*wx) )
+ //
+ // That's how we'll actually do this math.
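+            //
+            // (Algebra check: L*(16-wx) + R*wx == 16*L + R*wx - L*wx
+            //                                  == 16*L + (R-L)*wx.)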
+
+ __m128i L = _mm_unpacklo_epi8(_mm_unpacklo_epi32(bl, tl), _mm_setzero_si128()),
+ R = _mm_unpacklo_epi8(_mm_unpacklo_epi32(br, tr), _mm_setzero_si128());
+
+ __m128i inner = _mm_add_epi16(_mm_slli_epi16(L, 4),
+ _mm_mullo_epi16(_mm_sub_epi16(R,L), _mm_set1_epi16(wx)));
+
+ __m128i sum_in_x = _mm_mullo_epi16(inner, allY);
+
+ // sum = horizontalSum( ... )
+ __m128i sum = _mm_add_epi16(sum_in_x, _mm_srli_si128(sum_in_x, 8));
+
+ // Get back to [0,255] by dividing by maximum weight 16x16 = 256.
+ sum = _mm_srli_epi16(sum, 8);
+
+ if (s.fAlphaScale < 256) {
+ // Scale by alpha, which is in [0,256].
+ sum = _mm_mullo_epi16(sum, _mm_set1_epi16(s.fAlphaScale));
+ sum = _mm_srli_epi16(sum, 8);
+ }
+
+ // Pack back into 8-bit values and store.
+ *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(sum, _mm_setzero_si128()));
+ }
+ }
+
+#else
+
+ // The NEON code only actually differs from the portable code in the
+ // filtering step after we've loaded all four pixels we want to bilerp.
+
+ #if defined(SK_ARM_HAS_NEON)
+ static void filter_and_scale_by_alpha(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor *dst,
+ uint16_t scale) {
+ uint8x8_t vy, vconst16_8, v16_y, vres;
+ uint16x4_t vx, vconst16_16, v16_x, tmp, vscale;
+ uint32x2_t va0, va1;
+ uint16x8_t tmp1, tmp2;
+
+ vy = vdup_n_u8(y); // duplicate y into vy
+ vconst16_8 = vmov_n_u8(16); // set up constant in vconst16_8
+ v16_y = vsub_u8(vconst16_8, vy); // v16_y = 16-y
+
+ va0 = vdup_n_u32(a00); // duplicate a00
+ va1 = vdup_n_u32(a10); // duplicate a10
+ va0 = vset_lane_u32(a01, va0, 1); // set top to a01
+ va1 = vset_lane_u32(a11, va1, 1); // set top to a11
+
+ tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
+ tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
+
+ vx = vdup_n_u16(x); // duplicate x into vx
+ vconst16_16 = vmov_n_u16(16); // set up constant in vconst16_16
+ v16_x = vsub_u16(vconst16_16, vx); // v16_x = 16-x
+
+ tmp = vmul_u16(vget_high_u16(tmp1), vx); // tmp = a01 * x
+ tmp = vmla_u16(tmp, vget_high_u16(tmp2), vx); // tmp += a11 * x
+ tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x)
+ tmp = vmla_u16(tmp, vget_low_u16(tmp2), v16_x); // tmp += a10 * (16-x)
+
+ if (scale < 256) {
+ vscale = vdup_n_u16(scale); // duplicate scale
+ tmp = vshr_n_u16(tmp, 8); // shift down result by 8
+ tmp = vmul_u16(tmp, vscale); // multiply result by scale
+ }
+
+ vres = vshrn_n_u16(vcombine_u16(tmp, vcreate_u16(0)), 8); // shift down result by 8
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); // store result
+ }
+ #else
+ static void filter_and_scale_by_alpha(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor* dstColor,
+ unsigned alphaScale) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+ SkASSERT(alphaScale <= 256);
+
+ int xy = x * y;
+ const uint32_t mask = 0xFF00FF;
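+        // The 0xFF00FF mask lets one 32-bit multiply scale two 8-bit channels
+        // at once: lo takes the channels in bits 0-7 and 16-23, hi the other
+        // two, each with 8 spare high bits to absorb a scale in [0,256].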
+
+ int scale = 256 - 16*y - 16*x + xy;
+ uint32_t lo = (a00 & mask) * scale;
+ uint32_t hi = ((a00 >> 8) & mask) * scale;
+
+ scale = 16*x - xy;
+ lo += (a01 & mask) * scale;
+ hi += ((a01 >> 8) & mask) * scale;
+
+ scale = 16*y - xy;
+ lo += (a10 & mask) * scale;
+ hi += ((a10 >> 8) & mask) * scale;
+
+ lo += (a11 & mask) * xy;
+ hi += ((a11 >> 8) & mask) * xy;
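+
+        // The four scales sum to (256-16y-16x+xy) + (16x-xy) + (16y-xy) + xy
+        // == 256, so each channel's weighted sum stays under 256*255 and the
+        // two channels sharing lo (or hi) cannot bleed into each other.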
+
+ if (alphaScale < 256) {
+ lo = ((lo >> 8) & mask) * alphaScale;
+ hi = ((hi >> 8) & mask) * alphaScale;
+ }
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+ }
+ #endif
+
+
+ /*not static*/ inline
+ void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, SkPMColor* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ int y0, y1, wy;
+ decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
+
+ auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
+ row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );
+
+ while (count --> 0) {
+ int x0, x1, wx;
+ decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
+
+ filter_and_scale_by_alpha(wx, wy,
+ row0[x0], row0[x1],
+ row1[x0], row1[x1],
+ colors++,
+ s.fAlphaScale);
+ }
+ }
+
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts.h b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
new file mode 100644
index 0000000000..2dfe5ecf96
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitMask_opts_DEFINED
+#define SkBlitMask_opts_DEFINED
+
+#include "src/core/Sk4px.h"
+
+namespace SK_OPTS_NS {
+
+#if defined(SK_ARM_HAS_NEON)
+ // The Sk4px versions below will work fine with NEON, but we have had many indications
+ // that it doesn't perform as well as this NEON-specific code. TODO(mtklein): why?
+
+ #define NEON_A (SK_A32_SHIFT / 8)
+ #define NEON_R (SK_R32_SHIFT / 8)
+ #define NEON_G (SK_G32_SHIFT / 8)
+ #define NEON_B (SK_B32_SHIFT / 8)
+
+ static inline uint16x8_t SkAlpha255To256_neon8(uint8x8_t alpha) {
+ return vaddw_u8(vdupq_n_u16(1), alpha);
+ }
+
+ static inline uint8x8_t SkAlphaMul_neon8(uint8x8_t color, uint16x8_t scale) {
+ return vshrn_n_u16(vmovl_u8(color) * scale, 8);
+ }
+
+ static inline uint8x8x4_t SkAlphaMulQ_neon8(uint8x8x4_t color, uint16x8_t scale) {
+ uint8x8x4_t ret;
+
+ ret.val[0] = SkAlphaMul_neon8(color.val[0], scale);
+ ret.val[1] = SkAlphaMul_neon8(color.val[1], scale);
+ ret.val[2] = SkAlphaMul_neon8(color.val[2], scale);
+ ret.val[3] = SkAlphaMul_neon8(color.val[3], scale);
+
+ return ret;
+ }
+
+
+ template <bool isColor>
+ static void D32_A8_Opaque_Color_neon(void* SK_RESTRICT dst, size_t dstRB,
+ const void* SK_RESTRICT maskPtr, size_t maskRB,
+ SkColor color, int width, int height) {
+ SkPMColor pmc = SkPreMultiplyColor(color);
+ SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+ const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+ uint8x8x4_t vpmc;
+
+ maskRB -= width;
+ dstRB -= (width << 2);
+
+ if (width >= 8) {
+ vpmc.val[NEON_A] = vdup_n_u8(SkGetPackedA32(pmc));
+ vpmc.val[NEON_R] = vdup_n_u8(SkGetPackedR32(pmc));
+ vpmc.val[NEON_G] = vdup_n_u8(SkGetPackedG32(pmc));
+ vpmc.val[NEON_B] = vdup_n_u8(SkGetPackedB32(pmc));
+ }
+ do {
+ int w = width;
+ while (w >= 8) {
+ uint8x8_t vmask = vld1_u8(mask);
+ uint16x8_t vscale, vmask256 = SkAlpha255To256_neon8(vmask);
+ if (isColor) {
+ vscale = vsubw_u8(vdupq_n_u16(256),
+ SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256));
+ } else {
+ vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+ }
+ uint8x8x4_t vdev = vld4_u8((uint8_t*)device);
+
+ vdev.val[NEON_A] = SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_A], vscale);
+ vdev.val[NEON_R] = SkAlphaMul_neon8(vpmc.val[NEON_R], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_R], vscale);
+ vdev.val[NEON_G] = SkAlphaMul_neon8(vpmc.val[NEON_G], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_G], vscale);
+ vdev.val[NEON_B] = SkAlphaMul_neon8(vpmc.val[NEON_B], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_B], vscale);
+
+ vst4_u8((uint8_t*)device, vdev);
+
+ mask += 8;
+ device += 8;
+ w -= 8;
+ }
+
+ while (w--) {
+ unsigned aa = *mask++;
+ if (isColor) {
+ *device = SkBlendARGB32(pmc, *device, aa);
+ } else {
+ *device = SkAlphaMulQ(pmc, SkAlpha255To256(aa))
+ + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+ }
+ device += 1;
+ }
+
+ device = (uint32_t*)((char*)device + dstRB);
+ mask += maskRB;
+
+ } while (--height != 0);
+ }
+
+ static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ D32_A8_Opaque_Color_neon<true>(dst, dstRB, mask, maskRB, color, w, h);
+ }
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ D32_A8_Opaque_Color_neon<false>(dst, dstRB, mask, maskRB, color, w, h);
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* maskPtr, size_t maskRB,
+ int width, int height) {
+ SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+ const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+
+ maskRB -= width;
+ dstRB -= (width << 2);
+ do {
+ int w = width;
+ while (w >= 8) {
+ uint8x8_t vmask = vld1_u8(mask);
+ uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+ uint8x8x4_t vdevice = vld4_u8((uint8_t*)device);
+
+ vdevice = SkAlphaMulQ_neon8(vdevice, vscale);
+ vdevice.val[NEON_A] += vmask;
+
+ vst4_u8((uint8_t*)device, vdevice);
+
+ mask += 8;
+ device += 8;
+ w -= 8;
+ }
+ while (w-- > 0) {
+ unsigned aa = *mask++;
+ *device = (aa << SK_A32_SHIFT)
+ + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+ device += 1;
+ }
+ device = (uint32_t*)((char*)device + dstRB);
+ mask += maskRB;
+ } while (--height != 0);
+ }
+
+#else
+ static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+ auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
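+            //   (expanding: s*aa + d*aa - d*sa*aa + d - d*aa == s*aa + d*(1 - sa*aa))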
+ auto left = s.approxMulDiv255(aa),
+ right = d.approxMulDiv255(left.alphas().inv());
+ return left + right; // This does not overflow (exhaustively checked).
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ SkASSERT(SkColorGetA(color) == 0xFF);
+ auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+ auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // = s*aa + d(1-aa)
+ return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ int w, int h) {
+ auto fn = [](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // a = 1*aa + d(1-1*aa) = aa + d(1-aa)
+ // c = 0*aa + d(1-1*aa) = d(1-aa)
+ return Sk4px(Sk16b(aa) & Sk16b(0,0,0,255, 0,0,0,255, 0,0,0,255, 0,0,0,255))
+ + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+#endif
+
+/*not static*/ inline void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ if (color == SK_ColorBLACK) {
+ blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
+ } else if (SkColorGetA(color) == 0xFF) {
+ blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
+ } else {
+ blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
+ }
+}
+
+} // SK_OPTS_NS
+
+#endif//SkBlitMask_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts.h b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
new file mode 100644
index 0000000000..2e9e182582
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_opts_DEFINED
+#define SkBlitRow_opts_DEFINED
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkVx.h"
+#include "src/core/SkMSAN.h"
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #include <immintrin.h>
+
+ static inline __m256i SkPMSrcOver_AVX2(const __m256i& src, const __m256i& dst) {
+ // Abstractly srcover is
+ // b = s + d*(1-srcA)
+ //
+ // In terms of unorm8 bytes, that works out to
+ // b = s + (d*(255-srcA) + 127) / 255
+ //
+ // But we approximate that to within a bit with
+ // b = s + (d*(255-srcA) + d) / 256
+        //     a.k.a.
+ // b = s + (d*(256-srcA)) >> 8
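+        //
+        // Sanity check: srcA == 0 gives b = s + (d*256 >> 8) = s + d, while
+        // srcA == 255 gives b = s + (d >> 8), i.e. b = s for any d < 256.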
+
+ // The bottleneck of this math is the multiply, and we want to do it as
+ // narrowly as possible, here getting inputs into 16-bit lanes and
+ // using 16-bit multiplies. We can do twice as many multiplies at once
+        // as with naive 32-bit multiplies, and on top of that, the 16-bit multiplies
+ // are themselves a couple cycles quicker. Win-win.
+
+ // We'll get everything in 16-bit lanes for two multiplies, one
+ // handling dst red and blue, the other green and alpha. (They're
+ // conveniently 16-bits apart, you see.) We don't need the individual
+ // src channels beyond alpha until the very end when we do the "s + "
+ // add, and we don't even need to unpack them; the adds cannot overflow.
+
+ // Shuffle each pixel's srcA to the low byte of each 16-bit half of the pixel.
+        const int _ = -1;   // a negative shuffle index writes a literal 0 byte.
+ __m256i srcA_x2 = _mm256_shuffle_epi8(src,
+ _mm256_setr_epi8(3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_,
+ 3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_));
+ __m256i scale_x2 = _mm256_sub_epi16(_mm256_set1_epi16(256),
+ srcA_x2);
+
+ // Scale red and blue, leaving results in the low byte of each 16-bit lane.
+ __m256i rb = _mm256_and_si256(_mm256_set1_epi32(0x00ff00ff), dst);
+ rb = _mm256_mullo_epi16(rb, scale_x2);
+ rb = _mm256_srli_epi16 (rb, 8);
+
+ // Scale green and alpha, leaving results in the high byte, masking off the low bits.
+ __m256i ga = _mm256_srli_epi16(dst, 8);
+ ga = _mm256_mullo_epi16(ga, scale_x2);
+ ga = _mm256_andnot_si256(_mm256_set1_epi32(0x00ff00ff), ga);
+
+ return _mm256_add_epi32(src, _mm256_or_si256(rb, ga));
+ }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+
+ static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
+ __m128i scale = _mm_sub_epi32(_mm_set1_epi32(256),
+ _mm_srli_epi32(src, 24));
+ __m128i scale_x2 = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);
+
+ __m128i rb = _mm_and_si128(_mm_set1_epi32(0x00ff00ff), dst);
+ rb = _mm_mullo_epi16(rb, scale_x2);
+ rb = _mm_srli_epi16(rb, 8);
+
+ __m128i ga = _mm_srli_epi16(dst, 8);
+ ga = _mm_mullo_epi16(ga, scale_x2);
+ ga = _mm_andnot_si128(_mm_set1_epi32(0x00ff00ff), ga);
+
+ return _mm_add_epi32(src, _mm_or_si128(rb, ga));
+ }
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+ // SkMulDiv255Round() applied to each lane.
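+    // vrshrq_n_u16 forms a rounded prod/256 and vraddhn_u16 folds it back in
+    // with one more rounding add, matching the usual round-to-nearest
+    // (p + 128 + ((p + 128) >> 8)) >> 8 division by 255.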
+ static inline uint8x8_t SkMulDiv255Round_neon8(uint8x8_t x, uint8x8_t y) {
+ uint16x8_t prod = vmull_u8(x, y);
+ return vraddhn_u16(prod, vrshrq_n_u16(prod, 8));
+ }
+ static inline uint8x8x4_t SkPMSrcOver_neon8(uint8x8x4_t dst, uint8x8x4_t src) {
+        uint8x8_t nalphas = vmvn_u8(src.val[3]); // bitwise NOT: 255 - alpha
+ return {
+ vadd_u8(src.val[0], SkMulDiv255Round_neon8(nalphas, dst.val[0])),
+ vadd_u8(src.val[1], SkMulDiv255Round_neon8(nalphas, dst.val[1])),
+ vadd_u8(src.val[2], SkMulDiv255Round_neon8(nalphas, dst.val[2])),
+ vadd_u8(src.val[3], SkMulDiv255Round_neon8(nalphas, dst.val[3])),
+ };
+ }
+ // Variant assuming dst and src contain the color components of two consecutive pixels.
+ static inline uint8x8_t SkPMSrcOver_neon2(uint8x8_t dst, uint8x8_t src) {
+ const uint8x8_t alpha_indices = vcreate_u8(0x0707070703030303);
+ uint8x8_t nalphas = vmvn_u8(vtbl1_u8(src, alpha_indices));
+ return vadd_u8(src, SkMulDiv255Round_neon8(nalphas, dst));
+ }
+#endif
+
+namespace SK_OPTS_NS {
+
+/*not static*/
+inline void blit_row_s32a_opaque(SkPMColor* dst, const SkPMColor* src, int len, U8CPU alpha) {
+ SkASSERT(alpha == 0xFF);
+ sk_msan_assert_initialized(src, src+len);
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ while (len >= 8) {
+ _mm256_storeu_si256((__m256i*)dst,
+ SkPMSrcOver_AVX2(_mm256_loadu_si256((const __m256i*)src),
+ _mm256_loadu_si256((const __m256i*)dst)));
+ src += 8;
+ dst += 8;
+ len -= 8;
+ }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ while (len >= 4) {
+ _mm_storeu_si128((__m128i*)dst, SkPMSrcOver_SSE2(_mm_loadu_si128((const __m128i*)src),
+ _mm_loadu_si128((const __m128i*)dst)));
+ src += 4;
+ dst += 4;
+ len -= 4;
+ }
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
+ while (len >= 8) {
+ vst4_u8((uint8_t*)dst, SkPMSrcOver_neon8(vld4_u8((const uint8_t*)dst),
+ vld4_u8((const uint8_t*)src)));
+ src += 8;
+ dst += 8;
+ len -= 8;
+ }
+
+ while (len >= 2) {
+ vst1_u8((uint8_t*)dst, SkPMSrcOver_neon2(vld1_u8((const uint8_t*)dst),
+ vld1_u8((const uint8_t*)src)));
+ src += 2;
+ dst += 2;
+ len -= 2;
+ }
+
+ if (len != 0) {
+ uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8((uint64_t)*dst),
+ vcreate_u8((uint64_t)*src));
+ vst1_lane_u32(dst, vreinterpret_u32_u8(result), 0);
+ }
+ return;
+#endif
+
+ while (len --> 0) {
+ *dst = SkPMSrcOver(*src, *dst);
+ src++;
+ dst++;
+ }
+}
+
+// Blend constant color over count src pixels, writing into dst.
+/*not static*/
+inline void blit_row_color32(SkPMColor* dst, const SkPMColor* src, int count, SkPMColor color) {
+ constexpr int N = 4; // 8, 16 also reasonable choices
+ using U32 = skvx::Vec< N, uint32_t>;
+ using U16 = skvx::Vec<4*N, uint16_t>;
+ using U8 = skvx::Vec<4*N, uint8_t>;
+
+ auto kernel = [color](U32 src) {
+ unsigned invA = 255 - SkGetPackedA32(color);
+ invA += invA >> 7;
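+        // invA >> 7 adds 1 when invA >= 128, approximating the exact rescale
+        // 256*invA/255; e.g. alpha == 100 gives invA = 155 -> 156 (vs. 155.6).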
+ SkASSERT(0 < invA && invA < 256); // We handle alpha == 0 or alpha == 255 specially.
+
+ // (src * invA + (color << 8) + 128) >> 8
+ // Should all fit in 16 bits.
+ U8 s = skvx::bit_pun<U8>(src),
+ a = U8(invA);
+ U16 c = skvx::cast<uint16_t>(skvx::bit_pun<U8>(U32(color))),
+ d = (mull(s,a) + (c << 8) + 128)>>8;
+ return skvx::bit_pun<U32>(skvx::cast<uint8_t>(d));
+ };
+
+ while (count >= N) {
+ kernel(U32::Load(src)).store(dst);
+ src += N;
+ dst += N;
+ count -= N;
+ }
+ while (count --> 0) {
+ *dst++ = kernel(U32{*src++})[0];
+ }
+}
+
+} // SK_OPTS_NS
+
+#endif//SkBlitRow_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkChecksum_opts.h b/gfx/skia/skia/src/opts/SkChecksum_opts.h
new file mode 100644
index 0000000000..4e91443ee7
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkChecksum_opts.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_opts_DEFINED
+#define SkChecksum_opts_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "src/core/SkUtils.h" // sk_unaligned_load
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_CRC32)
+ #include <arm_acle.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42 && (defined(__x86_64__) || defined(_M_X64))
+ // This is not a CRC32. It's Just A Hash that uses those instructions because they're fast.
+ /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t seed) {
+ auto data = (const uint8_t*)vdata;
+
+ // _mm_crc32_u64() operates on 64-bit registers, so we use uint64_t for a while.
+ uint64_t hash = seed;
+ if (bytes >= 24) {
+ // We'll create 3 independent hashes, each using _mm_crc32_u64()
+ // to hash 8 bytes per step. Both 3 and independent are important:
+ // we can execute 3 of these instructions in parallel on a single core.
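+        // (On typical x86 cores the crc32 instruction has ~3-cycle latency
+        // but one-per-cycle throughput, so three chains keep it saturated.)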
+ uint64_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/24;
+ while (steps --> 0) {
+ a = _mm_crc32_u64(a, sk_unaligned_load<uint64_t>(data+ 0));
+ b = _mm_crc32_u64(b, sk_unaligned_load<uint64_t>(data+ 8));
+ c = _mm_crc32_u64(c, sk_unaligned_load<uint64_t>(data+16));
+ data += 24;
+ }
+ bytes %= 24;
+ hash = _mm_crc32_u32(a, _mm_crc32_u32(b, c));
+ }
+
+ SkASSERT(bytes < 24);
+ if (bytes >= 16) {
+ hash = _mm_crc32_u64(hash, sk_unaligned_load<uint64_t>(data));
+ bytes -= 8;
+ data += 8;
+ }
+
+ SkASSERT(bytes < 16);
+ if (bytes & 8) {
+ hash = _mm_crc32_u64(hash, sk_unaligned_load<uint64_t>(data));
+ data += 8;
+ }
+
+ // The remainder of these _mm_crc32_u*() operate on a 32-bit register.
+ // We don't lose anything here: only the bottom 32-bits were populated.
+ auto hash32 = (uint32_t)hash;
+
+ if (bytes & 4) {
+ hash32 = _mm_crc32_u32(hash32, sk_unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash32 = _mm_crc32_u16(hash32, sk_unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash32 = _mm_crc32_u8(hash32, sk_unaligned_load<uint8_t>(data));
+ }
+ return hash32;
+ }
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ // 32-bit version of above, using _mm_crc32_u32() but not _mm_crc32_u64().
+ /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+
+ if (bytes >= 12) {
+ // We'll create 3 independent hashes, each using _mm_crc32_u32()
+ // to hash 4 bytes per step. Both 3 and independent are important:
+ // we can execute 3 of these instructions in parallel on a single core.
+ uint32_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/12;
+ while (steps --> 0) {
+ a = _mm_crc32_u32(a, sk_unaligned_load<uint32_t>(data+0));
+ b = _mm_crc32_u32(b, sk_unaligned_load<uint32_t>(data+4));
+ c = _mm_crc32_u32(c, sk_unaligned_load<uint32_t>(data+8));
+ data += 12;
+ }
+ bytes %= 12;
+ hash = _mm_crc32_u32(a, _mm_crc32_u32(b, c));
+ }
+
+ SkASSERT(bytes < 12);
+ if (bytes >= 8) {
+ hash = _mm_crc32_u32(hash, sk_unaligned_load<uint32_t>(data));
+ bytes -= 4;
+ data += 4;
+ }
+
+ SkASSERT(bytes < 8);
+ if (bytes & 4) {
+ hash = _mm_crc32_u32(hash, sk_unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash = _mm_crc32_u16(hash, sk_unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash = _mm_crc32_u8(hash, sk_unaligned_load<uint8_t>(data));
+ }
+ return hash;
+ }
+
+#elif defined(SK_ARM_HAS_CRC32)
+ /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+ if (bytes >= 24) {
+ uint32_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/24;
+ while (steps --> 0) {
+ a = __crc32d(a, sk_unaligned_load<uint64_t>(data+ 0));
+ b = __crc32d(b, sk_unaligned_load<uint64_t>(data+ 8));
+ c = __crc32d(c, sk_unaligned_load<uint64_t>(data+16));
+ data += 24;
+ }
+ bytes %= 24;
+ hash = __crc32w(a, __crc32w(b, c));
+ }
+
+ SkASSERT(bytes < 24);
+ if (bytes >= 16) {
+ hash = __crc32d(hash, sk_unaligned_load<uint64_t>(data));
+ bytes -= 8;
+ data += 8;
+ }
+
+ SkASSERT(bytes < 16);
+ if (bytes & 8) {
+ hash = __crc32d(hash, sk_unaligned_load<uint64_t>(data));
+ data += 8;
+ }
+ if (bytes & 4) {
+ hash = __crc32w(hash, sk_unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash = __crc32h(hash, sk_unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash = __crc32b(hash, sk_unaligned_load<uint8_t>(data));
+ }
+ return hash;
+ }
+
+#else
+ // This is Murmur3.
+ /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+
+ size_t original_bytes = bytes;
+
+ // Handle 4 bytes at a time while possible.
+ while (bytes >= 4) {
+ uint32_t k = sk_unaligned_load<uint32_t>(data);
+ k *= 0xcc9e2d51;
+ k = (k << 15) | (k >> 17);
+ k *= 0x1b873593;
+
+ hash ^= k;
+ hash = (hash << 13) | (hash >> 19);
+ hash *= 5;
+ hash += 0xe6546b64;
+
+ bytes -= 4;
+ data += 4;
+ }
+
+ // Handle last 0-3 bytes.
+ uint32_t k = 0;
+ switch (bytes & 3) {
+            case 3: k ^= data[2] << 16;  // fall through
+            case 2: k ^= data[1] << 8;   // fall through
+ case 1: k ^= data[0] << 0;
+ k *= 0xcc9e2d51;
+ k = (k << 15) | (k >> 17);
+ k *= 0x1b873593;
+ hash ^= k;
+ }
+
+ hash ^= original_bytes;
+ return SkChecksum::Mix(hash);
+ }
+#endif
+
+#undef unaligned_load
+
+} // namespace SK_OPTS_NS
+
+#endif//SkChecksum_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkOpts_avx.cpp b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
new file mode 100644
index 0000000000..229892f379
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#ifdef __clang__
+#define SK_OPTS_NS avx
+#include "src/opts/SkRasterPipeline_opts.h"
+#endif
+
+#include "src/opts/SkUtils_opts.h"
+
+namespace SkOpts {
+ void Init_avx() {
+ memset16 = SK_OPTS_NS::memset16;
+ memset32 = SK_OPTS_NS::memset32;
+ memset64 = SK_OPTS_NS::memset64;
+
+ rect_memset16 = SK_OPTS_NS::rect_memset16;
+ rect_memset32 = SK_OPTS_NS::rect_memset32;
+ rect_memset64 = SK_OPTS_NS::rect_memset64;
+
+#ifdef __clang__
+ #define M(st) stages_highp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+ start_pipeline_highp = SK_OPTS_NS::start_pipeline;
+ #undef M
+
+ #define M(st) stages_lowp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::lowp::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+ start_pipeline_lowp = SK_OPTS_NS::lowp::start_pipeline;
+ #undef M
+#endif
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_crc32.cpp b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
new file mode 100644
index 0000000000..205f0ebf37
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#define SK_OPTS_NS crc32
+#include "src/opts/SkChecksum_opts.h"
+
+namespace SkOpts {
+ void Init_crc32() {
+ hash_fn = crc32::hash_fn;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_hsw.cpp b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
new file mode 100644
index 0000000000..f28835d432
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if defined(__AVX2__)
+
+#include <immintrin.h>
+#include <stdint.h>
+
+namespace hsw {
+
+ void convolve_vertically(const int16_t* filter, int filterLen,
+ uint8_t* const* srcRows, int width,
+ uint8_t* out, bool hasAlpha) {
+ // It's simpler to work with the output array in terms of 4-byte pixels.
+ auto dst = (int*)out;
+
+ // Output up to eight pixels per iteration.
+ for (int x = 0; x < width; x += 8) {
+ // Accumulated result for 4 (non-adjacent) pairs of pixels,
+ // with each channel in signed 17.14 fixed point.
+ auto accum04 = _mm256_setzero_si256(),
+ accum15 = _mm256_setzero_si256(),
+ accum26 = _mm256_setzero_si256(),
+ accum37 = _mm256_setzero_si256();
+
+ // Convolve with the filter. (This inner loop is where we spend ~all our time.)
+ // While we can, we consume 2 filter coefficients and 2 rows of 8 pixels each at a time.
+ auto convolve_16_pixels = [&](__m256i interlaced_coeffs,
+ __m256i pixels_01234567, __m256i pixels_89ABCDEF) {
+ // Interlaced R0R8 G0G8 B0B8 A0A8 R1R9 G1G9... 32 8-bit values each.
+ auto _08194C5D = _mm256_unpacklo_epi8(pixels_01234567, pixels_89ABCDEF),
+ _2A3B6E7F = _mm256_unpackhi_epi8(pixels_01234567, pixels_89ABCDEF);
+
+ // Still interlaced R0R8 G0G8... as above, each channel expanded to 16-bit lanes.
+ auto _084C = _mm256_unpacklo_epi8(_08194C5D, _mm256_setzero_si256()),
+ _195D = _mm256_unpackhi_epi8(_08194C5D, _mm256_setzero_si256()),
+ _2A6E = _mm256_unpacklo_epi8(_2A3B6E7F, _mm256_setzero_si256()),
+ _3B7F = _mm256_unpackhi_epi8(_2A3B6E7F, _mm256_setzero_si256());
+
+ // accum0_R += R0*coeff0 + R8*coeff1, etc.
+ accum04 = _mm256_add_epi32(accum04, _mm256_madd_epi16(_084C, interlaced_coeffs));
+ accum15 = _mm256_add_epi32(accum15, _mm256_madd_epi16(_195D, interlaced_coeffs));
+ accum26 = _mm256_add_epi32(accum26, _mm256_madd_epi16(_2A6E, interlaced_coeffs));
+ accum37 = _mm256_add_epi32(accum37, _mm256_madd_epi16(_3B7F, interlaced_coeffs));
+ };
+
+ int i = 0;
+ for (; i < filterLen/2*2; i += 2) {
+ convolve_16_pixels(_mm256_set1_epi32(*(const int32_t*)(filter+i)),
+ _mm256_loadu_si256((const __m256i*)(srcRows[i+0] + x*4)),
+ _mm256_loadu_si256((const __m256i*)(srcRows[i+1] + x*4)));
+ }
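+            // An odd filterLen leaves one trailing coefficient; pairing it
+            // with a zero row keeps _mm256_madd_epi16()'s pairwise sums right.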
+ if (i < filterLen) {
+ convolve_16_pixels(_mm256_set1_epi32(*(const int16_t*)(filter+i)),
+ _mm256_loadu_si256((const __m256i*)(srcRows[i] + x*4)),
+ _mm256_setzero_si256());
+ }
+
+ // Trim the fractional parts off the accumulators.
+ accum04 = _mm256_srai_epi32(accum04, 14);
+ accum15 = _mm256_srai_epi32(accum15, 14);
+ accum26 = _mm256_srai_epi32(accum26, 14);
+ accum37 = _mm256_srai_epi32(accum37, 14);
+
+ // Pack back down to 8-bit channels.
+ auto pixels = _mm256_packus_epi16(_mm256_packs_epi32(accum04, accum15),
+ _mm256_packs_epi32(accum26, accum37));
+
+ if (hasAlpha) {
+ // Clamp alpha to the max of r,g,b to make sure we stay premultiplied.
+ __m256i max_rg = _mm256_max_epu8(pixels, _mm256_srli_epi32(pixels, 8)),
+ max_rgb = _mm256_max_epu8(max_rg, _mm256_srli_epi32(pixels, 16));
+ pixels = _mm256_max_epu8(pixels, _mm256_slli_epi32(max_rgb, 24));
+ } else {
+ // Force opaque.
+ pixels = _mm256_or_si256(pixels, _mm256_set1_epi32(0xff000000));
+ }
+
+ // Normal path to store 8 pixels.
+ if (x + 8 <= width) {
+ _mm256_storeu_si256((__m256i*)dst, pixels);
+ dst += 8;
+ continue;
+ }
+
+ // Store one pixel at a time on the last iteration.
+ for (int i = x; i < width; i++) {
+ *dst++ = _mm_cvtsi128_si32(_mm256_castsi256_si128(pixels));
+ pixels = _mm256_permutevar8x32_epi32(pixels, _mm256_setr_epi32(1,2,3,4,5,6,7,0));
+ }
+ }
+ }
+
+}
+
+#include "src/core/SkOpts.h"
+
+#define SK_OPTS_NS hsw
+#include "src/core/SkCubicSolver.h"
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitRow_opts.h"
+#include "src/opts/SkRasterPipeline_opts.h"
+#include "src/opts/SkUtils_opts.h"
+
+namespace SkOpts {
+ // See SkOpts.h, writing SkConvolutionFilter1D::ConvolutionFixed as the underlying type.
+ extern void (*convolve_vertically)(const int16_t* filter, int filterLen,
+ uint8_t* const* srcRows, int width,
+ uint8_t* out, bool hasAlpha);
+ void Init_hsw() {
+ convolve_vertically = hsw::convolve_vertically;
+
+ blit_row_color32 = hsw::blit_row_color32;
+ blit_row_s32a_opaque = hsw::blit_row_s32a_opaque;
+
+ S32_alpha_D32_filter_DX = hsw::S32_alpha_D32_filter_DX;
+
+ cubic_solver = SK_OPTS_NS::cubic_solver;
+
+ #define M(st) stages_highp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+ start_pipeline_highp = SK_OPTS_NS::start_pipeline;
+ #undef M
+
+ #define M(st) stages_lowp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::lowp::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+ start_pipeline_lowp = SK_OPTS_NS::lowp::start_pipeline;
+ #undef M
+ }
+}
+
+#else // defined(__AVX2__) is not true...
+
+namespace SkOpts { void Init_hsw() {} }
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkOpts_sse41.cpp b/gfx/skia/skia/src/opts/SkOpts_sse41.cpp
new file mode 100644
index 0000000000..8e525968da
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_sse41.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#define SK_OPTS_NS sse41
+#include "src/opts/SkBlitRow_opts.h"
+#include "src/opts/SkRasterPipeline_opts.h"
+
+namespace SkOpts {
+ void Init_sse41() {
+ blit_row_color32 = sse41::blit_row_color32;
+ blit_row_s32a_opaque = sse41::blit_row_s32a_opaque;
+
+ #define M(st) stages_highp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+ start_pipeline_highp = SK_OPTS_NS::start_pipeline;
+ #undef M
+
+ #define M(st) stages_lowp[SkRasterPipeline::st] = (StageFn)SK_OPTS_NS::lowp::st;
+ SK_RASTER_PIPELINE_STAGES(M)
+ just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+ start_pipeline_lowp = SK_OPTS_NS::lowp::start_pipeline;
+ #undef M
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_sse42.cpp b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
new file mode 100644
index 0000000000..8e80fffa4a
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#define SK_OPTS_NS sse42
+#include "src/opts/SkChecksum_opts.h"
+
+namespace SkOpts {
+ void Init_sse42() {
+ hash_fn = sse42::hash_fn;
+ }
+}
+
diff --git a/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
new file mode 100644
index 0000000000..daa69872e0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+#define SK_OPTS_NS ssse3
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitMask_opts.h"
+#include "src/opts/SkSwizzler_opts.h"
+#include "src/opts/SkXfermode_opts.h"
+
+namespace SkOpts {
+ void Init_ssse3() {
+ create_xfermode = ssse3::create_xfermode;
+ blit_mask_d32_a8 = ssse3::blit_mask_d32_a8;
+
+ RGBA_to_BGRA = ssse3::RGBA_to_BGRA;
+ RGBA_to_rgbA = ssse3::RGBA_to_rgbA;
+ RGBA_to_bgrA = ssse3::RGBA_to_bgrA;
+ RGB_to_RGB1 = ssse3::RGB_to_RGB1;
+ RGB_to_BGR1 = ssse3::RGB_to_BGR1;
+ gray_to_RGB1 = ssse3::gray_to_RGB1;
+ grayA_to_RGBA = ssse3::grayA_to_RGBA;
+ grayA_to_rgbA = ssse3::grayA_to_rgbA;
+ inverted_CMYK_to_RGB1 = ssse3::inverted_CMYK_to_RGB1;
+ inverted_CMYK_to_BGR1 = ssse3::inverted_CMYK_to_BGR1;
+
+ S32_alpha_D32_filter_DX = ssse3::S32_alpha_D32_filter_DX;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
new file mode 100644
index 0000000000..5731f5863d
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
@@ -0,0 +1,4557 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_opts_DEFINED
+#define SkRasterPipeline_opts_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkUtils.h" // unaligned_{load,store}
+#include "src/sksl/SkSLByteCode.h"
+
+// Every function in this file should be marked static and inline using SI.
+#if defined(__clang__) || defined(__GNUC__)
+ #define SI __attribute__((always_inline)) static inline
+#else
+ #define SI static inline
+#endif
+
+template <typename Dst, typename Src>
+SI Dst bit_cast(const Src& src) {
+ static_assert(sizeof(Dst) == sizeof(Src), "");
+ return sk_unaligned_load<Dst>(&src);
+}
+
+template <typename Dst, typename Src>
+SI Dst widen_cast(const Src& src) {
+ static_assert(sizeof(Dst) > sizeof(Src), "");
+ Dst dst;
+ memcpy(&dst, &src, sizeof(Src));
+ return dst;
+}
+
+// Our program is an array of void*, either
+// - 1 void* per stage with no context pointer, the next stage;
+// - 2 void* per stage with a context pointer, first the context pointer, then the next stage.
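+//
+// So a program with hypothetical stages load (context), swap (no context),
+// and store (context) would lay out as
+//     { load_fn, load_ctx, swap_fn, store_fn, store_ctx, just_return_fn },
+// each stage consuming its context (if any) and then the next stage pointer.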
+
+// load_and_inc() steps the program forward by 1 void*, returning that pointer.
+SI void* load_and_inc(void**& program) {
+#if defined(__GNUC__) && defined(__x86_64__)
+ // If program is in %rsi (we try to make this likely) then this is a single instruction.
+ void* rax;
+ asm("lodsq" : "=a"(rax), "+S"(program)); // Write-only %rax, read-write %rsi.
+ return rax;
+#else
+ // On ARM *program++ compiles into pretty ideal code without any handholding.
+ return *program++;
+#endif
+}
+
+// Lazily resolved on first cast. Does nothing if cast to Ctx::None.
+struct Ctx {
+ struct None {};
+
+ void* ptr;
+ void**& program;
+
+ explicit Ctx(void**& p) : ptr(nullptr), program(p) {}
+
+ template <typename T>
+ operator T*() {
+ if (!ptr) { ptr = load_and_inc(program); }
+ return (T*)ptr;
+ }
+ operator None() { return None{}; }
+};
+
+
+#if !defined(__clang__)
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define JUMPER_IS_SSE41
+ #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define JUMPER_IS_SSE2
+ #else
+ #define JUMPER_IS_SCALAR
+ #endif
+#elif defined(SK_ARM_HAS_NEON)
+ #define JUMPER_IS_NEON
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX512
+ #define JUMPER_IS_AVX512
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #define JUMPER_IS_HSW
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ #define JUMPER_IS_AVX
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define JUMPER_IS_SSE41
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define JUMPER_IS_SSE2
+#else
+ #define JUMPER_IS_SCALAR
+#endif
+
+// Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
+#if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
+ // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
+ #if defined(__apple_build_version__) && __clang_major__ < 9
+ #define JUMPER_IS_SCALAR
+ #elif __clang_major__ < 5
+ #define JUMPER_IS_SCALAR
+ #endif
+
+ #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
+ #undef JUMPER_IS_NEON
+ #endif
+#endif
+
+#if defined(JUMPER_IS_SCALAR)
+ #include <math.h>
+#elif defined(JUMPER_IS_NEON)
+ #include <arm_neon.h>
+#else
+ #include <immintrin.h>
+#endif
+
+#if !defined(__clang__) && !defined(JUMPER_IS_SCALAR)
+#include "include/private/SkNx.h"
+#endif
+
+#ifdef __clang__
+#define SK_ASSUME(cond) __builtin_assume(cond)
+#elif defined(__GNUC__)
+#define SK_ASSUME(cond) ((cond) ? (void)0 : __builtin_unreachable())
+#elif defined(_MSC_VER)
+#define SK_ASSUME(cond) __assume(cond)
+#else
+#define SK_ASSUME(cond) ((void)0)
+#endif
+
+#if defined(__clang__) || defined(__GNUC__)
+#define SK_EXPECT(exp, p) __builtin_expect(exp, p)
+#else
+#define SK_EXPECT(exp, p) (exp)
+#endif
+
+#if defined(JUMPER_IS_SCALAR)
+#define SK_CONVERTVECTOR(vec, type) ((type)(vec))
+#elif defined(__clang__)
+#define SK_CONVERTVECTOR(vec, type) __builtin_convertvector(vec, type)
+#else
+template <typename T> struct SkNx_element {};
+template <typename T, int N> struct SkNx_element<SkNx<N,T>> { typedef T type; };
+#define SK_CONVERTVECTOR(vec, vtype) SkNx_cast<typename SkNx_element<vtype>::type>(vec)
+#endif
+
+#ifdef __clang__
+#define SK_VECTORTYPE(type, size) type __attribute__((ext_vector_type(size)))
+#else
+#define SK_VECTORTYPE(type, size) SkNx<size, type>
+#endif
+
+namespace SK_OPTS_NS {
+
+#if defined(JUMPER_IS_SCALAR)
+ // This path should lead to portable scalar code.
+ using F = float ;
+ using I32 = int32_t;
+ using U64 = uint64_t;
+ using U32 = uint32_t;
+ using U16 = uint16_t;
+ using U8 = uint8_t ;
+
+ SI F mad(F f, F m, F a) { return f*m+a; }
+ SI F min(F a, F b) { return fminf(a,b); }
+ SI F max(F a, F b) { return fmaxf(a,b); }
+ SI F abs_ (F v) { return fabsf(v); }
+ SI F floor_(F v) { return floorf(v); }
+ SI F rcp (F v) { return 1.0f / v; }
+ SI F rsqrt (F v) { return 1.0f / sqrtf(v); }
+ SI F sqrt_(F v) { return sqrtf(v); }
+ SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
+ SI U16 pack(U32 v) { return (U16)v; }
+ SI U8 pack(U16 v) { return (U8)v; }
+
+ SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }
+
+ template <typename T>
+ SI T gather(const T* p, U32 ix) { return p[ix]; }
+
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ *r = ptr[0];
+ *g = ptr[1];
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ ptr[0] = r;
+ ptr[1] = g;
+ }
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ }
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ *a = ptr[3];
+ }
+ SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ ptr[0] = r;
+ ptr[1] = g;
+ ptr[2] = b;
+ ptr[3] = a;
+ }
+
+ SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+ *r = ptr[0];
+ *g = ptr[1];
+ }
+ SI void store2(float* ptr, size_t tail, F r, F g) {
+ ptr[0] = r;
+ ptr[1] = g;
+ }
+ SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ *a = ptr[3];
+ }
+ SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+ ptr[0] = r;
+ ptr[1] = g;
+ ptr[2] = b;
+ ptr[3] = a;
+ }
+
+#elif defined(JUMPER_IS_NEON)
+    // With Clang we can use its ext_vector_type extensions directly; SK_VECTORTYPE falls back to SkNx elsewhere.
+ template <typename T> using V = SK_VECTORTYPE(T, 4);
+ using F = V<float >;
+ using I32 = V< int32_t>;
+ using U64 = V<uint64_t>;
+ using U32 = V<uint32_t>;
+ using U16 = V<uint16_t>;
+ using U8 = V<uint8_t >;
+
+ // We polyfill a few routines that Clang doesn't build into ext_vector_types.
+ SI F min(F a, F b) { return vminq_f32(a,b); }
+ SI F max(F a, F b) { return vmaxq_f32(a,b); }
+ SI F abs_ (F v) { return vabsq_f32(v); }
+ SI F rcp (F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e ) * e; }
+ SI F rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
+ SI U16 pack(U32 v) { return SK_CONVERTVECTOR(v, U16); }
+ SI U8 pack(U16 v) { return SK_CONVERTVECTOR(v, U8); }
+
+ SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }
+
+ #if defined(SK_CPU_ARM64)
+ SI F mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
+ SI F floor_(F v) { return vrndmq_f32(v); }
+ SI F sqrt_(F v) { return vsqrtq_f32(v); }
+ SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
+ #else
+ SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
+ SI F floor_(F v) {
+ F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ return roundtrip - if_then_else(roundtrip > v, 1, 0);
+ }
+
+ SI F sqrt_(F v) {
+ auto e = vrsqrteq_f32(v); // Estimate and two refinement steps for e = rsqrt(v).
+ e *= vrsqrtsq_f32(v,e*e);
+ e *= vrsqrtsq_f32(v,e*e);
+ return v*e; // sqrt(v) == v*rsqrt(v).
+ }
+
+ SI U32 round(F v, F scale) {
+ return vcvtq_u32_f32(mad(v,scale,0.5f));
+ }
+ #endif
+
+ template <typename T>
+ SI V<T> gather(const T* p, U32 ix) {
+ return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
+ }
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ uint16x4x2_t rg;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
+ if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
+ if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
+ } else {
+ rg = vld2_u16(ptr);
+ }
+ *r = rg.val[0];
+ *g = rg.val[1];
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
+ if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
+ if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
+ } else {
+ vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
+ }
+ }
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ uint16x4x3_t rgb;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
+ if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
+ if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
+ } else {
+ rgb = vld3_u16(ptr);
+ }
+ *r = rgb.val[0];
+ *g = rgb.val[1];
+ *b = rgb.val[2];
+ }
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ uint16x4x4_t rgba;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
+ if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
+ if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
+ } else {
+ rgba = vld4_u16(ptr);
+ }
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+ }
+
+ SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
+ if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
+ if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
+ } else {
+ vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
+ }
+ }
+ SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+ float32x4x2_t rg;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
+ if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
+ if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
+ } else {
+ rg = vld2q_f32(ptr);
+ }
+ *r = rg.val[0];
+ *g = rg.val[1];
+ }
+ SI void store2(float* ptr, size_t tail, F r, F g) {
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
+ if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
+ if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
+ } else {
+ vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
+ }
+ }
+ SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+ float32x4x4_t rgba;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
+ if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
+ if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
+ } else {
+ rgba = vld4q_f32(ptr);
+ }
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+ }
+ SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
+ if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
+ if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
+ } else {
+ vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
+ }
+ }
+
+#elif defined(JUMPER_IS_AVX) || defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+    // On Clang these wrap __m256 and __m256i, but friendlier and strongly-typed; other compilers get SkNx.
+ template <typename T> using V = SK_VECTORTYPE(T, 8);
+ using F = V<float >;
+ using I32 = V< int32_t>;
+ using U64 = V<uint64_t>;
+ using U32 = V<uint32_t>;
+ using U16 = V<uint16_t>;
+ using U8 = V<uint8_t >;
+
+ SI F mad(F f, F m, F a) {
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ return _mm256_fmadd_ps(f,m,a);
+ #else
+ return f*m+a;
+ #endif
+ }
+
+ SI F min(F a, F b) { return _mm256_min_ps(a,b); }
+ SI F max(F a, F b) { return _mm256_max_ps(a,b); }
+ SI F abs_ (F v) { return _mm256_and_ps(v, 0-v); }
+ SI F floor_(F v) { return _mm256_floor_ps(v); }
+ SI F rcp (F v) { return _mm256_rcp_ps (v); }
+ SI F rsqrt (F v) { return _mm256_rsqrt_ps(v); }
+ SI F sqrt_(F v) { return _mm256_sqrt_ps (v); }
+ SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
+
+ SI U16 pack(U32 v) {
+ return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
+ _mm256_extractf128_si256(v, 1));
+ }
+ SI U8 pack(U16 v) {
+ auto r = _mm_packus_epi16(v,v);
+ return sk_unaligned_load<U8>(&r);
+ }
+
+ SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }
+
+ template <typename T>
+ SI V<T> gather(const T* p, U32 ix) {
+ return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
+ p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
+ }
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ SI F gather(const float* p, U32 ix) { return _mm256_i32gather_ps (p, ix, 4); }
+ SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
+ SI U64 gather(const uint64_t* p, U32 ix) {
+ __m256i parts[] = {
+ _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
+ _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
+ };
+ return bit_cast<U64>(parts);
+ }
+ #endif
+
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ U16 _0123, _4567;
+ if (SK_EXPECT(tail,0)) {
+ _0123 = _4567 = _mm_setzero_si128();
+ auto* d = &_0123;
+ if (tail > 3) {
+ *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
+ tail -= 4;
+ ptr += 8;
+ d = &_4567;
+ }
+ bool high = false;
+ if (tail > 1) {
+ *d = _mm_loadu_si64(ptr);
+ tail -= 2;
+ ptr += 4;
+ high = true;
+ }
+ if (tail > 0) {
+ (*d)[high ? 4 : 0] = *(ptr + 0);
+ (*d)[high ? 5 : 1] = *(ptr + 1);
+ }
+ } else {
+ _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
+ _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
+ }
+ *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
+ _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
+ *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
+ _mm_srai_epi32(_4567, 16));
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ auto _0123 = _mm_unpacklo_epi16(r, g),
+ _4567 = _mm_unpackhi_epi16(r, g);
+ if (SK_EXPECT(tail,0)) {
+ const auto* s = &_0123;
+ if (tail > 3) {
+ _mm_storeu_si128((__m128i*)ptr, *s);
+ s = &_4567;
+ tail -= 4;
+ ptr += 8;
+ }
+ bool high = false;
+ if (tail > 1) {
+ _mm_storel_epi64((__m128i*)ptr, *s);
+ ptr += 4;
+ tail -= 2;
+ high = true;
+ }
+ if (tail > 0) {
+ if (high) {
+ *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
+ } else {
+ *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
+ }
+ }
+ } else {
+ _mm_storeu_si128((__m128i*)ptr + 0, _0123);
+ _mm_storeu_si128((__m128i*)ptr + 1, _4567);
+ }
+ }
+
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ __m128i _0,_1,_2,_3,_4,_5,_6,_7;
+ if (SK_EXPECT(tail,0)) {
+ auto load_rgb = [](const uint16_t* src) {
+ auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
+ return _mm_insert_epi16(v, src[2], 2);
+ };
+ _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
+ if ( true ) { _0 = load_rgb(ptr + 0); }
+ if (tail > 1) { _1 = load_rgb(ptr + 3); }
+ if (tail > 2) { _2 = load_rgb(ptr + 6); }
+ if (tail > 3) { _3 = load_rgb(ptr + 9); }
+ if (tail > 4) { _4 = load_rgb(ptr + 12); }
+ if (tail > 5) { _5 = load_rgb(ptr + 15); }
+ if (tail > 6) { _6 = load_rgb(ptr + 18); }
+ } else {
+ // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
+ auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ;
+ auto _23 = _mm_loadu_si128((const __m128i*)(ptr + 6)) ;
+ auto _45 = _mm_loadu_si128((const __m128i*)(ptr + 12)) ;
+ auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
+ _0 = _01; _1 = _mm_srli_si128(_01, 6);
+ _2 = _23; _3 = _mm_srli_si128(_23, 6);
+ _4 = _45; _5 = _mm_srli_si128(_45, 6);
+ _6 = _67; _7 = _mm_srli_si128(_67, 6);
+ }
+
+ auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
+ _13 = _mm_unpacklo_epi16(_1, _3),
+ _46 = _mm_unpacklo_epi16(_4, _6),
+ _57 = _mm_unpacklo_epi16(_5, _7);
+
+ auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
+ bx0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 xx xx xx xx
+ rg4567 = _mm_unpacklo_epi16(_46, _57),
+ bx4567 = _mm_unpackhi_epi16(_46, _57);
+
+ *r = _mm_unpacklo_epi64(rg0123, rg4567);
+ *g = _mm_unpackhi_epi64(rg0123, rg4567);
+ *b = _mm_unpacklo_epi64(bx0123, bx4567);
+ }
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ __m128i _01, _23, _45, _67;
+ if (SK_EXPECT(tail,0)) {
+ auto src = (const double*)ptr;
+ _01 = _23 = _45 = _67 = _mm_setzero_si128();
+ if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
+ if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
+ if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
+ if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
+ if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
+ if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
+ if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
+ } else {
+ _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
+ _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
+ _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
+ _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
+ }
+
+ auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
+ _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3
+ _46 = _mm_unpacklo_epi16(_45, _67),
+ _57 = _mm_unpackhi_epi16(_45, _67);
+
+ auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
+ ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3
+ rg4567 = _mm_unpacklo_epi16(_46, _57),
+ ba4567 = _mm_unpackhi_epi16(_46, _57);
+
+ *r = _mm_unpacklo_epi64(rg0123, rg4567);
+ *g = _mm_unpackhi_epi64(rg0123, rg4567);
+ *b = _mm_unpacklo_epi64(ba0123, ba4567);
+ *a = _mm_unpackhi_epi64(ba0123, ba4567);
+ }
+ SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ auto rg0123 = _mm_unpacklo_epi16(r, g), // r0 g0 r1 g1 r2 g2 r3 g3
+ rg4567 = _mm_unpackhi_epi16(r, g), // r4 g4 r5 g5 r6 g6 r7 g7
+ ba0123 = _mm_unpacklo_epi16(b, a),
+ ba4567 = _mm_unpackhi_epi16(b, a);
+
+ auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
+ _23 = _mm_unpackhi_epi32(rg0123, ba0123),
+ _45 = _mm_unpacklo_epi32(rg4567, ba4567),
+ _67 = _mm_unpackhi_epi32(rg4567, ba4567);
+
+ if (SK_EXPECT(tail,0)) {
+ auto dst = (double*)ptr;
+ if (tail > 0) { _mm_storel_pd(dst+0, _01); }
+ if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
+ if (tail > 2) { _mm_storel_pd(dst+2, _23); }
+ if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
+ if (tail > 4) { _mm_storel_pd(dst+4, _45); }
+ if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
+ if (tail > 6) { _mm_storel_pd(dst+6, _67); }
+ } else {
+ _mm_storeu_si128((__m128i*)ptr + 0, _01);
+ _mm_storeu_si128((__m128i*)ptr + 1, _23);
+ _mm_storeu_si128((__m128i*)ptr + 2, _45);
+ _mm_storeu_si128((__m128i*)ptr + 3, _67);
+ }
+ }
+
+ SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+ F _0123, _4567;
+ if (SK_EXPECT(tail, 0)) {
+ _0123 = _4567 = _mm256_setzero_ps();
+ F* d = &_0123;
+ if (tail > 3) {
+ *d = _mm256_loadu_ps(ptr);
+ ptr += 8;
+ tail -= 4;
+ d = &_4567;
+ }
+ bool high = false;
+ if (tail > 1) {
+ *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
+ ptr += 4;
+ tail -= 2;
+ high = true;
+ }
+ if (tail > 0) {
+ *d = high ? _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 1)
+ : _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 0);
+ }
+ } else {
+ _0123 = _mm256_loadu_ps(ptr + 0);
+ _4567 = _mm256_loadu_ps(ptr + 8);
+ }
+
+ F _0145 = _mm256_permute2f128_pd(_0123, _4567, 0x20),
+ _2367 = _mm256_permute2f128_pd(_0123, _4567, 0x31);
+
+ *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
+ *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
+ }
+ SI void store2(float* ptr, size_t tail, F r, F g) {
+ F _0145 = _mm256_unpacklo_ps(r, g),
+ _2367 = _mm256_unpackhi_ps(r, g);
+ F _0123 = _mm256_permute2f128_pd(_0145, _2367, 0x20),
+ _4567 = _mm256_permute2f128_pd(_0145, _2367, 0x31);
+
+ if (SK_EXPECT(tail, 0)) {
+ const __m256* s = &_0123;
+ if (tail > 3) {
+ _mm256_storeu_ps(ptr, *s);
+ s = &_4567;
+ tail -= 4;
+ ptr += 8;
+ }
+ bool high = false;
+ if (tail > 1) {
+ _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
+ ptr += 4;
+ tail -= 2;
+ high = true;
+ }
+ if (tail > 0) {
+ *(ptr + 0) = (*s)[ high ? 4 : 0];
+ *(ptr + 1) = (*s)[ high ? 5 : 1];
+ }
+ } else {
+ _mm256_storeu_ps(ptr + 0, _0123);
+ _mm256_storeu_ps(ptr + 8, _4567);
+ }
+ }
+
+ SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+ F _04, _15, _26, _37;
+ _04 = _15 = _26 = _37 = 0;
+ switch (tail) {
+ case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1);
+ case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1);
+ case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1);
+ case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1);
+ case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0);
+ case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0);
+ case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0);
+ case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
+ }
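+        // That switch falls through on purpose: tail == 0 means a full load of
+        // all eight pixels, executing every case from 0 down through 1.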
+
+ F rg0145 = _mm256_unpacklo_ps(_04,_15), // r0 r1 g0 g1 | r4 r5 g4 g5
+ ba0145 = _mm256_unpackhi_ps(_04,_15),
+ rg2367 = _mm256_unpacklo_ps(_26,_37),
+ ba2367 = _mm256_unpackhi_ps(_26,_37);
+
+ *r = _mm256_unpacklo_pd(rg0145, rg2367);
+ *g = _mm256_unpackhi_pd(rg0145, rg2367);
+ *b = _mm256_unpacklo_pd(ba0145, ba2367);
+ *a = _mm256_unpackhi_pd(ba0145, ba2367);
+ }
+ SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+ F rg0145 = _mm256_unpacklo_ps(r, g), // r0 g0 r1 g1 | r4 g4 r5 g5
+ rg2367 = _mm256_unpackhi_ps(r, g), // r2 ... | r6 ...
+ ba0145 = _mm256_unpacklo_ps(b, a), // b0 a0 b1 a1 | b4 a4 b5 a5
+ ba2367 = _mm256_unpackhi_ps(b, a); // b2 ... | b6 ...
+
+ F _04 = _mm256_unpacklo_pd(rg0145, ba0145), // r0 g0 b0 a0 | r4 g4 b4 a4
+ _15 = _mm256_unpackhi_pd(rg0145, ba0145), // r1 ... | r5 ...
+ _26 = _mm256_unpacklo_pd(rg2367, ba2367), // r2 ... | r6 ...
+ _37 = _mm256_unpackhi_pd(rg2367, ba2367); // r3 ... | r7 ...
+
+ if (SK_EXPECT(tail, 0)) {
+ if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
+ if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
+ if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
+ if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
+ if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
+ if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
+ if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
+ } else {
+ F _01 = _mm256_permute2f128_ps(_04, _15, 32), // 32 == 0010 0000 == lo, lo
+ _23 = _mm256_permute2f128_ps(_26, _37, 32),
+ _45 = _mm256_permute2f128_ps(_04, _15, 49), // 49 == 0011 0001 == hi, hi
+ _67 = _mm256_permute2f128_ps(_26, _37, 49);
+ _mm256_storeu_ps(ptr+ 0, _01);
+ _mm256_storeu_ps(ptr+ 8, _23);
+ _mm256_storeu_ps(ptr+16, _45);
+ _mm256_storeu_ps(ptr+24, _67);
+ }
+ }
+
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
+ template <typename T> using V = SK_VECTORTYPE(T, 4);
+ using F = V<float >;
+ using I32 = V< int32_t>;
+ using U64 = V<uint64_t>;
+ using U32 = V<uint32_t>;
+ using U16 = V<uint16_t>;
+ using U8 = V<uint8_t >;
+
+ #ifndef __clang__
+ template <typename T, typename P> SI T unaligned_load_SkNx(const P* p) { return T::Load(p); }
+ template <typename T, typename P> SI void unaligned_store_SkNx(P* p, T v) { v.store(p); }
+ #define sk_unaligned_load unaligned_load_SkNx
+ #define sk_unaligned_store unaligned_store_SkNx
+    template <typename Dst> SI __m128i widen_cast(const U16& src) {
+        static_assert(sizeof(Dst) == sizeof(__m128i), "");
+        return src.fVec;
+    }
+ #endif
+
+ SI F mad(F f, F m, F a) {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ return _mm_fmadd_ps(f,m,a);
+ #else
+ return f*m+a;
+ #endif
+ }
+
+ SI F min(F a, F b) { return _mm_min_ps(a,b); }
+ SI F max(F a, F b) { return _mm_max_ps(a,b); }
+ SI F abs_(F v) { return _mm_and_ps(v, -v); }
+ SI F rcp (F v) { return _mm_rcp_ps (v); }
+ SI F rsqrt (F v) { return _mm_rsqrt_ps(v); }
+ SI F sqrt_(F v) { return _mm_sqrt_ps (v); }
+ SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
+
+ SI U16 pack(U32 v) {
+ #if defined(JUMPER_IS_SSE41)
+ auto p = _mm_packus_epi32(v,v);
+ #else
+ // Sign extend so that _mm_packs_epi32() does the pack we want.
+ auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
+ p = _mm_packs_epi32(p,p);
+ #endif
+ return sk_unaligned_load<U16>(&p); // We have two copies. Return (the lower) one.
+ }
+ SI U8 pack(U16 v) {
+ auto r = widen_cast<__m128i>(v);
+ r = _mm_packus_epi16(r,r);
+ return sk_unaligned_load<U8>(&r);
+ }
+
+ SI F if_then_else(I32 c, F t, F e) {
+ #if defined(JUMPER_IS_SSE41)
+ return _mm_blendv_ps(e, t, _mm_castsi128_ps(c));
+ #else
+ return _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), t), _mm_andnot_ps(_mm_castsi128_ps(c), e));
+ #endif
+ }
+
+ SI F if_then_else(F c, F t, F e) {
+ #if defined(JUMPER_IS_SSE41)
+ return _mm_blendv_ps(e, t, c);
+ #else
+ return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
+ #endif
+ }
+
+ SI F floor_(F v) {
+ #if defined(JUMPER_IS_SSE41)
+ return _mm_floor_ps(v);
+ #else
+ F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+ return roundtrip - if_then_else(roundtrip > v, 1, 0);
+ #endif
+ }
+
+ template <typename T>
+ SI V<T> gather(const T* p, U32 ix) {
+ return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
+ }
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ SI F gather(const float* p, U32 ix) { return _mm_i32gather_ps (p, ix, 4); }
+ SI U32 gather(const uint32_t* p, U32 ix) { return _mm_i32gather_epi32((const int*)p, ix, 4); }
+ SI U64 gather(const uint64_t* p, U32 ix) {
+ __m128i parts[] = {
+ _mm_i32gather_epi64((const long long int*)p, ix, 8),
+ _mm_i32gather_epi64((const long long int*)p, _mm_unpackhi_epi64(ix, _mm_setzero_si128()), 8),
+ };
+ return bit_cast<U64>(parts);
+ }
+ #endif
+
+ // TODO: these loads and stores are incredibly difficult to follow.
+
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ __m128i _01;
+ if (SK_EXPECT(tail,0)) {
+ _01 = _mm_setzero_si128();
+ if (tail > 1) {
+ _01 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_01), (const double*)ptr)); // r0 g0 r1 g1 00 00 00 00
+ if (tail > 2) {
+ _01 = _mm_insert_epi16(_01, *(ptr+4), 4); // r0 g0 r1 g1 r2 00 00 00
+ _01 = _mm_insert_epi16(_01, *(ptr+5), 5); // r0 g0 r1 g1 r2 g2 00 00
+ }
+ } else {
+ _01 = _mm_cvtsi32_si128(*(const uint32_t*)ptr); // r0 g0 00 00 00 00 00 00
+ }
+ } else {
+ _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 r1 g1 r2 g2 r3 g3
+ }
+ auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8); // r0 r1 g0 g1 r2 g2 r3 g3
+ auto rg = _mm_shufflehi_epi16(rg01_23, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3
+
+ auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3
+ auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3
+ *r = sk_unaligned_load<U16>(&R);
+ *g = sk_unaligned_load<U16>(&G);
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
+ if (SK_EXPECT(tail, 0)) {
+ if (tail > 1) {
+ _mm_storel_epi64((__m128i*)ptr, rg);
+ if (tail > 2) {
+ int32_t rgpair = rg[2];
+ memcpy(ptr + 4, &rgpair, sizeof(rgpair));
+ }
+ } else {
+ int32_t rgpair = rg[0];
+ memcpy(ptr, &rgpair, sizeof(rgpair));
+ }
+ } else {
+ _mm_storeu_si128((__m128i*)ptr + 0, rg);
+ }
+ }
+
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ __m128i _0, _1, _2, _3;
+ if (SK_EXPECT(tail,0)) {
+ _1 = _2 = _3 = _mm_setzero_si128();
+ auto load_rgb = [](const uint16_t* src) {
+ auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
+ return _mm_insert_epi16(v, src[2], 2);
+ };
+ if ( true ) { _0 = load_rgb(ptr + 0); }
+ if (tail > 1) { _1 = load_rgb(ptr + 3); }
+ if (tail > 2) { _2 = load_rgb(ptr + 6); }
+ } else {
+ // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
+ auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ,
+ _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);
+
+ // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
+ _0 = _01;
+ _1 = _mm_srli_si128(_01, 6);
+ _2 = _23;
+ _3 = _mm_srli_si128(_23, 6);
+ }
+
+ // De-interlace to R,G,B.
+ auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
+ _13 = _mm_unpacklo_epi16(_1, _3); // r1 r3 g1 g3 b1 b3 xx xx
+
+ auto R = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
+ G = _mm_srli_si128(R, 8),
+ B = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 xx xx xx xx
+
+ *r = sk_unaligned_load<U16>(&R);
+ *g = sk_unaligned_load<U16>(&G);
+ *b = sk_unaligned_load<U16>(&B);
+ }
+
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ __m128i _01, _23;
+ if (SK_EXPECT(tail,0)) {
+ _01 = _23 = _mm_setzero_si128();
+ auto src = (const double*)ptr;
+ if ( true ) { _01 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_01), src + 0)); } // r0 g0 b0 a0 00 00 00 00
+ if (tail > 1) { _01 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(_01), src + 1)); } // r0 g0 b0 a0 r1 g1 b1 a1
+ if (tail > 2) { _23 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_23), src + 2)); } // r2 g2 b2 a2 00 00 00 00
+ } else {
+ _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 b0 a0 r1 g1 b1 a1
+ _23 = _mm_loadu_si128(((__m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3
+ }
+
+ auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
+ _13 = _mm_unpackhi_epi16(_01, _23); // r1 r3 g1 g3 b1 b3 a1 a3
+
+ auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
+ ba = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 a0 a1 a2 a3
+
+ *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
+ *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
+ *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
+ *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
+ }
+
+ SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
+ ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
+
+ if (SK_EXPECT(tail, 0)) {
+ auto dst = (double*)ptr;
+ if ( true ) { _mm_storel_pd(dst + 0, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
+ if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
+ if (tail > 2) { _mm_storel_pd(dst + 2, _mm_castsi128_pd(_mm_unpackhi_epi32(rg, ba))); }
+ } else {
+ _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
+ _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
+ }
+ }
+
+ SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+ F _01, _23;
+ if (SK_EXPECT(tail, 0)) {
+ _01 = _23 = _mm_setzero_ps();
+ if ( true ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
+ if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
+ if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
+ } else {
+ _01 = _mm_loadu_ps(ptr + 0);
+ _23 = _mm_loadu_ps(ptr + 4);
+ }
+ *r = _mm_shuffle_ps(_01, _23, 0x88);
+ *g = _mm_shuffle_ps(_01, _23, 0xDD);
+ }
+ SI void store2(float* ptr, size_t tail, F r, F g) {
+ F _01 = _mm_unpacklo_ps(r, g),
+ _23 = _mm_unpackhi_ps(r, g);
+ if (SK_EXPECT(tail, 0)) {
+ if ( true ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
+ if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
+ if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
+ } else {
+ _mm_storeu_ps(ptr + 0, _01);
+ _mm_storeu_ps(ptr + 4, _23);
+ }
+ }
+
+ SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+ F _0, _1, _2, _3;
+ if (SK_EXPECT(tail, 0)) {
+ _1 = _2 = _3 = _mm_setzero_ps();
+ if ( true ) { _0 = _mm_loadu_ps(ptr + 0); }
+ if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
+ if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
+ } else {
+ _0 = _mm_loadu_ps(ptr + 0);
+ _1 = _mm_loadu_ps(ptr + 4);
+ _2 = _mm_loadu_ps(ptr + 8);
+ _3 = _mm_loadu_ps(ptr +12);
+ }
+ _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
+ *r = _0;
+ *g = _1;
+ *b = _2;
+ *a = _3;
+ }
+
+ SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+ _MM_TRANSPOSE4_PS(r,g,b,a);
+ if (SK_EXPECT(tail, 0)) {
+ if ( true ) { _mm_storeu_ps(ptr + 0, r); }
+ if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
+ if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
+ } else {
+ _mm_storeu_ps(ptr + 0, r);
+ _mm_storeu_ps(ptr + 4, g);
+ _mm_storeu_ps(ptr + 8, b);
+ _mm_storeu_ps(ptr +12, a);
+ }
+ }
+#endif
+
+// We need to be careful with casts.
+// (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
+// These named casts and bit_cast() are always what they seem to be.
+SI F cast (U32 v) { return SK_CONVERTVECTOR((I32)v, F); }
+SI F cast64(U64 v) { return SK_CONVERTVECTOR( v, F); }
+SI U32 trunc_(F v) { return (U32)SK_CONVERTVECTOR( v, I32); }
+SI U32 expand(U16 v) { return SK_CONVERTVECTOR( v, U32); }
+SI U32 expand(U8 v) { return SK_CONVERTVECTOR( v, U32); }
+
+#if defined(__clang__) || defined(JUMPER_IS_SCALAR)
+template <typename V>
+SI V if_then_else(I32 c, V t, V e) {
+ return bit_cast<V>(if_then_else(c, bit_cast<F>(t), bit_cast<F>(e)));
+}
+#else
+template <typename T, typename V>
+SI V if_then_else(T c, V t, V e) {
+ return bit_cast<V>(if_then_else(c, bit_cast<F>(t), bit_cast<F>(e)));
+}
+#endif
+
+SI U16 bswap(U16 x) {
+#if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
+ // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
+ // when generating code for SSE2 and SSE4.1. We'll do it manually...
+ auto v = widen_cast<__m128i>(x);
+ v = _mm_or_si128(_mm_slli_epi16(v,8), _mm_srli_epi16(v,8));
+ return sk_unaligned_load<U16>(&v);
+#else
+ return (x<<8) | (x>>8);
+#endif
+}
+
+SI F fract(F v) { return v - floor_(v); }
+
+// See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
+SI F approx_log2(F x) {
+ // e - 127 is a fair approximation of log2(x) in its own right...
+ F e = cast(bit_cast<U32>(x)) * (1.0f / (1<<23));
+
+ // ... but using the mantissa to refine its error is _much_ better.
+ F m = bit_cast<F>((bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
+ return e
+ - 124.225514990f
+ - 1.498030302f * m
+ - 1.725879990f / (0.3520887068f + m);
+}
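+
+// A quick worked example: for x = 8.0f, bit_cast<U32>(x) is 0x41000000, so
+// e is 130 and m is 0.5; the refinement gives 130 - 124.2255 - 0.7490 - 2.0255,
+// which is log2(8) = 3 to within rounding.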
+
+SI F approx_log(F x) {
+ const float ln2 = 0.69314718f;
+ return ln2 * approx_log2(x);
+}
+
+SI F approx_pow2(F x) {
+ F f = fract(x);
+ return bit_cast<F>(round(1.0f * (1<<23),
+ x + 121.274057500f
+ - 1.490129070f * f
+ + 27.728023300f / (4.84252568f - f)));
+}
+
+SI F approx_exp(F x) {
+ const float log2_e = 1.4426950408889634074f;
+ return approx_pow2(log2_e * x);
+}
+
+SI F approx_powf(F x, F y) {
+#if defined(SK_LEGACY_APPROX_POWF_SPECIALCASE)
+ return if_then_else((x == 0) , 0
+#else
+ return if_then_else((x == 0)|(x == 1), x
+#endif
+ , approx_pow2(approx_log2(x) * y));
+}
+
+SI F from_half(U16 h) {
+#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
+ && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
+ return vcvt_f32_f16(h);
+
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ return _mm256_cvtph_ps(h);
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ return _mm_cvtph_ps(h);
+#else
+ // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
+ U32 sem = expand(h),
+ s = sem & 0x8000,
+ em = sem ^ s;
+
+ // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
+ auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here.
+ return if_then_else(denorm, F(0)
+ , bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
+#endif
+}
+
+SI U16 to_half(F f) {
+#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
+ && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
+ return vcvt_f16_f32(f);
+
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ return _mm_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
+#else
+ // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
+ U32 sem = bit_cast<U32>(f),
+ s = sem & 0x80000000,
+ em = sem ^ s;
+
+ // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
+ auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here.
+ return pack(if_then_else(denorm, U32(0)
+ , (s>>16) + (em>>13) - ((127-15)<<10)));
+#endif
+}
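+
+// Worked example for the portable branch above: 1.0f is 0x3F800000, so em>>13
+// is 0x1FC00; subtracting (127-15)<<10 == 0x1C000 leaves 0x3C00, the half
+// encoding of 1.0. from_half() undoes the same shift and bias.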
+
+// Our fundamental vector depth is our pixel stride.
+static const size_t N = sizeof(F) / sizeof(float);
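+// Concretely, N is 1 on the scalar path, 4 on the NEON/SSE paths, and 8 on the
+// AVX/HSW/AVX-512 paths, matching the lane count of F above.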
+
+// We're finally going to get to what a Stage function looks like!
+// tail == 0 ~~> work on a full N pixels
+// tail != 0 ~~> work on only the first tail pixels
+// tail is always < N.
+
+// Any custom ABI to use for all (non-externally-facing) stage functions?
+// Also decide here whether to use narrow (compromise) or wide (ideal) stages.
+#if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
+ // This lets us pass vectors more efficiently on 32-bit ARM.
+ // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
+ #define ABI __attribute__((pcs("aapcs-vfp")))
+ #define JUMPER_NARROW_STAGES 1
+#elif 0 && defined(_MSC_VER) && defined(__clang__) && defined(__x86_64__)
+ // SysV ABI makes it very sensible to use wide stages with clang-cl.
+ // TODO: crashes during compilation :(
+ #define ABI __attribute__((sysv_abi))
+ #define JUMPER_NARROW_STAGES 0
+#elif defined(_MSC_VER)
+ // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
+ // instead of {b,a} on the stack. Narrow stages work best for __vectorcall.
+ #define ABI __vectorcall
+ #define JUMPER_NARROW_STAGES 1
+#elif defined(__x86_64__) || defined(SK_CPU_ARM64)
+ // These platforms are ideal for wider stages, and their default ABI is ideal.
+ #define ABI
+ #define JUMPER_NARROW_STAGES 0
+#else
+ // 32-bit or unknown... shunt them down the narrow path.
+ // Odds are these have few registers and are better off there.
+ #define ABI
+ #define JUMPER_NARROW_STAGES 1
+#endif
+
+#if JUMPER_NARROW_STAGES
+ struct Params {
+ size_t dx, dy, tail;
+ F dr,dg,db,da;
+ };
+ using Stage = void(ABI*)(Params*, void** program, F r, F g, F b, F a);
+#else
+    // We keep program as the second argument so that it's passed in rsi for load_and_inc().
+ using Stage = void(ABI*)(size_t tail, void** program, size_t dx, size_t dy, F,F,F,F, F,F,F,F);
+#endif
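+
+// A sketch of a program buffer (hypothetical stage order; real programs are
+// assembled elsewhere from stage pointers interleaved with their contexts):
+//     void* program[] = { (void*)load_8888,  &srcCtx,
+//                         (void*)srcover,
+//                         (void*)store_8888, &dstCtx,
+//                         (void*)just_return };
+// Each stage pulls its context (if any) off the front with Ctx{program}, then
+// tail-calls whatever load_and_inc(program) returns next.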
+
+static void start_pipeline(size_t dx, size_t dy, size_t xlimit, size_t ylimit, void** program) {
+ auto start = (Stage)load_and_inc(program);
+ const size_t x0 = dx;
+ for (; dy < ylimit; dy++) {
+ #if JUMPER_NARROW_STAGES
+ Params params = { x0,dy,0, 0,0,0,0 };
+ while (params.dx + N <= xlimit) {
+ start(&params,program, 0,0,0,0);
+ params.dx += N;
+ }
+ if (size_t tail = xlimit - params.dx) {
+ params.tail = tail;
+ start(&params,program, 0,0,0,0);
+ }
+ #else
+ dx = x0;
+ while (dx + N <= xlimit) {
+ start(0,program,dx,dy, 0,0,0,0, 0,0,0,0);
+ dx += N;
+ }
+ if (size_t tail = xlimit - dx) {
+ start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
+ }
+ #endif
+ }
+}
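+
+// For example, with N == 8 and xlimit - dx == 10, each row runs one full-width
+// pass and then one tail pass with tail == 2; tail is always strictly < N.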
+
+#if JUMPER_NARROW_STAGES
+ #define STAGE(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
+ static void ABI name(Params* params, void** program, \
+ F r, F g, F b, F a) { \
+ name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a, \
+ params->dr, params->dg, params->db, params->da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(params,program, r,g,b,a); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
+#else
+ #define STAGE(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
+ static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
+ F r, F g, F b, F a, F dr, F dg, F db, F da) { \
+ name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
+#endif
+
+// just_return() is a simple no-op stage that only exists to end the chain,
+// returning back up to start_pipeline(), and from there to the caller.
+#if JUMPER_NARROW_STAGES
+ static void ABI just_return(Params*, void**, F,F,F,F) {}
+#else
+ static void ABI just_return(size_t, void**, size_t,size_t, F,F,F,F, F,F,F,F) {}
+#endif
+
+// We could start defining normal Stages now. But first, some helper functions.
+
+// These load() and store() methods are tail-aware,
+// but focus mainly on keeping the at-stride tail==0 case fast.
+
+template <typename V, typename T>
+SI V load(const T* src, size_t tail) {
+#if !defined(JUMPER_IS_SCALAR)
+ SK_ASSUME(tail < N);
+ if (SK_EXPECT(tail, 0)) {
+ #ifdef __clang__
+ V v{}; // Any inactive lanes are zeroed.
+ switch (tail) {
+ case 7: v[6] = src[6];
+ case 6: v[5] = src[5];
+ case 5: v[4] = src[4];
+ case 4: memcpy(&v, src, 4*sizeof(T)); break;
+ case 3: v[2] = src[2];
+ case 2: memcpy(&v, src, 2*sizeof(T)); break;
+ case 1: memcpy(&v, src, 1*sizeof(T)); break;
+ }
+ #else
+ V v(0);
+ memcpy(&v, src, tail * sizeof(T));
+ #endif
+ return v;
+ }
+#endif
+ return sk_unaligned_load<V>(src);
+}
+
+template <typename V, typename T>
+SI void store(T* dst, V v, size_t tail) {
+#if !defined(JUMPER_IS_SCALAR)
+ SK_ASSUME(tail < N);
+ if (SK_EXPECT(tail, 0)) {
+ #ifdef __clang__
+ switch (tail) {
+ case 7: dst[6] = v[6];
+ case 6: dst[5] = v[5];
+ case 5: dst[4] = v[4];
+ case 4: memcpy(dst, &v, 4*sizeof(T)); break;
+ case 3: dst[2] = v[2];
+ case 2: memcpy(dst, &v, 2*sizeof(T)); break;
+ case 1: memcpy(dst, &v, 1*sizeof(T)); break;
+ }
+ #else
+ memcpy(dst, &v, tail * sizeof(T));
+ #endif
+ return;
+ }
+#endif
+ sk_unaligned_store(dst, v);
+}
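+
+// Note the deliberate fallthrough in those switches: e.g. tail == 3 handles
+// lane 2 individually, then falls into the two-lane memcpy for lanes 0 and 1.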
+
+SI F from_byte(U8 b) {
+ return cast(expand(b)) * (1/255.0f);
+}
+SI F from_short(U16 s) {
+ return cast(expand(s)) * (1/65535.0f);
+}
+SI void from_565(U16 _565, F* r, F* g, F* b) {
+ U32 wide = expand(_565);
+ *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
+ *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
+ *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
+}
+SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
+ U32 wide = expand(_4444);
+ *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
+ *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
+ *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
+ *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
+}
+SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
+ *r = cast((_8888 ) & 0xff) * (1/255.0f);
+ *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
+ *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
+ *a = cast((_8888 >> 24) ) * (1/255.0f);
+}
+SI void from_88(U16 _88, F* r, F* g) {
+ U32 wide = expand(_88);
+ *r = cast((wide ) & 0xff) * (1/255.0f);
+ *g = cast((wide >> 8) & 0xff) * (1/255.0f);
+}
+SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
+ *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
+ *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
+ *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
+ *a = cast((rgba >> 30) ) * (1/ 3.0f);
+}
+SI void from_1616(U32 _1616, F* r, F* g) {
+ *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
+ *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
+}
+SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
+ *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
+ *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
+ *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
+ *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
+}
+
+// Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
+template <typename T>
+SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
+ return (T*)ctx->pixels + dy*ctx->stride + dx;
+}
+
+// clamp v to [0,limit).
+SI F clamp(F v, F limit) {
+ F inclusive = bit_cast<F>( bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
+ return min(max(0, v), inclusive);
+}
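+
+// That bit decrement is nextafterf() in disguise: for a positive finite limit
+// like 4.0f it yields the largest float below 4.0f, so trunc_() of the clamped
+// value is always a valid index strictly less than limit.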
+
+// Used by gather_ stages to calculate the base pointer and a vector of indices to load.
+template <typename T>
+SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
+ x = clamp(x, ctx->width);
+ y = clamp(y, ctx->height);
+
+ *ptr = (const T*)ctx->pixels;
+ return trunc_(y)*ctx->stride + trunc_(x);
+}
+
+// We often have a nominally [0,1] float value we need to scale and convert to an integer,
+// whether for a table lookup or to pack back down into bytes for storage.
+//
+// In practice, especially when dealing with interesting color spaces, that notionally
+// [0,1] float may be out of [0,1] range. Unorms cannot represent that, so we must clamp.
+//
+// You can adjust the expected input to [0,bias] by tweaking that parameter.
+SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
+    // TODO: platform-specific implementations of to_unorm(), removing round() entirely?
+ // Any time we use round() we probably want to use to_unorm().
+ return round(min(max(0, v), bias), scale);
+}
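+
+// e.g. to_unorm(v, 255) maps a clamped [0,1] channel onto [0,255], with
+// to_unorm(0.5f, 255) rounding 127.5 to 128 on every round() variant here.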
+
+#if !defined(__clang__) && !defined(JUMPER_IS_SCALAR)
+SI I32 cond_to_mask(F cond) { return _mm_castps_si128(cond); }
+#else
+SI I32 cond_to_mask(I32 cond) { return if_then_else(cond, I32(~0), I32(0)); }
+#endif
+
+// Now finally, normal Stages!
+
+STAGE(seed_shader, Ctx::None) {
+ static const float iota[] = {
+ 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
+ 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
+ };
+ // It's important for speed to explicitly cast(dx) and cast(dy),
+ // which has the effect of splatting them to vectors before converting to floats.
+ // On Intel this breaks a data dependency on previous loop iterations' registers.
+ r = cast(dx) + sk_unaligned_load<F>(iota);
+ g = cast(dy) + 0.5f;
+ b = 1.0f;
+ a = 0;
+ dr = dg = db = da = 0;
+}
+
+STAGE(dither, const float* rate) {
+ // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
+ uint32_t iota[] = {0,1,2,3,4,5,6,7};
+ U32 X = sk_unaligned_load<U32>(iota) + dx,
+ Y = dy;
+
+ // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
+ // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
+
+ // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
+ Y ^= X;
+
+ // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
+ // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
+ U32 M = (Y & 1) << 5 | (X & 1) << 4
+ | (Y & 2) << 2 | (X & 2) << 1
+ | (Y & 4) >> 1 | (X & 4) >> 2;
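+    // For illustration: X = 0b011 and Y = 0b101 interleave to M = 0b110110 = 54,
+    // so the 64 dither slots are covered as the low bits of X and Y vary.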
+
+ // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
+ // We want to make sure our dither is less than 0.5 in either direction to keep exact values
+ // like 0 and 1 unchanged after rounding.
+ F dither = cast(M) * (2/128.0f) - (63/128.0f);
+
+ r += *rate*dither;
+ g += *rate*dither;
+ b += *rate*dither;
+
+ r = max(0, min(r, a));
+ g = max(0, min(g, a));
+ b = max(0, min(b, a));
+}
+
+// load 4 floats from memory, and splat them into r,g,b,a
+STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->r;
+ g = c->g;
+ b = c->b;
+ a = c->a;
+}
+STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->r;
+ g = c->g;
+ b = c->b;
+ a = c->a;
+}
+// load 4 floats from memory, and splat them into dr,dg,db,da
+STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
+ dr = c->r;
+ dg = c->g;
+ db = c->b;
+ da = c->a;
+}
+
+// splats opaque-black into r,g,b,a
+STAGE(black_color, Ctx::None) {
+ r = g = b = 0.0f;
+ a = 1.0f;
+}
+
+STAGE(white_color, Ctx::None) {
+ r = g = b = a = 1.0f;
+}
+
+// load registers r,g,b,a from context (mirrors store_rgba)
+STAGE(load_src, const float* ptr) {
+ r = sk_unaligned_load<F>(ptr + 0*N);
+ g = sk_unaligned_load<F>(ptr + 1*N);
+ b = sk_unaligned_load<F>(ptr + 2*N);
+ a = sk_unaligned_load<F>(ptr + 3*N);
+}
+
+// store registers r,g,b,a into context (mirrors load_rgba)
+STAGE(store_src, float* ptr) {
+ sk_unaligned_store(ptr + 0*N, r);
+ sk_unaligned_store(ptr + 1*N, g);
+ sk_unaligned_store(ptr + 2*N, b);
+ sk_unaligned_store(ptr + 3*N, a);
+}
+
+// load registers dr,dg,db,da from context (mirrors store_dst)
+STAGE(load_dst, const float* ptr) {
+ dr = sk_unaligned_load<F>(ptr + 0*N);
+ dg = sk_unaligned_load<F>(ptr + 1*N);
+ db = sk_unaligned_load<F>(ptr + 2*N);
+ da = sk_unaligned_load<F>(ptr + 3*N);
+}
+
+// store registers dr,dg,db,da into context (mirrors load_dst)
+STAGE(store_dst, float* ptr) {
+ sk_unaligned_store(ptr + 0*N, dr);
+ sk_unaligned_store(ptr + 1*N, dg);
+ sk_unaligned_store(ptr + 2*N, db);
+ sk_unaligned_store(ptr + 3*N, da);
+}
+
+// Most blend modes apply the same logic to each channel.
+#define BLEND_MODE(name) \
+ SI F name##_channel(F s, F d, F sa, F da); \
+ STAGE(name, Ctx::None) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = name##_channel(a,da,a,da); \
+ } \
+ SI F name##_channel(F s, F d, F sa, F da)
+
+SI F inv(F x) { return 1.0f - x; }
+SI F two(F x) { return x + x; }
+
+BLEND_MODE(clear) { return 0; }
+BLEND_MODE(srcatop) { return s*da + d*inv(sa); }
+BLEND_MODE(dstatop) { return d*sa + s*inv(da); }
+BLEND_MODE(srcin) { return s * da; }
+BLEND_MODE(dstin) { return d * sa; }
+BLEND_MODE(srcout) { return s * inv(da); }
+BLEND_MODE(dstout) { return d * inv(sa); }
+BLEND_MODE(srcover) { return mad(d, inv(sa), s); }
+BLEND_MODE(dstover) { return mad(s, inv(da), d); }
+
+BLEND_MODE(modulate) { return s*d; }
+BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
+BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa.
+BLEND_MODE(screen) { return s + d - s*d; }
+BLEND_MODE(xor_) { return s*inv(da) + d*inv(sa); }
+#undef BLEND_MODE
+
+// Most other blend modes apply the same logic to colors, and srcover to alpha.
+#define BLEND_MODE(name) \
+ SI F name##_channel(F s, F d, F sa, F da); \
+ STAGE(name, Ctx::None) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = mad(da, inv(a), a); \
+ } \
+ SI F name##_channel(F s, F d, F sa, F da)
+
+BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; }
+BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; }
+BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
+BLEND_MODE(exclusion) { return s + d - two(s*d); }
+
+BLEND_MODE(colorburn) {
+ return if_then_else(d == da, d + s*inv(da),
+ if_then_else(s == 0, /* s + */ d*inv(sa),
+ sa*(da - min(da, (da-d)*sa*rcp(s))) + s*inv(da) + d*inv(sa)));
+}
+BLEND_MODE(colordodge) {
+ return if_then_else(d == 0, /* d + */ s*inv(da),
+ if_then_else(s == sa, s + d*inv(sa),
+ sa*min(da, (d*sa)*rcp(sa - s)) + s*inv(da) + d*inv(sa)));
+}
+BLEND_MODE(hardlight) {
+ return s*inv(da) + d*inv(sa)
+ + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
+}
+BLEND_MODE(overlay) {
+ return s*inv(da) + d*inv(sa)
+ + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
+}
+
+BLEND_MODE(softlight) {
+ F m = if_then_else(da > 0, d / da, 0),
+ s2 = two(s),
+ m4 = two(two(m));
+
+ // The logic forks three ways:
+ // 1. dark src?
+ // 2. light src, dark dst?
+ // 3. light src, light dst?
+ F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
+ darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
+ liteDst = rcp(rsqrt(m)) - m, // Used in case 3.
+ liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
+ return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
+}
+#undef BLEND_MODE
+
+// We base our implementation of non-separable blend modes on
+// https://www.w3.org/TR/compositing-1/#blendingnonseparable
+// and
+// https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf.
+// They're equivalent, but the ES math has been better simplified.
+//
+// Anything extra we add beyond that is to make the math work with premul inputs.
+
+SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); }
+SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
+
+SI void set_sat(F* r, F* g, F* b, F s) {
+ F mn = min(*r, min(*g,*b)),
+ mx = max(*r, max(*g,*b)),
+ sat = mx - mn;
+
+ // Map min channel to 0, max channel to s, and scale the middle proportionally.
+ auto scale = [=](F c) {
+ return if_then_else(sat == 0, 0, (c - mn) * s / sat);
+ };
+ *r = scale(*r);
+ *g = scale(*g);
+ *b = scale(*b);
+}
+SI void set_lum(F* r, F* g, F* b, F l) {
+ F diff = l - lum(*r, *g, *b);
+ *r += diff;
+ *g += diff;
+ *b += diff;
+}
+SI void clip_color(F* r, F* g, F* b, F a) {
+ F mn = min(*r, min(*g, *b)),
+ mx = max(*r, max(*g, *b)),
+ l = lum(*r, *g, *b);
+
+ auto clip = [=](F c) {
+ c = if_then_else(mn >= 0, c, l + (c - l) * ( l) / (l - mn) );
+ c = if_then_else(mx > a, l + (c - l) * (a - l) / (mx - l), c);
+ c = max(c, 0); // Sometimes without this we may dip just a little negative.
+ return c;
+ };
+ *r = clip(*r);
+ *g = clip(*g);
+ *b = clip(*b);
+}
+
+STAGE(hue, Ctx::None) {
+ F R = r*a,
+ G = g*a,
+ B = b*a;
+
+ set_sat(&R, &G, &B, sat(dr,dg,db)*a);
+ set_lum(&R, &G, &B, lum(dr,dg,db)*a);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(saturation, Ctx::None) {
+ F R = dr*a,
+ G = dg*a,
+ B = db*a;
+
+ set_sat(&R, &G, &B, sat( r, g, b)*da);
+ set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(color, Ctx::None) {
+ F R = r*da,
+ G = g*da,
+ B = b*da;
+
+ set_lum(&R, &G, &B, lum(dr,dg,db)*a);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(luminosity, Ctx::None) {
+ F R = dr*a,
+ G = dg*a,
+ B = db*a;
+
+ set_lum(&R, &G, &B, lum(r,g,b)*da);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+
+STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ U32 dst = load<U32>(ptr, tail);
+ dr = cast((dst ) & 0xff);
+ dg = cast((dst >> 8) & 0xff);
+ db = cast((dst >> 16) & 0xff);
+ da = cast((dst >> 24) );
+ // {dr,dg,db,da} are in [0,255]
+ // { r, g, b, a} are in [0, 1] (but may be out of gamut)
+
+ r = mad(dr, inv(a), r*255.0f);
+ g = mad(dg, inv(a), g*255.0f);
+ b = mad(db, inv(a), b*255.0f);
+ a = mad(da, inv(a), a*255.0f);
+ // { r, g, b, a} are now in [0,255] (but may be out of gamut)
+
+ // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
+ dst = to_unorm(r, 1, 255)
+ | to_unorm(g, 1, 255) << 8
+ | to_unorm(b, 1, 255) << 16
+ | to_unorm(a, 1, 255) << 24;
+ store(ptr, dst, tail);
+}
+
+STAGE(clamp_0, Ctx::None) {
+ r = max(r, 0);
+ g = max(g, 0);
+ b = max(b, 0);
+ a = max(a, 0);
+}
+
+STAGE(clamp_1, Ctx::None) {
+ r = min(r, 1.0f);
+ g = min(g, 1.0f);
+ b = min(b, 1.0f);
+ a = min(a, 1.0f);
+}
+
+STAGE(clamp_a, Ctx::None) {
+ a = min(a, 1.0f);
+ r = min(r, a);
+ g = min(g, a);
+ b = min(b, a);
+}
+
+STAGE(clamp_gamut, Ctx::None) {
+ // If you're using this stage, a should already be in [0,1].
+ r = min(max(r, 0), a);
+ g = min(max(g, 0), a);
+ b = min(max(b, 0), a);
+}
+
+STAGE(set_rgb, const float* rgb) {
+ r = rgb[0];
+ g = rgb[1];
+ b = rgb[2];
+}
+STAGE(unbounded_set_rgb, const float* rgb) {
+ r = rgb[0];
+ g = rgb[1];
+ b = rgb[2];
+}
+
+STAGE(swap_rb, Ctx::None) {
+ auto tmp = r;
+ r = b;
+ b = tmp;
+}
+STAGE(swap_rb_dst, Ctx::None) {
+ auto tmp = dr;
+ dr = db;
+ db = tmp;
+}
+
+STAGE(move_src_dst, Ctx::None) {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+}
+STAGE(move_dst_src, Ctx::None) {
+ r = dr;
+ g = dg;
+ b = db;
+ a = da;
+}
+
+STAGE(premul, Ctx::None) {
+ r = r * a;
+ g = g * a;
+ b = b * a;
+}
+STAGE(premul_dst, Ctx::None) {
+ dr = dr * da;
+ dg = dg * da;
+ db = db * da;
+}
+STAGE(unpremul, Ctx::None) {
+ float inf = bit_cast<float>(0x7f800000);
+ auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
+ r *= scale;
+ g *= scale;
+ b *= scale;
+}
+
+STAGE(force_opaque , Ctx::None) { a = 1; }
+STAGE(force_opaque_dst, Ctx::None) { da = 1; }
+
+STAGE(rgb_to_hsl, Ctx::None) {
+ F mx = max(r, max(g,b)),
+ mn = min(r, min(g,b)),
+ d = mx - mn,
+ d_rcp = 1.0f / d;
+
+ F h = (1/6.0f) *
+ if_then_else(mx == mn, 0,
+ if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0),
+ if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
+ (r-g)*d_rcp + 4.0f)));
+
+ F l = (mx + mn) * 0.5f;
+ F s = if_then_else(mx == mn, 0,
+ d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
+
+ r = h;
+ g = s;
+ b = l;
+}
+STAGE(hsl_to_rgb, Ctx::None) {
+ F h = r,
+ s = g,
+ l = b;
+
+ F q = l + if_then_else(l >= 0.5f, s - l*s, l*s),
+ p = 2.0f*l - q;
+
+ auto hue_to_rgb = [&](F t) {
+ t = fract(t);
+
+ F r = p;
+ r = if_then_else(t >= 4/6.0f, r, p + (q-p)*(4.0f - 6.0f*t));
+ r = if_then_else(t >= 3/6.0f, r, q);
+ r = if_then_else(t >= 1/6.0f, r, p + (q-p)*( 6.0f*t));
+ return r;
+ };
+
+ r = if_then_else(s == 0, l, hue_to_rgb(h + (1/3.0f)));
+ g = if_then_else(s == 0, l, hue_to_rgb(h ));
+ b = if_then_else(s == 0, l, hue_to_rgb(h - (1/3.0f)));
+}
+
+// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
+SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
+ return if_then_else(a < da, min(cr, min(cg,cb))
+ , max(cr, max(cg,cb)));
+}
+
+STAGE(scale_1_float, const float* c) {
+ r = r * *c;
+ g = g * *c;
+ b = b * *c;
+ a = a * *c;
+}
+STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+ auto scales = load<U8>(ptr, tail);
+ auto c = from_byte(scales);
+
+ r = r * c;
+ g = g * c;
+ b = b * c;
+ a = a * c;
+}
+STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+ F cr,cg,cb;
+ from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
+
+ F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+ r = r * cr;
+ g = g * cg;
+ b = b * cb;
+ a = a * ca;
+}
+
+SI F lerp(F from, F to, F t) {
+ return mad(to-from, t, from);
+}
+
+STAGE(lerp_1_float, const float* c) {
+ r = lerp(dr, r, *c);
+ g = lerp(dg, g, *c);
+ b = lerp(db, b, *c);
+ a = lerp(da, a, *c);
+}
+STAGE(lerp_native, const float scales[]) {
+ auto c = sk_unaligned_load<F>(scales);
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+}
+STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+ auto scales = load<U8>(ptr, tail);
+ auto c = from_byte(scales);
+
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+}
+STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+ F cr,cg,cb;
+ from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
+
+ F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+ r = lerp(dr, r, cr);
+ g = lerp(dg, g, cg);
+ b = lerp(db, b, cb);
+ a = lerp(da, a, ca);
+}
+
+STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
+ auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
+ aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
+
+ F mul = from_byte(load<U8>(mptr, tail)),
+ add = from_byte(load<U8>(aptr, tail));
+
+ r = mad(r, mul, add);
+ g = mad(g, mul, add);
+ b = mad(b, mul, add);
+}
+
+STAGE(byte_tables, const void* ctx) {  // TODO: rename Tables to SkRasterPipeline_ByteTablesCtx
+ struct Tables { const uint8_t *r, *g, *b, *a; };
+ auto tables = (const Tables*)ctx;
+
+ r = from_byte(gather(tables->r, to_unorm(r, 255)));
+ g = from_byte(gather(tables->g, to_unorm(g, 255)));
+ b = from_byte(gather(tables->b, to_unorm(b, 255)));
+ a = from_byte(gather(tables->a, to_unorm(a, 255)));
+}
+
+SI F strip_sign(F x, U32* sign) {
+ U32 bits = bit_cast<U32>(x);
+ *sign = bits & 0x80000000;
+ return bit_cast<F>(bits ^ *sign);
+}
+
+SI F apply_sign(F x, U32 sign) {
+ return bit_cast<F>(sign | bit_cast<U32>(x));
+}
+
+STAGE(parametric, const skcms_TransferFunction* ctx) {
+ auto fn = [&](F v) {
+ U32 sign;
+ v = strip_sign(v, &sign);
+
+ F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
+ , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
+ return apply_sign(r, sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+
+STAGE(gamma_, const float* G) {
+ auto fn = [&](F v) {
+ U32 sign;
+ v = strip_sign(v, &sign);
+ return apply_sign(approx_powf(v, *G), sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+
+STAGE(PQish, const skcms_TransferFunction* ctx) {
+ auto fn = [&](F v) {
+ U32 sign;
+ v = strip_sign(v, &sign);
+
+ F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0)
+ / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
+ ctx->f);
+
+ return apply_sign(r, sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+
+STAGE(HLGish, const skcms_TransferFunction* ctx) {
+ auto fn = [&](F v) {
+ U32 sign;
+ v = strip_sign(v, &sign);
+
+ const float R = ctx->a, G = ctx->b,
+ a = ctx->c, b = ctx->d, c = ctx->e;
+
+ F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
+ , approx_exp((v-c)*a) + b);
+
+ return apply_sign(r, sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+
+STAGE(HLGinvish, const skcms_TransferFunction* ctx) {
+ auto fn = [&](F v) {
+ U32 sign;
+ v = strip_sign(v, &sign);
+
+ const float R = ctx->a, G = ctx->b,
+ a = ctx->c, b = ctx->d, c = ctx->e;
+
+ F r = if_then_else(v <= 1, R * approx_powf(v, G)
+ , a * approx_log(v - b) + c);
+
+ return apply_sign(r, sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+
+STAGE(from_srgb, Ctx::None) {
+ auto fn = [](F s) {
+ U32 sign;
+ s = strip_sign(s, &sign);
+ auto lo = s * (1/12.92f);
+ auto hi = mad(s*s, mad(s, 0.3000f, 0.6975f), 0.0025f);
+ return apply_sign(if_then_else(s < 0.055f, lo, hi), sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
+STAGE(to_srgb, Ctx::None) {
+ auto fn = [](F l) {
+ U32 sign;
+ l = strip_sign(l, &sign);
+ // We tweak c and d for each instruction set to make sure fn(1) is exactly 1.
+ #if defined(JUMPER_IS_AVX512)
+ const float c = 1.130026340485f,
+ d = 0.141387879848f;
+ #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || \
+ defined(JUMPER_IS_AVX ) || defined(JUMPER_IS_HSW )
+ const float c = 1.130048394203f,
+ d = 0.141357362270f;
+ #elif defined(JUMPER_IS_NEON)
+ const float c = 1.129999995232f,
+ d = 0.141381442547f;
+ #else
+ const float c = 1.129999995232f,
+ d = 0.141377761960f;
+ #endif
+ F t = rsqrt(l);
+ auto lo = l * 12.92f;
+ auto hi = mad(t, mad(t, -0.0024542345f, 0.013832027f), c)
+ * rcp(d + t);
+ return apply_sign(if_then_else(l < 0.00465985f, lo, hi), sign);
+ };
+ r = fn(r);
+ g = fn(g);
+ b = fn(b);
+}
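+
+// N.B. the hi branch above approximates the sRGB encoding curve as a rational function of
+// t = rsqrt(l); because rsqrt() and rcp() are estimates with platform-specific precision,
+// c and d must be retuned per instruction set to keep fn(1) == 1 exactly.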
+
+STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+ r = g = b = 0.0f;
+ a = from_byte(load<U8>(ptr, tail));
+}
+STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+ dr = dg = db = 0.0f;
+ da = from_byte(load<U8>(ptr, tail));
+}
+STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint8_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ r = g = b = 0.0f;
+ a = from_byte(gather(ptr, ix));
+}
+STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
+
+ U8 packed = pack(pack(to_unorm(a, 255)));
+ store(ptr, packed, tail);
+}
+
+STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+ from_565(load<U16>(ptr, tail), &r,&g,&b);
+ a = 1.0f;
+}
+STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+ from_565(load<U16>(ptr, tail), &dr,&dg,&db);
+ da = 1.0f;
+}
+STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ from_565(gather(ptr, ix), &r,&g,&b);
+ a = 1.0f;
+}
+STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+
+ U16 px = pack( to_unorm(r, 31) << 11
+ | to_unorm(g, 63) << 5
+ | to_unorm(b, 31) );
+ store(ptr, px, tail);
+}
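+
+// E.g. a full-intensity red stores as to_unorm(1.0f, 31) << 11 == 31 << 11 == 0xF800,
+// matching the 565 layout 15|rrrrr gggggg bbbbb|0.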
+
+STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+ from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+ from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ from_4444(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+ U16 px = pack( to_unorm(r, 15) << 12
+ | to_unorm(g, 15) << 8
+ | to_unorm(b, 15) << 4
+ | to_unorm(a, 15) );
+ store(ptr, px, tail);
+}
+
+STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+ from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+ from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ from_8888(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ U32 px = to_unorm(r, 255)
+ | to_unorm(g, 255) << 8
+ | to_unorm(b, 255) << 16
+ | to_unorm(a, 255) << 24;
+ store(ptr, px, tail);
+}
+
+STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+ from_88(load<U16>(ptr, tail), &r, &g);
+ b = 0;
+ a = 1;
+}
+STAGE(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+ from_88(load<U16>(ptr, tail), &dr, &dg);
+ db = 0;
+ da = 1;
+}
+STAGE(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ from_88(gather(ptr, ix), &r, &g);
+ b = 0;
+ a = 1;
+}
+STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
+ U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
+ store(ptr, px, tail);
+}
+
+STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+ r = g = b = 0;
+ a = from_short(load<U16>(ptr, tail));
+}
+STAGE(load_a16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+ dr = dg = db = 0.0f;
+ da = from_short(load<U16>(ptr, tail));
+}
+STAGE(gather_a16, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ r = g = b = 0.0f;
+ a = from_short(gather(ptr, ix));
+}
+STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+
+ U16 px = pack(to_unorm(a, 65535));
+ store(ptr, px, tail);
+}
+
+STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+ b = 0; a = 1;
+ from_1616(load<U32>(ptr, tail), &r,&g);
+}
+STAGE(load_rg1616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+ from_1616(load<U32>(ptr, tail), &dr, &dg);
+ db = 0;
+ da = 1;
+}
+STAGE(gather_rg1616, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ from_1616(gather(ptr, ix), &r, &g);
+ b = 0;
+ a = 1;
+}
+STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ U32 px = to_unorm(r, 65535)
+ | to_unorm(g, 65535) << 16;
+ store(ptr, px, tail);
+}
+
+STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
+ from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
+}
+STAGE(load_16161616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
+ from_16161616(load<U64>(ptr, tail), &dr, &dg, &db, &da);
+}
+STAGE(gather_16161616, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint64_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ from_16161616(gather(ptr, ix), &r, &g, &b, &a);
+}
+STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
+
+ U16 R = pack(to_unorm(r, 65535)),
+ G = pack(to_unorm(g, 65535)),
+ B = pack(to_unorm(b, 65535)),
+ A = pack(to_unorm(a, 65535));
+
+ store4(ptr,tail, R,G,B,A);
+}
+
+
+STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+ from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+ from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ from_1010102(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ U32 px = to_unorm(r, 1023)
+ | to_unorm(g, 1023) << 10
+ | to_unorm(b, 1023) << 20
+ | to_unorm(a, 3) << 30;
+ store(ptr, px, tail);
+}
+
+STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
+
+ U16 R,G,B,A;
+ load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
+ r = from_half(R);
+ g = from_half(G);
+ b = from_half(B);
+ a = from_half(A);
+}
+STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
+
+ U16 R,G,B,A;
+ load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
+ dr = from_half(R);
+ dg = from_half(G);
+ db = from_half(B);
+ da = from_half(A);
+}
+STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint64_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ auto px = gather(ptr, ix);
+
+ U16 R,G,B,A;
+ load4((const uint16_t*)&px,0, &R,&G,&B,&A);
+ r = from_half(R);
+ g = from_half(G);
+ b = from_half(B);
+ a = from_half(A);
+}
+STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
+ store4((uint16_t*)ptr,tail, to_half(r)
+ , to_half(g)
+ , to_half(b)
+ , to_half(a));
+}
+
+STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
+
+ U16 R = bswap(pack(to_unorm(r, 65535))),
+ G = bswap(pack(to_unorm(g, 65535))),
+ B = bswap(pack(to_unorm(b, 65535))),
+ A = bswap(pack(to_unorm(a, 65535)));
+
+ store4(ptr,tail, R,G,B,A);
+}
+
+STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+ U16 A = load<U16>((const uint16_t*)ptr, tail);
+ r = 0;
+ g = 0;
+ b = 0;
+ a = from_half(A);
+}
+STAGE(load_af16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+
+ U16 A = load<U16>((const uint16_t*)ptr, tail);
+ dr = dg = db = 0.0f;
+ da = from_half(A);
+}
+STAGE(gather_af16, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ r = g = b = 0.0f;
+ a = from_half(gather(ptr, ix));
+}
+STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+ store(ptr, to_half(a), tail);
+}
+
+STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+
+ U16 R,G;
+ load2((const uint16_t*)ptr, tail, &R, &G);
+ r = from_half(R);
+ g = from_half(G);
+ b = 0;
+ a = 1;
+}
+STAGE(load_rgf16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+
+ U16 R,G;
+ load2((const uint16_t*)ptr, tail, &R, &G);
+ dr = from_half(R);
+ dg = from_half(G);
+ db = 0;
+ da = 1;
+}
+STAGE(gather_rgf16, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+ auto px = gather(ptr, ix);
+
+ U16 R,G;
+ load2((const uint16_t*)&px, 0, &R, &G);
+ r = from_half(R);
+ g = from_half(G);
+ b = 0;
+ a = 1;
+}
+STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
+ store2((uint16_t*)ptr, tail, to_half(r)
+ , to_half(g));
+}
+
+STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
+ load4(ptr,tail, &r,&g,&b,&a);
+}
+STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
+ load4(ptr,tail, &dr,&dg,&db,&da);
+}
+STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
+ const float* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+ r = gather(ptr, (ix<<2) + 0);
+ g = gather(ptr, (ix<<2) + 1);
+ b = gather(ptr, (ix<<2) + 2);
+ a = gather(ptr, (ix<<2) + 3);
+}
+STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
+ store4(ptr,tail, r,g,b,a);
+}
+
+STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
+ load2(ptr, tail, &r, &g);
+ b = 0;
+ a = 1;
+}
+STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
+ store2(ptr, tail, r, g);
+}
+
+SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
+ return v - floor_(v*ctx->invScale)*ctx->scale;
+}
+SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
+ auto limit = ctx->scale;
+ auto invLimit = ctx->invScale;
+ return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
+}
+// Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
+// The gather stages will hard clamp the output of these stages to [0,limit)...
+// we just need to do the basic repeat or mirroring.
+STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
+STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
+STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
+STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
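+
+// E.g. with scale = 4 (invScale = 0.25), exclusive_repeat() maps v = 5.25 to
+// 5.25 - floor_(5.25*0.25)*4 == 5.25 - 4 == 1.25, landing back inside [0,4).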
+
+// Clamp x to [0,1], both sides inclusive (think, gradients).
+// Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
+SI F clamp_01(F v) { return min(max(0, v), 1); }
+
+STAGE( clamp_x_1, Ctx::None) { r = clamp_01(r); }
+STAGE(repeat_x_1, Ctx::None) { r = clamp_01(r - floor_(r)); }
+STAGE(mirror_x_1, Ctx::None) { r = clamp_01(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
+
+// Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
+// mask == 0x00000000 if the coordinate(s) are out of bounds
+// mask == 0xFFFFFFFF if the coordinate(s) are in bounds
+// After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
+// if either of the coordinates were out of bounds.
+
+STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto w = ctx->limit_x;
+ sk_unaligned_store(ctx->mask, cond_to_mask((0 <= r) & (r < w)));
+}
+STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto h = ctx->limit_y;
+ sk_unaligned_store(ctx->mask, cond_to_mask((0 <= g) & (g < h)));
+}
+STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto w = ctx->limit_x;
+ auto h = ctx->limit_y;
+ sk_unaligned_store(ctx->mask,
+ cond_to_mask((0 <= r) & (r < w) & (0 <= g) & (g < h)));
+}
+STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto mask = sk_unaligned_load<U32>(ctx->mask);
+ r = bit_cast<F>( bit_cast<U32>(r) & mask );
+ g = bit_cast<F>( bit_cast<U32>(g) & mask );
+ b = bit_cast<F>( bit_cast<U32>(b) & mask );
+ a = bit_cast<F>( bit_cast<U32>(a) & mask );
+}
+
+STAGE(alpha_to_gray, Ctx::None) {
+ r = g = b = a;
+ a = 1;
+}
+STAGE(alpha_to_gray_dst, Ctx::None) {
+ dr = dg = db = da;
+ da = 1;
+}
+STAGE(bt709_luminance_or_luma_to_alpha, Ctx::None) {
+ a = r*0.2126f + g*0.7152f + b*0.0722f;
+ r = g = b = 0;
+}
+
+STAGE(matrix_translate, const float* m) {
+ r += m[0];
+ g += m[1];
+}
+STAGE(matrix_scale_translate, const float* m) {
+ r = mad(r,m[0], m[2]);
+ g = mad(g,m[1], m[3]);
+}
+STAGE(matrix_2x3, const float* m) {
+ auto R = mad(r,m[0], mad(g,m[2], m[4])),
+ G = mad(r,m[1], mad(g,m[3], m[5]));
+ r = R;
+ g = G;
+}
+STAGE(matrix_3x3, const float* m) {
+ auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
+ G = mad(r,m[1], mad(g,m[4], b*m[7])),
+ B = mad(r,m[2], mad(g,m[5], b*m[8]));
+ r = R;
+ g = G;
+ b = B;
+}
+STAGE(matrix_3x4, const float* m) {
+ auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
+ G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
+ B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
+ r = R;
+ g = G;
+ b = B;
+}
+STAGE(matrix_4x5, const float* m) {
+ auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
+ G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
+ B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
+ A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
+ r = R;
+ g = G;
+ b = B;
+ a = A;
+}
+STAGE(matrix_4x3, const float* m) {
+ auto X = r,
+ Y = g;
+
+ r = mad(X, m[0], mad(Y, m[4], m[ 8]));
+ g = mad(X, m[1], mad(Y, m[5], m[ 9]));
+ b = mad(X, m[2], mad(Y, m[6], m[10]));
+ a = mad(X, m[3], mad(Y, m[7], m[11]));
+}
+STAGE(matrix_perspective, const float* m) {
+ // N.B. Unlike the other matrix_ stages, this matrix is row-major.
+ auto R = mad(r,m[0], mad(g,m[1], m[2])),
+ G = mad(r,m[3], mad(g,m[4], m[5])),
+ Z = mad(r,m[6], mad(g,m[7], m[8]));
+ r = R * rcp(Z);
+ g = G * rcp(Z);
+}
+
+SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
+ F* r, F* g, F* b, F* a) {
+ F fr, br, fg, bg, fb, bb, fa, ba;
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+    if (c->stopCount <= 8) {
+ fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
+ br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
+ fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
+ bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
+ fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
+ bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
+ fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
+ ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
+ } else
+#elif defined(JUMPER_IS_AVX)
+ if (c->stopCount <= 4) {
+ auto permute = [](const float* ptr, U32 idx) {
+ __m128 v = _mm_loadu_ps(ptr);
+ __m256 vv = _mm256_insertf128_ps(_mm256_castps128_ps256(v), v, 1);
+ return _mm256_permutevar_ps(vv, idx);
+ };
+ fr = permute(c->fs[0], idx);
+ br = permute(c->bs[0], idx);
+ fg = permute(c->fs[1], idx);
+ bg = permute(c->bs[1], idx);
+ fb = permute(c->fs[2], idx);
+ bb = permute(c->bs[2], idx);
+ fa = permute(c->fs[3], idx);
+ ba = permute(c->bs[3], idx);
+ } else
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ if (c->stopCount <= 4) {
+ fr = _mm_permutevar_ps(_mm_loadu_ps(c->fs[0]), idx);
+ br = _mm_permutevar_ps(_mm_loadu_ps(c->bs[0]), idx);
+ fg = _mm_permutevar_ps(_mm_loadu_ps(c->fs[1]), idx);
+ bg = _mm_permutevar_ps(_mm_loadu_ps(c->bs[1]), idx);
+ fb = _mm_permutevar_ps(_mm_loadu_ps(c->fs[2]), idx);
+ bb = _mm_permutevar_ps(_mm_loadu_ps(c->bs[2]), idx);
+ fa = _mm_permutevar_ps(_mm_loadu_ps(c->fs[3]), idx);
+ ba = _mm_permutevar_ps(_mm_loadu_ps(c->bs[3]), idx);
+ } else
+#endif
+ {
+ fr = gather(c->fs[0], idx);
+ br = gather(c->bs[0], idx);
+ fg = gather(c->fs[1], idx);
+ bg = gather(c->bs[1], idx);
+ fb = gather(c->fs[2], idx);
+ bb = gather(c->bs[2], idx);
+ fa = gather(c->fs[3], idx);
+ ba = gather(c->bs[3], idx);
+ }
+
+ *r = mad(t, fr, br);
+ *g = mad(t, fg, bg);
+ *b = mad(t, fb, bb);
+ *a = mad(t, fa, ba);
+}
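+
+// Each gradient interval stores a precomputed per-channel scale (fs) and bias (bs), so
+// evaluating its color above is a single mad() per channel: f*t + b.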
+
+STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
+ auto t = r;
+ auto idx = trunc_(t * (c->stopCount-1));
+ gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
+
+STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
+ auto t = r;
+ U32 idx = 0;
+
+ // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
+ for (size_t i = 1; i < c->stopCount; i++) {
+#ifdef JUMPER_IS_SCALAR
+ if (t >= c->ts[i]) {
+ idx++;
+ } else {
+ break;
+ }
+#else
+ idx += bit_cast<U32>(t >= c->ts[i]);
+#endif
+ }
+
+#ifndef JUMPER_IS_SCALAR
+ idx = U32(0) - idx;
+#endif
+
+ gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
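+
+// N.B. in vector mode each true (t >= c->ts[i]) lane is all-1 bits, i.e. -1 when summed as
+// an integer, so the loop above accumulates -count; the final U32(0) - idx negation turns
+// that into the index of the interval containing t.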
+
+STAGE(evenly_spaced_2_stop_gradient, const void* ctx) {
+    // TODO: Rename Ctx to SkRasterPipeline_EvenlySpaced2StopGradientCtx.
+ struct Ctx { float f[4], b[4]; };
+ auto c = (const Ctx*)ctx;
+
+ auto t = r;
+ r = mad(t, c->f[0], c->b[0]);
+ g = mad(t, c->f[1], c->b[1]);
+ b = mad(t, c->f[2], c->b[2]);
+ a = mad(t, c->f[3], c->b[3]);
+}
+
+#ifdef JUMPER_IS_SCALAR
+SI bool isnan_(float v) { return sk_float_isnan(v); }
+#else
+template <typename T> SI auto isnan_(T v) -> decltype(v != v) { return v != v; }
+#endif
+
+STAGE(xy_to_unit_angle, Ctx::None) {
+ F X = r,
+ Y = g;
+ F xabs = abs_(X),
+ yabs = abs_(Y);
+
+ F slope = min(xabs, yabs)/max(xabs, yabs);
+ F s = slope * slope;
+
+    // Use a 7th-degree odd polynomial to approximate atan, scaled by 1/(2*pi) to unit turns.
+    // The float-optimized coefficients were generated with Sollya (sollya.gforge.inria.fr):
+    // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
+ F phi = slope
+ * (0.15912117063999176025390625f + s
+ * (-5.185396969318389892578125e-2f + s
+ * (2.476101927459239959716796875e-2f + s
+ * (-7.0547382347285747528076171875e-3f))));
+
+ phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
+ phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
+ phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
+ phi = if_then_else(isnan_(phi), 0 , phi); // Check for NaN.
+ r = phi;
+}
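+
+// Sanity check: at (X,Y) = (1,1) the polynomial gives phi ~= 0.12497, i.e. the 45-degree
+// diagonal comes out as roughly 1/8 of a unit turn, as expected.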
+
+STAGE(xy_to_radius, Ctx::None) {
+ F X2 = r * r,
+ Y2 = g * g;
+ r = sqrt_(X2 + Y2);
+}
+
+// Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
+
+STAGE(negate_x, Ctx::None) { r = -r; }
+
+STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
+ F x = r, y = g, &t = r;
+ t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
+}
+
+STAGE(xy_to_2pt_conical_focal_on_circle, Ctx::None) {
+ F x = r, y = g, &t = r;
+ t = x + y*y / x; // (x^2 + y^2) / x
+}
+
+STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
+ F x = r, y = g, &t = r;
+ t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
+ F x = r, y = g, &t = r;
+ t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
+ F x = r, y = g, &t = r;
+ t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
+ F& t = r;
+ t = t + ctx->fP1; // ctx->fP1 = f
+}
+
+STAGE(alter_2pt_conical_unswap, Ctx::None) {
+ F& t = r;
+ t = 1 - t;
+}
+
+STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
+ F& t = r;
+ auto is_degenerate = isnan_(t); // NaN
+ t = if_then_else(is_degenerate, F(0), t);
+ sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
+}
+
+STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
+ F& t = r;
+ auto is_degenerate = (t <= 0) | isnan_(t);
+ t = if_then_else(is_degenerate, F(0), t);
+ sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
+}
+
+STAGE(apply_vector_mask, const uint32_t* ctx) {
+ const U32 mask = sk_unaligned_load<U32>(ctx);
+ r = bit_cast<F>(bit_cast<U32>(r) & mask);
+ g = bit_cast<F>(bit_cast<U32>(g) & mask);
+ b = bit_cast<F>(bit_cast<U32>(b) & mask);
+ a = bit_cast<F>(bit_cast<U32>(a) & mask);
+}
+
+STAGE(save_xy, SkRasterPipeline_SamplerCtx* c) {
+    // Whether bilinear or bicubic, all sample points share the same fractional offset (fx,fy).
+    // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 pixel
+    // grid surrounding (x,y), each at a (0.5,0.5) offset from pixel centers.
+ F fx = fract(r + 0.5f),
+ fy = fract(g + 0.5f);
+
+ // Samplers will need to load x and fx, or y and fy.
+ sk_unaligned_store(c->x, r);
+ sk_unaligned_store(c->y, g);
+ sk_unaligned_store(c->fx, fx);
+ sk_unaligned_store(c->fy, fy);
+}
+
+STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
+ // Bilinear and bicubic filters are both separable, so we produce independent contributions
+ // from x and y, multiplying them together here to get each pixel's total scale factor.
+ auto scale = sk_unaligned_load<F>(c->scalex)
+ * sk_unaligned_load<F>(c->scaley);
+ dr = mad(scale, r, dr);
+ dg = mad(scale, g, dg);
+ db = mad(scale, b, db);
+ da = mad(scale, a, da);
+}
+
+// In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
+// are combined in direct proportion to their area overlapping that logical query pixel.
+// At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
+// The y-axis is symmetric.
+
+template <int kScale>
+SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
+ *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
+ F fx = sk_unaligned_load<F>(ctx->fx);
+
+ F scalex;
+ if (kScale == -1) { scalex = 1.0f - fx; }
+ if (kScale == +1) { scalex = fx; }
+ sk_unaligned_store(ctx->scalex, scalex);
+}
+template <int kScale>
+SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
+ *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
+ F fy = sk_unaligned_load<F>(ctx->fy);
+
+ F scaley;
+ if (kScale == -1) { scaley = 1.0f - fy; }
+ if (kScale == +1) { scaley = fy; }
+ sk_unaligned_store(ctx->scaley, scaley);
+}
+
+STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
+STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
+STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
+STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
+
+
+// In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
+// pixel center are combined with a non-uniform cubic filter, with higher values near the center.
+//
+// We break this function into two parts, one for near 0.5 offsets and one for far 1.5 offsets.
+// See GrCubicEffect for details of this particular filter.
+
+SI F bicubic_near(F t) {
+ // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
+ return mad(t, mad(t, mad((-21/18.0f), t, (27/18.0f)), (9/18.0f)), (1/18.0f));
+}
+SI F bicubic_far(F t) {
+ // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
+ return (t*t)*mad((7/18.0f), t, (-6/18.0f));
+}
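+
+// For any t, the four weights bicubic_far(1-t) + bicubic_near(1-t) + bicubic_near(t) +
+// bicubic_far(t) sum to exactly 18/18 == 1, so the filter preserves overall brightness.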
+
+template <int kScale>
+SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
+ *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
+ F fx = sk_unaligned_load<F>(ctx->fx);
+
+ F scalex;
+ if (kScale == -3) { scalex = bicubic_far (1.0f - fx); }
+ if (kScale == -1) { scalex = bicubic_near(1.0f - fx); }
+ if (kScale == +1) { scalex = bicubic_near( fx); }
+ if (kScale == +3) { scalex = bicubic_far ( fx); }
+ sk_unaligned_store(ctx->scalex, scalex);
+}
+template <int kScale>
+SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
+ *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
+ F fy = sk_unaligned_load<F>(ctx->fy);
+
+ F scaley;
+ if (kScale == -3) { scaley = bicubic_far (1.0f - fy); }
+ if (kScale == -1) { scaley = bicubic_near(1.0f - fy); }
+ if (kScale == +1) { scaley = bicubic_near( fy); }
+ if (kScale == +3) { scaley = bicubic_far ( fy); }
+ sk_unaligned_store(ctx->scaley, scaley);
+}
+
+STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
+STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
+STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
+STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }
+
+STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
+STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
+STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
+STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
+
+STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
+ store4(c->rgba,0, r,g,b,a);
+ c->fn(c, tail ? tail : N);
+ load4(c->read_from,0, &r,&g,&b,&a);
+}
+
+// shader: void main(float x, float y, inout half4 color)
+// colorfilter: void main(inout half4 color)
+STAGE(interpreter, SkRasterPipeline_InterpreterCtx* c) {
+ // If N is less than the interpreter's VecWidth, then we are doing more work than necessary in
+ // the interpreter. This is a known issue, and will be addressed at some point.
+ float xx[N], yy[N],
+ rr[N], gg[N], bb[N], aa[N];
+
+ float* args[] = { xx, yy, rr, gg, bb, aa };
+ float** in_args = args;
+ int in_count = 6;
+
+ if (c->shaderConvention) {
+ // our caller must have called seed_shader to set these
+ sk_unaligned_store(xx, r);
+ sk_unaligned_store(yy, g);
+ sk_unaligned_store(rr, F(c->paintColor.fR));
+ sk_unaligned_store(gg, F(c->paintColor.fG));
+ sk_unaligned_store(bb, F(c->paintColor.fB));
+ sk_unaligned_store(aa, F(c->paintColor.fA));
+ } else {
+ in_args += 2; // skip x,y
+ in_count = 4;
+ sk_unaligned_store(rr, r);
+ sk_unaligned_store(gg, g);
+ sk_unaligned_store(bb, b);
+ sk_unaligned_store(aa, a);
+ }
+
+ SkAssertResult(c->byteCode->runStriped(c->fn, tail ? tail : N, in_args, in_count,
+ nullptr, 0, (const float*)c->inputs, c->ninputs));
+
+ r = sk_unaligned_load<F>(rr);
+ g = sk_unaligned_load<F>(gg);
+ b = sk_unaligned_load<F>(bb);
+ a = sk_unaligned_load<F>(aa);
+}
+
+STAGE(gauss_a_to_rgba, Ctx::None) {
+    // The curve being approximated is exp(-x*x*4) - 0.018f with x = 1 - a,
+    // fit below with a quartic polynomial in a (Horner form via mad()).
+    //
+ const float c4 = -2.26661229133605957031f;
+ const float c3 = 2.89795351028442382812f;
+ const float c2 = 0.21345567703247070312f;
+ const float c1 = 0.15489584207534790039f;
+ const float c0 = 0.00030726194381713867f;
+ a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
+ r = a;
+ g = a;
+ b = a;
+}
+
+SI F tile(F v, SkTileMode mode, float limit, float invLimit) {
+ // The ix_and_ptr() calls in sample() will clamp tile()'s output, so no need to clamp here.
+ switch (mode) {
+ case SkTileMode::kDecal: // TODO, for now fallthrough to clamp
+ case SkTileMode::kClamp: return v;
+ case SkTileMode::kRepeat: return v - floor_(v*invLimit)*limit;
+ case SkTileMode::kMirror:
+ return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
+ }
+ SkUNREACHABLE;
+}
+
+SI void sample(const SkRasterPipeline_SamplerCtx2* ctx, F x, F y,
+ F* r, F* g, F* b, F* a) {
+ x = tile(x, ctx->tileX, ctx->width , ctx->invWidth );
+ y = tile(y, ctx->tileY, ctx->height, ctx->invHeight);
+
+ switch (ctx->ct) {
+ default: *r = *g = *b = *a = 0; // TODO
+ break;
+
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_8888(gather(ptr, ix), r,g,b,a);
+ if (ctx->ct == kBGRA_8888_SkColorType) {
+ std::swap(*r,*b);
+ }
+ } break;
+ }
+}
+
+template <int D>
+SI void sampler(const SkRasterPipeline_SamplerCtx2* ctx,
+ F cx, F cy, const F (&wx)[D], const F (&wy)[D],
+ F* r, F* g, F* b, F* a) {
+
+ float start = -0.5f*(D-1);
+
+ *r = *g = *b = *a = 0;
+ F y = cy + start;
+ for (int j = 0; j < D; j++, y += 1.0f) {
+ F x = cx + start;
+ for (int i = 0; i < D; i++, x += 1.0f) {
+ F R,G,B,A;
+ sample(ctx, x,y, &R,&G,&B,&A);
+
+ F w = wx[i] * wy[j];
+ *r = mad(w,R,*r);
+ *g = mad(w,G,*g);
+ *b = mad(w,B,*b);
+ *a = mad(w,A,*a);
+ }
+ }
+}
+
+STAGE(bilinear, const SkRasterPipeline_SamplerCtx2* ctx) {
+ F x = r, fx = fract(x + 0.5f),
+ y = g, fy = fract(y + 0.5f);
+ const F wx[] = {1.0f - fx, fx};
+ const F wy[] = {1.0f - fy, fy};
+
+ sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
+}
+STAGE(bicubic, SkRasterPipeline_SamplerCtx2* ctx) {
+ F x = r, fx = fract(x + 0.5f),
+ y = g, fy = fract(y + 0.5f);
+ const F wx[] = { bicubic_far(1-fx), bicubic_near(1-fx), bicubic_near(fx), bicubic_far(fx) };
+ const F wy[] = { bicubic_far(1-fy), bicubic_near(1-fy), bicubic_near(fy), bicubic_far(fy) };
+
+ sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
+}
+
+// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
+STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ // (cx,cy) are the center of our sample.
+ F cx = r,
+ cy = g;
+
+ // All sample points are at the same fractional offset (fx,fy).
+ // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
+ F fx = fract(cx + 0.5f),
+ fy = fract(cy + 0.5f);
+
+ // We'll accumulate the color of all four samples into {r,g,b,a} directly.
+ r = g = b = a = 0;
+
+ for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
+ for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
+ // (x,y) are the coordinates of this sample point.
+ F x = cx + dx,
+ y = cy + dy;
+
+ // ix_and_ptr() will clamp to the image's bounds for us.
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+
+ F sr,sg,sb,sa;
+ from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
+
+ // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
+ // are combined in direct proportion to their area overlapping that logical query pixel.
+ // At positive offsets, the x-axis contribution to that rectangle is fx,
+ // or (1-fx) at negative x. Same deal for y.
+ F sx = (dx > 0) ? fx : 1.0f - fx,
+ sy = (dy > 0) ? fy : 1.0f - fy,
+ area = sx * sy;
+
+ r += sr * area;
+ g += sg * area;
+ b += sb * area;
+ a += sa * area;
+ }
+}
+
+// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
+STAGE(bicubic_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ // (cx,cy) are the center of our sample.
+ F cx = r,
+ cy = g;
+
+    // All sample points are at the same fractional offset (fx,fy).
+    // They're the 16 points of a 4x4 grid surrounding (x,y), at +/- 0.5 and +/- 1.5 offsets.
+ F fx = fract(cx + 0.5f),
+ fy = fract(cy + 0.5f);
+
+    // We'll accumulate the color of all sixteen samples into {r,g,b,a} directly.
+ r = g = b = a = 0;
+
+ const F scaley[4] = {
+ bicubic_far (1.0f - fy), bicubic_near(1.0f - fy),
+ bicubic_near( fy), bicubic_far ( fy),
+ };
+ const F scalex[4] = {
+ bicubic_far (1.0f - fx), bicubic_near(1.0f - fx),
+ bicubic_near( fx), bicubic_far ( fx),
+ };
+
+ F sample_y = cy - 1.5f;
+ for (int yy = 0; yy <= 3; ++yy) {
+ F sample_x = cx - 1.5f;
+ for (int xx = 0; xx <= 3; ++xx) {
+ F scale = scalex[xx] * scaley[yy];
+
+ // ix_and_ptr() will clamp to the image's bounds for us.
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
+
+ F sr,sg,sb,sa;
+ from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
+
+ r = mad(scale, sr, r);
+ g = mad(scale, sg, g);
+ b = mad(scale, sb, b);
+ a = mad(scale, sa, a);
+
+ sample_x += 1.0f;
+ }
+ sample_y += 1.0f;
+ }
+}
+
+// ~~~~~~ GrSwizzle stage ~~~~~~ //
+
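+// The four swizzle characters are packed into the bits of the context pointer itself
+// (there's no pointed-to struct), which is why memcpy() below reads from &ctx, not ctx.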
+STAGE(swizzle, void* ctx) {
+ auto ir = r, ig = g, ib = b, ia = a;
+ F* o[] = {&r, &g, &b, &a};
+ char swiz[4];
+ memcpy(swiz, &ctx, sizeof(swiz));
+
+ for (int i = 0; i < 4; ++i) {
+ switch (swiz[i]) {
+ case 'r': *o[i] = ir; break;
+ case 'g': *o[i] = ig; break;
+ case 'b': *o[i] = ib; break;
+ case 'a': *o[i] = ia; break;
+ case '0': *o[i] = F(0); break;
+ case '1': *o[i] = F(1); break;
+ default: break;
+ }
+ }
+}
+
+namespace lowp {
+#if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
+ // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
+ // we don't generate lowp stages. All these nullptrs will tell SkJumper.cpp to always use the
+ // highp float pipeline.
+ #define M(st) static void (*st)(void) = nullptr;
+ SK_RASTER_PIPELINE_STAGES(M)
+ #undef M
+ static void (*just_return)(void) = nullptr;
+
+ static void start_pipeline(size_t,size_t,size_t,size_t, void**) {}
+
+#else // We are compiling vector code with Clang... let's make some lowp stages!
+
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ template <typename T> using V = SK_VECTORTYPE(T, 16);
+#else
+ template <typename T> using V = SK_VECTORTYPE(T, 8);
+#endif
+ using U8 = V<uint8_t >;
+ using U16 = V<uint16_t>;
+ using I16 = V< int16_t>;
+ using I32 = V< int32_t>;
+ using U32 = V<uint32_t>;
+ using F = V<float >;
+
+static const size_t N = sizeof(U16) / sizeof(uint16_t);
+
+// Once again, some platforms benefit from a restricted Stage calling convention,
+// but others can pass tons and tons of registers and we're happy to exploit that.
+// It's exactly the same decision and implementation strategy as the F stages above.
+#if JUMPER_NARROW_STAGES
+ struct Params {
+ size_t dx, dy, tail;
+ U16 dr,dg,db,da;
+ };
+ using Stage = void(ABI*)(Params*, void** program, U16 r, U16 g, U16 b, U16 a);
+#else
+ // We pass program as the second argument so that load_and_inc() will find it in %rsi on x86-64.
+ using Stage = void (ABI*)(size_t tail, void** program, size_t dx, size_t dy,
+ U16 r, U16 g, U16 b, U16 a,
+ U16 dr, U16 dg, U16 db, U16 da);
+#endif
+
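+// As in the float pipeline earlier in this file, start_pipeline() below runs full N-pixel
+// strides first, then one final masked pass over the remaining `tail` pixels, if any.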
+static void start_pipeline(const size_t x0, const size_t y0,
+ const size_t xlimit, const size_t ylimit, void** program) {
+ auto start = (Stage)load_and_inc(program);
+ for (size_t dy = y0; dy < ylimit; dy++) {
+ #if JUMPER_NARROW_STAGES
+ Params params = { x0,dy,0, 0,0,0,0 };
+ for (; params.dx + N <= xlimit; params.dx += N) {
+ start(&params,program, 0,0,0,0);
+ }
+ if (size_t tail = xlimit - params.dx) {
+ params.tail = tail;
+ start(&params,program, 0,0,0,0);
+ }
+ #else
+ size_t dx = x0;
+ for (; dx + N <= xlimit; dx += N) {
+ start( 0,program,dx,dy, 0,0,0,0, 0,0,0,0);
+ }
+ if (size_t tail = xlimit - dx) {
+ start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
+ }
+ #endif
+ }
+}
+
+#if JUMPER_NARROW_STAGES
+ static void ABI just_return(Params*, void**, U16,U16,U16,U16) {}
+#else
+ static void ABI just_return(size_t,void**,size_t,size_t, U16,U16,U16,U16, U16,U16,U16,U16) {}
+#endif
+
+// All stages use the same function call ABI to chain into each other, but there are three types:
+// GG: geometry in, geometry out -- think, a matrix
+// GP: geometry in, pixels out. -- think, a memory gather
+// PP: pixels in, pixels out. -- think, a blend mode
+//
+// (Some stages ignore their inputs or produce no logical output. That's perfectly fine.)
+//
+// These three STAGE_ macros let you define each type of stage,
+// and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
+
+#if JUMPER_NARROW_STAGES
+ #define STAGE_GG(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
+ static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y); \
+ split(x, &r,&g); \
+ split(y, &b,&a); \
+ auto next = (Stage)load_and_inc(program); \
+ next(params,program, r,g,b,a); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
+
+ #define STAGE_GP(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a, \
+ params->dr,params->dg,params->db,params->da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(params,program, r,g,b,a); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+
+ #define STAGE_PP(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a, \
+ params->dr,params->dg,params->db,params->da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(params,program, r,g,b,a); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+#else
+ #define STAGE_GG(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
+ static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, dx,dy,tail, x,y); \
+ split(x, &r,&g); \
+ split(y, &b,&a); \
+ auto next = (Stage)load_and_inc(program); \
+ next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
+
+ #define STAGE_GP(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+
+ #define STAGE_PP(name, ...) \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da); \
+ auto next = (Stage)load_and_inc(program); \
+ next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+#endif
+
+// ~~~~~~ Commonly used helper functions ~~~~~~ //
+
+SI U16 div255(U16 v) {
+#if 0
+ return (v+127)/255; // The ideal rounding divide by 255.
+#elif 1 && defined(JUMPER_IS_NEON)
+ // With NEON we can compute (v+127)/255 as (v + ((v+128)>>8) + 128)>>8
+ // just as fast as we can do the approximation below, so might as well be correct!
+ // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up.
+ return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
+#else
+ return (v+255) >> 8; // A good approximation of (v+127)/255.
+#endif
+}
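+
+// E.g. v = 127: the exact (v+127)/255 is 0 but (v+255)>>8 is 1, so the approximation can
+// read one high; the NEON form computes the exact rounding divide for all v in [0, 255*255].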
+
+SI U16 inv(U16 v) { return 255-v; }
+
+#if defined(__clang__)
+SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & c) | (e & ~c); }
+SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & c) | (e & ~c); }
+SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
+SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
+#else
+template<typename T, typename R, typename V> SI R if_then_else(T c, V t, R e) { return bit_cast<R>(c).thenElse(t, e); }
+template<typename T, typename V> SI T max(V x, T y) { return T::Max(x, y); }
+template<typename T, typename V> SI T min(T x, V y) { return T::Min(x, y); }
+#endif
+
+SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
+
+SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
+
+template <typename D, typename S>
+SI D cast(S src) {
+ return SK_CONVERTVECTOR(src, D);
+}
+
+template <typename D, typename S>
+SI void split(S v, D* lo, D* hi) {
+ static_assert(2*sizeof(D) == sizeof(S), "");
+ memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
+ memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
+}
+template <typename D, typename S>
+SI D join(S lo, S hi) {
+ static_assert(sizeof(D) == 2*sizeof(S), "");
+ D v;
+ memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
+ memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
+ return v;
+}
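+
+// split()/join() shuttle between one 2N-wide vector and two N-wide halves via memcpy(),
+// the portable way to reinterpret vector widths without tripping strict-aliasing rules.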
+
+#if defined(__clang__)
+SI F if_then_else(I32 c, F t, F e) {
+ return bit_cast<F>( (bit_cast<I32>(t) & c) | (bit_cast<I32>(e) & ~c) );
+}
+SI F max(F x, F y) { return if_then_else(x < y, y, x); }
+SI F min(F x, F y) { return if_then_else(x < y, x, y); }
+
+SI U32 trunc_(F x) { return (U32)cast<I32>(x); }
+#else
+SI U32 trunc_(F x) { return SkNx_cast<uint32_t>(SkNx_cast<int32_t>(x)); }
+#endif
+
+SI F mad(F f, F m, F a) { return f*m+a; }
+
+SI F rcp(F x) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm256_rcp_ps(lo), _mm256_rcp_ps(hi));
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm_rcp_ps(lo), _mm_rcp_ps(hi));
+#elif defined(JUMPER_IS_NEON)
+ auto rcp = [](float32x4_t v) {
+ auto est = vrecpeq_f32(v);
+ return vrecpsq_f32(v,est)*est;
+ };
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(rcp(lo), rcp(hi));
+#else
+ return 1.0f / x;
+#endif
+}
+SI F sqrt_(F x) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
+#elif defined(SK_CPU_ARM64)
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
+#elif defined(JUMPER_IS_NEON)
+ auto sqrt = [](float32x4_t v) {
+ auto est = vrsqrteq_f32(v); // Estimate and two refinement steps for est = rsqrt(v).
+ est *= vrsqrtsq_f32(v,est*est);
+ est *= vrsqrtsq_f32(v,est*est);
+ return v*est; // sqrt(v) == v*rsqrt(v).
+ };
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(sqrt(lo), sqrt(hi));
+#else
+ return F{
+ sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
+ sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
+ };
+#endif
+}
+
+SI F floor_(F x) {
+#if defined(SK_CPU_ARM64)
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
+#elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
+#else
+ F roundtrip = cast<F>(cast<I32>(x));
+ return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
+#endif
+}
+SI F fract(F x) { return x - floor_(x); }
+SI F abs_(F x) { return bit_cast<F>( bit_cast<I32>(x) & 0x7fffffff ); }
+
+// ~~~~~~ Basic / misc. stages ~~~~~~ //
+
+STAGE_GG(seed_shader, Ctx::None) {
+ static const float iota[] = {
+ 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
+ 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
+ };
+ x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
+ y = cast<F>(I32(dy)) + 0.5f;
+}
+
+STAGE_GG(matrix_translate, const float* m) {
+ x += m[0];
+ y += m[1];
+}
+STAGE_GG(matrix_scale_translate, const float* m) {
+ x = mad(x,m[0], m[2]);
+ y = mad(y,m[1], m[3]);
+}
+STAGE_GG(matrix_2x3, const float* m) {
+ auto X = mad(x,m[0], mad(y,m[2], m[4])),
+ Y = mad(x,m[1], mad(y,m[3], m[5]));
+ x = X;
+ y = Y;
+}
+STAGE_GG(matrix_perspective, const float* m) {
+ // N.B. Unlike the other matrix_ stages, this matrix is row-major.
+ auto X = mad(x,m[0], mad(y,m[1], m[2])),
+ Y = mad(x,m[3], mad(y,m[4], m[5])),
+ Z = mad(x,m[6], mad(y,m[7], m[8]));
+ x = X * rcp(Z);
+ y = Y * rcp(Z);
+}
+
+STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->rgba[0];
+ g = c->rgba[1];
+ b = c->rgba[2];
+ a = c->rgba[3];
+}
+STAGE_PP(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
+ dr = c->rgba[0];
+ dg = c->rgba[1];
+ db = c->rgba[2];
+ da = c->rgba[3];
+}
+STAGE_PP(black_color, Ctx::None) { r = g = b = 0; a = 255; }
+STAGE_PP(white_color, Ctx::None) { r = g = b = 255; a = 255; }
+
+STAGE_PP(set_rgb, const float rgb[3]) {
+ r = from_float(rgb[0]);
+ g = from_float(rgb[1]);
+ b = from_float(rgb[2]);
+}
+
+STAGE_PP(clamp_0, Ctx::None) { /*definitely a noop*/ }
+STAGE_PP(clamp_1, Ctx::None) { /*_should_ be a noop*/ }
+
+STAGE_PP(clamp_a, Ctx::None) {
+ r = min(r, a);
+ g = min(g, a);
+ b = min(b, a);
+}
+
+STAGE_PP(clamp_gamut, Ctx::None) {
+ // It shouldn't be possible to get out-of-gamut
+ // colors when working in lowp.
+}
+
+STAGE_PP(premul, Ctx::None) {
+ r = div255(r * a);
+ g = div255(g * a);
+ b = div255(b * a);
+}
+STAGE_PP(premul_dst, Ctx::None) {
+ dr = div255(dr * da);
+ dg = div255(dg * da);
+ db = div255(db * da);
+}
+
+STAGE_PP(force_opaque , Ctx::None) { a = 255; }
+STAGE_PP(force_opaque_dst, Ctx::None) { da = 255; }
+
+STAGE_PP(swap_rb, Ctx::None) {
+ auto tmp = r;
+ r = b;
+ b = tmp;
+}
+STAGE_PP(swap_rb_dst, Ctx::None) {
+ auto tmp = dr;
+ dr = db;
+ db = tmp;
+}
+
+STAGE_PP(move_src_dst, Ctx::None) {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+}
+
+STAGE_PP(move_dst_src, Ctx::None) {
+ r = dr;
+ g = dg;
+ b = db;
+ a = da;
+}
+
+// ~~~~~~ Blend modes ~~~~~~ //
+
+// The same logic applied to all 4 channels.
+#define BLEND_MODE(name) \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
+ STAGE_PP(name, Ctx::None) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = name##_channel(a,da,a,da); \
+ } \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
+
+ BLEND_MODE(clear) { return 0; }
+ BLEND_MODE(srcatop) { return div255( s*da + d*inv(sa) ); }
+ BLEND_MODE(dstatop) { return div255( d*sa + s*inv(da) ); }
+ BLEND_MODE(srcin) { return div255( s*da ); }
+ BLEND_MODE(dstin) { return div255( d*sa ); }
+ BLEND_MODE(srcout) { return div255( s*inv(da) ); }
+ BLEND_MODE(dstout) { return div255( d*inv(sa) ); }
+ BLEND_MODE(srcover) { return s + div255( d*inv(sa) ); }
+ BLEND_MODE(dstover) { return d + div255( s*inv(da) ); }
+ BLEND_MODE(modulate) { return div255( s*d ); }
+ BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
+ BLEND_MODE(plus_) { return min(s+d, 255); }
+ BLEND_MODE(screen) { return s + d - div255( s*d ); }
+ BLEND_MODE(xor_) { return div255( s*inv(da) + d*inv(sa) ); }
+#undef BLEND_MODE
+
+// The same logic applied to color, and srcover for alpha.
+#define BLEND_MODE(name) \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
+ STAGE_PP(name, Ctx::None) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = a + div255( da*inv(a) ); \
+ } \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
+
+ BLEND_MODE(darken) { return s + d - div255( max(s*da, d*sa) ); }
+ BLEND_MODE(lighten) { return s + d - div255( min(s*da, d*sa) ); }
+ BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
+ BLEND_MODE(exclusion) { return s + d - 2*div255( s*d ); }
+
+ BLEND_MODE(hardlight) {
+ return div255( s*inv(da) + d*inv(sa) +
+ if_then_else(sa < 2*s, sa*da - 2*(sa-s)*(da-d), 2*s*d) );
+ }
+ BLEND_MODE(overlay) {
+ return div255( s*inv(da) + d*inv(sa) +
+ if_then_else(da < 2*d, sa*da - 2*(sa-s)*(da-d), 2*s*d) );
+ }
+#undef BLEND_MODE
+
+// ~~~~~~ Helpers for interacting with memory ~~~~~~ //
+
+template <typename T>
+SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
+ return (T*)ctx->pixels + dy*ctx->stride + dx;
+}
+
+template <typename T>
+SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
+ auto clamp = [](F v, F limit) {
+ limit = bit_cast<F>( bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
+ return min(max(0, v), limit);
+ };
+ x = clamp(x, ctx->width);
+ y = clamp(y, ctx->height);
+
+ *ptr = (const T*)ctx->pixels;
+ return trunc_(y)*ctx->stride + trunc_(x);
+}
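+
+// N.B. subtracting 1 from a positive float's bit pattern yields the next representable
+// value toward zero, so the clamp above lands on the largest float strictly less than
+// width/height -- an exclusive bound made inclusive without extra arithmetic.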
+
+template <typename V, typename T>
+SI V load(const T* ptr, size_t tail) {
+ V v = 0;
+ switch (tail & (N-1)) {
+#if defined(__clang__)
+ case 0: memcpy(&v, ptr, sizeof(v)); break;
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ case 15: v[14] = ptr[14];
+ case 14: v[13] = ptr[13];
+ case 13: v[12] = ptr[12];
+ case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
+ case 11: v[10] = ptr[10];
+ case 10: v[ 9] = ptr[ 9];
+ case 9: v[ 8] = ptr[ 8];
+ case 8: memcpy(&v, ptr, 8*sizeof(T)); break;
+ #endif
+ case 7: v[ 6] = ptr[ 6];
+ case 6: v[ 5] = ptr[ 5];
+ case 5: v[ 4] = ptr[ 4];
+ case 4: memcpy(&v, ptr, 4*sizeof(T)); break;
+ case 3: v[ 2] = ptr[ 2];
+ case 2: memcpy(&v, ptr, 2*sizeof(T)); break;
+ case 1: v[ 0] = ptr[ 0];
+#else
+ case 0: v = V::Load(ptr); break;
+ default: memcpy(&v, ptr, (tail & (N-1)) * sizeof(T)); break;
+#endif
+ }
+ return v;
+}
+template <typename V, typename T>
+SI void store(T* ptr, size_t tail, V v) {
+ switch (tail & (N-1)) {
+#if defined(__clang__)
+ case 0: memcpy(ptr, &v, sizeof(v)); break;
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ case 15: ptr[14] = v[14];
+ case 14: ptr[13] = v[13];
+ case 13: ptr[12] = v[12];
+ case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
+ case 11: ptr[10] = v[10];
+ case 10: ptr[ 9] = v[ 9];
+ case 9: ptr[ 8] = v[ 8];
+ case 8: memcpy(ptr, &v, 8*sizeof(T)); break;
+ #endif
+ case 7: ptr[ 6] = v[ 6];
+ case 6: ptr[ 5] = v[ 5];
+ case 5: ptr[ 4] = v[ 4];
+ case 4: memcpy(ptr, &v, 4*sizeof(T)); break;
+ case 3: ptr[ 2] = v[ 2];
+ case 2: memcpy(ptr, &v, 2*sizeof(T)); break;
+ case 1: ptr[ 0] = v[ 0];
+#else
+ case 0: v.store(ptr); break;
+ default: memcpy(ptr, &v, (tail & (N-1)) * sizeof(T)); break;
+#endif
+ }
+}
+
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ template <typename V, typename T>
+ SI V gather(const T* ptr, U32 ix) {
+ return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
+ ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
+ ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
+ ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
+ }
+
+ template<>
+ F gather(const float* ptr, U32 ix) {
+ __m256i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
+ _mm256_i32gather_ps(ptr, hi, 4));
+ }
+
+ template<>
+ U32 gather(const uint32_t* ptr, U32 ix) {
+ __m256i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<U32>(_mm256_i32gather_epi32(ptr, lo, 4),
+ _mm256_i32gather_epi32(ptr, hi, 4));
+ }
+#else
+ template <typename V, typename T>
+ SI V gather(const T* ptr, U32 ix) {
+ return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
+ ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
+ }
+
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ template<>
+ F gather(const float* ptr, U32 ix) {
+ __m128i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<F>(_mm_i32gather_ps(ptr, lo, 4),
+ _mm_i32gather_ps(ptr, hi, 4));
+ }
+
+ template<>
+ U32 gather(const uint32_t* ptr, U32 ix) {
+ __m128i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<U32>(_mm_i32gather_epi32((const int*)ptr, lo, 4),
+ _mm_i32gather_epi32((const int*)ptr, hi, 4));
+ }
+ #endif
+#endif
+
+
+// ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
+
+SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
+#if 1 && defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+ // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
+ __m256i _01,_23;
+ split(rgba, &_01, &_23);
+ __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
+ _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
+ rgba = join<U32>(_02, _13);
+
+ auto cast_U16 = [](U32 v) -> U16 {
+ __m256i _02,_13;
+ split(v, &_02,&_13);
+ return _mm256_packus_epi32(_02,_13);
+ };
+#else
+ auto cast_U16 = [](U32 v) -> U16 {
+ return cast<U16>(v);
+ };
+#endif
+ *r = cast_U16(rgba & 65535) & 255;
+ *g = cast_U16(rgba & 65535) >> 8;
+ *b = cast_U16(rgba >> 16) & 255;
+ *a = cast_U16(rgba >> 16) >> 8;
+}
+
+SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x4_t rgba;
+ switch (tail & (N-1)) {
+ case 0: rgba = vld4_u8 ((const uint8_t*)(ptr+0) ); break;
+ case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6);
+ case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5);
+ case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4);
+ case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3);
+ case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2);
+ case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1);
+ case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
+ }
+ *r = cast<U16>(rgba.val[0]);
+ *g = cast<U16>(rgba.val[1]);
+ *b = cast<U16>(rgba.val[2]);
+ *a = cast<U16>(rgba.val[3]);
+#else
+ from_8888(load<U32>(ptr, tail), r,g,b,a);
+#endif
+}
+SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x4_t rgba = {{
+ cast<U8>(r),
+ cast<U8>(g),
+ cast<U8>(b),
+ cast<U8>(a),
+ }};
+ switch (tail & (N-1)) {
+ case 0: vst4_u8 ((uint8_t*)(ptr+0), rgba ); break;
+ case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6);
+ case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5);
+ case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4);
+ case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3);
+ case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2);
+ case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1);
+ case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
+ }
+#else
+ store(ptr, tail, cast<U32>(r | (g<<8)) << 0
+ | cast<U32>(b | (a<<8)) << 16);
+#endif
+}
+
+STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
+}
+STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
+}
+STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
+}
+STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
+}
+
+// ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
+
+SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
+ // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
+ U16 R = (rgb >> 11) & 31,
+ G = (rgb >> 5) & 63,
+ B = (rgb >> 0) & 31;
+
+ // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
+ *r = (R << 3) | (R >> 2);
+ *g = (G << 2) | (G >> 4);
+ *b = (B << 3) | (B >> 2);
+}
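+
+// E.g. R = 31 expands to (31<<3)|(31>>2) == 248|7 == 255, so a full-scale 565 channel maps
+// exactly to full-scale 8-bit, which a plain shift left (31<<3 == 248) would not.
+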
+SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ from_565(load<U16>(ptr, tail), r,g,b);
+}
+SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
+#ifdef __clang__
+    // Round from [0,255] to [0,31] (red/blue) or [0,63] (green),
+    // as if x * (31/255.0f) + 0.5f, or x * (63/255.0f) + 0.5f for green.
+ // (Don't feel like you need to find some fundamental truth in these...
+ // they were brute-force searched.)
+ U16 R = (r * 9 + 36) / 74, // 9/74 ≈ 31/255, plus 36/74, about half.
+ G = (g * 21 + 42) / 85, // 21/85 = 63/255 exactly.
+ B = (b * 9 + 36) / 74;
+#else
+ // Select the top 5,6,5 bits.
+ U16 R = r >> 3,
+ G = g >> 2,
+ B = b >> 3;
+#endif
+ // Pack them back into 15|rrrrr gggggg bbbbb|0.
+ store(ptr, tail, R << 11
+ | G << 5
+ | B << 0);
+}
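+// Spot check of the brute-forced constants (illustrative): r = 255 gives
+// (255*9 + 36)/74 = 2331/74 = 31, and r = 128 gives (128*9 + 36)/74 = 16,
+// matching round(128 * 31/255.0) = 16.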
+
+STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
+ a = 255;
+}
+STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
+ da = 255;
+}
+STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
+}
+STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_565(gather<U16>(ptr, ix), &r, &g, &b);
+ a = 255;
+}
+
+SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
+ // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
+ U16 R = (rgba >> 12) & 15,
+ G = (rgba >> 8) & 15,
+ B = (rgba >> 4) & 15,
+ A = (rgba >> 0) & 15;
+
+ // Scale [0,15] to [0,255].
+ *r = (R << 4) | R;
+ *g = (G << 4) | G;
+ *b = (B << 4) | B;
+ *a = (A << 4) | A;
+}
+SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ from_4444(load<U16>(ptr, tail), r,g,b,a);
+}
+SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+#ifdef __clang__
+ // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
+ U16 R = (r + 8) / 17,
+ G = (g + 8) / 17,
+ B = (b + 8) / 17,
+ A = (a + 8) / 17;
+#else
+ // Select the top 4 bits of each.
+ U16 R = r >> 4,
+ G = g >> 4,
+ B = b >> 4,
+ A = a >> 4;
+#endif
+ // Pack them back into 15|rrrr gggg bbbb aaaa|0.
+ store(ptr, tail, R << 12
+ | G << 8
+ | B << 4
+ | A << 0);
+}
+
+STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
+}
+STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
+}
+STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
+}
+STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
+}
+
+SI void from_88(U16 rg, U16* r, U16* g) {
+ *r = (rg & 0xFF);
+ *g = (rg >> 8);
+}
+
+SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x2_t rg;
+ switch (tail & (N-1)) {
+ case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
+ case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6);
+ case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5);
+ case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4);
+ case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3);
+ case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2);
+ case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1);
+ case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
+ }
+ *r = cast<U16>(rg.val[0]);
+ *g = cast<U16>(rg.val[1]);
+#else
+ from_88(load<U16>(ptr, tail), r,g);
+#endif
+}
+
+SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x2_t rg = {{
+ cast<U8>(r),
+ cast<U8>(g),
+ }};
+ switch (tail & (N-1)) {
+ case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
+ case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6);
+ case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5);
+ case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4);
+ case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3);
+ case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2);
+ case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1);
+ case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
+ }
+#else
+ store(ptr, tail, (r | (g<<8)));
+#endif
+}
+
+STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &r, &g);
+ b = 0;
+ a = 255;
+}
+STAGE_PP(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &dr, &dg);
+ db = 0;
+ da = 255;
+}
+STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
+}
+STAGE_GP(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x, y);
+ from_88(gather<U16>(ptr, ix), &r, &g);
+ b = 0;
+ a = 255;
+}
+
+// ~~~~~~ 8-bit memory loads and stores ~~~~~~ //
+
+SI U16 load_8(const uint8_t* ptr, size_t tail) {
+ return cast<U16>(load<U8>(ptr, tail));
+}
+SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
+ store(ptr, tail, cast<U8>(v));
+}
+
+STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+ r = g = b = 0;
+ a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
+}
+STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ dr = dg = db = 0;
+ da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
+}
+STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
+}
+STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint8_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ r = g = b = 0;
+ a = cast<U16>(gather<U8>(ptr, ix));
+}
+
+STAGE_PP(alpha_to_gray, Ctx::None) {
+ r = g = b = a;
+ a = 255;
+}
+STAGE_PP(alpha_to_gray_dst, Ctx::None) {
+ dr = dg = db = da;
+ da = 255;
+}
+STAGE_PP(bt709_luminance_or_luma_to_alpha, Ctx::None) {
+ a = (r*54 + g*183 + b*19)>>8; // 0.2126, 0.7152, 0.0722 with 256 denominator.
+ r = g = b = 0;
+}
+
+// ~~~~~~ Raw load/store of the src and dst registers ~~~~~~ //
+
+STAGE_PP(load_src, const uint16_t* ptr) {
+ r = sk_unaligned_load<U16>(ptr + 0*N);
+ g = sk_unaligned_load<U16>(ptr + 1*N);
+ b = sk_unaligned_load<U16>(ptr + 2*N);
+ a = sk_unaligned_load<U16>(ptr + 3*N);
+}
+STAGE_PP(store_src, uint16_t* ptr) {
+ sk_unaligned_store(ptr + 0*N, r);
+ sk_unaligned_store(ptr + 1*N, g);
+ sk_unaligned_store(ptr + 2*N, b);
+ sk_unaligned_store(ptr + 3*N, a);
+}
+STAGE_PP(load_dst, const uint16_t* ptr) {
+ dr = sk_unaligned_load<U16>(ptr + 0*N);
+ dg = sk_unaligned_load<U16>(ptr + 1*N);
+ db = sk_unaligned_load<U16>(ptr + 2*N);
+ da = sk_unaligned_load<U16>(ptr + 3*N);
+}
+STAGE_PP(store_dst, uint16_t* ptr) {
+ sk_unaligned_store(ptr + 0*N, dr);
+ sk_unaligned_store(ptr + 1*N, dg);
+ sk_unaligned_store(ptr + 2*N, db);
+ sk_unaligned_store(ptr + 3*N, da);
+}
+
+// ~~~~~~ Coverage scales / lerps ~~~~~~ //
+
+STAGE_PP(scale_1_float, const float* f) {
+ U16 c = from_float(*f);
+ r = div255( r * c );
+ g = div255( g * c );
+ b = div255( b * c );
+ a = div255( a * c );
+}
+STAGE_PP(lerp_1_float, const float* f) {
+ U16 c = from_float(*f);
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+}
+STAGE_PP(lerp_native, const uint16_t scales[]) {
+ auto c = sk_unaligned_load<U16>(scales);
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+}
+
+STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+ U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
+ r = div255( r * c );
+ g = div255( g * c );
+ b = div255( b * c );
+ a = div255( a * c );
+}
+STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+ U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+}
+
+// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
+SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
+ return if_then_else(a < da, min(cr, min(cg,cb))
+ , max(cr, max(cg,cb)));
+}
+STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ U16 cr,cg,cb;
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
+ U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+ r = div255( r * cr );
+ g = div255( g * cg );
+ b = div255( b * cb );
+ a = div255( a * ca );
+}
+STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ U16 cr,cg,cb;
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
+ U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+ r = lerp(dr, r, cr);
+ g = lerp(dg, g, cg);
+ b = lerp(db, b, cb);
+ a = lerp(da, a, ca);
+}
+
+STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
+ U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
+ add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);
+
+ r = min(div255(r*mul) + add, a);
+ g = min(div255(g*mul) + add, a);
+ b = min(div255(b*mul) + add, a);
+}
+
+// ~~~~~~ Gradient stages ~~~~~~ //
+
+// Clamp x to [0,1], both sides inclusive (think, gradients).
+// Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
+SI F clamp_01(F v) { return min(max(0, v), 1); }
+
+STAGE_GG(clamp_x_1 , Ctx::None) { x = clamp_01(x); }
+STAGE_GG(repeat_x_1, Ctx::None) { x = clamp_01(x - floor_(x)); }
+STAGE_GG(mirror_x_1, Ctx::None) {
+ auto two = [](F x){ return x+x; };
+ x = clamp_01(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
+}
+
+#if defined(__clang__)
+SI U16 cond_to_mask_16(I32 cond) { return cast<U16>(cond); }
+#else
+SI U16 cond_to_mask_16(F cond) {
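+    // On non-clang builds comparisons arrive as float lanes whose bits are all-0 or
+    // all-1; packing the two 4x32-bit halves with signed saturation preserves 0 and -1,
+    // yielding eight 16-bit mask lanes.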
+ return _mm_packs_epi32(_mm_castps_si128(cond.fLo.fVec), _mm_castps_si128(cond.fHi.fVec));
+}
+#endif
+
+STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto w = ctx->limit_x;
+ sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
+}
+STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto h = ctx->limit_y;
+ sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
+}
+STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto w = ctx->limit_x;
+ auto h = ctx->limit_y;
+ sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
+}
+STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
+ auto mask = sk_unaligned_load<U16>(ctx->mask);
+ r = r & mask;
+ g = g & mask;
+ b = b & mask;
+ a = a & mask;
+}
+
+SI void round_F_to_U16(F R, F G, F B, F A, bool interpolatedInPremul,
+ U16* r, U16* g, U16* b, U16* a) {
+ auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };
+
+ F limit = interpolatedInPremul ? A
+ : 1;
+ *r = round(min(max(0,R), limit));
+ *g = round(min(max(0,G), limit));
+ *b = round(min(max(0,B), limit));
+ *a = round(A); // we assume alpha is already in [0,1].
+}
+
+SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
+ U16* r, U16* g, U16* b, U16* a) {
+
+ F fr, fg, fb, fa, br, bg, bb, ba;
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
+    if (c->stopCount <= 8) {
+ __m256i lo, hi;
+ split(idx, &lo, &hi);
+
+ fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
+ br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
+ fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
+ bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
+ fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
+ bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
+ fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
+ ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
+ _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
+ } else
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ if (c->stopCount <= 4) {
+ auto permute = [](const float* ptr, U32 idx) {
+ __m128 v = _mm_loadu_ps(ptr);
+ __m256 vv = _mm256_insertf128_ps(_mm256_castps128_ps256(v), v, 1);
+ return bit_cast<F>(_mm256_permutevar_ps(vv, bit_cast<__m256i>(idx)));
+ };
+ fr = permute(c->fs[0], idx);
+ br = permute(c->bs[0], idx);
+ fg = permute(c->fs[1], idx);
+ bg = permute(c->bs[1], idx);
+ fb = permute(c->fs[2], idx);
+ bb = permute(c->bs[2], idx);
+ fa = permute(c->fs[3], idx);
+ ba = permute(c->bs[3], idx);
+ } else
+#endif
+ {
+ fr = gather<F>(c->fs[0], idx);
+ fg = gather<F>(c->fs[1], idx);
+ fb = gather<F>(c->fs[2], idx);
+ fa = gather<F>(c->fs[3], idx);
+ br = gather<F>(c->bs[0], idx);
+ bg = gather<F>(c->bs[1], idx);
+ bb = gather<F>(c->bs[2], idx);
+ ba = gather<F>(c->bs[3], idx);
+ }
+ round_F_to_U16(mad(t, fr, br),
+ mad(t, fg, bg),
+ mad(t, fb, bb),
+ mad(t, fa, ba),
+ c->interpolatedInPremul,
+ r,g,b,a);
+}
+
+STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
+ auto t = x;
+ U32 idx = 0;
+
+ // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
+ for (size_t i = 1; i < c->stopCount; i++) {
+ idx += bit_cast<U32>(t >= c->ts[i]);
+ }
+
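+    // Each comparison above produces an all-1s lane (i.e. -1) when true, so idx now
+    // holds minus the number of stops passed; negating recovers the actual count.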
+ idx = U32(0) - idx;
+
+ gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
+
+STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
+ auto t = x;
+ auto idx = trunc_(t * (c->stopCount-1));
+ gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
+
+STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
+ auto t = x;
+ round_F_to_U16(mad(t, c->f[0], c->b[0]),
+ mad(t, c->f[1], c->b[1]),
+ mad(t, c->f[2], c->b[2]),
+ mad(t, c->f[3], c->b[3]),
+ c->interpolatedInPremul,
+ &r,&g,&b,&a);
+}
+
+STAGE_GG(xy_to_unit_angle, Ctx::None) {
+ F xabs = abs_(x),
+ yabs = abs_(y);
+
+ F slope = min(xabs, yabs)/max(xabs, yabs);
+ F s = slope * slope;
+
+ // Use a 7th degree polynomial to approximate atan.
+ // This was generated using sollya.gforge.inria.fr.
+ // A float optimized polynomial was generated using the following command.
+ // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
+ F phi = slope
+ * (0.15912117063999176025390625f + s
+ * (-5.185396969318389892578125e-2f + s
+ * (2.476101927459239959716796875e-2f + s
+ * (-7.0547382347285747528076171875e-3f))));
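+    // Sanity check (illustrative): at slope = 1, a 45 degree ray, the polynomial sums to
+    // 0.15912117 - 0.05185397 + 0.02476102 - 0.00705474 = 0.12497, close to the exact 1/8 turn.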
+
+ phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
+ phi = if_then_else(x < 0.0f , 1.0f/2.0f - phi, phi);
+ phi = if_then_else(y < 0.0f , 1.0f - phi , phi);
+ phi = if_then_else(isnan_(phi), 0 , phi); // Check for NaN.
+ x = phi;
+}
+STAGE_GG(xy_to_radius, Ctx::None) {
+ x = sqrt_(x*x + y*y);
+}
+
+// ~~~~~~ Compound stages ~~~~~~ //
+
+STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ load_8888_(ptr, tail, &dr,&dg,&db,&da);
+ r = r + div255( dr*inv(a) );
+ g = g + div255( dg*inv(a) );
+ b = b + div255( db*inv(a) );
+ a = a + div255( da*inv(a) );
+ store_8888_(ptr, tail, r,g,b,a);
+}
+
+#if defined(SK_DISABLE_LOWP_BILERP_CLAMP_CLAMP_STAGE)
+ static void(*bilerp_clamp_8888)(void) = nullptr;
+ static void(*bilinear)(void) = nullptr;
+#else
+STAGE_GP(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ // (cx,cy) are the center of our sample.
+ F cx = x,
+ cy = y;
+
+ // All sample points are at the same fractional offset (fx,fy).
+ // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
+ F fx = fract(cx + 0.5f),
+ fy = fract(cy + 0.5f);
+
+ // We'll accumulate the color of all four samples into {r,g,b,a} directly.
+ r = g = b = a = 0;
+
+ // The first three sample points will calculate their area using math
+ // just like in the float code above, but the fourth will take up all the rest.
+ //
+ // Logically this is the same as doing the math for the fourth pixel too,
+ // but rounding error makes this a better strategy, keeping opaque opaque, etc.
+ //
+ // We can keep up to 8 bits of fractional precision without overflowing 16-bit,
+ // so our "1.0" area is 256.
+ const uint16_t bias = 256;
+ const uint16_t biasShift = 8;
+ U16 remaining = bias;
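+    // Worked example (illustrative): with fx = 0.25 and fy = 0.5 the three computed
+    // corner areas are 96, 32, and 96 out of 256, leaving remaining = 32 for the last
+    // corner, exactly the 0.25 * 0.5 * 256 it would have computed, with no rounding drift.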
+
+ for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
+ for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
+ // (x,y) are the coordinates of this sample point.
+ F x = cx + dx,
+ y = cy + dy;
+
+ // ix_and_ptr() will clamp to the image's bounds for us.
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+
+ U16 sr,sg,sb,sa;
+ from_8888(gather<U32>(ptr, ix), &sr,&sg,&sb,&sa);
+
+ // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
+ // are combined in direct proportion to their area overlapping that logical query pixel.
+ // At positive offsets, the x-axis contribution to that rectangle is fx,
+ // or (1-fx) at negative x. Same deal for y.
+ F sx = (dx > 0) ? fx : 1.0f - fx,
+ sy = (dy > 0) ? fy : 1.0f - fy;
+
+ U16 area = (dy == 0.5f && dx == 0.5f) ? remaining
+ : cast<U16>(sx * sy * bias);
+ for (size_t i = 0; i < N; i++) {
+ SkASSERT(remaining[i] >= area[i]);
+ }
+ remaining -= area;
+
+ r += sr * area;
+ g += sg * area;
+ b += sb * area;
+ a += sa * area;
+ }
+
+ r = (r + bias/2) >> biasShift;
+ g = (g + bias/2) >> biasShift;
+ b = (b + bias/2) >> biasShift;
+ a = (a + bias/2) >> biasShift;
+}
+
+// TODO: lowp::tile() is identical to the highp tile()... share?
+SI F tile(F v, SkTileMode mode, float limit, float invLimit) {
+    // ix_and_ptr() will clamp the output of tile() afterwards, so we need not clamp here.
+ switch (mode) {
+ case SkTileMode::kDecal: // TODO, for now fallthrough to clamp
+ case SkTileMode::kClamp: return v;
+ case SkTileMode::kRepeat: return v - floor_(v*invLimit)*limit;
+ case SkTileMode::kMirror:
+ return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
+ }
+ SkUNREACHABLE;
+}
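+// Spot check of the mirror arm above (illustrative): v = 2.5 with limit = 2 and
+// invLimit = 0.5 gives abs(0.5 - 4*floor(0.125) - 2) = abs(-1.5) = 1.5, the
+// expected reflection of 2.5 back into [0,2].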
+
+SI void sample(const SkRasterPipeline_SamplerCtx2* ctx, F x, F y,
+ U16* r, U16* g, U16* b, U16* a) {
+ x = tile(x, ctx->tileX, ctx->width , ctx->invWidth );
+ y = tile(y, ctx->tileY, ctx->height, ctx->invHeight);
+
+ switch (ctx->ct) {
+ default: *r = *g = *b = *a = 0; // TODO
+ break;
+
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_8888(gather<U32>(ptr, ix), r,g,b,a);
+ if (ctx->ct == kBGRA_8888_SkColorType) {
+ std::swap(*r,*b);
+ }
+ } break;
+ }
+}
+
+template <int D>
+SI void sampler(const SkRasterPipeline_SamplerCtx2* ctx,
+ F cx, F cy, const F (&wx)[D], const F (&wy)[D],
+ U16* r, U16* g, U16* b, U16* a) {
+
+ float start = -0.5f*(D-1);
+
+ const uint16_t bias = 256;
+ const uint16_t biasShift = 8;
+ U16 remaining = bias;
+
+ *r = *g = *b = *a = 0;
+ F y = cy + start;
+ for (int j = 0; j < D; j++, y += 1.0f) {
+ F x = cx + start;
+ for (int i = 0; i < D; i++, x += 1.0f) {
+ U16 R,G,B,A;
+ sample(ctx, x,y, &R,&G,&B,&A);
+
+ U16 w = (i == D-1 && j == D-1) ? remaining
+ : cast<U16>(wx[i]*wy[j]*bias);
+ remaining -= w;
+ *r += w*R;
+ *g += w*G;
+ *b += w*B;
+ *a += w*A;
+ }
+ }
+ *r = (*r + bias/2) >> biasShift;
+ *g = (*g + bias/2) >> biasShift;
+ *b = (*b + bias/2) >> biasShift;
+ *a = (*a + bias/2) >> biasShift;
+}
+
+STAGE_GP(bilinear, const SkRasterPipeline_SamplerCtx2* ctx) {
+ F fx = fract(x + 0.5f),
+ fy = fract(y + 0.5f);
+ const F wx[] = {1.0f - fx, fx};
+ const F wy[] = {1.0f - fy, fy};
+
+ sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
+}
+#endif
+
+// ~~~~~~ GrSwizzle stage ~~~~~~ //
+
+STAGE_PP(swizzle, void* ctx) {
+ auto ir = r, ig = g, ib = b, ia = a;
+ U16* o[] = {&r, &g, &b, &a};
+ char swiz[4];
+ memcpy(swiz, &ctx, sizeof(swiz));
+
+ for (int i = 0; i < 4; ++i) {
+ switch (swiz[i]) {
+ case 'r': *o[i] = ir; break;
+ case 'g': *o[i] = ig; break;
+ case 'b': *o[i] = ib; break;
+ case 'a': *o[i] = ia; break;
+ case '0': *o[i] = U16(0); break;
+ case '1': *o[i] = U16(255); break;
+ default: break;
+ }
+ }
+}
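+// Note on the calling convention (an observation, not upstream documentation): the four
+// swizzle characters travel packed into the leading bytes of the context pointer value
+// itself (the low bytes on little-endian targets), so e.g. a ctx encoding "bgra" swaps
+// red and blue, while '0' and '1' force a channel to zero or opaque 255.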
+
+// Now we'll add null stand-ins for stages we haven't implemented in lowp.
+// If a pipeline uses any of these stages, the whole pipeline is booted out of lowp into highp.
+#define NOT_IMPLEMENTED(st) static void (*st)(void) = nullptr;
+ NOT_IMPLEMENTED(callback)
+ NOT_IMPLEMENTED(interpreter)
+ NOT_IMPLEMENTED(unbounded_set_rgb)
+ NOT_IMPLEMENTED(unbounded_uniform_color)
+ NOT_IMPLEMENTED(unpremul)
+ NOT_IMPLEMENTED(dither) // TODO
+ NOT_IMPLEMENTED(from_srgb)
+ NOT_IMPLEMENTED(to_srgb)
+ NOT_IMPLEMENTED(load_16161616)
+ NOT_IMPLEMENTED(load_16161616_dst)
+ NOT_IMPLEMENTED(store_16161616)
+ NOT_IMPLEMENTED(gather_16161616)
+ NOT_IMPLEMENTED(load_a16)
+ NOT_IMPLEMENTED(load_a16_dst)
+ NOT_IMPLEMENTED(store_a16)
+ NOT_IMPLEMENTED(gather_a16)
+ NOT_IMPLEMENTED(load_rg1616)
+ NOT_IMPLEMENTED(load_rg1616_dst)
+ NOT_IMPLEMENTED(store_rg1616)
+ NOT_IMPLEMENTED(gather_rg1616)
+ NOT_IMPLEMENTED(load_f16)
+ NOT_IMPLEMENTED(load_f16_dst)
+ NOT_IMPLEMENTED(store_f16)
+ NOT_IMPLEMENTED(gather_f16)
+ NOT_IMPLEMENTED(load_af16)
+ NOT_IMPLEMENTED(load_af16_dst)
+ NOT_IMPLEMENTED(store_af16)
+ NOT_IMPLEMENTED(gather_af16)
+ NOT_IMPLEMENTED(load_rgf16)
+ NOT_IMPLEMENTED(load_rgf16_dst)
+ NOT_IMPLEMENTED(store_rgf16)
+ NOT_IMPLEMENTED(gather_rgf16)
+ NOT_IMPLEMENTED(load_f32)
+ NOT_IMPLEMENTED(load_f32_dst)
+ NOT_IMPLEMENTED(store_f32)
+ NOT_IMPLEMENTED(gather_f32)
+ NOT_IMPLEMENTED(load_rgf32)
+ NOT_IMPLEMENTED(store_rgf32)
+ NOT_IMPLEMENTED(load_1010102)
+ NOT_IMPLEMENTED(load_1010102_dst)
+ NOT_IMPLEMENTED(store_1010102)
+ NOT_IMPLEMENTED(gather_1010102)
+ NOT_IMPLEMENTED(store_u16_be)
+ NOT_IMPLEMENTED(byte_tables) // TODO
+ NOT_IMPLEMENTED(colorburn)
+ NOT_IMPLEMENTED(colordodge)
+ NOT_IMPLEMENTED(softlight)
+ NOT_IMPLEMENTED(hue)
+ NOT_IMPLEMENTED(saturation)
+ NOT_IMPLEMENTED(color)
+ NOT_IMPLEMENTED(luminosity)
+ NOT_IMPLEMENTED(matrix_3x3)
+ NOT_IMPLEMENTED(matrix_3x4)
+ NOT_IMPLEMENTED(matrix_4x5) // TODO
+ NOT_IMPLEMENTED(matrix_4x3) // TODO
+ NOT_IMPLEMENTED(parametric)
+ NOT_IMPLEMENTED(gamma_)
+ NOT_IMPLEMENTED(PQish)
+ NOT_IMPLEMENTED(HLGish)
+ NOT_IMPLEMENTED(HLGinvish)
+ NOT_IMPLEMENTED(rgb_to_hsl)
+ NOT_IMPLEMENTED(hsl_to_rgb)
+ NOT_IMPLEMENTED(gauss_a_to_rgba) // TODO
+ NOT_IMPLEMENTED(mirror_x) // TODO
+ NOT_IMPLEMENTED(repeat_x) // TODO
+ NOT_IMPLEMENTED(mirror_y) // TODO
+ NOT_IMPLEMENTED(repeat_y) // TODO
+ NOT_IMPLEMENTED(negate_x)
+ NOT_IMPLEMENTED(bicubic) // TODO if I can figure out negative weights
+ NOT_IMPLEMENTED(bicubic_clamp_8888)
+ NOT_IMPLEMENTED(bilinear_nx) // TODO
+ NOT_IMPLEMENTED(bilinear_ny) // TODO
+ NOT_IMPLEMENTED(bilinear_px) // TODO
+ NOT_IMPLEMENTED(bilinear_py) // TODO
+ NOT_IMPLEMENTED(bicubic_n3x) // TODO
+ NOT_IMPLEMENTED(bicubic_n1x) // TODO
+ NOT_IMPLEMENTED(bicubic_p1x) // TODO
+ NOT_IMPLEMENTED(bicubic_p3x) // TODO
+ NOT_IMPLEMENTED(bicubic_n3y) // TODO
+ NOT_IMPLEMENTED(bicubic_n1y) // TODO
+ NOT_IMPLEMENTED(bicubic_p1y) // TODO
+ NOT_IMPLEMENTED(bicubic_p3y) // TODO
+ NOT_IMPLEMENTED(save_xy) // TODO
+ NOT_IMPLEMENTED(accumulate) // TODO
+ NOT_IMPLEMENTED(xy_to_2pt_conical_well_behaved)
+ NOT_IMPLEMENTED(xy_to_2pt_conical_strip)
+ NOT_IMPLEMENTED(xy_to_2pt_conical_focal_on_circle)
+ NOT_IMPLEMENTED(xy_to_2pt_conical_smaller)
+ NOT_IMPLEMENTED(xy_to_2pt_conical_greater)
+ NOT_IMPLEMENTED(alter_2pt_conical_compensate_focal)
+ NOT_IMPLEMENTED(alter_2pt_conical_unswap)
+ NOT_IMPLEMENTED(mask_2pt_conical_nan)
+ NOT_IMPLEMENTED(mask_2pt_conical_degenerates)
+ NOT_IMPLEMENTED(apply_vector_mask)
+#undef NOT_IMPLEMENTED
+
+#endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
+} // namespace lowp
+
+} // namespace SK_OPTS_NS
+
+#endif//SkRasterPipeline_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkSwizzler_opts.h b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
new file mode 100644
index 0000000000..f7f7c25457
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
@@ -0,0 +1,823 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_opts_DEFINED
+#define SkSwizzler_opts_DEFINED
+
+#include "include/private/SkColorData.h"
+
+#include <utility>
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+static void RGBA_to_rgbA_portable(uint32_t* dst, const uint32_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t a = (src[i] >> 24) & 0xFF,
+ b = (src[i] >> 16) & 0xFF,
+ g = (src[i] >> 8) & 0xFF,
+ r = (src[i] >> 0) & 0xFF;
+ b = (b*a+127)/255;
+ g = (g*a+127)/255;
+ r = (r*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
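+// Worked example of the rounded premultiply (illustrative): r = 200 with a = 128 gives
+// (200*128 + 127)/255 = 25727/255 = 100, matching round(200 * 128/255.0).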
+
+static void RGBA_to_bgrA_portable(uint32_t* dst, const uint32_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t a = (src[i] >> 24) & 0xFF,
+ b = (src[i] >> 16) & 0xFF,
+ g = (src[i] >> 8) & 0xFF,
+ r = (src[i] >> 0) & 0xFF;
+ b = (b*a+127)/255;
+ g = (g*a+127)/255;
+ r = (r*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void RGBA_to_BGRA_portable(uint32_t* dst, const uint32_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t a = (src[i] >> 24) & 0xFF,
+ b = (src[i] >> 16) & 0xFF,
+ g = (src[i] >> 8) & 0xFF,
+ r = (src[i] >> 0) & 0xFF;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void RGB_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
+
+static void RGB_to_BGR1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void gray_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)src[i] << 16
+ | (uint32_t)src[i] << 8
+ | (uint32_t)src[i] << 0;
+ }
+}
+
+static void grayA_to_RGBA_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t g = src[0],
+ a = src[1];
+ src += 2;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)g << 16
+ | (uint32_t)g << 8
+ | (uint32_t)g << 0;
+ }
+}
+
+static void grayA_to_rgbA_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t g = src[0],
+ a = src[1];
+ src += 2;
+ g = (g*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)g << 16
+ | (uint32_t)g << 8
+ | (uint32_t)g << 0;
+ }
+}
+
+static void inverted_CMYK_to_RGB1_portable(uint32_t* dst, const uint32_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t k = (src[i] >> 24) & 0xFF,
+ y = (src[i] >> 16) & 0xFF,
+ m = (src[i] >> 8) & 0xFF,
+ c = (src[i] >> 0) & 0xFF;
+ // See comments in SkSwizzler.cpp for details on the conversion formula.
+ uint8_t b = (y*k+127)/255,
+ g = (m*k+127)/255,
+ r = (c*k+127)/255;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t) b << 16
+ | (uint32_t) g << 8
+ | (uint32_t) r << 0;
+ }
+}
+
+static void inverted_CMYK_to_BGR1_portable(uint32_t* dst, const uint32_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t k = (src[i] >> 24) & 0xFF,
+ y = (src[i] >> 16) & 0xFF,
+ m = (src[i] >> 8) & 0xFF,
+ c = (src[i] >> 0) & 0xFF;
+ uint8_t b = (y*k+127)/255,
+ g = (m*k+127)/255,
+ r = (c*k+127)/255;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t) r << 16
+ | (uint32_t) g << 8
+ | (uint32_t) b << 0;
+ }
+}
+
+#if defined(SK_ARM_HAS_NEON)
+
+// Rounded divide by 255, (x + 127) / 255
+static uint8x8_t div255_round(uint16x8_t x) {
+ // result = (x + 127) / 255
+ // result = (x + 127) / 256 + error1
+ //
+ // error1 = (x + 127) / (255 * 256)
+ // error1 = (x + 127) / (256 * 256) + error2
+ //
+ // error2 = (x + 127) / (255 * 256 * 256)
+ //
+ // The maximum value of error2 is too small to matter. Thus:
+ // result = (x + 127) / 256 + (x + 127) / (256 * 256)
+ // result = ((x + 127) / 256 + x + 127) / 256
+ // result = ((x + 127) >> 8 + x + 127) >> 8
+ //
+ // Use >>> to represent "rounded right shift" which, conveniently,
+ // NEON supports in one instruction.
+ // result = ((x >>> 8) + x) >>> 8
+ //
+ // Note that the second right shift is actually performed as an
+ // "add, round, and narrow back to 8-bits" instruction.
+ return vraddhn_u16(x, vrshrq_n_u16(x, 8));
+}
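+// Spot check (illustrative): the largest input x = 255*255 = 65025 rounds through
+// vrshrq_n_u16 as (65025 + 128) >> 8 = 254, then vraddhn_u16 computes
+// (65025 + 254 + 128) >> 8 = 255, agreeing with (65025 + 127)/255 = 255.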
+
+// Scale a byte by another, (x * y + 127) / 255
+static uint8x8_t scale(uint8x8_t x, uint8x8_t y) {
+ return div255_round(vmull_u8(x, y));
+}
+
+template <bool kSwapRB>
+static void premul_should_swapRB(uint32_t* dst, const uint32_t* src, int count) {
+ while (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
+
+ uint8x8_t a = rgba.val[3],
+ b = rgba.val[2],
+ g = rgba.val[1],
+ r = rgba.val[0];
+
+ // Premultiply.
+ b = scale(b, a);
+ g = scale(g, a);
+ r = scale(r, a);
+
+ // Store 8 premultiplied pixels.
+ if (kSwapRB) {
+ rgba.val[2] = r;
+ rgba.val[1] = g;
+ rgba.val[0] = b;
+ } else {
+ rgba.val[2] = b;
+ rgba.val[1] = g;
+ rgba.val[0] = r;
+ }
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB<false>(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB<true>(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ using std::swap;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x4_t rgba = vld4q_u8((const uint8_t*) src);
+
+ // Swap r and b.
+ swap(rgba.val[0], rgba.val[2]);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
+
+ // Swap r and b.
+ swap(rgba.val[0], rgba.val[2]);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+template <bool kSwapRB>
+static void insert_alpha_should_swaprb(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x3_t rgb = vld3q_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x16x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*3;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x3_t rgb = vld3_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x8x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*3;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb<false>(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb<true>(dst, src, count);
+}
+
+/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16_t gray = vld1q_u8(src);
+
+ // Set each of the color channels.
+ uint8x16x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8_t gray = vld1_u8(src);
+
+ // Set each of the color channels.
+ uint8x8x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+template <bool kPremul>
+static void expand_grayA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x2_t ga = vld2q_u8(src);
+
+ // Premultiply if requested.
+ if (kPremul) {
+ ga.val[0] = vcombine_u8(
+ scale(vget_low_u8(ga.val[0]), vget_low_u8(ga.val[1])),
+ scale(vget_high_u8(ga.val[0]), vget_high_u8(ga.val[1])));
+ }
+
+ // Set each of the color channels.
+ uint8x16x4_t rgba;
+ rgba.val[0] = ga.val[0];
+ rgba.val[1] = ga.val[0];
+ rgba.val[2] = ga.val[0];
+ rgba.val[3] = ga.val[1];
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*2;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x2_t ga = vld2_u8(src);
+
+ // Premultiply if requested.
+ if (kPremul) {
+ ga.val[0] = scale(ga.val[0], ga.val[1]);
+ }
+
+ // Set each of the color channels.
+ uint8x8x4_t rgba;
+ rgba.val[0] = ga.val[0];
+ rgba.val[1] = ga.val[0];
+ rgba.val[2] = ga.val[0];
+ rgba.val[3] = ga.val[1];
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ auto proc = kPremul ? grayA_to_rgbA_portable : grayA_to_RGBA_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ expand_grayA<false>(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ expand_grayA<true>(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+template <Format format>
+static void inverted_cmyk_to(uint32_t* dst, const uint32_t* src, int count) {
+ while (count >= 8) {
+ // Load 8 cmyk pixels.
+ uint8x8x4_t pixels = vld4_u8((const uint8_t*) src);
+
+ uint8x8_t k = pixels.val[3],
+ y = pixels.val[2],
+ m = pixels.val[1],
+ c = pixels.val[0];
+
+ // Scale to r, g, b.
+ uint8x8_t b = scale(y, k);
+ uint8x8_t g = scale(m, k);
+ uint8x8_t r = scale(c, k);
+
+ // Store 8 rgba pixels.
+ if (kBGR1 == format) {
+ pixels.val[3] = vdup_n_u8(0xFF);
+ pixels.val[2] = r;
+ pixels.val[1] = g;
+ pixels.val[0] = b;
+ } else {
+ pixels.val[3] = vdup_n_u8(0xFF);
+ pixels.val[2] = b;
+ pixels.val[1] = g;
+ pixels.val[0] = r;
+ }
+ vst4_u8((uint8_t*) dst, pixels);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to<kRGB1>(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to<kBGR1>(dst, src, count);
+}
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+// Scale a byte by another.
+// Inputs are stored in 16-bit lanes, but are no larger than 8 bits.
+static __m128i scale(__m128i x, __m128i y) {
+ const __m128i _128 = _mm_set1_epi16(128);
+ const __m128i _257 = _mm_set1_epi16(257);
+
+ // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
+ return _mm_mulhi_epu16(_mm_add_epi16(_mm_mullo_epi16(x, y), _128), _257);
+}
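+// Spot check of the identity (illustrative): at the extreme x = 255*255 = 65025,
+// ((65025 + 128) * 257) >> 16 = 16744321 >> 16 = 255, agreeing with (65025 + 127)/255.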
+
+template <bool kSwapRB>
+static void premul_should_swapRB(uint32_t* dst, const uint32_t* src, int count) {
+
+ auto premul8 = [](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kSwapRB) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // rrrrgggg bbbbaaaa
+ *hi = _mm_shuffle_epi8(*hi, planar); // RRRRGGGG BBBBAAAA
+ __m128i rg = _mm_unpacklo_epi32(*lo, *hi), // rrrrRRRR ggggGGGG
+ ba = _mm_unpackhi_epi32(*lo, *hi); // bbbbBBBB aaaaAAAA
+
+ // Unpack to 16-bit planar.
+ __m128i r = _mm_unpacklo_epi8(rg, zeros), // r_r_r_r_ R_R_R_R_
+ g = _mm_unpackhi_epi8(rg, zeros), // g_g_g_g_ G_G_G_G_
+ b = _mm_unpacklo_epi8(ba, zeros), // b_b_b_b_ B_B_B_B_
+ a = _mm_unpackhi_epi8(ba, zeros); // a_a_a_a_ A_A_A_A_
+
+ // Premultiply!
+ r = scale(r, a);
+ g = scale(g, a);
+ b = scale(b, a);
+
+ // Repack into interlaced pixels.
+ rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)); // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_slli_epi16(a, 8)); // babababa BABABABA
+ *lo = _mm_unpacklo_epi16(rg, ba); // rgbargba rgbargba
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGBARGBA RGBARGBA
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ // Call portable code to finish up the tail of [0,4) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB<false>(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB<true>(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ const __m128i swapRB = _mm_setr_epi8(2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15);
+
+ while (count >= 4) {
+ __m128i rgba = _mm_loadu_si128((const __m128i*) src);
+ __m128i bgra = _mm_shuffle_epi8(rgba, swapRB);
+ _mm_storeu_si128((__m128i*) dst, bgra);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+template <bool kSwapRB>
+static void insert_alpha_should_swaprb(uint32_t dst[], const uint8_t* src, int count) {
+ const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
+ __m128i expand;
+    const uint8_t X = 0xFF; // Used as a placeholder. The value of X is irrelevant.
+ if (kSwapRB) {
+ expand = _mm_setr_epi8(2,1,0,X, 5,4,3,X, 8,7,6,X, 11,10,9,X);
+ } else {
+ expand = _mm_setr_epi8(0,1,2,X, 3,4,5,X, 6,7,8,X, 9,10,11,X);
+ }
+
+ while (count >= 6) {
+ // Load a vector. While this actually contains 5 pixels plus an
+ // extra component, we will discard all but the first four pixels on
+ // this iteration.
+ __m128i rgb = _mm_loadu_si128((const __m128i*) src);
+
+ // Expand the first four pixels to RGBX and then mask to RGB(FF).
+ __m128i rgba = _mm_or_si128(_mm_shuffle_epi8(rgb, expand), alphaMask);
+
+ // Store 4 pixels.
+ _mm_storeu_si128((__m128i*) dst, rgba);
+
+ src += 4*3;
+ dst += 4;
+ count -= 4;
+ }
+
+ // Call portable code to finish up the tail of [0,4) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb<false>(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb<true>(dst, src, count);
+}
+
+/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ const __m128i alphas = _mm_set1_epi8((uint8_t) 0xFF);
+ while (count >= 16) {
+ __m128i grays = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i gg_lo = _mm_unpacklo_epi8(grays, grays);
+ __m128i gg_hi = _mm_unpackhi_epi8(grays, grays);
+ __m128i ga_lo = _mm_unpacklo_epi8(grays, alphas);
+ __m128i ga_hi = _mm_unpackhi_epi8(grays, alphas);
+
+ __m128i ggga0 = _mm_unpacklo_epi16(gg_lo, ga_lo);
+ __m128i ggga1 = _mm_unpackhi_epi16(gg_lo, ga_lo);
+ __m128i ggga2 = _mm_unpacklo_epi16(gg_hi, ga_hi);
+ __m128i ggga3 = _mm_unpackhi_epi16(gg_hi, ga_hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga0);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga1);
+ _mm_storeu_si128((__m128i*) (dst + 8), ggga2);
+ _mm_storeu_si128((__m128i*) (dst + 12), ggga3);
+
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 8) {
+ __m128i ga = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i gg = _mm_or_si128(_mm_and_si128(ga, _mm_set1_epi16(0x00FF)),
+ _mm_slli_epi16(ga, 8));
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 8) {
+ __m128i grayA = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i g0 = _mm_and_si128(grayA, _mm_set1_epi16(0x00FF));
+ __m128i a0 = _mm_srli_epi16(grayA, 8);
+
+ // Premultiply
+ g0 = scale(g0, a0);
+
+ __m128i gg = _mm_or_si128(g0, _mm_slli_epi16(g0, 8));
+ __m128i ga = _mm_or_si128(g0, _mm_slli_epi16(a0, 8));
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+template <Format format>
+static void inverted_cmyk_to(uint32_t* dst, const uint32_t* src, int count) {
+ auto convert8 = [](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kBGR1 == format) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // ccccmmmm yyyykkkk
+ *hi = _mm_shuffle_epi8(*hi, planar); // CCCCMMMM YYYYKKKK
+ __m128i cm = _mm_unpacklo_epi32(*lo, *hi), // ccccCCCC mmmmMMMM
+ yk = _mm_unpackhi_epi32(*lo, *hi); // yyyyYYYY kkkkKKKK
+
+ // Unpack to 16-bit planar.
+ __m128i c = _mm_unpacklo_epi8(cm, zeros), // c_c_c_c_ C_C_C_C_
+ m = _mm_unpackhi_epi8(cm, zeros), // m_m_m_m_ M_M_M_M_
+ y = _mm_unpacklo_epi8(yk, zeros), // y_y_y_y_ Y_Y_Y_Y_
+ k = _mm_unpackhi_epi8(yk, zeros); // k_k_k_k_ K_K_K_K_
+
+ // Scale to r, g, b.
+ __m128i r = scale(c, k),
+ g = scale(m, k),
+ b = scale(y, k);
+
+ // Repack into interlaced pixels.
+ __m128i rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)), // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_set1_epi16((uint16_t) 0xFF00)); // b1b1b1b1 B1B1B1B1
+        *lo = _mm_unpacklo_epi16(rg, ba); // rgb1rgb1 rgb1rgb1
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGB1RGB1 RGB1RGB1
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to<kRGB1>(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to<kBGR1>(dst, src, count);
+}
+
+#else
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_rgbA_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_bgrA_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ RGB_to_RGB1_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ RGB_to_BGR1_portable(dst, src, count);
+}
+
+/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_CMYK_to_RGB1_portable(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_CMYK_to_BGR1_portable(dst, src, count);
+}
+
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif // SkSwizzler_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkUtils_opts.h b/gfx/skia/skia/src/opts/SkUtils_opts.h
new file mode 100644
index 0000000000..6605457b17
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkUtils_opts.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtils_opts_DEFINED
+#define SkUtils_opts_DEFINED
+
+#include <stdint.h>
+#include "include/private/SkNx.h"
+
+namespace SK_OPTS_NS {
+
+ template <typename T>
+ static void memsetT(T buffer[], T value, int count) {
+ #if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ static const int N = 32 / sizeof(T);
+ #else
+ static const int N = 16 / sizeof(T);
+ #endif
+ while (count >= N) {
+ SkNx<N,T>(value).store(buffer);
+ buffer += N;
+ count -= N;
+ }
+ while (count --> 0) {
+ *buffer++ = value;
+ }
+ }
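+
+    // Usage sketch (illustrative): with T = uint32_t and AVX enabled, N = 32/4 = 8,
+    // so memsetT(buf, 0xFF00FF00u, 20) issues two 8-wide vector stores and then
+    // writes the final 4 elements in the scalar tail loop.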
+
+ /*not static*/ inline void memset16(uint16_t buffer[], uint16_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+ /*not static*/ inline void memset32(uint32_t buffer[], uint32_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+ /*not static*/ inline void memset64(uint64_t buffer[], uint64_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+
+ template <typename T>
+ static void rect_memsetT(T buffer[], T value, int count, size_t rowBytes, int height) {
+ while (height --> 0) {
+ memsetT(buffer, value, count);
+ buffer = (T*)((char*)buffer + rowBytes);
+ }
+ }
+
+ /*not static*/ inline void rect_memset16(uint16_t buffer[], uint16_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+ /*not static*/ inline void rect_memset32(uint32_t buffer[], uint32_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+ /*not static*/ inline void rect_memset64(uint64_t buffer[], uint64_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+
+} // namespace SK_OPTS_NS
+
+#endif//SkUtils_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkXfermode_opts.h b/gfx/skia/skia/src/opts/SkXfermode_opts.h
new file mode 100644
index 0000000000..577d6e2538
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkXfermode_opts.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4pxXfermode_DEFINED
+#define Sk4pxXfermode_DEFINED
+
+#include "include/private/SkNx.h"
+#include "src/core/Sk4px.h"
+#include "src/core/SkMSAN.h"
+#include "src/core/SkXfermodePriv.h"
+
+#ifdef SK_FORCE_RASTER_PIPELINE_BLITTER
+
+namespace SK_OPTS_NS {
+ /*not static*/ inline SkXfermode* create_xfermode(SkBlendMode) { return nullptr; }
+}
+
+#else
+
+namespace { // NOLINT(google-build-namespaces)
+
+// Most xfermodes can be done most efficiently 4 pixels at a time in 8 or 16-bit fixed point.
+#define XFERMODE(Xfermode) \
+ struct Xfermode { Sk4px operator()(const Sk4px&, const Sk4px&) const; }; \
+ inline Sk4px Xfermode::operator()(const Sk4px& d, const Sk4px& s) const
+
+XFERMODE(Clear) { return Sk4px::DupPMColor(0); }
+XFERMODE(Src) { return s; }
+XFERMODE(Dst) { return d; }
+XFERMODE(SrcIn) { return s.approxMulDiv255(d.alphas() ); }
+XFERMODE(SrcOut) { return s.approxMulDiv255(d.alphas().inv()); }
+XFERMODE(SrcOver) { return s + d.approxMulDiv255(s.alphas().inv()); }
+XFERMODE(DstIn) { return SrcIn ()(s,d); }
+XFERMODE(DstOut) { return SrcOut ()(s,d); }
+XFERMODE(DstOver) { return SrcOver()(s,d); }
+
+// [ S * Da + (1 - Sa) * D ]
+XFERMODE(SrcATop) { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
+XFERMODE(DstATop) { return SrcATop()(s,d); }
+// [ S * (1 - Da) + (1 - Sa) * D ]
+XFERMODE(Xor) { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
+// [ S + D ]
+XFERMODE(Plus) { return s.saturatedAdd(d); }
+// [ S * D ]
+XFERMODE(Modulate) { return s.approxMulDiv255(d); }
+// [ S + D - S * D ]
+XFERMODE(Screen) {
+ // Doing the math as S + (1-S)*D or S + (D - S*D) means the add and subtract can be done
+ // in 8-bit space without overflow. S + (1-S)*D is a touch faster because inv() is cheap.
+ return s + d.approxMulDiv255(s.inv());
+}
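+// Quick numeric check of the rewrite (illustrative, in [0,1] math): with S = 0.5 and
+// D = 0.25, S + D - S*D = 0.625 and S + (1-S)*D = 0.5 + 0.5*0.25 = 0.625, the same surface.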
+
+#undef XFERMODE
+
+// A reasonable fallback for antialiasing is to simply apply the transfer mode first,
+// then linearly interpolate between dst and that result using the AA coverage.
+template <typename Xfermode>
+static Sk4px xfer_aa(const Sk4px& d, const Sk4px& s, const Sk4px& aa) {
+ Sk4px bw = Xfermode()(d, s);
+ return (bw * aa + d * aa.inv()).div255();
+}
+
+// For some transfermodes we specialize AA, either for correctness or performance.
+#define XFERMODE_AA(Xfermode) \
+ template <> Sk4px xfer_aa<Xfermode>(const Sk4px& d, const Sk4px& s, const Sk4px& aa)
+
+// Plus' clamp needs to happen after AA. skia:3852
+XFERMODE_AA(Plus) { // [ clamp( (1-AA)D + (AA)(S+D) ) == clamp(D + AA*S) ]
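+    // Expanding the identity above: (1-AA)*D + AA*(S+D) = D - AA*D + AA*S + AA*D = D + AA*S,
+    // so only S needs scaling by coverage, and saturatedAdd() provides the clamp for free.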
+ return d.saturatedAdd(s.approxMulDiv255(aa));
+}
+
+#undef XFERMODE_AA
+
+// Src and Clear modes are safe to use with uninitialized dst buffers,
+// even if the implementation branches based on bytes from dst (e.g. asserts in Debug mode).
+// For those modes, just lie to MSAN that dst is always initialized.
+template <typename Xfermode> static void mark_dst_initialized_if_safe(void*, void*) {}
+template <> void mark_dst_initialized_if_safe<Src>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Src doesn't read dst.");
+}
+template <> void mark_dst_initialized_if_safe<Clear>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Clear doesn't read dst.");
+}
+
+template <typename Xfermode>
+class Sk4pxXfermode : public SkXfermode {
+public:
+ Sk4pxXfermode() {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ mark_dst_initialized_if_safe<Xfermode>(dst, dst+n);
+ if (nullptr == aa) {
+ Sk4px::MapDstSrc(n, dst, src, Xfermode());
+ } else {
+ Sk4px::MapDstSrcAlpha(n, dst, src, aa, xfer_aa<Xfermode>);
+ }
+ }
+};
+
+} // namespace
+
+namespace SK_OPTS_NS {
+
+/*not static*/ inline SkXfermode* create_xfermode(SkBlendMode mode) {
+ switch (mode) {
+#define CASE(Xfermode) \
+ case SkBlendMode::k##Xfermode: return new Sk4pxXfermode<Xfermode>()
+ CASE(Clear);
+ CASE(Src);
+ CASE(Dst);
+ CASE(SrcOver);
+ CASE(DstOver);
+ CASE(SrcIn);
+ CASE(DstIn);
+ CASE(SrcOut);
+ CASE(DstOut);
+ CASE(SrcATop);
+ CASE(DstATop);
+ CASE(Xor);
+ CASE(Plus);
+ CASE(Modulate);
+ CASE(Screen);
+ #undef CASE
+
+ default: break;
+ }
+ return nullptr;
+}
+
+} // namespace SK_OPTS_NS
+
+#endif // #ifdef SK_FORCE_RASTER_PIPELINE_BLITTER
+
+#endif//Sk4pxXfermode_DEFINED
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.cpp b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
new file mode 100644
index 0000000000..75de64e8c8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkPathOpsBounds.h"
+
+#include <utility>
+
+#if DEBUG_ADD_INTERSECTING_TS
+
+static void debugShowLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " LINE_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, LINE_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " LINE_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], LINE_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wtTs, 1) " " PT_DEBUG_STR, i[0][1], PT_DEBUG_DATA(i, 1));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wnTs, 1), i[1][1]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn,
+ const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()),
+ CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CUBIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CUBIC_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CUBIC_DEBUG_STR, i[1][0], CUBIC_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+#else
+static void debugShowLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+#endif
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence) {
+ if (test != next) {
+ if (AlmostLessUlps(test->bounds().fBottom, next->bounds().fTop)) {
+ return false;
+ }
+ // OPTIMIZATION: outset contour bounds a smidgen instead?
+ if (!SkPathOpsBounds::Intersects(test->bounds(), next->bounds())) {
+ return true;
+ }
+ }
+ SkIntersectionHelper wt;
+ wt.init(test);
+ do {
+ SkIntersectionHelper wn;
+ wn.init(next);
+ test->debugValidate();
+ next->debugValidate();
+ if (test == next && !wn.startAfter(wt)) {
+ continue;
+ }
+ do {
+ if (!SkPathOpsBounds::Intersects(wt.bounds(), wn.bounds())) {
+ continue;
+ }
+ int pts = 0;
+ SkIntersections ts { SkDEBUGCODE(test->globalState()) };
+ bool swap = false;
+ SkDQuad quad1, quad2;
+ SkDConic conic1, conic2;
+ SkDCubic cubic1, cubic2;
+ switch (wt.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ pts = ts.quadHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ pts = ts.conicHorizontal(wn.pts(), wn.weight(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ pts = ts.cubicHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment: {
+ pts = ts.lineVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.quadVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.conicVertical(wn.pts(), wn.weight(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.cubicVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.lineHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.lineVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineLine(wt.pts(), wn.pts());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ swap = true;
+ pts = ts.quadLine(wn.pts(), wt.pts());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ swap = true;
+ pts = ts.conicLine(wn.pts(), wn.weight(), wt.pts());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ swap = true;
+ pts = ts.cubicLine(wn.pts(), wt.pts());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.quadHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.quadVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.quadLine(wt.pts(), wn.pts());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(quad1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ swap = true;
+ pts = ts.intersect(conic2.set(wn.pts(), wn.weight()),
+ quad1.set(wt.pts()));
+ debugShowConicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()), quad1.set(wt.pts()));
+ debugShowCubicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.conicHorizontal(wt.pts(), wt.weight(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.conicVertical(wt.pts(), wt.weight(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.conicLine(wt.pts(), wt.weight(), wn.pts());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ quad2.set(wn.pts()));
+ debugShowConicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ conic2.set(wn.pts(), wn.weight()));
+ debugShowConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()
+ SkDEBUGPARAMS(ts.globalState())),
+ conic1.set(wt.pts(), wt.weight()
+ SkDEBUGPARAMS(ts.globalState())));
+ debugShowCubicConicIntersection(pts, wn, wt, ts);
+ break;
+ }
+ }
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.cubicHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.cubicVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.cubicLine(wt.pts(), wn.pts());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowCubicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()
+ SkDEBUGPARAMS(ts.globalState())),
+ conic2.set(wn.pts(), wn.weight()
+ SkDEBUGPARAMS(ts.globalState())));
+ debugShowCubicConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), cubic2.set(wn.pts()));
+ debugShowCubicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ default:
+ SkASSERT(0);
+ }
+#if DEBUG_T_SECT_LOOP_COUNT
+ test->globalState()->debugAddLoopCount(&ts, wt, wn);
+#endif
+ int coinIndex = -1;
+ SkOpPtT* coinPtT[2];
+ for (int pt = 0; pt < pts; ++pt) {
+ SkASSERT(ts[0][pt] >= 0 && ts[0][pt] <= 1);
+ SkASSERT(ts[1][pt] >= 0 && ts[1][pt] <= 1);
+ wt.segment()->debugValidate();
+ // If the t value is used to compute the pt in addT, error may creep in, and
+ // rect intersections may result in non-rects. If the pt value from the
+ // intersection is passed in, current tests break. As a workaround, pass in
+ // the pt value from the intersection only if pt.x and pt.y are integral.
+ SkPoint iPt = ts.pt(pt).asSkPoint();
+ bool iPtIsIntegral = iPt.fX == floor(iPt.fX) && iPt.fY == floor(iPt.fY);
+ SkOpPtT* testTAt = iPtIsIntegral ? wt.segment()->addT(ts[swap][pt], iPt)
+ : wt.segment()->addT(ts[swap][pt]);
+ wn.segment()->debugValidate();
+ SkOpPtT* nextTAt = iPtIsIntegral ? wn.segment()->addT(ts[!swap][pt], iPt)
+ : wn.segment()->addT(ts[!swap][pt]);
+ if (!testTAt->contains(nextTAt)) {
+ SkOpPtT* oppPrev = testTAt->oppPrev(nextTAt); // Returns nullptr if pair
+ if (oppPrev) { // already share a pt-t loop.
+ testTAt->span()->mergeMatches(nextTAt->span());
+ testTAt->addOpp(nextTAt, oppPrev);
+ }
+ if (testTAt->fPt != nextTAt->fPt) {
+ testTAt->span()->unaligned();
+ nextTAt->span()->unaligned();
+ }
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ }
+ if (!ts.isCoincident(pt)) {
+ continue;
+ }
+ if (coinIndex < 0) {
+ coinPtT[0] = testTAt;
+ coinPtT[1] = nextTAt;
+ coinIndex = pt;
+ continue;
+ }
+ if (coinPtT[0]->span() == testTAt->span()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (coinPtT[1]->span() == nextTAt->span()) {
+ coinIndex = -1; // coincidence span collapsed
+ continue;
+ }
+ if (swap) {
+ using std::swap;
+ swap(coinPtT[0], coinPtT[1]);
+ swap(testTAt, nextTAt);
+ }
+ SkASSERT(coincidence->globalState()->debugSkipAssert()
+ || coinPtT[0]->span()->t() < testTAt->span()->t());
+ if (coinPtT[0]->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (testTAt->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ coincidence->add(coinPtT[0], testTAt, coinPtT[1], nextTAt);
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ coinIndex = -1;
+ }
+ SkOPOBJASSERT(coincidence, coinIndex < 0); // expect coincidence to be paired
+ } while (wn.advance());
+ } while (wt.advance());
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.h b/gfx/skia/skia/src/pathops/SkAddIntersections.h
new file mode 100644
index 0000000000..b48493bf54
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkAddIntersections_DEFINED
+#define SkAddIntersections_DEFINED
+
+#include "src/pathops/SkIntersectionHelper.h"
+#include "src/pathops/SkIntersections.h"
+
+class SkOpCoincidence;
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
new file mode 100644
index 0000000000..6a9eb4b76c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+
+class LineConicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineConicIntersections(const SkDConic& c, const SkDLine& l, SkIntersections* i)
+ : fConic(c)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4); // allow short partial coincidence plus discrete intersection
+ }
+
+ LineConicIntersections(const SkDConic& c)
+ : fConic(c)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double conicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ double t = fLine->nearPoint(conicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+#ifdef SK_DEBUG
+ static bool close_to(double a, double b, const double c[3]) {
+ double max = SkTMax(-SkTMin(SkTMin(c[0], c[1]), c[2]), SkTMax(SkTMax(c[0], c[1]), c[2]));
+ return approximately_zero_when_compared_to(a - b, max);
+ }
+#endif
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ this->addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ this->addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[2];
+ int count = this->horizontalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fY, axisIntercept, conicVals));
+ double lineT = (pt.fX - left) / (right - left);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersect() {
+ this->addExactEndPoints();
+ if (fAllowNear) {
+ this->addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = this->intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double conicT = rootVals[index];
+ double lineT = this->findLineT(conicT);
+#ifdef SK_DEBUG
+ if (!fIntersections->globalState()
+ || !fIntersections->globalState()->debugSkipAssert()) {
+ SkDEBUGCODE(SkDPoint conicPt = fConic.ptAtT(conicT));
+ SkDEBUGCODE(SkDPoint linePt = fLine->ptAtT(lineT));
+ SkASSERT(conicPt.approximatelyDEqual(linePt));
+ }
+#endif
+ SkDPoint pt;
+ if (this->pinTs(&conicT, &lineT, &pt, kPointUninitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersectRay(double roots[2]) {
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fConic[n].fY - (*fLine)[0].fY) * adj - (fConic[n].fX - (*fLine)[0].fX) * opp;
+ }
+ return this->validT(r, 0, roots);
+ }
+
+ int validT(double r[3], double axisIntercept, double roots[2]) {
+ double A = r[2];
+ double B = r[1] * fConic.fWeight - axisIntercept * fConic.fWeight + axisIntercept;
+ double C = r[0];
+ A += C - 2 * B; // A = a + c - 2*(b*w - xCept*w + xCept)
+ B -= C; // B = b*w - w * xCept + xCept - a
+ C -= axisIntercept;
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ this->addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ this->addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[2];
+ int count = this->verticalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fX, axisIntercept, conicVals));
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+// OPTIMIZE: Functions of the form add .. points are identical to the corresponding quad and cubic routines.
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = fLine->exactPoint(fConic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fConic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double conicT = ((SkDCurve*) &fConic)->nearPoint(SkPath::kConic_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (conicT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fConic.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* conicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *conicT = SkPinT(*conicT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fConic.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fConic[0].asSkPoint()) {
+ *pt = fConic[0];
+ *conicT = 0;
+ } else if (gridPt == fConic[2].asSkPoint()) {
+ *pt = fConic[2];
+ *conicT = 1;
+ }
+ return true;
+ }
+
+ bool uniqueAnswer(double conicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingConicT = (*fIntersections)[0][inner];
+ if (conicT == existingConicT) {
+ return false;
+ }
+ // check if the midway point on the conic is also the same point; if so, discard this one
+ double conicMidT = (existingConicT + conicT) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ if (conicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fConic.ptAtT(conicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+private:
+ const SkDConic& fConic;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDConic& conic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineConicIntersections c(conic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDConic& conic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineConicIntersections c(conic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = conic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots) {
+ LineConicIntersections c(conic);
+ return c.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots) {
+ LineConicIntersections c(conic);
+ return c.verticalIntersect(x, roots);
+}
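+
+/* A minimal usage sketch (an editor's illustration, not upstream code; the
+   control points and weight are assumptions chosen for easy checking). With
+   weight == 1 the conic reduces to the quadratic y = 2t(1 - t) over x in [0, 2],
+   so the horizontal span y = 0.25, x in [0, 2] should cross it twice:
+
+       SkPoint pts[3] = {{0, 0}, {1, 1}, {2, 0}};
+       SkDConic conic;
+       conic.set(pts, 1.0f);                                 // weight 1: degenerates to a quad
+       SkIntersections i;
+       int count = i.horizontal(conic, 0, 2, 0.25, false);   // expect 2 roots
+       for (int n = 0; n < count; ++n) {
+           SkDebugf("conicT=%g pt=(%g,%g)\n", i[0][n], i.pt(n).fX, i.pt(n).fY);
+       }
+*/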
diff --git a/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
new file mode 100644
index 0000000000..98cc03d5c0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+
+/*
+Find the intersection of a line and cubic by solving for valid t values.
+
+Analogous to line-quadratic intersection, solve line-cubic intersection by
+representing the cubic as:
+ x = a(1-t)^3 + 3b(1-t)^2t + 3c(1-t)t^2 + dt^3
+ y = e(1-t)^3 + 3f(1-t)^2t + 3g(1-t)t^2 + ht^3
+and the line as:
+ y = i*x + j (if the line is more horizontal)
+or:
+ x = i*y + j (if the line is more vertical)
+
+Then using Mathematica, solve for the values of t where the cubic intersects the
+line:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - x,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - i*x - j, x]
+ (out) -e + j +
+ 3 e t - 3 f t -
+ 3 e t^2 + 6 f t^2 - 3 g t^2 +
+ e t^3 - 3 f t^3 + 3 g t^3 - h t^3 +
+ i ( a -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 )
+
+if i goes to infinity, we can rewrite the line in terms of x. Mathematica:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - i*y - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+ (out) a - j -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 -
+ i ( e -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3 )
+
+Solving this with Mathematica produces an expression with hundreds of terms;
+instead, use the Numeric Solutions recipe to solve the cubic.
+
+The near-horizontal case, in terms of: At^3 + Bt^2 + Ct + D == 0
+ A = (-(-e + 3*f - 3*g + h) + i*(-a + 3*b - 3*c + d) )
+ B = 3*(-( e - 2*f + g ) + i*( a - 2*b + c ) )
+ C = 3*(-(-e + f ) + i*(-a + b ) )
+ D = (-( e ) + i*( a ) + j )
+
+The near-vertical case, in terms of: At^3 + Bt^2 + Ct + D == 0
+ A = ( (-a + 3*b - 3*c + d) - i*(-e + 3*f - 3*g + h) )
+ B = 3*( ( a - 2*b + c ) - i*( e - 2*f + g ) )
+ C = 3*( (-a + b ) - i*(-e + f ) )
+ D = ( ( a ) - i*( e ) - j )
+
+For horizontal lines:
+(in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+(out) e - j -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3
+ */
+
+class LineCubicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineCubicIntersections(const SkDCubic& c, const SkDLine& l, SkIntersections* i)
+ : fCubic(c)
+ , fLine(l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4);
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double cubicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ double t = fLine.nearPoint(cubicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ // see parallel routine in line quadratic intersections
+ int intersectRay(double roots[3]) {
+ double adj = fLine[1].fX - fLine[0].fX;
+ double opp = fLine[1].fY - fLine[0].fY;
+ SkDCubic c;
+ SkDEBUGCODE(c.fDebugGlobalState = fIntersections->globalState());
+ for (int n = 0; n < 4; ++n) {
+ c[n].fX = (fCubic[n].fY - fLine[0].fY) * adj - (fCubic[n].fX - fLine[0].fX) * opp;
+ }
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_zero(calcPt.fX)) {
+ for (int n = 0; n < 4; ++n) {
+ c[n].fY = (fCubic[n].fY - fLine[0].fY) * opp
+ + (fCubic[n].fX - fLine[0].fX) * adj;
+ }
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, 0, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[3];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double cubicT = rootVals[index];
+ double lineT = findLineT(cubicT);
+ SkDPoint pt;
+ if (pinTs(&cubicT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ static int HorizontalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fY, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fY, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fY, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kYAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[3];
+ int count = HorizontalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { fCubic.ptAtT(cubicT).fX, axisIntercept };
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double cubicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingCubicT = (*fIntersections)[0][inner];
+ if (cubicT == existingCubicT) {
+ return false;
+ }
+ // check if the midway point on the cubic is also the same point; if so, discard this one
+ double cubicMidT = (existingCubicT + cubicT) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ if (cubicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint cPt = fCubic.ptAtT(cubicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ cPt.fX, cPt.fY);
+#endif
+ return true;
+ }
+
+ static int VerticalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fX, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[3];
+ int count = VerticalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { axisIntercept, fCubic.ptAtT(cubicT).fY };
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ protected:
+
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = fLine.exactPoint(fCubic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ /* Note that this does not look for endpoints of the line that are near the cubic.
+ Those points are found later, when the check-ends pass looks for missing points. */
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = fLine.nearPoint(fCubic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double cubicT = ((SkDCurve*) &fCubic)->nearPoint(SkPath::kCubic_Verb,
+ fLine[lIndex], fLine[!lIndex]);
+ if (cubicT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fLine[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fCubic.ptAtT(t);
+ double dx = fLine[1].fX - fLine[0].fX;
+ double dy = fLine[1].fY - fLine[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - fLine[0].fX) / dx;
+ }
+ return (xy.fY - fLine[0].fY) / dy;
+ }
+
+ bool pinTs(double* cubicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more(*lineT)) {
+ return false;
+ }
+ double cT = *cubicT = SkPinT(*cubicT);
+ double lT = *lineT = SkPinT(*lineT);
+ SkDPoint lPt = fLine.ptAtT(lT);
+ SkDPoint cPt = fCubic.ptAtT(cT);
+ if (!lPt.roughlyEqual(cPt)) {
+ return false;
+ }
+ // FIXME: if points are roughly equal but not approximately equal, need to do
+ // a binary search like quad/quad intersection to find more precise t values
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && cT != 0 && cT != 1)) {
+ *pt = lPt;
+ } else if (ptSet == kPointUninitialized) {
+ *pt = cPt;
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (gridPt == fLine[0].asSkPoint()) {
+ *lineT = 0;
+ } else if (gridPt == fLine[1].asSkPoint()) {
+ *lineT = 1;
+ }
+ if (gridPt == fCubic[0].asSkPoint() && approximately_equal(*cubicT, 0)) {
+ *cubicT = 0;
+ } else if (gridPt == fCubic[3].asSkPoint() && approximately_equal(*cubicT, 1)) {
+ *cubicT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDCubic& fCubic;
+ const SkDLine& fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDCubic& cubic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDCubic& cubic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = cubic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+// SkDCubic accessors to Intersection utilities
+
+int SkDCubic::horizontalIntersect(double yIntercept, double roots[3]) const {
+ return LineCubicIntersections::HorizontalIntersect(*this, yIntercept, roots);
+}
+
+int SkDCubic::verticalIntersect(double xIntercept, double roots[3]) const {
+ return LineCubicIntersections::VerticalIntersect(*this, xIntercept, roots);
+}
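+
+/* A minimal usage sketch (an editor's illustration, not upstream code; the
+   control points are assumptions chosen for easy checking). For the points
+   below, x(t) = 3t and y(t) = 9t(1 - t)(1 - 2t), so the cubic crosses the
+   x-axis segment at t = 0, 1/2, and 1:
+
+       SkPoint pts[4] = {{0, 0}, {1, 3}, {2, -3}, {3, 0}};
+       SkDCubic cubic;
+       cubic.set(pts);
+       SkDLine line = {{{0, 0}, {3, 0}}};
+       SkIntersections i;
+       int count = i.intersect(cubic, line);   // expect 3: (0,0), (1.5,0), (3,0)
+       for (int n = 0; n < count; ++n) {
+           SkDebugf("cubicT=%g pt=(%g,%g)\n", i[0][n], i.pt(n).fX, i.pt(n).fY);
+       }
+*/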
diff --git a/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
new file mode 100644
index 0000000000..8f5c6fbde5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+http://stackoverflow.com/questions/2009160/how-do-i-convert-the-2-control-points-of-a-cubic-curve-to-the-single-control-poi
+*/
+
+/*
+Let's call the control points of the cubic Q0..Q3 and the control points of the quadratic P0..P2.
+Then for degree elevation, the equations are:
+
+Q0 = P0
+Q1 = 1/3 P0 + 2/3 P1
+Q2 = 2/3 P1 + 1/3 P2
+Q3 = P2
+In your case you have Q0..Q3 and you're solving for P0..P2. There are two ways to compute P1 from
+ the equations above:
+
+P1 = 3/2 Q1 - 1/2 Q0
+P1 = 3/2 Q2 - 1/2 Q3
+If this is a degree-elevated cubic, then both equations will give the same answer for P1. Since
+ it's likely not, your best bet is to average them. So,
+
+P1 = -1/4 Q0 + 3/4 Q1 + 3/4 Q2 - 1/4 Q3
+*/
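+
+/* Worked check (an editor's addition; the quadratic is chosen for easy
+arithmetic): degree-elevating P0=(0,0), P1=(1,2), P2=(2,0) gives
+Q = (0,0), (2/3,4/3), (4/3,4/3), (2,0). Plugging Q into the averaged formula,
+P1 = -1/4 (0,0) + 3/4 (2/3,4/3) + 3/4 (4/3,4/3) - 1/4 (2,0) = (1,2),
+which recovers the original middle control point exactly. */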
+
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+// used for testing only
+SkDQuad SkDCubic::toQuad() const {
+ SkDQuad quad;
+ quad[0] = fPts[0];
+ const SkDPoint fromC1 = {(3 * fPts[1].fX - fPts[0].fX) / 2, (3 * fPts[1].fY - fPts[0].fY) / 2};
+ const SkDPoint fromC2 = {(3 * fPts[2].fX - fPts[3].fX) / 2, (3 * fPts[2].fY - fPts[3].fY) / 2};
+ quad[1].fX = (fromC1.fX + fromC2.fX) / 2;
+ quad[1].fY = (fromC1.fY + fromC2.fY) / 2;
+ quad[2] = fPts[3];
+ return quad;
+}
diff --git a/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
new file mode 100644
index 0000000000..83fe84cd90
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsLine.h"
+
+#include <utility>
+
+void SkIntersections::cleanUpParallelLines(bool parallel) {
+ while (fUsed > 2) {
+ removeOne(1);
+ }
+ if (fUsed == 2 && !parallel) {
+ bool startMatch = fT[0][0] == 0 || zero_or_one(fT[1][0]);
+ bool endMatch = fT[0][1] == 1 || zero_or_one(fT[1][1]);
+ if ((!startMatch && !endMatch) || approximately_equal(fT[0][0], fT[0][1])) {
+ SkASSERT(startMatch || endMatch);
+ if (startMatch && endMatch && (fT[0][0] != 0 || !zero_or_one(fT[1][0]))
+ && fT[0][1] == 1 && zero_or_one(fT[1][1])) {
+ removeOne(0);
+ } else {
+ removeOne(endMatch);
+ }
+ }
+ }
+ if (fUsed == 2) {
+ fIsCoincident[0] = fIsCoincident[1] = 0x03;
+ }
+}
+
+void SkIntersections::computePoints(const SkDLine& line, int used) {
+ fPt[0] = line.ptAtT(fT[0][0]);
+ if ((fUsed = used) == 2) {
+ fPt[1] = line.ptAtT(fT[0][1]);
+ }
+}
+
+int SkIntersections::intersectRay(const SkDLine& a, const SkDLine& b) {
+ fMax = 2;
+ SkDVector aLen = a[1] - a[0];
+ SkDVector bLen = b[1] - b[0];
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double denom = bLen.fY * aLen.fX - aLen.fY * bLen.fX;
+ int used;
+ if (!approximately_zero(denom)) {
+ SkDVector ab0 = a[0] - b[0];
+ double numerA = ab0.fY * bLen.fX - bLen.fY * ab0.fX;
+ double numerB = ab0.fY * aLen.fX - aLen.fY * ab0.fX;
+ numerA /= denom;
+ numerB /= denom;
+ fT[0][0] = numerA;
+ fT[1][0] = numerB;
+ used = 1;
+ } else {
+ /* See if the axis intercepts match:
+ ay - ax * ayLen / axLen == by - bx * ayLen / axLen
+ axLen * (ay - ax * ayLen / axLen) == axLen * (by - bx * ayLen / axLen)
+ axLen * ay - ax * ayLen == axLen * by - bx * ayLen
+ */
+ if (!AlmostEqualUlps(aLen.fX * a[0].fY - aLen.fY * a[0].fX,
+ aLen.fX * b[0].fY - aLen.fY * b[0].fX)) {
+ return fUsed = 0;
+ }
+ // there's no great answer for intersection points for coincident rays, but return something
+ fT[0][0] = fT[1][0] = 0;
+ fT[0][1] = fT[1][1] = 1;
+ used = 2;
+ }
+ computePoints(a, used);
+ return fUsed;
+}
+
+// note that this only works if both lines are neither horizontal nor vertical
+int SkIntersections::intersect(const SkDLine& a, const SkDLine& b) {
+ fMax = 3; // note that we clean up so that there are no more than two in the end
+ // see if end points intersect the opposite line
+ double t;
+ for (int iA = 0; iA < 2; ++iA) {
+ if ((t = b.exactPoint(a[iA])) >= 0) {
+ insert(iA, t, a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if ((t = a.exactPoint(b[iB])) >= 0) {
+ insert(t, iB, b[iB]);
+ }
+ }
+ /* Determine the intersection point of two line segments
+ Return FALSE if the lines don't intersect
+ from: http://paulbourke.net/geometry/lineline2d/ */
+ double axLen = a[1].fX - a[0].fX;
+ double ayLen = a[1].fY - a[0].fY;
+ double bxLen = b[1].fX - b[0].fX;
+ double byLen = b[1].fY - b[0].fY;
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double axByLen = axLen * byLen;
+ double ayBxLen = ayLen * bxLen;
+ // detect parallel lines the same way here and in SkOpAngle operator <
+ // so that non-parallel means they are also sortable
+ bool unparallel = fAllowNear ? NotAlmostEqualUlps_Pin(axByLen, ayBxLen)
+ : NotAlmostDequalUlps(axByLen, ayBxLen);
+ if (unparallel && fUsed == 0) {
+ double ab0y = a[0].fY - b[0].fY;
+ double ab0x = a[0].fX - b[0].fX;
+ double numerA = ab0y * bxLen - byLen * ab0x;
+ double numerB = ab0y * axLen - ayLen * ab0x;
+ double denom = axByLen - ayBxLen;
+ if (between(0, numerA, denom) && between(0, numerB, denom)) {
+ fT[0][0] = numerA / denom;
+ fT[1][0] = numerB / denom;
+ computePoints(a, 1);
+ }
+ }
+/* Allow tracking that both sets of end points are near each other -- the lines are entirely
+ coincident -- even when the end points are not exactly the same.
+ Mark this as a 'wild card' for the end points, so that either point is considered totally
+ coincident. Then, avoid folding the lines over each other, but allow either end to mate
+ to the next set of lines.
+ */
+ if (fAllowNear || !unparallel) {
+ double aNearB[2];
+ double bNearA[2];
+ bool aNotB[2] = {false, false};
+ bool bNotA[2] = {false, false};
+ int nearCount = 0;
+ for (int index = 0; index < 2; ++index) {
+ aNearB[index] = t = b.nearPoint(a[index], &aNotB[index]);
+ nearCount += t >= 0;
+ bNearA[index] = t = a.nearPoint(b[index], &bNotA[index]);
+ nearCount += t >= 0;
+ }
+ if (nearCount > 0) {
+ // Skip if each segment contributes to one end point.
+ if (nearCount != 2 || aNotB[0] == aNotB[1]) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (!aNotB[iA]) {
+ continue;
+ }
+ int nearer = aNearB[iA] > 0.5;
+ if (!bNotA[nearer]) {
+ continue;
+ }
+ SkASSERT(a[iA] != b[nearer]);
+ SkOPASSERT(iA == (bNearA[nearer] > 0.5));
+ insertNear(iA, nearer, a[iA], b[nearer]);
+ aNearB[iA] = -1;
+ bNearA[nearer] = -1;
+ nearCount -= 2;
+ }
+ }
+ if (nearCount > 0) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (aNearB[iA] >= 0) {
+ insert(iA, aNearB[iA], a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if (bNearA[iB] >= 0) {
+ insert(bNearA[iB], iB, b[iB]);
+ }
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(!unparallel);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
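+
+/* A minimal usage sketch (an editor's illustration, not upstream code; the
+   segment endpoints are assumptions). Two segments crossing at their midpoints:
+
+       SkDLine a = {{{0, 0}, {2, 2}}};
+       SkDLine b = {{{0, 2}, {2, 0}}};
+       SkIntersections i;
+       int count = i.intersect(a, b);      // expect 1: t = 0.5 on each line
+       if (count == 1) {
+           SkDebugf("pt=(%g,%g)\n", i.pt(0).fX, i.pt(0).fY);   // (1,1)
+       }
+*/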
+
+static int horizontal_coincident(const SkDLine& line, double y) {
+ double min = line[0].fY;
+ double max = line[1].fY;
+ if (min > max) {
+ using std::swap;
+ swap(min, max);
+ }
+ if (min > y || max < y) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max) && max - min < fabs(line[0].fX - line[1].fX)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::HorizontalIntercept(const SkDLine& line, double y) {
+ SkASSERT(line[1].fY != line[0].fY);
+ return SkPinT((y - line[0].fY) / (line[1].fY - line[0].fY));
+}
+
+int SkIntersections::horizontal(const SkDLine& line, double left, double right,
+ double y, bool flipped) {
+ fMax = 3; // cleaning up parallel lines at the end will limit the result to 2 at most
+ // see if end points intersect the opposite line
+ double t;
+ const SkDPoint leftPt = { left, y };
+ if ((t = line.exactPoint(leftPt)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.exactPoint(rightPt)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = horizontal_coincident(line, y);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = HorizontalIntercept(line, y);
+ double xIntercept = line[0].fX + fT[0][0] * (line[1].fX - line[0].fX);
+ if (between(left, xIntercept, right)) {
+ fT[1][0] = (xIntercept - left) / (right - left);
+ if (flipped) {
+ // OPTIMIZATION: instead of swapping, pass original line, use [1].fX - [0].fX
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = xIntercept;
+ fPt[0].fY = y;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(leftPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.nearPoint(rightPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ return fUsed;
+}
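+
+/* A minimal usage sketch (an editor's illustration, not upstream code; the
+   values are assumptions). The segment from (0,0) to (4,4) meets the horizontal
+   span y = 2, x in [0, 4] at its midpoint:
+
+       SkDLine line = {{{0, 0}, {4, 4}}};
+       SkIntersections i;
+       int count = i.horizontal(line, 0, 4, 2, false);
+       if (count == 1) {
+           SkDebugf("t=%g pt=(%g,%g)\n", i[0][0], i.pt(0).fX, i.pt(0).fY);  // 0.5, (2,2)
+       }
+*/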
+
+static int vertical_coincident(const SkDLine& line, double x) {
+ double min = line[0].fX;
+ double max = line[1].fX;
+ if (min > max) {
+ using std::swap;
+ swap(min, max);
+ }
+ if (!precisely_between(min, x, max)) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::VerticalIntercept(const SkDLine& line, double x) {
+ SkASSERT(line[1].fX != line[0].fX);
+ return SkPinT((x - line[0].fX) / (line[1].fX - line[0].fX));
+}
+
+int SkIntersections::vertical(const SkDLine& line, double top, double bottom,
+ double x, bool flipped) {
+ fMax = 3; // cleaning up parallel lines at the end will bring this back to 2 at most
+ // see if end points intersect the opposite line
+ double t;
+ SkDPoint topPt = { x, top };
+ if ((t = line.exactPoint(topPt)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.exactPoint(bottomPt)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = vertical_coincident(line, x);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = VerticalIntercept(line, x);
+ double yIntercept = line[0].fY + fT[0][0] * (line[1].fY - line[0].fY);
+ if (between(top, yIntercept, bottom)) {
+ fT[1][0] = (yIntercept - top) / (bottom - top);
+ if (flipped) {
+ // OPTIMIZATION: instead of swapping, pass original line, use [1].fY - [0].fY
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = x;
+ fPt[0].fY = yIntercept;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(topPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.nearPoint(bottomPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
+
diff --git a/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
new file mode 100644
index 0000000000..95c56132a0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+/*
+Find the intersection of a line and quadratic by solving for valid t values.
+
+From http://stackoverflow.com/questions/1853637/how-to-find-the-mathematical-function-defining-a-bezier-curve
+
+"A Bezier curve is a parametric function. A quadratic Bezier curve (i.e. three
+control points) can be expressed as: F(t) = A(1 - t)^2 + B(1 - t)t + Ct^2 where
+A, B and C are points and t goes from zero to one.
+
+This will give you two equations:
+
+ x = a(1 - t)^2 + b(1 - t)t + ct^2
+ y = d(1 - t)^2 + e(1 - t)t + ft^2
+
+If you add for instance the line equation (y = kx + m) to that, you'll end up
+with three equations and three unknowns (x, y and t)."
+
+Similar to above, the quadratic is represented as
+ x = a(1-t)^2 + 2b(1-t)t + ct^2
+ y = d(1-t)^2 + 2e(1-t)t + ft^2
+and the line as
+ y = g*x + h
+
+Using Mathematica, solve for the values of t where the quadratic intersects the
+line:
+
+ (in) t1 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - x,
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - g*x - h, x]
+ (out) -d + h + 2 d t - 2 e t - d t^2 + 2 e t^2 - f t^2 +
+ g (a - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2)
+ (in) Solve[t1 == 0, t]
+ (out) {
+ {t -> (-2 d + 2 e + 2 a g - 2 b g -
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ },
+ {t -> (-2 d + 2 e + 2 a g - 2 b g +
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ }
+ }
+
+Using the results above (when the line tends towards horizontal)
+ A = (-(d - 2*e + f) + g*(a - 2*b + c) )
+ B = 2*( (d - e ) - g*(a - b ) )
+ C = (-(d ) + g*(a ) + h )
+
+If g goes to infinity, we can rewrite the line in terms of x.
+ x = g'*y + h'
+
+And solve accordingly in Mathematica:
+
+ (in) t2 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - g'*y - h',
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - y, y]
+ (out) a - h' - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2 -
+ g' (d - 2 d t + 2 e t + d t^2 - 2 e t^2 + f t^2)
+ (in) Solve[t2 == 0, t]
+ (out) {
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' -
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')]) /
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ },
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' +
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')])/
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ }
+ }
+
+Thus, if the slope of the line tends towards vertical, we use:
+ A = ( (a - 2*b + c) - g'*(d - 2*e + f) )
+ B = 2*(-(a - b ) + g'*(d - e ) )
+ C = ( (a ) - g'*(d ) - h' )
+ */
+
+class LineQuadraticIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineQuadraticIntersections(const SkDQuad& q, const SkDLine& l, SkIntersections* i)
+ : fQuad(q)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(5); // allow short partial coincidence plus discrete intersections
+ }
+
+ LineQuadraticIntersections(const SkDQuad& q)
+ : fQuad(q)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double quadMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ double t = fLine->nearPoint(quadMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ int intersectRay(double roots[2]) {
+ /*
+ solve by rotating line+quad so line is horizontal, then finding the roots
+ set up matrix to rotate quad to x-axis
+ |cos(a) -sin(a)|
+ |sin(a) cos(a)|
+       note that cos(a) = A(djacent) / Hypotenuse
+                  sin(a) = O(pposite) / Hypotenuse
+       since we are computing Ts, we can ignore the hypotenuse, the scale factor:
+ | A -O |
+ | O A |
+ A = line[1].fX - line[0].fX (adjacent side of the right triangle)
+ O = line[1].fY - line[0].fY (opposite side of the right triangle)
+       for each of the three points (i.e. n = 0 to 2)
+ quad[n].fY' = (quad[n].fY - line[0].fY) * A - (quad[n].fX - line[0].fX) * O
+ */
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fQuad[n].fY - (*fLine)[0].fY) * adj - (fQuad[n].fX - (*fLine)[0].fX) * opp;
+ }
+ double A = r[2];
+ double B = r[1];
+ double C = r[0];
+ A += C - 2 * B; // A = a - 2*b + c
+ B -= C; // B = -(b - c)
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ double lineT = findLineT(quadT);
+ SkDPoint pt;
+ if (pinTs(&quadT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fY; // f
+ double E = fQuad[1].fY; // e
+ double F = fQuad[0].fY; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = horizontalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double quadT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingQuadT = (*fIntersections)[0][inner];
+ if (quadT == existingQuadT) {
+ return false;
+ }
+ // check if midway on quad is also same point. If so, discard this
+ double quadMidT = (existingQuadT + quadT) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ if (quadMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fQuad.ptAtT(quadT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fX; // f
+ double E = fQuad[1].fX; // e
+ double F = fQuad[0].fX; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = verticalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = fLine->exactPoint(fQuad[qIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fQuad[qIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double quadT = ((SkDCurve*) &fQuad)->nearPoint(SkPath::kQuad_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (quadT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fQuad.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* quadT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *quadT = SkPinT(*quadT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fQuad.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fQuad[0].asSkPoint()) {
+ *pt = fQuad[0];
+ *quadT = 0;
+ } else if (gridPt == fQuad[2].asSkPoint()) {
+ *pt = fQuad[2];
+ *quadT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDQuad& fQuad;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDQuad& quad, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDQuad& quad, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ q.allowNear(fAllowNear);
+ return q.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ fUsed = q.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = quad.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.verticalIntersect(x, roots);
+}
+
+// SkDQuad accessors for the intersection utilities above
+
+int SkDQuad::horizontalIntersect(double yIntercept, double roots[2]) const {
+ return SkIntersections::HorizontalIntercept(*this, yIntercept, roots);
+}
+
+int SkDQuad::verticalIntersect(double xIntercept, double roots[2]) const {
+ return SkIntersections::VerticalIntercept(*this, xIntercept, roots);
+}
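The rotate-then-solve step documented in intersectRay() above can be exercised in isolation: project each control point onto the line's normal, leaving a one-dimensional quadratic in t whose roots are the intersection parameters. A hedged sketch with illustrative names; the real code defers root filtering to SkDQuad::RootsValidT:

    #include <cmath>
    #include <cstdio>

    struct Pt { double x, y; };

    // Roots of A*t^2 + B*t + C == 0 that land in [0,1]; simplified filtering.
    static int quadRoots01(double A, double B, double C, double roots[2]) {
        int n = 0;
        if (A == 0) {  // degenerate: linear in t
            if (B != 0) {
                double t = -C / B;
                if (t >= 0 && t <= 1) roots[n++] = t;
            }
            return n;
        }
        double disc = B * B - 4 * A * C;
        if (disc < 0) {
            return 0;
        }
        double s = sqrt(disc);
        double t1 = (-B + s) / (2 * A);
        double t2 = (-B - s) / (2 * A);
        if (t1 >= 0 && t1 <= 1) roots[n++] = t1;
        if (t2 >= 0 && t2 <= 1 && t2 != t1) roots[n++] = t2;
        return n;
    }

    int main() {
        Pt quad[3] = {{0, 0}, {1, 2}, {2, 0}};  // control points; curve peaks at y == 1
        Pt l0 = {0, 0.5}, l1 = {2, 0.5};        // horizontal test line
        double adj = l1.x - l0.x, opp = l1.y - l0.y;
        double r[3];
        for (int n = 0; n < 3; ++n) {  // signed distance of each point from the line
            r[n] = (quad[n].y - l0.y) * adj - (quad[n].x - l0.x) * opp;
        }
        double A = r[2] + r[0] - 2 * r[1];  // a - 2*b + c
        double B = r[1] - r[0];             // b - a
        double C = r[0];
        double roots[2];
        int count = quadRoots01(A, 2 * B, C, roots);
        for (int i = 0; i < count; ++i) {
            printf("quad t = %g\n", roots[i]);  // ~0.146447 and ~0.853553
        }
        return 0;
    }

For this quad, y(t) = 4t(1 - t), so y == 0.5 at t = 0.5 ± sqrt(2)/4, matching the printed roots.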
diff --git a/gfx/skia/skia/src/pathops/SkIntersectionHelper.h b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
new file mode 100644
index 0000000000..9eb7cbf807
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersectionHelper_DEFINED
+#define SkIntersectionHelper_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+
+#ifdef SK_DEBUG
+#include "src/pathops/SkPathOpsPoint.h"
+#endif
+
+class SkIntersectionHelper {
+public:
+ enum SegmentType {
+ kHorizontalLine_Segment = -1,
+ kVerticalLine_Segment = 0,
+ kLine_Segment = SkPath::kLine_Verb,
+ kQuad_Segment = SkPath::kQuad_Verb,
+ kConic_Segment = SkPath::kConic_Verb,
+ kCubic_Segment = SkPath::kCubic_Verb,
+ };
+
+ bool advance() {
+ fSegment = fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar bottom() const {
+ return bounds().fBottom;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fSegment->bounds();
+ }
+
+ SkOpContour* contour() const {
+ return fSegment->contour();
+ }
+
+ void init(SkOpContour* contour) {
+ fSegment = contour->first();
+ }
+
+ SkScalar left() const {
+ return bounds().fLeft;
+ }
+
+ const SkPoint* pts() const {
+ return fSegment->pts();
+ }
+
+ SkScalar right() const {
+ return bounds().fRight;
+ }
+
+ SkOpSegment* segment() const {
+ return fSegment;
+ }
+
+ SegmentType segmentType() const {
+ SegmentType type = (SegmentType) fSegment->verb();
+ if (type != kLine_Segment) {
+ return type;
+ }
+ if (fSegment->isHorizontal()) {
+ return kHorizontalLine_Segment;
+ }
+ if (fSegment->isVertical()) {
+ return kVerticalLine_Segment;
+ }
+ return kLine_Segment;
+ }
+
+ bool startAfter(const SkIntersectionHelper& after) {
+ fSegment = after.fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar top() const {
+ return bounds().fTop;
+ }
+
+ SkScalar weight() const {
+ return fSegment->weight();
+ }
+
+ SkScalar x() const {
+ return bounds().fLeft;
+ }
+
+ bool xFlipped() const {
+ return x() != pts()[0].fX;
+ }
+
+ SkScalar y() const {
+ return bounds().fTop;
+ }
+
+ bool yFlipped() const {
+ return y() != pts()[0].fY;
+ }
+
+private:
+ SkOpSegment* fSegment;
+};
+
+#endif
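The xFlipped()/yFlipped() tests above rely on the bounds being axis-sorted: if the bounds' minimum corner differs from the segment's first point, the segment runs in the direction of decreasing x or y. A standalone sketch of the same comparison, with hypothetical names:

    #include <algorithm>
    #include <cstdio>

    struct Seg { float x0, y0, x1, y1; };

    // A segment is "flipped" on an axis when the bounds' minimum on that axis
    // is not the segment's first point, i.e. the segment runs toward smaller values.
    static bool xFlipped(const Seg& s) { return std::min(s.x0, s.x1) != s.x0; }
    static bool yFlipped(const Seg& s) { return std::min(s.y0, s.y1) != s.y0; }

    int main() {
        Seg s = {3, 1, 0, 2};  // x runs 3 -> 0 (flipped); y runs 1 -> 2 (not)
        printf("xFlipped=%d yFlipped=%d\n", xFlipped(s), yFlipped(s));  // 1 0
        return 0;
    }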
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.cpp b/gfx/skia/skia/src/pathops/SkIntersections.cpp
new file mode 100644
index 0000000000..2b1db9a45f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkIntersections.h"
+
+int SkIntersections::closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt,
+ double* closestDist) const {
+ int closest = -1;
+ *closestDist = SK_ScalarMax;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ const SkDPoint& iPt = fPt[index];
+ double dist = testPt.distanceSquared(iPt);
+ if (*closestDist > dist) {
+ *closestDist = dist;
+ closest = index;
+ }
+ }
+ return closest;
+}
+
+void SkIntersections::flip() {
+ for (int index = 0; index < fUsed; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+}
+
+int SkIntersections::insert(double one, double two, const SkDPoint& pt) {
+ if (fIsCoincident[0] == 3 && between(fT[0][0], one, fT[0][1])) {
+ // For now, don't allow a mix of coincident and non-coincident intersections
+ return -1;
+ }
+ SkASSERT(fUsed <= 1 || fT[0][0] <= fT[0][1]);
+ int index;
+ for (index = 0; index < fUsed; ++index) {
+ double oldOne = fT[0][index];
+ double oldTwo = fT[1][index];
+ if (one == oldOne && two == oldTwo) {
+ return -1;
+ }
+ if (more_roughly_equal(oldOne, one) && more_roughly_equal(oldTwo, two)) {
+ if ((!precisely_zero(one) || precisely_zero(oldOne))
+ && (!precisely_equal(one, 1) || precisely_equal(oldOne, 1))
+ && (!precisely_zero(two) || precisely_zero(oldTwo))
+ && (!precisely_equal(two, 1) || precisely_equal(oldTwo, 1))) {
+ return -1;
+ }
+ SkASSERT(one >= 0 && one <= 1);
+ SkASSERT(two >= 0 && two <= 1);
+ // remove this and reinsert below in case replacing would make list unsorted
+ int remaining = fUsed - index - 1;
+ memmove(&fPt[index], &fPt[index + 1], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index], &fT[0][index + 1], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index], &fT[1][index + 1], sizeof(fT[1][0]) * remaining);
+ int clearMask = ~((1 << index) - 1);
+ fIsCoincident[0] -= (fIsCoincident[0] >> 1) & clearMask;
+ fIsCoincident[1] -= (fIsCoincident[1] >> 1) & clearMask;
+ --fUsed;
+ break;
+ }
+ #if ONE_OFF_DEBUG
+ if (pt.roughlyEqual(fPt[index])) {
+ SkDebugf("%s t=%1.9g pts roughly equal\n", __FUNCTION__, one);
+ }
+ #endif
+ }
+ for (index = 0; index < fUsed; ++index) {
+ if (fT[0][index] > one) {
+ break;
+ }
+ }
+ if (fUsed >= fMax) {
+        SkOPASSERT(0); // FIXME : if this error is to be handled at runtime in release, it must
+                       // be propagated all the way back to the caller, which should return failure.
+ fUsed = 0;
+ return 0;
+ }
+ int remaining = fUsed - index;
+ if (remaining > 0) {
+ memmove(&fPt[index + 1], &fPt[index], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index + 1], &fT[0][index], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index + 1], &fT[1][index], sizeof(fT[1][0]) * remaining);
+ int clearMask = ~((1 << index) - 1);
+ fIsCoincident[0] += fIsCoincident[0] & clearMask;
+ fIsCoincident[1] += fIsCoincident[1] & clearMask;
+ }
+ fPt[index] = pt;
+ if (one < 0 || one > 1) {
+ return -1;
+ }
+ if (two < 0 || two > 1) {
+ return -1;
+ }
+ fT[0][index] = one;
+ fT[1][index] = two;
+ ++fUsed;
+ SkASSERT(fUsed <= SK_ARRAY_COUNT(fPt));
+ return index;
+}
+
+void SkIntersections::insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2) {
+ SkASSERT(one == 0 || one == 1);
+ SkASSERT(two == 0 || two == 1);
+ SkASSERT(pt1 != pt2);
+ fNearlySame[one ? 1 : 0] = true;
+ (void) insert(one, two, pt1);
+ fPt2[one ? 1 : 0] = pt2;
+}
+
+int SkIntersections::insertCoincident(double one, double two, const SkDPoint& pt) {
+ int index = insertSwap(one, two, pt);
+ if (index >= 0) {
+ setCoincident(index);
+ }
+ return index;
+}
+
+void SkIntersections::setCoincident(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] |= bit;
+ fIsCoincident[1] |= bit;
+}
+
+void SkIntersections::merge(const SkIntersections& a, int aIndex, const SkIntersections& b,
+ int bIndex) {
+ this->reset();
+ fT[0][0] = a.fT[0][aIndex];
+ fT[1][0] = b.fT[0][bIndex];
+ fPt[0] = a.fPt[aIndex];
+ fPt2[0] = b.fPt[bIndex];
+ fUsed = 1;
+}
+
+int SkIntersections::mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const {
+ int result = -1;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ if (result < 0) {
+ result = index;
+ continue;
+ }
+ SkDVector best = fPt[result] - origin;
+ SkDVector test = fPt[index] - origin;
+ if (test.crossCheck(best) < 0) {
+ result = index;
+ }
+ }
+ return result;
+}
+
+void SkIntersections::removeOne(int index) {
+ int remaining = --fUsed - index;
+ if (remaining <= 0) {
+ return;
+ }
+ memmove(&fPt[index], &fPt[index + 1], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index], &fT[0][index + 1], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index], &fT[1][index + 1], sizeof(fT[1][0]) * remaining);
+// SkASSERT(fIsCoincident[0] == 0);
+ int coBit = fIsCoincident[0] & (1 << index);
+ fIsCoincident[0] -= ((fIsCoincident[0] >> 1) & ~((1 << index) - 1)) + coBit;
+ SkASSERT(!(coBit ^ (fIsCoincident[1] & (1 << index))));
+ fIsCoincident[1] -= ((fIsCoincident[1] >> 1) & ~((1 << index) - 1)) + coBit;
+}
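insert() and removeOne() above keep each entry's coincident flag packed in one bit of fIsCoincident, so deleting entry i must slide the higher bits down one place while preserving the lower ones. A small sketch of an equivalent mask update (the code above expresses the same shift with subtraction); names are illustrative:

    #include <cstdint>
    #include <cstdio>

    // Remove bit `index` from `mask`, shifting the higher bits down one place.
    static uint16_t removeBit(uint16_t mask, int index) {
        uint16_t keepLow = mask & ((1 << index) - 1);            // bits below index, untouched
        uint16_t shiftHigh = (mask >> 1) & ~((1 << index) - 1);  // bits above index, moved down
        return keepLow | shiftHigh;
    }

    int main() {
        uint16_t mask = 0xB;                  // binary 1011: entries 0, 1 and 3 are coincident
        uint16_t after = removeBit(mask, 1);  // delete entry 1
        printf("0x%x -> 0x%x\n", mask, after);  // 0xb -> 0x5: old entry 3 is now entry 2
        return 0;
    }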
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.h b/gfx/skia/skia/src/pathops/SkIntersections.h
new file mode 100644
index 0000000000..71d1c80fcc
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersections_DEFINED
+#define SkIntersections_DEFINED
+
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+class SkIntersections {
+public:
+ SkIntersections(SkDEBUGCODE(SkOpGlobalState* globalState = nullptr))
+ : fSwap(0)
+#ifdef SK_DEBUG
+ SkDEBUGPARAMS(fDebugGlobalState(globalState))
+ , fDepth(0)
+#endif
+ {
+ sk_bzero(fPt, sizeof(fPt));
+ sk_bzero(fPt2, sizeof(fPt2));
+ sk_bzero(fT, sizeof(fT));
+ sk_bzero(fNearlySame, sizeof(fNearlySame));
+#if DEBUG_T_SECT_LOOP_COUNT
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+#endif
+ reset();
+ fMax = 0; // require that the caller set the max
+ }
+
+ class TArray {
+ public:
+ explicit TArray(const double ts[10]) : fTArray(ts) {}
+ double operator[](int n) const {
+ return fTArray[n];
+ }
+ const double* fTArray;
+ };
+ TArray operator[](int n) const { return TArray(fT[n]); }
+
+ void allowNear(bool nearAllowed) {
+ fAllowNear = nearAllowed;
+ }
+
+ void clearCoincidence(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] &= ~bit;
+ fIsCoincident[1] &= ~bit;
+ }
+
+ int conicHorizontal(const SkPoint a[3], SkScalar weight, SkScalar left, SkScalar right,
+ SkScalar y, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return horizontal(conic, left, right, y, flipped);
+ }
+
+ int conicVertical(const SkPoint a[3], SkScalar weight, SkScalar top, SkScalar bottom,
+ SkScalar x, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return vertical(conic, top, bottom, x, flipped);
+ }
+
+ int conicLine(const SkPoint a[3], SkScalar weight, const SkPoint b[2]) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDLine line;
+ line.set(b);
+        fMax = 3; // was 2; permit a small coincident segment plus a non-coincident intersection
+ return intersect(conic, line);
+ }
+
+ int cubicHorizontal(const SkPoint a[4], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return horizontal(cubic, left, right, y, flipped);
+ }
+
+ int cubicVertical(const SkPoint a[4], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return vertical(cubic, top, bottom, x, flipped);
+ }
+
+ int cubicLine(const SkPoint a[4], const SkPoint b[2]) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDLine line;
+ line.set(b);
+ fMax = 3;
+ return intersect(cubic, line);
+ }
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+ bool hasT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (t == 0 ? fT[0][0] == 0 : fT[0][fUsed - 1] == 1);
+ }
+
+ bool hasOppT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (fT[1][0] == t || fT[1][fUsed - 1] == t);
+ }
+
+ int insertSwap(double one, double two, const SkDPoint& pt) {
+ if (fSwap) {
+ return insert(two, one, pt);
+ } else {
+ return insert(one, two, pt);
+ }
+ }
+
+ bool isCoincident(int index) {
+ return (fIsCoincident[0] & 1 << index) != 0;
+ }
+
+ int lineHorizontal(const SkPoint a[2], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return horizontal(line, left, right, y, flipped);
+ }
+
+ int lineVertical(const SkPoint a[2], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return vertical(line, top, bottom, x, flipped);
+ }
+
+ int lineLine(const SkPoint a[2], const SkPoint b[2]) {
+ SkDLine aLine, bLine;
+ aLine.set(a);
+ bLine.set(b);
+ fMax = 2;
+ return intersect(aLine, bLine);
+ }
+
+ bool nearlySame(int index) const {
+ SkASSERT(index == 0 || index == 1);
+ return fNearlySame[index];
+ }
+
+ const SkDPoint& pt(int index) const {
+ return fPt[index];
+ }
+
+ const SkDPoint& pt2(int index) const {
+ return fPt2[index];
+ }
+
+ int quadHorizontal(const SkPoint a[3], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return horizontal(quad, left, right, y, flipped);
+ }
+
+ int quadVertical(const SkPoint a[3], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return vertical(quad, top, bottom, x, flipped);
+ }
+
+ int quadLine(const SkPoint a[3], const SkPoint b[2]) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDLine line;
+ line.set(b);
+ return intersect(quad, line);
+ }
+
+ // leaves swap, max alone
+ void reset() {
+ fAllowNear = true;
+ fUsed = 0;
+ sk_bzero(fIsCoincident, sizeof(fIsCoincident));
+ }
+
+ void set(bool swap, int tIndex, double t) {
+ fT[(int) swap][tIndex] = t;
+ }
+
+ void setMax(int max) {
+ SkASSERT(max <= (int) SK_ARRAY_COUNT(fPt));
+ fMax = max;
+ }
+
+ void swap() {
+ fSwap ^= true;
+ }
+
+ bool swapped() const {
+ return fSwap;
+ }
+
+ int used() const {
+ return fUsed;
+ }
+
+ void downDepth() {
+ SkASSERT(--fDepth >= 0);
+ }
+
+ bool unBumpT(int index) {
+ SkASSERT(fUsed == 1);
+ fT[0][index] = fT[0][index] * (1 + BUMP_EPSILON * 2) - BUMP_EPSILON;
+ if (!between(0, fT[0][index], 1)) {
+ fUsed = 0;
+ return false;
+ }
+ return true;
+ }
+
+ void upDepth() {
+ SkASSERT(++fDepth < 16);
+ }
+
+ void alignQuadPts(const SkPoint a[3], const SkPoint b[3]);
+ int cleanUpCoincidence();
+ int closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt, double* dist) const;
+ void cubicInsert(double one, double two, const SkDPoint& pt, const SkDCubic& c1,
+ const SkDCubic& c2);
+ void flip();
+ int horizontal(const SkDLine&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, double tRange[2]);
+ int horizontal(const SkDCubic&, double y, double tRange[3]);
+ int horizontal(const SkDConic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, double tRange[3]);
+ static double HorizontalIntercept(const SkDLine& line, double y);
+ static int HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots);
+ static int HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots);
+ // FIXME : does not respect swap
+ int insert(double one, double two, const SkDPoint& pt);
+ void insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2);
+ // start if index == 0 : end if index == 1
+ int insertCoincident(double one, double two, const SkDPoint& pt);
+ int intersect(const SkDLine&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDLine&);
+ int intersect(const SkDConic&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDLine&);
+ int intersect(const SkDCubic&, const SkDQuad&);
+ int intersect(const SkDCubic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDCubic&);
+ int intersectRay(const SkDLine&, const SkDLine&);
+ int intersectRay(const SkDQuad&, const SkDLine&);
+ int intersectRay(const SkDConic&, const SkDLine&);
+ int intersectRay(const SkDCubic&, const SkDLine&);
+ int intersectRay(const SkTCurve& tCurve, const SkDLine& line) {
+ return tCurve.intersectRay(this, line);
+ }
+
+ void merge(const SkIntersections& , int , const SkIntersections& , int );
+ int mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const;
+ void removeOne(int index);
+ void setCoincident(int index);
+ int vertical(const SkDLine&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDQuad&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDConic&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDCubic&, double top, double bottom, double x, bool flipped);
+ static double VerticalIntercept(const SkDLine& line, double x);
+ static int VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots);
+ static int VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots);
+
+ int depth() const {
+#ifdef SK_DEBUG
+ return fDepth;
+#else
+ return 0;
+#endif
+ }
+
+ enum DebugLoop {
+ kIterations_DebugLoop,
+ kCoinCheck_DebugLoop,
+ kComputePerp_DebugLoop,
+ };
+
+ void debugBumpLoopCount(DebugLoop );
+ int debugCoincidentUsed() const;
+ int debugLoopCount(DebugLoop ) const;
+ void debugResetLoopCount();
+ void dump() const; // implemented for testing only
+
+private:
+ bool cubicCheckCoincidence(const SkDCubic& c1, const SkDCubic& c2);
+ bool cubicExactEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2);
+ void cubicNearEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2, const SkDRect& );
+ void cleanUpParallelLines(bool parallel);
+ void computePoints(const SkDLine& line, int used);
+
+ SkDPoint fPt[13]; // FIXME: since scans store points as SkPoint, this should also
+ SkDPoint fPt2[2]; // used by nearly same to store alternate intersection point
+ double fT[2][13];
+ uint16_t fIsCoincident[2]; // bit set for each curve's coincident T
+ bool fNearlySame[2]; // true if end points nearly match
+ unsigned char fUsed;
+ unsigned char fMax;
+ bool fAllowNear;
+ bool fSwap;
+#ifdef SK_DEBUG
+ SkOpGlobalState* fDebugGlobalState;
+ int fDepth;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+#endif
+};
+
+#endif
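A hypothetical use of the class declared above, intersecting two line segments and reading back both t arrays; it assumes these pathops sources are compiled alongside the snippet:

    #include "src/pathops/SkIntersections.h"

    static void demo() {
        SkPoint a[2] = {{0, 0}, {4, 4}};
        SkPoint b[2] = {{0, 4}, {4, 0}};
        SkIntersections i;
        int count = i.lineLine(a, b);  // sets fMax = 2 internally, then intersects
        for (int n = 0; n < count; ++n) {
            // i[0][n] is t on segment a; i[1][n] is t on segment b
            SkDebugf("t=%g/%g pt=(%g,%g)\n", i[0][n], i[1][n], i.pt(n).fX, i.pt(n).fY);
        }
    }

For these crossing diagonals, demo() reports one intersection at t=0.5 on both segments, point (2,2).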
diff --git a/gfx/skia/skia/src/pathops/SkLineParameters.h b/gfx/skia/skia/src/pathops/SkLineParameters.h
new file mode 100644
index 0000000000..45d1ed4ed6
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkLineParameters.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLineParameters_DEFINED
+#define SkLineParameters_DEFINED
+
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+// Sources
+// Computer-Aided Design, Volume 22, Number 9, November 1990, pp. 538-549
+// online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+// This turns a line segment into a parameterized line, of the form
+// ax + by + c = 0
+// When a^2 + b^2 == 1, the line is normalized.
+// The distance to the line for (x, y) is d(x,y) = ax + by + c
+//
+// Note that the distances below are not necessarily normalized. To get the true
+// distance, it's necessary to either call normalize() after xxxEndPoints(), or
+// divide the result of xxxDistance() by sqrt(normalSquared())
+
+class SkLineParameters {
+public:
+
+ bool cubicEndPoints(const SkDCubic& pts) {
+ int endIndex = 1;
+ cubicEndPoints(pts, 0, endIndex);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex);
+ SkASSERT(endIndex == 2);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex); // line
+ SkASSERT(endIndex == 3);
+ return false;
+ }
+ }
+ // FIXME: after switching to round sort, remove bumping fA
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // if cubic tangent is on x axis, look at next control point to break tie
+ // control point may be approximate, so it must move significantly to account for error
+ if (NotAlmostEqualUlps(pts[0].fY, pts[++endIndex].fY)) {
+ if (pts[0].fY > pts[endIndex].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+ if (endIndex == 3) {
+ return true;
+ }
+ SkASSERT(endIndex == 2);
+ if (pts[0].fY > pts[3].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+
+ void cubicEndPoints(const SkDCubic& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double cubicPart(const SkDCubic& part) {
+ cubicEndPoints(part);
+ if (part[0] == part[1] || ((const SkDLine& ) part[0]).nearRay(part[2])) {
+ return pointDistance(part[3]);
+ }
+ return pointDistance(part[2]);
+ }
+
+ void lineEndPoints(const SkDLine& pts) {
+ fA = pts[0].fY - pts[1].fY;
+ fB = pts[1].fX - pts[0].fX;
+ fC = pts[0].fX * pts[1].fY - pts[1].fX * pts[0].fY;
+ }
+
+ bool quadEndPoints(const SkDQuad& pts) {
+ quadEndPoints(pts, 0, 1);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ quadEndPoints(pts, 0, 2);
+ return false;
+ }
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // FIXME: after switching to round sort, remove this
+ if (pts[0].fY > pts[2].fY) {
+ fA = DBL_EPSILON;
+ }
+ return true;
+ }
+
+ void quadEndPoints(const SkDQuad& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double quadPart(const SkDQuad& part) {
+ quadEndPoints(part);
+ return pointDistance(part[2]);
+ }
+
+ double normalSquared() const {
+ return fA * fA + fB * fB;
+ }
+
+ bool normalize() {
+ double normal = sqrt(normalSquared());
+ if (approximately_zero(normal)) {
+ fA = fB = fC = 0;
+ return false;
+ }
+ double reciprocal = 1 / normal;
+ fA *= reciprocal;
+ fB *= reciprocal;
+ fC *= reciprocal;
+ return true;
+ }
+
+ void cubicDistanceY(const SkDCubic& pts, SkDCubic& distance) const {
+ double oneThird = 1 / 3.0;
+ for (int index = 0; index < 4; ++index) {
+ distance[index].fX = index * oneThird;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ void quadDistanceY(const SkDQuad& pts, SkDQuad& distance) const {
+ double oneHalf = 1 / 2.0;
+ for (int index = 0; index < 3; ++index) {
+ distance[index].fX = index * oneHalf;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ double controlPtDistance(const SkDCubic& pts, int index) const {
+ SkASSERT(index == 1 || index == 2);
+ return fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+
+ double controlPtDistance(const SkDQuad& pts) const {
+ return fA * pts[1].fX + fB * pts[1].fY + fC;
+ }
+
+ double pointDistance(const SkDPoint& pt) const {
+ return fA * pt.fX + fB * pt.fY + fC;
+ }
+
+ double dx() const {
+ return fB;
+ }
+
+ double dy() const {
+ return -fA;
+ }
+
+private:
+ double fA;
+ double fB;
+ double fC;
+};
+
+#endif
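The parameterization above reduces point-to-line distance to a single dot product once normalized. A plain C++ mirror of lineEndPoints(), normalize(), and pointDistance() under the same conventions; a sketch with illustrative names, not the Skia types:

    #include <cmath>
    #include <cstdio>

    struct Pt { double x, y; };

    int main() {
        Pt p0 = {0, 0}, p1 = {3, 4};
        // ax + by + c = 0 through p0 and p1, as in lineEndPoints()
        double a = p0.y - p1.y;                // -4
        double b = p1.x - p0.x;                //  3
        double c = p0.x * p1.y - p1.x * p0.y;  //  0
        double norm = sqrt(a * a + b * b);     //  5: the segment length
        a /= norm; b /= norm; c /= norm;       // now a^2 + b^2 == 1
        Pt test = {3, 0};
        double d = a * test.x + b * test.y + c;  // signed distance, as in pointDistance()
        printf("distance = %g\n", d);            // -2.4; |d| is the true distance
        return 0;
    }

Skipping the normalize step, as the header's note says, scales every distance by the same factor, which is why the unnormalized form still suffices for sign and ordering tests.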
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.cpp b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
new file mode 100644
index 0000000000..a701d0f2fe
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
@@ -0,0 +1,1141 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathOpsCurve.h"
+
+/* Angles are sorted counterclockwise. The smallest angle has a positive x and the smallest
+ positive y. The largest angle has a positive x and a zero y. */
+
+#if DEBUG_ANGLE
+ static bool CompareResult(const char* func, SkString* bugOut, SkString* bugPart, int append,
+ bool compare) {
+ SkDebugf("%s %c %d\n", bugOut->c_str(), compare ? 'T' : 'F', append);
+ SkDebugf("%sPart %s\n", func, bugPart[0].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[1].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[2].c_str());
+ return compare;
+ }
+
+ #define COMPARE_RESULT(append, compare) CompareResult(__FUNCTION__, &bugOut, bugPart, append, \
+ compare)
+#else
+ #define COMPARE_RESULT(append, compare) compare
+#endif
+
+/* quarter angle values for sector
+
+31 x > 0, y == 0 horizontal line (to the right)
+0 x > 0, y == epsilon quad/cubic horizontal tangent eventually going +y
+1 x > 0, y > 0, x > y nearer horizontal angle
+2 x + e == y quad/cubic 45 going horiz
+3 x > 0, y > 0, x == y 45 angle
+4 x == y + e quad/cubic 45 going vert
+5 x > 0, y > 0, x < y nearer vertical angle
+6 x == epsilon, y > 0 quad/cubic vertical tangent eventually going +x
+7 x == 0, y > 0 vertical line (to the top)
+
+ 8 7 6
+ 9 | 5
+ 10 | 4
+ 11 | 3
+ 12 \ | / 2
+ 13 | 1
+ 14 | 0
+ 15 --------------+------------- 31
+ 16 | 30
+ 17 | 29
+ 18 / | \ 28
+ 19 | 27
+ 20 | 26
+ 21 | 25
+ 22 23 24
+*/
+
+// return true if lh < this < rh
+bool SkOpAngle::after(SkOpAngle* test) {
+ SkOpAngle* lh = test;
+ SkOpAngle* rh = lh->fNext;
+ SkASSERT(lh != rh);
+ fPart.fCurve = fOriginalCurvePart;
+ lh->fPart.fCurve = lh->fOriginalCurvePart;
+ lh->fPart.fCurve.offset(lh->segment()->verb(), fPart.fCurve[0] - lh->fPart.fCurve[0]);
+ rh->fPart.fCurve = rh->fOriginalCurvePart;
+ rh->fPart.fCurve.offset(rh->segment()->verb(), fPart.fCurve[0] - rh->fPart.fCurve[0]);
+
+#if DEBUG_ANGLE
+ SkString bugOut;
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+ SkString bugPart[3] = { lh->debugPart(), this->debugPart(), rh->debugPart() };
+#endif
+ if (lh->fComputeSector && !lh->computeSector()) {
+ return COMPARE_RESULT(1, true);
+ }
+ if (fComputeSector && !this->computeSector()) {
+ return COMPARE_RESULT(2, true);
+ }
+ if (rh->fComputeSector && !rh->computeSector()) {
+ return COMPARE_RESULT(3, true);
+ }
+#if DEBUG_ANGLE // reset bugOut with computed sectors
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+#endif
+ bool ltrOverlap = (lh->fSectorMask | rh->fSectorMask) & fSectorMask;
+ bool lrOverlap = lh->fSectorMask & rh->fSectorMask;
+ int lrOrder; // set to -1 if either order works
+ if (!lrOverlap) { // no lh/rh sector overlap
+ if (!ltrOverlap) { // no lh/this/rh sector overlap
+ return COMPARE_RESULT(4, (lh->fSectorEnd > rh->fSectorStart)
+ ^ (fSectorStart > lh->fSectorEnd) ^ (fSectorStart > rh->fSectorStart));
+ }
+ int lrGap = (rh->fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ /* A tiny change can move the start +/- 4. The order can only be determined if
+ lr gap is not 12 to 20 or -12 to -20.
+ -31 ..-21 1
+ -20 ..-12 -1
+ -11 .. -1 0
+ 0 shouldn't get here
+                 1 .. 11      1
+ 12 .. 20 -1
+ 21 .. 31 0
+ */
+ lrOrder = lrGap > 20 ? 0 : lrGap > 11 ? -1 : 1;
+ } else {
+ lrOrder = lh->orderable(rh);
+ if (!ltrOverlap && lrOrder >= 0) {
+ return COMPARE_RESULT(5, !lrOrder);
+ }
+ }
+ int ltOrder;
+ SkASSERT((lh->fSectorMask & fSectorMask) || (rh->fSectorMask & fSectorMask) || -1 == lrOrder);
+ if (lh->fSectorMask & fSectorMask) {
+ ltOrder = lh->orderable(this);
+ } else {
+ int ltGap = (fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ ltOrder = ltGap > 20 ? 0 : ltGap > 11 ? -1 : 1;
+ }
+ int trOrder;
+ if (rh->fSectorMask & fSectorMask) {
+ trOrder = this->orderable(rh);
+ } else {
+ int trGap = (rh->fSectorStart - fSectorStart + 32) & 0x1f;
+ trOrder = trGap > 20 ? 0 : trGap > 11 ? -1 : 1;
+ }
+ this->alignmentSameSide(lh, &ltOrder);
+ this->alignmentSameSide(rh, &trOrder);
+ if (lrOrder >= 0 && ltOrder >= 0 && trOrder >= 0) {
+ return COMPARE_RESULT(7, lrOrder ? (ltOrder & trOrder) : (ltOrder | trOrder));
+ }
+// SkASSERT(lrOrder >= 0 || ltOrder >= 0 || trOrder >= 0);
+// There's not enough information to sort. Get the pairs of angles in opposite planes.
+// If an order is < 0, the pair is already in an opposite plane. Check the remaining pairs.
+ // FIXME : once all variants are understood, rewrite this more simply
+ if (ltOrder == 0 && lrOrder == 0) {
+ SkASSERT(trOrder < 0);
+ // FIXME : once this is verified to work, remove one opposite angle call
+ SkDEBUGCODE(bool lrOpposite = lh->oppositePlanes(rh));
+ bool ltOpposite = lh->oppositePlanes(this);
+ SkOPASSERT(lrOpposite != ltOpposite);
+ return COMPARE_RESULT(8, ltOpposite);
+ } else if (ltOrder == 1 && trOrder == 0) {
+ SkASSERT(lrOrder < 0);
+ bool trOpposite = oppositePlanes(rh);
+ return COMPARE_RESULT(9, trOpposite);
+ } else if (lrOrder == 1 && trOrder == 1) {
+ SkASSERT(ltOrder < 0);
+// SkDEBUGCODE(bool trOpposite = oppositePlanes(rh));
+ bool lrOpposite = lh->oppositePlanes(rh);
+// SkASSERT(lrOpposite != trOpposite);
+ return COMPARE_RESULT(10, lrOpposite);
+ }
+ // If a pair couldn't be ordered, there's not enough information to determine the sort.
+ // Refer to: https://docs.google.com/drawings/d/1KV-8SJTedku9fj4K6fd1SB-8divuV_uivHVsSgwXICQ
+ if (fUnorderable || lh->fUnorderable || rh->fUnorderable) {
+ // limit to lines; should work with curves, but wait for a failing test to verify
+ if (!fPart.isCurve() && !lh->fPart.isCurve() && !rh->fPart.isCurve()) {
+ // see if original raw data is orderable
+ // if two share a point, check if third has both points in same half plane
+ int ltShare = lh->fOriginalCurvePart[0] == fOriginalCurvePart[0];
+ int lrShare = lh->fOriginalCurvePart[0] == rh->fOriginalCurvePart[0];
+ int trShare = fOriginalCurvePart[0] == rh->fOriginalCurvePart[0];
+            // if only one pair is the same, the third point touches neither of the pair
+ if (ltShare + lrShare + trShare == 1) {
+ if (lrShare) {
+ int ltOOrder = lh->linesOnOriginalSide(this);
+ int rtOOrder = rh->linesOnOriginalSide(this);
+ if ((rtOOrder ^ ltOOrder) == 1) {
+ return ltOOrder;
+ }
+ } else if (trShare) {
+ int tlOOrder = this->linesOnOriginalSide(lh);
+ int rlOOrder = rh->linesOnOriginalSide(lh);
+ if ((tlOOrder ^ rlOOrder) == 1) {
+ return rlOOrder;
+ }
+ } else {
+ SkASSERT(ltShare);
+ int trOOrder = rh->linesOnOriginalSide(this);
+ int lrOOrder = lh->linesOnOriginalSide(rh);
+ // result must be 0 and 1 or 1 and 0 to be valid
+ if ((lrOOrder ^ trOOrder) == 1) {
+ return trOOrder;
+ }
+ }
+ }
+ }
+ }
+ if (lrOrder < 0) {
+ if (ltOrder < 0) {
+ return COMPARE_RESULT(11, trOrder);
+ }
+ return COMPARE_RESULT(12, ltOrder);
+ }
+ return COMPARE_RESULT(13, !lrOrder);
+}
+
+int SkOpAngle::lineOnOneSide(const SkDPoint& origin, const SkDVector& line, const SkOpAngle* test,
+ bool useOriginal) const {
+ double crosses[3];
+ SkPath::Verb testVerb = test->segment()->verb();
+ int iMax = SkPathOpsVerbToPoints(testVerb);
+// SkASSERT(origin == test.fCurveHalf[0]);
+ const SkDCurve& testCurve = useOriginal ? test->fOriginalCurvePart : test->fPart.fCurve;
+ for (int index = 1; index <= iMax; ++index) {
+ double xy1 = line.fX * (testCurve[index].fY - origin.fY);
+ double xy2 = line.fY * (testCurve[index].fX - origin.fX);
+ crosses[index - 1] = AlmostBequalUlps(xy1, xy2) ? 0 : xy1 - xy2;
+ }
+ if (crosses[0] * crosses[1] < 0) {
+ return -1;
+ }
+ if (SkPath::kCubic_Verb == testVerb) {
+ if (crosses[0] * crosses[2] < 0 || crosses[1] * crosses[2] < 0) {
+ return -1;
+ }
+ }
+ if (crosses[0]) {
+ return crosses[0] < 0;
+ }
+ if (crosses[1]) {
+ return crosses[1] < 0;
+ }
+ if (SkPath::kCubic_Verb == testVerb && crosses[2]) {
+ return crosses[2] < 0;
+ }
+ return -2;
+}
+
+// given a line, see if the opposite curve's convex hull is all on one side
+// returns -1=not on one side 0=this CW of test 1=this CCW of test
+int SkOpAngle::lineOnOneSide(const SkOpAngle* test, bool useOriginal) {
+ SkASSERT(!fPart.isCurve());
+ SkASSERT(test->fPart.isCurve());
+ SkDPoint origin = fPart.fCurve[0];
+ SkDVector line = fPart.fCurve[1] - origin;
+ int result = this->lineOnOneSide(origin, line, test, useOriginal);
+ if (-2 == result) {
+ fUnorderable = true;
+ result = -1;
+ }
+ return result;
+}
+
+// experiment works only with lines for now
+int SkOpAngle::linesOnOriginalSide(const SkOpAngle* test) {
+ SkASSERT(!fPart.isCurve());
+ SkASSERT(!test->fPart.isCurve());
+ SkDPoint origin = fOriginalCurvePart[0];
+ SkDVector line = fOriginalCurvePart[1] - origin;
+ double dots[2];
+ double crosses[2];
+ const SkDCurve& testCurve = test->fOriginalCurvePart;
+ for (int index = 0; index < 2; ++index) {
+ SkDVector testLine = testCurve[index] - origin;
+ double xy1 = line.fX * testLine.fY;
+ double xy2 = line.fY * testLine.fX;
+ dots[index] = line.fX * testLine.fX + line.fY * testLine.fY;
+ crosses[index] = AlmostBequalUlps(xy1, xy2) ? 0 : xy1 - xy2;
+ }
+ if (crosses[0] * crosses[1] < 0) {
+ return -1;
+ }
+ if (crosses[0]) {
+ return crosses[0] < 0;
+ }
+ if (crosses[1]) {
+ return crosses[1] < 0;
+ }
+ if ((!dots[0] && dots[1] < 0) || (dots[0] < 0 && !dots[1])) {
+ return 2; // 180 degrees apart
+ }
+ fUnorderable = true;
+ return -1;
+}
+
+// To sort the angles, all curves are translated to have the same starting point.
+// If the curve's control point in its original position is on one side of a compared line,
+// and translated is on the opposite side, reverse the previously computed order.
+void SkOpAngle::alignmentSameSide(const SkOpAngle* test, int* order) const {
+ if (*order < 0) {
+ return;
+ }
+ if (fPart.isCurve()) {
+        // This should support all curve types, but the only bug that requires it involves lines.
+        // Turning it on for curves causes existing tests to fail.
+ return;
+ }
+ if (test->fPart.isCurve()) {
+ return;
+ }
+ const SkDPoint& xOrigin = test->fPart.fCurve.fLine[0];
+ const SkDPoint& oOrigin = test->fOriginalCurvePart.fLine[0];
+ if (xOrigin == oOrigin) {
+ return;
+ }
+ int iMax = SkPathOpsVerbToPoints(this->segment()->verb());
+ SkDVector xLine = test->fPart.fCurve.fLine[1] - xOrigin;
+ SkDVector oLine = test->fOriginalCurvePart.fLine[1] - oOrigin;
+ for (int index = 1; index <= iMax; ++index) {
+ const SkDPoint& testPt = fPart.fCurve[index];
+ double xCross = oLine.crossCheck(testPt - xOrigin);
+ double oCross = xLine.crossCheck(testPt - oOrigin);
+ if (oCross * xCross < 0) {
+ *order ^= 1;
+ break;
+ }
+ }
+}
+
+bool SkOpAngle::checkCrossesZero() const {
+ int start = SkTMin(fSectorStart, fSectorEnd);
+ int end = SkTMax(fSectorStart, fSectorEnd);
+ bool crossesZero = end - start > 16;
+ return crossesZero;
+}
+
+bool SkOpAngle::checkParallel(SkOpAngle* rh) {
+ SkDVector scratch[2];
+ const SkDVector* sweep, * tweep;
+ if (this->fPart.isOrdered()) {
+ sweep = this->fPart.fSweep;
+ } else {
+ scratch[0] = this->fPart.fCurve[1] - this->fPart.fCurve[0];
+ sweep = &scratch[0];
+ }
+ if (rh->fPart.isOrdered()) {
+ tweep = rh->fPart.fSweep;
+ } else {
+ scratch[1] = rh->fPart.fCurve[1] - rh->fPart.fCurve[0];
+ tweep = &scratch[1];
+ }
+ double s0xt0 = sweep->crossCheck(*tweep);
+ if (tangentsDiverge(rh, s0xt0)) {
+ return s0xt0 < 0;
+ }
+ // compute the perpendicular to the endpoints and see where it intersects the opposite curve
+    // if the intersections are within the t range, do a cross check on those
+ bool inside;
+ if (!fEnd->contains(rh->fEnd)) {
+ if (this->endToSide(rh, &inside)) {
+ return inside;
+ }
+ if (rh->endToSide(this, &inside)) {
+ return !inside;
+ }
+ }
+ if (this->midToSide(rh, &inside)) {
+ return inside;
+ }
+ if (rh->midToSide(this, &inside)) {
+ return !inside;
+ }
+ // compute the cross check from the mid T values (last resort)
+ SkDVector m0 = segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+ SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+ double m0xm1 = m0.crossCheck(m1);
+ if (m0xm1 == 0) {
+ this->fUnorderable = true;
+ rh->fUnorderable = true;
+ return true;
+ }
+ return m0xm1 < 0;
+}
+
+// The original angle is too short to yield meaningful sector information.
+// Lengthen it until it is long enough to be meaningful, or leave it unset if lengthening
+// would cause it to intersect one of the adjacent angles.
+bool SkOpAngle::computeSector() {
+ if (fComputedSector) {
+ return !fUnorderable;
+ }
+ fComputedSector = true;
+ bool stepUp = fStart->t() < fEnd->t();
+ SkOpSpanBase* checkEnd = fEnd;
+ if (checkEnd->final() && stepUp) {
+ fUnorderable = true;
+ return false;
+ }
+ do {
+// advance end
+ const SkOpSegment* other = checkEnd->segment();
+ const SkOpSpanBase* oSpan = other->head();
+ do {
+ if (oSpan->segment() != segment()) {
+ continue;
+ }
+ if (oSpan == checkEnd) {
+ continue;
+ }
+ if (!approximately_equal(oSpan->t(), checkEnd->t())) {
+ continue;
+ }
+ goto recomputeSector;
+ } while (!oSpan->final() && (oSpan = oSpan->upCast()->next()));
+ checkEnd = stepUp ? !checkEnd->final()
+ ? checkEnd->upCast()->next() : nullptr
+ : checkEnd->prev();
+ } while (checkEnd);
+recomputeSector:
+ SkOpSpanBase* computedEnd = stepUp ? checkEnd ? checkEnd->prev() : fEnd->segment()->head()
+ : checkEnd ? checkEnd->upCast()->next() : fEnd->segment()->tail();
+ if (checkEnd == fEnd || computedEnd == fEnd || computedEnd == fStart) {
+ fUnorderable = true;
+ return false;
+ }
+ if (stepUp != (fStart->t() < computedEnd->t())) {
+ fUnorderable = true;
+ return false;
+ }
+ SkOpSpanBase* saveEnd = fEnd;
+ fComputedEnd = fEnd = computedEnd;
+ setSpans();
+ setSector();
+ fEnd = saveEnd;
+ return !fUnorderable;
+}
+
+int SkOpAngle::convexHullOverlaps(const SkOpAngle* rh) {
+ const SkDVector* sweep = this->fPart.fSweep;
+ const SkDVector* tweep = rh->fPart.fSweep;
+ double s0xs1 = sweep[0].crossCheck(sweep[1]);
+ double s0xt0 = sweep[0].crossCheck(tweep[0]);
+ double s1xt0 = sweep[1].crossCheck(tweep[0]);
+ bool tBetweenS = s0xs1 > 0 ? s0xt0 > 0 && s1xt0 < 0 : s0xt0 < 0 && s1xt0 > 0;
+ double s0xt1 = sweep[0].crossCheck(tweep[1]);
+ double s1xt1 = sweep[1].crossCheck(tweep[1]);
+ tBetweenS |= s0xs1 > 0 ? s0xt1 > 0 && s1xt1 < 0 : s0xt1 < 0 && s1xt1 > 0;
+ double t0xt1 = tweep[0].crossCheck(tweep[1]);
+ if (tBetweenS) {
+ return -1;
+ }
+ if ((s0xt0 == 0 && s1xt1 == 0) || (s1xt0 == 0 && s0xt1 == 0)) { // s0 to s1 equals t0 to t1
+ return -1;
+ }
+ bool sBetweenT = t0xt1 > 0 ? s0xt0 < 0 && s0xt1 > 0 : s0xt0 > 0 && s0xt1 < 0;
+ sBetweenT |= t0xt1 > 0 ? s1xt0 < 0 && s1xt1 > 0 : s1xt0 > 0 && s1xt1 < 0;
+ if (sBetweenT) {
+ return -1;
+ }
+ // if all of the sweeps are in the same half plane, then the order of any pair is enough
+ if (s0xt0 >= 0 && s0xt1 >= 0 && s1xt0 >= 0 && s1xt1 >= 0) {
+ return 0;
+ }
+ if (s0xt0 <= 0 && s0xt1 <= 0 && s1xt0 <= 0 && s1xt1 <= 0) {
+ return 1;
+ }
+    // if the outside sweeps are greater than 180 degrees:
+    // first assume the initial tangents give the ordering
+    // if the midpoint direction matches the initial order, that is enough
+ SkDVector m0 = this->segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+ SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+ double m0xm1 = m0.crossCheck(m1);
+ if (s0xt0 > 0 && m0xm1 > 0) {
+ return 0;
+ }
+ if (s0xt0 < 0 && m0xm1 < 0) {
+ return 1;
+ }
+ if (tangentsDiverge(rh, s0xt0)) {
+ return s0xt0 < 0;
+ }
+ return m0xm1 < 0;
+}
+
+// OPTIMIZATION: longest can be either lazily computed here or precomputed in setup
+double SkOpAngle::distEndRatio(double dist) const {
+ double longest = 0;
+ const SkOpSegment& segment = *this->segment();
+ int ptCount = SkPathOpsVerbToPoints(segment.verb());
+ const SkPoint* pts = segment.pts();
+ for (int idx1 = 0; idx1 <= ptCount - 1; ++idx1) {
+ for (int idx2 = idx1 + 1; idx2 <= ptCount; ++idx2) {
+ if (idx1 == idx2) {
+ continue;
+ }
+ SkDVector v;
+ v.set(pts[idx2] - pts[idx1]);
+ double lenSq = v.lengthSquared();
+ longest = SkTMax(longest, lenSq);
+ }
+ }
+ return sqrt(longest) / dist;
+}
+
+bool SkOpAngle::endsIntersect(SkOpAngle* rh) {
+ SkPath::Verb lVerb = this->segment()->verb();
+ SkPath::Verb rVerb = rh->segment()->verb();
+ int lPts = SkPathOpsVerbToPoints(lVerb);
+ int rPts = SkPathOpsVerbToPoints(rVerb);
+ SkDLine rays[] = {{{this->fPart.fCurve[0], rh->fPart.fCurve[rPts]}},
+ {{this->fPart.fCurve[0], this->fPart.fCurve[lPts]}}};
+ if (this->fEnd->contains(rh->fEnd)) {
+ return checkParallel(rh);
+ }
+ double smallTs[2] = {-1, -1};
+ bool limited[2] = {false, false};
+ for (int index = 0; index < 2; ++index) {
+ SkPath::Verb cVerb = index ? rVerb : lVerb;
+ // if the curve is a line, then the line and the ray intersect only at their crossing
+ if (cVerb == SkPath::kLine_Verb) {
+ continue;
+ }
+ const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+ SkIntersections i;
+ (*CurveIntersectRay[cVerb])(segment.pts(), segment.weight(), rays[index], &i);
+ double tStart = index ? rh->fStart->t() : this->fStart->t();
+ double tEnd = index ? rh->fComputedEnd->t() : this->fComputedEnd->t();
+ bool testAscends = tStart < (index ? rh->fComputedEnd->t() : this->fComputedEnd->t());
+ double t = testAscends ? 0 : 1;
+ for (int idx2 = 0; idx2 < i.used(); ++idx2) {
+ double testT = i[0][idx2];
+ if (!approximately_between_orderable(tStart, testT, tEnd)) {
+ continue;
+ }
+ if (approximately_equal_orderable(tStart, testT)) {
+ continue;
+ }
+ smallTs[index] = t = testAscends ? SkTMax(t, testT) : SkTMin(t, testT);
+ limited[index] = approximately_equal_orderable(t, tEnd);
+ }
+ }
+ bool sRayLonger = false;
+ SkDVector sCept = {0, 0};
+ double sCeptT = -1;
+ int sIndex = -1;
+ bool useIntersect = false;
+ for (int index = 0; index < 2; ++index) {
+ if (smallTs[index] < 0) {
+ continue;
+ }
+ const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+ const SkDPoint& dPt = segment.dPtAtT(smallTs[index]);
+ SkDVector cept = dPt - rays[index][0];
+ // If this point is on the curve, it should have been detected earlier by ordinary
+ // curve intersection. This may be hard to determine in general, but for lines,
+ // the point could be close to or equal to its end, but shouldn't be near the start.
+ if ((index ? lPts : rPts) == 1) {
+ SkDVector total = rays[index][1] - rays[index][0];
+ if (cept.lengthSquared() * 2 < total.lengthSquared()) {
+ continue;
+ }
+ }
+ SkDVector end = rays[index][1] - rays[index][0];
+ if (cept.fX * end.fX < 0 || cept.fY * end.fY < 0) {
+ continue;
+ }
+ double rayDist = cept.length();
+ double endDist = end.length();
+ bool rayLonger = rayDist > endDist;
+ if (limited[0] && limited[1] && rayLonger) {
+ useIntersect = true;
+ sRayLonger = rayLonger;
+ sCept = cept;
+ sCeptT = smallTs[index];
+ sIndex = index;
+ break;
+ }
+ double delta = fabs(rayDist - endDist);
+ double minX, minY, maxX, maxY;
+ minX = minY = SK_ScalarInfinity;
+ maxX = maxY = -SK_ScalarInfinity;
+ const SkDCurve& curve = index ? rh->fPart.fCurve : this->fPart.fCurve;
+ int ptCount = index ? rPts : lPts;
+ for (int idx2 = 0; idx2 <= ptCount; ++idx2) {
+ minX = SkTMin(minX, curve[idx2].fX);
+ minY = SkTMin(minY, curve[idx2].fY);
+ maxX = SkTMax(maxX, curve[idx2].fX);
+ maxY = SkTMax(maxY, curve[idx2].fY);
+ }
+ double maxWidth = SkTMax(maxX - minX, maxY - minY);
+ delta = sk_ieee_double_divide(delta, maxWidth);
+ // FIXME: move these magic numbers
+ // This fixes skbug.com/8380
+ // Larger changes (like changing the constant in the next block) cause other
+ // tests to fail as documented in the bug.
+ // This could probably become a more general test: e.g., if translating the
+ // curve causes the cross product of any control point or end point to change
+ // sign with regard to the opposite curve's hull, treat the curves as parallel.
+
+        // More broadly, this points to the general fragility of this approach of assigning
+ // winding by sorting the angles of curves sharing a common point, as mentioned
+ // in the bug.
+ if (delta < 4e-3 && delta > 1e-3 && !useIntersect && fPart.isCurve()
+ && rh->fPart.isCurve() && fOriginalCurvePart[0] != fPart.fCurve.fLine[0]) {
+ // see if original curve is on one side of hull; translated is on the other
+ const SkDPoint& origin = rh->fOriginalCurvePart[0];
+ int count = SkPathOpsVerbToPoints(rh->segment()->verb());
+ const SkDVector line = rh->fOriginalCurvePart[count] - origin;
+ int originalSide = rh->lineOnOneSide(origin, line, this, true);
+ if (originalSide >= 0) {
+ int translatedSide = rh->lineOnOneSide(origin, line, this, false);
+ if (originalSide != translatedSide) {
+ continue;
+ }
+ }
+ }
+ if (delta > 1e-3 && (useIntersect ^= true)) {
+ sRayLonger = rayLonger;
+ sCept = cept;
+ sCeptT = smallTs[index];
+ sIndex = index;
+ }
+ }
+ if (useIntersect) {
+ const SkDCurve& curve = sIndex ? rh->fPart.fCurve : this->fPart.fCurve;
+ const SkOpSegment& segment = sIndex ? *rh->segment() : *this->segment();
+ double tStart = sIndex ? rh->fStart->t() : fStart->t();
+ SkDVector mid = segment.dPtAtT(tStart + (sCeptT - tStart) / 2) - curve[0];
+ double septDir = mid.crossCheck(sCept);
+ if (!septDir) {
+ return checkParallel(rh);
+ }
+ return sRayLonger ^ (sIndex == 0) ^ (septDir < 0);
+ } else {
+ return checkParallel(rh);
+ }
+}
+
+bool SkOpAngle::endToSide(const SkOpAngle* rh, bool* inside) const {
+ const SkOpSegment* segment = this->segment();
+ SkPath::Verb verb = segment->verb();
+ SkDLine rayEnd;
+ rayEnd[0].set(this->fEnd->pt());
+ rayEnd[1] = rayEnd[0];
+ SkDVector slopeAtEnd = (*CurveDSlopeAtT[verb])(segment->pts(), segment->weight(),
+ this->fEnd->t());
+ rayEnd[1].fX += slopeAtEnd.fY;
+ rayEnd[1].fY -= slopeAtEnd.fX;
+ SkIntersections iEnd;
+ const SkOpSegment* oppSegment = rh->segment();
+ SkPath::Verb oppVerb = oppSegment->verb();
+ (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayEnd, &iEnd);
+ double endDist;
+ int closestEnd = iEnd.closestTo(rh->fStart->t(), rh->fEnd->t(), rayEnd[0], &endDist);
+ if (closestEnd < 0) {
+ return false;
+ }
+ if (!endDist) {
+ return false;
+ }
+ SkDPoint start;
+ start.set(this->fStart->pt());
+ // OPTIMIZATION: this min/max bounds scan is repeated in several places and could be shared
+ double minX, minY, maxX, maxY;
+ minX = minY = SK_ScalarInfinity;
+ maxX = maxY = -SK_ScalarInfinity;
+ const SkDCurve& curve = rh->fPart.fCurve;
+ int oppPts = SkPathOpsVerbToPoints(oppVerb);
+ for (int idx2 = 0; idx2 <= oppPts; ++idx2) {
+ minX = SkTMin(minX, curve[idx2].fX);
+ minY = SkTMin(minY, curve[idx2].fY);
+ maxX = SkTMax(maxX, curve[idx2].fX);
+ maxY = SkTMax(maxY, curve[idx2].fY);
+ }
+ double maxWidth = SkTMax(maxX - minX, maxY - minY);
+ endDist = sk_ieee_double_divide(endDist, maxWidth);
+ if (!(endDist >= 5e-12)) { // empirically found
+ return false; // ! above catches NaN
+ }
+ const SkDPoint* endPt = &rayEnd[0];
+ SkDPoint oppPt = iEnd.pt(closestEnd);
+ SkDVector vLeft = *endPt - start;
+ SkDVector vRight = oppPt - start;
+ double dir = vLeft.crossNoNormalCheck(vRight);
+ if (!dir) {
+ return false;
+ }
+ *inside = dir < 0;
+ return true;
+}
+
+/* y<0 y==0 y>0 x<0 x==0 x>0 xy<0 xy==0 xy>0
+ 0 x x x
+ 1 x x x
+ 2 x x x
+ 3 x x x
+ 4 x x x
+ 5 x x x
+ 6 x x x
+ 7 x x x
+ 8 x x x
+ 9 x x x
+ 10 x x x
+ 11 x x x
+ 12 x x x
+ 13 x x x
+ 14 x x x
+ 15 x x x
+*/
+int SkOpAngle::findSector(SkPath::Verb verb, double x, double y) const {
+ double absX = fabs(x);
+ double absY = fabs(y);
+ double xy = SkPath::kLine_Verb == verb || !AlmostEqualUlps(absX, absY) ? absX - absY : 0;
+ // By analogy with the four quadrants and eight octants, and since the Latin for sixteen
+ // is sedecim, one could coin the term sedecimant for a space divided into 16 sections.
+ // http://english.stackexchange.com/questions/133688/word-for-something-partitioned-into-16-parts
+ static const int sedecimant[3][3][3] = {
+ // y<0 y==0 y>0
+ // x<0 x==0 x>0 x<0 x==0 x>0 x<0 x==0 x>0
+ {{ 4, 3, 2}, { 7, -1, 15}, {10, 11, 12}}, // abs(x) < abs(y)
+ {{ 5, -1, 1}, {-1, -1, -1}, { 9, -1, 13}}, // abs(x) == abs(y)
+ {{ 6, 3, 0}, { 7, -1, 15}, { 8, 11, 14}}, // abs(x) > abs(y)
+ };
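+ // Each comparison pair maps a sign to an array index: negative -> 0, zero -> 1,
+ // positive -> 2. Doubling the table entry and adding one expresses the sector in
+ // 32nds of a circle, maps the -1 failure entry to itself (-1 * 2 + 1 == -1), and
+ // leaves exact axis and diagonal directions on sectors where (sector & 3) == 3,
+ // which setSector() later nudges off the boundary.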
+ int sector = sedecimant[(xy >= 0) + (xy > 0)][(y >= 0) + (y > 0)][(x >= 0) + (x > 0)] * 2 + 1;
+// SkASSERT(SkPath::kLine_Verb == verb || sector >= 0);
+ return sector;
+}
+
+SkOpGlobalState* SkOpAngle::globalState() const {
+ return this->segment()->globalState();
+}
+
+
+// OPTIMIZE: if this loops to only one other angle, after first compare fails, insert on other side
+// OPTIMIZE: return where insertion succeeded. Then, start next insertion on opposite side
+bool SkOpAngle::insert(SkOpAngle* angle) {
+ if (angle->fNext) {
+ if (loopCount() >= angle->loopCount()) {
+ if (!merge(angle)) {
+ return true;
+ }
+ } else if (fNext) {
+ if (!angle->merge(this)) {
+ return true;
+ }
+ } else {
+ angle->insert(this);
+ }
+ return true;
+ }
+ bool singleton = nullptr == fNext;
+ if (singleton) {
+ fNext = this;
+ }
+ SkOpAngle* next = fNext;
+ if (next->fNext == this) {
+ if (singleton || angle->after(this)) {
+ this->fNext = angle;
+ angle->fNext = next;
+ } else {
+ next->fNext = angle;
+ angle->fNext = this;
+ }
+ debugValidateNext();
+ return true;
+ }
+ SkOpAngle* last = this;
+ bool flipAmbiguity = false;
+ do {
+ SkASSERT(last->fNext == next);
+ if (angle->after(last) ^ (angle->tangentsAmbiguous() & flipAmbiguity)) {
+ last->fNext = angle;
+ angle->fNext = next;
+ debugValidateNext();
+ return true;
+ }
+ last = next;
+ if (last == this) {
+ FAIL_IF(flipAmbiguity);
+ // We're in a loop. If a sort was ambiguous, flip it to end the loop.
+ flipAmbiguity = true;
+ }
+ next = next->fNext;
+ } while (true);
+ return true;
+}
+
+SkOpSpanBase* SkOpAngle::lastMarked() const {
+ if (fLastMarked) {
+ if (fLastMarked->chased()) {
+ return nullptr;
+ }
+ fLastMarked->setChased(true);
+ }
+ return fLastMarked;
+}
+
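+// Returns true if some angle in this loop covers the same segment as 'angle' but
+// traversed in the opposite direction (its start t equals angle's end t and its
+// end t equals angle's start t).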
+bool SkOpAngle::loopContains(const SkOpAngle* angle) const {
+ if (!fNext) {
+ return false;
+ }
+ const SkOpAngle* first = this;
+ const SkOpAngle* loop = this;
+ const SkOpSegment* tSegment = angle->fStart->segment();
+ double tStart = angle->fStart->t();
+ double tEnd = angle->fEnd->t();
+ do {
+ const SkOpSegment* lSegment = loop->fStart->segment();
+ if (lSegment != tSegment) {
+ continue;
+ }
+ double lStart = loop->fStart->t();
+ if (lStart != tEnd) {
+ continue;
+ }
+ double lEnd = loop->fEnd->t();
+ if (lEnd == tStart) {
+ return true;
+ }
+ } while ((loop = loop->fNext) != first);
+ return false;
+}
+
+int SkOpAngle::loopCount() const {
+ int count = 0;
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ do {
+ next = next->fNext;
+ ++count;
+ } while (next && next != first);
+ return count;
+}
+
+bool SkOpAngle::merge(SkOpAngle* angle) {
+ SkASSERT(fNext);
+ SkASSERT(angle->fNext);
+ SkOpAngle* working = angle;
+ do {
+ if (this == working) {
+ return false;
+ }
+ working = working->fNext;
+ } while (working != angle);
+ do {
+ SkOpAngle* next = working->fNext;
+ working->fNext = nullptr;
+ insert(working);
+ working = next;
+ } while (working != angle);
+ // it's likely that a pair of the angles is unorderable
+ debugValidateNext();
+ return true;
+}
+
+double SkOpAngle::midT() const {
+ return (fStart->t() + fEnd->t()) / 2;
+}
+
+bool SkOpAngle::midToSide(const SkOpAngle* rh, bool* inside) const {
+ const SkOpSegment* segment = this->segment();
+ SkPath::Verb verb = segment->verb();
+ const SkPoint& startPt = this->fStart->pt();
+ const SkPoint& endPt = this->fEnd->pt();
+ SkDPoint dStartPt;
+ dStartPt.set(startPt);
+ SkDLine rayMid;
+ rayMid[0].fX = (startPt.fX + endPt.fX) / 2;
+ rayMid[0].fY = (startPt.fY + endPt.fY) / 2;
+ rayMid[1].fX = rayMid[0].fX + (endPt.fY - startPt.fY);
+ rayMid[1].fY = rayMid[0].fY - (endPt.fX - startPt.fX);
+ SkIntersections iMid;
+ (*CurveIntersectRay[verb])(segment->pts(), segment->weight(), rayMid, &iMid);
+ int iOutside = iMid.mostOutside(this->fStart->t(), this->fEnd->t(), dStartPt);
+ if (iOutside < 0) {
+ return false;
+ }
+ const SkOpSegment* oppSegment = rh->segment();
+ SkPath::Verb oppVerb = oppSegment->verb();
+ SkIntersections oppMid;
+ (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayMid, &oppMid);
+ int oppOutside = oppMid.mostOutside(rh->fStart->t(), rh->fEnd->t(), dStartPt);
+ if (oppOutside < 0) {
+ return false;
+ }
+ SkDVector iSide = iMid.pt(iOutside) - dStartPt;
+ SkDVector oppSide = oppMid.pt(oppOutside) - dStartPt;
+ double dir = iSide.crossCheck(oppSide);
+ if (!dir) {
+ return false;
+ }
+ *inside = dir < 0;
+ return true;
+}
+
+bool SkOpAngle::oppositePlanes(const SkOpAngle* rh) const {
+ int startSpan = SkTAbs(rh->fSectorStart - fSectorStart);
+ return startSpan >= 8;
+}
+
+int SkOpAngle::orderable(SkOpAngle* rh) {
+ int result;
+ if (!fPart.isCurve()) {
+ if (!rh->fPart.isCurve()) {
+ double leftX = fTangentHalf.dx();
+ double leftY = fTangentHalf.dy();
+ double rightX = rh->fTangentHalf.dx();
+ double rightY = rh->fTangentHalf.dy();
+ double x_ry = leftX * rightY;
+ double rx_y = rightX * leftY;
+ if (x_ry == rx_y) {
+ if (leftX * rightX < 0 || leftY * rightY < 0) {
+ return 1; // exactly 180 degrees apart
+ }
+ goto unorderable;
+ }
+ SkASSERT(x_ry != rx_y); // indicates an undetected coincidence -- worth finding earlier
+ return x_ry < rx_y ? 1 : 0;
+ }
+ if ((result = this->lineOnOneSide(rh, false)) >= 0) {
+ return result;
+ }
+ if (fUnorderable || approximately_zero(rh->fSide)) {
+ goto unorderable;
+ }
+ } else if (!rh->fPart.isCurve()) {
+ if ((result = rh->lineOnOneSide(this, false)) >= 0) {
+ return result ? 0 : 1;
+ }
+ if (rh->fUnorderable || approximately_zero(fSide)) {
+ goto unorderable;
+ }
+ } else if ((result = this->convexHullOverlaps(rh)) >= 0) {
+ return result;
+ }
+ return this->endsIntersect(rh) ? 1 : 0;
+unorderable:
+ fUnorderable = true;
+ rh->fUnorderable = true;
+ return -1;
+}
+
+// OPTIMIZE: if this shows up in a profile, add a previous pointer
+// as is, this should be rarely called
+SkOpAngle* SkOpAngle::previous() const {
+ SkOpAngle* last = fNext;
+ do {
+ SkOpAngle* next = last->fNext;
+ if (next == this) {
+ return last;
+ }
+ last = next;
+ } while (true);
+}
+
+SkOpSegment* SkOpAngle::segment() const {
+ return fStart->segment();
+}
+
+void SkOpAngle::set(SkOpSpanBase* start, SkOpSpanBase* end) {
+ fStart = start;
+ fComputedEnd = fEnd = end;
+ SkASSERT(start != end);
+ fNext = nullptr;
+ fComputeSector = fComputedSector = fCheckCoincidence = fTangentsAmbiguous = false;
+ setSpans();
+ setSector();
+ SkDEBUGCODE(fID = start ? start->globalState()->nextAngleID() : -1);
+}
+
+void SkOpAngle::setSpans() {
+ fUnorderable = false;
+ fLastMarked = nullptr;
+ if (!fStart) {
+ fUnorderable = true;
+ return;
+ }
+ const SkOpSegment* segment = fStart->segment();
+ const SkPoint* pts = segment->pts();
+ SkDEBUGCODE(fPart.fCurve.fVerb = SkPath::kCubic_Verb); // required for SkDCurve debug check
+ SkDEBUGCODE(fPart.fCurve[2].fX = fPart.fCurve[2].fY = fPart.fCurve[3].fX = fPart.fCurve[3].fY
+ = SK_ScalarNaN); // make the non-line part uninitialized
+ SkDEBUGCODE(fPart.fCurve.fVerb = segment->verb()); // set the curve type for real
+ segment->subDivide(fStart, fEnd, &fPart.fCurve); // set at least the line part if not more
+ fOriginalCurvePart = fPart.fCurve;
+ const SkPath::Verb verb = segment->verb();
+ fPart.setCurveHullSweep(verb);
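+ // If the hull sweep shows no curvature, treat the curve as line-like: collapse it to
+ // its endpoints and compute the sort tangent from that chord, as a true line would.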
+ if (SkPath::kLine_Verb != verb && !fPart.isCurve()) {
+ SkDLine lineHalf;
+ fPart.fCurve[1] = fPart.fCurve[SkPathOpsVerbToPoints(verb)];
+ fOriginalCurvePart[1] = fPart.fCurve[1];
+ lineHalf[0].set(fPart.fCurve[0].asSkPoint());
+ lineHalf[1].set(fPart.fCurve[1].asSkPoint());
+ fTangentHalf.lineEndPoints(lineHalf);
+ fSide = 0;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb: {
+ SkASSERT(fStart != fEnd);
+ const SkPoint& cP1 = pts[fStart->t() < fEnd->t()];
+ SkDLine lineHalf;
+ lineHalf[0].set(fStart->pt());
+ lineHalf[1].set(cP1);
+ fTangentHalf.lineEndPoints(lineHalf);
+ fSide = 0;
+ } return;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb: {
+ SkLineParameters tangentPart;
+ (void) tangentPart.quadEndPoints(fPart.fCurve.fQuad);
+ fSide = -tangentPart.pointDistance(fPart.fCurve[2]); // not normalized -- compare sign only
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkLineParameters tangentPart;
+ (void) tangentPart.cubicPart(fPart.fCurve.fCubic);
+ fSide = -tangentPart.pointDistance(fPart.fCurve[3]);
+ double testTs[4];
+ // OPTIMIZATION: keep inflections precomputed with cubic segment?
+ int testCount = SkDCubic::FindInflections(pts, testTs);
+ double startT = fStart->t();
+ double endT = fEnd->t();
+ double limitT = endT;
+ int index;
+ for (index = 0; index < testCount; ++index) {
+ if (!::between(startT, testTs[index], limitT)) {
+ testTs[index] = -1;
+ }
+ }
+ testTs[testCount++] = startT;
+ testTs[testCount++] = endT;
+ SkTQSort<double>(testTs, &testTs[testCount - 1]);
+ double bestSide = 0;
+ int testCases = (testCount << 1) - 1;
+ index = 0;
+ while (testTs[index] < 0) {
+ ++index;
+ }
+ index <<= 1;
+ for (; index < testCases; ++index) {
+ int testIndex = index >> 1;
+ double testT = testTs[testIndex];
+ if (index & 1) {
+ testT = (testT + testTs[testIndex + 1]) / 2;
+ }
+ // OPTIMIZE: could avoid call for t == startT, endT
+ SkDPoint pt = dcubic_xy_at_t(pts, segment->weight(), testT);
+ SkLineParameters tangentPart;
+ tangentPart.cubicEndPoints(fPart.fCurve.fCubic);
+ double testSide = tangentPart.pointDistance(pt);
+ if (fabs(bestSide) < fabs(testSide)) {
+ bestSide = testSide;
+ }
+ }
+ fSide = -bestSide; // compare sign only
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+void SkOpAngle::setSector() {
+ if (!fStart) {
+ fUnorderable = true;
+ return;
+ }
+ const SkOpSegment* segment = fStart->segment();
+ SkPath::Verb verb = segment->verb();
+ fSectorStart = this->findSector(verb, fPart.fSweep[0].fX, fPart.fSweep[0].fY);
+ if (fSectorStart < 0) {
+ goto deferTilLater;
+ }
+ if (!fPart.isCurve()) { // if it's a line or line-like, note that both sectors are the same
+ SkASSERT(fSectorStart >= 0);
+ fSectorEnd = fSectorStart;
+ fSectorMask = 1 << fSectorStart;
+ return;
+ }
+ SkASSERT(SkPath::kLine_Verb != verb);
+ fSectorEnd = this->findSector(verb, fPart.fSweep[1].fX, fPart.fSweep[1].fY);
+ if (fSectorEnd < 0) {
+deferTilLater:
+ fSectorStart = fSectorEnd = -1;
+ fSectorMask = 0;
+ fComputeSector = true; // can't determine sector until segment length can be found
+ return;
+ }
+ if (fSectorEnd == fSectorStart
+ && (fSectorStart & 3) != 3) { // if the sector has no span, it can't be an exact angle
+ fSectorMask = 1 << fSectorStart;
+ return;
+ }
+ bool crossesZero = this->checkCrossesZero();
+ int start = SkTMin(fSectorStart, fSectorEnd);
+ bool curveBendsCCW = (fSectorStart == start) ^ crossesZero;
+ // bump the start and end of the sector span if they are on exact compass points
+ if ((fSectorStart & 3) == 3) {
+ fSectorStart = (fSectorStart + (curveBendsCCW ? 1 : 31)) & 0x1f;
+ }
+ if ((fSectorEnd & 3) == 3) {
+ fSectorEnd = (fSectorEnd + (curveBendsCCW ? 31 : 1)) & 0x1f;
+ }
+ crossesZero = this->checkCrossesZero();
+ start = SkTMin(fSectorStart, fSectorEnd);
+ int end = SkTMax(fSectorStart, fSectorEnd);
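+ // Build a bit mask of every 32nd-sector the sweep covers; a sweep that crosses zero
+ // wraps around, so its mask is assembled from both ends of the 32-bit range.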
+ if (!crossesZero) {
+ fSectorMask = (unsigned) -1 >> (31 - end + start) << start;
+ } else {
+ fSectorMask = (unsigned) -1 >> (31 - start) | ((unsigned) -1 << end);
+ }
+}
+
+SkOpSpan* SkOpAngle::starter() {
+ return fStart->starter(fEnd);
+}
+
+bool SkOpAngle::tangentsDiverge(const SkOpAngle* rh, double s0xt0) {
+ if (s0xt0 == 0) {
+ return false;
+ }
+ // if the ctrl tangents are not nearly parallel, use them
+ // solve for opposite direction displacement scale factor == m
+ // initial dir = v1.cross(v2) == v2.x * v1.y - v2.y * v1.x
+ // displacement of q1[1] : dq1 = { -m * v1.y, m * v1.x } + q1[1]
+ // straight angle when : v2.x * (dq1.y - q1[0].y) == v2.y * (dq1.x - q1[0].x)
+ // v2.x * (m * v1.x + v1.y) == v2.y * (-m * v1.y + v1.x)
+ // - m * (v2.x * v1.x + v2.y * v1.y) == v2.x * v1.y - v2.y * v1.x
+ // m = (v2.y * v1.x - v2.x * v1.y) / (v2.x * v1.x + v2.y * v1.y)
+ // m = v1.cross(v2) / v1.dot(v2)
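+ // m is the displacement, relative to the sweep vector, needed to straighten the
+ // tangents; distEndRatio() appears to scale that by the curve's extent, so a large
+ // factor suggests the tangents are effectively parallel (mFactor in 50..200 is
+ // flagged as ambiguous).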
+ const SkDVector* sweep = fPart.fSweep;
+ const SkDVector* tweep = rh->fPart.fSweep;
+ double s0dt0 = sweep[0].dot(tweep[0]);
+ if (!s0dt0) {
+ return true;
+ }
+ SkASSERT(s0dt0 != 0);
+ double m = s0xt0 / s0dt0;
+ double sDist = sweep[0].length() * m;
+ double tDist = tweep[0].length() * m;
+ bool useS = fabs(sDist) < fabs(tDist);
+ double mFactor = fabs(useS ? this->distEndRatio(sDist) : rh->distEndRatio(tDist));
+ fTangentsAmbiguous = mFactor >= 50 && mFactor < 200;
+ return mFactor < 50; // empirically found limit
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.h b/gfx/skia/skia/src/pathops/SkOpAngle.h
new file mode 100644
index 0000000000..73d0e11bdb
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpAngle_DEFINED
+#define SkOpAngle_DEFINED
+
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#if DEBUG_ANGLE
+#include "include/core/SkString.h"
+#endif
+
+class SkOpContour;
+class SkOpPtT;
+class SkOpSegment;
+class SkOpSpanBase;
+class SkOpSpan;
+
+class SkOpAngle {
+public:
+ enum IncludeType {
+ kUnaryWinding,
+ kUnaryXor,
+ kBinarySingle,
+ kBinaryOpp,
+ };
+
+ const SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+#if DEBUG_SORT
+ void debugLoop() const;
+#endif
+
+#if DEBUG_ANGLE
+ bool debugCheckCoincidence() const { return fCheckCoincidence; }
+ void debugCheckNearCoincidence() const;
+ SkString debugPart() const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ const SkOpSegment* debugSegment(int id) const;
+ int debugSign() const;
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+ void debugValidateNext() const; // in debug builds, verify that angle loop is uncorrupted
+ double distEndRatio(double dist) const;
+ // available to testing only
+ void dump() const;
+ void dumpCurves() const;
+ void dumpLoop() const;
+ void dumpOne(bool functionHeader) const;
+ void dumpTo(const SkOpSegment* fromSeg, const SkOpAngle* ) const;
+ void dumpTest() const;
+
+ SkOpSpanBase* end() const {
+ return fEnd;
+ }
+
+ bool insert(SkOpAngle* );
+ SkOpSpanBase* lastMarked() const;
+ bool loopContains(const SkOpAngle* ) const;
+ int loopCount() const;
+
+ SkOpAngle* next() const {
+ return fNext;
+ }
+
+ SkOpAngle* previous() const;
+ SkOpSegment* segment() const;
+ void set(SkOpSpanBase* start, SkOpSpanBase* end);
+
+ void setLastMarked(SkOpSpanBase* marked) {
+ fLastMarked = marked;
+ }
+
+ SkOpSpanBase* start() const {
+ return fStart;
+ }
+
+ SkOpSpan* starter();
+
+ bool tangentsAmbiguous() const {
+ return fTangentsAmbiguous;
+ }
+
+ bool unorderable() const {
+ return fUnorderable;
+ }
+
+private:
+ bool after(SkOpAngle* test);
+ void alignmentSameSide(const SkOpAngle* test, int* order) const;
+ bool checkCrossesZero() const;
+ bool checkParallel(SkOpAngle* );
+ bool computeSector();
+ int convexHullOverlaps(const SkOpAngle* );
+ bool endToSide(const SkOpAngle* rh, bool* inside) const;
+ bool endsIntersect(SkOpAngle* );
+ int findSector(SkPath::Verb verb, double x, double y) const;
+ SkOpGlobalState* globalState() const;
+ int lineOnOneSide(const SkDPoint& origin, const SkDVector& line, const SkOpAngle* test,
+ bool useOriginal) const;
+ int lineOnOneSide(const SkOpAngle* test, bool useOriginal);
+ int linesOnOriginalSide(const SkOpAngle* test);
+ bool merge(SkOpAngle* );
+ double midT() const;
+ bool midToSide(const SkOpAngle* rh, bool* inside) const;
+ bool oppositePlanes(const SkOpAngle* rh) const;
+ int orderable(SkOpAngle* rh); // 0 == this < rh; 1 == this > rh; -1 == unorderable
+ void setSector();
+ void setSpans();
+ bool tangentsDiverge(const SkOpAngle* rh, double s0xt0);
+
+ SkDCurve fOriginalCurvePart; // the curve from start to end
+ SkDCurveSweep fPart; // the curve from start to end offset as needed
+ double fSide;
+ SkLineParameters fTangentHalf; // used only to sort a pair of lines or line-like sections
+ SkOpAngle* fNext;
+ SkOpSpanBase* fLastMarked;
+ SkOpSpanBase* fStart;
+ SkOpSpanBase* fEnd;
+ SkOpSpanBase* fComputedEnd;
+ int fSectorMask;
+ int8_t fSectorStart; // in 32nds of a circle
+ int8_t fSectorEnd;
+ bool fUnorderable;
+ bool fComputeSector;
+ bool fComputedSector;
+ bool fCheckCoincidence;
+ bool fTangentsAmbiguous;
+ SkDEBUGCODE(int fID);
+
+ friend class PathOpsAngleTester;
+};
+
+
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
new file mode 100644
index 0000000000..9dac160495
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+
+static bool one_contour(const SkPath& path) {
+ SkSTArenaAlloc<256> allocator;
+ int verbCount = path.countVerbs();
+ uint8_t* verbs = (uint8_t*) allocator.makeArrayDefault<uint8_t>(verbCount);
+ (void) path.getVerbs(verbs, verbCount);
+ for (int index = 1; index < verbCount; ++index) {
+ if (verbs[index] == SkPath::kMove_Verb) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkOpBuilder::ReversePath(SkPath* path) {
+ SkPath temp;
+ SkPoint lastPt;
+ SkAssertResult(path->getLastPt(&lastPt));
+ temp.moveTo(lastPt);
+ temp.reversePathTo(*path);
+ temp.close();
+ *path = temp;
+}
+
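+// Normalize the path's winding so that union can simply accumulate contours: a single
+// contour is reversed directly if it winds the wrong way; otherwise the path-ops
+// machinery marks and reverses any contour whose winding disagrees with its nesting.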
+bool SkOpBuilder::FixWinding(SkPath* path) {
+ SkPath::FillType fillType = path->getFillType();
+ if (fillType == SkPath::kInverseEvenOdd_FillType) {
+ fillType = SkPath::kInverseWinding_FillType;
+ } else if (fillType == SkPath::kEvenOdd_FillType) {
+ fillType = SkPath::kWinding_FillType;
+ }
+ SkPathPriv::FirstDirection dir;
+ if (one_contour(*path) && SkPathPriv::CheapComputeFirstDirection(*path, &dir)) {
+ if (dir != SkPathPriv::kCCW_FirstDirection) {
+ ReversePath(path);
+ }
+ path->setFillType(fillType);
+ return true;
+ }
+ SkSTArenaAlloc<4096> allocator;
+ SkOpContourHead contourHead;
+ SkOpGlobalState globalState(&contourHead, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ SkOpEdgeBuilder builder(*path, &contourHead, &globalState);
+ if (builder.unparseable() || !builder.finish()) {
+ return false;
+ }
+ if (!contourHead.count()) {
+ return true;
+ }
+ if (!contourHead.next()) {
+ return false;
+ }
+ contourHead.joinAllSegments();
+ contourHead.resetReverse();
+ bool writePath = false;
+ SkOpSpan* topSpan;
+ globalState.setPhase(SkOpPhase::kFixWinding);
+ while ((topSpan = FindSortableTop(&contourHead))) {
+ SkOpSegment* topSegment = topSpan->segment();
+ SkOpContour* topContour = topSegment->contour();
+ SkASSERT(topContour->isCcw() >= 0);
+#if DEBUG_WINDING
+ SkDebugf("%s id=%d nested=%d ccw=%d\n", __FUNCTION__,
+ topSegment->debugID(), globalState.nested(), topContour->isCcw());
+#endif
+ if ((globalState.nested() & 1) != SkToBool(topContour->isCcw())) {
+ topContour->setReverse();
+ writePath = true;
+ }
+ topContour->markAllDone();
+ globalState.clearNested();
+ }
+ if (!writePath) {
+ path->setFillType(fillType);
+ return true;
+ }
+ SkPath empty;
+ SkPathWriter woundPath(empty);
+ SkOpContour* test = &contourHead;
+ do {
+ if (!test->count()) {
+ continue;
+ }
+ if (test->reversed()) {
+ test->toReversePath(&woundPath);
+ } else {
+ test->toPath(&woundPath);
+ }
+ } while ((test = test->next()));
+ *path = *woundPath.nativePath();
+ path->setFillType(fillType);
+ return true;
+}
+
+void SkOpBuilder::add(const SkPath& path, SkPathOp op) {
+ if (0 == fOps.count() && op != kUnion_SkPathOp) {
+ fPathRefs.push_back() = SkPath();
+ *fOps.append() = kUnion_SkPathOp;
+ }
+ fPathRefs.push_back() = path;
+ *fOps.append() = op;
+}
+
+void SkOpBuilder::reset() {
+ fPathRefs.reset();
+ fOps.reset();
+}
+
+/* OPTIMIZATION: Union doesn't need to be all-or-nothing. A run of three or more convex
+ paths with union ops could be locally resolved and still improve over doing the
+ ops one at a time. */
+bool SkOpBuilder::resolve(SkPath* result) {
+ SkPath original = *result;
+ int count = fOps.count();
+ bool allUnion = true;
+ SkPathPriv::FirstDirection firstDir = SkPathPriv::kUnknown_FirstDirection;
+ for (int index = 0; index < count; ++index) {
+ SkPath* test = &fPathRefs[index];
+ if (kUnion_SkPathOp != fOps[index] || test->isInverseFillType()) {
+ allUnion = false;
+ break;
+ }
+ // If all paths are convex, track direction, reversing as needed.
+ if (test->isConvex()) {
+ SkPathPriv::FirstDirection dir;
+ if (!SkPathPriv::CheapComputeFirstDirection(*test, &dir)) {
+ allUnion = false;
+ break;
+ }
+ if (firstDir == SkPathPriv::kUnknown_FirstDirection) {
+ firstDir = dir;
+ } else if (firstDir != dir) {
+ ReversePath(test);
+ }
+ continue;
+ }
+ // If the path is not convex but its bounds do not intersect the others, simplify is enough.
+ const SkRect& testBounds = test->getBounds();
+ for (int inner = 0; inner < index; ++inner) {
+ // OPTIMIZE: check to see if the contour bounds do not intersect other contour bounds?
+ if (SkRect::Intersects(fPathRefs[inner].getBounds(), testBounds)) {
+ allUnion = false;
+ break;
+ }
+ }
+ }
+ if (!allUnion) {
+ *result = fPathRefs[0];
+ for (int index = 1; index < count; ++index) {
+ if (!Op(*result, fPathRefs[index], fOps[index], result)) {
+ reset();
+ *result = original;
+ return false;
+ }
+ }
+ reset();
+ return true;
+ }
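+ // All operands are unions: simplify each path separately, convert it back to winding
+ // fill, accumulate everything into one path, and simplify the sum once at the end.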
+ SkPath sum;
+ for (int index = 0; index < count; ++index) {
+ if (!Simplify(fPathRefs[index], &fPathRefs[index])) {
+ reset();
+ *result = original;
+ return false;
+ }
+ if (!fPathRefs[index].isEmpty()) {
+ // convert the even odd result back to winding form before accumulating it
+ if (!FixWinding(&fPathRefs[index])) {
+ *result = original;
+ return false;
+ }
+ sum.addPath(fPathRefs[index]);
+ }
+ }
+ reset();
+ bool success = Simplify(sum, result);
+ if (!success) {
+ *result = original;
+ }
+ return success;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
new file mode 100644
index 0000000000..31de4f1a3c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
@@ -0,0 +1,1448 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathOpsTSect.h"
+
+#include <utility>
+
+// returns true if coincident span's start and end are the same
+bool SkCoincidentSpans::collapsed(const SkOpPtT* test) const {
+ return (fCoinPtTStart == test && fCoinPtTEnd->contains(test))
+ || (fCoinPtTEnd == test && fCoinPtTStart->contains(test))
+ || (fOppPtTStart == test && fOppPtTEnd->contains(test))
+ || (fOppPtTEnd == test && fOppPtTStart->contains(test));
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::coinPtTEnd() const {
+ return fCoinPtTEnd;
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::coinPtTStart() const {
+ return fCoinPtTStart;
+}
+
+// sets the span's end to the ptT referenced by the previous-next
+void SkCoincidentSpans::correctOneEnd(
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) ) {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ (this->*setEnd)(testPtT);
+ }
+}
+
+/* Please keep this in sync with debugCorrectEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
+// makes all span ends agree with the segment's spans that define them
+void SkCoincidentSpans::correctEnds() {
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTStart, &SkCoincidentSpans::setCoinPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTEnd, &SkCoincidentSpans::setCoinPtTEnd);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTStart, &SkCoincidentSpans::setOppPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTEnd, &SkCoincidentSpans::setOppPtTEnd);
+}
+
+/* Please keep this in sync with debugExpand */
+// expand the range by checking adjacent spans for coincidence
+bool SkCoincidentSpans::expand() {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setStarts(prev->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setEnds(next->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ return expanded;
+}
+
+// increase the range of this span
+bool SkCoincidentSpans::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ bool result = false;
+ if (fCoinPtTStart->fT > coinPtTStart->fT || (this->flipped()
+ ? fOppPtTStart->fT < oppPtTStart->fT : fOppPtTStart->fT > oppPtTStart->fT)) {
+ this->setStarts(coinPtTStart, oppPtTStart);
+ result = true;
+ }
+ if (fCoinPtTEnd->fT < coinPtTEnd->fT || (this->flipped()
+ ? fOppPtTEnd->fT > oppPtTEnd->fT : fOppPtTEnd->fT < oppPtTEnd->fT)) {
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+ result = true;
+ }
+ return result;
+}
+
+// set the range of this span
+void SkCoincidentSpans::set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkASSERT(SkOpCoincidence::Ordered(coinPtTStart, oppPtTStart));
+ fNext = next;
+ this->setStarts(coinPtTStart, oppPtTStart);
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+}
+
+// returns true if both points are inside this
+bool SkCoincidentSpans::contains(const SkOpPtT* s, const SkOpPtT* e) const {
+ if (s->fT > e->fT) {
+ using std::swap;
+ swap(s, e);
+ }
+ if (s->segment() == fCoinPtTStart->segment()) {
+ return fCoinPtTStart->fT <= s->fT && e->fT <= fCoinPtTEnd->fT;
+ } else {
+ SkASSERT(s->segment() == fOppPtTStart->segment());
+ double oppTs = fOppPtTStart->fT;
+ double oppTe = fOppPtTEnd->fT;
+ if (oppTs > oppTe) {
+ using std::swap;
+ swap(oppTs, oppTe);
+ }
+ return oppTs <= s->fT && e->fT <= oppTe;
+ }
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::oppPtTStart() const {
+ return fOppPtTStart;
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::oppPtTEnd() const {
+ return fOppPtTEnd;
+}
+
+// A coincident span is unordered if the t values of its paired points in the main and
+// opposite curves do not consistently ascend or descend. For instance, if a tightly
+// arced quadratic is coincident with another curve, it may intersect it out of order.
+bool SkCoincidentSpans::ordered(bool* result) const {
+ const SkOpSpanBase* start = this->coinPtTStart()->span();
+ const SkOpSpanBase* end = this->coinPtTEnd()->span();
+ const SkOpSpanBase* next = start->upCast()->next();
+ if (next == end) {
+ *result = true;
+ return true;
+ }
+ bool flipped = this->flipped();
+ const SkOpSegment* oppSeg = this->oppPtTStart()->segment();
+ double oppLastT = fOppPtTStart->fT;
+ do {
+ const SkOpPtT* opp = next->contains(oppSeg);
+ if (!opp) {
+// SkOPOBJASSERT(start, 0); // may assert if coincident span isn't fully processed
+ return false;
+ }
+ if ((oppLastT > opp->fT) != flipped) {
+ *result = false;
+ return true;
+ }
+ oppLastT = opp->fT;
+ if (next == end) {
+ break;
+ }
+ if (!next->upCastable()) {
+ *result = false;
+ return true;
+ }
+ next = next->upCast()->next();
+ } while (true);
+ *result = true;
+ return true;
+}
+
+// if there is an existing pair that overlaps the addition, extend it
+bool SkOpCoincidence::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkCoincidentSpans* test = fHead;
+ if (!test) {
+ return false;
+ }
+ const SkOpSegment* coinSeg = coinPtTStart->segment();
+ const SkOpSegment* oppSeg = oppPtTStart->segment();
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ swap(coinPtTStart, oppPtTStart);
+ swap(coinPtTEnd, oppPtTEnd);
+ if (coinPtTStart->fT > coinPtTEnd->fT) {
+ swap(coinPtTStart, coinPtTEnd);
+ swap(oppPtTStart, oppPtTEnd);
+ }
+ }
+ double oppMinT = SkTMin(oppPtTStart->fT, oppPtTEnd->fT);
+ SkDEBUGCODE(double oppMaxT = SkTMax(oppPtTStart->fT, oppPtTEnd->fT));
+ do {
+ if (coinSeg != test->coinPtTStart()->segment()) {
+ continue;
+ }
+ if (oppSeg != test->oppPtTStart()->segment()) {
+ continue;
+ }
+ double oTestMinT = SkTMin(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ double oTestMaxT = SkTMax(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ // if this debug check triggers, the caller failed to check whether the extension already exists
+ SkASSERT(test->coinPtTStart()->fT > coinPtTStart->fT
+ || coinPtTEnd->fT > test->coinPtTEnd()->fT
+ || oTestMinT > oppMinT || oppMaxT > oTestMaxT);
+ if ((test->coinPtTStart()->fT <= coinPtTEnd->fT
+ && coinPtTStart->fT <= test->coinPtTEnd()->fT)
+ || (oTestMinT <= oTestMaxT && oppMinT <= oTestMaxT)) {
+ test->extend(coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+// verifies that the coincidence hasn't already been added
+static void DebugCheckAdd(const SkCoincidentSpans* check, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+#if DEBUG_COINCIDENCE
+ while (check) {
+ SkASSERT(check->coinPtTStart() != coinPtTStart || check->coinPtTEnd() != coinPtTEnd
+ || check->oppPtTStart() != oppPtTStart || check->oppPtTEnd() != oppPtTEnd);
+ SkASSERT(check->coinPtTStart() != oppPtTStart || check->coinPtTEnd() != oppPtTEnd
+ || check->oppPtTStart() != coinPtTStart || check->oppPtTEnd() != coinPtTEnd);
+ check = check->next();
+ }
+#endif
+}
+
+// adds a new coincident pair
+void SkOpCoincidence::add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+ SkOpPtT* oppPtTEnd) {
+ // OPTIMIZE: caller should have already sorted
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ if (oppPtTStart->fT < oppPtTEnd->fT) {
+ this->add(oppPtTStart, oppPtTEnd, coinPtTStart, coinPtTEnd);
+ } else {
+ this->add(oppPtTEnd, oppPtTStart, coinPtTEnd, coinPtTStart);
+ }
+ return;
+ }
+ SkASSERT(Ordered(coinPtTStart, oppPtTStart));
+ // choose the ptT at the front of the list to track
+ coinPtTStart = coinPtTStart->span()->ptT();
+ coinPtTEnd = coinPtTEnd->span()->ptT();
+ oppPtTStart = oppPtTStart->span()->ptT();
+ oppPtTEnd = oppPtTEnd->span()->ptT();
+ SkOPASSERT(coinPtTStart->fT < coinPtTEnd->fT);
+ SkOPASSERT(oppPtTStart->fT != oppPtTEnd->fT);
+ SkOPASSERT(!coinPtTStart->deleted());
+ SkOPASSERT(!coinPtTEnd->deleted());
+ SkOPASSERT(!oppPtTStart->deleted());
+ SkOPASSERT(!oppPtTEnd->deleted());
+ DebugCheckAdd(fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ DebugCheckAdd(fTop, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ SkCoincidentSpans* coinRec = this->globalState()->allocator()->make<SkCoincidentSpans>();
+ coinRec->init(SkDEBUGCODE(fGlobalState));
+ coinRec->set(this->fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ fHead = coinRec;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan) {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ int escapeHatch = 100000; // this is 100 times larger than the debugLoopLimit test
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ if (--escapeHatch <= 0) {
+ return false; // if triggered (likely by a fuzz-generated test) too complex to succeed
+ }
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+ // intersect perp with base->ptT() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i SkDEBUGCODE((this->globalState()));
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
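+ // Keep only ray hits inside (0..1) that land approximately on the base point; each
+ // such hit marks where the test segment passes through the moved end and is a
+ // candidate for a new coincident span.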
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ bool added;
+ FAIL_IF(!this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added));
+ }
+ }
+ return true;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpPtT* ptT) {
+ FAIL_IF(!ptT->span()->upCastable());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF(!prev);
+ if (!prev->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->prev())) {
+ return false;
+ }
+ }
+ if (!base->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->next())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
+bool SkOpCoincidence::addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return true;
+ }
+ fTop = span;
+ fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ FAIL_IF(1 == span->coinPtTStart()->fT);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ if (!this->addEndMovedSpans(span->oppPtTStart())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTStart())) {
+ return false;
+ }
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ if (!this->addEndMovedSpans(span->oppPtTEnd())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTEnd())) {
+ return false;
+ }
+ }
+ }
+ } while ((span = span->next()));
+ this->restoreHead();
+ return true;
+}
+
+/* Please keep this in sync with debugAddExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+bool SkOpCoincidence::addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return true;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF(!startPtT->contains(oStartPtT));
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF(oEnd->deleted());
+ FAIL_IF(!start->upCastable());
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF(!coin->flipped() && !oStart->upCastable());
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF(!oTest);
+ SkOpSegment* seg = start->segment();
+ SkOpSegment* oSeg = oStart->segment();
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+ // iterate through until a pt-t list found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF(!walk->upCastable());
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ FAIL_IF(!walkOpp);
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = nextT - priorT;
+ FAIL_IF(!startRange);
+ double startPart = (test->t() - priorT) / startRange;
+ double oStartRange = oNextT - oPriorT;
+ FAIL_IF(!oStartRange);
+ double oStartPart = (oTest->t() - oPriorT) / oStartRange;
+ FAIL_IF(startPart == oStartPart);
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ bool success = addToOpp ? oSeg->addExpanded(
+ oPriorT + oStartRange * startPart, test, &startOver)
+ : seg->addExpanded(
+ priorT + startRange * oStartPart, oTest, &startOver);
+ FAIL_IF(!success);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF(!test->upCastable());
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ if (coin->flipped()) {
+ oTest = oTest->prev();
+ } else {
+ FAIL_IF(!oTest->upCastable());
+ oTest = oTest->upCast()->next();
+ }
+ FAIL_IF(!oTest);
+ }
+
+ }
+ } while ((coin = coin->next()));
+ return true;
+}
+
+// given a t span, map the same range on the coincident span
+/*
+the curves may not scale linearly, so interpolation may only happen within known points;
+remap over1s, over1e, coinPtTStart, coinPtTEnd to the smallest range that captures over1s,
+then repeat to capture over1e
+*/
+double SkOpCoincidence::TRange(const SkOpPtT* overS, double t,
+ const SkOpSegment* coinSeg SkDEBUGPARAMS(const SkOpPtT* overE)) {
+ const SkOpSpanBase* work = overS->span();
+ const SkOpPtT* foundStart = nullptr;
+ const SkOpPtT* foundEnd = nullptr;
+ const SkOpPtT* coinStart = nullptr;
+ const SkOpPtT* coinEnd = nullptr;
+ do {
+ const SkOpPtT* contained = work->contains(coinSeg);
+ if (!contained) {
+ if (work->final()) {
+ break;
+ }
+ continue;
+ }
+ if (work->t() <= t) {
+ coinStart = contained;
+ foundStart = work->ptT();
+ }
+ if (work->t() >= t) {
+ coinEnd = contained;
+ foundEnd = work->ptT();
+ break;
+ }
+ SkASSERT(work->ptT() != overE);
+ } while ((work = work->upCast()->next()));
+ if (!coinStart || !coinEnd) {
+ return 1;
+ }
+ // interpolate t's position between the bracketing spans, then map that ratio onto
+ // the coincident segment's t range
+ double denom = foundEnd->fT - foundStart->fT;
+ double sRatio = denom ? (t - foundStart->fT) / denom : 1;
+ return coinStart->fT + (coinEnd->fT - coinStart->fT) * sRatio;
+}
+
+// return true if span overlaps existing and needs to adjust the coincident list
+bool SkOpCoincidence::checkOverlap(SkCoincidentSpans* check,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ SkTDArray<SkCoincidentSpans*>* overlaps) const {
+ if (!Ordered(coinSeg, oppSeg)) {
+ if (oppTs < oppTe) {
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTs, oppTe, coinTs, coinTe,
+ overlaps);
+ }
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTe, oppTs, coinTe, coinTs, overlaps);
+ }
+ bool swapOpp = oppTs > oppTe;
+ if (swapOpp) {
+ using std::swap;
+ swap(oppTs, oppTe);
+ }
+ do {
+ if (check->coinPtTStart()->segment() != coinSeg) {
+ continue;
+ }
+ if (check->oppPtTStart()->segment() != oppSeg) {
+ continue;
+ }
+ double checkTs = check->coinPtTStart()->fT;
+ double checkTe = check->coinPtTEnd()->fT;
+ bool coinOutside = coinTe < checkTs || coinTs > checkTe;
+ double oCheckTs = check->oppPtTStart()->fT;
+ double oCheckTe = check->oppPtTEnd()->fT;
+ if (swapOpp) {
+ if (oCheckTs <= oCheckTe) {
+ return false;
+ }
+ using std::swap;
+ swap(oCheckTs, oCheckTe);
+ }
+ bool oppOutside = oppTe < oCheckTs || oppTs > oCheckTe;
+ if (coinOutside && oppOutside) {
+ continue;
+ }
+ bool coinInside = coinTe <= checkTe && coinTs >= checkTs;
+ bool oppInside = oppTe <= oCheckTe && oppTs >= oCheckTs;
+ if (coinInside && oppInside) { // already included, do nothing
+ return false;
+ }
+ *overlaps->append() = check; // partial overlap, extend existing entry
+ } while ((check = check->next()));
+ return true;
+}
+
+/* Please keep this in sync with debugAddIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+bool SkOpCoincidence::addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg, bool* added
+ SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e)) {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ SkOpSpanBase::Collapsed result = coinSeg->collapsed(coinTs, coinTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return SkOpSpanBase::Collapsed::kYes == result;
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ result = oppSeg->collapsed(oppTs, oppTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return SkOpSpanBase::Collapsed::kYes == result;
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ (void) this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added);
+ return true;
+}
+
+/* Please keep this in sync with debugAddOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by addIfMissing(), a returned false indicates there was nothing to add.
+bool SkOpCoincidence::addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+ FAIL_IF(!fTop);
+ if (!this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ SkCoincidentSpans* overlap = overlaps.count() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.count(); ++index) { // combine overlaps before continuing
+ SkCoincidentSpans* test = overlaps[index];
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ overlap->setCoinPtTStart(test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ overlap->setCoinPtTEnd(test->coinPtTEnd());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ overlap->setOppPtTStart(test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ overlap->setOppPtTEnd(test->oppPtTEnd());
+ }
+ if (!fHead || !this->release(fHead, test)) {
+ SkAssertResult(this->release(fTop, test));
+ }
+ }
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ if (overlap && cs && ce && overlap->contains(cs, ce)) {
+ return true;
+ }
+ FAIL_IF(cs == ce && cs);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ if (overlap && os && oe && overlap->contains(os, oe)) {
+ return true;
+ }
+ FAIL_IF(cs && cs->deleted());
+ FAIL_IF(os && os->deleted());
+ FAIL_IF(ce && ce->deleted());
+ FAIL_IF(oe && oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ FAIL_IF(csExisting && csExisting == ceExisting);
+// FAIL_IF(csExisting && (csExisting == ce ||
+// csExisting->contains(ceExisting ? ceExisting : ce)));
+ FAIL_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)));
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ FAIL_IF(osExisting && osExisting == oeExisting);
+ FAIL_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)));
+ FAIL_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)));
+ // extra line in debug code
+ this->debugValidate();
+ if (!cs || !os) {
+ SkOpPtT* csWritable = cs ? const_cast<SkOpPtT*>(cs)
+ : coinSeg->addT(coinTs);
+ if (csWritable == ce) {
+ return true;
+ }
+ SkOpPtT* osWritable = os ? const_cast<SkOpPtT*>(os)
+ : oppSeg->addT(oppTs);
+ FAIL_IF(!csWritable || !osWritable);
+ csWritable->span()->addOpp(osWritable->span());
+ cs = csWritable;
+ os = osWritable->active();
+ FAIL_IF(!os);
+ FAIL_IF((ce && ce->deleted()) || (oe && oe->deleted()));
+ }
+ if (!ce || !oe) {
+ SkOpPtT* ceWritable = ce ? const_cast<SkOpPtT*>(ce)
+ : coinSeg->addT(coinTe);
+ SkOpPtT* oeWritable = oe ? const_cast<SkOpPtT*>(oe)
+ : oppSeg->addT(oppTe);
+ FAIL_IF(!ceWritable->span()->addOpp(oeWritable->span()));
+ ce = ceWritable;
+ oe = oeWritable;
+ }
+ this->debugValidate();
+ FAIL_IF(cs->deleted());
+ FAIL_IF(os->deleted());
+ FAIL_IF(ce->deleted());
+ FAIL_IF(oe->deleted());
+ FAIL_IF(cs->contains(ce) || os->contains(oe));
+ bool result = true;
+ if (overlap) {
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ result = overlap->extend(cs, ce, os, oe);
+ } else {
+ if (os->fT > oe->fT) {
+ using std::swap;
+ swap(cs, ce);
+ swap(os, oe);
+ }
+ result = overlap->extend(os, oe, cs, ce);
+ }
+#if DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlaps[0]->debugShow();
+ }
+#endif
+ } else {
+ this->add(cs, ce, os, oe);
+#if DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ if (result) {
+ *added = true;
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddMissing()
+/* detects overlaps of different coincident runs on same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+bool SkOpCoincidence::addMissing(bool* added DEBUG_COIN_DECLARE_PARAMS()) {
+ SkCoincidentSpans* outer = fHead;
+ *added = false;
+ if (!outer) {
+ return true;
+ }
+ fTop = outer;
+ fHead = nullptr;
+ do {
+ // addIfMissing() can modify the list that this loop is walking, so save the head
+ // and let the walker iterate over the old data unperturbed; addIfMissing() then
+ // adds to the head freely, and the saved head is restored at the end
+ const SkOpPtT* ocs = outer->coinPtTStart();
+ FAIL_IF(ocs->deleted());
+ const SkOpSegment* outerCoin = ocs->segment();
+ FAIL_IF(outerCoin->done());
+ const SkOpPtT* oos = outer->oppPtTStart();
+ if (oos->deleted()) {
+ return true;
+ }
+ const SkOpSegment* outerOpp = oos->segment();
+ SkOPASSERT(!outerOpp->done());
+ SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+ SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+ SkCoincidentSpans* inner = outer;
+#ifdef IS_FUZZING_WITH_LIBFUZZER
+ int safetyNet = 1000;
+#endif
+ while ((inner = inner->next())) {
+#ifdef IS_FUZZING_WITH_LIBFUZZER
+ if (!--safetyNet) {
+ return false;
+ }
+#endif
+ this->debugValidate();
+ double overS, overE;
+ const SkOpPtT* ics = inner->coinPtTStart();
+ FAIL_IF(ics->deleted());
+ const SkOpSegment* innerCoin = ics->segment();
+ FAIL_IF(innerCoin->done());
+ const SkOpPtT* ios = inner->oppPtTStart();
+ FAIL_IF(ios->deleted());
+ const SkOpSegment* innerOpp = ios->segment();
+ SkOPASSERT(!innerOpp->done());
+ SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+ SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
+ if (outerCoin == innerCoin) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ if (oce->deleted()) {
+ return true;
+ }
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ FAIL_IF(ice->deleted());
+ if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+ FAIL_IF(!this->addIfMissing(ocs->starter(oce), ics->starter(ice),
+ overS, overE, outerOppWritable, innerOppWritable, added
+ SkDEBUGPARAMS(ocs->debugEnder(oce))
+ SkDEBUGPARAMS(ics->debugEnder(ice))));
+ }
+ } else if (outerCoin == innerOpp) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ FAIL_IF(oce->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ FAIL_IF(ioe->deleted());
+ if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+ FAIL_IF(!this->addIfMissing(ocs->starter(oce), ios->starter(ioe),
+ overS, overE, outerOppWritable, innerCoinWritable, added
+ SkDEBUGPARAMS(ocs->debugEnder(oce))
+ SkDEBUGPARAMS(ios->debugEnder(ioe))));
+ }
+ } else if (outerOpp == innerCoin) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ FAIL_IF(ooe->deleted());
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ FAIL_IF(ice->deleted());
+ SkASSERT(outerCoin != innerOpp);
+ if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+ FAIL_IF(!this->addIfMissing(oos->starter(ooe), ics->starter(ice),
+ overS, overE, outerCoinWritable, innerOppWritable, added
+ SkDEBUGPARAMS(oos->debugEnder(ooe))
+ SkDEBUGPARAMS(ics->debugEnder(ice))));
+ }
+ } else if (outerOpp == innerOpp) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ FAIL_IF(ooe->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ if (ioe->deleted()) {
+ return true;
+ }
+ SkASSERT(outerCoin != innerCoin);
+ if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+ FAIL_IF(!this->addIfMissing(oos->starter(ooe), ios->starter(ioe),
+ overS, overE, outerCoinWritable, innerCoinWritable, added
+ SkDEBUGPARAMS(oos->debugEnder(ooe))
+ SkDEBUGPARAMS(ios->debugEnder(ioe))));
+ }
+ }
+ this->debugValidate();
+ }
+ } while ((outer = outer->next()));
+ this->restoreHead();
+ return true;
+}
+
+bool SkOpCoincidence::addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+ const SkOpSegment* seg2, const SkOpSegment* seg2o,
+ const SkOpPtT* overS, const SkOpPtT* overE) {
+ const SkOpPtT* s1 = overS->find(seg1);
+ const SkOpPtT* e1 = overE->find(seg1);
+ FAIL_IF(!s1);
+ FAIL_IF(!e1);
+ if (!s1->starter(e1)->span()->upCast()->windValue()) {
+ s1 = overS->find(seg1o);
+ e1 = overE->find(seg1o);
+ FAIL_IF(!s1);
+ FAIL_IF(!e1);
+ if (!s1->starter(e1)->span()->upCast()->windValue()) {
+ return true;
+ }
+ }
+ const SkOpPtT* s2 = overS->find(seg2);
+ const SkOpPtT* e2 = overE->find(seg2);
+ FAIL_IF(!s2);
+ FAIL_IF(!e2);
+ if (!s2->starter(e2)->span()->upCast()->windValue()) {
+ s2 = overS->find(seg2o);
+ e2 = overE->find(seg2o);
+ FAIL_IF(!s2);
+ FAIL_IF(!e2);
+ if (!s2->starter(e2)->span()->upCast()->windValue()) {
+ return true;
+ }
+ }
+ if (s1->segment() == s2->segment()) {
+ return true;
+ }
+ if (s1->fT > e1->fT) {
+ using std::swap;
+ swap(s1, e1);
+ swap(s2, e2);
+ }
+ this->add(s1, e1, s2, e2);
+ return true;
+}
+
+bool SkOpCoincidence::contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const {
+ if (this->contains(fHead, seg, opp, oppT)) {
+ return true;
+ }
+ if (this->contains(fTop, seg, opp, oppT)) {
+ return true;
+ }
+ return false;
+}
+
+bool SkOpCoincidence::contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+ const SkOpSegment* opp, double oppT) const {
+ if (!coin) {
+ return false;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == seg && coin->oppPtTStart()->segment() == opp
+ && between(coin->oppPtTStart()->fT, oppT, coin->oppPtTEnd()->fT)) {
+ return true;
+ }
+ if (coin->oppPtTStart()->segment() == seg && coin->coinPtTStart()->segment() == opp
+ && between(coin->coinPtTStart()->fT, oppT, coin->coinPtTEnd()->fT)) {
+ return true;
+ }
+ } while ((coin = coin->next()));
+ return false;
+}
+
+bool SkOpCoincidence::contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const {
+ const SkCoincidentSpans* test = fHead;
+ if (!test) {
+ return false;
+ }
+ const SkOpSegment* coinSeg = coinPtTStart->segment();
+ const SkOpSegment* oppSeg = oppPtTStart->segment();
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ swap(coinPtTStart, oppPtTStart);
+ swap(coinPtTEnd, oppPtTEnd);
+ if (coinPtTStart->fT > coinPtTEnd->fT) {
+ swap(coinPtTStart, coinPtTEnd);
+ swap(oppPtTStart, oppPtTEnd);
+ }
+ }
+ double oppMinT = SkTMin(oppPtTStart->fT, oppPtTEnd->fT);
+ double oppMaxT = SkTMax(oppPtTStart->fT, oppPtTEnd->fT);
+ do {
+ if (coinSeg != test->coinPtTStart()->segment()) {
+ continue;
+ }
+ if (coinPtTStart->fT < test->coinPtTStart()->fT) {
+ continue;
+ }
+ if (coinPtTEnd->fT > test->coinPtTEnd()->fT) {
+ continue;
+ }
+ if (oppSeg != test->oppPtTStart()->segment()) {
+ continue;
+ }
+ if (oppMinT < SkTMin(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+ continue;
+ }
+ if (oppMaxT > SkTMax(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+ continue;
+ }
+ return true;
+ } while ((test = test->next()));
+ return false;
+}
+
+void SkOpCoincidence::correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ coin->correctEnds();
+ } while ((coin = coin->next()));
+}
+
+// walk span sets in parallel, moving winding from one to the other
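+// e.g. where two edges exactly overlap, only one should contribute winding to
+// the output; apply() folds each pair's winding into one span and marks the
+// emptied spans done.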
+bool SkOpCoincidence::apply(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return true;
+ }
+ do {
+ SkOpSpanBase* startSpan = coin->coinPtTStartWritable()->span();
+ FAIL_IF(!startSpan->upCastable());
+ SkOpSpan* start = startSpan->upCast();
+ if (start->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ FAIL_IF(start != start->starter(end));
+ bool flipped = coin->flipped();
+ SkOpSpanBase* oStartBase = (flipped ? coin->oppPtTEndWritable()
+ : coin->oppPtTStartWritable())->span();
+ FAIL_IF(!oStartBase->upCastable());
+ SkOpSpan* oStart = oStartBase->upCast();
+ if (oStart->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* oEnd = (flipped ? coin->oppPtTStart() : coin->oppPtTEnd())->span();
+ SkASSERT(oStart == oStart->starter(oEnd));
+ SkOpSegment* segment = start->segment();
+ SkOpSegment* oSegment = oStart->segment();
+ bool operandSwap = segment->operand() != oSegment->operand();
+ if (flipped) {
+ if (oEnd->deleted()) {
+ continue;
+ }
+ do {
+ SkOpSpanBase* oNext = oStart->next();
+ if (oNext == oEnd) {
+ break;
+ }
+ FAIL_IF(!oNext->upCastable());
+ oStart = oNext->upCast();
+ } while (true);
+ }
+ do {
+ int windValue = start->windValue();
+ int oppValue = start->oppValue();
+ int oWindValue = oStart->windValue();
+ int oOppValue = oStart->oppValue();
+ // winding values are added or subtracted depending on direction and wind type
+ // same or opposite values are summed depending on the operand value
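+            // Illustrative case: with flipped == false, operandSwap == false,
+            // and both spans carrying windValue == 1 with oppValue == 0, the
+            // kept span's windValue becomes 1 + 1 == 2 while the other span is
+            // zeroed and marked done, so the doubled coverage counts only once.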
+ int windDiff = operandSwap ? oOppValue : oWindValue;
+ int oWindDiff = operandSwap ? oppValue : windValue;
+ if (!flipped) {
+ windDiff = -windDiff;
+ oWindDiff = -oWindDiff;
+ }
+ bool addToStart = windValue && (windValue > windDiff || (windValue == windDiff
+ && oWindValue <= oWindDiff));
+ if (addToStart ? start->done() : oStart->done()) {
+ addToStart ^= true;
+ }
+ if (addToStart) {
+ if (operandSwap) {
+ using std::swap;
+ swap(oWindValue, oOppValue);
+ }
+ if (flipped) {
+ windValue -= oWindValue;
+ oppValue -= oOppValue;
+ } else {
+ windValue += oWindValue;
+ oppValue += oOppValue;
+ }
+ if (segment->isXor()) {
+ windValue &= 1;
+ }
+ if (segment->oppXor()) {
+ oppValue &= 1;
+ }
+ oWindValue = oOppValue = 0;
+ } else {
+ if (operandSwap) {
+ using std::swap;
+ swap(windValue, oppValue);
+ }
+ if (flipped) {
+ oWindValue -= windValue;
+ oOppValue -= oppValue;
+ } else {
+ oWindValue += windValue;
+ oOppValue += oppValue;
+ }
+ if (oSegment->isXor()) {
+ oWindValue &= 1;
+ }
+ if (oSegment->oppXor()) {
+ oOppValue &= 1;
+ }
+ windValue = oppValue = 0;
+ }
+#if 0 && DEBUG_COINCIDENCE
+ SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", segment->debugID(),
+ start->debugID(), windValue, oppValue);
+ SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", oSegment->debugID(),
+ oStart->debugID(), oWindValue, oOppValue);
+#endif
+ FAIL_IF(windValue <= -1);
+ start->setWindValue(windValue);
+ start->setOppValue(oppValue);
+ FAIL_IF(oWindValue <= -1);
+ oStart->setWindValue(oWindValue);
+ oStart->setOppValue(oOppValue);
+ if (!windValue && !oppValue) {
+ segment->markDone(start);
+ }
+ if (!oWindValue && !oOppValue) {
+ oSegment->markDone(oStart);
+ }
+ SkOpSpanBase* next = start->next();
+ SkOpSpanBase* oNext = flipped ? oStart->prev() : oStart->next();
+ if (next == end) {
+ break;
+ }
+ FAIL_IF(!next->upCastable());
+ start = next->upCast();
+ // if the opposite ran out too soon, just reuse the last span
+ if (!oNext || !oNext->upCastable()) {
+ oNext = oStart;
+ }
+ oStart = oNext->upCast();
+ } while (true);
+ } while ((coin = coin->next()));
+ return true;
+}
+
+// Please keep this in sync with debugRelease()
+bool SkOpCoincidence::release(SkCoincidentSpans* coin, SkCoincidentSpans* remove) {
+ SkCoincidentSpans* head = coin;
+ SkCoincidentSpans* prev = nullptr;
+ SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin == remove) {
+ if (prev) {
+ prev->setNext(next);
+ } else if (head == fHead) {
+ fHead = next;
+ } else {
+ fTop = next;
+ }
+ break;
+ }
+ prev = coin;
+ } while ((coin = next));
+ return coin != nullptr;
+}
+
+void SkOpCoincidence::releaseDeleted(SkCoincidentSpans* coin) {
+ if (!coin) {
+ return;
+ }
+ SkCoincidentSpans* head = coin;
+ SkCoincidentSpans* prev = nullptr;
+ SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin->coinPtTStart()->deleted()) {
+ SkOPASSERT(coin->flipped() ? coin->oppPtTEnd()->deleted() :
+ coin->oppPtTStart()->deleted());
+ if (prev) {
+ prev->setNext(next);
+ } else if (head == fHead) {
+ fHead = next;
+ } else {
+ fTop = next;
+ }
+ } else {
+ SkOPASSERT(coin->flipped() ? !coin->oppPtTEnd()->deleted() :
+ !coin->oppPtTStart()->deleted());
+ prev = coin;
+ }
+ } while ((coin = next));
+}
+
+void SkOpCoincidence::releaseDeleted() {
+ this->releaseDeleted(fHead);
+ this->releaseDeleted(fTop);
+}
+
+void SkOpCoincidence::restoreHead() {
+ SkCoincidentSpans** headPtr = &fHead;
+ while (*headPtr) {
+ headPtr = (*headPtr)->nextPtr();
+ }
+ *headPtr = fTop;
+ fTop = nullptr;
+ // segments may have collapsed in the meantime; remove empty referenced segments
+ headPtr = &fHead;
+ while (*headPtr) {
+ SkCoincidentSpans* test = *headPtr;
+ if (test->coinPtTStart()->segment()->done() || test->oppPtTStart()->segment()->done()) {
+ *headPtr = test->next();
+ continue;
+ }
+ headPtr = (*headPtr)->nextPtr();
+ }
+}
+
+// Please keep this in sync with debugExpand()
+// expand the range by checking adjacent spans for coincidence
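+// For example, when two entries abut on the same segment pair, expanding one
+// may grow it to match the other's start exactly; the duplicate entry is then
+// released so the list keeps one entry per coincident run.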
+bool SkOpCoincidence::expand(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return false;
+ }
+ bool expanded = false;
+ do {
+ if (coin->expand()) {
+ // check to see if multiple spans expanded so they are now identical
+ SkCoincidentSpans* test = fHead;
+ do {
+ if (coin == test) {
+ continue;
+ }
+ if (coin->coinPtTStart() == test->coinPtTStart()
+ && coin->oppPtTStart() == test->oppPtTStart()) {
+ this->release(fHead, test);
+ break;
+ }
+ } while ((test = test->next()));
+ expanded = true;
+ }
+ } while ((coin = coin->next()));
+ return expanded;
+}
+
+bool SkOpCoincidence::findOverlaps(SkOpCoincidence* overlaps DEBUG_COIN_DECLARE_PARAMS()) const {
+ DEBUG_SET_PHASE();
+ overlaps->fHead = overlaps->fTop = nullptr;
+ SkCoincidentSpans* outer = fHead;
+ while (outer) {
+ const SkOpSegment* outerCoin = outer->coinPtTStart()->segment();
+ const SkOpSegment* outerOpp = outer->oppPtTStart()->segment();
+ SkCoincidentSpans* inner = outer;
+ while ((inner = inner->next())) {
+ const SkOpSegment* innerCoin = inner->coinPtTStart()->segment();
+ if (outerCoin == innerCoin) {
+ continue; // both winners are the same segment, so there's no additional overlap
+ }
+ const SkOpSegment* innerOpp = inner->oppPtTStart()->segment();
+ const SkOpPtT* overlapS;
+ const SkOpPtT* overlapE;
+ if ((outerOpp == innerCoin && SkOpPtT::Overlaps(outer->oppPtTStart(),
+                    outer->oppPtTEnd(), inner->coinPtTStart(), inner->coinPtTEnd(), &overlapS,
+ &overlapE))
+ || (outerCoin == innerOpp && SkOpPtT::Overlaps(outer->coinPtTStart(),
+ outer->coinPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+ &overlapS, &overlapE))
+ || (outerOpp == innerOpp && SkOpPtT::Overlaps(outer->oppPtTStart(),
+ outer->oppPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+ &overlapS, &overlapE))) {
+ if (!overlaps->addOverlap(outerCoin, outerOpp, innerCoin, innerOpp,
+ overlapS, overlapE)) {
+ return false;
+ }
+ }
+ }
+ outer = outer->next();
+ }
+ return true;
+}
+
+void SkOpCoincidence::fixUp(SkOpPtT* deleted, const SkOpPtT* kept) {
+ SkOPASSERT(deleted != kept);
+ if (fHead) {
+ this->fixUp(fHead, deleted, kept);
+ }
+ if (fTop) {
+ this->fixUp(fTop, deleted, kept);
+ }
+}
+
+void SkOpCoincidence::fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept) {
+ SkCoincidentSpans* head = coin;
+ do {
+ if (coin->coinPtTStart() == deleted) {
+ if (coin->coinPtTEnd()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setCoinPtTStart(kept);
+ }
+ if (coin->coinPtTEnd() == deleted) {
+ if (coin->coinPtTStart()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setCoinPtTEnd(kept);
+ }
+ if (coin->oppPtTStart() == deleted) {
+ if (coin->oppPtTEnd()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setOppPtTStart(kept);
+ }
+ if (coin->oppPtTEnd() == deleted) {
+ if (coin->oppPtTStart()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setOppPtTEnd(kept);
+ }
+ } while ((coin = coin->next()));
+}
+
+// Please keep this in sync with debugMark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
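+/* For example, a coincident run may cover three spans on one segment but only
+   two on the other; both shared endpoints are linked explicitly, and each
+   interior span is linked as often as the opposite span count allows. */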
+bool SkOpCoincidence::mark(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return true;
+ }
+ do {
+ SkOpSpanBase* startBase = coin->coinPtTStartWritable()->span();
+ FAIL_IF(!startBase->upCastable());
+ SkOpSpan* start = startBase->upCast();
+ FAIL_IF(start->deleted());
+ SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+ SkOPASSERT(!end->deleted());
+ SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+ SkOPASSERT(!oStart->deleted());
+ SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+ FAIL_IF(oEnd->deleted());
+ bool flipped = coin->flipped();
+ if (flipped) {
+ using std::swap;
+ swap(oStart, oEnd);
+ }
+ /* coin and opp spans may not match up. Mark the ends, and then let the interior
+ get marked as many times as the spans allow */
+ FAIL_IF(!oStart->upCastable());
+ start->insertCoincidence(oStart->upCast());
+ end->insertCoinEnd(oEnd);
+ const SkOpSegment* segment = start->segment();
+ const SkOpSegment* oSegment = oStart->segment();
+ SkOpSpanBase* next = start;
+ SkOpSpanBase* oNext = oStart;
+ bool ordered;
+ FAIL_IF(!coin->ordered(&ordered));
+ while ((next = next->upCast()->next()) != end) {
+ FAIL_IF(!next->upCastable());
+ FAIL_IF(!next->upCast()->insertCoincidence(oSegment, flipped, ordered));
+ }
+ while ((oNext = oNext->upCast()->next()) != oEnd) {
+ FAIL_IF(!oNext->upCastable());
+ FAIL_IF(!oNext->upCast()->insertCoincidence(segment, flipped, ordered));
+ }
+ } while ((coin = coin->next()));
+ return true;
+}
+
+// Please keep in sync with debugMarkCollapsed()
+void SkOpCoincidence::markCollapsed(SkCoincidentSpans* coin, SkOpPtT* test) {
+ SkCoincidentSpans* head = coin;
+ while (coin) {
+ if (coin->collapsed(test)) {
+ if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+ coin->coinPtTStartWritable()->segment()->markAllDone();
+ }
+ if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+ coin->oppPtTStartWritable()->segment()->markAllDone();
+ }
+ this->release(head, coin);
+ }
+ coin = coin->next();
+ }
+}
+
+// Please keep in sync with debugMarkCollapsed()
+void SkOpCoincidence::markCollapsed(SkOpPtT* test) {
+ markCollapsed(fHead, test);
+ markCollapsed(fTop, test);
+}
+
+bool SkOpCoincidence::Ordered(const SkOpSegment* coinSeg, const SkOpSegment* oppSeg) {
+ if (coinSeg->verb() < oppSeg->verb()) {
+ return true;
+ }
+ if (coinSeg->verb() > oppSeg->verb()) {
+ return false;
+ }
+ int count = (SkPathOpsVerbToPoints(coinSeg->verb()) + 1) * 2;
+ const SkScalar* cPt = &coinSeg->pts()[0].fX;
+ const SkScalar* oPt = &oppSeg->pts()[0].fX;
+ for (int index = 0; index < count; ++index) {
+ if (*cPt < *oPt) {
+ return true;
+ }
+ if (*cPt > *oPt) {
+ return false;
+ }
+ ++cPt;
+ ++oPt;
+ }
+ return true;
+}
+
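+// overlap() intersects two t-ranges on a shared segment: e.g. [0.2, 0.6] and
+// [0.4, 0.9] produce *overS == 0.4 and *overE == 0.6 (returning true), while
+// ranges that merely touch, like [0.2, 0.4] and [0.4, 0.9], return false.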
+bool SkOpCoincidence::overlap(const SkOpPtT* coin1s, const SkOpPtT* coin1e,
+ const SkOpPtT* coin2s, const SkOpPtT* coin2e, double* overS, double* overE) const {
+ SkASSERT(coin1s->segment() == coin2s->segment());
+ *overS = SkTMax(SkTMin(coin1s->fT, coin1e->fT), SkTMin(coin2s->fT, coin2e->fT));
+ *overE = SkTMin(SkTMax(coin1s->fT, coin1e->fT), SkTMax(coin2s->fT, coin2e->fT));
+ return *overS < *overE;
+}
+
+// Please keep this in sync with debugRelease()
+void SkOpCoincidence::release(const SkOpSegment* deleted) {
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == deleted
+ || coin->coinPtTEnd()->segment() == deleted
+ || coin->oppPtTStart()->segment() == deleted
+ || coin->oppPtTEnd()->segment() == deleted) {
+ this->release(fHead, coin);
+ }
+ } while ((coin = coin->next()));
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.h b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
new file mode 100644
index 0000000000..283c28e105
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpCoincidence_DEFINED
+#define SkOpCoincidence_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkOpPtT;
+class SkOpSpanBase;
+
+class SkCoincidentSpans {
+public:
+ const SkOpPtT* coinPtTEnd() const;
+ const SkOpPtT* coinPtTStart() const;
+
+ // These return non-const pointers so that, as copies, they can be added
+ // to a new span pair
+ SkOpPtT* coinPtTEndWritable() const { return const_cast<SkOpPtT*>(fCoinPtTEnd); }
+ SkOpPtT* coinPtTStartWritable() const { return const_cast<SkOpPtT*>(fCoinPtTStart); }
+
+ bool collapsed(const SkOpPtT* ) const;
+ bool contains(const SkOpPtT* s, const SkOpPtT* e) const;
+ void correctEnds();
+ void correctOneEnd(const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) );
+
+#if DEBUG_COIN
+ void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+ void debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) const) const;
+ bool debugExpand(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ const char* debugID() const {
+#if DEBUG_COIN
+ return fGlobalState->debugCoinDictEntry().fFunctionName;
+#else
+ return nullptr;
+#endif
+ }
+
+ void debugShow() const;
+#ifdef SK_DEBUG
+ void debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+ const SkOpGlobalState* debugState) const;
+#endif
+ void dump() const;
+ bool expand();
+ bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+ bool flipped() const { return fOppPtTStart->fT > fOppPtTEnd->fT; }
+ SkDEBUGCODE(SkOpGlobalState* globalState() { return fGlobalState; })
+
+ void init(SkDEBUGCODE(SkOpGlobalState* globalState)) {
+ sk_bzero(this, sizeof(*this));
+ SkDEBUGCODE(fGlobalState = globalState);
+ }
+
+ SkCoincidentSpans* next() { return fNext; }
+ const SkCoincidentSpans* next() const { return fNext; }
+ SkCoincidentSpans** nextPtr() { return &fNext; }
+ const SkOpPtT* oppPtTStart() const;
+ const SkOpPtT* oppPtTEnd() const;
+ // These return non-const pointers so that, as copies, they can be added
+ // to a new span pair
+ SkOpPtT* oppPtTStartWritable() const { return const_cast<SkOpPtT*>(fOppPtTStart); }
+ SkOpPtT* oppPtTEndWritable() const { return const_cast<SkOpPtT*>(fOppPtTEnd); }
+ bool ordered(bool* result) const;
+
+ void set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+
+ void setCoinPtTEnd(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fCoinPtTStart || ptT->fT != fCoinPtTStart->fT);
+ SkASSERT(!fCoinPtTStart || fCoinPtTStart->segment() == ptT->segment());
+ fCoinPtTEnd = ptT;
+ ptT->setCoincident();
+ }
+
+ void setCoinPtTStart(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fCoinPtTEnd || ptT->fT != fCoinPtTEnd->fT);
+ SkASSERT(!fCoinPtTEnd || fCoinPtTEnd->segment() == ptT->segment());
+ fCoinPtTStart = ptT;
+ ptT->setCoincident();
+ }
+
+ void setEnds(const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTEnd) {
+ this->setCoinPtTEnd(coinPtTEnd);
+ this->setOppPtTEnd(oppPtTEnd);
+ }
+
+ void setOppPtTEnd(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fOppPtTStart || ptT->fT != fOppPtTStart->fT);
+ SkASSERT(!fOppPtTStart || fOppPtTStart->segment() == ptT->segment());
+ fOppPtTEnd = ptT;
+ ptT->setCoincident();
+ }
+
+ void setOppPtTStart(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fOppPtTEnd || ptT->fT != fOppPtTEnd->fT);
+ SkASSERT(!fOppPtTEnd || fOppPtTEnd->segment() == ptT->segment());
+ fOppPtTStart = ptT;
+ ptT->setCoincident();
+ }
+
+ void setStarts(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+ this->setCoinPtTStart(coinPtTStart);
+ this->setOppPtTStart(oppPtTStart);
+ }
+
+ void setNext(SkCoincidentSpans* next) { fNext = next; }
+
+private:
+ SkCoincidentSpans* fNext;
+ const SkOpPtT* fCoinPtTStart;
+ const SkOpPtT* fCoinPtTEnd;
+ const SkOpPtT* fOppPtTStart;
+ const SkOpPtT* fOppPtTEnd;
+ SkDEBUGCODE(SkOpGlobalState* fGlobalState);
+};
+
+class SkOpCoincidence {
+public:
+ SkOpCoincidence(SkOpGlobalState* globalState)
+ : fHead(nullptr)
+ , fTop(nullptr)
+ , fGlobalState(globalState)
+ , fContinue(false)
+ , fSpanDeleted(false)
+ , fPtAllocated(false)
+ , fCoinExtended(false)
+ , fSpanMerged(false) {
+ globalState->setCoincidence(this);
+ }
+
+ void add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+ SkOpPtT* oppPtTEnd);
+ bool addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool addMissing(bool* added DEBUG_COIN_DECLARE_PARAMS());
+ bool apply(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const;
+ void correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+
+#if DEBUG_COIN
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const;
+ void debugAddExpanded(SkPathOpsDebug::GlitchLog* ) const;
+ void debugAddMissing(SkPathOpsDebug::GlitchLog* , bool* added) const;
+ void debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ bool* added) const;
+#endif
+
+ const SkOpAngle* debugAngle(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugAngle(id), nullptr);
+ }
+
+ void debugCheckBetween() const;
+
+#if DEBUG_COIN
+ void debugCheckValid(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ SkOpContour* debugContour(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugContour(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+ bool debugExpand(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMark(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* ,
+ const SkCoincidentSpans* coin, const SkOpPtT* test) const;
+ void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* , const SkOpPtT* test) const;
+#endif
+
+ const SkOpPtT* debugPtT(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugPtT(id), nullptr);
+ }
+
+ const SkOpSegment* debugSegment(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugSegment(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugRelease(SkPathOpsDebug::GlitchLog* , const SkCoincidentSpans* ,
+ const SkCoincidentSpans* ) const;
+ void debugRelease(SkPathOpsDebug::GlitchLog* , const SkOpSegment* ) const;
+#endif
+ void debugShowCoincidence() const;
+
+ const SkOpSpanBase* debugSpan(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugSpan(id), nullptr);
+ }
+
+ void debugValidate() const;
+ void dump() const;
+ bool expand(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+ const SkOpPtT* oppPtTEnd);
+ bool findOverlaps(SkOpCoincidence* DEBUG_COIN_DECLARE_PARAMS()) const;
+ void fixUp(SkOpPtT* deleted, const SkOpPtT* kept);
+
+ SkOpGlobalState* globalState() {
+ return fGlobalState;
+ }
+
+ const SkOpGlobalState* globalState() const {
+ return fGlobalState;
+ }
+
+ bool isEmpty() const {
+ return !fHead && !fTop;
+ }
+
+ bool mark(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ void markCollapsed(SkOpPtT* );
+
+ static bool Ordered(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+ return Ordered(coinPtTStart->segment(), oppPtTStart->segment());
+ }
+
+ static bool Ordered(const SkOpSegment* coin, const SkOpSegment* opp);
+ void release(const SkOpSegment* );
+ void releaseDeleted();
+
+private:
+ void add(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+ const SkOpPtT* oppPtTEnd) {
+ this->add(const_cast<SkOpPtT*>(coinPtTStart), const_cast<SkOpPtT*>(coinPtTEnd),
+ const_cast<SkOpPtT*>(oppPtTStart), const_cast<SkOpPtT*>(oppPtTEnd));
+ }
+
+ bool addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan);
+ bool addEndMovedSpans(const SkOpPtT* ptT);
+
+ bool addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ bool* added
+ SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e));
+ bool addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added);
+ bool addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+ const SkOpSegment* seg2, const SkOpSegment* seg2o,
+ const SkOpPtT* overS, const SkOpPtT* overE);
+ bool checkOverlap(SkCoincidentSpans* check,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ SkTDArray<SkCoincidentSpans*>* overlaps) const;
+ bool contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const;
+ bool contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+ const SkOpSegment* opp, double oppT) const;
+#if DEBUG_COIN
+ void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+ const SkCoincidentSpans* outer, const SkOpPtT* over1s,
+ const SkOpPtT* over1e) const;
+ void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+ const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+ const SkOpPtT* over1e, const SkOpPtT* over2e) const;
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSpan* base, const SkOpSpanBase* testSpan) const;
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+ const SkOpPtT* ptT) const;
+#endif
+ void fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept);
+ void markCollapsed(SkCoincidentSpans* head, SkOpPtT* test);
+ bool overlap(const SkOpPtT* coinStart1, const SkOpPtT* coinEnd1,
+ const SkOpPtT* coinStart2, const SkOpPtT* coinEnd2,
+ double* overS, double* overE) const;
+ bool release(SkCoincidentSpans* coin, SkCoincidentSpans* );
+ void releaseDeleted(SkCoincidentSpans* );
+ void restoreHead();
+ // return coinPtT->segment()->t mapped from overS->fT <= t <= overE->fT
+ static double TRange(const SkOpPtT* overS, double t, const SkOpSegment* coinPtT
+ SkDEBUGPARAMS(const SkOpPtT* overE));
+
+ SkCoincidentSpans* fHead;
+ SkCoincidentSpans* fTop;
+ SkOpGlobalState* fGlobalState;
+ bool fContinue;
+ bool fSpanDeleted;
+ bool fPtAllocated;
+ bool fCoinExtended;
+ bool fSpanMerged;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.cpp b/gfx/skia/skia/src/pathops/SkOpContour.cpp
new file mode 100644
index 0000000000..508c324725
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.cpp
@@ -0,0 +1,109 @@
+/*
+* Copyright 2013 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkPathWriter.h"
+#include "src/pathops/SkReduceOrder.h"
+
+void SkOpContour::toPath(SkPathWriter* path) const {
+ if (!this->count()) {
+ return;
+ }
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ path->finishContour();
+ path->assemble();
+}
+
+void SkOpContour::toReversePath(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ path->finishContour();
+ path->assemble();
+}
+
+SkOpSpan* SkOpContour::undoneSpan() {
+ SkOpSegment* testSegment = &fHead;
+ do {
+ if (testSegment->done()) {
+ continue;
+ }
+ return testSegment->undoneSpan();
+ } while ((testSegment = testSegment->next()));
+ fDone = true;
+ return nullptr;
+}
+
+void SkOpContourBuilder::addConic(SkPoint pts[3], SkScalar weight) {
+ this->flush();
+ fContour->addConic(pts, weight);
+}
+
+void SkOpContourBuilder::addCubic(SkPoint pts[4]) {
+ this->flush();
+ fContour->addCubic(pts);
+}
+
+void SkOpContourBuilder::addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight) {
+ if (SkPath::kLine_Verb == verb) {
+ this->addLine(pts);
+ return;
+ }
+ SkArenaAlloc* allocator = fContour->globalState()->allocator();
+ switch (verb) {
+ case SkPath::kQuad_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ this->addQuad(ptStorage);
+ } break;
+ case SkPath::kConic_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ this->addConic(ptStorage, weight);
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(4);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 4);
+ this->addCubic(ptStorage);
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+void SkOpContourBuilder::addLine(const SkPoint pts[2]) {
+ // if the previous line added is the exact opposite, eliminate both
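+    // e.g. addLine({A, B}) immediately followed by addLine({B, A}) cancels:
+    // the pending line is dropped and nothing reaches the contour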
+ if (fLastIsLine) {
+ if (fLastLine[0] == pts[1] && fLastLine[1] == pts[0]) {
+ fLastIsLine = false;
+ return;
+ } else {
+ flush();
+ }
+ }
+ memcpy(fLastLine, pts, sizeof(fLastLine));
+ fLastIsLine = true;
+}
+
+void SkOpContourBuilder::addQuad(SkPoint pts[3]) {
+ this->flush();
+ fContour->addQuad(pts);
+}
+
+void SkOpContourBuilder::flush() {
+ if (!fLastIsLine)
+ return;
+ SkArenaAlloc* allocator = fContour->globalState()->allocator();
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(2);
+ memcpy(ptStorage, fLastLine, sizeof(fLastLine));
+ (void) fContour->addLine(ptStorage);
+ fLastIsLine = false;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.h b/gfx/skia/skia/src/pathops/SkOpContour.h
new file mode 100644
index 0000000000..e5a55fe964
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.h
@@ -0,0 +1,454 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpContour_DEFINED
+#define SkOpContour_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkOpSegment.h"
+
+enum class SkOpRayDir;
+struct SkOpRayHit;
+class SkPathWriter;
+
+class SkOpContour {
+public:
+ SkOpContour() {
+ reset();
+ }
+
+ bool operator<(const SkOpContour& rh) const {
+ return fBounds.fTop == rh.fBounds.fTop
+ ? fBounds.fLeft < rh.fBounds.fLeft
+ : fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ void addConic(SkPoint pts[3], SkScalar weight) {
+ appendSegment().addConic(pts, weight, this);
+ }
+
+ void addCubic(SkPoint pts[4]) {
+ appendSegment().addCubic(pts, this);
+ }
+
+ SkOpSegment* addLine(SkPoint pts[2]) {
+ SkASSERT(pts[0] != pts[1]);
+ return appendSegment().addLine(pts, this);
+ }
+
+ void addQuad(SkPoint pts[3]) {
+ appendSegment().addQuad(pts, this);
+ }
+
+ SkOpSegment& appendSegment() {
+ SkOpSegment* result = fCount++ ? this->globalState()->allocator()->make<SkOpSegment>()
+ : &fHead;
+ result->setPrev(fTail);
+ if (fTail) {
+ fTail->setNext(result);
+ }
+ fTail = result;
+ return *result;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void calcAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->calcAngles();
+ } while ((segment = segment->next()));
+ }
+
+ void complete() {
+ setBounds();
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ int debugIndent() const {
+ return SkDEBUGRELEASE(fDebugIndent, 0);
+ }
+
+
+ const SkOpAngle* debugAngle(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugAngle(id), nullptr);
+ }
+
+ const SkOpCoincidence* debugCoincidence() const {
+ return this->globalState()->coincidence();
+ }
+
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+
+ SkOpContour* debugContour(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugContour(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ const SkOpPtT* debugPtT(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugPtT(id), nullptr);
+ }
+
+ const SkOpSegment* debugSegment(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSegment(id), nullptr);
+ }
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans(SkString* str) {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->debugShowActiveSpans(str);
+ } while ((segment = segment->next()));
+ }
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSpan(id), nullptr);
+ }
+
+ SkOpGlobalState* globalState() const {
+ return fState;
+ }
+
+ void debugValidate() const {
+#if DEBUG_VALIDATE
+ const SkOpSegment* segment = &fHead;
+ const SkOpSegment* prior = nullptr;
+ do {
+ segment->debugValidate();
+ SkASSERT(segment->prev() == prior);
+ prior = segment;
+ } while ((segment = segment->next()));
+ SkASSERT(prior == fTail);
+#endif
+ }
+
+ bool done() const {
+ return fDone;
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpContours() const;
+ void dumpContoursAll() const;
+ void dumpContoursAngles() const;
+ void dumpContoursPts() const;
+ void dumpContoursPt(int segmentID) const;
+ void dumpContoursSegment(int segmentID) const;
+ void dumpContoursSpan(int segmentID) const;
+ void dumpContoursSpans() const;
+ void dumpPt(int ) const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsX(const char* prefix) const;
+ void dumpSegment(int ) const;
+ void dumpSegments(const char* prefix = "seg", SkPathOp op = (SkPathOp) -1) const;
+ void dumpSpan(int ) const;
+ void dumpSpans() const;
+
+ const SkPoint& end() const {
+ return fTail->pts()[SkPathOpsVerbToPoints(fTail->verb())];
+ }
+
+ SkOpSpan* findSortableTop(SkOpContour* );
+
+ SkOpSegment* first() {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ const SkOpSegment* first() const {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ void indentDump() const {
+ SkDEBUGCODE(fDebugIndent += 2);
+ }
+
+ void init(SkOpGlobalState* globalState, bool operand, bool isXor) {
+ fState = globalState;
+ fOperand = operand;
+ fXor = isXor;
+ SkDEBUGCODE(fID = globalState->nextContourID());
+ }
+
+ int isCcw() const {
+ return fCcw;
+ }
+
+ bool isXor() const {
+ return fXor;
+ }
+
+ void joinSegments() {
+ SkOpSegment* segment = &fHead;
+ SkOpSegment* next;
+ do {
+ next = segment->next();
+ segment->joinEnds(next ? next : &fHead);
+ } while ((segment = next));
+ }
+
+ void markAllDone() {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->markAllDone();
+ } while ((segment = segment->next()));
+ }
+
+ // Please keep this aligned with debugMissingCoincidence()
+ bool missingCoincidence() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ bool result = false;
+ do {
+ if (segment->missingCoincidence()) {
+ result = true;
+ }
+ segment = segment->next();
+ } while (segment);
+ return result;
+ }
+
+ bool moveMultiples() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ if (!segment->moveMultiples()) {
+ return false;
+ }
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ bool moveNearby() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ if (!segment->moveNearby()) {
+ return false;
+ }
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ SkOpContour* next() {
+ return fNext;
+ }
+
+ const SkOpContour* next() const {
+ return fNext;
+ }
+
+ bool operand() const {
+ return fOperand;
+ }
+
+ bool oppXor() const {
+ return fOppXor;
+ }
+
+ void outdentDump() const {
+ SkDEBUGCODE(fDebugIndent -= 2);
+ }
+
+ void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkArenaAlloc*);
+
+ void reset() {
+ fTail = nullptr;
+ fNext = nullptr;
+ fCount = 0;
+ fDone = false;
+ SkDEBUGCODE(fBounds.setLTRB(SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin));
+ SkDEBUGCODE(fFirstSorted = -1);
+ SkDEBUGCODE(fDebugIndent = 0);
+ }
+
+ void resetReverse() {
+ SkOpContour* next = this;
+ do {
+ if (!next->count()) {
+ continue;
+ }
+ next->fCcw = -1;
+ next->fReverse = false;
+ } while ((next = next->next()));
+ }
+
+ bool reversed() const {
+ return fReverse;
+ }
+
+ void setBounds() {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ fBounds = segment->bounds();
+ while ((segment = segment->next())) {
+ fBounds.add(segment->bounds());
+ }
+ }
+
+ void setCcw(int ccw) {
+ fCcw = ccw;
+ }
+
+ void setGlobalState(SkOpGlobalState* state) {
+ fState = state;
+ }
+
+ void setNext(SkOpContour* contour) {
+// SkASSERT(!fNext == !!contour);
+ fNext = contour;
+ }
+
+ void setOperand(bool isOp) {
+ fOperand = isOp;
+ }
+
+ void setOppXor(bool isOppXor) {
+ fOppXor = isOppXor;
+ }
+
+ void setReverse() {
+ fReverse = true;
+ }
+
+ void setXor(bool isXor) {
+ fXor = isXor;
+ }
+
+ bool sortAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ FAIL_IF(!segment->sortAngles());
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ const SkPoint& start() const {
+ return fHead.pts()[0];
+ }
+
+ void toPartialBackward(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ }
+
+ void toPartialForward(SkPathWriter* path) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ }
+
+ void toReversePath(SkPathWriter* path) const;
+ void toPath(SkPathWriter* path) const;
+ SkOpSpan* undoneSpan();
+
+protected:
+ SkOpGlobalState* fState;
+ SkOpSegment fHead;
+ SkOpSegment* fTail;
+ SkOpContour* fNext;
+ SkPathOpsBounds fBounds;
+ int fCcw;
+ int fCount;
+ int fFirstSorted;
+ bool fDone; // set by find top segment
+ bool fOperand; // true for the second argument to a binary operator
+ bool fReverse; // true if contour should be reverse written to path (used only by fix winding)
+ bool fXor; // set if original path had even-odd fill
+ bool fOppXor; // set if opposite path had even-odd fill
+ SkDEBUGCODE(int fID);
+ SkDEBUGCODE(mutable int fDebugIndent);
+};
+
+class SkOpContourHead : public SkOpContour {
+public:
+ SkOpContour* appendContour() {
+ SkOpContour* contour = this->globalState()->allocator()->make<SkOpContour>();
+ contour->setNext(nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next())) {
+ prev = next;
+ }
+ prev->setNext(contour);
+ return contour;
+ }
+
+ void joinAllSegments() {
+ SkOpContour* next = this;
+ do {
+ if (!next->count()) {
+ continue;
+ }
+ next->joinSegments();
+ } while ((next = next->next()));
+ }
+
+ void remove(SkOpContour* contour) {
+ if (contour == this) {
+ SkASSERT(this->count() == 0);
+ return;
+ }
+ SkASSERT(contour->next() == nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next()) != contour) {
+ SkASSERT(next);
+ prev = next;
+ }
+ SkASSERT(prev);
+ prev->setNext(nullptr);
+ }
+
+};
+
+class SkOpContourBuilder {
+public:
+ SkOpContourBuilder(SkOpContour* contour)
+ : fContour(contour)
+ , fLastIsLine(false) {
+ }
+
+ void addConic(SkPoint pts[3], SkScalar weight);
+ void addCubic(SkPoint pts[4]);
+ void addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight = 1);
+ void addLine(const SkPoint pts[2]);
+ void addQuad(SkPoint pts[3]);
+ void flush();
+ SkOpContour* contour() { return fContour; }
+ void setContour(SkOpContour* contour) { flush(); fContour = contour; }
+protected:
+ SkOpContour* fContour;
+ SkPoint fLastLine[2];
+ bool fLastIsLine;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
new file mode 100644
index 0000000000..61e49635f4
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsCubic.h"
+
+static bool rotate(const SkDCubic& cubic, int zero, int index, SkDCubic& rotPath) {
+ double dy = cubic[index].fY - cubic[zero].fY;
+ double dx = cubic[index].fX - cubic[zero].fX;
+ if (approximately_zero(dy)) {
+ if (approximately_zero(dx)) {
+ return false;
+ }
+ rotPath = cubic;
+ if (dy) {
+ rotPath[index].fY = cubic[zero].fY;
+ int mask = other_two(index, zero);
+ int side1 = index ^ mask;
+ int side2 = zero ^ mask;
+ if (approximately_equal(cubic[side1].fY, cubic[zero].fY)) {
+ rotPath[side1].fY = cubic[zero].fY;
+ }
+ if (approximately_equal(cubic[side2].fY, cubic[zero].fY)) {
+ rotPath[side2].fY = cubic[zero].fY;
+ }
+ }
+ return true;
+ }
+ for (int index = 0; index < 4; ++index) {
+ rotPath[index].fX = cubic[index].fX * dx + cubic[index].fY * dy;
+ rotPath[index].fY = cubic[index].fY * dx - cubic[index].fX * dy;
+ }
+ return true;
+}
+
+
+// Returns 0 if negative, 1 if zero, 2 if positive
+static int side(double x) {
+ return (x > 0) + (x >= 0);
+}
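+// XOR-ing two side() results classifies a point pair relative to the axis:
+// strictly opposite sides give 0 ^ 2 == 2, equal classifications cancel to 0,
+// and a pair with exactly one on-axis point (value 1) yields 1 or 3.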
+
+/* Given a cubic, find the convex hull described by the end and control points.
+ The hull may have 3 or 4 points. Cubics that degenerate into a point or line
+ are not considered.
+
+ The hull is computed by assuming that three points, if unique and non-linear,
+ form a triangle. The fourth point may replace one of the first three, may be
+ discarded if in the triangle or on an edge, or may be inserted between any of
+   the three to form a convex quadrilateral.
+
+ The indices returned in order describe the convex hull.
+*/
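+// Usage sketch: char order[4]; int n = cubic.convexHull(order); leaves the
+// hull's point indices in order[0..n-1], with n == 3 for a triangular hull
+// and n == 4 for a quadrilateral one.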
+int SkDCubic::convexHull(char order[4]) const {
+ size_t index;
+ // find top point
+ size_t yMin = 0;
+ for (index = 1; index < 4; ++index) {
+ if (fPts[yMin].fY > fPts[index].fY || (fPts[yMin].fY == fPts[index].fY
+ && fPts[yMin].fX > fPts[index].fX)) {
+ yMin = index;
+ }
+ }
+ order[0] = yMin;
+ int midX = -1;
+ int backupYMin = -1;
+ for (int pass = 0; pass < 2; ++pass) {
+ for (index = 0; index < 4; ++index) {
+ if (index == yMin) {
+ continue;
+ }
+ // rotate line from (yMin, index) to axis
+ // see if remaining two points are both above or below
+ // use this to find mid
+ int mask = other_two(yMin, index);
+ int side1 = yMin ^ mask;
+ int side2 = index ^ mask;
+ SkDCubic rotPath;
+            if (!rotate(*this, yMin, index, rotPath)) { // fails if cubic[yMin] == cubic[index]
+ order[1] = side1;
+ order[2] = side2;
+ return 3;
+ }
+ int sides = side(rotPath[side1].fY - rotPath[yMin].fY);
+ sides ^= side(rotPath[side2].fY - rotPath[yMin].fY);
+ if (sides == 2) { // '2' means one remaining point <0, one >0
+ if (midX >= 0) {
+ // one of the control points is equal to an end point
+ order[0] = 0;
+ order[1] = 3;
+ if (fPts[1] == fPts[0] || fPts[1] == fPts[3]) {
+ order[2] = 2;
+ return 3;
+ }
+ if (fPts[2] == fPts[0] || fPts[2] == fPts[3]) {
+ order[2] = 1;
+ return 3;
+ }
+ // one of the control points may be very nearly but not exactly equal --
+ double dist1_0 = fPts[1].distanceSquared(fPts[0]);
+ double dist1_3 = fPts[1].distanceSquared(fPts[3]);
+ double dist2_0 = fPts[2].distanceSquared(fPts[0]);
+ double dist2_3 = fPts[2].distanceSquared(fPts[3]);
+ double smallest1distSq = SkTMin(dist1_0, dist1_3);
+ double smallest2distSq = SkTMin(dist2_0, dist2_3);
+ if (approximately_zero(SkTMin(smallest1distSq, smallest2distSq))) {
+ order[2] = smallest1distSq < smallest2distSq ? 2 : 1;
+ return 3;
+ }
+ }
+ midX = index;
+ } else if (sides == 0) { // '0' means both to one side or the other
+ backupYMin = index;
+ }
+ }
+ if (midX >= 0) {
+ break;
+ }
+ if (backupYMin < 0) {
+ break;
+ }
+ yMin = backupYMin;
+ backupYMin = -1;
+ }
+ if (midX < 0) {
+ midX = yMin ^ 3; // choose any other point
+ }
+ int mask = other_two(yMin, midX);
+ int least = yMin ^ mask;
+ int most = midX ^ mask;
+ order[0] = yMin;
+ order[1] = least;
+
+ // see if mid value is on same side of line (least, most) as yMin
+ SkDCubic midPath;
+    if (!rotate(*this, least, most, midPath)) { // fails if cubic[least] == cubic[most]
+ order[2] = midX;
+ return 3;
+ }
+ int midSides = side(midPath[yMin].fY - midPath[least].fY);
+ midSides ^= side(midPath[midX].fY - midPath[least].fY);
+ if (midSides != 2) { // if mid point is not between
+ order[2] = most;
+ return 3; // result is a triangle
+ }
+ order[2] = midX;
+ order[3] = most;
+    return 4; // result is a quadrilateral
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
new file mode 100644
index 0000000000..3efb0e0326
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkGeometry.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkReduceOrder.h"
+
+void SkOpEdgeBuilder::init() {
+ fOperand = false;
+ fXorMask[0] = fXorMask[1] = (fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ fUnparseable = false;
+ fSecondHalf = preFetch();
+}
+
+// very tiny points cause numerical instability: don't allow them
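+// e.g. a coordinate such as 1e-30f, well below FLT_EPSILON_ORDERABLE_ERR, is
+// snapped to exactly 0 so later point comparisons order consistently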
+static void force_small_to_zero(SkPoint* pt) {
+ if (SkScalarAbs(pt->fX) < FLT_EPSILON_ORDERABLE_ERR) {
+ pt->fX = 0;
+ }
+ if (SkScalarAbs(pt->fY) < FLT_EPSILON_ORDERABLE_ERR) {
+ pt->fY = 0;
+ }
+}
+
+static bool can_add_curve(SkPath::Verb verb, SkPoint* curve) {
+ if (SkPath::kMove_Verb == verb) {
+ return false;
+ }
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ force_small_to_zero(&curve[index]);
+ }
+ return SkPath::kLine_Verb != verb || !SkDPoint::ApproximatelyEqual(curve[0], curve[1]);
+}
+
+void SkOpEdgeBuilder::addOperand(const SkPath& path) {
+ SkASSERT(fPathVerbs.count() > 0 && fPathVerbs.end()[-1] == SkPath::kDone_Verb);
+ fPathVerbs.pop();
+ fPath = &path;
+ fXorMask[1] = (fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ preFetch();
+}
+
+bool SkOpEdgeBuilder::finish() {
+ fOperand = false;
+ if (fUnparseable || !walk()) {
+ return false;
+ }
+ complete();
+ SkOpContour* contour = fContourBuilder.contour();
+ if (contour && !contour->count()) {
+ fContoursHead->remove(contour);
+ }
+ return true;
+}
+
+void SkOpEdgeBuilder::closeContour(const SkPoint& curveEnd, const SkPoint& curveStart) {
+ if (!SkDPoint::ApproximatelyEqual(curveEnd, curveStart)) {
+ *fPathVerbs.append() = SkPath::kLine_Verb;
+ *fPathPts.append() = curveStart;
+ } else {
+ int verbCount = fPathVerbs.count();
+ int ptsCount = fPathPts.count();
+ if (SkPath::kLine_Verb == fPathVerbs[verbCount - 1]
+ && fPathPts[ptsCount - 2] == curveStart) {
+ fPathVerbs.pop();
+ fPathPts.pop();
+ } else {
+ fPathPts[ptsCount - 1] = curveStart;
+ }
+ }
+ *fPathVerbs.append() = SkPath::kClose_Verb;
+}
+
+int SkOpEdgeBuilder::preFetch() {
+ if (!fPath->isFinite()) {
+ fUnparseable = true;
+ return 0;
+ }
+ SkPath::RawIter iter(*fPath);
+ SkPoint curveStart;
+ SkPoint curve[4];
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ bool lastCurve = false;
+ do {
+ verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = verb;
+ force_small_to_zero(&pts[0]);
+ *fPathPts.append() = pts[0];
+ curveStart = curve[0] = pts[0];
+ lastCurve = false;
+ continue;
+ case SkPath::kLine_Verb:
+ force_small_to_zero(&pts[1]);
+ if (SkDPoint::ApproximatelyEqual(curve[0], pts[1])) {
+ uint8_t lastVerb = fPathVerbs.top();
+ if (lastVerb != SkPath::kLine_Verb && lastVerb != SkPath::kMove_Verb) {
+ fPathPts.top() = curve[0] = pts[1];
+ }
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kQuad_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ verb = SkReduceOrder::Quad(curve, pts);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kConic_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ verb = SkReduceOrder::Quad(curve, pts);
+ if (SkPath::kQuad_Verb == verb && 1 != iter.conicWeight()) {
+ verb = SkPath::kConic_Verb;
+ } else if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ force_small_to_zero(&pts[3]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ curve[3] = pts[3];
+ verb = SkReduceOrder::Cubic(curve, pts);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kClose_Verb:
+ closeContour(curve[0], curveStart);
+ lastCurve = false;
+ continue;
+ case SkPath::kDone_Verb:
+ continue;
+ }
+ *fPathVerbs.append() = verb;
+ int ptCount = SkPathOpsVerbToPoints(verb);
+ fPathPts.append(ptCount, &pts[1]);
+ if (verb == SkPath::kConic_Verb) {
+ *fWeights.append() = iter.conicWeight();
+ }
+ curve[0] = pts[ptCount];
+ lastCurve = true;
+ } while (verb != SkPath::kDone_Verb);
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = SkPath::kDone_Verb;
+ return fPathVerbs.count() - 1;
+}
+
+bool SkOpEdgeBuilder::close() {
+ complete();
+ return true;
+}
+
+bool SkOpEdgeBuilder::walk() {
+ uint8_t* verbPtr = fPathVerbs.begin();
+ uint8_t* endOfFirstHalf = &verbPtr[fSecondHalf];
+ SkPoint* pointsPtr = fPathPts.begin();
+ SkScalar* weightPtr = fWeights.begin();
+ SkPath::Verb verb;
+ SkOpContour* contour = fContourBuilder.contour();
+ int moveToPtrBump = 0;
+ while ((verb = (SkPath::Verb) *verbPtr) != SkPath::kDone_Verb) {
+ if (verbPtr == endOfFirstHalf) {
+ fOperand = true;
+ }
+ verbPtr++;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (contour && contour->count()) {
+ if (fAllowOpenContours) {
+ complete();
+ } else if (!close()) {
+ return false;
+ }
+ }
+ if (!contour) {
+ fContourBuilder.setContour(contour = fContoursHead->appendContour());
+ }
+ contour->init(fGlobalState, fOperand,
+ fXorMask[fOperand] == kEvenOdd_PathOpsMask);
+ pointsPtr += moveToPtrBump;
+ moveToPtrBump = 1;
+ continue;
+ case SkPath::kLine_Verb:
+ fContourBuilder.addLine(pointsPtr);
+ break;
+ case SkPath::kQuad_Verb:
+ {
+ SkVector v1 = pointsPtr[1] - pointsPtr[0];
+ SkVector v2 = pointsPtr[2] - pointsPtr[1];
+ if (v1.dot(v2) < 0) {
+ SkPoint pair[5];
+ if (SkChopQuadAtMaxCurvature(pointsPtr, pair) == 1) {
+ goto addOneQuad;
+ }
+ if (!SkScalarsAreFinite(&pair[0].fX, SK_ARRAY_COUNT(pair) * 2)) {
+ return false;
+ }
+ for (unsigned index = 0; index < SK_ARRAY_COUNT(pair); ++index) {
+ force_small_to_zero(&pair[index]);
+ }
+ SkPoint cStorage[2][2];
+ SkPath::Verb v1 = SkReduceOrder::Quad(&pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Quad(&pair[2], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? &pair[0] : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? &pair[2] : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fContourBuilder.addCurve(v1, curve1);
+ fContourBuilder.addCurve(v2, curve2);
+ break;
+ }
+ }
+ }
+ addOneQuad:
+ fContourBuilder.addQuad(pointsPtr);
+ break;
+ case SkPath::kConic_Verb: {
+ SkVector v1 = pointsPtr[1] - pointsPtr[0];
+ SkVector v2 = pointsPtr[2] - pointsPtr[1];
+ SkScalar weight = *weightPtr++;
+ if (v1.dot(v2) < 0) {
+ // FIXME: max curvature for conics hasn't been implemented; use placeholder
+ SkScalar maxCurvature = SkFindQuadMaxCurvature(pointsPtr);
+ if (0 < maxCurvature && maxCurvature < 1) {
+ SkConic conic(pointsPtr, weight);
+ SkConic pair[2];
+ if (!conic.chopAt(maxCurvature, pair)) {
+ // if result can't be computed, use original
+ fContourBuilder.addConic(pointsPtr, weight);
+ break;
+ }
+ SkPoint cStorage[2][3];
+ SkPath::Verb v1 = SkReduceOrder::Conic(pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Conic(pair[1], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? pair[0].fPts : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? pair[1].fPts : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fContourBuilder.addCurve(v1, curve1, pair[0].fW);
+ fContourBuilder.addCurve(v2, curve2, pair[1].fW);
+ break;
+ }
+ }
+ }
+ fContourBuilder.addConic(pointsPtr, weight);
+ } break;
+ case SkPath::kCubic_Verb:
+ {
+                // Split complex cubics (such as self-intersecting curves or
+                // ones with difficult curvature) into simpler pieces before
+                // proceeding. This can be required for intersection to succeed.
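+                // ComplexBreak reports up to three split t values; each piece
+                // is reduced to its simplest verb below, and degenerate pieces
+                // are folded into their neighbors via the fCanAdd merging pass.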
+ SkScalar splitT[3];
+ int breaks = SkDCubic::ComplexBreak(pointsPtr, splitT);
+ if (!breaks) {
+ fContourBuilder.addCubic(pointsPtr);
+ break;
+ }
+ SkASSERT(breaks <= (int) SK_ARRAY_COUNT(splitT));
+ struct Splitsville {
+ double fT[2];
+ SkPoint fPts[4];
+ SkPoint fReduced[4];
+ SkPath::Verb fVerb;
+ bool fCanAdd;
+ } splits[4];
+ SkASSERT(SK_ARRAY_COUNT(splits) == SK_ARRAY_COUNT(splitT) + 1);
+ SkTQSort(splitT, &splitT[breaks - 1]);
+ for (int index = 0; index <= breaks; ++index) {
+ Splitsville* split = &splits[index];
+ split->fT[0] = index ? splitT[index - 1] : 0;
+ split->fT[1] = index < breaks ? splitT[index] : 1;
+ SkDCubic part = SkDCubic::SubDivide(pointsPtr, split->fT[0], split->fT[1]);
+ if (!part.toFloatPoints(split->fPts)) {
+ return false;
+ }
+ split->fVerb = SkReduceOrder::Cubic(split->fPts, split->fReduced);
+                    SkPoint* curve = SkPath::kCubic_Verb == split->fVerb
+                            ? split->fPts : split->fReduced;
+ split->fCanAdd = can_add_curve(split->fVerb, curve);
+ }
+ for (int index = 0; index <= breaks; ++index) {
+ Splitsville* split = &splits[index];
+ if (!split->fCanAdd) {
+ continue;
+ }
+ int prior = index;
+ while (prior > 0 && !splits[prior - 1].fCanAdd) {
+ --prior;
+ }
+ if (prior < index) {
+ split->fT[0] = splits[prior].fT[0];
+ split->fPts[0] = splits[prior].fPts[0];
+ }
+ int next = index;
+ int breakLimit = SkTMin(breaks, (int) SK_ARRAY_COUNT(splits) - 1);
+ while (next < breakLimit && !splits[next + 1].fCanAdd) {
+ ++next;
+ }
+ if (next > index) {
+ split->fT[1] = splits[next].fT[1];
+ split->fPts[3] = splits[next].fPts[3];
+ }
+ if (prior < index || next > index) {
+ split->fVerb = SkReduceOrder::Cubic(split->fPts, split->fReduced);
+ }
+ SkPoint* curve = SkPath::kCubic_Verb == split->fVerb
+ ? split->fPts : split->fReduced;
+ if (!can_add_curve(split->fVerb, curve)) {
+ return false;
+ }
+ fContourBuilder.addCurve(split->fVerb, curve);
+ }
+ }
+ break;
+ case SkPath::kClose_Verb:
+ SkASSERT(contour);
+ if (!close()) {
+ return false;
+ }
+ contour = nullptr;
+ continue;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return false;
+ }
+ SkASSERT(contour);
+ if (contour->count()) {
+ contour->debugValidate();
+ }
+ pointsPtr += SkPathOpsVerbToPoints(verb);
+ }
+ fContourBuilder.flush();
+    if (contour && contour->count() && !fAllowOpenContours && !close()) {
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
new file mode 100644
index 0000000000..67dff910dc
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpEdgeBuilder_DEFINED
+#define SkOpEdgeBuilder_DEFINED
+
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkPathWriter.h"
+
+class SkOpEdgeBuilder {
+public:
+ SkOpEdgeBuilder(const SkPathWriter& path, SkOpContourHead* contours2,
+ SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(path.nativePath())
+ , fContourBuilder(contours2)
+ , fContoursHead(contours2)
+ , fAllowOpenContours(true) {
+ init();
+ }
+
+ SkOpEdgeBuilder(const SkPath& path, SkOpContourHead* contours2, SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(&path)
+ , fContourBuilder(contours2)
+ , fContoursHead(contours2)
+ , fAllowOpenContours(false) {
+ init();
+ }
+
+ void addOperand(const SkPath& path);
+
+ void complete() {
+ fContourBuilder.flush();
+ SkOpContour* contour = fContourBuilder.contour();
+ if (contour && contour->count()) {
+ contour->complete();
+ fContourBuilder.setContour(nullptr);
+ }
+ }
+
+ bool finish();
+
+ const SkOpContour* head() const {
+ return fContoursHead;
+ }
+
+ void init();
+ bool unparseable() const { return fUnparseable; }
+ SkPathOpsMask xorMask() const { return fXorMask[fOperand]; }
+
+private:
+ void closeContour(const SkPoint& curveEnd, const SkPoint& curveStart);
+ bool close();
+ int preFetch();
+ bool walk();
+
+ SkOpGlobalState* fGlobalState;
+ const SkPath* fPath;
+ SkTDArray<SkPoint> fPathPts;
+ SkTDArray<SkScalar> fWeights;
+ SkTDArray<uint8_t> fPathVerbs;
+ SkOpContourBuilder fContourBuilder;
+ SkOpContourHead* fContoursHead;
+ SkPathOpsMask fXorMask[2];
+ int fSecondHalf;
+ bool fOperand;
+ bool fAllowOpenContours;
+ bool fUnparseable;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.cpp b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
new file mode 100644
index 0000000000..66fa6ee5ff
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
@@ -0,0 +1,1781 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkPointPriv.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <utility>
+
+/*
+After computing raw intersections, post process all segments to:
+- find small collections of points that can be collapsed to a single point
+- find missing intersections to resolve differences caused by different algorithms
+
+Consider segments containing tiny or small intervals. Consider coincident segments
+because coincidence finds intersections through distance measurement that non-coincident
+intersection tests cannot.
+ */
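+// (Within this file, that post processing is carried out by moveMultiples(),
+// moveNearby(), and missingCoincidence() below.)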
+
+#define F (false) // discard the edge
+#define T (true) // keep the edge
+
+static const bool gUnaryActiveEdge[2][2] = {
+// from=0 from=1
+// to=0,1 to=0,1
+ {F, T}, {T, F},
+};
+
+static const bool gActiveEdge[kXOR_SkPathOp + 1][2][2][2][2] = {
+// miFrom=0 miFrom=1
+// miTo=0 miTo=1 miTo=0 miTo=1
+// suFrom=0 1 suFrom=0 1 suFrom=0 1 suFrom=0 1
+// suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1
+ {{{{F, F}, {F, F}}, {{T, F}, {T, F}}}, {{{T, T}, {F, F}}, {{F, T}, {T, F}}}}, // mi - su
+ {{{{F, F}, {F, F}}, {{F, T}, {F, T}}}, {{{F, F}, {T, T}}, {{F, T}, {T, F}}}}, // mi & su
+ {{{{F, T}, {T, F}}, {{T, T}, {F, F}}}, {{{T, F}, {T, F}}, {{F, F}, {F, F}}}}, // mi | su
+ {{{{F, T}, {T, F}}, {{T, F}, {F, T}}}, {{{T, F}, {F, T}}, {{F, T}, {T, F}}}}, // mi ^ su
+};
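+// Illustrative readings of the tables above (examples added for clarity):
+// gUnaryActiveEdge[from][to] keeps an edge exactly when the winding changes
+// between zero and nonzero, e.g. gUnaryActiveEdge[0][1] == T and
+// gUnaryActiveEdge[1][1] == F.
+// For gActiveEdge with op == kDifference_SkPathOp (mi - su), an edge that
+// enters the minuend (miFrom=0, miTo=1) while staying outside the subtrahend
+// (suFrom=0, suTo=0) is kept: gActiveEdge[kDifference_SkPathOp][0][1][0][0] == T.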
+
+#undef F
+#undef T
+
+SkOpAngle* SkOpSegment::activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ if (SkOpAngle* result = activeAngleInner(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ if (SkOpAngle* result = activeAngleOther(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpSpan* upSpan = start->upCastable();
+ if (upSpan) {
+ if (upSpan->windValue() || upSpan->oppValue()) {
+ SkOpSpanBase* next = upSpan->next();
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = next;
+ }
+ if (!upSpan->done()) {
+ if (upSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, next);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(upSpan->done());
+ }
+ }
+ SkOpSpan* downSpan = start->prev();
+ // edge leading into junction
+ if (downSpan) {
+ if (downSpan->windValue() || downSpan->oppValue()) {
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = downSpan;
+ }
+ if (!downSpan->done()) {
+ if (downSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, downSpan);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(downSpan->done());
+ }
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpPtT* oPtT = start->ptT()->next();
+ SkOpSegment* other = oPtT->segment();
+ SkOpSpanBase* oSpan = oPtT->span();
+ return other->activeAngleInner(oSpan, startPtr, endPtr, done);
+}
+
+bool SkOpSegment::activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op) {
+ int sumMiWinding = this->updateWinding(end, start);
+ int sumSuWinding = this->updateOppWinding(end, start);
+#if DEBUG_LIMIT_WIND_SUM
+ SkASSERT(abs(sumMiWinding) <= DEBUG_LIMIT_WIND_SUM);
+ SkASSERT(abs(sumSuWinding) <= DEBUG_LIMIT_WIND_SUM);
+#endif
+ if (this->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ return this->activeOp(xorMiMask, xorSuMask, start, end, op, &sumMiWinding, &sumSuWinding);
+}
+
+bool SkOpSegment::activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end,
+ SkPathOp op, int* sumMiWinding, int* sumSuWinding) {
+ int maxWinding, sumWinding, oppMaxWinding, oppSumWinding;
+ this->setUpWindings(start, end, sumMiWinding, sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ bool miFrom;
+ bool miTo;
+ bool suFrom;
+ bool suTo;
+ if (operand()) {
+ miFrom = (oppMaxWinding & xorMiMask) != 0;
+ miTo = (oppSumWinding & xorMiMask) != 0;
+ suFrom = (maxWinding & xorSuMask) != 0;
+ suTo = (sumWinding & xorSuMask) != 0;
+ } else {
+ miFrom = (maxWinding & xorMiMask) != 0;
+ miTo = (sumWinding & xorMiMask) != 0;
+ suFrom = (oppMaxWinding & xorSuMask) != 0;
+ suTo = (oppSumWinding & xorSuMask) != 0;
+ }
+ bool result = gActiveEdge[op][miFrom][miTo][suFrom][suTo];
+#if DEBUG_ACTIVE_OP
+ SkDebugf("%s id=%d t=%1.9g tEnd=%1.9g op=%s miFrom=%d miTo=%d suFrom=%d suTo=%d result=%d\n",
+ __FUNCTION__, debugID(), start->t(), end->t(),
+ SkPathOpsDebug::kPathOpStr[op], miFrom, miTo, suFrom, suTo, result);
+#endif
+ return result;
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+ int sumWinding = updateWinding(end, start);
+ return activeWinding(start, end, &sumWinding);
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding) {
+ int maxWinding;
+ setUpWinding(start, end, &maxWinding, sumWinding);
+ bool from = maxWinding != 0;
+ bool to = *sumWinding != 0;
+ bool result = gUnaryActiveEdge[from][to];
+ return result;
+}
+
+bool SkOpSegment::addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end,
+ SkPathWriter* path) const {
+ const SkOpSpan* spanStart = start->starter(end);
+ FAIL_IF(spanStart->alreadyAdded());
+ const_cast<SkOpSpan*>(spanStart)->markAdded();
+ SkDCurveSweep curvePart;
+ start->segment()->subDivide(start, end, &curvePart.fCurve);
+ curvePart.setCurveHullSweep(fVerb);
+ SkPath::Verb verb = curvePart.isCurve() ? fVerb : SkPath::kLine_Verb;
+ path->deferredMove(start->ptT());
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ FAIL_IF(!path->deferredLine(end->ptT()));
+ break;
+ case SkPath::kQuad_Verb:
+ path->quadTo(curvePart.fCurve.fQuad[1].asSkPoint(), end->ptT());
+ break;
+ case SkPath::kConic_Verb:
+ path->conicTo(curvePart.fCurve.fConic[1].asSkPoint(), end->ptT(),
+ curvePart.fCurve.fConic.fWeight);
+ break;
+ case SkPath::kCubic_Verb:
+ path->cubicTo(curvePart.fCurve.fCubic[1].asSkPoint(),
+ curvePart.fCurve.fCubic[2].asSkPoint(), end->ptT());
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return true;
+}
+
+const SkOpPtT* SkOpSegment::existing(double t, const SkOpSegment* opp) const {
+ const SkOpSpanBase* test = &fHead;
+ const SkOpPtT* testPtT;
+ SkPoint pt = this->ptAtT(t);
+ do {
+ testPtT = test->ptT();
+ if (testPtT->fT == t) {
+ break;
+ }
+ if (!this->match(testPtT, this, t, pt)) {
+ if (t < testPtT->fT) {
+ return nullptr;
+ }
+ continue;
+ }
+ if (!opp) {
+ return testPtT;
+ }
+ const SkOpPtT* loop = testPtT->next();
+ while (loop != testPtT) {
+ if (loop->segment() == this && loop->fT == t && loop->fPt == pt) {
+ goto foundMatch;
+ }
+ loop = loop->next();
+ }
+ return nullptr;
+ } while ((test = test->upCast()->next()));
+foundMatch:
+ return opp && !test->contains(opp) ? nullptr : testPtT;
+}
+
+// break the span so that the coincident part does not change the angle of the remainder
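+// *startOver is set when adding newT allocates a new span, signaling the
+// caller to start over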
+bool SkOpSegment::addExpanded(double newT, const SkOpSpanBase* test, bool* startOver) {
+ if (this->contains(newT)) {
+ return true;
+ }
+ this->globalState()->resetAllocatedOpSpan();
+ FAIL_IF(!between(0, newT, 1));
+ SkOpPtT* newPtT = this->addT(newT);
+ *startOver |= this->globalState()->allocatedOpSpan();
+ if (!newPtT) {
+ return false;
+ }
+ newPtT->fPt = this->ptAtT(newT);
+ SkOpPtT* oppPrev = test->ptT()->oppPrev(newPtT);
+ if (oppPrev) {
+ // const cast away to change linked list; pt/t values stay unchanged
+ SkOpSpanBase* writableTest = const_cast<SkOpSpanBase*>(test);
+ writableTest->mergeMatches(newPtT->span());
+ writableTest->ptT()->addOpp(newPtT, oppPrev);
+ writableTest->checkForCollapsedCoincidence();
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddT()
+SkOpPtT* SkOpSegment::addT(double t, const SkPoint& pt) {
+ debugValidate();
+ SkOpSpanBase* spanBase = &fHead;
+ do {
+ SkOpPtT* result = spanBase->ptT();
+ if (t == result->fT || (!zero_or_one(t) && this->match(result, this, t, pt))) {
+ spanBase->bumpSpanAdds();
+ return result;
+ }
+ if (t < result->fT) {
+ SkOpSpan* prev = result->span()->prev();
+ FAIL_WITH_NULL_IF(!prev);
+ // marks in global state that a new op span has been allocated
+ SkOpSpan* span = this->insert(prev);
+ span->init(this, prev, t, pt);
+ this->debugValidate();
+#if DEBUG_ADD_T
+ SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+ span->segment()->debugID(), span->debugID());
+#endif
+ span->bumpSpanAdds();
+ return span->ptT();
+ }
+ FAIL_WITH_NULL_IF(spanBase == &fTail);
+ } while ((spanBase = spanBase->upCast()->next()));
+ SkASSERT(0);
+ return nullptr; // we never get here, but need this to satisfy compiler
+}
+
+SkOpPtT* SkOpSegment::addT(double t) {
+ return addT(t, this->ptAtT(t));
+}
+
+void SkOpSegment::calcAngles() {
+ bool activePrior = !fHead.isCanceled();
+ if (activePrior && !fHead.simple()) {
+ addStartSpan();
+ }
+ SkOpSpan* prior = &fHead;
+ SkOpSpanBase* spanBase = fHead.next();
+ while (spanBase != &fTail) {
+ if (activePrior) {
+ SkOpAngle* priorAngle = this->globalState()->allocator()->make<SkOpAngle>();
+ priorAngle->set(spanBase, prior);
+ spanBase->setFromAngle(priorAngle);
+ }
+ SkOpSpan* span = spanBase->upCast();
+ bool active = !span->isCanceled();
+ SkOpSpanBase* next = span->next();
+ if (active) {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(span, next);
+ span->setToAngle(angle);
+ }
+ activePrior = active;
+ prior = span;
+ spanBase = next;
+ }
+ if (activePrior && !fTail.simple()) {
+ addEndSpan();
+ }
+}
+
+// Please keep this in sync with debugClearAll()
+void SkOpSegment::clearAll() {
+ SkOpSpan* span = &fHead;
+ do {
+ this->clearOne(span);
+ } while ((span = span->next()->upCastable()));
+ this->globalState()->coincidence()->release(this);
+}
+
+// Please keep this in sync with debugClearOne()
+void SkOpSegment::clearOne(SkOpSpan* span) {
+ span->setWindValue(0);
+ span->setOppValue(0);
+ this->markDone(span);
+}
+
+SkOpSpanBase::Collapsed SkOpSegment::collapsed(double s, double e) const {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ SkOpSpanBase::Collapsed result = span->collapsed(s, e);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return result;
+ }
+ } while (span->upCastable() && (span = span->upCast()->next()));
+ return SkOpSpanBase::Collapsed::kNo;
+}
+
+bool SkOpSegment::ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWindingReverse(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWindingReverse(baseAngle);
+ if (baseSegment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last = nullptr;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle, &last)) {
+ return false;
+ }
+ } else {
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, nextAngle, &last)) {
+ return false;
+ }
+ }
+ nextAngle->setLastMarked(last);
+ return true;
+}
+
+bool SkOpSegment::ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWinding(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWinding(baseAngle);
+ if (baseSegment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last = nullptr;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle, &last)) {
+ return false;
+ }
+ } else {
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, nextAngle, &last)) {
+ return false;
+ }
+ }
+ nextAngle->setLastMarked(last);
+ return true;
+}
+
+// at this point, the span is already ordered, or unorderable
+int SkOpSegment::computeSum(SkOpSpanBase* start, SkOpSpanBase* end,
+ SkOpAngle::IncludeType includeType) {
+ SkASSERT(includeType != SkOpAngle::kUnaryXor);
+ SkOpAngle* firstAngle = this->spanToAngle(end, start);
+ if (nullptr == firstAngle || nullptr == firstAngle->next()) {
+ return SK_NaN32;
+ }
+ // if all angles have a computed winding,
+ // or if no adjacent angles are orderable,
+ // or if adjacent orderable angles have no computed winding,
+ // there's nothing to do
+ // if two orderable angles are adjacent, and both are next to orderable angles,
+ // and one has winding computed, transfer to the other
+ SkOpAngle* baseAngle = nullptr;
+ bool tryReverse = false;
+ // look for counterclockwise transfers
+ SkOpAngle* angle = firstAngle->previous();
+ SkOpAngle* next = angle->next();
+ firstAngle = next;
+ do {
+ SkOpAngle* prior = angle;
+ angle = next;
+ next = angle->next();
+ SkASSERT(prior->next() == angle);
+ SkASSERT(angle->next() == next);
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ tryReverse = true;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSum(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (next != firstAngle);
+ if (baseAngle && SK_MinS32 == firstAngle->starter()->windSum()) {
+ firstAngle = baseAngle;
+ tryReverse = true;
+ }
+ if (tryReverse) {
+ baseAngle = nullptr;
+ SkOpAngle* prior = firstAngle;
+ do {
+ angle = prior;
+ prior = angle->previous();
+ SkASSERT(prior->next() == angle);
+ next = angle->next();
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSumReverse(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (prior != firstAngle);
+ }
+ return start->starter(end)->windSum();
+}
+
+bool SkOpSegment::contains(double newT) const {
+ const SkOpSpanBase* spanBase = &fHead;
+ do {
+ if (spanBase->ptT()->contains(this, newT)) {
+ return true;
+ }
+ if (spanBase == &fTail) {
+ break;
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (true);
+ return false;
+}
+
+void SkOpSegment::release(const SkOpSpan* span) {
+ if (span->done()) {
+ --fDoneCount;
+ }
+ --fCount;
+ SkOPASSERT(fCount >= fDoneCount);
+}
+
+#if DEBUG_ANGLE
+// called only by debugCheckNearCoincidence
+double SkOpSegment::distSq(double t, const SkOpAngle* oppAngle) const {
+ SkDPoint testPt = this->dPtAtT(t);
+ SkDLine testPerp = {{ testPt, testPt }};
+ SkDVector slope = this->dSlopeAtT(t);
+ testPerp[1].fX += slope.fY;
+ testPerp[1].fY -= slope.fX;
+ SkIntersections i;
+ const SkOpSegment* oppSegment = oppAngle->segment();
+ (*CurveIntersectRay[oppSegment->verb()])(oppSegment->pts(), oppSegment->weight(), testPerp, &i);
+ double closestDistSq = SK_ScalarInfinity;
+ for (int index = 0; index < i.used(); ++index) {
+ if (!between(oppAngle->start()->t(), i[0][index], oppAngle->end()->t())) {
+ continue;
+ }
+ double testDistSq = testPt.distanceSquared(i.pt(index));
+ if (closestDistSq > testDistSq) {
+ closestDistSq = testDistSq;
+ }
+ }
+ return closestDistSq;
+}
+#endif
+
+/*
+ The M and S variable name parts stand for the operands.
+ Mi stands for Minuend (see wiki subtraction, analogous to difference)
+ Su stands for Subtrahend
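+ (e.g., in the path op A - B, A is the minuend and B the subtrahend)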
+ The Opp variable name part designates that the value is for the Opposite operator.
+ Opposite values result from combining coincident spans.
+ */
+SkOpSegment* SkOpSegment::findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, bool* simple,
+ SkPathOp op, int xorMiMask, int xorSuMask) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if ((*simple = other)) {
+ // mark the smaller of start, end done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kBinaryOpp);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumMiWinding = updateWinding(end, start);
+ if (sumMiWinding == SK_MinS32) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ int sumSuWinding = updateOppWinding(end, start);
+ if (operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeOp(xorMiMask, xorSuMask, nextAngle->start(),
+ nextAngle->end(), op, &sumMiWinding, &sumSuWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end(), nullptr);
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+#endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextWinding(SkTDArray<SkOpSpanBase*>* chase,
+ SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of start, end done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kUnaryWinding);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumWinding = updateWinding(end, start);
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeWinding(nextAngle->start(), nextAngle->end(),
+ &sumWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end(), nullptr);
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+#endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd,
+ bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of start, end done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkDEBUGCODE(SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() \
+ : (*nextStart)->prev());
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (!angle || angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ if (!nextAngle) {
+ return nullptr;
+ }
+ nextSegment = nextAngle->segment();
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ if (!(foundDone = nextSegment->done(nextAngle))) {
+ break;
+ }
+ }
+ nextAngle = nextAngle->next();
+ } while (nextAngle != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+#endif
+ return nextSegment;
+}
+
+SkOpGlobalState* SkOpSegment::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpSegment::init(SkPoint pts[], SkScalar weight, SkOpContour* contour, SkPath::Verb verb) {
+ fContour = contour;
+ fNext = nullptr;
+ fPts = pts;
+ fWeight = weight;
+ fVerb = verb;
+ fCount = 0;
+ fDoneCount = 0;
+ fVisited = false;
+ SkOpSpan* zeroSpan = &fHead;
+ zeroSpan->init(this, nullptr, 0, fPts[0]);
+ SkOpSpanBase* oneSpan = &fTail;
+ zeroSpan->setNext(oneSpan);
+ oneSpan->initBase(this, zeroSpan, 1, fPts[SkPathOpsVerbToPoints(fVerb)]);
+ SkDEBUGCODE(fID = globalState()->nextSegmentID());
+}
+
+bool SkOpSegment::isClose(double t, const SkOpSegment* opp) const {
+ SkDPoint cPt = this->dPtAtT(t);
+ SkDVector dxdy = (*CurveDSlopeAtT[this->verb()])(this->pts(), this->weight(), t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i;
+ (*CurveIntersectRay[opp->verb()])(opp->pts(), opp->weight(), perp, &i);
+ int used = i.used();
+ for (int index = 0; index < used; ++index) {
+ if (cPt.roughlyEqual(i.pt(index))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpSegment::isXor() const {
+ return fContour->isXor();
+}
+
+void SkOpSegment::markAllDone() {
+ SkOpSpan* span = this->head();
+ do {
+ this->markDone(span);
+ } while ((span = span->next()->upCastable()));
+}
+
+bool SkOpSegment::markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end, SkOpSpanBase** found) {
+ int step = start->step(end);
+ SkOpSpan* minSpan = start->starter(end);
+ markDone(minSpan);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ SkOpSpan* priorDone = nullptr;
+ SkOpSpan* lastDone = nullptr;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &minSpan, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (other->done()) {
+ SkASSERT(!last);
+ break;
+ }
+ if (lastDone == minSpan || priorDone == minSpan) {
+ if (found) {
+ *found = nullptr;
+ }
+ return true;
+ }
+ other->markDone(minSpan);
+ priorDone = lastDone;
+ lastDone = minSpan;
+ }
+ if (found) {
+ *found = last;
+ }
+ return true;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (spanStart->windSum() != SK_MinS32) {
+// SkASSERT(spanStart->windSum() == winding); // FIXME: is this assert too aggressive?
+ SkASSERT(!last);
+ break;
+ }
+ (void) other->markWinding(spanStart, winding);
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end,
+ int winding, int oppWinding, SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding, oppWinding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (spanStart->windSum() != SK_MinS32) {
+ if (this->operand() == other->operand()) {
+ if (spanStart->windSum() != winding || spanStart->oppSum() != oppWinding) {
+ this->globalState()->setWindingFailed();
+ return true; // ... but let it succeed anyway
+ }
+ } else {
+ FAIL_IF(spanStart->windSum() != oppWinding);
+ FAIL_IF(spanStart->oppSum() != winding);
+ }
+ SkASSERT(!last);
+ break;
+ }
+ if (this->operand() == other->operand()) {
+ (void) other->markWinding(spanStart, winding, oppWinding);
+ } else {
+ (void) other->markWinding(spanStart, oppWinding, winding);
+ }
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+bool SkOpSegment::markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle,
+ SkOpSpanBase** result) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ if (!markAndChaseWinding(angle->start(), angle->end(), maxWinding, result)) {
+ return false;
+ }
+#if DEBUG_WINDING
+ SkOpSpanBase* last = *result;
+ if (last) {
+ SkDebugf("%s last seg=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+ }
+#endif
+ return true;
+}
+
+bool SkOpSegment::markAngle(int maxWinding, int sumWinding, int oppMaxWinding,
+ int oppSumWinding, const SkOpAngle* angle, SkOpSpanBase** result) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ if (oppMaxWinding != oppSumWinding && UseInnerWinding(oppMaxWinding, oppSumWinding)) {
+ oppMaxWinding = oppSumWinding;
+ }
+ // caller doesn't require that this marks anything
+ if (!markAndChaseWinding(angle->start(), angle->end(), maxWinding, oppMaxWinding, result)) {
+ return false;
+ }
+#if DEBUG_WINDING
+ if (result) {
+ SkOpSpanBase* last = *result;
+ if (last) {
+ SkDebugf("%s last segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+ SkDebugf(" \n");
+ }
+ }
+#endif
+ return true;
+}
+
+void SkOpSegment::markDone(SkOpSpan* span) {
+ SkASSERT(this == span->segment());
+ if (span->done()) {
+ return;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, span->windSum(), span->oppSum());
+#endif
+ span->setDone(true);
+ ++fDoneCount;
+ debugValidate();
+}
+
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding) {
+ SkASSERT(this == span->segment());
+ SkASSERT(winding);
+ if (span->done()) {
+ return false;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, winding);
+#endif
+ span->setWindSum(winding);
+ debugValidate();
+ return true;
+}
+
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding, int oppWinding) {
+ SkASSERT(this == span->segment());
+ SkASSERT(winding || oppWinding);
+ if (span->done()) {
+ return false;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, winding, oppWinding);
+#endif
+ span->setWindSum(winding);
+ span->setOppSum(oppWinding);
+ debugValidate();
+ return true;
+}
+
+bool SkOpSegment::match(const SkOpPtT* base, const SkOpSegment* testParent, double testT,
+ const SkPoint& testPt) const {
+ SkASSERT(this == base->segment());
+ if (this == testParent) {
+ if (precisely_equal(base->fT, testT)) {
+ return true;
+ }
+ }
+ if (!SkDPoint::ApproximatelyEqual(testPt, base->fPt)) {
+ return false;
+ }
+ return this != testParent || !this->ptsDisjoint(base->fT, base->fPt, testT, testPt);
+}
+
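+// set_last records where the chase stopped (when a last pointer is supplied)
+// and returns nullptr, letting callers record and bail out in a single step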
+static SkOpSegment* set_last(SkOpSpanBase** last, SkOpSpanBase* endSpan) {
+ if (last) {
+ *last = endSpan;
+ }
+ return nullptr;
+}
+
+SkOpSegment* SkOpSegment::nextChase(SkOpSpanBase** startPtr, int* stepPtr, SkOpSpan** minPtr,
+ SkOpSpanBase** last) const {
+ SkOpSpanBase* origStart = *startPtr;
+ int step = *stepPtr;
+ SkOpSpanBase* endSpan = step > 0 ? origStart->upCast()->next() : origStart->prev();
+ SkASSERT(endSpan);
+ SkOpAngle* angle = step > 0 ? endSpan->fromAngle() : endSpan->upCast()->toAngle();
+ SkOpSpanBase* foundSpan;
+ SkOpSpanBase* otherEnd;
+ SkOpSegment* other;
+ if (angle == nullptr) {
+ if (endSpan->t() != 0 && endSpan->t() != 1) {
+ return nullptr;
+ }
+ SkOpPtT* otherPtT = endSpan->ptT()->next();
+ other = otherPtT->segment();
+ foundSpan = otherPtT->span();
+ otherEnd = step > 0
+ ? foundSpan->upCastable() ? foundSpan->upCast()->next() : nullptr
+ : foundSpan->prev();
+ } else {
+ int loopCount = angle->loopCount();
+ if (loopCount > 2) {
+ return set_last(last, endSpan);
+ }
+ const SkOpAngle* next = angle->next();
+ if (nullptr == next) {
+ return nullptr;
+ }
+#if DEBUG_WINDING
+ if (angle->debugSign() != next->debugSign() && !angle->segment()->contour()->isXor()
+ && !next->segment()->contour()->isXor()) {
+ SkDebugf("%s mismatched signs\n", __FUNCTION__);
+ }
+#endif
+ other = next->segment();
+ foundSpan = endSpan = next->start();
+ otherEnd = next->end();
+ }
+ if (!otherEnd) {
+ return nullptr;
+ }
+ int foundStep = foundSpan->step(otherEnd);
+ if (*stepPtr != foundStep) {
+ return set_last(last, endSpan);
+ }
+ SkASSERT(*startPtr);
+// SkASSERT(otherEnd >= 0);
+ SkOpSpan* origMin = step < 0 ? origStart->prev() : origStart->upCast();
+ SkOpSpan* foundMin = foundSpan->starter(otherEnd);
+ if (foundMin->windValue() != origMin->windValue()
+ || foundMin->oppValue() != origMin->oppValue()) {
+ return set_last(last, endSpan);
+ }
+ *startPtr = foundSpan;
+ *stepPtr = foundStep;
+ if (minPtr) {
+ *minPtr = foundMin;
+ }
+ return other;
+}
+
+// Please keep this in sync with DebugClearVisited()
+void SkOpSegment::ClearVisited(SkOpSpanBase* span) {
+ // reset visited flag back to false
+ do {
+ SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ SkOpSegment* opp = ptT->segment();
+ opp->resetVisited();
+ }
+ } while (!span->final() && (span = span->upCast()->next()));
+}
+
+// Please keep this in sync with debugMissingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have visited flag clear
+// Even though pairs of curves correctly detect coincident runs, a run may be missed
+// if the coincidence is a product of multiple intersections. For instance, given
+// curves A, B, and C:
+// A-B intersect at point 1; A-C and B-C intersect at point 2, so near
+// the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
+bool SkOpSegment::missingCoincidence() {
+ if (this->done()) {
+ return false;
+ }
+ SkOpSpan* prior = nullptr;
+ SkOpSpanBase* spanBase = &fHead;
+ bool result = false;
+ int safetyNet = 100000;
+ do {
+ SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+ SkOPASSERT(ptT->span() == spanBase);
+ while ((ptT = ptT->next()) != spanStopPtT) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (ptT->deleted()) {
+ continue;
+ }
+ SkOpSegment* opp = ptT->span()->segment();
+ if (opp->done()) {
+ continue;
+ }
+ // when opp is encountered the 1st time, continue; on the 2nd encounter, look for coincidence
+ if (!opp->visited()) {
+ continue;
+ }
+ if (spanBase == &fHead) {
+ continue;
+ }
+ if (ptT->segment() == this) {
+ continue;
+ }
+ SkOpSpan* span = spanBase->upCastable();
+ // FIXME?: this assumes that if the opposite segment is coincident then no more
+ // coincidence needs to be detected. This may not be true.
+ if (span && span->containsCoincidence(opp)) {
+ continue;
+ }
+ if (spanBase->containsCoinEnd(opp)) {
+ continue;
+ }
+ SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+ // find prior span containing opp segment
+ SkOpSegment* priorOpp = nullptr;
+ SkOpSpan* priorTest = spanBase->prev();
+ while (!priorOpp && priorTest) {
+ priorStopPtT = priorPtT = priorTest->ptT();
+ while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+ if (priorPtT->deleted()) {
+ continue;
+ }
+ SkOpSegment* segment = priorPtT->span()->segment();
+ if (segment == opp) {
+ prior = priorTest;
+ priorOpp = opp;
+ break;
+ }
+ }
+ priorTest = priorTest->prev();
+ }
+ if (!priorOpp) {
+ continue;
+ }
+ if (priorPtT == ptT) {
+ continue;
+ }
+ SkOpPtT* oppStart = prior->ptT();
+ SkOpPtT* oppEnd = spanBase->ptT();
+ bool swapped = priorPtT->fT > ptT->fT;
+ if (swapped) {
+ using std::swap;
+ swap(priorPtT, ptT);
+ swap(oppStart, oppEnd);
+ }
+ SkOpCoincidence* coincidences = this->globalState()->coincidence();
+ SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+ SkOpPtT* rootPtT = ptT->span()->ptT();
+ SkOpPtT* rootOppStart = oppStart->span()->ptT();
+ SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+ if (coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ goto swapBack;
+ }
+ if (this->testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+ // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+ SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+ rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+ rootOppEnd->debugID());
+#endif
+ if (!coincidences->extend(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+ }
+#if DEBUG_COINCIDENCE
+ SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd));
+#endif
+ result = true;
+ }
+ swapBack:
+ if (swapped) {
+ using std::swap;
+ swap(priorPtT, ptT);
+ }
+ }
+ } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+ ClearVisited(&fHead);
+ return result;
+}
+
+// Please keep this in sync with debugMoveMultiples()
+// if a span has more than one intersection, merge the other segments' spans as needed
+bool SkOpSegment::moveMultiples() {
+ debugValidate();
+ SkOpSpanBase* test = &fHead;
+ do {
+ int addCount = test->spanAddsCount();
+// FAIL_IF(addCount < 1);
+ if (addCount <= 1) {
+ continue;
+ }
+ SkOpPtT* startPtT = test->ptT();
+ SkOpPtT* testPtT = startPtT;
+ int safetyHatch = 1000000;
+ do { // iterate through all spans associated with start
+ if (!--safetyHatch) {
+ return false;
+ }
+ SkOpSpanBase* oppSpan = testPtT->span();
+ if (oppSpan->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppSpan->deleted()) {
+ continue;
+ }
+ SkOpSegment* oppSegment = oppSpan->segment();
+ if (oppSegment == this) {
+ continue;
+ }
+ // find range of spans to consider merging
+ SkOpSpanBase* oppPrev = oppSpan;
+ SkOpSpanBase* oppFirst = oppSpan;
+ while ((oppPrev = oppPrev->prev())) {
+ if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppPrev->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppPrev->deleted()) {
+ continue;
+ }
+ oppFirst = oppPrev;
+ }
+ SkOpSpanBase* oppNext = oppSpan;
+ SkOpSpanBase* oppLast = oppSpan;
+ while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+ if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppNext->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppNext->deleted()) {
+ continue;
+ }
+ oppLast = oppNext;
+ }
+ if (oppFirst == oppLast) {
+ continue;
+ }
+ SkOpSpanBase* oppTest = oppFirst;
+ do {
+ if (oppTest == oppSpan) {
+ continue;
+ }
+ // check to see if the candidate meets specific criteria:
+ // it contains spans of segments in test's loop but not including 'this'
+ SkOpPtT* oppStartPtT = oppTest->ptT();
+ SkOpPtT* oppPtT = oppStartPtT;
+ while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+ SkOpSegment* oppPtTSegment = oppPtT->segment();
+ if (oppPtTSegment == this) {
+ goto tryNextSpan;
+ }
+ SkOpPtT* matchPtT = startPtT;
+ do {
+ if (matchPtT->segment() == oppPtTSegment) {
+ goto foundMatch;
+ }
+ } while ((matchPtT = matchPtT->next()) != startPtT);
+ goto tryNextSpan;
+ foundMatch: // merge oppTest and oppSpan
+ oppSegment->debugValidate();
+ oppTest->mergeMatches(oppSpan);
+ oppTest->addOpp(oppSpan);
+ oppSegment->debugValidate();
+ goto checkNextSpan;
+ }
+ tryNextSpan:
+ ;
+ } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+ } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+ ;
+ } while ((test = test->final() ? nullptr : test->upCast()->next()));
+ debugValidate();
+ return true;
+}
+
+// adjacent spans may have points close by
+bool SkOpSegment::spansNearby(const SkOpSpanBase* refSpan, const SkOpSpanBase* checkSpan,
+ bool* found) const {
+ const SkOpPtT* refHead = refSpan->ptT();
+ const SkOpPtT* checkHead = checkSpan->ptT();
+// if the first pt pair from adjacent spans is far apart, assume that all are far enough apart
+ if (!SkDPoint::WayRoughlyEqual(refHead->fPt, checkHead->fPt)) {
+#if DEBUG_COINCIDENCE
+ // verify that no combination of points are close
+ const SkOpPtT* dBugRef = refHead;
+ do {
+ const SkOpPtT* dBugCheck = checkHead;
+ do {
+ SkOPASSERT(!SkDPoint::ApproximatelyEqual(dBugRef->fPt, dBugCheck->fPt));
+ dBugCheck = dBugCheck->next();
+ } while (dBugCheck != checkHead);
+ dBugRef = dBugRef->next();
+ } while (dBugRef != refHead);
+#endif
+ *found = false;
+ return true;
+ }
+ // check only unique points
+ SkScalar distSqBest = SK_ScalarMax;
+ const SkOpPtT* refBest = nullptr;
+ const SkOpPtT* checkBest = nullptr;
+ const SkOpPtT* ref = refHead;
+ do {
+ if (ref->deleted()) {
+ continue;
+ }
+ while (ref->ptAlreadySeen(refHead)) {
+ ref = ref->next();
+ if (ref == refHead) {
+ goto doneCheckingDistance;
+ }
+ }
+ const SkOpPtT* check = checkHead;
+ const SkOpSegment* refSeg = ref->segment();
+ int escapeHatch = 100000; // defend against infinite loops
+ do {
+ if (check->deleted()) {
+ continue;
+ }
+ while (check->ptAlreadySeen(checkHead)) {
+ check = check->next();
+ if (check == checkHead) {
+ goto nextRef;
+ }
+ }
+ SkScalar distSq = SkPointPriv::DistanceToSqd(ref->fPt, check->fPt);
+ if (distSqBest > distSq && (refSeg != check->segment()
+ || !refSeg->ptsDisjoint(*ref, *check))) {
+ distSqBest = distSq;
+ refBest = ref;
+ checkBest = check;
+ }
+ if (--escapeHatch <= 0) {
+ return false;
+ }
+ } while ((check = check->next()) != checkHead);
+ nextRef:
+ ;
+ } while ((ref = ref->next()) != refHead);
+doneCheckingDistance:
+ *found = checkBest && refBest->segment()->match(refBest, checkBest->segment(), checkBest->fT,
+ checkBest->fPt);
+ return true;
+}
+
+// Please keep this function in sync with debugMoveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+bool SkOpSegment::moveNearby() {
+ debugValidate();
+ // release undeleted spans pointing to this seg that are linked to the primary span
+ SkOpSpanBase* spanBase = &fHead;
+ int escapeHatch = 9999; // the largest count for a regular test is 50; for a fuzzer, 500
+ do {
+ SkOpPtT* ptT = spanBase->ptT();
+ const SkOpPtT* headPtT = ptT;
+ while ((ptT = ptT->next()) != headPtT) {
+ if (!--escapeHatch) {
+ return false;
+ }
+ SkOpSpanBase* test = ptT->span();
+ if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+ && test->ptT() == ptT) {
+ if (test->final()) {
+ if (spanBase == &fHead) {
+ this->clearAll();
+ return true;
+ }
+ spanBase->upCast()->release(ptT);
+ } else if (test->prev()) {
+ test->upCast()->release(headPtT);
+ }
+ break;
+ }
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (!spanBase->final());
+ // This loop looks for adjacent spans that are nearby
+ spanBase = &fHead;
+ do { // check each adjacent pair of spans and merge those found to be nearby
+ SkOpSpanBase* test = spanBase->upCast()->next();
+ bool found;
+ if (!this->spansNearby(spanBase, test, &found)) {
+ return false;
+ }
+ if (found) {
+ if (test->final()) {
+ if (spanBase->prev()) {
+ test->merge(spanBase->upCast());
+ } else {
+ this->clearAll();
+ return true;
+ }
+ } else {
+ spanBase->merge(test->upCast());
+ }
+ }
+ spanBase = test;
+ } while (!spanBase->final());
+ debugValidate();
+ return true;
+}
+
+bool SkOpSegment::operand() const {
+ return fContour->operand();
+}
+
+bool SkOpSegment::oppXor() const {
+ return fContour->oppXor();
+}
+
+bool SkOpSegment::ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const {
+ if (fVerb == SkPath::kLine_Verb) {
+ return false;
+ }
+ // quads (and cubics) can loop back to nearly a line so that an opposite curve
+ // hits in two places with very different t values.
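+ // Example (added for illustration): such a curve can map t1 = 0.1 and
+ // t2 = 0.9 to nearly the same point; the point at midT = 0.5 then lies far
+ // from both, and the check below reports the t values as disjoint.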
+ // OPTIMIZATION: curves could be preflighted so that, for example, something like
+ // 'controls contained by ends' could avoid this check for common curves
+ // 'ends are extremes in x or y' is cheaper to compute and real-world common
+ // on the other hand, the below check is relatively inexpensive
+ double midT = (t1 + t2) / 2;
+ SkPoint midPt = this->ptAtT(midT);
+ double seDistSq = SkTMax(SkPointPriv::DistanceToSqd(pt1, pt2) * 2, FLT_EPSILON * 2);
+ return SkPointPriv::DistanceToSqd(midPt, pt1) > seDistSq ||
+ SkPointPriv::DistanceToSqd(midPt, pt2) > seDistSq;
+}
+
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* maxWinding, int* sumWinding) {
+ int deltaSum = SpanSign(start, end);
+ *maxWinding = *sumMiWinding;
+ *sumWinding = *sumMiWinding -= deltaSum;
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* sumSuWinding, int* maxWinding, int* sumWinding, int* oppMaxWinding,
+ int* oppSumWinding) {
+ int deltaSum = SpanSign(start, end);
+ int oppDeltaSum = OppSign(start, end);
+ if (operand()) {
+ *maxWinding = *sumSuWinding;
+ *sumWinding = *sumSuWinding -= deltaSum;
+ *oppMaxWinding = *sumMiWinding;
+ *oppSumWinding = *sumMiWinding -= oppDeltaSum;
+ } else {
+ *maxWinding = *sumMiWinding;
+ *sumWinding = *sumMiWinding -= deltaSum;
+ *oppMaxWinding = *sumSuWinding;
+ *oppSumWinding = *sumSuWinding -= oppDeltaSum;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*oppSumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
+bool SkOpSegment::sortAngles() {
+ SkOpSpanBase* span = &this->fHead;
+ do {
+ SkOpAngle* fromAngle = span->fromAngle();
+ SkOpAngle* toAngle = span->final() ? nullptr : span->upCast()->toAngle();
+ if (!fromAngle && !toAngle) {
+ continue;
+ }
+#if DEBUG_ANGLE
+ bool wroteAfterHeader = false;
+#endif
+ SkOpAngle* baseAngle = fromAngle;
+ if (fromAngle && toAngle) {
+#if DEBUG_ANGLE
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(), span->t(),
+ span->debugID());
+ wroteAfterHeader = true;
+#endif
+ FAIL_IF(!fromAngle->insert(toAngle));
+ } else if (!fromAngle) {
+ baseAngle = toAngle;
+ }
+ SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ int safetyNet = 1000000;
+ do {
+ if (!--safetyNet) {
+ return false;
+ }
+ SkOpSpanBase* oSpan = ptT->span();
+ if (oSpan == span) {
+ continue;
+ }
+ SkOpAngle* oAngle = oSpan->fromAngle();
+ if (oAngle) {
+#if DEBUG_ANGLE
+ if (!wroteAfterHeader) {
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+ span->t(), span->debugID());
+ wroteAfterHeader = true;
+ }
+#endif
+ if (!oAngle->loopContains(baseAngle)) {
+ baseAngle->insert(oAngle);
+ }
+ }
+ if (!oSpan->final()) {
+ oAngle = oSpan->upCast()->toAngle();
+ if (oAngle) {
+#if DEBUG_ANGLE
+ if (!wroteAfterHeader) {
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+ span->t(), span->debugID());
+ wroteAfterHeader = true;
+ }
+#endif
+ if (!oAngle->loopContains(baseAngle)) {
+ baseAngle->insert(oAngle);
+ }
+ }
+ }
+ } while ((ptT = ptT->next()) != stopPtT);
+ if (baseAngle->loopCount() == 1) {
+ span->setFromAngle(nullptr);
+ if (toAngle) {
+ span->upCast()->setToAngle(nullptr);
+ }
+ baseAngle = nullptr;
+ }
+#if DEBUG_SORT
+ SkASSERT(!baseAngle || baseAngle->loopCount() > 1);
+#endif
+ } while (!span->final() && (span = span->upCast()->next()));
+ return true;
+}
+
+bool SkOpSegment::subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end,
+ SkDCurve* edge) const {
+ SkASSERT(start != end);
+ const SkOpPtT& startPtT = *start->ptT();
+ const SkOpPtT& endPtT = *end->ptT();
+ SkDEBUGCODE(edge->fVerb = fVerb);
+ edge->fCubic[0].set(startPtT.fPt);
+ int points = SkPathOpsVerbToPoints(fVerb);
+ edge->fCubic[points].set(endPtT.fPt);
+ if (fVerb == SkPath::kLine_Verb) {
+ return false;
+ }
+ double startT = startPtT.fT;
+ double endT = endPtT.fT;
+ if ((startT == 0 || endT == 0) && (startT == 1 || endT == 1)) {
+ // don't compute midpoints if we already have them
+ if (fVerb == SkPath::kQuad_Verb) {
+ edge->fLine[1].set(fPts[1]);
+ return false;
+ }
+ if (fVerb == SkPath::kConic_Verb) {
+ edge->fConic[1].set(fPts[1]);
+ edge->fConic.fWeight = fWeight;
+ return false;
+ }
+ SkASSERT(fVerb == SkPath::kCubic_Verb);
+ if (startT == 0) {
+ edge->fCubic[1].set(fPts[1]);
+ edge->fCubic[2].set(fPts[2]);
+ return false;
+ }
+ edge->fCubic[1].set(fPts[2]);
+ edge->fCubic[2].set(fPts[1]);
+ return false;
+ }
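+ // the span covers only part of the curve; compute the interior control
+ // points over (startT, endT), since the end points were already set from
+ // the span's pt values above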
+ if (fVerb == SkPath::kQuad_Verb) {
+ edge->fQuad[1] = SkDQuad::SubDivide(fPts, edge->fQuad[0], edge->fQuad[2], startT, endT);
+ } else if (fVerb == SkPath::kConic_Verb) {
+ edge->fConic[1] = SkDConic::SubDivide(fPts, fWeight, edge->fQuad[0], edge->fQuad[2],
+ startT, endT, &edge->fConic.fWeight);
+ } else {
+ SkASSERT(fVerb == SkPath::kCubic_Verb);
+ SkDCubic::SubDivide(fPts, edge->fCubic[0], edge->fCubic[3], startT, endT, &edge->fCubic[1]);
+ }
+ return true;
+}
+
+bool SkOpSegment::testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT,
+ const SkOpSpanBase* prior, const SkOpSpanBase* spanBase, const SkOpSegment* opp) const {
+ // average t, find mid pt
+ double midT = (prior->t() + spanBase->t()) / 2;
+ SkPoint midPt = this->ptAtT(midT);
+ bool coincident = true;
+ // if the mid pt is not near either end pt, project perpendicular through opp seg
+ if (!SkDPoint::ApproximatelyEqual(priorPtT->fPt, midPt)
+ && !SkDPoint::ApproximatelyEqual(ptT->fPt, midPt)) {
+ if (priorPtT->span() == ptT->span()) {
+ return false;
+ }
+ coincident = false;
+ SkIntersections i;
+ SkDCurve curvePart;
+ this->subDivide(prior, spanBase, &curvePart);
+ SkDVector dxdy = (*CurveDDSlopeAtT[fVerb])(curvePart, 0.5f);
+ SkDPoint partMidPt = (*CurveDDPointAtT[fVerb])(curvePart, 0.5f);
+ SkDLine ray = {{{midPt.fX, midPt.fY}, {partMidPt.fX + dxdy.fY, partMidPt.fY - dxdy.fX}}};
+ SkDCurve oppPart;
+ opp->subDivide(priorPtT->span(), ptT->span(), &oppPart);
+ (*CurveDIntersectRay[opp->verb()])(oppPart, ray, &i);
+ // measure distance and see if it's small enough to denote coincidence
+ for (int index = 0; index < i.used(); ++index) {
+ if (!between(0, i[0][index], 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (oppPt.approximatelyDEqual(midPt)) {
+ // the coincidence can occur at almost any angle
+ coincident = true;
+ }
+ }
+ }
+ return coincident;
+}
+
+SkOpSpan* SkOpSegment::undoneSpan() {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (!span->done()) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+int SkOpSegment::updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const {
+ const SkOpSpan* lesser = start->starter(end);
+ int oppWinding = lesser->oppSum();
+ int oppSpanWinding = SkOpSegment::OppSign(start, end);
+ if (oppSpanWinding && UseInnerWinding(oppWinding - oppSpanWinding, oppWinding)
+ && oppWinding != SK_MaxS32) {
+ oppWinding -= oppSpanWinding;
+ }
+ return oppWinding;
+}
+
+int SkOpSegment::updateOppWinding(const SkOpAngle* angle) const {
+ const SkOpSpanBase* startSpan = angle->start();
+ const SkOpSpanBase* endSpan = angle->end();
+ return updateOppWinding(endSpan, startSpan);
+}
+
+int SkOpSegment::updateOppWindingReverse(const SkOpAngle* angle) const {
+ const SkOpSpanBase* startSpan = angle->start();
+ const SkOpSpanBase* endSpan = angle->end();
+ return updateOppWinding(startSpan, endSpan);
+}
+
+int SkOpSegment::updateWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+ SkOpSpan* lesser = start->starter(end);
+ int winding = lesser->windSum();
+ if (winding == SK_MinS32) {
+ winding = lesser->computeWindSum();
+ }
+ if (winding == SK_MinS32) {
+ return winding;
+ }
+ int spanWinding = SkOpSegment::SpanSign(start, end);
+ if (winding && UseInnerWinding(winding - spanWinding, winding)
+ && winding != SK_MaxS32) {
+ winding -= spanWinding;
+ }
+ return winding;
+}
+
+int SkOpSegment::updateWinding(SkOpAngle* angle) {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ return updateWinding(endSpan, startSpan);
+}
+
+int SkOpSegment::updateWindingReverse(const SkOpAngle* angle) {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ return updateWinding(startSpan, endSpan);
+}
+
+// OPTIMIZATION: does the following also work, and is it any faster?
+// return outerWinding * innerWinding > 0
+// || ((outerWinding + innerWinding < 0) ^ ((outerWinding - innerWinding) < 0)))
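+// For reference, example values (added for illustration): UseInnerWinding(1, 2)
+// returns true (|1| < |2|, the inner winding dominates); UseInnerWinding(2, 1)
+// returns false; on a tie the sign decides, so UseInnerWinding(-1, 1) is true.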
+bool SkOpSegment::UseInnerWinding(int outerWinding, int innerWinding) {
+ SkASSERT(outerWinding != SK_MaxS32);
+ SkASSERT(innerWinding != SK_MaxS32);
+ int absOut = SkTAbs(outerWinding);
+ int absIn = SkTAbs(innerWinding);
+ bool result = absOut == absIn ? outerWinding < 0 : absOut < absIn;
+ return result;
+}
+
+int SkOpSegment::windSum(const SkOpAngle* angle) const {
+ const SkOpSpan* minSpan = angle->start()->starter(angle->end());
+ return minSpan->windSum();
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.h b/gfx/skia/skia/src/pathops/SkOpSegment.h
new file mode 100644
index 0000000000..032f822d19
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSegment_DEFINE
+#define SkOpSegment_DEFINE
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+
+struct SkDCurve;
+class SkOpCoincidence;
+class SkOpContour;
+enum class SkOpRayDir;
+struct SkOpRayHit;
+class SkPathWriter;
+
+class SkOpSegment {
+public:
+ bool operator<(const SkOpSegment& rh) const {
+ return fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ SkOpAngle* activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr, SkOpSpanBase** endPtr,
+ bool* done);
+ SkOpAngle* activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ SkOpAngle* activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ bool activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op);
+ bool activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end, SkPathOp op,
+ int* sumMiWinding, int* sumSuWinding);
+
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding);
+
+ SkOpSegment* addConic(SkPoint pts[3], SkScalar weight, SkOpContour* parent) {
+ init(pts, weight, parent, SkPath::kConic_Verb);
+ SkDCurve curve;
+ curve.fConic.set(pts, weight);
+ curve.setConicBounds(pts, weight, 0, 1, &fBounds);
+ return this;
+ }
+
+ SkOpSegment* addCubic(SkPoint pts[4], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kCubic_Verb);
+ SkDCurve curve;
+ curve.fCubic.set(pts);
+ curve.setCubicBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
+
+ bool addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end, SkPathWriter* path) const;
+
+ SkOpAngle* addEndSpan() {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(&fTail, fTail.prev());
+ fTail.setFromAngle(angle);
+ return angle;
+ }
+
+ bool addExpanded(double newT, const SkOpSpanBase* test, bool* startOver);
+
+ SkOpSegment* addLine(SkPoint pts[2], SkOpContour* parent) {
+ SkASSERT(pts[0] != pts[1]);
+ init(pts, 1, parent, SkPath::kLine_Verb);
+ fBounds.setBounds(pts, 2);
+ return this;
+ }
+
+ SkOpPtT* addMissing(double t, SkOpSegment* opp, bool* allExist);
+
+ SkOpAngle* addStartSpan() {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(&fHead, fHead.next());
+ fHead.setToAngle(angle);
+ return angle;
+ }
+
+ SkOpSegment* addQuad(SkPoint pts[3], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kQuad_Verb);
+ SkDCurve curve;
+ curve.fQuad.set(pts);
+ curve.setQuadBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
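+ // addConic, addCubic, addLine, and addQuad above all follow the same pattern:
+ // initialize the segment via init(), then compute a tight bounding box for the
+ // curve (for lines, the bounds of the two endpoints already suffice).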
+
+ SkOpPtT* addT(double t);
+ SkOpPtT* addT(double t, const SkPoint& pt);
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void bumpCount() {
+ ++fCount;
+ }
+
+ void calcAngles();
+ SkOpSpanBase::Collapsed collapsed(double startT, double endT) const;
+ static bool ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ static bool ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ int computeSum(SkOpSpanBase* start, SkOpSpanBase* end, SkOpAngle::IncludeType includeType);
+
+ void clearAll();
+ void clearOne(SkOpSpan* span);
+ static void ClearVisited(SkOpSpanBase* span);
+ bool contains(double t) const;
+
+ SkOpContour* contour() const {
+ return fContour;
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ void debugAddAngle(double startT, double endT);
+#if DEBUG_COIN
+ const SkOpPtT* debugAddT(double t, SkPathOpsDebug::GlitchLog* ) const;
+#endif
+ const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_ANGLE
+ void debugCheckAngleCoin() const;
+#endif
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+ void debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ SkOpAngle* debugLastAngle();
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ void debugReset();
+ const SkOpSegment* debugSegment(int id) const;
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans(SkString* str) const;
+#endif
+#if DEBUG_MARK_DONE
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding);
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding, int oppWinding);
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+
+#if DEBUG_COINCIDENCE_ORDER
+ void debugResetCoinT() const;
+ void debugSetCoinT(int, SkScalar ) const;
+#endif
+
+#if DEBUG_COIN
+ static void DebugClearVisited(const SkOpSpanBase* span);
+
+ bool debugVisited() const {
+ if (!fDebugVisited) {
+ fDebugVisited = true;
+ return false;
+ }
+ return true;
+ }
+#endif
+
+#if DEBUG_ANGLE
+ double distSq(double t, const SkOpAngle* opp) const;
+#endif
+
+ bool done() const {
+ SkOPASSERT(fDoneCount <= fCount);
+ return fDoneCount == fCount;
+ }
+
+ bool done(const SkOpAngle* angle) const {
+ return angle->start()->starter(angle->end())->done();
+ }
+
+ SkDPoint dPtAtT(double mid) const {
+ return (*CurveDPointAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ SkDVector dSlopeAtT(double mid) const {
+ return (*CurveDSlopeAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpCoin() const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsInner(const char* prefix = "seg") const;
+
+ const SkOpPtT* existing(double t, const SkOpSegment* opp) const;
+ SkOpSegment* findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, bool* simple,
+ SkPathOp op, int xorMiMask, int xorSuMask);
+ SkOpSegment* findNextWinding(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSegment* findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSpan* findSortableTop(SkOpContour* );
+ SkOpGlobalState* globalState() const;
+
+ const SkOpSpan* head() const {
+ return &fHead;
+ }
+
+ SkOpSpan* head() {
+ return &fHead;
+ }
+
+ void init(SkPoint pts[], SkScalar weight, SkOpContour* parent, SkPath::Verb verb);
+
+ SkOpSpan* insert(SkOpSpan* prev) {
+ SkOpGlobalState* globalState = this->globalState();
+ globalState->setAllocatedOpSpan();
+ SkOpSpan* result = globalState->allocator()->make<SkOpSpan>();
+ SkOpSpanBase* next = prev->next();
+ result->setPrev(prev);
+ prev->setNext(result);
+ SkDEBUGCODE(result->ptT()->fT = 0);
+ result->setNext(next);
+ if (next) {
+ next->setPrev(result);
+ }
+ return result;
+ }
+
+ bool isClose(double t, const SkOpSegment* opp) const;
+
+ bool isHorizontal() const {
+ return fBounds.fTop == fBounds.fBottom;
+ }
+
+ SkOpSegment* isSimple(SkOpSpanBase** end, int* step) const {
+ return nextChase(end, step, nullptr, nullptr);
+ }
+
+ bool isVertical() const {
+ return fBounds.fLeft == fBounds.fRight;
+ }
+
+ bool isVertical(SkOpSpanBase* start, SkOpSpanBase* end) const {
+ return (*CurveIsVertical[fVerb])(fPts, fWeight, start->t(), end->t());
+ }
+
+ bool isXor() const;
+
+ void joinEnds(SkOpSegment* start) {
+ fTail.ptT()->addOpp(start->fHead.ptT(), start->fHead.ptT());
+ }
+
+ const SkPoint& lastPt() const {
+ return fPts[SkPathOpsVerbToPoints(fVerb)];
+ }
+
+ void markAllDone();
+ bool markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end, SkOpSpanBase** found);
+ bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ SkOpSpanBase** lastPtr);
+ bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ int oppWinding, SkOpSpanBase** lastPtr);
+ bool markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle, SkOpSpanBase** result);
+ bool markAngle(int maxWinding, int sumWinding, int oppMaxWinding, int oppSumWinding,
+ const SkOpAngle* angle, SkOpSpanBase** result);
+ void markDone(SkOpSpan* );
+ bool markWinding(SkOpSpan* , int winding);
+ bool markWinding(SkOpSpan* , int winding, int oppWinding);
+ bool match(const SkOpPtT* span, const SkOpSegment* parent, double t, const SkPoint& pt) const;
+ bool missingCoincidence();
+ bool moveMultiples();
+ bool moveNearby();
+
+ SkOpSegment* next() const {
+ return fNext;
+ }
+
+ SkOpSegment* nextChase(SkOpSpanBase** , int* step, SkOpSpan** , SkOpSpanBase** last) const;
+ bool operand() const;
+
+ static int OppSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+ int result = start->t() < end->t() ? -start->upCast()->oppValue()
+ : end->upCast()->oppValue();
+ return result;
+ }
+
+ bool oppXor() const;
+
+ const SkOpSegment* prev() const {
+ return fPrev;
+ }
+
+ SkPoint ptAtT(double mid) const {
+ return (*CurvePointAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ const SkPoint* pts() const {
+ return fPts;
+ }
+
+ bool ptsDisjoint(const SkOpPtT& span, const SkOpPtT& test) const {
+ SkASSERT(this == span.segment());
+ SkASSERT(this == test.segment());
+ return ptsDisjoint(span.fT, span.fPt, test.fT, test.fPt);
+ }
+
+ bool ptsDisjoint(const SkOpPtT& span, double t, const SkPoint& pt) const {
+ SkASSERT(this == span.segment());
+ return ptsDisjoint(span.fT, span.fPt, t, pt);
+ }
+
+ bool ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const;
+
+ void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkArenaAlloc*);
+ void release(const SkOpSpan* );
+
+#if DEBUG_COIN
+ void resetDebugVisited() const {
+ fDebugVisited = false;
+ }
+#endif
+
+ void resetVisited() {
+ fVisited = false;
+ }
+
+ void setContour(SkOpContour* contour) {
+ fContour = contour;
+ }
+
+ void setNext(SkOpSegment* next) {
+ fNext = next;
+ }
+
+ void setPrev(SkOpSegment* prev) {
+ fPrev = prev;
+ }
+
+ void setUpWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* maxWinding, int* sumWinding) {
+ int deltaSum = SpanSign(start, end);
+ *maxWinding = *sumWinding;
+ if (*sumWinding == SK_MinS32) {
+ return;
+ }
+ *sumWinding -= deltaSum;
+ }
+
+ void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* maxWinding, int* sumWinding);
+ void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding, int* sumSuWinding,
+ int* maxWinding, int* sumWinding, int* oppMaxWinding, int* oppSumWinding);
+ bool sortAngles();
+ bool spansNearby(const SkOpSpanBase* ref, const SkOpSpanBase* check, bool* found) const;
+
+ static int SpanSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+ int result = start->t() < end->t() ? -start->upCast()->windValue()
+ : end->upCast()->windValue();
+ return result;
+ }
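+ // e.g., walking a span whose windValue is 1 from lower t to higher t yields
+ // SpanSign == -1; walking the same span in decreasing t yields +1. OppSign
+ // above is identical, but reports the opposite operand's oppValue.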
+
+ SkOpAngle* spanToAngle(SkOpSpanBase* start, SkOpSpanBase* end) {
+ SkASSERT(start != end);
+ return start->t() < end->t() ? start->upCast()->toAngle() : start->fromAngle();
+ }
+
+ bool subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end, SkDCurve* result) const;
+
+ const SkOpSpanBase* tail() const {
+ return &fTail;
+ }
+
+ SkOpSpanBase* tail() {
+ return &fTail;
+ }
+
+ bool testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT, const SkOpSpanBase* prior,
+ const SkOpSpanBase* spanBase, const SkOpSegment* opp) const;
+
+ SkOpSpan* undoneSpan();
+ int updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const;
+ int updateOppWinding(const SkOpAngle* angle) const;
+ int updateOppWindingReverse(const SkOpAngle* angle) const;
+ int updateWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+ int updateWinding(SkOpAngle* angle);
+ int updateWindingReverse(const SkOpAngle* angle);
+
+ static bool UseInnerWinding(int outerWinding, int innerWinding);
+
+ SkPath::Verb verb() const {
+ return fVerb;
+ }
+
+ // look for two different spans that point to the same opposite segment
+ bool visited() {
+ if (!fVisited) {
+ fVisited = true;
+ return false;
+ }
+ return true;
+ }
+
+ SkScalar weight() const {
+ return fWeight;
+ }
+
+ SkOpSpan* windingSpanAtT(double tHit);
+ int windSum(const SkOpAngle* angle) const;
+
+private:
+ SkOpSpan fHead; // the head span always has its t set to zero
+ SkOpSpanBase fTail; // the tail span always has its t set to one
+ SkOpContour* fContour;
+ SkOpSegment* fNext; // forward-only linked list used by contour to walk the segments
+ const SkOpSegment* fPrev;
+ SkPoint* fPts; // pointer into array of points owned by edge builder that may be tweaked
+ SkPathOpsBounds fBounds; // tight bounds
+ SkScalar fWeight;
+ int fCount; // number of spans (one for a non-intersecting segment)
+ int fDoneCount; // number of processed spans (zero initially)
+ SkPath::Verb fVerb;
+ bool fVisited; // used by missing coincidence check
+#if DEBUG_COIN
+ mutable bool fDebugVisited; // used by debug missing coincidence check
+#endif
+#if DEBUG_COINCIDENCE_ORDER
+ mutable int fDebugBaseIndex;
+ mutable SkScalar fDebugBaseMin; // if > 0, the 1st t value in this seg vis-a-vis the ref seg
+ mutable SkScalar fDebugBaseMax;
+ mutable int fDebugLastIndex;
+ mutable SkScalar fDebugLastMin; // if > 0, the last t -- next t val - base has same sign
+ mutable SkScalar fDebugLastMax;
+#endif
+ SkDEBUGCODE(int fID);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.cpp b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
new file mode 100644
index 0000000000..ea57756b60
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathWriter.h"
+
+bool SkOpPtT::alias() const {
+ return this->span()->ptT() != this;
+}
+
+const SkOpPtT* SkOpPtT::active() const {
+ if (!fDeleted) {
+ return this;
+ }
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fSpan == fSpan && !ptT->fDeleted) {
+ return ptT;
+ }
+ }
+ return nullptr; // should never return deleted; caller must abort
+}
+
+bool SkOpPtT::contains(const SkOpPtT* check) const {
+ SkOPASSERT(this != check);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT == check) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpPtT::contains(const SkOpSegment* segment, const SkPoint& pt) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fPt == pt && ptT->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpPtT::contains(const SkOpSegment* segment, double t) const {
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fT == t && ptT->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+const SkOpPtT* SkOpPtT::contains(const SkOpSegment* check) const {
+ SkASSERT(this->segment() != check);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->segment() == check && !ptT->deleted()) {
+ return ptT;
+ }
+ }
+ return nullptr;
+}
+
+SkOpContour* SkOpPtT::contour() const {
+ return segment()->contour();
+}
+
+const SkOpPtT* SkOpPtT::find(const SkOpSegment* segment) const {
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ do {
+ if (ptT->segment() == segment && !ptT->deleted()) {
+ return ptT;
+ }
+ ptT = ptT->fNext;
+ } while (stopPtT != ptT);
+// SkASSERT(0);
+ return nullptr;
+}
+
+SkOpGlobalState* SkOpPtT::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpPtT::init(SkOpSpanBase* span, double t, const SkPoint& pt, bool duplicate) {
+ fT = t;
+ fPt = pt;
+ fSpan = span;
+ fNext = this;
+ fDuplicatePt = duplicate;
+ fDeleted = false;
+ fCoincident = false;
+ SkDEBUGCODE(fID = span->globalState()->nextPtTID());
+}
+
+bool SkOpPtT::onEnd() const {
+ const SkOpSpanBase* span = this->span();
+ if (span->ptT() != this) {
+ return false;
+ }
+ const SkOpSegment* segment = this->segment();
+ return span == segment->head() || span == segment->tail();
+}
+
+bool SkOpPtT::ptAlreadySeen(const SkOpPtT* check) const {
+ while (this != check) {
+ if (this->fPt == check->fPt) {
+ return true;
+ }
+ check = check->fNext;
+ }
+ return false;
+}
+
+SkOpPtT* SkOpPtT::prev() {
+ SkOpPtT* result = this;
+ SkOpPtT* next = this;
+ while ((next = next->fNext) != this) {
+ result = next;
+ }
+ SkASSERT(result->fNext == this);
+ return result;
+}
+
+const SkOpSegment* SkOpPtT::segment() const {
+ return span()->segment();
+}
+
+SkOpSegment* SkOpPtT::segment() {
+ return span()->segment();
+}
+
+void SkOpPtT::setDeleted() {
+ SkASSERT(this->span()->debugDeleted() || this->span()->ptT() != this);
+ SkOPASSERT(!fDeleted);
+ fDeleted = true;
+}
+
+bool SkOpSpanBase::addOpp(SkOpSpanBase* opp) {
+ SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+ if (!oppPrev) {
+ return true;
+ }
+ FAIL_IF(!this->mergeMatches(opp));
+ this->ptT()->addOpp(opp->ptT(), oppPrev);
+ this->checkForCollapsedCoincidence();
+ return true;
+}
+
+SkOpSpanBase::Collapsed SkOpSpanBase::collapsed(double s, double e) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* startNext = nullptr;
+ const SkOpPtT* walk = start;
+ double min = walk->fT;
+ double max = min;
+ const SkOpSegment* segment = this->segment();
+ int safetyNet = 100000;
+ while ((walk = walk->next()) != start) {
+ if (!--safetyNet) {
+ return Collapsed::kError;
+ }
+ if (walk == startNext) {
+ return Collapsed::kError;
+ }
+ if (walk->segment() != segment) {
+ continue;
+ }
+ min = SkTMin(min, walk->fT);
+ max = SkTMax(max, walk->fT);
+ if (between(min, s, max) && between(min, e, max)) {
+ return Collapsed::kYes;
+ }
+ startNext = start->next();
+ }
+ return Collapsed::kNo;
+}
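+// e.g., if this span's own t is 0.2 and its loop holds another ptT on the same
+// segment at t == 0.8, collapsed(0.3, 0.6) returns kYes: both ends of the
+// tested range fall within the [0.2, 0.8] spread already merged into one span.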
+
+bool SkOpSpanBase::contains(const SkOpSpanBase* span) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* check = &span->fPtT;
+ SkOPASSERT(start != check);
+ const SkOpPtT* walk = start;
+ while ((walk = walk->next()) != start) {
+ if (walk == check) {
+ return true;
+ }
+ }
+ return false;
+}
+
+const SkOpPtT* SkOpSpanBase::contains(const SkOpSegment* segment) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* walk = start;
+ while ((walk = walk->next()) != start) {
+ if (walk->deleted()) {
+ continue;
+ }
+ if (walk->segment() == segment && walk->span()->ptT() == walk) {
+ return walk;
+ }
+ }
+ return nullptr;
+}
+
+bool SkOpSpanBase::containsCoinEnd(const SkOpSegment* segment) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpSpanBase* next = this;
+ while ((next = next->fCoinEnd) != this) {
+ if (next->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+SkOpContour* SkOpSpanBase::contour() const {
+ return segment()->contour();
+}
+
+SkOpGlobalState* SkOpSpanBase::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpSpanBase::initBase(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+ fSegment = segment;
+ fPtT.init(this, t, pt, false);
+ fCoinEnd = this;
+ fFromAngle = nullptr;
+ fPrev = prev;
+ fSpanAdds = 0;
+ fAligned = true;
+ fChased = false;
+ SkDEBUGCODE(fCount = 1);
+ SkDEBUGCODE(fID = globalState()->nextSpanID());
+ SkDEBUGCODE(fDebugDeleted = false);
+}
+
+// this pair of spans shares a common t value or point; merge them and eliminate duplicates
+// this does not compute the best t or pt value; it merely moves all data into a single list
+void SkOpSpanBase::merge(SkOpSpan* span) {
+ SkOpPtT* spanPtT = span->ptT();
+ SkASSERT(this->t() != spanPtT->fT);
+ SkASSERT(!zero_or_one(spanPtT->fT));
+ span->release(this->ptT());
+ if (this->contains(span)) {
+ SkOPASSERT(0); // check to see if this ever happens -- should have been found earlier
+ return; // merge is already in the ptT loop
+ }
+ SkOpPtT* remainder = spanPtT->next();
+ this->ptT()->insert(spanPtT);
+ while (remainder != spanPtT) {
+ SkOpPtT* next = remainder->next();
+ SkOpPtT* compare = spanPtT->next();
+ while (compare != spanPtT) {
+ SkOpPtT* nextC = compare->next();
+ if (nextC->span() == remainder->span() && nextC->fT == remainder->fT) {
+ goto tryNextRemainder;
+ }
+ compare = nextC;
+ }
+ spanPtT->insert(remainder);
+tryNextRemainder:
+ remainder = next;
+ }
+ fSpanAdds += span->fSpanAdds;
+}
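+// roughly: entries from the incoming loop are spliced in one at a time, and an
+// entry is skipped when the destination loop already holds a ptT with the same
+// span and t, so each pt-t alias appears in the merged loop exactly once.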
+
+// please keep in sync with debugCheckForCollapsedCoincidence()
+void SkOpSpanBase::checkForCollapsedCoincidence() {
+ SkOpCoincidence* coins = this->globalState()->coincidence();
+ if (coins->isEmpty()) {
+ return;
+ }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in the loop, see if its opposite is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+ SkOpPtT* head = this->ptT();
+ SkOpPtT* test = head;
+ do {
+ if (!test->coincident()) {
+ continue;
+ }
+ coins->markCollapsed(test);
+ } while ((test = test->next()) != head);
+ coins->releaseDeleted();
+}
+
+// please keep in sync with debugMergeMatches()
+// look to see if the pt-t linked list contains the same segment more than once;
+// if so, and if each pt-t is directly pointed to by a span in that segment,
+// merge them
+// keep the points, but remove spans so that the segment doesn't have two or more
+// spans pointing to the same pt-t loop at different loop elements
+bool SkOpSpanBase::mergeMatches(SkOpSpanBase* opp) {
+ SkOpPtT* test = &fPtT;
+ SkOpPtT* testNext;
+ const SkOpPtT* stop = test;
+ int safetyHatch = 1000000;
+ do {
+ if (!--safetyHatch) {
+ return false;
+ }
+ testNext = test->next();
+ if (test->deleted()) {
+ continue;
+ }
+ SkOpSpanBase* testBase = test->span();
+ SkASSERT(testBase->ptT() == test);
+ SkOpSegment* segment = test->segment();
+ if (segment->done()) {
+ continue;
+ }
+ SkOpPtT* inner = opp->ptT();
+ const SkOpPtT* innerStop = inner;
+ do {
+ if (inner->segment() != segment) {
+ continue;
+ }
+ if (inner->deleted()) {
+ continue;
+ }
+ SkOpSpanBase* innerBase = inner->span();
+ SkASSERT(innerBase->ptT() == inner);
+ // when the intersection is first detected, the span base is marked if there is
+ // more than one point in the intersection.
+ if (!zero_or_one(inner->fT)) {
+ innerBase->upCast()->release(test);
+ } else {
+ SkOPASSERT(inner->fT != test->fT);
+ if (!zero_or_one(test->fT)) {
+ testBase->upCast()->release(inner);
+ } else {
+ segment->markAllDone(); // mark segment as collapsed
+ SkDEBUGCODE(testBase->debugSetDeleted());
+ test->setDeleted();
+ SkDEBUGCODE(innerBase->debugSetDeleted());
+ inner->setDeleted();
+ }
+ }
+#ifdef SK_DEBUG // assert if another undeleted entry points to segment
+ const SkOpPtT* debugInner = inner;
+ while ((debugInner = debugInner->next()) != innerStop) {
+ if (debugInner->segment() != segment) {
+ continue;
+ }
+ if (debugInner->deleted()) {
+ continue;
+ }
+ SkOPASSERT(0);
+ }
+#endif
+ break;
+ } while ((inner = inner->next()) != innerStop);
+ } while ((test = testNext) != stop);
+ this->checkForCollapsedCoincidence();
+ return true;
+}
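+// the three release cases above: if the duplicate on the opposite span sits at
+// an interior t, that span is released; otherwise, if this side's duplicate is
+// interior, this side's span is released; if both duplicates sit at t == 0 or
+// t == 1, neither span can go, so the whole segment is marked done instead.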
+
+int SkOpSpan::computeWindSum() {
+ SkOpGlobalState* globals = this->globalState();
+ SkOpContour* contourHead = globals->contourHead();
+ int windTry = 0;
+ while (!this->sortableTop(contourHead) && ++windTry < SkOpGlobalState::kMaxWindingTries) {
+ }
+ return this->windSum();
+}
+
+bool SkOpSpan::containsCoincidence(const SkOpSegment* segment) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpSpan* next = fCoincident;
+ do {
+ if (next->segment() == segment) {
+ return true;
+ }
+ } while ((next = next->fCoincident) != this);
+ return false;
+}
+
+void SkOpSpan::init(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+ SkASSERT(t != 1);
+ initBase(segment, prev, t, pt);
+ fCoincident = this;
+ fToAngle = nullptr;
+ fWindSum = fOppSum = SK_MinS32;
+ fWindValue = 1;
+ fOppValue = 0;
+ fTopTTry = 0;
+ fChased = fDone = false;
+ segment->bumpCount();
+ fAlreadyAdded = false;
+}
+
+// Please keep this in sync with debugInsertCoincidence()
+bool SkOpSpan::insertCoincidence(const SkOpSegment* segment, bool flipped, bool ordered) {
+ if (this->containsCoincidence(segment)) {
+ return true;
+ }
+ SkOpPtT* next = &fPtT;
+ while ((next = next->next()) != &fPtT) {
+ if (next->segment() == segment) {
+ SkOpSpan* span;
+ SkOpSpanBase* base = next->span();
+ if (!ordered) {
+ const SkOpPtT* spanEndPtT = fNext->contains(segment);
+ FAIL_IF(!spanEndPtT);
+ const SkOpSpanBase* spanEnd = spanEndPtT->span();
+ const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+ FAIL_IF(!start->span()->upCastable());
+ span = const_cast<SkOpSpan*>(start->span()->upCast());
+ } else if (flipped) {
+ span = base->prev();
+ FAIL_IF(!span);
+ } else {
+ FAIL_IF(!base->upCastable());
+ span = base->upCast();
+ }
+ this->insertCoincidence(span);
+ return true;
+ }
+ }
+#if DEBUG_COINCIDENCE
+ SkASSERT(0); // FIXME? if we get here, the span is missing its opposite segment...
+#endif
+ return true;
+}
+
+void SkOpSpan::release(const SkOpPtT* kept) {
+ SkDEBUGCODE(fDebugDeleted = true);
+ SkOPASSERT(kept->span() != this);
+ SkASSERT(!final());
+ SkOpSpan* prev = this->prev();
+ SkASSERT(prev);
+ SkOpSpanBase* next = this->next();
+ SkASSERT(next);
+ prev->setNext(next);
+ next->setPrev(prev);
+ this->segment()->release(this);
+ SkOpCoincidence* coincidence = this->globalState()->coincidence();
+ if (coincidence) {
+ coincidence->fixUp(this->ptT(), kept);
+ }
+ this->ptT()->setDeleted();
+ SkOpPtT* stopPtT = this->ptT();
+ SkOpPtT* testPtT = stopPtT;
+ const SkOpSpanBase* keptSpan = kept->span();
+ do {
+ if (this == testPtT->span()) {
+ testPtT->setSpan(keptSpan);
+ }
+ } while ((testPtT = testPtT->next()) != stopPtT);
+}
+
+void SkOpSpan::setOppSum(int oppSum) {
+ SkASSERT(!final());
+ if (fOppSum != SK_MinS32 && fOppSum != oppSum) {
+ this->globalState()->setWindingFailed();
+ return;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(oppSum) <= DEBUG_LIMIT_WIND_SUM);
+ fOppSum = oppSum;
+}
+
+void SkOpSpan::setWindSum(int windSum) {
+ SkASSERT(!final());
+ if (fWindSum != SK_MinS32 && fWindSum != windSum) {
+ this->globalState()->setWindingFailed();
+ return;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(windSum) <= DEBUG_LIMIT_WIND_SUM);
+ fWindSum = windSum;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.h b/gfx/skia/skia/src/pathops/SkOpSpan.h
new file mode 100644
index 0000000000..d04ae50ae8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.h
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSpan_DEFINED
+#define SkOpSpan_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkArenaAlloc;
+class SkOpAngle;
+class SkOpContour;
+class SkOpGlobalState;
+class SkOpSegment;
+class SkOpSpanBase;
+class SkOpSpan;
+struct SkPathOpsBounds;
+
+// subset of op span used by terminal span (when t is equal to one)
+class SkOpPtT {
+public:
+ enum {
+ kIsAlias = 1,
+ kIsDuplicate = 1
+ };
+
+ const SkOpPtT* active() const;
+
+ // please keep in sync with debugAddOpp()
+ void addOpp(SkOpPtT* opp, SkOpPtT* oppPrev) {
+ SkOpPtT* oldNext = this->fNext;
+ SkASSERT(this != opp);
+ this->fNext = opp;
+ SkASSERT(oppPrev != oldNext);
+ oppPrev->fNext = oldNext;
+ }
+
+ bool alias() const;
+ bool coincident() const { return fCoincident; }
+ bool contains(const SkOpPtT* ) const;
+ bool contains(const SkOpSegment*, const SkPoint& ) const;
+ bool contains(const SkOpSegment*, double t) const;
+ const SkOpPtT* contains(const SkOpSegment* ) const;
+ SkOpContour* contour() const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ void debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const;
+ const SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ bool debugContains(const SkOpPtT* ) const;
+ const SkOpPtT* debugContains(const SkOpSegment* check) const;
+ SkOpContour* debugContour(int id) const;
+ const SkOpPtT* debugEnder(const SkOpPtT* end) const;
+ int debugLoopLimit(bool report) const;
+ bool debugMatchID(int id) const;
+ const SkOpPtT* debugOppPrev(const SkOpPtT* opp) const;
+ const SkOpPtT* debugPtT(int id) const;
+ void debugResetCoinT() const;
+ const SkOpSegment* debugSegment(int id) const;
+ void debugSetCoinT(int ) const;
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+
+ bool deleted() const {
+ return fDeleted;
+ }
+
+ bool duplicate() const {
+ return fDuplicatePt;
+ }
+
+ void dump() const; // available to testing only
+ void dumpAll() const;
+ void dumpBase() const;
+
+ const SkOpPtT* find(const SkOpSegment* ) const;
+ SkOpGlobalState* globalState() const;
+ void init(SkOpSpanBase* , double t, const SkPoint& , bool dup);
+
+ void insert(SkOpPtT* span) {
+ SkASSERT(span != this);
+ span->fNext = fNext;
+ fNext = span;
+ }
+
+ const SkOpPtT* next() const {
+ return fNext;
+ }
+
+ SkOpPtT* next() {
+ return fNext;
+ }
+
+ bool onEnd() const;
+
+ // returns nullptr if this is already in the opp ptT loop
+ SkOpPtT* oppPrev(const SkOpPtT* opp) const {
+ // find the fOpp ptr to opp
+ SkOpPtT* oppPrev = opp->fNext;
+ if (oppPrev == this) {
+ return nullptr;
+ }
+ while (oppPrev->fNext != opp) {
+ oppPrev = oppPrev->fNext;
+ if (oppPrev == this) {
+ return nullptr;
+ }
+ }
+ return oppPrev;
+ }
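+ // e.g., with this loop A -> B -> A and opp's loop C -> D -> C, oppPrev(C)
+ // returns D (the node whose fNext is C); it returns nullptr only when this
+ // ptT already sits in opp's loop, meaning the two loops were merged earlier.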
+
+ static bool Overlaps(const SkOpPtT* s1, const SkOpPtT* e1, const SkOpPtT* s2,
+ const SkOpPtT* e2, const SkOpPtT** sOut, const SkOpPtT** eOut) {
+ const SkOpPtT* start1 = s1->fT < e1->fT ? s1 : e1;
+ const SkOpPtT* start2 = s2->fT < e2->fT ? s2 : e2;
+ *sOut = between(s1->fT, start2->fT, e1->fT) ? start2
+ : between(s2->fT, start1->fT, e2->fT) ? start1 : nullptr;
+ const SkOpPtT* end1 = s1->fT < e1->fT ? e1 : s1;
+ const SkOpPtT* end2 = s2->fT < e2->fT ? e2 : s2;
+ *eOut = between(s1->fT, end2->fT, e1->fT) ? end2
+ : between(s2->fT, end1->fT, e2->fT) ? end1 : nullptr;
+ if (*sOut == *eOut) {
+ SkOPOBJASSERT(s1, start1->fT >= end2->fT || start2->fT >= end1->fT);
+ return false;
+ }
+ SkASSERT(!*sOut || *sOut != *eOut);
+ return *sOut && *eOut;
+ }
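+ // worked example: for t ranges [0.2, 0.6] and [0.4, 0.9], *sOut is the ptT at
+ // t == 0.4 and *eOut the ptT at t == 0.6 (the shared interval), and Overlaps()
+ // returns true; disjoint ranges leave one output null and return false.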
+
+ bool ptAlreadySeen(const SkOpPtT* head) const;
+ SkOpPtT* prev();
+
+ const SkOpSegment* segment() const;
+ SkOpSegment* segment();
+
+ void setCoincident() const {
+ SkOPASSERT(!fDeleted);
+ fCoincident = true;
+ }
+
+ void setDeleted();
+
+ void setSpan(const SkOpSpanBase* span) {
+ fSpan = const_cast<SkOpSpanBase*>(span);
+ }
+
+ const SkOpSpanBase* span() const {
+ return fSpan;
+ }
+
+ SkOpSpanBase* span() {
+ return fSpan;
+ }
+
+ const SkOpPtT* starter(const SkOpPtT* end) const {
+ return fT < end->fT ? this : end;
+ }
+
+ double fT;
+ SkPoint fPt; // cache of point value at this t
+protected:
+ SkOpSpanBase* fSpan; // contains winding data
+ SkOpPtT* fNext; // intersection on opposite curve or alias on this curve
+ bool fDeleted; // set if removed from span list
+ bool fDuplicatePt; // set if identical pt is somewhere in the next loop
+ // below mutable since referrer is otherwise always const
+ mutable bool fCoincident; // set if at some point a coincident span pointed here
+ SkDEBUGCODE(int fID);
+};
+
+class SkOpSpanBase {
+public:
+ enum class Collapsed {
+ kNo,
+ kYes,
+ kError,
+ };
+
+ bool addOpp(SkOpSpanBase* opp);
+
+ void bumpSpanAdds() {
+ ++fSpanAdds;
+ }
+
+ bool chased() const {
+ return fChased;
+ }
+
+ void checkForCollapsedCoincidence();
+
+ const SkOpSpanBase* coinEnd() const {
+ return fCoinEnd;
+ }
+
+ Collapsed collapsed(double s, double e) const;
+ bool contains(const SkOpSpanBase* ) const;
+ const SkOpPtT* contains(const SkOpSegment* ) const;
+
+ bool containsCoinEnd(const SkOpSpanBase* coin) const {
+ SkASSERT(this != coin);
+ const SkOpSpanBase* next = this;
+ while ((next = next->fCoinEnd) != this) {
+ if (next == coin) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool containsCoinEnd(const SkOpSegment* ) const;
+ SkOpContour* contour() const;
+
+ int debugBumpCount() {
+ return SkDEBUGRELEASE(++fCount, -1);
+ }
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+#if DEBUG_COIN
+ void debugAddOpp(SkPathOpsDebug::GlitchLog* , const SkOpSpanBase* opp) const;
+#endif
+ bool debugAlignedEnd(double t, const SkPoint& pt) const;
+ bool debugAlignedInner() const;
+ const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_COIN
+ void debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+ const SkOpCoincidence* debugCoincidence() const;
+ bool debugCoinEndLoopCheck() const;
+ SkOpContour* debugContour(int id) const;
+#ifdef SK_DEBUG
+ bool debugDeleted() const { return fDebugDeleted; }
+#endif
+#if DEBUG_COIN
+ void debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSpanBase* ) const;
+ void debugMergeMatches(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSpanBase* opp) const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ void debugResetCoinT() const;
+ const SkOpSegment* debugSegment(int id) const;
+ void debugSetCoinT(int ) const;
+#ifdef SK_DEBUG
+ void debugSetDeleted() { fDebugDeleted = true; }
+#endif
+ const SkOpSpanBase* debugSpan(int id) const;
+ const SkOpSpan* debugStarter(SkOpSpanBase const** endPtr) const;
+ SkOpGlobalState* globalState() const;
+ void debugValidate() const;
+
+ bool deleted() const {
+ return fPtT.deleted();
+ }
+
+ void dump() const; // available to testing only
+ void dumpCoin() const;
+ void dumpAll() const;
+ void dumpBase() const;
+ void dumpHead() const;
+
+ bool final() const {
+ return fPtT.fT == 1;
+ }
+
+ SkOpAngle* fromAngle() const {
+ return fFromAngle;
+ }
+
+ void initBase(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+
+ // Please keep this in sync with debugInsertCoinEnd()
+ void insertCoinEnd(SkOpSpanBase* coin) {
+ if (containsCoinEnd(coin)) {
+ SkASSERT(coin->containsCoinEnd(this));
+ return;
+ }
+ debugValidate();
+ SkASSERT(this != coin);
+ SkOpSpanBase* coinNext = coin->fCoinEnd;
+ coin->fCoinEnd = this->fCoinEnd;
+ this->fCoinEnd = coinNext;
+ debugValidate();
+ }
+
+ void merge(SkOpSpan* span);
+ bool mergeMatches(SkOpSpanBase* opp);
+
+ const SkOpSpan* prev() const {
+ return fPrev;
+ }
+
+ SkOpSpan* prev() {
+ return fPrev;
+ }
+
+ const SkPoint& pt() const {
+ return fPtT.fPt;
+ }
+
+ const SkOpPtT* ptT() const {
+ return &fPtT;
+ }
+
+ SkOpPtT* ptT() {
+ return &fPtT;
+ }
+
+ SkOpSegment* segment() const {
+ return fSegment;
+ }
+
+ void setAligned() {
+ fAligned = true;
+ }
+
+ void setChased(bool chased) {
+ fChased = chased;
+ }
+
+ void setFromAngle(SkOpAngle* angle) {
+ fFromAngle = angle;
+ }
+
+ void setPrev(SkOpSpan* prev) {
+ fPrev = prev;
+ }
+
+ bool simple() const {
+ fPtT.debugValidate();
+ return fPtT.next()->next() == &fPtT;
+ }
+
+ int spanAddsCount() const {
+ return fSpanAdds;
+ }
+
+ const SkOpSpan* starter(const SkOpSpanBase* end) const {
+ const SkOpSpanBase* result = t() < end->t() ? this : end;
+ return result->upCast();
+ }
+
+ SkOpSpan* starter(SkOpSpanBase* end) {
+ SkASSERT(this->segment() == end->segment());
+ SkOpSpanBase* result = t() < end->t() ? this : end;
+ return result->upCast();
+ }
+
+ SkOpSpan* starter(SkOpSpanBase** endPtr) {
+ SkOpSpanBase* end = *endPtr;
+ SkASSERT(this->segment() == end->segment());
+ SkOpSpanBase* result;
+ if (t() < end->t()) {
+ result = this;
+ } else {
+ result = end;
+ *endPtr = this;
+ }
+ return result->upCast();
+ }
+
+ int step(const SkOpSpanBase* end) const {
+ return t() < end->t() ? 1 : -1;
+ }
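+ // starter() returns whichever end has the smaller t, upcast to a full span;
+ // step() gives the direction of travel: e.g., from t == 0.2 toward t == 0.8
+ // it returns +1, and -1 for the reverse walk.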
+
+ double t() const {
+ return fPtT.fT;
+ }
+
+ void unaligned() {
+ fAligned = false;
+ }
+
+ SkOpSpan* upCast() {
+ SkASSERT(!final());
+ return (SkOpSpan*) this;
+ }
+
+ const SkOpSpan* upCast() const {
+ SkOPASSERT(!final());
+ return (const SkOpSpan*) this;
+ }
+
+ SkOpSpan* upCastable() {
+ return final() ? nullptr : upCast();
+ }
+
+ const SkOpSpan* upCastable() const {
+ return final() ? nullptr : upCast();
+ }
+
+private:
+ void alignInner();
+
+protected: // no direct access to internals to avoid treating a span base as a span
+ SkOpPtT fPtT; // list of points and t values associated with the start of this span
+ SkOpSegment* fSegment; // segment that contains this span
+ SkOpSpanBase* fCoinEnd; // linked list of coincident spans that end here (may point to itself)
+ SkOpAngle* fFromAngle; // points to next angle from span start to end
+ SkOpSpan* fPrev; // previous intersection point
+ int fSpanAdds; // number of times intersections have been added to span
+ bool fAligned;
+ bool fChased; // set after span has been added to chase array
+ SkDEBUGCODE(int fCount); // number of pt/t pairs added
+ SkDEBUGCODE(int fID);
+ SkDEBUGCODE(bool fDebugDeleted); // set when span was merged with another span
+};
+
+class SkOpSpan : public SkOpSpanBase {
+public:
+ bool alreadyAdded() const { return fAlreadyAdded; }
+
+ bool clearCoincident() {
+ SkASSERT(!final());
+ if (fCoincident == this) {
+ return false;
+ }
+ fCoincident = this;
+ return true;
+ }
+
+ int computeWindSum();
+ bool containsCoincidence(const SkOpSegment* ) const;
+
+ bool containsCoincidence(const SkOpSpan* coin) const {
+ SkASSERT(this != coin);
+ const SkOpSpan* next = this;
+ while ((next = next->fCoincident) != this) {
+ if (next == coin) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool debugCoinLoopCheck() const;
+#if DEBUG_COIN
+ void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* , const SkOpSpan* ) const;
+ void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSegment* , bool flipped, bool ordered) const;
+#endif
+ void dumpCoin() const;
+ bool dumpSpan() const;
+
+ bool done() const {
+ SkASSERT(!final());
+ return fDone;
+ }
+
+ void init(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+ bool insertCoincidence(const SkOpSegment* , bool flipped, bool ordered);
+
+ // Please keep this in sync with debugInsertCoincidence()
+ void insertCoincidence(SkOpSpan* coin) {
+ if (containsCoincidence(coin)) {
+ SkASSERT(coin->containsCoincidence(this));
+ return;
+ }
+ debugValidate();
+ SkASSERT(this != coin);
+ SkOpSpan* coinNext = coin->fCoincident;
+ coin->fCoincident = this->fCoincident;
+ this->fCoincident = coinNext;
+ debugValidate();
+ }
+
+ bool isCanceled() const {
+ SkASSERT(!final());
+ return fWindValue == 0 && fOppValue == 0;
+ }
+
+ bool isCoincident() const {
+ SkASSERT(!final());
+ return fCoincident != this;
+ }
+
+ void markAdded() {
+ fAlreadyAdded = true;
+ }
+
+ SkOpSpanBase* next() const {
+ SkASSERT(!final());
+ return fNext;
+ }
+
+ int oppSum() const {
+ SkASSERT(!final());
+ return fOppSum;
+ }
+
+ int oppValue() const {
+ SkASSERT(!final());
+ return fOppValue;
+ }
+
+ void release(const SkOpPtT* );
+
+ SkOpPtT* setCoinStart(SkOpSpan* oldCoinStart, SkOpSegment* oppSegment);
+
+ void setDone(bool done) {
+ SkASSERT(!final());
+ fDone = done;
+ }
+
+ void setNext(SkOpSpanBase* nextT) {
+ SkASSERT(!final());
+ fNext = nextT;
+ }
+
+ void setOppSum(int oppSum);
+
+ void setOppValue(int oppValue) {
+ SkASSERT(!final());
+ SkASSERT(fOppSum == SK_MinS32);
+ SkOPASSERT(!oppValue || !fDone);
+ fOppValue = oppValue;
+ }
+
+ void setToAngle(SkOpAngle* angle) {
+ SkASSERT(!final());
+ fToAngle = angle;
+ }
+
+ void setWindSum(int windSum);
+
+ void setWindValue(int windValue) {
+ SkASSERT(!final());
+ SkASSERT(windValue >= 0);
+ SkASSERT(fWindSum == SK_MinS32);
+ SkOPASSERT(!windValue || !fDone);
+ fWindValue = windValue;
+ }
+
+ bool sortableTop(SkOpContour* );
+
+ SkOpAngle* toAngle() const {
+ SkASSERT(!final());
+ return fToAngle;
+ }
+
+ int windSum() const {
+ SkASSERT(!final());
+ return fWindSum;
+ }
+
+ int windValue() const {
+ SkOPASSERT(!final());
+ return fWindValue;
+ }
+
+private: // no direct access to internals to avoid treating a span base as a span
+ SkOpSpan* fCoincident; // linked list of spans coincident with this one (may point to itself)
+ SkOpAngle* fToAngle; // points to next angle from span start to end
+ SkOpSpanBase* fNext; // next intersection point
+ int fWindSum; // accumulated from contours surrounding this one.
+ int fOppSum; // for binary operators: the opposite winding sum
+ int fWindValue; // 0 == canceled; 1 == normal; >1 == coincident
+ int fOppValue; // normally 0 -- when binary coincident edges combine, opp value goes here
+ int fTopTTry; // specifies direction and t value to try next
+ bool fDone; // if set, this span to next higher T has been processed
+ bool fAlreadyAdded;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp b/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp
new file mode 100644
index 0000000000..80e52bee2a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkRect.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include <algorithm>
+#include <vector>
+
+using std::vector;
+
+struct Contour {
+ enum class Direction { // SkPath::Direction doesn't have 'none' state
+ kCCW = -1,
+ kNone,
+ kCW,
+ };
+
+ Contour(const SkRect& bounds, int lastStart, int verbStart)
+ : fBounds(bounds)
+ , fVerbStart(lastStart)
+ , fVerbEnd(verbStart) {
+ }
+
+ vector<Contour*> fChildren;
+ const SkRect fBounds;
+ SkPoint fMinXY{SK_ScalarMax, SK_ScalarMax};
+ const int fVerbStart;
+ const int fVerbEnd;
+ Direction fDirection{Direction::kNone};
+ bool fContained{false};
+ bool fReverse{false};
+};
+
+static const int kPtCount[] = { 1, 1, 2, 2, 3, 0 };
+static const int kPtIndex[] = { 0, 1, 1, 1, 1, 0 };
+
+static Contour::Direction to_direction(SkScalar dy) {
+ return dy > 0 ? Contour::Direction::kCCW : dy < 0 ? Contour::Direction::kCW :
+ Contour::Direction::kNone;
+}
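+// e.g., to_direction(1.5f) == kCCW, to_direction(-2) == kCW, and
+// to_direction(0) == kNone for a horizontal tangent.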
+
+static int contains_edge(SkPoint pts[4], SkPath::Verb verb, SkScalar weight, const SkPoint& edge) {
+ SkRect bounds;
+ bounds.setBounds(pts, kPtCount[verb] + 1);
+ if (bounds.fTop > edge.fY) {
+ return 0;
+ }
+ if (bounds.fBottom <= edge.fY) { // check to see if y is at line end to avoid double counting
+ return 0;
+ }
+ if (bounds.fLeft >= edge.fX) {
+ return 0;
+ }
+ int winding = 0;
+ double tVals[3];
+ Contour::Direction directions[3];
+ // must intersect horz ray with curve in case it intersects more than once
+ int count = (*CurveIntercept[verb * 2])(pts, weight, edge.fY, tVals);
+ SkASSERT(between(0, count, 3));
+ // remove results to the right of edge
+ for (int index = 0; index < count; ) {
+ SkScalar intersectX = (*CurvePointAtT[verb])(pts, weight, tVals[index]).fX;
+ if (intersectX < edge.fX) {
+ ++index;
+ continue;
+ }
+ if (intersectX > edge.fX) {
+ tVals[index] = tVals[--count];
+ continue;
+ }
+ // if intersect x equals edge x, we need to determine if pts is to the left or right of edge
+ if (pts[0].fX < edge.fX && pts[kPtCount[verb]].fX < edge.fX) {
+ ++index;
+ continue;
+ }
+ // TODO: other cases need discriminating; need op angle code to figure it out
+ // example: edge ends in a 45 degree diagonal going up. If pts is to the left of edge, keep;
+ // if pts is to the right of edge, discard. With the code as is, the two cases can't be distinguished.
+ tVals[index] = tVals[--count];
+ }
+ // use first derivative to determine if intersection is contributing +1 or -1 to winding
+ for (int index = 0; index < count; ++index) {
+ directions[index] = to_direction((*CurveSlopeAtT[verb])(pts, weight, tVals[index]).fY);
+ }
+ for (int index = 0; index < count; ++index) {
+ // skip intersections that end at edge and go up
+ if (zero_or_one(tVals[index]) && Contour::Direction::kCCW != directions[index]) {
+ continue;
+ }
+ winding += (int) directions[index];
+ }
+ return winding; // note winding indicates containership, not contour direction
+}
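+// worked example: for the line {(0, 0), (0, 10)} and edge (5, 5), the leftward
+// ray from edge crosses the line at t == 0.5 with x == 0 < 5; the slope's
+// dy == 10 maps to kCCW (-1), so the line contributes -1 to the winding.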
+
+static SkScalar conic_weight(const SkPath::Iter& iter, SkPath::Verb verb) {
+ return SkPath::kConic_Verb == verb ? iter.conicWeight() : 1;
+}
+
+static SkPoint left_edge(SkPoint pts[4], SkPath::Verb verb, SkScalar weight,
+ Contour::Direction* direction) {
+ SkASSERT(SkPath::kLine_Verb <= verb && verb <= SkPath::kCubic_Verb);
+ SkPoint result;
+ double dy;
+ double t SK_INIT_TO_AVOID_WARNING;
+ int roots = 0;
+ if (SkPath::kLine_Verb == verb) {
+ result = pts[0].fX < pts[1].fX ? pts[0] : pts[1];
+ dy = pts[1].fY - pts[0].fY;
+ } else if (SkPath::kQuad_Verb == verb) {
+ SkDQuad quad;
+ quad.set(pts);
+ if (!quad.monotonicInX()) {
+ roots = SkDQuad::FindExtrema(&quad[0].fX, &t);
+ }
+ if (roots) {
+ result = quad.ptAtT(t).asSkPoint();
+ } else {
+ result = pts[0].fX < pts[2].fX ? pts[0] : pts[2];
+ t = pts[0].fX < pts[2].fX ? 0 : 1;
+ }
+ dy = quad.dxdyAtT(t).fY;
+ } else if (SkPath::kConic_Verb == verb) {
+ SkDConic conic;
+ conic.set(pts, weight);
+ if (!conic.monotonicInX()) {
+ roots = SkDConic::FindExtrema(&conic[0].fX, weight, &t);
+ }
+ if (roots) {
+ result = conic.ptAtT(t).asSkPoint();
+ } else {
+ result = pts[0].fX < pts[2].fX ? pts[0] : pts[2];
+ t = pts[0].fX < pts[2].fX ? 0 : 1;
+ }
+ dy = conic.dxdyAtT(t).fY;
+ } else {
+ SkASSERT(SkPath::kCubic_Verb == verb);
+ SkDCubic cubic;
+ cubic.set(pts);
+ if (!cubic.monotonicInX()) {
+ double tValues[2];
+ roots = SkDCubic::FindExtrema(&cubic[0].fX, tValues);
+ SkASSERT(roots <= 2);
+ for (int index = 0; index < roots; ++index) {
+ SkPoint temp = cubic.ptAtT(tValues[index]).asSkPoint();
+ if (0 == index || result.fX > temp.fX) {
+ result = temp;
+ t = tValues[index];
+ }
+ }
+ }
+ if (roots) {
+ result = cubic.ptAtT(t).asSkPoint();
+ } else {
+ result = pts[0].fX < pts[3].fX ? pts[0] : pts[3];
+ t = pts[0].fX < pts[3].fX ? 0 : 1;
+ }
+ dy = cubic.dxdyAtT(t).fY;
+ }
+ *direction = to_direction(dy);
+ return result;
+}
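+// e.g., for the line {(3, 1), (0, 4)} the leftmost point is (0, 4) and
+// dy == 4 - 1 == 3 > 0, so *direction is set to kCCW; for curves, an interior
+// x-extremum (when one exists) supplies the leftmost point instead of an endpoint.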
+
+class OpAsWinding {
+public:
+ enum class Edge {
+ kInitial,
+ kCompare,
+ };
+
+ OpAsWinding(const SkPath& path)
+ : fPath(path) {
+ }
+
+ void contourBounds(vector<Contour>* containers) {
+ SkRect bounds;
+ bounds.setEmpty();
+ SkPath::RawIter iter(fPath);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ int lastStart = 0;
+ int verbStart = 0;
+ do {
+ verb = iter.next(pts);
+ if (SkPath::kMove_Verb == verb) {
+ if (!bounds.isEmpty()) {
+ containers->emplace_back(bounds, lastStart, verbStart);
+ lastStart = verbStart;
+ }
+ bounds.setBounds(&pts[kPtIndex[verb]], kPtCount[verb]);
+ }
+ if (SkPath::kLine_Verb <= verb && verb <= SkPath::kCubic_Verb) {
+ SkRect verbBounds;
+ verbBounds.setBounds(&pts[kPtIndex[verb]], kPtCount[verb]);
+ bounds.joinPossiblyEmptyRect(verbBounds);
+ }
+ ++verbStart;
+ } while (SkPath::kDone_Verb != verb);
+ if (!bounds.isEmpty()) {
+ containers->emplace_back(bounds, lastStart, verbStart);
+ }
+ }
+
+ int nextEdge(Contour& contour, Edge edge) {
+ SkPath::Iter iter(fPath, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ int verbCount = -1;
+ int winding = 0;
+ do {
+ verb = iter.next(pts);
+ if (++verbCount < contour.fVerbStart) {
+ continue;
+ }
+ if (verbCount >= contour.fVerbEnd) {
+ continue;
+ }
+ if (SkPath::kLine_Verb > verb || verb > SkPath::kCubic_Verb) {
+ continue;
+ }
+ bool horizontal = true;
+ for (int index = 1; index <= kPtCount[verb]; ++index) {
+ if (pts[0].fY != pts[index].fY) {
+ horizontal = false;
+ break;
+ }
+ }
+ if (horizontal) {
+ continue;
+ }
+ if (edge == Edge::kCompare) {
+ winding += contains_edge(pts, verb, conic_weight(iter, verb), contour.fMinXY);
+ continue;
+ }
+ SkASSERT(edge == Edge::kInitial);
+ Contour::Direction direction;
+ SkPoint minXY = left_edge(pts, verb, conic_weight(iter, verb), &direction);
+ if (minXY.fX > contour.fMinXY.fX) {
+ continue;
+ }
+ if (minXY.fX == contour.fMinXY.fX) {
+ if (minXY.fY != contour.fMinXY.fY) {
+ continue;
+ }
+ if (direction == contour.fDirection) {
+ continue;
+ }
+ // incomplete: must sort edges to find the one most to the left
+ // File a bug if this code path is triggered and AsWinding was
+ // expected to succeed.
+ SkDEBUGF("incomplete\n");
+ // TODO: add edges as opangle and sort
+ }
+ contour.fMinXY = minXY;
+ contour.fDirection = direction;
+ } while (SkPath::kDone_Verb != verb);
+ return winding;
+ }
+
+ bool containerContains(Contour& contour, Contour& test) {
+ // find the outside point on the lesser contour
+ // arbitrarily, choose a non-horizontal edge where the point <= bounds left
+ // note that if the leftmost point is a control point, tight bounds may be
+ // needed to find the edge with the minimum x
+ if (SK_ScalarMax == test.fMinXY.fX) {
+ this->nextEdge(test, Edge::kInitial);
+ }
+ // find all edges on the greater contour equal to or to the left of the one on the lesser
+ contour.fMinXY = test.fMinXY;
+ int winding = this->nextEdge(contour, Edge::kCompare);
+ // if the edge is up, mark the contour cw; otherwise, ccw
+ // the sum of the greater contour's edge directions should be cw, 0, or ccw
+ test.fContained = winding != 0;
+ return -1 <= winding && winding <= 1;
+ }
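+ // e.g., a winding of +1 or -1 means the lesser contour's leftmost point is
+ // covered exactly once by the greater contour's edges, so it is contained;
+ // 0 means it lies outside; a magnitude above 1 is ambiguous and fails.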
+
+ void inParent(Contour& contour, Contour& parent) {
+ // move contour into sibling list contained by parent
+ for (auto test : parent.fChildren) {
+ if (test->fBounds.contains(contour.fBounds)) {
+ inParent(contour, *test);
+ return;
+ }
+ }
+ // move parent's children into contour's children if contained by contour
+ for (auto iter = parent.fChildren.begin(); iter != parent.fChildren.end(); ) {
+ if (contour.fBounds.contains((*iter)->fBounds)) {
+ contour.fChildren.push_back(*iter);
+ iter = parent.fChildren.erase(iter);
+ continue;
+ }
+ ++iter;
+ }
+ parent.fChildren.push_back(&contour);
+ }
+
+ bool checkContainerChildren(Contour* parent, Contour* child) {
+ for (auto grandChild : child->fChildren) {
+ if (!checkContainerChildren(child, grandChild)) {
+ return false;
+ }
+ }
+ if (parent) {
+ if (!containerContains(*parent, *child)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool markReverse(Contour* parent, Contour* child) {
+ bool reversed = false;
+ for (auto grandChild : child->fChildren) {
+ reversed |= markReverse(grandChild->fContained ? child : parent, grandChild);
+ }
+ if (parent && parent->fDirection == child->fDirection) {
+ child->fReverse = true;
+ child->fDirection = (Contour::Direction) -(int) child->fDirection;
+ return true;
+ }
+ return reversed;
+ }
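+ // e.g., a clockwise contour nested directly inside another clockwise contour
+ // is flipped to counterclockwise so that, under a winding fill, the inner
+ // contour cuts a hole just as it did under the original even-odd fill.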
+
+ void reverseMarkedContours(vector<Contour>& contours, SkPath* result) {
+ SkPath::RawIter iter(fPath);
+ int verbCount = 0;
+ for (auto contour : contours) {
+ SkPath reverse;
+ SkPath* temp = contour.fReverse ? &reverse : result;
+ do {
+ SkPoint pts[4];
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ temp->moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ temp->lineTo(pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ temp->quadTo(pts[1], pts[2]);
+ break;
+ case SkPath::kConic_Verb:
+ temp->conicTo(pts[1], pts[2], iter.conicWeight());
+ break;
+ case SkPath::kCubic_Verb:
+ temp->cubicTo(pts[1], pts[2], pts[3]);
+ break;
+ case SkPath::kClose_Verb:
+ temp->close();
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ default:
+ SkASSERT(0);
+ }
+ } while (++verbCount < contour.fVerbEnd);
+ if (contour.fReverse) {
+ result->reverseAddPath(reverse);
+ }
+ }
+ }
+
+private:
+ const SkPath& fPath;
+};
+
+static bool set_result_path(SkPath* result, const SkPath& path, SkPath::FillType fillType) {
+ *result = path;
+ result->setFillType(fillType);
+ return true;
+}
+
+bool SK_API AsWinding(const SkPath& path, SkPath* result) {
+ if (!path.isFinite()) {
+ return false;
+ }
+ SkPath::FillType fillType = path.getFillType();
+ if (fillType == SkPath::kWinding_FillType
+ || fillType == SkPath::kInverseWinding_FillType ) {
+ return set_result_path(result, path, fillType);
+ }
+ fillType = path.isInverseFillType() ? SkPath::kInverseWinding_FillType :
+ SkPath::kWinding_FillType;
+ if (path.isEmpty() || path.isConvex()) {
+ return set_result_path(result, path, fillType);
+ }
+ // count contours
+ vector<Contour> contours; // one per contour
+ OpAsWinding winder(path);
+ winder.contourBounds(&contours);
+ if (contours.size() <= 1) {
+ return set_result_path(result, path, fillType);
+ }
+ // create contour bounding box tree
+ Contour sorted(SkRect(), 0, 0);
+ for (auto& contour : contours) {
+ winder.inParent(contour, sorted);
+ }
+ // if sorted has no grandchildren, no child has to fix its children's winding
+ if (std::all_of(sorted.fChildren.begin(), sorted.fChildren.end(),
+ [](const Contour* contour) -> bool { return !contour->fChildren.size(); } )) {
+ return set_result_path(result, path, fillType);
+ }
+ // starting with outermost and moving inward, see if one path contains another
+ for (auto contour : sorted.fChildren) {
+ winder.nextEdge(*contour, OpAsWinding::Edge::kInitial);
+ if (!winder.checkContainerChildren(nullptr, contour)) {
+ return false;
+ }
+ }
+ // starting with outermost and moving inward, mark paths to reverse
+ bool reversed = false;
+ for (auto contour : sorted.fChildren) {
+ reversed |= winder.markReverse(nullptr, contour);
+ }
+ if (!reversed) {
+ return set_result_path(result, path, fillType);
+ }
+ SkPath temp;
+ temp.setFillType(fillType);
+ winder.reverseMarkedContours(contours, &temp);
+ result->swap(temp);
+ return true;
+}
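+// a minimal usage sketch, assuming two nested rectangles drawn the same way:
+//   SkPath path;
+//   path.setFillType(SkPath::kEvenOdd_FillType);
+//   path.addRect({0, 0, 10, 10});
+//   path.addRect({2, 2, 8, 8}); // same direction as the outer rect
+//   SkPath winding;
+//   if (AsWinding(path, &winding)) {
+//       // winding renders identically to path, but with a winding fill type;
+//       // the inner rectangle's direction has been reversed to cut the hole.
+//   }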
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsBounds.h b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
new file mode 100644
index 0000000000..1ebd46f6c0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpBounds_DEFINED
+#define SkPathOpBounds_DEFINED
+
+#include "include/core/SkRect.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+// SkPathOpsBounds, unlike SkRect, does not consider a line to be empty.
+struct SkPathOpsBounds : public SkRect {
+ static bool Intersects(const SkPathOpsBounds& a, const SkPathOpsBounds& b) {
+ return AlmostLessOrEqualUlps(a.fLeft, b.fRight)
+ && AlmostLessOrEqualUlps(b.fLeft, a.fRight)
+ && AlmostLessOrEqualUlps(a.fTop, b.fBottom)
+ && AlmostLessOrEqualUlps(b.fTop, a.fBottom);
+ }
+
+ // Note that add(), unlike SkRect::join() or SkRect::growToInclude(),
+ // does not treat the bounds of horizontal and vertical lines as
+ // empty rectangles.
+ void add(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ if (left < fLeft) fLeft = left;
+ if (top < fTop) fTop = top;
+ if (right > fRight) fRight = right;
+ if (bottom > fBottom) fBottom = bottom;
+ }
+
+ void add(const SkPathOpsBounds& toAdd) {
+ add(toAdd.fLeft, toAdd.fTop, toAdd.fRight, toAdd.fBottom);
+ }
+
+ void add(const SkPoint& pt) {
+ if (pt.fX < fLeft) fLeft = pt.fX;
+ if (pt.fY < fTop) fTop = pt.fY;
+ if (pt.fX > fRight) fRight = pt.fX;
+ if (pt.fY > fBottom) fBottom = pt.fY;
+ }
+
+ void add(const SkDPoint& pt) {
+ if (pt.fX < fLeft) fLeft = SkDoubleToScalar(pt.fX);
+ if (pt.fY < fTop) fTop = SkDoubleToScalar(pt.fY);
+ if (pt.fX > fRight) fRight = SkDoubleToScalar(pt.fX);
+ if (pt.fY > fBottom) fBottom = SkDoubleToScalar(pt.fY);
+ }
+
+ bool almostContains(const SkPoint& pt) const {
+ return AlmostLessOrEqualUlps(fLeft, pt.fX)
+ && AlmostLessOrEqualUlps(pt.fX, fRight)
+ && AlmostLessOrEqualUlps(fTop, pt.fY)
+ && AlmostLessOrEqualUlps(pt.fY, fBottom);
+ }
+
+ bool contains(const SkPoint& pt) const {
+ return fLeft <= pt.fX && fTop <= pt.fY &&
+ fRight >= pt.fX && fBottom >= pt.fY;
+ }
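+ // contains() is exact; almostContains() above also admits points within a few
+ // float ULPs outside the rect, the same tolerance Intersects() uses.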
+
+ typedef SkRect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
new file mode 100644
index 0000000000..a4d0fcdc0e
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathWriter.h"
+
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+ bool* sortablePtr) {
+ // find first angle, initialize winding to computed fWindSum
+ SkOpSegment* segment = start->segment();
+ const SkOpAngle* angle = segment->spanToAngle(start, end);
+ if (!angle) {
+ *windingPtr = SK_MinS32;
+ return nullptr;
+ }
+ bool computeWinding = false;
+ const SkOpAngle* firstAngle = angle;
+ bool loop = false;
+ bool unorderable = false;
+ int winding = SK_MinS32;
+ do {
+ angle = angle->next();
+ if (!angle) {
+ return nullptr;
+ }
+ unorderable |= angle->unorderable();
+ if ((computeWinding = unorderable || (angle == firstAngle && loop))) {
+ break; // if we get here, there's no winding; the loop is unorderable
+ }
+ loop |= angle == firstAngle;
+ segment = angle->segment();
+ winding = segment->windSum(angle);
+ } while (winding == SK_MinS32);
+ // if the angle loop contains an unorderable span, the angle order may be useless;
+ // in that case, directly compute the winding for each span
+ if (computeWinding) {
+ firstAngle = angle;
+ winding = SK_MinS32;
+ do {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ SkOpSpan* lesser = startSpan->starter(endSpan);
+ int testWinding = lesser->windSum();
+ if (testWinding == SK_MinS32) {
+ testWinding = lesser->computeWindSum();
+ }
+ if (testWinding != SK_MinS32) {
+ segment = angle->segment();
+ winding = testWinding;
+ }
+ angle = angle->next();
+ } while (angle != firstAngle);
+ }
+ *sortablePtr = !unorderable;
+ *windingPtr = winding;
+ return angle;
+}
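+// in short: walk the angle loop until some segment already carries a computed
+// wind sum; if the loop proves unorderable, fall back to computing the wind sum
+// span by span, keeping the last value that computes successfully.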
+
+SkOpSpan* FindUndone(SkOpContourHead* contourHead) {
+ SkOpContour* contour = contourHead;
+ do {
+ if (contour->done()) {
+ continue;
+ }
+ SkOpSpan* result = contour->undoneSpan();
+ if (result) {
+ return result;
+ }
+ } while ((contour = contour->next()));
+ return nullptr;
+}
+
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr) {
+ while (chase->count()) {
+ SkOpSpanBase* span;
+ chase->pop(&span);
+ SkOpSegment* segment = span->segment();
+ *startPtr = span->ptT()->next()->span();
+ bool done = true;
+ *endPtr = nullptr;
+ if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+ *startPtr = last->start();
+ *endPtr = last->end();
+ #if TRY_ROTATE
+ *chase->insert(0) = span;
+ #else
+ *chase->append() = span;
+ #endif
+ return last->segment();
+ }
+ if (done) {
+ continue;
+ }
+ // find first angle, initialize winding to computed wind sum
+ int winding;
+ bool sortable;
+ const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+ if (!angle) {
+ return nullptr;
+ }
+ if (winding == SK_MinS32) {
+ continue;
+ }
+ int sumWinding SK_INIT_TO_AVOID_WARNING;
+ if (sortable) {
+ segment = angle->segment();
+ sumWinding = segment->updateWindingReverse(angle);
+ }
+ SkOpSegment* first = nullptr;
+ const SkOpAngle* firstAngle = angle;
+ while ((angle = angle->next()) != firstAngle) {
+ segment = angle->segment();
+ SkOpSpanBase* start = angle->start();
+ SkOpSpanBase* end = angle->end();
+ int maxWinding SK_INIT_TO_AVOID_WARNING;
+ if (sortable) {
+ segment->setUpWinding(start, end, &maxWinding, &sumWinding);
+ }
+ if (!segment->done(angle)) {
+ if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+ first = segment;
+ *startPtr = start;
+ *endPtr = end;
+ }
+ // OPTIMIZATION: should this also add to the chase?
+ if (sortable) {
+ // TODO: add error handling
+ SkAssertResult(segment->markAngle(maxWinding, sumWinding, angle, nullptr));
+ }
+ }
+ }
+ if (first) {
+ #if TRY_ROTATE
+ *chase->insert(0) = span;
+ #else
+ *chase->append() = span;
+ #endif
+ return first;
+ }
+ }
+ return nullptr;
+}
+
+bool SortContourList(SkOpContourHead** contourList, bool evenOdd, bool oppEvenOdd) {
+ SkTDArray<SkOpContour* > list;
+ SkOpContour* contour = *contourList;
+ do {
+ if (contour->count()) {
+ contour->setOppXor(contour->operand() ? evenOdd : oppEvenOdd);
+ *list.append() = contour;
+ }
+ } while ((contour = contour->next()));
+ int count = list.count();
+ if (!count) {
+ return false;
+ }
+ if (count > 1) {
+ SkTQSort<SkOpContour>(list.begin(), list.end() - 1);
+ }
+ contour = list[0];
+ SkOpContourHead* contourHead = static_cast<SkOpContourHead*>(contour);
+ contour->globalState()->setContourHead(contourHead);
+ *contourList = contourHead;
+ for (int index = 1; index < count; ++index) {
+ SkOpContour* next = list[index];
+ contour->setNext(next);
+ contour = next;
+ }
+ contour->setNext(nullptr);
+ return true;
+}
+
+static void calc_angles(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ contour->calcAngles();
+ } while ((contour = contour->next()));
+}
+
+static bool missing_coincidence(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ bool result = false;
+ do {
+ result |= contour->missingCoincidence();
+ } while ((contour = contour->next()));
+ return result;
+}
+
+static bool move_multiples(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ if (!contour->moveMultiples()) {
+ return false;
+ }
+ } while ((contour = contour->next()));
+ return true;
+}
+
+static bool move_nearby(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ if (!contour->moveNearby()) {
+ return false;
+ }
+ } while ((contour = contour->next()));
+ return true;
+}
+
+static bool sort_angles(SkOpContourHead* contourList) {
+ SkOpContour* contour = contourList;
+ do {
+ if (!contour->sortAngles()) {
+ return false;
+ }
+ } while ((contour = contour->next()));
+ return true;
+}
+
+bool HandleCoincidence(SkOpContourHead* contourList, SkOpCoincidence* coincidence) {
+ SkOpGlobalState* globalState = contourList->globalState();
+ // match up points within the coincident runs
+ if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kIntersecting))) {
+ return false;
+ }
+ // combine t values when multiple intersections occur on some segments but not others
+ if (!move_multiples(contourList DEBUG_PHASE_PARAMS(kWalking))) {
+ return false;
+ }
+ // move t values and points together to eliminate small/tiny gaps
+ if (!move_nearby(contourList DEBUG_COIN_PARAMS())) {
+ return false;
+ }
+ // add coincidence formed by pairing on curve points and endpoints
+ coincidence->correctEnds(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+ if (!coincidence->addEndMovedSpans(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ const int SAFETY_COUNT = 3;
+ int safetyHatch = SAFETY_COUNT;
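+    // the hatch bounds the retry loops below; exhausting it is treated as
+    // failure (the assert below fires unless the global state lets fuzzer
+    // inputs skip asserts)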
+ // look for coincidence present in A-B and A-C but missing in B-C
+ do {
+ bool added;
+ if (!coincidence->addMissing(&added DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch))) {
+ return false;
+ }
+ if (!added) {
+ break;
+ }
+ if (!--safetyHatch) {
+ SkASSERT(globalState->debugSkipAssert());
+ return false;
+ }
+ move_nearby(contourList DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch - 1));
+ } while (true);
+ // check to see if, loosely, coincident ranges may be expanded
+ if (coincidence->expand(DEBUG_COIN_ONLY_PARAMS())) {
+ bool added;
+ if (!coincidence->addMissing(&added DEBUG_COIN_PARAMS())) {
+ return false;
+ }
+ if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ if (!move_multiples(contourList DEBUG_COIN_PARAMS())) {
+ return false;
+ }
+ move_nearby(contourList DEBUG_COIN_PARAMS());
+ }
+ // the expanded ranges may not align -- add the missing spans
+ if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kWalking))) {
+ return false;
+ }
+ // mark spans of coincident segments as coincident
+ coincidence->mark(DEBUG_COIN_ONLY_PARAMS());
+ // look for coincidence lines and curves undetected by intersection
+ if (missing_coincidence(contourList DEBUG_COIN_PARAMS())) {
+ (void) coincidence->expand(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+ if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ if (!coincidence->mark(DEBUG_PHASE_ONLY_PARAMS(kWalking))) {
+ return false;
+ }
+ } else {
+ (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+ }
+ (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+
+ SkOpCoincidence overlaps(globalState);
+ safetyHatch = SAFETY_COUNT;
+ do {
+ SkOpCoincidence* pairs = overlaps.isEmpty() ? coincidence : &overlaps;
+ // adjust the winding value to account for coincident edges
+ if (!pairs->apply(DEBUG_ITER_ONLY_PARAMS(SAFETY_COUNT - safetyHatch))) {
+ return false;
+ }
+ // For each coincident pair that overlaps another, when the receivers (the 1st of the pair)
+ // are different, construct a new pair to resolve their mutual span
+ if (!pairs->findOverlaps(&overlaps DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch))) {
+ return false;
+ }
+ if (!--safetyHatch) {
+ SkASSERT(globalState->debugSkipAssert());
+ return false;
+ }
+ } while (!overlaps.isEmpty());
+ calc_angles(contourList DEBUG_COIN_PARAMS());
+ if (!sort_angles(contourList)) {
+ return false;
+ }
+#if DEBUG_COINCIDENCE_VERBOSE
+ coincidence->debugShowCoincidence();
+#endif
+#if DEBUG_COINCIDENCE
+ coincidence->debugValidate();
+#endif
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.h b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
new file mode 100644
index 0000000000..af27f2eaf4
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCommon_DEFINED
+#define SkPathOpsCommon_DEFINED
+
+#include "include/private/SkTDArray.h"
+#include "src/pathops/SkOpAngle.h"
+
+class SkOpCoincidence;
+class SkOpContour;
+class SkPathWriter;
+
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+ bool* sortable);
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr);
+SkOpSpan* FindSortableTop(SkOpContourHead* );
+SkOpSpan* FindUndone(SkOpContourHead* );
+bool FixWinding(SkPath* path);
+bool SortContourList(SkOpContourHead** , bool evenOdd, bool oppEvenOdd);
+bool HandleCoincidence(SkOpContourHead* , SkOpCoincidence* );
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert)
+ SkDEBUGPARAMS(const char* testName));
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
new file mode 100644
index 0000000000..c0bbda8ce9
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+// cribbed from the float version in SkGeometry.cpp
+static void conic_deriv_coeff(const double src[],
+ SkScalar w,
+ double coeff[3]) {
+ const double P20 = src[4] - src[0];
+ const double P10 = src[2] - src[0];
+ const double wP10 = w * P10;
+ coeff[0] = w * P20 - P20;
+ coeff[1] = P20 - 2 * wP10;
+ coeff[2] = wP10;
+}
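+
+// A worked note on the coefficients above, assuming the rational form of the
+// conic N(t)/D(t) with N(t) = P0*(1-t)^2 + 2w*P1*t*(1-t) + P2*t^2 and
+// D(t) = (1-t)^2 + 2w*t*(1-t) + t^2: the t^3 terms of N'D - ND' cancel,
+// leaving the quadratic (w-1)*P20*t^2 + (P20 - 2w*P10)*t + w*P10 that
+// conic_deriv_coeff computes; the positive factor 2/D(t)^2 is dropped, which
+// preserves the roots and the tangent direction.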
+
+static double conic_eval_tan(const double coord[], SkScalar w, double t) {
+ double coeff[3];
+ conic_deriv_coeff(coord, w, coeff);
+ return t * (t * coeff[0] + coeff[1]) + coeff[2];
+}
+
+int SkDConic::FindExtrema(const double src[], SkScalar w, double t[1]) {
+ double coeff[3];
+ conic_deriv_coeff(src, w, coeff);
+
+ double tValues[2];
+ int roots = SkDQuad::RootsValidT(coeff[0], coeff[1], coeff[2], tValues);
+ // In extreme cases, the number of roots returned can be 2. Pathops
+ // will fail later on, so there's no advantage to plumbing in an error
+ // return here.
+ // SkASSERT(0 == roots || 1 == roots);
+
+ if (1 == roots) {
+ t[0] = tValues[0];
+ return 1;
+ }
+ return 0;
+}
+
+SkDVector SkDConic::dxdyAtT(double t) const {
+ SkDVector result = {
+ conic_eval_tan(&fPts[0].fX, fWeight, t),
+ conic_eval_tan(&fPts[0].fY, fWeight, t)
+ };
+ if (result.fX == 0 && result.fY == 0) {
+ if (zero_or_one(t)) {
+ result = fPts[2] - fPts[0];
+ } else {
+ // incomplete
+ SkDebugf("!k");
+ }
+ }
+ return result;
+}
+
+static double conic_eval_numerator(const double src[], SkScalar w, double t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= 1);
+ double src2w = src[2] * w;
+ double C = src[0];
+ double A = src[4] - 2 * src2w + C;
+ double B = 2 * (src2w - C);
+ return (A * t + B) * t + C;
+}
+
+static double conic_eval_denominator(SkScalar w, double t) {
+ double B = 2 * (w - 1);
+ double C = 1;
+ double A = -B;
+ return (A * t + B) * t + C;
+}
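+
+// For reference, conic_eval_numerator expands P0*(1-t)^2 + 2w*P1*t*(1-t) + P2*t^2
+// per coordinate (src holds x or y at even indices), and conic_eval_denominator
+// expands (1-t)^2 + 2w*t*(1-t) + t^2. The denominator equals 1 at both t == 0
+// and t == 1, so ptAtT below interpolates the endpoints exactly.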
+
+bool SkDConic::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(*this, isLinear);
+}
+
+SkDPoint SkDConic::ptAtT(double t) const {
+ if (t == 0) {
+ return fPts[0];
+ }
+ if (t == 1) {
+ return fPts[2];
+ }
+ double denominator = conic_eval_denominator(fWeight, t);
+ SkDPoint result = {
+ sk_ieee_double_divide(conic_eval_numerator(&fPts[0].fX, fWeight, t), denominator),
+ sk_ieee_double_divide(conic_eval_numerator(&fPts[0].fY, fWeight, t), denominator)
+ };
+ return result;
+}
+
+/* see quad subdivide for point rationale */
+/* w rationale : the mid point between t1 and t2 could be determined from the computed a/b/c
+ values if the computed w was known. Since we know the mid point at (t1+t2)/2, we'll assume
+ that it is the same as the point on the new curve t==(0+1)/2.
+
+ d / dz == conic_poly(dst, unknownW, .5) / conic_weight(unknownW, .5);
+
+ conic_poly(dst, unknownW, .5)
+ = a / 4 + (b * unknownW) / 2 + c / 4
+ = (a + c) / 4 + (b * unknownW) / 2
+
+ conic_weight(unknownW, .5)
+ = unknownW / 2 + 1 / 2
+
+ d / dz == ((a + c) / 2 + b * unknownW) / (unknownW + 1)
+ d / dz * (unknownW + 1) == (a + c) / 2 + b * unknownW
+ unknownW = ((a + c) / 2 - d / dz) / (d / dz - b)
+
+ Thus, w is the ratio of the distance from the midpoint of the end points to the on-curve
+ point, to the distance from the on-curve point to the control point.
+ */
+SkDConic SkDConic::subDivide(double t1, double t2) const {
+ double ax, ay, az;
+ if (t1 == 0) {
+ ax = fPts[0].fX;
+ ay = fPts[0].fY;
+ az = 1;
+ } else if (t1 != 1) {
+ ax = conic_eval_numerator(&fPts[0].fX, fWeight, t1);
+ ay = conic_eval_numerator(&fPts[0].fY, fWeight, t1);
+ az = conic_eval_denominator(fWeight, t1);
+ } else {
+ ax = fPts[2].fX;
+ ay = fPts[2].fY;
+ az = 1;
+ }
+ double midT = (t1 + t2) / 2;
+ double dx = conic_eval_numerator(&fPts[0].fX, fWeight, midT);
+ double dy = conic_eval_numerator(&fPts[0].fY, fWeight, midT);
+ double dz = conic_eval_denominator(fWeight, midT);
+ double cx, cy, cz;
+ if (t2 == 1) {
+ cx = fPts[2].fX;
+ cy = fPts[2].fY;
+ cz = 1;
+ } else if (t2 != 0) {
+ cx = conic_eval_numerator(&fPts[0].fX, fWeight, t2);
+ cy = conic_eval_numerator(&fPts[0].fY, fWeight, t2);
+ cz = conic_eval_denominator(fWeight, t2);
+ } else {
+ cx = fPts[0].fX;
+ cy = fPts[0].fY;
+ cz = 1;
+ }
+ double bx = 2 * dx - (ax + cx) / 2;
+ double by = 2 * dy - (ay + cy) / 2;
+ double bz = 2 * dz - (az + cz) / 2;
+ if (!bz) {
+ bz = 1; // if bz is 0, weight is 0, control point has no effect: any value will do
+ }
+ SkDConic dst = {{{{ax / az, ay / az}, {bx / bz, by / bz}, {cx / cz, cy / cz}}
+ SkDEBUGPARAMS(fPts.fDebugGlobalState) },
+ SkDoubleToScalar(bz / sqrt(az * cz)) };
+ return dst;
+}
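+
+// A minimal sketch of the formula above, assuming set() and subDivide() as
+// declared in SkPathOpsConic.h: over the full range az == cz == 1 and
+// dz == (w + 1) / 2, so bz == 2*dz - 1 == w and the original control point
+// and weight come back (up to rounding).
+//   SkPoint pts[3] = {{0, 0}, {1, 1}, {2, 0}};  // hypothetical values
+//   SkDConic conic;
+//   conic.set(pts, 0.5f);
+//   SkDConic whole = conic.subDivide(0, 1);  // whole.fWeight == 0.5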
+
+SkDPoint SkDConic::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+ SkScalar* weight) const {
+ SkDConic chopped = this->subDivide(t1, t2);
+ *weight = chopped.fWeight;
+ return chopped[1];
+}
+
+int SkTConic::intersectRay(SkIntersections* i, const SkDLine& line) const {
+ return i->intersectRay(fConic, line);
+}
+
+bool SkTConic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return quad.hullIntersects(fConic, isLinear);
+}
+
+bool SkTConic::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(fConic, isLinear);
+}
+
+void SkTConic::setBounds(SkDRect* rect) const {
+ rect->setBounds(fConic);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.h b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
new file mode 100644
index 0000000000..27893ee46d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsConic_DEFINED
+#define SkPathOpsConic_DEFINED
+
+#include "src/pathops/SkPathOpsQuad.h"
+
+struct SkDConic {
+ static const int kPointCount = 3;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 4;
+
+ SkDQuad fPts;
+ SkScalar fWeight;
+
+ bool collapsed() const {
+ return fPts.collapsed();
+ }
+
+ bool controlsInside() const {
+ return fPts.controlsInside();
+ }
+
+ void debugInit() {
+ fPts.debugInit();
+ fWeight = 0;
+ }
+
+ void debugSet(const SkDPoint* pts, SkScalar weight);
+
+ SkDConic flip() const {
+ SkDConic result = {{{fPts[2], fPts[1], fPts[0]}
+ SkDEBUGPARAMS(fPts.fDebugGlobalState) }, fWeight};
+ return result;
+ }
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fPts.globalState(); }
+#endif
+
+ static bool IsConic() { return true; }
+
+ const SkDConic& set(const SkPoint pts[kPointCount], SkScalar weight
+ SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+ fPts.set(pts SkDEBUGPARAMS(state));
+ fWeight = weight;
+ return *this;
+ }
+
+ const SkDPoint& operator[](int n) const { return fPts[n]; }
+ SkDPoint& operator[](int n) { return fPts[n]; }
+
+ static int AddValidTs(double s[], int realRoots, double* t) {
+ return SkDQuad::AddValidTs(s, realRoots, t);
+ }
+
+ void align(int endIndex, SkDPoint* dstPt) const {
+ fPts.align(endIndex, dstPt);
+ }
+
+ SkDVector dxdyAtT(double t) const;
+ static int FindExtrema(const double src[], SkScalar weight, double tValue[1]);
+
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return fPts.hullIntersects(quad, isLinear);
+ }
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return fPts.hullIntersects(conic.fPts, isLinear);
+ }
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const;
+
+ bool isLinear(int startIndex, int endIndex) const {
+ return fPts.isLinear(startIndex, endIndex);
+ }
+
+ static int maxIntersections() { return kMaxIntersections; }
+
+ bool monotonicInX() const {
+ return fPts.monotonicInX();
+ }
+
+ bool monotonicInY() const {
+ return fPts.monotonicInY();
+ }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+ fPts.otherPts(oddMan, endPt);
+ }
+
+ static int pointCount() { return kPointCount; }
+ static int pointLast() { return kPointLast; }
+ SkDPoint ptAtT(double t) const;
+
+ static int RootsReal(double A, double B, double C, double t[2]) {
+ return SkDQuad::RootsReal(A, B, C, t);
+ }
+
+ static int RootsValidT(const double A, const double B, const double C, double s[2]) {
+ return SkDQuad::RootsValidT(A, B, C, s);
+ }
+
+ SkDConic subDivide(double t1, double t2) const;
+ void subDivide(double t1, double t2, SkDConic* c) const { *c = this->subDivide(t1, t2); }
+
+ static SkDConic SubDivide(const SkPoint a[kPointCount], SkScalar weight, double t1, double t2) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.subDivide(t1, t2);
+ }
+
+ SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+ SkScalar* weight) const;
+
+ static SkDPoint SubDivide(const SkPoint pts[kPointCount], SkScalar weight,
+ const SkDPoint& a, const SkDPoint& c,
+ double t1, double t2, SkScalar* newWeight) {
+ SkDConic conic;
+ conic.set(pts, weight);
+ return conic.subDivide(a, c, t1, t2, newWeight);
+ }
+
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ void dumpID(int id) const;
+ void dumpInner() const;
+
+};
+
+class SkTConic : public SkTCurve {
+public:
+ SkDConic fConic;
+
+ SkTConic() {}
+
+ SkTConic(const SkDConic& c)
+ : fConic(c) {
+ }
+
+ ~SkTConic() override {}
+
+ const SkDPoint& operator[](int n) const override { return fConic[n]; }
+ SkDPoint& operator[](int n) override { return fConic[n]; }
+
+ bool collapsed() const override { return fConic.collapsed(); }
+ bool controlsInside() const override { return fConic.controlsInside(); }
+ void debugInit() override { return fConic.debugInit(); }
+#if DEBUG_T_SECT
+ void dumpID(int id) const override { return fConic.dumpID(id); }
+#endif
+ SkDVector dxdyAtT(double t) const override { return fConic.dxdyAtT(t); }
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const override { return fConic.globalState(); }
+#endif
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override;
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const override {
+ return conic.hullIntersects(fConic, isLinear);
+ }
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override;
+
+ bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+ return curve.hullIntersects(fConic, isLinear);
+ }
+
+ int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+ bool IsConic() const override { return true; }
+ SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTConic>(); }
+
+ int maxIntersections() const override { return SkDConic::kMaxIntersections; }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+ fConic.otherPts(oddMan, endPt);
+ }
+
+ int pointCount() const override { return SkDConic::kPointCount; }
+ int pointLast() const override { return SkDConic::kPointLast; }
+ SkDPoint ptAtT(double t) const override { return fConic.ptAtT(t); }
+ void setBounds(SkDRect* ) const override;
+
+ void subDivide(double t1, double t2, SkTCurve* curve) const override {
+ ((SkTConic*) curve)->fConic = fConic.subDivide(t1, t2);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
new file mode 100644
index 0000000000..bb5261f9e1
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
@@ -0,0 +1,750 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkGeometry.h"
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+const int SkDCubic::gPrecisionUnit = 256; // FIXME: test different values in test framework
+
+void SkDCubic::align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const {
+ if (fPts[endIndex].fX == fPts[ctrlIndex].fX) {
+ dstPt->fX = fPts[endIndex].fX;
+ }
+ if (fPts[endIndex].fY == fPts[ctrlIndex].fY) {
+ dstPt->fY = fPts[endIndex].fY;
+ }
+}
+
+// give up when changing t no longer moves point
+// also, copy point rather than recompute it when it does change
+double SkDCubic::binarySearch(double min, double max, double axisIntercept,
+ SearchAxis xAxis) const {
+ double t = (min + max) / 2;
+ double step = (t - min) / 2;
+ SkDPoint cubicAtT = ptAtT(t);
+ double calcPos = (&cubicAtT.fX)[xAxis];
+ double calcDist = calcPos - axisIntercept;
+ do {
+ double priorT = std::max(min, t - step);
+ SkDPoint lessPt = ptAtT(priorT);
+ if (approximately_equal_half(lessPt.fX, cubicAtT.fX)
+ && approximately_equal_half(lessPt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double lessDist = (&lessPt.fX)[xAxis] - axisIntercept;
+#if DEBUG_CUBIC_BINARY_SEARCH
+ SkDebugf("t=%1.9g calc=%1.9g dist=%1.9g step=%1.9g less=%1.9g\n", t, calcPos, calcDist,
+ step, lessDist);
+#endif
+ double lastStep = step;
+ step /= 2;
+ if (calcDist > 0 ? calcDist > lessDist : calcDist < lessDist) {
+ t = priorT;
+ } else {
+ double nextT = t + lastStep;
+ if (nextT > max) {
+ return -1;
+ }
+ SkDPoint morePt = ptAtT(nextT);
+ if (approximately_equal_half(morePt.fX, cubicAtT.fX)
+ && approximately_equal_half(morePt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double moreDist = (&morePt.fX)[xAxis] - axisIntercept;
+ if (calcDist > 0 ? calcDist <= moreDist : calcDist >= moreDist) {
+ continue;
+ }
+ t = nextT;
+ }
+ SkDPoint testAtT = ptAtT(t);
+ cubicAtT = testAtT;
+ calcPos = (&cubicAtT.fX)[xAxis];
+ calcDist = calcPos - axisIntercept;
+ } while (!approximately_equal(calcPos, axisIntercept));
+ return t;
+}
+
+// get the rough scale of the cubic; used to determine if curvature is extreme
+double SkDCubic::calcPrecision() const {
+ return ((fPts[1] - fPts[0]).length()
+ + (fPts[2] - fPts[1]).length()
+ + (fPts[3] - fPts[2]).length()) / gPrecisionUnit;
+}
+
+/* classic one t subdivision */
+static void interp_cubic_coords(const double* src, double* dst, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+
+ dst[0] = src[0];
+ dst[2] = ab;
+ dst[4] = abc;
+ dst[6] = abcd;
+ dst[8] = bcd;
+ dst[10] = cd;
+ dst[12] = src[6];
+}
+
+SkDCubicPair SkDCubic::chopAt(double t) const {
+ SkDCubicPair dst;
+ if (t == 0.5) {
+ dst.pts[0] = fPts[0];
+ dst.pts[1].fX = (fPts[0].fX + fPts[1].fX) / 2;
+ dst.pts[1].fY = (fPts[0].fY + fPts[1].fY) / 2;
+ dst.pts[2].fX = (fPts[0].fX + 2 * fPts[1].fX + fPts[2].fX) / 4;
+ dst.pts[2].fY = (fPts[0].fY + 2 * fPts[1].fY + fPts[2].fY) / 4;
+ dst.pts[3].fX = (fPts[0].fX + 3 * (fPts[1].fX + fPts[2].fX) + fPts[3].fX) / 8;
+ dst.pts[3].fY = (fPts[0].fY + 3 * (fPts[1].fY + fPts[2].fY) + fPts[3].fY) / 8;
+ dst.pts[4].fX = (fPts[1].fX + 2 * fPts[2].fX + fPts[3].fX) / 4;
+ dst.pts[4].fY = (fPts[1].fY + 2 * fPts[2].fY + fPts[3].fY) / 4;
+ dst.pts[5].fX = (fPts[2].fX + fPts[3].fX) / 2;
+ dst.pts[5].fY = (fPts[2].fY + fPts[3].fY) / 2;
+ dst.pts[6] = fPts[3];
+ return dst;
+ }
+ interp_cubic_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+ interp_cubic_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+ return dst;
+}
+
+void SkDCubic::Coefficients(const double* src, double* A, double* B, double* C, double* D) {
+ *A = src[6]; // d
+ *B = src[4] * 3; // 3*c
+ *C = src[2] * 3; // 3*b
+ *D = src[0]; // a
+ *A -= *D - *C + *B; // A = -a + 3*b - 3*c + d
+ *B += 3 * *D - 2 * *C; // B = 3*a - 6*b + 3*c
+ *C -= 3 * *D; // C = -3*a + 3*b
+}
+
+bool SkDCubic::endsAreExtremaInXOrY() const {
+ return (between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && between(fPts[0].fX, fPts[2].fX, fPts[3].fX))
+ || (between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && between(fPts[0].fY, fPts[2].fY, fPts[3].fY));
+}
+
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one cubic's points. If the 2nd cubic's points
+// are on the line or on the opposite side from the 1st cubic's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, check contains true if cubic's hull collapsed, making the cubic linear
+ if returning false, check contains true if the cubic pair has only the end point in common
+*/
+bool SkDCubic::hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const {
+ bool linear = true;
+ char hullOrder[4];
+ int hullCount = convexHull(hullOrder);
+ int end1 = hullOrder[0];
+ int hullIndex = 0;
+ const SkDPoint* endPt[2];
+ endPt[0] = &fPts[end1];
+ do {
+ hullIndex = (hullIndex + 1) % hullCount;
+ int end2 = hullOrder[hullIndex];
+ endPt[1] = &fPts[end2];
+ double origX = endPt[0]->fX;
+ double origY = endPt[0]->fY;
+ double adj = endPt[1]->fX - origX;
+ double opp = endPt[1]->fY - origY;
+ int oddManMask = other_two(end1, end2);
+ int oddMan = end1 ^ oddManMask;
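+        // sign is the cross product of the hull edge vector (adj, opp) with a
+        // remaining point's offset from the edge origin; its sign picks a side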
+ double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+ int oddMan2 = end2 ^ oddManMask;
+ double sign2 = (fPts[oddMan2].fY - origY) * adj - (fPts[oddMan2].fX - origX) * opp;
+ if (sign * sign2 < 0) {
+ continue;
+ }
+ if (approximately_zero(sign)) {
+ sign = sign2;
+ if (approximately_zero(sign)) {
+ continue;
+ }
+ }
+ linear = false;
+ bool foundOutlier = false;
+ for (int n = 0; n < ptCount; ++n) {
+ double test = (pts[n].fY - origY) * adj - (pts[n].fX - origX) * opp;
+ if (test * sign > 0 && !precisely_zero(test)) {
+ foundOutlier = true;
+ break;
+ }
+ }
+ if (!foundOutlier) {
+ return false;
+ }
+ endPt[0] = endPt[1];
+ end1 = end2;
+ } while (hullIndex);
+ *isLinear = linear;
+ return true;
+}
+
+bool SkDCubic::hullIntersects(const SkDCubic& c2, bool* isLinear) const {
+ return hullIntersects(c2.fPts, c2.kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return hullIntersects(quad.fPts, quad.kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+    return hullIntersects(conic.fPts, isLinear);
+}
+
+bool SkDCubic::isLinear(int startIndex, int endIndex) const {
+ if (fPts[0].approximatelyDEqual(fPts[3])) {
+ return ((const SkDQuad *) this)->isLinear(0, 2);
+ }
+ SkLineParameters lineParameters;
+ lineParameters.cubicEndPoints(*this, startIndex, endIndex);
+ // FIXME: maybe it's possible to avoid this and compare non-normalized
+ lineParameters.normalize();
+ double tiniest = SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ double largest = SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ largest = SkTMax(largest, -tiniest);
+ double distance = lineParameters.controlPtDistance(*this, 1);
+ if (!approximately_zero_when_compared_to(distance, largest)) {
+ return false;
+ }
+ distance = lineParameters.controlPtDistance(*this, 2);
+ return approximately_zero_when_compared_to(distance, largest);
+}
+
+// from http://www.cs.sunysb.edu/~qin/courses/geometry/4.pdf
+// c(t) = a(1-t)^3 + 3bt(1-t)^2 + 3c(1-t)t^2 + dt^3
+// c'(t) = -3a(1-t)^2 + 3b((1-t)^2 - 2t(1-t)) + 3c(2t(1-t) - t^2) + 3dt^2
+// = 3(b-a)(1-t)^2 + 6(c-b)t(1-t) + 3(d-c)t^2
+static double derivative_at_t(const double* src, double t) {
+ double one_t = 1 - t;
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ return 3 * ((b - a) * one_t * one_t + 2 * (c - b) * t * one_t + (d - c) * t * t);
+}
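+
+// sanity anchors for the formula above: at t == 0 it reduces to 3*(b - a) and
+// at t == 1 to 3*(d - c), the standard endpoint tangents of a cubic Bezier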
+
+int SkDCubic::ComplexBreak(const SkPoint pointsPtr[4], SkScalar* t) {
+ SkDCubic cubic;
+ cubic.set(pointsPtr);
+ if (cubic.monotonicInX() && cubic.monotonicInY()) {
+ return 0;
+ }
+ double tt[2], ss[2];
+ SkCubicType cubicType = SkClassifyCubic(pointsPtr, tt, ss);
+ switch (cubicType) {
+ case SkCubicType::kLoop: {
+ const double &td = tt[0], &te = tt[1], &sd = ss[0], &se = ss[1];
+ if (roughly_between(0, td, sd) && roughly_between(0, te, se)) {
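+                // (td*se + te*sd) / (2*sd*se) == (td/sd + te/se) / 2, the
+                // midpoint of the two double-point parameters reported by
+                // SkClassifyCubic in homogeneous form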
+ t[0] = static_cast<SkScalar>((td * se + te * sd) / (2 * sd * se));
+ return (int) (t[0] > 0 && t[0] < 1);
+ }
+ }
+ // fall through if no t value found
+ case SkCubicType::kSerpentine:
+ case SkCubicType::kLocalCusp:
+ case SkCubicType::kCuspAtInfinity: {
+ double inflectionTs[2];
+ int infTCount = cubic.findInflections(inflectionTs);
+ double maxCurvature[3];
+ int roots = cubic.findMaxCurvature(maxCurvature);
+ #if DEBUG_CUBIC_SPLIT
+ SkDebugf("%s\n", __FUNCTION__);
+ cubic.dump();
+ for (int index = 0; index < infTCount; ++index) {
+ SkDebugf("inflectionsTs[%d]=%1.9g ", index, inflectionTs[index]);
+ SkDPoint pt = cubic.ptAtT(inflectionTs[index]);
+ SkDVector dPt = cubic.dxdyAtT(inflectionTs[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+ for (int index = 0; index < roots; ++index) {
+ SkDebugf("maxCurvature[%d]=%1.9g ", index, maxCurvature[index]);
+ SkDPoint pt = cubic.ptAtT(maxCurvature[index]);
+ SkDVector dPt = cubic.dxdyAtT(maxCurvature[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+ #endif
+ if (infTCount == 2) {
+ for (int index = 0; index < roots; ++index) {
+ if (between(inflectionTs[0], maxCurvature[index], inflectionTs[1])) {
+ t[0] = maxCurvature[index];
+ return (int) (t[0] > 0 && t[0] < 1);
+ }
+ }
+ } else {
+ int resultCount = 0;
+ // FIXME: constant found through experimentation -- maybe there's a better way....
+ double precision = cubic.calcPrecision() * 2;
+ for (int index = 0; index < roots; ++index) {
+ double testT = maxCurvature[index];
+ if (0 >= testT || testT >= 1) {
+ continue;
+ }
+ // don't call dxdyAtT since we want (0,0) results
+ SkDVector dPt = { derivative_at_t(&cubic.fPts[0].fX, testT),
+ derivative_at_t(&cubic.fPts[0].fY, testT) };
+ double dPtLen = dPt.length();
+ if (dPtLen < precision) {
+ t[resultCount++] = testT;
+ }
+ }
+ if (!resultCount && infTCount == 1) {
+ t[0] = inflectionTs[0];
+ resultCount = (int) (t[0] > 0 && t[0] < 1);
+ }
+ return resultCount;
+ }
+ }
+ default:
+ ;
+ }
+ return 0;
+}
+
+bool SkDCubic::monotonicInX() const {
+ return precisely_between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && precisely_between(fPts[0].fX, fPts[2].fX, fPts[3].fX);
+}
+
+bool SkDCubic::monotonicInY() const {
+ return precisely_between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && precisely_between(fPts[0].fY, fPts[2].fY, fPts[3].fY);
+}
+
+void SkDCubic::otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const {
+ int offset = (int) !SkToBool(index);
+ o1Pts[0] = &fPts[offset];
+ o1Pts[1] = &fPts[++offset];
+ o1Pts[2] = &fPts[++offset];
+}
+
+int SkDCubic::searchRoots(double extremeTs[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const {
+ extrema += findInflections(&extremeTs[extrema]);
+ extremeTs[extrema++] = 0;
+ extremeTs[extrema] = 1;
+ SkASSERT(extrema < 6);
+ SkTQSort(extremeTs, extremeTs + extrema);
+ int validCount = 0;
+ for (int index = 0; index < extrema; ) {
+ double min = extremeTs[index];
+ double max = extremeTs[++index];
+ if (min == max) {
+ continue;
+ }
+ double newT = binarySearch(min, max, axisIntercept, xAxis);
+ if (newT >= 0) {
+ if (validCount >= 3) {
+ return 0;
+ }
+ validRoots[validCount++] = newT;
+ }
+ }
+ return validCount;
+}
+
+// cubic roots
+
+static const double PI = 3.141592653589793;
+
+// from SkGeometry.cpp (and Numeric Solutions, 5.6)
+int SkDCubic::RootsValidT(double A, double B, double C, double D, double t[3]) {
+ double s[3];
+ int realRoots = RootsReal(A, B, C, D, s);
+ int foundRoots = SkDQuad::AddValidTs(s, realRoots, t);
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = s[index];
+ if (!approximately_one_or_less(tValue) && between(1, tValue, 1.00005)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 1)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 1;
+ } else if (!approximately_zero_or_more(tValue) && between(-0.00005, tValue, 0)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 0)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 0;
+ }
+nextRoot:
+ ;
+ }
+ return foundRoots;
+}
+
+int SkDCubic::RootsReal(double A, double B, double C, double D, double s[3]) {
+#ifdef SK_DEBUG
+ // create a string mathematica understands
+ // GDB set print repe 15 # if repeated digits is a bother
+ // set print elements 400 # if line doesn't fit
+ char str[1024];
+ sk_bzero(str, sizeof(str));
+ SK_SNPRINTF(str, sizeof(str), "Solve[%1.19g x^3 + %1.19g x^2 + %1.19g x + %1.19g == 0, x]",
+ A, B, C, D);
+ SkPathOpsDebug::MathematicaIze(str, sizeof(str));
+#if ONE_OFF_DEBUG && ONE_OFF_DEBUG_MATHEMATICA
+ SkDebugf("%s\n", str);
+#endif
+#endif
+ if (approximately_zero(A)
+ && approximately_zero_when_compared_to(A, B)
+ && approximately_zero_when_compared_to(A, C)
+ && approximately_zero_when_compared_to(A, D)) { // we're just a quadratic
+ return SkDQuad::RootsReal(B, C, D, s);
+ }
+ if (approximately_zero_when_compared_to(D, A)
+ && approximately_zero_when_compared_to(D, B)
+ && approximately_zero_when_compared_to(D, C)) { // 0 is one root
+ int num = SkDQuad::RootsReal(A, B, C, s);
+ for (int i = 0; i < num; ++i) {
+ if (approximately_zero(s[i])) {
+ return num;
+ }
+ }
+ s[num++] = 0;
+ return num;
+ }
+ if (approximately_zero(A + B + C + D)) { // 1 is one root
+ int num = SkDQuad::RootsReal(A, A + B, -D, s);
+ for (int i = 0; i < num; ++i) {
+ if (AlmostDequalUlps(s[i], 1)) {
+ return num;
+ }
+ }
+ s[num++] = 1;
+ return num;
+ }
+ double a, b, c;
+ {
+ double invA = 1 / A;
+ a = B * invA;
+ b = C * invA;
+ c = D * invA;
+ }
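+    // Q, R, and the R2 - Q3 discriminant below follow the Numeric Solutions
+    // 5.6 formulation cited above for the monic cubic x^3 + a*x^2 + b*x + c:
+    // three real roots when R^2 < Q^3, otherwise one (plus a possible double
+    // root when R^2 == Q^3)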
+ double a2 = a * a;
+ double Q = (a2 - b * 3) / 9;
+ double R = (2 * a2 * a - 9 * a * b + 27 * c) / 54;
+ double R2 = R * R;
+ double Q3 = Q * Q * Q;
+ double R2MinusQ3 = R2 - Q3;
+ double adiv3 = a / 3;
+ double r;
+ double* roots = s;
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ double theta = acos(SkTPin(R / sqrt(Q3), -1., 1.));
+ double neg2RootQ = -2 * sqrt(Q);
+
+ r = neg2RootQ * cos(theta / 3) - adiv3;
+ *roots++ = r;
+
+ r = neg2RootQ * cos((theta + 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * cos((theta - 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r) && (roots - s == 1 || !AlmostDequalUlps(s[1], r))) {
+ *roots++ = r;
+ }
+ } else { // we have 1 real root
+ double sqrtR2MinusQ3 = sqrt(R2MinusQ3);
+ double A = fabs(R) + sqrtR2MinusQ3;
+ A = SkDCubeRoot(A);
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ r = A - adiv3;
+ *roots++ = r;
+ if (AlmostDequalUlps((double) R2, (double) Q3)) {
+ r = -A / 2 - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ }
+ }
+ return static_cast<int>(roots - s);
+}
+
+// OPTIMIZE? compute t^2, t(1-t), and (1-t)^2 and pass them to another version of derivative at t?
+SkDVector SkDCubic::dxdyAtT(double t) const {
+ SkDVector result = { derivative_at_t(&fPts[0].fX, t), derivative_at_t(&fPts[0].fY, t) };
+ if (result.fX == 0 && result.fY == 0) {
+ if (t == 0) {
+ result = fPts[2] - fPts[0];
+ } else if (t == 1) {
+ result = fPts[3] - fPts[1];
+ } else {
+ // incomplete
+ SkDebugf("!c");
+ }
+ if (result.fX == 0 && result.fY == 0 && zero_or_one(t)) {
+ result = fPts[3] - fPts[0];
+ }
+ }
+ return result;
+}
+
+// OPTIMIZE? share code with formulate_F1DotF2
+int SkDCubic::findInflections(double tValues[]) const {
+ double Ax = fPts[1].fX - fPts[0].fX;
+ double Ay = fPts[1].fY - fPts[0].fY;
+ double Bx = fPts[2].fX - 2 * fPts[1].fX + fPts[0].fX;
+ double By = fPts[2].fY - 2 * fPts[1].fY + fPts[0].fY;
+ double Cx = fPts[3].fX + 3 * (fPts[1].fX - fPts[2].fX) - fPts[0].fX;
+ double Cy = fPts[3].fY + 3 * (fPts[1].fY - fPts[2].fY) - fPts[0].fY;
+ return SkDQuad::RootsValidT(Bx * Cy - By * Cx, Ax * Cy - Ay * Cx, Ax * By - Ay * Bx, tValues);
+}
+
+static void formulate_F1DotF2(const double src[], double coeff[4]) {
+ double a = src[2] - src[0];
+ double b = src[4] - 2 * src[2] + src[0];
+ double c = src[6] + 3 * (src[2] - src[4]) - src[0];
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
+
+/** SkDCubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+ Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkDCubic::FindExtrema(const double src[], double tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ double A = d - a + 3 * (b - c);
+ double B = 2 * (a - b - b + c);
+ double C = b - a;
+
+ return SkDQuad::RootsValidT(A, B, C, tValues);
+}
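+
+// with the comment's A, B, and C each divided by 3: A/3 = -a + 3b - 3c + d,
+// B/3 = 2*(a - 2b + c), and C/3 = b - a, exactly the values computed above;
+// the common factor does not change the roots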
+
+/* from SkGeometry.cpp
+ Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkDCubic::findMaxCurvature(double tValues[]) const {
+ double coeffX[4], coeffY[4];
+ int i;
+ formulate_F1DotF2(&fPts[0].fX, coeffX);
+ formulate_F1DotF2(&fPts[0].fY, coeffY);
+ for (i = 0; i < 4; i++) {
+ coeffX[i] = coeffX[i] + coeffY[i];
+ }
+ return RootsValidT(coeffX[0], coeffX[1], coeffX[2], coeffX[3], tValues);
+}
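+
+// the x and y polynomials are summed coefficient-wise because
+// F' dot F'' == Fx'*Fx'' + Fy'*Fy''; a common positive factor of 18 from the
+// derivatives is dropped since only the roots matter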
+
+SkDPoint SkDCubic::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[3];
+ }
+ double one_t = 1 - t;
+ double one_t2 = one_t * one_t;
+ double a = one_t2 * one_t;
+ double b = 3 * one_t2 * t;
+ double t2 = t * t;
+ double c = 3 * one_t * t2;
+ double d = t2 * t;
+ SkDPoint result = {a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX + d * fPts[3].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY + d * fPts[3].fY};
+ return result;
+}
+
+/*
+ Given a cubic c, t1, and t2, find a small cubic segment.
+
+ The new cubic is defined as points A, B, C, and D, where
+ s1 = 1 - t1
+ s2 = 1 - t2
+ A = c[0]*s1*s1*s1 + 3*c[1]*s1*s1*t1 + 3*c[2]*s1*t1*t1 + c[3]*t1*t1*t1
+ D = c[0]*s2*s2*s2 + 3*c[1]*s2*s2*t2 + 3*c[2]*s2*t2*t2 + c[3]*t2*t2*t2
+
+ We don't have B or C, so we define two equations to isolate them.
+ First, compute two reference T values 1/3 and 2/3 from t1 to t2:
+
+ c(at (2*t1 + t2)/3) == E
+ c(at (t1 + 2*t2)/3) == F
+
+ Next, compute where those values must be if we know the values of B and C:
+
+ _12 = A*2/3 + B*1/3
+ 12_ = A*1/3 + B*2/3
+ _23 = B*2/3 + C*1/3
+ 23_ = B*1/3 + C*2/3
+ _34 = C*2/3 + D*1/3
+ 34_ = C*1/3 + D*2/3
+ _123 = (A*2/3 + B*1/3)*2/3 + (B*2/3 + C*1/3)*1/3 = A*4/9 + B*4/9 + C*1/9
+ 123_ = (A*1/3 + B*2/3)*1/3 + (B*1/3 + C*2/3)*2/3 = A*1/9 + B*4/9 + C*4/9
+ _234 = (B*2/3 + C*1/3)*2/3 + (C*2/3 + D*1/3)*1/3 = B*4/9 + C*4/9 + D*1/9
+ 234_ = (B*1/3 + C*2/3)*1/3 + (C*1/3 + D*2/3)*2/3 = B*1/9 + C*4/9 + D*4/9
+ _1234 = (A*4/9 + B*4/9 + C*1/9)*2/3 + (B*4/9 + C*4/9 + D*1/9)*1/3
+ = A*8/27 + B*12/27 + C*6/27 + D*1/27
+ = E
+ 1234_ = (A*1/9 + B*4/9 + C*4/9)*1/3 + (B*1/9 + C*4/9 + D*4/9)*2/3
+ = A*1/27 + B*6/27 + C*12/27 + D*8/27
+ = F
+ E*27 = A*8 + B*12 + C*6 + D
+ F*27 = A + B*6 + C*12 + D*8
+
+Group the known values on one side:
+
+ M = E*27 - A*8 - D = B*12 + C* 6
+ N = F*27 - A - D*8 = B* 6 + C*12
+ M*2 - N = B*18
+ N*2 - M = C*18
+ B = (M*2 - N)/18
+ C = (N*2 - M)/18
+ */
+
+static double interp_cubic_coords(const double* src, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+ return abcd;
+}
+
+SkDCubic SkDCubic::subDivide(double t1, double t2) const {
+ if (t1 == 0 || t2 == 1) {
+ if (t1 == 0 && t2 == 1) {
+ return *this;
+ }
+ SkDCubicPair pair = chopAt(t1 == 0 ? t2 : t1);
+ SkDCubic dst = t1 == 0 ? pair.first() : pair.second();
+ return dst;
+ }
+ SkDCubic dst;
+ double ax = dst[0].fX = interp_cubic_coords(&fPts[0].fX, t1);
+ double ay = dst[0].fY = interp_cubic_coords(&fPts[0].fY, t1);
+ double ex = interp_cubic_coords(&fPts[0].fX, (t1*2+t2)/3);
+ double ey = interp_cubic_coords(&fPts[0].fY, (t1*2+t2)/3);
+ double fx = interp_cubic_coords(&fPts[0].fX, (t1+t2*2)/3);
+ double fy = interp_cubic_coords(&fPts[0].fY, (t1+t2*2)/3);
+ double dx = dst[3].fX = interp_cubic_coords(&fPts[0].fX, t2);
+ double dy = dst[3].fY = interp_cubic_coords(&fPts[0].fY, t2);
+ double mx = ex * 27 - ax * 8 - dx;
+ double my = ey * 27 - ay * 8 - dy;
+ double nx = fx * 27 - ax - dx * 8;
+ double ny = fy * 27 - ay - dy * 8;
+ /* bx = */ dst[1].fX = (mx * 2 - nx) / 18;
+ /* by = */ dst[1].fY = (my * 2 - ny) / 18;
+ /* cx = */ dst[2].fX = (nx * 2 - mx) / 18;
+ /* cy = */ dst[2].fY = (ny * 2 - my) / 18;
+ // FIXME: call align() ?
+ return dst;
+}
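+
+// A worked reading of the derivation above, assuming set() and subDivide()
+// from SkPathOpsCubic.h: E and F are the points one third and two thirds of
+// the way from t1 to t2, so the result matches the source curve at four
+// parameter values, which determines all four control points.
+//   SkPoint pts[4] = {{0, 0}, {1, 2}, {3, 2}, {4, 0}};  // hypothetical values
+//   SkDCubic cubic;
+//   cubic.set(pts);
+//   SkDCubic part = cubic.subDivide(0.25, 0.75);
+//   // part.ptAtT(0) matches cubic.ptAtT(0.25) up to rounding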
+
+void SkDCubic::subDivide(const SkDPoint& a, const SkDPoint& d,
+ double t1, double t2, SkDPoint dst[2]) const {
+ SkASSERT(t1 != t2);
+ // this approach assumes that the control points computed directly are accurate enough
+ SkDCubic sub = subDivide(t1, t2);
+ dst[0] = sub[1] + (a - sub[0]);
+ dst[1] = sub[2] + (d - sub[3]);
+ if (t1 == 0 || t2 == 0) {
+ align(0, 1, t1 == 0 ? &dst[0] : &dst[1]);
+ }
+ if (t1 == 1 || t2 == 1) {
+ align(3, 2, t1 == 1 ? &dst[0] : &dst[1]);
+ }
+ if (AlmostBequalUlps(dst[0].fX, a.fX)) {
+ dst[0].fX = a.fX;
+ }
+ if (AlmostBequalUlps(dst[0].fY, a.fY)) {
+ dst[0].fY = a.fY;
+ }
+ if (AlmostBequalUlps(dst[1].fX, d.fX)) {
+ dst[1].fX = d.fX;
+ }
+ if (AlmostBequalUlps(dst[1].fY, d.fY)) {
+ dst[1].fY = d.fY;
+ }
+}
+
+bool SkDCubic::toFloatPoints(SkPoint* pts) const {
+ const double* dCubic = &fPts[0].fX;
+ SkScalar* cubic = &pts[0].fX;
+ for (int index = 0; index < kPointCount * 2; ++index) {
+ cubic[index] = SkDoubleToScalar(dCubic[index]);
+ if (SkScalarAbs(cubic[index]) < FLT_EPSILON_ORDERABLE_ERR) {
+ cubic[index] = 0;
+ }
+ }
+ return SkScalarsAreFinite(&pts->fX, kPointCount * 2);
+}
+
+double SkDCubic::top(const SkDCubic& dCurve, double startT, double endT, SkDPoint* topPt) const {
+ double extremeTs[2];
+ double topT = -1;
+ int roots = SkDCubic::FindExtrema(&fPts[0].fY, extremeTs);
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * extremeTs[index];
+ SkDPoint mid = dCurve.ptAtT(t);
+ if (topPt->fY > mid.fY || (topPt->fY == mid.fY && topPt->fX > mid.fX)) {
+ topT = t;
+ *topPt = mid;
+ }
+ }
+ return topT;
+}
+
+int SkTCubic::intersectRay(SkIntersections* i, const SkDLine& line) const {
+ return i->intersectRay(fCubic, line);
+}
+
+bool SkTCubic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return quad.hullIntersects(fCubic, isLinear);
+}
+
+bool SkTCubic::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return conic.hullIntersects(fCubic, isLinear);
+}
+
+void SkTCubic::setBounds(SkDRect* rect) const {
+ rect->setBounds(fCubic);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.h b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
new file mode 100644
index 0000000000..cc923e458a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsCubic_DEFINED
+#define SkPathOpsCubic_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+struct SkDCubicPair;
+
+struct SkDCubic {
+ static const int kPointCount = 4;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 9;
+
+ enum SearchAxis {
+ kXAxis,
+ kYAxis
+ };
+
+ bool collapsed() const {
+ return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2])
+ && fPts[0].approximatelyEqual(fPts[3]);
+ }
+
+ bool controlsInside() const {
+ SkDVector v01 = fPts[0] - fPts[1];
+ SkDVector v02 = fPts[0] - fPts[2];
+ SkDVector v03 = fPts[0] - fPts[3];
+ SkDVector v13 = fPts[1] - fPts[3];
+ SkDVector v23 = fPts[2] - fPts[3];
+ return v03.dot(v01) > 0 && v03.dot(v02) > 0 && v03.dot(v13) > 0 && v03.dot(v23) > 0;
+ }
+
+ static bool IsConic() { return false; }
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+ void align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const;
+ double binarySearch(double min, double max, double axisIntercept, SearchAxis xAxis) const;
+ double calcPrecision() const;
+ SkDCubicPair chopAt(double t) const;
+ static void Coefficients(const double* cubic, double* A, double* B, double* C, double* D);
+ static int ComplexBreak(const SkPoint pts[4], SkScalar* t);
+ int convexHull(char order[kPointCount]) const;
+
+ void debugInit() {
+ sk_bzero(fPts, sizeof(fPts));
+ }
+
+ void debugSet(const SkDPoint* pts);
+
+ void dump() const; // callable from the debugger when the implementation code is linked in
+ void dumpID(int id) const;
+ void dumpInner() const;
+ SkDVector dxdyAtT(double t) const;
+ bool endsAreExtremaInXOrY() const;
+ static int FindExtrema(const double src[], double tValue[2]);
+ int findInflections(double tValues[2]) const;
+
+ static int FindInflections(const SkPoint a[kPointCount], double tValues[2]) {
+ SkDCubic cubic;
+ return cubic.set(a).findInflections(tValues);
+ }
+
+ int findMaxCurvature(double tValues[]) const;
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+ bool hullIntersects(const SkDCubic& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDConic& c, bool* isLinear) const;
+ bool hullIntersects(const SkDQuad& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const;
+ bool isLinear(int startIndex, int endIndex) const;
+ static int maxIntersections() { return kMaxIntersections; }
+ bool monotonicInX() const;
+ bool monotonicInY() const;
+ void otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const;
+ static int pointCount() { return kPointCount; }
+ static int pointLast() { return kPointLast; }
+ SkDPoint ptAtT(double t) const;
+ static int RootsReal(double A, double B, double C, double D, double t[3]);
+ static int RootsValidT(const double A, const double B, const double C, double D, double s[3]);
+
+ int searchRoots(double extremes[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const;
+
+ bool toFloatPoints(SkPoint* ) const;
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified horizontal line.
+ */
+ int horizontalIntersect(double yIntercept, double roots[3]) const;
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified vertical line.
+ */
+ int verticalIntersect(double xIntercept, double roots[3]) const;
+
+// add debug only global pointer so asserts can be skipped by fuzzers
+ const SkDCubic& set(const SkPoint pts[kPointCount]
+ SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fPts[2] = pts[2];
+ fPts[3] = pts[3];
+ SkDEBUGCODE(fDebugGlobalState = state);
+ return *this;
+ }
+
+ SkDCubic subDivide(double t1, double t2) const;
+ void subDivide(double t1, double t2, SkDCubic* c) const { *c = this->subDivide(t1, t2); }
+
+ static SkDCubic SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+ SkDCubic cubic;
+ return cubic.set(a).subDivide(t1, t2);
+ }
+
+ void subDivide(const SkDPoint& a, const SkDPoint& d, double t1, double t2, SkDPoint p[2]) const;
+
+ static void SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& d, double t1,
+ double t2, SkDPoint p[2]) {
+ SkDCubic cubic;
+ cubic.set(pts).subDivide(a, d, t1, t2, p);
+ }
+
+    double top(const SkDCubic& dCurve, double startT, double endT, SkDPoint* topPt) const;
+ SkDQuad toQuad() const;
+
+ static const int gPrecisionUnit;
+ SkDPoint fPts[kPointCount];
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+};
+
+/* Given the set [0, 1, 2, 3], and two of the four members, compute an XOR mask
+ that computes the other two. Note that:
+
+ one ^ two == 3 for (0, 3), (1, 2)
+ one ^ two < 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+ 3 - (one ^ two) is either 0, 1, or 2
+ 1 >> (3 - (one ^ two)) is either 0 or 1
+thus:
+ returned == 2 for (0, 3), (1, 2)
+ returned == 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+given that:
+ (0, 3) ^ 2 -> (2, 1) (1, 2) ^ 2 -> (3, 0)
+ (0, 1) ^ 3 -> (3, 2) (0, 2) ^ 3 -> (3, 1) (1, 3) ^ 3 -> (2, 0) (2, 3) ^ 3 -> (1, 0)
+*/
+inline int other_two(int one, int two) {
+ return 1 >> (3 - (one ^ two)) ^ 3;
+}
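+
+// spot checks: other_two(0, 3) == 2 and {0 ^ 2, 3 ^ 2} == {2, 1};
+// other_two(0, 1) == 3 and {0 ^ 3, 1 ^ 3} == {3, 2} -- each call yields the
+// mask that maps the given pair onto the other two indices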
+
+struct SkDCubicPair {
+ const SkDCubic first() const {
+#ifdef SK_DEBUG
+ SkDCubic result;
+ result.debugSet(&pts[0]);
+ return result;
+#else
+ return (const SkDCubic&) pts[0];
+#endif
+ }
+ const SkDCubic second() const {
+#ifdef SK_DEBUG
+ SkDCubic result;
+ result.debugSet(&pts[3]);
+ return result;
+#else
+ return (const SkDCubic&) pts[3];
+#endif
+ }
+ SkDPoint pts[7];
+};
+
+class SkTCubic : public SkTCurve {
+public:
+ SkDCubic fCubic;
+
+ SkTCubic() {}
+
+ SkTCubic(const SkDCubic& c)
+ : fCubic(c) {
+ }
+
+ ~SkTCubic() override {}
+
+ const SkDPoint& operator[](int n) const override { return fCubic[n]; }
+ SkDPoint& operator[](int n) override { return fCubic[n]; }
+
+ bool collapsed() const override { return fCubic.collapsed(); }
+ bool controlsInside() const override { return fCubic.controlsInside(); }
+ void debugInit() override { return fCubic.debugInit(); }
+#if DEBUG_T_SECT
+ void dumpID(int id) const override { return fCubic.dumpID(id); }
+#endif
+ SkDVector dxdyAtT(double t) const override { return fCubic.dxdyAtT(t); }
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const override { return fCubic.globalState(); }
+#endif
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override;
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const override;
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override {
+ return cubic.hullIntersects(fCubic, isLinear);
+ }
+
+ bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+ return curve.hullIntersects(fCubic, isLinear);
+ }
+
+ int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+ bool IsConic() const override { return false; }
+ SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTCubic>(); }
+
+ int maxIntersections() const override { return SkDCubic::kMaxIntersections; }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+ fCubic.otherPts(oddMan, endPt);
+ }
+
+ int pointCount() const override { return SkDCubic::kPointCount; }
+ int pointLast() const override { return SkDCubic::kPointLast; }
+ SkDPoint ptAtT(double t) const override { return fCubic.ptAtT(t); }
+ void setBounds(SkDRect* ) const override;
+
+ void subDivide(double t1, double t2, SkTCurve* curve) const override {
+ ((SkTCubic*) curve)->fCubic = fCubic.subDivide(t1, t2);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
new file mode 100644
index 0000000000..734d599958
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+ // this cheats and assumes that the perpendicular to the point is the closest ray to the curve
+ // this case (where the line and the curve are nearly coincident) may be the only case that counts
+double SkDCurve::nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const {
+ int count = SkPathOpsVerbToPoints(verb);
+ double minX = fCubic.fPts[0].fX;
+ double maxX = minX;
+ for (int index = 1; index <= count; ++index) {
+ minX = SkTMin(minX, fCubic.fPts[index].fX);
+ maxX = SkTMax(maxX, fCubic.fPts[index].fX);
+ }
+ if (!AlmostBetweenUlps(minX, xy.fX, maxX)) {
+ return -1;
+ }
+ double minY = fCubic.fPts[0].fY;
+ double maxY = minY;
+ for (int index = 1; index <= count; ++index) {
+ minY = SkTMin(minY, fCubic.fPts[index].fY);
+ maxY = SkTMax(maxY, fCubic.fPts[index].fY);
+ }
+ if (!AlmostBetweenUlps(minY, xy.fY, maxY)) {
+ return -1;
+ }
+ SkIntersections i;
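+    // the perpendicular's second point is xy plus (opp - xy) rotated a quarter
+    // turn: (dx, dy) maps to (dy, -dx)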
+ SkDLine perp = {{ xy, { xy.fX + opp.fY - xy.fY, xy.fY + xy.fX - opp.fX }}};
+ (*CurveDIntersectRay[verb])(*this, perp, &i);
+ int minIndex = -1;
+ double minDist = FLT_MAX;
+ for (int index = 0; index < i.used(); ++index) {
+ double dist = xy.distance(i.pt(index));
+ if (minDist > dist) {
+ minDist = dist;
+ minIndex = index;
+ }
+ }
+ if (minIndex < 0) {
+ return -1;
+ }
+ double largest = SkTMax(SkTMax(maxX, maxY), -SkTMin(minX, minY));
+ if (!AlmostEqualUlps_Pin(largest, largest + minDist)) { // is distance within ULPS tolerance?
+ return -1;
+ }
+ return SkPinT(i[0][minIndex]);
+}
+
+void SkDCurve::offset(SkPath::Verb verb, const SkDVector& off) {
+ int count = SkPathOpsVerbToPoints(verb);
+ for (int index = 0; index <= count; ++index) {
+ fCubic.fPts[index] += off;
+ }
+}
+
+void SkDCurve::setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDConic dCurve;
+ dCurve.set(curve, curveWeight);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fConic, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDCubic dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fCubic, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDQuad dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fQuad, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurveSweep::setCurveHullSweep(SkPath::Verb verb) {
+ fOrdered = true;
+ fSweep[0] = fCurve[1] - fCurve[0];
+ if (SkPath::kLine_Verb == verb) {
+ fSweep[1] = fSweep[0];
+ fIsCurve = false;
+ return;
+ }
+ fSweep[1] = fCurve[2] - fCurve[0];
+ // OPTIMIZE: I do the following float check a lot -- probably need a
+ // central place for this val-is-small-compared-to-curve check
+ double maxVal = 0;
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ maxVal = SkTMax(maxVal, SkTMax(SkTAbs(fCurve[index].fX),
+ SkTAbs(fCurve[index].fY)));
+ }
+ {
+ if (SkPath::kCubic_Verb != verb) {
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ }
+ goto setIsCurve;
+ }
+ SkDVector thirdSweep = fCurve[3] - fCurve[0];
+ if (fSweep[0].fX == 0 && fSweep[0].fY == 0) {
+ fSweep[0] = fSweep[1];
+ fSweep[1] = thirdSweep;
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ fCurve[1] = fCurve[3];
+ }
+ goto setIsCurve;
+ }
+ double s1x3 = fSweep[0].crossCheck(thirdSweep);
+ double s3x2 = thirdSweep.crossCheck(fSweep[1]);
+ if (s1x3 * s3x2 >= 0) { // if third vector is on or between first two vectors
+ goto setIsCurve;
+ }
+ double s2x1 = fSweep[1].crossCheck(fSweep[0]);
+ // FIXME: If the sweep of the cubic is greater than 180 degrees, we're in trouble
+ // probably such wide sweeps should be artificially subdivided earlier so that never happens
+ SkASSERT(s1x3 * s2x1 < 0 || s1x3 * s3x2 < 0);
+ if (s3x2 * s2x1 < 0) {
+ SkASSERT(s2x1 * s1x3 > 0);
+ fSweep[0] = fSweep[1];
+ fOrdered = false;
+ }
+ fSweep[1] = thirdSweep;
+ }
+setIsCurve:
+ fIsCurve = fSweep[0].crossCheck(fSweep[1]) != 0;
+}
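+
+// setCurveHullSweep relies on 2D cross-product signs: s1x3 and s3x2 share a
+// sign exactly when the third control vector lies on or between the first
+// two. A worked example with made-up values: fSweep[0] = (1, 0),
+// fSweep[1] = (0, 1), and thirdSweep = (1, 1) give s1x3 = 1*1 - 0*1 = 1 and
+// s3x2 = 1*1 - 1*0 = 1; the product is positive, so the hull stays ordered.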
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.h b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
new file mode 100644
index 0000000000..50511c876b
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCurve_DEFINE
+#define SkPathOpsCurve_DEFINE
+
+#include "src/pathops/SkIntersections.h"
+
+#ifndef SK_RELEASE
+#include "include/core/SkPath.h"
+#endif
+
+struct SkPathOpsBounds;
+
+struct SkOpCurve {
+ SkPoint fPts[4];
+ SkScalar fWeight;
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
+ const SkPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fPts[n];
+ }
+
+ void dump() const;
+
+ void set(const SkDQuad& quad) {
+ for (int index = 0; index < SkDQuad::kPointCount; ++index) {
+ fPts[index] = quad[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kQuad_Verb);
+ }
+
+ void set(const SkDCubic& cubic) {
+ for (int index = 0; index < SkDCubic::kPointCount; ++index) {
+ fPts[index] = cubic[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kCubic_Verb);
+ }
+
+};
+
+struct SkDCurve {
+ union {
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDConic fConic;
+ SkDCubic fCubic;
+ };
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
+ const SkDPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint& operator[](int n) {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint conicTop(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, double* topT);
+ SkDPoint cubicTop(const SkPoint curve[4], SkScalar , double s, double e, double* topT);
+ void dump() const;
+ void dumpID(int ) const;
+ SkDPoint lineTop(const SkPoint[2], SkScalar , double , double , double* topT);
+ double nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const;
+ void offset(SkPath::Verb verb, const SkDVector& );
+ SkDPoint quadTop(const SkPoint curve[3], SkScalar , double s, double e, double* topT);
+
+ void setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, SkPathOpsBounds* );
+ void setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double s, double e, SkPathOpsBounds* );
+ void setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double s, double e, SkPathOpsBounds*);
+};
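+
+// The union in SkDCurve overlays every curve type on shared storage, and each
+// type leads with its SkDPoint array, so operator[] may index fCubic no
+// matter which member was written. For example, after fQuad is set, curve[2]
+// still yields the quad's last point because fQuad's points alias fCubic's
+// first three points.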
+
+class SkDCurveSweep {
+public:
+ bool isCurve() const { return fIsCurve; }
+ bool isOrdered() const { return fOrdered; }
+ void setCurveHullSweep(SkPath::Verb verb);
+
+ SkDCurve fCurve;
+ SkDVector fSweep[2];
+private:
+ bool fIsCurve;
+ bool fOrdered; // cleared when a cubic's control point isn't between the sweep vectors
+
+};
+
+extern SkDPoint (SkDCurve::* const Top[])(const SkPoint curve[], SkScalar cWeight,
+ double tStart, double tEnd, double* topT);
+
+static SkDPoint dline_xy_at_t(const SkPoint a[2], SkScalar , double t) {
+ SkDLine line;
+ line.set(a);
+ return line.ptAtT(t);
+}
+
+static SkDPoint dquad_xy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.ptAtT(t);
+}
+
+static SkDPoint dconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.ptAtT(t);
+}
+
+static SkDPoint dcubic_xy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDPointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_xy_at_t,
+ dquad_xy_at_t,
+ dconic_xy_at_t,
+ dcubic_xy_at_t
+};
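+
+// The function-pointer tables in this header are indexed by SkPath::Verb:
+// slot 0 (kMove_Verb) is nullptr, and slots 1..4 dispatch to the line, quad,
+// conic, and cubic helpers. A minimal usage sketch (hypothetical values):
+//
+//     SkPoint quadPts[3] = {{0, 0}, {1, 2}, {2, 0}};
+//     SkDPoint mid = (*CurveDPointAtT[SkPath::kQuad_Verb])(quadPts, 1, 0.5);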
+
+static SkDPoint ddline_xy_at_t(const SkDCurve& c, double t) {
+ return c.fLine.ptAtT(t);
+}
+
+static SkDPoint ddquad_xy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.ptAtT(t);
+}
+
+static SkDPoint ddconic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.ptAtT(t);
+}
+
+static SkDPoint ddcubic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDDPointAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_xy_at_t,
+ ddquad_xy_at_t,
+ ddconic_xy_at_t,
+ ddcubic_xy_at_t
+};
+
+static SkPoint fline_xy_at_t(const SkPoint a[2], SkScalar weight, double t) {
+ return dline_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fquad_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fcubic_xy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint (* const CurvePointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_xy_at_t,
+ fquad_xy_at_t,
+ fconic_xy_at_t,
+ fcubic_xy_at_t
+};
+
+static SkDVector dline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ SkDLine line;
+ line.set(a);
+ return line[1] - line[0];
+}
+
+static SkDVector dquad_dxdy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.dxdyAtT(t);
+}
+
+static SkDVector dconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.dxdyAtT(t);
+}
+
+static SkDVector dcubic_dxdy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_dxdy_at_t,
+ dquad_dxdy_at_t,
+ dconic_dxdy_at_t,
+ dcubic_dxdy_at_t
+};
+
+static SkDVector ddline_dxdy_at_t(const SkDCurve& c, double ) {
+ return c.fLine.fPts[1] - c.fLine.fPts[0];
+}
+
+static SkDVector ddquad_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.dxdyAtT(t);
+}
+
+static SkDVector ddconic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.dxdyAtT(t);
+}
+
+static SkDVector ddcubic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDDSlopeAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_dxdy_at_t,
+ ddquad_dxdy_at_t,
+ ddconic_dxdy_at_t,
+ ddcubic_dxdy_at_t
+};
+
+static SkVector fline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ return a[1] - a[0];
+}
+
+static SkVector fquad_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fcubic_dxdy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector (* const CurveSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_dxdy_at_t,
+ fquad_dxdy_at_t,
+ fconic_dxdy_at_t,
+ fcubic_dxdy_at_t
+};
+
+static bool line_is_vertical(const SkPoint a[2], SkScalar , double startT, double endT) {
+ SkDLine line;
+ line.set(a);
+ SkDPoint dst[2] = { line.ptAtT(startT), line.ptAtT(endT) };
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX);
+}
+
+static bool quad_is_vertical(const SkPoint a[3], SkScalar , double startT, double endT) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDQuad dst = quad.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool conic_is_vertical(const SkPoint a[3], SkScalar weight, double startT, double endT) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDConic dst = conic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool cubic_is_vertical(const SkPoint a[4], SkScalar , double startT, double endT) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDCubic dst = cubic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX)
+ && AlmostEqualUlps(dst[2].fX, dst[3].fX);
+}
+
+static bool (* const CurveIsVertical[])(const SkPoint[], SkScalar , double , double) = {
+ nullptr,
+ line_is_vertical,
+ quad_is_vertical,
+ conic_is_vertical,
+ cubic_is_vertical
+};
+
+static void line_intersect_ray(const SkPoint a[2], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDLine line;
+ line.set(a);
+ i->intersectRay(line, ray);
+}
+
+static void quad_intersect_ray(const SkPoint a[3], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDQuad quad;
+ quad.set(a);
+ i->intersectRay(quad, ray);
+}
+
+static void conic_intersect_ray(const SkPoint a[3], SkScalar weight, const SkDLine& ray,
+ SkIntersections* i) {
+ SkDConic conic;
+ conic.set(a, weight);
+ i->intersectRay(conic, ray);
+}
+
+static void cubic_intersect_ray(const SkPoint a[4], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDCubic cubic;
+ cubic.set(a);
+ i->intersectRay(cubic, ray);
+}
+
+static void (* const CurveIntersectRay[])(const SkPoint[] , SkScalar , const SkDLine& ,
+ SkIntersections* ) = {
+ nullptr,
+ line_intersect_ray,
+ quad_intersect_ray,
+ conic_intersect_ray,
+ cubic_intersect_ray
+};
+
+static void dline_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fLine, ray);
+}
+
+static void dquad_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fQuad, ray);
+}
+
+static void dconic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fConic, ray);
+}
+
+static void dcubic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fCubic, ray);
+}
+
+static void (* const CurveDIntersectRay[])(const SkDCurve& , const SkDLine& , SkIntersections* ) = {
+ nullptr,
+ dline_intersect_ray,
+ dquad_intersect_ray,
+ dconic_intersect_ray,
+ dcubic_intersect_ray
+};
+
+static int line_intercept_h(const SkPoint a[2], SkScalar , SkScalar y, double* roots) {
+ if (a[0].fY == a[1].fY) {
+        return 0;
+ }
+ SkDLine line;
+ roots[0] = SkIntersections::HorizontalIntercept(line.set(a), y);
+ return between(0, roots[0], 1);
+}
+
+static int line_intercept_v(const SkPoint a[2], SkScalar , SkScalar x, double* roots) {
+ if (a[0].fX == a[1].fX) {
+        return 0;
+ }
+ SkDLine line;
+ roots[0] = SkIntersections::VerticalIntercept(line.set(a), x);
+ return between(0, roots[0], 1);
+}
+
+static int quad_intercept_h(const SkPoint a[3], SkScalar , SkScalar y, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::HorizontalIntercept(quad.set(a), y, roots);
+}
+
+static int quad_intercept_v(const SkPoint a[3], SkScalar , SkScalar x, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::VerticalIntercept(quad.set(a), x, roots);
+}
+
+static int conic_intercept_h(const SkPoint a[3], SkScalar w, SkScalar y, double* roots) {
+ SkDConic conic;
+ return SkIntersections::HorizontalIntercept(conic.set(a, w), y, roots);
+}
+
+static int conic_intercept_v(const SkPoint a[3], SkScalar w, SkScalar x, double* roots) {
+ SkDConic conic;
+ return SkIntersections::VerticalIntercept(conic.set(a, w), x, roots);
+}
+
+static int cubic_intercept_h(const SkPoint a[4], SkScalar , SkScalar y, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).horizontalIntersect(y, roots);
+}
+
+static int cubic_intercept_v(const SkPoint a[4], SkScalar , SkScalar x, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).verticalIntersect(x, roots);
+}
+
+static int (* const CurveIntercept[])(const SkPoint[] , SkScalar , SkScalar , double* ) = {
+ nullptr,
+ nullptr,
+ line_intercept_h,
+ line_intercept_v,
+ quad_intercept_h,
+ quad_intercept_v,
+ conic_intercept_h,
+ conic_intercept_v,
+ cubic_intercept_h,
+ cubic_intercept_v,
+};
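+
+// Unlike the tables above, CurveIntercept packs two entries per verb: the
+// horizontal intercept lives at verb * 2 and the vertical intercept at
+// verb * 2 + 1. A sketch of the expected lookup (hypothetical call site):
+//
+//     // count receives up to three t values where the cubic crosses y == 10
+//     int count = (*CurveIntercept[SkPath::kCubic_Verb * 2])(pts, 1, 10, roots);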
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
new file mode 100644
index 0000000000..7ee44deb9d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
@@ -0,0 +1,3147 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkString.h"
+#include "include/private/SkMutex.h"
+#include "src/core/SkOSFile.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkPathOpsDebug.h"
+
+#include <utility>
+
+#if DEBUG_DUMP_VERIFY
+bool SkPathOpsDebug::gDumpOp; // set to true to write op to file before a crash
+bool SkPathOpsDebug::gVerifyOp; // set to true to compare result against regions
+#endif
+
+bool SkPathOpsDebug::gRunFail; // set to true to check for success on tests known to fail
+bool SkPathOpsDebug::gVeryVerbose; // set to true to run extensive checking tests
+
+#undef FAIL_IF
+#define FAIL_IF(cond, coin) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, coin); } while (false)
+
+#undef FAIL_WITH_NULL_IF
+#define FAIL_WITH_NULL_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, span); } while (false)
+
+#undef RETURN_FALSE_IF
+#define RETURN_FALSE_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kReturnFalse_Glitch, span); \
+ } while (false)
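+
+// Each macro wraps its statements in do { ... } while (false) so the
+// expansion behaves as a single statement after an unbraced if/else. Unlike
+// the pathops macros of the same name that they #undef, which bail out of the
+// caller, these debug variants only record the glitch and fall through.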
+
+class SkCoincidentSpans;
+
+#if DEBUG_SORT
+int SkPathOpsDebug::gSortCountDefault = SK_MaxS32;
+int SkPathOpsDebug::gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+const char* SkPathOpsDebug::kPathOpStr[] = {"diff", "sect", "union", "xor", "rdiff"};
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
+
+int SkPathOpsDebug::gContourID = 0;
+int SkPathOpsDebug::gSegmentID = 0;
+
+bool SkPathOpsDebug::ChaseContains(const SkTDArray<SkOpSpanBase* >& chaseArray,
+ const SkOpSpanBase* span) {
+ for (int index = 0; index < chaseArray.count(); ++index) {
+ const SkOpSpanBase* entry = chaseArray[index];
+ if (entry == span) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+#if DEBUG_ACTIVE_SPANS
+SkString SkPathOpsDebug::gActiveSpans;
+#endif
+
+#if DEBUG_COIN
+
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumChangedDict;
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumVisitedDict;
+
+static const int kGlitchType_Count = SkPathOpsDebug::kUnalignedTail_Glitch + 1;
+
+struct SpanGlitch {
+ const SkOpSpanBase* fBase;
+ const SkOpSpanBase* fSuspect;
+ const SkOpSegment* fSegment;
+ const SkOpSegment* fOppSegment;
+ const SkOpPtT* fCoinSpan;
+ const SkOpPtT* fEndSpan;
+ const SkOpPtT* fOppSpan;
+ const SkOpPtT* fOppEndSpan;
+ double fStartT;
+ double fEndT;
+ double fOppStartT;
+ double fOppEndT;
+ SkPoint fPt;
+ SkPathOpsDebug::GlitchType fType;
+
+ void dumpType() const;
+};
+
+struct SkPathOpsDebug::GlitchLog {
+ void init(const SkOpGlobalState* state) {
+ fGlobalState = state;
+ }
+
+ SpanGlitch* recordCommon(GlitchType type) {
+ SpanGlitch* glitch = fGlitches.push();
+ glitch->fBase = nullptr;
+ glitch->fSuspect = nullptr;
+ glitch->fSegment = nullptr;
+ glitch->fOppSegment = nullptr;
+ glitch->fCoinSpan = nullptr;
+ glitch->fEndSpan = nullptr;
+ glitch->fOppSpan = nullptr;
+ glitch->fOppEndSpan = nullptr;
+ glitch->fStartT = SK_ScalarNaN;
+ glitch->fEndT = SK_ScalarNaN;
+ glitch->fOppStartT = SK_ScalarNaN;
+ glitch->fOppEndT = SK_ScalarNaN;
+ glitch->fPt = { SK_ScalarNaN, SK_ScalarNaN };
+ glitch->fType = type;
+ return glitch;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSpanBase* suspect = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSuspect = suspect;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpPtT* ptT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = ptT;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkCoincidentSpans* opp = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ if (opp) {
+ glitch->fOppSpan = opp->coinPtTStart();
+ glitch->fOppEndSpan = opp->coinPtTEnd();
+ }
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSegment* seg, double t, SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSegment = seg;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base, double t,
+ SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpPtT* coinSpan, const SkOpPtT* endSpan) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+        glitch->fEndSpan = endSpan;
+ glitch->fOppSpan = coinSpan;
+ glitch->fOppEndSpan = endSpan;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpSpanBase* base) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ }
+
+ void record(GlitchType type, const SkOpPtT* ptTS, const SkOpPtT* ptTE,
+ const SkOpPtT* oPtTS, const SkOpPtT* oPtTE) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = ptTS;
+ glitch->fEndSpan = ptTE;
+ glitch->fOppSpan = oPtTS;
+ glitch->fOppEndSpan = oPtTE;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg, double startT,
+ double endT, const SkOpSegment* oppSeg, double oppStartT, double oppEndT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ glitch->fStartT = startT;
+ glitch->fEndT = endT;
+ glitch->fOppSegment = oppSeg;
+ glitch->fOppStartT = oppStartT;
+ glitch->fOppEndT = oppEndT;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg,
+ const SkOpSpan* span) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ glitch->fBase = span;
+ }
+
+ void record(GlitchType type, double t, const SkOpSpanBase* span) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fStartT = t;
+ glitch->fBase = span;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpPtT* ptT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = ptT;
+ }
+
+ SkTDArray<SpanGlitch> fGlitches;
+ const SkOpGlobalState* fGlobalState;
+};
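+
+// Every record() overload above funnels through recordCommon(), which seeds
+// the numeric fields with NaN and the pointers with nullptr; the dump code in
+// CheckHealth() below then uses SkScalarIsNaN() and null checks to print only
+// the fields a given overload actually filled in.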
+
+
+void SkPathOpsDebug::CoinDict::add(const SkPathOpsDebug::CoinDict& dict) {
+ int count = dict.fDict.count();
+ for (int index = 0; index < count; ++index) {
+ this->add(dict.fDict[index]);
+ }
+}
+
+void SkPathOpsDebug::CoinDict::add(const CoinDictEntry& key) {
+ int count = fDict.count();
+ for (int index = 0; index < count; ++index) {
+ CoinDictEntry* entry = &fDict[index];
+ if (entry->fIteration == key.fIteration && entry->fLineNumber == key.fLineNumber) {
+ SkASSERT(!strcmp(entry->fFunctionName, key.fFunctionName));
+ if (entry->fGlitchType == kUninitialized_Glitch) {
+ entry->fGlitchType = key.fGlitchType;
+ }
+ return;
+ }
+ }
+ *fDict.append() = key;
+}
+
+#endif
+
+#if DEBUG_COIN
+static void missing_coincidence(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ // bool result = false;
+ do {
+ /* result |= */ contour->debugMissingCoincidence(glitches);
+ } while ((contour = contour->next()));
+ return;
+}
+
+static void move_multiples(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ do {
+ if (contour->debugMoveMultiples(glitches), false) {
+ return;
+ }
+ } while ((contour = contour->next()));
+ return;
+}
+
+static void move_nearby(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ do {
+ contour->debugMoveNearby(glitches);
+ } while ((contour = contour->next()));
+}
+
+
+#endif
+
+#if DEBUG_COIN
+void SkOpGlobalState::debugAddToCoinChangedDict() {
+
+#if DEBUG_COINCIDENCE
+ SkPathOpsDebug::CheckHealth(fContourHead);
+#endif
+ // see if next coincident operation makes a change; if so, record it
+ SkPathOpsDebug::GlitchLog glitches;
+ const char* funcName = fCoinDictEntry.fFunctionName;
+ if (!strcmp("calc_angles", funcName)) {
+ ;
+ } else if (!strcmp("missing_coincidence", funcName)) {
+ missing_coincidence(&glitches, fContourHead);
+ } else if (!strcmp("move_multiples", funcName)) {
+ move_multiples(&glitches, fContourHead);
+ } else if (!strcmp("move_nearby", funcName)) {
+ move_nearby(&glitches, fContourHead);
+ } else if (!strcmp("addExpanded", funcName)) {
+ fCoincidence->debugAddExpanded(&glitches);
+ } else if (!strcmp("addMissing", funcName)) {
+ bool added;
+ fCoincidence->debugAddMissing(&glitches, &added);
+ } else if (!strcmp("addEndMovedSpans", funcName)) {
+ fCoincidence->debugAddEndMovedSpans(&glitches);
+ } else if (!strcmp("correctEnds", funcName)) {
+ fCoincidence->debugCorrectEnds(&glitches);
+ } else if (!strcmp("expand", funcName)) {
+ fCoincidence->debugExpand(&glitches);
+ } else if (!strcmp("findOverlaps", funcName)) {
+ ;
+ } else if (!strcmp("mark", funcName)) {
+ fCoincidence->debugMark(&glitches);
+ } else if (!strcmp("apply", funcName)) {
+ ;
+ } else {
+ SkASSERT(0); // add missing case
+ }
+ if (glitches.fGlitches.count()) {
+ fCoinDictEntry.fGlitchType = glitches.fGlitches[0].fType;
+ }
+ fCoinChangedDict.add(fCoinDictEntry);
+}
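+
+// debugAddToCoinChangedDict dispatches on the recorded function name: it runs
+// the side-effect-free debug twin of whichever coincidence pass is about to
+// execute, and if that twin logs any glitch, the pass is added to
+// fCoinChangedDict as one that would change the data.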
+#endif
+
+void SkPathOpsDebug::ShowActiveSpans(SkOpContourHead* contourList) {
+#if DEBUG_ACTIVE_SPANS
+ SkString str;
+ SkOpContour* contour = contourList;
+ do {
+ contour->debugShowActiveSpans(&str);
+ } while ((contour = contour->next()));
+ if (!gActiveSpans.equals(str)) {
+ const char* s = str.c_str();
+ const char* end;
+ while ((end = strchr(s, '\n'))) {
+            SkDebugf("%.*s", (int) (end - s + 1), s);
+ s = end + 1;
+ }
+ gActiveSpans.set(str);
+ }
+#endif
+}
+
+#if DEBUG_COINCIDENCE || DEBUG_COIN
+void SkPathOpsDebug::CheckHealth(SkOpContourHead* contourList) {
+#if DEBUG_COINCIDENCE
+ contourList->globalState()->debugSetCheckHealth(true);
+#endif
+#if DEBUG_COIN
+ GlitchLog glitches;
+ const SkOpContour* contour = contourList;
+ const SkOpCoincidence* coincidence = contour->globalState()->coincidence();
+ coincidence->debugCheckValid(&glitches); // don't call validate; spans may be inconsistent
+ do {
+ contour->debugCheckHealth(&glitches);
+ contour->debugMissingCoincidence(&glitches);
+ } while ((contour = contour->next()));
+ bool added;
+ coincidence->debugAddMissing(&glitches, &added);
+ coincidence->debugExpand(&glitches);
+ coincidence->debugAddExpanded(&glitches);
+ coincidence->debugMark(&glitches);
+ unsigned mask = 0;
+ for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+ const SpanGlitch& glitch = glitches.fGlitches[index];
+ mask |= 1 << glitch.fType;
+ }
+ for (int index = 0; index < kGlitchType_Count; ++index) {
+ SkDebugf(mask & (1 << index) ? "x" : "-");
+ }
+ SkDebugf(" %s\n", contourList->globalState()->debugCoinDictEntry().fFunctionName);
+ for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+ const SpanGlitch& glitch = glitches.fGlitches[index];
+ SkDebugf("%02d: ", index);
+ if (glitch.fBase) {
+ SkDebugf(" seg/base=%d/%d", glitch.fBase->segment()->debugID(),
+ glitch.fBase->debugID());
+ }
+ if (glitch.fSuspect) {
+ SkDebugf(" seg/base=%d/%d", glitch.fSuspect->segment()->debugID(),
+ glitch.fSuspect->debugID());
+ }
+ if (glitch.fSegment) {
+ SkDebugf(" segment=%d", glitch.fSegment->debugID());
+ }
+ if (glitch.fCoinSpan) {
+ SkDebugf(" coinSeg/Span/PtT=%d/%d/%d", glitch.fCoinSpan->segment()->debugID(),
+ glitch.fCoinSpan->span()->debugID(), glitch.fCoinSpan->debugID());
+ }
+ if (glitch.fEndSpan) {
+ SkDebugf(" endSpan=%d", glitch.fEndSpan->debugID());
+ }
+ if (glitch.fOppSpan) {
+ SkDebugf(" oppSeg/Span/PtT=%d/%d/%d", glitch.fOppSpan->segment()->debugID(),
+ glitch.fOppSpan->span()->debugID(), glitch.fOppSpan->debugID());
+ }
+ if (glitch.fOppEndSpan) {
+ SkDebugf(" oppEndSpan=%d", glitch.fOppEndSpan->debugID());
+ }
+ if (!SkScalarIsNaN(glitch.fStartT)) {
+ SkDebugf(" startT=%g", glitch.fStartT);
+ }
+ if (!SkScalarIsNaN(glitch.fEndT)) {
+ SkDebugf(" endT=%g", glitch.fEndT);
+ }
+ if (glitch.fOppSegment) {
+ SkDebugf(" segment=%d", glitch.fOppSegment->debugID());
+ }
+ if (!SkScalarIsNaN(glitch.fOppStartT)) {
+ SkDebugf(" oppStartT=%g", glitch.fOppStartT);
+ }
+ if (!SkScalarIsNaN(glitch.fOppEndT)) {
+ SkDebugf(" oppEndT=%g", glitch.fOppEndT);
+ }
+ if (!SkScalarIsNaN(glitch.fPt.fX) || !SkScalarIsNaN(glitch.fPt.fY)) {
+ SkDebugf(" pt=%g,%g", glitch.fPt.fX, glitch.fPt.fY);
+ }
+ DumpGlitchType(glitch.fType);
+ SkDebugf("\n");
+ }
+#if DEBUG_COINCIDENCE
+ contourList->globalState()->debugSetCheckHealth(false);
+#endif
+#if 01 && DEBUG_ACTIVE_SPANS
+// SkDebugf("active after %s:\n", id);
+ ShowActiveSpans(contourList);
+#endif
+#endif
+}
+#endif
+
+#if DEBUG_COIN
+void SkPathOpsDebug::DumpGlitchType(GlitchType glitchType) {
+ switch (glitchType) {
+ case kAddCorruptCoin_Glitch: SkDebugf(" AddCorruptCoin"); break;
+ case kAddExpandedCoin_Glitch: SkDebugf(" AddExpandedCoin"); break;
+ case kAddExpandedFail_Glitch: SkDebugf(" AddExpandedFail"); break;
+ case kAddIfCollapsed_Glitch: SkDebugf(" AddIfCollapsed"); break;
+ case kAddIfMissingCoin_Glitch: SkDebugf(" AddIfMissingCoin"); break;
+ case kAddMissingCoin_Glitch: SkDebugf(" AddMissingCoin"); break;
+ case kAddMissingExtend_Glitch: SkDebugf(" AddMissingExtend"); break;
+        case kAddOrOverlap_Glitch: SkDebugf(" AddOrOverlap"); break;
+ case kCollapsedCoin_Glitch: SkDebugf(" CollapsedCoin"); break;
+ case kCollapsedDone_Glitch: SkDebugf(" CollapsedDone"); break;
+ case kCollapsedOppValue_Glitch: SkDebugf(" CollapsedOppValue"); break;
+ case kCollapsedSpan_Glitch: SkDebugf(" CollapsedSpan"); break;
+ case kCollapsedWindValue_Glitch: SkDebugf(" CollapsedWindValue"); break;
+ case kCorrectEnd_Glitch: SkDebugf(" CorrectEnd"); break;
+ case kDeletedCoin_Glitch: SkDebugf(" DeletedCoin"); break;
+ case kExpandCoin_Glitch: SkDebugf(" ExpandCoin"); break;
+ case kFail_Glitch: SkDebugf(" Fail"); break;
+ case kMarkCoinEnd_Glitch: SkDebugf(" MarkCoinEnd"); break;
+ case kMarkCoinInsert_Glitch: SkDebugf(" MarkCoinInsert"); break;
+ case kMarkCoinMissing_Glitch: SkDebugf(" MarkCoinMissing"); break;
+ case kMarkCoinStart_Glitch: SkDebugf(" MarkCoinStart"); break;
+ case kMergeMatches_Glitch: SkDebugf(" MergeMatches"); break;
+ case kMissingCoin_Glitch: SkDebugf(" MissingCoin"); break;
+ case kMissingDone_Glitch: SkDebugf(" MissingDone"); break;
+ case kMissingIntersection_Glitch: SkDebugf(" MissingIntersection"); break;
+ case kMoveMultiple_Glitch: SkDebugf(" MoveMultiple"); break;
+ case kMoveNearbyClearAll_Glitch: SkDebugf(" MoveNearbyClearAll"); break;
+ case kMoveNearbyClearAll2_Glitch: SkDebugf(" MoveNearbyClearAll2"); break;
+ case kMoveNearbyMerge_Glitch: SkDebugf(" MoveNearbyMerge"); break;
+ case kMoveNearbyMergeFinal_Glitch: SkDebugf(" MoveNearbyMergeFinal"); break;
+ case kMoveNearbyRelease_Glitch: SkDebugf(" MoveNearbyRelease"); break;
+ case kMoveNearbyReleaseFinal_Glitch: SkDebugf(" MoveNearbyReleaseFinal"); break;
+ case kReleasedSpan_Glitch: SkDebugf(" ReleasedSpan"); break;
+ case kReturnFalse_Glitch: SkDebugf(" ReturnFalse"); break;
+ case kUnaligned_Glitch: SkDebugf(" Unaligned"); break;
+ case kUnalignedHead_Glitch: SkDebugf(" UnalignedHead"); break;
+ case kUnalignedTail_Glitch: SkDebugf(" UnalignedTail"); break;
+ case kUninitialized_Glitch: break;
+ default: SkASSERT(0);
+ }
+}
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
+void SkPathOpsDebug::MathematicaIze(char* str, size_t bufferLen) {
+ size_t len = strlen(str);
+ bool num = false;
+ for (size_t idx = 0; idx < len; ++idx) {
+ if (num && str[idx] == 'e') {
+ if (len + 2 >= bufferLen) {
+ return;
+ }
+ memmove(&str[idx + 2], &str[idx + 1], len - idx);
+ str[idx] = '*';
+ str[idx + 1] = '^';
+ ++len;
+ }
+ num = str[idx] >= '0' && str[idx] <= '9';
+ }
+}
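+
+// For example, MathematicaIze rewrites "1.25e-07" in place to "1.25*^-07":
+// each 'e' that follows a digit is replaced by Mathematica's "*^" exponent
+// marker, growing the string by one character per exponent.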
+
+bool SkPathOpsDebug::ValidWind(int wind) {
+ return wind > SK_MinS32 + 0xFFFF && wind < SK_MaxS32 - 0xFFFF;
+}
+
+void SkPathOpsDebug::WindingPrintf(int wind) {
+ if (wind == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", wind);
+ }
+}
+#endif // defined SK_DEBUG || !FORCE_RELEASE
+
+
+#if DEBUG_SHOW_TEST_NAME
+void* SkPathOpsDebug::CreateNameStr() { return new char[DEBUG_FILENAME_STRING_LENGTH]; }
+
+void SkPathOpsDebug::DeleteNameStr(void* v) { delete[] reinterpret_cast<char*>(v); }
+
+void SkPathOpsDebug::BumpTestName(char* test) {
+ char* num = test + strlen(test);
+ while (num[-1] >= '0' && num[-1] <= '9') {
+ --num;
+ }
+ if (num[0] == '\0') {
+ return;
+ }
+ int dec = atoi(num);
+ if (dec == 0) {
+ return;
+ }
+ ++dec;
+ SK_SNPRINTF(num, DEBUG_FILENAME_STRING_LENGTH - (num - test), "%d", dec);
+}
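+
+// For example, BumpTestName turns "testQuad7" into "testQuad8" in place;
+// names with no trailing digits, or whose trailing value is 0, are left
+// unchanged.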
+#endif
+
+static void show_function_header(const char* functionName) {
+ SkDebugf("\nstatic void %s(skiatest::Reporter* reporter, const char* filename) {\n", functionName);
+ if (strcmp("skphealth_com76", functionName) == 0) {
+ SkDebugf("found it\n");
+ }
+}
+
+static const char* gOpStrs[] = {
+ "kDifference_SkPathOp",
+ "kIntersect_SkPathOp",
+ "kUnion_SkPathOp",
+    "kXOR_SkPathOp",
+ "kReverseDifference_SkPathOp",
+};
+
+const char* SkPathOpsDebug::OpStr(SkPathOp op) {
+ return gOpStrs[op];
+}
+
+static void show_op(SkPathOp op, const char* pathOne, const char* pathTwo) {
+ SkDebugf(" testPathOp(reporter, %s, %s, %s, filename);\n", pathOne, pathTwo, gOpStrs[op]);
+ SkDebugf("}\n");
+}
+
+void SkPathOpsDebug::ShowPath(const SkPath& a, const SkPath& b, SkPathOp shapeOp,
+ const char* testName) {
+ static SkMutex& mutex = *(new SkMutex);
+
+ SkAutoMutexExclusive ac(mutex);
+ show_function_header(testName);
+ ShowOnePath(a, "path", true);
+ ShowOnePath(b, "pathB", true);
+ show_op(shapeOp, "path", "pathB");
+}
+
+#include "src/pathops/SkIntersectionHelper.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#if DEBUG_COIN
+
+void SkOpGlobalState::debugAddToGlobalCoinDicts() {
+ static SkMutex& mutex = *(new SkMutex);
+ SkAutoMutexExclusive ac(mutex);
+ SkPathOpsDebug::gCoinSumChangedDict.add(fCoinChangedDict);
+ SkPathOpsDebug::gCoinSumVisitedDict.add(fCoinVisitedDict);
+}
+
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+void SkOpGlobalState::debugAddLoopCount(SkIntersections* i, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn) {
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ SkIntersections::DebugLoop looper = (SkIntersections::DebugLoop) index;
+ if (fDebugLoopCount[index] >= i->debugLoopCount(looper)) {
+ continue;
+ }
+ fDebugLoopCount[index] = i->debugLoopCount(looper);
+ fDebugWorstVerb[index * 2] = wt.segment()->verb();
+ fDebugWorstVerb[index * 2 + 1] = wn.segment()->verb();
+ sk_bzero(&fDebugWorstPts[index * 8], sizeof(SkPoint) * 8);
+ memcpy(&fDebugWorstPts[index * 2 * 4], wt.pts(),
+ (SkPathOpsVerbToPoints(wt.segment()->verb()) + 1) * sizeof(SkPoint));
+ memcpy(&fDebugWorstPts[(index * 2 + 1) * 4], wn.pts(),
+ (SkPathOpsVerbToPoints(wn.segment()->verb()) + 1) * sizeof(SkPoint));
+ fDebugWorstWeight[index * 2] = wt.weight();
+ fDebugWorstWeight[index * 2 + 1] = wn.weight();
+ }
+ i->debugResetLoopCount();
+}
+
+void SkOpGlobalState::debugDoYourWorst(SkOpGlobalState* local) {
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ if (fDebugLoopCount[index] >= local->fDebugLoopCount[index]) {
+ continue;
+ }
+ fDebugLoopCount[index] = local->fDebugLoopCount[index];
+ fDebugWorstVerb[index * 2] = local->fDebugWorstVerb[index * 2];
+ fDebugWorstVerb[index * 2 + 1] = local->fDebugWorstVerb[index * 2 + 1];
+ memcpy(&fDebugWorstPts[index * 2 * 4], &local->fDebugWorstPts[index * 2 * 4],
+ sizeof(SkPoint) * 8);
+ fDebugWorstWeight[index * 2] = local->fDebugWorstWeight[index * 2];
+ fDebugWorstWeight[index * 2 + 1] = local->fDebugWorstWeight[index * 2 + 1];
+ }
+ local->debugResetLoopCounts();
+}
+
+static void dump_curve(SkPath::Verb verb, const SkPoint& pts, float weight) {
+ if (!verb) {
+ return;
+ }
+ const char* verbs[] = { "", "line", "quad", "conic", "cubic" };
+ SkDebugf("%s: {{", verbs[verb]);
+ int ptCount = SkPathOpsVerbToPoints(verb);
+ for (int index = 0; index <= ptCount; ++index) {
+ SkDPoint::Dump((&pts)[index]);
+        if (index < ptCount) {
+ SkDebugf(", ");
+ }
+ }
+ SkDebugf("}");
+ if (weight != 1) {
+ SkDebugf(", ");
+ if (weight == floorf(weight)) {
+ SkDebugf("%.0f", weight);
+ } else {
+ SkDebugf("%1.9gf", weight);
+ }
+ }
+ SkDebugf("}\n");
+}
+
+void SkOpGlobalState::debugLoopReport() {
+ const char* loops[] = { "iterations", "coinChecks", "perpCalcs" };
+ SkDebugf("\n");
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ SkDebugf("%s: %d\n", loops[index], fDebugLoopCount[index]);
+ dump_curve(fDebugWorstVerb[index * 2], fDebugWorstPts[index * 2 * 4],
+ fDebugWorstWeight[index * 2]);
+ dump_curve(fDebugWorstVerb[index * 2 + 1], fDebugWorstPts[(index * 2 + 1) * 4],
+ fDebugWorstWeight[index * 2 + 1]);
+ }
+}
+
+void SkOpGlobalState::debugResetLoopCounts() {
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+ sk_bzero(fDebugWorstVerb, sizeof(fDebugWorstVerb));
+ sk_bzero(fDebugWorstPts, sizeof(fDebugWorstPts));
+ sk_bzero(fDebugWorstWeight, sizeof(fDebugWorstWeight));
+}
+#endif
+
+bool SkOpGlobalState::DebugRunFail() {
+ return SkPathOpsDebug::gRunFail;
+}
+
+// this is const so it can be called by const methods that otherwise don't alter state
+#if DEBUG_VALIDATE || DEBUG_COIN
+void SkOpGlobalState::debugSetPhase(const char* funcName DEBUG_COIN_DECLARE_PARAMS()) const {
+ auto writable = const_cast<SkOpGlobalState*>(this);
+#if DEBUG_VALIDATE
+ writable->setPhase(phase);
+#endif
+#if DEBUG_COIN
+ SkPathOpsDebug::CoinDictEntry* entry = &writable->fCoinDictEntry;
+ writable->fPreviousFuncName = entry->fFunctionName;
+ entry->fIteration = iteration;
+ entry->fLineNumber = lineNo;
+ entry->fGlitchType = SkPathOpsDebug::kUninitialized_Glitch;
+ entry->fFunctionName = funcName;
+ writable->fCoinVisitedDict.add(*entry);
+ writable->debugAddToCoinChangedDict();
+#endif
+}
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+void SkIntersections::debugBumpLoopCount(DebugLoop index) {
+ fDebugLoopCount[index]++;
+}
+
+int SkIntersections::debugLoopCount(DebugLoop index) const {
+ return fDebugLoopCount[index];
+}
+
+void SkIntersections::debugResetLoopCount() {
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+}
+#endif
+
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+
+SkDCubic SkDQuad::debugToCubic() const {
+ SkDCubic cubic;
+ cubic[0] = fPts[0];
+ cubic[2] = fPts[1];
+ cubic[3] = fPts[2];
+ cubic[1].fX = (cubic[0].fX + cubic[2].fX * 2) / 3;
+ cubic[1].fY = (cubic[0].fY + cubic[2].fY * 2) / 3;
+ cubic[2].fX = (cubic[3].fX + cubic[2].fX * 2) / 3;
+ cubic[2].fY = (cubic[3].fY + cubic[2].fY * 2) / 3;
+ return cubic;
+}
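+
+// The conversion above is standard degree elevation: c1 = (q0 + 2*q1) / 3 and
+// c2 = (q2 + 2*q1) / 3 reproduce the quad's parametric curve exactly as a
+// cubic, letting quad data flow through cubic-only debug paths.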
+
+void SkDQuad::debugSet(const SkDPoint* pts) {
+ memcpy(fPts, pts, sizeof(fPts));
+ SkDEBUGCODE(fDebugGlobalState = nullptr);
+}
+
+void SkDCubic::debugSet(const SkDPoint* pts) {
+ memcpy(fPts, pts, sizeof(fPts));
+ SkDEBUGCODE(fDebugGlobalState = nullptr);
+}
+
+void SkDConic::debugSet(const SkDPoint* pts, SkScalar weight) {
+ fPts.debugSet(pts);
+ fWeight = weight;
+}
+
+void SkDRect::debugInit() {
+ fLeft = fTop = fRight = fBottom = SK_ScalarNaN;
+}
+
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpSegment.h"
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with addT()
+ const SkOpPtT* SkOpSegment::debugAddT(double t, SkPathOpsDebug::GlitchLog* log) const {
+ debugValidate();
+ SkPoint pt = this->ptAtT(t);
+ const SkOpSpanBase* span = &fHead;
+ do {
+ const SkOpPtT* result = span->ptT();
+ if (t == result->fT || this->match(result, this, t, pt)) {
+// span->bumpSpanAdds();
+ return result;
+ }
+ if (t < result->fT) {
+ const SkOpSpan* prev = result->span()->prev();
+ FAIL_WITH_NULL_IF(!prev, span);
+ // marks in global state that new op span has been allocated
+ this->globalState()->setAllocatedOpSpan();
+// span->init(this, prev, t, pt);
+ this->debugValidate();
+// #if DEBUG_ADD_T
+// SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+// span->segment()->debugID(), span->debugID());
+// #endif
+// span->bumpSpanAdds();
+ return nullptr;
+ }
+ FAIL_WITH_NULL_IF(span != &fTail, span);
+ } while ((span = span->upCast()->next()));
+ SkASSERT(0);
+ return nullptr; // we never get here, but need this to satisfy compiler
+}
+#endif
+
+#if DEBUG_ANGLE
+void SkOpSegment::debugCheckAngleCoin() const {
+ const SkOpSpanBase* base = &fHead;
+ const SkOpSpan* span;
+ do {
+ const SkOpAngle* angle = base->fromAngle();
+ if (angle && angle->debugCheckCoincidence()) {
+ angle->debugCheckNearCoincidence();
+ }
+ if (base->final()) {
+ break;
+ }
+ span = base->upCast();
+ angle = span->toAngle();
+ if (angle && angle->debugCheckCoincidence()) {
+ angle->debugCheckNearCoincidence();
+ }
+ } while ((base = span->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// this mimics the order of the checks in handle coincidence
+void SkOpSegment::debugCheckHealth(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugMoveMultiples(glitches);
+ debugMoveNearby(glitches);
+ debugMissingCoincidence(glitches);
+}
+
+// commented-out lines keep this in sync with clearAll()
+void SkOpSegment::debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const {
+ const SkOpSpan* span = &fHead;
+ do {
+ this->debugClearOne(span, glitches);
+ } while ((span = span->next()->upCastable()));
+ this->globalState()->coincidence()->debugRelease(glitches, this);
+}
+
+// commented-out lines keep this in sync with clearOne()
+void SkOpSegment::debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const {
+ if (span->windValue()) glitches->record(SkPathOpsDebug::kCollapsedWindValue_Glitch, span);
+ if (span->oppValue()) glitches->record(SkPathOpsDebug::kCollapsedOppValue_Glitch, span);
+ if (!span->done()) glitches->record(SkPathOpsDebug::kCollapsedDone_Glitch, span);
+}
+#endif
+
+SkOpAngle* SkOpSegment::debugLastAngle() {
+ SkOpAngle* result = nullptr;
+ SkOpSpan* span = this->head();
+ do {
+ if (span->toAngle()) {
+ SkASSERT(!result);
+ result = span->toAngle();
+ }
+ } while ((span = span->next()->upCastable()));
+ SkASSERT(result);
+ return result;
+}
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with ClearVisited
+void SkOpSegment::DebugClearVisited(const SkOpSpanBase* span) {
+ // reset visited flag back to false
+ do {
+ const SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ const SkOpSegment* opp = ptT->segment();
+ opp->resetDebugVisited();
+ }
+ } while (!span->final() && (span = span->upCast()->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with missingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have visited flag clear
+// Even though pairs of curves correctly detect coincident runs, a run may be
+// missed if the coincidence is a product of multiple intersections. For
+// instance, given curves A, B, and C:
+// A-B intersect at point 1; A-C and B-C intersect at point 2, which lies so
+// near the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
+void SkOpSegment::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+ if (this->done()) {
+ return;
+ }
+ const SkOpSpan* prior = nullptr;
+ const SkOpSpanBase* spanBase = &fHead;
+// bool result = false;
+ do {
+ const SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+ SkASSERT(ptT->span() == spanBase);
+ while ((ptT = ptT->next()) != spanStopPtT) {
+ if (ptT->deleted()) {
+ continue;
+ }
+ const SkOpSegment* opp = ptT->span()->segment();
+ if (opp->done()) {
+ continue;
+ }
+            // when opp is encountered the first time, continue; on the second encounter, look for coincidence
+ if (!opp->debugVisited()) {
+ continue;
+ }
+ if (spanBase == &fHead) {
+ continue;
+ }
+ if (ptT->segment() == this) {
+ continue;
+ }
+ const SkOpSpan* span = spanBase->upCastable();
+ // FIXME?: this assumes that if the opposite segment is coincident then no more
+ // coincidence needs to be detected. This may not be true.
+            // debug has an additional condition since it may be called before
+            // inner duplicate points have been deleted
+            if (span && span->segment() != opp && span->containsCoincidence(opp)) {
+ continue;
+ }
+            // debug has an additional condition since it may be called before
+            // inner duplicate points have been deleted
+            if (spanBase->segment() != opp && spanBase->containsCoinEnd(opp)) {
+ continue;
+ }
+ const SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+ // find prior span containing opp segment
+ const SkOpSegment* priorOpp = nullptr;
+ const SkOpSpan* priorTest = spanBase->prev();
+ while (!priorOpp && priorTest) {
+ priorStopPtT = priorPtT = priorTest->ptT();
+ while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+ if (priorPtT->deleted()) {
+ continue;
+ }
+ const SkOpSegment* segment = priorPtT->span()->segment();
+ if (segment == opp) {
+ prior = priorTest;
+ priorOpp = opp;
+ break;
+ }
+ }
+ priorTest = priorTest->prev();
+ }
+ if (!priorOpp) {
+ continue;
+ }
+ if (priorPtT == ptT) {
+ continue;
+ }
+ const SkOpPtT* oppStart = prior->ptT();
+ const SkOpPtT* oppEnd = spanBase->ptT();
+ bool swapped = priorPtT->fT > ptT->fT;
+ if (swapped) {
+ using std::swap;
+ swap(priorPtT, ptT);
+ swap(oppStart, oppEnd);
+ }
+ const SkOpCoincidence* coincidence = this->globalState()->coincidence();
+ const SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+ const SkOpPtT* rootPtT = ptT->span()->ptT();
+ const SkOpPtT* rootOppStart = oppStart->span()->ptT();
+ const SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+ if (coincidence->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ goto swapBack;
+ }
+ if (testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+ // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+// SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+// rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+// rootOppEnd->debugID());
+#endif
+ log->record(SkPathOpsDebug::kMissingCoin_Glitch, priorPtT, ptT, oppStart, oppEnd);
+ // coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+ // }
+#if DEBUG_COINCIDENCE
+// SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+#endif
+ // result = true;
+ }
+ swapBack:
+ if (swapped) {
+ using std::swap;
+ swap(priorPtT, ptT);
+ }
+ }
+ } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+ DebugClearVisited(&fHead);
+ return;
+}
+
+// commented-out lines keep this in sync with moveMultiples()
+// if a span has more than one intersection, merge the other segments' span as needed
+void SkOpSegment::debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugValidate();
+ const SkOpSpanBase* test = &fHead;
+ do {
+ int addCount = test->spanAddsCount();
+// SkASSERT(addCount >= 1);
+ if (addCount <= 1) {
+ continue;
+ }
+ const SkOpPtT* startPtT = test->ptT();
+ const SkOpPtT* testPtT = startPtT;
+ do { // iterate through all spans associated with start
+ const SkOpSpanBase* oppSpan = testPtT->span();
+ if (oppSpan->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppSpan->deleted()) {
+ continue;
+ }
+ const SkOpSegment* oppSegment = oppSpan->segment();
+ if (oppSegment == this) {
+ continue;
+ }
+ // find range of spans to consider merging
+ const SkOpSpanBase* oppPrev = oppSpan;
+ const SkOpSpanBase* oppFirst = oppSpan;
+ while ((oppPrev = oppPrev->prev())) {
+ if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppPrev->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppPrev->deleted()) {
+ continue;
+ }
+ oppFirst = oppPrev;
+ }
+ const SkOpSpanBase* oppNext = oppSpan;
+ const SkOpSpanBase* oppLast = oppSpan;
+ while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+ if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppNext->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppNext->deleted()) {
+ continue;
+ }
+ oppLast = oppNext;
+ }
+ if (oppFirst == oppLast) {
+ continue;
+ }
+ const SkOpSpanBase* oppTest = oppFirst;
+ do {
+ if (oppTest == oppSpan) {
+ continue;
+ }
+ // check to see if the candidate meets specific criteria:
+ // it contains spans of segments in test's loop but not including 'this'
+ const SkOpPtT* oppStartPtT = oppTest->ptT();
+ const SkOpPtT* oppPtT = oppStartPtT;
+ while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+ const SkOpSegment* oppPtTSegment = oppPtT->segment();
+ if (oppPtTSegment == this) {
+ goto tryNextSpan;
+ }
+ const SkOpPtT* matchPtT = startPtT;
+ do {
+ if (matchPtT->segment() == oppPtTSegment) {
+ goto foundMatch;
+ }
+ } while ((matchPtT = matchPtT->next()) != startPtT);
+ goto tryNextSpan;
+ foundMatch: // merge oppTest and oppSpan
+ oppSegment->debugValidate();
+ oppTest->debugMergeMatches(glitches, oppSpan);
+ oppTest->debugAddOpp(glitches, oppSpan);
+ oppSegment->debugValidate();
+ goto checkNextSpan;
+ }
+ tryNextSpan:
+ ;
+ } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+ } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+ ;
+ } while ((test = test->final() ? nullptr : test->upCast()->next()));
+ debugValidate();
+ return;
+}
+
+// commented-out lines keep this in sync with moveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+void SkOpSegment::debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugValidate();
+ // release undeleted spans pointing to this seg that are linked to the primary span
+ const SkOpSpanBase* spanBase = &fHead;
+ do {
+ const SkOpPtT* ptT = spanBase->ptT();
+ const SkOpPtT* headPtT = ptT;
+ while ((ptT = ptT->next()) != headPtT) {
+ const SkOpSpanBase* test = ptT->span();
+ if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+ && test->ptT() == ptT) {
+ if (test->final()) {
+ if (spanBase == &fHead) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyClearAll_Glitch, this);
+// return;
+ }
+ glitches->record(SkPathOpsDebug::kMoveNearbyReleaseFinal_Glitch, spanBase, ptT);
+ } else if (test->prev()) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyRelease_Glitch, test, headPtT);
+ }
+// break;
+ }
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (!spanBase->final());
+
+    // This loop looks for adjacent spans that are nearby
+ spanBase = &fHead;
+ do { // iterate through all spans associated with start
+ const SkOpSpanBase* test = spanBase->upCast()->next();
+ bool found;
+ if (!this->spansNearby(spanBase, test, &found)) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyMergeFinal_Glitch, test);
+ }
+ if (found) {
+ if (test->final()) {
+ if (spanBase->prev()) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyMergeFinal_Glitch, test);
+ } else {
+ glitches->record(SkPathOpsDebug::kMoveNearbyClearAll2_Glitch, this);
+ // return
+ }
+ } else {
+ glitches->record(SkPathOpsDebug::kMoveNearbyMerge_Glitch, spanBase);
+ }
+ }
+ spanBase = test;
+ } while (!spanBase->final());
+ debugValidate();
+}
+#endif
+
+void SkOpSegment::debugReset() {
+ this->init(this->fPts, this->fWeight, this->contour(), this->verb());
+}
+
+#if DEBUG_COINCIDENCE_ORDER
+void SkOpSegment::debugSetCoinT(int index, SkScalar t) const {
+ if (fDebugBaseMax < 0 || fDebugBaseIndex == index) {
+ fDebugBaseIndex = index;
+ fDebugBaseMin = SkTMin(t, fDebugBaseMin);
+ fDebugBaseMax = SkTMax(t, fDebugBaseMax);
+ return;
+ }
+ SkASSERT(fDebugBaseMin >= t || t >= fDebugBaseMax);
+ if (fDebugLastMax < 0 || fDebugLastIndex == index) {
+ fDebugLastIndex = index;
+ fDebugLastMin = SkTMin(t, fDebugLastMin);
+ fDebugLastMax = SkTMax(t, fDebugLastMax);
+ return;
+ }
+ SkASSERT(fDebugLastMin >= t || t >= fDebugLastMax);
+ SkASSERT((t - fDebugBaseMin > 0) == (fDebugLastMin - fDebugBaseMin > 0));
+}
+#endif
+
+#if DEBUG_ACTIVE_SPANS
+void SkOpSegment::debugShowActiveSpans(SkString* str) const {
+ debugValidate();
+ if (done()) {
+ return;
+ }
+ int lastId = -1;
+ double lastT = -1;
+ const SkOpSpan* span = &fHead;
+ do {
+ if (span->done()) {
+ continue;
+ }
+ if (lastId == this->debugID() && lastT == span->t()) {
+ continue;
+ }
+ lastId = this->debugID();
+ lastT = span->t();
+ str->appendf("%s id=%d", __FUNCTION__, this->debugID());
+        // since endpoints may have been adjusted, show the actual computed curves
+ SkDCurve curvePart;
+ this->subDivide(span, span->next(), &curvePart);
+ const SkDPoint* pts = curvePart.fCubic.fPts;
+ str->appendf(" (%1.9g,%1.9g", pts[0].fX, pts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ str->appendf(" %1.9g,%1.9g", pts[vIndex].fX, pts[vIndex].fY);
+ }
+ if (SkPath::kConic_Verb == fVerb) {
+ str->appendf(" %1.9gf", curvePart.fConic.fWeight);
+ }
+ str->appendf(") t=%1.9g tEnd=%1.9g", span->t(), span->next()->t());
+ if (span->windSum() == SK_MinS32) {
+ str->appendf(" windSum=?");
+ } else {
+ str->appendf(" windSum=%d", span->windSum());
+ }
+ if (span->oppValue() && span->oppSum() == SK_MinS32) {
+ str->appendf(" oppSum=?");
+ } else if (span->oppValue() || span->oppSum() != SK_MinS32) {
+ str->appendf(" oppSum=%d", span->oppSum());
+ }
+ str->appendf(" windValue=%d", span->windValue());
+ if (span->oppValue() || span->oppSum() != SK_MinS32) {
+ str->appendf(" oppValue=%d", span->oppValue());
+ }
+ str->appendf("\n");
+ } while ((span = span->next()->upCastable()));
+}
+#endif
+
+#if DEBUG_MARK_DONE
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding) {
+ const SkPoint& pt = span->ptT()->fPt;
+ SkDebugf("%s id=%d", fun, this->debugID());
+ SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+ }
+ SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+ span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+ if (winding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", winding);
+ }
+ SkDebugf(" windSum=");
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->windSum());
+ }
+ SkDebugf(" windValue=%d\n", span->windValue());
+}
+
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding,
+ int oppWinding) {
+ const SkPoint& pt = span->ptT()->fPt;
+ SkDebugf("%s id=%d", fun, this->debugID());
+ SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+ }
+    SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+            span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+ if (winding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", winding);
+ }
+ SkDebugf(" newOppSum=");
+ if (oppWinding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", oppWinding);
+ }
+ SkDebugf(" oppSum=");
+ if (span->oppSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->oppSum());
+ }
+ SkDebugf(" windSum=");
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->windSum());
+ }
+ SkDebugf(" windValue=%d oppValue=%d\n", span->windValue(), span->oppValue());
+}
+
+#endif
+
+// loop looking for a pair of angle parts that are too close to be sorted
+/* This is called after other, simpler intersection and angle sorting tests have been exhausted.
+   It should rarely be called -- the test below is thorough and time consuming.
+   It compares the distances between the start points, midpoints, and end points of each
+   pair of angle parts, along with the parts' squared lengths.
+*/
+#if DEBUG_ANGLE
+void SkOpAngle::debugCheckNearCoincidence() const {
+ const SkOpAngle* test = this;
+ do {
+ const SkOpSegment* testSegment = test->segment();
+ double testStartT = test->start()->t();
+ SkDPoint testStartPt = testSegment->dPtAtT(testStartT);
+ double testEndT = test->end()->t();
+ SkDPoint testEndPt = testSegment->dPtAtT(testEndT);
+ double testLenSq = testStartPt.distanceSquared(testEndPt);
+ SkDebugf("%s testLenSq=%1.9g id=%d\n", __FUNCTION__, testLenSq, testSegment->debugID());
+ double testMidT = (testStartT + testEndT) / 2;
+ const SkOpAngle* next = test;
+ while ((next = next->fNext) != this) {
+ SkOpSegment* nextSegment = next->segment();
+ double testMidDistSq = testSegment->distSq(testMidT, next);
+ double testEndDistSq = testSegment->distSq(testEndT, next);
+ double nextStartT = next->start()->t();
+ SkDPoint nextStartPt = nextSegment->dPtAtT(nextStartT);
+ double distSq = testStartPt.distanceSquared(nextStartPt);
+ double nextEndT = next->end()->t();
+ double nextMidT = (nextStartT + nextEndT) / 2;
+ double nextMidDistSq = nextSegment->distSq(nextMidT, test);
+ double nextEndDistSq = nextSegment->distSq(nextEndT, test);
+ SkDebugf("%s distSq=%1.9g testId=%d nextId=%d\n", __FUNCTION__, distSq,
+ testSegment->debugID(), nextSegment->debugID());
+ SkDebugf("%s testMidDistSq=%1.9g\n", __FUNCTION__, testMidDistSq);
+ SkDebugf("%s testEndDistSq=%1.9g\n", __FUNCTION__, testEndDistSq);
+ SkDebugf("%s nextMidDistSq=%1.9g\n", __FUNCTION__, nextMidDistSq);
+ SkDebugf("%s nextEndDistSq=%1.9g\n", __FUNCTION__, nextEndDistSq);
+ SkDPoint nextEndPt = nextSegment->dPtAtT(nextEndT);
+ double nextLenSq = nextStartPt.distanceSquared(nextEndPt);
+ SkDebugf("%s nextLenSq=%1.9g\n", __FUNCTION__, nextLenSq);
+ SkDebugf("\n");
+ }
+ test = test->fNext;
+ } while (test->fNext != this);
+}
+#endif
+
+#if DEBUG_ANGLE
+SkString SkOpAngle::debugPart() const {
+ SkString result;
+ switch (this->segment()->verb()) {
+ case SkPath::kLine_Verb:
+ result.printf(LINE_DEBUG_STR " id=%d", LINE_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kQuad_Verb:
+ result.printf(QUAD_DEBUG_STR " id=%d", QUAD_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kConic_Verb:
+ result.printf(CONIC_DEBUG_STR " id=%d",
+ CONIC_DEBUG_DATA(fPart.fCurve, fPart.fCurve.fConic.fWeight),
+ this->segment()->debugID());
+ break;
+ case SkPath::kCubic_Verb:
+ result.printf(CUBIC_DEBUG_STR " id=%d", CUBIC_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return result;
+}
+#endif
+
+#if DEBUG_SORT
+void SkOpAngle::debugLoop() const {
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ do {
+ next->dumpOne(true);
+ SkDebugf("\n");
+ next = next->fNext;
+ } while (next && next != first);
+ next = first;
+ do {
+ next->debugValidate();
+ next = next->fNext;
+ } while (next && next != first);
+}
+#endif
+
+void SkOpAngle::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ int wind = 0;
+ int opp = 0;
+ int lastXor = -1;
+ int lastOppXor = -1;
+ do {
+ if (next->unorderable()) {
+ return;
+ }
+ const SkOpSpan* minSpan = next->start()->starter(next->end());
+ if (minSpan->windValue() == SK_MinS32) {
+ return;
+ }
+ bool op = next->segment()->operand();
+ bool isXor = next->segment()->isXor();
+ bool oppXor = next->segment()->oppXor();
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || between(0, minSpan->windValue(), DEBUG_LIMIT_WIND_SUM));
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM
+ || between(-DEBUG_LIMIT_WIND_SUM, minSpan->oppValue(), DEBUG_LIMIT_WIND_SUM));
+ bool useXor = op ? oppXor : isXor;
+ SkASSERT(lastXor == -1 || lastXor == (int) useXor);
+ lastXor = (int) useXor;
+ wind += next->debugSign() * (op ? minSpan->oppValue() : minSpan->windValue());
+ if (useXor) {
+ wind &= 1;
+ }
+ useXor = op ? isXor : oppXor;
+ SkASSERT(lastOppXor == -1 || lastOppXor == (int) useXor);
+ lastOppXor = (int) useXor;
+ opp += next->debugSign() * (op ? minSpan->windValue() : minSpan->oppValue());
+ if (useXor) {
+ opp &= 1;
+ }
+ next = next->fNext;
+ } while (next && next != first);
+ SkASSERT(wind == 0 || !SkPathOpsDebug::gRunFail);
+ SkASSERT(opp == 0 || !SkPathOpsDebug::gRunFail);
+#endif
+}
+
+void SkOpAngle::debugValidateNext() const {
+#if !FORCE_RELEASE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = first;
+    SkTDArray<const SkOpAngle*> angles;
+ do {
+// SkASSERT_RELEASE(next->fSegment->debugContains(next));
+ angles.push_back(next);
+ next = next->next();
+ if (next == first) {
+ break;
+ }
+ SkASSERT_RELEASE(!angles.contains(next));
+ if (!next) {
+ return;
+ }
+ } while (true);
+#endif
+}
+
+#ifdef SK_DEBUG
+void SkCoincidentSpans::debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+ const SkOpGlobalState* debugState) const {
+ SkASSERT(coinPtTEnd()->span() == over || !SkOpGlobalState::DebugRunFail());
+ SkASSERT(oppPtTEnd()->span() == outer || !SkOpGlobalState::DebugRunFail());
+}
+#endif
+
+#if DEBUG_COIN
+// sets the span's end to the ptT referenced by the previous-next
+void SkCoincidentSpans::debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) const ) const {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ log->record(SkPathOpsDebug::kCorrectEnd_Glitch, this, origPtT, testPtT);
+ }
+}
+
+
+/* Commented-out lines keep this in sync with correctEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
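+// (one possible replacement, sketched here rather than taken from the source:
+// pass small lambdas such as [](const SkCoincidentSpans* s) { return s->coinPtTStart(); }
+// in place of the member-function pointers)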
+// makes all span ends agree with the segment's spans that define them
+void SkCoincidentSpans::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTEnd, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTEnd, nullptr);
+}
+
+/* Commented-out lines keep this in sync with expand */
+// expand the range by checking adjacent spans for coincidence
+bool SkCoincidentSpans::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, prev->ptT(), oppPtT);
+ expanded = true;
+    } while (false);  // the non-debug expand() continues while expansion is possible
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, next->ptT(), oppPtT);
+ expanded = true;
+    } while (false);  // the non-debug expand() continues while expansion is possible
+ return expanded;
+}
+
+// description below
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* base, const SkOpSpanBase* testSpan) const {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+        // intersect the perpendicular through base->pt() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i;
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ bool added;
+ if (this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added), false) {
+ return;
+ }
+ }
+ }
+ return;
+}
+
+// description below
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* ptT) const {
+ FAIL_IF(!ptT->span()->upCastable(), ptT->span());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF(!prev, ptT->span());
+ if (!prev->isCanceled()) {
+ if (this->debugAddEndMovedSpans(log, base, base->prev()), false) {
+ return;
+ }
+ }
+ if (!base->isCanceled()) {
+ if (this->debugAddEndMovedSpans(log, base, base->next()), false) {
+ return;
+ }
+ }
+ return;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
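+/* Sketch of the configuration described above (illustrative only, not from the
+   original source):
+
+      A:      o=========o
+                         \        <- implied line from A's matching point
+      B:      o===========x          to B's endpoint x
+      C:           ~~~~~~~~~~     <- nearby curve the implied line may hit
+*/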
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return;
+ }
+// fTop = span;
+// fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ FAIL_IF(1 == span->coinPtTStart()->fT, span);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ if (this->debugAddEndMovedSpans(log, span->oppPtTStart()), false) {
+ return;
+ }
+ }
+ } else if (oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->coinPtTStart()), false) {
+ return;
+ }
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->oppPtTEnd()), false) {
+ return;
+ }
+ }
+ } else if (oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->coinPtTEnd()), false) {
+ return;
+ }
+ }
+ }
+ } while ((span = span->next()));
+// this->restoreHead();
+ return;
+}
+
+/* Commented-out lines keep this in sync with addExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+void SkOpCoincidence::debugAddExpanded(SkPathOpsDebug::GlitchLog* log) const {
+// DEBUG_SET_PHASE();
+ const SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF(!startPtT->contains(oStartPtT), coin);
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF(oEnd->deleted(), coin);
+ FAIL_IF(!start->upCastable(), coin);
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF(!coin->flipped() && !oStart->upCastable(), coin);
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF(!oTest, coin);
+ const SkOpSegment* seg = start->segment();
+ const SkOpSegment* oSeg = oStart->segment();
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+                // iterate through until a pt-t list is found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF(!walk->upCastable(), coin);
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ FAIL_IF(!walkOpp, coin);
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = nextT - priorT;
+ FAIL_IF(!startRange, coin);
+ double startPart = (test->t() - priorT) / startRange;
+ double oStartRange = oNextT - oPriorT;
+ FAIL_IF(!oStartRange, coin);
+ double oStartPart = (oTest->t() - oStartPtT->fT) / oStartRange;
+ FAIL_IF(startPart == oStartPart, coin);
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ addToOpp ? log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ oPriorT + oStartRange * startPart, test)
+ : log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ priorT + startRange * oStartPart, oTest);
+ // FAIL_IF(!success, coin);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF(!test->upCastable(), coin);
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ oTest = coin->flipped() ? oTest->prev() : oTest->upCast()->next();
+ FAIL_IF(!oTest, coin);
+ }
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+
+/* Commented-out lines keep this in sync with addIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+void SkOpCoincidence::debugAddIfMissing(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+ const SkOpPtT* over1e, const SkOpPtT* over2e) const {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ SkOpSpanBase::Collapsed result = coinSeg->collapsed(coinTs, coinTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, coinSeg);
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ result = oppSeg->collapsed(oppTs, oppTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, oppSeg);
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added);
+ return;
+}
+
+/* Commented-out lines keep this in sync with addOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by addIfMissing(), a returned false indicates there was nothing to add.
+void SkOpCoincidence::debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) const {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+    SkOPASSERT(!fTop);  // this is (correctly) reversed in addIfMissing()
+ if (fTop && !this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe,
+ &overlaps)) {
+ return;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return;
+ }
+ const SkCoincidentSpans* overlap = overlaps.count() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.count(); ++index) { // combine overlaps before continuing
+ const SkCoincidentSpans* test = overlaps[index];
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTEnd());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTEnd());
+ }
+        if (!fHead) {
+            this->debugRelease(log, fHead, test);
+            this->debugRelease(log, fTop, test);
+        }
+ }
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ RETURN_FALSE_IF(overlap && cs && ce && overlap->contains(cs, ce), coinSeg);
+ RETURN_FALSE_IF(cs != ce || !cs, coinSeg);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ RETURN_FALSE_IF(overlap && os && oe && overlap->contains(os, oe), oppSeg);
+ SkASSERT(true || !cs || !cs->deleted());
+ SkASSERT(true || !os || !os->deleted());
+ SkASSERT(true || !ce || !ce->deleted());
+ SkASSERT(true || !oe || !oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(csExisting && csExisting == ceExisting, coinSeg);
+ RETURN_FALSE_IF(csExisting && (csExisting == ce ||
+ csExisting->contains(ceExisting ? ceExisting : ce)), coinSeg);
+ RETURN_FALSE_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)), coinSeg);
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(osExisting && osExisting == oeExisting, oppSeg);
+ RETURN_FALSE_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)), oppSeg);
+ RETURN_FALSE_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)), oppSeg);
+ bool csDeleted = false, osDeleted = false, ceDeleted = false, oeDeleted = false;
+ this->debugValidate();
+ if (!cs || !os) {
+ if (!cs)
+ cs = coinSeg->debugAddT(coinTs, log);
+ if (!os)
+ os = oppSeg->debugAddT(oppTs, log);
+// RETURN_FALSE_IF(callerAborts, !csWritable || !osWritable);
+ if (cs && os) cs->span()->debugAddOpp(log, os->span());
+// cs = csWritable;
+// os = osWritable->active();
+ RETURN_FALSE_IF((ce && ce->deleted()) || (oe && oe->deleted()), coinSeg);
+ }
+ if (!ce || !oe) {
+ if (!ce)
+ ce = coinSeg->debugAddT(coinTe, log);
+ if (!oe)
+ oe = oppSeg->debugAddT(oppTe, log);
+ if (ce && oe) ce->span()->debugAddOpp(log, oe->span());
+// ce = ceWritable;
+// oe = oeWritable;
+ }
+ this->debugValidate();
+ RETURN_FALSE_IF(csDeleted, coinSeg);
+ RETURN_FALSE_IF(osDeleted, oppSeg);
+ RETURN_FALSE_IF(ceDeleted, coinSeg);
+ RETURN_FALSE_IF(oeDeleted, oppSeg);
+ RETURN_FALSE_IF(!cs || !ce || cs == ce || cs->contains(ce) || !os || !oe || os == oe || os->contains(oe), coinSeg);
+ bool result = true;
+ if (overlap) {
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+ } else {
+ if (oppTs > oppTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, oppSeg, oppTs, oppTe, coinSeg, coinTs, coinTe);
+ }
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlap->debugShow();
+ }
+#endif
+ } else {
+ log->record(SkPathOpsDebug::kAddMissingCoin_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ return (void) result;
+}
+
+// Extra commented-out lines keep this in sync with addMissing()
+/* detects overlaps of different coincident runs on the same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+void SkOpCoincidence::debugAddMissing(SkPathOpsDebug::GlitchLog* log, bool* added) const {
+ const SkCoincidentSpans* outer = fHead;
+ *added = false;
+ if (!outer) {
+ return;
+ }
+ // fTop = outer;
+ // fHead = nullptr;
+ do {
+        // addIfMissing can modify the list that this is walking
+        // save head so that walker can iterate over old data unperturbed
+        // addIfMissing adds to head freely, then adds the saved head at the end
+ const SkOpPtT* ocs = outer->coinPtTStart();
+ SkASSERT(!ocs->deleted());
+ const SkOpSegment* outerCoin = ocs->segment();
+ SkASSERT(!outerCoin->done()); // if it's done, should have already been removed from list
+ const SkOpPtT* oos = outer->oppPtTStart();
+ if (oos->deleted()) {
+ return;
+ }
+ const SkOpSegment* outerOpp = oos->segment();
+ SkASSERT(!outerOpp->done());
+// SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+// SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+ const SkCoincidentSpans* inner = outer;
+ while ((inner = inner->next())) {
+ this->debugValidate();
+ double overS, overE;
+ const SkOpPtT* ics = inner->coinPtTStart();
+ SkASSERT(!ics->deleted());
+ const SkOpSegment* innerCoin = ics->segment();
+ SkASSERT(!innerCoin->done());
+ const SkOpPtT* ios = inner->oppPtTStart();
+ SkASSERT(!ios->deleted());
+ const SkOpSegment* innerOpp = ios->segment();
+ SkASSERT(!innerOpp->done());
+// SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+// SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
+ if (outerCoin == innerCoin) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ if (oce->deleted()) {
+ return;
+ }
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ics->starter(ice),
+ overS, overE, outerOpp, innerOpp, added,
+ ocs->debugEnder(oce),
+ ics->debugEnder(ice));
+ }
+ } else if (outerCoin == innerOpp) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ SkASSERT(!oce->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ SkASSERT(!ioe->deleted());
+ if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ios->starter(ioe),
+ overS, overE, outerOpp, innerCoin, added,
+ ocs->debugEnder(oce),
+ ios->debugEnder(ioe));
+ }
+ } else if (outerOpp == innerCoin) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ SkASSERT(outerCoin != innerOpp);
+ if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ics->starter(ice),
+ overS, overE, outerCoin, innerOpp, added,
+ oos->debugEnder(ooe),
+ ics->debugEnder(ice));
+ }
+ } else if (outerOpp == innerOpp) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ if (ioe->deleted()) {
+ return;
+ }
+ SkASSERT(outerCoin != innerCoin);
+ if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ios->starter(ioe),
+ overS, overE, outerCoin, innerCoin, added,
+ oos->debugEnder(ooe),
+ ios->debugEnder(ioe));
+ }
+ }
+ this->debugValidate();
+ }
+ } while ((outer = outer->next()));
+ // this->restoreHead();
+ return;
+}
+
+// Commented-out lines keep this in sync with release()
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkCoincidentSpans* remove) const {
+ const SkCoincidentSpans* head = coin;
+ const SkCoincidentSpans* prev = nullptr;
+ const SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin == remove) {
+ if (prev) {
+// prev->setNext(next);
+ } else if (head == fHead) {
+// fHead = next;
+ } else {
+// fTop = next;
+ }
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ prev = coin;
+ } while ((coin = next));
+ return;
+}
+
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* deleted) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == deleted
+ || coin->coinPtTEnd()->segment() == deleted
+ || coin->oppPtTStart()->segment() == deleted
+ || coin->oppPtTEnd()->segment() == deleted) {
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ } while ((coin = coin->next()));
+}
+
+// Commented-out lines keep this in sync with expand()
+// expand the range by checking adjacent spans for coincidence
+bool SkOpCoincidence::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return false;
+ }
+ bool expanded = false;
+ do {
+ if (coin->debugExpand(log)) {
+ // check to see if multiple spans expanded so they are now identical
+ const SkCoincidentSpans* test = fHead;
+ do {
+ if (coin == test) {
+ continue;
+ }
+ if (coin->coinPtTStart() == test->coinPtTStart()
+ && coin->oppPtTStart() == test->oppPtTStart()) {
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, fHead, test->coinPtTStart());
+ break;
+ }
+ } while ((test = test->next()));
+ expanded = true;
+ }
+ } while ((coin = coin->next()));
+ return expanded;
+}
+
+// Commented-out lines keep this in sync with mark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
+void SkOpCoincidence::debugMark(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ FAIL_IF(!coin->coinPtTStartWritable()->span()->upCastable(), coin);
+ const SkOpSpan* start = coin->coinPtTStartWritable()->span()->upCast();
+// SkASSERT(start->deleted());
+ const SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+// SkASSERT(end->deleted());
+ const SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+// SkASSERT(oStart->deleted());
+ const SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+// SkASSERT(oEnd->deleted());
+ bool flipped = coin->flipped();
+ if (flipped) {
+ using std::swap;
+ swap(oStart, oEnd);
+ }
+ /* coin and opp spans may not match up. Mark the ends, and then let the interior
+ get marked as many times as the spans allow */
+ start->debugInsertCoincidence(log, oStart->upCast());
+ end->debugInsertCoinEnd(log, oEnd);
+ const SkOpSegment* segment = start->segment();
+ const SkOpSegment* oSegment = oStart->segment();
+ const SkOpSpanBase* next = start;
+ const SkOpSpanBase* oNext = oStart;
+ bool ordered;
+ FAIL_IF(!coin->ordered(&ordered), coin);
+ while ((next = next->upCast()->next()) != end) {
+ FAIL_IF(!next->upCastable(), coin);
+ if (next->upCast()->debugInsertCoincidence(log, oSegment, flipped, ordered), false) {
+ return;
+ }
+ }
+ while ((oNext = oNext->upCast()->next()) != oEnd) {
+ FAIL_IF(!oNext->upCastable(), coin);
+ if (oNext->upCast()->debugInsertCoincidence(log, segment, flipped, ordered), false) {
+ return;
+ }
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+#endif
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with markCollapsed()
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkOpPtT* test) const {
+ const SkCoincidentSpans* head = coin;
+ while (coin) {
+ if (coin->collapsed(test)) {
+ if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ this->debugRelease(log, head, coin);
+ }
+ coin = coin->next();
+ }
+}
+
+// Commented-out lines keep this in sync with markCollapsed()
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* test) const {
+ this->debugMarkCollapsed(log, fHead, test);
+ this->debugMarkCollapsed(log, fTop, test);
+}
+#endif
+
+void SkCoincidentSpans::debugShow() const {
+ SkDebugf("coinSpan - id=%d t=%1.9g tEnd=%1.9g\n", coinPtTStart()->segment()->debugID(),
+ coinPtTStart()->fT, coinPtTEnd()->fT);
+ SkDebugf("coinSpan + id=%d t=%1.9g tEnd=%1.9g\n", oppPtTStart()->segment()->debugID(),
+ oppPtTStart()->fT, oppPtTEnd()->fT);
+}
+
+void SkOpCoincidence::debugShowCoincidence() const {
+#if DEBUG_COINCIDENCE
+ const SkCoincidentSpans* span = fHead;
+ while (span) {
+ span->debugShow();
+ span = span->next();
+ }
+#endif
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkOpSpanBase* next, const SkOpSpanBase* end,
+ double oStart, double oEnd, const SkOpSegment* oSegment,
+ SkPathOpsDebug::GlitchLog* log) {
+ SkASSERT(next != end);
+ SkASSERT(!next->contains(end) || log);
+ if (next->t() > end->t()) {
+ using std::swap;
+ swap(next, end);
+ }
+ do {
+ const SkOpPtT* ptT = next->ptT();
+ int index = 0;
+ bool somethingBetween = false;
+ do {
+ ++index;
+ ptT = ptT->next();
+ const SkOpPtT* checkPtT = next->ptT();
+ if (ptT == checkPtT) {
+ break;
+ }
+ bool looped = false;
+ for (int check = 0; check < index; ++check) {
+ if ((looped = checkPtT == ptT)) {
+ break;
+ }
+ checkPtT = checkPtT->next();
+ }
+ if (looped) {
+ SkASSERT(0);
+ break;
+ }
+ if (ptT->deleted()) {
+ continue;
+ }
+ if (ptT->segment() != oSegment) {
+ continue;
+ }
+ somethingBetween |= between(oStart, ptT->fT, oEnd);
+ } while (true);
+ SkASSERT(somethingBetween);
+ } while (next != end && (next = next->upCast()->next()));
+}
+
+static void DebugCheckOverlap(const SkCoincidentSpans* test, const SkCoincidentSpans* list,
+ SkPathOpsDebug::GlitchLog* log) {
+ if (!list) {
+ return;
+ }
+ const SkOpSegment* coinSeg = test->coinPtTStart()->segment();
+ SkASSERT(coinSeg == test->coinPtTEnd()->segment());
+ const SkOpSegment* oppSeg = test->oppPtTStart()->segment();
+ SkASSERT(oppSeg == test->oppPtTEnd()->segment());
+ SkASSERT(coinSeg != test->oppPtTStart()->segment());
+ SkDEBUGCODE(double tcs = test->coinPtTStart()->fT);
+ SkASSERT(between(0, tcs, 1));
+ SkDEBUGCODE(double tce = test->coinPtTEnd()->fT);
+ SkASSERT(between(0, tce, 1));
+ SkASSERT(tcs < tce);
+ double tos = test->oppPtTStart()->fT;
+ SkASSERT(between(0, tos, 1));
+ double toe = test->oppPtTEnd()->fT;
+ SkASSERT(between(0, toe, 1));
+ SkASSERT(tos != toe);
+ if (tos > toe) {
+ using std::swap;
+ swap(tos, toe);
+ }
+ do {
+ double lcs, lce, los, loe;
+ if (coinSeg == list->coinPtTStart()->segment()) {
+ if (oppSeg != list->oppPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->coinPtTStart()->fT;
+ lce = list->coinPtTEnd()->fT;
+ los = list->oppPtTStart()->fT;
+ loe = list->oppPtTEnd()->fT;
+ if (los > loe) {
+ using std::swap;
+ swap(los, loe);
+ }
+ } else if (coinSeg == list->oppPtTStart()->segment()) {
+ if (oppSeg != list->coinPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->oppPtTStart()->fT;
+ lce = list->oppPtTEnd()->fT;
+ if (lcs > lce) {
+ using std::swap;
+ swap(lcs, lce);
+ }
+ los = list->coinPtTStart()->fT;
+ loe = list->coinPtTEnd()->fT;
+ } else {
+ continue;
+ }
+ SkASSERT(tce < lcs || lce < tcs);
+ SkASSERT(toe < los || loe < tos);
+ } while ((list = list->next()));
+}
+
+
+static void DebugCheckOverlapTop(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // check for overlapping coincident spans
+ const SkCoincidentSpans* test = head;
+ while (test) {
+ const SkCoincidentSpans* next = test->next();
+ DebugCheckOverlap(test, next, log);
+ DebugCheckOverlap(test, opt, log);
+ test = next;
+ }
+}
+
+static void DebugValidate(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ SkASSERT(SkOpCoincidence::Ordered(coin->coinPtTStart()->segment(),
+ coin->oppPtTStart()->segment()));
+ SkASSERT(coin->coinPtTStart()->span()->ptT() == coin->coinPtTStart());
+ SkASSERT(coin->coinPtTEnd()->span()->ptT() == coin->coinPtTEnd());
+ SkASSERT(coin->oppPtTStart()->span()->ptT() == coin->oppPtTStart());
+ SkASSERT(coin->oppPtTEnd()->span()->ptT() == coin->oppPtTEnd());
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif
+
+void SkOpCoincidence::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ DebugValidate(fHead, fTop, nullptr);
+ DebugValidate(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ DebugCheckBetween(coin->coinPtTStart()->span(), coin->coinPtTEnd()->span(),
+ coin->oppPtTStart()->fT, coin->oppPtTEnd()->fT, coin->oppPtTStart()->segment(),
+ log);
+ DebugCheckBetween(coin->oppPtTStart()->span(), coin->oppPtTEnd()->span(),
+ coin->coinPtTStart()->fT, coin->coinPtTEnd()->fT, coin->coinPtTStart()->segment(),
+ log);
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif
+
+void SkOpCoincidence::debugCheckBetween() const {
+#if DEBUG_COINCIDENCE
+ if (fGlobalState->debugCheckHealth()) {
+ return;
+ }
+ DebugCheckBetween(fHead, fTop, nullptr);
+ DebugCheckBetween(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+void SkOpContour::debugCheckHealth(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugCheckHealth(log);
+ } while ((segment = segment->next()));
+}
+
+void SkOpCoincidence::debugCheckValid(SkPathOpsDebug::GlitchLog* log) const {
+#if DEBUG_VALIDATE
+ DebugValidate(fHead, fTop, log);
+ DebugValidate(fTop, nullptr, log);
+#endif
+}
+
+void SkOpCoincidence::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ coin->debugCorrectEnds(log);
+ } while ((coin = coin->next()));
+}
+
+// Commented-out lines keep this in sync with missingCoincidence()
+void SkOpContour::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+// SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+// bool result = false;
+ do {
+ if (segment->debugMissingCoincidence(log), false) {
+// result = true;
+ }
+ segment = segment->next();
+ } while (segment);
+ return;
+}
+
+void SkOpContour::debugMoveMultiples(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ if (segment->debugMoveMultiples(log), false) {
+ return;
+ }
+ } while ((segment = segment->next()));
+ return;
+}
+
+void SkOpContour::debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugMoveNearby(log);
+ } while ((segment = segment->next()));
+}
+#endif
+
+#if DEBUG_COINCIDENCE_ORDER
+void SkOpSegment::debugResetCoinT() const {
+ fDebugBaseIndex = -1;
+ fDebugBaseMin = 1;
+ fDebugBaseMax = -1;
+ fDebugLastIndex = -1;
+ fDebugLastMin = 1;
+ fDebugLastMax = -1;
+}
+#endif
+
+void SkOpSegment::debugValidate() const {
+#if DEBUG_COINCIDENCE_ORDER
+ {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ span->debugResetCoinT();
+ } while (!span->final() && (span = span->upCast()->next()));
+ span = &fHead;
+ int index = 0;
+ do {
+ span->debugSetCoinT(index++);
+ } while (!span->final() && (span = span->upCast()->next()));
+ }
+#endif
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpSpanBase* span = &fHead;
+ double lastT = -1;
+ const SkOpSpanBase* prev = nullptr;
+ int count = 0;
+ int done = 0;
+ do {
+ if (!span->final()) {
+ ++count;
+ done += span->upCast()->done() ? 1 : 0;
+ }
+ SkASSERT(span->segment() == this);
+ SkASSERT(!prev || prev->upCast()->next() == span);
+ SkASSERT(!prev || prev == span->prev());
+ prev = span;
+ double t = span->ptT()->fT;
+ SkASSERT(lastT < t);
+ lastT = t;
+ span->debugValidate();
+ } while (!span->final() && (span = span->upCast()->next()));
+ SkASSERT(count == fCount);
+ SkASSERT(done == fDoneCount);
+ SkASSERT(count >= fDoneCount);
+ SkASSERT(span->final());
+ span->debugValidate();
+#endif
+}
+
+#if DEBUG_COIN
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpSpanBase::debugAddOpp(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+ if (!oppPrev) {
+ return;
+ }
+ this->debugMergeMatches(log, opp);
+ this->ptT()->debugAddOpp(opp->ptT(), oppPrev);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+// Commented-out lines keep this in sync with checkForCollapsedCoincidence()
+void SkOpSpanBase::debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpCoincidence* coins = this->globalState()->coincidence();
+ if (coins->isEmpty()) {
+ return;
+ }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in the loop, see if its opposite is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+ const SkOpPtT* head = this->ptT();
+ const SkOpPtT* test = head;
+ do {
+ if (!test->coincident()) {
+ continue;
+ }
+ coins->debugMarkCollapsed(log, test);
+ } while ((test = test->next()) != head);
+}
+#endif
+
+bool SkOpSpanBase::debugCoinEndLoopCheck() const {
+ int loop = 0;
+ const SkOpSpanBase* next = this;
+ SkOpSpanBase* nextCoin;
+ do {
+ nextCoin = next->fCoinEnd;
+ SkASSERT(nextCoin == this || nextCoin->fCoinEnd != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpanBase* checkCoin = this->fCoinEnd;
+ const SkOpSpanBase* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoinEnd;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident end loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoinEnd()
+void SkOpSpanBase::debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* coin) const {
+ if (containsCoinEnd(coin)) {
+// SkASSERT(coin->containsCoinEnd(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinEnd_Glitch, this, coin);
+// coin->fCoinEnd = this->fCoinEnd;
+// this->fCoinEnd = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with mergeMatches()
+// Look to see if the pt-t linked list contains the same segment more than once;
+// if so, and if each pt-t is directly pointed to by spans in that segment,
+// merge them
+// keep the points, but remove spans so that the segment doesn't have 2 or more
+// spans pointing to the same pt-t loop at different loop elements
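+// e.g. if this span's ptT loop and opp's ptT loop each hold a span-heading ptT
+// on the same segment, merging the loops would leave that segment pointing into
+// the combined loop twice; the removable duplicate is recorded here instead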
+void SkOpSpanBase::debugMergeMatches(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* test = &fPtT;
+ const SkOpPtT* testNext;
+ const SkOpPtT* stop = test;
+ do {
+ testNext = test->next();
+ if (test->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* testBase = test->span();
+ SkASSERT(testBase->ptT() == test);
+ const SkOpSegment* segment = test->segment();
+ if (segment->done()) {
+ continue;
+ }
+ const SkOpPtT* inner = opp->ptT();
+ const SkOpPtT* innerStop = inner;
+ do {
+ if (inner->segment() != segment) {
+ continue;
+ }
+ if (inner->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* innerBase = inner->span();
+ SkASSERT(innerBase->ptT() == inner);
+            // when the intersection is first detected, the span base is marked if there is
+            // more than one point in the intersection.
+// if (!innerBase->hasMultipleHint() && !testBase->hasMultipleHint()) {
+ if (!zero_or_one(inner->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, innerBase, test);
+ } else {
+ SkASSERT(inner->fT != test->fT);
+ if (!zero_or_one(test->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, testBase, inner);
+ } else {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, segment);
+// SkDEBUGCODE(testBase->debugSetDeleted());
+// test->setDeleted();
+// SkDEBUGCODE(innerBase->debugSetDeleted());
+// inner->setDeleted();
+ }
+ }
+#ifdef SK_DEBUG // assert if another undeleted entry points to segment
+ const SkOpPtT* debugInner = inner;
+ while ((debugInner = debugInner->next()) != innerStop) {
+ if (debugInner->segment() != segment) {
+ continue;
+ }
+ if (debugInner->deleted()) {
+ continue;
+ }
+ SkOPASSERT(0);
+ }
+#endif
+ break;
+// }
+ break;
+ } while ((inner = inner->next()) != innerStop);
+ } while ((test = testNext) != stop);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+#endif
+
+void SkOpSpanBase::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ ptT->debugResetCoinT();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+void SkOpSpanBase::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ if (!ptT->deleted()) {
+ ptT->debugSetCoinT(index);
+ }
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+const SkOpSpan* SkOpSpanBase::debugStarter(SkOpSpanBase const** endPtr) const {
+ const SkOpSpanBase* end = *endPtr;
+ SkASSERT(this->segment() == end->segment());
+ const SkOpSpanBase* result;
+ if (t() < end->t()) {
+ result = this;
+ } else {
+ result = end;
+ *endPtr = this;
+ }
+ return result->upCast();
+}
+
+void SkOpSpanBase::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpPtT* ptT = &fPtT;
+ SkASSERT(ptT->span() == this);
+ do {
+// SkASSERT(SkDPoint::RoughlyEqual(fPtT.fPt, ptT->fPt));
+ ptT->debugValidate();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+ SkASSERT(this->debugCoinEndLoopCheck());
+ if (!this->final()) {
+ SkASSERT(this->upCast()->debugCoinLoopCheck());
+ }
+ if (fFromAngle) {
+ fFromAngle->debugValidate();
+ }
+ if (!this->final() && this->upCast()->toAngle()) {
+ this->upCast()->toAngle()->debugValidate();
+ }
+#endif
+}
+
+bool SkOpSpan::debugCoinLoopCheck() const {
+ int loop = 0;
+ const SkOpSpan* next = this;
+ SkOpSpan* nextCoin;
+ do {
+ nextCoin = next->fCoincident;
+ SkASSERT(nextCoin == this || nextCoin->fCoincident != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpan* checkCoin = this->fCoincident;
+ const SkOpSpan* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoincident;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoincidence() in header
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* coin) const {
+ if (containsCoincidence(coin)) {
+// SkASSERT(coin->containsCoincidence(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinStart_Glitch, this, coin);
+// coin->fCoincident = this->fCoincident;
+// this->fCoincident = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with insertCoincidence()
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* segment, bool flipped, bool ordered) const {
+ if (this->containsCoincidence(segment)) {
+ return;
+ }
+ const SkOpPtT* next = &fPtT;
+ while ((next = next->next()) != &fPtT) {
+ if (next->segment() == segment) {
+ const SkOpSpan* span;
+ const SkOpSpanBase* base = next->span();
+ if (!ordered) {
+ const SkOpSpanBase* spanEnd = fNext->contains(segment)->span();
+ const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+ FAIL_IF(!start->span()->upCastable(), this);
+ span = const_cast<SkOpSpan*>(start->span()->upCast());
+ }
+ else if (flipped) {
+ span = base->prev();
+ FAIL_IF(!span, this);
+ }
+ else {
+ FAIL_IF(!base->upCastable(), this);
+ span = base->upCast();
+ }
+ log->record(SkPathOpsDebug::kMarkCoinInsert_Glitch, span);
+ return;
+ }
+ }
+#if DEBUG_COIN
+ log->record(SkPathOpsDebug::kMarkCoinMissing_Glitch, segment, this);
+#endif
+ return;
+}
+#endif
+
+// called only by test code
+int SkIntersections::debugCoincidentUsed() const {
+ if (!fIsCoincident[0]) {
+ SkASSERT(!fIsCoincident[1]);
+ return 0;
+ }
+ int count = 0;
+ SkDEBUGCODE(int count2 = 0;)
+ for (int index = 0; index < fUsed; ++index) {
+ if (fIsCoincident[0] & (1 << index)) {
+ ++count;
+ }
+#ifdef SK_DEBUG
+ if (fIsCoincident[1] & (1 << index)) {
+ ++count2;
+ }
+#endif
+ }
+ SkASSERT(count == count2);
+ return count;
+}
+
+#include "src/pathops/SkOpContour.h"
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpPtT::debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const {
+ SkDEBUGCODE(const SkOpPtT* oldNext = this->fNext);
+ SkASSERT(this != opp);
+// this->fNext = opp;
+ SkASSERT(oppPrev != oldNext);
+// oppPrev->fNext = oldNext;
+}
+
+bool SkOpPtT::debugContains(const SkOpPtT* check) const {
+ SkASSERT(this != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT == check) {
+ return true;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return false;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugContains(const SkOpSegment* check) const {
+ SkASSERT(this->segment() != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT->segment() == check) {
+ return ptT;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return nullptr;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugEnder(const SkOpPtT* end) const {
+ return fT < end->fT ? end : this;
+}
+
+int SkOpPtT::debugLoopLimit(bool report) const {
+ int loop = 0;
+ const SkOpPtT* next = this;
+ do {
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpPtT* checkPtT = this->fNext;
+ const SkOpPtT* innerPtT = checkPtT;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerPtT = innerPtT->fNext;
+ if (checkPtT == innerPtT) {
+ if (report) {
+ SkDebugf("*** bad ptT loop ***\n");
+ }
+ return loop;
+ }
+ }
+ }
+ // there's nothing wrong with extremely large loop counts -- but this may appear to hang
+ // by taking a very long time to figure out that no loop entry is a duplicate
+ // -- and it's likely that a large loop count is indicative of a bug somewhere
+ if (++loop > 1000) {
+ SkDebugf("*** loop count exceeds 1000 ***\n");
+ return 1000;
+ }
+ } while ((next = next->fNext) && next != this);
+ return 0;
+}
+
+const SkOpPtT* SkOpPtT::debugOppPrev(const SkOpPtT* opp) const {
+ return this->oppPrev(const_cast<SkOpPtT*>(opp));
+}
+
+void SkOpPtT::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugResetCoinT();
+#endif
+}
+
+void SkOpPtT::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugSetCoinT(index, fT);
+#endif
+}
+
+void SkOpPtT::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ SkOpPhase phase = contour()->globalState()->phase();
+ if (phase == SkOpPhase::kIntersecting || phase == SkOpPhase::kFixWinding) {
+ return;
+ }
+ SkASSERT(fNext);
+ SkASSERT(fNext != this);
+ SkASSERT(fNext->fNext);
+ SkASSERT(debugLoopLimit(false) == 0);
+#endif
+}
+
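+// prints a scalar as C source: whole values print as ints (3.0f -> "3"),
+// others as trimmed float literals (2.5f -> "2.5f")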
+static void output_scalar(SkScalar num) {
+ if (num == (int) num) {
+ SkDebugf("%d", (int) num);
+ } else {
+ SkString str;
+ str.printf("%1.9g", num);
+ int width = (int) str.size();
+ const char* cStr = str.c_str();
+ while (cStr[width - 1] == '0') {
+ --width;
+ }
+ str.resize(width);
+ SkDebugf("%sf", str.c_str());
+ }
+}
+
+static void output_points(const SkPoint* pts, int count) {
+ for (int index = 0; index < count; ++index) {
+ output_scalar(pts[index].fX);
+ SkDebugf(", ");
+ output_scalar(pts[index].fY);
+ if (index + 1 < count) {
+ SkDebugf(", ");
+ }
+ }
+}
+
+static void showPathContours(SkPath::RawIter& iter, const char* pathName) {
+ uint8_t verb;
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ SkDebugf(" %s.moveTo(", pathName);
+ output_points(&pts[0], 1);
+ SkDebugf(");\n");
+ continue;
+ case SkPath::kLine_Verb:
+ SkDebugf(" %s.lineTo(", pathName);
+ output_points(&pts[1], 1);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kQuad_Verb:
+ SkDebugf(" %s.quadTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kConic_Verb:
+ SkDebugf(" %s.conicTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(", %1.9gf);\n", iter.conicWeight());
+ break;
+ case SkPath::kCubic_Verb:
+ SkDebugf(" %s.cubicTo(", pathName);
+ output_points(&pts[1], 3);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kClose_Verb:
+ SkDebugf(" %s.close();\n", pathName);
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return;
+ }
+ }
+}
+
+static const char* gFillTypeStr[] = {
+ "kWinding_FillType",
+ "kEvenOdd_FillType",
+ "kInverseWinding_FillType",
+ "kInverseEvenOdd_FillType"
+};
+
+void SkPathOpsDebug::ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration) {
+ SkPath::RawIter iter(path);
+#define SUPPORT_RECT_CONTOUR_DETECTION 0
+#if SUPPORT_RECT_CONTOUR_DETECTION
+ int rectCount = path.isRectContours() ? path.rectContours(nullptr, nullptr) : 0;
+ if (rectCount > 0) {
+ SkTDArray<SkRect> rects;
+ SkTDArray<SkPath::Direction> directions;
+ rects.setCount(rectCount);
+ directions.setCount(rectCount);
+ path.rectContours(rects.begin(), directions.begin());
+ for (int contour = 0; contour < rectCount; ++contour) {
+ const SkRect& rect = rects[contour];
+ SkDebugf("path.addRect(%1.9g, %1.9g, %1.9g, %1.9g, %s);\n", rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, directions[contour] == SkPath::kCCW_Direction
+ ? "SkPath::kCCW_Direction" : "SkPath::kCW_Direction");
+ }
+ return;
+ }
+#endif
+ SkPath::FillType fillType = path.getFillType();
+ SkASSERT(fillType >= SkPath::kWinding_FillType && fillType <= SkPath::kInverseEvenOdd_FillType);
+ if (includeDeclaration) {
+ SkDebugf(" SkPath %s;\n", name);
+ }
+ SkDebugf(" %s.setFillType(SkPath::%s);\n", name, gFillTypeStr[fillType]);
+ iter.setPath(path);
+ showPathContours(iter, name);
+}
+
+#if DEBUG_DUMP_VERIFY
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+
+static void dump_path(FILE* file, const SkPath& path, bool force, bool dumpAsHex) {
+ SkDynamicMemoryWStream wStream;
+ path.dump(&wStream, force, dumpAsHex);
+ sk_sp<SkData> data(wStream.detachAsData());
+ fprintf(file, "%.*s\n", (int) data->size(), (char*) data->data());
+}
+
+static int dumpID = 0;
+
+void SkPathOpsDebug::DumpOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName) {
+ FILE* file = sk_fopen("op_dump.txt", kWrite_SkFILE_Flag);
+ DumpOp(file, one, two, op, testName);
+}
+
+void SkPathOpsDebug::DumpOp(FILE* file, const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName) {
+ const char* name = testName ? testName : "op";
+ fprintf(file,
+ "\nstatic void %s_%d(skiatest::Reporter* reporter, const char* filename) {\n",
+ name, ++dumpID);
+ fprintf(file, " SkPath path;\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", one.getFillType());
+ dump_path(file, one, false, true);
+ fprintf(file, " SkPath path1(path);\n");
+ fprintf(file, " path.reset();\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", two.getFillType());
+ dump_path(file, two, false, true);
+ fprintf(file, " SkPath path2(path);\n");
+ fprintf(file, " testPathOp(reporter, path1, path2, (SkPathOp) %d, filename);\n", op);
+ fprintf(file, "}\n\n");
+ fclose(file);
+}
+
+void SkPathOpsDebug::DumpSimplify(const SkPath& path, const char* testName) {
+ FILE* file = sk_fopen("simplify_dump.txt", kWrite_SkFILE_Flag);
+ DumpSimplify(file, path, testName);
+}
+
+void SkPathOpsDebug::DumpSimplify(FILE* file, const SkPath& path, const char* testName) {
+ const char* name = testName ? testName : "simplify";
+ fprintf(file,
+ "\nstatic void %s_%d(skiatest::Reporter* reporter, const char* filename) {\n",
+ name, ++dumpID);
+ fprintf(file, " SkPath path;\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", path.getFillType());
+ dump_path(file, path, false, true);
+ fprintf(file, " testSimplify(reporter, path, filename);\n");
+ fprintf(file, "}\n\n");
+ fclose(file);
+}
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+
+const int bitWidth = 64;
+const int bitHeight = 64;
+
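+// fits both paths into the 64x64 comparison bitmap and nudges the scaled
+// bounds so they stay inside the +/-16000 rect the callers use as a region clip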
+static void debug_scale_matrix(const SkPath& one, const SkPath* two, SkMatrix& scale) {
+ SkRect larger = one.getBounds();
+ if (two) {
+ larger.join(two->getBounds());
+ }
+ SkScalar largerWidth = larger.width();
+ if (largerWidth < 4) {
+ largerWidth = 4;
+ }
+ SkScalar largerHeight = larger.height();
+ if (largerHeight < 4) {
+ largerHeight = 4;
+ }
+ SkScalar hScale = (bitWidth - 2) / largerWidth;
+ SkScalar vScale = (bitHeight - 2) / largerHeight;
+ scale.reset();
+ scale.preScale(hScale, vScale);
+ larger.fLeft *= hScale;
+ larger.fRight *= hScale;
+ larger.fTop *= vScale;
+ larger.fBottom *= vScale;
+ SkScalar dx = -16000 > larger.fLeft ? -16000 - larger.fLeft
+ : 16000 < larger.fRight ? 16000 - larger.fRight : 0;
+ SkScalar dy = -16000 > larger.fTop ? -16000 - larger.fTop
+ : 16000 < larger.fBottom ? 16000 - larger.fBottom : 0;
+ scale.preTranslate(dx, dy);
+}
+
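+// draws the two paths side by side into one bitmap and counts the 2x2 pixel
+// blocks that differ wholesale, so isolated single-pixel differences (e.g.
+// from antialiasing) are not counted as errors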
+static int debug_paths_draw_the_same(const SkPath& one, const SkPath& two, SkBitmap& bits) {
+ if (bits.width() == 0) {
+ bits.allocN32Pixels(bitWidth * 2, bitHeight);
+ }
+ SkCanvas canvas(bits);
+ canvas.drawColor(SK_ColorWHITE);
+ SkPaint paint;
+ canvas.save();
+ const SkRect& bounds1 = one.getBounds();
+ canvas.translate(-bounds1.fLeft + 1, -bounds1.fTop + 1);
+ canvas.drawPath(one, paint);
+ canvas.restore();
+ canvas.save();
+ canvas.translate(-bounds1.fLeft + 1 + bitWidth, -bounds1.fTop + 1);
+ canvas.drawPath(two, paint);
+ canvas.restore();
+ int errors = 0;
+ for (int y = 0; y < bitHeight - 1; ++y) {
+ uint32_t* addr1 = bits.getAddr32(0, y);
+ uint32_t* addr2 = bits.getAddr32(0, y + 1);
+ uint32_t* addr3 = bits.getAddr32(bitWidth, y);
+ uint32_t* addr4 = bits.getAddr32(bitWidth, y + 1);
+ for (int x = 0; x < bitWidth - 1; ++x) {
+ // count 2x2 blocks
+ bool err = addr1[x] != addr3[x];
+ if (err) {
+ errors += addr1[x + 1] != addr3[x + 1]
+ && addr2[x] != addr4[x] && addr2[x + 1] != addr4[x + 1];
+ }
+ }
+ }
+ return errors;
+}
+
+void SkPathOpsDebug::ReportOpFail(const SkPath& one, const SkPath& two, SkPathOp op) {
+ SkDebugf("// Op did not expect failure\n");
+ DumpOp(stderr, one, two, op, "opTest");
+ fflush(stderr);
+}
+
+void SkPathOpsDebug::VerifyOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const SkPath& result) {
+ SkPath pathOut, scaledPathOut;
+ SkRegion rgnA, rgnB, openClip, rgnOut;
+ openClip.setRect(-16000, -16000, 16000, 16000);
+ rgnA.setPath(one, openClip);
+ rgnB.setPath(two, openClip);
+ rgnOut.op(rgnA, rgnB, (SkRegion::Op) op);
+ rgnOut.getBoundaryPath(&pathOut);
+ SkMatrix scale;
+ debug_scale_matrix(one, &two, scale);
+ SkRegion scaledRgnA, scaledRgnB, scaledRgnOut;
+ SkPath scaledA, scaledB;
+ scaledA.addPath(one, scale);
+ scaledA.setFillType(one.getFillType());
+ scaledB.addPath(two, scale);
+ scaledB.setFillType(two.getFillType());
+ scaledRgnA.setPath(scaledA, openClip);
+ scaledRgnB.setPath(scaledB, openClip);
+ scaledRgnOut.op(scaledRgnA, scaledRgnB, (SkRegion::Op) op);
+ scaledRgnOut.getBoundaryPath(&scaledPathOut);
+ SkBitmap bitmap;
+ SkPath scaledOut;
+ scaledOut.addPath(result, scale);
+ scaledOut.setFillType(result.getFillType());
+ int errors = debug_paths_draw_the_same(scaledPathOut, scaledOut, bitmap);
+ const int MAX_ERRORS = 9;
+ if (errors > MAX_ERRORS) {
+ fprintf(stderr, "// Op did not expect errors=%d\n", errors);
+ DumpOp(stderr, one, two, op, "opTest");
+ fflush(stderr);
+ }
+}
+
+void SkPathOpsDebug::ReportSimplifyFail(const SkPath& path) {
+ SkDebugf("// Simplify did not expect failure\n");
+ DumpSimplify(stderr, path, "simplifyTest");
+ fflush(stderr);
+}
+
+void SkPathOpsDebug::VerifySimplify(const SkPath& path, const SkPath& result) {
+ SkPath pathOut, scaledPathOut;
+ SkRegion rgnA, openClip, rgnOut;
+ openClip.setRect(-16000, -16000, 16000, 16000);
+ rgnA.setPath(path, openClip);
+ rgnOut.getBoundaryPath(&pathOut);
+ SkMatrix scale;
+ debug_scale_matrix(path, nullptr, scale);
+ SkRegion scaledRgnA;
+ SkPath scaledA;
+ scaledA.addPath(path, scale);
+ scaledA.setFillType(path.getFillType());
+ scaledRgnA.setPath(scaledA, openClip);
+ scaledRgnA.getBoundaryPath(&scaledPathOut);
+ SkBitmap bitmap;
+ SkPath scaledOut;
+ scaledOut.addPath(result, scale);
+ scaledOut.setFillType(result.getFillType());
+ int errors = debug_paths_draw_the_same(scaledPathOut, scaledOut, bitmap);
+ const int MAX_ERRORS = 9;
+ if (errors > MAX_ERRORS) {
+ fprintf(stderr, "// Simplify did not expect errors=%d\n", errors);
+ DumpSimplify(stderr, path, "simplifyTest");
+ fflush(stderr);
+ }
+}
+
+#endif
+
+// global path dumps for use from the Visual Studio 2017 Immediate Window
+void Dump(const SkPath& path) {
+ path.dump();
+}
+
+void DumpHex(const SkPath& path) {
+ path.dumpHex();
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.h b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
new file mode 100644
index 0000000000..dca0102443
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsDebug_DEFINED
+#define SkPathOpsDebug_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+enum class SkOpPhase : char;
+struct SkDQuad;
+class SkOpAngle;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpContourHead;
+class SkOpPtT;
+class SkOpSegment;
+class SkOpSpan;
+class SkOpSpanBase;
+struct SkDPoint;
+struct SkDLine;
+struct SkDQuad;
+struct SkDConic;
+struct SkDCubic;
+class SkTSect;
+
+// define this when running a fuzzer
+// #define IS_FUZZING_WITH_LIBFUZZER
+
+// dummy classes to fool the Visual Studio 2018 Immediate Window
+#define DummyClasses(a, b) \
+class SkDebugTCoincident##a##b; \
+class SkDebugTSect##a##b; \
+class SkDebugTSpan##a##b
+
+DummyClasses(Quad, Quad);
+DummyClasses(Conic, Quad);
+DummyClasses(Conic, Conic);
+DummyClasses(Cubic, Quad);
+DummyClasses(Cubic, Conic);
+DummyClasses(Cubic, Cubic);
+
+#undef DummyClasses
+
+#ifdef SK_RELEASE
+#define FORCE_RELEASE 1
+#else
+#define FORCE_RELEASE 1 // set force release to 1 for multi-threaded runs -- no debugging
+#endif
+
+#define DEBUG_UNDER_DEVELOPMENT 0
+
+#define ONE_OFF_DEBUG 0
+#define ONE_OFF_DEBUG_MATHEMATICA 0
+
+#if defined(SK_BUILD_FOR_WIN) || defined(SK_BUILD_FOR_ANDROID)
+ #define SK_RAND(seed) rand()
+#else
+ #define SK_RAND(seed) rand_r(&seed)
+#endif
+#ifdef SK_BUILD_FOR_WIN
+ #define SK_SNPRINTF _snprintf
+#else
+ #define SK_SNPRINTF snprintf
+#endif
+
+#define WIND_AS_STRING(x) char x##Str[12]; \
+ if (!SkPathOpsDebug::ValidWind(x)) strcpy(x##Str, "?"); \
+ else SK_SNPRINTF(x##Str, sizeof(x##Str), "%d", x)
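+// Illustrative expansion, assuming a local int named winding: WIND_AS_STRING(winding)
+// declares char windingStr[12], filled with "?" when ValidWind rejects the value and
+// with the decimal rendering of winding otherwise.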
+
+#if FORCE_RELEASE
+
+#define DEBUG_ACTIVE_OP 0
+#define DEBUG_ACTIVE_SPANS 0
+#define DEBUG_ADD_INTERSECTING_TS 0
+#define DEBUG_ADD_T 0
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 0
+#define DEBUG_ASSEMBLE 0
+#define DEBUG_COINCIDENCE 0 // sanity checking
+#define DEBUG_COINCIDENCE_DUMP 0 // accumulate and dump which algorithms fired
+#define DEBUG_COINCIDENCE_ORDER 0 // for well behaved curves, check if pairs match up in t-order
+#define DEBUG_COINCIDENCE_VERBOSE 0 // usually whether the next function generates coincidence
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 0
+#define DEBUG_DUMP_SEGMENTS 0
+#define DEBUG_DUMP_VERIFY 0
+#define DEBUG_FLOW 0
+#define DEBUG_LIMIT_WIND_SUM 0
+#define DEBUG_MARK_DONE 0
+#define DEBUG_PATH_CONSTRUCTION 0
+#define DEBUG_PERP 0
+#define DEBUG_SHOW_TEST_NAME 0
+#define DEBUG_SORT 0
+#define DEBUG_T_SECT 0
+#define DEBUG_T_SECT_DUMP 0
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 0
+#define DEBUG_WINDING 0
+#define DEBUG_WINDING_AT_T 0
+
+#else
+
+#define DEBUG_ACTIVE_OP 1
+#define DEBUG_ACTIVE_SPANS 1
+#define DEBUG_ADD_INTERSECTING_TS 1
+#define DEBUG_ADD_T 1
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 1
+#define DEBUG_ASSEMBLE 1
+#define DEBUG_COINCIDENCE 1
+#define DEBUG_COINCIDENCE_DUMP 0
+#define DEBUG_COINCIDENCE_ORDER 0 // tight arc quads may generate out-of-order coincidence spans
+#define DEBUG_COINCIDENCE_VERBOSE 1
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 1
+#define DEBUG_DUMP_VERIFY 0
+#define DEBUG_DUMP_SEGMENTS 1
+#define DEBUG_FLOW 1
+#define DEBUG_LIMIT_WIND_SUM 15
+#define DEBUG_MARK_DONE 1
+#define DEBUG_PATH_CONSTRUCTION 1
+#define DEBUG_PERP 1
+#define DEBUG_SHOW_TEST_NAME 1
+#define DEBUG_SORT 1
+#define DEBUG_T_SECT 0 // enabling may trigger validate asserts even though op does not fail
+#define DEBUG_T_SECT_DUMP 0 // Use 1 normally. Use 2 to number segments, 3 for script output
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 1
+#define DEBUG_WINDING 1
+#define DEBUG_WINDING_AT_T 1
+
+#endif
+
+#ifdef SK_RELEASE
+ #define SkDEBUGRELEASE(a, b) b
+ #define SkDEBUGPARAMS(...)
+#else
+ #define SkDEBUGRELEASE(a, b) a
+ #define SkDEBUGPARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_VALIDATE == 0
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...)
+#else
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT == 0
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) b
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...)
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...)
+#else
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) a
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...) , __VA_ARGS__
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...) __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT_DUMP > 1
+ extern int gDumpTSectNum;
+#endif
+
+#if DEBUG_COINCIDENCE || DEBUG_COINCIDENCE_DUMP
+ #define DEBUG_COIN 1
+#else
+ #define DEBUG_COIN 0
+#endif
+
+#if DEBUG_COIN
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ int lineNo, SkOpPhase phase, int iteration
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ __LINE__, SkOpPhase::kNoChange, 0
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ __LINE__, SkOpPhase::kNoChange, iteration
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ __LINE__, SkOpPhase::phase, 0
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+#elif DEBUG_VALIDATE
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ SkOpPhase phase
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ SkOpPhase::kNoChange
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ SkOpPhase::kNoChange
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ SkOpPhase::phase
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(phase)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(phase)
+#else
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_DECLARE_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_COIN_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_ITER_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_PHASE_PARAMS(phase)
+ #define DEBUG_SET_PHASE()
+ #define DEBUG_STATIC_SET_PHASE(obj)
+#endif
+
+#define CUBIC_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define CONIC_DEBUG_STR "{{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}, %1.9g}"
+#define QUAD_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define LINE_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define PT_DEBUG_STR "{{%1.9g,%1.9g}}"
+
+#define T_DEBUG_STR(t, n) #t "[" #n "]=%1.9g"
+#define TX_DEBUG_STR(t) #t "[%d]=%1.9g"
+#define CUBIC_DEBUG_DATA(c) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, c[3].fX, c[3].fY
+#define CONIC_DEBUG_DATA(c, w) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, w
+#define QUAD_DEBUG_DATA(q) q[0].fX, q[0].fY, q[1].fX, q[1].fY, q[2].fX, q[2].fY
+#define LINE_DEBUG_DATA(l) l[0].fX, l[0].fY, l[1].fX, l[1].fY
+#define PT_DEBUG_DATA(i, n) i.pt(n).asSkPoint().fX, i.pt(n).asSkPoint().fY
+
+#ifndef DEBUG_TEST
+#define DEBUG_TEST 0
+#endif
+
+#if DEBUG_SHOW_TEST_NAME
+#include "src/core/SkTLS.h"
+#endif
+
+// Tests with extreme numbers may fail, but all other tests should never fail.
+#define FAIL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return false; } while (false)
+
+#define FAIL_WITH_NULL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return nullptr; } while (false)
+
+// Some functions serve two masters: one caller allows the function to fail, the other always
+// expects success. If abort is true, tests with normal numbers must not fail, and assert if
+// they do. If abort is false, both normal and extreme numbers may return false without asserting.
+#define RETURN_FALSE_IF(abort, cond) \
+ do { bool fail = (cond); SkOPASSERT(!(abort) || !fail); if (fail) return false; \
+ } while (false)
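+// Hypothetical usage sketch (checkedStep and its parameters are illustrative, not Skia names):
+//   bool checkedStep(int count, bool allowFail) {
+//       FAIL_IF(count < 0);                       // may fire for extreme inputs
+//       RETURN_FALSE_IF(!allowFail, count == 0);  // asserts only when !allowFail
+//       return true;
+//   }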
+
+class SkPathOpsDebug {
+public:
+#if DEBUG_COIN
+ struct GlitchLog;
+
+ enum GlitchType {
+ kUninitialized_Glitch,
+ kAddCorruptCoin_Glitch,
+ kAddExpandedCoin_Glitch,
+ kAddExpandedFail_Glitch,
+ kAddIfCollapsed_Glitch,
+ kAddIfMissingCoin_Glitch,
+ kAddMissingCoin_Glitch,
+ kAddMissingExtend_Glitch,
+ kAddOrOverlap_Glitch,
+ kCollapsedCoin_Glitch,
+ kCollapsedDone_Glitch,
+ kCollapsedOppValue_Glitch,
+ kCollapsedSpan_Glitch,
+ kCollapsedWindValue_Glitch,
+ kCorrectEnd_Glitch,
+ kDeletedCoin_Glitch,
+ kExpandCoin_Glitch,
+ kFail_Glitch,
+ kMarkCoinEnd_Glitch,
+ kMarkCoinInsert_Glitch,
+ kMarkCoinMissing_Glitch,
+ kMarkCoinStart_Glitch,
+ kMergeMatches_Glitch,
+ kMissingCoin_Glitch,
+ kMissingDone_Glitch,
+ kMissingIntersection_Glitch,
+ kMoveMultiple_Glitch,
+ kMoveNearbyClearAll_Glitch,
+ kMoveNearbyClearAll2_Glitch,
+ kMoveNearbyMerge_Glitch,
+ kMoveNearbyMergeFinal_Glitch,
+ kMoveNearbyRelease_Glitch,
+ kMoveNearbyReleaseFinal_Glitch,
+ kReleasedSpan_Glitch,
+ kReturnFalse_Glitch,
+ kUnaligned_Glitch,
+ kUnalignedHead_Glitch,
+ kUnalignedTail_Glitch,
+ };
+
+ struct CoinDictEntry {
+ int fIteration;
+ int fLineNumber;
+ GlitchType fGlitchType;
+ const char* fFunctionName;
+ };
+
+ struct CoinDict {
+ void add(const CoinDictEntry& key);
+ void add(const CoinDict& dict);
+ void dump(const char* str, bool visitCheck) const;
+ SkTDArray<CoinDictEntry> fDict;
+ };
+
+ static CoinDict gCoinSumChangedDict;
+ static CoinDict gCoinSumVisitedDict;
+ static CoinDict gCoinVistedDict;
+#endif
+
+#if defined(SK_DEBUG) || !FORCE_RELEASE
+ static int gContourID;
+ static int gSegmentID;
+#endif
+
+#if DEBUG_SORT
+ static int gSortCountDefault;
+ static int gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+ static const char* kPathOpStr[];
+#endif
+ static bool gRunFail;
+ static bool gVeryVerbose;
+
+#if DEBUG_ACTIVE_SPANS
+ static SkString gActiveSpans;
+#endif
+#if DEBUG_DUMP_VERIFY
+ static bool gDumpOp;
+ static bool gVerifyOp;
+#endif
+
+ static const char* OpStr(SkPathOp );
+ static void MathematicaIze(char* str, size_t bufferSize);
+ static bool ValidWind(int winding);
+ static void WindingPrintf(int winding);
+
+#if DEBUG_SHOW_TEST_NAME
+ static void* CreateNameStr();
+ static void DeleteNameStr(void* v);
+#define DEBUG_FILENAME_STRING_LENGTH 64
+#define DEBUG_FILENAME_STRING (reinterpret_cast<char* >(SkTLS::Get(SkPathOpsDebug::CreateNameStr, \
+ SkPathOpsDebug::DeleteNameStr)))
+ static void BumpTestName(char* );
+#endif
+ static void ShowActiveSpans(SkOpContourHead* contourList);
+ static void ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration);
+ static void ShowPath(const SkPath& one, const SkPath& two, SkPathOp op, const char* name);
+
+ static bool ChaseContains(const SkTDArray<SkOpSpanBase*>& , const SkOpSpanBase* );
+
+ static void CheckHealth(class SkOpContourHead* contourList);
+
+#if DEBUG_COIN
+ static void DumpCoinDict();
+ static void DumpGlitchType(GlitchType );
+#endif
+
+};
+
+// Visual Studio 2017 does not permit calling member functions from the Immediate Window.
+// Global functions work fine, however. Use globals rather than static members inside a class.
+const SkOpAngle* AngleAngle(const SkOpAngle*, int id);
+SkOpContour* AngleContour(SkOpAngle*, int id);
+const SkOpPtT* AnglePtT(const SkOpAngle*, int id);
+const SkOpSegment* AngleSegment(const SkOpAngle*, int id);
+const SkOpSpanBase* AngleSpan(const SkOpAngle*, int id);
+
+const SkOpAngle* ContourAngle(SkOpContour*, int id);
+SkOpContour* ContourContour(SkOpContour*, int id);
+const SkOpPtT* ContourPtT(SkOpContour*, int id);
+const SkOpSegment* ContourSegment(SkOpContour*, int id);
+const SkOpSpanBase* ContourSpan(SkOpContour*, int id);
+
+const SkOpAngle* CoincidenceAngle(SkOpCoincidence*, int id);
+SkOpContour* CoincidenceContour(SkOpCoincidence*, int id);
+const SkOpPtT* CoincidencePtT(SkOpCoincidence*, int id);
+const SkOpSegment* CoincidenceSegment(SkOpCoincidence*, int id);
+const SkOpSpanBase* CoincidenceSpan(SkOpCoincidence*, int id);
+
+const SkOpAngle* PtTAngle(const SkOpPtT*, int id);
+SkOpContour* PtTContour(SkOpPtT*, int id);
+const SkOpPtT* PtTPtT(const SkOpPtT*, int id);
+const SkOpSegment* PtTSegment(const SkOpPtT*, int id);
+const SkOpSpanBase* PtTSpan(const SkOpPtT*, int id);
+
+const SkOpAngle* SegmentAngle(const SkOpSegment*, int id);
+SkOpContour* SegmentContour(SkOpSegment*, int id);
+const SkOpPtT* SegmentPtT(const SkOpSegment*, int id);
+const SkOpSegment* SegmentSegment(const SkOpSegment*, int id);
+const SkOpSpanBase* SegmentSpan(const SkOpSegment*, int id);
+
+const SkOpAngle* SpanAngle(const SkOpSpanBase*, int id);
+SkOpContour* SpanContour(SkOpSpanBase*, int id);
+const SkOpPtT* SpanPtT(const SkOpSpanBase*, int id);
+const SkOpSegment* SpanSegment(const SkOpSpanBase*, int id);
+const SkOpSpanBase* SpanSpan(const SkOpSpanBase*, int id);
+
+#if DEBUG_DUMP_VERIFY
+void DumpOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName);
+void DumpOp(FILE* file, const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName);
+void DumpSimplify(const SkPath& path, const char* testName);
+void DumpSimplify(FILE* file, const SkPath& path, const char* testName);
+void ReportOpFail(const SkPath& one, const SkPath& two, SkPathOp op);
+void ReportSimplifyFail(const SkPath& path);
+void VerifyOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const SkPath& result);
+void VerifySimplify(const SkPath& path, const SkPath& result);
+#endif
+
+// global path dumps for Visual Studio 2017 to use from the Immediate Window
+void Dump(const SkOpContour& );
+void DumpAll(const SkOpContour& );
+void DumpAngles(const SkOpContour& );
+void DumpContours(const SkOpContour& );
+void DumpContoursAll(const SkOpContour& );
+void DumpContoursAngles(const SkOpContour& );
+void DumpContoursPts(const SkOpContour& );
+void DumpContoursPt(const SkOpContour& , int segmentID);
+void DumpContoursSegment(const SkOpContour& , int segmentID);
+void DumpContoursSpan(const SkOpContour& , int segmentID);
+void DumpContoursSpans(const SkOpContour& );
+void DumpPt(const SkOpContour& , int );
+void DumpPts(const SkOpContour& , const char* prefix = "seg");
+void DumpSegment(const SkOpContour& , int );
+void DumpSegments(const SkOpContour& , const char* prefix = "seg", SkPathOp op = (SkPathOp) -1);
+void DumpSpan(const SkOpContour& , int );
+void DumpSpans(const SkOpContour& );
+
+void Dump(const SkOpSegment& );
+void DumpAll(const SkOpSegment& );
+void DumpAngles(const SkOpSegment& );
+void DumpCoin(const SkOpSegment& );
+void DumpPts(const SkOpSegment& , const char* prefix = "seg");
+
+void Dump(const SkOpPtT& );
+void DumpAll(const SkOpPtT& );
+
+void Dump(const SkOpSpanBase& );
+void DumpCoin(const SkOpSpanBase& );
+void DumpAll(const SkOpSpanBase& );
+
+void DumpCoin(const SkOpSpan& );
+bool DumpSpan(const SkOpSpan& );
+
+void Dump(const SkDConic& );
+void DumpID(const SkDConic& , int id);
+
+void Dump(const SkDCubic& );
+void DumpID(const SkDCubic& , int id);
+
+void Dump(const SkDLine& );
+void DumpID(const SkDLine& , int id);
+
+void Dump(const SkDQuad& );
+void DumpID(const SkDQuad& , int id);
+
+void Dump(const SkDPoint& );
+
+void Dump(const SkOpAngle& );
+
+// generates tools/path_sorter.htm and path_visualizer.htm compatible data
+void DumpQ(const SkDQuad& quad1, const SkDQuad& quad2, int testNo);
+void DumpT(const SkDQuad& quad, double t);
+
+// global path dumps for Visual Studio 2017 to use from the Immediate Window
+void Dump(const SkPath& path);
+void DumpHex(const SkPath& path);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
new file mode 100644
index 0000000000..9003547036
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsLine.h"
+
+SkDPoint SkDLine::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[1];
+ }
+ double one_t = 1 - t;
+ SkDPoint result = { one_t * fPts[0].fX + t * fPts[1].fX, one_t * fPts[0].fY + t * fPts[1].fY };
+ return result;
+}
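+// For example, ptAtT(0.5) on the line {{0,0}, {2,4}} computes
+// 0.5 * 0 + 0.5 * 2 == 1 and 0.5 * 0 + 0.5 * 4 == 2, the midpoint {1,2}.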
+
+double SkDLine::exactPoint(const SkDPoint& xy) const {
+ if (xy == fPts[0]) { // do cheapest test first
+ return 0;
+ }
+ if (xy == fPts[1]) {
+ return 1;
+ }
+ return -1;
+}
+
+double SkDLine::nearPoint(const SkDPoint& xy, bool* unequal) const {
+ if (!AlmostBetweenUlps(fPts[0].fX, xy.fX, fPts[1].fX)
+ || !AlmostBetweenUlps(fPts[0].fY, xy.fY, fPts[1].fY)) {
+ return -1;
+ }
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ if (!between(0, numer, denom)) {
+ return -1;
+ }
+ if (!denom) {
+ return 0;
+ }
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps_Pin(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ if (unequal) {
+ *unequal = (float) largest != (float) (largest + dist);
+ }
+ t = SkPinT(t); // a looser pin breaks skpwww_lptemp_com_3
+ SkASSERT(between(0, t, 1));
+ return t;
+}
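+// In vector terms: with P = xy, A = fPts[0], B = fPts[1], the code above computes
+// t = ((P - A) . (B - A)) / |B - A|^2 (numer / denom); t in [0,1] means the foot
+// of the perpendicular falls within the segment.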
+
+bool SkDLine::nearRay(const SkDPoint& xy) const {
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+}
+
+double SkDLine::ExactPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (xy.fY == y) {
+ if (xy.fX == left) {
+ return 0;
+ }
+ if (xy.fX == right) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (!AlmostBequalUlps(xy.fY, y)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(left, xy.fX, right)) {
+ return -1;
+ }
+ double t = (xy.fX - left) / (right - left);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtX = (1 - t) * left + t * right;
+ SkDVector distU = {xy.fY - y, xy.fX - realPtX};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(y, left), right);
+ double largest = SkTMax(SkTMax(y, left), right);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
+
+double SkDLine::ExactPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (xy.fX == x) {
+ if (xy.fY == top) {
+ return 0;
+ }
+ if (xy.fY == bottom) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (!AlmostBequalUlps(xy.fX, x)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(top, xy.fY, bottom)) {
+ return -1;
+ }
+ double t = (xy.fY - top) / (bottom - top);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtY = (1 - t) * top + t * bottom;
+ SkDVector distU = {xy.fX - x, xy.fY - realPtY};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(x, top), bottom);
+ double largest = SkTMax(SkTMax(x, top), bottom);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.h b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
new file mode 100644
index 0000000000..0c6c7ea6e4
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsLine_DEFINED
+#define SkPathOpsLine_DEFINED
+
+#include "src/pathops/SkPathOpsPoint.h"
+
+struct SkDLine {
+ SkDPoint fPts[2];
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+
+ const SkDLine& set(const SkPoint pts[2]) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ return *this;
+ }
+
+ double exactPoint(const SkDPoint& xy) const;
+ static double ExactPointH(const SkDPoint& xy, double left, double right, double y);
+ static double ExactPointV(const SkDPoint& xy, double top, double bottom, double x);
+
+ double nearPoint(const SkDPoint& xy, bool* unequal) const;
+ bool nearRay(const SkDPoint& xy) const;
+ static double NearPointH(const SkDPoint& xy, double left, double right, double y);
+ static double NearPointV(const SkDPoint& xy, double top, double bottom, double x);
+ SkDPoint ptAtT(double t) const;
+
+ void dump() const;
+ void dumpID(int ) const;
+ void dumpInner() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
new file mode 100644
index 0000000000..825ae7b197
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <utility>
+
+static bool findChaseOp(SkTDArray<SkOpSpanBase*>& chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, SkOpSegment** result) {
+ while (chase.count()) {
+ SkOpSpanBase* span;
+ chase.pop(&span);
+ // OPTIMIZE: prev makes this compatible with old code -- but is it necessary?
+ *startPtr = span->ptT()->prev()->span();
+ SkOpSegment* segment = (*startPtr)->segment();
+ bool done = true;
+ *endPtr = nullptr;
+ if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+ *startPtr = last->start();
+ *endPtr = last->end();
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ *result = last->segment();
+ return true;
+ }
+ if (done) {
+ continue;
+ }
+ int winding;
+ bool sortable;
+ const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+ if (!angle) {
+ *result = nullptr;
+ return true;
+ }
+ if (winding == SK_MinS32) {
+ continue;
+ }
+ int sumMiWinding, sumSuWinding;
+ if (sortable) {
+ segment = angle->segment();
+ sumMiWinding = segment->updateWindingReverse(angle);
+ if (sumMiWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ *result = nullptr;
+ return true;
+ }
+ sumSuWinding = segment->updateOppWindingReverse(angle);
+ if (sumSuWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ *result = nullptr;
+ return true;
+ }
+ if (segment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* first = nullptr;
+ const SkOpAngle* firstAngle = angle;
+ while ((angle = angle->next()) != firstAngle) {
+ segment = angle->segment();
+ SkOpSpanBase* start = angle->start();
+ SkOpSpanBase* end = angle->end();
+ int maxWinding = 0, sumWinding = 0, oppMaxWinding = 0, oppSumWinding = 0;
+ if (sortable) {
+ segment->setUpWindings(start, end, &sumMiWinding, &sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ }
+ if (!segment->done(angle)) {
+ if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+ first = segment;
+ *startPtr = start;
+ *endPtr = end;
+ }
+ // OPTIMIZATION: should this also add to the chase?
+ if (sortable) {
+ if (!segment->markAngle(maxWinding, sumWinding, oppMaxWinding,
+ oppSumWinding, angle, nullptr)) {
+ return false;
+ }
+ }
+ }
+ }
+ if (first) {
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ *result = first;
+ return true;
+ }
+ }
+ *result = nullptr;
+ return true;
+}
+
+static bool bridgeOp(SkOpContourHead* contourList, const SkPathOp op,
+ const int xorMask, const int xorOpMask, SkPathWriter* writer) {
+ bool unsortable = false;
+ bool lastSimple = false;
+ bool simple = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeOp(start, end, xorMask, xorOpMask, op)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ lastSimple = simple;
+ SkOpSegment* next = current->findNextOp(&chase, &nextStart, &nextEnd,
+ &unsortable, &simple, op, xorMask, xorOpMask);
+ if (!next) {
+ if (!unsortable && writer->hasMove()
+ && current->verb() != SkPath::kLine_Verb
+ && !writer->isClosed()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ if (!writer->isClosed()) {
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ } else if (lastSimple) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ }
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ writer->finishContour();
+ } else {
+ SkOpSpanBase* last;
+ if (!current->markAndChaseDone(start, end, &last)) {
+ return false;
+ }
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ if (!findChaseOp(chase, &start, &end, &current)) {
+ return false;
+ }
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
+
+// diagram of why this simplification is possible is here:
+// https://skia.org/dev/present/pathops link at bottom of the page
+// https://drive.google.com/file/d/0BwoLUwz9PYkHLWpsaXd0UDdaN00/view?usp=sharing
+static const SkPathOp gOpInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+// inside minuend outside minuend
+// inside subtrahend outside subtrahend inside subtrahend outside subtrahend
+{{ kDifference_SkPathOp, kIntersect_SkPathOp }, { kUnion_SkPathOp, kReverseDifference_SkPathOp }},
+{{ kIntersect_SkPathOp, kDifference_SkPathOp }, { kReverseDifference_SkPathOp, kUnion_SkPathOp }},
+{{ kUnion_SkPathOp, kReverseDifference_SkPathOp }, { kDifference_SkPathOp, kIntersect_SkPathOp }},
+{{ kXOR_SkPathOp, kXOR_SkPathOp }, { kXOR_SkPathOp, kXOR_SkPathOp }},
+{{ kReverseDifference_SkPathOp, kUnion_SkPathOp }, { kIntersect_SkPathOp, kDifference_SkPathOp }},
+};
+
+static const bool gOutInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+ {{ false, false }, { true, false }}, // diff
+ {{ false, false }, { false, true }}, // sect
+ {{ false, true }, { true, true }}, // union
+ {{ false, true }, { true, false }}, // xor
+ {{ false, true }, { false, false }}, // rev diff
+};
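+// Worked example: with one inverse-filled and two normally filled, kDifference_SkPathOp
+// is remapped through gOpInverse[kDifference][1][0] to kUnion_SkPathOp, and
+// gOutInverse[kUnion][1][0] is then true: since (~A) - B == ~(A | B), the code
+// computes the union and marks the result to take an inverse fill.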
+
+#if DEBUG_T_SECT_LOOP_COUNT
+
+#include "include/private/SkMutex.h"
+
+SkOpGlobalState debugWorstState(nullptr, nullptr SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr));
+
+void ReportPathOpsDebugging() {
+ debugWorstState.debugLoopReport();
+}
+
+extern void (*gVerboseFinalize)();
+
+#endif
+
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+#if DEBUG_DUMP_VERIFY
+#ifndef SK_DEBUG
+ const char* testName = "release";
+#endif
+ if (SkPathOpsDebug::gDumpOp) {
+ SkPathOpsDebug::DumpOp(one, two, op, testName);
+ }
+#endif
+ op = gOpInverse[op][one.isInverseFillType()][two.isInverseFillType()];
+ bool inverseFill = gOutInverse[op][one.isInverseFillType()][two.isInverseFillType()];
+ SkPath::FillType fillType = inverseFill ? SkPath::kInverseEvenOdd_FillType :
+ SkPath::kEvenOdd_FillType;
+ SkRect rect1, rect2;
+ if (kIntersect_SkPathOp == op && one.isRect(&rect1) && two.isRect(&rect2)) {
+ result->reset();
+ result->setFillType(fillType);
+ if (rect1.intersect(rect2)) {
+ result->addRect(rect1);
+ }
+ return true;
+ }
+ if (one.isEmpty() || two.isEmpty()) {
+ SkPath work;
+ switch (op) {
+ case kIntersect_SkPathOp:
+ break;
+ case kUnion_SkPathOp:
+ case kXOR_SkPathOp:
+ work = one.isEmpty() ? two : one;
+ break;
+ case kDifference_SkPathOp:
+ if (!one.isEmpty()) {
+ work = one;
+ }
+ break;
+ case kReverseDifference_SkPathOp:
+ if (!two.isEmpty()) {
+ work = two;
+ }
+ break;
+ default:
+ SkASSERT(0); // unhandled case
+ }
+ if (inverseFill != work.isInverseFillType()) {
+ work.toggleInverseFillType();
+ }
+ return Simplify(work, result);
+ }
+ SkSTArenaAlloc<4096> allocator; // FIXME: add a constant expression here, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+ const SkPath* minuend = &one;
+ const SkPath* subtrahend = &two;
+ if (op == kReverseDifference_SkPathOp) {
+ using std::swap;
+ swap(minuend, subtrahend);
+ op = kDifference_SkPathOp;
+ }
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ // turn path into list of segments
+ SkOpEdgeBuilder builder(*minuend, contourList, &globalState);
+ if (builder.unparseable()) {
+ return false;
+ }
+ const int xorMask = builder.xorMask();
+ builder.addOperand(*subtrahend);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contourList->dumpSegments("seg", op);
+#endif
+
+ const int xorOpMask = builder.xorMask();
+ if (!SortContourList(&contourList, xorMask == kEvenOdd_PathOpsMask,
+ xorOpMask == kEvenOdd_PathOpsMask)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()))
+ ;
+ } while ((current = current->next()));
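+ // The pairwise loop above starts next at current, so each contour is first
+ // intersected with itself (finding self-intersections) and then with every
+ // later contour, visiting each pair at most once.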
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_ALIGNMENT
+ contourList->dumpSegments("aligned");
+#endif
+ // construct closed contours
+ SkPath original = *result;
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (!bridgeOp(contourList, op, xorMask, xorOpMask, &wrapper)) {
+ *result = original;
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+#if DEBUG_T_SECT_LOOP_COUNT
+ static SkMutex& debugWorstLoop = *(new SkMutex);
+ {
+ SkAutoMutexExclusive autoM(debugWorstLoop);
+ if (!gVerboseFinalize) {
+ gVerboseFinalize = &ReportPathOpsDebugging;
+ }
+ debugWorstState.debugDoYourWorst(&globalState);
+ }
+#endif
+ return true;
+}
+
+bool Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result) {
+#if DEBUG_DUMP_VERIFY
+ if (SkPathOpsDebug::gVerifyOp) {
+ if (!OpDebug(one, two, op, result SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr))) {
+ SkPathOpsDebug::ReportOpFail(one, two, op);
+ return false;
+ }
+ SkPathOpsDebug::VerifyOp(one, two, op, *result);
+ return true;
+ }
+#endif
+ return OpDebug(one, two, op, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsPoint.h b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
new file mode 100644
index 0000000000..bca0530fed
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsPoint_DEFINED
+#define SkPathOpsPoint_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+inline bool AlmostEqualUlps(const SkPoint& pt1, const SkPoint& pt2) {
+ return AlmostEqualUlps(pt1.fX, pt2.fX) && AlmostEqualUlps(pt1.fY, pt2.fY);
+}
+
+struct SkDVector {
+ double fX;
+ double fY;
+
+ SkDVector& set(const SkVector& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ return *this;
+ }
+
+ // only used by testing
+ void operator+=(const SkDVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ // only called by nearestT, which is currently only used by testing
+ void operator-=(const SkDVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ // only used by testing
+ void operator/=(const double s) {
+ fX /= s;
+ fY /= s;
+ }
+
+ // only used by testing
+ void operator*=(const double s) {
+ fX *= s;
+ fY *= s;
+ }
+
+ SkVector asSkVector() const {
+ SkVector v = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+ return v;
+ }
+
+ // only used by testing
+ double cross(const SkDVector& a) const {
+ return fX * a.fY - fY * a.fX;
+ }
+
+ // similar to cross, this bastardization considers nearly coincident to be zero
+ // uses ulps epsilon == 16
+ double crossCheck(const SkDVector& a) const {
+ double xy = fX * a.fY;
+ double yx = fY * a.fX;
+ return AlmostEqualUlps(xy, yx) ? 0 : xy - yx;
+ }
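+ // e.g. for v = {1, 1 + 3e-16} and a = {1, 1}, cross(a) returns about -2.2e-16,
+ // but crossCheck(a) sees xy and yx agree within ulps and returns exactly 0.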
+
+ // allow tinier numbers
+ double crossNoNormalCheck(const SkDVector& a) const {
+ double xy = fX * a.fY;
+ double yx = fY * a.fX;
+ return AlmostEqualUlpsNoNormalCheck(xy, yx) ? 0 : xy - yx;
+ }
+
+ double dot(const SkDVector& a) const {
+ return fX * a.fX + fY * a.fY;
+ }
+
+ double length() const {
+ return sqrt(lengthSquared());
+ }
+
+ double lengthSquared() const {
+ return fX * fX + fY * fY;
+ }
+
+ SkDVector& normalize() {
+ double inverseLength = sk_ieee_double_divide(1, this->length());
+ fX *= inverseLength;
+ fY *= inverseLength;
+ return *this;
+ }
+
+ bool isFinite() const {
+ return std::isfinite(fX) && std::isfinite(fY);
+ }
+};
+
+struct SkDPoint {
+ double fX;
+ double fY;
+
+ void set(const SkPoint& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ }
+
+ friend SkDVector operator-(const SkDPoint& a, const SkDPoint& b) {
+ return { a.fX - b.fX, a.fY - b.fY };
+ }
+
+ friend bool operator==(const SkDPoint& a, const SkDPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ friend bool operator!=(const SkDPoint& a, const SkDPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ void operator=(const SkPoint& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ }
+
+ // only used by testing
+ void operator+=(const SkDVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ // only used by testing
+ void operator-=(const SkDVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ // only used by testing
+ SkDPoint operator+(const SkDVector& v) {
+ SkDPoint result = *this;
+ result += v;
+ return result;
+ }
+
+ // only used by testing
+ SkDPoint operator-(const SkDVector& v) {
+ SkDPoint result = *this;
+ result -= v;
+ return result;
+ }
+
+ // note: this cannot be implemented with
+ // return approximately_equal(a.fY, fY) && approximately_equal(a.fX, fX);
+ // because that will not take the magnitude of the values into account
+ bool approximatelyDEqual(const SkDPoint& a) const {
+ if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+ return false;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostDequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ bool approximatelyDEqual(const SkPoint& a) const {
+ SkDPoint dA;
+ dA.set(a);
+ return approximatelyDEqual(dA);
+ }
+
+ bool approximatelyEqual(const SkDPoint& a) const {
+ if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+ return false;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostPequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ bool approximatelyEqual(const SkPoint& a) const {
+ SkDPoint dA;
+ dA.set(a);
+ return approximatelyEqual(dA);
+ }
+
+ static bool ApproximatelyEqual(const SkPoint& a, const SkPoint& b) {
+ if (approximately_equal(a.fX, b.fX) && approximately_equal(a.fY, b.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(a.fX, b.fX) || !RoughlyEqualUlps(a.fY, b.fY)) {
+ return false;
+ }
+ SkDPoint dA, dB;
+ dA.set(a);
+ dB.set(b);
+ double dist = dA.distance(dB); // OPTIMIZATION: can we compare against distSq instead ?
+ float tiniest = SkTMin(SkTMin(SkTMin(a.fX, b.fX), a.fY), b.fY);
+ float largest = SkTMax(SkTMax(SkTMax(a.fX, b.fX), a.fY), b.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostDequalUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+ }
+
+ // only used by testing
+ bool approximatelyZero() const {
+ return approximately_zero(fX) && approximately_zero(fY);
+ }
+
+ SkPoint asSkPoint() const {
+ SkPoint pt = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+ return pt;
+ }
+
+ double distance(const SkDPoint& a) const {
+ SkDVector temp = *this - a;
+ return temp.length();
+ }
+
+ double distanceSquared(const SkDPoint& a) const {
+ SkDVector temp = *this - a;
+ return temp.lengthSquared();
+ }
+
+ static SkDPoint Mid(const SkDPoint& a, const SkDPoint& b) {
+ SkDPoint result;
+ result.fX = (a.fX + b.fX) / 2;
+ result.fY = (a.fY + b.fY) / 2;
+ return result;
+ }
+
+ bool roughlyEqual(const SkDPoint& a) const {
+ if (roughly_equal(fX, a.fX) && roughly_equal(fY, a.fY)) {
+ return true;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ static bool RoughlyEqual(const SkPoint& a, const SkPoint& b) {
+ if (!RoughlyEqualUlps(a.fX, b.fX) && !RoughlyEqualUlps(a.fY, b.fY)) {
+ return false;
+ }
+ SkDPoint dA, dB;
+ dA.set(a);
+ dB.set(b);
+ double dist = dA.distance(dB); // OPTIMIZATION: can we compare against distSq instead ?
+ float tiniest = SkTMin(SkTMin(SkTMin(a.fX, b.fX), a.fY), b.fY);
+ float largest = SkTMax(SkTMax(SkTMax(a.fX, b.fX), a.fY), b.fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+ }
+
+ // very lightweight check; should only be used to test for inequality
+ static bool WayRoughlyEqual(const SkPoint& a, const SkPoint& b) {
+ float largestNumber = SkTMax(SkTAbs(a.fX), SkTMax(SkTAbs(a.fY),
+ SkTMax(SkTAbs(b.fX), SkTAbs(b.fY))));
+ SkVector diffs = a - b;
+ float largestDiff = SkTMax(diffs.fX, diffs.fY);
+ return roughly_zero_when_compared_to(largestDiff, largestNumber);
+ }
+
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ static void Dump(const SkPoint& pt);
+ static void DumpHex(const SkPoint& pt);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
new file mode 100644
index 0000000000..45cd78b356
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+// from blackpawn.com/texts/pointinpoly
+static bool pointInTriangle(const SkDPoint fPts[3], const SkDPoint& test) {
+ SkDVector v0 = fPts[2] - fPts[0];
+ SkDVector v1 = fPts[1] - fPts[0];
+ SkDVector v2 = test - fPts[0];
+ double dot00 = v0.dot(v0);
+ double dot01 = v0.dot(v1);
+ double dot02 = v0.dot(v2);
+ double dot11 = v1.dot(v1);
+ double dot12 = v1.dot(v2);
+ // Compute barycentric coordinates
+ double denom = dot00 * dot11 - dot01 * dot01;
+ double u = dot11 * dot02 - dot01 * dot12;
+ double v = dot00 * dot12 - dot01 * dot02;
+ // Check if point is in triangle
+ if (denom >= 0) {
+ return u >= 0 && v >= 0 && u + v < denom;
+ }
+ return u <= 0 && v <= 0 && u + v > denom;
+}
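+// Here u and v are the barycentric coordinates scaled by denom, so the usual test
+// (u >= 0, v >= 0, u + v < 1 after dividing by denom) is done without the division;
+// the second branch flips the comparisons when denom is negative.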
+
+static bool matchesEnd(const SkDPoint fPts[3], const SkDPoint& test) {
+ return fPts[0] == test || fPts[2] == test;
+}
+
+/* started with at_most_end_pts_in_common from SkDQuadIntersection.cpp */
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one quad's points. If the 2nd quad's points
+// are on the line or on the opposite side from the 1st quad's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, *isLinear is set to true if the quad's hull collapsed, making the curve
+   linear; if returning false, the quad pair have only the end point in common
+*/
+bool SkDQuad::hullIntersects(const SkDQuad& q2, bool* isLinear) const {
+ bool linear = true;
+ for (int oddMan = 0; oddMan < kPointCount; ++oddMan) {
+ const SkDPoint* endPt[2];
+ this->otherPts(oddMan, endPt);
+ double origX = endPt[0]->fX;
+ double origY = endPt[0]->fY;
+ double adj = endPt[1]->fX - origX;
+ double opp = endPt[1]->fY - origY;
+ double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+ if (approximately_zero(sign)) {
+ continue;
+ }
+ linear = false;
+ bool foundOutlier = false;
+ for (int n = 0; n < kPointCount; ++n) {
+ double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+ if (test * sign > 0 && !precisely_zero(test)) {
+ foundOutlier = true;
+ break;
+ }
+ }
+ if (!foundOutlier) {
+ return false;
+ }
+ }
+ if (linear && !matchesEnd(fPts, q2.fPts[0]) && !matchesEnd(fPts, q2.fPts[2])) {
+ // if the end point of the opposite quad is inside the hull that is nearly a line,
+ // then representing the quad as a line may cause the intersection to be missed.
+ // Check to see if the endpoint is in the triangle.
+ if (pointInTriangle(fPts, q2.fPts[0]) || pointInTriangle(fPts, q2.fPts[2])) {
+ linear = false;
+ }
+ }
+ *isLinear = linear;
+ return true;
+}
+
+bool SkDQuad::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return conic.hullIntersects(*this, isLinear);
+}
+
+bool SkDQuad::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(*this, isLinear);
+}
+
+/* bit twiddling for finding the off curve index (x&~m is the pair in [0,1,2] excluding oddMan)
+oddMan opp x=oddMan^opp x=x-oddMan m=x>>2 x&~m
+ 0 1 1 1 0 1
+ 2 2 2 0 2
+ 1 1 0 -1 -1 0
+ 2 3 2 0 2
+ 2 1 3 1 0 1
+ 2 0 -2 -1 0
+*/
+void SkDQuad::otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+ for (int opp = 1; opp < kPointCount; ++opp) {
+ int end = (oddMan ^ opp) - oddMan; // choose a value not equal to oddMan
+ end &= ~(end >> 2); // if the value went negative, set it to zero
+ endPt[opp - 1] = &fPts[end];
+ }
+}
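+// e.g. oddMan == 1: opp == 1 gives end = (1 ^ 1) - 1 = -1, which the mask clamps
+// to 0; opp == 2 gives end = (1 ^ 2) - 1 = 2; so endPt receives fPts[0] and
+// fPts[2], matching the table above.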
+
+int SkDQuad::AddValidTs(double s[], int realRoots, double* t) {
+ int foundRoots = 0;
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = s[index];
+ if (approximately_zero_or_more(tValue) && approximately_one_or_less(tValue)) {
+ if (approximately_less_than_zero(tValue)) {
+ tValue = 0;
+ } else if (approximately_greater_than_one(tValue)) {
+ tValue = 1;
+ }
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], tValue)) {
+ goto nextRoot;
+ }
+ }
+ t[foundRoots++] = tValue;
+ }
+nextRoot:
+ {}
+ }
+ return foundRoots;
+}
+
+// note: caller expects multiple results to be sorted smaller first
+// note: http://en.wikipedia.org/wiki/Loss_of_significance has an interesting
+// analysis of the quadratic equation, suggesting why the following looks at
+// the sign of B -- and further suggesting that the greatest loss of precision
+// is in b squared less two a c
+int SkDQuad::RootsValidT(double A, double B, double C, double t[2]) {
+ double s[2];
+ int realRoots = RootsReal(A, B, C, s);
+ int foundRoots = AddValidTs(s, realRoots, t);
+ return foundRoots;
+}
+
+static int handle_zero(const double B, const double C, double s[2]) {
+ if (approximately_zero(B)) {
+ s[0] = 0;
+ return C == 0;
+ }
+ s[0] = -C / B;
+ return 1;
+}
+
+/*
+Numeric Solutions (5.6) suggests solving the quadratic by computing
+ Q = -1/2(B + sgn(B)Sqrt(B^2 - 4 A C))
+and using the roots
+ t1 = Q / A
+ t2 = C / Q
+*/
+// this does not discard real roots <= 0 or >= 1
+int SkDQuad::RootsReal(const double A, const double B, const double C, double s[2]) {
+ if (!A) {
+ return handle_zero(B, C, s);
+ }
+ const double p = B / (2 * A);
+ const double q = C / A;
+ if (approximately_zero(A) && (approximately_zero_inverse(p) || approximately_zero_inverse(q))) {
+ return handle_zero(B, C, s);
+ }
+ /* normal form: x^2 + px + q = 0 */
+ const double p2 = p * p;
+ if (!AlmostDequalUlps(p2, q) && p2 < q) {
+ return 0;
+ }
+ double sqrt_D = 0;
+ if (p2 > q) {
+ sqrt_D = sqrt(p2 - q);
+ }
+ s[0] = sqrt_D - p;
+ s[1] = -sqrt_D - p;
+ return 1 + !AlmostDequalUlps(s[0], s[1]);
+}
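+// Worked example: A = 1, B = -3, C = 2 gives p = -1.5, q = 2, and p2 = 2.25 > q,
+// so sqrt_D = 0.5, s[0] = 2, and s[1] = 1 -- the roots of x^2 - 3x + 2.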
+
+bool SkDQuad::isLinear(int startIndex, int endIndex) const {
+ SkLineParameters lineParameters;
+ lineParameters.quadEndPoints(*this, startIndex, endIndex);
+ // FIXME: maybe it's possible to avoid this and compare non-normalized
+ lineParameters.normalize();
+ double distance = lineParameters.controlPtDistance(*this);
+ double tiniest = SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+ double largest = SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+ largest = SkTMax(largest, -tiniest);
+ return approximately_zero_when_compared_to(distance, largest);
+}
+
+SkDVector SkDQuad::dxdyAtT(double t) const {
+ double a = t - 1;
+ double b = 1 - 2 * t;
+ double c = t;
+ SkDVector result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+ if (result.fX == 0 && result.fY == 0) {
+ if (zero_or_one(t)) {
+ result = fPts[2] - fPts[0];
+ } else {
+ // incomplete
+ SkDebugf("!q");
+ }
+ }
+ return result;
+}
+
+// OPTIMIZE: assert if caller passes in t == 0 / t == 1 ?
+SkDPoint SkDQuad::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[2];
+ }
+ double one_t = 1 - t;
+ double a = one_t * one_t;
+ double b = 2 * one_t * t;
+ double c = t * t;
+ SkDPoint result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+ return result;
+}
+
+static double interp_quad_coords(const double* src, double t) {
+ if (0 == t) {
+ return src[0];
+ }
+ if (1 == t) {
+ return src[4];
+ }
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double abc = SkDInterp(ab, bc, t);
+ return abc;
+}
+
+bool SkDQuad::monotonicInX() const {
+ return between(fPts[0].fX, fPts[1].fX, fPts[2].fX);
+}
+
+bool SkDQuad::monotonicInY() const {
+ return between(fPts[0].fY, fPts[1].fY, fPts[2].fY);
+}
+
+/*
+Given a quadratic q, t1, and t2, find a small quadratic segment.
+
+The new quadratic is defined by A, B, and C, where
+ A = c[0]*(1 - t1)*(1 - t1) + 2*c[1]*t1*(1 - t1) + c[2]*t1*t1
+ C = c[3]*(1 - t1)*(1 - t1) + 2*c[2]*t1*(1 - t1) + c[1]*t1*t1
+
+To find B, compute the point halfway between t1 and t2:
+
+q(at (t1 + t2)/2) == D
+
+Next, compute where D must be if we know the value of B:
+
+_12 = A/2 + B/2
+12_ = B/2 + C/2
+123 = A/4 + B/2 + C/4
+ = D
+
+Group the known values on one side:
+
+B = D*2 - A/2 - C/2
+*/
+
+// OPTIMIZE? : special case t1 = 1 && t2 = 0
+SkDQuad SkDQuad::subDivide(double t1, double t2) const {
+ if (0 == t1 && 1 == t2) {
+ return *this;
+ }
+ SkDQuad dst;
+ double ax = dst[0].fX = interp_quad_coords(&fPts[0].fX, t1);
+ double ay = dst[0].fY = interp_quad_coords(&fPts[0].fY, t1);
+ double dx = interp_quad_coords(&fPts[0].fX, (t1 + t2) / 2);
+ double dy = interp_quad_coords(&fPts[0].fY, (t1 + t2) / 2);
+ double cx = dst[2].fX = interp_quad_coords(&fPts[0].fX, t2);
+ double cy = dst[2].fY = interp_quad_coords(&fPts[0].fY, t2);
+ /* bx = */ dst[1].fX = 2 * dx - (ax + cx) / 2;
+ /* by = */ dst[1].fY = 2 * dy - (ay + cy) / 2;
+ return dst;
+}
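+// Consistency check: over the full range (t1 = 0, t2 = 1, handled by the early return)
+// D = q(1/2) = (A + 2*B + C) / 4, so 2*D - (A + C)/2 == B and the original control
+// point would be recovered exactly.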
+
+void SkDQuad::align(int endIndex, SkDPoint* dstPt) const {
+ if (fPts[endIndex].fX == fPts[1].fX) {
+ dstPt->fX = fPts[endIndex].fX;
+ }
+ if (fPts[endIndex].fY == fPts[1].fY) {
+ dstPt->fY = fPts[endIndex].fY;
+ }
+}
+
+SkDPoint SkDQuad::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const {
+ SkASSERT(t1 != t2);
+ SkDPoint b;
+ SkDQuad sub = subDivide(t1, t2);
+ SkDLine b0 = {{a, sub[1] + (a - sub[0])}};
+ SkDLine b1 = {{c, sub[1] + (c - sub[2])}};
+ SkIntersections i;
+ i.intersectRay(b0, b1);
+ if (i.used() == 1 && i[0][0] >= 0 && i[1][0] >= 0) {
+ b = i.pt(0);
+ } else {
+ SkASSERT(i.used() <= 2);
+ return SkDPoint::Mid(b0[1], b1[1]);
+ }
+ if (t1 == 0 || t2 == 0) {
+ align(0, &b);
+ }
+ if (t1 == 1 || t2 == 1) {
+ align(2, &b);
+ }
+ if (AlmostBequalUlps(b.fX, a.fX)) {
+ b.fX = a.fX;
+ } else if (AlmostBequalUlps(b.fX, c.fX)) {
+ b.fX = c.fX;
+ }
+ if (AlmostBequalUlps(b.fY, a.fY)) {
+ b.fY = a.fY;
+ } else if (AlmostBequalUlps(b.fY, c.fY)) {
+ b.fY = c.fY;
+ }
+ return b;
+}
+
+/* classic one t subdivision */
+static void interp_quad_coords(const double* src, double* dst, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ dst[0] = src[0];
+ dst[2] = ab;
+ dst[4] = SkDInterp(ab, bc, t);
+ dst[6] = bc;
+ dst[8] = src[4];
+}
+
+SkDQuadPair SkDQuad::chopAt(double t) const
+{
+ SkDQuadPair dst;
+ interp_quad_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+ interp_quad_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+ return dst;
+}
+
+static int valid_unit_divide(double numer, double denom, double* ratio)
+{
+ if (numer < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+ if (denom == 0 || numer == 0 || numer >= denom) {
+ return 0;
+ }
+ double r = numer / denom;
+ if (r == 0) { // catch underflow if numer <<<< denom
+ return 0;
+ }
+ *ratio = r;
+ return 1;
+}
+
+/** Quad'(t) = At + B, where
+ A = 2(a - 2b + c)
+ B = 2(b - a)
+ Solve for t, only if it fits between 0 < t < 1
+*/
+int SkDQuad::FindExtrema(const double src[], double tValue[1]) {
+ /* At + B == 0
+ t = -B / A
+ */
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
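+// e.g. coordinates a = 0, b = 2, c = 1: t = (a - b) / (a - 2*b + c) = -2 / -3,
+// so one extremum is reported at t = 2/3, inside (0, 1).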
+
+/* Parameterization form, given A*t*t + 2*B*t*(1-t) + C*(1-t)*(1-t)
+ *
+ * a = A - 2*B + C
+ * b = 2*B - 2*C
+ * c = C
+ */
+void SkDQuad::SetABC(const double* quad, double* a, double* b, double* c) {
+ *a = quad[0]; // a = A
+ *b = 2 * quad[2]; // b = 2*B
+ *c = quad[4]; // c = C
+ *b -= *c; // b = 2*B - C
+ *a -= *b; // a = A - 2*B + C
+ *b -= *c; // b = 2*B - 2*C
+}
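+// Tracing A = 1, B = 2, C = 3 (quad[0], quad[2], quad[4]) through the steps yields
+// a = 0, b = -2, c = 3, matching a = A - 2*B + C, b = 2*B - 2*C, c = C.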
+
+int SkTQuad::intersectRay(SkIntersections* i, const SkDLine& line) const {
+ return i->intersectRay(fQuad, line);
+}
+
+bool SkTQuad::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return conic.hullIntersects(fQuad, isLinear);
+}
+
+bool SkTQuad::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(fQuad, isLinear);
+}
+
+void SkTQuad::setBounds(SkDRect* rect) const {
+ rect->setBounds(fQuad);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.h b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
new file mode 100644
index 0000000000..5489485eba
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsQuad_DEFINED
+#define SkPathOpsQuad_DEFINED
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+struct SkOpCurve;
+
+struct SkDQuadPair {
+ const SkDQuad& first() const { return (const SkDQuad&) pts[0]; }
+ const SkDQuad& second() const { return (const SkDQuad&) pts[2]; }
+ SkDPoint pts[5];
+};
+
+struct SkDQuad {
+ static const int kPointCount = 3;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 4;
+
+ SkDPoint fPts[kPointCount];
+
+ bool collapsed() const {
+ return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2]);
+ }
+
+ bool controlsInside() const {
+ SkDVector v01 = fPts[0] - fPts[1];
+ SkDVector v02 = fPts[0] - fPts[2];
+ SkDVector v12 = fPts[1] - fPts[2];
+ return v02.dot(v01) > 0 && v02.dot(v12) > 0;
+ }
+
+ void debugInit() {
+ sk_bzero(fPts, sizeof(fPts));
+ }
+
+ void debugSet(const SkDPoint* pts);
+
+ SkDQuad flip() const {
+ SkDQuad result = {{fPts[2], fPts[1], fPts[0]} SkDEBUGPARAMS(fDebugGlobalState) };
+ return result;
+ }
+
+ static bool IsConic() { return false; }
+
+ const SkDQuad& set(const SkPoint pts[kPointCount]
+ SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fPts[2] = pts[2];
+ SkDEBUGCODE(fDebugGlobalState = state);
+ return *this;
+ }
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+ static int AddValidTs(double s[], int realRoots, double* t);
+ void align(int endIndex, SkDPoint* dstPt) const;
+ SkDQuadPair chopAt(double t) const;
+ SkDVector dxdyAtT(double t) const;
+ static int FindExtrema(const double src[], double tValue[1]);
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified horizontal line.
+ */
+ int horizontalIntersect(double yIntercept, double roots[2]) const;
+
+ bool hullIntersects(const SkDQuad& , bool* isLinear) const;
+ bool hullIntersects(const SkDConic& , bool* isLinear) const;
+ bool hullIntersects(const SkDCubic& , bool* isLinear) const;
+ bool isLinear(int startIndex, int endIndex) const;
+ static int maxIntersections() { return kMaxIntersections; }
+ bool monotonicInX() const;
+ bool monotonicInY() const;
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const;
+ static int pointCount() { return kPointCount; }
+ static int pointLast() { return kPointLast; }
+ SkDPoint ptAtT(double t) const;
+ static int RootsReal(double A, double B, double C, double t[2]);
+ static int RootsValidT(const double A, const double B, const double C, double s[2]);
+ static void SetABC(const double* quad, double* a, double* b, double* c);
+ SkDQuad subDivide(double t1, double t2) const;
+ void subDivide(double t1, double t2, SkDQuad* quad) const { *quad = this->subDivide(t1, t2); }
+
+ static SkDQuad SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.subDivide(t1, t2);
+ }
+ SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const;
+ static SkDPoint SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& c,
+ double t1, double t2) {
+ SkDQuad quad;
+ quad.set(pts);
+ return quad.subDivide(a, c, t1, t2);
+ }
+
+ /**
+ * Return the number of valid roots (0 < root < 1) for this quad intersecting the
+ * specified vertical line.
+ */
+ int verticalIntersect(double xIntercept, double roots[2]) const;
+
+ SkDCubic debugToCubic() const;
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ void dumpID(int id) const;
+ void dumpInner() const;
+
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+};
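+
+// SkDQuad stores the three control points of a quadratic Bezier in double
+// precision; ptAtT() evaluates the Bernstein form
+// P(t) = (1-t)^2*P0 + 2t(1-t)*P1 + t^2*P2. An illustrative sketch, assuming
+// 'skPts' is an SkPoint[3] supplied by the caller:
+//
+//     SkDQuad quad;
+//     quad.set(skPts);                  // promote to double precision
+//     SkDPoint mid = quad.ptAtT(0.5);   // point at the parametric midpoint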
+
+
+class SkTQuad : public SkTCurve {
+public:
+ SkDQuad fQuad;
+
+ SkTQuad() {}
+
+ SkTQuad(const SkDQuad& q)
+ : fQuad(q) {
+ }
+
+ ~SkTQuad() override {}
+
+ const SkDPoint& operator[](int n) const override { return fQuad[n]; }
+ SkDPoint& operator[](int n) override { return fQuad[n]; }
+
+ bool collapsed() const override { return fQuad.collapsed(); }
+ bool controlsInside() const override { return fQuad.controlsInside(); }
+ void debugInit() override { return fQuad.debugInit(); }
+#if DEBUG_T_SECT
+ void dumpID(int id) const override { return fQuad.dumpID(id); }
+#endif
+ SkDVector dxdyAtT(double t) const override { return fQuad.dxdyAtT(t); }
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const override { return fQuad.globalState(); }
+#endif
+
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override {
+ return quad.hullIntersects(fQuad, isLinear);
+ }
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const override;
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override;
+
+ bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+ return curve.hullIntersects(fQuad, isLinear);
+ }
+
+ int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+ bool IsConic() const override { return false; }
+ SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTQuad>(); }
+
+ int maxIntersections() const override { return SkDQuad::kMaxIntersections; }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+ fQuad.otherPts(oddMan, endPt);
+ }
+
+ int pointCount() const override { return SkDQuad::kPointCount; }
+ int pointLast() const override { return SkDQuad::kPointLast; }
+ SkDPoint ptAtT(double t) const override { return fQuad.ptAtT(t); }
+ void setBounds(SkDRect* ) const override;
+
+ void subDivide(double t1, double t2, SkTCurve* curve) const override {
+ ((SkTQuad*) curve)->fQuad = fQuad.subDivide(t1, t2);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
new file mode 100644
index 0000000000..c2a0014ea7
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+void SkDRect::setBounds(const SkDQuad& curve, const SkDQuad& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[2]);
+ double tValues[2];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDQuad::FindExtrema(&sub[0].fX, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDQuad::FindExtrema(&sub[0].fY, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
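+
+// A quad's derivative is linear, so FindExtrema has at most one root per
+// axis, t = (P0 - P1) / (P0 - 2*P1 + P2); the bounds above are the hull of
+// the two endpoints plus any interior extrema. A sketch of the per-axis
+// test, with 'p0', 'p1', 'p2' standing in for one coordinate of each point:
+//
+//     double num = p0 - p1;
+//     double den = p0 - 2 * p1 + p2;
+//     if (den != 0) {
+//         double t = num / den;
+//         if (0 < t && t < 1) { /* extremum lies inside the span */ }
+//     }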
+
+void SkDRect::setBounds(const SkDConic& curve, const SkDConic& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[2]);
+ double tValues[2];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDConic::FindExtrema(&sub[0].fX, sub.fWeight, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDConic::FindExtrema(&sub[0].fY, sub.fWeight, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
+
+void SkDRect::setBounds(const SkDCubic& curve, const SkDCubic& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[3]);
+ double tValues[4];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDCubic::FindExtrema(&sub[0].fX, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDCubic::FindExtrema(&sub[0].fY, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
+
+void SkDRect::setBounds(const SkTCurve& curve) {
+ curve.setBounds(this);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.h b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
new file mode 100644
index 0000000000..2f2a9f0c65
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsRect_DEFINED
+#define SkPathOpsRect_DEFINED
+
+#include "src/pathops/SkPathOpsPoint.h"
+
+class SkTCurve;
+
+struct SkDRect {
+ double fLeft, fTop, fRight, fBottom;
+
+ void add(const SkDPoint& pt) {
+ fLeft = SkTMin(fLeft, pt.fX);
+ fTop = SkTMin(fTop, pt.fY);
+ fRight = SkTMax(fRight, pt.fX);
+ fBottom = SkTMax(fBottom, pt.fY);
+ }
+
+ bool contains(const SkDPoint& pt) const {
+ return approximately_between(fLeft, pt.fX, fRight)
+ && approximately_between(fTop, pt.fY, fBottom);
+ }
+
+ void debugInit();
+
+ bool intersects(const SkDRect& r) const {
+ SkASSERT(fLeft <= fRight);
+ SkASSERT(fTop <= fBottom);
+ SkASSERT(r.fLeft <= r.fRight);
+ SkASSERT(r.fTop <= r.fBottom);
+ return r.fLeft <= fRight && fLeft <= r.fRight && r.fTop <= fBottom && fTop <= r.fBottom;
+ }
+
+ void set(const SkDPoint& pt) {
+ fLeft = fRight = pt.fX;
+ fTop = fBottom = pt.fY;
+ }
+
+ double width() const {
+ return fRight - fLeft;
+ }
+
+ double height() const {
+ return fBottom - fTop;
+ }
+
+ void setBounds(const SkDConic& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDConic& curve, const SkDConic& sub, double tStart, double tEnd);
+
+ void setBounds(const SkDCubic& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDCubic& curve, const SkDCubic& sub, double tStart, double tEnd);
+
+ void setBounds(const SkDQuad& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDQuad& curve, const SkDQuad& sub, double tStart, double tEnd);
+
+ void setBounds(const SkTCurve& curve);
+
+ bool valid() const {
+ return fLeft <= fRight && fTop <= fBottom;
+ }
+};
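+
+// SkDRect grows from a seed point; an illustrative sketch of accumulating
+// the bounds of a point set, assuming 'pts' and 'count' come from the
+// caller:
+//
+//     SkDRect r;
+//     r.set(pts[0]);
+//     for (int i = 1; i < count; ++i) {
+//         r.add(pts[i]);
+//     }
+//     SkASSERT(r.valid());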
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
new file mode 100644
index 0000000000..f079b50a8d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathWriter.h"
+
+static bool bridgeWinding(SkOpContourHead* contourList, SkPathWriter* writer) {
+ bool unsortable = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeWinding(start, end)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextWinding(&chase, &nextStart, &nextEnd,
+ &unsortable);
+ if (!next) {
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ writer->finishContour();
+ } else {
+ SkOpSpanBase* last;
+ if (!current->markAndChaseDone(start, end, &last)) {
+ return false;
+ }
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ current = FindChase(&chase, &start, &end);
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
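+
+// bridgeWinding repeatedly picks a topmost sortable span, then walks from
+// segment to segment choosing the next edge whose winding keeps it on the
+// output boundary, emitting curves into the SkPathWriter until the contour
+// closes; spans whose winding makes them invisible are marked done and
+// their neighbors are queued in 'chase' for later traversal.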
+
+// returns true if all edges were processed
+static bool bridgeXor(SkOpContourHead* contourList, SkPathWriter* writer) {
+ bool unsortable = false;
+ int safetyNet = 1000000;
+ do {
+ SkOpSpan* span = FindUndone(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ do {
+ if (--safetyNet < 0) {
+ return false;
+ }
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextXor(&nextStart, &nextEnd,
+ &unsortable);
+ if (!next) {
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (!writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ return false;
+ }
+ }
+ writer->finishContour();
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ } while (true);
+ return true;
+}
+
+// FIXME : add this as a member of SkPath
+bool SimplifyDebug(const SkPath& path, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ SkPath::FillType fillType = path.isInverseFillType() ? SkPath::kInverseEvenOdd_FillType
+ : SkPath::kEvenOdd_FillType;
+ if (path.isConvex()) {
+ if (result != &path) {
+ *result = path;
+ }
+ result->setFillType(fillType);
+ return true;
+ }
+ // turn path into list of segments
+ SkSTArenaAlloc<4096> allocator; // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+#if DEBUG_DUMP_VERIFY
+#ifndef SK_DEBUG
+ const char* testName = "release";
+#endif
+ if (SkPathOpsDebug::gDumpOp) {
+ SkPathOpsDebug::DumpSimplify(path, testName);
+ }
+#endif
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ SkOpEdgeBuilder builder(path, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contour.dumpSegments();
+#endif
+ if (!SortContourList(&contourList, false, false)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()));
+ } while ((current = current->next()));
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_DUMP_ALIGNMENT
+ contour.dumpSegments("aligned");
+#endif
+ // construct closed contours
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (builder.xorMask() == kWinding_PathOpsMask ? !bridgeWinding(contourList, &wrapper)
+ : !bridgeXor(contourList, &wrapper)) {
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+ return true;
+}
+
+bool Simplify(const SkPath& path, SkPath* result) {
+#if DEBUG_DUMP_VERIFY
+ if (SkPathOpsDebug::gVerifyOp) {
+ if (!SimplifyDebug(path, result SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr))) {
+ SkPathOpsDebug::ReportSimplifyFail(path);
+ return false;
+ }
+ SkPathOpsDebug::VerifySimplify(path, *result);
+ return true;
+ }
+#endif
+ return SimplifyDebug(path, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+}
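+
+// An illustrative sketch of calling Simplify, assuming 'figureEight' is any
+// self-intersecting path built elsewhere:
+//
+//     SkPath simple;
+//     if (Simplify(figureEight, &simple)) {
+//         // 'simple' covers the same area, with an even-odd fill type
+//     }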
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h b/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h
new file mode 100644
index 0000000000..f9053488b7
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsTCurve_DEFINED
+#define SkPathOpsTCurve_DEFINED
+
+#include "src/pathops/SkPathOpsPoint.h"
+
+class SkArenaAlloc;
+class SkIntersections;
+
+class SkTCurve {
+public:
+ virtual ~SkTCurve() {}
+ virtual const SkDPoint& operator[](int n) const = 0;
+ virtual SkDPoint& operator[](int n) = 0;
+
+ virtual bool collapsed() const = 0;
+ virtual bool controlsInside() const = 0;
+ virtual void debugInit() = 0;
+#if DEBUG_T_SECT
+ virtual void dumpID(int id) const = 0;
+#endif
+ virtual SkDVector dxdyAtT(double t) const = 0;
+ virtual bool hullIntersects(const SkDQuad& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkDConic& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkDCubic& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkTCurve& , bool* isLinear) const = 0;
+ virtual int intersectRay(SkIntersections* i, const SkDLine& line) const = 0;
+ virtual bool IsConic() const = 0;
+ virtual SkTCurve* make(SkArenaAlloc& ) const = 0;
+ virtual int maxIntersections() const = 0;
+ virtual void otherPts(int oddMan, const SkDPoint* endPt[2]) const = 0;
+ virtual int pointCount() const = 0;
+ virtual int pointLast() const = 0;
+ virtual SkDPoint ptAtT(double t) const = 0;
+ virtual void setBounds(SkDRect* ) const = 0;
+ virtual void subDivide(double t1, double t2, SkTCurve* curve) const = 0;
+#ifdef SK_DEBUG
+ virtual SkOpGlobalState* globalState() const = 0;
+#endif
+};
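+
+// This interface lets the T-sect intersection code treat quads, conics, and
+// cubics uniformly; SkTQuad in SkPathOpsQuad.h is a representative
+// implementation that forwards each virtual to an underlying SkDQuad.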
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
new file mode 100644
index 0000000000..f24d4b51de
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
@@ -0,0 +1,2138 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkPathOpsTSect.h"
+
+#define COINCIDENT_SPAN_COUNT 9
+
+void SkTCoincident::setPerp(const SkTCurve& c1, double t,
+ const SkDPoint& cPt, const SkTCurve& c2) {
+ SkDVector dxdy = c1.dxdyAtT(t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i SkDEBUGCODE((c1.globalState()));
+ int used = i.intersectRay(c2, perp);
+ // only keep closest
+ if (used == 0 || used == 3) {
+ this->init();
+ return;
+ }
+ fPerpT = i[0][0];
+ fPerpPt = i.pt(0);
+ SkASSERT(used <= 2);
+ if (used == 2) {
+ double distSq = (fPerpPt - cPt).lengthSquared();
+ double dist2Sq = (i.pt(1) - cPt).lengthSquared();
+ if (dist2Sq < distSq) {
+ fPerpT = i[0][1];
+ fPerpPt = i.pt(1);
+ }
+ }
+#if DEBUG_T_SECT
+ SkDebugf("setPerp t=%1.9g cPt=(%1.9g,%1.9g) %s oppT=%1.9g fPerpPt=(%1.9g,%1.9g)\n",
+ t, cPt.fX, cPt.fY,
+ cPt.approximatelyEqual(fPerpPt) ? "==" : "!=", fPerpT, fPerpPt.fX, fPerpPt.fY);
+#endif
+ fMatch = cPt.approximatelyEqual(fPerpPt);
+#if DEBUG_T_SECT
+ if (fMatch) {
+ SkDebugf(""); // allow setting breakpoint
+ }
+#endif
+}
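+
+// The ray built above rotates the tangent (dx, dy) a quarter turn to
+// (dy, -dx), so 'perp' passes through cPt at right angles to c1; if that ray
+// meets c2 at a point approximately equal to cPt, the curves are locally
+// coincident at t.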
+
+void SkTSpan::addBounded(SkTSpan* span, SkArenaAlloc* heap) {
+ SkTSpanBounded* bounded = heap->make<SkTSpanBounded>();
+ bounded->fBounded = span;
+ bounded->fNext = fBounded;
+ fBounded = bounded;
+}
+
+SkTSpan* SkTSect::addFollowing(
+ SkTSpan* prior) {
+ SkTSpan* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->fStartT = prior ? prior->fEndT : 0;
+ SkTSpan* next = prior ? prior->fNext : fHead;
+ result->fEndT = next ? next->fStartT : 1;
+ result->fPrev = prior;
+ result->fNext = next;
+ if (prior) {
+ prior->fNext = result;
+ } else {
+ fHead = result;
+ }
+ if (next) {
+ next->fPrev = result;
+ }
+ result->resetBounds(fCurve);
+ // world may not be consistent to call validate here
+ result->validate();
+ return result;
+}
+
+void SkTSect::addForPerp(SkTSpan* span, double t) {
+ if (!span->hasOppT(t)) {
+ SkTSpan* priorSpan;
+ SkTSpan* opp = this->spanAtT(t, &priorSpan);
+ if (!opp) {
+ opp = this->addFollowing(priorSpan);
+#if DEBUG_PERP
+ SkDebugf("%s priorSpan=%d t=%1.9g opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, t, opp->debugID());
+#endif
+ }
+#if DEBUG_PERP
+ opp->dump(); SkDebugf("\n");
+ SkDebugf("%s addBounded span=%d opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, opp->debugID());
+#endif
+ opp->addBounded(span, &fHeap);
+ span->addBounded(opp, &fHeap);
+ }
+ this->validate();
+#if DEBUG_T_SECT
+ span->validatePerpT(t);
+#endif
+}
+
+double SkTSpan::closestBoundedT(const SkDPoint& pt) const {
+ double result = -1;
+ double closest = DBL_MAX;
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan* test = testBounded->fBounded;
+ double startDist = test->pointFirst().distanceSquared(pt);
+ if (closest > startDist) {
+ closest = startDist;
+ result = test->fStartT;
+ }
+ double endDist = test->pointLast().distanceSquared(pt);
+ if (closest > endDist) {
+ closest = endDist;
+ result = test->fEndT;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(between(0, result, 1));
+ return result;
+}
+
+#ifdef SK_DEBUG
+
+bool SkTSpan::debugIsBefore(const SkTSpan* span) const {
+ const SkTSpan* work = this;
+ do {
+ if (span == work) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+#endif
+
+bool SkTSpan::contains(double t) const {
+ const SkTSpan* work = this;
+ do {
+ if (between(work->fStartT, t, work->fEndT)) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+
+const SkTSect* SkTSpan::debugOpp() const {
+ return SkDEBUGRELEASE(fDebugSect->debugOpp(), nullptr);
+}
+
+SkTSpan* SkTSpan::findOppSpan(
+ const SkTSpan* opp) const {
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (opp == test) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+// returns 0 if no hull intersection
+// 1 if hulls intersect
+// 2 if hulls only share a common endpoint
+// -1 if linear and further checking is required
+
+int SkTSpan::hullCheck(const SkTSpan* opp,
+ bool* start, bool* oppStart) {
+ if (fIsLinear) {
+ return -1;
+ }
+ bool ptsInCommon;
+ if (onlyEndPointsInCommon(opp, start, oppStart, &ptsInCommon)) {
+ SkASSERT(ptsInCommon);
+ return 2;
+ }
+ bool linear;
+ if (fPart->hullIntersects(*opp->fPart, &linear)) {
+ if (!linear) { // check set true if linear
+ return 1;
+ }
+ fIsLinear = true;
+ fIsLine = fPart->controlsInside();
+ return ptsInCommon ? 1 : -1;
+ } else { // hull is not linear; check set true if intersected at the end points
+ return ((int) ptsInCommon) << 1; // 0 or 2
+ }
+}
+
+// OPTIMIZE ? If at_most_end_pts_in_common detects that one quad is near linear,
+// use line intersection to guess a better split than 0.5
+// OPTIMIZE Once at_most_end_pts_in_common detects linear, mark span so all future splits are linear
+
+int SkTSpan::hullsIntersect(SkTSpan* opp,
+ bool* start, bool* oppStart) {
+ if (!fBounds.intersects(opp->fBounds)) {
+ return 0;
+ }
+ int hullSect = this->hullCheck(opp, start, oppStart);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ hullSect = opp->hullCheck(this, oppStart, start);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ return -1;
+}
+
+void SkTSpan::init(const SkTCurve& c) {
+ fPrev = fNext = nullptr;
+ fStartT = 0;
+ fEndT = 1;
+ fBounded = nullptr;
+ resetBounds(c);
+}
+
+bool SkTSpan::initBounds(const SkTCurve& c) {
+ if (SkDoubleIsNaN(fStartT) || SkDoubleIsNaN(fEndT)) {
+ return false;
+ }
+ c.subDivide(fStartT, fEndT, fPart);
+ fBounds.setBounds(*fPart);
+ fCoinStart.init();
+ fCoinEnd.init();
+ fBoundsMax = SkTMax(fBounds.width(), fBounds.height());
+ fCollapsed = fPart->collapsed();
+ fHasPerp = false;
+ fDeleted = false;
+#if DEBUG_T_SECT
+ if (fCollapsed) {
+ SkDebugf(""); // for convenient breakpoints
+ }
+#endif
+ return fBounds.valid();
+}
+
+bool SkTSpan::linearsIntersect(SkTSpan* span) {
+ int result = this->linearIntersects(*span->fPart);
+ if (result <= 1) {
+ return SkToBool(result);
+ }
+ SkASSERT(span->fIsLinear);
+ result = span->linearIntersects(*fPart);
+// SkASSERT(result <= 1);
+ return SkToBool(result);
+}
+
+double SkTSpan::linearT(const SkDPoint& pt) const {
+ SkDVector len = this->pointLast() - this->pointFirst();
+ return fabs(len.fX) > fabs(len.fY)
+ ? (pt.fX - this->pointFirst().fX) / len.fX
+ : (pt.fY - this->pointFirst().fY) / len.fY;
+}
+
+int SkTSpan::linearIntersects(const SkTCurve& q2) const {
+ // looks like q1 is near-linear
+ int start = 0, end = fPart->pointLast(); // the outside points are usually the extremes
+ if (!fPart->controlsInside()) {
+ double dist = 0; // if there's any question, compute distance to find best outsiders
+ for (int outer = 0; outer < this->pointCount() - 1; ++outer) {
+ for (int inner = outer + 1; inner < this->pointCount(); ++inner) {
+ double test = ((*fPart)[outer] - (*fPart)[inner]).lengthSquared();
+ if (dist > test) {
+ continue;
+ }
+ dist = test;
+ start = outer;
+ end = inner;
+ }
+ }
+ }
+ // see if q2 is on one side of the line formed by the extreme points
+ double origX = (*fPart)[start].fX;
+ double origY = (*fPart)[start].fY;
+ double adj = (*fPart)[end].fX - origX;
+ double opp = (*fPart)[end].fY - origY;
+ double maxPart = SkTMax(fabs(adj), fabs(opp));
+ double sign = 0; // initialization to shut up warning in release build
+ for (int n = 0; n < q2.pointCount(); ++n) {
+ double dx = q2[n].fY - origY;
+ double dy = q2[n].fX - origX;
+ double maxVal = SkTMax(maxPart, SkTMax(fabs(dx), fabs(dy)));
+ double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+ if (precisely_zero_when_compared_to(test, maxVal)) {
+ return 1;
+ }
+ if (approximately_zero_when_compared_to(test, maxVal)) {
+ return 3;
+ }
+ if (n == 0) {
+ sign = test;
+ continue;
+ }
+ if (test * sign < 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
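+
+// 'test' above is the z component of the cross product
+// (end - orig) x (q2[n] - orig); its sign says which side of the
+// extreme-point line each control point lies on. A sketch for points
+// 'a', 'b' and a query point 'p':
+//
+//     double cross = (b.fX - a.fX) * (p.fY - a.fY)
+//                  - (b.fY - a.fY) * (p.fX - a.fX);   // > 0 left, < 0 right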
+
+bool SkTSpan::onlyEndPointsInCommon(const SkTSpan* opp,
+ bool* start, bool* oppStart, bool* ptsInCommon) {
+ if (opp->pointFirst() == this->pointFirst()) {
+ *start = *oppStart = true;
+ } else if (opp->pointFirst() == this->pointLast()) {
+ *start = false;
+ *oppStart = true;
+ } else if (opp->pointLast() == this->pointFirst()) {
+ *start = true;
+ *oppStart = false;
+ } else if (opp->pointLast() == this->pointLast()) {
+ *start = *oppStart = false;
+ } else {
+ *ptsInCommon = false;
+ return false;
+ }
+ *ptsInCommon = true;
+ const SkDPoint* otherPts[4], * oppOtherPts[4];
+// const SkDPoint* otherPts[this->pointCount() - 1], * oppOtherPts[opp->pointCount() - 1];
+ int baseIndex = *start ? 0 : fPart->pointLast();
+ fPart->otherPts(baseIndex, otherPts);
+ opp->fPart->otherPts(*oppStart ? 0 : opp->fPart->pointLast(), oppOtherPts);
+ const SkDPoint& base = (*fPart)[baseIndex];
+ for (int o1 = 0; o1 < this->pointCount() - 1; ++o1) {
+ SkDVector v1 = *otherPts[o1] - base;
+ for (int o2 = 0; o2 < opp->pointCount() - 1; ++o2) {
+ SkDVector v2 = *oppOtherPts[o2] - base;
+ if (v2.dot(v1) >= 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+SkTSpan* SkTSpan::oppT(double t) const {
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (between(test->fStartT, t, test->fEndT)) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+bool SkTSpan::removeAllBounded() {
+ bool deleteSpan = false;
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* opp = bounded->fBounded;
+ deleteSpan |= opp->removeBounded(this);
+ bounded = bounded->fNext;
+ }
+ return deleteSpan;
+}
+
+bool SkTSpan::removeBounded(const SkTSpan* opp) {
+ if (fHasPerp) {
+ bool foundStart = false;
+ bool foundEnd = false;
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (opp != test) {
+ foundStart |= between(test->fStartT, fCoinStart.perpT(), test->fEndT);
+ foundEnd |= between(test->fStartT, fCoinEnd.perpT(), test->fEndT);
+ }
+ bounded = bounded->fNext;
+ }
+ if (!foundStart || !foundEnd) {
+ fHasPerp = false;
+ fCoinStart.init();
+ fCoinEnd.init();
+ }
+ }
+ SkTSpanBounded* bounded = fBounded;
+ SkTSpanBounded* prev = nullptr;
+ while (bounded) {
+ SkTSpanBounded* boundedNext = bounded->fNext;
+ if (opp == bounded->fBounded) {
+ if (prev) {
+ prev->fNext = boundedNext;
+ return false;
+ } else {
+ fBounded = boundedNext;
+ return fBounded == nullptr;
+ }
+ }
+ prev = bounded;
+ bounded = boundedNext;
+ }
+ SkOPASSERT(0);
+ return false;
+}
+
+bool SkTSpan::splitAt(SkTSpan* work, double t, SkArenaAlloc* heap) {
+ fStartT = t;
+ fEndT = work->fEndT;
+ if (fStartT == fEndT) {
+ fCollapsed = true;
+ return false;
+ }
+ work->fEndT = t;
+ if (work->fStartT == work->fEndT) {
+ work->fCollapsed = true;
+ return false;
+ }
+ fPrev = work;
+ fNext = work->fNext;
+ fIsLinear = work->fIsLinear;
+ fIsLine = work->fIsLine;
+
+ work->fNext = this;
+ if (fNext) {
+ fNext->fPrev = this;
+ }
+ this->validate();
+ SkTSpanBounded* bounded = work->fBounded;
+ fBounded = nullptr;
+ while (bounded) {
+ this->addBounded(bounded->fBounded, heap);
+ bounded = bounded->fNext;
+ }
+ bounded = fBounded;
+ while (bounded) {
+ bounded->fBounded->addBounded(this, heap);
+ bounded = bounded->fNext;
+ }
+ return true;
+}
+
+void SkTSpan::validate() const {
+#if DEBUG_VALIDATE
+ SkASSERT(this != fPrev);
+ SkASSERT(this != fNext);
+ SkASSERT(fNext == nullptr || fNext != fPrev);
+ SkASSERT(fNext == nullptr || this == fNext->fPrev);
+ SkASSERT(fPrev == nullptr || this == fPrev->fNext);
+ this->validateBounded();
+#endif
+#if DEBUG_T_SECT
+ SkASSERT(fBounds.width() || fBounds.height() || fCollapsed);
+ SkASSERT(fBoundsMax == SkTMax(fBounds.width(), fBounds.height()) || fCollapsed == 0xFF);
+ SkASSERT(0 <= fStartT);
+ SkASSERT(fEndT <= 1);
+ SkASSERT(fStartT <= fEndT);
+ SkASSERT(fBounded || fCollapsed == 0xFF);
+ if (fHasPerp) {
+ if (fCoinStart.isMatch()) {
+ validatePerpT(fCoinStart.perpT());
+ validatePerpPt(fCoinStart.perpT(), fCoinStart.perpPt());
+ }
+ if (fCoinEnd.isMatch()) {
+ validatePerpT(fCoinEnd.perpT());
+ validatePerpPt(fCoinEnd.perpT(), fCoinEnd.perpPt());
+ }
+ }
+#endif
+}
+
+void SkTSpan::validateBounded() const {
+#if DEBUG_VALIDATE
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ SkDEBUGCODE(const SkTSpan* overlap = testBounded->fBounded);
+ SkASSERT(!overlap->fDeleted);
+#if DEBUG_T_SECT
+ SkASSERT(((this->debugID() ^ overlap->debugID()) & 1) == 1);
+ SkASSERT(overlap->findOppSpan(this));
+#endif
+ testBounded = testBounded->fNext;
+ }
+#endif
+}
+
+void SkTSpan::validatePerpT(double oppT) const {
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan* overlap = testBounded->fBounded;
+ if (precisely_between(overlap->fStartT, oppT, overlap->fEndT)) {
+ return;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(0);
+}
+
+void SkTSpan::validatePerpPt(double t, const SkDPoint& pt) const {
+ SkASSERT(fDebugSect->fOppSect->fCurve.ptAtT(t) == pt);
+}
+
+SkTSect::SkTSect(const SkTCurve& c
+ SkDEBUGPARAMS(SkOpGlobalState* debugGlobalState)
+ PATH_OPS_DEBUG_T_SECT_PARAMS(int id))
+ : fCurve(c)
+ , fHeap(sizeof(SkTSpan) * 4)
+ , fCoincident(nullptr)
+ , fDeleted(nullptr)
+ , fActiveCount(0)
+ , fHung(false)
+ SkDEBUGPARAMS(fDebugGlobalState(debugGlobalState))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fID(id))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugCount(0))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugAllocatedCount(0))
+{
+ this->resetRemovedEnds();
+ fHead = this->addOne();
+ SkDEBUGCODE(fHead->debugSetGlobalState(debugGlobalState));
+ fHead->init(c);
+}
+
+SkTSpan* SkTSect::addOne() {
+ SkTSpan* result;
+ if (fDeleted) {
+ result = fDeleted;
+ fDeleted = result->fNext;
+ } else {
+ result = fHeap.make<SkTSpan>(fCurve, fHeap);
+#if DEBUG_T_SECT
+ ++fDebugAllocatedCount;
+#endif
+ }
+ result->reset();
+ result->fHasPerp = false;
+ result->fDeleted = false;
+ ++fActiveCount;
+ PATH_OPS_DEBUG_T_SECT_CODE(result->fID = fDebugCount++ * 2 + fID);
+ SkDEBUGCODE(result->fDebugSect = this);
+#ifdef SK_DEBUG
+ result->debugInit(fCurve, fHeap);
+ result->fCoinStart.debugInit();
+ result->fCoinEnd.debugInit();
+ result->fPrev = result->fNext = nullptr;
+ result->fBounds.debugInit();
+ result->fStartT = result->fEndT = result->fBoundsMax = SK_ScalarNaN;
+ result->fCollapsed = result->fIsLinear = result->fIsLine = 0xFF;
+#endif
+ return result;
+}
+
+bool SkTSect::binarySearchCoin(SkTSect* sect2, double tStart,
+ double tStep, double* resultT, double* oppT, SkTSpan** oppFirst) {
+ SkTSpan work(fCurve, fHeap);
+ double result = work.fStartT = work.fEndT = tStart;
+ SkDEBUGCODE(work.fDebugSect = this);
+ SkDPoint last = fCurve.ptAtT(tStart);
+ SkDPoint oppPt;
+ bool flip = false;
+ bool contained = false;
+ bool down = tStep < 0;
+ const SkTCurve& opp = sect2->fCurve;
+ do {
+ tStep *= 0.5;
+ work.fStartT += tStep;
+ if (flip) {
+ tStep = -tStep;
+ flip = false;
+ }
+ work.initBounds(fCurve);
+ if (work.fCollapsed) {
+ return false;
+ }
+ if (last.approximatelyEqual(work.pointFirst())) {
+ break;
+ }
+ last = work.pointFirst();
+ work.fCoinStart.setPerp(fCurve, work.fStartT, last, opp);
+ if (work.fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+ work.validatePerpPt(work.fCoinStart.perpT(), work.fCoinStart.perpPt());
+#endif
+ double oppTTest = work.fCoinStart.perpT();
+ if (sect2->fHead->contains(oppTTest)) {
+ *oppT = oppTTest;
+ oppPt = work.fCoinStart.perpPt();
+ contained = true;
+ if (down ? result <= work.fStartT : result >= work.fStartT) {
+ *oppFirst = nullptr; // signal caller to fail
+ return false;
+ }
+ result = work.fStartT;
+ continue;
+ }
+ }
+ tStep = -tStep;
+ flip = true;
+ } while (true);
+ if (!contained) {
+ return false;
+ }
+ if (last.approximatelyEqual(fCurve[0])) {
+ result = 0;
+ } else if (last.approximatelyEqual(this->pointLast())) {
+ result = 1;
+ }
+ if (oppPt.approximatelyEqual(opp[0])) {
+ *oppT = 0;
+ } else if (oppPt.approximatelyEqual(sect2->pointLast())) {
+ *oppT = 1;
+ }
+ *resultT = result;
+ return true;
+}
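+
+// binarySearchCoin advances tStart by successively halved steps, reversing
+// direction whenever the perpendicular foot falls off the opposite curve,
+// until consecutive probe points are approximately equal: a bisection that
+// hunts the edge of the coincident run rather than a root.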
+
+// OPTIMIZE ? keep a sorted list of sizes in the form of a doubly-linked list in quad span
+// so that each quad sect has a pointer to the largest, and can update it as spans
+// are split
+
+SkTSpan* SkTSect::boundsMax() {
+ SkTSpan* test = fHead;
+ SkTSpan* largest = fHead;
+ bool lCollapsed = largest->fCollapsed;
+ int safetyNet = 10000;
+ while ((test = test->fNext)) {
+ if (!--safetyNet) {
+ fHung = true;
+ return nullptr;
+ }
+ bool tCollapsed = test->fCollapsed;
+ if ((lCollapsed && !tCollapsed) || (lCollapsed == tCollapsed &&
+ largest->fBoundsMax < test->fBoundsMax)) {
+ largest = test;
+ lCollapsed = test->fCollapsed;
+ }
+ }
+ return largest;
+}
+
+bool SkTSect::coincidentCheck(SkTSect* sect2) {
+ SkTSpan* first = fHead;
+ if (!first) {
+ return false;
+ }
+ SkTSpan* last, * next;
+ do {
+ int consecutive = this->countConsecutiveSpans(first, &last);
+ next = last->fNext;
+ if (consecutive < COINCIDENT_SPAN_COUNT) {
+ continue;
+ }
+ this->validate();
+ sect2->validate();
+ this->computePerpendiculars(sect2, first, last);
+ this->validate();
+ sect2->validate();
+ // check to see if a range of points are on the curve
+ SkTSpan* coinStart = first;
+ do {
+ bool success = this->extractCoincident(sect2, coinStart, last, &coinStart);
+ if (!success) {
+ return false;
+ }
+ } while (coinStart && !last->fDeleted);
+ if (!fHead || !sect2->fHead) {
+ break;
+ }
+ if (!next || next->fDeleted) {
+ break;
+ }
+ } while ((first = next));
+ return true;
+}
+
+void SkTSect::coincidentForce(SkTSect* sect2,
+ double start1s, double start1e) {
+ SkTSpan* first = fHead;
+ SkTSpan* last = this->tail();
+ SkTSpan* oppFirst = sect2->fHead;
+ SkTSpan* oppLast = sect2->tail();
+ if (!last || !oppLast) {
+ return;
+ }
+ bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+ deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+ this->removeSpanRange(first, last);
+ sect2->removeSpanRange(oppFirst, oppLast);
+ first->fStartT = start1s;
+ first->fEndT = start1e;
+ first->resetBounds(fCurve);
+ first->fCoinStart.setPerp(fCurve, start1s, fCurve[0], sect2->fCurve);
+ first->fCoinEnd.setPerp(fCurve, start1e, this->pointLast(), sect2->fCurve);
+ bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+ double oppStartT = first->fCoinStart.perpT() == -1 ? 0 : SkTMax(0., first->fCoinStart.perpT());
+ double oppEndT = first->fCoinEnd.perpT() == -1 ? 1 : SkTMin(1., first->fCoinEnd.perpT());
+ if (!oppMatched) {
+ using std::swap;
+ swap(oppStartT, oppEndT);
+ }
+ oppFirst->fStartT = oppStartT;
+ oppFirst->fEndT = oppEndT;
+ oppFirst->resetBounds(sect2->fCurve);
+ this->removeCoincident(first, false);
+ sect2->removeCoincident(oppFirst, true);
+ if (deleteEmptySpans) {
+ this->deleteEmptySpans();
+ sect2->deleteEmptySpans();
+ }
+}
+
+bool SkTSect::coincidentHasT(double t) {
+ SkTSpan* test = fCoincident;
+ while (test) {
+ if (between(test->fStartT, t, test->fEndT)) {
+ return true;
+ }
+ test = test->fNext;
+ }
+ return false;
+}
+
+int SkTSect::collapsed() const {
+ int result = 0;
+ const SkTSpan* test = fHead;
+ while (test) {
+ if (test->fCollapsed) {
+ ++result;
+ }
+ test = test->next();
+ }
+ return result;
+}
+
+void SkTSect::computePerpendiculars(SkTSect* sect2,
+ SkTSpan* first, SkTSpan* last) {
+ if (!last) {
+ return;
+ }
+ const SkTCurve& opp = sect2->fCurve;
+ SkTSpan* work = first;
+ SkTSpan* prior = nullptr;
+ do {
+ if (!work->fHasPerp && !work->fCollapsed) {
+ if (prior) {
+ work->fCoinStart = prior->fCoinEnd;
+ } else {
+ work->fCoinStart.setPerp(fCurve, work->fStartT, work->pointFirst(), opp);
+ }
+ if (work->fCoinStart.isMatch()) {
+ double perpT = work->fCoinStart.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinStart.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fCoinEnd.setPerp(fCurve, work->fEndT, work->pointLast(), opp);
+ if (work->fCoinEnd.isMatch()) {
+ double perpT = work->fCoinEnd.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinEnd.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fHasPerp = true;
+ }
+ if (work == last) {
+ break;
+ }
+ prior = work;
+ work = work->fNext;
+ SkASSERT(work);
+ } while (true);
+}
+
+int SkTSect::countConsecutiveSpans(SkTSpan* first,
+ SkTSpan** lastPtr) const {
+ int consecutive = 1;
+ SkTSpan* last = first;
+ do {
+ SkTSpan* next = last->fNext;
+ if (!next) {
+ break;
+ }
+ if (next->fStartT > last->fEndT) {
+ break;
+ }
+ ++consecutive;
+ last = next;
+ } while (true);
+ *lastPtr = last;
+ return consecutive;
+}
+
+bool SkTSect::hasBounded(const SkTSpan* span) const {
+ const SkTSpan* test = fHead;
+ if (!test) {
+ return false;
+ }
+ do {
+ if (test->findOppSpan(span)) {
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+bool SkTSect::deleteEmptySpans() {
+ SkTSpan* test;
+ SkTSpan* next = fHead;
+ int safetyHatch = 1000;
+ while ((test = next)) {
+ next = test->fNext;
+ if (!test->fBounded) {
+ if (!this->removeSpan(test)) {
+ return false;
+ }
+ }
+ if (--safetyHatch < 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkTSect::extractCoincident(
+ SkTSect* sect2,
+ SkTSpan* first, SkTSpan* last,
+ SkTSpan** result) {
+ first = findCoincidentRun(first, &last);
+ if (!first || !last) {
+ *result = nullptr;
+ return true;
+ }
+ // march outwards to find limit of coincidence from here to previous and next spans
+ double startT = first->fStartT;
+ double oppStartT SK_INIT_TO_AVOID_WARNING;
+ double oppEndT SK_INIT_TO_AVOID_WARNING;
+ SkTSpan* prev = first->fPrev;
+ SkASSERT(first->fCoinStart.isMatch());
+ SkTSpan* oppFirst = first->findOppT(first->fCoinStart.perpT());
+ SkOPASSERT(last->fCoinEnd.isMatch());
+ bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+ double coinStart;
+ SkDEBUGCODE(double coinEnd);
+ SkTSpan* cutFirst;
+ if (prev && prev->fEndT == startT
+ && this->binarySearchCoin(sect2, startT, prev->fStartT - startT, &coinStart,
+ &oppStartT, &oppFirst)
+ && prev->fStartT < coinStart && coinStart < startT
+ && (cutFirst = prev->oppT(oppStartT))) {
+ oppFirst = cutFirst;
+ first = this->addSplitAt(prev, coinStart);
+ first->markCoincident();
+ prev->fCoinEnd.markCoincident();
+ if (oppFirst->fStartT < oppStartT && oppStartT < oppFirst->fEndT) {
+ SkTSpan* oppHalf = sect2->addSplitAt(oppFirst, oppStartT);
+ if (oppMatched) {
+ oppFirst->fCoinEnd.markCoincident();
+ oppHalf->markCoincident();
+ oppFirst = oppHalf;
+ } else {
+ oppFirst->markCoincident();
+ oppHalf->fCoinStart.markCoincident();
+ }
+ }
+ } else {
+ if (!oppFirst) {
+ return false;
+ }
+ SkDEBUGCODE(coinStart = first->fStartT);
+ SkDEBUGCODE(oppStartT = oppMatched ? oppFirst->fStartT : oppFirst->fEndT);
+ }
+ // FIXME: incomplete : if we're not at the end, find end of coin
+ SkTSpan* oppLast;
+ SkOPASSERT(last->fCoinEnd.isMatch());
+ oppLast = last->findOppT(last->fCoinEnd.perpT());
+ SkDEBUGCODE(coinEnd = last->fEndT);
+#ifdef SK_DEBUG
+ if (!this->globalState() || !this->globalState()->debugSkipAssert()) {
+ oppEndT = oppMatched ? oppLast->fEndT : oppLast->fStartT;
+ }
+#endif
+ if (!oppMatched) {
+ using std::swap;
+ swap(oppFirst, oppLast);
+ swap(oppStartT, oppEndT);
+ }
+ SkOPASSERT(oppStartT < oppEndT);
+ SkASSERT(coinStart == first->fStartT);
+ SkASSERT(coinEnd == last->fEndT);
+ if (!oppFirst) {
+ *result = nullptr;
+ return true;
+ }
+ SkOPASSERT(oppStartT == oppFirst->fStartT);
+ if (!oppLast) {
+ *result = nullptr;
+ return true;
+ }
+ SkOPASSERT(oppEndT == oppLast->fEndT);
+ // reduce coincident runs to single entries
+ this->validate();
+ sect2->validate();
+ bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+ deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+ this->removeSpanRange(first, last);
+ sect2->removeSpanRange(oppFirst, oppLast);
+ first->fEndT = last->fEndT;
+ first->resetBounds(this->fCurve);
+ first->fCoinStart.setPerp(fCurve, first->fStartT, first->pointFirst(), sect2->fCurve);
+ first->fCoinEnd.setPerp(fCurve, first->fEndT, first->pointLast(), sect2->fCurve);
+ oppStartT = first->fCoinStart.perpT();
+ oppEndT = first->fCoinEnd.perpT();
+ if (between(0, oppStartT, 1) && between(0, oppEndT, 1)) {
+ if (!oppMatched) {
+ using std::swap;
+ swap(oppStartT, oppEndT);
+ }
+ oppFirst->fStartT = oppStartT;
+ oppFirst->fEndT = oppEndT;
+ oppFirst->resetBounds(sect2->fCurve);
+ }
+ this->validateBounded();
+ sect2->validateBounded();
+ last = first->fNext;
+ if (!this->removeCoincident(first, false)) {
+ return false;
+ }
+ if (!sect2->removeCoincident(oppFirst, true)) {
+ return false;
+ }
+ if (deleteEmptySpans) {
+ if (!this->deleteEmptySpans() || !sect2->deleteEmptySpans()) {
+ *result = nullptr;
+ return false;
+ }
+ }
+ this->validate();
+ sect2->validate();
+ *result = last && !last->fDeleted && fHead && sect2->fHead ? last : nullptr;
+ return true;
+}
+
+SkTSpan* SkTSect::findCoincidentRun(
+ SkTSpan* first, SkTSpan** lastPtr) {
+ SkTSpan* work = first;
+ SkTSpan* lastCandidate = nullptr;
+ first = nullptr;
+ // find the first fully coincident span
+ do {
+ if (work->fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+ work->validatePerpT(work->fCoinStart.perpT());
+ work->validatePerpPt(work->fCoinStart.perpT(), work->fCoinStart.perpPt());
+#endif
+ SkOPASSERT(work->hasOppT(work->fCoinStart.perpT()));
+ if (!work->fCoinEnd.isMatch()) {
+ break;
+ }
+ lastCandidate = work;
+ if (!first) {
+ first = work;
+ }
+ } else if (first && work->fCollapsed) {
+ *lastPtr = lastCandidate;
+ return first;
+ } else {
+ lastCandidate = nullptr;
+ SkOPASSERT(!first);
+ }
+ if (work == *lastPtr) {
+ return first;
+ }
+ work = work->fNext;
+ if (!work) {
+ return nullptr;
+ }
+ } while (true);
+ if (lastCandidate) {
+ *lastPtr = lastCandidate;
+ }
+ return first;
+}
+
+int SkTSect::intersects(SkTSpan* span,
+ SkTSect* opp,
+ SkTSpan* oppSpan, int* oppResult) {
+ bool spanStart, oppStart;
+ int hullResult = span->hullsIntersect(oppSpan, &spanStart, &oppStart);
+ if (hullResult >= 0) {
+ if (hullResult == 2) { // hulls have one point in common
+ if (!span->fBounded || !span->fBounded->fNext) {
+ SkASSERT(!span->fBounded || span->fBounded->fBounded == oppSpan);
+ if (spanStart) {
+ span->fEndT = span->fStartT;
+ } else {
+ span->fStartT = span->fEndT;
+ }
+ } else {
+ hullResult = 1;
+ }
+ if (!oppSpan->fBounded || !oppSpan->fBounded->fNext) {
+ if (oppSpan->fBounded && oppSpan->fBounded->fBounded != span) {
+ return 0;
+ }
+ if (oppStart) {
+ oppSpan->fEndT = oppSpan->fStartT;
+ } else {
+ oppSpan->fStartT = oppSpan->fEndT;
+ }
+ *oppResult = 2;
+ } else {
+ *oppResult = 1;
+ }
+ } else {
+ *oppResult = 1;
+ }
+ return hullResult;
+ }
+ if (span->fIsLine && oppSpan->fIsLine) {
+ SkIntersections i;
+ int sects = this->linesIntersect(span, opp, oppSpan, &i);
+ if (sects == 2) {
+ return *oppResult = 1;
+ }
+ if (!sects) {
+ return -1;
+ }
+ this->removedEndCheck(span);
+ span->fStartT = span->fEndT = i[0][0];
+ opp->removedEndCheck(oppSpan);
+ oppSpan->fStartT = oppSpan->fEndT = i[1][0];
+ return *oppResult = 2;
+ }
+ if (span->fIsLinear || oppSpan->fIsLinear) {
+ return *oppResult = (int) span->linearsIntersect(oppSpan);
+ }
+ return *oppResult = 1;
+}
+
+template<typename SkTCurve>
+static bool is_parallel(const SkDLine& thisLine, const SkTCurve& opp) {
+ if (!opp.IsConic()) {
+ return false; // FIXME : breaks a lot of stuff now
+ }
+ int finds = 0;
+ SkDLine thisPerp;
+ thisPerp.fPts[0].fX = thisLine.fPts[1].fX + (thisLine.fPts[1].fY - thisLine.fPts[0].fY);
+ thisPerp.fPts[0].fY = thisLine.fPts[1].fY + (thisLine.fPts[0].fX - thisLine.fPts[1].fX);
+ thisPerp.fPts[1] = thisLine.fPts[1];
+ SkIntersections perpRayI;
+ perpRayI.intersectRay(opp, thisPerp);
+ for (int pIndex = 0; pIndex < perpRayI.used(); ++pIndex) {
+ finds += perpRayI.pt(pIndex).approximatelyEqual(thisPerp.fPts[1]);
+ }
+ thisPerp.fPts[1].fX = thisLine.fPts[0].fX + (thisLine.fPts[1].fY - thisLine.fPts[0].fY);
+ thisPerp.fPts[1].fY = thisLine.fPts[0].fY + (thisLine.fPts[0].fX - thisLine.fPts[1].fX);
+ thisPerp.fPts[0] = thisLine.fPts[0];
+ perpRayI.intersectRay(opp, thisPerp);
+ for (int pIndex = 0; pIndex < perpRayI.used(); ++pIndex) {
+ finds += perpRayI.pt(pIndex).approximatelyEqual(thisPerp.fPts[0]);
+ }
+ return finds >= 2;
+}
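+
+// is_parallel drops a perpendicular from each end of thisLine and counts how
+// often the opposite curve passes back through that end point; two hits mean
+// the line and curve run alongside each other there instead of crossing. A
+// sketch of building the perpendicular through a point 'p' for a line with
+// direction (dx, dy):
+//
+//     SkDLine perp = {{ p, { p.fX + dy, p.fY - dx } }};   // quarter turn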
+
+// while the intersection points are sufficiently far apart:
+// construct the tangent lines from the intersections
+// find the point where the tangent line intersects the opposite curve
+
+int SkTSect::linesIntersect(SkTSpan* span,
+ SkTSect* opp,
+ SkTSpan* oppSpan, SkIntersections* i) {
+ SkIntersections thisRayI SkDEBUGCODE((span->fDebugGlobalState));
+ SkIntersections oppRayI SkDEBUGCODE((span->fDebugGlobalState));
+ SkDLine thisLine = {{ span->pointFirst(), span->pointLast() }};
+ SkDLine oppLine = {{ oppSpan->pointFirst(), oppSpan->pointLast() }};
+ int loopCount = 0;
+ double bestDistSq = DBL_MAX;
+ if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+ return 0;
+ }
+ if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+ return 0;
+ }
+ // if the ends of each line intersect the opposite curve, the lines are coincident
+ if (thisRayI.used() > 1) {
+ int ptMatches = 0;
+ for (int tIndex = 0; tIndex < thisRayI.used(); ++tIndex) {
+ for (int lIndex = 0; lIndex < (int) SK_ARRAY_COUNT(thisLine.fPts); ++lIndex) {
+ ptMatches += thisRayI.pt(tIndex).approximatelyEqual(thisLine.fPts[lIndex]);
+ }
+ }
+ if (ptMatches == 2 || is_parallel(thisLine, opp->fCurve)) {
+ return 2;
+ }
+ }
+ if (oppRayI.used() > 1) {
+ int ptMatches = 0;
+ for (int oIndex = 0; oIndex < oppRayI.used(); ++oIndex) {
+ for (int lIndex = 0; lIndex < (int) SK_ARRAY_COUNT(oppLine.fPts); ++lIndex) {
+ ptMatches += oppRayI.pt(oIndex).approximatelyEqual(oppLine.fPts[lIndex]);
+ }
+ }
+ if (ptMatches == 2 || is_parallel(oppLine, this->fCurve)) {
+ return 2;
+ }
+ }
+ do {
+ // pick the closest pair of points
+ double closest = DBL_MAX;
+ int closeIndex SK_INIT_TO_AVOID_WARNING;
+ int oppCloseIndex SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < oppRayI.used(); ++index) {
+ if (!roughly_between(span->fStartT, oppRayI[0][index], span->fEndT)) {
+ continue;
+ }
+ for (int oIndex = 0; oIndex < thisRayI.used(); ++oIndex) {
+ if (!roughly_between(oppSpan->fStartT, thisRayI[0][oIndex], oppSpan->fEndT)) {
+ continue;
+ }
+ double distSq = thisRayI.pt(index).distanceSquared(oppRayI.pt(oIndex));
+ if (closest > distSq) {
+ closest = distSq;
+ closeIndex = index;
+ oppCloseIndex = oIndex;
+ }
+ }
+ }
+ if (closest == DBL_MAX) {
+ break;
+ }
+ const SkDPoint& oppIPt = thisRayI.pt(oppCloseIndex);
+ const SkDPoint& iPt = oppRayI.pt(closeIndex);
+ if (between(span->fStartT, oppRayI[0][closeIndex], span->fEndT)
+ && between(oppSpan->fStartT, thisRayI[0][oppCloseIndex], oppSpan->fEndT)
+ && oppIPt.approximatelyEqual(iPt)) {
+ i->merge(oppRayI, closeIndex, thisRayI, oppCloseIndex);
+ return i->used();
+ }
+ double distSq = oppIPt.distanceSquared(iPt);
+ if (bestDistSq < distSq || ++loopCount > 5) {
+ return 0;
+ }
+ bestDistSq = distSq;
+ double oppStart = oppRayI[0][closeIndex];
+ thisLine[0] = fCurve.ptAtT(oppStart);
+ thisLine[1] = thisLine[0] + fCurve.dxdyAtT(oppStart);
+ if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+ break;
+ }
+ double start = thisRayI[0][oppCloseIndex];
+ oppLine[0] = opp->fCurve.ptAtT(start);
+ oppLine[1] = oppLine[0] + opp->fCurve.dxdyAtT(start);
+ if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+ break;
+ }
+ } while (true);
+ // convergence may fail if the curves are nearly coincident
+ SkTCoincident oCoinS, oCoinE;
+ oCoinS.setPerp(opp->fCurve, oppSpan->fStartT, oppSpan->pointFirst(), fCurve);
+ oCoinE.setPerp(opp->fCurve, oppSpan->fEndT, oppSpan->pointLast(), fCurve);
+ double tStart = oCoinS.perpT();
+ double tEnd = oCoinE.perpT();
+ bool swap = tStart > tEnd;
+ if (swap) {
+ using std::swap;
+ swap(tStart, tEnd);
+ }
+ tStart = SkTMax(tStart, span->fStartT);
+ tEnd = SkTMin(tEnd, span->fEndT);
+ if (tStart > tEnd) {
+ return 0;
+ }
+ SkDVector perpS, perpE;
+ if (tStart == span->fStartT) {
+ SkTCoincident coinS;
+ coinS.setPerp(fCurve, span->fStartT, span->pointFirst(), opp->fCurve);
+ perpS = span->pointFirst() - coinS.perpPt();
+ } else if (swap) {
+ perpS = oCoinE.perpPt() - oppSpan->pointLast();
+ } else {
+ perpS = oCoinS.perpPt() - oppSpan->pointFirst();
+ }
+ if (tEnd == span->fEndT) {
+ SkTCoincident coinE;
+ coinE.setPerp(fCurve, span->fEndT, span->pointLast(), opp->fCurve);
+ perpE = span->pointLast() - coinE.perpPt();
+ } else if (swap) {
+ perpE = oCoinS.perpPt() - oppSpan->pointFirst();
+ } else {
+ perpE = oCoinE.perpPt() - oppSpan->pointLast();
+ }
+ if (perpS.dot(perpE) >= 0) {
+ return 0;
+ }
+ SkTCoincident coinW;
+ double workT = tStart;
+ double tStep = tEnd - tStart;
+ SkDPoint workPt;
+ do {
+ tStep *= 0.5;
+ if (precisely_zero(tStep)) {
+ return 0;
+ }
+ workT += tStep;
+ workPt = fCurve.ptAtT(workT);
+ coinW.setPerp(fCurve, workT, workPt, opp->fCurve);
+ double perpT = coinW.perpT();
+ if (coinW.isMatch() ? !between(oppSpan->fStartT, perpT, oppSpan->fEndT) : perpT < 0) {
+ continue;
+ }
+ SkDVector perpW = workPt - coinW.perpPt();
+ if ((perpS.dot(perpW) >= 0) == (tStep < 0)) {
+ tStep = -tStep;
+ }
+ if (workPt.approximatelyEqual(coinW.perpPt())) {
+ break;
+ }
+ } while (true);
+ double oppTTest = coinW.perpT();
+ if (!opp->fHead->contains(oppTTest)) {
+ return 0;
+ }
+ i->setMax(1);
+ i->insert(workT, oppTTest, workPt);
+ return 1;
+}
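+
+// When the tangent-line iteration above fails to converge, the curves may be
+// nearly coincident: perpendiculars bracket a [tStart, tEnd] window, and a
+// halved-step search finds a parameter whose perpendicular foot lands back
+// on the curve point, which is then reported as the single intersection.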
+
+bool SkTSect::markSpanGone(SkTSpan* span) {
+ if (--fActiveCount < 0) {
+ return false;
+ }
+ span->fNext = fDeleted;
+ fDeleted = span;
+ SkOPASSERT(!span->fDeleted);
+ span->fDeleted = true;
+ return true;
+}
+
+bool SkTSect::matchedDirection(double t, const SkTSect* sect2,
+ double t2) const {
+ SkDVector dxdy = this->fCurve.dxdyAtT(t);
+ SkDVector dxdy2 = sect2->fCurve.dxdyAtT(t2);
+ return dxdy.dot(dxdy2) >= 0;
+}
+
+void SkTSect::matchedDirCheck(double t, const SkTSect* sect2,
+ double t2, bool* calcMatched, bool* oppMatched) const {
+ if (*calcMatched) {
+ SkASSERT(*oppMatched == this->matchedDirection(t, sect2, t2));
+ } else {
+ *oppMatched = this->matchedDirection(t, sect2, t2);
+ *calcMatched = true;
+ }
+}
+
+void SkTSect::mergeCoincidence(SkTSect* sect2) {
+ double smallLimit = 0;
+ do {
+ // find the smallest unprocessed span
+ SkTSpan* smaller = nullptr;
+ SkTSpan* test = fCoincident;
+ do {
+ if (!test) {
+ return;
+ }
+ if (test->fStartT < smallLimit) {
+ continue;
+ }
+ if (smaller && smaller->fEndT < test->fStartT) {
+ continue;
+ }
+ smaller = test;
+ } while ((test = test->fNext));
+ if (!smaller) {
+ return;
+ }
+ smallLimit = smaller->fEndT;
+ // find next larger span
+ SkTSpan* prior = nullptr;
+ SkTSpan* larger = nullptr;
+ SkTSpan* largerPrior = nullptr;
+ test = fCoincident;
+ do {
+ if (test->fStartT < smaller->fEndT) {
+ continue;
+ }
+ SkOPASSERT(test->fStartT != smaller->fEndT);
+ if (larger && larger->fStartT < test->fStartT) {
+ continue;
+ }
+ largerPrior = prior;
+ larger = test;
+ } while ((void) (prior = test), (test = test->fNext));
+ if (!larger) {
+ continue;
+ }
+ // check middle t value to see if it is coincident as well
+ double midT = (smaller->fEndT + larger->fStartT) / 2;
+ SkDPoint midPt = fCurve.ptAtT(midT);
+ SkTCoincident coin;
+ coin.setPerp(fCurve, midT, midPt, sect2->fCurve);
+ if (coin.isMatch()) {
+ smaller->fEndT = larger->fEndT;
+ smaller->fCoinEnd = larger->fCoinEnd;
+ if (largerPrior) {
+ largerPrior->fNext = larger->fNext;
+ largerPrior->validate();
+ } else {
+ fCoincident = larger->fNext;
+ }
+ }
+ } while (true);
+}
+
+SkTSpan* SkTSect::prev(
+ SkTSpan* span) const {
+ SkTSpan* result = nullptr;
+ SkTSpan* test = fHead;
+ while (span != test) {
+ result = test;
+ test = test->fNext;
+ SkASSERT(test);
+ }
+ return result;
+}
+
+void SkTSect::recoverCollapsed() {
+ SkTSpan* deleted = fDeleted;
+ while (deleted) {
+ SkTSpan* delNext = deleted->fNext;
+ if (deleted->fCollapsed) {
+ SkTSpan** spanPtr = &fHead;
+ while (*spanPtr && (*spanPtr)->fEndT <= deleted->fStartT) {
+ spanPtr = &(*spanPtr)->fNext;
+ }
+ deleted->fNext = *spanPtr;
+ *spanPtr = deleted;
+ }
+ deleted = delNext;
+ }
+}
+
+void SkTSect::removeAllBut(const SkTSpan* keep,
+ SkTSpan* span, SkTSect* opp) {
+ const SkTSpanBounded* testBounded = span->fBounded;
+ while (testBounded) {
+ SkTSpan* bounded = testBounded->fBounded;
+ const SkTSpanBounded* next = testBounded->fNext;
+ // may have been deleted when opp did 'remove all but'
+ if (bounded != keep && !bounded->fDeleted) {
+ SkAssertResult(SkDEBUGCODE(!) span->removeBounded(bounded));
+ if (bounded->removeBounded(span)) {
+ opp->removeSpan(bounded);
+ }
+ }
+ testBounded = next;
+ }
+ SkASSERT(!span->fDeleted);
+ SkASSERT(span->findOppSpan(keep));
+ SkASSERT(keep->findOppSpan(span));
+}
+
+bool SkTSect::removeByPerpendicular(SkTSect* opp) {
+ SkTSpan* test = fHead;
+ SkTSpan* next;
+ do {
+ next = test->fNext;
+ if (test->fCoinStart.perpT() < 0 || test->fCoinEnd.perpT() < 0) {
+ continue;
+ }
+ SkDVector startV = test->fCoinStart.perpPt() - test->pointFirst();
+ SkDVector endV = test->fCoinEnd.perpPt() - test->pointLast();
+#if DEBUG_T_SECT
+ SkDebugf("%s startV=(%1.9g,%1.9g) endV=(%1.9g,%1.9g) dot=%1.9g\n", __FUNCTION__,
+ startV.fX, startV.fY, endV.fX, endV.fY, startV.dot(endV));
+#endif
+ if (startV.dot(endV) <= 0) {
+ continue;
+ }
+ if (!this->removeSpans(test, opp)) {
+ return false;
+ }
+ } while ((test = next));
+ return true;
+}
+
+bool SkTSect::removeCoincident(SkTSpan* span, bool isBetween) {
+ if (!this->unlinkSpan(span)) {
+ return false;
+ }
+ if (isBetween || between(0, span->fCoinStart.perpT(), 1)) {
+ --fActiveCount;
+ span->fNext = fCoincident;
+ fCoincident = span;
+ } else {
+ this->markSpanGone(span);
+ }
+ return true;
+}
+
+void SkTSect::removedEndCheck(SkTSpan* span) {
+ if (!span->fStartT) {
+ fRemovedStartT = true;
+ }
+ if (1 == span->fEndT) {
+ fRemovedEndT = true;
+ }
+}
+
+bool SkTSect::removeSpan(SkTSpan* span) {\
+ this->removedEndCheck(span);
+ if (!this->unlinkSpan(span)) {
+ return false;
+ }
+ return this->markSpanGone(span);
+}
+
+void SkTSect::removeSpanRange(SkTSpan* first,
+ SkTSpan* last) {
+ if (first == last) {
+ return;
+ }
+ SkTSpan* span = first;
+ SkASSERT(span);
+ SkTSpan* final = last->fNext;
+ SkTSpan* next = span->fNext;
+ while ((span = next) && span != final) {
+ next = span->fNext;
+ this->markSpanGone(span);
+ }
+ if (final) {
+ final->fPrev = first;
+ }
+ first->fNext = final;
+ // world may not be ready for validation here
+ first->validate();
+}
+
+bool SkTSect::removeSpans(SkTSpan* span,
+ SkTSect* opp) {
+ SkTSpanBounded* bounded = span->fBounded;
+ while (bounded) {
+ SkTSpan* spanBounded = bounded->fBounded;
+ SkTSpanBounded* next = bounded->fNext;
+ if (span->removeBounded(spanBounded)) { // shuffles last into position 0
+ this->removeSpan(span);
+ }
+ if (spanBounded->removeBounded(span)) {
+ opp->removeSpan(spanBounded);
+ }
+ if (span->fDeleted && opp->hasBounded(span)) {
+ return false;
+ }
+ bounded = next;
+ }
+ return true;
+}
+
+SkTSpan* SkTSect::spanAtT(double t,
+ SkTSpan** priorSpan) {
+ SkTSpan* test = fHead;
+ SkTSpan* prev = nullptr;
+ while (test && test->fEndT < t) {
+ prev = test;
+ test = test->fNext;
+ }
+ *priorSpan = prev;
+ return test && test->fStartT <= t ? test : nullptr;
+}
+
+SkTSpan* SkTSect::tail() {
+ SkTSpan* result = fHead;
+ SkTSpan* next = fHead;
+ int safetyNet = 100000;
+ while ((next = next->fNext)) {
+ if (!--safetyNet) {
+ return nullptr;
+ }
+ if (next->fEndT > result->fEndT) {
+ result = next;
+ }
+ }
+ return result;
+}
+
+/* Each span has a range of opposite spans it intersects. After the span is split in two,
+ adjust the range to its new size */
+
+bool SkTSect::trim(SkTSpan* span,
+ SkTSect* opp) {
+ FAIL_IF(!span->initBounds(fCurve));
+ const SkTSpanBounded* testBounded = span->fBounded;
+ while (testBounded) {
+ SkTSpan* test = testBounded->fBounded;
+ const SkTSpanBounded* next = testBounded->fNext;
+ int oppSects, sects = this->intersects(span, opp, test, &oppSects);
+ if (sects >= 1) {
+ if (oppSects == 2) {
+ test->initBounds(opp->fCurve);
+ opp->removeAllBut(span, test, this);
+ }
+ if (sects == 2) {
+ span->initBounds(fCurve);
+ this->removeAllBut(test, span, opp);
+ return true;
+ }
+ } else {
+ if (span->removeBounded(test)) {
+ this->removeSpan(span);
+ }
+ if (test->removeBounded(span)) {
+ opp->removeSpan(test);
+ }
+ }
+ testBounded = next;
+ }
+ return true;
+}
+
+bool SkTSect::unlinkSpan(SkTSpan* span) {
+ SkTSpan* prev = span->fPrev;
+ SkTSpan* next = span->fNext;
+ if (prev) {
+ prev->fNext = next;
+ if (next) {
+ next->fPrev = prev;
+ if (next->fStartT > next->fEndT) {
+ return false;
+ }
+ // world may not be ready for validate here
+ next->validate();
+ }
+ } else {
+ fHead = next;
+ if (next) {
+ next->fPrev = nullptr;
+ }
+ }
+ return true;
+}
+
+bool SkTSect::updateBounded(SkTSpan* first,
+ SkTSpan* last, SkTSpan* oppFirst) {
+ SkTSpan* test = first;
+ const SkTSpan* final = last->next();
+ bool deleteSpan = false;
+ do {
+ deleteSpan |= test->removeAllBounded();
+ } while ((test = test->fNext) != final && test);
+ first->fBounded = nullptr;
+ first->addBounded(oppFirst, &fHeap);
+ // cannot call validate until remove span range is called
+ return deleteSpan;
+}
+
+void SkTSect::validate() const {
+#if DEBUG_VALIDATE
+ int count = 0;
+ double last = 0;
+ if (fHead) {
+ const SkTSpan* span = fHead;
+ SkASSERT(!span->fPrev);
+ const SkTSpan* next;
+ do {
+ span->validate();
+ SkASSERT(span->fStartT >= last);
+ last = span->fEndT;
+ ++count;
+ next = span->fNext;
+ SkASSERT(next != span);
+ } while ((span = next) != nullptr);
+ }
+ SkASSERT(count == fActiveCount);
+#endif
+#if DEBUG_T_SECT
+ SkASSERT(fActiveCount <= fDebugAllocatedCount);
+ int deletedCount = 0;
+ const SkTSpan* deleted = fDeleted;
+ while (deleted) {
+ ++deletedCount;
+ deleted = deleted->fNext;
+ }
+ const SkTSpan* coincident = fCoincident;
+ while (coincident) {
+ ++deletedCount;
+ coincident = coincident->fNext;
+ }
+ SkASSERT(fActiveCount + deletedCount == fDebugAllocatedCount);
+#endif
+}
+
+void SkTSect::validateBounded() const {
+#if DEBUG_VALIDATE
+ if (!fHead) {
+ return;
+ }
+ const SkTSpan* span = fHead;
+ do {
+ span->validateBounded();
+ } while ((span = span->fNext) != nullptr);
+#endif
+}
+
+int SkTSect::EndsEqual(const SkTSect* sect1,
+ const SkTSect* sect2, SkIntersections* intersections) {
+ int zeroOneSet = 0;
+ if (sect1->fCurve[0] == sect2->fCurve[0]) {
+ zeroOneSet |= kZeroS1Set | kZeroS2Set;
+ intersections->insert(0, 0, sect1->fCurve[0]);
+ }
+ if (sect1->fCurve[0] == sect2->pointLast()) {
+ zeroOneSet |= kZeroS1Set | kOneS2Set;
+ intersections->insert(0, 1, sect1->fCurve[0]);
+ }
+ if (sect1->pointLast() == sect2->fCurve[0]) {
+ zeroOneSet |= kOneS1Set | kZeroS2Set;
+ intersections->insert(1, 0, sect1->pointLast());
+ }
+ if (sect1->pointLast() == sect2->pointLast()) {
+ zeroOneSet |= kOneS1Set | kOneS2Set;
+ intersections->insert(1, 1, sect1->pointLast());
+ }
+ // check for zero
+ if (!(zeroOneSet & (kZeroS1Set | kZeroS2Set))
+ && sect1->fCurve[0].approximatelyEqual(sect2->fCurve[0])) {
+ zeroOneSet |= kZeroS1Set | kZeroS2Set;
+ intersections->insertNear(0, 0, sect1->fCurve[0], sect2->fCurve[0]);
+ }
+ if (!(zeroOneSet & (kZeroS1Set | kOneS2Set))
+ && sect1->fCurve[0].approximatelyEqual(sect2->pointLast())) {
+ zeroOneSet |= kZeroS1Set | kOneS2Set;
+ intersections->insertNear(0, 1, sect1->fCurve[0], sect2->pointLast());
+ }
+ // check for one
+ if (!(zeroOneSet & (kOneS1Set | kZeroS2Set))
+ && sect1->pointLast().approximatelyEqual(sect2->fCurve[0])) {
+ zeroOneSet |= kOneS1Set | kZeroS2Set;
+ intersections->insertNear(1, 0, sect1->pointLast(), sect2->fCurve[0]);
+ }
+ if (!(zeroOneSet & (kOneS1Set | kOneS2Set))
+ && sect1->pointLast().approximatelyEqual(sect2->pointLast())) {
+ zeroOneSet |= kOneS1Set | kOneS2Set;
+ intersections->insertNear(1, 1, sect1->pointLast(), sect2->pointLast());
+ }
+ return zeroOneSet;
+}
+
+struct SkClosestRecord {
+ bool operator<(const SkClosestRecord& rh) const {
+ return fClosest < rh.fClosest;
+ }
+
+ void addIntersection(SkIntersections* intersections) const {
+ double r1t = fC1Index ? fC1Span->endT() : fC1Span->startT();
+ double r2t = fC2Index ? fC2Span->endT() : fC2Span->startT();
+ intersections->insert(r1t, r2t, fC1Span->part()[fC1Index]);
+ }
+
+ void findEnd(const SkTSpan* span1, const SkTSpan* span2,
+ int c1Index, int c2Index) {
+ const SkTCurve& c1 = span1->part();
+ const SkTCurve& c2 = span2->part();
+ if (!c1[c1Index].approximatelyEqual(c2[c2Index])) {
+ return;
+ }
+ double dist = c1[c1Index].distanceSquared(c2[c2Index]);
+ if (fClosest < dist) {
+ return;
+ }
+ fC1Span = span1;
+ fC2Span = span2;
+ fC1StartT = span1->startT();
+ fC1EndT = span1->endT();
+ fC2StartT = span2->startT();
+ fC2EndT = span2->endT();
+ fC1Index = c1Index;
+ fC2Index = c2Index;
+ fClosest = dist;
+ }
+
+ bool matesWith(const SkClosestRecord& mate SkDEBUGPARAMS(SkIntersections* i)) const {
+ SkOPOBJASSERT(i, fC1Span == mate.fC1Span || fC1Span->endT() <= mate.fC1Span->startT()
+ || mate.fC1Span->endT() <= fC1Span->startT());
+ SkOPOBJASSERT(i, fC2Span == mate.fC2Span || fC2Span->endT() <= mate.fC2Span->startT()
+ || mate.fC2Span->endT() <= fC2Span->startT());
+ return fC1Span == mate.fC1Span || fC1Span->endT() == mate.fC1Span->startT()
+ || fC1Span->startT() == mate.fC1Span->endT()
+ || fC2Span == mate.fC2Span
+ || fC2Span->endT() == mate.fC2Span->startT()
+ || fC2Span->startT() == mate.fC2Span->endT();
+ }
+
+ void merge(const SkClosestRecord& mate) {
+ fC1Span = mate.fC1Span;
+ fC2Span = mate.fC2Span;
+ fClosest = mate.fClosest;
+ fC1Index = mate.fC1Index;
+ fC2Index = mate.fC2Index;
+ }
+
+ void reset() {
+ fClosest = FLT_MAX;
+ SkDEBUGCODE(fC1Span = nullptr);
+ SkDEBUGCODE(fC2Span = nullptr);
+ SkDEBUGCODE(fC1Index = fC2Index = -1);
+ }
+
+ void update(const SkClosestRecord& mate) {
+ fC1StartT = SkTMin(fC1StartT, mate.fC1StartT);
+ fC1EndT = SkTMax(fC1EndT, mate.fC1EndT);
+ fC2StartT = SkTMin(fC2StartT, mate.fC2StartT);
+ fC2EndT = SkTMax(fC2EndT, mate.fC2EndT);
+ }
+
+ const SkTSpan* fC1Span;
+ const SkTSpan* fC2Span;
+ double fC1StartT;
+ double fC1EndT;
+ double fC2StartT;
+ double fC2EndT;
+ double fClosest;
+ int fC1Index;
+ int fC2Index;
+};
+
+struct SkClosestSect {
+ SkClosestSect()
+ : fUsed(0) {
+ fClosest.push_back().reset();
+ }
+
+ bool find(const SkTSpan* span1, const SkTSpan* span2
+ SkDEBUGPARAMS(SkIntersections* i)) {
+ SkClosestRecord* record = &fClosest[fUsed];
+ record->findEnd(span1, span2, 0, 0);
+ record->findEnd(span1, span2, 0, span2->part().pointLast());
+ record->findEnd(span1, span2, span1->part().pointLast(), 0);
+ record->findEnd(span1, span2, span1->part().pointLast(), span2->part().pointLast());
+ if (record->fClosest == FLT_MAX) {
+ return false;
+ }
+ for (int index = 0; index < fUsed; ++index) {
+ SkClosestRecord* test = &fClosest[index];
+ if (test->matesWith(*record SkDEBUGPARAMS(i))) {
+ if (test->fClosest > record->fClosest) {
+ test->merge(*record);
+ }
+ test->update(*record);
+ record->reset();
+ return false;
+ }
+ }
+ ++fUsed;
+ fClosest.push_back().reset();
+ return true;
+ }
+
+ void finish(SkIntersections* intersections) const {
+ SkSTArray<SkDCubic::kMaxIntersections * 3,
+ const SkClosestRecord*, true> closestPtrs;
+ for (int index = 0; index < fUsed; ++index) {
+ closestPtrs.push_back(&fClosest[index]);
+ }
+ SkTQSort<const SkClosestRecord >(closestPtrs.begin(), closestPtrs.end() - 1);
+ for (int index = 0; index < fUsed; ++index) {
+ const SkClosestRecord* test = closestPtrs[index];
+ test->addIntersection(intersections);
+ }
+ }
+
+ // this is oversized so that extra records can merge into the final one
+ SkSTArray<SkDCubic::kMaxIntersections * 2, SkClosestRecord, true> fClosest;
+ int fUsed;
+};
+
+// intersect the two curves by recursively splitting the span with the larger
+// bounds until the remaining spans collapse or coincidence is detected
+
+void SkTSect::BinarySearch(SkTSect* sect1,
+ SkTSect* sect2, SkIntersections* intersections) {
+#if DEBUG_T_SECT_DUMP > 1
+ gDumpTSectNum = 0;
+#endif
+ SkDEBUGCODE(sect1->fOppSect = sect2);
+ SkDEBUGCODE(sect2->fOppSect = sect1);
+ intersections->reset();
+ intersections->setMax(sect1->fCurve.maxIntersections() + 4); // give extra for slop
+ SkTSpan* span1 = sect1->fHead;
+ SkTSpan* span2 = sect2->fHead;
+ int oppSect, sect = sect1->intersects(span1, sect2, span2, &oppSect);
+// SkASSERT(between(0, sect, 2));
+ if (!sect) {
+ return;
+ }
+ if (sect == 2 && oppSect == 2) {
+ (void) EndsEqual(sect1, sect2, intersections);
+ return;
+ }
+ span1->addBounded(span2, &sect1->fHeap);
+ span2->addBounded(span1, &sect2->fHeap);
+ const int kMaxCoinLoopCount = 8;
+ int coinLoopCount = kMaxCoinLoopCount;
+ double start1s SK_INIT_TO_AVOID_WARNING;
+ double start1e SK_INIT_TO_AVOID_WARNING;
+ do {
+ // find the largest bounds
+ SkTSpan* largest1 = sect1->boundsMax();
+ if (!largest1) {
+ if (sect1->fHung) {
+ return;
+ }
+ break;
+ }
+ SkTSpan* largest2 = sect2->boundsMax();
+ // split it
+ if (!largest2 || (largest1 && (largest1->fBoundsMax > largest2->fBoundsMax
+ || (!largest1->fCollapsed && largest2->fCollapsed)))) {
+ if (sect2->fHung) {
+ return;
+ }
+ if (largest1->fCollapsed) {
+ break;
+ }
+ sect1->resetRemovedEnds();
+ sect2->resetRemovedEnds();
+ // trim parts that don't intersect the opposite
+ SkTSpan* half1 = sect1->addOne();
+ SkDEBUGCODE(half1->debugSetGlobalState(sect1->globalState()));
+ if (!half1->split(largest1, &sect1->fHeap)) {
+ break;
+ }
+ if (!sect1->trim(largest1, sect2)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ if (!sect1->trim(half1, sect2)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ } else {
+ if (largest2->fCollapsed) {
+ break;
+ }
+ sect1->resetRemovedEnds();
+ sect2->resetRemovedEnds();
+ // trim parts that don't intersect the opposite
+ SkTSpan* half2 = sect2->addOne();
+ SkDEBUGCODE(half2->debugSetGlobalState(sect2->globalState()));
+ if (!half2->split(largest2, &sect2->fHeap)) {
+ break;
+ }
+ if (!sect2->trim(largest2, sect1)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ if (!sect2->trim(half2, sect1)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kIterations_DebugLoop);
+#endif
+ // if there are 9 or more continuous spans on both sects, suspect coincidence
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ if (coinLoopCount == kMaxCoinLoopCount) {
+ start1s = sect1->fHead->fStartT;
+ start1e = sect1->tail()->fEndT;
+ }
+ if (!sect1->coincidentCheck(sect2)) {
+ return;
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kCoinCheck_DebugLoop);
+#endif
+ if (!--coinLoopCount && sect1->fHead && sect2->fHead) {
+ /* All known working cases resolve in two tries. Sadly, cubicConicTests[0]
+ gets stuck in a loop. It adds an extension to allow a coincident end
+ perpendicular to track its intersection in the opposite curve. However,
+ the bounding box of the extension does not intersect the original curve,
+ so the extension is discarded, only to be added again the next time around. */
+ sect1->coincidentForce(sect2, start1s, start1e);
+ sect1->validate();
+ sect2->validate();
+ }
+ }
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ if (!sect1->fHead) {
+ return;
+ }
+ sect1->computePerpendiculars(sect2, sect1->fHead, sect1->tail());
+ if (!sect2->fHead) {
+ return;
+ }
+ sect2->computePerpendiculars(sect1, sect2->fHead, sect2->tail());
+ if (!sect1->removeByPerpendicular(sect2)) {
+ return;
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kComputePerp_DebugLoop);
+#endif
+ if (sect1->collapsed() > sect1->fCurve.maxIntersections()) {
+ break;
+ }
+ }
+#if DEBUG_T_SECT_DUMP
+ sect1->dumpBoth(sect2);
+#endif
+ if (!sect1->fHead || !sect2->fHead) {
+ break;
+ }
+ } while (true);
+ SkTSpan* coincident = sect1->fCoincident;
+ if (coincident) {
+ // if there is more than one coincident span, check loosely to see if they should be joined
+ if (coincident->fNext) {
+ sect1->mergeCoincidence(sect2);
+ coincident = sect1->fCoincident;
+ }
+ SkASSERT(sect2->fCoincident); // courtesy check : coincidence only looks at sect 1
+ do {
+ if (!coincident) {
+ return;
+ }
+ if (!coincident->fCoinStart.isMatch()) {
+ continue;
+ }
+ if (!coincident->fCoinEnd.isMatch()) {
+ continue;
+ }
+ double perpT = coincident->fCoinStart.perpT();
+ if (perpT < 0) {
+ return;
+ }
+ int index = intersections->insertCoincident(coincident->fStartT,
+ perpT, coincident->pointFirst());
+ if ((intersections->insertCoincident(coincident->fEndT,
+ coincident->fCoinEnd.perpT(),
+ coincident->pointLast()) < 0) && index >= 0) {
+ intersections->clearCoincidence(index);
+ }
+ } while ((coincident = coincident->fNext));
+ }
+ int zeroOneSet = EndsEqual(sect1, sect2, intersections);
+// if (!sect1->fHead || !sect2->fHead) {
+ // if the final iteration contains an end (0 or 1),
+ if (sect1->fRemovedStartT && !(zeroOneSet & kZeroS1Set)) {
+ SkTCoincident perp; // intersect perpendicular with opposite curve
+ perp.setPerp(sect1->fCurve, 0, sect1->fCurve[0], sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(0, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect1->fRemovedEndT && !(zeroOneSet & kOneS1Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect1->fCurve, 1, sect1->pointLast(), sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(1, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedStartT && !(zeroOneSet & kZeroS2Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect2->fCurve, 0, sect2->fCurve[0], sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 0, perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedEndT && !(zeroOneSet & kOneS2Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect2->fCurve, 1, sect2->pointLast(), sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 1, perp.perpPt());
+ }
+ }
+// }
+ if (!sect1->fHead || !sect2->fHead) {
+ return;
+ }
+ sect1->recoverCollapsed();
+ sect2->recoverCollapsed();
+ SkTSpan* result1 = sect1->fHead;
+ // check heads and tails for zero and ones and insert them if we haven't already done so
+ const SkTSpan* head1 = result1;
+ if (!(zeroOneSet & kZeroS1Set) && approximately_less_than_zero(head1->fStartT)) {
+ const SkDPoint& start1 = sect1->fCurve[0];
+ if (head1->isBounded()) {
+ double t = head1->closestBoundedT(start1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(start1)) {
+ intersections->insert(0, t, start1);
+ }
+ }
+ }
+ const SkTSpan* head2 = sect2->fHead;
+ if (!(zeroOneSet & kZeroS2Set) && approximately_less_than_zero(head2->fStartT)) {
+ const SkDPoint& start2 = sect2->fCurve[0];
+ if (head2->isBounded()) {
+ double t = head2->closestBoundedT(start2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(start2)) {
+ intersections->insert(t, 0, start2);
+ }
+ }
+ }
+ if (!(zeroOneSet & kOneS1Set)) {
+ const SkTSpan* tail1 = sect1->tail();
+ if (!tail1) {
+ return;
+ }
+ if (approximately_greater_than_one(tail1->fEndT)) {
+ const SkDPoint& end1 = sect1->pointLast();
+ if (tail1->isBounded()) {
+ double t = tail1->closestBoundedT(end1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(end1)) {
+ intersections->insert(1, t, end1);
+ }
+ }
+ }
+ }
+ if (!(zeroOneSet & kOneS2Set)) {
+ const SkTSpan* tail2 = sect2->tail();
+ if (!tail2) {
+ return;
+ }
+ if (approximately_greater_than_one(tail2->fEndT)) {
+ const SkDPoint& end2 = sect2->pointLast();
+ if (tail2->isBounded()) {
+ double t = tail2->closestBoundedT(end2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(end2)) {
+ intersections->insert(t, 1, end2);
+ }
+ }
+ }
+ }
+ SkClosestSect closest;
+ do {
+ while (result1 && result1->fCoinStart.isMatch() && result1->fCoinEnd.isMatch()) {
+ result1 = result1->fNext;
+ }
+ if (!result1) {
+ break;
+ }
+ SkTSpan* result2 = sect2->fHead;
+ bool found = false;
+ while (result2) {
+ found |= closest.find(result1, result2 SkDEBUGPARAMS(intersections));
+ result2 = result2->fNext;
+ }
+ } while ((result1 = result1->fNext));
+ closest.finish(intersections);
+ // if there is more than one intersection and it isn't already coincident, check
+ int last = intersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ if (intersections->isCoincident(index) && intersections->isCoincident(index + 1)) {
+ ++index;
+ continue;
+ }
+ double midT = ((*intersections)[0][index] + (*intersections)[0][index + 1]) / 2;
+ SkDPoint midPt = sect1->fCurve.ptAtT(midT);
+ // intersect perpendicular with opposite curve
+ SkTCoincident perp;
+ perp.setPerp(sect1->fCurve, midT, midPt, sect2->fCurve);
+ if (!perp.isMatch()) {
+ ++index;
+ continue;
+ }
+ if (intersections->isCoincident(index)) {
+ intersections->removeOne(index);
+ --last;
+ } else if (intersections->isCoincident(index + 1)) {
+ intersections->removeOne(index + 1);
+ --last;
+ } else {
+ intersections->setCoincident(index++);
+ }
+ intersections->setCoincident(index);
+ }
+ SkOPOBJASSERT(intersections, intersections->used() <= sect1->fCurve.maxIntersections());
+}
+
+int SkIntersections::intersect(const SkDQuad& q1, const SkDQuad& q2) {
+ SkTQuad quad1(q1);
+ SkTQuad quad2(q2);
+ SkTSect sect1(quad1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& c, const SkDQuad& q) {
+ SkTConic conic(c);
+ SkTQuad quad(q);
+ SkTSect sect1(conic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& c1, const SkDConic& c2) {
+ SkTConic conic1(c1);
+ SkTConic conic2(c2);
+ SkTSect sect1(conic1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(conic2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& c, const SkDQuad& q) {
+ SkTCubic cubic(c);
+ SkTQuad quad(q);
+ SkTSect sect1(cubic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& cu, const SkDConic& co) {
+ SkTCubic cubic(cu);
+ SkTConic conic(co);
+ SkTSect sect1(cubic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(conic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& c1, const SkDCubic& c2) {
+ SkTCubic cubic1(c1);
+ SkTCubic cubic2(c2);
+ SkTSect sect1(cubic1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(cubic2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.h b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
new file mode 100644
index 0000000000..ddd5acdf06
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTSect_DEFINED
+#define SkPathOpsTSect_DEFINED
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+#include <utility>
+
+#ifdef SK_DEBUG
+typedef uint8_t SkOpDebugBool;
+#else
+typedef bool SkOpDebugBool;
+#endif
+
+#define SkDoubleIsNaN sk_double_isnan
+
+class SkTCoincident {
+public:
+ SkTCoincident() {
+ this->init();
+ }
+
+ void debugInit() {
+#ifdef SK_DEBUG
+ this->fPerpPt.fX = this->fPerpPt.fY = SK_ScalarNaN;
+ this->fPerpT = SK_ScalarNaN;
+ this->fMatch = 0xFF;
+#endif
+ }
+
+ char dumpIsCoincidentStr() const;
+ void dump() const;
+
+ bool isMatch() const {
+ SkASSERT(!!fMatch == fMatch);
+ return SkToBool(fMatch);
+ }
+
+ void init() {
+ fPerpT = -1;
+ fMatch = false;
+ fPerpPt.fX = fPerpPt.fY = SK_ScalarNaN;
+ }
+
+ void markCoincident() {
+ if (!fMatch) {
+ fPerpT = -1;
+ }
+ fMatch = true;
+ }
+
+ const SkDPoint& perpPt() const {
+ return fPerpPt;
+ }
+
+ double perpT() const {
+ return fPerpT;
+ }
+
+ void setPerp(const SkTCurve& c1, double t, const SkDPoint& cPt, const SkTCurve& );
+
+private:
+ SkDPoint fPerpPt;
+ double fPerpT; // perpendicular intersection on opposite curve
+ SkOpDebugBool fMatch;
+};
+
+class SkTSect;
+class SkTSpan;
+
+struct SkTSpanBounded {
+ SkTSpan* fBounded;
+ SkTSpanBounded* fNext;
+};
+
+class SkTSpan {
+public:
+ SkTSpan(const SkTCurve& curve, SkArenaAlloc& heap) {
+ fPart = curve.make(heap);
+ }
+
+ void addBounded(SkTSpan* , SkArenaAlloc* );
+ double closestBoundedT(const SkDPoint& pt) const;
+ bool contains(double t) const;
+
+ void debugInit(const SkTCurve& curve, SkArenaAlloc& heap) {
+#ifdef SK_DEBUG
+ SkTCurve* dummy = curve.make(heap);
+ dummy->debugInit();
+ init(*dummy);
+ initBounds(*dummy);
+ fCoinStart.init();
+ fCoinEnd.init();
+#endif
+ }
+
+ const SkTSect* debugOpp() const;
+
+#ifdef SK_DEBUG
+ void debugSetGlobalState(SkOpGlobalState* state) {
+ fDebugGlobalState = state;
+ }
+
+ const SkTSpan* debugSpan(int ) const;
+ const SkTSpan* debugT(double t) const;
+ bool debugIsBefore(const SkTSpan* span) const;
+#endif
+ void dump() const;
+ void dumpAll() const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+
+ double endT() const {
+ return fEndT;
+ }
+
+ SkTSpan* findOppSpan(const SkTSpan* opp) const;
+
+ SkTSpan* findOppT(double t) const {
+ SkTSpan* result = oppT(t);
+ SkOPASSERT(result);
+ return result;
+ }
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() const { return fDebugGlobalState; })
+
+ bool hasOppT(double t) const {
+ return SkToBool(oppT(t));
+ }
+
+ int hullsIntersect(SkTSpan* span, bool* start, bool* oppStart);
+ void init(const SkTCurve& );
+ bool initBounds(const SkTCurve& );
+
+ bool isBounded() const {
+ return fBounded != nullptr;
+ }
+
+ bool linearsIntersect(SkTSpan* span);
+ double linearT(const SkDPoint& ) const;
+
+ void markCoincident() {
+ fCoinStart.markCoincident();
+ fCoinEnd.markCoincident();
+ }
+
+ const SkTSpan* next() const {
+ return fNext;
+ }
+
+ bool onlyEndPointsInCommon(const SkTSpan* opp, bool* start,
+ bool* oppStart, bool* ptsInCommon);
+
+ const SkTCurve& part() const {
+ return *fPart;
+ }
+
+ int pointCount() const {
+ return fPart->pointCount();
+ }
+
+ const SkDPoint& pointFirst() const {
+ return (*fPart)[0];
+ }
+
+ const SkDPoint& pointLast() const {
+ return (*fPart)[fPart->pointLast()];
+ }
+
+ bool removeAllBounded();
+ bool removeBounded(const SkTSpan* opp);
+
+ void reset() {
+ fBounded = nullptr;
+ }
+
+ void resetBounds(const SkTCurve& curve) {
+ fIsLinear = fIsLine = false;
+ initBounds(curve);
+ }
+
+ bool split(SkTSpan* work, SkArenaAlloc* heap) {
+ return splitAt(work, (work->fStartT + work->fEndT) * 0.5, heap);
+ }
+
+ bool splitAt(SkTSpan* work, double t, SkArenaAlloc* heap);
+
+ double startT() const {
+ return fStartT;
+ }
+
+private:
+
+ // implementation is for testing only
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ void dumpID() const;
+
+ int hullCheck(const SkTSpan* opp, bool* start, bool* oppStart);
+ int linearIntersects(const SkTCurve& ) const;
+ SkTSpan* oppT(double t) const;
+
+ void validate() const;
+ void validateBounded() const;
+ void validatePerpT(double oppT) const;
+ void validatePerpPt(double t, const SkDPoint& ) const;
+
+ SkTCurve* fPart;
+ SkTCoincident fCoinStart;
+ SkTCoincident fCoinEnd;
+ SkTSpanBounded* fBounded;
+ SkTSpan* fPrev;
+ SkTSpan* fNext;
+ SkDRect fBounds;
+ double fStartT;
+ double fEndT;
+ double fBoundsMax;
+ SkOpDebugBool fCollapsed;
+ SkOpDebugBool fHasPerp;
+ SkOpDebugBool fIsLinear;
+ SkOpDebugBool fIsLine;
+ SkOpDebugBool fDeleted;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect* fDebugSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ friend class SkTSect;
+};
+
+class SkTSect {
+public:
+ SkTSect(const SkTCurve& c
+ SkDEBUGPARAMS(SkOpGlobalState* ) PATH_OPS_DEBUG_T_SECT_PARAMS(int id));
+ static void BinarySearch(SkTSect* sect1, SkTSect* sect2,
+ SkIntersections* intersections);
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() { return fDebugGlobalState; })
+ bool hasBounded(const SkTSpan* ) const;
+
+ const SkTSect* debugOpp() const {
+ return SkDEBUGRELEASE(fOppSect, nullptr);
+ }
+
+#ifdef SK_DEBUG
+ const SkTSpan* debugSpan(int id) const;
+ const SkTSpan* debugT(double t) const;
+#endif
+ void dump() const;
+ void dumpBoth(SkTSect* ) const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+ void dumpCoinCurves() const;
+ void dumpCurves() const;
+
+private:
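+ // bit flags recording which curve ends (t == 0 or t == 1 on sect 1 and 2)
+ // EndsEqual has already reported, so later passes skip duplicate inserts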
+ enum {
+ kZeroS1Set = 1,
+ kOneS1Set = 2,
+ kZeroS2Set = 4,
+ kOneS2Set = 8
+ };
+
+ SkTSpan* addFollowing(SkTSpan* prior);
+ void addForPerp(SkTSpan* span, double t);
+ SkTSpan* addOne();
+
+ SkTSpan* addSplitAt(SkTSpan* span, double t) {
+ SkTSpan* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->splitAt(span, t, &fHeap);
+ result->initBounds(fCurve);
+ span->initBounds(fCurve);
+ return result;
+ }
+
+ bool binarySearchCoin(SkTSect* , double tStart, double tStep, double* t,
+ double* oppT, SkTSpan** oppFirst);
+ SkTSpan* boundsMax();
+ bool coincidentCheck(SkTSect* sect2);
+ void coincidentForce(SkTSect* sect2, double start1s, double start1e);
+ bool coincidentHasT(double t);
+ int collapsed() const;
+ void computePerpendiculars(SkTSect* sect2, SkTSpan* first,
+ SkTSpan* last);
+ int countConsecutiveSpans(SkTSpan* first,
+ SkTSpan** last) const;
+
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ bool deleteEmptySpans();
+ void dumpCommon(const SkTSpan* ) const;
+ void dumpCommonCurves(const SkTSpan* ) const;
+ static int EndsEqual(const SkTSect* sect1, const SkTSect* sect2,
+ SkIntersections* );
+ bool extractCoincident(SkTSect* sect2, SkTSpan* first,
+ SkTSpan* last, SkTSpan** result);
+ SkTSpan* findCoincidentRun(SkTSpan* first, SkTSpan** lastPtr);
+ int intersects(SkTSpan* span, SkTSect* opp,
+ SkTSpan* oppSpan, int* oppResult);
+ bool isParallel(const SkDLine& thisLine, const SkTSect* opp) const;
+ int linesIntersect(SkTSpan* span, SkTSect* opp,
+ SkTSpan* oppSpan, SkIntersections* );
+ bool markSpanGone(SkTSpan* span);
+ bool matchedDirection(double t, const SkTSect* sect2, double t2) const;
+ void matchedDirCheck(double t, const SkTSect* sect2, double t2,
+ bool* calcMatched, bool* oppMatched) const;
+ void mergeCoincidence(SkTSect* sect2);
+
+ const SkDPoint& pointLast() const {
+ return fCurve[fCurve.pointLast()];
+ }
+
+ SkTSpan* prev(SkTSpan* ) const;
+ bool removeByPerpendicular(SkTSect* opp);
+ void recoverCollapsed();
+ bool removeCoincident(SkTSpan* span, bool isBetween);
+ void removeAllBut(const SkTSpan* keep, SkTSpan* span,
+ SkTSect* opp);
+ bool removeSpan(SkTSpan* span);
+ void removeSpanRange(SkTSpan* first, SkTSpan* last);
+ bool removeSpans(SkTSpan* span, SkTSect* opp);
+ void removedEndCheck(SkTSpan* span);
+
+ void resetRemovedEnds() {
+ fRemovedStartT = fRemovedEndT = false;
+ }
+
+ SkTSpan* spanAtT(double t, SkTSpan** priorSpan);
+ SkTSpan* tail();
+ bool trim(SkTSpan* span, SkTSect* opp);
+ bool unlinkSpan(SkTSpan* span);
+ bool updateBounded(SkTSpan* first, SkTSpan* last,
+ SkTSpan* oppFirst);
+ void validate() const;
+ void validateBounded() const;
+
+ const SkTCurve& fCurve;
+ SkSTArenaAlloc<1024> fHeap;
+ SkTSpan* fHead;
+ SkTSpan* fCoincident;
+ SkTSpan* fDeleted;
+ int fActiveCount;
+ bool fRemovedStartT;
+ bool fRemovedEndT;
+ bool fHung;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect* fOppSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fDebugCount);
+#if DEBUG_T_SECT
+ int fDebugAllocatedCount;
+#endif
+ friend class SkTSpan;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
new file mode 100644
index 0000000000..ff1d177dc5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsCommon.h"
+
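+// When every control point lies between its curve's endpoints, the convex-hull
+// property means path.getBounds() is already tight; otherwise the path is
+// rebuilt into sorted contours whose bounds are unioned with any bounds
+// contributed by bare move-to points.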
+bool TightBounds(const SkPath& path, SkRect* result) {
+ SkPath::RawIter iter(path);
+ SkRect moveBounds = { SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin };
+ bool wellBehaved = true;
+ SkPath::Verb verb;
+ do {
+ SkPoint pts[4];
+ verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ moveBounds.fLeft = SkTMin(moveBounds.fLeft, pts[0].fX);
+ moveBounds.fTop = SkTMin(moveBounds.fTop, pts[0].fY);
+ moveBounds.fRight = SkTMax(moveBounds.fRight, pts[0].fX);
+ moveBounds.fBottom = SkTMax(moveBounds.fBottom, pts[0].fY);
+ break;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[2].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[2].fY);
+ break;
+ case SkPath::kCubic_Verb:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[3].fY);
+ wellBehaved &= between(pts[0].fX, pts[2].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[2].fY, pts[3].fY);
+ break;
+ default:
+ break;
+ }
+ } while (verb != SkPath::kDone_Verb);
+ if (wellBehaved) {
+ *result = path.getBounds();
+ return true;
+ }
+ SkSTArenaAlloc<4096> allocator; // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ // turn path into list of segments
+ SkOpEdgeBuilder builder(path, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+ if (!SortContourList(&contourList, false, false)) {
+ *result = moveBounds;
+ return true;
+ }
+ SkOpContour* current = contourList;
+ SkPathOpsBounds bounds = current->bounds();
+ while ((current = current->next())) {
+ bounds.add(current->bounds());
+ }
+ *result = bounds;
+ if (!moveBounds.isEmpty()) {
+ result->join(moveBounds);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
new file mode 100644
index 0000000000..92c3a7ae87
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/private/SkFloatBits.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+static bool arguments_denormalized(float a, float b, int epsilon) {
+ float denormalizedCheck = FLT_EPSILON * epsilon / 2;
+ return fabsf(a) <= denormalizedCheck && fabsf(b) <= denormalizedCheck;
+}
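+// Near zero, ULP spacing becomes vanishingly small, so a straight ULP
+// comparison would treat numerically negligible values as far apart; callers
+// use this check to short-circuit that case.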
+
+// from http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+// FIXME: move to SkFloatBits.h
+static bool equal_ulps(float a, float b, int epsilon, int depsilon) {
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
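+// For example: SkFloatAs2sCompliment maps floats onto a monotonically ordered
+// integer line, so adjacent representable floats differ by exactly 1. 1.0f is
+// 0x3F800000 and the next float up is 0x3F800001; with epsilon == 16 the two
+// compare equal here, while values 16 or more representable steps apart do not.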
+
+static bool equal_ulps_no_normal_check(float a, float b, int epsilon, int depsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool equal_ulps_pin(float a, float b, int epsilon, int depsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool d_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool not_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool not_equal_ulps_pin(float a, float b, int epsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool d_not_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool less_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a <= b - FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits <= bBits - epsilon;
+}
+
+static bool less_or_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a < b + FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon;
+}
+
+// equality using the same error term as between
+bool AlmostBequalUlps(float a, float b) {
+ const int UlpsEpsilon = 2;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostPequalUlps(float a, float b) {
+ const int UlpsEpsilon = 8;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(double a, double b) {
+ if (fabs(a) < SK_ScalarMax && fabs(b) < SK_ScalarMax) {
+ return AlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+ }
+ return fabs(a - b) / SkTMax(fabs(a), fabs(b)) < FLT_EPSILON * 16;
+}
+
+bool AlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_no_normal_check(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_pin(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps_pin(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool RoughlyEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 256;
+ const int DUlpsEpsilon = 1024;
+ return equal_ulps(a, b, UlpsEpsilon, DUlpsEpsilon);
+}
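+// These wrappers form a tolerance ladder: Bequal (2 ULPs) is the tightest,
+// Pequal allows 8, the Equal/Dequal family 16, and RoughlyEqual 256 (1024 for
+// its denormalized check), so callers pick the loosest comparison the geometry
+// can tolerate.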
+
+bool AlmostBetweenUlps(float a, float b, float c) {
+ const int UlpsEpsilon = 2;
+ return a <= c ? less_or_equal_ulps(a, b, UlpsEpsilon) && less_or_equal_ulps(b, c, UlpsEpsilon)
+ : less_or_equal_ulps(b, a, UlpsEpsilon) && less_or_equal_ulps(c, b, UlpsEpsilon);
+}
+
+bool AlmostLessUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostLessOrEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_or_equal_ulps(a, b, UlpsEpsilon);
+}
+
+int UlpsDistance(float a, float b) {
+ SkFloatIntUnion floatIntA, floatIntB;
+ floatIntA.fFloat = a;
+ floatIntB.fFloat = b;
+ // Different signs means they do not match.
+ if ((floatIntA.fSignBitInt < 0) != (floatIntB.fSignBitInt < 0)) {
+ // Check for equality to make sure +0 == -0
+ return a == b ? 0 : SK_MaxS32;
+ }
+ // Find the difference in ULPs.
+ return SkTAbs(floatIntA.fSignBitInt - floatIntB.fSignBitInt);
+}
+
+// cube root approximation using bit hack for 64-bit float
+// adapted from Kahan's cbrt
+static double cbrt_5d(double d) {
+ const unsigned int B1 = 715094163;
+ double t = 0.0;
+ unsigned int* pt = (unsigned int*) &t;
+ unsigned int* px = (unsigned int*) &d;
+ pt[1] = px[1] / 3 + B1;
+ return t;
+}
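+// The hack divides the biased exponent, held in the high word of the double
+// (px[1] on little-endian targets), by 3 and rebiases with B1, yielding a
+// rough estimate that the Halley iterations below refine.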
+
+// iterative cube root approximation using Halley's method (double)
+static double cbrta_halleyd(const double a, const double R) {
+ const double a3 = a * a * a;
+ const double b = a * (a3 + R + R) / (a3 + a3 + R);
+ return b;
+}
+
+// cube root approximation using 3 iterations of Halley's method (double)
+static double halley_cbrt3d(double d) {
+ double a = cbrt_5d(d);
+ a = cbrta_halleyd(a, d);
+ a = cbrta_halleyd(a, d);
+ return cbrta_halleyd(a, d);
+}
+
+double SkDCubeRoot(double x) {
+ if (approximately_zero_cubed(x)) {
+ return 0;
+ }
+ double result = halley_cbrt3d(fabs(x));
+ if (x < 0) {
+ result = -result;
+ }
+ return result;
+}
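+// For example, SkDCubeRoot(27.0) returns a value within a few ULPs of 3.0 and
+// SkDCubeRoot(-8.0) of -2.0, since the sign is reapplied after taking the root
+// of the magnitude.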
+
+SkOpGlobalState::SkOpGlobalState(SkOpContourHead* head,
+ SkArenaAlloc* allocator
+ SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName))
+ : fAllocator(allocator)
+ , fCoincidence(nullptr)
+ , fContourHead(head)
+ , fNested(0)
+ , fWindingFailed(false)
+ , fPhase(SkOpPhase::kIntersecting)
+ SkDEBUGPARAMS(fDebugTestName(testName))
+ SkDEBUGPARAMS(fAngleID(0))
+ SkDEBUGPARAMS(fCoinID(0))
+ SkDEBUGPARAMS(fContourID(0))
+ SkDEBUGPARAMS(fPtTID(0))
+ SkDEBUGPARAMS(fSegmentID(0))
+ SkDEBUGPARAMS(fSpanID(0))
+ SkDEBUGPARAMS(fDebugSkipAssert(debugSkipAssert)) {
+#if DEBUG_T_SECT_LOOP_COUNT
+ debugResetLoopCounts();
+#endif
+#if DEBUG_COIN
+ fPreviousFuncName = nullptr;
+#endif
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.h b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
new file mode 100644
index 0000000000..0edd1ba9e7
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTypes_DEFINED
+#define SkPathOpsTypes_DEFINED
+
+#include <float.h> // for FLT_EPSILON
+
+#include "include/core/SkPath.h"
+#include "include/core/SkScalar.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/SkFloatingPoint.h"
+#include "include/private/SkSafe_math.h"
+#include "src/pathops/SkPathOpsDebug.h"
+
+enum SkPathOpsMask {
+ kWinding_PathOpsMask = -1,
+ kNo_PathOpsMask = 0,
+ kEvenOdd_PathOpsMask = 1
+};
+
+class SkArenaAlloc;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpContourHead;
+class SkIntersections;
+class SkIntersectionHelper;
+
+enum class SkOpPhase : char {
+ kNoChange,
+ kIntersecting,
+ kWalking,
+ kFixWinding,
+};
+
+class SkOpGlobalState {
+public:
+ SkOpGlobalState(SkOpContourHead* head,
+ SkArenaAlloc* allocator SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName));
+
+ enum {
+ kMaxWindingTries = 10
+ };
+
+ bool allocatedOpSpan() const {
+ return fAllocatedOpSpan;
+ }
+
+ SkArenaAlloc* allocator() {
+ return fAllocator;
+ }
+
+ void bumpNested() {
+ ++fNested;
+ }
+
+ void clearNested() {
+ fNested = 0;
+ }
+
+ SkOpCoincidence* coincidence() {
+ return fCoincidence;
+ }
+
+ SkOpContourHead* contourHead() {
+ return fContourHead;
+ }
+
+#ifdef SK_DEBUG
+ const class SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+ const class SkOpPtT* debugPtT(int id) const;
+#endif
+
+ static bool DebugRunFail();
+
+#ifdef SK_DEBUG
+ const class SkOpSegment* debugSegment(int id) const;
+ bool debugSkipAssert() const { return fDebugSkipAssert; }
+ const class SkOpSpanBase* debugSpan(int id) const;
+ const char* debugTestName() const { return fDebugTestName; }
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+ void debugAddLoopCount(SkIntersections* , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& );
+ void debugDoYourWorst(SkOpGlobalState* );
+ void debugLoopReport();
+ void debugResetLoopCounts();
+#endif
+
+#if DEBUG_COINCIDENCE
+ void debugSetCheckHealth(bool check) { fDebugCheckHealth = check; }
+ bool debugCheckHealth() const { return fDebugCheckHealth; }
+#endif
+
+#if DEBUG_VALIDATE || DEBUG_COIN
+ void debugSetPhase(const char* funcName DEBUG_COIN_DECLARE_PARAMS()) const;
+#endif
+
+#if DEBUG_COIN
+ void debugAddToCoinChangedDict();
+ void debugAddToGlobalCoinDicts();
+ SkPathOpsDebug::CoinDict* debugCoinChangedDict() { return &fCoinChangedDict; }
+ const SkPathOpsDebug::CoinDictEntry& debugCoinDictEntry() const { return fCoinDictEntry; }
+
+ static void DumpCoinDict();
+#endif
+
+
+ int nested() const {
+ return fNested;
+ }
+
+#ifdef SK_DEBUG
+ int nextAngleID() {
+ return ++fAngleID;
+ }
+
+ int nextCoinID() {
+ return ++fCoinID;
+ }
+
+ int nextContourID() {
+ return ++fContourID;
+ }
+
+ int nextPtTID() {
+ return ++fPtTID;
+ }
+
+ int nextSegmentID() {
+ return ++fSegmentID;
+ }
+
+ int nextSpanID() {
+ return ++fSpanID;
+ }
+#endif
+
+ SkOpPhase phase() const {
+ return fPhase;
+ }
+
+ void resetAllocatedOpSpan() {
+ fAllocatedOpSpan = false;
+ }
+
+ void setAllocatedOpSpan() {
+ fAllocatedOpSpan = true;
+ }
+
+ void setCoincidence(SkOpCoincidence* coincidence) {
+ fCoincidence = coincidence;
+ }
+
+ void setContourHead(SkOpContourHead* contourHead) {
+ fContourHead = contourHead;
+ }
+
+ void setPhase(SkOpPhase phase) {
+ if (SkOpPhase::kNoChange == phase) {
+ return;
+ }
+ SkASSERT(fPhase != phase);
+ fPhase = phase;
+ }
+
+ // called in very rare cases where angles are sorted incorrectly -- signifies the op will fail
+ void setWindingFailed() {
+ fWindingFailed = true;
+ }
+
+ bool windingFailed() const {
+ return fWindingFailed;
+ }
+
+private:
+ SkArenaAlloc* fAllocator;
+ SkOpCoincidence* fCoincidence;
+ SkOpContourHead* fContourHead;
+ int fNested;
+ bool fAllocatedOpSpan;
+ bool fWindingFailed;
+ SkOpPhase fPhase;
+#ifdef SK_DEBUG
+ const char* fDebugTestName;
+ void* fDebugReporter;
+ int fAngleID;
+ int fCoinID;
+ int fContourID;
+ int fPtTID;
+ int fSegmentID;
+ int fSpanID;
+ bool fDebugSkipAssert;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+ SkPath::Verb fDebugWorstVerb[6];
+ SkPoint fDebugWorstPts[24];
+ float fDebugWorstWeight[6];
+#endif
+#if DEBUG_COIN
+ SkPathOpsDebug::CoinDict fCoinChangedDict;
+ SkPathOpsDebug::CoinDict fCoinVisitedDict;
+ SkPathOpsDebug::CoinDictEntry fCoinDictEntry;
+ const char* fPreviousFuncName;
+#endif
+#if DEBUG_COINCIDENCE
+ bool fDebugCheckHealth;
+#endif
+};
+
+#ifdef SK_DEBUG
+#if DEBUG_COINCIDENCE
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ (this->globalState()->debugCheckHealth() || \
+ this->globalState()->debugSkipAssert())) || (cond))
+#else
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ this->globalState()->debugSkipAssert()) || (cond))
+#endif
+#define SkOPOBJASSERT(obj, cond) SkASSERT((obj->globalState() && \
+ obj->globalState()->debugSkipAssert()) || (cond))
+#else
+#define SkOPASSERT(cond)
+#define SkOPOBJASSERT(obj, cond)
+#endif
+
+// Use Almost Equal when comparing coordinates. Use epsilon to compare T values.
+bool AlmostEqualUlps(float a, float b);
+inline bool AlmostEqualUlps(double a, double b) {
+ return AlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b);
+inline bool AlmostEqualUlpsNoNormalCheck(double a, double b) {
+ return AlmostEqualUlpsNoNormalCheck(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlps_Pin(float a, float b);
+inline bool AlmostEqualUlps_Pin(double a, double b) {
+ return AlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Dequal when the comparison should not special-case denormalized values.
+bool AlmostDequalUlps(float a, float b);
+bool AlmostDequalUlps(double a, double b);
+
+bool NotAlmostEqualUlps(float a, float b);
+inline bool NotAlmostEqualUlps(double a, double b) {
+ return NotAlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b);
+inline bool NotAlmostEqualUlps_Pin(double a, double b) {
+ return NotAlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostDequalUlps(float a, float b);
+inline bool NotAlmostDequalUlps(double a, double b) {
+ return NotAlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Bequal when comparing coordinates in conjunction with between.
+bool AlmostBequalUlps(float a, float b);
+inline bool AlmostBequalUlps(double a, double b) {
+ return AlmostBequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostPequalUlps(float a, float b);
+inline bool AlmostPequalUlps(double a, double b) {
+ return AlmostPequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool RoughlyEqualUlps(float a, float b);
+inline bool RoughlyEqualUlps(double a, double b) {
+ return RoughlyEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessUlps(float a, float b);
+inline bool AlmostLessUlps(double a, double b) {
+ return AlmostLessUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessOrEqualUlps(float a, float b);
+inline bool AlmostLessOrEqualUlps(double a, double b) {
+ return AlmostLessOrEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostBetweenUlps(float a, float b, float c);
+inline bool AlmostBetweenUlps(double a, double b, double c) {
+ return AlmostBetweenUlps(SkDoubleToScalar(a), SkDoubleToScalar(b), SkDoubleToScalar(c));
+}
+
+int UlpsDistance(float a, float b);
+inline int UlpsDistance(double a, double b) {
+ return UlpsDistance(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// FLT_EPSILON == 1.19209290E-07 == 1 / (2 ^ 23)
+// DBL_EPSILON == 2.22045e-16
+const double FLT_EPSILON_CUBED = FLT_EPSILON * FLT_EPSILON * FLT_EPSILON;
+const double FLT_EPSILON_HALF = FLT_EPSILON / 2;
+const double FLT_EPSILON_DOUBLE = FLT_EPSILON * 2;
+const double FLT_EPSILON_ORDERABLE_ERR = FLT_EPSILON * 16;
+const double FLT_EPSILON_SQUARED = FLT_EPSILON * FLT_EPSILON;
+// Use a compile-time constant for FLT_EPSILON_SQRT to avoid initializers.
+// A 17 digit constant guarantees exact results.
+const double FLT_EPSILON_SQRT = 0.00034526697709225118; // sqrt(FLT_EPSILON);
+const double FLT_EPSILON_INVERSE = 1 / FLT_EPSILON;
+const double DBL_EPSILON_ERR = DBL_EPSILON * 4; // FIXME: tune -- allow a few bits of error
+const double DBL_EPSILON_SUBDIVIDE_ERR = DBL_EPSILON * 16;
+const double ROUGH_EPSILON = FLT_EPSILON * 64;
+const double MORE_ROUGH_EPSILON = FLT_EPSILON * 256;
+const double WAY_ROUGH_EPSILON = FLT_EPSILON * 2048;
+const double BUMP_EPSILON = FLT_EPSILON * 4096;
+
+const SkScalar INVERSE_NUMBER_RANGE = FLT_EPSILON_ORDERABLE_ERR;
+
+inline bool zero_or_one(double x) {
+ return x == 0 || x == 1;
+}
+
+inline bool approximately_zero(double x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool precisely_zero(double x) {
+ return fabs(x) < DBL_EPSILON_ERR;
+}
+
+inline bool precisely_subdivide_zero(double x) {
+ return fabs(x) < DBL_EPSILON_SUBDIVIDE_ERR;
+}
+
+inline bool approximately_zero(float x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool approximately_zero_cubed(double x) {
+ return fabs(x) < FLT_EPSILON_CUBED;
+}
+
+inline bool approximately_zero_half(double x) {
+ return fabs(x) < FLT_EPSILON_HALF;
+}
+
+inline bool approximately_zero_double(double x) {
+ return fabs(x) < FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_zero_orderable(double x) {
+ return fabs(x) < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool approximately_zero_squared(double x) {
+ return fabs(x) < FLT_EPSILON_SQUARED;
+}
+
+inline bool approximately_zero_sqrt(double x) {
+ return fabs(x) < FLT_EPSILON_SQRT;
+}
+
+inline bool roughly_zero(double x) {
+ return fabs(x) < ROUGH_EPSILON;
+}
+
+inline bool approximately_zero_inverse(double x) {
+ return fabs(x) > FLT_EPSILON_INVERSE;
+}
+
+inline bool approximately_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * FLT_EPSILON);
+}
+
+inline bool precisely_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * DBL_EPSILON);
+}
+
+inline bool roughly_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * ROUGH_EPSILON);
+}
+
+// Use this for comparing Ts in the range of 0 to 1. For general numbers (larger and smaller) use
+// AlmostEqualUlps instead.
+inline bool approximately_equal(double x, double y) {
+ return approximately_zero(x - y);
+}
+
+inline bool precisely_equal(double x, double y) {
+ return precisely_zero(x - y);
+}
+
+inline bool precisely_subdivide_equal(double x, double y) {
+ return precisely_subdivide_zero(x - y);
+}
+
+inline bool approximately_equal_half(double x, double y) {
+ return approximately_zero_half(x - y);
+}
+
+inline bool approximately_equal_double(double x, double y) {
+ return approximately_zero_double(x - y);
+}
+
+inline bool approximately_equal_orderable(double x, double y) {
+ return approximately_zero_orderable(x - y);
+}
+
+inline bool approximately_equal_squared(double x, double y) {
+ return approximately_equal(x, y);
+}
+
+inline bool approximately_greater(double x, double y) {
+ return x - FLT_EPSILON >= y;
+}
+
+inline bool approximately_greater_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE >= y;
+}
+
+inline bool approximately_greater_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR >= y;
+}
+
+inline bool approximately_greater_or_equal(double x, double y) {
+ return x + FLT_EPSILON > y;
+}
+
+inline bool approximately_greater_or_equal_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE > y;
+}
+
+inline bool approximately_greater_or_equal_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR > y;
+}
+
+inline bool approximately_lesser(double x, double y) {
+ return x + FLT_EPSILON <= y;
+}
+
+inline bool approximately_lesser_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE <= y;
+}
+
+inline bool approximately_lesser_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR <= y;
+}
+
+inline bool approximately_lesser_or_equal(double x, double y) {
+ return x - FLT_EPSILON < y;
+}
+
+inline bool approximately_lesser_or_equal_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE < y;
+}
+
+inline bool approximately_lesser_or_equal_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR < y;
+}
+
+inline bool approximately_greater_than_one(double x) {
+ return x > 1 - FLT_EPSILON;
+}
+
+inline bool precisely_greater_than_one(double x) {
+ return x > 1 - DBL_EPSILON_ERR;
+}
+
+inline bool approximately_less_than_zero(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool precisely_less_than_zero(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_negative(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool approximately_negative_orderable(double x) {
+ return x < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool precisely_negative(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_one_or_less(double x) {
+ return x < 1 + FLT_EPSILON;
+}
+
+inline bool approximately_one_or_less_double(double x) {
+ return x < 1 + FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_positive(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_positive_squared(double x) {
+ return x > -(FLT_EPSILON_SQUARED);
+}
+
+inline bool approximately_zero_or_more(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_zero_or_more_double(double x) {
+ return x > -FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_between_orderable(double a, double b, double c) {
+ return a <= c
+ ? approximately_negative_orderable(a - b) && approximately_negative_orderable(b - c)
+ : approximately_negative_orderable(b - a) && approximately_negative_orderable(c - b);
+}
+
+inline bool approximately_between(double a, double b, double c) {
+ return a <= c ? approximately_negative(a - b) && approximately_negative(b - c)
+ : approximately_negative(b - a) && approximately_negative(c - b);
+}
+
+inline bool precisely_between(double a, double b, double c) {
+ return a <= c ? precisely_negative(a - b) && precisely_negative(b - c)
+ : precisely_negative(b - a) && precisely_negative(c - b);
+}
+
+// returns true if (a <= b <= c) || (a >= b >= c)
+inline bool between(double a, double b, double c) {
+ SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
+ || (precisely_zero(a) && precisely_zero(b) && precisely_zero(c)));
+ return (a - b) * (c - b) <= 0;
+}
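+// For example: between(0, 0.5, 1) and between(1, 0.5, 0) are both true, while
+// between(0, 2, 1) is false because (0 - 2) * (1 - 2) == 2 is positive.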
+
+inline bool roughly_equal(double x, double y) {
+ return fabs(x - y) < ROUGH_EPSILON;
+}
+
+inline bool roughly_negative(double x) {
+ return x < ROUGH_EPSILON;
+}
+
+inline bool roughly_between(double a, double b, double c) {
+ return a <= c ? roughly_negative(a - b) && roughly_negative(b - c)
+ : roughly_negative(b - a) && roughly_negative(c - b);
+}
+
+inline bool more_roughly_equal(double x, double y) {
+ return fabs(x - y) < MORE_ROUGH_EPSILON;
+}
+
+struct SkDPoint;
+struct SkDVector;
+struct SkDLine;
+struct SkDQuad;
+struct SkDConic;
+struct SkDCubic;
+struct SkDRect;
+
+inline SkPath::Verb SkPathOpsPointsToVerb(int points) {
+ int verb = (1 << points) >> 1;
+#ifdef SK_DEBUG
+ switch (points) {
+ case 0: SkASSERT(SkPath::kMove_Verb == verb); break;
+ case 1: SkASSERT(SkPath::kLine_Verb == verb); break;
+ case 2: SkASSERT(SkPath::kQuad_Verb == verb); break;
+ case 3: SkASSERT(SkPath::kCubic_Verb == verb); break;
+ default: SkDEBUGFAIL("should not be here");
+ }
+#endif
+ return (SkPath::Verb)verb;
+}
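+// The shift trick maps point counts 0, 1, 2, 3 to verbs 0 (move), 1 (line),
+// 2 (quad), and 4 (cubic); a conic (verb 3) cannot be produced this way since
+// it also carries two points plus a weight.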
+
+inline int SkPathOpsVerbToPoints(SkPath::Verb verb) {
+ int points = (int) verb - (((int) verb + 1) >> 2);
+#ifdef SK_DEBUG
+ switch (verb) {
+ case SkPath::kLine_Verb: SkASSERT(1 == points); break;
+ case SkPath::kQuad_Verb: SkASSERT(2 == points); break;
+ case SkPath::kConic_Verb: SkASSERT(2 == points); break;
+ case SkPath::kCubic_Verb: SkASSERT(3 == points); break;
+ default: SkDEBUGFAIL("should not get here");
+ }
+#endif
+ return points;
+}
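+// verb - ((verb + 1) >> 2) maps line (1) -> 1, quad (2) -> 2, conic (3) -> 2,
+// and cubic (4) -> 3: the count of points past the start point.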
+
+inline double SkDInterp(double A, double B, double t) {
+ return A + (B - A) * t;
+}
+
+double SkDCubeRoot(double x);
+
+/* Returns -1 if negative, 0 if zero, 1 if positive
+*/
+inline int SkDSign(double x) {
+ return (x > 0) - (x < 0);
+}
+
+/* Returns 0 if negative, 1 if zero, 2 if positive
+*/
+inline int SKDSide(double x) {
+ return (x > 0) + (x >= 0);
+}
+
+/* Returns 1 if negative, 2 if zero, 4 if positive
+*/
+inline int SkDSideBit(double x) {
+ return 1 << SKDSide(x);
+}
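+// Taken together: SkDSign yields -1/0/1, SKDSide shifts that to 0/1/2, and
+// SkDSideBit expands it to the mask 1/2/4, so the sides of two values can be
+// combined with bitwise ops.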
+
+inline double SkPinT(double t) {
+ return precisely_less_than_zero(t) ? 0 : precisely_greater_than_one(t) ? 1 : t;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
new file mode 100644
index 0000000000..4eb7298f3a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// given a prospective edge, compute its initial winding by projecting a ray
+// if the ray hits another edge
+ // if the edge doesn't have a winding yet, hop up to that edge and start over
+ // concern : check for hops forming a loop
+ // if the edge is unsortable, or
+ // the intersection is nearly at the ends, or
+ // the tangent at the intersection is nearly coincident to the ray,
+ // choose a different ray and try again
+ // concern : if it is unable to succeed after N tries, try another edge? direction?
+// if no edge is hit, compute the winding directly
+
+// given the top span, project the most perpendicular ray and look for intersections
+ // let's try up and then down. What the hey
+
+// bestXY is initialized by caller with basePt
+
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathOpsCurve.h"
+
+#include <utility>
+
+enum class SkOpRayDir {
+ kLeft,
+ kTop,
+ kRight,
+ kBottom,
+};
+
+#if DEBUG_WINDING
+const char* gDebugRayDirName[] = {
+ "kLeft",
+ "kTop",
+ "kRight",
+ "kBottom"
+};
+#endif
+
+static int xy_index(SkOpRayDir dir) {
+ return static_cast<int>(dir) & 1;
+}
+
+static SkScalar pt_xy(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[xy_index(dir)];
+}
+
+static SkScalar pt_yx(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[!xy_index(dir)];
+}
+
+static double pt_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[xy_index(dir)];
+}
+
+static double pt_dydx(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[!xy_index(dir)];
+}
+
+static SkScalar rect_side(const SkRect& r, SkOpRayDir dir) {
+ return (&r.fLeft)[static_cast<int>(dir)];
+}
+
+static bool sideways_overlap(const SkRect& rect, const SkPoint& pt, SkOpRayDir dir) {
+ int i = !xy_index(dir);
+ return approximately_between((&rect.fLeft)[i], (&pt.fX)[i], (&rect.fRight)[i]);
+}
+
+static bool less_than(SkOpRayDir dir) {
+ return static_cast<bool>((static_cast<int>(dir) & 2) == 0);
+}
+
+static bool ccw_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ bool vPartPos = pt_dydx(v, dir) > 0;
+ bool leftBottom = ((static_cast<int>(dir) + 1) & 2) != 0;
+ return vPartPos == leftBottom;
+}
+
+struct SkOpRayHit {
+ SkOpRayDir makeTestBase(SkOpSpan* span, double t) {
+ fNext = nullptr;
+ fSpan = span;
+ fT = span->t() * (1 - t) + span->next()->t() * t;
+ SkOpSegment* segment = span->segment();
+ fSlope = segment->dSlopeAtT(fT);
+ fPt = segment->ptAtT(fT);
+ fValid = true;
+ return fabs(fSlope.fX) < fabs(fSlope.fY) ? SkOpRayDir::kLeft : SkOpRayDir::kTop;
+ }
+
+ SkOpRayHit* fNext;
+ SkOpSpan* fSpan;
+ SkPoint fPt;
+ double fT;
+ SkDVector fSlope;
+ bool fValid;
+};
+
+void SkOpContour::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkArenaAlloc* allocator) {
+ // if the bounds extreme is outside the best, we're done
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ SkOpSegment* testSegment = &fHead;
+ do {
+ testSegment->rayCheck(base, dir, hits, allocator);
+ } while ((testSegment = testSegment->next()));
+}
+
+void SkOpSegment::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkArenaAlloc* allocator) {
+ if (!sideways_overlap(fBounds, base.fPt, dir)) {
+ return;
+ }
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ double tVals[3];
+ SkScalar baseYX = pt_yx(base.fPt, dir);
+ int roots = (*CurveIntercept[fVerb * 2 + xy_index(dir)])(fPts, fWeight, baseYX, tVals);
+ for (int index = 0; index < roots; ++index) {
+ double t = tVals[index];
+ if (base.fSpan->segment() == this && approximately_equal(base.fT, t)) {
+ continue;
+ }
+ SkDVector slope;
+ SkPoint pt;
+ SkDEBUGCODE(sk_bzero(&slope, sizeof(slope)));
+ bool valid = false;
+ if (approximately_zero(t)) {
+ pt = fPts[0];
+ } else if (approximately_equal(t, 1)) {
+ pt = fPts[SkPathOpsVerbToPoints(fVerb)];
+ } else {
+ SkASSERT(between(0, t, 1));
+ pt = this->ptAtT(t);
+ if (SkDPoint::ApproximatelyEqual(pt, base.fPt)) {
+ if (base.fSpan->segment() == this) {
+ continue;
+ }
+ } else {
+ SkScalar ptXY = pt_xy(pt, dir);
+ if (!approximately_equal(baseXY, ptXY) && (baseXY < ptXY) == checkLessThan) {
+ continue;
+ }
+ slope = this->dSlopeAtT(t);
+ if (fVerb == SkPath::kCubic_Verb && base.fSpan->segment() == this
+ && roughly_equal(base.fT, t)
+ && SkDPoint::RoughlyEqual(pt, base.fPt)) {
+ #if DEBUG_WINDING
+ SkDebugf("%s (rarely expect this)\n", __FUNCTION__);
+ #endif
+ continue;
+ }
+ if (fabs(pt_dydx(slope, dir) * 10000) > fabs(pt_dxdy(slope, dir))) {
+ valid = true;
+ }
+ }
+ }
+ SkOpSpan* span = this->windingSpanAtT(t);
+ if (!span) {
+ valid = false;
+ } else if (!span->windValue() && !span->oppValue()) {
+ continue;
+ }
+ SkOpRayHit* newHit = allocator->make<SkOpRayHit>();
+ newHit->fNext = *hits;
+ newHit->fPt = pt;
+ newHit->fSlope = slope;
+ newHit->fSpan = span;
+ newHit->fT = t;
+ newHit->fValid = valid;
+ *hits = newHit;
+ }
+}
+
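+// Return the span whose t range contains tHit, or nullptr if tHit falls on a
+// span boundary (or beyond the final span) and so cannot supply a winding.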
+SkOpSpan* SkOpSegment::windingSpanAtT(double tHit) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (approximately_equal(tHit, next->t())) {
+ return nullptr;
+ }
+ if (tHit < next->t()) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+static bool hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fX < b->fPt.fX;
+}
+
+static bool reverse_hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fX < a->fPt.fX;
+}
+
+static bool hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fY < b->fPt.fY;
+}
+
+static bool reverse_hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fY < a->fPt.fY;
+}
+
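+// Produce successive test positions for the winding ray: the low bit of tTry
+// alternates the ray direction, while the remaining bits walk a binary
+// subdivision of (0..1), so repeated failures probe the span at fresh points.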
+static double get_t_guess(int tTry, int* dirOffset) {
+ double t = 0.5;
+ *dirOffset = tTry & 1;
+ int tBase = tTry >> 1;
+ int tBits = 0;
+ while (tTry >>= 1) {
+ t /= 2;
+ ++tBits;
+ }
+ if (tBits) {
+ int tIndex = (tBase - 1) & ((1 << tBits) - 1);
+ t += t * 2 * tIndex;
+ }
+ return t;
+}
+
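+// Cast a ray from a t guess on this span and accumulate winding from the
+// sorted hits; return false (so the caller retries with the next guess) when
+// a hit is invalid, lacks a span, or lands coincident with a neighboring hit.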
+bool SkOpSpan::sortableTop(SkOpContour* contourHead) {
+ SkSTArenaAlloc<1024> allocator;
+ int dirOffset;
+ double t = get_t_guess(fTopTTry++, &dirOffset);
+ SkOpRayHit hitBase;
+ SkOpRayDir dir = hitBase.makeTestBase(this, t);
+ if (hitBase.fSlope.fX == 0 && hitBase.fSlope.fY == 0) {
+ return false;
+ }
+ SkOpRayHit* hitHead = &hitBase;
+ dir = static_cast<SkOpRayDir>(static_cast<int>(dir) + dirOffset);
+ if (hitBase.fSpan && hitBase.fSpan->segment()->verb() > SkPath::kLine_Verb
+ && !pt_dydx(hitBase.fSlope, dir)) {
+ return false;
+ }
+ SkOpContour* contour = contourHead;
+ do {
+ if (!contour->count()) {
+ continue;
+ }
+ contour->rayCheck(hitBase, dir, &hitHead, &allocator);
+ } while ((contour = contour->next()));
+ // sort hits
+ SkSTArray<1, SkOpRayHit*> sorted;
+ SkOpRayHit* hit = hitHead;
+ while (hit) {
+ sorted.push_back(hit);
+ hit = hit->fNext;
+ }
+ int count = sorted.count();
+ SkTQSort(sorted.begin(), sorted.end() - 1, xy_index(dir)
+ ? less_than(dir) ? hit_compare_y : reverse_hit_compare_y
+ : less_than(dir) ? hit_compare_x : reverse_hit_compare_x);
+ // verify windings
+#if DEBUG_WINDING
+ SkDebugf("%s dir=%s seg=%d t=%1.9g pt=(%1.9g,%1.9g)\n", __FUNCTION__,
+ gDebugRayDirName[static_cast<int>(dir)], hitBase.fSpan->segment()->debugID(),
+ hitBase.fT, hitBase.fPt.fX, hitBase.fPt.fY);
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ SkOpSpan* span = hit->fSpan;
+ SkOpSegment* hitSegment = span ? span->segment() : nullptr;
+ bool operand = span ? hitSegment->operand() : false;
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+ SkDebugf("%s [%d] valid=%d operand=%d span=%d ccw=%d ", __FUNCTION__, index,
+ hit->fValid, operand, span ? span->debugID() : -1, ccw);
+ if (span) {
+ hitSegment->dumpPtsInner();
+ }
+ SkDebugf(" t=%1.9g pt=(%1.9g,%1.9g) slope=(%1.9g,%1.9g)\n", hit->fT,
+ hit->fPt.fX, hit->fPt.fY, hit->fSlope.fX, hit->fSlope.fY);
+ }
+#endif
+ const SkPoint* last = nullptr;
+ int wind = 0;
+ int oppWind = 0;
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ if (!hit->fValid) {
+ return false;
+ }
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+// SkASSERT(!approximately_zero(hit->fT) || !hit->fValid);
+ SkOpSpan* span = hit->fSpan;
+ if (!span) {
+ return false;
+ }
+ SkOpSegment* hitSegment = span->segment();
+ if (span->windValue() == 0 && span->oppValue() == 0) {
+ continue;
+ }
+ if (last && SkDPoint::ApproximatelyEqual(*last, hit->fPt)) {
+ return false;
+ }
+ if (index < count - 1) {
+ const SkPoint& next = sorted[index + 1]->fPt;
+ if (SkDPoint::ApproximatelyEqual(next, hit->fPt)) {
+ return false;
+ }
+ }
+ bool operand = hitSegment->operand();
+ if (operand) {
+ using std::swap;
+ swap(wind, oppWind);
+ }
+ int lastWind = wind;
+ int lastOpp = oppWind;
+ int windValue = ccw ? -span->windValue() : span->windValue();
+ int oppValue = ccw ? -span->oppValue() : span->oppValue();
+ wind += windValue;
+ oppWind += oppValue;
+ bool sumSet = false;
+ int spanSum = span->windSum();
+ int windSum = SkOpSegment::UseInnerWinding(lastWind, wind) ? wind : lastWind;
+ if (spanSum == SK_MinS32) {
+ span->setWindSum(windSum);
+ sumSet = true;
+ } else {
+ // the need for this condition suggests that UseInnerWinding is flawed
+            // (this case was observed when lastWind == 1 and wind == -1)
+#if 0
+ SkASSERT((hitSegment->isXor() ? (windSum & 1) == (spanSum & 1) : windSum == spanSum)
+ || (abs(wind) == abs(lastWind)
+ && (windSum ^ wind ^ lastWind) == spanSum));
+#endif
+ }
+ int oSpanSum = span->oppSum();
+ int oppSum = SkOpSegment::UseInnerWinding(lastOpp, oppWind) ? oppWind : lastOpp;
+ if (oSpanSum == SK_MinS32) {
+ span->setOppSum(oppSum);
+ } else {
+#if 0
+ SkASSERT(hitSegment->oppXor() ? (oppSum & 1) == (oSpanSum & 1) : oppSum == oSpanSum
+ || (abs(oppWind) == abs(lastOpp)
+ && (oppSum ^ oppWind ^ lastOpp) == oSpanSum));
+#endif
+ }
+ if (sumSet) {
+ if (this->globalState()->phase() == SkOpPhase::kFixWinding) {
+ hitSegment->contour()->setCcw(ccw);
+ } else {
+ (void) hitSegment->markAndChaseWinding(span, span->next(), windSum, oppSum, nullptr);
+ (void) hitSegment->markAndChaseWinding(span->next(), span, windSum, oppSum, nullptr);
+ }
+ }
+ if (operand) {
+ using std::swap;
+ swap(wind, oppWind);
+ }
+ last = &hit->fPt;
+ this->globalState()->bumpNested();
+ }
+ return true;
+}
+
+SkOpSpan* SkOpSegment::findSortableTop(SkOpContour* contourHead) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (span->done()) {
+ continue;
+ }
+ if (span->windSum() != SK_MinS32) {
+ return span;
+ }
+ if (span->sortableTop(contourHead)) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+SkOpSpan* SkOpContour::findSortableTop(SkOpContour* contourHead) {
+ bool allDone = true;
+ if (fCount) {
+ SkOpSegment* testSegment = &fHead;
+ do {
+ if (testSegment->done()) {
+ continue;
+ }
+ allDone = false;
+ SkOpSpan* result = testSegment->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((testSegment = testSegment->next()));
+ }
+ if (allDone) {
+ fDone = true;
+ }
+ return nullptr;
+}
+
+SkOpSpan* FindSortableTop(SkOpContourHead* contourHead) {
+ for (int index = 0; index < SkOpGlobalState::kMaxWindingTries; ++index) {
+ SkOpContour* contour = contourHead;
+ do {
+ if (contour->done()) {
+ continue;
+ }
+ SkOpSpan* result = contour->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((contour = contour->next()));
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.cpp b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
new file mode 100644
index 0000000000..6c2faab068
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkTSort.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathWriter.h"
+
+// wrap path to keep track of whether the contour is initialized and non-empty
+SkPathWriter::SkPathWriter(SkPath& path)
+ : fPathPtr(&path)
+{
+ init();
+}
+
+void SkPathWriter::close() {
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ SkASSERT(this->isClosed());
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.close();\n");
+#endif
+ fCurrent.close();
+ fPathPtr->addPath(fCurrent);
+ fCurrent.reset();
+ init();
+}
+
+void SkPathWriter::conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight) {
+ SkPoint pt2pt = this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.conicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g);\n",
+ pt1.fX, pt1.fY, pt2pt.fX, pt2pt.fY, weight);
+#endif
+ fCurrent.conicTo(pt1, pt2pt, weight);
+}
+
+void SkPathWriter::cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3) {
+ SkPoint pt3pt = this->update(pt3);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.cubicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2.fX, pt2.fY, pt3pt.fX, pt3pt.fY);
+#endif
+ fCurrent.cubicTo(pt1, pt2, pt3pt);
+}
+
+bool SkPathWriter::deferredLine(const SkOpPtT* pt) {
+ SkASSERT(fFirstPtT);
+ SkASSERT(fDefer[0]);
+ if (fDefer[0] == pt) {
+        // FIXME: why are we adding a degenerate line? The caller should have preflighted this.
+ return true;
+ }
+ if (pt->contains(fDefer[0])) {
+        // FIXME: why are we adding a degenerate line?
+ return true;
+ }
+ if (this->matchedLast(pt)) {
+ return false;
+ }
+ if (fDefer[1] && this->changedSlopes(pt)) {
+ this->lineTo();
+ fDefer[0] = fDefer[1];
+ }
+ fDefer[1] = pt;
+ return true;
+}
+
+void SkPathWriter::deferredMove(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ fFirstPtT = fDefer[0] = pt;
+ return;
+ }
+ SkASSERT(fDefer[0]);
+ if (!this->matchedLast(pt)) {
+ this->finishContour();
+ fFirstPtT = fDefer[0] = pt;
+ }
+}
+
+void SkPathWriter::finishContour() {
+ if (!this->matchedLast(fDefer[0])) {
+ if (!fDefer[1]) {
+ return;
+ }
+ this->lineTo();
+ }
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ if (this->isClosed()) {
+ this->close();
+ } else {
+ SkASSERT(fDefer[1]);
+ fEndPtTs.push_back(fFirstPtT);
+ fEndPtTs.push_back(fDefer[1]);
+ fPartials.push_back(fCurrent);
+ this->init();
+ }
+}
+
+void SkPathWriter::init() {
+ fCurrent.reset();
+ fFirstPtT = fDefer[0] = fDefer[1] = nullptr;
+}
+
+bool SkPathWriter::isClosed() const {
+ return this->matchedLast(fFirstPtT);
+}
+
+void SkPathWriter::lineTo() {
+ if (fCurrent.isEmpty()) {
+ this->moveTo();
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.lineTo(%1.9g,%1.9g);\n", fDefer[1]->fPt.fX, fDefer[1]->fPt.fY);
+#endif
+ fCurrent.lineTo(fDefer[1]->fPt);
+}
+
+bool SkPathWriter::matchedLast(const SkOpPtT* test) const {
+ if (test == fDefer[1]) {
+ return true;
+ }
+ if (!test) {
+ return false;
+ }
+ if (!fDefer[1]) {
+ return false;
+ }
+ return test->contains(fDefer[1]);
+}
+
+void SkPathWriter::moveTo() {
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.moveTo(%1.9g,%1.9g);\n", fFirstPtT->fPt.fX, fFirstPtT->fPt.fY);
+#endif
+ fCurrent.moveTo(fFirstPtT->fPt);
+}
+
+void SkPathWriter::quadTo(const SkPoint& pt1, const SkOpPtT* pt2) {
+ SkPoint pt2pt = this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.quadTo(%1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2pt.fX, pt2pt.fY);
+#endif
+ fCurrent.quadTo(pt1, pt2pt);
+}
+
+// if the last point to be written matches the current path's first point, alter
+// it to avoid writing a degenerate lineTo when the path is closed
+SkPoint SkPathWriter::update(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ this->moveTo();
+ } else if (!this->matchedLast(fDefer[0])) {
+ this->lineTo();
+ }
+ SkPoint result = pt->fPt;
+ if (fFirstPtT && result != fFirstPtT->fPt && fFirstPtT->contains(pt)) {
+ result = fFirstPtT->fPt;
+ }
+ fDefer[0] = fDefer[1] = pt; // set both to know that there is not a pending deferred line
+ return result;
+}
+
+bool SkPathWriter::someAssemblyRequired() {
+ this->finishContour();
+ return fEndPtTs.count() > 0;
+}
+
+bool SkPathWriter::changedSlopes(const SkOpPtT* ptT) const {
+ if (matchedLast(fDefer[0])) {
+ return false;
+ }
+ SkVector deferDxdy = fDefer[1]->fPt - fDefer[0]->fPt;
+ SkVector lineDxdy = ptT->fPt - fDefer[1]->fPt;
+ return deferDxdy.fX * lineDxdy.fY != deferDxdy.fY * lineDxdy.fX;
+}
+
+class DistanceLessThan {
+public:
+ DistanceLessThan(double* distances) : fDistances(distances) { }
+ double* fDistances;
+ bool operator()(const int one, const int two) {
+ return fDistances[one] < fDistances[two];
+ }
+};
+
+ /*
+ check start and end of each contour
+ if not the same, record them
+ match them up
+ connect closest
+ reassemble contour pieces into new path
+ */
+void SkPathWriter::assemble() {
+#if DEBUG_SHOW_TEST_NAME
+ SkDebugf("</div>\n");
+#endif
+ if (!this->someAssemblyRequired()) {
+ return;
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("%s\n", __FUNCTION__);
+#endif
+ SkOpPtT const* const* runs = fEndPtTs.begin(); // starts, ends of partial contours
+ int endCount = fEndPtTs.count(); // all starts and ends
+ SkASSERT(endCount > 0);
+ SkASSERT(endCount == fPartials.count() * 2);
+#if DEBUG_ASSEMBLE
+ for (int index = 0; index < endCount; index += 2) {
+ const SkOpPtT* eStart = runs[index];
+ const SkOpPtT* eEnd = runs[index + 1];
+ SkASSERT(eStart != eEnd);
+ SkASSERT(!eStart->contains(eEnd));
+ SkDebugf("%s contour start=(%1.9g,%1.9g) end=(%1.9g,%1.9g)\n", __FUNCTION__,
+ eStart->fPt.fX, eStart->fPt.fY, eEnd->fPt.fX, eEnd->fPt.fY);
+ }
+#endif
+ // lengthen any partial contour adjacent to a simple segment
+ for (int pIndex = 0; pIndex < endCount; pIndex++) {
+ SkOpPtT* opPtT = const_cast<SkOpPtT*>(runs[pIndex]);
+ SkPath dummy;
+ SkPathWriter partWriter(dummy);
+ do {
+ if (!zero_or_one(opPtT->fT)) {
+ break;
+ }
+ SkOpSpanBase* opSpanBase = opPtT->span();
+ SkOpSpanBase* start = opPtT->fT ? opSpanBase->prev() : opSpanBase->upCast()->next();
+ int step = opPtT->fT ? 1 : -1;
+ const SkOpSegment* opSegment = opSpanBase->segment();
+ const SkOpSegment* nextSegment = opSegment->isSimple(&start, &step);
+ if (!nextSegment) {
+ break;
+ }
+ SkOpSpanBase* opSpanEnd = start->t() ? start->prev() : start->upCast()->next();
+ if (start->starter(opSpanEnd)->alreadyAdded()) {
+ break;
+ }
+ nextSegment->addCurveTo(start, opSpanEnd, &partWriter);
+ opPtT = opSpanEnd->ptT();
+ SkOpPtT** runsPtr = const_cast<SkOpPtT**>(&runs[pIndex]);
+ *runsPtr = opPtT;
+ } while (true);
+ partWriter.finishContour();
+ const SkTArray<SkPath>& partPartials = partWriter.partials();
+ if (!partPartials.count()) {
+ continue;
+ }
+ // if pIndex is even, reverse and prepend to fPartials; otherwise, append
+ SkPath& partial = const_cast<SkPath&>(fPartials[pIndex >> 1]);
+ const SkPath& part = partPartials[0];
+ if (pIndex & 1) {
+ partial.addPath(part, SkPath::kExtend_AddPathMode);
+ } else {
+ SkPath reverse;
+ reverse.reverseAddPath(part);
+ reverse.addPath(partial, SkPath::kExtend_AddPathMode);
+ partial = reverse;
+ }
+ }
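+    // Greedily pair the closest remaining start/end points: sLink/eLink record
+    // which partial contour each start/end connects to, and a bitwise-NOT
+    // entry marks a link joining like ends, so the next piece must be reversed.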
+ SkTDArray<int> sLink, eLink;
+ int linkCount = endCount / 2; // number of partial contours
+ sLink.append(linkCount);
+ eLink.append(linkCount);
+ int rIndex, iIndex;
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ sLink[rIndex] = eLink[rIndex] = SK_MaxS32;
+ }
+ const int entries = endCount * (endCount - 1) / 2; // folded triangle
+ SkSTArray<8, double, true> distances(entries);
+ SkSTArray<8, int, true> sortedDist(entries);
+ SkSTArray<8, int, true> distLookup(entries);
+ int rRow = 0;
+ int dIndex = 0;
+ for (rIndex = 0; rIndex < endCount - 1; ++rIndex) {
+ const SkOpPtT* oPtT = runs[rIndex];
+ for (iIndex = rIndex + 1; iIndex < endCount; ++iIndex) {
+ const SkOpPtT* iPtT = runs[iIndex];
+ double dx = iPtT->fPt.fX - oPtT->fPt.fX;
+ double dy = iPtT->fPt.fY - oPtT->fPt.fY;
+ double dist = dx * dx + dy * dy;
+ distLookup.push_back(rRow + iIndex);
+ distances.push_back(dist); // oStart distance from iStart
+ sortedDist.push_back(dIndex++);
+ }
+ rRow += endCount;
+ }
+ SkASSERT(dIndex == entries);
+ SkTQSort<int>(sortedDist.begin(), sortedDist.end() - 1, DistanceLessThan(distances.begin()));
+ int remaining = linkCount; // number of start/end pairs
+ for (rIndex = 0; rIndex < entries; ++rIndex) {
+ int pair = sortedDist[rIndex];
+ pair = distLookup[pair];
+ int row = pair / endCount;
+ int col = pair - row * endCount;
+ int ndxOne = row >> 1;
+ bool endOne = row & 1;
+ int* linkOne = endOne ? eLink.begin() : sLink.begin();
+ if (linkOne[ndxOne] != SK_MaxS32) {
+ continue;
+ }
+ int ndxTwo = col >> 1;
+ bool endTwo = col & 1;
+ int* linkTwo = endTwo ? eLink.begin() : sLink.begin();
+ if (linkTwo[ndxTwo] != SK_MaxS32) {
+ continue;
+ }
+ SkASSERT(&linkOne[ndxOne] != &linkTwo[ndxTwo]);
+ bool flip = endOne == endTwo;
+ linkOne[ndxOne] = flip ? ~ndxTwo : ndxTwo;
+ linkTwo[ndxTwo] = flip ? ~ndxOne : ndxOne;
+ if (!--remaining) {
+ break;
+ }
+ }
+ SkASSERT(!remaining);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ int s = sLink[rIndex];
+ int e = eLink[rIndex];
+ SkDebugf("%s %c%d <- s%d - e%d -> %c%d\n", __FUNCTION__, s < 0 ? 's' : 'e',
+ s < 0 ? ~s : s, rIndex, rIndex, e < 0 ? 'e' : 's', e < 0 ? ~e : e);
+ }
+#endif
+ rIndex = 0;
+ do {
+ bool forward = true;
+ bool first = true;
+ int sIndex = sLink[rIndex];
+ SkASSERT(sIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ int eIndex;
+ if (sIndex < 0) {
+ eIndex = sLink[~sIndex];
+ sLink[~sIndex] = SK_MaxS32;
+ } else {
+ eIndex = eLink[sIndex];
+ eLink[sIndex] = SK_MaxS32;
+ }
+ SkASSERT(eIndex != SK_MaxS32);
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s sIndex=%c%d eIndex=%c%d\n", __FUNCTION__, sIndex < 0 ? 's' : 'e',
+ sIndex < 0 ? ~sIndex : sIndex, eIndex < 0 ? 's' : 'e',
+ eIndex < 0 ? ~eIndex : eIndex);
+#endif
+ do {
+ const SkPath& contour = fPartials[rIndex];
+ if (!first) {
+ SkPoint prior, next;
+ if (!fPathPtr->getLastPt(&prior)) {
+ return;
+ }
+ if (forward) {
+ next = contour.getPoint(0);
+ } else {
+ SkAssertResult(contour.getLastPt(&next));
+ }
+ if (prior != next) {
+ /* TODO: if there is a gap between open path written so far and path to come,
+ connect by following segments from one to the other, rather than introducing
+ a diagonal to connect the two.
+ */
+ SkDebugf("");
+ }
+ }
+ if (forward) {
+ fPathPtr->addPath(contour,
+ first ? SkPath::kAppend_AddPathMode : SkPath::kExtend_AddPathMode);
+ } else {
+ SkASSERT(!first);
+ fPathPtr->reversePathTo(contour);
+ }
+ if (first) {
+ first = false;
+ }
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s rIndex=%d eIndex=%s%d close=%d\n", __FUNCTION__, rIndex,
+ eIndex < 0 ? "~" : "", eIndex < 0 ? ~eIndex : eIndex,
+ sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex));
+#endif
+ if (sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex)) {
+ fPathPtr->close();
+ break;
+ }
+ if (forward) {
+ eIndex = eLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ eLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(sLink[eIndex] == rIndex);
+ sLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(eLink[~eIndex] == ~rIndex);
+ eLink[~eIndex] = SK_MaxS32;
+ }
+ } else {
+ eIndex = sLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(eLink[eIndex] == rIndex);
+ eLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(sLink[~eIndex] == ~rIndex);
+ sLink[~eIndex] = SK_MaxS32;
+ }
+ }
+ rIndex = eIndex;
+ if (rIndex < 0) {
+ forward ^= 1;
+ rIndex = ~rIndex;
+ }
+ } while (true);
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ if (sLink[rIndex] != SK_MaxS32) {
+ break;
+ }
+ }
+ } while (rIndex < linkCount);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ SkASSERT(sLink[rIndex] == SK_MaxS32);
+ SkASSERT(eLink[rIndex] == SK_MaxS32);
+ }
+#endif
+ return;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.h b/gfx/skia/skia/src/pathops/SkPathWriter.h
new file mode 100644
index 0000000000..3963710ff0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathWriter_DEFINED
+#define SkPathWriter_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+
+class SkOpPtT;
+
+// Construct the path one contour at a time.
+// If the contour is closed, copy it to the final output.
+// Otherwise, keep the partial contour for later assembly.
+
+class SkPathWriter {
+public:
+ SkPathWriter(SkPath& path);
+ void assemble();
+ void conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight);
+ void cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3);
+ bool deferredLine(const SkOpPtT* pt);
+ void deferredMove(const SkOpPtT* pt);
+ void finishContour();
+ bool hasMove() const { return !fFirstPtT; }
+ void init();
+ bool isClosed() const;
+ const SkPath* nativePath() const { return fPathPtr; }
+ void quadTo(const SkPoint& pt1, const SkOpPtT* pt2);
+
+private:
+ bool changedSlopes(const SkOpPtT* pt) const;
+ void close();
+ const SkTDArray<const SkOpPtT*>& endPtTs() const { return fEndPtTs; }
+ void lineTo();
+ bool matchedLast(const SkOpPtT*) const;
+ void moveTo();
+ const SkTArray<SkPath>& partials() const { return fPartials; }
+ bool someAssemblyRequired();
+ SkPoint update(const SkOpPtT* pt);
+
+ SkPath fCurrent; // contour under construction
+ SkTArray<SkPath> fPartials; // contours with mismatched starts and ends
+ SkTDArray<const SkOpPtT*> fEndPtTs; // possible pt values for partial starts and ends
+ SkPath* fPathPtr; // closed contours are written here
+ const SkOpPtT* fDefer[2]; // [0] deferred move, [1] deferred line
+ const SkOpPtT* fFirstPtT; // first in current contour
+};
+
+#endif  // SkPathWriter_DEFINED
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.cpp b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
new file mode 100644
index 0000000000..72a6168717
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkGeometry.h"
+#include "src/pathops/SkReduceOrder.h"
+
+int SkReduceOrder::reduce(const SkDLine& line) {
+ fLine[0] = line[0];
+ int different = line[0] != line[1];
+ fLine[1] = line[different];
+ return 1 + different;
+}
+
+static int coincident_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = reduction[1] = quad[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDQuad& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int check_linear(const SkDQuad& quad,
+ int minX, int maxX, int minY, int maxY, SkDQuad& reduction) {
+ if (!quad.isLinear(0, 2)) {
+ return 0;
+ }
+    // all three points are collinear: return the line formed by the end points
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all three points in a line
+int SkReduceOrder::reduce(const SkDQuad& quad) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 3; ++index) {
+ if (quad[minX].fX > quad[index].fX) {
+ minX = index;
+ }
+ if (quad[minY].fY > quad[index].fY) {
+ minY = index;
+ }
+ if (quad[maxX].fX < quad[index].fX) {
+ maxX = index;
+ }
+ if (quad[maxY].fY < quad[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 3; ++index) {
+ if (AlmostEqualUlps(quad[index].fX, quad[minX].fX)) {
+ minXSet |= 1 << index;
+ }
+ if (AlmostEqualUlps(quad[index].fY, quad[minY].fY)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if ((minXSet & 0x05) == 0x5 && (minYSet & 0x05) == 0x5) { // test for degenerate
+ // this quad starts and ends at the same place, so never contributes
+ // to the fill
+ return coincident_line(quad, fQuad);
+ }
+ if (minXSet == 0x7) { // test for vertical line
+ return vertical_line(quad, fQuad);
+ }
+ if (minYSet == 0x7) { // test for horizontal line
+ return horizontal_line(quad, fQuad);
+ }
+ int result = check_linear(quad, minX, maxX, minY, maxY, fQuad);
+ if (result) {
+ return result;
+ }
+ fQuad = quad;
+ return 3;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+static int coincident_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = reduction[1] = cubic[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDCubic& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+// check to see if it is a quadratic or a line
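+// A cubic is a degree-elevated quadratic when c1 = c0 + (2/3)(q1 - c0) and
+// c2 = c3 + (2/3)(q1 - c3) for a single control point q1. Solving each for
+// q1 gives c0 + (3/2)(c1 - c0) and c3 + (3/2)(c2 - c3); the code forms both
+// candidates (per axis) and accepts when they nearly agree.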
+static int check_quadratic(const SkDCubic& cubic, SkDCubic& reduction) {
+ double dx10 = cubic[1].fX - cubic[0].fX;
+ double dx23 = cubic[2].fX - cubic[3].fX;
+ double midX = cubic[0].fX + dx10 * 3 / 2;
+ double sideAx = midX - cubic[3].fX;
+ double sideBx = dx23 * 3 / 2;
+ if (approximately_zero(sideAx) ? !approximately_equal(sideAx, sideBx)
+ : !AlmostEqualUlps_Pin(sideAx, sideBx)) {
+ return 0;
+ }
+ double dy10 = cubic[1].fY - cubic[0].fY;
+ double dy23 = cubic[2].fY - cubic[3].fY;
+ double midY = cubic[0].fY + dy10 * 3 / 2;
+ double sideAy = midY - cubic[3].fY;
+ double sideBy = dy23 * 3 / 2;
+ if (approximately_zero(sideAy) ? !approximately_equal(sideAy, sideBy)
+ : !AlmostEqualUlps_Pin(sideAy, sideBy)) {
+ return 0;
+ }
+ reduction[0] = cubic[0];
+ reduction[1].fX = midX;
+ reduction[1].fY = midY;
+ reduction[2] = cubic[3];
+ return 3;
+}
+
+static int check_linear(const SkDCubic& cubic,
+ int minX, int maxX, int minY, int maxY, SkDCubic& reduction) {
+ if (!cubic.isLinear(0, 3)) {
+ return 0;
+ }
+    // all four points are collinear: return the line formed by the outer points
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+/* food for thought:
+http://objectmix.com/graphics/132906-fast-precision-driven-cubic-quadratic-piecewise-degree-reduction-algos-2-a.html
+
+Given points c1, c2, c3 and c4 of a cubic Bezier, the points of the
+corresponding quadratic Bezier are (given in convex combinations of
+points):
+
+q1 = (11/13)c1 + (3/13)c2 -(3/13)c3 + (2/13)c4
+q2 = -c1 + (3/2)c2 + (3/2)c3 - c4
+q3 = (2/13)c1 - (3/13)c2 + (3/13)c3 + (11/13)c4
+
+Of course, this curve does not interpolate the end-points, but it would
+be interesting to see the behaviour of such a curve in an applet.
+
+--
+Kalle Rutanen
+http://kaba.hilvi.org
+
+*/
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all four points in a line
+ // note that three points in a line doesn't simplify a cubic
+// look for approximation with single quadratic
+ // save approximation with multiple quadratics for later
+int SkReduceOrder::reduce(const SkDCubic& cubic, Quadratics allowQuadratics) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 4; ++index) {
+ if (cubic[minX].fX > cubic[index].fX) {
+ minX = index;
+ }
+ if (cubic[minY].fY > cubic[index].fY) {
+ minY = index;
+ }
+ if (cubic[maxX].fX < cubic[index].fX) {
+ maxX = index;
+ }
+ if (cubic[maxY].fY < cubic[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 4; ++index) {
+ double cx = cubic[index].fX;
+ double cy = cubic[index].fY;
+ double denom = SkTMax(fabs(cx), SkTMax(fabs(cy),
+ SkTMax(fabs(cubic[minX].fX), fabs(cubic[minY].fY))));
+ if (denom == 0) {
+ minXSet |= 1 << index;
+ minYSet |= 1 << index;
+ continue;
+ }
+ double inv = 1 / denom;
+ if (approximately_equal_half(cx * inv, cubic[minX].fX * inv)) {
+ minXSet |= 1 << index;
+ }
+ if (approximately_equal_half(cy * inv, cubic[minY].fY * inv)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if (minXSet == 0xF) { // test for vertical line
+ if (minYSet == 0xF) { // return 1 if all four are coincident
+ return coincident_line(cubic, fCubic);
+ }
+ return vertical_line(cubic, fCubic);
+ }
+ if (minYSet == 0xF) { // test for horizontal line
+ return horizontal_line(cubic, fCubic);
+ }
+ int result = check_linear(cubic, minX, maxX, minY, maxY, fCubic);
+ if (result) {
+ return result;
+ }
+ if (allowQuadratics == SkReduceOrder::kAllow_Quadratics
+ && (result = check_quadratic(cubic, fCubic))) {
+ return result;
+ }
+ fCubic = cubic;
+ return 4;
+}
+
+SkPath::Verb SkReduceOrder::Quad(const SkPoint a[3], SkPoint* reducePts) {
+ SkDQuad quad;
+ quad.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(quad);
+ if (order == 2) { // quad became line
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fLine[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
+
+SkPath::Verb SkReduceOrder::Conic(const SkConic& c, SkPoint* reducePts) {
+ SkPath::Verb verb = SkReduceOrder::Quad(c.fPts, reducePts);
+ if (verb > SkPath::kLine_Verb && c.fW == 1) {
+ return SkPath::kQuad_Verb;
+ }
+ return verb == SkPath::kQuad_Verb ? SkPath::kConic_Verb : verb;
+}
+
+SkPath::Verb SkReduceOrder::Cubic(const SkPoint a[4], SkPoint* reducePts) {
+ if (SkDPoint::ApproximatelyEqual(a[0], a[1]) && SkDPoint::ApproximatelyEqual(a[0], a[2])
+ && SkDPoint::ApproximatelyEqual(a[0], a[3])) {
+ reducePts[0] = a[0];
+ return SkPath::kMove_Verb;
+ }
+ SkDCubic cubic;
+ cubic.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(cubic, kAllow_Quadratics);
+ if (order == 2 || order == 3) { // cubic became line or quad
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fQuad[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.h b/gfx/skia/skia/src/pathops/SkReduceOrder.h
new file mode 100644
index 0000000000..69ea0adb6f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkReduceOrder_DEFINED
+#define SkReduceOrder_DEFINED
+
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+struct SkConic;
+
+union SkReduceOrder {
+ enum Quadratics {
+ kNo_Quadratics,
+ kAllow_Quadratics
+ };
+
+ int reduce(const SkDCubic& cubic, Quadratics);
+ int reduce(const SkDLine& line);
+ int reduce(const SkDQuad& quad);
+
+ static SkPath::Verb Conic(const SkConic& conic, SkPoint* reducePts);
+ static SkPath::Verb Cubic(const SkPoint pts[4], SkPoint* reducePts);
+ static SkPath::Verb Quad(const SkPoint pts[3], SkPoint* reducePts);
+
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDCubic fCubic;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkBitmapKey.h b/gfx/skia/skia/src/pdf/SkBitmapKey.h
new file mode 100644
index 0000000000..72df0c7abe
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkBitmapKey.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapKey_DEFINED
+#define SkBitmapKey_DEFINED
+
+#include "include/core/SkRect.h"
+
+struct SkBitmapKey {
+ SkIRect fSubset;
+ uint32_t fID;
+ bool operator==(const SkBitmapKey& rhs) const {
+ return fID == rhs.fID && fSubset == rhs.fSubset;
+ }
+ bool operator!=(const SkBitmapKey& rhs) const { return !(*this == rhs); }
+};
+
+
+#endif // SkBitmapKey_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkClusterator.cpp b/gfx/skia/skia/src/pdf/SkClusterator.cpp
new file mode 100644
index 0000000000..ab2fceba71
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkClusterator.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkClusterator.h"
+
+#include "include/private/SkTo.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/utils/SkUTF.h"
+
+static bool is_reversed(const uint32_t* clusters, uint32_t count) {
+ // "ReversedChars" is how PDF deals with RTL text.
+    // Return true if there is more than one cluster and the cluster values
+    // decrease monotonically to zero.
+ if (count < 2 || clusters[0] == 0 || clusters[count - 1] != 0) {
+ return false;
+ }
+ for (uint32_t i = 0; i + 1 < count; ++i) {
+ if (clusters[i + 1] > clusters[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+SkClusterator::SkClusterator(const SkGlyphRun& run)
+ : fClusters(run.clusters().data())
+ , fUtf8Text(run.text().data())
+ , fGlyphCount(SkToU32(run.glyphsIDs().size()))
+ , fTextByteLength(SkToU32(run.text().size()))
+ , fReversedChars(fClusters ? is_reversed(fClusters, fGlyphCount) : false)
+{
+ if (fClusters) {
+ SkASSERT(fUtf8Text && fTextByteLength > 0 && fGlyphCount > 0);
+ } else {
+ SkASSERT(!fUtf8Text && fTextByteLength == 0);
+ }
+}
+
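+// Return the next run of glyphs that share a cluster value, together with the
+// UTF-8 range it maps to; the range ends at the smallest cluster value larger
+// than the current one, since clusters need not appear in text order.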
+SkClusterator::Cluster SkClusterator::next() {
+ if (fCurrentGlyphIndex >= fGlyphCount) {
+ return Cluster{nullptr, 0, 0, 0};
+ }
+ if (!fClusters || !fUtf8Text) {
+ return Cluster{nullptr, 0, fCurrentGlyphIndex++, 1};
+ }
+ uint32_t clusterGlyphIndex = fCurrentGlyphIndex;
+ uint32_t cluster = fClusters[clusterGlyphIndex];
+ do {
+ ++fCurrentGlyphIndex;
+ } while (fCurrentGlyphIndex < fGlyphCount && cluster == fClusters[fCurrentGlyphIndex]);
+ uint32_t clusterGlyphCount = fCurrentGlyphIndex - clusterGlyphIndex;
+ uint32_t clusterEnd = fTextByteLength;
+ for (unsigned i = 0; i < fGlyphCount; ++i) {
+ uint32_t c = fClusters[i];
+ if (c > cluster && c < clusterEnd) {
+ clusterEnd = c;
+ }
+ }
+ uint32_t clusterLen = clusterEnd - cluster;
+ return Cluster{fUtf8Text + cluster, clusterLen, clusterGlyphIndex, clusterGlyphCount};
+}
diff --git a/gfx/skia/skia/src/pdf/SkClusterator.h b/gfx/skia/skia/src/pdf/SkClusterator.h
new file mode 100644
index 0000000000..ec87d4dd66
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkClusterator.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkClusterator_DEFINED
+#define SkClusterator_DEFINED
+
+#include <vector>
+#include <cstdint>
+
+class SkGlyphRun;
+
+/** Given the m-to-n glyph-to-character mapping data (as returned by
+    HarfBuzz), iterate over the clusters. */
+class SkClusterator {
+public:
+ SkClusterator(const SkGlyphRun& run);
+ uint32_t glyphCount() const { return fGlyphCount; }
+ bool reversedChars() const { return fReversedChars; }
+ struct Cluster {
+ const char* fUtf8Text;
+ uint32_t fTextByteLength;
+ uint32_t fGlyphIndex;
+ uint32_t fGlyphCount;
+ explicit operator bool() const { return fGlyphCount != 0; }
+ bool operator==(const SkClusterator::Cluster& o) {
+ return fUtf8Text == o.fUtf8Text
+ && fTextByteLength == o.fTextByteLength
+ && fGlyphIndex == o.fGlyphIndex
+ && fGlyphCount == o.fGlyphCount;
+ }
+ };
+ Cluster next();
+
+private:
+ uint32_t const * const fClusters;
+ char const * const fUtf8Text;
+ uint32_t const fGlyphCount;
+ uint32_t const fTextByteLength;
+ bool const fReversedChars;
+ uint32_t fCurrentGlyphIndex = 0;
+};
+
+
+#endif // SkClusterator_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.cpp b/gfx/skia/skia/src/pdf/SkDeflate.cpp
new file mode 100644
index 0000000000..f803c17b46
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkDeflate.h"
+
+#include "include/core/SkData.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTraceEvent.h"
+
+#include "zlib.h"
+
+namespace {
+
+// Different zlib implementations use different T.
+// We've seen size_t and unsigned.
+template <typename T> void* skia_alloc_func(void*, T items, T size) {
+ return sk_calloc_throw(SkToSizeT(items) * SkToSizeT(size));
+}
+
+void skia_free_func(void*, void* address) { sk_free(address); }
+
+} // namespace
+
+#define SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE 4096
+#define SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE 4224  // 4096 + 128, usually big enough
+                                                   // to always do a single loop
+
+// called by both write() and finalize()
+static void do_deflate(int flush,
+ z_stream* zStream,
+ SkWStream* out,
+ unsigned char* inBuffer,
+ size_t inBufferSize) {
+ zStream->next_in = inBuffer;
+ zStream->avail_in = SkToInt(inBufferSize);
+ unsigned char outBuffer[SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE];
+ SkDEBUGCODE(int returnValue;)
+ do {
+ zStream->next_out = outBuffer;
+ zStream->avail_out = sizeof(outBuffer);
+ SkDEBUGCODE(returnValue =) deflate(zStream, flush);
+ SkASSERT(!zStream->msg);
+
+ out->write(outBuffer, sizeof(outBuffer) - zStream->avail_out);
+ } while (zStream->avail_in || !zStream->avail_out);
+ SkASSERT(flush == Z_FINISH
+ ? returnValue == Z_STREAM_END
+ : returnValue == Z_OK);
+}
+
+// Hide all zlib impl details.
+struct SkDeflateWStream::Impl {
+ SkWStream* fOut;
+ unsigned char fInBuffer[SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE];
+ size_t fInBufferIndex;
+ z_stream fZStream;
+};
+
+SkDeflateWStream::SkDeflateWStream(SkWStream* out,
+ int compressionLevel,
+ bool gzip)
+ : fImpl(skstd::make_unique<SkDeflateWStream::Impl>()) {
+ fImpl->fOut = out;
+ fImpl->fInBufferIndex = 0;
+ if (!fImpl->fOut) {
+ return;
+ }
+ fImpl->fZStream.next_in = nullptr;
+ fImpl->fZStream.zalloc = &skia_alloc_func;
+ fImpl->fZStream.zfree = &skia_free_func;
+ fImpl->fZStream.opaque = nullptr;
+ SkASSERT(compressionLevel <= 9 && compressionLevel >= -1);
+ SkDEBUGCODE(int r =) deflateInit2(&fImpl->fZStream, compressionLevel,
+ Z_DEFLATED, gzip ? 0x1F : 0x0F,
+ 8, Z_DEFAULT_STRATEGY);
+ SkASSERT(Z_OK == r);
+}
+
+SkDeflateWStream::~SkDeflateWStream() { this->finalize(); }
+
+void SkDeflateWStream::finalize() {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (!fImpl->fOut) {
+ return;
+ }
+ do_deflate(Z_FINISH, &fImpl->fZStream, fImpl->fOut, fImpl->fInBuffer,
+ fImpl->fInBufferIndex);
+ (void)deflateEnd(&fImpl->fZStream);
+ fImpl->fOut = nullptr;
+}
+
+bool SkDeflateWStream::write(const void* void_buffer, size_t len) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (!fImpl->fOut) {
+ return false;
+ }
+ const char* buffer = (const char*)void_buffer;
+ while (len > 0) {
+ size_t tocopy =
+ SkTMin(len, sizeof(fImpl->fInBuffer) - fImpl->fInBufferIndex);
+ memcpy(fImpl->fInBuffer + fImpl->fInBufferIndex, buffer, tocopy);
+ len -= tocopy;
+ buffer += tocopy;
+ fImpl->fInBufferIndex += tocopy;
+ SkASSERT(fImpl->fInBufferIndex <= sizeof(fImpl->fInBuffer));
+
+ // if the buffer isn't filled, don't call into zlib yet.
+ if (sizeof(fImpl->fInBuffer) == fImpl->fInBufferIndex) {
+ do_deflate(Z_NO_FLUSH, &fImpl->fZStream, fImpl->fOut,
+ fImpl->fInBuffer, fImpl->fInBufferIndex);
+ fImpl->fInBufferIndex = 0;
+ }
+ }
+ return true;
+}
+
+size_t SkDeflateWStream::bytesWritten() const {
+ return fImpl->fZStream.total_in + fImpl->fInBufferIndex;
+}
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.h b/gfx/skia/skia/src/pdf/SkDeflate.h
new file mode 100644
index 0000000000..46e24a531f
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFlate_DEFINED
+#define SkFlate_DEFINED
+
+#include "include/core/SkStream.h"
+
+/**
+ * Wrap a stream in this class to compress the information written to
+ * this stream using the Deflate algorithm.
+ *
+ * See http://en.wikipedia.org/wiki/DEFLATE
+ */
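+//
+// A minimal usage sketch (variable names are illustrative only):
+//
+//   SkDynamicMemoryWStream compressed;
+//   {
+//       SkDeflateWStream deflater(&compressed);  // default compression level
+//       deflater.write(srcBytes, srcLen);        // may be called repeatedly
+//   }  // destructor calls finalize(), flushing the deflate stream
+//   sk_sp<SkData> zdata = compressed.detachAsData();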
+class SkDeflateWStream final : public SkWStream {
+public:
+ /** Does not take ownership of the stream.
+
+ @param compressionLevel - 0 is no compression; 1 is best
+ speed; 9 is best compression. The default, -1, is to use
+ zlib's Z_DEFAULT_COMPRESSION level.
+
+ @param gzip iff true, output a gzip file. "The gzip format is
+ a wrapper, documented in RFC 1952, around a deflate stream."
+ gzip adds a header with a magic number to the beginning of the
+ stream, allowing a client to identify a gzip file.
+ */
+ SkDeflateWStream(SkWStream*,
+ int compressionLevel = -1,
+ bool gzip = false);
+
+ /** The destructor calls finalize(). */
+ ~SkDeflateWStream() override;
+
+ /** Write the end of the compressed stream. All subsequent calls to
+ write() will fail. Subsequent calls to finalize() do nothing. */
+ void finalize();
+
+ // The SkWStream interface:
+ bool write(const void*, size_t) override;
+ size_t bytesWritten() const override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> fImpl;
+};
+
+#endif // SkFlate_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
new file mode 100644
index 0000000000..2dc580c34b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/docs/SkPDFDocument.h"
+
+sk_sp<SkDocument> SkPDF::MakeDocument(SkWStream*, const SkPDF::Metadata&) { return nullptr; }
+
+void SkPDF::SetNodeId(SkCanvas* c, int n) {
+ c->drawAnnotation({0, 0, 0, 0}, "PDF_Node_Key", SkData::MakeWithCopy(&n, sizeof(n)).get());
+}
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.cpp b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
new file mode 100644
index 0000000000..f2c3a5d0cb
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkJpegInfo.h"
+
+#include "include/private/SkTo.h"
+
+#ifndef SK_HAS_JPEG_LIBRARY
+
+namespace {
+class JpegSegment {
+public:
+ JpegSegment(const void* data, size_t size)
+ : fData(static_cast<const char*>(data))
+ , fSize(size)
+ , fOffset(0)
+ , fLength(0) {}
+ bool read() {
+ if (!this->readBigendianUint16(&fMarker)) {
+ return false;
+ }
+ if (JpegSegment::StandAloneMarker(fMarker)) {
+ fLength = 0;
+ fBuffer = nullptr;
+ return true;
+ }
+ if (!this->readBigendianUint16(&fLength) || fLength < 2) {
+ return false;
+ }
+        fLength -= 2;  // the segment length field counts its own two bytes
+ if (fOffset + fLength > fSize) {
+ return false; // Segment too long.
+ }
+ fBuffer = &fData[fOffset];
+ fOffset += fLength;
+ return true;
+ }
+
+ bool isSOF() {
+ return (fMarker & 0xFFF0) == 0xFFC0 && fMarker != 0xFFC4 &&
+ fMarker != 0xFFC8 && fMarker != 0xFFCC;
+ }
+ uint16_t marker() { return fMarker; }
+ uint16_t length() { return fLength; }
+ const char* data() { return fBuffer; }
+
+ static uint16_t GetBigendianUint16(const char* ptr) {
+ // "the most significant byte shall come first"
+ return (static_cast<uint8_t>(ptr[0]) << 8) |
+ static_cast<uint8_t>(ptr[1]);
+ }
+
+private:
+ const char* const fData;
+ const size_t fSize;
+ size_t fOffset;
+ const char* fBuffer;
+ uint16_t fMarker;
+ uint16_t fLength;
+
+ bool readBigendianUint16(uint16_t* value) {
+ if (fOffset + 2 > fSize) {
+ return false;
+ }
+ *value = JpegSegment::GetBigendianUint16(&fData[fOffset]);
+ fOffset += 2;
+ return true;
+ }
+ static bool StandAloneMarker(uint16_t marker) {
+ // RST[m] markers or SOI, EOI, TEM
+ return (marker & 0xFFF8) == 0xFFD0 || marker == 0xFFD8 ||
+ marker == 0xFFD9 || marker == 0xFF01;
+ }
+};
+} // namespace
+
+bool SkGetJpegInfo(const void* data, size_t len,
+ SkISize* size,
+ SkEncodedInfo::Color* colorType,
+ SkEncodedOrigin* orientation) {
+ static const uint16_t kSOI = 0xFFD8;
+ static const uint16_t kAPP0 = 0xFFE0;
+ JpegSegment segment(data, len);
+ if (!segment.read() || segment.marker() != kSOI) {
+ return false; // not a JPEG
+ }
+ if (!segment.read() || segment.marker() != kAPP0) {
+ return false; // not an APP0 segment
+ }
+ static const char kJfif[] = {'J', 'F', 'I', 'F', '\0'};
+ SkASSERT(segment.data());
+ if (SkToSizeT(segment.length()) < sizeof(kJfif) ||
+ 0 != memcmp(segment.data(), kJfif, sizeof(kJfif))) {
+ return false; // Not JFIF JPEG
+ }
+ do {
+ if (!segment.read()) {
+ return false; // malformed JPEG
+ }
+ } while (!segment.isSOF());
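+    // SOF payload: [0] sample precision, [1..2] height, [3..4] width,
+    // [5] component count -- hence the six-byte minimum checked here.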
+ if (segment.length() < 6) {
+ return false; // SOF segment is short
+ }
+ if (8 != segment.data()[0]) {
+ return false; // Only support 8-bit precision
+ }
+ int numberOfComponents = segment.data()[5];
+ if (numberOfComponents != 1 && numberOfComponents != 3) {
+ return false; // Invalid JFIF
+ }
+ if (size) {
+ *size = {JpegSegment::GetBigendianUint16(&segment.data()[3]),
+ JpegSegment::GetBigendianUint16(&segment.data()[1])};
+ }
+ if (colorType) {
+ *colorType = numberOfComponents == 3 ? SkEncodedInfo::kYUV_Color
+ : SkEncodedInfo::kGray_Color;
+ }
+ if (orientation) {
+ *orientation = kTopLeft_SkEncodedOrigin;
+ }
+ return true;
+}
+#endif // SK_HAS_JPEG_LIBRARY
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.h b/gfx/skia/skia/src/pdf/SkJpegInfo.h
new file mode 100644
index 0000000000..82a8a736fd
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkJpegInfo_DEFINED
+#define SkJpegInfo_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkEncodedInfo.h"
+
+/** Returns true if the data seems to be a valid JPEG image with a known colorType.
+
+ @param [out] size Image size in pixels
+ @param [out] colorType Encoded color type (kGray_Color, kYUV_Color, several others).
+ @param [out] orientation EXIF Orientation of the image.
+*/
+bool SkGetJpegInfo(const void* data, size_t len,
+ SkISize* size,
+ SkEncodedInfo::Color* colorType,
+ SkEncodedOrigin* orientation);
+
+#endif // SkJpegInfo_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkKeyedImage.cpp b/gfx/skia/skia/src/pdf/SkKeyedImage.cpp
new file mode 100644
index 0000000000..ce59da7bf3
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkKeyedImage.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkKeyedImage.h"
+
+#include "src/image/SkImage_Base.h"
+
+SkBitmapKey SkBitmapKeyFromImage(const SkImage* image) {
+ if (!image) {
+ return {{0, 0, 0, 0}, 0};
+ }
+ if (const SkBitmap* bm = as_IB(image)->onPeekBitmap()) {
+ SkIPoint o = bm->pixelRefOrigin();
+ return {image->bounds().makeOffset(o), bm->getGenerationID()};
+ }
+ return {image->bounds(), image->uniqueID()};
+}
+
+SkKeyedImage::SkKeyedImage(sk_sp<SkImage> i) : fImage(std::move(i)) {
+ fKey = SkBitmapKeyFromImage(fImage.get());
+}
+
+SkKeyedImage::SkKeyedImage(const SkBitmap& bm) : fImage(SkImage::MakeFromBitmap(bm)) {
+ if (fImage) {
+ fKey = {bm.getSubset(), bm.getGenerationID()};
+ }
+}
+
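+// Return a keyed view of a subset: the new key's rect is offset by this key's
+// origin so identical pixels keep comparing equal across different subsets.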
+SkKeyedImage SkKeyedImage::subset(SkIRect subset) const {
+ SkKeyedImage img;
+ if (fImage && subset.intersect(fImage->bounds())) {
+ img.fImage = fImage->makeSubset(subset);
+ if (img.fImage) {
+ img.fKey = {subset.makeOffset(fKey.fSubset.topLeft()), fKey.fID};
+ }
+ }
+ return img;
+}
+
+sk_sp<SkImage> SkKeyedImage::release() {
+ sk_sp<SkImage> image = std::move(fImage);
+ SkASSERT(nullptr == fImage);
+ fKey = {{0, 0, 0, 0}, 0};
+ return image;
+}
diff --git a/gfx/skia/skia/src/pdf/SkKeyedImage.h b/gfx/skia/skia/src/pdf/SkKeyedImage.h
new file mode 100644
index 0000000000..db7b09d7b6
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkKeyedImage.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkKeyedImage_DEFINED
+#define SkKeyedImage_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImage.h"
+#include "src/pdf/SkBitmapKey.h"
+
+/**
+ This class has all the advantages of SkBitmaps and SkImages.
+
+ The SkImage holds on to encoded data. The SkBitmapKey properly de-dups subsets.
+ */
+class SkKeyedImage {
+public:
+ SkKeyedImage() {}
+ SkKeyedImage(sk_sp<SkImage>);
+ SkKeyedImage(const SkBitmap&);
+ SkKeyedImage(SkKeyedImage&&) = default;
+ SkKeyedImage(const SkKeyedImage&) = default;
+
+ SkKeyedImage& operator=(SkKeyedImage&&) = default;
+ SkKeyedImage& operator=(const SkKeyedImage&) = default;
+
+ explicit operator bool() const { return fImage != nullptr; }
+ const SkBitmapKey& key() const { return fKey; }
+ const sk_sp<SkImage>& image() const { return fImage; }
+ sk_sp<SkImage> release();
+ SkKeyedImage subset(SkIRect subset) const;
+
+private:
+ sk_sp<SkImage> fImage;
+ SkBitmapKey fKey = {{0, 0, 0, 0}, 0};
+};
+
+/**
+ * Given an Image, return the Bitmap Key that corresponds to it. If the Image
+ * wraps a Bitmap, use that Bitmap's key.
+ */
+SkBitmapKey SkBitmapKeyFromImage(const SkImage*);
+#endif // SkKeyedImage_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
new file mode 100644
index 0000000000..beaf08846d
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFBitmap.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkExecutor.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkImageInfoPriv.h"
+#include "include/private/SkTo.h"
+#include "src/pdf/SkDeflate.h"
+#include "src/pdf/SkJpegInfo.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// write a single byte to a stream n times.
+static void fill_stream(SkWStream* out, char value, size_t n) {
+ char buffer[4096];
+ memset(buffer, value, sizeof(buffer));
+ for (size_t i = 0; i < n / sizeof(buffer); ++i) {
+ out->write(buffer, sizeof(buffer));
+ }
+ out->write(buffer, n % sizeof(buffer));
+}
+
+/* It is necessary to average the color component of transparent
+ pixels with their surrounding neighbors since the PDF renderer may
+ separately re-sample the alpha and color channels when the image is
+ not displayed at its native resolution. Since an alpha of zero
+ gives no information about the color component, the pathological
+ case is a white image with sharp transparency bounds - the color
+ channel goes to black, and the should-be-transparent pixels are
+ rendered as grey because of the separate soft mask and color
+ resizing. e.g.: gm/bitmappremul.cpp */
+static SkColor get_neighbor_avg_color(const SkPixmap& bm, int xOrig, int yOrig) {
+ SkASSERT(kBGRA_8888_SkColorType == bm.colorType());
+ unsigned r = 0, g = 0, b = 0, n = 0;
+ // Clamp the range to the edge of the bitmap.
+ int ymin = SkTMax(0, yOrig - 1);
+ int ymax = SkTMin(yOrig + 1, bm.height() - 1);
+ int xmin = SkTMax(0, xOrig - 1);
+ int xmax = SkTMin(xOrig + 1, bm.width() - 1);
+ for (int y = ymin; y <= ymax; ++y) {
+ const SkColor* scanline = bm.addr32(0, y);
+ for (int x = xmin; x <= xmax; ++x) {
+ SkColor color = scanline[x];
+ if (color != SK_ColorTRANSPARENT) {
+ r += SkColorGetR(color);
+ g += SkColorGetG(color);
+ b += SkColorGetB(color);
+ n++;
+ }
+ }
+ }
+ return n > 0 ? SkColorSetRGB(SkToU8(r / n), SkToU8(g / n), SkToU8(b / n))
+ : SK_ColorTRANSPARENT;
+}
+
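+// Emit a PDF image XObject: build the stream dictionary (dimensions, color
+// space, optional soft mask, and a DCTDecode or FlateDecode filter), then let
+// the caller-supplied writeStream callback write the encoded pixel data.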
+template <typename T>
+static void emit_image_stream(SkPDFDocument* doc,
+ SkPDFIndirectReference ref,
+ T writeStream,
+ SkISize size,
+ const char* colorSpace,
+ SkPDFIndirectReference sMask,
+ int length,
+ bool isJpeg) {
+ SkPDFDict pdfDict("XObject");
+ pdfDict.insertName("Subtype", "Image");
+ pdfDict.insertInt("Width", size.width());
+ pdfDict.insertInt("Height", size.height());
+ pdfDict.insertName("ColorSpace", colorSpace);
+ if (sMask) {
+ pdfDict.insertRef("SMask", sMask);
+ }
+ pdfDict.insertInt("BitsPerComponent", 8);
+ #ifdef SK_PDF_BASE85_BINARY
+ auto filters = SkPDFMakeArray();
+ filters->appendName("ASCII85Decode");
+ filters->appendName(isJpeg ? "DCTDecode" : "FlateDecode");
+ pdfDict.insertObject("Filter", std::move(filters));
+ #else
+ pdfDict.insertName("Filter", isJpeg ? "DCTDecode" : "FlateDecode");
+ #endif
+ if (isJpeg) {
+ pdfDict.insertInt("ColorTransform", 0);
+ }
+ pdfDict.insertInt("Length", length);
+ doc->emitStream(pdfDict, std::move(writeStream), ref);
+}
+
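+// Deflate the alpha channel into an 8-bit DeviceGray stream, emitted as the
+// image's soft mask (SMask).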
+static void do_deflated_alpha(const SkPixmap& pm, SkPDFDocument* doc, SkPDFIndirectReference ref) {
+ SkDynamicMemoryWStream buffer;
+ SkDeflateWStream deflateWStream(&buffer);
+ if (kAlpha_8_SkColorType == pm.colorType()) {
+ SkASSERT(pm.rowBytes() == (size_t)pm.width());
+ buffer.write(pm.addr8(), pm.width() * pm.height());
+ } else {
+ SkASSERT(pm.alphaType() == kUnpremul_SkAlphaType);
+ SkASSERT(pm.colorType() == kBGRA_8888_SkColorType);
+ SkASSERT(pm.rowBytes() == (size_t)pm.width() * 4);
+ const uint32_t* ptr = pm.addr32();
+ const uint32_t* stop = ptr + pm.height() * pm.width();
+
+ uint8_t byteBuffer[4092];
+ uint8_t* bufferStop = byteBuffer + SK_ARRAY_COUNT(byteBuffer);
+ uint8_t* dst = byteBuffer;
+ while (ptr != stop) {
+ *dst++ = 0xFF & ((*ptr++) >> SK_BGRA_A32_SHIFT);
+ if (dst == bufferStop) {
+ deflateWStream.write(byteBuffer, sizeof(byteBuffer));
+ dst = byteBuffer;
+ }
+ }
+ deflateWStream.write(byteBuffer, dst - byteBuffer);
+ }
+ deflateWStream.finalize();
+
+ #ifdef SK_PDF_BASE85_BINARY
+ SkPDFUtils::Base85Encode(buffer.detachAsStream(), &buffer);
+ #endif
+ int length = SkToInt(buffer.bytesWritten());
+ emit_image_stream(doc, ref, [&buffer](SkWStream* stream) { buffer.writeToAndReset(stream); },
+ pm.info().dimensions(), "DeviceGray", SkPDFIndirectReference(),
+ length, false);
+}
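+
+// A worked example of the extraction above: (*ptr >> SK_BGRA_A32_SHIFT)
+// isolates the alpha byte of each 32-bit BGRA pixel before it is deflated
+// into the DeviceGray soft mask, so a pixel word 0x7F112233 (alpha 0x7F)
+// contributes the single byte 0x7F to the stream.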
+
+static void do_deflated_image(const SkPixmap& pm,
+ SkPDFDocument* doc,
+ bool isOpaque,
+ SkPDFIndirectReference ref) {
+ SkPDFIndirectReference sMask;
+ if (!isOpaque) {
+ sMask = doc->reserveRef();
+ }
+ SkDynamicMemoryWStream buffer;
+ SkDeflateWStream deflateWStream(&buffer);
+ const char* colorSpace = "DeviceGray";
+ switch (pm.colorType()) {
+ case kAlpha_8_SkColorType:
+ fill_stream(&deflateWStream, '\x00', pm.width() * pm.height());
+ break;
+ case kGray_8_SkColorType:
+            SkASSERT(sMask.fValue == -1);
+ SkASSERT(pm.rowBytes() == (size_t)pm.width());
+ deflateWStream.write(pm.addr8(), pm.width() * pm.height());
+ break;
+ default:
+ colorSpace = "DeviceRGB";
+ SkASSERT(pm.alphaType() == kUnpremul_SkAlphaType);
+ SkASSERT(pm.colorType() == kBGRA_8888_SkColorType);
+ SkASSERT(pm.rowBytes() == (size_t)pm.width() * 4);
+ uint8_t byteBuffer[3072];
+ static_assert(SK_ARRAY_COUNT(byteBuffer) % 3 == 0, "");
+ uint8_t* bufferStop = byteBuffer + SK_ARRAY_COUNT(byteBuffer);
+ uint8_t* dst = byteBuffer;
+ for (int y = 0; y < pm.height(); ++y) {
+ const SkColor* src = pm.addr32(0, y);
+ for (int x = 0; x < pm.width(); ++x) {
+ SkColor color = *src++;
+ if (SkColorGetA(color) == SK_AlphaTRANSPARENT) {
+ color = get_neighbor_avg_color(pm, x, y);
+ }
+ *dst++ = SkColorGetR(color);
+ *dst++ = SkColorGetG(color);
+ *dst++ = SkColorGetB(color);
+ if (dst == bufferStop) {
+ deflateWStream.write(byteBuffer, sizeof(byteBuffer));
+ dst = byteBuffer;
+ }
+ }
+ }
+ deflateWStream.write(byteBuffer, dst - byteBuffer);
+ }
+ deflateWStream.finalize();
+ #ifdef SK_PDF_BASE85_BINARY
+ SkPDFUtils::Base85Encode(buffer.detachAsStream(), &buffer);
+ #endif
+ int length = SkToInt(buffer.bytesWritten());
+ emit_image_stream(doc, ref, [&buffer](SkWStream* stream) { buffer.writeToAndReset(stream); },
+ pm.info().dimensions(), colorSpace, sMask, length, false);
+ if (!isOpaque) {
+ do_deflated_alpha(pm, doc, sMask);
+ }
+}
+
+static bool do_jpeg(sk_sp<SkData> data, SkPDFDocument* doc, SkISize size,
+ SkPDFIndirectReference ref) {
+ SkISize jpegSize;
+ SkEncodedInfo::Color jpegColorType;
+ SkEncodedOrigin exifOrientation;
+ if (!SkGetJpegInfo(data->data(), data->size(), &jpegSize,
+ &jpegColorType, &exifOrientation)) {
+ return false;
+ }
+ bool yuv = jpegColorType == SkEncodedInfo::kYUV_Color;
+ bool goodColorType = yuv || jpegColorType == SkEncodedInfo::kGray_Color;
+ if (jpegSize != size // Sanity check.
+ || !goodColorType
+ || kTopLeft_SkEncodedOrigin != exifOrientation) {
+ return false;
+ }
+ #ifdef SK_PDF_BASE85_BINARY
+ SkDynamicMemoryWStream buffer;
+ SkPDFUtils::Base85Encode(SkMemoryStream::MakeDirect(data->data(), data->size()), &buffer);
+ data = buffer.detachAsData();
+ #endif
+
+ emit_image_stream(doc, ref,
+ [&data](SkWStream* dst) { dst->write(data->data(), data->size()); },
+ jpegSize, yuv ? "DeviceRGB" : "DeviceGray",
+ SkPDFIndirectReference(), SkToInt(data->size()), true);
+ return true;
+}
+
+static SkBitmap to_pixels(const SkImage* image) {
+ SkBitmap bm;
+ int w = image->width(),
+ h = image->height();
+ switch (image->colorType()) {
+ case kAlpha_8_SkColorType:
+ bm.allocPixels(SkImageInfo::MakeA8(w, h));
+ break;
+ case kGray_8_SkColorType:
+ bm.allocPixels(SkImageInfo::Make(w, h, kGray_8_SkColorType, kOpaque_SkAlphaType));
+ break;
+ default: {
+ // TODO: makeColorSpace(sRGB) or actually tag the images
+            SkAlphaType at = image->isOpaque() ? kOpaque_SkAlphaType : kUnpremul_SkAlphaType;
+ bm.allocPixels(SkImageInfo::Make(w, h, kBGRA_8888_SkColorType, at));
+ }
+ }
+ if (!image->readPixels(bm.pixmap(), 0, 0)) {
+ bm.eraseColor(SkColorSetARGB(0xFF, 0, 0, 0));
+ }
+ return bm;
+}
+
+void serialize_image(const SkImage* img,
+ int encodingQuality,
+ SkPDFDocument* doc,
+ SkPDFIndirectReference ref) {
+ SkASSERT(img);
+ SkASSERT(doc);
+ SkASSERT(encodingQuality >= 0);
+ SkISize dimensions = img->dimensions();
+ sk_sp<SkData> data = img->refEncodedData();
+ if (data && do_jpeg(std::move(data), doc, dimensions, ref)) {
+ return;
+ }
+ SkBitmap bm = to_pixels(img);
+ SkPixmap pm = bm.pixmap();
+ bool isOpaque = pm.isOpaque() || pm.computeIsOpaque();
+ if (encodingQuality <= 100 && isOpaque) {
+ sk_sp<SkData> data = img->encodeToData(SkEncodedImageFormat::kJPEG, encodingQuality);
+ if (data && do_jpeg(std::move(data), doc, dimensions, ref)) {
+ return;
+ }
+ }
+ do_deflated_image(pm, doc, isOpaque, ref);
+}
+
+SkPDFIndirectReference SkPDFSerializeImage(const SkImage* img,
+ SkPDFDocument* doc,
+ int encodingQuality) {
+ SkASSERT(img);
+ SkASSERT(doc);
+ SkPDFIndirectReference ref = doc->reserveRef();
+ if (SkExecutor* executor = doc->executor()) {
+ SkRef(img);
+ doc->incrementJobCount();
+ executor->add([img, encodingQuality, doc, ref]() {
+ serialize_image(img, encodingQuality, doc, ref);
+ SkSafeUnref(img);
+ doc->signalJobComplete();
+ });
+ return ref;
+ }
+ serialize_image(img, encodingQuality, doc, ref);
+ return ref;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.h b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
new file mode 100644
index 0000000000..bc2c57bd3b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFBitmap_DEFINED
+#define SkPDFBitmap_DEFINED
+
+class SkImage;
+class SkPDFDocument;
+struct SkPDFIndirectReference;
+
+/**
+ * Serialize an SkImage as an Image XObject.
+ * encodingQuality > 100 means lossless.
+ */
+SkPDFIndirectReference SkPDFSerializeImage(const SkImage* img,
+ SkPDFDocument* doc,
+ int encodingQuality = 101);
+
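+// A minimal usage sketch (the surrounding objects are hypothetical):
+//
+//   SkPDFDocument* doc = ...;  // an in-progress document
+//   sk_sp<SkImage> img = ...;  // any raster image
+//   SkPDFIndirectReference lossless = SkPDFSerializeImage(img.get(), doc);
+//   SkPDFIndirectReference jpeg80   = SkPDFSerializeImage(img.get(), doc, 80);
+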
+#endif // SkPDFBitmap_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.cpp b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
new file mode 100644
index 0000000000..2ac998f757
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
@@ -0,0 +1,1765 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFDevice.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkAnnotationKeys.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyphRun.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScopeExit.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextFormatParams.h"
+#include "src/core/SkXfermodeInterpretation.h"
+#include "src/pdf/SkBitmapKey.h"
+#include "src/pdf/SkClusterator.h"
+#include "src/pdf/SkPDFBitmap.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFShader.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/utils/SkUTF.h"
+
+#include <vector>
+
+#ifndef SK_PDF_MASK_QUALITY
+    // If SK_PDF_MASK_QUALITY is in [0,100], it will be used for SkJpegEncoder;
+    // otherwise, masks are encoded losslessly. Since these masks are used for
+    // blurry shadows, we shouldn't need high quality; raise this value if your
+    // shadows have visible JPEG artifacts. If SkJpegEncoder::Encode fails, we
+    // fall back to the lossless encoding.
+    #define SK_PDF_MASK_QUALITY 50
+#endif
+
+// Utility functions
+
+static SkPath to_path(const SkRect& r) {
+ SkPath p;
+ p.addRect(r);
+ return p;
+}
+
+// This function destroys the mask and either frees or takes the pixels.
+sk_sp<SkImage> mask_to_greyscale_image(SkMask* mask) {
+ sk_sp<SkImage> img;
+ SkPixmap pm(SkImageInfo::Make(mask->fBounds.width(), mask->fBounds.height(),
+ kGray_8_SkColorType, kOpaque_SkAlphaType),
+ mask->fImage, mask->fRowBytes);
+ const int imgQuality = SK_PDF_MASK_QUALITY;
+ if (imgQuality <= 100 && imgQuality >= 0) {
+ SkDynamicMemoryWStream buffer;
+ SkJpegEncoder::Options jpegOptions;
+ jpegOptions.fQuality = imgQuality;
+ if (SkJpegEncoder::Encode(&buffer, pm, jpegOptions)) {
+ img = SkImage::MakeFromEncoded(buffer.detachAsData());
+ SkASSERT(img);
+ if (img) {
+ SkMask::FreeImage(mask->fImage);
+ }
+ }
+ }
+ if (!img) {
+ img = SkImage::MakeFromRaster(pm, [](const void* p, void*) { SkMask::FreeImage((void*)p); },
+ nullptr);
+ }
+    *mask = SkMask(); // destructive: the mask's image was freed or adopted above.
+ return img;
+}
+
+sk_sp<SkImage> alpha_image_to_greyscale_image(const SkImage* mask) {
+ int w = mask->width(), h = mask->height();
+ SkBitmap greyBitmap;
+ greyBitmap.allocPixels(SkImageInfo::Make(w, h, kGray_8_SkColorType, kOpaque_SkAlphaType));
+ if (!mask->readPixels(SkImageInfo::MakeA8(w, h),
+ greyBitmap.getPixels(), greyBitmap.rowBytes(), 0, 0)) {
+ return nullptr;
+ }
+ return SkImage::MakeFromBitmap(greyBitmap);
+}
+
+static int add_resource(SkTHashSet<SkPDFIndirectReference>& resources, SkPDFIndirectReference ref) {
+ resources.add(ref);
+ return ref.fValue;
+}
+
+static void draw_points(SkCanvas::PointMode mode,
+ size_t count,
+ const SkPoint* points,
+ const SkPaint& paint,
+ const SkIRect& bounds,
+ const SkMatrix& ctm,
+ SkBaseDevice* device) {
+ SkRasterClip rc(bounds);
+ SkDraw draw;
+ draw.fDst = SkPixmap(SkImageInfo::MakeUnknown(bounds.right(), bounds.bottom()), nullptr, 0);
+ draw.fMatrix = &ctm;
+ draw.fRC = &rc;
+ draw.drawPoints(mode, count, points, paint, device);
+}
+
+// A shader's matrix is: CTMM x LocalMatrix x WrappingLocalMatrix. We want to
+// switch to device space, where CTM = I, while keeping the original behavior.
+//
+// I * LocalMatrix * NewWrappingMatrix = CTM * LocalMatrix
+// LocalMatrix * NewWrappingMatrix = CTM * LocalMatrix
+// InvLocalMatrix * LocalMatrix * NewWrappingMatrix = InvLocalMatrix * CTM * LocalMatrix
+// NewWrappingMatrix = InvLocalMatrix * CTM * LocalMatrix
+//
+static void transform_shader(SkPaint* paint, const SkMatrix& ctm) {
+ SkASSERT(!ctm.isIdentity());
+ SkMatrix lm = SkPDFUtils::GetShaderLocalMatrix(paint->getShader());
+ SkMatrix lmInv;
+ if (lm.invert(&lmInv)) {
+ SkMatrix m = SkMatrix::Concat(SkMatrix::Concat(lmInv, ctm), lm);
+ paint->setShader(paint->getShader()->makeWithLocalMatrix(m));
+ }
+}
+
+static SkTCopyOnFirstWrite<SkPaint> clean_paint(const SkPaint& srcPaint) {
+ SkTCopyOnFirstWrite<SkPaint> paint(srcPaint);
+ // If the paint will definitely draw opaquely, replace kSrc with
+ // kSrcOver. http://crbug.com/473572
+ if (SkBlendMode::kSrcOver != paint->getBlendMode() &&
+ kSrcOver_SkXfermodeInterpretation == SkInterpretXfermode(*paint, false))
+ {
+ paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
+ }
+ // If the paint has a color filter, apply the color filter to the shader or the
+ // paint color. Remove the color filter.
+ if (SkColorFilter* cf = paint->getColorFilter()) {
+ SkPaint* p = paint.writable();
+ if (SkShader* shader = paint->getShader()) {
+ p->setShader(shader->makeWithColorFilter(paint->refColorFilter()));
+ } else {
+ SkColorSpace* dstCS = sk_srgb_singleton(); // don't know PDF's space, so use srgb
+ SkColor4f newColor = cf->filterColor4f(p->getColor4f(), sk_srgb_singleton(), dstCS);
+ p->setColor4f(newColor, dstCS);
+ }
+ p->setColorFilter(nullptr);
+ }
+ return paint;
+}
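+
+// A sketch of the color-filter folding above (illustrative, not executed):
+//
+//   SkPaint p;
+//   p.setColor(SK_ColorRED);
+//   p.setColorFilter(SkColorFilters::Blend(SK_ColorBLUE, SkBlendMode::kSrc));
+//   SkTCopyOnFirstWrite<SkPaint> q = clean_paint(p);
+//   // q->getColorFilter() == nullptr and q's color is the filtered color,
+//   // so later PDF code never has to model SkColorFilter itself.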
+
+static void set_style(SkTCopyOnFirstWrite<SkPaint>* paint, SkPaint::Style style) {
+ if (paint->get()->getStyle() != style) {
+ paint->writable()->setStyle(style);
+ }
+}
+
+/* Calculate an inverted path's equivalent non-inverted path, given the
+ * canvas bounds.
+ * outPath may alias with invPath (since this is supported by PathOps).
+ */
+static bool calculate_inverse_path(const SkRect& bounds, const SkPath& invPath,
+ SkPath* outPath) {
+ SkASSERT(invPath.isInverseFillType());
+ return Op(to_path(bounds), invPath, kIntersect_SkPathOp, outPath);
+}
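+
+// Illustrative use of the PathOps intersection above: for an inverse-filled
+// circle, the result is the bounds rectangle with a circular hole.
+//
+//   SkPath inv;
+//   inv.addCircle(50, 50, 25);
+//   inv.setFillType(SkPath::kInverseWinding_FillType);
+//   SkPath out;
+//   calculate_inverse_path(SkRect::MakeWH(100, 100), inv, &out);
+//   // out now fills the 100x100 rect everywhere except the circle.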
+
+SkBaseDevice* SkPDFDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint* layerPaint) {
+ // PDF does not support image filters, so render them on CPU.
+ // Note that this rendering is done at "screen" resolution (100dpi), not
+ // printer resolution.
+
+ // TODO: It may be possible to express some filters natively using PDF
+ // to improve quality and file size (https://bug.skia.org/3043)
+ if (layerPaint && (layerPaint->getImageFilter() || layerPaint->getColorFilter())) {
+ // need to return a raster device, which we will detect in drawDevice()
+ return SkBitmapDevice::Create(cinfo.fInfo, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ }
+ return new SkPDFDevice(cinfo.fInfo.dimensions(), fDocument);
+}
+
+// A helper class to automatically finish a ContentEntry at the end of a
+// drawing method and maintain the state needed between set up and finish.
+class ScopedContentEntry {
+public:
+ ScopedContentEntry(SkPDFDevice* device,
+ const SkClipStack* clipStack,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkScalar textScale = 0)
+ : fDevice(device)
+ , fBlendMode(SkBlendMode::kSrcOver)
+ , fClipStack(clipStack)
+ {
+ if (matrix.hasPerspective()) {
+ NOT_IMPLEMENTED(!matrix.hasPerspective(), false);
+ return;
+ }
+ fBlendMode = paint.getBlendMode();
+ fContentStream =
+ fDevice->setUpContentEntry(clipStack, matrix, paint, textScale, &fDstFormXObject);
+ }
+ ScopedContentEntry(SkPDFDevice* dev, const SkPaint& paint, SkScalar textScale = 0)
+ : ScopedContentEntry(dev, &dev->cs(), dev->ctm(), paint, textScale) {}
+
+ ~ScopedContentEntry() {
+ if (fContentStream) {
+ SkPath* shape = &fShape;
+ if (shape->isEmpty()) {
+ shape = nullptr;
+ }
+ fDevice->finishContentEntry(fClipStack, fBlendMode, fDstFormXObject, shape);
+ }
+ }
+
+ explicit operator bool() const { return fContentStream != nullptr; }
+ SkDynamicMemoryWStream* stream() { return fContentStream; }
+
+ /* Returns true when we explicitly need the shape of the drawing. */
+ bool needShape() {
+ switch (fBlendMode) {
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /* Returns true unless we only need the shape of the drawing. */
+ bool needSource() {
+ if (fBlendMode == SkBlendMode::kClear) {
+ return false;
+ }
+ return true;
+ }
+
+    /* If the shape is different from the alpha component of the content, then
+     * setShape should be called with the shape. In particular, images and
+     * devices have rectangular shape.
+     */
+ void setShape(const SkPath& shape) {
+ fShape = shape;
+ }
+
+private:
+ SkPDFDevice* fDevice = nullptr;
+ SkDynamicMemoryWStream* fContentStream = nullptr;
+ SkBlendMode fBlendMode;
+ SkPDFIndirectReference fDstFormXObject;
+ SkPath fShape;
+ const SkClipStack* fClipStack;
+};
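+
+// Typical use, as in the draw methods below (sketch):
+//
+//   ScopedContentEntry content(this, *paint);
+//   if (!content) {
+//       return;
+//   }
+//   SkPDFUtils::AppendRectangle(rect, content.stream());
+//   // ... the destructor then finishes the content entry.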
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFDevice::SkPDFDevice(SkISize pageSize, SkPDFDocument* doc, const SkMatrix& transform)
+ : INHERITED(SkImageInfo::MakeUnknown(pageSize.width(), pageSize.height()),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fInitialTransform(transform)
+ , fNodeId(0)
+ , fDocument(doc)
+{
+ SkASSERT(!pageSize.isEmpty());
+}
+
+SkPDFDevice::~SkPDFDevice() = default;
+
+void SkPDFDevice::reset() {
+ fGraphicStateResources.reset();
+ fXObjectResources.reset();
+ fShaderResources.reset();
+ fFontResources.reset();
+ fContent.reset();
+ fActiveStackState = SkPDFGraphicStackState();
+}
+
+void SkPDFDevice::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ if (!value) {
+ return;
+ }
+ const SkMatrix& pageXform = fDocument->currentPageTransform();
+ SkPoint deviceOffset = {(float)this->getOrigin().x(), (float)this->getOrigin().y()};
+ if (rect.isEmpty()) {
+ if (!strcmp(key, SkPDFGetNodeIdKey())) {
+ int nodeID;
+ if (value->size() != sizeof(nodeID)) { return; }
+ memcpy(&nodeID, value->data(), sizeof(nodeID));
+ fNodeId = nodeID;
+ return;
+ }
+ if (!strcmp(SkAnnotationKeys::Define_Named_Dest_Key(), key)) {
+ SkPoint p = deviceOffset + this->ctm().mapXY(rect.x(), rect.y());
+ pageXform.mapPoints(&p, 1);
+ auto pg = fDocument->currentPage();
+ fDocument->fNamedDestinations.push_back(SkPDFNamedDestination{sk_ref_sp(value), p, pg});
+ }
+ return;
+ }
+ // Convert to path to handle non-90-degree rotations.
+ SkPath path = to_path(rect);
+ path.transform(this->ctm(), &path);
+ SkPath clip;
+ (void)this->cs().asPath(&clip);
+ Op(clip, path, kIntersect_SkPathOp, &path);
+ // PDF wants a rectangle only.
+ SkRect transformedRect =
+ pageXform.mapRect(path.getBounds().makeOffset(deviceOffset.x(), deviceOffset.y()));
+ if (transformedRect.isEmpty()) {
+ return;
+ }
+ if (!strcmp(SkAnnotationKeys::URL_Key(), key)) {
+ fDocument->fCurrentPageLinkToURLs.push_back(
+ std::make_pair(sk_ref_sp(value), transformedRect));
+ } else if (!strcmp(SkAnnotationKeys::Link_Named_Dest_Key(), key)) {
+ fDocument->fCurrentPageLinkToDestinations.emplace_back(
+ std::make_pair(sk_ref_sp(value), transformedRect));
+ }
+}
+
+void SkPDFDevice::drawPaint(const SkPaint& srcPaint) {
+ SkMatrix inverse;
+ if (!this->ctm().invert(&inverse)) {
+ return;
+ }
+ SkRect bbox = this->cs().bounds(this->bounds());
+ inverse.mapRect(&bbox);
+ bbox.roundOut(&bbox);
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ SkPaint newPaint = srcPaint;
+ newPaint.setStyle(SkPaint::kFill_Style);
+ this->drawRect(bbox, newPaint);
+}
+
+void SkPDFDevice::drawPoints(SkCanvas::PointMode mode,
+ size_t count,
+ const SkPoint* points,
+ const SkPaint& srcPaint) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ if (count == 0) {
+ return;
+ }
+ SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(srcPaint));
+
+ if (SkCanvas::kPoints_PointMode != mode) {
+ set_style(&paint, SkPaint::kStroke_Style);
+ }
+
+    // SkDraw::drawPoints converts to multiple calls to fDevice->drawPath.
+    // We only take that route when there's a path effect, because of the
+    // overhead of the multiple setUpContentEntry calls it causes.
+ if (paint->getPathEffect()) {
+ draw_points(mode, count, points, *paint,
+ this->devClipBounds(), this->ctm(), this);
+ return;
+ }
+
+ if (mode == SkCanvas::kPoints_PointMode && paint->getStrokeCap() != SkPaint::kRound_Cap) {
+ if (paint->getStrokeWidth()) {
+ // PDF won't draw a single point with square/butt caps because the
+ // orientation is ambiguous. Draw a rectangle instead.
+ set_style(&paint, SkPaint::kFill_Style);
+ SkScalar strokeWidth = paint->getStrokeWidth();
+ SkScalar halfStroke = SkScalarHalf(strokeWidth);
+ for (size_t i = 0; i < count; i++) {
+ SkRect r = SkRect::MakeXYWH(points[i].fX, points[i].fY, 0, 0);
+ r.inset(-halfStroke, -halfStroke);
+ this->drawRect(r, *paint);
+ }
+ return;
+ } else {
+ if (paint->getStrokeCap() != SkPaint::kRound_Cap) {
+ paint.writable()->setStrokeCap(SkPaint::kRound_Cap);
+ }
+ }
+ }
+
+ ScopedContentEntry content(this, *paint);
+ if (!content) {
+ return;
+ }
+ SkDynamicMemoryWStream* contentStream = content.stream();
+ switch (mode) {
+ case SkCanvas::kPolygon_PointMode:
+ SkPDFUtils::MoveTo(points[0].fX, points[0].fY, contentStream);
+ for (size_t i = 1; i < count; i++) {
+ SkPDFUtils::AppendLine(points[i].fX, points[i].fY, contentStream);
+ }
+ SkPDFUtils::StrokePath(contentStream);
+ break;
+ case SkCanvas::kLines_PointMode:
+ for (size_t i = 0; i < count/2; i++) {
+ SkPDFUtils::MoveTo(points[i * 2].fX, points[i * 2].fY, contentStream);
+ SkPDFUtils::AppendLine(points[i * 2 + 1].fX, points[i * 2 + 1].fY, contentStream);
+ SkPDFUtils::StrokePath(contentStream);
+ }
+ break;
+ case SkCanvas::kPoints_PointMode:
+ SkASSERT(paint->getStrokeCap() == SkPaint::kRound_Cap);
+ for (size_t i = 0; i < count; i++) {
+ SkPDFUtils::MoveTo(points[i].fX, points[i].fY, contentStream);
+ SkPDFUtils::ClosePath(contentStream);
+ SkPDFUtils::StrokePath(contentStream);
+ }
+ break;
+ default:
+ SkASSERT(false);
+ }
+}
+
+void SkPDFDevice::drawRect(const SkRect& rect, const SkPaint& paint) {
+ SkRect r = rect;
+ r.sort();
+ this->internalDrawPath(this->cs(), this->ctm(), to_path(r), paint, true);
+}
+
+void SkPDFDevice::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(rrect);
+ this->internalDrawPath(this->cs(), this->ctm(), path, paint, true);
+}
+
+void SkPDFDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
+ SkPath path;
+ path.addOval(oval);
+ this->internalDrawPath(this->cs(), this->ctm(), path, paint, true);
+}
+
+void SkPDFDevice::drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) {
+ this->internalDrawPath(this->cs(), this->ctm(), path, paint, pathIsMutable);
+}
+
+void SkPDFDevice::internalDrawPathWithFilter(const SkClipStack& clipStack,
+ const SkMatrix& ctm,
+ const SkPath& origPath,
+ const SkPaint& origPaint) {
+ SkASSERT(origPaint.getMaskFilter());
+ SkPath path(origPath);
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ SkStrokeRec::InitStyle initStyle = paint->getFillPath(path, &path)
+ ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ path.transform(ctm, &path);
+
+ SkIRect bounds = clipStack.bounds(this->bounds()).roundOut();
+ SkMask sourceMask;
+ if (!SkDraw::DrawToMask(path, &bounds, paint->getMaskFilter(), &SkMatrix::I(),
+ &sourceMask, SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ initStyle)) {
+ return;
+ }
+ SkAutoMaskFreeImage srcAutoMaskFreeImage(sourceMask.fImage);
+ SkMask dstMask;
+ SkIPoint margin;
+ if (!as_MFB(paint->getMaskFilter())->filterMask(&dstMask, sourceMask, ctm, &margin)) {
+ return;
+ }
+ SkIRect dstMaskBounds = dstMask.fBounds;
+ sk_sp<SkImage> mask = mask_to_greyscale_image(&dstMask);
+ // PDF doesn't seem to allow masking vector graphics with an Image XObject.
+ // Must mask with a Form XObject.
+ sk_sp<SkPDFDevice> maskDevice = this->makeCongruentDevice();
+ {
+ SkCanvas canvas(maskDevice);
+ canvas.drawImage(mask, dstMaskBounds.x(), dstMaskBounds.y());
+ }
+ if (!ctm.isIdentity() && paint->getShader()) {
+ transform_shader(paint.writable(), ctm); // Since we are using identity matrix.
+ }
+ ScopedContentEntry content(this, &clipStack, SkMatrix::I(), *paint);
+ if (!content) {
+ return;
+ }
+ this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
+ maskDevice->makeFormXObjectFromDevice(dstMaskBounds, true), false,
+ SkPDFGraphicState::kLuminosity_SMaskMode, fDocument), content.stream());
+ SkPDFUtils::AppendRectangle(SkRect::Make(dstMaskBounds), content.stream());
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, path.getFillType(), content.stream());
+ this->clearMaskOnGraphicState(content.stream());
+}
+
+void SkPDFDevice::setGraphicState(SkPDFIndirectReference gs, SkDynamicMemoryWStream* content) {
+ SkPDFUtils::ApplyGraphicState(add_resource(fGraphicStateResources, gs), content);
+}
+
+void SkPDFDevice::clearMaskOnGraphicState(SkDynamicMemoryWStream* contentStream) {
+ // The no-softmask graphic state is used to "turn off" the mask for later draw calls.
+ SkPDFIndirectReference& noSMaskGS = fDocument->fNoSmaskGraphicState;
+ if (!noSMaskGS) {
+ SkPDFDict tmp("ExtGState");
+ tmp.insertName("SMask", "None");
+ noSMaskGS = fDocument->emit(tmp);
+ }
+ this->setGraphicState(noSMaskGS, contentStream);
+}
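+
+// The graphic state emitted above is simply the dictionary (illustrative
+// rendering):
+//
+//   << /Type /ExtGState /SMask /None >>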
+
+void SkPDFDevice::internalDrawPath(const SkClipStack& clipStack,
+ const SkMatrix& ctm,
+ const SkPath& origPath,
+ const SkPaint& srcPaint,
+ bool pathIsMutable) {
+ if (clipStack.isEmpty(this->bounds())) {
+ return;
+ }
+ SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(srcPaint));
+ SkPath modifiedPath;
+ SkPath* pathPtr = const_cast<SkPath*>(&origPath);
+
+ if (paint->getMaskFilter()) {
+ this->internalDrawPathWithFilter(clipStack, ctm, origPath, *paint);
+ return;
+ }
+
+ SkMatrix matrix = ctm;
+
+ if (paint->getPathEffect()) {
+ if (clipStack.isEmpty(this->bounds())) {
+ return;
+ }
+ if (!pathIsMutable) {
+ modifiedPath = origPath;
+ pathPtr = &modifiedPath;
+ pathIsMutable = true;
+ }
+ if (paint->getFillPath(*pathPtr, pathPtr)) {
+ set_style(&paint, SkPaint::kFill_Style);
+ } else {
+ set_style(&paint, SkPaint::kStroke_Style);
+ if (paint->getStrokeWidth() != 0) {
+ paint.writable()->setStrokeWidth(0);
+ }
+ }
+ paint.writable()->setPathEffect(nullptr);
+ }
+
+ if (this->handleInversePath(*pathPtr, *paint, pathIsMutable)) {
+ return;
+ }
+ if (matrix.getType() & SkMatrix::kPerspective_Mask) {
+ if (!pathIsMutable) {
+ modifiedPath = origPath;
+ pathPtr = &modifiedPath;
+ pathIsMutable = true;
+ }
+ pathPtr->transform(matrix);
+ if (paint->getShader()) {
+ transform_shader(paint.writable(), matrix);
+ }
+ matrix = SkMatrix::I();
+ }
+
+ ScopedContentEntry content(this, &clipStack, matrix, *paint);
+ if (!content) {
+ return;
+ }
+ constexpr SkScalar kToleranceScale = 0.0625f; // smaller = better conics (circles).
+ SkScalar matrixScale = matrix.mapRadius(1.0f);
+ SkScalar tolerance = matrixScale > 0.0f ? kToleranceScale / matrixScale : kToleranceScale;
+    bool consumeDegeneratePathSegments =
+        paint->getStyle() == SkPaint::kFill_Style ||
+        (paint->getStrokeCap() != SkPaint::kRound_Cap &&
+         paint->getStrokeCap() != SkPaint::kSquare_Cap);
+    SkPDFUtils::EmitPath(*pathPtr, paint->getStyle(), consumeDegeneratePathSegments,
+                         content.stream(), tolerance);
+ SkPDFUtils::PaintPath(paint->getStyle(), pathPtr->getFillType(), content.stream());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkPDFDevice::drawImageRect(const SkImage* image,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) {
+ SkASSERT(image);
+ this->internalDrawImageRect(SkKeyedImage(sk_ref_sp(const_cast<SkImage*>(image))),
+ src, dst, paint, this->ctm());
+}
+
+void SkPDFDevice::drawBitmapRect(const SkBitmap& bm,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) {
+ SkASSERT(!bm.drawsNothing());
+ this->internalDrawImageRect(SkKeyedImage(bm), src, dst, paint, this->ctm());
+}
+
+void SkPDFDevice::drawSprite(const SkBitmap& bm, int x, int y, const SkPaint& paint) {
+ SkASSERT(!bm.drawsNothing());
+ auto r = SkRect::MakeXYWH(x, y, bm.width(), bm.height());
+ this->internalDrawImageRect(SkKeyedImage(bm), nullptr, r, paint, SkMatrix::I());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+class GlyphPositioner {
+public:
+ GlyphPositioner(SkDynamicMemoryWStream* content,
+ SkScalar textSkewX,
+ SkPoint origin)
+ : fContent(content)
+ , fCurrentMatrixOrigin(origin)
+ , fTextSkewX(textSkewX) {
+ }
+ ~GlyphPositioner() { this->flush(); }
+ void flush() {
+ if (fInText) {
+ fContent->writeText("> Tj\n");
+ fInText = false;
+ }
+ }
+ void setWideChars(bool wide) {
+ this->flush();
+ fWideChars = wide;
+ }
+ void writeGlyph(SkPoint xy,
+ SkScalar advanceWidth,
+ uint16_t glyph) {
+ if (!fInitialized) {
+ // Flip the text about the x-axis to account for origin swap and include
+ // the passed parameters.
+ fContent->writeText("1 0 ");
+ SkPDFUtils::AppendScalar(-fTextSkewX, fContent);
+ fContent->writeText(" -1 ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.x(), fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.y(), fContent);
+ fContent->writeText(" Tm\n");
+ fCurrentMatrixOrigin.set(0.0f, 0.0f);
+ fInitialized = true;
+ }
+ SkPoint position = xy - fCurrentMatrixOrigin;
+ if (position != SkPoint{fXAdvance, 0}) {
+ this->flush();
+ SkPDFUtils::AppendScalar(position.x() - position.y() * fTextSkewX, fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(-position.y(), fContent);
+ fContent->writeText(" Td ");
+ fCurrentMatrixOrigin = xy;
+ fXAdvance = 0;
+ }
+ fXAdvance += advanceWidth;
+ if (!fInText) {
+ fContent->writeText("<");
+ fInText = true;
+ }
+ if (fWideChars) {
+ SkPDFUtils::WriteUInt16BE(fContent, glyph);
+ } else {
+ SkASSERT(0 == glyph >> 8);
+ SkPDFUtils::WriteUInt8(fContent, static_cast<uint8_t>(glyph));
+ }
+ }
+
+private:
+ SkDynamicMemoryWStream* fContent;
+ SkPoint fCurrentMatrixOrigin;
+ SkScalar fXAdvance = 0.0f;
+ SkScalar fTextSkewX;
+ bool fWideChars = true;
+ bool fInText = false;
+ bool fInitialized = false;
+};
+} // namespace
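+
+// For orientation, the operators GlyphPositioner emits look roughly like this
+// in the content stream (positions and glyph IDs are illustrative): a flipped
+// text matrix at the run origin, then glyphs that accumulate into one hex
+// string until a repositioning Td is required:
+//
+//   1 0 0 -1 72 720 Tm
+//   <00410042> Tj
+//   12 0 Td <0043> Tj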
+
+static SkUnichar map_glyph(const std::vector<SkUnichar>& glyphToUnicode, SkGlyphID glyph) {
+ return glyph < glyphToUnicode.size() ? glyphToUnicode[SkToInt(glyph)] : -1;
+}
+
+namespace {
+struct PositionedGlyph {
+ SkPoint fPos;
+ SkGlyphID fGlyph;
+};
+}
+
+static SkRect get_glyph_bounds_device_space(const SkGlyph* glyph,
+ SkScalar xScale, SkScalar yScale,
+ SkPoint xy, const SkMatrix& ctm) {
+ SkRect glyphBounds = SkMatrix::MakeScale(xScale, yScale).mapRect(glyph->rect());
+ glyphBounds.offset(xy);
+ ctm.mapRect(&glyphBounds); // now in dev space.
+ return glyphBounds;
+}
+
+static bool contains(const SkRect& r, SkPoint p) {
+ return r.left() <= p.x() && p.x() <= r.right() &&
+ r.top() <= p.y() && p.y() <= r.bottom();
+}
+
+void SkPDFDevice::drawGlyphRunAsPath(
+ const SkGlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint) {
+ const SkFont& font = glyphRun.font();
+ SkPath path;
+
+ struct Rec {
+ SkPath* fPath;
+ SkPoint fOffset;
+ const SkPoint* fPos;
+ } rec = {&path, offset, glyphRun.positions().data()};
+
+ font.getPaths(glyphRun.glyphsIDs().data(), glyphRun.glyphsIDs().size(),
+ [](const SkPath* path, const SkMatrix& mx, void* ctx) {
+ Rec* rec = reinterpret_cast<Rec*>(ctx);
+ if (path) {
+ SkMatrix total = mx;
+ total.postTranslate(rec->fPos->fX + rec->fOffset.fX,
+ rec->fPos->fY + rec->fOffset.fY);
+ rec->fPath->addPath(*path, total);
+ }
+ rec->fPos += 1; // move to the next glyph's position
+ }, &rec);
+ this->internalDrawPath(this->cs(), this->ctm(), path, runPaint, true);
+
+ SkFont transparentFont = glyphRun.font();
+ transparentFont.setEmbolden(false); // Stop Recursion
+ SkGlyphRun tmpGlyphRun(glyphRun, transparentFont);
+
+ SkPaint transparent;
+ transparent.setColor(SK_ColorTRANSPARENT);
+
+ if (this->ctm().hasPerspective()) {
+ SkMatrix prevCTM = this->ctm();
+ this->setCTM(SkMatrix::I());
+ this->internalDrawGlyphRun(tmpGlyphRun, offset, transparent);
+ this->setCTM(prevCTM);
+ } else {
+ this->internalDrawGlyphRun(tmpGlyphRun, offset, transparent);
+ }
+}
+
+static bool needs_new_font(SkPDFFont* font, const SkGlyph* glyph,
+ SkAdvancedTypefaceMetrics::FontType fontType) {
+ if (!font || !font->hasGlyph(glyph->getGlyphID())) {
+ return true;
+ }
+ if (fontType == SkAdvancedTypefaceMetrics::kOther_Font) {
+ return false;
+ }
+ if (glyph->isEmpty()) {
+ return false;
+ }
+
+ bool bitmapOnly = nullptr == glyph->path();
+ bool convertedToType3 = (font->getType() == SkAdvancedTypefaceMetrics::kOther_Font);
+ return convertedToType3 != bitmapOnly;
+}
+
+void SkPDFDevice::internalDrawGlyphRun(
+ const SkGlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint) {
+
+ const SkGlyphID* glyphIDs = glyphRun.glyphsIDs().data();
+ uint32_t glyphCount = SkToU32(glyphRun.glyphsIDs().size());
+ const SkFont& glyphRunFont = glyphRun.font();
+
+ if (!glyphCount || !glyphIDs || glyphRunFont.getSize() <= 0 || this->hasEmptyClip()) {
+ return;
+ }
+ if (runPaint.getPathEffect()
+ || runPaint.getMaskFilter()
+ || glyphRunFont.isEmbolden()
+ || this->ctm().hasPerspective()
+ || SkPaint::kFill_Style != runPaint.getStyle()) {
+ // Stroked Text doesn't work well with Type3 fonts.
+ this->drawGlyphRunAsPath(glyphRun, offset, runPaint);
+ return;
+ }
+ SkTypeface* typeface = glyphRunFont.getTypefaceOrDefault();
+ if (!typeface) {
+ SkDebugf("SkPDF: SkTypeface::MakeDefault() returned nullptr.\n");
+ return;
+ }
+
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, fDocument);
+ if (!metrics) {
+ return;
+ }
+ SkAdvancedTypefaceMetrics::FontType fontType = SkPDFFont::FontType(*metrics);
+
+ const std::vector<SkUnichar>& glyphToUnicode = SkPDFFont::GetUnicodeMap(typeface, fDocument);
+
+ SkClusterator clusterator(glyphRun);
+
+ int emSize;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &emSize);
+
+ SkScalar textSize = glyphRunFont.getSize();
+ SkScalar advanceScale = textSize * glyphRunFont.getScaleX() / emSize;
+
+ // textScaleX and textScaleY are used to get a conservative bounding box for glyphs.
+ SkScalar textScaleY = textSize / emSize;
+ SkScalar textScaleX = advanceScale + glyphRunFont.getSkewX() * textScaleY;
+
+ SkRect clipStackBounds = this->cs().bounds(this->bounds());
+
+ SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(runPaint));
+ ScopedContentEntry content(this, *paint, glyphRunFont.getScaleX());
+ if (!content) {
+ return;
+ }
+ SkDynamicMemoryWStream* out = content.stream();
+
+ out->writeText("BT\n");
+
+ int markId = -1;
+ if (fNodeId) {
+ markId = fDocument->getMarkIdForNodeId(fNodeId);
+ }
+
+ if (markId != -1) {
+ out->writeText("/P <</MCID ");
+ out->writeDecAsText(markId);
+ out->writeText(" >>BDC\n");
+ }
+ SK_AT_SCOPE_EXIT(if (markId != -1) out->writeText("EMC\n"));
+
+ SK_AT_SCOPE_EXIT(out->writeText("ET\n"));
+
+ const SkGlyphID maxGlyphID = SkToU16(typeface->countGlyphs() - 1);
+
+ if (clusterator.reversedChars()) {
+ out->writeText("/ReversedChars BMC\n");
+ }
+ SK_AT_SCOPE_EXIT(if (clusterator.reversedChars()) { out->writeText("EMC\n"); } );
+ GlyphPositioner glyphPositioner(out, glyphRunFont.getSkewX(), offset);
+ SkPDFFont* font = nullptr;
+
+ SkBulkGlyphMetricsAndPaths paths{strikeSpec};
+ auto glyphs = paths.glyphs(glyphRun.glyphsIDs());
+
+ while (SkClusterator::Cluster c = clusterator.next()) {
+ int index = c.fGlyphIndex;
+ int glyphLimit = index + c.fGlyphCount;
+
+ bool actualText = false;
+ SK_AT_SCOPE_EXIT(if (actualText) {
+ glyphPositioner.flush();
+ out->writeText("EMC\n");
+ });
+ if (c.fUtf8Text) { // real cluster
+ // Check if `/ActualText` needed.
+ const char* textPtr = c.fUtf8Text;
+ const char* textEnd = c.fUtf8Text + c.fTextByteLength;
+ SkUnichar unichar = SkUTF::NextUTF8(&textPtr, textEnd);
+ if (unichar < 0) {
+ return;
+ }
+ if (textPtr < textEnd || // more characters left
+ glyphLimit > index + 1 || // toUnicode wouldn't work
+ unichar != map_glyph(glyphToUnicode, glyphIDs[index])) // test single Unichar map
+ {
+ glyphPositioner.flush();
+ out->writeText("/Span<</ActualText <");
+ SkPDFUtils::WriteUTF16beHex(out, 0xFEFF); // U+FEFF = BYTE ORDER MARK
+ // the BOM marks this text as UTF-16BE, not PDFDocEncoding.
+ SkPDFUtils::WriteUTF16beHex(out, unichar); // first char
+ while (textPtr < textEnd) {
+ unichar = SkUTF::NextUTF8(&textPtr, textEnd);
+ if (unichar < 0) {
+ break;
+ }
+ SkPDFUtils::WriteUTF16beHex(out, unichar);
+ }
+ out->writeText("> >> BDC\n"); // begin marked-content sequence
+ // with an associated property list.
+ actualText = true;
+ }
+ }
+ for (; index < glyphLimit; ++index) {
+ SkGlyphID gid = glyphIDs[index];
+ if (gid > maxGlyphID) {
+ continue;
+ }
+ SkPoint xy = glyphRun.positions()[index];
+ // Do a glyph-by-glyph bounds-reject if positions are absolute.
+ SkRect glyphBounds = get_glyph_bounds_device_space(
+ glyphs[index], textScaleX, textScaleY,
+ xy + offset, this->ctm());
+ if (glyphBounds.isEmpty()) {
+ if (!contains(clipStackBounds, {glyphBounds.x(), glyphBounds.y()})) {
+ continue;
+ }
+ } else {
+ if (!clipStackBounds.intersects(glyphBounds)) {
+ continue; // reject glyphs as out of bounds
+ }
+ }
+ if (needs_new_font(font, glyphs[index], fontType)) {
+ // Not yet specified font or need to switch font.
+ font = SkPDFFont::GetFontResource(fDocument, glyphs[index], typeface);
+ SkASSERT(font); // All preconditions for SkPDFFont::GetFontResource are met.
+ glyphPositioner.flush();
+ glyphPositioner.setWideChars(font->multiByteGlyphs());
+ SkPDFWriteResourceName(out, SkPDFResourceType::kFont,
+ add_resource(fFontResources, font->indirectReference()));
+ out->writeText(" ");
+ SkPDFUtils::AppendScalar(textSize, out);
+ out->writeText(" Tf\n");
+ }
+ font->noteGlyphUsage(gid);
+ SkGlyphID encodedGlyph = font->multiByteGlyphs()
+ ? gid : font->glyphToPDFFontEncoding(gid);
+ SkScalar advance = advanceScale * glyphs[index]->advanceX();
+ glyphPositioner.writeGlyph(xy, advance, encodedGlyph);
+ }
+ }
+}
+
+void SkPDFDevice::drawGlyphRunList(const SkGlyphRunList& glyphRunList) {
+ for (const SkGlyphRun& glyphRun : glyphRunList) {
+ this->internalDrawGlyphRun(glyphRun, glyphRunList.origin(), glyphRunList.paint());
+ }
+}
+
+void SkPDFDevice::drawVertices(const SkVertices*, const SkVertices::Bone[], int, SkBlendMode,
+ const SkPaint&) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ // TODO: implement drawVertices
+}
+
+void SkPDFDevice::drawFormXObject(SkPDFIndirectReference xObject, SkDynamicMemoryWStream* content) {
+ SkASSERT(xObject);
+ SkPDFWriteResourceName(content, SkPDFResourceType::kXObject,
+ add_resource(fXObjectResources, xObject));
+ content->writeText(" Do\n");
+}
+
+void SkPDFDevice::drawDevice(SkBaseDevice* device, int x, int y, const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+
+    // Check if the source device is really a bitmap device (because that's
+    // what we return from onCreateDevice, likely due to an image filter).
+ SkPixmap pmap;
+ if (device->peekPixels(&pmap)) {
+ SkBitmap bitmap;
+ bitmap.installPixels(pmap);
+ this->drawSprite(bitmap, x, y, paint);
+ return;
+ }
+
+    // Our onCreateDevice() always creates SkPDFDevice subclasses.
+ SkPDFDevice* pdfDevice = static_cast<SkPDFDevice*>(device);
+
+ if (pdfDevice->isContentEmpty()) {
+ return;
+ }
+
+ SkMatrix matrix = SkMatrix::MakeTrans(SkIntToScalar(x), SkIntToScalar(y));
+ ScopedContentEntry content(this, &this->cs(), matrix, paint);
+ if (!content) {
+ return;
+ }
+ if (content.needShape()) {
+ SkISize dim = device->imageInfo().dimensions();
+ content.setShape(to_path(SkRect::Make(SkIRect::MakeXYWH(x, y, dim.width(), dim.height()))));
+ }
+ if (!content.needSource()) {
+ return;
+ }
+ this->drawFormXObject(pdfDevice->makeFormXObjectFromDevice(), content.stream());
+}
+
+sk_sp<SkSurface> SkPDFDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+}
+
+static std::vector<SkPDFIndirectReference> sort(const SkTHashSet<SkPDFIndirectReference>& src) {
+ std::vector<SkPDFIndirectReference> dst;
+ dst.reserve(src.count());
+ src.foreach([&dst](SkPDFIndirectReference ref) { dst.push_back(ref); } );
+ std::sort(dst.begin(), dst.end(),
+ [](SkPDFIndirectReference a, SkPDFIndirectReference b) { return a.fValue < b.fValue; });
+ return dst;
+}
+
+std::unique_ptr<SkPDFDict> SkPDFDevice::makeResourceDict() {
+ return SkPDFMakeResourceDict(sort(fGraphicStateResources),
+ sort(fShaderResources),
+ sort(fXObjectResources),
+ sort(fFontResources));
+}
+
+std::unique_ptr<SkStreamAsset> SkPDFDevice::content() {
+ if (fActiveStackState.fContentStream) {
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState();
+ }
+ if (fContent.bytesWritten() == 0) {
+ return skstd::make_unique<SkMemoryStream>();
+ }
+ SkDynamicMemoryWStream buffer;
+ if (fInitialTransform.getType() != SkMatrix::kIdentity_Mask) {
+ SkPDFUtils::AppendTransform(fInitialTransform, &buffer);
+ }
+ if (fNeedsExtraSave) {
+ buffer.writeText("q\n");
+ }
+ fContent.writeToAndReset(&buffer);
+ if (fNeedsExtraSave) {
+ buffer.writeText("Q\n");
+ }
+ fNeedsExtraSave = false;
+ return std::unique_ptr<SkStreamAsset>(buffer.detachAsStream());
+}
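+
+// Shape of the returned stream when both an initial transform and the extra
+// save are needed (illustrative):
+//
+//   0.75 0 0 -0.75 0 594 cm
+//   q
+//   ...accumulated page content...
+//   Q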
+
+/* Draws an inverse filled path by using Path Ops to compute the positive
+ * inverse using the current clip as the inverse bounds.
+ * Return true if this was an inverse path and was properly handled,
+ * otherwise returns false and the normal drawing routine should continue,
+ * either as a (incorrect) fallback or because the path was not inverse
+ * in the first place.
+ */
+bool SkPDFDevice::handleInversePath(const SkPath& origPath,
+ const SkPaint& paint,
+ bool pathIsMutable) {
+ if (!origPath.isInverseFillType()) {
+ return false;
+ }
+
+ if (this->hasEmptyClip()) {
+ return false;
+ }
+
+ SkPath modifiedPath;
+ SkPath* pathPtr = const_cast<SkPath*>(&origPath);
+ SkPaint noInversePaint(paint);
+
+ // Merge stroking operations into final path.
+ if (SkPaint::kStroke_Style == paint.getStyle() ||
+ SkPaint::kStrokeAndFill_Style == paint.getStyle()) {
+ bool doFillPath = paint.getFillPath(origPath, &modifiedPath);
+ if (doFillPath) {
+ noInversePaint.setStyle(SkPaint::kFill_Style);
+ noInversePaint.setStrokeWidth(0);
+ pathPtr = &modifiedPath;
+ } else {
+ // To be consistent with the raster output, hairline strokes
+ // are rendered as non-inverted.
+ modifiedPath.toggleInverseFillType();
+ this->internalDrawPath(this->cs(), this->ctm(), modifiedPath, paint, true);
+ return true;
+ }
+ }
+
+ // Get bounds of clip in current transform space
+ // (clip bounds are given in device space).
+ SkMatrix transformInverse;
+ SkMatrix totalMatrix = this->ctm();
+
+ if (!totalMatrix.invert(&transformInverse)) {
+ return false;
+ }
+ SkRect bounds = this->cs().bounds(this->bounds());
+ transformInverse.mapRect(&bounds);
+
+ // Extend the bounds by the line width (plus some padding)
+ // so the edge doesn't cause a visible stroke.
+ bounds.outset(paint.getStrokeWidth() + SK_Scalar1,
+ paint.getStrokeWidth() + SK_Scalar1);
+
+ if (!calculate_inverse_path(bounds, *pathPtr, &modifiedPath)) {
+ return false;
+ }
+
+ this->internalDrawPath(this->cs(), this->ctm(), modifiedPath, noInversePaint, true);
+ return true;
+}
+
+SkPDFIndirectReference SkPDFDevice::makeFormXObjectFromDevice(SkIRect bounds, bool alpha) {
+ SkMatrix inverseTransform = SkMatrix::I();
+ if (!fInitialTransform.isIdentity()) {
+ if (!fInitialTransform.invert(&inverseTransform)) {
+ SkDEBUGFAIL("Layer initial transform should be invertible.");
+ inverseTransform.reset();
+ }
+ }
+ const char* colorSpace = alpha ? "DeviceGray" : nullptr;
+
+ SkPDFIndirectReference xobject =
+ SkPDFMakeFormXObject(fDocument, this->content(),
+ SkPDFMakeArray(bounds.left(), bounds.top(),
+ bounds.right(), bounds.bottom()),
+ this->makeResourceDict(), inverseTransform, colorSpace);
+ // We always draw the form xobjects that we create back into the device, so
+ // we simply preserve the font usage instead of pulling it out and merging
+ // it back in later.
+ this->reset();
+ return xobject;
+}
+
+SkPDFIndirectReference SkPDFDevice::makeFormXObjectFromDevice(bool alpha) {
+ return this->makeFormXObjectFromDevice(SkIRect{0, 0, this->width(), this->height()}, alpha);
+}
+
+void SkPDFDevice::drawFormXObjectWithMask(SkPDFIndirectReference xObject,
+ SkPDFIndirectReference sMask,
+ SkBlendMode mode,
+ bool invertClip) {
+ SkASSERT(sMask);
+ SkPaint paint;
+ paint.setBlendMode(mode);
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), paint);
+ if (!content) {
+ return;
+ }
+ this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
+ sMask, invertClip, SkPDFGraphicState::kAlpha_SMaskMode,
+ fDocument), content.stream());
+ this->drawFormXObject(xObject, content.stream());
+ this->clearMaskOnGraphicState(content.stream());
+}
+
+static bool treat_as_regular_pdf_blend_mode(SkBlendMode blendMode) {
+ return nullptr != SkPDFUtils::BlendModeName(blendMode);
+}
+
+static void populate_graphic_state_entry_from_paint(
+ SkPDFDocument* doc,
+ const SkMatrix& matrix,
+ const SkClipStack* clipStack,
+ SkIRect deviceBounds,
+ const SkPaint& paint,
+ const SkMatrix& initialTransform,
+ SkScalar textScale,
+ SkPDFGraphicStackState::Entry* entry,
+ SkTHashSet<SkPDFIndirectReference>* shaderResources,
+ SkTHashSet<SkPDFIndirectReference>* graphicStateResources) {
+ NOT_IMPLEMENTED(paint.getPathEffect() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getMaskFilter() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getColorFilter() != nullptr, false);
+
+ entry->fMatrix = matrix;
+ entry->fClipStackGenID = clipStack ? clipStack->getTopmostGenID()
+ : SkClipStack::kWideOpenGenID;
+ SkColor4f color = paint.getColor4f();
+ entry->fColor = {color.fR, color.fG, color.fB, 1};
+ entry->fShaderIndex = -1;
+
+ // PDF treats a shader as a color, so we only set one or the other.
+ SkShader* shader = paint.getShader();
+ if (shader) {
+ if (SkShader::kColor_GradientType == shader->asAGradient(nullptr)) {
+ // We don't have to set a shader just for a color.
+ SkShader::GradientInfo gradientInfo;
+ SkColor gradientColor = SK_ColorBLACK;
+ gradientInfo.fColors = &gradientColor;
+ gradientInfo.fColorOffsets = nullptr;
+ gradientInfo.fColorCount = 1;
+ SkAssertResult(shader->asAGradient(&gradientInfo) == SkShader::kColor_GradientType);
+ color = SkColor4f::FromColor(gradientColor);
+            entry->fColor = {color.fR, color.fG, color.fB, 1};
+
+ } else {
+ // PDF positions patterns relative to the initial transform, so
+ // we need to apply the current transform to the shader parameters.
+ SkMatrix transform = matrix;
+ transform.postConcat(initialTransform);
+
+ // PDF doesn't support kClamp_TileMode, so we simulate it by making
+ // a pattern the size of the current clip.
+ SkRect clipStackBounds = clipStack ? clipStack->bounds(deviceBounds)
+ : SkRect::Make(deviceBounds);
+
+ // We need to apply the initial transform to bounds in order to get
+ // bounds in a consistent coordinate system.
+ initialTransform.mapRect(&clipStackBounds);
+ SkIRect bounds;
+ clipStackBounds.roundOut(&bounds);
+
+ SkPDFIndirectReference pdfShader
+ = SkPDFMakeShader(doc, shader, transform, bounds, paint.getColor4f());
+
+ if (pdfShader) {
+ // pdfShader has been canonicalized so we can directly compare pointers.
+ entry->fShaderIndex = add_resource(*shaderResources, pdfShader);
+ }
+ }
+ }
+
+ SkPDFIndirectReference newGraphicState;
+ if (color == paint.getColor4f()) {
+ newGraphicState = SkPDFGraphicState::GetGraphicStateForPaint(doc, paint);
+ } else {
+ SkPaint newPaint = paint;
+ newPaint.setColor4f(color, nullptr);
+ newGraphicState = SkPDFGraphicState::GetGraphicStateForPaint(doc, newPaint);
+ }
+ entry->fGraphicStateIndex = add_resource(*graphicStateResources, newGraphicState);
+ entry->fTextScaleX = textScale;
+}
+
+SkDynamicMemoryWStream* SkPDFDevice::setUpContentEntry(const SkClipStack* clipStack,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkScalar textScale,
+ SkPDFIndirectReference* dst) {
+ SkASSERT(!*dst);
+ SkBlendMode blendMode = paint.getBlendMode();
+
+ // Dst xfer mode doesn't draw source at all.
+ if (blendMode == SkBlendMode::kDst) {
+ return nullptr;
+ }
+
+ // For the following modes, we want to handle source and destination
+ // separately, so make an object of what's already there.
+ if (!treat_as_regular_pdf_blend_mode(blendMode) && blendMode != SkBlendMode::kDstOver) {
+ if (!isContentEmpty()) {
+ *dst = this->makeFormXObjectFromDevice();
+ SkASSERT(isContentEmpty());
+ } else if (blendMode != SkBlendMode::kSrc &&
+ blendMode != SkBlendMode::kSrcOut) {
+ // Except for Src and SrcOut, if there isn't anything already there,
+ // then we're done.
+ return nullptr;
+ }
+ }
+ // TODO(vandebo): Figure out how/if we can handle the following modes:
+ // Xor, Plus. For now, we treat them as SrcOver/Normal.
+
+ if (treat_as_regular_pdf_blend_mode(blendMode)) {
+ if (!fActiveStackState.fContentStream) {
+ if (fContent.bytesWritten() != 0) {
+ fContent.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fActiveStackState = SkPDFGraphicStackState(&fContent);
+ } else {
+            SkASSERT(fActiveStackState.fContentStream == &fContent);
+ }
+ } else {
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState(&fContentBuffer);
+ }
+ SkASSERT(fActiveStackState.fContentStream);
+ SkPDFGraphicStackState::Entry entry;
+ populate_graphic_state_entry_from_paint(
+ fDocument,
+ matrix,
+ clipStack,
+ this->bounds(),
+ paint,
+ fInitialTransform,
+ textScale,
+ &entry,
+ &fShaderResources,
+ &fGraphicStateResources);
+ fActiveStackState.updateClip(clipStack, this->bounds());
+ fActiveStackState.updateMatrix(entry.fMatrix);
+ fActiveStackState.updateDrawingState(entry);
+
+ return fActiveStackState.fContentStream;
+}
+
+void SkPDFDevice::finishContentEntry(const SkClipStack* clipStack,
+ SkBlendMode blendMode,
+ SkPDFIndirectReference dst,
+ SkPath* shape) {
+ SkASSERT(blendMode != SkBlendMode::kDst);
+ if (treat_as_regular_pdf_blend_mode(blendMode)) {
+ SkASSERT(!dst);
+ return;
+ }
+
+ SkASSERT(fActiveStackState.fContentStream);
+
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState();
+
+ if (blendMode == SkBlendMode::kDstOver) {
+ SkASSERT(!dst);
+ if (fContentBuffer.bytesWritten() != 0) {
+ if (fContent.bytesWritten() != 0) {
+ fContentBuffer.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fContentBuffer.prependToAndReset(&fContent);
+ SkASSERT(fContentBuffer.bytesWritten() == 0);
+ }
+ return;
+ }
+ if (fContentBuffer.bytesWritten() != 0) {
+ if (fContent.bytesWritten() != 0) {
+ fContent.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fContentBuffer.writeToAndReset(&fContent);
+ SkASSERT(fContentBuffer.bytesWritten() == 0);
+ }
+
+ if (!dst) {
+ SkASSERT(blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ }
+
+ SkASSERT(dst);
+ // Changing the current content into a form-xobject will destroy the clip
+ // objects which is fine since the xobject will already be clipped. However
+ // if source has shape, we need to clip it too, so a copy of the clip is
+ // saved.
+
+ SkPaint stockPaint;
+
+ SkPDFIndirectReference srcFormXObject;
+ if (this->isContentEmpty()) {
+ // If nothing was drawn and there's no shape, then the draw was a
+ // no-op, but dst needs to be restored for that to be true.
+        // If there is shape, then an empty source with Src, SrcIn, SrcOut,
+        // DstIn, DstATop or Modulate reduces to Clear, and DstOut or SrcATop
+        // reduces to Dst.
+ if (shape == nullptr || blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ this->drawFormXObject(dst, content.stream());
+ return;
+ } else {
+ blendMode = SkBlendMode::kClear;
+ }
+ } else {
+ srcFormXObject = this->makeFormXObjectFromDevice();
+ }
+
+ // TODO(vandebo) srcFormXObject may contain alpha, but here we want it
+ // without alpha.
+ if (blendMode == SkBlendMode::kSrcATop) {
+ // TODO(vandebo): In order to properly support SrcATop we have to track
+ // the shape of what's been drawn at all times. It's the intersection of
+ // the non-transparent parts of the device and the outlines (shape) of
+ // all images and devices drawn.
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver, true);
+ } else {
+ if (shape != nullptr) {
+ // Draw shape into a form-xobject.
+ SkPaint filledPaint;
+ filledPaint.setColor(SK_ColorBLACK);
+ filledPaint.setStyle(SkPaint::kFill_Style);
+ SkClipStack empty;
+ SkPDFDevice shapeDev(this->size(), fDocument, fInitialTransform);
+ shapeDev.internalDrawPath(clipStack ? *clipStack : empty,
+ SkMatrix::I(), *shape, filledPaint, true);
+ this->drawFormXObjectWithMask(dst, shapeDev.makeFormXObjectFromDevice(),
+ SkBlendMode::kSrcOver, true);
+ } else {
+ this->drawFormXObjectWithMask(dst, srcFormXObject, SkBlendMode::kSrcOver, true);
+ }
+ }
+
+ if (blendMode == SkBlendMode::kClear) {
+ return;
+ } else if (blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kDstATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ if (content) {
+ this->drawFormXObject(srcFormXObject, content.stream());
+ }
+ if (blendMode == SkBlendMode::kSrc) {
+ return;
+ }
+ } else if (blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ if (content) {
+ this->drawFormXObject(dst, content.stream());
+ }
+ }
+
+ SkASSERT(blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kDstIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop ||
+ blendMode == SkBlendMode::kDstATop ||
+ blendMode == SkBlendMode::kModulate);
+
+ if (blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver,
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ } else {
+ SkBlendMode mode = SkBlendMode::kSrcOver;
+ if (blendMode == SkBlendMode::kModulate) {
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver, false);
+ mode = SkBlendMode::kMultiply;
+ }
+ this->drawFormXObjectWithMask(dst, srcFormXObject, mode, blendMode == SkBlendMode::kDstOut);
+ return;
+ }
+}
+
+bool SkPDFDevice::isContentEmpty() {
+ return fContent.bytesWritten() == 0 && fContentBuffer.bytesWritten() == 0;
+}
+
+static SkSize rect_to_size(const SkRect& r) { return {r.width(), r.height()}; }
+
+static sk_sp<SkImage> color_filter(const SkImage* image,
+ SkColorFilter* colorFilter) {
+ auto surface =
+ SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(image->dimensions()));
+ SkASSERT(surface);
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+ SkPaint paint;
+ paint.setColorFilter(sk_ref_sp(colorFilter));
+ canvas->drawImage(image, 0, 0, &paint);
+ return surface->makeImageSnapshot();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static bool is_integer(SkScalar x) {
+ return x == SkScalarTruncToScalar(x);
+}
+
+static bool is_integral(const SkRect& r) {
+ return is_integer(r.left()) &&
+ is_integer(r.top()) &&
+ is_integer(r.right()) &&
+ is_integer(r.bottom());
+}
+
+void SkPDFDevice::internalDrawImageRect(SkKeyedImage imageSubset,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& srcPaint,
+ const SkMatrix& ctm) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ if (!imageSubset) {
+ return;
+ }
+
+ // First, figure out the src->dst transform and subset the image if needed.
+ SkIRect bounds = imageSubset.image()->bounds();
+ SkRect srcRect = src ? *src : SkRect::Make(bounds);
+ SkMatrix transform;
+ transform.setRectToRect(srcRect, dst, SkMatrix::kFill_ScaleToFit);
+ if (src && *src != SkRect::Make(bounds)) {
+ if (!srcRect.intersect(SkRect::Make(bounds))) {
+ return;
+ }
+ srcRect.roundOut(&bounds);
+ transform.preTranslate(SkIntToScalar(bounds.x()),
+ SkIntToScalar(bounds.y()));
+ if (bounds != imageSubset.image()->bounds()) {
+ imageSubset = imageSubset.subset(bounds);
+ }
+ if (!imageSubset) {
+ return;
+ }
+ }
+
+ // If the image is opaque and the paint's alpha is too, replace
+ // kSrc blendmode with kSrcOver. http://crbug.com/473572
+ SkTCopyOnFirstWrite<SkPaint> paint(srcPaint);
+ if (SkBlendMode::kSrcOver != paint->getBlendMode() &&
+ imageSubset.image()->isOpaque() &&
+ kSrcOver_SkXfermodeInterpretation == SkInterpretXfermode(*paint, false))
+ {
+ paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
+ }
+
+ // Alpha-only images need to get their color from the shader, before
+ // applying the colorfilter.
+ if (imageSubset.image()->isAlphaOnly() && paint->getColorFilter()) {
+ // must blend alpha image and shader before applying colorfilter.
+ auto surface =
+ SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(imageSubset.image()->dimensions()));
+ SkCanvas* canvas = surface->getCanvas();
+ SkPaint tmpPaint;
+        // In the case of alpha images with shaders, the shader's coordinate
+        // system is the image's coordinates.
+ tmpPaint.setShader(sk_ref_sp(paint->getShader()));
+ tmpPaint.setColor4f(paint->getColor4f(), nullptr);
+ canvas->clear(0x00000000);
+ canvas->drawImage(imageSubset.image().get(), 0, 0, &tmpPaint);
+ if (paint->getShader() != nullptr) {
+ paint.writable()->setShader(nullptr);
+ }
+ imageSubset = SkKeyedImage(surface->makeImageSnapshot());
+ SkASSERT(!imageSubset.image()->isAlphaOnly());
+ }
+
+ if (imageSubset.image()->isAlphaOnly()) {
+ // The ColorFilter applies to the paint color/shader, not the alpha layer.
+ SkASSERT(nullptr == paint->getColorFilter());
+
+ sk_sp<SkImage> mask = alpha_image_to_greyscale_image(imageSubset.image().get());
+ if (!mask) {
+ return;
+ }
+ // PDF doesn't seem to allow masking vector graphics with an Image XObject.
+ // Must mask with a Form XObject.
+ sk_sp<SkPDFDevice> maskDevice = this->makeCongruentDevice();
+ {
+ SkCanvas canvas(maskDevice);
+            // This clip prevents the mask image shader from covering
+            // the entire device when that isn't necessary.
+ canvas.clipRect(this->cs().bounds(this->bounds()));
+ canvas.concat(ctm);
+ if (paint->getMaskFilter()) {
+ SkPaint tmpPaint;
+ tmpPaint.setShader(mask->makeShader(&transform));
+ tmpPaint.setMaskFilter(sk_ref_sp(paint->getMaskFilter()));
+ canvas.drawRect(dst, tmpPaint);
+ } else {
+ if (src && !is_integral(*src)) {
+ canvas.clipRect(dst);
+ }
+ canvas.concat(transform);
+ canvas.drawImage(mask, 0, 0);
+ }
+ }
+ SkIRect maskDeviceBounds = maskDevice->cs().bounds(maskDevice->bounds()).roundOut();
+ if (!ctm.isIdentity() && paint->getShader()) {
+ transform_shader(paint.writable(), ctm); // The content entry below uses the identity matrix.
+ }
+ ScopedContentEntry content(this, &this->cs(), SkMatrix::I(), *paint);
+ if (!content) {
+ return;
+ }
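+ // Install the mask as a luminosity soft mask, paint a rectangle covering
+ // the whole device so the mask's luminance gates the paint, then clear the
+ // soft mask from the graphic state.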
+ this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
+ maskDevice->makeFormXObjectFromDevice(maskDeviceBounds, true), false,
+ SkPDFGraphicState::kLuminosity_SMaskMode, fDocument), content.stream());
+ SkPDFUtils::AppendRectangle(SkRect::Make(this->size()), content.stream());
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, SkPath::kWinding_FillType, content.stream());
+ this->clearMaskOnGraphicState(content.stream());
+ return;
+ }
+ if (paint->getMaskFilter()) {
+ paint.writable()->setShader(imageSubset.image()->makeShader(&transform));
+ SkPath path = to_path(dst); // handles non-integral clipping.
+ this->internalDrawPath(this->cs(), this->ctm(), path, *paint, true);
+ return;
+ }
+ transform.postConcat(ctm);
+
+ bool needToRestore = false;
+ if (src && !is_integral(*src)) {
+ // Need sub-pixel clipping to fix https://bug.skia.org/4374
+ this->cs().save();
+ this->cs().clipRect(dst, ctm, SkClipOp::kIntersect, true);
+ needToRestore = true;
+ }
+ SK_AT_SCOPE_EXIT(if (needToRestore) { this->cs().restore(); });
+
+ SkMatrix matrix = transform;
+
+ // Rasterize the bitmap using perspective in a new bitmap.
+ if (transform.hasPerspective()) {
+ // Transform the bitmap in the new space, without taking into
+ // account the initial transform.
+ SkRect imageBounds = SkRect::Make(imageSubset.image()->bounds());
+ SkPath perspectiveOutline = to_path(imageBounds);
+ perspectiveOutline.transform(transform);
+
+ // TODO(edisonn): perf - use current clip too.
+ // Retrieve the bounds of the new shape.
+ SkRect bounds = perspectiveOutline.getBounds();
+
+ // Transform the bitmap in the new space, taking into
+ // account the initial transform.
+ SkMatrix total = transform;
+ total.postConcat(fInitialTransform);
+
+ SkPath physicalPerspectiveOutline = to_path(imageBounds);
+ physicalPerspectiveOutline.transform(total);
+
+ SkRect physicalPerspectiveBounds =
+ physicalPerspectiveOutline.getBounds();
+ SkScalar scaleX = physicalPerspectiveBounds.width() / bounds.width();
+ SkScalar scaleY = physicalPerspectiveBounds.height() / bounds.height();
+
+ // TODO(edisonn): A better approach would be to use a bitmap shader
+ // (in clamp mode) and draw a rect over the entire bounding box. Then
+ // intersect perspectiveOutline to the clip. That will avoid introducing
+ // alpha to the image while still giving good behavior at the edge of
+ // the image. Avoiding alpha will reduce the pdf size and generation
+ // CPU time some.
+
+ SkISize wh = rect_to_size(physicalPerspectiveBounds).toCeil();
+
+ auto surface = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(wh));
+ if (!surface) {
+ return;
+ }
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+
+ SkScalar deltaX = bounds.left();
+ SkScalar deltaY = bounds.top();
+
+ SkMatrix offsetMatrix = transform;
+ offsetMatrix.postTranslate(-deltaX, -deltaY);
+ offsetMatrix.postScale(scaleX, scaleY);
+
+ // Translate the drawing in the new canvas so the shape fits the
+ // bitmap exactly.
+ canvas->setMatrix(offsetMatrix);
+ canvas->drawImage(imageSubset.image(), 0, 0);
+ // Make sure the final bits are in the bitmap.
+ surface->flush();
+
+ // In the new space, we use the identity matrix translated
+ // and scaled to reflect DPI.
+ matrix.setScale(1 / scaleX, 1 / scaleY);
+ matrix.postTranslate(deltaX, deltaY);
+
+ imageSubset = SkKeyedImage(surface->makeImageSnapshot());
+ if (!imageSubset) {
+ return;
+ }
+ }
+
+ SkMatrix scaled;
+ // Adjust for origin flip.
+ scaled.setScale(SK_Scalar1, -SK_Scalar1);
+ scaled.postTranslate(0, SK_Scalar1);
+ // Scale the image up from 1x1 to WxH.
+ SkIRect subset = imageSubset.image()->bounds();
+ scaled.postScale(SkIntToScalar(subset.width()),
+ SkIntToScalar(subset.height()));
+ scaled.postConcat(matrix);
+ ScopedContentEntry content(this, &this->cs(), scaled, *paint);
+ if (!content) {
+ return;
+ }
+ if (content.needShape()) {
+ SkPath shape = to_path(SkRect::Make(subset));
+ shape.transform(matrix);
+ content.setShape(shape);
+ }
+ if (!content.needSource()) {
+ return;
+ }
+
+ if (SkColorFilter* colorFilter = paint->getColorFilter()) {
+ sk_sp<SkImage> img = color_filter(imageSubset.image().get(), colorFilter);
+ imageSubset = SkKeyedImage(std::move(img));
+ if (!imageSubset) {
+ return;
+ }
+ // TODO(halcanary): de-dupe this by caching filtered images.
+ // (maybe in the resource cache?)
+ }
+
+ SkBitmapKey key = imageSubset.key();
+ SkPDFIndirectReference* pdfimagePtr = fDocument->fPDFBitmapMap.find(key);
+ SkPDFIndirectReference pdfimage = pdfimagePtr ? *pdfimagePtr : SkPDFIndirectReference();
+ if (!pdfimagePtr) {
+ SkASSERT(imageSubset);
+ pdfimage = SkPDFSerializeImage(imageSubset.image().get(), fDocument,
+ fDocument->metadata().fEncodingQuality);
+ SkASSERT((key != SkBitmapKey{{0, 0, 0, 0}, 0}));
+ fDocument->fPDFBitmapMap.set(key, pdfimage);
+ }
+ SkASSERT(pdfimage != SkPDFIndirectReference());
+ this->drawFormXObject(pdfimage, content.stream());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkImageFilter.h"
+#include "src/core/SkSpecialImage.h"
+
+void SkPDFDevice::drawSpecial(SkSpecialImage* srcImg, int x, int y, const SkPaint& paint,
+ SkImage* clipImage, const SkMatrix& clipMatrix) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ SkASSERT(!srcImg->isTextureBacked());
+
+ //TODO: clipImage support
+
+ SkBitmap resultBM;
+
+ SkImageFilter* filter = paint.getImageFilter();
+ if (filter) {
+ SkIPoint offset = SkIPoint::Make(0, 0);
+ SkMatrix matrix = this->ctm();
+ matrix.postTranslate(SkIntToScalar(-x), SkIntToScalar(-y));
+ const SkIRect clipBounds =
+ this->cs().bounds(this->bounds()).roundOut().makeOffset(-x, -y);
+ sk_sp<SkImageFilterCache> cache(this->getImageFilterCache());
+ // TODO: Should PDF be operating in a specified color type/space? For now, run the filter
+ // in the same color space as the source (this is different from all other backends).
+ SkImageFilter_Base::Context ctx(matrix, clipBounds, cache.get(), kN32_SkColorType,
+ srcImg->getColorSpace(), srcImg);
+
+ sk_sp<SkSpecialImage> resultImg(as_IFB(filter)->filterImage(ctx).imageAndOffset(&offset));
+ if (resultImg) {
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+ if (resultImg->getROPixels(&resultBM)) {
+ this->drawSprite(resultBM, x + offset.x(), y + offset.y(), tmpUnfiltered);
+ }
+ }
+ } else {
+ if (srcImg->getROPixels(&resultBM)) {
+ this->drawSprite(resultBM, x, y, paint);
+ }
+ }
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap);
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(nullptr, image->bounds(), image->makeNonTextureImage());
+}
+
+SkImageFilterCache* SkPDFDevice::getImageFilterCache() {
+ // We always return a transient cache, so it is freed after each
+ // filter traversal.
+ return SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.h b/gfx/skia/skia/src/pdf/SkPDFDevice.h
new file mode 100644
index 0000000000..ad8c12a19f
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFDevice_DEFINED
+#define SkPDFDevice_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkClipStackDevice.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/pdf/SkKeyedImage.h"
+#include "src/pdf/SkPDFGraphicStackState.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <vector>
+
+class SkGlyphRunList;
+class SkKeyedImage;
+class SkPDFArray;
+class SkPDFDevice;
+class SkPDFDict;
+class SkPDFDocument;
+class SkPDFFont;
+class SkPDFObject;
+class SkPath;
+class SkRRect;
+struct SkPDFIndirectReference;
+
+/**
+ * \class SkPDFDevice
+ *
+ * An SkPDFDevice is the drawing context for a page or layer of PDF
+ * content.
+ */
+class SkPDFDevice final : public SkClipStackDevice {
+public:
+ /**
+ * @param pageSize Page size in point units.
+ * 1 point == 127/360 mm == 1/72 inch
+ * @param document A non-null pointer back to the
+ * PDFDocument object. The document is responsible for
+ * de-duplicating across pages (via the SkPDFDocument) and
+ * for early serializing of large immutable objects, such
+ * as images (via SkPDFDocument::serialize()).
+ * @param initialTransform Transform to be applied to the entire page.
+ */
+ SkPDFDevice(SkISize pageSize, SkPDFDocument* document,
+ const SkMatrix& initialTransform = SkMatrix::I());
+
+ sk_sp<SkPDFDevice> makeCongruentDevice() {
+ return sk_make_sp<SkPDFDevice>(this->size(), fDocument);
+ }
+
+ ~SkPDFDevice() override;
+
+ /**
+ * These are called inside the per-device-layer loop for each draw call.
+ * When these are called, we have already applied any saveLayer
+ * operations, and are handling any looping from the paint.
+ */
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode,
+ size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr, const SkPaint& paint) override;
+ void drawPath(const SkPath& origpath, const SkPaint& paint, bool pathIsMutable) override;
+ void drawBitmapRect(const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint&, SkCanvas::SrcRectConstraint) override;
+ void drawSprite(const SkBitmap& bitmap, int x, int y,
+ const SkPaint& paint) override;
+
+ void drawImageRect(const SkImage*,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint&,
+ SkCanvas::SrcRectConstraint) override;
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override;
+ void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const SkPaint&) override;
+ void drawDevice(SkBaseDevice*, int x, int y,
+ const SkPaint&) override;
+
+ // PDF specific methods.
+
+ /** Create the resource dictionary for this device. Destructive. */
+ std::unique_ptr<SkPDFDict> makeResourceDict();
+
+ /** Returns a SkStream with the page contents.
+ */
+ std::unique_ptr<SkStreamAsset> content();
+
+ SkISize size() const { return this->imageInfo().dimensions(); }
+ SkIRect bounds() const { return this->imageInfo().bounds(); }
+
+ void DrawGlyphRunAsPath(SkPDFDevice* dev, const SkGlyphRun& glyphRun, SkPoint offset);
+
+ const SkMatrix& initialTransform() const { return fInitialTransform; }
+
+protected:
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ void drawAnnotation(const SkRect&, const char key[], SkData* value) override;
+
+ void drawSpecial(SkSpecialImage*, int x, int y, const SkPaint&,
+ SkImage*, const SkMatrix&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ SkImageFilterCache* getImageFilterCache() override;
+
+private:
+ // TODO(vandebo): push most of SkPDFDevice's state into a core object in
+ // order to get the right access levels without using friend.
+ friend class ScopedContentEntry;
+
+ SkMatrix fInitialTransform;
+
+ SkTHashSet<SkPDFIndirectReference> fGraphicStateResources;
+ SkTHashSet<SkPDFIndirectReference> fXObjectResources;
+ SkTHashSet<SkPDFIndirectReference> fShaderResources;
+ SkTHashSet<SkPDFIndirectReference> fFontResources;
+ int fNodeId;
+
+ SkDynamicMemoryWStream fContent;
+ SkDynamicMemoryWStream fContentBuffer;
+ bool fNeedsExtraSave = false;
+ SkPDFGraphicStackState fActiveStackState;
+ SkPDFDocument* fDocument;
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ // Set alpha to true if making a transparency group form x-objects.
+ SkPDFIndirectReference makeFormXObjectFromDevice(bool alpha = false);
+ SkPDFIndirectReference makeFormXObjectFromDevice(SkIRect bbox, bool alpha = false);
+
+ void drawFormXObjectWithMask(SkPDFIndirectReference xObject,
+ SkPDFIndirectReference sMask,
+ SkBlendMode,
+ bool invertClip);
+
+ // If the paint or clip is such that we shouldn't draw anything, this
+ // returns nullptr and does not create a content entry.
+ // setUpContentEntry and finishContentEntry can be used directly, but
+ // the preferred method is to use the ScopedContentEntry helper class.
+ SkDynamicMemoryWStream* setUpContentEntry(const SkClipStack* clipStack,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkScalar,
+ SkPDFIndirectReference* dst);
+ void finishContentEntry(const SkClipStack*, SkBlendMode, SkPDFIndirectReference, SkPath*);
+ bool isContentEmpty();
+
+ void internalDrawGlyphRun(const SkGlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint);
+ void drawGlyphRunAsPath(const SkGlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint);
+
+ void internalDrawImageRect(SkKeyedImage,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint&,
+ const SkMatrix& canvasTransformationMatrix);
+
+ void internalDrawPath(const SkClipStack&,
+ const SkMatrix&,
+ const SkPath&,
+ const SkPaint&,
+ bool pathIsMutable);
+
+ void internalDrawPathWithFilter(const SkClipStack& clipStack,
+ const SkMatrix& ctm,
+ const SkPath& origPath,
+ const SkPaint& paint);
+
+ bool handleInversePath(const SkPath& origPath, const SkPaint& paint, bool pathIsMutable);
+
+ void clearMaskOnGraphicState(SkDynamicMemoryWStream*);
+ void setGraphicState(SkPDFIndirectReference gs, SkDynamicMemoryWStream*);
+ void drawFormXObject(SkPDFIndirectReference xObject, SkDynamicMemoryWStream*);
+
+ bool hasEmptyClip() const { return this->cs().isEmpty(this->bounds()); }
+
+ void reset();
+
+ typedef SkClipStackDevice INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocument.cpp b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
new file mode 100644
index 0000000000..abf301596f
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+
+#include "include/core/SkStream.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFGradientShader.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFShader.h"
+#include "src/pdf/SkPDFTag.h"
+#include "src/pdf/SkPDFUtils.h"
+
+#include <utility>
+
+// For use in SkCanvas::drawAnnotation
+const char* SkPDFGetNodeIdKey() {
+ static constexpr char key[] = "PDF_Node_Key";
+ return key;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkPDFOffsetMap::markStartOfDocument(const SkWStream* s) { fBaseOffset = s->bytesWritten(); }
+
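+// Asserts minuend >= subtrahend (via the comma operator), then subtracts.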
+static size_t difference(size_t minuend, size_t subtrahend) {
+ return SkASSERT(minuend >= subtrahend), minuend - subtrahend;
+}
+
+void SkPDFOffsetMap::markStartOfObject(int referenceNumber, const SkWStream* s) {
+ SkASSERT(referenceNumber > 0);
+ size_t index = SkToSizeT(referenceNumber - 1);
+ if (index >= fOffsets.size()) {
+ fOffsets.resize(index + 1);
+ }
+ fOffsets[index] = SkToInt(difference(s->bytesWritten(), fBaseOffset));
+}
+
+int SkPDFOffsetMap::objectCount() const {
+ return SkToInt(fOffsets.size() + 1); // Include the special zeroth object in the count.
+}
+
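+// Emit the cross-reference table. Object 0 is always the free-list head;
+// each in-use object gets a 10-digit byte offset. For a two-object document
+// the output looks like (offsets illustrative):
+//   xref
+//   0 3
+//   0000000000 65535 f
+//   0000000015 00000 n
+//   0000000109 00000 n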
+int SkPDFOffsetMap::emitCrossReferenceTable(SkWStream* s) const {
+ int xRefFileOffset = SkToInt(difference(s->bytesWritten(), fBaseOffset));
+ s->writeText("xref\n0 ");
+ s->writeDecAsText(this->objectCount());
+ s->writeText("\n0000000000 65535 f \n");
+ for (int offset : fOffsets) {
+ SkASSERT(offset > 0); // Offset was set.
+ s->writeBigDecAsText(offset, 10);
+ s->writeText(" 00000 n \n");
+ }
+ return xRefFileOffset;
+}
+////////////////////////////////////////////////////////////////////////////////
+
+#define SKPDF_MAGIC "\xD3\xEB\xE9\xE1"
+#ifndef SK_BUILD_FOR_WIN
+static_assert((SKPDF_MAGIC[0] & 0x7F) == "Skia"[0], "");
+static_assert((SKPDF_MAGIC[1] & 0x7F) == "Skia"[1], "");
+static_assert((SKPDF_MAGIC[2] & 0x7F) == "Skia"[2], "");
+static_assert((SKPDF_MAGIC[3] & 0x7F) == "Skia"[3], "");
+#endif
+static void serializeHeader(SkPDFOffsetMap* offsetMap, SkWStream* wStream) {
+ offsetMap->markStartOfDocument(wStream);
+ wStream->writeText("%PDF-1.4\n%" SKPDF_MAGIC "\n");
+ // The PDF spec recommends including a comment with four
+ // bytes, all with their high bits set. "\xD3\xEB\xE9\xE1" is
+ // "Skia" with the high bits set.
+}
+#undef SKPDF_MAGIC
+
+static void begin_indirect_object(SkPDFOffsetMap* offsetMap,
+ SkPDFIndirectReference ref,
+ SkWStream* s) {
+ offsetMap->markStartOfObject(ref.fValue, s);
+ s->writeDecAsText(ref.fValue);
+ s->writeText(" 0 obj\n"); // Generation number is always 0.
+}
+
+static void end_indirect_object(SkWStream* s) { s->writeText("\nendobj\n"); }
+
+// Xref table and footer
+static void serialize_footer(const SkPDFOffsetMap& offsetMap,
+ SkWStream* wStream,
+ SkPDFIndirectReference infoDict,
+ SkPDFIndirectReference docCatalog,
+ SkUUID uuid) {
+ int xRefFileOffset = offsetMap.emitCrossReferenceTable(wStream);
+ SkPDFDict trailerDict;
+ trailerDict.insertInt("Size", offsetMap.objectCount());
+ SkASSERT(docCatalog != SkPDFIndirectReference());
+ trailerDict.insertRef("Root", docCatalog);
+ SkASSERT(infoDict != SkPDFIndirectReference());
+ trailerDict.insertRef("Info", infoDict);
+ if (SkUUID() != uuid) {
+ trailerDict.insertObject("ID", SkPDFMetadata::MakePdfId(uuid, uuid));
+ }
+ wStream->writeText("trailer\n");
+ trailerDict.emitObject(wStream);
+ wStream->writeText("\nstartxref\n");
+ wStream->writeBigDecAsText(xRefFileOffset);
+ wStream->writeText("\n%%EOF");
+}
+
+static SkPDFIndirectReference generate_page_tree(
+ SkPDFDocument* doc,
+ std::vector<std::unique_ptr<SkPDFDict>> pages,
+ const std::vector<SkPDFIndirectReference>& pageRefs) {
+ // PDF wants a tree describing all the pages in the document. We arbitrarily
+ // choose 8 (kMaxNodeSize) as the maximum number of children per node. The internal
+ // nodes have type "Pages" with an array of children, a parent pointer, and
+ // the number of leaves below the node as "Count." The leaves are passed
+ // into the method, have type "Page" and need a parent pointer. This method
+ // builds the tree bottom up, skipping internal nodes that would have only
+ // one child.
+ SkASSERT(pages.size() > 0);
+ struct PageTreeNode {
+ std::unique_ptr<SkPDFDict> fNode;
+ SkPDFIndirectReference fReservedRef;
+ int fPageObjectDescendantCount;
+
+ static std::vector<PageTreeNode> Layer(std::vector<PageTreeNode> vec, SkPDFDocument* doc) {
+ std::vector<PageTreeNode> result;
+ static constexpr size_t kMaxNodeSize = 8;
+ const size_t n = vec.size();
+ SkASSERT(n >= 1);
+ const size_t result_len = (n - 1) / kMaxNodeSize + 1;
+ SkASSERT(result_len >= 1);
+ SkASSERT(n == 1 || result_len < n);
+ result.reserve(result_len);
+ size_t index = 0;
+ for (size_t i = 0; i < result_len; ++i) {
+ if (n != 1 && index + 1 == n) { // No need to create a new node.
+ result.push_back(std::move(vec[index++]));
+ continue;
+ }
+ SkPDFIndirectReference parent = doc->reserveRef();
+ auto kids_list = SkPDFMakeArray();
+ int descendantCount = 0;
+ for (size_t j = 0; j < kMaxNodeSize && index < n; ++j) {
+ PageTreeNode& node = vec[index++];
+ node.fNode->insertRef("Parent", parent);
+ kids_list->appendRef(doc->emit(*node.fNode, node.fReservedRef));
+ descendantCount += node.fPageObjectDescendantCount;
+ }
+ auto next = SkPDFMakeDict("Pages");
+ next->insertInt("Count", descendantCount);
+ next->insertObject("Kids", std::move(kids_list));
+ result.push_back(PageTreeNode{std::move(next), parent, descendantCount});
+ }
+ return result;
+ }
+ };
+ std::vector<PageTreeNode> currentLayer;
+ currentLayer.reserve(pages.size());
+ SkASSERT(pages.size() == pageRefs.size());
+ for (size_t i = 0; i < pages.size(); ++i) {
+ currentLayer.push_back(PageTreeNode{std::move(pages[i]), pageRefs[i], 1});
+ }
+ currentLayer = PageTreeNode::Layer(std::move(currentLayer), doc);
+ while (currentLayer.size() > 1) {
+ currentLayer = PageTreeNode::Layer(std::move(currentLayer), doc);
+ }
+ SkASSERT(currentLayer.size() == 1);
+ const PageTreeNode& root = currentLayer[0];
+ return doc->emit(*root.fNode, root.fReservedRef);
+}
+
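+// Destroy *dst in place and reconstruct it with placement new; used to
+// re-seat the member SkCanvas between pages without a heap allocation.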
+template<typename T, typename... Args>
+static void reset_object(T* dst, Args&&... args) {
+ dst->~T();
+ new (dst) T(std::forward<Args>(args)...);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFDocument::SkPDFDocument(SkWStream* stream,
+ SkPDF::Metadata metadata)
+ : SkDocument(stream)
+ , fMetadata(std::move(metadata)) {
+ constexpr float kDpiForRasterScaleOne = 72.0f;
+ if (fMetadata.fRasterDPI != kDpiForRasterScaleOne) {
+ fInverseRasterScale = kDpiForRasterScaleOne / fMetadata.fRasterDPI;
+ fRasterScale = fMetadata.fRasterDPI / kDpiForRasterScaleOne;
+ }
+ if (fMetadata.fStructureElementTreeRoot) {
+ fTagTree.init(fMetadata.fStructureElementTreeRoot);
+ }
+ fExecutor = fMetadata.fExecutor; // Read from fMetadata; the parameter was moved from above.
+}
+
+SkPDFDocument::~SkPDFDocument() {
+ // subclasses of SkDocument must call close() in their destructors.
+ this->close();
+}
+
+SkPDFIndirectReference SkPDFDocument::emit(const SkPDFObject& object, SkPDFIndirectReference ref){
+ SkAutoMutexExclusive lock(fMutex);
+ object.emitObject(this->beginObject(ref));
+ this->endObject();
+ return ref;
+}
+
+SkWStream* SkPDFDocument::beginObject(SkPDFIndirectReference ref) SK_REQUIRES(fMutex) {
+ begin_indirect_object(&fOffsetMap, ref, this->getStream());
+ return this->getStream();
+};
+
+void SkPDFDocument::endObject() SK_REQUIRES(fMutex) {
+ end_indirect_object(this->getStream());
+};
+
+static SkSize operator*(SkISize u, SkScalar s) { return SkSize{u.width() * s, u.height() * s}; }
+static SkSize operator*(SkSize u, SkScalar s) { return SkSize{u.width() * s, u.height() * s}; }
+
+SkCanvas* SkPDFDocument::onBeginPage(SkScalar width, SkScalar height) {
+ SkASSERT(fCanvas.imageInfo().dimensions().isZero());
+ if (fPages.empty()) {
+ // This is the first page of the document.
+ {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ serializeHeader(&fOffsetMap, this->getStream());
+ }
+
+ fInfoDict = this->emit(*SkPDFMetadata::MakeDocumentInformationDict(fMetadata));
+ if (fMetadata.fPDFA) {
+ fUUID = SkPDFMetadata::CreateUUID(fMetadata);
+ // We use the same UUID for Document ID and Instance ID since this
+ // is the first revision of this document (and Skia does not
+ // support revising existing PDF documents).
+ // If we are not in PDF/A mode, don't use a UUID since testing
+ // works best with reproducible outputs.
+ fXMP = SkPDFMetadata::MakeXMPObject(fMetadata, fUUID, fUUID, this);
+ }
+ }
+ // By scaling the page at the device level, we will create bitmap layer
+ // devices at the rasterized scale, not the 72dpi scale. Bitmap layer
+ // devices are created when saveLayer is called with an ImageFilter; see
+ // SkPDFDevice::onCreateDevice().
+ SkISize pageSize = (SkSize{width, height} * fRasterScale).toRound();
+ SkMatrix initialTransform;
+ // Skia uses the top left as the origin but PDF natively has the origin at the
+ // bottom left. This matrix corrects for that, as well as the raster scale.
+ initialTransform.setScaleTranslate(fInverseRasterScale, -fInverseRasterScale,
+ 0, fInverseRasterScale * pageSize.height());
+ fPageDevice = sk_make_sp<SkPDFDevice>(pageSize, this, initialTransform);
+ reset_object(&fCanvas, fPageDevice);
+ fCanvas.scale(fRasterScale, fRasterScale);
+ fPageRefs.push_back(this->reserveRef());
+ return &fCanvas;
+}
+
+static void populate_link_annotation(SkPDFDict* annotation, const SkRect& r) {
+ annotation->insertName("Subtype", "Link");
+ annotation->insertInt("F", 4); // required by ISO 19005
+ // Border: 0 = Horizontal corner radius.
+ // 0 = Vertical corner radius.
+ // 0 = Width, 0 = no border.
+ annotation->insertObject("Border", SkPDFMakeArray(0, 0, 0));
+ annotation->insertObject("Rect", SkPDFMakeArray(r.fLeft, r.fTop, r.fRight, r.fBottom));
+}
+
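+// Assumes the SkData holds a NUL-terminated string; the - 1 drops the
+// terminator.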
+static SkString to_string(const SkData& d) {
+ return SkString(static_cast<const char*>(d.data()), d.size() - 1);
+}
+
+static std::unique_ptr<SkPDFArray> get_annotations(
+ SkPDFDocument* doc,
+ const std::vector<std::pair<sk_sp<SkData>, SkRect>>& linkToURLs,
+ const std::vector<std::pair<sk_sp<SkData>, SkRect>>& linkToDestinations)
+{
+ std::unique_ptr<SkPDFArray> array;
+ size_t count = linkToURLs.size() + linkToDestinations.size();
+ if (0 == count) {
+ return array; // is nullptr
+ }
+ array = SkPDFMakeArray();
+ array->reserve(count);
+ for (const auto& rectWithURL : linkToURLs) {
+ SkPDFDict annotation("Annot");
+ populate_link_annotation(&annotation, rectWithURL.second);
+ std::unique_ptr<SkPDFDict> action = SkPDFMakeDict("Action");
+ action->insertName("S", "URI");
+ action->insertString("URI", to_string(*rectWithURL.first));
+ annotation.insertObject("A", std::move(action));
+ array->appendRef(doc->emit(annotation));
+ }
+ for (const auto& linkToDestination : linkToDestinations) {
+ SkPDFDict annotation("Annot");
+ populate_link_annotation(&annotation, linkToDestination.second);
+ annotation.insertName("Dest", to_string(*linkToDestination.first));
+ array->appendRef(doc->emit(annotation));
+ }
+ return array;
+}
+
+static SkPDFIndirectReference append_destinations(
+ SkPDFDocument* doc,
+ const std::vector<SkPDFNamedDestination>& namedDestinations)
+{
+ SkPDFDict destinations;
+ for (const SkPDFNamedDestination& dest : namedDestinations) {
+ auto pdfDest = SkPDFMakeArray();
+ pdfDest->reserve(5);
+ pdfDest->appendRef(dest.fPage);
+ pdfDest->appendName("XYZ");
+ pdfDest->appendScalar(dest.fPoint.x());
+ pdfDest->appendScalar(dest.fPoint.y());
+ pdfDest->appendInt(0); // Leave zoom unchanged
+ destinations.insertObject(SkString((const char*)dest.fName->data()), std::move(pdfDest));
+ }
+ return doc->emit(destinations);
+}
+
+void SkPDFDocument::onEndPage() {
+ SkASSERT(!fCanvas.imageInfo().dimensions().isZero());
+ reset_object(&fCanvas);
+ SkASSERT(fPageDevice);
+
+ auto page = SkPDFMakeDict("Page");
+
+ SkSize mediaSize = fPageDevice->imageInfo().dimensions() * fInverseRasterScale;
+ std::unique_ptr<SkStreamAsset> pageContent = fPageDevice->content();
+ auto resourceDict = fPageDevice->makeResourceDict();
+ SkASSERT(fPageRefs.size() > 0);
+ fPageDevice = nullptr;
+
+ page->insertObject("Resources", std::move(resourceDict));
+ page->insertObject("MediaBox", SkPDFUtils::RectToArray(SkRect::MakeSize(mediaSize)));
+
+ if (std::unique_ptr<SkPDFArray> annotations =
+ get_annotations(this, fCurrentPageLinkToURLs, fCurrentPageLinkToDestinations)) {
+ page->insertObject("Annots", std::move(annotations));
+ fCurrentPageLinkToURLs.clear();
+ fCurrentPageLinkToDestinations.clear();
+ }
+
+ page->insertRef("Contents", SkPDFStreamOut(nullptr, std::move(pageContent), this));
+ // The StructParents unique identifier for each page is just its
+ // 0-based page index.
+ page->insertInt("StructParents", SkToInt(this->currentPageIndex()));
+ fPages.emplace_back(std::move(page));
+}
+
+void SkPDFDocument::onAbort() {
+ this->waitForJobs();
+}
+
+static sk_sp<SkData> SkSrgbIcm() {
+ // Source: http://www.argyllcms.com/icclibsrc.html
+ static const char kProfile[] =
+ "\0\0\14\214argl\2 \0\0mntrRGB XYZ \7\336\0\1\0\6\0\26\0\17\0:acspM"
+ "SFT\0\0\0\0IEC sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\366\326\0\1\0\0\0\0"
+ "\323-argl\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\21desc\0\0\1P\0\0\0\231cprt\0"
+ "\0\1\354\0\0\0gdmnd\0\0\2T\0\0\0pdmdd\0\0\2\304\0\0\0\210tech\0\0\3"
+ "L\0\0\0\14vued\0\0\3X\0\0\0gview\0\0\3\300\0\0\0$lumi\0\0\3\344\0\0"
+ "\0\24meas\0\0\3\370\0\0\0$wtpt\0\0\4\34\0\0\0\24bkpt\0\0\0040\0\0\0"
+ "\24rXYZ\0\0\4D\0\0\0\24gXYZ\0\0\4X\0\0\0\24bXYZ\0\0\4l\0\0\0\24rTR"
+ "C\0\0\4\200\0\0\10\14gTRC\0\0\4\200\0\0\10\14bTRC\0\0\4\200\0\0\10"
+ "\14desc\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equivalent to www.srgb.co"
+ "m 1998 HP profile)\0\0\0\0\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equiva"
+ "lent to www.srgb.com 1998 HP profile)\0\0\0\0\0\0\0\0text\0\0\0\0C"
+ "reated by Graeme W. Gill. Released into the public domain. No Warr"
+ "anty, Use at your own risk.\0\0desc\0\0\0\0\0\0\0\26IEC http://www"
+ ".iec.ch\0\0\0\0\0\0\0\0\0\0\0\26IEC http://www.iec.ch\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0desc\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour sp"
+ "ace - sRGB\0\0\0\0\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour "
+ "space - sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0sig \0\0\0"
+ "\0CRT desc\0\0\0\0\0\0\0\rIEC61966-2.1\0\0\0\0\0\0\0\0\0\0\0\rIEC6"
+ "1966-2.1\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0view\0\0\0\0"
+ "\0\23\244|\0\24_0\0\20\316\2\0\3\355\262\0\4\23\n\0\3\\g\0\0\0\1XY"
+ "Z \0\0\0\0\0L\n=\0P\0\0\0W\36\270meas\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\2\217\0\0\0\2XYZ \0\0\0\0\0\0\363Q\0\1\0\0\0"
+ "\1\26\314XYZ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0XYZ \0\0\0\0\0\0o\240"
+ "\0\0008\365\0\0\3\220XYZ \0\0\0\0\0\0b\227\0\0\267\207\0\0\30\331X"
+ "YZ \0\0\0\0\0\0$\237\0\0\17\204\0\0\266\304curv\0\0\0\0\0\0\4\0\0\0"
+ "\0\5\0\n\0\17\0\24\0\31\0\36\0#\0(\0-\0002\0007\0;\0@\0E\0J\0O\0T\0"
+ "Y\0^\0c\0h\0m\0r\0w\0|\0\201\0\206\0\213\0\220\0\225\0\232\0\237\0"
+ "\244\0\251\0\256\0\262\0\267\0\274\0\301\0\306\0\313\0\320\0\325\0"
+ "\333\0\340\0\345\0\353\0\360\0\366\0\373\1\1\1\7\1\r\1\23\1\31\1\37"
+ "\1%\1+\0012\0018\1>\1E\1L\1R\1Y\1`\1g\1n\1u\1|\1\203\1\213\1\222\1"
+ "\232\1\241\1\251\1\261\1\271\1\301\1\311\1\321\1\331\1\341\1\351\1"
+ "\362\1\372\2\3\2\14\2\24\2\35\2&\2/\0028\2A\2K\2T\2]\2g\2q\2z\2\204"
+ "\2\216\2\230\2\242\2\254\2\266\2\301\2\313\2\325\2\340\2\353\2\365"
+ "\3\0\3\13\3\26\3!\3-\0038\3C\3O\3Z\3f\3r\3~\3\212\3\226\3\242\3\256"
+ "\3\272\3\307\3\323\3\340\3\354\3\371\4\6\4\23\4 \4-\4;\4H\4U\4c\4q"
+ "\4~\4\214\4\232\4\250\4\266\4\304\4\323\4\341\4\360\4\376\5\r\5\34"
+ "\5+\5:\5I\5X\5g\5w\5\206\5\226\5\246\5\265\5\305\5\325\5\345\5\366"
+ "\6\6\6\26\6'\0067\6H\6Y\6j\6{\6\214\6\235\6\257\6\300\6\321\6\343\6"
+ "\365\7\7\7\31\7+\7=\7O\7a\7t\7\206\7\231\7\254\7\277\7\322\7\345\7"
+ "\370\10\13\10\37\0102\10F\10Z\10n\10\202\10\226\10\252\10\276\10\322"
+ "\10\347\10\373\t\20\t%\t:\tO\td\ty\t\217\t\244\t\272\t\317\t\345\t"
+ "\373\n\21\n'\n=\nT\nj\n\201\n\230\n\256\n\305\n\334\n\363\13\13\13"
+ "\"\0139\13Q\13i\13\200\13\230\13\260\13\310\13\341\13\371\14\22\14"
+ "*\14C\14\\\14u\14\216\14\247\14\300\14\331\14\363\r\r\r&\r@\rZ\rt\r"
+ "\216\r\251\r\303\r\336\r\370\16\23\16.\16I\16d\16\177\16\233\16\266"
+ "\16\322\16\356\17\t\17%\17A\17^\17z\17\226\17\263\17\317\17\354\20"
+ "\t\20&\20C\20a\20~\20\233\20\271\20\327\20\365\21\23\0211\21O\21m\21"
+ "\214\21\252\21\311\21\350\22\7\22&\22E\22d\22\204\22\243\22\303\22"
+ "\343\23\3\23#\23C\23c\23\203\23\244\23\305\23\345\24\6\24'\24I\24j"
+ "\24\213\24\255\24\316\24\360\25\22\0254\25V\25x\25\233\25\275\25\340"
+ "\26\3\26&\26I\26l\26\217\26\262\26\326\26\372\27\35\27A\27e\27\211"
+ "\27\256\27\322\27\367\30\33\30@\30e\30\212\30\257\30\325\30\372\31"
+ " \31E\31k\31\221\31\267\31\335\32\4\32*\32Q\32w\32\236\32\305\32\354"
+ "\33\24\33;\33c\33\212\33\262\33\332\34\2\34*\34R\34{\34\243\34\314"
+ "\34\365\35\36\35G\35p\35\231\35\303\35\354\36\26\36@\36j\36\224\36"
+ "\276\36\351\37\23\37>\37i\37\224\37\277\37\352 \25 A l \230 \304 \360"
+ "!\34!H!u!\241!\316!\373\"'\"U\"\202\"\257\"\335#\n#8#f#\224#\302#\360"
+ "$\37$M$|$\253$\332%\t%8%h%\227%\307%\367&'&W&\207&\267&\350'\30'I'"
+ "z'\253'\334(\r(?(q(\242(\324)\6)8)k)\235)\320*\2*5*h*\233*\317+\2+"
+ "6+i+\235+\321,\5,9,n,\242,\327-\14-A-v-\253-\341.\26.L.\202.\267.\356"
+ "/$/Z/\221/\307/\376050l0\2440\3331\0221J1\2021\2721\3622*2c2\2332\324"
+ "3\r3F3\1773\2703\3614+4e4\2364\3305\0235M5\2075\3025\375676r6\2566"
+ "\3517$7`7\2347\3278\0248P8\2148\3109\0059B9\1779\2749\371:6:t:\262"
+ ":\357;-;k;\252;\350<'<e<\244<\343=\"=a=\241=\340> >`>\240>\340?!?a"
+ "?\242?\342@#@d@\246@\347A)AjA\254A\356B0BrB\265B\367C:C}C\300D\3DG"
+ "D\212D\316E\22EUE\232E\336F\"FgF\253F\360G5G{G\300H\5HKH\221H\327I"
+ "\35IcI\251I\360J7J}J\304K\14KSK\232K\342L*LrL\272M\2MJM\223M\334N%"
+ "NnN\267O\0OIO\223O\335P'PqP\273Q\6QPQ\233Q\346R1R|R\307S\23S_S\252"
+ "S\366TBT\217T\333U(UuU\302V\17V\\V\251V\367WDW\222W\340X/X}X\313Y\32"
+ "YiY\270Z\7ZVZ\246Z\365[E[\225[\345\\5\\\206\\\326]']x]\311^\32^l^\275"
+ "_\17_a_\263`\5`W`\252`\374aOa\242a\365bIb\234b\360cCc\227c\353d@d\224"
+ "d\351e=e\222e\347f=f\222f\350g=g\223g\351h?h\226h\354iCi\232i\361j"
+ "Hj\237j\367kOk\247k\377lWl\257m\10m`m\271n\22nkn\304o\36oxo\321p+p"
+ "\206p\340q:q\225q\360rKr\246s\1s]s\270t\24tpt\314u(u\205u\341v>v\233"
+ "v\370wVw\263x\21xnx\314y*y\211y\347zFz\245{\4{c{\302|!|\201|\341}A"
+ "}\241~\1~b~\302\177#\177\204\177\345\200G\200\250\201\n\201k\201\315"
+ "\2020\202\222\202\364\203W\203\272\204\35\204\200\204\343\205G\205"
+ "\253\206\16\206r\206\327\207;\207\237\210\4\210i\210\316\2113\211\231"
+ "\211\376\212d\212\312\2130\213\226\213\374\214c\214\312\2151\215\230"
+ "\215\377\216f\216\316\2176\217\236\220\6\220n\220\326\221?\221\250"
+ "\222\21\222z\222\343\223M\223\266\224 \224\212\224\364\225_\225\311"
+ "\2264\226\237\227\n\227u\227\340\230L\230\270\231$\231\220\231\374"
+ "\232h\232\325\233B\233\257\234\34\234\211\234\367\235d\235\322\236"
+ "@\236\256\237\35\237\213\237\372\240i\240\330\241G\241\266\242&\242"
+ "\226\243\6\243v\243\346\244V\244\307\2458\245\251\246\32\246\213\246"
+ "\375\247n\247\340\250R\250\304\2517\251\251\252\34\252\217\253\2\253"
+ "u\253\351\254\\\254\320\255D\255\270\256-\256\241\257\26\257\213\260"
+ "\0\260u\260\352\261`\261\326\262K\262\302\2638\263\256\264%\264\234"
+ "\265\23\265\212\266\1\266y\266\360\267h\267\340\270Y\270\321\271J\271"
+ "\302\272;\272\265\273.\273\247\274!\274\233\275\25\275\217\276\n\276"
+ "\204\276\377\277z\277\365\300p\300\354\301g\301\343\302_\302\333\303"
+ "X\303\324\304Q\304\316\305K\305\310\306F\306\303\307A\307\277\310="
+ "\310\274\311:\311\271\3128\312\267\3136\313\266\3145\314\265\3155\315"
+ "\265\3166\316\266\3177\317\270\3209\320\272\321<\321\276\322?\322\301"
+ "\323D\323\306\324I\324\313\325N\325\321\326U\326\330\327\\\327\340"
+ "\330d\330\350\331l\331\361\332v\332\373\333\200\334\5\334\212\335\20"
+ "\335\226\336\34\336\242\337)\337\257\3406\340\275\341D\341\314\342"
+ "S\342\333\343c\343\353\344s\344\374\345\204\346\r\346\226\347\37\347"
+ "\251\3502\350\274\351F\351\320\352[\352\345\353p\353\373\354\206\355"
+ "\21\355\234\356(\356\264\357@\357\314\360X\360\345\361r\361\377\362"
+ "\214\363\31\363\247\3644\364\302\365P\365\336\366m\366\373\367\212"
+ "\370\31\370\250\3718\371\307\372W\372\347\373w\374\7\374\230\375)\375"
+ "\272\376K\376\334\377m\377\377";
+ const size_t kProfileLength = 3212;
+ static_assert(kProfileLength == sizeof(kProfile) - 1, "");
+ return SkData::MakeWithoutCopy(kProfile, kProfileLength);
+}
+
+static SkPDFIndirectReference make_srgb_color_profile(SkPDFDocument* doc) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("N", 3);
+ dict->insertObject("Range", SkPDFMakeArray(0, 1, 0, 1, 0, 1));
+ return SkPDFStreamOut(std::move(dict), SkMemoryStream::Make(SkSrgbIcm()), doc, true);
+}
+
+static std::unique_ptr<SkPDFArray> make_srgb_output_intents(SkPDFDocument* doc) {
+ // sRGB is specified by HTML, CSS, and SVG.
+ auto outputIntent = SkPDFMakeDict("OutputIntent");
+ outputIntent->insertName("S", "GTS_PDFA1");
+ outputIntent->insertString("RegistryName", "http://www.color.org");
+ outputIntent->insertString("OutputConditionIdentifier",
+ "Custom");
+ outputIntent->insertString("Info","sRGB IEC61966-2.1");
+ outputIntent->insertRef("DestOutputProfile", make_srgb_color_profile(doc));
+ auto intentArray = SkPDFMakeArray();
+ intentArray->appendObject(std::move(outputIntent));
+ return intentArray;
+}
+
+SkPDFIndirectReference SkPDFDocument::getPage(size_t pageIndex) const {
+ SkASSERT(pageIndex < fPageRefs.size());
+ return fPageRefs[pageIndex];
+}
+
+const SkMatrix& SkPDFDocument::currentPageTransform() const {
+ return fPageDevice->initialTransform();
+}
+
+int SkPDFDocument::getMarkIdForNodeId(int nodeId) {
+ return fTagTree.getMarkIdForNodeId(nodeId, SkToUInt(this->currentPageIndex()));
+}
+
+static std::vector<const SkPDFFont*> get_fonts(const SkPDFDocument& canon) {
+ std::vector<const SkPDFFont*> fonts;
+ fonts.reserve(canon.fFontMap.count());
+ // Sort so the output PDF is reproducible.
+ canon.fFontMap.foreach([&fonts](uint64_t, const SkPDFFont& font) { fonts.push_back(&font); });
+ std::sort(fonts.begin(), fonts.end(), [](const SkPDFFont* u, const SkPDFFont* v) {
+ return u->indirectReference().fValue < v->indirectReference().fValue;
+ });
+ return fonts;
+}
+
+void SkPDFDocument::onClose(SkWStream* stream) {
+ SkASSERT(fCanvas.imageInfo().dimensions().isZero());
+ if (fPages.empty()) {
+ this->waitForJobs();
+ return;
+ }
+ auto docCatalog = SkPDFMakeDict("Catalog");
+ if (fMetadata.fPDFA) {
+ SkASSERT(fXMP != SkPDFIndirectReference());
+ docCatalog->insertRef("Metadata", fXMP);
+ // Don't specify OutputIntents if we are not in PDF/A mode since
+ // no one has ever asked for this feature.
+ docCatalog->insertObject("OutputIntents", make_srgb_output_intents(this));
+ }
+
+ docCatalog->insertRef("Pages", generate_page_tree(this, std::move(fPages), fPageRefs));
+
+ if (!fNamedDestinations.empty()) {
+ docCatalog->insertRef("Dests", append_destinations(this, fNamedDestinations));
+ fNamedDestinations.clear();
+ }
+
+ // Handle tagged PDFs.
+ if (SkPDFIndirectReference root = fTagTree.makeStructTreeRoot(this)) {
+ // In the document catalog, indicate that this PDF is tagged.
+ auto markInfo = SkPDFMakeDict("MarkInfo");
+ markInfo->insertBool("Marked", true);
+ docCatalog->insertObject("MarkInfo", std::move(markInfo));
+ docCatalog->insertRef("StructTreeRoot", root);
+ }
+
+ auto docCatalogRef = this->emit(*docCatalog);
+
+ for (const SkPDFFont* f : get_fonts(*this)) {
+ f->emitSubset(this);
+ }
+
+ this->waitForJobs();
+ {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ serialize_footer(fOffsetMap, this->getStream(), fInfoDict, docCatalogRef, fUUID);
+ }
+}
+
+void SkPDFDocument::incrementJobCount() { fJobCount++; }
+
+void SkPDFDocument::signalJobComplete() { fSemaphore.signal(); }
+
+void SkPDFDocument::waitForJobs() {
+ // fJobCount can increase while we wait.
+ while (fJobCount > 0) {
+ fSemaphore.wait();
+ --fJobCount;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPDF::SetNodeId(SkCanvas* canvas, int nodeID) {
+ sk_sp<SkData> payload = SkData::MakeWithCopy(&nodeID, sizeof(nodeID));
+ const char* key = SkPDFGetNodeIdKey();
+ canvas->drawAnnotation({0, 0, 0, 0}, key, payload.get());
+}
+
+sk_sp<SkDocument> SkPDF::MakeDocument(SkWStream* stream, const SkPDF::Metadata& metadata) {
+ SkPDF::Metadata meta = metadata;
+ if (meta.fRasterDPI <= 0) {
+ meta.fRasterDPI = 72.0f;
+ }
+ if (meta.fEncodingQuality < 0) {
+ meta.fEncodingQuality = 0;
+ }
+ return stream ? sk_make_sp<SkPDFDocument>(stream, std::move(meta)) : nullptr;
+}
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h b/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h
new file mode 100644
index 0000000000..ab2a62edb9
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFDocumentPriv_DEFINED
+#define SkPDFDocumentPriv_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkStream.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTHash.h"
+#include "src/pdf/SkPDFMetadata.h"
+#include "src/pdf/SkPDFTag.h"
+
+#include <atomic>
+#include <vector>
+#include <memory>
+
+class SkExecutor;
+class SkPDFDevice;
+class SkPDFFont;
+struct SkAdvancedTypefaceMetrics;
+struct SkBitmapKey;
+struct SkPDFFillGraphicState;
+struct SkPDFImageShaderKey;
+struct SkPDFStrokeGraphicState;
+
+namespace SkPDFGradientShader {
+struct Key;
+struct KeyHash;
+}
+
+const char* SkPDFGetNodeIdKey();
+
+// Logically part of SkPDFDocument, but separate to keep similar functionality together.
+class SkPDFOffsetMap {
+public:
+ void markStartOfDocument(const SkWStream*);
+ void markStartOfObject(int referenceNumber, const SkWStream*);
+ int objectCount() const;
+ int emitCrossReferenceTable(SkWStream* s) const;
+private:
+ std::vector<int> fOffsets;
+ size_t fBaseOffset = SIZE_MAX;
+};
+
+
+struct SkPDFNamedDestination {
+ sk_sp<SkData> fName;
+ SkPoint fPoint;
+ SkPDFIndirectReference fPage;
+};
+
+/** Concrete implementation of SkDocument that creates PDF files. This
+ class does not produce linearized or optimized PDFs; instead it
+ attempts to use a minimum amount of RAM. */
+class SkPDFDocument : public SkDocument {
+public:
+ SkPDFDocument(SkWStream*, SkPDF::Metadata);
+ ~SkPDFDocument() override;
+ SkCanvas* onBeginPage(SkScalar, SkScalar) override;
+ void onEndPage() override;
+ void onClose(SkWStream*) override;
+ void onAbort() override;
+
+ /**
+ Serialize the object, as well as any other objects it
+ indirectly refers to. If any other objects have been added
+ to the SkPDFObjNumMap without serializing them, they will be
+ serialized as well.
+
+ It might go without saying that objects should not be changed
+ after calling serialize, since those changes will be too late.
+ */
+ SkPDFIndirectReference emit(const SkPDFObject&, SkPDFIndirectReference);
+ SkPDFIndirectReference emit(const SkPDFObject& o) { return this->emit(o, this->reserveRef()); }
+
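+ // Write a stream object under fMutex: the dict, then the bytes produced
+ // by writeStream bracketed by "stream"/"endstream".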
+ template <typename T>
+ void emitStream(const SkPDFDict& dict, T writeStream, SkPDFIndirectReference ref) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkWStream* stream = this->beginObject(ref);
+ dict.emitObject(stream);
+ stream->writeText(" stream\n");
+ writeStream(stream);
+ stream->writeText("\nendstream");
+ this->endObject();
+ }
+
+ const SkPDF::Metadata& metadata() const { return fMetadata; }
+
+ SkPDFIndirectReference getPage(size_t pageIndex) const;
+ SkPDFIndirectReference currentPage() const {
+ return SkASSERT(!fPageRefs.empty()), fPageRefs.back();
+ }
+ // Returns -1 if no mark ID.
+ int getMarkIdForNodeId(int nodeId);
+
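+ // Reserve an object number now; the object body may be emitted later.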
+ SkPDFIndirectReference reserveRef() { return SkPDFIndirectReference{fNextObjectNumber++}; }
+
+ SkExecutor* executor() const { return fExecutor; }
+ void incrementJobCount();
+ void signalJobComplete();
+ size_t currentPageIndex() { return fPages.size(); }
+ size_t pageCount() { return fPageRefs.size(); }
+
+ const SkMatrix& currentPageTransform() const;
+
+ // Canonicalized objects
+ SkTHashMap<SkPDFImageShaderKey, SkPDFIndirectReference> fImageShaderMap;
+ SkTHashMap<SkPDFGradientShader::Key, SkPDFIndirectReference, SkPDFGradientShader::KeyHash>
+ fGradientPatternMap;
+ SkTHashMap<SkBitmapKey, SkPDFIndirectReference> fPDFBitmapMap;
+ SkTHashMap<uint32_t, std::unique_ptr<SkAdvancedTypefaceMetrics>> fTypefaceMetrics;
+ SkTHashMap<uint32_t, std::vector<SkString>> fType1GlyphNames;
+ SkTHashMap<uint32_t, std::vector<SkUnichar>> fToUnicodeMap;
+ SkTHashMap<uint32_t, SkPDFIndirectReference> fFontDescriptors;
+ SkTHashMap<uint32_t, SkPDFIndirectReference> fType3FontDescriptors;
+ SkTHashMap<uint64_t, SkPDFFont> fFontMap;
+ SkTHashMap<SkPDFStrokeGraphicState, SkPDFIndirectReference> fStrokeGSMap;
+ SkTHashMap<SkPDFFillGraphicState, SkPDFIndirectReference> fFillGSMap;
+ SkPDFIndirectReference fInvertFunction;
+ SkPDFIndirectReference fNoSmaskGraphicState;
+
+ std::vector<std::pair<sk_sp<SkData>, SkRect>> fCurrentPageLinkToURLs;
+ std::vector<std::pair<sk_sp<SkData>, SkRect>> fCurrentPageLinkToDestinations;
+ std::vector<SkPDFNamedDestination> fNamedDestinations;
+
+private:
+ SkPDFOffsetMap fOffsetMap;
+ SkCanvas fCanvas;
+ std::vector<std::unique_ptr<SkPDFDict>> fPages;
+ std::vector<SkPDFIndirectReference> fPageRefs;
+
+ sk_sp<SkPDFDevice> fPageDevice;
+ std::atomic<int> fNextObjectNumber = {1};
+ std::atomic<int> fJobCount = {0};
+ SkUUID fUUID;
+ SkPDFIndirectReference fInfoDict;
+ SkPDFIndirectReference fXMP;
+ SkPDF::Metadata fMetadata;
+ SkScalar fRasterScale = 1;
+ SkScalar fInverseRasterScale = 1;
+ SkExecutor* fExecutor = nullptr;
+
+ // For tagged PDFs.
+ SkPDFTagTree fTagTree;
+
+ SkMutex fMutex;
+ SkSemaphore fSemaphore;
+
+ void waitForJobs();
+ SkWStream* beginObject(SkPDFIndirectReference);
+ void endObject();
+};
+
+#endif // SkPDFDocumentPriv_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.cpp b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
new file mode 100644
index 0000000000..c0275236a6
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
@@ -0,0 +1,690 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkBitmaskEnum.h"
+#include "include/private/SkTHash.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/pdf/SkPDFBitmap.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFMakeCIDGlyphWidthsArray.h"
+#include "src/pdf/SkPDFMakeToUnicodeCmap.h"
+#include "src/pdf/SkPDFSubsetFont.h"
+#include "src/pdf/SkPDFType1Font.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/utils/SkUTF.h"
+
+#include <limits.h>
+#include <initializer_list>
+#include <memory>
+#include <utility>
+
+void SkPDFFont::GetType1GlyphNames(const SkTypeface& face, SkString* dst) {
+ face.getPostScriptGlyphNames(dst);
+}
+
+namespace {
+// PDF's notion of symbolic vs non-symbolic is related to the character set, not
+// symbols vs. characters. Rarely is a font the right character set to call it
+// non-symbolic, so always call it symbolic. (PDF 1.4 spec, section 5.7.1)
+static const int32_t kPdfSymbolic = 4;
+
+
+// scale from em-units to base-1000, returning as a SkScalar
+inline SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ return emSize == 1000 ? scaled : scaled * 1000 / emSize;
+}
+
+inline SkScalar scaleFromFontUnits(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+void setGlyphWidthAndBoundingBox(SkScalar width, SkIRect box,
+ SkDynamicMemoryWStream* content) {
+ // Specify width and bounding box for the glyph.
+ SkPDFUtils::AppendScalar(width, content);
+ content->writeText(" 0 ");
+ content->writeDecAsText(box.fLeft);
+ content->writeText(" ");
+ content->writeDecAsText(box.fTop);
+ content->writeText(" ");
+ content->writeDecAsText(box.fRight);
+ content->writeText(" ");
+ content->writeDecAsText(box.fBottom);
+ content->writeText(" d1\n");
+}
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFFont
+///////////////////////////////////////////////////////////////////////////////
+
+/* Resources are canonicalized and uniqueified by pointer so there has to be
+ * some additional state indicating which subset of the font is used. It
+ * must be maintained at the document granularity.
+ */
+
+SkPDFFont::~SkPDFFont() = default;
+
+SkPDFFont::SkPDFFont(SkPDFFont&&) = default;
+
+SkPDFFont& SkPDFFont::operator=(SkPDFFont&&) = default;
+
+static bool can_embed(const SkAdvancedTypefaceMetrics& metrics) {
+ return !SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag);
+}
+
+const SkAdvancedTypefaceMetrics* SkPDFFont::GetMetrics(const SkTypeface* typeface,
+ SkPDFDocument* canon) {
+ SkASSERT(typeface);
+ SkFontID id = typeface->uniqueID();
+ if (std::unique_ptr<SkAdvancedTypefaceMetrics>* ptr = canon->fTypefaceMetrics.find(id)) {
+ return ptr->get(); // canon retains ownership.
+ }
+ int count = typeface->countGlyphs();
+ if (count <= 0 || count > 1 + SkTo<int>(UINT16_MAX)) {
+ // Cache nullptr so future lookups skip this check.
+ canon->fTypefaceMetrics.set(id, nullptr);
+ return nullptr;
+ }
+ std::unique_ptr<SkAdvancedTypefaceMetrics> metrics = typeface->getAdvancedMetrics();
+ if (!metrics) {
+ metrics = skstd::make_unique<SkAdvancedTypefaceMetrics>();
+ }
+
+ if (0 == metrics->fStemV || 0 == metrics->fCapHeight) {
+ SkFont font;
+ font.setHinting(SkFontHinting::kNone);
+ font.setTypeface(sk_ref_sp(typeface));
+ font.setSize(1000); // glyph coordinate system
+ if (0 == metrics->fStemV) {
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ int16_t stemV = SHRT_MAX;
+ for (char c : {'i', 'I', '!', '1'}) {
+ uint16_t g = font.unicharToGlyph(c);
+ SkRect bounds;
+ font.getBounds(&g, 1, &bounds, nullptr);
+ stemV = SkTMin(stemV, SkToS16(SkScalarRoundToInt(bounds.width())));
+ }
+ metrics->fStemV = stemV;
+ }
+ if (0 == metrics->fCapHeight) {
+ // Figure out a good guess for CapHeight: average the height of M and X.
+ SkScalar capHeight = 0;
+ for (char c : {'M', 'X'}) {
+ uint16_t g = font.unicharToGlyph(c);
+ SkRect bounds;
+ font.getBounds(&g, 1, &bounds, nullptr);
+ capHeight += bounds.height();
+ }
+ metrics->fCapHeight = SkToS16(SkScalarRoundToInt(capHeight / 2));
+ }
+ }
+ return canon->fTypefaceMetrics.set(id, std::move(metrics))->get();
+}
+
+const std::vector<SkUnichar>& SkPDFFont::GetUnicodeMap(const SkTypeface* typeface,
+ SkPDFDocument* canon) {
+ SkASSERT(typeface);
+ SkASSERT(canon);
+ SkFontID id = typeface->uniqueID();
+ if (std::vector<SkUnichar>* ptr = canon->fToUnicodeMap.find(id)) {
+ return *ptr;
+ }
+ std::vector<SkUnichar> buffer(typeface->countGlyphs());
+ typeface->getGlyphToUnicodeMap(buffer.data());
+ return *canon->fToUnicodeMap.set(id, std::move(buffer));
+}
+
+SkAdvancedTypefaceMetrics::FontType SkPDFFont::FontType(const SkAdvancedTypefaceMetrics& metrics) {
+ if (SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag) ||
+ SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag)) {
+ // force Type3 fallback.
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return metrics.fType;
+}
+
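+// Single-byte-encoded fonts are subset in blocks of up to 255 glyphs (glyph
+// 0 is reserved); return the first glyph ID of the block containing gid.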
+static SkGlyphID first_nonzero_glyph_for_single_byte_encoding(SkGlyphID gid) {
+ return gid != 0 ? gid - (gid - 1) % 255 : 1;
+}
+
+SkPDFFont* SkPDFFont::GetFontResource(SkPDFDocument* doc,
+ const SkGlyph* glyph,
+ SkTypeface* face) {
+ SkASSERT(doc);
+ SkASSERT(face); // All SkPDFDevice::internalDrawText ensures this.
+ const SkAdvancedTypefaceMetrics* fontMetrics = SkPDFFont::GetMetrics(face, doc);
+ SkASSERT(fontMetrics); // SkPDFDevice::internalDrawText ensures the typeface is good.
+ // GetMetrics only returns null to signify a bad typeface.
+ const SkAdvancedTypefaceMetrics& metrics = *fontMetrics;
+ SkAdvancedTypefaceMetrics::FontType type = SkPDFFont::FontType(metrics);
+ if (!(glyph->isEmpty() || glyph->path())) {
+ type = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ bool multibyte = SkPDFFont::IsMultiByte(type);
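+ // Cache key: typeface unique ID in the upper bits, the subset's first
+ // glyph ID in the low 16 bits (0 for multi-byte fonts).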
+ SkGlyphID subsetCode =
+ multibyte ? 0 : first_nonzero_glyph_for_single_byte_encoding(glyph->getGlyphID());
+ uint64_t fontID = (static_cast<uint64_t>(SkTypeface::UniqueID(face)) << 16) | subsetCode;
+
+ if (SkPDFFont* found = doc->fFontMap.find(fontID)) {
+ SkASSERT(multibyte == found->multiByteGlyphs());
+ return found;
+ }
+
+ sk_sp<SkTypeface> typeface(sk_ref_sp(face));
+ SkASSERT(typeface);
+
+ SkGlyphID lastGlyph = SkToU16(typeface->countGlyphs() - 1);
+
+ // should be caught by SkPDFDevice::internalDrawText
+ SkASSERT(glyph->getGlyphID() <= lastGlyph);
+
+ SkGlyphID firstNonZeroGlyph;
+ if (multibyte) {
+ firstNonZeroGlyph = 1;
+ } else {
+ firstNonZeroGlyph = subsetCode;
+ lastGlyph = SkToU16(SkTMin<int>((int)lastGlyph, 254 + (int)subsetCode));
+ }
+ auto ref = doc->reserveRef();
+ return doc->fFontMap.set(
+ fontID, SkPDFFont(std::move(typeface), firstNonZeroGlyph, lastGlyph, type, ref));
+}
+
+SkPDFFont::SkPDFFont(sk_sp<SkTypeface> typeface,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID,
+ SkAdvancedTypefaceMetrics::FontType fontType,
+ SkPDFIndirectReference indirectReference)
+ : fTypeface(std::move(typeface))
+ , fGlyphUsage(firstGlyphID, lastGlyphID)
+ , fIndirectReference(indirectReference)
+ , fFontType(fontType)
+{
+ // Always include glyph 0
+ this->noteGlyphUsage(0);
+}
+
+void SkPDFFont::PopulateCommonFontDescriptor(SkPDFDict* descriptor,
+ const SkAdvancedTypefaceMetrics& metrics,
+ uint16_t emSize,
+ int16_t defaultWidth) {
+ descriptor->insertName("FontName", metrics.fPostScriptName);
+ descriptor->insertInt("Flags", (size_t)(metrics.fStyle | kPdfSymbolic));
+ descriptor->insertScalar("Ascent",
+ scaleFromFontUnits(metrics.fAscent, emSize));
+ descriptor->insertScalar("Descent",
+ scaleFromFontUnits(metrics.fDescent, emSize));
+ descriptor->insertScalar("StemV",
+ scaleFromFontUnits(metrics.fStemV, emSize));
+ descriptor->insertScalar("CapHeight",
+ scaleFromFontUnits(metrics.fCapHeight, emSize));
+ descriptor->insertInt("ItalicAngle", metrics.fItalicAngle);
+ descriptor->insertObject("FontBBox",
+ SkPDFMakeArray(scaleFromFontUnits(metrics.fBBox.left(), emSize),
+ scaleFromFontUnits(metrics.fBBox.bottom(), emSize),
+ scaleFromFontUnits(metrics.fBBox.right(), emSize),
+ scaleFromFontUnits(metrics.fBBox.top(), emSize)));
+ if (defaultWidth > 0) {
+ descriptor->insertScalar("MissingWidth",
+ scaleFromFontUnits(defaultWidth, emSize));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Type0Font
+///////////////////////////////////////////////////////////////////////////////
+
+// if possible, make no copy.
+static sk_sp<SkData> stream_to_data(std::unique_ptr<SkStreamAsset> stream) {
+ SkASSERT(stream);
+ (void)stream->rewind();
+ SkASSERT(stream->hasLength());
+ size_t size = stream->getLength();
+ if (const void* base = stream->getMemoryBase()) {
+ SkData::ReleaseProc proc =
+ [](const void*, void* ctx) { delete (SkStreamAsset*)ctx; };
+ return SkData::MakeWithProc(base, size, proc, stream.release());
+ }
+ return SkData::MakeFromStream(stream.get(), size);
+}
+
+static void emit_subset_type0(const SkPDFFont& font, SkPDFDocument* doc) {
+ const SkAdvancedTypefaceMetrics* metricsPtr =
+ SkPDFFont::GetMetrics(font.typeface(), doc);
+ SkASSERT(metricsPtr);
+ if (!metricsPtr) { return; }
+ const SkAdvancedTypefaceMetrics& metrics = *metricsPtr;
+ SkASSERT(can_embed(metrics));
+ SkAdvancedTypefaceMetrics::FontType type = font.getType();
+ SkTypeface* face = font.typeface();
+ SkASSERT(face);
+
+ auto descriptor = SkPDFMakeDict("FontDescriptor");
+ uint16_t emSize = SkToU16(font.typeface()->getUnitsPerEm());
+ SkPDFFont::PopulateCommonFontDescriptor(descriptor.get(), metrics, emSize, 0);
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> fontAsset = face->openStream(&ttcIndex);
+ size_t fontSize = fontAsset ? fontAsset->getLength() : 0;
+ if (0 == fontSize) {
+ SkDebugf("Error: (SkTypeface)(%p)::openStream() returned "
+ "empty stream (%p) when identified as kType1CID_Font "
+ "or kTrueType_Font.\n", face, fontAsset.get());
+ } else {
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kTrueType_Font: {
+ if (!SkToBool(metrics.fFlags &
+ SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag)) {
+ SkASSERT(font.firstGlyphID() == 1);
+ sk_sp<SkData> subsetFontData = SkPDFSubsetFont(
+ stream_to_data(std::move(fontAsset)), font.glyphUsage(),
+ doc->metadata().fSubsetter,
+ metrics.fFontName.c_str(), ttcIndex);
+ if (subsetFontData) {
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertInt("Length1", SkToInt(subsetFontData->size()));
+ descriptor->insertRef(
+ "FontFile2",
+ SkPDFStreamOut(std::move(tmp),
+ SkMemoryStream::Make(std::move(subsetFontData)),
+ doc, true));
+ break;
+ }
+ // If subsetting fails, fall back to original font data.
+ fontAsset = face->openStream(&ttcIndex);
+ SkASSERT(fontAsset);
+ SkASSERT(fontAsset->getLength() == fontSize);
+ if (!fontAsset || fontAsset->getLength() == 0) { break; }
+ }
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertInt("Length1", fontSize);
+ descriptor->insertRef("FontFile2",
+ SkPDFStreamOut(std::move(tmp), std::move(fontAsset),
+ doc, true));
+ break;
+ }
+ case SkAdvancedTypefaceMetrics::kType1CID_Font: {
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertName("Subtype", "CIDFontType0C");
+ descriptor->insertRef("FontFile3",
+ SkPDFStreamOut(std::move(tmp), std::move(fontAsset),
+ doc, true));
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ }
+
+ auto newCIDFont = SkPDFMakeDict("Font");
+ newCIDFont->insertRef("FontDescriptor", doc->emit(*descriptor));
+ newCIDFont->insertName("BaseFont", metrics.fPostScriptName);
+
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType0");
+ break;
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType2");
+ newCIDFont->insertName("CIDToGIDMap", "Identity");
+ break;
+ default:
+ SkASSERT(false);
+ }
+ auto sysInfo = SkPDFMakeDict();
+ sysInfo->insertString("Registry", "Adobe");
+ sysInfo->insertString("Ordering", "Identity");
+ sysInfo->insertInt("Supplement", 0);
+ newCIDFont->insertObject("CIDSystemInfo", std::move(sysInfo));
+
+ SkScalar defaultWidth = 0;
+ {
+ std::unique_ptr<SkPDFArray> widths = SkPDFMakeCIDGlyphWidthsArray(
+ *face, font.glyphUsage(), &defaultWidth);
+ if (widths && widths->size() > 0) {
+ newCIDFont->insertObject("W", std::move(widths));
+ }
+ newCIDFont->insertScalar("DW", defaultWidth);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ SkPDFDict fontDict("Font");
+ fontDict.insertName("Subtype", "Type0");
+ fontDict.insertName("BaseFont", metrics.fPostScriptName);
+ fontDict.insertName("Encoding", "Identity-H");
+ auto descendantFonts = SkPDFMakeArray();
+ descendantFonts->appendRef(doc->emit(*newCIDFont));
+ fontDict.insertObject("DescendantFonts", std::move(descendantFonts));
+
+ const std::vector<SkUnichar>& glyphToUnicode =
+ SkPDFFont::GetUnicodeMap(font.typeface(), doc);
+ SkASSERT(SkToSizeT(font.typeface()->countGlyphs()) == glyphToUnicode.size());
+ std::unique_ptr<SkStreamAsset> toUnicode =
+ SkPDFMakeToUnicodeCmap(glyphToUnicode.data(),
+ &font.glyphUsage(),
+ font.multiByteGlyphs(),
+ font.firstGlyphID(),
+ font.lastGlyphID());
+ fontDict.insertRef("ToUnicode", SkPDFStreamOut(nullptr, std::move(toUnicode), doc));
+
+ doc->emit(fontDict, font.indirectReference());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PDFType3Font
+///////////////////////////////////////////////////////////////////////////////
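+// For orientation, emit_subset_type3() below produces a font dictionary of
+// roughly this shape (an illustrative sketch, not literal emitted bytes):
+//
+//   <</Type /Font /Subtype /Type3
+//     /FontMatrix [1/emSize 0 0 -1/emSize 0 0]
+//     /FirstChar 0 /LastChar N /Widths [...]
+//     /Encoding <</Differences [0 /g0 /g1 ...]>>
+//     /CharProcs <</g0 (stream) /g1 (stream) ...>>  (one stream per glyph)
+//     /FontDescriptor ... /ToUnicode ...>>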
+
+namespace {
+// returns [0, first, first+1, ... last-1, last]
+struct SingleByteGlyphIdIterator {
+ SingleByteGlyphIdIterator(SkGlyphID first, SkGlyphID last)
+ : fFirst(first), fLast(last) {
+ SkASSERT(fFirst > 0);
+ SkASSERT(fLast >= first);
+ }
+ struct Iter {
+ void operator++() {
+ fCurrent = (0 == fCurrent) ? fFirst : fCurrent + 1;
+ }
+ // This is an input_iterator
+ SkGlyphID operator*() const { return (SkGlyphID)fCurrent; }
+ bool operator!=(const Iter& rhs) const {
+ return fCurrent != rhs.fCurrent;
+ }
+ Iter(SkGlyphID f, int c) : fFirst(f), fCurrent(c) {}
+ private:
+ const SkGlyphID fFirst;
+ int fCurrent; // must be int so that fLast+1 fits
+ };
+ Iter begin() const { return Iter(fFirst, 0); }
+ Iter end() const { return Iter(fFirst, (int)fLast + 1); }
+private:
+ const SkGlyphID fFirst;
+ const SkGlyphID fLast;
+};
+}
+
+struct ImageAndOffset {
+ sk_sp<SkImage> fImage;
+ SkIPoint fOffset;
+};
+static ImageAndOffset to_image(SkGlyphID gid, SkStrike* cache) {
+ (void)cache->prepareImage(cache->glyph(gid));
+ SkMask mask = cache->glyph(gid)->mask();
+ if (!mask.fImage) {
+ return {nullptr, {0, 0}};
+ }
+ SkIRect bounds = mask.fBounds;
+ SkBitmap bm;
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ bm.allocPixels(SkImageInfo::MakeA8(bounds.width(), bounds.height()));
+ for (int y = 0; y < bm.height(); ++y) {
+ for (int x8 = 0; x8 < bm.width(); x8 += 8) {
+ uint8_t v = *mask.getAddr1(x8 + bounds.x(), y + bounds.y());
+ int e = SkTMin(x8 + 8, bm.width());
+ for (int x = x8; x < e; ++x) {
+ *bm.getAddr8(x, y) = (v >> (x & 0x7)) & 0x1 ? 0xFF : 0x00;
+ }
+ }
+ }
+ bm.setImmutable();
+ return {SkImage::MakeFromBitmap(bm), {bounds.x(), bounds.y()}};
+ case SkMask::kA8_Format:
+ bm.installPixels(SkImageInfo::MakeA8(bounds.width(), bounds.height()),
+ mask.fImage, mask.fRowBytes);
+ return {SkMakeImageFromRasterBitmap(bm, kAlways_SkCopyPixelsMode),
+ {bounds.x(), bounds.y()}};
+ case SkMask::kARGB32_Format:
+ bm.installPixels(SkImageInfo::MakeN32Premul(bounds.width(), bounds.height()),
+ mask.fImage, mask.fRowBytes);
+ return {SkMakeImageFromRasterBitmap(bm, kAlways_SkCopyPixelsMode),
+ {bounds.x(), bounds.y()}};
+ case SkMask::k3D_Format:
+ case SkMask::kLCD16_Format:
+ default:
+ SkASSERT(false);
+ return {nullptr, {0, 0}};
+ }
+}
+
+static SkPDFIndirectReference type3_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface,
+ SkStrike* cache) {
+ if (SkPDFIndirectReference* ptr = doc->fType3FontDescriptors.find(typeface->uniqueID())) {
+ return *ptr;
+ }
+
+ SkPDFDict descriptor("FontDescriptor");
+ int32_t fontDescriptorFlags = kPdfSymbolic;
+ if (const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, doc)) {
+ // Type3 FontDescriptor does not require all the same fields.
+ descriptor.insertName("FontName", metrics->fPostScriptName);
+ descriptor.insertInt("ItalicAngle", metrics->fItalicAngle);
+ fontDescriptorFlags |= (int32_t)metrics->fStyle;
+ // Adobe requests CapHeight, XHeight, and StemV be added
+ // to "greatly help our workflow downstream".
+ if (metrics->fCapHeight != 0) { descriptor.insertInt("CapHeight", metrics->fCapHeight); }
+ if (metrics->fStemV != 0) { descriptor.insertInt("StemV", metrics->fStemV); }
+ SkScalar xHeight = cache->getFontMetrics().fXHeight;
+ if (xHeight != 0) {
+ descriptor.insertScalar("XHeight", xHeight);
+ }
+ }
+ descriptor.insertInt("Flags", fontDescriptorFlags);
+ SkPDFIndirectReference ref = doc->emit(descriptor);
+ doc->fType3FontDescriptors.set(typeface->uniqueID(), ref);
+ return ref;
+}
+
+#ifdef SK_PDF_BITMAP_GLYPH_RASTER_SIZE
+static constexpr float kBitmapFontSize = SK_PDF_BITMAP_GLYPH_RASTER_SIZE;
+#else
+static constexpr float kBitmapFontSize = 64;
+#endif
+
+static SkStrikeSpec make_small_strike(const SkTypeface& typeface) {
+ SkFont font(sk_ref_sp(&typeface), kBitmapFontSize);
+ font.setHinting(SkFontHinting::kNone);
+ font.setEdging(SkFont::Edging::kAlias);
+ return SkStrikeSpec::MakeMask(font,
+ SkPaint(),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry),
+ kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+}
+
+static void emit_subset_type3(const SkPDFFont& pdfFont, SkPDFDocument* doc) {
+ SkTypeface* typeface = pdfFont.typeface();
+ SkGlyphID firstGlyphID = pdfFont.firstGlyphID();
+ SkGlyphID lastGlyphID = pdfFont.lastGlyphID();
+ const SkPDFGlyphUse& subset = pdfFont.glyphUsage();
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ // Remove unused glyphs at the end of the range.
+ // Keep the lastGlyphID >= firstGlyphID invariant true.
+ while (lastGlyphID > firstGlyphID && !subset.has(lastGlyphID)) {
+ --lastGlyphID;
+ }
+ int unitsPerEm;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &unitsPerEm);
+ auto cache = strikeSpec.findOrCreateExclusiveStrike();
+ SkASSERT(cache);
+ SkScalar emSize = (SkScalar)unitsPerEm;
+
+ SkStrikeSpec strikeSpecSmall = kBitmapFontSize > 0 ? make_small_strike(*typeface)
+ : strikeSpec;
+ auto smallCache = strikeSpecSmall.findOrCreateExclusiveStrike();
+ SkASSERT(smallCache);
+ float bitmapScale = kBitmapFontSize > 0 ? emSize / kBitmapFontSize : 1.0f;
+
+ SkPDFDict font("Font");
+ font.insertName("Subtype", "Type3");
+ // Flip about the x-axis and scale by 1/emSize.
+ SkMatrix fontMatrix;
+ fontMatrix.setScale(SkScalarInvert(emSize), -SkScalarInvert(emSize));
+ font.insertObject("FontMatrix", SkPDFUtils::MatrixToArray(fontMatrix));
+
+ auto charProcs = SkPDFMakeDict();
+ auto encoding = SkPDFMakeDict("Encoding");
+
+ auto encDiffs = SkPDFMakeArray();
+ // length(firstGlyphID .. lastGlyphID) == lastGlyphID - firstGlyphID + 1,
+ // plus 1 for glyph 0.
+ SkASSERT(firstGlyphID > 0);
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ int glyphCount = lastGlyphID - firstGlyphID + 2;
+ // One more entry for the index of the first glyph.
+ encDiffs->reserve(glyphCount + 1);
+ encDiffs->appendInt(0); // index of first glyph
+
+ auto widthArray = SkPDFMakeArray();
+ widthArray->reserve(glyphCount);
+
+ SkIRect bbox = SkIRect::MakeEmpty();
+
+ std::vector<std::pair<SkGlyphID, SkPDFIndirectReference>> imageGlyphs;
+ for (SkGlyphID gID : SingleByteGlyphIdIterator(firstGlyphID, lastGlyphID)) {
+ bool skipGlyph = gID != 0 && !subset.has(gID);
+ SkString characterName;
+ SkScalar advance = 0.0f;
+ SkIRect glyphBBox;
+ if (skipGlyph) {
+ characterName.set("g0");
+ } else {
+ characterName.printf("g%X", gID);
+ SkGlyph* glyph = cache->glyph(gID);
+ advance = glyph->advanceX();
+ glyphBBox = glyph->iRect();
+ bbox.join(glyphBBox);
+ const SkPath* path = cache->preparePath(glyph);
+ SkDynamicMemoryWStream content;
+ if (path && !path->isEmpty()) {
+ setGlyphWidthAndBoundingBox(glyph->advanceX(), glyphBBox, &content);
+ SkPDFUtils::EmitPath(*path, SkPaint::kFill_Style, &content);
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, path->getFillType(), &content);
+ } else {
+ auto pimg = to_image(gID, smallCache.get());
+ if (!pimg.fImage) {
+ setGlyphWidthAndBoundingBox(glyph->advanceX(), glyphBBox, &content);
+ } else {
+ using SkPDFUtils::AppendScalar;
+ imageGlyphs.emplace_back(gID, SkPDFSerializeImage(pimg.fImage.get(), doc));
+ AppendScalar(glyph->advanceX(), &content);
+ content.writeText(" 0 d0\n");
+ AppendScalar(pimg.fImage->width() * bitmapScale, &content);
+ content.writeText(" 0 0 ");
+ AppendScalar(-pimg.fImage->height() * bitmapScale, &content);
+ content.writeText(" ");
+ AppendScalar(pimg.fOffset.x() * bitmapScale, &content);
+ content.writeText(" ");
+ AppendScalar((pimg.fImage->height() + pimg.fOffset.y()) * bitmapScale,
+ &content);
+ content.writeText(" cm\n/X");
+ content.write(characterName.c_str(), characterName.size());
+ content.writeText(" Do\n");
+ }
+ }
+ charProcs->insertRef(characterName, SkPDFStreamOut(nullptr,
+ content.detachAsStream(), doc));
+ }
+ encDiffs->appendName(std::move(characterName));
+ widthArray->appendScalar(advance);
+ }
+
+ if (!imageGlyphs.empty()) {
+ auto d0 = SkPDFMakeDict();
+ for (const auto& pair : imageGlyphs) {
+ d0->insertRef(SkStringPrintf("Xg%X", pair.first), pair.second);
+ }
+ auto d1 = SkPDFMakeDict();
+ d1->insertObject("XObject", std::move(d0));
+ font.insertObject("Resources", std::move(d1));
+ }
+
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font.insertInt("FirstChar", 0);
+ font.insertInt("LastChar", lastGlyphID - firstGlyphID + 1);
+ /* FontBBox: "A rectangle expressed in the glyph coordinate
+ system, specifying the font bounding box. This is the smallest
+ rectangle enclosing the shape that would result if all of the
+ glyphs of the font were placed with their origins coincident and
+ then filled." */
+ font.insertObject("FontBBox", SkPDFMakeArray(bbox.left(),
+ bbox.bottom(),
+ bbox.right(),
+ bbox.top()));
+
+ font.insertName("CIDToGIDMap", "Identity");
+
+ const std::vector<SkUnichar>& glyphToUnicode = SkPDFFont::GetUnicodeMap(typeface, doc);
+ SkASSERT(glyphToUnicode.size() == SkToSizeT(typeface->countGlyphs()));
+ auto toUnicodeCmap = SkPDFMakeToUnicodeCmap(glyphToUnicode.data(),
+ &subset,
+ false,
+ firstGlyphID,
+ lastGlyphID);
+ font.insertRef("ToUnicode", SkPDFStreamOut(nullptr, std::move(toUnicodeCmap), doc));
+ font.insertRef("FontDescriptor", type3_descriptor(doc, typeface, cache.get()));
+ font.insertObject("Widths", std::move(widthArray));
+ font.insertObject("Encoding", std::move(encoding));
+ font.insertObject("CharProcs", std::move(charProcs));
+
+ doc->emit(font, pdfFont.indirectReference());
+}
+
+void SkPDFFont::emitSubset(SkPDFDocument* doc) const {
+ SkASSERT(fFontType != SkPDFFont().fFontType); // not default value
+ switch (fFontType) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ return emit_subset_type0(*this, doc);
+#ifndef SK_PDF_DO_NOT_SUPPORT_TYPE_1_FONTS
+ case SkAdvancedTypefaceMetrics::kType1_Font:
+ return SkPDFEmitType1Font(*this, doc);
+#endif
+ default:
+ return emit_subset_type3(*this, doc);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkPDFFont::CanEmbedTypeface(SkTypeface* typeface, SkPDFDocument* doc) {
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, doc);
+ return metrics && can_embed(*metrics);
+}
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.h b/gfx/skia/skia/src/pdf/SkPDFFont.h
new file mode 100644
index 0000000000..f047aa17d2
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFFont_DEFINED
+#define SkPDFFont_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <vector>
+
+class SkPDFDocument;
+class SkStrike;
+class SkString;
+
+/** \class SkPDFFont
+ A PDF Object class representing a font. The font may have resources
+ attached to it in order to embed the font. SkPDFFonts are canonicalized
+ so that resource deduplication will only include one copy of a font.
+ This class follows the same canonicalization pattern as SkPDFGraphicState:
+ instances are owned and deduplicated by the document.
+*/
+class SkPDFFont {
+public:
+ SkPDFFont() {}
+ ~SkPDFFont();
+ SkPDFFont(SkPDFFont&&);
+ SkPDFFont& operator=(SkPDFFont&&);
+
+ /** Returns the typeface represented by this class. Returns nullptr for the
+ * default typeface.
+ */
+ SkTypeface* typeface() const { return fTypeface.get(); }
+
+ /** Returns the font type represented in this font. For Type0 fonts,
+ * returns the type of the descendant font.
+ */
+ SkAdvancedTypefaceMetrics::FontType getType() const { return fFontType; }
+
+ static SkAdvancedTypefaceMetrics::FontType FontType(const SkAdvancedTypefaceMetrics&);
+ static void GetType1GlyphNames(const SkTypeface&, SkString*);
+
+ static bool IsMultiByte(SkAdvancedTypefaceMetrics::FontType type) {
+ return type == SkAdvancedTypefaceMetrics::kType1CID_Font ||
+ type == SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ /** Returns true if this font encoding supports glyph IDs above 255.
+ */
+ bool multiByteGlyphs() const { return SkPDFFont::IsMultiByte(this->getType()); }
+
+ /** Return true if this font has an encoding for the passed glyph id.
+ */
+ bool hasGlyph(SkGlyphID gid) {
+ return (gid >= this->firstGlyphID() && gid <= this->lastGlyphID()) || gid == 0;
+ }
+
+ /** Convert the input glyph ID into the font encoding. */
+ SkGlyphID glyphToPDFFontEncoding(SkGlyphID gid) const {
+ if (this->multiByteGlyphs() || gid == 0) {
+ return gid;
+ }
+ SkASSERT(gid >= this->firstGlyphID() && gid <= this->lastGlyphID());
+ SkASSERT(this->firstGlyphID() > 0);
+ return gid - this->firstGlyphID() + 1;
+ }
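+ // For example (hypothetical values): in a single-byte font whose
+ // firstGlyphID() is 300, gid 300 encodes as code 1 and gid 310 as code 11,
+ // while gid 0 stays 0; a multibyte font passes every gid through unchanged.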
+
+ void noteGlyphUsage(SkGlyphID glyph) {
+ SkASSERT(this->hasGlyph(glyph));
+ fGlyphUsage.set(glyph);
+ }
+
+ SkPDFIndirectReference indirectReference() const { return fIndirectReference; }
+
+ /** Get the font resource for the passed typeface and glyph. The returned
+ * object is canonicalized and owned by the document, which keeps it
+ * alive; callers must not delete it.
+ * @param doc The document that owns and deduplicates the font.
+ * @param glyphs Specifies which section of a large font is of interest.
+ * @param typeface The typeface to find, not nullptr.
+ */
+ static SkPDFFont* GetFontResource(SkPDFDocument* doc,
+ const SkGlyph* glyphs,
+ SkTypeface* typeface);
+
+ /** Gets SkAdvancedTypefaceMetrics, and caches the result.
+ * @param typeface can not be nullptr.
+ * @return nullptr only when typeface is bad.
+ */
+ static const SkAdvancedTypefaceMetrics* GetMetrics(const SkTypeface* typeface,
+ SkPDFDocument* canon);
+
+ static const std::vector<SkUnichar>& GetUnicodeMap(const SkTypeface* typeface,
+ SkPDFDocument* canon);
+
+ static void PopulateCommonFontDescriptor(SkPDFDict* descriptor,
+ const SkAdvancedTypefaceMetrics&,
+ uint16_t emSize,
+ int16_t defaultWidth);
+
+ void emitSubset(SkPDFDocument*) const;
+
+ /**
+ * Returns false iff the typeface has its NotEmbeddable flag set.
+ * typeface must not be nullptr.
+ */
+ static bool CanEmbedTypeface(SkTypeface*, SkPDFDocument*);
+
+ SkGlyphID firstGlyphID() const { return fGlyphUsage.firstNonZero(); }
+ SkGlyphID lastGlyphID() const { return fGlyphUsage.lastGlyph(); }
+ const SkPDFGlyphUse& glyphUsage() const { return fGlyphUsage; }
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+private:
+ sk_sp<SkTypeface> fTypeface;
+ // The glyph IDs accessible with this font. For Type1 (non CID) fonts,
+ // this will be a subset if the font has more than 255 glyphs.
+ SkPDFGlyphUse fGlyphUsage;
+ SkPDFIndirectReference fIndirectReference;
+ SkAdvancedTypefaceMetrics::FontType fFontType = (SkAdvancedTypefaceMetrics::FontType)(-1);
+
+ SkPDFFont(sk_sp<SkTypeface>,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID,
+ SkAdvancedTypefaceMetrics::FontType fontType,
+ SkPDFIndirectReference indirectReference);
+
+ SkPDFFont(const SkPDFFont&) = delete;
+ SkPDFFont& operator=(const SkPDFFont&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
new file mode 100644
index 0000000000..cc07e2a0fd
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFUtils.h"
+
+SkPDFIndirectReference SkPDFMakeFormXObject(SkPDFDocument* doc,
+ std::unique_ptr<SkStreamAsset> content,
+ std::unique_ptr<SkPDFArray> mediaBox,
+ std::unique_ptr<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertName("Type", "XObject");
+ dict->insertName("Subtype", "Form");
+ if (!inverseTransform.isIdentity()) {
+ dict->insertObject("Matrix", SkPDFUtils::MatrixToArray(inverseTransform));
+ }
+ dict->insertObject("Resources", std::move(resourceDict));
+ dict->insertObject("BBox", std::move(mediaBox));
+
+ // Right now FormXObject is only used for saveLayer, which implies
+ // isolated blending. Do this conditionally if that changes.
+ // TODO(halcanary): Is this comment obsolete, since we use it for
+ // alpha masks?
+ auto group = SkPDFMakeDict("Group");
+ group->insertName("S", "Transparency");
+ if (colorSpace != nullptr) {
+ group->insertName("CS", colorSpace);
+ }
+ group->insertBool("I", true); // Isolated.
+ dict->insertObject("Group", std::move(group));
+ return SkPDFStreamOut(std::move(dict), std::move(content), doc);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.h b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
new file mode 100644
index 0000000000..b12c8b2ea7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFFormXObject_DEFINED
+#define SkPDFFormXObject_DEFINED
+
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFTypes.h"
+
+class SkPDFDocument;
+
+/** A form XObject is a self-contained description of a graphics
+    object. A form XObject is a page object with slightly different
+    syntax that can be drawn into a page content stream, just like a
+    bitmap XObject can.
+*/
+SkPDFIndirectReference SkPDFMakeFormXObject(SkPDFDocument* doc,
+ std::unique_ptr<SkStreamAsset> content,
+ std::unique_ptr<SkPDFArray> mediaBox,
+ std::unique_ptr<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace);
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h b/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h
new file mode 100644
index 0000000000..e4afa72dba
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFGlyphUse_DEFINED
+#define SkPDFGlyphUse_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/utils/SkBitSet.h"
+
+class SkPDFGlyphUse {
+public:
+ SkPDFGlyphUse() : fBitSet(0) {}
+ SkPDFGlyphUse(SkGlyphID firstNonZero, SkGlyphID lastGlyph)
+ : fBitSet((int)lastGlyph - firstNonZero + 2)
+ , fFirstNonZero(firstNonZero)
+ , fLastGlyph(lastGlyph) { SkASSERT(firstNonZero >= 1); }
+ ~SkPDFGlyphUse() = default;
+ SkPDFGlyphUse(SkPDFGlyphUse&&) = default;
+ SkPDFGlyphUse& operator=(SkPDFGlyphUse&&) = default;
+
+ SkGlyphID firstNonZero() const { return fFirstNonZero; }
+ SkGlyphID lastGlyph() const { return fLastGlyph; }
+ void set(SkGlyphID gid) { fBitSet.set(this->toCode(gid)); }
+ bool has(SkGlyphID gid) const { return fBitSet.has(this->toCode(gid)); }
+
+ template<typename FN>
+ void getSetValues(FN f) const {
+ if (fFirstNonZero == 1) {
+ return fBitSet.getSetValues(std::move(f));
+ }
+ uint16_t offset = fFirstNonZero - 1;
+ fBitSet.getSetValues([&f, offset](unsigned v) { f(v == 0 ? v : v + offset); });
+ }
+
+private:
+ SkBitSet fBitSet;
+ SkGlyphID fFirstNonZero = 0;
+ SkGlyphID fLastGlyph = 0;
+
+ uint16_t toCode(SkGlyphID gid) const {
+ if (gid == 0 || fFirstNonZero == 1) {
+ return gid;
+ }
+ SkASSERT(gid >= fFirstNonZero && gid <= fLastGlyph);
+ return gid - fFirstNonZero + 1;
+ }
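+ // Example of the mapping (hypothetical values): with fFirstNonZero == 70
+ // and fLastGlyph == 72, gid 0 -> code 0, 70 -> 1, 71 -> 2, 72 -> 3; hence
+ // the bit set above holds lastGlyph - firstNonZero + 2 entries.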
+ SkPDFGlyphUse(const SkPDFGlyphUse&) = delete;
+ SkPDFGlyphUse& operator=(const SkPDFGlyphUse&) = delete;
+};
+#endif // SkPDFGlyphUse_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp b/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp
new file mode 100644
index 0000000000..919c07a543
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp
@@ -0,0 +1,921 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFGradientShader.h"
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/core/SkOpts.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static uint32_t hash(const SkShader::GradientInfo& v) {
+ uint32_t buffer[] = {
+ (uint32_t)v.fColorCount,
+ SkOpts::hash(v.fColors, v.fColorCount * sizeof(SkColor)),
+ SkOpts::hash(v.fColorOffsets, v.fColorCount * sizeof(SkScalar)),
+ SkOpts::hash(v.fPoint, 2 * sizeof(SkPoint)),
+ SkOpts::hash(v.fRadius, 2 * sizeof(SkScalar)),
+ (uint32_t)v.fTileMode,
+ v.fGradientFlags,
+ };
+ return SkOpts::hash(buffer, sizeof(buffer));
+}
+
+static uint32_t hash(const SkPDFGradientShader::Key& k) {
+ uint32_t buffer[] = {
+ (uint32_t)k.fType,
+ hash(k.fInfo),
+ SkOpts::hash(&k.fCanvasTransform, sizeof(SkMatrix)),
+ SkOpts::hash(&k.fShaderTransform, sizeof(SkMatrix)),
+ SkOpts::hash(&k.fBBox, sizeof(SkIRect))
+ };
+ return SkOpts::hash(buffer, sizeof(buffer));
+}
+
+static void unit_to_points_matrix(const SkPoint pts[2], SkMatrix* matrix) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ matrix->setSinCos(vec.fY, vec.fX);
+ matrix->preScale(mag, mag);
+ matrix->postTranslate(pts[0].fX, pts[0].fY);
+}
+
+static const int kColorComponents = 3;
+typedef uint8_t ColorTuple[kColorComponents];
+
+/* Assumes (t - startOffset) is on the stack and does a linear interpolation on t
+ between startOffset and endOffset from prevColor to curColor (for each color
+ component), leaving the result in component order on the stack. It assumes
+ there are always 3 components per color.
+ @param range endOffset - startOffset
+ @param curColor[components] The current color components.
+ @param prevColor[components] The previous color components.
+ @param result The result ps function.
+ */
+static void interpolate_color_code(SkScalar range, const ColorTuple& curColor,
+ const ColorTuple& prevColor,
+ SkDynamicMemoryWStream* result) {
+ SkASSERT(range != SkIntToScalar(0));
+
+ // Figure out how to scale each color component.
+ SkScalar multiplier[kColorComponents];
+ for (int i = 0; i < kColorComponents; i++) {
+ static const SkScalar kColorScale = SkScalarInvert(255);
+ multiplier[i] = kColorScale * (curColor[i] - prevColor[i]) / range;
+ }
+
+ // Calculate when we no longer need to keep a copy of the input parameter t.
+ // If the last component to use t is i, then dupInput[0..i - 1] = true
+ // and dupInput[i .. components] = false.
+ bool dupInput[kColorComponents];
+ dupInput[kColorComponents - 1] = false;
+ for (int i = kColorComponents - 2; i >= 0; i--) {
+ dupInput[i] = dupInput[i + 1] || multiplier[i + 1] != 0;
+ }
+
+ if (!dupInput[0] && multiplier[0] == 0) {
+ result->writeText("pop ");
+ }
+
+ for (int i = 0; i < kColorComponents; i++) {
+ // If the next component needs t and this component will consume a
+ // copy, make another copy.
+ if (dupInput[i] && multiplier[i] != 0) {
+ result->writeText("dup ");
+ }
+
+ if (multiplier[i] == 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" ");
+ } else {
+ if (multiplier[i] != 1) {
+ SkPDFUtils::AppendScalar(multiplier[i], result);
+ result->writeText(" mul ");
+ }
+ if (prevColor[i] != 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" add ");
+ }
+ }
+
+ if (dupInput[i]) {
+ result->writeText("exch\n");
+ }
+ }
+}
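+// Worked example (illustrative): for prevColor = (0,0,0), curColor =
+// (255,255,255) and range = 1, every multiplier is 1, so the emitted code is
+// just "dup exch\ndup exch\n"; given t on the stack it leaves "t t t",
+// i.e. r = g = b = t, a plain black-to-white ramp.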
+
+/* Generate Type 4 function code to map t=[0,1) to the passed gradient,
+ clamping at the edges of the range. The generated code will be of the form:
+ if (t < 0) {
+ return colorData[0][r,g,b];
+ } else {
+ if (t < info.fColorOffsets[1]) {
+ return linearinterpolation(colorData[0][r,g,b],
+ colorData[1][r,g,b]);
+ } else {
+ if (t < info.fColorOffsets[2]) {
+ return linearinterpolation(colorData[1][r,g,b],
+ colorData[2][r,g,b]);
+ } else {
+
+ ... } else {
+ return colorData[info.fColorCount - 1][r,g,b];
+ }
+ ...
+ }
+ }
+ */
+static void gradient_function_code(const SkShader::GradientInfo& info,
+ SkDynamicMemoryWStream* result) {
+ /* We want to linearly interpolate from the previous color to the next.
+ Scale the colors from 0..255 to 0..1 and determine the multipliers
+ for interpolation.
+ C{r,g,b}(t, section) = Prev{r,g,b} + (t - offset_(section-1)) * Multiplier{r,g,b}.
+ */
+
+ SkAutoSTMalloc<4, ColorTuple> colorDataAlloc(info.fColorCount);
+ ColorTuple *colorData = colorDataAlloc.get();
+ for (int i = 0; i < info.fColorCount; i++) {
+ colorData[i][0] = SkColorGetR(info.fColors[i]);
+ colorData[i][1] = SkColorGetG(info.fColors[i]);
+ colorData[i][2] = SkColorGetB(info.fColors[i]);
+ }
+
+ // Clamp the initial color.
+ result->writeText("dup 0 le {pop ");
+ SkPDFUtils::AppendColorComponent(colorData[0][0], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[0][1], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[0][2], result);
+ result->writeText(" }\n");
+
+ // The gradient colors.
+ int gradients = 0;
+ for (int i = 1 ; i < info.fColorCount; i++) {
+ if (info.fColorOffsets[i] == info.fColorOffsets[i - 1]) {
+ continue;
+ }
+ gradients++;
+
+ result->writeText("{dup ");
+ SkPDFUtils::AppendScalar(info.fColorOffsets[i], result);
+ result->writeText(" le {");
+ if (info.fColorOffsets[i - 1] != 0) {
+ SkPDFUtils::AppendScalar(info.fColorOffsets[i - 1], result);
+ result->writeText(" sub\n");
+ }
+
+ interpolate_color_code(info.fColorOffsets[i] - info.fColorOffsets[i - 1],
+ colorData[i], colorData[i - 1], result);
+ result->writeText("}\n");
+ }
+
+ // Clamp the final color.
+ result->writeText("{pop ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][0], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][1], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][2], result);
+
+ for (int i = 0 ; i < gradients + 1; i++) {
+ result->writeText("} ifelse\n");
+ }
+}
+
+static std::unique_ptr<SkPDFDict> createInterpolationFunction(const ColorTuple& color1,
+ const ColorTuple& color2) {
+ auto retval = SkPDFMakeDict();
+
+ auto c0 = SkPDFMakeArray();
+ c0->appendColorComponent(color1[0]);
+ c0->appendColorComponent(color1[1]);
+ c0->appendColorComponent(color1[2]);
+ retval->insertObject("C0", std::move(c0));
+
+ auto c1 = SkPDFMakeArray();
+ c1->appendColorComponent(color2[0]);
+ c1->appendColorComponent(color2[1]);
+ c1->appendColorComponent(color2[2]);
+ retval->insertObject("C1", std::move(c1));
+
+ retval->insertObject("Domain", SkPDFMakeArray(0, 1));
+
+ retval->insertInt("FunctionType", 2);
+ retval->insertScalar("N", 1.0f);
+
+ return retval;
+}
+
+static std::unique_ptr<SkPDFDict> gradientStitchCode(const SkShader::GradientInfo& info) {
+ auto retval = SkPDFMakeDict();
+
+ // normalize color stops
+ int colorCount = info.fColorCount;
+ std::vector<SkColor> colors(info.fColors, info.fColors + colorCount);
+ std::vector<SkScalar> colorOffsets(info.fColorOffsets, info.fColorOffsets + colorCount);
+
+ int i = 1;
+ while (i < colorCount - 1) {
+ // ensure stops are in order
+ if (colorOffsets[i - 1] > colorOffsets[i]) {
+ colorOffsets[i] = colorOffsets[i - 1];
+ }
+
+ // remove points that are between 2 coincident points
+ if ((colorOffsets[i - 1] == colorOffsets[i]) && (colorOffsets[i] == colorOffsets[i + 1])) {
+ colorCount -= 1;
+ colors.erase(colors.begin() + i);
+ colorOffsets.erase(colorOffsets.begin() + i);
+ } else {
+ i++;
+ }
+ }
+ // find coincident points and slightly move them over
+ for (i = 1; i < colorCount - 1; i++) {
+ if (colorOffsets[i - 1] == colorOffsets[i]) {
+ colorOffsets[i] += 0.00001f;
+ }
+ }
+ // check if last 2 stops coincide
+ if (colorOffsets[i - 1] == colorOffsets[i]) {
+ colorOffsets[i - 1] -= 0.00001f;
+ }
+
+ SkAutoSTMalloc<4, ColorTuple> colorDataAlloc(colorCount);
+ ColorTuple *colorData = colorDataAlloc.get();
+ for (int i = 0; i < colorCount; i++) {
+ colorData[i][0] = SkColorGetR(colors[i]);
+ colorData[i][1] = SkColorGetG(colors[i]);
+ colorData[i][2] = SkColorGetB(colors[i]);
+ }
+
+ // No need for a stitch function if there are only 2 stops.
+ if (colorCount == 2) {
+ return createInterpolationFunction(colorData[0], colorData[1]);
+ }
+
+ auto encode = SkPDFMakeArray();
+ auto bounds = SkPDFMakeArray();
+ auto functions = SkPDFMakeArray();
+
+ retval->insertObject("Domain", SkPDFMakeArray(0, 1));
+ retval->insertInt("FunctionType", 3);
+
+ for (int i = 1; i < colorCount; i++) {
+ if (i > 1) {
+ bounds->appendScalar(colorOffsets[i-1]);
+ }
+
+ encode->appendScalar(0);
+ encode->appendScalar(1.0f);
+
+ functions->appendObject(createInterpolationFunction(colorData[i-1], colorData[i]));
+ }
+
+ retval->insertObject("Encode", std::move(encode));
+ retval->insertObject("Bounds", std::move(bounds));
+ retval->insertObject("Functions", std::move(functions));
+
+ return retval;
+}
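+// For three stops at offsets [0, 0.5, 1], the dictionary above comes out
+// roughly as (illustrative sketch):
+//   <</FunctionType 3 /Domain [0 1] /Bounds [.5] /Encode [0 1 0 1]
+//     /Functions [<</FunctionType 2 /Domain [0 1] /N 1 /C0 [...] /C1 [...]>>
+//                 <</FunctionType 2 ...>>]>>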
+
+/* Map a value of t on the stack into [0, 1) for Repeat or Mirror tile mode. */
+static void tileModeCode(SkTileMode mode, SkDynamicMemoryWStream* result) {
+ if (mode == SkTileMode::kRepeat) {
+ result->writeText("dup truncate sub\n"); // Get the fractional part.
+ result->writeText("dup 0 le {1 add} if\n"); // Map (-1,0) => (0,1)
+ return;
+ }
+
+ if (mode == SkTileMode::kMirror) {
+ // Map t mod 2 into [0, 1, 1, 0].
+ // Code Stack
+ result->writeText("abs " // Map negative to positive.
+ "dup " // t.s t.s
+ "truncate " // t.s t
+ "dup " // t.s t t
+ "cvi " // t.s t T
+ "2 mod " // t.s t (i mod 2)
+ "1 eq " // t.s t true|false
+ "3 1 roll " // true|false t.s t
+ "sub " // true|false 0.s
+ "exch " // 0.s true|false
+ "{1 exch sub} if\n"); // 1 - 0.s|0.s
+ }
+}
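+// Worked traces (illustrative): for kRepeat with t = 1.25 on the stack,
+// "dup truncate sub" leaves 0.25; for t = -0.25 it leaves -0.25, which
+// "dup 0 le {1 add} if" maps to 0.75. For kMirror, t = 1.25 comes out as
+// 0.75, since the ramp reverses on every odd period.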
+
+/**
+ * Returns PS function code that applies inverse perspective
+ * to a x, y point.
+ * The function assumes that the stack has at least two elements,
+ * and that the top 2 elements are numeric values.
+ * After executing this code on a PS stack, the last 2 elements are updated
+ * while the rest of the stack is preserved intact.
+ * inversePerspectiveMatrix is the inverse perspective matrix.
+ */
+static void apply_perspective_to_coordinates(const SkMatrix& inversePerspectiveMatrix,
+ SkDynamicMemoryWStream* code) {
+ if (!inversePerspectiveMatrix.hasPerspective()) {
+ return;
+ }
+
+ // Perspective matrix should be:
+ // 1 0 0
+ // 0 1 0
+ // p0 p1 p2
+
+ const SkScalar p0 = inversePerspectiveMatrix[SkMatrix::kMPersp0];
+ const SkScalar p1 = inversePerspectiveMatrix[SkMatrix::kMPersp1];
+ const SkScalar p2 = inversePerspectiveMatrix[SkMatrix::kMPersp2];
+
+ // y = y / (p2 + p0 x + p1 y)
+ // x = x / (p2 + p0 x + p1 y)
+
+ // Input on stack: x y
+ code->writeText(" dup "); // x y y
+ SkPDFUtils::AppendScalar(p1, code); // x y y p1
+ code->writeText(" mul " // x y y*p1
+ " 2 index "); // x y y*p1 x
+ SkPDFUtils::AppendScalar(p0, code); // x y y*p1 x p0
+ code->writeText(" mul "); // x y y*p1 x*p0
+ SkPDFUtils::AppendScalar(p2, code); // x y y*p1 x*p0 p2
+ code->writeText(" add " // x y y*p1 x*p0+p2
+ "add " // x y y*p1+x*p0+p2
+ "3 1 roll " // y*p1+x*p0+p2 x y
+ "2 index " // z x y y*p1+x*p0+p2
+ "div " // y*p1+x*p0+p2 x y/(y*p1+x*p0+p2)
+ "3 1 roll " // y/(y*p1+x*p0+p2) y*p1+x*p0+p2 x
+ "exch " // y/(y*p1+x*p0+p2) x y*p1+x*p0+p2
+ "div " // y/(y*p1+x*p0+p2) x/(y*p1+x*p0+p2)
+ "exch\n"); // x/(y*p1+x*p0+p2) y/(y*p1+x*p0+p2)
+}
+
+static void linearCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ function->writeText("pop\n"); // Just ditch the y value.
+ tileModeCode((SkTileMode)info.fTileMode, function);
+ gradient_function_code(info, function);
+ function->writeText("}");
+}
+
+static void radialCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ // Find the distance from the origin.
+ function->writeText("dup " // x y y
+ "mul " // x y^2
+ "exch " // y^2 x
+ "dup " // y^2 x x
+ "mul " // y^2 x^2
+ "add " // y^2+x^2
+ "sqrt\n"); // sqrt(y^2+x^2)
+
+ tileModeCode((SkTileMode)info.fTileMode, function);
+ gradient_function_code(info, function);
+ function->writeText("}");
+}
+
+/* Conical gradient shader, based on the Canvas spec for radial gradients
+ See: http://www.w3.org/TR/2dcontext/#dom-context-2d-createradialgradient
+ */
+static void twoPointConicalCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ SkScalar dx = info.fPoint[1].fX - info.fPoint[0].fX;
+ SkScalar dy = info.fPoint[1].fY - info.fPoint[0].fY;
+ SkScalar r0 = info.fRadius[0];
+ SkScalar dr = info.fRadius[1] - info.fRadius[0];
+ SkScalar a = dx * dx + dy * dy - dr * dr;
+
+ // First compute t, if the pixel falls outside the cone, then we'll end
+ // with 'false' on the stack, otherwise we'll push 'true' with t below it
+
+ // We start with a stack of (x y), copy it and then consume one copy in
+ // order to calculate b and the other to calculate c.
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ function->writeText("2 copy ");
+
+ // Calculate b and b^2; b = -2 * (y * dy + x * dx + r0 * dr).
+ SkPDFUtils::AppendScalar(dy, function);
+ function->writeText(" mul exch ");
+ SkPDFUtils::AppendScalar(dx, function);
+ function->writeText(" mul add ");
+ SkPDFUtils::AppendScalar(r0 * dr, function);
+ function->writeText(" add -2 mul dup dup mul\n");
+
+ // c = x^2 + y^2 - r0^2
+ function->writeText("4 2 roll dup mul exch dup mul add ");
+ SkPDFUtils::AppendScalar(r0 * r0, function);
+ function->writeText(" sub dup 4 1 roll\n");
+
+ // Contents of the stack at this point: c, b, b^2, c
+
+ // if a = 0, then we collapse to a simpler linear case
+ if (a == 0) {
+
+ // t = -c/b
+ function->writeText("pop pop div neg dup ");
+
+ // compute radius(t)
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) < 0, then it's outside the cone
+ function->writeText("0 lt {pop false} {true} ifelse\n");
+
+ } else {
+
+ // quadratic case: the Canvas spec wants the largest
+ // root t for which radius(t) > 0
+
+ // compute the discriminant (b^2 - 4ac)
+ SkPDFUtils::AppendScalar(a * 4, function);
+ function->writeText(" mul sub dup\n");
+
+ // if d >= 0, proceed
+ function->writeText("0 ge {\n");
+
+ // an intermediate value we'll use to compute the roots:
+ // q = -0.5 * (b +/- sqrt(d))
+ function->writeText("sqrt exch dup 0 lt {exch -1 mul} if");
+ function->writeText(" add -0.5 mul dup\n");
+
+ // first root = q / a
+ SkPDFUtils::AppendScalar(a, function);
+ function->writeText(" div\n");
+
+ // second root = c / q
+ function->writeText("3 1 roll div\n");
+
+ // put the larger root on top of the stack
+ function->writeText("2 copy gt {exch} if\n");
+
+ // compute radius(t) for larger root
+ function->writeText("dup ");
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) > 0, we have our t, pop off the smaller root and we're done
+ function->writeText(" 0 gt {exch pop true}\n");
+
+ // otherwise, throw out the larger one and try the smaller root
+ function->writeText("{pop dup\n");
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) < 0, push false, otherwise the smaller root is our t
+ function->writeText("0 le {pop false} {true} ifelse\n");
+ function->writeText("} ifelse\n");
+
+ // d < 0, clear the stack and push false
+ function->writeText("} {pop pop pop false} ifelse\n");
+ }
+
+ // if the pixel is in the cone, proceed to compute a color
+ function->writeText("{");
+ tileModeCode((SkTileMode)info.fTileMode, function);
+ gradient_function_code(info, function);
+
+ // otherwise, just write black
+ function->writeText("} {0 0 0} ifelse }");
+}
+
+static void sweepCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ function->writeText("{exch atan 360 div\n");
+ tileModeCode((SkTileMode)info.fTileMode, function);
+ gradient_function_code(info, function);
+ function->writeText("}");
+}
+
+
+// Catch cases where the inner circle just touches the outer circle,
+// and nudge the inner circle to sit strictly inside the outer one to match raster.
+static void FixUpRadius(const SkPoint& p1, SkScalar& r1, const SkPoint& p2, SkScalar& r2) {
+ // detect touching circles
+ SkScalar distance = SkPoint::Distance(p1, p2);
+ SkScalar subtractRadii = fabs(r1 - r2);
+ if (fabs(distance - subtractRadii) < 0.002f) {
+ if (r1 > r2) {
+ r1 += 0.002f;
+ } else {
+ r2 += 0.002f;
+ }
+ }
+}
+
+// Finds affine and persp such that in = affine * persp,
+// but returns the inverse of the perspective matrix.
+static bool split_perspective(const SkMatrix in, SkMatrix* affine,
+ SkMatrix* perspectiveInverse) {
+ const SkScalar p2 = in[SkMatrix::kMPersp2];
+
+ if (SkScalarNearlyZero(p2)) {
+ return false;
+ }
+
+ const SkScalar zero = SkIntToScalar(0);
+ const SkScalar one = SkIntToScalar(1);
+
+ const SkScalar sx = in[SkMatrix::kMScaleX];
+ const SkScalar kx = in[SkMatrix::kMSkewX];
+ const SkScalar tx = in[SkMatrix::kMTransX];
+ const SkScalar ky = in[SkMatrix::kMSkewY];
+ const SkScalar sy = in[SkMatrix::kMScaleY];
+ const SkScalar ty = in[SkMatrix::kMTransY];
+ const SkScalar p0 = in[SkMatrix::kMPersp0];
+ const SkScalar p1 = in[SkMatrix::kMPersp1];
+
+ // Perspective matrix would be:
+ // 1 0 0
+ // 0 1 0
+ // p0 p1 p2
+ // But we need the inverse of persp.
+ perspectiveInverse->setAll(one, zero, zero,
+ zero, one, zero,
+ -p0/p2, -p1/p2, 1/p2);
+
+ affine->setAll(sx - p0 * tx / p2, kx - p1 * tx / p2, tx / p2,
+ ky - p0 * ty / p2, sy - p1 * ty / p2, ty / p2,
+ zero, zero, one);
+
+ return true;
+}
+
+static SkPDFIndirectReference make_ps_function(std::unique_ptr<SkStreamAsset> psCode,
+ std::unique_ptr<SkPDFArray> domain,
+ std::unique_ptr<SkPDFObject> range,
+ SkPDFDocument* doc) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("FunctionType", 4);
+ dict->insertObject("Domain", std::move(domain));
+ dict->insertObject("Range", std::move(range));
+ return SkPDFStreamOut(std::move(dict), std::move(psCode), doc);
+}
+
+static SkPDFIndirectReference make_function_shader(SkPDFDocument* doc,
+ const SkPDFGradientShader::Key& state) {
+ SkPoint transformPoints[2];
+ const SkShader::GradientInfo& info = state.fInfo;
+ SkMatrix finalMatrix = state.fCanvasTransform;
+ finalMatrix.preConcat(state.fShaderTransform);
+
+ bool doStitchFunctions = (state.fType == SkShader::kLinear_GradientType ||
+ state.fType == SkShader::kRadial_GradientType ||
+ state.fType == SkShader::kConical_GradientType) &&
+ (SkTileMode)info.fTileMode == SkTileMode::kClamp &&
+ !finalMatrix.hasPerspective();
+
+ int32_t shadingType = 1;
+ auto pdfShader = SkPDFMakeDict();
+ // The two-point radial gradient further references state.fInfo
+ // in translating from x, y coordinates to the t parameter. So, we have
+ // to transform the points and radii according to the calculated matrix.
+ if (doStitchFunctions) {
+ pdfShader->insertObject("Function", gradientStitchCode(info));
+ shadingType = (state.fType == SkShader::kLinear_GradientType) ? 2 : 3;
+
+ auto extend = SkPDFMakeArray();
+ extend->reserve(2);
+ extend->appendBool(true);
+ extend->appendBool(true);
+ pdfShader->insertObject("Extend", std::move(extend));
+
+ std::unique_ptr<SkPDFArray> coords;
+ if (state.fType == SkShader::kConical_GradientType) {
+ SkScalar r1 = info.fRadius[0];
+ SkScalar r2 = info.fRadius[1];
+ SkPoint pt1 = info.fPoint[0];
+ SkPoint pt2 = info.fPoint[1];
+ FixUpRadius(pt1, r1, pt2, r2);
+
+ coords = SkPDFMakeArray(pt1.x(),
+ pt1.y(),
+ r1,
+ pt2.x(),
+ pt2.y(),
+ r2);
+ } else if (state.fType == SkShader::kRadial_GradientType) {
+ const SkPoint& pt1 = info.fPoint[0];
+ coords = SkPDFMakeArray(pt1.x(),
+ pt1.y(),
+ 0,
+ pt1.x(),
+ pt1.y(),
+ info.fRadius[0]);
+ } else {
+ const SkPoint& pt1 = info.fPoint[0];
+ const SkPoint& pt2 = info.fPoint[1];
+ coords = SkPDFMakeArray(pt1.x(),
+ pt1.y(),
+ pt2.x(),
+ pt2.y());
+ }
+
+ pdfShader->insertObject("Coords", std::move(coords));
+ } else {
+ // Depending on the type of the gradient, we want to transform the
+ // coordinate space in different ways.
+ transformPoints[0] = info.fPoint[0];
+ transformPoints[1] = info.fPoint[1];
+ switch (state.fType) {
+ case SkShader::kLinear_GradientType:
+ break;
+ case SkShader::kRadial_GradientType:
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += info.fRadius[0];
+ break;
+ case SkShader::kConical_GradientType: {
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += SK_Scalar1;
+ break;
+ }
+ case SkShader::kSweep_GradientType:
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += SK_Scalar1;
+ break;
+ case SkShader::kColor_GradientType:
+ case SkShader::kNone_GradientType:
+ default:
+ return SkPDFIndirectReference();
+ }
+
+ // Move any scaling (assuming a unit gradient) or translation
+ // (and rotation for linear gradients) of the final gradient from
+ // info.fPoints to the matrix (updating bbox appropriately). Now
+ // the gradient can be drawn on the unit segment.
+ SkMatrix mapperMatrix;
+ unit_to_points_matrix(transformPoints, &mapperMatrix);
+
+ finalMatrix.preConcat(mapperMatrix);
+
+ // Preserves as much as possible in the final matrix, and only removes
+ // the perspective. The inverse of the perspective is stored in
+ // perspectiveInverseOnly matrix and has 3 useful numbers
+ // (p0, p1, p2), while everything else is either 0 or 1.
+ // In this way the shader will handle it efficiently, with minimal code.
+ SkMatrix perspectiveInverseOnly = SkMatrix::I();
+ if (finalMatrix.hasPerspective()) {
+ if (!split_perspective(finalMatrix,
+ &finalMatrix, &perspectiveInverseOnly)) {
+ return SkPDFIndirectReference();
+ }
+ }
+
+ SkRect bbox;
+ bbox.set(state.fBBox);
+ if (!SkPDFUtils::InverseTransformBBox(finalMatrix, &bbox)) {
+ return SkPDFIndirectReference();
+ }
+ SkDynamicMemoryWStream functionCode;
+
+ SkShader::GradientInfo infoCopy = info;
+
+ if (state.fType == SkShader::kConical_GradientType) {
+ SkMatrix inverseMapperMatrix;
+ if (!mapperMatrix.invert(&inverseMapperMatrix)) {
+ return SkPDFIndirectReference();
+ }
+ inverseMapperMatrix.mapPoints(infoCopy.fPoint, 2);
+ infoCopy.fRadius[0] = inverseMapperMatrix.mapRadius(info.fRadius[0]);
+ infoCopy.fRadius[1] = inverseMapperMatrix.mapRadius(info.fRadius[1]);
+ }
+ switch (state.fType) {
+ case SkShader::kLinear_GradientType:
+ linearCode(infoCopy, perspectiveInverseOnly, &functionCode);
+ break;
+ case SkShader::kRadial_GradientType:
+ radialCode(infoCopy, perspectiveInverseOnly, &functionCode);
+ break;
+ case SkShader::kConical_GradientType:
+ twoPointConicalCode(infoCopy, perspectiveInverseOnly, &functionCode);
+ break;
+ case SkShader::kSweep_GradientType:
+ sweepCode(infoCopy, perspectiveInverseOnly, &functionCode);
+ break;
+ default:
+ SkASSERT(false);
+ }
+ pdfShader->insertObject(
+ "Domain", SkPDFMakeArray(bbox.left(), bbox.right(), bbox.top(), bbox.bottom()));
+
+ auto domain = SkPDFMakeArray(bbox.left(), bbox.right(), bbox.top(), bbox.bottom());
+ std::unique_ptr<SkPDFArray> rangeObject = SkPDFMakeArray(0, 1, 0, 1, 0, 1);
+ pdfShader->insertRef("Function",
+ make_ps_function(functionCode.detachAsStream(), std::move(domain),
+ std::move(rangeObject), doc));
+ }
+
+ pdfShader->insertInt("ShadingType", shadingType);
+ pdfShader->insertName("ColorSpace", "DeviceRGB");
+
+ SkPDFDict pdfFunctionShader("Pattern");
+ pdfFunctionShader.insertInt("PatternType", 2);
+ pdfFunctionShader.insertObject("Matrix", SkPDFUtils::MatrixToArray(finalMatrix));
+ pdfFunctionShader.insertObject("Shading", std::move(pdfShader));
+ return doc->emit(pdfFunctionShader);
+}
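+// The emitted pattern above is roughly (illustrative sketch):
+//   <</PatternType 2 /Matrix [...] /Shading
+//     <</ShadingType 2 (or 3) /ColorSpace /DeviceRGB /Coords [...]
+//       /Extend [true true] /Function <<stitch function>>>>>>
+// or, on the non-stitch path, /ShadingType 1 with a /Domain covering the
+// bbox and a Type 4 (PostScript calculator) /Function.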
+
+static SkPDFIndirectReference find_pdf_shader(SkPDFDocument* doc,
+ SkPDFGradientShader::Key key,
+ bool keyHasAlpha);
+
+static std::unique_ptr<SkPDFDict> get_gradient_resource_dict(SkPDFIndirectReference functionShader,
+ SkPDFIndirectReference gState) {
+ std::vector<SkPDFIndirectReference> patternShaders;
+ if (functionShader != SkPDFIndirectReference()) {
+ patternShaders.push_back(functionShader);
+ }
+ std::vector<SkPDFIndirectReference> graphicStates;
+ if (gState != SkPDFIndirectReference()) {
+ graphicStates.push_back(gState);
+ }
+ return SkPDFMakeResourceDict(std::move(graphicStates),
+ std::move(patternShaders),
+ std::vector<SkPDFIndirectReference>(),
+ std::vector<SkPDFIndirectReference>());
+}
+
+// Creates a content stream which fills the pattern P0 across bounds.
+// @param gsIndex A graphics state resource index to apply, or <0 if no
+// graphics state to apply.
+static std::unique_ptr<SkStreamAsset> create_pattern_fill_content(int gsIndex,
+ int patternIndex,
+ SkRect& bounds) {
+ SkDynamicMemoryWStream content;
+ if (gsIndex >= 0) {
+ SkPDFUtils::ApplyGraphicState(gsIndex, &content);
+ }
+ SkPDFUtils::ApplyPattern(patternIndex, &content);
+ SkPDFUtils::AppendRectangle(bounds, &content);
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, SkPath::kEvenOdd_FillType, &content);
+ return content.detachAsStream();
+}
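+// The resulting content stream is roughly of the form (illustrative; the
+// exact operators come from the SkPDFUtils helpers and the resource names
+// depend on the indices):
+//   /G0 gs  /Pattern cs /P0 scn  x y w h re  f*
+// i.e. apply the graphics state, select the pattern as the fill color, and
+// fill the bounding rectangle using the even-odd rule.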
+
+static bool gradient_has_alpha(const SkPDFGradientShader::Key& key) {
+ SkASSERT(key.fType != SkShader::kNone_GradientType);
+ for (int i = 0; i < key.fInfo.fColorCount; i++) {
+ if ((SkAlpha)SkColorGetA(key.fInfo.fColors[i]) != SK_AlphaOPAQUE) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// warning: does not set fHash on new key. (Both callers need to change fields.)
+static SkPDFGradientShader::Key clone_key(const SkPDFGradientShader::Key& k) {
+ SkPDFGradientShader::Key clone = {
+ k.fType,
+ k.fInfo, // change pointers later.
+ std::unique_ptr<SkColor[]>(new SkColor[k.fInfo.fColorCount]),
+ std::unique_ptr<SkScalar[]>(new SkScalar[k.fInfo.fColorCount]),
+ k.fCanvasTransform,
+ k.fShaderTransform,
+ k.fBBox, 0};
+ clone.fInfo.fColors = clone.fColors.get();
+ clone.fInfo.fColorOffsets = clone.fStops.get();
+ for (int i = 0; i < clone.fInfo.fColorCount; i++) {
+ clone.fInfo.fColorOffsets[i] = k.fInfo.fColorOffsets[i];
+ clone.fInfo.fColors[i] = k.fInfo.fColors[i];
+ }
+ return clone;
+}
+
+static SkPDFIndirectReference create_smask_graphic_state(SkPDFDocument* doc,
+ const SkPDFGradientShader::Key& state) {
+ SkASSERT(state.fType != SkShader::kNone_GradientType);
+ SkPDFGradientShader::Key luminosityState = clone_key(state);
+ for (int i = 0; i < luminosityState.fInfo.fColorCount; i++) {
+ SkAlpha alpha = SkColorGetA(luminosityState.fInfo.fColors[i]);
+ luminosityState.fInfo.fColors[i] = SkColorSetARGB(255, alpha, alpha, alpha);
+ }
+ luminosityState.fHash = hash(luminosityState);
+
+ SkASSERT(!gradient_has_alpha(luminosityState));
+ SkPDFIndirectReference luminosityShader = find_pdf_shader(doc, std::move(luminosityState), false);
+ std::unique_ptr<SkPDFDict> resources = get_gradient_resource_dict(luminosityShader,
+ SkPDFIndirectReference());
+ SkRect bbox = SkRect::Make(state.fBBox);
+ SkPDFIndirectReference alphaMask =
+ SkPDFMakeFormXObject(doc,
+ create_pattern_fill_content(-1, luminosityShader.fValue, bbox),
+ SkPDFUtils::RectToArray(bbox),
+ std::move(resources),
+ SkMatrix::I(),
+ "DeviceRGB");
+ return SkPDFGraphicState::GetSMaskGraphicState(
+ alphaMask, false, SkPDFGraphicState::kLuminosity_SMaskMode, doc);
+}
+
+static SkPDFIndirectReference make_alpha_function_shader(SkPDFDocument* doc,
+ const SkPDFGradientShader::Key& state) {
+ SkASSERT(state.fType != SkShader::kNone_GradientType);
+ SkPDFGradientShader::Key opaqueState = clone_key(state);
+ for (int i = 0; i < opaqueState.fInfo.fColorCount; i++) {
+ opaqueState.fInfo.fColors[i] = SkColorSetA(opaqueState.fInfo.fColors[i], SK_AlphaOPAQUE);
+ }
+ opaqueState.fHash = hash(opaqueState);
+
+ SkASSERT(!gradient_has_alpha(opaqueState));
+ SkRect bbox = SkRect::Make(state.fBBox);
+ SkPDFIndirectReference colorShader = find_pdf_shader(doc, std::move(opaqueState), false);
+ if (!colorShader) {
+ return SkPDFIndirectReference();
+ }
+ // Create resource dict with alpha graphics state as G0 and
+ // pattern shader as P0, then write content stream.
+ SkPDFIndirectReference alphaGsRef = create_smask_graphic_state(doc, state);
+
+ std::unique_ptr<SkPDFDict> resourceDict = get_gradient_resource_dict(colorShader, alphaGsRef);
+
+ std::unique_ptr<SkStreamAsset> colorStream =
+ create_pattern_fill_content(alphaGsRef.fValue, colorShader.fValue, bbox);
+ std::unique_ptr<SkPDFDict> alphaFunctionShader = SkPDFMakeDict();
+ SkPDFUtils::PopulateTilingPatternDict(alphaFunctionShader.get(), bbox,
+ std::move(resourceDict), SkMatrix::I());
+ return SkPDFStreamOut(std::move(alphaFunctionShader), std::move(colorStream), doc);
+}
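+// Summary of the alpha-gradient construction above (illustrative): a gradient
+// with non-opaque stops becomes two pieces -- an opaque-color shading pattern,
+// and an ExtGState whose luminosity SMask is a form XObject filled with the
+// same gradient in grayscale (each stop's alpha replicated into r = g = b).
+// The tiling pattern's content stream applies the ExtGState and then fills
+// with the opaque pattern.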
+
+static SkPDFGradientShader::Key make_key(const SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& bbox) {
+ SkPDFGradientShader::Key key = {
+ SkShader::kNone_GradientType,
+ {0, nullptr, nullptr, {{0, 0}, {0, 0}}, {0, 0}, SkTileMode::kClamp, 0},
+ nullptr,
+ nullptr,
+ canvasTransform,
+ SkPDFUtils::GetShaderLocalMatrix(shader),
+ bbox, 0};
+ key.fType = shader->asAGradient(&key.fInfo);
+ SkASSERT(SkShader::kNone_GradientType != key.fType);
+ SkASSERT(key.fInfo.fColorCount > 0);
+ key.fColors.reset(new SkColor[key.fInfo.fColorCount]);
+ key.fStops.reset(new SkScalar[key.fInfo.fColorCount]);
+ key.fInfo.fColors = key.fColors.get();
+ key.fInfo.fColorOffsets = key.fStops.get();
+ (void)shader->asAGradient(&key.fInfo);
+ key.fHash = hash(key);
+ return key;
+}
+
+static SkPDFIndirectReference find_pdf_shader(SkPDFDocument* doc,
+ SkPDFGradientShader::Key key,
+ bool keyHasAlpha) {
+ SkASSERT(gradient_has_alpha(key) == keyHasAlpha);
+ auto& gradientPatternMap = doc->fGradientPatternMap;
+ if (SkPDFIndirectReference* ptr = gradientPatternMap.find(key)) {
+ return *ptr;
+ }
+ SkPDFIndirectReference pdfShader;
+ if (keyHasAlpha) {
+ pdfShader = make_alpha_function_shader(doc, key);
+ } else {
+ pdfShader = make_function_shader(doc, key);
+ }
+ gradientPatternMap.set(std::move(key), pdfShader);
+ return pdfShader;
+}
+
+SkPDFIndirectReference SkPDFGradientShader::Make(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& bbox) {
+ SkASSERT(shader);
+ SkASSERT(SkShader::kNone_GradientType != shader->asAGradient(nullptr));
+ SkPDFGradientShader::Key key = make_key(shader, canvasTransform, bbox);
+ bool alpha = gradient_has_alpha(key);
+ return find_pdf_shader(doc, std::move(key), alpha);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFGradientShader.h b/gfx/skia/skia/src/pdf/SkPDFGradientShader.h
new file mode 100644
index 0000000000..63876a7584
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGradientShader.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFGradientShader_DEFINED
+#define SkPDFGradientShader_DEFINED
+
+#include "include/core/SkShader.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+
+class SkMatrix;
+class SkPDFDocument;
+struct SkIRect;
+
+namespace SkPDFGradientShader {
+
+SkPDFIndirectReference Make(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& matrix,
+ const SkIRect& surfaceBBox);
+
+struct Key {
+ SkShader::GradientType fType;
+ SkShader::GradientInfo fInfo;
+ std::unique_ptr<SkColor[]> fColors;
+ std::unique_ptr<SkScalar[]> fStops;
+ SkMatrix fCanvasTransform;
+ SkMatrix fShaderTransform;
+ SkIRect fBBox;
+ uint32_t fHash;
+};
+
+struct KeyHash {
+ uint32_t operator()(const Key& k) const { return k.fHash; }
+};
+
+inline bool operator==(const SkShader::GradientInfo& u, const SkShader::GradientInfo& v) {
+ return u.fColorCount == v.fColorCount
+ && u.fPoint[0] == v.fPoint[0]
+ && u.fPoint[1] == v.fPoint[1]
+ && u.fRadius[0] == v.fRadius[0]
+ && u.fRadius[1] == v.fRadius[1]
+ && u.fTileMode == v.fTileMode
+ && u.fGradientFlags == v.fGradientFlags
+ && SkPackedArrayEqual(u.fColors, v.fColors, u.fColorCount)
+ && SkPackedArrayEqual(u.fColorOffsets, v.fColorOffsets, u.fColorCount);
+}
+
+inline bool operator==(const Key& u, const Key& v) {
+ SkASSERT(u.fInfo.fColors == u.fColors.get());
+ SkASSERT(u.fInfo.fColorOffsets == u.fStops.get());
+ SkASSERT(v.fInfo.fColors == v.fColors.get());
+ SkASSERT(v.fInfo.fColorOffsets == v.fStops.get());
+ return u.fType == v.fType
+ && u.fInfo == v.fInfo
+ && u.fCanvasTransform == v.fCanvasTransform
+ && u.fShaderTransform == v.fShaderTransform
+ && u.fBBox == v.fBBox;
+}
+inline bool operator!=(const Key& u, const Key& v) { return !(u == v); }
+
+} // namespace SkPDFGradientShader
+#endif // SkPDFGradientShader_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp
new file mode 100644
index 0000000000..a27170b707
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp
@@ -0,0 +1,250 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFGraphicStackState.h"
+
+#include "include/core/SkStream.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static SkPath to_path(const SkRect& r) {
+ SkPath p;
+ p.addRect(r);
+ return p;
+}
+
+static void emit_pdf_color(SkColor4f color, SkWStream* result) {
+ SkASSERT(color.fA == 1); // We handle alpha elsewhere.
+ SkPDFUtils::AppendColorComponentF(color.fR, result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponentF(color.fG, result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponentF(color.fB, result);
+ result->writeText(" ");
+}
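+// For illustration only: emit_pdf_color writes the three components each
+// followed by a space, e.g. "1 0 0 " for opaque red; the caller then appends
+// the color operator, such as "rg" (fill) or "RG" (stroke).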
+
+static SkRect rect_intersect(SkRect u, SkRect v) {
+ if (u.isEmpty() || v.isEmpty()) { return {0, 0, 0, 0}; }
+ return u.intersect(v) ? u : SkRect{0, 0, 0, 0};
+}
+
+// Test to see if the clip stack is a simple rect. If so, we can avoid all the
+// PathOps code and speed things up.
+static bool is_rect(const SkClipStack& clipStack, const SkRect& bounds, SkRect* dst) {
+ SkRect currentClip = bounds;
+ SkClipStack::Iter iter(clipStack, SkClipStack::Iter::kBottom_IterStart);
+ while (const SkClipStack::Element* element = iter.next()) {
+ SkRect elementRect{0, 0, 0, 0};
+ switch (element->getDeviceSpaceType()) {
+ case SkClipStack::Element::DeviceSpaceType::kEmpty:
+ break;
+ case SkClipStack::Element::DeviceSpaceType::kRect:
+ elementRect = element->getDeviceSpaceRect();
+ break;
+ default:
+ return false;
+ }
+ switch (element->getOp()) {
+ case kReplace_SkClipOp:
+ currentClip = rect_intersect(bounds, elementRect);
+ break;
+ case SkClipOp::kIntersect:
+ currentClip = rect_intersect(currentClip, elementRect);
+ break;
+ default:
+ return false;
+ }
+ }
+ *dst = currentClip;
+ return true;
+}
+
+static bool is_complex_clip(const SkClipStack& stack) {
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kBottom_IterStart);
+ while (const SkClipStack::Element* element = iter.next()) {
+ switch (element->getOp()) {
+ case SkClipOp::kDifference:
+ case SkClipOp::kIntersect:
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+template <typename F>
+static void apply_clip(const SkClipStack& stack, const SkRect& outerBounds, F fn) {
+    // Assumes the clip stack is not complex (checked by is_complex_clip above).
+ constexpr SkRect kHuge{-30000, -30000, 30000, 30000};
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kBottom_IterStart);
+ SkRect bounds = outerBounds;
+ while (const SkClipStack::Element* element = iter.next()) {
+ SkPath operand;
+ element->asDeviceSpacePath(&operand);
+ SkPathOp op;
+ switch (element->getOp()) {
+ case SkClipOp::kDifference: op = kDifference_SkPathOp; break;
+ case SkClipOp::kIntersect: op = kIntersect_SkPathOp; break;
+ default: SkASSERT(false); return;
+ }
+ if (op == kDifference_SkPathOp ||
+ operand.isInverseFillType() ||
+ !kHuge.contains(operand.getBounds()))
+ {
+ Op(to_path(bounds), operand, op, &operand);
+ }
+ SkASSERT(!operand.isInverseFillType());
+ fn(operand);
+ if (!bounds.intersect(operand.getBounds())) {
+            return; // The accumulated clip bounds are empty; stop.
+ }
+ }
+}
+
+static void append_clip_path(const SkPath& clipPath, SkWStream* wStream) {
+ SkPDFUtils::EmitPath(clipPath, SkPaint::kFill_Style, wStream);
+ SkPath::FillType clipFill = clipPath.getFillType();
+ NOT_IMPLEMENTED(clipFill == SkPath::kInverseEvenOdd_FillType, false);
+ NOT_IMPLEMENTED(clipFill == SkPath::kInverseWinding_FillType, false);
+ if (clipFill == SkPath::kEvenOdd_FillType) {
+ wStream->writeText("W* n\n");
+ } else {
+ wStream->writeText("W n\n");
+ }
+}
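+// For illustration only: a rectangular clip emitted through this path comes
+// out in the content stream roughly as
+//   0 0 100 100 re
+//   W n
+// where "W" (or "W*" for even-odd paths) installs the current path as the
+// clip and "n" ends the path without painting it (PDF 32000-1:2008, 8.5.4).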
+
+static void append_clip(const SkClipStack& clipStack,
+ const SkIRect& bounds,
+ SkWStream* wStream) {
+ // The bounds are slightly outset to ensure this is correct in the
+ // face of floating-point accuracy and possible SkRegion bitmap
+ // approximations.
+ SkRect outsetBounds = SkRect::Make(bounds.makeOutset(1, 1));
+
+ SkRect clipStackRect;
+ if (is_rect(clipStack, outsetBounds, &clipStackRect)) {
+ SkPDFUtils::AppendRectangle(clipStackRect, wStream);
+ wStream->writeText("W* n\n");
+ return;
+ }
+
+ if (is_complex_clip(clipStack)) {
+ SkPath clipPath;
+ (void)clipStack.asPath(&clipPath);
+ if (Op(clipPath, to_path(outsetBounds), kIntersect_SkPathOp, &clipPath)) {
+ append_clip_path(clipPath, wStream);
+ }
+ // If Op() fails (pathological case; e.g. input values are
+ // extremely large or NaN), emit no clip at all.
+ } else {
+ apply_clip(clipStack, outsetBounds, [wStream](const SkPath& path) {
+ append_clip_path(path, wStream);
+ });
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkPDFGraphicStackState::updateClip(const SkClipStack* clipStack, const SkIRect& bounds) {
+ uint32_t clipStackGenID = clipStack ? clipStack->getTopmostGenID()
+ : SkClipStack::kWideOpenGenID;
+ if (clipStackGenID == currentEntry()->fClipStackGenID) {
+ return;
+ }
+ while (fStackDepth > 0) {
+ this->pop();
+ if (clipStackGenID == currentEntry()->fClipStackGenID) {
+ return;
+ }
+ }
+ SkASSERT(currentEntry()->fClipStackGenID == SkClipStack::kWideOpenGenID);
+ if (clipStackGenID != SkClipStack::kWideOpenGenID) {
+ SkASSERT(clipStack);
+ this->push();
+
+ currentEntry()->fClipStackGenID = clipStackGenID;
+ append_clip(*clipStack, bounds, fContentStream);
+ }
+}
+
+
+void SkPDFGraphicStackState::updateMatrix(const SkMatrix& matrix) {
+ if (matrix == currentEntry()->fMatrix) {
+ return;
+ }
+
+ if (currentEntry()->fMatrix.getType() != SkMatrix::kIdentity_Mask) {
+ SkASSERT(fStackDepth > 0);
+        SkASSERT(fEntries[fStackDepth].fClipStackGenID ==
+                 fEntries[fStackDepth - 1].fClipStackGenID);
+ this->pop();
+
+ SkASSERT(currentEntry()->fMatrix.getType() == SkMatrix::kIdentity_Mask);
+ }
+ if (matrix.getType() == SkMatrix::kIdentity_Mask) {
+ return;
+ }
+
+ this->push();
+ SkPDFUtils::AppendTransform(matrix, fContentStream);
+ currentEntry()->fMatrix = matrix;
+}
+
+void SkPDFGraphicStackState::updateDrawingState(const SkPDFGraphicStackState::Entry& state) {
+ // PDF treats a shader as a color, so we only set one or the other.
+ if (state.fShaderIndex >= 0) {
+ if (state.fShaderIndex != currentEntry()->fShaderIndex) {
+ SkPDFUtils::ApplyPattern(state.fShaderIndex, fContentStream);
+ currentEntry()->fShaderIndex = state.fShaderIndex;
+ }
+ } else {
+ if (state.fColor != currentEntry()->fColor ||
+ currentEntry()->fShaderIndex >= 0) {
+ emit_pdf_color(state.fColor, fContentStream);
+ fContentStream->writeText("RG ");
+ emit_pdf_color(state.fColor, fContentStream);
+ fContentStream->writeText("rg\n");
+ currentEntry()->fColor = state.fColor;
+ currentEntry()->fShaderIndex = -1;
+ }
+ }
+
+ if (state.fGraphicStateIndex != currentEntry()->fGraphicStateIndex) {
+ SkPDFUtils::ApplyGraphicState(state.fGraphicStateIndex, fContentStream);
+ currentEntry()->fGraphicStateIndex = state.fGraphicStateIndex;
+ }
+
+ if (state.fTextScaleX) {
+ if (state.fTextScaleX != currentEntry()->fTextScaleX) {
+ SkScalar pdfScale = state.fTextScaleX * 100;
+ SkPDFUtils::AppendScalar(pdfScale, fContentStream);
+ fContentStream->writeText(" Tz\n");
+ currentEntry()->fTextScaleX = state.fTextScaleX;
+ }
+ }
+}
+
+void SkPDFGraphicStackState::push() {
+ SkASSERT(fStackDepth < kMaxStackDepth);
+ fContentStream->writeText("q\n");
+ ++fStackDepth;
+ fEntries[fStackDepth] = fEntries[fStackDepth - 1];
+}
+
+void SkPDFGraphicStackState::pop() {
+ SkASSERT(fStackDepth > 0);
+ fContentStream->writeText("Q\n");
+ fEntries[fStackDepth] = SkPDFGraphicStackState::Entry();
+ --fStackDepth;
+}
+
+void SkPDFGraphicStackState::drainStack() {
+ if (fContentStream) {
+ while (fStackDepth) {
+ this->pop();
+ }
+ }
+ SkASSERT(fStackDepth == 0);
+}
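+// For illustration only: a clip plus a non-identity matrix nests like
+//   q                    % push() for the clip level
+//   0 0 100 100 re W n
+//   q                    % push() for the matrix level
+//   1 0 0 1 20 30 cm
+//   ... painting operators ...
+//   Q
+//   Q                    % drainStack() pops both levels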
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h
new file mode 100644
index 0000000000..593e9b197b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h
@@ -0,0 +1,40 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFGraphicStackState_DEFINED
+#define SkPDFGraphicStackState_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkClipStack.h"
+
+class SkDynamicMemoryWStream;
+
+// It is important not to confuse SkPDFGraphicStackState with SkPDFGraphicState,
+// the latter being our representation of an object in the PDF file.
+struct SkPDFGraphicStackState {
+ struct Entry {
+ SkMatrix fMatrix = SkMatrix::I();
+ uint32_t fClipStackGenID = SkClipStack::kWideOpenGenID;
+ SkColor4f fColor = {0, 0, 0, 1};
+ SkScalar fTextScaleX = 1; // Zero means we don't care what the value is.
+ int fShaderIndex = -1;
+ int fGraphicStateIndex = -1;
+ };
+    // One stack level is needed for the matrix and one for the clip, plus a
+    // base level with no matrix or clip.
+ static constexpr int kMaxStackDepth = 2;
+ Entry fEntries[kMaxStackDepth + 1];
+ int fStackDepth = 0;
+ SkDynamicMemoryWStream* fContentStream;
+
+ SkPDFGraphicStackState(SkDynamicMemoryWStream* s = nullptr) : fContentStream(s) {}
+ void updateClip(const SkClipStack* clipStack, const SkIRect& bounds);
+ void updateMatrix(const SkMatrix& matrix);
+ void updateDrawingState(const Entry& state);
+ void push();
+ void pop();
+ void drainStack();
+ Entry* currentEntry() { return &fEntries[fStackDepth]; }
+};
+
+#endif // SkPDFGraphicStackState_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
new file mode 100644
index 0000000000..5817c11f12
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFGraphicState.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkPaint.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkTo.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static const char* as_pdf_blend_mode_name(SkBlendMode mode) {
+ const char* name = SkPDFUtils::BlendModeName(mode);
+ SkASSERT(name);
+ return name;
+}
+
+static int to_stroke_cap(uint8_t cap) {
+ // PDF32000.book section 8.4.3.3 "Line Cap Style"
+ switch ((SkPaint::Cap)cap) {
+ case SkPaint::kButt_Cap: return 0;
+ case SkPaint::kRound_Cap: return 1;
+ case SkPaint::kSquare_Cap: return 2;
+ default: SkASSERT(false); return 0;
+ }
+}
+
+static int to_stroke_join(uint8_t join) {
+ // PDF32000.book section 8.4.3.4 "Line Join Style"
+ switch ((SkPaint::Join)join) {
+ case SkPaint::kMiter_Join: return 0;
+ case SkPaint::kRound_Join: return 1;
+ case SkPaint::kBevel_Join: return 2;
+ default: SkASSERT(false); return 0;
+ }
+}
+
+// If a blend mode is unsupported in PDF, this function returns kSrcOver;
+// otherwise it returns the given mode.
+static uint8_t pdf_blend_mode(SkBlendMode mode) {
+ if (!SkPDFUtils::BlendModeName(mode)
+ || SkBlendMode::kXor == mode
+ || SkBlendMode::kPlus == mode)
+ {
+ mode = SkBlendMode::kSrcOver;
+ }
+ return SkToU8((unsigned)mode);
+}
+
+SkPDFIndirectReference SkPDFGraphicState::GetGraphicStateForPaint(SkPDFDocument* doc,
+ const SkPaint& p) {
+ SkASSERT(doc);
+ if (SkPaint::kFill_Style == p.getStyle()) {
+ SkPDFFillGraphicState fillKey = {p.getColor4f().fA, pdf_blend_mode(p.getBlendMode())};
+ auto& fillMap = doc->fFillGSMap;
+ if (SkPDFIndirectReference* statePtr = fillMap.find(fillKey)) {
+ return *statePtr;
+ }
+ SkPDFDict state;
+ state.reserve(2);
+ state.insertColorComponentF("ca", fillKey.fAlpha);
+ state.insertName("BM", as_pdf_blend_mode_name((SkBlendMode)fillKey.fBlendMode));
+ SkPDFIndirectReference ref = doc->emit(state);
+ fillMap.set(fillKey, ref);
+ return ref;
+ } else {
+ SkPDFStrokeGraphicState strokeKey = {
+ p.getStrokeWidth(),
+ p.getStrokeMiter(),
+ p.getColor4f().fA,
+ SkToU8(p.getStrokeCap()),
+ SkToU8(p.getStrokeJoin()),
+ pdf_blend_mode(p.getBlendMode())
+ };
+ auto& sMap = doc->fStrokeGSMap;
+ if (SkPDFIndirectReference* statePtr = sMap.find(strokeKey)) {
+ return *statePtr;
+ }
+ SkPDFDict state;
+ state.reserve(8);
+ state.insertColorComponentF("CA", strokeKey.fAlpha);
+ state.insertColorComponentF("ca", strokeKey.fAlpha);
+ state.insertInt("LC", to_stroke_cap(strokeKey.fStrokeCap));
+ state.insertInt("LJ", to_stroke_join(strokeKey.fStrokeJoin));
+ state.insertScalar("LW", strokeKey.fStrokeWidth);
+ state.insertScalar("ML", strokeKey.fStrokeMiter);
+ state.insertBool("SA", true); // SA = Auto stroke adjustment.
+ state.insertName("BM", as_pdf_blend_mode_name((SkBlendMode)strokeKey.fBlendMode));
+ SkPDFIndirectReference ref = doc->emit(state);
+ sMap.set(strokeKey, ref);
+ return ref;
+ }
+}
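+// For illustration (values hypothetical): a stroke graphic state serializes
+// to a dict along the lines of
+//   << /CA .5 /ca .5 /LC 1 /LJ 1 /LW 2 /ML 4 /SA true /BM /Normal >>
+// and a fill graphic state to
+//   << /ca .5 /BM /Multiply >>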
+
+////////////////////////////////////////////////////////////////////////////////
+
+static SkPDFIndirectReference make_invert_function(SkPDFDocument* doc) {
+ // Acrobat crashes if we use a type 0 function, kpdf crashes if we use
+ // a type 2 function, so we use a type 4 function.
+ static const char psInvert[] = "{1 exch sub}";
+ // Do not copy the trailing '\0' into the SkData.
+ auto invertFunction = SkData::MakeWithoutCopy(psInvert, strlen(psInvert));
+
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("FunctionType", 4);
+ dict->insertObject("Domain", SkPDFMakeArray(0, 1));
+ dict->insertObject("Range", SkPDFMakeArray(0, 1));
+ return SkPDFStreamOut(std::move(dict), SkMemoryStream::Make(std::move(invertFunction)), doc);
+}
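+// For illustration, ignoring possible stream compression, the object emitted
+// above is roughly
+//   << /FunctionType 4 /Domain [0 1] /Range [0 1] /Length 12 >>
+//   stream
+//   {1 exch sub}
+//   endstream
+// i.e. a PostScript calculator function computing 1 - x.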
+
+SkPDFIndirectReference SkPDFGraphicState::GetSMaskGraphicState(SkPDFIndirectReference sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFDocument* doc) {
+    // In practice the same mask is unlikely to be used more than once, so it
+    // is not worth canonicalizing.
+ auto sMaskDict = SkPDFMakeDict("Mask");
+ if (sMaskMode == kAlpha_SMaskMode) {
+ sMaskDict->insertName("S", "Alpha");
+ } else if (sMaskMode == kLuminosity_SMaskMode) {
+ sMaskDict->insertName("S", "Luminosity");
+ }
+ sMaskDict->insertRef("G", sMask);
+ if (invert) {
+ // let the doc deduplicate this object.
+ if (doc->fInvertFunction == SkPDFIndirectReference()) {
+ doc->fInvertFunction = make_invert_function(doc);
+ }
+ sMaskDict->insertRef("TR", doc->fInvertFunction);
+ }
+ SkPDFDict result("ExtGState");
+ result.insertObject("SMask", std::move(sMaskDict));
+ return doc->emit(result);
+}
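+// For illustration (object numbers hypothetical): with invert=true and
+// kLuminosity_SMaskMode, this emits roughly
+//   << /Type /ExtGState
+//      /SMask << /Type /Mask /S /Luminosity /G 7 0 R /TR 8 0 R >> >>
+// where 7 0 R is the mask form XObject and 8 0 R the invert function.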
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.h b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
new file mode 100644
index 0000000000..e5dd192990
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFGraphicState_DEFINED
+#define SkPDFGraphicState_DEFINED
+
+#include "include/private/SkMacros.h"
+#include "src/core/SkOpts.h"
+#include "src/pdf/SkPDFTypes.h"
+
+class SkPaint;
+
+
+/** \class SkPDFGraphicState
+ SkPaint objects roughly correspond to graphic state dictionaries that can
+ be installed. So that a given dictionary is only output to the pdf file
+ once, we want to canonicalize them.
+*/
+namespace SkPDFGraphicState {
+ enum SkPDFSMaskMode {
+ kAlpha_SMaskMode,
+ kLuminosity_SMaskMode
+ };
+
+ /** Get the graphic state for the passed SkPaint.
+ */
+ SkPDFIndirectReference GetGraphicStateForPaint(SkPDFDocument*, const SkPaint&);
+
+ /** Make a graphic state that only sets the passed soft mask.
+ * @param sMask The form xobject to use as a soft mask.
+ * @param invert Indicates if the alpha of the sMask should be inverted.
+ * @param sMaskMode Whether to use alpha or luminosity for the sMask.
+ *
+ * These are not de-duped.
+ */
+ SkPDFIndirectReference GetSMaskGraphicState(SkPDFIndirectReference sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFDocument* doc);
+}
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFStrokeGraphicState {
+ SkScalar fStrokeWidth;
+ SkScalar fStrokeMiter;
+ SkScalar fAlpha;
+ uint8_t fStrokeCap; // SkPaint::Cap
+ uint8_t fStrokeJoin; // SkPaint::Join
+ uint8_t fBlendMode; // SkBlendMode
+ uint8_t fPADDING = 0;
+ bool operator==(const SkPDFStrokeGraphicState& o) const { return !memcmp(this, &o, sizeof(o)); }
+ bool operator!=(const SkPDFStrokeGraphicState& o) const { return !(*this == o); }
+};
+SK_END_REQUIRE_DENSE
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFFillGraphicState {
+ SkScalar fAlpha;
+ uint8_t fBlendMode;
+ uint8_t fPADDING[3] = {0, 0, 0};
+ bool operator==(const SkPDFFillGraphicState& o) const { return !memcmp(this, &o, sizeof(o)); }
+ bool operator!=(const SkPDFFillGraphicState& o) const { return !(*this == o); }
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
new file mode 100644
index 0000000000..210202db4d
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMakeCIDGlyphWidthsArray.h"
+
+#include "include/core/SkPaint.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+
+#include <algorithm>
+#include <vector>
+
+// TODO(halcanary): Write unit tests for SkPDFMakeCIDGlyphWidthsArray().
+
+// TODO(halcanary): The logic in this file originated in several
+// disparate places. I feel sure that someone could simplify this
+// down to a single easy-to-read function.
+
+namespace {
+
+// scale from em-units to base-1000, returning as a SkScalar
+SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ if (emSize == 1000) {
+ return scaled;
+ } else {
+ return scaled * 1000 / emSize;
+ }
+}
+
+SkScalar scale_from_font_units(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+// Unfortunately poppler does not appear to respect the default width setting.
+#if defined(SK_PDF_CAN_USE_DW)
+int16_t findMode(SkSpan<const int16_t> advances) {
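+    // Note: callers pass a sorted span, so equal advances are adjacent and a
+    // single pass over runs of equal values finds the mode.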
+ if (advances.empty()) {
+ return 0;
+ }
+
+ int16_t previousAdvance = advances[0];
+ int16_t currentModeAdvance = advances[0];
+ size_t currentCount = 1;
+ size_t currentModeCount = 1;
+
+ for (size_t i = 1; i < advances.size(); ++i) {
+ if (advances[i] == previousAdvance) {
+ ++currentCount;
+ } else {
+ if (currentCount > currentModeCount) {
+ currentModeAdvance = previousAdvance;
+ currentModeCount = currentCount;
+ }
+ previousAdvance = advances[i];
+ currentCount = 1;
+ }
+ }
+
+ return currentCount > currentModeCount ? previousAdvance : currentModeAdvance;
+}
+#endif
+} // namespace
+
+/** Retrieve advance data for glyphs. Used by the PDF backend. */
+// TODO(halcanary): this function is complex enough to need its logic
+// tested with unit tests.
+std::unique_ptr<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(const SkTypeface& typeface,
+ const SkPDFGlyphUse& subset,
+ SkScalar* defaultAdvance) {
+ // There are two ways of expressing advances
+ //
+ // range: " gfid [adv.ances adv.ances ... adv.ances]"
+ // run: " gfid gfid adv.ances"
+ //
+ // Assuming that on average
+ // the ASCII representation of an advance plus a space is 10 characters
+ // the ASCII representation of a glyph id plus a space is 4 characters
+ // the ASCII representation of unused gid plus a space in a range is 2 characters
+ //
+ // When not in a range or run
+ // a. Skipping don't cares or defaults is a win (trivial)
+ // b. Run wins for 2+ repeats " gid gid adv.ances"
+ // " gid [adv.ances adv.ances]"
+ // rule: 2+ repeats create run as long as possible, else start range
+ //
+ // When in a range
+ // Cost of stopping and starting a range is 8 characters "] gid ["
+ // c. Skipping defaults is always a win " adv.ances"
+ // rule: end range if default seen
+ // d. Skipping 4+ don't cares is a win " 0 0 0 0"
+ // rule: end range if 4+ don't cares
+ // Cost of stop and start range plus run is 28 characters "] gid gid adv.ances gid ["
+ // e. Switching for 2+ repeats and 4+ don't cares wins " 0 0 adv.ances 0 0 adv.ances"
+ // rule: end range for 2+ repeats with 4+ don't cares
+ // f. Switching for 3+ repeats wins " adv.ances adv.ances adv.ances"
+ // rule: end range for 3+ repeats
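+    //
+    // A worked example, for illustration: glyphs 4..6 sharing advance 500,
+    // then glyphs 9 and 10 with distinct advances, would be encoded as
+    //    [4 6 500 9 [600 620]]
+    // (a run, then a range; the caller installs this array as the CIDFont's
+    // /W entry; advance values here are hypothetical).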
+
+ int emSize;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(typeface, &emSize);
+ SkBulkGlyphMetricsAndPaths paths{strikeSpec};
+
+ auto result = SkPDFMakeArray();
+
+ std::vector<SkGlyphID> glyphIDs;
+ subset.getSetValues([&](unsigned index) {
+ glyphIDs.push_back(SkToU16(index));
+ });
+ auto glyphs = paths.glyphs(SkMakeSpan(glyphIDs));
+
+#if defined(SK_PDF_CAN_USE_DW)
+ std::vector<int16_t> advances;
+ advances.reserve(glyphs.size());
+ for (const SkGlyph* glyph : glyphs) {
+ advances.push_back((int16_t)glyph->advanceX());
+ }
+ std::sort(advances.begin(), advances.end());
+ int16_t modeAdvance = findMode(SkMakeSpan(advances));
+ *defaultAdvance = scale_from_font_units(modeAdvance, emSize);
+#else
+ *defaultAdvance = 0;
+#endif
+
+ for (size_t i = 0; i < glyphs.size(); ++i) {
+ int16_t advance = (int16_t)glyphs[i]->advanceX();
+
+#if defined(SK_PDF_CAN_USE_DW)
+ // a. Skipping don't cares or defaults is a win (trivial)
+ if (advance == modeAdvance) {
+ continue;
+ }
+#endif
+
+ // b. 2+ repeats create run as long as possible, else start range
+ {
+ size_t j = i + 1; // j is always one past the last known repeat
+ for (; j < glyphs.size(); ++j) {
+ int16_t next_advance = (int16_t)glyphs[j]->advanceX();
+ if (advance != next_advance) {
+ break;
+ }
+ }
+ if (j - i >= 2) {
+ result->appendInt(glyphs[i]->getGlyphID());
+ result->appendInt(glyphs[j - 1]->getGlyphID());
+ result->appendScalar(scale_from_font_units(advance, emSize));
+ i = j - 1;
+ continue;
+ }
+ }
+
+ {
+ result->appendInt(glyphs[i]->getGlyphID());
+ auto advanceArray = SkPDFMakeArray();
+ advanceArray->appendScalar(scale_from_font_units(advance, emSize));
+ size_t j = i + 1; // j is always one past the last output
+ for (; j < glyphs.size(); ++j) {
+ advance = (int16_t)glyphs[j]->advanceX();
+#if defined(SK_PDF_CAN_USE_DW)
+ // c. end range if default seen
+ if (advance == modeAdvance) {
+ break;
+ }
+#endif
+
+ int dontCares = glyphs[j]->getGlyphID() - glyphs[j - 1]->getGlyphID() - 1;
+ // d. end range if 4+ don't cares
+ if (dontCares >= 4) {
+ break;
+ }
+
+ int16_t next_advance = 0;
+ // e. end range for 2+ repeats with 4+ don't cares
+ if (j + 1 < glyphs.size()) {
+ next_advance = (int16_t)glyphs[j+1]->advanceX();
+ int next_dontCares = glyphs[j+1]->getGlyphID() - glyphs[j]->getGlyphID() - 1;
+ if (advance == next_advance && dontCares + next_dontCares >= 4) {
+ break;
+ }
+ }
+
+ // f. end range for 3+ repeats
+ if (j + 2 < glyphs.size() && advance == next_advance) {
+ next_advance = (int16_t)glyphs[j+2]->advanceX();
+ if (advance == next_advance) {
+ break;
+ }
+ }
+
+ while (dontCares --> 0) {
+ advanceArray->appendScalar(0);
+ }
+ advanceArray->appendScalar(scale_from_font_units(advance, emSize));
+ }
+ result->appendObject(std::move(advanceArray));
+ i = j - 1;
+ }
+ }
+
+ return result;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
new file mode 100644
index 0000000000..041fe28d8c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeCIDGlyphWidthsArray_DEFINED
+#define SkPDFMakeCIDGlyphWidthsArray_DEFINED
+
+#include "src/pdf/SkPDFTypes.h"
+
+class SkStrike;
+class SkPDFGlyphUse;
+class SkTypeface;
+
+/* PDF 32000-1:2008, page 270: "The array's elements have a variable
+ format that can specify individual widths for consecutive CIDs or
+ one width for a range of CIDs". */
+std::unique_ptr<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(const SkTypeface& typeface,
+ const SkPDFGlyphUse& subset,
+ SkScalar* defaultAdvance);
+
+#endif // SkPDFMakeCIDGlyphWidthsArray_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
new file mode 100644
index 0000000000..edcaa65eb3
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMakeToUnicodeCmap.h"
+
+#include "include/private/SkTo.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/utils/SkUTF.h"
+
+static void append_tounicode_header(SkDynamicMemoryWStream* cmap,
+ bool multibyte) {
+    // 12 dict begin: 12 is an Adobe-suggested value; do not change it.
+    // It is there to prevent old versions of Adobe Reader from malfunctioning.
+ const char* kHeader =
+ "/CIDInit /ProcSet findresource begin\n"
+ "12 dict begin\n"
+ "begincmap\n";
+ cmap->writeText(kHeader);
+
+    // The /CIDSystemInfo must be consistent with the one in
+    // SkPDFFont::populateCIDFont().
+    // We cannot pass the system info object over here because the format is
+    // different. This is not a reference object.
+ const char* kSysInfo =
+ "/CIDSystemInfo\n"
+ "<< /Registry (Adobe)\n"
+ "/Ordering (UCS)\n"
+ "/Supplement 0\n"
+ ">> def\n";
+ cmap->writeText(kSysInfo);
+
+    // The CMapName must be consistent with the /CIDSystemInfo above.
+    // /CMapType 2 means ToUnicode.
+    // The codespace range just tells the PDF processor the valid range.
+ const char* kTypeInfoHeader =
+ "/CMapName /Adobe-Identity-UCS def\n"
+ "/CMapType 2 def\n"
+ "1 begincodespacerange\n";
+ cmap->writeText(kTypeInfoHeader);
+ if (multibyte) {
+ cmap->writeText("<0000> <FFFF>\n");
+ } else {
+ cmap->writeText("<00> <FF>\n");
+ }
+ cmap->writeText("endcodespacerange\n");
+}
+
+static void append_cmap_footer(SkDynamicMemoryWStream* cmap) {
+ const char kFooter[] =
+ "endcmap\n"
+ "CMapName currentdict /CMap defineresource pop\n"
+ "end\n"
+ "end";
+ cmap->writeText(kFooter);
+}
+
+namespace {
+struct BFChar {
+ SkGlyphID fGlyphId;
+ SkUnichar fUnicode;
+};
+
+struct BFRange {
+ SkGlyphID fStart;
+ SkGlyphID fEnd;
+ SkUnichar fUnicode;
+};
+} // namespace
+
+static void write_glyph(SkDynamicMemoryWStream* cmap,
+ bool multiByte,
+ SkGlyphID gid) {
+ if (multiByte) {
+ SkPDFUtils::WriteUInt16BE(cmap, gid);
+ } else {
+ SkPDFUtils::WriteUInt8(cmap, SkToU8(gid));
+ }
+}
+
+static void append_bfchar_section(const std::vector<BFChar>& bfchar,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+    // The PDF spec requires that every bf* list have at most 100 entries.
+ for (size_t i = 0; i < bfchar.size(); i += 100) {
+ int count = SkToInt(bfchar.size() - i);
+ count = SkMin32(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfchar\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfchar[i + j].fGlyphId);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfchar[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfchar\n");
+ }
+}
+
+static void append_bfrange_section(const std::vector<BFRange>& bfrange,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+    // The PDF spec requires that every bf* list have at most 100 entries.
+ for (size_t i = 0; i < bfrange.size(); i += 100) {
+ int count = SkToInt(bfrange.size() - i);
+ count = SkMin32(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfrange\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfrange[i + j].fStart);
+ cmap->writeText("> <");
+ write_glyph(cmap, multiByte, bfrange[i + j].fEnd);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfrange[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfrange\n");
+ }
+}
+
+// Generate <bfchar> and <bfrange> table according to PDF spec 1.4 and Adobe
+// Technote 5014.
+// The function is not static so we can test it in unit tests.
+//
+// The current implementation guarantees that bfchar and bfrange entries do
+// not overlap.
+//
+// The current implementation does not attempt aggressive optimizations for
+// the following case, because the specification is not clear.
+//
+// 4 beginbfchar 1 beginbfchar
+// <0003> <0013> <0020> <0014>
+// <0005> <0015> to endbfchar
+// <0007> <0017> 1 beginbfrange
+// <0020> <0014> <0003> <0007> <0013>
+// endbfchar endbfrange
+//
+// Adobe Technote 5014 said: "Code mappings (unlike codespace ranges) may
+// overlap, but succeeding maps supersede preceding maps."
+//
+// When searching text in a PDF, bfrange takes precedence, so typing char id
+// 0x0014 in the search box will find glyph id 0x0004 first. However, the spec
+// does not say how this kind of conflict should be resolved.
+//
+// For the worst case (65536 contiguous Unicode code points, of which we use
+// every other one), the possible savings from aggressive optimization is
+// 416KB pre-compressed, which does not provide enough motivation to
+// implement it.
+void SkPDFAppendCmapSections(const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ int glyphOffset = 0;
+ if (!multiByteGlyphs) {
+ glyphOffset = firstGlyphID - 1;
+ }
+
+ std::vector<BFChar> bfcharEntries;
+ std::vector<BFRange> bfrangeEntries;
+
+ BFRange currentRangeEntry = {0, 0, 0};
+ bool rangeEmpty = true;
+ const int limit = (int)lastGlyphID + 1 - glyphOffset;
+
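+    // Note: the loop below deliberately runs one index past `limit`; on that
+    // final pass inSubset is false, which flushes any still-open
+    // bfchar/bfrange entry.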
+ for (int i = firstGlyphID - glyphOffset; i < limit + 1; ++i) {
+ SkGlyphID gid = i + glyphOffset;
+ bool inSubset = i < limit && (subset == nullptr || subset->has(gid));
+ if (!rangeEmpty) {
+ // PDF spec requires bfrange not changing the higher byte,
+ // e.g. <1035> <10FF> <2222> is ok, but
+ // <1035> <1100> <2222> is no good
+ bool inRange =
+ i == currentRangeEntry.fEnd + 1 &&
+ i >> 8 == currentRangeEntry.fStart >> 8 &&
+ i < limit &&
+ glyphToUnicode[gid] ==
+ currentRangeEntry.fUnicode + i - currentRangeEntry.fStart;
+ if (!inSubset || !inRange) {
+ if (currentRangeEntry.fEnd > currentRangeEntry.fStart) {
+ bfrangeEntries.push_back(currentRangeEntry);
+ } else {
+ bfcharEntries.push_back({currentRangeEntry.fStart, currentRangeEntry.fUnicode});
+ }
+ rangeEmpty = true;
+ }
+ }
+ if (inSubset) {
+ currentRangeEntry.fEnd = i;
+ if (rangeEmpty) {
+ currentRangeEntry.fStart = i;
+ currentRangeEntry.fUnicode = glyphToUnicode[gid];
+ rangeEmpty = false;
+ }
+ }
+ }
+
+    // The spec requires that all bfchar entries for a font come before its
+    // bfrange entries.
+ append_bfchar_section(bfcharEntries, multiByteGlyphs, cmap);
+ append_bfrange_section(bfrangeEntries, multiByteGlyphs, cmap);
+}
+
+std::unique_ptr<SkStreamAsset> SkPDFMakeToUnicodeCmap(
+ const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ SkDynamicMemoryWStream cmap;
+ append_tounicode_header(&cmap, multiByteGlyphs);
+ SkPDFAppendCmapSections(glyphToUnicode, subset, &cmap, multiByteGlyphs,
+ firstGlyphID, lastGlyphID);
+ append_cmap_footer(&cmap);
+ return cmap.detachAsStream();
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
new file mode 100644
index 0000000000..b77f23de16
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeToUnicodeCmap_DEFINED
+#define SkPDFMakeToUnicodeCmap_DEFINED
+
+#include "include/core/SkStream.h"
+#include "src/pdf/SkPDFFont.h"
+
+std::unique_ptr<SkStreamAsset> SkPDFMakeToUnicodeCmap(
+ const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+// Exposed for unit testing.
+void SkPDFAppendCmapSections(const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+#endif // SkPDFMakeToUnicodeCmap_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
new file mode 100644
index 0000000000..582fc1e5b7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMetadata.h"
+
+#include "include/core/SkMilestone.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMD5.h"
+#include "src/core/SkUtils.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <utility>
+
+#define SKPDF_STRING(X) SKPDF_STRING_IMPL(X)
+#define SKPDF_STRING_IMPL(X) #X
+#define SKPDF_PRODUCER "Skia/PDF m" SKPDF_STRING(SK_MILESTONE)
+#define SKPDF_CUSTOM_PRODUCER_KEY "ProductionLibrary"
+
+static constexpr SkTime::DateTime kZeroTime = {0, 0, 0, 0, 0, 0, 0, 0};
+
+static bool operator!=(const SkTime::DateTime& u, const SkTime::DateTime& v) {
+ return u.fTimeZoneMinutes != v.fTimeZoneMinutes ||
+ u.fYear != v.fYear ||
+ u.fMonth != v.fMonth ||
+ u.fDayOfWeek != v.fDayOfWeek ||
+ u.fDay != v.fDay ||
+ u.fHour != v.fHour ||
+ u.fMinute != v.fMinute ||
+ u.fSecond != v.fSecond;
+}
+
+static SkString pdf_date(const SkTime::DateTime& dt) {
+ int timeZoneMinutes = SkToInt(dt.fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ return SkStringPrintf(
+ "D:%04u%02u%02u%02u%02u%02u%c%02d'%02d'",
+ static_cast<unsigned>(dt.fYear), static_cast<unsigned>(dt.fMonth),
+ static_cast<unsigned>(dt.fDay), static_cast<unsigned>(dt.fHour),
+ static_cast<unsigned>(dt.fMinute),
+ static_cast<unsigned>(dt.fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+}
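+// For illustration: a DateTime of 2024-04-28 14:29:10 at UTC-07:00 formats
+// as "D:20240428142910-07'00'".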
+
+static bool utf8_is_pdfdocencoding(const char* src, size_t len) {
+ const uint8_t* end = (const uint8_t*)src + len;
+ for (const uint8_t* ptr = (const uint8_t*)src; ptr < end; ++ptr) {
+ uint8_t v = *ptr;
+        // See Table D.2 (PDFDocEncoding Character Set) in the PDF32000_2008 spec.
+ if ((v > 23 && v < 32) || v > 126) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void write_utf16be(char** ptr, uint16_t value) {
+ *(*ptr)++ = (value >> 8);
+ *(*ptr)++ = (value & 0xFF);
+}
+
+// Please note: this "abuses" the SkString, which "should" only hold UTF-8.
+// But the SkString is written as if it is really just a ref-counted array of
+// chars, so this works, as long as we handle endianness and conversions ourselves.
+//
+// Input: UTF-8
+// Output: UTF-16-BE
+static SkString to_utf16be(const char* src, size_t len) {
+ SkString ret;
+ const char* const end = src + len;
+ size_t n = 1; // BOM
+ for (const char* ptr = src; ptr < end;) {
+ SkUnichar u = SkUTF::NextUTF8(&ptr, end);
+ if (u < 0) {
+ break;
+ }
+ n += SkUTF::ToUTF16(u);
+ }
+ ret.resize(2 * n);
+ char* out = ret.writable_str();
+ write_utf16be(&out, 0xFEFF); // BOM
+ for (const char* ptr = src; ptr < end;) {
+ SkUnichar u = SkUTF::NextUTF8(&ptr, end);
+ if (u < 0) {
+ break;
+ }
+ uint16_t utf16[2];
+ size_t l = SkUTF::ToUTF16(u, utf16);
+ write_utf16be(&out, utf16[0]);
+ if (l == 2) {
+ write_utf16be(&out, utf16[1]);
+ }
+ }
+ SkASSERT(out == ret.writable_str() + 2 * n);
+ return ret;
+}
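+// For illustration: to_utf16be("\xC3\xA9", 2) (UTF-8 for U+00E9) returns the
+// four bytes FE FF 00 E9: a byte-order mark followed by one UTF-16BE unit.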
+
+// Input: UTF-8
+// Output: UTF-16-BE, or PDFDocEncoding (when the text is limited to
+// characters where PDFDocEncoding and ASCII agree).
+//
+// See sections 14.3.3 (Document Information Dictionary) and 7.9.2.2 (Text String Type)
+// of the PDF32000_2008 spec.
+static SkString convert(const SkString& s) {
+ return utf8_is_pdfdocencoding(s.c_str(), s.size()) ? s : to_utf16be(s.c_str(), s.size());
+}
+static SkString convert(const char* src) {
+ size_t len = strlen(src);
+ return utf8_is_pdfdocencoding(src, len) ? SkString(src, len) : to_utf16be(src, len);
+}
+
+namespace {
+static const struct {
+ const char* const key;
+ SkString SkPDF::Metadata::*const valuePtr;
+} gMetadataKeys[] = {
+ {"Title", &SkPDF::Metadata::fTitle},
+ {"Author", &SkPDF::Metadata::fAuthor},
+ {"Subject", &SkPDF::Metadata::fSubject},
+ {"Keywords", &SkPDF::Metadata::fKeywords},
+ {"Creator", &SkPDF::Metadata::fCreator},
+};
+} // namespace
+
+std::unique_ptr<SkPDFObject> SkPDFMetadata::MakeDocumentInformationDict(
+ const SkPDF::Metadata& metadata) {
+ auto dict = SkPDFMakeDict();
+ for (const auto keyValuePtr : gMetadataKeys) {
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ if (value.size() > 0) {
+ dict->insertString(keyValuePtr.key, convert(value));
+ }
+ }
+ if (metadata.fProducer.isEmpty()) {
+ dict->insertString("Producer", convert(SKPDF_PRODUCER));
+ } else {
+ dict->insertString("Producer", convert(metadata.fProducer));
+ dict->insertString(SKPDF_CUSTOM_PRODUCER_KEY, convert(SKPDF_PRODUCER));
+ }
+ if (metadata.fCreation != kZeroTime) {
+ dict->insertString("CreationDate", pdf_date(metadata.fCreation));
+ }
+ if (metadata.fModified != kZeroTime) {
+ dict->insertString("ModDate", pdf_date(metadata.fModified));
+ }
+ return dict;
+}
+
+SkUUID SkPDFMetadata::CreateUUID(const SkPDF::Metadata& metadata) {
+ // The main requirement is for the UUID to be unique; the exact
+ // format of the data that will be hashed is not important.
+ SkMD5 md5;
+ const char uuidNamespace[] = "org.skia.pdf\n";
+ md5.writeText(uuidNamespace);
+ double msec = SkTime::GetMSecs();
+ md5.write(&msec, sizeof(msec));
+ SkTime::DateTime dateTime;
+ SkTime::GetDateTime(&dateTime);
+ md5.write(&dateTime, sizeof(dateTime));
+ md5.write(&metadata.fCreation, sizeof(metadata.fCreation));
+ md5.write(&metadata.fModified, sizeof(metadata.fModified));
+
+ for (const auto keyValuePtr : gMetadataKeys) {
+ md5.writeText(keyValuePtr.key);
+ md5.write("\037", 1);
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ md5.write(value.c_str(), value.size());
+ md5.write("\036", 1);
+ }
+ SkMD5::Digest digest = md5.finish();
+ // See RFC 4122, page 6-7.
+    digest.data[6] = (digest.data[6] & 0x0F) | 0x30;  // version 3 (MD5-based)
+    digest.data[8] = (digest.data[8] & 0x3F) | 0x80;  // RFC 4122 variant bits
+ static_assert(sizeof(digest) == sizeof(SkUUID), "uuid_size");
+ SkUUID uuid;
+ memcpy(&uuid, &digest, sizeof(digest));
+ return uuid;
+}
+
+std::unique_ptr<SkPDFObject> SkPDFMetadata::MakePdfId(const SkUUID& doc,
+ const SkUUID& instance) {
+ // /ID [ <81b14aafa313db63dbd6f981e49f94f4>
+ // <81b14aafa313db63dbd6f981e49f94f4> ]
+ auto array = SkPDFMakeArray();
+ static_assert(sizeof(SkUUID) == 16, "uuid_size");
+ array->appendString(
+ SkString(reinterpret_cast<const char*>(&doc), sizeof(SkUUID)));
+ array->appendString(
+ SkString(reinterpret_cast<const char*>(&instance), sizeof(SkUUID)));
+ return array;
+}
+
+// Convert a block of memory to hexadecimal. The input and output pointers
+// are advanced past the end of the converted range.
+static void hexify(const uint8_t** inputPtr, char** outputPtr, int count) {
+ SkASSERT(inputPtr && *inputPtr);
+ SkASSERT(outputPtr && *outputPtr);
+ while (count-- > 0) {
+ uint8_t value = *(*inputPtr)++;
+ *(*outputPtr)++ = SkHexadecimalDigits::gLower[value >> 4];
+ *(*outputPtr)++ = SkHexadecimalDigits::gLower[value & 0xF];
+ }
+}
+
+static SkString uuid_to_string(const SkUUID& uuid) {
+ // 8-4-4-4-12
+ char buffer[36]; // [32 + 4]
+ char* ptr = buffer;
+ const uint8_t* data = uuid.fData;
+ hexify(&data, &ptr, 4);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 6);
+ SkASSERT(ptr == buffer + 36);
+ SkASSERT(data == uuid.fData + 16);
+ return SkString(buffer, 36);
+}
+
+namespace {
+class PDFXMLObject final : public SkPDFObject {
+public:
+ PDFXMLObject(SkString xml) : fXML(std::move(xml)) {}
+ void emitObject(SkWStream* stream) const override {
+ SkPDFDict dict("Metadata");
+ dict.insertName("Subtype", "XML");
+ dict.insertInt("Length", fXML.size());
+ dict.emitObject(stream);
+ static const char streamBegin[] = " stream\n";
+ stream->writeText(streamBegin);
+ // Do not compress this. The standard requires that a
+ // program that does not understand PDF can grep for
+ // "<?xpacket" and extract the entire XML.
+ stream->write(fXML.c_str(), fXML.size());
+ static const char streamEnd[] = "\nendstream";
+ stream->writeText(streamEnd);
+ }
+
+private:
+ const SkString fXML;
+};
+} // namespace
+
+static int count_xml_escape_size(const SkString& input) {
+ int extra = 0;
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ extra += 4; // strlen("&amp;") - strlen("&")
+ } else if (input[i] == '<') {
+ extra += 3; // strlen("&lt;") - strlen("<")
+ }
+ }
+ return extra;
+}
+
+const SkString escape_xml(const SkString& input,
+ const char* before = nullptr,
+ const char* after = nullptr) {
+ if (input.size() == 0) {
+ return input;
+ }
+ // "&" --> "&amp;" and "<" --> "&lt;"
+ // text is assumed to be in UTF-8
+ // all strings are xml content, not attribute values.
+ size_t beforeLen = before ? strlen(before) : 0;
+ size_t afterLen = after ? strlen(after) : 0;
+ int extra = count_xml_escape_size(input);
+ SkString output(input.size() + extra + beforeLen + afterLen);
+ char* out = output.writable_str();
+ if (before) {
+ strncpy(out, before, beforeLen);
+ out += beforeLen;
+ }
+ static const char kAmp[] = "&amp;";
+ static const char kLt[] = "&lt;";
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ strncpy(out, kAmp, strlen(kAmp));
+ out += strlen(kAmp);
+ } else if (input[i] == '<') {
+ strncpy(out, kLt, strlen(kLt));
+ out += strlen(kLt);
+ } else {
+ *out++ = input[i];
+ }
+ }
+ if (after) {
+ strncpy(out, after, afterLen);
+ out += afterLen;
+ }
+ // Validate that we haven't written outside of our string.
+ SkASSERT(out == &output.writable_str()[output.size()]);
+ *out = '\0';
+ return output;
+}
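+// For illustration: escape_xml(SkString("AT&T <3"), "<dc:title>",
+// "</dc:title>\n") returns "<dc:title>AT&amp;T &lt;3</dc:title>\n".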
+
+SkPDFIndirectReference SkPDFMetadata::MakeXMPObject(
+ const SkPDF::Metadata& metadata,
+ const SkUUID& doc,
+ const SkUUID& instance,
+ SkPDFDocument* docPtr) {
+ static const char templateString[] =
+ "<?xpacket begin=\"\" id=\"W5M0MpCehiHzreSzNTczkc9d\"?>\n"
+ "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\"\n"
+ " x:xmptk=\"Adobe XMP Core 5.4-c005 78.147326, "
+ "2012/08/23-13:03:03\">\n"
+ "<rdf:RDF "
+ "xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
+ "<rdf:Description rdf:about=\"\"\n"
+ " xmlns:xmp=\"http://ns.adobe.com/xap/1.0/\"\n"
+ " xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n"
+ " xmlns:xmpMM=\"http://ns.adobe.com/xap/1.0/mm/\"\n"
+ " xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\"\n"
+ " xmlns:pdfaid=\"http://www.aiim.org/pdfa/ns/id/\">\n"
+ "<pdfaid:part>2</pdfaid:part>\n"
+ "<pdfaid:conformance>B</pdfaid:conformance>\n"
+ "%s" // ModifyDate
+ "%s" // CreateDate
+ "%s" // xmp:CreatorTool
+ "<dc:format>application/pdf</dc:format>\n"
+ "%s" // dc:title
+ "%s" // dc:description
+ "%s" // author
+ "%s" // keywords
+ "<xmpMM:DocumentID>uuid:%s</xmpMM:DocumentID>\n"
+ "<xmpMM:InstanceID>uuid:%s</xmpMM:InstanceID>\n"
+ "%s" // pdf:Producer
+ "%s" // pdf:Keywords
+ "</rdf:Description>\n"
+ "</rdf:RDF>\n"
+ "</x:xmpmeta>\n" // Note: the standard suggests 4k of padding.
+ "<?xpacket end=\"w\"?>\n";
+
+ SkString creationDate;
+ SkString modificationDate;
+ if (metadata.fCreation != kZeroTime) {
+ SkString tmp;
+ metadata.fCreation.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ // YYYY-mm-ddTHH:MM:SS[+|-]ZZ:ZZ; no need to escape
+ creationDate = SkStringPrintf("<xmp:CreateDate>%s</xmp:CreateDate>\n",
+ tmp.c_str());
+ }
+ if (metadata.fModified != kZeroTime) {
+ SkString tmp;
+ metadata.fModified.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ modificationDate = SkStringPrintf(
+ "<xmp:ModifyDate>%s</xmp:ModifyDate>\n", tmp.c_str());
+ }
+ SkString title =
+ escape_xml(metadata.fTitle,
+ "<dc:title><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:title>\n");
+ SkString author =
+ escape_xml(metadata.fAuthor, "<dc:creator><rdf:Bag><rdf:li>",
+ "</rdf:li></rdf:Bag></dc:creator>\n");
+ // TODO: in theory, XMP can support multiple authors. Split on a delimiter?
+ SkString subject = escape_xml(
+ metadata.fSubject,
+ "<dc:description><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:description>\n");
+ SkString keywords1 =
+ escape_xml(metadata.fKeywords, "<dc:subject><rdf:Bag><rdf:li>",
+ "</rdf:li></rdf:Bag></dc:subject>\n");
+ SkString keywords2 = escape_xml(metadata.fKeywords, "<pdf:Keywords>",
+ "</pdf:Keywords>\n");
+ // TODO: in theory, keywords can be a list too.
+
+ SkString producer("<pdf:Producer>" SKPDF_PRODUCER "</pdf:Producer>\n");
+ if (!metadata.fProducer.isEmpty()) {
+ // TODO: register a developer prefix to make
+ // <skia:SKPDF_CUSTOM_PRODUCER_KEY> a real XML tag.
+ producer = escape_xml(
+ metadata.fProducer, "<pdf:Producer>",
+ "</pdf:Producer>\n<!-- <skia:" SKPDF_CUSTOM_PRODUCER_KEY ">"
+ SKPDF_PRODUCER "</skia:" SKPDF_CUSTOM_PRODUCER_KEY "> -->\n");
+ }
+
+ SkString creator = escape_xml(metadata.fCreator, "<xmp:CreatorTool>",
+ "</xmp:CreatorTool>\n");
+ SkString documentID = uuid_to_string(doc); // no need to escape
+ SkASSERT(0 == count_xml_escape_size(documentID));
+ SkString instanceID = uuid_to_string(instance);
+ SkASSERT(0 == count_xml_escape_size(instanceID));
+
+
+ auto value = SkStringPrintf(
+ templateString, modificationDate.c_str(), creationDate.c_str(),
+ creator.c_str(), title.c_str(), subject.c_str(), author.c_str(),
+ keywords1.c_str(), documentID.c_str(), instanceID.c_str(),
+ producer.c_str(), keywords2.c_str());
+
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict("Metadata");
+ dict->insertName("Subtype", "XML");
+ return SkPDFStreamOut(std::move(dict),
+ SkMemoryStream::MakeCopy(value.c_str(), value.size()),
+ docPtr, false);
+}
+
+#undef SKPDF_CUSTOM_PRODUCER_KEY
+#undef SKPDF_PRODUCER
+#undef SKPDF_STRING
+#undef SKPDF_STRING_IMPL
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.h b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
new file mode 100644
index 0000000000..590caff735
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFMetadata_DEFINED
+#define SkPDFMetadata_DEFINED
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkUUID.h"
+
+class SkPDFObject;
+
+namespace SkPDFMetadata {
+std::unique_ptr<SkPDFObject> MakeDocumentInformationDict(const SkPDF::Metadata&);
+
+SkUUID CreateUUID(const SkPDF::Metadata&);
+
+std::unique_ptr<SkPDFObject> MakePdfId(const SkUUID& doc, const SkUUID& instance);
+
+SkPDFIndirectReference MakeXMPObject(const SkPDF::Metadata& metadata,
+ const SkUUID& doc,
+ const SkUUID& instance,
+ SkPDFDocument*);
+}
+#endif // SkPDFMetadata_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
new file mode 100644
index 0000000000..c143f8d816
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+
+// Sanity check that the values of enum SkPDFResourceType correspond to the
+// expected values as defined in the arrays below.
+// If these static_asserts fail, you may need to update the
+// kResourceTypePrefixes and kResourceTypeNames arrays below.
+static_assert(0 == (int)SkPDFResourceType::kExtGState, "resource_type_mismatch");
+static_assert(1 == (int)SkPDFResourceType::kPattern, "resource_type_mismatch");
+static_assert(2 == (int)SkPDFResourceType::kXObject, "resource_type_mismatch");
+static_assert(3 == (int)SkPDFResourceType::kFont, "resource_type_mismatch");
+
+// One extra character for the Prefix.
+constexpr size_t kMaxResourceNameLength = 1 + SkStrAppendS32_MaxSize;
+
+// Returns a pointer just past the end of what was written into `dst`.
+static char* get_resource_name(char dst[kMaxResourceNameLength], SkPDFResourceType type, int key) {
+ static const char kResourceTypePrefixes[] = {
+ 'G', // kExtGState
+ 'P', // kPattern
+ 'X', // kXObject
+ 'F' // kFont
+ };
+ SkASSERT((unsigned)type < SK_ARRAY_COUNT(kResourceTypePrefixes));
+ dst[0] = kResourceTypePrefixes[(unsigned)type];
+ return SkStrAppendS32(dst + 1, key);
+}
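+// For illustration: type == SkPDFResourceType::kXObject with key 3 produces
+// the name "X3", written into a content stream as "/X3".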
+
+void SkPDFWriteResourceName(SkWStream* dst, SkPDFResourceType type, int key) {
+ // One extra character for the leading '/'.
+ char buffer[1 + kMaxResourceNameLength];
+ buffer[0] = '/';
+ char* end = get_resource_name(buffer + 1, type, key);
+ dst->write(buffer, (size_t)(end - buffer));
+}
+
+static const char* resource_name(SkPDFResourceType type) {
+ static const char* kResourceTypeNames[] = {
+ "ExtGState",
+ "Pattern",
+ "XObject",
+ "Font"
+ };
+ SkASSERT((unsigned)type < SK_ARRAY_COUNT(kResourceTypeNames));
+ return kResourceTypeNames[(unsigned)type];
+}
+
+static SkString resource(SkPDFResourceType type, int index) {
+ char buffer[kMaxResourceNameLength];
+ char* end = get_resource_name(buffer, type, index);
+ return SkString(buffer, (size_t)(end - buffer));
+}
+
+static void add_subdict(const std::vector<SkPDFIndirectReference>& resourceList,
+ SkPDFResourceType type,
+ SkPDFDict* dst) {
+ if (!resourceList.empty()) {
+ auto resources = SkPDFMakeDict();
+ for (SkPDFIndirectReference ref : resourceList) {
+ resources->insertRef(resource(type, ref.fValue), ref);
+ }
+ dst->insertObject(resource_name(type), std::move(resources));
+ }
+}
+
+static std::unique_ptr<SkPDFArray> make_proc_set() {
+ auto procSets = SkPDFMakeArray();
+ static const char kProcs[][7] = { "PDF", "Text", "ImageB", "ImageC", "ImageI"};
+ procSets->reserve(SK_ARRAY_COUNT(kProcs));
+ for (const char* proc : kProcs) {
+ procSets->appendName(proc);
+ }
+ return procSets;
+}
+
+std::unique_ptr<SkPDFDict> SkPDFMakeResourceDict(
+ const std::vector<SkPDFIndirectReference>& graphicStateResources,
+ const std::vector<SkPDFIndirectReference>& shaderResources,
+ const std::vector<SkPDFIndirectReference>& xObjectResources,
+ const std::vector<SkPDFIndirectReference>& fontResources) {
+ auto dict = SkPDFMakeDict();
+ dict->insertObject("ProcSet", make_proc_set());
+ add_subdict(graphicStateResources, SkPDFResourceType::kExtGState, dict.get());
+ add_subdict(shaderResources, SkPDFResourceType::kPattern, dict.get());
+ add_subdict(xObjectResources, SkPDFResourceType::kXObject, dict.get());
+ add_subdict(fontResources, SkPDFResourceType::kFont, dict.get());
+ return dict;
+}
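+// For illustration (indirect object numbers hypothetical): the resulting
+// dict serializes roughly as
+//   << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI]
+//      /ExtGState << /G5 5 0 R >>
+//      /Font << /F9 9 0 R >> >>
+// Note that each resource key embeds its object number (ref.fValue).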
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.h b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
new file mode 100644
index 0000000000..4cd9dfa1c3
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFResourceDict_DEFINED
+#define SkPDFResourceDict_DEFINED
+
+#include "src/pdf/SkPDFFont.h"
+
+#include <vector>
+
+class SkPDFDict;
+class SkPDFObject;
+class SkWStream;
+
+enum class SkPDFResourceType {
+ kExtGState = 0,
+ kPattern = 1,
+ kXObject = 2,
+ kFont = 3,
+ // These additional types are defined by the spec, but not
+ // currently used by Skia: ColorSpace, Shading, Properties
+};
+
+
+/** Create a PDF resource dictionary.
+ * The full set of ProcSet entries is automatically created for backwards
+ * compatibility, as recommended by the PDF spec.
+ *
+ * Any arguments can be nullptr.
+ */
+std::unique_ptr<SkPDFDict> SkPDFMakeResourceDict(
+ const std::vector<SkPDFIndirectReference>& graphicStateResources,
+ const std::vector<SkPDFIndirectReference>& shaderResources,
+ const std::vector<SkPDFIndirectReference>& xObjectResources,
+ const std::vector<SkPDFIndirectReference>& fontResources);
+
+/**
+ * Writes the name for the resource that will be generated by the resource
+ * dict.
+ *
+ * @param type The type of resource being entered
+ * @param key The resource key, should be unique within its type.
+ */
+void SkPDFWriteResourceName(SkWStream*, SkPDFResourceType type, int key);
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.cpp b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
new file mode 100644
index 0000000000..c08a76f734
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
@@ -0,0 +1,375 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFShader.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkSurface.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkTemplates.h"
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGradientShader.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static void draw(SkCanvas* canvas, const SkImage* image, SkColor4f paintColor) {
+ SkPaint paint(paintColor);
+ canvas->drawImage(image, 0, 0, &paint);
+}
+
+static SkBitmap to_bitmap(const SkImage* image) {
+ SkBitmap bitmap;
+ if (!SkPDFUtils::ToBitmap(image, &bitmap)) {
+ bitmap.allocN32Pixels(image->width(), image->height());
+ bitmap.eraseColor(0x00000000);
+ }
+ return bitmap;
+}
+
+static void draw_matrix(SkCanvas* canvas, const SkImage* image,
+ const SkMatrix& matrix, SkColor4f paintColor) {
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ draw(canvas, image, paintColor);
+}
+
+static void draw_bitmap_matrix(SkCanvas* canvas, const SkBitmap& bm,
+ const SkMatrix& matrix, SkColor4f paintColor) {
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ SkPaint paint(paintColor);
+ canvas->drawBitmap(bm, 0, 0, &paint);
+}
+
+static void fill_color_from_bitmap(SkCanvas* canvas,
+ float left, float top, float right, float bottom,
+ const SkBitmap& bitmap, int x, int y, float alpha) {
+ SkRect rect{left, top, right, bottom};
+ if (!rect.isEmpty()) {
+ SkColor4f color = SkColor4f::FromColor(bitmap.getColor(x, y));
+ SkPaint paint(SkColor4f{color.fR, color.fG, color.fB, alpha * color.fA});
+ canvas->drawRect(rect, paint);
+ }
+}
+
+static SkMatrix scale_translate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) {
+ SkMatrix m;
+ m.setScaleTranslate(sx, sy, tx, ty);
+ return m;
+}
+
+static bool is_tiled(SkTileMode m) { return SkTileMode::kMirror == m || SkTileMode::kRepeat == m; }
+
+static SkPDFIndirectReference make_image_shader(SkPDFDocument* doc,
+ SkMatrix finalMatrix,
+ SkTileMode tileModesX,
+ SkTileMode tileModesY,
+ SkRect bBox,
+ const SkImage* image,
+ SkColor4f paintColor) {
+    // The image shader pattern cell will be drawn into a separate device
+    // in pattern cell space (no scaling on the bitmap, though there may be
+    // a translation so that all content lands in the device at coordinates > 0).
+
+ // Map clip bounds to shader space to ensure the device is large enough
+ // to handle fake clamping.
+
+ SkRect deviceBounds = bBox;
+ if (!SkPDFUtils::InverseTransformBBox(finalMatrix, &deviceBounds)) {
+ return SkPDFIndirectReference();
+ }
+
+ SkRect bitmapBounds = SkRect::MakeSize(SkSize::Make(image->dimensions()));
+
+ // For tiling modes, the bounds should be extended to include the bitmap,
+ // otherwise the bitmap gets clipped out and the shader is empty and awful.
+ // For clamp modes, we're only interested in the clip region, whether
+ // or not the main bitmap is in it.
+ if (is_tiled(tileModesX) || is_tiled(tileModesY)) {
+ deviceBounds.join(bitmapBounds);
+ }
+
+ SkISize patternDeviceSize = {SkScalarCeilToInt(deviceBounds.width()),
+ SkScalarCeilToInt(deviceBounds.height())};
+ auto patternDevice = sk_make_sp<SkPDFDevice>(patternDeviceSize, doc);
+ SkCanvas canvas(patternDevice);
+
+ SkRect patternBBox = SkRect::MakeSize(SkSize::Make(image->dimensions()));
+ SkScalar width = patternBBox.width();
+ SkScalar height = patternBBox.height();
+
+ // Translate the canvas so that the bitmap origin is at (0, 0).
+ canvas.translate(-deviceBounds.left(), -deviceBounds.top());
+ patternBBox.offset(-deviceBounds.left(), -deviceBounds.top());
+ // Undo the translation in the final matrix
+ finalMatrix.preTranslate(deviceBounds.left(), deviceBounds.top());
+
+ // If the bitmap is out of bounds (i.e. clamp mode where we only see the
+ // stretched sides), canvas will clip this out and the extraneous data
+ // won't be saved to the PDF.
+ draw(&canvas, image, paintColor);
+
+ // Tiling is implied. First we handle mirroring.
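+ // scale_translate(-1, 1, 2 * width, 0) maps x to 2 * width - x, so the
+ // mirrored copy lands in [width, 2 * width]; the y case below is analogous.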
+ if (tileModesX == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(-1, 1, 2 * width, 0), paintColor);
+ patternBBox.fRight += width;
+ }
+ if (tileModesY == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(1, -1, 0, 2 * height), paintColor);
+ patternBBox.fBottom += height;
+ }
+ if (tileModesX == SkTileMode::kMirror && tileModesY == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(-1, -1, 2 * width, 2 * height), paintColor);
+ }
+
+ // Then handle Clamping, which requires expanding the pattern canvas to
+ // cover the entire surfaceBBox.
+
+ SkBitmap bitmap;
+ if (tileModesX == SkTileMode::kClamp || tileModesY == SkTileMode::kClamp) {
+ // For now, the easiest way to access the colors in the corners and sides is
+ // to just make a bitmap from the image.
+ bitmap = to_bitmap(image);
+ }
+
+ // If both x and y are in clamp mode, we start by filling in the corners.
+ // (Which are just rectangles of the corner colors.)
+ if (tileModesX == SkTileMode::kClamp && tileModesY == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+
+ fill_color_from_bitmap(&canvas, deviceBounds.left(), deviceBounds.top(), 0, 0,
+ bitmap, 0, 0, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, width, deviceBounds.top(), deviceBounds.right(), 0,
+ bitmap, bitmap.width() - 1, 0, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, width, height, deviceBounds.right(), deviceBounds.bottom(),
+ bitmap, bitmap.width() - 1, bitmap.height() - 1, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, deviceBounds.left(), height, 0, deviceBounds.bottom(),
+ bitmap, 0, bitmap.height() - 1, paintColor.fA);
+ }
+
+ // Then expand the left, right, top, then bottom.
+ if (tileModesX == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, 1, bitmap.height());
+ if (deviceBounds.left() < 0) {
+ SkBitmap left;
+ SkAssertResult(bitmap.extractSubset(&left, subset));
+
+ SkMatrix leftMatrix = scale_translate(-deviceBounds.left(), 1, deviceBounds.left(), 0);
+ draw_bitmap_matrix(&canvas, left, leftMatrix, paintColor);
+
+ if (tileModesY == SkTileMode::kMirror) {
+ leftMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ leftMatrix.postTranslate(0, 2 * height);
+ draw_bitmap_matrix(&canvas, left, leftMatrix, paintColor);
+ }
+ patternBBox.fLeft = 0;
+ }
+
+ if (deviceBounds.right() > width) {
+ SkBitmap right;
+ subset.offset(bitmap.width() - 1, 0);
+ SkAssertResult(bitmap.extractSubset(&right, subset));
+
+ SkMatrix rightMatrix = scale_translate(deviceBounds.right() - width, 1, width, 0);
+ draw_bitmap_matrix(&canvas, right, rightMatrix, paintColor);
+
+ if (tileModesY == SkTileMode::kMirror) {
+ rightMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ rightMatrix.postTranslate(0, 2 * height);
+ draw_bitmap_matrix(&canvas, right, rightMatrix, paintColor);
+ }
+ patternBBox.fRight = deviceBounds.width();
+ }
+ }
+ if (tileModesX == SkTileMode::kDecal) {
+ if (deviceBounds.left() < 0) {
+ patternBBox.fLeft = 0;
+ }
+ if (deviceBounds.right() > width) {
+ patternBBox.fRight = deviceBounds.width();
+ }
+ }
+
+ if (tileModesY == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, bitmap.width(), 1);
+ if (deviceBounds.top() < 0) {
+ SkBitmap top;
+ SkAssertResult(bitmap.extractSubset(&top, subset));
+
+ SkMatrix topMatrix = scale_translate(1, -deviceBounds.top(), 0, deviceBounds.top());
+ draw_bitmap_matrix(&canvas, top, topMatrix, paintColor);
+
+ if (tileModesX == SkTileMode::kMirror) {
+ topMatrix.postScale(-1, 1);
+ topMatrix.postTranslate(2 * width, 0);
+ draw_bitmap_matrix(&canvas, top, topMatrix, paintColor);
+ }
+ patternBBox.fTop = 0;
+ }
+
+ if (deviceBounds.bottom() > height) {
+ SkBitmap bottom;
+ subset.offset(0, bitmap.height() - 1);
+ SkAssertResult(bitmap.extractSubset(&bottom, subset));
+
+ SkMatrix bottomMatrix = scale_translate(1, deviceBounds.bottom() - height, 0, height);
+ draw_bitmap_matrix(&canvas, bottom, bottomMatrix, paintColor);
+
+ if (tileModesX == SkTileMode::kMirror) {
+ bottomMatrix.postScale(-1, 1);
+ bottomMatrix.postTranslate(2 * width, 0);
+ draw_bitmap_matrix(&canvas, bottom, bottomMatrix, paintColor);
+ }
+ patternBBox.fBottom = deviceBounds.height();
+ }
+ }
+ if (tileModesY == SkTileMode::kDecal) {
+ if (deviceBounds.top() < 0) {
+ patternBBox.fTop = 0;
+ }
+ if (deviceBounds.bottom() > height) {
+ patternBBox.fBottom = deviceBounds.height();
+ }
+ }
+
+ auto imageShader = patternDevice->content();
+ std::unique_ptr<SkPDFDict> resourceDict = patternDevice->makeResourceDict();
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ SkPDFUtils::PopulateTilingPatternDict(dict.get(), patternBBox,
+ std::move(resourceDict), finalMatrix);
+ return SkPDFStreamOut(std::move(dict), std::move(imageShader), doc);
+}
+
+// Generic fallback for unsupported shaders:
+// * allocate a surfaceBBox-sized bitmap
+// * shade the whole area
+// * use the result as a bitmap shader
+static SkPDFIndirectReference make_fallback_shader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor) {
+ // TODO(vandebo) This drops SkComposeShader on the floor. We could
+ // handle compose shader by pulling things up to a layer, drawing with
+ // the first shader, applying the xfer mode and drawing again with the
+ // second shader, then applying the layer to the original drawing.
+
+ SkMatrix shaderTransform = as_SB(shader)->getLocalMatrix();
+
+ // surfaceBBox is in device space. While that's exactly what we
+ // want for sizing our bitmap, we need to map it into
+ // shader space for adjustments (to match
+ // MakeImageShader's behavior).
+ SkRect shaderRect = SkRect::Make(surfaceBBox);
+ if (!SkPDFUtils::InverseTransformBBox(canvasTransform, &shaderRect)) {
+ return SkPDFIndirectReference();
+ }
+ // Clamp the bitmap size to about 1M pixels
+ static const int kMaxBitmapArea = 1024 * 1024;
+ SkScalar bitmapArea = (float)surfaceBBox.width() * (float)surfaceBBox.height();
+ SkScalar rasterScale = 1.0f;
+ if (bitmapArea > (float)kMaxBitmapArea) {
+ rasterScale *= SkScalarSqrt((float)kMaxBitmapArea / bitmapArea);
+ }
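+ // For example, a 2048x2048 surfaceBBox (4M pixels) gets
+ // rasterScale = sqrt(1M / 4M) = 0.5 and is rasterized at 1024x1024.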
+
+ SkISize size = {
+ SkTClamp(SkScalarCeilToInt(rasterScale * surfaceBBox.width()), 1, kMaxBitmapArea),
+ SkTClamp(SkScalarCeilToInt(rasterScale * surfaceBBox.height()), 1, kMaxBitmapArea)};
+ SkSize scale = {SkIntToScalar(size.width()) / shaderRect.width(),
+ SkIntToScalar(size.height()) / shaderRect.height()};
+
+ auto surface = SkSurface::MakeRasterN32Premul(size.width(), size.height());
+ SkASSERT(surface);
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+
+ SkPaint p(paintColor);
+ p.setShader(sk_ref_sp(shader));
+
+ canvas->scale(scale.width(), scale.height());
+ canvas->translate(-shaderRect.x(), -shaderRect.y());
+ canvas->drawPaint(p);
+
+ shaderTransform.setTranslate(shaderRect.x(), shaderRect.y());
+ shaderTransform.preScale(1 / scale.width(), 1 / scale.height());
+
+ sk_sp<SkImage> image = surface->makeImageSnapshot();
+ SkASSERT(image);
+ return make_image_shader(doc,
+ SkMatrix::Concat(canvasTransform, shaderTransform),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ SkRect::Make(surfaceBBox),
+ image.get(),
+ paintColor);
+}
+
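+// If the shader is backed by an alpha-only image (e.g. A8), the paint color
+// tints the result, so it must be preserved; for any other shader only the
+// paint's alpha matters, so the color collapses to black plus that alpha.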
+static SkColor4f adjust_color(SkShader* shader, SkColor4f paintColor) {
+ if (SkImage* img = shader->isAImage(nullptr, (SkTileMode*)nullptr)) {
+ if (img->isAlphaOnly()) {
+ return paintColor;
+ }
+ }
+ return SkColor4f{0, 0, 0, paintColor.fA}; // only preserve the alpha.
+}
+
+SkPDFIndirectReference SkPDFMakeShader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor) {
+ SkASSERT(shader);
+ SkASSERT(doc);
+ if (SkShader::kNone_GradientType != shader->asAGradient(nullptr)) {
+ return SkPDFGradientShader::Make(doc, shader, canvasTransform, surfaceBBox);
+ }
+ if (surfaceBBox.isEmpty()) {
+ return SkPDFIndirectReference();
+ }
+ SkASSERT(shader->asAGradient(nullptr) == SkShader::kNone_GradientType);
+
+ paintColor = adjust_color(shader, paintColor);
+ SkMatrix shaderTransform;
+ SkTileMode imageTileModes[2];
+ if (SkImage* skimg = shader->isAImage(&shaderTransform, imageTileModes)) {
+ SkMatrix finalMatrix = SkMatrix::Concat(canvasTransform, shaderTransform);
+ SkPDFImageShaderKey key = {
+ finalMatrix,
+ surfaceBBox,
+ SkBitmapKeyFromImage(skimg),
+ {imageTileModes[0], imageTileModes[1]},
+ paintColor};
+ SkPDFIndirectReference* shaderPtr = doc->fImageShaderMap.find(key);
+ if (shaderPtr) {
+ return *shaderPtr;
+ }
+ SkPDFIndirectReference pdfShader =
+ make_image_shader(doc,
+ finalMatrix,
+ imageTileModes[0],
+ imageTileModes[1],
+ SkRect::Make(surfaceBBox),
+ skimg,
+ paintColor);
+ doc->fImageShaderMap.set(std::move(key), pdfShader);
+ return pdfShader;
+ }
+ // Don't bother to de-dup fallback shader.
+ return make_fallback_shader(doc, shader, canvasTransform, surfaceBBox, paintColor);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.h b/gfx/skia/skia/src/pdf/SkPDFShader.h
new file mode 100644
index 0000000000..ee270ea3af
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFShader_DEFINED
+#define SkPDFShader_DEFINED
+
+#include "include/core/SkShader.h"
+#include "include/private/SkMacros.h"
+#include "src/pdf/SkBitmapKey.h"
+#include "src/pdf/SkPDFTypes.h"
+
+
+class SkPDFDocument;
+class SkMatrix;
+struct SkIRect;
+
+/** Make a PDF shader for the passed SkShader. If the SkShader is invalid in
+ * some way, returns an invalid (default-constructed) SkPDFIndirectReference.
+ *
+ * In PDF parlance, this is a pattern, used in place of a color when the
+ * pattern color space is selected.
+ *
+ * May cache the shader in the document for later re-use. If this function is
+ * called again with an equivalent shader, a new reference to the cached pdf
+ * shader may be returned.
+ *
+ * @param doc The parent document, must be non-null.
+ * @param shader The SkShader to emulate.
+ * @param ctm The current transform matrix. (PDF shaders are absolutely
+ * positioned, relative to where the page is drawn.)
+ * @param surfaceBBox The bounding box of the drawing surface (with matrix
+ * already applied).
+ * @param paintColor Color+Alpha of the paint. Color is usually ignored,
+ * unless it is an alpha shader.
+ */
+SkPDFIndirectReference SkPDFMakeShader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& ctm,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor);
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFImageShaderKey {
+ SkMatrix fTransform;
+ SkIRect fBBox;
+ SkBitmapKey fBitmapKey;
+ SkTileMode fImageTileModes[2];
+ SkColor4f fPaintColor;
+};
+SK_END_REQUIRE_DENSE
+
+inline bool operator==(const SkPDFImageShaderKey& a, const SkPDFImageShaderKey& b) {
+ SkASSERT(a.fBitmapKey.fID != 0);
+ SkASSERT(b.fBitmapKey.fID != 0);
+ return a.fTransform == b.fTransform
+ && a.fBBox == b.fBBox
+ && a.fBitmapKey == b.fBitmapKey
+ && a.fImageTileModes[0] == b.fImageTileModes[0]
+ && a.fImageTileModes[1] == b.fImageTileModes[1]
+ && a.fPaintColor == b.fPaintColor;
+}
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp
new file mode 100644
index 0000000000..df2999e67c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp
@@ -0,0 +1,181 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFSubsetFont.h"
+
+#if defined(SK_USING_THIRD_PARTY_ICU)
+#include "SkLoadICU.h"
+#endif
+
+#if defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/utils/SkCallableTraits.h"
+
+#include "hb.h"
+#include "hb-subset.h"
+
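+// RAII aliases: each wrapper calls the matching hb_*_destroy() when it goes
+// out of scope, so no manual cleanup is needed on early returns.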
+template <class T, void(*P)(T*)> using resource =
+ std::unique_ptr<T, SkFunctionWrapper<skstd::remove_pointer_t<decltype(P)>, P>>;
+using HBBlob = resource<hb_blob_t, &hb_blob_destroy>;
+using HBFace = resource<hb_face_t, &hb_face_destroy>;
+using HBSubsetInput = resource<hb_subset_input_t, &hb_subset_input_destroy>;
+using HBSet = resource<hb_set_t, &hb_set_destroy>;
+
+static HBBlob to_blob(sk_sp<SkData> data) {
+ using blob_size_t = SkCallableTraits<decltype(hb_blob_create)>::argument<1>::type;
+ if (!SkTFitsIn<blob_size_t>(data->size())) {
+ return nullptr;
+ }
+ const char* blobData = static_cast<const char*>(data->data());
+ blob_size_t blobSize = SkTo<blob_size_t>(data->size());
+ return HBBlob(hb_blob_create(blobData, blobSize,
+ HB_MEMORY_MODE_READONLY,
+ data.release(), [](void* p){ ((SkData*)p)->unref(); }));
+}
+
+static sk_sp<SkData> to_data(HBBlob blob) {
+ if (!blob) {
+ return nullptr;
+ }
+ unsigned int length;
+ const char* data = hb_blob_get_data(blob.get(), &length);
+ if (!data || !length) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(data, SkToSizeT(length),
+ [](const void*, void* ctx) { hb_blob_destroy((hb_blob_t*)ctx); },
+ blob.release());
+}
+
+static sk_sp<SkData> subset_harfbuzz(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ int ttcIndex) {
+#if defined(SK_USING_THIRD_PARTY_ICU)
+ if (!SkLoadICU()) {
+ return nullptr;
+ }
+#endif
+ if (!fontData) {
+ return nullptr;
+ }
+ HBFace face(hb_face_create(to_blob(std::move(fontData)).get(), ttcIndex));
+ SkASSERT(face);
+
+ HBSubsetInput input(hb_subset_input_create_or_fail());
+ SkASSERT(input);
+ if (!face || !input) {
+ return nullptr;
+ }
+ hb_set_t* glyphs = hb_subset_input_glyph_set(input.get());
+ glyphUsage.getSetValues([&glyphs](unsigned gid) { hb_set_add(glyphs, gid);});
+
+ hb_subset_input_set_retain_gids(input.get(), true);
+ // TODO: When possible, check if a font is 'tricky' with FT_IS_TRICKY.
+ // If it isn't known if a font is 'tricky', retain the hints.
+ hb_subset_input_set_drop_hints(input.get(), false);
+ HBFace subset(hb_subset(face.get(), input.get()));
+ HBBlob result(hb_face_reference_blob(subset.get()));
+ return to_data(std::move(result));
+}
+
+#endif // defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_PDF_USE_SFNTLY)
+
+#include "sample/chromium/font_subsetter.h"
+#include <vector>
+
+static sk_sp<SkData> subset_sfntly(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ const char* fontName,
+ int ttcIndex) {
+#if defined(SK_USING_THIRD_PARTY_ICU)
+ if (!SkLoadICU()) {
+ return nullptr;
+ }
+#endif
+ // Generate glyph id array in format needed by sfntly.
+ // TODO(halcanary): sfntly should take a more compact format.
+ std::vector<unsigned> subset;
+ glyphUsage.getSetValues([&subset](unsigned v) { subset.push_back(v); });
+
+ unsigned char* subsetFont{nullptr};
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // TODO(halcanary): update SK_BUILD_FOR_GOOGLE3 to newest version of Sfntly.
+ (void)ttcIndex;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(fontName,
+ fontData->bytes(),
+ fontData->size(),
+ subset.data(),
+ subset.size(),
+ &subsetFont);
+#else // defined(SK_BUILD_FOR_GOOGLE3)
+ (void)fontName;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(ttcIndex,
+ fontData->bytes(),
+ fontData->size(),
+ subset.data(),
+ subset.size(),
+ &subsetFont);
+#endif // defined(SK_BUILD_FOR_GOOGLE3)
+ SkASSERT(subsetFontSize > 0 || subsetFont == nullptr);
+ if (subsetFontSize < 1 || subsetFont == nullptr) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(subsetFont, subsetFontSize,
+ [](const void* p, void*) { delete[] (unsigned char*)p; },
+ nullptr);
+}
+
+#endif // defined(SK_PDF_USE_SFNTLY)
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_PDF_USE_SFNTLY) && defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter subsetter,
+ const char* fontName,
+ int ttcIndex) {
+ switch (subsetter) {
+ case SkPDF::Metadata::kHarfbuzz_Subsetter:
+ return subset_harfbuzz(std::move(fontData), glyphUsage, ttcIndex);
+ case SkPDF::Metadata::kSfntly_Subsetter:
+ return subset_sfntly(std::move(fontData), glyphUsage, fontName, ttcIndex);
+ }
+ return nullptr;
+}
+
+#elif defined(SK_PDF_USE_SFNTLY)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter,
+ const char* fontName,
+ int ttcIndex) {
+ return subset_sfntly(std::move(fontData), glyphUsage, fontName, ttcIndex);
+}
+
+#elif defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter,
+ const char*,
+ int ttcIndex) {
+ return subset_harfbuzz(std::move(fontData), glyphUsage, ttcIndex);
+}
+
+#else
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData>, const SkPDFGlyphUse&, SkPDF::Metadata::Subsetter,
+ const char*, int) {
+ return nullptr;
+}
+#endif // defined(SK_PDF_USE_SFNTLY)
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h
new file mode 100644
index 0000000000..b812c52ff5
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h
@@ -0,0 +1,16 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFSubsetFont_DEFINED
+#define SkPDFSubsetFont_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter subsetter,
+ const char* fontName,
+ int ttcIndex);
+
+#endif // SkPDFSubsetFont_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFTag.cpp b/gfx/skia/skia/src/pdf/SkPDFTag.cpp
new file mode 100644
index 0000000000..a2248070e8
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTag.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFTag.h"
+
+// Table 333 in PDF 32000-1:2008
+static const char* tag_name_from_type(SkPDF::DocumentStructureType type) {
+ switch (type) {
+ #define M(X) case SkPDF::DocumentStructureType::k ## X: return #X
+ M(Document);
+ M(Part);
+ M(Art);
+ M(Sect);
+ M(Div);
+ M(BlockQuote);
+ M(Caption);
+ M(TOC);
+ M(TOCI);
+ M(Index);
+ M(NonStruct);
+ M(Private);
+ M(H);
+ M(H1);
+ M(H2);
+ M(H3);
+ M(H4);
+ M(H5);
+ M(H6);
+ M(P);
+ M(L);
+ M(LI);
+ M(Lbl);
+ M(LBody);
+ M(Table);
+ M(TR);
+ M(TH);
+ M(TD);
+ M(THead);
+ M(TBody);
+ M(TFoot);
+ M(Span);
+ M(Quote);
+ M(Note);
+ M(Reference);
+ M(BibEntry);
+ M(Code);
+ M(Link);
+ M(Annot);
+ M(Ruby);
+ M(RB);
+ M(RT);
+ M(RP);
+ M(Warichu);
+ M(WT);
+ M(WP);
+ M(Figure);
+ M(Formula);
+ M(Form);
+ #undef M
+ }
+ SK_ABORT("bad tag");
+}
+
+struct SkPDFTagNode {
+ SkPDFTagNode* fChildren = nullptr;
+ size_t fChildCount = 0;
+ struct MarkedContentInfo {
+ unsigned fPageIndex;
+ int fMarkId;
+ };
+ SkTArray<MarkedContentInfo> fMarkedContent;
+ int fNodeId;
+ SkPDF::DocumentStructureType fType;
+ SkPDFIndirectReference fRef;
+ enum State {
+ kUnknown,
+ kYes,
+ kNo,
+ } fCanDiscard = kUnknown;
+};
+
+SkPDFTagTree::SkPDFTagTree() : fArena(4 * sizeof(SkPDFTagNode)) {}
+
+SkPDFTagTree::~SkPDFTagTree() = default;
+
+static void copy(const SkPDF::StructureElementNode& node,
+ SkPDFTagNode* dst,
+ SkArenaAlloc* arena,
+ SkTHashMap<int, SkPDFTagNode*>* nodeMap) {
+ nodeMap->set(node.fNodeId, dst);
+ size_t childCount = node.fChildCount;
+ SkPDFTagNode* children = arena->makeArray<SkPDFTagNode>(childCount);
+ dst->fChildCount = childCount;
+ dst->fNodeId = node.fNodeId;
+ dst->fType = node.fType;
+ dst->fChildren = children;
+ for (size_t i = 0; i < childCount; ++i) {
+ copy(node.fChildren[i], &children[i], arena, nodeMap);
+ }
+}
+
+void SkPDFTagTree::init(const SkPDF::StructureElementNode* node) {
+ if (node) {
+ fRoot = fArena.make<SkPDFTagNode>();
+ copy(*node, fRoot, &fArena, &fNodeMap);
+ }
+}
+
+void SkPDFTagTree::reset() {
+ fArena.reset();
+ fNodeMap.reset();
+ fMarksPerPage.reset();
+ fRoot = nullptr;
+}
+
+int SkPDFTagTree::getMarkIdForNodeId(int nodeId, unsigned pageIndex) {
+ if (!fRoot) {
+ return -1;
+ }
+ SkPDFTagNode** tagPtr = fNodeMap.find(nodeId);
+ if (!tagPtr) {
+ return -1;
+ }
+ SkPDFTagNode* tag = *tagPtr;
+ SkASSERT(tag);
+ while (fMarksPerPage.size() < pageIndex + 1) {
+ fMarksPerPage.push_back();
+ }
+ SkTArray<SkPDFTagNode*>& pageMarks = fMarksPerPage[pageIndex];
+ int markId = pageMarks.count();
+ tag->fMarkedContent.push_back({pageIndex, markId});
+ pageMarks.push_back(tag);
+ return markId;
+}
+
+static bool can_discard(SkPDFTagNode* node) {
+ if (node->fCanDiscard == SkPDFTagNode::kYes) {
+ return true;
+ }
+ if (node->fCanDiscard == SkPDFTagNode::kNo) {
+ return false;
+ }
+ if (!node->fMarkedContent.empty()) {
+ node->fCanDiscard = SkPDFTagNode::kNo;
+ return false;
+ }
+ for (size_t i = 0; i < node->fChildCount; ++i) {
+ if (!can_discard(&node->fChildren[i])) {
+ node->fCanDiscard = SkPDFTagNode::kNo;
+ return false;
+ }
+ }
+ node->fCanDiscard = SkPDFTagNode::kYes;
+ return true;
+}
+
+
+SkPDFIndirectReference prepare_tag_tree_to_emit(SkPDFIndirectReference parent,
+ SkPDFTagNode* node,
+ SkPDFDocument* doc) {
+ SkPDFIndirectReference ref = doc->reserveRef();
+ std::unique_ptr<SkPDFArray> kids = SkPDFMakeArray();
+ SkPDFTagNode* children = node->fChildren;
+ size_t childCount = node->fChildCount;
+ for (size_t i = 0; i < childCount; ++i) {
+ SkPDFTagNode* child = &children[i];
+ if (!(can_discard(child))) {
+ kids->appendRef(prepare_tag_tree_to_emit(ref, child, doc));
+ }
+ }
+ for (const SkPDFTagNode::MarkedContentInfo& info : node->fMarkedContent) {
+ std::unique_ptr<SkPDFDict> mcr = SkPDFMakeDict("MCR");
+ mcr->insertRef("Pg", doc->getPage(info.fPageIndex));
+ mcr->insertInt("MCID", info.fMarkId);
+ kids->appendObject(std::move(mcr));
+ }
+ node->fRef = ref;
+ SkPDFDict dict("StructElem");
+ dict.insertName("S", tag_name_from_type(node->fType));
+ dict.insertRef("P", parent);
+ dict.insertObject("K", std::move(kids));
+ return doc->emit(dict, ref);
+}
+
+SkPDFIndirectReference SkPDFTagTree::makeStructTreeRoot(SkPDFDocument* doc) {
+ if (!fRoot) {
+ return SkPDFIndirectReference();
+ }
+ if (can_discard(fRoot)) {
+ SkDEBUGFAIL("PDF has tag tree but no marked content.");
+ }
+ SkPDFIndirectReference ref = doc->reserveRef();
+
+ unsigned pageCount = SkToUInt(doc->pageCount());
+
+ // Build the StructTreeRoot.
+ SkPDFDict structTreeRoot("StructTreeRoot");
+ structTreeRoot.insertRef("K", prepare_tag_tree_to_emit(ref, fRoot, doc));
+ structTreeRoot.insertInt("ParentTreeNextKey", SkToInt(pageCount));
+
+ // Build the parent tree, which is a mapping from the marked
+ // content IDs on each page to their corresponding tags.
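+ // Nums alternates a page index and a reference to that page's array of
+ // StructElem refs, in marked-content order: [ 0 <page-0 marks> 1 <page-1 marks> ... ].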
+ SkPDFDict parentTree("ParentTree");
+ auto parentTreeNums = SkPDFMakeArray();
+
+ SkASSERT(fMarksPerPage.size() <= pageCount);
+ for (size_t j = 0; j < fMarksPerPage.size(); ++j) {
+ const SkTArray<SkPDFTagNode*>& pageMarks = fMarksPerPage[j];
+ SkPDFArray markToTagArray;
+ for (SkPDFTagNode* mark : pageMarks) {
+ SkASSERT(mark->fRef);
+ markToTagArray.appendRef(mark->fRef);
+ }
+ parentTreeNums->appendInt(j);
+ parentTreeNums->appendRef(doc->emit(markToTagArray));
+ }
+ parentTree.insertObject("Nums", std::move(parentTreeNums));
+ structTreeRoot.insertRef("ParentTree", doc->emit(parentTree));
+ return doc->emit(structTreeRoot, ref);
+}
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFTag.h b/gfx/skia/skia/src/pdf/SkPDFTag.h
new file mode 100644
index 0000000000..d0718d381e
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTag.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFTag_DEFINED
+#define SkPDFTag_DEFINED
+
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTHash.h"
+#include "src/core/SkArenaAlloc.h"
+
+class SkPDFDocument;
+struct SkPDFIndirectReference;
+struct SkPDFTagNode;
+
+class SkPDFTagTree {
+public:
+ SkPDFTagTree();
+ ~SkPDFTagTree();
+ void init(const SkPDF::StructureElementNode*);
+ void reset();
+ int getMarkIdForNodeId(int nodeId, unsigned pageIndex);
+ SkPDFIndirectReference makeStructTreeRoot(SkPDFDocument* doc);
+
+private:
+ SkArenaAlloc fArena;
+ SkTHashMap<int, SkPDFTagNode*> fNodeMap;
+ SkPDFTagNode* fRoot = nullptr;
+ SkTArray<SkTArray<SkPDFTagNode*>> fMarksPerPage;
+
+ SkPDFTagTree(const SkPDFTagTree&) = delete;
+ SkPDFTagTree& operator=(const SkPDFTagTree&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp b/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp
new file mode 100644
index 0000000000..5bcb3f8e5a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp
@@ -0,0 +1,335 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFType1Font.h"
+
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkStrikeSpec.h"
+
+#include <ctype.h>
+
+/*
+ "A standard Type 1 font program, as described in the Adobe Type 1
+ Font Format specification, consists of three parts: a clear-text
+ portion (written using PostScript syntax), an encrypted portion, and
+ a fixed-content portion. The fixed-content portion contains 512
+ ASCII zeros followed by a cleartomark operator, and perhaps followed
+ by additional data. Although the encrypted portion of a standard
+ Type 1 font may be in binary or ASCII hexadecimal format, PDF
+ supports only the binary format."
+*/
+static bool parsePFBSection(const uint8_t** src, size_t* len, int sectionType,
+ size_t* size) {
+ // PFB sections have a two or six bytes header. 0x80 and a one byte
+ // section type followed by a four byte section length. Type one is
+ // an ASCII section (includes a length), type two is a binary section
+ // (includes a length) and type three is an EOF marker with no length.
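+ // For example, a 16-byte binary section begins 0x80 0x02 0x10 0x00 0x00 0x00
+ // (the section length is stored little-endian, as decoded below).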
+ const uint8_t* buf = *src;
+ if (*len < 2 || buf[0] != 0x80 || buf[1] != sectionType) {
+ return false;
+ } else if (buf[1] == 3) {
+ return true;
+ } else if (*len < 6) {
+ return false;
+ }
+
+ *size = (size_t)buf[2] | ((size_t)buf[3] << 8) | ((size_t)buf[4] << 16) |
+ ((size_t)buf[5] << 24);
+ size_t consumed = *size + 6;
+ if (consumed > *len) {
+ return false;
+ }
+ *src = *src + consumed;
+ *len = *len - consumed;
+ return true;
+}
+
+static bool parsePFB(const uint8_t* src, size_t size, size_t* headerLen,
+ size_t* dataLen, size_t* trailerLen) {
+ const uint8_t* srcPtr = src;
+ size_t remaining = size;
+
+ return parsePFBSection(&srcPtr, &remaining, 1, headerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 2, dataLen) &&
+ parsePFBSection(&srcPtr, &remaining, 1, trailerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 3, nullptr);
+}
+
+/* The sections of a PFA file are implicitly defined. The body starts
+ * after the line containing "eexec", and the trailer starts with 512
+ * literal 0's followed by "cleartomark" (plus arbitrary white space).
+ *
+ * This function assumes that src is NUL terminated, but the NUL
+ * termination is not included in size.
+ *
+ */
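+// Schematically: <header text> "eexec" <whitespace> <hex-encoded encrypted
+// data> <512 '0' characters, possibly whitespace-separated> "cleartomark".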
+static bool parsePFA(const char* src, size_t size, size_t* headerLen,
+ size_t* hexDataLen, size_t* dataLen, size_t* trailerLen) {
+ const char* end = src + size;
+
+ const char* dataPos = strstr(src, "eexec");
+ if (!dataPos) {
+ return false;
+ }
+ dataPos += strlen("eexec");
+ while ((*dataPos == '\n' || *dataPos == '\r' || *dataPos == ' ') &&
+ dataPos < end) {
+ dataPos++;
+ }
+ *headerLen = dataPos - src;
+
+ const char* trailerPos = strstr(dataPos, "cleartomark");
+ if (!trailerPos) {
+ return false;
+ }
+ int zeroCount = 0;
+ for (trailerPos--; trailerPos > dataPos && zeroCount < 512; trailerPos--) {
+ if (*trailerPos == '\n' || *trailerPos == '\r' || *trailerPos == ' ') {
+ continue;
+ } else if (*trailerPos == '0') {
+ zeroCount++;
+ } else {
+ return false;
+ }
+ }
+ if (zeroCount != 512) {
+ return false;
+ }
+
+ *hexDataLen = trailerPos - src - *headerLen;
+ *trailerLen = size - *headerLen - *hexDataLen;
+
+ // Verify that the data section is hex encoded and count the bytes.
+ int nibbles = 0;
+ for (; dataPos < trailerPos; dataPos++) {
+ if (isspace(*dataPos)) {
+ continue;
+ }
+ // isxdigit() is locale-sensitive https://bugs.skia.org/8285
+ if (nullptr == strchr("0123456789abcdefABCDEF", *dataPos)) {
+ return false;
+ }
+ nibbles++;
+ }
+ *dataLen = (nibbles + 1) / 2;
+
+ return true;
+}
+
+static int8_t hexToBin(uint8_t c) {
+ if (!isxdigit(c)) {
+ return -1;
+ } else if (c <= '9') {
+ return c - '0';
+ } else if (c <= 'F') {
+ return c - 'A' + 10;
+ } else if (c <= 'f') {
+ return c - 'a' + 10;
+ }
+ return -1;
+}
+
+static sk_sp<SkData> convert_type1_font_stream(std::unique_ptr<SkStreamAsset> srcStream,
+ size_t* headerLen,
+ size_t* dataLen,
+ size_t* trailerLen) {
+ size_t srcLen = srcStream ? srcStream->getLength() : 0;
+ SkASSERT(srcLen);
+ if (!srcLen) {
+ return nullptr;
+ }
+ // Flatten and NUL-terminate the source stream so that we can use
+ // strstr() to search it.
+ SkAutoTMalloc<uint8_t> sourceBuffer(SkToInt(srcLen + 1));
+ (void)srcStream->read(sourceBuffer.get(), srcLen);
+ sourceBuffer[SkToInt(srcLen)] = 0;
+ const uint8_t* src = sourceBuffer.get();
+
+ if (parsePFB(src, srcLen, headerLen, dataLen, trailerLen)) {
+ static const int kPFBSectionHeaderLength = 6;
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ SkASSERT(length + (2 * kPFBSectionHeaderLength) <= srcLen);
+
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+
+ const uint8_t* const srcHeader = src + kPFBSectionHeaderLength;
+ // There is a six-byte section header before header and data
+ // (but not trailer) that we're not going to copy.
+ const uint8_t* const srcData = srcHeader + *headerLen + kPFBSectionHeaderLength;
+ const uint8_t* const srcTrailer = srcData + *dataLen;
+
+ uint8_t* const resultHeader = (uint8_t*)data->writable_data();
+ uint8_t* const resultData = resultHeader + *headerLen;
+ uint8_t* const resultTrailer = resultData + *dataLen;
+
+ SkASSERT(resultTrailer + *trailerLen == resultHeader + length);
+
+ memcpy(resultHeader, srcHeader, *headerLen);
+ memcpy(resultData, srcData, *dataLen);
+ memcpy(resultTrailer, srcTrailer, *trailerLen);
+
+ return data;
+ }
+
+ // A PFA has to be converted for PDF.
+ size_t hexDataLen;
+ if (!parsePFA((const char*)src, srcLen, headerLen, &hexDataLen, dataLen,
+ trailerLen)) {
+ return nullptr;
+ }
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ auto data = SkData::MakeUninitialized(length);
+ uint8_t* buffer = (uint8_t*)data->writable_data();
+
+ memcpy(buffer, src, *headerLen);
+ uint8_t* const resultData = &(buffer[*headerLen]);
+
+ const uint8_t* hexData = src + *headerLen;
+ const uint8_t* trailer = hexData + hexDataLen;
+ size_t outputOffset = 0;
+ uint8_t dataByte = 0; // To hush compiler.
+ bool highNibble = true;
+ for (; hexData < trailer; hexData++) {
+ int8_t curNibble = hexToBin(*hexData);
+ if (curNibble < 0) {
+ continue;
+ }
+ if (highNibble) {
+ dataByte = curNibble << 4;
+ highNibble = false;
+ } else {
+ dataByte |= curNibble;
+ highNibble = true;
+ resultData[outputOffset++] = dataByte;
+ }
+ }
+ if (!highNibble) {
+ resultData[outputOffset++] = dataByte;
+ }
+ SkASSERT(outputOffset == *dataLen);
+
+ uint8_t* const resultTrailer = &(buffer[SkToInt(*headerLen + outputOffset)]);
+ memcpy(resultTrailer, src + *headerLen + hexDataLen, *trailerLen);
+ return data;
+}
+
+inline static bool can_embed(const SkAdvancedTypefaceMetrics& metrics) {
+ return !SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag);
+}
+
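+// PDF font metrics use a 1000-units-per-em glyph space; e.g. an advance of
+// 1024 in a 2048-unit em becomes 500.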
+inline static SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ return emSize == 1000 ? scaled : scaled * 1000 / emSize;
+}
+
+static SkPDFIndirectReference make_type1_font_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface,
+ const SkAdvancedTypefaceMetrics* info) {
+ SkPDFDict descriptor("FontDescriptor");
+ uint16_t emSize = SkToU16(typeface->getUnitsPerEm());
+ if (info) {
+ SkPDFFont::PopulateCommonFontDescriptor(&descriptor, *info, emSize, 0);
+ if (can_embed(*info)) {
+ int ttcIndex;
+ size_t header SK_INIT_TO_AVOID_WARNING;
+ size_t data SK_INIT_TO_AVOID_WARNING;
+ size_t trailer SK_INIT_TO_AVOID_WARNING;
+ std::unique_ptr<SkStreamAsset> rawFontData = typeface->openStream(&ttcIndex);
+ sk_sp<SkData> fontData = convert_type1_font_stream(std::move(rawFontData),
+ &header, &data, &trailer);
+ if (fontData) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("Length1", header);
+ dict->insertInt("Length2", data);
+ dict->insertInt("Length3", trailer);
+ auto fontStream = SkMemoryStream::Make(std::move(fontData));
+ descriptor.insertRef("FontFile", SkPDFStreamOut(std::move(dict),
+ std::move(fontStream), doc, true));
+ }
+ }
+ }
+ return doc->emit(descriptor);
+}
+
+
+static const std::vector<SkString>& type_1_glyphnames(SkPDFDocument* canon,
+ const SkTypeface* typeface) {
+ SkFontID fontID = typeface->uniqueID();
+ const std::vector<SkString>* glyphNames = canon->fType1GlyphNames.find(fontID);
+ if (!glyphNames) {
+ std::vector<SkString> names(typeface->countGlyphs());
+ SkPDFFont::GetType1GlyphNames(*typeface, names.data());
+ glyphNames = canon->fType1GlyphNames.set(fontID, std::move(names));
+ }
+ SkASSERT(glyphNames);
+ return *glyphNames;
+}
+
+static SkPDFIndirectReference type1_font_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface) {
+ SkFontID fontID = typeface->uniqueID();
+ if (SkPDFIndirectReference* ptr = doc->fFontDescriptors.find(fontID)) {
+ return *ptr;
+ }
+ const SkAdvancedTypefaceMetrics* info = SkPDFFont::GetMetrics(typeface, doc);
+ auto fontDescriptor = make_type1_font_descriptor(doc, typeface, info);
+ doc->fFontDescriptors.set(fontID, fontDescriptor);
+ return fontDescriptor;
+}
+
+
+void SkPDFEmitType1Font(const SkPDFFont& pdfFont, SkPDFDocument* doc) {
+ SkTypeface* typeface = pdfFont.typeface();
+ const std::vector<SkString> glyphNames = type_1_glyphnames(doc, typeface);
+ SkGlyphID firstGlyphID = pdfFont.firstGlyphID();
+ SkGlyphID lastGlyphID = pdfFont.lastGlyphID();
+
+ SkPDFDict font("Font");
+ font.insertRef("FontDescriptor", type1_font_descriptor(doc, typeface));
+ font.insertName("Subtype", "Type1");
+ if (const SkAdvancedTypefaceMetrics* info = SkPDFFont::GetMetrics(typeface, doc)) {
+ font.insertName("BaseFont", info->fPostScriptName);
+ }
+
+ // glyphCount not including glyph 0
+ unsigned glyphCount = 1 + lastGlyphID - firstGlyphID;
+ SkASSERT(glyphCount > 0 && glyphCount <= 255);
+ font.insertInt("FirstChar", (size_t)0);
+ font.insertInt("LastChar", (size_t)glyphCount);
+ {
+ int emSize;
+ auto widths = SkPDFMakeArray();
+
+ int glyphRangeSize = lastGlyphID - firstGlyphID + 2;
+ SkAutoTArray<SkGlyphID> glyphIDs{glyphRangeSize};
+ glyphIDs[0] = 0;
+ for (unsigned gId = firstGlyphID; gId <= lastGlyphID; gId++) {
+ glyphIDs[gId - firstGlyphID + 1] = gId;
+ }
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &emSize);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ auto glyphs = metrics.glyphs(SkMakeSpan(glyphIDs.get(), glyphRangeSize));
+ for (int i = 0; i < glyphRangeSize; ++i) {
+ widths->appendScalar(from_font_units(glyphs[i]->advanceX(), SkToU16(emSize)));
+ }
+ font.insertObject("Widths", std::move(widths));
+ }
+ auto encDiffs = SkPDFMakeArray();
+ encDiffs->reserve(lastGlyphID - firstGlyphID + 3);
+ encDiffs->appendInt(0);
+
+ SkASSERT(glyphNames.size() > lastGlyphID);
+ const SkString unknown("UNKNOWN");
+ encDiffs->appendName(glyphNames[0].isEmpty() ? unknown : glyphNames[0]);
+ for (int gID = firstGlyphID; gID <= lastGlyphID; gID++) {
+ encDiffs->appendName(glyphNames[gID].isEmpty() ? unknown : glyphNames[gID]);
+ }
+
+ auto encoding = SkPDFMakeDict("Encoding");
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font.insertObject("Encoding", std::move(encoding));
+
+ doc->emit(font, pdfFont.indirectReference());
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFType1Font.h b/gfx/skia/skia/src/pdf/SkPDFType1Font.h
new file mode 100644
index 0000000000..7f9d972fe5
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFType1Font.h
@@ -0,0 +1,11 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFType1Font_DEFINED
+#define SkPDFType1Font_DEFINED
+
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+
+void SkPDFEmitType1Font(const SkPDFFont&, SkPDFDocument*);
+
+#endif // SkPDFType1Font_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.cpp b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
new file mode 100644
index 0000000000..9a74fd2020
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFTypes.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkExecutor.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/pdf/SkDeflate.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFUnion.h"
+#include "src/pdf/SkPDFUtils.h"
+
+#include <new>
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFUnion::SkPDFUnion(Type t) : fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, int32_t v) : fIntValue(v), fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, bool v) : fBoolValue(v), fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, SkScalar v) : fScalarValue(v), fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, SkString v) : fType(t) { fSkString.init(std::move(v)); }
+
+SkPDFUnion::~SkPDFUnion() {
+ switch (fType) {
+ case Type::kNameSkS:
+ case Type::kStringSkS:
+ fSkString.destroy();
+ return;
+ case Type::kObject:
+ SkASSERT(fObject);
+ delete fObject;
+ return;
+ default:
+ return;
+ }
+}
+
+SkPDFUnion& SkPDFUnion::operator=(SkPDFUnion&& other) {
+ if (this != &other) {
+ this->~SkPDFUnion();
+ new (this) SkPDFUnion(std::move(other));
+ }
+ return *this;
+}
+
+SkPDFUnion::SkPDFUnion(SkPDFUnion&& other) {
+ SkASSERT(this != &other);
+ memcpy(this, &other, sizeof(*this));
+ other.fType = Type::kDestroyed;
+}
+
+#if 0
+SkPDFUnion SkPDFUnion::copy() const {
+ SkPDFUnion u(fType);
+ memcpy(&u, this, sizeof(u));
+ switch (fType) {
+ case Type::kNameSkS:
+ case Type::kStringSkS:
+ u.fSkString.init(fSkString.get());
+ return u;
+ case Type::kObject:
+ SkRef(u.fObject);
+ return u;
+ default:
+ return u;
+ }
+}
+SkPDFUnion& SkPDFUnion::operator=(const SkPDFUnion& other) {
+ return *this = other.copy();
+}
+SkPDFUnion::SkPDFUnion(const SkPDFUnion& other) {
+ *this = other.copy();
+}
+#endif
+
+bool SkPDFUnion::isName() const {
+ return Type::kName == fType || Type::kNameSkS == fType;
+}
+
+#ifdef SK_DEBUG
+// Most names need no escaping. Such names are handled as static
+// const strings.
+bool is_valid_name(const char* n) {
+ static const char kControlChars[] = "/%()<>[]{}";
+ while (*n) {
+ if (*n < '!' || *n > '~' || strchr(kControlChars, *n)) {
+ return false;
+ }
+ ++n;
+ }
+ return true;
+}
+#endif // SK_DEBUG
+
+// Given an arbitrary string, write it as a valid name (not including
+// leading slash).
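+// For example, "a b" is written as "a#20b" and "x/y" as "x#2Fy".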
+static void write_name_escaped(SkWStream* o, const char* name) {
+ static const char kToEscape[] = "#/%()<>[]{}";
+ for (const uint8_t* n = reinterpret_cast<const uint8_t*>(name); *n; ++n) {
+ uint8_t v = *n;
+ if (v < '!' || v > '~' || strchr(kToEscape, v)) {
+ char buffer[3] = {'#',
+ SkHexadecimalDigits::gUpper[v >> 4],
+ SkHexadecimalDigits::gUpper[v & 0xF]};
+ o->write(buffer, sizeof(buffer));
+ } else {
+ o->write(n, 1);
+ }
+ }
+}
+
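+// PDF strings have two encodings: a literal string in parentheses, with
+// '\', '(' and ')' backslash-escaped and non-printable bytes written as
+// octal escapes (e.g. "ab\n" becomes "(ab\012)"), or a hex string in angle
+// brackets (e.g. "ab" becomes "<6162>"). The shorter form is chosen.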
+static void write_string(SkWStream* wStream, const char* cin, size_t len) {
+ SkDEBUGCODE(static const size_t kMaxLen = 65535;)
+ SkASSERT(len <= kMaxLen);
+
+ size_t extraCharacterCount = 0;
+ for (size_t i = 0; i < len; i++) {
+ if (cin[i] > '~' || cin[i] < ' ') {
+ extraCharacterCount += 3;
+ } else if (cin[i] == '\\' || cin[i] == '(' || cin[i] == ')') {
+ ++extraCharacterCount;
+ }
+ }
+ if (extraCharacterCount <= len) {
+ wStream->writeText("(");
+ for (size_t i = 0; i < len; i++) {
+ if (cin[i] > '~' || cin[i] < ' ') {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ uint8_t octal[4] = { '\\',
+ (uint8_t)('0' | ( c >> 6 )),
+ (uint8_t)('0' | ((c >> 3) & 0x07)),
+ (uint8_t)('0' | ( c & 0x07)) };
+ wStream->write(octal, 4);
+ } else {
+ if (cin[i] == '\\' || cin[i] == '(' || cin[i] == ')') {
+ wStream->writeText("\\");
+ }
+ wStream->write(&cin[i], 1);
+ }
+ }
+ wStream->writeText(")");
+ } else {
+ wStream->writeText("<");
+ for (size_t i = 0; i < len; i++) {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ char hexValue[2] = { SkHexadecimalDigits::gUpper[c >> 4],
+ SkHexadecimalDigits::gUpper[c & 0xF] };
+ wStream->write(hexValue, 2);
+ }
+ wStream->writeText(">");
+ }
+}
+
+void SkPDFWriteString(SkWStream* wStream, const char* cin, size_t len) {
+ write_string(wStream, cin, len);
+}
+
+void SkPDFUnion::emitObject(SkWStream* stream) const {
+ switch (fType) {
+ case Type::kInt:
+ stream->writeDecAsText(fIntValue);
+ return;
+ case Type::kColorComponent:
+ SkPDFUtils::AppendColorComponent(SkToU8(fIntValue), stream);
+ return;
+ case Type::kColorComponentF:
+ SkPDFUtils::AppendColorComponentF(fScalarValue, stream);
+ return;
+ case Type::kBool:
+ stream->writeText(fBoolValue ? "true" : "false");
+ return;
+ case Type::kScalar:
+ SkPDFUtils::AppendScalar(fScalarValue, stream);
+ return;
+ case Type::kName:
+ stream->writeText("/");
+ SkASSERT(is_valid_name(fStaticString));
+ stream->writeText(fStaticString);
+ return;
+ case Type::kString:
+ SkASSERT(fStaticString);
+ write_string(stream, fStaticString, strlen(fStaticString));
+ return;
+ case Type::kNameSkS:
+ stream->writeText("/");
+ write_name_escaped(stream, fSkString.get().c_str());
+ return;
+ case Type::kStringSkS:
+ write_string(stream, fSkString.get().c_str(), fSkString.get().size());
+ return;
+ case Type::kObject:
+ fObject->emitObject(stream);
+ return;
+ case Type::kRef:
+ SkASSERT(fIntValue >= 0);
+ stream->writeDecAsText(fIntValue);
+ stream->writeText(" 0 R"); // Generation number is always 0.
+ return;
+ default:
+ SkDEBUGFAIL("SkPDFUnion::emitObject with bad type");
+ }
+}
+
+SkPDFUnion SkPDFUnion::Int(int32_t value) { return SkPDFUnion(Type::kInt, value); }
+
+SkPDFUnion SkPDFUnion::ColorComponent(uint8_t value) {
+ return SkPDFUnion(Type::kColorComponent, (int32_t)value);
+}
+
+SkPDFUnion SkPDFUnion::ColorComponentF(float value) {
+ return SkPDFUnion(Type::kColorComponentF, (SkScalar)value);
+}
+
+SkPDFUnion SkPDFUnion::Bool(bool value) {
+ return SkPDFUnion(Type::kBool, value);
+}
+
+SkPDFUnion SkPDFUnion::Scalar(SkScalar value) {
+ return SkPDFUnion(Type::kScalar, value);
+}
+
+SkPDFUnion SkPDFUnion::Name(const char* value) {
+ SkPDFUnion u(Type::kName);
+ SkASSERT(value);
+ SkASSERT(is_valid_name(value));
+ u.fStaticString = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::String(const char* value) {
+ SkPDFUnion u(Type::kString);
+ SkASSERT(value);
+ u.fStaticString = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Name(SkString s) { return SkPDFUnion(Type::kNameSkS, std::move(s)); }
+
+SkPDFUnion SkPDFUnion::String(SkString s) { return SkPDFUnion(Type::kStringSkS, std::move(s)); }
+
+SkPDFUnion SkPDFUnion::Object(std::unique_ptr<SkPDFObject> objSp) {
+ SkPDFUnion u(Type::kObject);
+ SkASSERT(objSp.get());
+ u.fObject = objSp.release(); // take ownership into union{}
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Ref(SkPDFIndirectReference ref) {
+ return SkASSERT(ref.fValue > 0), SkPDFUnion(Type::kRef, (int32_t)ref.fValue);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // Enable if needed.
+void SkPDFAtom::emitObject(SkWStream* stream) const {
+ fValue.emitObject(stream);
+}
+#endif // 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFArray::SkPDFArray() {}
+
+SkPDFArray::~SkPDFArray() {}
+
+size_t SkPDFArray::size() const { return fValues.size(); }
+
+void SkPDFArray::reserve(int length) {
+ fValues.reserve(length);
+}
+
+void SkPDFArray::emitObject(SkWStream* stream) const {
+ stream->writeText("[");
+ for (size_t i = 0; i < fValues.size(); i++) {
+ fValues[i].emitObject(stream);
+ if (i + 1 < fValues.size()) {
+ stream->writeText(" ");
+ }
+ }
+ stream->writeText("]");
+}
+
+void SkPDFArray::append(SkPDFUnion&& value) {
+ fValues.emplace_back(std::move(value));
+}
+
+void SkPDFArray::appendInt(int32_t value) {
+ this->append(SkPDFUnion::Int(value));
+}
+
+void SkPDFArray::appendColorComponent(uint8_t value) {
+ this->append(SkPDFUnion::ColorComponent(value));
+}
+
+void SkPDFArray::appendBool(bool value) {
+ this->append(SkPDFUnion::Bool(value));
+}
+
+void SkPDFArray::appendScalar(SkScalar value) {
+ this->append(SkPDFUnion::Scalar(value));
+}
+
+void SkPDFArray::appendName(const char name[]) {
+ this->append(SkPDFUnion::Name(SkString(name)));
+}
+
+void SkPDFArray::appendName(SkString name) {
+ this->append(SkPDFUnion::Name(std::move(name)));
+}
+
+void SkPDFArray::appendString(SkString value) {
+ this->append(SkPDFUnion::String(std::move(value)));
+}
+
+void SkPDFArray::appendString(const char value[]) {
+ this->append(SkPDFUnion::String(value));
+}
+
+void SkPDFArray::appendObject(std::unique_ptr<SkPDFObject>&& objSp) {
+ this->append(SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFArray::appendRef(SkPDFIndirectReference ref) {
+ this->append(SkPDFUnion::Ref(ref));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPDFDict::~SkPDFDict() {}
+
+SkPDFDict::SkPDFDict(const char type[]) {
+ if (type) {
+ this->insertName("Type", type);
+ }
+}
+
+void SkPDFDict::emitObject(SkWStream* stream) const {
+ stream->writeText("<<");
+ for (size_t i = 0; i < fRecords.size(); ++i) {
+ const std::pair<SkPDFUnion, SkPDFUnion>& record = fRecords[i];
+ record.first.emitObject(stream);
+ stream->writeText(" ");
+ record.second.emitObject(stream);
+ if (i + 1 < fRecords.size()) {
+ stream->writeText("\n");
+ }
+ }
+ stream->writeText(">>");
+}
+
+size_t SkPDFDict::size() const { return fRecords.size(); }
+
+void SkPDFDict::reserve(int n) {
+ fRecords.reserve(n);
+}
+
+void SkPDFDict::insertRef(const char key[], SkPDFIndirectReference ref) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Ref(ref));
+}
+
+void SkPDFDict::insertRef(SkString key, SkPDFIndirectReference ref) {
+ fRecords.emplace_back(SkPDFUnion::Name(std::move(key)), SkPDFUnion::Ref(ref));
+}
+
+void SkPDFDict::insertObject(const char key[], std::unique_ptr<SkPDFObject>&& objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Object(std::move(objSp)));
+}
+void SkPDFDict::insertObject(SkString key, std::unique_ptr<SkPDFObject>&& objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(std::move(key)),
+ SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFDict::insertBool(const char key[], bool value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Bool(value));
+}
+
+void SkPDFDict::insertInt(const char key[], int32_t value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Int(value));
+}
+
+void SkPDFDict::insertInt(const char key[], size_t value) {
+ this->insertInt(key, SkToS32(value));
+}
+
+void SkPDFDict::insertColorComponentF(const char key[], SkScalar value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ColorComponentF(value));
+}
+
+void SkPDFDict::insertScalar(const char key[], SkScalar value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Scalar(value));
+}
+
+void SkPDFDict::insertName(const char key[], const char name[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(name));
+}
+
+void SkPDFDict::insertName(const char key[], SkString name) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(std::move(name)));
+}
+
+void SkPDFDict::insertString(const char key[], const char value[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::String(value));
+}
+
+void SkPDFDict::insertString(const char key[], SkString value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::String(std::move(value)));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+static void serialize_stream(SkPDFDict* origDict,
+ SkStreamAsset* stream,
+ bool deflate,
+ SkPDFDocument* doc,
+ SkPDFIndirectReference ref) {
+ // Code assumes that the stream starts at the beginning.
+ SkASSERT(stream && stream->hasLength());
+
+ std::unique_ptr<SkStreamAsset> tmp;
+ SkPDFDict tmpDict;
+ SkPDFDict& dict = origDict ? *origDict : tmpDict;
+ static const size_t kMinimumSavings = strlen("/Filter_/FlateDecode_");
+ if (deflate && stream->getLength() > kMinimumSavings) {
+ SkDynamicMemoryWStream compressedData;
+ SkDeflateWStream deflateWStream(&compressedData);
+ SkStreamCopy(&deflateWStream, stream);
+ deflateWStream.finalize();
+ #ifdef SK_PDF_BASE85_BINARY
+ {
+ SkPDFUtils::Base85Encode(compressedData.detachAsStream(), &compressedData);
+ tmp = compressedData.detachAsStream();
+ stream = tmp.get();
+ auto filters = SkPDFMakeArray();
+ filters->appendName("ASCII85Decode");
+ filters->appendName("FlateDecode");
+ dict.insertObject("Filter", std::move(filters));
+ }
+ #else
+ if (stream->getLength() > compressedData.bytesWritten() + kMinimumSavings) {
+ tmp = compressedData.detachAsStream();
+ stream = tmp.get();
+ dict.insertName("Filter", "FlateDecode");
+ } else {
+ SkAssertResult(stream->rewind());
+ }
+ #endif
+
+ }
+ dict.insertInt("Length", stream->getLength());
+ doc->emitStream(dict,
+ [stream](SkWStream* dst) { dst->writeStream(stream, stream->getLength()); },
+ ref);
+}
+
+SkPDFIndirectReference SkPDFStreamOut(std::unique_ptr<SkPDFDict> dict,
+ std::unique_ptr<SkStreamAsset> content,
+ SkPDFDocument* doc,
+ bool deflate) {
+ SkPDFIndirectReference ref = doc->reserveRef();
+ if (SkExecutor* executor = doc->executor()) {
+ SkPDFDict* dictPtr = dict.release();
+ SkStreamAsset* contentPtr = content.release();
+ // Pass ownership of both pointers into a std::function, which should
+ // only be executed once.
+ doc->incrementJobCount();
+ executor->add([dictPtr, contentPtr, deflate, doc, ref]() {
+ serialize_stream(dictPtr, contentPtr, deflate, doc, ref);
+ delete dictPtr;
+ delete contentPtr;
+ doc->signalJobComplete();
+ });
+ return ref;
+ }
+ serialize_stream(dict.get(), content.get(), deflate, doc, ref);
+ return ref;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.h b/gfx/skia/skia/src/pdf/SkPDFTypes.h
new file mode 100644
index 0000000000..06c90e2ca0
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFTypes_DEFINED
+#define SkPDFTypes_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTHash.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkMakeUnique.h"
+
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+#include <memory>
+
+class SkData;
+class SkPDFArray;
+
+class SkPDFDict;
+class SkPDFDocument;
+class SkPDFObject;
+class SkPDFUnion;
+class SkStreamAsset;
+class SkString;
+class SkWStream;
+struct SkPDFObjectSerializer;
+
+struct SkPDFIndirectReference {
+ int fValue = -1;
+ explicit operator bool() { return fValue != -1; }
+};
+
+inline static bool operator==(SkPDFIndirectReference u, SkPDFIndirectReference v) {
+ return u.fValue == v.fValue;
+}
+
+inline static bool operator!=(SkPDFIndirectReference u, SkPDFIndirectReference v) {
+ return u.fValue != v.fValue;
+}
+
+/** \class SkPDFObject
+
+ A PDF Object is the base class for primitive elements in a PDF file. A
+ common subtype is used to ease the use of indirect object references,
+ which are common in the PDF format.
+
+*/
+class SkPDFObject {
+public:
+ SkPDFObject() = default;
+
+ /** Subclasses must implement this method to print the object to the
+ * PDF file.
+ * @param stream The writable output stream to send the output to.
+ */
+ virtual void emitObject(SkWStream* stream) const = 0;
+
+ virtual ~SkPDFObject() = default;
+
+private:
+ SkPDFObject(SkPDFObject&&) = delete;
+ SkPDFObject(const SkPDFObject&) = delete;
+ SkPDFObject& operator=(SkPDFObject&&) = delete;
+ SkPDFObject& operator=(const SkPDFObject&) = delete;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPDFArray
+
+ An array object in a PDF.
+*/
+class SkPDFArray final : public SkPDFObject {
+public:
+ /** Create a PDF array. Maximum length is 8191.
+ */
+ SkPDFArray();
+ ~SkPDFArray() override;
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream) const override;
+
+ /** The size of the array.
+ */
+ size_t size() const;
+
+ /** Preallocate space for the given number of entries.
+ * @param length The number of array slots to preallocate.
+ */
+ void reserve(int length);
+
+ /** Appends a value to the end of the array.
+ * @param value The value to add to the array.
+ */
+ void appendInt(int32_t);
+ void appendColorComponent(uint8_t);
+ void appendBool(bool);
+ void appendScalar(SkScalar);
+ void appendName(const char[]);
+ void appendName(SkString);
+ void appendString(const char[]);
+ void appendString(SkString);
+ void appendObject(std::unique_ptr<SkPDFObject>&&);
+ void appendRef(SkPDFIndirectReference);
+
+private:
+ std::vector<SkPDFUnion> fValues;
+ void append(SkPDFUnion&& value);
+};
+
+static inline void SkPDFArray_Append(SkPDFArray* a, int v) { a->appendInt(v); }
+
+static inline void SkPDFArray_Append(SkPDFArray* a, SkScalar v) { a->appendScalar(v); }
+
+template <typename T, typename... Args>
+static inline void SkPDFArray_Append(SkPDFArray* a, T v, Args... args) {
+ SkPDFArray_Append(a, v);
+ SkPDFArray_Append(a, args...);
+}
+
+static inline void SkPDFArray_Append(SkPDFArray* a) {}
+
+template <typename... Args>
+static inline std::unique_ptr<SkPDFArray> SkPDFMakeArray(Args... args) {
+ std::unique_ptr<SkPDFArray> ret(new SkPDFArray());
+ ret->reserve(sizeof...(Args));
+ SkPDFArray_Append(ret.get(), args...);
+ return ret;
+}
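+// e.g. SkPDFMakeArray(0, 0, 612, 792) emits the PDF array [0 0 612 792]
+// (a typical Letter-size MediaBox).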
+
+/** \class SkPDFDict
+
+ A dictionary object in a PDF.
+*/
+class SkPDFDict final : public SkPDFObject {
+public:
+ /** Create a PDF dictionary.
+ * @param type The value of the Type entry, nullptr for no type.
+ */
+ explicit SkPDFDict(const char type[] = nullptr);
+
+ ~SkPDFDict() override;
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream) const override;
+
+ /** The size of the dictionary.
+ */
+ size_t size() const;
+
+ /** Preallocate space for n key-value pairs */
+ void reserve(int n);
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertObject(const char key[], std::unique_ptr<SkPDFObject>&&);
+ void insertObject(SkString, std::unique_ptr<SkPDFObject>&&);
+ void insertRef(const char key[], SkPDFIndirectReference);
+ void insertRef(SkString, SkPDFIndirectReference);
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertBool(const char key[], bool value);
+ void insertInt(const char key[], int32_t value);
+ void insertInt(const char key[], size_t value);
+ void insertScalar(const char key[], SkScalar value);
+ void insertColorComponentF(const char key[], SkScalar value);
+ void insertName(const char key[], const char nameValue[]);
+ void insertName(const char key[], SkString nameValue);
+ void insertString(const char key[], const char value[]);
+ void insertString(const char key[], SkString value);
+
+private:
+ std::vector<std::pair<SkPDFUnion, SkPDFUnion>> fRecords;
+};
+
+static inline std::unique_ptr<SkPDFDict> SkPDFMakeDict(const char* type = nullptr) {
+ return std::unique_ptr<SkPDFDict>(new SkPDFDict(type));
+}
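+// A minimal usage sketch (the exact byte output is up to emitObject):
+//   std::unique_ptr<SkPDFDict> page = SkPDFMakeDict("Page");
+//   page->insertInt("Rotate", 90);
+// yields a dictionary along the lines of << /Type /Page /Rotate 90 >>.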
+
+#ifdef SK_PDF_LESS_COMPRESSION
+ static constexpr bool kSkPDFDefaultDoDeflate = false;
+#else
+ static constexpr bool kSkPDFDefaultDoDeflate = true;
+#endif
+
+SkPDFIndirectReference SkPDFStreamOut(std::unique_ptr<SkPDFDict> dict,
+ std::unique_ptr<SkStreamAsset> stream,
+ SkPDFDocument* doc,
+ bool deflate = kSkPDFDefaultDoDeflate);
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFUnion.h b/gfx/skia/skia/src/pdf/SkPDFUnion.h
new file mode 100644
index 0000000000..9eeec20480
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUnion.h
@@ -0,0 +1,128 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFUnion_DEFINED
+#define SkPDFUnion_DEFINED
+
+#include "src/pdf/SkPDFTypes.h"
+
+template <class T>
+class SkStorageFor {
+public:
+ const T& get() const { return *reinterpret_cast<const T*>(&fStore); }
+ T& get() { return *reinterpret_cast<T*>(&fStore); }
+ // Up to caller to keep track of status.
+ template<class... Args> void init(Args&&... args) {
+ new (&this->get()) T(std::forward<Args>(args)...);
+ }
+ void destroy() { this->get().~T(); }
+private:
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type fStore;
+};
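+// SkStorageFor leaves T's lifetime entirely to the caller: every init() must
+// be paired with exactly one destroy(). A short sketch:
+//   SkStorageFor<SkString> s;
+//   s.init("name");   // placement-new; s.get() is valid from here on
+//   size_t n = s.get().size();
+//   s.destroy();      // s.get() must not be used afterwards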
+
+// Exposed for unit testing.
+void SkPDFWriteString(SkWStream* wStream, const char* cin, size_t len);
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ A SkPDFUnion is a non-virtualized implementation of the
+ non-compound, non-specialized PDF Object types: Name, String,
+ Number, Boolean.
+ */
+class SkPDFUnion {
+public:
+ // The move constructor and move assignment operator destroy the argument
+ // and steal its references (if needed).
+ SkPDFUnion(SkPDFUnion&& other);
+ SkPDFUnion& operator=(SkPDFUnion&& other);
+
+ ~SkPDFUnion();
+
+ /** The following functions are the standard way of creating
+ SkPDFUnion objects. */
+
+ static SkPDFUnion Int(int32_t);
+
+ static SkPDFUnion Int(size_t v) { return SkPDFUnion::Int(SkToS32(v)); }
+
+ static SkPDFUnion Bool(bool);
+
+ static SkPDFUnion Scalar(SkScalar);
+
+ static SkPDFUnion ColorComponent(uint8_t);
+
+ static SkPDFUnion ColorComponentF(float);
+
+ /** These two functions do NOT take ownership of char*, and do NOT
+ copy the string. Suitable for passing in static const
+ strings. For example:
+ SkPDFUnion n = SkPDFUnion::Name("Length");
+ SkPDFUnion u = SkPDFUnion::String("Identity"); */
+
+ /** SkPDFUnion::Name(const char*) assumes that the passed string
+ is already a valid name (that is: it has no control or
+ whitespace characters). This will not copy the name. */
+ static SkPDFUnion Name(const char*);
+
+ /** SkPDFUnion::String will encode the passed string. This will
+ not copy the string. */
+ static SkPDFUnion String(const char*);
+
+ /** SkPDFUnion::Name(SkString) does not assume that the
+ passed string is already a valid name and it will escape the
+ string. */
+ static SkPDFUnion Name(SkString);
+
+ /** SkPDFUnion::String will encode the passed string. */
+ static SkPDFUnion String(SkString);
+
+ static SkPDFUnion Object(std::unique_ptr<SkPDFObject>);
+
+ static SkPDFUnion Ref(SkPDFIndirectReference);
+
+ /** These two non-virtual methods mirror SkPDFObject's
+ corresponding virtuals. */
+ void emitObject(SkWStream*) const;
+
+ bool isName() const;
+
+private:
+ union {
+ int32_t fIntValue;
+ bool fBoolValue;
+ SkScalar fScalarValue;
+ const char* fStaticString;
+ SkStorageFor<SkString> fSkString;
+ SkPDFObject* fObject;
+ };
+ enum class Type : char {
+ /** It is an error to call emitObject() on a kDestroyed
+ object. */
+ kDestroyed = 0,
+ kInt,
+ kColorComponent,
+ kColorComponentF,
+ kBool,
+ kScalar,
+ kName,
+ kString,
+ kNameSkS,
+ kStringSkS,
+ kObject,
+ kRef,
+ };
+ Type fType;
+
+ SkPDFUnion(Type);
+ SkPDFUnion(Type, int32_t);
+ SkPDFUnion(Type, bool);
+ SkPDFUnion(Type, SkScalar);
+ SkPDFUnion(Type, SkString);
+ // We do not currently need the copy constructor or copy assignment,
+ // so they are disabled.
+ SkPDFUnion& operator=(const SkPDFUnion&) = delete;
+ SkPDFUnion(const SkPDFUnion&) = delete;
+};
+static_assert(sizeof(SkString) == sizeof(void*), "SkString_size");
+
+#endif // SkPDFUnion_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.cpp b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
new file mode 100644
index 0000000000..9beedfb5ab
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFUtils.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkFixed.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/image/SkImage_Base.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <cmath>
+
+const char* SkPDFUtils::BlendModeName(SkBlendMode mode) {
+ // PDF32000.book section 11.3.5 "Blend Mode"
+ switch (mode) {
+ case SkBlendMode::kSrcOver: return "Normal";
+ case SkBlendMode::kXor: return "Normal"; // (unsupported mode)
+ case SkBlendMode::kPlus: return "Normal"; // (unsupported mode)
+ case SkBlendMode::kScreen: return "Screen";
+ case SkBlendMode::kOverlay: return "Overlay";
+ case SkBlendMode::kDarken: return "Darken";
+ case SkBlendMode::kLighten: return "Lighten";
+ case SkBlendMode::kColorDodge: return "ColorDodge";
+ case SkBlendMode::kColorBurn: return "ColorBurn";
+ case SkBlendMode::kHardLight: return "HardLight";
+ case SkBlendMode::kSoftLight: return "SoftLight";
+ case SkBlendMode::kDifference: return "Difference";
+ case SkBlendMode::kExclusion: return "Exclusion";
+ case SkBlendMode::kMultiply: return "Multiply";
+ case SkBlendMode::kHue: return "Hue";
+ case SkBlendMode::kSaturation: return "Saturation";
+ case SkBlendMode::kColor: return "Color";
+ case SkBlendMode::kLuminosity: return "Luminosity";
+ // Other blendmodes are handled in SkPDFDevice::setUpContentEntry.
+ default: return nullptr;
+ }
+}
+
+std::unique_ptr<SkPDFArray> SkPDFUtils::RectToArray(const SkRect& r) {
+ return SkPDFMakeArray(r.left(), r.top(), r.right(), r.bottom());
+}
+
+std::unique_ptr<SkPDFArray> SkPDFUtils::MatrixToArray(const SkMatrix& matrix) {
+ SkScalar a[6];
+ if (!matrix.asAffine(a)) {
+ SkMatrix::SetAffineIdentity(a);
+ }
+ return SkPDFMakeArray(a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+void SkPDFUtils::MoveTo(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" m\n");
+}
+
+void SkPDFUtils::AppendLine(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" l\n");
+}
+
+static void append_cubic(SkScalar ctl1X, SkScalar ctl1Y,
+ SkScalar ctl2X, SkScalar ctl2Y,
+ SkScalar dstX, SkScalar dstY, SkWStream* content) {
+ SkString cmd("y\n");
+ SkPDFUtils::AppendScalar(ctl1X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl1Y, content);
+ content->writeText(" ");
+ if (ctl2X != dstX || ctl2Y != dstY) {
+ cmd.set("c\n");
+ SkPDFUtils::AppendScalar(ctl2X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl2Y, content);
+ content->writeText(" ");
+ }
+ SkPDFUtils::AppendScalar(dstX, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(dstY, content);
+ content->writeText(" ");
+ content->writeText(cmd.c_str());
+}
+
+static void append_quad(const SkPoint quad[], SkWStream* content) {
+ SkPoint cubic[4];
+ SkConvertQuadToCubic(quad, cubic);
+ append_cubic(cubic[1].fX, cubic[1].fY, cubic[2].fX, cubic[2].fY,
+ cubic[3].fX, cubic[3].fY, content);
+}
+
+void SkPDFUtils::AppendRectangle(const SkRect& rect, SkWStream* content) {
+ // Skia has (0,0) at the top left; PDF has it at the bottom left. Do the right thing.
+ SkScalar bottom = SkMinScalar(rect.fBottom, rect.fTop);
+
+ SkPDFUtils::AppendScalar(rect.fLeft, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(bottom, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.width(), content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.height(), content);
+ content->writeText(" re\n");
+}
+
+void SkPDFUtils::EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content,
+ SkScalar tolerance) {
+ if (path.isEmpty() && SkPaint::kFill_Style == paintStyle) {
+ SkPDFUtils::AppendRectangle({0, 0, 0, 0}, content);
+ return;
+ }
+ // Filling a path with no area still produces a visible mark in some PDF
+ // renderers, but Chrome expects such entities to draw nothing visible, so
+ // we detect those cases and discard the drawing for them.
+ // Specifically: moveTo(X), lineTo(Y) and moveTo(X), lineTo(X), lineTo(Y).
+
+ SkRect rect;
+ bool isClosed; // Both closure and direction need to be checked.
+ SkPath::Direction direction;
+ if (path.isRect(&rect, &isClosed, &direction) &&
+ isClosed &&
+ (SkPath::kCW_Direction == direction ||
+ SkPath::kEvenOdd_FillType == path.getFillType()))
+ {
+ SkPDFUtils::AppendRectangle(rect, content);
+ return;
+ }
+
+ enum SkipFillState {
+ kEmpty_SkipFillState,
+ kSingleLine_SkipFillState,
+ kNonSingleLine_SkipFillState,
+ };
+ SkipFillState fillState = kEmpty_SkipFillState;
+ //if (paintStyle != SkPaint::kFill_Style) {
+ // fillState = kNonSingleLine_SkipFillState;
+ //}
+ SkPoint lastMovePt = SkPoint::Make(0,0);
+ SkDynamicMemoryWStream currentSegment;
+ SkPoint args[4];
+ SkPath::Iter iter(path, false);
+ for (SkPath::Verb verb = iter.next(args);
+ verb != SkPath::kDone_Verb;
+ verb = iter.next(args)) {
+ // args gets all the points, even the implicit first point.
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ MoveTo(args[0].fX, args[0].fY, &currentSegment);
+ lastMovePt = args[0];
+ fillState = kEmpty_SkipFillState;
+ break;
+ case SkPath::kLine_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 2)) {
+ AppendLine(args[1].fX, args[1].fY, &currentSegment);
+ if ((fillState == kEmpty_SkipFillState) && (args[0] != lastMovePt)) {
+ fillState = kSingleLine_SkipFillState;
+ break;
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kQuad_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 3)) {
+ append_quad(args, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kConic_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 3)) {
+ SkAutoConicToQuads converter;
+ const SkPoint* quads = converter.computeQuads(args, iter.conicWeight(), tolerance);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ append_quad(&quads[i * 2], &currentSegment);
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 4)) {
+ append_cubic(args[1].fX, args[1].fY, args[2].fX, args[2].fY,
+ args[3].fX, args[3].fY, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ ClosePath(&currentSegment);
+ currentSegment.writeToStream(content);
+ currentSegment.reset();
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ }
+ if (currentSegment.bytesWritten() > 0) {
+ currentSegment.writeToStream(content);
+ }
+}
+
+void SkPDFUtils::ClosePath(SkWStream* content) {
+ content->writeText("h\n");
+}
+
+void SkPDFUtils::PaintPath(SkPaint::Style style, SkPath::FillType fill,
+ SkWStream* content) {
+ if (style == SkPaint::kFill_Style) {
+ content->writeText("f");
+ } else if (style == SkPaint::kStrokeAndFill_Style) {
+ content->writeText("B");
+ } else if (style == SkPaint::kStroke_Style) {
+ content->writeText("S");
+ }
+
+ if (style != SkPaint::kStroke_Style) {
+ NOT_IMPLEMENTED(fill == SkPath::kInverseEvenOdd_FillType, false);
+ NOT_IMPLEMENTED(fill == SkPath::kInverseWinding_FillType, false);
+ if (fill == SkPath::kEvenOdd_FillType) {
+ content->writeText("*");
+ }
+ }
+ content->writeText("\n");
+}
+
+void SkPDFUtils::StrokePath(SkWStream* content) {
+ SkPDFUtils::PaintPath(
+ SkPaint::kStroke_Style, SkPath::kWinding_FillType, content);
+}
+
+void SkPDFUtils::ApplyGraphicState(int objectIndex, SkWStream* content) {
+ SkPDFWriteResourceName(content, SkPDFResourceType::kExtGState, objectIndex);
+ content->writeText(" gs\n");
+}
+
+void SkPDFUtils::ApplyPattern(int objectIndex, SkWStream* content) {
+ // Select Pattern color space (CS, cs) and set pattern object as current
+ // color (SCN, scn)
+ content->writeText("/Pattern CS/Pattern cs");
+ SkPDFWriteResourceName(content, SkPDFResourceType::kPattern, objectIndex);
+ content->writeText(" SCN");
+ SkPDFWriteResourceName(content, SkPDFResourceType::kPattern, objectIndex);
+ content->writeText(" scn\n");
+}
+
+// return "x/pow(10, places)", given 0<x<pow(10, places)
+// result points to places+2 chars.
+static size_t print_permil_as_decimal(int x, char* result, unsigned places) {
+ result[0] = '.';
+ for (int i = places; i > 0; --i) {
+ result[i] = '0' + x % 10;
+ x /= 10;
+ }
+ int j;
+ for (j = places; j > 1; --j) {
+ if (result[j] != '0') {
+ break;
+ }
+ }
+ result[j + 1] = '\0';
+ return j + 1;
+}
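+// Worked example: print_permil_as_decimal(498, result, 3) writes ".498" and
+// returns 4, while print_permil_as_decimal(200, result, 3) trims the
+// trailing zeros, writes ".2", and returns 2.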
+
+
+static constexpr int int_pow(int base, unsigned exp, int acc = 1) {
+ return exp < 1 ? acc
+ : int_pow(base * base,
+ exp / 2,
+ (exp % 2) ? acc * base : acc);
+}
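+// int_pow computes base**exp at compile time by repeated squaring, e.g.
+// int_pow(10, 4) == 10000 in O(log exp) recursive steps. Note that the
+// intermediate base*base products can overflow for large exponents.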
+
+
+size_t SkPDFUtils::ColorToDecimalF(float value, char result[kFloatColorDecimalCount + 2]) {
+ static constexpr int kFactor = int_pow(10, kFloatColorDecimalCount);
+ int x = sk_float_round2int(value * kFactor);
+ if (x >= kFactor || x <= 0) { // clamp to 0-1
+ result[0] = x > 0 ? '1' : '0';
+ result[1] = '\0';
+ return 1;
+ }
+ return print_permil_as_decimal(x, result, kFloatColorDecimalCount);
+}
+
+size_t SkPDFUtils::ColorToDecimal(uint8_t value, char result[5]) {
+ if (value == 255 || value == 0) {
+ result[0] = value ? '1' : '0';
+ result[1] = '\0';
+ return 1;
+ }
+ // int x = 0.5 + (1000.0 / 255.0) * value;
+ int x = SkFixedRoundToInt((SK_Fixed1 * 1000 / 255) * value);
+ return print_permil_as_decimal(x, result, 3);
+}
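+// Worked example: ColorToDecimal(51, result) writes ".2" (51/255 is exactly
+// 0.2), and ColorToDecimal(127, result) writes ".498", since
+// round(1000 * 127 / 255) == 498.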
+
+bool SkPDFUtils::InverseTransformBBox(const SkMatrix& matrix, SkRect* bbox) {
+ SkMatrix inverse;
+ if (!matrix.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(bbox);
+ return true;
+}
+
+void SkPDFUtils::PopulateTilingPatternDict(SkPDFDict* pattern,
+ SkRect& bbox,
+ std::unique_ptr<SkPDFDict> resources,
+ const SkMatrix& matrix) {
+ const int kTiling_PatternType = 1;
+ const int kColoredTilingPattern_PaintType = 1;
+ const int kConstantSpacing_TilingType = 1;
+
+ pattern->insertName("Type", "Pattern");
+ pattern->insertInt("PatternType", kTiling_PatternType);
+ pattern->insertInt("PaintType", kColoredTilingPattern_PaintType);
+ pattern->insertInt("TilingType", kConstantSpacing_TilingType);
+ pattern->insertObject("BBox", SkPDFUtils::RectToArray(bbox));
+ pattern->insertScalar("XStep", bbox.width());
+ pattern->insertScalar("YStep", bbox.height());
+ pattern->insertObject("Resources", std::move(resources));
+ if (!matrix.isIdentity()) {
+ pattern->insertObject("Matrix", SkPDFUtils::MatrixToArray(matrix));
+ }
+}
+
+bool SkPDFUtils::ToBitmap(const SkImage* img, SkBitmap* dst) {
+ SkASSERT(img);
+ SkASSERT(dst);
+ SkBitmap bitmap;
+ if(as_IB(img)->getROPixels(&bitmap)) {
+ SkASSERT(bitmap.dimensions() == img->dimensions());
+ SkASSERT(!bitmap.drawsNothing());
+ *dst = std::move(bitmap);
+ return true;
+ }
+ return false;
+}
+
+#ifdef SK_PDF_BASE85_BINARY
+void SkPDFUtils::Base85Encode(std::unique_ptr<SkStreamAsset> stream, SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(stream);
+ dst->writeText("\n");
+ int column = 0;
+ while (true) {
+ uint8_t src[4] = {0, 0, 0, 0};
+ size_t count = stream->read(src, 4);
+ SkASSERT(count < 5);
+ if (0 == count) {
+ dst->writeText("~>\n");
+ return;
+ }
+ uint32_t v = ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
+ ((uint32_t)src[2] << 8) | src[3];
+ if (v == 0 && count == 4) {
+ dst->writeText("z");
+ column += 1;
+ } else {
+ char buffer[5];
+ for (int n = 4; n > 0; --n) {
+ buffer[n] = (v % 85) + '!';
+ v /= 85;
+ }
+ buffer[0] = v + '!';
+ dst->write(buffer, count + 1);
+ column += count + 1;
+ }
+ if (column > 74) {
+ dst->writeText("\n");
+ column = 0;
+ }
+ }
+}
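+// Worked example: the four input bytes "Man " pack into the 32-bit value
+// 1298230816, whose base-85 digits are 24, 73, 80, 78, 61, so the group is
+// written as "9jqo^". An all-zero group is abbreviated to 'z', and "~>"
+// terminates the stream.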
+#endif // SK_PDF_BASE85_BINARY
+
+void SkPDFUtils::AppendTransform(const SkMatrix& matrix, SkWStream* content) {
+ SkScalar values[6];
+ if (!matrix.asAffine(values)) {
+ SkMatrix::SetAffineIdentity(values);
+ }
+ for (SkScalar v : values) {
+ SkPDFUtils::AppendScalar(v, content);
+ content->writeText(" ");
+ }
+ content->writeText("cm\n");
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.h b/gfx/skia/skia/src/pdf/SkPDFUtils.h
new file mode 100644
index 0000000000..90b1c572eb
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFUtils_DEFINED
+#define SkPDFUtils_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStream.h"
+#include "src/core/SkUtils.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/utils/SkFloatToDecimal.h"
+
+class SkMatrix;
+class SkPDFArray;
+struct SkRect;
+
+template <typename T>
+bool SkPackedArrayEqual(T* u, T* v, size_t n) {
+ SkASSERT(u);
+ SkASSERT(v);
+ return 0 == memcmp(u, v, n * sizeof(T));
+}
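+// Note: since this uses memcmp, it compares object representations, so it is
+// only meaningful for trivially copyable element types without padding.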
+
+#if 0
+#define PRINT_NOT_IMPL(str) fprintf(stderr, str)
+#else
+#define PRINT_NOT_IMPL(str)
+#endif
+
+#define NOT_IMPLEMENTED(condition, assert) \
+ do { \
+ if ((bool)(condition)) { \
+ PRINT_NOT_IMPL("NOT_IMPLEMENTED: " #condition "\n"); \
+ SkDEBUGCODE(SkASSERT(!assert);) \
+ } \
+ } while (0)
+
+namespace SkPDFUtils {
+
+const char* BlendModeName(SkBlendMode);
+
+std::unique_ptr<SkPDFArray> RectToArray(const SkRect& rect);
+std::unique_ptr<SkPDFArray> MatrixToArray(const SkMatrix& matrix);
+
+void MoveTo(SkScalar x, SkScalar y, SkWStream* content);
+void AppendLine(SkScalar x, SkScalar y, SkWStream* content);
+void AppendRectangle(const SkRect& rect, SkWStream* content);
+void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content, SkScalar tolerance = 0.25f);
+inline void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ SkWStream* content, SkScalar tolerance = 0.25f) {
+ SkPDFUtils::EmitPath(path, paintStyle, true, content, tolerance);
+}
+void ClosePath(SkWStream* content);
+void PaintPath(SkPaint::Style style, SkPath::FillType fill,
+ SkWStream* content);
+void StrokePath(SkWStream* content);
+void ApplyGraphicState(int objectIndex, SkWStream* content);
+void ApplyPattern(int objectIndex, SkWStream* content);
+
+// Converts (value / 255.0) with three significant digits of accuracy.
+// Writes value as string into result. Returns strlen() of result.
+size_t ColorToDecimal(uint8_t value, char result[5]);
+
+static constexpr unsigned kFloatColorDecimalCount = 4;
+size_t ColorToDecimalF(float value, char result[kFloatColorDecimalCount + 2]);
+inline void AppendColorComponent(uint8_t value, SkWStream* wStream) {
+ char buffer[5];
+ size_t len = SkPDFUtils::ColorToDecimal(value, buffer);
+ wStream->write(buffer, len);
+}
+inline void AppendColorComponentF(float value, SkWStream* wStream) {
+ char buffer[kFloatColorDecimalCount + 2];
+ size_t len = SkPDFUtils::ColorToDecimalF(value, buffer);
+ wStream->write(buffer, len);
+}
+
+inline void AppendScalar(SkScalar value, SkWStream* stream) {
+ char result[kMaximumSkFloatToDecimalLength];
+ size_t len = SkFloatToDecimal(SkScalarToFloat(value), result);
+ SkASSERT(len < kMaximumSkFloatToDecimalLength);
+ stream->write(result, len);
+}
+
+inline void WriteUInt16BE(SkDynamicMemoryWStream* wStream, uint16_t value) {
+ char result[4] = { SkHexadecimalDigits::gUpper[ value >> 12 ],
+ SkHexadecimalDigits::gUpper[0xF & (value >> 8 )],
+ SkHexadecimalDigits::gUpper[0xF & (value >> 4 )],
+ SkHexadecimalDigits::gUpper[0xF & (value )] };
+ wStream->write(result, 4);
+}
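+// For example, WriteUInt16BE(stream, 0x0041) writes the four ASCII hex
+// digits "0041", the big-endian form used in PDF hex strings.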
+
+inline void WriteUInt8(SkDynamicMemoryWStream* wStream, uint8_t value) {
+ char result[2] = { SkHexadecimalDigits::gUpper[value >> 4],
+ SkHexadecimalDigits::gUpper[value & 0xF] };
+ wStream->write(result, 2);
+}
+
+inline void WriteUTF16beHex(SkDynamicMemoryWStream* wStream, SkUnichar utf32) {
+ uint16_t utf16[2] = {0, 0};
+ size_t len = SkUTF::ToUTF16(utf32, utf16);
+ SkASSERT(len == 1 || len == 2);
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[0]);
+ if (len == 2) {
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[1]);
+ }
+}
+
+inline SkMatrix GetShaderLocalMatrix(const SkShader* shader) {
+ SkMatrix localMatrix;
+ if (sk_sp<SkShader> s = as_SB(shader)->makeAsALocalMatrixShader(&localMatrix)) {
+ return SkMatrix::Concat(as_SB(s)->getLocalMatrix(), localMatrix);
+ }
+ return as_SB(shader)->getLocalMatrix();
+}
+bool InverseTransformBBox(const SkMatrix& matrix, SkRect* bbox);
+void PopulateTilingPatternDict(SkPDFDict* pattern,
+ SkRect& bbox,
+ std::unique_ptr<SkPDFDict> resources,
+ const SkMatrix& matrix);
+
+bool ToBitmap(const SkImage* img, SkBitmap* dst);
+
+#ifdef SK_PDF_BASE85_BINARY
+void Base85Encode(std::unique_ptr<SkStreamAsset> src, SkDynamicMemoryWStream* dst);
+#endif // SK_PDF_BASE85_BINARY
+
+void AppendTransform(const SkMatrix&, SkWStream*);
+} // namespace SkPDFUtils
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkUUID.h b/gfx/skia/skia/src/pdf/SkUUID.h
new file mode 100644
index 0000000000..3d81865dc0
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkUUID.h
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkUUID_DEFINED
+#define SkUUID_DEFINED
+
+#include <cstdint>
+#include <cstring>
+
+struct SkUUID {
+ uint8_t fData[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+};
+
+static inline bool operator==(const SkUUID& u, const SkUUID& v) {
+ return 0 == memcmp(u.fData, v.fData, sizeof(u.fData));
+}
+static inline bool operator!=(const SkUUID& u, const SkUUID& v) { return !(u == v); }
+
+#endif // SkUUID_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkDebug_android.cpp b/gfx/skia/skia/src/ports/SkDebug_android.cpp
new file mode 100644
index 0000000000..5e32174797
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_android.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdio.h>
+
+#define LOG_TAG "skia"
+#include <android/log.h>
+
+// Print debug output to stdout as well. This is useful for command line
+// applications (e.g. skia_launcher).
+bool gSkDebugToStdOut = false;
+
+void SkDebugf(const char format[], ...) {
+ va_list args1, args2;
+ va_start(args1, format);
+
+ if (gSkDebugToStdOut) {
+ va_copy(args2, args1);
+ vprintf(format, args2);
+ va_end(args2);
+ }
+
+ __android_log_vprint(ANDROID_LOG_DEBUG, LOG_TAG, format, args1);
+
+ va_end(args1);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_stdio.cpp b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
new file mode 100644
index 0000000000..1bba63cc0a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN) && !defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdarg.h>
+#include <stdio.h>
+
+void SkDebugf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+}
+#endif//!defined(SK_BUILD_FOR_WIN) && !defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_win.cpp b/gfx/skia/skia/src/ports/SkDebug_win.cpp
new file mode 100644
index 0000000000..ce2c980555
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_win.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/core/SkLeanWindows.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+static const size_t kBufferSize = 2048;
+
+void SkDebugf(const char format[], ...) {
+ char buffer[kBufferSize + 1];
+ va_list args;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ fflush(stderr); // stderr seems to be buffered on Windows.
+
+ va_start(args, format);
+ vsnprintf(buffer, kBufferSize, format, args);
+ va_end(args);
+
+ OutputDebugStringA(buffer);
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
new file mode 100644
index 0000000000..794775a02a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkDiscardableMemory.h"
+#include "src/lazy/SkDiscardableMemoryPool.h"
+
+SkDiscardableMemory* SkDiscardableMemory::Create(size_t bytes) {
+ return SkGetGlobalDiscardableMemoryPool()->create(bytes);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
new file mode 100644
index 0000000000..0ec9c9a547
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkRefCnt.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/private/SkMutex.h"
+
+static SkMutex& font_config_interface_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+static SkFontConfigInterface* gFontConfigInterface;
+
+sk_sp<SkFontConfigInterface> SkFontConfigInterface::RefGlobal() {
+ SkAutoMutexExclusive ac(font_config_interface_mutex());
+
+ if (gFontConfigInterface) {
+ return sk_ref_sp(gFontConfigInterface);
+ }
+ return sk_ref_sp(SkFontConfigInterface::GetSingletonDirectInterface());
+}
+
+void SkFontConfigInterface::SetGlobal(sk_sp<SkFontConfigInterface> fc) {
+ SkAutoMutexExclusive ac(font_config_interface_mutex());
+
+ SkSafeUnref(gFontConfigInterface);
+ gFontConfigInterface = fc.release();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
new file mode 100644
index 0000000000..84fd11b1f3
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
@@ -0,0 +1,710 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkBuffer.h"
+#include "src/ports/SkFontConfigInterface_direct.h"
+
+#include <fontconfig/fontconfig.h>
+#include <unistd.h>
+
+#ifdef SK_DEBUG
+# include "src/core/SkTLS.h"
+#endif
+
+namespace {
+
+// Fontconfig is not threadsafe before 2.10.91, so on older versions we guard
+// every fontconfig call with a global mutex.
+// See https://bug.skia.org/1497 for background.
+static SkMutex& f_c_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+#ifdef SK_DEBUG
+void* CreateThreadFcLocked() { return new bool(false); }
+void DeleteThreadFcLocked(void* v) { delete static_cast<bool*>(v); }
+# define THREAD_FC_LOCKED \
+ static_cast<bool*>(SkTLS::Get(CreateThreadFcLocked, DeleteThreadFcLocked))
+#endif
+
+struct FCLocker {
+ // Assume FcGetVersion() has always been thread safe.
+
+ FCLocker() {
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().acquire();
+ } else {
+ SkDEBUGCODE(bool* threadLocked = THREAD_FC_LOCKED);
+ SkASSERT(false == *threadLocked);
+ SkDEBUGCODE(*threadLocked = true);
+ }
+ }
+
+ ~FCLocker() {
+ AssertHeld();
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().release();
+ } else {
+ SkDEBUGCODE(*THREAD_FC_LOCKED = false);
+ }
+ }
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().assertHeld();
+ } else {
+ SkASSERT(true == *THREAD_FC_LOCKED);
+ }
+ ) }
+};
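+// Typical use: construct an FCLocker on the stack before a run of fontconfig
+// calls and let its destructor release the lock, e.g.
+//   {
+//       FCLocker lock;
+//       FcPattern* pattern = FcPatternCreate();
+//       /* ... query fontconfig ... */
+//       FcPatternDestroy(pattern);
+//   }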
+
+} // namespace
+
+size_t SkFontConfigInterface::FontIdentity::writeToMemory(void* addr) const {
+ size_t size = sizeof(fID) + sizeof(fTTCIndex);
+ size += sizeof(int32_t) + sizeof(int32_t) + sizeof(uint8_t); // weight, width, italic
+ size += sizeof(int32_t) + fString.size(); // store length+data
+ if (addr) {
+ SkWBuffer buffer(addr, size);
+
+ buffer.write32(fID);
+ buffer.write32(fTTCIndex);
+ buffer.write32(fString.size());
+ buffer.write32(fStyle.weight());
+ buffer.write32(fStyle.width());
+ buffer.write8(fStyle.slant());
+ buffer.write(fString.c_str(), fString.size());
+ buffer.padToAlign4();
+
+ SkASSERT(buffer.pos() == size);
+ }
+ return size;
+}
+
+size_t SkFontConfigInterface::FontIdentity::readFromMemory(const void* addr,
+ size_t size) {
+ SkRBuffer buffer(addr, size);
+
+ (void)buffer.readU32(&fID);
+ (void)buffer.readS32(&fTTCIndex);
+ uint32_t strLen, weight, width;
+ (void)buffer.readU32(&strLen);
+ (void)buffer.readU32(&weight);
+ (void)buffer.readU32(&width);
+ uint8_t u8;
+ (void)buffer.readU8(&u8);
+ SkFontStyle::Slant slant = (SkFontStyle::Slant)u8;
+ fStyle = SkFontStyle(weight, width, slant);
+ fString.resize(strLen);
+ (void)buffer.read(fString.writable_str(), strLen);
+ buffer.skipToAlign4();
+
+ return buffer.pos(); // the actual number of bytes read
+}
+
+#ifdef SK_DEBUG
+static void make_iden(SkFontConfigInterface::FontIdentity* iden) {
+ iden->fID = 10;
+ iden->fTTCIndex = 2;
+ iden->fString.set("Hello world");
+ iden->fStyle = SkFontStyle(300, 6, SkFontStyle::kItalic_Slant);
+}
+
+static void test_writeToMemory(const SkFontConfigInterface::FontIdentity& iden0,
+ int initValue) {
+ SkFontConfigInterface::FontIdentity iden1;
+
+ size_t size0 = iden0.writeToMemory(nullptr);
+
+ SkAutoMalloc storage(size0);
+ memset(storage.get(), initValue, size0);
+
+ size_t size1 = iden0.writeToMemory(storage.get());
+ SkASSERT(size0 == size1);
+
+ SkASSERT(iden0 != iden1);
+ size_t size2 = iden1.readFromMemory(storage.get(), size1);
+ SkASSERT(size2 == size1);
+ SkASSERT(iden0 == iden1);
+}
+
+static void fontconfiginterface_unittest() {
+ SkFontConfigInterface::FontIdentity iden0, iden1;
+
+ SkASSERT(iden0 == iden1);
+
+ make_iden(&iden0);
+ SkASSERT(iden0 != iden1);
+
+ make_iden(&iden1);
+ SkASSERT(iden0 == iden1);
+
+ test_writeToMemory(iden0, 0);
+ test_writeToMemory(iden0, 0);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns the string from the pattern, or nullptr
+static const char* get_string(FcPattern* pattern, const char field[], int index = 0) {
+ const char* name;
+ if (FcPatternGetString(pattern, field, index, (FcChar8**)&name) != FcResultMatch) {
+ name = nullptr;
+ }
+ return name;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// Equivalence classes, used to match the Liberation and other fonts
+// with their metric-compatible replacements. See the discussion in
+// GetFontEquivClass().
+enum FontEquivClass
+{
+ OTHER,
+ SANS,
+ SERIF,
+ MONO,
+ SYMBOL,
+ PGOTHIC,
+ GOTHIC,
+ PMINCHO,
+ MINCHO,
+ SIMSUN,
+ NSIMSUN,
+ SIMHEI,
+ PMINGLIU,
+ MINGLIU,
+ PMINGLIUHK,
+ MINGLIUHK,
+ CAMBRIA,
+ CALIBRI,
+};
+
+// Match the font name against a whitelist of fonts, returning the
+// equivalence class.
+FontEquivClass GetFontEquivClass(const char* fontname)
+{
+ // It would be nice for fontconfig to tell us whether a given suggested
+ // replacement is a "strong" match (that is, an equivalent font) or
+ // a "weak" match (that is, fontconfig's next-best attempt at finding a
+ // substitute). However, I played around with the fontconfig API for
+ // a good few hours and could not make it reveal this information.
+ //
+ // So instead, we hardcode. Initially this function emulated
+ // /etc/fonts/conf.d/30-metric-aliases.conf
+ // from my Ubuntu system, but we're better off being very conservative.
+
+ // Arimo, Tinos and Cousine are a set of fonts metric-compatible with
+ // Arial, Times New Roman and Courier New with a character repertoire
+ // much larger than Liberation. Note that Cousine is metrically
+ // compatible with Courier New, but the former is sans-serif while
+ // the latter is serif.
+
+
+ struct FontEquivMap {
+ FontEquivClass clazz;
+ const char name[40];
+ };
+
+ static const FontEquivMap kFontEquivMap[] = {
+ { SANS, "Arial" },
+ { SANS, "Arimo" },
+ { SANS, "Liberation Sans" },
+
+ { SERIF, "Times New Roman" },
+ { SERIF, "Tinos" },
+ { SERIF, "Liberation Serif" },
+
+ { MONO, "Courier New" },
+ { MONO, "Cousine" },
+ { MONO, "Liberation Mono" },
+
+ { SYMBOL, "Symbol" },
+ { SYMBOL, "Symbol Neu" },
+
+ // MS Pゴシック
+ { PGOTHIC, "MS PGothic" },
+ { PGOTHIC, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { PGOTHIC, "Noto Sans CJK JP" },
+ { PGOTHIC, "IPAPGothic" },
+ { PGOTHIC, "MotoyaG04Gothic" },
+
+ // MS ゴシック
+ { GOTHIC, "MS Gothic" },
+ { GOTHIC, "\xef\xbc\xad\xef\xbc\xb3 "
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { GOTHIC, "Noto Sans Mono CJK JP" },
+ { GOTHIC, "IPAGothic" },
+ { GOTHIC, "MotoyaG04GothicMono" },
+
+ // MS P明朝
+ { PMINCHO, "MS PMincho" },
+ { PMINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe6\x98\x8e\xe6\x9c\x9d"},
+ { PMINCHO, "Noto Serif CJK JP" },
+ { PMINCHO, "IPAPMincho" },
+ { PMINCHO, "MotoyaG04Mincho" },
+
+ // MS 明朝
+ { MINCHO, "MS Mincho" },
+ { MINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xe6\x98\x8e\xe6\x9c\x9d" },
+ { MINCHO, "Noto Serif CJK JP" },
+ { MINCHO, "IPAMincho" },
+ { MINCHO, "MotoyaG04MinchoMono" },
+
+ // 宋体
+ { SIMSUN, "Simsun" },
+ { SIMSUN, "\xe5\xae\x8b\xe4\xbd\x93" },
+ { SIMSUN, "Noto Serif CJK SC" },
+ { SIMSUN, "MSung GB18030" },
+ { SIMSUN, "Song ASC" },
+
+ // 新宋体
+ { NSIMSUN, "NSimsun" },
+ { NSIMSUN, "\xe6\x96\xb0\xe5\xae\x8b\xe4\xbd\x93" },
+ { NSIMSUN, "Noto Serif CJK SC" },
+ { NSIMSUN, "MSung GB18030" },
+ { NSIMSUN, "N Song ASC" },
+
+ // 黑体
+ { SIMHEI, "Simhei" },
+ { SIMHEI, "\xe9\xbb\x91\xe4\xbd\x93" },
+ { SIMHEI, "Noto Sans CJK SC" },
+ { SIMHEI, "MYingHeiGB18030" },
+ { SIMHEI, "MYingHeiB5HK" },
+
+ // 新細明體
+ { PMINGLIU, "PMingLiU"},
+ { PMINGLIU, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { PMINGLIU, "Noto Serif CJK TC"},
+ { PMINGLIU, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIU, "MingLiU"},
+ { MINGLIU, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { MINGLIU, "Noto Serif CJK TC"},
+ { MINGLIU, "MSung B5HK"},
+
+ // 新細明體
+ { PMINGLIUHK, "PMingLiU_HKSCS"},
+ { PMINGLIUHK, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { PMINGLIUHK, "Noto Serif CJK TC"},
+ { PMINGLIUHK, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIUHK, "MingLiU_HKSCS"},
+ { MINGLIUHK, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { MINGLIUHK, "Noto Serif CJK TC"},
+ { MINGLIUHK, "MSung B5HK"},
+
+ // Cambria
+ { CAMBRIA, "Cambria" },
+ { CAMBRIA, "Caladea" },
+
+ // Calibri
+ { CALIBRI, "Calibri" },
+ { CALIBRI, "Carlito" },
+ };
+
+ static const size_t kFontCount =
+ sizeof(kFontEquivMap)/sizeof(kFontEquivMap[0]);
+
+ // TODO(jungshik): If this loop turns out to be hot, turn
+ // the array to a static (hash)map to speed it up.
+ for (size_t i = 0; i < kFontCount; ++i) {
+ if (strcasecmp(kFontEquivMap[i].name, fontname) == 0)
+ return kFontEquivMap[i].clazz;
+ }
+ return OTHER;
+}
+
+
+// Return true if |font_a| and |font_b| are interchangeable, both visually
+// and at the metrics level.
+bool IsMetricCompatibleReplacement(const char* font_a, const char* font_b)
+{
+ FontEquivClass class_a = GetFontEquivClass(font_a);
+ FontEquivClass class_b = GetFontEquivClass(font_b);
+
+ return class_a != OTHER && class_a == class_b;
+}
+
+// Normally we only return exactly the font asked for. In last-resort
+// cases, the request either doesn't specify a font or is one of the
+// basic font names like "Sans", "Serif" or "Monospace". This function
+// tells you whether a given request is for such a fallback.
+bool IsFallbackFontAllowed(const SkString& family) {
+ const char* family_cstr = family.c_str();
+ return family.isEmpty() ||
+ strcasecmp(family_cstr, "sans") == 0 ||
+ strcasecmp(family_cstr, "serif") == 0 ||
+ strcasecmp(family_cstr, "monospace") == 0;
+}
+
+// Returns the int value for |object| from |pattern|, or |missing| if the
+// property is not present.
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static SkScalar map_range(SkScalar value,
+ SkScalar old_min, SkScalar old_max,
+ SkScalar new_min, SkScalar new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + ((value - old_min) * (new_max - new_min) / (old_max - old_min));
+}
+
+struct MapRanges {
+ SkScalar old_val;
+ SkScalar new_val;
+};
+
+static SkScalar map_ranges(SkScalar val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return ranges[rangesCount-1].new_val;
+}
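+// Worked example: with the weightRanges table below, a fontconfig weight of
+// 90 falls halfway between FC_WEIGHT_REGULAR (80) and FC_WEIGHT_MEDIUM (100),
+// so it maps halfway between SkFontStyle::kNormal_Weight (400) and
+// kMedium_Weight (500), yielding 450.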
+
+#ifndef FC_WEIGHT_DEMILIGHT
+#define FC_WEIGHT_DEMILIGHT 65
+#endif
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static constexpr MapRanges weightRanges[] = {
+ { FC_WEIGHT_THIN, SkFS::kThin_Weight },
+ { FC_WEIGHT_EXTRALIGHT, SkFS::kExtraLight_Weight },
+ { FC_WEIGHT_LIGHT, SkFS::kLight_Weight },
+ { FC_WEIGHT_DEMILIGHT, 350 },
+ { FC_WEIGHT_BOOK, 380 },
+ { FC_WEIGHT_REGULAR, SkFS::kNormal_Weight },
+ { FC_WEIGHT_MEDIUM, SkFS::kMedium_Weight },
+ { FC_WEIGHT_DEMIBOLD, SkFS::kSemiBold_Weight },
+ { FC_WEIGHT_BOLD, SkFS::kBold_Weight },
+ { FC_WEIGHT_EXTRABOLD, SkFS::kExtraBold_Weight },
+ { FC_WEIGHT_BLACK, SkFS::kBlack_Weight },
+ { FC_WEIGHT_EXTRABLACK, SkFS::kExtraBlack_Weight },
+ };
+ SkScalar weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { FC_WIDTH_ULTRACONDENSED, SkFS::kUltraCondensed_Width },
+ { FC_WIDTH_EXTRACONDENSED, SkFS::kExtraCondensed_Width },
+ { FC_WIDTH_CONDENSED, SkFS::kCondensed_Width },
+ { FC_WIDTH_SEMICONDENSED, SkFS::kSemiCondensed_Width },
+ { FC_WIDTH_NORMAL, SkFS::kNormal_Width },
+ { FC_WIDTH_SEMIEXPANDED, SkFS::kSemiExpanded_Width },
+ { FC_WIDTH_EXPANDED, SkFS::kExpanded_Width },
+ { FC_WIDTH_EXTRAEXPANDED, SkFS::kExtraExpanded_Width },
+ { FC_WIDTH_ULTRAEXPANDED, SkFS::kUltraExpanded_Width },
+ };
+ SkScalar width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(SkScalarRoundToInt(weight), SkScalarRoundToInt(width), slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static constexpr MapRanges weightRanges[] = {
+ { SkFS::kThin_Weight, FC_WEIGHT_THIN },
+ { SkFS::kExtraLight_Weight, FC_WEIGHT_EXTRALIGHT },
+ { SkFS::kLight_Weight, FC_WEIGHT_LIGHT },
+ { 350, FC_WEIGHT_DEMILIGHT },
+ { 380, FC_WEIGHT_BOOK },
+ { SkFS::kNormal_Weight, FC_WEIGHT_REGULAR },
+ { SkFS::kMedium_Weight, FC_WEIGHT_MEDIUM },
+ { SkFS::kSemiBold_Weight, FC_WEIGHT_DEMIBOLD },
+ { SkFS::kBold_Weight, FC_WEIGHT_BOLD },
+ { SkFS::kExtraBold_Weight, FC_WEIGHT_EXTRABOLD },
+ { SkFS::kBlack_Weight, FC_WEIGHT_BLACK },
+ { SkFS::kExtraBlack_Weight, FC_WEIGHT_EXTRABLACK },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { SkFS::kUltraCondensed_Width, FC_WIDTH_ULTRACONDENSED },
+ { SkFS::kExtraCondensed_Width, FC_WIDTH_EXTRACONDENSED },
+ { SkFS::kCondensed_Width, FC_WIDTH_CONDENSED },
+ { SkFS::kSemiCondensed_Width, FC_WIDTH_SEMICONDENSED },
+ { SkFS::kNormal_Width, FC_WIDTH_NORMAL },
+ { SkFS::kSemiExpanded_Width, FC_WIDTH_SEMIEXPANDED },
+ { SkFS::kExpanded_Width, FC_WIDTH_EXPANDED },
+ { SkFS::kExtraExpanded_Width, FC_WIDTH_EXTRAEXPANDED },
+ { SkFS::kUltraExpanded_Width, FC_WIDTH_ULTRAEXPANDED },
+ };
+ int width = map_ranges(style.width(), widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxFontFamilyLength 2048
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+const char* kFontFormatTrueType = "TrueType";
+const char* kFontFormatCFF = "CFF";
+#endif
+
+SkFontConfigInterfaceDirect::SkFontConfigInterfaceDirect() {
+ FCLocker lock;
+
+ FcInit();
+
+ SkDEBUGCODE(fontconfiginterface_unittest();)
+}
+
+SkFontConfigInterfaceDirect::~SkFontConfigInterfaceDirect() {
+}
+
+bool SkFontConfigInterfaceDirect::isAccessible(const char* filename) {
+ if (access(filename, R_OK) != 0) {
+ return false;
+ }
+ return true;
+}
+
+bool SkFontConfigInterfaceDirect::isValidPattern(FcPattern* pattern) {
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+ const char* font_format = get_string(pattern, FC_FONTFORMAT);
+ if (font_format
+ && strcmp(font_format, kFontFormatTrueType) != 0
+ && strcmp(font_format, kFontFormatCFF) != 0)
+ {
+ return false;
+ }
+#endif
+
+ // fontconfig can also return fonts which are unreadable
+ const char* c_filename = get_string(pattern, FC_FILE);
+ if (!c_filename) {
+ return false;
+ }
+ const char* sysroot = (const char*)FcConfigGetSysRoot(nullptr);
+ SkString resolvedFilename;
+ if (sysroot) {
+ resolvedFilename = sysroot;
+ resolvedFilename += c_filename;
+ c_filename = resolvedFilename.c_str();
+ }
+ return this->isAccessible(c_filename);
+}
+
+// Find matching font from |font_set| for the given font family.
+FcPattern* SkFontConfigInterfaceDirect::MatchFont(FcFontSet* font_set,
+ const char* post_config_family,
+ const SkString& family) {
+ // Older versions of fontconfig have a bug where they cannot select only
+ // scalable fonts, so we have to filter the results manually.
+ FcPattern* match = nullptr;
+ for (int i = 0; i < font_set->nfont; ++i) {
+ FcPattern* current = font_set->fonts[i];
+ if (this->isValidPattern(current)) {
+ match = current;
+ break;
+ }
+ }
+
+ if (match && !IsFallbackFontAllowed(family)) {
+ bool acceptable_substitute = false;
+ for (int id = 0; id < 255; ++id) {
+ const char* post_match_family = get_string(match, FC_FAMILY, id);
+ if (!post_match_family)
+ break;
+ acceptable_substitute =
+ (strcasecmp(post_config_family, post_match_family) == 0 ||
+ // Workaround for Issue 12530:
+ // requested family: "Bitstream Vera Sans"
+ // post_config_family: "Arial"
+ // post_match_family: "Bitstream Vera Sans"
+ // -> We should treat this case as a good match.
+ strcasecmp(family.c_str(), post_match_family) == 0) ||
+ IsMetricCompatibleReplacement(family.c_str(), post_match_family);
+ if (acceptable_substitute)
+ break;
+ }
+ if (!acceptable_substitute)
+ return nullptr;
+ }
+
+ return match;
+}
+
+bool SkFontConfigInterfaceDirect::matchFamilyName(const char familyName[],
+ SkFontStyle style,
+ FontIdentity* outIdentity,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) {
+ SkString familyStr(familyName ? familyName : "");
+ if (familyStr.size() > kMaxFontFamilyLength) {
+ return false;
+ }
+
+ FCLocker lock;
+
+ FcPattern* pattern = FcPatternCreate();
+
+ if (familyName) {
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ FcPatternAddBool(pattern, FC_SCALABLE, FcTrue);
+
+ FcConfigSubstitute(nullptr, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ // Font matching:
+ // CSS often specifies a fallback list of families:
+ // font-family: a, b, c, serif;
+ // However, fontconfig will always do its best to find *a* font when asked
+ // for something, so we need a way to tell whether the match it has found is
+ // "good enough" for us. Otherwise, we can return nullptr, which gets piped up
+ // and lets WebKit know to try the next CSS family name. However, fontconfig
+ // configs allow substitutions (mapping "Arial -> Helvetica" etc) and we
+ // wish to support that.
+ //
+ // Thus, if a specific family is requested we set @family_requested. Then we
+ // record two strings: the family name after config processing and the
+ // family name after resolving. If the two are equal, it's a good match.
+ //
+ // So consider the case where a user has mapped Arial to Helvetica in their
+ // config.
+ // requested family: "Arial"
+ // post_config_family: "Helvetica"
+ // post_match_family: "Helvetica"
+ // -> good match
+ //
+ // and for a missing font:
+ // requested family: "Monaco"
+ // post_config_family: "Monaco"
+ // post_match_family: "Times New Roman"
+ // -> BAD match
+ //
+ // However, we special-case fallback fonts; see IsFallbackFontAllowed().
+
+ const char* post_config_family = get_string(pattern, FC_FAMILY);
+ if (!post_config_family) {
+ // we can just continue with an empty name, e.g. default font
+ post_config_family = "";
+ }
+
+ FcResult result;
+ FcFontSet* font_set = FcFontSort(nullptr, pattern, 0, nullptr, &result);
+ if (!font_set) {
+ FcPatternDestroy(pattern);
+ return false;
+ }
+
+ FcPattern* match = this->MatchFont(font_set, post_config_family, familyStr);
+ if (!match) {
+ FcPatternDestroy(pattern);
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ FcPatternDestroy(pattern);
+
+ // From here out we just extract our results from 'match'
+
+ post_config_family = get_string(match, FC_FAMILY);
+ if (!post_config_family) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ const char* c_filename = get_string(match, FC_FILE);
+ if (!c_filename) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+ const char* sysroot = (const char*)FcConfigGetSysRoot(nullptr);
+ SkString resolvedFilename;
+ if (sysroot) {
+ resolvedFilename = sysroot;
+ resolvedFilename += c_filename;
+ c_filename = resolvedFilename.c_str();
+ }
+
+ int face_index = get_int(match, FC_INDEX, 0);
+
+ FcFontSetDestroy(font_set);
+
+ if (outIdentity) {
+ outIdentity->fTTCIndex = face_index;
+ outIdentity->fString.set(c_filename);
+ }
+ if (outFamilyName) {
+ outFamilyName->set(post_config_family);
+ }
+ if (outStyle) {
+ *outStyle = skfontstyle_from_fcpattern(match);
+ }
+ return true;
+}
+
+SkStreamAsset* SkFontConfigInterfaceDirect::openStream(const FontIdentity& identity) {
+ return SkStream::MakeFromFile(identity.fString.c_str()).release();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
new file mode 100644
index 0000000000..f31047f170
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+#ifndef SKFONTCONFIGINTERFACE_DIRECT_H_
+#define SKFONTCONFIGINTERFACE_DIRECT_H_
+
+#include "include/ports/SkFontConfigInterface.h"
+
+#include <fontconfig/fontconfig.h>
+
+class SkFontConfigInterfaceDirect : public SkFontConfigInterface {
+public:
+ SkFontConfigInterfaceDirect();
+ ~SkFontConfigInterfaceDirect() override;
+
+ bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) override;
+
+ SkStreamAsset* openStream(const FontIdentity&) override;
+
+protected:
+ virtual bool isAccessible(const char* filename);
+
+private:
+ bool isValidPattern(FcPattern* pattern);
+ FcPattern* MatchFont(FcFontSet* font_set, const char* post_config_family,
+ const SkString& family);
+ typedef SkFontConfigInterface INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
new file mode 100644
index 0000000000..7b70b64136
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkOnce.h"
+#include "src/ports/SkFontConfigInterface_direct.h"
+
+SkFontConfigInterface* SkFontConfigInterface::GetSingletonDirectInterface() {
+ static SkFontConfigInterface* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkFontConfigInterfaceDirect(); });
+ return singleton;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigTypeface.h b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
new file mode 100644
index 0000000000..269486de74
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontConfigTypeface_DEFINED
+#define SkFontConfigTypeface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+class SkFontDescriptor;
+
+class SkTypeface_FCI : public SkTypeface_FreeType {
+ sk_sp<SkFontConfigInterface> fFCI;
+ SkFontConfigInterface::FontIdentity fIdentity;
+ SkString fFamilyName;
+ std::unique_ptr<SkFontData> fFontData;
+
+public:
+ static SkTypeface_FCI* Create(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ SkString familyName,
+ const SkFontStyle& style)
+ {
+ return new SkTypeface_FCI(std::move(fci), fi, std::move(familyName), style);
+ }
+
+ static SkTypeface_FCI* Create(std::unique_ptr<SkFontData> data,
+ SkString familyName, SkFontStyle style, bool isFixedPitch)
+ {
+ return new SkTypeface_FCI(std::move(data), std::move(familyName), style, isFixedPitch);
+ }
+
+ const SkFontConfigInterface::FontIdentity& getIdentity() const {
+ return fIdentity;
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_sp<SkTypeface>(new SkTypeface_FCI(std::move(data),
+ fFamilyName,
+ this->fontStyle(),
+ this->isFixedPitch()));
+ }
+
+protected:
+ SkTypeface_FCI(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ SkString familyName,
+ const SkFontStyle& style)
+ : INHERITED(style, false)
+ , fFCI(std::move(fci))
+ , fIdentity(fi)
+ , fFamilyName(std::move(familyName))
+ , fFontData(nullptr) {}
+
+ SkTypeface_FCI(std::unique_ptr<SkFontData> data,
+ SkString familyName, SkFontStyle style, bool isFixedPitch)
+ : INHERITED(style, isFixedPitch)
+ , fFamilyName(std::move(familyName))
+ , fFontData(std::move(data))
+ {
+ SkASSERT(fFontData);
+ fIdentity.fTTCIndex = fFontData->getIndex();
+ }
+
+ void onGetFamilyName(SkString* familyName) const override { *familyName = fFamilyName; }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+
+private:
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+#endif // SkFontConfigTypeface_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
new file mode 100644
index 0000000000..34f9f47549
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
@@ -0,0 +1,2034 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkScalerContext.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/utils/SkCallableTraits.h"
+#include "src/utils/SkMatrix22.h"
+
+#include <memory>
+
+#include <ft2build.h>
+#include FT_ADVANCES_H
+#include FT_BITMAP_H
+#ifdef FT_COLOR_H
+# include FT_COLOR_H
+#endif
+#include FT_FREETYPE_H
+#include FT_LCD_FILTER_H
+#include FT_MODULE_H
+#include FT_MULTIPLE_MASTERS_H
+#include FT_OUTLINE_H
+#include FT_SIZES_H
+#include FT_SYSTEM_H
+#include FT_TRUETYPE_TABLES_H
+#include FT_TYPE1_TABLES_H
+#include FT_XFREE86_H
+
+// SK_FREETYPE_MINIMUM_RUNTIME_VERSION 0x<major><minor><patch><flags>
+// Flag SK_FREETYPE_DLOPEN: also try dlopen to get newer features.
+#define SK_FREETYPE_DLOPEN (0x1)
+#ifndef SK_FREETYPE_MINIMUM_RUNTIME_VERSION
+# if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || defined (SK_BUILD_FOR_GOOGLE3)
+# define SK_FREETYPE_MINIMUM_RUNTIME_VERSION (((FREETYPE_MAJOR) << 24) | ((FREETYPE_MINOR) << 16) | ((FREETYPE_PATCH) << 8))
+# else
+# define SK_FREETYPE_MINIMUM_RUNTIME_VERSION ((2 << 24) | (3 << 16) | (11 << 8) | (SK_FREETYPE_DLOPEN))
+# endif
+#endif
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION & SK_FREETYPE_DLOPEN
+# include <dlfcn.h>
+#endif
+
+// FT_LOAD_COLOR and the corresponding FT_Pixel_Mode::FT_PIXEL_MODE_BGRA
+// were introduced in FreeType 2.5.0.
+// The following may be removed once FreeType 2.5.0 is required to build.
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+// FT_LOAD_BITMAP_METRICS_ONLY was introduced in FreeType 2.7.1
+// The following may be removed once FreeType 2.7.1 is required to build.
+#ifndef FT_LOAD_BITMAP_METRICS_ONLY
+# define FT_LOAD_BITMAP_METRICS_ONLY ( 1L << 22 )
+#endif
+
+// FT_VAR_AXIS_FLAG_HIDDEN was introduced in FreeType 2.8.1
+// The variation axis should not be exposed to user interfaces.
+#ifndef FT_VAR_AXIS_FLAG_HIDDEN
+# define FT_VAR_AXIS_FLAG_HIDDEN 1
+#endif
+
+//#define ENABLE_GLYPH_SPEW // for tracing calls
+//#define DUMP_STRIKE_CREATION
+//#define SK_FONTHOST_FREETYPE_RUNTIME_VERSION
+//#define SK_GAMMA_APPLY_TO_A8
+
+#if 1
+ #define LOG_INFO(...)
+#else
+ #define LOG_INFO SkDEBUGF
+#endif
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static SkScalar SkFT_FixedToScalar(FT_Fixed x) {
+ return SkFixedToScalar(x);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+using FT_Alloc_size_t = SkCallableTraits<FT_Alloc_Func>::argument<1>::type;
+static_assert(std::is_same<FT_Alloc_size_t, long >::value ||
+ std::is_same<FT_Alloc_size_t, size_t>::value,"");
+
+extern "C" {
+ static void* sk_ft_alloc(FT_Memory, FT_Alloc_size_t size) {
+ return sk_malloc_throw(size);
+ }
+ static void sk_ft_free(FT_Memory, void* block) {
+ sk_free(block);
+ }
+ static void* sk_ft_realloc(FT_Memory, FT_Alloc_size_t cur_size,
+ FT_Alloc_size_t new_size, void* block) {
+ return sk_realloc_throw(block, new_size);
+ }
+};
+FT_MemoryRec_ gFTMemory = { nullptr, sk_ft_alloc, sk_ft_free, sk_ft_realloc };
+
+class FreeTypeLibrary : SkNoncopyable {
+public:
+ FreeTypeLibrary()
+ : fGetVarDesignCoordinates(nullptr)
+ , fGetVarAxisFlags(nullptr)
+ , fLibrary(nullptr)
+ , fIsLCDSupported(false)
+ , fLightHintingIsYOnly(false)
+ , fLCDExtra(0)
+ {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+
+ // When using dlsym
+ // *(void**)(&procPtr) = dlsym(self, "proc");
+ // is non-standard, but safe for POSIX. Cannot write
+ // *reinterpret_cast<void**>(&procPtr) = dlsym(self, "proc");
+ // because clang has not implemented DR573. See http://clang.llvm.org/cxx_dr_status.html .
+
+ FT_Int major, minor, patch;
+ FT_Library_Version(fLibrary, &major, &minor, &patch);
+
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION >= 0x02070100
+ fGetVarDesignCoordinates = FT_Get_Var_Design_Coordinates;
+#elif SK_FREETYPE_MINIMUM_RUNTIME_VERSION & SK_FREETYPE_DLOPEN
+ if (major > 2 || ((major == 2 && minor > 7) || (major == 2 && minor == 7 && patch >= 0))) {
+ //The FreeType library is already loaded, so symbols are available in process.
+ void* self = dlopen(nullptr, RTLD_LAZY);
+ if (self) {
+ *(void**)(&fGetVarDesignCoordinates) = dlsym(self, "FT_Get_Var_Design_Coordinates");
+ dlclose(self);
+ }
+ }
+#endif
+
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION >= 0x02070200
+ FT_Set_Default_Properties(fLibrary);
+#elif SK_FREETYPE_MINIMUM_RUNTIME_VERSION & SK_FREETYPE_DLOPEN
+ if (major > 2 || ((major == 2 && minor > 7) || (major == 2 && minor == 7 && patch >= 1))) {
+ //The FreeType library is already loaded, so symbols are available in process.
+ void* self = dlopen(nullptr, RTLD_LAZY);
+ if (self) {
+ FT_Set_Default_PropertiesProc setDefaultProperties;
+ *(void**)(&setDefaultProperties) = dlsym(self, "FT_Set_Default_Properties");
+ dlclose(self);
+
+ if (setDefaultProperties) {
+ setDefaultProperties(fLibrary);
+ }
+ }
+ }
+#endif
+
+// The 'light' hinting is vertical only starting in 2.8.0.
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION >= 0x02080000
+ fLightHintingIsYOnly = true;
+#else
+ if (major > 2 || ((major == 2 && minor > 8) || (major == 2 && minor == 8 && patch >= 0))) {
+ fLightHintingIsYOnly = true;
+ }
+#endif
+
+
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION >= 0x02080100
+ fGetVarAxisFlags = FT_Get_Var_Axis_Flags;
+#elif SK_FREETYPE_MINIMUM_RUNTIME_VERSION & SK_FREETYPE_DLOPEN
+ if (major > 2 || ((major == 2 && minor > 7) || (major == 2 && minor == 7 && patch >= 0))) {
+ //The FreeType library is already loaded, so symbols are available in process.
+ void* self = dlopen(nullptr, RTLD_LAZY);
+ if (self) {
+ *(void**)(&fGetVarAxisFlags) = dlsym(self, "FT_Get_Var_Axis_Flags");
+ dlclose(self);
+ }
+ }
+#endif
+
+ // Setup LCD filtering. This reduces color fringes for LCD smoothed glyphs.
+ // The default has changed over time, so this doesn't mean the same thing to all users.
+ if (FT_Library_SetLcdFilter(fLibrary, FT_LCD_FILTER_DEFAULT) == 0) {
+ fIsLCDSupported = true;
+ fLCDExtra = 2; //Using a filter adds one full pixel to each side.
+ }
+ }
+ ~FreeTypeLibrary() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+ }
+
+ FT_Library library() { return fLibrary; }
+ bool isLCDSupported() { return fIsLCDSupported; }
+ int lcdExtra() { return fLCDExtra; }
+ bool lightHintingIsYOnly() { return fLightHintingIsYOnly; }
+
+ // FT_Get_{MM,Var}_{Blend,Design}_Coordinates were added in FreeType 2.7.1.
+ // Prior to this there was no way to get the coordinates out of the FT_Face.
+ // This wasn't too bad because you needed to specify them anyway, and the clamp was provided.
+ // However, this doesn't work when face_index specifies named variations as introduced in 2.6.1.
+ using FT_Get_Var_Blend_CoordinatesProc = FT_Error (*)(FT_Face, FT_UInt, FT_Fixed*);
+ FT_Get_Var_Blend_CoordinatesProc fGetVarDesignCoordinates;
+
+ // FT_Get_Var_Axis_Flags was introduced in FreeType 2.8.1
+ // Get the ‘flags’ field of an OpenType Variation Axis Record.
+ // Not meaningful for Adobe MM fonts (‘*flags’ is always zero).
+ using FT_Get_Var_Axis_FlagsProc = FT_Error (*)(FT_MM_Var*, FT_UInt, FT_UInt*);
+ FT_Get_Var_Axis_FlagsProc fGetVarAxisFlags;
+
+private:
+ FT_Library fLibrary;
+ bool fIsLCDSupported;
+ bool fLightHintingIsYOnly;
+ int fLCDExtra;
+
+ // FT_Library_SetLcdFilterWeights was introduced in FreeType 2.4.0.
+ // The following platforms provide FreeType of at least 2.4.0.
+ // Ubuntu >= 11.04 (previous deprecated April 2013)
+ // Debian >= 6.0 (good)
+ // OpenSuse >= 11.4 (previous deprecated January 2012 / Nov 2013 for Evergreen 11.2)
+ // Fedora >= 14 (good)
+ // Android >= Gingerbread (good)
+ // RHEL >= 7 (6 has 2.3.11, EOL Nov 2020, Phase 3 May 2017)
+ using FT_Library_SetLcdFilterWeightsProc = FT_Error (*)(FT_Library, unsigned char*);
+
+ // FreeType added the ability to read global properties in 2.7.0. After 2.7.1 a means for users
+ // of FT_New_Library to request these global properties to be read was added.
+ using FT_Set_Default_PropertiesProc = void (*)(FT_Library);
+};
+
+struct SkFaceRec;
+
+static SkMutex& f_t_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+static FreeTypeLibrary* gFTLibrary;
+static SkFaceRec* gFaceRecHead;
+
+// Private to ref_ft_library and unref_ft_library
+static int gFTCount;
+
+// Caller must lock f_t_mutex() before calling this function.
+static bool ref_ft_library() {
+ f_t_mutex().assertHeld();
+ SkASSERT(gFTCount >= 0);
+
+ if (0 == gFTCount) {
+ SkASSERT(nullptr == gFTLibrary);
+ gFTLibrary = new FreeTypeLibrary;
+ }
+ ++gFTCount;
+ return gFTLibrary->library();
+}
+
+// Caller must lock f_t_mutex() before calling this function.
+static void unref_ft_library() {
+ f_t_mutex().assertHeld();
+ SkASSERT(gFTCount > 0);
+
+ --gFTCount;
+ if (0 == gFTCount) {
+ SkASSERT(nullptr == gFaceRecHead);
+ SkASSERT(nullptr != gFTLibrary);
+ delete gFTLibrary;
+ SkDEBUGCODE(gFTLibrary = nullptr;)
+ }
+}
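+// A minimal sketch of the intended call pattern (every caller in this file
+// follows it; the error handling here is illustrative only):
+//
+//   {
+//       SkAutoMutexExclusive ama(f_t_mutex());
+//       if (!ref_ft_library()) { return; }  // creates gFTLibrary on 0 -> 1
+//       // ... use gFTLibrary->library() ...
+//       unref_ft_library();                 // deletes gFTLibrary on 1 -> 0
+//   }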
+
+///////////////////////////////////////////////////////////////////////////
+
+struct SkFaceRec {
+ SkFaceRec* fNext;
+ std::unique_ptr<FT_FaceRec, SkFunctionWrapper<decltype(FT_Done_Face), FT_Done_Face>> fFace;
+ FT_StreamRec fFTStream;
+ std::unique_ptr<SkStreamAsset> fSkStream;
+ uint32_t fRefCnt;
+ uint32_t fFontID;
+
+    // FreeType prior to 2.7.1 does not implement retrieving variation design metrics.
+ // Cache the variation design metrics used to create the font if the user specifies them.
+ SkAutoSTMalloc<4, SkFixed> fAxes;
+ int fAxesCount;
+
+ // FreeType from 2.6.1 (14d6b5d7) until 2.7.0 (ee3f36f6b38) uses font_index for both font index
+ // and named variation index on input, but masks the named variation index part on output.
+ // Manually keep track of when a named variation is requested for 2.6.1 until 2.7.1.
+ bool fNamedVariationSpecified;
+
+ SkFaceRec(std::unique_ptr<SkStreamAsset> stream, uint32_t fontID);
+};
+
+extern "C" {
+ static unsigned long sk_ft_stream_io(FT_Stream ftStream,
+ unsigned long offset,
+ unsigned char* buffer,
+ unsigned long count)
+ {
+ SkStreamAsset* stream = static_cast<SkStreamAsset*>(ftStream->descriptor.pointer);
+
+ if (count) {
+ if (!stream->seek(offset)) {
+ return 0;
+ }
+ count = stream->read(buffer, count);
+ }
+ return count;
+ }
+
+ static void sk_ft_stream_close(FT_Stream) {}
+}
+
+SkFaceRec::SkFaceRec(std::unique_ptr<SkStreamAsset> stream, uint32_t fontID)
+ : fNext(nullptr), fSkStream(std::move(stream)), fRefCnt(1), fFontID(fontID)
+ , fAxesCount(0), fNamedVariationSpecified(false)
+{
+ sk_bzero(&fFTStream, sizeof(fFTStream));
+ fFTStream.size = fSkStream->getLength();
+ fFTStream.descriptor.pointer = fSkStream.get();
+ fFTStream.read = sk_ft_stream_io;
+ fFTStream.close = sk_ft_stream_close;
+}
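+// With this wiring, every read FreeType performs on the face goes through
+// sk_ft_stream_io above: FreeType hands fFTStream back to the callback, which
+// recovers the SkStreamAsset from descriptor.pointer to seek and read.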
+
+static void ft_face_setup_axes(SkFaceRec* rec, const SkFontData& data) {
+ if (!(rec->fFace->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return;
+ }
+
+ // If a named variation is requested, don't overwrite the named variation's position.
+ if (data.getIndex() > 0xFFFF) {
+ rec->fNamedVariationSpecified = true;
+ return;
+ }
+
+ SkDEBUGCODE(
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(rec->fFace.get(), &variations)) {
+ LOG_INFO("INFO: font %s claims variations, but none found.\n",
+ rec->fFace->family_name);
+ return;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ if (static_cast<FT_UInt>(data.getAxisCount()) != variations->num_axis) {
+ LOG_INFO("INFO: font %s has %d variations, but %d were specified.\n",
+ rec->fFace->family_name, variations->num_axis, data.getAxisCount());
+ return;
+ }
+ )
+
+ SkAutoSTMalloc<4, FT_Fixed> coords(data.getAxisCount());
+ for (int i = 0; i < data.getAxisCount(); ++i) {
+ coords[i] = data.getAxis()[i];
+ }
+ if (FT_Set_Var_Design_Coordinates(rec->fFace.get(), data.getAxisCount(), coords.get())) {
+ LOG_INFO("INFO: font %s has variations, but specified variations could not be set.\n",
+ rec->fFace->family_name);
+ return;
+ }
+
+ rec->fAxesCount = data.getAxisCount();
+ rec->fAxes.reset(rec->fAxesCount);
+ for (int i = 0; i < rec->fAxesCount; ++i) {
+ rec->fAxes[i] = data.getAxis()[i];
+ }
+}
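+// The index test above relies on FT_Open_Face's packing of the face index:
+// the low 16 bits select a face within a collection and bits 16-30 select a
+// named variation instance. For example, an index of (2 << 16) | 0 opens
+// face 0 at its second named instance, so any value above 0xFFFF means a
+// named variation was requested.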
+
+// Will return nullptr on failure
+// Caller must lock f_t_mutex() before calling this function.
+static SkFaceRec* ref_ft_face(const SkTypeface* typeface) {
+ f_t_mutex().assertHeld();
+
+ const SkFontID fontID = typeface->uniqueID();
+ SkFaceRec* cachedRec = gFaceRecHead;
+ while (cachedRec) {
+ if (cachedRec->fFontID == fontID) {
+ SkASSERT(cachedRec->fFace);
+ cachedRec->fRefCnt += 1;
+ return cachedRec;
+ }
+ cachedRec = cachedRec->fNext;
+ }
+
+ std::unique_ptr<SkFontData> data = typeface->makeFontData();
+ if (nullptr == data || !data->hasStream()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkFaceRec> rec(new SkFaceRec(data->detachStream(), fontID));
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+ const void* memoryBase = rec->fSkStream->getMemoryBase();
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = rec->fSkStream->getLength();
+ } else {
+ args.flags = FT_OPEN_STREAM;
+ args.stream = &rec->fFTStream;
+ }
+
+ {
+ FT_Face rawFace;
+ FT_Error err = FT_Open_Face(gFTLibrary->library(), &args, data->getIndex(), &rawFace);
+ if (err) {
+ SK_TRACEFTR(err, "unable to open font '%x'", fontID);
+ return nullptr;
+ }
+ rec->fFace.reset(rawFace);
+ }
+ SkASSERT(rec->fFace);
+
+ ft_face_setup_axes(rec.get(), *data);
+
+ // FreeType will set the charmap to the "most unicode" cmap if it exists.
+ // If there are no unicode cmaps, the charmap is set to nullptr.
+ // However, "symbol" cmaps should also be considered "fallback unicode" cmaps
+ // because they are effectively private use area only (even if they aren't).
+ // This is the last on the fallback list at
+ // https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
+ if (!rec->fFace->charmap) {
+ FT_Select_Charmap(rec->fFace.get(), FT_ENCODING_MS_SYMBOL);
+ }
+
+ rec->fNext = gFaceRecHead;
+ gFaceRecHead = rec.get();
+ return rec.release();
+}
+
+// Caller must lock f_t_mutex() before calling this function.
+// Marked extern because vc++ does not support internal linkage template parameters.
+extern /*static*/ void unref_ft_face(SkFaceRec* faceRec) {
+ f_t_mutex().assertHeld();
+
+ SkFaceRec* rec = gFaceRecHead;
+ SkFaceRec* prev = nullptr;
+ while (rec) {
+ SkFaceRec* next = rec->fNext;
+ if (rec->fFace == faceRec->fFace) {
+ if (--rec->fRefCnt == 0) {
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ gFaceRecHead = next;
+ }
+ delete rec;
+ }
+ return;
+ }
+ prev = rec;
+ rec = next;
+ }
+ SkDEBUGFAIL("shouldn't get here, face not in list");
+}
+
+class AutoFTAccess {
+public:
+ AutoFTAccess(const SkTypeface* tf) : fFaceRec(nullptr) {
+ f_t_mutex().acquire();
+ SkASSERT_RELEASE(ref_ft_library());
+ fFaceRec = ref_ft_face(tf);
+ }
+
+ ~AutoFTAccess() {
+ if (fFaceRec) {
+ unref_ft_face(fFaceRec);
+ }
+ unref_ft_library();
+ f_t_mutex().release();
+ }
+
+ FT_Face face() { return fFaceRec ? fFaceRec->fFace.get() : nullptr; }
+ int getAxesCount() { return fFaceRec ? fFaceRec->fAxesCount : 0; }
+ SkFixed* getAxes() { return fFaceRec ? fFaceRec->fAxes.get() : nullptr; }
+ bool isNamedVariationSpecified() {
+ return fFaceRec ? fFaceRec->fNamedVariationSpecified : false;
+ }
+
+private:
+ SkFaceRec* fFaceRec;
+};
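+// Typical use, as seen throughout this file: construct an AutoFTAccess on the
+// stack to hold f_t_mutex() and pin the FT_Face for the current scope.
+//
+//   AutoFTAccess fta(this);
+//   FT_Face face = fta.face();  // may be nullptr on failure
+//   if (!face) { /* bail out */ }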
+
+///////////////////////////////////////////////////////////////////////////
+
+class SkScalerContext_FreeType : public SkScalerContext_FreeType_Base {
+public:
+ SkScalerContext_FreeType(sk_sp<SkTypeface>,
+ const SkScalerContextEffects&,
+ const SkDescriptor* desc);
+ ~SkScalerContext_FreeType() override;
+
+ bool success() const {
+ return fFTSize != nullptr && fFace != nullptr;
+ }
+
+protected:
+ unsigned generateGlyphCount() override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyphID, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ using UnrefFTFace = SkFunctionWrapper<decltype(unref_ft_face), unref_ft_face>;
+ std::unique_ptr<SkFaceRec, UnrefFTFace> fFaceRec;
+
+ FT_Face fFace; // Borrowed face from gFaceRecHead.
+ FT_Size fFTSize; // The size on the fFace for this scaler.
+ FT_Int fStrikeIndex;
+
+ /** The rest of the matrix after FreeType handles the size.
+ * With outline font rasterization this is handled by FreeType with FT_Set_Transform.
+ * With bitmap only fonts this matrix must be applied to scale the bitmap.
+ */
+ SkMatrix fMatrix22Scalar;
+ /** Same as fMatrix22Scalar, but in FreeType units and space. */
+ FT_Matrix fMatrix22;
+ /** The actual size requested. */
+ SkVector fScale;
+
+ uint32_t fLoadGlyphFlags;
+ bool fDoLinearMetrics;
+ bool fLCDIsVert;
+
+ FT_Error setupSize();
+ void getBBoxForCurrentGlyph(const SkGlyph* glyph, FT_BBox* bbox,
+ bool snapToPixelBoundary = false);
+ bool getCBoxForLetter(char letter, FT_BBox* bbox);
+ // Caller must lock f_t_mutex() before calling this function.
+ void updateGlyphIfLCD(SkGlyph* glyph);
+ // Caller must lock f_t_mutex() before calling this function.
+ // update FreeType2 glyph slot with glyph emboldened
+ void emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph, SkGlyphID gid);
+ bool shouldSubpixelBitmap(const SkGlyph&, const SkMatrix&);
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+static bool canEmbed(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & (FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING |
+ FT_FSTYPE_BITMAP_EMBEDDING_ONLY)) == 0;
+}
+
+static bool canSubset(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & FT_FSTYPE_NO_SUBSETTING) == 0;
+}
+
+static SkAdvancedTypefaceMetrics::FontType get_font_type(FT_Face face) {
+ const char* fontType = FT_Get_X11_Font_Format(face);
+ static struct { const char* s; SkAdvancedTypefaceMetrics::FontType t; } values[] = {
+ { "Type 1", SkAdvancedTypefaceMetrics::kType1_Font },
+ { "CID Type 1", SkAdvancedTypefaceMetrics::kType1CID_Font },
+ { "CFF", SkAdvancedTypefaceMetrics::kCFF_Font },
+ { "TrueType", SkAdvancedTypefaceMetrics::kTrueType_Font },
+ };
+ for(const auto& v : values) { if (strcmp(fontType, v.s) == 0) { return v.t; } }
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface_FreeType::onGetAdvancedMetrics() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(new SkAdvancedTypefaceMetrics);
+ info->fPostScriptName.set(FT_Get_Postscript_Name(face));
+ info->fFontName = info->fPostScriptName;
+
+ if (FT_HAS_MULTIPLE_MASTERS(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag;
+ }
+ if (!canEmbed(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (!canSubset(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+
+ info->fType = get_font_type(face);
+ info->fStyle = (SkAdvancedTypefaceMetrics::StyleFlags)0;
+ if (FT_IS_FIXED_WIDTH(face)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+
+ PS_FontInfoRec psFontInfo;
+ TT_Postscript* postTable;
+ if (FT_Get_PS_Font_Info(face, &psFontInfo) == 0) {
+ info->fItalicAngle = psFontInfo.italic_angle;
+ } else if ((postTable = (TT_Postscript*)FT_Get_Sfnt_Table(face, ft_sfnt_post)) != nullptr) {
+ info->fItalicAngle = SkFixedFloorToInt(postTable->italicAngle);
+ } else {
+ info->fItalicAngle = 0;
+ }
+
+ info->fAscent = face->ascender;
+ info->fDescent = face->descender;
+
+ TT_PCLT* pcltTable;
+ TT_OS2* os2Table;
+ if ((pcltTable = (TT_PCLT*)FT_Get_Sfnt_Table(face, ft_sfnt_pclt)) != nullptr) {
+ info->fCapHeight = pcltTable->CapHeight;
+ uint8_t serif_style = pcltTable->SerifStyle & 0x3F;
+ if (2 <= serif_style && serif_style <= 6) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (9 <= serif_style && serif_style <= 12) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+ } else if (((os2Table = (TT_OS2*)FT_Get_Sfnt_Table(face, ft_sfnt_os2)) != nullptr) &&
+               // sCapHeight is available only in version 2 and later.
+ os2Table->version != 0xFFFF &&
+ os2Table->version >= 2)
+ {
+ info->fCapHeight = os2Table->sCapHeight;
+ }
+ info->fBBox = SkIRect::MakeLTRB(face->bbox.xMin, face->bbox.yMax,
+ face->bbox.xMax, face->bbox.yMin);
+ return info;
+}
+
+void SkTypeface_FreeType::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ SkASSERT(dstArray);
+ AutoFTAccess fta(this);
+    FT_Face face = fta.face();
+    if (!face) {
+        return;
+    }
+    FT_Long numGlyphs = face->num_glyphs;
+    sk_bzero(dstArray, sizeof(SkUnichar) * numGlyphs);
+
+ FT_UInt glyphIndex;
+ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
+ while (glyphIndex) {
+ SkASSERT(glyphIndex < SkToUInt(numGlyphs));
+ // Use the first character that maps to this glyphID. https://crbug.com/359065
+ if (0 == dstArray[glyphIndex]) {
+ dstArray[glyphIndex] = charCode;
+ }
+ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
+ }
+}
+
+void SkTypeface_FreeType::getPostScriptGlyphNames(SkString* dstArray) const {
+ SkASSERT(dstArray);
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (face && FT_HAS_GLYPH_NAMES(face)) {
+ for (int gID = 0; gID < face->num_glyphs; gID++) {
+ char glyphName[128]; // PS limit for names is 127 bytes.
+ FT_Get_Glyph_Name(face, gID, glyphName, 128);
+ dstArray[gID] = glyphName;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+SkScalerContext* SkTypeface_FreeType::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ auto c = skstd::make_unique<SkScalerContext_FreeType>(
+ sk_ref_sp(const_cast<SkTypeface_FreeType*>(this)), effects, desc);
+ if (!c->success()) {
+ return nullptr;
+ }
+ return c.release();
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FreeType::cloneFontData(
+ const SkFontArguments& args) const {
+ SkString name;
+ AutoFTAccess fta(this);
+    FT_Face face = fta.face();
+    if (!face) {
+        return nullptr;
+    }
+ Scanner::AxisDefinitions axisDefinitions;
+
+ if (!Scanner::GetAxes(face, &axisDefinitions)) {
+ return nullptr;
+ }
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, args.getVariationDesignPosition(),
+ axisValues, name);
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> stream = this->openStream(&ttcIndex);
+ return skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, axisValues.get(),
+ axisDefinitions.count());
+}
+
+void SkTypeface_FreeType::onFilterRec(SkScalerContextRec* rec) const {
+ //BOGUS: http://code.google.com/p/chromium/issues/detail?id=121119
+ //Cap the requested size as larger sizes give bogus values.
+ //Remove when http://code.google.com/p/skia/issues/detail?id=554 is fixed.
+ //Note that this also currently only protects against large text size requests,
+ //the total matrix is not taken into account here.
+ if (rec->fTextSize > SkIntToScalar(1 << 14)) {
+ rec->fTextSize = SkIntToScalar(1 << 14);
+ }
+
+ if (isLCD(*rec)) {
+ // TODO: re-work so that FreeType is set-up and selected by the SkFontMgr.
+ SkAutoMutexExclusive ama(f_t_mutex());
+ ref_ft_library();
+ if (!gFTLibrary->isLCDSupported()) {
+ // If the runtime Freetype library doesn't support LCD, disable it here.
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+ unref_ft_library();
+ }
+
+ SkFontHinting h = rec->getHinting();
+ if (SkFontHinting::kFull == h && !isLCD(*rec)) {
+ // collapse full->normal hinting if we're not doing LCD
+ h = SkFontHinting::kNormal;
+ }
+
+ // rotated text looks bad with hinting, so we disable it as needed
+ if (!isAxisAligned(*rec)) {
+ h = SkFontHinting::kNone;
+ }
+ rec->setHinting(h);
+
+#ifndef SK_GAMMA_APPLY_TO_A8
+ if (!isLCD(*rec)) {
+ // SRGBTODO: Is this correct? Do we want contrast boost?
+ rec->ignorePreBlend();
+ }
+#endif
+}
+
+int SkTypeface_FreeType::GetUnitsPerEm(FT_Face face) {
+ if (!face) {
+ return 0;
+ }
+
+ SkScalar upem = SkIntToScalar(face->units_per_EM);
+ // At least some versions of FreeType set face->units_per_EM to 0 for bitmap only fonts.
+ if (upem == 0) {
+ TT_Header* ttHeader = (TT_Header*)FT_Get_Sfnt_Table(face, ft_sfnt_head);
+ if (ttHeader) {
+ upem = SkIntToScalar(ttHeader->Units_Per_EM);
+ }
+ }
+ return upem;
+}
+
+int SkTypeface_FreeType::onGetUPEM() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ return GetUnitsPerEm(face);
+}
+
+bool SkTypeface_FreeType::onGetKerningPairAdjustments(const uint16_t glyphs[],
+ int count, int32_t adjustments[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face || !FT_HAS_KERNING(face)) {
+ return false;
+ }
+
+ for (int i = 0; i < count - 1; ++i) {
+ FT_Vector delta;
+ FT_Error err = FT_Get_Kerning(face, glyphs[i], glyphs[i+1],
+ FT_KERNING_UNSCALED, &delta);
+ if (err) {
+ return false;
+ }
+ adjustments[i] = delta.x;
+ }
+ return true;
+}
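+// With FT_KERNING_UNSCALED the deltas come back in raw font design units,
+// which matches SkTypeface's contract that kerning adjustments are reported
+// relative to units per em rather than the current size.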
+
+/** Returns the bitmap strike equal to or just larger than the requested size,
+ *  or the largest strike available if none is large enough. */
+static FT_Int chooseBitmapStrike(FT_Face face, FT_F26Dot6 scaleY) {
+ if (face == nullptr) {
+ LOG_INFO("chooseBitmapStrike aborted due to nullptr face.\n");
+ return -1;
+ }
+
+ FT_Pos requestedPPEM = scaleY; // FT_Bitmap_Size::y_ppem is in 26.6 format.
+ FT_Int chosenStrikeIndex = -1;
+ FT_Pos chosenPPEM = 0;
+ for (FT_Int strikeIndex = 0; strikeIndex < face->num_fixed_sizes; ++strikeIndex) {
+ FT_Pos strikePPEM = face->available_sizes[strikeIndex].y_ppem;
+ if (strikePPEM == requestedPPEM) {
+ // exact match - our search stops here
+ return strikeIndex;
+ } else if (chosenPPEM < requestedPPEM) {
+ // attempt to increase chosenPPEM
+ if (chosenPPEM < strikePPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ } else {
+ // attempt to decrease chosenPPEM, but not below requestedPPEM
+ if (requestedPPEM < strikePPEM && strikePPEM < chosenPPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ }
+ }
+ return chosenStrikeIndex;
+}
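+// Worked example: with strikes of 10, 16 and 24 ppem, a request for 16
+// returns the exact match; a request for 13 returns the 16 ppem strike (the
+// smallest strike not below the request); and a request for 30 returns the
+// 24 ppem strike (the largest available when nothing is big enough).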
+
+SkScalerContext_FreeType::SkScalerContext_FreeType(sk_sp<SkTypeface> typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext_FreeType_Base(std::move(typeface), effects, desc)
+ , fFace(nullptr)
+ , fFTSize(nullptr)
+ , fStrikeIndex(-1)
+{
+ SkAutoMutexExclusive ac(f_t_mutex());
+ SkASSERT_RELEASE(ref_ft_library());
+
+ fFaceRec.reset(ref_ft_face(this->getTypeface()));
+
+ // load the font file
+ if (nullptr == fFaceRec) {
+ LOG_INFO("Could not create FT_Face.\n");
+ return;
+ }
+
+ fLCDIsVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+ // compute the flags we send to Load_Glyph
+ bool linearMetrics = this->isLinearMetrics();
+ {
+ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
+
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
+ loadFlags = FT_LOAD_TARGET_MONO;
+ if (fRec.getHinting() == SkFontHinting::kNone) {
+ loadFlags = FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ }
+ } else {
+ switch (fRec.getHinting()) {
+ case SkFontHinting::kNone:
+ loadFlags = FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ break;
+ case SkFontHinting::kSlight:
+ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
+ if (gFTLibrary->lightHintingIsYOnly()) {
+ linearMetrics = true;
+ }
+ break;
+ case SkFontHinting::kNormal:
+ loadFlags = FT_LOAD_TARGET_NORMAL;
+ break;
+ case SkFontHinting::kFull:
+ loadFlags = FT_LOAD_TARGET_NORMAL;
+ if (isLCD(fRec)) {
+ if (fLCDIsVert) {
+ loadFlags = FT_LOAD_TARGET_LCD_V;
+ } else {
+ loadFlags = FT_LOAD_TARGET_LCD;
+ }
+ }
+ break;
+ default:
+ LOG_INFO("---------- UNKNOWN hinting %d\n", fRec.getHinting());
+ break;
+ }
+ }
+
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ } else {
+ loadFlags |= FT_LOAD_NO_AUTOHINT;
+#endif
+ }
+
+ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
+ loadFlags |= FT_LOAD_NO_BITMAP;
+ }
+
+ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
+ // advances, as fontconfig and cairo do.
+ // See http://code.google.com/p/skia/issues/detail?id=222.
+ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
+
+ // Use vertical layout if requested.
+ if (this->isVertical()) {
+ loadFlags |= FT_LOAD_VERTICAL_LAYOUT;
+ }
+
+ loadFlags |= FT_LOAD_COLOR;
+
+ fLoadGlyphFlags = loadFlags;
+ }
+
+ using DoneFTSize = SkFunctionWrapper<decltype(FT_Done_Size), FT_Done_Size>;
+ std::unique_ptr<skstd::remove_pointer_t<FT_Size>, DoneFTSize> ftSize([this]() -> FT_Size {
+ FT_Size size;
+ FT_Error err = FT_New_Size(fFaceRec->fFace.get(), &size);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_New_Size(%s) failed.", fFaceRec->fFace->family_name);
+ return nullptr;
+ }
+ return size;
+ }());
+ if (nullptr == ftSize) {
+ LOG_INFO("Could not create FT_Size.\n");
+ return;
+ }
+
+ FT_Error err = FT_Activate_Size(ftSize.get());
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Activate_Size(%s) failed.", fFaceRec->fFace->family_name);
+ return;
+ }
+
+ fRec.computeMatrices(SkScalerContextRec::kFull_PreMatrixScale, &fScale, &fMatrix22Scalar);
+ FT_F26Dot6 scaleX = SkScalarToFDot6(fScale.fX);
+ FT_F26Dot6 scaleY = SkScalarToFDot6(fScale.fY);
+
+ if (FT_IS_SCALABLE(fFaceRec->fFace)) {
+ err = FT_Set_Char_Size(fFaceRec->fFace.get(), scaleX, scaleY, 72, 72);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Set_CharSize(%s, %f, %f) failed.",
+ fFaceRec->fFace->family_name, fScale.fX, fScale.fY);
+ return;
+ }
+
+ // Adjust the matrix to reflect the actually chosen scale.
+        // FreeType currently does not allow requesting sizes less than 1; this allows for scaling.
+ // Don't do this at all sizes as that will interfere with hinting.
+ if (fScale.fX < 1 || fScale.fY < 1) {
+ SkScalar upem = fFaceRec->fFace->units_per_EM;
+ FT_Size_Metrics& ftmetrics = fFaceRec->fFace->size->metrics;
+ SkScalar x_ppem = upem * SkFT_FixedToScalar(ftmetrics.x_scale) / 64.0f;
+ SkScalar y_ppem = upem * SkFT_FixedToScalar(ftmetrics.y_scale) / 64.0f;
+ fMatrix22Scalar.preScale(fScale.x() / x_ppem, fScale.y() / y_ppem);
+ }
+
+ } else if (FT_HAS_FIXED_SIZES(fFaceRec->fFace)) {
+ fStrikeIndex = chooseBitmapStrike(fFaceRec->fFace.get(), scaleY);
+ if (fStrikeIndex == -1) {
+ LOG_INFO("No glyphs for font \"%s\" size %f.\n",
+ fFaceRec->fFace->family_name, fScale.fY);
+ return;
+ }
+
+ err = FT_Select_Size(fFaceRec->fFace.get(), fStrikeIndex);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Select_Size(%s, %d) failed.",
+ fFaceRec->fFace->family_name, fStrikeIndex);
+ fStrikeIndex = -1;
+ return;
+ }
+
+ // Adjust the matrix to reflect the actually chosen scale.
+ // It is likely that the ppem chosen was not the one requested, this allows for scaling.
+ fMatrix22Scalar.preScale(fScale.x() / fFaceRec->fFace->size->metrics.x_ppem,
+ fScale.y() / fFaceRec->fFace->size->metrics.y_ppem);
+
+ // FreeType does not provide linear metrics for bitmap fonts.
+ linearMetrics = false;
+
+ // FreeType documentation says:
+ // FT_LOAD_NO_BITMAP -- Ignore bitmap strikes when loading.
+ // Bitmap-only fonts ignore this flag.
+ //
+ // However, in FreeType 2.5.1 color bitmap only fonts do not ignore this flag.
+ // Force this flag off for bitmap only fonts.
+ fLoadGlyphFlags &= ~FT_LOAD_NO_BITMAP;
+ } else {
+ LOG_INFO("Unknown kind of font \"%s\" size %f.\n", fFaceRec->fFace->family_name, fScale.fY);
+ return;
+ }
+
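+    // Skia's y axis points down while FreeType's points up, so the
+    // off-diagonal (skew) terms change sign when converting the remaining
+    // SkMatrix into an FT_Matrix.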
+ fMatrix22.xx = SkScalarToFixed(fMatrix22Scalar.getScaleX());
+ fMatrix22.xy = SkScalarToFixed(-fMatrix22Scalar.getSkewX());
+ fMatrix22.yx = SkScalarToFixed(-fMatrix22Scalar.getSkewY());
+ fMatrix22.yy = SkScalarToFixed(fMatrix22Scalar.getScaleY());
+
+#ifdef FT_COLOR_H
+ FT_Palette_Select(fFaceRec->fFace.get(), 0, nullptr);
+#endif
+
+ fFTSize = ftSize.release();
+ fFace = fFaceRec->fFace.get();
+ fDoLinearMetrics = linearMetrics;
+}
+
+SkScalerContext_FreeType::~SkScalerContext_FreeType() {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (fFTSize != nullptr) {
+ FT_Done_Size(fFTSize);
+ }
+
+ fFaceRec = nullptr;
+
+ unref_ft_library();
+}
+
+/* We call this before each use of the fFace, since we may be sharing
+   this face with other contexts (at different sizes).
+*/
+FT_Error SkScalerContext_FreeType::setupSize() {
+ f_t_mutex().assertHeld();
+ FT_Error err = FT_Activate_Size(fFTSize);
+ if (err != 0) {
+ return err;
+ }
+ FT_Set_Transform(fFace, &fMatrix22, nullptr);
+ return 0;
+}
+
+unsigned SkScalerContext_FreeType::generateGlyphCount() {
+ return fFace->num_glyphs;
+}
+
+bool SkScalerContext_FreeType::generateAdvance(SkGlyph* glyph) {
+ /* unhinted and light hinted text have linearly scaled advances
+ * which are very cheap to compute with some font formats...
+ */
+ if (!fDoLinearMetrics) {
+ return false;
+ }
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return true;
+ }
+
+ FT_Error error;
+ FT_Fixed advance;
+
+ error = FT_Get_Advance( fFace, glyph->getGlyphID(),
+ fLoadGlyphFlags | FT_ADVANCE_FLAG_FAST_ONLY,
+ &advance );
+
+ if (error != 0) {
+ return false;
+ }
+
+ const SkScalar advanceScalar = SkFT_FixedToScalar(advance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ return true;
+}
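+// Note that FT_Get_Advance (with the FAST_ONLY flag above) and the
+// linearHoriAdvance/linearVertAdvance fields used in generateMetrics are
+// 16.16 fixed point, hence the SkFT_FixedToScalar conversion before the
+// residual matrix is applied.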
+
+void SkScalerContext_FreeType::getBBoxForCurrentGlyph(const SkGlyph* glyph,
+ FT_BBox* bbox,
+ bool snapToPixelBoundary) {
+
+ FT_Outline_Get_CBox(&fFace->glyph->outline, bbox);
+
+ if (this->isSubpixel()) {
+ int dx = SkFixedToFDot6(glyph->getSubXFixed());
+ int dy = SkFixedToFDot6(glyph->getSubYFixed());
+ // negate dy since freetype-y-goes-up and skia-y-goes-down
+ bbox->xMin += dx;
+ bbox->yMin -= dy;
+ bbox->xMax += dx;
+ bbox->yMax -= dy;
+ }
+
+ // outset the box to integral boundaries
+ if (snapToPixelBoundary) {
+ bbox->xMin &= ~63;
+ bbox->yMin &= ~63;
+ bbox->xMax = (bbox->xMax + 63) & ~63;
+ bbox->yMax = (bbox->yMax + 63) & ~63;
+ }
+
+ // Must come after snapToPixelBoundary so that the width and height are
+ // consistent. Otherwise asserts will fire later on when generating the
+ // glyph image.
+ if (this->isVertical()) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ bbox->xMin += vector.x;
+ bbox->xMax += vector.x;
+ bbox->yMin += vector.y;
+ bbox->yMax += vector.y;
+ }
+}
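+// The pixel snapping above works directly in 26.6 fixed point: clearing the
+// low six bits floors to a whole pixel, and adding 63 first ceils. For
+// example, an xMin of -70 (-1.09375px) becomes -128 (-2px) and an xMax of
+// 70 (1.09375px) becomes 128 (2px).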
+
+bool SkScalerContext_FreeType::getCBoxForLetter(char letter, FT_BBox* bbox) {
+ const FT_UInt glyph_id = FT_Get_Char_Index(fFace, letter);
+ if (!glyph_id) {
+ return false;
+ }
+ if (FT_Load_Glyph(fFace, glyph_id, fLoadGlyphFlags) != 0) {
+ return false;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, SkTo<SkGlyphID>(glyph_id));
+ FT_Outline_Get_CBox(&fFace->glyph->outline, bbox);
+ return true;
+}
+
+void SkScalerContext_FreeType::updateGlyphIfLCD(SkGlyph* glyph) {
+ if (glyph->fMaskFormat == SkMask::kLCD16_Format) {
+ if (fLCDIsVert) {
+ glyph->fHeight += gFTLibrary->lcdExtra();
+ glyph->fTop -= gFTLibrary->lcdExtra() >> 1;
+ } else {
+ glyph->fWidth += gFTLibrary->lcdExtra();
+ glyph->fLeft -= gFTLibrary->lcdExtra() >> 1;
+ }
+ }
+}
+
+bool SkScalerContext_FreeType::shouldSubpixelBitmap(const SkGlyph& glyph, const SkMatrix& matrix) {
+ // If subpixel rendering of a bitmap *can* be done.
+ bool mechanism = fFace->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
+ this->isSubpixel() &&
+ (glyph.getSubXFixed() || glyph.getSubYFixed());
+
+ // If subpixel rendering of a bitmap *should* be done.
+ // 1. If the face is not scalable then always allow subpixel rendering.
+    //    Otherwise, if the font has an 8ppem strike, size 7 will subpixel render but size 8 won't.
+ // 2. If the matrix is already not identity the bitmap will already be resampled,
+ // so resampling slightly differently shouldn't make much difference.
+ bool policy = !FT_IS_SCALABLE(fFace) || !matrix.isIdentity();
+
+ return mechanism && policy;
+}
+
+void SkScalerContext_FreeType::generateMetrics(SkGlyph* glyph) {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ FT_Error err;
+ err = FT_Load_Glyph( fFace, glyph->getGlyphID(),
+ fLoadGlyphFlags | FT_LOAD_BITMAP_METRICS_ONLY );
+ if (err != 0) {
+ glyph->zeroMetrics();
+ return;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, glyph->getGlyphID());
+
+ if (fFace->glyph->format == FT_GLYPH_FORMAT_OUTLINE) {
+ using FT_PosLimits = std::numeric_limits<FT_Pos>;
+ FT_BBox bounds = { FT_PosLimits::max(), FT_PosLimits::max(),
+ FT_PosLimits::min(), FT_PosLimits::min() };
+#ifdef FT_COLOR_H
+ FT_Bool haveLayers = false;
+ FT_LayerIterator layerIterator = { 0, 0, nullptr };
+ FT_UInt layerGlyphIndex;
+ FT_UInt layerColorIndex;
+ while (FT_Get_Color_Glyph_Layer(fFace, glyph->getGlyphID(),
+ &layerGlyphIndex, &layerColorIndex, &layerIterator))
+ {
+ haveLayers = true;
+ err = FT_Load_Glyph(fFace, layerGlyphIndex,
+ fLoadGlyphFlags | FT_LOAD_BITMAP_METRICS_ONLY);
+ if (err != 0) {
+ glyph->zeroMetrics();
+ return;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, layerGlyphIndex);
+
+ if (0 < fFace->glyph->outline.n_contours) {
+ FT_BBox bbox;
+ getBBoxForCurrentGlyph(glyph, &bbox, true);
+
+ // Union
+ bounds.xMin = std::min(bbox.xMin, bounds.xMin);
+ bounds.yMin = std::min(bbox.yMin, bounds.yMin);
+ bounds.xMax = std::max(bbox.xMax, bounds.xMax);
+ bounds.yMax = std::max(bbox.yMax, bounds.yMax);
+ }
+ }
+
+ if (haveLayers) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ if (!(bounds.xMin < bounds.xMax && bounds.yMin < bounds.yMax)) {
+ bounds = { 0, 0, 0, 0 };
+ }
+ } else {
+#endif
+ if (0 < fFace->glyph->outline.n_contours) {
+ getBBoxForCurrentGlyph(glyph, &bounds, true);
+ } else {
+ bounds = { 0, 0, 0, 0 };
+ }
+#ifdef FT_COLOR_H
+ }
+#endif
+ // Round out, no longer dot6.
+ bounds.xMin = SkFDot6Floor(bounds.xMin);
+ bounds.yMin = SkFDot6Floor(bounds.yMin);
+ bounds.xMax = SkFDot6Ceil (bounds.xMax);
+ bounds.yMax = SkFDot6Ceil (bounds.yMax);
+
+ FT_Pos width = bounds.xMax - bounds.xMin;
+ FT_Pos height = bounds.yMax - bounds.yMin;
+ FT_Pos top = -bounds.yMax; // Freetype y-up, Skia y-down.
+ FT_Pos left = bounds.xMin;
+ if (!SkTFitsIn<decltype(glyph->fWidth )>(width ) ||
+ !SkTFitsIn<decltype(glyph->fHeight)>(height) ||
+ !SkTFitsIn<decltype(glyph->fTop )>(top ) ||
+ !SkTFitsIn<decltype(glyph->fLeft )>(left ) )
+ {
+ width = height = top = left = 0;
+ }
+
+ glyph->fWidth = SkToU16(width );
+ glyph->fHeight = SkToU16(height);
+ glyph->fTop = SkToS16(top );
+ glyph->fLeft = SkToS16(left );
+ updateGlyphIfLCD(glyph);
+
+ } else if (fFace->glyph->format == FT_GLYPH_FORMAT_BITMAP) {
+ if (this->isVertical()) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ fFace->glyph->bitmap_left += SkFDot6Floor(vector.x);
+ fFace->glyph->bitmap_top += SkFDot6Floor(vector.y);
+ }
+
+ if (fFace->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ {
+ SkRect rect = SkRect::MakeXYWH(SkIntToScalar(fFace->glyph->bitmap_left),
+ -SkIntToScalar(fFace->glyph->bitmap_top),
+ SkIntToScalar(fFace->glyph->bitmap.width),
+ SkIntToScalar(fFace->glyph->bitmap.rows));
+ fMatrix22Scalar.mapRect(&rect);
+ if (this->shouldSubpixelBitmap(*glyph, fMatrix22Scalar)) {
+ rect.offset(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+ }
+ SkIRect irect = rect.roundOut();
+ glyph->fWidth = SkToU16(irect.width());
+ glyph->fHeight = SkToU16(irect.height());
+ glyph->fTop = SkToS16(irect.top());
+ glyph->fLeft = SkToS16(irect.left());
+ }
+ } else {
+ SkDEBUGFAIL("unknown glyph format");
+ glyph->zeroMetrics();
+ return;
+ }
+
+ if (this->isVertical()) {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearVertAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getSkewX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getScaleY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = -SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = SkFDot6ToFloat(fFace->glyph->advance.y);
+ }
+ } else {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearHoriAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = -SkFDot6ToFloat(fFace->glyph->advance.y);
+ }
+ }
+
+#ifdef ENABLE_GLYPH_SPEW
+ LOG_INFO("Metrics(glyph:%d flags:0x%x) w:%d\n", glyph->getGlyphID(), fLoadGlyphFlags, glyph->fWidth);
+#endif
+}
+
+void SkScalerContext_FreeType::generateImage(const SkGlyph& glyph) {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+
+ FT_Error err = FT_Load_Glyph(fFace, glyph.getGlyphID(), fLoadGlyphFlags);
+ if (err != 0) {
+ SK_TRACEFTR(err, "SkScalerContext_FreeType::generateImage: FT_Load_Glyph(glyph:%d "
+ "width:%d height:%d rb:%d flags:%d) failed.",
+ glyph.getGlyphID(), glyph.width(), glyph.height(), glyph.rowBytes(),
+ fLoadGlyphFlags);
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+
+ emboldenIfNeeded(fFace, fFace->glyph, glyph.getGlyphID());
+ SkMatrix* bitmapMatrix = &fMatrix22Scalar;
+ SkMatrix subpixelBitmapMatrix;
+ if (this->shouldSubpixelBitmap(glyph, *bitmapMatrix)) {
+ subpixelBitmapMatrix = fMatrix22Scalar;
+ subpixelBitmapMatrix.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ bitmapMatrix = &subpixelBitmapMatrix;
+ }
+ generateGlyphImage(fFace, glyph, *bitmapMatrix);
+}
+
+
+bool SkScalerContext_FreeType::generatePath(SkGlyphID glyphID, SkPath* path) {
+ SkASSERT(path);
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ // FT_IS_SCALABLE is documented to mean the face contains outline glyphs.
+ if (!FT_IS_SCALABLE(fFace) || this->setupSize()) {
+ path->reset();
+ return false;
+ }
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = FT_Load_Glyph(fFace, glyphID, flags);
+ if (err != 0 || fFace->glyph->format != FT_GLYPH_FORMAT_OUTLINE) {
+ path->reset();
+ return false;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, glyphID);
+
+ if (!generateGlyphPath(fFace, path)) {
+ path->reset();
+ return false;
+ }
+
+ // The path's origin from FreeType is always the horizontal layout origin.
+ // Offset the path so that it is relative to the vertical origin if needed.
+ if (this->isVertical()) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ path->offset(SkFDot6ToScalar(vector.x), -SkFDot6ToScalar(vector.y));
+ }
+ return true;
+}
+
+void SkScalerContext_FreeType::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ FT_Face face = fFace;
+ metrics->fFlags = 0;
+
+ SkScalar upem = SkIntToScalar(SkTypeface_FreeType::GetUnitsPerEm(face));
+
+ // use the os/2 table as a source of reasonable defaults.
+ SkScalar x_height = 0.0f;
+ SkScalar avgCharWidth = 0.0f;
+ SkScalar cap_height = 0.0f;
+ SkScalar strikeoutThickness = 0.0f, strikeoutPosition = 0.0f;
+ TT_OS2* os2 = (TT_OS2*) FT_Get_Sfnt_Table(face, ft_sfnt_os2);
+ if (os2) {
+ x_height = SkIntToScalar(os2->sxHeight) / upem * fScale.y();
+ avgCharWidth = SkIntToScalar(os2->xAvgCharWidth) / upem;
+ strikeoutThickness = SkIntToScalar(os2->yStrikeoutSize) / upem;
+ strikeoutPosition = -SkIntToScalar(os2->yStrikeoutPosition) / upem;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutPositionIsValid_Flag;
+ if (os2->version != 0xFFFF && os2->version >= 2) {
+ cap_height = SkIntToScalar(os2->sCapHeight) / upem * fScale.y();
+ }
+ }
+
+ // pull from format-specific metrics as needed
+ SkScalar ascent, descent, leading, xmin, xmax, ymin, ymax;
+ SkScalar underlineThickness, underlinePosition;
+ if (face->face_flags & FT_FACE_FLAG_SCALABLE) { // scalable outline font
+ // FreeType will always use HHEA metrics if they're not zero.
+ // It completely ignores the OS/2 fsSelection::UseTypoMetrics bit.
+ // It also ignores the VDMX tables, which are also of interest here
+ // (and override everything else when they apply).
+ static const int kUseTypoMetricsMask = (1 << 7);
+ if (os2 && os2->version != 0xFFFF && (os2->fsSelection & kUseTypoMetricsMask)) {
+ ascent = -SkIntToScalar(os2->sTypoAscender) / upem;
+ descent = -SkIntToScalar(os2->sTypoDescender) / upem;
+ leading = SkIntToScalar(os2->sTypoLineGap) / upem;
+ } else {
+ ascent = -SkIntToScalar(face->ascender) / upem;
+ descent = -SkIntToScalar(face->descender) / upem;
+ leading = SkIntToScalar(face->height + (face->descender - face->ascender)) / upem;
+ }
+ xmin = SkIntToScalar(face->bbox.xMin) / upem;
+ xmax = SkIntToScalar(face->bbox.xMax) / upem;
+ ymin = -SkIntToScalar(face->bbox.yMin) / upem;
+ ymax = -SkIntToScalar(face->bbox.yMax) / upem;
+ underlineThickness = SkIntToScalar(face->underline_thickness) / upem;
+ underlinePosition = -SkIntToScalar(face->underline_position +
+ face->underline_thickness / 2) / upem;
+
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ // we may be able to synthesize x_height and cap_height from outline
+ if (!x_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('x', &bbox)) {
+ x_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ if (!cap_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('H', &bbox)) {
+ cap_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ } else if (fStrikeIndex != -1) { // bitmap strike metrics
+ SkScalar xppem = SkIntToScalar(face->size->metrics.x_ppem);
+ SkScalar yppem = SkIntToScalar(face->size->metrics.y_ppem);
+ ascent = -SkIntToScalar(face->size->metrics.ascender) / (yppem * 64.0f);
+ descent = -SkIntToScalar(face->size->metrics.descender) / (yppem * 64.0f);
+ leading = (SkIntToScalar(face->size->metrics.height) / (yppem * 64.0f)) + ascent - descent;
+ xmin = 0.0f;
+ xmax = SkIntToScalar(face->available_sizes[fStrikeIndex].width) / xppem;
+ ymin = descent;
+ ymax = ascent;
+ underlineThickness = 0;
+ underlinePosition = 0;
+ metrics->fFlags &= ~SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags &= ~SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ TT_Postscript* post = (TT_Postscript*) FT_Get_Sfnt_Table(face, ft_sfnt_post);
+ if (post) {
+ underlineThickness = SkIntToScalar(post->underlineThickness) / upem;
+ underlinePosition = -SkIntToScalar(post->underlinePosition) / upem;
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+ }
+ } else {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ // synthesize elements that were not provided by the os/2 table or format-specific metrics
+ if (!x_height) {
+ x_height = -ascent * fScale.y();
+ }
+ if (!avgCharWidth) {
+ avgCharWidth = xmax - xmin;
+ }
+ if (!cap_height) {
+ cap_height = -ascent * fScale.y();
+ }
+
+ // disallow negative linespacing
+ if (leading < 0.0f) {
+ leading = 0.0f;
+ }
+
+ metrics->fTop = ymax * fScale.y();
+ metrics->fAscent = ascent * fScale.y();
+ metrics->fDescent = descent * fScale.y();
+ metrics->fBottom = ymin * fScale.y();
+ metrics->fLeading = leading * fScale.y();
+ metrics->fAvgCharWidth = avgCharWidth * fScale.y();
+ metrics->fXMin = xmin * fScale.y();
+ metrics->fXMax = xmax * fScale.y();
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ metrics->fXHeight = x_height;
+ metrics->fCapHeight = cap_height;
+ metrics->fUnderlineThickness = underlineThickness * fScale.y();
+ metrics->fUnderlinePosition = underlinePosition * fScale.y();
+ metrics->fStrikeoutThickness = strikeoutThickness * fScale.y();
+ metrics->fStrikeoutPosition = strikeoutPosition * fScale.y();
+}
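+// Example of the scaling convention above: with upem == 2048, an ascender of
+// 1638, and a 16px request, ascent = -(1638 / 2048) = -0.7998 em, so
+// metrics->fAscent = -0.7998 * 16 is about -12.8px (negative because Skia's
+// y axis points down).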
+
+///////////////////////////////////////////////////////////////////////////////
+
+// hand-tuned value to reduce outline embolden strength
+#ifndef SK_OUTLINE_EMBOLDEN_DIVISOR
+ #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 34
+ #else
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 24
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScalerContext_FreeType::emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph, SkGlyphID gid) {
+ // check to see if the embolden bit is set
+ if (0 == (fRec.fFlags & SkScalerContext::kEmbolden_Flag)) {
+ return;
+ }
+
+ switch (glyph->format) {
+ case FT_GLYPH_FORMAT_OUTLINE:
+ FT_Pos strength;
+ strength = FT_MulFix(face->units_per_EM, face->size->metrics.y_scale)
+ / SK_OUTLINE_EMBOLDEN_DIVISOR;
+ FT_Outline_Embolden(&glyph->outline, strength);
+ break;
+ case FT_GLYPH_FORMAT_BITMAP:
+ if (!fFace->glyph->bitmap.buffer) {
+ FT_Load_Glyph(fFace, gid, fLoadGlyphFlags);
+ }
+ FT_GlyphSlot_Own_Bitmap(glyph);
+ FT_Bitmap_Embolden(glyph->library, &glyph->bitmap, kBitmapEmboldenStrength, 0);
+ break;
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ }
+}
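+// Worked example for the outline case: FT_MulFix(units_per_EM, y_scale) is
+// the em size in 26.6 pixels, so at 16px the strength is (16 * 64) / 24,
+// about 42 in 26.6, i.e. roughly two thirds of a pixel of emboldening.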
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkUtils.h"
+
+// Just made up, so we don't end up storing 1000s of entries
+constexpr int kMaxC2GCacheCount = 512;
+
+void SkTypeface_FreeType::onCharsToGlyphs(const SkUnichar uni[], int count,
+ SkGlyphID glyphs[]) const {
+ // Try the cache first, *before* accessing freetype lib/face, as that
+ // can be very slow. If we do need to compute a new glyphID, then
+ // access those freetype objects and continue the loop.
+
+ SkAutoMutexExclusive ama(fC2GCacheMutex);
+
+ int i;
+ for (i = 0; i < count; ++i) {
+ int index = fC2GCache.findGlyphIndex(uni[i]);
+ if (index < 0) {
+ break;
+ }
+ glyphs[i] = SkToU16(index);
+ }
+ if (i == count) {
+ // we're done, no need to access the freetype objects
+ return;
+ }
+
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ sk_bzero(glyphs, count * sizeof(glyphs[0]));
+ return;
+ }
+
+ for (; i < count; ++i) {
+ SkUnichar c = uni[i];
+ int index = fC2GCache.findGlyphIndex(c);
+ if (index >= 0) {
+ glyphs[i] = SkToU16(index);
+ } else {
+ glyphs[i] = SkToU16(FT_Get_Char_Index(face, c));
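+            // findGlyphIndex returns the bitwise complement of the insertion
+            // position when the char is absent, so ~index below recovers the
+            // slot at which to insert the freshly computed glyph.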
+ fC2GCache.insertCharAndGlyph(~index, c, glyphs[i]);
+ }
+ }
+
+ if (fC2GCache.count() > kMaxC2GCacheCount) {
+ fC2GCache.reset();
+ }
+}
+
+int SkTypeface_FreeType::onCountGlyphs() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ return face ? face->num_glyphs : 0;
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_FreeType::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ SkString language("und"); //undetermined
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(familyName, language);
+ }
+ return nameIter.release();
+}
+
+int SkTypeface_FreeType::onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return -1;
+ }
+
+ if (!(face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return 0;
+ }
+
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(face, &variations)) {
+ return -1;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ if (!coordinates || coordinateCount < SkToInt(variations->num_axis)) {
+ return variations->num_axis;
+ }
+
+ SkAutoSTMalloc<4, FT_Fixed> coords(variations->num_axis);
+ // FT_Get_{MM,Var}_{Blend,Design}_Coordinates were added in FreeType 2.7.1.
+ if (gFTLibrary->fGetVarDesignCoordinates &&
+ !gFTLibrary->fGetVarDesignCoordinates(face, variations->num_axis, coords.get()))
+ {
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ coordinates[i].axis = variations->axis[i].tag;
+ coordinates[i].value = SkFixedToScalar(coords[i]);
+ }
+ } else if (static_cast<FT_UInt>(fta.getAxesCount()) == variations->num_axis) {
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ coordinates[i].axis = variations->axis[i].tag;
+ coordinates[i].value = SkFixedToScalar(fta.getAxes()[i]);
+ }
+ } else if (fta.isNamedVariationSpecified()) {
+ // The font has axes, they cannot be retrieved, and some named axis was specified.
+ return -1;
+ } else {
+ // The font has axes, they cannot be retrieved, but no named instance was specified.
+ return 0;
+ }
+
+ return variations->num_axis;
+}
+
+int SkTypeface_FreeType::onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return -1;
+ }
+
+ if (!(face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return 0;
+ }
+
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(face, &variations)) {
+ return -1;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ if (!parameters || parameterCount < SkToInt(variations->num_axis)) {
+ return variations->num_axis;
+ }
+
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ parameters[i].tag = variations->axis[i].tag;
+ parameters[i].min = SkFixedToScalar(variations->axis[i].minimum);
+ parameters[i].def = SkFixedToScalar(variations->axis[i].def);
+ parameters[i].max = SkFixedToScalar(variations->axis[i].maximum);
+ FT_UInt flags = 0;
+ bool hidden = gFTLibrary->fGetVarAxisFlags &&
+ !gFTLibrary->fGetVarAxisFlags(variations, i, &flags) &&
+ (flags & FT_VAR_AXIS_FLAG_HIDDEN);
+ parameters[i].setHidden(hidden);
+ }
+
+ return variations->num_axis;
+}
+
+int SkTypeface_FreeType::onGetTableTags(SkFontTableTag tags[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+
+ FT_ULong tableCount = 0;
+ FT_Error error;
+
+ // When 'tag' is nullptr, returns number of tables in 'length'.
+ error = FT_Sfnt_Table_Info(face, 0, nullptr, &tableCount);
+ if (error) {
+ return 0;
+ }
+
+ if (tags) {
+ for (FT_ULong tableIndex = 0; tableIndex < tableCount; ++tableIndex) {
+ FT_ULong tableTag;
+ FT_ULong tablelength;
+ error = FT_Sfnt_Table_Info(face, tableIndex, &tableTag, &tablelength);
+ if (error) {
+ return 0;
+ }
+ tags[tableIndex] = static_cast<SkFontTableTag>(tableTag);
+ }
+ }
+ return tableCount;
+}
+
+size_t SkTypeface_FreeType::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+
+ FT_ULong tableLength = 0;
+ FT_Error error;
+
+ // When 'length' is 0 it is overwritten with the full table length; 'offset' is ignored.
+ error = FT_Load_Sfnt_Table(face, tag, 0, nullptr, &tableLength);
+ if (error) {
+ return 0;
+ }
+
+ if (offset > tableLength) {
+ return 0;
+ }
+ FT_ULong size = SkTMin((FT_ULong)length, tableLength - (FT_ULong)offset);
+ if (data) {
+ error = FT_Load_Sfnt_Table(face, tag, offset, reinterpret_cast<FT_Byte*>(data), &size);
+ if (error) {
+ return 0;
+ }
+ }
+
+ return size;
+}
+
+sk_sp<SkData> SkTypeface_FreeType::onCopyTableData(SkFontTableTag tag) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+
+ FT_ULong tableLength = 0;
+ FT_Error error;
+
+ // When 'length' is 0 it is overwritten with the full table length; 'offset' is ignored.
+ error = FT_Load_Sfnt_Table(face, tag, 0, nullptr, &tableLength);
+ if (error) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(tableLength);
+ if (data) {
+ error = FT_Load_Sfnt_Table(face, tag, 0,
+ reinterpret_cast<FT_Byte*>(data->writable_data()), &tableLength);
+ if (error) {
+ data.reset();
+ }
+ }
+ return data;
+}
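+// The same two-call pattern works from client code (illustrative only):
+//
+//   SkFontTableTag tag = SkSetFourByteTag('h', 'e', 'a', 'd');
+//   sk_sp<SkData> head = typeface->copyTableData(tag);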
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypeface_FreeType::Scanner::Scanner() : fLibrary(nullptr) {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+}
+SkTypeface_FreeType::Scanner::~Scanner() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+}
+
+FT_Face SkTypeface_FreeType::Scanner::openFace(SkStreamAsset* stream, int ttcIndex,
+ FT_Stream ftStream) const
+{
+ if (fLibrary == nullptr) {
+ return nullptr;
+ }
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+
+ const void* memoryBase = stream->getMemoryBase();
+
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = stream->getLength();
+ } else {
+ memset(ftStream, 0, sizeof(*ftStream));
+ ftStream->size = stream->getLength();
+ ftStream->descriptor.pointer = stream;
+ ftStream->read = sk_ft_stream_io;
+ ftStream->close = sk_ft_stream_close;
+
+ args.flags = FT_OPEN_STREAM;
+ args.stream = ftStream;
+ }
+
+ FT_Face face;
+ if (FT_Open_Face(fLibrary, &args, ttcIndex, &face)) {
+ return nullptr;
+ }
+ return face;
+}
+
+bool SkTypeface_FreeType::Scanner::recognizedFont(SkStreamAsset* stream, int* numFaces) const {
+ SkAutoMutexExclusive libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ FT_Face face = this->openFace(stream, -1, &streamRec);
+ if (nullptr == face) {
+ return false;
+ }
+
+ *numFaces = face->num_faces;
+
+ FT_Done_Face(face);
+ return true;
+}
+
+#include "src/core/SkTSearch.h"
+bool SkTypeface_FreeType::Scanner::scanFont(
+ SkStreamAsset* stream, int ttcIndex,
+ SkString* name, SkFontStyle* style, bool* isFixedPitch, AxisDefinitions* axes) const
+{
+ SkAutoMutexExclusive libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ FT_Face face = this->openFace(stream, ttcIndex, &streamRec);
+ if (nullptr == face) {
+ return false;
+ }
+
+ int weight = SkFontStyle::kNormal_Weight;
+ int width = SkFontStyle::kNormal_Width;
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ if (face->style_flags & FT_STYLE_FLAG_BOLD) {
+ weight = SkFontStyle::kBold_Weight;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ slant = SkFontStyle::kItalic_Slant;
+ }
+
+ PS_FontInfoRec psFontInfo;
+ TT_OS2* os2 = static_cast<TT_OS2*>(FT_Get_Sfnt_Table(face, ft_sfnt_os2));
+ if (os2 && os2->version != 0xffff) {
+ weight = os2->usWeightClass;
+ width = os2->usWidthClass;
+
+ // OS/2::fsSelection bit 9 indicates oblique.
+ if (SkToBool(os2->fsSelection & (1u << 9))) {
+ slant = SkFontStyle::kOblique_Slant;
+ }
+ } else if (0 == FT_Get_PS_Font_Info(face, &psFontInfo) && psFontInfo.weight) {
+ static const struct {
+ char const * const name;
+ int const weight;
+ } commonWeights [] = {
+ // There are probably more common names, but these are known to exist.
+ { "all", SkFontStyle::kNormal_Weight }, // Multiple Masters usually default to normal.
+ { "black", SkFontStyle::kBlack_Weight },
+ { "bold", SkFontStyle::kBold_Weight },
+ { "book", (SkFontStyle::kNormal_Weight + SkFontStyle::kLight_Weight)/2 },
+ { "demi", SkFontStyle::kSemiBold_Weight },
+ { "demibold", SkFontStyle::kSemiBold_Weight },
+ { "extra", SkFontStyle::kExtraBold_Weight },
+ { "extrabold", SkFontStyle::kExtraBold_Weight },
+ { "extralight", SkFontStyle::kExtraLight_Weight },
+ { "hairline", SkFontStyle::kThin_Weight },
+ { "heavy", SkFontStyle::kBlack_Weight },
+ { "light", SkFontStyle::kLight_Weight },
+ { "medium", SkFontStyle::kMedium_Weight },
+ { "normal", SkFontStyle::kNormal_Weight },
+ { "plain", SkFontStyle::kNormal_Weight },
+ { "regular", SkFontStyle::kNormal_Weight },
+ { "roman", SkFontStyle::kNormal_Weight },
+ { "semibold", SkFontStyle::kSemiBold_Weight },
+ { "standard", SkFontStyle::kNormal_Weight },
+ { "thin", SkFontStyle::kThin_Weight },
+ { "ultra", SkFontStyle::kExtraBold_Weight },
+ { "ultrablack", SkFontStyle::kExtraBlack_Weight },
+ { "ultrabold", SkFontStyle::kExtraBold_Weight },
+ { "ultraheavy", SkFontStyle::kExtraBlack_Weight },
+ { "ultralight", SkFontStyle::kExtraLight_Weight },
+ };
+ int const index = SkStrLCSearch(&commonWeights[0].name, SK_ARRAY_COUNT(commonWeights),
+ psFontInfo.weight, sizeof(commonWeights[0]));
+ if (index >= 0) {
+ weight = commonWeights[index].weight;
+ } else {
+ LOG_INFO("Do not know weight for: %s (%s) \n", face->family_name, psFontInfo.weight);
+ }
+ }
+
+ if (name) {
+ name->set(face->family_name);
+ }
+ if (style) {
+ *style = SkFontStyle(weight, width, slant);
+ }
+ if (isFixedPitch) {
+ *isFixedPitch = FT_IS_FIXED_WIDTH(face);
+ }
+
+ bool success = GetAxes(face, axes);
+ FT_Done_Face(face);
+ return success;
+}
+
+bool SkTypeface_FreeType::Scanner::GetAxes(FT_Face face, AxisDefinitions* axes) {
+ if (axes && face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS) {
+ FT_MM_Var* variations = nullptr;
+ FT_Error err = FT_Get_MM_Var(face, &variations);
+ if (err) {
+ LOG_INFO("INFO: font %s claims to have variations, but none found.\n",
+ face->family_name);
+ return false;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ axes->reset(variations->num_axis);
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ const FT_Var_Axis& ftAxis = variations->axis[i];
+ (*axes)[i].fTag = ftAxis.tag;
+ (*axes)[i].fMinimum = ftAxis.minimum;
+ (*axes)[i].fDefault = ftAxis.def;
+ (*axes)[i].fMaximum = ftAxis.maximum;
+ }
+ }
+ return true;
+}
+
+/*static*/ void SkTypeface_FreeType::Scanner::computeAxisValues(
+ AxisDefinitions axisDefinitions,
+ const SkFontArguments::VariationPosition position,
+ SkFixed* axisValues,
+ const SkString& name)
+{
+ for (int i = 0; i < axisDefinitions.count(); ++i) {
+ const Scanner::AxisDefinition& axisDefinition = axisDefinitions[i];
+ const SkScalar axisMin = SkFixedToScalar(axisDefinition.fMinimum);
+ const SkScalar axisMax = SkFixedToScalar(axisDefinition.fMaximum);
+ axisValues[i] = axisDefinition.fDefault;
+ // The position may be over-specified. If there are multiple values for a given axis,
+ // use the last one, since that is what css-fonts-4 requires.
+ for (int j = position.coordinateCount; j --> 0;) {
+ const auto& coordinate = position.coordinates[j];
+ if (axisDefinition.fTag == coordinate.axis) {
+ const SkScalar axisValue = SkTPin(coordinate.value, axisMin, axisMax);
+ if (coordinate.value != axisValue) {
+ LOG_INFO("Requested font axis value out of range: "
+ "%s '%c%c%c%c' %f; pinned to %f.\n",
+ name.c_str(),
+ (axisDefinition.fTag >> 24) & 0xFF,
+ (axisDefinition.fTag >> 16) & 0xFF,
+ (axisDefinition.fTag >> 8) & 0xFF,
+ (axisDefinition.fTag ) & 0xFF,
+ SkScalarToDouble(coordinate.value),
+ SkScalarToDouble(axisValue));
+ }
+ axisValues[i] = SkScalarToFixed(axisValue);
+ break;
+ }
+ }
+ // TODO: warn on defaulted axis?
+ }
+
+ SkDEBUGCODE(
+ // Check for axis specified, but not matched in font.
+ for (int i = 0; i < position.coordinateCount; ++i) {
+ SkFourByteTag skTag = position.coordinates[i].axis;
+ bool found = false;
+ for (int j = 0; j < axisDefinitions.count(); ++j) {
+ if (skTag == axisDefinitions[j].fTag) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ LOG_INFO("Requested font axis not found: %s '%c%c%c%c'\n",
+ name.c_str(),
+ (skTag >> 24) & 0xFF,
+ (skTag >> 16) & 0xFF,
+ (skTag >> 8) & 0xFF,
+ (skTag) & 0xFF);
+ }
+ }
+ )
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
new file mode 100644
index 0000000000..531ca88357
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
@@ -0,0 +1,760 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkPath.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkFDot6.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include <utility>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_BITMAP_H
+#ifdef FT_COLOR_H
+# include FT_COLOR_H
+#endif
+#include FT_IMAGE_H
+#include FT_OUTLINE_H
+// In the past, FT_GlyphSlot_Own_Bitmap was defined in this header file.
+#include FT_SYNTHESIS_H
+
+// FT_LOAD_COLOR and the corresponding FT_Pixel_Mode::FT_PIXEL_MODE_BGRA
+// were introduced in FreeType 2.5.0.
+// The following may be removed once FreeType 2.5.0 is required to build.
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+#ifdef SK_DEBUG
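+// FreeType's fterrors.h is designed for re-inclusion: defining FT_ERRORDEF
+// and friends before including FT_ERRORS_H expands the error list into the
+// switch cases below, yielding a message string for every FT_Error value.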
+const char* SkTraceFtrGetError(int e) {
+ switch ((FT_Error)e) {
+ #undef FTERRORS_H_
+ #define FT_ERRORDEF( e, v, s ) case v: return s;
+ #define FT_ERROR_START_LIST
+ #define FT_ERROR_END_LIST
+ #include FT_ERRORS_H
+ #undef FT_ERRORDEF
+ #undef FT_ERROR_START_LIST
+ #undef FT_ERROR_END_LIST
+ default: return "";
+ }
+}
+#endif // SK_DEBUG
+
+namespace {
+
+FT_Pixel_Mode compute_pixel_mode(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ return FT_PIXEL_MODE_MONO;
+ case SkMask::kA8_Format:
+ default:
+ return FT_PIXEL_MODE_GRAY;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
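+// packTriple and grayToRGB16 pack 8-bit channels into RGB565, the layout an
+// SkMask::kLCD16_Format mask stores (e.g. gray 0xFF packs to 0xFFFF).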
+uint16_t packTriple(U8CPU r, U8CPU g, U8CPU b) {
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkTMax(r, (U8CPU)0x40);
+ g = SkTMax(g, (U8CPU)0x40);
+ b = SkTMax(b, (U8CPU)0x40);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+
+uint16_t grayToRGB16(U8CPU gray) {
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ gray = SkTMax(gray, (U8CPU)0x40);
+#endif
+ return SkPack888ToRGB16(gray, gray, gray);
+}
+
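+// Tests the bit at 'bitOffset', counting from the most significant bit of
+// data[0]; FT_PIXEL_MODE_MONO stores the leftmost pixel in the high bit.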
+int bittst(const uint8_t data[], int bitOffset) {
+ SkASSERT(bitOffset >= 0);
+ int lowBit = data[bitOffset >> 3] >> (~bitOffset & 7);
+ return lowBit & 1;
+}
+
+/**
+ * Copies a FT_Bitmap into an LCD16 SkMask. For FT_PIXEL_MODE_LCD the source
+ * is three times the mask's width; for FT_PIXEL_MODE_LCD_V it is three times
+ * the mask's height. Handles:
+ *
+ * FT_PIXEL_MODE_MONO
+ * FT_PIXEL_MODE_GRAY
+ * FT_PIXEL_MODE_LCD
+ * FT_PIXEL_MODE_LCD_V
+ */
+template<bool APPLY_PREBLEND>
+void copyFT2LCD16(const FT_Bitmap& bitmap, const SkMask& mask, int lcdIsBGR,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB)
+{
+ SkASSERT(SkMask::kLCD16_Format == mask.fFormat);
+ if (FT_PIXEL_MODE_LCD != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.width() == static_cast<int>(bitmap.width));
+ }
+ if (FT_PIXEL_MODE_LCD_V != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ }
+
+ const uint8_t* src = bitmap.buffer;
+ uint16_t* dst = reinterpret_cast<uint16_t*>(mask.fImage);
+ const size_t dstRB = mask.fRowBytes;
+
+ const int width = mask.fBounds.width();
+ const int height = mask.fBounds.height();
+
+ switch (bitmap.pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
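+ // Negating the 0/1 bit value yields 0x0000 or 0xFFFF, i.e. zero or
+ // full coverage in all three RGB565 channels.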
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = -bittst(src, x);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_GRAY:
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = grayToRGB16(src[x]);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_LCD:
+ SkASSERT(3 * mask.fBounds.width() == static_cast<int>(bitmap.width));
+ for (int y = height; y --> 0;) {
+ const uint8_t* triple = src;
+ if (lcdIsBGR) {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableB));
+ triple += 3;
+ }
+ } else {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableB));
+ triple += 3;
+ }
+ }
+ src += bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ case FT_PIXEL_MODE_LCD_V:
+ SkASSERT(3 * mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ for (int y = height; y --> 0;) {
+ const uint8_t* srcR = src;
+ const uint8_t* srcG = srcR + bitmap.pitch;
+ const uint8_t* srcB = srcG + bitmap.pitch;
+ if (lcdIsBGR) {
+ using std::swap;
+ swap(srcR, srcB);
+ }
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(*srcR++, tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcG++, tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcB++, tableB));
+ }
+ src += 3 * bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ default:
+ SkDEBUGF("FT_Pixel_Mode %d", bitmap.pixel_mode);
+ SkDEBUGFAIL("unsupported FT_Pixel_Mode for LCD16");
+ break;
+ }
+}
+
+/**
+ * Copies a FT_Bitmap into an SkMask with the same dimensions.
+ *
+ * Y = Yes, N = No, NR = Never Requested, NP = Never Produced
+ *
+ * kBW kA8 k3D kARGB32 kLCD16
+ * FT_PIXEL_MODE_MONO Y Y NR N Y
+ * FT_PIXEL_MODE_GRAY N Y NR N Y
+ * FT_PIXEL_MODE_GRAY2 NP NP NR NP NP
+ * FT_PIXEL_MODE_GRAY4 NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD_V NP NP NR NP NP
+ * FT_PIXEL_MODE_BGRA N N NR Y N
+ *
+ * TODO: All of these N need to be Y or otherwise ruled out.
+ */
+void copyFTBitmap(const FT_Bitmap& srcFTBitmap, SkMask& dstMask) {
+ SkASSERTF(dstMask.fBounds.width() == static_cast<int>(srcFTBitmap.width),
+ "dstMask.fBounds.width() = %d\n"
+ "static_cast<int>(srcFTBitmap.width) = %d",
+ dstMask.fBounds.width(),
+ static_cast<int>(srcFTBitmap.width)
+ );
+ SkASSERTF(dstMask.fBounds.height() == static_cast<int>(srcFTBitmap.rows),
+ "dstMask.fBounds.height() = %d\n"
+ "static_cast<int>(srcFTBitmap.rows) = %d",
+ dstMask.fBounds.height(),
+ static_cast<int>(srcFTBitmap.rows)
+ );
+
+ const uint8_t* src = reinterpret_cast<const uint8_t*>(srcFTBitmap.buffer);
+ const FT_Pixel_Mode srcFormat = static_cast<FT_Pixel_Mode>(srcFTBitmap.pixel_mode);
+ // FT_Bitmap::pitch is an int and allowed to be negative.
+ const int srcPitch = srcFTBitmap.pitch;
+ const size_t srcRowBytes = SkTAbs(srcPitch);
+
+ uint8_t* dst = dstMask.fImage;
+ const SkMask::Format dstFormat = static_cast<SkMask::Format>(dstMask.fFormat);
+ const size_t dstRowBytes = dstMask.fRowBytes;
+
+ const size_t width = srcFTBitmap.width;
+ const size_t height = srcFTBitmap.rows;
+
+ if (SkMask::kLCD16_Format == dstFormat) {
+ copyFT2LCD16<false>(srcFTBitmap, dstMask, false, nullptr, nullptr, nullptr);
+ return;
+ }
+
+ if ((FT_PIXEL_MODE_MONO == srcFormat && SkMask::kBW_Format == dstFormat) ||
+ (FT_PIXEL_MODE_GRAY == srcFormat && SkMask::kA8_Format == dstFormat))
+ {
+ size_t commonRowBytes = SkTMin(srcRowBytes, dstRowBytes);
+ for (size_t y = height; y --> 0;) {
+ memcpy(dst, src, commonRowBytes);
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_MONO == srcFormat && SkMask::kA8_Format == dstFormat) {
+ for (size_t y = height; y --> 0;) {
+ uint8_t byte = 0;
+ int bits = 0;
+ const uint8_t* src_row = src;
+ uint8_t* dst_row = dst;
+ for (size_t x = width; x --> 0;) {
+ if (0 == bits) {
+ byte = *src_row++;
+ bits = 8;
+ }
+ *dst_row++ = byte & 0x80 ? 0xff : 0x00;
+ bits--;
+ byte <<= 1;
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_BGRA == srcFormat && SkMask::kARGB32_Format == dstFormat) {
+ // FT_PIXEL_MODE_BGRA is pre-multiplied.
+ for (size_t y = height; y --> 0;) {
+ const uint8_t* src_row = src;
+ SkPMColor* dst_row = reinterpret_cast<SkPMColor*>(dst);
+ for (size_t x = 0; x < width; ++x) {
+ uint8_t b = *src_row++;
+ uint8_t g = *src_row++;
+ uint8_t r = *src_row++;
+ uint8_t a = *src_row++;
+ *dst_row++ = SkPackARGB32(a, r, g, b);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ *(dst_row-1) = SkFourByteInterp256(*(dst_row-1), SK_ColorWHITE, 0x40);
+#endif
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else {
+ SkDEBUGF("FT_Pixel_Mode %d, SkMask::Format %d\n", srcFormat, dstFormat);
+ SkDEBUGFAIL("unsupported combination of FT_Pixel_Mode and SkMask::Format");
+ }
+}
+
+inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ // Arbitrary decision that making the cutoff at 1/4 instead of 1/2 in general looks better.
+ return (byte >> 6) != 0;
+}
+
+uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
+
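+// Converts an A8 image to A1: each output byte packs eight thresholded alpha
+// values, most significant bit first; leftover pixels at the end of a row
+// share a final, partially filled byte.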
+void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ const int srcPad = srcRB - width;
+ SkASSERT(srcPad >= 0);
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+inline SkMask::Format SkMaskFormat_for_SkColorType(SkColorType colorType) {
+ switch (colorType) {
+ case kAlpha_8_SkColorType:
+ return SkMask::kA8_Format;
+ case kN32_SkColorType:
+ return SkMask::kARGB32_Format;
+ default:
+ SkDEBUGFAIL("unsupported SkBitmap::Config");
+ return SkMask::kA8_Format;
+ }
+}
+
+inline SkColorType SkColorType_for_FTPixelMode(FT_Pixel_Mode pixel_mode) {
+ switch (pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
+ case FT_PIXEL_MODE_GRAY:
+ return kAlpha_8_SkColorType;
+ case FT_PIXEL_MODE_BGRA:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported FT_PIXEL_MODE");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+inline SkColorType SkColorType_for_SkMaskFormat(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kA8_Format:
+ case SkMask::kLCD16_Format:
+ return kAlpha_8_SkColorType;
+ case SkMask::kARGB32_Format:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported destination SkBitmap::Config");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+} // namespace
+
+void SkScalerContext_FreeType_Base::generateGlyphImage(
+ FT_Face face,
+ const SkGlyph& glyph,
+ const SkMatrix& bitmapTransform)
+{
+ const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+ switch ( face->glyph->format ) {
+ case FT_GLYPH_FORMAT_OUTLINE: {
+ FT_Outline* outline = &face->glyph->outline;
+
+ int dx = 0, dy = 0;
+ if (this->isSubpixel()) {
+ dx = SkFixedToFDot6(glyph.getSubXFixed());
+ dy = SkFixedToFDot6(glyph.getSubYFixed());
+ // negate dy since freetype-y-goes-up and skia-y-goes-down
+ dy = -dy;
+ }
+
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+
+#ifdef FT_COLOR_H
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ kN32_SkColorType,
+ kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ // Scale unscaledBitmap into dstBitmap.
+ SkCanvas canvas(dstBitmap);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ canvas.clear(0x33FF0000);
+#else
+ canvas.clear(SK_ColorTRANSPARENT);
+#endif
+ canvas.translate(-glyph.fLeft, -glyph.fTop);
+
+ if (this->isSubpixel()) {
+ canvas.translate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ }
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+
+ FT_Color *palette;
+ FT_Error err = FT_Palette_Select(face, 0, &palette);
+ if (err) {
+ SK_TRACEFTR(err, "Could not get palette from %s fontFace.", face->family_name);
+ return;
+ }
+ FT_LayerIterator layerIterator;
+ layerIterator.p = nullptr;
+ FT_Bool haveLayers = false;
+ FT_UInt layerGlyphIndex;
+ FT_UInt layerColorIndex;
+
+ while (FT_Get_Color_Glyph_Layer(face, glyph.getGlyphID(), &layerGlyphIndex,
+ &layerColorIndex,
+ &layerIterator)) {
+ haveLayers = true;
+ if (layerColorIndex == 0xFFFF) {
+ paint.setColor(SK_ColorBLACK);
+ } else {
+ SkColor color = SkColorSetARGB(palette[layerColorIndex].alpha,
+ palette[layerColorIndex].red,
+ palette[layerColorIndex].green,
+ palette[layerColorIndex].blue);
+ paint.setColor(color);
+ }
+ SkPath path;
+ if (this->generateFacePath(face, layerGlyphIndex, &path)) {
+ canvas.drawPath(path, paint);
+ }
+ }
+
+ if (!haveLayers) {
+ // 'err' is stale here (FT_Palette_Select succeeded), so log directly.
+ SkDebugf("Could not get layers from %s fontFace.\n", face->family_name);
+ return;
+ }
+ } else
+#endif
+ if (SkMask::kLCD16_Format == glyph.fMaskFormat) {
+ FT_Outline_Translate(outline, dx, dy);
+ FT_Error err = FT_Render_Glyph(face->glyph, doVert ? FT_RENDER_MODE_LCD_V :
+ FT_RENDER_MODE_LCD);
+ if (err) {
+ SK_TRACEFTR(err, "Could not render glyph %x.", face->glyph);
+ return;
+ }
+
+ SkMask mask = glyph.mask();
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ memset(mask.fImage, 0x80, mask.fBounds.height() * mask.fRowBytes);
+#endif
+ FT_GlyphSlotRec& ftGlyph = *face->glyph;
+
+ if (!SkIRect::Intersects(mask.fBounds,
+ SkIRect::MakeXYWH( ftGlyph.bitmap_left,
+ -ftGlyph.bitmap_top,
+ ftGlyph.bitmap.width,
+ ftGlyph.bitmap.rows)))
+ {
+ return;
+ }
+
+ // If the FT_Bitmap extent is larger, discard bits of the bitmap outside the mask.
+ // If the SkMask extent is larger, shrink mask to fit bitmap (clearing discarded).
+ unsigned char* origBuffer = ftGlyph.bitmap.buffer;
+ // First align the top left (origin).
+ if (-ftGlyph.bitmap_top < mask.fBounds.fTop) {
+ int32_t topDiff = mask.fBounds.fTop - (-ftGlyph.bitmap_top);
+ ftGlyph.bitmap.buffer += ftGlyph.bitmap.pitch * topDiff;
+ ftGlyph.bitmap.rows -= topDiff;
+ ftGlyph.bitmap_top = -mask.fBounds.fTop;
+ }
+ if (ftGlyph.bitmap_left < mask.fBounds.fLeft) {
+ int32_t leftDiff = mask.fBounds.fLeft - ftGlyph.bitmap_left;
+ ftGlyph.bitmap.buffer += leftDiff;
+ ftGlyph.bitmap.width -= leftDiff;
+ ftGlyph.bitmap_left = mask.fBounds.fLeft;
+ }
+ if (mask.fBounds.fTop < -ftGlyph.bitmap_top) {
+ mask.fImage += mask.fRowBytes * (-ftGlyph.bitmap_top - mask.fBounds.fTop);
+ mask.fBounds.fTop = -ftGlyph.bitmap_top;
+ }
+ if (mask.fBounds.fLeft < ftGlyph.bitmap_left) {
+ mask.fImage += sizeof(uint16_t) * (ftGlyph.bitmap_left - mask.fBounds.fLeft);
+ mask.fBounds.fLeft = ftGlyph.bitmap_left;
+ }
+ // Origins aligned, clean up the width and height.
+ int ftVertScale = (doVert ? 3 : 1);
+ int ftHoriScale = (doVert ? 1 : 3);
+ if (mask.fBounds.height() * ftVertScale < SkToInt(ftGlyph.bitmap.rows)) {
+ ftGlyph.bitmap.rows = mask.fBounds.height() * ftVertScale;
+ }
+ if (mask.fBounds.width() * ftHoriScale < SkToInt(ftGlyph.bitmap.width)) {
+ ftGlyph.bitmap.width = mask.fBounds.width() * ftHoriScale;
+ }
+ if (SkToInt(ftGlyph.bitmap.rows) < mask.fBounds.height() * ftVertScale) {
+ mask.fBounds.fBottom = mask.fBounds.fTop + ftGlyph.bitmap.rows / ftVertScale;
+ }
+ if (SkToInt(ftGlyph.bitmap.width) < mask.fBounds.width() * ftHoriScale) {
+ mask.fBounds.fRight = mask.fBounds.fLeft + ftGlyph.bitmap.width / ftHoriScale;
+ }
+ if (fPreBlend.isApplicable()) {
+ copyFT2LCD16<true>(ftGlyph.bitmap, mask, doBGR,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ copyFT2LCD16<false>(ftGlyph.bitmap, mask, doBGR,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ // Restore the buffer pointer so FreeType can properly free it.
+ ftGlyph.bitmap.buffer = origBuffer;
+ } else {
+ FT_BBox bbox;
+ FT_Bitmap target;
+ FT_Outline_Get_CBox(outline, &bbox);
+ /*
+ what we really want to do for subpixel is
+ offset(dx, dy)
+ compute_bounds
+ offset(bbox & !63)
+ but that is two calls to offset, so we do the following, which
+ achieves the same thing with only one offset call.
+ */
+ FT_Outline_Translate(outline, dx - ((bbox.xMin + dx) & ~63),
+ dy - ((bbox.yMin + dy) & ~63));
+
+ target.width = glyph.fWidth;
+ target.rows = glyph.fHeight;
+ target.pitch = glyph.rowBytes();
+ target.buffer = reinterpret_cast<uint8_t*>(glyph.fImage);
+ target.pixel_mode = compute_pixel_mode( (SkMask::Format)glyph.fMaskFormat);
+ target.num_grays = 256;
+
+ FT_Outline_Get_Bitmap(face->glyph->library, outline, &target);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.fWidth; ++x) {
+ uint8_t& a = ((uint8_t*)glyph.fImage)[(glyph.rowBytes() * y) + x];
+ a = SkTMax<uint8_t>(a, 0x20);
+ }
+ }
+#endif
+ }
+ } break;
+
+ case FT_GLYPH_FORMAT_BITMAP: {
+ FT_Pixel_Mode pixel_mode = static_cast<FT_Pixel_Mode>(face->glyph->bitmap.pixel_mode);
+ SkMask::Format maskFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
+
+ // Assume that the other formats do not exist.
+ SkASSERT(FT_PIXEL_MODE_MONO == pixel_mode ||
+ FT_PIXEL_MODE_GRAY == pixel_mode ||
+ FT_PIXEL_MODE_BGRA == pixel_mode);
+
+ // These are the only formats this ScalerContext should request.
+ SkASSERT(SkMask::kBW_Format == maskFormat ||
+ SkMask::kA8_Format == maskFormat ||
+ SkMask::kARGB32_Format == maskFormat ||
+ SkMask::kLCD16_Format == maskFormat);
+
+ // If no scaling needed, directly copy glyph bitmap.
+ if (bitmapTransform.isIdentity()) {
+ SkMask dstMask = glyph.mask();
+ copyFTBitmap(face->glyph->bitmap, dstMask);
+ break;
+ }
+
+ // Otherwise, scale the bitmap.
+
+ // Copy the FT_Bitmap into an SkBitmap (either A8 or ARGB)
+ SkBitmap unscaledBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ unscaledBitmap.allocPixels(SkImageInfo::Make(face->glyph->bitmap.width,
+ face->glyph->bitmap.rows,
+ SkColorType_for_FTPixelMode(pixel_mode),
+ kPremul_SkAlphaType));
+
+ SkMask unscaledBitmapAlias;
+ unscaledBitmapAlias.fImage = reinterpret_cast<uint8_t*>(unscaledBitmap.getPixels());
+ unscaledBitmapAlias.fBounds.setWH(unscaledBitmap.width(), unscaledBitmap.height());
+ unscaledBitmapAlias.fRowBytes = unscaledBitmap.rowBytes();
+ unscaledBitmapAlias.fFormat = SkMaskFormat_for_SkColorType(unscaledBitmap.colorType());
+ copyFTBitmap(face->glyph->bitmap, unscaledBitmapAlias);
+
+ // Wrap the glyph's mask in a bitmap, unless the glyph's mask is BW or LCD.
+ // BW requires an A8 target for resizing, which can then be downsampled.
+ // LCD should use a 4x A8 target, which will then be downsampled.
+ // For simplicity, LCD uses A8 and is replicated.
+ int bitmapRowBytes = 0;
+ if (SkMask::kBW_Format != maskFormat && SkMask::kLCD16_Format != maskFormat) {
+ bitmapRowBytes = glyph.rowBytes();
+ }
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ SkColorType_for_SkMaskFormat(maskFormat),
+ kPremul_SkAlphaType),
+ bitmapRowBytes);
+ if (SkMask::kBW_Format == maskFormat || SkMask::kLCD16_Format == maskFormat) {
+ dstBitmap.allocPixels();
+ } else {
+ dstBitmap.setPixels(glyph.fImage);
+ }
+
+ // Scale unscaledBitmap into dstBitmap.
+ SkCanvas canvas(dstBitmap);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ canvas.clear(0x33FF0000);
+#else
+ canvas.clear(SK_ColorTRANSPARENT);
+#endif
+ canvas.translate(-glyph.fLeft, -glyph.fTop);
+ canvas.concat(bitmapTransform);
+ canvas.translate(face->glyph->bitmap_left, -face->glyph->bitmap_top);
+
+ SkPaint paint;
+ // Using kMedium FilterQuality will cause mipmaps to be generated. Use
+ // kLow when the results will be roughly the same in order to avoid
+ // the mipmap generation cost.
+ // See skbug.com/6967
+ if (bitmapTransform.getMinScale() < 0.5) {
+ paint.setFilterQuality(kMedium_SkFilterQuality);
+ } else {
+ paint.setFilterQuality(kLow_SkFilterQuality);
+ }
+ canvas.drawBitmap(unscaledBitmap, 0, 0, &paint);
+
+ // If the destination is BW or LCD, convert from A8.
+ if (SkMask::kBW_Format == maskFormat) {
+ // Copy the A8 dstBitmap into the A1 glyph.fImage.
+ SkMask dstMask = glyph.mask();
+ packA8ToA1(dstMask, dstBitmap.getAddr8(0, 0), dstBitmap.rowBytes());
+ } else if (SkMask::kLCD16_Format == maskFormat) {
+ // Copy the A8 dstBitmap into the LCD16 glyph.fImage.
+ uint8_t* src = dstBitmap.getAddr8(0, 0);
+ uint16_t* dst = reinterpret_cast<uint16_t*>(glyph.fImage);
+ for (int y = dstBitmap.height(); y --> 0;) {
+ for (int x = 0; x < dstBitmap.width(); ++x) {
+ dst[x] = grayToRGB16(src[x]);
+ }
+ dst = (uint16_t*)((char*)dst + glyph.rowBytes());
+ src += dstBitmap.rowBytes();
+ }
+ }
+
+ } break;
+
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+ return;
+ }
+
+// Pre-USE_COLOR_LUMINANCE this gamma pass was applied unconditionally; with
+// color luminance it is optional.
+#if defined(SK_GAMMA_APPLY_TO_A8)
+ if (SkMask::kA8_Format == glyph.fMaskFormat && fPreBlend.isApplicable()) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*)glyph.fImage;
+ unsigned rowBytes = glyph.rowBytes();
+
+ for (int y = glyph.fHeight - 1; y >= 0; --y) {
+ for (int x = glyph.fWidth - 1; x >= 0; --x) {
+ dst[x] = fPreBlend.fG[dst[x]];
+ }
+ dst += rowBytes;
+ }
+ }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+int move_proc(const FT_Vector* pt, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->close(); // to close the previous contour (if any)
+ path->moveTo(SkFDot6ToScalar(pt->x), -SkFDot6ToScalar(pt->y));
+ return 0;
+}
+
+int line_proc(const FT_Vector* pt, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->lineTo(SkFDot6ToScalar(pt->x), -SkFDot6ToScalar(pt->y));
+ return 0;
+}
+
+int quad_proc(const FT_Vector* pt0, const FT_Vector* pt1, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->quadTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
+ SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y));
+ return 0;
+}
+
+int cubic_proc(const FT_Vector* pt0, const FT_Vector* pt1, const FT_Vector* pt2, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->cubicTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
+ SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y),
+ SkFDot6ToScalar(pt2->x), -SkFDot6ToScalar(pt2->y));
+ return 0;
+}
+
+} // namespace
+
+bool SkScalerContext_FreeType_Base::generateGlyphPath(FT_Face face, SkPath* path) {
+ FT_Outline_Funcs funcs;
+
+ funcs.move_to = move_proc;
+ funcs.line_to = line_proc;
+ funcs.conic_to = quad_proc;
+ funcs.cubic_to = cubic_proc;
+ funcs.shift = 0;
+ funcs.delta = 0;
+
+ FT_Error err = FT_Outline_Decompose(&face->glyph->outline, &funcs, path);
+
+ if (err != 0) {
+ path->reset();
+ return false;
+ }
+
+ path->close();
+ return true;
+}
+
+bool SkScalerContext_FreeType_Base::generateFacePath(FT_Face face, SkGlyphID glyphID, SkPath* path) {
+ uint32_t flags = 0; // (not fLoadGlyphFlags)
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = FT_Load_Glyph(face, glyphID, flags);
+ if (err != 0) {
+ path->reset();
+ return false;
+ }
+
+ if (!generateGlyphPath(face, path)) {
+ path->reset();
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
new file mode 100644
index 0000000000..31b46d3cad
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKFONTHOST_FREETYPE_COMMON_H_
+#define SKFONTHOST_FREETYPE_COMMON_H_
+
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkMutex.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkScalerContext.h"
+#include "src/utils/SkCharToGlyphCache.h"
+
+#include "include/core/SkFontMgr.h"
+
+// These are forward declared to avoid pimpl but also hide the FreeType implementation.
+typedef struct FT_LibraryRec_* FT_Library;
+typedef struct FT_FaceRec_* FT_Face;
+typedef struct FT_StreamRec_* FT_Stream;
+typedef signed long FT_Pos;
+
+
+#ifdef SK_DEBUG
+const char* SkTraceFtrGetError(int);
+#define SK_TRACEFTR(ERR, MSG, ...) \
+ SkDebugf("%s:%lu:1: error: 0x%x '%s' " MSG "\n", __FILE__, __LINE__, ERR, \
+ SkTraceFtrGetError((int)(ERR)), __VA_ARGS__)
+#else
+#define SK_TRACEFTR(ERR, ...) do { sk_ignore_unused_variable(ERR); } while (false)
+#endif
+
+
+class SkScalerContext_FreeType_Base : public SkScalerContext {
+protected:
+ // See http://freetype.sourceforge.net/freetype2/docs/reference/ft2-bitmap_handling.html#FT_Bitmap_Embolden
+ // This value was chosen by eyeballing the result in Firefox and trying to match it.
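+ // (1 << 6 is one whole pixel in FreeType's 26.6 fixed-point units.)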
+ static const FT_Pos kBitmapEmboldenStrength = 1 << 6;
+
+ SkScalerContext_FreeType_Base(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor *desc)
+ : INHERITED(std::move(typeface), effects, desc)
+ {}
+
+ void generateGlyphImage(FT_Face face, const SkGlyph& glyph, const SkMatrix& bitmapTransform);
+ bool generateGlyphPath(FT_Face face, SkPath* path);
+ bool generateFacePath(FT_Face face, SkGlyphID glyphID, SkPath* path);
+private:
+ typedef SkScalerContext INHERITED;
+};
+
+class SkTypeface_FreeType : public SkTypeface {
+public:
+ /** For SkFontMgrs to make use of our ability to extract
+ * name and style from a stream, using FreeType's API.
+ */
+ class Scanner : ::SkNoncopyable {
+ public:
+ Scanner();
+ ~Scanner();
+ struct AxisDefinition {
+ SkFourByteTag fTag;
+ SkFixed fMinimum;
+ SkFixed fDefault;
+ SkFixed fMaximum;
+ };
+ using AxisDefinitions = SkSTArray<4, AxisDefinition, true>;
+ bool recognizedFont(SkStreamAsset* stream, int* numFaces) const;
+ bool scanFont(SkStreamAsset* stream, int ttcIndex,
+ SkString* name, SkFontStyle* style, bool* isFixedPitch,
+ AxisDefinitions* axes) const;
+ static void computeAxisValues(
+ AxisDefinitions axisDefinitions,
+ const SkFontArguments::VariationPosition position,
+ SkFixed* axisValues,
+ const SkString& name);
+ static bool GetAxes(FT_Face face, AxisDefinitions* axes);
+
+ private:
+ FT_Face openFace(SkStreamAsset* stream, int ttcIndex, FT_Stream ftStream) const;
+ FT_Library fLibrary;
+ mutable SkMutex fLibraryMutex;
+ };
+
+ /** Fetch units/EM from the "head" table if needed (i.e. for bitmap fonts). */
+ static int GetUnitsPerEm(FT_Face face);
+protected:
+ SkTypeface_FreeType(const SkFontStyle& style, bool isFixedPitch)
+ : INHERITED(style, isFixedPitch)
+ {}
+
+ std::unique_ptr<SkFontData> cloneFontData(const SkFontArguments&) const;
+ virtual SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void getGlyphToUnicodeMap(SkUnichar*) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void getPostScriptGlyphNames(SkString* dstArray) const override;
+ int onGetUPEM() const override;
+ bool onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const override;
+ void onCharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+
+ LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override;
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+
+private:
+ mutable SkMutex fC2GCacheMutex;
+ mutable SkCharToGlyphCache fC2GCache;
+
+ typedef SkTypeface INHERITED;
+};
+
+#endif // SKFONTHOST_FREETYPE_COMMON_H_
diff --git a/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
new file mode 100644
index 0000000000..b620a4cc50
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
@@ -0,0 +1,677 @@
+
+/*
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFDot6.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkTypefaceCache.h"
+
+#include <cmath>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_OUTLINE_H
+
+// for FT_GlyphSlot_Embolden
+#ifdef FT_SYNTHESIS_H
+#include FT_SYNTHESIS_H
+#endif
+
+// for FT_Library_SetLcdFilter
+#ifdef FT_LCD_FILTER_H
+#include FT_LCD_FILTER_H
+#else
+typedef enum FT_LcdFilter_
+{
+ FT_LCD_FILTER_NONE = 0,
+ FT_LCD_FILTER_DEFAULT = 1,
+ FT_LCD_FILTER_LIGHT = 2,
+ FT_LCD_FILTER_LEGACY = 16,
+} FT_LcdFilter;
+#endif
+
+// If compiling with FreeType before 2.5.0
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+#ifndef SK_CAN_USE_DLOPEN
+#define SK_CAN_USE_DLOPEN 1
+#endif
+#if SK_CAN_USE_DLOPEN
+#include <dlfcn.h>
+#endif
+
+#ifndef SK_FONTHOST_CAIRO_STANDALONE
+#define SK_FONTHOST_CAIRO_STANDALONE 1
+#endif
+
+static bool gFontHintingEnabled = true;
+static FT_Error (*gSetLcdFilter)(FT_Library, FT_LcdFilter) = nullptr;
+
+extern "C"
+{
+ void mozilla_LockFTLibrary(FT_Library aLibrary);
+ void mozilla_UnlockFTLibrary(FT_Library aLibrary);
+ void mozilla_AddRefSharedFTFace(void* aContext);
+ void mozilla_ReleaseSharedFTFace(void* aContext, void* aOwner);
+ void mozilla_ForgetSharedFTFaceLockOwner(void* aContext, void* aOwner);
+ int mozilla_LockSharedFTFace(void* aContext, void* aOwner);
+ void mozilla_UnlockSharedFTFace(void* aContext);
+ FT_Error mozilla_LoadFTGlyph(FT_Face aFace, uint32_t aGlyphIndex, int32_t aFlags);
+ // Implemented in webrender:
+ void mozilla_glyphslot_embolden_less(FT_GlyphSlot slot);
+}
+
+void SkInitCairoFT(bool fontHintingEnabled)
+{
+ gFontHintingEnabled = fontHintingEnabled;
+#if SK_CAN_USE_DLOPEN
+ gSetLcdFilter = (FT_Error (*)(FT_Library, FT_LcdFilter))dlsym(RTLD_DEFAULT, "FT_Library_SetLcdFilter");
+#else
+ gSetLcdFilter = &FT_Library_SetLcdFilter;
+#endif
+ // FT_Library_SetLcdFilter may be provided but have no effect if FreeType
+ // is built without FT_CONFIG_OPTION_SUBPIXEL_RENDERING.
+ if (gSetLcdFilter &&
+ gSetLcdFilter(nullptr, FT_LCD_FILTER_NONE) == FT_Err_Unimplemented_Feature) {
+ gSetLcdFilter = nullptr;
+ }
+}
+
+class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
+public:
+ SkScalerContext_CairoFT(sk_sp<SkTypeface> typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc, FT_Face face,
+ void* faceContext, SkPixelGeometry pixelGeometry,
+ FT_LcdFilter lcdFilter);
+
+ virtual ~SkScalerContext_CairoFT() {
+ mozilla_ForgetSharedFTFaceLockOwner(fFTFaceContext, this);
+ }
+
+ bool isValid() const { return fFTFaceContext != nullptr; }
+
+ void Lock() {
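+ // If the lock reports that this context was not the face's last user,
+ // reinstall our transform and char size. Sizes are 26.6 fixed point,
+ // hence the * 64 and rounding.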
+ if (!mozilla_LockSharedFTFace(fFTFaceContext, this)) {
+ FT_Set_Transform(fFTFace, fHaveShape ? &fShapeMatrixFT : nullptr, nullptr);
+ FT_Set_Char_Size(fFTFace, FT_F26Dot6(fScaleX * 64.0f + 0.5f),
+ FT_F26Dot6(fScaleY * 64.0f + 0.5f), 0, 0);
+ }
+ }
+
+ void Unlock() { mozilla_UnlockSharedFTFace(fFTFaceContext); }
+
+protected:
+ unsigned generateGlyphCount() override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyphID, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics* metrics) override;
+
+private:
+ bool computeShapeMatrix(const SkMatrix& m);
+ void prepareGlyph(FT_GlyphSlot glyph);
+
+ FT_Face fFTFace;
+ void* fFTFaceContext;
+ FT_Int32 fLoadGlyphFlags;
+ FT_LcdFilter fLcdFilter;
+ SkScalar fScaleX;
+ SkScalar fScaleY;
+ SkMatrix fShapeMatrix;
+ FT_Matrix fShapeMatrixFT;
+ bool fHaveShape;
+};
+
+class AutoLockFTFace {
+public:
+ AutoLockFTFace(SkScalerContext_CairoFT* scalerContext)
+ : fScalerContext(scalerContext) {
+ fScalerContext->Lock();
+ }
+
+ ~AutoLockFTFace() { fScalerContext->Unlock(); }
+
+private:
+ SkScalerContext_CairoFT* fScalerContext;
+};
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+class SkCairoFTTypeface : public SkTypeface {
+public:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int*) const override { return nullptr; }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override
+ {
+ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedMetrics unimplemented\n"));
+ return nullptr;
+ }
+
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects& effects, const SkDescriptor* desc) const override
+ {
+ SkScalerContext_CairoFT* ctx = new SkScalerContext_CairoFT(
+ sk_ref_sp(const_cast<SkCairoFTTypeface*>(this)), effects, desc,
+ fFTFace, fFTFaceContext, fPixelGeometry, fLcdFilter);
+ if (!ctx->isValid()) {
+ delete ctx;
+ return nullptr;
+ }
+ return ctx;
+ }
+
+ void onFilterRec(SkScalerContextRec* rec) const override
+ {
+ // rotated text looks bad with hinting, so we disable it as needed
+ if (!gFontHintingEnabled || !isAxisAligned(*rec)) {
+ rec->setHinting(SkFontHinting::kNone);
+ }
+
+ // Don't apply any gamma so that we match cairo-ft's results.
+ rec->ignorePreBlend();
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override
+ {
+ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
+ }
+
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override
+ {
+ mozilla_LockSharedFTFace(fFTFaceContext, nullptr);
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = SkToU16(FT_Get_Char_Index(fFTFace, chars[i]));
+ }
+ mozilla_UnlockSharedFTFace(fFTFaceContext);
+ }
+
+ int onCountGlyphs() const override
+ {
+ return fFTFace->num_glyphs;
+ }
+
+ int onGetUPEM() const override
+ {
+ return 0;
+ }
+
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override
+ {
+ return nullptr;
+ }
+
+ void onGetFamilyName(SkString* familyName) const override
+ {
+ familyName->reset();
+ }
+
+ int onGetTableTags(SkFontTableTag*) const override
+ {
+ return 0;
+ }
+
+ size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override
+ {
+ return 0;
+ }
+
+ void getPostScriptGlyphNames(SkString*) const override {}
+
+ void getGlyphToUnicodeMap(SkUnichar*) const override {}
+
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override
+ {
+ return 0;
+ }
+
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return 0;
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ return sk_ref_sp(this);
+ }
+
+ SkCairoFTTypeface(FT_Face face, void* faceContext,
+ SkPixelGeometry pixelGeometry, FT_LcdFilter lcdFilter)
+ : SkTypeface(SkFontStyle::Normal())
+ , fFTFace(face)
+ , fFTFaceContext(faceContext)
+ , fPixelGeometry(pixelGeometry)
+ , fLcdFilter(lcdFilter)
+ {
+ mozilla_AddRefSharedFTFace(fFTFaceContext);
+ }
+
+ void* GetFTFaceContext() const { return fFTFaceContext; }
+
+ bool hasColorGlyphs() const override
+ {
+ // Check if the font has scalable outlines. If not, then avoid trying
+ // to render it as a path.
+ if (fFTFace) {
+ return !FT_IS_SCALABLE(fFTFace);
+ }
+ return false;
+ }
+
+private:
+ ~SkCairoFTTypeface()
+ {
+ mozilla_ReleaseSharedFTFace(fFTFaceContext, nullptr);
+ }
+
+ FT_Face fFTFace;
+ void* fFTFaceContext;
+ SkPixelGeometry fPixelGeometry;
+ FT_LcdFilter fLcdFilter;
+};
+
+static bool FindByFTFaceContext(SkTypeface* typeface, void* context) {
+ return static_cast<SkCairoFTTypeface*>(typeface)->GetFTFaceContext() == context;
+}
+
+SkTypeface* SkCreateTypefaceFromCairoFTFont(FT_Face face, void* faceContext,
+ SkPixelGeometry pixelGeometry,
+ uint8_t lcdFilter)
+{
+ sk_sp<SkTypeface> typeface =
+ SkTypefaceCache::FindByProcAndRef(FindByFTFaceContext, faceContext);
+ if (!typeface) {
+ typeface = sk_make_sp<SkCairoFTTypeface>(face, faceContext, pixelGeometry,
+ (FT_LcdFilter)lcdFilter);
+ SkTypefaceCache::Add(typeface);
+ }
+
+ return typeface.release();
+}
+
+SkScalerContext_CairoFT::SkScalerContext_CairoFT(
+ sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc, FT_Face face, void* faceContext,
+ SkPixelGeometry pixelGeometry, FT_LcdFilter lcdFilter)
+ : SkScalerContext_FreeType_Base(std::move(typeface), effects, desc)
+ , fFTFace(face)
+ , fFTFaceContext(faceContext)
+ , fLcdFilter(lcdFilter)
+{
+ SkMatrix matrix;
+ fRec.getSingleMatrix(&matrix);
+
+ computeShapeMatrix(matrix);
+
+ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
+
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ if (fRec.getHinting() == SkFontHinting::kNone) {
+ loadFlags |= FT_LOAD_NO_HINTING;
+ } else {
+ loadFlags = FT_LOAD_TARGET_MONO;
+ }
+ loadFlags |= FT_LOAD_MONOCHROME;
+ } else {
+ if (isLCD(fRec)) {
+ switch (pixelGeometry) {
+ case kRGB_H_SkPixelGeometry:
+ default:
+ break;
+ case kRGB_V_SkPixelGeometry:
+ fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag;
+ break;
+ case kBGR_H_SkPixelGeometry:
+ fRec.fFlags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ case kBGR_V_SkPixelGeometry:
+ fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ }
+ }
+
+ switch (fRec.getHinting()) {
+ case SkFontHinting::kNone:
+ loadFlags |= FT_LOAD_NO_HINTING;
+ break;
+ case SkFontHinting::kSlight:
+ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
+ break;
+ case SkFontHinting::kNormal:
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+ }
+ break;
+ case SkFontHinting::kFull:
+ if (isLCD(fRec)) {
+ if (fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ loadFlags = FT_LOAD_TARGET_LCD_V;
+ } else {
+ loadFlags = FT_LOAD_TARGET_LCD;
+ }
+ }
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+ }
+ break;
+ default:
+ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
+ break;
+ }
+ }
+
+ // Disable autohinting when asked to disable hinting, except for "tricky" fonts.
+ if (!gFontHintingEnabled) {
+ if (fFTFace && !(fFTFace->face_flags & FT_FACE_FLAG_TRICKY)) {
+ loadFlags |= FT_LOAD_NO_AUTOHINT;
+ }
+ }
+
+ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
+ loadFlags |= FT_LOAD_NO_BITMAP;
+ }
+
+ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
+ // advances, as fontconfig and cairo do.
+ // See http://code.google.com/p/skia/issues/detail?id=222.
+ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
+
+ loadFlags |= FT_LOAD_COLOR;
+
+ fLoadGlyphFlags = loadFlags;
+}
+
+bool SkScalerContext_CairoFT::computeShapeMatrix(const SkMatrix& m)
+{
+ // Compute a shape matrix compatible with Cairo's _compute_transform.
+ // Finds major/minor scales and uses them to normalize the transform.
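+ // 'major' is the length of the transformed unit x-vector and 'minor' is
+ // |det| / major, so major * minor preserves the transform's area scale.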
+ double scaleX = m.getScaleX();
+ double skewX = m.getSkewX();
+ double skewY = m.getSkewY();
+ double scaleY = m.getScaleY();
+ double det = scaleX * scaleY - skewY * skewX;
+ if (!std::isfinite(det)) {
+ fScaleX = fRec.fTextSize * fRec.fPreScaleX;
+ fScaleY = fRec.fTextSize;
+ fHaveShape = false;
+ return false;
+ }
+ double major = det != 0.0 ? hypot(scaleX, skewY) : 0.0;
+ double minor = major != 0.0 ? fabs(det) / major : 0.0;
+ // Limit scales to be above 1pt.
+ major = SkTMax(major, 1.0);
+ minor = SkTMax(minor, 1.0);
+
+ // If the font is not scalable, then choose the best available size.
+ if (fFTFace && !FT_IS_SCALABLE(fFTFace)) {
+ double bestDist = DBL_MAX;
+ FT_Int bestSize = -1;
+ for (FT_Int i = 0; i < fFTFace->num_fixed_sizes; i++) {
+ // Distance is positive if the strike is larger than the desired size,
+ // negative if smaller. If the best strike found so far is smaller than
+ // desired, prefer any larger strike; otherwise minimize |distance|.
+ double dist = fFTFace->available_sizes[i].y_ppem / 64.0 - minor;
+ if (bestDist < 0 ? dist >= bestDist : fabs(dist) <= bestDist) {
+ bestDist = dist;
+ bestSize = i;
+ }
+ }
+ if (bestSize < 0) {
+ fScaleX = fRec.fTextSize * fRec.fPreScaleX;
+ fScaleY = fRec.fTextSize;
+ fHaveShape = false;
+ return false;
+ }
+ major = fFTFace->available_sizes[bestSize].x_ppem / 64.0;
+ minor = fFTFace->available_sizes[bestSize].y_ppem / 64.0;
+ fHaveShape = true;
+ } else {
+ fHaveShape = !m.isScaleTranslate() || scaleX < 0.0 || scaleY < 0.0;
+ }
+
+ fScaleX = SkDoubleToScalar(major);
+ fScaleY = SkDoubleToScalar(minor);
+
+ if (fHaveShape) {
+ // Normalize the transform and convert to fixed-point.
+ fShapeMatrix = m;
+ fShapeMatrix.preScale(SkDoubleToScalar(1.0 / major), SkDoubleToScalar(1.0 / minor));
+
+ fShapeMatrixFT.xx = SkScalarToFixed(fShapeMatrix.getScaleX());
+ fShapeMatrixFT.yx = SkScalarToFixed(-fShapeMatrix.getSkewY());
+ fShapeMatrixFT.xy = SkScalarToFixed(-fShapeMatrix.getSkewX());
+ fShapeMatrixFT.yy = SkScalarToFixed(fShapeMatrix.getScaleY());
+ }
+ return true;
+}
+
+unsigned SkScalerContext_CairoFT::generateGlyphCount()
+{
+ return fFTFace->num_glyphs;
+}
+
+bool SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
+{
+ generateMetrics(glyph);
+ return !glyph->isEmpty();
+}
+
+void SkScalerContext_CairoFT::prepareGlyph(FT_GlyphSlot glyph)
+{
+ if (fRec.fFlags & SkScalerContext::kEmbolden_Flag) {
+ // Not FT_GlyphSlot_Embolden because we want a less extreme effect.
+ mozilla_glyphslot_embolden_less(glyph);
+ }
+}
+
+void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph)
+{
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ glyph->zeroMetrics();
+
+ AutoLockFTFace faceLock(this);
+
+ FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyph->getGlyphID(), fLoadGlyphFlags);
+ if (err != 0) {
+ return;
+ }
+
+ prepareGlyph(fFTFace->glyph);
+
+ glyph->fAdvanceX = SkFDot6ToFloat(fFTFace->glyph->advance.x);
+ glyph->fAdvanceY = -SkFDot6ToFloat(fFTFace->glyph->advance.y);
+
+ SkIRect bounds;
+ switch (fFTFace->glyph->format) {
+ case FT_GLYPH_FORMAT_OUTLINE:
+ if (!fFTFace->glyph->outline.n_contours) {
+ return;
+ }
+
+ FT_BBox bbox;
+ FT_Outline_Get_CBox(&fFTFace->glyph->outline, &bbox);
+ if (this->isSubpixel()) {
+ int dx = SkFixedToFDot6(glyph->getSubXFixed());
+ int dy = SkFixedToFDot6(glyph->getSubYFixed());
+ bbox.xMin += dx;
+ bbox.yMin -= dy;
+ bbox.xMax += dx;
+ bbox.yMax -= dy;
+ }
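+ // Outset the control box to whole-pixel boundaries (26.6 fixed
+ // point: 64 units per pixel).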
+ bbox.xMin &= ~63;
+ bbox.yMin &= ~63;
+ bbox.xMax = (bbox.xMax + 63) & ~63;
+ bbox.yMax = (bbox.yMax + 63) & ~63;
+ bounds = SkIRect::MakeLTRB(SkFDot6Floor(bbox.xMin),
+ -SkFDot6Floor(bbox.yMax),
+ SkFDot6Floor(bbox.xMax),
+ -SkFDot6Floor(bbox.yMin));
+
+ if (isLCD(fRec)) {
+ // In FreeType < 2.8.1, LCD filtering, if explicitly used, may
+ // add padding to the glyph. When not used, there is no padding.
+ // As of 2.8.1, LCD filtering is now always supported and may
+ // add padding even if an LCD filter is not explicitly set.
+ // Regardless, if no LCD filtering is used, or if LCD filtering
+ // doesn't add padding, it is safe to modify the glyph's bounds
+ // here. generateGlyphImage will detect if the mask is smaller
+ // than the bounds and clip things appropriately.
+ if (fRec.fFlags & kLCD_Vertical_Flag) {
+ bounds.outset(0, 1);
+ } else {
+ bounds.outset(1, 0);
+ }
+ }
+ break;
+ case FT_GLYPH_FORMAT_BITMAP:
+ if (fFTFace->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ if (isLCD(fRec)) {
+ fRec.fMaskFormat = SkMask::kA8_Format;
+ }
+
+ if (fHaveShape) {
+ // Ensure filtering is preserved when the bitmap is transformed.
+ // Otherwise, the result will look horrifically aliased.
+ if (fRec.fMaskFormat == SkMask::kBW_Format) {
+ fRec.fMaskFormat = SkMask::kA8_Format;
+ }
+
+ // Apply the shape matrix to the glyph's bounding box.
+ SkRect srcRect = SkRect::MakeXYWH(
+ SkIntToScalar(fFTFace->glyph->bitmap_left),
+ -SkIntToScalar(fFTFace->glyph->bitmap_top),
+ SkIntToScalar(fFTFace->glyph->bitmap.width),
+ SkIntToScalar(fFTFace->glyph->bitmap.rows));
+ SkRect destRect;
+ fShapeMatrix.mapRect(&destRect, srcRect);
+ SkIRect glyphRect = destRect.roundOut();
+ bounds = SkIRect::MakeXYWH(SkScalarRoundToInt(destRect.fLeft),
+ SkScalarRoundToInt(destRect.fTop),
+ glyphRect.width(),
+ glyphRect.height());
+ } else {
+ bounds = SkIRect::MakeXYWH(fFTFace->glyph->bitmap_left,
+ -fFTFace->glyph->bitmap_top,
+ fFTFace->glyph->bitmap.width,
+ fFTFace->glyph->bitmap.rows);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ return;
+ }
+
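+ // SkGlyph metrics are 16-bit; if the bounds cannot be represented,
+ // leave the zeroed metrics set above in place.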
+ if (SkIRect::MakeXYWH(SHRT_MIN, SHRT_MIN, USHRT_MAX, USHRT_MAX).contains(bounds)) {
+ glyph->fWidth = SkToU16(bounds.width());
+ glyph->fHeight = SkToU16(bounds.height());
+ glyph->fLeft = SkToS16(bounds.left());
+ glyph->fTop = SkToS16(bounds.top());
+ }
+}
+
+void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
+{
+ AutoLockFTFace faceLock(this);
+
+ FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyph.getGlyphID(), fLoadGlyphFlags);
+
+ if (err != 0) {
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+ return;
+ }
+
+ prepareGlyph(fFTFace->glyph);
+
+ bool useLcdFilter =
+ fFTFace->glyph->format == FT_GLYPH_FORMAT_OUTLINE &&
+ glyph.maskFormat() == SkMask::kLCD16_Format &&
+ gSetLcdFilter;
+ if (useLcdFilter) {
+ mozilla_LockFTLibrary(fFTFace->glyph->library);
+ gSetLcdFilter(fFTFace->glyph->library, fLcdFilter);
+ }
+
+ SkMatrix matrix;
+ if (fFTFace->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
+ fHaveShape) {
+ matrix = fShapeMatrix;
+ } else {
+ matrix.setIdentity();
+ }
+ generateGlyphImage(fFTFace, glyph, matrix);
+
+ if (useLcdFilter) {
+ gSetLcdFilter(fFTFace->glyph->library, FT_LCD_FILTER_NONE);
+ mozilla_UnlockFTLibrary(fFTFace->glyph->library);
+ }
+}
+
+bool SkScalerContext_CairoFT::generatePath(SkGlyphID glyphID, SkPath* path)
+{
+ AutoLockFTFace faceLock(this);
+
+ SkASSERT(path);
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyphID, flags);
+
+ if (err != 0) {
+ path->reset();
+ return false;
+ }
+
+ prepareGlyph(fFTFace->glyph);
+
+ return generateGlyphPath(fFTFace, path);
+}
+
+void SkScalerContext_CairoFT::generateFontMetrics(SkFontMetrics* metrics)
+{
+ if (metrics) {
+ memset(metrics, 0, sizeof(SkFontMetrics));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkFontMgr.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ // todo
+ return nullptr;
+}
+
diff --git a/gfx/skia/skia/src/ports/SkFontHost_mac.cpp b/gfx/skia/skia/src/ports/SkFontHost_mac.cpp
new file mode 100644
index 0000000000..e57b5eb5fb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_mac.cpp
@@ -0,0 +1,3011 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/ports/SkTypeface_mac.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkFloatingPoint.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/core/SkUtils.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/utils/SkUTF.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#include <dlfcn.h>
+
+#include <utility>
+
+// Set to make glyph bounding boxes visible.
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
+CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face) {
+ return face ? (CTFontRef)face->internal_private_getCTFontRef() : nullptr;
+}
+
+class SkScalerContext_Mac;
+
+static SkUniqueCFRef<CFStringRef> make_CFString(const char s[]) {
+ return SkUniqueCFRef<CFStringRef>(CFStringCreateWithCString(nullptr, s, kCFStringEncodingUTF8));
+}
+
+// inline versions of these rect helpers
+
+static bool CGRectIsEmpty_inline(const CGRect& rect) {
+ return rect.size.width <= 0 || rect.size.height <= 0;
+}
+
+static CGFloat CGRectGetMinX_inline(const CGRect& rect) {
+ return rect.origin.x;
+}
+
+static CGFloat CGRectGetMaxX_inline(const CGRect& rect) {
+ return rect.origin.x + rect.size.width;
+}
+
+static CGFloat CGRectGetMinY_inline(const CGRect& rect) {
+ return rect.origin.y;
+}
+
+static CGFloat CGRectGetMaxY_inline(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+}
+
+static CGFloat CGRectGetWidth_inline(const CGRect& rect) {
+ return rect.size.width;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
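+// Fills a width x height block of 32-bit pixels: wide rows defer to
+// sk_memset32, medium rows use an 8x-unrolled loop, narrow rows a plain loop.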
+static void sk_memset_rect32(uint32_t* ptr, uint32_t value,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+ while (height) {
+ sk_memset32(ptr, value, width);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ return;
+ }
+
+ rowBytes -= width * sizeof(uint32_t);
+
+ if (width >= 8) {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ w -= 8;
+ } while (w >= 8);
+ while (--w >= 0) {
+ *ptr++ = value;
+ }
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ } else {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+}
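+
+// A minimal usage sketch (hypothetical buffer): fill the top-left 8x4 pixels of a
+// 16-pixel-wide 32-bit surface with opaque white. rowBytes is the full row stride
+// in bytes, so pixels beyond `width` in each row are left untouched:
+//   uint32_t buf[4 * 16];
+//   sk_memset_rect32(buf, 0xFFFFFFFF, 8, 4, 16 * sizeof(uint32_t));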
+
+typedef uint32_t CGRGBPixel;
+
+static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+}
+
+static CGFloat ScalarToCG(SkScalar scalar) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return SkScalarToFloat(scalar);
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return (CGFloat) SkScalarToDouble(scalar);
+ }
+}
+
+static SkScalar CGToScalar(CGFloat cgFloat) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return SkFloatToScalar(cgFloat);
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return SkDoubleToScalar(cgFloat);
+ }
+}
+
+static float CGToFloat(CGFloat cgFloat) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return cgFloat;
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return static_cast<float>(cgFloat);
+ }
+}
+
+static CGAffineTransform MatrixToCGAffineTransform(const SkMatrix& matrix) {
+ return CGAffineTransformMake( ScalarToCG(matrix[SkMatrix::kMScaleX]),
+ -ScalarToCG(matrix[SkMatrix::kMSkewY] ),
+ -ScalarToCG(matrix[SkMatrix::kMSkewX] ),
+ ScalarToCG(matrix[SkMatrix::kMScaleY]),
+ ScalarToCG(matrix[SkMatrix::kMTransX]),
+ ScalarToCG(matrix[SkMatrix::kMTransY]));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define BITMAP_INFO_RGB (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host)
+
+/** Drawn in FontForge, reduced with fonttools ttx, converted by xxd -i,
+ * this TrueType font contains a glyph of the spider.
+ *
+ * To re-forge the original bytes of the TrueType font file,
+ * remove all ',|( +0x)' from this definition,
+ * copy the data to the clipboard,
+ * run 'pbpaste | xxd -p -r - spider.ttf'.
+ */
+static constexpr const uint8_t kSpiderSymbol_ttf[] = {
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x80, 0x00, 0x03, 0x00, 0x40,
+ 0x47, 0x44, 0x45, 0x46, 0x00, 0x14, 0x00, 0x14, 0x00, 0x00, 0x07, 0xa8,
+ 0x00, 0x00, 0x00, 0x18, 0x4f, 0x53, 0x2f, 0x32, 0x8a, 0xf4, 0xfb, 0xdb,
+ 0x00, 0x00, 0x01, 0x48, 0x00, 0x00, 0x00, 0x60, 0x63, 0x6d, 0x61, 0x70,
+ 0xe0, 0x7f, 0x10, 0x7e, 0x00, 0x00, 0x01, 0xb8, 0x00, 0x00, 0x00, 0x54,
+ 0x67, 0x61, 0x73, 0x70, 0xff, 0xff, 0x00, 0x03, 0x00, 0x00, 0x07, 0xa0,
+ 0x00, 0x00, 0x00, 0x08, 0x67, 0x6c, 0x79, 0x66, 0x97, 0x0b, 0x6a, 0xf6,
+ 0x00, 0x00, 0x02, 0x18, 0x00, 0x00, 0x03, 0x40, 0x68, 0x65, 0x61, 0x64,
+ 0x0f, 0xa2, 0x24, 0x1a, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x00, 0x00, 0x36,
+ 0x68, 0x68, 0x65, 0x61, 0x0e, 0xd3, 0x07, 0x3f, 0x00, 0x00, 0x01, 0x04,
+ 0x00, 0x00, 0x00, 0x24, 0x68, 0x6d, 0x74, 0x78, 0x10, 0x03, 0x00, 0x44,
+ 0x00, 0x00, 0x01, 0xa8, 0x00, 0x00, 0x00, 0x0e, 0x6c, 0x6f, 0x63, 0x61,
+ 0x01, 0xb4, 0x00, 0x28, 0x00, 0x00, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x0a,
+ 0x6d, 0x61, 0x78, 0x70, 0x00, 0x4a, 0x01, 0x4d, 0x00, 0x00, 0x01, 0x28,
+ 0x00, 0x00, 0x00, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0xc3, 0xe5, 0x39, 0xd4,
+ 0x00, 0x00, 0x05, 0x58, 0x00, 0x00, 0x02, 0x28, 0x70, 0x6f, 0x73, 0x74,
+ 0xff, 0x03, 0x00, 0x67, 0x00, 0x00, 0x07, 0x80, 0x00, 0x00, 0x00, 0x20,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0b, 0x0f, 0x08, 0x1d,
+ 0x5f, 0x0f, 0x3c, 0xf5, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd1, 0x97, 0xa8, 0x5a, 0x00, 0x00, 0x00, 0x00, 0xd6, 0xe8, 0x32, 0x33,
+ 0x00, 0x03, 0xff, 0x3b, 0x08, 0x00, 0x05, 0x55, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x05, 0x55, 0xff, 0x3b, 0x01, 0x79, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x04, 0x01, 0x1c, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x01, 0x90, 0x00, 0x05,
+ 0x00, 0x00, 0x05, 0x33, 0x05, 0x99, 0x00, 0x00, 0x01, 0x1e, 0x05, 0x33,
+ 0x05, 0x99, 0x00, 0x00, 0x03, 0xd7, 0x00, 0x66, 0x02, 0x12, 0x00, 0x00,
+ 0x05, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x73, 0x6b, 0x69, 0x61, 0x00, 0xc0, 0x00, 0x00, 0xf0, 0x21,
+ 0x06, 0x66, 0xfe, 0x66, 0x01, 0x79, 0x05, 0x55, 0x00, 0xc5, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x20, 0x00, 0x01, 0x08, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x04, 0x00, 0x48,
+ 0x00, 0x00, 0x00, 0x0e, 0x00, 0x08, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x09, 0x00, 0x0d, 0x00, 0x1d, 0x00, 0x21, 0xf0, 0x21, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x1d, 0x00, 0x21,
+ 0xf0, 0x21, 0xff, 0xff, 0x00, 0x01, 0xff, 0xf9, 0xff, 0xf5, 0xff, 0xe4,
+ 0xff, 0xe2, 0x0f, 0xe2, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x14, 0x00, 0x14, 0x01, 0xa0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x44,
+ 0x00, 0x00, 0x02, 0x64, 0x05, 0x55, 0x00, 0x03, 0x00, 0x07, 0x00, 0x00,
+ 0x33, 0x11, 0x21, 0x11, 0x25, 0x21, 0x11, 0x21, 0x44, 0x02, 0x20, 0xfe,
+ 0x24, 0x01, 0x98, 0xfe, 0x68, 0x05, 0x55, 0xfa, 0xab, 0x44, 0x04, 0xcd,
+ 0x00, 0x04, 0x00, 0x03, 0xff, 0x3b, 0x08, 0x00, 0x05, 0x4c, 0x00, 0x15,
+ 0x00, 0x1d, 0x00, 0x25, 0x01, 0x1b, 0x00, 0x00, 0x01, 0x36, 0x37, 0x36,
+ 0x27, 0x26, 0x07, 0x06, 0x06, 0x23, 0x22, 0x27, 0x26, 0x27, 0x26, 0x07,
+ 0x06, 0x17, 0x16, 0x17, 0x16, 0x32, 0x37, 0x32, 0x35, 0x34, 0x23, 0x22,
+ 0x15, 0x14, 0x27, 0x32, 0x35, 0x34, 0x23, 0x22, 0x15, 0x14, 0x03, 0x32,
+ 0x17, 0x30, 0x17, 0x31, 0x36, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x33,
+ 0x32, 0x33, 0x16, 0x33, 0x32, 0x17, 0x16, 0x07, 0x06, 0x23, 0x22, 0x27,
+ 0x26, 0x27, 0x26, 0x23, 0x22, 0x07, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06,
+ 0x1f, 0x02, 0x37, 0x36, 0x37, 0x36, 0x33, 0x32, 0x17, 0x17, 0x16, 0x33,
+ 0x16, 0x17, 0x16, 0x07, 0x06, 0x23, 0x22, 0x27, 0x27, 0x26, 0x23, 0x22,
+ 0x07, 0x06, 0x07, 0x06, 0x17, 0x16, 0x17, 0x16, 0x33, 0x32, 0x33, 0x32,
+ 0x37, 0x36, 0x37, 0x36, 0x17, 0x16, 0x1f, 0x02, 0x16, 0x17, 0x16, 0x15,
+ 0x14, 0x23, 0x22, 0x27, 0x27, 0x26, 0x27, 0x27, 0x26, 0x27, 0x26, 0x07,
+ 0x06, 0x07, 0x06, 0x17, 0x16, 0x17, 0x16, 0x15, 0x14, 0x07, 0x06, 0x07,
+ 0x06, 0x23, 0x22, 0x27, 0x26, 0x07, 0x06, 0x07, 0x06, 0x15, 0x14, 0x17,
+ 0x16, 0x17, 0x16, 0x15, 0x14, 0x07, 0x06, 0x23, 0x22, 0x27, 0x26, 0x27,
+ 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36, 0x37, 0x34, 0x27, 0x26, 0x07,
+ 0x06, 0x07, 0x06, 0x0f, 0x02, 0x06, 0x23, 0x22, 0x27, 0x26, 0x35, 0x34,
+ 0x37, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x27, 0x26, 0x27,
+ 0x26, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, 0x07, 0x06, 0x23, 0x22,
+ 0x27, 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x37, 0x36, 0x37, 0x37, 0x36,
+ 0x37, 0x36, 0x37, 0x36, 0x35, 0x34, 0x27, 0x26, 0x27, 0x26, 0x27, 0x26,
+ 0x23, 0x22, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x27, 0x26, 0x27, 0x26,
+ 0x27, 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x33, 0x32,
+ 0x17, 0x16, 0x33, 0x32, 0x37, 0x36, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36,
+ 0x33, 0x04, 0xf5, 0x23, 0x13, 0x11, 0x14, 0x16, 0x1d, 0x1b, 0x4c, 0x1f,
+ 0x0e, 0x2d, 0x23, 0x14, 0x2c, 0x13, 0x18, 0x25, 0x2c, 0x10, 0x3c, 0x71,
+ 0x1d, 0x5c, 0x5c, 0x3f, 0xae, 0x5c, 0x5c, 0x3f, 0x6a, 0x27, 0x31, 0x5b,
+ 0x09, 0x27, 0x36, 0x03, 0x0a, 0x26, 0x35, 0x2e, 0x09, 0x08, 0xc6, 0x13,
+ 0x81, 0x17, 0x20, 0x18, 0x21, 0x1e, 0x04, 0x04, 0x15, 0x5c, 0x22, 0x26,
+ 0x48, 0x56, 0x3b, 0x10, 0x21, 0x01, 0x0c, 0x06, 0x06, 0x0f, 0x31, 0x44,
+ 0x3c, 0x52, 0x4a, 0x1d, 0x11, 0x3f, 0xb4, 0x71, 0x01, 0x26, 0x06, 0x0d,
+ 0x15, 0x1a, 0x2a, 0x13, 0x53, 0xaa, 0x42, 0x1d, 0x0a, 0x33, 0x20, 0x21,
+ 0x2b, 0x01, 0x02, 0x3e, 0x21, 0x09, 0x02, 0x02, 0x0f, 0x2d, 0x4b, 0x0a,
+ 0x22, 0x15, 0x20, 0x1f, 0x72, 0x8b, 0x2d, 0x2f, 0x1d, 0x1f, 0x0e, 0x25,
+ 0x3f, 0x4d, 0x1b, 0x63, 0x2a, 0x2c, 0x14, 0x22, 0x18, 0x1c, 0x0f, 0x08,
+ 0x2a, 0x08, 0x08, 0x0d, 0x3b, 0x4c, 0x52, 0x74, 0x27, 0x71, 0x2e, 0x01,
+ 0x0c, 0x10, 0x15, 0x0d, 0x06, 0x0d, 0x05, 0x01, 0x06, 0x2c, 0x28, 0x14,
+ 0x1b, 0x05, 0x04, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x16, 0x27, 0x03, 0x0d,
+ 0x30, 0x4c, 0x4c, 0x4b, 0x1f, 0x0b, 0x22, 0x26, 0x0d, 0x15, 0x0d, 0x2d,
+ 0x68, 0x34, 0x14, 0x3c, 0x25, 0x12, 0x04, 0x10, 0x18, 0x0b, 0x09, 0x30,
+ 0x2b, 0x44, 0x66, 0x14, 0x47, 0x47, 0x59, 0x73, 0x25, 0x05, 0x03, 0x1f,
+ 0x01, 0x08, 0x3f, 0x48, 0x4b, 0x4b, 0x76, 0x2f, 0x49, 0x2d, 0x22, 0x24,
+ 0x0c, 0x15, 0x08, 0x0e, 0x33, 0x03, 0x44, 0x4c, 0x10, 0x46, 0x13, 0x1f,
+ 0x27, 0x1b, 0x1d, 0x13, 0x02, 0x24, 0x08, 0x02, 0x42, 0x0e, 0x4d, 0x3c,
+ 0x19, 0x1b, 0x40, 0x2b, 0x2b, 0x1e, 0x16, 0x11, 0x04, 0x1f, 0x11, 0x04,
+ 0x18, 0x11, 0x35, 0x01, 0xa3, 0x13, 0x24, 0x1f, 0x0b, 0x0c, 0x19, 0x19,
+ 0x18, 0x13, 0x0f, 0x0c, 0x1a, 0x18, 0x1f, 0x19, 0x1e, 0x07, 0x1a, 0xc3,
+ 0x54, 0x51, 0x54, 0x51, 0x04, 0x53, 0x51, 0x54, 0x50, 0x02, 0x48, 0x1a,
+ 0x31, 0x18, 0x55, 0x74, 0x04, 0x0e, 0x09, 0x0d, 0x06, 0x10, 0x16, 0x1b,
+ 0x24, 0x01, 0x04, 0x0b, 0x04, 0x10, 0x3f, 0x0a, 0x41, 0x02, 0x41, 0x20,
+ 0x06, 0x12, 0x16, 0x21, 0x17, 0x2a, 0x1e, 0x15, 0x40, 0x27, 0x11, 0x0e,
+ 0x1e, 0x11, 0x15, 0x1f, 0x43, 0x13, 0x1a, 0x10, 0x15, 0x1b, 0x04, 0x09,
+ 0x4d, 0x2a, 0x0f, 0x19, 0x0a, 0x0a, 0x03, 0x05, 0x15, 0x3c, 0x64, 0x21,
+ 0x4b, 0x2e, 0x21, 0x28, 0x13, 0x47, 0x44, 0x19, 0x3f, 0x11, 0x18, 0x0b,
+ 0x0a, 0x07, 0x18, 0x0d, 0x07, 0x24, 0x2c, 0x2b, 0x21, 0x32, 0x10, 0x48,
+ 0x2a, 0x2d, 0x1e, 0x1a, 0x01, 0x0c, 0x43, 0x59, 0x28, 0x4e, 0x1c, 0x0d,
+ 0x5d, 0x24, 0x14, 0x0a, 0x05, 0x1f, 0x24, 0x32, 0x46, 0x3e, 0x5f, 0x3e,
+ 0x44, 0x1a, 0x30, 0x15, 0x0d, 0x07, 0x18, 0x2b, 0x03, 0x0d, 0x1a, 0x28,
+ 0x28, 0x57, 0xb2, 0x29, 0x27, 0x40, 0x2c, 0x23, 0x16, 0x63, 0x58, 0x1a,
+ 0x0a, 0x18, 0x11, 0x23, 0x08, 0x1b, 0x29, 0x05, 0x04, 0x0b, 0x15, 0x0d,
+ 0x14, 0x0b, 0x2a, 0x29, 0x5a, 0x62, 0x01, 0x19, 0x1e, 0x05, 0x05, 0x26,
+ 0x42, 0x42, 0x2a, 0x2a, 0x3f, 0x0d, 0x0f, 0x09, 0x05, 0x07, 0x01, 0x0b,
+ 0x25, 0x3e, 0x0d, 0x17, 0x11, 0x01, 0x03, 0x0d, 0x13, 0x20, 0x19, 0x11,
+ 0x03, 0x02, 0x01, 0x04, 0x11, 0x04, 0x05, 0x1b, 0x3d, 0x10, 0x29, 0x20,
+ 0x04, 0x04, 0x0a, 0x07, 0x04, 0x1f, 0x15, 0x20, 0x3e, 0x0f, 0x2a, 0x1e,
+ 0x00, 0x00, 0x00, 0x1b, 0x01, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x07, 0x00, 0x27, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x05, 0x00, 0x02, 0x00, 0x2e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0d, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0e, 0x00, 0x1a, 0x00, 0x30, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x01, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x02, 0x00, 0x0e, 0x00, 0x98, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x03, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x04, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x05, 0x00, 0x04, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x06, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x0d, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x0e, 0x00, 0x34, 0x00, 0xaa, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x01, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x02, 0x00, 0x0e, 0x00, 0x98, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x03, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x04, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x05, 0x00, 0x04, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x06, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x0d, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x0e, 0x00, 0x34, 0x00, 0xaa, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69,
+ 0x67, 0x68, 0x74, 0x20, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x31, 0x35,
+ 0x2c, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x53, 0x70, 0x69,
+ 0x64, 0x65, 0x72, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x52, 0x65, 0x67,
+ 0x75, 0x6c, 0x61, 0x72, 0x56, 0x31, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f,
+ 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x2e, 0x73, 0x69, 0x6c,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x4f, 0x46, 0x4c, 0x00, 0x43, 0x00, 0x6f,
+ 0x00, 0x70, 0x00, 0x79, 0x00, 0x72, 0x00, 0x69, 0x00, 0x67, 0x00, 0x68,
+ 0x00, 0x74, 0x00, 0x20, 0x00, 0x28, 0x00, 0x63, 0x00, 0x29, 0x00, 0x20,
+ 0x00, 0x32, 0x00, 0x30, 0x00, 0x31, 0x00, 0x35, 0x00, 0x2c, 0x00, 0x20,
+ 0x00, 0x47, 0x00, 0x6f, 0x00, 0x6f, 0x00, 0x67, 0x00, 0x6c, 0x00, 0x65,
+ 0x00, 0x2e, 0x00, 0x53, 0x00, 0x70, 0x00, 0x69, 0x00, 0x64, 0x00, 0x65,
+ 0x00, 0x72, 0x00, 0x53, 0x00, 0x79, 0x00, 0x6d, 0x00, 0x62, 0x00, 0x6f,
+ 0x00, 0x6c, 0x00, 0x52, 0x00, 0x65, 0x00, 0x67, 0x00, 0x75, 0x00, 0x6c,
+ 0x00, 0x61, 0x00, 0x72, 0x00, 0x56, 0x00, 0x31, 0x00, 0x68, 0x00, 0x74,
+ 0x00, 0x74, 0x00, 0x70, 0x00, 0x3a, 0x00, 0x2f, 0x00, 0x2f, 0x00, 0x73,
+ 0x00, 0x63, 0x00, 0x72, 0x00, 0x69, 0x00, 0x70, 0x00, 0x74, 0x00, 0x73,
+ 0x00, 0x2e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6c, 0x00, 0x2e, 0x00, 0x6f,
+ 0x00, 0x72, 0x00, 0x67, 0x00, 0x2f, 0x00, 0x4f, 0x00, 0x46, 0x00, 0x4c,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x66,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0xff, 0xff, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x00
+};
+
+enum class SmoothBehavior {
+ none, // SmoothFonts produces no effect.
+ some, // SmoothFonts produces some effect, but not subpixel coverage.
+ subpixel, // SmoothFonts produces some effect and provides subpixel coverage.
+};
+
+/**
+ * There does not appear to be a publicly accessible API for determining if LCD
+ * font smoothing will be applied if we request it. The main issue is that if
+ * smoothing is applied, a gamma of 2.0 will be used; if not, a gamma of 1.0.
+ */
+static SmoothBehavior smooth_behavior() {
+ static SmoothBehavior gSmoothBehavior = []{
+ uint32_t noSmoothBitmap[16][16] = {};
+ uint32_t smoothBitmap[16][16] = {};
+
+ SkUniqueCFRef<CGColorSpaceRef> colorspace(CGColorSpaceCreateDeviceRGB());
+ SkUniqueCFRef<CGContextRef> noSmoothContext(
+ CGBitmapContextCreate(&noSmoothBitmap, 16, 16, 8, 16*4,
+ colorspace.get(), BITMAP_INFO_RGB));
+ SkUniqueCFRef<CGContextRef> smoothContext(
+ CGBitmapContextCreate(&smoothBitmap, 16, 16, 8, 16*4,
+ colorspace.get(), BITMAP_INFO_RGB));
+
+ SkUniqueCFRef<CGDataProviderRef> data(
+ CGDataProviderCreateWithData(nullptr, kSpiderSymbol_ttf,
+ SK_ARRAY_COUNT(kSpiderSymbol_ttf), nullptr));
+ SkUniqueCFRef<CGFontRef> cgFont(CGFontCreateWithDataProvider(data.get()));
+ SkASSERT(cgFont);
+ SkUniqueCFRef<CTFontRef> ctFont(
+ CTFontCreateWithGraphicsFont(cgFont.get(), 16, nullptr, nullptr));
+ SkASSERT(ctFont);
+
+ CGContextSetShouldSmoothFonts(noSmoothContext.get(), false);
+ CGContextSetShouldAntialias(noSmoothContext.get(), true);
+ CGContextSetTextDrawingMode(noSmoothContext.get(), kCGTextFill);
+ CGContextSetGrayFillColor(noSmoothContext.get(), 1, 1);
+
+ CGContextSetShouldSmoothFonts(smoothContext.get(), true);
+ CGContextSetShouldAntialias(smoothContext.get(), true);
+ CGContextSetTextDrawingMode(smoothContext.get(), kCGTextFill);
+ CGContextSetGrayFillColor(smoothContext.get(), 1, 1);
+
+ CGPoint point = CGPointMake(0, 3);
+ CGGlyph spiderGlyph = 3;
+ CTFontDrawGlyphs(ctFont.get(), &spiderGlyph, &point, 1, noSmoothContext.get());
+ CTFontDrawGlyphs(ctFont.get(), &spiderGlyph, &point, 1, smoothContext.get());
+
+ // For debugging.
+ //SkUniqueCFRef<CGImageRef> image(CGBitmapContextCreateImage(noSmoothContext.get()));
+ //SkUniqueCFRef<CGImageRef> image(CGBitmapContextCreateImage(smoothContext.get()));
+
+ SmoothBehavior smoothBehavior = SmoothBehavior::none;
+ for (int x = 0; x < 16; ++x) {
+ for (int y = 0; y < 16; ++y) {
+ uint32_t smoothPixel = smoothBitmap[x][y];
+ uint32_t r = (smoothPixel >> 16) & 0xFF;
+ uint32_t g = (smoothPixel >> 8) & 0xFF;
+ uint32_t b = (smoothPixel >> 0) & 0xFF;
+ if (r != g || r != b) {
+ return SmoothBehavior::subpixel;
+ }
+ if (noSmoothBitmap[x][y] != smoothPixel) {
+ smoothBehavior = SmoothBehavior::some;
+ }
+ }
+ }
+ return smoothBehavior;
+ }();
+ return gSmoothBehavior;
+}
+
+class Offscreen {
+public:
+ Offscreen()
+ : fRGBSpace(nullptr)
+ , fCG(nullptr)
+ , fDoAA(false)
+ , fDoLCD(false)
+ {
+ fSize.set(0, 0);
+ }
+
+ CGRGBPixel* getCG(const SkScalerContext_Mac& context, const SkGlyph& glyph,
+ CGGlyph glyphID, size_t* rowBytesPtr, bool generateA8FromLCD,
+ bool lightOnDark);
+
+private:
+ enum {
+ kSize = 32 * 32 * sizeof(CGRGBPixel)
+ };
+ SkAutoSMalloc<kSize> fImageStorage;
+ SkUniqueCFRef<CGColorSpaceRef> fRGBSpace;
+
+ // cached state
+ SkUniqueCFRef<CGContextRef> fCG;
+ SkISize fSize;
+ bool fDoAA;
+ bool fDoLCD;
+
+ static int RoundSize(int dimension) {
+ return SkNextPow2(dimension);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_dict_CGFloat(CFDictionaryRef dict, CFStringRef name, CGFloat* value) {
+ CFNumberRef num;
+ return CFDictionaryGetValueIfPresent(dict, name, (const void**)&num)
+ && CFNumberIsFloatType(num)
+ && CFNumberGetValue(num, kCFNumberCGFloatType, value);
+}
+
+template <typename S, typename D, typename C> struct LinearInterpolater {
+ struct Mapping {
+ S src_val;
+ D dst_val;
+ };
+ constexpr LinearInterpolater(Mapping const mapping[], int mappingCount)
+ : fMapping(mapping), fMappingCount(mappingCount) {}
+
+ static D map(S value, S src_min, S src_max, D dst_min, D dst_max) {
+ SkASSERT(src_min < src_max);
+ SkASSERT(dst_min <= dst_max);
+ return C()(dst_min + (((value - src_min) * (dst_max - dst_min)) / (src_max - src_min)));
+ }
+
+ D map(S val) const {
+ // -Inf to [0]
+ if (val < fMapping[0].src_val) {
+ return fMapping[0].dst_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < fMappingCount - 1; ++i) {
+ if (val < fMapping[i+1].src_val) {
+ return map(val, fMapping[i].src_val, fMapping[i+1].src_val,
+ fMapping[i].dst_val, fMapping[i+1].dst_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return fMapping[fMappingCount - 1].dst_val;
+ }
+
+ Mapping const * fMapping;
+ int fMappingCount;
+};
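+
+// Worked example (sketch, hypothetical two-entry table): with mappings
+// {{0, -1.0}, {100, -0.8}}, map(50) interpolates linearly:
+//   -1.0 + ((50 - 0) * (-0.8 - -1.0)) / (100 - 0) == -0.9
+// while map(-5) clamps to -1.0 and map(200) clamps to -0.8.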
+
+struct RoundCGFloatToInt {
+ int operator()(CGFloat s) { return s + 0.5; }
+};
+struct CGFloatIdentity {
+ CGFloat operator()(CGFloat s) { return s; }
+};
+
+/** Returns the [-1, 1] CTFontDescriptor weights for the
+ * <0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000> CSS weights.
+ *
+ * It is assumed that the values will be interpolated linearly between these points.
+ * The NSFontWeightXXX symbols were added to the SDK in 10.11; at runtime they are present
+ * on 10.10 but not on 10.9.
+ * The actual values appear to be stable, but they may change in the future without notice.
+ */
+static CGFloat(&get_NSFontWeight_mapping())[11] {
+
+ // Declarations in <AppKit/AppKit.h> on macOS, <UIKit/UIKit.h> on iOS
+#ifdef SK_BUILD_FOR_MAC
+# define SK_KIT_FONT_WEIGHT_PREFIX "NS"
+#endif
+#ifdef SK_BUILD_FOR_IOS
+# define SK_KIT_FONT_WEIGHT_PREFIX "UI"
+#endif
+ static constexpr struct {
+ CGFloat defaultValue;
+ const char* name;
+ } nsFontWeightLoaderInfos[] = {
+ { -0.80f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightUltraLight" },
+ { -0.60f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightThin" },
+ { -0.40f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightLight" },
+ { 0.00f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightRegular" },
+ { 0.23f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightMedium" },
+ { 0.30f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightSemibold" },
+ { 0.40f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightBold" },
+ { 0.56f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightHeavy" },
+ { 0.62f, SK_KIT_FONT_WEIGHT_PREFIX "FontWeightBlack" },
+ };
+
+ static_assert(SK_ARRAY_COUNT(nsFontWeightLoaderInfos) == 9, "");
+ static CGFloat nsFontWeights[11];
+ static SkOnce once;
+ once([&] {
+ size_t i = 0;
+ nsFontWeights[i++] = -1.00;
+ for (const auto& nsFontWeightLoaderInfo : nsFontWeightLoaderInfos) {
+ void* nsFontWeightValuePtr = dlsym(RTLD_DEFAULT, nsFontWeightLoaderInfo.name);
+ if (nsFontWeightValuePtr) {
+ nsFontWeights[i++] = *(static_cast<CGFloat*>(nsFontWeightValuePtr));
+ } else {
+ nsFontWeights[i++] = nsFontWeightLoaderInfo.defaultValue;
+ }
+ }
+ nsFontWeights[i++] = 1.00;
+ });
+ return nsFontWeights;
+}
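+
+// Illustrative result (sketch): on a system where every dlsym lookup falls back to
+// the default values above, the returned table for CSS weights 0..1000 (step 100) is
+//   { -1.00, -0.80, -0.60, -0.40, 0.00, 0.23, 0.30, 0.40, 0.56, 0.62, 1.00 }.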
+
+/** Convert the [0, 1000] CSS weight to [-1, 1] CTFontDescriptor weight (for system fonts).
+ *
+ * The -1 to 1 weights reported by CTFontDescriptors have different mappings depending on if the
+ * CTFont is native or created from a CGDataProvider.
+ */
+static CGFloat fontstyle_to_ct_weight(int fontstyleWeight) {
+ using Interpolator = LinearInterpolater<int, CGFloat, CGFloatIdentity>;
+
+ // Note that macOS supports the old OS/2 version A, in which weights 0 through 10 are
+ // treated as if multiplied by 100. However, on this end we can't tell, so this is ignored.
+
+ static Interpolator::Mapping nativeWeightMappings[11];
+ static SkOnce once;
+ once([&] {
+ CGFloat(&nsFontWeights)[11] = get_NSFontWeight_mapping();
+ for (int i = 0; i < 11; ++i) {
+ nativeWeightMappings[i].src_val = i * 100;
+ nativeWeightMappings[i].dst_val = nsFontWeights[i];
+ }
+ });
+ static constexpr Interpolator nativeInterpolator(
+ nativeWeightMappings, SK_ARRAY_COUNT(nativeWeightMappings));
+
+ return nativeInterpolator.map(fontstyleWeight);
+}
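+
+// Example (sketch, assuming the dlsym lookups fall back to the defaults above):
+// a CSS weight of 400 maps to the "Regular" value 0.00, and 450 interpolates
+// halfway between Regular (0.00) and Medium (0.23), giving 0.115.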
+
+
+/** Convert the [-1, 1] CTFontDescriptor weight to [0, 1000] CSS weight.
+ *
+ * The -1 to 1 weights reported by CTFontDescriptors have different mappings depending on if the
+ * CTFont is native or created from a CGDataProvider.
+ */
+static int ct_weight_to_fontstyle(CGFloat cgWeight, bool fromDataProvider) {
+ using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;
+
+ // Note that macOS supports the old OS/2 version A, in which weights 0 through 10 are
+ // treated as if multiplied by 100. However, on this end we can't tell, so this is ignored.
+
+ /** This mapping for CGDataProvider created fonts is determined by creating font data with every
+ * weight, creating a CTFont, and asking the CTFont for its weight. See the TypefaceStyle test
+ * in tests/TypefaceTest.cpp for the code used to determine these values.
+ */
+ static constexpr Interpolator::Mapping dataProviderWeightMappings[] = {
+ { -1.00, 0 },
+ { -0.70, 100 },
+ { -0.50, 200 },
+ { -0.23, 300 },
+ { 0.00, 400 },
+ { 0.20, 500 },
+ { 0.30, 600 },
+ { 0.40, 700 },
+ { 0.60, 800 },
+ { 0.80, 900 },
+ { 1.00, 1000 },
+ };
+ static constexpr Interpolator dataProviderInterpolator(
+ dataProviderWeightMappings, SK_ARRAY_COUNT(dataProviderWeightMappings));
+
+ static Interpolator::Mapping nativeWeightMappings[11];
+ static SkOnce once;
+ once([&] {
+ CGFloat(&nsFontWeights)[11] = get_NSFontWeight_mapping();
+ for (int i = 0; i < 11; ++i) {
+ nativeWeightMappings[i].src_val = nsFontWeights[i];
+ nativeWeightMappings[i].dst_val = i * 100;
+ }
+ });
+ static constexpr Interpolator nativeInterpolator(
+ nativeWeightMappings, SK_ARRAY_COUNT(nativeWeightMappings));
+
+ return fromDataProvider ? dataProviderInterpolator.map(cgWeight)
+ : nativeInterpolator.map(cgWeight);
+}
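+
+// Example (sketch): for a CGDataProvider-created font, a CT weight of 0.30 maps
+// exactly to CSS 600, and 0.25 interpolates between 0.20 (500) and 0.30 (600):
+//   500 + ((0.25 - 0.20) * (600 - 500)) / (0.30 - 0.20) == 550.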
+
+/** Convert the [0, 10] CSS weight to [-1, 1] CTFontDescriptor width. */
+static int fontstyle_to_ct_width(int fontstyleWidth) {
+ using Interpolator = LinearInterpolater<int, CGFloat, CGFloatIdentity>;
+
+ // Values determined by creating font data with every width, creating a CTFont,
+ // and asking the CTFont for its width. See TypefaceStyle test for basics.
+ static constexpr Interpolator::Mapping widthMappings[] = {
+ { 0, -0.5 },
+ { 10, 0.5 },
+ };
+ static constexpr Interpolator interpolator(widthMappings, SK_ARRAY_COUNT(widthMappings));
+ return interpolator.map(fontstyleWidth);
+}
+
+/** Convert the [-1, 1] CTFontDescriptor width to [0, 10] CSS weight. */
+static int ct_width_to_fontstyle(CGFloat cgWidth) {
+ using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;
+
+ // Values determined by creating font data with every width, creating a CTFont,
+ // and asking the CTFont for its width. See TypefaceStyle test for basics.
+ static constexpr Interpolator::Mapping widthMappings[] = {
+ { -0.5, 0 },
+ { 0.5, 10 },
+ };
+ static constexpr Interpolator interpolator(widthMappings, SK_ARRAY_COUNT(widthMappings));
+ return interpolator.map(cgWidth);
+}
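+
+// Example (sketch): a CT width of 0.0 sits midway between -0.5 and 0.5, so
+// ct_width_to_fontstyle(0.0) returns 5 (the "normal" CSS width class).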
+
+static SkFontStyle fontstyle_from_descriptor(CTFontDescriptorRef desc, bool fromDataProvider) {
+ SkUniqueCFRef<CFTypeRef> traits(CTFontDescriptorCopyAttribute(desc, kCTFontTraitsAttribute));
+ if (!traits || CFDictionaryGetTypeID() != CFGetTypeID(traits.get())) {
+ return SkFontStyle();
+ }
+ SkUniqueCFRef<CFDictionaryRef> fontTraitsDict(static_cast<CFDictionaryRef>(traits.release()));
+
+ CGFloat weight, width, slant;
+ if (!find_dict_CGFloat(fontTraitsDict.get(), kCTFontWeightTrait, &weight)) {
+ weight = 0;
+ }
+ if (!find_dict_CGFloat(fontTraitsDict.get(), kCTFontWidthTrait, &width)) {
+ width = 0;
+ }
+ if (!find_dict_CGFloat(fontTraitsDict.get(), kCTFontSlantTrait, &slant)) {
+ slant = 0;
+ }
+
+ return SkFontStyle(ct_weight_to_fontstyle(weight, fromDataProvider),
+ ct_width_to_fontstyle(width),
+ slant ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
+
+class SkTypeface_Mac : public SkTypeface {
+public:
+ SkTypeface_Mac(SkUniqueCFRef<CTFontRef> fontRef, SkUniqueCFRef<CFTypeRef> resourceRef,
+ const SkFontStyle& fs, bool isFixedPitch,
+ std::unique_ptr<SkStreamAsset> providedData)
+ : SkTypeface(fs, isFixedPitch)
+ , fFontRef(std::move(fontRef))
+ , fOriginatingCFTypeRef(std::move(resourceRef))
+ , fHasColorGlyphs(
+ SkToBool(CTFontGetSymbolicTraits(fFontRef.get()) & kCTFontColorGlyphsTrait))
+ , fStream(std::move(providedData))
+ , fIsFromStream(fStream)
+ {
+ SkASSERT(fFontRef);
+ }
+
+ SkUniqueCFRef<CTFontRef> fFontRef;
+ SkUniqueCFRef<CFTypeRef> fOriginatingCFTypeRef;
+ const bool fHasColorGlyphs;
+ bool hasColorGlyphs() const override { return fHasColorGlyphs; }
+
+protected:
+ int onGetUPEM() const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void getGlyphToUnicodeMap(SkUnichar*) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override {}
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return -1;
+ }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const override {
+ return nullptr;
+ }
+
+ void* onGetCTFontRef() const override { return (void*)fFontRef.get(); }
+
+private:
+ mutable std::unique_ptr<SkStreamAsset> fStream;
+ bool fIsFromStream;
+ mutable SkOnce fInitStream;
+
+ typedef SkTypeface INHERITED;
+};
+
+static bool find_by_CTFontRef(SkTypeface* cached, void* context) {
+ CTFontRef self = (CTFontRef)context;
+ CTFontRef other = (CTFontRef)cached->internal_private_getCTFontRef();
+
+ return CFEqual(self, other);
+}
+
+/** Creates a typeface, searching the cache if isLocalStream is false. */
+static sk_sp<SkTypeface> create_from_CTFontRef(SkUniqueCFRef<CTFontRef> font,
+ SkUniqueCFRef<CFTypeRef> resource,
+ std::unique_ptr<SkStreamAsset> providedData) {
+ SkASSERT(font);
+ const bool isFromStream(providedData);
+
+ if (!isFromStream) {
+ sk_sp<SkTypeface> face = SkTypefaceCache::FindByProcAndRef(find_by_CTFontRef,
+ (void*)font.get());
+ if (face) {
+ return face;
+ }
+ }
+
+ SkUniqueCFRef<CTFontDescriptorRef> desc(CTFontCopyFontDescriptor(font.get()));
+ SkFontStyle style = fontstyle_from_descriptor(desc.get(), isFromStream);
+ CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(font.get());
+ bool isFixedPitch = SkToBool(traits & kCTFontMonoSpaceTrait);
+
+ sk_sp<SkTypeface> face(new SkTypeface_Mac(std::move(font), std::move(resource),
+ style, isFixedPitch, std::move(providedData)));
+ if (!isFromStream) {
+ SkTypefaceCache::Add(face);
+ }
+ return face;
+}
+
+/** Creates a typeface from a descriptor, searching the cache. */
+static sk_sp<SkTypeface> create_from_desc(CTFontDescriptorRef desc) {
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+ if (!ctFont) {
+ return nullptr;
+ }
+
+ return create_from_CTFontRef(std::move(ctFont), nullptr, nullptr);
+}
+
+static SkUniqueCFRef<CTFontDescriptorRef> create_descriptor(const char familyName[],
+ const SkFontStyle& style) {
+ SkUniqueCFRef<CFMutableDictionaryRef> cfAttributes(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ SkUniqueCFRef<CFMutableDictionaryRef> cfTraits(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ if (!cfAttributes || !cfTraits) {
+ return nullptr;
+ }
+
+ // CTFontTraits (symbolic)
+ // macOS 10.14 and iOS 12 seem to behave badly when kCTFontSymbolicTrait is set.
+
+ // CTFontTraits (weight)
+ CGFloat ctWeight = fontstyle_to_ct_weight(style.weight());
+ SkUniqueCFRef<CFNumberRef> cfFontWeight(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctWeight));
+ if (cfFontWeight) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontWeightTrait, cfFontWeight.get());
+ }
+ // CTFontTraits (width)
+ CGFloat ctWidth = fontstyle_to_ct_width(style.width());
+ SkUniqueCFRef<CFNumberRef> cfFontWidth(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctWidth));
+ if (cfFontWidth) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontWidthTrait, cfFontWidth.get());
+ }
+ // CTFontTraits (slant)
+ CGFloat ctSlant = style.slant() == SkFontStyle::kUpright_Slant ? 0 : 1;
+ SkUniqueCFRef<CFNumberRef> cfFontSlant(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctSlant));
+ if (cfFontSlant) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontSlantTrait, cfFontSlant.get());
+ }
+ // CTFontTraits
+ CFDictionaryAddValue(cfAttributes.get(), kCTFontTraitsAttribute, cfTraits.get());
+
+ // CTFontFamilyName
+ if (familyName) {
+ SkUniqueCFRef<CFStringRef> cfFontName = make_CFString(familyName);
+ if (cfFontName) {
+ CFDictionaryAddValue(cfAttributes.get(), kCTFontFamilyNameAttribute, cfFontName.get());
+ }
+ }
+
+ return SkUniqueCFRef<CTFontDescriptorRef>(
+ CTFontDescriptorCreateWithAttributes(cfAttributes.get()));
+}
+
+// Same as the above function, except the style is included so we can
+// check whether the created font conforms to it. If not, we need
+// to recreate the font with symbolic traits. This is needed due to a macOS 10.11
+// font creation problem: https://bugs.chromium.org/p/skia/issues/detail?id=8447.
+static sk_sp<SkTypeface> create_from_desc_and_style(CTFontDescriptorRef desc,
+ const SkFontStyle& style) {
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+ if (!ctFont) {
+ return nullptr;
+ }
+
+ const CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(ctFont.get());
+ CTFontSymbolicTraits expected_traits = traits;
+ if (style.slant() != SkFontStyle::kUpright_Slant) {
+ expected_traits |= kCTFontItalicTrait;
+ }
+ if (style.weight() >= SkFontStyle::kBold_Weight) {
+ expected_traits |= kCTFontBoldTrait;
+ }
+
+ if (expected_traits != traits) {
+ SkUniqueCFRef<CTFontRef> ctNewFont(CTFontCreateCopyWithSymbolicTraits(ctFont.get(), 0, nullptr, expected_traits, expected_traits));
+ if (ctNewFont) {
+ ctFont = std::move(ctNewFont);
+ }
+ }
+
+ return create_from_CTFontRef(std::move(ctFont), nullptr, nullptr);
+}
+
+/** Creates a typeface from a name, searching the cache. */
+static sk_sp<SkTypeface> create_from_name(const char familyName[], const SkFontStyle& style) {
+ SkUniqueCFRef<CTFontDescriptorRef> desc = create_descriptor(familyName, style);
+ if (!desc) {
+ return nullptr;
+ }
+ return create_from_desc_and_style(desc.get(), style);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* This function is visible outside this file. It first searches the cache, and if
+ * not found, returns a new entry (after adding it to the cache).
+ */
+SkTypeface* SkCreateTypefaceFromCTFont(CTFontRef font, CFTypeRef resource) {
+ CFRetain(font);
+ if (resource) {
+ CFRetain(resource);
+ }
+ return create_from_CTFontRef(SkUniqueCFRef<CTFontRef>(font),
+ SkUniqueCFRef<CFTypeRef>(resource),
+ nullptr).release();
+}
+
+static const char* map_css_names(const char* name) {
+ static const struct {
+ const char* fFrom; // name the caller specified
+ const char* fTo; // "canonical" name we map to
+ } gPairs[] = {
+ { "sans-serif", "Helvetica" },
+ { "serif", "Times" },
+ { "monospace", "Courier" }
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPairs); i++) {
+ if (strcmp(name, gPairs[i].fFrom) == 0) {
+ return gPairs[i].fTo;
+ }
+ }
+ return name; // no change
+}
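+
+// Usage sketch: map_css_names("serif") returns "Times", while a name with no
+// entry in gPairs (e.g. "Helvetica Neue") is returned unchanged.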
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkScalerContext_Mac : public SkScalerContext {
+public:
+ SkScalerContext_Mac(sk_sp<SkTypeface_Mac>, const SkScalerContextEffects&, const SkDescriptor*);
+
+protected:
+ unsigned generateGlyphCount(void) override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyph, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ static void CTPathElement(void *info, const CGPathElement *element);
+ template<bool APPLY_PREBLEND>
+ static void RGBToA8(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes,
+ const SkGlyph& glyph, const uint8_t* table8);
+ template<bool APPLY_PREBLEND>
+ static uint16_t RGBToLcd16(CGRGBPixel rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB);
+ template<bool APPLY_PREBLEND>
+ static void RGBToLcd16(const CGRGBPixel* SK_RESTRICT cgPixels,
+ size_t cgRowBytes,
+ const SkGlyph& glyph,
+ const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB);
+
+ Offscreen fOffscreen;
+
+ /** Unrotated variant of fCTFont.
+ *
+ * In 10.10.1 CTFontGetAdvancesForGlyphs applies the font transform to the width of the
+ * advances, but always sets the height to 0. This font is used to get the advances of the
+ * unrotated glyph, and then the rotation is applied separately.
+ *
+ * CT vertical metrics are pre-rotated (in em space, before transform) 90 degrees clockwise.
+ * This makes kCTFontOrientationDefault dangerous, because the metrics from
+ * kCTFontOrientationHorizontal are in a different space from kCTFontOrientationVertical.
+ * With kCTFontOrientationVertical the advances must be unrotated.
+ *
+ * Sometimes, creating a copy of a CTFont with the same size but a different transform will
+ * select different underlying font data. To ensure that only one underlying font is used,
+ * avoid ever creating more than one CTFont per SkScalerContext.
+ *
+ * As a result of the above (and other constraints) this font contains the size, but not the
+ * transform. The transform must always be applied separately.
+ */
+ SkUniqueCFRef<CTFontRef> fCTFont;
+
+ /** The transform without the font size. */
+ CGAffineTransform fTransform;
+ CGAffineTransform fInvTransform;
+
+ SkUniqueCFRef<CGFontRef> fCGFont;
+ uint16_t fGlyphCount;
+ const bool fDoSubPosition;
+
+ friend class Offscreen;
+
+ typedef SkScalerContext INHERITED;
+};
+
+// CTFontCreateCopyWithAttributes or CTFontCreateCopyWithSymbolicTraits cannot be used on 10.10
+// and later, as they will return different underlying fonts depending on the size requested.
+static SkUniqueCFRef<CTFontRef> ctfont_create_exact_copy(CTFontRef baseFont, CGFloat textSize,
+ const CGAffineTransform* transform)
+{
+ // To figure out if a font is installed locally or used from a @font-face
+ // resource, we check whether its descriptor can provide a URL. This will
+ // be present for installed fonts, but not for those activated from an
+ // in-memory resource.
+ auto IsInstalledFont = [](CTFontRef aFont) {
+ CTFontDescriptorRef desc = CTFontCopyFontDescriptor(aFont);
+ CFTypeRef attr = CTFontDescriptorCopyAttribute(desc, kCTFontURLAttribute);
+ CFRelease(desc);
+ bool result = false;
+ if (attr) {
+ result = true;
+ CFRelease(attr);
+ }
+ return result;
+ };
+
+ // If we have a system font we need to use the CGFont APIs to avoid having the
+ // underlying font change for us when using CTFontCreateCopyWithAttributes.
+ if (IsInstalledFont(baseFont)) {
+ SkUniqueCFRef<CGFontRef> baseCGFont(CTFontCopyGraphicsFont(baseFont, nullptr));
+
+ // The last parameter (CTFontDescriptorRef attributes) *must* be nullptr.
+ // If it is non-nullptr, then for fonts with variation axes the copy will fail in
+ // CGFontVariationFromDictCallback, which assumes kCGFontVariationAxisName is a CFNumberRef
+ // (which it quite obviously is not).
+
+ // Because we cannot set up the CTFont descriptor to match, the same restriction applies here
+ // as to other uses of CTFontCreateWithGraphicsFont: such CTFonts should not escape
+ // the scaler context, since they aren't 'normal'.
+
+ // Avoid calling potentially buggy variation APIs on pre-Sierra macOS
+ // versions (see bug 1331683).
+ //
+ // And on HighSierra, CTFontCreateWithGraphicsFont properly carries over
+ // variation settings from the CGFont to CTFont, so we don't need to do
+ // the extra work here -- and this seems to avoid Core Text crashiness
+ // seen in bug 1454094.
+ //
+ // However, for installed fonts it seems we DO need to copy the variations
+ // explicitly even on 10.13, otherwise fonts fail to render (as in bug
+ // 1455494) when non-default values are used. Fortunately, the crash
+ // mentioned above occurs with data fonts, not (AFAICT) with system-
+ // installed fonts.
+ //
+ // So we only need to do this "the hard way" on Sierra, and for installed
+ // fonts on HighSierra+; otherwise, just let the standard CTFont function
+ // do its thing.
+ //
+ // NOTE in case this ever needs further adjustment: there is similar logic
+ // in four places in the tree (sadly):
+ // CreateCTFontFromCGFontWithVariations in gfxMacFont.cpp
+ // CreateCTFontFromCGFontWithVariations in ScaledFontMac.cpp
+ // CreateCTFontFromCGFontWithVariations in cairo-quartz-font.c
+ // ctfont_create_exact_copy in SkFontHost_mac.cpp
+
+ // Not UniqueCFRef<> because CGFontCopyVariations can return null!
+ CFDictionaryRef variations = CGFontCopyVariations(baseCGFont.get());
+ if (variations) {
+ SkUniqueCFRef<CFDictionaryRef>
+ varAttr(CFDictionaryCreate(nullptr,
+ (const void**)&kCTFontVariationAttribute,
+ (const void**)&variations,
+ 1,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+ CFRelease(variations);
+
+ SkUniqueCFRef<CTFontDescriptorRef>
+ varDesc(CTFontDescriptorCreateWithAttributes(varAttr.get()));
+
+ return SkUniqueCFRef<CTFontRef>(
+ CTFontCreateWithGraphicsFont(baseCGFont.get(), textSize, transform, varDesc.get()));
+ }
+ return SkUniqueCFRef<CTFontRef>(
+ CTFontCreateWithGraphicsFont(baseCGFont.get(), textSize, transform, nullptr));
+ } else {
+ return SkUniqueCFRef<CTFontRef>(CTFontCreateCopyWithAttributes(baseFont, textSize, transform, nullptr));
+ }
+}
+
+SkScalerContext_Mac::SkScalerContext_Mac(sk_sp<SkTypeface_Mac> typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : INHERITED(std::move(typeface), effects, desc)
+ , fDoSubPosition(SkToBool(fRec.fFlags & kSubpixelPositioning_Flag))
+{
+ CTFontRef ctFont = (CTFontRef)this->getTypeface()->internal_private_getCTFontRef();
+ CFIndex numGlyphs = CTFontGetGlyphCount(ctFont);
+ SkASSERT(numGlyphs >= 1 && numGlyphs <= 0xFFFF);
+ fGlyphCount = SkToU16(numGlyphs);
+
+ // CT on (at least) 10.9 will size color glyphs down from the requested size, but not up.
+ // As a result, it is necessary to know the actual device size and request that.
+ SkVector scale;
+ SkMatrix skTransform;
+ bool invertible = fRec.computeMatrices(SkScalerContextRec::kVertical_PreMatrixScale,
+ &scale, &skTransform, nullptr, nullptr, nullptr);
+ fTransform = MatrixToCGAffineTransform(skTransform);
+ // CGAffineTransformInvert documents that if the transform is non-invertible it will return the
+ // passed transform unchanged. It does so, but then also prints a message to stdout. Avoid this.
+ if (invertible) {
+ fInvTransform = CGAffineTransformInvert(fTransform);
+ } else {
+ fInvTransform = fTransform;
+ }
+
+ // The transform contains everything except the requested text size.
+ // Some properties, like 'trak', are based on the text size (before applying the matrix).
+ CGFloat textSize = ScalarToCG(scale.y());
+ fCTFont = ctfont_create_exact_copy(ctFont, textSize, nullptr);
+ fCGFont.reset(CTFontCopyGraphicsFont(fCTFont.get(), nullptr));
+}
+
+CGRGBPixel* Offscreen::getCG(const SkScalerContext_Mac& context, const SkGlyph& glyph,
+ CGGlyph glyphID, size_t* rowBytesPtr,
+ bool generateA8FromLCD, bool lightOnDark) {
+ if (!fRGBSpace) {
+ // It doesn't appear to matter what color space is specified.
+ // Regular blends and antialiased text are always (s*a + d*(1-a)),
+ // and subpixel antialiased text is always g=2.0.
+ fRGBSpace.reset(CGColorSpaceCreateDeviceRGB());
+ }
+
+ // default to kBW_Format
+ bool doAA = false;
+ bool doLCD = false;
+
+ if (SkMask::kBW_Format != glyph.maskFormat()) {
+ doLCD = true;
+ doAA = true;
+ }
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ if (!generateA8FromLCD && SkMask::kA8_Format == glyph.maskFormat()) {
+ doLCD = false;
+ doAA = true;
+ }
+
+ // If this font might have color glyphs, disable LCD as there's no way to support it.
+ // CoreText doesn't tell us which format it ended up using, so we can't detect it.
+ // A8 will end up black on transparent; TODO: we could detect gray and fall back to A8.
+ if (SkMask::kARGB32_Format == glyph.maskFormat()) {
+ doLCD = false;
+ }
+
+ size_t rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ if (!fCG || fSize.fWidth < glyph.width() || fSize.fHeight < glyph.height()) {
+ if (fSize.fWidth < glyph.width()) {
+ fSize.fWidth = RoundSize(glyph.width());
+ }
+ if (fSize.fHeight < glyph.height()) {
+ fSize.fHeight = RoundSize(glyph.height());
+ }
+
+ rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ void* image = fImageStorage.reset(rowBytes * fSize.fHeight);
+ const CGImageAlphaInfo alpha = (glyph.isColor())
+ ? kCGImageAlphaPremultipliedFirst
+ : kCGImageAlphaNoneSkipFirst;
+ const CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Host | alpha;
+ fCG.reset(CGBitmapContextCreate(image, fSize.fWidth, fSize.fHeight, 8,
+ rowBytes, fRGBSpace.get(), bitmapInfo));
+
+ // Skia handles quantization and subpixel positioning,
+ // so disable quantization and enable subpixel positioning in CG.
+ CGContextSetAllowsFontSubpixelQuantization(fCG.get(), false);
+ CGContextSetShouldSubpixelQuantizeFonts(fCG.get(), false);
+
+ // Because CG always draws from the horizontal baseline,
+ // if there is a non-integral translation from the horizontal origin to the vertical origin,
+ // then CG cannot draw the glyph in the correct location without subpixel positioning.
+ CGContextSetAllowsFontSubpixelPositioning(fCG.get(), true);
+ CGContextSetShouldSubpixelPositionFonts(fCG.get(), true);
+
+ CGContextSetTextDrawingMode(fCG.get(), kCGTextFill);
+
+ // Draw black on white to create mask. (Special path exists to speed this up in CG.)
+ // If light-on-dark is requested, draw white on black.
+ CGContextSetGrayFillColor(fCG.get(), lightOnDark ? 1.0f : 0.0f, 1.0f);
+
+ // force our checks below to happen
+ fDoAA = !doAA;
+ fDoLCD = !doLCD;
+
+ CGContextSetTextMatrix(fCG.get(), context.fTransform);
+ }
+
+ if (fDoAA != doAA) {
+ CGContextSetShouldAntialias(fCG.get(), doAA);
+ fDoAA = doAA;
+ }
+ if (fDoLCD != doLCD) {
+ CGContextSetShouldSmoothFonts(fCG.get(), doLCD);
+ fDoLCD = doLCD;
+ }
+
+ CGRGBPixel* image = (CGRGBPixel*)fImageStorage.get();
+ // skip rows based on the glyph's height
+ image += (fSize.fHeight - glyph.height()) * fSize.fWidth;
+
+ // Erase to white (or transparent black if it's a color glyph, to not composite against white).
+ // For light-on-dark, instead erase to black.
+ uint32_t bgColor = (!glyph.isColor()) ? (lightOnDark ? 0xFF000000 : 0xFFFFFFFF) : 0x00000000;
+ sk_memset_rect32(image, bgColor, glyph.width(), glyph.height(), rowBytes);
+
+ float subX = 0;
+ float subY = 0;
+ if (context.fDoSubPosition) {
+ subX = SkFixedToFloat(glyph.getSubXFixed());
+ subY = SkFixedToFloat(glyph.getSubYFixed());
+ }
+
+ CGPoint point = CGPointMake(-glyph.left() + subX, glyph.top() + glyph.height() - subY);
+ // Prior to 10.10, CTFontDrawGlyphs acted like CGContextShowGlyphsAtPositions and took
+ // 'positions' which are in text space. The glyph location (in device space) must be
+ // mapped into text space, so that CG can convert it back into device space.
+ // In 10.10.1, this is handled directly in CTFontDrawGlyphs.
+ //
+ // However, in 10.10.2 color glyphs no longer rotate based on the font transform.
+ // So always make the font transform identity and place the transform on the context.
+ point = CGPointApplyAffineTransform(point, context.fInvTransform);
+
+ CTFontDrawGlyphs(context.fCTFont.get(), &glyphID, &point, 1, fCG.get());
+
+ SkASSERT(rowBytesPtr);
+ *rowBytesPtr = rowBytes;
+ return image;
+}
+
+unsigned SkScalerContext_Mac::generateGlyphCount(void) {
+ return fGlyphCount;
+}
+
+bool SkScalerContext_Mac::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContext_Mac::generateMetrics(SkGlyph* glyph) {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ const CGGlyph cgGlyph = (CGGlyph) glyph->getGlyphID();
+ glyph->zeroMetrics();
+
+ // The following block produces cgAdvance in CG units (pixels, y up).
+ CGSize cgAdvance;
+ CTFontGetAdvancesForGlyphs(fCTFont.get(), kCTFontOrientationHorizontal,
+ &cgGlyph, &cgAdvance, 1);
+ cgAdvance = CGSizeApplyAffineTransform(cgAdvance, fTransform);
+ glyph->fAdvanceX = CGToFloat(cgAdvance.width);
+ glyph->fAdvanceY = -CGToFloat(cgAdvance.height);
+
+ // The following produces skBounds in SkGlyph units (pixels, y down),
+ // or returns early if skBounds would be empty.
+ SkRect skBounds;
+
+ // Glyphs are always drawn from the horizontal origin. The caller must manually use the result
+ // of CTFontGetVerticalTranslationsForGlyphs to calculate where to draw the glyph for vertical
+ // glyphs. As a result, always get the horizontal bounds of a glyph and translate it if the
+ // glyph is vertical. This avoids any disagreement between the various means of retrieving
+ // vertical metrics.
+ {
+ // CTFontGetBoundingRectsForGlyphs produces cgBounds in CG units (pixels, y up).
+ CGRect cgBounds;
+ CTFontGetBoundingRectsForGlyphs(fCTFont.get(), kCTFontOrientationHorizontal,
+ &cgGlyph, &cgBounds, 1);
+ cgBounds = CGRectApplyAffineTransform(cgBounds, fTransform);
+
+ // BUG?
+ // 0x200B (zero-advance space) seems to return a huge (garbage) bounds, when
+ // it should be empty. So, if we see a zero-advance, we check if it has an
+ // empty path or not, and if so, we jam the bounds to 0. Hopefully a zero-advance
+ // is rare, so we won't incur a big performance cost for this extra check.
+ // Avoid trying to create a path from a color font due to crashing on 10.9.
+ if (0 == cgAdvance.width && 0 == cgAdvance.height &&
+ SkMask::kARGB32_Format != glyph->fMaskFormat) {
+ SkUniqueCFRef<CGPathRef> path(CTFontCreatePathForGlyph(fCTFont.get(), cgGlyph,nullptr));
+ if (!path || CGPathIsEmpty(path.get())) {
+ return;
+ }
+ }
+
+ if (CGRectIsEmpty_inline(cgBounds)) {
+ return;
+ }
+
+ // Convert cgBounds to SkGlyph units (pixels, y down).
+ skBounds = SkRect::MakeXYWH(cgBounds.origin.x, -cgBounds.origin.y - cgBounds.size.height,
+ cgBounds.size.width, cgBounds.size.height);
+ }
+
+ // Currently the bounds are based on being rendered at (0,0).
+ // The top left must not move, since that is the base from which subpixel positioning is offset.
+ if (fDoSubPosition) {
+ skBounds.fRight += SkFixedToFloat(glyph->getSubXFixed());
+ skBounds.fBottom += SkFixedToFloat(glyph->getSubYFixed());
+ }
+
+ // We're trying to pack left and top into int16_t,
+ // and width and height into uint16_t, after outsetting by 1.
+ if (!SkRect::MakeXYWH(-32767, -32767, 65535, 65535).contains(skBounds)) {
+ return;
+ }
+
+ SkIRect skIBounds;
+ skBounds.roundOut(&skIBounds);
+ // Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
+ // Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
+ // is not currently known, as CG dilates the outlines by some percentage.
+ // Note that if this context is A8 and not back-forming from LCD, there is no need to outset.
+ skIBounds.outset(1, 1);
+ glyph->fLeft = SkToS16(skIBounds.fLeft);
+ glyph->fTop = SkToS16(skIBounds.fTop);
+ glyph->fWidth = SkToU16(skIBounds.width());
+ glyph->fHeight = SkToU16(skIBounds.height());
+}
+
+#include "include/private/SkColorData.h"
+
+static constexpr uint8_t sk_pow2_table(size_t i) {
+ return SkToU8(((i * i + 128) / 255));
+}
+
+/**
+ * This will invert the gamma applied by CoreGraphics, so we can get linear
+ * values.
+ *
+ * CoreGraphics obscurely defaults to 2.0 as the subpixel coverage gamma value.
+ * The color space used does not appear to affect this choice.
+ */
+static constexpr auto gLinearCoverageFromCGLCDValue = SkMakeArray<256>(sk_pow2_table);
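+
+// Worked example (sketch): a CG coverage byte of 128 maps to
+// (128 * 128 + 128) / 255 == 64 in integer arithmetic, i.e. roughly
+// (128/255)^2 scaled back to [0, 255], undoing the 2.0 gamma.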
+
+static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= ((CGRGBPixel_getAlpha(*src++) >> 7) ^ 0x1) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(CGRGBPixel rgb, const uint8_t* table8) {
+ U8CPU r = 0xFF - ((rgb >> 16) & 0xFF);
+ U8CPU g = 0xFF - ((rgb >> 8) & 0xFF);
+ U8CPU b = 0xFF - ((rgb >> 0) & 0xFF);
+ U8CPU lum = sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ lum = SkTMax(lum, (U8CPU)0x30);
+#endif
+ return lum;
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_Mac::RGBToA8(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes,
+ const SkGlyph& glyph, const uint8_t* table8) {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* SK_RESTRICT dst = (uint8_t*)glyph.fImage;
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; ++i) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(cgPixels[i], table8);
+ }
+ cgPixels = SkTAddOffset<const CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+}
+
+template<bool APPLY_PREBLEND>
+uint16_t SkScalerContext_Mac::RGBToLcd16(CGRGBPixel rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 16) & 0xFF), tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 8) & 0xFF), tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 0) & 0xFF), tableB);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkTMax(r, (U8CPU)0x30);
+ g = SkTMax(g, (U8CPU)0x30);
+ b = SkTMax(b, (U8CPU)0x30);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_Mac::RGBToLcd16(const CGRGBPixel* SK_RESTRICT cgPixels,
+ size_t cgRowBytes,
+ const SkGlyph& glyph,
+ const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint16_t* SK_RESTRICT dst = (uint16_t*)glyph.fImage;
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = RGBToLcd16<APPLY_PREBLEND>(cgPixels[i], tableR, tableG, tableB);
+ }
+ cgPixels = SkTAddOffset<const CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint16_t>(dst, dstRB);
+ }
+}
+
+static SkPMColor cgpixels_to_pmcolor(CGRGBPixel rgb) {
+ U8CPU a = (rgb >> 24) & 0xFF;
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ a = SkTMax(a, (U8CPU)0x30);
+#endif
+ return SkPackARGB32(a, r, g, b);
+}
+
+void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = SkTo<CGGlyph>(glyph.getGlyphID());
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ bool requestSmooth = fRec.getHinting() != SkFontHinting::kNone;
+ bool lightOnDark = (fRec.fFlags & SkScalerContext::kLightOnDark_Flag) != 0;
+
+ // Draw the glyph
+ size_t cgRowBytes;
+ CGRGBPixel* cgPixels = fOffscreen.getCG(*this, glyph, cgGlyph, &cgRowBytes, requestSmooth, lightOnDark);
+ if (cgPixels == nullptr) {
+ return;
+ }
+
+ // Fix the glyph
+ if ((glyph.fMaskFormat == SkMask::kLCD16_Format) ||
+ (glyph.fMaskFormat == SkMask::kA8_Format
+ && requestSmooth
+ && smooth_behavior() != SmoothBehavior::none))
+ {
+ const uint8_t* linear = gLinearCoverageFromCGLCDValue.data();
+
+ // Note that the following cannot really be integrated into the
+ // pre-blend, since we may not be applying the pre-blend; when we aren't
+ // applying the pre-blend it means that a filter wants linear anyway.
+ // Other code may also be applying the pre-blend, so we'd need another
+ // one with this and one without.
+ CGRGBPixel* addr = cgPixels;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.fWidth; ++x) {
+ int r = linear[(addr[x] >> 16) & 0xFF];
+ int g = linear[(addr[x] >> 8) & 0xFF];
+ int b = linear[(addr[x] >> 0) & 0xFF];
+ // If light-on-dark was requested, the mask is drawn inverted.
+ if (lightOnDark) {
+ r = 255 - r;
+ g = 255 - g;
+ b = 255 - b;
+ }
+ addr[x] = (r << 16) | (g << 8) | b;
+ }
+ addr = SkTAddOffset<CGRGBPixel>(addr, cgRowBytes);
+ }
+ }
+
+ // Convert glyph to mask
+ switch (glyph.fMaskFormat) {
+ case SkMask::kLCD16_Format: {
+ if (fPreBlend.isApplicable()) {
+ RGBToLcd16<true>(cgPixels, cgRowBytes, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ RGBToLcd16<false>(cgPixels, cgRowBytes, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ } break;
+ case SkMask::kA8_Format: {
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(cgPixels, cgRowBytes, glyph, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(cgPixels, cgRowBytes, glyph, fPreBlend.fG);
+ }
+ } break;
+ case SkMask::kBW_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* dst = (uint8_t*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ cgpixels_to_bits(dst, cgPixels, width);
+ cgPixels = SkTAddOffset<CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+ } break;
+ case SkMask::kARGB32_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ SkPMColor* dst = (SkPMColor*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = cgpixels_to_pmcolor(cgPixels[x]);
+ }
+ cgPixels = SkTAddOffset<CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<SkPMColor>(dst, dstRB);
+ }
+ } break;
+ default:
+ SkDEBUGFAIL("unexpected mask format");
+ break;
+ }
+}
+
+/*
+ * Our subpixel resolution is only 2 bits in each direction, so a scale of 4
+ * seems sufficient, and possibly even correct, to allow the hinted outline
+ * to be subpixel positioned.
+ */
+#define kScaleForSubPixelPositionHinting (4.0f)
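+
+// Sketch of the effect: for a horizontal, axis-aligned baseline the code below
+// ends up with scaleX == 4 and scaleY == 1, so the outline is requested at 4x
+// width (quartering the effect of X-direction hints after the final 1/4
+// scale-down) while Y hinting is retained at full strength.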
+
+bool SkScalerContext_Mac::generatePath(SkGlyphID glyph, SkPath* path) {
+ SkScalar scaleX = SK_Scalar1;
+ SkScalar scaleY = SK_Scalar1;
+
+ CGAffineTransform xform = fTransform;
+ /*
+ * For subpixel positioning, we want to return an unhinted outline, so it
+ * can be positioned nicely at fractional offsets. However, we special-case
+ * if the baseline of the (horizontal) text is axis-aligned. In those cases
+ * we want to retain hinting in the direction orthogonal to the baseline.
+ * e.g. for horizontal baseline, we want to retain hinting in Y.
+ * The way we remove hinting is to scale the font by some value (4) in that
+ * direction, ask for the path, and then scale the path back down.
+ */
+ if (fDoSubPosition) {
+ // start out by assuming that we want no hinting in X and Y
+ scaleX = scaleY = kScaleForSubPixelPositionHinting;
+ // now see if we need to restore hinting for axis-aligned baselines
+ switch (this->computeAxisAlignmentForHText()) {
+ case kX_SkAxisAlignment:
+ scaleY = SK_Scalar1; // want hinting in the Y direction
+ break;
+ case kY_SkAxisAlignment:
+ scaleX = SK_Scalar1; // want hinting in the X direction
+ break;
+ default:
+ break;
+ }
+
+ CGAffineTransform scale(CGAffineTransformMakeScale(ScalarToCG(scaleX), ScalarToCG(scaleY)));
+ xform = CGAffineTransformConcat(fTransform, scale);
+ }
+
+ CGGlyph cgGlyph = SkTo<CGGlyph>(glyph);
+ SkUniqueCFRef<CGPathRef> cgPath(CTFontCreatePathForGlyph(fCTFont.get(), cgGlyph, &xform));
+
+ path->reset();
+ if (!cgPath) {
+ return false;
+ }
+
+ CGPathApply(cgPath.get(), path, SkScalerContext_Mac::CTPathElement);
+ if (fDoSubPosition) {
+ SkMatrix m;
+ m.setScale(SkScalarInvert(scaleX), SkScalarInvert(scaleY));
+ path->transform(m);
+ }
+ return true;
+}
+
+void SkScalerContext_Mac::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ CGRect theBounds = CTFontGetBoundingBox(fCTFont.get());
+
+ metrics->fTop = CGToScalar(-CGRectGetMaxY_inline(theBounds));
+ metrics->fAscent = CGToScalar(-CTFontGetAscent(fCTFont.get()));
+ metrics->fDescent = CGToScalar( CTFontGetDescent(fCTFont.get()));
+ metrics->fBottom = CGToScalar(-CGRectGetMinY_inline(theBounds));
+ metrics->fLeading = CGToScalar( CTFontGetLeading(fCTFont.get()));
+ metrics->fAvgCharWidth = CGToScalar( CGRectGetWidth_inline(theBounds));
+ metrics->fXMin = CGToScalar( CGRectGetMinX_inline(theBounds));
+ metrics->fXMax = CGToScalar( CGRectGetMaxX_inline(theBounds));
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ metrics->fXHeight = CGToScalar( CTFontGetXHeight(fCTFont.get()));
+ metrics->fCapHeight = CGToScalar( CTFontGetCapHeight(fCTFont.get()));
+ metrics->fUnderlineThickness = CGToScalar( CTFontGetUnderlineThickness(fCTFont.get()));
+ metrics->fUnderlinePosition = -CGToScalar( CTFontGetUnderlinePosition(fCTFont.get()));
+
+ metrics->fFlags = 0;
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ // See https://bugs.chromium.org/p/skia/issues/detail?id=6203
+ // At least on 10.12.3 with memory based fonts the x-height is always 0.6666 of the ascent and
+ // the cap-height is always 0.8888 of the ascent. It appears that the values from the 'OS/2'
+ // table are read, but then overwritten if the font is not a system font. As a result, if a
+ // valid 'OS/2' table is available, use its values as long as they aren't too strange.
+ struct OS2HeightMetrics {
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ } heights;
+ size_t bytesRead = this->getTypeface()->getTableData(
+ SkTEndian_SwapBE32(SkOTTableOS2::TAG), offsetof(SkOTTableOS2, version.v2.sxHeight),
+ sizeof(heights), &heights);
+ if (bytesRead == sizeof(heights)) {
+ // 'fontSize' is correct because the entire resolved size is set by the constructor.
+ CGFloat fontSize = CTFontGetSize(this->fCTFont.get());
+ unsigned upem = CTFontGetUnitsPerEm(this->fCTFont.get());
+ unsigned maxSaneHeight = upem * 2;
+ uint16_t xHeight = SkEndian_SwapBE16(heights.sxHeight);
+ if (xHeight && xHeight < maxSaneHeight) {
+ metrics->fXHeight = CGToScalar(xHeight * fontSize / upem);
+ }
+ uint16_t capHeight = SkEndian_SwapBE16(heights.sCapHeight);
+ if (capHeight && capHeight < maxSaneHeight) {
+ metrics->fCapHeight = CGToScalar(capHeight * fontSize / upem);
+ }
+ }
+}
+
+void SkScalerContext_Mac::CTPathElement(void *info, const CGPathElement *element) {
+ SkPath* skPath = (SkPath*)info;
+
+ // Process the path element
+ switch (element->type) {
+ case kCGPathElementMoveToPoint:
+ skPath->moveTo(element->points[0].x, -element->points[0].y);
+ break;
+
+ case kCGPathElementAddLineToPoint:
+ skPath->lineTo(element->points[0].x, -element->points[0].y);
+ break;
+
+ case kCGPathElementAddQuadCurveToPoint:
+ skPath->quadTo(element->points[0].x, -element->points[0].y,
+ element->points[1].x, -element->points[1].y);
+ break;
+
+ case kCGPathElementAddCurveToPoint:
+ skPath->cubicTo(element->points[0].x, -element->points[0].y,
+ element->points[1].x, -element->points[1].y,
+ element->points[2].x, -element->points[2].y);
+ break;
+
+ case kCGPathElementCloseSubpath:
+ skPath->close();
+ break;
+
+ default:
+ SkDEBUGFAIL("Unknown path element!");
+ break;
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns nullptr on failure.
+// Takes ownership of the provider (it is passed by value as a unique ref).
+static sk_sp<SkTypeface> create_from_dataProvider(SkUniqueCFRef<CGDataProviderRef> provider,
+ std::unique_ptr<SkStreamAsset> providedData,
+ int ttcIndex) {
+ if (ttcIndex != 0) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CGFontRef> cg(CGFontCreateWithDataProvider(provider.get()));
+ if (!cg) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CTFontRef> ct(CTFontCreateWithGraphicsFont(cg.get(), 0, nullptr, nullptr));
+ if (!ct) {
+ return nullptr;
+ }
+ return create_from_CTFontRef(std::move(ct), nullptr, std::move(providedData));
+}
+
+// Web fonts added to the CTFont registry do not return their character set.
+// Iterate through the font in this case. The existing caller caches the result,
+// so the performance impact isn't too bad.
+static void populate_glyph_to_unicode_slow(CTFontRef ctFont, CFIndex glyphCount,
+ SkUnichar* out) {
+ sk_bzero(out, glyphCount * sizeof(SkUnichar));
+ UniChar unichar = 0;
+ while (glyphCount > 0) {
+ CGGlyph glyph;
+ if (CTFontGetGlyphsForCharacters(ctFont, &unichar, &glyph, 1)) {
+ if (out[glyph] == 0) {
+ out[glyph] = unichar;
+ --glyphCount;
+ }
+ }
+ if (++unichar == 0) {
+ break;
+ }
+ }
+}
+
+static constexpr uint16_t kPlaneSize = 1 << 13;
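+// 1 << 13 bytes == 8KiB == 65536 bits: one bit per code point in a single Unicode plane.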
+
+static void get_plane_glyph_map(const uint8_t* bits,
+ CTFontRef ctFont,
+ CFIndex glyphCount,
+ SkUnichar* glyphToUnicode,
+ uint8_t planeIndex) {
+ SkUnichar planeOrigin = (SkUnichar)planeIndex << 16; // top half of codepoint.
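+ // Bit j of bits[i] marks code point (planeOrigin | (i << 3) | j) as present in the set.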
+ for (uint16_t i = 0; i < kPlaneSize; i++) {
+ uint8_t mask = bits[i];
+ if (!mask) {
+ continue;
+ }
+ for (uint8_t j = 0; j < 8; j++) {
+ if (0 == (mask & ((uint8_t)1 << j))) {
+ continue;
+ }
+ uint16_t planeOffset = (i << 3) | j;
+ SkUnichar codepoint = planeOrigin | (SkUnichar)planeOffset;
+ uint16_t utf16[2] = {planeOffset, 0};
+ size_t count = 1;
+ if (planeOrigin != 0) {
+ count = SkUTF::ToUTF16(codepoint, utf16);
+ }
+ CGGlyph glyphs[2] = {0, 0};
+ if (CTFontGetGlyphsForCharacters(ctFont, utf16, glyphs, count)) {
+ SkASSERT(glyphs[1] == 0);
+ SkASSERT(glyphs[0] < glyphCount);
+ // CTFontCopyCharacterSet and CTFontGetGlyphsForCharacters seem to add 'support'
+ // for characters 0x9, 0xA, and 0xD by mapping them to the glyph for character 0x20.
+ // Prefer mappings to codepoints at or above 0x20.
+ if (glyphToUnicode[glyphs[0]] < 0x20) {
+ glyphToUnicode[glyphs[0]] = codepoint;
+ }
+ }
+ }
+ }
+}
+// Construct Glyph to Unicode table.
+static void populate_glyph_to_unicode(CTFontRef ctFont, CFIndex glyphCount,
+ SkUnichar* glyphToUnicode) {
+ sk_bzero(glyphToUnicode, sizeof(SkUnichar) * glyphCount);
+ SkUniqueCFRef<CFCharacterSetRef> charSet(CTFontCopyCharacterSet(ctFont));
+ if (!charSet) {
+ populate_glyph_to_unicode_slow(ctFont, glyphCount, glyphToUnicode);
+ return;
+ }
+
+ SkUniqueCFRef<CFDataRef> bitmap(
+ CFCharacterSetCreateBitmapRepresentation(nullptr, charSet.get()));
+ if (!bitmap) {
+ return;
+ }
+ CFIndex dataLength = CFDataGetLength(bitmap.get());
+ if (!dataLength) {
+ return;
+ }
+ SkASSERT(dataLength >= kPlaneSize);
+ const UInt8* bits = CFDataGetBytePtr(bitmap.get());
+
+ get_plane_glyph_map(bits, ctFont, glyphCount, glyphToUnicode, 0);
+ /*
+ A CFData object that specifies the bitmap representation of the Unicode
+ character points for the new character set. The bitmap representation can
+ cover the entire Unicode character range, from the BMP through Plane 16. The
+ first 8KiB (8192 bytes) of the data represent the BMP range. The BMP 8KiB
+ can be followed by zero to sixteen 8KiB bitmaps, each prepended with a
+ plane index byte. For example, the bitmap representing the BMP and Plane 2
+ has a size of 16385 bytes (8KiB for the BMP, 1 byte for the plane index,
+ and an 8KiB bitmap for Plane 2). The plane index byte, in this case,
+ contains the integer value two.
+ */
+
+ if (dataLength <= kPlaneSize) {
+ return;
+ }
+ int extraPlaneCount = (dataLength - kPlaneSize) / (1 + kPlaneSize);
+ SkASSERT(dataLength == kPlaneSize + extraPlaneCount * (1 + kPlaneSize));
+ while (extraPlaneCount-- > 0) {
+ bits += kPlaneSize;
+ uint8_t planeIndex = *bits++;
+ SkASSERT(planeIndex >= 1);
+ SkASSERT(planeIndex <= 16);
+ get_plane_glyph_map(bits, ctFont, glyphCount, glyphToUnicode, planeIndex);
+ }
+}
+
+/** Assumes src and dst are not nullptr. */
+static void CFStringToSkString(CFStringRef src, SkString* dst) {
+ // Reserve enough room for the worst-case string,
+ // plus 1 byte for the trailing null.
+ CFIndex length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(src),
+ kCFStringEncodingUTF8) + 1;
+ dst->resize(length);
+ CFStringGetCString(src, dst->writable_str(), length, kCFStringEncodingUTF8);
+ // Resize to the actual UTF-8 length used, stripping the null character.
+ dst->resize(strlen(dst->c_str()));
+}
+
+void SkTypeface_Mac::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ SkUniqueCFRef<CTFontRef> ctFont =
+ ctfont_create_exact_copy(fFontRef.get(), CTFontGetUnitsPerEm(fFontRef.get()), nullptr);
+ CFIndex glyphCount = CTFontGetGlyphCount(ctFont.get());
+ populate_glyph_to_unicode(ctFont.get(), glyphCount, dstArray);
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface_Mac::onGetAdvancedMetrics() const {
+
+ SkUniqueCFRef<CTFontRef> ctFont =
+ ctfont_create_exact_copy(fFontRef.get(), CTFontGetUnitsPerEm(fFontRef.get()), nullptr);
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(new SkAdvancedTypefaceMetrics);
+
+ {
+ SkUniqueCFRef<CFStringRef> fontName(CTFontCopyPostScriptName(ctFont.get()));
+ if (fontName.get()) {
+ CFStringToSkString(fontName.get(), &info->fPostScriptName);
+ info->fFontName = info->fPostScriptName;
+ }
+ }
+
+ // In 10.10 and earlier, CTFontCopyVariationAxes and CTFontCopyVariation do not work when
+ // applied to fonts which started life with CGFontCreateWithDataProvider (they simply always
+ // return nullptr). As a result, we are limited to CGFontCopyVariationAxes and
+ // CGFontCopyVariations here until support for 10.10 and earlier is removed.
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(ctFont.get(), nullptr));
+ if (cgFont) {
+ SkUniqueCFRef<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cgFont.get()));
+ if (cgAxes && CFArrayGetCount(cgAxes.get()) > 0) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag;
+ }
+ }
+
+ SkOTTableOS2_V4::Type fsType;
+ if (sizeof(fsType) == this->getTableData(SkTEndian_SwapBE32(SkOTTableOS2::TAG),
+ offsetof(SkOTTableOS2_V4, fsType),
+ sizeof(fsType),
+ &fsType)) {
+ SkOTUtils::SetAdvancedTypefaceFlags(fsType, info.get());
+ }
+
+ // If it's not a TrueType font, mark it as 'other'. Assume that TrueType
+ // fonts always have both glyf and loca tables. At the least, this is what
+ // sfntly needs to subset the font. CTFontCopyAttribute() does not always
+ // succeed in determining this directly.
+ if (!this->getTableSize('glyf') || !this->getTableSize('loca')) {
+ return info;
+ }
+
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ CTFontSymbolicTraits symbolicTraits = CTFontGetSymbolicTraits(ctFont.get());
+ if (symbolicTraits & kCTFontMonoSpaceTrait) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (symbolicTraits & kCTFontItalicTrait) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ CTFontStylisticClass stylisticClass = symbolicTraits & kCTFontClassMaskTrait;
+ if (stylisticClass >= kCTFontOldStyleSerifsClass && stylisticClass <= kCTFontSlabSerifsClass) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (stylisticClass & kCTFontScriptsClass) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+ info->fItalicAngle = (int16_t) CTFontGetSlantAngle(ctFont.get());
+ info->fAscent = (int16_t) CTFontGetAscent(ctFont.get());
+ info->fDescent = (int16_t) CTFontGetDescent(ctFont.get());
+ info->fCapHeight = (int16_t) CTFontGetCapHeight(ctFont.get());
+ CGRect bbox = CTFontGetBoundingBox(ctFont.get());
+
+ SkRect r;
+ r.setLTRB(CGToScalar(CGRectGetMinX_inline(bbox)), // Left
+ CGToScalar(CGRectGetMaxY_inline(bbox)), // Top
+ CGToScalar(CGRectGetMaxX_inline(bbox)), // Right
+ CGToScalar(CGRectGetMinY_inline(bbox))); // Bottom
+
+ r.roundOut(&(info->fBBox));
+
+ // Figure out a good guess for StemV: the minimum width of 'i', 'I', '!', and '1'.
+ // This probably isn't very good with an italic font.
+ int16_t min_width = SHRT_MAX;
+ info->fStemV = 0;
+ static const UniChar stem_chars[] = {'i', 'I', '!', '1'};
+ const size_t count = sizeof(stem_chars) / sizeof(stem_chars[0]);
+ CGGlyph glyphs[count];
+ CGRect boundingRects[count];
+ if (CTFontGetGlyphsForCharacters(ctFont.get(), stem_chars, glyphs, count)) {
+ CTFontGetBoundingRectsForGlyphs(ctFont.get(), kCTFontOrientationHorizontal,
+ glyphs, boundingRects, count);
+ for (size_t i = 0; i < count; i++) {
+ int16_t width = (int16_t) boundingRects[i].size.width;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+ return info;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SK_SFNT_ULONG get_font_type_tag(CTFontRef ctFont) {
+ SkUniqueCFRef<CFNumberRef> fontFormatRef(
+ static_cast<CFNumberRef>(CTFontCopyAttribute(ctFont, kCTFontFormatAttribute)));
+ if (!fontFormatRef) {
+ return 0;
+ }
+
+ SInt32 fontFormatValue;
+ if (!CFNumberGetValue(fontFormatRef.get(), kCFNumberSInt32Type, &fontFormatValue)) {
+ return 0;
+ }
+
+ switch (fontFormatValue) {
+ case kCTFontFormatOpenTypePostScript:
+ return SkSFNTHeader::fontType_OpenTypeCFF::TAG;
+ case kCTFontFormatOpenTypeTrueType:
+ return SkSFNTHeader::fontType_WindowsTrueType::TAG;
+ case kCTFontFormatTrueType:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatPostScript:
+ return SkSFNTHeader::fontType_PostScript::TAG;
+ case kCTFontFormatBitmap:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatUnrecognized:
+ default:
+ return 0;
+ }
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface_Mac::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = 0;
+
+ fInitStream([this]{
+ if (fStream) {
+ return;
+ }
+
+ SK_SFNT_ULONG fontType = get_font_type_tag(fFontRef.get());
+
+ // get table tags
+ int numTables = this->countTables();
+ SkTDArray<SkFontTableTag> tableTags;
+ tableTags.setCount(numTables);
+ this->getTableTags(tableTags.begin());
+
+ // CT seems to be unreliable at obtaining the type,
+ // even if all we want is the first four bytes of the font resource.
+ // Just the presence of the FontForge 'FFTM' table seems to throw it off.
+ if (fontType == 0) {
+ fontType = SkSFNTHeader::fontType_WindowsTrueType::TAG;
+
+ // see https://skbug.com/7630#c7
+ bool couldBeCFF = false;
+ constexpr SkFontTableTag CFFTag = SkSetFourByteTag('C', 'F', 'F', ' ');
+ constexpr SkFontTableTag CFF2Tag = SkSetFourByteTag('C', 'F', 'F', '2');
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ if (CFFTag == tableTags[tableIndex] || CFF2Tag == tableTags[tableIndex]) {
+ couldBeCFF = true;
+ }
+ }
+ if (couldBeCFF) {
+ fontType = SkSFNTHeader::fontType_OpenTypeCFF::TAG;
+ }
+ }
+
+ // Sometimes CoreGraphics incorrectly thinks a font is kCTFontFormatPostScript.
+ // It is exceedingly unlikely that this is the case, so double check
+ // (see https://crbug.com/809763).
+ if (fontType == SkSFNTHeader::fontType_PostScript::TAG) {
+ // see if there are any required 'typ1' tables (see Adobe Technical Note #5180)
+ bool couldBeTyp1 = false;
+ constexpr SkFontTableTag TYPE1Tag = SkSetFourByteTag('T', 'Y', 'P', '1');
+ constexpr SkFontTableTag CIDTag = SkSetFourByteTag('C', 'I', 'D', ' ');
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ if (TYPE1Tag == tableTags[tableIndex] || CIDTag == tableTags[tableIndex]) {
+ couldBeTyp1 = true;
+ }
+ }
+ if (!couldBeTyp1) {
+ fontType = SkSFNTHeader::fontType_OpenTypeCFF::TAG;
+ }
+ }
+
+ // get the table sizes and accumulate the total size of the font
+ SkTDArray<size_t> tableSizes;
+ size_t totalSize = sizeof(SkSFNTHeader) + sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ size_t tableSize = this->getTableSize(tableTags[tableIndex]);
+ totalSize += (tableSize + 3) & ~3;
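+ // (tableSize + 3) & ~3 rounds each table up to a 4-byte boundary; SFNT tables are long-aligned.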
+ *tableSizes.append() = tableSize;
+ }
+
+ // reserve memory for stream, and zero it (tables must be zero padded)
+ fStream.reset(new SkMemoryStream(totalSize));
+ char* dataStart = (char*)fStream->getMemoryBase();
+ sk_bzero(dataStart, totalSize);
+ char* dataPtr = dataStart;
+
+ // compute font header entries
+ uint16_t entrySelector = 0;
+ uint16_t searchRange = 1;
+ while (searchRange < numTables >> 1) {
+ entrySelector++;
+ searchRange <<= 1;
+ }
+ searchRange <<= 4;
+ uint16_t rangeShift = (numTables << 4) - searchRange;
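+ // e.g. numTables == 14: the loop leaves entrySelector == 3 and searchRange == 8,
+ // then searchRange becomes 8 << 4 == 128 and rangeShift == 14 * 16 - 128 == 96.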
+
+ // write font header
+ SkSFNTHeader* header = (SkSFNTHeader*)dataPtr;
+ header->fontType = fontType;
+ header->numTables = SkEndian_SwapBE16(numTables);
+ header->searchRange = SkEndian_SwapBE16(searchRange);
+ header->entrySelector = SkEndian_SwapBE16(entrySelector);
+ header->rangeShift = SkEndian_SwapBE16(rangeShift);
+ dataPtr += sizeof(SkSFNTHeader);
+
+ // write tables
+ SkSFNTHeader::TableDirectoryEntry* entry = (SkSFNTHeader::TableDirectoryEntry*)dataPtr;
+ dataPtr += sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ size_t tableSize = tableSizes[tableIndex];
+ this->getTableData(tableTags[tableIndex], 0, tableSize, dataPtr);
+ entry->tag = SkEndian_SwapBE32(tableTags[tableIndex]);
+ entry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum((SK_OT_ULONG*)dataPtr,
+ tableSize));
+ entry->offset = SkEndian_SwapBE32(SkToU32(dataPtr - dataStart));
+ entry->logicalLength = SkEndian_SwapBE32(SkToU32(tableSize));
+
+ dataPtr += (tableSize + 3) & ~3;
+ ++entry;
+ }
+ });
+ return fStream->duplicate();
+}
+
+struct NonDefaultAxesContext {
+ SkFixed* axisValue;
+ CFArrayRef cgAxes;
+};
+static void set_non_default_axes(CFTypeRef key, CFTypeRef value, void* context) {
+ NonDefaultAxesContext* self = static_cast<NonDefaultAxesContext*>(context);
+
+ if (CFGetTypeID(key) != CFStringGetTypeID() || CFGetTypeID(value) != CFNumberGetTypeID()) {
+ return;
+ }
+
+ // The key is a CFString which is a string from the 'name' table.
+ // Search the cgAxes for an axis with this name, and use its index to store the value.
+ CFIndex keyIndex = -1;
+ CFStringRef keyString = static_cast<CFStringRef>(key);
+ for (CFIndex i = 0; i < CFArrayGetCount(self->cgAxes); ++i) {
+ CFTypeRef cgAxis = CFArrayGetValueAtIndex(self->cgAxes, i);
+ if (CFGetTypeID(cgAxis) != CFDictionaryGetTypeID()) {
+ continue;
+ }
+
+ CFDictionaryRef cgAxisDict = static_cast<CFDictionaryRef>(cgAxis);
+ CFTypeRef cgAxisName = CFDictionaryGetValue(cgAxisDict, kCGFontVariationAxisName);
+ if (!cgAxisName || CFGetTypeID(cgAxisName) != CFStringGetTypeID()) {
+ continue;
+ }
+ CFStringRef cgAxisNameString = static_cast<CFStringRef>(cgAxisName);
+ if (CFStringCompare(keyString, cgAxisNameString, 0) == kCFCompareEqualTo) {
+ keyIndex = i;
+ break;
+ }
+ }
+ if (keyIndex == -1) {
+ return;
+ }
+
+ CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
+ double valueDouble;
+ if (!CFNumberGetValue(valueNumber, kCFNumberDoubleType, &valueDouble) ||
+ valueDouble < SkFixedToDouble(SK_FixedMin) || SkFixedToDouble(SK_FixedMax) < valueDouble)
+ {
+ return;
+ }
+ self->axisValue[keyIndex] = SkDoubleToFixed(valueDouble);
+}
+static bool get_variations(CTFontRef ctFont, CFIndex* cgAxisCount,
+ SkAutoSTMalloc<4, SkFixed>* axisValues)
+{
+ // In 10.10 and earlier, CTFontCopyVariationAxes and CTFontCopyVariation do not work when
+ // applied to fonts which started life with CGFontCreateWithDataProvider (they simply always
+ // return nullptr). As a result, we are limited to CGFontCopyVariationAxes and
+ // CGFontCopyVariations here until support for 10.10 and earlier is removed.
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(ctFont, nullptr));
+ if (!cgFont) {
+ return false;
+ }
+
+ SkUniqueCFRef<CFDictionaryRef> cgVariations(CGFontCopyVariations(cgFont.get()));
+ // If a font has no variations CGFontCopyVariations returns nullptr (instead of an empty dict).
+ if (!cgVariations) {
+ return false;
+ }
+
+ SkUniqueCFRef<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cgFont.get()));
+ if (!cgAxes) {
+ return false;
+ }
+ *cgAxisCount = CFArrayGetCount(cgAxes.get());
+ axisValues->reset(*cgAxisCount);
+
+ // Set all of the axes to their default values.
+ // Fail if any default value cannot be determined.
+ for (CFIndex i = 0; i < *cgAxisCount; ++i) {
+ CFTypeRef cgAxis = CFArrayGetValueAtIndex(cgAxes.get(), i);
+ if (CFGetTypeID(cgAxis) != CFDictionaryGetTypeID()) {
+ return false;
+ }
+
+ CFDictionaryRef cgAxisDict = static_cast<CFDictionaryRef>(cgAxis);
+ CFTypeRef axisDefaultValue = CFDictionaryGetValue(cgAxisDict,
+ kCGFontVariationAxisDefaultValue);
+ if (!axisDefaultValue || CFGetTypeID(axisDefaultValue) != CFNumberGetTypeID()) {
+ return false;
+ }
+ CFNumberRef axisDefaultValueNumber = static_cast<CFNumberRef>(axisDefaultValue);
+ double axisDefaultValueDouble;
+ if (!CFNumberGetValue(axisDefaultValueNumber, kCFNumberDoubleType, &axisDefaultValueDouble))
+ {
+ return false;
+ }
+ if (axisDefaultValueDouble < SkFixedToDouble(SK_FixedMin) ||
+ SkFixedToDouble(SK_FixedMax) < axisDefaultValueDouble)
+ {
+ return false;
+ }
+ (*axisValues)[(int)i] = SkDoubleToFixed(axisDefaultValueDouble);
+ }
+
+ // Override the default values with the given font's stated axis values.
+ NonDefaultAxesContext c = { axisValues->get(), cgAxes.get() };
+ CFDictionaryApplyFunction(cgVariations.get(), set_non_default_axes, &c);
+
+ return true;
+}
+std::unique_ptr<SkFontData> SkTypeface_Mac::onMakeFontData() const {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+
+ CFIndex cgAxisCount;
+ SkAutoSTMalloc<4, SkFixed> axisValues;
+ if (get_variations(fFontRef.get(), &cgAxisCount, &axisValues)) {
+ return skstd::make_unique<SkFontData>(std::move(stream), index,
+ axisValues.get(), cgAxisCount);
+ }
+ return skstd::make_unique<SkFontData>(std::move(stream), index, nullptr, 0);
+}
+
+/** Creates a CT variation dictionary {tag, value} from a CG variation dictionary {name, value}. */
+static SkUniqueCFRef<CFDictionaryRef> ct_variation_from_cg_variation(CFDictionaryRef cgVariations,
+ CFArrayRef ctAxes) {
+
+ SkUniqueCFRef<CFMutableDictionaryRef> ctVariations(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CFIndex axisCount = CFArrayGetCount(ctAxes);
+ for (CFIndex i = 0; i < axisCount; ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(ctAxes, i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ // The assumption is that values produced by kCTFontVariationAxisNameKey and
+ // kCGFontVariationAxisName will always be equal.
+ CFTypeRef axisName = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisNameKey);
+ if (!axisName || CFGetTypeID(axisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ CFTypeRef axisValue = CFDictionaryGetValue(cgVariations, axisName);
+ if (!axisValue || CFGetTypeID(axisValue) != CFNumberGetTypeID()) {
+ return nullptr;
+ }
+
+ CFTypeRef axisTag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
+ if (!axisTag || CFGetTypeID(axisTag) != CFNumberGetTypeID()) {
+ return nullptr;
+ }
+
+ CFDictionaryAddValue(ctVariations.get(), axisTag, axisValue);
+ }
+ return ctVariations;
+}
+
+int SkTypeface_Mac::onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+ // The CGFont variation data does not contain the tag.
+
+ // CTFontCopyVariationAxes returns nullptr for CGFontCreateWithDataProvider fonts with
+ // macOS 10.10 and iOS 9 or earlier. When this happens, there is no API to provide the tag.
+ SkUniqueCFRef<CFArrayRef> ctAxes(CTFontCopyVariationAxes(fFontRef.get()));
+ if (!ctAxes) {
+ return -1;
+ }
+ CFIndex axisCount = CFArrayGetCount(ctAxes.get());
+ if (!coordinates || coordinateCount < axisCount) {
+ return axisCount;
+ }
+
+ // This call always returns nullptr on 10.11 and under for CGFontCreateWithDataProvider fonts.
+ // When this happens, try converting the CG variation to a CT variation.
+ // On 10.12 and later, this only returns non-default variations.
+ SkUniqueCFRef<CFDictionaryRef> ctVariations(CTFontCopyVariation(fFontRef.get()));
+ if (!ctVariations) {
+ // When 10.11 and earlier are no longer supported, the following code can be replaced with
+ // return -1 and ct_variation_from_cg_variation can be removed.
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(fFontRef.get(), nullptr));
+ if (!cgFont) {
+ return -1;
+ }
+ SkUniqueCFRef<CFDictionaryRef> cgVariations(CGFontCopyVariations(cgFont.get()));
+ if (!cgVariations) {
+ return -1;
+ }
+ ctVariations = ct_variation_from_cg_variation(cgVariations.get(), ctAxes.get());
+ if (!ctVariations) {
+ return -1;
+ }
+ }
+
+ for (int i = 0; i < axisCount; ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(ctAxes.get(), i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return -1;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ CFTypeRef tag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
+ if (!tag || CFGetTypeID(tag) != CFNumberGetTypeID()) {
+ return -1;
+ }
+ CFNumberRef tagNumber = static_cast<CFNumberRef>(tag);
+ int64_t tagLong;
+ if (!CFNumberGetValue(tagNumber, kCFNumberSInt64Type, &tagLong)) {
+ return -1;
+ }
+ coordinates[i].axis = tagLong;
+
+ CGFloat variationCGFloat;
+ CFTypeRef variationValue = CFDictionaryGetValue(ctVariations.get(), tagNumber);
+ if (variationValue) {
+ if (CFGetTypeID(variationValue) != CFNumberGetTypeID()) {
+ return -1;
+ }
+ CFNumberRef variationNumber = static_cast<CFNumberRef>(variationValue);
+ if (!CFNumberGetValue(variationNumber, kCFNumberCGFloatType, &variationCGFloat)) {
+ return -1;
+ }
+ } else {
+ CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisDefaultValueKey);
+ if (!def || CFGetTypeID(def) != CFNumberGetTypeID()) {
+ return -1;
+ }
+ CFNumberRef defNumber = static_cast<CFNumberRef>(def);
+ if (!CFNumberGetValue(defNumber, kCFNumberCGFloatType, &variationCGFloat)) {
+ return -1;
+ }
+ }
+ coordinates[i].value = CGToScalar(variationCGFloat);
+
+ }
+ return axisCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+int SkTypeface_Mac::onGetUPEM() const {
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(fFontRef.get(), nullptr));
+ return CGFontGetUnitsPerEm(cgFont.get());
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_Mac::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ CFStringRef cfLanguageRaw;
+ SkUniqueCFRef<CFStringRef> cfFamilyName(
+ CTFontCopyLocalizedName(fFontRef.get(), kCTFontFamilyNameKey, &cfLanguageRaw));
+ SkUniqueCFRef<CFStringRef> cfLanguage(cfLanguageRaw);
+
+ SkString skLanguage;
+ SkString skFamilyName;
+ if (cfLanguage) {
+ CFStringToSkString(cfLanguage.get(), &skLanguage);
+ } else {
+ skLanguage = "und"; //undetermined
+ }
+ if (cfFamilyName) {
+ CFStringToSkString(cfFamilyName.get(), &skFamilyName);
+ }
+
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(skFamilyName, skLanguage);
+ }
+ return nameIter.release();
+}
+
+int SkTypeface_Mac::onGetTableTags(SkFontTableTag tags[]) const {
+ SkUniqueCFRef<CFArrayRef> cfArray(
+ CTFontCopyAvailableTables(fFontRef.get(), kCTFontTableOptionNoOptions));
+ if (!cfArray) {
+ return 0;
+ }
+ int count = SkToInt(CFArrayGetCount(cfArray.get()));
+ if (tags) {
+ for (int i = 0; i < count; ++i) {
+ uintptr_t fontTag = reinterpret_cast<uintptr_t>(
+ CFArrayGetValueAtIndex(cfArray.get(), i));
+ tags[i] = static_cast<SkFontTableTag>(fontTag);
+ }
+ }
+ return count;
+}
+
+// If, as is the case with web fonts, the CTFont data isn't available,
+// the CGFont data may work. While the CGFont may always provide the
+// right result, leave the CTFont code path to minimize disruption.
+static SkUniqueCFRef<CFDataRef> copy_table_from_font(CTFontRef ctFont, SkFontTableTag tag) {
+ SkUniqueCFRef<CFDataRef> data(CTFontCopyTable(ctFont, (CTFontTableTag) tag,
+ kCTFontTableOptionNoOptions));
+ if (!data) {
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(ctFont, nullptr));
+ data.reset(CGFontCopyTableForTag(cgFont.get(), tag));
+ }
+ return data;
+}
+
+size_t SkTypeface_Mac::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* dstData) const {
+ SkUniqueCFRef<CFDataRef> srcData = copy_table_from_font(fFontRef.get(), tag);
+ if (!srcData) {
+ return 0;
+ }
+
+ size_t srcSize = CFDataGetLength(srcData.get());
+ if (offset >= srcSize) {
+ return 0;
+ }
+ if (length > srcSize - offset) {
+ length = srcSize - offset;
+ }
+ if (dstData) {
+ memcpy(dstData, CFDataGetBytePtr(srcData.get()) + offset, length);
+ }
+ return length;
+}
+
+sk_sp<SkData> SkTypeface_Mac::onCopyTableData(SkFontTableTag tag) const {
+ SkUniqueCFRef<CFDataRef> srcData = copy_table_from_font(fFontRef.get(), tag);
+ if (!srcData) {
+ return nullptr;
+ }
+ const UInt8* data = CFDataGetBytePtr(srcData.get());
+ CFIndex length = CFDataGetLength(srcData.get());
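+ // The SkData takes ownership of the CFData: srcData is released into the proc's
+ // context below and CFRelease'd only when the SkData is destroyed.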
+ return SkData::MakeWithProc(data, length,
+ [](const void*, void* ctx) {
+ CFRelease((CFDataRef)ctx);
+ }, (void*)srcData.release());
+}
+
+SkScalerContext* SkTypeface_Mac::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkScalerContext_Mac(sk_ref_sp(const_cast<SkTypeface_Mac*>(this)), effects, desc);
+}
+
+void SkTypeface_Mac::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
+ rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
+ {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ // Render the glyphs as close as possible to what was requested.
+ // The above turns off subpixel rendering, but the user requested it.
+ // Normal hinting will cause the A8 masks to be generated from CoreGraphics subpixel masks.
+ // See comments below for more details.
+ rec->setHinting(SkFontHinting::kNormal);
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ const SmoothBehavior smoothBehavior = smooth_behavior();
+
+ // Only two levels of hinting are supported.
+ // kNo_Hinting means avoid CoreGraphics outline dilation (smoothing).
+ // kNormal_Hinting means CoreGraphics outline dilation (smoothing) is allowed.
+ if (rec->getHinting() != SkFontHinting::kNone) {
+ rec->setHinting(SkFontHinting::kNormal);
+ }
+ // If smoothing has no effect, don't request it.
+ if (smoothBehavior == SmoothBehavior::none) {
+ rec->setHinting(SkFontHinting::kNone);
+ }
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ // Tracked by http://code.google.com/p/skia/issues/detail?id=915 .
+ // There is no current means to honor a request for unhinted lcd,
+ // so arbitrarily ignore the hinting request and honor lcd.
+
+ // Hinting and smoothing should be orthogonal, but currently they are not.
+ // CoreGraphics has no API to influence hinting. However, its lcd smoothed
+ // output is drawn from auto-dilated outlines (the amount of which is
+ // determined by AppleFontSmoothing). Its regular anti-aliased output is
+ // drawn from un-dilated outlines.
+
+ // The behavior of Skia is as follows:
+ // [AA][no-hint]: generate AA using CoreGraphic's AA output.
+ // [AA][yes-hint]: use CoreGraphic's LCD output and reduce it to a single
+ // channel. This matches [LCD][yes-hint] in weight.
+ // [LCD][no-hint]: currently unable to honor, and must pick which to respect.
+ // Currently we side with LCD, effectively ignoring the hinting setting.
+ // [LCD][yes-hint]: generate LCD using CoreGraphic's LCD output.
+ if (rec->fMaskFormat == SkMask::kLCD16_Format) {
+ if (smoothBehavior == SmoothBehavior::subpixel) {
+ // CoreGraphics creates 555 masks for smoothed text anyway.
+ rec->fMaskFormat = SkMask::kLCD16_Format;
+ rec->setHinting(SkFontHinting::kNormal);
+ } else {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ if (smoothBehavior != SmoothBehavior::none) {
+ rec->setHinting(SkFontHinting::kNormal);
+ }
+ }
+ }
+
+ // CoreText provides no information as to whether a glyph will be color or not.
+ // Fonts may mix outlines and bitmaps, so information is needed on a glyph by glyph basis.
+ // If a font contains an 'sbix' table, consider it to be a color font, and disable lcd.
+ if (fHasColorGlyphs) {
+ rec->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ // Smoothing will be used if the format is either LCD or if there is hinting.
+ // In those cases, we need to choose the proper dilation mask based on the color.
+ if (rec->fMaskFormat == SkMask::kLCD16_Format ||
+ (rec->fMaskFormat == SkMask::kA8_Format && rec->getHinting() != SkFontHinting::kNone)) {
+ SkColor color = rec->getLuminanceColor();
+ int r = SkColorGetR(color);
+ int g = SkColorGetG(color);
+ int b = SkColorGetB(color);
+ // Choose whether to draw using a light-on-dark mask based on observed
+ // color/luminance thresholds that CoreText uses.
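+ // e.g. white (255,255,255) passes both checks; 50% gray (128,128,128) fails the sum check (384 < 510).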
+ if (r >= 85 && g >= 85 && b >= 85 && r + g + b >= 2 * 255) {
+ rec->fFlags |= SkScalerContext::kLightOnDark_Flag;
+ }
+ }
+
+ // Unhinted A8 masks (those not derived from LCD masks) must respect SK_GAMMA_APPLY_TO_A8.
+ // All other masks can use regular gamma.
+ if (SkMask::kA8_Format == rec->fMaskFormat && SkFontHinting::kNone == rec->getHinting()) {
+#ifndef SK_GAMMA_APPLY_TO_A8
+ // SRGBTODO: Is this correct? Do we want contrast boost?
+ rec->ignorePreBlend();
+#endif
+ } else {
+#ifndef SK_IGNORE_MAC_BLENDING_MATCH_FIX
+ SkColor color = rec->getLuminanceColor();
+ if (smoothBehavior == SmoothBehavior::some) {
+ // CoreGraphics smoothed text without subpixel coverage blitting goes from a gamma of
+ // 2.0 for black foreground to a gamma of 1.0 for white foreground. Emulate this
+ // through the mask gamma by reducing the color values to 1/2.
+ color = SkColorSetRGB(SkColorGetR(color) * 1/2,
+ SkColorGetG(color) * 1/2,
+ SkColorGetB(color) * 1/2);
+ } else if (smoothBehavior == SmoothBehavior::subpixel) {
+ // CoreGraphics smoothed text with subpixel coverage blitting goes from a gamma of
+ // 2.0 for black foreground to a gamma of ~1.4? for white foreground. Emulate this
+ // through the mask gamma by reducing the color values to 3/4.
+ color = SkColorSetRGB(SkColorGetR(color) * 3/4,
+ SkColorGetG(color) * 3/4,
+ SkColorGetB(color) * 3/4);
+ }
+ rec->setLuminanceColor(color);
+#endif
+ // CoreGraphics dilates smoothed text to provide contrast.
+ rec->setContrast(0);
+ }
+}
+
+/** Takes ownership of the CFStringRef. */
+static const char* get_str(CFStringRef ref, SkString* str) {
+ if (nullptr == ref) {
+ return nullptr;
+ }
+ CFStringToSkString(ref, str);
+ CFRelease(ref);
+ return str->c_str();
+}
+
+void SkTypeface_Mac::onGetFamilyName(SkString* familyName) const {
+ get_str(CTFontCopyFamilyName(fFontRef.get()), familyName);
+}
+
+void SkTypeface_Mac::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString tmpStr;
+
+ desc->setFamilyName(get_str(CTFontCopyFamilyName(fFontRef.get()), &tmpStr));
+ desc->setFullName(get_str(CTFontCopyFullName(fFontRef.get()), &tmpStr));
+ desc->setPostscriptName(get_str(CTFontCopyPostScriptName(fFontRef.get()), &tmpStr));
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = fIsFromStream;
+}
+
+void SkTypeface_Mac::onCharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
+ // Undocumented behavior of CTFontGetGlyphsForCharacters with non-BMP code points:
+ // when a surrogate pair is detected, the glyph index used is the index of the high surrogate.
+ // It is documented that if a mapping is unavailable, the glyph will be set to 0.
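+ // e.g. U+1F600 expands to two UTF-16 units: the glyph lands in the high-surrogate
+ // slot, and the low-surrogate slot must be skipped when compacting below.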
+
+ SkAutoSTMalloc<1024, UniChar> charStorage;
+ const UniChar* src; // UniChar is a UTF-16 16-bit code unit.
+ int srcCount;
+ const SkUnichar* utf32 = reinterpret_cast<const SkUnichar*>(uni);
+ UniChar* utf16 = charStorage.reset(2 * count);
+ src = utf16;
+ for (int i = 0; i < count; ++i) {
+ utf16 += SkUTF::ToUTF16(utf32[i], utf16);
+ }
+ srcCount = SkToInt(utf16 - src);
+
+ // If there are any non-bmp code points, the provided 'glyphs' storage will be inadequate.
+ SkAutoSTMalloc<1024, uint16_t> glyphStorage;
+ uint16_t* macGlyphs = glyphs;
+ if (srcCount > count) {
+ macGlyphs = glyphStorage.reset(srcCount);
+ }
+
+ CTFontGetGlyphsForCharacters(fFontRef.get(), src, macGlyphs, srcCount);
+
+ // If all code points were BMP, 'glyphs' already contains the compact glyphs.
+ // If any were non-BMP, copy and compact into 'glyphs'.
+ if (srcCount > count) {
+ SkASSERT(glyphs != macGlyphs);
+ int extra = 0;
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = macGlyphs[i + extra];
+ if (SkUTF16_IsLeadingSurrogate(src[i + extra])) {
+ ++extra;
+ }
+ }
+ } else {
+ SkASSERT(glyphs == macGlyphs);
+ }
+}
+
+int SkTypeface_Mac::onCountGlyphs() const {
+ return SkToInt(CTFontGetGlyphCount(fFontRef.get()));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_desc_str(CTFontDescriptorRef desc, CFStringRef name, SkString* value) {
+ SkUniqueCFRef<CFStringRef> ref((CFStringRef)CTFontDescriptorCopyAttribute(desc, name));
+ if (!ref) {
+ return false;
+ }
+ CFStringToSkString(ref.get(), value);
+ return true;
+}
+
+#include "include/core/SkFontMgr.h"
+
+static inline int sqr(int value) {
+ SkASSERT(SkAbs32(value) < 0x7FFF); // check for overflow
+ return value * value;
+}
+
+// We normalize each axis (weight, width, italic) to be base-900
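+// e.g. weight 400 vs 700 with equal widths and differing slants scores 300^2 + 0 + 900^2 == 900000.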
+static int compute_metric(const SkFontStyle& a, const SkFontStyle& b) {
+ return sqr(a.weight() - b.weight()) +
+ sqr((a.width() - b.width()) * 100) +
+ sqr((a.slant() != b.slant()) * 900);
+}
+
+class SkFontStyleSet_Mac : public SkFontStyleSet {
+public:
+ SkFontStyleSet_Mac(CTFontDescriptorRef desc)
+ : fArray(CTFontDescriptorCreateMatchingFontDescriptors(desc, nullptr))
+ , fCount(0)
+ {
+ if (!fArray) {
+ fArray.reset(CFArrayCreate(nullptr, nullptr, 0, nullptr));
+ }
+ fCount = SkToInt(CFArrayGetCount(fArray.get()));
+ }
+
+ int count() override {
+ return fCount;
+ }
+
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), index);
+ if (style) {
+ *style = fontstyle_from_descriptor(desc, false);
+ }
+ if (name) {
+ if (!find_desc_str(desc, kCTFontStyleNameAttribute, name)) {
+ name->reset();
+ }
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT((unsigned)index < (unsigned)CFArrayGetCount(fArray.get()));
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), index);
+
+ return create_from_desc(desc).release();
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ if (0 == fCount) {
+ return nullptr;
+ }
+ return create_from_desc(findMatchingDesc(pattern)).release();
+ }
+
+private:
+ SkUniqueCFRef<CFArrayRef> fArray;
+ int fCount;
+
+ CTFontDescriptorRef findMatchingDesc(const SkFontStyle& pattern) const {
+ int bestMetric = SK_MaxS32;
+ CTFontDescriptorRef bestDesc = nullptr;
+
+ for (int i = 0; i < fCount; ++i) {
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), i);
+ int metric = compute_metric(pattern, fontstyle_from_descriptor(desc, false));
+ if (0 == metric) {
+ return desc;
+ }
+ if (metric < bestMetric) {
+ bestMetric = metric;
+ bestDesc = desc;
+ }
+ }
+ SkASSERT(bestDesc);
+ return bestDesc;
+ }
+};
+
+class SkFontMgr_Mac : public SkFontMgr {
+ SkUniqueCFRef<CFArrayRef> fNames;
+ int fCount;
+
+ CFStringRef getFamilyNameAt(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return (CFStringRef)CFArrayGetValueAtIndex(fNames.get(), index);
+ }
+
+ static SkFontStyleSet* CreateSet(CFStringRef cfFamilyName) {
+ SkUniqueCFRef<CFMutableDictionaryRef> cfAttr(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CFDictionaryAddValue(cfAttr.get(), kCTFontFamilyNameAttribute, cfFamilyName);
+
+ SkUniqueCFRef<CTFontDescriptorRef> desc(
+ CTFontDescriptorCreateWithAttributes(cfAttr.get()));
+ return new SkFontStyleSet_Mac(desc.get());
+ }
+
+ /** CTFontManagerCopyAvailableFontFamilyNames() is not always available, so we
+ * provide a wrapper here that will return an empty array if need be.
+ */
+ static SkUniqueCFRef<CFArrayRef> CopyAvailableFontFamilyNames() {
+#ifdef SK_BUILD_FOR_IOS
+ return SkUniqueCFRef<CFArrayRef>(CFArrayCreate(nullptr, nullptr, 0, nullptr));
+#else
+ return SkUniqueCFRef<CFArrayRef>(CTFontManagerCopyAvailableFontFamilyNames());
+#endif
+ }
+
+public:
+ SkFontMgr_Mac()
+ : fNames(CopyAvailableFontFamilyNames())
+ , fCount(fNames ? SkToInt(CFArrayGetCount(fNames.get())) : 0) {}
+
+protected:
+ int onCountFamilies() const override {
+ return fCount;
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if ((unsigned)index < (unsigned)fCount) {
+ CFStringToSkString(this->getFamilyNameAt(index), familyName);
+ } else {
+ familyName->reset();
+ }
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if ((unsigned)index >= (unsigned)fCount) {
+ return nullptr;
+ }
+ return CreateSet(this->getFamilyNameAt(index));
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CFStringRef> cfName = make_CFString(familyName);
+ return CreateSet(cfName.get());
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override {
+ SkUniqueCFRef<CTFontDescriptorRef> desc = create_descriptor(familyName, style);
+ return create_from_desc(desc.get()).release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ SkUniqueCFRef<CTFontDescriptorRef> desc = create_descriptor(familyName, style);
+ SkUniqueCFRef<CTFontRef> familyFont(CTFontCreateWithFontDescriptor(desc.get(), 0, nullptr));
+
+ // kCFStringEncodingUTF32 is BE unless there is a BOM.
+ // Since there is no machine endian option, explicitly state machine endian.
+#ifdef SK_CPU_LENDIAN
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32LE;
+#else
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32BE;
+#endif
+ SkUniqueCFRef<CFStringRef> string(CFStringCreateWithBytes(
+ kCFAllocatorDefault, reinterpret_cast<const UInt8 *>(&character), sizeof(character),
+ encoding, false));
+ CFRange range = CFRangeMake(0, CFStringGetLength(string.get())); // in UniChar units.
+ SkUniqueCFRef<CTFontRef> fallbackFont(
+ CTFontCreateForString(familyFont.get(), string.get(), range));
+ return create_from_CTFontRef(std::move(fallbackFont), nullptr, nullptr).release();
+ }
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle&) const override {
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ SkUniqueCFRef<CGDataProviderRef> pr(SkCreateDataProviderFromData(data));
+ if (!pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(std::move(pr), SkMemoryStream::Make(std::move(data)),
+ ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ SkUniqueCFRef<CGDataProviderRef> pr(SkCreateDataProviderFromStream(stream->duplicate()));
+ if (!pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(std::move(pr), std::move(stream), ttcIndex);
+ }
+
+ /** Creates a dictionary suitable for setting the axes on a CGFont. */
+ static SkUniqueCFRef<CFDictionaryRef> copy_axes(CGFontRef cg, const SkFontArguments& args) {
+ // The CGFont variation data is keyed by name, but lacks the tag.
+ // The CTFont variation data is keyed by tag, and also has the name.
+ // We would like to work with CTFont variations, but creating a CTFont font with
+ // CTFont variation dictionary runs into bugs. So use the CTFont variation data
+ // to match names to tags to create the appropriate CGFont.
+ SkUniqueCFRef<CTFontRef> ct(CTFontCreateWithGraphicsFont(cg, 0, nullptr, nullptr));
+ // CTFontCopyVariationAxes returns nullptr for CGFontCreateWithDataProvider fonts with
+ // macOS 10.10 and iOS 9 or earlier. When this happens, there is no API to provide the tag.
+ SkUniqueCFRef<CFArrayRef> ctAxes(CTFontCopyVariationAxes(ct.get()));
+ if (!ctAxes) {
+ return nullptr;
+ }
+ CFIndex axisCount = CFArrayGetCount(ctAxes.get());
+
+ const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();
+
+ SkUniqueCFRef<CFMutableDictionaryRef> dict(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, axisCount,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ for (int i = 0; i < axisCount; ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(ctAxes.get(), i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ // The assumption is that values produced by kCTFontVariationAxisNameKey and
+ // kCGFontVariationAxisName will always be equal.
+ // If they are ever not, search the project history for "get_tag_for_name".
+ CFTypeRef axisName = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisNameKey);
+ if (!axisName || CFGetTypeID(axisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ CFTypeRef tag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
+ if (!tag || CFGetTypeID(tag) != CFNumberGetTypeID()) {
+ return nullptr;
+ }
+ CFNumberRef tagNumber = static_cast<CFNumberRef>(tag);
+ int64_t tagLong;
+ if (!CFNumberGetValue(tagNumber, kCFNumberSInt64Type, &tagLong)) {
+ return nullptr;
+ }
+
+ // The variation axes can be set to any value, but cg will effectively pin them.
+ // Pin them here to normalize.
+ CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMinimumValueKey);
+ CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMaximumValueKey);
+ CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisDefaultValueKey);
+ if (!min || CFGetTypeID(min) != CFNumberGetTypeID() ||
+ !max || CFGetTypeID(max) != CFNumberGetTypeID() ||
+ !def || CFGetTypeID(def) != CFNumberGetTypeID())
+ {
+ return nullptr;
+ }
+ CFNumberRef minNumber = static_cast<CFNumberRef>(min);
+ CFNumberRef maxNumber = static_cast<CFNumberRef>(max);
+ CFNumberRef defNumber = static_cast<CFNumberRef>(def);
+ double minDouble;
+ double maxDouble;
+ double defDouble;
+ if (!CFNumberGetValue(minNumber, kCFNumberDoubleType, &minDouble) ||
+ !CFNumberGetValue(maxNumber, kCFNumberDoubleType, &maxDouble) ||
+ !CFNumberGetValue(defNumber, kCFNumberDoubleType, &defDouble))
+ {
+ return nullptr;
+ }
+
+ double value = defDouble;
+ // The position may be over-specified. If there are multiple values for a given axis,
+ // use the last one since that's what css-fonts-4 requires.
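+ // Scanning from the end means the first match found is the last one specified.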
+ for (int j = position.coordinateCount; j --> 0;) {
+ if (position.coordinates[j].axis == tagLong) {
+ value = SkTPin(SkScalarToDouble(position.coordinates[j].value),
+ minDouble, maxDouble);
+ break;
+ }
+ }
+ SkUniqueCFRef<CFNumberRef> valueNumber(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &value));
+ CFDictionaryAddValue(dict.get(), axisName, valueNumber.get());
+ }
+ return dict;
+ }
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> s,
+ const SkFontArguments& args) const override {
+ if (args.getCollectionIndex() != 0) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CGDataProviderRef> provider(SkCreateDataProviderFromStream(s->duplicate()));
+ if (!provider) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CGFontRef> cg(CGFontCreateWithDataProvider(provider.get()));
+ if (!cg) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFDictionaryRef> cgVariations = copy_axes(cg.get(), args);
+ // The CGFontRef returned by CGFontCreateCopyWithVariations when the passed CGFontRef was
+ // created from a data provider does not appear to have any ownership of the underlying
+ // data. The original CGFontRef must be kept alive until the copy is no longer used.
+ SkUniqueCFRef<CGFontRef> cgVariant;
+ if (cgVariations) {
+ cgVariant.reset(CGFontCreateCopyWithVariations(cg.get(), cgVariations.get()));
+ } else {
+ cgVariant.reset(cg.release());
+ }
+
+ SkUniqueCFRef<CTFontRef> ct(
+ CTFontCreateWithGraphicsFont(cgVariant.get(), 0, nullptr, nullptr));
+ if (!ct) {
+ return nullptr;
+ }
+ return create_from_CTFontRef(std::move(ct), std::move(cg), std::move(s));
+ }
+
+ /** Creates a dictionary suitable for setting the axes on a CGFont. */
+ static SkUniqueCFRef<CFDictionaryRef> copy_axes(CGFontRef cg, SkFontData* fontData) {
+ SkUniqueCFRef<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cg));
+ if (!cgAxes) {
+ return nullptr;
+ }
+
+ CFIndex axisCount = CFArrayGetCount(cgAxes.get());
+ if (0 == axisCount || axisCount != fontData->getAxisCount()) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFMutableDictionaryRef> dict(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, axisCount,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ for (int i = 0; i < fontData->getAxisCount(); ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(cgAxes.get(), i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ CFTypeRef axisName = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisName);
+ if (!axisName || CFGetTypeID(axisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ // The variation axes can be set to any value, but cg will effectively pin them.
+ // Pin them here to normalize.
+ CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMinValue);
+ CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMaxValue);
+ if (!min || CFGetTypeID(min) != CFNumberGetTypeID() ||
+ !max || CFGetTypeID(max) != CFNumberGetTypeID())
+ {
+ return nullptr;
+ }
+ CFNumberRef minNumber = static_cast<CFNumberRef>(min);
+ CFNumberRef maxNumber = static_cast<CFNumberRef>(max);
+ double minDouble;
+ double maxDouble;
+ if (!CFNumberGetValue(minNumber, kCFNumberDoubleType, &minDouble) ||
+ !CFNumberGetValue(maxNumber, kCFNumberDoubleType, &maxDouble))
+ {
+ return nullptr;
+ }
+ double value = SkTPin(SkFixedToDouble(fontData->getAxis()[i]), minDouble, maxDouble);
+ SkUniqueCFRef<CFNumberRef> valueNumber(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &value));
+ CFDictionaryAddValue(dict.get(), axisName, valueNumber.get());
+ }
+ return dict;
+ }
+ sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData> fontData) const override {
+ if (fontData->getIndex() != 0) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CGDataProviderRef> provider(
+ SkCreateDataProviderFromStream(fontData->getStream()->duplicate()));
+ if (!provider) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CGFontRef> cg(CGFontCreateWithDataProvider(provider.get()));
+ if (!cg) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFDictionaryRef> cgVariations = copy_axes(cg.get(), fontData.get());
+ // The CGFontRef returned by CGFontCreateCopyWithVariations when the passed CGFontRef was
+ // created from a data provider does not appear to have any ownership of the underlying
+ // data. The original CGFontRef must be kept alive until the copy is no longer used.
+ SkUniqueCFRef<CGFontRef> cgVariant;
+ if (cgVariations) {
+ cgVariant.reset(CGFontCreateCopyWithVariations(cg.get(), cgVariations.get()));
+ } else {
+ cgVariant.reset(cg.release());
+ }
+
+ SkUniqueCFRef<CTFontRef> ct(
+ CTFontCreateWithGraphicsFont(cgVariant.get(), 0, nullptr, nullptr));
+ if (!ct) {
+ return nullptr;
+ }
+ return create_from_CTFontRef(std::move(ct), std::move(cg), fontData->detachStream());
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ SkUniqueCFRef<CGDataProviderRef> pr(CGDataProviderCreateWithFilename(path));
+ if (!pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(std::move(pr), SkFILEStream::Make(path), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ familyName = map_css_names(familyName);
+ }
+
+ sk_sp<SkTypeface> face = create_from_name(familyName, style);
+ if (face) {
+ return face;
+ }
+
+ static SkTypeface* gDefaultFace;
+ static SkOnce lookupDefault;
+ static const char FONT_DEFAULT_NAME[] = "Lucida Sans";
+ lookupDefault([]{
+ gDefaultFace = create_from_name(FONT_DEFAULT_NAME, SkFontStyle()).release();
+ });
+ return sk_ref_sp(gDefaultFace);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() { return sk_make_sp<SkFontMgr_Mac>(); }
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
new file mode 100644
index 0000000000..2090727ae2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
@@ -0,0 +1,2287 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkData.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/ports/SkTypeface_win.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkOnce.h"
+#include "include/private/SkTemplates.h"
+#include "include/private/SkTo.h"
+#include "include/utils/SkBase64.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkLeanWindows.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/core/SkUtils.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTTable_maxp.h"
+#include "src/sfnt/SkOTTable_name.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/utils/SkMatrix22.h"
+#include "src/utils/win/SkHRESULT.h"
+
+#include <tchar.h>
+#include <usp10.h>
+#include <objbase.h>
+
+static void (*gEnsureLOGFONTAccessibleProc)(const LOGFONT&);
+
+void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*proc)(const LOGFONT&)) {
+ gEnsureLOGFONTAccessibleProc = proc;
+}
+
+static void call_ensure_accessible(const LOGFONT& lf) {
+ if (gEnsureLOGFONTAccessibleProc) {
+ gEnsureLOGFONTAccessibleProc(lf);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// always packed xxRRGGBB
+typedef uint32_t SkGdiRGB;
+
+// define this in your Makefile or .gyp to enforce AA requests
+// which GDI ignores at small sizes. This flag guarantees AA
+// for rotated text, regardless of GDI's notions.
+//#define SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+static bool needToRenderWithSkia(const SkScalerContextRec& rec) {
+#ifdef SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+ // What we really want to catch is when GDI will ignore the AA request and give
+ // us BW instead. Smallish rotated text is one heuristic, so this code is just
+ // an approximation. We shouldn't need to do this for larger sizes, but at those
+ // sizes, the quality difference gets less and less between our general
+ // scanconverter and GDI's.
+ if (SkMask::kA8_Format == rec.fMaskFormat && !isAxisAligned(rec)) {
+ return true;
+ }
+#endif
+ return rec.getHinting() == SkFontHinting::kNone || rec.getHinting() == SkFontHinting::kSlight;
+}
+
+static void tchar_to_skstring(const TCHAR t[], SkString* s) {
+#ifdef UNICODE
+ size_t sSize = WideCharToMultiByte(CP_UTF8, 0, t, -1, nullptr, 0, nullptr, nullptr);
+ s->resize(sSize);
+ WideCharToMultiByte(CP_UTF8, 0, t, -1, s->writable_str(), sSize, nullptr, nullptr);
+#else
+ s->set(t);
+#endif
+}
+
+static void dcfontname_to_skstring(HDC deviceContext, const LOGFONT& lf, SkString* familyName) {
+ int fontNameLen; //length of fontName in TCHARS.
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ call_ensure_accessible(lf);
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ fontNameLen = 0;
+ }
+ }
+
+ SkAutoSTArray<LF_FULLFACESIZE, TCHAR> fontName(fontNameLen+1);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ fontName[0] = 0;
+ }
+ }
+
+ tchar_to_skstring(fontName.get(), familyName);
+}
+
+static void make_canonical(LOGFONT* lf) {
+ lf->lfHeight = -64;
+ lf->lfWidth = 0; // lfWidth is related to lfHeight, not to the OS/2::usWidthClass.
+ lf->lfQuality = CLEARTYPE_QUALITY;//PROOF_QUALITY;
+ lf->lfCharSet = DEFAULT_CHARSET;
+// lf->lfClipPrecision = 64;
+}
+
+static SkFontStyle get_style(const LOGFONT& lf) {
+ return SkFontStyle(lf.lfWeight,
+ SkFontStyle::kNormal_Width,
+ lf.lfItalic ? SkFontStyle::kItalic_Slant : SkFontStyle::kUpright_Slant);
+}
+
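+// Both SkFixed and GDI's FIXED are 16.16 fixed-point values with the same bit
+// layout (FIXED is {WORD fract; short value;} on little-endian Windows), so
+// these conversions are lossless bit reinterpretations.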
+static inline FIXED SkFixedToFIXED(SkFixed x) {
+ return *(FIXED*)(&x);
+}
+static inline SkFixed SkFIXEDToFixed(FIXED x) {
+ return *(SkFixed*)(&x);
+}
+
+static inline FIXED SkScalarToFIXED(SkScalar x) {
+ return SkFixedToFIXED(SkScalarToFixed(x));
+}
+
+static inline SkScalar SkFIXEDToScalar(FIXED x) {
+ return SkFixedToScalar(SkFIXEDToFixed(x));
+}
+
+static unsigned calculateGlyphCount(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmLastChar;
+ }
+
+ // The 'maxp' table stores the number of glyphs at offset 4, in 2 bytes.
+ uint16_t glyphs;
+ if (GDI_ERROR != GetFontData(hdc, SkOTTableMaximumProfile::TAG, 4, &glyphs, sizeof(glyphs))) {
+ return SkEndian_SwapBE16(glyphs);
+ }
+
+ // Binary search for glyph count.
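+    // Glyph ids are contiguous starting at 0, and GetGlyphOutlineW fails for
+    // ids past the end of the font, so the search converges on the first
+    // invalid id, which equals the glyph count.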
+ static const MAT2 mat2 = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
+ int32_t max = UINT16_MAX + 1;
+ int32_t min = 0;
+ GLYPHMETRICS gm;
+ while (min < max) {
+ int32_t mid = min + ((max - min) / 2);
+ if (GetGlyphOutlineW(hdc, mid, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0,
+ nullptr, &mat2) == GDI_ERROR) {
+ max = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ SkASSERT(min == max);
+ return min;
+}
+
+static unsigned calculateUPEM(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmMaxCharWidth;
+ }
+
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+
+ return (0 == otmRet) ? 0 : otm.otmEMSquare;
+}
+
+class SkAutoHDC {
+public:
+ explicit SkAutoHDC(const LOGFONT& lf)
+ : fHdc(::CreateCompatibleDC(nullptr))
+ , fFont(::CreateFontIndirect(&lf))
+ , fSavefont((HFONT)::SelectObject(fHdc, fFont))
+ { }
+ ~SkAutoHDC() {
+ if (fHdc) {
+ ::SelectObject(fHdc, fSavefont);
+ ::DeleteDC(fHdc);
+ }
+ if (fFont) {
+ ::DeleteObject(fFont);
+ }
+ }
+ operator HDC() { return fHdc; }
+private:
+ HDC fHdc;
+ HFONT fFont;
+ HFONT fSavefont;
+};
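+// Requiring a local variable name guards against 'SkAutoHDC(lf);', an unnamed
+// temporary that would select and then deselect the font immediately.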
+#define SkAutoHDC(...) SK_REQUIRE_LOCAL_VAR(SkAutoHDC)
+
+class LogFontTypeface : public SkTypeface {
+public:
+ LogFontTypeface(const SkFontStyle& style, const LOGFONT& lf, bool serializeAsStream)
+ : SkTypeface(style, false)
+ , fLogFont(lf)
+ , fSerializeAsStream(serializeAsStream)
+ {
+ SkAutoHDC hdc(fLogFont);
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+
+ // The fixed pitch bit is set if the font is *not* fixed pitch.
+ this->setIsFixedPitch((textMetric.tmPitchAndFamily & TMPF_FIXED_PITCH) == 0);
+ this->setFontStyle(SkFontStyle(textMetric.tmWeight, style.width(), style.slant()));
+
+        // We used a logfont on a memory context, so we should never get a
+        // device font. Therefore any TMPF_DEVICE font will be PostScript (cubic).
+ // If the font has cubic outlines, it will not be rendered with ClearType.
+ fCanBeLCD = !((textMetric.tmPitchAndFamily & TMPF_VECTOR) &&
+ (textMetric.tmPitchAndFamily & TMPF_DEVICE));
+ }
+
+ LOGFONT fLogFont;
+ bool fSerializeAsStream;
+ bool fCanBeLCD;
+
+ static sk_sp<LogFontTypeface> Make(const LOGFONT& lf) {
+ return sk_sp<LogFontTypeface>(new LogFontTypeface(get_style(lf), lf, false));
+ }
+
+ static void EnsureAccessible(const SkTypeface* face) {
+ call_ensure_accessible(static_cast<const LogFontTypeface*>(face)->fLogFont);
+ }
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void getGlyphToUnicodeMap(SkUnichar*) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override
+ {
+ return -1;
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return -1;
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+};
+
+class FontMemResourceTypeface : public LogFontTypeface {
+public:
+ /**
+ * The created FontMemResourceTypeface takes ownership of fontMemResource.
+ */
+ static sk_sp<FontMemResourceTypeface> Make(const LOGFONT& lf, HANDLE fontMemResource) {
+ return sk_sp<FontMemResourceTypeface>(
+ new FontMemResourceTypeface(get_style(lf), lf, fontMemResource));
+ }
+
+protected:
+ void weak_dispose() const override {
+ RemoveFontMemResourceEx(fFontMemResource);
+ INHERITED::weak_dispose();
+ }
+
+private:
+ /**
+ * Takes ownership of fontMemResource.
+ */
+ FontMemResourceTypeface(const SkFontStyle& style, const LOGFONT& lf, HANDLE fontMemResource)
+ : LogFontTypeface(style, lf, true), fFontMemResource(fontMemResource)
+ { }
+
+ HANDLE fFontMemResource;
+
+ typedef LogFontTypeface INHERITED;
+};
+
+static const LOGFONT& get_default_font() {
+ static LOGFONT gDefaultFont;
+ return gDefaultFont;
+}
+
+static bool FindByLogFont(SkTypeface* face, void* ctx) {
+ LogFontTypeface* lface = static_cast<LogFontTypeface*>(face);
+ const LOGFONT* lf = reinterpret_cast<const LOGFONT*>(ctx);
+
+ return !memcmp(&lface->fLogFont, lf, sizeof(LOGFONT));
+}
+
+/**
+ * This guy is public. It first searches the cache, and if a match is not found,
+ * it creates a new face.
+ */
+SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT& origLF) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ sk_sp<SkTypeface> face = SkTypefaceCache::FindByProcAndRef(FindByLogFont, &lf);
+ if (!face) {
+ face = LogFontTypeface::Make(lf);
+ SkTypefaceCache::Add(face);
+ }
+ return face.release();
+}
+
+/***
+ * This guy is public.
+ */
+SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ int aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel)
+{
+ return DWriteFontTypeface::Create(aFactory, aFontFace, aStyle,
+ (DWRITE_RENDERING_MODE)aRenderingMode,
+ aGamma, aContrast, aClearTypeLevel);
+}
+
+/**
+ * The created SkTypeface takes ownership of fontMemResource.
+ */
+sk_sp<SkTypeface> SkCreateFontMemResourceTypefaceFromLOGFONT(const LOGFONT& origLF, HANDLE fontMemResource) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ // We'll never get a cache hit, so no point in putting this in SkTypefaceCache.
+ return FontMemResourceTypeface::Make(lf, fontMemResource);
+}
+
+/**
+ * This guy is public
+ */
+void SkLOGFONTFromTypeface(const SkTypeface* face, LOGFONT* lf) {
+ if (nullptr == face) {
+ *lf = get_default_font();
+ } else {
+ *lf = static_cast<const LogFontTypeface*>(face)->fLogFont;
+ }
+}
+
+// Construct the glyph-to-Unicode table.
+// Unicode code points that require surrogate pairs in UTF-16 are not
+// supported.
+// TODO(arthurhsu): Add support for surrogate pairs. It looks like that may
+// require parsing the TTF cmap table (platform 4, encoding 12) directly instead
+// of calling GetFontUnicodeRanges().
+static void populate_glyph_to_unicode(HDC fontHdc, const unsigned glyphCount,
+ SkUnichar* glyphToUnicode) {
+ sk_bzero(glyphToUnicode, sizeof(SkUnichar) * glyphCount);
+ DWORD glyphSetBufferSize = GetFontUnicodeRanges(fontHdc, nullptr);
+ if (!glyphSetBufferSize) {
+ return;
+ }
+
+ std::unique_ptr<BYTE[]> glyphSetBuffer(new BYTE[glyphSetBufferSize]);
+ GLYPHSET* glyphSet =
+ reinterpret_cast<LPGLYPHSET>(glyphSetBuffer.get());
+ if (GetFontUnicodeRanges(fontHdc, glyphSet) != glyphSetBufferSize) {
+ return;
+ }
+
+ for (DWORD i = 0; i < glyphSet->cRanges; ++i) {
+        // There is no guarantee that the glyph ids corresponding to a Unicode
+        // range are contiguous in the font file, so even with ranges we cannot
+        // compute the result from just the first and last entries. We need to
+        // enumerate them one by one.
+ int count = glyphSet->ranges[i].cGlyphs;
+ SkAutoTArray<WCHAR> chars(count + 1);
+            chars[count] = 0; // terminate string
+ SkAutoTArray<WORD> glyph(count);
+ for (USHORT j = 0; j < count; ++j) {
+ chars[j] = glyphSet->ranges[i].wcLow + j;
+ }
+ GetGlyphIndicesW(fontHdc, chars.get(), count, glyph.get(),
+ GGI_MARK_NONEXISTING_GLYPHS);
+        // If the glyph ID is valid and the glyph is not yet mapped, fill in
+        // the char id. If the glyph is already mapped, skip it.
+        // TODO(arthurhsu): improve this, e.g. get all used char ids from the
+        // font cache, then generate this mapping table from there. Collisions
+        // are unlikely since glyph reuse happens mostly across different
+        // Unicode pages.
+ for (USHORT j = 0; j < count; ++j) {
+ if (glyph[j] != 0xFFFF && glyph[j] < glyphCount && glyphToUnicode[glyph[j]] == 0) {
+ glyphToUnicode[glyph[j]] = chars[j];
+ }
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
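+// Round a bit count up to a multiple of 32, since DIB scanlines must be
+// DWORD (32-bit) aligned; 1bpp rows are padded out to this boundary.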
+static int alignTo32(int n) {
+ return (n + 31) & ~31;
+}
+
+struct MyBitmapInfo : public BITMAPINFO {
+ RGBQUAD fMoreSpaceForColors[1];
+};
+
+class HDCOffscreen {
+public:
+ HDCOffscreen() = default;
+
+ ~HDCOffscreen() {
+ if (fDC) {
+ ::SelectObject(fDC, fSavefont);
+ ::DeleteDC(fDC);
+ }
+ if (fBM) {
+ DeleteObject(fBM);
+ }
+ }
+
+ void init(HFONT font, const XFORM& xform) {
+ fFont = font;
+ fXform = xform;
+ }
+
+ const void* draw(const SkGlyph&, bool isBW, size_t* srcRBPtr);
+
+private:
+ HDC fDC{0};
+ HFONT fSavefont{0};
+ HBITMAP fBM{0};
+ HFONT fFont{0};
+ XFORM fXform{1, 0, 0, 1, 0, 0};
+ void* fBits{nullptr}; // points into fBM
+ int fWidth{0};
+ int fHeight{0};
+ bool fIsBW{false};
+};
+
+const void* HDCOffscreen::draw(const SkGlyph& glyph, bool isBW,
+ size_t* srcRBPtr) {
+ // Can we share the scalercontext's fDDC, so we don't need to create
+ // a separate fDC here?
+ if (0 == fDC) {
+ fDC = CreateCompatibleDC(0);
+ if (0 == fDC) {
+ return nullptr;
+ }
+ SetGraphicsMode(fDC, GM_ADVANCED);
+ SetBkMode(fDC, TRANSPARENT);
+ SetTextAlign(fDC, TA_LEFT | TA_BASELINE);
+ fSavefont = (HFONT)SelectObject(fDC, fFont);
+
+ COLORREF color = 0x00FFFFFF;
+ SkDEBUGCODE(COLORREF prev =) SetTextColor(fDC, color);
+ SkASSERT(prev != CLR_INVALID);
+ }
+
+ if (fBM && (fIsBW != isBW || fWidth < glyph.width() || fHeight < glyph.height())) {
+ DeleteObject(fBM);
+ fBM = 0;
+ }
+ fIsBW = isBW;
+
+ fWidth = SkMax32(fWidth, glyph.width());
+ fHeight = SkMax32(fHeight, glyph.height());
+
+ int biWidth = isBW ? alignTo32(fWidth) : fWidth;
+
+ if (0 == fBM) {
+ MyBitmapInfo info;
+ sk_bzero(&info, sizeof(info));
+ if (isBW) {
+ RGBQUAD blackQuad = { 0, 0, 0, 0 };
+ RGBQUAD whiteQuad = { 0xFF, 0xFF, 0xFF, 0 };
+ info.bmiColors[0] = blackQuad;
+ info.bmiColors[1] = whiteQuad;
+ }
+ info.bmiHeader.biSize = sizeof(info.bmiHeader);
+ info.bmiHeader.biWidth = biWidth;
+ info.bmiHeader.biHeight = fHeight;
+ info.bmiHeader.biPlanes = 1;
+ info.bmiHeader.biBitCount = isBW ? 1 : 32;
+ info.bmiHeader.biCompression = BI_RGB;
+ if (isBW) {
+ info.bmiHeader.biClrUsed = 2;
+ }
+ fBM = CreateDIBSection(fDC, &info, DIB_RGB_COLORS, &fBits, 0, 0);
+ if (0 == fBM) {
+ return nullptr;
+ }
+ SelectObject(fDC, fBM);
+ }
+
+ // erase
+ size_t srcRB = isBW ? (biWidth >> 3) : (fWidth << 2);
+ size_t size = fHeight * srcRB;
+ memset(fBits, 0, size);
+
+ XFORM xform = fXform;
+ xform.eDx = (float)-glyph.left();
+ xform.eDy = (float)-glyph.top();
+ SetWorldTransform(fDC, &xform);
+
+ uint16_t glyphID = glyph.getGlyphID();
+ BOOL ret = ExtTextOutW(fDC, 0, 0, ETO_GLYPH_INDEX, nullptr, reinterpret_cast<LPCWSTR>(&glyphID), 1, nullptr);
+ GdiFlush();
+ if (0 == ret) {
+ return nullptr;
+ }
+ *srcRBPtr = srcRB;
+ // offset to the start of the image
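+    // (The DIB has a positive biHeight, so rows are stored bottom-up; this
+    // pointer addresses the glyph's bottom scanline in memory.)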
+ return (const char*)fBits + (fHeight - glyph.height()) * srcRB;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#define BUFFERSIZE (1 << 13)
+
+class SkScalerContext_GDI : public SkScalerContext {
+public:
+ SkScalerContext_GDI(sk_sp<LogFontTypeface>,
+ const SkScalerContextEffects&,
+ const SkDescriptor* desc);
+ ~SkScalerContext_GDI() override;
+
+ // Returns true if the constructor was able to complete all of its
+ // initializations (which may include calling GDI).
+ bool isValid() const;
+
+protected:
+ unsigned generateGlyphCount() override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyph, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ DWORD getGDIGlyphPath(SkGlyphID glyph, UINT flags,
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf);
+ template<bool APPLY_PREBLEND>
+ static void RGBToA8(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph, const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND>
+ static void RGBToLcd16(const SkGdiRGB* SK_RESTRICT src, size_t srcRB, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB);
+
+ HDCOffscreen fOffscreen;
+ /** fGsA is the non-rotational part of total matrix without the text height scale.
+ * Used to find the magnitude of advances.
+ */
+ MAT2 fGsA;
+ /** The total matrix without the textSize. */
+ MAT2 fMat22;
+ /** Scales font to EM size. */
+ MAT2 fHighResMat22;
+ HDC fDDC;
+ HFONT fSavefont;
+ HFONT fFont;
+ SCRIPT_CACHE fSC;
+ int fGlyphCount;
+
+ /** The total matrix which also removes EM scale. */
+ SkMatrix fHiResMatrix;
+ /** fG_inv is the inverse of the rotational part of the total matrix.
+ * Used to set the direction of advances.
+ */
+ SkMatrix fG_inv;
+ enum Type {
+ kTrueType_Type, kBitmap_Type, kLine_Type
+ } fType;
+ TEXTMETRIC fTM;
+};
+
+static FIXED float2FIXED(float x) {
+ return SkFixedToFIXED(SkFloatToFixed(x));
+}
+
+static inline float FIXED2float(FIXED x) {
+ return SkFixedToFloat(SkFIXEDToFixed(x));
+}
+
+static BYTE compute_quality(const SkScalerContextRec& rec) {
+ switch (rec.fMaskFormat) {
+ case SkMask::kBW_Format:
+ return NONANTIALIASED_QUALITY;
+ case SkMask::kLCD16_Format:
+ return CLEARTYPE_QUALITY;
+ default:
+ if (rec.fFlags & SkScalerContext::kGenA8FromLCD_Flag) {
+ return CLEARTYPE_QUALITY;
+ } else {
+ return ANTIALIASED_QUALITY;
+ }
+ }
+}
+
+SkScalerContext_GDI::SkScalerContext_GDI(sk_sp<LogFontTypeface> rawTypeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(rawTypeface), effects, desc)
+ , fDDC(0)
+ , fSavefont(0)
+ , fFont(0)
+ , fSC(0)
+ , fGlyphCount(-1)
+{
+ LogFontTypeface* typeface = static_cast<LogFontTypeface*>(this->getTypeface());
+
+ fDDC = ::CreateCompatibleDC(nullptr);
+ if (!fDDC) {
+ return;
+ }
+ SetGraphicsMode(fDDC, GM_ADVANCED);
+ SetBkMode(fDDC, TRANSPARENT);
+
+ // When GDI hinting, remove the entire Y scale from sA and GsA. (Prevents 'linear' metrics.)
+ // When not hinting, remove only the integer Y scale from sA and GsA. (Applied by GDI.)
+ SkScalerContextRec::PreMatrixScale scaleConstraints =
+ (fRec.getHinting() == SkFontHinting::kNone || fRec.getHinting() == SkFontHinting::kSlight)
+ ? SkScalerContextRec::kVerticalInteger_PreMatrixScale
+ : SkScalerContextRec::kVertical_PreMatrixScale;
+ SkVector scale;
+ SkMatrix sA;
+ SkMatrix GsA;
+ SkMatrix A;
+ fRec.computeMatrices(scaleConstraints, &scale, &sA, &GsA, &fG_inv, &A);
+
+ fGsA.eM11 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleX));
+ fGsA.eM12 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewY)); // This should be ~0.
+ fGsA.eM21 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewX));
+ fGsA.eM22 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleY));
+
+ // When not hinting, scale was computed with kVerticalInteger, so is already an integer.
+ // The sA and GsA transforms will be used to create 'linear' metrics.
+
+ // When hinting, scale was computed with kVertical, stating that our port can handle
+ // non-integer scales. This is done so that sA and GsA are computed without any 'residual'
+ // scale in them, preventing 'linear' metrics. However, GDI cannot actually handle non-integer
+ // scales so we need to round in this case. This is fine, since all of the scale has been
+ // removed from sA and GsA, so GDI will be handling the scale completely.
+ SkScalar gdiTextSize = SkScalarRoundToScalar(scale.fY);
+
+ // GDI will not accept a size of zero, so round the range [0, 1] to 1.
+ // If the size was non-zero, the scale factors will also be non-zero and 1px tall text is drawn.
+ // If the size actually was zero, the scale factors will also be zero, so GDI will draw nothing.
+ if (gdiTextSize == 0) {
+ gdiTextSize = SK_Scalar1;
+ }
+
+ LOGFONT lf = typeface->fLogFont;
+ lf.lfHeight = -SkScalarTruncToInt(gdiTextSize);
+ lf.lfQuality = compute_quality(fRec);
+ fFont = CreateFontIndirect(&lf);
+ if (!fFont) {
+ return;
+ }
+
+ fSavefont = (HFONT)SelectObject(fDDC, fFont);
+
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ fTM.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+
+ XFORM xform;
+ if (fTM.tmPitchAndFamily & TMPF_VECTOR) {
+        // We used a logfont on a memory context, so we should never get a
+        // device font. Therefore any TMPF_DEVICE font will be PostScript.
+
+ // If TMPF_VECTOR is set, one of TMPF_TRUETYPE or TMPF_DEVICE means that
+ // we have an outline font. Otherwise we have a vector FON, which is
+ // scalable, but not an outline font.
+ // This was determined by testing with Type1 PFM/PFB and
+ // OpenTypeCFF OTF, as well as looking at Wine bugs and sources.
+ if (fTM.tmPitchAndFamily & (TMPF_TRUETYPE | TMPF_DEVICE)) {
+ // Truetype or PostScript.
+ fType = SkScalerContext_GDI::kTrueType_Type;
+ } else {
+ // Stroked FON.
+ fType = SkScalerContext_GDI::kLine_Type;
+ }
+
+ // fPost2x2 is column-major, left handed (y down).
+ // XFORM 2x2 is row-major, left handed (y down).
+ xform.eM11 = SkScalarToFloat(sA.get(SkMatrix::kMScaleX));
+ xform.eM12 = SkScalarToFloat(sA.get(SkMatrix::kMSkewY));
+ xform.eM21 = SkScalarToFloat(sA.get(SkMatrix::kMSkewX));
+ xform.eM22 = SkScalarToFloat(sA.get(SkMatrix::kMScaleY));
+ xform.eDx = 0;
+ xform.eDy = 0;
+
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = float2FIXED(xform.eM11);
+ fMat22.eM12 = float2FIXED(-xform.eM12);
+ fMat22.eM21 = float2FIXED(-xform.eM21);
+ fMat22.eM22 = float2FIXED(xform.eM22);
+
+ if (needToRenderWithSkia(fRec)) {
+ this->forceGenerateImageFromPath();
+ }
+
+ // Create a hires matrix if we need linear metrics.
+ if (this->isLinearMetrics()) {
+ OUTLINETEXTMETRIC otm;
+ UINT success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == success) {
+ call_ensure_accessible(lf);
+ success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 != success) {
+ SkScalar upem = SkIntToScalar(otm.otmEMSquare);
+
+ SkScalar gdiTextSizeToEMScale = upem / gdiTextSize;
+ fHighResMat22.eM11 = float2FIXED(gdiTextSizeToEMScale);
+ fHighResMat22.eM12 = float2FIXED(0);
+ fHighResMat22.eM21 = float2FIXED(0);
+ fHighResMat22.eM22 = float2FIXED(gdiTextSizeToEMScale);
+
+ SkScalar removeEMScale = SkScalarInvert(upem);
+ fHiResMatrix = A;
+ fHiResMatrix.preScale(removeEMScale, removeEMScale);
+ }
+ }
+
+ } else {
+ // Assume bitmap
+ fType = SkScalerContext_GDI::kBitmap_Type;
+
+ xform.eM11 = 1.0f;
+ xform.eM12 = 0.0f;
+ xform.eM21 = 0.0f;
+ xform.eM22 = 1.0f;
+ xform.eDx = 0.0f;
+ xform.eDy = 0.0f;
+
+ // fPost2x2 is column-major, left handed (y down).
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = SkScalarToFIXED(fRec.fPost2x2[0][0]);
+ fMat22.eM12 = SkScalarToFIXED(-fRec.fPost2x2[1][0]);
+ fMat22.eM21 = SkScalarToFIXED(-fRec.fPost2x2[0][1]);
+ fMat22.eM22 = SkScalarToFIXED(fRec.fPost2x2[1][1]);
+ }
+
+ fOffscreen.init(fFont, xform);
+}
+
+SkScalerContext_GDI::~SkScalerContext_GDI() {
+ if (fDDC) {
+ ::SelectObject(fDDC, fSavefont);
+ ::DeleteDC(fDDC);
+ }
+ if (fFont) {
+ ::DeleteObject(fFont);
+ }
+ if (fSC) {
+ ::ScriptFreeCache(&fSC);
+ }
+}
+
+bool SkScalerContext_GDI::isValid() const {
+ return fDDC && fFont;
+}
+
+unsigned SkScalerContext_GDI::generateGlyphCount() {
+ if (fGlyphCount < 0) {
+ fGlyphCount = calculateGlyphCount(
+ fDDC, static_cast<const LogFontTypeface*>(this->getTypeface())->fLogFont);
+ }
+ return fGlyphCount;
+}
+
+bool SkScalerContext_GDI::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContext_GDI::generateMetrics(SkGlyph* glyph) {
+ SkASSERT(fDDC);
+
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+ SIZE size;
+ WORD glyphs = glyph->getGlyphID();
+ if (0 == GetTextExtentPointI(fDDC, &glyphs, 1, &size)) {
+ glyph->fWidth = SkToS16(fTM.tmMaxCharWidth);
+ glyph->fHeight = SkToS16(fTM.tmHeight);
+ } else {
+ glyph->fWidth = SkToS16(size.cx);
+ glyph->fHeight = SkToS16(size.cy);
+ }
+
+ glyph->fTop = SkToS16(-fTM.tmAscent);
+        // Bitmap FONs cannot underhang, but vector FONs may.
+        // There appears to be no way to determine the underhang of a vector FON.
+ glyph->fLeft = SkToS16(0);
+ glyph->fAdvanceX = glyph->width();
+ glyph->fAdvanceY = 0;
+
+        // Vector FONs will transform nicely, but bitmap FONs do not.
+ if (fType == SkScalerContext_GDI::kLine_Type) {
+ SkRect bounds = SkRect::MakeXYWH(glyph->fLeft, glyph->fTop,
+ glyph->width(), glyph->height());
+ SkMatrix m;
+ m.setAll(SkFIXEDToScalar(fMat22.eM11), -SkFIXEDToScalar(fMat22.eM21), 0,
+ -SkFIXEDToScalar(fMat22.eM12), SkFIXEDToScalar(fMat22.eM22), 0,
+ 0, 0, 1);
+ m.mapRect(&bounds);
+ bounds.roundOut(&bounds);
+ glyph->fLeft = SkScalarTruncToInt(bounds.fLeft);
+ glyph->fTop = SkScalarTruncToInt(bounds.fTop);
+ glyph->fWidth = SkScalarTruncToInt(bounds.width());
+ glyph->fHeight = SkScalarTruncToInt(bounds.height());
+ }
+
+ // Apply matrix to advance.
+ glyph->fAdvanceY = -FIXED2float(fMat22.eM12) * glyph->fAdvanceX;
+ glyph->fAdvanceX *= FIXED2float(fMat22.eM11);
+
+ return;
+ }
+
+ UINT glyphId = glyph->getGlyphID();
+
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+
+ DWORD status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ glyph->zeroMetrics();
+ return;
+ }
+ }
+
+ bool empty = false;
+ // The black box is either the embedded bitmap size or the outline extent.
+ // It is 1x1 if nothing is to be drawn, but will also be 1x1 if something very small
+ // is to be drawn, like a '.'. We need to outset '.' but do not wish to outset ' '.
+ if (1 == gm.gmBlackBoxX && 1 == gm.gmBlackBoxY) {
+ // If GetGlyphOutline with GGO_NATIVE returns 0, we know there was no outline.
+ DWORD bufferSize = GetGlyphOutlineW(fDDC, glyphId, GGO_NATIVE | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ empty = (0 == bufferSize);
+ }
+
+ glyph->fTop = SkToS16(-gm.gmptGlyphOrigin.y);
+ glyph->fLeft = SkToS16(gm.gmptGlyphOrigin.x);
+ if (empty) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ } else {
+ // Outset, since the image may bleed out of the black box.
+ // For embedded bitmaps the black box should be exact.
+ // For outlines we need to outset by 1 in all directions for bleed.
+ // For ClearType we need to outset by 2 for bleed.
+ glyph->fWidth = gm.gmBlackBoxX + 4;
+ glyph->fHeight = gm.gmBlackBoxY + 4;
+ glyph->fTop -= 2;
+ glyph->fLeft -= 2;
+ }
+ // TODO(benjaminwagner): What is the type of gm.gmCellInc[XY]?
+ glyph->fAdvanceX = (float)((int)gm.gmCellIncX);
+ glyph->fAdvanceY = (float)((int)gm.gmCellIncY);
+
+ if ((fTM.tmPitchAndFamily & TMPF_VECTOR) && this->isLinearMetrics()) {
+ sk_bzero(&gm, sizeof(gm));
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fHighResMat22);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fHiResMatrix.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ } else if (!isAxisAligned(this->fRec)) {
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fGsA);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fG_inv.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ }
+}
+
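+// MAT2 entries are GDI FIXED values ({WORD fract; short value;}), so {0, 1}
+// initializes an entry to 1.0; this is the 2x2 identity matrix.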
+static const MAT2 gMat2Identity = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
+void SkScalerContext_GDI::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+ sk_bzero(metrics, sizeof(*metrics));
+
+ SkASSERT(fDDC);
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+#endif
+ metrics->fTop = SkIntToScalar(-fTM.tmAscent);
+ metrics->fAscent = SkIntToScalar(-fTM.tmAscent);
+ metrics->fDescent = SkIntToScalar(fTM.tmDescent);
+ metrics->fBottom = SkIntToScalar(fTM.tmDescent);
+ metrics->fLeading = SkIntToScalar(fTM.tmExternalLeading);
+ metrics->fAvgCharWidth = SkIntToScalar(fTM.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(fTM.tmMaxCharWidth);
+ metrics->fXMin = 0;
+ metrics->fXMax = metrics->fMaxCharWidth;
+ //metrics->fXHeight = 0;
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ return;
+ }
+#endif
+
+ OUTLINETEXTMETRIC otm;
+
+ uint32_t ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 == ret) {
+ return;
+ }
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ metrics->fTop = SkIntToScalar(-otm.otmrcFontBox.top);
+ metrics->fAscent = SkIntToScalar(-otm.otmAscent);
+ metrics->fDescent = SkIntToScalar(-otm.otmDescent);
+ metrics->fBottom = SkIntToScalar(-otm.otmrcFontBox.bottom);
+ metrics->fLeading = SkIntToScalar(otm.otmLineGap);
+ metrics->fAvgCharWidth = SkIntToScalar(otm.otmTextMetrics.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(otm.otmTextMetrics.tmMaxCharWidth);
+ metrics->fXMin = SkIntToScalar(otm.otmrcFontBox.left);
+ metrics->fXMax = SkIntToScalar(otm.otmrcFontBox.right);
+#endif
+ metrics->fUnderlineThickness = SkIntToScalar(otm.otmsUnderscoreSize);
+ metrics->fUnderlinePosition = -SkIntToScalar(otm.otmsUnderscorePosition);
+
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ metrics->fXHeight = SkIntToScalar(otm.otmsXHeight);
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+ DWORD len = GetGlyphOutlineW(fDDC, 'x', GGO_METRICS, &gm, 0, 0, &gMat2Identity);
+ if (len != GDI_ERROR && gm.gmBlackBoxY > 0) {
+ metrics->fXHeight = SkIntToScalar(gm.gmBlackBoxY);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
+static void build_power_table(uint8_t table[], float ee) {
+ for (int i = 0; i < 256; i++) {
+ float x = i / 255.f;
+ x = sk_float_pow(x, ee);
+ int xx = SkScalarRoundToInt(x * 255);
+ table[i] = SkToU8(xx);
+ }
+}
+
+/**
+ * This will invert the gamma applied by GDI (gray-scale antialiased), so we
+ * can get linear values.
+ *
+ * GDI grayscale appears to use a hard-coded gamma of 2.3.
+ *
+ * GDI grayscale appears to draw using the black and white rasterizer at four
+ * times the size and then downsamples to compute the coverage mask. As a
+ * result there are only seventeen total grays. This lack of fidelity means
+ * that shifting into other color spaces is imprecise.
+ */
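+// For example, with the assumed gamma of 2.3, a GDI coverage value of 128
+// linearizes to round(255 * (128/255)^2.3) ~= 52, noticeably darker than
+// mid-gray.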
+static const uint8_t* getInverseGammaTableGDI() {
+ static SkOnce once;
+ static uint8_t gTableGdi[256];
+ once([]{
+ build_power_table(gTableGdi, 2.3f);
+ });
+ return gTableGdi;
+}
+
+/**
+ * This will invert the gamma applied by GDI ClearType, so we can get linear
+ * values.
+ *
+ * GDI ClearType uses SPI_GETFONTSMOOTHINGCONTRAST / 1000 as the gamma value.
+ * If this value is not specified, the default is a gamma of 1.4.
+ */
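+// MSDN documents the contrast value's valid range as 1000 to 2200.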
+static const uint8_t* getInverseGammaTableClearType() {
+ static SkOnce once;
+ static uint8_t gTableClearType[256];
+ once([]{
+ UINT level = 0;
+ if (!SystemParametersInfo(SPI_GETFONTSMOOTHINGCONTRAST, 0, &level, 0) || !level) {
+ // can't get the data, so use a default
+ level = 1400;
+ }
+ build_power_table(gTableClearType, level / 1000.0f);
+ });
+ return gTableClearType;
+}
+
+//Cannot assume that the input rgb is gray due to possible setting of kGenA8FromLCD_Flag.
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(SkGdiRGB rgb, const uint8_t* table8) {
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+ return sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint16_t rgb_to_lcd16(SkGdiRGB rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 16) & 0xFF, tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 8) & 0xFF, tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 0) & 0xFF, tableB);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkMax32(r, 10); g = SkMax32(g, 10); b = SkMax32(b, 10);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_GDI::RGBToA8(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph, const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint8_t* SK_RESTRICT dst = (uint8_t*)((char*)glyph.fImage + (glyph.height() - 1) * dstRB);
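+    // src is a bottom-up DIB whose first row is the glyph's bottom scanline,
+    // so dst starts at the last row of the top-down A8 image and walks upward.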
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(src[i], table8);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ dst[i] = SkMax32(dst[i], 10);
+#endif
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst -= dstRB;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_GDI::RGBToLcd16(
+ const SkGdiRGB* SK_RESTRICT src, size_t srcRB, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint16_t* SK_RESTRICT dst = (uint16_t*)((char*)glyph.fImage + (glyph.height() - 1) * dstRB);
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_lcd16<APPLY_PREBLEND>(src[i], tableR, tableG, tableB);
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst = (uint16_t*)((char*)dst - dstRB);
+ }
+}
+
+void SkScalerContext_GDI::generateImage(const SkGlyph& glyph) {
+ SkASSERT(fDDC);
+
+ const bool isBW = SkMask::kBW_Format == fRec.fMaskFormat;
+ const bool isAA = !isLCD(fRec);
+
+ size_t srcRB;
+ const void* bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+ }
+
+ if (!isBW) {
+ const uint8_t* table;
+ //The offscreen contains a GDI blit if isAA and kGenA8FromLCD_Flag is not set.
+ //Otherwise the offscreen contains a ClearType blit.
+ if (isAA && !(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag)) {
+ table = getInverseGammaTableGDI();
+ } else {
+ table = getInverseGammaTableClearType();
+ }
+ //Note that the following cannot really be integrated into the
+ //pre-blend, since we may not be applying the pre-blend; when we aren't
+ //applying the pre-blend it means that a filter wants linear anyway.
+ //Other code may also be applying the pre-blend, so we'd need another
+ //one with this and one without.
+ SkGdiRGB* addr = (SkGdiRGB*)bits;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.width(); ++x) {
+ int r = (addr[x] >> 16) & 0xFF;
+ int g = (addr[x] >> 8) & 0xFF;
+ int b = (addr[x] >> 0) & 0xFF;
+ addr[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ addr = SkTAddOffset<SkGdiRGB>(addr, srcRB);
+ }
+ }
+
+ size_t dstRB = glyph.rowBytes();
+ if (isBW) {
+ const uint8_t* src = (const uint8_t*)bits;
+ uint8_t* dst = (uint8_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+ for (int y = 0; y < glyph.fHeight; y++) {
+ memcpy(dst, src, dstRB);
+ src += srcRB;
+ dst -= dstRB;
+ }
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ if (glyph.width() > 0 && glyph.fHeight > 0) {
+ int bitCount = glyph.width() & 7;
+ uint8_t* first = (uint8_t*)glyph.fImage;
+ uint8_t* last = (uint8_t*)((char*)glyph.fImage + glyph.height() * dstRB - 1);
+ *first |= 1 << 7;
+ *last |= bitCount == 0 ? 1 : 1 << (8 - bitCount);
+ }
+#endif
+ } else if (isAA) {
+ // since the caller may require A8 for maskfilters, we can't check for BW
+ // ... until we have the caller tell us that explicitly
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(src, srcRB, glyph, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(src, srcRB, glyph, fPreBlend.fG);
+ }
+ } else { // LCD16
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ RGBToLcd16<true>(src, srcRB, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ RGBToLcd16<false>(src, srcRB, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ }
+}
+
+class GDIGlyphbufferPointIter {
+public:
+ GDIGlyphbufferPointIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fHeaderIter(glyphbuf, total_size), fCurveIter(), fPointIter()
+ { }
+
+ POINTFX const * next() {
+nextHeader:
+ if (!fCurveIter.isSet()) {
+ const TTPOLYGONHEADER* header = fHeaderIter.next();
+ if (nullptr == header) {
+ return nullptr;
+ }
+ fCurveIter.set(header);
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ return nullptr;
+ }
+ fPointIter.set(curve);
+ return &header->pfxStart;
+ }
+
+ const POINTFX* nextPoint = fPointIter.next();
+ if (nullptr == nextPoint) {
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ fCurveIter.set();
+ goto nextHeader;
+ } else {
+ fPointIter.set(curve);
+ }
+ nextPoint = fPointIter.next();
+ }
+ return nextPoint;
+ }
+
+ WORD currentCurveType() {
+ return fPointIter.fCurveType;
+ }
+
+private:
+ /** Iterates over all of the polygon headers in a glyphbuf. */
+ class GDIPolygonHeaderIter {
+ public:
+ GDIPolygonHeaderIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fCurPolygon(reinterpret_cast<const TTPOLYGONHEADER*>(glyphbuf))
+ , fEndPolygon(SkTAddOffset<const TTPOLYGONHEADER>(glyphbuf, total_size))
+ { }
+
+ const TTPOLYGONHEADER* next() {
+ if (fCurPolygon >= fEndPolygon) {
+ return nullptr;
+ }
+ const TTPOLYGONHEADER* thisPolygon = fCurPolygon;
+ fCurPolygon = SkTAddOffset<const TTPOLYGONHEADER>(fCurPolygon, fCurPolygon->cb);
+ return thisPolygon;
+ }
+ private:
+ const TTPOLYGONHEADER* fCurPolygon;
+ const TTPOLYGONHEADER* fEndPolygon;
+ };
+
+ /** Iterates over all of the polygon curves in a polygon header. */
+ class GDIPolygonCurveIter {
+ public:
+ GDIPolygonCurveIter() : fCurCurve(nullptr), fEndCurve(nullptr) { }
+
+ GDIPolygonCurveIter(const TTPOLYGONHEADER* curPolygon)
+ : fCurCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER)))
+ , fEndCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb))
+ { }
+
+ bool isSet() { return fCurCurve != nullptr; }
+
+ void set(const TTPOLYGONHEADER* curPolygon) {
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER));
+ fEndCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb);
+ }
+ void set() {
+ fCurCurve = nullptr;
+ fEndCurve = nullptr;
+ }
+
+ const TTPOLYCURVE* next() {
+ if (fCurCurve >= fEndCurve) {
+ return nullptr;
+ }
+ const TTPOLYCURVE* thisCurve = fCurCurve;
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(fCurCurve, size_of_TTPOLYCURVE(*fCurCurve));
+ return thisCurve;
+ }
+ private:
+ size_t size_of_TTPOLYCURVE(const TTPOLYCURVE& curve) {
+ return 2*sizeof(WORD) + curve.cpfx*sizeof(POINTFX);
+ }
+ const TTPOLYCURVE* fCurCurve;
+ const TTPOLYCURVE* fEndCurve;
+ };
+
+ /** Iterates over all of the polygon points in a polygon curve. */
+ class GDIPolygonCurvePointIter {
+ public:
+ GDIPolygonCurvePointIter() : fCurveType(0), fCurPoint(nullptr), fEndPoint(nullptr) { }
+
+ GDIPolygonCurvePointIter(const TTPOLYCURVE* curPolygon)
+ : fCurveType(curPolygon->wType)
+ , fCurPoint(&curPolygon->apfx[0])
+ , fEndPoint(&curPolygon->apfx[curPolygon->cpfx])
+ { }
+
+ bool isSet() { return fCurPoint != nullptr; }
+
+ void set(const TTPOLYCURVE* curPolygon) {
+ fCurveType = curPolygon->wType;
+ fCurPoint = &curPolygon->apfx[0];
+ fEndPoint = &curPolygon->apfx[curPolygon->cpfx];
+ }
+ void set() {
+ fCurPoint = nullptr;
+ fEndPoint = nullptr;
+ }
+
+ const POINTFX* next() {
+ if (fCurPoint >= fEndPoint) {
+ return nullptr;
+ }
+ const POINTFX* thisPoint = fCurPoint;
+ ++fCurPoint;
+ return thisPoint;
+ }
+
+ WORD fCurveType;
+ private:
+ const POINTFX* fCurPoint;
+ const POINTFX* fEndPoint;
+ };
+
+ GDIPolygonHeaderIter fHeaderIter;
+ GDIPolygonCurveIter fCurveIter;
+ GDIPolygonCurvePointIter fPointIter;
+};
+
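+// A GGO_NATIVE buffer holds a sequence of contours: each is a TTPOLYGONHEADER
+// (carrying the start point and the contour's total byte size) followed by
+// TTPOLYCURVE records (TT_PRIM_LINE or TT_PRIM_QSPLINE) whose points continue
+// the contour.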
+static void sk_path_from_gdi_path(SkPath* path, const uint8_t* glyphbuf, DWORD total_size) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ path->moveTo(SkFixedToScalar( SkFIXEDToFixed(th->pfxStart.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(th->pfxStart.y)));
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < pc->cpfx; i++) {
+ path->lineTo(SkFixedToScalar( SkFIXEDToFixed(pc->apfx[i].x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pc->apfx[i].y)));
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
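+                // A QSPLINE is a quadratic B-spline: every listed point except
+                // the last is an off-curve control, and each implied on-curve
+                // point is the midpoint of two consecutive controls.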
+ for (uint16_t u = 0; u < pc->cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = pc->apfx[u]; // B is always the current point
+ POINTFX pnt_c = pc->apfx[u+1];
+
+ if (u < pc->cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+ path->quadTo(SkFixedToScalar( SkFIXEDToFixed(pnt_b.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_b.y)),
+ SkFixedToScalar( SkFIXEDToFixed(pnt_c.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_c.y)));
+ }
+ }
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * pc->cpfx;
+ }
+ cur_glyph += th->cb;
+ path->close();
+ }
+}
+
+#define move_next_expected_hinted_point(iter, pElem) do {\
+ pElem = iter.next(); \
+ if (nullptr == pElem) return false; \
+} while(0)
+
+// It is possible for the hinted and unhinted versions of the same path to have
+// a different number of points due to GDI's handling of flipped points.
+// If this is detected, this will return false.
+static bool sk_path_from_gdi_paths(SkPath* path, const uint8_t* glyphbuf, DWORD total_size,
+ GDIGlyphbufferPointIter hintedYs) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ POINTFX const * hintedPoint;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ path->moveTo(SkFixedToScalar( SkFIXEDToFixed(th->pfxStart.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(hintedPoint->y)));
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < pc->cpfx; i++) {
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ path->lineTo(SkFixedToScalar( SkFIXEDToFixed(pc->apfx[i].x)),
+ SkFixedToScalar(-SkFIXEDToFixed(hintedPoint->y)));
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
+ POINTFX currentPoint = pc->apfx[0];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ currentPoint.y = hintedPoint->y;
+ }
+ for (uint16_t u = 0; u < pc->cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = currentPoint;//pc->apfx[u]; // B is always the current point
+ POINTFX pnt_c = pc->apfx[u+1];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ pnt_c.y = hintedPoint->y;
+ }
+ currentPoint.x = pnt_c.x;
+ currentPoint.y = pnt_c.y;
+
+ if (u < pc->cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+ path->quadTo(SkFixedToScalar( SkFIXEDToFixed(pnt_b.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_b.y)),
+ SkFixedToScalar( SkFIXEDToFixed(pnt_c.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_c.y)));
+ }
+ }
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * pc->cpfx;
+ }
+ cur_glyph += th->cb;
+ path->close();
+ }
+ return true;
+}
+
+DWORD SkScalerContext_GDI::getGDIGlyphPath(SkGlyphID glyph, UINT flags,
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf)
+{
+ GLYPHMETRICS gm;
+
+ DWORD total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, BUFFERSIZE, glyphbuf->get(), &fMat22);
+ // Sometimes GetGlyphOutlineW returns a number larger than BUFFERSIZE even if BUFFERSIZE > 0.
+ // It has been verified that this does not involve a buffer overrun.
+ if (GDI_ERROR == total_size || total_size > BUFFERSIZE) {
+ // GDI_ERROR because the BUFFERSIZE was too small, or because the data was not accessible.
+        // When the data is not accessible, GetGlyphOutlineW fails rather quickly,
+ // so just try to get the size. If that fails then ensure the data is accessible.
+ total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ // GetGlyphOutlineW is known to fail for some characters, such as spaces.
+ // In these cases, just return that the glyph does not have a shape.
+ return 0;
+ }
+ }
+
+ glyphbuf->reset(total_size);
+
+ DWORD ret = GetGlyphOutlineW(fDDC, glyph, flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetGlyphOutlineW(fDDC, glyph, flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ SkASSERT(false);
+ return 0;
+ }
+ }
+ }
+ return total_size;
+}
+
+bool SkScalerContext_GDI::generatePath(SkGlyphID glyph, SkPath* path) {
+ SkASSERT(path);
+ SkASSERT(fDDC);
+
+ path->reset();
+
+ // Out of all the fonts on a typical Windows box,
+ // 25% of glyphs require more than 2KB.
+ // 1% of glyphs require more than 4KB.
+ // 0.01% of glyphs require more than 8KB.
+ // 8KB is less than 1% of the normal 1MB stack on Windows.
+ // Note that some web fonts glyphs require more than 20KB.
+ //static const DWORD BUFFERSIZE = (1 << 13);
+
+ //GDI only uses hinted outlines when axis aligned.
+ UINT format = GGO_NATIVE | GGO_GLYPH_INDEX;
+ if (fRec.getHinting() == SkFontHinting::kNone || fRec.getHinting() == SkFontHinting::kSlight){
+ format |= GGO_UNHINTED;
+ }
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t> glyphbuf(BUFFERSIZE);
+ DWORD total_size = getGDIGlyphPath(glyph, format, &glyphbuf);
+ if (0 == total_size) {
+ return false;
+ }
+
+ if (fRec.getHinting() != SkFontHinting::kSlight) {
+ sk_path_from_gdi_path(path, glyphbuf, total_size);
+ } else {
+ //GDI only uses hinted outlines when axis aligned.
+ UINT format = GGO_NATIVE | GGO_GLYPH_INDEX;
+
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t> hintedGlyphbuf(BUFFERSIZE);
+ DWORD hinted_total_size = getGDIGlyphPath(glyph, format, &hintedGlyphbuf);
+ if (0 == hinted_total_size) {
+ return false;
+ }
+
+ if (!sk_path_from_gdi_paths(path, glyphbuf, total_size,
+ GDIGlyphbufferPointIter(hintedGlyphbuf, hinted_total_size)))
+ {
+ path->reset();
+ sk_path_from_gdi_path(path, glyphbuf, total_size);
+ }
+ }
+ return true;
+}
+
+static void logfont_for_name(const char* familyName, LOGFONT* lf) {
+ sk_bzero(lf, sizeof(LOGFONT));
+#ifdef UNICODE
+ // Get the buffer size needed first.
+ size_t str_len = ::MultiByteToWideChar(CP_UTF8, 0, familyName,
+ -1, nullptr, 0);
+ // Allocate a buffer (str_len already has terminating null
+ // accounted for).
+ wchar_t *wideFamilyName = new wchar_t[str_len];
+ // Now actually convert the string.
+ ::MultiByteToWideChar(CP_UTF8, 0, familyName, -1,
+ wideFamilyName, str_len);
+ ::wcsncpy(lf->lfFaceName, wideFamilyName, LF_FACESIZE - 1);
+ delete [] wideFamilyName;
+ lf->lfFaceName[LF_FACESIZE-1] = L'\0';
+#else
+ ::strncpy(lf->lfFaceName, familyName, LF_FACESIZE - 1);
+ lf->lfFaceName[LF_FACESIZE - 1] = '\0';
+#endif
+}
+
+void LogFontTypeface::onGetFamilyName(SkString* familyName) const {
+ // Get the actual name of the typeface. The logfont may not know this.
+ SkAutoHDC hdc(fLogFont);
+ dcfontname_to_skstring(hdc, fLogFont, familyName);
+}
+
+void LogFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString familyName;
+ this->onGetFamilyName(&familyName);
+ desc->setFamilyName(familyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = this->fSerializeAsStream;
+}
+
+void LogFontTypeface::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ SkAutoHDC hdc(fLogFont);
+ unsigned int glyphCount = calculateGlyphCount(hdc, fLogFont);
+ populate_glyph_to_unicode(hdc, glyphCount, dstArray);
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> LogFontTypeface::onGetAdvancedMetrics() const {
+ LOGFONT lf = fLogFont;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(nullptr);
+
+ // The design HFONT must be destroyed after the HDC
+ using HFONT_T = typename std::remove_pointer<HFONT>::type;
+ std::unique_ptr<HFONT_T, SkFunctionWrapper<decltype(DeleteObject), DeleteObject>> designFont;
+ SkAutoHDC hdc(lf);
+
+ const char stem_chars[] = {'i', 'I', '!', '1'};
+ int16_t min_width;
+ unsigned glyphCount;
+
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+ if (!otmRet || !GetTextFace(hdc, LF_FACESIZE, lf.lfFaceName)) {
+ return info;
+ }
+ lf.lfHeight = -SkToS32(otm.otmEMSquare);
+ designFont.reset(CreateFontIndirect(&lf));
+ SelectObject(hdc, designFont.get());
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ return info;
+ }
+ glyphCount = calculateGlyphCount(hdc, fLogFont);
+
+ info.reset(new SkAdvancedTypefaceMetrics);
+ tchar_to_skstring(lf.lfFaceName, &info->fFontName);
+
+ SkOTTableOS2_V4::Type fsType;
+ if (sizeof(fsType) == this->getTableData(SkTEndian_SwapBE32(SkOTTableOS2::TAG),
+ offsetof(SkOTTableOS2_V4, fsType),
+ sizeof(fsType),
+ &fsType)) {
+ SkOTUtils::SetAdvancedTypefaceFlags(fsType, info.get());
+ } else {
+ // If bit 1 is set, the font may not be embedded in a document.
+ // If bit 1 is clear, the font can be embedded.
+ // If bit 2 is set, the embedding is read-only.
+ if (otm.otmfsType & 0x1) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ }
+
+ if (glyphCount == 0 || (otm.otmTextMetrics.tmPitchAndFamily & TMPF_TRUETYPE) == 0) {
+ return info;
+ }
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+
+ // If this bit is clear the font is a fixed pitch font.
+ if (!(otm.otmTextMetrics.tmPitchAndFamily & TMPF_FIXED_PITCH)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (otm.otmTextMetrics.tmItalic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ if (otm.otmTextMetrics.tmPitchAndFamily & FF_ROMAN) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (otm.otmTextMetrics.tmPitchAndFamily & FF_SCRIPT) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ // The main italic angle of the font, in tenths of a degree counterclockwise
+ // from vertical.
+ info->fItalicAngle = otm.otmItalicAngle / 10;
+ info->fAscent = SkToS16(otm.otmTextMetrics.tmAscent);
+ info->fDescent = SkToS16(-otm.otmTextMetrics.tmDescent);
+    // TODO(ctguil): Use an alternate cap height calculation.
+    // MSDN says otmsCapEmHeight is not supported, but it returns a value on
+    // my Win7 box.
+ info->fCapHeight = otm.otmsCapEmHeight;
+ info->fBBox =
+ SkIRect::MakeLTRB(otm.otmrcFontBox.left, otm.otmrcFontBox.top,
+ otm.otmrcFontBox.right, otm.otmrcFontBox.bottom);
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ min_width = SHRT_MAX;
+ info->fStemV = 0;
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
+ int16_t width = abcWidths.abcB;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+
+ return info;
+}
+
+//Dummy representation of a Base64 encoded GUID from create_unique_font_name.
+#define BASE64_GUID_ID "XXXXXXXXXXXXXXXXXXXXXXXX"
+//Length of the GUID representation from create_unique_font_name, including the null terminator.
+#define BASE64_GUID_ID_LEN SK_ARRAY_COUNT(BASE64_GUID_ID)
+
+static_assert(BASE64_GUID_ID_LEN < LF_FACESIZE, "GUID_longer_than_facesize");
+
+/**
+ NameID 6 Postscript names cannot have the character '/'.
+ It would be easier to hex encode the GUID, but that is 32 bytes,
+ and many systems have issues with names longer than 28 bytes.
+ The following need not be any standard base64 encoding.
+ The encoded value is never decoded.
+*/
+static const char postscript_safe_base64_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789-_=";
+
+/**
+ Formats a GUID into Base64 and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static void format_guid_b64(const GUID& guid, char* buffer, size_t bufferSize) {
+ SkASSERT(bufferSize >= BASE64_GUID_ID_LEN);
+ size_t written = SkBase64::Encode(&guid, sizeof(guid), buffer, postscript_safe_base64_encode);
+ SkASSERT(written < LF_FACESIZE);
+ buffer[written] = '\0';
+}
+
+/**
+ Creates a Base64 encoded GUID and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static HRESULT create_unique_font_name(char* buffer, size_t bufferSize) {
+ GUID guid = {};
+ if (FAILED(CoCreateGuid(&guid))) {
+ return E_UNEXPECTED;
+ }
+ format_guid_b64(guid, buffer, bufferSize);
+
+ return S_OK;
+}
+
+/**
+ Introduces a font to GDI. On failure will return nullptr. The returned handle
+ should eventually be passed to RemoveFontMemResourceEx.
+*/
+static HANDLE activate_font(SkData* fontData) {
+ DWORD numFonts = 0;
+ //AddFontMemResourceEx just copies the data, but does not specify const.
+ HANDLE fontHandle = AddFontMemResourceEx(const_cast<void*>(fontData->data()),
+ static_cast<DWORD>(fontData->size()),
+ 0,
+ &numFonts);
+
+ if (fontHandle != nullptr && numFonts < 1) {
+ RemoveFontMemResourceEx(fontHandle);
+ return nullptr;
+ }
+
+ return fontHandle;
+}
+
+// Takes ownership of the stream.
+static sk_sp<SkTypeface> create_from_stream(std::unique_ptr<SkStreamAsset> stream) {
+ // Create a unique and unpredictable font name.
+ // Avoids collisions and access from CSS.
+ char familyName[BASE64_GUID_ID_LEN];
+ const int familyNameSize = SK_ARRAY_COUNT(familyName);
+ if (FAILED(create_unique_font_name(familyName, familyNameSize))) {
+ return nullptr;
+ }
+
+ // Change the name of the font.
+ sk_sp<SkData> rewrittenFontData(SkOTUtils::RenameFont(stream.get(), familyName, familyNameSize-1));
+ if (nullptr == rewrittenFontData.get()) {
+ return nullptr;
+ }
+
+ // Register the font with GDI.
+ HANDLE fontReference = activate_font(rewrittenFontData.get());
+ if (nullptr == fontReference) {
+ return nullptr;
+ }
+
+ // Create the typeface.
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+
+ return sk_sp<SkTypeface>(SkCreateFontMemResourceTypefaceFromLOGFONT(lf, fontReference));
+}
+
+std::unique_ptr<SkStreamAsset> LogFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = 0;
+
+ const DWORD kTTCTag = SkEndian_SwapBE32(SkSetFourByteTag('t', 't', 'c', 'f'));
+ LOGFONT lf = fLogFont;
+
+ SkAutoHDC hdc(lf);
+
+ std::unique_ptr<SkStreamAsset> stream;
+ DWORD tables[2] = {kTTCTag, 0};
+ for (size_t i = 0; i < SK_ARRAY_COUNT(tables); i++) {
+ DWORD bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ }
+ if (bufferSize != GDI_ERROR) {
+ stream.reset(new SkMemoryStream(bufferSize));
+ if (GetFontData(hdc, tables[i], 0, (void*)stream->getMemoryBase(), bufferSize)) {
+ break;
+ } else {
+ stream.reset();
+ }
+ }
+ }
+ return stream;
+}
+
+sk_sp<SkTypeface> LogFontTypeface::onMakeClone(const SkFontArguments& args) const {
+ return sk_ref_sp(this);
+}
+
+static void bmpCharsToGlyphs(HDC hdc, const WCHAR* bmpChars, int count, uint16_t* glyphs,
+ bool Ox1FHack)
+{
+    // Type1 fonts fail with the Uniscribe API, so use GetGlyphIndices for plane 0 (BMP).
+
+ /** Real documentation for GetGlyphIndicesW:
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is not specified and a character does not map to a
+ * glyph, then the 'default character's glyph is returned instead. The 'default character'
+ * is available in fTM.tmDefaultChar. FON fonts have a default character, and there exists
+ * a usDefaultChar in the 'OS/2' table, version 2 and later. If there is no
+ * 'default character' specified by the font, then often the first character found is used.
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is specified and a character does not map to a glyph,
+ * then the glyph 0xFFFF is used. In Windows XP and earlier, Bitmap/Vector FON usually use
+ * glyph 0x1F instead ('Terminal' appears to be special, returning 0xFFFF).
+ * Type1 PFM/PFB, TT, OT TT, OT CFF all appear to use 0xFFFF, even on XP.
+ */
+ DWORD result = GetGlyphIndicesW(hdc, bmpChars, count, glyphs, GGI_MARK_NONEXISTING_GLYPHS);
+ if (GDI_ERROR == result) {
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = 0;
+ }
+ return;
+ }
+
+ if (Ox1FHack) {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i] || 0x1F == glyphs[i]) {
+ glyphs[i] = 0;
+ }
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i]){
+ glyphs[i] = 0;
+ }
+ }
+ }
+}
+
+static uint16_t nonBmpCharToGlyph(HDC hdc, SCRIPT_CACHE* scriptCache, const WCHAR utf16[2]) {
+ uint16_t index = 0;
+    // Use Uniscribe to determine the glyph index for non-BMP characters.
+ static const int numWCHAR = 2;
+ static const int maxItems = 2;
+    // MSDN states that this parameter may be nullptr, but some operations fail when it is.
+ SCRIPT_CONTROL scriptControl;
+ memset(&scriptControl, 0, sizeof(scriptControl));
+ // Add extra item to SCRIPT_ITEM to work around a bug (now documented).
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=366643
+ SCRIPT_ITEM si[maxItems + 1];
+ int numItems;
+ HRZM(ScriptItemize(utf16, numWCHAR, maxItems, &scriptControl, nullptr, si, &numItems),
+ "Could not itemize character.");
+
+    // Sometimes ScriptShape cannot find a glyph for a non-BMP character and returns 2 space glyphs.
+ static const int maxGlyphs = 2;
+ SCRIPT_VISATTR vsa[maxGlyphs];
+ WORD outGlyphs[maxGlyphs];
+ WORD logClust[numWCHAR];
+ int numGlyphs;
+ SCRIPT_ANALYSIS& script = si[0].a;
+ script.eScript = SCRIPT_UNDEFINED;
+ script.fRTL = FALSE;
+ script.fLayoutRTL = FALSE;
+ script.fLinkBefore = FALSE;
+ script.fLinkAfter = FALSE;
+ script.fLogicalOrder = FALSE;
+ script.fNoGlyphIndex = FALSE;
+ script.s.uBidiLevel = 0;
+ script.s.fOverrideDirection = 0;
+ script.s.fInhibitSymSwap = TRUE;
+ script.s.fCharShape = FALSE;
+ script.s.fDigitSubstitute = FALSE;
+ script.s.fInhibitLigate = FALSE;
+ script.s.fDisplayZWG = TRUE;
+ script.s.fArabicNumContext = FALSE;
+ script.s.fGcpClusters = FALSE;
+ script.s.fReserved = 0;
+ script.s.fEngineReserved = 0;
+    // Note for the future: a return value of 0x80040200 here is USP_E_SCRIPT_NOT_IN_FONT.
+ HRZM(ScriptShape(hdc, scriptCache, utf16, numWCHAR, maxGlyphs, &script,
+ outGlyphs, logClust, vsa, &numGlyphs),
+ "Could not shape character.");
+ if (1 == numGlyphs) {
+ index = outGlyphs[0];
+ }
+ return index;
+}
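+// Example (illustrative): U+1F600 arrives as the surrogate pair
+// { 0xD83D, 0xDE00 }; ScriptItemize/ScriptShape resolve it to a single glyph
+// when the font supports it, otherwise numGlyphs != 1 and 0 is returned.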
+
+void LogFontTypeface::onCharsToGlyphs(const SkUnichar* uni, int glyphCount,
+ SkGlyphID glyphs[]) const
+{
+ SkAutoHDC hdc(fLogFont);
+
+ TEXTMETRIC tm;
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ call_ensure_accessible(fLogFont);
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ tm.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+ bool Ox1FHack = !(tm.tmPitchAndFamily & TMPF_VECTOR) /*&& winVer < Vista */;
+
+ SCRIPT_CACHE sc = 0;
+ static const int scratchCount = 256;
+ WCHAR scratch[scratchCount];
+ int glyphIndex = 0;
+ const uint32_t* utf32 = reinterpret_cast<const uint32_t*>(uni);
+ while (glyphIndex < glyphCount) {
+ // Try a run of bmp.
+ int glyphsLeft = SkTMin(glyphCount - glyphIndex, scratchCount);
+ int runLength = 0;
+ while (runLength < glyphsLeft && utf32[glyphIndex + runLength] <= 0xFFFF) {
+ scratch[runLength] = static_cast<WCHAR>(utf32[glyphIndex + runLength]);
+ ++runLength;
+ }
+ if (runLength) {
+ bmpCharsToGlyphs(hdc, scratch, runLength, &glyphs[glyphIndex], Ox1FHack);
+ glyphIndex += runLength;
+ }
+
+ // Try a run of non-bmp.
+ while (glyphIndex < glyphCount && utf32[glyphIndex] > 0xFFFF) {
+ SkUTF::ToUTF16(utf32[glyphIndex], reinterpret_cast<uint16_t*>(scratch));
+ glyphs[glyphIndex] = nonBmpCharToGlyph(hdc, &sc, scratch);
+ ++glyphIndex;
+ }
+ }
+
+ if (sc) {
+ ::ScriptFreeCache(&sc);
+ }
+}
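+// Example (illustrative): for the input U+0041 U+1F600 U+0042 the loop above
+// maps 'A' in a BMP run, then U+1F600 through nonBmpCharToGlyph as a
+// surrogate pair, then 'B' in a second BMP run.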
+
+int LogFontTypeface::onCountGlyphs() const {
+ SkAutoHDC hdc(fLogFont);
+ return calculateGlyphCount(hdc, fLogFont);
+}
+
+void LogFontTypeface::getPostScriptGlyphNames(SkString*) const {}
+
+int LogFontTypeface::onGetUPEM() const {
+ SkAutoHDC hdc(fLogFont);
+ return calculateUPEM(hdc, fLogFont);
+}
+
+SkTypeface::LocalizedStrings* LogFontTypeface::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ SkString language("und"); //undetermined
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(familyName, language);
+ }
+ return nameIter.release();
+}
+
+int LogFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ SkSFNTHeader header;
+ if (sizeof(header) != this->onGetTableData(0, 0, sizeof(header), &header)) {
+ return 0;
+ }
+
+ int numTables = SkEndian_SwapBE16(header.numTables);
+
+ if (tags) {
+ size_t size = numTables * sizeof(SkSFNTHeader::TableDirectoryEntry);
+ SkAutoSTMalloc<0x20, SkSFNTHeader::TableDirectoryEntry> dir(numTables);
+ if (size != this->onGetTableData(0, sizeof(header), size, dir.get())) {
+ return 0;
+ }
+
+ for (int i = 0; i < numTables; ++i) {
+ tags[i] = SkEndian_SwapBE32(dir[i].tag);
+ }
+ }
+ return numTables;
+}
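+// Illustrative call pattern (assumed public SkTypeface API, not part of this file):
+//   int n = typeface->countTables();    // forwards to onGetTableTags(nullptr)
+//   SkAutoTArray<SkFontTableTag> tags(n);
+//   typeface->getTableTags(tags.get()); // forwards to onGetTableTags(tags)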
+
+size_t LogFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ LOGFONT lf = fLogFont;
+ SkAutoHDC hdc(lf);
+
+ tag = SkEndian_SwapBE32(tag);
+ if (nullptr == data) {
+ length = 0;
+ }
+ DWORD bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ }
+ return bufferSize == GDI_ERROR ? 0 : bufferSize;
+}
+
+sk_sp<SkData> LogFontTypeface::onCopyTableData(SkFontTableTag tag) const {
+ LOGFONT lf = fLogFont;
+ SkAutoHDC hdc(lf);
+
+ tag = SkEndian_SwapBE32(tag);
+ DWORD size = GetFontData(hdc, tag, 0, nullptr, 0);
+ if (size == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ size = GetFontData(hdc, tag, 0, nullptr, 0);
+ }
+
+ sk_sp<SkData> data;
+ if (size != GDI_ERROR) {
+ data = SkData::MakeUninitialized(size);
+ if (GetFontData(hdc, tag, 0, data->writable_data(), size) == GDI_ERROR) {
+ data.reset();
+ }
+ }
+ return data;
+}
+
+SkScalerContext* LogFontTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ auto ctx = skstd::make_unique<SkScalerContext_GDI>(
+ sk_ref_sp(const_cast<LogFontTypeface*>(this)), effects, desc);
+ if (!ctx->isValid()) {
+ return nullptr;
+ }
+ return ctx.release();
+}
+
+void LogFontTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
+ rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
+ {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbeddedBitmapText_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkFontHinting h = rec->getHinting();
+ switch (h) {
+ case SkFontHinting::kNone:
+ break;
+ case SkFontHinting::kSlight:
+ // Only do slight hinting when axis aligned.
+ // TODO: re-enable slight hinting when FontHostTest can pass.
+ //if (!isAxisAligned(*rec)) {
+ h = SkFontHinting::kNone;
+ //}
+ break;
+ case SkFontHinting::kNormal:
+ case SkFontHinting::kFull:
+ // TODO: need to be able to distinguish subpixel positioned glyphs
+ // and linear metrics.
+ //rec->fFlags &= ~SkScalerContext::kSubpixelPositioning_Flag;
+ h = SkFontHinting::kNormal;
+ break;
+ default:
+ SkDEBUGFAIL("unknown hinting");
+ }
+ //TODO: if this is a bitmap font, squash hinting and subpixel.
+ rec->setHinting(h);
+
+// Turned off since GDI might turn A8 into BW! This needs a bigger fix.
+#if 0
+ // Disable LCD when rotated, since GDI's output is ugly
+ if (isLCD(*rec) && !isAxisAligned(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+#endif
+
+ if (!fCanBeLCD && isLCD(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags &= ~SkScalerContext::kGenA8FromLCD_Flag;
+ } else if (rec->fMaskFormat == SkMask::kA8_Format) {
+ // Bug 1277404
+        // If we have non-LCD GDI text, render the fonts as ClearType and convert them
+        // to grayscale. This seems to be what Chrome and IE are doing on Windows 7.
+        // This also applies if ClearType is disabled system-wide.
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkDataTable.h"
+#include "include/core/SkFontMgr.h"
+
+static bool valid_logfont_for_enum(const LOGFONT& lf) {
+ // TODO: Vector FON is unsupported and should not be listed.
+ return
+ // Ignore implicit vertical variants.
+ lf.lfFaceName[0] && lf.lfFaceName[0] != '@'
+
+ // DEFAULT_CHARSET is used to get all fonts, but also implies all
+ // character sets. Filter assuming all fonts support ANSI_CHARSET.
+ && ANSI_CHARSET == lf.lfCharSet
+ ;
+}
+
+/** An EnumFontFamExProc implementation which interprets builderParam as
+ * an SkTDArray<ENUMLOGFONTEX>* and appends logfonts which
+ * pass the valid_logfont_for_enum predicate.
+ */
+static int CALLBACK enum_family_proc(const LOGFONT* lf, const TEXTMETRIC*,
+ DWORD fontType, LPARAM builderParam) {
+ if (valid_logfont_for_enum(*lf)) {
+ SkTDArray<ENUMLOGFONTEX>* array = (SkTDArray<ENUMLOGFONTEX>*)builderParam;
+ *array->append() = *(ENUMLOGFONTEX*)lf;
+ }
+ return 1; // non-zero means continue
+}
+
+class SkFontStyleSetGDI : public SkFontStyleSet {
+public:
+ SkFontStyleSetGDI(const TCHAR familyName[]) {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+ _tcscpy_s(lf.lfFaceName, familyName);
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+ int count() override {
+ return fArray.count();
+ }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override {
+ if (fs) {
+ *fs = get_style(fArray[index].elfLogFont);
+ }
+ if (styleName) {
+ const ENUMLOGFONTEX& ref = fArray[index];
+ // For some reason, ENUMLOGFONTEX and LOGFONT disagree on their type in the
+ // non-unicode version.
+ // ENUMLOGFONTEX uses BYTE
+ // LOGFONT uses CHAR
+            // Here we assert that the style name is logically the same (size) as
+ // a TCHAR, so we can use the same converter function.
+ SkASSERT(sizeof(TCHAR) == sizeof(ref.elfStyle[0]));
+ tchar_to_skstring((const TCHAR*)ref.elfStyle, styleName);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return SkCreateTypefaceFromLOGFONT(fArray[index].elfLogFont);
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ return this->matchStyleCSS3(pattern);
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fArray;
+};
+
+class SkFontMgrGDI : public SkFontMgr {
+public:
+ SkFontMgrGDI() {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fLogFontArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fLogFontArray.count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkASSERT((unsigned)index < (unsigned)fLogFontArray.count());
+ tchar_to_skstring(fLogFontArray[index].elfLogFont.lfFaceName, familyName);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SkASSERT((unsigned)index < (unsigned)fLogFontArray.count());
+ return new SkFontStyleSetGDI(fLogFontArray[index].elfLogFont.lfFaceName);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (nullptr == familyName) {
+ familyName = ""; // do we need this check???
+ }
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+ return new SkFontStyleSetGDI(lf.lfFaceName);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override {
+ // could be in base impl
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const override {
+ // could be in base impl
+ SkString familyName;
+ ((LogFontTypeface*)familyMember)->getFamilyName(&familyName);
+ return this->matchFamilyStyle(familyName.c_str(), fontstyle);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ if (ttcIndex != 0) {
+ return nullptr;
+ }
+ return create_from_stream(std::move(stream));
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ // could be in base impl
+ return this->makeFromStream(std::unique_ptr<SkStreamAsset>(new SkMemoryStream(std::move(data))),
+ ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ // could be in base impl
+ auto stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ LOGFONT lf;
+ if (nullptr == familyName) {
+ lf = get_default_font();
+ } else {
+ logfont_for_name(familyName, &lf);
+ }
+
+ lf.lfWeight = style.weight();
+ lf.lfItalic = style.slant() == SkFontStyle::kUpright_Slant ? FALSE : TRUE;
+ return sk_sp<SkTypeface>(SkCreateTypefaceFromLOGFONT(lf));
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fLogFontArray;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFontMgr> SkFontMgr_New_GDI() { return sk_make_sp<SkFontMgrGDI>(); }
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
new file mode 100644
index 0000000000..23986cbaaa
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/ports/SkFontMgr_FontConfigInterface.h"
+#include "include/private/SkMutex.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontConfigTypeface.h"
+#include <new>
+
+std::unique_ptr<SkStreamAsset> SkTypeface_FCI::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = this->getIdentity().fTTCIndex;
+
+ if (fFontData) {
+ SkStreamAsset* stream = fFontData->getStream();
+ if (!stream) {
+ return nullptr;
+ }
+ return stream->duplicate();
+ }
+
+ return std::unique_ptr<SkStreamAsset>(fFCI->openStream(this->getIdentity()));
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FCI::onMakeFontData() const {
+ if (fFontData) {
+ return skstd::make_unique<SkFontData>(*fFontData);
+ }
+
+ const SkFontConfigInterface::FontIdentity& id = this->getIdentity();
+ return skstd::make_unique<SkFontData>(std::unique_ptr<SkStreamAsset>(fFCI->openStream(id)),
+ id.fTTCIndex, nullptr, 0);
+}
+
+void SkTypeface_FCI::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocalStream) const {
+ SkString name;
+ this->getFamilyName(&name);
+ desc->setFamilyName(name.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = SkToBool(fFontData);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontStyleSet_FCI : public SkFontStyleSet {
+public:
+ SkFontStyleSet_FCI() {}
+
+ int count() override { return 0; }
+ void getStyle(int index, SkFontStyle*, SkString* style) override { SkASSERT(false); }
+ SkTypeface* createTypeface(int index) override { SkASSERT(false); return nullptr; }
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override { return nullptr; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontRequestCache {
+public:
+ struct Request : public SkResourceCache::Key {
+ private:
+ Request(const char* name, size_t nameLen, const SkFontStyle& style) : fStyle(style) {
+ /** Pointer to just after the last field of this class. */
+ char* content = const_cast<char*>(SkTAfter<const char>(&this->fStyle));
+
+ // No holes.
+ SkASSERT(SkTAddOffset<char>(this, sizeof(SkResourceCache::Key) + keySize) == content);
+
+ // Has a size divisible by size of uint32_t.
+ SkASSERT((content - reinterpret_cast<char*>(this)) % sizeof(uint32_t) == 0);
+
+ size_t contentLen = SkAlign4(nameLen);
+ sk_careful_memcpy(content, name, nameLen);
+ sk_bzero(content + nameLen, contentLen - nameLen);
+ this->init(nullptr, 0, keySize + contentLen);
+ }
+ const SkFontStyle fStyle;
+ /** The sum of the sizes of the fields of this class. */
+ static const size_t keySize = sizeof(fStyle);
+
+ public:
+ static Request* Create(const char* name, const SkFontStyle& style) {
+ size_t nameLen = name ? strlen(name) : 0;
+ size_t contentLen = SkAlign4(nameLen);
+ char* storage = new char[sizeof(Request) + contentLen];
+ return new (storage) Request(name, nameLen, style);
+ }
+ void operator delete(void* storage) {
+ delete[] reinterpret_cast<char*>(storage);
+ }
+ };
+
+
+private:
+ struct Result : public SkResourceCache::Rec {
+ Result(Request* request, sk_sp<SkTypeface> typeface)
+ : fRequest(request), fFace(std::move(typeface)) {}
+ Result(Result&&) = default;
+ Result& operator=(Result&&) = default;
+
+ const Key& getKey() const override { return *fRequest; }
+ size_t bytesUsed() const override { return fRequest->size() + sizeof(fFace); }
+ const char* getCategory() const override { return "request_cache"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ std::unique_ptr<Request> fRequest;
+ sk_sp<SkTypeface> fFace;
+ };
+
+ SkResourceCache fCachedResults;
+
+public:
+ SkFontRequestCache(size_t maxSize) : fCachedResults(maxSize) {}
+
+ /** Takes ownership of request. It will be deleted when no longer needed. */
+ void add(sk_sp<SkTypeface> face, Request* request) {
+ fCachedResults.add(new Result(request, std::move(face)));
+ }
+ /** Does not take ownership of request. */
+ sk_sp<SkTypeface> findAndRef(Request* request) {
+ sk_sp<SkTypeface> face;
+ fCachedResults.find(*request, [](const SkResourceCache::Rec& rec, void* context) -> bool {
+ const Result& result = static_cast<const Result&>(rec);
+ sk_sp<SkTypeface>* face = static_cast<sk_sp<SkTypeface>*>(context);
+
+ *face = result.fFace;
+ return true;
+ }, &face);
+ return face;
+ }
+};
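+// Usage sketch (illustrative only; 'resolvedFace' is a hypothetical result):
+//   SkFontRequestCache cache(1 << 15);
+//   std::unique_ptr<SkFontRequestCache::Request> req(
+//       SkFontRequestCache::Request::Create("sans-serif", SkFontStyle::Normal()));
+//   sk_sp<SkTypeface> face = cache.findAndRef(req.get());
+//   if (!face) {
+//       cache.add(resolvedFace, req.release());
+//   }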
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_by_FontIdentity(SkTypeface* cachedTypeface, void* ctx) {
+ typedef SkFontConfigInterface::FontIdentity FontIdentity;
+ SkTypeface_FCI* cachedFCTypeface = static_cast<SkTypeface_FCI*>(cachedTypeface);
+ FontIdentity* identity = static_cast<FontIdentity*>(ctx);
+
+ return cachedFCTypeface->getIdentity() == *identity;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_FCI : public SkFontMgr {
+ sk_sp<SkFontConfigInterface> fFCI;
+ SkTypeface_FreeType::Scanner fScanner;
+
+ mutable SkMutex fMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ // The value of maxSize here is a compromise between cache hits and cache size.
+    // See https://crbug.com/424082#63 for the reasoning behind the current size.
+ static const size_t kMaxSize = 1 << 15;
+ mutable SkFontRequestCache fCache;
+
+public:
+ SkFontMgr_FCI(sk_sp<SkFontConfigInterface> fci)
+ : fFCI(std::move(fci))
+ , fCache(kMaxSize)
+ {}
+
+protected:
+ int onCountFamilies() const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char requestedFamilyName[],
+ const SkFontStyle& requestedStyle) const override
+ {
+ SkAutoMutexExclusive ama(fMutex);
+
+ SkFontConfigInterface::FontIdentity identity;
+ SkString outFamilyName;
+ SkFontStyle outStyle;
+ if (!fFCI->matchFamilyName(requestedFamilyName, requestedStyle,
+ &identity, &outFamilyName, &outStyle))
+ {
+ return nullptr;
+ }
+
+ // Check if a typeface with this FontIdentity is already in the FontIdentity cache.
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(find_by_FontIdentity, &identity);
+ if (!face) {
+ face.reset(SkTypeface_FCI::Create(fFCI, identity, std::move(outFamilyName), outStyle));
+ // Add this FontIdentity to the FontIdentity cache.
+ fTFCache.add(face);
+ }
+ return face.release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface*, const SkFontStyle&) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->onMakeFromStreamIndex(SkMemoryStream::Make(std::move(data)), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ const size_t length = stream->getLength();
+ if (!length) {
+ return nullptr;
+ }
+ if (length >= 1024 * 1024 * 1024) {
+            return nullptr; // reject overly large fonts (>= 1GB) for safety.
+ }
+
+ // TODO should the caller give us the style or should we get it from freetype?
+ SkString name;
+ SkFontStyle style;
+ bool isFixedPitch = false;
+ if (!fScanner.scanFont(stream.get(), 0, &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+
+ auto fontData = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return sk_sp<SkTypeface>(SkTypeface_FCI::Create(std::move(fontData), std::move(name),
+ style, isFixedPitch));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ const size_t length = stream->getLength();
+ if (!length) {
+ return nullptr;
+ }
+ if (length >= 1024 * 1024 * 1024) {
+            return nullptr; // reject overly large fonts (>= 1GB) for safety.
+ }
+
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), args.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, args.getVariationDesignPosition(),
+ axisValues, name);
+
+ auto fontData = skstd::make_unique<SkFontData>(std::move(stream),
+ args.getCollectionIndex(),
+ axisValues.get(),
+ axisDefinitions.count());
+ return sk_sp<SkTypeface>(SkTypeface_FCI::Create(std::move(fontData), std::move(name),
+ style, isFixedPitch));
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char requestedFamilyName[],
+ SkFontStyle requestedStyle) const override
+ {
+ SkAutoMutexExclusive ama(fMutex);
+
+ // Check if this request is already in the request cache.
+ using Request = SkFontRequestCache::Request;
+ std::unique_ptr<Request> request(Request::Create(requestedFamilyName, requestedStyle));
+ sk_sp<SkTypeface> face = fCache.findAndRef(request.get());
+ if (face) {
+ return sk_sp<SkTypeface>(face);
+ }
+
+ SkFontConfigInterface::FontIdentity identity;
+ SkString outFamilyName;
+ SkFontStyle outStyle;
+ if (!fFCI->matchFamilyName(requestedFamilyName, requestedStyle,
+ &identity, &outFamilyName, &outStyle))
+ {
+ return nullptr;
+ }
+
+ // Check if a typeface with this FontIdentity is already in the FontIdentity cache.
+ face = fTFCache.findByProcAndRef(find_by_FontIdentity, &identity);
+ if (!face) {
+ face.reset(SkTypeface_FCI::Create(fFCI, identity, std::move(outFamilyName), outStyle));
+ // Add this FontIdentity to the FontIdentity cache.
+ fTFCache.add(face);
+ }
+ // Add this request to the request cache.
+ fCache.add(face, request.release());
+
+ return face;
+ }
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci) {
+ SkASSERT(fci);
+ return sk_make_sp<SkFontMgr_FCI>(std::move(fci));
+}
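+// Usage sketch (illustrative): pair with a platform SkFontConfigInterface.
+//   sk_sp<SkFontMgr> mgr = SkFontMgr_New_FCI(SkFontConfigInterface::RefGlobal());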
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
new file mode 100644
index 0000000000..cb64ec1ed4
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/ports/SkFontMgr_FontConfigInterface.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ sk_sp<SkFontConfigInterface> fci(SkFontConfigInterface::RefGlobal());
+ if (!fci) {
+ return nullptr;
+ }
+ return SkFontMgr_New_FCI(std::move(fci));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
new file mode 100644
index 0000000000..5642402750
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/ports/SkFontMgr_android.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/ports/SkFontMgr_android_parser.h"
+
+#include <algorithm>
+#include <limits>
+
+class SkData;
+
+class SkTypeface_Android : public SkTypeface_FreeType {
+public:
+ SkTypeface_Android(const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName)
+ : INHERITED(style, isFixedPitch)
+ , fFamilyName(familyName)
+ { }
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = fFamilyName;
+ }
+
+ SkString fFamilyName;
+
+private:
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkTypeface_AndroidSystem : public SkTypeface_Android {
+public:
+ SkTypeface_AndroidSystem(const SkString& pathName,
+ const bool cacheFontFiles,
+ int index,
+ const SkFixed* axes, int axesCount,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName,
+ const SkTArray<SkLanguage, true>& lang,
+ FontVariant variantStyle)
+ : INHERITED(style, isFixedPitch, familyName)
+ , fPathName(pathName)
+ , fIndex(index)
+ , fAxes(axes, axesCount)
+ , fLang(lang)
+ , fVariantStyle(variantStyle)
+ , fFile(cacheFontFiles ? sk_fopen(fPathName.c_str(), kRead_SkFILE_Flag) : nullptr) {
+ if (cacheFontFiles) {
+ SkASSERT(fFile);
+ }
+ }
+
+ std::unique_ptr<SkStreamAsset> makeStream() const {
+ if (fFile) {
+ sk_sp<SkData> data(SkData::MakeFromFILE(fFile));
+ return data ? skstd::make_unique<SkMemoryStream>(std::move(data)) : nullptr;
+ }
+ return SkStream::MakeFromFile(fPathName.c_str());
+ }
+
+ virtual void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ SkASSERT(desc);
+ SkASSERT(serialize);
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *serialize = false;
+ }
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fIndex;
+ return this->makeStream();
+ }
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(this->makeStream(), fIndex,
+ fAxes.begin(), fAxes.count());
+ }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_make_sp<SkTypeface_AndroidSystem>(fPathName,
+ fFile,
+ fIndex,
+ data->getAxis(),
+ data->getAxisCount(),
+ this->fontStyle(),
+ this->isFixedPitch(),
+ fFamilyName,
+ fLang,
+ fVariantStyle);
+ }
+
+ const SkString fPathName;
+ int fIndex;
+ const SkSTArray<4, SkFixed, true> fAxes;
+ const SkSTArray<4, SkLanguage, true> fLang;
+ const FontVariant fVariantStyle;
+ SkAutoTCallVProc<FILE, sk_fclose> fFile;
+
+ typedef SkTypeface_Android INHERITED;
+};
+
+class SkTypeface_AndroidStream : public SkTypeface_Android {
+public:
+ SkTypeface_AndroidStream(std::unique_ptr<SkFontData> data,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName)
+ : INHERITED(style, isFixedPitch, familyName)
+ , fData(std::move(data))
+ { }
+
+ virtual void onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* serialize) const override {
+ SkASSERT(desc);
+ SkASSERT(serialize);
+ desc->setFamilyName(fFamilyName.c_str());
+ *serialize = true;
+ }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(*fData);
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_make_sp<SkTypeface_AndroidStream>(std::move(data),
+ this->fontStyle(),
+ this->isFixedPitch(),
+ fFamilyName);
+ }
+
+private:
+ const std::unique_ptr<const SkFontData> fData;
+ typedef SkTypeface_Android INHERITED;
+};
+
+class SkFontStyleSet_Android : public SkFontStyleSet {
+ typedef SkTypeface_FreeType::Scanner Scanner;
+
+public:
+ explicit SkFontStyleSet_Android(const FontFamily& family, const Scanner& scanner,
+ const bool cacheFontFiles) {
+        const SkString* canonicalFamilyName = nullptr;
+        if (family.fNames.count() > 0) {
+            canonicalFamilyName = &family.fNames[0];
+        }
+ fFallbackFor = family.fFallbackFor;
+
+ // TODO? make this lazy
+ for (int i = 0; i < family.fFonts.count(); ++i) {
+ const FontFileInfo& fontFile = family.fFonts[i];
+
+ SkString pathName(family.fBasePath);
+ pathName.append(fontFile.fFileName);
+
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(pathName.c_str());
+ if (!stream) {
+ SkDEBUGF("Requested font file %s does not exist or cannot be opened.\n",
+ pathName.c_str());
+ continue;
+ }
+
+ const int ttcIndex = fontFile.fIndex;
+ SkString familyName;
+ SkFontStyle style;
+ bool isFixedWidth;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), ttcIndex,
+ &familyName, &style, &isFixedWidth, &axisDefinitions))
+ {
+ SkDEBUGF("Requested font file %s exists, but is not a valid font.\n",
+ pathName.c_str());
+ continue;
+ }
+
+ int weight = fontFile.fWeight != 0 ? fontFile.fWeight : style.weight();
+ SkFontStyle::Slant slant = style.slant();
+ switch (fontFile.fStyle) {
+ case FontFileInfo::Style::kAuto: slant = style.slant(); break;
+ case FontFileInfo::Style::kNormal: slant = SkFontStyle::kUpright_Slant; break;
+ case FontFileInfo::Style::kItalic: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ style = SkFontStyle(weight, style.width(), slant);
+
+ uint32_t variant = family.fVariant;
+ if (kDefault_FontVariant == variant) {
+ variant = kCompact_FontVariant | kElegant_FontVariant;
+ }
+
+ // The first specified family name overrides the family name found in the font.
+ // TODO: SkTypeface_AndroidSystem::onCreateFamilyNameIterator should return
+ // all of the specified family names in addition to the names found in the font.
+            if (canonicalFamilyName != nullptr) {
+                familyName = *canonicalFamilyName;
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ SkFontArguments::VariationPosition position = {
+ fontFile.fVariationDesignPosition.begin(),
+ fontFile.fVariationDesignPosition.count()
+ };
+ Scanner::computeAxisValues(axisDefinitions, position,
+ axisValues, familyName);
+
+ fStyles.push_back().reset(new SkTypeface_AndroidSystem(
+ pathName, cacheFontFiles, ttcIndex, axisValues.get(), axisDefinitions.count(),
+ style, isFixedWidth, familyName, family.fLanguages, variant));
+ }
+ }
+
+ int count() override {
+ return fStyles.count();
+ }
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ if (index < 0 || fStyles.count() <= index) {
+ return;
+ }
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+ }
+ SkTypeface_AndroidSystem* createTypeface(int index) override {
+ if (index < 0 || fStyles.count() <= index) {
+ return nullptr;
+ }
+ return SkRef(fStyles[index].get());
+ }
+
+ SkTypeface_AndroidSystem* matchStyle(const SkFontStyle& pattern) override {
+ return static_cast<SkTypeface_AndroidSystem*>(this->matchStyleCSS3(pattern));
+ }
+
+private:
+ SkTArray<sk_sp<SkTypeface_AndroidSystem>> fStyles;
+ SkString fFallbackFor;
+
+ friend struct NameToFamily;
+ friend class SkFontMgr_Android;
+
+ typedef SkFontStyleSet INHERITED;
+};
+
+/** On Android a single family can have many names, but our API assumes unique names.
+ * Map names to the back end so that all names for a given family refer to the same
+ * (non-replicated) set of typefaces.
+ * SkTDict<> doesn't let us do index-based lookup, so we write our own mapping.
+ */
+struct NameToFamily {
+ SkString name;
+ SkFontStyleSet_Android* styleSet;
+};
+
+class SkFontMgr_Android : public SkFontMgr {
+public:
+ SkFontMgr_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ SkTDArray<FontFamily*> families;
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem != custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ if (!custom ||
+ (custom && SkFontMgr_Android_CustomFonts::kOnlyCustom != custom->fSystemFontUse))
+ {
+ SkFontMgr_Android_Parser::GetSystemFontFamilies(families);
+ }
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem == custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ this->buildNameToFamilyMap(families, custom ? custom->fIsolated : false);
+ this->findDefaultStyleSet();
+ families.deleteAll();
+ }
+
+protected:
+    /** Returns the number of unique names among the families,
+     *  not the number of families themselves.
+     */
+ int onCountFamilies() const override {
+ return fNameToFamilyMap.count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if (index < 0 || fNameToFamilyMap.count() <= index) {
+ familyName->reset();
+ return;
+ }
+ familyName->set(fNameToFamilyMap[index].name);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if (index < 0 || fNameToFamilyMap.count() <= index) {
+ return nullptr;
+ }
+ return SkRef(fNameToFamilyMap[index].styleSet);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ SkAutoAsciiToLC tolc(familyName);
+ for (int i = 0; i < fNameToFamilyMap.count(); ++i) {
+ if (fNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fNameToFamilyMap[i].styleSet);
+ }
+ }
+ // TODO: eventually we should not need to name fallback families.
+ for (int i = 0; i < fFallbackNameToFamilyMap.count(); ++i) {
+ if (fFallbackNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fFallbackNameToFamilyMap[i].styleSet);
+ }
+ }
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override {
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(style);
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* typeface,
+ const SkFontStyle& style) const override {
+ for (int i = 0; i < fStyleSets.count(); ++i) {
+ for (int j = 0; j < fStyleSets[i]->fStyles.count(); ++j) {
+ if (fStyleSets[i]->fStyles[j].get() == typeface) {
+ return fStyleSets[i]->matchStyle(style);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ static sk_sp<SkTypeface_AndroidSystem> find_family_style_character(
+ const SkString& familyName,
+ const SkTArray<NameToFamily, true>& fallbackNameToFamilyMap,
+ const SkFontStyle& style, bool elegant,
+ const SkString& langTag, SkUnichar character)
+ {
+ for (int i = 0; i < fallbackNameToFamilyMap.count(); ++i) {
+ SkFontStyleSet_Android* family = fallbackNameToFamilyMap[i].styleSet;
+ if (familyName != family->fFallbackFor) {
+ continue;
+ }
+ sk_sp<SkTypeface_AndroidSystem> face(family->matchStyle(style));
+
+ if (!langTag.isEmpty() &&
+ std::none_of(face->fLang.begin(), face->fLang.end(), [&](SkLanguage lang){
+ return lang.getTag().startsWith(langTag.c_str());
+ }))
+ {
+ continue;
+ }
+
+ if (SkToBool(face->fVariantStyle & kElegant_FontVariant) != elegant) {
+ continue;
+ }
+
+ if (face->unicharToGlyph(character) != 0) {
+ return face;
+ }
+ }
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override
+ {
+ // The variant 'elegant' is 'not squashed', 'compact' is 'stays in ascent/descent'.
+ // The variant 'default' means 'compact and elegant'.
+ // As a result, it is not possible to know the variant context from the font alone.
+ // TODO: add 'is_elegant' and 'is_compact' bits to 'style' request.
+
+ SkString familyNameString(familyName);
+ for (const SkString& currentFamilyName : { familyNameString, SkString() }) {
+ // The first time match anything elegant, second time anything not elegant.
+ for (int elegant = 2; elegant --> 0;) {
+ for (int bcp47Index = bcp47Count; bcp47Index --> 0;) {
+ SkLanguage lang(bcp47[bcp47Index]);
+ while (!lang.getTag().isEmpty()) {
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(currentFamilyName, fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ lang.getTag(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+
+ lang = lang.getParent();
+ }
+ }
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(currentFamilyName, fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ SkString(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+ }
+ }
+ return nullptr;
+ }
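+    // Example (illustrative): with bcp47 = { "zh-Hans-CN" } the loops above
+    // try "zh-Hans-CN", then its parents "zh-Hans" and "zh" via
+    // SkLanguage::getParent, before the final language-less pass.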
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->makeFromStream(std::unique_ptr<SkStreamAsset>(new SkMemoryStream(std::move(data))),
+ ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream.get() ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(stream.get(), ttcIndex, &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return sk_sp<SkTypeface>(new SkTypeface_AndroidStream(std::move(data),
+ style, isFixedPitch, name));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), args.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, args.getVariationDesignPosition(),
+ axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), args.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return sk_sp<SkTypeface>(new SkTypeface_AndroidStream(std::move(data),
+ style, isFixedPitch, name));
+ }
+
+ sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData> data) const override {
+ SkStreamAsset* stream(data->getStream());
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(stream, data->getIndex(), &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+ return sk_sp<SkTypeface>(new SkTypeface_AndroidStream(std::move(data),
+ style, isFixedPitch, name));
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ // On Android, we must return nullptr when we can't find the requested
+ // named typeface so that the system/app can provide their own recovery
+ // mechanism. On other platforms we'd provide a typeface from the
+ // default family instead.
+ return sk_sp<SkTypeface>(this->onMatchFamilyStyle(familyName, style));
+ }
+ return sk_sp<SkTypeface>(fDefaultStyleSet->matchStyle(style));
+ }
+
+
+private:
+
+ SkTypeface_FreeType::Scanner fScanner;
+
+ SkTArray<sk_sp<SkFontStyleSet_Android>> fStyleSets;
+ sk_sp<SkFontStyleSet> fDefaultStyleSet;
+
+ SkTArray<NameToFamily, true> fNameToFamilyMap;
+ SkTArray<NameToFamily, true> fFallbackNameToFamilyMap;
+
+ void addFamily(FontFamily& family, const bool isolated, int familyIndex) {
+ SkTArray<NameToFamily, true>* nameToFamily = &fNameToFamilyMap;
+ if (family.fIsFallbackFont) {
+ nameToFamily = &fFallbackNameToFamilyMap;
+
+ if (0 == family.fNames.count()) {
+ SkString& fallbackName = family.fNames.push_back();
+ fallbackName.printf("%.2x##fallback", familyIndex);
+ }
+ }
+
+ sk_sp<SkFontStyleSet_Android> newSet =
+ sk_make_sp<SkFontStyleSet_Android>(family, fScanner, isolated);
+ if (0 == newSet->count()) {
+ return;
+ }
+
+ for (const SkString& name : family.fNames) {
+ nameToFamily->emplace_back(NameToFamily{name, newSet.get()});
+ }
+ fStyleSets.emplace_back(std::move(newSet));
+ }
+ void buildNameToFamilyMap(SkTDArray<FontFamily*> families, const bool isolated) {
+ int familyIndex = 0;
+ for (FontFamily* family : families) {
+ addFamily(*family, isolated, familyIndex++);
+ family->fallbackFamilies.foreach([this, isolated, &familyIndex]
+ (SkString, std::unique_ptr<FontFamily>* fallbackFamily) {
+ addFamily(*(*fallbackFamily).get(), isolated, familyIndex++);
+ }
+ );
+ }
+ }
+
+ void findDefaultStyleSet() {
+ SkASSERT(!fStyleSets.empty());
+
+ static const char* defaultNames[] = { "sans-serif" };
+ for (const char* defaultName : defaultNames) {
+ fDefaultStyleSet.reset(this->onMatchFamily(defaultName));
+ if (fDefaultStyleSet) {
+ break;
+ }
+ }
+ if (nullptr == fDefaultStyleSet) {
+ fDefaultStyleSet = fStyleSets[0];
+ }
+ SkASSERT(fDefaultStyleSet);
+ }
+
+ typedef SkFontMgr INHERITED;
+};
+
+#ifdef SK_DEBUG
+static char const * const gSystemFontUseStrings[] = {
+ "OnlyCustom", "PreferCustom", "PreferSystem"
+};
+#endif
+
+sk_sp<SkFontMgr> SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ if (custom) {
+ SkASSERT(0 <= custom->fSystemFontUse);
+ SkASSERT(custom->fSystemFontUse < SK_ARRAY_COUNT(gSystemFontUseStrings));
+ SkDEBUGF("SystemFontUse: %s BasePath: %s Fonts: %s FallbackFonts: %s\n",
+ gSystemFontUseStrings[custom->fSystemFontUse],
+ custom->fBasePath,
+ custom->fFontsXml,
+ custom->fFallbackFontsXml);
+ }
+ return sk_make_sp<SkFontMgr_Android>(custom);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
new file mode 100644
index 0000000000..9ec10a9eb9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_android.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Android(nullptr);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
new file mode 100644
index 0000000000..6f4ec7632b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
@@ -0,0 +1,836 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Despite the name and location, this is portable code.
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTLogic.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkTSearch.h"
+#include "src/ports/SkFontMgr_android_parser.h"
+
+#include <expat.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#define LMP_SYSTEM_FONTS_FILE "/system/etc/fonts.xml"
+#define OLD_SYSTEM_FONTS_FILE "/system/etc/system_fonts.xml"
+#define FALLBACK_FONTS_FILE "/system/etc/fallback_fonts.xml"
+#define VENDOR_FONTS_FILE "/vendor/etc/fallback_fonts.xml"
+
+#define LOCALE_FALLBACK_FONTS_SYSTEM_DIR "/system/etc"
+#define LOCALE_FALLBACK_FONTS_VENDOR_DIR "/vendor/etc"
+#define LOCALE_FALLBACK_FONTS_PREFIX "fallback_fonts-"
+#define LOCALE_FALLBACK_FONTS_SUFFIX ".xml"
+
+#ifndef SK_FONT_FILE_PREFIX
+# define SK_FONT_FILE_PREFIX "/fonts/"
+#endif
+
+/**
+ * This file contains TWO 'familyset' handlers:
+ * One for JB and earlier which works with
+ * /system/etc/system_fonts.xml
+ * /system/etc/fallback_fonts.xml
+ * /vendor/etc/fallback_fonts.xml
+ * /system/etc/fallback_fonts-XX.xml
+ * /vendor/etc/fallback_fonts-XX.xml
+ * and the other for LMP and later which works with
+ * /system/etc/fonts.xml
+ *
+ * If the 'familyset' 'version' attribute is 21 or higher the LMP parser is used, otherwise the JB.
+ */
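+/** Illustrative LMP-style input (example only, not from the original source):
+ *
+ *    <familyset version="21">
+ *      <family name="sans-serif">
+ *        <font weight="400" style="normal">Roboto-Regular.ttf</font>
+ *      </family>
+ *      <alias name="arial" to="sans-serif"/>
+ *    </familyset>
+ */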
+
+struct FamilyData;
+
+struct TagHandler {
+ /** Called at the start tag.
+     *  Called immediately after the parent tag returns this handler from a call to 'tag'.
+ * Allows setting up for handling the tag content and processing attributes.
+ * If nullptr, will not be called.
+ */
+ void (*start)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** Called at the end tag.
+ * Allows post-processing of any accumulated information.
+ * This will be the last call made in relation to the current tag.
+ * If nullptr, will not be called.
+ */
+ void (*end)(FamilyData* data, const char* tag);
+
+ /** Called when a nested tag is encountered.
+ * This is responsible for determining how to handle the tag.
+ * If the tag is not recognized, return nullptr to skip the tag.
+ * If nullptr, all nested tags will be skipped.
+ */
+ const TagHandler* (*tag)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** The character handler for this tag.
+ * This is only active for character data contained directly in this tag (not sub-tags).
+ * The first parameter will be castable to a FamilyData*.
+ * If nullptr, any character data in this tag will be ignored.
+ */
+ XML_CharacterDataHandler chars;
+};
+
+/** Represents the current parsing state. */
+struct FamilyData {
+ FamilyData(XML_Parser parser, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback, const char* filename,
+ const TagHandler* topLevelHandler)
+ : fParser(parser)
+ , fFamilies(families)
+ , fCurrentFamily(nullptr)
+ , fCurrentFontInfo(nullptr)
+ , fVersion(0)
+ , fBasePath(basePath)
+ , fIsFallback(isFallback)
+ , fFilename(filename)
+ , fDepth(1)
+ , fSkip(0)
+ , fHandler(&topLevelHandler, 1)
+ { }
+
+ XML_Parser fParser; // The expat parser doing the work, owned by caller
+ SkTDArray<FontFamily*>& fFamilies; // The array to append families, owned by caller
+ std::unique_ptr<FontFamily> fCurrentFamily; // The family being created, owned by this
+ FontFileInfo* fCurrentFontInfo; // The info being created, owned by fCurrentFamily
+ int fVersion; // The version of the file parsed.
+ const SkString& fBasePath; // The current base path.
+ const bool fIsFallback; // The file being parsed is a fallback file
+ const char* fFilename; // The name of the file currently being parsed.
+
+ int fDepth; // The current element depth of the parse.
+ int fSkip; // The depth to stop skipping, 0 if not skipping.
+ SkTDArray<const TagHandler*> fHandler; // The stack of current tag handlers.
+};
+
+static bool memeq(const char* s1, const char* s2, size_t n1, size_t n2) {
+ return n1 == n2 && 0 == memcmp(s1, s2, n1);
+}
+#define MEMEQ(c, s, n) memeq(c, s, sizeof(c) - 1, n)
+
+#define ATTS_NON_NULL(a, i) (a[i] != nullptr && a[i+1] != nullptr)
+
+#define SK_FONTMGR_ANDROID_PARSER_PREFIX "[SkFontMgr Android Parser] "
+
+#define SK_FONTCONFIGPARSER_WARNING(message, ...) \
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d: warning: " message "\n", self->fFilename, \
+ XML_GetCurrentLineNumber(self->fParser), XML_GetCurrentColumnNumber(self->fParser), \
+ ##__VA_ARGS__)
+
+static bool is_whitespace(char c) {
+    return c == ' ' || c == '\n' || c == '\r' || c == '\t';
+}
+
+static void trim_string(SkString* s) {
+ char* str = s->writable_str();
+ const char* start = str; // start is inclusive
+ const char* end = start + s->size(); // end is exclusive
+ while (is_whitespace(*start)) { ++start; }
+ if (start != end) {
+ --end; // make end inclusive
+ while (is_whitespace(*end)) { --end; }
+ ++end; // make end exclusive
+ }
+ size_t len = end - start;
+ memmove(str, start, len);
+ s->resize(len);
+}
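+// Example (illustrative): trim_string turns "  Roboto-Regular.ttf \n" into
+// "Roboto-Regular.ttf"; fontHandler below applies it to the accumulated
+// character data.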
+
+namespace lmpParser {
+
+static const TagHandler axisHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ FontFileInfo& file = *self->fCurrentFontInfo;
+ SkFourByteTag axisTag = SkSetFourByteTag('\0','\0','\0','\0');
+ SkFixed axisStyleValue = 0;
+ bool axisTagIsValid = false;
+ bool axisStyleValueIsValid = false;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("tag", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (valueLen == 4) {
+ axisTag = SkSetFourByteTag(value[0], value[1], value[2], value[3]);
+ axisTagIsValid = true;
+ for (int j = 0; j < file.fVariationDesignPosition.count() - 1; ++j) {
+ if (file.fVariationDesignPosition[j].axis == axisTag) {
+ axisTagIsValid = false;
+ SK_FONTCONFIGPARSER_WARNING("'%c%c%c%c' axis specified more than once",
+ (axisTag >> 24) & 0xFF,
+ (axisTag >> 16) & 0xFF,
+ (axisTag >> 8) & 0xFF,
+ (axisTag ) & 0xFF);
+ }
+ }
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis tag", value);
+ }
+ } else if (MEMEQ("stylevalue", name, nameLen)) {
+ if (parse_fixed<16>(value, &axisStyleValue)) {
+ axisStyleValueIsValid = true;
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis stylevalue", value);
+ }
+ }
+ }
+ if (axisTagIsValid && axisStyleValueIsValid) {
+ auto& coordinate = file.fVariationDesignPosition.push_back();
+ coordinate.axis = axisTag;
+ coordinate.value = SkFixedToScalar(axisStyleValue);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler fontHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'weight' (non-negative integer) [default 0]
+ // 'style' ("normal", "italic") [default "auto"]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFileInfo& file = self->fCurrentFamily->fFonts.push_back();
+ self->fCurrentFontInfo = &file;
+ SkString fallbackFor;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fWeight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ } else if (MEMEQ("style", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (MEMEQ("normal", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kNormal;
+ } else if (MEMEQ("italic", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kItalic;
+ }
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ } else if (MEMEQ("fallbackFor", name, nameLen)) {
+                /** fallbackFor specifies a family fallback and logically belongs on the family element. */
+ fallbackFor = value;
+ }
+ }
+ if (!fallbackFor.isEmpty()) {
+ std::unique_ptr<FontFamily>* fallbackFamily =
+ self->fCurrentFamily->fallbackFamilies.find(fallbackFor);
+ if (!fallbackFamily) {
+ std::unique_ptr<FontFamily> newFallbackFamily(
+ new FontFamily(self->fCurrentFamily->fBasePath, true));
+ fallbackFamily = self->fCurrentFamily->fallbackFamilies.set(
+ fallbackFor, std::move(newFallbackFamily));
+ (*fallbackFamily)->fLanguages = self->fCurrentFamily->fLanguages;
+ (*fallbackFamily)->fVariant = self->fCurrentFamily->fVariant;
+ (*fallbackFamily)->fOrder = self->fCurrentFamily->fOrder;
+ (*fallbackFamily)->fFallbackFor = fallbackFor;
+ }
+ self->fCurrentFontInfo = &(*fallbackFamily)->fFonts.emplace_back(file);
+ self->fCurrentFamily->fFonts.pop_back();
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ trim_string(&self->fCurrentFontInfo->fFileName);
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("axis", tag, len)) {
+ return &axisHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) [optional]
+ // 'lang' (space separated string) [default ""]
+ // 'variant' ("elegant", "compact") [default "default"]
+ // If there is no name, this is a fallback only font.
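+        // An illustrative element this handler accepts (the name is hypothetical):
+        //   <family name="sans-serif" lang="und-Latn" variant="elegant">...</family>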
+ FontFamily* family = new FontFamily(self->fBasePath, true);
+ self->fCurrentFamily.reset(family);
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ family->fNames.push_back().set(tolc.lc());
+ family->fIsFallbackFont = false;
+ } else if (MEMEQ("lang", name, nameLen)) {
+ size_t i = 0;
+ while (true) {
+ for (; i < valueLen && is_whitespace(value[i]); ++i) { }
+ if (i == valueLen) { break; }
+ size_t j;
+ for (j = i + 1; j < valueLen && !is_whitespace(value[j]); ++j) { }
+ family->fLanguages.emplace_back(value + i, j - i);
+ i = j;
+ if (i == valueLen) { break; }
+ }
+ } else if (MEMEQ("variant", name, nameLen)) {
+ if (MEMEQ("elegant", value, valueLen)) {
+ family->fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ family->fVariant = kCompact_FontVariant;
+ }
+ }
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("font", tag, len)) {
+ return &fontHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static FontFamily* find_family(FamilyData* self, const SkString& familyName) {
+ for (int i = 0; i < self->fFamilies.count(); i++) {
+ FontFamily* candidate = self->fFamilies[i];
+ for (int j = 0; j < candidate->fNames.count(); j++) {
+ if (candidate->fNames[j] == familyName) {
+ return candidate;
+ }
+ }
+ }
+ return nullptr;
+}
+
+static const TagHandler aliasHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) introduces a new family name.
+ // 'to' (string) specifies which (previous) family to alias
+ // 'weight' (non-negative integer) [optional]
+ // If it *does not* have a weight, 'name' is an alias for the entire 'to' family.
+ // If it *does* have a weight, 'name' is a new family consisting of
+ // the font(s) with 'weight' from the 'to' family.
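+        // Illustrative examples (family names are hypothetical):
+        //   <alias name="helvetica" to="sans-serif"/>                     adds a name
+        //   <alias name="sans-serif-heavy" to="sans-serif" weight="900"/> filtered family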
+
+ SkString aliasName;
+ SkString to;
+ int weight = 0;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ aliasName.set(tolc.lc());
+ } else if (MEMEQ("to", name, nameLen)) {
+ to.set(value);
+ } else if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &weight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ }
+ }
+
+ // Assumes that the named family is already declared
+ FontFamily* targetFamily = find_family(self, to);
+ if (!targetFamily) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' alias target not found", to.c_str());
+ return;
+ }
+
+ if (weight) {
+ FontFamily* family = new FontFamily(targetFamily->fBasePath, self->fIsFallback);
+ family->fNames.push_back().set(aliasName);
+
+ for (int i = 0; i < targetFamily->fFonts.count(); i++) {
+ if (targetFamily->fFonts[i].fWeight == weight) {
+ family->fFonts.push_back(targetFamily->fFonts[i]);
+ }
+ }
+ *self->fFamilies.append() = family;
+ } else {
+ targetFamily->fNames.push_back().set(aliasName);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) { },
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ } else if (MEMEQ("alias", tag, len)) {
+ return &aliasHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // namespace lmpParser
+
+namespace jbParser {
+
+static const TagHandler fileHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'variant' ("elegant", "compact") [default "default"]
+ // 'lang' (string) [default ""]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFamily& currentFamily = *self->fCurrentFamily.get();
+ FontFileInfo& newFileInfo = currentFamily.fFonts.push_back();
+ if (attributes) {
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("variant", name, nameLen)) {
+ const FontVariant prevVariant = currentFamily.fVariant;
+ if (MEMEQ("elegant", value, valueLen)) {
+ currentFamily.fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ currentFamily.fVariant = kCompact_FontVariant;
+ }
+ if (currentFamily.fFonts.count() > 1 && currentFamily.fVariant != prevVariant) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected variant found\n"
+ "Note: Every font file within a family must have identical variants.",
+ value);
+ }
+
+ } else if (MEMEQ("lang", name, nameLen)) {
+ SkLanguage currentLanguage = SkLanguage(value, valueLen);
+ bool showWarning = false;
+ if (currentFamily.fLanguages.empty()) {
+ showWarning = (currentFamily.fFonts.count() > 1);
+ currentFamily.fLanguages.push_back(std::move(currentLanguage));
+ } else if (currentFamily.fLanguages[0] != currentLanguage) {
+ showWarning = true;
+ currentFamily.fLanguages[0] = std::move(currentLanguage);
+ }
+ if (showWarning) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected language found\n"
+ "Note: Every font file within a family must have identical languages.",
+ value);
+ }
+
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &newFileInfo.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ }
+ }
+ }
+ self->fCurrentFontInfo = &newFileInfo;
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler fileSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("file", tag, len)) {
+ return &fileHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler nameHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // The character data should be a name for the font.
+ self->fCurrentFamily->fNames.push_back();
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SkAutoAsciiToLC tolc(s, len);
+ self->fCurrentFamily->fNames.back().append(tolc.lc(), len);
+ }
+};
+
+static const TagHandler nameSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("name", tag, len)) {
+ return &nameHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ self->fCurrentFamily.reset(new FontFamily(self->fBasePath, self->fIsFallback));
+ // 'order' (non-negative integer) [default -1]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* value = attributes[i+1];
+ parse_non_negative_integer(value, &self->fCurrentFamily->fOrder);
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("nameset", tag, len)) {
+ return &nameSetHandler;
+ } else if (MEMEQ("fileset", tag, len)) {
+ return &fileSetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // namespace jbParser
+
+static const TagHandler topLevelHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("familyset", tag, len)) {
+ // 'version' (non-negative integer) [default 0]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("version", name, nameLen)) {
+ const char* value = attributes[i+1];
+ if (parse_non_negative_integer(value, &self->fVersion)) {
+ if (self->fVersion >= 21) {
+ return &lmpParser::familySetHandler;
+ }
+ }
+ }
+ }
+ return &jbParser::familySetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
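+
+// For reference, a sketch of a minimal document the handlers above accept
+// (tags per the parsers in this file; the values are illustrative only):
+//
+//   <familyset version="21">                      <!-- version >= 21: lmpParser -->
+//     <family name="sans-serif">
+//       <font weight="400" style="normal">SomeFont-Regular.ttf</font>
+//     </family>
+//     <alias name="helvetica" to="sans-serif"/>
+//   </familyset>
+//
+// Files with version < 21 (or no version) take the jbParser path instead, using
+// <family><nameset><name>...</name></nameset><fileset><file>...</file></fileset></family>.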
+
+static void XMLCALL start_element_handler(void *data, const char *tag, const char **attributes) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+
+ if (!self->fSkip) {
+ const TagHandler* parent = self->fHandler.top();
+ const TagHandler* child = parent->tag ? parent->tag(self, tag, attributes) : nullptr;
+ if (child) {
+ if (child->start) {
+ child->start(self, tag, attributes);
+ }
+ self->fHandler.push_back(child);
+ XML_SetCharacterDataHandler(self->fParser, child->chars);
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' tag not recognized, skipping", tag);
+ XML_SetCharacterDataHandler(self->fParser, nullptr);
+ self->fSkip = self->fDepth;
+ }
+ }
+
+ ++self->fDepth;
+}
+
+static void XMLCALL end_element_handler(void* data, const char* tag) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ --self->fDepth;
+
+ if (!self->fSkip) {
+ const TagHandler* child = self->fHandler.top();
+ if (child->end) {
+ child->end(self, tag);
+ }
+ self->fHandler.pop();
+ const TagHandler* parent = self->fHandler.top();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+
+ if (self->fSkip == self->fDepth) {
+ self->fSkip = 0;
+ const TagHandler* parent = self->fHandler.top();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+}
+
+static void XMLCALL xml_entity_decl_handler(void *data,
+ const XML_Char *entityName,
+ int is_parameter_entity,
+ const XML_Char *value,
+ int value_length,
+ const XML_Char *base,
+ const XML_Char *systemId,
+ const XML_Char *publicId,
+ const XML_Char *notationName)
+{
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SK_FONTCONFIGPARSER_WARNING("'%s' entity declaration found, stopping processing", entityName);
+ XML_StopParser(self->fParser, XML_FALSE);
+}
+
+static const XML_Memory_Handling_Suite sk_XML_alloc = {
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free
+};
+
+/**
+ * This function parses the given filename and stores the results in the given
+ * families array. Returns the version of the file, or a negative value if the
+ * file does not exist or cannot be parsed.
+ */
+static int parse_config_file(const char* filename, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback)
+{
+ SkFILEStream file(filename);
+
+ // Some of the files we attempt to parse (in particular, /vendor/etc/fallback_fonts.xml)
+ // are optional - failure here is okay because one of these optional files may not exist.
+ if (!file.isValid()) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "'%s' could not be opened\n", filename);
+ return -1;
+ }
+
+ SkAutoTCallVProc<skstd::remove_pointer_t<XML_Parser>, XML_ParserFree> parser(
+ XML_ParserCreate_MM(nullptr, &sk_XML_alloc, nullptr));
+ if (!parser) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not create XML parser\n");
+ return -1;
+ }
+
+ FamilyData self(parser, families, basePath, isFallback, filename, &topLevelHandler);
+ XML_SetUserData(parser, &self);
+
+ // Disable entity processing, to inhibit internal entity expansion. See expat CVE-2013-0340
+ XML_SetEntityDeclHandler(parser, xml_entity_decl_handler);
+
+ // Start parsing oldschool; switch these in flight if we detect a newer version of the file.
+ XML_SetElementHandler(parser, start_element_handler, end_element_handler);
+
+ // One would assume it would be faster to have a buffer on the stack and call XML_Parse.
+ // But XML_Parse will call XML_GetBuffer anyway and memmove the passed buffer into it.
+ // (Unless XML_CONTEXT_BYTES is undefined, but all users define it.)
+ // In debug, buffer a small odd number of bytes to detect slicing in XML_CharacterDataHandler.
+ static const int bufferSize = 512 SkDEBUGCODE( - 507);
+ bool done = false;
+ while (!done) {
+ void* buffer = XML_GetBuffer(parser, bufferSize);
+ if (!buffer) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not buffer enough to continue\n");
+ return -1;
+ }
+ size_t len = file.read(buffer, bufferSize);
+ done = file.isAtEnd();
+ XML_Status status = XML_ParseBuffer(parser, len, done);
+ if (XML_STATUS_ERROR == status) {
+ XML_Error error = XML_GetErrorCode(parser);
+ int line = XML_GetCurrentLineNumber(parser);
+ int column = XML_GetCurrentColumnNumber(parser);
+ const XML_LChar* errorString = XML_ErrorString(error);
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d error %d: %s.\n",
+ filename, line, column, error, errorString);
+ return -1;
+ }
+ }
+ return self.fVersion;
+}
+
+/** Returns the version of the system font file actually found, negative if none. */
+static int append_system_font_families(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath)
+{
+ int initialCount = fontFamilies.count();
+ int version = parse_config_file(LMP_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ if (version < 0 || fontFamilies.count() == initialCount) {
+ version = parse_config_file(OLD_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ }
+ return version;
+}
+
+/**
+ * In some versions of Android prior to Android 4.2 (JellyBean MR1 at API
+ * Level 17) the fallback fonts for certain locales were encoded in their own
+ * XML files with a suffix that identified the locale. We search the provided
+ * directory for those files, add all of their entries to the fallback chain, and
+ * include the locale as part of each entry.
+ */
+static void append_fallback_font_families_for_locale(SkTDArray<FontFamily*>& fallbackFonts,
+ const char* dir,
+ const SkString& basePath)
+{
+ SkOSFile::Iter iter(dir, nullptr);
+ SkString fileName;
+ while (iter.next(&fileName, false)) {
+ // The size of the prefix and suffix.
+ static const size_t fixedLen = sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1
+ + sizeof(LOCALE_FALLBACK_FONTS_SUFFIX) - 1;
+
+ // The size of the prefix, suffix, and a minimum valid language code
+ static const size_t minSize = fixedLen + 2;
+
+ if (fileName.size() < minSize ||
+ !fileName.startsWith(LOCALE_FALLBACK_FONTS_PREFIX) ||
+ !fileName.endsWith(LOCALE_FALLBACK_FONTS_SUFFIX))
+ {
+ continue;
+ }
+
+ SkString locale(fileName.c_str() + sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1,
+ fileName.size() - fixedLen);
+
+ SkString absoluteFilename;
+ absoluteFilename.printf("%s/%s", dir, fileName.c_str());
+
+ SkTDArray<FontFamily*> langSpecificFonts;
+ parse_config_file(absoluteFilename.c_str(), langSpecificFonts, basePath, true);
+
+ for (int i = 0; i < langSpecificFonts.count(); ++i) {
+ FontFamily* family = langSpecificFonts[i];
+ family->fLanguages.emplace_back(locale);
+ *fallbackFonts.append() = family;
+ }
+ }
+}
+
+static void append_system_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ parse_config_file(FALLBACK_FONTS_FILE, fallbackFonts, basePath, true);
+ append_fallback_font_families_for_locale(fallbackFonts,
+ LOCALE_FALLBACK_FONTS_SYSTEM_DIR,
+ basePath);
+}
+
+static void mixin_vendor_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ SkTDArray<FontFamily*> vendorFonts;
+ parse_config_file(VENDOR_FONTS_FILE, vendorFonts, basePath, true);
+ append_fallback_font_families_for_locale(vendorFonts,
+ LOCALE_FALLBACK_FONTS_VENDOR_DIR,
+ basePath);
+
+ // This loop inserts the vendor fallback fonts in the correct order in the
+ // overall fallbacks list.
+ int currentOrder = -1;
+ for (int i = 0; i < vendorFonts.count(); ++i) {
+ FontFamily* family = vendorFonts[i];
+ int order = family->fOrder;
+ if (order < 0) {
+ if (currentOrder < 0) {
+ // Default case - just add it to the end of the fallback list
+ *fallbackFonts.append() = family;
+ } else {
+ // no order specified on this font, but we're incrementing the order
+ // based on an earlier order insertion request
+ *fallbackFonts.insert(currentOrder++) = family;
+ }
+ } else {
+ // Add the font into the fallback list in the specified order. Set
+ // currentOrder for correct placement of other fonts in the vendor list.
+ *fallbackFonts.insert(order) = family;
+ currentOrder = order + 1;
+ }
+ }
+}
+
+void SkFontMgr_Android_Parser::GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies) {
+ // Version 21 of the system font configuration does not need any fallback configuration files.
+ SkString basePath(getenv("ANDROID_ROOT"));
+ basePath.append(SK_FONT_FILE_PREFIX, sizeof(SK_FONT_FILE_PREFIX) - 1);
+
+ if (append_system_font_families(fontFamilies, basePath) >= 21) {
+ return;
+ }
+
+ // Append all the fallback fonts to system fonts
+ SkTDArray<FontFamily*> fallbackFonts;
+ append_system_fallback_font_families(fallbackFonts, basePath);
+ mixin_vendor_fallback_font_families(fallbackFonts, basePath);
+ fontFamilies.append(fallbackFonts.count(), fallbackFonts.begin());
+}
+
+void SkFontMgr_Android_Parser::GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir)
+{
+ if (fontsXml) {
+ parse_config_file(fontsXml, fontFamilies, basePath, false);
+ }
+ if (fallbackFontsXml) {
+ parse_config_file(fallbackFontsXml, fontFamilies, basePath, true);
+ }
+ if (langFallbackFontsDir) {
+ append_fallback_font_families_for_locale(fontFamilies,
+ langFallbackFontsDir,
+ basePath);
+ }
+}
+
+SkLanguage SkLanguage::getParent() const {
+ SkASSERT(!fTag.isEmpty());
+ const char* tag = fTag.c_str();
+
+ // strip off the rightmost "-.*"
+ const char* parentTagEnd = strrchr(tag, '-');
+ if (parentTagEnd == nullptr) {
+ return SkLanguage();
+ }
+ size_t parentTagLen = parentTagEnd - tag;
+ return SkLanguage(tag, parentTagLen);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
new file mode 100644
index 0000000000..7a33f30b89
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_parser_DEFINED
+#define SkFontMgr_android_parser_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTHash.h"
+
+#include <climits>
+#include <limits>
+
+/** \class SkLanguage
+
+ The SkLanguage class represents a human-written language, and is used by
+ text draw operations to determine which glyph to draw when drawing
+ characters with variants (i.e. Han-derived characters).
+*/
+class SkLanguage {
+public:
+ SkLanguage() { }
+ SkLanguage(const SkString& tag) : fTag(tag) { }
+ SkLanguage(const char* tag) : fTag(tag) { }
+ SkLanguage(const char* tag, size_t len) : fTag(tag, len) { }
+ SkLanguage(const SkLanguage& b) : fTag(b.fTag) { }
+
+ /** Gets a BCP 47 language identifier for this SkLanguage.
+ @return a BCP 47 language identifier representing this language
+ */
+ const SkString& getTag() const { return fTag; }
+
+ /** Performs BCP 47 fallback to return an SkLanguage one step more general.
+ @return an SkLanguage one step more general
+ */
+ SkLanguage getParent() const;
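+    // Illustrative fallback chain: "zh-Hans-CN" -> "zh-Hans" -> "zh" -> empty SkLanguage.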
+
+ bool operator==(const SkLanguage& b) const {
+ return fTag == b.fTag;
+ }
+ bool operator!=(const SkLanguage& b) const {
+ return fTag != b.fTag;
+ }
+ SkLanguage& operator=(const SkLanguage& b) {
+ fTag = b.fTag;
+ return *this;
+ }
+
+private:
+ //! BCP 47 language identifier
+ SkString fTag;
+};
+
+enum FontVariants {
+ kDefault_FontVariant = 0x01,
+ kCompact_FontVariant = 0x02,
+ kElegant_FontVariant = 0x04,
+ kLast_FontVariant = kElegant_FontVariant,
+};
+typedef uint32_t FontVariant;
+
+// Must remain trivially movable (can be memmoved).
+struct FontFileInfo {
+ FontFileInfo() : fIndex(0), fWeight(0), fStyle(Style::kAuto) { }
+
+ SkString fFileName;
+ int fIndex;
+ int fWeight;
+ enum class Style { kAuto, kNormal, kItalic } fStyle;
+ SkTArray<SkFontArguments::VariationPosition::Coordinate, true> fVariationDesignPosition;
+};
+
+/**
+ * A font family provides one or more names for a collection of fonts, each of
+ * which has a different style (normal, italic) or weight (thin, light, bold,
+ * etc.).
+ * Some fonts may occur in compact variants for use in the user interface.
+ * Android distinguishes "fallback" fonts to support non-ASCII character sets.
+ */
+struct FontFamily {
+ FontFamily(const SkString& basePath, bool isFallbackFont)
+ : fVariant(kDefault_FontVariant)
+ , fOrder(-1)
+ , fIsFallbackFont(isFallbackFont)
+ , fBasePath(basePath)
+ { }
+
+ SkTArray<SkString, true> fNames;
+ SkTArray<FontFileInfo, true> fFonts;
+ SkTArray<SkLanguage, true> fLanguages;
+ SkTHashMap<SkString, std::unique_ptr<FontFamily>> fallbackFamilies;
+ FontVariant fVariant;
+ int fOrder; // internal to the parser, not useful to users.
+ bool fIsFallbackFont;
+ SkString fFallbackFor;
+ const SkString fBasePath;
+};
+
+namespace SkFontMgr_Android_Parser {
+
+/** Parses system font configuration files and appends result to fontFamilies. */
+void GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies);
+
+/** Parses font configuration files and appends result to fontFamilies. */
+void GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir = nullptr);
+
+} // namespace SkFontMgr_Android_Parser
+
+
+/** Parses a null terminated string into an integer type, checking for overflow.
+ * http://www.w3.org/TR/html-markup/datatypes.html#common.data.integer.non-negative-def
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <typename T> static bool parse_non_negative_integer(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = std::numeric_limits<T>::max() / 10;
+ const T dMax = std::numeric_limits<T>::max() - (nMax * 10);
+ T n = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ *value = n;
+ return true;
+}
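+
+// Usage sketch (illustrative), mirroring how the parser reads attributes like weight="400":
+//   int weight = 0;
+//   parse_non_negative_integer("400", &weight); // returns true, weight == 400
+//   parse_non_negative_integer("-1", &weight);  // returns false ('-' is not a digit)
+//   parse_non_negative_integer("", &weight);    // returns false, weight unchanged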
+
+/** Parses a null terminated string into a signed fixed point value with bias N.
+ *
+ * Like http://www.w3.org/TR/html-markup/datatypes.html#common.data.float-def ,
+ * but may start with '.' and does not support 'e'. '-?((:digit:+(.:digit:+)?)|(.:digit:+))'
+ *
+ * Checks for overflow.
+ * Low bit rounding is not defined (is currently truncate).
+ * Bias (N) required to allow for the sign bit and 4 bits of integer.
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <int N, typename T> static bool parse_fixed(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+ static_assert(std::numeric_limits<T>::is_signed, "T_must_be_signed");
+ static_assert(sizeof(T) * CHAR_BIT - N >= 5, "N_must_leave_four_bits_plus_sign");
+
+ bool negate = false;
+ if (*s == '-') {
+ ++s;
+ negate = true;
+ }
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = (std::numeric_limits<T>::max() >> N) / 10;
+ const T dMax = (std::numeric_limits<T>::max() >> N) - (nMax * 10);
+ T n = 0;
+ T frac = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ // If it wasn't a digit, check if it is a '.' followed by something.
+ if (*s != '.' || s[1] == '\0') {
+ return false;
+ }
+ // Find the end, verify digits.
+ for (++s; *s; ++s) {
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ }
+ // Read back toward the '.'.
+ for (--s; *s != '.'; --s) {
+ T d = *s - '0';
+ frac = (frac + (d << N)) / 10; // This requires four bits overhead.
+ }
+ break;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ if (negate) {
+ n = -n;
+ frac = -frac;
+ }
+ *value = SkLeftShift(n, N) + frac;
+ return true;
+}
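+
+// Usage sketch (illustrative): parsing an axis stylevalue into 16.16 fixed point.
+//   SkFixed v;
+//   parse_fixed<16>("0.5", &v);  // returns true, v == 0x00008000 (0.5)
+//   parse_fixed<16>("-1.5", &v); // returns true, v == -0x00018000 (-1.5)
+//   parse_fixed<16>("1e2", &v);  // returns false; exponent notation is not supported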
+
+#endif /* SkFontMgr_android_parser_DEFINED */
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
new file mode 100644
index 0000000000..6ee4e5454f
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+#include <limits>
+#include <memory>
+
+class SkData;
+
+SkTypeface_Custom::SkTypeface_Custom(const SkFontStyle& style, bool isFixedPitch,
+ bool sysFont, const SkString familyName, int index)
+ : INHERITED(style, isFixedPitch)
+ , fIsSysFont(sysFont), fFamilyName(familyName), fIndex(index)
+{ }
+
+bool SkTypeface_Custom::isSysFont() const { return fIsSysFont; }
+
+void SkTypeface_Custom::onGetFamilyName(SkString* familyName) const {
+ *familyName = fFamilyName;
+}
+
+void SkTypeface_Custom::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocal = !this->isSysFont();
+}
+
+int SkTypeface_Custom::getIndex() const { return fIndex; }
+
+
+SkTypeface_Empty::SkTypeface_Empty() : INHERITED(SkFontStyle(), false, true, SkString(), 0) {}
+
+std::unique_ptr<SkStreamAsset> SkTypeface_Empty::onOpenStream(int*) const { return nullptr; }
+
+sk_sp<SkTypeface> SkTypeface_Empty::onMakeClone(const SkFontArguments& args) const {
+ return sk_ref_sp(this);
+}
+
+SkTypeface_Stream::SkTypeface_Stream(std::unique_ptr<SkFontData> fontData,
+ const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName)
+ : INHERITED(style, isFixedPitch, sysFont, familyName, fontData->getIndex())
+ , fData(std::move(fontData))
+{ }
+
+std::unique_ptr<SkStreamAsset> SkTypeface_Stream::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+}
+
+std::unique_ptr<SkFontData> SkTypeface_Stream::onMakeFontData() const {
+ return skstd::make_unique<SkFontData>(*fData);
+}
+
+sk_sp<SkTypeface> SkTypeface_Stream::onMakeClone(const SkFontArguments& args) const {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ SkString familyName;
+ this->getFamilyName(&familyName);
+
+ return sk_make_sp<SkTypeface_Stream>(std::move(data),
+ this->fontStyle(),
+ this->isFixedPitch(),
+ this->isSysFont(),
+ familyName);
+}
+
+SkTypeface_File::SkTypeface_File(const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName, const char path[], int index)
+ : INHERITED(style, isFixedPitch, sysFont, familyName, index)
+ , fPath(path)
+{ }
+
+std::unique_ptr<SkStreamAsset> SkTypeface_File::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = this->getIndex();
+ return SkStream::MakeFromFile(fPath.c_str());
+}
+
+sk_sp<SkTypeface> SkTypeface_File::onMakeClone(const SkFontArguments& args) const {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ SkString familyName;
+ this->getFamilyName(&familyName);
+
+ return sk_make_sp<SkTypeface_Stream>(std::move(data),
+ this->fontStyle(),
+ this->isFixedPitch(),
+ this->isSysFont(),
+ familyName);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontStyleSet_Custom::SkFontStyleSet_Custom(const SkString familyName) : fFamilyName(familyName) {}
+
+void SkFontStyleSet_Custom::appendTypeface(sk_sp<SkTypeface_Custom> typeface) {
+ fStyles.emplace_back(std::move(typeface));
+}
+
+int SkFontStyleSet_Custom::count() {
+ return fStyles.count();
+}
+
+void SkFontStyleSet_Custom::getStyle(int index, SkFontStyle* style, SkString* name) {
+ SkASSERT(index < fStyles.count());
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+}
+
+SkTypeface* SkFontStyleSet_Custom::createTypeface(int index) {
+ SkASSERT(index < fStyles.count());
+ return SkRef(fStyles[index].get());
+}
+
+SkTypeface* SkFontStyleSet_Custom::matchStyle(const SkFontStyle& pattern) {
+ return this->matchStyleCSS3(pattern);
+}
+
+SkString SkFontStyleSet_Custom::getFamilyName() { return fFamilyName; }
+
+
+SkFontMgr_Custom::SkFontMgr_Custom(const SystemFontLoader& loader) : fDefaultFamily(nullptr) {
+ loader.loadSystemFonts(fScanner, &fFamilies);
+
+ // Try to pick a default font.
+ static const char* defaultNames[] = {
+ "Arial", "Verdana", "Times New Roman", "Droid Sans", nullptr
+ };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(defaultNames); ++i) {
+ sk_sp<SkFontStyleSet_Custom> set(this->onMatchFamily(defaultNames[i]));
+ if (nullptr == set) {
+ continue;
+ }
+
+ sk_sp<SkTypeface> tf(set->matchStyle(SkFontStyle(SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ SkFontStyle::kUpright_Slant)));
+ if (nullptr == tf) {
+ continue;
+ }
+
+ fDefaultFamily = set.get();
+ break;
+ }
+ if (nullptr == fDefaultFamily) {
+ fDefaultFamily = fFamilies[0].get();
+ }
+}
+
+int SkFontMgr_Custom::onCountFamilies() const {
+ return fFamilies.count();
+}
+
+void SkFontMgr_Custom::onGetFamilyName(int index, SkString* familyName) const {
+ SkASSERT(index < fFamilies.count());
+ familyName->set(fFamilies[index]->getFamilyName());
+}
+
+SkFontStyleSet_Custom* SkFontMgr_Custom::onCreateStyleSet(int index) const {
+ SkASSERT(index < fFamilies.count());
+ return SkRef(fFamilies[index].get());
+}
+
+SkFontStyleSet_Custom* SkFontMgr_Custom::onMatchFamily(const char familyName[]) const {
+ for (int i = 0; i < fFamilies.count(); ++i) {
+ if (fFamilies[i]->getFamilyName().equals(familyName)) {
+ return SkRef(fFamilies[i].get());
+ }
+ }
+ return nullptr;
+}
+
+SkTypeface* SkFontMgr_Custom::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const
+{
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontStyle);
+}
+
+SkTypeface* SkFontMgr_Custom::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const
+{
+ return nullptr;
+}
+
+SkTypeface* SkFontMgr_Custom::onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const
+{
+ for (int i = 0; i < fFamilies.count(); ++i) {
+ for (int j = 0; j < fFamilies[i]->fStyles.count(); ++j) {
+ if (fFamilies[i]->fStyles[j].get() == familyMember) {
+ return fFamilies[i]->matchStyle(fontStyle);
+ }
+ }
+ }
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return this->makeFromStream(skstd::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ return this->makeFromStream(std::move(stream), SkFontArguments().setCollectionIndex(ttcIndex));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), args.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, position, axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), args.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return sk_sp<SkTypeface>(new SkTypeface_Stream(std::move(data), style, isFixedPitch, false, name));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromFontData(std::unique_ptr<SkFontData> data) const {
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(data->getStream(), data->getIndex(),
+ &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+ return sk_sp<SkTypeface>(new SkTypeface_Stream(std::move(data), style, isFixedPitch, false, name));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromFile(const char path[], int ttcIndex) const {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ sk_sp<SkTypeface> tf;
+
+ if (familyName) {
+ tf.reset(this->onMatchFamilyStyle(familyName, style));
+ }
+
+ if (nullptr == tf) {
+ tf.reset(fDefaultFamily->matchStyle(style));
+ }
+
+ return tf;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom.h b/gfx/skia/skia/src/ports/SkFontMgr_custom.h
new file mode 100644
index 0000000000..4d76c2986b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_custom_DEFINED
+#define SkFontMgr_custom_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTArray.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+class SkData;
+class SkFontDescriptor;
+class SkStreamAsset;
+class SkTypeface;
+
+/** The base SkTypeface implementation for the custom font manager. */
+class SkTypeface_Custom : public SkTypeface_FreeType {
+public:
+ SkTypeface_Custom(const SkFontStyle& style, bool isFixedPitch,
+ bool sysFont, const SkString familyName, int index);
+ bool isSysFont() const;
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override;
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const override;
+ int getIndex() const;
+
+private:
+ const bool fIsSysFont;
+ const SkString fFamilyName;
+ const int fIndex;
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+/** The empty SkTypeface implementation for the custom font manager.
+ * Used as the last resort fallback typeface.
+ */
+class SkTypeface_Empty : public SkTypeface_Custom {
+public:
+    SkTypeface_Empty();
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int*) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+
+private:
+ typedef SkTypeface_Custom INHERITED;
+};
+
+/** The stream SkTypeface implementation for the custom font manager. */
+class SkTypeface_Stream : public SkTypeface_Custom {
+public:
+ SkTypeface_Stream(std::unique_ptr<SkFontData> fontData,
+ const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName);
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+
+private:
+ const std::unique_ptr<const SkFontData> fData;
+
+ typedef SkTypeface_Custom INHERITED;
+};
+
+/** The file SkTypeface implementation for the custom font manager. */
+class SkTypeface_File : public SkTypeface_Custom {
+public:
+ SkTypeface_File(const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName, const char path[], int index);
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+
+private:
+ SkString fPath;
+
+ typedef SkTypeface_Custom INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * SkFontStyleSet_Custom
+ *
+ * This class is used by SkFontMgr_Custom to hold SkTypeface_Custom families.
+ */
+class SkFontStyleSet_Custom : public SkFontStyleSet {
+public:
+ explicit SkFontStyleSet_Custom(const SkString familyName);
+
+ /** Should only be called during the initial build phase. */
+ void appendTypeface(sk_sp<SkTypeface_Custom> typeface);
+ int count() override;
+ void getStyle(int index, SkFontStyle* style, SkString* name) override;
+ SkTypeface* createTypeface(int index) override;
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override;
+ SkString getFamilyName();
+
+private:
+ SkTArray<sk_sp<SkTypeface_Custom>> fStyles;
+ SkString fFamilyName;
+
+ friend class SkFontMgr_Custom;
+};
+
+/**
+ * SkFontMgr_Custom
+ *
+ * This class is essentially a collection of SkFontStyleSet_Custom,
+ * one SkFontStyleSet_Custom for each family. This class may be modified
+ * to load fonts from any source by changing the initialization.
+ */
+class SkFontMgr_Custom : public SkFontMgr {
+public:
+ typedef SkTArray<sk_sp<SkFontStyleSet_Custom>> Families;
+ class SystemFontLoader {
+ public:
+ virtual ~SystemFontLoader() { }
+ virtual void loadSystemFonts(const SkTypeface_FreeType::Scanner&, Families*) const = 0;
+ };
+ explicit SkFontMgr_Custom(const SystemFontLoader& loader);
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet_Custom* onCreateStyleSet(int index) const override;
+ SkFontStyleSet_Custom* onMatchFamily(const char familyName[]) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData> data) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override;
+
+private:
+ Families fFamilies;
+ SkFontStyleSet_Custom* fDefaultFamily;
+ SkTypeface_FreeType::Scanner fScanner;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp
new file mode 100644
index 0000000000..c4c1c3599b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/ports/SkFontMgr_directory.h"
+#include "src/core/SkOSFile.h"
+#include "src/ports/SkFontMgr_custom.h"
+#include "src/utils/SkOSPath.h"
+
+class DirectorySystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ DirectorySystemFontLoader(const char* dir) : fBaseDirectory(dir) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ load_directory_fonts(scanner, fBaseDirectory, ".ttf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".ttc", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".otf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".pfb", families);
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+private:
+ static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+ {
+ for (int i = 0; i < families.count(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+ }
+
+ static void load_directory_fonts(const SkTypeface_FreeType::Scanner& scanner,
+ const SkString& directory, const char* suffix,
+ SkFontMgr_Custom::Families* families)
+ {
+ SkOSFile::Iter iter(directory.c_str(), suffix);
+ SkString name;
+
+ while (iter.next(&name, false)) {
+ SkString filename(SkOSPath::Join(directory.c_str(), name.c_str()));
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(filename.c_str());
+ if (!stream) {
+ // SkDebugf("---- failed to open <%s>\n", filename.c_str());
+ continue;
+ }
+
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ // SkDebugf("---- failed to open <%s> as a font\n", filename.c_str());
+ continue;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ // SkDebugf("---- failed to open <%s> <%d> as a font\n",
+ // filename.c_str(), faceIndex);
+ continue;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ addTo->appendTypeface(sk_make_sp<SkTypeface_File>(style, isFixedPitch, true,
+ realname, filename.c_str(),
+ faceIndex));
+ }
+ }
+
+ SkOSFile::Iter dirIter(directory.c_str());
+ while (dirIter.next(&name, true)) {
+ if (name.startsWith(".")) {
+ continue;
+ }
+ SkString dirname(SkOSPath::Join(directory.c_str(), name.c_str()));
+ load_directory_fonts(scanner, dirname, suffix, families);
+ }
+ }
+
+ SkString fBaseDirectory;
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Directory(const char* dir) {
+ return sk_make_sp<SkFontMgr_Custom>(DirectorySystemFontLoader(dir));
+}
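+
+// Usage sketch (illustrative; the directory and family name are hypothetical):
+//   sk_sp<SkFontMgr> mgr = SkFontMgr_New_Custom_Directory("/usr/share/fonts");
+//   sk_sp<SkTypeface> face(mgr->matchFamilyStyle("Some Family", SkFontStyle::Normal()));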
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
new file mode 100644
index 0000000000..3bb52eb339
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_directory.h"
+
+#ifndef SK_FONT_FILE_PREFIX
+# define SK_FONT_FILE_PREFIX "/usr/share/fonts/"
+#endif
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Directory(SK_FONT_FILE_PREFIX);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp
new file mode 100644
index 0000000000..45a3151134
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+
+static void load_font_from_data(const SkTypeface_FreeType::Scanner& scanner,
+ const uint8_t* data, size_t size, int index,
+ SkFontMgr_Custom::Families* families);
+
+class EmbeddedSystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmbeddedSystemFontLoader(const SkEmbeddedResourceHeader* header) : fHeader(header) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ for (int i = 0; i < fHeader->count; ++i) {
+ const SkEmbeddedResource& fontEntry = fHeader->entries[i];
+ load_font_from_data(scanner, fontEntry.data, fontEntry.size, i, families);
+ }
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+ const SkEmbeddedResourceHeader* fHeader;
+};
+
+class DataFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ DataFontLoader(const uint8_t** datas, const size_t* sizes, int n) : fDatas(datas), fSizes(sizes), fNum(n) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ for (int i = 0; i < fNum; ++i) {
+ load_font_from_data(scanner, fDatas[i], fSizes[i], i, families);
+ }
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+ const uint8_t** fDatas;
+ const size_t* fSizes;
+ const int fNum;
+};
+
+static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+{
+ for (int i = 0; i < families.count(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+}
+
+static void load_font_from_data(const SkTypeface_FreeType::Scanner& scanner,
+ const uint8_t* data, size_t size, int index,
+ SkFontMgr_Custom::Families* families)
+{
+ auto stream = skstd::make_unique<SkMemoryStream>(data, size, false);
+
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ SkDebugf("---- failed to open <%d> as a font\n", index);
+ return;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ SkDebugf("---- failed to open <%d> <%d> as a font\n", index, faceIndex);
+ return;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), faceIndex, nullptr, 0);
+ addTo->appendTypeface(sk_make_sp<SkTypeface_Stream>(std::move(data),
+ style, isFixedPitch,
+ true, realname));
+ }
+}
+
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header) {
+ return sk_make_sp<SkFontMgr_Custom>(EmbeddedSystemFontLoader(header));
+}
+
+// SkFontMgr_New_Custom_Data expects to be called with the data for n font files. datas and sizes
+// are parallel arrays of bytes and byte lengths.
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Data(const uint8_t** datas, const size_t* sizes, int n) {
+ SkASSERT(datas != nullptr);
+ SkASSERT(sizes != nullptr);
+ SkASSERT(n > 0);
+ return sk_make_sp<SkFontMgr_Custom>(DataFontLoader(datas, sizes, n));
+}
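+
+// Usage sketch (illustrative; the buffers are hypothetical):
+//   const uint8_t* datas[] = { fontBytesA, fontBytesB };
+//   const size_t sizes[] = { fontSizeA, fontSizeB };
+//   sk_sp<SkFontMgr> mgr = SkFontMgr_New_Custom_Data(datas, sizes, 2);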
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
new file mode 100644
index 0000000000..82e1b842ad
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header);
+
+extern "C" const SkEmbeddedResourceHeader SK_EMBEDDED_FONTS;
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Embedded(&SK_EMBEDDED_FONTS);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp
new file mode 100644
index 0000000000..0e3e18aefd
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkFontMgr_empty.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+class EmptyFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmptyFontLoader() { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Empty() {
+ return sk_make_sp<SkFontMgr_Custom>(EmptyFontLoader());
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
new file mode 100644
index 0000000000..b97c199490
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_empty.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Empty();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
new file mode 100644
index 0000000000..69410c5ef9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ // Always return nullptr, an empty SkFontMgr will be used.
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
new file mode 100644
index 0000000000..5131562faf
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDataTable.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkMath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include <fontconfig/fontconfig.h>
+#include <string.h>
+
+class SkData;
+
+// FC_POSTSCRIPT_NAME was added with b561ff20 which ended up in 2.10.92
+// Ubuntu 14.04 is on 2.11.0
+// Debian 8 and 9 are on 2.11
+// OpenSUSE Leap 42.1 is on 2.11.0 (42.3 is on 2.11.1)
+// Fedora 24 is on 2.11.94
+#ifndef FC_POSTSCRIPT_NAME
+# define FC_POSTSCRIPT_NAME "postscriptname"
+#endif
+
+#ifdef SK_DEBUG
+# include "src/core/SkTLS.h"
+#endif
+
+/** Since FontConfig is poorly documented, this gives a high-level overview:
+ *
+ * FcConfig is a handle to a FontConfig configuration instance. Each 'configuration' is independent
+ * from any others which may exist. There exists a default global configuration which is created
+ * and destroyed by FcInit and FcFini, but this default should not normally be used.
+ * Instead, one should use FcConfigCreate and FcInit* to have a named local state.
+ *
+ * FcPatterns are {objectName -> [element]} (maps from object names to a list of elements).
+ * Each element is some internal data plus an FcValue which is a variant (a union with a type tag).
+ * Lists of elements are not typed, except by convention. Any collection of FcValues must be
+ * assumed to be heterogeneous by the code, but the code need not do anything particularly
+ * interesting if the values go against convention.
+ *
+ * Somewhat like DirectWrite, FontConfig supports synthetics through FC_EMBOLDEN and FC_MATRIX.
+ * Like all synthetic information, such information must be passed with the font data.
+ */
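+
+// A minimal pattern sketch using public fontconfig calls (illustrative only; in
+// this file such calls are made while holding the FCLocker below):
+//   FcPattern* pattern = FcPatternCreate();
+//   FcPatternAddString(pattern, FC_FAMILY, (const FcChar8*)"sans-serif");
+//   FcPatternAddBool(pattern, FC_EMBOLDEN, FcFalse);
+//   FcPatternDestroy(pattern);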
+
+namespace {
+
+// Fontconfig is not threadsafe before 2.10.91. Before that, we lock with a global mutex.
+// See https://bug.skia.org/1497 for background.
+static SkMutex& f_c_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+#ifdef SK_DEBUG
+void* CreateThreadFcLocked() { return new bool(false); }
+void DeleteThreadFcLocked(void* v) { delete static_cast<bool*>(v); }
+# define THREAD_FC_LOCKED \
+ static_cast<bool*>(SkTLS::Get(CreateThreadFcLocked, DeleteThreadFcLocked))
+#endif
+
+class FCLocker {
+ // Assume FcGetVersion() has always been thread safe.
+ static void lock() SK_NO_THREAD_SAFETY_ANALYSIS {
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().acquire();
+ } else {
+ SkDEBUGCODE(bool* threadLocked = THREAD_FC_LOCKED);
+ SkASSERT(false == *threadLocked);
+ SkDEBUGCODE(*threadLocked = true);
+ }
+ }
+ static void unlock() SK_NO_THREAD_SAFETY_ANALYSIS {
+ AssertHeld();
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().release();
+ } else {
+ SkDEBUGCODE(*THREAD_FC_LOCKED = false);
+ }
+ }
+
+public:
+ FCLocker() { lock(); }
+ ~FCLocker() { unlock(); }
+
+ /** If acquire and release were free, FCLocker would be used around each call into FontConfig.
+ * Instead a much more granular approach is taken, but this means there are times when the
+ * mutex is held when it should not be. A Suspend will drop the lock until it is destroyed.
+ * While a Suspend exists, FontConfig should not be used without re-taking the lock.
+ */
+ struct Suspend {
+ Suspend() { unlock(); }
+ ~Suspend() { lock(); }
+ };
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < 21091) {
+ f_c_mutex().assertHeld();
+ } else {
+ SkASSERT(true == *THREAD_FC_LOCKED);
+ }
+ ) }
+};
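+
+// Usage sketch (illustrative): hold the lock for FontConfig work and suspend it
+// around long-running work that does not touch FontConfig.
+//   {
+//       FCLocker lock;
+//       // ... FontConfig calls ...
+//       {
+//           FCLocker::Suspend suspend;
+//           // ... expensive non-FontConfig work ...
+//       }
+//       // ... more FontConfig calls ...
+//   }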
+
+} // namespace
+
+template<typename T, void (*D)(T*)> void FcTDestroy(T* t) {
+ FCLocker::AssertHeld();
+ D(t);
+}
+template <typename T, T* (*C)(), void (*D)(T*)> class SkAutoFc
+ : public SkAutoTCallVProc<T, FcTDestroy<T, D> > {
+public:
+ SkAutoFc() : SkAutoTCallVProc<T, FcTDestroy<T, D> >(C()) {
+ T* obj = this->operator T*();
+ SkASSERT_RELEASE(nullptr != obj);
+ }
+ explicit SkAutoFc(T* obj) : SkAutoTCallVProc<T, FcTDestroy<T, D> >(obj) {}
+};
+
+typedef SkAutoFc<FcCharSet, FcCharSetCreate, FcCharSetDestroy> SkAutoFcCharSet;
+typedef SkAutoFc<FcConfig, FcConfigCreate, FcConfigDestroy> SkAutoFcConfig;
+typedef SkAutoFc<FcFontSet, FcFontSetCreate, FcFontSetDestroy> SkAutoFcFontSet;
+typedef SkAutoFc<FcLangSet, FcLangSetCreate, FcLangSetDestroy> SkAutoFcLangSet;
+typedef SkAutoFc<FcObjectSet, FcObjectSetCreate, FcObjectSetDestroy> SkAutoFcObjectSet;
+typedef SkAutoFc<FcPattern, FcPatternCreate, FcPatternDestroy> SkAutoFcPattern;
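+
+// The typedefs above give RAII ownership of FontConfig objects; e.g.
+// (illustrative only):
+//
+//     SkAutoFcPattern pattern;                    // FcPatternCreate() on construction
+//     FcPatternAddString(pattern, FC_FAMILY, f);  // converts implicitly to FcPattern*
+//     // FcPatternDestroy() runs on scope exit, with FCLocker::AssertHeld checked.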
+
+static bool get_bool(FcPattern* pattern, const char object[], bool missing = false) {
+ FcBool value;
+ if (FcPatternGetBool(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static const char* get_string(FcPattern* pattern, const char object[], const char* missing = "") {
+ FcChar8* value;
+ if (FcPatternGetString(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return (const char*)value;
+}
+
+static const FcMatrix* get_matrix(FcPattern* pattern, const char object[]) {
+ FcMatrix* matrix;
+ if (FcPatternGetMatrix(pattern, object, 0, &matrix) != FcResultMatch) {
+ return nullptr;
+ }
+ return matrix;
+}
+
+enum SkWeakReturn {
+ kIsWeak_WeakReturn,
+ kIsStrong_WeakReturn,
+ kNoId_WeakReturn
+};
+/** Ideally there would exist a call like
+ * FcResult FcPatternIsWeak(pattern, object, id, FcBool* isWeak);
+ * Sometime after 2.12.4 FcPatternGetWithBinding was added which can retrieve the binding.
+ *
+ * However, there is no such call and as of Fc 2.11.0 even FcPatternEquals ignores the weak bit.
+ * Currently, the only reliable way of finding the weak bit is by its effect on matching.
+ * The weak bit only affects the matching of FC_FAMILY and FC_POSTSCRIPT_NAME object values.
+ * An element with the weak bit is scored after FC_LANG; an element without the weak bit is scored before.
+ * Note that the weak bit is stored on the element, not on the value it holds.
+ */
+static SkWeakReturn is_weak(FcPattern* pattern, const char object[], int id) {
+ FCLocker::AssertHeld();
+
+ FcResult result;
+
+ // Create a copy of the pattern with only the value 'pattern'['object'['id']] in it.
+ // Internally, FontConfig pattern objects are linked lists, so it is fastest to remove from the head.
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+ FcBool hasId = true;
+ for (int i = 0; hasId && i < id; ++i) {
+ hasId = FcPatternRemove(minimal, object, 0);
+ }
+ if (!hasId) {
+ return kNoId_WeakReturn;
+ }
+ FcValue value;
+ result = FcPatternGet(minimal, object, 0, &value);
+ if (result != FcResultMatch) {
+ return kNoId_WeakReturn;
+ }
+ while (hasId) {
+ hasId = FcPatternRemove(minimal, object, 1);
+ }
+
+ // Create a font set with two patterns.
+ // 1. the same 'object' as minimal and a lang object with only 'nomatchlang'.
+ // 2. a different 'object' from minimal and a lang object with only 'matchlang'.
+ SkAutoFcFontSet fontSet;
+
+ SkAutoFcLangSet strongLangSet;
+ FcLangSetAdd(strongLangSet, (const FcChar8*)"nomatchlang");
+ SkAutoFcPattern strong(FcPatternDuplicate(minimal));
+ FcPatternAddLangSet(strong, FC_LANG, strongLangSet);
+
+ SkAutoFcLangSet weakLangSet;
+ FcLangSetAdd(weakLangSet, (const FcChar8*)"matchlang");
+ SkAutoFcPattern weak;
+ FcPatternAddString(weak, object, (const FcChar8*)"nomatchstring");
+ FcPatternAddLangSet(weak, FC_LANG, weakLangSet);
+
+ FcFontSetAdd(fontSet, strong.release());
+ FcFontSetAdd(fontSet, weak.release());
+
+ // Add 'matchlang' to the copy of the pattern.
+ FcPatternAddLangSet(minimal, FC_LANG, weakLangSet);
+
+ // Run a match against the copy of the pattern.
+ // If the 'id' was weak, then we should match the pattern with 'matchlang'.
+ // If the 'id' was strong, then we should match the pattern with 'nomatchlang'.
+
+ // Note that this config is only used for FcFontRenderPrepare, which we don't even want.
+ // However, there appears to be no way to match/sort without it.
+ SkAutoFcConfig config;
+ FcFontSet* fontSets[1] = { fontSet };
+ SkAutoFcPattern match(FcFontSetMatch(config, fontSets, SK_ARRAY_COUNT(fontSets),
+ minimal, &result));
+
+ FcLangSet* matchLangSet;
+ FcPatternGetLangSet(match, FC_LANG, 0, &matchLangSet);
+ return FcLangEqual == FcLangSetHasLang(matchLangSet, (const FcChar8*)"matchlang")
+ ? kIsWeak_WeakReturn : kIsStrong_WeakReturn;
+}
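+
+// Concretely (illustrative, not from the upstream source): an alias such as
+//     <alias><family>sans-serif</family>
+//            <prefer><family>DejaVu Sans</family></prefer></alias>
+// in fonts.conf adds "DejaVu Sans" to FC_FAMILY with a weak binding by
+// default, which is exactly what the match-based probe above detects.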
+
+/** Removes weak elements from either FC_FAMILY or FC_POSTSCRIPT_NAME objects in the property.
+ * This can be quite expensive, and should not be used more than once per font lookup.
+ * This removes all of the weak elements after the last strong element.
+ */
+static void remove_weak(FcPattern* pattern, const char object[]) {
+ FCLocker::AssertHeld();
+
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+
+ int lastStrongId = -1;
+ int numIds;
+ SkWeakReturn result;
+ for (int id = 0; ; ++id) {
+ result = is_weak(minimal, object, 0);
+ if (kNoId_WeakReturn == result) {
+ numIds = id;
+ break;
+ }
+ if (kIsStrong_WeakReturn == result) {
+ lastStrongId = id;
+ }
+ SkAssertResult(FcPatternRemove(minimal, object, 0));
+ }
+
+ // If they were all weak, then leave the pattern alone.
+ if (lastStrongId < 0) {
+ return;
+ }
+
+ // Remove everything after the last strong.
+ for (int id = lastStrongId + 1; id < numIds; ++id) {
+ SkAssertResult(FcPatternRemove(pattern, object, lastStrongId + 1));
+ }
+}
+
+static SkScalar map_range(SkScalar value,
+ SkScalar old_min, SkScalar old_max,
+ SkScalar new_min, SkScalar new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + ((value - old_min) * (new_max - new_min) / (old_max - old_min));
+}
+
+struct MapRanges {
+ SkScalar old_val;
+ SkScalar new_val;
+};
+
+static SkScalar map_ranges(SkScalar val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [rangesCount-1] to +Inf
+ // (the final "if (val < +Inf)" guard is implicit)
+ return ranges[rangesCount-1].new_val;
+}
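+
+// Worked example (illustrative): with the weightRanges table below, a raw
+// fontconfig weight of 190 lies between FC_WEIGHT_DEMIBOLD (180 -> 600) and
+// FC_WEIGHT_BOLD (200 -> 700), so map_ranges returns
+//     600 + (190 - 180) * (700 - 600) / (200 - 180) = 650.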
+
+#ifndef FC_WEIGHT_DEMILIGHT
+#define FC_WEIGHT_DEMILIGHT 65
+#endif
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ // FcWeightToOpenType was buggy until 2.12.4
+ static constexpr MapRanges weightRanges[] = {
+ { FC_WEIGHT_THIN, SkFS::kThin_Weight },
+ { FC_WEIGHT_EXTRALIGHT, SkFS::kExtraLight_Weight },
+ { FC_WEIGHT_LIGHT, SkFS::kLight_Weight },
+ { FC_WEIGHT_DEMILIGHT, 350 },
+ { FC_WEIGHT_BOOK, 380 },
+ { FC_WEIGHT_REGULAR, SkFS::kNormal_Weight },
+ { FC_WEIGHT_MEDIUM, SkFS::kMedium_Weight },
+ { FC_WEIGHT_DEMIBOLD, SkFS::kSemiBold_Weight },
+ { FC_WEIGHT_BOLD, SkFS::kBold_Weight },
+ { FC_WEIGHT_EXTRABOLD, SkFS::kExtraBold_Weight },
+ { FC_WEIGHT_BLACK, SkFS::kBlack_Weight },
+ { FC_WEIGHT_EXTRABLACK, SkFS::kExtraBlack_Weight },
+ };
+ SkScalar weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { FC_WIDTH_ULTRACONDENSED, SkFS::kUltraCondensed_Width },
+ { FC_WIDTH_EXTRACONDENSED, SkFS::kExtraCondensed_Width },
+ { FC_WIDTH_CONDENSED, SkFS::kCondensed_Width },
+ { FC_WIDTH_SEMICONDENSED, SkFS::kSemiCondensed_Width },
+ { FC_WIDTH_NORMAL, SkFS::kNormal_Width },
+ { FC_WIDTH_SEMIEXPANDED, SkFS::kSemiExpanded_Width },
+ { FC_WIDTH_EXPANDED, SkFS::kExpanded_Width },
+ { FC_WIDTH_EXTRAEXPANDED, SkFS::kExtraExpanded_Width },
+ { FC_WIDTH_ULTRAEXPANDED, SkFS::kUltraExpanded_Width },
+ };
+ SkScalar width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(SkScalarRoundToInt(weight), SkScalarRoundToInt(width), slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ FCLocker::AssertHeld();
+
+ typedef SkFontStyle SkFS;
+
+ // FcWeightFromOpenType was buggy until 2.12.4
+ static constexpr MapRanges weightRanges[] = {
+ { SkFS::kThin_Weight, FC_WEIGHT_THIN },
+ { SkFS::kExtraLight_Weight, FC_WEIGHT_EXTRALIGHT },
+ { SkFS::kLight_Weight, FC_WEIGHT_LIGHT },
+ { 350, FC_WEIGHT_DEMILIGHT },
+ { 380, FC_WEIGHT_BOOK },
+ { SkFS::kNormal_Weight, FC_WEIGHT_REGULAR },
+ { SkFS::kMedium_Weight, FC_WEIGHT_MEDIUM },
+ { SkFS::kSemiBold_Weight, FC_WEIGHT_DEMIBOLD },
+ { SkFS::kBold_Weight, FC_WEIGHT_BOLD },
+ { SkFS::kExtraBold_Weight, FC_WEIGHT_EXTRABOLD },
+ { SkFS::kBlack_Weight, FC_WEIGHT_BLACK },
+ { SkFS::kExtraBlack_Weight, FC_WEIGHT_EXTRABLACK },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { SkFS::kUltraCondensed_Width, FC_WIDTH_ULTRACONDENSED },
+ { SkFS::kExtraCondensed_Width, FC_WIDTH_EXTRACONDENSED },
+ { SkFS::kCondensed_Width, FC_WIDTH_CONDENSED },
+ { SkFS::kSemiCondensed_Width, FC_WIDTH_SEMICONDENSED },
+ { SkFS::kNormal_Width, FC_WIDTH_NORMAL },
+ { SkFS::kSemiExpanded_Width, FC_WIDTH_SEMIEXPANDED },
+ { SkFS::kExpanded_Width, FC_WIDTH_EXPANDED },
+ { SkFS::kExtraExpanded_Width, FC_WIDTH_EXTRAEXPANDED },
+ { SkFS::kUltraExpanded_Width, FC_WIDTH_ULTRAEXPANDED },
+ };
+ int width = map_ranges(style.width(), widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
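+
+// Note: skfontstyle_from_fcpattern and fcpattern_from_skfontstyle use mirrored
+// tables, so values that land exactly on a table anchor round-trip, e.g.
+// SkFontStyle::kSemiBold_Weight (600) <-> FC_WEIGHT_DEMIBOLD (180).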
+
+class SkTypeface_stream : public SkTypeface_FreeType {
+public:
+ SkTypeface_stream(std::unique_ptr<SkFontData> data,
+ SkString familyName, const SkFontStyle& style, bool fixedWidth)
+ : INHERITED(style, fixedWidth)
+ , fFamilyName(std::move(familyName))
+ , fData(std::move(data))
+ { }
+
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = fFamilyName;
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ *serialize = true;
+ }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(*fData);
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_make_sp<SkTypeface_stream>(std::move(data),
+ fFamilyName,
+ this->fontStyle(),
+ this->isFixedPitch());
+ }
+
+private:
+ SkString fFamilyName;
+ const std::unique_ptr<const SkFontData> fData;
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkTypeface_fontconfig : public SkTypeface_FreeType {
+public:
+ static sk_sp<SkTypeface_fontconfig> Make(SkAutoFcPattern pattern, SkString sysroot) {
+ return sk_sp<SkTypeface_fontconfig>(new SkTypeface_fontconfig(std::move(pattern),
+ std::move(sysroot)));
+ }
+ mutable SkAutoFcPattern fPattern; // Mutable for passing to FontConfig API.
+ const SkString fSysroot;
+
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = get_string(fPattern, FC_FAMILY);
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ FCLocker lock;
+ desc->setFamilyName(get_string(fPattern, FC_FAMILY));
+ desc->setFullName(get_string(fPattern, FC_FULLNAME));
+ desc->setPostscriptName(get_string(fPattern, FC_POSTSCRIPT_NAME));
+ desc->setStyle(this->fontStyle());
+ *serialize = false;
+ }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ FCLocker lock;
+ *ttcIndex = get_int(fPattern, FC_INDEX, 0);
+ const char* filename = get_string(fPattern, FC_FILE);
+ // See FontAccessible for note on searching sysroot then non-sysroot path.
+ SkString resolvedFilename;
+ if (!fSysroot.isEmpty()) {
+ resolvedFilename = fSysroot;
+ resolvedFilename += filename;
+ if (sk_exists(resolvedFilename.c_str(), kRead_SkFILE_Flag)) {
+ filename = resolvedFilename.c_str();
+ }
+ }
+ return SkStream::MakeFromFile(filename);
+ }
+
+ void onFilterRec(SkScalerContextRec* rec) const override {
+ // FontConfig provides 10-scale-bitmap-fonts.conf which applies an inverse "pixelsize"
+ // matrix. It is not known if this .conf is active or not, so it is not clear if
+ // "pixelsize" should be applied before this matrix. Since using a matrix with a bitmap
+ // font isn't a great idea, only apply the matrix to outline fonts.
+ const FcMatrix* fcMatrix = get_matrix(fPattern, FC_MATRIX);
+ bool fcOutline = get_bool(fPattern, FC_OUTLINE, true);
+ if (fcOutline && fcMatrix) {
+ // fPost2x2 is column-major, left handed (y down).
+ // FcMatrix is column-major, right handed (y up).
+ SkMatrix fm;
+ fm.setAll(fcMatrix->xx,-fcMatrix->xy, 0,
+ -fcMatrix->yx, fcMatrix->yy, 0,
+ 0 , 0 , 1);
+
+ SkMatrix sm;
+ rec->getMatrixFrom2x2(&sm);
+
+ sm.preConcat(fm);
+ rec->fPost2x2[0][0] = sm.getScaleX();
+ rec->fPost2x2[0][1] = sm.getSkewX();
+ rec->fPost2x2[1][0] = sm.getSkewY();
+ rec->fPost2x2[1][1] = sm.getScaleY();
+ }
+ if (get_bool(fPattern, FC_EMBOLDEN)) {
+ rec->fFlags |= SkScalerContext::kEmbolden_Flag;
+ }
+ this->INHERITED::onFilterRec(rec);
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info =
+ this->INHERITED::onGetAdvancedMetrics();
+
+ // Simulated fonts shouldn't be considered to be of the type of their data.
+ if (get_matrix(fPattern, FC_MATRIX) || get_bool(fPattern, FC_EMBOLDEN)) {
+ info->fType = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return info;
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ SkString familyName;
+ this->getFamilyName(&familyName);
+
+ return sk_make_sp<SkTypeface_stream>(std::move(data),
+ familyName,
+ this->fontStyle(),
+ this->isFixedPitch());
+ }
+
+ ~SkTypeface_fontconfig() override {
+ // Hold the lock while unrefing the pattern.
+ FCLocker lock;
+ fPattern.reset();
+ }
+
+private:
+ SkTypeface_fontconfig(SkAutoFcPattern pattern, SkString sysroot)
+ : INHERITED(skfontstyle_from_fcpattern(pattern),
+ FC_PROPORTIONAL != get_int(pattern, FC_SPACING, FC_PROPORTIONAL))
+ , fPattern(std::move(pattern))
+ , fSysroot(std::move(sysroot))
+ { }
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkFontMgr_fontconfig : public SkFontMgr {
+ mutable SkAutoFcConfig fFC; // Only mutable to avoid const cast when passed to FontConfig API.
+ const SkString fSysroot;
+ const sk_sp<SkDataTable> fFamilyNames;
+ const SkTypeface_FreeType::Scanner fScanner;
+
+ class StyleSet : public SkFontStyleSet {
+ public:
+ StyleSet(sk_sp<SkFontMgr_fontconfig> parent, SkAutoFcFontSet fontSet)
+ : fFontMgr(std::move(parent)), fFontSet(std::move(fontSet))
+ { }
+
+ ~StyleSet() override {
+ // Hold the lock while unrefing the font set.
+ FCLocker lock;
+ fFontSet.reset();
+ }
+
+ int count() override { return fFontSet->nfont; }
+
+ void getStyle(int index, SkFontStyle* style, SkString* styleName) override {
+ if (index < 0 || fFontSet->nfont <= index) {
+ return;
+ }
+
+ FCLocker lock;
+ if (style) {
+ *style = skfontstyle_from_fcpattern(fFontSet->fonts[index]);
+ }
+ if (styleName) {
+ *styleName = get_string(fFontSet->fonts[index], FC_STYLE);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ FCLocker lock;
+
+ FcPattern* match = fFontSet->fonts[index];
+ return fFontMgr->createTypefaceFromFcPattern(match).release();
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& style) override {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFontMgr->fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcResult result;
+ FcFontSet* fontSets[1] = { fFontSet };
+ SkAutoFcPattern match(FcFontSetMatch(fFontMgr->fFC,
+ fontSets, SK_ARRAY_COUNT(fontSets),
+ pattern, &result));
+ if (nullptr == match) {
+ return nullptr;
+ }
+
+ return fFontMgr->createTypefaceFromFcPattern(match).release();
+ }
+
+ private:
+ sk_sp<SkFontMgr_fontconfig> fFontMgr;
+ SkAutoFcFontSet fFontSet;
+ };
+
+ static bool FindName(const SkTDArray<const char*>& list, const char* str) {
+ int count = list.count();
+ for (int i = 0; i < count; ++i) {
+ if (!strcmp(list[i], str)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static sk_sp<SkDataTable> GetFamilyNames(FcConfig* fcconfig) {
+ FCLocker lock;
+
+ SkTDArray<const char*> names;
+ SkTDArray<size_t> sizes;
+
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)SK_ARRAY_COUNT(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fcconfig, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* current = allFonts->fonts[fontIndex];
+ for (int id = 0; ; ++id) {
+ FcChar8* fcFamilyName;
+ FcResult result = FcPatternGetString(current, FC_FAMILY, id, &fcFamilyName);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ const char* familyName = reinterpret_cast<const char*>(fcFamilyName);
+ if (familyName && !FindName(names, familyName)) {
+ *names.append() = familyName;
+ *sizes.append() = strlen(familyName) + 1;
+ }
+ }
+ }
+ }
+
+ return SkDataTable::MakeCopyArrays((void const *const *)names.begin(),
+ sizes.begin(), names.count());
+ }
+
+ static bool FindByFcPattern(SkTypeface* cached, void* ctx) {
+ SkTypeface_fontconfig* cshFace = static_cast<SkTypeface_fontconfig*>(cached);
+ FcPattern* ctxPattern = static_cast<FcPattern*>(ctx);
+ return FcTrue == FcPatternEqual(cshFace->fPattern, ctxPattern);
+ }
+
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+ /** Creates a typeface using a typeface cache.
+ * @param pattern a complete pattern from FcFontRenderPrepare.
+ */
+ sk_sp<SkTypeface> createTypefaceFromFcPattern(FcPattern* pattern) const {
+ FCLocker::AssertHeld();
+ SkAutoMutexExclusive ama(fTFCacheMutex);
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(FindByFcPattern, pattern);
+ if (!face) {
+ FcPatternReference(pattern);
+ face = SkTypeface_fontconfig::Make(SkAutoFcPattern(pattern), fSysroot);
+ if (face) {
+ // Cannot hold the lock when calling add; an evicted typeface may need to lock.
+ FCLocker::Suspend suspend;
+ fTFCache.add(face);
+ }
+ }
+ return face;
+ }
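+
+ // Lock-ordering sketch (illustrative): the caller holds FCLocker, and
+ // fTFCacheMutex nests inside it. FCLocker::Suspend drops the FontConfig
+ // lock before fTFCache.add() so that an evicted SkTypeface_fontconfig's
+ // destructor, which takes FCLocker itself, cannot deadlock.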
+
+public:
+ /** Takes control of the reference to 'config'. */
+ explicit SkFontMgr_fontconfig(FcConfig* config)
+ : fFC(config ? config : FcInitLoadConfigAndFonts())
+ , fSysroot(reinterpret_cast<const char*>(FcConfigGetSysRoot(fFC)))
+ , fFamilyNames(GetFamilyNames(fFC)) { }
+
+ ~SkFontMgr_fontconfig() override {
+ // Hold the lock while unrefing the config.
+ FCLocker lock;
+ fFC.reset();
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fFamilyNames->count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ familyName->set(fFamilyNames->atStr(index));
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ return this->onMatchFamily(fFamilyNames->atStr(index));
+ }
+
+ /** True if any string object value in the font is the same
+ * as a string object value in the pattern.
+ */
+ static bool AnyMatching(FcPattern* font, FcPattern* pattern, const char* object) {
+ FcChar8* fontString;
+ FcChar8* patternString;
+ FcResult result;
+ // Set an arbitrary limit on the number of pattern object values to consider.
+ // TODO: re-write this to avoid N*M
+ static const int maxId = 16;
+ for (int patternId = 0; patternId < maxId; ++patternId) {
+ result = FcPatternGetString(pattern, object, patternId, &patternString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ for (int fontId = 0; fontId < maxId; ++fontId) {
+ result = FcPatternGetString(font, object, fontId, &fontString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (0 == FcStrCmpIgnoreCase(patternString, fontString)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ bool FontAccessible(FcPattern* font) const {
+ // FontConfig can return fonts which are unreadable.
+ const char* filename = get_string(font, FC_FILE, nullptr);
+ if (nullptr == filename) {
+ return false;
+ }
+
+ // When sysroot was implemented in e96d7760886a3781a46b3271c76af99e15cb0146 (before 2.11.0)
+ // it was broken; mostly fixed in d17f556153fbaf8fe57fdb4fc1f0efa4313f0ecf (after 2.11.1).
+ // This leaves Debian 8 and 9 with broken support for this feature.
+ // As a result, this feature should not be used until at least 2.11.91.
+ // The broken support is mostly around not making all paths relative to the sysroot.
+ // However, even at 2.13.1 it is possible to get a mix of sysroot and non-sysroot paths,
+ // as any added file path not lexically starting with the sysroot will be unchanged.
+ // To allow users to add local app files outside the sysroot,
+ // prefer the sysroot but also look without the sysroot.
+ if (!fSysroot.isEmpty()) {
+ SkString resolvedFilename;
+ resolvedFilename = fSysroot;
+ resolvedFilename += filename;
+ if (sk_exists(resolvedFilename.c_str(), kRead_SkFILE_Flag)) {
+ return true;
+ }
+ }
+ return sk_exists(filename, kRead_SkFILE_Flag);
+ }
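+
+ // Example of the lookup order above (illustrative): with sysroot "/sysroot"
+ // and FC_FILE "/usr/share/fonts/a.ttf", "/sysroot/usr/share/fonts/a.ttf" is
+ // tried first; the bare path is used only if the prefixed one is unreadable.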
+
+ static bool FontFamilyNameMatches(FcPattern* font, FcPattern* pattern) {
+ return AnyMatching(font, pattern, FC_FAMILY);
+ }
+
+ static bool FontContainsCharacter(FcPattern* font, uint32_t character) {
+ FcResult result;
+ FcCharSet* matchCharSet;
+ for (int charSetId = 0; ; ++charSetId) {
+ result = FcPatternGetCharSet(font, FC_CHARSET, charSetId, &matchCharSet);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (FcCharSetHasChar(matchCharSet, character)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ SkAutoFcFontSet matches;
+ // TODO: Some families have 'duplicates' due to symbolic links.
+ // The patterns are exactly the same except for the FC_FILE.
+ // It should be possible to collapse these patterns by normalizing.
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)SK_ARRAY_COUNT(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fFC, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* font = allFonts->fonts[fontIndex];
+ if (FontAccessible(font) && FontFamilyNameMatches(font, matchPattern)) {
+ FcFontSetAdd(matches, FcFontRenderPrepare(fFC, pattern, font));
+ }
+ }
+ }
+
+ return new StyleSet(sk_ref_sp(this), std::move(matches));
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ // We really want to match strong (preferred) and same (acceptable) only here.
+ // If a family name was specified, assume that any weak matches after the last strong match
+ // are weak (default) and ignore them.
+ // The reason for this is that after substitution the pattern for 'sans-serif' looks like
+ // "wwwwwwwwwwwwwwswww" where there are many weak but preferred names, followed by defaults.
+ // So it is possible to have weakly matching but preferred names.
+ // In aliases, bindings are weak by default, so this is easy and common.
+ // If no family name was specified, we'll probably only get weak matches, but that's ok.
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (nullptr == font || !FontAccessible(font) || !FontFamilyNameMatches(font, matchPattern)) {
+ return nullptr;
+ }
+
+ return createTypefaceFromFcPattern(font).release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override
+ {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ if (familyName) {
+ FcValue familyNameValue;
+ familyNameValue.type = FcTypeString;
+ familyNameValue.u.s = reinterpret_cast<const FcChar8*>(familyName);
+ FcPatternAddWeak(pattern, FC_FAMILY, familyNameValue, FcFalse);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ SkAutoFcCharSet charSet;
+ FcCharSetAddChar(charSet, character);
+ FcPatternAddCharSet(pattern, FC_CHARSET, charSet);
+
+ if (bcp47Count > 0) {
+ SkASSERT(bcp47);
+ SkAutoFcLangSet langSet;
+ for (int i = bcp47Count; i --> 0;) {
+ FcLangSetAdd(langSet, (const FcChar8*)bcp47[i]);
+ }
+ FcPatternAddLangSet(pattern, FC_LANG, langSet);
+ }
+
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (nullptr == font || !FontAccessible(font) || !FontContainsCharacter(font, character)) {
+ return nullptr;
+ }
+
+ return createTypefaceFromFcPattern(font).release();
+ }
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface* typeface,
+ const SkFontStyle& style) const override
+ {
+ //TODO: should the SkTypeface_fontconfig know its family?
+ const SkTypeface_fontconfig* fcTypeface =
+ static_cast<const SkTypeface_fontconfig*>(typeface);
+ return this->matchFamilyStyle(get_string(fcTypeface->fPattern, FC_FAMILY), style);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ const size_t length = stream->getLength();
+ if (length <= 0 || (1u << 30) < length) {
+ return nullptr;
+ }
+
+ SkString name;
+ SkFontStyle style;
+ bool isFixedWidth = false;
+ if (!fScanner.scanFont(stream.get(), ttcIndex, &name, &style, &isFixedWidth, nullptr)) {
+ return nullptr;
+ }
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return sk_sp<SkTypeface>(new SkTypeface_stream(std::move(data), std::move(name),
+ style, isFixedWidth));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), args.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, args.getVariationDesignPosition(),
+ axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), args.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return sk_sp<SkTypeface>(new SkTypeface_stream(std::move(data), std::move(name),
+ style, isFixedPitch));
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->makeFromStream(skstd::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ return this->makeFromStream(SkStream::MakeFromFile(path), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFontData(std::unique_ptr<SkFontData> fontData) const override {
+ SkStreamAsset* stream(fontData->getStream());
+ const size_t length = stream->getLength();
+ if (length <= 0 || (1u << 30) < length) {
+ return nullptr;
+ }
+
+ const int ttcIndex = fontData->getIndex();
+ SkString name;
+ SkFontStyle style;
+ bool isFixedWidth = false;
+ if (!fScanner.scanFont(stream, ttcIndex, &name, &style, &isFixedWidth, nullptr)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkTypeface>(new SkTypeface_stream(std::move(fontData), std::move(name),
+ style, isFixedWidth));
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ sk_sp<SkTypeface> typeface(this->matchFamilyStyle(familyName, style));
+ if (typeface) {
+ return typeface;
+ }
+
+ return sk_sp<SkTypeface>(this->matchFamilyStyle(nullptr, style));
+ }
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FontConfig(FcConfig* fc) {
+ return sk_make_sp<SkFontMgr_fontconfig>(fc);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
new file mode 100644
index 0000000000..a011ec5c1e
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkFontMgr_fontconfig.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_FontConfig(nullptr);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp
new file mode 100644
index 0000000000..ceb7c54ac9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp
@@ -0,0 +1,505 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkFontMgr_fuchsia.h"
+
+#include <fuchsia/fonts/cpp/fidl.h>
+#include <lib/zx/vmar.h>
+#include <strings.h>
+#include <memory>
+#include <unordered_map>
+
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkTypefaceCache.h"
+
+void UnmapMemory(const void* buffer, uint64_t size) {
+ static_assert(sizeof(void*) == sizeof(uint64_t), "pointers aren't 64-bit");
+ zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(buffer), size);
+}
+
+struct ReleaseSkDataContext {
+ uint64_t fBufferSize;
+ std::function<void()> releaseProc;
+
+ ReleaseSkDataContext(uint64_t bufferSize, const std::function<void()>& releaseProc)
+ : fBufferSize(bufferSize), releaseProc(releaseProc) {}
+};
+
+void ReleaseSkData(const void* buffer, void* context) {
+ auto releaseSkDataContext = reinterpret_cast<ReleaseSkDataContext*>(context);
+ SkASSERT(releaseSkDataContext);
+ UnmapMemory(buffer, releaseSkDataContext->fBufferSize);
+ releaseSkDataContext->releaseProc();
+ delete releaseSkDataContext;
+}
+
+sk_sp<SkData> MakeSkDataFromBuffer(const fuchsia::mem::Buffer& data,
+ std::function<void()> release_proc) {
+ uint64_t size = data.size;
+ uintptr_t buffer = 0;
+ zx_status_t status = zx::vmar::root_self()->map(0, data.vmo, 0, size, ZX_VM_PERM_READ, &buffer);
+ if (status != ZX_OK) return nullptr;
+ auto context = new ReleaseSkDataContext(size, release_proc);
+ return SkData::MakeWithProc(reinterpret_cast<void*>(buffer), size, ReleaseSkData, context);
+}
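+
+// Lifetime sketch (illustrative): the returned SkData wraps the mapped VMO
+// pages directly. When the last reference is dropped, ReleaseSkData unmaps the
+// range and then runs release_proc:
+//
+//     auto data = MakeSkDataFromBuffer(buffer, [] { /* evict cache entry */ });
+//     // ... read data->bytes() ...
+//     data.reset();  // unmap + release_proc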
+
+fuchsia::fonts::Slant SkToFuchsiaSlant(SkFontStyle::Slant slant) {
+ switch (slant) {
+ case SkFontStyle::kOblique_Slant:
+ return fuchsia::fonts::Slant::OBLIQUE;
+ case SkFontStyle::kItalic_Slant:
+ return fuchsia::fonts::Slant::ITALIC;
+ case SkFontStyle::kUpright_Slant:
+ default:
+ return fuchsia::fonts::Slant::UPRIGHT;
+ }
+}
+
+SkFontStyle::Slant FuchsiaToSkSlant(fuchsia::fonts::Slant slant) {
+ switch (slant) {
+ case fuchsia::fonts::Slant::OBLIQUE:
+ return SkFontStyle::kOblique_Slant;
+ case fuchsia::fonts::Slant::ITALIC:
+ return SkFontStyle::kItalic_Slant;
+ case fuchsia::fonts::Slant::UPRIGHT:
+ default:
+ return SkFontStyle::kUpright_Slant;
+ }
+}
+
+fuchsia::fonts::Width SkToFuchsiaWidth(SkFontStyle::Width width) {
+ switch (width) {
+ case SkFontStyle::Width::kUltraCondensed_Width:
+ return fuchsia::fonts::Width::ULTRA_CONDENSED;
+ case SkFontStyle::Width::kExtraCondensed_Width:
+ return fuchsia::fonts::Width::EXTRA_CONDENSED;
+ case SkFontStyle::Width::kCondensed_Width:
+ return fuchsia::fonts::Width::CONDENSED;
+ case SkFontStyle::Width::kSemiCondensed_Width:
+ return fuchsia::fonts::Width::SEMI_CONDENSED;
+ case SkFontStyle::Width::kNormal_Width:
+ return fuchsia::fonts::Width::NORMAL;
+ case SkFontStyle::Width::kSemiExpanded_Width:
+ return fuchsia::fonts::Width::SEMI_EXPANDED;
+ case SkFontStyle::Width::kExpanded_Width:
+ return fuchsia::fonts::Width::EXPANDED;
+ case SkFontStyle::Width::kExtraExpanded_Width:
+ return fuchsia::fonts::Width::EXTRA_EXPANDED;
+ case SkFontStyle::Width::kUltraExpanded_Width:
+ return fuchsia::fonts::Width::ULTRA_EXPANDED;
+ }
+}
+
+// Tries to convert the given integer Skia style width value to the Fuchsia equivalent.
+//
+// On success, returns true. On failure, returns false, and `outFuchsiaWidth` is left untouched.
+bool SkToFuchsiaWidth(int skWidth, fuchsia::fonts::Width* outFuchsiaWidth) {
+ if (skWidth < SkFontStyle::Width::kUltraCondensed_Width ||
+ skWidth > SkFontStyle::Width::kUltraExpanded_Width) {
+ return false;
+ }
+ auto typedSkWidth = static_cast<SkFontStyle::Width>(skWidth);
+ *outFuchsiaWidth = SkToFuchsiaWidth(typedSkWidth);
+ return true;
+}
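+
+// For example (illustrative): SkToFuchsiaWidth(5, &w) succeeds and sets w to
+// fuchsia::fonts::Width::NORMAL (SkFontStyle widths run 1..9), while
+// SkToFuchsiaWidth(42, &w) returns false and leaves w untouched.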
+
+SkFontStyle::Width FuchsiaToSkWidth(fuchsia::fonts::Width width) {
+ switch (width) {
+ case fuchsia::fonts::Width::ULTRA_CONDENSED:
+ return SkFontStyle::Width::kUltraCondensed_Width;
+ case fuchsia::fonts::Width::EXTRA_CONDENSED:
+ return SkFontStyle::Width::kExtraCondensed_Width;
+ case fuchsia::fonts::Width::CONDENSED:
+ return SkFontStyle::Width::kCondensed_Width;
+ case fuchsia::fonts::Width::SEMI_CONDENSED:
+ return SkFontStyle::Width::kSemiCondensed_Width;
+ case fuchsia::fonts::Width::NORMAL:
+ return SkFontStyle::Width::kNormal_Width;
+ case fuchsia::fonts::Width::SEMI_EXPANDED:
+ return SkFontStyle::Width::kSemiExpanded_Width;
+ case fuchsia::fonts::Width::EXPANDED:
+ return SkFontStyle::Width::kExpanded_Width;
+ case fuchsia::fonts::Width::EXTRA_EXPANDED:
+ return SkFontStyle::Width::kExtraExpanded_Width;
+ case fuchsia::fonts::Width::ULTRA_EXPANDED:
+ return SkFontStyle::Width::kUltraExpanded_Width;
+ }
+}
+
+fuchsia::fonts::Style2 SkToFuchsiaStyle(const SkFontStyle& style) {
+ fuchsia::fonts::Style2 fuchsiaStyle;
+ fuchsiaStyle.set_slant(SkToFuchsiaSlant(style.slant())).set_weight(style.weight());
+
+ fuchsia::fonts::Width fuchsiaWidth = fuchsia::fonts::Width::NORMAL;
+ if (SkToFuchsiaWidth(style.width(), &fuchsiaWidth)) {
+ fuchsiaStyle.set_width(fuchsiaWidth);
+ }
+
+ return fuchsiaStyle;
+}
+
+constexpr struct {
+ const char* fName;
+ fuchsia::fonts::GenericFontFamily fGenericFontFamily;
+} kGenericFontFamiliesByName[] = {{"serif", fuchsia::fonts::GenericFontFamily::SERIF},
+ {"sans", fuchsia::fonts::GenericFontFamily::SANS_SERIF},
+ {"sans-serif", fuchsia::fonts::GenericFontFamily::SANS_SERIF},
+ {"mono", fuchsia::fonts::GenericFontFamily::MONOSPACE},
+ {"monospace", fuchsia::fonts::GenericFontFamily::MONOSPACE},
+ {"cursive", fuchsia::fonts::GenericFontFamily::CURSIVE},
+ {"fantasy", fuchsia::fonts::GenericFontFamily::FANTASY},
+ {"system-ui", fuchsia::fonts::GenericFontFamily::SYSTEM_UI},
+ {"emoji", fuchsia::fonts::GenericFontFamily::EMOJI},
+ {"math", fuchsia::fonts::GenericFontFamily::MATH},
+ {"fangsong", fuchsia::fonts::GenericFontFamily::FANGSONG}};
+
+// Tries to find a generic font family with the given name. If none is found, returns false.
+bool GetGenericFontFamilyByName(const char* name,
+ fuchsia::fonts::GenericFontFamily* outGenericFamily) {
+ if (!name) return false;
+ for (auto& genericFamily : kGenericFontFamiliesByName) {
+ if (strcasecmp(genericFamily.fName, name) == 0) {
+ *outGenericFamily = genericFamily.fGenericFontFamily;
+ return true;
+ }
+ }
+ return false;
+}
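+
+// For example (illustrative): GetGenericFontFamilyByName("Sans-Serif", &g)
+// matches case-insensitively and sets g to GenericFontFamily::SANS_SERIF,
+// while GetGenericFontFamilyByName("Arial", &g) returns false.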
+
+struct TypefaceId {
+ uint32_t bufferId;
+ uint32_t ttcIndex;
+
+ bool operator==(TypefaceId& other) {
+ return std::tie(bufferId, ttcIndex) == std::tie(other.bufferId, other.ttcIndex);
+ }
+}
+
+constexpr kNullTypefaceId = {0xFFFFFFFF, 0xFFFFFFFF};
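+
+// Note: the declaration above continues the TypefaceId definition (there is no
+// ';' after the closing brace), so kNullTypefaceId is a constexpr variable of
+// type TypefaceId rather than an untyped declaration.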
+
+class SkTypeface_Fuchsia : public SkTypeface_Stream {
+public:
+ SkTypeface_Fuchsia(std::unique_ptr<SkFontData> fontData, const SkFontStyle& style,
+ bool isFixedPitch, const SkString familyName, TypefaceId id)
+ : SkTypeface_Stream(std::move(fontData), style, isFixedPitch,
+ /*sys_font=*/true, familyName)
+ , fId(id) {}
+
+ TypefaceId id() { return fId; }
+
+private:
+ TypefaceId fId;
+};
+
+sk_sp<SkTypeface> CreateTypefaceFromSkStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args, TypefaceId id) {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ Scanner scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), args.getCollectionIndex(), &name, &style, &isFixedPitch,
+ &axisDefinitions)) {
+ return nullptr;
+ }
+
+ const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, position, axisValues, name);
+
+ auto fontData = std::make_unique<SkFontData>(std::move(stream), args.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return sk_make_sp<SkTypeface_Fuchsia>(std::move(fontData), style, isFixedPitch, name, id);
+}
+
+sk_sp<SkTypeface> CreateTypefaceFromSkData(sk_sp<SkData> data, TypefaceId id) {
+ return CreateTypefaceFromSkStream(std::make_unique<SkMemoryStream>(std::move(data)),
+ SkFontArguments().setCollectionIndex(id.ttcIndex), id);
+}
+
+class SkFontMgr_Fuchsia final : public SkFontMgr {
+public:
+ SkFontMgr_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider);
+ ~SkFontMgr_Fuchsia() override;
+
+protected:
+ // SkFontMgr overrides.
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[], const SkFontStyle&) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ SkTypeface* onMatchFaceStyle(const SkTypeface*, const SkFontStyle&) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ friend class SkFontStyleSet_Fuchsia;
+
+ sk_sp<SkTypeface> FetchTypeface(const char familyName[], const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count, SkUnichar character,
+ bool allow_fallback, bool exact_style_match) const;
+
+ sk_sp<SkData> GetOrCreateSkData(int bufferId, const fuchsia::mem::Buffer& buffer) const;
+ void OnSkDataDeleted(int bufferId) const;
+
+ sk_sp<SkTypeface> GetOrCreateTypeface(TypefaceId id, const fuchsia::mem::Buffer& buffer) const;
+
+ mutable fuchsia::fonts::ProviderSyncPtr fFontProvider;
+
+ mutable SkMutex fCacheMutex;
+
+ // Must be accessed only with fCacheMutex acquired.
+ mutable std::unordered_map<int, SkData*> fBufferCache;
+ mutable SkTypefaceCache fTypefaceCache;
+};
+
+class SkFontStyleSet_Fuchsia : public SkFontStyleSet {
+public:
+ SkFontStyleSet_Fuchsia(sk_sp<SkFontMgr_Fuchsia> font_manager, std::string familyName,
+ std::vector<SkFontStyle> styles)
+ : fFontManager(font_manager), fFamilyName(familyName), fStyles(styles) {}
+
+ ~SkFontStyleSet_Fuchsia() override = default;
+
+ int count() override { return fStyles.size(); }
+
+ void getStyle(int index, SkFontStyle* style, SkString* styleName) override {
+ SkASSERT(index >= 0 && index < static_cast<int>(fStyles.size()));
+ if (style) *style = fStyles[index];
+
+ // We don't have style names. Return an empty name.
+ if (styleName) styleName->reset();
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT(index >= 0 && index < static_cast<int>(fStyles.size()));
+
+ if (fTypefaces.empty()) fTypefaces.resize(fStyles.size());
+
+ if (!fTypefaces[index]) {
+ fTypefaces[index] = fFontManager->FetchTypeface(
+ fFamilyName.c_str(), fStyles[index], /*bcp47=*/nullptr,
+ /*bcp47Count=*/0, /*character=*/0,
+ /*allow_fallback=*/false, /*exact_style_match=*/true);
+ }
+
+ return SkSafeRef(fTypefaces[index].get());
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override { return matchStyleCSS3(pattern); }
+
+private:
+ sk_sp<SkFontMgr_Fuchsia> fFontManager;
+ std::string fFamilyName;
+ std::vector<SkFontStyle> fStyles;
+ std::vector<sk_sp<SkTypeface>> fTypefaces;
+};
+
+SkFontMgr_Fuchsia::SkFontMgr_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider)
+ : fFontProvider(std::move(provider)) {}
+
+SkFontMgr_Fuchsia::~SkFontMgr_Fuchsia() = default;
+
+int SkFontMgr_Fuchsia::onCountFamilies() const {
+ // Family enumeration is not supported.
+ return 0;
+}
+
+void SkFontMgr_Fuchsia::onGetFamilyName(int index, SkString* familyName) const {
+ // Family enumeration is not supported.
+ familyName->reset();
+}
+
+SkFontStyleSet* SkFontMgr_Fuchsia::onCreateStyleSet(int index) const {
+ // Family enumeration is not supported.
+ return nullptr;
+}
+
+SkFontStyleSet* SkFontMgr_Fuchsia::onMatchFamily(const char familyName[]) const {
+ fuchsia::fonts::FamilyName typedFamilyName;
+ typedFamilyName.name = familyName;
+
+ fuchsia::fonts::FontFamilyInfo familyInfo;
+ int result = fFontProvider->GetFontFamilyInfo(typedFamilyName, &familyInfo);
+ if (result != ZX_OK || !familyInfo.has_styles() || familyInfo.styles().empty()) return nullptr;
+
+ std::vector<SkFontStyle> styles;
+ for (auto& style : familyInfo.styles()) {
+ styles.push_back(SkFontStyle(style.weight(), FuchsiaToSkWidth(style.width()),
+ FuchsiaToSkSlant(style.slant())));
+ }
+
+ return new SkFontStyleSet_Fuchsia(sk_ref_sp(this), familyInfo.name().name, std::move(styles));
+}
+
+SkTypeface* SkFontMgr_Fuchsia::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const {
+ sk_sp<SkTypeface> typeface =
+ FetchTypeface(familyName, style, /*bcp47=*/nullptr,
+ /*bcp47Count=*/0, /*character=*/0,
+ /*allow_fallback=*/false, /*exact_style_match=*/false);
+ return typeface.release();
+}
+
+SkTypeface* SkFontMgr_Fuchsia::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const {
+ sk_sp<SkTypeface> typeface =
+ FetchTypeface(familyName, style, bcp47, bcp47Count, character, /*allow_fallback=*/true,
+ /*exact_style_match=*/false);
+ return typeface.release();
+}
+
+SkTypeface* SkFontMgr_Fuchsia::onMatchFaceStyle(const SkTypeface*, const SkFontStyle&) const {
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromData(sk_sp<SkData>, int ttcIndex) const {
+ SkASSERT(false);
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> asset,
+ int ttcIndex) const {
+ return makeFromStream(std::move(asset), SkFontArguments().setCollectionIndex(ttcIndex));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> asset,
+ const SkFontArguments& args) const {
+ return CreateTypefaceFromSkStream(std::move(asset), args, kNullTypefaceId);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromFile(const char path[], int ttcIndex) const {
+ return makeFromStream(std::make_unique<SkFILEStream>(path), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ return sk_sp<SkTypeface>(matchFamilyStyle(familyName, style));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::FetchTypeface(const char familyName[],
+ const SkFontStyle& style, const char* bcp47[],
+ int bcp47Count, SkUnichar character,
+ bool allow_fallback,
+ bool exact_style_match) const {
+ fuchsia::fonts::TypefaceQuery query;
+ query.set_style(SkToFuchsiaStyle(style));
+
+ if (bcp47Count > 0) {
+ std::vector<fuchsia::intl::LocaleId> languages{};
+ for (int i = 0; i < bcp47Count; i++) {
+ fuchsia::intl::LocaleId localeId;
+ localeId.id = bcp47[i];
+ languages.push_back(localeId);
+ }
+ query.set_languages(std::move(languages));
+ }
+
+ if (character) {
+ query.set_code_points({static_cast<uint32_t>(character)});
+ }
+
+ // If family name is not specified or is a generic family name (e.g. "serif"), then enable
+ // fallback; otherwise, pass the family name as is.
+ fuchsia::fonts::GenericFontFamily genericFontFamily =
+ fuchsia::fonts::GenericFontFamily::SANS_SERIF;
+ bool isGenericFontFamily = GetGenericFontFamilyByName(familyName, &genericFontFamily);
+ if (!familyName || *familyName == '\0' || isGenericFontFamily) {
+ if (isGenericFontFamily) {
+ query.set_fallback_family(genericFontFamily);
+ }
+ allow_fallback = true;
+ } else {
+ fuchsia::fonts::FamilyName typedFamilyName{};
+ typedFamilyName.name = familyName;
+ query.set_family(typedFamilyName);
+ }
+
+ fuchsia::fonts::TypefaceRequestFlags flags{};
+ if (!allow_fallback) flags |= fuchsia::fonts::TypefaceRequestFlags::EXACT_FAMILY;
+ if (exact_style_match) flags |= fuchsia::fonts::TypefaceRequestFlags::EXACT_STYLE;
+
+ fuchsia::fonts::TypefaceRequest request;
+ request.set_query(std::move(query));
+ request.set_flags(flags);
+
+ fuchsia::fonts::TypefaceResponse response;
+ int result = fFontProvider->GetTypeface(std::move(request), &response);
+ if (result != ZX_OK) return nullptr;
+
+ // The service may return an empty response if there is no font matching the request.
+ if (response.IsEmpty()) return nullptr;
+
+ return GetOrCreateTypeface(TypefaceId{response.buffer_id(), response.font_index()},
+ response.buffer());
+}
+
+sk_sp<SkData> SkFontMgr_Fuchsia::GetOrCreateSkData(int bufferId,
+ const fuchsia::mem::Buffer& buffer) const {
+ fCacheMutex.assertHeld();
+
+ auto iter = fBufferCache.find(bufferId);
+ if (iter != fBufferCache.end()) {
+ return sk_ref_sp(iter->second);
+ }
+ auto font_mgr = sk_ref_sp(this);
+ auto data = MakeSkDataFromBuffer(
+ buffer, [font_mgr, bufferId]() { font_mgr->OnSkDataDeleted(bufferId); });
+ if (!data) {
+ return nullptr;
+ }
+ fBufferCache[bufferId] = data.get();
+ return data;
+}
+
+void SkFontMgr_Fuchsia::OnSkDataDeleted(int bufferId) const {
+ SK_UNUSED bool wasFound = fBufferCache.erase(bufferId) != 0;
+ SkASSERT(wasFound);
+}
+
+static bool FindByTypefaceId(SkTypeface* cachedTypeface, void* ctx) {
+ SkTypeface_Fuchsia* cachedFuchsiaTypeface = static_cast<SkTypeface_Fuchsia*>(cachedTypeface);
+ TypefaceId* id = static_cast<TypefaceId*>(ctx);
+
+ return cachedFuchsiaTypeface->id() == *id;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::GetOrCreateTypeface(TypefaceId id,
+ const fuchsia::mem::Buffer& buffer) const {
+ SkAutoMutexExclusive mutexLock(fCacheMutex);
+
+ sk_sp<SkTypeface> cached = fTypefaceCache.findByProcAndRef(FindByTypefaceId, &id);
+ if (cached) return cached;
+
+ sk_sp<SkData> data = GetOrCreateSkData(id.bufferId, buffer);
+ if (!data) return nullptr;
+
+ auto result = CreateTypefaceFromSkData(std::move(data), id);
+ fTypefaceCache.add(result);
+ return result;
+}
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider) {
+ return sk_make_sp<SkFontMgr_Fuchsia>(std::move(provider));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
new file mode 100644
index 0000000000..acf59190fe
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
@@ -0,0 +1,1227 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkMutex.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/utils/SkUTF.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_2.h>
+#include <dwrite_3.h>
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontFileLoader : public IDWriteFontFileLoader {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileLoader methods
+ SK_STDMETHODIMP CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream) override;
+
+ // Takes ownership of stream.
+ static HRESULT Create(std::unique_ptr<SkStreamAsset> stream,
+ StreamFontFileLoader** streamFontFileLoader) {
+ *streamFontFileLoader = new StreamFontFileLoader(std::move(stream));
+ if (nullptr == *streamFontFileLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+
+private:
+ StreamFontFileLoader(std::unique_ptr<SkStreamAsset> stream)
+ : fStream(std::move(stream)), fRefCount(1)
+ {}
+ virtual ~StreamFontFileLoader() { }
+
+ std::unique_ptr<SkStreamAsset> fStream;
+ ULONG fRefCount;
+};
+
+SK_STDMETHODIMP StreamFontFileLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP StreamFontFileLoader::CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream)
+{
+ SkTScopedComPtr<SkDWriteFontFileStreamWrapper> stream;
+ HR(SkDWriteFontFileStreamWrapper::Create(fStream->duplicate().release(), &stream));
+ *fontFileStream = stream.release();
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontFileEnumerator : public IDWriteFontFileEnumerator {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileEnumerator methods
+ SK_STDMETHODIMP MoveNext(BOOL* hasCurrentFile) override;
+ SK_STDMETHODIMP GetCurrentFontFile(IDWriteFontFile** fontFile) override;
+
+ static HRESULT Create(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader,
+ StreamFontFileEnumerator** streamFontFileEnumerator) {
+ *streamFontFileEnumerator = new StreamFontFileEnumerator(factory, fontFileLoader);
+ if (nullptr == *streamFontFileEnumerator) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontFileEnumerator(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader);
+ virtual ~StreamFontFileEnumerator() { }
+
+ ULONG fRefCount;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFile> fCurrentFile;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+ bool fHasNext;
+};
+
+StreamFontFileEnumerator::StreamFontFileEnumerator(IDWriteFactory* factory,
+ IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFactory(SkRefComPtr(factory))
+ , fCurrentFile()
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ , fHasNext(true)
+{ }
+
+SK_STDMETHODIMP StreamFontFileEnumerator::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileEnumerator)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileEnumerator::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileEnumerator::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP StreamFontFileEnumerator::MoveNext(BOOL* hasCurrentFile) {
+ *hasCurrentFile = FALSE;
+
+ if (!fHasNext) {
+ return S_OK;
+ }
+ fHasNext = false;
+
+ UINT32 dummy = 0;
+ HR(fFactory->CreateCustomFontFileReference(
+ &dummy, //cannot be nullptr
+ sizeof(dummy), //even if this is 0
+ fFontFileLoader.get(),
+ &fCurrentFile));
+
+ *hasCurrentFile = TRUE;
+ return S_OK;
+}
+
+SK_STDMETHODIMP StreamFontFileEnumerator::GetCurrentFontFile(IDWriteFontFile** fontFile) {
+ if (fCurrentFile.get() == nullptr) {
+ *fontFile = nullptr;
+ return E_FAIL;
+ }
+
+ *fontFile = SkRefComPtr(fCurrentFile.get());
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontCollectionLoader : public IDWriteFontCollectionLoader {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontCollectionLoader methods
+ SK_STDMETHODIMP CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator) override;
+
+ static HRESULT Create(IDWriteFontFileLoader* fontFileLoader,
+ StreamFontCollectionLoader** streamFontCollectionLoader) {
+ *streamFontCollectionLoader = new StreamFontCollectionLoader(fontFileLoader);
+ if (nullptr == *streamFontCollectionLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontCollectionLoader(IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ { }
+ virtual ~StreamFontCollectionLoader() { }
+
+ ULONG fRefCount;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+};
+
+SK_STDMETHODIMP StreamFontCollectionLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontCollectionLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontCollectionLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontCollectionLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP StreamFontCollectionLoader::CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator)
+{
+ SkTScopedComPtr<StreamFontFileEnumerator> enumerator;
+ HR(StreamFontFileEnumerator::Create(factory, fFontFileLoader.get(), &enumerator));
+ *fontFileEnumerator = enumerator.release();
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_DirectWrite : public SkFontMgr {
+public:
+ /** localeNameLength and defaultFamilyNameLength must include the null terminator. */
+ SkFontMgr_DirectWrite(IDWriteFactory* factory, IDWriteFontCollection* fontCollection,
+ IDWriteFontFallback* fallback,
+ const WCHAR* localeName, int localeNameLength,
+ const WCHAR* defaultFamilyName, int defaultFamilyNameLength)
+ : fFactory(SkRefComPtr(factory))
+ , fFontFallback(SkSafeRefComPtr(fallback))
+ , fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ , fDefaultFamilyName(defaultFamilyNameLength)
+ {
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ memcpy(fDefaultFamilyName.get(), defaultFamilyName, defaultFamilyNameLength*sizeof(WCHAR));
+ }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ HRESULT getByFamilyName(const WCHAR familyName[], IDWriteFontFamily** fontFamily) const;
+ sk_sp<SkTypeface> fallback(const WCHAR* dwFamilyName, DWriteStyle,
+ const WCHAR* dwBcp47, UINT32 character) const;
+ sk_sp<SkTypeface> layoutFallback(const WCHAR* dwFamilyName, DWriteStyle,
+ const WCHAR* dwBcp47, UINT32 character) const;
+
+ /** Creates a typeface using a typeface cache. */
+ sk_sp<SkTypeface> makeTypefaceFromDWriteFont(IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFallback> fFontFallback;
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+ SkSMallocWCHAR fDefaultFamilyName;
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ friend class SkFontStyleSet_DirectWrite;
+ friend class FontFallbackRenderer;
+};
+
+class SkFontStyleSet_DirectWrite : public SkFontStyleSet {
+public:
+ SkFontStyleSet_DirectWrite(const SkFontMgr_DirectWrite* fontMgr,
+ IDWriteFontFamily* fontFamily)
+ : fFontMgr(SkRef(fontMgr))
+ , fFontFamily(SkRefComPtr(fontFamily))
+ { }
+
+ int count() override;
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override;
+ SkTypeface* createTypeface(int index) override;
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override;
+
+private:
+ sk_sp<const SkFontMgr_DirectWrite> fFontMgr;
+ SkTScopedComPtr<IDWriteFontFamily> fFontFamily;
+};
+
+static HRESULT are_same(IUnknown* a, IUnknown* b, bool& same) {
+ SkTScopedComPtr<IUnknown> iunkA;
+ HRM(a->QueryInterface(&iunkA), "Failed to QI<IUnknown> for a.");
+
+ SkTScopedComPtr<IUnknown> iunkB;
+ HRM(b->QueryInterface(&iunkB), "Failed to QI<IUnknown> for b.");
+
+ same = (iunkA.get() == iunkB.get());
+ return S_OK;
+}
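+
+// COM defines object identity by the IUnknown pointer: QueryInterface for
+// IID_IUnknown on the same object must always return the same pointer, which
+// is what are_same() relies on. A hedged usage sketch (the two IDWriteFontFile
+// pointers are assumptions, not taken from this file):
+//
+//   bool same = false;
+//   if (SUCCEEDED(are_same(fileA.get(), fileB.get(), same)) && same) {
+//       // fileA and fileB refer to the same underlying COM object.
+//   }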
+
+struct ProtoDWriteTypeface {
+ IDWriteFontFace* fDWriteFontFace;
+ IDWriteFont* fDWriteFont;
+ IDWriteFontFamily* fDWriteFontFamily;
+};
+
+static bool FindByDWriteFont(SkTypeface* cached, void* ctx) {
+ DWriteFontTypeface* cshFace = reinterpret_cast<DWriteFontTypeface*>(cached);
+ ProtoDWriteTypeface* ctxFace = reinterpret_cast<ProtoDWriteTypeface*>(ctx);
+ bool same;
+
+ //Check to see if the two fonts are identical.
+ HRB(are_same(cshFace->fDWriteFont.get(), ctxFace->fDWriteFont, same));
+ if (same) {
+ return true;
+ }
+
+ HRB(are_same(cshFace->fDWriteFontFace.get(), ctxFace->fDWriteFontFace, same));
+ if (same) {
+ return true;
+ }
+
+ //Check if the two fonts share the same loader and have the same key.
+ UINT32 cshNumFiles;
+ UINT32 ctxNumFiles;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, nullptr));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, nullptr));
+ if (cshNumFiles != ctxNumFiles) {
+ return false;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> cshFontFile;
+ SkTScopedComPtr<IDWriteFontFile> ctxFontFile;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, &cshFontFile));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, &ctxFontFile));
+
+ //for (each file) { //we currently only admit fonts from one file.
+ SkTScopedComPtr<IDWriteFontFileLoader> cshFontFileLoader;
+ SkTScopedComPtr<IDWriteFontFileLoader> ctxFontFileLoader;
+ HRB(cshFontFile->GetLoader(&cshFontFileLoader));
+ HRB(ctxFontFile->GetLoader(&ctxFontFileLoader));
+ HRB(are_same(cshFontFileLoader.get(), ctxFontFileLoader.get(), same));
+ if (!same) {
+ return false;
+ }
+ //}
+
+ const void* cshRefKey;
+ UINT32 cshRefKeySize;
+ const void* ctxRefKey;
+ UINT32 ctxRefKeySize;
+ HRB(cshFontFile->GetReferenceKey(&cshRefKey, &cshRefKeySize));
+ HRB(ctxFontFile->GetReferenceKey(&ctxRefKey, &ctxRefKeySize));
+ if (cshRefKeySize != ctxRefKeySize) {
+ return false;
+ }
+ if (0 != memcmp(cshRefKey, ctxRefKey, ctxRefKeySize)) {
+ return false;
+ }
+
+ //TODO: better means than comparing name strings?
+ //NOTE: .ttc and fake bold/italic will end up here.
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFaceNames;
+ HRB(cshFace->fDWriteFontFamily->GetFamilyNames(&cshFamilyNames));
+ HRB(cshFace->fDWriteFont->GetFaceNames(&cshFaceNames));
+ UINT32 cshFamilyNameLength;
+ UINT32 cshFaceNameLength;
+ HRB(cshFamilyNames->GetStringLength(0, &cshFamilyNameLength));
+ HRB(cshFaceNames->GetStringLength(0, &cshFaceNameLength));
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFaceNames;
+ HRB(ctxFace->fDWriteFontFamily->GetFamilyNames(&ctxFamilyNames));
+ HRB(ctxFace->fDWriteFont->GetFaceNames(&ctxFaceNames));
+ UINT32 ctxFamilyNameLength;
+ UINT32 ctxFaceNameLength;
+ HRB(ctxFamilyNames->GetStringLength(0, &ctxFamilyNameLength));
+ HRB(ctxFaceNames->GetStringLength(0, &ctxFaceNameLength));
+
+ if (cshFamilyNameLength != ctxFamilyNameLength ||
+ cshFaceNameLength != ctxFaceNameLength)
+ {
+ return false;
+ }
+
+ SkSMallocWCHAR cshFamilyName(cshFamilyNameLength+1);
+ SkSMallocWCHAR cshFaceName(cshFaceNameLength+1);
+ HRB(cshFamilyNames->GetString(0, cshFamilyName.get(), cshFamilyNameLength+1));
+ HRB(cshFaceNames->GetString(0, cshFaceName.get(), cshFaceNameLength+1));
+
+ SkSMallocWCHAR ctxFamilyName(ctxFamilyNameLength+1);
+ SkSMallocWCHAR ctxFaceName(ctxFaceNameLength+1);
+ HRB(ctxFamilyNames->GetString(0, ctxFamilyName.get(), ctxFamilyNameLength+1));
+ HRB(ctxFaceNames->GetString(0, ctxFaceName.get(), ctxFaceNameLength+1));
+
+ return wcscmp(cshFamilyName.get(), ctxFamilyName.get()) == 0 &&
+ wcscmp(cshFaceName.get(), ctxFaceName.get()) == 0;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::makeTypefaceFromDWriteFont(
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const {
+ SkAutoMutexExclusive ama(fTFCacheMutex);
+ ProtoDWriteTypeface spec = { fontFace, font, fontFamily };
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(FindByDWriteFont, &spec);
+ if (nullptr == face) {
+ face = DWriteFontTypeface::Make(fFactory.get(), fontFace, font, fontFamily);
+ if (face) {
+ fTFCache.add(face);
+ }
+ }
+ return face;
+}
+
+int SkFontMgr_DirectWrite::onCountFamilies() const {
+ return fFontCollection->GetFontFamilyCount();
+}
+
+void SkFontMgr_DirectWrite::onGetFamilyName(int index, SkString* familyName) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRVM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRVM(fontFamily->GetFamilyNames(&familyNames), "Could not get family names.");
+
+ sk_get_locale_string(familyNames.get(), fLocaleName.get(), familyName);
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onCreateStyleSet(int index) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ return new SkFontStyleSet_DirectWrite(this, fontFamily.get());
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onMatchFamily(const char familyName[]) const {
+ if (!familyName) {
+ return nullptr;
+ }
+
+ SkSMallocWCHAR dwFamilyName;
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyName));
+
+ UINT32 index;
+ BOOL exists;
+ HRNM(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.");
+ if (!exists) {
+ return nullptr;
+ }
+
+ return this->onCreateStyleSet(index);
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const {
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+}
+
+class FontFallbackRenderer : public IDWriteTextRenderer {
+public:
+ FontFallbackRenderer(const SkFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character), fResolvedTypeface(nullptr) {
+ }
+
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+        return E_NOINTERFACE; // COM requires E_NOINTERFACE for unsupported interfaces.
+ }
+
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ // IDWriteTextRenderer methods
+ SK_STDMETHODIMP DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ if (!glyphRun->fontFace) {
+ HRM(E_INVALIDARG, "Glyph run without font face.");
+ }
+
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+        // The font returned may not actually contain the requested character: when
+        // no font in the collection has it, DirectWrite hands back a fallback font.
+        // Check that the font actually contains the requested character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRM(font->GetFontFamily(&fontFamily), "Could not get family.");
+ fResolvedTypeface = fOuter->makeTypefaceFromDWriteFont(glyphRun->fontFace,
+ font.get(),
+ fontFamily.get());
+ }
+
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ SK_STDMETHODIMP IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = { 1.0, 0.0, 0.0, 1.0, 0.0, 0.0 };
+ *transform = ident;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ sk_sp<SkTypeface> ConsumeFallbackTypeface() { return std::move(fResolvedTypeface); }
+
+private:
+ virtual ~FontFallbackRenderer() { }
+
+ ULONG fRefCount;
+ sk_sp<const SkFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ sk_sp<SkTypeface> fResolvedTypeface;
+};
+
+class FontFallbackSource : public IDWriteTextAnalysisSource {
+public:
+ FontFallbackSource(const WCHAR* string, UINT32 length, const WCHAR* locale,
+ IDWriteNumberSubstitution* numberSubstitution)
+ : fRefCount(1)
+ , fString(string)
+ , fLength(length)
+ , fLocale(locale)
+ , fNumberSubstitution(numberSubstitution)
+ { }
+
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWriteTextAnalysisSource) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+        return E_NOINTERFACE; // COM requires E_NOINTERFACE for unsupported interfaces.
+ }
+
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ // IDWriteTextAnalysisSource methods
+ SK_STDMETHODIMP GetTextAtPosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString + textPosition;
+ *textLength = fLength - textPosition;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetTextBeforePosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (textPosition < 1 || fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString;
+ *textLength = textPosition;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP_(DWRITE_READING_DIRECTION) GetParagraphReadingDirection() override {
+        // TODO: infer the reading direction from the text; assume LTR for now.
+ return DWRITE_READING_DIRECTION_LEFT_TO_RIGHT;
+ }
+
+ SK_STDMETHODIMP GetLocaleName(
+ UINT32 textPosition,
+ UINT32* textLength,
+ WCHAR const** localeName) override
+ {
+        // The locale applies through the remainder of the text.
+        *textLength = fLength - textPosition;
+        *localeName = fLocale;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetNumberSubstitution(
+ UINT32 textPosition,
+ UINT32* textLength,
+ IDWriteNumberSubstitution** numberSubstitution) override
+ {
+        // The substitution applies through the remainder of the text.
+        *textLength = fLength - textPosition;
+        *numberSubstitution = fNumberSubstitution;
+ return S_OK;
+ }
+
+private:
+ virtual ~FontFallbackSource() { }
+
+ ULONG fRefCount;
+ const WCHAR* fString;
+ UINT32 fLength;
+ const WCHAR* fLocale;
+ IDWriteNumberSubstitution* fNumberSubstitution;
+};
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const
+{
+ const DWriteStyle dwStyle(style);
+
+ const WCHAR* dwFamilyName = nullptr;
+ SkSMallocWCHAR dwFamilyNameLocal;
+ if (familyName) {
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyNameLocal));
+ dwFamilyName = dwFamilyNameLocal;
+ }
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ // TODO: support fallback stack.
+ // TODO: DirectWrite supports 'zh-CN' or 'zh-Hans', but 'zh' misses completely
+ // and may produce a Japanese font.
+ HRN(sk_cstring_to_wchar(bcp47[bcp47Count - 1], &dwBcp47Local));
+ dwBcp47 = &dwBcp47Local;
+ }
+
+ if (fFontFallback) {
+ return this->fallback(dwFamilyName, dwStyle, dwBcp47->get(), character).release();
+ }
+
+ // LayoutFallback may use the system font collection for fallback.
+ return this->layoutFallback(dwFamilyName, dwStyle, dwBcp47->get(), character).release();
+}
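+
+// A hedged usage sketch of the public wrapper around the matcher above (the
+// manager instance and locale are illustrative assumptions):
+//
+//   const char* bcp47[] = { "zh-Hans" };
+//   sk_sp<SkTypeface> face(fontMgr->matchFamilyStyleCharacter(
+//           nullptr, SkFontStyle(), bcp47, 1, 0x4E2D));
+//   // Non-null iff some font in the collection (or the system fallback)
+//   // covers U+4E2D for the requested style and locale.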
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::fallback(const WCHAR* dwFamilyName,
+ DWriteStyle dwStyle,
+ const WCHAR* dwBcp47,
+ UINT32 character) const
+{
+ WCHAR str[16];
+ UINT32 strLen = SkTo<UINT32>(SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+
+ if (!fFontFallback) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteNumberSubstitution> numberSubstitution;
+ HRNM(fFactory->CreateNumberSubstitution(DWRITE_NUMBER_SUBSTITUTION_METHOD_NONE, dwBcp47,
+ TRUE, &numberSubstitution),
+ "Could not create number substitution.");
+ SkTScopedComPtr<FontFallbackSource> fontFallbackSource(
+ new FontFallbackSource(str, strLen, dwBcp47, numberSubstitution.get()));
+
+ UINT32 mappedLength;
+ SkTScopedComPtr<IDWriteFont> font;
+ FLOAT scale;
+ HRNM(fFontFallback->MapCharacters(fontFallbackSource.get(),
+ 0, // textPosition,
+ strLen,
+ fFontCollection.get(),
+ dwFamilyName,
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ &mappedLength,
+ &font,
+ &scale),
+ "Could not map characters");
+ if (!font.get()) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not get font face from font.");
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(font->GetFontFamily(&fontFamily), "Could not get family from font.");
+ return this->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::layoutFallback(const WCHAR* dwFamilyName,
+ DWriteStyle dwStyle,
+ const WCHAR* dwBcp47,
+ UINT32 character) const
+{
+ WCHAR str[16];
+ UINT32 strLen = SkTo<UINT32>(SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HRNM(fFactory->CreateTextFormat(dwFamilyName ? dwFamilyName : L"",
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.");
+
+ // No matter how the font collection is set on this IDWriteTextLayout, it is not possible to
+ // disable use of the system font collection in fallback.
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HRNM(fFactory->CreateTextLayout(str, strLen, fallbackFormat.get(),
+ 200.0f, 200.0f,
+ &fallbackLayout),
+ "Could not create text layout.");
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HRNM(fallbackLayout->SetFontCollection(fFontCollection.get(), { 0, strLen }),
+ "Could not set layout font collection.");
+ HRNM(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.");
+
+ return fontFallbackRenderer->ConsumeFallbackTypeface();
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const {
+ SkFontStyleSet_DirectWrite sset(
+ this, ((DWriteFontTypeface*)familyMember)->fDWriteFontFamily.get()
+ );
+ return sset.matchStyle(fontstyle);
+}
+
+template <typename T> class SkAutoIDWriteUnregister {
+public:
+ SkAutoIDWriteUnregister(IDWriteFactory* factory, T* unregister)
+ : fFactory(factory), fUnregister(unregister)
+ { }
+
+ ~SkAutoIDWriteUnregister() {
+ if (fUnregister) {
+ unregister(fFactory, fUnregister);
+ }
+ }
+
+    T* detach() {
+ T* old = fUnregister;
+ fUnregister = nullptr;
+ return old;
+ }
+
+private:
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontFileLoader* unregister) {
+ return factory->UnregisterFontFileLoader(unregister);
+ }
+
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontCollectionLoader* unregister) {
+ return factory->UnregisterFontCollectionLoader(unregister);
+ }
+
+ IDWriteFactory* fFactory;
+ T* fUnregister;
+};
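+
+// SkAutoIDWriteUnregister is a small RAII guard: on scope exit it unregisters
+// the loader from the factory unless detach() was called to hand ownership
+// elsewhere. A hedged sketch of the pattern used below (the helper names are
+// illustrative):
+//
+//   SkAutoIDWriteUnregister<StreamFontFileLoader> guard(factory, loader);
+//   if (!tryToBuildTypeface()) {
+//       return nullptr;  // guard unregisters the loader automatically
+//   }
+//   return makeTypeface(..., guard.detach());  // typeface now owns cleanup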
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ SkTScopedComPtr<StreamFontFileLoader> fontFileLoader;
+ // This transfers ownership of stream to the new object.
+ HRN(StreamFontFileLoader::Create(std::move(stream), &fontFileLoader));
+ HRN(fFactory->RegisterFontFileLoader(fontFileLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontFileLoader> autoUnregisterFontFileLoader(
+ fFactory.get(), fontFileLoader.get());
+
+ SkTScopedComPtr<StreamFontCollectionLoader> fontCollectionLoader;
+ HRN(StreamFontCollectionLoader::Create(fontFileLoader.get(), &fontCollectionLoader));
+ HRN(fFactory->RegisterFontCollectionLoader(fontCollectionLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontCollectionLoader> autoUnregisterFontCollectionLoader(
+ fFactory.get(), fontCollectionLoader.get());
+
+ SkTScopedComPtr<IDWriteFontCollection> fontCollection;
+ HRN(fFactory->CreateCustomFontCollection(fontCollectionLoader.get(), nullptr, 0, &fontCollection));
+
+ // Find the first non-simulated font which has the given ttc index.
+ UINT32 familyCount = fontCollection->GetFontFamilyCount();
+ for (UINT32 familyIndex = 0; familyIndex < familyCount; ++familyIndex) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRN(fontCollection->GetFontFamily(familyIndex, &fontFamily));
+
+ UINT32 fontCount = fontFamily->GetFontCount();
+ for (UINT32 fontIndex = 0; fontIndex < fontCount; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRN(fontFamily->GetFont(fontIndex, &font));
+ if (font->GetSimulations() != DWRITE_FONT_SIMULATIONS_NONE) {
+ continue;
+ }
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRN(font->CreateFontFace(&fontFace));
+
+ int faceIndex = fontFace->GetIndex();
+ if (faceIndex == ttcIndex) {
+ return DWriteFontTypeface::Make(fFactory.get(),
+ fontFace.get(), font.get(), fontFamily.get(),
+                                                autoUnregisterFontFileLoader.detach(),
+                                                autoUnregisterFontCollectionLoader.detach());
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ SkTScopedComPtr<StreamFontFileLoader> fontFileLoader;
+ // This transfers ownership of stream to the new object.
+ HRN(StreamFontFileLoader::Create(std::move(stream), &fontFileLoader));
+ HRN(fFactory->RegisterFontFileLoader(fontFileLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontFileLoader> autoUnregisterFontFileLoader(
+ fFactory.get(), fontFileLoader.get());
+
+ SkTScopedComPtr<StreamFontCollectionLoader> fontCollectionLoader;
+ HRN(StreamFontCollectionLoader::Create(fontFileLoader.get(), &fontCollectionLoader));
+ HRN(fFactory->RegisterFontCollectionLoader(fontCollectionLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontCollectionLoader> autoUnregisterFontCollectionLoader(
+ fFactory.get(), fontCollectionLoader.get());
+
+ SkTScopedComPtr<IDWriteFontCollection> fontCollection;
+ HRN(fFactory->CreateCustomFontCollection(fontCollectionLoader.get(), nullptr, 0,
+ &fontCollection));
+
+ // Find the first non-simulated font which has the given ttc index.
+ UINT32 familyCount = fontCollection->GetFontFamilyCount();
+ for (UINT32 familyIndex = 0; familyIndex < familyCount; ++familyIndex) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRN(fontCollection->GetFontFamily(familyIndex, &fontFamily));
+
+ UINT32 fontCount = fontFamily->GetFontCount();
+ for (UINT32 fontIndex = 0; fontIndex < fontCount; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRN(fontFamily->GetFont(fontIndex, &font));
+
+ // Skip if the current font is simulated
+ if (font->GetSimulations() != DWRITE_FONT_SIMULATIONS_NONE) {
+ continue;
+ }
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRN(font->CreateFontFace(&fontFace));
+ int faceIndex = fontFace->GetIndex();
+ int ttcIndex = args.getCollectionIndex();
+
+ // Skip if the current face index does not match the ttcIndex
+ if (faceIndex != ttcIndex) {
+ continue;
+ }
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (SUCCEEDED(fontFace->QueryInterface(&fontFace5)) && fontFace5->HasVariations()) {
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ UINT32 argsCoordCount = args.getVariationDesignPosition().coordinateCount;
+ SkAutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisValues(fontAxisCount);
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HRN(fontFace5->GetFontResource(&fontResource));
+ // Set all axes by default values
+ HRN(fontResource->GetDefaultFontAxisValues(fontAxisValues, fontAxisCount));
+
+                for (UINT32 axisIndex = 0; axisIndex < fontAxisCount; ++axisIndex) {
+                    for (UINT32 argsIndex = 0; argsIndex < argsCoordCount; ++argsIndex) {
+                        if (SkEndian_SwapBE32(fontAxisValues[axisIndex].axisTag) ==
+                            args.getVariationDesignPosition().coordinates[argsIndex].axis) {
+                            fontAxisValues[axisIndex].value =
+                                args.getVariationDesignPosition().coordinates[argsIndex].value;
+ }
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5_Out;
+ HRN(fontResource->CreateFontFace(DWRITE_FONT_SIMULATIONS_NONE,
+ fontAxisValues.get(),
+ fontAxisCount,
+ &fontFace5_Out));
+ fontFace.reset();
+ HRN(fontFace5_Out->QueryInterface(&fontFace));
+ }
+
+#endif
+
+ return DWriteFontTypeface::Make(
+ fFactory.get(), fontFace.get(), font.get(), fontFamily.get(),
+                autoUnregisterFontFileLoader.detach(),
+                autoUnregisterFontCollectionLoader.detach());
+ }
+ }
+
+ return nullptr;
+}
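+
+// A hedged caller-side sketch for the variable-font path above (the axis tag
+// and values are illustrative):
+//
+//   SkFontArguments::VariationPosition::Coordinate coords[] = {
+//       { SkSetFourByteTag('w','g','h','t'), 700.0f },  // ask for Bold weight
+//   };
+//   SkFontArguments args;
+//   args.setCollectionIndex(0);
+//   args.setVariationDesignPosition({coords, 1});
+//   sk_sp<SkTypeface> face = fontMgr->makeFromStream(std::move(stream), args);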
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return this->makeFromStream(skstd::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromFile(const char path[], int ttcIndex) const {
+ return this->makeFromStream(SkStream::MakeFromFile(path), ttcIndex);
+}
+
+HRESULT SkFontMgr_DirectWrite::getByFamilyName(const WCHAR wideFamilyName[],
+ IDWriteFontFamily** fontFamily) const {
+ UINT32 index;
+ BOOL exists;
+ HR(fFontCollection->FindFamilyName(wideFamilyName, &index, &exists));
+
+ if (exists) {
+ HR(fFontCollection->GetFontFamily(index, fontFamily));
+ }
+ return S_OK;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ const DWriteStyle dwStyle(style);
+ if (familyName) {
+ SkSMallocWCHAR dwFamilyName;
+ if (SUCCEEDED(sk_cstring_to_wchar(familyName, &dwFamilyName))) {
+ this->getByFamilyName(dwFamilyName, &fontFamily);
+ if (!fontFamily && fFontFallback) {
+ return this->fallback(dwFamilyName, dwStyle, fLocaleName.get(), 32);
+ }
+ }
+ }
+
+ if (!fontFamily) {
+ if (fFontFallback) {
+ return this->fallback(nullptr, dwStyle, fLocaleName.get(), 32);
+ }
+ // SPI_GETNONCLIENTMETRICS lfMessageFont can fail in Win8. (DisallowWin32kSystemCalls)
+ // layoutFallback causes DCHECK in Chromium. (Uses system font collection.)
+ HRNM(this->getByFamilyName(fDefaultFamilyName, &fontFamily),
+ "Could not create DWrite font family from LOGFONT.");
+ }
+
+ if (!fontFamily) {
+ // Could not obtain the default font.
+ HRNM(fFontCollection->GetFontFamily(0, &fontFamily),
+ "Could not get default-default font family.");
+ }
+
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth, dwStyle.fSlant, &font),
+ "Could not get matching font.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return this->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStyleSet_DirectWrite::count() {
+ return fFontFamily->GetFontCount();
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::createTypeface(int index) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fFontFamily.get()).release();
+}
+
+void SkFontStyleSet_DirectWrite::getStyle(int index, SkFontStyle* fs, SkString* styleName) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRVM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ if (fs) {
+ *fs = get_style(font.get());
+ }
+
+ if (styleName) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> faceNames;
+ if (SUCCEEDED(font->GetFaceNames(&faceNames))) {
+ sk_get_locale_string(faceNames.get(), fFontMgr->fLocaleName.get(), styleName);
+ }
+ }
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::matchStyle(const SkFontStyle& pattern) {
+ SkTScopedComPtr<IDWriteFont> font;
+ DWriteStyle dwStyle(pattern);
+ // TODO: perhaps use GetMatchingFonts and get the least simulated?
+ HRNM(fFontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth, dwStyle.fSlant, &font),
+ "Could not match font in family.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->makeTypefaceFromDWriteFont(fontFace.get(), font.get(),
+ fFontFamily.get()).release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+#include "include/ports/SkTypeface_win.h"
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection) {
+ return SkFontMgr_New_DirectWrite(factory, collection, nullptr);
+}
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback) {
+ if (nullptr == factory) {
+ factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> systemFontCollection;
+ if (nullptr == collection) {
+ HRNM(factory->GetSystemFontCollection(&systemFontCollection, FALSE),
+ "Could not get system font collection.");
+ collection = systemFontCollection.get();
+ }
+
+    // A font fallback may have been provided even when IDWriteFactory2 is unavailable.
+ SkTScopedComPtr<IDWriteFontFallback> systemFontFallback;
+ if (nullptr == fallback) {
+ SkTScopedComPtr<IDWriteFactory2> factory2;
+ if (!SUCCEEDED(factory->QueryInterface(&factory2))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == factory2.get());
+ } else {
+ HRNM(factory2->GetSystemFontFallback(&systemFontFallback),
+ "Could not get system fallback.");
+ fallback = systemFontFallback.get();
+ }
+ }
+
+ const WCHAR* defaultFamilyName = L"";
+ int defaultFamilyNameLen = 1;
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+ if (nullptr == fallback) {
+ if (SystemParametersInfoW(SPI_GETNONCLIENTMETRICS, sizeof(metrics), &metrics, 0)) {
+ defaultFamilyName = metrics.lfMessageFont.lfFaceName;
+ defaultFamilyNameLen = LF_FACESIZE;
+ }
+ }
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ const WCHAR* localeName = L"";
+ int localeNameLen = 1;
+
+ // Dynamically load GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ int size = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (size) {
+ localeName = localeNameStorage;
+ localeNameLen = size;
+ }
+ }
+
+ return sk_make_sp<SkFontMgr_DirectWrite>(factory, collection, fallback,
+ localeName, localeNameLen,
+ defaultFamilyName, defaultFamilyNameLen);
+}
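+
+// A minimal usage sketch (error handling elided; the family name is only an
+// example):
+//
+//   sk_sp<SkFontMgr> mgr = SkFontMgr_New_DirectWrite();  // system defaults
+//   if (mgr) {
+//       sk_sp<SkTypeface> tf(mgr->matchFamilyStyle("Segoe UI",
+//                                                  SkFontStyle::Bold()));
+//   }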
+
+#include "include/ports/SkFontMgr_indirect.h"
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWriteRenderer(sk_sp<SkRemotableFontMgr> proxy) {
+ sk_sp<SkFontMgr> impl(SkFontMgr_New_DirectWrite());
+ if (!impl) {
+ return nullptr;
+ }
+ return sk_make_sp<SkFontMgr_Indirect>(std::move(impl), std::move(proxy));
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
new file mode 100644
index 0000000000..08195c569f
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN) // And !SKIA_GDI?
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkTypeface_win.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_DirectWrite();
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
new file mode 100644
index 0000000000..5b4ca74985
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+
+#if defined(SK_DISABLE_EFFECT_DESERIALIZATION)
+
+ void SkFlattenable::PrivateInitializer::InitEffects() {}
+ void SkFlattenable::PrivateInitializer::InitImageFilters() {}
+
+#else
+
+ #include "include/core/SkColorFilter.h"
+ #include "src/effects/SkDashImpl.h"
+ #include "include/effects/SkGradientShader.h"
+ #include "include/core/SkMaskFilter.h"
+
+ #include "include/effects/SkBlurImageFilter.h"
+ #include "include/effects/SkComposeImageFilter.h"
+
+ /*
+ * Register most effects for deserialization.
+ *
+ * None of these are strictly required for Skia to operate, so if you're
+ * not using deserialization yourself, you can define
+     * SK_DISABLE_EFFECT_DESERIALIZATION, or modify/replace this file as needed.
+ */
+ void SkFlattenable::PrivateInitializer::InitEffects() {
+ // Shaders.
+ SkGradientShader::RegisterFlattenables();
+
+ // Color filters.
+ SkColorFilter::RegisterFlattenables();
+
+ // Mask filters.
+ SkMaskFilter::RegisterFlattenables();
+
+ // Path effects.
+ SK_REGISTER_FLATTENABLE(SkDashImpl);
+ }
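+
+    // A hedged round-trip sketch of why registration matters (names here are
+    // illustrative, not part of this file): serializing a flattenable records
+    // its factory name, and Deserialize() looks that name up in the registry
+    // populated by the calls above.
+    //
+    //   SkScalar intervals[] = { 10.0f, 5.0f };
+    //   sk_sp<SkPathEffect> dash = SkDashPathEffect::Make(intervals, 2, 0.0f);
+    //   sk_sp<SkData> blob = dash->serialize();
+    //   sk_sp<SkFlattenable> back = SkFlattenable::Deserialize(
+    //           dash->getFlattenableType(), blob->data(), blob->size());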
+
+ /*
+ * Register SkImageFilters for deserialization.
+ *
+ * None of these are strictly required for Skia to operate, so if you're
+ * not using deserialization yourself, you can define
+     * SK_DISABLE_EFFECT_DESERIALIZATION, or modify/replace this file as needed.
+ */
+ void SkFlattenable::PrivateInitializer::InitImageFilters() {
+ SkBlurImageFilter::RegisterFlattenables();
+ SkComposeImageFilter::RegisterFlattenables();
+ }
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
new file mode 100644
index 0000000000..af43829deb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/images/SkImageEncoderPriv.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTemplates.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+static size_t consumer_put(void* info, const void* buffer, size_t count) {
+ SkWStream* stream = reinterpret_cast<SkWStream*>(info);
+ return stream->write(buffer, count) ? count : 0;
+}
+
+static void consumer_release(void* info) {
+ // we do nothing, since by design we don't "own" the stream (i.e. info)
+}
+
+static SkUniqueCFRef<CGDataConsumerRef> SkStreamToCGDataConsumer(SkWStream* stream) {
+ CGDataConsumerCallbacks procs;
+ procs.putBytes = consumer_put;
+ procs.releaseConsumer = consumer_release;
+    // we don't own/reference the stream, so our consumer must not live
+    // longer than our caller's ownership of the stream
+ return SkUniqueCFRef<CGDataConsumerRef>(CGDataConsumerCreate(stream, &procs));
+}
+
+static SkUniqueCFRef<CGImageDestinationRef> SkStreamToImageDestination(SkWStream* stream,
+ CFStringRef type) {
+ SkUniqueCFRef<CGDataConsumerRef> consumer = SkStreamToCGDataConsumer(stream);
+ if (nullptr == consumer) {
+ return nullptr;
+ }
+
+ return SkUniqueCFRef<CGImageDestinationRef>(
+ CGImageDestinationCreateWithDataConsumer(consumer.get(), type, 1, nullptr));
+}
+
+/* Encode bitmaps via CGImageDestination. We set up a DataConsumer which writes
+   to our SkWStream. Since we don't reference/own the SkWStream, our consumer
+   must only live for the duration of this encode call.
+ */
+bool SkEncodeImageWithCG(SkWStream* stream, const SkPixmap& pixmap, SkEncodedImageFormat format) {
+ SkBitmap bm;
+ if (!bm.installPixels(pixmap)) {
+ return false;
+ }
+ bm.setImmutable();
+
+ CFStringRef type;
+ switch (format) {
+ case SkEncodedImageFormat::kICO:
+ type = kUTTypeICO;
+ break;
+ case SkEncodedImageFormat::kBMP:
+ type = kUTTypeBMP;
+ break;
+ case SkEncodedImageFormat::kGIF:
+ type = kUTTypeGIF;
+ break;
+ case SkEncodedImageFormat::kJPEG:
+ type = kUTTypeJPEG;
+ break;
+ case SkEncodedImageFormat::kPNG:
+ // PNG encoding an ARGB_4444 bitmap gives the following errors in GM:
+ // <Error>: CGImageDestinationAddImage image could not be converted to destination
+ // format.
+ // <Error>: CGImageDestinationFinalize image destination does not have enough images
+ // So instead we copy to 8888.
+ if (bm.colorType() == kARGB_4444_SkColorType) {
+ SkBitmap bitmapN32;
+ bitmapN32.allocPixels(bm.info().makeColorType(kN32_SkColorType));
+ bm.readPixels(bitmapN32.info(), bitmapN32.getPixels(), bitmapN32.rowBytes(), 0, 0);
+ bm.swap(bitmapN32);
+ }
+ type = kUTTypePNG;
+ break;
+ default:
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageDestinationRef> dst = SkStreamToImageDestination(stream, type);
+ if (nullptr == dst) {
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageRef> image(SkCreateCGImageRef(bm));
+ if (nullptr == image) {
+ return false;
+ }
+
+ CGImageDestinationAddImage(dst.get(), image.get(), nullptr);
+ return CGImageDestinationFinalize(dst.get());
+}
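+
+// A hedged usage sketch ('pixmap' is an assumed, valid SkPixmap):
+//
+//   SkDynamicMemoryWStream out;
+//   if (SkEncodeImageWithCG(&out, pixmap, SkEncodedImageFormat::kPNG)) {
+//       sk_sp<SkData> png = out.detachAsData();
+//   }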
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
new file mode 100644
index 0000000000..af1ec698ac
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkAutoMalloc.h"
+#include "src/images/SkImageEncoderPriv.h"
+#include "src/utils/win/SkAutoCoInitialize.h"
+#include "src/utils/win/SkIStream.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+#include <wincodec.h>
+
+//All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+//In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+//but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+//Undo this #define if it has been done so that we link against the symbols
+//we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+#undef CLSID_WICImagingFactory
+#endif
+
+bool SkEncodeImageWithWIC(SkWStream* stream, const SkPixmap& pixmap,
+ SkEncodedImageFormat format, int quality) {
+ GUID type;
+ switch (format) {
+ case SkEncodedImageFormat::kJPEG:
+ type = GUID_ContainerFormatJpeg;
+ break;
+ case SkEncodedImageFormat::kPNG:
+ type = GUID_ContainerFormatPng;
+ break;
+ default:
+ return false;
+ }
+ SkBitmap bitmapOrig;
+ if (!bitmapOrig.installPixels(pixmap)) {
+ return false;
+ }
+ bitmapOrig.setImmutable();
+
+ // First convert to BGRA if necessary.
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(bitmapOrig.info().makeColorType(kBGRA_8888_SkColorType)) ||
+ !bitmapOrig.readPixels(bitmap.info(), bitmap.getPixels(), bitmap.rowBytes(), 0, 0))
+ {
+ return false;
+ }
+
+ // WIC expects unpremultiplied pixels. Unpremultiply if necessary.
+ if (kPremul_SkAlphaType == bitmap.alphaType()) {
+ uint8_t* pixels = reinterpret_cast<uint8_t*>(bitmap.getPixels());
+ for (int y = 0; y < bitmap.height(); ++y) {
+ for (int x = 0; x < bitmap.width(); ++x) {
+ uint8_t* bytes = pixels + y * bitmap.rowBytes() + x * bitmap.bytesPerPixel();
+ SkPMColor* src = reinterpret_cast<SkPMColor*>(bytes);
+ SkColor* dst = reinterpret_cast<SkColor*>(bytes);
+ *dst = SkUnPreMultiply::PMColorToColor(*src);
+ }
+ }
+ }
+
+ // Finally, if we are performing a jpeg encode, we must convert to BGR.
+ void* pixels = bitmap.getPixels();
+ size_t rowBytes = bitmap.rowBytes();
+ SkAutoMalloc pixelStorage;
+ WICPixelFormatGUID formatDesired = GUID_WICPixelFormat32bppBGRA;
+ if (SkEncodedImageFormat::kJPEG == format) {
+ formatDesired = GUID_WICPixelFormat24bppBGR;
+ rowBytes = SkAlign4(bitmap.width() * 3);
+ pixelStorage.reset(rowBytes * bitmap.height());
+ for (int y = 0; y < bitmap.height(); y++) {
+ uint8_t* dstRow = SkTAddOffset<uint8_t>(pixelStorage.get(), y * rowBytes);
+ for (int x = 0; x < bitmap.width(); x++) {
+ uint32_t bgra = *bitmap.getAddr32(x, y);
+ dstRow[0] = (uint8_t) ((bgra >> 0) & 0xFF);
+ dstRow[1] = (uint8_t) ((bgra >> 8) & 0xFF);
+ dstRow[2] = (uint8_t) ((bgra >> 16) & 0xFF);
+ dstRow += 3;
+ }
+ }
+
+ pixels = pixelStorage.get();
+ }
+
+
+ //Initialize COM.
+ SkAutoCoInitialize scopedCo;
+ if (!scopedCo.succeeded()) {
+ return false;
+ }
+
+ HRESULT hr = S_OK;
+
+ //Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> piImagingFactory;
+ if (SUCCEEDED(hr)) {
+ hr = CoCreateInstance(
+ CLSID_WICImagingFactory
+ , nullptr
+ , CLSCTX_INPROC_SERVER
+ , IID_PPV_ARGS(&piImagingFactory)
+ );
+ }
+
+ //Convert the SkWStream to an IStream.
+ SkTScopedComPtr<IStream> piStream;
+ if (SUCCEEDED(hr)) {
+ hr = SkWIStream::CreateFromSkWStream(stream, &piStream);
+ }
+
+    //Create an encoder of the appropriate type.
+ SkTScopedComPtr<IWICBitmapEncoder> piEncoder;
+ if (SUCCEEDED(hr)) {
+ hr = piImagingFactory->CreateEncoder(type, nullptr, &piEncoder);
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Initialize(piStream.get(), WICBitmapEncoderNoCache);
+ }
+
+    //Create the frame.
+ SkTScopedComPtr<IWICBitmapFrameEncode> piBitmapFrameEncode;
+ SkTScopedComPtr<IPropertyBag2> piPropertybag;
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->CreateNewFrame(&piBitmapFrameEncode, &piPropertybag);
+ }
+
+ if (SUCCEEDED(hr)) {
+ PROPBAG2 name;
+ memset(&name, 0, sizeof(name));
+ name.dwType = PROPBAG2_TYPE_DATA;
+ name.vt = VT_R4;
+ name.pstrName = const_cast<LPOLESTR>(L"ImageQuality");
+
+ VARIANT value;
+ VariantInit(&value);
+ value.vt = VT_R4;
+ value.fltVal = (FLOAT)(quality / 100.0);
+
+ //Ignore result code.
+ // This returns E_FAIL if the named property is not in the bag.
+ //TODO(bungeman) enumerate the properties,
+ // write and set hr iff property exists.
+ piPropertybag->Write(1, &name, &value);
+ }
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Initialize(piPropertybag.get());
+ }
+
+ //Set the size of the frame.
+ const UINT width = bitmap.width();
+ const UINT height = bitmap.height();
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetSize(width, height);
+ }
+
+    //Set the pixel format of the frame. If the encoder cannot use BGRA natively,
+    //it will choose the closest pixel format that it supports.
+ WICPixelFormatGUID formatGUID = formatDesired;
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetPixelFormat(&formatGUID);
+ }
+ if (SUCCEEDED(hr)) {
+ //Be sure the image format is the one requested.
+ hr = IsEqualGUID(formatGUID, formatDesired) ? S_OK : E_FAIL;
+ }
+
+ //Write the pixels into the frame.
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->WritePixels(height,
+ (UINT) rowBytes,
+ (UINT) rowBytes * height,
+ reinterpret_cast<BYTE*>(pixels));
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Commit();
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Commit();
+ }
+
+ return SUCCEEDED(hr);
+}
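+
+// A hedged usage sketch ('pixmap' is an assumed, valid SkPixmap). The quality
+// argument only takes effect for encoders whose property bag exposes
+// "ImageQuality" (JPEG here); PNG ignores it.
+//
+//   SkDynamicMemoryWStream out;
+//   bool ok = SkEncodeImageWithWIC(&out, pixmap, SkEncodedImageFormat::kJPEG, 90);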
+
+#endif // defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
new file mode 100644
index 0000000000..bda9a04beb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/ports/SkImageGeneratorCG.h"
+#include "include/private/SkTemplates.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/core/SkPixmapPriv.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+namespace {
+class ImageGeneratorCG : public SkImageGenerator {
+public:
+ ImageGeneratorCG(const SkImageInfo&, SkUniqueCFRef<CGImageSourceRef> imageSrc,
+ sk_sp<SkData> data, SkEncodedOrigin);
+
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&) override;
+
+private:
+ const SkUniqueCFRef<CGImageSourceRef> fImageSrc;
+ const sk_sp<SkData> fData;
+ const SkEncodedOrigin fOrigin;
+
+ typedef SkImageGenerator INHERITED;
+};
+
+static SkUniqueCFRef<CGImageSourceRef> data_to_CGImageSrc(SkData* data) {
+ SkUniqueCFRef<CGDataProviderRef> cgData(
+ CGDataProviderCreateWithData(data, data->data(), data->size(), nullptr));
+ if (!cgData) {
+ return nullptr;
+ }
+ return SkUniqueCFRef<CGImageSourceRef>(
+ CGImageSourceCreateWithDataProvider(cgData.get(), nullptr));
+}
+
+} // namespace
+
+std::unique_ptr<SkImageGenerator> SkImageGeneratorCG::MakeFromEncodedCG(sk_sp<SkData> data) {
+ SkUniqueCFRef<CGImageSourceRef> imageSrc = data_to_CGImageSrc(data.get());
+ if (!imageSrc) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFDictionaryRef> properties(
+ CGImageSourceCopyPropertiesAtIndex(imageSrc.get(), 0, nullptr));
+ if (!properties) {
+ return nullptr;
+ }
+
+ CFNumberRef widthRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyPixelWidth));
+ CFNumberRef heightRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyPixelHeight));
+ if (nullptr == widthRef || nullptr == heightRef) {
+ return nullptr;
+ }
+
+ int width, height;
+ if (!CFNumberGetValue(widthRef , kCFNumberIntType, &width ) ||
+ !CFNumberGetValue(heightRef, kCFNumberIntType, &height))
+ {
+ return nullptr;
+ }
+
+ bool hasAlpha = bool(CFDictionaryGetValue(properties.get(), kCGImagePropertyHasAlpha));
+ SkAlphaType alphaType = hasAlpha ? kPremul_SkAlphaType : kOpaque_SkAlphaType;
+ SkImageInfo info = SkImageInfo::MakeS32(width, height, alphaType);
+
+ SkEncodedOrigin origin = kDefault_SkEncodedOrigin;
+ CFNumberRef orientationRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyOrientation));
+ int originInt;
+ if (orientationRef && CFNumberGetValue(orientationRef, kCFNumberIntType, &originInt)) {
+ origin = (SkEncodedOrigin) originInt;
+ }
+
+ if (SkPixmapPriv::ShouldSwapWidthHeight(origin)) {
+ info = SkPixmapPriv::SwapWidthHeight(info);
+ }
+
+ // FIXME: We have the opportunity to extract color space information here,
+ // though I think it makes sense to wait until we understand how
+ // we want to communicate it to the generator.
+
+ return std::unique_ptr<SkImageGenerator>(new ImageGeneratorCG(info, std::move(imageSrc),
+ std::move(data), origin));
+}
+
+ImageGeneratorCG::ImageGeneratorCG(const SkImageInfo& info, SkUniqueCFRef<CGImageSourceRef> src,
+ sk_sp<SkData> data, SkEncodedOrigin origin)
+ : INHERITED(info)
+ , fImageSrc(std::move(src))
+ , fData(std::move(data))
+ , fOrigin(origin)
+{}
+
+sk_sp<SkData> ImageGeneratorCG::onRefEncodedData() {
+ return fData;
+}
+
+bool ImageGeneratorCG::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options&)
+{
+ if (kN32_SkColorType != info.colorType()) {
+ // FIXME: Support other colorTypes.
+ return false;
+ }
+
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ if (kOpaque_SkAlphaType != this->getInfo().alphaType()) {
+ return false;
+ }
+ break;
+ case kPremul_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageRef> image(CGImageSourceCreateImageAtIndex(fImageSrc.get(), 0, nullptr));
+ if (!image) {
+ return false;
+ }
+
+ SkPixmap dst(info, pixels, rowBytes);
+ auto decode = [&image](const SkPixmap& pm) {
+ // FIXME: Using SkCopyPixelsFromCGImage (as opposed to swizzling
+ // ourselves) greatly restricts the color and alpha types that we
+ // support. If we swizzle ourselves, we can add support for:
+ // kUnpremul_SkAlphaType
+ // 16-bit per component RGBA
+ // kGray_8_SkColorType
+ // Additionally, it would be interesting to compare the performance
+ // of SkSwizzler with CG's built in swizzler.
+ return SkCopyPixelsFromCGImage(pm, image.get());
+ };
+ return SkPixmapPriv::Orient(dst, fOrigin, decode);
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
new file mode 100644
index 0000000000..cea31ad641
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/ports/SkImageGeneratorWIC.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/utils/win/SkIStream.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <wincodec.h>
+
+// All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+// In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+// but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+// Undo this #define if it has been done so that we link against the symbols
+// we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+ #undef CLSID_WICImagingFactory
+#endif
+
+namespace {
+class ImageGeneratorWIC : public SkImageGenerator {
+public:
+ /*
+ * Takes ownership of the imagingFactory
+ * Takes ownership of the imageSource
+ */
+ ImageGeneratorWIC(const SkImageInfo& info, IWICImagingFactory* imagingFactory,
+ IWICBitmapSource* imageSource, sk_sp<SkData>);
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options&)
+ override;
+
+private:
+ SkTScopedComPtr<IWICImagingFactory> fImagingFactory;
+ SkTScopedComPtr<IWICBitmapSource> fImageSource;
+ sk_sp<SkData> fData;
+
+ typedef SkImageGenerator INHERITED;
+};
+} // namespace
+
+std::unique_ptr<SkImageGenerator> SkImageGeneratorWIC::MakeFromEncodedWIC(sk_sp<SkData> data) {
+ // Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> imagingFactory;
+ HRESULT hr = CoCreateInstance(CLSID_WICImagingFactory, nullptr, CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&imagingFactory));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create an IStream.
+ SkTScopedComPtr<IStream> iStream;
+    // Note that iStream takes ownership of the memory stream, which is handed
+    // off as a std::unique_ptr.
+ hr = SkIStream::CreateFromSkStream(skstd::make_unique<SkMemoryStream>(data), &iStream);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create the decoder from the stream.
+ SkTScopedComPtr<IWICBitmapDecoder> decoder;
+ hr = imagingFactory->CreateDecoderFromStream(iStream.get(), nullptr,
+ WICDecodeMetadataCacheOnDemand, &decoder);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Select the first frame from the decoder.
+ SkTScopedComPtr<IWICBitmapFrameDecode> imageFrame;
+ hr = decoder->GetFrame(0, &imageFrame);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Treat the frame as an image source.
+ SkTScopedComPtr<IWICBitmapSource> imageSource;
+ hr = imageFrame->QueryInterface(IID_PPV_ARGS(&imageSource));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the size of the image.
+ UINT width;
+ UINT height;
+ hr = imageSource->GetSize(&width, &height);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the encoded pixel format.
+ WICPixelFormatGUID format;
+ hr = imageSource->GetPixelFormat(&format);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Recommend kOpaque if the image is opaque and kPremul otherwise.
+ // FIXME: We are stuck recommending kPremul for all indexed formats
+ // (Ex: GUID_WICPixelFormat8bppIndexed) because we don't have
+ // a way to check if the image has alpha.
+ SkAlphaType alphaType = kPremul_SkAlphaType;
+
+ if (GUID_WICPixelFormat16bppBGR555 == format ||
+ GUID_WICPixelFormat16bppBGR565 == format ||
+ GUID_WICPixelFormat32bppBGR101010 == format ||
+ GUID_WICPixelFormatBlackWhite == format ||
+ GUID_WICPixelFormat2bppGray == format ||
+ GUID_WICPixelFormat4bppGray == format ||
+ GUID_WICPixelFormat8bppGray == format ||
+ GUID_WICPixelFormat16bppGray == format ||
+ GUID_WICPixelFormat16bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat16bppGrayHalf == format ||
+ GUID_WICPixelFormat32bppGrayFloat == format ||
+ GUID_WICPixelFormat32bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGBE == format ||
+ GUID_WICPixelFormat24bppRGB == format ||
+ GUID_WICPixelFormat24bppBGR == format ||
+ GUID_WICPixelFormat32bppBGR == format ||
+ GUID_WICPixelFormat48bppRGB == format ||
+ GUID_WICPixelFormat48bppBGR == format ||
+ GUID_WICPixelFormat48bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat48bppBGRFixedPoint == format ||
+ GUID_WICPixelFormat48bppRGBHalf == format ||
+ GUID_WICPixelFormat64bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat64bppRGBHalf == format ||
+ GUID_WICPixelFormat96bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat128bppRGBFloat == format ||
+ GUID_WICPixelFormat128bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGB == format ||
+ GUID_WICPixelFormat64bppRGB == format ||
+ GUID_WICPixelFormat96bppRGBFloat == format ||
+ GUID_WICPixelFormat32bppCMYK == format ||
+ GUID_WICPixelFormat64bppCMYK == format ||
+ GUID_WICPixelFormat8bppY == format ||
+ GUID_WICPixelFormat8bppCb == format ||
+ GUID_WICPixelFormat8bppCr == format ||
+ GUID_WICPixelFormat16bppCbCr == format)
+ {
+ alphaType = kOpaque_SkAlphaType;
+ }
+
+ // FIXME: If we change the implementation to handle swizzling ourselves,
+ // we can support more output formats.
+ SkImageInfo info = SkImageInfo::MakeS32(width, height, alphaType);
+ return std::unique_ptr<SkImageGenerator>(
+ new ImageGeneratorWIC(info, imagingFactory.release(), imageSource.release(),
+ std::move(data)));
+}
+
+ImageGeneratorWIC::ImageGeneratorWIC(const SkImageInfo& info,
+ IWICImagingFactory* imagingFactory, IWICBitmapSource* imageSource, sk_sp<SkData> data)
+ : INHERITED(info)
+ , fImagingFactory(imagingFactory)
+ , fImageSource(imageSource)
+ , fData(std::move(data))
+{}
+
+sk_sp<SkData> ImageGeneratorWIC::onRefEncodedData() {
+ return fData;
+}
+
+bool ImageGeneratorWIC::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options&) {
+ if (kN32_SkColorType != info.colorType()) {
+ return false;
+ }
+
+ // Create a format converter.
+ SkTScopedComPtr<IWICFormatConverter> formatConverter;
+ HRESULT hr = fImagingFactory->CreateFormatConverter(&formatConverter);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ GUID format = GUID_WICPixelFormat32bppPBGRA;
+ if (kUnpremul_SkAlphaType == info.alphaType()) {
+ format = GUID_WICPixelFormat32bppBGRA;
+ }
+
+ hr = formatConverter->Initialize(fImageSource.get(), format, WICBitmapDitherTypeNone, nullptr,
+ 0.0, WICBitmapPaletteTypeCustom);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Treat the format converter as an image source.
+ SkTScopedComPtr<IWICBitmapSource> formatConverterSrc;
+ hr = formatConverter->QueryInterface(IID_PPV_ARGS(&formatConverterSrc));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Set the destination pixels.
+ hr = formatConverterSrc->CopyPixels(nullptr, (UINT) rowBytes, (UINT) rowBytes * info.height(),
+ (BYTE*) pixels);
+
+ return SUCCEEDED(hr);
+}
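+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build): such a
+// generator is normally consumed through SkImage, which decodes lazily on
+// first rasterization. The MakeFromEncodedWIC factory name is assumed from
+// the public header.
+static sk_sp<SkImage> decode_with_wic(sk_sp<SkData> encoded) {
+    std::unique_ptr<SkImageGenerator> gen =
+            SkImageGeneratorWIC::MakeFromEncodedWIC(std::move(encoded));
+    if (!gen) {
+        return nullptr;  // WIC factory or decoder creation failed
+    }
+    return SkImage::MakeFromGenerator(std::move(gen));
+}
+#endif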
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
new file mode 100644
index 0000000000..197876505d
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageGenerator.h"
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncodedImpl(sk_sp<SkData>) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
new file mode 100644
index 0000000000..c62a8435f9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "src/codec/SkCodecImageGenerator.h"
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncodedImpl(sk_sp<SkData> data) {
+ return SkCodecImageGenerator::MakeFromEncodedCodec(std::move(data));
+}
diff --git a/gfx/skia/skia/src/ports/SkMemory_malloc.cpp b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
new file mode 100644
index 0000000000..7fc6eb19f5
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+
+#include <cstdlib>
+
+#define SK_DEBUGFAILF(fmt, ...) \
+ SkASSERT((SkDebugf(fmt"\n", __VA_ARGS__), false))
+
+static inline void sk_out_of_memory(size_t size) {
+ SK_DEBUGFAILF("sk_out_of_memory (asked for " SK_SIZE_T_SPECIFIER " bytes)",
+ size);
+#if defined(IS_FUZZING_WITH_AFL)
+ exit(1);
+#else
+ abort();
+#endif
+}
+
+static inline void* throw_on_failure(size_t size, void* p) {
+ if (size > 0 && p == nullptr) {
+ // If we've got a nullptr here, the only reason we should have failed is running out of RAM.
+ sk_out_of_memory(size);
+ }
+ return p;
+}
+
+bool sk_abort_is_enabled() { return true; }
+
+void sk_abort_no_print() {
+#if defined(SK_BUILD_FOR_WIN) && defined(SK_IS_BOT)
+ // do not display a system dialog before aborting the process
+ _set_abort_behavior(0, _WRITE_ABORT_MSG);
+#endif
+#if defined(SK_DEBUG) && defined(SK_BUILD_FOR_WIN)
+ __debugbreak();
+#elif defined(__clang__)
+ __builtin_debugtrap();
+#else
+ abort();
+#endif
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+#if defined(IS_FUZZING_WITH_AFL)
+ exit(1);
+#else
+ abort();
+#endif
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ return throw_on_failure(size, realloc(addr, size));
+}
+
+void sk_free(void* p) {
+ if (p) {
+ free(p);
+ }
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ void* p;
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ p = calloc(size, 1);
+ } else {
+ p = malloc(size);
+ }
+ if (flags & SK_MALLOC_THROW) {
+ return throw_on_failure(size, p);
+ } else {
+ return p;
+ }
+}
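+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build) of the
+// flag protocol above: SK_MALLOC_THROW aborts on failure via
+// throw_on_failure(), so the result is non-null for any size > 0, while
+// SK_MALLOC_ZERO_INITIALIZE routes the request through calloc().
+static void* example_allocations(size_t n) {
+    void* a = sk_malloc_flags(n, SK_MALLOC_THROW);            // non-null for n > 0
+    void* b = sk_malloc_flags(n, SK_MALLOC_ZERO_INITIALIZE);  // zeroed, may be null
+    sk_free(b);  // sk_free tolerates nullptr
+    return a;
+}
+#endif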
diff --git a/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
new file mode 100644
index 0000000000..626404fb32
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Google Inc.
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkMalloc.h"
+
+#include "include/core/SkTypes.h"
+#include "mozilla/mozalloc.h"
+#include "mozilla/mozalloc_abort.h"
+#include "mozilla/mozalloc_oom.h"
+#include "prenv.h"
+
+bool sk_abort_is_enabled() {
+#ifdef SK_DEBUG
+ const char* env = PR_GetEnv("MOZ_SKIA_DISABLE_ASSERTS");
+ if (env && *env != '0') {
+ return false;
+ }
+#endif
+ return true;
+}
+
+void sk_abort_no_print() {
+ mozalloc_abort("Abort from sk_abort");
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+ mozalloc_handle_oom(0);
+}
+
+void sk_free(void* p) {
+ free(p);
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ return moz_xrealloc(addr, size);
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ return (flags & SK_MALLOC_THROW) ? moz_xcalloc(size, 1) : calloc(size, 1);
+ }
+ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_ios.h b/gfx/skia/skia/src/ports/SkOSFile_ios.h
new file mode 100644
index 0000000000..e541b3ce72
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_ios.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSFile_ios_DEFINED
+#define SkOSFile_ios_DEFINED
+
+#include "include/core/SkString.h"
+
+#ifdef SK_BUILD_FOR_IOS
+#import <CoreFoundation/CoreFoundation.h>
+
+static bool ios_get_path_in_bundle(const char path[], SkString* result) {
+ // Get a reference to the main bundle
+ CFBundleRef mainBundle = CFBundleGetMainBundle();
+
+ // Get a reference to the file's URL
+ CFStringRef pathRef = CFStringCreateWithCString(nullptr, path, kCFStringEncodingUTF8);
+ // We use "data" as our subdirectory to match {{bundle_resources_dir}}/data in GN
+ // Unfortunately "resources" is not a valid top-level name in iOS, so we push it one level down
+ CFURLRef imageURL = CFBundleCopyResourceURL(mainBundle, pathRef, nullptr, CFSTR("data"));
+ CFRelease(pathRef);
+ if (!imageURL) {
+ return false;
+ }
+ if (!result) {
+ return true;
+ }
+
+ // Convert the URL reference into a string reference
+ CFStringRef imagePath = CFURLCopyFileSystemPath(imageURL, kCFURLPOSIXPathStyle);
+ CFRelease(imageURL);
+
+ // Get the system encoding method
+ CFStringEncoding encodingMethod = CFStringGetSystemEncoding();
+
+ // Convert the string reference into an SkString
+ result->set(CFStringGetCStringPtr(imagePath, encodingMethod));
+ CFRelease(imagePath);
+ return true;
+}
+#endif
+
+#endif // SkOSFile_ios_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkOSFile_posix.cpp b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
new file mode 100644
index 0000000000..91a9f9a2ff
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTFitsIn.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkOSFile.h"
+
+#include <dirent.h>
+#include <new>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifdef SK_BUILD_FOR_IOS
+#include "src/ports/SkOSFile_ios.h"
+#endif
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = F_OK;
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= R_OK;
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= W_OK;
+ }
+#ifdef SK_BUILD_FOR_IOS
+ // if the default path fails, check the bundle (but only if read-only)
+ if (0 == access(path, mode)) {
+ return true;
+ } else {
+ return (kRead_SkFILE_Flag == flags && ios_get_path_in_bundle(path, nullptr));
+ }
+#else
+ return (0 == access(path, mode));
+#endif
+}
+
+typedef struct {
+ dev_t dev;
+ ino_t ino;
+} SkFILEID;
+
+static bool sk_ino(FILE* a, SkFILEID* id) {
+ int fd = fileno(a);
+ if (fd < 0) {
+        return false;
+ }
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+        return false;
+ }
+ id->dev = status.st_dev;
+ id->ino = status.st_ino;
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.ino == bID.ino
+ && aID.dev == bID.dev;
+}
+
+void sk_fmunmap(const void* addr, size_t length) {
+ munmap(const_cast<void*>(addr), length);
+}
+
+void* sk_fdmmap(int fd, size_t* size) {
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+ return nullptr;
+ }
+ if (!S_ISREG(status.st_mode)) {
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(status.st_size)) {
+ return nullptr;
+ }
+ size_t fileSize = static_cast<size_t>(status.st_size);
+
+ void* addr = mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (MAP_FAILED == addr) {
+ return nullptr;
+ }
+
+ *size = fileSize;
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return fileno(f);
+}
+
+void* sk_fmmap(FILE* f, size_t* size) {
+ int fd = sk_fileno(f);
+ if (fd < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fd, size);
+}
+
+size_t sk_qread(FILE* file, void* buffer, size_t count, size_t offset) {
+ int fd = sk_fileno(file);
+ if (fd < 0) {
+ return SIZE_MAX;
+ }
+ ssize_t bytesRead = pread(fd, buffer, count, offset);
+ if (bytesRead < 0) {
+ return SIZE_MAX;
+ }
+ return bytesRead;
+}
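+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build):
+// sk_qread() is a positional read built on pread(), so it never moves the
+// FILE*'s seek position and reads at independent offsets can be issued
+// back to back.
+static bool read_head_and_tail(FILE* f, void* head, size_t headLen,
+                               void* tail, size_t tailLen, size_t fileLen) {
+    SkASSERT(fileLen >= tailLen);
+    return sk_qread(f, head, headLen, 0) == headLen &&
+           sk_qread(f, tail, tailLen, fileLen - tailLen) == tailLen;
+}
+#endif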
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fDIR(nullptr) { }
+ DIR* fDIR;
+ SkString fPath, fSuffix;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
+SkOSFile::Iter::Iter() { new (fSelf.get()) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf.get()) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ self.fDIR = nullptr;
+ }
+ self.fPath.set(path);
+
+ if (path) {
+ self.fDIR = ::opendir(path);
+#ifdef SK_BUILD_FOR_IOS
+ // check bundle for directory
+ if (!self.fDIR && ios_get_path_in_bundle(path, &self.fPath)) {
+ self.fDIR = ::opendir(self.fPath.c_str());
+ }
+#endif
+ self.fSuffix.set(suffix);
+ } else {
+ self.fSuffix.reset();
+ }
+}
+
+// returns true if suffix is empty, or if str ends with suffix
+static bool issuffixfor(const SkString& suffix, const char str[]) {
+ size_t suffixLen = suffix.size();
+ size_t strLen = strlen(str);
+
+ return strLen >= suffixLen &&
+ memcmp(suffix.c_str(), str + strLen - suffixLen, suffixLen) == 0;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ dirent* entry;
+
+ while ((entry = ::readdir(self.fDIR)) != nullptr) {
+ struct stat s;
+ SkString str(self.fPath);
+
+ if (!str.endsWith("/") && !str.endsWith("\\")) {
+ str.append("/");
+ }
+ str.append(entry->d_name);
+
+ if (0 == stat(str.c_str(), &s)) {
+ if (getDir) {
+ if (s.st_mode & S_IFDIR) {
+ break;
+ }
+ } else {
+ if (!(s.st_mode & S_IFDIR) && issuffixfor(self.fSuffix, entry->d_name)) {
+ break;
+ }
+ }
+ }
+ }
+        if (entry) { // we broke out with a file
+            if (name) {
+                name->set(entry->d_name);
+            }
+            return true;
+        }
+    }
+ return false;
+}
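+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build): the
+// iterator above lists plain files matching a suffix; pass getDir=true to
+// next() to walk subdirectories instead.
+static void list_pngs(const char dir[]) {
+    SkOSFile::Iter iter(dir, ".png");
+    SkString name;
+    while (iter.next(&name)) {
+        SkDebugf("%s/%s\n", dir, name.c_str());
+    }
+}
+#endif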
diff --git a/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
new file mode 100644
index 0000000000..f870c5bfd2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkOSFile.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+#ifdef SK_BUILD_FOR_UNIX
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <direct.h>
+#include <io.h>
+#include <vector>
+#include "src/utils/SkUTF.h"
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include "src/ports/SkOSFile_ios.h"
+#endif
+
+#ifdef _WIN32
+static bool is_ascii(const char* s) {
+ while (char v = *s++) {
+ if ((v & 0x80) != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static FILE* fopen_win(const char* utf8path, const char* perm) {
+ if (is_ascii(utf8path)) {
+ return fopen(utf8path, perm);
+ }
+
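+    // Two-pass conversion: first count the UTF-16 units needed, then convert.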
+ const char* ptr = utf8path;
+ const char* end = utf8path + strlen(utf8path);
+ size_t n = 0;
+ while (ptr < end) {
+ SkUnichar u = SkUTF::NextUTF8(&ptr, end);
+ if (u < 0) {
+ return nullptr; // malformed UTF-8
+ }
+ n += SkUTF::ToUTF16(u);
+ }
+ std::vector<uint16_t> wchars(n + 1);
+ uint16_t* out = wchars.data();
+ for (const char* ptr = utf8path; ptr < end;) {
+ out += SkUTF::ToUTF16(SkUTF::NextUTF8(&ptr, end), out);
+ }
+ SkASSERT(out == &wchars[n]);
+ *out = 0; // final null
+ wchar_t wperms[4] = {(wchar_t)perm[0], (wchar_t)perm[1], (wchar_t)perm[2], (wchar_t)perm[3]};
+ return _wfopen((wchar_t*)wchars.data(), wperms);
+}
+#endif
+
+FILE* sk_fopen(const char path[], SkFILE_Flags flags) {
+ char perm[4] = {0, 0, 0, 0};
+ char* p = perm;
+
+ if (flags & kRead_SkFILE_Flag) {
+ *p++ = 'r';
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ *p++ = 'w';
+ }
+ *p = 'b';
+
+ FILE* file = nullptr;
+#ifdef _WIN32
+ file = fopen_win(path, perm);
+#else
+ file = fopen(path, perm);
+#endif
+#ifdef SK_BUILD_FOR_IOS
+ // if not found in default path and read-only, try to open from bundle
+ if (!file && kRead_SkFILE_Flag == flags) {
+ SkString bundlePath;
+ if (ios_get_path_in_bundle(path, &bundlePath)) {
+ file = fopen(bundlePath.c_str(), perm);
+ }
+ }
+#endif
+
+ if (nullptr == file && (flags & kWrite_SkFILE_Flag)) {
+ SkDEBUGF("sk_fopen: fopen(\"%s\", \"%s\") returned nullptr (errno:%d): %s\n",
+ path, perm, errno, strerror(errno));
+ }
+ return file;
+}
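+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build). The mode
+// string built above is "rb", "wb", or "rwb"; "rwb" is not a standard fopen
+// mode, so in practice callers pass a single flag.
+static size_t file_size(const char path[]) {
+    FILE* f = sk_fopen(path, kRead_SkFILE_Flag);
+    if (!f) {
+        return 0;
+    }
+    size_t size = sk_fgetsize(f);
+    sk_fclose(f);
+    return size;
+}
+#endif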
+
+size_t sk_fgetsize(FILE* f) {
+ SkASSERT(f);
+
+ long curr = ftell(f); // remember where we are
+ if (curr < 0) {
+ return 0;
+ }
+
+ fseek(f, 0, SEEK_END); // go to the end
+ long size = ftell(f); // record the size
+ if (size < 0) {
+ size = 0;
+ }
+
+ fseek(f, curr, SEEK_SET); // go back to our prev location
+ return size;
+}
+
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE* f) {
+ SkASSERT(f);
+ return fwrite(buffer, 1, byteCount, f);
+}
+
+void sk_fflush(FILE* f) {
+ SkASSERT(f);
+ fflush(f);
+}
+
+void sk_fsync(FILE* f) {
+#if !defined(_WIN32) && !defined(SK_BUILD_FOR_ANDROID) && !defined(__UCLIBC__) \
+ && !defined(_NEWLIB_VERSION)
+ int fd = fileno(f);
+ fsync(fd);
+#endif
+}
+
+size_t sk_ftell(FILE* f) {
+ long curr = ftell(f);
+ if (curr < 0) {
+ return 0;
+ }
+ return curr;
+}
+
+void sk_fclose(FILE* f) {
+ if (f) {
+ fclose(f);
+ }
+}
+
+bool sk_isdir(const char *path) {
+ struct stat status;
+ if (0 != stat(path, &status)) {
+#ifdef SK_BUILD_FOR_IOS
+ // check the bundle directory if not in default path
+ SkString bundlePath;
+        if (!ios_get_path_in_bundle(path, &bundlePath) ||
+            0 != stat(bundlePath.c_str(), &status)) {
+            // Not in the bundle either (or the bundle lookup failed).
+            return false;
+        }
+#else
+ return false;
+#endif
+ }
+ return SkToBool(status.st_mode & S_IFDIR);
+}
+
+bool sk_mkdir(const char* path) {
+ if (sk_isdir(path)) {
+ return true;
+ }
+ if (sk_exists(path)) {
+ fprintf(stderr,
+ "sk_mkdir: path '%s' already exists but is not a directory\n",
+ path);
+ return false;
+ }
+
+ int retval;
+#ifdef _WIN32
+ retval = _mkdir(path);
+#else
+ retval = mkdir(path, 0777);
+#endif
+ return 0 == retval;
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_win.cpp b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
new file mode 100644
index 0000000000..3dcdc1fb09
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/private/SkMalloc.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTFitsIn.h"
+#include "src/core/SkLeanWindows.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkStringUtils.h"
+
+#include <io.h>
+#include <new>
+#include <stdio.h>
+#include <sys/stat.h>
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = 0; // existence
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= 4; // read
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= 2; // write
+ }
+ return (0 == _access(path, mode));
+}
+
+typedef struct {
+ ULONGLONG fVolume;
+ ULONGLONG fLsbSize;
+ ULONGLONG fMsbSize;
+} SkFILEID;
+
+static bool sk_ino(FILE* f, SkFILEID* id) {
+ int fileno = _fileno((FILE*)f);
+ if (fileno < 0) {
+ return false;
+ }
+
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return false;
+ }
+
+ //TODO: call GetFileInformationByHandleEx on Vista and later with FileIdInfo.
+ BY_HANDLE_FILE_INFORMATION info;
+ if (0 == GetFileInformationByHandle(file, &info)) {
+ return false;
+ }
+ id->fVolume = info.dwVolumeSerialNumber;
+ id->fLsbSize = info.nFileIndexLow + (((ULONGLONG)info.nFileIndexHigh) << 32);
+ id->fMsbSize = 0;
+
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.fLsbSize == bID.fLsbSize
+ && aID.fMsbSize == bID.fMsbSize
+ && aID.fVolume == bID.fVolume;
+}
+
+class SkAutoNullKernelHandle : SkNoncopyable {
+public:
+ SkAutoNullKernelHandle(const HANDLE handle) : fHandle(handle) { }
+ ~SkAutoNullKernelHandle() { CloseHandle(fHandle); }
+ operator HANDLE() const { return fHandle; }
+ bool isValid() const { return SkToBool(fHandle); }
+private:
+ HANDLE fHandle;
+};
+typedef SkAutoNullKernelHandle SkAutoWinMMap;
+
+void sk_fmunmap(const void* addr, size_t) {
+ UnmapViewOfFile(addr);
+}
+
+void* sk_fdmmap(int fileno, size_t* length) {
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return nullptr;
+ }
+
+ LARGE_INTEGER fileSize;
+ if (0 == GetFileSizeEx(file, &fileSize)) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not get file size.") to report.
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(fileSize.QuadPart)) {
+ return nullptr;
+ }
+
+ SkAutoWinMMap mmap(CreateFileMapping(file, nullptr, PAGE_READONLY, 0, 0, nullptr));
+ if (!mmap.isValid()) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not create file mapping.") to report.
+ return nullptr;
+ }
+
+ // Eventually call UnmapViewOfFile
+ void* addr = MapViewOfFile(mmap, FILE_MAP_READ, 0, 0, 0);
+ if (nullptr == addr) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not map view of file.") to report.
+ return nullptr;
+ }
+
+ *length = static_cast<size_t>(fileSize.QuadPart);
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return _fileno((FILE*)f);
+}
+
+void* sk_fmmap(FILE* f, size_t* length) {
+ int fileno = sk_fileno(f);
+ if (fileno < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fileno, length);
+}
+
+size_t sk_qread(FILE* file, void* buffer, size_t count, size_t offset) {
+ int fileno = sk_fileno(file);
+ HANDLE fileHandle = (HANDLE)_get_osfhandle(fileno);
+    if (INVALID_HANDLE_VALUE == fileHandle) {
+ return SIZE_MAX;
+ }
+
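+    // Split the 64-bit offset across OVERLAPPED's Offset/OffsetHigh so that
+    // ReadFile reads at 'offset' regardless of the current file position.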
+ OVERLAPPED overlapped;
+ memset(&overlapped, 0, sizeof(overlapped));
+ ULARGE_INTEGER winOffset;
+ winOffset.QuadPart = offset;
+ overlapped.Offset = winOffset.LowPart;
+ overlapped.OffsetHigh = winOffset.HighPart;
+
+ if (!SkTFitsIn<DWORD>(count)) {
+ count = std::numeric_limits<DWORD>::max();
+ }
+
+ DWORD bytesRead;
+ if (ReadFile(fileHandle, buffer, static_cast<DWORD>(count), &bytesRead, &overlapped)) {
+ return bytesRead;
+ }
+ if (GetLastError() == ERROR_HANDLE_EOF) {
+ return 0;
+ }
+ return SIZE_MAX;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fHandle(0), fPath16(nullptr) { }
+ HANDLE fHandle;
+ uint16_t* fPath16;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
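+// Builds the UTF-16 search pattern FindFirstFileW expects: "<src>/*<suffix>".
+// The 3 extra units in len2 cover the '/', the '*', and the terminating NUL.
+// Note the widening below is byte-wise, so 'src' is assumed to be ASCII.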
+static uint16_t* concat_to_16(const char src[], const char suffix[]) {
+ size_t i, len = strlen(src);
+ size_t len2 = 3 + (suffix ? strlen(suffix) : 0);
+ uint16_t* dst = (uint16_t*)sk_malloc_throw((len + len2) * sizeof(uint16_t));
+
+ for (i = 0; i < len; i++) {
+ dst[i] = src[i];
+ }
+
+ if (i > 0 && dst[i-1] != '/') {
+ dst[i++] = '/';
+ }
+ dst[i++] = '*';
+
+ if (suffix) {
+ while (*suffix) {
+ dst[i++] = *suffix++;
+ }
+ }
+ dst[i] = 0;
+ SkASSERT(i + 1 <= len + len2);
+
+ return dst;
+}
+
+SkOSFile::Iter::Iter() { new (fSelf.get()) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf.get()) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ sk_free(self.fPath16);
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ self.fHandle = 0;
+ }
+ if (nullptr == path) {
+ path = "";
+ }
+
+ sk_free(self.fPath16);
+ self.fPath16 = concat_to_16(path, suffix);
+}
+
+static bool is_magic_dir(const uint16_t dir[]) {
+ // return true for "." and ".."
+ return dir[0] == '.' && (dir[1] == 0 || (dir[1] == '.' && dir[2] == 0));
+}
+
+static bool get_the_file(HANDLE handle, SkString* name, WIN32_FIND_DATAW* dataPtr, bool getDir) {
+ WIN32_FIND_DATAW data;
+
+ if (nullptr == dataPtr) {
+ if (::FindNextFileW(handle, &data))
+ dataPtr = &data;
+ else
+ return false;
+ }
+
+ for (;;) {
+ if (getDir) {
+ if ((dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+ !is_magic_dir((uint16_t*)dataPtr->cFileName))
+ {
+ break;
+ }
+ } else {
+ if (!(dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ break;
+ }
+ }
+ if (!::FindNextFileW(handle, dataPtr)) {
+ return false;
+ }
+ }
+ // if we get here, we've found a file/dir
+ if (name) {
+ const uint16_t* utf16name = (const uint16_t*)dataPtr->cFileName;
+ const uint16_t* ptr = utf16name;
+ while (*ptr != 0) { ++ptr; }
+ *name = SkStringFromUTF16(utf16name, ptr - utf16name);
+ }
+ return true;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ WIN32_FIND_DATAW data;
+ WIN32_FIND_DATAW* dataPtr = nullptr;
+
+ if (self.fHandle == 0) { // our first time
+ if (self.fPath16 == nullptr || *self.fPath16 == 0) { // check for no path
+ return false;
+ }
+
+ self.fHandle = ::FindFirstFileW((LPCWSTR)self.fPath16, &data);
+ if (self.fHandle != 0 && self.fHandle != (HANDLE)~0) {
+ dataPtr = &data;
+ }
+ }
+ return self.fHandle != (HANDLE)~0 && get_the_file(self.fHandle, name, dataPtr, getDir);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary.h b/gfx/skia/skia/src/ports/SkOSLibrary.h
new file mode 100644
index 0000000000..ea1378f8b0
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSLibrary_DEFINED
+#define SkOSLibrary_DEFINED
+
+void* DynamicLoadLibrary(const char* libraryName);
+void* GetProcedureAddress(void* library, const char* functionName);
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
new file mode 100644
index 0000000000..b4b957e062
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN)
+
+#include "src/ports/SkOSLibrary.h"
+
+#include <dlfcn.h>
+
+void* DynamicLoadLibrary(const char* libraryName) {
+ return dlopen(libraryName, RTLD_LAZY);
+}
+
+void* GetProcedureAddress(void* library, const char* functionName) {
+ return dlsym(library, functionName);
+}
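+
+#if 0
+// A minimal usage sketch (illustrative only, kept out of the build); the
+// library and symbol names are placeholders, not anything Skia itself loads.
+static void* load_example() {
+    void* lib = DynamicLoadLibrary("libexample.so");
+    return lib ? GetProcedureAddress(lib, "example_entry_point") : nullptr;
+}
+#endif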
+#endif//!defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
new file mode 100644
index 0000000000..6e19065f4b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/core/SkLeanWindows.h"
+#include "src/ports/SkOSLibrary.h"
+
+void* DynamicLoadLibrary(const char* libraryName) {
+ return LoadLibraryA(libraryName);
+}
+
+void* GetProcedureAddress(void* library, const char* functionName) {
+ return reinterpret_cast<void*>(::GetProcAddress((HMODULE)library, functionName));
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
new file mode 100644
index 0000000000..fc9c6d8cac
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTArray.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/utils/SkUTF.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+class SK_API SkRemotableFontMgr_DirectWrite : public SkRemotableFontMgr {
+private:
+ struct DataId {
+ IUnknown* fLoader; // In COM only IUnknown pointers may be safely used for identity.
+ void* fKey;
+ UINT32 fKeySize;
+
+ DataId() { }
+
+ DataId(DataId&& that) : fLoader(that.fLoader), fKey(that.fKey), fKeySize(that.fKeySize) {
+ that.fLoader = nullptr;
+ that.fKey = nullptr;
+ SkDEBUGCODE(that.fKeySize = 0xFFFFFFFF;)
+ }
+
+ ~DataId() {
+ if (fLoader) {
+ fLoader->Release();
+ }
+ sk_free(fKey);
+ }
+ };
+
+ mutable SkTArray<DataId> fDataIdCache;
+ mutable SkMutex fDataIdCacheMutex;
+
+ int FindOrAdd(IDWriteFontFileLoader* fontFileLoader,
+ const void* refKey, UINT32 refKeySize) const
+ {
+ SkTScopedComPtr<IUnknown> fontFileLoaderId;
+ HR_GENERAL(fontFileLoader->QueryInterface(&fontFileLoaderId),
+ "Failed to re-convert to IDWriteFontFileLoader.",
+ SkFontIdentity::kInvalidDataId);
+
+ SkAutoMutexExclusive ama(fDataIdCacheMutex);
+ int count = fDataIdCache.count();
+ int i;
+ for (i = 0; i < count; ++i) {
+ const DataId& current = fDataIdCache[i];
+ if (fontFileLoaderId.get() == current.fLoader &&
+ refKeySize == current.fKeySize &&
+ 0 == memcmp(refKey, current.fKey, refKeySize))
+ {
+ return i;
+ }
+ }
+ DataId& added = fDataIdCache.push_back();
+ added.fLoader = fontFileLoaderId.release(); // Ref is passed.
+ added.fKey = sk_malloc_throw(refKeySize);
+ memcpy(added.fKey, refKey, refKeySize);
+ added.fKeySize = refKeySize;
+
+ return i;
+ }
+
+public:
+
+
+ /** localeNameLength must include the null terminator. */
+ SkRemotableFontMgr_DirectWrite(IDWriteFontCollection* fontCollection,
+ WCHAR* localeName, int localeNameLength)
+ : fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ {
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ }
+
+ HRESULT FontToIdentity(IDWriteFont* font, SkFontIdentity* fontId) const {
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ UINT32 numFiles;
+ HR(fontFace->GetFiles(&numFiles, nullptr));
+ if (numFiles > 1) {
+ return E_FAIL;
+ }
+
+ // data id
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HR(fontFace->GetFiles(&numFiles, &fontFile));
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HR(fontFile->GetLoader(&fontFileLoader));
+
+ const void* refKey;
+ UINT32 refKeySize;
+ HR(fontFile->GetReferenceKey(&refKey, &refKeySize));
+
+ fontId->fDataId = FindOrAdd(fontFileLoader.get(), refKey, refKeySize);
+
+ // index
+ fontId->fTtcIndex = fontFace->GetIndex();
+
+ // style
+ fontId->fFontStyle = get_style(font);
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* getIndex(int familyIndex) const override {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.");
+
+ int count = fontFamily->GetFontCount();
+ SkFontIdentity* fontIds;
+ sk_sp<SkRemotableFontIdentitySet> fontIdSet(
+ new SkRemotableFontIdentitySet(count, &fontIds));
+ for (int fontIndex = 0; fontIndex < count; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fontFamily->GetFont(fontIndex, &font), "Could not get font.");
+
+ HRN(FontToIdentity(font.get(), &fontIds[fontIndex]));
+ }
+ return fontIdSet.release();
+ }
+
+ virtual SkFontIdentity matchIndexStyle(int familyIndex,
+ const SkFontStyle& pattern) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HR_GENERAL(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.",
+ identity);
+
+ const DWriteStyle dwStyle(pattern);
+ SkTScopedComPtr<IDWriteFont> font;
+ HR_GENERAL(fontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth,
+ dwStyle.fSlant, &font),
+ "Could not match font in family.",
+ identity);
+
+ HR_GENERAL(FontToIdentity(font.get(), &identity), nullptr, identity);
+
+ return identity;
+ }
+
+ static HRESULT getDefaultFontFamilyName(SkSMallocWCHAR* name) {
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+ if (0 == SystemParametersInfoW(SPI_GETNONCLIENTMETRICS,
+ sizeof(metrics),
+ &metrics,
+ 0)) {
+ return E_UNEXPECTED;
+ }
+
+ size_t len = wcsnlen_s(metrics.lfMessageFont.lfFaceName, LF_FACESIZE) + 1;
+ if (0 != wcsncpy_s(name->reset(len), len, metrics.lfMessageFont.lfFaceName, _TRUNCATE)) {
+ return E_UNEXPECTED;
+ }
+
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* matchName(const char familyName[]) const override {
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ SkRemotableFontIdentitySet::NewEmpty());
+ if (!exists) {
+ return SkRemotableFontIdentitySet::NewEmpty();
+ }
+
+ return this->getIndex(index);
+ }
+
+ virtual SkFontIdentity matchNameStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ identity);
+ if (!exists) {
+ return identity;
+ }
+
+ return this->matchIndexStyle(index, style);
+ }
+
+ class FontFallbackRenderer : public IDWriteTextRenderer {
+ public:
+ FontFallbackRenderer(const SkRemotableFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character) {
+ fIdentity.fDataId = SkFontIdentity::kInvalidDataId;
+ }
+
+ virtual ~FontFallbackRenderer() { }
+
+ // IDWriteTextRenderer methods
+ SK_STDMETHODIMP DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+            // The font we get back may not actually contain the requested
+            // character: when no match is found, DWrite hands us the fallback
+            // font instead. Verify that the font really has the character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ HR(fOuter->FontToIdentity(font.get(), &fIdentity));
+ }
+
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ SK_STDMETHODIMP IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = {1.0, 0.0, 0.0, 1.0, 0.0, 0.0};
+ *transform = ident;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ // IUnknown methods
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ SK_STDMETHODIMP QueryInterface(
+ IID const& riid, void** ppvObject) override
+ {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ const SkFontIdentity FallbackIdentity() { return fIdentity; }
+
+ protected:
+ ULONG fRefCount;
+ sk_sp<const SkRemotableFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ SkFontIdentity fIdentity;
+ };
+
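+    // Fallback discovery: lay out the single character with the custom
+    // IDWriteTextRenderer above and record which font DWrite actually selects
+    // to draw it.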
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[],
+ const SkFontStyle& pattern,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ IDWriteFactory* dwFactory = sk_get_dwrite_factory();
+ if (nullptr == dwFactory) {
+ return identity;
+ }
+
+ // TODO: use IDWriteFactory2::GetSystemFontFallback when available.
+
+ const DWriteStyle dwStyle(pattern);
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ //TODO: support fallback stack.
+ HR_GENERAL(sk_cstring_to_wchar(bcp47[bcp47Count-1], &dwBcp47Local), nullptr, identity);
+ dwBcp47 = &dwBcp47Local;
+ }
+
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HR_GENERAL(dwFactory->CreateTextFormat(dwFamilyName,
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ *dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.",
+ identity);
+
+ WCHAR str[16];
+ UINT32 strLen = static_cast<UINT32>(
+ SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HR_GENERAL(dwFactory->CreateTextLayout(str, strLen, fallbackFormat.get(),
+ 200.0f, 200.0f,
+ &fallbackLayout),
+ "Could not create text layout.",
+ identity);
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HR_GENERAL(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.",
+ identity);
+
+ return fontFallbackRenderer->FallbackIdentity();
+ }
+
+ SkStreamAsset* getData(int dataId) const override {
+ SkAutoMutexExclusive ama(fDataIdCacheMutex);
+ if (dataId >= fDataIdCache.count()) {
+ return nullptr;
+ }
+ const DataId& id = fDataIdCache[dataId];
+
+ SkTScopedComPtr<IDWriteFontFileLoader> loader;
+        HRNM(id.fLoader->QueryInterface(&loader), "QueryInterface IDWriteFontFileLoader failed.");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(loader->CreateStreamFromKey(id.fKey, id.fKeySize, &fontFileStream),
+ "Could not create font file stream.");
+
+ return new SkDWriteFontFileStream(fontFileStream.get());
+ }
+
+private:
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+
+ typedef SkRemotableFontMgr INHERITED;
+};
+
+SkRemotableFontMgr* SkRemotableFontMgr_New_DirectWrite() {
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> sysFontCollection;
+ HRNM(factory->GetSystemFontCollection(&sysFontCollection, FALSE),
+ "Could not get system font collection.");
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ WCHAR* localeName = nullptr;
+ int localeNameLen = 0;
+
+ // Dynamically load GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ localeNameLen = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (localeNameLen) {
+ localeName = localeNameStorage;
+        }
+ }
+
+ return new SkRemotableFontMgr_DirectWrite(sysFontCollection.get(), localeName, localeNameLen);
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
new file mode 100644
index 0000000000..c54abbdea8
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
@@ -0,0 +1,1240 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#undef GetGlyphIndices
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkSharedMutex.h"
+#include "src/ports/SkScalerContext_win_dw.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/sfnt/SkOTTable_EBLC.h"
+#include "src/sfnt/SkOTTable_EBSC.h"
+#include "src/sfnt/SkOTTable_gasp.h"
+#include "src/sfnt/SkOTTable_maxp.h"
+#include "src/utils/SkMatrix22.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteGeometrySink.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_1.h>
+#include <dwrite_3.h>
+
+/* Note:
+ * In versions 8 and 8.1 of Windows, some calls in DWrite are not thread safe.
+ * The DWriteFactoryMutex protects the calls that are problematic.
+ *
+ * On DWrite 3 or above, which is only available on Windows 10, we don't enable
+ * the locking to avoid thread contention.
+ */
+static SkSharedMutex DWriteFactoryMutex;
+
+struct MaybeExclusive {
+ MaybeExclusive(SkScalerContext_DW* ctx) : fEnabled(!ctx->isDWrite3()) {
+ if (fEnabled) {
+ DWriteFactoryMutex.acquire();
+ }
+ }
+ ~MaybeExclusive() {
+ if (fEnabled) {
+ DWriteFactoryMutex.release();
+ }
+ }
+ bool fEnabled;
+};
+
+struct MaybeShared {
+ MaybeShared(SkScalerContext_DW* ctx) : fEnabled(!ctx->isDWrite3()) {
+ if (fEnabled) {
+ DWriteFactoryMutex.acquireShared();
+ }
+ }
+ ~MaybeShared() {
+ if (fEnabled) {
+ DWriteFactoryMutex.releaseShared();
+ }
+ }
+ bool fEnabled;
+};
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool is_hinted(SkScalerContext_DW* ctx, DWriteFontTypeface* typeface) {
+ MaybeExclusive l(ctx);
+ AutoTDWriteTable<SkOTTableMaximumProfile> maxp(typeface->fDWriteFontFace.get());
+ if (!maxp.fExists) {
+ return false;
+ }
+ if (maxp.fSize < sizeof(SkOTTableMaximumProfile::Version::TT)) {
+ return false;
+ }
+ if (maxp->version.version != SkOTTableMaximumProfile::Version::TT::VERSION) {
+ return false;
+ }
+ return (0 != maxp->version.tt.maxSizeOfInstructions);
+}
+
+/** A GaspRange is inclusive, [min, max]. */
+struct GaspRange {
+ using Behavior = SkOTTableGridAndScanProcedure::GaspRange::behavior;
+ GaspRange(int min, int max, int version, Behavior flags)
+ : fMin(min), fMax(max), fVersion(version), fFlags(flags) { }
+ int fMin;
+ int fMax;
+ int fVersion;
+ Behavior fFlags;
+};
+
+bool get_gasp_range(DWriteFontTypeface* typeface, int size, GaspRange* range) {
+ AutoTDWriteTable<SkOTTableGridAndScanProcedure> gasp(typeface->fDWriteFontFace.get());
+ if (!gasp.fExists) {
+ return false;
+ }
+ if (gasp.fSize < sizeof(SkOTTableGridAndScanProcedure)) {
+ return false;
+ }
+ if (gasp->version != SkOTTableGridAndScanProcedure::version0 &&
+ gasp->version != SkOTTableGridAndScanProcedure::version1)
+ {
+ return false;
+ }
+
+ uint16_t numRanges = SkEndianSwap16(gasp->numRanges);
+ if (numRanges > 1024 ||
+ gasp.fSize < sizeof(SkOTTableGridAndScanProcedure) +
+ sizeof(SkOTTableGridAndScanProcedure::GaspRange) * numRanges)
+ {
+ return false;
+ }
+
+ const SkOTTableGridAndScanProcedure::GaspRange* rangeTable =
+ SkTAfter<const SkOTTableGridAndScanProcedure::GaspRange>(gasp.get());
+ int minPPEM = -1;
+ for (uint16_t i = 0; i < numRanges; ++i, ++rangeTable) {
+ int maxPPEM = SkEndianSwap16(rangeTable->maxPPEM);
+ if (minPPEM < size && size <= maxPPEM) {
+ range->fMin = minPPEM + 1;
+ range->fMax = maxPPEM;
+ range->fVersion = SkEndian_SwapBE16(gasp->version);
+ range->fFlags = rangeTable->flags;
+ return true;
+ }
+ minPPEM = maxPPEM;
+ }
+ return false;
+}
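+
+// Worked example of the range walk above: gasp entries are keyed by maxPPEM
+// and each range starts just past the previous entry's max. Entries with
+// maxPPEM {8, 16, 0xFFFF} yield the inclusive ranges [0,8], [9,16], and
+// [17,0xFFFF]; querying size 12 returns [9,16] with that entry's flags.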
+
+/** Returns true if the given gasp behavior flags request gridfitting only. */
+static bool is_gridfit_only(GaspRange::Behavior flags) {
+ return flags.raw.value == GaspRange::Behavior::Raw::GridfitMask;
+}
+
+static bool has_bitmap_strike(SkScalerContext_DW* ctx, DWriteFontTypeface* typeface, GaspRange range) {
+ MaybeExclusive l(ctx);
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapLocation> eblc(typeface->fDWriteFontFace.get());
+ if (!eblc.fExists) {
+ return false;
+ }
+ if (eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation)) {
+ return false;
+ }
+ if (eblc->version != SkOTTableEmbeddedBitmapLocation::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(eblc->numSizes);
+ if (numSizes > 1024 ||
+ eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation) +
+ sizeof(SkOTTableEmbeddedBitmapLocation::BitmapSizeTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable* sizeTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable>(eblc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++sizeTable) {
+ if (sizeTable->ppemX == sizeTable->ppemY &&
+ range.fMin <= sizeTable->ppemX && sizeTable->ppemX <= range.fMax)
+ {
+ // TODO: determine if we should dig through IndexSubTableArray/IndexSubTable
+ // to determine the actual number of glyphs with bitmaps.
+
+ // TODO: Ensure that the bitmaps actually cover a significant portion of the strike.
+
+ // TODO: Ensure that the bitmaps are bi-level?
+ if (sizeTable->endGlyphIndex >= sizeTable->startGlyphIndex + 3) {
+ return true;
+ }
+ }
+ }
+ }
+
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapScaling> ebsc(typeface->fDWriteFontFace.get());
+ if (!ebsc.fExists) {
+ return false;
+ }
+ if (ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling)) {
+ return false;
+ }
+ if (ebsc->version != SkOTTableEmbeddedBitmapScaling::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(ebsc->numSizes);
+ if (numSizes > 1024 ||
+ ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling) +
+ sizeof(SkOTTableEmbeddedBitmapScaling::BitmapScaleTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable* scaleTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable>(ebsc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++scaleTable) {
+ if (scaleTable->ppemX == scaleTable->ppemY &&
+ range.fMin <= scaleTable->ppemX && scaleTable->ppemX <= range.fMax) {
+ // EBSC tables are normally only found in bitmap only fonts.
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool both_zero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool is_axis_aligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (both_zero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ both_zero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+SkScalerContext_DW::SkScalerContext_DW(sk_sp<DWriteFontTypeface> typefaceRef,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(typefaceRef), effects, desc)
+ , fGlyphCount(-1) {
+
+ DWriteFontTypeface* typeface = this->getDWriteTypeface();
+ fIsColorFont = typeface->fFactory2 &&
+ typeface->fDWriteFontFace2 &&
+ typeface->fDWriteFontFace2->IsColorFont();
+ fClearTypeLevel = int(typeface->GetClearTypeLevel() * 256);
+
+ // In general, all glyphs should use DWriteFontFace::GetRecommendedRenderingMode
+ // except when bi-level rendering is requested or there are embedded
+ // bi-level bitmaps (and the embedded bitmap flag is set and no rotation).
+ //
+ // DirectWrite's IDWriteFontFace::GetRecommendedRenderingMode does not do
+ // this. As a result, determine the actual size of the text and then see if
+ // there are any embedded bi-level bitmaps of that size. If there are, then
+ // force bitmaps by requesting bi-level rendering.
+ //
+ // FreeType allows for separate ppemX and ppemY, but DirectWrite assumes
+ // square pixels and only uses ppemY. Therefore the transform must track any
+ // non-uniform x-scale.
+ //
+ // Also, rotated glyphs should have the same absolute advance widths as
+ // horizontal glyphs and the subpixel flag should not affect glyph shapes.
+
+ SkVector scale;
+ fRec.computeMatrices(SkScalerContextRec::kVertical_PreMatrixScale, &scale, &fSkXform);
+
+ fXform.m11 = SkScalarToFloat(fSkXform.getScaleX());
+ fXform.m12 = SkScalarToFloat(fSkXform.getSkewY());
+ fXform.m21 = SkScalarToFloat(fSkXform.getSkewX());
+ fXform.m22 = SkScalarToFloat(fSkXform.getScaleY());
+ fXform.dx = 0;
+ fXform.dy = 0;
+
+ // realTextSize is the actual device size we want (as opposed to the size the user requested).
+ // gdiTextSize is the size we request when GDI compatible.
+ // If the scale is negative, this means the matrix will do the flip anyway.
+ const SkScalar realTextSize = scale.fY;
+ // Due to floating point math, the lower bits are suspect. Round carefully.
+ SkScalar gdiTextSize = SkScalarRoundToScalar(realTextSize * 64.0f) / 64.0f;
+ if (gdiTextSize == 0) {
+ gdiTextSize = SK_Scalar1;
+ }
+
+ bool bitmapRequested = SkToBool(fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag);
+ bool treatLikeBitmap = false;
+ bool axisAlignedBitmap = false;
+ if (bitmapRequested) {
+ // When embedded bitmaps are requested, treat the entire range like
+ // a bitmap strike if the range is gridfit only and contains a bitmap.
+ int bitmapPPEM = SkScalarTruncToInt(gdiTextSize);
+ GaspRange range(bitmapPPEM, bitmapPPEM, 0, GaspRange::Behavior());
+ if (get_gasp_range(typeface, bitmapPPEM, &range)) {
+ if (!is_gridfit_only(range.fFlags)) {
+ range = GaspRange(bitmapPPEM, bitmapPPEM, 0, GaspRange::Behavior());
+ }
+ }
+ treatLikeBitmap = has_bitmap_strike(this, typeface, range);
+
+ axisAlignedBitmap = is_axis_aligned(fRec);
+ }
+
+ GaspRange range(0, 0xFFFF, 0, GaspRange::Behavior());
+
+ // If the user requested aliased, do so with aliased compatible metrics.
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_ALIASED;
+ fTextureType = DWRITE_TEXTURE_ALIASED_1x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+ // If we can use a bitmap, use gdi classic rendering and measurement.
+ // This will not always provide a bitmap, but matches expected behavior.
+ } else if ((treatLikeBitmap && axisAlignedBitmap) || typeface->ForceGDI()) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_GDI_CLASSIC;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+ // If rotated but the horizontal text could have used a bitmap,
+ // render high quality rotated glyphs but measure using bitmap metrics.
+ } else if (treatLikeBitmap) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+ // If the font has a gasp table version 1, use it to determine symmetric rendering.
+ } else if ((get_gasp_range(typeface, SkScalarRoundToInt(gdiTextSize), &range) &&
+ range.fVersion >= 1) ||
+ realTextSize > SkIntToScalar(20) || !is_hinted(this, typeface)) {
+ fTextSizeRender = realTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+
+ switch (typeface->GetRenderingMode()) {
+ case DWRITE_RENDERING_MODE_NATURAL:
+ case DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC:
+ fRenderingMode = typeface->GetRenderingMode();
+ break;
+ default:
+ if (IDWriteRenderingParams* params = sk_get_dwrite_default_rendering_params()) {
+ typeface->fDWriteFontFace->GetRecommendedRenderingMode(
+ fTextSizeRender, 1.0f, fMeasuringMode, params, &fRenderingMode);
+ }
+ break;
+ }
+
+ // We don't support outline mode right now.
+ if (fRenderingMode == DWRITE_RENDERING_MODE_OUTLINE) {
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL_SYMMETRIC;
+ }
+
+ // Fonts with hints, no gasp or gasp version 0, and below 20px get non-symmetric rendering.
+ // Often such fonts have hints which were only tested with GDI ClearType classic.
+ // Some of these fonts rely on drop out control in the y direction in order to be legible.
+ // Tenor Sans
+ // https://fonts.google.com/specimen/Tenor+Sans
+ // Gill Sans W04
+ // https://cdn.leagueoflegends.com/lolkit/1.1.9/resources/fonts/gill-sans-w04-book.woff
+ // https://na.leagueoflegends.com/en/news/game-updates/patch/patch-410-notes
+ // See https://crbug.com/385897
+ } else {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_NATURAL;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+ }
+
+ // DirectWrite2 allows for grayscale hinting.
+ fAntiAliasMode = DWRITE_TEXT_ANTIALIAS_MODE_CLEARTYPE;
+ if (typeface->fFactory2 && typeface->fDWriteFontFace2 &&
+ SkMask::kA8_Format == fRec.fMaskFormat &&
+ !(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag))
+ {
+        // DWRITE_TEXTURE_ALIASED_1x1 is now misnamed; it must also be used with grayscale.
+ fTextureType = DWRITE_TEXTURE_ALIASED_1x1;
+ fAntiAliasMode = DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE;
+ }
+
+ // DirectWrite2 allows hinting to be disabled.
+ fGridFitMode = DWRITE_GRID_FIT_MODE_ENABLED;
+ if (fRec.getHinting() == SkFontHinting::kNone) {
+ fGridFitMode = DWRITE_GRID_FIT_MODE_DISABLED;
+ if (fRenderingMode != DWRITE_RENDERING_MODE_ALIASED) {
+ fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+ }
+ }
+
+ if (this->isLinearMetrics()) {
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+ }
+}
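+
+// Recap of the mode selection above:
+//   kBW mask                            -> ALIASED render, GDI_CLASSIC measure
+//   bitmap strike + axis-aligned, or ForceGDI()
+//                                       -> GDI_CLASSIC render and measure
+//   bitmap strike, rotated              -> NATURAL_SYMMETRIC render, GDI_CLASSIC measure
+//   gasp v1+, size > 20px, or unhinted  -> NATURAL_SYMMETRIC render, NATURAL measure
+//   otherwise                           -> NATURAL render (keeps y drop-out control)
+// After that, A8 without kGenA8FromLCD_Flag switches to grayscale
+// antialiasing, and SkFontHinting::kNone disables grid fitting.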
+
+SkScalerContext_DW::~SkScalerContext_DW() {
+}
+
+unsigned SkScalerContext_DW::generateGlyphCount() {
+ if (fGlyphCount < 0) {
+ fGlyphCount = this->getDWriteTypeface()->fDWriteFontFace->GetGlyphCount();
+ }
+ return fGlyphCount;
+}
+
+bool SkScalerContext_DW::generateAdvance(SkGlyph* glyph) {
+ glyph->fAdvanceX = 0;
+ glyph->fAdvanceY = 0;
+
+ uint16_t glyphId = glyph->getGlyphID();
+ DWRITE_GLYPH_METRICS gm;
+
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ MaybeExclusive l(this);
+ HRBM(this->getDWriteTypeface()->fDWriteFontFace->GetGdiCompatibleGlyphMetrics(
+ fTextSizeMeasure,
+ 1.0f, // pixelsPerDip
+            // This parameter does not act like the lpmat2 parameter to GetGlyphOutlineW.
+            // If it did, we would pass GsA here and use G_inv below with mapVectors.
+ nullptr,
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode,
+ &glyphId, 1,
+ &gm),
+ "Could not get gdi compatible glyph metrics.");
+ } else {
+ MaybeExclusive l(this);
+ HRBM(this->getDWriteTypeface()->fDWriteFontFace->GetDesignGlyphMetrics(&glyphId, 1, &gm),
+ "Could not get design metrics.");
+ }
+
+ DWRITE_FONT_METRICS dwfm;
+ {
+ MaybeShared l(this);
+ this->getDWriteTypeface()->fDWriteFontFace->GetMetrics(&dwfm);
+ }
+ SkScalar advanceX = fTextSizeMeasure * gm.advanceWidth / dwfm.designUnitsPerEm;
+
+ SkVector advance = { advanceX, 0 };
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ // DirectWrite produced 'compatible' metrics, but while close,
+ // the end result is not always an integer as it would be with GDI.
+ advance.fX = SkScalarRoundToScalar(advance.fX);
+ }
+ fSkXform.mapVectors(&advance, 1);
+
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ return true;
+}
+
+HRESULT SkScalerContext_DW::getBoundingBox(SkGlyph* glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType,
+ RECT* bbox)
+{
+ //Measure raster size.
+ fXform.dx = SkFixedToFloat(glyph->getSubXFixed());
+ fXform.dy = SkFixedToFloat(glyph->getSubYFixed());
+
+ FLOAT advance = 0;
+
+ UINT16 glyphId = glyph->getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = this->getDWriteTypeface()->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &glyphId;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+
+ SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+ {
+ MaybeExclusive l(this);
+ // IDWriteFactory2::CreateGlyphRunAnalysis is very bad at aliased glyphs.
+ if (this->getDWriteTypeface()->fFactory2 &&
+ (fGridFitMode == DWRITE_GRID_FIT_MODE_DISABLED ||
+ fAntiAliasMode == DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE))
+ {
+ HRM(this->getDWriteTypeface()->fFactory2->CreateGlyphRunAnalysis(
+ &run,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ fGridFitMode,
+ fAntiAliasMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create DW2 glyph run analysis.");
+ } else {
+ HRM(this->getDWriteTypeface()->fFactory->CreateGlyphRunAnalysis(&run,
+ 1.0f, // pixelsPerDip,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create glyph run analysis.");
+ }
+ }
+ {
+ MaybeShared l(this);
+ HRM(glyphRunAnalysis->GetAlphaTextureBounds(textureType, bbox),
+ "Could not get texture bounds.");
+ }
+ return S_OK;
+}
+
+bool SkScalerContext_DW::isColorGlyph(const SkGlyph& glyph) {
+ SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayer;
+ return getColorGlyphRun(glyph, &colorLayer);
+}
+
+bool SkScalerContext_DW::isPngGlyph(const SkGlyph& glyph) {
+ if (!this->getDWriteTypeface()->fDWriteFontFace4) {
+ return false;
+ }
+
+ DWRITE_GLYPH_IMAGE_FORMATS f;
+ IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+ HRBM(fontFace4->GetGlyphImageFormats(glyph.getGlyphID(), 0, UINT32_MAX, &f),
+ "Cannot get glyph image formats.");
+ return f & DWRITE_GLYPH_IMAGE_FORMATS_PNG;
+}
+
+bool SkScalerContext_DW::getColorGlyphRun(const SkGlyph& glyph,
+ IDWriteColorGlyphRunEnumerator** colorGlyph)
+{
+ FLOAT advance = 0;
+ UINT16 glyphId = glyph.getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = this->getDWriteTypeface()->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &glyphId;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+
+ HRESULT hr = this->getDWriteTypeface()->fFactory2->TranslateColorGlyphRun(
+ 0, 0, &run, nullptr, fMeasuringMode, &fXform, 0, colorGlyph);
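+ // DWRITE_E_NOCOLOR just means there are no color layers for this glyph;
+ // report false rather than treating it as a failure.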
+ if (hr == DWRITE_E_NOCOLOR) {
+ return false;
+ }
+ HRBM(hr, "Failed to translate color glyph run");
+ return true;
+}
+
+void SkScalerContext_DW::generateColorMetrics(SkGlyph* glyph) {
+ SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayers;
+ HRVM(getColorGlyphRun(*glyph, &colorLayers), "Could not get color glyph run");
+ SkASSERT(colorLayers.get());
+
+ SkRect bounds = SkRect::MakeEmpty();
+ BOOL hasNextRun = FALSE;
+ while (SUCCEEDED(colorLayers->MoveNext(&hasNextRun)) && hasNextRun) {
+ const DWRITE_COLOR_GLYPH_RUN* colorGlyph;
+ HRVM(colorLayers->GetCurrentRun(&colorGlyph), "Could not get current color glyph run");
+
+ SkPath path;
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRVM(SkDWriteGeometrySink::Create(&path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ {
+ MaybeExclusive l(this);
+ HRVM(colorGlyph->glyphRun.fontFace->GetGlyphRunOutline(
+ colorGlyph->glyphRun.fontEmSize,
+ colorGlyph->glyphRun.glyphIndices,
+ colorGlyph->glyphRun.glyphAdvances,
+ colorGlyph->glyphRun.glyphOffsets,
+ colorGlyph->glyphRun.glyphCount,
+ colorGlyph->glyphRun.isSideways,
+ colorGlyph->glyphRun.bidiLevel % 2, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+ bounds.join(path.getBounds());
+ }
+ SkMatrix matrix = fSkXform;
+ if (this->isSubpixel()) {
+ matrix.postTranslate(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+ }
+ matrix.mapRect(&bounds);
+ // Round float bound values into integer.
+ SkIRect ibounds = bounds.roundOut();
+
+ glyph->fWidth = ibounds.fRight - ibounds.fLeft;
+ glyph->fHeight = ibounds.fBottom - ibounds.fTop;
+ glyph->fLeft = ibounds.fLeft;
+ glyph->fTop = ibounds.fTop;
+}
+
+#ifdef USE_PNG
+namespace {
+struct Context {
+ SkTScopedComPtr<IDWriteFontFace4> fontFace4;
+ void* glyphDataContext;
+ Context(IDWriteFontFace4* face4, void* context)
+ : fontFace4(SkRefComPtr(face4))
+ , glyphDataContext(context)
+ {}
+};
+
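+ // Release proc for the SkData below: frees the DWrite glyph image data and
+ // then the context that keeps the font face alive.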
+static void ReleaseProc(const void* ptr, void* context) {
+ Context* ctx = (Context*)context;
+ ctx->fontFace4->ReleaseGlyphImageData(ctx->glyphDataContext);
+ delete ctx;
+}
+}
+
+void SkScalerContext_DW::generatePngMetrics(SkGlyph* glyph) {
+ SkASSERT(isPngGlyph(*glyph));
+ SkASSERT(glyph->fMaskFormat == SkMask::Format::kARGB32_Format);
+ SkASSERT(this->getDWriteTypeface()->fDWriteFontFace4);
+
+ IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+ DWRITE_GLYPH_IMAGE_DATA glyphData;
+ void* glyphDataContext;
+ HRVM(fontFace4->GetGlyphImageData(glyph->getGlyphID(),
+ fTextSizeRender,
+ DWRITE_GLYPH_IMAGE_FORMATS_PNG,
+ &glyphData,
+ &glyphDataContext),
+ "Glyph image data could not be acquired.");
+
+ Context* context = new Context(fontFace4, glyphDataContext);
+ sk_sp<SkData> data = SkData::MakeWithProc(glyphData.imageData,
+ glyphData.imageDataSize,
+ &ReleaseProc,
+ context);
+
+ std::unique_ptr<SkCodec> codec = SkCodec::MakeFromData(std::move(data));
+ if (!codec) {
+ return;
+ }
+
+ SkImageInfo info = codec->getInfo();
+ SkRect bounds = SkRect::MakeLTRB(SkIntToScalar(info.bounds().fLeft),
+ SkIntToScalar(info.bounds().fTop),
+ SkIntToScalar(info.bounds().fRight),
+ SkIntToScalar(info.bounds().fBottom));
+
+ SkMatrix matrix = fSkXform;
+ SkScalar scale = fTextSizeRender / glyphData.pixelsPerEm;
+ matrix.preScale(scale, scale);
+ matrix.preTranslate(-glyphData.horizontalLeftOrigin.x, -glyphData.horizontalLeftOrigin.y);
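+ // Applied right to left: shift by the bitmap origin, scale from the strike's
+ // pixels-per-em to the render size, then apply the remaining transform.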
+ if (this->isSubpixel()) {
+ matrix.postTranslate(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+ }
+ matrix.mapRect(&bounds);
+ bounds.roundOut(&bounds);
+
+ glyph->fWidth = bounds.width();
+ glyph->fHeight = bounds.height();
+ glyph->fLeft = bounds.left();
+ glyph->fTop = bounds.top();
+ return;
+}
+#endif
+
+void SkScalerContext_DW::generateMetrics(SkGlyph* glyph) {
+ // GetAlphaTextureBounds succeeds but sometimes returns empty bounds like
+ // { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
+ // for small, but not quite zero, sized glyphs.
+ // Only set as non-empty if the returned bounds are non-empty.
+ auto glyphCheckAndSetBounds = [](SkGlyph* glyph, const RECT& bbox) {
+ if (bbox.left >= bbox.right || bbox.top >= bbox.bottom) {
+ return false;
+ }
+
+ // We're trying to pack left and top into int16_t,
+ // and width and height into uint16_t, after outsetting by 1.
+ if (!SkIRect::MakeXYWH(-32767, -32767, 65535, 65535).contains(
+ SkIRect::MakeLTRB(bbox.left, bbox.top, bbox.right, bbox.bottom))) {
+ return false;
+ }
+
+ glyph->fWidth = SkToU16(bbox.right - bbox.left);
+ glyph->fHeight = SkToU16(bbox.bottom - bbox.top);
+ glyph->fLeft = SkToS16(bbox.left);
+ glyph->fTop = SkToS16(bbox.top);
+ return true;
+ };
+
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ glyph->fLeft = 0;
+ glyph->fTop = 0;
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ if (!this->generateAdvance(glyph)) {
+ return;
+ }
+
+ if (fIsColorFont && isColorGlyph(*glyph)) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ generateColorMetrics(glyph);
+ return;
+ }
+
+ if (fIsColorFont && isPngGlyph(*glyph)) {
+#ifdef USE_PNG
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ generatePngMetrics(glyph);
+#endif
+ return;
+ }
+
+ RECT bbox;
+ HRVM(this->getBoundingBox(glyph, fRenderingMode, fTextureType, &bbox),
+ "Requested bounding box could not be determined.");
+
+ if (glyphCheckAndSetBounds(glyph, bbox)) {
+ return;
+ }
+
+ // GetAlphaTextureBounds succeeds but returns an empty RECT if there are no
+ // glyphs of the specified texture type, or if the glyph is too big for smoothing.
+ // When this happens, try the alternate texture type.
+ if (DWRITE_TEXTURE_ALIASED_1x1 != fTextureType ||
+ DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE == fAntiAliasMode)
+ {
+ HRVM(this->getBoundingBox(glyph,
+ DWRITE_RENDERING_MODE_ALIASED,
+ DWRITE_TEXTURE_ALIASED_1x1,
+ &bbox),
+ "Fallback bounding box could not be determined.");
+ if (glyphCheckAndSetBounds(glyph, bbox)) {
+ glyph->fForceBW = 1;
+ glyph->fMaskFormat = SkMask::kBW_Format;
+ }
+ }
+ // TODO: handle the case where a request for DWRITE_TEXTURE_ALIASED_1x1
+ // fails, and try DWRITE_TEXTURE_CLEARTYPE_3x1.
+}
+
+void SkScalerContext_DW::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ sk_bzero(metrics, sizeof(*metrics));
+
+ DWRITE_FONT_METRICS dwfm;
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ this->getDWriteTypeface()->fDWriteFontFace->GetGdiCompatibleMetrics(
+ fTextSizeRender,
+ 1.0f, // pixelsPerDip
+ &fXform,
+ &dwfm);
+ } else {
+ this->getDWriteTypeface()->fDWriteFontFace->GetMetrics(&dwfm);
+ }
+
+ SkScalar upem = SkIntToScalar(dwfm.designUnitsPerEm);
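+ // Each metric below converts design units to pixels as value * textSize / upem.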
+
+ metrics->fAscent = -fTextSizeRender * SkIntToScalar(dwfm.ascent) / upem;
+ metrics->fDescent = fTextSizeRender * SkIntToScalar(dwfm.descent) / upem;
+ metrics->fLeading = fTextSizeRender * SkIntToScalar(dwfm.lineGap) / upem;
+ metrics->fXHeight = fTextSizeRender * SkIntToScalar(dwfm.xHeight) / upem;
+ metrics->fCapHeight = fTextSizeRender * SkIntToScalar(dwfm.capHeight) / upem;
+ metrics->fUnderlineThickness = fTextSizeRender * SkIntToScalar(dwfm.underlineThickness) / upem;
+ metrics->fUnderlinePosition = -(fTextSizeRender * SkIntToScalar(dwfm.underlinePosition) / upem);
+ metrics->fStrikeoutThickness = fTextSizeRender * SkIntToScalar(dwfm.strikethroughThickness) / upem;
+ metrics->fStrikeoutPosition = -(fTextSizeRender * SkIntToScalar(dwfm.strikethroughPosition) / upem);
+
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutPositionIsValid_Flag;
+
+ if (this->getDWriteTypeface()->fDWriteFontFace1.get()) {
+ DWRITE_FONT_METRICS1 dwfm1;
+ this->getDWriteTypeface()->fDWriteFontFace1->GetMetrics(&dwfm1);
+ metrics->fTop = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxTop) / upem;
+ metrics->fBottom = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxBottom) / upem;
+ metrics->fXMin = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxLeft) / upem;
+ metrics->fXMax = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxRight) / upem;
+
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ return;
+ }
+
+ AutoTDWriteTable<SkOTTableHead> head(this->getDWriteTypeface()->fDWriteFontFace.get());
+ if (head.fExists &&
+ head.fSize >= sizeof(SkOTTableHead) &&
+ head->version == SkOTTableHead::version1)
+ {
+ metrics->fTop = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMax) / upem;
+ metrics->fBottom = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMin) / upem;
+ metrics->fXMin = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMin) / upem;
+ metrics->fXMax = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMax) / upem;
+
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ return;
+ }
+
+ metrics->fTop = metrics->fAscent;
+ metrics->fBottom = metrics->fDescent;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/private/SkColorData.h"
+
+void SkScalerContext_DW::BilevelToBW(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph) {
+ const int width = glyph.width();
+ const size_t dstRB = (width + 7) >> 3;
+ uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+ int byteCount = width >> 3;
+ int bitCount = width & 7;
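+ // e.g. for width == 10: byteCount == 1 (8 full pixels), bitCount == 2, and
+ // dstRB == (10 + 7) >> 3 == 2 destination bytes per row.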
+
+ for (int y = 0; y < glyph.height(); ++y) {
+ if (byteCount > 0) {
+ for (int i = 0; i < byteCount; ++i) {
+ unsigned byte = 0;
+ byte |= src[0] & (1 << 7);
+ byte |= src[1] & (1 << 6);
+ byte |= src[2] & (1 << 5);
+ byte |= src[3] & (1 << 4);
+ byte |= src[4] & (1 << 3);
+ byte |= src[5] & (1 << 2);
+ byte |= src[6] & (1 << 1);
+ byte |= src[7] & (1 << 0);
+ dst[i] = byte;
+ src += 8;
+ }
+ }
+ if (bitCount > 0) {
+ unsigned byte = 0;
+ unsigned mask = 0x80;
+ for (int i = 0; i < bitCount; i++) {
+ byte |= (src[i]) & mask;
+ mask >>= 1;
+ }
+ dst[byteCount] = byte;
+ }
+ src += bitCount;
+ dst += dstRB;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_DW::GrayscaleToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+ for (int y = 0; y < glyph.height(); y++) {
+ for (int i = 0; i < width; i++) {
+ U8CPU a = *(src++);
+ dst[i] = sk_apply_lut_if<APPLY_PREBLEND>(a, table8);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_DW::RGBToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+ for (int y = 0; y < glyph.height(); y++) {
+ for (int i = 0; i < width; i++) {
+ U8CPU g = src[1];
+ src += 3;
+
+ // Ignore the R and B channels; the G channel looks closest to what
+ // D2D produces with grayscale AA, and there is no way to get a
+ // grayscale AA alpha texture directly from a glyph run.
+ dst[i] = sk_apply_lut_if<APPLY_PREBLEND>(g, table8);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+}
+
+template<bool APPLY_PREBLEND, bool RGB>
+void SkScalerContext_DW::RGBToLcd16(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG,
+ const uint8_t* tableB, int clearTypeLevel) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint16_t* SK_RESTRICT dst = static_cast<uint16_t*>(glyph.fImage);
+
+ for (int y = 0; y < glyph.height(); y++) {
+ for (int i = 0; i < width; i++) {
+ int r, g, b;
+ if (RGB) {
+ r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+ g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+ b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+ } else {
+ b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+ g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+ r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+ }
+ r = g + (((r - g) * clearTypeLevel) >> 8);
+ b = g + (((b - g) * clearTypeLevel) >> 8);
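+ // Linear interpolation toward the G channel: assuming clearTypeLevel is
+ // scaled so that 256 means full ClearType, level 256 keeps r and b intact
+ // while level 0 collapses both to g (plain grayscale).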
+ dst[i] = SkPack888ToRGB16(r, g, b);
+ }
+ dst = SkTAddOffset<uint16_t>(dst, dstRB);
+ }
+}
+
+const void* SkScalerContext_DW::drawDWMask(const SkGlyph& glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType)
+{
+ int sizeNeeded = glyph.width() * glyph.height();
+ if (DWRITE_TEXTURE_CLEARTYPE_3x1 == textureType) {
+ sizeNeeded *= 3;
+ }
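+ // CLEARTYPE_3x1 textures carry three horizontal subpixel samples (hence
+ // three bytes) per pixel; ALIASED_1x1 carries a single byte per pixel.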
+ if (sizeNeeded > fBits.count()) {
+ fBits.setCount(sizeNeeded);
+ }
+
+ // erase
+ memset(fBits.begin(), 0, sizeNeeded);
+
+ fXform.dx = SkFixedToFloat(glyph.getSubXFixed());
+ fXform.dy = SkFixedToFloat(glyph.getSubYFixed());
+
+ FLOAT advance = 0.0f;
+
+ UINT16 index = glyph.getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = this->getDWriteTypeface()->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &index;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+ {
+ SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+ {
+ MaybeExclusive l(this);
+ // IDWriteFactory2::CreateGlyphRunAnalysis is very bad at aliased glyphs.
+ if (this->getDWriteTypeface()->fFactory2 &&
+ (fGridFitMode == DWRITE_GRID_FIT_MODE_DISABLED ||
+ fAntiAliasMode == DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE))
+ {
+ HRNM(this->getDWriteTypeface()->fFactory2->CreateGlyphRunAnalysis(&run,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ fGridFitMode,
+ fAntiAliasMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create DW2 glyph run analysis.");
+ } else {
+ HRNM(this->getDWriteTypeface()->fFactory->CreateGlyphRunAnalysis(&run,
+ 1.0f, // pixelsPerDip,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create glyph run analysis.");
+ }
+ }
+ //NOTE: this assumes that the glyph has already been measured
+ //with an identical glyph run analysis.
+ RECT bbox;
+ bbox.left = glyph.left();
+ bbox.top = glyph.top();
+ bbox.right = glyph.left() + glyph.width();
+ bbox.bottom = glyph.top() + glyph.height();
+ {
+ MaybeShared l(this);
+ HRNM(glyphRunAnalysis->CreateAlphaTexture(textureType,
+ &bbox,
+ fBits.begin(),
+ sizeNeeded),
+ "Could not draw mask.");
+ }
+ }
+ return fBits.begin();
+}
+
+void SkScalerContext_DW::generateColorGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isColorGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+
+ memset(glyph.fImage, 0, glyph.imageSize());
+
+ SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayers;
+ getColorGlyphRun(glyph, &colorLayers);
+ SkASSERT(colorLayers.get());
+
+ SkMatrix matrix = fSkXform;
+ matrix.postTranslate(-SkIntToScalar(glyph.left()), -SkIntToScalar(glyph.top()));
+ if (this->isSubpixel()) {
+ matrix.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ }
+ SkRasterClip rc(SkIRect::MakeWH(glyph.width(), glyph.height()));
+ SkDraw draw;
+ draw.fDst = SkPixmap(SkImageInfo::MakeN32(glyph.width(), glyph.height(), kPremul_SkAlphaType),
+ glyph.fImage,
+ glyph.rowBytesUsingFormat(SkMask::Format::kARGB32_Format));
+ draw.fMatrix = &matrix;
+ draw.fRC = &rc;
+
+ SkPaint paint;
+ paint.setAntiAlias(fRenderingMode != DWRITE_RENDERING_MODE_ALIASED);
+
+ BOOL hasNextRun = FALSE;
+ while (SUCCEEDED(colorLayers->MoveNext(&hasNextRun)) && hasNextRun) {
+ const DWRITE_COLOR_GLYPH_RUN* colorGlyph;
+ HRVM(colorLayers->GetCurrentRun(&colorGlyph), "Could not get current color glyph run");
+
+ SkColor color;
+ if (colorGlyph->paletteIndex != 0xffff) {
+ color = SkColorSetARGB(sk_float_round2int(colorGlyph->runColor.a * 255),
+ sk_float_round2int(colorGlyph->runColor.r * 255),
+ sk_float_round2int(colorGlyph->runColor.g * 255),
+ sk_float_round2int(colorGlyph->runColor.b * 255));
+ } else {
+ // If all components of runColor are 0 or (equivalently) paletteIndex is 0xFFFF then
+ // the 'current brush' is used. fRec.getLuminanceColor() is roughly what is wanted
+ // here, but not quite; it will often be the wrong value because it wasn't designed
+ // for this.
+ // TODO: implement this fully, bug.skia.org/5788
+ color = fRec.getLuminanceColor();
+ }
+ paint.setColor(color);
+
+ SkPath path;
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRVM(SkDWriteGeometrySink::Create(&path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ {
+ MaybeExclusive l(this);
+ HRVM(colorGlyph->glyphRun.fontFace->GetGlyphRunOutline(
+ colorGlyph->glyphRun.fontEmSize,
+ colorGlyph->glyphRun.glyphIndices,
+ colorGlyph->glyphRun.glyphAdvances,
+ colorGlyph->glyphRun.glyphOffsets,
+ colorGlyph->glyphRun.glyphCount,
+ colorGlyph->glyphRun.isSideways,
+ colorGlyph->glyphRun.bidiLevel % 2, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+ draw.drawPath(path, paint, nullptr, true /* pathIsMutable */);
+ }
+}
+
+#ifdef USE_PNG
+void SkScalerContext_DW::generatePngGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isPngGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+ SkASSERT(this->getDWriteTypeface()->fDWriteFontFace4);
+
+ IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+ DWRITE_GLYPH_IMAGE_DATA glyphData;
+ void* glyphDataContext;
+ HRVM(fontFace4->GetGlyphImageData(glyph.getGlyphID(),
+ fTextSizeRender,
+ DWRITE_GLYPH_IMAGE_FORMATS_PNG,
+ &glyphData,
+ &glyphDataContext),
+ "Glyph image data could not be acquired.");
+ Context* context = new Context(fontFace4, glyphDataContext);
+ sk_sp<SkData> data = SkData::MakeWithProc(glyphData.imageData,
+ glyphData.imageDataSize,
+ &ReleaseProc,
+ context);
+ sk_sp<SkImage> image = SkImage::MakeFromEncoded(std::move(data));
+
+ SkBitmap dstBitmap;
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.width(), glyph.height(),
+ kN32_SkColorType,
+ kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ SkCanvas canvas(dstBitmap);
+ canvas.clear(SK_ColorTRANSPARENT);
+ canvas.translate(-glyph.left(), -glyph.top());
+ if (this->isSubpixel()) {
+ canvas.translate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ }
+ canvas.concat(fSkXform);
+ SkScalar ratio = fTextSizeRender / glyphData.pixelsPerEm;
+ canvas.scale(ratio, ratio);
+ canvas.translate(-glyphData.horizontalLeftOrigin.x, -glyphData.horizontalLeftOrigin.y);
+ canvas.drawImage(image, 0, 0, nullptr);
+}
+#endif
+
+void SkScalerContext_DW::generateImage(const SkGlyph& glyph) {
+ //Create the mask.
+ DWRITE_RENDERING_MODE renderingMode = fRenderingMode;
+ DWRITE_TEXTURE_TYPE textureType = fTextureType;
+ if (glyph.fForceBW) {
+ renderingMode = DWRITE_RENDERING_MODE_ALIASED;
+ textureType = DWRITE_TEXTURE_ALIASED_1x1;
+ }
+
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ if (fIsColorFont) {
+ if (isColorGlyph(glyph)) {
+ generateColorGlyphImage(glyph);
+ return;
+#ifdef USE_PNG
+ } else if (isPngGlyph(glyph)) {
+ generatePngGlyphImage(glyph);
+ return;
+#endif
+ }
+ }
+ SkDEBUGFAIL("Could not generate image from the given color font format.");
+ return;
+ }
+
+ const void* bits = this->drawDWMask(glyph, renderingMode, textureType);
+ if (!bits) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+
+ //Copy the mask into the glyph.
+ const uint8_t* src = (const uint8_t*)bits;
+ if (DWRITE_RENDERING_MODE_ALIASED == renderingMode) {
+ SkASSERT(SkMask::kBW_Format == glyph.fMaskFormat);
+ SkASSERT(DWRITE_TEXTURE_ALIASED_1x1 == textureType);
+ BilevelToBW(src, glyph);
+ } else if (!isLCD(fRec)) {
+ if (textureType == DWRITE_TEXTURE_ALIASED_1x1) {
+ if (fPreBlend.isApplicable()) {
+ GrayscaleToA8<true>(src, glyph, fPreBlend.fG);
+ } else {
+ GrayscaleToA8<false>(src, glyph, fPreBlend.fG);
+ }
+ } else {
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(src, glyph, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(src, glyph, fPreBlend.fG);
+ }
+ }
+ } else {
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ RGBToLcd16<true, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ } else {
+ RGBToLcd16<true, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ }
+ } else {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ RGBToLcd16<false, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ } else {
+ RGBToLcd16<false, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ }
+ }
+ }
+}
+
+bool SkScalerContext_DW::generatePath(SkGlyphID glyph, SkPath* path) {
+ SkASSERT(path);
+
+ path->reset();
+
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRBM(SkDWriteGeometrySink::Create(path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ UINT16 glyphId = SkTo<UINT16>(glyph);
+ {
+ MaybeExclusive l(this);
+ //TODO: convert to<->from DIUs? This would make a difference if hinting.
+ //It may not be needed, it appears that DirectWrite only hints at em size.
+ HRBM(this->getDWriteTypeface()->fDWriteFontFace->GetGlyphRunOutline(
+ SkScalarToFloat(fTextSizeRender),
+ &glyphId,
+ nullptr, //advances
+ nullptr, //offsets
+ 1, //num glyphs
+ FALSE, //sideways
+ FALSE, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+
+ path->transform(fSkXform);
+ return true;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
new file mode 100644
index 0000000000..ac7dae6e37
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalerContext_win_dw_DEFINED
+#define SkScalerContext_win_dw_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkScalerContext.h"
+#include "src/ports/SkTypeface_win_dw.h"
+
+#include <dwrite.h>
+#include <dwrite_2.h>
+
+class SkGlyph;
+class SkDescriptor;
+
+class SkScalerContext_DW : public SkScalerContext {
+public:
+ SkScalerContext_DW(sk_sp<DWriteFontTypeface>,
+ const SkScalerContextEffects&,
+ const SkDescriptor*);
+ ~SkScalerContext_DW() override;
+
+ // The IDWriteFontFace4 interface is only available in DWrite 3,
+ // so checking if it was found is sufficient to detect DWrite 3.
+ bool isDWrite3() { return bool(getDWriteTypeface()->fDWriteFontFace4); }
+
+protected:
+ unsigned generateGlyphCount() override;
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(SkGlyphID glyph, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ static void BilevelToBW(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph);
+
+ template<bool APPLY_PREBLEND>
+ static void GrayscaleToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND>
+ static void RGBToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND, bool RGB>
+ static void RGBToLcd16(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB,
+ int clearTypeLevel);
+
+ const void* drawDWMask(const SkGlyph& glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType);
+
+ HRESULT getBoundingBox(SkGlyph* glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType,
+ RECT* bbox);
+
+ bool isColorGlyph(const SkGlyph& glyph);
+
+ bool isPngGlyph(const SkGlyph& glyph);
+
+ DWriteFontTypeface* getDWriteTypeface() {
+ return static_cast<DWriteFontTypeface*>(this->getTypeface());
+ }
+
+ bool getColorGlyphRun(const SkGlyph& glyph, IDWriteColorGlyphRunEnumerator** colorGlyph);
+
+ void generateColorMetrics(SkGlyph* glyph);
+
+ void generateColorGlyphImage(const SkGlyph& glyph);
+
+ void generatePngMetrics(SkGlyph* glyph);
+
+ void generatePngGlyphImage(const SkGlyph& glyph);
+
+
+ SkTDArray<uint8_t> fBits;
+ /** The total matrix without the text height scale. */
+ SkMatrix fSkXform;
+ /** The total matrix without the text height scale, expressed as a DWrite matrix. */
+ DWRITE_MATRIX fXform;
+ /** The text size to render with. */
+ SkScalar fTextSizeRender;
+ /** The text size to measure with. */
+ SkScalar fTextSizeMeasure;
+ int fGlyphCount;
+ DWRITE_RENDERING_MODE fRenderingMode;
+ DWRITE_TEXTURE_TYPE fTextureType;
+ DWRITE_MEASURING_MODE fMeasuringMode;
+ DWRITE_TEXT_ANTIALIAS_MODE fAntiAliasMode;
+ DWRITE_GRID_FIT_MODE fGridFitMode;
+ bool fIsColorFont;
+ int fClearTypeLevel;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkTLS_none.cpp b/gfx/skia/skia/src/ports/SkTLS_none.cpp
new file mode 100644
index 0000000000..7d9b31f1ea
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_none.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkTLS.h"
+
+static void* gSpecific = nullptr;
+
+void* SkTLS::PlatformGetSpecific(bool) {
+ return gSpecific;
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ gSpecific = ptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkTLS_pthread.cpp b/gfx/skia/skia/src/ports/SkTLS_pthread.cpp
new file mode 100644
index 0000000000..c96271b2a2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_pthread.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkOnce.h"
+#include "src/core/SkTLS.h"
+
+#include <pthread.h>
+
+static pthread_key_t gSkTLSKey;
+
+void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
+ // Should we use forceCreateTheSlot to just return nullptr if we've never
+ // been called with forceCreateTheSlot == true?
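+ // The key is created exactly once via SkOnce; afterwards each thread reads
+ // its own value through the shared key.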
+ static SkOnce once;
+ once(pthread_key_create, &gSkTLSKey, SkTLS::Destructor);
+ return pthread_getspecific(gSkTLSKey);
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ (void)pthread_setspecific(gSkTLSKey, ptr);
+}
diff --git a/gfx/skia/skia/src/ports/SkTLS_win.cpp b/gfx/skia/skia/src/ports/SkTLS_win.cpp
new file mode 100644
index 0000000000..e598c2fb7d
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_win.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/private/SkMutex.h"
+#include "src/core/SkLeanWindows.h"
+#include "src/core/SkTLS.h"
+
+static bool gOnce = false;
+static DWORD gTlsIndex;
+
+void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
+ static SkMutex& mutex = *(new SkMutex);
+ if (!forceCreateTheSlot && !gOnce) {
+ return nullptr;
+ }
+
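+ // Double-checked locking: the unsynchronized gOnce read above is assumed
+ // benign since the slot is only ever allocated while holding the mutex.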
+ if (!gOnce) {
+ SkAutoMutexExclusive tmp(mutex);
+ if (!gOnce) {
+ gTlsIndex = TlsAlloc();
+ gOnce = true;
+ }
+ }
+ return TlsGetValue(gTlsIndex);
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ SkASSERT(gOnce);
+ (void)TlsSetValue(gTlsIndex, ptr);
+}
+
+// Call TLS destructors on thread exit. Code based on Chromium's
+// base/threading/thread_local_storage_win.cc
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:skia_tls_callback")
+
+#else
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_skia_tls_callback")
+
+#endif
+
+void NTAPI onTLSCallback(PVOID unused, DWORD reason, PVOID unused2) {
+ if ((DLL_THREAD_DETACH == reason || DLL_PROCESS_DETACH == reason) && gOnce) {
+ void* ptr = TlsGetValue(gTlsIndex);
+ if (ptr != nullptr) {
+ SkTLS::Destructor(ptr);
+ TlsSetValue(gTlsIndex, nullptr);
+ }
+ }
+}
+
+extern "C" {
+
+#ifdef _WIN64
+
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK skia_tls_callback;
+const PIMAGE_TLS_CALLBACK skia_tls_callback = onTLSCallback;
+#pragma const_seg()
+
+#else
+
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK skia_tls_callback = onTLSCallback;
+#pragma data_seg()
+
+#endif
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
new file mode 100644
index 0000000000..cdc243b59d
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
@@ -0,0 +1,533 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+// SkTypes will include Windows.h, which will pull in all of the GDI defines.
+// GDI #defines GetGlyphIndices to GetGlyphIndicesA or GetGlyphIndicesW, but
+// IDWriteFontFace has a method called GetGlyphIndices. Since this file does
+ // not use GDI, undefining GetGlyphIndices makes things less confusing.
+#undef GetGlyphIndices
+
+#include "include/core/SkData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkFontStream.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkUtils.h"
+#include "src/ports/SkScalerContext_win_dw.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTTable_fvar.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_hhea.h"
+#include "src/sfnt/SkOTTable_post.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+
+void DWriteFontTypeface::onGetFamilyName(SkString* familyName) const {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, familyName);
+}
+
+void DWriteFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ // Get the family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ SkString utf8FamilyName;
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, &utf8FamilyName);
+
+ desc->setFamilyName(utf8FamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = SkToBool(fDWriteFontFileLoader.get());
+}
+
+void DWriteFontTypeface::onCharsToGlyphs(const SkUnichar uni[], int count,
+ SkGlyphID glyphs[]) const {
+ fDWriteFontFace->GetGlyphIndices((const UINT32*)uni, count, glyphs);
+}
+
+int DWriteFontTypeface::onCountGlyphs() const {
+ return fDWriteFontFace->GetGlyphCount();
+}
+
+void DWriteFontTypeface::getPostScriptGlyphNames(SkString*) const {}
+
+int DWriteFontTypeface::onGetUPEM() const {
+ DWRITE_FONT_METRICS metrics;
+ fDWriteFontFace->GetMetrics(&metrics);
+ return metrics.designUnitsPerEm;
+}
+
+class LocalizedStrings_IDWriteLocalizedStrings : public SkTypeface::LocalizedStrings {
+public:
+ /** Takes ownership of the IDWriteLocalizedStrings. */
+ explicit LocalizedStrings_IDWriteLocalizedStrings(IDWriteLocalizedStrings* strings)
+ : fIndex(0), fStrings(strings)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ if (fIndex >= fStrings->GetCount()) {
+ return false;
+ }
+
+ // String
+ UINT32 stringLen;
+ HRBM(fStrings->GetStringLength(fIndex, &stringLen), "Could not get string length.");
+
+ SkSMallocWCHAR wString(stringLen+1);
+ HRBM(fStrings->GetString(fIndex, wString.get(), stringLen+1), "Could not get string.");
+
+ HRB(sk_wchar_to_skstring(wString.get(), stringLen, &localizedString->fString));
+
+ // Locale
+ UINT32 localeLen;
+ HRBM(fStrings->GetLocaleNameLength(fIndex, &localeLen), "Could not get locale length.");
+
+ SkSMallocWCHAR wLocale(localeLen+1);
+ HRBM(fStrings->GetLocaleName(fIndex, wLocale.get(), localeLen+1), "Could not get locale.");
+
+ HRB(sk_wchar_to_skstring(wLocale.get(), localeLen, &localizedString->fLanguage));
+
+ ++fIndex;
+ return true;
+ }
+
+private:
+ UINT32 fIndex;
+ SkTScopedComPtr<IDWriteLocalizedStrings> fStrings;
+};
+
+SkTypeface::LocalizedStrings* DWriteFontTypeface::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRNM(fDWriteFontFamily->GetFamilyNames(&familyNames), "Could not obtain family names.");
+ nameIter = sk_make_sp<LocalizedStrings_IDWriteLocalizedStrings>(familyNames.release());
+ }
+ return nameIter.release();
+}
+
+int DWriteFontTypeface::onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (FAILED(fDWriteFontFace->QueryInterface(&fontFace5))) {
+ return -1;
+ }
+
+ // Return 0 if the font is not a variable font.
+ if (!fontFace5->HasVariations()) {
+ return 0;
+ }
+
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HR_GENERAL(fontFace5->GetFontResource(&fontResource), nullptr, -1);
+ UINT32 variableAxisCount = 0;
+ for (UINT32 i = 0; i < fontAxisCount; ++i) {
+ if (fontResource->GetFontAxisAttributes(i) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ ++variableAxisCount;
+ }
+ }
+
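+ // Two-call protocol: with no output array (or one that is too small), only
+ // report how many variable axes exist.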
+ if (!coordinates || coordinateCount < 0 || (unsigned)coordinateCount < variableAxisCount) {
+ return SkTo<int>(variableAxisCount);
+ }
+
+ SkAutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisValue(fontAxisCount);
+ HR_GENERAL(fontFace5->GetFontAxisValues(fontAxisValue.get(), fontAxisCount), nullptr, -1);
+ UINT32 coordIndex = 0;
+ for (UINT32 axisIndex = 0; axisIndex < fontAxisCount; ++axisIndex) {
+ if (fontResource->GetFontAxisAttributes(axisIndex) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ coordinates[coordIndex].axis = SkEndian_SwapBE32(fontAxisValue[axisIndex].axisTag);
+ coordinates[coordIndex].value = fontAxisValue[axisIndex].value;
+ ++coordIndex;
+ }
+ }
+
+ SkASSERT(coordIndex == variableAxisCount);
+ return SkTo<int>(variableAxisCount);
+
+#endif
+
+ return -1;
+}
+
+int DWriteFontTypeface::onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (FAILED(fDWriteFontFace->QueryInterface(&fontFace5))) {
+ return -1;
+ }
+
+ // Return 0 if the font is not a variable font.
+ if (!fontFace5->HasVariations()) {
+ return 0;
+ }
+
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HR_GENERAL(fontFace5->GetFontResource(&fontResource), nullptr, -1);
+ int variableAxisCount = 0;
+ for (UINT32 i = 0; i < fontAxisCount; ++i) {
+ if (fontResource->GetFontAxisAttributes(i) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ variableAxisCount++;
+ }
+ }
+
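+ // As above, just report the count when the caller is only sizing its array.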
+ if (!parameters || parameterCount < variableAxisCount) {
+ return variableAxisCount;
+ }
+
+ SkAutoSTMalloc<8, DWRITE_FONT_AXIS_RANGE> fontAxisRange(fontAxisCount);
+ HR_GENERAL(fontResource->GetFontAxisRanges(fontAxisRange.get(), fontAxisCount), nullptr, -1);
+ SkAutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisDefaultValue(fontAxisCount);
+ HR_GENERAL(fontResource->GetDefaultFontAxisValues(fontAxisDefaultValue.get(), fontAxisCount),
+ nullptr, -1);
+ UINT32 coordIndex = 0;
+
+ for (UINT32 axisIndex = 0; axisIndex < fontAxisCount; ++axisIndex) {
+ if (fontResource->GetFontAxisAttributes(axisIndex) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ parameters[coordIndex].tag = SkEndian_SwapBE32(fontAxisDefaultValue[axisIndex].axisTag);
+ parameters[coordIndex].min = fontAxisRange[axisIndex].minValue;
+ parameters[coordIndex].def = fontAxisDefaultValue[axisIndex].value;
+ parameters[coordIndex].max = fontAxisRange[axisIndex].maxValue;
+ parameters[coordIndex].setHidden(fontResource->GetFontAxisAttributes(axisIndex) &
+ DWRITE_FONT_AXIS_ATTRIBUTES_HIDDEN);
+ // Advance to the next output slot; without this every axis would
+ // overwrite parameters[0].
+ ++coordIndex;
+ }
+ }
+
+ return variableAxisCount;
+
+#endif
+
+ return -1;
+}
+
+int DWriteFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ DWRITE_FONT_FACE_TYPE type = fDWriteFontFace->GetType();
+ if (type != DWRITE_FONT_FACE_TYPE_CFF &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return 0;
+ }
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> stream = this->openStream(&ttcIndex);
+ return stream.get() ? SkFontStream::GetTableTags(stream.get(), ttcIndex, tags) : 0;
+}
+
+size_t DWriteFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoDWriteTable table(fDWriteFontFace.get(), SkEndian_SwapBE32(tag));
+ if (!table.fExists) {
+ return 0;
+ }
+
+ if (offset > table.fSize) {
+ return 0;
+ }
+ size_t size = SkTMin(length, table.fSize - offset);
+ if (data) {
+ memcpy(data, table.fData + offset, size);
+ }
+
+ return size;
+}
+
+sk_sp<SkData> DWriteFontTypeface::onCopyTableData(SkFontTableTag tag) const {
+ const uint8_t* data;
+ UINT32 size;
+ void* lock;
+ BOOL exists;
+ fDWriteFontFace->TryGetFontTable(SkEndian_SwapBE32(tag),
+ reinterpret_cast<const void **>(&data), &size, &lock, &exists);
+ if (!exists) {
+ return nullptr;
+ }
+ struct Context {
+ Context(void* lock, IDWriteFontFace* face) : fLock(lock), fFontFace(SkRefComPtr(face)) {}
+ ~Context() { fFontFace->ReleaseFontTable(fLock); }
+ void* fLock;
+ SkTScopedComPtr<IDWriteFontFace> fFontFace;
+ };
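+ // Zero-copy: expose the table bytes directly and release the DWrite table
+ // lock only when the returned SkData is destroyed.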
+ return SkData::MakeWithProc(data, size,
+ [](const void*, void* ctx) { delete (Context*)ctx; },
+ new Context(lock, fDWriteFontFace.get()));
+}
+
+sk_sp<SkTypeface> DWriteFontTypeface::onMakeClone(const SkFontArguments& args) const {
+ // Skip if the current face index does not match the ttcIndex
+ if (fDWriteFontFace->GetIndex() != SkTo<UINT32>(args.getCollectionIndex())) {
+ return sk_ref_sp(this);
+ }
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+
+ if (SUCCEEDED(fDWriteFontFace->QueryInterface(&fontFace5)) && fontFace5->HasVariations()) {
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ UINT32 argsCoordCount = args.getVariationDesignPosition().coordinateCount;
+ SkAutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisValue(fontAxisCount);
+ HRN(fontFace5->GetFontAxisValues(fontAxisValue.get(), fontAxisCount));
+
+ for (UINT32 fontIndex = 0; fontIndex < fontAxisCount; ++fontIndex) {
+ for (UINT32 argsIndex = 0; argsIndex < argsCoordCount; ++argsIndex) {
+ if (SkEndian_SwapBE32(fontAxisValue[fontIndex].axisTag) ==
+ args.getVariationDesignPosition().coordinates[argsIndex].axis) {
+ fontAxisValue[fontIndex].value =
+ args.getVariationDesignPosition().coordinates[argsIndex].value;
+ }
+ }
+ }
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HRN(fontFace5->GetFontResource(&fontResource));
+ SkTScopedComPtr<IDWriteFontFace5> newFontFace5;
+ HRN(fontResource->CreateFontFace(fDWriteFont->GetSimulations(),
+ fontAxisValue.get(),
+ fontAxisCount,
+ &newFontFace5));
+
+ SkTScopedComPtr<IDWriteFontFace> newFontFace;
+ HRN(newFontFace5->QueryInterface(&newFontFace));
+ return DWriteFontTypeface::Make(fFactory.get(),
+ newFontFace.get(),
+ fDWriteFont.get(),
+ fDWriteFontFamily.get(),
+ fDWriteFontFileLoader.get(),
+ fDWriteFontCollectionLoader.get());
+ }
+
+#endif
+
+ return sk_ref_sp(this);
+}
+
+std::unique_ptr<SkStreamAsset> DWriteFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = fDWriteFontFace->GetIndex();
+
+ UINT32 numFiles;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, nullptr),
+ "Could not get number of font files.");
+ if (numFiles != 1) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, &fontFile), "Could not get font files.");
+
+ const void* fontFileKey;
+ UINT32 fontFileKeySize;
+ HRNM(fontFile->GetReferenceKey(&fontFileKey, &fontFileKeySize),
+ "Could not get font file reference key.");
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HRNM(fontFile->GetLoader(&fontFileLoader), "Could not get font file loader.");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(fontFileLoader->CreateStreamFromKey(fontFileKey, fontFileKeySize,
+ &fontFileStream),
+ "Could not create font file stream.");
+
+ return std::unique_ptr<SkStreamAsset>(new SkDWriteFontFileStream(fontFileStream.get()));
+}
+
+SkScalerContext* DWriteFontTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkScalerContext_DW(sk_ref_sp(const_cast<DWriteFontTypeface*>(this)), effects, desc);
+}
+
+void DWriteFontTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkFontHinting h = rec->getHinting();
+ // DirectWrite2 allows for hinting to be turned off. Force everything else to normal.
+ if (h != SkFontHinting::kNone || !fFactory2 || !fDWriteFontFace2) {
+ h = SkFontHinting::kNormal;
+ }
+ rec->setHinting(h);
+
+#if defined(SK_FONT_HOST_USE_SYSTEM_SETTINGS)
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (factory != nullptr) {
+ SkTScopedComPtr<IDWriteRenderingParams> defaultRenderingParams;
+ if (SUCCEEDED(factory->CreateRenderingParams(&defaultRenderingParams))) {
+ float gamma = defaultRenderingParams->GetGamma();
+ rec->setDeviceGamma(gamma);
+ rec->setPaintGamma(gamma);
+
+ rec->setContrast(defaultRenderingParams->GetEnhancedContrast());
+ }
+ }
+#elif defined(MOZ_SKIA)
+ rec->setContrast(fContrast);
+
+ rec->setDeviceGamma(fGamma);
+ rec->setPaintGamma(fGamma);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//PDF Support
+
+void DWriteFontTypeface::getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const {
+ unsigned glyphCount = fDWriteFontFace->GetGlyphCount();
+ sk_bzero(glyphToUnicode, sizeof(SkUnichar) * glyphCount);
+ IDWriteFontFace* fontFace = fDWriteFontFace.get();
+ int maxGlyph = -1;
+ unsigned remainingGlyphCount = glyphCount;
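+ // Scan the Unicode range, stopping early once every glyph has received its
+ // lowest-valued code point.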
+ for (UINT32 c = 0; c < 0x10FFFF && remainingGlyphCount != 0; ++c) {
+ UINT16 glyph = 0;
+ HRVM(fontFace->GetGlyphIndices(&c, 1, &glyph), "Failed to get glyph index.");
+ // Intermittent DW bug on Windows 10. See crbug.com/470146.
+ if (glyph >= glyphCount) {
+ return;
+ }
+ if (0 < glyph && glyphToUnicode[glyph] == 0) {
+ maxGlyph = SkTMax(static_cast<int>(glyph), maxGlyph);
+ glyphToUnicode[glyph] = c; // Always use lowest-index unichar.
+ --remainingGlyphCount;
+ }
+ }
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> DWriteFontTypeface::onGetAdvancedMetrics() const {
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(nullptr);
+
+ DWRITE_FONT_METRICS dwfm;
+ fDWriteFontFace->GetMetrics(&dwfm);
+
+ info.reset(new SkAdvancedTypefaceMetrics);
+
+ info->fAscent = SkToS16(dwfm.ascent);
+ info->fDescent = SkToS16(dwfm.descent);
+ info->fCapHeight = SkToS16(dwfm.capHeight);
+
+ {
+ SkTScopedComPtr<IDWriteLocalizedStrings> postScriptNames;
+ BOOL exists = FALSE;
+ if (FAILED(fDWriteFont->GetInformationalStrings(
+ DWRITE_INFORMATIONAL_STRING_POSTSCRIPT_NAME,
+ &postScriptNames,
+ &exists)) ||
+ !exists ||
+ FAILED(sk_get_locale_string(postScriptNames.get(), nullptr, &info->fPostScriptName)))
+ {
+ SkDEBUGF("Unable to get postscript name for typeface %p\n", this);
+ }
+ }
+
+ // SkAdvancedTypefaceMetrics::fFontName must actually be a family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ if (FAILED(fDWriteFontFamily->GetFamilyNames(&familyNames)) ||
+ FAILED(sk_get_locale_string(familyNames.get(), nullptr, &info->fFontName)))
+ {
+ SkDEBUGF("Unable to get family name for typeface 0x%p\n", this);
+ }
+ if (info->fPostScriptName.isEmpty()) {
+ info->fPostScriptName = info->fFontName;
+ }
+
+ DWRITE_FONT_FACE_TYPE fontType = fDWriteFontFace->GetType();
+ if (fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return info;
+ }
+
+ // Simulated fonts aren't really TrueType fonts.
+ if (fDWriteFontFace->GetSimulations() == DWRITE_FONT_SIMULATIONS_NONE) {
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ AutoTDWriteTable<SkOTTableHead> headTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTablePostScript> postTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableHorizontalHeader> hheaTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableOS2> os2Table(fDWriteFontFace.get());
+ if (!headTable.fExists || !postTable.fExists || !hheaTable.fExists || !os2Table.fExists) {
+ return info;
+ }
+
+ SkOTUtils::SetAdvancedTypefaceFlags(os2Table->version.v4.fsType, info.get());
+
+ // There are versions of DirectWrite which support named instances for system variation fonts,
+ // but no means to indicate that such a typeface is a variation.
+ AutoTDWriteTable<SkOTTableFontVariations> fvarTable(fDWriteFontFace.get());
+ if (fvarTable.fExists) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag;
+ }
+
+ //There exist CJK fonts which set the IsFixedPitch and Monospace bits,
+ //but have full-width, Latin half-width, and half-width kana.
+ bool fixedWidth = (postTable->isFixedPitch &&
+ (1 == SkEndian_SwapBE16(hheaTable->numberOfHMetrics)));
+ //Monospace
+ if (fixedWidth) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ //Italic
+ if (os2Table->version.v0.fsSelection.field.Italic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ //Serif
+ using SerifStyle = SkPanose::Data::TextAndDisplay::SerifStyle;
+ SerifStyle serifStyle = os2Table->version.v0.panose.data.textAndDisplay.bSerifStyle;
+ if (SkPanose::FamilyType::TextAndDisplay == os2Table->version.v0.panose.bFamilyType) {
+ if (SerifStyle::Cove == serifStyle ||
+ SerifStyle::ObtuseCove == serifStyle ||
+ SerifStyle::SquareCove == serifStyle ||
+ SerifStyle::ObtuseSquareCove == serifStyle ||
+ SerifStyle::Square == serifStyle ||
+ SerifStyle::Thin == serifStyle ||
+ SerifStyle::Bone == serifStyle ||
+ SerifStyle::Exaggerated == serifStyle ||
+ SerifStyle::Triangle == serifStyle)
+ {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ }
+ //Script
+ } else if (SkPanose::FamilyType::Script == os2Table->version.v0.panose.bFamilyType) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ info->fItalicAngle = SkEndian_SwapBE32(postTable->italicAngle) >> 16;
+
+ info->fBBox = SkIRect::MakeLTRB((int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMin),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMin));
+ return info;
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.h b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
new file mode 100644
index 0000000000..392dec4330
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_dw_DEFINED
+#define SkTypeface_win_dw_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkLeanWindows.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_1.h>
+#include <dwrite_2.h>
+#include <dwrite_3.h>
+
+#if !defined(__MINGW32__) && WINVER < 0x0A00
+#include "mozilla/gfx/dw-extra.h"
+#endif
+
+class SkFontDescriptor;
+struct SkScalerContextRec;
+
+static SkFontStyle get_style(IDWriteFont* font) {
+ int weight = font->GetWeight();
+ int width = font->GetStretch();
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ switch (font->GetStyle()) {
+ case DWRITE_FONT_STYLE_NORMAL: slant = SkFontStyle::kUpright_Slant; break;
+ case DWRITE_FONT_STYLE_OBLIQUE: slant = SkFontStyle::kOblique_Slant; break;
+ case DWRITE_FONT_STYLE_ITALIC: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ return SkFontStyle(weight, width, slant);
+}
+
+class DWriteFontTypeface : public SkTypeface {
+private:
+ DWriteFontTypeface(const SkFontStyle& style,
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font = nullptr,
+ IDWriteFontFamily* fontFamily = nullptr,
+ IDWriteFontFileLoader* fontFileLoader = nullptr,
+ IDWriteFontCollectionLoader* fontCollectionLoader = nullptr)
+ : SkTypeface(style, false)
+ , fFactory(SkRefComPtr(factory))
+ , fDWriteFontCollectionLoader(SkSafeRefComPtr(fontCollectionLoader))
+ , fDWriteFontFileLoader(SkSafeRefComPtr(fontFileLoader))
+ , fDWriteFontFamily(SkSafeRefComPtr(fontFamily))
+ , fDWriteFont(SkSafeRefComPtr(font))
+ , fDWriteFontFace(SkRefComPtr(fontFace))
+ , fRenderingMode(DWRITE_RENDERING_MODE_DEFAULT)
+ , fGamma(2.2f)
+ , fContrast(1.0f)
+ , fClearTypeLevel(1.0f)
+ {
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace1))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace1.get());
+ }
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace2))) {
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace2.get());
+ }
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace4))) {
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace4.get());
+ }
+ if (!SUCCEEDED(fFactory->QueryInterface(&fFactory2))) {
+ SkASSERT_RELEASE(nullptr == fFactory2.get());
+ }
+ }
+
+public:
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFactory2> fFactory2;
+ SkTScopedComPtr<IDWriteFontCollectionLoader> fDWriteFontCollectionLoader;
+ SkTScopedComPtr<IDWriteFontFileLoader> fDWriteFontFileLoader;
+ SkTScopedComPtr<IDWriteFontFamily> fDWriteFontFamily;
+ SkTScopedComPtr<IDWriteFont> fDWriteFont;
+ SkTScopedComPtr<IDWriteFontFace> fDWriteFontFace;
+ SkTScopedComPtr<IDWriteFontFace1> fDWriteFontFace1;
+ SkTScopedComPtr<IDWriteFontFace2> fDWriteFontFace2;
+ SkTScopedComPtr<IDWriteFontFace4> fDWriteFontFace4;
+
+ static sk_sp<DWriteFontTypeface> Make(
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily,
+ IDWriteFontFileLoader* fontFileLoader = nullptr,
+ IDWriteFontCollectionLoader* fontCollectionLoader = nullptr)
+ {
+ return sk_sp<DWriteFontTypeface>(
+ new DWriteFontTypeface(get_style(font), factory, fontFace, font, fontFamily,
+ fontFileLoader, fontCollectionLoader));
+ }
+
+ static DWriteFontTypeface* Create(IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ SkFontStyle aStyle,
+ DWRITE_RENDERING_MODE aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel) {
+ DWriteFontTypeface* typeface =
+ new DWriteFontTypeface(aStyle, factory, fontFace,
+ nullptr, nullptr,
+ nullptr, nullptr);
+ typeface->fRenderingMode = aRenderingMode;
+ typeface->fGamma = aGamma;
+ typeface->fContrast = aContrast;
+ typeface->fClearTypeLevel = aClearTypeLevel;
+ return typeface;
+ }
+
+ bool ForceGDI() const { return fRenderingMode == DWRITE_RENDERING_MODE_GDI_CLASSIC; }
+ DWRITE_RENDERING_MODE GetRenderingMode() const { return fRenderingMode; }
+ float GetClearTypeLevel() const { return fClearTypeLevel; }
+
+protected:
+ void weak_dispose() const override {
+ if (fDWriteFontCollectionLoader.get()) {
+ HRV(fFactory->UnregisterFontCollectionLoader(fDWriteFontCollectionLoader.get()));
+ }
+ if (fDWriteFontFileLoader.get()) {
+ HRV(fFactory->UnregisterFontFileLoader(fDWriteFontFileLoader.get()));
+ }
+
+ //SkTypefaceCache::Remove(this);
+ INHERITED::weak_dispose();
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override;
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+
+private:
+ typedef SkTypeface INHERITED;
+ DWRITE_RENDERING_MODE fRenderingMode;
+ float fGamma;
+ float fContrast;
+ float fClearTypeLevel;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
new file mode 100644
index 0000000000..8ad1dbeaf4
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIBMFamilyClass_DEFINED
+#define SkIBMFamilyClass_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkIBMFamilyClass {
+ enum class Class : SK_OT_BYTE {
+ NoClassification = 0,
+ OldstyleSerifs = 1,
+ TransitionalSerifs = 2,
+ ModernSerifs = 3,
+ ClarendonSerifs = 4,
+ SlabSerifs = 5,
+ //6 reserved for future use
+ FreeformSerifs = 7,
+ SansSerif = 8,
+ Ornamentals = 9,
+ Scripts = 10,
+ //11 reserved for future use
+ Symbolic = 12,
+ //13-15 reserved for future use
+ } familyClass;
+ union SubClass {
+ enum class OldstyleSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMRoundedLegibility = 1,
+ Garalde = 2,
+ Venetian = 3,
+ ModifiedVenetian = 4,
+ DutchModern = 5,
+ DutchTraditional = 6,
+ Contemporary = 7,
+ Calligraphic = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } oldstyleSerifs;
+ enum class TransitionalSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ DirectLine = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } transitionalSerifs;
+ enum class ModernSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Italian = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } modernSerifs;
+ enum class ClarendonSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Clarendon = 1,
+ Modern = 2,
+ Traditional = 3,
+ Newspaper = 4,
+ StubSerif = 5,
+ Monotone = 6,
+ Typewriter = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } clarendonSerifs;
+ enum class SlabSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Monotone = 1,
+ Humanist = 2,
+ Geometric = 3,
+ Swiss = 4,
+ Typewriter = 5,
+ //6-14 reserved for future use
+ Miscellaneous = 15,
+ } slabSerifs;
+ enum class FreeformSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Modern = 1,
+ //2-14 reserved for future use
+ Miscellaneous = 15,
+ } freeformSerifs;
+ enum class SansSerif : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMNeoGrotesqueGothic = 1,
+ Humanist = 2,
+ LowXRoundGeometric = 3,
+ HighXRoundGeometric = 4,
+ NeoGrotesqueGothic = 5,
+ ModifiedNeoGrotesqueGothic = 6,
+ //7-8 reserved for future use
+ TypewriterGothic = 9,
+ Matrix = 10,
+ //11-14 reserved for future use
+ Miscellaneous = 15,
+ } sansSerif;
+ enum class Ornamentals : SK_OT_BYTE {
+ NoClassification = 0,
+ Engraver = 1,
+ BlackLetter = 2,
+ Decorative = 3,
+ ThreeDimensional = 4,
+ //5-14 reserved for future use
+ Miscellaneous = 15,
+ } ornamentals;
+ enum class Scripts : SK_OT_BYTE {
+ NoClassification = 0,
+ Uncial = 1,
+ Brush_Joined = 2,
+ Formal_Joined = 3,
+ Monotone_Joined = 4,
+ Calligraphic = 5,
+ Brush_Unjoined = 6,
+ Formal_Unjoined = 7,
+ Monotone_Unjoined = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } scripts;
+ enum class Symbolic : SK_OT_BYTE {
+ NoClassification = 0,
+ //1-2 reserved for future use
+ MixedSerif = 3,
+ //4-5 reserved for future use
+ OldstyleSerif = 6,
+ NeoGrotesqueSansSerif = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } symbolic;
+ } familySubClass;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkIBMFamilyClass) == 2, "sizeof_SkIBMFamilyClass_not_2");
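+
+// Reading sketch (illustrative): for a record whose familyClass is
+// Class::SansSerif, the subclass byte is viewed through the matching union
+// member, e.g. familySubClass.sansSerif == SubClass::SansSerif::Humanist.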
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTableTypes.h b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
new file mode 100644
index 0000000000..c5f352e24b
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTableTypes_DEFINED
+#define SkOTTableTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkEndian.h"
+
+//All SK_OT_ prefixed types should be considered as big endian.
+typedef uint8_t SK_OT_BYTE;
+#if CHAR_BIT == 8
+typedef signed char SK_OT_CHAR; //easier to debug
+#else
+typedef int8_t SK_OT_CHAR;
+#endif
+typedef uint16_t SK_OT_SHORT;
+typedef uint16_t SK_OT_USHORT;
+typedef uint32_t SK_OT_ULONG;
+typedef uint32_t SK_OT_LONG;
+//16.16 Signed fixed point representation.
+typedef int32_t SK_OT_Fixed;
+//2.14 Signed fixed point representation.
+typedef uint16_t SK_OT_F2DOT14;
+//F units are the units of measurement in em space.
+typedef uint16_t SK_OT_FWORD;
+typedef uint16_t SK_OT_UFWORD;
+//Number of seconds since 12:00 midnight, January 1, 1904.
+typedef uint64_t SK_OT_LONGDATETIME;
+
+#define SK_OT_BYTE_BITFIELD SK_UINT8_BITFIELD
+
+template<typename T> class SkOTTableTAG {
+public:
+ /**
+ * SkOTTableTAG<T>::value is the big endian value of an OpenType table tag.
+ * It may be directly compared with raw big endian table data.
+ */
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(
+ SkSetFourByteTag(T::TAG0, T::TAG1, T::TAG2, T::TAG3)
+ );
+};
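+
+// Usage sketch: for a table struct T declaring TAG0..TAG3, the constant
+// compares against a raw tag read from a font file without any byte swap,
+// e.g. bool match = (rawTag == SkOTTableTAG<T>::value); where rawTag is an
+// SK_OT_ULONG copied straight out of the table directory.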
+
+/** SkOTSetUSHORTBit<N>::value is an SK_OT_USHORT with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetUSHORTBit {
+ static_assert(N < 16, "NTooBig");
+ static const uint16_t bit = 1u << N;
+ static const SK_OT_USHORT value = SkTEndian_SwapBE16(bit);
+};
+
+/** SkOTSetULONGBit<N>::value is an SK_OT_ULONG with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetULONGBit {
+ static_assert(N < 32, "NTooBig");
+ static const uint32_t bit = 1u << N;
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(bit);
+};
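+
+// For example, SkOTSetUSHORTBit<1>::value is the big-endian encoding of
+// 0x0002, so it masks directly against unswapped table fields:
+//   bool restricted = (fsTypeValue & SkOTSetUSHORTBit<1>::value) != 0;
+// (fsTypeValue here is an illustrative raw SK_OT_USHORT.)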
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
new file mode 100644
index 0000000000..c8d887a31b
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBDT_DEFINED
+#define SkOTTable_EBDT_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapData {
+ static const SK_OT_CHAR TAG0 = 'E';
+ static const SK_OT_CHAR TAG1 = 'B';
+ static const SK_OT_CHAR TAG2 = 'D';
+ static const SK_OT_CHAR TAG3 = 'T';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapData>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ struct BigGlyphMetrics {
+ SK_OT_BYTE height;
+ SK_OT_BYTE width;
+ SK_OT_CHAR horiBearingX;
+ SK_OT_CHAR horiBearingY;
+ SK_OT_BYTE horiAdvance;
+ SK_OT_CHAR vertBearingX;
+ SK_OT_CHAR vertBearingY;
+ SK_OT_BYTE vertAdvance;
+ };
+
+ struct SmallGlyphMetrics {
+ SK_OT_BYTE height;
+ SK_OT_BYTE width;
+ SK_OT_CHAR bearingX;
+ SK_OT_CHAR bearingY;
+ SK_OT_BYTE advance;
+ };
+
+ // Small metrics, byte-aligned data.
+ struct Format1 {
+ SmallGlyphMetrics smallGlyphMetrics;
+ //SK_OT_BYTE[] byteAlignedBitmap;
+ };
+
+ // Small metrics, bit-aligned data.
+ struct Format2 {
+ SmallGlyphMetrics smallGlyphMetrics;
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ // Format 3 is not used.
+
+ // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), compressed data.
+ // Only used on Mac.
+ struct Format4 {
+ SK_OT_ULONG whiteTreeOffset;
+ SK_OT_ULONG blackTreeOffset;
+ SK_OT_ULONG glyphDataOffset;
+ };
+
+ // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), bit-aligned data.
+ struct Format5 {
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ // Big metrics, byte-aligned data.
+ struct Format6 {
+ BigGlyphMetrics bigGlyphMetrics;
+ //SK_OT_BYTE[] byteAlignedBitmap;
+ };
+
+ // Big metrics, bit-aligned data.
+ struct Format7 {
+ BigGlyphMetrics bigGlyphMetrics;
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ struct EBDTComponent {
+ SK_OT_USHORT glyphCode; // Component glyph code
+ SK_OT_CHAR xOffset; // Position of component left
+ SK_OT_CHAR yOffset; // Position of component top
+ };
+
+ struct Format8 {
+ SmallGlyphMetrics smallMetrics; // Metrics information for the glyph
+ SK_OT_BYTE pad; // Pad to short boundary
+ SK_OT_USHORT numComponents; // Number of components
+ //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
+ };
+
+ struct Format9 {
+ BigGlyphMetrics bigMetrics; // Metrics information for the glyph
+ SK_OT_USHORT numComponents; // Number of components
+ //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
+ };
+};
+
+#pragma pack(pop)
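+
+// Validity sketch (ebdt is an assumed pointer at the raw table bytes):
+//   if (ebdt->version != SkOTTableEmbeddedBitmapData::version_initial) {
+//       /* unknown EBDT version; bail out */
+//   }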
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
new file mode 100644
index 0000000000..a97e45fa29
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBLC_DEFINED
+#define SkOTTable_EBLC_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_EBDT.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapLocation {
+ static const SK_OT_CHAR TAG0 = 'E';
+ static const SK_OT_CHAR TAG1 = 'B';
+ static const SK_OT_CHAR TAG2 = 'L';
+ static const SK_OT_CHAR TAG3 = 'C';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapLocation>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ SK_OT_ULONG numSizes;
+
+ struct SbitLineMetrics {
+ SK_OT_CHAR ascender;
+ SK_OT_CHAR descender;
+ SK_OT_BYTE widthMax;
+ SK_OT_CHAR caretSlopeNumerator;
+ SK_OT_CHAR caretSlopeDenominator;
+ SK_OT_CHAR caretOffset;
+ SK_OT_CHAR minOriginSB;
+ SK_OT_CHAR minAdvanceSB;
+ SK_OT_CHAR maxBeforeBL;
+ SK_OT_CHAR minAfterBL;
+ SK_OT_CHAR pad1;
+ SK_OT_CHAR pad2;
+ };
+
+ struct BitmapSizeTable {
+ SK_OT_ULONG indexSubTableArrayOffset; //offset to indexSubtableArray from beginning of EBLC.
+ SK_OT_ULONG indexTablesSize; //number of bytes in corresponding index subtables and array
+ SK_OT_ULONG numberOfIndexSubTables; //an index subtable for each range or format change
+ SK_OT_ULONG colorRef; //not used; set to 0.
+ SbitLineMetrics hori; //line metrics for text rendered horizontally
+ SbitLineMetrics vert; //line metrics for text rendered vertically
+ SK_OT_USHORT startGlyphIndex; //lowest glyph index for this size
+ SK_OT_USHORT endGlyphIndex; //highest glyph index for this size
+ SK_OT_BYTE ppemX; //horizontal pixels per Em
+ SK_OT_BYTE ppemY; //vertical pixels per Em
+ struct BitDepth {
+ enum Value : SK_OT_BYTE {
+ BW = 1,
+ Gray4 = 2,
+ Gray16 = 4,
+ Gray256 = 8,
+ };
+ SK_OT_BYTE value;
+        } bitDepth; //the Microsoft rasterizer v.1.7 or greater supports bit depths of 1, 2, 4, and 8
+ union Flags {
+ struct Field {
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Horizontal, // Horizontal small glyph metrics
+ Vertical, // Vertical small glyph metrics
+ Reserved02,
+ Reserved03,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_CHAR Horizontal = 1u << 0;
+ static const SK_OT_CHAR Vertical = 1u << 1;
+ SK_OT_CHAR value;
+ } raw;
+ } flags;
+ }; //bitmapSizeTable[numSizes];
+
+ struct IndexSubTableArray {
+ SK_OT_USHORT firstGlyphIndex; //first glyph code of this range
+ SK_OT_USHORT lastGlyphIndex; //last glyph code of this range (inclusive)
+ SK_OT_ULONG additionalOffsetToIndexSubtable; //add to BitmapSizeTable::indexSubTableArrayOffset to get offset from beginning of 'EBLC'
+ }; //indexSubTableArray[BitmapSizeTable::numberOfIndexSubTables];
+
+ struct IndexSubHeader {
+ SK_OT_USHORT indexFormat; //format of this indexSubTable
+ SK_OT_USHORT imageFormat; //format of 'EBDT' image data
+ SK_OT_ULONG imageDataOffset; //offset to image data in 'EBDT' table
+ };
+
+ // Variable metrics glyphs with 4 byte offsets
+ struct IndexSubTable1 {
+ IndexSubHeader header;
+ //SK_OT_ULONG offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph
+ //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
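+        //Since the array carries one extra trailing entry, the byte length of
+        //glyph i's image is offsetArray[i + 1] - offsetArray[i].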
+ };
+
+ // All Glyphs have identical metrics
+ struct IndexSubTable2 {
+ IndexSubHeader header;
+ SK_OT_ULONG imageSize; // all glyphs are of the same size
+ SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; // all glyphs have the same metrics; glyph data may be compressed, byte-aligned, or bit-aligned
+ };
+
+ // Variable metrics glyphs with 2 byte offsets
+ struct IndexSubTable3 {
+ IndexSubHeader header;
+ //SK_OT_USHORT offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph, may have extra element to force even number of elements
+ //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
+ };
+
+ // Variable metrics glyphs with sparse glyph codes
+ struct IndexSubTable4 {
+ IndexSubHeader header;
+ SK_OT_ULONG numGlyphs;
+ struct CodeOffsetPair {
+ SK_OT_USHORT glyphCode;
+ SK_OT_USHORT offset; //location in EBDT
+ }; //glyphArray[numGlyphs+1]
+ };
+
+ // Constant metrics glyphs with sparse glyph codes
+ struct IndexSubTable5 {
+ IndexSubHeader header;
+ SK_OT_ULONG imageSize; //all glyphs have the same data size
+ SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; //all glyphs have the same metrics
+ SK_OT_ULONG numGlyphs;
+ //SK_OT_USHORT glyphCodeArray[numGlyphs] //must have even number of entries (set pad to 0)
+ };
+
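+    // Dispatch sketch: read header.indexFormat first, then view the same
+    // bytes through the matching formatN member of the union below.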
+ union IndexSubTable {
+ IndexSubHeader header;
+ IndexSubTable1 format1;
+ IndexSubTable2 format2;
+ IndexSubTable3 format3;
+ IndexSubTable4 format4;
+ IndexSubTable5 format5;
+ };
+
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
new file mode 100644
index 0000000000..1e60ec0ed2
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBSC_DEFINED
+#define SkOTTable_EBSC_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_EBLC.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapScaling {
+ static const SK_OT_CHAR TAG0 = 'E';
+    static const SK_OT_CHAR TAG1 = 'B';
+    static const SK_OT_CHAR TAG2 = 'S';
+ static const SK_OT_CHAR TAG3 = 'C';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapScaling>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ SK_OT_ULONG numSizes;
+
+ struct BitmapScaleTable {
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics hori;
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics vert;
+ SK_OT_BYTE ppemX; //target horizontal pixels per EM
+ SK_OT_BYTE ppemY; //target vertical pixels per EM
+ SK_OT_BYTE substitutePpemX; //use bitmaps of this size
+ SK_OT_BYTE substitutePpemY; //use bitmaps of this size
+ }; //bitmapScaleTable[numSizes];
+};
+
+#pragma pack(pop)
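+
+// Usage sketch: when no EBLC strike exists for the requested size, scan the
+// numSizes BitmapScaleTable entries for matching ppemX/ppemY and render from
+// the strike at substitutePpemX/substitutePpemY instead.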
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
new file mode 100644
index 0000000000..92619c48dd
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_DEFINED
+#define SkOTTable_OS_2_DEFINED
+
+#include "src/sfnt/SkOTTable_OS_2_V0.h"
+#include "src/sfnt/SkOTTable_OS_2_V1.h"
+#include "src/sfnt/SkOTTable_OS_2_V2.h"
+#include "src/sfnt/SkOTTable_OS_2_V3.h"
+#include "src/sfnt/SkOTTable_OS_2_V4.h"
+#include "src/sfnt/SkOTTable_OS_2_VA.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2 {
+ static constexpr SK_OT_CHAR TAG0 = 'O';
+ static constexpr SK_OT_CHAR TAG1 = 'S';
+ static constexpr SK_OT_CHAR TAG2 = '/';
+ static constexpr SK_OT_CHAR TAG3 = '2';
+ static constexpr SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableOS2>::value;
+
+ union Version {
+ SK_OT_USHORT version;
+
+ //original V0 TT
+ struct VA : SkOTTableOS2_VA { } vA;
+ struct V0 : SkOTTableOS2_V0 { } v0;
+ struct V1 : SkOTTableOS2_V1 { } v1;
+ struct V2 : SkOTTableOS2_V2 { } v2;
+ //makes fsType 0-3 exclusive
+ struct V3 : SkOTTableOS2_V3 { } v3;
+ //defines fsSelection bits 7-9
+ struct V4 : SkOTTableOS2_V4 { } v4;
+ } version;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2::Version::VA) == 68, "sizeof_SkOTTableOS2__VA_not_68");
+static_assert(sizeof(SkOTTableOS2::Version::V0) == 78, "sizeof_SkOTTableOS2__V0_not_78");
+static_assert(sizeof(SkOTTableOS2::Version::V1) == 86, "sizeof_SkOTTableOS2__V1_not_86");
+static_assert(sizeof(SkOTTableOS2::Version::V2) == 96, "sizeof_SkOTTableOS2__V2_not_96");
+static_assert(sizeof(SkOTTableOS2::Version::V3) == 96, "sizeof_SkOTTableOS2__V3_not_96");
+static_assert(sizeof(SkOTTableOS2::Version::V4) == 96, "sizeof_SkOTTableOS2__V4_not_96");
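+
+// Dispatch sketch (os2 and tableSize are assumed names for the raw table
+// pointer and its byte length): compare version.version against each
+// SkOTTableOS2_V*::VERSION, newest first. VA and V0 share version number 0,
+// so they can only be told apart by size:
+//   if (tableSize >= sizeof(SkOTTableOS2::Version::V0)) { /* v0 */ }
+//   else { /* vA */ }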
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
new file mode 100644
index 0000000000..286b7e2720
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V0_DEFINED
+#define SkOTTable_OS_2_V0_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V0 {
+ SK_OT_USHORT version;
+ //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
+ //The only way to differentiate these two versions is by the size of the table.
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
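+    // e.g. fsType.raw.value == Type::Raw::Installable means embedding is
+    // unrestricted, while (fsType.raw.value & Type::Raw::RestrictedMask) != 0
+    // flags restricted-license embedding.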
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ SK_OT_ULONG ulCharRange[4];
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V0) == 78, "sizeof_SkOTTableOS2_V0_not_78");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
new file mode 100644
index 0000000000..0f58eb8f1e
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V1_DEFINED
+#define SkOTTable_OS_2_V1_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V1 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(1);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ BasicGeorgian,
+ GeorgianExtended,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ GreekSymbolsAndCoptic,
+ Cyrillic,
+ Armenian,
+ BasicHebrew,
+ HebrewExtendedAB,
+ BasicArabic,
+ ArabicExtended,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ BasicGreek)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ Reserved057,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ CJKMiscellaneous,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved088,
+ Reserved089,
+ Reserved090,
+ Reserved091,
+ Reserved092,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved080,
+ Reserved081,
+ Reserved082,
+ Reserved083,
+ Reserved084,
+ Reserved085,
+ Reserved086,
+ Reserved087)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved072,
+ Reserved073,
+ Reserved074,
+ Reserved075,
+ Reserved076,
+ Reserved077,
+ Reserved078,
+ Reserved079)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Reserved70,
+ Reserved71)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG BasicGreekMask = SkOTSetULONGBit<7>::value;
+                static const SK_OT_ULONG GreekSymbolsAndCopticMask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG BasicHebrewMask = SkOTSetULONGBit<11>::value;
+ static const SK_OT_ULONG HebrewExtendedABMask = SkOTSetULONGBit<12>::value;
+ static const SK_OT_ULONG BasicArabicMask = SkOTSetULONGBit<13>::value;
+ static const SK_OT_ULONG ArabicExtendedMask = SkOTSetULONGBit<14>::value;
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG BasicGeorgianMask = SkOTSetULONGBit<26>::value;
+ static const SK_OT_ULONG GeorgianExtendedMask = SkOTSetULONGBit<27>::value;
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ //Reserved
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
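+    // e.g. (ulUnicodeRange.raw.value[0] & UnicodeRange::Raw::l0::CyrillicMask)
+    // != 0 indicates the font declares Cyrillic coverage.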
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V1) == 86, "sizeof_SkOTTableOS2_V1_not_86");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
new file mode 100644
index 0000000000..1fa053dae6
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V2_DEFINED
+#define SkOTTable_OS_2_V2_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V2 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(2);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ Greek)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ Surrogates,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ CJKMiscellaneous,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved088,
+ Reserved089,
+ Reserved090,
+ Reserved091,
+ Reserved092,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Reserved084,
+ Reserved085,
+ Reserved086,
+ Reserved087)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG SurrogatesMask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V2) == 96, "sizeof_SkOTTableOS2_V2_not_96");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
new file mode 100644
index 0000000000..f390269930
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V3_DEFINED
+#define SkOTTable_OS_2_V3_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V3 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(3);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ Reserved053,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V3) == 96, "sizeof_SkOTTableOS2_V3_not_96");
+
+#endif
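
The same storage is readable two ways: the Field bitfields and the Raw masks describe identical bytes. A short sketch of that equivalence on fsSelection, assuming a validated v3 table; the helper name is illustrative:

    #include "src/sfnt/SkOTTable_OS_2_V3.h"

    static bool isItalic(const SkOTTableOS2_V3& os2) {
        // Bitfield view, with byte order handled by SK_OT_BYTE_BITFIELD...
        const bool viaField = os2.fsSelection.field.Italic;
        // ...and the pre-swapped mask view; the two always agree.
        const bool viaRaw = 0 != (os2.fsSelection.raw.value
                                  & SkOTTableOS2_V3::Selection::Raw::ItalicMask);
        return viaField && viaRaw;
    }
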
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
new file mode 100644
index 0000000000..46712e0aff
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V4_DEFINED
+#define SkOTTable_OS_2_V4_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V4 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(4);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Balinese,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Coptic,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Vai,
+ Arabic,
+ NKo,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Phoenician,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ PhagsPa,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Limbu,
+ TaiLe,
+ NewTaiLue)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ PhaistosDisc,
+ Carian_Lycian_Lydian,
+ DominoTiles_MahjongTiles,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Sundanese,
+ Lepcha,
+ OlChiki,
+ Saurashtra,
+ KayahLi,
+ Rejang,
+ Cham,
+ AncientSymbols)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ OldPersian,
+ Shavian,
+ Osmanya,
+ CypriotSyllabary,
+ Kharoshthi,
+ TaiXuanJingSymbols,
+ Cuneiform,
+ CountingRodNumerals)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Buginese,
+ Glagolitic,
+ Tifinagh,
+ YijingHexagramSymbols,
+ SylotiNagri,
+ LinearB_AegeanNumbers,
+ AncientGreekNumbers,
+ Ugaritic)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG CopticMask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ static const SK_OT_ULONG VaiMask = SkOTSetULONGBit<12>::value;
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ static const SK_OT_ULONG NKoMask = SkOTSetULONGBit<14>::value;
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ static const SK_OT_ULONG BalineseMask = SkOTSetULONGBit<27>::value;
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG PhagsPaMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG PhoenicianMask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ static const SK_OT_ULONG LimbuMask = SkOTSetULONGBit<93 - 64>::value;
+ static const SK_OT_ULONG TaiLeMask = SkOTSetULONGBit<94 - 64>::value;
+ static const SK_OT_ULONG NewTaiLueMask = SkOTSetULONGBit<95 - 64>::value;
+ };
+ struct l3 {
+ static const SK_OT_ULONG BugineseMask = SkOTSetULONGBit<96 - 96>::value;
+ static const SK_OT_ULONG GlagoliticMask = SkOTSetULONGBit<97 - 96>::value;
+ static const SK_OT_ULONG TifinaghMask = SkOTSetULONGBit<98 - 96>::value;
+ static const SK_OT_ULONG YijingHexagramSymbolsMask = SkOTSetULONGBit<99 - 96>::value;
+ static const SK_OT_ULONG SylotiNagriMask = SkOTSetULONGBit<100 - 96>::value;
+ static const SK_OT_ULONG LinearB_AegeanNumbersMask = SkOTSetULONGBit<101 - 96>::value;
+ static const SK_OT_ULONG AncientGreekNumbersMask = SkOTSetULONGBit<102 - 96>::value;
+ static const SK_OT_ULONG UgariticMask = SkOTSetULONGBit<103 - 96>::value;
+ static const SK_OT_ULONG OldPersianMask = SkOTSetULONGBit<104 - 96>::value;
+ static const SK_OT_ULONG ShavianMask = SkOTSetULONGBit<105 - 96>::value;
+ static const SK_OT_ULONG OsmanyaMask = SkOTSetULONGBit<106 - 96>::value;
+ static const SK_OT_ULONG CypriotSyllabaryMask = SkOTSetULONGBit<107 - 96>::value;
+ static const SK_OT_ULONG KharoshthiMask = SkOTSetULONGBit<108 - 96>::value;
+ static const SK_OT_ULONG TaiXuanJingSymbolsMask = SkOTSetULONGBit<109 - 96>::value;
+ static const SK_OT_ULONG CuneiformMask = SkOTSetULONGBit<110 - 96>::value;
+ static const SK_OT_ULONG CountingRodNumeralsMask = SkOTSetULONGBit<111 - 96>::value;
+ static const SK_OT_ULONG SundaneseMask = SkOTSetULONGBit<112 - 96>::value;
+ static const SK_OT_ULONG LepchaMask = SkOTSetULONGBit<113 - 96>::value;
+ static const SK_OT_ULONG OlChikiMask = SkOTSetULONGBit<114 - 96>::value;
+ static const SK_OT_ULONG SaurashtraMask = SkOTSetULONGBit<115 - 96>::value;
+ static const SK_OT_ULONG KayahLiMask = SkOTSetULONGBit<116 - 96>::value;
+ static const SK_OT_ULONG RejangMask = SkOTSetULONGBit<117 - 96>::value;
+ static const SK_OT_ULONG ChamMask = SkOTSetULONGBit<118 - 96>::value;
+ static const SK_OT_ULONG AncientSymbolsMask = SkOTSetULONGBit<119 - 96>::value;
+ static const SK_OT_ULONG PhaistosDiscMask = SkOTSetULONGBit<120 - 96>::value;
+ static const SK_OT_ULONG Carian_Lycian_LydianMask = SkOTSetULONGBit<121 - 96>::value;
+ static const SK_OT_ULONG DominoTiles_MahjongTilesMask = SkOTSetULONGBit<122 - 96>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WWS,
+ Oblique,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ UseTypoMetrics)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ static const SK_OT_USHORT UseTypoMetricsMask = SkOTSetUSHORTBit<7>::value;
+ static const SK_OT_USHORT WWSMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT ObliqueMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V4) == 96, "sizeof_SkOTTableOS2_V4_not_96");
+
+#endif
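
Version 4 is where fsSelection gains the UseTypoMetrics, WWS, and Oblique bits. A sketch of the metric selection the first of these implies, assuming a validated table; the helper name is illustrative:

    #include "src/core/SkEndian.h"
    #include "src/sfnt/SkOTTable_OS_2_V4.h"

    // Prefer the typographic ascender when the font opts in via UseTypoMetrics;
    // otherwise fall back to the (unsigned) Windows ascent.
    static int32_t ascentOf(const SkOTTableOS2_V4& os2) {
        const bool useTypo = 0 != (os2.fsSelection.raw.value
                                   & SkOTTableOS2_V4::Selection::Raw::UseTypoMetricsMask);
        return useTypo ? (int16_t)SkEndian_SwapBE16(os2.sTypoAscender)
                       : (uint16_t)SkEndian_SwapBE16(os2.usWinAscent);
    }
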
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
new file mode 100644
index 0000000000..3476c99377
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_VA_DEFINED
+#define SkOTTable_OS_2_VA_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+//Original V0 TT
+struct SkOTTableOS2_VA {
+ SK_OT_USHORT version;
+ //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
+ //The only way to differentiate these two versions is by the size of the table.
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ UltraLight = SkTEndian_SwapBE16(1),
+ ExtraLight = SkTEndian_SwapBE16(2),
+ Light = SkTEndian_SwapBE16(3),
+ SemiLight = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiBold = SkTEndian_SwapBE16(6),
+ Bold = SkTEndian_SwapBE16(7),
+ ExtraBold = SkTEndian_SwapBE16(8),
+ UltraBold = SkTEndian_SwapBE16(9),
+ } value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ SK_OT_ULONG ulCharRange[4];
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_VA) == 68, "sizeof_SkOTTableOS2_VA_not_68");
+
+#endif
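
Since VA and V0 share version number 0, as the comment above notes, the only reliable discriminator is the table length from the font directory. A sketch, with an illustrative helper name:

    #include "src/sfnt/SkOTTable_OS_2_VA.h"

    // A 68-byte version-0 OS/2 table is the original TrueType (VA) layout;
    // the OpenType V0 layout is longer.
    static bool isOriginalTrueTypeOS2(SK_OT_USHORT versionBE, size_t tableLength) {
        return versionBE == SkOTTableOS2_VA::VERSION
            && tableLength == sizeof(SkOTTableOS2_VA);
    }
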
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h b/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h
new file mode 100644
index 0000000000..5720e64d01
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_fvar_DEFINED
+#define SkOTTable_fvar_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableFontVariations {
+ static const SK_OT_CHAR TAG0 = 'f';
+ static const SK_OT_CHAR TAG1 = 'v';
+ static const SK_OT_CHAR TAG2 = 'a';
+ static const SK_OT_CHAR TAG3 = 'r';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableFontVariations>::value;
+
+ SK_OT_USHORT majorVersion;
+ SK_OT_USHORT minorVersion;
+ SK_OT_USHORT offsetToAxesArray;
+ SK_OT_USHORT reserved;
+ SK_OT_USHORT axisCount;
+ SK_OT_USHORT axisSize; // Must be 0x0014 in v1.0
+ SK_OT_USHORT instanceCount;
+ SK_OT_USHORT instanceSize; // Must be axisCount * sizeof(Fixed) + (4 | 6)
+
+ struct VariationAxisRecord {
+ SK_OT_ULONG axisTag;
+ SK_OT_Fixed minValue;
+ SK_OT_Fixed defaultValue;
+ SK_OT_Fixed maxValue;
+ SK_OT_USHORT flags; // Must be 0
+ SK_OT_USHORT axisNameID;
+ }; // axes[axisCount];
+
+ template <size_t AxisCount> struct InstanceRecord {
+ SK_OT_USHORT subfamilyNameID;
+ SK_OT_USHORT flags; // Must be 0
+ SK_OT_Fixed coordinates[AxisCount];
+ SK_OT_USHORT postScriptNameID;
+ }; // instances[instanceCount];
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableFontVariations, instanceSize) == 14, "SkOTTableFontVariations_instanceSize_not_at_14");
+static_assert(sizeof(SkOTTableFontVariations) == 16, "sizeof_SkOTTableFontVariations_not_16");
+
+#endif
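
Because axisSize and instanceSize are read from the table rather than assumed, the records after the header must be addressed with byte strides. A sketch of the axis lookup this layout implies; the helper name is illustrative, and the caller is assumed to have verified the offsets against the table length:

    #include "src/core/SkEndian.h"
    #include "src/sfnt/SkOTTable_fvar.h"

    static const SkOTTableFontVariations::VariationAxisRecord*
    axisAt(const SkOTTableFontVariations* fvar, int index) {
        const SK_OT_BYTE* axes = reinterpret_cast<const SK_OT_BYTE*>(fvar)
                               + SkEndian_SwapBE16(fvar->offsetToAxesArray);
        // Stride by axisSize, not sizeof(VariationAxisRecord), in case a later
        // minor version grows the record.
        return reinterpret_cast<const SkOTTableFontVariations::VariationAxisRecord*>(
                axes + index * SkEndian_SwapBE16(fvar->axisSize));
    }
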
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
new file mode 100644
index 0000000000..d99c95130d
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_gasp_DEFINED
+#define SkOTTable_gasp_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGridAndScanProcedure {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGridAndScanProcedure>::value;
+
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT version0 = SkTEndian_SwapBE16(0);
+ static const SK_OT_USHORT version1 = SkTEndian_SwapBE16(1);
+
+ SK_OT_USHORT numRanges;
+
+ struct GaspRange {
+ SK_OT_USHORT maxPPEM;
+ union behavior {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Gridfit,
+ DoGray,
+ SymmetricGridfit, // Version 1
+ SymmetricSmoothing, // Version 1
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT GridfitMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT DoGrayMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT SymmetricGridfitMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT SymmetricSmoothingMask = SkTEndian_SwapBE16(1 << 3);
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ }; //gaspRange[numRanges]
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableGridAndScanProcedure, numRanges) == 2, "SkOTTableGridAndScanProcedure_numRanges_not_at_2");
+static_assert(sizeof(SkOTTableGridAndScanProcedure) == 4, "sizeof_SkOTTableGridAndScanProcedure_not_4");
+
+#endif
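
The GaspRange records follow the four-byte header in increasing order of maxPPEM, so the first range that covers the requested size supplies the hinting flags. A sketch; the helper name is illustrative, and numRanges is assumed checked against the table length:

    #include "src/core/SkEndian.h"
    #include "src/sfnt/SkOTTable_gasp.h"

    static const SkOTTableGridAndScanProcedure::GaspRange*
    rangeFor(const SkOTTableGridAndScanProcedure* gasp, uint16_t ppem) {
        // sizeof(SkOTTableGridAndScanProcedure) == 4, so gasp + 1 is the first range.
        const SkOTTableGridAndScanProcedure::GaspRange* range =
                reinterpret_cast<const SkOTTableGridAndScanProcedure::GaspRange*>(gasp + 1);
        for (uint16_t n = SkEndian_SwapBE16(gasp->numRanges); n > 0; --n, ++range) {
            if (ppem <= SkEndian_SwapBE16(range->maxPPEM)) {
                return range;
            }
        }
        return nullptr;  // malformed table: no range covered this ppem
    }
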
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
new file mode 100644
index 0000000000..98c4b2021d
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_glyf_DEFINED
+#define SkOTTable_glyf_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGlyphData;
+
+extern uint8_t const * const SK_OT_GlyphData_NoOutline;
+
+struct SkOTTableGlyph {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'l';
+ static const SK_OT_CHAR TAG2 = 'y';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGlyph>::value;
+
+ class Iterator {
+ public:
+ Iterator(const SkOTTableGlyph& glyf,
+ const SkOTTableIndexToLocation& loca,
+ SkOTTableHead::IndexToLocFormat locaFormat)
+ : fGlyf(glyf)
+ , fLocaFormat(SkOTTableHead::IndexToLocFormat::ShortOffsets == locaFormat.value ? 0 : 1)
+ , fCurrentGlyphOffset(0)
+ { fLocaPtr.shortOffset = reinterpret_cast<const SK_OT_USHORT*>(&loca); }
+
+ void advance(uint16_t num) {
+ fLocaPtr.shortOffset += num << fLocaFormat;
+ fCurrentGlyphOffset = fLocaFormat ? SkEndian_SwapBE32(*fLocaPtr.longOffset)
+ : uint32_t(SkEndian_SwapBE16(*fLocaPtr.shortOffset) << 1);
+ }
+ const SkOTTableGlyphData* next() {
+ uint32_t previousGlyphOffset = fCurrentGlyphOffset;
+ advance(1);
+ if (previousGlyphOffset == fCurrentGlyphOffset) {
+ return reinterpret_cast<const SkOTTableGlyphData*>(&SK_OT_GlyphData_NoOutline);
+ } else {
+ return reinterpret_cast<const SkOTTableGlyphData*>(
+ reinterpret_cast<const SK_OT_BYTE*>(&fGlyf) + previousGlyphOffset
+ );
+ }
+ }
+ private:
+ const SkOTTableGlyph& fGlyf;
+ uint16_t fLocaFormat; //0 or 1
+ uint32_t fCurrentGlyphOffset;
+ union LocaPtr {
+ const SK_OT_USHORT* shortOffset;
+ const SK_OT_ULONG* longOffset;
+ } fLocaPtr;
+ };
+};
+
+struct SkOTTableGlyphData {
+ SK_OT_SHORT numberOfContours; //== -1 Composite, > 0 Simple
+ SK_OT_FWORD xMin;
+ SK_OT_FWORD yMin;
+ SK_OT_FWORD xMax;
+ SK_OT_FWORD yMax;
+
+ struct Simple {
+ SK_OT_USHORT endPtsOfContours[1/*numberOfContours*/];
+
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+
+ union Flags {
+ struct Field {
+ SK_OT_BYTE_BITFIELD(
+ OnCurve,
+ xShortVector,
+ yShortVector,
+ Repeat,
+ xIsSame_xShortVectorPositive,
+ yIsSame_yShortVectorPositive,
+ Reserved6,
+ Reserved7)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT OnCurveMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT xShortVectorMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT yShortVectorMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT RepeatMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT xIsSame_xShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT yIsSame_yShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 5);
+ SK_OT_BYTE value;
+ } raw;
+ };
+
+ //xCoordinates
+ //yCoordinates
+ };
+
+ struct Composite {
+ struct Component {
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WE_HAVE_INSTRUCTIONS,
+ USE_MY_METRICS,
+ OVERLAP_COMPOUND,
+ SCALED_COMPONENT_OFFSET,
+ UNSCALED_COMPONENT_OFFSET,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ ARG_1_AND_2_ARE_WORDS,
+ ARGS_ARE_XY_VALUES,
+ ROUND_XY_TO_GRID,
+ WE_HAVE_A_SCALE,
+ RESERVED,
+ MORE_COMPONENTS,
+ WE_HAVE_AN_X_AND_Y_SCALE,
+ WE_HAVE_A_TWO_BY_TWO)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ARG_1_AND_2_ARE_WORDS_Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT ARGS_ARE_XY_VALUES_Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT ROUND_XY_TO_GRID_Mask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT WE_HAVE_A_SCALE_Mask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT RESERVED_Mask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT MORE_COMPONENTS_Mask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT WE_HAVE_AN_X_AND_Y_SCALE_Mask = SkTEndian_SwapBE16(1 << 6);
+ static const SK_OT_USHORT WE_HAVE_A_TWO_BY_TWO_Mask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT WE_HAVE_INSTRUCTIONS_Mask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT USE_MY_METRICS_Mask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT OVERLAP_COMPOUND_Mask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT SCALED_COMPONENT_OFFSET_Mask = SkTEndian_SwapBE16(1 << 11);
+ static const SK_OT_USHORT UNSCALED_COMPONENT_OFFSET_Mask = SkTEndian_SwapBE16(1 << 12);
+ //Reserved
+ //Reserved
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT glyphIndex;
+ union Transform {
+ union Matrix {
+ /** !WE_HAVE_A_SCALE & !WE_HAVE_AN_X_AND_Y_SCALE & !WE_HAVE_A_TWO_BY_TWO */
+ struct None { } none;
+ /** WE_HAVE_A_SCALE */
+ struct Scale {
+ SK_OT_F2DOT14 a_d;
+ } scale;
+ /** WE_HAVE_AN_X_AND_Y_SCALE */
+ struct ScaleXY {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 d;
+ } scaleXY;
+ /** WE_HAVE_A_TWO_BY_TWO */
+ struct TwoByTwo {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 b;
+ SK_OT_F2DOT14 c;
+ SK_OT_F2DOT14 d;
+ } twoByTwo;
+ };
+ /** ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct WordValue {
+ SK_OT_FWORD e;
+ SK_OT_FWORD f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordValue;
+ /** !ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct ByteValue {
+ SK_OT_CHAR e;
+ SK_OT_CHAR f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteValue;
+ /** ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct WordIndex {
+ SK_OT_USHORT compoundPointIndex;
+ SK_OT_USHORT componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordIndex;
+ /** !ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct ByteIndex {
+ SK_OT_BYTE compoundPointIndex;
+ SK_OT_BYTE componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteIndex;
+ } transform;
+ } component;//[] last element does not set MORE_COMPONENTS
+
+ /** Comes after the last Component if the last component has WE_HAVE_INSTRUCTIONS set. */
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+ };
+};
+
+#pragma pack(pop)
+
+#endif
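
The Iterator above walks loca in lock step with glyf and returns a shared sentinel for empty glyphs, since two equal consecutive loca offsets mean no outline. A usage sketch, assuming the three tables were already located and validated; the helper name is illustrative:

    #include "src/sfnt/SkOTTable_glyf.h"

    static int countOutlinedGlyphs(const SkOTTableGlyph& glyf,
                                   const SkOTTableIndexToLocation& loca,
                                   SkOTTableHead::IndexToLocFormat locaFormat,
                                   uint16_t numGlyphs) {
        // next() returns exactly this sentinel address for empty glyphs.
        const SkOTTableGlyphData* noOutline =
                reinterpret_cast<const SkOTTableGlyphData*>(&SK_OT_GlyphData_NoOutline);
        SkOTTableGlyph::Iterator iter(glyf, loca, locaFormat);
        int outlined = 0;
        for (uint16_t i = 0; i < numGlyphs; ++i) {
            if (iter.next() != noOutline) {
                ++outlined;
            }
        }
        return outlined;
    }
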
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
new file mode 100644
index 0000000000..71a443ebfc
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_head_DEFINED
+#define SkOTTable_head_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHead {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'e';
+ static const SK_OT_CHAR TAG2 = 'a';
+ static const SK_OT_CHAR TAG3 = 'd';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHead>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_Fixed fontRevision;
+ static const uint32_t fontChecksum = 0xB1B0AFBA; //checksum of all TT fonts
+ SK_OT_ULONG checksumAdjustment;
+ SK_OT_ULONG magicNumber;
+ static const SK_OT_ULONG magicNumberConst = SkTEndian_SwapBE32(0x5F0F3CF5);
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ GXMetamorphosis_Apple,
+ HasStrongRTL_Apple,
+ HasIndicStyleRearrangement,
+ AgfaMicroTypeExpressProcessed,
+ FontConverted,
+ DesignedForClearType,
+ LastResort,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ BaselineAtY0,
+ LeftSidebearingAtX0,
+ InstructionsDependOnPointSize,
+ IntegerScaling,
+ InstructionsAlterAdvanceWidth,
+ VerticalCenteredGlyphs_Apple,
+ Reserved06,
+ RequiresLayout_Apple)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BaselineAtY0Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT LeftSidebearingAtX0Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT InstructionsDependOnPointSizeMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT IntegerScalingMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT InstructionsAlterAdvanceWidthMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT VerticalCenteredGlyphs_AppleMask = SkTEndian_SwapBE16(1 << 5);
+ //Reserved
+ static const SK_OT_USHORT RequiresLayout_AppleMask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT GXMetamorphosis_AppleMask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT HasStrongRTL_AppleMask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT HasIndicStyleRearrangementMask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT AgfaMicroTypeExpressProcessedMask = SkTEndian_SwapBE16(1 << 11);
+ static const SK_OT_USHORT FontConvertedMask = SkTEndian_SwapBE16(1 << 12);
+ static const SK_OT_USHORT DesignedForClearTypeMask = SkTEndian_SwapBE16(1 << 13);
+ static const SK_OT_USHORT LastResortMask = SkTEndian_SwapBE16(1 << 14);
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT unitsPerEm;
+ SK_OT_LONGDATETIME created;
+ SK_OT_LONGDATETIME modified;
+ SK_OT_SHORT xMin;
+ SK_OT_SHORT yMin;
+ SK_OT_SHORT xMax;
+ SK_OT_SHORT yMax;
+ union MacStyle {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Bold,
+ Italic,
+ Underline,
+ Outline,
+ Shadow,
+ Condensed,
+ Extended,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BoldMask = SkTEndian_SwapBE16(1);
+ static const SK_OT_USHORT ItalicMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT UnderlineMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT OutlineMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT ShadowMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT CondensedMask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT ExtendedMask = SkTEndian_SwapBE16(1 << 6);
+
+ SK_OT_USHORT value;
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ enum Value : SK_OT_SHORT {
+ FullyMixedDirectionalGlyphs = SkTEndian_SwapBE16(0),
+ OnlyStronglyLTR = SkTEndian_SwapBE16(1),
+ StronglyLTR = SkTEndian_SwapBE16(2),
+ OnlyStronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-1)),
+ StronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-2)),
+ } value;
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ enum Value : SK_OT_SHORT {
+ ShortOffsets = SkTEndian_SwapBE16(0),
+ LongOffsets = SkTEndian_SwapBE16(1),
+ } value;
+ } indexToLocFormat;
+ struct GlyphDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } glyphDataFormat;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHead, glyphDataFormat) == 52, "SkOTTableHead_glyphDataFormat_not_at_52");
+static_assert(sizeof(SkOTTableHead) == 54, "sizeof_SkOTTableHead_not_54");
+
+#endif
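
Two 'head' fields do most of the work for a reader: unitsPerEm scales all FUnit values, and indexToLocFormat selects between short and long 'loca' entries. The enum values are stored pre-swapped, so they compare directly against the table bytes, which is what the glyf Iterator above relies on. A sketch, with illustrative helper names:

    #include "src/core/SkEndian.h"
    #include "src/sfnt/SkOTTable_head.h"

    static uint16_t unitsPerEmOf(const SkOTTableHead& head) {
        return SkEndian_SwapBE16(head.unitsPerEm);
    }

    static bool hasLongLocaOffsets(const SkOTTableHead& head) {
        // No swap needed: LongOffsets is already in big-endian form.
        return SkOTTableHead::IndexToLocFormat::LongOffsets == head.indexToLocFormat.value;
    }
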
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
new file mode 100644
index 0000000000..1044e79319
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_hhea_DEFINED
+#define SkOTTable_hhea_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHorizontalHeader {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'h';
+ static const SK_OT_CHAR TAG2 = 'e';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHorizontalHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_FWORD Ascender;
+ SK_OT_FWORD Descender;
+ SK_OT_FWORD LineGap;
+ SK_OT_UFWORD advanceWidthMax;
+ SK_OT_FWORD minLeftSideBearing;
+ SK_OT_FWORD minRightSideBearing;
+ SK_OT_FWORD xMaxExtent;
+ SK_OT_SHORT caretSlopeRise;
+ SK_OT_SHORT caretSlopeRun;
+ SK_OT_SHORT caretOffset;
+ SK_OT_SHORT Reserved24;
+ SK_OT_SHORT Reserved26;
+ SK_OT_SHORT Reserved28;
+ SK_OT_SHORT Reserved30;
+ struct MetricDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } metricDataFormat;
+ SK_OT_USHORT numberOfHMetrics;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHorizontalHeader, numberOfHMetrics) == 34, "SkOTTableHorizontalHeader_numberOfHMetrics_not_at_34");
+static_assert(sizeof(SkOTTableHorizontalHeader) == 36, "sizeof_SkOTTableHorizontalHeader_not_36");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_loca.h b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
new file mode 100644
index 0000000000..98df2833e4
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_loca_DEFINED
+#define SkOTTable_loca_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableIndexToLocation {
+ static const SK_OT_CHAR TAG0 = 'l';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableIndexToLocation>::value;
+
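+    // Variable-length trailer: declared with one element each. The actual
+    // entry count is numGlyphs + 1 (from 'maxp'), and head.indexToLocFormat
+    // selects which member applies.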
+ union Offsets {
+ SK_OT_USHORT shortOffset[1];
+ SK_OT_ULONG longOffset[1];
+ } offsets;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
new file mode 100644
index 0000000000..aaae28a9e3
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_DEFINED
+#define SkOTTable_maxp_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_maxp_CFF.h"
+#include "src/sfnt/SkOTTable_maxp_TT.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile {
+ static const SK_OT_CHAR TAG0 = 'm';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'x';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableMaximumProfile>::value;
+
+ union Version {
+ SK_OT_Fixed version;
+
+ struct CFF : SkOTTableMaximumProfile_CFF { } cff;
+ struct TT : SkOTTableMaximumProfile_TT { } tt;
+ } version;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
new file mode 100644
index 0000000000..bf69beda7c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_CFF_DEFINED
+#define SkOTTable_maxp_CFF_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_CFF {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00005000);
+
+ SK_OT_USHORT numGlyphs;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_CFF, numGlyphs) == 4, "SkOTTableMaximumProfile_CFF_numGlyphs_not_at_4");
+static_assert(sizeof(SkOTTableMaximumProfile_CFF) == 6, "sizeof_SkOTTableMaximumProfile_CFF_not_6");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
new file mode 100644
index 0000000000..53b7a70104
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_TT_DEFINED
+#define SkOTTable_maxp_TT_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_TT {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00010000);
+
+ SK_OT_USHORT numGlyphs;
+ SK_OT_USHORT maxPoints;
+ SK_OT_USHORT maxContours;
+ SK_OT_USHORT maxCompositePoints;
+ SK_OT_USHORT maxCompositeContours;
+ struct MaxZones {
+ enum Value : SK_OT_USHORT {
+ DoesNotUseTwilightZone = SkTEndian_SwapBE16(1),
+ UsesTwilightZone = SkTEndian_SwapBE16(2),
+ } value;
+ } maxZones;
+ SK_OT_USHORT maxTwilightPoints;
+ SK_OT_USHORT maxStorage;
+ SK_OT_USHORT maxFunctionDefs;
+ SK_OT_USHORT maxInstructionDefs;
+ SK_OT_USHORT maxStackElements;
+ SK_OT_USHORT maxSizeOfInstructions;
+ SK_OT_USHORT maxComponentElements;
+ SK_OT_USHORT maxComponentDepth;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_TT, maxComponentDepth) == 30, "SkOTTableMaximumProfile_TT_maxComponentDepth_not_at_30");
+static_assert(sizeof(SkOTTableMaximumProfile_TT) == 32, "sizeof_SkOTTableMaximumProfile_TT_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
new file mode 100644
index 0000000000..185c789f90
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sfnt/SkOTTable_name.h"
+
+#include "include/private/SkTemplates.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkUtils.h"
+
+static SkUnichar next_unichar_UTF16BE(const uint8_t** srcPtr, size_t* length) {
+ SkASSERT(srcPtr && *srcPtr && length);
+ SkASSERT(*length > 0);
+
+ uint16_t leading;
+ if (*length < sizeof(leading)) {
+ *length = 0;
+ return 0xFFFD;
+ }
+ memcpy(&leading, *srcPtr, sizeof(leading));
+ *srcPtr += sizeof(leading);
+ *length -= sizeof(leading);
+ SkUnichar c = SkEndian_SwapBE16(leading);
+
+ if (SkUTF16_IsTrailingSurrogate(c)) {
+ return 0xFFFD;
+ }
+ if (SkUTF16_IsLeadingSurrogate(c)) {
+ uint16_t trailing;
+ if (*length < sizeof(trailing)) {
+ *length = 0;
+ return 0xFFFD;
+ }
+ memcpy(&trailing, *srcPtr, sizeof(trailing));
+ SkUnichar c2 = SkEndian_SwapBE16(trailing);
+ if (!SkUTF16_IsTrailingSurrogate(c2)) {
+ return 0xFFFD;
+ }
+ *srcPtr += sizeof(trailing);
+ *length -= sizeof(trailing);
+
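+        // Combine the pair: ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000,
+        // folded into the single expression below.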
+ c = (c << 10) + c2 + (0x10000 - (0xD800 << 10) - 0xDC00);
+ }
+ return c;
+}
+
+static void SkString_from_UTF16BE(const uint8_t* utf16be, size_t length, SkString& utf8) {
+ // Note that utf16be may not be 2-byte aligned.
+ SkASSERT(utf16be != nullptr);
+
+ utf8.reset();
+ while (length) {
+ utf8.appendUnichar(next_unichar_UTF16BE(&utf16be, &length));
+ }
+}
+
+/** UnicodeFromMacRoman[macRomanPoint - 0x80] -> unicodeCodePoint.
+ * Derived from http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/ROMAN.TXT .
+ * In MacRoman the first 128 code points match ASCII code points.
+ * This maps the second 128 MacRoman code points to unicode code points.
+ */
+static const uint16_t UnicodeFromMacRoman[0x80] = {
+ 0x00C4, 0x00C5, 0x00C7, 0x00C9, 0x00D1, 0x00D6, 0x00DC, 0x00E1,
+ 0x00E0, 0x00E2, 0x00E4, 0x00E3, 0x00E5, 0x00E7, 0x00E9, 0x00E8,
+ 0x00EA, 0x00EB, 0x00ED, 0x00EC, 0x00EE, 0x00EF, 0x00F1, 0x00F3,
+ 0x00F2, 0x00F4, 0x00F6, 0x00F5, 0x00FA, 0x00F9, 0x00FB, 0x00FC,
+ 0x2020, 0x00B0, 0x00A2, 0x00A3, 0x00A7, 0x2022, 0x00B6, 0x00DF,
+ 0x00AE, 0x00A9, 0x2122, 0x00B4, 0x00A8, 0x2260, 0x00C6, 0x00D8,
+ 0x221E, 0x00B1, 0x2264, 0x2265, 0x00A5, 0x00B5, 0x2202, 0x2211,
+ 0x220F, 0x03C0, 0x222B, 0x00AA, 0x00BA, 0x03A9, 0x00E6, 0x00F8,
+ 0x00BF, 0x00A1, 0x00AC, 0x221A, 0x0192, 0x2248, 0x2206, 0x00AB,
+ 0x00BB, 0x2026, 0x00A0, 0x00C0, 0x00C3, 0x00D5, 0x0152, 0x0153,
+ 0x2013, 0x2014, 0x201C, 0x201D, 0x2018, 0x2019, 0x00F7, 0x25CA,
+ 0x00FF, 0x0178, 0x2044, 0x20AC, 0x2039, 0x203A, 0xFB01, 0xFB02,
+ 0x2021, 0x00B7, 0x201A, 0x201E, 0x2030, 0x00C2, 0x00CA, 0x00C1,
+ 0x00CB, 0x00C8, 0x00CD, 0x00CE, 0x00CF, 0x00CC, 0x00D3, 0x00D4,
+ 0xF8FF, 0x00D2, 0x00DA, 0x00DB, 0x00D9, 0x0131, 0x02C6, 0x02DC,
+ 0x00AF, 0x02D8, 0x02D9, 0x02DA, 0x00B8, 0x02DD, 0x02DB, 0x02C7,
+};
+
+static void SkStringFromMacRoman(const uint8_t* macRoman, size_t length, SkString& utf8) {
+ utf8.reset();
+ for (size_t i = 0; i < length; ++i) {
+ utf8.appendUnichar(macRoman[i] < 0x80 ? macRoman[i]
+ : UnicodeFromMacRoman[macRoman[i] - 0x80]);
+ }
+}
+
+static const struct BCP47FromLanguageId {
+ uint16_t languageID;
+ const char* bcp47;
+}
+/** The Mac and Windows values do not conflict, so they currently share a single table. */
+BCP47FromLanguageID[] = {
+ /** A mapping from Mac Language Designators to BCP 47 codes.
+ * The following list was constructed more or less manually.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ {0, "en"}, //English
+ {1, "fr"}, //French
+ {2, "de"}, //German
+ {3, "it"}, //Italian
+ {4, "nl"}, //Dutch
+ {5, "sv"}, //Swedish
+ {6, "es"}, //Spanish
+ {7, "da"}, //Danish
+ {8, "pt"}, //Portuguese
+ {9, "nb"}, //Norwegian
+ {10, "he"}, //Hebrew
+ {11, "ja"}, //Japanese
+ {12, "ar"}, //Arabic
+ {13, "fi"}, //Finnish
+ {14, "el"}, //Greek
+ {15, "is"}, //Icelandic
+ {16, "mt"}, //Maltese
+ {17, "tr"}, //Turkish
+ {18, "hr"}, //Croatian
+ {19, "zh-Hant"}, //Chinese (Traditional)
+ {20, "ur"}, //Urdu
+ {21, "hi"}, //Hindi
+ {22, "th"}, //Thai
+ {23, "ko"}, //Korean
+ {24, "lt"}, //Lithuanian
+ {25, "pl"}, //Polish
+ {26, "hu"}, //Hungarian
+ {27, "et"}, //Estonian
+ {28, "lv"}, //Latvian
+ {29, "se"}, //Sami
+ {30, "fo"}, //Faroese
+ {31, "fa"}, //Farsi (Persian)
+ {32, "ru"}, //Russian
+ {33, "zh-Hans"}, //Chinese (Simplified)
+ {34, "nl"}, //Dutch
+ {35, "ga"}, //Irish(Gaelic)
+ {36, "sq"}, //Albanian
+ {37, "ro"}, //Romanian
+ {38, "cs"}, //Czech
+ {39, "sk"}, //Slovak
+ {40, "sl"}, //Slovenian
+ {41, "yi"}, //Yiddish
+ {42, "sr"}, //Serbian
+ {43, "mk"}, //Macedonian
+ {44, "bg"}, //Bulgarian
+ {45, "uk"}, //Ukrainian
+ {46, "be"}, //Byelorussian
+ {47, "uz"}, //Uzbek
+ {48, "kk"}, //Kazakh
+ {49, "az-Cyrl"}, //Azerbaijani (Cyrillic)
+ {50, "az-Arab"}, //Azerbaijani (Arabic)
+ {51, "hy"}, //Armenian
+ {52, "ka"}, //Georgian
+ {53, "mo"}, //Moldavian
+ {54, "ky"}, //Kirghiz
+ {55, "tg"}, //Tajiki
+ {56, "tk"}, //Turkmen
+ {57, "mn-Mong"}, //Mongolian (Traditional)
+ {58, "mn-Cyrl"}, //Mongolian (Cyrillic)
+ {59, "ps"}, //Pashto
+ {60, "ku"}, //Kurdish
+ {61, "ks"}, //Kashmiri
+ {62, "sd"}, //Sindhi
+ {63, "bo"}, //Tibetan
+ {64, "ne"}, //Nepali
+ {65, "sa"}, //Sanskrit
+ {66, "mr"}, //Marathi
+ {67, "bn"}, //Bengali
+ {68, "as"}, //Assamese
+ {69, "gu"}, //Gujarati
+ {70, "pa"}, //Punjabi
+ {71, "or"}, //Oriya
+ {72, "ml"}, //Malayalam
+ {73, "kn"}, //Kannada
+ {74, "ta"}, //Tamil
+ {75, "te"}, //Telugu
+ {76, "si"}, //Sinhalese
+ {77, "my"}, //Burmese
+ {78, "km"}, //Khmer
+ {79, "lo"}, //Lao
+ {80, "vi"}, //Vietnamese
+ {81, "id"}, //Indonesian
+ {82, "tl"}, //Tagalog
+ {83, "ms-Latn"}, //Malay (Roman)
+ {84, "ms-Arab"}, //Malay (Arabic)
+ {85, "am"}, //Amharic
+ {86, "ti"}, //Tigrinya
+ {87, "om"}, //Oromo
+ {88, "so"}, //Somali
+ {89, "sw"}, //Swahili
+ {90, "rw"}, //Kinyarwanda/Ruanda
+ {91, "rn"}, //Rundi
+ {92, "ny"}, //Nyanja/Chewa
+ {93, "mg"}, //Malagasy
+ {94, "eo"}, //Esperanto
+ {128, "cy"}, //Welsh
+ {129, "eu"}, //Basque
+ {130, "ca"}, //Catalan
+ {131, "la"}, //Latin
+ {132, "qu"}, //Quechua
+ {133, "gn"}, //Guarani
+ {134, "ay"}, //Aymara
+ {135, "tt"}, //Tatar
+ {136, "ug"}, //Uighur
+ {137, "dz"}, //Dzongkha
+ {138, "jv-Latn"}, //Javanese (Roman)
+ {139, "su-Latn"}, //Sundanese (Roman)
+ {140, "gl"}, //Galician
+ {141, "af"}, //Afrikaans
+ {142, "br"}, //Breton
+ {143, "iu"}, //Inuktitut
+ {144, "gd"}, //Scottish (Gaelic)
+ {145, "gv"}, //Manx (Gaelic)
+ {146, "ga"}, //Irish (Gaelic with Lenition)
+ {147, "to"}, //Tongan
+    {148, "el"}, //Greek (Polytonic); note: ISO 15924 does not have an equivalent script name.
+ {149, "kl"}, //Greenlandic
+ {150, "az-Latn"}, //Azerbaijani (Roman)
+ {151, "nn"}, //Nynorsk
+
+ /** A mapping from Windows LCID to BCP 47 codes.
+ * This list is the sorted, curated output of tools/win_lcid.cpp.
+ * Note that these are sorted by value for quick binary lookup, and not logically by lsb.
+     * The 'bare' language ids (e.g. 0x0001 for Arabic) are omitted
+ * as they do not appear as valid language ids in the OpenType specification.
+ */
+ { 0x0401, "ar-SA" }, //Arabic
+ { 0x0402, "bg-BG" }, //Bulgarian
+ { 0x0403, "ca-ES" }, //Catalan
+ { 0x0404, "zh-TW" }, //Chinese (Traditional)
+ { 0x0405, "cs-CZ" }, //Czech
+ { 0x0406, "da-DK" }, //Danish
+ { 0x0407, "de-DE" }, //German
+ { 0x0408, "el-GR" }, //Greek
+ { 0x0409, "en-US" }, //English
+ { 0x040a, "es-ES_tradnl" }, //Spanish
+ { 0x040b, "fi-FI" }, //Finnish
+ { 0x040c, "fr-FR" }, //French
+ { 0x040d, "he-IL" }, //Hebrew
+ { 0x040d, "he" }, //Hebrew
+ { 0x040e, "hu-HU" }, //Hungarian
+ { 0x040e, "hu" }, //Hungarian
+ { 0x040f, "is-IS" }, //Icelandic
+ { 0x0410, "it-IT" }, //Italian
+ { 0x0411, "ja-JP" }, //Japanese
+ { 0x0412, "ko-KR" }, //Korean
+ { 0x0413, "nl-NL" }, //Dutch
+ { 0x0414, "nb-NO" }, //Norwegian (Bokmål)
+ { 0x0415, "pl-PL" }, //Polish
+ { 0x0416, "pt-BR" }, //Portuguese
+ { 0x0417, "rm-CH" }, //Romansh
+ { 0x0418, "ro-RO" }, //Romanian
+ { 0x0419, "ru-RU" }, //Russian
+ { 0x041a, "hr-HR" }, //Croatian
+ { 0x041b, "sk-SK" }, //Slovak
+ { 0x041c, "sq-AL" }, //Albanian
+ { 0x041d, "sv-SE" }, //Swedish
+ { 0x041e, "th-TH" }, //Thai
+ { 0x041f, "tr-TR" }, //Turkish
+ { 0x0420, "ur-PK" }, //Urdu
+ { 0x0421, "id-ID" }, //Indonesian
+ { 0x0422, "uk-UA" }, //Ukrainian
+ { 0x0423, "be-BY" }, //Belarusian
+ { 0x0424, "sl-SI" }, //Slovenian
+ { 0x0425, "et-EE" }, //Estonian
+ { 0x0426, "lv-LV" }, //Latvian
+ { 0x0427, "lt-LT" }, //Lithuanian
+ { 0x0428, "tg-Cyrl-TJ" }, //Tajik (Cyrillic)
+ { 0x0429, "fa-IR" }, //Persian
+ { 0x042a, "vi-VN" }, //Vietnamese
+ { 0x042b, "hy-AM" }, //Armenian
+ { 0x042c, "az-Latn-AZ" }, //Azeri (Latin)
+ { 0x042d, "eu-ES" }, //Basque
+ { 0x042e, "hsb-DE" }, //Upper Sorbian
+ { 0x042f, "mk-MK" }, //Macedonian (FYROM)
+ { 0x0432, "tn-ZA" }, //Setswana
+ { 0x0434, "xh-ZA" }, //isiXhosa
+ { 0x0435, "zu-ZA" }, //isiZulu
+ { 0x0436, "af-ZA" }, //Afrikaans
+ { 0x0437, "ka-GE" }, //Georgian
+ { 0x0438, "fo-FO" }, //Faroese
+ { 0x0439, "hi-IN" }, //Hindi
+ { 0x043a, "mt-MT" }, //Maltese
+ { 0x043b, "se-NO" }, //Sami (Northern)
+ { 0x043e, "ms-MY" }, //Malay
+ { 0x043f, "kk-KZ" }, //Kazakh
+ { 0x0440, "ky-KG" }, //Kyrgyz
+ { 0x0441, "sw-KE" }, //Kiswahili
+ { 0x0442, "tk-TM" }, //Turkmen
+ { 0x0443, "uz-Latn-UZ" }, //Uzbek (Latin)
+ { 0x0443, "uz" }, //Uzbek
+ { 0x0444, "tt-RU" }, //Tatar
+ { 0x0445, "bn-IN" }, //Bengali
+ { 0x0446, "pa-IN" }, //Punjabi
+ { 0x0447, "gu-IN" }, //Gujarati
+ { 0x0448, "or-IN" }, //Oriya
+ { 0x0449, "ta-IN" }, //Tamil
+ { 0x044a, "te-IN" }, //Telugu
+ { 0x044b, "kn-IN" }, //Kannada
+ { 0x044c, "ml-IN" }, //Malayalam
+ { 0x044d, "as-IN" }, //Assamese
+ { 0x044e, "mr-IN" }, //Marathi
+ { 0x044f, "sa-IN" }, //Sanskrit
+ { 0x0450, "mn-Cyrl" }, //Mongolian (Cyrillic)
+ { 0x0451, "bo-CN" }, //Tibetan
+ { 0x0452, "cy-GB" }, //Welsh
+ { 0x0453, "km-KH" }, //Khmer
+ { 0x0454, "lo-LA" }, //Lao
+ { 0x0456, "gl-ES" }, //Galician
+ { 0x0457, "kok-IN" }, //Konkani
+ { 0x045a, "syr-SY" }, //Syriac
+ { 0x045b, "si-LK" }, //Sinhala
+ { 0x045d, "iu-Cans-CA" }, //Inuktitut (Syllabics)
+ { 0x045e, "am-ET" }, //Amharic
+ { 0x0461, "ne-NP" }, //Nepali
+ { 0x0462, "fy-NL" }, //Frisian
+ { 0x0463, "ps-AF" }, //Pashto
+ { 0x0464, "fil-PH" }, //Filipino
+ { 0x0465, "dv-MV" }, //Divehi
+ { 0x0468, "ha-Latn-NG" }, //Hausa (Latin)
+ { 0x046a, "yo-NG" }, //Yoruba
+ { 0x046b, "quz-BO" }, //Quechua
+ { 0x046c, "nso-ZA" }, //Sesotho sa Leboa
+ { 0x046d, "ba-RU" }, //Bashkir
+ { 0x046e, "lb-LU" }, //Luxembourgish
+ { 0x046f, "kl-GL" }, //Greenlandic
+ { 0x0470, "ig-NG" }, //Igbo
+ { 0x0478, "ii-CN" }, //Yi
+ { 0x047a, "arn-CL" }, //Mapudungun
+ { 0x047c, "moh-CA" }, //Mohawk
+ { 0x047e, "br-FR" }, //Breton
+ { 0x0480, "ug-CN" }, //Uyghur
+ { 0x0481, "mi-NZ" }, //Maori
+ { 0x0482, "oc-FR" }, //Occitan
+ { 0x0483, "co-FR" }, //Corsican
+ { 0x0484, "gsw-FR" }, //Alsatian
+ { 0x0485, "sah-RU" }, //Yakut
+ { 0x0486, "qut-GT" }, //K'iche
+ { 0x0487, "rw-RW" }, //Kinyarwanda
+ { 0x0488, "wo-SN" }, //Wolof
+ { 0x048c, "prs-AF" }, //Dari
+ { 0x0491, "gd-GB" }, //Scottish Gaelic
+ { 0x0801, "ar-IQ" }, //Arabic
+ { 0x0804, "zh-Hans" }, //Chinese (Simplified)
+ { 0x0807, "de-CH" }, //German
+ { 0x0809, "en-GB" }, //English
+ { 0x080a, "es-MX" }, //Spanish
+ { 0x080c, "fr-BE" }, //French
+ { 0x0810, "it-CH" }, //Italian
+ { 0x0813, "nl-BE" }, //Dutch
+ { 0x0814, "nn-NO" }, //Norwegian (Nynorsk)
+ { 0x0816, "pt-PT" }, //Portuguese
+ { 0x081a, "sr-Latn-CS" }, //Serbian (Latin)
+ { 0x081d, "sv-FI" }, //Swedish
+ { 0x082c, "az-Cyrl-AZ" }, //Azeri (Cyrillic)
+ { 0x082e, "dsb-DE" }, //Lower Sorbian
+ { 0x082e, "dsb" }, //Lower Sorbian
+ { 0x083b, "se-SE" }, //Sami (Northern)
+ { 0x083c, "ga-IE" }, //Irish
+ { 0x083e, "ms-BN" }, //Malay
+ { 0x0843, "uz-Cyrl-UZ" }, //Uzbek (Cyrillic)
+ { 0x0845, "bn-BD" }, //Bengali
+ { 0x0850, "mn-Mong-CN" }, //Mongolian (Traditional Mongolian)
+ { 0x085d, "iu-Latn-CA" }, //Inuktitut (Latin)
+ { 0x085f, "tzm-Latn-DZ" }, //Tamazight (Latin)
+ { 0x086b, "quz-EC" }, //Quechua
+ { 0x0c01, "ar-EG" }, //Arabic
+ { 0x0c04, "zh-Hant" }, //Chinese (Traditional)
+ { 0x0c07, "de-AT" }, //German
+ { 0x0c09, "en-AU" }, //English
+ { 0x0c0a, "es-ES" }, //Spanish
+ { 0x0c0c, "fr-CA" }, //French
+ { 0x0c1a, "sr-Cyrl-CS" }, //Serbian (Cyrillic)
+ { 0x0c3b, "se-FI" }, //Sami (Northern)
+ { 0x0c6b, "quz-PE" }, //Quechua
+ { 0x1001, "ar-LY" }, //Arabic
+ { 0x1004, "zh-SG" }, //Chinese (Simplified)
+ { 0x1007, "de-LU" }, //German
+ { 0x1009, "en-CA" }, //English
+ { 0x100a, "es-GT" }, //Spanish
+ { 0x100c, "fr-CH" }, //French
+ { 0x101a, "hr-BA" }, //Croatian (Latin)
+ { 0x103b, "smj-NO" }, //Sami (Lule)
+ { 0x1401, "ar-DZ" }, //Arabic
+ { 0x1404, "zh-MO" }, //Chinese (Traditional)
+ { 0x1407, "de-LI" }, //German
+ { 0x1409, "en-NZ" }, //English
+ { 0x140a, "es-CR" }, //Spanish
+ { 0x140c, "fr-LU" }, //French
+ { 0x141a, "bs-Latn-BA" }, //Bosnian (Latin)
+ { 0x141a, "bs" }, //Bosnian
+ { 0x143b, "smj-SE" }, //Sami (Lule)
+ { 0x143b, "smj" }, //Sami (Lule)
+ { 0x1801, "ar-MA" }, //Arabic
+ { 0x1809, "en-IE" }, //English
+ { 0x180a, "es-PA" }, //Spanish
+ { 0x180c, "fr-MC" }, //French
+ { 0x181a, "sr-Latn-BA" }, //Serbian (Latin)
+ { 0x183b, "sma-NO" }, //Sami (Southern)
+ { 0x1c01, "ar-TN" }, //Arabic
+ { 0x1c09, "en-ZA" }, //English
+ { 0x1c0a, "es-DO" }, //Spanish
+ { 0x1c1a, "sr-Cyrl-BA" }, //Serbian (Cyrillic)
+ { 0x1c3b, "sma-SE" }, //Sami (Southern)
+ { 0x1c3b, "sma" }, //Sami (Southern)
+ { 0x2001, "ar-OM" }, //Arabic
+ { 0x2009, "en-JM" }, //English
+ { 0x200a, "es-VE" }, //Spanish
+ { 0x201a, "bs-Cyrl-BA" }, //Bosnian (Cyrillic)
+ { 0x201a, "bs-Cyrl" }, //Bosnian (Cyrillic)
+ { 0x203b, "sms-FI" }, //Sami (Skolt)
+ { 0x203b, "sms" }, //Sami (Skolt)
+ { 0x2401, "ar-YE" }, //Arabic
+ { 0x2409, "en-029" }, //English
+ { 0x240a, "es-CO" }, //Spanish
+ { 0x241a, "sr-Latn-RS" }, //Serbian (Latin)
+ { 0x243b, "smn-FI" }, //Sami (Inari)
+ { 0x2801, "ar-SY" }, //Arabic
+ { 0x2809, "en-BZ" }, //English
+ { 0x280a, "es-PE" }, //Spanish
+ { 0x281a, "sr-Cyrl-RS" }, //Serbian (Cyrillic)
+ { 0x2c01, "ar-JO" }, //Arabic
+ { 0x2c09, "en-TT" }, //English
+ { 0x2c0a, "es-AR" }, //Spanish
+ { 0x2c1a, "sr-Latn-ME" }, //Serbian (Latin)
+ { 0x3001, "ar-LB" }, //Arabic
+ { 0x3009, "en-ZW" }, //English
+ { 0x300a, "es-EC" }, //Spanish
+ { 0x301a, "sr-Cyrl-ME" }, //Serbian (Cyrillic)
+ { 0x3401, "ar-KW" }, //Arabic
+ { 0x3409, "en-PH" }, //English
+ { 0x340a, "es-CL" }, //Spanish
+ { 0x3801, "ar-AE" }, //Arabic
+ { 0x380a, "es-UY" }, //Spanish
+ { 0x3c01, "ar-BH" }, //Arabic
+ { 0x3c0a, "es-PY" }, //Spanish
+ { 0x4001, "ar-QA" }, //Arabic
+ { 0x4009, "en-IN" }, //English
+ { 0x400a, "es-BO" }, //Spanish
+ { 0x4409, "en-MY" }, //English
+ { 0x440a, "es-SV" }, //Spanish
+ { 0x4809, "en-SG" }, //English
+ { 0x480a, "es-HN" }, //Spanish
+ { 0x4c0a, "es-NI" }, //Spanish
+ { 0x500a, "es-PR" }, //Spanish
+ { 0x540a, "es-US" }, //Spanish
+};
+
+namespace {
+bool BCP47FromLanguageIdLess(const BCP47FromLanguageId& a, const BCP47FromLanguageId& b) {
+ return a.languageID < b.languageID;
+}
+}
+
+bool SkOTTableName::Iterator::next(SkOTTableName::Iterator::Record& record) {
+ SkOTTableName nameTable;
+ if (fNameTableSize < sizeof(nameTable)) {
+ return false;
+ }
+ memcpy(&nameTable, fNameTable, sizeof(nameTable));
+
+ const uint8_t* nameRecords = fNameTable + sizeof(nameTable);
+ const size_t nameRecordsSize = fNameTableSize - sizeof(nameTable);
+
+ const size_t stringTableOffset = SkEndian_SwapBE16(nameTable.stringOffset);
+ if (fNameTableSize < stringTableOffset) {
+ return false;
+ }
+ const uint8_t* stringTable = fNameTable + stringTableOffset;
+ const size_t stringTableSize = fNameTableSize - stringTableOffset;
+
+ // Find the next record which matches the requested type.
+ SkOTTableName::Record nameRecord;
+ const size_t nameRecordsCount = SkEndian_SwapBE16(nameTable.count);
+ const size_t nameRecordsMax = SkTMin(nameRecordsCount, nameRecordsSize / sizeof(nameRecord));
+ do {
+ if (fIndex >= nameRecordsMax) {
+ return false;
+ }
+
+ memcpy(&nameRecord, nameRecords + sizeof(nameRecord)*fIndex, sizeof(nameRecord));
+ ++fIndex;
+ } while (fType != -1 && nameRecord.nameID.fontSpecific != fType);
+
+ record.type = nameRecord.nameID.fontSpecific;
+
+ // Decode the name into UTF-8.
+ const size_t nameOffset = SkEndian_SwapBE16(nameRecord.offset);
+ const size_t nameLength = SkEndian_SwapBE16(nameRecord.length);
+ if (stringTableSize < nameOffset + nameLength) {
+ return false; // continue?
+ }
+ const uint8_t* nameString = stringTable + nameOffset;
+ switch (nameRecord.platformID.value) {
+ case SkOTTableName::Record::PlatformID::Windows:
+ if (SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2
+ != nameRecord.encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::UnicodeUCS4
+ != nameRecord.encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::Symbol
+ != nameRecord.encodingID.windows.value)
+ {
+ record.name.reset();
+ break; // continue?
+ }
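+            // Fall through: the Windows encodings accepted above are UTF-16BE,
+            // so they share the Unicode decode path below.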
+ case SkOTTableName::Record::PlatformID::Unicode:
+ case SkOTTableName::Record::PlatformID::ISO:
+ SkString_from_UTF16BE(nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Macintosh:
+ // TODO: need better decoding, especially on Mac.
+ if (SkOTTableName::Record::EncodingID::Macintosh::Roman
+ != nameRecord.encodingID.macintosh.value)
+ {
+ record.name.reset();
+ break; // continue?
+ }
+ SkStringFromMacRoman(nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Custom:
+ // These should never appear in a 'name' table.
+ default:
+ SkASSERT(false);
+ record.name.reset();
+ break; // continue?
+ }
+
+ // Determine the language.
+ const uint16_t languageID = SkEndian_SwapBE16(nameRecord.languageID.languageTagID);
+
+ // Handle format 1 languages.
+ if (SkOTTableName::format_1 == nameTable.format && languageID >= 0x8000) {
+ const uint16_t languageTagRecordIndex = languageID - 0x8000;
+
+ if (nameRecordsSize < sizeof(nameRecord)*nameRecordsCount) {
+ return false; //"und" or break?
+ }
+ const uint8_t* format1extData = nameRecords + sizeof(nameRecord)*nameRecordsCount;
+ size_t format1extSize = nameRecordsSize - sizeof(nameRecord)*nameRecordsCount;
+ SkOTTableName::Format1Ext format1ext;
+ if (format1extSize < sizeof(format1ext)) {
+ return false; // "und" or break?
+ }
+ memcpy(&format1ext, format1extData, sizeof(format1ext));
+
+ const uint8_t* languageTagRecords = format1extData + sizeof(format1ext);
+ size_t languageTagRecordsSize = format1extSize - sizeof(format1ext);
+ if (languageTagRecordIndex < SkEndian_SwapBE16(format1ext.langTagCount)) {
+ SkOTTableName::Format1Ext::LangTagRecord languageTagRecord;
+ if (languageTagRecordsSize < sizeof(languageTagRecord)*(languageTagRecordIndex+1)) {
+ return false; // "und"?
+ }
+ const uint8_t* languageTagData = languageTagRecords
+ + sizeof(languageTagRecord)*languageTagRecordIndex;
+ memcpy(&languageTagRecord, languageTagData, sizeof(languageTagRecord));
+
+ uint16_t languageOffset = SkEndian_SwapBE16(languageTagRecord.offset);
+ uint16_t languageLength = SkEndian_SwapBE16(languageTagRecord.length);
+
+ if (fNameTableSize < stringTableOffset + languageOffset + languageLength) {
+ return false; // "und"?
+ }
+ const uint8_t* languageString = stringTable + languageOffset;
+ SkString_from_UTF16BE(languageString, languageLength, record.language);
+ return true;
+ }
+ }
+
+ // Handle format 0 languages, translating them into BCP 47.
+ const BCP47FromLanguageId target = { languageID, "" };
+ int languageIndex = SkTSearch<BCP47FromLanguageId, BCP47FromLanguageIdLess>(
+ BCP47FromLanguageID, SK_ARRAY_COUNT(BCP47FromLanguageID), target, sizeof(target));
+ if (languageIndex >= 0) {
+ record.language = BCP47FromLanguageID[languageIndex].bcp47;
+ return true;
+ }
+
+ // Unknown language, return the BCP 47 code 'und' for 'undetermined'.
+ record.language = "und";
+ return true;
+}
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.h b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
new file mode 100644
index 0000000000..07cffd7748
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_name_DEFINED
+#define SkOTTable_name_DEFINED
+
+#include "include/core/SkString.h"
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableName {
+ static const SK_OT_CHAR TAG0 = 'n';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'm';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableName>::value;
+
+ SK_OT_USHORT format;
+ static const SK_OT_USHORT format_0 = SkTEndian_SwapBE16(0);
+ /** Format 1 was added in OpenType 1.6 (April 2009). */
+ static const SK_OT_USHORT format_1 = SkTEndian_SwapBE16(1);
+
+ /** The number of name records which follow. */
+ SK_OT_USHORT count;
+
+ /** Offset in SK_OT_BYTEs to start of string storage area (from start of table). */
+ SK_OT_USHORT stringOffset;
+
+ struct Record {
+ /** The platform ID specifies how to interpret the encoding and language ID. */
+ struct PlatformID {
+ enum Value : SK_OT_USHORT {
+ Unicode = SkTEndian_SwapBE16(0),
+ Macintosh = SkTEndian_SwapBE16(1),
+ ISO = SkTEndian_SwapBE16(2), // Deprecated, use Unicode instead.
+ Windows = SkTEndian_SwapBE16(3),
+ Custom = SkTEndian_SwapBE16(4),
+ } value;
+ } platformID;
+
+ union EncodingID {
+ SK_OT_USHORT custom;
+
+ /** Always UTF-16BE. */
+ struct Unicode {
+ enum Value : SK_OT_USHORT {
+ Unicode10 = SkTEndian_SwapBE16(0),
+ Unicode11 = SkTEndian_SwapBE16(1),
+ ISO10646 = SkTEndian_SwapBE16(2), //deprecated, use Unicode11
+ Unicode20BMP = SkTEndian_SwapBE16(3),
+ Unicode20 = SkTEndian_SwapBE16(4),
+ UnicodeVariationSequences = SkTEndian_SwapBE16(5),
+ UnicodeFull = SkTEndian_SwapBE16(6),
+ } value;
+ } unicode;
+
+ /** These are Mac encodings, see http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/
+ * for their mappings to unicode.
+ * Name table strings using PlatformID::Macintosh must use Roman.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ Roman = SkTEndian_SwapBE16(0),
+ Japanese = SkTEndian_SwapBE16(1),
+ ChineseTraditional = SkTEndian_SwapBE16(2),
+ Korean = SkTEndian_SwapBE16(3),
+ Arabic = SkTEndian_SwapBE16(4),
+ Hebrew = SkTEndian_SwapBE16(5),
+ Greek = SkTEndian_SwapBE16(6),
+ Russian = SkTEndian_SwapBE16(7),
+ RSymbol = SkTEndian_SwapBE16(8),
+ Devanagari = SkTEndian_SwapBE16(9),
+ Gurmukhi = SkTEndian_SwapBE16(10),
+ Gujarati = SkTEndian_SwapBE16(11),
+ Oriya = SkTEndian_SwapBE16(12),
+ Bengali = SkTEndian_SwapBE16(13),
+ Tamil = SkTEndian_SwapBE16(14),
+ Telugu = SkTEndian_SwapBE16(15),
+ Kannada = SkTEndian_SwapBE16(16),
+ Malayalam = SkTEndian_SwapBE16(17),
+ Sinhalese = SkTEndian_SwapBE16(18),
+ Burmese = SkTEndian_SwapBE16(19),
+ Khmer = SkTEndian_SwapBE16(20),
+ Thai = SkTEndian_SwapBE16(21),
+ Laotian = SkTEndian_SwapBE16(22),
+ Georgian = SkTEndian_SwapBE16(23),
+ Armenian = SkTEndian_SwapBE16(24),
+ ChineseSimplified = SkTEndian_SwapBE16(25),
+ Tibetan = SkTEndian_SwapBE16(26),
+ Mongolian = SkTEndian_SwapBE16(27),
+ Geez = SkTEndian_SwapBE16(28),
+ Slavic = SkTEndian_SwapBE16(29),
+ Vietnamese = SkTEndian_SwapBE16(30),
+ Sindhi = SkTEndian_SwapBE16(31),
+ Uninterpreted = SkTEndian_SwapBE16(32),
+ } value;
+ } macintosh;
+
+ /** Deprecated, use Unicode instead. */
+ struct ISO {
+ enum Value : SK_OT_USHORT {
+ ASCII7 = SkTEndian_SwapBE16(0),
+ ISO10646 = SkTEndian_SwapBE16(1),
+ ISO88591 = SkTEndian_SwapBE16(2),
+ } value;
+ } iso;
+
+ /** Name table strings using PlatformID::Windows must use Symbol, UnicodeBMPUCS2, or
+ * UnicodeUCS4. Symbol and UnicodeBMPUCS2 are both UCS2-BE, UnicodeUCS4 is actually
+ * UTF-16BE.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+                Symbol = SkTEndian_SwapBE16(0), // UCS2-BE, but don't use this font to display its own name.
+ UnicodeBMPUCS2 = SkTEndian_SwapBE16(1), // UCS2-BE, Windows default
+ ShiftJIS = SkTEndian_SwapBE16(2),
+ PRC = SkTEndian_SwapBE16(3),
+ Big5 = SkTEndian_SwapBE16(4),
+ Wansung = SkTEndian_SwapBE16(5),
+ Johab = SkTEndian_SwapBE16(6),
+ UnicodeUCS4 = SkTEndian_SwapBE16(10), // UTF-16BE. It means UCS4 in charmaps.
+ } value;
+ } windows;
+ } encodingID;
+
+ /** LanguageIDs <= 0x7FFF are predefined.
+ * LanguageIDs > 0x7FFF are indexes into the langTagRecord array
+ * (in format 1 name tables, see SkOTTableName::format).
+ */
+ union LanguageID {
+ /** A value greater than 0x7FFF.
+ * languageTagID - 0x8000 is an index into the langTagRecord array.
+ */
+ SK_OT_USHORT languageTagID;
+
+ /** These are known as Language Designators.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ English = SkTEndian_SwapBE16(0),
+ French = SkTEndian_SwapBE16(1),
+ German = SkTEndian_SwapBE16(2),
+ Italian = SkTEndian_SwapBE16(3),
+ Dutch = SkTEndian_SwapBE16(4),
+ Swedish = SkTEndian_SwapBE16(5),
+ Spanish = SkTEndian_SwapBE16(6),
+ Danish = SkTEndian_SwapBE16(7),
+ Portuguese = SkTEndian_SwapBE16(8),
+ Norwegian = SkTEndian_SwapBE16(9),
+ Hebrew = SkTEndian_SwapBE16(10),
+ Japanese = SkTEndian_SwapBE16(11),
+ Arabic = SkTEndian_SwapBE16(12),
+ Finnish = SkTEndian_SwapBE16(13),
+ Greek = SkTEndian_SwapBE16(14),
+ Icelandic = SkTEndian_SwapBE16(15),
+ Maltese = SkTEndian_SwapBE16(16),
+ Turkish = SkTEndian_SwapBE16(17),
+ Croatian = SkTEndian_SwapBE16(18),
+ ChineseTraditional = SkTEndian_SwapBE16(19),
+ Urdu = SkTEndian_SwapBE16(20),
+ Hindi = SkTEndian_SwapBE16(21),
+ Thai = SkTEndian_SwapBE16(22),
+ Korean = SkTEndian_SwapBE16(23),
+ Lithuanian = SkTEndian_SwapBE16(24),
+ Polish = SkTEndian_SwapBE16(25),
+ Hungarian = SkTEndian_SwapBE16(26),
+ Estonian = SkTEndian_SwapBE16(27),
+ Latvian = SkTEndian_SwapBE16(28),
+ Sami = SkTEndian_SwapBE16(29),
+ Faroese = SkTEndian_SwapBE16(30),
+ Farsi_Persian = SkTEndian_SwapBE16(31),
+ Russian = SkTEndian_SwapBE16(32),
+ ChineseSimplified = SkTEndian_SwapBE16(33),
+ Flemish = SkTEndian_SwapBE16(34),
+ IrishGaelic = SkTEndian_SwapBE16(35),
+ Albanian = SkTEndian_SwapBE16(36),
+ Romanian = SkTEndian_SwapBE16(37),
+ Czech = SkTEndian_SwapBE16(38),
+ Slovak = SkTEndian_SwapBE16(39),
+ Slovenian = SkTEndian_SwapBE16(40),
+ Yiddish = SkTEndian_SwapBE16(41),
+ Serbian = SkTEndian_SwapBE16(42),
+ Macedonian = SkTEndian_SwapBE16(43),
+ Bulgarian = SkTEndian_SwapBE16(44),
+ Ukrainian = SkTEndian_SwapBE16(45),
+ Byelorussian = SkTEndian_SwapBE16(46),
+ Uzbek = SkTEndian_SwapBE16(47),
+ Kazakh = SkTEndian_SwapBE16(48),
+ AzerbaijaniCyrillic = SkTEndian_SwapBE16(49),
+ AzerbaijaniArabic = SkTEndian_SwapBE16(50),
+ Armenian = SkTEndian_SwapBE16(51),
+ Georgian = SkTEndian_SwapBE16(52),
+ Moldavian = SkTEndian_SwapBE16(53),
+ Kirghiz = SkTEndian_SwapBE16(54),
+ Tajiki = SkTEndian_SwapBE16(55),
+ Turkmen = SkTEndian_SwapBE16(56),
+ MongolianTraditional = SkTEndian_SwapBE16(57),
+ MongolianCyrillic = SkTEndian_SwapBE16(58),
+ Pashto = SkTEndian_SwapBE16(59),
+ Kurdish = SkTEndian_SwapBE16(60),
+ Kashmiri = SkTEndian_SwapBE16(61),
+ Sindhi = SkTEndian_SwapBE16(62),
+ Tibetan = SkTEndian_SwapBE16(63),
+ Nepali = SkTEndian_SwapBE16(64),
+ Sanskrit = SkTEndian_SwapBE16(65),
+ Marathi = SkTEndian_SwapBE16(66),
+ Bengali = SkTEndian_SwapBE16(67),
+ Assamese = SkTEndian_SwapBE16(68),
+ Gujarati = SkTEndian_SwapBE16(69),
+ Punjabi = SkTEndian_SwapBE16(70),
+ Oriya = SkTEndian_SwapBE16(71),
+ Malayalam = SkTEndian_SwapBE16(72),
+ Kannada = SkTEndian_SwapBE16(73),
+ Tamil = SkTEndian_SwapBE16(74),
+ Telugu = SkTEndian_SwapBE16(75),
+ Sinhalese = SkTEndian_SwapBE16(76),
+ Burmese = SkTEndian_SwapBE16(77),
+ Khmer = SkTEndian_SwapBE16(78),
+ Lao = SkTEndian_SwapBE16(79),
+ Vietnamese = SkTEndian_SwapBE16(80),
+ Indonesian = SkTEndian_SwapBE16(81),
+                    Tagalong = SkTEndian_SwapBE16(82), // sic; Apple designator 82 is Tagalog
+ MalayRoman = SkTEndian_SwapBE16(83),
+ MalayArabic = SkTEndian_SwapBE16(84),
+ Amharic = SkTEndian_SwapBE16(85),
+ Tigrinya = SkTEndian_SwapBE16(86),
+ Galla = SkTEndian_SwapBE16(87),
+ Somali = SkTEndian_SwapBE16(88),
+ Swahili = SkTEndian_SwapBE16(89),
+ Kinyarwanda_Ruanda = SkTEndian_SwapBE16(90),
+ Rundi = SkTEndian_SwapBE16(91),
+ Nyanja_Chewa = SkTEndian_SwapBE16(92),
+ Malagasy = SkTEndian_SwapBE16(93),
+ Esperanto = SkTEndian_SwapBE16(94),
+ Welsh = SkTEndian_SwapBE16(128),
+ Basque = SkTEndian_SwapBE16(129),
+ Catalan = SkTEndian_SwapBE16(130),
+ Latin = SkTEndian_SwapBE16(131),
+                    Quenchua = SkTEndian_SwapBE16(132), // sic; Apple designator 132 is Quechua
+ Guarani = SkTEndian_SwapBE16(133),
+ Aymara = SkTEndian_SwapBE16(134),
+ Tatar = SkTEndian_SwapBE16(135),
+ Uighur = SkTEndian_SwapBE16(136),
+ Dzongkha = SkTEndian_SwapBE16(137),
+ JavaneseRoman = SkTEndian_SwapBE16(138),
+ SundaneseRoman = SkTEndian_SwapBE16(139),
+ Galician = SkTEndian_SwapBE16(140),
+ Afrikaans = SkTEndian_SwapBE16(141),
+ Breton = SkTEndian_SwapBE16(142),
+ Inuktitut = SkTEndian_SwapBE16(143),
+ ScottishGaelic = SkTEndian_SwapBE16(144),
+ ManxGaelic = SkTEndian_SwapBE16(145),
+ IrishGaelicWithLenition = SkTEndian_SwapBE16(146),
+ Tongan = SkTEndian_SwapBE16(147),
+ GreekPolytonic = SkTEndian_SwapBE16(148),
+ Greenlandic = SkTEndian_SwapBE16(149),
+ AzerbaijaniRoman = SkTEndian_SwapBE16(150),
+ } value;
+ } macintosh;
+
+ /** These are known as LCIDs.
+ * On Windows the current set can be had from EnumSystemLocalesEx and LocaleNameToLCID.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+ Afrikaans_SouthAfrica = SkTEndian_SwapBE16(0x0436),
+ Albanian_Albania = SkTEndian_SwapBE16(0x041C),
+ Alsatian_France = SkTEndian_SwapBE16(0x0484),
+ Amharic_Ethiopia = SkTEndian_SwapBE16(0x045E),
+ Arabic_Algeria = SkTEndian_SwapBE16(0x1401),
+ Arabic_Bahrain = SkTEndian_SwapBE16(0x3C01),
+ Arabic_Egypt = SkTEndian_SwapBE16(0x0C01),
+ Arabic_Iraq = SkTEndian_SwapBE16(0x0801),
+ Arabic_Jordan = SkTEndian_SwapBE16(0x2C01),
+ Arabic_Kuwait = SkTEndian_SwapBE16(0x3401),
+ Arabic_Lebanon = SkTEndian_SwapBE16(0x3001),
+ Arabic_Libya = SkTEndian_SwapBE16(0x1001),
+ Arabic_Morocco = SkTEndian_SwapBE16(0x1801),
+ Arabic_Oman = SkTEndian_SwapBE16(0x2001),
+ Arabic_Qatar = SkTEndian_SwapBE16(0x4001),
+ Arabic_SaudiArabia = SkTEndian_SwapBE16(0x0401),
+ Arabic_Syria = SkTEndian_SwapBE16(0x2801),
+ Arabic_Tunisia = SkTEndian_SwapBE16(0x1C01),
+ Arabic_UAE = SkTEndian_SwapBE16(0x3801),
+ Arabic_Yemen = SkTEndian_SwapBE16(0x2401),
+ Armenian_Armenia = SkTEndian_SwapBE16(0x042B),
+ Assamese_India = SkTEndian_SwapBE16(0x044D),
+ AzeriCyrillic_Azerbaijan = SkTEndian_SwapBE16(0x082C),
+ AzeriLatin_Azerbaijan = SkTEndian_SwapBE16(0x042C),
+ Bashkir_Russia = SkTEndian_SwapBE16(0x046D),
+ Basque_Basque = SkTEndian_SwapBE16(0x042D),
+ Belarusian_Belarus = SkTEndian_SwapBE16(0x0423),
+ Bengali_Bangladesh = SkTEndian_SwapBE16(0x0845),
+ Bengali_India = SkTEndian_SwapBE16(0x0445),
+ BosnianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x201A),
+ BosnianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x141A),
+ Breton_France = SkTEndian_SwapBE16(0x047E),
+ Bulgarian_Bulgaria = SkTEndian_SwapBE16(0x0402),
+ Catalan_Catalan = SkTEndian_SwapBE16(0x0403),
+ Chinese_HongKongSAR = SkTEndian_SwapBE16(0x0C04),
+ Chinese_MacaoSAR = SkTEndian_SwapBE16(0x1404),
+ Chinese_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0804),
+ Chinese_Singapore = SkTEndian_SwapBE16(0x1004),
+ Chinese_Taiwan = SkTEndian_SwapBE16(0x0404),
+ Corsican_France = SkTEndian_SwapBE16(0x0483),
+ Croatian_Croatia = SkTEndian_SwapBE16(0x041A),
+ CroatianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x101A),
+ Czech_CzechRepublic = SkTEndian_SwapBE16(0x0405),
+ Danish_Denmark = SkTEndian_SwapBE16(0x0406),
+ Dari_Afghanistan = SkTEndian_SwapBE16(0x048C),
+ Divehi_Maldives = SkTEndian_SwapBE16(0x0465),
+ Dutch_Belgium = SkTEndian_SwapBE16(0x0813),
+ Dutch_Netherlands = SkTEndian_SwapBE16(0x0413),
+ English_Australia = SkTEndian_SwapBE16(0x0C09),
+ English_Belize = SkTEndian_SwapBE16(0x2809),
+ English_Canada = SkTEndian_SwapBE16(0x1009),
+ English_Caribbean = SkTEndian_SwapBE16(0x2409),
+ English_India = SkTEndian_SwapBE16(0x4009),
+ English_Ireland = SkTEndian_SwapBE16(0x1809),
+ English_Jamaica = SkTEndian_SwapBE16(0x2009),
+ English_Malaysia = SkTEndian_SwapBE16(0x4409),
+ English_NewZealand = SkTEndian_SwapBE16(0x1409),
+ English_RepublicOfThePhilippines = SkTEndian_SwapBE16(0x3409),
+ English_Singapore = SkTEndian_SwapBE16(0x4809),
+ English_SouthAfrica = SkTEndian_SwapBE16(0x1C09),
+ English_TrinidadAndTobago = SkTEndian_SwapBE16(0x2C09),
+ English_UnitedKingdom = SkTEndian_SwapBE16(0x0809),
+ English_UnitedStates = SkTEndian_SwapBE16(0x0409),
+ English_Zimbabwe = SkTEndian_SwapBE16(0x3009),
+ Estonian_Estonia = SkTEndian_SwapBE16(0x0425),
+ Faroese_FaroeIslands = SkTEndian_SwapBE16(0x0438),
+ Filipino_Philippines = SkTEndian_SwapBE16(0x0464),
+ Finnish_Finland = SkTEndian_SwapBE16(0x040B),
+ French_Belgium = SkTEndian_SwapBE16(0x080C),
+ French_Canada = SkTEndian_SwapBE16(0x0C0C),
+ French_France = SkTEndian_SwapBE16(0x040C),
+ French_Luxembourg = SkTEndian_SwapBE16(0x140c),
+ French_PrincipalityOfMonoco = SkTEndian_SwapBE16(0x180C),
+ French_Switzerland = SkTEndian_SwapBE16(0x100C),
+ Frisian_Netherlands = SkTEndian_SwapBE16(0x0462),
+ Galician_Galician = SkTEndian_SwapBE16(0x0456),
+ Georgian_Georgia = SkTEndian_SwapBE16(0x0437),
+ German_Austria = SkTEndian_SwapBE16(0x0C07),
+ German_Germany = SkTEndian_SwapBE16(0x0407),
+ German_Liechtenstein = SkTEndian_SwapBE16(0x1407),
+ German_Luxembourg = SkTEndian_SwapBE16(0x1007),
+ German_Switzerland = SkTEndian_SwapBE16(0x0807),
+ Greek_Greece = SkTEndian_SwapBE16(0x0408),
+ Greenlandic_Greenland = SkTEndian_SwapBE16(0x046F),
+ Gujarati_India = SkTEndian_SwapBE16(0x0447),
+ HausaLatin_Nigeria = SkTEndian_SwapBE16(0x0468),
+ Hebrew_Israel = SkTEndian_SwapBE16(0x040D),
+ Hindi_India = SkTEndian_SwapBE16(0x0439),
+ Hungarian_Hungary = SkTEndian_SwapBE16(0x040E),
+ Icelandic_Iceland = SkTEndian_SwapBE16(0x040F),
+ Igbo_Nigeria = SkTEndian_SwapBE16(0x0470),
+ Indonesian_Indonesia = SkTEndian_SwapBE16(0x0421),
+ Inuktitut_Canada = SkTEndian_SwapBE16(0x045D),
+ InuktitutLatin_Canada = SkTEndian_SwapBE16(0x085D),
+ Irish_Ireland = SkTEndian_SwapBE16(0x083C),
+ isiXhosa_SouthAfrica = SkTEndian_SwapBE16(0x0434),
+ isiZulu_SouthAfrica = SkTEndian_SwapBE16(0x0435),
+ Italian_Italy = SkTEndian_SwapBE16(0x0410),
+ Italian_Switzerland = SkTEndian_SwapBE16(0x0810),
+ Japanese_Japan = SkTEndian_SwapBE16(0x0411),
+ Kannada_India = SkTEndian_SwapBE16(0x044B),
+ Kazakh_Kazakhstan = SkTEndian_SwapBE16(0x043F),
+ Khmer_Cambodia = SkTEndian_SwapBE16(0x0453),
+ Kiche_Guatemala = SkTEndian_SwapBE16(0x0486),
+ Kinyarwanda_Rwanda = SkTEndian_SwapBE16(0x0487),
+ Kiswahili_Kenya = SkTEndian_SwapBE16(0x0441),
+ Konkani_India = SkTEndian_SwapBE16(0x0457),
+ Korean_Korea = SkTEndian_SwapBE16(0x0412),
+ Kyrgyz_Kyrgyzstan = SkTEndian_SwapBE16(0x0440),
+ Lao_LaoPDR = SkTEndian_SwapBE16(0x0454),
+ Latvian_Latvia = SkTEndian_SwapBE16(0x0426),
+ Lithuanian_Lithuania = SkTEndian_SwapBE16(0x0427),
+ LowerSorbian_Germany = SkTEndian_SwapBE16(0x082E),
+ Luxembourgish_Luxembourg = SkTEndian_SwapBE16(0x046E),
+ MacedonianFYROM_FormerYugoslavRepublicOfMacedonia = SkTEndian_SwapBE16(0x042F),
+ Malay_BruneiDarussalam = SkTEndian_SwapBE16(0x083E),
+ Malay_Malaysia = SkTEndian_SwapBE16(0x043E),
+ Malayalam_India = SkTEndian_SwapBE16(0x044C),
+ Maltese_Malta = SkTEndian_SwapBE16(0x043A),
+ Maori_NewZealand = SkTEndian_SwapBE16(0x0481),
+ Mapudungun_Chile = SkTEndian_SwapBE16(0x047A),
+ Marathi_India = SkTEndian_SwapBE16(0x044E),
+ Mohawk_Mohawk = SkTEndian_SwapBE16(0x047C),
+ MongolianCyrillic_Mongolia = SkTEndian_SwapBE16(0x0450),
+ MongolianTraditional_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0850),
+ Nepali_Nepal = SkTEndian_SwapBE16(0x0461),
+ NorwegianBokmal_Norway = SkTEndian_SwapBE16(0x0414),
+ NorwegianNynorsk_Norway = SkTEndian_SwapBE16(0x0814),
+ Occitan_France = SkTEndian_SwapBE16(0x0482),
+ Odia_India = SkTEndian_SwapBE16(0x0448),
+ Pashto_Afghanistan = SkTEndian_SwapBE16(0x0463),
+ Polish_Poland = SkTEndian_SwapBE16(0x0415),
+ Portuguese_Brazil = SkTEndian_SwapBE16(0x0416),
+ Portuguese_Portugal = SkTEndian_SwapBE16(0x0816),
+ Punjabi_India = SkTEndian_SwapBE16(0x0446),
+ Quechua_Bolivia = SkTEndian_SwapBE16(0x046B),
+ Quechua_Ecuador = SkTEndian_SwapBE16(0x086B),
+ Quechua_Peru = SkTEndian_SwapBE16(0x0C6B),
+ Romanian_Romania = SkTEndian_SwapBE16(0x0418),
+ Romansh_Switzerland = SkTEndian_SwapBE16(0x0417),
+ Russian_Russia = SkTEndian_SwapBE16(0x0419),
+ SamiInari_Finland = SkTEndian_SwapBE16(0x243B),
+ SamiLule_Norway = SkTEndian_SwapBE16(0x103B),
+ SamiLule_Sweden = SkTEndian_SwapBE16(0x143B),
+ SamiNorthern_Finland = SkTEndian_SwapBE16(0x0C3B),
+ SamiNorthern_Norway = SkTEndian_SwapBE16(0x043B),
+ SamiNorthern_Sweden = SkTEndian_SwapBE16(0x083B),
+ SamiSkolt_Finland = SkTEndian_SwapBE16(0x203B),
+ SamiSouthern_Norway = SkTEndian_SwapBE16(0x183B),
+ SamiSouthern_Sweden = SkTEndian_SwapBE16(0x1C3B),
+ Sanskrit_India = SkTEndian_SwapBE16(0x044F),
+ SerbianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x1C1A),
+ SerbianCyrillic_Serbia = SkTEndian_SwapBE16(0x0C1A),
+ SerbianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x181A),
+ SerbianLatin_Serbia = SkTEndian_SwapBE16(0x081A),
+ SesothoSaLeboa_SouthAfrica = SkTEndian_SwapBE16(0x046C),
+ Setswana_SouthAfrica = SkTEndian_SwapBE16(0x0432),
+ Sinhala_SriLanka = SkTEndian_SwapBE16(0x045B),
+ Slovak_Slovakia = SkTEndian_SwapBE16(0x041B),
+ Slovenian_Slovenia = SkTEndian_SwapBE16(0x0424),
+ Spanish_Argentina = SkTEndian_SwapBE16(0x2C0A),
+ Spanish_Bolivia = SkTEndian_SwapBE16(0x400A),
+ Spanish_Chile = SkTEndian_SwapBE16(0x340A),
+ Spanish_Colombia = SkTEndian_SwapBE16(0x240A),
+ Spanish_CostaRica = SkTEndian_SwapBE16(0x140A),
+ Spanish_DominicanRepublic = SkTEndian_SwapBE16(0x1C0A),
+ Spanish_Ecuador = SkTEndian_SwapBE16(0x300A),
+ Spanish_ElSalvador = SkTEndian_SwapBE16(0x440A),
+ Spanish_Guatemala = SkTEndian_SwapBE16(0x100A),
+ Spanish_Honduras = SkTEndian_SwapBE16(0x480A),
+ Spanish_Mexico = SkTEndian_SwapBE16(0x080A),
+ Spanish_Nicaragua = SkTEndian_SwapBE16(0x4C0A),
+ Spanish_Panama = SkTEndian_SwapBE16(0x180A),
+ Spanish_Paraguay = SkTEndian_SwapBE16(0x3C0A),
+ Spanish_Peru = SkTEndian_SwapBE16(0x280A),
+ Spanish_PuertoRico = SkTEndian_SwapBE16(0x500A),
+ SpanishModernSort_Spain = SkTEndian_SwapBE16(0x0C0A),
+ SpanishTraditionalSort_Spain = SkTEndian_SwapBE16(0x040A),
+ Spanish_UnitedStates = SkTEndian_SwapBE16(0x540A),
+ Spanish_Uruguay = SkTEndian_SwapBE16(0x380A),
+ Spanish_Venezuela = SkTEndian_SwapBE16(0x200A),
+                Sweden_Finland = SkTEndian_SwapBE16(0x081D), // sic; Swedish as used in Finland
+ Swedish_Sweden = SkTEndian_SwapBE16(0x041D),
+ Syriac_Syria = SkTEndian_SwapBE16(0x045A),
+ TajikCyrillic_Tajikistan = SkTEndian_SwapBE16(0x0428),
+ TamazightLatin_Algeria = SkTEndian_SwapBE16(0x085F),
+ Tamil_India = SkTEndian_SwapBE16(0x0449),
+ Tatar_Russia = SkTEndian_SwapBE16(0x0444),
+ Telugu_India = SkTEndian_SwapBE16(0x044A),
+ Thai_Thailand = SkTEndian_SwapBE16(0x041E),
+ Tibetan_PRC = SkTEndian_SwapBE16(0x0451),
+ Turkish_Turkey = SkTEndian_SwapBE16(0x041F),
+ Turkmen_Turkmenistan = SkTEndian_SwapBE16(0x0442),
+ Uighur_PRC = SkTEndian_SwapBE16(0x0480),
+ Ukrainian_Ukraine = SkTEndian_SwapBE16(0x0422),
+ UpperSorbian_Germany = SkTEndian_SwapBE16(0x042E),
+ Urdu_IslamicRepublicOfPakistan = SkTEndian_SwapBE16(0x0420),
+ UzbekCyrillic_Uzbekistan = SkTEndian_SwapBE16(0x0843),
+ UzbekLatin_Uzbekistan = SkTEndian_SwapBE16(0x0443),
+ Vietnamese_Vietnam = SkTEndian_SwapBE16(0x042A),
+ Welsh_UnitedKingdom = SkTEndian_SwapBE16(0x0452),
+ Wolof_Senegal = SkTEndian_SwapBE16(0x0488),
+ Yakut_Russia = SkTEndian_SwapBE16(0x0485),
+ Yi_PRC = SkTEndian_SwapBE16(0x0478),
+ Yoruba_Nigeria = SkTEndian_SwapBE16(0x046A),
+ } value;
+ } windows;
+ } languageID;
+
+ /** NameIDs <= 0xFF are predefined. Those > 0xFF are font specific. */
+ union NameID {
+ /** A font specific name id which should be greater than 0xFF. */
+ SK_OT_USHORT fontSpecific;
+ struct Predefined {
+ enum Value : SK_OT_USHORT {
+ CopyrightNotice = SkTEndian_SwapBE16(0),
+ FontFamilyName = SkTEndian_SwapBE16(1),
+ FontSubfamilyName = SkTEndian_SwapBE16(2),
+ UniqueFontIdentifier = SkTEndian_SwapBE16(3),
+ FullFontName = SkTEndian_SwapBE16(4),
+ VersionString = SkTEndian_SwapBE16(5), //Version <number>.<number>
+ PostscriptName = SkTEndian_SwapBE16(6), //See spec for constraints.
+ Trademark = SkTEndian_SwapBE16(7),
+ ManufacturerName = SkTEndian_SwapBE16(8),
+ Designer = SkTEndian_SwapBE16(9),
+ Description = SkTEndian_SwapBE16(10),
+ URLVendor = SkTEndian_SwapBE16(11),
+ URLDesigner = SkTEndian_SwapBE16(12),
+ LicenseDescription = SkTEndian_SwapBE16(13),
+ LicenseInfoURL = SkTEndian_SwapBE16(14),
+ PreferredFamily = SkTEndian_SwapBE16(16),
+ PreferredSubfamily = SkTEndian_SwapBE16(17),
+ CompatibleFullName = SkTEndian_SwapBE16(18),
+ SampleText = SkTEndian_SwapBE16(19),
+ PostscriptCIDFindfontName = SkTEndian_SwapBE16(20),
+ WWSFamilyName = SkTEndian_SwapBE16(21),
+ WWSSubfamilyName = SkTEndian_SwapBE16(22),
+ } value;
+ } predefined;
+ } nameID;
+
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //nameRecord[count];
+
+ struct Format1Ext {
+ /** The number of languageTagRecords which follow. */
+ SK_OT_USHORT langTagCount;
+
+ /** The encoding of a langTagRecord string is always UTF-16BE.
+ * The content should follow IETF specification BCP 47.
+ */
+ struct LangTagRecord {
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //langTagRecord[langTagCount]
+ }; //format1ext (if format == format_1)
+
+ class Iterator {
+ public:
+ Iterator(const uint8_t* nameTable, size_t size)
+ : fNameTable(nameTable), fNameTableSize(size), fIndex(0), fType(-1) { }
+ Iterator(const uint8_t* nameTable, size_t size, SK_OT_USHORT type)
+ : fNameTable(nameTable), fNameTableSize(size), fIndex(0), fType(type)
+ { }
+
+ void reset(SK_OT_USHORT type) {
+ fIndex = 0;
+ fType = type;
+ }
+
+ struct Record {
+ SkString name;
+ SkString language;
+ SK_OT_USHORT type;
+ };
+ bool next(Record&);
+
+ private:
+ const uint8_t* fNameTable;
+ const size_t fNameTableSize;
+ size_t fIndex;
+ int fType;
+ };
+};
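+
+// Usage sketch (illustrative only; 'data' and 'size' are a hypothetical buffer
+// holding a raw 'name' table, e.g. obtained via SkTypeface::getTableData):
+//   SkOTTableName::Iterator iter(data, size,
+//       SkOTTableName::Record::NameID::Predefined::FontFamilyName);
+//   SkOTTableName::Iterator::Record rec;
+//   while (iter.next(rec)) { /* rec.name is UTF-8, rec.language is BCP 47 */ }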
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableName) == 6, "sizeof_SkOTTableName_not_6");
+static_assert(sizeof(SkOTTableName::Format1Ext) == 2, "sizeof_SkOTTableNameF1_not_2");
+static_assert(sizeof(SkOTTableName::Format1Ext::LangTagRecord) == 4, "sizeof_SkOTTableNameLangTagRecord_not_4");
+static_assert(sizeof(SkOTTableName::Record) == 12, "sizeof_SkOTTableNameRecord_not_12");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_post.h b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
new file mode 100644
index 0000000000..943bffb9f0
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_post_DEFINED
+#define SkOTTable_post_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTablePostScript {
+ static const SK_OT_CHAR TAG0 = 'p';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 't';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTablePostScript>::value;
+
+ struct Format {
+ enum Value : SK_OT_Fixed {
+ version1 = SkTEndian_SwapBE32(0x00010000),
+ version2 = SkTEndian_SwapBE32(0x00020000),
+ version2_5 = SkTEndian_SwapBE32(0x00025000),
+ version3 = SkTEndian_SwapBE32(0x00030000),
+ version4 = SkTEndian_SwapBE32(0x00040000),
+ };
+ SK_OT_Fixed value;
+ } format;
+ SK_OT_Fixed italicAngle;
+ SK_OT_FWORD underlinePosition;
+ SK_OT_FWORD underlineThickness;
+ SK_OT_ULONG isFixedPitch;
+ SK_OT_ULONG minMemType42;
+ SK_OT_ULONG maxMemType42;
+ SK_OT_ULONG minMemType1;
+ SK_OT_ULONG maxMemType1;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTablePostScript, maxMemType1) == 28, "SkOTTablePostScript_maxMemType1_not_at_28");
+static_assert(sizeof(SkOTTablePostScript) == 32, "sizeof_SkOTTablePostScript_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.cpp b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
new file mode 100644
index 0000000000..061ceb069e
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sfnt/SkOTUtils.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_name.h"
+#include "src/sfnt/SkSFNTHeader.h"
+
+extern const uint8_t SK_OT_GlyphData_NoOutline[] = {
+ 0x0,0x0, //SkOTTableGlyphData::numberOfContours
+ 0x0,0x0, //SkOTTableGlyphData::xMin
+ 0x0,0x0, //SkOTTableGlyphData::yMin
+ 0x0,0x0, //SkOTTableGlyphData::xMax
+ 0x0,0x0, //SkOTTableGlyphData::yMax
+
+ 0x0,0x0, //SkOTTableGlyphDataInstructions::length
+};
+
+uint32_t SkOTUtils::CalcTableChecksum(SK_OT_ULONG *data, size_t length) {
+ uint32_t sum = 0;
+ SK_OT_ULONG *dataEnd = data + ((length + 3) & ~3) / sizeof(SK_OT_ULONG);
+ for (; data < dataEnd; ++data) {
+ sum += SkEndian_SwapBE32(*data);
+ }
+ return sum;
+}
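+
+// Usage sketch (assumes the table buffer is 4-byte aligned and zero-padded to
+// a 4-byte boundary, since the loop above reads whole SK_OT_ULONGs):
+//   uint32_t sum = SkOTUtils::CalcTableChecksum(
+//       reinterpret_cast<SK_OT_ULONG*>(tableData), tableLength);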
+
+SkData* SkOTUtils::RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen) {
+
+ // Get the sfnt header.
+ SkSFNTHeader sfntHeader;
+ if (fontData->read(&sfntHeader, sizeof(sfntHeader)) < sizeof(sfntHeader)) {
+ return nullptr;
+ }
+
+ // Find the existing 'name' table.
+ int tableIndex;
+ SkSFNTHeader::TableDirectoryEntry tableEntry;
+ int numTables = SkEndian_SwapBE16(sfntHeader.numTables);
+ for (tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ if (fontData->read(&tableEntry, sizeof(tableEntry)) < sizeof(tableEntry)) {
+ return nullptr;
+ }
+ if (SkOTTableName::TAG == tableEntry.tag) {
+ break;
+ }
+ }
+ if (tableIndex == numTables) {
+ return nullptr;
+ }
+
+ if (!fontData->rewind()) {
+ return nullptr;
+ }
+
+ // The required 'name' record types: Family, Style, Unique, Full and PostScript.
+ static constexpr std::array<SkOTTableName::Record::NameID::Predefined::Value, 5> names{{
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::FontSubfamilyName,
+ SkOTTableName::Record::NameID::Predefined::UniqueFontIdentifier,
+ SkOTTableName::Record::NameID::Predefined::FullFontName,
+ SkOTTableName::Record::NameID::Predefined::PostscriptName,
+ }};
+
+ // GDI will not use a Symbol cmap table if there is no Symbol encoded name.
+ static constexpr std::array<SkOTTableName::Record::EncodingID::Windows::Value, 2> encodings{{
+ SkOTTableName::Record::EncodingID::Windows::Symbol,
+ SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2,
+ }};
+
+ // Copy the data, leaving out the old name table.
+ // In theory, we could also remove the DSIG table if it exists.
+ size_t nameTableLogicalSize = sizeof(SkOTTableName)
+ + (encodings.size() * names.size() * sizeof(SkOTTableName::Record))
+ + (fontNameLen * sizeof(SK_OT_USHORT));
+ size_t nameTablePhysicalSize = (nameTableLogicalSize + 3) & ~3; // Rounded up to a multiple of 4.
+
+ size_t oldNameTablePhysicalSize = (SkEndian_SwapBE32(tableEntry.logicalLength) + 3) & ~3; // Rounded up to a multiple of 4.
+ size_t oldNameTableOffset = SkEndian_SwapBE32(tableEntry.offset);
+
+ //originalDataSize is the size of the original data without the name table.
+ size_t originalDataSize = fontData->getLength() - oldNameTablePhysicalSize;
+ size_t newDataSize = originalDataSize + nameTablePhysicalSize;
+
+ auto rewrittenFontData = SkData::MakeUninitialized(newDataSize);
+ SK_OT_BYTE* data = static_cast<SK_OT_BYTE*>(rewrittenFontData->writable_data());
+
+ if (fontData->read(data, oldNameTableOffset) < oldNameTableOffset) {
+ return nullptr;
+ }
+ if (fontData->skip(oldNameTablePhysicalSize) < oldNameTablePhysicalSize) {
+ return nullptr;
+ }
+ if (fontData->read(data + oldNameTableOffset, originalDataSize - oldNameTableOffset) < originalDataSize - oldNameTableOffset) {
+ return nullptr;
+ }
+
+ //Fix up the offsets of the directory entries after the old 'name' table entry.
+ SkSFNTHeader::TableDirectoryEntry* currentEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader));
+ SkSFNTHeader::TableDirectoryEntry* endEntry = currentEntry + numTables;
+ SkSFNTHeader::TableDirectoryEntry* headTableEntry = nullptr;
+ for (; currentEntry < endEntry; ++currentEntry) {
+ uint32_t oldOffset = SkEndian_SwapBE32(currentEntry->offset);
+ if (oldOffset > oldNameTableOffset) {
+ currentEntry->offset = SkEndian_SwapBE32(SkToU32(oldOffset - oldNameTablePhysicalSize));
+ }
+
+ if (SkOTTableHead::TAG == currentEntry->tag) {
+ headTableEntry = currentEntry;
+ }
+ }
+
+ // Make the table directory entry point to the new 'name' table.
+ SkSFNTHeader::TableDirectoryEntry* nameTableEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader)) + tableIndex;
+ nameTableEntry->logicalLength = SkEndian_SwapBE32(SkToU32(nameTableLogicalSize));
+ nameTableEntry->offset = SkEndian_SwapBE32(SkToU32(originalDataSize));
+
+ // Write the new 'name' table after the original font data.
+ SkOTTableName* nameTable = reinterpret_cast<SkOTTableName*>(data + originalDataSize);
+ unsigned short stringOffset = sizeof(SkOTTableName) + (encodings.size() * names.size() * sizeof(SkOTTableName::Record));
+ nameTable->format = SkOTTableName::format_0;
+ nameTable->count = SkEndian_SwapBE16(encodings.size() * names.size());
+ nameTable->stringOffset = SkEndian_SwapBE16(stringOffset);
+
+ SkOTTableName::Record* nameRecord = reinterpret_cast<SkOTTableName::Record*>(data + originalDataSize + sizeof(SkOTTableName));
+ for (const auto& encoding : encodings) {
+ for (const auto& name : names) {
+ nameRecord->platformID.value = SkOTTableName::Record::PlatformID::Windows;
+ nameRecord->encodingID.windows.value = encoding;
+ nameRecord->languageID.windows.value = SkOTTableName::Record::LanguageID::Windows::English_UnitedStates;
+ nameRecord->nameID.predefined.value = name;
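+            // Every record references the same single copy of fontName at
+            // offset 0 in the string storage written below.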
+ nameRecord->offset = SkEndian_SwapBE16(0);
+ nameRecord->length = SkEndian_SwapBE16(SkToU16(fontNameLen * sizeof(SK_OT_USHORT)));
+ ++nameRecord;
+ }
+ }
+
+ SK_OT_USHORT* nameString = reinterpret_cast<SK_OT_USHORT*>(data + originalDataSize + stringOffset);
+ for (int i = 0; i < fontNameLen; ++i) {
+ nameString[i] = SkEndian_SwapBE16(fontName[i]);
+ }
+
+ unsigned char* logical = data + originalDataSize + nameTableLogicalSize;
+ unsigned char* physical = data + originalDataSize + nameTablePhysicalSize;
+ for (; logical < physical; ++logical) {
+ *logical = 0;
+ }
+
+ // Update the table checksum in the directory entry.
+ nameTableEntry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(nameTable), nameTableLogicalSize));
+
+ // Update the checksum adjustment in the head table.
+ if (headTableEntry) {
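+        // Per the sfnt spec: zero out checksumAdjustment, checksum the whole
+        // font, then store the magic constant 0xB1B0AFBA minus that sum so
+        // that the adjusted font checksums to the magic constant.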
+ size_t headTableOffset = SkEndian_SwapBE32(headTableEntry->offset);
+ if (headTableOffset + sizeof(SkOTTableHead) < originalDataSize) {
+ SkOTTableHead* headTable = reinterpret_cast<SkOTTableHead*>(data + headTableOffset);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(0);
+ uint32_t unadjustedFontChecksum = SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(data), originalDataSize + nameTablePhysicalSize);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(SkOTTableHead::fontChecksum - unadjustedFontChecksum);
+ }
+ }
+
+ return rewrittenFontData.release();
+}
+
+sk_sp<SkOTUtils::LocalizedStrings_NameTable>
+SkOTUtils::LocalizedStrings_NameTable::Make(const SkTypeface& typeface,
+ SK_OT_USHORT types[],
+ int typesCount)
+{
+ static const SkFontTableTag nameTag = SkSetFourByteTag('n','a','m','e');
+ size_t nameTableSize = typeface.getTableSize(nameTag);
+ if (0 == nameTableSize) {
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t[]> nameTableData(new uint8_t[nameTableSize]);
+ size_t copied = typeface.getTableData(nameTag, 0, nameTableSize, nameTableData.get());
+ if (copied != nameTableSize) {
+ return nullptr;
+ }
+
+ return sk_sp<SkOTUtils::LocalizedStrings_NameTable>(
+ new SkOTUtils::LocalizedStrings_NameTable(std::move(nameTableData), nameTableSize,
+ types, typesCount));
+}
+
+sk_sp<SkOTUtils::LocalizedStrings_NameTable>
+SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(const SkTypeface& typeface) {
+ return Make(typeface,
+ SkOTUtils::LocalizedStrings_NameTable::familyNameTypes,
+ SK_ARRAY_COUNT(SkOTUtils::LocalizedStrings_NameTable::familyNameTypes));
+}
+
+bool SkOTUtils::LocalizedStrings_NameTable::next(SkTypeface::LocalizedString* localizedString) {
+ do {
+ SkOTTableName::Iterator::Record record;
+ if (fFamilyNameIter.next(record)) {
+ localizedString->fString = record.name;
+ localizedString->fLanguage = record.language;
+ return true;
+ }
+ if (fTypesCount == fTypesIndex + 1) {
+ return false;
+ }
+ ++fTypesIndex;
+ fFamilyNameIter.reset(fTypes[fTypesIndex]);
+ } while (true);
+}
+
+SK_OT_USHORT SkOTUtils::LocalizedStrings_NameTable::familyNameTypes[3] = {
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::PreferredFamily,
+ SkOTTableName::Record::NameID::Predefined::WWSFamilyName,
+};
+
+void SkOTUtils::SetAdvancedTypefaceFlags(SkOTTableOS2_V4::Type fsType,
+ SkAdvancedTypefaceMetrics* info) {
+ SkASSERT(info);
+ // The logic should be identical to SkTypeface_FreeType::onGetAdvancedMetrics().
+ if (fsType.raw.value != 0) {
+ if (SkToBool(fsType.field.Restricted) || SkToBool(fsType.field.Bitmap)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (SkToBool(fsType.field.NoSubsetting)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.h b/gfx/skia/skia/src/sfnt/SkOTUtils.h
new file mode 100644
index 0000000000..fb2732385c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTUtils_DEFINED
+#define SkOTUtils_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_OS_2_V4.h"
+#include "src/sfnt/SkOTTable_name.h"
+
+class SkData;
+class SkStream;
+struct SkAdvancedTypefaceMetrics;
+
+struct SkOTUtils {
+ /**
+ * Calculates the OpenType checksum for data.
+ */
+ static uint32_t CalcTableChecksum(SK_OT_ULONG *data, size_t length);
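+    // (The OpenType table checksum is the 32-bit sum, mod 2^32, of the data
+    // read as big-endian ULONGs; length is in bytes and the data is assumed
+    // to be zero-padded to a 4-byte boundary.)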
+
+ /**
+ * Renames an sfnt font. On failure (invalid data or not an sfnt font)
+ * returns nullptr.
+ *
+ * Essentially, this removes any existing 'name' table and replaces it
+ * with a new one in which FontFamilyName, FontSubfamilyName,
+ * UniqueFontIdentifier, FullFontName, and PostscriptName are fontName.
+ *
+ * The new 'name' table records will be written with the Windows,
+ * UnicodeBMPUCS2, and English_UnitedStates settings.
+ *
+ * fontName and fontNameLen must be specified in terms of ASCII chars.
+ *
+ * Does not affect fontData's ownership.
+ */
+ static SkData* RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen);
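+    // A minimal usage sketch (the stream variable is hypothetical); the
+    // caller adopts the returned SkData:
+    //   sk_sp<SkData> renamed(SkOTUtils::RenameFont(stream.get(), "Renamed", 7));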
+
+    /** An implementation of LocalizedStrings which obtains its data from a 'name' table. */
+ class LocalizedStrings_NameTable : public SkTypeface::LocalizedStrings {
+ public:
+        /** Takes ownership of nameTableData; it is freed when this object is destroyed. */
+ LocalizedStrings_NameTable(std::unique_ptr<uint8_t[]> nameTableData, size_t size,
+ SK_OT_USHORT types[],
+ int typesCount)
+ : fTypes(types), fTypesCount(typesCount), fTypesIndex(0)
+ , fNameTableData(std::move(nameTableData))
+ , fFamilyNameIter(fNameTableData.get(), size, fTypes[fTypesIndex])
+ { }
+
+ /** Creates an iterator over all data in the 'name' table of a typeface.
+ * If no valid 'name' table can be found, returns nullptr.
+ */
+ static sk_sp<LocalizedStrings_NameTable> Make(
+ const SkTypeface& typeface,
+ SK_OT_USHORT types[],
+ int typesCount);
+
+ /** Creates an iterator over all the family names in the 'name' table of a typeface.
+ * If no valid 'name' table can be found, returns nullptr.
+ */
+ static sk_sp<LocalizedStrings_NameTable> MakeForFamilyNames(const SkTypeface& typeface);
+
+ bool next(SkTypeface::LocalizedString* localizedString) override;
+ private:
+ static SK_OT_USHORT familyNameTypes[3];
+
+ SK_OT_USHORT* fTypes;
+ int fTypesCount;
+ int fTypesIndex;
+ std::unique_ptr<uint8_t[]> fNameTableData;
+ SkOTTableName::Iterator fFamilyNameIter;
+ };
+
+ /** An implementation of LocalizedStrings which has one name. */
+ class LocalizedStrings_SingleName : public SkTypeface::LocalizedStrings {
+ public:
+ LocalizedStrings_SingleName(SkString name, SkString language)
+ : fName(name), fLanguage(language), fHasNext(true)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ localizedString->fString = fName;
+ localizedString->fLanguage = fLanguage;
+
+ bool hadNext = fHasNext;
+ fHasNext = false;
+ return hadNext;
+ }
+
+ private:
+ SkString fName;
+ SkString fLanguage;
+ bool fHasNext;
+ };
+
+ static void SetAdvancedTypefaceFlags(SkOTTableOS2_V4::Type fsType,
+ SkAdvancedTypefaceMetrics* info);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkPanose.h b/gfx/skia/skia/src/sfnt/SkPanose.h
new file mode 100644
index 0000000000..50ccb7a301
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkPanose.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPanose_DEFINED
+#define SkPanose_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkPanose {
+ //This value changes the meaning of the following 9 bytes.
+ enum class FamilyType : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ TextAndDisplay = 2,
+ Script = 3,
+ Decorative = 4,
+ Pictoral = 5,
+ } bFamilyType;
+
+ union Data {
+ struct TextAndDisplay {
+ enum class SerifStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Bone = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ } bSerifStyle;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Proportion : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ OldStyle = 2,
+ Modern = 3,
+ EvenWidth = 4,
+ Expanded = 5,
+ Condensed = 6,
+ VeryExpanded = 7,
+ VeryCondensed = 8,
+ Monospaced = 9,
+ } bProportion;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+#ifdef SK_WIN_PANOSE
+ //This is what Windows (and FontForge and Apple TT spec) define.
+ //The Impact font uses 9.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ GradualDiagonal = 2,
+ GradualTransitional = 3,
+ GradualVertical = 4,
+ GradualHorizontal = 5,
+ RapidVertical = 6,
+ RapidHorizontal = 7,
+ InstantVertical = 8,
+ } bStrokeVariation;
+#else
+            //The stroke variation description in OT OS/2 ver0 and ver1 is incorrect.
+ //This is what HP Panose says.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoVariation = 2,
+ Gradual_Diagonal = 3,
+ Gradual_Transitional = 4,
+ Gradual_Vertical = 5,
+ Gradual_Horizontal = 6,
+ Rapid_Vertical = 7,
+ Rapid_Horizontal = 8,
+ Instant_Vertical = 9,
+ Instant_Horizontal = 10,
+ } bStrokeVariation;
+#endif
+
+ enum class ArmStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ StraightArms_Horizontal = 2,
+ StraightArms_Wedge = 3,
+ StraightArms_Vertical = 4,
+ StraightArms_SingleSerif = 5,
+ StraightArms_DoubleSerif = 6,
+ NonStraightArms_Horizontal = 7,
+ NonStraightArms_Wedge = 8,
+ NonStraightArms_Vertical = 9,
+ NonStraightArms_SingleSerif = 10,
+ NonStraightArms_DoubleSerif = 11,
+ } bArmStyle;
+
+ enum class Letterform : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Normal_Contact = 2,
+ Normal_Weighted = 3,
+ Normal_Boxed = 4,
+ Normal_Flattened = 5,
+ Normal_Rounded = 6,
+ Normal_OffCenter = 7,
+ Normal_Square = 8,
+ Oblique_Contact = 9,
+ Oblique_Weighted = 10,
+ Oblique_Boxed = 11,
+ Oblique_Flattened = 12,
+ Oblique_Rounded = 13,
+ Oblique_OffCenter = 14,
+ Oblique_Square = 15,
+ } bLetterform;
+
+ enum class Midline : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard_Trimmed = 2,
+ Standard_Pointed = 3,
+ Standard_Serifed = 4,
+ High_Trimmed = 5,
+ High_Pointed = 6,
+ High_Serifed = 7,
+ Constant_Trimmed = 8,
+ Constant_Pointed = 9,
+ Constant_Serifed = 10,
+ Low_Trimmed = 11,
+ Low_Pointed = 12,
+ Low_Serifed = 13,
+ } bMidline;
+
+ enum class XHeight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Constant_Small = 2,
+ Constant_Standard = 3,
+ Constant_Large = 4,
+ Ducking_Small = 5,
+ Ducking_Standard = 6,
+ Ducking_Large = 7,
+ } bXHeight;
+ } textAndDisplay;
+
+ struct Script {
+ enum class ToolKind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ FlatNib = 2,
+ PressurePoint = 3,
+ Engraved = 4,
+ Ball = 5,
+ Brush = 6,
+ Rough = 7,
+ FeltPen = 8,
+ WildBrush = 9,
+ } bToolKind;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatio : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryCondensed = 2,
+ Condensed = 3,
+ Normal = 4,
+ Expanded = 5,
+ VeryExpanded = 6,
+ } bAspectRatio;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Roman_Disconnected = 2,
+ Roman_Trailing = 3,
+ Roman_Connected = 4,
+ Cursive_Disconnected = 5,
+ Cursive_Trailing = 6,
+ Cursive_Connected = 7,
+ Blackletter_Disconnected = 8,
+ Blackletter_Trailing = 9,
+ Blackletter_Connected = 10,
+ } bTopology;
+
+ enum class Form : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Upright_NoWrapping = 2,
+ Upright_SomeWrapping = 3,
+ Upright_MoreWrapping = 4,
+ Upright_ExtremeWrapping = 5,
+ Oblique_NoWrapping = 6,
+ Oblique_SomeWrapping = 7,
+ Oblique_MoreWrapping = 8,
+ Oblique_ExtremeWrapping = 9,
+ Exaggerated_NoWrapping = 10,
+ Exaggerated_SomeWrapping = 11,
+ Exaggerated_MoreWrapping = 12,
+ Exaggerated_ExtremeWrapping = 13,
+ } bForm;
+
+ enum class Finials : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_NoLoops = 2,
+ None_ClosedLoops = 3,
+ None_OpenLoops = 4,
+ Sharp_NoLoops = 5,
+ Sharp_ClosedLoops = 6,
+ Sharp_OpenLoops = 7,
+ Tapered_NoLoops = 8,
+ Tapered_ClosedLoops = 9,
+ Tapered_OpenLoops = 10,
+ Round_NoLoops = 11,
+ Round_ClosedLoops = 12,
+ Round_OpenLoops = 13,
+ } bFinials;
+
+ enum class XAscent : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLow = 2,
+ Low = 3,
+ Medium = 4,
+ High = 5,
+ VeryHigh = 6,
+ } bXAscent;
+ } script;
+
+ struct Decorative {
+ enum class Class : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Derivative = 2,
+ NonStandard_Topology = 3,
+ NonStandard_Elements = 4,
+ NonStandard_Aspect = 5,
+ Initials = 6,
+ Cartoon = 7,
+ PictureStems = 8,
+ Ornamented = 9,
+ TextAndBackground = 10,
+ Collage = 11,
+ Montage = 12,
+ } bClass;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Aspect : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ SuperCondensed = 2,
+ VeryCondensed = 3,
+ Condensed = 4,
+ Normal = 5,
+ Extended = 6,
+ VeryExtended = 7,
+ SuperExtended = 8,
+ Monospaced = 9,
+ } bAspect;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ HorizontalLow = 10,
+ HorizontalMedium = 11,
+ HorizontalHigh = 12,
+ Broken = 13,
+ } bContrast;
+
+ enum class SerifVariant : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Oval = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpendicularSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ Script = 16,
+ } bSerifVariant;
+
+ enum class Treatment : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_StandardSolidFill = 2,
+ White_NoFill = 3,
+ PatternedFill = 4,
+ ComplexFill = 5,
+ ShapedFill = 6,
+ DrawnDistressed = 7,
+ } bTreatment;
+
+ enum class Lining : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ Inline = 3,
+ Outline = 4,
+ Engraved = 5,
+ Shadow = 6,
+ Relief = 7,
+ Backdrop = 8,
+ } bLining;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard = 2,
+ Square = 3,
+ MultipleSegment = 4,
+ DecoWacoMidlines = 5,
+ UnevenWeighting = 6,
+ DiverseArms = 7,
+ DiverseForms = 8,
+ LombardicForms = 9,
+ UpperCaseInLowerCase = 10,
+ ImpliedTopology = 11,
+ HorseshoeEandA = 12,
+ Cursive = 13,
+ Blackletter = 14,
+ SwashVariance = 15,
+ } bTopology;
+
+ enum class RangeOfCharacters : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ExtendedCollection = 2,
+ Litterals = 3,
+ NoLowerCase = 4,
+ SmallCaps = 5,
+ } bRangeOfCharacters;
+ } decorative;
+
+ struct Pictoral {
+ enum class Kind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Montages = 2,
+ Pictures = 3,
+ Shapes = 4,
+ Scientific = 5,
+ Music = 6,
+ Expert = 7,
+ Patterns = 8,
+ Boarders = 9,
+ Icons = 10,
+ Logos = 11,
+ IndustrySpecific = 12,
+ } bKind;
+
+ enum class Weight : SK_OT_BYTE {
+ NoFit = 1,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatioAndContrast : SK_OT_BYTE {
+ NoFit = 1,
+ } bAspectRatioAndContrast;
+
+ enum class AspectRatio94 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio94;
+
+ enum class AspectRatio119 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio119;
+
+ enum class AspectRatio157 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio157;
+
+ enum class AspectRatio163 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio163;
+ } pictoral;
+ } data;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkPanose) == 10, "sizeof_SkPanose_not_10");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkSFNTHeader.h b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
new file mode 100644
index 0000000000..06745c39bf
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSFNTHeader_DEFINED
+#define SkSFNTHeader_DEFINED
+
+#include "src/core/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+//All SK_SFNT_ prefixed types should be considered big endian.
+typedef uint16_t SK_SFNT_USHORT;
+typedef uint32_t SK_SFNT_ULONG;
+
+#pragma pack(push, 1)
+
+struct SkSFNTHeader {
+ SK_SFNT_ULONG fontType;
+ struct fontType_WindowsTrueType {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 1;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_WindowsTrueType>::value;
+ };
+ struct fontType_MacTrueType {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'r';
+ static const SK_OT_CHAR TAG2 = 'u';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_MacTrueType>::value;
+ };
+ struct fontType_PostScript {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'y';
+ static const SK_OT_CHAR TAG2 = 'p';
+ static const SK_OT_CHAR TAG3 = '1';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_PostScript>::value;
+ };
+ struct fontType_OpenTypeCFF {
+ static const SK_OT_CHAR TAG0 = 'O';
+ static const SK_OT_CHAR TAG1 = 'T';
+ static const SK_OT_CHAR TAG2 = 'T';
+ static const SK_OT_CHAR TAG3 = 'O';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_OpenTypeCFF>::value;
+ };
+
+ SK_SFNT_USHORT numTables;
+ SK_SFNT_USHORT searchRange;
+ SK_SFNT_USHORT entrySelector;
+ SK_SFNT_USHORT rangeShift;
+
+ struct TableDirectoryEntry {
+ SK_SFNT_ULONG tag;
+ SK_SFNT_ULONG checksum;
+ SK_SFNT_ULONG offset; //From beginning of header.
+ SK_SFNT_ULONG logicalLength;
+ }; //tableDirectoryEntries[numTables]
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkSFNTHeader) == 12, "sizeof_SkSFNTHeader_not_12");
+static_assert(sizeof(SkSFNTHeader::TableDirectoryEntry) == 16, "sizeof_SkSFNTHeader_TableDirectoryEntry_not_16");
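+
+// A minimal read sketch (fontData is a hypothetical pointer to the start of
+// an sfnt font):
+//   const SkSFNTHeader* header = reinterpret_cast<const SkSFNTHeader*>(fontData);
+//   int numTables = SkEndian_SwapBE16(header->numTables);
+//   const SkSFNTHeader::TableDirectoryEntry* entries =
+//       reinterpret_cast<const SkSFNTHeader::TableDirectoryEntry*>(header + 1);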
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkTTCFHeader.h b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
new file mode 100644
index 0000000000..63eac7c3e2
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTTCFHeader_DEFINED
+#define SkTTCFHeader_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkSFNTHeader.h"
+
+#pragma pack(push, 1)
+
+struct SkTTCFHeader {
+ SK_SFNT_ULONG ttcTag;
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 't';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkTTCFHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_1 = SkTEndian_SwapBE32(1 << 16);
+ static const SK_OT_Fixed version_2 = SkTEndian_SwapBE32(2 << 16);
+
+ SK_OT_ULONG numOffsets;
+ //SK_OT_ULONG offset[numOffsets]
+
+ struct Version2Ext {
+ SK_OT_ULONG dsigType;
+ struct dsigType_None {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 0;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_None>::value;
+ };
+ struct dsigType_Format1 {
+ static const SK_OT_CHAR TAG0 = 'D';
+ static const SK_OT_CHAR TAG1 = 'S';
+ static const SK_OT_CHAR TAG2 = 'I';
+ static const SK_OT_CHAR TAG3 = 'G';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_Format1>::value;
+ };
+ SK_OT_ULONG dsigLength; //Length of DSIG table (in bytes).
+ SK_OT_ULONG dsigOffset; //Offset of DSIG table from the beginning of file (in bytes).
+ };// version2ext (if version == version_2)
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkTTCFHeader) == 12, "sizeof_SkTTCFHeader_not_12");
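+
+// Note: in a version 2 collection the Version2Ext fields follow the variable
+// length offset array rather than numOffsets directly, which is why they do
+// not contribute to the fixed 12-byte header size asserted above.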
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp b/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp
new file mode 100644
index 0000000000..d157a73595
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkBitmapProcShader.h"
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkXfermodePriv.h"
+
+static bool only_scale_and_translate(const SkMatrix& matrix) {
+ unsigned mask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
+ return (matrix.getType() & ~mask) == 0;
+}
+
+class BitmapProcInfoContext : public SkShaderBase::Context {
+public:
+ // The info has been allocated elsewhere, but we are responsible for calling its destructor.
+ BitmapProcInfoContext(const SkShaderBase& shader, const SkShaderBase::ContextRec& rec,
+ SkBitmapProcInfo* info)
+ : INHERITED(shader, rec)
+ , fInfo(info)
+ {
+ fFlags = 0;
+ if (fInfo->fPixmap.isOpaque() && (255 == this->getPaintAlpha())) {
+ fFlags |= SkShaderBase::kOpaqueAlpha_Flag;
+ }
+
+ if (1 == fInfo->fPixmap.height() && only_scale_and_translate(this->getTotalInverse())) {
+ fFlags |= SkShaderBase::kConstInY32_Flag;
+ }
+ }
+
+ uint32_t getFlags() const override { return fFlags; }
+
+private:
+ SkBitmapProcInfo* fInfo;
+ uint32_t fFlags;
+
+ typedef SkShaderBase::Context INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class BitmapProcShaderContext : public BitmapProcInfoContext {
+public:
+ BitmapProcShaderContext(const SkShaderBase& shader, const SkShaderBase::ContextRec& rec,
+ SkBitmapProcState* state)
+ : INHERITED(shader, rec, state)
+ , fState(state)
+ {}
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override {
+ const SkBitmapProcState& state = *fState;
+ if (state.getShaderProc32()) {
+ state.getShaderProc32()(&state, x, y, dstC, count);
+ return;
+ }
+
+ const int BUF_MAX = 128;
+ uint32_t buffer[BUF_MAX];
+ SkBitmapProcState::MatrixProc mproc = state.getMatrixProc();
+ SkBitmapProcState::SampleProc32 sproc = state.getSampleProc32();
+ const int max = state.maxCountForBufferSize(sizeof(buffer[0]) * BUF_MAX);
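+        // The span is processed in chunks: mproc writes bitmap-space sample
+        // indices for up to n pixels into buffer[], then sproc reads those
+        // indices and writes colors to dstC; max bounds each chunk so that
+        // buffer[] is never overrun.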
+
+ SkASSERT(state.fPixmap.addr());
+
+ for (;;) {
+ int n = SkTMin(count, max);
+ SkASSERT(n > 0 && n < BUF_MAX*2);
+ mproc(state, buffer, n, x, y);
+ sproc(state, buffer, n, dstC);
+
+ if ((count -= n) == 0) {
+ break;
+ }
+ SkASSERT(count > 0);
+ x += n;
+ dstC += n;
+ }
+ }
+
+private:
+ SkBitmapProcState* fState;
+
+ typedef BitmapProcInfoContext INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkShaderBase::Context* SkBitmapProcLegacyShader::MakeContext(
+ const SkShaderBase& shader, SkTileMode tmx, SkTileMode tmy,
+ const SkImage_Base* image, const ContextRec& rec, SkArenaAlloc* alloc)
+{
+ SkMatrix totalInverse;
+ // Do this first, so we know the matrix can be inverted.
+ if (!shader.computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &totalInverse)) {
+ return nullptr;
+ }
+
+ SkBitmapProcState* state = alloc->make<SkBitmapProcState>(image, tmx, tmy);
+ if (!state->setup(totalInverse, *rec.fPaint)) {
+ return nullptr;
+ }
+ return alloc->make<BitmapProcShaderContext>(shader, rec, state);
+}
diff --git a/gfx/skia/skia/src/shaders/SkBitmapProcShader.h b/gfx/skia/skia/src/shaders/SkBitmapProcShader.h
new file mode 100644
index 0000000000..0509191ad4
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkBitmapProcShader.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapProcShader_DEFINED
+#define SkBitmapProcShader_DEFINED
+
+#include "src/core/SkImagePriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkImage_Base;
+
+class SkBitmapProcLegacyShader : public SkShaderBase {
+private:
+ friend class SkImageShader;
+
+ static Context* MakeContext(const SkShaderBase&, SkTileMode tmx, SkTileMode tmy,
+ const SkImage_Base*, const ContextRec&, SkArenaAlloc* alloc);
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp b/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp
new file mode 100644
index 0000000000..50bb4236c9
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkColorFilterShader.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#endif
+
+SkColorFilterShader::SkColorFilterShader(sk_sp<SkShader> shader, sk_sp<SkColorFilter> filter)
+ : fShader(std::move(shader))
+ , fFilter(std::move(filter))
+{
+ SkASSERT(fShader);
+ SkASSERT(fFilter);
+}
+
+sk_sp<SkFlattenable> SkColorFilterShader::CreateProc(SkReadBuffer& buffer) {
+ auto shader = buffer.readShader();
+ auto filter = buffer.readColorFilter();
+ if (!shader || !filter) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColorFilterShader>(shader, filter);
+}
+
+void SkColorFilterShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+ buffer.writeFlattenable(fFilter.get());
+}
+
+bool SkColorFilterShader::onAppendStages(const SkStageRec& rec) const {
+ if (!as_SB(fShader)->appendStages(rec)) {
+ return false;
+ }
+ fFilter->appendStages(rec, fShader->isOpaque());
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+/////////////////////////////////////////////////////////////////////
+
+#include "include/gpu/GrContext.h"
+
+std::unique_ptr<GrFragmentProcessor> SkColorFilterShader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ auto fp1 = as_SB(fShader)->asFragmentProcessor(args);
+ if (!fp1) {
+ return nullptr;
+ }
+
+ auto fp2 = fFilter->asFragmentProcessor(args.fContext, *args.fDstColorInfo);
+ if (!fp2) {
+ return fp1;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fpSeries[] = { std::move(fp1), std::move(fp2) };
+ return GrFragmentProcessor::RunInSeries(fpSeries, 2);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkShader::makeWithColorFilter(sk_sp<SkColorFilter> filter) const {
+ SkShader* base = const_cast<SkShader*>(this);
+ if (!filter) {
+ return sk_ref_sp(base);
+ }
+ return sk_make_sp<SkColorFilterShader>(sk_ref_sp(base), filter);
+}
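+
+// A minimal usage sketch (filter is a hypothetical sk_sp<SkColorFilter>):
+//   sk_sp<SkShader> tinted =
+//       SkShaders::Color(SK_ColorRED)->makeWithColorFilter(std::move(filter));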
diff --git a/gfx/skia/skia/src/shaders/SkColorFilterShader.h b/gfx/skia/skia/src/shaders/SkColorFilterShader.h
new file mode 100644
index 0000000000..bbe9f16132
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorFilterShader.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterShader_DEFINED
+#define SkColorFilterShader_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkArenaAlloc;
+
+class SkColorFilterShader : public SkShaderBase {
+public:
+ SkColorFilterShader(sk_sp<SkShader> shader, sk_sp<SkColorFilter> filter);
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkColorFilterShader)
+
+ sk_sp<SkShader> fShader;
+ sk_sp<SkColorFilter> fFilter;
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkColorShader.cpp b/gfx/skia/skia/src/shaders/SkColorShader.cpp
new file mode 100644
index 0000000000..5d40c7b9df
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorShader.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkUtils.h"
+#include "src/shaders/SkColorShader.h"
+
+SkColorShader::SkColorShader(SkColor c) : fColor(c) {}
+
+bool SkColorShader::isOpaque() const {
+ return SkColorGetA(fColor) == 255;
+}
+
+sk_sp<SkFlattenable> SkColorShader::CreateProc(SkReadBuffer& buffer) {
+ return sk_make_sp<SkColorShader>(buffer.readColor());
+}
+
+void SkColorShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fColor);
+}
+
+SkShader::GradientType SkColorShader::asAGradient(GradientInfo* info) const {
+ if (info) {
+ if (info->fColors && info->fColorCount >= 1) {
+ info->fColors[0] = fColor;
+ }
+ info->fColorCount = 1;
+ info->fTileMode = SkTileMode::kRepeat;
+ }
+ return kColor_GradientType;
+}
+
+SkColor4Shader::SkColor4Shader(const SkColor4f& color, sk_sp<SkColorSpace> space)
+ : fColorSpace(std::move(space))
+ , fColor(color)
+{}
+
+sk_sp<SkFlattenable> SkColor4Shader::CreateProc(SkReadBuffer& buffer) {
+ SkColor4f color;
+ sk_sp<SkColorSpace> colorSpace;
+ buffer.readColor4f(&color);
+ if (buffer.readBool()) {
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ colorSpace = data ? SkColorSpace::Deserialize(data->data(), data->size()) : nullptr;
+ }
+ return SkShaders::Color(color, std::move(colorSpace));
+}
+
+void SkColor4Shader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor4f(fColor);
+ sk_sp<SkData> colorSpaceData = fColorSpace ? fColorSpace->serialize() : nullptr;
+ if (colorSpaceData) {
+ buffer.writeBool(true);
+ buffer.writeDataAsByteArray(colorSpaceData.get());
+ } else {
+ buffer.writeBool(false);
+ }
+}
+
+
+sk_sp<SkShader> SkShaders::Color(const SkColor4f& color, sk_sp<SkColorSpace> space) {
+ if (!SkScalarsAreFinite(color.vec(), 4)) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColor4Shader>(color, std::move(space));
+}
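+
+// A minimal usage sketch: an opaque red specified as an SkColor4f, passing
+// nullptr for the color space:
+//   sk_sp<SkShader> s = SkShaders::Color(SkColor4f{1, 0, 0, 1}, nullptr);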
+
+bool SkColorShader::onAppendStages(const SkStageRec& rec) const {
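+    // The legacy SkColor is defined in sRGB; convert it (as unpremul) into
+    // the destination color space, then append the premultiplied result as a
+    // constant-color stage.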
+ SkColor4f color = SkColor4f::FromColor(fColor);
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType).apply(color.vec());
+ rec.fPipeline->append_constant_color(rec.fAlloc, color.premul().vec());
+ return true;
+}
+
+bool SkColor4Shader::onAppendStages(const SkStageRec& rec) const {
+ SkColor4f color = fColor;
+ SkColorSpaceXformSteps(fColorSpace.get(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType).apply(color.vec());
+ rec.fPipeline->append_constant_color(rec.fAlloc, color.premul().vec());
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrColorSpaceXform.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+
+std::unique_ptr<GrFragmentProcessor> SkColorShader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ SkPMColor4f color = SkColorToPMColor4f(fColor, *args.fDstColorInfo);
+ return GrConstColorProcessor::Make(color, GrConstColorProcessor::InputMode::kModulateA);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkColor4Shader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ SkColorSpaceXformSteps steps{ fColorSpace.get(), kUnpremul_SkAlphaType,
+ args.fDstColorInfo->colorSpace(), kUnpremul_SkAlphaType };
+ SkColor4f color = fColor;
+ steps.apply(color.vec());
+ return GrConstColorProcessor::Make(color.premul(),
+ GrConstColorProcessor::InputMode::kModulateA);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkColorShader.h b/gfx/skia/skia/src/shaders/SkColorShader.h
new file mode 100644
index 0000000000..4e152e3289
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorShader.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorShader_DEFINED
+#define SkColorShader_DEFINED
+
+#include "src/shaders/SkShaderBase.h"
+
+/** \class SkColorShader
+ A Shader that represents a single color. In general, this effect can be
+ accomplished by just using the color field on the paint, but if an
+ actual shader object is needed, this provides that feature.
+*/
+class SkColorShader : public SkShaderBase {
+public:
+ /** Create a ColorShader that ignores the color in the paint, and uses the
+ specified color. Note: like all shaders, at draw time the paint's alpha
+ will be respected, and is applied to the specified color.
+ */
+ explicit SkColorShader(SkColor c);
+
+ bool isOpaque() const override;
+ bool isConstant() const override { return true; }
+
+ GradientType asAGradient(GradientInfo* info) const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkColorShader)
+
+ void flatten(SkWriteBuffer&) const override;
+
+ bool onAsLuminanceColor(SkColor* lum) const override {
+ *lum = fColor;
+ return true;
+ }
+
+ bool onAppendStages(const SkStageRec&) const override;
+
+ SkColor fColor;
+};
+
+class SkColor4Shader : public SkShaderBase {
+public:
+ SkColor4Shader(const SkColor4f&, sk_sp<SkColorSpace>);
+
+ bool isOpaque() const override { return fColor.isOpaque(); }
+ bool isConstant() const override { return true; }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkColor4Shader)
+
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkColor4f fColor;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkComposeShader.cpp b/gfx/skia/skia/src/shaders/SkComposeShader.cpp
new file mode 100644
index 0000000000..f3a51a46db
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkComposeShader.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkColorShader.h"
+#include "src/shaders/SkComposeShader.h"
+
+namespace {
+
+sk_sp<SkShader> wrap_lm(sk_sp<SkShader> shader, const SkMatrix* lm) {
+ return (shader && lm) ? shader->makeWithLocalMatrix(*lm) : shader;
+}
+
+struct LocalMatrixStageRec final : public SkStageRec {
+ LocalMatrixStageRec(const SkStageRec& rec, const SkMatrix& lm)
+ : INHERITED(rec) {
+ if (!lm.isIdentity()) {
+ if (fLocalM) {
+ fStorage.setConcat(lm, *fLocalM);
+ fLocalM = fStorage.isIdentity() ? nullptr : &fStorage;
+ } else {
+ fLocalM = &lm;
+ }
+ }
+ }
+
+private:
+ SkMatrix fStorage;
+
+ using INHERITED = SkStageRec;
+};
+
+} // namespace
+
+sk_sp<SkShader> SkShaders::Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* lm) {
+ switch (mode) {
+ case SkBlendMode::kClear: return Color(0);
+ case SkBlendMode::kDst: return wrap_lm(std::move(dst), lm);
+ case SkBlendMode::kSrc: return wrap_lm(std::move(src), lm);
+ default: break;
+ }
+ return sk_sp<SkShader>(new SkShader_Blend(mode, std::move(dst), std::move(src), lm));
+}
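+
+// A minimal usage sketch (dstShader and srcShader are hypothetical):
+//   sk_sp<SkShader> blended =
+//       SkShaders::Blend(SkBlendMode::kMultiply, dstShader, srcShader, nullptr);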
+
+sk_sp<SkShader> SkShaders::Lerp(float weight, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* lm) {
+ if (SkScalarIsNaN(weight)) {
+ return nullptr;
+ }
+ if (dst == src || weight <= 0) {
+ return wrap_lm(std::move(dst), lm);
+ }
+ if (weight >= 1) {
+ return wrap_lm(std::move(src), lm);
+ }
+ return sk_sp<SkShader>(new SkShader_Lerp(weight, std::move(dst), std::move(src), lm));
+}
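+
+// A minimal usage sketch: an even mix of two hypothetical shaders:
+//   sk_sp<SkShader> mixed = SkShaders::Lerp(0.5f, shaderA, shaderB, nullptr);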
+
+sk_sp<SkShader> SkShaders::Lerp(sk_sp<SkShader> red, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* lm) {
+ if (!red) {
+ return nullptr;
+ }
+ if (dst == src) {
+ return wrap_lm(std::move(dst), lm);
+ }
+ return sk_sp<SkShader>(new SkShader_LerpRed(std::move(red), std::move(dst), std::move(src),
+ lm));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool append_shader_or_paint(const SkStageRec& rec, SkShader* shader) {
+ if (shader) {
+ if (!as_SB(shader)->appendStages(rec)) {
+ return false;
+ }
+ } else {
+ rec.fPipeline->append_constant_color(rec.fAlloc, rec.fPaint.getColor4f().premul().vec());
+ }
+ return true;
+}
+
+// Returns a pointer to the output of s0, and leaves the output of s1 in r,g,b,a
+static float* append_two_shaders(const SkStageRec& rec, SkShader* s0, SkShader* s1) {
+ struct Storage {
+ float fRes0[4 * SkRasterPipeline_kMaxStride];
+ };
+ auto storage = rec.fAlloc->make<Storage>();
+
+ if (!append_shader_or_paint(rec, s0)) {
+ return nullptr;
+ }
+ rec.fPipeline->append(SkRasterPipeline::store_src, storage->fRes0);
+
+ if (!append_shader_or_paint(rec, s1)) {
+ return nullptr;
+ }
+ return storage->fRes0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkShader_Blend::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> dst(buffer.readShader());
+ sk_sp<SkShader> src(buffer.readShader());
+ unsigned mode = buffer.read32();
+
+ // check for valid mode before we cast to the enum type
+ if (!buffer.validate(mode <= (unsigned)SkBlendMode::kLastMode)) {
+ return nullptr;
+ }
+ return SkShaders::Blend(static_cast<SkBlendMode>(mode), std::move(dst), std::move(src));
+}
+
+void SkShader_Blend::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fDst.get());
+ buffer.writeFlattenable(fSrc.get());
+ buffer.write32((int)fMode);
+}
+
+bool SkShader_Blend::onAppendStages(const SkStageRec& orig_rec) const {
+ const LocalMatrixStageRec rec(orig_rec, this->getLocalMatrix());
+
+ float* res0 = append_two_shaders(rec, fDst.get(), fSrc.get());
+ if (!res0) {
+ return false;
+ }
+
+ rec.fPipeline->append(SkRasterPipeline::load_dst, res0);
+ SkBlendMode_AppendStages(fMode, rec.fPipeline);
+ return true;
+}
+
+sk_sp<SkFlattenable> SkShader_Lerp::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> dst(buffer.readShader());
+ sk_sp<SkShader> src(buffer.readShader());
+ float t = buffer.readScalar();
+ return buffer.isValid() ? SkShaders::Lerp(t, std::move(dst), std::move(src)) : nullptr;
+}
+
+void SkShader_Lerp::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fDst.get());
+ buffer.writeFlattenable(fSrc.get());
+ buffer.writeScalar(fWeight);
+}
+
+bool SkShader_Lerp::onAppendStages(const SkStageRec& orig_rec) const {
+ const LocalMatrixStageRec rec(orig_rec, this->getLocalMatrix());
+
+ float* res0 = append_two_shaders(rec, fDst.get(), fSrc.get());
+ if (!res0) {
+ return false;
+ }
+
+ rec.fPipeline->append(SkRasterPipeline::load_dst, res0);
+ rec.fPipeline->append(SkRasterPipeline::lerp_1_float, &fWeight);
+ return true;
+}
+
+sk_sp<SkFlattenable> SkShader_LerpRed::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> dst(buffer.readShader());
+ sk_sp<SkShader> src(buffer.readShader());
+ sk_sp<SkShader> red(buffer.readShader());
+ return buffer.isValid() ?
+ SkShaders::Lerp(std::move(red), std::move(dst), std::move(src)) : nullptr;
+}
+
+void SkShader_LerpRed::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fDst.get());
+ buffer.writeFlattenable(fSrc.get());
+ buffer.writeFlattenable(fRed.get());
+}
+
+bool SkShader_LerpRed::onAppendStages(const SkStageRec& orig_rec) const {
+ const LocalMatrixStageRec rec(orig_rec, this->getLocalMatrix());
+
+ struct Storage {
+ float fRed[4 * SkRasterPipeline_kMaxStride];
+ };
+ auto storage = rec.fAlloc->make<Storage>();
+ if (!as_SB(fRed)->appendStages(rec)) {
+ return false;
+ }
+ // actually, we just need the first (red) channel, but for now we store rgba
+ rec.fPipeline->append(SkRasterPipeline::store_src, storage->fRed);
+
+ float* res0 = append_two_shaders(rec, fDst.get(), fSrc.get());
+ if (!res0) {
+ return false;
+ }
+
+ rec.fPipeline->append(SkRasterPipeline::load_dst, res0);
+ rec.fPipeline->append(SkRasterPipeline::lerp_native, &storage->fRed[0]);
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/effects/GrXfermodeFragmentProcessor.h"
+#include "src/gpu/effects/generated/GrComposeLerpEffect.h"
+#include "src/gpu/effects/generated/GrComposeLerpRedEffect.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+
+static std::unique_ptr<GrFragmentProcessor> as_fp(const GrFPArgs& args, SkShader* shader) {
+ return shader ? as_SB(shader)->asFragmentProcessor(args) : nullptr;
+}
+
+std::unique_ptr<GrFragmentProcessor> SkShader_Blend::asFragmentProcessor(
+ const GrFPArgs& orig_args) const {
+ const GrFPArgs::WithPreLocalMatrix args(orig_args, this->getLocalMatrix());
+ auto fpA = as_fp(args, fDst.get());
+ auto fpB = as_fp(args, fSrc.get());
+ if (!fpA || !fpB) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromTwoProcessors(std::move(fpB),
+ std::move(fpA), fMode);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkShader_Lerp::asFragmentProcessor(
+ const GrFPArgs& orig_args) const {
+ const GrFPArgs::WithPreLocalMatrix args(orig_args, this->getLocalMatrix());
+ auto fpA = as_fp(args, fDst.get());
+ auto fpB = as_fp(args, fSrc.get());
+ return GrComposeLerpEffect::Make(std::move(fpA), std::move(fpB), fWeight);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkShader_LerpRed::asFragmentProcessor(
+ const GrFPArgs& orig_args) const {
+ const GrFPArgs::WithPreLocalMatrix args(orig_args, this->getLocalMatrix());
+ auto fpA = as_fp(args, fDst.get());
+ auto fpB = as_fp(args, fSrc.get());
+ auto red = as_SB(fRed)->asFragmentProcessor(args);
+ if (!red) {
+ return nullptr;
+ }
+ return GrComposeLerpRedEffect::Make(std::move(fpA), std::move(fpB), std::move(red));
+}
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkComposeShader.h b/gfx/skia/skia/src/shaders/SkComposeShader.h
new file mode 100644
index 0000000000..4ea44cff8b
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkComposeShader.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkComposeShader_DEFINED
+#define SkComposeShader_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkShader_Blend final : public SkShaderBase {
+public:
+ SkShader_Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src, const SkMatrix* lm)
+ : INHERITED(lm)
+ , fDst(std::move(dst))
+ , fSrc(std::move(src))
+ , fMode(mode)
+ {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ SkShader_Blend(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkShader_Blend)
+
+ sk_sp<SkShader> fDst;
+ sk_sp<SkShader> fSrc;
+ const SkBlendMode fMode;
+
+ typedef SkShaderBase INHERITED;
+};
+
+class SkShader_Lerp final : public SkShaderBase {
+public:
+ SkShader_Lerp(float weight, sk_sp<SkShader> dst, sk_sp<SkShader> src, const SkMatrix* lm)
+ : INHERITED(lm)
+ , fDst(std::move(dst))
+ , fSrc(std::move(src))
+ , fWeight(weight)
+ {
+ SkASSERT(weight >= 0 && weight <= 1);
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ SkShader_Lerp(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkShader_Lerp)
+
+ sk_sp<SkShader> fDst;
+ sk_sp<SkShader> fSrc;
+ const float fWeight;
+
+ typedef SkShaderBase INHERITED;
+};
+
+class SkShader_LerpRed final : public SkShaderBase {
+public:
+ SkShader_LerpRed(sk_sp<SkShader> red, sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ const SkMatrix* lm)
+ : INHERITED(lm)
+ , fDst(std::move(dst))
+ , fSrc(std::move(src))
+ , fRed(std::move(red))
+ {}
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ SkShader_LerpRed(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkShader_LerpRed)
+
+ sk_sp<SkShader> fDst;
+ sk_sp<SkShader> fSrc;
+ sk_sp<SkShader> fRed;
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkEmptyShader.h b/gfx/skia/skia/src/shaders/SkEmptyShader.h
new file mode 100644
index 0000000000..439082b1f9
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkEmptyShader.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEmptyShader_DEFINED
+#define SkEmptyShader_DEFINED
+
+#include "src/shaders/SkShaderBase.h"
+
+// TODO: move this to private, as there is a public factory on SkShader
+
+/**
+ * \class SkEmptyShader
+ * A Shader that always draws nothing. Its makeContext always returns nullptr.
+ */
+class SkEmptyShader : public SkShaderBase {
+public:
+ SkEmptyShader() {}
+
+protected:
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override {
+ return nullptr;
+ }
+#endif
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ // Do nothing.
+ // We just don't want to fall through to SkShader::flatten(),
+ // which will write data we don't care to serialize or decode.
+ }
+
+ bool onAppendStages(const SkStageRec&) const override {
+ return false;
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkEmptyShader)
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkImageShader.cpp b/gfx/skia/skia/src/shaders/SkImageShader.cpp
new file mode 100644
index 0000000000..5bc13e587c
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkImageShader.cpp
@@ -0,0 +1,614 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapController.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkEmptyShader.h"
+#include "src/shaders/SkImageShader.h"
+
+/**
+ * We are faster in clamp, so always use that tiling when we can.
+ */
+static SkTileMode optimize(SkTileMode tm, int dimension) {
+ SkASSERT(dimension > 0);
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // need to update frameworks/base/libs/hwui/tests/unit/SkiaBehaviorTests.cpp:55 to allow
+ // for transforming to clamp.
+ return tm;
+#else
+ return dimension == 1 ? SkTileMode::kClamp : tm;
+#endif
+}
+
+SkImageShader::SkImageShader(sk_sp<SkImage> img,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul)
+ : INHERITED(localMatrix)
+ , fImage(std::move(img))
+ , fTileModeX(optimize(tmx, fImage->width()))
+ , fTileModeY(optimize(tmy, fImage->height()))
+ , fClampAsIfUnpremul(clampAsIfUnpremul)
+{}
+
+// fClampAsIfUnpremul is always false when constructed through public APIs,
+// so there's no need to read or write it here.
+
+sk_sp<SkFlattenable> SkImageShader::CreateProc(SkReadBuffer& buffer) {
+ auto tmx = buffer.read32LE<SkTileMode>(SkTileMode::kLastTileMode);
+ auto tmy = buffer.read32LE<SkTileMode>(SkTileMode::kLastTileMode);
+ SkMatrix localMatrix;
+ buffer.readMatrix(&localMatrix);
+ sk_sp<SkImage> img = buffer.readImage();
+ if (!img) {
+ return nullptr;
+ }
+ return SkImageShader::Make(std::move(img), tmx, tmy, &localMatrix);
+}
+
+void SkImageShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeUInt((unsigned)fTileModeX);
+ buffer.writeUInt((unsigned)fTileModeY);
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.writeImage(fImage.get());
+ SkASSERT(fClampAsIfUnpremul == false);
+}
+
+bool SkImageShader::isOpaque() const {
+ return fImage->isOpaque() &&
+ fTileModeX != SkTileMode::kDecal && fTileModeY != SkTileMode::kDecal;
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+static bool legacy_shader_can_handle(const SkMatrix& inv) {
+ if (!inv.isScaleTranslate()) {
+ return false;
+ }
+
+ // legacy code uses SkFixed 32.32, so ensure the inverse doesn't map device coordinates
+ // out of range.
+ const SkScalar max_dev_coord = 32767.0f;
+ SkRect src;
+ SkAssertResult(inv.mapRect(&src, SkRect::MakeWH(max_dev_coord, max_dev_coord)));
+
+ // take 1/4 of max signed 32bits so we have room to subtract local values
+ const SkScalar max_fixed32dot32 = SK_MaxS32 * 0.25f;
+ if (!SkRect::MakeLTRB(-max_fixed32dot32, -max_fixed32dot32,
+ max_fixed32dot32, max_fixed32dot32).contains(src)) {
+ return false;
+ }
+
+ // legacy shader impl should be able to handle these matrices
+ return true;
+}
+
+SkShaderBase::Context* SkImageShader::onMakeContext(const ContextRec& rec,
+ SkArenaAlloc* alloc) const {
+ if (fImage->alphaType() == kUnpremul_SkAlphaType) {
+ return nullptr;
+ }
+ if (fImage->colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+ if (fTileModeX != fTileModeY) {
+ return nullptr;
+ }
+ if (fTileModeX == SkTileMode::kDecal || fTileModeY == SkTileMode::kDecal) {
+ return nullptr;
+ }
+
+ // SkBitmapProcShader stores bitmap coordinates in a 16bit buffer,
+ // so it can't handle bitmaps larger than 65535.
+ //
+ // We back off another bit to 32767 to make small amounts of
+ // intermediate math safe, e.g. in
+ //
+ // SkFixed fx = ...;
+ // fx = tile(fx + SK_Fixed1);
+ //
+ // we want to make sure (fx + SK_Fixed1) never overflows.
+ if (fImage-> width() > 32767 ||
+ fImage->height() > 32767) {
+ return nullptr;
+ }
+
+ SkMatrix inv;
+ if (!this->computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &inv) ||
+ !legacy_shader_can_handle(inv)) {
+ return nullptr;
+ }
+
+ if (!rec.isLegacyCompatible(fImage->colorSpace())) {
+ return nullptr;
+ }
+
+ return SkBitmapProcLegacyShader::MakeContext(*this, fTileModeX, fTileModeY,
+ as_IB(fImage.get()), rec, alloc);
+}
+#endif
+
+SkImage* SkImageShader::onIsAImage(SkMatrix* texM, SkTileMode xy[]) const {
+ if (texM) {
+ *texM = this->getLocalMatrix();
+ }
+ if (xy) {
+ xy[0] = fTileModeX;
+ xy[1] = fTileModeY;
+ }
+ return const_cast<SkImage*>(fImage.get());
+}
+
+sk_sp<SkShader> SkImageShader::Make(sk_sp<SkImage> image,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul) {
+ if (!image) {
+ return sk_make_sp<SkEmptyShader>();
+ }
+ return sk_sp<SkShader>{ new SkImageShader(image, tmx, tmy, localMatrix, clampAsIfUnpremul) };
+}
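+
+// A minimal usage sketch (image is a hypothetical sk_sp<SkImage>):
+//   sk_sp<SkShader> shader = SkImageShader::Make(
+//       image, SkTileMode::kRepeat, SkTileMode::kRepeat, nullptr, false);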
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/GrBicubicEffect.h"
+#include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
+
+static GrSamplerState::WrapMode tile_mode_to_wrap_mode(const SkTileMode tileMode) {
+ switch (tileMode) {
+ case SkTileMode::kClamp:
+ return GrSamplerState::WrapMode::kClamp;
+ case SkTileMode::kRepeat:
+ return GrSamplerState::WrapMode::kRepeat;
+ case SkTileMode::kMirror:
+ return GrSamplerState::WrapMode::kMirrorRepeat;
+ case SkTileMode::kDecal:
+ return GrSamplerState::WrapMode::kClampToBorder;
+ }
+ SK_ABORT("Unknown tile mode.");
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImageShader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ const auto lm = this->totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix);
+ SkMatrix lmInverse;
+ if (!lm->invert(&lmInverse)) {
+ return nullptr;
+ }
+
+ GrSamplerState::WrapMode wrapModes[] = {tile_mode_to_wrap_mode(fTileModeX),
+ tile_mode_to_wrap_mode(fTileModeY)};
+
+    // If either domainX or domainY is not ignored, a texture domain effect has to be used to
+    // implement the decal mode (while leaving non-decal axes alone). Any wrap mode that was
+    // originally clamp-to-border is reset to clamp, since the hardware cannot implement it
+    // directly.
+ GrTextureDomain::Mode domainX = GrTextureDomain::kIgnore_Mode;
+ GrTextureDomain::Mode domainY = GrTextureDomain::kIgnore_Mode;
+ if (!args.fContext->priv().caps()->clampToBorderSupport()) {
+ if (wrapModes[0] == GrSamplerState::WrapMode::kClampToBorder) {
+ domainX = GrTextureDomain::kDecal_Mode;
+ wrapModes[0] = GrSamplerState::WrapMode::kClamp;
+ }
+ if (wrapModes[1] == GrSamplerState::WrapMode::kClampToBorder) {
+ domainY = GrTextureDomain::kDecal_Mode;
+ wrapModes[1] = GrSamplerState::WrapMode::kClamp;
+ }
+ }
+
+ // Must set wrap and filter on the sampler before requesting a texture. In two places below
+ // we check the matrix scale factors to determine how to interpret the filter quality setting.
+ // This completely ignores the complexity of the drawVertices case where explicit local coords
+ // are provided by the caller.
+ bool doBicubic;
+ GrSamplerState::Filter textureFilterMode = GrSkFilterQualityToGrFilterMode(
+ fImage->width(), fImage->height(), args.fFilterQuality, *args.fViewMatrix, *lm,
+ args.fContext->priv().options().fSharpenMipmappedTextures, &doBicubic);
+ GrSamplerState samplerState(wrapModes, textureFilterMode);
+ SkScalar scaleAdjust[2] = { 1.0f, 1.0f };
+ sk_sp<GrTextureProxy> proxy(as_IB(fImage)->asTextureProxyRef(args.fContext, samplerState,
+ scaleAdjust));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ GrColorType srcColorType = SkColorTypeToGrColorType(fImage->colorType());
+
+ lmInverse.postScale(scaleAdjust[0], scaleAdjust[1]);
+
+ std::unique_ptr<GrFragmentProcessor> inner;
+ if (doBicubic) {
+ // domainX and domainY will properly apply the decal effect with the texture domain used in
+ // the bicubic filter if clamp to border was unsupported in hardware
+ static constexpr auto kDir = GrBicubicEffect::Direction::kXY;
+ inner = GrBicubicEffect::Make(std::move(proxy), srcColorType, lmInverse, wrapModes, domainX,
+ domainY, kDir, fImage->alphaType());
+ } else {
+ if (domainX != GrTextureDomain::kIgnore_Mode || domainY != GrTextureDomain::kIgnore_Mode) {
+ SkRect domain = GrTextureDomain::MakeTexelDomain(
+ SkIRect::MakeWH(proxy->width(), proxy->height()),
+ domainX, domainY);
+ inner = GrTextureDomainEffect::Make(std::move(proxy), srcColorType, lmInverse, domain,
+ domainX, domainY, samplerState);
+ } else {
+ inner = GrSimpleTextureEffect::Make(std::move(proxy), srcColorType, lmInverse,
+ samplerState);
+ }
+ }
+ inner = GrColorSpaceXformEffect::Make(std::move(inner), fImage->colorSpace(),
+ fImage->alphaType(), args.fDstColorInfo->colorSpace());
+
+ bool isAlphaOnly = SkColorTypeIsAlphaOnly(fImage->colorType());
+ if (isAlphaOnly) {
+ return inner;
+ } else if (args.fInputColorIsOpaque) {
+ return GrFragmentProcessor::OverrideInput(std::move(inner), SK_PMColor4fWHITE, false);
+ }
+ return GrFragmentProcessor::MulChildByInputAlpha(std::move(inner));
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "src/core/SkImagePriv.h"
+
+sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix, SkCopyPixelsMode cpm) {
+ return SkImageShader::Make(SkMakeImageFromRasterBitmap(src, cpm),
+ tmx, tmy, localMatrix);
+}
+
+sk_sp<SkShader> SkMakeBitmapShaderForPaint(const SkPaint& paint, const SkBitmap& src,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix, SkCopyPixelsMode mode) {
+ auto s = SkMakeBitmapShader(src, tmx, tmy, localMatrix, mode);
+ if (!s) {
+ return nullptr;
+ }
+ if (src.colorType() == kAlpha_8_SkColorType && paint.getShader()) {
+ // Compose the image shader with the paint's shader. Alpha images+shaders should output the
+ // texture's alpha multiplied by the shader's color. DstIn (d*sa) will achieve this with
+ // the source image and dst shader (MakeBlend takes dst first, src second).
+ s = SkShaders::Blend(SkBlendMode::kDstIn, paint.refShader(), std::move(s));
+ }
+ return s;
+}
+
+void SkShaderBase::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkImageShader); }
+
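+// SkImageStageUpdater lets a pipeline built once by doStages() be replayed across draws:
+// update() recomputes only the total inverse matrix into fMatrixStorage (read by the
+// matrix_2x3 stage appended in doStages()), so the rest of the pipeline never needs rebuilding.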
+class SkImageStageUpdater : public SkStageUpdater {
+public:
+ const SkImageShader* fShader;
+
+ float fMatrixStorage[6];
+
+#if 0 // TODO: when we support mipmaps
+ SkRasterPipeline_GatherCtx* fGather;
+ SkRasterPipeline_TileCtx* fLimitX;
+ SkRasterPipeline_TileCtx* fLimitY;
+ SkRasterPipeline_DecalTileCtx* fDecal;
+#endif
+
+ bool update(const SkMatrix& ctm, const SkMatrix* localM) override {
+ SkMatrix matrix;
+ return fShader->computeTotalInverse(ctm, localM, &matrix) &&
+ matrix.asAffine(fMatrixStorage);
+ }
+};
+
+bool SkImageShader::doStages(const SkStageRec& rec, SkImageStageUpdater* updater) const {
+ if (updater &&
+ (rec.fPaint.getFilterQuality() == kMedium_SkFilterQuality ||
+ rec.fCTM.hasPerspective()))
+ {
+        // TODO: handle these cases
+        //   medium: re-call RequestBitmap and update width/height accordingly
+        //   persp:  store 9 floats and use the perspective matrix stage
+ return false;
+ }
+
+ SkRasterPipeline* p = rec.fPipeline;
+ SkArenaAlloc* alloc = rec.fAlloc;
+ auto quality = rec.fPaint.getFilterQuality();
+
+ SkMatrix matrix;
+ if (!this->computeTotalInverse(rec.fCTM, rec.fLocalM, &matrix)) {
+ return false;
+ }
+
+ const auto* state = SkBitmapController::RequestBitmap(as_IB(fImage.get()),
+ matrix, quality, alloc);
+ if (!state) {
+ return false;
+ }
+
+ const SkPixmap& pm = state->pixmap();
+ matrix = state->invMatrix();
+ quality = state->quality();
+ auto info = pm.info();
+
+ p->append(SkRasterPipeline::seed_shader);
+
+ if (updater) {
+ p->append(SkRasterPipeline::matrix_2x3, updater->fMatrixStorage);
+ } else {
+ // When the matrix is just an integer translate, bilerp == nearest neighbor.
+ if (quality == kLow_SkFilterQuality &&
+ matrix.getType() <= SkMatrix::kTranslate_Mask &&
+ matrix.getTranslateX() == (int)matrix.getTranslateX() &&
+ matrix.getTranslateY() == (int)matrix.getTranslateY()) {
+ quality = kNone_SkFilterQuality;
+ }
+
+ // See skia:4649 and the GM image_scale_aligned.
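+        // (Assumed rationale:) nudging a fractional translate one ULP toward its floor keeps
+        // sample points that land exactly on texel boundaries from rounding into the neighboring
+        // texel; an exact integer translate is unchanged, since nextafterf(x, x) == x.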
+ if (quality == kNone_SkFilterQuality) {
+ if (matrix.getScaleX() >= 0) {
+ matrix.setTranslateX(nextafterf(matrix.getTranslateX(),
+ floorf(matrix.getTranslateX())));
+ }
+ if (matrix.getScaleY() >= 0) {
+ matrix.setTranslateY(nextafterf(matrix.getTranslateY(),
+ floorf(matrix.getTranslateY())));
+ }
+ }
+ p->append_matrix(alloc, matrix);
+ }
+
+ auto gather = alloc->make<SkRasterPipeline_GatherCtx>();
+ gather->pixels = pm.addr();
+ gather->stride = pm.rowBytesAsPixels();
+ gather->width = pm.width();
+ gather->height = pm.height();
+
+ auto limit_x = alloc->make<SkRasterPipeline_TileCtx>(),
+ limit_y = alloc->make<SkRasterPipeline_TileCtx>();
+ limit_x->scale = pm.width();
+ limit_x->invScale = 1.0f / pm.width();
+ limit_y->scale = pm.height();
+ limit_y->invScale = 1.0f / pm.height();
+
+ SkRasterPipeline_DecalTileCtx* decal_ctx = nullptr;
+ bool decal_x_and_y = fTileModeX == SkTileMode::kDecal && fTileModeY == SkTileMode::kDecal;
+ if (fTileModeX == SkTileMode::kDecal || fTileModeY == SkTileMode::kDecal) {
+ decal_ctx = alloc->make<SkRasterPipeline_DecalTileCtx>();
+ decal_ctx->limit_x = limit_x->scale;
+ decal_ctx->limit_y = limit_y->scale;
+ }
+
+#if 0   // TODO: when we support kMedium
+    if (updater && (quality == kMedium_SkFilterQuality)) {
+        // if we change levels in the mipmap, we need to update the scales (and invScales)
+        updater->fGather = gather;
+        updater->fLimitX = limit_x;
+        updater->fLimitY = limit_y;
+        updater->fDecal = decal_ctx;
+    }
+#endif
+
+ auto append_tiling_and_gather = [&] {
+ if (decal_x_and_y) {
+ p->append(SkRasterPipeline::decal_x_and_y, decal_ctx);
+ } else {
+ switch (fTileModeX) {
+ case SkTileMode::kClamp: /* The gather_xxx stage will clamp for us. */ break;
+ case SkTileMode::kMirror: p->append(SkRasterPipeline::mirror_x, limit_x); break;
+ case SkTileMode::kRepeat: p->append(SkRasterPipeline::repeat_x, limit_x); break;
+ case SkTileMode::kDecal: p->append(SkRasterPipeline::decal_x, decal_ctx); break;
+ }
+ switch (fTileModeY) {
+ case SkTileMode::kClamp: /* The gather_xxx stage will clamp for us. */ break;
+ case SkTileMode::kMirror: p->append(SkRasterPipeline::mirror_y, limit_y); break;
+ case SkTileMode::kRepeat: p->append(SkRasterPipeline::repeat_y, limit_y); break;
+ case SkTileMode::kDecal: p->append(SkRasterPipeline::decal_y, decal_ctx); break;
+ }
+ }
+
+ void* ctx = gather;
+ switch (info.colorType()) {
+ case kAlpha_8_SkColorType: p->append(SkRasterPipeline::gather_a8, ctx); break;
+ case kA16_unorm_SkColorType: p->append(SkRasterPipeline::gather_a16, ctx); break;
+ case kA16_float_SkColorType: p->append(SkRasterPipeline::gather_af16, ctx); break;
+ case kRGB_565_SkColorType: p->append(SkRasterPipeline::gather_565, ctx); break;
+ case kARGB_4444_SkColorType: p->append(SkRasterPipeline::gather_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: p->append(SkRasterPipeline::gather_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: p->append(SkRasterPipeline::gather_rg1616, ctx); break;
+ case kR16G16_float_SkColorType: p->append(SkRasterPipeline::gather_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: p->append(SkRasterPipeline::gather_8888, ctx); break;
+ case kRGBA_1010102_SkColorType: p->append(SkRasterPipeline::gather_1010102, ctx); break;
+            case kR16G16B16A16_unorm_SkColorType:
+                                            p->append(SkRasterPipeline::gather_16161616, ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: p->append(SkRasterPipeline::gather_f16, ctx); break;
+ case kRGBA_F32_SkColorType: p->append(SkRasterPipeline::gather_f32, ctx); break;
+
+ case kGray_8_SkColorType: p->append(SkRasterPipeline::gather_a8, ctx);
+ p->append(SkRasterPipeline::alpha_to_gray ); break;
+
+ case kRGB_888x_SkColorType: p->append(SkRasterPipeline::gather_8888, ctx);
+ p->append(SkRasterPipeline::force_opaque ); break;
+
+ case kRGB_101010x_SkColorType: p->append(SkRasterPipeline::gather_1010102, ctx);
+ p->append(SkRasterPipeline::force_opaque ); break;
+
+ case kBGRA_8888_SkColorType: p->append(SkRasterPipeline::gather_8888, ctx);
+ p->append(SkRasterPipeline::swap_rb ); break;
+
+ case kUnknown_SkColorType: SkASSERT(false);
+ }
+ if (decal_ctx) {
+ p->append(SkRasterPipeline::check_decal_mask, decal_ctx);
+ }
+ };
+
+ auto append_misc = [&] {
+        // TODO: if rec.fDstCS isn't null, we'll premul here then immediately unpremul
+ // to do the color space transformation. Might be possible to streamline.
+ if (info.colorType() == kAlpha_8_SkColorType) {
+ // The color for A8 images comes from the (sRGB) paint color.
+ p->append_set_rgb(alloc, rec.fPaint.getColor4f());
+ p->append(SkRasterPipeline::premul);
+ } else if (info.alphaType() == kUnpremul_SkAlphaType) {
+ // Convert unpremul images to premul before we carry on with the rest of the pipeline.
+ p->append(SkRasterPipeline::premul);
+ }
+
+ if (quality > kLow_SkFilterQuality) {
+ // Bicubic filtering naturally produces out of range values on both sides.
+ p->append(SkRasterPipeline::clamp_0);
+ p->append(fClampAsIfUnpremul ? SkRasterPipeline::clamp_1
+ : SkRasterPipeline::clamp_a);
+ }
+
+ if (rec.fDstCS) {
+ // If color managed, convert from premul source all the way to premul dst color space.
+ auto srcCS = info.colorSpace();
+ if (!srcCS || info.colorType() == kAlpha_8_SkColorType) {
+ // We treat untagged images as sRGB.
+ // A8 images get their r,g,b from the paint color, so they're also sRGB.
+ srcCS = sk_srgb_singleton();
+ }
+ alloc->make<SkColorSpaceXformSteps>(srcCS , kPremul_SkAlphaType,
+ rec.fDstCS, kPremul_SkAlphaType)
+ ->apply(p, info.colorType());
+ }
+
+ return true;
+ };
+
+ // Check for fast-path stages.
+ auto ct = info.colorType();
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType)
+ && quality == kLow_SkFilterQuality
+ && fTileModeX == SkTileMode::kClamp && fTileModeY == SkTileMode::kClamp) {
+
+ p->append(SkRasterPipeline::bilerp_clamp_8888, gather);
+ if (ct == kBGRA_8888_SkColorType) {
+ p->append(SkRasterPipeline::swap_rb);
+ }
+ return append_misc();
+ }
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType) // TODO: all formats
+ && quality == kLow_SkFilterQuality
+ && fTileModeX != SkTileMode::kDecal // TODO decal too?
+ && fTileModeY != SkTileMode::kDecal) {
+
+ auto ctx = alloc->make<SkRasterPipeline_SamplerCtx2>();
+ *(SkRasterPipeline_GatherCtx*)(ctx) = *gather;
+ ctx->ct = ct;
+ ctx->tileX = fTileModeX;
+ ctx->tileY = fTileModeY;
+ ctx->invWidth = 1.0f / ctx->width;
+ ctx->invHeight = 1.0f / ctx->height;
+ p->append(SkRasterPipeline::bilinear, ctx);
+ return append_misc();
+ }
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType)
+ && quality == kHigh_SkFilterQuality
+ && fTileModeX == SkTileMode::kClamp && fTileModeY == SkTileMode::kClamp) {
+
+ p->append(SkRasterPipeline::bicubic_clamp_8888, gather);
+ if (ct == kBGRA_8888_SkColorType) {
+ p->append(SkRasterPipeline::swap_rb);
+ }
+ return append_misc();
+ }
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType) // TODO: all formats
+ && quality == kHigh_SkFilterQuality
+ && fTileModeX != SkTileMode::kDecal // TODO decal too?
+ && fTileModeY != SkTileMode::kDecal) {
+
+ auto ctx = alloc->make<SkRasterPipeline_SamplerCtx2>();
+ *(SkRasterPipeline_GatherCtx*)(ctx) = *gather;
+ ctx->ct = ct;
+ ctx->tileX = fTileModeX;
+ ctx->tileY = fTileModeY;
+ ctx->invWidth = 1.0f / ctx->width;
+ ctx->invHeight = 1.0f / ctx->height;
+ p->append(SkRasterPipeline::bicubic, ctx);
+ return append_misc();
+ }
+
+ SkRasterPipeline_SamplerCtx* sampler = nullptr;
+ if (quality != kNone_SkFilterQuality) {
+ sampler = alloc->make<SkRasterPipeline_SamplerCtx>();
+ }
+
+ auto sample = [&](SkRasterPipeline::StockStage setup_x,
+ SkRasterPipeline::StockStage setup_y) {
+ p->append(setup_x, sampler);
+ p->append(setup_y, sampler);
+ append_tiling_and_gather();
+ p->append(SkRasterPipeline::accumulate, sampler);
+ };
+
+ if (quality == kNone_SkFilterQuality) {
+ append_tiling_and_gather();
+ } else if (quality == kLow_SkFilterQuality) {
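+        // Bilinear: accumulate the four taps of the 2x2 texel neighborhood around the sample
+        // point; the bilinear_{n,p}{x,y} stages set up each tap's offset and weight.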
+ p->append(SkRasterPipeline::save_xy, sampler);
+
+ sample(SkRasterPipeline::bilinear_nx, SkRasterPipeline::bilinear_ny);
+ sample(SkRasterPipeline::bilinear_px, SkRasterPipeline::bilinear_ny);
+ sample(SkRasterPipeline::bilinear_nx, SkRasterPipeline::bilinear_py);
+ sample(SkRasterPipeline::bilinear_px, SkRasterPipeline::bilinear_py);
+
+ p->append(SkRasterPipeline::move_dst_src);
+
+ } else {
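+        // Bicubic: accumulate all 16 taps of the 4x4 texel neighborhood; the n3/n1/p1/p3 stage
+        // names correspond to the four per-axis tap offsets (nominally -1.5, -0.5, +0.5, +1.5
+        // texels from the sample point).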
+ p->append(SkRasterPipeline::save_xy, sampler);
+
+ sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_n3y);
+ sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_n3y);
+ sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_n3y);
+ sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_n3y);
+
+ sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_n1y);
+ sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_n1y);
+ sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_n1y);
+ sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_n1y);
+
+ sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_p1y);
+ sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_p1y);
+ sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_p1y);
+ sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_p1y);
+
+ sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_p3y);
+ sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_p3y);
+ sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_p3y);
+ sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_p3y);
+
+ p->append(SkRasterPipeline::move_dst_src);
+ }
+
+ return append_misc();
+}
+
+bool SkImageShader::onAppendStages(const SkStageRec& rec) const {
+ return this->doStages(rec, nullptr);
+}
+
+SkStageUpdater* SkImageShader::onAppendUpdatableStages(const SkStageRec& rec) const {
+ auto updater = rec.fAlloc->make<SkImageStageUpdater>();
+ updater->fShader = this;
+ return this->doStages(rec, updater) ? updater : nullptr;
+}
+
diff --git a/gfx/skia/skia/src/shaders/SkImageShader.h b/gfx/skia/skia/src/shaders/SkImageShader.h
new file mode 100644
index 0000000000..5a01f0ae87
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkImageShader.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageShader_DEFINED
+#define SkImageShader_DEFINED
+
+#include "include/core/SkImage.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+// private subclass of SkStageUpdater
+class SkImageStageUpdater;
+
+class SkImageShader : public SkShaderBase {
+public:
+ static sk_sp<SkShader> Make(sk_sp<SkImage>,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul = false);
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkImageShader)
+
+ SkImageShader(sk_sp<SkImage>,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul);
+
+ void flatten(SkWriteBuffer&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc* storage) const override;
+#endif
+ SkImage* onIsAImage(SkMatrix*, SkTileMode*) const override;
+
+ bool onAppendStages(const SkStageRec&) const override;
+ SkStageUpdater* onAppendUpdatableStages(const SkStageRec&) const override;
+
+ bool doStages(const SkStageRec&, SkImageStageUpdater* = nullptr) const;
+
+ sk_sp<SkImage> fImage;
+ const SkTileMode fTileModeX;
+ const SkTileMode fTileModeY;
+ const bool fClampAsIfUnpremul;
+
+ friend class SkShaderBase;
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkLightingShader.cpp b/gfx/skia/skia/src/shaders/SkLightingShader.cpp
new file mode 100644
index 0000000000..4614431fc9
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLightingShader.cpp
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkMathPriv.h"
+#include "src/core/SkNormalSource.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkEmptyShader.h"
+#include "src/shaders/SkLightingShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+////////////////////////////////////////////////////////////////////////////
+
+/*
+ SkLightingShader TODOs:
+ support different light types
+ support multiple lights
+ fix non-opaque diffuse textures
+
+ To Test:
+ A8 diffuse textures
+ down & upsampled draws
+*/
+
+
+
+/** \class SkLightingShaderImpl
+ This subclass of shader applies lighting.
+*/
+class SkLightingShaderImpl : public SkShaderBase {
+public:
+ /** Create a new lighting shader that uses the provided normal map and
+ lights to light the diffuse bitmap.
+ @param diffuseShader the shader that provides the diffuse colors
+ @param normalSource the source of normals for lighting computation
+ @param lights the lights applied to the geometry
+ */
+ SkLightingShaderImpl(sk_sp<SkShader> diffuseShader,
+ sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights)
+ : fDiffuseShader(std::move(diffuseShader))
+ , fNormalSource(std::move(normalSource))
+ , fLights(std::move(lights)) {}
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+ class LightingShaderContext : public Context {
+ public:
+        // The context takes ownership of the diffuse context and normal provider. It will call
+        // their destructors and then indirectly free their memory by calling free() on heapAllocated
+ LightingShaderContext(const SkLightingShaderImpl&, const ContextRec&,
+ SkShaderBase::Context* diffuseContext, SkNormalSource::Provider*,
+ void* heapAllocated);
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ private:
+ SkShaderBase::Context* fDiffuseContext;
+ SkNormalSource::Provider* fNormalProvider;
+ SkColor fPaintColor;
+ uint32_t fFlags;
+
+ typedef Context INHERITED;
+ };
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLightingShaderImpl)
+
+ sk_sp<SkShader> fDiffuseShader;
+ sk_sp<SkNormalSource> fNormalSource;
+ sk_sp<SkLights> fLights;
+
+ friend class SkLightingShader;
+
+ typedef SkShaderBase INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+// This FP expects a premul'd color input for its diffuse color. Premul'ing of the paint's color is
+// handled by the asFragmentProcessor() factory, but shaders providing diffuse color must output it
+// premul'd.
+class LightingFP : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> normalFP,
+ sk_sp<SkLights> lights) {
+ return std::unique_ptr<GrFragmentProcessor>(new LightingFP(std::move(normalFP),
+ std::move(lights)));
+ }
+
+ const char* name() const override { return "LightingFP"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new LightingFP(*this));
+ }
+
+ const SkTArray<SkLights::Light>& directionalLights() const { return fDirectionalLights; }
+ const SkColor3f& ambientColor() const { return fAmbientColor; }
+
+private:
+ class GLSLLightingFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLLightingFP() {
+ fAmbientColor.fX = 0.0f;
+ }
+
+ void emitCode(EmitArgs& args) override {
+
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const LightingFP& lightingFP = args.fFp.cast<LightingFP>();
+
+ const char *lightDirsUniName = nullptr;
+ const char *lightColorsUniName = nullptr;
+ if (lightingFP.fDirectionalLights.count() != 0) {
+ fLightDirsUni = uniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kFloat3_GrSLType,
+ "LightDir",
+ lightingFP.fDirectionalLights.count(),
+ &lightDirsUniName);
+ fLightColorsUni = uniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kFloat3_GrSLType,
+ "LightColor",
+ lightingFP.fDirectionalLights.count(),
+ &lightColorsUniName);
+ }
+
+ const char* ambientColorUniName = nullptr;
+ fAmbientColorUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat3_GrSLType,
+ "AmbientColor", &ambientColorUniName);
+
+ fragBuilder->codeAppendf("half4 diffuseColor = %s;", args.fInputColor);
+
+ SkString dstNormalName("dstNormal");
+ this->invokeChild(0, &dstNormalName, args);
+
+ fragBuilder->codeAppendf("float3 normal = %s.xyz;", dstNormalName.c_str());
+
+ fragBuilder->codeAppend( "half3 result = half3(0.0);");
+
+ // diffuse light
+ if (lightingFP.fDirectionalLights.count() != 0) {
+ fragBuilder->codeAppendf("for (int i = 0; i < %d; i++) {",
+ lightingFP.fDirectionalLights.count());
+ // TODO: modulate the contribution from each light based on the shadow map
+ fragBuilder->codeAppendf(" half NdotL = saturate(half(dot(normal, %s[i])));",
+ lightDirsUniName);
+ fragBuilder->codeAppendf(" result += half3(%s[i])*diffuseColor.rgb*NdotL;",
+ lightColorsUniName);
+ fragBuilder->codeAppend("}");
+ }
+
+ // ambient light
+ fragBuilder->codeAppendf("result += half3(%s) * diffuseColor.rgb;",
+ ambientColorUniName);
+
+ // Clamping to alpha (equivalent to an unpremul'd clamp to 1.0)
+ fragBuilder->codeAppendf("%s = half4(clamp(result.rgb, 0.0, diffuseColor.a), "
+ "diffuseColor.a);", args.fOutputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrShaderCaps&, GrProcessorKeyBuilder* b) {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+ b->add32(lightingFP.fDirectionalLights.count());
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) override {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+
+ const SkTArray<SkLights::Light>& directionalLights = lightingFP.directionalLights();
+ if (directionalLights != fDirectionalLights) {
+ SkTArray<SkColor3f> lightDirs(directionalLights.count());
+ SkTArray<SkVector3> lightColors(directionalLights.count());
+ for (const SkLights::Light& light : directionalLights) {
+ lightDirs.push_back(light.dir());
+ lightColors.push_back(light.color());
+ }
+
+ pdman.set3fv(fLightDirsUni, directionalLights.count(), &(lightDirs[0].fX));
+ pdman.set3fv(fLightColorsUni, directionalLights.count(), &(lightColors[0].fX));
+
+ fDirectionalLights = directionalLights;
+ }
+
+ const SkColor3f& ambientColor = lightingFP.ambientColor();
+ if (ambientColor != fAmbientColor) {
+ pdman.set3fv(fAmbientColorUni, 1, &ambientColor.fX);
+ fAmbientColor = ambientColor;
+ }
+ }
+
+ private:
+ SkTArray<SkLights::Light> fDirectionalLights;
+ GrGLSLProgramDataManager::UniformHandle fLightDirsUni;
+ GrGLSLProgramDataManager::UniformHandle fLightColorsUni;
+
+ SkColor3f fAmbientColor;
+ GrGLSLProgramDataManager::UniformHandle fAmbientColorUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLLightingFP::GenKey(*this, caps, b);
+ }
+
+ LightingFP(std::unique_ptr<GrFragmentProcessor> normalFP, sk_sp<SkLights> lights)
+ : INHERITED(kLightingFP_ClassID, kPreservesOpaqueInput_OptimizationFlag) {
+ // fuse all ambient lights into a single one
+ fAmbientColor = lights->ambientLightColor();
+ for (int i = 0; i < lights->numLights(); ++i) {
+ if (SkLights::Light::kDirectional_LightType == lights->light(i).type()) {
+ fDirectionalLights.push_back(lights->light(i));
+ // TODO get the handle to the shadow map if there is one
+ } else {
+ SkDEBUGFAIL("Unimplemented Light Type passed to LightingFP");
+ }
+ }
+
+ this->registerChildProcessor(std::move(normalFP));
+ }
+
+ LightingFP(const LightingFP& that)
+ : INHERITED(kLightingFP_ClassID, kPreservesOpaqueInput_OptimizationFlag)
+ , fDirectionalLights(that.fDirectionalLights)
+ , fAmbientColor(that.fAmbientColor) {
+ this->registerChildProcessor(that.childProcessor(0).clone());
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLLightingFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+ return fDirectionalLights == lightingFP.fDirectionalLights &&
+ fAmbientColor == lightingFP.fAmbientColor;
+ }
+
+ SkTArray<SkLights::Light> fDirectionalLights;
+ SkColor3f fAmbientColor;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor> SkLightingShaderImpl::asFragmentProcessor(const GrFPArgs& args) const {
+ std::unique_ptr<GrFragmentProcessor> normalFP(fNormalSource->asFragmentProcessor(args));
+ if (!normalFP) {
+ return nullptr;
+ }
+
+ if (fDiffuseShader) {
+ std::unique_ptr<GrFragmentProcessor> fpPipeline[] = {
+ as_SB(fDiffuseShader)->asFragmentProcessor(args),
+ LightingFP::Make(std::move(normalFP), fLights)
+ };
+ if (!fpPipeline[0] || !fpPipeline[1]) {
+ return nullptr;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> innerLightFP = GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+ // FP is wrapped because paint's alpha needs to be applied to output
+ return GrFragmentProcessor::MulChildByInputAlpha(std::move(innerLightFP));
+ } else {
+ // FP is wrapped because paint comes in unpremul'd to fragment shader, but LightingFP
+ // expects premul'd color.
+ return GrFragmentProcessor::PremulInput(LightingFP::Make(std::move(normalFP), fLights));
+ }
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkLightingShaderImpl::isOpaque() const {
+ return (fDiffuseShader ? fDiffuseShader->isOpaque() : false);
+}
+
+SkLightingShaderImpl::LightingShaderContext::LightingShaderContext(
+ const SkLightingShaderImpl& shader, const ContextRec& rec,
+ SkShaderBase::Context* diffuseContext, SkNormalSource::Provider* normalProvider,
+ void* heapAllocated)
+ : INHERITED(shader, rec)
+ , fDiffuseContext(diffuseContext)
+ , fNormalProvider(normalProvider) {
+ bool isOpaque = shader.isOpaque();
+
+ // update fFlags
+ uint32_t flags = 0;
+ if (isOpaque && (255 == this->getPaintAlpha())) {
+ flags |= kOpaqueAlpha_Flag;
+ }
+
+ fPaintColor = rec.fPaint->getColor();
+ fFlags = flags;
+}
+
+static inline SkPMColor convert(SkColor3f color, U8CPU a) {
+ if (color.fX <= 0.0f) {
+ color.fX = 0.0f;
+ } else if (color.fX >= 255.0f) {
+ color.fX = 255.0f;
+ }
+
+ if (color.fY <= 0.0f) {
+ color.fY = 0.0f;
+ } else if (color.fY >= 255.0f) {
+ color.fY = 255.0f;
+ }
+
+ if (color.fZ <= 0.0f) {
+ color.fZ = 0.0f;
+ } else if (color.fZ >= 255.0f) {
+ color.fZ = 255.0f;
+ }
+
+ return SkPreMultiplyARGB(a, (int) color.fX, (int) color.fY, (int) color.fZ);
+}
+
+// larger is better (fewer times we have to loop), but we shouldn't
+// take up too much stack-space (each one here costs 16 bytes)
+#define BUFFER_MAX 16
+void SkLightingShaderImpl::LightingShaderContext::shadeSpan(int x, int y,
+ SkPMColor result[], int count) {
+ const SkLightingShaderImpl& lightShader = static_cast<const SkLightingShaderImpl&>(fShader);
+
+ SkPMColor diffuse[BUFFER_MAX];
+ SkPoint3 normals[BUFFER_MAX];
+
+ SkColor diffColor = fPaintColor;
+
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ fNormalProvider->fillScanLine(x, y, normals, n);
+
+ if (fDiffuseContext) {
+ fDiffuseContext->shadeSpan(x, y, diffuse, n);
+ }
+
+ for (int i = 0; i < n; ++i) {
+ if (fDiffuseContext) {
+ diffColor = SkUnPreMultiply::PMColorToColor(diffuse[i]);
+ }
+
+ SkColor3f accum = SkColor3f::Make(0.0f, 0.0f, 0.0f);
+
+ // Adding ambient light
+ accum.fX += lightShader.fLights->ambientLightColor().fX * SkColorGetR(diffColor);
+ accum.fY += lightShader.fLights->ambientLightColor().fY * SkColorGetG(diffColor);
+ accum.fZ += lightShader.fLights->ambientLightColor().fZ * SkColorGetB(diffColor);
+
+ // This is all done in linear unpremul color space (each component 0..255.0f though)
+ for (int l = 0; l < lightShader.fLights->numLights(); ++l) {
+ const SkLights::Light& light = lightShader.fLights->light(l);
+
+ SkScalar illuminanceScalingFactor = 1.0f;
+
+ if (SkLights::Light::kDirectional_LightType == light.type()) {
+ illuminanceScalingFactor = normals[i].dot(light.dir());
+ if (illuminanceScalingFactor < 0.0f) {
+ illuminanceScalingFactor = 0.0f;
+ }
+ }
+
+ accum.fX += light.color().fX * SkColorGetR(diffColor) * illuminanceScalingFactor;
+ accum.fY += light.color().fY * SkColorGetG(diffColor) * illuminanceScalingFactor;
+ accum.fZ += light.color().fZ * SkColorGetB(diffColor) * illuminanceScalingFactor;
+ }
+
+            // convert() premultiplies the accumulated color with alpha
+ result[i] = convert(accum, SkColorGetA(diffColor));
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkLightingShaderImpl::CreateProc(SkReadBuffer& buf) {
+
+ // Discarding SkShader flattenable params
+ bool hasLocalMatrix = buf.readBool();
+ if (hasLocalMatrix) {
+ return nullptr;
+ }
+
+ sk_sp<SkLights> lights = SkLights::MakeFromBuffer(buf);
+
+ sk_sp<SkNormalSource> normalSource(buf.readFlattenable<SkNormalSource>());
+
+ bool hasDiffuse = buf.readBool();
+ sk_sp<SkShader> diffuseShader = nullptr;
+ if (hasDiffuse) {
+ diffuseShader = buf.readFlattenable<SkShaderBase>();
+ }
+
+ return sk_make_sp<SkLightingShaderImpl>(std::move(diffuseShader), std::move(normalSource),
+ std::move(lights));
+}
+
+void SkLightingShaderImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ fLights->flatten(buf);
+
+ buf.writeFlattenable(fNormalSource.get());
+ buf.writeBool(static_cast<bool>(fDiffuseShader));
+ if (fDiffuseShader) {
+ buf.writeFlattenable(fDiffuseShader.get());
+ }
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkLightingShaderImpl::onMakeContext(
+ const ContextRec& rec, SkArenaAlloc* alloc) const
+{
+ SkShaderBase::Context *diffuseContext = nullptr;
+ if (fDiffuseShader) {
+ diffuseContext = as_SB(fDiffuseShader)->makeContext(rec, alloc);
+ if (!diffuseContext) {
+ return nullptr;
+ }
+ }
+
+ SkNormalSource::Provider* normalProvider = fNormalSource->asProvider(rec, alloc);
+ if (!normalProvider) {
+ return nullptr;
+ }
+
+ // The diffuse shader can inspect the rec and make its decision about rec's colorspace.
+ // What about the lighting shader? Is lighting sensitive to the rec's (device) colorspace?
+ return alloc->make<LightingShaderContext>(*this, rec, diffuseContext, normalProvider, nullptr);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkLightingShader::Make(sk_sp<SkShader> diffuseShader,
+ sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights) {
+ SkASSERT(lights);
+ if (!normalSource) {
+ normalSource = SkNormalSource::MakeFlat();
+ }
+
+ return sk_make_sp<SkLightingShaderImpl>(std::move(diffuseShader), std::move(normalSource),
+ std::move(lights));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLightingShader::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkLightingShaderImpl); }
diff --git a/gfx/skia/skia/src/shaders/SkLightingShader.h b/gfx/skia/skia/src/shaders/SkLightingShader.h
new file mode 100644
index 0000000000..cdf27cb40e
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLightingShader.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLightingShader_DEFINED
+#define SkLightingShader_DEFINED
+
+#include "include/core/SkShader.h"
+#include "src/shaders/SkLights.h"
+
+class SkBitmap;
+class SkMatrix;
+class SkNormalSource;
+
+class SkLightingShader {
+public:
+ /** Returns a shader that lights the shape, colored by the diffuseShader, using the
+ normals from normalSource, with the set of lights provided.
+
+ @param diffuseShader the shader that provides the colors. If nullptr, uses the paint's
+ color.
+ @param normalSource the source for the shape's normals. If nullptr, assumes straight
+ up normals (<0,0,1>).
+ @param lights the lights applied to the normals
+
+ The lighting equation is currently:
+ result = (LightColor * dot(Normal, LightDir) + AmbientColor) * DiffuseColor
+
+ */
+ static sk_sp<SkShader> Make(sk_sp<SkShader> diffuseShader, sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights);
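+
+    /** A minimal usage sketch (illustrative only, not part of the API; assumes diffuse is an
+        existing sk_sp<SkShader> and that the chosen light values are reasonable):
+
+            SkLights::Builder builder;
+            builder.setAmbientLightColor(SkColor3f::Make(0.1f, 0.1f, 0.1f));
+            builder.add(SkLights::Light::MakeDirectional(
+                    SkColor3f::Make(1.0f, 1.0f, 1.0f), SkVector3::Make(0.0f, 0.0f, 1.0f)));
+            sk_sp<SkShader> lit = SkLightingShader::Make(diffuse, nullptr, builder.finish());
+    */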
+
+ static void RegisterFlattenables();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkLights.cpp b/gfx/skia/skia/src/shaders/SkLights.cpp
new file mode 100644
index 0000000000..e80cf2788b
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLights.cpp
@@ -0,0 +1,68 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkReadBuffer.h"
+#include "src/shaders/SkLights.h"
+
+sk_sp<SkLights> SkLights::MakeFromBuffer(SkReadBuffer& buf) {
+ Builder builder;
+
+ SkColor3f ambColor;
+ if (!buf.readScalarArray(&ambColor.fX, 3)) {
+ return nullptr;
+ }
+
+ builder.setAmbientLightColor(ambColor);
+
+ int numLights = buf.readInt();
+
+ for (int l = 0; l < numLights; ++l) {
+ bool isPoint = buf.readBool();
+
+ SkColor3f color;
+ if (!buf.readScalarArray(&color.fX, 3)) {
+ return nullptr;
+ }
+
+ SkVector3 dirOrPos;
+ if (!buf.readScalarArray(&dirOrPos.fX, 3)) {
+ return nullptr;
+ }
+
+ if (isPoint) {
+ SkScalar intensity;
+ intensity = buf.readScalar();
+ Light light = Light::MakePoint(color, dirOrPos, intensity);
+ builder.add(light);
+ } else {
+ Light light = Light::MakeDirectional(color, dirOrPos);
+ builder.add(light);
+ }
+ }
+
+ return builder.finish();
+}
+
+void SkLights::flatten(SkWriteBuffer& buf) const {
+ buf.writeScalarArray(&this->ambientLightColor().fX, 3);
+
+ buf.writeInt(this->numLights());
+ for (int l = 0; l < this->numLights(); ++l) {
+ const Light& light = this->light(l);
+
+ bool isPoint = Light::kPoint_LightType == light.type();
+
+ buf.writeBool(isPoint);
+ buf.writeScalarArray(&light.color().fX, 3);
+ buf.writeScalarArray(&light.dir().fX, 3);
+
+ if (isPoint) {
+ buf.writeScalar(light.intensity());
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/shaders/SkLights.h b/gfx/skia/skia/src/shaders/SkLights.h
new file mode 100644
index 0000000000..f22d6bbadb
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLights.h
@@ -0,0 +1,192 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLights_DEFINED
+#define SkLights_DEFINED
+
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkTArray.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+/** \class SkLights
+ SkLights encapsulates a set of directional, point and ambient lights for use with the
+ SkLightingShader.
+*/
+class SK_API SkLights : public SkRefCnt {
+public:
+ class Light {
+ public:
+ enum LightType {
+ kDirectional_LightType,
+ kPoint_LightType
+ };
+
+ Light(const Light& other)
+ : fType(other.fType)
+ , fColor(other.fColor)
+ , fDirOrPos(other.fDirOrPos)
+ , fIntensity(other.fIntensity) {}
+
+ Light(Light&& other)
+ : fType(other.fType)
+ , fColor(other.fColor)
+ , fDirOrPos(other.fDirOrPos)
+ , fIntensity(other.fIntensity) {}
+
+ static Light MakeDirectional(const SkColor3f& color, const SkVector3& dir) {
+ Light light(kDirectional_LightType, color, dir, 0.0f);
+ if (!light.fDirOrPos.normalize()) {
+ light.fDirOrPos.set(0.0f, 0.0f, 1.0f);
+ }
+ return light;
+ }
+
+ static Light MakePoint(const SkColor3f& color, const SkPoint3& pos, SkScalar intensity) {
+ return Light(kPoint_LightType, color, pos, intensity);
+ }
+
+ LightType type() const { return fType; }
+ const SkColor3f& color() const { return fColor; }
+ const SkVector3& dir() const {
+ SkASSERT(kDirectional_LightType == fType);
+ return fDirOrPos;
+ }
+ const SkPoint3& pos() const {
+ SkASSERT(kPoint_LightType == fType);
+ return fDirOrPos;
+ }
+ SkScalar intensity() const {
+ SkASSERT(kPoint_LightType == fType);
+ return fIntensity;
+ }
+
+ Light& operator=(const Light& other) {
+ if (this == &other) {
+ return *this;
+ }
+
+ fType = other.fType;
+ fColor = other.fColor;
+ fDirOrPos = other.fDirOrPos;
+ fIntensity = other.fIntensity;
+ return *this;
+ }
+
+        bool operator==(const Light& other) const {
+            return (fType == other.fType) &&
+                   (fColor == other.fColor) &&
+                   (fDirOrPos == other.fDirOrPos) &&
+                   (fIntensity == other.fIntensity);
+        }
+
+        bool operator!=(const Light& other) const { return !(this->operator==(other)); }
+
+ private:
+ friend class SkLights;
+
+ Light(LightType type, const SkColor3f& color, const SkVector3& dirOrPos,
+ SkScalar intensity)
+ : fType(type)
+ , fColor(color)
+ , fDirOrPos(dirOrPos)
+ , fIntensity(intensity) {}
+
+ LightType fType;
+ SkColor3f fColor; // linear (unpremul) color. Range is 0..1 in each channel.
+
+ SkVector3 fDirOrPos; // For directional lights, holds the direction towards the
+ // light (+Z is out of the screen).
+ // If degenerate, it will be replaced with (0, 0, 1).
+ // For point lights, holds location of point light
+
+ SkScalar fIntensity; // For point lights, dictates the light intensity.
+ // Simply a multiplier to the final light output value.
+ };
+
+ class Builder {
+ public:
+ Builder() : fLights(new SkLights) {}
+
+ void add(const Light& light) {
+ if (fLights) {
+ fLights->fLights.push_back(light);
+ }
+ }
+
+ void add(Light&& light) {
+ if (fLights) {
+ fLights->fLights.push_back(std::move(light));
+ }
+ }
+
+ void setAmbientLightColor(const SkColor3f& color) {
+ if (fLights) {
+ fLights->fAmbientLightColor = color;
+ }
+ }
+
+ sk_sp<SkLights> finish() {
+ return std::move(fLights);
+ }
+
+ private:
+ sk_sp<SkLights> fLights;
+ };
+
+ /** Returns number of lights not including the ambient light.
+
+ @return number of lights not including the ambient light
+ */
+ int numLights() const { return fLights.count(); }
+
+ /** Returns the index-th light.
+
+ @param index the index of the desired light
+ @return the index-th light
+ */
+ const Light& light(int index) const { return fLights[index]; }
+
+ /** Returns the ambient light.
+
+ @return the ambient light
+ */
+ const SkColor3f& ambientLightColor() const {
+ return fAmbientLightColor;
+ }
+
+ /**
+ * Recreate an SkLights object that was serialized into a buffer.
+ *
+ * @param SkReadBuffer Serialized blob data.
+ * @return A new SkLights representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkLights> MakeFromBuffer(SkReadBuffer& buf);
+
+ /**
+ * Serialize to a buffer.
+ *
+ * @param buffer the write buffer to write out to
+ */
+ void flatten(SkWriteBuffer& buf) const;
+
+private:
+ friend class SkLightingShaderImpl;
+
+ SkLights() : fAmbientLightColor(SkColor3f::Make(0.0f, 0.0f, 0.0f)) {}
+
+ SkTArray<Light> fLights;
+ SkColor3f fAmbientLightColor;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp
new file mode 100644
index 0000000000..be9a6fac5e
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkTLazy.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#endif
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkLocalMatrixShader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ return as_SB(fProxyShader)->asFragmentProcessor(
+ GrFPArgs::WithPreLocalMatrix(args, this->getLocalMatrix()));
+}
+#endif
+
+sk_sp<SkFlattenable> SkLocalMatrixShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ auto baseShader(buffer.readShader());
+ if (!baseShader) {
+ return nullptr;
+ }
+ return baseShader->makeWithLocalMatrix(lm);
+}
+
+void SkLocalMatrixShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.writeFlattenable(fProxyShader.get());
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkLocalMatrixShader::onMakeContext(
+ const ContextRec& rec, SkArenaAlloc* alloc) const
+{
+ SkTCopyOnFirstWrite<SkMatrix> lm(this->getLocalMatrix());
+ if (rec.fLocalMatrix) {
+ lm.writable()->preConcat(*rec.fLocalMatrix);
+ }
+
+ ContextRec newRec(rec);
+ newRec.fLocalMatrix = lm;
+
+ return as_SB(fProxyShader)->makeContext(newRec, alloc);
+}
+#endif
+
+SkImage* SkLocalMatrixShader::onIsAImage(SkMatrix* outMatrix, SkTileMode* mode) const {
+ SkMatrix imageMatrix;
+ SkImage* image = fProxyShader->isAImage(&imageMatrix, mode);
+ if (image && outMatrix) {
+ // Local matrix must be applied first so it is on the right side of the concat.
+ *outMatrix = SkMatrix::Concat(imageMatrix, this->getLocalMatrix());
+ }
+
+ return image;
+}
+
+SkPicture* SkLocalMatrixShader::isAPicture(SkMatrix* matrix,
+ SkTileMode tileModes[2],
+ SkRect* tile) const {
+ SkMatrix proxyMatrix;
+ SkPicture* picture = as_SB(fProxyShader)->isAPicture(&proxyMatrix, tileModes, tile);
+ if (picture && matrix) {
+ *matrix = SkMatrix::Concat(proxyMatrix, this->getLocalMatrix());
+ }
+ return picture;
+}
+
+bool SkLocalMatrixShader::onAppendStages(const SkStageRec& rec) const {
+ SkTCopyOnFirstWrite<SkMatrix> lm(this->getLocalMatrix());
+ if (rec.fLocalM) {
+ lm.writable()->preConcat(*rec.fLocalM);
+ }
+
+ SkStageRec newRec = rec;
+ newRec.fLocalM = lm;
+ return as_SB(fProxyShader)->appendStages(newRec);
+}
+
+sk_sp<SkShader> SkShader::makeWithLocalMatrix(const SkMatrix& localMatrix) const {
+ if (localMatrix.isIdentity()) {
+ return sk_ref_sp(const_cast<SkShader*>(this));
+ }
+
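+    // If this shader is itself just a local-matrix wrapper, collapse the two matrices into a
+    // single wrapper around the underlying shader rather than nesting wrappers.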
+ const SkMatrix* lm = &localMatrix;
+
+ sk_sp<SkShader> baseShader;
+ SkMatrix otherLocalMatrix;
+ sk_sp<SkShader> proxy(as_SB(this)->makeAsALocalMatrixShader(&otherLocalMatrix));
+ if (proxy) {
+ otherLocalMatrix.preConcat(localMatrix);
+ lm = &otherLocalMatrix;
+ baseShader = proxy;
+ } else {
+ baseShader = sk_ref_sp(const_cast<SkShader*>(this));
+ }
+
+ return sk_make_sp<SkLocalMatrixShader>(std::move(baseShader), *lm);
+}
diff --git a/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h
new file mode 100644
index 0000000000..2895dca659
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixShader_DEFINED
+#define SkLocalMatrixShader_DEFINED
+
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+class GrFragmentProcessor;
+class SkArenaAlloc;
+
+class SkLocalMatrixShader final : public SkShaderBase {
+public:
+ SkLocalMatrixShader(sk_sp<SkShader> proxy, const SkMatrix& localMatrix)
+ : INHERITED(&localMatrix)
+ , fProxyShader(std::move(proxy))
+ {}
+
+ GradientType asAGradient(GradientInfo* info) const override {
+ return fProxyShader->asAGradient(info);
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+ sk_sp<SkShader> makeAsALocalMatrixShader(SkMatrix* localMatrix) const override {
+ if (localMatrix) {
+ *localMatrix = this->getLocalMatrix();
+ }
+ return fProxyShader;
+ }
+
+ SkPicture* isAPicture(SkMatrix*, SkTileMode[2], SkRect* tile) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+ SkImage* onIsAImage(SkMatrix* matrix, SkTileMode* mode) const override;
+
+ bool onAppendStages(const SkStageRec&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLocalMatrixShader)
+
+ sk_sp<SkShader> fProxyShader;
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp b/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp
new file mode 100644
index 0000000000..812dc1694f
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp
@@ -0,0 +1,1546 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkPerlinNoiseShader.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCoordTransform.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#include "src/gpu/effects/generated/GrConstColorProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+#endif
+
+static const int kBlockSize = 256;
+static const int kBlockMask = kBlockSize - 1;
+static const int kPerlinNoise = 4096;
+static const int kRandMaximum = SK_MaxS32; // 2**31 - 1
+
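+// Ken Perlin's reference permutation table. The 256 entries are repeated so that lookups of the
+// form p[p[x] + y] (indices up to 511) need no extra masking.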
+static uint8_t improved_noise_permutations[] = {
+ 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103,
+ 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26,
+ 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
+ 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231,
+ 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143,
+ 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
+ 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124,
+ 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17,
+ 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
+ 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185,
+ 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81,
+ 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176,
+ 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243,
+ 141, 128, 195, 78, 66, 215, 61, 156, 180,
+ 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103,
+ 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26,
+ 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
+ 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231,
+ 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143,
+ 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
+ 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124,
+ 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17,
+ 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
+ 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185,
+ 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81,
+ 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176,
+ 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243,
+ 141, 128, 195, 78, 66, 215, 61, 156, 180
+};
+
+class SkPerlinNoiseShaderImpl : public SkShaderBase {
+public:
+ struct StitchData {
+ StitchData()
+ : fWidth(0)
+ , fWrapX(0)
+ , fHeight(0)
+ , fWrapY(0)
+ {}
+
+ StitchData(SkScalar w, SkScalar h)
+ : fWidth(SkTMin(SkScalarRoundToInt(w), SK_MaxS32 - kPerlinNoise))
+ , fWrapX(kPerlinNoise + fWidth)
+ , fHeight(SkTMin(SkScalarRoundToInt(h), SK_MaxS32 - kPerlinNoise))
+ , fWrapY(kPerlinNoise + fHeight) {}
+
+ bool operator==(const StitchData& other) const {
+ return fWidth == other.fWidth &&
+ fWrapX == other.fWrapX &&
+ fHeight == other.fHeight &&
+ fWrapY == other.fWrapY;
+ }
+
+ int fWidth; // How much to subtract to wrap for stitching.
+ int fWrapX; // Minimum value to wrap.
+ int fHeight;
+ int fWrapY;
+ };
+
+ struct PaintingData {
+ PaintingData(const SkISize& tileSize, SkScalar seed,
+ SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ const SkMatrix& matrix)
+ {
+ SkVector tileVec;
+ matrix.mapVector(SkIntToScalar(tileSize.fWidth), SkIntToScalar(tileSize.fHeight),
+ &tileVec);
+
+ SkSize scale;
+ if (!matrix.decomposeScale(&scale, nullptr)) {
+ scale.set(SK_ScalarNearlyZero, SK_ScalarNearlyZero);
+ }
+ fBaseFrequency.set(baseFrequencyX * SkScalarInvert(scale.width()),
+ baseFrequencyY * SkScalarInvert(scale.height()));
+ fTileSize.set(SkScalarRoundToInt(tileVec.fX), SkScalarRoundToInt(tileVec.fY));
+ this->init(seed);
+ if (!fTileSize.isEmpty()) {
+ this->stitch();
+ }
+
+ #if SK_SUPPORT_GPU
+ SkImageInfo info = SkImageInfo::MakeA8(kBlockSize, 1);
+ SkPixmap permutationsPixmap(info, fLatticeSelector, info.minRowBytes());
+ fPermutationsImage = SkImage::MakeFromRaster(permutationsPixmap, nullptr, nullptr);
+
+ info = SkImageInfo::MakeN32Premul(kBlockSize, 4);
+ SkPixmap noisePixmap(info, fNoise[0][0], info.minRowBytes());
+ fNoiseImage = SkImage::MakeFromRaster(noisePixmap, nullptr, nullptr);
+
+ info = SkImageInfo::MakeA8(256, 1);
+ SkPixmap impPermutationsPixmap(info, improved_noise_permutations, info.minRowBytes());
+ fImprovedPermutationsImage = SkImage::MakeFromRaster(impPermutationsPixmap, nullptr,
+ nullptr);
+
+ static uint8_t gradients[] = { 2, 2, 1, 0,
+ 0, 2, 1, 0,
+ 2, 0, 1, 0,
+ 0, 0, 1, 0,
+ 2, 1, 2, 0,
+ 0, 1, 2, 0,
+ 2, 1, 0, 0,
+ 0, 1, 0, 0,
+ 1, 2, 2, 0,
+ 1, 0, 2, 0,
+ 1, 2, 0, 0,
+ 1, 0, 0, 0,
+ 2, 2, 1, 0,
+ 1, 0, 2, 0,
+ 0, 2, 1, 0,
+ 1, 0, 0, 0 };
+ info = SkImageInfo::MakeN32Premul(16, 1);
+ SkPixmap gradPixmap(info, gradients, info.minRowBytes());
+ fGradientImage = SkImage::MakeFromRaster(gradPixmap, nullptr, nullptr);
+ #endif
+ }
+
+ #if SK_SUPPORT_GPU
+ PaintingData(const PaintingData& that)
+ : fSeed(that.fSeed)
+ , fTileSize(that.fTileSize)
+ , fBaseFrequency(that.fBaseFrequency)
+ , fStitchDataInit(that.fStitchDataInit)
+ , fPermutationsImage(that.fPermutationsImage)
+ , fNoiseImage(that.fNoiseImage)
+ , fImprovedPermutationsImage(that.fImprovedPermutationsImage)
+ , fGradientImage(that.fGradientImage) {
+ memcpy(fLatticeSelector, that.fLatticeSelector, sizeof(fLatticeSelector));
+ memcpy(fNoise, that.fNoise, sizeof(fNoise));
+ memcpy(fGradient, that.fGradient, sizeof(fGradient));
+ }
+ #endif
+
+ int fSeed;
+ uint8_t fLatticeSelector[kBlockSize];
+ uint16_t fNoise[4][kBlockSize][2];
+ SkPoint fGradient[4][kBlockSize];
+ SkISize fTileSize;
+ SkVector fBaseFrequency;
+ StitchData fStitchDataInit;
+
+ private:
+
+ #if SK_SUPPORT_GPU
+ sk_sp<SkImage> fPermutationsImage;
+ sk_sp<SkImage> fNoiseImage;
+ sk_sp<SkImage> fImprovedPermutationsImage;
+ sk_sp<SkImage> fGradientImage;
+ #endif
+
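+        // Park-Miller "minimal standard" LCG evaluated with Schrage's method: with
+        // m = kRandMaximum = 2^31 - 1, a = 16807, q = m / a, and r = m % a,
+        // a*(seed % q) - r*(seed / q) equals a*seed mod m (after adding m back when the
+        // intermediate result is non-positive) without overflowing 32-bit signed arithmetic.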
+ inline int random() {
+ static const int gRandAmplitude = 16807; // 7**5; primitive root of m
+ static const int gRandQ = 127773; // m / a
+ static const int gRandR = 2836; // m % a
+
+ int result = gRandAmplitude * (fSeed % gRandQ) - gRandR * (fSeed / gRandQ);
+ if (result <= 0)
+ result += kRandMaximum;
+ fSeed = result;
+ return result;
+ }
+
+ // Only called once. Could be part of the constructor.
+ void init(SkScalar seed)
+ {
+ static const SkScalar gInvBlockSizef = SkScalarInvert(SkIntToScalar(kBlockSize));
+
+ // According to the SVG spec, we must truncate (not round) the seed value.
+ fSeed = SkScalarTruncToInt(seed);
+            // The seed value is clamped to the range [1, kRandMaximum - 1].
+ if (fSeed <= 0) {
+ fSeed = -(fSeed % (kRandMaximum - 1)) + 1;
+ }
+ if (fSeed > kRandMaximum - 1) {
+ fSeed = kRandMaximum - 1;
+ }
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fLatticeSelector[i] = i;
+ fNoise[channel][i][0] = (random() % (2 * kBlockSize));
+ fNoise[channel][i][1] = (random() % (2 * kBlockSize));
+ }
+ }
+ for (int i = kBlockSize - 1; i > 0; --i) {
+ int k = fLatticeSelector[i];
+ int j = random() % kBlockSize;
+ SkASSERT(j >= 0);
+ SkASSERT(j < kBlockSize);
+ fLatticeSelector[i] = fLatticeSelector[j];
+ fLatticeSelector[j] = k;
+ }
+
+ // Perform the permutations now
+ {
+ // Copy noise data
+ uint16_t noise[4][kBlockSize][2];
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ noise[channel][i][j] = fNoise[channel][i][j];
+ }
+ }
+ }
+ // Do permutations on noise data
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ fNoise[channel][i][j] = noise[channel][fLatticeSelector[i]][j];
+ }
+ }
+ }
+ }
+
+ // Half of the largest possible value for 16 bit unsigned int
+ static const SkScalar gHalfMax16bits = 32767.5f;
+
+            // Compute gradients from permuted noise data
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fGradient[channel][i] = SkPoint::Make(
+ (fNoise[channel][i][0] - kBlockSize) * gInvBlockSizef,
+ (fNoise[channel][i][1] - kBlockSize) * gInvBlockSizef);
+ fGradient[channel][i].normalize();
+ // Put the normalized gradient back into the noise data
+ fNoise[channel][i][0] = SkScalarRoundToInt(
+ (fGradient[channel][i].fX + 1) * gHalfMax16bits);
+ fNoise[channel][i][1] = SkScalarRoundToInt(
+ (fGradient[channel][i].fY + 1) * gHalfMax16bits);
+ }
+ }
+ }
+
+ // Only called once. Could be part of the constructor.
+ void stitch() {
+ SkScalar tileWidth = SkIntToScalar(fTileSize.width());
+ SkScalar tileHeight = SkIntToScalar(fTileSize.height());
+ SkASSERT(tileWidth > 0 && tileHeight > 0);
+ // When stitching tiled turbulence, the frequencies must be adjusted
+ // so that the tile borders will be continuous.
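+        // For each axis, choose between the floor- and ceil-quantized frequencies the one whose
+        // ratio to the requested frequency is closer to 1 (comparing f/low against high/f).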
+ if (fBaseFrequency.fX) {
+            SkScalar lowFrequencyX =
+                SkScalarFloorToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+            SkScalar highFrequencyX =
+                SkScalarCeilToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+            // BaseFrequency should be non-negative according to the standard.
+            // lowFrequencyX can be 0 if fBaseFrequency.fX is very small.
+            if (sk_ieee_float_divide(fBaseFrequency.fX, lowFrequencyX) <
+                highFrequencyX / fBaseFrequency.fX) {
+                fBaseFrequency.fX = lowFrequencyX;
+            } else {
+                fBaseFrequency.fX = highFrequencyX;
+            }
+ }
+ if (fBaseFrequency.fY) {
+ SkScalar lowFrequency =
+ SkScalarFloorToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ SkScalar highFrequency =
+ SkScalarCeilToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ // lowFrequency can be 0 if fBaseFrequency.fY is very small.
+            if (sk_ieee_float_divide(fBaseFrequency.fY, lowFrequency) <
+                highFrequency / fBaseFrequency.fY) {
+ fBaseFrequency.fY = lowFrequency;
+ } else {
+ fBaseFrequency.fY = highFrequency;
+ }
+ }
+ // Set up TurbulenceInitial stitch values.
+ fStitchDataInit = StitchData(tileWidth * fBaseFrequency.fX,
+ tileHeight * fBaseFrequency.fY);
+ }
+
+ public:
+
+#if SK_SUPPORT_GPU
+ const sk_sp<SkImage> getPermutationsImage() const { return fPermutationsImage; }
+
+ const sk_sp<SkImage> getNoiseImage() const { return fNoiseImage; }
+
+ const sk_sp<SkImage> getImprovedPermutationsImage() const {
+ return fImprovedPermutationsImage;
+ }
+
+ const sk_sp<SkImage> getGradientImage() const { return fGradientImage; }
+#endif
+ };
+
+    /**
+     *  About the noise types: the difference between the first two is just a minor tweak to the
+     *  algorithm; they're not two entirely different noises. The output looks different, but once
+     *  the noise is generated in the [-1, 1] range, it is brought back into the [0, 1] range by
+     *  doing:
+     *    kFractalNoise_Type : noise * 0.5 + 0.5
+     *    kTurbulence_Type   : abs(noise)
+     *  There is very little difference between the two types, although you can tell them apart
+     *  visually. "Improved" is based on the Improved Perlin Noise algorithm described at
+     *  http://mrl.nyu.edu/~perlin/noise/. It is quite distinct from the other two, and its noise
+     *  is a 2D slice of a 3D noise texture. Minor changes to the Z coordinate result in minor
+     *  changes to the noise, making it suitable for animated noise.
+     */
+ enum Type {
+ kFractalNoise_Type,
+ kTurbulence_Type,
+ kImprovedNoise_Type,
+ kLast_Type = kImprovedNoise_Type
+ };
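+ // For example, a raw noise value of -0.4 becomes -0.4 * 0.5 + 0.5 = 0.3 under
+ // kFractalNoise_Type and abs(-0.4) = 0.4 under kTurbulence_Type.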
+
+ static const int kMaxOctaves = 255; // numOctaves must be >= 0 and <= kMaxOctaves
+
+ SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::Type type, SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY, int numOctaves, SkScalar seed,
+ const SkISize* tileSize);
+
+ class PerlinNoiseShaderContext : public Context {
+ public:
+ PerlinNoiseShaderContext(const SkPerlinNoiseShaderImpl& shader, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ private:
+ SkPMColor shade(const SkPoint& point, StitchData& stitchData) const;
+ SkScalar calculateTurbulenceValueForPoint(
+ int channel,
+ StitchData& stitchData, const SkPoint& point) const;
+ SkScalar calculateImprovedNoiseValueForPoint(int channel, const SkPoint& point) const;
+ SkScalar noise2D(int channel,
+ const StitchData& stitchData, const SkPoint& noiseVector) const;
+
+ SkMatrix fMatrix;
+ PaintingData fPaintingData;
+
+ typedef Context INHERITED;
+ };
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPerlinNoiseShaderImpl)
+
+ const SkPerlinNoiseShaderImpl::Type fType;
+ const SkScalar fBaseFrequencyX;
+ const SkScalar fBaseFrequencyY;
+ const int fNumOctaves;
+ const SkScalar fSeed;
+ const SkISize fTileSize;
+ const bool fStitchTiles;
+
+ friend class ::SkPerlinNoiseShader;
+
+ typedef SkShaderBase INHERITED;
+};
+
+namespace {
+
+// noiseValue is the lattice position being checked
+// limitValue is the maximum perlin noise array index value allowed before wrapping
+// newValue is the current noise dimension (either width or height) to wrap around
+inline int checkNoise(int noiseValue, int limitValue, int newValue) {
+ // If the noise value would bring us out of bounds of the current noise array while we are
+ // stitching noise tiles together, wrap the noise around the current dimension of the noise to
+ // stay within the array bounds in a continuous fashion (so that tiling lines are not visible)
+ if (noiseValue >= limitValue) {
+ noiseValue -= newValue;
+ }
+ return noiseValue;
+}
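+// For example (values assumed for illustration), while stitching with
+// limitValue = 256 and newValue = 256, a lattice position of 258 wraps to
+// 258 - 256 = 2, so sampling continues seamlessly from the opposite edge of the tile.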
+
+inline SkScalar smoothCurve(SkScalar t) {
+ return t * t * (3 - 2 * t);
+}
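+// smoothCurve is the classic smoothstep: it passes through (0, 0) and (1, 1) with a
+// zero derivative at both endpoints (d/dt of 3t^2 - 2t^3 is 6t - 6t^2, which vanishes
+// at t = 0 and t = 1); e.g. smoothCurve(0.25) = 0.15625 and smoothCurve(0.5) = 0.5.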
+
+} // end namespace
+
+SkPerlinNoiseShaderImpl::SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::Type type,
+ SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves,
+ SkScalar seed,
+ const SkISize* tileSize)
+ : fType(type)
+ , fBaseFrequencyX(baseFrequencyX)
+ , fBaseFrequencyY(baseFrequencyY)
+ , fNumOctaves(numOctaves > kMaxOctaves ? kMaxOctaves : numOctaves/*[0,255] octaves allowed*/)
+ , fSeed(seed)
+ , fTileSize(nullptr == tileSize ? SkISize::Make(0, 0) : *tileSize)
+ , fStitchTiles(!fTileSize.isEmpty())
+{
+ SkASSERT(numOctaves >= 0 && numOctaves <= kMaxOctaves);
+ SkASSERT(fBaseFrequencyX >= 0);
+ SkASSERT(fBaseFrequencyY >= 0);
+}
+
+sk_sp<SkFlattenable> SkPerlinNoiseShaderImpl::CreateProc(SkReadBuffer& buffer) {
+ Type type = buffer.read32LE(kLast_Type);
+
+ SkScalar freqX = buffer.readScalar();
+ SkScalar freqY = buffer.readScalar();
+ int octaves = buffer.read32LE<int>(kMaxOctaves);
+
+ SkScalar seed = buffer.readScalar();
+ SkISize tileSize;
+ tileSize.fWidth = buffer.readInt();
+ tileSize.fHeight = buffer.readInt();
+
+ switch (type) {
+ case kFractalNoise_Type:
+ return SkPerlinNoiseShader::MakeFractalNoise(freqX, freqY, octaves, seed, &tileSize);
+ case kTurbulence_Type:
+ return SkPerlinNoiseShader::MakeTurbulence(freqX, freqY, octaves, seed, &tileSize);
+ case kImprovedNoise_Type:
+ return SkPerlinNoiseShader::MakeImprovedNoise(freqX, freqY, octaves, seed);
+ default:
+ // Really shouldn't get here because of the earlier check on type
+ buffer.validate(false);
+ return nullptr;
+ }
+}
+
+void SkPerlinNoiseShaderImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt((int) fType);
+ buffer.writeScalar(fBaseFrequencyX);
+ buffer.writeScalar(fBaseFrequencyY);
+ buffer.writeInt(fNumOctaves);
+ buffer.writeScalar(fSeed);
+ buffer.writeInt(fTileSize.fWidth);
+ buffer.writeInt(fTileSize.fHeight);
+}
+
+SkScalar SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::noise2D(
+ int channel, const StitchData& stitchData, const SkPoint& noiseVector) const {
+ struct Noise {
+ int noisePositionIntegerValue;
+ int nextNoisePositionIntegerValue;
+ SkScalar noisePositionFractionValue;
+ Noise(SkScalar component)
+ {
+ SkScalar position = component + kPerlinNoise;
+ noisePositionIntegerValue = SkScalarFloorToInt(position);
+ noisePositionFractionValue = position - SkIntToScalar(noisePositionIntegerValue);
+ nextNoisePositionIntegerValue = noisePositionIntegerValue + 1;
+ }
+ };
+ Noise noiseX(noiseVector.x());
+ Noise noiseY(noiseVector.y());
+ SkScalar u, v;
+ const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
+ // If stitching, adjust lattice points accordingly.
+ if (perlinNoiseShader.fStitchTiles) {
+ noiseX.noisePositionIntegerValue =
+ checkNoise(noiseX.noisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
+ noiseY.noisePositionIntegerValue =
+ checkNoise(noiseY.noisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
+ noiseX.nextNoisePositionIntegerValue =
+ checkNoise(noiseX.nextNoisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
+ noiseY.nextNoisePositionIntegerValue =
+ checkNoise(noiseY.nextNoisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
+ }
+ noiseX.noisePositionIntegerValue &= kBlockMask;
+ noiseY.noisePositionIntegerValue &= kBlockMask;
+ noiseX.nextNoisePositionIntegerValue &= kBlockMask;
+ noiseY.nextNoisePositionIntegerValue &= kBlockMask;
+ int i = fPaintingData.fLatticeSelector[noiseX.noisePositionIntegerValue];
+ int j = fPaintingData.fLatticeSelector[noiseX.nextNoisePositionIntegerValue];
+ int b00 = (i + noiseY.noisePositionIntegerValue) & kBlockMask;
+ int b10 = (j + noiseY.noisePositionIntegerValue) & kBlockMask;
+ int b01 = (i + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
+ int b11 = (j + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
+ SkScalar sx = smoothCurve(noiseX.noisePositionFractionValue);
+ SkScalar sy = smoothCurve(noiseY.noisePositionFractionValue);
+
+ if (sx < 0 || sy < 0 || sx > 1 || sy > 1) {
+ return 0; // Check for pathological inputs.
+ }
+
+ // This is taken 1:1 from SVG spec: http://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement
+ SkPoint fractionValue = SkPoint::Make(noiseX.noisePositionFractionValue,
+ noiseY.noisePositionFractionValue); // Offset (0,0)
+ u = fPaintingData.fGradient[channel][b00].dot(fractionValue);
+ fractionValue.fX -= SK_Scalar1; // Offset (-1,0)
+ v = fPaintingData.fGradient[channel][b10].dot(fractionValue);
+ SkScalar a = SkScalarInterp(u, v, sx);
+ fractionValue.fY -= SK_Scalar1; // Offset (-1,-1)
+ v = fPaintingData.fGradient[channel][b11].dot(fractionValue);
+ fractionValue.fX = noiseX.noisePositionFractionValue; // Offset (0,-1)
+ u = fPaintingData.fGradient[channel][b01].dot(fractionValue);
+ SkScalar b = SkScalarInterp(u, v, sx);
+ return SkScalarInterp(a, b, sy);
+}
+
+SkScalar SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::calculateTurbulenceValueForPoint(
+ int channel, StitchData& stitchData, const SkPoint& point) const {
+ const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
+ if (perlinNoiseShader.fStitchTiles) {
+ // Set up TurbulenceInitial stitch values.
+ stitchData = fPaintingData.fStitchDataInit;
+ }
+ SkScalar turbulenceFunctionResult = 0;
+ SkPoint noiseVector(SkPoint::Make(point.x() * fPaintingData.fBaseFrequency.fX,
+ point.y() * fPaintingData.fBaseFrequency.fY));
+ SkScalar ratio = SK_Scalar1;
+ for (int octave = 0; octave < perlinNoiseShader.fNumOctaves; ++octave) {
+ SkScalar noise = noise2D(channel, stitchData, noiseVector);
+ SkScalar numer = (perlinNoiseShader.fType == kFractalNoise_Type) ?
+ noise : SkScalarAbs(noise);
+ turbulenceFunctionResult += numer / ratio;
+ noiseVector.fX *= 2;
+ noiseVector.fY *= 2;
+ ratio *= 2;
+ if (perlinNoiseShader.fStitchTiles) {
+ // Update stitch values
+ stitchData = StitchData(SkIntToScalar(stitchData.fWidth) * 2,
+ SkIntToScalar(stitchData.fHeight) * 2);
+ }
+ }
+
+ // For fractal noise, map the accumulated result from [-1, 1] into [0, 1] via
+ // (turbulenceFunctionResult + 1) / 2; turbulence uses the accumulated value as-is.
+ if (perlinNoiseShader.fType == kFractalNoise_Type) {
+ turbulenceFunctionResult = SkScalarHalf(turbulenceFunctionResult + 1);
+ }
+
+ if (channel == 3) { // Scale alpha by paint value
+ turbulenceFunctionResult *= SkIntToScalar(getPaintAlpha()) / 255;
+ }
+
+ // Clamp result
+ return SkScalarPin(turbulenceFunctionResult, 0, SK_Scalar1);
+}
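+// In other words, the loop above evaluates the sum over octaves i in [0, fNumOctaves)
+// of noise(2^i * v) / 2^i, doubling the frequency and halving the amplitude at each
+// octave; this is the standard fractal-Brownian-motion / turbulence construction.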
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Improved Perlin Noise based on Java implementation found at http://mrl.nyu.edu/~perlin/noise/
+static SkScalar fade(SkScalar t) {
+ return t * t * t * (t * (t * 6 - 15) + 10);
+}
+
+static SkScalar lerp(SkScalar t, SkScalar a, SkScalar b) {
+ return a + t * (b - a);
+}
+
+static SkScalar grad(int hash, SkScalar x, SkScalar y, SkScalar z) {
+ int h = hash & 15;
+ SkScalar u = h < 8 ? x : y;
+ SkScalar v = h < 4 ? y : h == 12 || h == 14 ? x : z;
+ return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
+}
+
+SkScalar SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::calculateImprovedNoiseValueForPoint(
+ int channel, const SkPoint& point) const {
+ const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
+ SkScalar x = point.fX * perlinNoiseShader.fBaseFrequencyX;
+ SkScalar y = point.fY * perlinNoiseShader.fBaseFrequencyY;
+ // z offset between different channels, chosen arbitrarily
+ static const SkScalar CHANNEL_DELTA = 1000.0f;
+ SkScalar z = channel * CHANNEL_DELTA + perlinNoiseShader.fSeed;
+ SkScalar result = 0;
+ SkScalar ratio = SK_Scalar1;
+ for (int i = 0; i < perlinNoiseShader.fNumOctaves; i++) {
+ int X = SkScalarFloorToInt(x) & 255;
+ int Y = SkScalarFloorToInt(y) & 255;
+ int Z = SkScalarFloorToInt(z) & 255;
+ SkScalar px = x - SkScalarFloorToScalar(x);
+ SkScalar py = y - SkScalarFloorToScalar(y);
+ SkScalar pz = z - SkScalarFloorToScalar(z);
+ SkScalar u = fade(px);
+ SkScalar v = fade(py);
+ SkScalar w = fade(pz);
+ uint8_t* permutations = improved_noise_permutations;
+ int A = permutations[X] + Y;
+ int AA = permutations[A] + Z;
+ int AB = permutations[A + 1] + Z;
+ int B = permutations[X + 1] + Y;
+ int BA = permutations[B] + Z;
+ int BB = permutations[B + 1] + Z;
+ result += lerp(w, lerp(v, lerp(u, grad(permutations[AA ], px , py , pz ),
+ grad(permutations[BA ], px - 1, py , pz )),
+ lerp(u, grad(permutations[AB ], px , py - 1, pz ),
+ grad(permutations[BB ], px - 1, py - 1, pz ))),
+ lerp(v, lerp(u, grad(permutations[AA + 1], px , py , pz - 1),
+ grad(permutations[BA + 1], px - 1, py , pz - 1)),
+ lerp(u, grad(permutations[AB + 1], px , py - 1, pz - 1),
+ grad(permutations[BB + 1], px - 1, py - 1, pz - 1)))) /
+ ratio;
+ x *= 2;
+ y *= 2;
+ ratio *= 2;
+ }
+ result = SkScalarClampMax((result + 1.0f) / 2.0f, 1.0f);
+ return result;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPMColor SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::shade(
+ const SkPoint& point, StitchData& stitchData) const {
+ const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
+ SkPoint newPoint;
+ fMatrix.mapPoints(&newPoint, &point, 1);
+ newPoint.fX = SkScalarRoundToScalar(newPoint.fX);
+ newPoint.fY = SkScalarRoundToScalar(newPoint.fY);
+
+ U8CPU rgba[4];
+ for (int channel = 3; channel >= 0; --channel) {
+ SkScalar value;
+ if (perlinNoiseShader.fType == kImprovedNoise_Type) {
+ value = calculateImprovedNoiseValueForPoint(channel, newPoint);
+ }
+ else {
+ value = calculateTurbulenceValueForPoint(channel, stitchData, newPoint);
+ }
+ rgba[channel] = SkScalarFloorToInt(255 * value);
+ }
+ return SkPreMultiplyARGB(rgba[3], rgba[0], rgba[1], rgba[2]);
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkPerlinNoiseShaderImpl::onMakeContext(const ContextRec& rec,
+ SkArenaAlloc* alloc) const {
+ // should we pay attention to rec's device-colorspace?
+ return alloc->make<PerlinNoiseShaderContext>(*this, rec);
+}
+#endif
+
+static inline SkMatrix total_matrix(const SkShaderBase::ContextRec& rec,
+ const SkShaderBase& shader) {
+ SkMatrix matrix = SkMatrix::Concat(*rec.fMatrix, shader.getLocalMatrix());
+ if (rec.fLocalMatrix) {
+ matrix.preConcat(*rec.fLocalMatrix);
+ }
+
+ return matrix;
+}
+
+SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::PerlinNoiseShaderContext(
+ const SkPerlinNoiseShaderImpl& shader, const ContextRec& rec)
+ : INHERITED(shader, rec)
+ , fMatrix(total_matrix(rec, shader)) // used for temp storage, adjusted below
+ , fPaintingData(shader.fTileSize, shader.fSeed, shader.fBaseFrequencyX,
+ shader.fBaseFrequencyY, fMatrix)
+{
+ // This (1,1) translation is due to WebKit's 1 based coordinates for the noise
+ // (as opposed to 0 based, usually). The same adjustment is in the setData() function.
+ fMatrix.setTranslate(-fMatrix.getTranslateX() + SK_Scalar1,
+ -fMatrix.getTranslateY() + SK_Scalar1);
+}
+
+void SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::shadeSpan(
+ int x, int y, SkPMColor result[], int count) {
+ SkPoint point = SkPoint::Make(SkIntToScalar(x), SkIntToScalar(y));
+ StitchData stitchData;
+ for (int i = 0; i < count; ++i) {
+ result[i] = shade(point, stitchData);
+ point.fX += SK_Scalar1;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+class GrGLPerlinNoise : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fStitchDataUni;
+ GrGLSLProgramDataManager::UniformHandle fBaseFrequencyUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrPerlinNoise2Effect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ SkPerlinNoiseShaderImpl::Type type, int numOctaves, bool stitchTiles,
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
+ sk_sp<GrTextureProxy> permutationsProxy, sk_sp<GrTextureProxy> noiseProxy,
+ const SkMatrix& matrix) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrPerlinNoise2Effect(
+ type, numOctaves, stitchTiles, std::move(paintingData),
+ std::move(permutationsProxy), std::move(noiseProxy), matrix));
+ }
+
+ const char* name() const override { return "PerlinNoise"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrPerlinNoise2Effect(*this));
+ }
+
+ const SkPerlinNoiseShaderImpl::StitchData& stitchData() const { return fPaintingData->fStitchDataInit; }
+
+ SkPerlinNoiseShaderImpl::Type type() const { return fType; }
+ bool stitchTiles() const { return fStitchTiles; }
+ const SkVector& baseFrequency() const { return fPaintingData->fBaseFrequency; }
+ int numOctaves() const { return fNumOctaves; }
+ const SkMatrix& matrix() const { return fCoordTransform.getMatrix(); }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GrGLPerlinNoise;
+ }
+
+ virtual void onGetGLSLProcessorKey(const GrShaderCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GrGLPerlinNoise::GenKey(*this, caps, b);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const GrPerlinNoise2Effect& s = sBase.cast<GrPerlinNoise2Effect>();
+ return fType == s.fType &&
+ fPaintingData->fBaseFrequency == s.fPaintingData->fBaseFrequency &&
+ fNumOctaves == s.fNumOctaves &&
+ fStitchTiles == s.fStitchTiles &&
+ fPaintingData->fStitchDataInit == s.fPaintingData->fStitchDataInit;
+ }
+
+ GrPerlinNoise2Effect(SkPerlinNoiseShaderImpl::Type type, int numOctaves, bool stitchTiles,
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
+ sk_sp<GrTextureProxy> permutationsProxy,
+ sk_sp<GrTextureProxy> noiseProxy,
+ const SkMatrix& matrix)
+ : INHERITED(kGrPerlinNoise2Effect_ClassID, kNone_OptimizationFlags)
+ , fType(type)
+ , fNumOctaves(numOctaves)
+ , fStitchTiles(stitchTiles)
+ , fPermutationsSampler(std::move(permutationsProxy))
+ , fNoiseSampler(std::move(noiseProxy))
+ , fPaintingData(std::move(paintingData)) {
+ this->setTextureSamplerCnt(2);
+ fCoordTransform = GrCoordTransform(matrix);
+ this->addCoordTransform(&fCoordTransform);
+ }
+
+ GrPerlinNoise2Effect(const GrPerlinNoise2Effect& that)
+ : INHERITED(kGrPerlinNoise2Effect_ClassID, kNone_OptimizationFlags)
+ , fType(that.fType)
+ , fCoordTransform(that.fCoordTransform)
+ , fNumOctaves(that.fNumOctaves)
+ , fStitchTiles(that.fStitchTiles)
+ , fPermutationsSampler(that.fPermutationsSampler)
+ , fNoiseSampler(that.fNoiseSampler)
+ , fPaintingData(new SkPerlinNoiseShaderImpl::PaintingData(*that.fPaintingData)) {
+ this->setTextureSamplerCnt(2);
+ this->addCoordTransform(&fCoordTransform);
+ }
+
+ const TextureSampler& onTextureSampler(int i) const override {
+ return IthTextureSampler(i, fPermutationsSampler, fNoiseSampler);
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ SkPerlinNoiseShaderImpl::Type fType;
+ GrCoordTransform fCoordTransform;
+ int fNumOctaves;
+ bool fStitchTiles;
+ TextureSampler fPermutationsSampler;
+ TextureSampler fNoiseSampler;
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> fPaintingData;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrPerlinNoise2Effect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrPerlinNoise2Effect::TestCreate(GrProcessorTestData* d) {
+ int numOctaves = d->fRandom->nextRangeU(2, 10);
+ bool stitchTiles = d->fRandom->nextBool();
+ SkScalar seed = SkIntToScalar(d->fRandom->nextU());
+ SkISize tileSize = SkISize::Make(d->fRandom->nextRangeU(4, 4096),
+ d->fRandom->nextRangeU(4, 4096));
+ SkScalar baseFrequencyX = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+ SkScalar baseFrequencyY = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+
+ sk_sp<SkShader> shader(d->fRandom->nextBool() ?
+ SkPerlinNoiseShader::MakeFractalNoise(baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ stitchTiles ? &tileSize : nullptr) :
+ SkPerlinNoiseShader::MakeTurbulence(baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ stitchTiles ? &tileSize : nullptr));
+
+ GrTest::TestAsFPArgs asFPArgs(d);
+ return as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+}
+#endif
+
+void GrGLPerlinNoise::emitCode(EmitArgs& args) {
+ const GrPerlinNoise2Effect& pne = args.fFp.cast<GrPerlinNoise2Effect>();
+
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ SkString vCoords = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
+ fBaseFrequencyUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "baseFrequency");
+ const char* baseFrequencyUni = uniformHandler->getUniformCStr(fBaseFrequencyUni);
+
+ const char* stitchDataUni = nullptr;
+ if (pne.stitchTiles()) {
+ fStitchDataUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "stitchData");
+ stitchDataUni = uniformHandler->getUniformCStr(fStitchDataUni);
+ }
+
+ // There are 4 lines, so the center of each line is 1/8, 3/8, 5/8 and 7/8
+ const char* chanCoordR = "0.125";
+ const char* chanCoordG = "0.375";
+ const char* chanCoordB = "0.625";
+ const char* chanCoordA = "0.875";
+ const char* chanCoord = "chanCoord";
+ const char* stitchData = "stitchData";
+ const char* ratio = "ratio";
+ const char* noiseVec = "noiseVec";
+ const char* noiseSmooth = "noiseSmooth";
+ const char* floorVal = "floorVal";
+ const char* fractVal = "fractVal";
+ const char* uv = "uv";
+ const char* ab = "ab";
+ const char* latticeIdx = "latticeIdx";
+ const char* bcoords = "bcoords";
+ const char* lattice = "lattice";
+ const char* inc8bit = "0.00390625"; // 1.0 / 256.0
+ // This is the math to convert the two 16bit integer packed into rgba 8 bit input into a
+ // [-1,1] vector and perform a dot product between that vector and the provided vector.
+ const char* dotLattice = "dot(((%s.ga + %s.rb * half2(%s)) * half2(2.0) - half2(1.0)), %s);";
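+ // For example (texel values assumed for illustration): the packed 16-bit value 52428
+ // has high byte 204 and low byte 204, so ga = rb = 204 / 255 = 0.8,
+ // ga + rb * 0.00390625 ≈ 0.8031, and 0.8031 * 2 - 1 ≈ 0.606, approximately the 0.6
+ // gradient component that the CPU side packed.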
+
+ // Add noise function
+ const GrShaderVar gPerlinNoiseArgs[] = {
+ GrShaderVar(chanCoord, kHalf_GrSLType),
+ GrShaderVar(noiseVec, kHalf2_GrSLType)
+ };
+
+ const GrShaderVar gPerlinNoiseStitchArgs[] = {
+ GrShaderVar(chanCoord, kHalf_GrSLType),
+ GrShaderVar(noiseVec, kHalf2_GrSLType),
+ GrShaderVar(stitchData, kHalf2_GrSLType)
+ };
+
+ SkString noiseCode;
+
+ noiseCode.appendf("\thalf4 %s;\n", floorVal);
+ noiseCode.appendf("\t%s.xy = floor(%s);\n", floorVal, noiseVec);
+ noiseCode.appendf("\t%s.zw = %s.xy + half2(1.0);\n", floorVal, floorVal);
+ noiseCode.appendf("\thalf2 %s = fract(%s);\n", fractVal, noiseVec);
+
+ // smooth curve : t * t * (3 - 2 * t)
+ noiseCode.appendf("\n\thalf2 %s = %s * %s * (half2(3.0) - half2(2.0) * %s);",
+ noiseSmooth, fractVal, fractVal, fractVal);
+
+ // Adjust frequencies if we're stitching tiles
+ if (pne.stitchTiles()) {
+ noiseCode.appendf("\n\tif(%s.x >= %s.x) { %s.x -= %s.x; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.y >= %s.y) { %s.y -= %s.y; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.z >= %s.x) { %s.z -= %s.x; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.w >= %s.y) { %s.w -= %s.y; }",
+ floorVal, stitchData, floorVal, stitchData);
+ }
+
+ // Get texture coordinates and normalize
+ noiseCode.appendf("\n\t%s = fract(floor(mod(%s, 256.0)) / half4(256.0));\n",
+ floorVal, floorVal);
+
+ // Get permutation for x
+ {
+ SkString xCoords("");
+ xCoords.appendf("half2(%s.x, 0.5)", floorVal);
+
+ noiseCode.appendf("\n\thalf2 %s;\n\t%s.x = ", latticeIdx, latticeIdx);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[0], xCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.append(".r;");
+ }
+
+ // Get permutation for x + 1
+ {
+ SkString xCoords("");
+ xCoords.appendf("half2(%s.z, 0.5)", floorVal);
+
+ noiseCode.appendf("\n\t%s.y = ", latticeIdx);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[0], xCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.append(".r;");
+ }
+
+#if defined(SK_BUILD_FOR_ANDROID)
+ // Android rounding for Tegra devices, for example the Xoom (Tegra 2) and Nexus 7 (Tegra 3).
+ // The issue is that colors aren't accurate enough on Tegra devices. For example, if an 8 bit
+ // value of 124 (or 0.486275 here) is entered, we can get a texture value of 123.513725
+ // (or 0.484368 here). The following rounding operation prevents these precision issues from
+ // affecting the result of the noise by making sure that we only have multiples of 1/255.
+ // (Note that 1/255 is about 0.003921569, which is the value used here).
+ noiseCode.appendf("\n\t%s = floor(%s * half2(255.0) + half2(0.5)) * half2(0.003921569);",
+ latticeIdx, latticeIdx);
+#endif
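+ // For example, a fetched value of 0.484368 (i.e. 123.513725 / 255) becomes
+ // floor(0.484368 * 255.0 + 0.5) * 0.003921569 = 124.0 / 255.0 ≈ 0.486275,
+ // exactly the 8-bit value that was originally stored.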
+
+ // Get (x,y) coordinates with the permuted x
+ noiseCode.appendf("\n\thalf4 %s = fract(%s.xyxy + %s.yyww);", bcoords, latticeIdx, floorVal);
+
+ noiseCode.appendf("\n\n\thalf2 %s;", uv);
+ // Compute u, at offset (0,0)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("half2(%s.x, %s)", bcoords, chanCoord);
+ noiseCode.appendf("\n\thalf4 %s = ", lattice);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.x = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ noiseCode.appendf("\n\t%s.x -= 1.0;", fractVal);
+ // Compute v, at offset (-1,0)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("half2(%s.y, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.y = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ // Compute 'a' as a linear interpolation of 'u' and 'v'
+ noiseCode.appendf("\n\thalf2 %s;", ab);
+ noiseCode.appendf("\n\t%s.x = mix(%s.x, %s.y, %s.x);", ab, uv, uv, noiseSmooth);
+
+ noiseCode.appendf("\n\t%s.y -= 1.0;", fractVal);
+ // Compute v, at offset (-1,-1)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("half2(%s.w, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.y = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ noiseCode.appendf("\n\t%s.x += 1.0;", fractVal);
+ // Compute u, at offset (0,-1)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("half2(%s.z, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kHalf2_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.x = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ // Compute 'b' as a linear interpolation of 'u' and 'v'
+ noiseCode.appendf("\n\t%s.y = mix(%s.x, %s.y, %s.x);", ab, uv, uv, noiseSmooth);
+ // Compute the noise as a linear interpolation of 'a' and 'b'
+ noiseCode.appendf("\n\treturn mix(%s.x, %s.y, %s.y);\n", ab, ab, noiseSmooth);
+
+ SkString noiseFuncName;
+ if (pne.stitchTiles()) {
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "perlinnoise", SK_ARRAY_COUNT(gPerlinNoiseStitchArgs),
+ gPerlinNoiseStitchArgs, noiseCode.c_str(), &noiseFuncName);
+ } else {
+ fragBuilder->emitFunction(kHalf_GrSLType,
+ "perlinnoise", SK_ARRAY_COUNT(gPerlinNoiseArgs),
+ gPerlinNoiseArgs, noiseCode.c_str(), &noiseFuncName);
+ }
+
+ // There are rounding errors if the floor operation is not performed here
+ fragBuilder->codeAppendf("\n\t\thalf2 %s = half2(floor(%s.xy) * %s);",
+ noiseVec, vCoords.c_str(), baseFrequencyUni);
+
+ // Clear the color accumulator
+ fragBuilder->codeAppendf("\n\t\t%s = half4(0.0);", args.fOutputColor);
+
+ if (pne.stitchTiles()) {
+ // Set up TurbulenceInitial stitch values.
+ fragBuilder->codeAppendf("\n\t\thalf2 %s = %s;", stitchData, stitchDataUni);
+ }
+
+ fragBuilder->codeAppendf("\n\t\thalf %s = 1.0;", ratio);
+
+ // Loop over all octaves
+ fragBuilder->codeAppendf("for (int octave = 0; octave < %d; ++octave) {", pne.numOctaves());
+
+ fragBuilder->codeAppendf("\n\t\t\t%s += ", args.fOutputColor);
+ if (pne.type() != SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
+ fragBuilder->codeAppend("abs(");
+ }
+ if (pne.stitchTiles()) {
+ fragBuilder->codeAppendf(
+ "half4(\n\t\t\t\t%s(%s, %s, %s),\n\t\t\t\t%s(%s, %s, %s),"
+ "\n\t\t\t\t%s(%s, %s, %s),\n\t\t\t\t%s(%s, %s, %s))",
+ noiseFuncName.c_str(), chanCoordR, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordG, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordB, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordA, noiseVec, stitchData);
+ } else {
+ fragBuilder->codeAppendf(
+ "half4(\n\t\t\t\t%s(%s, %s),\n\t\t\t\t%s(%s, %s),"
+ "\n\t\t\t\t%s(%s, %s),\n\t\t\t\t%s(%s, %s))",
+ noiseFuncName.c_str(), chanCoordR, noiseVec,
+ noiseFuncName.c_str(), chanCoordG, noiseVec,
+ noiseFuncName.c_str(), chanCoordB, noiseVec,
+ noiseFuncName.c_str(), chanCoordA, noiseVec);
+ }
+ if (pne.type() != SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
+ fragBuilder->codeAppendf(")"); // end of "abs("
+ }
+ fragBuilder->codeAppendf(" * %s;", ratio);
+
+ fragBuilder->codeAppendf("\n\t\t\t%s *= half2(2.0);", noiseVec);
+ fragBuilder->codeAppendf("\n\t\t\t%s *= 0.5;", ratio);
+
+ if (pne.stitchTiles()) {
+ fragBuilder->codeAppendf("\n\t\t\t%s *= half2(2.0);", stitchData);
+ }
+ fragBuilder->codeAppend("\n\t\t}"); // end of the for loop on octaves
+
+ if (pne.type() == SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
+ // The value of turbulenceFunctionResult comes from ((turbulenceFunctionResult) + 1) / 2
+ // by fractalNoise and (turbulenceFunctionResult) by turbulence.
+ fragBuilder->codeAppendf("\n\t\t%s = %s * half4(0.5) + half4(0.5);",
+ args.fOutputColor,args.fOutputColor);
+ }
+
+ // Clamp values
+ fragBuilder->codeAppendf("\n\t\t%s = saturate(%s);", args.fOutputColor, args.fOutputColor);
+
+ // Pre-multiply the result
+ fragBuilder->codeAppendf("\n\t\t%s = half4(%s.rgb * %s.aaa, %s.a);\n",
+ args.fOutputColor, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor);
+}
+
+void GrGLPerlinNoise::GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrPerlinNoise2Effect& turbulence = processor.cast<GrPerlinNoise2Effect>();
+
+ uint32_t key = turbulence.numOctaves();
+
+ key = key << 3; // Make room for next 3 bits
+
+ switch (turbulence.type()) {
+ case SkPerlinNoiseShaderImpl::kFractalNoise_Type:
+ key |= 0x1;
+ break;
+ case SkPerlinNoiseShaderImpl::kTurbulence_Type:
+ key |= 0x2;
+ break;
+ default:
+ // leave key at 0
+ break;
+ }
+
+ if (turbulence.stitchTiles()) {
+ key |= 0x4; // Flip the 3rd bit if tile stitching is on
+ }
+
+ b->add32(key);
+}
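+// The resulting key layout is (numOctaves << 3) | bits, where bit 0x1 marks fractal
+// noise, 0x2 marks turbulence, and 0x4 marks tile stitching; for example, 5 octaves
+// of stitched turbulence yield (5 << 3) | 0x2 | 0x4 = 0x2E.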
+
+void GrGLPerlinNoise::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+
+ const GrPerlinNoise2Effect& turbulence = processor.cast<GrPerlinNoise2Effect>();
+
+ const SkVector& baseFrequency = turbulence.baseFrequency();
+ pdman.set2f(fBaseFrequencyUni, baseFrequency.fX, baseFrequency.fY);
+
+ if (turbulence.stitchTiles()) {
+ const SkPerlinNoiseShaderImpl::StitchData& stitchData = turbulence.stitchData();
+ pdman.set2f(fStitchDataUni, SkIntToScalar(stitchData.fWidth),
+ SkIntToScalar(stitchData.fHeight));
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+class GrGLImprovedPerlinNoise : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrShaderCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fZUni;
+ GrGLSLProgramDataManager::UniformHandle fBaseFrequencyUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrImprovedPerlinNoiseEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ int octaves, SkScalar z,
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
+ sk_sp<GrTextureProxy> permutationsProxy, sk_sp<GrTextureProxy> gradientProxy,
+ const SkMatrix& matrix) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrImprovedPerlinNoiseEffect(
+ octaves, z, std::move(paintingData), std::move(permutationsProxy),
+ std::move(gradientProxy), matrix));
+ }
+
+ const char* name() const override { return "ImprovedPerlinNoise"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrImprovedPerlinNoiseEffect(*this));
+ }
+
+ const SkVector& baseFrequency() const { return fPaintingData->fBaseFrequency; }
+ SkScalar z() const { return fZ; }
+ int octaves() const { return fOctaves; }
+ const SkMatrix& matrix() const { return fCoordTransform.getMatrix(); }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GrGLImprovedPerlinNoise;
+ }
+
+ void onGetGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GrGLImprovedPerlinNoise::GenKey(*this, caps, b);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const GrImprovedPerlinNoiseEffect& s = sBase.cast<GrImprovedPerlinNoiseEffect>();
+ return fZ == s.fZ &&
+ fPaintingData->fBaseFrequency == s.fPaintingData->fBaseFrequency;
+ }
+
+ GrImprovedPerlinNoiseEffect(int octaves, SkScalar z,
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
+ sk_sp<GrTextureProxy> permutationsProxy,
+ sk_sp<GrTextureProxy> gradientProxy,
+ const SkMatrix& matrix)
+ : INHERITED(kGrImprovedPerlinNoiseEffect_ClassID, kNone_OptimizationFlags)
+ , fOctaves(octaves)
+ , fZ(z)
+ , fPermutationsSampler(std::move(permutationsProxy))
+ , fGradientSampler(std::move(gradientProxy))
+ , fPaintingData(std::move(paintingData)) {
+ this->setTextureSamplerCnt(2);
+ fCoordTransform = GrCoordTransform(matrix);
+ this->addCoordTransform(&fCoordTransform);
+ }
+
+ GrImprovedPerlinNoiseEffect(const GrImprovedPerlinNoiseEffect& that)
+ : INHERITED(kGrImprovedPerlinNoiseEffect_ClassID, kNone_OptimizationFlags)
+ , fCoordTransform(that.fCoordTransform)
+ , fOctaves(that.fOctaves)
+ , fZ(that.fZ)
+ , fPermutationsSampler(that.fPermutationsSampler)
+ , fGradientSampler(that.fGradientSampler)
+ , fPaintingData(new SkPerlinNoiseShaderImpl::PaintingData(*that.fPaintingData)) {
+ this->setTextureSamplerCnt(2);
+ this->addCoordTransform(&fCoordTransform);
+ }
+
+ const TextureSampler& onTextureSampler(int i) const override {
+ return IthTextureSampler(i, fPermutationsSampler, fGradientSampler);
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ GrCoordTransform fCoordTransform;
+ int fOctaves;
+ SkScalar fZ;
+ TextureSampler fPermutationsSampler;
+ TextureSampler fGradientSampler;
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> fPaintingData;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrImprovedPerlinNoiseEffect);
+
+#if GR_TEST_UTILS
+std::unique_ptr<GrFragmentProcessor> GrImprovedPerlinNoiseEffect::TestCreate(
+ GrProcessorTestData* d) {
+ SkScalar baseFrequencyX = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+ SkScalar baseFrequencyY = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+ int numOctaves = d->fRandom->nextRangeU(2, 10);
+ SkScalar z = SkIntToScalar(d->fRandom->nextU());
+
+ sk_sp<SkShader> shader(SkPerlinNoiseShader::MakeImprovedNoise(baseFrequencyX,
+ baseFrequencyY,
+ numOctaves,
+ z));
+
+ GrTest::TestAsFPArgs asFPArgs(d);
+ return as_SB(shader)->asFragmentProcessor(asFPArgs.args());
+}
+#endif
+
+void GrGLImprovedPerlinNoise::emitCode(EmitArgs& args) {
+ const GrImprovedPerlinNoiseEffect& pne = args.fFp.cast<GrImprovedPerlinNoiseEffect>();
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ SkString vCoords = fragBuilder->ensureCoords2D(args.fTransformedCoords[0].fVaryingPoint);
+
+ fBaseFrequencyUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf2_GrSLType,
+ "baseFrequency");
+ const char* baseFrequencyUni = uniformHandler->getUniformCStr(fBaseFrequencyUni);
+
+ fZUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kHalf_GrSLType, "z");
+ const char* zUni = uniformHandler->getUniformCStr(fZUni);
+
+ // fade function
+ const GrShaderVar fadeArgs[] = {
+ GrShaderVar("t", kHalf3_GrSLType)
+ };
+ SkString fadeFuncName;
+ fragBuilder->emitFunction(kHalf3_GrSLType, "fade", SK_ARRAY_COUNT(fadeArgs),
+ fadeArgs,
+ "return t * t * t * (t * (t * 6.0 - 15.0) + 10.0);",
+ &fadeFuncName);
+
+ // perm function
+ const GrShaderVar permArgs[] = {
+ GrShaderVar("x", kHalf_GrSLType)
+ };
+ SkString permFuncName;
+ SkString permCode("return ");
+ // FIXME even though I'm creating these textures with kRepeat_TileMode, they're clamped. Not
+ // sure why. Using fract() (here and the next texture lookup) as a workaround.
+ fragBuilder->appendTextureLookup(&permCode, args.fTexSamplers[0], "float2(fract(x / 256.0), 0.0)",
+ kHalf2_GrSLType);
+ permCode.append(".r * 255.0;");
+ fragBuilder->emitFunction(kHalf_GrSLType, "perm", SK_ARRAY_COUNT(permArgs), permArgs,
+ permCode.c_str(), &permFuncName);
+
+ // grad function
+ const GrShaderVar gradArgs[] = {
+ GrShaderVar("x", kHalf_GrSLType),
+ GrShaderVar("p", kHalf3_GrSLType)
+ };
+ SkString gradFuncName;
+ SkString gradCode("return half(dot(");
+ fragBuilder->appendTextureLookup(&gradCode, args.fTexSamplers[1], "float2(fract(x / 16.0), 0.0)",
+ kHalf2_GrSLType);
+ gradCode.append(".rgb * 255.0 - float3(1.0), p));");
+ fragBuilder->emitFunction(kHalf_GrSLType, "grad", SK_ARRAY_COUNT(gradArgs), gradArgs,
+ gradCode.c_str(), &gradFuncName);
+
+ // lerp function
+ const GrShaderVar lerpArgs[] = {
+ GrShaderVar("a", kHalf_GrSLType),
+ GrShaderVar("b", kHalf_GrSLType),
+ GrShaderVar("w", kHalf_GrSLType)
+ };
+ SkString lerpFuncName;
+ fragBuilder->emitFunction(kHalf_GrSLType, "lerp", SK_ARRAY_COUNT(lerpArgs), lerpArgs,
+ "return a + w * (b - a);", &lerpFuncName);
+
+ // noise function
+ const GrShaderVar noiseArgs[] = {
+ GrShaderVar("p", kHalf3_GrSLType),
+ };
+ SkString noiseFuncName;
+ SkString noiseCode;
+ noiseCode.append("half3 P = mod(floor(p), 256.0);");
+ noiseCode.append("p -= floor(p);");
+ noiseCode.appendf("half3 f = %s(p);", fadeFuncName.c_str());
+ noiseCode.appendf("half A = %s(P.x) + P.y;", permFuncName.c_str());
+ noiseCode.appendf("half AA = %s(A) + P.z;", permFuncName.c_str());
+ noiseCode.appendf("half AB = %s(A + 1.0) + P.z;", permFuncName.c_str());
+ noiseCode.appendf("half B = %s(P.x + 1.0) + P.y;", permFuncName.c_str());
+ noiseCode.appendf("half BA = %s(B) + P.z;", permFuncName.c_str());
+ noiseCode.appendf("half BB = %s(B + 1.0) + P.z;", permFuncName.c_str());
+ noiseCode.appendf("half result = %s(", lerpFuncName.c_str());
+ noiseCode.appendf("%s(%s(%s(%s(AA), p),", lerpFuncName.c_str(), lerpFuncName.c_str(),
+ gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.appendf("%s(%s(BA), p + half3(-1.0, 0.0, 0.0)), f.x),", gradFuncName.c_str(),
+ permFuncName.c_str());
+ noiseCode.appendf("%s(%s(%s(AB), p + half3(0.0, -1.0, 0.0)),", lerpFuncName.c_str(),
+ gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.appendf("%s(%s(BB), p + half3(-1.0, -1.0, 0.0)), f.x), f.y),",
+ gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.appendf("%s(%s(%s(%s(AA + 1.0), p + half3(0.0, 0.0, -1.0)),",
+ lerpFuncName.c_str(), lerpFuncName.c_str(), gradFuncName.c_str(),
+ permFuncName.c_str());
+ noiseCode.appendf("%s(%s(BA + 1.0), p + half3(-1.0, 0.0, -1.0)), f.x),",
+ gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.appendf("%s(%s(%s(AB + 1.0), p + half3(0.0, -1.0, -1.0)),",
+ lerpFuncName.c_str(), gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.appendf("%s(%s(BB + 1.0), p + half3(-1.0, -1.0, -1.0)), f.x), f.y), f.z);",
+ gradFuncName.c_str(), permFuncName.c_str());
+ noiseCode.append("return result;");
+ fragBuilder->emitFunction(kHalf_GrSLType, "noise", SK_ARRAY_COUNT(noiseArgs), noiseArgs,
+ noiseCode.c_str(), &noiseFuncName);
+
+ // noiseOctaves function
+ const GrShaderVar noiseOctavesArgs[] = {
+ GrShaderVar("p", kHalf3_GrSLType)
+ };
+ SkString noiseOctavesFuncName;
+ SkString noiseOctavesCode;
+ noiseOctavesCode.append("half result = 0.0;");
+ noiseOctavesCode.append("half ratio = 1.0;");
+ noiseOctavesCode.appendf("for (half i = 0.0; i < %d; i++) {", pne.octaves());
+ noiseOctavesCode.appendf("result += %s(p) / ratio;", noiseFuncName.c_str());
+ noiseOctavesCode.append("p *= 2.0;");
+ noiseOctavesCode.append("ratio *= 2.0;");
+ noiseOctavesCode.append("}");
+ noiseOctavesCode.append("return (result + 1.0) / 2.0;");
+ fragBuilder->emitFunction(kHalf_GrSLType, "noiseOctaves", SK_ARRAY_COUNT(noiseOctavesArgs),
+ noiseOctavesArgs, noiseOctavesCode.c_str(), &noiseOctavesFuncName);
+
+ fragBuilder->codeAppendf("half2 coords = half2(%s * %s);", vCoords.c_str(), baseFrequencyUni);
+ fragBuilder->codeAppendf("half r = %s(half3(coords, %s));", noiseOctavesFuncName.c_str(),
+ zUni);
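+ // Offset z per channel so the four channels decorrelate; the deltas below mirror
+ // CHANNEL_DELTA (1000.0f) used by the CPU path above.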
+ fragBuilder->codeAppendf("half g = %s(half3(coords, %s + 0000.0));",
+ noiseOctavesFuncName.c_str(), zUni);
+ fragBuilder->codeAppendf("half b = %s(half3(coords, %s + 0000.0));",
+ noiseOctavesFuncName.c_str(), zUni);
+ fragBuilder->codeAppendf("half a = %s(half3(coords, %s + 0000.0));",
+ noiseOctavesFuncName.c_str(), zUni);
+ fragBuilder->codeAppendf("%s = half4(r, g, b, a);", args.fOutputColor);
+
+ // Clamp values
+ fragBuilder->codeAppendf("%s = saturate(%s);", args.fOutputColor, args.fOutputColor);
+
+ // Pre-multiply the result
+ fragBuilder->codeAppendf("\n\t\t%s = half4(%s.rgb * %s.aaa, %s.a);\n",
+ args.fOutputColor, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor);
+}
+
+void GrGLImprovedPerlinNoise::GenKey(const GrProcessor& processor, const GrShaderCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrImprovedPerlinNoiseEffect& pne = processor.cast<GrImprovedPerlinNoiseEffect>();
+ b->add32(pne.octaves());
+}
+
+void GrGLImprovedPerlinNoise::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+
+ const GrImprovedPerlinNoiseEffect& noise = processor.cast<GrImprovedPerlinNoiseEffect>();
+
+ const SkVector& baseFrequency = noise.baseFrequency();
+ pdman.set2f(fBaseFrequencyUni, baseFrequency.fX, baseFrequency.fY);
+
+ pdman.set1f(fZUni, noise.z());
+}
+
+/////////////////////////////////////////////////////////////////////
+std::unique_ptr<GrFragmentProcessor> SkPerlinNoiseShaderImpl::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ SkASSERT(args.fContext);
+
+ const auto localMatrix = this->totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix);
+ const auto paintMatrix = SkMatrix::Concat(*args.fViewMatrix, *localMatrix);
+
+ // Either we don't stitch tiles, or we have a valid tile size
+ SkASSERT(!fStitchTiles || !fTileSize.isEmpty());
+
+ std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData =
+ skstd::make_unique<SkPerlinNoiseShaderImpl::PaintingData>(fTileSize,
+ fSeed,
+ fBaseFrequencyX,
+ fBaseFrequencyY,
+ paintMatrix);
+
+ SkMatrix m = *args.fViewMatrix;
+ m.setTranslateX(-localMatrix->getTranslateX() + SK_Scalar1);
+ m.setTranslateY(-localMatrix->getTranslateY() + SK_Scalar1);
+
+ auto proxyProvider = args.fContext->priv().proxyProvider();
+ if (fType == kImprovedNoise_Type) {
+ // Need to assert that the textures we'll create are power of 2 so a copy isn't needed.
+ // We also know that we will not be using mipmaps. If these things weren't true we should
+ // go through GrBitmapTextureMaker to handle needed copies.
+ const sk_sp<SkImage> permutationsImage = paintingData->getImprovedPermutationsImage();
+ SkASSERT(SkIsPow2(permutationsImage->width()) && SkIsPow2(permutationsImage->height()));
+ sk_sp<GrTextureProxy> permutationsTexture(
+ GrMakeCachedImageProxy(proxyProvider, std::move(permutationsImage)));
+
+ const sk_sp<SkImage> gradientImage = paintingData->getGradientImage();
+ SkASSERT(SkIsPow2(gradientImage->width()) && SkIsPow2(gradientImage->height()));
+ sk_sp<GrTextureProxy> gradientTexture(
+ GrMakeCachedImageProxy(proxyProvider, std::move(gradientImage)));
+ return GrImprovedPerlinNoiseEffect::Make(fNumOctaves, fSeed, std::move(paintingData),
+ std::move(permutationsTexture),
+ std::move(gradientTexture), m);
+ }
+
+ if (0 == fNumOctaves) {
+ if (kFractalNoise_Type == fType) {
+ // Extract the incoming alpha and emit rgba = (a/4, a/4, a/4, a/2)
+ // TODO: Either treat the output of this shader as sRGB or allow client to specify a
+ // color space of the noise. Either way, this case (and the GLSL) need to convert to
+ // the destination.
+ auto inner =
+ GrConstColorProcessor::Make(SkPMColor4f::FromBytes_RGBA(0x80404040),
+ GrConstColorProcessor::InputMode::kModulateRGBA);
+ return GrFragmentProcessor::MulChildByInputAlpha(std::move(inner));
+ }
+ // Emit zero.
+ return GrConstColorProcessor::Make(SK_PMColor4fTRANSPARENT,
+ GrConstColorProcessor::InputMode::kIgnore);
+ }
+
+ // Need to assert that the textures we'll create are power of 2 so that no copy is needed. We
+ // also know that we will not be using mipmaps. If these things weren't true we should go
+ // through GrBitmapTextureMaker to handle needed copies.
+ const sk_sp<SkImage> permutationsImage = paintingData->getPermutationsImage();
+ SkASSERT(SkIsPow2(permutationsImage->width()) && SkIsPow2(permutationsImage->height()));
+ sk_sp<GrTextureProxy> permutationsProxy = GrMakeCachedImageProxy(proxyProvider,
+ std::move(permutationsImage));
+
+ const sk_sp<SkImage> noiseImage = paintingData->getNoiseImage();
+ SkASSERT(SkIsPow2(noiseImage->width()) && SkIsPow2(noiseImage->height()));
+ sk_sp<GrTextureProxy> noiseProxy = GrMakeCachedImageProxy(proxyProvider,
+ std::move(noiseImage));
+
+ if (permutationsProxy && noiseProxy) {
+ auto inner = GrPerlinNoise2Effect::Make(fType,
+ fNumOctaves,
+ fStitchTiles,
+ std::move(paintingData),
+ std::move(permutationsProxy),
+ std::move(noiseProxy),
+ m);
+ return GrFragmentProcessor::MulChildByInputAlpha(std::move(inner));
+ }
+ return nullptr;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool valid_input(SkScalar baseX, SkScalar baseY, int numOctaves, const SkISize* tileSize,
+ SkScalar seed) {
+ if (!(baseX >= 0 && baseY >= 0)) {
+ return false;
+ }
+ if (!(numOctaves >= 0 && numOctaves <= SkPerlinNoiseShaderImpl::kMaxOctaves)) {
+ return false;
+ }
+ if (tileSize && !(tileSize->width() >= 0 && tileSize->height() >= 0)) {
+ return false;
+ }
+ if (!SkScalarIsFinite(seed)) {
+ return false;
+ }
+ return true;
+}
+
+sk_sp<SkShader> SkPerlinNoiseShader::MakeFractalNoise(SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize) {
+ if (!valid_input(baseFrequencyX, baseFrequencyY, numOctaves, tileSize, seed)) {
+ return nullptr;
+ }
+ return sk_sp<SkShader>(new SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::kFractalNoise_Type,
+ baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ tileSize));
+}
+
+sk_sp<SkShader> SkPerlinNoiseShader::MakeTurbulence(SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize) {
+ if (!valid_input(baseFrequencyX, baseFrequencyY, numOctaves, tileSize, seed)) {
+ return nullptr;
+ }
+ return sk_sp<SkShader>(new SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::kTurbulence_Type,
+ baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ tileSize));
+}
+
+sk_sp<SkShader> SkPerlinNoiseShader::MakeImprovedNoise(SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves, SkScalar z) {
+ if (!valid_input(baseFrequencyX, baseFrequencyY, numOctaves, nullptr, z)) {
+ return nullptr;
+ }
+ return sk_sp<SkShader>(new SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::kImprovedNoise_Type,
+ baseFrequencyX, baseFrequencyY, numOctaves, z,
+ nullptr));
+}
+
+void SkPerlinNoiseShader::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkPerlinNoiseShaderImpl);
+}
diff --git a/gfx/skia/skia/src/shaders/SkPictureShader.cpp b/gfx/skia/skia/src/shaders/SkPictureShader.cpp
new file mode 100644
index 0000000000..fc99118e42
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPictureShader.cpp
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkPictureShader.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkResourceCache.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkImageShader.h"
+#include <atomic>
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+#endif
+
+sk_sp<SkShader> SkPicture::makeShader(SkTileMode tmx, SkTileMode tmy, const SkMatrix* localMatrix,
+ const SkRect* tile) const {
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+ return SkPictureShader::Make(sk_ref_sp(this), tmx, tmy, localMatrix, tile);
+}
+
+sk_sp<SkShader> SkPicture::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix) const {
+ return this->makeShader(tmx, tmy, localMatrix, nullptr);
+}
+
+namespace {
+static unsigned gBitmapShaderKeyNamespaceLabel;
+
+struct BitmapShaderKey : public SkResourceCache::Key {
+public:
+ BitmapShaderKey(SkColorSpace* colorSpace,
+ SkImage::BitDepth bitDepth,
+ uint32_t shaderID,
+ const SkSize& scale)
+ : fColorSpaceXYZHash(colorSpace->toXYZD50Hash())
+ , fColorSpaceTransferFnHash(colorSpace->transferFnHash())
+ , fBitDepth(bitDepth)
+ , fScale(scale) {
+
+ static const size_t keySize = sizeof(fColorSpaceXYZHash) +
+ sizeof(fColorSpaceTransferFnHash) +
+ sizeof(fBitDepth) +
+ sizeof(fScale);
+ // This better be packed.
+ SkASSERT(sizeof(uint32_t) * (&fEndOfStruct - &fColorSpaceXYZHash) == keySize);
+ this->init(&gBitmapShaderKeyNamespaceLabel, MakeSharedID(shaderID), keySize);
+ }
+
+ static uint64_t MakeSharedID(uint32_t shaderID) {
+ uint64_t sharedID = SkSetFourByteTag('p', 's', 'd', 'r');
+ return (sharedID << 32) | shaderID;
+ }
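+ // The shared ID packs the four-byte tag 'psdr' into the upper 32 bits and the
+ // shader's unique ID into the lower 32 bits, so every cached tile belonging to
+ // one SkPictureShader can be purged together (see ~SkPictureShader below).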
+
+private:
+ uint32_t fColorSpaceXYZHash;
+ uint32_t fColorSpaceTransferFnHash;
+ SkImage::BitDepth fBitDepth;
+ SkSize fScale;
+
+ SkDEBUGCODE(uint32_t fEndOfStruct;)
+};
+
+struct BitmapShaderRec : public SkResourceCache::Rec {
+ BitmapShaderRec(const BitmapShaderKey& key, SkShader* tileShader)
+ : fKey(key)
+ , fShader(SkRef(tileShader)) {}
+
+ BitmapShaderKey fKey;
+ sk_sp<SkShader> fShader;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override {
+ // Just the record overhead -- the actual pixels are accounted by SkImage_Lazy.
+ return sizeof(fKey) + sizeof(SkImageShader);
+ }
+ const char* getCategory() const override { return "bitmap-shader"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextShader) {
+ const BitmapShaderRec& rec = static_cast<const BitmapShaderRec&>(baseRec);
+ sk_sp<SkShader>* result = reinterpret_cast<sk_sp<SkShader>*>(contextShader);
+
+ *result = rec.fShader;
+
+ // The bitmap shader is backed by an image generator, thus it can always re-generate its
+ // pixels if discarded.
+ return true;
+ }
+};
+
+uint32_t next_id() {
+ static std::atomic<uint32_t> nextID{1};
+
+ uint32_t id;
+ do {
+ id = nextID++;
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+} // namespace
+
+SkPictureShader::SkPictureShader(sk_sp<SkPicture> picture, SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile)
+ : INHERITED(localMatrix)
+ , fPicture(std::move(picture))
+ , fTile(tile ? *tile : fPicture->cullRect())
+ , fTmx(tmx)
+ , fTmy(tmy)
+ , fUniqueID(next_id())
+ , fAddedToCache(false) {}
+
+SkPictureShader::~SkPictureShader() {
+ if (fAddedToCache.load()) {
+ SkResourceCache::PostPurgeSharedID(BitmapShaderKey::MakeSharedID(fUniqueID));
+ }
+}
+
+sk_sp<SkShader> SkPictureShader::Make(sk_sp<SkPicture> picture, SkTileMode tmx, SkTileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile) {
+ if (!picture || picture->cullRect().isEmpty() || (tile && tile->isEmpty())) {
+ return SkShaders::Empty();
+ }
+ return sk_sp<SkShader>(new SkPictureShader(std::move(picture), tmx, tmy, localMatrix, tile));
+}
+
+SkPicture* SkPictureShader::isAPicture(SkMatrix* matrix,
+ SkTileMode tileModes[2],
+ SkRect* tile) const {
+ if (matrix) {
+ *matrix = this->getLocalMatrix();
+ }
+ if (tileModes) {
+ tileModes[0] = fTmx;
+ tileModes[1] = fTmy;
+ }
+ if (tile) {
+ *tile = fTile;
+ }
+ return fPicture.get();
+}
+
+sk_sp<SkFlattenable> SkPictureShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ auto tmx = buffer.read32LE(SkTileMode::kLastTileMode);
+ auto tmy = buffer.read32LE(SkTileMode::kLastTileMode);
+ SkRect tile;
+ buffer.readRect(&tile);
+
+ sk_sp<SkPicture> picture;
+
+ bool didSerialize = buffer.readBool();
+ if (didSerialize) {
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ return SkPictureShader::Make(picture, tmx, tmy, &lm, &tile);
+}
+
+void SkPictureShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.write32((unsigned)fTmx);
+ buffer.write32((unsigned)fTmy);
+ buffer.writeRect(fTile);
+
+ buffer.writeBool(true);
+ SkPicturePriv::Flatten(fPicture, buffer);
+}
+
+// Returns a cached image shader, which wraps a single picture tile at the given
+// CTM/local matrix. Also adjusts the local matrix for tile scaling.
+sk_sp<SkShader> SkPictureShader::refBitmapShader(const SkMatrix& viewMatrix,
+ SkTCopyOnFirstWrite<SkMatrix>* localMatrix,
+ SkColorType dstColorType,
+ SkColorSpace* dstColorSpace,
+ const int maxTextureSize) const {
+ SkASSERT(fPicture && !fPicture->cullRect().isEmpty());
+
+ const SkMatrix m = SkMatrix::Concat(viewMatrix, **localMatrix);
+
+ // Use a rotation-invariant scale
+ SkPoint scale;
+ //
+ // TODO: replace this with decomposeScale() -- but beware LayoutTest rebaselines!
+ //
+ if (!SkDecomposeUpper2x2(m, nullptr, &scale, nullptr)) {
+ // Decomposition failed, use an approximation.
+ scale.set(SkScalarSqrt(m.getScaleX() * m.getScaleX() + m.getSkewX() * m.getSkewX()),
+ SkScalarSqrt(m.getScaleY() * m.getScaleY() + m.getSkewY() * m.getSkewY()));
+ }
+ SkSize scaledSize = SkSize::Make(SkScalarAbs(scale.x() * fTile.width()),
+ SkScalarAbs(scale.y() * fTile.height()));
+
+ // Clamp the tile size to about 4M pixels
+ static const SkScalar kMaxTileArea = 2048 * 2048;
+ SkScalar tileArea = scaledSize.width() * scaledSize.height();
+ if (tileArea > kMaxTileArea) {
+ SkScalar clampScale = SkScalarSqrt(kMaxTileArea / tileArea);
+ scaledSize.set(scaledSize.width() * clampScale,
+ scaledSize.height() * clampScale);
+ }
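+ // For example (values assumed for illustration), a scaled tile of 4096 x 3072 has
+ // an area of ~12.6M pixels, so clampScale = sqrt(4194304 / 12582912) ≈ 0.577 and
+ // the tile shrinks to roughly 2365 x 1774, bringing the area back to the ~4M cap.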
+#if SK_SUPPORT_GPU
+ // Scale down the tile size if it exceeds maxTextureSize on the GPU path; otherwise texture creation would fail
+ if (maxTextureSize) {
+ if (scaledSize.width() > maxTextureSize || scaledSize.height() > maxTextureSize) {
+ SkScalar downScale = maxTextureSize / SkMaxScalar(scaledSize.width(), scaledSize.height());
+ scaledSize.set(SkScalarFloorToScalar(scaledSize.width() * downScale),
+ SkScalarFloorToScalar(scaledSize.height() * downScale));
+ }
+ }
+#endif
+
+ const SkISize tileSize = scaledSize.toCeil();
+ if (tileSize.isEmpty()) {
+ return SkShaders::Empty();
+ }
+
+ // The actual scale, compensating for rounding & clamping.
+ const SkSize tileScale = SkSize::Make(SkIntToScalar(tileSize.width()) / fTile.width(),
+ SkIntToScalar(tileSize.height()) / fTile.height());
+
+
+ sk_sp<SkColorSpace> imgCS = dstColorSpace ? sk_ref_sp(dstColorSpace): SkColorSpace::MakeSRGB();
+ SkImage::BitDepth bitDepth =
+ dstColorType >= kRGBA_F16Norm_SkColorType
+ ? SkImage::BitDepth::kF16 : SkImage::BitDepth::kU8;
+
+ BitmapShaderKey key(imgCS.get(), bitDepth, fUniqueID, tileScale);
+
+ sk_sp<SkShader> tileShader;
+ if (!SkResourceCache::Find(key, BitmapShaderRec::Visitor, &tileShader)) {
+ SkMatrix tileMatrix;
+ tileMatrix.setRectToRect(fTile, SkRect::MakeIWH(tileSize.width(), tileSize.height()),
+ SkMatrix::kFill_ScaleToFit);
+
+ sk_sp<SkImage> tileImage = SkImage::MakeFromPicture(fPicture, tileSize, &tileMatrix,
+ nullptr, bitDepth, std::move(imgCS));
+ if (!tileImage) {
+ return nullptr;
+ }
+
+ tileShader = tileImage->makeShader(fTmx, fTmy);
+
+ SkResourceCache::Add(new BitmapShaderRec(key, tileShader.get()));
+ fAddedToCache.store(true);
+ }
+
+ if (tileScale.width() != 1 || tileScale.height() != 1) {
+ localMatrix->writable()->preScale(1 / tileScale.width(), 1 / tileScale.height());
+ }
+
+ return tileShader;
+}
+
+bool SkPictureShader::onAppendStages(const SkStageRec& rec) const {
+ auto lm = this->totalLocalMatrix(rec.fLocalM);
+
+ // Keep bitmapShader alive by using alloc instead of stack memory
+ auto& bitmapShader = *rec.fAlloc->make<sk_sp<SkShader>>();
+ bitmapShader = this->refBitmapShader(rec.fCTM, &lm, rec.fDstColorType, rec.fDstCS);
+
+ if (!bitmapShader) {
+ return false;
+ }
+
+ SkStageRec localRec = rec;
+ localRec.fLocalM = lm->isIdentity() ? nullptr : lm.get();
+
+ return as_SB(bitmapShader)->appendStages(localRec);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkPictureShader::onMakeContext(const ContextRec& rec, SkArenaAlloc* alloc)
+const {
+ auto lm = this->totalLocalMatrix(rec.fLocalMatrix);
+ sk_sp<SkShader> bitmapShader = this->refBitmapShader(*rec.fMatrix, &lm, rec.fDstColorType,
+ rec.fDstColorSpace);
+ if (!bitmapShader) {
+ return nullptr;
+ }
+
+ ContextRec localRec = rec;
+ localRec.fLocalMatrix = lm->isIdentity() ? nullptr : lm.get();
+
+ PictureShaderContext* ctx =
+ alloc->make<PictureShaderContext>(*this, localRec, std::move(bitmapShader), alloc);
+ if (nullptr == ctx->fBitmapShaderContext) {
+ ctx = nullptr;
+ }
+ return ctx;
+}
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+SkPictureShader::PictureShaderContext::PictureShaderContext(
+ const SkPictureShader& shader, const ContextRec& rec, sk_sp<SkShader> bitmapShader,
+ SkArenaAlloc* alloc)
+ : INHERITED(shader, rec)
+ , fBitmapShader(std::move(bitmapShader))
+{
+ fBitmapShaderContext = as_SB(fBitmapShader)->makeContext(rec, alloc);
+    // If fBitmapShaderContext is null, this context is invalid.
+}
+
+uint32_t SkPictureShader::PictureShaderContext::getFlags() const {
+ SkASSERT(fBitmapShaderContext);
+ return fBitmapShaderContext->getFlags();
+}
+
+void SkPictureShader::PictureShaderContext::shadeSpan(int x, int y, SkPMColor dstC[], int count) {
+ SkASSERT(fBitmapShaderContext);
+ fBitmapShaderContext->shadeSpan(x, y, dstC, count);
+}
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContext.h"
+#include "src/gpu/GrContextPriv.h"
+
+std::unique_ptr<GrFragmentProcessor> SkPictureShader::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ int maxTextureSize = 0;
+ if (args.fContext) {
+ maxTextureSize = args.fContext->priv().caps()->maxTextureSize();
+ }
+
+ auto lm = this->totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix);
+ SkColorType dstColorType = GrColorTypeToSkColorType(args.fDstColorInfo->colorType());
+ if (dstColorType == kUnknown_SkColorType) {
+ dstColorType = kRGBA_8888_SkColorType;
+ }
+ sk_sp<SkShader> bitmapShader(this->refBitmapShader(*args.fViewMatrix, &lm, dstColorType,
+ args.fDstColorInfo->colorSpace(),
+ maxTextureSize));
+ if (!bitmapShader) {
+ return nullptr;
+ }
+
+ // We want to *reset* args.fPreLocalMatrix, not compose it.
+ GrFPArgs newArgs(args.fContext, args.fViewMatrix, args.fFilterQuality, args.fDstColorInfo);
+ newArgs.fPreLocalMatrix = lm.get();
+
+ return as_SB(bitmapShader)->asFragmentProcessor(newArgs);
+}
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkPictureShader.h b/gfx/skia/skia/src/shaders/SkPictureShader.h
new file mode 100644
index 0000000000..4ebf39b708
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPictureShader.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureShader_DEFINED
+#define SkPictureShader_DEFINED
+
+#include "include/core/SkTileMode.h"
+#include "src/shaders/SkShaderBase.h"
+#include <atomic>
+
+class SkArenaAlloc;
+class SkBitmap;
+class SkPicture;
+
+/*
+ * An SkPictureShader can be used to draw SkPicture-based patterns.
+ *
+ * The SkPicture is first rendered into a tile, which is then used to shade the area according
+ * to specified tiling rules.
+ */
+class SkPictureShader : public SkShaderBase {
+public:
+ ~SkPictureShader() override;
+
+ static sk_sp<SkShader> Make(sk_sp<SkPicture>, SkTileMode, SkTileMode, const SkMatrix*,
+ const SkRect*);
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+ SkPicture* isAPicture(SkMatrix*, SkTileMode[2], SkRect* tile) const override;
+
+protected:
+ SkPictureShader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPictureShader)
+
+ SkPictureShader(sk_sp<SkPicture>, SkTileMode, SkTileMode, const SkMatrix*, const SkRect*);
+
+ sk_sp<SkShader> refBitmapShader(const SkMatrix&, SkTCopyOnFirstWrite<SkMatrix>* localMatrix,
+ SkColorType dstColorType, SkColorSpace* dstColorSpace,
+ const int maxTextureSize = 0) const;
+
+ class PictureShaderContext : public Context {
+ public:
+ PictureShaderContext(
+ const SkPictureShader&, const ContextRec&, sk_sp<SkShader> bitmapShader, SkArenaAlloc*);
+
+ uint32_t getFlags() const override;
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ sk_sp<SkShader> fBitmapShader;
+ SkShaderBase::Context* fBitmapShaderContext;
+ void* fBitmapShaderContextStorage;
+
+ typedef Context INHERITED;
+ };
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fTile;
+ SkTileMode fTmx, fTmy;
+
+ const uint32_t fUniqueID;
+ mutable std::atomic<bool> fAddedToCache;
+
+ typedef SkShaderBase INHERITED;
+};
+
+#endif // SkPictureShader_DEFINED
diff --git a/gfx/skia/skia/src/shaders/SkRTShader.cpp b/gfx/skia/skia/src/shaders/SkRTShader.cpp
new file mode 100644
index 0000000000..c42a8d0e51
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkRTShader.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkRTShader.h"
+
+#include "src/sksl/SkSLByteCode.h"
+#include "src/sksl/SkSLCompiler.h"
+
+#if SK_SUPPORT_GPU
+#include "include/private/GrRecordingContext.h"
+#include "src/gpu/GrCaps.h"
+#include "src/gpu/GrColorInfo.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/SkGr.h"
+
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/effects/GrSkSLFP.h"
+#include "src/gpu/effects/generated/GrMixerEffect.h"
+
+static inline uint32_t new_sksl_unique_id() {
+ return GrSkSLFP::NewIndex();
+}
+#else
+static inline uint32_t new_sksl_unique_id() {
+ return 0; // not used w/o GPU
+}
+#endif
+
+SkRTShader::SkRTShader(int index, SkString sksl, sk_sp<SkData> inputs, const SkMatrix* localMatrix,
+ bool isOpaque)
+ : SkShaderBase(localMatrix)
+ , fSkSL(std::move(sksl))
+ , fInputs(std::move(inputs))
+ , fUniqueID(index)
+ , fIsOpaque(isOpaque)
+{}
+
+bool SkRTShader::onAppendStages(const SkStageRec& rec) const {
+ SkMatrix inverse;
+ if (!this->computeTotalInverse(rec.fCTM, rec.fLocalM, &inverse)) {
+ return false;
+ }
+
+ auto ctx = rec.fAlloc->make<SkRasterPipeline_InterpreterCtx>();
+ ctx->paintColor = rec.fPaint.getColor4f();
+ ctx->inputs = fInputs->data();
+ ctx->ninputs = fInputs->size() / 4;
+ ctx->shaderConvention = true;
+
+ SkAutoMutexExclusive ama(fByteCodeMutex);
+ if (!fByteCode) {
+ SkSL::Compiler c;
+ auto prog = c.convertProgram(SkSL::Program::kPipelineStage_Kind,
+ SkSL::String(fSkSL.c_str()),
+ SkSL::Program::Settings());
+ if (c.errorCount()) {
+ SkDebugf("%s\n", c.errorText().c_str());
+ return false;
+ }
+ fByteCode = c.toByteCode(*prog);
+ if (c.errorCount()) {
+ SkDebugf("%s\n", c.errorText().c_str());
+ return false;
+ }
+ SkASSERT(fByteCode);
+ if (!fByteCode->getFunction("main")) {
+ return false;
+ }
+ }
+ ctx->byteCode = fByteCode.get();
+ ctx->fn = ctx->byteCode->getFunction("main");
+
+ rec.fPipeline->append(SkRasterPipeline::seed_shader);
+ rec.fPipeline->append_matrix(rec.fAlloc, inverse);
+ rec.fPipeline->append(SkRasterPipeline::interpreter, ctx);
+ return true;
+}
+
+enum Flags {
+ kIsOpaque_Flag = 1 << 0,
+ kHasLocalMatrix_Flag = 1 << 1,
+};
+
+void SkRTShader::flatten(SkWriteBuffer& buffer) const {
+ uint32_t flags = 0;
+ if (fIsOpaque) {
+ flags |= kIsOpaque_Flag;
+ }
+ if (!this->getLocalMatrix().isIdentity()) {
+ flags |= kHasLocalMatrix_Flag;
+ }
+
+ buffer.writeString(fSkSL.c_str());
+ if (fInputs) {
+ buffer.writeDataAsByteArray(fInputs.get());
+ } else {
+ buffer.writeByteArray(nullptr, 0);
+ }
+ buffer.write32(flags);
+ if (flags & kHasLocalMatrix_Flag) {
+ buffer.writeMatrix(this->getLocalMatrix());
+ }
+}
+
+sk_sp<SkFlattenable> SkRTShader::CreateProc(SkReadBuffer& buffer) {
+ // We don't have a way to ensure that indices are consistent and correct when deserializing.
+ // Perhaps we should have a hash table to map strings to indices? For now, all shaders get a
+ // new unique ID after serialization.
+ int index = new_sksl_unique_id();
+
+ SkString sksl;
+ buffer.readString(&sksl);
+ sk_sp<SkData> inputs = buffer.readByteArrayAsData();
+ uint32_t flags = buffer.read32();
+
+ bool isOpaque = SkToBool(flags & kIsOpaque_Flag);
+ SkMatrix localM, *localMPtr = nullptr;
+ if (flags & kHasLocalMatrix_Flag) {
+ buffer.readMatrix(&localM);
+ localMPtr = &localM;
+ }
+
+ return sk_sp<SkFlattenable>(new SkRTShader(index, std::move(sksl), std::move(inputs),
+ localMPtr, isOpaque));
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkRTShader::asFragmentProcessor(const GrFPArgs& args) const {
+ SkMatrix matrix;
+ if (!this->totalLocalMatrix(args.fPreLocalMatrix, args.fPostLocalMatrix)->invert(&matrix)) {
+ return nullptr;
+ }
+ return GrSkSLFP::Make(args.fContext, fUniqueID, "runtime-shader", fSkSL,
+ fInputs->data(), fInputs->size(), SkSL::Program::kPipelineStage_Kind,
+ &matrix);
+}
+#endif
+
+SkRuntimeShaderFactory::SkRuntimeShaderFactory(SkString sksl, bool isOpaque)
+ : fIndex(new_sksl_unique_id())
+ , fSkSL(std::move(sksl))
+ , fIsOpaque(isOpaque) {}
+
+sk_sp<SkShader> SkRuntimeShaderFactory::make(sk_sp<SkData> inputs, const SkMatrix* localMatrix) {
+ return sk_sp<SkShader>(
+ new SkRTShader(fIndex, fSkSL, std::move(inputs), localMatrix, fIsOpaque));
+}
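+
+// Minimal usage sketch (illustrative only; assumes `sksl` holds a valid
+// pipeline-stage program with a main() entry point and `inputs` packs its
+// uniform data):
+//
+//   SkRuntimeShaderFactory factory(std::move(sksl), /*isOpaque=*/false);
+//   sk_sp<SkShader> shader = factory.make(inputs, /*localMatrix=*/nullptr);
+//   paint.setShader(std::move(shader));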
diff --git a/gfx/skia/skia/src/shaders/SkRTShader.h b/gfx/skia/skia/src/shaders/SkRTShader.h
new file mode 100644
index 0000000000..57d69c324e
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkRTShader.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRTShader_DEFINED
+#define SkRTShader_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/private/SkMutex.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/sksl/SkSLByteCode.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#endif
+
+class SkData;
+class SkMatrix;
+
+class SkRTShader : public SkShaderBase {
+public:
+ SkRTShader(int index, SkString sksl, sk_sp<SkData> inputs, const SkMatrix* localMatrix,
+ bool isOpaque);
+
+ bool isOpaque() const override { return fIsOpaque; }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onAppendStages(const SkStageRec& rec) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkRTShader)
+
+ SkString fSkSL;
+ sk_sp<SkData> fInputs;
+ const uint32_t fUniqueID;
+ const bool fIsOpaque;
+
+ mutable SkMutex fByteCodeMutex;
+ mutable std::unique_ptr<SkSL::ByteCode> fByteCode;
+
+ typedef SkShaderBase INHERITED;
+};
+
+class SkRuntimeShaderFactory {
+public:
+ SkRuntimeShaderFactory(SkString sksl, bool isOpaque);
+
+ sk_sp<SkShader> make(sk_sp<SkData> inputs, const SkMatrix* localMatrix);
+
+private:
+ int fIndex;
+ SkString fSkSL;
+ bool fIsOpaque;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkShader.cpp b/gfx/skia/skia/src/shaders/SkShader.cpp
new file mode 100644
index 0000000000..0095a9509c
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkShader.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMallocPixelRef.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkColorShader.h"
+#include "src/shaders/SkEmptyShader.h"
+#include "src/shaders/SkPictureShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFragmentProcessor.h"
+#endif
+
+SkShaderBase::SkShaderBase(const SkMatrix* localMatrix)
+ : fLocalMatrix(localMatrix ? *localMatrix : SkMatrix::I()) {
+ // Pre-cache so future calls to fLocalMatrix.getType() are threadsafe.
+ (void)fLocalMatrix.getType();
+}
+
+SkShaderBase::~SkShaderBase() {}
+
+void SkShaderBase::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ bool hasLocalM = !fLocalMatrix.isIdentity();
+ buffer.writeBool(hasLocalM);
+ if (hasLocalM) {
+ buffer.writeMatrix(fLocalMatrix);
+ }
+}
+
+SkTCopyOnFirstWrite<SkMatrix>
+SkShaderBase::totalLocalMatrix(const SkMatrix* preLocalMatrix,
+ const SkMatrix* postLocalMatrix) const {
+ SkTCopyOnFirstWrite<SkMatrix> m(fLocalMatrix);
+
+ if (preLocalMatrix) {
+ m.writable()->preConcat(*preLocalMatrix);
+ }
+
+ if (postLocalMatrix) {
+ m.writable()->postConcat(*postLocalMatrix);
+ }
+
+ return m;
+}
+
+bool SkShaderBase::computeTotalInverse(const SkMatrix& ctm,
+ const SkMatrix* outerLocalMatrix,
+ SkMatrix* totalInverse) const {
+ return SkMatrix::Concat(ctm, *this->totalLocalMatrix(outerLocalMatrix)).invert(totalInverse);
+}
+
+bool SkShaderBase::asLuminanceColor(SkColor* colorPtr) const {
+ SkColor storage;
+ if (nullptr == colorPtr) {
+ colorPtr = &storage;
+ }
+ if (this->onAsLuminanceColor(colorPtr)) {
+ *colorPtr = SkColorSetA(*colorPtr, 0xFF); // we only return opaque
+ return true;
+ }
+ return false;
+}
+
+SkShaderBase::Context* SkShaderBase::makeContext(const ContextRec& rec, SkArenaAlloc* alloc) const {
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ // We always fall back to raster pipeline when perspective is present.
+ if (rec.fMatrix->hasPerspective() ||
+ fLocalMatrix.hasPerspective() ||
+ (rec.fLocalMatrix && rec.fLocalMatrix->hasPerspective()) ||
+ !this->computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, nullptr)) {
+ return nullptr;
+ }
+
+ return this->onMakeContext(rec, alloc);
+#else
+ return nullptr;
+#endif
+}
+
+SkShaderBase::Context::Context(const SkShaderBase& shader, const ContextRec& rec)
+ : fShader(shader), fCTM(*rec.fMatrix)
+{
+ // We should never use a context with perspective.
+ SkASSERT(!rec.fMatrix->hasPerspective());
+ SkASSERT(!rec.fLocalMatrix || !rec.fLocalMatrix->hasPerspective());
+ SkASSERT(!shader.getLocalMatrix().hasPerspective());
+
+ // Because the context parameters must be valid at this point, we know that the matrix is
+ // invertible.
+ SkAssertResult(fShader.computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &fTotalInverse));
+
+ fPaintAlpha = rec.fPaint->getAlpha();
+}
+
+SkShaderBase::Context::~Context() {}
+
+bool SkShaderBase::ContextRec::isLegacyCompatible(SkColorSpace* shaderColorSpace) const {
+ return !SkColorSpaceXformSteps::Required(shaderColorSpace, fDstColorSpace);
+}
+
+SkImage* SkShader::isAImage(SkMatrix* localMatrix, SkTileMode xy[2]) const {
+ return as_SB(this)->onIsAImage(localMatrix, xy);
+}
+
+SkShader::GradientType SkShader::asAGradient(GradientInfo* info) const {
+ return kNone_GradientType;
+}
+
+#if SK_SUPPORT_GPU
+std::unique_ptr<GrFragmentProcessor> SkShaderBase::asFragmentProcessor(const GrFPArgs&) const {
+ return nullptr;
+}
+#endif
+
+sk_sp<SkShader> SkShaderBase::makeAsALocalMatrixShader(SkMatrix*) const {
+ return nullptr;
+}
+
+sk_sp<SkShader> SkShaders::Empty() { return sk_make_sp<SkEmptyShader>(); }
+sk_sp<SkShader> SkShaders::Color(SkColor color) { return sk_make_sp<SkColorShader>(color); }
+
+sk_sp<SkShader> SkBitmap::makeShader(SkTileMode tmx, SkTileMode tmy, const SkMatrix* lm) const {
+ if (lm && !lm->invert(nullptr)) {
+ return nullptr;
+ }
+ return SkMakeBitmapShader(*this, tmx, tmy, lm, kIfMutable_SkCopyPixelsMode);
+}
+
+sk_sp<SkShader> SkBitmap::makeShader(const SkMatrix* lm) const {
+ return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, lm);
+}
+
+bool SkShaderBase::appendStages(const SkStageRec& rec) const {
+ return this->onAppendStages(rec);
+}
+
+bool SkShaderBase::onAppendStages(const SkStageRec& rec) const {
+ // SkShader::Context::shadeSpan() handles the paint opacity internally,
+ // but SkRasterPipelineBlitter applies it as a separate stage.
+ // We skip the internal shadeSpan() step by forcing the paint opaque.
+ SkTCopyOnFirstWrite<SkPaint> opaquePaint(rec.fPaint);
+ if (rec.fPaint.getAlpha() != SK_AlphaOPAQUE) {
+ opaquePaint.writable()->setAlpha(SK_AlphaOPAQUE);
+ }
+
+ ContextRec cr(*opaquePaint, rec.fCTM, rec.fLocalM, rec.fDstColorType, sk_srgb_singleton());
+
+ struct CallbackCtx : SkRasterPipeline_CallbackCtx {
+ sk_sp<const SkShader> shader;
+ Context* ctx;
+ };
+ auto cb = rec.fAlloc->make<CallbackCtx>();
+ cb->shader = sk_ref_sp(this);
+ cb->ctx = as_SB(this)->makeContext(cr, rec.fAlloc);
+ cb->fn = [](SkRasterPipeline_CallbackCtx* self, int active_pixels) {
+ auto c = (CallbackCtx*)self;
+ int x = (int)c->rgba[0],
+ y = (int)c->rgba[1];
+ SkPMColor tmp[SkRasterPipeline_kMaxStride];
+ c->ctx->shadeSpan(x,y, tmp, active_pixels);
+
+ for (int i = 0; i < active_pixels; i++) {
+ auto rgba_4f = SkPMColor4f::FromPMColor(tmp[i]);
+ memcpy(c->rgba + 4*i, rgba_4f.vec(), 4*sizeof(float));
+ }
+ };
+
+ if (cb->ctx) {
+ rec.fPipeline->append(SkRasterPipeline::seed_shader);
+ rec.fPipeline->append(SkRasterPipeline::callback, cb);
+ rec.fAlloc->make<SkColorSpaceXformSteps>(sk_srgb_singleton(), kPremul_SkAlphaType,
+ rec.fDstCS, kPremul_SkAlphaType)
+ ->apply(rec.fPipeline, true);
+ return true;
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkEmptyShader::CreateProc(SkReadBuffer&) {
+ return SkShaders::Empty();
+}
diff --git a/gfx/skia/skia/src/shaders/SkShaderBase.h b/gfx/skia/skia/src/shaders/SkShaderBase.h
new file mode 100644
index 0000000000..e071e22db9
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkShaderBase.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaderBase_DEFINED
+#define SkShaderBase_DEFINED
+
+#include "include/core/SkFilterQuality.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkTLazy.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/GrFPArgs.h"
+#endif
+
+class GrContext;
+class GrFragmentProcessor;
+class SkArenaAlloc;
+class SkColorSpace;
+class SkImage;
+struct SkImageInfo;
+class SkPaint;
+class SkRasterPipeline;
+
+/**
+ * Shaders can optionally return a subclass of this when appending their stages.
+ * Doing so tells the caller that the stages can be reused with different CTMs (but nothing
+ * else can change), by calling the updater's update() method before each use.
+ *
+ * This can be a perf-win for bulk draws like drawAtlas and drawVertices, where most of the
+ * setup (i.e. uniforms) is constant, and only something small changes (i.e. matrices). This
+ * reuse skips the cost of computing the stages (and/or avoids having to allocate a separate
+ * shader for each small draw).
+ */
+class SkStageUpdater {
+public:
+ virtual ~SkStageUpdater() {}
+
+ virtual bool update(const SkMatrix& ctm, const SkMatrix* localM) = 0;
+};
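+
+// A minimal caller-side sketch (illustrative only; assumes a shader whose
+// onAppendUpdatableStages() returns a non-null updater):
+//
+//   if (SkStageUpdater* updater = as_SB(shader)->appendUpdatableStages(rec)) {
+//       for (const SkMatrix& ctm : perDrawMatrices) {   // hypothetical per-draw CTMs
+//           updater->update(ctm, /*localM=*/nullptr);   // refresh matrices only
+//           // ... run rec.fPipeline for this draw, reusing the already-built stages ...
+//       }
+//   }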
+
+class SkShaderBase : public SkShader {
+public:
+ ~SkShaderBase() override;
+
+ /**
+ * Returns true if the shader is guaranteed to produce only a single color.
+ * Subclasses can override this to allow loop-hoisting optimization.
+ */
+ virtual bool isConstant() const { return false; }
+
+ const SkMatrix& getLocalMatrix() const { return fLocalMatrix; }
+
+ enum Flags {
+ //!< set if all of the colors will be opaque
+ kOpaqueAlpha_Flag = 1 << 0,
+
+ /** set if the spans only vary in X (const in Y).
+ e.g. an Nx1 bitmap that is being tiled in Y, or a linear-gradient
+ that varies from left-to-right. This flag specifies this for
+ shadeSpan().
+ */
+ kConstInY32_Flag = 1 << 1,
+
+ /** hint for the blitter that 4f is the preferred shading mode.
+ */
+ kPrefers4f_Flag = 1 << 2,
+ };
+
+ /**
+ * ContextRec acts as a parameter bundle for creating Contexts.
+ */
+ struct ContextRec {
+ ContextRec(const SkPaint& paint, const SkMatrix& matrix, const SkMatrix* localM,
+ SkColorType dstColorType, SkColorSpace* dstColorSpace)
+ : fPaint(&paint)
+ , fMatrix(&matrix)
+ , fLocalMatrix(localM)
+ , fDstColorType(dstColorType)
+ , fDstColorSpace(dstColorSpace) {}
+
+ const SkPaint* fPaint; // the current paint associated with the draw
+ const SkMatrix* fMatrix; // the current matrix in the canvas
+ const SkMatrix* fLocalMatrix; // optional local matrix
+ SkColorType fDstColorType; // the color type of the dest surface
+ SkColorSpace* fDstColorSpace; // the color space of the dest surface (if any)
+
+        bool isLegacyCompatible(SkColorSpace* shaderColorSpace) const;
+ };
+
+ class Context : public ::SkNoncopyable {
+ public:
+ Context(const SkShaderBase& shader, const ContextRec&);
+
+ virtual ~Context();
+
+ /**
+         * Sometimes called before drawing with this shader. Return the type of
+ * alpha your shader will return. The default implementation returns 0.
+ * Your subclass should override if it can (even sometimes) report a
+ * non-zero value, since that will enable various blitters to perform
+ * faster.
+ */
+ virtual uint32_t getFlags() const { return 0; }
+
+ /**
+ * Called for each span of the object being drawn. Your subclass should
+ * set the appropriate colors (with premultiplied alpha) that correspond
+ * to the specified device coordinates.
+ */
+ virtual void shadeSpan(int x, int y, SkPMColor[], int count) = 0;
+
+ protected:
+ // Reference to shader, so we don't have to dupe information.
+ const SkShaderBase& fShader;
+
+ uint8_t getPaintAlpha() const { return fPaintAlpha; }
+ const SkMatrix& getTotalInverse() const { return fTotalInverse; }
+ const SkMatrix& getCTM() const { return fCTM; }
+
+ private:
+ SkMatrix fCTM;
+ SkMatrix fTotalInverse;
+ uint8_t fPaintAlpha;
+
+ typedef SkNoncopyable INHERITED;
+ };
+
+ /**
+ * Make a context using the memory provided by the arena.
+ *
+ * @return pointer to context or nullptr if can't be created
+ */
+ Context* makeContext(const ContextRec&, SkArenaAlloc*) const;
+
+#if SK_SUPPORT_GPU
+ /**
+ * Returns a GrFragmentProcessor that implements the shader for the GPU backend. NULL is
+ * returned if there is no GPU implementation.
+ *
+ * The GPU device does not call SkShader::createContext(), instead we pass the view matrix,
+ * local matrix, and filter quality directly.
+ *
+     * The GrContext may be used by the shader to create textures that are required
+     * by the returned processor.
+ *
+ * The returned GrFragmentProcessor should expect an unpremultiplied input color and
+ * produce a premultiplied output.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const;
+#endif
+
+ /**
+ * If the shader can represent its "average" luminance in a single color, return true and
+ * if color is not NULL, return that color. If it cannot, return false and ignore the color
+ * parameter.
+ *
+ * Note: if this returns true, the returned color will always be opaque, as only the RGB
+ * components are used to compute luminance.
+ */
+ bool asLuminanceColor(SkColor*) const;
+
+ // If this returns false, then we draw nothing (do not fall back to shader context)
+ bool appendStages(const SkStageRec&) const;
+
+ bool SK_WARN_UNUSED_RESULT computeTotalInverse(const SkMatrix& ctm,
+ const SkMatrix* outerLocalMatrix,
+ SkMatrix* totalInverse) const;
+
+ // Returns the total local matrix for this shader:
+ //
+ // M = postLocalMatrix x shaderLocalMatrix x preLocalMatrix
+ //
+ SkTCopyOnFirstWrite<SkMatrix> totalLocalMatrix(const SkMatrix* preLocalMatrix,
+ const SkMatrix* postLocalMatrix = nullptr) const;
+
+ virtual SkImage* onIsAImage(SkMatrix*, SkTileMode[2]) const {
+ return nullptr;
+ }
+ virtual SkPicture* isAPicture(SkMatrix*, SkTileMode[2], SkRect* tile) const { return nullptr; }
+
+ static Type GetFlattenableType() { return kSkShaderBase_Type; }
+ Type getFlattenableType() const override { return GetFlattenableType(); }
+
+ static sk_sp<SkShaderBase> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkShaderBase>(static_cast<SkShaderBase*>(
+ SkFlattenable::Deserialize(GetFlattenableType(), data, size, procs).release()));
+ }
+ static void RegisterFlattenables();
+
+ /** DEPRECATED. skbug.com/8941
+ * If this shader can be represented by another shader + a localMatrix, return that shader and
+ * the localMatrix. If not, return nullptr and ignore the localMatrix parameter.
+ */
+ virtual sk_sp<SkShader> makeAsALocalMatrixShader(SkMatrix* localMatrix) const;
+
+ SkStageUpdater* appendUpdatableStages(const SkStageRec& rec) const {
+ return this->onAppendUpdatableStages(rec);
+ }
+
+protected:
+ SkShaderBase(const SkMatrix* localMatrix = nullptr);
+
+ void flatten(SkWriteBuffer&) const override;
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ /**
+ * Specialize creating a SkShader context using the supplied allocator.
+ * @return pointer to context owned by the arena allocator.
+ */
+ virtual Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const {
+ return nullptr;
+ }
+#endif
+
+ virtual bool onAsLuminanceColor(SkColor*) const {
+ return false;
+ }
+
+ // Default impl creates shadercontext and calls that (not very efficient)
+ virtual bool onAppendStages(const SkStageRec&) const;
+
+ virtual SkStageUpdater* onAppendUpdatableStages(const SkStageRec&) const { return nullptr; }
+
+private:
+ // This is essentially const, but not officially so it can be modified in constructors.
+ SkMatrix fLocalMatrix;
+
+ typedef SkShader INHERITED;
+};
+
+inline SkShaderBase* as_SB(SkShader* shader) {
+ return static_cast<SkShaderBase*>(shader);
+}
+
+inline const SkShaderBase* as_SB(const SkShader* shader) {
+ return static_cast<const SkShaderBase*>(shader);
+}
+
+inline const SkShaderBase* as_SB(const sk_sp<SkShader>& shader) {
+ return static_cast<SkShaderBase*>(shader.get());
+}
+
+#endif // SkShaderBase_DEFINED
diff --git a/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.cpp b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.cpp
new file mode 100644
index 0000000000..1a269e6e28
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/shaders/gradients/Sk4fGradientBase.h"
+#include <functional>
+
+namespace {
+
+Sk4f pack_color(const SkColor4f& c4f, bool premul, const Sk4f& component_scale) {
+ Sk4f pm4f = premul
+ ? Sk4f::Load(c4f.premul().vec())
+ : Sk4f::Load(c4f.vec());
+
+ if (premul) {
+ // If the stops are premul, we clamp them to gamut now.
+ // If the stops are unpremul, the colors will eventually go through Sk4f_toL32(),
+ // which ends up clamping to gamut then.
+ pm4f = Sk4f::Max(0, Sk4f::Min(pm4f, pm4f[3]));
+ }
+
+ return pm4f * component_scale;
+}
+
+class IntervalIterator {
+public:
+ IntervalIterator(const SkGradientShaderBase& shader, bool reverse)
+ : fShader(shader)
+ , fFirstPos(reverse ? SK_Scalar1 : 0)
+ , fBegin(reverse ? shader.fColorCount - 1 : 0)
+ , fAdvance(reverse ? -1 : 1) {
+ SkASSERT(shader.fColorCount > 0);
+ }
+
+ void iterate(const SkColor4f* colors,
+ std::function<void(const SkColor4f&, const SkColor4f&,
+ SkScalar, SkScalar)> func) const {
+ if (!fShader.fOrigPos) {
+ this->iterateImplicitPos(colors, func);
+ return;
+ }
+
+ const int end = fBegin + fAdvance * (fShader.fColorCount - 1);
+ int prev = fBegin;
+ SkScalar prevPos = fFirstPos;
+
+ do {
+ const int curr = prev + fAdvance;
+ SkASSERT(curr >= 0 && curr < fShader.fColorCount);
+
+ const SkScalar currPos = fShader.fOrigPos[curr];
+ if (currPos != prevPos) {
+ SkASSERT((currPos - prevPos > 0) == (fAdvance > 0));
+ func(colors[prev], colors[curr], prevPos, currPos);
+ }
+
+ prev = curr;
+ prevPos = currPos;
+ } while (prev != end);
+ }
+
+private:
+ void iterateImplicitPos(const SkColor4f* colors,
+ std::function<void(const SkColor4f&, const SkColor4f&,
+ SkScalar, SkScalar)> func) const {
+        // When clients don't provide explicit color stop positions (fOrigPos == nullptr),
+ // the color stops are distributed evenly across the unit interval
+ // (implicit positioning).
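+        // e.g. four colors with no explicit positions land at 0, 1/3, 2/3, 1
+        // (dt == 1/3 when iterating forward).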
+ const SkScalar dt = fAdvance * SK_Scalar1 / (fShader.fColorCount - 1);
+ const int end = fBegin + fAdvance * (fShader.fColorCount - 2);
+ int prev = fBegin;
+ SkScalar prevPos = fFirstPos;
+
+ while (prev != end) {
+ const int curr = prev + fAdvance;
+ SkASSERT(curr >= 0 && curr < fShader.fColorCount);
+
+ const SkScalar currPos = prevPos + dt;
+ func(colors[prev], colors[curr], prevPos, currPos);
+ prev = curr;
+ prevPos = currPos;
+ }
+
+ // emit the last interval with a pinned end position, to avoid precision issues
+ func(colors[prev], colors[prev + fAdvance], prevPos, 1 - fFirstPos);
+ }
+
+ const SkGradientShaderBase& fShader;
+ const SkScalar fFirstPos;
+ const int fBegin;
+ const int fAdvance;
+};
+
+void addMirrorIntervals(const SkGradientShaderBase& shader,
+ const SkColor4f* colors,
+ const Sk4f& componentScale,
+ bool premulColors, bool reverse,
+ Sk4fGradientIntervalBuffer::BufferType* buffer) {
+ const IntervalIterator iter(shader, reverse);
+ iter.iterate(colors, [&] (const SkColor4f& c0, const SkColor4f& c1, SkScalar t0, SkScalar t1) {
+ SkASSERT(buffer->empty() || buffer->back().fT1 == 2 - t0);
+
+ const auto mirror_t0 = 2 - t0;
+ const auto mirror_t1 = 2 - t1;
+        // mirror_t0 & mirror_t1 may collapse for very small values - recheck to avoid
+ // triggering Interval asserts.
+ if (mirror_t0 != mirror_t1) {
+ buffer->emplace_back(pack_color(c0, premulColors, componentScale), mirror_t0,
+ pack_color(c1, premulColors, componentScale), mirror_t1);
+ }
+ });
+}
+
+} // anonymous namespace
+
+Sk4fGradientInterval::Sk4fGradientInterval(const Sk4f& c0, SkScalar t0,
+ const Sk4f& c1, SkScalar t1)
+ : fT0(t0)
+ , fT1(t1) {
+ SkASSERT(t0 != t1);
+ // Either p0 or p1 can be (-)inf for synthetic clamp edge intervals.
+ SkASSERT(SkScalarIsFinite(t0) || SkScalarIsFinite(t1));
+
+ const auto dt = t1 - t0;
+
+ // Clamp edge intervals are always zero-ramp.
+ SkASSERT(SkScalarIsFinite(dt) || (c0 == c1).allTrue());
+ SkASSERT(SkScalarIsFinite(t0) || (c0 == c1).allTrue());
+ const Sk4f dc = SkScalarIsFinite(dt) ? (c1 - c0) / dt : 0;
+ const Sk4f bias = c0 - (SkScalarIsFinite(t0) ? t0 * dc : 0);
+
+ bias.store(fCb.vec());
+ dc.store(fCg.vec());
+}
+
+void Sk4fGradientIntervalBuffer::init(const SkGradientShaderBase& shader, SkColorSpace* dstCS,
+ SkTileMode tileMode, bool premulColors,
+ SkScalar alpha, bool reverse) {
+ // The main job here is to build a specialized interval list: a different
+ // representation of the color stops data, optimized for efficient scan line
+ // access during shading.
+ //
+    // [{P0,C0} , {P1,C1}) [{P1,C2} , {P2,C3}) ... [{Pn,C2n} , {Pn+1,C2n+1})
+ //
+ // The list may be inverted when requested (such that e.g. points are sorted
+ // in increasing x order when dx < 0).
+ //
+ // Note: the current representation duplicates pos data; we could refactor to
+ // avoid this if interval storage size becomes a concern.
+ //
+ // Aside from reordering, we also perform two more pre-processing steps at
+ // this stage:
+ //
+ // 1) scale the color components depending on paint alpha and the requested
+ // interpolation space (note: the interval color storage is SkPMColor4f, but
+ // that doesn't necessarily mean the colors are premultiplied; that
+ // property is tracked in fColorsArePremul)
+ //
+ // 2) inject synthetic intervals to support tiling.
+ //
+ // * for kRepeat, no extra intervals are needed - the iterator just
+ // wraps around at the end:
+ //
+ // ->[P0,P1)->..[Pn-1,Pn)->
+ //
+ // * for kClamp, we add two "infinite" intervals before/after:
+ //
+ // [-/+inf , P0)->[P0 , P1)->..[Pn-1 , Pn)->[Pn , +/-inf)
+ //
+ // (the iterator should never run off the end in this mode)
+ //
+ // * for kMirror, we extend the range to [0..2] and add a flipped
+ // interval series - then the iterator operates just as in the
+ // kRepeat case:
+ //
+ // ->[P0,P1)->..[Pn-1,Pn)->[2 - Pn,2 - Pn-1)->..[2 - P1,2 - P0)->
+ //
+ // TODO: investigate collapsing intervals << 1px.
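+    //
+    // As a concrete illustration (assuming two stops {C0@0, C1@1}), kClamp yields
+    //
+    //   [-inf , 0) -> {C0,C0}, [0 , 1) -> {C0,C1}, [1 , +inf) -> {C1,C1}
+    //
+    // i.e. constant-color edge intervals bracketing the single real ramp.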
+
+ const auto count = shader.fColorCount;
+
+ SkASSERT(count > 0);
+
+ fIntervals.reset();
+
+ const Sk4f componentScale = premulColors
+ ? Sk4f(alpha)
+ : Sk4f(1.0f, 1.0f, 1.0f, alpha);
+ const int first_index = reverse ? count - 1 : 0;
+ const int last_index = count - 1 - first_index;
+ const SkScalar first_pos = reverse ? SK_Scalar1 : 0;
+ const SkScalar last_pos = SK_Scalar1 - first_pos;
+
+ // Transform all of the colors to destination color space
+ SkColor4fXformer xformedColors(shader.fOrigColors4f, count, shader.fColorSpace.get(), dstCS);
+
+ if (tileMode == SkTileMode::kClamp) {
+ // synthetic edge interval: -/+inf .. P0
+ const Sk4f clamp_color = pack_color(xformedColors.fColors[first_index],
+ premulColors, componentScale);
+ const SkScalar clamp_pos = reverse ? SK_ScalarInfinity : SK_ScalarNegativeInfinity;
+ fIntervals.emplace_back(clamp_color, clamp_pos,
+ clamp_color, first_pos);
+ } else if (tileMode == SkTileMode::kMirror && reverse) {
+ // synthetic mirror intervals injected before main intervals: (2 .. 1]
+ addMirrorIntervals(shader, xformedColors.fColors, componentScale, premulColors, false,
+ &fIntervals);
+ }
+
+ const IntervalIterator iter(shader, reverse);
+ iter.iterate(xformedColors.fColors,
+ [&] (const SkColor4f& c0, const SkColor4f& c1, SkScalar t0, SkScalar t1) {
+ SkASSERT(fIntervals.empty() || fIntervals.back().fT1 == t0);
+
+ fIntervals.emplace_back(pack_color(c0, premulColors, componentScale), t0,
+ pack_color(c1, premulColors, componentScale), t1);
+ });
+
+ if (tileMode == SkTileMode::kClamp) {
+ // synthetic edge interval: Pn .. +/-inf
+ const Sk4f clamp_color = pack_color(xformedColors.fColors[last_index],
+ premulColors, componentScale);
+ const SkScalar clamp_pos = reverse ? SK_ScalarNegativeInfinity : SK_ScalarInfinity;
+ fIntervals.emplace_back(clamp_color, last_pos,
+ clamp_color, clamp_pos);
+ } else if (tileMode == SkTileMode::kMirror && !reverse) {
+ // synthetic mirror intervals injected after main intervals: [1 .. 2)
+ addMirrorIntervals(shader, xformedColors.fColors, componentScale, premulColors, true,
+ &fIntervals);
+ }
+}
+
+const Sk4fGradientInterval* Sk4fGradientIntervalBuffer::find(SkScalar t) const {
+ // Binary search.
+ const auto* i0 = fIntervals.begin();
+ const auto* i1 = fIntervals.end() - 1;
+
+ while (i0 != i1) {
+ SkASSERT(i0 < i1);
+ SkASSERT(t >= i0->fT0 && t <= i1->fT1);
+
+ const auto* i = i0 + ((i1 - i0) >> 1);
+
+ if (t > i->fT1) {
+ i0 = i + 1;
+ } else {
+ i1 = i;
+ }
+ }
+
+ SkASSERT(i0->contains(t));
+ return i0;
+}
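+
+// Illustrative trace: with intervals covering [-inf,0)[0,.5)[.5,1)[1,+inf) and
+// t = 0.75, the search halves i0..i1 twice until i0 points at the [.5,1) interval.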
+
+const Sk4fGradientInterval* Sk4fGradientIntervalBuffer::findNext(
+ SkScalar t, const Sk4fGradientInterval* prev, bool increasing) const {
+
+ SkASSERT(!prev->contains(t));
+ SkASSERT(prev >= fIntervals.begin() && prev < fIntervals.end());
+ SkASSERT(t >= fIntervals.front().fT0 && t <= fIntervals.back().fT1);
+
+ const auto* i = prev;
+
+ // Use the |increasing| signal to figure which direction we should search for
+ // the next interval, then perform a linear search.
+ if (increasing) {
+ do {
+ i += 1;
+ if (i >= fIntervals.end()) {
+ i = fIntervals.begin();
+ }
+ } while (!i->contains(t));
+ } else {
+ do {
+ i -= 1;
+ if (i < fIntervals.begin()) {
+ i = fIntervals.end() - 1;
+ }
+ } while (!i->contains(t));
+ }
+
+ return i;
+}
+
+SkGradientShaderBase::
+GradientShaderBase4fContext::GradientShaderBase4fContext(const SkGradientShaderBase& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec)
+ , fFlags(this->INHERITED::getFlags())
+ , fDither(rec.fPaint->isDither())
+{
+ const SkMatrix& inverse = this->getTotalInverse();
+ fDstToPos.setConcat(shader.fPtsToUnit, inverse);
+ SkASSERT(!fDstToPos.hasPerspective());
+ fDstToPosProc = SkMatrixPriv::GetMapXYProc(fDstToPos);
+
+ if (shader.fColorsAreOpaque && this->getPaintAlpha() == SK_AlphaOPAQUE) {
+ fFlags |= kOpaqueAlpha_Flag;
+ }
+
+ fColorsArePremul =
+ (shader.fGradFlags & SkGradientShader::kInterpolateColorsInPremul_Flag)
+ || shader.fColorsAreOpaque;
+}
+
+bool SkGradientShaderBase::
+GradientShaderBase4fContext::isValid() const {
+ return fDstToPos.isFinite();
+}
diff --git a/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.h b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.h
new file mode 100644
index 0000000000..fcc8fae05a
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientBase.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fGradientBase_DEFINED
+#define Sk4fGradientBase_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/shaders/gradients/Sk4fGradientPriv.h"
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+
+struct Sk4fGradientInterval {
+ Sk4fGradientInterval(const Sk4f& c0, SkScalar t0,
+ const Sk4f& c1, SkScalar t1);
+
+ bool contains(SkScalar t) const {
+        // True if t is in [fT0,fT1]. Note: this helper assumes a
+ // natural/increasing interval - so it's not usable in Sk4fLinearGradient.
+ SkASSERT(fT0 < fT1);
+ return t >= fT0 && t <= fT1;
+ }
+
+ // Color bias and color gradient, such that for a t in this interval
+ //
+ // C = fCb + t * fCg;
+ SkPMColor4f fCb, fCg;
+ SkScalar fT0, fT1;
+};
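+
+// Worked example (illustrative): for a stop pair c0 at t0 = 0 and c1 at t1 = 1,
+// the constructor computes fCg = (c1 - c0) / (t1 - t0) = c1 - c0 and
+// fCb = c0 - t0 * fCg = c0, so C(t) = fCb + t * fCg interpolates c0 -> c1.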
+
+class Sk4fGradientIntervalBuffer {
+public:
+ void init(const SkGradientShaderBase&, SkColorSpace* dstCS, SkTileMode tileMode,
+ bool premulColors, SkScalar alpha, bool reverse);
+
+ const Sk4fGradientInterval* find(SkScalar t) const;
+ const Sk4fGradientInterval* findNext(SkScalar t, const Sk4fGradientInterval* prev,
+ bool increasing) const;
+
+ using BufferType = SkSTArray<8, Sk4fGradientInterval, true>;
+
+ const BufferType* operator->() const { return &fIntervals; }
+
+private:
+ BufferType fIntervals;
+};
+
+class SkGradientShaderBase::
+GradientShaderBase4fContext : public Context {
+public:
+ GradientShaderBase4fContext(const SkGradientShaderBase&,
+ const ContextRec&);
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ bool isValid() const;
+
+protected:
+ Sk4fGradientIntervalBuffer fIntervals;
+ SkMatrix fDstToPos;
+ SkMatrixPriv::MapXYProc fDstToPosProc;
+ uint8_t fFlags;
+ bool fColorsArePremul;
+ bool fDither;
+
+private:
+ using INHERITED = Context;
+
+ void addMirrorIntervals(const SkGradientShaderBase&,
+ const Sk4f& componentScale, bool reverse);
+};
+
+#endif // Sk4fGradientBase_DEFINED
diff --git a/gfx/skia/skia/src/shaders/gradients/Sk4fGradientPriv.h b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientPriv.h
new file mode 100644
index 0000000000..15731ec31f
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/Sk4fGradientPriv.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fGradientPriv_DEFINED
+#define Sk4fGradientPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkHalf.h"
+#include "include/private/SkNx.h"
+#include "src/core/SkUtils.h"
+
+// Templates shared by various 4f gradient flavors.
+
+namespace { // NOLINT(google-build-namespaces)
+
+enum class ApplyPremul { True, False };
+
+template <ApplyPremul>
+struct PremulTraits;
+
+template <>
+struct PremulTraits<ApplyPremul::False> {
+ static Sk4f apply(const Sk4f& c) { return c; }
+};
+
+template <>
+struct PremulTraits<ApplyPremul::True> {
+ static Sk4f apply(const Sk4f& c) {
+ const float alpha = c[3];
+ // FIXME: portable swizzle?
+ return c * Sk4f(alpha, alpha, alpha, 1);
+ }
+};
+
+// Struct encapsulating various dest-dependent ops:
+//
+// - load() Load a SkPMColor4f value into Sk4f. Normally called once per interval
+// advance. Also applies a scale and swizzle suitable for DstType.
+//
+// - store() Store one Sk4f to dest. Optionally handles premul, color space
+// conversion, etc.
+//
+// - store(count) Store the Sk4f value repeatedly to dest, count times.
+//
+// - store4x() Store 4 Sk4f values to dest (opportunistic optimization).
+//
+
+template <ApplyPremul premul>
+struct DstTraits {
+ using PM = PremulTraits<premul>;
+
+ // For L32, prescaling by 255 saves a per-pixel multiplication when premul is not needed.
+ static Sk4f load(const SkPMColor4f& c) {
+ Sk4f c4f = swizzle_rb_if_bgra(Sk4f::Load(c.vec()));
+ return premul == ApplyPremul::False
+ ? c4f * Sk4f(255)
+ : c4f;
+ }
+
+ static void store(const Sk4f& c, SkPMColor* dst, const Sk4f& bias) {
+ if (premul == ApplyPremul::False) {
+ // c is pre-scaled by 255 and pre-biased, just store.
+ SkNx_cast<uint8_t>(c).store(dst);
+ } else {
+ *dst = Sk4f_toL32(PM::apply(c) + bias);
+ }
+ }
+
+ static void store(const Sk4f& c, SkPMColor* dst, int n) {
+ SkPMColor pmc;
+ store(c, &pmc, Sk4f(0));
+ sk_memset32(dst, pmc, n);
+ }
+
+ static void store4x(const Sk4f& c0, const Sk4f& c1,
+ const Sk4f& c2, const Sk4f& c3,
+ SkPMColor* dst,
+ const Sk4f& bias0,
+ const Sk4f& bias1) {
+ if (premul == ApplyPremul::False) {
+ // colors are pre-scaled and pre-biased.
+ Sk4f_ToBytes((uint8_t*)dst, c0, c1, c2, c3);
+ } else {
+ store(c0, dst + 0, bias0);
+ store(c1, dst + 1, bias1);
+ store(c2, dst + 2, bias0);
+ store(c3, dst + 3, bias1);
+ }
+ }
+
+ static Sk4f pre_lerp_bias(const Sk4f& bias) {
+ // We can apply the bias before interpolation when the colors are premultiplied.
+ return premul == ApplyPremul::False ? bias : 0;
+ }
+};
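+
+// Usage sketch (illustrative): shading n pixels of a constant interval color,
+// given a hypothetical `interval` with an SkPMColor4f bias in fCb.
+//
+//   Sk4f c = DstTraits<ApplyPremul::True>::load(interval.fCb);
+//   DstTraits<ApplyPremul::True>::store(c, dst, n);   // premul + repeat-store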
+
+} // anonymous namespace
+
+#endif // Sk4fGradientPriv_DEFINED
diff --git a/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.cpp b/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.cpp
new file mode 100644
index 0000000000..b4266ced9d
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/shaders/gradients/Sk4fLinearGradient.h"
+
+#include <cmath>
+#include <utility>
+
+namespace {
+
+template<ApplyPremul premul>
+void ramp(const Sk4f& c, const Sk4f& dc, SkPMColor dst[], int n,
+ const Sk4f& bias0, const Sk4f& bias1) {
+ SkASSERT(n > 0);
+
+ const Sk4f dc2 = dc + dc,
+ dc4 = dc2 + dc2;
+
+ Sk4f c0 = c + DstTraits<premul>::pre_lerp_bias(bias0),
+ c1 = c + dc + DstTraits<premul>::pre_lerp_bias(bias1),
+ c2 = c0 + dc2,
+ c3 = c1 + dc2;
+
+ while (n >= 4) {
+ DstTraits<premul>::store4x(c0, c1, c2, c3, dst, bias0, bias1);
+ dst += 4;
+
+ c0 = c0 + dc4;
+ c1 = c1 + dc4;
+ c2 = c2 + dc4;
+ c3 = c3 + dc4;
+ n -= 4;
+ }
+ if (n & 2) {
+ DstTraits<premul>::store(c0, dst++, bias0);
+ DstTraits<premul>::store(c1, dst++, bias1);
+ c0 = c0 + dc2;
+ }
+ if (n & 1) {
+ DstTraits<premul>::store(c0, dst, bias0);
+ }
+}
+
+template<SkTileMode>
+SkScalar pinFx(SkScalar);
+
+template<>
+SkScalar pinFx<SkTileMode::kClamp>(SkScalar fx) {
+ return fx;
+}
+
+template<>
+SkScalar pinFx<SkTileMode::kRepeat>(SkScalar fx) {
+ SkScalar f = SkScalarIsFinite(fx) ? SkScalarFraction(fx) : 0;
+ if (f < 0) {
+ f = SkTMin(f + 1, nextafterf(1, 0));
+ }
+ SkASSERT(f >= 0);
+ SkASSERT(f < 1.0f);
+ return f;
+}
+
+template<>
+SkScalar pinFx<SkTileMode::kMirror>(SkScalar fx) {
+ SkScalar f = SkScalarIsFinite(fx) ? SkScalarMod(fx, 2.0f) : 0;
+ if (f < 0) {
+ f = SkTMin(f + 2, nextafterf(2, 0));
+ }
+ SkASSERT(f >= 0);
+ SkASSERT(f < 2.0f);
+ return f;
+}
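+
+// Illustrative values: pinFx<SkTileMode::kMirror>(-0.25f) == 1.75f (fmod gives
+// -0.25, then +2), while pinFx<SkTileMode::kRepeat>(-0.25f) == 0.75f; both stay
+// strictly below their tiling period.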
+
+// true when x is in [k1,k2], or [k2, k1] when the interval is reversed.
+// TODO(fmalita): hoist the reversed interval check out of this helper.
+bool in_range(SkScalar x, SkScalar k1, SkScalar k2) {
+ SkASSERT(k1 != k2);
+ return (k1 < k2)
+ ? (x >= k1 && x <= k2)
+ : (x >= k2 && x <= k1);
+}
+
+} // anonymous namespace
+
+SkLinearGradient::
+LinearGradient4fContext::LinearGradient4fContext(const SkLinearGradient& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec) {
+
+ // Our fast path expects interval points to be monotonically increasing in x.
+ const bool reverseIntervals = std::signbit(fDstToPos.getScaleX());
+ fIntervals.init(shader, rec.fDstColorSpace, shader.fTileMode,
+ fColorsArePremul, rec.fPaint->getAlpha() * (1.0f / 255), reverseIntervals);
+
+ SkASSERT(fIntervals->count() > 0);
+ fCachedInterval = fIntervals->begin();
+}
+
+const Sk4fGradientInterval*
+SkLinearGradient::LinearGradient4fContext::findInterval(SkScalar fx) const {
+ SkASSERT(in_range(fx, fIntervals->front().fT0, fIntervals->back().fT1));
+
+ if (1) {
+ // Linear search, using the last scanline interval as a starting point.
+ SkASSERT(fCachedInterval >= fIntervals->begin());
+ SkASSERT(fCachedInterval < fIntervals->end());
+ const int search_dir = fDstToPos.getScaleX() >= 0 ? 1 : -1;
+ while (!in_range(fx, fCachedInterval->fT0, fCachedInterval->fT1)) {
+ fCachedInterval += search_dir;
+ if (fCachedInterval >= fIntervals->end()) {
+ fCachedInterval = fIntervals->begin();
+ } else if (fCachedInterval < fIntervals->begin()) {
+ fCachedInterval = fIntervals->end() - 1;
+ }
+ }
+ return fCachedInterval;
+ } else {
+ // Binary search. Seems less effective than linear + caching.
+ const auto* i0 = fIntervals->begin();
+ const auto* i1 = fIntervals->end() - 1;
+
+ while (i0 != i1) {
+ SkASSERT(i0 < i1);
+ SkASSERT(in_range(fx, i0->fT0, i1->fT1));
+
+ const auto* i = i0 + ((i1 - i0) >> 1);
+
+ if (in_range(fx, i0->fT0, i->fT1)) {
+ i1 = i;
+ } else {
+ SkASSERT(in_range(fx, i->fT1, i1->fT1));
+ i0 = i + 1;
+ }
+ }
+
+ SkASSERT(in_range(fx, i0->fT0, i0->fT1));
+ return i0;
+ }
+}
+
+
+void SkLinearGradient::
+LinearGradient4fContext::shadeSpan(int x, int y, SkPMColor dst[], int count) {
+ SkASSERT(count > 0);
+
+ float bias0 = 0,
+ bias1 = 0;
+
+ if (fDither) {
+ static constexpr float dither_cell[] = {
+ -3/8.0f, 1/8.0f,
+ 3/8.0f, -1/8.0f,
+ };
+
+ const int rowIndex = (y & 1) << 1;
+ bias0 = dither_cell[rowIndex + 0];
+ bias1 = dither_cell[rowIndex + 1];
+
+ if (x & 1) {
+ using std::swap;
+ swap(bias0, bias1);
+ }
+ }
+
+ if (fColorsArePremul) {
+ // In premul interpolation mode, components are pre-scaled by 255 and the store
+ // op is truncating. We pre-bias here to achieve rounding.
+ bias0 += 0.5f;
+ bias1 += 0.5f;
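+        // (Truncation plus a 0.5 bias rounds to nearest: e.g. a channel at 152.6
+        // stores as trunc(152.6 + 0.5) == 153, matching round(152.6).)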
+
+ this->shadePremulSpan<ApplyPremul::False>(x, y, dst, count, bias0, bias1);
+ } else {
+        // In unpremul interpolation mode, components are not pre-scaled.
+ bias0 *= 1/255.0f;
+ bias1 *= 1/255.0f;
+
+ this->shadePremulSpan<ApplyPremul::True >(x, y, dst, count, bias0, bias1);
+ }
+}
+
+template<ApplyPremul premul>
+void SkLinearGradient::
+LinearGradient4fContext::shadePremulSpan(int x, int y, SkPMColor dst[], int count,
+ float bias0, float bias1) const {
+ const SkLinearGradient& shader = static_cast<const SkLinearGradient&>(fShader);
+ switch (shader.fTileMode) {
+ case SkTileMode::kDecal:
+ SkASSERT(false); // decal only supported via stages
+ // fall-through
+ case SkTileMode::kClamp:
+ this->shadeSpanInternal<premul, SkTileMode::kClamp >(x, y, dst, count, bias0, bias1);
+ break;
+ case SkTileMode::kRepeat:
+ this->shadeSpanInternal<premul, SkTileMode::kRepeat>(x, y, dst, count, bias0, bias1);
+ break;
+ case SkTileMode::kMirror:
+ this->shadeSpanInternal<premul, SkTileMode::kMirror>(x, y, dst, count, bias0, bias1);
+ break;
+ }
+}
+
+template<ApplyPremul premul, SkTileMode tileMode>
+void SkLinearGradient::
+LinearGradient4fContext::shadeSpanInternal(int x, int y, SkPMColor dst[], int count,
+ float bias0, float bias1) const {
+ SkPoint pt;
+ fDstToPosProc(fDstToPos,
+ x + SK_ScalarHalf,
+ y + SK_ScalarHalf,
+ &pt);
+ const SkScalar fx = pinFx<tileMode>(pt.x());
+ const SkScalar dx = fDstToPos.getScaleX();
+ LinearIntervalProcessor<premul, tileMode> proc(fIntervals->begin(),
+ fIntervals->end() - 1,
+ this->findInterval(fx),
+ fx,
+ dx,
+ SkScalarNearlyZero(dx * count));
+ Sk4f bias4f0(bias0),
+ bias4f1(bias1);
+
+ while (count > 0) {
+ // What we really want here is SkTPin(advance, 1, count)
+ // but that's a significant perf hit for >> stops; investigate.
+ const int n = SkTMin(SkScalarTruncToInt(proc.currentAdvance() + 1), count);
+
+ // The current interval advance can be +inf (e.g. when reaching
+ // the clamp mode end intervals) - when that happens, we expect to
+ // a) consume all remaining count in one swoop
+ // b) return a zero color gradient
+ SkASSERT(SkScalarIsFinite(proc.currentAdvance())
+ || (n == count && proc.currentRampIsZero()));
+
+ if (proc.currentRampIsZero()) {
+ DstTraits<premul>::store(proc.currentColor(), dst, n);
+ } else {
+ ramp<premul>(proc.currentColor(), proc.currentColorGrad(), dst, n,
+ bias4f0, bias4f1);
+ }
+
+ proc.advance(SkIntToScalar(n));
+ count -= n;
+ dst += n;
+
+ if (n & 1) {
+ using std::swap;
+ swap(bias4f0, bias4f1);
+ }
+ }
+}
+
+template<ApplyPremul premul, SkTileMode tileMode>
+class SkLinearGradient::
+LinearGradient4fContext::LinearIntervalProcessor {
+public:
+ LinearIntervalProcessor(const Sk4fGradientInterval* firstInterval,
+ const Sk4fGradientInterval* lastInterval,
+ const Sk4fGradientInterval* i,
+ SkScalar fx,
+ SkScalar dx,
+ bool is_vertical)
+ : fAdvX(is_vertical ? SK_ScalarInfinity : (i->fT1 - fx) / dx)
+ , fFirstInterval(firstInterval)
+ , fLastInterval(lastInterval)
+ , fInterval(i)
+ , fDx(dx)
+ , fIsVertical(is_vertical)
+ {
+ SkASSERT(fAdvX >= 0);
+ SkASSERT(firstInterval <= lastInterval);
+
+ if (tileMode != SkTileMode::kClamp && !is_vertical) {
+ const auto spanX = (lastInterval->fT1 - firstInterval->fT0) / dx;
+ SkASSERT(spanX >= 0);
+
+ // If we're in a repeating tile mode and the whole gradient is compressed into a
+ // fraction of a pixel, we just use the average color in zero-ramp mode.
+ // This also avoids cases where we make no progress due to interval advances being
+ // close to zero.
+ static constexpr SkScalar kMinSpanX = .25f;
+ if (spanX < kMinSpanX) {
+ this->init_average_props();
+ return;
+ }
+ }
+
+ this->compute_interval_props(fx);
+ }
+
+ SkScalar currentAdvance() const {
+ SkASSERT(fAdvX >= 0);
+ SkASSERT(!std::isfinite(fAdvX) || fAdvX <= (fInterval->fT1 - fInterval->fT0) / fDx);
+ return fAdvX;
+ }
+
+ bool currentRampIsZero() const { return fZeroRamp; }
+ const Sk4f& currentColor() const { return fCc; }
+ const Sk4f& currentColorGrad() const { return fDcDx; }
+
+ void advance(SkScalar advX) {
+ SkASSERT(advX > 0);
+ SkASSERT(fAdvX >= 0);
+
+ if (advX >= fAdvX) {
+ advX = this->advance_interval(advX);
+ }
+ SkASSERT(advX < fAdvX);
+
+ fCc = fCc + fDcDx * Sk4f(advX);
+ fAdvX -= advX;
+ }
+
+private:
+ void compute_interval_props(SkScalar t) {
+ SkASSERT(in_range(t, fInterval->fT0, fInterval->fT1));
+
+ const Sk4f dc = DstTraits<premul>::load(fInterval->fCg);
+ fCc = DstTraits<premul>::load(fInterval->fCb) + dc * Sk4f(t);
+ fDcDx = dc * fDx;
+ fZeroRamp = fIsVertical || (dc == 0).allTrue();
+ }
+
+ void init_average_props() {
+ fAdvX = SK_ScalarInfinity;
+ fZeroRamp = true;
+ fDcDx = 0;
+ fCc = Sk4f(0);
+
+ // TODO: precompute the average at interval setup time?
+ for (const auto* i = fFirstInterval; i <= fLastInterval; ++i) {
+ // Each interval contributes its average color to the total/weighted average:
+ //
+ // C = (c0 + c1) / 2 = (Cb + Cg * t0 + Cb + Cg * t1) / 2 = Cb + Cg *(t0 + t1) / 2
+ //
+ // Avg += C * (t1 - t0)
+ //
+ const auto c = DstTraits<premul>::load(i->fCb)
+ + DstTraits<premul>::load(i->fCg) * (i->fT0 + i->fT1) * 0.5f;
+ fCc = fCc + c * (i->fT1 - i->fT0);
+ }
+ }
+
+ const Sk4fGradientInterval* next_interval(const Sk4fGradientInterval* i) const {
+ SkASSERT(i >= fFirstInterval);
+ SkASSERT(i <= fLastInterval);
+ i++;
+
+ if (tileMode == SkTileMode::kClamp) {
+ SkASSERT(i <= fLastInterval);
+ return i;
+ }
+
+ return (i <= fLastInterval) ? i : fFirstInterval;
+ }
+
+ SkScalar advance_interval(SkScalar advX) {
+ SkASSERT(advX >= fAdvX);
+
+ do {
+ advX -= fAdvX;
+ fInterval = this->next_interval(fInterval);
+ fAdvX = (fInterval->fT1 - fInterval->fT0) / fDx;
+ SkASSERT(fAdvX > 0);
+ } while (advX >= fAdvX);
+
+ compute_interval_props(fInterval->fT0);
+
+ SkASSERT(advX >= 0);
+ return advX;
+ }
+
+ // Current interval properties.
+ Sk4f fDcDx; // dst color gradient (dc/dx)
+ Sk4f fCc; // current color, interpolated in dst
+ SkScalar fAdvX; // remaining interval advance in dst
+ bool fZeroRamp; // current interval color grad is 0
+
+ const Sk4fGradientInterval* fFirstInterval;
+ const Sk4fGradientInterval* fLastInterval;
+ const Sk4fGradientInterval* fInterval; // current interval
+ const SkScalar fDx; // 'dx' for consistency with other impls; actually dt/dx
+ const bool fIsVertical;
+};
diff --git a/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.h b/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.h
new file mode 100644
index 0000000000..a7885895ee
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/Sk4fLinearGradient.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fLinearGradient_DEFINED
+#define Sk4fLinearGradient_DEFINED
+
+#include "src/shaders/gradients/Sk4fGradientBase.h"
+#include "src/shaders/gradients/SkLinearGradient.h"
+
+class SkLinearGradient::
+LinearGradient4fContext final : public GradientShaderBase4fContext {
+public:
+ LinearGradient4fContext(const SkLinearGradient&, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor dst[], int count) override;
+
+private:
+ using INHERITED = GradientShaderBase4fContext;
+
+ template<ApplyPremul, SkTileMode>
+ class LinearIntervalProcessor;
+
+ template <ApplyPremul premul>
+ void shadePremulSpan(int x, int y, SkPMColor dst[], int count,
+ float bias0, float bias1) const;
+
+ template <ApplyPremul premul, SkTileMode tileMode>
+ void shadeSpanInternal(int x, int y, SkPMColor dst[], int count,
+ float bias0, float bias1) const;
+
+ const Sk4fGradientInterval* findInterval(SkScalar fx) const;
+
+ mutable const Sk4fGradientInterval* fCachedInterval;
+};
+
+#endif // Sk4fLinearGradient_DEFINED
diff --git a/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp b/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp
new file mode 100644
index 0000000000..641cbce2c3
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <algorithm>
+#include "include/core/SkMallocPixelRef.h"
+#include "include/private/SkFloatBits.h"
+#include "include/private/SkHalf.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/gradients/Sk4fLinearGradient.h"
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+#include "src/shaders/gradients/SkLinearGradient.h"
+#include "src/shaders/gradients/SkRadialGradient.h"
+#include "src/shaders/gradients/SkSweepGradient.h"
+#include "src/shaders/gradients/SkTwoPointConicalGradient.h"
+
+enum GradientSerializationFlags {
+ // Bits 29:31 used for various boolean flags
+ kHasPosition_GSF = 0x80000000,
+ kHasLocalMatrix_GSF = 0x40000000,
+ kHasColorSpace_GSF = 0x20000000,
+
+ // Bits 12:28 unused
+
+ // Bits 8:11 for fTileMode
+ kTileModeShift_GSF = 8,
+ kTileModeMask_GSF = 0xF,
+
+ // Bits 0:7 for fGradFlags (note that kForce4fContext_PrivateFlag is 0x80)
+ kGradFlagsShift_GSF = 0,
+ kGradFlagsMask_GSF = 0xFF,
+};
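+
+// Example packing (illustrative, assuming SkTileMode::kRepeat == 1): a gradient
+// with explicit positions, a local matrix, and repeat tiling serializes flags as
+//   kHasPosition_GSF | kHasLocalMatrix_GSF | (1 << kTileModeShift_GSF) == 0xC0000100.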
+
+void SkGradientShaderBase::Descriptor::flatten(SkWriteBuffer& buffer) const {
+ uint32_t flags = 0;
+ if (fPos) {
+ flags |= kHasPosition_GSF;
+ }
+ if (fLocalMatrix) {
+ flags |= kHasLocalMatrix_GSF;
+ }
+ sk_sp<SkData> colorSpaceData = fColorSpace ? fColorSpace->serialize() : nullptr;
+ if (colorSpaceData) {
+ flags |= kHasColorSpace_GSF;
+ }
+ SkASSERT(static_cast<uint32_t>(fTileMode) <= kTileModeMask_GSF);
+ flags |= ((unsigned)fTileMode << kTileModeShift_GSF);
+ SkASSERT(fGradFlags <= kGradFlagsMask_GSF);
+ flags |= (fGradFlags << kGradFlagsShift_GSF);
+
+ buffer.writeUInt(flags);
+
+ buffer.writeColor4fArray(fColors, fCount);
+ if (colorSpaceData) {
+ buffer.writeDataAsByteArray(colorSpaceData.get());
+ }
+ if (fPos) {
+ buffer.writeScalarArray(fPos, fCount);
+ }
+ if (fLocalMatrix) {
+ buffer.writeMatrix(*fLocalMatrix);
+ }
+}
+
+template <int N, typename T, bool MEM_MOVE>
+static bool validate_array(SkReadBuffer& buffer, size_t count, SkSTArray<N, T, MEM_MOVE>* array) {
+ if (!buffer.validateCanReadN<T>(count)) {
+ return false;
+ }
+
+ array->resize_back(count);
+ return true;
+}
+
+bool SkGradientShaderBase::DescriptorScope::unflatten(SkReadBuffer& buffer) {
+ // New gradient format. Includes floating point color, color space, densely packed flags
+ uint32_t flags = buffer.readUInt();
+
+ fTileMode = (SkTileMode)((flags >> kTileModeShift_GSF) & kTileModeMask_GSF);
+ fGradFlags = (flags >> kGradFlagsShift_GSF) & kGradFlagsMask_GSF;
+
+ fCount = buffer.getArrayCount();
+
+ if (!(validate_array(buffer, fCount, &fColorStorage) &&
+ buffer.readColor4fArray(fColorStorage.begin(), fCount))) {
+ return false;
+ }
+ fColors = fColorStorage.begin();
+
+ if (SkToBool(flags & kHasColorSpace_GSF)) {
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ fColorSpace = data ? SkColorSpace::Deserialize(data->data(), data->size()) : nullptr;
+ } else {
+ fColorSpace = nullptr;
+ }
+ if (SkToBool(flags & kHasPosition_GSF)) {
+ if (!(validate_array(buffer, fCount, &fPosStorage) &&
+ buffer.readScalarArray(fPosStorage.begin(), fCount))) {
+ return false;
+ }
+ fPos = fPosStorage.begin();
+ } else {
+ fPos = nullptr;
+ }
+ if (SkToBool(flags & kHasLocalMatrix_GSF)) {
+ fLocalMatrix = &fLocalMatrixStorage;
+ buffer.readMatrix(&fLocalMatrixStorage);
+ } else {
+ fLocalMatrix = nullptr;
+ }
+ return buffer.isValid();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit)
+ : INHERITED(desc.fLocalMatrix)
+ , fPtsToUnit(ptsToUnit)
+ , fColorSpace(desc.fColorSpace ? desc.fColorSpace : SkColorSpace::MakeSRGB())
+ , fColorsAreOpaque(true)
+{
+ fPtsToUnit.getType(); // Precache so reads are threadsafe.
+ SkASSERT(desc.fCount > 1);
+
+ fGradFlags = static_cast<uint8_t>(desc.fGradFlags);
+
+ SkASSERT((unsigned)desc.fTileMode < kSkTileModeCount);
+ fTileMode = desc.fTileMode;
+
+ /* Note: we let the caller skip the first and/or last position.
+ i.e. pos[0] = 0.3, pos[1] = 0.7
+ In these cases, we insert dummy entries to ensure that the final data
+ will be bracketed by [0, 1].
+ i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1
+
+       Thus colorCount (the caller's value) and fColorCount (our value) may
+       differ by up to 2. In the above example:
+ colorCount = 2
+ fColorCount = 4
+ */
+ fColorCount = desc.fCount;
+ // check if we need to add in dummy start and/or end position/colors
+ bool dummyFirst = false;
+ bool dummyLast = false;
+ if (desc.fPos) {
+ dummyFirst = desc.fPos[0] != 0;
+ dummyLast = desc.fPos[desc.fCount - 1] != SK_Scalar1;
+ fColorCount += dummyFirst + dummyLast;
+ }
+
+ size_t storageSize = fColorCount * (sizeof(SkColor4f) + (desc.fPos ? sizeof(SkScalar) : 0));
+ fOrigColors4f = reinterpret_cast<SkColor4f*>(fStorage.reset(storageSize));
+ fOrigPos = desc.fPos ? reinterpret_cast<SkScalar*>(fOrigColors4f + fColorCount)
+ : nullptr;
+
+ // Now copy over the colors, adding the dummies as needed
+ SkColor4f* origColors = fOrigColors4f;
+ if (dummyFirst) {
+ *origColors++ = desc.fColors[0];
+ }
+ for (int i = 0; i < desc.fCount; ++i) {
+ origColors[i] = desc.fColors[i];
+ fColorsAreOpaque = fColorsAreOpaque && (desc.fColors[i].fA == 1);
+ }
+ if (dummyLast) {
+ origColors += desc.fCount;
+ *origColors = desc.fColors[desc.fCount - 1];
+ }
+
+ if (desc.fPos) {
+ SkScalar prev = 0;
+ SkScalar* origPosPtr = fOrigPos;
+ *origPosPtr++ = prev; // force the first pos to 0
+
+ int startIndex = dummyFirst ? 0 : 1;
+ int count = desc.fCount + dummyLast;
+
+ bool uniformStops = true;
+ const SkScalar uniformStep = desc.fPos[startIndex] - prev;
+ for (int i = startIndex; i < count; i++) {
+ // Pin the last value to 1.0, and make sure pos is monotonic.
+ auto curr = (i == desc.fCount) ? 1 : SkScalarPin(desc.fPos[i], prev, 1);
+ uniformStops &= SkScalarNearlyEqual(uniformStep, curr - prev);
+
+ *origPosPtr++ = prev = curr;
+ }
+
+ // If the stops are uniform, treat them as implicit.
+ if (uniformStops) {
+ fOrigPos = nullptr;
+ }
+ }
+}
+
+SkGradientShaderBase::~SkGradientShaderBase() {}
+
+void SkGradientShaderBase::flatten(SkWriteBuffer& buffer) const {
+ Descriptor desc;
+ desc.fColors = fOrigColors4f;
+ desc.fColorSpace = fColorSpace;
+ desc.fPos = fOrigPos;
+ desc.fCount = fColorCount;
+ desc.fTileMode = fTileMode;
+ desc.fGradFlags = fGradFlags;
+
+ const SkMatrix& m = this->getLocalMatrix();
+ desc.fLocalMatrix = m.isIdentity() ? nullptr : &m;
+ desc.flatten(buffer);
+}
+
+static void add_stop_color(SkRasterPipeline_GradientCtx* ctx, size_t stop, SkPMColor4f Fs, SkPMColor4f Bs) {
+ (ctx->fs[0])[stop] = Fs.fR;
+ (ctx->fs[1])[stop] = Fs.fG;
+ (ctx->fs[2])[stop] = Fs.fB;
+ (ctx->fs[3])[stop] = Fs.fA;
+ (ctx->bs[0])[stop] = Bs.fR;
+ (ctx->bs[1])[stop] = Bs.fG;
+ (ctx->bs[2])[stop] = Bs.fB;
+ (ctx->bs[3])[stop] = Bs.fA;
+}
+
+static void add_const_color(SkRasterPipeline_GradientCtx* ctx, size_t stop, SkPMColor4f color) {
+ add_stop_color(ctx, stop, { 0, 0, 0, 0 }, color);
+}
+
+// Calculate a factor F and a bias B so that color = F*t + B when t is in range of
+// the stop. Assume that the distance between stops is 1/gapCount.
+static void init_stop_evenly(
+ SkRasterPipeline_GradientCtx* ctx, float gapCount, size_t stop, SkPMColor4f c_l, SkPMColor4f c_r) {
+ // Clankium's GCC 4.9 targeting ARMv7 is barfing when we use Sk4f math here, so go scalar...
+ SkPMColor4f Fs = {
+ (c_r.fR - c_l.fR) * gapCount,
+ (c_r.fG - c_l.fG) * gapCount,
+ (c_r.fB - c_l.fB) * gapCount,
+ (c_r.fA - c_l.fA) * gapCount,
+ };
+ SkPMColor4f Bs = {
+ c_l.fR - Fs.fR*(stop/gapCount),
+ c_l.fG - Fs.fG*(stop/gapCount),
+ c_l.fB - Fs.fB*(stop/gapCount),
+ c_l.fA - Fs.fA*(stop/gapCount),
+ };
+ add_stop_color(ctx, stop, Fs, Bs);
+}
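+
+// Worked example for init_stop_evenly (illustrative): with three stops c0, c1, c2
+// (gapCount == 2), stop 0 gets F = 2*(c1 - c0) and B = c0 - F*0 = c0, so
+// color(t) = F*t + B evaluates to c0 at t == 0 and to c1 at t == 0.5, the
+// boundaries of the first interval.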
+
+// For each stop we calculate a bias B and a scale factor F, such that
+// for any t between stops n and n+1, the color we want is B[n] + F[n]*t.
+static void init_stop_pos(
+ SkRasterPipeline_GradientCtx* ctx, size_t stop, float t_l, float t_r, SkPMColor4f c_l, SkPMColor4f c_r) {
+ // See note about Clankium's old compiler in init_stop_evenly().
+ SkPMColor4f Fs = {
+ (c_r.fR - c_l.fR) / (t_r - t_l),
+ (c_r.fG - c_l.fG) / (t_r - t_l),
+ (c_r.fB - c_l.fB) / (t_r - t_l),
+ (c_r.fA - c_l.fA) / (t_r - t_l),
+ };
+ SkPMColor4f Bs = {
+ c_l.fR - Fs.fR*t_l,
+ c_l.fG - Fs.fG*t_l,
+ c_l.fB - Fs.fB*t_l,
+ c_l.fA - Fs.fA*t_l,
+ };
+ ctx->ts[stop] = t_l;
+ add_stop_color(ctx, stop, Fs, Bs);
+}
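+
+// Sanity check for init_stop_pos: F*t_l + B == c_l, and F*t_r + B
+// == c_l + F*(t_r - t_l) == c_l + (c_r - c_l) == c_r, so the linear ramp hits
+// both endpoint colors exactly.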
+
+bool SkGradientShaderBase::onAppendStages(const SkStageRec& rec) const {
+ SkRasterPipeline* p = rec.fPipeline;
+ SkArenaAlloc* alloc = rec.fAlloc;
+ SkRasterPipeline_DecalTileCtx* decal_ctx = nullptr;
+
+ SkMatrix matrix;
+ if (!this->computeTotalInverse(rec.fCTM, rec.fLocalM, &matrix)) {
+ return false;
+ }
+ matrix.postConcat(fPtsToUnit);
+
+ SkRasterPipeline_<256> postPipeline;
+
+ p->append(SkRasterPipeline::seed_shader);
+ p->append_matrix(alloc, matrix);
+ this->appendGradientStages(alloc, p, &postPipeline);
+
+ switch(fTileMode) {
+ case SkTileMode::kMirror: p->append(SkRasterPipeline::mirror_x_1); break;
+ case SkTileMode::kRepeat: p->append(SkRasterPipeline::repeat_x_1); break;
+ case SkTileMode::kDecal:
+ decal_ctx = alloc->make<SkRasterPipeline_DecalTileCtx>();
+ decal_ctx->limit_x = SkBits2Float(SkFloat2Bits(1.0f) + 1);
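+            // SkFloat2Bits(1.0f) + 1, reinterpreted as a float, is the next
+            // representable value after 1.0, so t == 1.0 itself still passes
+            // the decal test.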
+ // reuse mask + limit_x stage, or create a custom decal_1 that just stores the mask
+ p->append(SkRasterPipeline::decal_x, decal_ctx);
+ // fall-through to clamp
+ case SkTileMode::kClamp:
+ if (!fOrigPos) {
+ // We clamp only when the stops are evenly spaced.
+ // If not, there may be hard stops, and clamping ruins hard stops at 0 and/or 1.
+ // In that case, we must make sure we're using the general "gradient" stage,
+ // which is the only stage that will correctly handle unclamped t.
+ p->append(SkRasterPipeline::clamp_x_1);
+ }
+ break;
+ }
+
+ const bool premulGrad = fGradFlags & SkGradientShader::kInterpolateColorsInPremul_Flag;
+
+ // Transform all of the colors to destination color space
+ SkColor4fXformer xformedColors(fOrigColors4f, fColorCount, fColorSpace.get(), rec.fDstCS);
+
+ auto prepareColor = [premulGrad, &xformedColors](int i) {
+ SkColor4f c = xformedColors.fColors[i];
+ return premulGrad ? c.premul()
+ : SkPMColor4f{ c.fR, c.fG, c.fB, c.fA };
+ };
+
+ // The two-stop case with stops at 0 and 1.
+ if (fColorCount == 2 && fOrigPos == nullptr) {
+ const SkPMColor4f c_l = prepareColor(0),
+ c_r = prepareColor(1);
+
+ // See F and B below.
+ auto ctx = alloc->make<SkRasterPipeline_EvenlySpaced2StopGradientCtx>();
+ (Sk4f::Load(c_r.vec()) - Sk4f::Load(c_l.vec())).store(ctx->f);
+ ( Sk4f::Load(c_l.vec())).store(ctx->b);
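+        // With f = c_r - c_l and b = c_l, color(t) = f*t + b interpolates from
+        // c_l at t == 0 to c_r at t == 1.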
+ ctx->interpolatedInPremul = premulGrad;
+
+ p->append(SkRasterPipeline::evenly_spaced_2_stop_gradient, ctx);
+ } else {
+ auto* ctx = alloc->make<SkRasterPipeline_GradientCtx>();
+ ctx->interpolatedInPremul = premulGrad;
+
+        // Note: In order to handle clamps in search, the search assumes a stop conceptually placed
+ // at -inf. Therefore, the max number of stops is fColorCount+1.
+ for (int i = 0; i < 4; i++) {
+            // Allocate at least 8 floats for the AVX2 gather from a YMM register.
+ ctx->fs[i] = alloc->makeArray<float>(std::max(fColorCount+1, 8));
+ ctx->bs[i] = alloc->makeArray<float>(std::max(fColorCount+1, 8));
+ }
+
+ if (fOrigPos == nullptr) {
+ // Handle evenly distributed stops.
+
+ size_t stopCount = fColorCount;
+ float gapCount = stopCount - 1;
+
+ SkPMColor4f c_l = prepareColor(0);
+ for (size_t i = 0; i < stopCount - 1; i++) {
+ SkPMColor4f c_r = prepareColor(i + 1);
+ init_stop_evenly(ctx, gapCount, i, c_l, c_r);
+ c_l = c_r;
+ }
+ add_const_color(ctx, stopCount - 1, c_l);
+
+ ctx->stopCount = stopCount;
+ p->append(SkRasterPipeline::evenly_spaced_gradient, ctx);
+ } else {
+ // Handle arbitrary stops.
+
+ ctx->ts = alloc->makeArray<float>(fColorCount+1);
+
+ // Remove the dummy stops inserted by SkGradientShaderBase::SkGradientShaderBase
+ // because they are naturally handled by the search method.
+ int firstStop;
+ int lastStop;
+ if (fColorCount > 2) {
+ firstStop = fOrigColors4f[0] != fOrigColors4f[1] ? 0 : 1;
+ lastStop = fOrigColors4f[fColorCount - 2] != fOrigColors4f[fColorCount - 1]
+ ? fColorCount - 1 : fColorCount - 2;
+ } else {
+ firstStop = 0;
+ lastStop = 1;
+ }
+
+ size_t stopCount = 0;
+ float t_l = fOrigPos[firstStop];
+ SkPMColor4f c_l = prepareColor(firstStop);
+ add_const_color(ctx, stopCount++, c_l);
+ // N.B. lastStop is the index of the last stop, not one after.
+ for (int i = firstStop; i < lastStop; i++) {
+ float t_r = fOrigPos[i + 1];
+ SkPMColor4f c_r = prepareColor(i + 1);
+ SkASSERT(t_l <= t_r);
+ if (t_l < t_r) {
+ init_stop_pos(ctx, stopCount, t_l, t_r, c_l, c_r);
+ stopCount += 1;
+ }
+ t_l = t_r;
+ c_l = c_r;
+ }
+
+ ctx->ts[stopCount] = t_l;
+ add_const_color(ctx, stopCount++, c_l);
+
+ ctx->stopCount = stopCount;
+ p->append(SkRasterPipeline::gradient, ctx);
+ }
+ }
+
+ if (decal_ctx) {
+ p->append(SkRasterPipeline::check_decal_mask, decal_ctx);
+ }
+
+ if (!premulGrad && !this->colorsAreOpaque()) {
+ p->append(SkRasterPipeline::premul);
+ }
+
+ p->extend(postPipeline);
+
+ return true;
+}
+
+
+bool SkGradientShaderBase::isOpaque() const {
+ return fColorsAreOpaque && (this->getTileMode() != SkTileMode::kDecal);
+}
+
+static unsigned rounded_divide(unsigned numer, unsigned denom) {
+ return (numer + (denom >> 1)) / denom;
+}
+
+bool SkGradientShaderBase::onAsLuminanceColor(SkColor* lum) const {
+ // we just compute an average color.
+ // possibly we could weight this based on the proportional width for each color
+ // assuming they are not evenly distributed in the fPos array.
+ int r = 0;
+ int g = 0;
+ int b = 0;
+ const int n = fColorCount;
+ // TODO: use linear colors?
+ for (int i = 0; i < n; ++i) {
+ SkColor c = this->getLegacyColor(i);
+ r += SkColorGetR(c);
+ g += SkColorGetG(c);
+ b += SkColorGetB(c);
+ }
+ *lum = SkColorSetRGB(rounded_divide(r, n), rounded_divide(g, n), rounded_divide(b, n));
+ return true;
+}
+
+SkColor4fXformer::SkColor4fXformer(const SkColor4f* colors, int colorCount,
+ SkColorSpace* src, SkColorSpace* dst) {
+ fColors = colors;
+
+ if (dst && !SkColorSpace::Equals(src, dst)) {
+ fStorage.reset(colorCount);
+
+ auto info = SkImageInfo::Make(colorCount,1, kRGBA_F32_SkColorType, kUnpremul_SkAlphaType);
+
+ SkConvertPixels(info.makeColorSpace(sk_ref_sp(dst)), fStorage.begin(), info.minRowBytes(),
+ info.makeColorSpace(sk_ref_sp(src)), fColors , info.minRowBytes());
+
+ fColors = fStorage.begin();
+ }
+}
+
+void SkGradientShaderBase::commonAsAGradient(GradientInfo* info) const {
+ if (info) {
+ if (info->fColorCount >= fColorCount) {
+ if (info->fColors) {
+ for (int i = 0; i < fColorCount; ++i) {
+ info->fColors[i] = this->getLegacyColor(i);
+ }
+ }
+ if (info->fColorOffsets) {
+ for (int i = 0; i < fColorCount; ++i) {
+ info->fColorOffsets[i] = this->getPos(i);
+ }
+ }
+ }
+ info->fColorCount = fColorCount;
+ info->fTileMode = fTileMode;
+ info->fGradientFlags = fGradFlags;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+// Return true if these parameters are valid/legal/safe to construct a gradient
+//
+static bool valid_grad(const SkColor4f colors[], const SkScalar pos[], int count,
+ SkTileMode tileMode) {
+ return nullptr != colors && count >= 1 && (unsigned)tileMode < kSkTileModeCount;
+}
+
+static void desc_init(SkGradientShaderBase::Descriptor* desc,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkTileMode mode, uint32_t flags, const SkMatrix* localMatrix) {
+ SkASSERT(colorCount > 1);
+
+ desc->fColors = colors;
+ desc->fColorSpace = std::move(colorSpace);
+ desc->fPos = pos;
+ desc->fCount = colorCount;
+ desc->fTileMode = mode;
+ desc->fGradFlags = flags;
+ desc->fLocalMatrix = localMatrix;
+}
+
+static SkColor4f average_gradient_color(const SkColor4f colors[], const SkScalar pos[],
+ int colorCount) {
+ // The gradient is a piecewise linear interpolation between colors. For a given interval,
+    // the integral between the two endpoints is 0.5 * (ci + cj) * (pj - pi), which gives that
+    // interval's width-weighted average color. The overall average is the sum over all pieces.
+    // Keep in mind that the provided gradient definition may implicitly use p=0 and p=1.
+ Sk4f blend(0.0);
+ // Bake 1/(colorCount - 1) uniform stop difference into this scale factor
+ SkScalar wScale = pos ? 0.5 : 0.5 / (colorCount - 1);
+ for (int i = 0; i < colorCount - 1; ++i) {
+ // Calculate the average color for the interval between pos(i) and pos(i+1)
+ Sk4f c0 = Sk4f::Load(&colors[i]);
+ Sk4f c1 = Sk4f::Load(&colors[i + 1]);
+ // when pos == null, there are colorCount uniformly distributed stops, going from 0 to 1,
+ // so pos[i + 1] - pos[i] = 1/(colorCount-1)
+ SkScalar w = pos ? (pos[i + 1] - pos[i]) : SK_Scalar1;
+ blend += wScale * w * (c1 + c0);
+ }
+
+ // Now account for any implicit intervals at the start or end of the stop definitions
+ if (pos) {
+ if (pos[0] > 0.0) {
+ // The first color is fixed between p = 0 to pos[0], so 0.5 * (ci + cj) * (pj - pi)
+ // becomes 0.5 * (c + c) * (pj - 0) = c * pj
+ Sk4f c = Sk4f::Load(&colors[0]);
+ blend += pos[0] * c;
+ }
+ if (pos[colorCount - 1] < SK_Scalar1) {
+ // The last color is fixed between pos[n-1] to p = 1, so 0.5 * (ci + cj) * (pj - pi)
+ // becomes 0.5 * (c + c) * (1 - pi) = c * (1 - pi)
+ Sk4f c = Sk4f::Load(&colors[colorCount - 1]);
+ blend += (1 - pos[colorCount - 1]) * c;
+ }
+ }
+
+ SkColor4f avg;
+ blend.store(&avg);
+ return avg;
+}
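+
+// Worked example (illustrative): two colors c0, c1 with pos == {0.25, 0.75}. The
+// explicit interval contributes 0.5*(c0 + c1)*0.5 == 0.25*(c0 + c1), the implicit
+// [0, 0.25] run contributes 0.25*c0, and the implicit [0.75, 1] run contributes
+// 0.25*c1, for an overall average of 0.5*c0 + 0.5*c1, as symmetry suggests.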
+
+// The default SkScalarNearlyZero threshold of .0024 is too big and causes regressions for svg
+// gradients defined in the wild.
+static constexpr SkScalar kDegenerateThreshold = SK_Scalar1 / (1 << 15);
+
+// Except for special circumstances of clamped gradients, every gradient shape--when degenerate--
+// can be mapped to the same fallbacks. The specific shape factories must account for special
+// clamped conditions separately, this will always return the last color for clamped gradients.
+static sk_sp<SkShader> make_degenerate_gradient(const SkColor4f colors[], const SkScalar pos[],
+ int colorCount, sk_sp<SkColorSpace> colorSpace,
+ SkTileMode mode) {
+ switch(mode) {
+ case SkTileMode::kDecal:
+            // normally this would reject the area outside of the interpolation region; since
+            // that region is empty in the degenerate case, the entire draw region is empty
+ return SkShaders::Empty();
+ case SkTileMode::kRepeat:
+ case SkTileMode::kMirror:
+ // repeat and mirror are treated the same: the border colors are never visible,
+ // but approximate the final color as infinite repetitions of the colors, so
+ // it can be represented as the average color of the gradient.
+ return SkShaders::Color(
+ average_gradient_color(colors, pos, colorCount), std::move(colorSpace));
+ case SkTileMode::kClamp:
+ // Depending on how the gradient shape degenerates, there may be a more specialized
+ // fallback representation for the factories to use, but this is a reasonable default.
+ return SkShaders::Color(colors[colorCount - 1], std::move(colorSpace));
+ }
+ SkDEBUGFAIL("Should not be reached");
+ return nullptr;
+}
+
+// assumes colors is SkColor4f* and pos is SkScalar*
+#define EXPAND_1_COLOR(count) \
+ SkColor4f tmp[2]; \
+ do { \
+ if (1 == count) { \
+ tmp[0] = tmp[1] = colors[0]; \
+ colors = tmp; \
+ pos = nullptr; \
+ count = 2; \
+ } \
+ } while (0)
+
+struct ColorStopOptimizer {
+ ColorStopOptimizer(const SkColor4f* colors, const SkScalar* pos, int count, SkTileMode mode)
+ : fColors(colors)
+ , fPos(pos)
+ , fCount(count) {
+
+ if (!pos || count != 3) {
+ return;
+ }
+
+ if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 0.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkTileMode::kRepeat == mode || SkTileMode::kMirror == mode ||
+ colors[0] == colors[1]) {
+
+ // Ignore the leftmost color/pos.
+ fColors += 1;
+ fPos += 1;
+ fCount = 2;
+ }
+ } else if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 1.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkTileMode::kRepeat == mode || SkTileMode::kMirror == mode ||
+ colors[1] == colors[2]) {
+
+ // Ignore the rightmost color/pos.
+ fCount = 2;
+ }
+ }
+ }
+
+ const SkColor4f* fColors;
+ const SkScalar* fPos;
+ int fCount;
+};
+
+struct ColorConverter {
+ ColorConverter(const SkColor* colors, int count) {
+ const float ONE_OVER_255 = 1.f / 255;
+ for (int i = 0; i < count; ++i) {
+ fColors4f.push_back({
+ SkColorGetR(colors[i]) * ONE_OVER_255,
+ SkColorGetG(colors[i]) * ONE_OVER_255,
+ SkColorGetB(colors[i]) * ONE_OVER_255,
+ SkColorGetA(colors[i]) * ONE_OVER_255 });
+ }
+ }
+
+ SkSTArray<2, SkColor4f, true> fColors4f;
+};
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor colors[],
+ const SkScalar pos[], int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeLinear(pts, converter.fColors4f.begin(), nullptr, pos, colorCount, mode, flags,
+ localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (!pts || !SkScalarIsFinite((pts[1] - pts[0]).length())) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyZero((pts[1] - pts[0]).length(), kDegenerateThreshold)) {
+        // Degenerate gradient; the only tricky complication is clamp mode, where the limit of
+        // the gradient approaches two half planes of solid color (first and last). However, the
+        // planes are divided by the line perpendicular to the start-to-end vector, which becomes
+        // undefined once start and end coincide, so just use the end color for a stable solution.
+ return make_degenerate_gradient(colors, pos, colorCount, std::move(colorSpace), mode);
+ }
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkLinearGradient>(pts, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[],
+ const SkScalar pos[], int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeRadial(center, radius, converter.fColors4f.begin(), nullptr, pos, colorCount, mode,
+ flags, localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (radius < 0) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyZero(radius, kDegenerateThreshold)) {
+        // Degenerate gradient optimization; no special logic is needed for a clamped radial gradient
+ return make_degenerate_gradient(colors, pos, colorCount, std::move(colorSpace), mode);
+ }
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkRadialGradient>(center, radius, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeTwoPointConical(start, startRadius, end, endRadius, converter.fColors4f.begin(),
+ nullptr, pos, colorCount, mode, flags, localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (startRadius < 0 || endRadius < 0) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (SkScalarNearlyZero((start - end).length(), kDegenerateThreshold)) {
+ // If the center positions are the same, then the gradient is the radial variant of a 2 pt
+ // conical gradient, an actual radial gradient (startRadius == 0), or it is fully degenerate
+ // (startRadius == endRadius).
+ if (SkScalarNearlyEqual(startRadius, endRadius, kDegenerateThreshold)) {
+ // Degenerate case, where the interpolation region area approaches zero. The proper
+ // behavior depends on the tile mode, which is consistent with the default degenerate
+ // gradient behavior, except when mode = clamp and the radii > 0.
+ if (mode == SkTileMode::kClamp && endRadius > kDegenerateThreshold) {
+ // The interpolation region becomes an infinitely thin ring at the radius, so the
+ // final gradient will be the first color repeated from p=0 to 1, and then a hard
+ // stop switching to the last color at p=1.
+ static constexpr SkScalar circlePos[3] = {0, 1, 1};
+ SkColor4f reColors[3] = {colors[0], colors[0], colors[colorCount - 1]};
+ return MakeRadial(start, endRadius, reColors, std::move(colorSpace),
+ circlePos, 3, mode, flags, localMatrix);
+ } else {
+ // Otherwise use the default degenerate case
+ return make_degenerate_gradient(
+ colors, pos, colorCount, std::move(colorSpace), mode);
+ }
+ } else if (SkScalarNearlyZero(startRadius, kDegenerateThreshold)) {
+ // We can treat this gradient as radial, which is faster. If we got here, we know
+            // that endRadius is not zero, so this produces a meaningful gradient.
+ return MakeRadial(start, endRadius, colors, std::move(colorSpace), pos, colorCount,
+ mode, flags, localMatrix);
+ }
+ // Else it's the 2pt conical radial variant with no degenerate radii, so fall through to the
+ // regular 2pt constructor.
+ }
+
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+ EXPAND_1_COLOR(colorCount);
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return SkTwoPointConicalGradient::Create(start, startRadius, end, endRadius, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ SkScalar startAngle,
+ SkScalar endAngle,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeSweep(cx, cy, converter.fColors4f.begin(), nullptr, pos, colorCount,
+ mode, startAngle, endAngle, flags, localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ SkScalar startAngle,
+ SkScalar endAngle,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (!SkScalarIsFinite(startAngle) || !SkScalarIsFinite(endAngle) || startAngle > endAngle) {
+ return nullptr;
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyEqual(startAngle, endAngle, kDegenerateThreshold)) {
+ // Degenerate gradient, which should follow default degenerate behavior unless it is
+ // clamped and the angle is greater than 0.
+ if (mode == SkTileMode::kClamp && endAngle > kDegenerateThreshold) {
+            // In this case, the first color is repeated from 0 to the angle, then a hard stop
+ // switches to the last color (all other colors are compressed to the infinitely thin
+ // interpolation region).
+ static constexpr SkScalar clampPos[3] = {0, 1, 1};
+ SkColor4f reColors[3] = {colors[0], colors[0], colors[colorCount - 1]};
+ return MakeSweep(cx, cy, reColors, std::move(colorSpace), clampPos, 3, mode, 0,
+ endAngle, flags, localMatrix);
+ } else {
+ return make_degenerate_gradient(colors, pos, colorCount, std::move(colorSpace), mode);
+ }
+ }
+
+ if (startAngle <= 0 && endAngle >= 360) {
+ // If the t-range includes [0,1], then we can always use clamping (presumably faster).
+ mode = SkTileMode::kClamp;
+ }
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+
+ const SkScalar t0 = startAngle / 360,
+ t1 = endAngle / 360;
+
+ return sk_make_sp<SkSweepGradient>(SkPoint::Make(cx, cy), t0, t1, desc);
+}
+
+void SkGradientShader::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkLinearGradient);
+ SK_REGISTER_FLATTENABLE(SkRadialGradient);
+ SK_REGISTER_FLATTENABLE(SkSweepGradient);
+ SK_REGISTER_FLATTENABLE(SkTwoPointConicalGradient);
+}
diff --git a/gfx/skia/skia/src/shaders/gradients/SkGradientShaderPriv.h b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderPriv.h
new file mode 100644
index 0000000000..b362e90c83
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderPriv.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShaderPriv_DEFINED
+#define SkGradientShaderPriv_DEFINED
+
+#include "include/effects/SkGradientShader.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkColorSpace;
+class SkRasterPipeline;
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkGradientShaderBase : public SkShaderBase {
+public:
+ struct Descriptor {
+ Descriptor() {
+ sk_bzero(this, sizeof(*this));
+ fTileMode = SkTileMode::kClamp;
+ }
+
+ const SkMatrix* fLocalMatrix;
+ const SkColor4f* fColors;
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkScalar* fPos;
+ int fCount;
+ SkTileMode fTileMode;
+ uint32_t fGradFlags;
+
+ void flatten(SkWriteBuffer&) const;
+ };
+
+ class DescriptorScope : public Descriptor {
+ public:
+ DescriptorScope() {}
+
+ bool unflatten(SkReadBuffer&);
+
+ // fColors and fPos always point into local memory, so they can be safely mutated
+ //
+ SkColor4f* mutableColors() { return const_cast<SkColor4f*>(fColors); }
+ SkScalar* mutablePos() { return const_cast<SkScalar*>(fPos); }
+
+ private:
+ SkSTArray<16, SkColor4f, true> fColorStorage;
+ SkSTArray<16, SkScalar , true> fPosStorage;
+ SkMatrix fLocalMatrixStorage;
+ };
+
+ SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit);
+ ~SkGradientShaderBase() override;
+
+ bool isOpaque() const override;
+
+ uint32_t getGradFlags() const { return fGradFlags; }
+
+ const SkMatrix& getGradientMatrix() const { return fPtsToUnit; }
+
+protected:
+ class GradientShaderBase4fContext;
+
+ SkGradientShaderBase(SkReadBuffer& );
+ void flatten(SkWriteBuffer&) const override;
+
+ void commonAsAGradient(GradientInfo*) const;
+
+ bool onAsLuminanceColor(SkColor*) const override;
+
+ bool onAppendStages(const SkStageRec&) const override;
+
+ virtual void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const = 0;
+
+ template <typename T, typename... Args>
+ static Context* CheckedMakeContext(SkArenaAlloc* alloc, Args&&... args) {
+ auto* ctx = alloc->make<T>(std::forward<Args>(args)...);
+ if (!ctx->isValid()) {
+ return nullptr;
+ }
+ return ctx;
+ }
+
+ const SkMatrix fPtsToUnit;
+ SkTileMode fTileMode;
+ uint8_t fGradFlags;
+
+public:
+ SkScalar getPos(int i) const {
+ SkASSERT(i < fColorCount);
+ return fOrigPos ? fOrigPos[i] : SkIntToScalar(i) / (fColorCount - 1);
+ }
+
+ SkColor getLegacyColor(int i) const {
+ SkASSERT(i < fColorCount);
+ return fOrigColors4f[i].toSkColor();
+ }
+
+ bool colorsCanConvertToSkColor() const {
+ bool canConvert = true;
+ for (int i = 0; i < fColorCount; ++i) {
+ canConvert &= fOrigColors4f[i].fitsInBytes();
+ }
+ return canConvert;
+ }
+
+ SkColor4f* fOrigColors4f; // original colors, as floats
+ SkScalar* fOrigPos; // original positions
+ int fColorCount;
+ sk_sp<SkColorSpace> fColorSpace; // color space of gradient stops
+
+ bool colorsAreOpaque() const { return fColorsAreOpaque; }
+
+ SkTileMode getTileMode() const { return fTileMode; }
+
+private:
+ // Reserve inline space for up to 4 stops.
+ static constexpr size_t kInlineStopCount = 4;
+ static constexpr size_t kInlineStorageSize = (sizeof(SkColor4f) + sizeof(SkScalar))
+ * kInlineStopCount;
+ SkAutoSTMalloc<kInlineStorageSize, uint8_t> fStorage;
+
+ bool fColorsAreOpaque;
+
+ typedef SkShaderBase INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkColor4fXformer {
+ SkColor4fXformer(const SkColor4f* colors, int colorCount, SkColorSpace* src, SkColorSpace* dst);
+
+ const SkColor4f* fColors;
+ SkSTArray<4, SkColor4f, true> fStorage;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp
new file mode 100644
index 0000000000..98f15cf7f2
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/gradients/SkLinearGradient.h"
+
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/gradients/Sk4fLinearGradient.h"
+
+static SkMatrix pts_to_unit_matrix(const SkPoint pts[2]) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ SkMatrix matrix;
+ matrix.setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
+ matrix.postTranslate(-pts[0].fX, -pts[0].fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
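+
+// Worked example (illustrative): pts == {(2,2), (2,6)} gives vec == (0,4), so the
+// matrix rotates by -90 degrees about (2,2) (setSinCos with sin == -1, cos == 0),
+// translates by (-2,-2), and scales by 1/4, mapping pts[0] to (0,0) and pts[1]
+// to (1,0) as the gradient's unit space requires.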
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLinearGradient::SkLinearGradient(const SkPoint pts[2], const Descriptor& desc)
+ : SkGradientShaderBase(desc, pts_to_unit_matrix(pts))
+ , fStart(pts[0])
+ , fEnd(pts[1]) {
+}
+
+sk_sp<SkFlattenable> SkLinearGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ SkPoint pts[2];
+ pts[0] = buffer.readPoint();
+ pts[1] = buffer.readPoint();
+ return SkGradientShader::MakeLinear(pts, desc.fColors, std::move(desc.fColorSpace), desc.fPos,
+ desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkLinearGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fStart);
+ buffer.writePoint(fEnd);
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkLinearGradient::onMakeContext(
+ const ContextRec& rec, SkArenaAlloc* alloc) const
+{
+ // make sure our colorspaces are compatible with legacy blits
+ if (!rec.isLegacyCompatible(fColorSpace.get())) {
+ return nullptr;
+ }
+ // Can't use legacy blit if we can't represent our colors as SkColors
+ if (!this->colorsCanConvertToSkColor()) {
+ return nullptr;
+ }
+
+ return fTileMode != SkTileMode::kDecal
+ ? CheckedMakeContext<LinearGradient4fContext>(alloc, *this, rec)
+ : nullptr;
+}
+#endif
+
+void SkLinearGradient::appendGradientStages(SkArenaAlloc*, SkRasterPipeline*,
+ SkRasterPipeline*) const {
+ // No extra stage needed for linear gradients.
+}
+
+SkShader::GradientType SkLinearGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fStart;
+ info->fPoint[1] = fEnd;
+ }
+ return kLinear_GradientType;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkLinearGradient::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ return GrGradientShader::MakeLinear(*this, args);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h
new file mode 100644
index 0000000000..fff649380b
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearGradient_DEFINED
+#define SkLinearGradient_DEFINED
+
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+
+class SkLinearGradient : public SkGradientShaderBase {
+public:
+ SkLinearGradient(const SkPoint pts[2], const Descriptor&);
+
+ GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ SkLinearGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const final;
+
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLinearGradient)
+
+ class LinearGradient4fContext;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+ const SkPoint fStart;
+ const SkPoint fEnd;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp
new file mode 100644
index 0000000000..679aa8f557
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/gradients/SkRadialGradient.h"
+
+namespace {
+
+SkMatrix rad_to_unit_matrix(const SkPoint& center, SkScalar radius) {
+ SkScalar inv = SkScalarInvert(radius);
+
+ SkMatrix matrix;
+ matrix.setTranslate(-center.fX, -center.fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
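+
+// Worked example (illustrative): center == (3,4), radius == 2. The matrix
+// translates by (-3,-4) and scales by 1/2, so the circle point (5,4) maps to
+// (1,0) and (3,6) maps to (0,1): the gradient circle becomes the unit circle.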
+
+} // namespace
+
+/////////////////////////////////////////////////////////////////////
+
+SkRadialGradient::SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor& desc)
+ : SkGradientShaderBase(desc, rad_to_unit_matrix(center, radius))
+ , fCenter(center)
+ , fRadius(radius) {
+}
+
+SkShader::GradientType SkRadialGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ info->fRadius[0] = fRadius;
+ }
+ return kRadial_GradientType;
+}
+
+sk_sp<SkFlattenable> SkRadialGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+ const SkScalar radius = buffer.readScalar();
+ return SkGradientShader::MakeRadial(center, radius, desc.fColors, std::move(desc.fColorSpace),
+ desc.fPos, desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkRadialGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter);
+ buffer.writeScalar(fRadius);
+}
+
+void SkRadialGradient::appendGradientStages(SkArenaAlloc*, SkRasterPipeline* p,
+ SkRasterPipeline*) const {
+ p->append(SkRasterPipeline::xy_to_radius);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkRadialGradient::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ return GrGradientShader::MakeRadial(*this, args);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.h b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.h
new file mode 100644
index 0000000000..0e615048fb
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRadialGradient_DEFINED
+#define SkRadialGradient_DEFINED
+
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+
+class SkRadialGradient final : public SkGradientShaderBase {
+public:
+ SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor&);
+
+ GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+protected:
+ SkRadialGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkRadialGradient)
+
+ const SkPoint fCenter;
+ const SkScalar fRadius;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp
new file mode 100644
index 0000000000..68f0cbac61
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkFloatingPoint.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/gradients/SkSweepGradient.h"
+
+SkSweepGradient::SkSweepGradient(const SkPoint& center, SkScalar t0, SkScalar t1,
+ const Descriptor& desc)
+ : SkGradientShaderBase(desc, SkMatrix::MakeTrans(-center.x(), -center.y()))
+ , fCenter(center)
+ , fTBias(-t0)
+ , fTScale(1 / (t1 - t0))
+{
+ SkASSERT(t0 < t1);
+}
+
+SkShader::GradientType SkSweepGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ }
+ return kSweep_GradientType;
+}
+
+static std::tuple<SkScalar, SkScalar> angles_from_t_coeff(SkScalar tBias, SkScalar tScale) {
+ return std::make_tuple(-tBias * 360, (sk_ieee_float_divide(1, tScale) - tBias) * 360);
+}
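+
+// Sanity check: with tBias == -t0 and tScale == 1/(t1 - t0) (see the constructor),
+// this returns (t0*360, t1*360), recovering the original start/end angles.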
+
+sk_sp<SkFlattenable> SkSweepGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+
+ SkScalar startAngle = 0,
+ endAngle = 360;
+ if (!buffer.isVersionLT(SkPicturePriv::kTileInfoInSweepGradient_Version)) {
+ const auto tBias = buffer.readScalar(),
+ tScale = buffer.readScalar();
+ std::tie(startAngle, endAngle) = angles_from_t_coeff(tBias, tScale);
+ }
+
+ return SkGradientShader::MakeSweep(center.x(), center.y(), desc.fColors,
+ std::move(desc.fColorSpace), desc.fPos, desc.fCount,
+ desc.fTileMode, startAngle, endAngle,
+ desc.fGradFlags, desc.fLocalMatrix);
+}
+
+void SkSweepGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter);
+ buffer.writeScalar(fTBias);
+ buffer.writeScalar(fTScale);
+}
+
+void SkSweepGradient::appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* p,
+ SkRasterPipeline*) const {
+ p->append(SkRasterPipeline::xy_to_unit_angle);
+ p->append_matrix(alloc, SkMatrix::Concat(SkMatrix::MakeScale(fTScale, 1),
+ SkMatrix::MakeTrans(fTBias , 0)));
+}
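+
+// Worked example (illustrative): startAngle == 90 and endAngle == 270 yield
+// fTBias == -0.25 and fTScale == 2. xy_to_unit_angle produces t in [0,1), and the
+// appended matrix maps it to (t + fTBias) * fTScale, so a 180-degree input
+// (t == 0.5) lands at t' == 0.5, the middle of the gradient.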
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkSweepGradient::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ return GrGradientShader::MakeSweep(*this, args);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.h b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.h
new file mode 100644
index 0000000000..71d2221dc7
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSweepGradient_DEFINED
+#define SkSweepGradient_DEFINED
+
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+
+class SkSweepGradient final : public SkGradientShaderBase {
+public:
+ SkSweepGradient(const SkPoint& center, SkScalar t0, SkScalar t1, const Descriptor&);
+
+ GradientType asAGradient(GradientInfo* info) const override;
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+
+ SkScalar getTBias() const { return fTBias; }
+
+ SkScalar getTScale() const { return fTScale; }
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkSweepGradient)
+
+ const SkPoint fCenter;
+ const SkScalar fTBias,
+ fTScale;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp
new file mode 100644
index 0000000000..f158bdd975
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkFloatingPoint.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/gradients/SkTwoPointConicalGradient.h"
+
+#include <utility>
+
+// Please see https://skia.org/dev/design/conical for how our shader works.
+
+bool SkTwoPointConicalGradient::FocalData::set(SkScalar r0, SkScalar r1, SkMatrix* matrix) {
+ fIsSwapped = false;
+ fFocalX = sk_ieee_float_divide(r0, (r0 - r1));
+ if (SkScalarNearlyZero(fFocalX - 1)) {
+ // swap r0, r1
+ matrix->postTranslate(-1, 0);
+ matrix->postScale(-1, 1);
+ std::swap(r0, r1);
+ fFocalX = 0; // because r0 is now 0
+ fIsSwapped = true;
+ }
+
+ // Map {focal point, (1, 0)} to {(0, 0), (1, 0)}
+ const SkPoint from[2] = { {fFocalX, 0}, {1, 0} };
+ const SkPoint to[2] = { {0, 0}, {1, 0} };
+ SkMatrix focalMatrix;
+ if (!focalMatrix.setPolyToPoly(from, to, 2)) {
+ return false;
+ }
+ matrix->postConcat(focalMatrix);
+ fR1 = r1 / SkScalarAbs(1 - fFocalX); // focalMatrix has a scale of 1/(1-f)
+
+ // The following transformations are just to accelerate the shader computation by saving
+    // some arithmetic operations.
+ if (this->isFocalOnCircle()) {
+ matrix->postScale(0.5, 0.5);
+ } else {
+ matrix->postScale(fR1 / (fR1 * fR1 - 1), 1 / sqrt(SkScalarAbs(fR1 * fR1 - 1)));
+ }
+ matrix->postScale(SkScalarAbs(1 - fFocalX), SkScalarAbs(1 - fFocalX)); // scale |1 - f|
+ return true;
+}
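+
+// Worked example (illustrative): r0 == 0.25, r1 == 0.75 (already normalized by the
+// center distance). fFocalX == 0.25 / (0.25 - 0.75) == -0.5, which is not nearly 1,
+// so no swap occurs; fR1 == 0.75 / |1 - (-0.5)| == 0.5, and since fR1 <= 1 this
+// gradient is not well behaved per FocalData::isWellBehaved().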
+
+sk_sp<SkShader> SkTwoPointConicalGradient::Create(const SkPoint& c0, SkScalar r0,
+ const SkPoint& c1, SkScalar r1,
+ const Descriptor& desc) {
+ SkMatrix gradientMatrix;
+ Type gradientType;
+
+ if (SkScalarNearlyZero((c0 - c1).length())) {
+ if (SkScalarNearlyZero(SkTMax(r0, r1)) || SkScalarNearlyEqual(r0, r1)) {
+ // Degenerate case; avoid dividing by zero. Should have been caught by caller but
+ // just in case, recheck here.
+ return nullptr;
+ }
+ // Concentric case: we can pretend we're radial (with a tiny twist).
+ const SkScalar scale = sk_ieee_float_divide(1, SkTMax(r0, r1));
+ gradientMatrix = SkMatrix::MakeTrans(-c1.x(), -c1.y());
+ gradientMatrix.postScale(scale, scale);
+
+ gradientType = Type::kRadial;
+ } else {
+ const SkPoint centers[2] = { c0 , c1 };
+ const SkPoint unitvec[2] = { {0, 0}, {1, 0} };
+
+ if (!gradientMatrix.setPolyToPoly(centers, unitvec, 2)) {
+ // Degenerate case.
+ return nullptr;
+ }
+
+ gradientType = SkScalarNearlyZero(r1 - r0) ? Type::kStrip : Type::kFocal;
+ }
+
+ FocalData focalData;
+ if (gradientType == Type::kFocal) {
+ const auto dCenter = (c0 - c1).length();
+ if (!focalData.set(r0 / dCenter, r1 / dCenter, &gradientMatrix)) {
+ return nullptr;
+ }
+ }
+ return sk_sp<SkShader>(new SkTwoPointConicalGradient(c0, r0, c1, r1, desc,
+ gradientType, gradientMatrix, focalData));
+}
+
+SkTwoPointConicalGradient::SkTwoPointConicalGradient(
+ const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const Descriptor& desc, Type type, const SkMatrix& gradientMatrix, const FocalData& data)
+ : SkGradientShaderBase(desc, gradientMatrix)
+ , fCenter1(start)
+ , fCenter2(end)
+ , fRadius1(startRadius)
+ , fRadius2(endRadius)
+ , fType(type)
+{
+ // this is degenerate, and should be caught by our caller
+ SkASSERT(fCenter1 != fCenter2 || fRadius1 != fRadius2);
+ if (type == Type::kFocal) {
+ fFocalData = data;
+ }
+}
+
+bool SkTwoPointConicalGradient::isOpaque() const {
+ // Because areas outside the cone are left untouched, we cannot treat the
+ // shader as opaque even if the gradient itself is opaque.
+ // TODO(junov): Compute whether the cone fills the plane crbug.com/222380
+ return false;
+}
+
+// Returns the original non-sorted version of the gradient
+SkShader::GradientType SkTwoPointConicalGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ }
+ return kConical_GradientType;
+}
+
+sk_sp<SkFlattenable> SkTwoPointConicalGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ SkPoint c1 = buffer.readPoint();
+ SkPoint c2 = buffer.readPoint();
+ SkScalar r1 = buffer.readScalar();
+ SkScalar r2 = buffer.readScalar();
+
+ if (buffer.isVersionLT(SkPicturePriv::k2PtConicalNoFlip_Version) && buffer.readBool()) {
+ using std::swap;
+ // legacy flipped gradient
+ swap(c1, c2);
+ swap(r1, r2);
+
+ SkColor4f* colors = desc.mutableColors();
+ SkScalar* pos = desc.mutablePos();
+ const int last = desc.fCount - 1;
+ const int half = desc.fCount >> 1;
+ for (int i = 0; i < half; ++i) {
+ swap(colors[i], colors[last - i]);
+ if (pos) {
+ SkScalar tmp = pos[i];
+ pos[i] = SK_Scalar1 - pos[last - i];
+ pos[last - i] = SK_Scalar1 - tmp;
+ }
+ }
+ if (pos) {
+ if (desc.fCount & 1) {
+ pos[half] = SK_Scalar1 - pos[half];
+ }
+ }
+ }
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkGradientShader::MakeTwoPointConical(c1, r1, c2, r2, desc.fColors,
+ std::move(desc.fColorSpace), desc.fPos,
+ desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkTwoPointConicalGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter1);
+ buffer.writePoint(fCenter2);
+ buffer.writeScalar(fRadius1);
+ buffer.writeScalar(fRadius2);
+}
+
+void SkTwoPointConicalGradient::appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* p,
+ SkRasterPipeline* postPipeline) const {
+ const auto dRadius = fRadius2 - fRadius1;
+
+ if (fType == Type::kRadial) {
+ p->append(SkRasterPipeline::xy_to_radius);
+
+ // Tiny twist: radial computes a t for [0, r2], but we want a t for [r1, r2].
+ auto scale = SkTMax(fRadius1, fRadius2) / dRadius;
+ auto bias = -fRadius1 / dRadius;
+
+ p->append_matrix(alloc, SkMatrix::Concat(SkMatrix::MakeTrans(bias, 0),
+ SkMatrix::MakeScale(scale, 1)));
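+        // Sanity check: xy_to_radius produces t == r / max(fRadius1, fRadius2)
+        // (the gradient matrix was scaled by the reciprocal of the max radius),
+        // so t*scale + bias == r/dRadius - fRadius1/dRadius
+        // == (r - fRadius1) / (fRadius2 - fRadius1), the desired remapping.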
+ return;
+ }
+
+ if (fType == Type::kStrip) {
+ auto* ctx = alloc->make<SkRasterPipeline_2PtConicalCtx>();
+ SkScalar scaledR0 = fRadius1 / this->getCenterX1();
+ ctx->fP0 = scaledR0 * scaledR0;
+ p->append(SkRasterPipeline::xy_to_2pt_conical_strip, ctx);
+ p->append(SkRasterPipeline::mask_2pt_conical_nan, ctx);
+ postPipeline->append(SkRasterPipeline::apply_vector_mask, &ctx->fMask);
+ return;
+ }
+
+ auto* ctx = alloc->make<SkRasterPipeline_2PtConicalCtx>();
+ ctx->fP0 = 1/fFocalData.fR1;
+ ctx->fP1 = fFocalData.fFocalX;
+
+ if (fFocalData.isFocalOnCircle()) {
+ p->append(SkRasterPipeline::xy_to_2pt_conical_focal_on_circle);
+ } else if (fFocalData.isWellBehaved()) {
+ p->append(SkRasterPipeline::xy_to_2pt_conical_well_behaved, ctx);
+ } else if (fFocalData.isSwapped() || 1 - fFocalData.fFocalX < 0) {
+ p->append(SkRasterPipeline::xy_to_2pt_conical_smaller, ctx);
+ } else {
+ p->append(SkRasterPipeline::xy_to_2pt_conical_greater, ctx);
+ }
+
+ if (!fFocalData.isWellBehaved()) {
+ p->append(SkRasterPipeline::mask_2pt_conical_degenerates, ctx);
+ }
+ if (1 - fFocalData.fFocalX < 0) {
+ p->append(SkRasterPipeline::negate_x);
+ }
+ if (!fFocalData.isNativelyFocal()) {
+ p->append(SkRasterPipeline::alter_2pt_conical_compensate_focal, ctx);
+ }
+ if (fFocalData.isSwapped()) {
+ p->append(SkRasterPipeline::alter_2pt_conical_unswap);
+ }
+ if (!fFocalData.isWellBehaved()) {
+ postPipeline->append(SkRasterPipeline::apply_vector_mask, &ctx->fMask);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "src/gpu/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkTwoPointConicalGradient::asFragmentProcessor(
+ const GrFPArgs& args) const {
+ return GrGradientShader::MakeConical(*this, args);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.h b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.h
new file mode 100644
index 0000000000..252696f2d7
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTwoPointConicalGradient_DEFINED
+#define SkTwoPointConicalGradient_DEFINED
+
+#include "src/shaders/gradients/SkGradientShaderPriv.h"
+
+class SkTwoPointConicalGradient final : public SkGradientShaderBase {
+public:
+ // See https://skia.org/dev/design/conical for what focal data means and how our shader works.
+ // We make it public so the GPU shader can also use it.
+ struct FocalData {
+ SkScalar fR1; // r1 after mapping focal point to (0, 0)
+ SkScalar fFocalX; // f
+ bool fIsSwapped; // whether we swapped r0, r1
+
+ // The input r0, r1 are the radii when we map centers to {(0, 0), (1, 0)}.
+ // We'll post concat matrix with our transformation matrix that maps focal point to (0, 0).
+ // Returns true if the set succeeded
+ bool set(SkScalar r0, SkScalar r1, SkMatrix* matrix);
+
+ // Whether the focal point (0, 0) is on the end circle with center (1, 0) and radius r1. If
+        // this is true, it's as if an aircraft is flying at Mach 1 and all circles (sound waves)
+ // will go through the focal point (aircraft). In our previous implementations, this was
+ // known as the edge case where the inside circle touches the outside circle (on the focal
+        // point). If we were to solve for t by brute force using a quadratic equation, this case
+ // implies that the quadratic equation degenerates to a linear equation.
+ bool isFocalOnCircle() const { return SkScalarNearlyZero(1 - fR1); }
+
+ bool isSwapped() const { return fIsSwapped; }
+ bool isWellBehaved() const { return !this->isFocalOnCircle() && fR1 > 1; }
+ bool isNativelyFocal() const { return SkScalarNearlyZero(fFocalX); }
+ };
+
+ enum class Type {
+ kRadial,
+ kStrip,
+ kFocal
+ };
+
+ static sk_sp<SkShader> Create(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const Descriptor&);
+
+ SkShader::GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&) const override;
+#endif
+ bool isOpaque() const override;
+
+ SkScalar getCenterX1() const { return SkPoint::Distance(fCenter1, fCenter2); }
+ SkScalar getStartRadius() const { return fRadius1; }
+ SkScalar getDiffRadius() const { return fRadius2 - fRadius1; }
+ const SkPoint& getStartCenter() const { return fCenter1; }
+ const SkPoint& getEndCenter() const { return fCenter2; }
+ SkScalar getEndRadius() const { return fRadius2; }
+
+ Type getType() const { return fType; }
+ const FocalData& getFocalData() const { return fFocalData; }
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTwoPointConicalGradient)
+
+ SkTwoPointConicalGradient(const SkPoint& c0, SkScalar r0,
+ const SkPoint& c1, SkScalar r1,
+ const Descriptor&, Type, const SkMatrix&, const FocalData&);
+
+ SkPoint fCenter1;
+ SkPoint fCenter2;
+ SkScalar fRadius1;
+ SkScalar fRadius2;
+ Type fType;
+
+ FocalData fFocalData;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
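+
+// For reference, instances are normally constructed through the public
+// factory rather than directly; a minimal, illustrative call (colors is a
+// hypothetical SkColor array):
+//
+//     sk_sp<SkShader> s = SkGradientShader::MakeTwoPointConical(
+//             {0, 0}, 10.0f, {50, 50}, 60.0f, colors, /*pos=*/nullptr,
+//             /*count=*/2, SkTileMode::kClamp);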
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/GLSL.std.450.h b/gfx/skia/skia/src/sksl/GLSL.std.450.h
new file mode 100644
index 0000000000..943fd8650f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 3;
+
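+// These opcodes are not used on their own: a SPIR-V module imports the
+// extended instruction set once, and each value below becomes the instruction
+// operand of an OpExtInst. In SPIR-V assembly (illustrative):
+//
+//     %glsl = OpExtInstImport "GLSL.std.450"
+//     %y    = OpExtInst %float %glsl Sqrt %x
+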
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
diff --git a/gfx/skia/skia/src/sksl/README b/gfx/skia/skia/src/sksl/README
new file mode 100644
index 0000000000..c1d7ae6b33
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/README
@@ -0,0 +1,173 @@
+Overview
+========
+
+SkSL ("Skia Shading Language") is a variant of GLSL which is used as Skia's
+internal shading language. SkSL is, at its heart, a single standardized version
+of GLSL which avoids all of the various version and dialect differences found
+in GLSL "in the wild", but it does bring a few of its own changes to the table.
+
+Skia uses the SkSL compiler to convert SkSL code to GLSL, GLSL ES, or SPIR-V
+before handing it over to the graphics driver.
+
+
+Differences from GLSL
+=====================
+
+* Precision modifiers are not used. 'float', 'int', and 'uint' are always high
+ precision. New types 'half', 'short', and 'ushort' are medium precision (we
+ do not use low precision).
+* Vector types are named <base type><columns>, so float2 instead of vec2 and
+ bool4 instead of bvec4
+* Matrix types are named <base type><columns>x<rows>, so float2x3 instead of
+ mat2x3 and double4x4 instead of dmat4
+* "@if" and "@switch" are static versions of if and switch. They behave exactly
+ the same as if and switch in all respects other than it being a compile-time
+ error to use a non-constant expression as a test.
+* GLSL caps can be referenced via the syntax 'sk_Caps.<name>', e.g.
+ sk_Caps.sampleVariablesSupport. The value will be a constant boolean or int,
+  as appropriate. Because SkSL supports constant folding and branch
+  elimination, an 'if' statement which statically queries a cap will collapse
+  down to the chosen branch, so that:
+
+ if (sk_Caps.externalTextureSupport)
+ do_something();
+ else
+ do_something_else();
+
+ will compile as if you had written either 'do_something();' or
+ 'do_something_else();', depending on whether that cap is enabled or not.
+* no #version statement is required, and it will be ignored if present
+* the output color is sk_FragColor (do not declare it)
+* use sk_Position instead of gl_Position. sk_Position is in device coordinates
+ rather than normalized coordinates.
+* use sk_PointSize instead of gl_PointSize
+* use sk_VertexID instead of gl_VertexID
+* use sk_InstanceID instead of gl_InstanceID
+* the fragment coordinate is sk_FragCoord, and is always relative to the upper
+ left.
+* use sk_Clockwise instead of gl_FrontFacing. This is always relative to an
+ upper left origin.
+* you do not need to include ".0" to make a number a float: "float2(x, y) * 4"
+  is perfectly legal in SkSL, unlike GLSL where it would often have to be
+  expressed as "float2(x, y) * 4.0". There is no performance penalty for this,
+  as the number is converted to a float at compile time
+* type suffixes on numbers (1.0f, 0xFFu) are both unnecessary and unsupported
+* creating a smaller vector from a larger vector (e.g. float2(float3(1))) is
+ intentionally disallowed, as it is just a wordier way of performing a swizzle.
+ Use swizzles instead.
+* Swizzle components, in addition to the normal rgba / xyzw components, can also
+ be LTRB (meaning "left/top/right/bottom", for when we store rectangles in
+ vectors), and may also be the constants '0' or '1' to produce a constant 0 or
+ 1 in that channel instead of selecting anything from the source vector.
+ foo.rgb1 is equivalent to float4(foo.rgb, 1).
+* All texture functions are named "sample", e.g. sample(sampler2D, float3) is
+ equivalent to GLSL's textureProj(sampler2D, float3).
+* Render target width and height are available via sk_Width and sk_Height
+* some built-in functions and one or two rarely-used language features are not
+ yet supported (sorry!)
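+
+Taken together, a small, purely illustrative snippet of legal SkSL:
+
+    half4 tint(float2 xy) {
+        float2 p = xy * 4;                // no ".0" suffix required
+        half4 c = half4(half2(p), 0, 1);
+        return c.rgb1;                    // constant swizzle: keep rgb, force a=1
+    }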
+
+SkSL is still under development, and is expected to diverge further from GLSL
+over time.
+
+
+SkSL Fragment Processors
+========================
+
+********************************************************************************
+*** IMPORTANT: You must set gn arg "skia_compile_processors = true" to cause ***
+*** .fp files to be recompiled! In order for compilation to succeed, you ***
+*** must run bin/fetch-clang-format (once) to install our blessed version. ***
+********************************************************************************
+
+An extension of SkSL allows for the creation of fragment processors in pure
+SkSL. The program defines its inputs similarly to a normal SkSL program (with
+'in' and 'uniform' variables), but the 'main()' function represents only this
+fragment processor's portion of the overall fragment shader.
+
+Within an '.fp' fragment processor file:
+
+* C++ code can be embedded in sections of the form:
+
+ @section_name { <arbitrary C++ code> }
+
+  Supported sections are:
+ @header (in the .h file, outside the class declaration)
+ @headerEnd (at the end of the .h file)
+ @class (in the .h file, inside the class declaration)
+ @cpp (in the .cpp file)
+ @cppEnd (at the end of the .cpp file)
+ @constructorParams (extra parameters to the constructor, comma-separated)
+ @constructor (replaces the default constructor)
+ @initializers (constructor initializer list, comma-separated)
+ @emitCode (extra code for the emitCode function)
+ @fields (extra private fields, each terminated with a semicolon)
+ @make (replaces the default Make function)
+ @clone (replaces the default clone() function)
+ @setData(<pdman>) (extra code for the setData function, where <pdman> is
+ the name of the GrGLSLProgramDataManager)
+ @test(<testData>) (the body of the TestCreate function, where <testData> is
+ the name of the GrProcessorTestData* parameter)
+ @coordTransform(<sampler>)
+ (the matrix to attach to the named sampler2D's
+ GrCoordTransform)
+ @samplerParams(<sampler>)
+ (the sampler params to attach to the named sampler2D)
+* global 'in' variables represent data passed to the fragment processor at
+  construction time. These variables become constructor parameters and are
+  stored in fragment processor fields. By default float2/half2 maps to SkPoint,
+  and float4/half4 maps to SkRect (in x, y, width, height order). Similarly,
+  int2/short2 maps to SkIPoint and int4/short4 maps to SkIRect. Use ctype
+  (below) to override this default mapping.
+* global variables support an additional 'ctype' layout key, providing the type
+ they should be represented as from within the C++ code. For instance, you can
+ use 'layout(ctype=SkPMColor4f) in half4 color;' to create a variable that looks
+ like a half4 on the SkSL side of things, and a SkPMColor4f on the C++ side of
+ things.
+* 'uniform' variables become, as one would expect, top-level uniforms. By
+ default they do not have any data provided to them; you will need to provide
+ them with data via the @setData section.
+* 'in uniform' variables are uniforms that are automatically wired up to
+ fragment processor constructor parameters. The fragment processor will accept
+ a parameter representing the uniform's value, and automatically plumb it
+ through to the uniform's value in its generated setData() function.
+* 'in uniform' variables support a 'tracked' flag in the layout that will
+ have the generated code automatically implement state tracking on the uniform
+ value to minimize GPU calls.
+* the 'sk_TransformedCoords2D' array provides access to 2D transformed
+ coordinates. sk_TransformedCoords2D[0] is equivalent to calling
+ fragBuilder->ensureCoords2D(args.fTransformedCoords[0]) (and the result is
+ cached, so you need not worry about using the value repeatedly).
+* Uniform variables support an additional 'when' layout key.
+ 'layout(when=foo) uniform int x;' means that this uniform will only be
+ emitted when the 'foo' expression is true.
+* 'in' variables support an additional 'key' layout key.
+ 'layout(key) in uniform int x;' means that this uniform should be included in
+ the program's key. Matrix variables additionally support 'key=identity',
+ which causes the key to consider only whether or not the matrix is an
+ identity matrix.
+* child processors can be declared with 'in fragmentProcessor <name>;', and can
+ be invoked by calling 'sample(<name>)' or 'sample(<name>, <inputColor>)'.
+ The first variant emits the child with a solid white input color. The second
+ variant emits the child with the result of the 2nd argument's expression,
+  which must evaluate to a half4. The sample function returns a half4.
+* By default, fragment processors must be non-null. The type for a nullable
+ fragment processor is 'fragmentProcessor?', as in
+ 'in fragmentProcessor? <name>'. You can check for the presence of such a
+ fragment processor by comparing it to 'null'.
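+
+Putting a few of these pieces together, a minimal hypothetical .fp file (the
+names here are illustrative, not an actual Skia effect) might look like:
+
+    in uniform half4 color;
+    in fragmentProcessor child;
+
+    void main() {
+        sk_OutColor = color * sample(child);
+    }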
+
+
+Creating a new .fp file
+=======================
+
+1. Ensure that you have set gn arg "skia_compile_processors = true"
+2. Create your new .fp file, generally under src/gpu/effects.
+3. Add the .fp file to sksl.gni.
+4. Build Skia. This will cause the .fp file to be compiled, resulting in a new
+ .cpp and .h file for the fragment processor.
+5. Add the .cpp and .h files to gpu.gni.
+6. Add the new processor's ClassID (k<ProcessorName>_ClassID) to
+ GrProcessor::ClassID.
+7. At this point you can reference the new fragment processor from within Skia.
+
+Once you have done this initial setup, simply re-build Skia to pick up any
+changes to the .fp file.
diff --git a/gfx/skia/skia/src/sksl/SkSLASTFile.h b/gfx/skia/skia/src/sksl/SkSLASTFile.h
new file mode 100644
index 0000000000..71fb8eb2df
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLASTFile.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTFILE
+#define SKSL_ASTFILE
+
+#include "src/sksl/SkSLASTNode.h"
+
+namespace SkSL {
+
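+/**
+ * Owns the flat vector of ASTNodes produced by the parser. An ASTNode::ID is
+ * simply an index into this vector; fRoot identifies the top-level node.
+ */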
+struct ASTFile {
+ ASTFile()
+ : fRoot(ASTNode::ID::Invalid()) {}
+
+ ASTNode& root() {
+ return fNodes[fRoot.fValue];
+ }
+
+private:
+ std::vector<ASTNode> fNodes;
+
+ ASTNode::ID fRoot;
+
+ friend class IRGenerator;
+ friend class Parser;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLASTNode.cpp b/gfx/skia/skia/src/sksl/SkSLASTNode.cpp
new file mode 100644
index 0000000000..34e59ebb32
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLASTNode.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLASTNode.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLString.h"
+
+namespace SkSL {
+
+String ASTNode::description() const {
+ switch (fKind) {
+ case Kind::kNull: return "";
+ case Kind::kBinary:
+ return "(" + this->begin()->description() + " " +
+ Compiler::OperatorName(getToken().fKind) + " " +
+ (this->begin() + 1)->description() + ")";
+ case Kind::kBlock: {
+ String result = "{\n";
+ for (const auto& c : *this) {
+ result += c.description();
+ result += "\n";
+ }
+ result += "}";
+ return result;
+ }
+ case Kind::kBool:
+ return getBool() ? "true" : "false";
+ case Kind::kBreak:
+ return "break";
+ case Kind::kCall: {
+ auto iter = this->begin();
+            String result = (iter++)->description();
+ result += "(";
+ const char* separator = "";
+ while (iter != this->end()) {
+ result += separator;
+ result += (iter++)->description();
+ separator = ",";
+ }
+ result += ")";
+ return result;
+ }
+ case Kind::kContinue:
+ return "continue";
+ case Kind::kDiscard:
+ return "discard";
+ case Kind::kDo:
+ return "do " + this->begin()->description() + " while (" +
+ (this->begin() + 1)->description() + ")";
+ case Kind::kEnum: {
+ String result = "enum ";
+ result += getString();
+ result += " {\n";
+ for (const auto& c : *this) {
+ result += c.description();
+ result += "\n";
+ }
+ result += "};";
+ return result;
+ }
+ case Kind::kEnumCase:
+ if (this->begin() != this->end()) {
+ return String(getString()) + " = " + this->begin()->description();
+ }
+ return getString();
+ case Kind::kExtension:
+ return "#extension " + getString();
+ case Kind::kField:
+ return this->begin()->description() + "." + getString();
+ case Kind::kFile: {
+ String result;
+ for (const auto& c : *this) {
+ result += c.description();
+ result += "\n";
+ }
+ return result;
+ }
+ case Kind::kFloat:
+ return to_string(getFloat());
+ case Kind::kFor:
+ return "for (" + this->begin()->description() + "; " +
+ (this->begin() + 1)->description() + "; " + (this->begin() + 2)->description() +
+ ") " + (this->begin() + 3)->description();
+ case Kind::kFunction: {
+ FunctionData fd = getFunctionData();
+ String result = fd.fModifiers.description();
+ if (result.size()) {
+ result += " ";
+ }
+ auto iter = this->begin();
+ result += (iter++)->description() + " " + fd.fName + "(";
+ const char* separator = "";
+ for (size_t i = 0; i < fd.fParameterCount; ++i) {
+ result += separator;
+ result += (iter++)->description();
+ separator = ", ";
+ }
+ result += ")";
+ if (iter != this->end()) {
+ result += " " + (iter++)->description();
+ SkASSERT(iter == this->end());
+ }
+ else {
+ result += ";";
+ }
+ return result;
+ }
+ case Kind::kIdentifier:
+ return getString();
+ case Kind::kIndex:
+ return this->begin()->description() + "[" + (this->begin() + 1)->description() + "]";
+ case Kind::kIf: {
+ String result;
+ if (getBool()) {
+ result = "@";
+ }
+ auto iter = this->begin();
+ result += "if (" + (iter++)->description() + ") ";
+ result += (iter++)->description();
+ if (iter != this->end()) {
+ result += " else " + (iter++)->description();
+ SkASSERT(iter == this->end());
+ }
+ return result;
+ }
+ case Kind::kInt:
+ return to_string(getInt());
+ case Kind::kInterfaceBlock: {
+ InterfaceBlockData id = getInterfaceBlockData();
+ String result = id.fModifiers.description() + " " + id.fTypeName + " {\n";
+ auto iter = this->begin();
+ for (size_t i = 0; i < id.fDeclarationCount; ++i) {
+ result += (iter++)->description() + "\n";
+ }
+ result += "} ";
+ result += id.fInstanceName;
+ for (size_t i = 0; i < id.fSizeCount; ++i) {
+ result += "[" + (iter++)->description() + "]";
+ }
+ SkASSERT(iter == this->end());
+ result += ";";
+ return result;
+ }
+ case Kind::kModifiers:
+ return getModifiers().description();
+ case Kind::kParameter: {
+ ParameterData pd = getParameterData();
+ auto iter = this->begin();
+ String result = (iter++)->description() + " " + pd.fName;
+ for (size_t i = 0; i < pd.fSizeCount; ++i) {
+ result += "[" + (iter++)->description() + "]";
+ }
+ if (iter != this->end()) {
+ result += " = " + (iter++)->description();
+ SkASSERT(iter == this->end());
+ }
+ return result;
+ }
+ case Kind::kPostfix:
+ return this->begin()->description() + Compiler::OperatorName(getToken().fKind);
+ case Kind::kPrefix:
+ return Compiler::OperatorName(getToken().fKind) + this->begin()->description();
+ case Kind::kReturn:
+ if (this->begin() != this->end()) {
+ return "return " + this->begin()->description() + ";";
+ }
+ return "return;";
+ case Kind::kSection:
+ return "@section { ... }";
+ case Kind::kSwitchCase: {
+ auto iter = this->begin();
+ String result;
+ if (*iter) {
+ result.appendf("case %s:\n", iter->description().c_str());
+ } else {
+ result = "default:\n";
+ }
+ for (++iter; iter != this->end(); ++iter) {
+ result += "\n" + iter->description();
+ }
+ return result;
+ }
+ case Kind::kSwitch: {
+ auto iter = this->begin();
+ String result;
+ if (getBool()) {
+ result = "@";
+ }
+ result += "switch (" + (iter++)->description() + ") {";
+ for (; iter != this->end(); ++iter) {
+ result += iter->description() + "\n";
+ }
+ result += "}";
+ return result;
+ }
+ case Kind::kTernary:
+ return "(" + this->begin()->description() + " ? " + (this->begin() + 1)->description() +
+ " : " + (this->begin() + 2)->description() + ")";
+ case Kind::kType:
+ return String(getTypeData().fName);
+ case Kind::kVarDeclaration: {
+ VarData vd = getVarData();
+ String result = vd.fName;
+ auto iter = this->begin();
+ for (size_t i = 0; i < vd.fSizeCount; ++i) {
+ result += "[" + (iter++)->description() + "]";
+ }
+ if (iter != this->end()) {
+ result += " = " + (iter++)->description();
+ SkASSERT(iter == this->end());
+ }
+ return result;
+ }
+ case Kind::kVarDeclarations: {
+ auto iter = this->begin();
+ String result = (iter++)->description();
+ if (result.size()) {
+ result += " ";
+ }
+ result += (iter++)->description();
+ const char* separator = " ";
+ for (; iter != this->end(); ++iter) {
+ result += separator + iter->description();
+ separator = ", ";
+ }
+ return result;
+ }
+ default:
+ SkASSERT(false);
+ return "<error>";
+ }
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLASTNode.h b/gfx/skia/skia/src/sksl/SkSLASTNode.h
new file mode 100644
index 0000000000..cdb741b6f2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLASTNode.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTNODE
+#define SKSL_ASTNODE
+
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/SkSLString.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+
+#include <vector>
+
+namespace SkSL {
+
+// std::max isn't constexpr in some compilers
+static constexpr size_t Max(size_t a, size_t b) {
+ return a > b ? a : b;
+}
+
+/**
+ * Represents a node in the abstract syntax tree (AST). The AST is based directly on the parse tree;
+ * it is a parsed-but-not-yet-analyzed version of the program.
+ */
+struct ASTNode {
+ class ID {
+ public:
+ static ID Invalid() {
+ return ID();
+ }
+
+ bool operator==(const ID& other) {
+ return fValue == other.fValue;
+ }
+
+ bool operator!=(const ID& other) {
+ return fValue != other.fValue;
+ }
+
+ MOZ_IMPLICIT operator bool() const { return fValue >= 0; }
+
+ private:
+ ID()
+ : fValue(-1) {}
+
+ ID(int value)
+ : fValue(value) {}
+
+ int fValue;
+
+ friend struct ASTFile;
+ friend struct ASTNode;
+ friend class Parser;
+ };
+
+ enum class Kind {
+ // data: operator(Token), children: left, right
+ kBinary,
+ // children: statements
+ kBlock,
+ // data: value(bool)
+ kBool,
+ kBreak,
+ // children: target, arg1, arg2...
+ kCall,
+ kContinue,
+ kDiscard,
+ // children: statement, test
+ kDo,
+ // data: name(StringFragment), children: enumCases
+ kEnum,
+ // data: name(StringFragment), children: value?
+ kEnumCase,
+ // data: name(StringFragment)
+ kExtension,
+ // data: field(StringFragment), children: base
+ kField,
+ // children: declarations
+ kFile,
+ // data: value(float)
+ kFloat,
+ // children: init, test, next, statement
+ kFor,
+ // data: FunctionData, children: returnType, parameters, statement?
+ kFunction,
+ // data: name(StringFragment)
+ kIdentifier,
+ // children: base, index?
+ kIndex,
+ // data: isStatic(bool), children: test, ifTrue, ifFalse?
+ kIf,
+        // data: value(int)
+ kInt,
+ // data: InterfaceBlockData, children: declaration1, declaration2, ..., size1, size2, ...
+ kInterfaceBlock,
+ // data: Modifiers
+ kModifiers,
+ kNull,
+ // data: ParameterData, children: type, arraySize1, arraySize2, ..., value?
+ kParameter,
+ // data: operator(Token), children: operand
+ kPostfix,
+ // data: operator(Token), children: operand
+ kPrefix,
+ // children: value
+ kReturn,
+        // data: SectionData
+ kSection,
+ // children: value, statement 1, statement 2...
+ kSwitchCase,
+ // children: value, case 1, case 2...
+ kSwitch,
+ // children: test, ifTrue, ifFalse
+ kTernary,
+ // data: TypeData, children: sizes
+ kType,
+ // data: VarData, children: arraySize1, arraySize2, ..., value?
+ kVarDeclaration,
+ // children: modifiers, type, varDeclaration1, varDeclaration2, ...
+ kVarDeclarations,
+ // children: test, statement
+ kWhile,
+ };
+
+ class iterator {
+ public:
+ iterator operator++() {
+ SkASSERT(fID);
+ fID = (**this).fNext;
+ return *this;
+ }
+
+ iterator operator++(int) {
+ SkASSERT(fID);
+ iterator old = *this;
+ fID = (**this).fNext;
+ return old;
+ }
+
+ iterator operator+=(int count) {
+ SkASSERT(count >= 0);
+ for (; count > 0; --count) {
+ ++(*this);
+ }
+ return *this;
+ }
+
+ iterator operator+(int count) {
+ iterator result(*this);
+ return result += count;
+ }
+
+ bool operator==(const iterator& other) const {
+ return fID == other.fID;
+ }
+
+ bool operator!=(const iterator& other) const {
+ return fID != other.fID;
+ }
+
+ ASTNode& operator*() {
+ SkASSERT(fID);
+ return (*fNodes)[fID.fValue];
+ }
+
+ ASTNode* operator->() {
+ SkASSERT(fID);
+ return &(*fNodes)[fID.fValue];
+ }
+
+ private:
+ iterator(std::vector<ASTNode>* nodes, ID id)
+ : fNodes(nodes)
+ , fID(id) {}
+
+ std::vector<ASTNode>* fNodes;
+
+ ID fID;
+
+ friend struct ASTNode;
+ };
+
+ struct TypeData {
+ TypeData() {}
+
+ TypeData(StringFragment name, bool isStructDeclaration, bool isNullable)
+ : fName(name)
+ , fIsStructDeclaration(isStructDeclaration)
+ , fIsNullable(isNullable) {}
+
+ StringFragment fName;
+ bool fIsStructDeclaration;
+ bool fIsNullable;
+ };
+
+ struct ParameterData {
+ ParameterData() {}
+
+ ParameterData(Modifiers modifiers, StringFragment name, size_t sizeCount)
+ : fModifiers(modifiers)
+ , fName(name)
+ , fSizeCount(sizeCount) {}
+
+ Modifiers fModifiers;
+ StringFragment fName;
+ size_t fSizeCount;
+ };
+
+ struct VarData {
+ VarData() {}
+
+ VarData(StringFragment name, size_t sizeCount)
+ : fName(name)
+ , fSizeCount(sizeCount) {}
+
+ StringFragment fName;
+ size_t fSizeCount;
+ };
+
+ struct FunctionData {
+ FunctionData() {}
+
+ FunctionData(Modifiers modifiers, StringFragment name, size_t parameterCount)
+ : fModifiers(modifiers)
+ , fName(name)
+ , fParameterCount(parameterCount) {}
+
+ Modifiers fModifiers;
+ StringFragment fName;
+ size_t fParameterCount;
+ };
+
+ struct InterfaceBlockData {
+ InterfaceBlockData() {}
+
+ InterfaceBlockData(Modifiers modifiers, StringFragment typeName, size_t declarationCount,
+ StringFragment instanceName, size_t sizeCount)
+ : fModifiers(modifiers)
+ , fTypeName(typeName)
+ , fDeclarationCount(declarationCount)
+ , fInstanceName(instanceName)
+ , fSizeCount(sizeCount) {}
+
+ Modifiers fModifiers;
+ StringFragment fTypeName;
+ size_t fDeclarationCount;
+ StringFragment fInstanceName;
+ size_t fSizeCount;
+ };
+
+ struct SectionData {
+ SectionData() {}
+
+ SectionData(StringFragment name, StringFragment argument, StringFragment text)
+ : fName(name)
+ , fArgument(argument)
+ , fText(text) {}
+
+ StringFragment fName;
+ StringFragment fArgument;
+ StringFragment fText;
+ };
+
+ struct NodeData {
+ char fBytes[Max(sizeof(Token),
+ Max(sizeof(StringFragment),
+ Max(sizeof(bool),
+ Max(sizeof(SKSL_INT),
+ Max(sizeof(SKSL_FLOAT),
+ Max(sizeof(Modifiers),
+ Max(sizeof(TypeData),
+ Max(sizeof(FunctionData),
+ Max(sizeof(ParameterData),
+ Max(sizeof(VarData),
+ Max(sizeof(InterfaceBlockData),
+ sizeof(SectionData))))))))))))];
+
+ enum class Kind {
+ kToken,
+ kStringFragment,
+ kBool,
+ kInt,
+ kFloat,
+ kModifiers,
+ kTypeData,
+ kFunctionData,
+ kParameterData,
+ kVarData,
+ kInterfaceBlockData,
+ kSectionData
+ } fKind;
+
+ NodeData() = default;
+
+ NodeData(Token data)
+ : fKind(Kind::kToken) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(StringFragment data)
+ : fKind(Kind::kStringFragment) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(bool data)
+ : fKind(Kind::kBool) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(SKSL_INT data)
+ : fKind(Kind::kInt) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(SKSL_FLOAT data)
+ : fKind(Kind::kFloat) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(Modifiers data)
+ : fKind(Kind::kModifiers) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(TypeData data)
+ : fKind(Kind::kTypeData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(FunctionData data)
+ : fKind(Kind::kFunctionData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(VarData data)
+ : fKind(Kind::kVarData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(ParameterData data)
+ : fKind(Kind::kParameterData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(InterfaceBlockData data)
+ : fKind(Kind::kInterfaceBlockData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+
+ NodeData(SectionData data)
+ : fKind(Kind::kSectionData) {
+ memcpy(fBytes, &data, sizeof(data));
+ }
+ };
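+
+    // NodeData is a hand-rolled tagged union: the payload lives in fBytes and
+    // every access goes through memcpy. A minimal sketch of the round trip,
+    // mirroring the getters below (illustrative):
+    //
+    //     NodeData d(SKSL_FLOAT(1.5));       // constructor memcpys into fBytes
+    //     SKSL_FLOAT f;
+    //     memcpy(&f, d.fBytes, sizeof(f));   // what getFloat() does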
+
+ ASTNode()
+ : fOffset(-1)
+ , fKind(Kind::kNull) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind)
+ : fNodes(nodes)
+ , fOffset(offset)
+ , fKind(kind) {
+ switch (kind) {
+ case Kind::kBinary:
+ case Kind::kPostfix:
+ case Kind::kPrefix:
+ fData.fKind = NodeData::Kind::kToken;
+ break;
+
+ case Kind::kBool:
+ case Kind::kIf:
+ case Kind::kSwitch:
+ fData.fKind = NodeData::Kind::kBool;
+ break;
+
+ case Kind::kEnum:
+ case Kind::kEnumCase:
+ case Kind::kExtension:
+ case Kind::kField:
+ case Kind::kIdentifier:
+ fData.fKind = NodeData::Kind::kStringFragment;
+ break;
+
+ case Kind::kFloat:
+ fData.fKind = NodeData::Kind::kFloat;
+ break;
+
+ case Kind::kFunction:
+ fData.fKind = NodeData::Kind::kFunctionData;
+ break;
+
+ case Kind::kInt:
+ fData.fKind = NodeData::Kind::kInt;
+ break;
+
+ case Kind::kInterfaceBlock:
+ fData.fKind = NodeData::Kind::kInterfaceBlockData;
+ break;
+
+ case Kind::kModifiers:
+ fData.fKind = NodeData::Kind::kModifiers;
+ break;
+
+ case Kind::kParameter:
+ fData.fKind = NodeData::Kind::kParameterData;
+ break;
+
+ case Kind::kVarDeclaration:
+ fData.fKind = NodeData::Kind::kVarData;
+ break;
+
+ case Kind::kType:
+ fData.fKind = NodeData::Kind::kTypeData;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, Token t)
+ : fNodes(nodes)
+ , fData(t)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, StringFragment s)
+ : fNodes(nodes)
+ , fData(s)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, const char* s)
+ : fNodes(nodes)
+ , fData(StringFragment(s))
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, bool b)
+ : fNodes(nodes)
+ , fData(b)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, SKSL_INT i)
+ : fNodes(nodes)
+ , fData(i)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, SKSL_FLOAT f)
+ : fNodes(nodes)
+ , fData(f)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, Modifiers m)
+ : fNodes(nodes)
+ , fData(m)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, TypeData td)
+ : fNodes(nodes)
+ , fData(td)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ ASTNode(std::vector<ASTNode>* nodes, int offset, Kind kind, SectionData s)
+ : fNodes(nodes)
+ , fData(s)
+ , fOffset(offset)
+ , fKind(kind) {}
+
+ MOZ_IMPLICIT operator bool() const {
+ return fKind != Kind::kNull;
+ }
+
+ Token getToken() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kToken);
+ Token result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ bool getBool() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kBool);
+ bool result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ SKSL_INT getInt() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kInt);
+ SKSL_INT result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ SKSL_FLOAT getFloat() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kFloat);
+ SKSL_FLOAT result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ StringFragment getString() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kStringFragment);
+ StringFragment result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ Modifiers getModifiers() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kModifiers);
+ Modifiers result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setModifiers(const Modifiers& m) {
+ memcpy(fData.fBytes, &m, sizeof(m));
+ }
+
+ TypeData getTypeData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kTypeData);
+ TypeData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setTypeData(const ASTNode::TypeData& td) {
+ SkASSERT(fData.fKind == NodeData::Kind::kTypeData);
+ memcpy(fData.fBytes, &td, sizeof(td));
+ }
+
+ ParameterData getParameterData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kParameterData);
+ ParameterData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setParameterData(const ASTNode::ParameterData& pd) {
+ SkASSERT(fData.fKind == NodeData::Kind::kParameterData);
+ memcpy(fData.fBytes, &pd, sizeof(pd));
+ }
+
+ VarData getVarData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kVarData);
+ VarData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setVarData(const ASTNode::VarData& vd) {
+ SkASSERT(fData.fKind == NodeData::Kind::kVarData);
+ memcpy(fData.fBytes, &vd, sizeof(vd));
+ }
+
+ FunctionData getFunctionData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kFunctionData);
+ FunctionData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setFunctionData(const ASTNode::FunctionData& fd) {
+ SkASSERT(fData.fKind == NodeData::Kind::kFunctionData);
+ memcpy(fData.fBytes, &fd, sizeof(fd));
+ }
+
+ InterfaceBlockData getInterfaceBlockData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kInterfaceBlockData);
+ InterfaceBlockData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void setInterfaceBlockData(const ASTNode::InterfaceBlockData& id) {
+ SkASSERT(fData.fKind == NodeData::Kind::kInterfaceBlockData);
+ memcpy(fData.fBytes, &id, sizeof(id));
+ }
+
+ SectionData getSectionData() const {
+ SkASSERT(fData.fKind == NodeData::Kind::kSectionData);
+ SectionData result;
+ memcpy(&result, fData.fBytes, sizeof(result));
+ return result;
+ }
+
+ void addChild(ID id) {
+ SkASSERT(!(*fNodes)[id.fValue].fNext);
+ if (fLastChild) {
+ SkASSERT(!(*fNodes)[fLastChild.fValue].fNext);
+ (*fNodes)[fLastChild.fValue].fNext = id;
+ } else {
+ fFirstChild = id;
+ }
+ fLastChild = id;
+ SkASSERT(!(*fNodes)[fLastChild.fValue].fNext);
+ }
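+
+    // Children form an intrusive singly-linked list: each child's fNext holds
+    // the ID of its next sibling, fFirstChild/fLastChild bracket the list, and
+    // begin()/end() walk it in insertion order.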
+
+ iterator begin() const {
+ return iterator(fNodes, fFirstChild);
+ }
+
+ iterator end() const {
+ return iterator(fNodes, ID(-1));
+ }
+
+ String description() const;
+
+ std::vector<ASTNode>* fNodes;
+
+ NodeData fData;
+
+ int fOffset;
+
+ Kind fKind;
+
+ ID fFirstChild;
+
+ ID fLastChild;
+
+ ID fNext;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLByteCode.cpp b/gfx/skia/skia/src/sksl/SkSLByteCode.cpp
new file mode 100644
index 0000000000..b03e2ee328
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLByteCode.cpp
@@ -0,0 +1,1760 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STANDALONE
+
+#include "include/core/SkPoint3.h"
+#include "include/private/SkVx.h"
+#include "src/core/SkUtils.h" // sk_unaligned_load
+#include "src/sksl/SkSLByteCode.h"
+#include "src/sksl/SkSLByteCodeGenerator.h"
+#include "src/sksl/SkSLExternalValue.h"
+
+#include <vector>
+
+namespace SkSL {
+
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+
+constexpr int VecWidth = ByteCode::kVecWidth;
+
+struct Interpreter {
+
+using F32 = skvx::Vec<VecWidth, float>;
+using I32 = skvx::Vec<VecWidth, int32_t>;
+using U32 = skvx::Vec<VecWidth, uint32_t>;
+
+#define READ8() (*(ip++))
+#define READ16() (ip += 2, sk_unaligned_load<uint16_t>(ip - 2))
+#define READ32() (ip += 4, sk_unaligned_load<uint32_t>(ip - 4))
+#define READ_INST() (ip += sizeof(instruction), \
+ sk_unaligned_load<instruction>(ip - sizeof(instruction)))
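+
+// Each READ* macro decodes one operand at ip and advances ip past it, e.g.
+// (illustrative) decoding a 16-bit branch target: uint16_t target = READ16();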
+
+#define VECTOR_DISASSEMBLE(op, text) \
+ case ByteCodeInstruction::op: printf(text); ++ip; break; \
+ case ByteCodeInstruction::op##2: printf(text "2"); ++ip; break; \
+ case ByteCodeInstruction::op##3: printf(text "3"); ++ip; break; \
+ case ByteCodeInstruction::op##4: printf(text "4"); ++ip; break;
+
+#define VECTOR_DISASSEMBLE_NO_COUNT(op, text) \
+ case ByteCodeInstruction::op: printf(text); break; \
+ case ByteCodeInstruction::op##2: printf(text "2"); break; \
+ case ByteCodeInstruction::op##3: printf(text "3"); break; \
+ case ByteCodeInstruction::op##4: printf(text "4"); break;
+
+#define VECTOR_MATRIX_DISASSEMBLE(op, text) \
+ VECTOR_DISASSEMBLE(op, text) \
+ case ByteCodeInstruction::op##N: printf(text "N %d", READ8()); break;
+
+#define VECTOR_MATRIX_DISASSEMBLE_NO_COUNT(op, text) \
+ VECTOR_DISASSEMBLE_NO_COUNT(op, text) \
+ case ByteCodeInstruction::op##N: printf(text "N %d", READ8()); break;
+
+static const uint8_t* DisassembleInstruction(const uint8_t* ip) {
+ switch ((ByteCodeInstruction) (intptr_t) READ_INST()) {
+ VECTOR_MATRIX_DISASSEMBLE(kAddF, "addf")
+ VECTOR_DISASSEMBLE(kAddI, "addi")
+ case ByteCodeInstruction::kAndB: printf("andb"); break;
+ case ByteCodeInstruction::kBranch: printf("branch %d", READ16()); break;
+ case ByteCodeInstruction::kCall: printf("call %d", READ8()); break;
+ case ByteCodeInstruction::kCallExternal: {
+ int argumentCount = READ8();
+ int returnCount = READ8();
+ int externalValue = READ8();
+ printf("callexternal %d, %d, %d", argumentCount, returnCount, externalValue);
+ break;
+ }
+ case ByteCodeInstruction::kClampIndex: printf("clampindex %d", READ8()); break;
+ VECTOR_DISASSEMBLE(kCompareIEQ, "compareieq")
+ VECTOR_DISASSEMBLE(kCompareINEQ, "compareineq")
+ VECTOR_MATRIX_DISASSEMBLE(kCompareFEQ, "comparefeq")
+ VECTOR_MATRIX_DISASSEMBLE(kCompareFNEQ, "comparefneq")
+ VECTOR_DISASSEMBLE(kCompareFGT, "comparefgt")
+ VECTOR_DISASSEMBLE(kCompareFGTEQ, "comparefgteq")
+ VECTOR_DISASSEMBLE(kCompareFLT, "compareflt")
+ VECTOR_DISASSEMBLE(kCompareFLTEQ, "compareflteq")
+ VECTOR_DISASSEMBLE(kCompareSGT, "comparesgt")
+ VECTOR_DISASSEMBLE(kCompareSGTEQ, "comparesgteq")
+ VECTOR_DISASSEMBLE(kCompareSLT, "compareslt")
+ VECTOR_DISASSEMBLE(kCompareSLTEQ, "compareslteq")
+ VECTOR_DISASSEMBLE(kCompareUGT, "compareugt")
+ VECTOR_DISASSEMBLE(kCompareUGTEQ, "compareugteq")
+ VECTOR_DISASSEMBLE(kCompareULT, "compareult")
+ VECTOR_DISASSEMBLE(kCompareULTEQ, "compareulteq")
+ VECTOR_DISASSEMBLE_NO_COUNT(kConvertFtoI, "convertftoi")
+ VECTOR_DISASSEMBLE_NO_COUNT(kConvertStoF, "convertstof")
+ VECTOR_DISASSEMBLE_NO_COUNT(kConvertUtoF, "convertutof")
+ VECTOR_DISASSEMBLE(kCos, "cos")
+ VECTOR_MATRIX_DISASSEMBLE(kDivideF, "dividef")
+ VECTOR_DISASSEMBLE(kDivideS, "divideS")
+ VECTOR_DISASSEMBLE(kDivideU, "divideu")
+ VECTOR_MATRIX_DISASSEMBLE(kDup, "dup")
+ case ByteCodeInstruction::kInverse2x2: printf("inverse2x2"); break;
+ case ByteCodeInstruction::kInverse3x3: printf("inverse3x3"); break;
+ case ByteCodeInstruction::kInverse4x4: printf("inverse4x4"); break;
+ case ByteCodeInstruction::kLoad: printf("load %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoad2: printf("load2 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoad3: printf("load3 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoad4: printf("load4 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadGlobal: printf("loadglobal %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadGlobal2: printf("loadglobal2 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadGlobal3: printf("loadglobal3 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadGlobal4: printf("loadglobal4 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadUniform: printf("loaduniform %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadUniform2: printf("loaduniform2 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadUniform3: printf("loaduniform3 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadUniform4: printf("loaduniform4 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kLoadSwizzle: {
+ int target = READ8();
+ int count = READ8();
+ printf("loadswizzle %d %d", target, count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kLoadSwizzleGlobal: {
+ int target = READ8();
+ int count = READ8();
+ printf("loadswizzleglobal %d %d", target, count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kLoadSwizzleUniform: {
+ int target = READ8();
+ int count = READ8();
+ printf("loadswizzleuniform %d %d", target, count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kLoadExtended: printf("loadextended %d", READ8()); break;
+ case ByteCodeInstruction::kLoadExtendedGlobal: printf("loadextendedglobal %d", READ8());
+ break;
+ case ByteCodeInstruction::kLoadExtendedUniform: printf("loadextendeduniform %d", READ8());
+ break;
+ case ByteCodeInstruction::kMatrixToMatrix: {
+ int srcCols = READ8();
+ int srcRows = READ8();
+ int dstCols = READ8();
+ int dstRows = READ8();
+ printf("matrixtomatrix %dx%d %dx%d", srcCols, srcRows, dstCols, dstRows);
+ break;
+ }
+ case ByteCodeInstruction::kMatrixMultiply: {
+ int lCols = READ8();
+ int lRows = READ8();
+ int rCols = READ8();
+ printf("matrixmultiply %dx%d %dx%d", lCols, lRows, rCols, lCols);
+ break;
+ }
+ VECTOR_MATRIX_DISASSEMBLE(kMultiplyF, "multiplyf")
+ VECTOR_DISASSEMBLE(kMultiplyI, "multiplyi")
+ VECTOR_MATRIX_DISASSEMBLE_NO_COUNT(kNegateF, "negatef")
+ VECTOR_DISASSEMBLE_NO_COUNT(kNegateI, "negatei")
+ case ByteCodeInstruction::kNotB: printf("notb"); break;
+ case ByteCodeInstruction::kOrB: printf("orb"); break;
+ VECTOR_MATRIX_DISASSEMBLE_NO_COUNT(kPop, "pop")
+ case ByteCodeInstruction::kPushImmediate: {
+ uint32_t v = READ32();
+ union { uint32_t u; float f; } pun = { v };
+ printf("pushimmediate %s", (to_string(v) + "(" + to_string(pun.f) + ")").c_str());
+ break;
+ }
+ case ByteCodeInstruction::kReadExternal: printf("readexternal %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kReadExternal2: printf("readexternal2 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kReadExternal3: printf("readexternal3 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kReadExternal4: printf("readexternal4 %d", READ16() >> 8); break;
+ VECTOR_DISASSEMBLE(kRemainderF, "remainderf")
+ VECTOR_DISASSEMBLE(kRemainderS, "remainders")
+ VECTOR_DISASSEMBLE(kRemainderU, "remainderu")
+ case ByteCodeInstruction::kReserve: printf("reserve %d", READ8()); break;
+ case ByteCodeInstruction::kReturn: printf("return %d", READ8()); break;
+ case ByteCodeInstruction::kScalarToMatrix: {
+ int cols = READ8();
+ int rows = READ8();
+ printf("scalartomatrix %dx%d", cols, rows);
+ break;
+ }
+ case ByteCodeInstruction::kShiftLeft: printf("shl %d", READ8()); break;
+ case ByteCodeInstruction::kShiftRightS: printf("shrs %d", READ8()); break;
+ case ByteCodeInstruction::kShiftRightU: printf("shru %d", READ8()); break;
+ VECTOR_DISASSEMBLE(kSin, "sin")
+ VECTOR_DISASSEMBLE_NO_COUNT(kSqrt, "sqrt")
+ case ByteCodeInstruction::kStore: printf("store %d", READ8()); break;
+ case ByteCodeInstruction::kStore2: printf("store2 %d", READ8()); break;
+ case ByteCodeInstruction::kStore3: printf("store3 %d", READ8()); break;
+ case ByteCodeInstruction::kStore4: printf("store4 %d", READ8()); break;
+ case ByteCodeInstruction::kStoreGlobal: printf("storeglobal %d", READ8()); break;
+ case ByteCodeInstruction::kStoreGlobal2: printf("storeglobal2 %d", READ8()); break;
+ case ByteCodeInstruction::kStoreGlobal3: printf("storeglobal3 %d", READ8()); break;
+ case ByteCodeInstruction::kStoreGlobal4: printf("storeglobal4 %d", READ8()); break;
+ case ByteCodeInstruction::kStoreSwizzle: {
+ int target = READ8();
+ int count = READ8();
+ printf("storeswizzle %d %d", target, count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kStoreSwizzleGlobal: {
+ int target = READ8();
+ int count = READ8();
+ printf("storeswizzleglobal %d %d", target, count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kStoreSwizzleIndirect: {
+ int count = READ8();
+ printf("storeswizzleindirect %d", count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kStoreSwizzleIndirectGlobal: {
+ int count = READ8();
+ printf("storeswizzleindirectglobal %d", count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ case ByteCodeInstruction::kStoreExtended: printf("storeextended %d", READ8()); break;
+ case ByteCodeInstruction::kStoreExtendedGlobal: printf("storeextendedglobal %d", READ8());
+ break;
+ VECTOR_MATRIX_DISASSEMBLE(kSubtractF, "subtractf")
+ VECTOR_DISASSEMBLE(kSubtractI, "subtracti")
+ case ByteCodeInstruction::kSwizzle: {
+ printf("swizzle %d, ", READ8());
+ int count = READ8();
+ printf("%d", count);
+ for (int i = 0; i < count; ++i) {
+ printf(", %d", READ8());
+ }
+ break;
+ }
+ VECTOR_DISASSEMBLE(kTan, "tan")
+ case ByteCodeInstruction::kWriteExternal: printf("writeexternal %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kWriteExternal2: printf("writeexternal2 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kWriteExternal3: printf("writeexternal3 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kWriteExternal4: printf("writeexternal4 %d", READ16() >> 8); break;
+ case ByteCodeInstruction::kXorB: printf("xorb"); break;
+ case ByteCodeInstruction::kMaskPush: printf("maskpush"); break;
+ case ByteCodeInstruction::kMaskPop: printf("maskpop"); break;
+ case ByteCodeInstruction::kMaskNegate: printf("masknegate"); break;
+ case ByteCodeInstruction::kMaskBlend: printf("maskblend %d", READ8()); break;
+ case ByteCodeInstruction::kBranchIfAllFalse:
+ printf("branchifallfalse %d", READ16());
+ break;
+ case ByteCodeInstruction::kLoopBegin: printf("loopbegin"); break;
+ case ByteCodeInstruction::kLoopNext: printf("loopnext"); break;
+ case ByteCodeInstruction::kLoopMask: printf("loopmask"); break;
+ case ByteCodeInstruction::kLoopEnd: printf("loopend"); break;
+ case ByteCodeInstruction::kLoopContinue: printf("loopcontinue"); break;
+ case ByteCodeInstruction::kLoopBreak: printf("loopbreak"); break;
+ default:
+ ip -= sizeof(instruction);
+ printf("unknown(%d)\n", (int) (intptr_t) READ_INST());
+ SkASSERT(false);
+ }
+ return ip;
+}
+
+#ifdef SKSLC_THREADED_CODE
+ #define LABEL(name) name:
+ #ifdef TRACE
+ #define NEXT() goto next
+ #else
+ #define NEXT() goto *READ_INST()
+ #endif
+#else
+ #define LABEL(name) case ByteCodeInstruction::name:
+ #define NEXT() continue
+#endif
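+
+// Under SKSLC_THREADED_CODE, ByteCodeFunction::preprocess() rewrites each
+// opcode into the address of its handler label (the GCC/Clang "labels as
+// values" extension), so NEXT() dispatches with a computed goto instead of a
+// switch:
+//
+//     goto *READ_INST();   // READ_INST() now yields a &&label address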
+
+#define VECTOR_BINARY_OP(base, field, op) \
+ LABEL(base ## 4) \
+ sp[-4] = sp[-4].field op sp[0].field; \
+ POP(); \
+ /* fall through */ \
+ LABEL(base ## 3) { \
+ sp[-ip[0]] = sp[-ip[0]].field op sp[0].field; \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base ## 2) { \
+ sp[-ip[0]] = sp[-ip[0]].field op sp[0].field; \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base) { \
+ sp[-ip[0]] = sp[-ip[0]].field op sp[0].field; \
+ POP(); \
+ ++ip; \
+ NEXT(); \
+ }
+
+// A naive implementation of / or % using skvx operations will likely crash with a divide by zero
+// in inactive vector lanes, so we need to be sure to avoid masked-off lanes.
+#define VECTOR_BINARY_MASKED_OP(base, field, op) \
+ LABEL(base ## 4) \
+ for (int i = 0; i < VecWidth; ++i) { \
+ if (mask()[i]) { \
+ sp[-4].field[i] op ## = sp[0].field[i]; \
+ } \
+ } \
+ POP(); \
+ /* fall through */ \
+ LABEL(base ## 3) { \
+ for (int i = 0; i < VecWidth; ++i) { \
+ if (mask()[i]) { \
+ sp[-ip[0]].field[i] op ## = sp[0].field[i]; \
+ } \
+ } \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base ## 2) { \
+ for (int i = 0; i < VecWidth; ++i) { \
+ if (mask()[i]) { \
+ sp[-ip[0]].field[i] op ## = sp[0].field[i]; \
+ } \
+ } \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base) { \
+ for (int i = 0; i < VecWidth; ++i) { \
+ if (mask()[i]) { \
+ sp[-ip[0]].field[i] op ## = sp[0].field[i]; \
+ } \
+ } \
+ POP(); \
+ ++ip; \
+ NEXT(); \
+ }
+
+
+#define VECTOR_MATRIX_BINARY_OP(base, field, op) \
+ VECTOR_BINARY_OP(base, field, op) \
+ LABEL(base ## N) { \
+ int count = READ8(); \
+ for (int i = count; i > 0; --i) { \
+ sp[-count] = sp[-count].field op sp[0].field; \
+ POP(); \
+ } \
+ NEXT(); \
+ }
+
+#define VECTOR_BINARY_FN(base, field, fn) \
+ LABEL(base ## 4) \
+ sp[-4] = fn(sp[-4].field, sp[0].field); \
+ POP(); \
+ /* fall through */ \
+ LABEL(base ## 3) { \
+ sp[-ip[0]] = fn(sp[-ip[0]].field, sp[0].field); \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base ## 2) { \
+ sp[-ip[0]] = fn(sp[-ip[0]].field, sp[0].field); \
+ POP(); \
+ } /* fall through */ \
+ LABEL(base) { \
+ sp[-ip[0]] = fn(sp[-ip[0]].field, sp[0].field); \
+ POP(); \
+ ++ip; \
+ NEXT(); \
+ }
+
+#define VECTOR_UNARY_FN(base, fn, field) \
+ LABEL(base ## 4) sp[-3] = fn(sp[-3].field); \
+ LABEL(base ## 3) sp[-2] = fn(sp[-2].field); \
+ LABEL(base ## 2) sp[-1] = fn(sp[-1].field); \
+ LABEL(base) sp[ 0] = fn(sp[ 0].field); \
+ NEXT();
+
+#define VECTOR_UNARY_FN_VEC(base, fn) \
+ LABEL(base ## 4) \
+ LABEL(base ## 3) \
+ LABEL(base ## 2) \
+ LABEL(base) { \
+ int count = READ8(); \
+ float* v = (float*)sp - count + 1; \
+ for (int i = VecWidth * count; i > 0; --i, ++v) { \
+ *v = fn(*v); \
+ } \
+ NEXT(); \
+ }
+
+#define VECTOR_LABELS(base) \
+ &&base ## 4, \
+ &&base ## 3, \
+ &&base ## 2, \
+ &&base
+
+#define VECTOR_MATRIX_LABELS(base) \
+ VECTOR_LABELS(base), \
+ &&base ## N
+
+// If you trip this assert, it means that the order of the opcodes listed in ByteCodeInstruction
+// does not match the order of the opcodes listed in the 'labels' array in innerRun().
+#define CHECK_LABEL(name) \
+ SkASSERT(labels[(int) ByteCodeInstruction::name] == &&name)
+
+#define CHECK_VECTOR_LABELS(name) \
+ CHECK_LABEL(name ## 4); \
+ CHECK_LABEL(name ## 3); \
+ CHECK_LABEL(name ## 2); \
+ CHECK_LABEL(name)
+
+#define CHECK_VECTOR_MATRIX_LABELS(name) \
+ CHECK_VECTOR_LABELS(name); \
+ CHECK_LABEL(name ## N)
+
+union VValue {
+ VValue() {}
+ VValue(F32 f) : fFloat(f) {}
+ VValue(I32 s) : fSigned(s) {}
+ VValue(U32 u) : fUnsigned(u) {}
+
+ F32 fFloat;
+ I32 fSigned;
+ U32 fUnsigned;
+};
+
+struct StackFrame {
+ const uint8_t* fCode;
+ const uint8_t* fIP;
+ VValue* fStack;
+ int fParameterCount;
+};
+
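+// Lane-wise floating-point remainder with truncation toward zero (the same
+// convention as C's fmod), used for SkSL's '%' on floats.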
+static F32 VecMod(F32 a, F32 b) {
+ return a - skvx::trunc(a / b) * b;
+}
+
+#define spf(index) sp[index].fFloat
+
+static void CallExternal(const ByteCode* byteCode, const uint8_t*& ip, VValue*& sp,
+ int baseIndex, I32 mask) {
+ int argumentCount = READ8();
+ int returnCount = READ8();
+ int target = READ8();
+ ExternalValue* v = byteCode->fExternalValues[target];
+ sp -= argumentCount - 1;
+
+ float tmpArgs[4];
+ float tmpReturn[4];
+ SkASSERT(argumentCount <= (int)SK_ARRAY_COUNT(tmpArgs));
+ SkASSERT(returnCount <= (int)SK_ARRAY_COUNT(tmpReturn));
+
+ for (int i = 0; i < VecWidth; ++i) {
+ if (mask[i]) {
+ for (int j = 0; j < argumentCount; ++j) {
+ tmpArgs[j] = sp[j].fFloat[i];
+ }
+ v->call(baseIndex + i, tmpArgs, tmpReturn);
+ for (int j = 0; j < returnCount; ++j) {
+ sp[j].fFloat[i] = tmpReturn[j];
+ }
+ }
+ }
+ sp += returnCount - 1;
+}
+
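+// 2x2 inverse via the adjugate: for the column-major matrix [a c; b d] held in
+// the four stack slots, inverse = 1/(ad - bc) * [d -c; -b a], computed
+// lane-wise.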
+static void Inverse2x2(VValue* sp) {
+ F32 a = sp[-3].fFloat,
+ b = sp[-2].fFloat,
+ c = sp[-1].fFloat,
+ d = sp[ 0].fFloat;
+ F32 idet = F32(1) / (a*d - b*c);
+ sp[-3].fFloat = d * idet;
+ sp[-2].fFloat = -b * idet;
+ sp[-1].fFloat = -c * idet;
+ sp[ 0].fFloat = a * idet;
+}
+
+static void Inverse3x3(VValue* sp) {
+ F32 a11 = sp[-8].fFloat, a12 = sp[-5].fFloat, a13 = sp[-2].fFloat,
+ a21 = sp[-7].fFloat, a22 = sp[-4].fFloat, a23 = sp[-1].fFloat,
+ a31 = sp[-6].fFloat, a32 = sp[-3].fFloat, a33 = sp[ 0].fFloat;
+ F32 idet = F32(1) / (a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 -
+ a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31);
+ sp[-8].fFloat = (a22 * a33 - a23 * a32) * idet;
+ sp[-7].fFloat = (a23 * a31 - a21 * a33) * idet;
+ sp[-6].fFloat = (a21 * a32 - a22 * a31) * idet;
+ sp[-5].fFloat = (a13 * a32 - a12 * a33) * idet;
+ sp[-4].fFloat = (a11 * a33 - a13 * a31) * idet;
+ sp[-3].fFloat = (a12 * a31 - a11 * a32) * idet;
+ sp[-2].fFloat = (a12 * a23 - a13 * a22) * idet;
+ sp[-1].fFloat = (a13 * a21 - a11 * a23) * idet;
+ sp[ 0].fFloat = (a11 * a22 - a12 * a21) * idet;
+}
+
+static void Inverse4x4(VValue* sp) {
+ F32 a00 = spf(-15), a10 = spf(-11), a20 = spf( -7), a30 = spf( -3),
+ a01 = spf(-14), a11 = spf(-10), a21 = spf( -6), a31 = spf( -2),
+ a02 = spf(-13), a12 = spf( -9), a22 = spf( -5), a32 = spf( -1),
+ a03 = spf(-12), a13 = spf( -8), a23 = spf( -4), a33 = spf( 0);
+
+ F32 b00 = a00 * a11 - a01 * a10,
+ b01 = a00 * a12 - a02 * a10,
+ b02 = a00 * a13 - a03 * a10,
+ b03 = a01 * a12 - a02 * a11,
+ b04 = a01 * a13 - a03 * a11,
+ b05 = a02 * a13 - a03 * a12,
+ b06 = a20 * a31 - a21 * a30,
+ b07 = a20 * a32 - a22 * a30,
+ b08 = a20 * a33 - a23 * a30,
+ b09 = a21 * a32 - a22 * a31,
+ b10 = a21 * a33 - a23 * a31,
+ b11 = a22 * a33 - a23 * a32;
+
+ F32 idet = F32(1) /
+ (b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06);
+
+ b00 *= idet;
+ b01 *= idet;
+ b02 *= idet;
+ b03 *= idet;
+ b04 *= idet;
+ b05 *= idet;
+ b06 *= idet;
+ b07 *= idet;
+ b08 *= idet;
+ b09 *= idet;
+ b10 *= idet;
+ b11 *= idet;
+
+ spf(-15) = a11 * b11 - a12 * b10 + a13 * b09;
+ spf(-14) = a02 * b10 - a01 * b11 - a03 * b09;
+ spf(-13) = a31 * b05 - a32 * b04 + a33 * b03;
+ spf(-12) = a22 * b04 - a21 * b05 - a23 * b03;
+ spf(-11) = a12 * b08 - a10 * b11 - a13 * b07;
+ spf(-10) = a00 * b11 - a02 * b08 + a03 * b07;
+ spf( -9) = a32 * b02 - a30 * b05 - a33 * b01;
+ spf( -8) = a20 * b05 - a22 * b02 + a23 * b01;
+ spf( -7) = a10 * b10 - a11 * b08 + a13 * b06;
+ spf( -6) = a01 * b08 - a00 * b10 - a03 * b06;
+ spf( -5) = a30 * b04 - a31 * b02 + a33 * b00;
+ spf( -4) = a21 * b02 - a20 * b04 - a23 * b00;
+ spf( -3) = a11 * b07 - a10 * b09 - a12 * b06;
+ spf( -2) = a00 * b09 - a01 * b07 + a02 * b06;
+ spf( -1) = a31 * b01 - a30 * b03 - a32 * b00;
+ spf( 0) = a20 * b03 - a21 * b01 + a22 * b00;
+}
+
+static bool InnerRun(const ByteCode* byteCode, const ByteCodeFunction* f, VValue* stack,
+ float* outReturn[], VValue globals[], const float uniforms[],
+ bool stripedOutput, int N, int baseIndex) {
+#ifdef SKSLC_THREADED_CODE
+ static const void* labels[] = {
+ // If you aren't familiar with it, the &&label syntax is the GCC / Clang "labels as values"
+ // extension. If you add anything to this array, be sure to add the corresponding
+ // CHECK_LABEL() or CHECK_*_LABELS() assert below.
+ VECTOR_MATRIX_LABELS(kAddF),
+ VECTOR_LABELS(kAddI),
+ &&kAndB,
+ &&kBranch,
+ &&kCall,
+ &&kCallExternal,
+ &&kClampIndex,
+ VECTOR_LABELS(kCompareIEQ),
+ VECTOR_LABELS(kCompareINEQ),
+ VECTOR_MATRIX_LABELS(kCompareFEQ),
+ VECTOR_MATRIX_LABELS(kCompareFNEQ),
+ VECTOR_LABELS(kCompareFGT),
+ VECTOR_LABELS(kCompareFGTEQ),
+ VECTOR_LABELS(kCompareFLT),
+ VECTOR_LABELS(kCompareFLTEQ),
+ VECTOR_LABELS(kCompareSGT),
+ VECTOR_LABELS(kCompareSGTEQ),
+ VECTOR_LABELS(kCompareSLT),
+ VECTOR_LABELS(kCompareSLTEQ),
+ VECTOR_LABELS(kCompareUGT),
+ VECTOR_LABELS(kCompareUGTEQ),
+ VECTOR_LABELS(kCompareULT),
+ VECTOR_LABELS(kCompareULTEQ),
+ VECTOR_LABELS(kConvertFtoI),
+ VECTOR_LABELS(kConvertStoF),
+ VECTOR_LABELS(kConvertUtoF),
+ VECTOR_LABELS(kCos),
+ VECTOR_MATRIX_LABELS(kDivideF),
+ VECTOR_LABELS(kDivideS),
+ VECTOR_LABELS(kDivideU),
+ VECTOR_MATRIX_LABELS(kDup),
+ &&kInverse2x2,
+ &&kInverse3x3,
+ &&kInverse4x4,
+ VECTOR_LABELS(kLoad),
+ VECTOR_LABELS(kLoadGlobal),
+ VECTOR_LABELS(kLoadUniform),
+ &&kLoadSwizzle,
+ &&kLoadSwizzleGlobal,
+ &&kLoadSwizzleUniform,
+ &&kLoadExtended,
+ &&kLoadExtendedGlobal,
+ &&kLoadExtendedUniform,
+ &&kMatrixToMatrix,
+ &&kMatrixMultiply,
+ VECTOR_MATRIX_LABELS(kNegateF),
+ VECTOR_LABELS(kNegateI),
+ VECTOR_MATRIX_LABELS(kMultiplyF),
+ VECTOR_LABELS(kMultiplyI),
+ &&kNotB,
+ &&kOrB,
+ VECTOR_MATRIX_LABELS(kPop),
+ &&kPushImmediate,
+ VECTOR_LABELS(kReadExternal),
+ VECTOR_LABELS(kRemainderF),
+ VECTOR_LABELS(kRemainderS),
+ VECTOR_LABELS(kRemainderU),
+ &&kReserve,
+ &&kReturn,
+ &&kScalarToMatrix,
+ &&kShiftLeft,
+ &&kShiftRightS,
+ &&kShiftRightU,
+ VECTOR_LABELS(kSin),
+ VECTOR_LABELS(kSqrt),
+ VECTOR_LABELS(kStore),
+ VECTOR_LABELS(kStoreGlobal),
+ &&kStoreExtended,
+ &&kStoreExtendedGlobal,
+ &&kStoreSwizzle,
+ &&kStoreSwizzleGlobal,
+ &&kStoreSwizzleIndirect,
+ &&kStoreSwizzleIndirectGlobal,
+ &&kSwizzle,
+ VECTOR_MATRIX_LABELS(kSubtractF),
+ VECTOR_LABELS(kSubtractI),
+ VECTOR_LABELS(kTan),
+ VECTOR_LABELS(kWriteExternal),
+ &&kXorB,
+
+ &&kMaskPush,
+ &&kMaskPop,
+ &&kMaskNegate,
+ &&kMaskBlend,
+ &&kBranchIfAllFalse,
+
+ &&kLoopBegin,
+ &&kLoopNext,
+ &&kLoopMask,
+ &&kLoopEnd,
+ &&kLoopBreak,
+ &&kLoopContinue,
+ };
+ // Verify that the order of the labels array matches the order of the ByteCodeInstruction enum.
+ CHECK_VECTOR_MATRIX_LABELS(kAddF);
+ CHECK_VECTOR_LABELS(kAddI);
+ CHECK_LABEL(kAndB);
+ CHECK_LABEL(kBranch);
+ CHECK_LABEL(kCall);
+ CHECK_LABEL(kCallExternal);
+ CHECK_LABEL(kClampIndex);
+ CHECK_VECTOR_LABELS(kCompareIEQ);
+ CHECK_VECTOR_LABELS(kCompareINEQ);
+ CHECK_VECTOR_MATRIX_LABELS(kCompareFEQ);
+ CHECK_VECTOR_MATRIX_LABELS(kCompareFNEQ);
+ CHECK_VECTOR_LABELS(kCompareFGT);
+ CHECK_VECTOR_LABELS(kCompareFGTEQ);
+ CHECK_VECTOR_LABELS(kCompareFLT);
+ CHECK_VECTOR_LABELS(kCompareFLTEQ);
+ CHECK_VECTOR_LABELS(kCompareSGT);
+ CHECK_VECTOR_LABELS(kCompareSGTEQ);
+ CHECK_VECTOR_LABELS(kCompareSLT);
+ CHECK_VECTOR_LABELS(kCompareSLTEQ);
+ CHECK_VECTOR_LABELS(kCompareUGT);
+ CHECK_VECTOR_LABELS(kCompareUGTEQ);
+ CHECK_VECTOR_LABELS(kCompareULT);
+ CHECK_VECTOR_LABELS(kCompareULTEQ);
+ CHECK_VECTOR_LABELS(kConvertFtoI);
+ CHECK_VECTOR_LABELS(kConvertStoF);
+ CHECK_VECTOR_LABELS(kConvertUtoF);
+ CHECK_VECTOR_LABELS(kCos);
+ CHECK_VECTOR_MATRIX_LABELS(kDivideF);
+ CHECK_VECTOR_LABELS(kDivideS);
+ CHECK_VECTOR_LABELS(kDivideU);
+ CHECK_VECTOR_MATRIX_LABELS(kDup);
+ CHECK_LABEL(kInverse2x2);
+ CHECK_LABEL(kInverse3x3);
+ CHECK_LABEL(kInverse4x4);
+ CHECK_VECTOR_LABELS(kLoad);
+ CHECK_VECTOR_LABELS(kLoadGlobal);
+ CHECK_VECTOR_LABELS(kLoadUniform);
+ CHECK_LABEL(kLoadSwizzle);
+ CHECK_LABEL(kLoadSwizzleGlobal);
+ CHECK_LABEL(kLoadSwizzleUniform);
+ CHECK_LABEL(kLoadExtended);
+ CHECK_LABEL(kLoadExtendedGlobal);
+ CHECK_LABEL(kLoadExtendedUniform);
+ CHECK_LABEL(kMatrixToMatrix);
+ CHECK_LABEL(kMatrixMultiply);
+ CHECK_VECTOR_MATRIX_LABELS(kNegateF);
+ CHECK_VECTOR_LABELS(kNegateI);
+ CHECK_VECTOR_MATRIX_LABELS(kMultiplyF);
+ CHECK_VECTOR_LABELS(kMultiplyI);
+ CHECK_LABEL(kNotB);
+ CHECK_LABEL(kOrB);
+ CHECK_VECTOR_MATRIX_LABELS(kPop);
+ CHECK_LABEL(kPushImmediate);
+ CHECK_VECTOR_LABELS(kReadExternal);
+ CHECK_VECTOR_LABELS(kRemainderF);
+ CHECK_VECTOR_LABELS(kRemainderS);
+ CHECK_VECTOR_LABELS(kRemainderU);
+ CHECK_LABEL(kReserve);
+ CHECK_LABEL(kReturn);
+ CHECK_LABEL(kScalarToMatrix);
+ CHECK_LABEL(kShiftLeft);
+ CHECK_LABEL(kShiftRightS);
+ CHECK_LABEL(kShiftRightU);
+ CHECK_VECTOR_LABELS(kSin);
+ CHECK_VECTOR_LABELS(kSqrt);
+ CHECK_VECTOR_LABELS(kStore);
+ CHECK_VECTOR_LABELS(kStoreGlobal);
+ CHECK_LABEL(kStoreExtended);
+ CHECK_LABEL(kStoreExtendedGlobal);
+ CHECK_LABEL(kStoreSwizzle);
+ CHECK_LABEL(kStoreSwizzleGlobal);
+ CHECK_LABEL(kStoreSwizzleIndirect);
+ CHECK_LABEL(kStoreSwizzleIndirectGlobal);
+ CHECK_LABEL(kSwizzle);
+ CHECK_VECTOR_MATRIX_LABELS(kSubtractF);
+ CHECK_VECTOR_LABELS(kSubtractI);
+ CHECK_VECTOR_LABELS(kTan);
+ CHECK_VECTOR_LABELS(kWriteExternal);
+ CHECK_LABEL(kXorB);
+ CHECK_LABEL(kMaskPush);
+ CHECK_LABEL(kMaskPop);
+ CHECK_LABEL(kMaskNegate);
+ CHECK_LABEL(kMaskBlend);
+ CHECK_LABEL(kBranchIfAllFalse);
+ CHECK_LABEL(kLoopBegin);
+ CHECK_LABEL(kLoopNext);
+ CHECK_LABEL(kLoopMask);
+ CHECK_LABEL(kLoopEnd);
+ CHECK_LABEL(kLoopBreak);
+ CHECK_LABEL(kLoopContinue);
+ f->fPreprocessOnce([f] { ((ByteCodeFunction*)f)->preprocess(labels); });
+#endif
+
+    // Needs to hold the first non-negative integers (0, 1, 2, ...), with at least VecWidth entries
+ static const Interpreter::I32 gLanes = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ };
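+    // For example, with VecWidth == 16 and N == 5, the comparison (gLanes < N) used
+    // below yields a mask that is true (~0) in lanes 0..4 and false (0) in lanes
+    // 5..15, so only the first five lanes participate in execution.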
+
+ VValue* sp = stack + f->fParameterCount + f->fLocalCount - 1;
+
+ #define POP() (*(sp--))
+ #define PUSH(v) (sp[1] = v, ++sp)
+
+ const uint8_t* code = f->fCode.data();
+ const uint8_t* ip = code;
+ std::vector<StackFrame> frames;
+
+ I32 condStack[16]; // Independent condition masks
+ I32 maskStack[16]; // Combined masks (eg maskStack[0] & maskStack[1] & ...)
+ I32 contStack[16]; // Continue flags for loops
+ I32 loopStack[16]; // Loop execution masks
+ condStack[0] = maskStack[0] = (gLanes < N);
+ contStack[0] = I32( 0);
+ loopStack[0] = I32(~0);
+ I32* condPtr = condStack;
+ I32* maskPtr = maskStack;
+ I32* contPtr = contStack;
+ I32* loopPtr = loopStack;
+
+ if (f->fConditionCount + 1 > (int)SK_ARRAY_COUNT(condStack) ||
+ f->fLoopCount + 1 > (int)SK_ARRAY_COUNT(loopStack)) {
+ return false;
+ }
+
+ auto mask = [&]() { return *maskPtr & *loopPtr; };
+
+#ifdef SKSLC_THREADED_CODE
+ // If the "labels as values" extension is available, we implement this using threaded code.
+ // Instead of opcodes, the code directly contains the addresses of the labels to jump to. Then
+ // the code for each opcode simply grabs the address of the next opcode and uses a goto to jump
+ // there.
+ NEXT();
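+    // A minimal standalone sketch of this dispatch technique (a hypothetical
+    // two-opcode machine, not this interpreter's actual encoding):
+    //
+    //     static const void* ops[] = { &&op_inc, &&op_halt };
+    //     int program[] = { 0, 0, 1 };   // inc, inc, halt
+    //     int pc = 0, acc = 0;
+    //     goto *ops[program[pc++]];      // dispatch: no central loop or switch
+    // op_inc:
+    //     ++acc;
+    //     goto *ops[program[pc++]];      // each handler dispatches the next op
+    // op_halt:
+    //     ;                              // acc == 2 here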
+#else
+ // Otherwise, we have to use a switch statement and a loop to execute the right label.
+ for (;;) {
+ #ifdef TRACE
+ printf("at %3d ", (int) (ip - code));
+ disassemble_instruction(ip);
+ printf(" (stack: %d)\n", (int) (sp - stack) + 1);
+ #endif
+ switch ((ByteCodeInstruction) READ16()) {
+#endif
+
+ VECTOR_MATRIX_BINARY_OP(kAddF, fFloat, +)
+ VECTOR_BINARY_OP(kAddI, fSigned, +)
+
+ // Booleans are integer masks: 0/~0 for false/true. So bitwise ops do what we want:
+ LABEL(kAndB)
+ sp[-1] = sp[-1].fSigned & sp[0].fSigned;
+ POP();
+ NEXT();
+ LABEL(kNotB)
+ sp[0] = ~sp[0].fSigned;
+ NEXT();
+ LABEL(kOrB)
+ sp[-1] = sp[-1].fSigned | sp[0].fSigned;
+ POP();
+ NEXT();
+ LABEL(kXorB)
+ sp[-1] = sp[-1].fSigned ^ sp[0].fSigned;
+ POP();
+ NEXT();
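+            // Worked example with true == ~0 and false == 0:
+            //     0xFFFFFFFF & 0x00000000 == 0x00000000   (true && false -> false)
+            //     0xFFFFFFFF ^ 0xFFFFFFFF == 0x00000000   (true != true  -> false)
+            //     ~0x00000000             == 0xFFFFFFFF   (!false        -> true)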
+
+ LABEL(kBranch)
+ ip = code + READ16();
+ NEXT();
+
+ LABEL(kCall) {
+ // Precursor code reserved space for the return value, and pushed all parameters to
+ // the stack. Update our bottom of stack to point at the first parameter, and our
+ // sp to point past those parameters (plus space for locals).
+ int target = READ8();
+ const ByteCodeFunction* fun = byteCode->fFunctions[target].get();
+#ifdef SKSLC_THREADED_CODE
+ fun->fPreprocessOnce([fun] { ((ByteCodeFunction*)fun)->preprocess(labels); });
+#endif
+ if (skvx::any(mask())) {
+ frames.push_back({ code, ip, stack, fun->fParameterCount });
+ ip = code = fun->fCode.data();
+ stack = sp - fun->fParameterCount + 1;
+ sp = stack + fun->fParameterCount + fun->fLocalCount - 1;
+ }
+ NEXT();
+ }
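+            // Sketch of the stack after the reassignments above, for a callee with
+            // two parameter slots and one local slot (counts are hypothetical):
+            //
+            //     ... | reserved return slot(s) | param0 param1 | local0
+            //                                     ^ stack         ^ sp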
+
+ LABEL(kCallExternal) {
+ CallExternal(byteCode, ip, sp, baseIndex, mask());
+ NEXT();
+ }
+
+ LABEL(kClampIndex) {
+ int length = READ8();
+ if (skvx::any(mask() & ((sp[0].fSigned < 0) | (sp[0].fSigned >= length)))) {
+ return false;
+ }
+ NEXT();
+ }
+
+ VECTOR_BINARY_OP(kCompareIEQ, fSigned, ==)
+ VECTOR_MATRIX_BINARY_OP(kCompareFEQ, fFloat, ==)
+ VECTOR_BINARY_OP(kCompareINEQ, fSigned, !=)
+ VECTOR_MATRIX_BINARY_OP(kCompareFNEQ, fFloat, !=)
+ VECTOR_BINARY_OP(kCompareSGT, fSigned, >)
+ VECTOR_BINARY_OP(kCompareUGT, fUnsigned, >)
+ VECTOR_BINARY_OP(kCompareFGT, fFloat, >)
+ VECTOR_BINARY_OP(kCompareSGTEQ, fSigned, >=)
+ VECTOR_BINARY_OP(kCompareUGTEQ, fUnsigned, >=)
+ VECTOR_BINARY_OP(kCompareFGTEQ, fFloat, >=)
+ VECTOR_BINARY_OP(kCompareSLT, fSigned, <)
+ VECTOR_BINARY_OP(kCompareULT, fUnsigned, <)
+ VECTOR_BINARY_OP(kCompareFLT, fFloat, <)
+ VECTOR_BINARY_OP(kCompareSLTEQ, fSigned, <=)
+ VECTOR_BINARY_OP(kCompareULTEQ, fUnsigned, <=)
+ VECTOR_BINARY_OP(kCompareFLTEQ, fFloat, <=)
+
+ LABEL(kConvertFtoI4) sp[-3] = skvx::cast<int>(sp[-3].fFloat);
+ LABEL(kConvertFtoI3) sp[-2] = skvx::cast<int>(sp[-2].fFloat);
+ LABEL(kConvertFtoI2) sp[-1] = skvx::cast<int>(sp[-1].fFloat);
+ LABEL(kConvertFtoI) sp[ 0] = skvx::cast<int>(sp[ 0].fFloat);
+ NEXT();
+
+ LABEL(kConvertStoF4) sp[-3] = skvx::cast<float>(sp[-3].fSigned);
+ LABEL(kConvertStoF3) sp[-2] = skvx::cast<float>(sp[-2].fSigned);
+ LABEL(kConvertStoF2) sp[-1] = skvx::cast<float>(sp[-1].fSigned);
+ LABEL(kConvertStoF) sp[ 0] = skvx::cast<float>(sp[ 0].fSigned);
+ NEXT();
+
+ LABEL(kConvertUtoF4) sp[-3] = skvx::cast<float>(sp[-3].fUnsigned);
+ LABEL(kConvertUtoF3) sp[-2] = skvx::cast<float>(sp[-2].fUnsigned);
+ LABEL(kConvertUtoF2) sp[-1] = skvx::cast<float>(sp[-1].fUnsigned);
+ LABEL(kConvertUtoF) sp[ 0] = skvx::cast<float>(sp[ 0].fUnsigned);
+ NEXT();
+
+ VECTOR_UNARY_FN_VEC(kCos, cosf)
+
+ VECTOR_BINARY_MASKED_OP(kDivideS, fSigned, /)
+ VECTOR_BINARY_MASKED_OP(kDivideU, fUnsigned, /)
+ VECTOR_MATRIX_BINARY_OP(kDivideF, fFloat, /)
+
+ LABEL(kDup4) PUSH(sp[1 - ip[0]]);
+ LABEL(kDup3) PUSH(sp[1 - ip[0]]);
+ LABEL(kDup2) PUSH(sp[1 - ip[0]]);
+ LABEL(kDup) PUSH(sp[1 - ip[0]]);
+ ++ip;
+ NEXT();
+
+ LABEL(kDupN) {
+ int count = READ8();
+ memcpy(sp + 1, sp - count + 1, count * sizeof(VValue));
+ sp += count;
+ NEXT();
+ }
+
+ LABEL(kInverse2x2) {
+ Inverse2x2(sp);
+ NEXT();
+ }
+ LABEL(kInverse3x3) {
+ Inverse3x3(sp);
+ NEXT();
+ }
+ LABEL(kInverse4x4) {
+ Inverse4x4(sp);
+ NEXT();
+ }
+
+ LABEL(kLoad4) sp[4] = stack[ip[1] + 3];
+ LABEL(kLoad3) sp[3] = stack[ip[1] + 2];
+ LABEL(kLoad2) sp[2] = stack[ip[1] + 1];
+ LABEL(kLoad) sp[1] = stack[ip[1] + 0];
+ sp += ip[0];
+ ip += 2;
+ NEXT();
+
+ LABEL(kLoadGlobal4) sp[4] = globals[ip[1] + 3];
+ LABEL(kLoadGlobal3) sp[3] = globals[ip[1] + 2];
+ LABEL(kLoadGlobal2) sp[2] = globals[ip[1] + 1];
+ LABEL(kLoadGlobal) sp[1] = globals[ip[1] + 0];
+ sp += ip[0];
+ ip += 2;
+ NEXT();
+
+ LABEL(kLoadUniform4) sp[4].fFloat = uniforms[ip[1] + 3];
+ LABEL(kLoadUniform3) sp[3].fFloat = uniforms[ip[1] + 2];
+ LABEL(kLoadUniform2) sp[2].fFloat = uniforms[ip[1] + 1];
+ LABEL(kLoadUniform) sp[1].fFloat = uniforms[ip[1] + 0];
+ sp += ip[0];
+ ip += 2;
+ NEXT();
+
+ LABEL(kLoadExtended) {
+ int count = READ8();
+ I32 src = POP().fSigned;
+ I32 m = mask();
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ sp[i + 1].fSigned[j] = stack[src[j] + i].fSigned[j];
+ }
+ }
+ }
+ sp += count;
+ NEXT();
+ }
+
+ LABEL(kLoadExtendedGlobal) {
+ int count = READ8();
+ I32 src = POP().fSigned;
+ I32 m = mask();
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ sp[i + 1].fSigned[j] = globals[src[j] + i].fSigned[j];
+ }
+ }
+ }
+ sp += count;
+ NEXT();
+ }
+
+ LABEL(kLoadExtendedUniform) {
+ int count = READ8();
+ I32 src = POP().fSigned;
+ I32 m = mask();
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ sp[i + 1].fFloat[j] = uniforms[src[j] + i];
+ }
+ }
+ }
+ sp += count;
+ NEXT();
+ }
+
+ LABEL(kLoadSwizzle) {
+ int src = READ8();
+ int count = READ8();
+ for (int i = 0; i < count; ++i) {
+ PUSH(stack[src + *(ip + i)]);
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kLoadSwizzleGlobal) {
+ int src = READ8();
+ int count = READ8();
+ for (int i = 0; i < count; ++i) {
+ PUSH(globals[src + *(ip + i)]);
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kLoadSwizzleUniform) {
+ int src = READ8();
+ int count = READ8();
+ for (int i = 0; i < count; ++i) {
+ PUSH(F32(uniforms[src + *(ip + i)]));
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kMatrixToMatrix) {
+ int srcCols = READ8();
+ int srcRows = READ8();
+ int dstCols = READ8();
+ int dstRows = READ8();
+ SkASSERT(srcCols >= 2 && srcCols <= 4);
+ SkASSERT(srcRows >= 2 && srcRows <= 4);
+ SkASSERT(dstCols >= 2 && dstCols <= 4);
+ SkASSERT(dstRows >= 2 && dstRows <= 4);
+ F32 tmp[16];
+ memset(tmp, 0, sizeof(tmp));
+ tmp[0] = tmp[5] = tmp[10] = tmp[15] = F32(1.0f);
+ for (int c = srcCols - 1; c >= 0; --c) {
+ for (int r = srcRows - 1; r >= 0; --r) {
+ tmp[c*4 + r] = POP().fFloat;
+ }
+ }
+ for (int c = 0; c < dstCols; ++c) {
+ for (int r = 0; r < dstRows; ++r) {
+ PUSH(tmp[c*4 + r]);
+ }
+ }
+ NEXT();
+ }
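+            // Worked example: a column-major mat2 with columns (a,b) and (c,d)
+            // converted to a mat3 pushes the columns (a,b,0), (c,d,0), (0,0,1):
+            // overlapping values are copied and the rest is filled from the
+            // identity matrix, per the GLSL rules noted in the header.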
+
+ LABEL(kMatrixMultiply) {
+ int lCols = READ8();
+ int lRows = READ8();
+ int rCols = READ8();
+ int rRows = lCols;
+ F32 tmp[16] = { 0.0f };
+ F32* B = &(sp - (rCols * rRows) + 1)->fFloat;
+ F32* A = B - (lCols * lRows);
+ for (int c = 0; c < rCols; ++c) {
+ for (int r = 0; r < lRows; ++r) {
+ for (int j = 0; j < lCols; ++j) {
+ tmp[c*lRows + r] += A[j*lRows + r] * B[c*rRows + j];
+ }
+ }
+ }
+ sp -= (lCols * lRows) + (rCols * rRows);
+ memcpy(sp + 1, tmp, rCols * lRows * sizeof(VValue));
+ sp += (rCols * lRows);
+ NEXT();
+ }
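+            // Worked example of the indexing above for a 2x2 * 2x2 product
+            // (Arc = row r, column c):
+            //     tmp[c*2 + r] = A[0*2 + r] * B[c*2 + 0] + A[1*2 + r] * B[c*2 + 1]
+            // so tmp[0] (column 0, row 0) == A00*B00 + A01*B10, the standard
+            // column-major matrix product.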
+
+ VECTOR_BINARY_OP(kMultiplyI, fSigned, *)
+ VECTOR_MATRIX_BINARY_OP(kMultiplyF, fFloat, *)
+
+ LABEL(kNegateF4) sp[-3] = -sp[-3].fFloat;
+ LABEL(kNegateF3) sp[-2] = -sp[-2].fFloat;
+ LABEL(kNegateF2) sp[-1] = -sp[-1].fFloat;
+ LABEL(kNegateF) sp[ 0] = -sp[ 0].fFloat;
+ NEXT();
+
+ LABEL(kNegateFN) {
+ int count = READ8();
+ for (int i = count - 1; i >= 0; --i) {
+ sp[-i] = -sp[-i].fFloat;
+ }
+ NEXT();
+ }
+
+ LABEL(kNegateI4) sp[-3] = -sp[-3].fSigned;
+ LABEL(kNegateI3) sp[-2] = -sp[-2].fSigned;
+ LABEL(kNegateI2) sp[-1] = -sp[-1].fSigned;
+ LABEL(kNegateI) sp[ 0] = -sp[ 0].fSigned;
+ NEXT();
+
+ LABEL(kPop4) POP();
+ LABEL(kPop3) POP();
+ LABEL(kPop2) POP();
+ LABEL(kPop) POP();
+ NEXT();
+
+ LABEL(kPopN)
+ sp -= READ8();
+ NEXT();
+
+ LABEL(kPushImmediate)
+ PUSH(U32(READ32()));
+ NEXT();
+
+ LABEL(kReadExternal)
+ LABEL(kReadExternal2)
+ LABEL(kReadExternal3)
+ LABEL(kReadExternal4) {
+ int count = READ8();
+ int src = READ8();
+ float tmp[4];
+ I32 m = mask();
+ for (int i = 0; i < VecWidth; ++i) {
+ if (m[i]) {
+ byteCode->fExternalValues[src]->read(baseIndex + i, tmp);
+ for (int j = 0; j < count; ++j) {
+ sp[j + 1].fFloat[i] = tmp[j];
+ }
+ }
+ }
+ sp += count;
+ NEXT();
+ }
+
+ VECTOR_BINARY_FN(kRemainderF, fFloat, VecMod)
+ VECTOR_BINARY_MASKED_OP(kRemainderS, fSigned, %)
+ VECTOR_BINARY_MASKED_OP(kRemainderU, fUnsigned, %)
+
+ LABEL(kReserve)
+ sp += READ8();
+ NEXT();
+
+ LABEL(kReturn) {
+ int count = READ8();
+ if (frames.empty()) {
+ if (outReturn) {
+ VValue* src = sp - count + 1;
+ if (stripedOutput) {
+ for (int i = 0; i < count; ++i) {
+ memcpy(outReturn[i], &src->fFloat, N * sizeof(float));
+ ++src;
+ }
+ } else {
+ float* outPtr = outReturn[0];
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < N; ++j) {
+ outPtr[count * j] = src->fFloat[j];
+ }
+ ++outPtr;
+ ++src;
+ }
+ }
+ }
+ return true;
+ } else {
+ // When we were called, the caller reserved stack space for their copy of our
+ // return value, then 'stack' was positioned after that, where our parameters
+ // were placed. Copy our return values to their reserved area.
+ memcpy(stack - count, sp - count + 1, count * sizeof(VValue));
+
+ // Now move the stack pointer to the end of the passed-in parameters. This odd
+ // calling convention requires the caller to pop the arguments after calling,
+ // but allows them to store any out-parameters back during that unwinding.
+ // After that sequence finishes, the return value will be the top of the stack.
+ const StackFrame& frame(frames.back());
+ sp = stack + frame.fParameterCount - 1;
+ stack = frame.fStack;
+ code = frame.fCode;
+ ip = frame.fIP;
+ frames.pop_back();
+ NEXT();
+ }
+ }
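+            // Continuing the kCall sketch above: the callee's return values were just
+            // copied into the slots the caller reserved, and sp now points at the last
+            // passed parameter, so the caller pops its arguments (storing any
+            // out-parameters) and then finds the return value on top of the stack.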
+
+ LABEL(kScalarToMatrix) {
+ int cols = READ8();
+ int rows = READ8();
+ VValue v = POP();
+ for (int c = 0; c < cols; ++c) {
+ for (int r = 0; r < rows; ++r) {
+ PUSH(c == r ? v : F32(0.0f));
+ }
+ }
+ NEXT();
+ }
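+            // For example, a scalar v converted to a mat2 pushes v, 0, 0, v
+            // (column-major): v along the diagonal, zero elsewhere.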
+
+ LABEL(kShiftLeft)
+ sp[0] = sp[0].fSigned << READ8();
+ NEXT();
+ LABEL(kShiftRightS)
+ sp[0] = sp[0].fSigned >> READ8();
+ NEXT();
+ LABEL(kShiftRightU)
+ sp[0] = sp[0].fUnsigned >> READ8();
+ NEXT();
+
+ VECTOR_UNARY_FN_VEC(kSin, sinf)
+ VECTOR_UNARY_FN(kSqrt, skvx::sqrt, fFloat)
+
+ LABEL(kStore4)
+ stack[*ip+3] = skvx::if_then_else(mask(), POP().fFloat, stack[*ip+3].fFloat);
+ LABEL(kStore3)
+ stack[*ip+2] = skvx::if_then_else(mask(), POP().fFloat, stack[*ip+2].fFloat);
+ LABEL(kStore2)
+ stack[*ip+1] = skvx::if_then_else(mask(), POP().fFloat, stack[*ip+1].fFloat);
+ LABEL(kStore)
+ stack[*ip+0] = skvx::if_then_else(mask(), POP().fFloat, stack[*ip+0].fFloat);
+ ++ip;
+ NEXT();
+
+ LABEL(kStoreGlobal4)
+ globals[*ip+3] = skvx::if_then_else(mask(), POP().fFloat, globals[*ip+3].fFloat);
+ LABEL(kStoreGlobal3)
+ globals[*ip+2] = skvx::if_then_else(mask(), POP().fFloat, globals[*ip+2].fFloat);
+ LABEL(kStoreGlobal2)
+ globals[*ip+1] = skvx::if_then_else(mask(), POP().fFloat, globals[*ip+1].fFloat);
+ LABEL(kStoreGlobal)
+ globals[*ip+0] = skvx::if_then_else(mask(), POP().fFloat, globals[*ip+0].fFloat);
+ ++ip;
+ NEXT();
+
+ LABEL(kStoreExtended) {
+ int count = READ8();
+ I32 target = POP().fSigned;
+ VValue* src = sp - count + 1;
+ I32 m = mask();
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ stack[target[j] + i].fSigned[j] = src[i].fSigned[j];
+ }
+ }
+ }
+ sp -= count;
+ NEXT();
+ }
+ LABEL(kStoreExtendedGlobal) {
+ int count = READ8();
+ I32 target = POP().fSigned;
+ VValue* src = sp - count + 1;
+ I32 m = mask();
+ for (int i = 0; i < count; ++i) {
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ globals[target[j] + i].fSigned[j] = src[i].fSigned[j];
+ }
+ }
+ }
+ sp -= count;
+ NEXT();
+ }
+
+ LABEL(kStoreSwizzle) {
+ int target = READ8();
+ int count = READ8();
+ for (int i = count - 1; i >= 0; --i) {
+ stack[target + *(ip + i)] = skvx::if_then_else(
+ mask(), POP().fFloat, stack[target + *(ip + i)].fFloat);
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kStoreSwizzleGlobal) {
+ int target = READ8();
+ int count = READ8();
+ for (int i = count - 1; i >= 0; --i) {
+ globals[target + *(ip + i)] = skvx::if_then_else(
+ mask(), POP().fFloat, globals[target + *(ip + i)].fFloat);
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kStoreSwizzleIndirect) {
+ int count = READ8();
+ I32 target = POP().fSigned;
+ I32 m = mask();
+ for (int i = count - 1; i >= 0; --i) {
+ I32 v = POP().fSigned;
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ stack[target[j] + *(ip + i)].fSigned[j] = v[j];
+ }
+ }
+ }
+ ip += count;
+ NEXT();
+ }
+
+ LABEL(kStoreSwizzleIndirectGlobal) {
+ int count = READ8();
+ I32 target = POP().fSigned;
+ I32 m = mask();
+ for (int i = count - 1; i >= 0; --i) {
+ I32 v = POP().fSigned;
+ for (int j = 0; j < VecWidth; ++j) {
+ if (m[j]) {
+ globals[target[j] + *(ip + i)].fSigned[j] = v[j];
+ }
+ }
+ }
+ ip += count;
+ NEXT();
+ }
+
+ VECTOR_BINARY_OP(kSubtractI, fSigned, -)
+ VECTOR_MATRIX_BINARY_OP(kSubtractF, fFloat, -)
+
+ LABEL(kSwizzle) {
+ VValue tmp[4];
+ for (int i = READ8() - 1; i >= 0; --i) {
+ tmp[i] = POP();
+ }
+ for (int i = READ8() - 1; i >= 0; --i) {
+ PUSH(tmp[READ8()]);
+ }
+ NEXT();
+ }
+
+ VECTOR_UNARY_FN_VEC(kTan, tanf)
+
+ LABEL(kWriteExternal4)
+ LABEL(kWriteExternal3)
+ LABEL(kWriteExternal2)
+ LABEL(kWriteExternal) {
+ int count = READ8();
+ int target = READ8();
+ float tmp[4];
+ I32 m = mask();
+ sp -= count;
+ for (int i = 0; i < VecWidth; ++i) {
+ if (m[i]) {
+ for (int j = 0; j < count; ++j) {
+ tmp[j] = sp[j + 1].fFloat[i];
+ }
+ byteCode->fExternalValues[target]->write(baseIndex + i, tmp);
+ }
+ }
+ NEXT();
+ }
+
+ LABEL(kMaskPush)
+ condPtr[1] = POP().fSigned;
+ maskPtr[1] = maskPtr[0] & condPtr[1];
+ ++condPtr; ++maskPtr;
+ NEXT();
+ LABEL(kMaskPop)
+ --condPtr; --maskPtr;
+ NEXT();
+ LABEL(kMaskNegate)
+ maskPtr[0] = maskPtr[-1] & ~condPtr[0];
+ NEXT();
+ LABEL(kMaskBlend) {
+ int count = READ8();
+ I32 m = condPtr[0];
+ --condPtr; --maskPtr;
+ for (int i = 0; i < count; ++i) {
+ sp[-count] = skvx::if_then_else(m, sp[-count].fFloat, sp[0].fFloat);
+ --sp;
+ }
+ NEXT();
+ }
+ LABEL(kBranchIfAllFalse) {
+ int target = READ16();
+ if (!skvx::any(mask())) {
+ ip = code + target;
+ }
+ NEXT();
+ }
+
+ LABEL(kLoopBegin)
+ contPtr[1] = 0;
+ loopPtr[1] = loopPtr[0];
+ ++contPtr; ++loopPtr;
+ NEXT();
+ LABEL(kLoopNext)
+ *loopPtr |= *contPtr;
+ *contPtr = 0;
+ NEXT();
+ LABEL(kLoopMask)
+ *loopPtr &= POP().fSigned;
+ NEXT();
+ LABEL(kLoopEnd)
+ --contPtr; --loopPtr;
+ NEXT();
+ LABEL(kLoopBreak)
+ *loopPtr &= ~mask();
+ NEXT();
+ LABEL(kLoopContinue) {
+ I32 m = mask();
+ *contPtr |= m;
+ *loopPtr &= ~m;
+ NEXT();
+ }
+#ifdef SKSLC_THREADED_CODE
+ #ifdef TRACE
+ next:
+ printf("at %3d (stack: %d) (disable threaded code for disassembly)\n",
+ (int) (ip - code), (int) (sp - stack) + 1);
+ goto *READ_INST();
+ #endif
+#else
+ }
+ }
+#endif
+}
+
+}; // struct Interpreter
+
+#endif // SK_ENABLE_SKSL_INTERPRETER
+
+#undef spf
+
+void ByteCodeFunction::disassemble() const {
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+ const uint8_t* ip = fCode.data();
+ while (ip < fCode.data() + fCode.size()) {
+ printf("%d: ", (int)(ip - fCode.data()));
+ ip = Interpreter::DisassembleInstruction(ip);
+ printf("\n");
+ }
+#endif
+}
+
+#define VECTOR_PREPROCESS(base) \
+ case ByteCodeInstruction::base ## 4: \
+ case ByteCodeInstruction::base ## 3: \
+ case ByteCodeInstruction::base ## 2: \
+ case ByteCodeInstruction::base: READ8(); break;
+
+#define VECTOR_PREPROCESS_NO_COUNT(base) \
+ case ByteCodeInstruction::base ## 4: \
+ case ByteCodeInstruction::base ## 3: \
+ case ByteCodeInstruction::base ## 2: \
+ case ByteCodeInstruction::base: break;
+
+#define VECTOR_MATRIX_PREPROCESS(base) \
+ VECTOR_PREPROCESS(base) \
+ case ByteCodeInstruction::base ## N: READ8(); break;
+
+#define VECTOR_MATRIX_PREPROCESS_NO_COUNT(base) \
+ VECTOR_PREPROCESS_NO_COUNT(base) \
+ case ByteCodeInstruction::base ## N: READ8(); break;
+
+void ByteCodeFunction::preprocess(const void* labels[]) {
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+#ifdef TRACE
+ this->disassemble();
+#endif
+ uint8_t* ip = fCode.data();
+ while (ip < fCode.data() + fCode.size()) {
+ ByteCodeInstruction inst = (ByteCodeInstruction) (intptr_t) READ_INST();
+ const void* label = labels[(int) inst];
+ memcpy(ip - sizeof(instruction), &label, sizeof(label));
+ switch (inst) {
+ VECTOR_MATRIX_PREPROCESS(kAddF)
+ VECTOR_PREPROCESS(kAddI)
+ case ByteCodeInstruction::kAndB: break;
+ case ByteCodeInstruction::kBranch: READ16(); break;
+ case ByteCodeInstruction::kCall: READ8(); break;
+ case ByteCodeInstruction::kCallExternal: {
+ READ8();
+ READ8();
+ READ8();
+ break;
+ }
+ case ByteCodeInstruction::kClampIndex: READ8(); break;
+ VECTOR_PREPROCESS(kCompareIEQ)
+ VECTOR_PREPROCESS(kCompareINEQ)
+ VECTOR_MATRIX_PREPROCESS(kCompareFEQ)
+ VECTOR_MATRIX_PREPROCESS(kCompareFNEQ)
+ VECTOR_PREPROCESS(kCompareFGT)
+ VECTOR_PREPROCESS(kCompareFGTEQ)
+ VECTOR_PREPROCESS(kCompareFLT)
+ VECTOR_PREPROCESS(kCompareFLTEQ)
+ VECTOR_PREPROCESS(kCompareSGT)
+ VECTOR_PREPROCESS(kCompareSGTEQ)
+ VECTOR_PREPROCESS(kCompareSLT)
+ VECTOR_PREPROCESS(kCompareSLTEQ)
+ VECTOR_PREPROCESS(kCompareUGT)
+ VECTOR_PREPROCESS(kCompareUGTEQ)
+ VECTOR_PREPROCESS(kCompareULT)
+ VECTOR_PREPROCESS(kCompareULTEQ)
+ VECTOR_PREPROCESS_NO_COUNT(kConvertFtoI)
+ VECTOR_PREPROCESS_NO_COUNT(kConvertStoF)
+ VECTOR_PREPROCESS_NO_COUNT(kConvertUtoF)
+ VECTOR_PREPROCESS(kCos)
+ VECTOR_MATRIX_PREPROCESS(kDivideF)
+ VECTOR_PREPROCESS(kDivideS)
+ VECTOR_PREPROCESS(kDivideU)
+ VECTOR_MATRIX_PREPROCESS(kDup)
+
+ case ByteCodeInstruction::kInverse2x2:
+ case ByteCodeInstruction::kInverse3x3:
+ case ByteCodeInstruction::kInverse4x4: break;
+
+ case ByteCodeInstruction::kLoad:
+ case ByteCodeInstruction::kLoad2:
+ case ByteCodeInstruction::kLoad3:
+ case ByteCodeInstruction::kLoad4:
+ case ByteCodeInstruction::kLoadGlobal:
+ case ByteCodeInstruction::kLoadGlobal2:
+ case ByteCodeInstruction::kLoadGlobal3:
+ case ByteCodeInstruction::kLoadGlobal4:
+ case ByteCodeInstruction::kLoadUniform:
+ case ByteCodeInstruction::kLoadUniform2:
+ case ByteCodeInstruction::kLoadUniform3:
+ case ByteCodeInstruction::kLoadUniform4: READ16(); break;
+
+ case ByteCodeInstruction::kLoadSwizzle:
+ case ByteCodeInstruction::kLoadSwizzleGlobal:
+ case ByteCodeInstruction::kLoadSwizzleUniform: {
+ READ8();
+ int count = READ8();
+ ip += count;
+ break;
+ }
+
+ case ByteCodeInstruction::kLoadExtended:
+ case ByteCodeInstruction::kLoadExtendedGlobal:
+ case ByteCodeInstruction::kLoadExtendedUniform:
+ READ8();
+ break;
+
+ case ByteCodeInstruction::kMatrixToMatrix: {
+ READ8();
+ READ8();
+ READ8();
+ READ8();
+ break;
+ }
+ case ByteCodeInstruction::kMatrixMultiply: {
+ READ8();
+ READ8();
+ READ8();
+ break;
+ }
+ VECTOR_MATRIX_PREPROCESS(kMultiplyF)
+ VECTOR_PREPROCESS(kMultiplyI)
+ VECTOR_MATRIX_PREPROCESS_NO_COUNT(kNegateF)
+ VECTOR_PREPROCESS_NO_COUNT(kNegateI)
+ case ByteCodeInstruction::kNotB: break;
+ case ByteCodeInstruction::kOrB: break;
+ VECTOR_MATRIX_PREPROCESS_NO_COUNT(kPop)
+ case ByteCodeInstruction::kPushImmediate: READ32(); break;
+
+ case ByteCodeInstruction::kReadExternal:
+ case ByteCodeInstruction::kReadExternal2:
+ case ByteCodeInstruction::kReadExternal3:
+ case ByteCodeInstruction::kReadExternal4: READ16(); break;
+
+ VECTOR_PREPROCESS(kRemainderF)
+ VECTOR_PREPROCESS(kRemainderS)
+ VECTOR_PREPROCESS(kRemainderU)
+ case ByteCodeInstruction::kReserve: READ8(); break;
+ case ByteCodeInstruction::kReturn: READ8(); break;
+ case ByteCodeInstruction::kScalarToMatrix: READ8(); READ8(); break;
+ case ByteCodeInstruction::kShiftLeft: READ8(); break;
+ case ByteCodeInstruction::kShiftRightS: READ8(); break;
+ case ByteCodeInstruction::kShiftRightU: READ8(); break;
+ VECTOR_PREPROCESS(kSin)
+ VECTOR_PREPROCESS_NO_COUNT(kSqrt)
+
+ case ByteCodeInstruction::kStore:
+ case ByteCodeInstruction::kStore2:
+ case ByteCodeInstruction::kStore3:
+ case ByteCodeInstruction::kStore4:
+ case ByteCodeInstruction::kStoreGlobal:
+ case ByteCodeInstruction::kStoreGlobal2:
+ case ByteCodeInstruction::kStoreGlobal3:
+ case ByteCodeInstruction::kStoreGlobal4: READ8(); break;
+
+ case ByteCodeInstruction::kStoreSwizzle:
+ case ByteCodeInstruction::kStoreSwizzleGlobal: {
+ READ8();
+ int count = READ8();
+ ip += count;
+ break;
+ }
+
+ case ByteCodeInstruction::kStoreSwizzleIndirect:
+ case ByteCodeInstruction::kStoreSwizzleIndirectGlobal: {
+ int count = READ8();
+ ip += count;
+ break;
+ }
+
+ case ByteCodeInstruction::kStoreExtended: READ8(); break;
+ case ByteCodeInstruction::kStoreExtendedGlobal: READ8(); break;
+
+ VECTOR_MATRIX_PREPROCESS(kSubtractF)
+ VECTOR_PREPROCESS(kSubtractI)
+
+ case ByteCodeInstruction::kSwizzle: {
+ READ8();
+ int count = READ8();
+ ip += count;
+ break;
+ }
+ VECTOR_PREPROCESS(kTan)
+ case ByteCodeInstruction::kWriteExternal:
+ case ByteCodeInstruction::kWriteExternal2:
+ case ByteCodeInstruction::kWriteExternal3:
+ case ByteCodeInstruction::kWriteExternal4: READ16(); break;
+
+ case ByteCodeInstruction::kXorB: break;
+ case ByteCodeInstruction::kMaskPush: break;
+ case ByteCodeInstruction::kMaskPop: break;
+ case ByteCodeInstruction::kMaskNegate: break;
+ case ByteCodeInstruction::kMaskBlend: READ8(); break;
+ case ByteCodeInstruction::kBranchIfAllFalse: READ16(); break;
+ case ByteCodeInstruction::kLoopBegin: break;
+ case ByteCodeInstruction::kLoopNext: break;
+ case ByteCodeInstruction::kLoopMask: break;
+ case ByteCodeInstruction::kLoopEnd: break;
+ case ByteCodeInstruction::kLoopContinue: break;
+ case ByteCodeInstruction::kLoopBreak: break;
+ default:
+ ip -= 2;
+ printf("unknown(%d)\n", READ16());
+ SkASSERT(false);
+ }
+ }
+#endif
+}
+
+bool ByteCode::run(const ByteCodeFunction* f,
+ float* args, int argCount,
+ float* outReturn, int returnCount,
+ const float* uniforms, int uniformCount) const {
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+ Interpreter::VValue stack[128];
+ int stackNeeded = f->fParameterCount + f->fLocalCount + f->fStackCount;
+ if (stackNeeded > (int)SK_ARRAY_COUNT(stack)) {
+ return false;
+ }
+
+ if (argCount != f->fParameterCount ||
+ returnCount != f->fReturnCount ||
+ uniformCount != fUniformSlotCount) {
+ return false;
+ }
+
+ Interpreter::VValue globals[32];
+ if (fGlobalSlotCount > (int)SK_ARRAY_COUNT(globals)) {
+ return false;
+ }
+
+ // Transpose args into stack
+ {
+ float* src = args;
+ float* dst = (float*)stack;
+ for (int i = 0; i < argCount; ++i) {
+ *dst = *src++;
+ dst += VecWidth;
+ }
+ }
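+    // e.g. with two scalar args a and b, slot 0 now holds a in lane 0 and slot 1
+    // holds b in lane 0; the other lanes are unused because this entry point runs
+    // InnerRun with N == 1.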
+
+ bool stripedOutput = false;
+ float** outArray = outReturn ? &outReturn : nullptr;
+ if (!Interpreter::InnerRun(this, f, stack, outArray, globals, uniforms, stripedOutput, 1, 0)) {
+ return false;
+ }
+
+ // Transpose out parameters back
+ {
+ float* dst = args;
+ float* src = (float*)stack;
+ for (const auto& p : f->fParameters) {
+ if (p.fIsOutParameter) {
+ for (int i = p.fSlotCount; i > 0; --i) {
+ *dst++ = *src;
+ src += VecWidth;
+ }
+ } else {
+ dst += p.fSlotCount;
+ src += p.fSlotCount * VecWidth;
+ }
+ }
+ }
+
+ return true;
+#else
+ SkDEBUGFAIL("ByteCode interpreter not enabled");
+ return false;
+#endif
+}
+
+bool ByteCode::runStriped(const ByteCodeFunction* f, int N,
+ float* args[], int argCount,
+ float* outReturn[], int returnCount,
+ const float* uniforms, int uniformCount) const {
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+ Interpreter::VValue stack[128];
+ int stackNeeded = f->fParameterCount + f->fLocalCount + f->fStackCount;
+ if (stackNeeded > (int)SK_ARRAY_COUNT(stack)) {
+ return false;
+ }
+
+ if (argCount != f->fParameterCount ||
+ returnCount != f->fReturnCount ||
+ uniformCount != fUniformSlotCount) {
+ return false;
+ }
+
+ Interpreter::VValue globals[32];
+ if (fGlobalSlotCount > (int)SK_ARRAY_COUNT(globals)) {
+ return false;
+ }
+
+    // InnerRun only null-checks outReturn, so clear it if the return count is zero
+ if (returnCount == 0) {
+ outReturn = nullptr;
+ }
+
+ int baseIndex = 0;
+
+ while (N) {
+ int w = std::min(N, VecWidth);
+
+ // Copy args into stack
+ for (int i = 0; i < argCount; ++i) {
+ memcpy(stack + i, args[i], w * sizeof(float));
+ }
+
+ bool stripedOutput = true;
+ if (!Interpreter::InnerRun(this, f, stack, outReturn, globals, uniforms, stripedOutput, w,
+ baseIndex)) {
+ return false;
+ }
+
+ // Copy out parameters back
+ int slot = 0;
+ for (const auto& p : f->fParameters) {
+ if (p.fIsOutParameter) {
+ for (int i = slot; i < slot + p.fSlotCount; ++i) {
+ memcpy(args[i], stack + i, w * sizeof(float));
+ }
+ }
+ slot += p.fSlotCount;
+ }
+
+ // Step each argument pointer ahead
+ for (int i = 0; i < argCount; ++i) {
+ args[i] += w;
+ }
+ N -= w;
+ baseIndex += w;
+ }
+
+ return true;
+#else
+ SkDEBUGFAIL("ByteCode interpreter not enabled");
+ return false;
+#endif
+}
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLByteCode.h b/gfx/skia/skia/src/sksl/SkSLByteCode.h
new file mode 100644
index 0000000000..ce3a9188de
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLByteCode.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BYTECODE
+#define SKSL_BYTECODE
+
+#include "include/private/SkOnce.h"
+#include "src/sksl/SkSLString.h"
+
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+class ExternalValue;
+struct FunctionDeclaration;
+
+// GCC and Clang support the "labels as values" extension, which we need in order to implement the
+// interpreter using threaded code. Otherwise, we fall back to a switch statement inside a loop.
+#if defined(__GNUC__) || defined(__clang__)
+ #define SKSLC_THREADED_CODE
+ using instruction = void*;
+#else
+ using instruction = uint16_t;
+#endif
+
+#define VECTOR(name) name ## 4, name ## 3, name ## 2, name
+#define VECTOR_MATRIX(name) name ## 4, name ## 3, name ## 2, name, name ## N
+
+enum class ByteCodeInstruction : uint16_t {
+ // B = bool, F = float, I = int, S = signed, U = unsigned
+    // All binary VECTOR instructions (kAddF, kSubtractI, kCompareIEQ, etc.) are followed by a
+    // byte indicating the count, even though it is redundant, as the count also appears in the
+    // opcode. This is because the original opcodes are lost once we preprocess the code into
+    // threaded form, yet the handlers still need access to the count so that the implementation
+    // can use opcode fallthrough (see the encoding sketch after this enum).
+ VECTOR_MATRIX(kAddF),
+ VECTOR(kAddI),
+ kAndB,
+ kBranch,
+ // Followed by a byte indicating the index of the function to call
+ kCall,
+ // Followed by three bytes indicating: the number of argument slots, the number of return slots,
+ // and the index of the external value to call
+ kCallExternal,
+ // For dynamic array access: Followed by byte indicating length of array
+ kClampIndex,
+ VECTOR(kCompareIEQ),
+ VECTOR(kCompareINEQ),
+ VECTOR_MATRIX(kCompareFEQ),
+ VECTOR_MATRIX(kCompareFNEQ),
+ VECTOR(kCompareFGT),
+ VECTOR(kCompareFGTEQ),
+ VECTOR(kCompareFLT),
+ VECTOR(kCompareFLTEQ),
+ VECTOR(kCompareSGT),
+ VECTOR(kCompareSGTEQ),
+ VECTOR(kCompareSLT),
+ VECTOR(kCompareSLTEQ),
+ VECTOR(kCompareUGT),
+ VECTOR(kCompareUGTEQ),
+ VECTOR(kCompareULT),
+ VECTOR(kCompareULTEQ),
+ VECTOR(kConvertFtoI),
+ VECTOR(kConvertStoF),
+ VECTOR(kConvertUtoF),
+ // Followed by a (redundant) byte indicating the count
+ VECTOR(kCos),
+ VECTOR_MATRIX(kDivideF),
+ VECTOR(kDivideS),
+ VECTOR(kDivideU),
+ // Duplicates the top stack value. Followed by a (redundant) byte indicating the count.
+ VECTOR_MATRIX(kDup),
+ kInverse2x2,
+ kInverse3x3,
+ kInverse4x4,
+ // kLoad/kLoadGlobal are followed by a byte indicating the count, and a byte indicating the
+ // local/global slot to load
+ VECTOR(kLoad),
+ VECTOR(kLoadGlobal),
+ VECTOR(kLoadUniform),
+ // As kLoad/kLoadGlobal, then a count byte (1-4), and then one byte per swizzle component (0-3).
+ kLoadSwizzle,
+ kLoadSwizzleGlobal,
+ kLoadSwizzleUniform,
+ // kLoadExtended* are fallback load ops when we lack a specialization. They are followed by a
+ // count byte, and get the slot to load from the top of the stack.
+ kLoadExtended,
+ kLoadExtendedGlobal,
+ kLoadExtendedUniform,
+ // Followed by four bytes: srcCols, srcRows, dstCols, dstRows. Consumes the src matrix from the
+ // stack, and replaces it with the dst matrix. Per GLSL rules, there are no restrictions on
+ // dimensions. Any overlapping values are copied, and any other values are filled in with the
+ // identity matrix.
+ kMatrixToMatrix,
+ // Followed by three bytes: leftCols (== rightRows), leftRows, rightCols
+ kMatrixMultiply,
+ VECTOR_MATRIX(kNegateF),
+ VECTOR(kNegateI),
+ VECTOR_MATRIX(kMultiplyF),
+ VECTOR(kMultiplyI),
+ kNotB,
+ kOrB,
+ VECTOR_MATRIX(kPop),
+ // Followed by a 32 bit value containing the value to push
+ kPushImmediate,
+ // Followed by a byte indicating external value to read
+ VECTOR(kReadExternal),
+ VECTOR(kRemainderF),
+ VECTOR(kRemainderS),
+ VECTOR(kRemainderU),
+ // Followed by a byte indicating the number of slots to reserve on the stack (for later return)
+ kReserve,
+ // Followed by a byte indicating the number of slots being returned
+ kReturn,
+ // Followed by two bytes indicating columns and rows of matrix (2, 3, or 4 each).
+ // Takes a single value from the top of the stack, and converts to a CxR matrix with that value
+ // replicated along the diagonal (and zero elsewhere), per the GLSL matrix construction rules.
+ kScalarToMatrix,
+ // Followed by a byte indicating the number of bits to shift
+ kShiftLeft,
+ kShiftRightS,
+ kShiftRightU,
+ // Followed by a (redundant) byte indicating the count
+ VECTOR(kSin),
+ VECTOR(kSqrt),
+ // kStore/kStoreGlobal are followed by a byte indicating the local/global slot to store
+ VECTOR(kStore),
+ VECTOR(kStoreGlobal),
+ // Fallback stores. Followed by count byte, and get the slot to store from the top of the stack
+ kStoreExtended,
+ kStoreExtendedGlobal,
+ // As kStore/kStoreGlobal, then a count byte (1-4), then one byte per swizzle component (0-3).
+ // Expects the stack to look like: ... v1 v2 v3 v4, where the number of 'v's is equal to the
+ // number of swizzle components. After the store, all v's are popped from the stack.
+ kStoreSwizzle,
+ kStoreSwizzleGlobal,
+ // As above, but gets the store slot from the top of the stack (before values to be stored)
+ kStoreSwizzleIndirect,
+ kStoreSwizzleIndirectGlobal,
+ // Followed by two count bytes (1-4), and then one byte per swizzle component (0-3). The first
+ // count byte provides the current vector size (the vector is the top n stack elements), and the
+ // second count byte provides the swizzle component count.
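+    // For example (hypothetical source), swizzling a float4 v with v.zy encodes as:
+    //     kSwizzle, 4, 2, 2, 1
+    // which leaves z and then y on the stack (y on top).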
+ kSwizzle,
+ VECTOR_MATRIX(kSubtractF),
+ VECTOR(kSubtractI),
+ // Followed by a (redundant) byte indicating the count
+ VECTOR(kTan),
+ // Followed by a byte indicating external value to write
+ VECTOR(kWriteExternal),
+ kXorB,
+
+ kMaskPush,
+ kMaskPop,
+ kMaskNegate,
+ // Followed by count byte
+ kMaskBlend,
+ // Followed by address
+ kBranchIfAllFalse,
+
+ kLoopBegin,
+ kLoopNext,
+ kLoopMask,
+ kLoopEnd,
+ kLoopBreak,
+ kLoopContinue,
+};
+#undef VECTOR
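+
+// Example encoding of the redundant count byte mentioned above (a sketch): a
+// two-component float add (kAddF2) is emitted as the kAddF2 opcode
+// (sizeof(instruction) bytes) followed by the count byte 0x02. Once preprocessing
+// rewrites the opcode into a label address, that trailing byte still tells the
+// handler the count, which is what lets the kAddF4/3/2/1 cases fall through to
+// shared code.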
+
+class ByteCodeFunction {
+public:
+ int getParameterCount() const { return fParameterCount; }
+ int getReturnCount() const { return fReturnCount; }
+
+ /**
+ * Print bytecode disassembly to stdout.
+ */
+ void disassemble() const;
+
+private:
+ ByteCodeFunction(const FunctionDeclaration* declaration);
+
+ friend class ByteCode;
+ friend class ByteCodeGenerator;
+ friend struct Interpreter;
+
+ struct Parameter {
+ int fSlotCount;
+ bool fIsOutParameter;
+ };
+
+ SkSL::String fName;
+ std::vector<Parameter> fParameters;
+ int fParameterCount;
+ int fReturnCount = 0;
+
+ int fLocalCount = 0;
+ int fStackCount = 0;
+ int fConditionCount = 0;
+ int fLoopCount = 0;
+ mutable SkOnce fPreprocessOnce;
+ std::vector<uint8_t> fCode;
+
+ /**
+ * Replace each opcode with the corresponding entry from the labels array.
+ */
+ void preprocess(const void* labels[]);
+};
+
+enum class TypeCategory {
+ kBool,
+ kSigned,
+ kUnsigned,
+ kFloat,
+};
+
+class SK_API ByteCode {
+public:
+ static constexpr int kVecWidth = 16;
+
+ ByteCode() = default;
+
+ const ByteCodeFunction* getFunction(const char* name) const {
+ for (const auto& f : fFunctions) {
+ if (f->fName == name) {
+ return f.get();
+ }
+ }
+ return nullptr;
+ }
+
+ /**
+ * Invokes the specified function once, with the given arguments.
+ * 'args', 'outReturn', and 'uniforms' are collections of 32-bit values (typically floats,
+ * but possibly int32_t or uint32_t, depending on the types used in the SkSL).
+ * Any 'out' or 'inout' parameters will result in the 'args' array being modified.
+ * The return value is stored in 'outReturn' (may be null, to discard the return value).
+ * 'uniforms' are mapped to 'uniform' globals, in order.
+ */
+ bool SKSL_WARN_UNUSED_RESULT run(const ByteCodeFunction*,
+ float* args, int argCount,
+ float* outReturn, int returnCount,
+ const float* uniforms, int uniformCount) const;
+
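+    // Example usage (a sketch; "square" and the compiled 'bytecode' are
+    // hypothetical, e.g. from the SkSL "float square(float x) { return x * x; }"):
+    //
+    //     const ByteCodeFunction* fn = bytecode->getFunction("square");
+    //     float arg = 3.0f, result = 0.0f;
+    //     if (fn && bytecode->run(fn, &arg, 1, &result, 1, nullptr, 0)) {
+    //         // result == 9.0f
+    //     }
+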
+ /**
+ * Invokes the specified function with the given arguments, 'N' times. 'args' and 'outReturn'
+ * are accepted and returned in structure-of-arrays form:
+ * args[0] points to an array of N values, the first argument for each invocation
+ * ...
+ * args[argCount - 1] points to an array of N values, the last argument for each invocation
+ *
+ * All values in 'args', 'outReturn', and 'uniforms' are 32-bit values (typically floats,
+ * but possibly int32_t or uint32_t, depending on the types used in the SkSL).
+ * Any 'out' or 'inout' parameters will result in the 'args' array being modified.
+ * The return value is stored in 'outReturn' (may be null, to discard the return value).
+ * 'uniforms' are mapped to 'uniform' globals, in order.
+ */
+ bool SKSL_WARN_UNUSED_RESULT runStriped(const ByteCodeFunction*, int N,
+ float* args[], int argCount,
+ float* outReturn[], int returnCount,
+ const float* uniforms, int uniformCount) const;
+
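+    // Example usage (a sketch, reusing the hypothetical "square" function above to
+    // evaluate four inputs at once in structure-of-arrays form):
+    //
+    //     float xs[4] = { 1, 2, 3, 4 };
+    //     float out[4];
+    //     float* args[] = { xs };
+    //     float* rets[] = { out };
+    //     if (bytecode->runStriped(fn, 4, args, 1, rets, 1, nullptr, 0)) {
+    //         // out == { 1, 4, 9, 16 }
+    //     }
+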
+ struct Uniform {
+ SkSL::String fName;
+ TypeCategory fType;
+ int fColumns;
+ int fRows;
+ int fSlot;
+ };
+
+ int getUniformSlotCount() const { return fUniformSlotCount; }
+ int getUniformCount() const { return fUniforms.size(); }
+ int getUniformLocation(const char* name) const {
+ for (int i = 0; i < (int)fUniforms.size(); ++i) {
+ if (fUniforms[i].fName == name) {
+ return fUniforms[i].fSlot;
+ }
+ }
+ return -1;
+ }
+ const Uniform& getUniform(int i) const { return fUniforms[i]; }
+
+private:
+ ByteCode(const ByteCode&) = delete;
+ ByteCode& operator=(const ByteCode&) = delete;
+
+ friend class ByteCodeGenerator;
+ friend struct Interpreter;
+
+ int fGlobalSlotCount = 0;
+ int fUniformSlotCount = 0;
+ std::vector<Uniform> fUniforms;
+
+ std::vector<std::unique_ptr<ByteCodeFunction>> fFunctions;
+ std::vector<ExternalValue*> fExternalValues;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.cpp
new file mode 100644
index 0000000000..560a3a76b8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.cpp
@@ -0,0 +1,1665 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLByteCodeGenerator.h"
+
+#include <algorithm>
+
+namespace SkSL {
+
+static TypeCategory type_category(const Type& type) {
+ switch (type.kind()) {
+ case Type::Kind::kVector_Kind:
+ case Type::Kind::kMatrix_Kind:
+ return type_category(type.componentType());
+ default:
+ if (type.fName == "bool") {
+ return TypeCategory::kBool;
+ } else if (type.fName == "int" || type.fName == "short") {
+ return TypeCategory::kSigned;
+ } else if (type.fName == "uint" || type.fName == "ushort") {
+ return TypeCategory::kUnsigned;
+ } else {
+ SkASSERT(type.fName == "float" || type.fName == "half");
+ return TypeCategory::kFloat;
+ }
+ ABORT("unsupported type: %s\n", type.description().c_str());
+ }
+}
+
+
+ByteCodeGenerator::ByteCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ ByteCode* output)
+ : INHERITED(program, errors, nullptr)
+ , fContext(*context)
+ , fOutput(output)
+ , fIntrinsics {
+ { "cos", ByteCodeInstruction::kCos },
+ { "dot", SpecialIntrinsic::kDot },
+ { "inverse", ByteCodeInstruction::kInverse2x2 },
+ { "sin", ByteCodeInstruction::kSin },
+ { "sqrt", ByteCodeInstruction::kSqrt },
+ { "tan", ByteCodeInstruction::kTan },
+ } {}
+
+
+int ByteCodeGenerator::SlotCount(const Type& type) {
+ if (type.kind() == Type::kOther_Kind) {
+ return 0;
+ } else if (type.kind() == Type::kStruct_Kind) {
+ int slots = 0;
+ for (const auto& f : type.fields()) {
+ slots += SlotCount(*f.fType);
+ }
+ SkASSERT(slots <= 255);
+ return slots;
+ } else if (type.kind() == Type::kArray_Kind) {
+ int columns = type.columns();
+ SkASSERT(columns >= 0);
+ int slots = columns * SlotCount(type.componentType());
+ SkASSERT(slots <= 255);
+ return slots;
+ } else {
+ return type.columns() * type.rows();
+ }
+}
+
+static inline bool is_uniform(const SkSL::Variable& var) {
+ return var.fModifiers.fFlags & Modifiers::kUniform_Flag;
+}
+
+void ByteCodeGenerator::gatherUniforms(const Type& type, const String& name) {
+ if (type.kind() == Type::kOther_Kind) {
+ return;
+ } else if (type.kind() == Type::kStruct_Kind) {
+ for (const auto& f : type.fields()) {
+ this->gatherUniforms(*f.fType, name + "." + f.fName);
+ }
+ } else if (type.kind() == Type::kArray_Kind) {
+ for (int i = 0; i < type.columns(); ++i) {
+ this->gatherUniforms(type.componentType(), String::printf("%s[%d]", name.c_str(), i));
+ }
+ } else {
+ fOutput->fUniforms.push_back({ name, type_category(type), type.rows(), type.columns(),
+ fOutput->fUniformSlotCount });
+ fOutput->fUniformSlotCount += type.columns() * type.rows();
+ }
+}
+
+bool ByteCodeGenerator::generateCode() {
+ for (const auto& e : fProgram) {
+ switch (e.fKind) {
+ case ProgramElement::kFunction_Kind: {
+ std::unique_ptr<ByteCodeFunction> f = this->writeFunction((FunctionDefinition&) e);
+ if (!f) {
+ return false;
+ }
+ fOutput->fFunctions.push_back(std::move(f));
+ fFunctions.push_back(&(FunctionDefinition&)e);
+ break;
+ }
+ case ProgramElement::kVar_Kind: {
+ VarDeclarations& decl = (VarDeclarations&) e;
+ for (const auto& v : decl.fVars) {
+ const Variable* declVar = ((VarDeclaration&) *v).fVar;
+ if (declVar->fModifiers.fLayout.fBuiltin >= 0) {
+ continue;
+ }
+ // if you trip this assert, it means the program has raw 'in' variables. You
+ // should either specialize the program (Compiler::specialize) to bake in the
+ // final values of the 'in' variables, or not use 'in' variables (maybe you
+ // meant to use 'uniform' instead?).
+ SkASSERT(!(declVar->fModifiers.fFlags & Modifiers::kIn_Flag));
+ if (is_uniform(*declVar)) {
+ this->gatherUniforms(declVar->fType, declVar->fName);
+ } else {
+ fOutput->fGlobalSlotCount += SlotCount(declVar->fType);
+ }
+ }
+ break;
+ }
+ default:
+ ; // ignore
+ }
+ }
+ return 0 == fErrors.errorCount();
+}
+
+std::unique_ptr<ByteCodeFunction> ByteCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ fFunction = &f;
+ std::unique_ptr<ByteCodeFunction> result(new ByteCodeFunction(&f.fDeclaration));
+ fParameterCount = result->fParameterCount;
+ fLoopCount = fMaxLoopCount = 0;
+ fConditionCount = fMaxConditionCount = 0;
+ fStackCount = fMaxStackCount = 0;
+ fCode = &result->fCode;
+
+ this->writeStatement(*f.fBody);
+ if (0 == fErrors.errorCount()) {
+ SkASSERT(fLoopCount == 0);
+ SkASSERT(fConditionCount == 0);
+ SkASSERT(fStackCount == 0);
+ }
+ this->write(ByteCodeInstruction::kReturn, 0);
+ this->write8(0);
+
+ result->fLocalCount = fLocals.size();
+ result->fConditionCount = fMaxConditionCount;
+ result->fLoopCount = fMaxLoopCount;
+ result->fStackCount = fMaxStackCount;
+
+ const Type& returnType = f.fDeclaration.fReturnType;
+ if (returnType != *fContext.fVoid_Type) {
+ result->fReturnCount = SlotCount(returnType);
+ }
+ fLocals.clear();
+ fFunction = nullptr;
+ return result;
+}
+
+// A "simple" Swizzle is one that is based on a variable (or a compound variable like a struct
+// or array) and that references consecutive values, such that it can be implemented using
+// normal load/store ops with an offset. Note that all single-component swizzles (of suitable
+// base types) are simple.
+static bool swizzle_is_simple(const Swizzle& s) {
+ switch (s.fBase->fKind) {
+ case Expression::kFieldAccess_Kind:
+ case Expression::kIndex_Kind:
+ case Expression::kVariableReference_Kind:
+ break;
+ default:
+ return false;
+ }
+
+ for (size_t i = 1; i < s.fComponents.size(); ++i) {
+ if (s.fComponents[i] != s.fComponents[i - 1] + 1) {
+ return false;
+ }
+ }
+ return true;
+}
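+// For example, on a float4: .y and .yzw are simple (consecutive components, so just
+// an offset from the base), while .yx and .xz are not.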
+
+int ByteCodeGenerator::StackUsage(ByteCodeInstruction inst, int count_) {
+ // Ensures that we use count iff we're passed a non-default value. Most instructions have an
+ // implicit count, so the caller shouldn't need to worry about it (or count makes no sense).
+    // The asserts avoid callers thinking they're supplying useful information in that scenario,
+ // or failing to supply necessary information for the ops that need a count.
+ struct CountValue {
+ operator int() {
+ SkASSERT(val != ByteCodeGenerator::kUnusedStackCount);
+ SkDEBUGCODE(used = true);
+ return val;
+ }
+ ~CountValue() {
+ SkASSERT(used || val == ByteCodeGenerator::kUnusedStackCount);
+ }
+ int val;
+ SkDEBUGCODE(bool used = false;)
+ } count = { count_ };
+
+ switch (inst) {
+ // Unary functions/operators that don't change stack depth at all:
+#define VECTOR_UNARY_OP(base) \
+ case ByteCodeInstruction::base: \
+ case ByteCodeInstruction::base ## 2: \
+ case ByteCodeInstruction::base ## 3: \
+ case ByteCodeInstruction::base ## 4: \
+ return 0;
+
+ VECTOR_UNARY_OP(kConvertFtoI)
+ VECTOR_UNARY_OP(kConvertStoF)
+ VECTOR_UNARY_OP(kConvertUtoF)
+
+ VECTOR_UNARY_OP(kCos)
+ VECTOR_UNARY_OP(kSin)
+ VECTOR_UNARY_OP(kSqrt)
+ VECTOR_UNARY_OP(kTan)
+
+ VECTOR_UNARY_OP(kNegateF)
+ VECTOR_UNARY_OP(kNegateI)
+
+ case ByteCodeInstruction::kInverse2x2:
+ case ByteCodeInstruction::kInverse3x3:
+ case ByteCodeInstruction::kInverse4x4: return 0;
+
+ case ByteCodeInstruction::kClampIndex: return 0;
+ case ByteCodeInstruction::kNotB: return 0;
+ case ByteCodeInstruction::kNegateFN: return 0;
+ case ByteCodeInstruction::kShiftLeft: return 0;
+ case ByteCodeInstruction::kShiftRightS: return 0;
+ case ByteCodeInstruction::kShiftRightU: return 0;
+
+#undef VECTOR_UNARY_OP
+
+ // Binary functions/operators that do a 2 -> 1 reduction (possibly N times)
+#define VECTOR_BINARY_OP(base) \
+ case ByteCodeInstruction::base: return -1; \
+ case ByteCodeInstruction::base ## 2: return -2; \
+ case ByteCodeInstruction::base ## 3: return -3; \
+ case ByteCodeInstruction::base ## 4: return -4;
+
+#define VECTOR_MATRIX_BINARY_OP(base) \
+ VECTOR_BINARY_OP(base) \
+ case ByteCodeInstruction::base ## N: return -count;
+
+ case ByteCodeInstruction::kAndB: return -1;
+ case ByteCodeInstruction::kOrB: return -1;
+ case ByteCodeInstruction::kXorB: return -1;
+
+ VECTOR_BINARY_OP(kAddI)
+ VECTOR_MATRIX_BINARY_OP(kAddF)
+
+ VECTOR_BINARY_OP(kCompareIEQ)
+ VECTOR_MATRIX_BINARY_OP(kCompareFEQ)
+ VECTOR_BINARY_OP(kCompareINEQ)
+ VECTOR_MATRIX_BINARY_OP(kCompareFNEQ)
+ VECTOR_BINARY_OP(kCompareSGT)
+ VECTOR_BINARY_OP(kCompareUGT)
+ VECTOR_BINARY_OP(kCompareFGT)
+ VECTOR_BINARY_OP(kCompareSGTEQ)
+ VECTOR_BINARY_OP(kCompareUGTEQ)
+ VECTOR_BINARY_OP(kCompareFGTEQ)
+ VECTOR_BINARY_OP(kCompareSLT)
+ VECTOR_BINARY_OP(kCompareULT)
+ VECTOR_BINARY_OP(kCompareFLT)
+ VECTOR_BINARY_OP(kCompareSLTEQ)
+ VECTOR_BINARY_OP(kCompareULTEQ)
+ VECTOR_BINARY_OP(kCompareFLTEQ)
+
+ VECTOR_BINARY_OP(kDivideS)
+ VECTOR_BINARY_OP(kDivideU)
+ VECTOR_MATRIX_BINARY_OP(kDivideF)
+ VECTOR_BINARY_OP(kMultiplyI)
+ VECTOR_MATRIX_BINARY_OP(kMultiplyF)
+ VECTOR_BINARY_OP(kRemainderF)
+ VECTOR_BINARY_OP(kRemainderS)
+ VECTOR_BINARY_OP(kRemainderU)
+ VECTOR_BINARY_OP(kSubtractI)
+ VECTOR_MATRIX_BINARY_OP(kSubtractF)
+
+#undef VECTOR_BINARY_OP
+#undef VECTOR_MATRIX_BINARY_OP
+
+ // Ops that push or load data to grow the stack:
+ case ByteCodeInstruction::kDup:
+ case ByteCodeInstruction::kLoad:
+ case ByteCodeInstruction::kLoadGlobal:
+ case ByteCodeInstruction::kLoadUniform:
+ case ByteCodeInstruction::kReadExternal:
+ case ByteCodeInstruction::kPushImmediate:
+ return 1;
+
+ case ByteCodeInstruction::kDup2:
+ case ByteCodeInstruction::kLoad2:
+ case ByteCodeInstruction::kLoadGlobal2:
+ case ByteCodeInstruction::kLoadUniform2:
+ case ByteCodeInstruction::kReadExternal2:
+ return 2;
+
+ case ByteCodeInstruction::kDup3:
+ case ByteCodeInstruction::kLoad3:
+ case ByteCodeInstruction::kLoadGlobal3:
+ case ByteCodeInstruction::kLoadUniform3:
+ case ByteCodeInstruction::kReadExternal3:
+ return 3;
+
+ case ByteCodeInstruction::kDup4:
+ case ByteCodeInstruction::kLoad4:
+ case ByteCodeInstruction::kLoadGlobal4:
+ case ByteCodeInstruction::kLoadUniform4:
+ case ByteCodeInstruction::kReadExternal4:
+ return 4;
+
+ case ByteCodeInstruction::kDupN:
+ case ByteCodeInstruction::kLoadSwizzle:
+ case ByteCodeInstruction::kLoadSwizzleGlobal:
+ case ByteCodeInstruction::kLoadSwizzleUniform:
+ return count;
+
+ // Pushes 'count' values, minus one for the 'address' that's consumed first
+ case ByteCodeInstruction::kLoadExtended:
+ case ByteCodeInstruction::kLoadExtendedGlobal:
+ case ByteCodeInstruction::kLoadExtendedUniform:
+ return count - 1;
+
+ // Ops that pop or store data to shrink the stack:
+ case ByteCodeInstruction::kPop:
+ case ByteCodeInstruction::kStore:
+ case ByteCodeInstruction::kStoreGlobal:
+ case ByteCodeInstruction::kWriteExternal:
+ return -1;
+
+ case ByteCodeInstruction::kPop2:
+ case ByteCodeInstruction::kStore2:
+ case ByteCodeInstruction::kStoreGlobal2:
+ case ByteCodeInstruction::kWriteExternal2:
+ return -2;
+
+ case ByteCodeInstruction::kPop3:
+ case ByteCodeInstruction::kStore3:
+ case ByteCodeInstruction::kStoreGlobal3:
+ case ByteCodeInstruction::kWriteExternal3:
+ return -3;
+
+ case ByteCodeInstruction::kPop4:
+ case ByteCodeInstruction::kStore4:
+ case ByteCodeInstruction::kStoreGlobal4:
+ case ByteCodeInstruction::kWriteExternal4:
+ return -4;
+
+ case ByteCodeInstruction::kPopN:
+ case ByteCodeInstruction::kStoreSwizzle:
+ case ByteCodeInstruction::kStoreSwizzleGlobal:
+ return -count;
+
+ // Consumes 'count' values, plus one for the 'address'
+ case ByteCodeInstruction::kStoreExtended:
+ case ByteCodeInstruction::kStoreExtendedGlobal:
+ case ByteCodeInstruction::kStoreSwizzleIndirect:
+ case ByteCodeInstruction::kStoreSwizzleIndirectGlobal:
+ return -count - 1;
+
+ // Strange ops where the caller computes the delta for us:
+ case ByteCodeInstruction::kCallExternal:
+ case ByteCodeInstruction::kMatrixToMatrix:
+ case ByteCodeInstruction::kMatrixMultiply:
+ case ByteCodeInstruction::kReserve:
+ case ByteCodeInstruction::kReturn:
+ case ByteCodeInstruction::kScalarToMatrix:
+ case ByteCodeInstruction::kSwizzle:
+ return count;
+
+ // Miscellaneous
+
+ // kCall is net-zero. Max stack depth is adjusted in writeFunctionCall.
+ case ByteCodeInstruction::kCall: return 0;
+ case ByteCodeInstruction::kBranch: return 0;
+ case ByteCodeInstruction::kBranchIfAllFalse: return 0;
+
+ case ByteCodeInstruction::kMaskPush: return -1;
+ case ByteCodeInstruction::kMaskPop: return 0;
+ case ByteCodeInstruction::kMaskNegate: return 0;
+ case ByteCodeInstruction::kMaskBlend: return -count;
+
+ case ByteCodeInstruction::kLoopBegin: return 0;
+ case ByteCodeInstruction::kLoopNext: return 0;
+ case ByteCodeInstruction::kLoopMask: return -1;
+ case ByteCodeInstruction::kLoopEnd: return 0;
+ case ByteCodeInstruction::kLoopBreak: return 0;
+ case ByteCodeInstruction::kLoopContinue: return 0;
+
+ default:
+ ABORT("unsupported instruction %d\n", (int)inst);
+ return 0;
+ }
+}
+
+ByteCodeGenerator::Location ByteCodeGenerator::getLocation(const Variable& var) {
+ // given that we seldom have more than a couple of variables, linear search is probably the most
+ // efficient way to handle lookups
+ switch (var.fStorage) {
+ case Variable::kLocal_Storage: {
+ for (int i = fLocals.size() - 1; i >= 0; --i) {
+ if (fLocals[i] == &var) {
+ SkASSERT(fParameterCount + i <= 255);
+ return { fParameterCount + i, Storage::kLocal };
+ }
+ }
+ int result = fParameterCount + fLocals.size();
+ fLocals.push_back(&var);
+ for (int i = 0; i < SlotCount(var.fType) - 1; ++i) {
+ fLocals.push_back(nullptr);
+ }
+ SkASSERT(result <= 255);
+ return { result, Storage::kLocal };
+ }
+ case Variable::kParameter_Storage: {
+ int offset = 0;
+ for (const auto& p : fFunction->fDeclaration.fParameters) {
+ if (p == &var) {
+ SkASSERT(offset <= 255);
+ return { offset, Storage::kLocal };
+ }
+ offset += SlotCount(p->fType);
+ }
+ SkASSERT(false);
+ return Location::MakeInvalid();
+ }
+ case Variable::kGlobal_Storage: {
+ int offset = 0;
+ bool isUniform = is_uniform(var);
+ for (const auto& e : fProgram) {
+ if (e.fKind == ProgramElement::kVar_Kind) {
+ VarDeclarations& decl = (VarDeclarations&) e;
+ for (const auto& v : decl.fVars) {
+ const Variable* declVar = ((VarDeclaration&) *v).fVar;
+ if (declVar->fModifiers.fLayout.fBuiltin >= 0) {
+ continue;
+ }
+ if (isUniform != is_uniform(*declVar)) {
+ continue;
+ }
+ if (declVar == &var) {
+ SkASSERT(offset <= 255);
+ return { offset, isUniform ? Storage::kUniform : Storage::kGlobal };
+ }
+ offset += SlotCount(declVar->fType);
+ }
+ }
+ }
+ SkASSERT(false);
+ return Location::MakeInvalid();
+ }
+ default:
+ SkASSERT(false);
+ return Location::MakeInvalid();
+ }
+}
+
+ByteCodeGenerator::Location ByteCodeGenerator::getLocation(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kFieldAccess_Kind: {
+ const FieldAccess& f = (const FieldAccess&)expr;
+ Location baseLoc = this->getLocation(*f.fBase);
+ int offset = 0;
+ for (int i = 0; i < f.fFieldIndex; ++i) {
+ offset += SlotCount(*f.fBase->fType.fields()[i].fType);
+ }
+ if (baseLoc.isOnStack()) {
+ if (offset != 0) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(offset);
+ this->write(ByteCodeInstruction::kAddI);
+ this->write8(1);
+ }
+ return baseLoc;
+ } else {
+ return baseLoc + offset;
+ }
+ }
+ case Expression::kIndex_Kind: {
+ const IndexExpression& i = (const IndexExpression&)expr;
+ int stride = SlotCount(i.fType);
+ int length = i.fBase->fType.columns();
+ SkASSERT(length <= 255);
+ int offset = -1;
+ if (i.fIndex->isConstant()) {
+ int64_t index = i.fIndex->getConstantInt();
+ if (index < 0 || index >= length) {
+ fErrors.error(i.fIndex->fOffset, "Array index out of bounds.");
+ return Location::MakeInvalid();
+ }
+ offset = index * stride;
+ } else {
+ if (i.fIndex->hasSideEffects()) {
+ // Having a side-effect in an indexer is technically safe for an rvalue,
+ // but with lvalues we have to evaluate the indexer twice, so make it an error.
+ fErrors.error(i.fIndex->fOffset,
+ "Index expressions with side-effects not supported in byte code.");
+ return Location::MakeInvalid();
+ }
+ this->writeExpression(*i.fIndex);
+ this->write(ByteCodeInstruction::kClampIndex);
+ this->write8(length);
+ if (stride != 1) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(stride);
+ this->write(ByteCodeInstruction::kMultiplyI);
+ this->write8(1);
+ }
+ }
+ Location baseLoc = this->getLocation(*i.fBase);
+
+ // Are both components known statically?
+ if (!baseLoc.isOnStack() && offset >= 0) {
+ return baseLoc + offset;
+ }
+
+ // At least one component is dynamic (and on the stack).
+
+ // If the other component is zero, we're done
+ if (baseLoc.fSlot == 0 || offset == 0) {
+ return baseLoc.makeOnStack();
+ }
+
+ // Push the non-dynamic component (if any) to the stack, then add the two
+ if (!baseLoc.isOnStack()) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(baseLoc.fSlot);
+ }
+ if (offset >= 0) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(offset);
+ }
+ this->write(ByteCodeInstruction::kAddI);
+ this->write8(1);
+ return baseLoc.makeOnStack();
+ }
+ case Expression::kSwizzle_Kind: {
+ const Swizzle& s = (const Swizzle&)expr;
+ SkASSERT(swizzle_is_simple(s));
+ Location baseLoc = this->getLocation(*s.fBase);
+ int offset = s.fComponents[0];
+ if (baseLoc.isOnStack()) {
+ if (offset != 0) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(offset);
+ this->write(ByteCodeInstruction::kAddI);
+ this->write8(1);
+ }
+ return baseLoc;
+ } else {
+ return baseLoc + offset;
+ }
+ }
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((const VariableReference&)expr).fVariable;
+ return this->getLocation(var);
+ }
+ default:
+ SkASSERT(false);
+ return Location::MakeInvalid();
+ }
+}
+
+void ByteCodeGenerator::write8(uint8_t b) {
+ fCode->push_back(b);
+}
+
+void ByteCodeGenerator::write16(uint16_t i) {
+ size_t n = fCode->size();
+ fCode->resize(n+2);
+ memcpy(fCode->data() + n, &i, 2);
+}
+
+void ByteCodeGenerator::write32(uint32_t i) {
+ size_t n = fCode->size();
+ fCode->resize(n+4);
+ memcpy(fCode->data() + n, &i, 4);
+}
+
+void ByteCodeGenerator::write(ByteCodeInstruction i, int count) {
+ switch (i) {
+ case ByteCodeInstruction::kLoopBegin: this->enterLoop(); break;
+ case ByteCodeInstruction::kLoopEnd: this->exitLoop(); break;
+
+ case ByteCodeInstruction::kMaskPush: this->enterCondition(); break;
+ case ByteCodeInstruction::kMaskPop:
+ case ByteCodeInstruction::kMaskBlend: this->exitCondition(); break;
+ default: /* Do nothing */ break;
+ }
+ instruction val = (instruction) i;
+ size_t n = fCode->size();
+ fCode->resize(n + sizeof(val));
+ memcpy(fCode->data() + n, &val, sizeof(val));
+ fStackCount += StackUsage(i, count);
+ fMaxStackCount = std::max(fMaxStackCount, fStackCount);
+}
+
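+// Maps 'base' (the scalar form of an instruction) to its count-wide variant. This relies on the
+// instruction enum declaring each family widest-first (e.g. kAddF4, kAddF3, kAddF2, kAddF), so
+// the scalar opcode has the highest value within its family.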
+static ByteCodeInstruction vector_instruction(ByteCodeInstruction base, int count) {
+ SkASSERT(count >= 1 && count <= 4);
+ return ((ByteCodeInstruction) ((int) base + 1 - count));
+}
+
+void ByteCodeGenerator::writeTypedInstruction(const Type& type, ByteCodeInstruction s,
+ ByteCodeInstruction u, ByteCodeInstruction f,
+ int count, bool writeCount) {
+ switch (type_category(type)) {
+ case TypeCategory::kSigned:
+ this->write(vector_instruction(s, count));
+ break;
+ case TypeCategory::kUnsigned:
+ this->write(vector_instruction(u, count));
+ break;
+ case TypeCategory::kFloat: {
+ if (count > 4) {
+ this->write((ByteCodeInstruction)((int)f + 1), count);
+ } else {
+ this->write(vector_instruction(f, count));
+ }
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ if (writeCount) {
+ this->write8(count);
+ }
+}
+
+bool ByteCodeGenerator::writeBinaryExpression(const BinaryExpression& b, bool discard) {
+ if (b.fOperator == Token::Kind::EQ) {
+ std::unique_ptr<LValue> lvalue = this->getLValue(*b.fLeft);
+ this->writeExpression(*b.fRight);
+ lvalue->store(discard);
+ discard = false;
+ return discard;
+ }
+ const Type& lType = b.fLeft->fType;
+ const Type& rType = b.fRight->fType;
+ bool lVecOrMtx = (lType.kind() == Type::kVector_Kind || lType.kind() == Type::kMatrix_Kind);
+ bool rVecOrMtx = (rType.kind() == Type::kVector_Kind || rType.kind() == Type::kMatrix_Kind);
+ Token::Kind op;
+ std::unique_ptr<LValue> lvalue;
+ if (is_assignment(b.fOperator)) {
+ lvalue = this->getLValue(*b.fLeft);
+ lvalue->load();
+ op = remove_assignment(b.fOperator);
+ } else {
+ this->writeExpression(*b.fLeft);
+ op = b.fOperator;
+ if (!lVecOrMtx && rVecOrMtx) {
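+            // Splat the scalar left operand across the vector width, so that e.g.
+            // 'float + float4' is applied componentwise.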
+ for (int i = SlotCount(rType); i > 1; --i) {
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ }
+ }
+ }
+ int count = std::max(SlotCount(lType), SlotCount(rType));
+ SkDEBUGCODE(TypeCategory tc = type_category(lType));
+ switch (op) {
+ case Token::Kind::LOGICALAND: {
+ SkASSERT(tc == SkSL::TypeCategory::kBool && count == 1);
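+            // Lane-wise short-circuit: the left value becomes the execution mask; the RHS is
+            // skipped only when every lane is false, and otherwise runs masked and is AND-ed in.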
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ this->write(ByteCodeInstruction::kMaskPush);
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation falseLocation(this);
+ this->writeExpression(*b.fRight);
+ this->write(ByteCodeInstruction::kAndB);
+ falseLocation.set();
+ this->write(ByteCodeInstruction::kMaskPop);
+ return false;
+ }
+ case Token::Kind::LOGICALOR: {
+ SkASSERT(tc == SkSL::TypeCategory::kBool && count == 1);
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ this->write(ByteCodeInstruction::kNotB);
+ this->write(ByteCodeInstruction::kMaskPush);
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation falseLocation(this);
+ this->writeExpression(*b.fRight);
+ this->write(ByteCodeInstruction::kOrB);
+ falseLocation.set();
+ this->write(ByteCodeInstruction::kMaskPop);
+ return false;
+ }
+ case Token::Kind::SHL:
+ case Token::Kind::SHR: {
+ SkASSERT(count == 1 && (tc == SkSL::TypeCategory::kSigned ||
+ tc == SkSL::TypeCategory::kUnsigned));
+ if (!b.fRight->isConstant()) {
+ fErrors.error(b.fRight->fOffset, "Shift amounts must be constant");
+ return false;
+ }
+ int64_t shift = b.fRight->getConstantInt();
+ if (shift < 0 || shift > 31) {
+ fErrors.error(b.fRight->fOffset, "Shift amount out of range");
+ return false;
+ }
+
+ if (op == Token::Kind::SHL) {
+ this->write(ByteCodeInstruction::kShiftLeft);
+ } else {
+ this->write(type_category(lType) == TypeCategory::kSigned
+ ? ByteCodeInstruction::kShiftRightS
+ : ByteCodeInstruction::kShiftRightU);
+ }
+ this->write8(shift);
+ return false;
+ }
+
+ default:
+ break;
+ }
+ this->writeExpression(*b.fRight);
+ if (lVecOrMtx && !rVecOrMtx) {
+ for (int i = SlotCount(lType); i > 1; --i) {
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ }
+ }
+ // Special case for M*V, V*M, M*M (but not V*V!)
+ if (op == Token::Kind::STAR && lVecOrMtx && rVecOrMtx &&
+ !(lType.kind() == Type::kVector_Kind && rType.kind() == Type::kVector_Kind)) {
+ this->write(ByteCodeInstruction::kMatrixMultiply,
+ SlotCount(b.fType) - (SlotCount(lType) + SlotCount(rType)));
+ int rCols = rType.columns(),
+ rRows = rType.rows(),
+ lCols = lType.columns(),
+ lRows = lType.rows();
+ // M*V treats the vector as a column
+ if (rType.kind() == Type::kVector_Kind) {
+ std::swap(rCols, rRows);
+ }
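+        // e.g. float3x3 * float3: the vector acts as a 3x1 column (rCols=1, rRows=3), so
+        // lCols == rRows == 3 and the result occupies lRows * rCols == 3 slots.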
+ SkASSERT(lCols == rRows);
+ SkASSERT(SlotCount(b.fType) == lRows * rCols);
+ this->write8(lCols);
+ this->write8(lRows);
+ this->write8(rCols);
+ } else {
+ switch (op) {
+ case Token::Kind::EQEQ:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareIEQ,
+ ByteCodeInstruction::kCompareIEQ,
+ ByteCodeInstruction::kCompareFEQ,
+ count);
+ // Collapse to a single bool
+ for (int i = count; i > 1; --i) {
+ this->write(ByteCodeInstruction::kAndB);
+ }
+ break;
+ case Token::Kind::GT:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareSGT,
+ ByteCodeInstruction::kCompareUGT,
+ ByteCodeInstruction::kCompareFGT,
+ count);
+ break;
+ case Token::Kind::GTEQ:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareSGTEQ,
+ ByteCodeInstruction::kCompareUGTEQ,
+ ByteCodeInstruction::kCompareFGTEQ,
+ count);
+ break;
+ case Token::Kind::LT:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareSLT,
+ ByteCodeInstruction::kCompareULT,
+ ByteCodeInstruction::kCompareFLT,
+ count);
+ break;
+ case Token::Kind::LTEQ:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareSLTEQ,
+ ByteCodeInstruction::kCompareULTEQ,
+ ByteCodeInstruction::kCompareFLTEQ,
+ count);
+ break;
+ case Token::Kind::MINUS:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractF,
+ count);
+ break;
+ case Token::Kind::NEQ:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kCompareINEQ,
+ ByteCodeInstruction::kCompareINEQ,
+ ByteCodeInstruction::kCompareFNEQ,
+ count);
+ // Collapse to a single bool
+ for (int i = count; i > 1; --i) {
+ this->write(ByteCodeInstruction::kOrB);
+ }
+ break;
+ case Token::Kind::PERCENT:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kRemainderS,
+ ByteCodeInstruction::kRemainderU,
+ ByteCodeInstruction::kRemainderF,
+ count);
+ break;
+ case Token::Kind::PLUS:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddF,
+ count);
+ break;
+ case Token::Kind::SLASH:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kDivideS,
+ ByteCodeInstruction::kDivideU,
+ ByteCodeInstruction::kDivideF,
+ count);
+ break;
+ case Token::Kind::STAR:
+ this->writeTypedInstruction(lType, ByteCodeInstruction::kMultiplyI,
+ ByteCodeInstruction::kMultiplyI,
+ ByteCodeInstruction::kMultiplyF,
+ count);
+ break;
+
+ case Token::Kind::LOGICALXOR:
+ SkASSERT(tc == SkSL::TypeCategory::kBool && count == 1);
+ this->write(ByteCodeInstruction::kXorB);
+ break;
+
+ case Token::Kind::BITWISEAND:
+ SkASSERT(count == 1 && (tc == SkSL::TypeCategory::kSigned ||
+ tc == SkSL::TypeCategory::kUnsigned));
+ this->write(ByteCodeInstruction::kAndB);
+ break;
+ case Token::Kind::BITWISEOR:
+ SkASSERT(count == 1 && (tc == SkSL::TypeCategory::kSigned ||
+ tc == SkSL::TypeCategory::kUnsigned));
+ this->write(ByteCodeInstruction::kOrB);
+ break;
+ case Token::Kind::BITWISEXOR:
+ SkASSERT(count == 1 && (tc == SkSL::TypeCategory::kSigned ||
+ tc == SkSL::TypeCategory::kUnsigned));
+ this->write(ByteCodeInstruction::kXorB);
+ break;
+
+ default:
+ fErrors.error(b.fOffset, SkSL::String::printf("Unsupported binary operator '%s'",
+ Compiler::OperatorName(op)));
+ break;
+ }
+ }
+ if (lvalue) {
+ lvalue->store(discard);
+ discard = false;
+ }
+ return discard;
+}
+
+void ByteCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
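+    // 'true' is encoded as ~0 (all bits set), so a bool can double as a full lane mask for the
+    // kMask* instructions.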
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(b.fValue ? ~0 : 0);
+}
+
+void ByteCodeGenerator::writeConstructor(const Constructor& c) {
+ for (const auto& arg : c.fArguments) {
+ this->writeExpression(*arg);
+ }
+ if (c.fArguments.size() == 1) {
+ const Type& inType = c.fArguments[0]->fType;
+ const Type& outType = c.fType;
+ TypeCategory inCategory = type_category(inType);
+ TypeCategory outCategory = type_category(outType);
+ int inCount = SlotCount(inType);
+ int outCount = SlotCount(outType);
+ if (inCategory != outCategory) {
+ SkASSERT(inCount == outCount);
+ if (inCategory == TypeCategory::kFloat) {
+ SkASSERT(outCategory == TypeCategory::kSigned ||
+ outCategory == TypeCategory::kUnsigned);
+ this->write(vector_instruction(ByteCodeInstruction::kConvertFtoI, outCount));
+ } else if (outCategory == TypeCategory::kFloat) {
+ if (inCategory == TypeCategory::kSigned) {
+ this->write(vector_instruction(ByteCodeInstruction::kConvertStoF, outCount));
+ } else {
+ SkASSERT(inCategory == TypeCategory::kUnsigned);
+ this->write(vector_instruction(ByteCodeInstruction::kConvertUtoF, outCount));
+ }
+ } else {
+ SkASSERT(false);
+ }
+ }
+ if (inType.kind() == Type::kMatrix_Kind && outType.kind() == Type::kMatrix_Kind) {
+ this->write(ByteCodeInstruction::kMatrixToMatrix,
+ SlotCount(outType) - SlotCount(inType));
+ this->write8(inType.columns());
+ this->write8(inType.rows());
+ this->write8(outType.columns());
+ this->write8(outType.rows());
+ } else if (inCount != outCount) {
+ SkASSERT(inCount == 1);
+ if (outType.kind() == Type::kMatrix_Kind) {
+ this->write(ByteCodeInstruction::kScalarToMatrix, SlotCount(outType) - 1);
+ this->write8(outType.columns());
+ this->write8(outType.rows());
+ } else {
+ SkASSERT(outType.kind() == Type::kVector_Kind);
+ for (; inCount != outCount; ++inCount) {
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ }
+ }
+ }
+ }
+}
+
+void ByteCodeGenerator::writeExternalFunctionCall(const ExternalFunctionCall& f) {
+ int argumentCount = 0;
+ for (const auto& arg : f.fArguments) {
+ this->writeExpression(*arg);
+ argumentCount += SlotCount(arg->fType);
+ }
+ this->write(ByteCodeInstruction::kCallExternal, SlotCount(f.fType) - argumentCount);
+ SkASSERT(argumentCount <= 255);
+ this->write8(argumentCount);
+ this->write8(SlotCount(f.fType));
+ int index = fOutput->fExternalValues.size();
+ fOutput->fExternalValues.push_back(f.fFunction);
+ SkASSERT(index <= 255);
+ this->write8(index);
+}
+
+void ByteCodeGenerator::writeExternalValue(const ExternalValueReference& e) {
+ int count = SlotCount(e.fValue->type());
+ this->write(vector_instruction(ByteCodeInstruction::kReadExternal, count));
+ this->write8(count);
+ int index = fOutput->fExternalValues.size();
+ fOutput->fExternalValues.push_back(e.fValue);
+ SkASSERT(index <= 255);
+ this->write8(index);
+}
+
+void ByteCodeGenerator::writeVariableExpression(const Expression& expr) {
+ Location location = this->getLocation(expr);
+ int count = SlotCount(expr.fType);
+ if (location.isOnStack() || count > 4) {
+ if (!location.isOnStack()) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(location.fSlot);
+ }
+ this->write(location.selectLoad(ByteCodeInstruction::kLoadExtended,
+ ByteCodeInstruction::kLoadExtendedGlobal,
+ ByteCodeInstruction::kLoadExtendedUniform),
+ count);
+ this->write8(count);
+ } else {
+ this->write(vector_instruction(location.selectLoad(ByteCodeInstruction::kLoad,
+ ByteCodeInstruction::kLoadGlobal,
+ ByteCodeInstruction::kLoadUniform),
+ count));
+ this->write8(count);
+ this->write8(location.fSlot);
+ }
+}
+
+static inline uint32_t float_to_bits(float x) {
+ uint32_t u;
+ memcpy(&u, &x, sizeof(uint32_t));
+ return u;
+}
+
+void ByteCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(float_to_bits(f.fValue));
+}
+
+void ByteCodeGenerator::writeIntrinsicCall(const FunctionCall& c) {
+ auto found = fIntrinsics.find(c.fFunction.fName);
+ if (found == fIntrinsics.end()) {
+ fErrors.error(c.fOffset, "unsupported intrinsic function");
+ return;
+ }
+ int count = SlotCount(c.fArguments[0]->fType);
+ if (found->second.fIsSpecial) {
+ SpecialIntrinsic special = found->second.fValue.fSpecial;
+ switch (special) {
+ case SpecialIntrinsic::kDot: {
+ SkASSERT(c.fArguments.size() == 2);
+ SkASSERT(count == SlotCount(c.fArguments[1]->fType));
+ this->write(vector_instruction(ByteCodeInstruction::kMultiplyF, count));
+ this->write8(count);
+ for (int i = count; i > 1; --i) {
+ this->write(ByteCodeInstruction::kAddF);
+ this->write8(1);
+ }
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ } else {
+ switch (found->second.fValue.fInstruction) {
+ case ByteCodeInstruction::kCos:
+ case ByteCodeInstruction::kSin:
+ case ByteCodeInstruction::kTan:
+ SkASSERT(c.fArguments.size() > 0);
+ this->write(vector_instruction(found->second.fValue.fInstruction, count));
+ this->write8(count);
+ break;
+ case ByteCodeInstruction::kSqrt:
+ SkASSERT(c.fArguments.size() > 0);
+ this->write(vector_instruction(found->second.fValue.fInstruction, count));
+ break;
+ case ByteCodeInstruction::kInverse2x2: {
+ SkASSERT(c.fArguments.size() > 0);
+ auto op = ByteCodeInstruction::kInverse2x2;
+ switch (count) {
+ case 4: break; // float2x2
+ case 9: op = ByteCodeInstruction::kInverse3x3; break;
+ case 16: op = ByteCodeInstruction::kInverse4x4; break;
+ default: SkASSERT(false);
+ }
+ this->write(op);
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ }
+}
+
+void ByteCodeGenerator::writeFunctionCall(const FunctionCall& f) {
+ // Builtins have simple signatures...
+ if (f.fFunction.fBuiltin) {
+ for (const auto& arg : f.fArguments) {
+ this->writeExpression(*arg);
+ }
+ this->writeIntrinsicCall(f);
+ return;
+ }
+
+ // Find the index of the function we're calling. We explicitly do not allow calls to functions
+ // before they're defined. This is an easy-to-understand rule that prevents recursion.
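+    // (A call cycle would require some function to call another defined after it.)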
+ size_t idx;
+ for (idx = 0; idx < fFunctions.size(); ++idx) {
+ if (f.fFunction.matches(fFunctions[idx]->fDeclaration)) {
+ break;
+ }
+ }
+ if (idx > 255) {
+ fErrors.error(f.fOffset, "Function count limit exceeded");
+ return;
+ } else if (idx >= fFunctions.size()) {
+ fErrors.error(f.fOffset, "Call to undefined function");
+ return;
+ }
+
+ // We may need to deal with out parameters, so the sequence is tricky
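+    // Sequence: reserve slots for the return value, push each argument (loading out-params via
+    // their lvalues), emit kCall, then pop the arguments, storing out-params back as we go.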
+ if (int returnCount = SlotCount(f.fType)) {
+ this->write(ByteCodeInstruction::kReserve, returnCount);
+ this->write8(returnCount);
+ }
+
+ int argCount = f.fArguments.size();
+ std::vector<std::unique_ptr<LValue>> lvalues;
+ for (int i = 0; i < argCount; ++i) {
+ const auto& param = f.fFunction.fParameters[i];
+ const auto& arg = f.fArguments[i];
+ if (param->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ lvalues.emplace_back(this->getLValue(*arg));
+ lvalues.back()->load();
+ } else {
+ this->writeExpression(*arg);
+ }
+ }
+
+ // The space used by the call is based on the callee, but it also unwinds all of that before
+ // we continue execution. We adjust our max stack depths below.
+ this->write(ByteCodeInstruction::kCall);
+ this->write8(idx);
+
+ const ByteCodeFunction* callee = fOutput->fFunctions[idx].get();
+ fMaxLoopCount = std::max(fMaxLoopCount, fLoopCount + callee->fLoopCount);
+ fMaxConditionCount = std::max(fMaxConditionCount, fConditionCount + callee->fConditionCount);
+ fMaxStackCount = std::max(fMaxStackCount, fStackCount + callee->fLocalCount
+ + callee->fStackCount);
+
+ // After the called function returns, the stack will still contain our arguments. We have to
+ // pop them (storing any out parameters back to their lvalues as we go). We glob together slot
+ // counts for all parameters that aren't out-params, so we can pop them in one big chunk.
+ int popCount = 0;
+ auto pop = [&]() {
+ if (popCount > 4) {
+ this->write(ByteCodeInstruction::kPopN, popCount);
+ this->write8(popCount);
+ } else if (popCount > 0) {
+ this->write(vector_instruction(ByteCodeInstruction::kPop, popCount));
+ }
+ popCount = 0;
+ };
+
+ for (int i = argCount - 1; i >= 0; --i) {
+ const auto& param = f.fFunction.fParameters[i];
+ const auto& arg = f.fArguments[i];
+ if (param->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ pop();
+ lvalues.back()->store(true);
+ lvalues.pop_back();
+ } else {
+ popCount += SlotCount(arg->fType);
+ }
+ }
+ pop();
+}
+
+void ByteCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(i.fValue);
+}
+
+void ByteCodeGenerator::writeNullLiteral(const NullLiteral& n) {
+ // not yet implemented
+ abort();
+}
+
+bool ByteCodeGenerator::writePrefixExpression(const PrefixExpression& p, bool discard) {
+ switch (p.fOperator) {
+ case Token::Kind::PLUSPLUS: // fall through
+ case Token::Kind::MINUSMINUS: {
+ SkASSERT(SlotCount(p.fOperand->fType) == 1);
+ std::unique_ptr<LValue> lvalue = this->getLValue(*p.fOperand);
+ lvalue->load();
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(type_category(p.fType) == TypeCategory::kFloat ? float_to_bits(1.0f) : 1);
+ if (p.fOperator == Token::Kind::PLUSPLUS) {
+ this->writeTypedInstruction(p.fType,
+ ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddF,
+ 1);
+ } else {
+ this->writeTypedInstruction(p.fType,
+ ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractF,
+ 1);
+ }
+ lvalue->store(discard);
+ discard = false;
+ break;
+ }
+ case Token::Kind::MINUS: {
+ this->writeExpression(*p.fOperand);
+ this->writeTypedInstruction(p.fType,
+ ByteCodeInstruction::kNegateI,
+ ByteCodeInstruction::kNegateI,
+ ByteCodeInstruction::kNegateF,
+ SlotCount(p.fOperand->fType),
+ false);
+ break;
+ }
+ case Token::Kind::LOGICALNOT:
+ case Token::Kind::BITWISENOT: {
+ SkASSERT(SlotCount(p.fOperand->fType) == 1);
+ SkDEBUGCODE(TypeCategory tc = type_category(p.fOperand->fType));
+ SkASSERT((p.fOperator == Token::Kind::LOGICALNOT && tc == TypeCategory::kBool) ||
+ (p.fOperator == Token::Kind::BITWISENOT && (tc == TypeCategory::kSigned ||
+ tc == TypeCategory::kUnsigned)));
+ this->writeExpression(*p.fOperand);
+ this->write(ByteCodeInstruction::kNotB);
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ return discard;
+}
+
+bool ByteCodeGenerator::writePostfixExpression(const PostfixExpression& p, bool discard) {
+ switch (p.fOperator) {
+ case Token::Kind::PLUSPLUS: // fall through
+ case Token::Kind::MINUSMINUS: {
+ SkASSERT(SlotCount(p.fOperand->fType) == 1);
+ std::unique_ptr<LValue> lvalue = this->getLValue(*p.fOperand);
+ lvalue->load();
+ // If we're not supposed to discard the result, then make a copy *before* the +/-
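+            // (e.g. 'y = x++' needs the pre-increment value of x left on the stack).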
+ if (!discard) {
+ this->write(ByteCodeInstruction::kDup);
+ this->write8(1);
+ }
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(type_category(p.fType) == TypeCategory::kFloat ? float_to_bits(1.0f) : 1);
+ if (p.fOperator == Token::Kind::PLUSPLUS) {
+ this->writeTypedInstruction(p.fType,
+ ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddI,
+ ByteCodeInstruction::kAddF,
+ 1);
+ } else {
+ this->writeTypedInstruction(p.fType,
+ ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractI,
+ ByteCodeInstruction::kSubtractF,
+ 1);
+ }
+ // Always consume the result as part of the store
+ lvalue->store(true);
+ discard = false;
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ return discard;
+}
+
+void ByteCodeGenerator::writeSwizzle(const Swizzle& s) {
+ if (swizzle_is_simple(s)) {
+ this->writeVariableExpression(s);
+ return;
+ }
+
+ switch (s.fBase->fKind) {
+ case Expression::kVariableReference_Kind: {
+ Location location = this->getLocation(*s.fBase);
+ this->write(location.selectLoad(ByteCodeInstruction::kLoadSwizzle,
+ ByteCodeInstruction::kLoadSwizzleGlobal,
+ ByteCodeInstruction::kLoadSwizzleUniform),
+ s.fComponents.size());
+ this->write8(location.fSlot);
+ this->write8(s.fComponents.size());
+ for (int c : s.fComponents) {
+ this->write8(c);
+ }
+ break;
+ }
+ default:
+ this->writeExpression(*s.fBase);
+ this->write(ByteCodeInstruction::kSwizzle,
+ s.fComponents.size() - s.fBase->fType.columns());
+ this->write8(s.fBase->fType.columns());
+ this->write8(s.fComponents.size());
+ for (int c : s.fComponents) {
+ this->write8(c);
+ }
+ }
+}
+
+void ByteCodeGenerator::writeTernaryExpression(const TernaryExpression& t) {
+ int count = SlotCount(t.fType);
+ SkASSERT(count == SlotCount(t.fIfTrue->fType));
+ SkASSERT(count == SlotCount(t.fIfFalse->fType));
+
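+    // Both branches are evaluated; the test result is pushed as a mask, negated between the two
+    // branches, and kMaskBlend combines the per-lane results.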
+ this->writeExpression(*t.fTest);
+ this->write(ByteCodeInstruction::kMaskPush);
+ this->writeExpression(*t.fIfTrue);
+ this->write(ByteCodeInstruction::kMaskNegate);
+ this->writeExpression(*t.fIfFalse);
+ this->write(ByteCodeInstruction::kMaskBlend, count);
+ this->write8(count);
+}
+
+void ByteCodeGenerator::writeExpression(const Expression& e, bool discard) {
+ switch (e.fKind) {
+ case Expression::kBinary_Kind:
+ discard = this->writeBinaryExpression((BinaryExpression&) e, discard);
+ break;
+ case Expression::kBoolLiteral_Kind:
+ this->writeBoolLiteral((BoolLiteral&) e);
+ break;
+ case Expression::kConstructor_Kind:
+ this->writeConstructor((Constructor&) e);
+ break;
+ case Expression::kExternalFunctionCall_Kind:
+ this->writeExternalFunctionCall((ExternalFunctionCall&) e);
+ break;
+ case Expression::kExternalValue_Kind:
+ this->writeExternalValue((ExternalValueReference&) e);
+ break;
+ case Expression::kFieldAccess_Kind:
+ case Expression::kIndex_Kind:
+ case Expression::kVariableReference_Kind:
+ this->writeVariableExpression(e);
+ break;
+ case Expression::kFloatLiteral_Kind:
+ this->writeFloatLiteral((FloatLiteral&) e);
+ break;
+ case Expression::kFunctionCall_Kind:
+ this->writeFunctionCall((FunctionCall&) e);
+ break;
+ case Expression::kIntLiteral_Kind:
+ this->writeIntLiteral((IntLiteral&) e);
+ break;
+ case Expression::kNullLiteral_Kind:
+ this->writeNullLiteral((NullLiteral&) e);
+ break;
+ case Expression::kPrefix_Kind:
+ discard = this->writePrefixExpression((PrefixExpression&) e, discard);
+ break;
+ case Expression::kPostfix_Kind:
+ discard = this->writePostfixExpression((PostfixExpression&) e, discard);
+ break;
+ case Expression::kSwizzle_Kind:
+ this->writeSwizzle((Swizzle&) e);
+ break;
+ case Expression::kTernary_Kind:
+ this->writeTernaryExpression((TernaryExpression&) e);
+ break;
+ default:
+ printf("unsupported expression %s\n", e.description().c_str());
+ SkASSERT(false);
+ }
+ if (discard) {
+ int count = SlotCount(e.fType);
+ if (count > 4) {
+ this->write(ByteCodeInstruction::kPopN, count);
+ this->write8(count);
+ } else if (count != 0) {
+ this->write(vector_instruction(ByteCodeInstruction::kPop, count));
+ }
+ discard = false;
+ }
+}
+
+class ByteCodeExternalValueLValue : public ByteCodeGenerator::LValue {
+public:
+ ByteCodeExternalValueLValue(ByteCodeGenerator* generator, ExternalValue& value, int index)
+ : INHERITED(*generator)
+ , fCount(ByteCodeGenerator::SlotCount(value.type()))
+ , fIndex(index) {}
+
+ void load() override {
+ fGenerator.write(vector_instruction(ByteCodeInstruction::kReadExternal, fCount));
+ fGenerator.write8(fCount);
+ fGenerator.write8(fIndex);
+ }
+
+ void store(bool discard) override {
+ if (!discard) {
+ fGenerator.write(vector_instruction(ByteCodeInstruction::kDup, fCount));
+ fGenerator.write8(fCount);
+ }
+ fGenerator.write(vector_instruction(ByteCodeInstruction::kWriteExternal, fCount));
+ fGenerator.write8(fCount);
+ fGenerator.write8(fIndex);
+ }
+
+private:
+ typedef LValue INHERITED;
+
+ int fCount;
+
+ int fIndex;
+};
+
+class ByteCodeSwizzleLValue : public ByteCodeGenerator::LValue {
+public:
+ ByteCodeSwizzleLValue(ByteCodeGenerator* generator, const Swizzle& swizzle)
+ : INHERITED(*generator)
+ , fSwizzle(swizzle) {}
+
+ void load() override {
+ fGenerator.writeSwizzle(fSwizzle);
+ }
+
+ void store(bool discard) override {
+ int count = fSwizzle.fComponents.size();
+ if (!discard) {
+ fGenerator.write(vector_instruction(ByteCodeInstruction::kDup, count));
+ fGenerator.write8(count);
+ }
+ ByteCodeGenerator::Location location = fGenerator.getLocation(*fSwizzle.fBase);
+ if (location.isOnStack()) {
+ fGenerator.write(location.selectStore(ByteCodeInstruction::kStoreSwizzleIndirect,
+ ByteCodeInstruction::kStoreSwizzleIndirectGlobal),
+ count);
+ } else {
+ fGenerator.write(location.selectStore(ByteCodeInstruction::kStoreSwizzle,
+ ByteCodeInstruction::kStoreSwizzleGlobal),
+ count);
+ fGenerator.write8(location.fSlot);
+ }
+ fGenerator.write8(count);
+ for (int c : fSwizzle.fComponents) {
+ fGenerator.write8(c);
+ }
+ }
+
+private:
+ const Swizzle& fSwizzle;
+
+ typedef LValue INHERITED;
+};
+
+class ByteCodeExpressionLValue : public ByteCodeGenerator::LValue {
+public:
+ ByteCodeExpressionLValue(ByteCodeGenerator* generator, const Expression& expr)
+ : INHERITED(*generator)
+ , fExpression(expr) {}
+
+ void load() override {
+ fGenerator.writeVariableExpression(fExpression);
+ }
+
+ void store(bool discard) override {
+ int count = ByteCodeGenerator::SlotCount(fExpression.fType);
+ if (!discard) {
+ if (count > 4) {
+ fGenerator.write(ByteCodeInstruction::kDupN, count);
+ fGenerator.write8(count);
+ } else {
+ fGenerator.write(vector_instruction(ByteCodeInstruction::kDup, count));
+ fGenerator.write8(count);
+ }
+ }
+ ByteCodeGenerator::Location location = fGenerator.getLocation(fExpression);
+ if (location.isOnStack() || count > 4) {
+ if (!location.isOnStack()) {
+ fGenerator.write(ByteCodeInstruction::kPushImmediate);
+ fGenerator.write32(location.fSlot);
+ }
+ fGenerator.write(location.selectStore(ByteCodeInstruction::kStoreExtended,
+ ByteCodeInstruction::kStoreExtendedGlobal),
+ count);
+ fGenerator.write8(count);
+ } else {
+ fGenerator.write(
+ vector_instruction(location.selectStore(ByteCodeInstruction::kStore,
+ ByteCodeInstruction::kStoreGlobal),
+ count));
+ fGenerator.write8(location.fSlot);
+ }
+ }
+
+private:
+ typedef LValue INHERITED;
+
+ const Expression& fExpression;
+};
+
+std::unique_ptr<ByteCodeGenerator::LValue> ByteCodeGenerator::getLValue(const Expression& e) {
+ switch (e.fKind) {
+ case Expression::kExternalValue_Kind: {
+ ExternalValue* value = ((ExternalValueReference&) e).fValue;
+ int index = fOutput->fExternalValues.size();
+ fOutput->fExternalValues.push_back(value);
+ SkASSERT(index <= 255);
+ return std::unique_ptr<LValue>(new ByteCodeExternalValueLValue(this, *value, index));
+ }
+ case Expression::kFieldAccess_Kind:
+ case Expression::kIndex_Kind:
+ case Expression::kVariableReference_Kind:
+ return std::unique_ptr<LValue>(new ByteCodeExpressionLValue(this, e));
+ case Expression::kSwizzle_Kind: {
+ const Swizzle& s = (const Swizzle&) e;
+ return swizzle_is_simple(s)
+ ? std::unique_ptr<LValue>(new ByteCodeExpressionLValue(this, e))
+ : std::unique_ptr<LValue>(new ByteCodeSwizzleLValue(this, s));
+ }
+ case Expression::kTernary_Kind:
+ default:
+ printf("unsupported lvalue %s\n", e.description().c_str());
+ return nullptr;
+ }
+}
+
+void ByteCodeGenerator::writeBlock(const Block& b) {
+ for (const auto& s : b.fStatements) {
+ this->writeStatement(*s);
+ }
+}
+
+void ByteCodeGenerator::setBreakTargets() {
+ std::vector<DeferredLocation>& breaks = fBreakTargets.top();
+ for (DeferredLocation& b : breaks) {
+ b.set();
+ }
+ fBreakTargets.pop();
+}
+
+void ByteCodeGenerator::setContinueTargets() {
+ std::vector<DeferredLocation>& continues = fContinueTargets.top();
+ for (DeferredLocation& c : continues) {
+ c.set();
+ }
+ fContinueTargets.pop();
+}
+
+void ByteCodeGenerator::writeBreakStatement(const BreakStatement& b) {
+ // TODO: Include BranchIfAllFalse to top-most LoopNext
+ this->write(ByteCodeInstruction::kLoopBreak);
+}
+
+void ByteCodeGenerator::writeContinueStatement(const ContinueStatement& c) {
+ // TODO: Include BranchIfAllFalse to top-most LoopNext
+ this->write(ByteCodeInstruction::kLoopContinue);
+}
+
+void ByteCodeGenerator::writeDoStatement(const DoStatement& d) {
+ this->write(ByteCodeInstruction::kLoopBegin);
+ size_t start = fCode->size();
+ this->writeStatement(*d.fStatement);
+ this->write(ByteCodeInstruction::kLoopNext);
+ this->writeExpression(*d.fTest);
+ this->write(ByteCodeInstruction::kLoopMask);
+ // TODO: Could shorten this with kBranchIfAnyTrue
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation endLocation(this);
+ this->write(ByteCodeInstruction::kBranch);
+ this->write16(start);
+ endLocation.set();
+ this->write(ByteCodeInstruction::kLoopEnd);
+}
+
+void ByteCodeGenerator::writeForStatement(const ForStatement& f) {
+ fContinueTargets.emplace();
+ fBreakTargets.emplace();
+ if (f.fInitializer) {
+ this->writeStatement(*f.fInitializer);
+ }
+ this->write(ByteCodeInstruction::kLoopBegin);
+ size_t start = fCode->size();
+ if (f.fTest) {
+ this->writeExpression(*f.fTest);
+ this->write(ByteCodeInstruction::kLoopMask);
+ }
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation endLocation(this);
+ this->writeStatement(*f.fStatement);
+ this->write(ByteCodeInstruction::kLoopNext);
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, true);
+ }
+ this->write(ByteCodeInstruction::kBranch);
+ this->write16(start);
+ endLocation.set();
+ this->write(ByteCodeInstruction::kLoopEnd);
+}
+
+void ByteCodeGenerator::writeIfStatement(const IfStatement& i) {
+ this->writeExpression(*i.fTest);
+ this->write(ByteCodeInstruction::kMaskPush);
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation falseLocation(this);
+ this->writeStatement(*i.fIfTrue);
+ falseLocation.set();
+ if (i.fIfFalse) {
+ this->write(ByteCodeInstruction::kMaskNegate);
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation endLocation(this);
+ this->writeStatement(*i.fIfFalse);
+ endLocation.set();
+ }
+ this->write(ByteCodeInstruction::kMaskPop);
+}
+
+void ByteCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ if (fLoopCount || fConditionCount) {
+ fErrors.error(r.fOffset, "return not allowed inside conditional or loop");
+ return;
+ }
+ int count = SlotCount(r.fExpression->fType);
+ this->writeExpression(*r.fExpression);
+
+    // Technically, kReturn also pops fOutput->fLocalCount values from the stack, but we
+ // haven't counted pushing those (they're outside the scope of our stack tracking). Instead,
+ // we account for those in writeFunction().
+
+ // This is all fine because we don't allow conditional returns, so we only return once anyway.
+ this->write(ByteCodeInstruction::kReturn, -count);
+ this->write8(count);
+}
+
+void ByteCodeGenerator::writeSwitchStatement(const SwitchStatement& r) {
+ // not yet implemented
+ abort();
+}
+
+void ByteCodeGenerator::writeVarDeclarations(const VarDeclarations& v) {
+ for (const auto& declStatement : v.fVars) {
+ const VarDeclaration& decl = (VarDeclaration&) *declStatement;
+ // we need to grab the location even if we don't use it, to ensure it has been allocated
+ Location location = this->getLocation(*decl.fVar);
+ if (decl.fValue) {
+ this->writeExpression(*decl.fValue);
+ int count = SlotCount(decl.fValue->fType);
+ if (count > 4) {
+ this->write(ByteCodeInstruction::kPushImmediate);
+ this->write32(location.fSlot);
+ this->write(ByteCodeInstruction::kStoreExtended, count);
+ this->write8(count);
+ } else {
+ this->write(vector_instruction(ByteCodeInstruction::kStore, count));
+ this->write8(location.fSlot);
+ }
+ }
+ }
+}
+
+void ByteCodeGenerator::writeWhileStatement(const WhileStatement& w) {
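+    // Loop control is mask based: kLoopMask disables lanes whose test failed, and the loop exits
+    // via kBranchIfAllFalse once no lanes remain active.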
+ this->write(ByteCodeInstruction::kLoopBegin);
+ size_t cond = fCode->size();
+ this->writeExpression(*w.fTest);
+ this->write(ByteCodeInstruction::kLoopMask);
+ this->write(ByteCodeInstruction::kBranchIfAllFalse);
+ DeferredLocation endLocation(this);
+ this->writeStatement(*w.fStatement);
+ this->write(ByteCodeInstruction::kLoopNext);
+ this->write(ByteCodeInstruction::kBranch);
+ this->write16(cond);
+ endLocation.set();
+ this->write(ByteCodeInstruction::kLoopEnd);
+}
+
+void ByteCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s);
+ break;
+ case Statement::kBreak_Kind:
+ this->writeBreakStatement((BreakStatement&) s);
+ break;
+ case Statement::kContinue_Kind:
+ this->writeContinueStatement((ContinueStatement&) s);
+ break;
+ case Statement::kDiscard_Kind:
+ // not yet implemented
+ abort();
+ case Statement::kDo_Kind:
+ this->writeDoStatement((DoStatement&) s);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, true);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s);
+ break;
+ case Statement::kNop_Kind:
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s);
+ break;
+ case Statement::kSwitch_Kind:
+ this->writeSwitchStatement((SwitchStatement&) s);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration);
+ break;
+ case Statement::kWhile_Kind:
+ this->writeWhileStatement((WhileStatement&) s);
+ break;
+ default:
+ SkASSERT(false);
+ }
+}
+
+ByteCodeFunction::ByteCodeFunction(const FunctionDeclaration* declaration)
+ : fName(declaration->fName) {
+ fParameterCount = 0;
+ for (const auto& p : declaration->fParameters) {
+ int slots = ByteCodeGenerator::SlotCount(p->fType);
+ fParameters.push_back({ slots, (bool)(p->fModifiers.fFlags & Modifiers::kOut_Flag) });
+ fParameterCount += slots;
+ }
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.h
new file mode 100644
index 0000000000..4e3accd49d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLByteCodeGenerator.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BYTECODEGENERATOR
+#define SKSL_BYTECODEGENERATOR
+
+#include <algorithm>
+#include <stack>
+#include <unordered_map>
+
+#include "src/sksl/SkSLByteCode.h"
+#include "src/sksl/SkSLCodeGenerator.h"
+#include "src/sksl/SkSLMemoryLayout.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLBreakStatement.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLContinueStatement.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExternalFunctionCall.h"
+#include "src/sksl/ir/SkSLExternalValueReference.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLNullLiteral.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+#include "src/sksl/spirv.h"
+
+namespace SkSL {
+
+class ByteCodeGenerator : public CodeGenerator {
+public:
+ class LValue {
+ public:
+ LValue(ByteCodeGenerator& generator)
+ : fGenerator(generator) {}
+
+ virtual ~LValue() {}
+
+ /**
+ * Stack before call: ... lvalue
+ * Stack after call: ... lvalue load
+ */
+ virtual void load() = 0;
+
+ /**
+ * Stack before call: ... lvalue value
+ * Stack after call: ...
+ */
+ virtual void store(bool discard) = 0;
+
+ protected:
+ ByteCodeGenerator& fGenerator;
+ };
+
+ ByteCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ ByteCode* output);
+
+ bool generateCode() override;
+
+ void write8(uint8_t b);
+
+ void write16(uint16_t b);
+
+ void write32(uint32_t b);
+
+ void write(ByteCodeInstruction inst, int count = kUnusedStackCount);
+
+ /**
+ * Based on 'type', writes the s (signed), u (unsigned), or f (float) instruction.
+ */
+ void writeTypedInstruction(const Type& type, ByteCodeInstruction s, ByteCodeInstruction u,
+ ByteCodeInstruction f, int count, bool writeCount = true);
+
+ static int SlotCount(const Type& type);
+
+private:
+ static constexpr int kUnusedStackCount = INT32_MAX;
+ static int StackUsage(ByteCodeInstruction, int count);
+
+ // reserves 16 bits in the output code, to be filled in later with an address once we determine
+ // it
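+    // Typical use: emit a branch opcode, construct a DeferredLocation (which writes a 16-bit
+    // placeholder), then call set() once the branch target is known.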
+ class DeferredLocation {
+ public:
+ DeferredLocation(ByteCodeGenerator* generator)
+ : fGenerator(*generator)
+ , fOffset(generator->fCode->size()) {
+ generator->write16(0);
+ }
+
+#ifdef SK_DEBUG
+ ~DeferredLocation() {
+ SkASSERT(fSet);
+ }
+#endif
+
+ void set() {
+ int target = fGenerator.fCode->size();
+ SkASSERT(target <= 65535);
+ (*fGenerator.fCode)[fOffset] = target;
+ (*fGenerator.fCode)[fOffset + 1] = target >> 8;
+#ifdef SK_DEBUG
+ fSet = true;
+#endif
+ }
+
+ private:
+ ByteCodeGenerator& fGenerator;
+ size_t fOffset;
+#ifdef SK_DEBUG
+ bool fSet = false;
+#endif
+ };
+
+ // Intrinsics which do not simply map to a single opcode
+ enum class SpecialIntrinsic {
+ kDot,
+ };
+
+ struct Intrinsic {
+ Intrinsic(ByteCodeInstruction instruction)
+ : fIsSpecial(false)
+ , fValue(instruction) {}
+
+ Intrinsic(SpecialIntrinsic special)
+ : fIsSpecial(true)
+ , fValue(special) {}
+
+ bool fIsSpecial;
+
+ union Value {
+ Value(ByteCodeInstruction instruction)
+ : fInstruction(instruction) {}
+
+ Value(SpecialIntrinsic special)
+ : fSpecial(special) {}
+
+ ByteCodeInstruction fInstruction;
+ SpecialIntrinsic fSpecial;
+ } fValue;
+ };
+
+
+    // Similar to Variable::Storage, but locals and parameters are grouped together, and globals
+    // are further subdivided into uniforms and other (writable) globals.
+ enum class Storage {
+        kLocal, // includes parameters
+ kGlobal, // non-uniform globals
+ kUniform, // uniform globals
+ };
+
+ struct Location {
+ int fSlot;
+ Storage fStorage;
+
+ // Not really invalid, but a "safe" placeholder to be more explicit at call-sites
+ static Location MakeInvalid() { return { 0, Storage::kLocal }; }
+
+ Location makeOnStack() { return { -1, fStorage }; }
+ bool isOnStack() const { return fSlot < 0; }
+
+ Location operator+(int offset) {
+ SkASSERT(fSlot >= 0);
+ return { fSlot + offset, fStorage };
+ }
+
+ ByteCodeInstruction selectLoad(ByteCodeInstruction local,
+ ByteCodeInstruction global,
+ ByteCodeInstruction uniform) const {
+ switch (fStorage) {
+ case Storage::kLocal: return local;
+ case Storage::kGlobal: return global;
+ case Storage::kUniform: return uniform;
+ }
+ SkUNREACHABLE;
+ }
+
+ ByteCodeInstruction selectStore(ByteCodeInstruction local,
+ ByteCodeInstruction global) const {
+ switch (fStorage) {
+ case Storage::kLocal: return local;
+ case Storage::kGlobal: return global;
+ case Storage::kUniform: ABORT("Trying to store to a uniform"); break;
+ }
+ return local;
+ }
+ };
+
+ /**
+ * Returns the local slot into which var should be stored, allocating a new slot if it has not
+ * already been assigned one. Compound variables (e.g. vectors) will consume more than one local
+ * slot, with the getLocation return value indicating where the first element should be stored.
+ */
+ Location getLocation(const Variable& var);
+
+ /**
+ * As above, but computes the (possibly dynamic) address of an expression involving indexing &
+     * field access. If the address is known statically, it's returned. If not, the returned
+     * Location has fSlot == -1 and the dynamic offset is left on the top of the stack.
+ */
+ Location getLocation(const Expression& expr);
+
+ void gatherUniforms(const Type& type, const String& name);
+
+ std::unique_ptr<ByteCodeFunction> writeFunction(const FunctionDefinition& f);
+
+ void writeVarDeclarations(const VarDeclarations& decl);
+
+ void writeVariableExpression(const Expression& expr);
+
+ void writeExpression(const Expression& expr, bool discard = false);
+
+ /**
+ * Pushes whatever values are required by the lvalue onto the stack, and returns an LValue
+ * permitting loads and stores to it.
+ */
+ std::unique_ptr<LValue> getLValue(const Expression& expr);
+
+ void writeIntrinsicCall(const FunctionCall& c);
+
+ void writeFunctionCall(const FunctionCall& c);
+
+ void writeConstructor(const Constructor& c);
+
+ void writeExternalFunctionCall(const ExternalFunctionCall& c);
+
+ void writeExternalValue(const ExternalValueReference& r);
+
+ void writeSwizzle(const Swizzle& swizzle);
+
+ bool writeBinaryExpression(const BinaryExpression& b, bool discard);
+
+ void writeTernaryExpression(const TernaryExpression& t);
+
+ void writeNullLiteral(const NullLiteral& n);
+
+ bool writePrefixExpression(const PrefixExpression& p, bool discard);
+
+ bool writePostfixExpression(const PostfixExpression& p, bool discard);
+
+ void writeBoolLiteral(const BoolLiteral& b);
+
+ void writeIntLiteral(const IntLiteral& i);
+
+ void writeFloatLiteral(const FloatLiteral& f);
+
+ void writeStatement(const Statement& s);
+
+ void writeBlock(const Block& b);
+
+ void writeBreakStatement(const BreakStatement& b);
+
+ void writeContinueStatement(const ContinueStatement& c);
+
+ void writeIfStatement(const IfStatement& stmt);
+
+ void writeForStatement(const ForStatement& f);
+
+ void writeWhileStatement(const WhileStatement& w);
+
+ void writeDoStatement(const DoStatement& d);
+
+ void writeSwitchStatement(const SwitchStatement& s);
+
+ void writeReturnStatement(const ReturnStatement& r);
+
+ // updates the current set of breaks to branch to the current location
+ void setBreakTargets();
+
+ // updates the current set of continues to branch to the current location
+ void setContinueTargets();
+
+ void enterLoop() {
+ fLoopCount++;
+ fMaxLoopCount = std::max(fMaxLoopCount, fLoopCount);
+ }
+
+ void exitLoop() {
+ SkASSERT(fLoopCount > 0);
+ fLoopCount--;
+ }
+
+ void enterCondition() {
+ fConditionCount++;
+ fMaxConditionCount = std::max(fMaxConditionCount, fConditionCount);
+ }
+
+ void exitCondition() {
+ SkASSERT(fConditionCount > 0);
+ fConditionCount--;
+ }
+
+ const Context& fContext;
+
+ ByteCode* fOutput;
+
+ const FunctionDefinition* fFunction;
+
+ std::vector<uint8_t>* fCode;
+
+ std::vector<const Variable*> fLocals;
+
+ std::stack<std::vector<DeferredLocation>> fContinueTargets;
+
+ std::stack<std::vector<DeferredLocation>> fBreakTargets;
+
+ std::vector<const FunctionDefinition*> fFunctions;
+
+ int fParameterCount;
+ int fStackCount;
+ int fMaxStackCount;
+
+ int fLoopCount;
+ int fMaxLoopCount;
+ int fConditionCount;
+ int fMaxConditionCount;
+
+ const std::unordered_map<String, Intrinsic> fIntrinsics;
+
+ friend class DeferredLocation;
+ friend class ByteCodeExpressionLValue;
+ friend class ByteCodeSwizzleLValue;
+
+ typedef CodeGenerator INHERITED;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLCFGGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLCFGGenerator.cpp
new file mode 100644
index 0000000000..8ef2dd59e8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCFGGenerator.cpp
@@ -0,0 +1,673 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCFGGenerator.h"
+
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExternalFunctionCall.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+BlockId CFG::newBlock() {
+ BlockId result = fBlocks.size();
+ fBlocks.emplace_back();
+ if (fBlocks.size() > 1) {
+ this->addExit(fCurrent, result);
+ }
+ fCurrent = result;
+ return result;
+}
+
+BlockId CFG::newIsolatedBlock() {
+ BlockId result = fBlocks.size();
+ fBlocks.emplace_back();
+ return result;
+}
+
+void CFG::addExit(BlockId from, BlockId to) {
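+    // Only record exits from reachable blocks; block 0 is the entry and always counts.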
+ if (from == 0 || fBlocks[from].fEntrances.size()) {
+ fBlocks[from].fExits.insert(to);
+ fBlocks[to].fEntrances.insert(from);
+ }
+}
+
+void CFG::dump() {
+ for (size_t i = 0; i < fBlocks.size(); i++) {
+ printf("Block %d\n-------\nBefore: ", (int) i);
+ const char* separator = "";
+ for (auto iter = fBlocks[i].fBefore.begin(); iter != fBlocks[i].fBefore.end(); iter++) {
+ printf("%s%s = %s", separator, iter->first->description().c_str(),
+ iter->second ? (*iter->second)->description().c_str() : "<undefined>");
+ separator = ", ";
+ }
+ printf("\nEntrances: ");
+ separator = "";
+ for (BlockId b : fBlocks[i].fEntrances) {
+ printf("%s%d", separator, (int) b);
+ separator = ", ";
+ }
+ printf("\n");
+ for (size_t j = 0; j < fBlocks[i].fNodes.size(); j++) {
+ BasicBlock::Node& n = fBlocks[i].fNodes[j];
+ printf("Node %d (%p): %s\n", (int) j, &n, n.fKind == BasicBlock::Node::kExpression_Kind
+ ? (*n.expression())->description().c_str()
+ : (*n.statement())->description().c_str());
+ }
+ printf("Exits: ");
+ separator = "";
+ for (BlockId b : fBlocks[i].fExits) {
+ printf("%s%d", separator, (int) b);
+ separator = ", ";
+ }
+ printf("\n\n");
+ }
+}
+
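+// Removes expression 'e', which must appear in this block before the node *iter refers to. On
+// success, *iter is left pointing at the node it originally referenced.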
+bool BasicBlock::tryRemoveExpressionBefore(std::vector<BasicBlock::Node>::iterator* iter,
+ Expression* e) {
+ if (e->fKind == Expression::kTernary_Kind) {
+ return false;
+ }
+ bool result;
+ if ((*iter)->fKind == BasicBlock::Node::kExpression_Kind) {
+ SkASSERT((*iter)->expression()->get() != e);
+ Expression* old = (*iter)->expression()->get();
+ do {
+ if ((*iter) == fNodes.begin()) {
+ return false;
+ }
+ --(*iter);
+ } while ((*iter)->fKind != BasicBlock::Node::kExpression_Kind ||
+ (*iter)->expression()->get() != e);
+ result = this->tryRemoveExpression(iter);
+ while ((*iter)->fKind != BasicBlock::Node::kExpression_Kind ||
+ (*iter)->expression()->get() != old) {
+ SkASSERT(*iter != fNodes.end());
+ ++(*iter);
+ }
+ } else {
+ Statement* old = (*iter)->statement()->get();
+ do {
+ if ((*iter) == fNodes.begin()) {
+ return false;
+ }
+ --(*iter);
+ } while ((*iter)->fKind != BasicBlock::Node::kExpression_Kind ||
+ (*iter)->expression()->get() != e);
+ result = this->tryRemoveExpression(iter);
+ while ((*iter)->fKind != BasicBlock::Node::kStatement_Kind ||
+ (*iter)->statement()->get() != old) {
+ SkASSERT(*iter != fNodes.end());
+ ++(*iter);
+ }
+ }
+ return result;
+}
+
+bool BasicBlock::tryRemoveLValueBefore(std::vector<BasicBlock::Node>::iterator* iter,
+ Expression* lvalue) {
+ switch (lvalue->fKind) {
+ case Expression::kExternalValue_Kind: // fall through
+ case Expression::kVariableReference_Kind:
+ return true;
+ case Expression::kSwizzle_Kind:
+ return this->tryRemoveLValueBefore(iter, ((Swizzle*) lvalue)->fBase.get());
+ case Expression::kFieldAccess_Kind:
+ return this->tryRemoveLValueBefore(iter, ((FieldAccess*) lvalue)->fBase.get());
+ case Expression::kIndex_Kind:
+ if (!this->tryRemoveLValueBefore(iter, ((IndexExpression*) lvalue)->fBase.get())) {
+ return false;
+ }
+ return this->tryRemoveExpressionBefore(iter, ((IndexExpression*) lvalue)->fIndex.get());
+ case Expression::kTernary_Kind:
+ if (!this->tryRemoveExpressionBefore(iter,
+ ((TernaryExpression*) lvalue)->fTest.get())) {
+ return false;
+ }
+ if (!this->tryRemoveLValueBefore(iter, ((TernaryExpression*) lvalue)->fIfTrue.get())) {
+ return false;
+ }
+ return this->tryRemoveLValueBefore(iter, ((TernaryExpression*) lvalue)->fIfFalse.get());
+ default:
+ ABORT("invalid lvalue: %s\n", lvalue->description().c_str());
+ }
+}
+
+bool BasicBlock::tryRemoveExpression(std::vector<BasicBlock::Node>::iterator* iter) {
+ Expression* expr = (*iter)->expression()->get();
+ switch (expr->fKind) {
+ case Expression::kBinary_Kind: {
+ BinaryExpression* b = (BinaryExpression*) expr;
+ if (b->fOperator == Token::EQ) {
+ if (!this->tryRemoveLValueBefore(iter, b->fLeft.get())) {
+ return false;
+ }
+ } else if (!this->tryRemoveExpressionBefore(iter, b->fLeft.get())) {
+ return false;
+ }
+ if (!this->tryRemoveExpressionBefore(iter, b->fRight.get())) {
+ return false;
+ }
+ SkASSERT((*iter)->expression()->get() == expr);
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kTernary_Kind: {
+            // ternaries cross basic block boundaries, so removing one requires regenerating the CFG
+ return false;
+ }
+ case Expression::kFieldAccess_Kind: {
+ FieldAccess* f = (FieldAccess*) expr;
+ if (!this->tryRemoveExpressionBefore(iter, f->fBase.get())) {
+ return false;
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kSwizzle_Kind: {
+ Swizzle* s = (Swizzle*) expr;
+ if (s->fBase && !this->tryRemoveExpressionBefore(iter, s->fBase.get())) {
+ return false;
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kIndex_Kind: {
+ IndexExpression* idx = (IndexExpression*) expr;
+ if (!this->tryRemoveExpressionBefore(iter, idx->fBase.get())) {
+ return false;
+ }
+ if (!this->tryRemoveExpressionBefore(iter, idx->fIndex.get())) {
+ return false;
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kConstructor_Kind: {
+ Constructor* c = (Constructor*) expr;
+ for (auto& arg : c->fArguments) {
+ if (!this->tryRemoveExpressionBefore(iter, arg.get())) {
+ return false;
+ }
+ SkASSERT((*iter)->expression()->get() == expr);
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kFunctionCall_Kind: {
+ FunctionCall* f = (FunctionCall*) expr;
+ for (auto& arg : f->fArguments) {
+ if (!this->tryRemoveExpressionBefore(iter, arg.get())) {
+ return false;
+ }
+ SkASSERT((*iter)->expression()->get() == expr);
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ }
+ case Expression::kPrefix_Kind:
+ if (!this->tryRemoveExpressionBefore(iter,
+ ((PrefixExpression*) expr)->fOperand.get())) {
+ return false;
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+        case Expression::kPostfix_Kind:
+            if (!this->tryRemoveExpressionBefore(iter,
+                                                 ((PostfixExpression*) expr)->fOperand.get())) {
+ return false;
+ }
+ *iter = fNodes.erase(*iter);
+ return true;
+ case Expression::kBoolLiteral_Kind: // fall through
+ case Expression::kFloatLiteral_Kind: // fall through
+ case Expression::kIntLiteral_Kind: // fall through
+ case Expression::kSetting_Kind: // fall through
+ case Expression::kVariableReference_Kind:
+ *iter = fNodes.erase(*iter);
+ return true;
+ default:
+ ABORT("unhandled expression: %s\n", expr->description().c_str());
+ }
+}
+
+bool BasicBlock::tryInsertExpression(std::vector<BasicBlock::Node>::iterator* iter,
+ std::unique_ptr<Expression>* expr) {
+ switch ((*expr)->fKind) {
+ case Expression::kBinary_Kind: {
+ BinaryExpression* b = (BinaryExpression*) expr->get();
+ if (!this->tryInsertExpression(iter, &b->fRight)) {
+ return false;
+ }
+ ++(*iter);
+ if (!this->tryInsertExpression(iter, &b->fLeft)) {
+ return false;
+ }
+ ++(*iter);
+ BasicBlock::Node node = { BasicBlock::Node::kExpression_Kind, true, expr, nullptr };
+ *iter = fNodes.insert(*iter, node);
+ return true;
+ }
+ case Expression::kBoolLiteral_Kind: // fall through
+ case Expression::kFloatLiteral_Kind: // fall through
+ case Expression::kIntLiteral_Kind: // fall through
+ case Expression::kVariableReference_Kind: {
+ BasicBlock::Node node = { BasicBlock::Node::kExpression_Kind, true, expr, nullptr };
+ *iter = fNodes.insert(*iter, node);
+ return true;
+ }
+ case Expression::kConstructor_Kind: {
+ Constructor* c = (Constructor*) expr->get();
+ for (auto& arg : c->fArguments) {
+ if (!this->tryInsertExpression(iter, &arg)) {
+ return false;
+ }
+ ++(*iter);
+ }
+ BasicBlock::Node node = { BasicBlock::Node::kExpression_Kind, true, expr, nullptr };
+ *iter = fNodes.insert(*iter, node);
+ return true;
+ }
+ case Expression::kSwizzle_Kind: {
+ Swizzle* s = (Swizzle*) expr->get();
+ if (!this->tryInsertExpression(iter, &s->fBase)) {
+ return false;
+ }
+ ++(*iter);
+ BasicBlock::Node node = { BasicBlock::Node::kExpression_Kind, true, expr, nullptr };
+ *iter = fNodes.insert(*iter, node);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+void CFGGenerator::addExpression(CFG& cfg, std::unique_ptr<Expression>* e, bool constantPropagate) {
+ SkASSERT(e);
+ switch ((*e)->fKind) {
+ case Expression::kBinary_Kind: {
+ BinaryExpression* b = (BinaryExpression*) e->get();
+ switch (b->fOperator) {
+ case Token::LOGICALAND: // fall through
+ case Token::LOGICALOR: {
+                    // this isn't as precise as it could be -- we don't bother to track the fact
+                    // that after an early exit from a logical and/or, we know which branch of an
+                    // 'if' we're going to hit -- but it won't make much difference in practice.
+ this->addExpression(cfg, &b->fLeft, constantPropagate);
+ BlockId start = cfg.fCurrent;
+ cfg.newBlock();
+ this->addExpression(cfg, &b->fRight, constantPropagate);
+ cfg.newBlock();
+ cfg.addExit(start, cfg.fCurrent);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({
+ BasicBlock::Node::kExpression_Kind,
+ constantPropagate,
+ e,
+ nullptr
+ });
+ break;
+ }
+ case Token::EQ: {
+ this->addExpression(cfg, &b->fRight, constantPropagate);
+ this->addLValue(cfg, &b->fLeft);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({
+ BasicBlock::Node::kExpression_Kind,
+ constantPropagate,
+ e,
+ nullptr
+ });
+ break;
+ }
+ default:
+ this->addExpression(cfg, &b->fLeft, !Compiler::IsAssignment(b->fOperator));
+ this->addExpression(cfg, &b->fRight, constantPropagate);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({
+ BasicBlock::Node::kExpression_Kind,
+ constantPropagate,
+ e,
+ nullptr
+ });
+ }
+ break;
+ }
+ case Expression::kConstructor_Kind: {
+ Constructor* c = (Constructor*) e->get();
+ for (auto& arg : c->fArguments) {
+ this->addExpression(cfg, &arg, constantPropagate);
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ }
+ case Expression::kExternalFunctionCall_Kind: {
+ ExternalFunctionCall* c = (ExternalFunctionCall*) e->get();
+ for (auto& arg : c->fArguments) {
+ this->addExpression(cfg, &arg, constantPropagate);
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ }
+ case Expression::kFunctionCall_Kind: {
+ FunctionCall* c = (FunctionCall*) e->get();
+ for (auto& arg : c->fArguments) {
+ this->addExpression(cfg, &arg, constantPropagate);
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ }
+ case Expression::kFieldAccess_Kind:
+ this->addExpression(cfg, &((FieldAccess*) e->get())->fBase, constantPropagate);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ case Expression::kIndex_Kind:
+ this->addExpression(cfg, &((IndexExpression*) e->get())->fBase, constantPropagate);
+ this->addExpression(cfg, &((IndexExpression*) e->get())->fIndex, constantPropagate);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ case Expression::kPrefix_Kind: {
+ PrefixExpression* p = (PrefixExpression*) e->get();
+ this->addExpression(cfg, &p->fOperand, constantPropagate &&
+ p->fOperator != Token::PLUSPLUS &&
+ p->fOperator != Token::MINUSMINUS);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ }
+ case Expression::kPostfix_Kind:
+ this->addExpression(cfg, &((PostfixExpression*) e->get())->fOperand, false);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ case Expression::kSwizzle_Kind:
+ this->addExpression(cfg, &((Swizzle*) e->get())->fBase, constantPropagate);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ case Expression::kBoolLiteral_Kind: // fall through
+ case Expression::kExternalValue_Kind: // fall through
+ case Expression::kFloatLiteral_Kind: // fall through
+ case Expression::kIntLiteral_Kind: // fall through
+ case Expression::kNullLiteral_Kind: // fall through
+ case Expression::kSetting_Kind: // fall through
+ case Expression::kVariableReference_Kind:
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ break;
+ case Expression::kTernary_Kind: {
+ TernaryExpression* t = (TernaryExpression*) e->get();
+ this->addExpression(cfg, &t->fTest, constantPropagate);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kExpression_Kind,
+ constantPropagate, e, nullptr });
+ BlockId start = cfg.fCurrent;
+ cfg.newBlock();
+ this->addExpression(cfg, &t->fIfTrue, constantPropagate);
+ BlockId next = cfg.newBlock();
+ cfg.fCurrent = start;
+ cfg.newBlock();
+ this->addExpression(cfg, &t->fIfFalse, constantPropagate);
+ cfg.addExit(cfg.fCurrent, next);
+ cfg.fCurrent = next;
+ break;
+ }
+ case Expression::kFunctionReference_Kind: // fall through
+ case Expression::kTypeReference_Kind: // fall through
+ case Expression::kDefined_Kind:
+ SkASSERT(false);
+ break;
+ }
+}
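
The LOGICALAND/LOGICALOR and ternary cases are the only places where addExpression() above splits blocks: the left operand ends one block, the right operand gets a block of its own (reached only when evaluation does not short-circuit), and the combined expression lands in a third block reachable from both. A toy sketch of that edge bookkeeping (hypothetical ToyCFG type, not the real CFG struct):

    #include <cstddef>
    #include <cstdio>
    #include <set>
    #include <vector>

    struct ToyCFG {
        std::vector<std::set<std::size_t>> exits;  // exits[b] = successors of b
        std::size_t current = 0;
        ToyCFG() : exits(1) {}
        std::size_t newBlock() {          // like CFG::newBlock(): edge current -> new
            exits.emplace_back();
            exits[current].insert(exits.size() - 1);
            return current = exits.size() - 1;
        }
        void addExit(std::size_t from, std::size_t to) { exits[from].insert(to); }
    };

    int main() {
        ToyCFG cfg;
        std::size_t start = cfg.current;  // block evaluating 'a'
        cfg.newBlock();                   // block evaluating 'b'
        cfg.newBlock();                   // block holding the whole 'a && b'
        cfg.addExit(start, cfg.current);  // short-circuit edge skipping 'b'
        for (std::size_t b = 0; b < cfg.exits.size(); ++b) {
            for (std::size_t e : cfg.exits[b]) {
                std::printf("block %zu -> block %zu\n", b, e);
            }
        }
        return 0;
    }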
+
+// adds expressions that are evaluated as part of resolving an lvalue
+void CFGGenerator::addLValue(CFG& cfg, std::unique_ptr<Expression>* e) {
+ switch ((*e)->fKind) {
+ case Expression::kFieldAccess_Kind:
+ this->addLValue(cfg, &((FieldAccess&) **e).fBase);
+ break;
+ case Expression::kIndex_Kind:
+ this->addLValue(cfg, &((IndexExpression&) **e).fBase);
+ this->addExpression(cfg, &((IndexExpression&) **e).fIndex, true);
+ break;
+ case Expression::kSwizzle_Kind:
+ this->addLValue(cfg, &((Swizzle&) **e).fBase);
+ break;
+ case Expression::kExternalValue_Kind: // fall through
+ case Expression::kVariableReference_Kind:
+ break;
+ case Expression::kTernary_Kind:
+ this->addExpression(cfg, &((TernaryExpression&) **e).fTest, true);
+ // Technically we will of course only evaluate one or the other, but if the test turns
+ // out to be constant, the ternary will get collapsed down to just one branch anyway. So
+ // it should be ok to pretend that we always evaluate both branches here.
+ this->addLValue(cfg, &((TernaryExpression&) **e).fIfTrue);
+ this->addLValue(cfg, &((TernaryExpression&) **e).fIfFalse);
+ break;
+ default:
+ // not an lvalue, can't happen
+ SkASSERT(false);
+ break;
+ }
+}
+
+static bool is_true(Expression& expr) {
+ return expr.fKind == Expression::kBoolLiteral_Kind && ((BoolLiteral&) expr).fValue;
+}
+
+void CFGGenerator::addStatement(CFG& cfg, std::unique_ptr<Statement>* s) {
+ switch ((*s)->fKind) {
+ case Statement::kBlock_Kind:
+ for (auto& child : ((Block&) **s).fStatements) {
+ addStatement(cfg, &child);
+ }
+ break;
+ case Statement::kIf_Kind: {
+ IfStatement& ifs = (IfStatement&) **s;
+ this->addExpression(cfg, &ifs.fTest, true);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ BlockId start = cfg.fCurrent;
+ cfg.newBlock();
+ this->addStatement(cfg, &ifs.fIfTrue);
+ BlockId next = cfg.newBlock();
+ if (ifs.fIfFalse) {
+ cfg.fCurrent = start;
+ cfg.newBlock();
+ this->addStatement(cfg, &ifs.fIfFalse);
+ cfg.addExit(cfg.fCurrent, next);
+ cfg.fCurrent = next;
+ } else {
+ cfg.addExit(start, next);
+ }
+ break;
+ }
+ case Statement::kExpression_Kind: {
+ this->addExpression(cfg, &((ExpressionStatement&) **s).fExpression, true);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ break;
+ }
+ case Statement::kVarDeclarations_Kind: {
+ VarDeclarationsStatement& decls = ((VarDeclarationsStatement&) **s);
+ for (auto& stmt : decls.fDeclaration->fVars) {
+ if (stmt->fKind == Statement::kNop_Kind) {
+ continue;
+ }
+ VarDeclaration& vd = (VarDeclaration&) *stmt;
+ if (vd.fValue) {
+ this->addExpression(cfg, &vd.fValue, true);
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind,
+ false, nullptr, &stmt });
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ break;
+ }
+ case Statement::kDiscard_Kind:
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ cfg.fCurrent = cfg.newIsolatedBlock();
+ break;
+ case Statement::kReturn_Kind: {
+ ReturnStatement& r = ((ReturnStatement&) **s);
+ if (r.fExpression) {
+ this->addExpression(cfg, &r.fExpression, true);
+ }
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ cfg.fCurrent = cfg.newIsolatedBlock();
+ break;
+ }
+ case Statement::kBreak_Kind:
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ cfg.addExit(cfg.fCurrent, fLoopExits.top());
+ cfg.fCurrent = cfg.newIsolatedBlock();
+ break;
+ case Statement::kContinue_Kind:
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ cfg.addExit(cfg.fCurrent, fLoopContinues.top());
+ cfg.fCurrent = cfg.newIsolatedBlock();
+ break;
+ case Statement::kWhile_Kind: {
+ WhileStatement& w = (WhileStatement&) **s;
+ BlockId loopStart = cfg.newBlock();
+ fLoopContinues.push(loopStart);
+ BlockId loopExit = cfg.newIsolatedBlock();
+ fLoopExits.push(loopExit);
+ this->addExpression(cfg, &w.fTest, true);
+ BlockId test = cfg.fCurrent;
+ if (!is_true(*w.fTest)) {
+ cfg.addExit(test, loopExit);
+ }
+ cfg.newBlock();
+ this->addStatement(cfg, &w.fStatement);
+ cfg.addExit(cfg.fCurrent, loopStart);
+ fLoopContinues.pop();
+ fLoopExits.pop();
+ cfg.fCurrent = loopExit;
+ break;
+ }
+ case Statement::kDo_Kind: {
+ DoStatement& d = (DoStatement&) **s;
+ BlockId loopStart = cfg.newBlock();
+ fLoopContinues.push(loopStart);
+ BlockId loopExit = cfg.newIsolatedBlock();
+ fLoopExits.push(loopExit);
+ this->addStatement(cfg, &d.fStatement);
+ this->addExpression(cfg, &d.fTest, true);
+ cfg.addExit(cfg.fCurrent, loopExit);
+ cfg.addExit(cfg.fCurrent, loopStart);
+ fLoopContinues.pop();
+ fLoopExits.pop();
+ cfg.fCurrent = loopExit;
+ break;
+ }
+ case Statement::kFor_Kind: {
+ ForStatement& f = (ForStatement&) **s;
+ if (f.fInitializer) {
+ this->addStatement(cfg, &f.fInitializer);
+ }
+ BlockId loopStart = cfg.newBlock();
+ BlockId next = cfg.newIsolatedBlock();
+ fLoopContinues.push(next);
+ BlockId loopExit = cfg.newIsolatedBlock();
+ fLoopExits.push(loopExit);
+ if (f.fTest) {
+ this->addExpression(cfg, &f.fTest, true);
+ // this isn't quite right; we should have an exit from here to the loop exit, and
+ // remove the exit from the loop body to the loop exit. Structuring it like this
+ // forces the optimizer to believe that the loop body is always executed at least
+ // once. While not strictly correct, this avoids incorrect "variable not assigned"
+ // errors on variables which are assigned within the loop. The correct solution to
+ // this is to analyze the loop to see whether or not at least one iteration is
+ // guaranteed to happen, but for the time being we take the easy way out.
+ }
+ cfg.newBlock();
+ this->addStatement(cfg, &f.fStatement);
+ cfg.addExit(cfg.fCurrent, next);
+ cfg.fCurrent = next;
+ if (f.fNext) {
+ this->addExpression(cfg, &f.fNext, true);
+ }
+ cfg.addExit(cfg.fCurrent, loopStart);
+ cfg.addExit(cfg.fCurrent, loopExit);
+ fLoopContinues.pop();
+ fLoopExits.pop();
+ cfg.fCurrent = loopExit;
+ break;
+ }
+ case Statement::kSwitch_Kind: {
+ SwitchStatement& ss = (SwitchStatement&) **s;
+ this->addExpression(cfg, &ss.fValue, true);
+ cfg.fBlocks[cfg.fCurrent].fNodes.push_back({ BasicBlock::Node::kStatement_Kind, false,
+ nullptr, s });
+ BlockId start = cfg.fCurrent;
+ BlockId switchExit = cfg.newIsolatedBlock();
+ fLoopExits.push(switchExit);
+ for (const auto& c : ss.fCases) {
+ cfg.newBlock();
+ cfg.addExit(start, cfg.fCurrent);
+ if (c->fValue) {
+ // technically this should go in the start block, but it doesn't actually matter
+ // because it must be constant. Not worth running two loops for.
+ this->addExpression(cfg, &c->fValue, true);
+ }
+ for (auto& caseStatement : c->fStatements) {
+ this->addStatement(cfg, &caseStatement);
+ }
+ }
+ cfg.addExit(cfg.fCurrent, switchExit);
+ // note that unlike GLSL, our grammar requires the default case to be last
+ if (0 == ss.fCases.size() || ss.fCases[ss.fCases.size() - 1]->fValue) {
+ // switch does not have a default clause, mark that it can skip straight to the end
+ cfg.addExit(start, switchExit);
+ }
+ fLoopExits.pop();
+ cfg.fCurrent = switchExit;
+ break;
+ }
+ case Statement::kNop_Kind:
+ break;
+ default:
+ printf("statement: %s\n", (*s)->description().c_str());
+ ABORT("unsupported statement kind");
+ }
+}
+
+CFG CFGGenerator::getCFG(FunctionDefinition& f) {
+ CFG result;
+ result.fStart = result.newBlock();
+ result.fCurrent = result.fStart;
+ this->addStatement(result, &f.fBody);
+ result.newBlock();
+ result.fExit = result.fCurrent;
+ return result;
+}
+
+} // namespace
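
For orientation, hand-tracing getCFG() over a body like `if (x) { a; } else { b; } c;` yields the following shape (block numbering reflects creation order and is illustrative only):

    block 0: x, <if statement>   exits -> 1, 3   (true branch, else branch)
    block 1: a                   exits -> 2
    block 3: b                   exits -> 2
    block 2: c                   exits -> 4
    block 4: (empty)             becomes fExit via the trailing newBlock()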
diff --git a/gfx/skia/skia/src/sksl/SkSLCFGGenerator.h b/gfx/skia/skia/src/sksl/SkSLCFGGenerator.h
new file mode 100644
index 0000000000..7bcb89e360
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCFGGenerator.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CFGGENERATOR
+#define SKSL_CFGGENERATOR
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+
+#include <set>
+#include <stack>
+
+namespace SkSL {
+
+// index of a block within CFG.fBlocks
+typedef size_t BlockId;
+
+struct BasicBlock {
+ struct Node {
+ enum Kind {
+ kStatement_Kind,
+ kExpression_Kind
+ };
+
+ Node(Kind kind, bool constantPropagation, std::unique_ptr<Expression>* expression,
+ std::unique_ptr<Statement>* statement)
+ : fKind(kind)
+ , fConstantPropagation(constantPropagation)
+ , fExpression(expression)
+ , fStatement(statement) {}
+
+ std::unique_ptr<Expression>* expression() const {
+ SkASSERT(fKind == kExpression_Kind);
+ return fExpression;
+ }
+
+ void setExpression(std::unique_ptr<Expression> expr) {
+ SkASSERT(fKind == kExpression_Kind);
+ *fExpression = std::move(expr);
+ }
+
+ std::unique_ptr<Statement>* statement() const {
+ SkASSERT(fKind == kStatement_Kind);
+ return fStatement;
+ }
+
+ void setStatement(std::unique_ptr<Statement> stmt) {
+ SkASSERT(fKind == kStatement_Kind);
+ *fStatement = std::move(stmt);
+ }
+
+ String description() const {
+ if (fKind == kStatement_Kind) {
+ return (*fStatement)->description();
+ } else {
+ SkASSERT(fKind == kExpression_Kind);
+ return (*fExpression)->description();
+ }
+ }
+
+ Kind fKind;
+ // if false, this node should not be subject to constant propagation. This happens with
+ // compound assignment (i.e. x *= 2), in which the value x is used as an rvalue for
+ // multiplication by 2 and then as an lvalue for assignment purposes. Since there is only
+ // one "x" node, replacing it with a constant would break the assignment and we suppress
+ // it. Down the road, we should handle this more elegantly by substituting a regular
+ // assignment if the target is constant (i.e. x = 1; x *= 2; should become x = 1; x = 1 * 2;
+ // and then collapse down to a simple x = 2;).
+ bool fConstantPropagation;
+
+ private:
+ // we store pointers to the unique_ptrs so that we can replace expressions or statements
+ // during optimization without having to regenerate the entire tree
+ std::unique_ptr<Expression>* fExpression;
+ std::unique_ptr<Statement>* fStatement;
+ };
+
+ /**
+ * Attempts to remove the expression (and its subexpressions) pointed to by the iterator. If the
+ * expression can be cleanly removed, returns true and updates the iterator to point to the
+ * expression after the deleted expression. Otherwise returns false (and the CFG will need to be
+ * regenerated).
+ */
+ bool tryRemoveExpression(std::vector<BasicBlock::Node>::iterator* iter);
+
+ /**
+ * Locates and attempts to remove an expression occurring before the expression pointed to by iter.
+ * If the expression can be cleanly removed, returns true and resets iter to a valid iterator
+ * pointing to the same expression it did initially. Otherwise returns false (and the CFG will
+ * need to be regenerated).
+ */
+ bool tryRemoveExpressionBefore(std::vector<BasicBlock::Node>::iterator* iter, Expression* e);
+
+ /**
+ * As tryRemoveExpressionBefore, but for lvalues. As lvalues are at most partially evaluated
+ * (for instance, x[i] = 0 evaluates i but not x) this will only look for the parts of the
+ * lvalue that are actually evaluated.
+ */
+ bool tryRemoveLValueBefore(std::vector<BasicBlock::Node>::iterator* iter, Expression* lvalue);
+
+ /**
+ * Attempts to insert a new expression before the node pointed to by iter. If the
+ * expression can be cleanly inserted, returns true and updates the iterator to point to the
+ * newly inserted expression. Otherwise returns false (and the CFG will need to be regenerated).
+ */
+ bool tryInsertExpression(std::vector<BasicBlock::Node>::iterator* iter,
+ std::unique_ptr<Expression>* expr);
+
+ std::vector<Node> fNodes;
+ std::set<BlockId> fEntrances;
+ std::set<BlockId> fExits;
+ // variable definitions upon entering this basic block (null expression = undefined)
+ DefinitionMap fBefore;
+};
+
+struct CFG {
+ BlockId fStart;
+ BlockId fExit;
+ std::vector<BasicBlock> fBlocks;
+
+ void dump();
+
+private:
+ BlockId fCurrent;
+
+ // Adds a new block, adds an exit* from the current block to the new block, then marks the new
+ // block as the current block
+ // *see note in addExit()
+ BlockId newBlock();
+
+ // Adds a new block, but does not mark it current or add an exit from the current block
+ BlockId newIsolatedBlock();
+
+ // Adds an exit from the 'from' block to the 'to' block
+ // Note that we skip adding the exit if the 'from' block is itself unreachable; this means that
+ // we don't actually have to trace the tree to see if a particular block is unreachable, we can
+ // just check to see if it has any entrances. This does require a bit of care in the order in
+ // which we set the CFG up.
+ void addExit(BlockId from, BlockId to);
+
+ friend class CFGGenerator;
+};
+
+/**
+ * Converts functions into control flow graphs.
+ */
+class CFGGenerator {
+public:
+ CFGGenerator() {}
+
+ CFG getCFG(FunctionDefinition& f);
+
+private:
+ void addStatement(CFG& cfg, std::unique_ptr<Statement>* s);
+
+ void addExpression(CFG& cfg, std::unique_ptr<Expression>* e, bool constantPropagate);
+
+ void addLValue(CFG& cfg, std::unique_ptr<Expression>* e);
+
+ std::stack<BlockId> fLoopContinues;
+ std::stack<BlockId> fLoopExits;
+};
+
+}
+
+#endif
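
A hypothetical caller-side sketch (illustrative names, not code from this patch) showing how the fEntrances bookkeeping described at addExit() lets a client detect unreachable blocks without any graph traversal:

    #include "src/sksl/SkSLCFGGenerator.h"

    // Count blocks that never received an entrance; per the note on addExit(),
    // those are exactly the unreachable blocks (aside from fStart itself).
    static int count_unreachable_blocks(SkSL::FunctionDefinition& fn) {
        SkSL::CFGGenerator generator;
        SkSL::CFG cfg = generator.getCFG(fn);
        int unreachable = 0;
        for (SkSL::BlockId id = 0; id < cfg.fBlocks.size(); ++id) {
            if (id != cfg.fStart && cfg.fBlocks[id].fEntrances.empty()) {
                ++unreachable;
            }
        }
        return unreachable;
    }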
diff --git a/gfx/skia/skia/src/sksl/SkSLCPP.h b/gfx/skia/skia/src/sksl/SkSLCPP.h
new file mode 100644
index 0000000000..11b5b00407
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCPP.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CPP
+#define SKSL_CPP
+
+// functions used by CPP programs created by skslc
+
+#include <cmath>
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+using std::abs;
+
+struct Float4 {
+ Float4(float x, float y, float z, float w)
+ : fX(x)
+ , fY(y)
+ , fZ(z)
+ , fW(w) {}
+
+ operator SkRect() const {
+ return SkRect::MakeLTRB(fX, fY, fZ, fW);
+ }
+
+private:
+ float fX;
+ float fY;
+ float fZ;
+ float fW;
+};
+
+// macros to make sk_Caps.<cap name> work from C++ code
+#define sk_Caps (*args.fShaderCaps)
+
+#define floatIs32Bits floatIs32Bits()
+
+// functions to make GLSL constructors work from C++ code
+inline SkPoint float2(float xy) { return SkPoint::Make(xy, xy); }
+
+inline SkPoint float2(float x, float y) { return SkPoint::Make(x, y); }
+
+inline Float4 float4(float xyzw) { return Float4(xyzw, xyzw, xyzw, xyzw); }
+
+inline Float4 float4(float x, float y, float z, float w) { return Float4(x, y, z, w); }
+
+#define half2 float2
+
+#define half3 float3
+
+#define half4 float4
+
+#endif
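
To show what this header is for, a hand-written sketch of the flavor of C++ that skslc emits against it (illustrative only; real generated code is considerably more involved):

    #include "src/sksl/SkSLCPP.h"

    SkRect sketch() {
        SkPoint p = float2(0.5f);              // GLSL-style splat constructor
        Float4 box = half4(p.fX, p.fY, 1, 1);  // half4 is #define'd to float4
        return box;                            // Float4 converts to SkRect
    }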
diff --git a/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.cpp
new file mode 100644
index 0000000000..a4d2e3b590
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.cpp
@@ -0,0 +1,1343 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCPPCodeGenerator.h"
+
+#include "src/sksl/SkSLCPPUniformCTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLHCodeGenerator.h"
+
+#include <algorithm>
+
+namespace SkSL {
+
+static bool needs_uniform_var(const Variable& var) {
+ return (var.fModifiers.fFlags & Modifiers::kUniform_Flag) &&
+ var.fType.kind() != Type::kSampler_Kind;
+}
+
+CPPCodeGenerator::CPPCodeGenerator(const Context* context, const Program* program,
+ ErrorReporter* errors, String name, OutputStream* out)
+: INHERITED(context, program, errors, out)
+, fName(std::move(name))
+, fFullName(String::printf("Gr%s", fName.c_str()))
+, fSectionAndParameterHelper(program, *errors) {
+ fLineEnding = "\\n";
+ fTextureFunctionOverride = "sample";
+}
+
+void CPPCodeGenerator::writef(const char* s, va_list va) {
+ static constexpr int BUFFER_SIZE = 1024;
+ va_list copy;
+ va_copy(copy, va);
+ char buffer[BUFFER_SIZE];
+ int length = vsnprintf(buffer, BUFFER_SIZE, s, va);
+ if (length < BUFFER_SIZE) {
+ fOut->write(buffer, length);
+ } else {
+ std::unique_ptr<char[]> heap(new char[length + 1]);
+ vsprintf(heap.get(), s, copy);
+ fOut->write(heap.get(), length);
+ }
+ va_end(copy);
+}
+
+void CPPCodeGenerator::writef(const char* s, ...) {
+ va_list va;
+ va_start(va, s);
+ this->writef(s, va);
+ va_end(va);
+}
+
+void CPPCodeGenerator::writeHeader() {
+}
+
+bool CPPCodeGenerator::usesPrecisionModifiers() const {
+ return false;
+}
+
+String CPPCodeGenerator::getTypeName(const Type& type) {
+ return type.name();
+}
+
+void CPPCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ if (b.fOperator == Token::PERCENT) {
+ // need to use "%%" instead of "%" b/c the code will be inside of a printf
+ Precedence precedence = GetBinaryPrecedence(b.fOperator);
+ if (precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*b.fLeft, precedence);
+ this->write(" %% ");
+ this->writeExpression(*b.fRight, precedence);
+ if (precedence >= parentPrecedence) {
+ this->write(")");
+ }
+ } else if (b.fLeft->fKind == Expression::kNullLiteral_Kind ||
+ b.fRight->fKind == Expression::kNullLiteral_Kind) {
+ const Variable* var;
+ if (b.fLeft->fKind != Expression::kNullLiteral_Kind) {
+ SkASSERT(b.fLeft->fKind == Expression::kVariableReference_Kind);
+ var = &((VariableReference&) *b.fLeft).fVariable;
+ } else {
+ SkASSERT(b.fRight->fKind == Expression::kVariableReference_Kind);
+ var = &((VariableReference&) *b.fRight).fVariable;
+ }
+ SkASSERT(var->fType.kind() == Type::kNullable_Kind &&
+ var->fType.componentType() == *fContext.fFragmentProcessor_Type);
+ this->write("%s");
+ const char* op = ""; // initialized so release builds (where SkASSERT compiles away) stay well-defined
+ switch (b.fOperator) {
+ case Token::EQEQ:
+ op = "<";
+ break;
+ case Token::NEQ:
+ op = ">=";
+ break;
+ default:
+ SkASSERT(false);
+ }
+ fFormatArgs.push_back("_outer." + String(var->fName) + "_index " + op + " 0 ? \"true\" "
+ ": \"false\"");
+ } else {
+ INHERITED::writeBinaryExpression(b, parentPrecedence);
+ }
+}
+
+void CPPCodeGenerator::writeIndexExpression(const IndexExpression& i) {
+ const Expression& base = *i.fBase;
+ if (base.fKind == Expression::kVariableReference_Kind) {
+ int builtin = ((VariableReference&) base).fVariable.fModifiers.fLayout.fBuiltin;
+ if (SK_TRANSFORMEDCOORDS2D_BUILTIN == builtin) {
+ this->write("%s");
+ if (i.fIndex->fKind != Expression::kIntLiteral_Kind) {
+ fErrors.error(i.fIndex->fOffset,
+ "index into sk_TransformedCoords2D must be an integer literal");
+ return;
+ }
+ int64_t index = ((IntLiteral&) *i.fIndex).fValue;
+ String name = "sk_TransformedCoords2D_" + to_string(index);
+ fFormatArgs.push_back("_outer.computeLocalCoordsInVertexShader() ? " + name +
+ ".c_str() : \"_coords\"");
+ if (fWrittenTransformedCoords.find(index) == fWrittenTransformedCoords.end()) {
+ addExtraEmitCodeLine("SkString " + name +
+ " = fragBuilder->ensureCoords2D(args.fTransformedCoords[" +
+ to_string(index) + "].fVaryingPoint);");
+ fWrittenTransformedCoords.insert(index);
+ }
+ return;
+ } else if (SK_TEXTURESAMPLERS_BUILTIN == builtin) {
+ this->write("%s");
+ if (i.fIndex->fKind != Expression::kIntLiteral_Kind) {
+ fErrors.error(i.fIndex->fOffset,
+ "index into sk_TextureSamplers must be an integer literal");
+ return;
+ }
+ int64_t index = ((IntLiteral&) *i.fIndex).fValue;
+ fFormatArgs.push_back(" fragBuilder->getProgramBuilder()->samplerVariable("
+ "args.fTexSamplers[" + to_string(index) + "])");
+ return;
+ }
+ }
+ INHERITED::writeIndexExpression(i);
+}
+
+static String default_value(const Type& type) {
+ if (type.fName == "bool") {
+ return "false";
+ }
+ switch (type.kind()) {
+ case Type::kScalar_Kind: return "0";
+ case Type::kVector_Kind: return type.name() + "(0)";
+ case Type::kMatrix_Kind: return type.name() + "(1)";
+ default: ABORT("unsupported default_value type\n");
+ }
+}
+
+static String default_value(const Variable& var) {
+ if (var.fModifiers.fLayout.fCType == SkSL::Layout::CType::kSkPMColor4f) {
+ return "{SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN}";
+ }
+ return default_value(var.fType);
+}
+
+static bool is_private(const Variable& var) {
+ return !(var.fModifiers.fFlags & Modifiers::kUniform_Flag) &&
+ !(var.fModifiers.fFlags & Modifiers::kIn_Flag) &&
+ var.fStorage == Variable::kGlobal_Storage &&
+ var.fModifiers.fLayout.fBuiltin == -1;
+}
+
+static bool is_uniform_in(const Variable& var) {
+ return (var.fModifiers.fFlags & Modifiers::kUniform_Flag) &&
+ (var.fModifiers.fFlags & Modifiers::kIn_Flag) &&
+ var.fType.kind() != Type::kSampler_Kind;
+}
+
+void CPPCodeGenerator::writeRuntimeValue(const Type& type, const Layout& layout,
+ const String& cppCode) {
+ if (type.isFloat()) {
+ this->write("%f");
+ fFormatArgs.push_back(cppCode);
+ } else if (type == *fContext.fInt_Type) {
+ this->write("%d");
+ fFormatArgs.push_back(cppCode);
+ } else if (type == *fContext.fBool_Type) {
+ this->write("%s");
+ fFormatArgs.push_back("(" + cppCode + " ? \"true\" : \"false\")");
+ } else if (type == *fContext.fFloat2_Type || type == *fContext.fHalf2_Type) {
+ this->write(type.name() + "(%f, %f)");
+ fFormatArgs.push_back(cppCode + ".fX");
+ fFormatArgs.push_back(cppCode + ".fY");
+ } else if (type == *fContext.fFloat4_Type || type == *fContext.fHalf4_Type) {
+ this->write(type.name() + "(%f, %f, %f, %f)");
+ switch (layout.fCType) {
+ case Layout::CType::kSkPMColor:
+ fFormatArgs.push_back("SkGetPackedR32(" + cppCode + ") / 255.0");
+ fFormatArgs.push_back("SkGetPackedG32(" + cppCode + ") / 255.0");
+ fFormatArgs.push_back("SkGetPackedB32(" + cppCode + ") / 255.0");
+ fFormatArgs.push_back("SkGetPackedA32(" + cppCode + ") / 255.0");
+ break;
+ case Layout::CType::kSkPMColor4f:
+ fFormatArgs.push_back(cppCode + ".fR");
+ fFormatArgs.push_back(cppCode + ".fG");
+ fFormatArgs.push_back(cppCode + ".fB");
+ fFormatArgs.push_back(cppCode + ".fA");
+ break;
+ case Layout::CType::kSkVector4:
+ fFormatArgs.push_back(cppCode + ".fData[0]");
+ fFormatArgs.push_back(cppCode + ".fData[1]");
+ fFormatArgs.push_back(cppCode + ".fData[2]");
+ fFormatArgs.push_back(cppCode + ".fData[3]");
+ break;
+ case Layout::CType::kSkRect: // fall through
+ case Layout::CType::kDefault:
+ fFormatArgs.push_back(cppCode + ".left()");
+ fFormatArgs.push_back(cppCode + ".top()");
+ fFormatArgs.push_back(cppCode + ".right()");
+ fFormatArgs.push_back(cppCode + ".bottom()");
+ break;
+ default:
+ SkASSERT(false);
+ }
+ } else if (type.kind() == Type::kEnum_Kind) {
+ this->write("%d");
+ fFormatArgs.push_back("(int) " + cppCode);
+ } else if (type == *fContext.fInt4_Type ||
+ type == *fContext.fShort4_Type ||
+ type == *fContext.fByte4_Type) {
+ this->write(type.name() + "(%d, %d, %d, %d)");
+ fFormatArgs.push_back(cppCode + ".left()");
+ fFormatArgs.push_back(cppCode + ".top()");
+ fFormatArgs.push_back(cppCode + ".right()");
+ fFormatArgs.push_back(cppCode + ".bottom()");
+ } else {
+ printf("unsupported runtime value type '%s'\n", String(type.fName).c_str());
+ SkASSERT(false);
+ }
+}
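
As a concrete instance of the mapping above (hand-derived, not generated output): a half4 parameter named `rect` with the default CType emits the format fragment `half4(%f, %f, %f, %f)` and queues `_outer.rect.left()`, `.top()`, `.right()`, and `.bottom()` as its four format arguments, while the same type with layout(ctype=SkPMColor4f) would queue `.fR` through `.fA` instead.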
+
+void CPPCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
+ if (is_private(var)) {
+ this->writeRuntimeValue(var.fType, var.fModifiers.fLayout, var.fName);
+ } else {
+ this->writeExpression(value, kTopLevel_Precedence);
+ }
+}
+
+String CPPCodeGenerator::getSamplerHandle(const Variable& var) {
+ int samplerCount = 0;
+ for (const auto param : fSectionAndParameterHelper.getParameters()) {
+ if (&var == param) {
+ return "args.fTexSamplers[" + to_string(samplerCount) + "]";
+ }
+ if (param->fType.kind() == Type::kSampler_Kind) {
+ ++samplerCount;
+ }
+ }
+ ABORT("should have found sampler in parameters\n");
+}
+
+void CPPCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ this->write(to_string((int32_t) i.fValue));
+}
+
+void CPPCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+ if (fCPPMode) {
+ SkASSERT(swizzle.fComponents.size() == 1); // no support for multiple swizzle components yet
+ this->writeExpression(*swizzle.fBase, kPostfix_Precedence);
+ switch (swizzle.fComponents[0]) {
+ case 0: this->write(".left()"); break;
+ case 1: this->write(".top()"); break;
+ case 2: this->write(".right()"); break;
+ case 3: this->write(".bottom()"); break;
+ }
+ } else {
+ INHERITED::writeSwizzle(swizzle);
+ }
+}
+
+void CPPCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ if (fCPPMode) {
+ this->write(ref.fVariable.fName);
+ return;
+ }
+ switch (ref.fVariable.fModifiers.fLayout.fBuiltin) {
+ case SK_INCOLOR_BUILTIN:
+ this->write("%s");
+ // EmitArgs.fInputColor is automatically set to half4(1) if
+ // no input was specified
+ fFormatArgs.push_back(String("args.fInputColor"));
+ break;
+ case SK_OUTCOLOR_BUILTIN:
+ this->write("%s");
+ fFormatArgs.push_back(String("args.fOutputColor"));
+ break;
+ case SK_WIDTH_BUILTIN:
+ this->write("sk_Width");
+ break;
+ case SK_HEIGHT_BUILTIN:
+ this->write("sk_Height");
+ break;
+ default:
+ if (ref.fVariable.fType.kind() == Type::kSampler_Kind) {
+ this->write("%s");
+ fFormatArgs.push_back("fragBuilder->getProgramBuilder()->samplerVariable(" +
+ this->getSamplerHandle(ref.fVariable) + ")");
+ return;
+ }
+ if (ref.fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag) {
+ this->write("%s");
+ String name = ref.fVariable.fName;
+ String var = String::printf("args.fUniformHandler->getUniformCStr(%sVar)",
+ HCodeGenerator::FieldName(name.c_str()).c_str());
+ String code;
+ if (ref.fVariable.fModifiers.fLayout.fWhen.fLength) {
+ code = String::printf("%sVar.isValid() ? %s : \"%s\"",
+ HCodeGenerator::FieldName(name.c_str()).c_str(),
+ var.c_str(),
+ default_value(ref.fVariable.fType).c_str());
+ } else {
+ code = var;
+ }
+ fFormatArgs.push_back(code);
+ } else if (SectionAndParameterHelper::IsParameter(ref.fVariable)) {
+ String name(ref.fVariable.fName);
+ this->writeRuntimeValue(ref.fVariable.fType, ref.fVariable.fModifiers.fLayout,
+ String::printf("_outer.%s", name.c_str()).c_str());
+ } else {
+ this->write(ref.fVariable.fName);
+ }
+ }
+}
+
+void CPPCodeGenerator::writeIfStatement(const IfStatement& s) {
+ if (s.fIsStatic) {
+ this->write("@");
+ }
+ INHERITED::writeIfStatement(s);
+}
+
+void CPPCodeGenerator::writeReturnStatement(const ReturnStatement& s) {
+ if (fInMain) {
+ fErrors.error(s.fOffset, "fragmentProcessor main() may not contain return statements");
+ }
+ INHERITED::writeReturnStatement(s);
+}
+
+void CPPCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ if (s.fIsStatic) {
+ this->write("@");
+ }
+ INHERITED::writeSwitchStatement(s);
+}
+
+void CPPCodeGenerator::writeFieldAccess(const FieldAccess& access) {
+ if (access.fBase->fType.name() == "fragmentProcessor") {
+ // Special field accesses on fragment processors are converted into function calls on
+ // GrFragmentProcessor's getters.
+ if (access.fBase->fKind != Expression::kVariableReference_Kind) {
+ fErrors.error(access.fBase->fOffset, "fragmentProcessor must be a reference\n");
+ return;
+ }
+
+ const Type::Field& field = fContext.fFragmentProcessor_Type->fields()[access.fFieldIndex];
+ const Variable& var = ((const VariableReference&) *access.fBase).fVariable;
+ String cppAccess = String::printf("_outer.childProcessor(_outer.%s_index).%s()",
+ String(var.fName).c_str(),
+ String(field.fName).c_str());
+
+ if (fCPPMode) {
+ this->write(cppAccess.c_str());
+ } else {
+ writeRuntimeValue(*field.fType, Layout(), cppAccess);
+ }
+ return;
+ }
+ INHERITED::writeFieldAccess(access);
+}
+
+int CPPCodeGenerator::getChildFPIndex(const Variable& var) const {
+ int index = 0;
+ bool found = false;
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ const VarDeclaration& decl = (VarDeclaration&) *raw;
+ if (decl.fVar == &var) {
+ found = true;
+ } else if (decl.fVar->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ ++index;
+ }
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ SkASSERT(found);
+ return index;
+}
+
+void CPPCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+ if (c.fFunction.fBuiltin && c.fFunction.fName == "sample" &&
+ c.fArguments[0]->fType.kind() != Type::Kind::kSampler_Kind) {
+ // Sanity checks that are already enforced by the function definition in sksl_fp.inc
+ SkASSERT(c.fArguments.size() >= 1 && c.fArguments.size() <= 3);
+ SkASSERT("fragmentProcessor" == c.fArguments[0]->fType.name() ||
+ "fragmentProcessor?" == c.fArguments[0]->fType.name());
+
+ // Actually fail during compilation if arguments with valid types are
+ // provided that are not variable references, since sample() is a
+ // special function that impacts code emission.
+ if (c.fArguments[0]->fKind != Expression::kVariableReference_Kind) {
+ fErrors.error(c.fArguments[0]->fOffset,
+ "sample()'s fragmentProcessor argument must be a variable reference\n");
+ return;
+ }
+ const Variable& child = ((const VariableReference&) *c.fArguments[0]).fVariable;
+
+ // Start a new extra emit code section so that the emitted child processor can depend on
+ // sksl variables defined in earlier sksl code.
+ this->newExtraEmitCodeBlock();
+
+ // Set to the empty string when no input color parameter should be emitted, which means this
+ // must be properly formatted with a prefixed comma when the parameter should be inserted
+ // into the invokeChild() parameter list.
+ String inputArg;
+ if (c.fArguments.size() > 1 && c.fArguments[1]->fType.name() == "half4") {
+ // Use the invokeChild() variant that accepts an input color, so convert the 2nd
+ // argument's expression into C++ code that produces sksl stored in an SkString.
+ String inputName = "_input" + to_string(c.fOffset);
+ addExtraEmitCodeLine(convertSKSLExpressionToCPP(*c.fArguments[1], inputName));
+
+ // invokeChild() needs a char*
+ inputArg = ", " + inputName + ".c_str()";
+ }
+
+ bool hasCoords = c.fArguments.back()->fType.name() == "float2";
+
+ // Write the output handling after the possible input handling
+ String childName = "_sample" + to_string(c.fOffset);
+ addExtraEmitCodeLine("SkString " + childName + "(\"" + childName + "\");");
+ String coordsName;
+ if (hasCoords) {
+ coordsName = "_coords" + to_string(c.fOffset);
+ addExtraEmitCodeLine(convertSKSLExpressionToCPP(*c.fArguments.back(), coordsName));
+ }
+ if (c.fArguments[0]->fType.kind() == Type::kNullable_Kind) {
+ addExtraEmitCodeLine("if (_outer." + String(child.fName) + "_index >= 0) {\n ");
+ }
+ if (hasCoords) {
+ addExtraEmitCodeLine("this->invokeChild(_outer." + String(child.fName) + "_index" +
+ inputArg + ", &" + childName + ", args, " + coordsName +
+ ".c_str());");
+ } else {
+ addExtraEmitCodeLine("this->invokeChild(_outer." + String(child.fName) + "_index" +
+ inputArg + ", &" + childName + ", args);");
+ }
+
+ if (c.fArguments[0]->fType.kind() == Type::kNullable_Kind) {
+ // Null FPs are not emitted, but their output can still be referenced in dependent
+ // expressions - thus we always declare the variable.
+ // Note: this is essentially dead code required to satisfy the compiler, because
+ // 'process' function calls should always be guarded at a higher level, in the .fp
+ // source.
+ addExtraEmitCodeLine(
+ "} else {"
+ " fragBuilder->codeAppendf(\"half4 %s;\", " + childName + ".c_str());"
+ "}");
+ }
+ this->write("%s");
+ fFormatArgs.push_back(childName + ".c_str()");
+ return;
+ }
+ if (c.fFunction.fBuiltin) {
+ INHERITED::writeFunctionCall(c);
+ } else {
+ this->write("%s");
+ fFormatArgs.push_back((String(c.fFunction.fName) + "_name.c_str()").c_str());
+ this->write("(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ this->write(")");
+ }
+ if (c.fFunction.fBuiltin && c.fFunction.fName == "sample") {
+ this->write(".%s");
+ SkASSERT(c.fArguments.size() >= 1);
+ SkASSERT(c.fArguments[0]->fKind == Expression::kVariableReference_Kind);
+ String sampler = this->getSamplerHandle(((VariableReference&) *c.fArguments[0]).fVariable);
+ fFormatArgs.push_back("fragBuilder->getProgramBuilder()->samplerSwizzle(" + sampler +
+ ").c_str()");
+ }
+}
+
+static const char* glsltype_string(const Context& context, const Type& type) {
+ if (type == *context.fFloat_Type) {
+ return "kFloat_GrSLType";
+ } else if (type == *context.fHalf_Type) {
+ return "kHalf_GrSLType";
+ } else if (type == *context.fFloat2_Type) {
+ return "kFloat2_GrSLType";
+ } else if (type == *context.fHalf2_Type) {
+ return "kHalf2_GrSLType";
+ } else if (type == *context.fFloat4_Type) {
+ return "kFloat4_GrSLType";
+ } else if (type == *context.fHalf4_Type) {
+ return "kHalf4_GrSLType";
+ } else if (type == *context.fFloat4x4_Type) {
+ return "kFloat4x4_GrSLType";
+ } else if (type == *context.fHalf4x4_Type) {
+ return "kHalf4x4_GrSLType";
+ } else if (type == *context.fVoid_Type) {
+ return "kVoid_GrSLType";
+ }
+ SkASSERT(false);
+ return nullptr;
+}
+
+void CPPCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ const FunctionDeclaration& decl = f.fDeclaration;
+ fFunctionHeader = "";
+ OutputStream* oldOut = fOut;
+ StringStream buffer;
+ fOut = &buffer;
+ if (decl.fName == "main") {
+ fInMain = true;
+ for (const auto& s : ((Block&) *f.fBody).fStatements) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ fInMain = false;
+
+ fOut = oldOut;
+ this->write(fFunctionHeader);
+ this->write(buffer.str());
+ } else {
+ this->addExtraEmitCodeLine("SkString " + decl.fName + "_name;");
+ String args = "const GrShaderVar " + decl.fName + "_args[] = { ";
+ const char* separator = "";
+ for (const auto& param : decl.fParameters) {
+ args += String(separator) + "GrShaderVar(\"" + param->fName + "\", " +
+ glsltype_string(fContext, param->fType) + ")";
+ separator = ", ";
+ }
+ args += "};";
+ this->addExtraEmitCodeLine(args.c_str());
+ for (const auto& s : ((Block&) *f.fBody).fStatements) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+
+ fOut = oldOut;
+ String emit = "fragBuilder->emitFunction(";
+ emit += glsltype_string(fContext, decl.fReturnType);
+ emit += ", \"" + decl.fName + "\"";
+ emit += ", " + to_string((int64_t) decl.fParameters.size());
+ emit += ", " + decl.fName + "_args";
+ emit += ", \"" + buffer.str() + "\"";
+ emit += ", &" + decl.fName + "_name);";
+ this->addExtraEmitCodeLine(emit.c_str());
+ }
+}
+
+void CPPCodeGenerator::writeSetting(const Setting& s) {
+ static constexpr const char* kPrefix = "sk_Args.";
+ if (!strncmp(s.fName.c_str(), kPrefix, strlen(kPrefix))) {
+ const char* name = s.fName.c_str() + strlen(kPrefix);
+ this->writeRuntimeValue(s.fType, Layout(), HCodeGenerator::FieldName(name).c_str());
+ } else {
+ this->write(s.fName.c_str());
+ }
+}
+
+bool CPPCodeGenerator::writeSection(const char* name, const char* prefix) {
+ const Section* s = fSectionAndParameterHelper.getSection(name);
+ if (s) {
+ this->writef("%s%s", prefix, s->fText.c_str());
+ return true;
+ }
+ return false;
+}
+
+void CPPCodeGenerator::writeProgramElement(const ProgramElement& p) {
+ if (p.fKind == ProgramElement::kSection_Kind) {
+ return;
+ }
+ if (p.fKind == ProgramElement::kVar_Kind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ if (!decls.fVars.size()) {
+ return;
+ }
+ const Variable& var = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if (var.fModifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kUniform_Flag) ||
+ -1 != var.fModifiers.fLayout.fBuiltin) {
+ return;
+ }
+ }
+ INHERITED::writeProgramElement(p);
+}
+
+void CPPCodeGenerator::addUniform(const Variable& var) {
+ if (!needs_uniform_var(var)) {
+ return;
+ }
+ if (var.fModifiers.fLayout.fWhen.fLength) {
+ this->writef(" if (%s) {\n ", String(var.fModifiers.fLayout.fWhen).c_str());
+ }
+ const char* type = glsltype_string(fContext, var.fType);
+ String name(var.fName);
+ this->writef(" %sVar = args.fUniformHandler->addUniform(kFragment_GrShaderFlag, %s, "
+ "\"%s\");\n", HCodeGenerator::FieldName(name.c_str()).c_str(), type,
+ name.c_str());
+ if (var.fModifiers.fLayout.fWhen.fLength) {
+ this->write(" }\n");
+ }
+}
+
+void CPPCodeGenerator::writeInputVars() {
+}
+
+void CPPCodeGenerator::writePrivateVars() {
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ if (is_private(*decl.fVar)) {
+ if (decl.fVar->fType == *fContext.fFragmentProcessor_Type) {
+ fErrors.error(decl.fOffset,
+ "fragmentProcessor variables must be declared 'in'");
+ return;
+ }
+ this->writef("%s %s = %s;\n",
+ HCodeGenerator::FieldType(fContext, decl.fVar->fType,
+ decl.fVar->fModifiers.fLayout).c_str(),
+ String(decl.fVar->fName).c_str(),
+ default_value(*decl.fVar).c_str());
+ } else if (decl.fVar->fModifiers.fLayout.fFlags & Layout::kTracked_Flag) {
+ // An auto-tracked uniform in variable, so add a field to hold onto the prior
+ // state. Note that tracked variables must be uniform in's and that is validated
+ // before writePrivateVars() is called.
+ const UniformCTypeMapper* mapper = UniformCTypeMapper::Get(fContext, *decl.fVar);
+ SkASSERT(mapper && mapper->supportsTracking());
+
+ String name = HCodeGenerator::FieldName(String(decl.fVar->fName).c_str());
+ // The member statement is different if the mapper reports a default value
+ if (mapper->defaultValue().size() > 0) {
+ this->writef("%s %sPrev = %s;\n",
+ Layout::CTypeToStr(mapper->ctype()), name.c_str(),
+ mapper->defaultValue().c_str());
+ } else {
+ this->writef("%s %sPrev;\n",
+ Layout::CTypeToStr(mapper->ctype()), name.c_str());
+ }
+ }
+ }
+ }
+ }
+}
+
+void CPPCodeGenerator::writePrivateVarValues() {
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ if (is_private(*decl.fVar) && decl.fValue) {
+ this->writef("%s = ", String(decl.fVar->fName).c_str());
+ fCPPMode = true;
+ this->writeExpression(*decl.fValue, kAssignment_Precedence);
+ fCPPMode = false;
+ this->write(";\n");
+ }
+ }
+ }
+ }
+}
+
+static bool is_accessible(const Variable& var) {
+ const Type& type = var.fType.nonnullable();
+ return Type::kSampler_Kind != type.kind() &&
+ Type::kOther_Kind != type.kind();
+}
+
+void CPPCodeGenerator::newExtraEmitCodeBlock() {
+ // This should only be called when emitting SKSL for emitCode(), which can be detected if the
+ // cpp buffer is not null, and the cpp buffer is not the current output.
+ SkASSERT(fCPPBuffer && fCPPBuffer != fOut);
+
+ // Start a new block as an empty string
+ fExtraEmitCodeBlocks.push_back("");
+ // Mark its location in the output buffer; ${\d} is used for the token since ${} will not occur
+ // in valid sksl, which makes detection trivial.
+ this->writef("${%zu}", fExtraEmitCodeBlocks.size() - 1);
+}
+
+void CPPCodeGenerator::addExtraEmitCodeLine(const String& toAppend) {
+ SkASSERT(fExtraEmitCodeBlocks.size() > 0);
+ String& currentBlock = fExtraEmitCodeBlocks[fExtraEmitCodeBlocks.size() - 1];
+ // Automatically add indentation and newline
+ currentBlock += " " + toAppend + "\n";
+}
+
+void CPPCodeGenerator::flushEmittedCode() {
+ if (fCPPBuffer == nullptr) {
+ // Not actually within writeEmitCode() so nothing to flush
+ return;
+ }
+
+ StringStream* skslBuffer = static_cast<StringStream*>(fOut);
+
+ String sksl = skslBuffer->str();
+ // Empty the accumulation buffer since its current contents are consumed.
+ skslBuffer->reset();
+
+ // Switch to the cpp buffer
+ fOut = fCPPBuffer;
+
+ // Iterate through the sksl, keeping track of where the last statement ended (e.g. the latest
+ // encountered ';', '{', or '}'). If an extra emit code block token is encountered then the
+ // code from 0 to last statement end is sent to writeCodeAppend, the extra code block is
+ // appended to the cpp buffer, and then the sksl string is trimmed to start where the last
+ // statement left off (minus the encountered token).
+ size_t i = 0;
+ int flushPoint = -1;
+ int tokenStart = -1;
+ while (i < sksl.size()) {
+ if (tokenStart >= 0) {
+ // Looking for the end of the token
+ if (sksl[i] == '}') {
+ // Must append the sksl from 0 to flushPoint (inclusive) then the extra code
+ // accumulated in the block with index parsed from chars [tokenStart+2, i-1]
+ String toFlush = String(sksl.c_str(), flushPoint + 1);
+ // writeCodeAppend automatically removes the format args that it consumed, so
+ // fFormatArgs will be in a valid state for any future sksl
+ this->writeCodeAppend(toFlush);
+
+ int codeBlock = stoi(String(sksl.c_str() + tokenStart + 2, i - tokenStart - 2));
+ SkASSERT(codeBlock < (int) fExtraEmitCodeBlocks.size());
+ if (fExtraEmitCodeBlocks[codeBlock].size() > 0) {
+ this->write(fExtraEmitCodeBlocks[codeBlock].c_str());
+ }
+
+ // Now reset the sksl buffer to start after the flush point, but remove the token.
+ String compacted = String(sksl.c_str() + flushPoint + 1,
+ tokenStart - flushPoint - 1);
+ if (i < sksl.size() - 1) {
+ compacted += String(sksl.c_str() + i + 1, sksl.size() - i - 1);
+ }
+ sksl = compacted;
+
+ // And reset iteration
+ i = -1;
+ flushPoint = -1;
+ tokenStart = -1;
+ }
+ } else {
+ // Looking for the start of extra emit block tokens, and tracking when statements end
+ if (sksl[i] == ';' || sksl[i] == '{' || sksl[i] == '}') {
+ flushPoint = i;
+ } else if (i < sksl.size() - 1 && sksl[i] == '$' && sksl[i + 1] == '{') {
+ // found an extra emit code block token
+ tokenStart = i++;
+ }
+ }
+ i++;
+ }
+
+ // Once we've gone through the sksl string to this point, there are no remaining extra emit
+ // code blocks to interleave, so append the remainder as usual.
+ this->writeCodeAppend(sksl);
+
+ // After appending, switch back to the emptied sksl buffer and reset the extra code blocks
+ fOut = skslBuffer;
+ fExtraEmitCodeBlocks.clear();
+}
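
A simplified model of the splice that flushEmittedCode() performs (illustrative sketch; the real version also flushes completed statements through writeCodeAppend(), which this omits):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Replace each well-formed ${N} token with the Nth extra code block.
    static std::string splice_blocks(const std::string& sksl,
                                     const std::vector<std::string>& blocks) {
        std::string out;
        for (std::size_t i = 0; i < sksl.size(); ++i) {
            if (sksl[i] == '$' && i + 1 < sksl.size() && sksl[i + 1] == '{') {
                std::size_t close = sksl.find('}', i);
                int index = std::stoi(sksl.substr(i + 2, close - i - 2));
                out += blocks[index];
                i = close;                     // resume after the token
            } else {
                out += sksl[i];
            }
        }
        return out;
    }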
+
+void CPPCodeGenerator::writeCodeAppend(const String& code) {
+ // codeAppendf can only handle appending 1024 bytes at a time, so we need to break the string
+ // into chunks. Unfortunately we can't tell exactly how long the string is going to end up,
+ // because printf escape sequences get replaced by strings of unknown length, but keeping the
+ // format string below 512 bytes is probably safe.
+ static constexpr size_t maxChunkSize = 512;
+ size_t start = 0;
+ size_t index = 0;
+ size_t argStart = 0;
+ size_t argCount;
+ while (index < code.size()) {
+ argCount = 0;
+ this->write(" fragBuilder->codeAppendf(\"");
+ while (index < code.size() && index < start + maxChunkSize) {
+ if ('%' == code[index]) {
+ if (index == start + maxChunkSize - 1 || index == code.size() - 1) {
+ break;
+ }
+ if (code[index + 1] != '%') {
+ ++argCount;
+ }
+ } else if ('\\' == code[index] && index == start + maxChunkSize - 1) {
+ // avoid splitting an escape sequence that happens to fall across a chunk boundary
+ break;
+ }
+ ++index;
+ }
+ fOut->write(code.c_str() + start, index - start);
+ this->write("\"");
+ for (size_t i = argStart; i < argStart + argCount; ++i) {
+ this->writef(", %s", fFormatArgs[i].c_str());
+ }
+ this->write(");\n");
+ argStart += argCount;
+ start = index;
+ }
+
+ // argStart is equal to the number of fFormatArgs that were consumed
+ // so they should be removed from the list
+ if (argStart > 0) {
+ fFormatArgs.erase(fFormatArgs.begin(), fFormatArgs.begin() + argStart);
+ }
+}
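
The chunk-boundary rule above (never split in the middle of an escape sequence) can be seen in isolation in this standalone sketch, which ignores the format-argument accounting that writeCodeAppend() also performs:

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Split 'code' into chunks of at most maxChunkSize bytes, backing up one
    // byte whenever a boundary would land inside a '%' or '\' escape.
    // Assumes maxChunkSize > 1.
    static std::vector<std::string> chunk_format(const std::string& code,
                                                 std::size_t maxChunkSize = 512) {
        std::vector<std::string> chunks;
        std::size_t start = 0;
        while (start < code.size()) {
            std::size_t end = std::min(code.size(), start + maxChunkSize);
            if (end < code.size() && (code[end - 1] == '%' || code[end - 1] == '\\')) {
                --end;
            }
            chunks.push_back(code.substr(start, end - start));
            start = end;
        }
        return chunks;
    }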
+
+String CPPCodeGenerator::convertSKSLExpressionToCPP(const Expression& e,
+ const String& cppVar) {
+ // To do this conversion, we temporarily switch the sksl output stream
+ // to an empty stringstream and reset the format args to empty.
+ OutputStream* oldSKSL = fOut;
+ StringStream exprBuffer;
+ fOut = &exprBuffer;
+
+ std::vector<String> oldArgs(fFormatArgs);
+ fFormatArgs.clear();
+
+ // Convert the argument expression into a format string and args
+ this->writeExpression(e, Precedence::kTopLevel_Precedence);
+ std::vector<String> newArgs(fFormatArgs);
+ String expr = exprBuffer.str();
+
+ // After generating, restore the original output stream and format args
+ fFormatArgs = oldArgs;
+ fOut = oldSKSL;
+
+ // The sksl written to exprBuffer is not processed by flushEmittedCode(), so any extra emit code
+ // block tokens won't get handled, so we strip them from the expression and append them
+ // to the end of the original sksl stream.
+ String exprFormat = "";
+ int tokenStart = -1;
+ for (size_t i = 0; i < expr.size(); i++) {
+ if (tokenStart >= 0) {
+ if (expr[i] == '}') {
+ // End of the token, so append the token to fOut
+ fOut->write(expr.c_str() + tokenStart, i - tokenStart + 1);
+ tokenStart = -1;
+ }
+ } else {
+ if (i < expr.size() - 1 && expr[i] == '$' && expr[i + 1] == '{') {
+ tokenStart = i++;
+ } else {
+ exprFormat += expr[i];
+ }
+ }
+ }
+
+ // Now build the final C++ code snippet from the format string and args
+ String cppExpr;
+ if (newArgs.size() == 0) {
+ // This was a static expression, so we can simplify the input
+ // color declaration in the emitted code to just a static string
+ cppExpr = "SkString " + cppVar + "(\"" + exprFormat + "\");";
+ } else {
+ // String formatting must occur dynamically, so have the C++ declaration
+ // use SkStringPrintf with the format args that were accumulated
+ // when the expression was written.
+ cppExpr = "SkString " + cppVar + " = SkStringPrintf(\"" + exprFormat + "\"";
+ for (size_t i = 0; i < newArgs.size(); i++) {
+ cppExpr += ", " + newArgs[i];
+ }
+ cppExpr += ");";
+ }
+ return cppExpr;
+}
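
Two hand-derived examples of the shapes this returns (illustrative; they assume a half parameter named `color` on the processor): converting the static expression half4(1.0) with cppVar `_input0` yields roughly

    SkString _input0("half4(1.0)");

while an expression that references the parameter, say `color * 0.5`, takes the SkStringPrintf path:

    SkString _input0 = SkStringPrintf("%f * 0.5", _outer.color);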
+
+bool CPPCodeGenerator::writeEmitCode(std::vector<const Variable*>& uniforms) {
+ this->write(" void emitCode(EmitArgs& args) override {\n"
+ " GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;\n");
+ this->writef(" const %s& _outer = args.fFp.cast<%s>();\n"
+ " (void) _outer;\n",
+ fFullName.c_str(), fFullName.c_str());
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ String nameString(decl.fVar->fName);
+ const char* name = nameString.c_str();
+ if (SectionAndParameterHelper::IsParameter(*decl.fVar) &&
+ is_accessible(*decl.fVar)) {
+ this->writef(" auto %s = _outer.%s;\n"
+ " (void) %s;\n",
+ name, name, name);
+ }
+ }
+ }
+ }
+ this->writePrivateVarValues();
+ for (const auto u : uniforms) {
+ this->addUniform(*u);
+ }
+ this->writeSection(EMIT_CODE_SECTION);
+
+ // Save original buffer as the CPP buffer for flushEmittedCode()
+ fCPPBuffer = fOut;
+ StringStream skslBuffer;
+ fOut = &skslBuffer;
+
+ this->newExtraEmitCodeBlock();
+ bool result = INHERITED::generateCode();
+ this->flushEmittedCode();
+
+ // Then restore the original CPP buffer and close the function
+ fOut = fCPPBuffer;
+ fCPPBuffer = nullptr;
+ this->write(" }\n");
+ return result;
+}
+
+void CPPCodeGenerator::writeSetData(std::vector<const Variable*>& uniforms) {
+ const char* fullName = fFullName.c_str();
+ const Section* section = fSectionAndParameterHelper.getSection(SET_DATA_SECTION);
+ const char* pdman = section ? section->fArgument.c_str() : "pdman";
+ this->writef(" void onSetData(const GrGLSLProgramDataManager& %s, "
+ "const GrFragmentProcessor& _proc) override {\n",
+ pdman);
+ bool wroteProcessor = false;
+ for (const auto u : uniforms) {
+ if (is_uniform_in(*u)) {
+ if (!wroteProcessor) {
+ this->writef(" const %s& _outer = _proc.cast<%s>();\n", fullName, fullName);
+ wroteProcessor = true;
+ this->writef(" {\n");
+ }
+
+ const UniformCTypeMapper* mapper = UniformCTypeMapper::Get(fContext, *u);
+ SkASSERT(mapper);
+
+ String nameString(u->fName);
+ const char* name = nameString.c_str();
+
+ // Switches for setData behavior in the generated code
+ bool conditionalUniform = u->fModifiers.fLayout.fWhen != "";
+ bool isTracked = u->fModifiers.fLayout.fFlags & Layout::kTracked_Flag;
+ bool needsValueDeclaration = isTracked || !mapper->canInlineUniformValue();
+
+ String uniformName = HCodeGenerator::FieldName(name) + "Var";
+
+ String indent = " "; // 8 by default, 12 when nested for conditional uniforms
+ if (conditionalUniform) {
+ // Add a pre-check to make sure the uniform was emitted
+ // before trying to send any data to the GPU
+ this->writef(" if (%s.isValid()) {\n", uniformName.c_str());
+ indent += " ";
+ }
+
+ String valueVar = "";
+ if (needsValueDeclaration) {
+ valueVar.appendf("%sValue", name);
+ // Use AccessType since that will match the return type of _outer's public API.
+ String valueType = HCodeGenerator::AccessType(fContext, u->fType,
+ u->fModifiers.fLayout);
+ this->writef("%s%s %s = _outer.%s;\n",
+ indent.c_str(), valueType.c_str(), valueVar.c_str(), name);
+ } else {
+ // Not tracked and the mapper only needs to use the value once
+ // so send it a safe expression instead of the variable name
+ valueVar.appendf("(_outer.%s)", name);
+ }
+
+ if (isTracked) {
+ SkASSERT(mapper->supportsTracking());
+
+ String prevVar = HCodeGenerator::FieldName(name) + "Prev";
+ this->writef("%sif (%s) {\n"
+ "%s %s;\n"
+ "%s %s;\n"
+ "%s}\n", indent.c_str(),
+ mapper->dirtyExpression(valueVar, prevVar).c_str(), indent.c_str(),
+ mapper->saveState(valueVar, prevVar).c_str(), indent.c_str(),
+ mapper->setUniform(pdman, uniformName, valueVar).c_str(), indent.c_str());
+ } else {
+ this->writef("%s%s;\n", indent.c_str(),
+ mapper->setUniform(pdman, uniformName, valueVar).c_str());
+ }
+
+ if (conditionalUniform) {
+ // Close the earlier precheck block
+ this->writef(" }\n");
+ }
+ }
+ }
+ if (wroteProcessor) {
+ this->writef(" }\n");
+ }
+ if (section) {
+ int samplerIndex = 0;
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ String nameString(decl.fVar->fName);
+ const char* name = nameString.c_str();
+ if (decl.fVar->fType.kind() == Type::kSampler_Kind) {
+ this->writef(" GrSurfaceProxy& %sProxy = "
+ "*_outer.textureSampler(%d).proxy();\n",
+ name, samplerIndex);
+ this->writef(" GrTexture& %s = *%sProxy.peekTexture();\n",
+ name, name);
+ this->writef(" (void) %s;\n", name);
+ ++samplerIndex;
+ } else if (needs_uniform_var(*decl.fVar)) {
+ this->writef(" UniformHandle& %s = %sVar;\n"
+ " (void) %s;\n",
+ name, HCodeGenerator::FieldName(name).c_str(), name);
+ } else if (SectionAndParameterHelper::IsParameter(*decl.fVar) &&
+ decl.fVar->fType != *fContext.fFragmentProcessor_Type) {
+ if (!wroteProcessor) {
+ this->writef(" const %s& _outer = _proc.cast<%s>();\n", fullName,
+ fullName);
+ wroteProcessor = true;
+ }
+ this->writef(" auto %s = _outer.%s;\n"
+ " (void) %s;\n",
+ name, name, name);
+ }
+ }
+ }
+ }
+ this->writeSection(SET_DATA_SECTION);
+ }
+ this->write(" }\n");
+}
+
+void CPPCodeGenerator::writeOnTextureSampler() {
+ bool foundSampler = false;
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.kind() == Type::kSampler_Kind) {
+ if (!foundSampler) {
+ this->writef(
+ "const GrFragmentProcessor::TextureSampler& %s::onTextureSampler(int "
+ "index) const {\n",
+ fFullName.c_str());
+ this->writef(" return IthTextureSampler(index, %s",
+ HCodeGenerator::FieldName(String(param->fName).c_str()).c_str());
+ foundSampler = true;
+ } else {
+ this->writef(", %s",
+ HCodeGenerator::FieldName(String(param->fName).c_str()).c_str());
+ }
+ }
+ }
+ if (foundSampler) {
+ this->write(");\n}\n");
+ }
+}
+
+void CPPCodeGenerator::writeClone() {
+ if (!this->writeSection(CLONE_SECTION)) {
+ if (fSectionAndParameterHelper.getSection(FIELDS_SECTION)) {
+ fErrors.error(0, "fragment processors with custom @fields must also have a custom "
+ "@clone");
+ }
+ this->writef("%s::%s(const %s& src)\n"
+ ": INHERITED(k%s_ClassID, src.optimizationFlags())", fFullName.c_str(),
+ fFullName.c_str(), fFullName.c_str(), fFullName.c_str());
+ const auto transforms = fSectionAndParameterHelper.getSections(COORD_TRANSFORM_SECTION);
+ for (size_t i = 0; i < transforms.size(); ++i) {
+ const Section& s = *transforms[i];
+ String fieldName = HCodeGenerator::CoordTransformName(s.fArgument, i);
+ this->writef("\n, %s(src.%s)", fieldName.c_str(), fieldName.c_str());
+ }
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ String fieldName = HCodeGenerator::FieldName(String(param->fName).c_str());
+ if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ this->writef("\n, %s_index(src.%s_index)",
+ fieldName.c_str(),
+ fieldName.c_str());
+ } else {
+ this->writef("\n, %s(src.%s)",
+ fieldName.c_str(),
+ fieldName.c_str());
+ }
+ }
+ this->writef(" {\n");
+ int samplerCount = 0;
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.kind() == Type::kSampler_Kind) {
+ ++samplerCount;
+ } else if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ String fieldName = HCodeGenerator::FieldName(String(param->fName).c_str());
+ if (param->fType.kind() == Type::kNullable_Kind) {
+ this->writef(" if (%s_index >= 0) {\n ", fieldName.c_str());
+ }
+ this->writef(" this->registerChildProcessor(src.childProcessor(%s_index)."
+ "clone());\n", fieldName.c_str());
+ if (param->fType.kind() == Type::kNullable_Kind) {
+ this->writef(" }\n");
+ }
+ }
+ }
+ if (samplerCount) {
+ this->writef(" this->setTextureSamplerCnt(%d);", samplerCount);
+ }
+ for (size_t i = 0; i < transforms.size(); ++i) {
+ const Section& s = *transforms[i];
+ String fieldName = HCodeGenerator::CoordTransformName(s.fArgument, i);
+ this->writef(" this->addCoordTransform(&%s);\n", fieldName.c_str());
+ }
+ this->write("}\n");
+ this->writef("std::unique_ptr<GrFragmentProcessor> %s::clone() const {\n",
+ fFullName.c_str());
+ this->writef(" return std::unique_ptr<GrFragmentProcessor>(new %s(*this));\n",
+ fFullName.c_str());
+ this->write("}\n");
+ }
+}
+
+void CPPCodeGenerator::writeTest() {
+ const Section* test = fSectionAndParameterHelper.getSection(TEST_CODE_SECTION);
+ if (test) {
+ this->writef(
+ "GR_DEFINE_FRAGMENT_PROCESSOR_TEST(%s);\n"
+ "#if GR_TEST_UTILS\n"
+ "std::unique_ptr<GrFragmentProcessor> %s::TestCreate(GrProcessorTestData* %s) {\n",
+ fFullName.c_str(),
+ fFullName.c_str(),
+ test->fArgument.c_str());
+ this->writeSection(TEST_CODE_SECTION);
+ this->write("}\n"
+ "#endif\n");
+ }
+}
+
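+// Emits onGetGLSLProcessorKey(). Each variable declared with layout(key) contributes its value to
+// the processor key via b->add32(), optionally guarded by a layout(when=...) condition.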
+void CPPCodeGenerator::writeGetKey() {
+ this->writef("void %s::onGetGLSLProcessorKey(const GrShaderCaps& caps, "
+ "GrProcessorKeyBuilder* b) const {\n",
+ fFullName.c_str());
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ const VarDeclaration& decl = (VarDeclaration&) *raw;
+ const Variable& var = *decl.fVar;
+ String nameString(var.fName);
+ const char* name = nameString.c_str();
+ if (var.fModifiers.fLayout.fKey != Layout::kNo_Key &&
+ (var.fModifiers.fFlags & Modifiers::kUniform_Flag)) {
+ fErrors.error(var.fOffset,
+ "layout(key) may not be specified on uniforms");
+ }
+ switch (var.fModifiers.fLayout.fKey) {
+ case Layout::kKey_Key:
+ if (is_private(var)) {
+ this->writef("%s %s =",
+ HCodeGenerator::FieldType(fContext, var.fType,
+ var.fModifiers.fLayout).c_str(),
+ String(var.fName).c_str());
+ if (decl.fValue) {
+ fCPPMode = true;
+ this->writeExpression(*decl.fValue, kAssignment_Precedence);
+ fCPPMode = false;
+ } else {
+ this->writef("%s", default_value(var).c_str());
+ }
+ this->write(";\n");
+ }
+ if (var.fModifiers.fLayout.fWhen.fLength) {
+ this->writef("if (%s) {", String(var.fModifiers.fLayout.fWhen).c_str());
+ }
+ if (var.fType == *fContext.fFloat4x4_Type) {
+ ABORT("no automatic key handling for float4x4\n");
+ } else if (var.fType == *fContext.fFloat2_Type) {
+ this->writef(" b->add32(%s.fX);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" b->add32(%s.fY);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ } else if (var.fType == *fContext.fFloat4_Type) {
+ this->writef(" b->add32(%s.x());\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" b->add32(%s.y());\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" b->add32(%s.width());\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" b->add32(%s.height());\n",
+ HCodeGenerator::FieldName(name).c_str());
+ } else if (var.fType == *fContext.fHalf4_Type) {
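+                        // Pack the four half floats into two key words:
+                        // (red << 16) | green and (blue << 16) | alpha.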
+ this->writef(" uint16_t red = SkFloatToHalf(%s.fR);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" uint16_t green = SkFloatToHalf(%s.fG);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" uint16_t blue = SkFloatToHalf(%s.fB);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->writef(" uint16_t alpha = SkFloatToHalf(%s.fA);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ this->write(" b->add32(((uint32_t)red << 16) | green);\n");
+ this->write(" b->add32(((uint32_t)blue << 16) | alpha);\n");
+ } else {
+ this->writef(" b->add32((int32_t) %s);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ }
+ if (var.fModifiers.fLayout.fWhen.fLength) {
+ this->write("}");
+ }
+ break;
+ case Layout::kIdentity_Key:
+ if (var.fType.kind() != Type::kMatrix_Kind) {
+ fErrors.error(var.fOffset,
+ "layout(key=identity) requires matrix type");
+ }
+ this->writef(" b->add32(%s.isIdentity() ? 1 : 0);\n",
+ HCodeGenerator::FieldName(name).c_str());
+ break;
+ case Layout::kNo_Key:
+ break;
+ }
+ }
+ }
+ }
+ this->write("}\n");
+}
+
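+// Top-level entry point: validates 'uniform in' declarations against the registered
+// UniformCTypeMappers, then emits the complete .cpp file -- the private GrGLSL subclass, key
+// generation, onIsEqual(), clone(), texture samplers, and optional test code.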
+bool CPPCodeGenerator::generateCode() {
+ std::vector<const Variable*> uniforms;
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ if ((decl.fVar->fModifiers.fFlags & Modifiers::kUniform_Flag) &&
+ decl.fVar->fType.kind() != Type::kSampler_Kind) {
+ uniforms.push_back(decl.fVar);
+ }
+
+ if (is_uniform_in(*decl.fVar)) {
+ // Validate the "uniform in" declarations to make sure they are fully supported,
+ // instead of generating surprising C++
+ const UniformCTypeMapper* mapper =
+ UniformCTypeMapper::Get(fContext, *decl.fVar);
+ if (mapper == nullptr) {
+ fErrors.error(decl.fOffset, String(decl.fVar->fName)
+ + "'s type is not supported for use as a 'uniform in'");
+ return false;
+ }
+ if (decl.fVar->fModifiers.fLayout.fFlags & Layout::kTracked_Flag) {
+ if (!mapper->supportsTracking()) {
+ fErrors.error(decl.fOffset, String(decl.fVar->fName)
+ + "'s type does not support state tracking");
+ return false;
+ }
+ }
+
+ } else {
+ // If it's not a uniform_in, it's an error to be tracked
+ if (decl.fVar->fModifiers.fLayout.fFlags & Layout::kTracked_Flag) {
+ fErrors.error(decl.fOffset, "Non-'in uniforms' cannot be tracked");
+ return false;
+ }
+ }
+ }
+ }
+ }
+ const char* baseName = fName.c_str();
+ const char* fullName = fFullName.c_str();
+ this->writef("%s\n", HCodeGenerator::GetHeader(fProgram, fErrors).c_str());
+ this->writef(kFragmentProcessorHeader, fullName);
+ this->writef("#include \"%s.h\"\n\n", fullName);
+ this->writeSection(CPP_SECTION);
+ this->writef("#include \"include/gpu/GrTexture.h\"\n"
+ "#include \"src/gpu/glsl/GrGLSLFragmentProcessor.h\"\n"
+ "#include \"src/gpu/glsl/GrGLSLFragmentShaderBuilder.h\"\n"
+ "#include \"src/gpu/glsl/GrGLSLProgramBuilder.h\"\n"
+ "#include \"src/sksl/SkSLCPP.h\"\n"
+ "#include \"src/sksl/SkSLUtil.h\"\n"
+ "class GrGLSL%s : public GrGLSLFragmentProcessor {\n"
+ "public:\n"
+ " GrGLSL%s() {}\n",
+ baseName, baseName);
+ bool result = this->writeEmitCode(uniforms);
+ this->write("private:\n");
+ this->writeSetData(uniforms);
+ this->writePrivateVars();
+ for (const auto& u : uniforms) {
+ if (needs_uniform_var(*u) && !(u->fModifiers.fFlags & Modifiers::kIn_Flag)) {
+ this->writef(" UniformHandle %sVar;\n",
+ HCodeGenerator::FieldName(String(u->fName).c_str()).c_str());
+ }
+ }
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (needs_uniform_var(*param)) {
+ this->writef(" UniformHandle %sVar;\n",
+ HCodeGenerator::FieldName(String(param->fName).c_str()).c_str());
+ }
+ }
+ this->writef("};\n"
+ "GrGLSLFragmentProcessor* %s::onCreateGLSLInstance() const {\n"
+ " return new GrGLSL%s();\n"
+ "}\n",
+ fullName, baseName);
+ this->writeGetKey();
+ this->writef("bool %s::onIsEqual(const GrFragmentProcessor& other) const {\n"
+ " const %s& that = other.cast<%s>();\n"
+ " (void) that;\n",
+ fullName, fullName, fullName);
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ continue;
+ }
+ String nameString(param->fName);
+ const char* name = nameString.c_str();
+ this->writef(" if (%s != that.%s) return false;\n",
+ HCodeGenerator::FieldName(name).c_str(),
+ HCodeGenerator::FieldName(name).c_str());
+ }
+ this->write(" return true;\n"
+ "}\n");
+ this->writeClone();
+ this->writeOnTextureSampler();
+ this->writeTest();
+ this->writeSection(CPP_END_SECTION);
+
+ result &= 0 == fErrors.errorCount();
+ return result;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.h
new file mode 100644
index 0000000000..803270cc57
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCPPCodeGenerator.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CPPCODEGENERATOR
+#define SKSL_CPPCODEGENERATOR
+
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+#include "src/sksl/SkSLSectionAndParameterHelper.h"
+
+#include <set>
+
+namespace SkSL {
+
+class CPPCodeGenerator : public GLSLCodeGenerator {
+public:
+ CPPCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ String name, OutputStream* out);
+
+ bool generateCode() override;
+
+private:
+ void writef(const char* s, va_list va) SKSL_PRINTF_LIKE(2, 0);
+
+ void writef(const char* s, ...) SKSL_PRINTF_LIKE(2, 3);
+
+ bool writeSection(const char* name, const char* prefix = "");
+
+ void writeHeader() override;
+
+ bool usesPrecisionModifiers() const override;
+
+ String getTypeName(const Type& type) override;
+
+ void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence) override;
+
+ void writeIndexExpression(const IndexExpression& i) override;
+
+ void writeIntLiteral(const IntLiteral& i) override;
+
+ void writeSwizzle(const Swizzle& swizzle) override;
+
+ void writeFieldAccess(const FieldAccess& access) override;
+
+ void writeVariableReference(const VariableReference& ref) override;
+
+ String getSamplerHandle(const Variable& var);
+
+ void writeIfStatement(const IfStatement& s) override;
+
+ void writeReturnStatement(const ReturnStatement& s) override;
+
+ void writeSwitchStatement(const SwitchStatement& s) override;
+
+ void writeFunctionCall(const FunctionCall& c) override;
+
+ void writeFunction(const FunctionDefinition& f) override;
+
+ void writeSetting(const Setting& s) override;
+
+ void writeProgramElement(const ProgramElement& p) override;
+
+ void addUniform(const Variable& var);
+
+ // writes a printf escape that will be filled in at runtime by the given C++ expression string
+ void writeRuntimeValue(const Type& type, const Layout& layout, const String& cppCode);
+
+ void writeVarInitializer(const Variable& var, const Expression& value) override;
+
+ void writeInputVars() override;
+
+ void writePrivateVars();
+
+ void writePrivateVarValues();
+
+ void writeCodeAppend(const String& code);
+
+ bool writeEmitCode(std::vector<const Variable*>& uniforms);
+
+ void writeSetData(std::vector<const Variable*>& uniforms);
+
+ void writeGetKey();
+
+ void writeOnTextureSampler();
+
+ void writeClone();
+
+ void writeTest();
+
+ // If the returned C++ is included in the generated code, then the variable name stored in
+ // cppVar will refer to a valid SkString that matches the Expression. Successful returns leave
+ // the output buffer (and related state) unmodified.
+ //
+ // In the simplest cases, this will return "SkString {cppVar}(\"{e}\");", while more advanced
+ // cases will properly insert format arguments.
+ String convertSKSLExpressionToCPP(const Expression& e, const String& cppVar);
+
+ // Process accumulated sksl to split it into appended code sections, properly interleaved with
+ // the extra emit code blocks, based on statement/block locations and the inserted tokens
+ // from newExtraEmitCodeBlock(). It is necessary to split the sksl after the program has been
+ // fully walked since many elements redirect fOut to simultaneously build header sections and
+ // bodies that are then concatenated; due to this it is not possible to split the sksl emission
+ // on the fly.
+ void flushEmittedCode();
+
+ // Start a new extra emit code block for accumulating C++ code. This will insert a token into
+ // the sksl stream to mark the fence between previous complete sksl statements and where the
+ // C++ code added to the new block will be added to emitCode(). These tokens are removed by
+ // flushEmittedCode() as it consumes them before passing pure sksl to writeCodeAppend().
+ void newExtraEmitCodeBlock();
+
+ // Append CPP code to the current extra emit code block.
+ void addExtraEmitCodeLine(const String& toAppend);
+
+ int getChildFPIndex(const Variable& var) const;
+
+ String fName;
+ String fFullName;
+ SectionAndParameterHelper fSectionAndParameterHelper;
+ std::vector<String> fExtraEmitCodeBlocks;
+
+ std::vector<String> fFormatArgs;
+ std::set<int> fWrittenTransformedCoords;
+ // if true, we are writing a C++ expression instead of a GLSL expression
+ bool fCPPMode = false;
+ bool fInMain = false;
+
+    // if not null, fOut has been redirected to a StringStream while we
+    // accumulate SkSL for emitCode; the original output buffer is stored here
+    // for restoration.
+ OutputStream* fCPPBuffer = nullptr;
+
+ typedef GLSLCodeGenerator INHERITED;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.cpp b/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.cpp
new file mode 100644
index 0000000000..2740762ac8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCPPUniformCTypes.h"
+#include "src/sksl/SkSLHCodeGenerator.h"
+#include "src/sksl/SkSLStringStream.h"
+
+#include <vector>
+
+namespace SkSL {
+
+/////////////////////////
+// Template evaluation //
+/////////////////////////
+
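+// Evaluates a template string by substituting each ${token} with its paired value. As a
+// hypothetical example, eval_template("${a}.set(${b})", {"a", "b"}, {&x, &y}) with x = "pdman" and
+// y = "0.5f" yields "pdman.set(0.5f)"; unrecognized ${...} tokens are passed through unchanged.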
+static String eval_template(const String& format, const std::vector<String>& tokens,
+ const std::vector<const String*>& values) {
+ StringStream stream;
+
+ int tokenNameStart = -1;
+ for (size_t i = 0; i < format.size(); i++) {
+ if (tokenNameStart >= 0) {
+ // Within a token name so check if it is the end
+ if (format[i] == '}') {
+ // Skip 2 extra characters at the beginning for the $ and {, which must exist since
+                // Skip the 2 extra characters for the leading "${", which must be present since
+                // tokenNameStart is only set after seeing that prefix
+ // Search for the token in supported list
+ bool found = false;
+ for (size_t j = 0; j < tokens.size(); j++) {
+ if (token == tokens[j]) {
+ // Found a match so append the value corresponding to j to the output
+ stream.writeText(values[j]->c_str());
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ // Write out original characters as if we didn't consider it to be a token name
+ stream.writeText("${");
+ stream.writeText(token.c_str());
+ stream.writeText("}");
+ }
+
+ // And end the token name state
+ tokenNameStart = -1;
+ }
+ } else {
+ // Outside of a token name, so check if this character starts a name:
+ // i == $ and i+1 == {
+ if (i < format.size() - 1 && format[i] == '$' && format[i + 1] == '{') {
+ // Begin parsing the token
+ tokenNameStart = i;
+ } else {
+ // Just a character so append it
+ stream.write8(format[i]);
+ }
+ }
+ }
+
+ return stream.str();
+}
+
+static bool determine_inline_from_template(const String& uniformTemplate) {
+ // True if there is at most one instance of the ${var} template matcher in fUniformTemplate.
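+    // For example, "${pdman}.set1f(${uniform}, ${var})" mentions ${var} once and so can be
+    // inlined, while a hypothetical "${var}.fX + ${var}.fY" template could not.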
+ int firstMatch = uniformTemplate.find("${var}");
+
+ if (firstMatch < 0) {
+        // Template doesn't use the value variable at all, so it can be "inlined"
+ return true;
+ }
+
+ // Check for another occurrence of ${var}, after firstMatch + 6
+ int secondMatch = uniformTemplate.find("${var}", firstMatch + strlen("${var}"));
+ // If there's no second match, then the value can be inlined in the c++ code
+ return secondMatch < 0;
+}
+
+///////////////////////////////////////
+// UniformCTypeMapper implementation //
+///////////////////////////////////////
+
+String UniformCTypeMapper::dirtyExpression(const String& newVar, const String& oldVar) const {
+ if (fSupportsTracking) {
+ std::vector<String> tokens = { "newVar", "oldVar" };
+ std::vector<const String*> values = { &newVar, &oldVar };
+ return eval_template(fDirtyExpressionTemplate, tokens, values);
+ } else {
+ return "";
+ }
+}
+
+String UniformCTypeMapper::saveState(const String& newVar, const String& oldVar) const {
+ if (fSupportsTracking) {
+ std::vector<String> tokens = { "newVar", "oldVar" };
+ std::vector<const String*> values = { &newVar, &oldVar };
+ return eval_template(fSaveStateTemplate, tokens, values);
+ } else {
+ return "";
+ }
+}
+
+String UniformCTypeMapper::setUniform(const String& pdman, const String& uniform,
+ const String& var) const {
+ std::vector<String> tokens = { "pdman", "uniform", "var" };
+ std::vector<const String*> values = { &pdman, &uniform, &var };
+ return eval_template(fUniformTemplate, tokens, values);
+}
+
+UniformCTypeMapper::UniformCTypeMapper(
+ Layout::CType ctype, const std::vector<String>& skslTypes, const String& setUniformFormat,
+ bool enableTracking, const String& defaultValue, const String& dirtyExpressionFormat,
+ const String& saveStateFormat)
+ : fCType(ctype)
+ , fSKSLTypes(skslTypes)
+ , fUniformTemplate(setUniformFormat)
+ , fInlineValue(determine_inline_from_template(setUniformFormat))
+ , fSupportsTracking(enableTracking)
+ , fDefaultValue(defaultValue)
+ , fDirtyExpressionTemplate(dirtyExpressionFormat)
+ , fSaveStateTemplate(saveStateFormat) { }
+
+// NOTE: These would be macros, but C++ initialization lists for the sksl type names do not play
+// well with macro parsing.
+
+static UniformCTypeMapper REGISTER(Layout::CType ctype, const std::vector<String>& skslTypes,
+ const char* uniformFormat, const char* defaultValue,
+ const char* dirtyExpression) {
+ return UniformCTypeMapper(ctype, skslTypes, uniformFormat, defaultValue, dirtyExpression,
+ "${oldVar} = ${newVar}");
+}
+
+static UniformCTypeMapper REGISTER(Layout::CType ctype, const std::vector<String>& skslTypes,
+ const char* uniformFormat, const char* defaultValue) {
+ return REGISTER(ctype, skslTypes, uniformFormat, defaultValue,
+ "${oldVar} != ${newVar}");
+}
+
+//////////////////////////////
+// Currently defined ctypes //
+//////////////////////////////
+
+static const std::vector<UniformCTypeMapper>& get_mappers() {
+ static const std::vector<UniformCTypeMapper> registeredMappers = {
+ REGISTER(Layout::CType::kSkRect, { "half4", "float4", "double4" },
+ "${pdman}.set4fv(${uniform}, 1, reinterpret_cast<const float*>(&${var}))", // to gpu
+ "SkRect::MakeEmpty()", // default value
+ "${oldVar}.isEmpty() || ${oldVar} != ${newVar}"), // dirty check
+
+ REGISTER(Layout::CType::kSkIRect, { "int4", "short4", "byte4" },
+ "${pdman}.set4iv(${uniform}, 1, reinterpret_cast<const int*>(&${var}))", // to gpu
+ "SkIRect::MakeEmpty()", // default value
+ "${oldVar}.isEmpty() || ${oldVar} != ${newVar}"), // dirty check
+
+ REGISTER(Layout::CType::kSkPMColor4f, { "half4", "float4", "double4" },
+ "${pdman}.set4fv(${uniform}, 1, ${var}.vec())", // to gpu
+ "{SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN}"), // default value
+
+ REGISTER(Layout::CType::kSkVector4, { "half4", "float4", "double4" },
+ "${pdman}.set4fv(${uniform}, 1, ${var}.fData)", // to gpu
+ "SkVector4(SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN, SK_MScalarNaN)", // default value
+ "${oldVar} != (${newVar})"), // dirty check
+
+ REGISTER(Layout::CType::kSkPoint, { "half2", "float2", "double2" } ,
+ "${pdman}.set2f(${uniform}, ${var}.fX, ${var}.fY)", // to gpu
+ "SkPoint::Make(SK_FloatNaN, SK_FloatNaN)"), // default value
+
+ REGISTER(Layout::CType::kSkIPoint, { "int2", "short2", "byte2" },
+ "${pdman}.set2i(${uniform}, ${var}.fX, ${var}.fY)", // to gpu
+ "SkIPoint::Make(SK_NaN32, SK_NaN32)"), // default value
+
+ REGISTER(Layout::CType::kSkMatrix, { "half3x3", "float3x3", "double3x3" },
+ "${pdman}.setSkMatrix(${uniform}, ${var})", // to gpu
+ "SkMatrix::MakeScale(SK_FloatNaN)", // default value
+ "!${oldVar}.cheapEqualTo(${newVar})"), // dirty check
+
+ REGISTER(Layout::CType::kSkMatrix44, { "half4x4", "float4x4", "double4x4" },
+ "${pdman}.setSkMatrix44(${uniform}, ${var})", // to gpu
+ "SkMatrix44(SkMatrix44::kNaN_Constructor)", // default value
+ "${oldVar} != (${newVar})"), // dirty check
+
+ REGISTER(Layout::CType::kFloat, { "half", "float", "double" },
+ "${pdman}.set1f(${uniform}, ${var})", // to gpu
+ "SK_FloatNaN"), // default value
+
+ REGISTER(Layout::CType::kInt32, { "int", "short", "byte" },
+ "${pdman}.set1i(${uniform}, ${var})", // to gpu
+ "SK_NaN32"), // default value
+ };
+
+ return registeredMappers;
+}
+
+/////
+
+// Greedy search through registered handlers for one that has a matching
+// ctype and supports the sksl type of the variable.
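+// For instance, a half4 uniform whose layout resolves to Layout::CType::kSkPMColor4f matches the
+// kSkPMColor4f mapper above, since "half4" appears in that mapper's supported sksl types.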
+const UniformCTypeMapper* UniformCTypeMapper::Get(const Context& context, const Type& type,
+ const Layout& layout) {
+ const std::vector<UniformCTypeMapper>& registeredMappers = get_mappers();
+
+ Layout::CType ctype = layout.fCType;
+ // If there's no custom ctype declared in the layout, use the default type mapping
+ if (ctype == Layout::CType::kDefault) {
+ ctype = HCodeGenerator::ParameterCType(context, type, layout);
+ }
+
+ const String& skslType = type.name();
+
+ for (size_t i = 0; i < registeredMappers.size(); i++) {
+ if (registeredMappers[i].ctype() == ctype) {
+ // Check for sksl support, since some c types (e.g. SkMatrix) can be used in multiple
+ // uniform types and send data to the gpu differently in those conditions
+ const std::vector<String> supportedSKSL = registeredMappers[i].supportedTypeNames();
+ for (size_t j = 0; j < supportedSKSL.size(); j++) {
+ if (supportedSKSL[j] == skslType) {
+                    // Found a match for both the ctype and the sksl type, so return it
+ return &registeredMappers[i];
+ }
+ }
+ }
+ }
+
+ // Didn't find a match
+ return nullptr;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.h b/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.h
new file mode 100644
index 0000000000..c2c01f9984
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCPPUniformCTypes.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLUniformCTypes_DEFINED
+#define SkSLUniformCTypes_DEFINED
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLString.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+// This uses templates to define dirtyExpression(), saveState() and setUniform(). Each template can
+// reference token names formatted ${name} that are replaced with the actual values passed into the
+// functions.
+//
+// dirtyExpression() and saveState() support the following tokens:
+// - ${newVar} replaced with value of newValueVarName (1st argument)
+// - ${oldVar} replaced with value of oldValueVarName (2nd argument)
+//
+// setUniform() supports these tokens:
+// - ${pdman} replaced with value of pdmanName (1st argument)
+// - ${uniform} replaced with value of uniformHandleName (2nd argument)
+// - ${var} replaced with value of valueVarName (3rd argument)
+//
+// All templates and C++ snippets should produce valid expressions, but do not need to include
+// semicolons or newlines, which will be handled by the code generation itself.
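+//
+// As a hypothetical example, a setUniform() template of "${pdman}.set1f(${uniform}, ${var})"
+// invoked with pdmanName = "pdman", uniformHandleName = "fScaleVar", and
+// valueVarName = "_outer.scale" produces "pdman.set1f(fScaleVar, _outer.scale)".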
+class UniformCTypeMapper {
+public:
+ // Create a templated mapper that does not support state tracking
+ UniformCTypeMapper(Layout::CType ctype, const std::vector<String>& skslTypes,
+ const char* setUniformFormat)
+ : UniformCTypeMapper(ctype, skslTypes, setUniformFormat, false, "", "", "") { }
+
+ // Create a templated mapper that provides extra patterns for the state
+ // tracking expressions.
+ UniformCTypeMapper(Layout::CType ctype, const std::vector<String>& skslTypes,
+ const String& setUniformFormat, const String& defaultValue,
+ const String& dirtyExpressionFormat, const String& saveStateFormat)
+ : UniformCTypeMapper(ctype, skslTypes, setUniformFormat,
+ true, defaultValue, dirtyExpressionFormat, saveStateFormat) { }
+
+ // Returns nullptr if the type and layout are not supported; the returned pointer's ownership
+    // is not transferred to the caller.
+ //
+ // The returned mapper can support tracking even if tracking is disabled based on the flags in
+ // the layout.
+ static const UniformCTypeMapper* Get(const Context& context, const Type& type,
+ const Layout& layout);
+
+ static const UniformCTypeMapper* Get(const Context& context, const Variable& variable) {
+ return Get(context, variable.fType, variable.fModifiers.fLayout);
+ }
+
+ // The C++ type name that this mapper applies to
+ Layout::CType ctype() const {
+ return fCType;
+ }
+
+ // The sksl type names that the mapper's ctype can be mapped to
+ const std::vector<String>& supportedTypeNames() const {
+ return fSKSLTypes;
+ }
+
+ // Whether or not this handler knows how to write state tracking code
+ // for the uniform variables
+ bool supportsTracking() const {
+ return fSupportsTracking;
+ }
+
+    // What the C++ class fields are initialized to in the GLSLFragmentProcessor. The empty string
+ // implies the no-arg constructor is suitable. This is not used if supportsTracking() returns
+ // false.
+ //
+    // The returned snippet will be valid as the rhs of an assignment.
+ const String& defaultValue() const {
+ return fDefaultValue;
+ }
+
+ // Return a boolean expression that returns true if the variables specified by newValueVarName
+ // and oldValueVarName have different values. This is ignored if supportsTracking() returns
+ // false.
+ //
+ // The returned snippet will be a valid expression to be inserted into the condition of an 'if'
+ // statement.
+ String dirtyExpression(const String& newValueVarName, const String& oldValueVarName) const;
+
+ // Return a statement that stores the value of newValueVarName into the variable specified by
+ // oldValueVarName. This is ignored if supportsTracking() returns false.
+ //
+ // The returned snippet will be a valid expression.
+ String saveState(const String& newValueVarName, const String& oldValueVarName) const;
+
+ // Return a statement that invokes the appropriate setX method on the GrGLSLProgramDataManager
+ // specified by pdmanName, where the uniform is provided by the expression stored in
+ // uniformHandleName, and valueVarName is the variable name pointing to the ctype instance
+ // holding the new value.
+ //
+ // The returned snippet will be a valid expression.
+ String setUniform(const String& pdmanName, const String& uniformHandleName,
+ const String& valueVarName) const;
+
+ // True if the setUniform() template only uses the value variable once in its expression. The
+ // variable does not necessarily get inlined if this returns true, since a local variable may be
+ // needed if state tracking is employed for a particular uniform.
+ bool canInlineUniformValue() const {
+ return fInlineValue;
+ }
+
+private:
+ UniformCTypeMapper(Layout::CType ctype, const std::vector<String>& skslTypes,
+ const String& setUniformFormat, bool enableTracking, const String& defaultValue,
+ const String& dirtyExpressionFormat, const String& saveStateFormat);
+
+ Layout::CType fCType;
+ std::vector<String> fSKSLTypes;
+ String fUniformTemplate;
+ bool fInlineValue; // Cached value calculated from fUniformTemplate
+
+ bool fSupportsTracking;
+ String fDefaultValue;
+ String fDirtyExpressionTemplate;
+ String fSaveStateTemplate;
+};
+
+} // namespace
+
+#endif // SkSLUniformCTypes_DEFINED
diff --git a/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h
new file mode 100644
index 0000000000..8541c29468
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CODEGENERATOR
+#define SKSL_CODEGENERATOR
+
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+namespace SkSL {
+
+/**
+ * Abstract superclass of all code generators, which take a Program as input and produce code as
+ * output.
+ */
+class CodeGenerator {
+public:
+ CodeGenerator(const Program* program, ErrorReporter* errors, OutputStream* out)
+ : fProgram(*program)
+ , fErrors(*errors)
+ , fOut(out) {
+ SkASSERT(program->fIsOptimized);
+ }
+
+ virtual ~CodeGenerator() {}
+
+ virtual bool generateCode() = 0;
+
+protected:
+
+ const Program& fProgram;
+ ErrorReporter& fErrors;
+ OutputStream* fOut;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.cpp b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
new file mode 100644
index 0000000000..7aa6b9f641
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
@@ -0,0 +1,1676 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCompiler.h"
+
+#include "src/sksl/SkSLByteCodeGenerator.h"
+#include "src/sksl/SkSLCFGGenerator.h"
+#include "src/sksl/SkSLCPPCodeGenerator.h"
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+#include "src/sksl/SkSLHCodeGenerator.h"
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/SkSLMetalCodeGenerator.h"
+#include "src/sksl/SkSLPipelineStageCodeGenerator.h"
+#include "src/sksl/SkSLSPIRVCodeGenerator.h"
+#include "src/sksl/ir/SkSLEnum.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLUnresolvedFunction.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#ifdef SK_ENABLE_SPIRV_VALIDATION
+#include "spirv-tools/libspirv.hpp"
+#endif
+
+// include the built-in shader symbols as static strings
+
+#define STRINGIFY(x) #x
+
+static const char* SKSL_GPU_INCLUDE =
+#include "sksl_gpu.inc"
+;
+
+static const char* SKSL_INTERP_INCLUDE =
+#include "sksl_interp.inc"
+;
+
+static const char* SKSL_VERT_INCLUDE =
+#include "sksl_vert.inc"
+;
+
+static const char* SKSL_FRAG_INCLUDE =
+#include "sksl_frag.inc"
+;
+
+static const char* SKSL_GEOM_INCLUDE =
+#include "sksl_geom.inc"
+;
+
+static const char* SKSL_FP_INCLUDE =
+#include "sksl_enums.inc"
+#include "sksl_fp.inc"
+;
+
+static const char* SKSL_PIPELINE_INCLUDE =
+#include "sksl_pipeline.inc"
+;
+
+namespace SkSL {
+
+Compiler::Compiler(Flags flags)
+: fFlags(flags)
+, fContext(new Context())
+, fErrorCount(0) {
+ auto types = std::shared_ptr<SymbolTable>(new SymbolTable(this));
+ auto symbols = std::shared_ptr<SymbolTable>(new SymbolTable(types, this));
+ fIRGenerator = new IRGenerator(fContext.get(), symbols, *this);
+ fTypes = types;
+ #define ADD_TYPE(t) types->addWithoutOwnership(fContext->f ## t ## _Type->fName, \
+ fContext->f ## t ## _Type.get())
+ ADD_TYPE(Void);
+ ADD_TYPE(Float);
+ ADD_TYPE(Float2);
+ ADD_TYPE(Float3);
+ ADD_TYPE(Float4);
+ ADD_TYPE(Half);
+ ADD_TYPE(Half2);
+ ADD_TYPE(Half3);
+ ADD_TYPE(Half4);
+ ADD_TYPE(Double);
+ ADD_TYPE(Double2);
+ ADD_TYPE(Double3);
+ ADD_TYPE(Double4);
+ ADD_TYPE(Int);
+ ADD_TYPE(Int2);
+ ADD_TYPE(Int3);
+ ADD_TYPE(Int4);
+ ADD_TYPE(UInt);
+ ADD_TYPE(UInt2);
+ ADD_TYPE(UInt3);
+ ADD_TYPE(UInt4);
+ ADD_TYPE(Short);
+ ADD_TYPE(Short2);
+ ADD_TYPE(Short3);
+ ADD_TYPE(Short4);
+ ADD_TYPE(UShort);
+ ADD_TYPE(UShort2);
+ ADD_TYPE(UShort3);
+ ADD_TYPE(UShort4);
+ ADD_TYPE(Byte);
+ ADD_TYPE(Byte2);
+ ADD_TYPE(Byte3);
+ ADD_TYPE(Byte4);
+ ADD_TYPE(UByte);
+ ADD_TYPE(UByte2);
+ ADD_TYPE(UByte3);
+ ADD_TYPE(UByte4);
+ ADD_TYPE(Bool);
+ ADD_TYPE(Bool2);
+ ADD_TYPE(Bool3);
+ ADD_TYPE(Bool4);
+ ADD_TYPE(Float2x2);
+ ADD_TYPE(Float2x3);
+ ADD_TYPE(Float2x4);
+ ADD_TYPE(Float3x2);
+ ADD_TYPE(Float3x3);
+ ADD_TYPE(Float3x4);
+ ADD_TYPE(Float4x2);
+ ADD_TYPE(Float4x3);
+ ADD_TYPE(Float4x4);
+ ADD_TYPE(Half2x2);
+ ADD_TYPE(Half2x3);
+ ADD_TYPE(Half2x4);
+ ADD_TYPE(Half3x2);
+ ADD_TYPE(Half3x3);
+ ADD_TYPE(Half3x4);
+ ADD_TYPE(Half4x2);
+ ADD_TYPE(Half4x3);
+ ADD_TYPE(Half4x4);
+ ADD_TYPE(Double2x2);
+ ADD_TYPE(Double2x3);
+ ADD_TYPE(Double2x4);
+ ADD_TYPE(Double3x2);
+ ADD_TYPE(Double3x3);
+ ADD_TYPE(Double3x4);
+ ADD_TYPE(Double4x2);
+ ADD_TYPE(Double4x3);
+ ADD_TYPE(Double4x4);
+ ADD_TYPE(GenType);
+ ADD_TYPE(GenHType);
+ ADD_TYPE(GenDType);
+ ADD_TYPE(GenIType);
+ ADD_TYPE(GenUType);
+ ADD_TYPE(GenBType);
+ ADD_TYPE(Mat);
+ ADD_TYPE(Vec);
+ ADD_TYPE(GVec);
+ ADD_TYPE(GVec2);
+ ADD_TYPE(GVec3);
+ ADD_TYPE(GVec4);
+ ADD_TYPE(HVec);
+ ADD_TYPE(DVec);
+ ADD_TYPE(IVec);
+ ADD_TYPE(UVec);
+ ADD_TYPE(SVec);
+ ADD_TYPE(USVec);
+ ADD_TYPE(ByteVec);
+ ADD_TYPE(UByteVec);
+ ADD_TYPE(BVec);
+
+ ADD_TYPE(Sampler1D);
+ ADD_TYPE(Sampler2D);
+ ADD_TYPE(Sampler3D);
+ ADD_TYPE(SamplerExternalOES);
+ ADD_TYPE(SamplerCube);
+ ADD_TYPE(Sampler2DRect);
+ ADD_TYPE(Sampler1DArray);
+ ADD_TYPE(Sampler2DArray);
+ ADD_TYPE(SamplerCubeArray);
+ ADD_TYPE(SamplerBuffer);
+ ADD_TYPE(Sampler2DMS);
+ ADD_TYPE(Sampler2DMSArray);
+
+ ADD_TYPE(ISampler2D);
+
+ ADD_TYPE(Image2D);
+ ADD_TYPE(IImage2D);
+
+ ADD_TYPE(SubpassInput);
+ ADD_TYPE(SubpassInputMS);
+
+ ADD_TYPE(GSampler1D);
+ ADD_TYPE(GSampler2D);
+ ADD_TYPE(GSampler3D);
+ ADD_TYPE(GSamplerCube);
+ ADD_TYPE(GSampler2DRect);
+ ADD_TYPE(GSampler1DArray);
+ ADD_TYPE(GSampler2DArray);
+ ADD_TYPE(GSamplerCubeArray);
+ ADD_TYPE(GSamplerBuffer);
+ ADD_TYPE(GSampler2DMS);
+ ADD_TYPE(GSampler2DMSArray);
+
+ ADD_TYPE(Sampler1DShadow);
+ ADD_TYPE(Sampler2DShadow);
+ ADD_TYPE(SamplerCubeShadow);
+ ADD_TYPE(Sampler2DRectShadow);
+ ADD_TYPE(Sampler1DArrayShadow);
+ ADD_TYPE(Sampler2DArrayShadow);
+ ADD_TYPE(SamplerCubeArrayShadow);
+ ADD_TYPE(GSampler2DArrayShadow);
+ ADD_TYPE(GSamplerCubeArrayShadow);
+ ADD_TYPE(FragmentProcessor);
+ ADD_TYPE(Sampler);
+ ADD_TYPE(Texture2D);
+
+ StringFragment skCapsName("sk_Caps");
+ Variable* skCaps = new Variable(-1, Modifiers(), skCapsName,
+ *fContext->fSkCaps_Type, Variable::kGlobal_Storage);
+ fIRGenerator->fSymbolTable->add(skCapsName, std::unique_ptr<Symbol>(skCaps));
+
+ StringFragment skArgsName("sk_Args");
+ Variable* skArgs = new Variable(-1, Modifiers(), skArgsName,
+ *fContext->fSkArgs_Type, Variable::kGlobal_Storage);
+ fIRGenerator->fSymbolTable->add(skArgsName, std::unique_ptr<Symbol>(skArgs));
+
+ std::vector<std::unique_ptr<ProgramElement>> ignored;
+ this->processIncludeFile(Program::kFragment_Kind, SKSL_GPU_INCLUDE, strlen(SKSL_GPU_INCLUDE),
+ symbols, &ignored, &fGpuSymbolTable);
+ this->processIncludeFile(Program::kVertex_Kind, SKSL_VERT_INCLUDE, strlen(SKSL_VERT_INCLUDE),
+ fGpuSymbolTable, &fVertexInclude, &fVertexSymbolTable);
+ this->processIncludeFile(Program::kFragment_Kind, SKSL_FRAG_INCLUDE, strlen(SKSL_FRAG_INCLUDE),
+ fGpuSymbolTable, &fFragmentInclude, &fFragmentSymbolTable);
+ this->processIncludeFile(Program::kGeometry_Kind, SKSL_GEOM_INCLUDE, strlen(SKSL_GEOM_INCLUDE),
+ fGpuSymbolTable, &fGeometryInclude, &fGeometrySymbolTable);
+ this->processIncludeFile(Program::kPipelineStage_Kind, SKSL_PIPELINE_INCLUDE,
+ strlen(SKSL_PIPELINE_INCLUDE), fGpuSymbolTable, &fPipelineInclude,
+ &fPipelineSymbolTable);
+ this->processIncludeFile(Program::kGeneric_Kind, SKSL_INTERP_INCLUDE,
+ strlen(SKSL_INTERP_INCLUDE), symbols, &fInterpreterInclude,
+ &fInterpreterSymbolTable);
+}
+
+Compiler::~Compiler() {
+ delete fIRGenerator;
+}
+
+void Compiler::processIncludeFile(Program::Kind kind, const char* src, size_t length,
+ std::shared_ptr<SymbolTable> base,
+ std::vector<std::unique_ptr<ProgramElement>>* outElements,
+ std::shared_ptr<SymbolTable>* outSymbolTable) {
+ fIRGenerator->fSymbolTable = std::move(base);
+ Program::Settings settings;
+ fIRGenerator->start(&settings, nullptr);
+ fIRGenerator->convertProgram(kind, src, length, *fTypes, outElements);
+ if (this->fErrorCount) {
+ printf("Unexpected errors: %s\n", this->fErrorText.c_str());
+ }
+ SkASSERT(!fErrorCount);
+ fIRGenerator->fSymbolTable->markAllFunctionsBuiltin();
+ *outSymbolTable = fIRGenerator->fSymbolTable;
+}
+
+// add the definition created by assigning to the lvalue to the definition set
+void Compiler::addDefinition(const Expression* lvalue, std::unique_ptr<Expression>* expr,
+ DefinitionMap* definitions) {
+ switch (lvalue->fKind) {
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference*) lvalue)->fVariable;
+ if (var.fStorage == Variable::kLocal_Storage) {
+ (*definitions)[&var] = expr;
+ }
+ break;
+ }
+ case Expression::kSwizzle_Kind:
+ // We consider the variable written to as long as at least some of its components have
+ // been written to. This will lead to some false negatives (we won't catch it if you
+ // write to foo.x and then read foo.y), but being stricter could lead to false positives
+ // (we write to foo.x, and then pass foo to a function which happens to only read foo.x,
+ // but since we pass foo as a whole it is flagged as an error) unless we perform a much
+ // more complicated whole-program analysis. This is probably good enough.
+ this->addDefinition(((Swizzle*) lvalue)->fBase.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ break;
+ case Expression::kIndex_Kind:
+ // see comments in Swizzle
+ this->addDefinition(((IndexExpression*) lvalue)->fBase.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ break;
+ case Expression::kFieldAccess_Kind:
+ // see comments in Swizzle
+ this->addDefinition(((FieldAccess*) lvalue)->fBase.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ break;
+ case Expression::kTernary_Kind:
+ // To simplify analysis, we just pretend that we write to both sides of the ternary.
+ // This allows for false positives (meaning we fail to detect that a variable might not
+ // have been assigned), but is preferable to false negatives.
+ this->addDefinition(((TernaryExpression*) lvalue)->fIfTrue.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ this->addDefinition(((TernaryExpression*) lvalue)->fIfFalse.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ break;
+ case Expression::kExternalValue_Kind:
+ break;
+ default:
+ // not an lvalue, can't happen
+ SkASSERT(false);
+ }
+}
+
+// add local variables defined by this node to the set
+void Compiler::addDefinitions(const BasicBlock::Node& node,
+ DefinitionMap* definitions) {
+ switch (node.fKind) {
+ case BasicBlock::Node::kExpression_Kind: {
+ SkASSERT(node.expression());
+ const Expression* expr = (Expression*) node.expression()->get();
+ switch (expr->fKind) {
+ case Expression::kBinary_Kind: {
+ BinaryExpression* b = (BinaryExpression*) expr;
+ if (b->fOperator == Token::EQ) {
+ this->addDefinition(b->fLeft.get(), &b->fRight, definitions);
+ } else if (Compiler::IsAssignment(b->fOperator)) {
+ this->addDefinition(
+ b->fLeft.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ }
+ break;
+ }
+ case Expression::kFunctionCall_Kind: {
+ const FunctionCall& c = (const FunctionCall&) *expr;
+ for (size_t i = 0; i < c.fFunction.fParameters.size(); ++i) {
+ if (c.fFunction.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ this->addDefinition(
+ c.fArguments[i].get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ }
+ }
+ break;
+ }
+ case Expression::kPrefix_Kind: {
+ const PrefixExpression* p = (PrefixExpression*) expr;
+ if (p->fOperator == Token::MINUSMINUS || p->fOperator == Token::PLUSPLUS) {
+ this->addDefinition(
+ p->fOperand.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ }
+ break;
+ }
+ case Expression::kPostfix_Kind: {
+ const PostfixExpression* p = (PostfixExpression*) expr;
+ if (p->fOperator == Token::MINUSMINUS || p->fOperator == Token::PLUSPLUS) {
+ this->addDefinition(
+ p->fOperand.get(),
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ }
+ break;
+ }
+ case Expression::kVariableReference_Kind: {
+ const VariableReference* v = (VariableReference*) expr;
+ if (v->fRefKind != VariableReference::kRead_RefKind) {
+ this->addDefinition(
+ v,
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
+ definitions);
+ }
+                    break;
+                }
+ default:
+ break;
+ }
+ break;
+ }
+ case BasicBlock::Node::kStatement_Kind: {
+ const Statement* stmt = (Statement*) node.statement()->get();
+ if (stmt->fKind == Statement::kVarDeclaration_Kind) {
+ VarDeclaration& vd = (VarDeclaration&) *stmt;
+ if (vd.fValue) {
+ (*definitions)[vd.fVar] = &vd.fValue;
+ }
+ }
+ break;
+ }
+ }
+}
+
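+// Standard forward dataflow step: recomputes the definitions leaving the given block and merges
+// them into each exit block's entry state, re-queuing any exit whose state changed so that the
+// iteration in computeDataFlow() reaches a fixed point.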
+void Compiler::scanCFG(CFG* cfg, BlockId blockId, std::set<BlockId>* workList) {
+ BasicBlock& block = cfg->fBlocks[blockId];
+
+ // compute definitions after this block
+ DefinitionMap after = block.fBefore;
+ for (const BasicBlock::Node& n : block.fNodes) {
+ this->addDefinitions(n, &after);
+ }
+
+ // propagate definitions to exits
+ for (BlockId exitId : block.fExits) {
+ if (exitId == blockId) {
+ continue;
+ }
+ BasicBlock& exit = cfg->fBlocks[exitId];
+ for (const auto& pair : after) {
+ std::unique_ptr<Expression>* e1 = pair.second;
+ auto found = exit.fBefore.find(pair.first);
+ if (found == exit.fBefore.end()) {
+ // exit has no definition for it, just copy it
+ workList->insert(exitId);
+ exit.fBefore[pair.first] = e1;
+ } else {
+ // exit has a (possibly different) value already defined
+ std::unique_ptr<Expression>* e2 = exit.fBefore[pair.first];
+ if (e1 != e2) {
+ // definition has changed, merge and add exit block to worklist
+ workList->insert(exitId);
+ if (e1 && e2) {
+ exit.fBefore[pair.first] =
+ (std::unique_ptr<Expression>*) &fContext->fDefined_Expression;
+ } else {
+ exit.fBefore[pair.first] = nullptr;
+ }
+ }
+ }
+ }
+ }
+}
+
+// returns a map which maps all local variables in the function to null, indicating that their value
+// is initially unknown
+static DefinitionMap compute_start_state(const CFG& cfg) {
+ DefinitionMap result;
+ for (const auto& block : cfg.fBlocks) {
+ for (const auto& node : block.fNodes) {
+ if (node.fKind == BasicBlock::Node::kStatement_Kind) {
+ SkASSERT(node.statement());
+ const Statement* s = node.statement()->get();
+ if (s->fKind == Statement::kVarDeclarations_Kind) {
+ const VarDeclarationsStatement* vd = (const VarDeclarationsStatement*) s;
+ for (const auto& decl : vd->fDeclaration->fVars) {
+ if (decl->fKind == Statement::kVarDeclaration_Kind) {
+ result[((VarDeclaration&) *decl).fVar] = nullptr;
+ }
+ }
+ }
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * Returns true if assigning to this lvalue has no effect.
+ */
+static bool is_dead(const Expression& lvalue) {
+ switch (lvalue.fKind) {
+ case Expression::kVariableReference_Kind:
+ return ((VariableReference&) lvalue).fVariable.dead();
+ case Expression::kSwizzle_Kind:
+ return is_dead(*((Swizzle&) lvalue).fBase);
+ case Expression::kFieldAccess_Kind:
+ return is_dead(*((FieldAccess&) lvalue).fBase);
+ case Expression::kIndex_Kind: {
+ const IndexExpression& idx = (IndexExpression&) lvalue;
+ return is_dead(*idx.fBase) && !idx.fIndex->hasSideEffects();
+ }
+ case Expression::kTernary_Kind: {
+ const TernaryExpression& t = (TernaryExpression&) lvalue;
+ return !t.fTest->hasSideEffects() && is_dead(*t.fIfTrue) && is_dead(*t.fIfFalse);
+ }
+ case Expression::kExternalValue_Kind:
+ return false;
+ default:
+ ABORT("invalid lvalue: %s\n", lvalue.description().c_str());
+ }
+}
+
+/**
+ * Returns true if this is an assignment which can be collapsed down to just the right hand side due
+ * to a dead target and lack of side effects on the left hand side.
+ */
+static bool dead_assignment(const BinaryExpression& b) {
+ if (!Compiler::IsAssignment(b.fOperator)) {
+ return false;
+ }
+ return is_dead(*b.fLeft);
+}
+
+void Compiler::computeDataFlow(CFG* cfg) {
+ cfg->fBlocks[cfg->fStart].fBefore = compute_start_state(*cfg);
+ std::set<BlockId> workList;
+ for (BlockId i = 0; i < cfg->fBlocks.size(); i++) {
+ workList.insert(i);
+ }
+ while (workList.size()) {
+ BlockId next = *workList.begin();
+ workList.erase(workList.begin());
+ this->scanCFG(cfg, next, &workList);
+ }
+}
+
+/**
+ * Attempts to replace the expression pointed to by iter with a new one (in both the CFG and the
+ * IR). If the expression can be cleanly removed, returns true and updates the iterator to point to
+ * the newly-inserted element. Otherwise updates only the IR and returns false (and the CFG will
+ * need to be regenerated).
+ */
+bool try_replace_expression(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ std::unique_ptr<Expression>* newExpression) {
+ std::unique_ptr<Expression>* target = (*iter)->expression();
+ if (!b->tryRemoveExpression(iter)) {
+ *target = std::move(*newExpression);
+ return false;
+ }
+ *target = std::move(*newExpression);
+ return b->tryInsertExpression(iter, target);
+}
+
+/**
+ * Returns true if the expression is a constant numeric literal with the specified value, or a
+ * constant vector with all elements equal to the specified value.
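+ * For example, the literal 1.0, the int literal 1, and the constructor float4(1) all count as
+ * constant with value 1.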
+ */
+bool is_constant(const Expression& expr, double value) {
+ switch (expr.fKind) {
+ case Expression::kIntLiteral_Kind:
+ return ((IntLiteral&) expr).fValue == value;
+ case Expression::kFloatLiteral_Kind:
+ return ((FloatLiteral&) expr).fValue == value;
+ case Expression::kConstructor_Kind: {
+ Constructor& c = (Constructor&) expr;
+ bool isFloat = c.fType.columns() > 1 ? c.fType.componentType().isFloat()
+ : c.fType.isFloat();
+ if (c.fType.kind() == Type::kVector_Kind && c.isConstant()) {
+ for (int i = 0; i < c.fType.columns(); ++i) {
+ if (isFloat) {
+ if (c.getFVecComponent(i) != value) {
+ return false;
+ }
+ } else if (c.getIVecComponent(i) != value) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+/**
+ * Collapses the binary expression pointed to by iter down to just the right side (in both the IR
+ * and CFG structures).
+ */
+void delete_left(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ *outUpdated = true;
+ std::unique_ptr<Expression>* target = (*iter)->expression();
+ SkASSERT((*target)->fKind == Expression::kBinary_Kind);
+ BinaryExpression& bin = (BinaryExpression&) **target;
+ SkASSERT(!bin.fLeft->hasSideEffects());
+ bool result;
+ if (bin.fOperator == Token::EQ) {
+ result = b->tryRemoveLValueBefore(iter, bin.fLeft.get());
+ } else {
+ result = b->tryRemoveExpressionBefore(iter, bin.fLeft.get());
+ }
+ *target = std::move(bin.fRight);
+ if (!result) {
+ *outNeedsRescan = true;
+ return;
+ }
+ if (*iter == b->fNodes.begin()) {
+ *outNeedsRescan = true;
+ return;
+ }
+ --(*iter);
+ if ((*iter)->fKind != BasicBlock::Node::kExpression_Kind ||
+ (*iter)->expression() != &bin.fRight) {
+ *outNeedsRescan = true;
+ return;
+ }
+ *iter = b->fNodes.erase(*iter);
+ SkASSERT((*iter)->expression() == target);
+}
+
+/**
+ * Collapses the binary expression pointed to by iter down to just the left side (in both the IR and
+ * CFG structures).
+ */
+void delete_right(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ *outUpdated = true;
+ std::unique_ptr<Expression>* target = (*iter)->expression();
+ SkASSERT((*target)->fKind == Expression::kBinary_Kind);
+ BinaryExpression& bin = (BinaryExpression&) **target;
+ SkASSERT(!bin.fRight->hasSideEffects());
+ if (!b->tryRemoveExpressionBefore(iter, bin.fRight.get())) {
+ *target = std::move(bin.fLeft);
+ *outNeedsRescan = true;
+ return;
+ }
+ *target = std::move(bin.fLeft);
+ if (*iter == b->fNodes.begin()) {
+ *outNeedsRescan = true;
+ return;
+ }
+ --(*iter);
+ if (((*iter)->fKind != BasicBlock::Node::kExpression_Kind ||
+ (*iter)->expression() != &bin.fLeft)) {
+ *outNeedsRescan = true;
+ return;
+ }
+ *iter = b->fNodes.erase(*iter);
+ SkASSERT((*iter)->expression() == target);
+}
+
+/**
+ * Constructs the specified type using a single argument.
+ */
+static std::unique_ptr<Expression> construct(const Type& type, std::unique_ptr<Expression> v) {
+ std::vector<std::unique_ptr<Expression>> args;
+ args.push_back(std::move(v));
+ auto result = std::unique_ptr<Expression>(new Constructor(-1, type, std::move(args)));
+ return result;
+}
+
+/**
+ * Used in the implementations of vectorize_left and vectorize_right. Given a vector type and an
+ * expression x, deletes the expression pointed to by iter and replaces it with <type>(x).
+ */
+static void vectorize(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ const Type& type,
+ std::unique_ptr<Expression>* otherExpression,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ SkASSERT((*(*iter)->expression())->fKind == Expression::kBinary_Kind);
+ SkASSERT(type.kind() == Type::kVector_Kind);
+ SkASSERT((*otherExpression)->fType.kind() == Type::kScalar_Kind);
+ *outUpdated = true;
+ std::unique_ptr<Expression>* target = (*iter)->expression();
+ if (!b->tryRemoveExpression(iter)) {
+ *target = construct(type, std::move(*otherExpression));
+ *outNeedsRescan = true;
+ } else {
+ *target = construct(type, std::move(*otherExpression));
+ if (!b->tryInsertExpression(iter, target)) {
+ *outNeedsRescan = true;
+ }
+ }
+}
+
+/**
+ * Given a binary expression of the form x <op> vec<n>(y), deletes the right side and vectorizes the
+ * left to yield vec<n>(x).
+ */
+static void vectorize_left(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ BinaryExpression& bin = (BinaryExpression&) **(*iter)->expression();
+ vectorize(b, iter, bin.fRight->fType, &bin.fLeft, outUpdated, outNeedsRescan);
+}
+
+/**
+ * Given a binary expression of the form vec<n>(x) <op> y, deletes the left side and vectorizes the
+ * right to yield vec<n>(y).
+ */
+static void vectorize_right(BasicBlock* b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ BinaryExpression& bin = (BinaryExpression&) **(*iter)->expression();
+ vectorize(b, iter, bin.fLeft->fType, &bin.fRight, outUpdated, outNeedsRescan);
+}
+
+// Mark that an expression which we were writing to is no longer being written to
+void clear_write(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ ((VariableReference&) expr).setRefKind(VariableReference::kRead_RefKind);
+ break;
+ }
+ case Expression::kFieldAccess_Kind:
+ clear_write(*((FieldAccess&) expr).fBase);
+ break;
+ case Expression::kSwizzle_Kind:
+ clear_write(*((Swizzle&) expr).fBase);
+ break;
+ case Expression::kIndex_Kind:
+ clear_write(*((IndexExpression&) expr).fBase);
+ break;
+ default:
+ ABORT("shouldn't be writing to this kind of expression\n");
+ break;
+ }
+}
+
+void Compiler::simplifyExpression(DefinitionMap& definitions,
+ BasicBlock& b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ std::unordered_set<const Variable*>* undefinedVariables,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ Expression* expr = (*iter)->expression()->get();
+ SkASSERT(expr);
+ if ((*iter)->fConstantPropagation) {
+ std::unique_ptr<Expression> optimized = expr->constantPropagate(*fIRGenerator, definitions);
+ if (optimized) {
+ *outUpdated = true;
+ if (!try_replace_expression(&b, iter, &optimized)) {
+ *outNeedsRescan = true;
+ return;
+ }
+ SkASSERT((*iter)->fKind == BasicBlock::Node::kExpression_Kind);
+ expr = (*iter)->expression()->get();
+ }
+ }
+ switch (expr->fKind) {
+ case Expression::kVariableReference_Kind: {
+ const VariableReference& ref = (VariableReference&) *expr;
+ const Variable& var = ref.fVariable;
+ if (ref.refKind() != VariableReference::kWrite_RefKind &&
+ ref.refKind() != VariableReference::kPointer_RefKind &&
+ var.fStorage == Variable::kLocal_Storage && !definitions[&var] &&
+ (*undefinedVariables).find(&var) == (*undefinedVariables).end()) {
+ (*undefinedVariables).insert(&var);
+ this->error(expr->fOffset,
+ "'" + var.fName + "' has not been assigned");
+ }
+ break;
+ }
+ case Expression::kTernary_Kind: {
+ TernaryExpression* t = (TernaryExpression*) expr;
+ if (t->fTest->fKind == Expression::kBoolLiteral_Kind) {
+ // ternary has a constant test, replace it with either the true or
+ // false branch
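+                // e.g. (true ? a : b) simplifies to just a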
+ if (((BoolLiteral&) *t->fTest).fValue) {
+ (*iter)->setExpression(std::move(t->fIfTrue));
+ } else {
+ (*iter)->setExpression(std::move(t->fIfFalse));
+ }
+ *outUpdated = true;
+ *outNeedsRescan = true;
+ }
+ break;
+ }
+ case Expression::kBinary_Kind: {
+ BinaryExpression* bin = (BinaryExpression*) expr;
+ if (dead_assignment(*bin)) {
+ delete_left(&b, iter, outUpdated, outNeedsRescan);
+ break;
+ }
+ // collapse useless expressions like x * 1 or x + 0
+ if (((bin->fLeft->fType.kind() != Type::kScalar_Kind) &&
+ (bin->fLeft->fType.kind() != Type::kVector_Kind)) ||
+ ((bin->fRight->fType.kind() != Type::kScalar_Kind) &&
+ (bin->fRight->fType.kind() != Type::kVector_Kind))) {
+ break;
+ }
+ switch (bin->fOperator) {
+ case Token::STAR:
+ if (is_constant(*bin->fLeft, 1)) {
+ if (bin->fLeft->fType.kind() == Type::kVector_Kind &&
+ bin->fRight->fType.kind() == Type::kScalar_Kind) {
+ // float4(1) * x -> float4(x)
+ vectorize_right(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // 1 * x -> x
+ // 1 * float4(x) -> float4(x)
+ // float4(1) * float4(x) -> float4(x)
+ delete_left(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ else if (is_constant(*bin->fLeft, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind &&
+ !bin->fRight->hasSideEffects()) {
+ // 0 * float4(x) -> float4(0)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // 0 * x -> 0
+ // float4(0) * x -> float4(0)
+ // float4(0) * float4(x) -> float4(0)
+ if (!bin->fRight->hasSideEffects()) {
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ }
+ else if (is_constant(*bin->fRight, 1)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind) {
+ // x * float4(1) -> float4(x)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // x * 1 -> x
+ // float4(x) * 1 -> float4(x)
+ // float4(x) * float4(1) -> float4(x)
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ else if (is_constant(*bin->fRight, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kVector_Kind &&
+ bin->fRight->fType.kind() == Type::kScalar_Kind &&
+ !bin->fLeft->hasSideEffects()) {
+ // float4(x) * 0 -> float4(0)
+ vectorize_right(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // x * 0 -> 0
+ // x * float4(0) -> float4(0)
+ // float4(x) * float4(0) -> float4(0)
+ if (!bin->fLeft->hasSideEffects()) {
+ delete_left(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ }
+ break;
+ case Token::PLUS:
+ if (is_constant(*bin->fLeft, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kVector_Kind &&
+ bin->fRight->fType.kind() == Type::kScalar_Kind) {
+ // float4(0) + x -> float4(x)
+ vectorize_right(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // 0 + x -> x
+ // 0 + float4(x) -> float4(x)
+ // float4(0) + float4(x) -> float4(x)
+ delete_left(&b, iter, outUpdated, outNeedsRescan);
+ }
+ } else if (is_constant(*bin->fRight, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind) {
+ // x + float4(0) -> float4(x)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // x + 0 -> x
+ // float4(x) + 0 -> float4(x)
+ // float4(x) + float4(0) -> float4(x)
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ break;
+ case Token::MINUS:
+ if (is_constant(*bin->fRight, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind) {
+ // x - float4(0) -> float4(x)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // x - 0 -> x
+ // float4(x) - 0 -> float4(x)
+ // float4(x) - float4(0) -> float4(x)
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ break;
+ case Token::SLASH:
+ if (is_constant(*bin->fRight, 1)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind) {
+ // x / float4(1) -> float4(x)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // x / 1 -> x
+ // float4(x) / 1 -> float4(x)
+ // float4(x) / float4(1) -> float4(x)
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ } else if (is_constant(*bin->fLeft, 0)) {
+ if (bin->fLeft->fType.kind() == Type::kScalar_Kind &&
+ bin->fRight->fType.kind() == Type::kVector_Kind &&
+ !bin->fRight->hasSideEffects()) {
+ // 0 / float4(x) -> float4(0)
+ vectorize_left(&b, iter, outUpdated, outNeedsRescan);
+ } else {
+ // 0 / x -> 0
+ // float4(0) / x -> float4(0)
+ // float4(0) / float4(x) -> float4(0)
+ if (!bin->fRight->hasSideEffects()) {
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ }
+ }
+ break;
+ case Token::PLUSEQ:
+ if (is_constant(*bin->fRight, 0)) {
+ clear_write(*bin->fLeft);
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ break;
+ case Token::MINUSEQ:
+ if (is_constant(*bin->fRight, 0)) {
+ clear_write(*bin->fLeft);
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ break;
+ case Token::STAREQ:
+ if (is_constant(*bin->fRight, 1)) {
+ clear_write(*bin->fLeft);
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ break;
+ case Token::SLASHEQ:
+ if (is_constant(*bin->fRight, 1)) {
+ clear_write(*bin->fLeft);
+ delete_right(&b, iter, outUpdated, outNeedsRescan);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case Expression::kSwizzle_Kind: {
+ Swizzle& s = (Swizzle&) *expr;
+ // detect identity swizzles like foo.rgba
+ if ((int) s.fComponents.size() == s.fBase->fType.columns()) {
+ bool identity = true;
+ for (int i = 0; i < (int) s.fComponents.size(); ++i) {
+ if (s.fComponents[i] != i) {
+ identity = false;
+ break;
+ }
+ }
+ if (identity) {
+ *outUpdated = true;
+ if (!try_replace_expression(&b, iter, &s.fBase)) {
+ *outNeedsRescan = true;
+ return;
+ }
+ SkASSERT((*iter)->fKind == BasicBlock::Node::kExpression_Kind);
+ break;
+ }
+ }
+ // detect swizzles of swizzles, e.g. replace foo.argb.r000 with foo.a000
+ if (s.fBase->fKind == Expression::kSwizzle_Kind) {
+ Swizzle& base = (Swizzle&) *s.fBase;
+ std::vector<int> final;
+ for (int c : s.fComponents) {
+ if (c == SKSL_SWIZZLE_0 || c == SKSL_SWIZZLE_1) {
+ final.push_back(c);
+ } else {
+ final.push_back(base.fComponents[c]);
+ }
+ }
+ *outUpdated = true;
+ std::unique_ptr<Expression> replacement(new Swizzle(*fContext, base.fBase->clone(),
+ std::move(final)));
+ if (!try_replace_expression(&b, iter, &replacement)) {
+ *outNeedsRescan = true;
+ return;
+ }
+ SkASSERT((*iter)->fKind == BasicBlock::Node::kExpression_Kind);
+ break;
+ }
+ }
+ default:
+ break;
+ }
+}
+
+// Returns true if this statement could potentially execute a break at the current level. We ignore
+// nested loops and switches, since any breaks inside them will merely exit that inner loop / switch.
+static bool contains_conditional_break(Statement& s, bool inConditional) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ for (const auto& sub : ((Block&) s).fStatements) {
+ if (contains_conditional_break(*sub, inConditional)) {
+ return true;
+ }
+ }
+ return false;
+ case Statement::kBreak_Kind:
+ return inConditional;
+ case Statement::kIf_Kind: {
+ const IfStatement& i = (IfStatement&) s;
+ return contains_conditional_break(*i.fIfTrue, true) ||
+ (i.fIfFalse && contains_conditional_break(*i.fIfFalse, true));
+ }
+ default:
+ return false;
+ }
+}
+
+// Returns true if this statement definitely executes a break at the current level. We ignore
+// nested loops and switches, since any breaks inside them will merely exit that inner loop / switch.
+static bool contains_unconditional_break(Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ for (const auto& sub : ((Block&) s).fStatements) {
+ if (contains_unconditional_break(*sub)) {
+ return true;
+ }
+ }
+ return false;
+ case Statement::kBreak_Kind:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns a block containing all of the statements that will be run if the given case matches
+// (which, owing to the statements being owned by unique_ptrs, means the switch itself will be
+// broken by this call and must then be discarded).
+// Returns null (and leaves the switch unmodified) if no such simple reduction is possible, such as
+// when break statements appear inside conditionals.
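+// For example (an illustrative sketch; a(), b(), c(), and x are placeholders):
+//     switch (2) { case 1: a(); case 2: b(); break; case 3: c(); }
+// requesting the block for 'case 2' yields { b(); }, because the unconditional break
+// ends the capture, while
+//     switch (2) { case 2: if (x) break; b(); }
+// yields null, because the conditional break cannot be resolved statically.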
+static std::unique_ptr<Statement> block_for_case(SwitchStatement* s, SwitchCase* c) {
+ bool capturing = false;
+ std::vector<std::unique_ptr<Statement>*> statementPtrs;
+ for (const auto& current : s->fCases) {
+ if (current.get() == c) {
+ capturing = true;
+ }
+ if (capturing) {
+ for (auto& stmt : current->fStatements) {
+                if (contains_conditional_break(*stmt, /*inConditional=*/false)) {
+ return nullptr;
+ }
+ if (contains_unconditional_break(*stmt)) {
+ capturing = false;
+ break;
+ }
+ statementPtrs.push_back(&stmt);
+ }
+ if (!capturing) {
+ break;
+ }
+ }
+ }
+ std::vector<std::unique_ptr<Statement>> statements;
+    for (const auto& stmtPtr : statementPtrs) {
+        statements.push_back(std::move(*stmtPtr));
+    }
+ return std::unique_ptr<Statement>(new Block(-1, std::move(statements), s->fSymbols));
+}
+
+void Compiler::simplifyStatement(DefinitionMap& definitions,
+ BasicBlock& b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ std::unordered_set<const Variable*>* undefinedVariables,
+ bool* outUpdated,
+ bool* outNeedsRescan) {
+ Statement* stmt = (*iter)->statement()->get();
+ switch (stmt->fKind) {
+ case Statement::kVarDeclaration_Kind: {
+ const auto& varDecl = (VarDeclaration&) *stmt;
+ if (varDecl.fVar->dead() &&
+ (!varDecl.fValue ||
+ !varDecl.fValue->hasSideEffects())) {
+ if (varDecl.fValue) {
+ SkASSERT((*iter)->statement()->get() == stmt);
+ if (!b.tryRemoveExpressionBefore(iter, varDecl.fValue.get())) {
+ *outNeedsRescan = true;
+ }
+ }
+ (*iter)->setStatement(std::unique_ptr<Statement>(new Nop()));
+ *outUpdated = true;
+ }
+ break;
+ }
+ case Statement::kIf_Kind: {
+ IfStatement& i = (IfStatement&) *stmt;
+ if (i.fTest->fKind == Expression::kBoolLiteral_Kind) {
+ // constant if, collapse down to a single branch
+ if (((BoolLiteral&) *i.fTest).fValue) {
+ SkASSERT(i.fIfTrue);
+ (*iter)->setStatement(std::move(i.fIfTrue));
+ } else {
+ if (i.fIfFalse) {
+ (*iter)->setStatement(std::move(i.fIfFalse));
+ } else {
+ (*iter)->setStatement(std::unique_ptr<Statement>(new Nop()));
+ }
+ }
+ *outUpdated = true;
+ *outNeedsRescan = true;
+ break;
+ }
+ if (i.fIfFalse && i.fIfFalse->isEmpty()) {
+ // else block doesn't do anything, remove it
+ i.fIfFalse.reset();
+ *outUpdated = true;
+ *outNeedsRescan = true;
+ }
+ if (!i.fIfFalse && i.fIfTrue->isEmpty()) {
+ // if block doesn't do anything, no else block
+ if (i.fTest->hasSideEffects()) {
+ // test has side effects, keep it
+ (*iter)->setStatement(std::unique_ptr<Statement>(
+ new ExpressionStatement(std::move(i.fTest))));
+ } else {
+ // no if, no else, no test side effects, kill the whole if
+ // statement
+ (*iter)->setStatement(std::unique_ptr<Statement>(new Nop()));
+ }
+ *outUpdated = true;
+ *outNeedsRescan = true;
+ }
+ break;
+ }
+ case Statement::kSwitch_Kind: {
+ SwitchStatement& s = (SwitchStatement&) *stmt;
+ if (s.fValue->isConstant()) {
+ // switch is constant, replace it with the case that matches
+ bool found = false;
+ SwitchCase* defaultCase = nullptr;
+ for (const auto& c : s.fCases) {
+ if (!c->fValue) {
+ defaultCase = c.get();
+ continue;
+ }
+ SkASSERT(c->fValue->fKind == s.fValue->fKind);
+ found = c->fValue->compareConstant(*fContext, *s.fValue);
+ if (found) {
+ std::unique_ptr<Statement> newBlock = block_for_case(&s, c.get());
+ if (newBlock) {
+ (*iter)->setStatement(std::move(newBlock));
+ break;
+ } else {
+ if (s.fIsStatic && !(fFlags & kPermitInvalidStaticTests_Flag)) {
+ this->error(s.fOffset,
+ "static switch contains non-static conditional break");
+ s.fIsStatic = false;
+ }
+ return; // can't simplify
+ }
+ }
+ }
+ if (!found) {
+ // no matching case. use default if it exists, or kill the whole thing
+ if (defaultCase) {
+ std::unique_ptr<Statement> newBlock = block_for_case(&s, defaultCase);
+ if (newBlock) {
+ (*iter)->setStatement(std::move(newBlock));
+ } else {
+ if (s.fIsStatic && !(fFlags & kPermitInvalidStaticTests_Flag)) {
+ this->error(s.fOffset,
+ "static switch contains non-static conditional break");
+ s.fIsStatic = false;
+ }
+ return; // can't simplify
+ }
+ } else {
+ (*iter)->setStatement(std::unique_ptr<Statement>(new Nop()));
+ }
+ }
+ *outUpdated = true;
+ *outNeedsRescan = true;
+ }
+ break;
+ }
+ case Statement::kExpression_Kind: {
+ ExpressionStatement& e = (ExpressionStatement&) *stmt;
+ SkASSERT((*iter)->statement()->get() == &e);
+ if (!e.fExpression->hasSideEffects()) {
+ // Expression statement with no side effects, kill it
+ if (!b.tryRemoveExpressionBefore(iter, e.fExpression.get())) {
+ *outNeedsRescan = true;
+ }
+ SkASSERT((*iter)->statement()->get() == stmt);
+ (*iter)->setStatement(std::unique_ptr<Statement>(new Nop()));
+ *outUpdated = true;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
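+// Runs the per-function optimizer: builds the CFG, reports unreachable code, then
+// repeatedly simplifies expressions and statements (rebuilding the CFG whenever a
+// simplification invalidates it) until a fixed point is reached, verifies static
+// ifs and switches, cleans up dead variable declarations, and finally checks that
+// non-void functions cannot exit without returning a value.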
+void Compiler::scanCFG(FunctionDefinition& f) {
+ CFG cfg = CFGGenerator().getCFG(f);
+ this->computeDataFlow(&cfg);
+
+ // check for unreachable code
+ for (size_t i = 0; i < cfg.fBlocks.size(); i++) {
+ if (i != cfg.fStart && !cfg.fBlocks[i].fEntrances.size() &&
+ cfg.fBlocks[i].fNodes.size()) {
+ int offset;
+ switch (cfg.fBlocks[i].fNodes[0].fKind) {
+ case BasicBlock::Node::kStatement_Kind:
+ offset = (*cfg.fBlocks[i].fNodes[0].statement())->fOffset;
+ break;
+ case BasicBlock::Node::kExpression_Kind:
+ offset = (*cfg.fBlocks[i].fNodes[0].expression())->fOffset;
+ break;
+ }
+ this->error(offset, String("unreachable"));
+ }
+ }
+ if (fErrorCount) {
+ return;
+ }
+
+ // check for dead code & undefined variables, perform constant propagation
+ std::unordered_set<const Variable*> undefinedVariables;
+ bool updated;
+ bool needsRescan = false;
+ do {
+ if (needsRescan) {
+ cfg = CFGGenerator().getCFG(f);
+ this->computeDataFlow(&cfg);
+ needsRescan = false;
+ }
+
+ updated = false;
+ for (BasicBlock& b : cfg.fBlocks) {
+ DefinitionMap definitions = b.fBefore;
+
+ for (auto iter = b.fNodes.begin(); iter != b.fNodes.end() && !needsRescan; ++iter) {
+ if (iter->fKind == BasicBlock::Node::kExpression_Kind) {
+ this->simplifyExpression(definitions, b, &iter, &undefinedVariables, &updated,
+ &needsRescan);
+ } else {
+ this->simplifyStatement(definitions, b, &iter, &undefinedVariables, &updated,
+ &needsRescan);
+ }
+ if (needsRescan) {
+ break;
+ }
+ this->addDefinitions(*iter, &definitions);
+ }
+ }
+ } while (updated);
+ SkASSERT(!needsRescan);
+
+ // verify static ifs & switches, clean up dead variable decls
+ for (BasicBlock& b : cfg.fBlocks) {
+ DefinitionMap definitions = b.fBefore;
+
+ for (auto iter = b.fNodes.begin(); iter != b.fNodes.end() && !needsRescan;) {
+ if (iter->fKind == BasicBlock::Node::kStatement_Kind) {
+ const Statement& s = **iter->statement();
+ switch (s.fKind) {
+ case Statement::kIf_Kind:
+ if (((const IfStatement&) s).fIsStatic &&
+ !(fFlags & kPermitInvalidStaticTests_Flag)) {
+ this->error(s.fOffset, "static if has non-static test");
+ }
+ ++iter;
+ break;
+ case Statement::kSwitch_Kind:
+ if (((const SwitchStatement&) s).fIsStatic &&
+ !(fFlags & kPermitInvalidStaticTests_Flag)) {
+ this->error(s.fOffset, "static switch has non-static test");
+ }
+ ++iter;
+ break;
+ case Statement::kVarDeclarations_Kind: {
+ VarDeclarations& decls = *((VarDeclarationsStatement&) s).fDeclaration;
+ for (auto varIter = decls.fVars.begin(); varIter != decls.fVars.end();) {
+ if ((*varIter)->fKind == Statement::kNop_Kind) {
+ varIter = decls.fVars.erase(varIter);
+ } else {
+ ++varIter;
+ }
+ }
+ if (!decls.fVars.size()) {
+ iter = b.fNodes.erase(iter);
+ } else {
+ ++iter;
+ }
+ break;
+ }
+ default:
+ ++iter;
+ break;
+ }
+ } else {
+ ++iter;
+ }
+ }
+ }
+
+ // check for missing return
+ if (f.fDeclaration.fReturnType != *fContext->fVoid_Type) {
+ if (cfg.fBlocks[cfg.fExit].fEntrances.size()) {
+ this->error(f.fOffset, String("function can exit without returning a value"));
+ }
+ }
+}
+
+void Compiler::registerExternalValue(ExternalValue* value) {
+ fIRGenerator->fRootSymbolTable->addWithoutOwnership(value->fName, value);
+}
+
+Symbol* Compiler::takeOwnership(std::unique_ptr<Symbol> symbol) {
+ return fIRGenerator->fRootSymbolTable->takeOwnership(std::move(symbol));
+}
+
+std::unique_ptr<Program> Compiler::convertProgram(Program::Kind kind, String text,
+ const Program::Settings& settings) {
+ fErrorText = "";
+ fErrorCount = 0;
+ std::vector<std::unique_ptr<ProgramElement>>* inherited;
+ std::vector<std::unique_ptr<ProgramElement>> elements;
+ switch (kind) {
+ case Program::kVertex_Kind:
+ inherited = &fVertexInclude;
+ fIRGenerator->fSymbolTable = fVertexSymbolTable;
+ fIRGenerator->start(&settings, inherited);
+ break;
+ case Program::kFragment_Kind:
+ inherited = &fFragmentInclude;
+ fIRGenerator->fSymbolTable = fFragmentSymbolTable;
+ fIRGenerator->start(&settings, inherited);
+ break;
+ case Program::kGeometry_Kind:
+ inherited = &fGeometryInclude;
+ fIRGenerator->fSymbolTable = fGeometrySymbolTable;
+ fIRGenerator->start(&settings, inherited);
+ break;
+ case Program::kFragmentProcessor_Kind:
+ inherited = nullptr;
+ fIRGenerator->fSymbolTable = fGpuSymbolTable;
+ fIRGenerator->start(&settings, nullptr);
+ fIRGenerator->convertProgram(kind, SKSL_FP_INCLUDE, strlen(SKSL_FP_INCLUDE), *fTypes,
+ &elements);
+ fIRGenerator->fSymbolTable->markAllFunctionsBuiltin();
+ break;
+ case Program::kPipelineStage_Kind:
+ inherited = &fPipelineInclude;
+ fIRGenerator->fSymbolTable = fPipelineSymbolTable;
+ fIRGenerator->start(&settings, inherited);
+ break;
+ case Program::kGeneric_Kind:
+ inherited = &fInterpreterInclude;
+ fIRGenerator->fSymbolTable = fInterpreterSymbolTable;
+ fIRGenerator->start(&settings, inherited);
+ break;
+ }
+ for (auto& element : elements) {
+ if (element->fKind == ProgramElement::kEnum_Kind) {
+ ((Enum&) *element).fBuiltin = true;
+ }
+ }
+ std::unique_ptr<String> textPtr(new String(std::move(text)));
+ fSource = textPtr.get();
+ fIRGenerator->convertProgram(kind, textPtr->c_str(), textPtr->size(), *fTypes, &elements);
+ auto result = std::unique_ptr<Program>(new Program(kind,
+ std::move(textPtr),
+ settings,
+ fContext,
+ inherited,
+ std::move(elements),
+ fIRGenerator->fSymbolTable,
+ fIRGenerator->fInputs));
+ if (fErrorCount) {
+ return nullptr;
+ }
+ return result;
+}
+
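+// Optimizes each function in the program via the CFG passes above, then strips
+// global variable declarations that are dead (skipped for fragment processors,
+// which keep their global declarations).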
+bool Compiler::optimize(Program& program) {
+ SkASSERT(!fErrorCount);
+ if (!program.fIsOptimized) {
+ program.fIsOptimized = true;
+ fIRGenerator->fKind = program.fKind;
+ fIRGenerator->fSettings = &program.fSettings;
+ for (auto& element : program) {
+ if (element.fKind == ProgramElement::kFunction_Kind) {
+ this->scanCFG((FunctionDefinition&) element);
+ }
+ }
+ if (program.fKind != Program::kFragmentProcessor_Kind) {
+ for (auto iter = program.fElements.begin(); iter != program.fElements.end();) {
+ if ((*iter)->fKind == ProgramElement::kVar_Kind) {
+ VarDeclarations& vars = (VarDeclarations&) **iter;
+ for (auto varIter = vars.fVars.begin(); varIter != vars.fVars.end();) {
+ const Variable& var = *((VarDeclaration&) **varIter).fVar;
+ if (var.dead()) {
+ varIter = vars.fVars.erase(varIter);
+ } else {
+ ++varIter;
+ }
+ }
+ if (vars.fVars.size() == 0) {
+ iter = program.fElements.erase(iter);
+ continue;
+ }
+ }
+ ++iter;
+ }
+ }
+ }
+ return fErrorCount == 0;
+}
+
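+// Returns a copy of 'program' with the given input values baked into the new
+// program's settings as arguments; the original program is not modified.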
+std::unique_ptr<Program> Compiler::specialize(
+ Program& program,
+ const std::unordered_map<SkSL::String, SkSL::Program::Settings::Value>& inputs) {
+ std::vector<std::unique_ptr<ProgramElement>> elements;
+ for (const auto& e : program) {
+ elements.push_back(e.clone());
+ }
+ Program::Settings settings;
+ settings.fCaps = program.fSettings.fCaps;
+ for (auto iter = inputs.begin(); iter != inputs.end(); ++iter) {
+ settings.fArgs.insert(*iter);
+ }
+ std::unique_ptr<Program> result(new Program(program.fKind,
+ nullptr,
+ settings,
+ program.fContext,
+ program.fInheritedElements,
+ std::move(elements),
+ program.fSymbols,
+ program.fInputs));
+ return result;
+}
+
+#if defined(SKSL_STANDALONE) || SK_SUPPORT_GPU
+
+bool Compiler::toSPIRV(Program& program, OutputStream& out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+#ifdef SK_ENABLE_SPIRV_VALIDATION
+ StringStream buffer;
+ fSource = program.fSource.get();
+ SPIRVCodeGenerator cg(fContext.get(), &program, this, &buffer);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+ if (result) {
+ spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_0);
+ const String& data = buffer.str();
+ SkASSERT(0 == data.size() % 4);
+ auto dumpmsg = [](spv_message_level_t, const char*, const spv_position_t&, const char* m) {
+ SkDebugf("SPIR-V validation error: %s\n", m);
+ };
+ tools.SetMessageConsumer(dumpmsg);
+ // Verify that the SPIR-V we produced is valid. If this SkASSERT fails, check the logs prior
+ // to the failure to see the validation errors.
+ SkAssertResult(tools.Validate((const uint32_t*) data.c_str(), data.size() / 4));
+ out.write(data.c_str(), data.size());
+ }
+#else
+ fSource = program.fSource.get();
+ SPIRVCodeGenerator cg(fContext.get(), &program, this, &out);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+#endif
+ return result;
+}
+
+bool Compiler::toSPIRV(Program& program, String* out) {
+ StringStream buffer;
+ bool result = this->toSPIRV(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toGLSL(Program& program, OutputStream& out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+ fSource = program.fSource.get();
+ GLSLCodeGenerator cg(fContext.get(), &program, this, &out);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+ return result;
+}
+
+bool Compiler::toGLSL(Program& program, String* out) {
+ StringStream buffer;
+ bool result = this->toGLSL(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toMetal(Program& program, OutputStream& out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+ MetalCodeGenerator cg(fContext.get(), &program, this, &out);
+ bool result = cg.generateCode();
+ return result;
+}
+
+bool Compiler::toMetal(Program& program, String* out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+ StringStream buffer;
+ bool result = this->toMetal(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toCPP(Program& program, String name, OutputStream& out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+ fSource = program.fSource.get();
+ CPPCodeGenerator cg(fContext.get(), &program, this, name, &out);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+ return result;
+}
+
+bool Compiler::toH(Program& program, String name, OutputStream& out) {
+ if (!this->optimize(program)) {
+ return false;
+ }
+ fSource = program.fSource.get();
+ HCodeGenerator cg(fContext.get(), &program, this, name, &out);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+ return result;
+}
+
+#endif
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+bool Compiler::toPipelineStage(const Program& program, String* out,
+ std::vector<FormatArg>* outFormatArgs,
+ std::vector<GLSLFunction>* outFunctions) {
+ SkASSERT(program.fIsOptimized);
+ fSource = program.fSource.get();
+ StringStream buffer;
+ PipelineStageCodeGenerator cg(fContext.get(), &program, this, &buffer, outFormatArgs,
+ outFunctions);
+ bool result = cg.generateCode();
+ fSource = nullptr;
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+#endif
+
+std::unique_ptr<ByteCode> Compiler::toByteCode(Program& program) {
+#if defined(SK_ENABLE_SKSL_INTERPRETER)
+ if (!this->optimize(program)) {
+ return nullptr;
+ }
+ std::unique_ptr<ByteCode> result(new ByteCode());
+ ByteCodeGenerator cg(fContext.get(), &program, this, result.get());
+ if (cg.generateCode()) {
+ return result;
+ }
+#else
+ ABORT("ByteCode interpreter not enabled");
+#endif
+ return nullptr;
+}
+
+const char* Compiler::OperatorName(Token::Kind kind) {
+ switch (kind) {
+ case Token::PLUS: return "+";
+ case Token::MINUS: return "-";
+ case Token::STAR: return "*";
+ case Token::SLASH: return "/";
+ case Token::PERCENT: return "%";
+ case Token::SHL: return "<<";
+ case Token::SHR: return ">>";
+ case Token::LOGICALNOT: return "!";
+ case Token::LOGICALAND: return "&&";
+ case Token::LOGICALOR: return "||";
+ case Token::LOGICALXOR: return "^^";
+ case Token::BITWISENOT: return "~";
+ case Token::BITWISEAND: return "&";
+ case Token::BITWISEOR: return "|";
+ case Token::BITWISEXOR: return "^";
+ case Token::EQ: return "=";
+ case Token::EQEQ: return "==";
+ case Token::NEQ: return "!=";
+ case Token::LT: return "<";
+ case Token::GT: return ">";
+ case Token::LTEQ: return "<=";
+ case Token::GTEQ: return ">=";
+ case Token::PLUSEQ: return "+=";
+ case Token::MINUSEQ: return "-=";
+ case Token::STAREQ: return "*=";
+ case Token::SLASHEQ: return "/=";
+ case Token::PERCENTEQ: return "%=";
+ case Token::SHLEQ: return "<<=";
+ case Token::SHREQ: return ">>=";
+ case Token::LOGICALANDEQ: return "&&=";
+ case Token::LOGICALOREQ: return "||=";
+ case Token::LOGICALXOREQ: return "^^=";
+ case Token::BITWISEANDEQ: return "&=";
+ case Token::BITWISEOREQ: return "|=";
+ case Token::BITWISEXOREQ: return "^=";
+ case Token::PLUSPLUS: return "++";
+ case Token::MINUSMINUS: return "--";
+ case Token::COMMA: return ",";
+ default:
+ ABORT("unsupported operator: %d\n", kind);
+ }
+}
+
+
+bool Compiler::IsAssignment(Token::Kind op) {
+ switch (op) {
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALANDEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
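+// Converts a byte offset within fSource into a 1-based line / column Position by
+// scanning for newlines; offset 0 maps to line 1, column 1.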
+Position Compiler::position(int offset) {
+ SkASSERT(fSource);
+ int line = 1;
+ int column = 1;
+ for (int i = 0; i < offset; i++) {
+ if ((*fSource)[i] == '\n') {
+ ++line;
+ column = 1;
+ }
+ else {
+ ++column;
+ }
+ }
+ return Position(line, column);
+}
+
+void Compiler::error(int offset, String msg) {
+ fErrorCount++;
+ Position pos = this->position(offset);
+ fErrorText += "error: " + to_string(pos.fLine) + ": " + msg.c_str() + "\n";
+}
+
+String Compiler::errorText() {
+ this->writeErrorCount();
+ fErrorCount = 0;
+ String result = fErrorText;
+ return result;
+}
+
+void Compiler::writeErrorCount() {
+ if (fErrorCount) {
+ fErrorText += to_string(fErrorCount) + " error";
+ if (fErrorCount > 1) {
+ fErrorText += "s";
+ }
+ fErrorText += "\n";
+ }
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.h b/gfx/skia/skia/src/sksl/SkSLCompiler.h
new file mode 100644
index 0000000000..7ef368d58b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_COMPILER
+#define SKSL_COMPILER
+
+#include <set>
+#include <unordered_set>
+#include <vector>
+#include "src/sksl/SkSLCFGGenerator.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+#include "src/gpu/GrShaderVar.h"
+#endif
+
+#define SK_FRAGCOLOR_BUILTIN 10001
+#define SK_IN_BUILTIN 10002
+#define SK_INCOLOR_BUILTIN 10003
+#define SK_OUTCOLOR_BUILTIN 10004
+#define SK_TRANSFORMEDCOORDS2D_BUILTIN 10005
+#define SK_TEXTURESAMPLERS_BUILTIN 10006
+#define SK_OUT_BUILTIN 10007
+#define SK_LASTFRAGCOLOR_BUILTIN 10008
+#define SK_MAIN_X_BUILTIN 10009
+#define SK_MAIN_Y_BUILTIN 10010
+#define SK_WIDTH_BUILTIN 10011
+#define SK_HEIGHT_BUILTIN 10012
+#define SK_FRAGCOORD_BUILTIN 15
+#define SK_CLOCKWISE_BUILTIN 17
+#define SK_VERTEXID_BUILTIN 42
+#define SK_INSTANCEID_BUILTIN 43
+#define SK_CLIPDISTANCE_BUILTIN 3
+#define SK_INVOCATIONID_BUILTIN 8
+#define SK_POSITION_BUILTIN 0
+
+namespace SkSL {
+
+class ByteCode;
+class ExternalValue;
+class IRGenerator;
+
+/**
+ * Main compiler entry point. This is a traditional compiler design which first parses the .sksl
+ * file into an abstract syntax tree (a tree of ASTNodes), then performs semantic analysis to
+ * produce a Program (a tree of IRNodes), then feeds the Program into a CodeGenerator to produce
+ * compiled output.
+ *
+ * See the README for information about SkSL.
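+ *
+ * A typical invocation looks roughly like this (an illustrative sketch; the
+ * shader source, settings, and error handling are placeholders):
+ *
+ *     SkSL::Compiler compiler;
+ *     SkSL::Program::Settings settings;
+ *     std::unique_ptr<SkSL::Program> program = compiler.convertProgram(
+ *             SkSL::Program::kFragment_Kind, SkSL::String(fragmentSource), settings);
+ *     SkSL::String glsl;
+ *     if (!program || !compiler.toGLSL(*program, &glsl)) {
+ *         SkDebugf("%s", compiler.errorText().c_str());
+ *     }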
+ */
+class SK_API Compiler : public ErrorReporter {
+public:
+ static constexpr const char* RTADJUST_NAME = "sk_RTAdjust";
+ static constexpr const char* PERVERTEX_NAME = "sk_PerVertex";
+
+ enum Flags {
+ kNone_Flags = 0,
+ // permits static if/switch statements to be used with non-constant tests. This is used when
+ // producing H and CPP code; the static tests don't have to have constant values *yet*, but
+ // the generated code will contain a static test which then does have to be a constant.
+ kPermitInvalidStaticTests_Flag = 1,
+ };
+
+ struct FormatArg {
+ enum class Kind {
+ kInput,
+ kOutput,
+ kCoordX,
+ kCoordY,
+ kUniform,
+ kChildProcessor,
+ kFunctionName
+ };
+
+ FormatArg(Kind kind)
+ : fKind(kind) {}
+
+ FormatArg(Kind kind, int index)
+ : fKind(kind)
+ , fIndex(index) {}
+
+ Kind fKind;
+
+ int fIndex;
+ };
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+ /**
+ * Represents the arguments to GrGLSLShaderBuilder::emitFunction.
+ */
+ struct GLSLFunction {
+ GrSLType fReturnType;
+ SkString fName;
+ std::vector<GrShaderVar> fParameters;
+ SkString fBody;
+ };
+#endif
+
+ Compiler(Flags flags = kNone_Flags);
+
+ ~Compiler() override;
+
+ Compiler(const Compiler&) = delete;
+ Compiler& operator=(const Compiler&) = delete;
+
+ /**
+ * Registers an ExternalValue as a top-level symbol which is visible in the global namespace.
+ */
+ void registerExternalValue(ExternalValue* value);
+
+ std::unique_ptr<Program> convertProgram(Program::Kind kind, String text,
+ const Program::Settings& settings);
+
+ bool optimize(Program& program);
+
+ std::unique_ptr<Program> specialize(Program& program,
+ const std::unordered_map<SkSL::String, SkSL::Program::Settings::Value>& inputs);
+
+ bool toSPIRV(Program& program, OutputStream& out);
+
+ bool toSPIRV(Program& program, String* out);
+
+ bool toGLSL(Program& program, OutputStream& out);
+
+ bool toGLSL(Program& program, String* out);
+
+ bool toMetal(Program& program, OutputStream& out);
+
+ bool toMetal(Program& program, String* out);
+
+ bool toCPP(Program& program, String name, OutputStream& out);
+
+ bool toH(Program& program, String name, OutputStream& out);
+
+ std::unique_ptr<ByteCode> toByteCode(Program& program);
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+ bool toPipelineStage(const Program& program, String* out,
+ std::vector<FormatArg>* outFormatArgs,
+ std::vector<GLSLFunction>* outFunctions);
+#endif
+
+ /**
+ * Takes ownership of the given symbol. It will be destroyed when the compiler is destroyed.
+ */
+ Symbol* takeOwnership(std::unique_ptr<Symbol> symbol);
+
+ void error(int offset, String msg) override;
+
+ String errorText();
+
+ void writeErrorCount();
+
+ int errorCount() override {
+ return fErrorCount;
+ }
+
+ Context& context() {
+ return *fContext;
+ }
+
+ static const char* OperatorName(Token::Kind token);
+
+ static bool IsAssignment(Token::Kind token);
+
+private:
+ void processIncludeFile(Program::Kind kind, const char* src, size_t length,
+ std::shared_ptr<SymbolTable> base,
+ std::vector<std::unique_ptr<ProgramElement>>* outElements,
+ std::shared_ptr<SymbolTable>* outSymbolTable);
+
+ void addDefinition(const Expression* lvalue, std::unique_ptr<Expression>* expr,
+ DefinitionMap* definitions);
+
+ void addDefinitions(const BasicBlock::Node& node, DefinitionMap* definitions);
+
+ void scanCFG(CFG* cfg, BlockId block, std::set<BlockId>* workList);
+
+ void computeDataFlow(CFG* cfg);
+
+ /**
+ * Simplifies the expression pointed to by iter (in both the IR and CFG structures), if
+ * possible.
+ */
+ void simplifyExpression(DefinitionMap& definitions,
+ BasicBlock& b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ std::unordered_set<const Variable*>* undefinedVariables,
+ bool* outUpdated,
+ bool* outNeedsRescan);
+
+ /**
+ * Simplifies the statement pointed to by iter (in both the IR and CFG structures), if
+ * possible.
+ */
+ void simplifyStatement(DefinitionMap& definitions,
+ BasicBlock& b,
+ std::vector<BasicBlock::Node>::iterator* iter,
+ std::unordered_set<const Variable*>* undefinedVariables,
+ bool* outUpdated,
+ bool* outNeedsRescan);
+
+ void scanCFG(FunctionDefinition& f);
+
+ Position position(int offset);
+
+ std::shared_ptr<SymbolTable> fGpuSymbolTable;
+ std::vector<std::unique_ptr<ProgramElement>> fVertexInclude;
+ std::shared_ptr<SymbolTable> fVertexSymbolTable;
+ std::vector<std::unique_ptr<ProgramElement>> fFragmentInclude;
+ std::shared_ptr<SymbolTable> fFragmentSymbolTable;
+ std::vector<std::unique_ptr<ProgramElement>> fGeometryInclude;
+ std::shared_ptr<SymbolTable> fGeometrySymbolTable;
+ std::vector<std::unique_ptr<ProgramElement>> fPipelineInclude;
+ std::shared_ptr<SymbolTable> fPipelineSymbolTable;
+ std::vector<std::unique_ptr<ProgramElement>> fInterpreterInclude;
+ std::shared_ptr<SymbolTable> fInterpreterSymbolTable;
+
+ std::shared_ptr<SymbolTable> fTypes;
+ IRGenerator* fIRGenerator;
+ int fFlags;
+
+ const String* fSource;
+ std::shared_ptr<Context> fContext;
+ int fErrorCount;
+ String fErrorText;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLContext.h b/gfx/skia/skia/src/sksl/SkSLContext.h
new file mode 100644
index 0000000000..acfafc587d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLContext.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTEXT
+#define SKSL_CONTEXT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Contains compiler-wide objects, which currently means the core types.
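+ *
+ * Other compiler stages compare an expression's type against these shared
+ * singletons rather than comparing type names; for example (illustrative):
+ *
+ *     if (expr.fType == *context.fFloat4_Type) { ... }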
+ */
+class Context {
+public:
+ Context()
+ : fInvalid_Type(new Type("<INVALID>"))
+ , fVoid_Type(new Type("void"))
+ , fNull_Type(new Type("null"))
+ , fFloatLiteral_Type(new Type("$floatLiteral", Type::kFloat_NumberKind, 3))
+ , fIntLiteral_Type(new Type("$intLiteral", Type::kSigned_NumberKind, 1))
+ , fDouble_Type(new Type("double", Type::kFloat_NumberKind, 6, true))
+ , fDouble2_Type(new Type("double2", *fDouble_Type, 2))
+ , fDouble3_Type(new Type("double3", *fDouble_Type, 3))
+ , fDouble4_Type(new Type("double4", *fDouble_Type, 4))
+ , fFloat_Type(new Type("float", Type::kFloat_NumberKind, 5, true))
+ , fFloat2_Type(new Type("float2", *fFloat_Type, 2))
+ , fFloat3_Type(new Type("float3", *fFloat_Type, 3))
+ , fFloat4_Type(new Type("float4", *fFloat_Type, 4))
+ , fHalf_Type(new Type("half", Type::kFloat_NumberKind, 4))
+ , fHalf2_Type(new Type("half2", *fHalf_Type, 2))
+ , fHalf3_Type(new Type("half3", *fHalf_Type, 3))
+ , fHalf4_Type(new Type("half4", *fHalf_Type, 4))
+ , fUInt_Type(new Type("uint", Type::kUnsigned_NumberKind, 2, true))
+ , fUInt2_Type(new Type("uint2", *fUInt_Type, 2))
+ , fUInt3_Type(new Type("uint3", *fUInt_Type, 3))
+ , fUInt4_Type(new Type("uint4", *fUInt_Type, 4))
+ , fInt_Type(new Type("int", Type::kSigned_NumberKind, 2, true))
+ , fInt2_Type(new Type("int2", *fInt_Type, 2))
+ , fInt3_Type(new Type("int3", *fInt_Type, 3))
+ , fInt4_Type(new Type("int4", *fInt_Type, 4))
+ , fUShort_Type(new Type("ushort", Type::kUnsigned_NumberKind, 0))
+ , fUShort2_Type(new Type("ushort2", *fUShort_Type, 2))
+ , fUShort3_Type(new Type("ushort3", *fUShort_Type, 3))
+ , fUShort4_Type(new Type("ushort4", *fUShort_Type, 4))
+ , fShort_Type(new Type("short", Type::kSigned_NumberKind, 0))
+ , fShort2_Type(new Type("short2", *fShort_Type, 2))
+ , fShort3_Type(new Type("short3", *fShort_Type, 3))
+ , fShort4_Type(new Type("short4", *fShort_Type, 4))
+ , fUByte_Type(new Type("ubyte", Type::kUnsigned_NumberKind, 0))
+ , fUByte2_Type(new Type("ubyte2", *fUByte_Type, 2))
+ , fUByte3_Type(new Type("ubyte3", *fUByte_Type, 3))
+ , fUByte4_Type(new Type("ubyte4", *fUByte_Type, 4))
+ , fByte_Type(new Type("byte", Type::kSigned_NumberKind, 0))
+ , fByte2_Type(new Type("byte2", *fByte_Type, 2))
+ , fByte3_Type(new Type("byte3", *fByte_Type, 3))
+ , fByte4_Type(new Type("byte4", *fByte_Type, 4))
+ , fBool_Type(new Type("bool", Type::kNonnumeric_NumberKind, -1))
+ , fBool2_Type(new Type("bool2", *fBool_Type, 2))
+ , fBool3_Type(new Type("bool3", *fBool_Type, 3))
+ , fBool4_Type(new Type("bool4", *fBool_Type, 4))
+ , fFloat2x2_Type(new Type("float2x2", *fFloat_Type, 2, 2))
+ , fFloat2x3_Type(new Type("float2x3", *fFloat_Type, 2, 3))
+ , fFloat2x4_Type(new Type("float2x4", *fFloat_Type, 2, 4))
+ , fFloat3x2_Type(new Type("float3x2", *fFloat_Type, 3, 2))
+ , fFloat3x3_Type(new Type("float3x3", *fFloat_Type, 3, 3))
+ , fFloat3x4_Type(new Type("float3x4", *fFloat_Type, 3, 4))
+ , fFloat4x2_Type(new Type("float4x2", *fFloat_Type, 4, 2))
+ , fFloat4x3_Type(new Type("float4x3", *fFloat_Type, 4, 3))
+ , fFloat4x4_Type(new Type("float4x4", *fFloat_Type, 4, 4))
+ , fHalf2x2_Type(new Type("half2x2", *fHalf_Type, 2, 2))
+ , fHalf2x3_Type(new Type("half2x3", *fHalf_Type, 2, 3))
+ , fHalf2x4_Type(new Type("half2x4", *fHalf_Type, 2, 4))
+ , fHalf3x2_Type(new Type("half3x2", *fHalf_Type, 3, 2))
+ , fHalf3x3_Type(new Type("half3x3", *fHalf_Type, 3, 3))
+ , fHalf3x4_Type(new Type("half3x4", *fHalf_Type, 3, 4))
+ , fHalf4x2_Type(new Type("half4x2", *fHalf_Type, 4, 2))
+ , fHalf4x3_Type(new Type("half4x3", *fHalf_Type, 4, 3))
+ , fHalf4x4_Type(new Type("half4x4", *fHalf_Type, 4, 4))
+ , fDouble2x2_Type(new Type("double2x2", *fDouble_Type, 2, 2))
+ , fDouble2x3_Type(new Type("double2x3", *fDouble_Type, 2, 3))
+ , fDouble2x4_Type(new Type("double2x4", *fDouble_Type, 2, 4))
+ , fDouble3x2_Type(new Type("double3x2", *fDouble_Type, 3, 2))
+ , fDouble3x3_Type(new Type("double3x3", *fDouble_Type, 3, 3))
+ , fDouble3x4_Type(new Type("double3x4", *fDouble_Type, 3, 4))
+ , fDouble4x2_Type(new Type("double4x2", *fDouble_Type, 4, 2))
+ , fDouble4x3_Type(new Type("double4x3", *fDouble_Type, 4, 3))
+ , fDouble4x4_Type(new Type("double4x4", *fDouble_Type, 4, 4))
+ , fTexture1D_Type(new Type("texture1D", SpvDim1D, false, false, false, true))
+ , fTexture2D_Type(new Type("texture2D", SpvDim2D, false, false, false, true))
+ , fTexture3D_Type(new Type("texture3D", SpvDim3D, false, false, false, true))
+ , fTextureExternalOES_Type(new Type("textureExternalOES", SpvDim2D, false, false, false, true))
+ , fTextureCube_Type(new Type("textureCube", SpvDimCube, false, false, false, true))
+ , fTexture2DRect_Type(new Type("texture2DRect", SpvDimRect, false, false, false, true))
+ , fTextureBuffer_Type(new Type("textureBuffer", SpvDimBuffer, false, false, false, true))
+ , fITexture2D_Type(new Type("itexture2D", SpvDim2D, false, false, false, true))
+ , fSampler1D_Type(new Type("sampler1D", *fTexture1D_Type))
+ , fSampler2D_Type(new Type("sampler2D", *fTexture2D_Type))
+ , fSampler3D_Type(new Type("sampler3D", *fTexture3D_Type))
+ , fSamplerExternalOES_Type(new Type("samplerExternalOES", *fTextureExternalOES_Type))
+ , fSamplerCube_Type(new Type("samplerCube", *fTextureCube_Type))
+ , fSampler2DRect_Type(new Type("sampler2DRect", *fTexture2DRect_Type))
+ , fSampler1DArray_Type(new Type("sampler1DArray"))
+ , fSampler2DArray_Type(new Type("sampler2DArray"))
+ , fSamplerCubeArray_Type(new Type("samplerCubeArray"))
+ , fSamplerBuffer_Type(new Type("samplerBuffer", *fTextureBuffer_Type))
+ , fSampler2DMS_Type(new Type("sampler2DMS"))
+ , fSampler2DMSArray_Type(new Type("sampler2DMSArray"))
+ , fSampler1DShadow_Type(new Type("sampler1DShadow"))
+ , fSampler2DShadow_Type(new Type("sampler2DShadow"))
+ , fSamplerCubeShadow_Type(new Type("samplerCubeShadow"))
+ , fSampler2DRectShadow_Type(new Type("sampler2DRectShadow"))
+ , fSampler1DArrayShadow_Type(new Type("sampler1DArrayShadow"))
+ , fSampler2DArrayShadow_Type(new Type("sampler2DArrayShadow"))
+ , fSamplerCubeArrayShadow_Type(new Type("samplerCubeArrayShadow"))
+
+        // Related to the FIXME below: the gsampler* types don't currently expand to cover the
+        // integer case.
+ , fISampler2D_Type(new Type("isampler2D", *fITexture2D_Type))
+
+ , fSampler_Type(new Type("sampler", Type::kSeparateSampler_Kind))
+ // FIXME express these as "gimage2D" that expand to image2D, iimage2D, and uimage2D.
+ , fImage2D_Type(new Type("image2D", SpvDim2D, false, false, false, true))
+ , fIImage2D_Type(new Type("iimage2D", SpvDim2D, false, false, false, true))
+
+ // FIXME express these as "gsubpassInput" that expand to subpassInput, isubpassInput,
+ // and usubpassInput.
+ , fSubpassInput_Type(new Type("subpassInput", SpvDimSubpassData, false, false,
+ false, false))
+ , fSubpassInputMS_Type(new Type("subpassInputMS", SpvDimSubpassData, false, false,
+ true, false))
+
+        // FIXME figure out what we're supposed to do with the gsampler et al. types
+ , fGSampler1D_Type(new Type("$gsampler1D", static_type(*fSampler1D_Type)))
+ , fGSampler2D_Type(new Type("$gsampler2D", static_type(*fSampler2D_Type)))
+ , fGSampler3D_Type(new Type("$gsampler3D", static_type(*fSampler3D_Type)))
+ , fGSamplerCube_Type(new Type("$gsamplerCube", static_type(*fSamplerCube_Type)))
+ , fGSampler2DRect_Type(new Type("$gsampler2DRect", static_type(*fSampler2DRect_Type)))
+ , fGSampler1DArray_Type(new Type("$gsampler1DArray",
+ static_type(*fSampler1DArray_Type)))
+ , fGSampler2DArray_Type(new Type("$gsampler2DArray",
+ static_type(*fSampler2DArray_Type)))
+ , fGSamplerCubeArray_Type(new Type("$gsamplerCubeArray",
+ static_type(*fSamplerCubeArray_Type)))
+ , fGSamplerBuffer_Type(new Type("$gsamplerBuffer", static_type(*fSamplerBuffer_Type)))
+ , fGSampler2DMS_Type(new Type("$gsampler2DMS", static_type(*fSampler2DMS_Type)))
+ , fGSampler2DMSArray_Type(new Type("$gsampler2DMSArray",
+ static_type(*fSampler2DMSArray_Type)))
+ , fGSampler2DArrayShadow_Type(new Type("$gsampler2DArrayShadow",
+ static_type(*fSampler2DArrayShadow_Type)))
+ , fGSamplerCubeArrayShadow_Type(new Type("$gsamplerCubeArrayShadow",
+ static_type(*fSamplerCubeArrayShadow_Type)))
+ , fGenType_Type(new Type("$genType", { fFloat_Type.get(), fFloat2_Type.get(),
+ fFloat3_Type.get(), fFloat4_Type.get() }))
+ , fGenHType_Type(new Type("$genHType", { fHalf_Type.get(), fHalf2_Type.get(),
+ fHalf3_Type.get(), fHalf4_Type.get() }))
+ , fGenDType_Type(new Type("$genDType", { fDouble_Type.get(), fDouble2_Type.get(),
+ fDouble3_Type.get(), fDouble4_Type.get() }))
+ , fGenIType_Type(new Type("$genIType", { fInt_Type.get(), fInt2_Type.get(),
+ fInt3_Type.get(), fInt4_Type.get() }))
+ , fGenUType_Type(new Type("$genUType", { fUInt_Type.get(), fUInt2_Type.get(),
+ fUInt3_Type.get(), fUInt4_Type.get() }))
+ , fGenBType_Type(new Type("$genBType", { fBool_Type.get(), fBool2_Type.get(),
+ fBool3_Type.get(), fBool4_Type.get() }))
+ , fMat_Type(new Type("$mat", { fFloat2x2_Type.get(), fFloat2x3_Type.get(),
+ fFloat2x4_Type.get(), fFloat3x2_Type.get(),
+ fFloat3x3_Type.get(), fFloat3x4_Type.get(),
+ fFloat4x2_Type.get(), fFloat4x3_Type.get(),
+ fFloat4x4_Type.get(), fHalf2x2_Type.get(),
+ fHalf2x3_Type.get(), fHalf2x4_Type.get(),
+ fHalf3x2_Type.get(), fHalf3x3_Type.get(),
+ fHalf3x4_Type.get(), fHalf4x2_Type.get(),
+ fHalf4x3_Type.get(), fHalf4x4_Type.get(),
+ fDouble2x2_Type.get(), fDouble2x3_Type.get(),
+ fDouble2x4_Type.get(), fDouble3x2_Type.get(),
+ fDouble3x3_Type.get(), fDouble3x4_Type.get(),
+ fDouble4x2_Type.get(), fDouble4x3_Type.get(),
+ fDouble4x4_Type.get() }))
+ , fVec_Type(new Type("$vec", { fInvalid_Type.get(), fFloat2_Type.get(),
+ fFloat3_Type.get(), fFloat4_Type.get() }))
+ , fGVec_Type(new Type("$gvec"))
+ , fGVec2_Type(new Type("$gfloat2"))
+ , fGVec3_Type(new Type("$gfloat3"))
+ , fGVec4_Type(new Type("$gfloat4", static_type(*fFloat4_Type)))
+ , fHVec_Type(new Type("$hvec", { fInvalid_Type.get(), fHalf2_Type.get(),
+ fHalf3_Type.get(), fHalf4_Type.get() }))
+ , fDVec_Type(new Type("$dvec", { fInvalid_Type.get(), fDouble2_Type.get(),
+ fDouble3_Type.get(), fDouble4_Type.get() }))
+ , fIVec_Type(new Type("$ivec", { fInvalid_Type.get(), fInt2_Type.get(),
+ fInt3_Type.get(), fInt4_Type.get() }))
+ , fUVec_Type(new Type("$uvec", { fInvalid_Type.get(), fUInt2_Type.get(),
+ fUInt3_Type.get(), fUInt4_Type.get() }))
+ , fSVec_Type(new Type("$svec", { fInvalid_Type.get(), fShort2_Type.get(),
+ fShort3_Type.get(), fShort4_Type.get() }))
+ , fUSVec_Type(new Type("$usvec", { fInvalid_Type.get(), fUShort2_Type.get(),
+ fUShort3_Type.get(), fUShort4_Type.get() }))
+ , fByteVec_Type(new Type("$bytevec", { fInvalid_Type.get(), fByte2_Type.get(),
+ fByte3_Type.get(), fByte4_Type.get() }))
+ , fUByteVec_Type(new Type("$ubytevec", { fInvalid_Type.get(), fUByte2_Type.get(),
+ fUByte3_Type.get(), fUByte4_Type.get() }))
+ , fBVec_Type(new Type("$bvec", { fInvalid_Type.get(), fBool2_Type.get(),
+ fBool3_Type.get(), fBool4_Type.get() }))
+ , fSkCaps_Type(new Type("$sk_Caps"))
+ , fSkArgs_Type(new Type("$sk_Args"))
+ , fFragmentProcessor_Type(fp_type(fInt_Type.get(), fBool_Type.get()))
+ , fDefined_Expression(new Defined(*fInvalid_Type)) {}
+
+ static std::vector<const Type*> static_type(const Type& t) {
+ return { &t, &t, &t, &t };
+ }
+
+ const std::unique_ptr<Type> fInvalid_Type;
+ const std::unique_ptr<Type> fVoid_Type;
+ const std::unique_ptr<Type> fNull_Type;
+ const std::unique_ptr<Type> fFloatLiteral_Type;
+ const std::unique_ptr<Type> fIntLiteral_Type;
+
+ const std::unique_ptr<Type> fDouble_Type;
+ const std::unique_ptr<Type> fDouble2_Type;
+ const std::unique_ptr<Type> fDouble3_Type;
+ const std::unique_ptr<Type> fDouble4_Type;
+
+ const std::unique_ptr<Type> fFloat_Type;
+ const std::unique_ptr<Type> fFloat2_Type;
+ const std::unique_ptr<Type> fFloat3_Type;
+ const std::unique_ptr<Type> fFloat4_Type;
+
+ const std::unique_ptr<Type> fHalf_Type;
+ const std::unique_ptr<Type> fHalf2_Type;
+ const std::unique_ptr<Type> fHalf3_Type;
+ const std::unique_ptr<Type> fHalf4_Type;
+
+ const std::unique_ptr<Type> fUInt_Type;
+ const std::unique_ptr<Type> fUInt2_Type;
+ const std::unique_ptr<Type> fUInt3_Type;
+ const std::unique_ptr<Type> fUInt4_Type;
+
+ const std::unique_ptr<Type> fInt_Type;
+ const std::unique_ptr<Type> fInt2_Type;
+ const std::unique_ptr<Type> fInt3_Type;
+ const std::unique_ptr<Type> fInt4_Type;
+
+ const std::unique_ptr<Type> fUShort_Type;
+ const std::unique_ptr<Type> fUShort2_Type;
+ const std::unique_ptr<Type> fUShort3_Type;
+ const std::unique_ptr<Type> fUShort4_Type;
+
+ const std::unique_ptr<Type> fShort_Type;
+ const std::unique_ptr<Type> fShort2_Type;
+ const std::unique_ptr<Type> fShort3_Type;
+ const std::unique_ptr<Type> fShort4_Type;
+
+ const std::unique_ptr<Type> fUByte_Type;
+ const std::unique_ptr<Type> fUByte2_Type;
+ const std::unique_ptr<Type> fUByte3_Type;
+ const std::unique_ptr<Type> fUByte4_Type;
+
+ const std::unique_ptr<Type> fByte_Type;
+ const std::unique_ptr<Type> fByte2_Type;
+ const std::unique_ptr<Type> fByte3_Type;
+ const std::unique_ptr<Type> fByte4_Type;
+
+ const std::unique_ptr<Type> fBool_Type;
+ const std::unique_ptr<Type> fBool2_Type;
+ const std::unique_ptr<Type> fBool3_Type;
+ const std::unique_ptr<Type> fBool4_Type;
+
+ const std::unique_ptr<Type> fFloat2x2_Type;
+ const std::unique_ptr<Type> fFloat2x3_Type;
+ const std::unique_ptr<Type> fFloat2x4_Type;
+ const std::unique_ptr<Type> fFloat3x2_Type;
+ const std::unique_ptr<Type> fFloat3x3_Type;
+ const std::unique_ptr<Type> fFloat3x4_Type;
+ const std::unique_ptr<Type> fFloat4x2_Type;
+ const std::unique_ptr<Type> fFloat4x3_Type;
+ const std::unique_ptr<Type> fFloat4x4_Type;
+
+ const std::unique_ptr<Type> fHalf2x2_Type;
+ const std::unique_ptr<Type> fHalf2x3_Type;
+ const std::unique_ptr<Type> fHalf2x4_Type;
+ const std::unique_ptr<Type> fHalf3x2_Type;
+ const std::unique_ptr<Type> fHalf3x3_Type;
+ const std::unique_ptr<Type> fHalf3x4_Type;
+ const std::unique_ptr<Type> fHalf4x2_Type;
+ const std::unique_ptr<Type> fHalf4x3_Type;
+ const std::unique_ptr<Type> fHalf4x4_Type;
+
+ const std::unique_ptr<Type> fDouble2x2_Type;
+ const std::unique_ptr<Type> fDouble2x3_Type;
+ const std::unique_ptr<Type> fDouble2x4_Type;
+ const std::unique_ptr<Type> fDouble3x2_Type;
+ const std::unique_ptr<Type> fDouble3x3_Type;
+ const std::unique_ptr<Type> fDouble3x4_Type;
+ const std::unique_ptr<Type> fDouble4x2_Type;
+ const std::unique_ptr<Type> fDouble4x3_Type;
+ const std::unique_ptr<Type> fDouble4x4_Type;
+
+ const std::unique_ptr<Type> fTexture1D_Type;
+ const std::unique_ptr<Type> fTexture2D_Type;
+ const std::unique_ptr<Type> fTexture3D_Type;
+ const std::unique_ptr<Type> fTextureExternalOES_Type;
+ const std::unique_ptr<Type> fTextureCube_Type;
+ const std::unique_ptr<Type> fTexture2DRect_Type;
+ const std::unique_ptr<Type> fTextureBuffer_Type;
+ const std::unique_ptr<Type> fITexture2D_Type;
+
+ const std::unique_ptr<Type> fSampler1D_Type;
+ const std::unique_ptr<Type> fSampler2D_Type;
+ const std::unique_ptr<Type> fSampler3D_Type;
+ const std::unique_ptr<Type> fSamplerExternalOES_Type;
+ const std::unique_ptr<Type> fSamplerCube_Type;
+ const std::unique_ptr<Type> fSampler2DRect_Type;
+ const std::unique_ptr<Type> fSampler1DArray_Type;
+ const std::unique_ptr<Type> fSampler2DArray_Type;
+ const std::unique_ptr<Type> fSamplerCubeArray_Type;
+ const std::unique_ptr<Type> fSamplerBuffer_Type;
+ const std::unique_ptr<Type> fSampler2DMS_Type;
+ const std::unique_ptr<Type> fSampler2DMSArray_Type;
+ const std::unique_ptr<Type> fSampler1DShadow_Type;
+ const std::unique_ptr<Type> fSampler2DShadow_Type;
+ const std::unique_ptr<Type> fSamplerCubeShadow_Type;
+ const std::unique_ptr<Type> fSampler2DRectShadow_Type;
+ const std::unique_ptr<Type> fSampler1DArrayShadow_Type;
+ const std::unique_ptr<Type> fSampler2DArrayShadow_Type;
+ const std::unique_ptr<Type> fSamplerCubeArrayShadow_Type;
+
+ const std::unique_ptr<Type> fISampler2D_Type;
+ const std::unique_ptr<Type> fSampler_Type;
+
+ const std::unique_ptr<Type> fImage2D_Type;
+ const std::unique_ptr<Type> fIImage2D_Type;
+
+ const std::unique_ptr<Type> fSubpassInput_Type;
+ const std::unique_ptr<Type> fSubpassInputMS_Type;
+
+ const std::unique_ptr<Type> fGSampler1D_Type;
+ const std::unique_ptr<Type> fGSampler2D_Type;
+ const std::unique_ptr<Type> fGSampler3D_Type;
+ const std::unique_ptr<Type> fGSamplerCube_Type;
+ const std::unique_ptr<Type> fGSampler2DRect_Type;
+ const std::unique_ptr<Type> fGSampler1DArray_Type;
+ const std::unique_ptr<Type> fGSampler2DArray_Type;
+ const std::unique_ptr<Type> fGSamplerCubeArray_Type;
+ const std::unique_ptr<Type> fGSamplerBuffer_Type;
+ const std::unique_ptr<Type> fGSampler2DMS_Type;
+ const std::unique_ptr<Type> fGSampler2DMSArray_Type;
+ const std::unique_ptr<Type> fGSampler2DArrayShadow_Type;
+ const std::unique_ptr<Type> fGSamplerCubeArrayShadow_Type;
+
+ const std::unique_ptr<Type> fGenType_Type;
+ const std::unique_ptr<Type> fGenHType_Type;
+ const std::unique_ptr<Type> fGenDType_Type;
+ const std::unique_ptr<Type> fGenIType_Type;
+ const std::unique_ptr<Type> fGenUType_Type;
+ const std::unique_ptr<Type> fGenBType_Type;
+
+ const std::unique_ptr<Type> fMat_Type;
+
+ const std::unique_ptr<Type> fVec_Type;
+
+ const std::unique_ptr<Type> fGVec_Type;
+ const std::unique_ptr<Type> fGVec2_Type;
+ const std::unique_ptr<Type> fGVec3_Type;
+ const std::unique_ptr<Type> fGVec4_Type;
+ const std::unique_ptr<Type> fHVec_Type;
+ const std::unique_ptr<Type> fDVec_Type;
+ const std::unique_ptr<Type> fIVec_Type;
+ const std::unique_ptr<Type> fUVec_Type;
+ const std::unique_ptr<Type> fSVec_Type;
+ const std::unique_ptr<Type> fUSVec_Type;
+ const std::unique_ptr<Type> fByteVec_Type;
+ const std::unique_ptr<Type> fUByteVec_Type;
+
+ const std::unique_ptr<Type> fBVec_Type;
+
+ const std::unique_ptr<Type> fSkCaps_Type;
+ const std::unique_ptr<Type> fSkArgs_Type;
+ const std::unique_ptr<Type> fFragmentProcessor_Type;
+
+ // dummy expression used to mark that a variable has a value during dataflow analysis (when it
+ // could have several different values, or the analyzer is otherwise unable to assign it a
+ // specific expression)
+ const std::unique_ptr<Expression> fDefined_Expression;
+
+private:
+ class Defined : public Expression {
+ public:
+ Defined(const Type& type)
+ : INHERITED(-1, kDefined_Kind, type) {}
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ String description() const override {
+ return "<defined>";
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new Defined(fType));
+ }
+
+ typedef Expression INHERITED;
+ };
+
+ static std::unique_ptr<Type> fp_type(const Type* intType, const Type* boolType) {
+ // Build fields for FragmentProcessors, which should parallel the
+ // C++ API for GrFragmentProcessor.
+ Modifiers mods(Layout(), Modifiers::kConst_Flag);
+ std::vector<Type::Field> fields = {
+ Type::Field(mods, "numTextureSamplers", intType),
+ Type::Field(mods, "numCoordTransforms", intType),
+ Type::Field(mods, "numChildProcessors", intType),
+ Type::Field(mods, "usesLocalCoords", boolType),
+ Type::Field(mods, "compatibleWithCoverageAsAlpha", boolType),
+ Type::Field(mods, "preservesOpaqueInput", boolType),
+ Type::Field(mods, "hasConstantOutputForConstantInput", boolType)
+ };
+ return std::unique_ptr<Type>(new Type("fragmentProcessor", fields));
+ }
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLDefines.h b/gfx/skia/skia/src/sksl/SkSLDefines.h
new file mode 100644
index 0000000000..d0ba9d7572
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLDefines.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DEFINES
+#define SKSL_DEFINES
+
+#include <cstdint>
+
+#ifdef SKSL_STANDALONE
+#if defined(_WIN32) || defined(__SYMBIAN32__)
+#define SKSL_BUILD_FOR_WIN
+#endif
+#else
+#ifdef SK_BUILD_FOR_WIN
+#define SKSL_BUILD_FOR_WIN
+#endif // SK_BUILD_FOR_WIN
+#endif // SKSL_STANDALONE
+
+#ifdef SKSL_STANDALONE
+#define SkASSERT(x) do { if (!(x)) abort(); } while (false)
+#define SkAssertResult(x) do { if (!(x)) abort(); } while (false)
+#define SkDEBUGCODE(...) __VA_ARGS__
+#define SK_API
+#if !defined(SkUNREACHABLE)
+# if defined(_MSC_VER) && !defined(__clang__)
+# define SkUNREACHABLE __assume(false)
+# else
+# define SkUNREACHABLE __builtin_unreachable()
+# endif
+#endif
+#else
+#include "include/core/SkTypes.h"
+#endif
+
+#if defined(__clang__) || defined(__GNUC__)
+#define SKSL_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
+#define SKSL_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define SKSL_PRINTF_LIKE(A, B)
+#define SKSL_WARN_UNUSED_RESULT
+#endif
+
+#define ABORT(...) (printf(__VA_ARGS__), sksl_abort())
+
+#if defined(_MSC_VER)
+#define NORETURN __declspec(noreturn)
+#else
+#define NORETURN __attribute__((__noreturn__))
+#endif
+
+using SKSL_INT = int32_t;
+using SKSL_FLOAT = float;
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLErrorReporter.h b/gfx/skia/skia/src/sksl/SkSLErrorReporter.h
new file mode 100644
index 0000000000..a487e80e90
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLErrorReporter.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ERRORREPORTER
+#define SKSL_ERRORREPORTER
+
+#include "src/sksl/SkSLPosition.h"
+
+namespace SkSL {
+
+/**
+ * Interface for the compiler to report errors.
+ */
+class ErrorReporter {
+public:
+ virtual ~ErrorReporter() {}
+
+ void error(int offset, const char* msg) {
+ this->error(offset, String(msg));
+ }
+
+ virtual void error(int offset, String msg) = 0;
+
+ virtual int errorCount() = 0;
+};
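+
+// A minimal implementation might look like this (an illustrative sketch only;
+// PrintfErrorReporter and fErrorCount are invented names):
+//
+//     class PrintfErrorReporter : public ErrorReporter {
+//     public:
+//         void error(int offset, String msg) override {
+//             printf("error at offset %d: %s\n", offset, msg.c_str());
+//             ++fErrorCount;
+//         }
+//
+//         int errorCount() override { return fErrorCount; }
+//
+//     private:
+//         int fErrorCount = 0;
+//     };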
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLExternalValue.h b/gfx/skia/skia/src/sksl/SkSLExternalValue.h
new file mode 100644
index 0000000000..d8aba5de49
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLExternalValue.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTERNALVALUE
+#define SKSL_EXTERNALVALUE
+
+#include "src/sksl/ir/SkSLSymbol.h"
+
+namespace SkSL {
+
+class String;
+class Type;
+
+class ExternalValue : public Symbol {
+public:
+ ExternalValue(const char* name, const Type& type)
+ : INHERITED(-1, kExternal_Kind, name)
+ , fType(type) {}
+
+ virtual bool canRead() const {
+ return false;
+ }
+
+ virtual bool canWrite() const {
+ return false;
+ }
+
+ virtual bool canCall() const {
+ return false;
+ }
+
+ /**
+ * Returns the type for purposes of read and write operations.
+ */
+ virtual const Type& type() const {
+ return fType;
+ }
+
+ virtual int callParameterCount() const {
+ return -1;
+ }
+
+ /**
+ * Fills in the outTypes array with pointers to the parameter types. outTypes must be able to
+ * hold callParameterCount() pointers.
+ */
+ virtual void getCallParameterTypes(const Type** outTypes) const {
+ SkASSERT(false);
+ }
+
+ /**
+ * Returns the return type resulting from a call operation.
+ */
+ virtual const Type& callReturnType() const {
+ return fType;
+ }
+
+ /**
+ * Reads the external value and stores the resulting data in target. The caller must ensure
+ * that target is a valid pointer to a region of sufficient size to hold the data contained
+ * in this external value.
+ * 'index' is the element index ([0 .. N-1]) within a call to ByteCode::run()
+ */
+ virtual void read(int index, float* target) {
+ SkASSERT(false);
+ }
+
+ /**
+ * Copies the value in src into this external value. The caller must ensure that src is a
+ * pointer to the type of data expected by this external value.
+ * 'index' is the element index ([0 .. N-1]) within a call to ByteCode::run()
+ */
+ virtual void write(int index, float* src) {
+ SkASSERT(false);
+ }
+
+ /**
+ * Calls the value as a function with the specified parameters. arguments must be a pointer to
+ * a structure containing the arguments expected by the external value in source order, and
+ * outResult must be a pointer to a region of sufficient size to hold the function's return
+ * value.
+ * 'index' is the element index ([0 .. N-1]) within a call to ByteCode::run()
+ */
+ virtual void call(int index, float* arguments, float* outResult) {
+ SkASSERT(false);
+ }
+
+ /**
+ * Resolves 'name' within this context and returns an ExternalValue which represents it, or
+ * null if no such child exists. If the implementation of this method creates new
+ * ExternalValues and there isn't a more convenient place for ownership of the objects to
+ * reside, the compiler's takeOwnership method may be useful.
+ *
+ * The 'name' string may not persist after this call; do not store this pointer.
+ */
+ virtual ExternalValue* getChild(const char* name) const {
+ return nullptr;
+ }
+
+ String description() const override {
+ return String("external<") + fName + ">";
+ }
+
+private:
+ typedef Symbol INHERITED;
+
+ const Type& fType;
+};
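+
+// A sketch of a minimal readable value (illustrative; BrightnessValue and fValue
+// are invented names, not part of the API):
+//
+//     class BrightnessValue : public ExternalValue {
+//     public:
+//         BrightnessValue(const Type& floatType)
+//                 : ExternalValue("brightness", floatType) {}
+//
+//         bool canRead() const override { return true; }
+//
+//         void read(int index, float* target) override { *target = fValue; }
+//
+//         float fValue = 1.0f;
+//     };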
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h b/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h
new file mode 100644
index 0000000000..e7de13488b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FILEOUTPUTSTREAM
+#define SKSL_FILEOUTPUTSTREAM
+
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLUtil.h"
+#include <stdio.h>
+
+namespace SkSL {
+
+class FileOutputStream : public OutputStream {
+public:
+ FileOutputStream(const char* name) {
+ fFile = fopen(name, "wb");
+ }
+
+ ~FileOutputStream() override {
+ SkASSERT(!fOpen);
+ }
+
+ bool isValid() const override {
+ return nullptr != fFile;
+ }
+
+ void write8(uint8_t b) override {
+ SkASSERT(fOpen);
+ if (isValid()) {
+ if (EOF == fputc(b, fFile)) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ void writeText(const char* s) override {
+ SkASSERT(fOpen);
+ if (isValid()) {
+ if (EOF == fputs(s, fFile)) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ void write(const void* s, size_t size) override {
+ if (isValid()) {
+ size_t written = fwrite(s, 1, size, fFile);
+ if (written != size) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ bool close() {
+ fOpen = false;
+ if (isValid() && fclose(fFile)) {
+ fFile = nullptr;
+ return false;
+ }
+ return true;
+ }
+
+private:
+ bool fOpen = true;
+ FILE *fFile;
+
+ typedef OutputStream INHERITED;
+};
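+
+// Usage sketch (illustrative; 'data' and 'size' are placeholders). Note that
+// close() must be called before destruction, and both it and isValid() should
+// be checked:
+//
+//     FileOutputStream out("output.spv");
+//     if (out.isValid()) {
+//         out.write(data, size);
+//     }
+//     if (!out.close()) {
+//         // the write or the close failed
+//     }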
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp
new file mode 100644
index 0000000000..df7c0b0729
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp
@@ -0,0 +1,1786 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#ifndef SKSL_STANDALONE
+#include "include/private/SkOnce.h"
+#endif
+
+namespace SkSL {
+
+void GLSLCodeGenerator::write(const char* s) {
+ if (s[0] == 0) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->writeText(s);
+ fAtLineStart = false;
+}
+
+void GLSLCodeGenerator::writeLine(const char* s) {
+ this->write(s);
+ fOut->writeText(fLineEnding);
+ fAtLineStart = true;
+}
+
+void GLSLCodeGenerator::write(const String& s) {
+ this->write(s.c_str());
+}
+
+void GLSLCodeGenerator::write(StringFragment s) {
+ if (!s.fLength) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->write(s.fChars, s.fLength);
+ fAtLineStart = false;
+}
+
+void GLSLCodeGenerator::writeLine(const String& s) {
+ this->writeLine(s.c_str());
+}
+
+void GLSLCodeGenerator::writeLine() {
+ this->writeLine("");
+}
+
+void GLSLCodeGenerator::writeExtension(const String& name) {
+ this->writeExtension(name, true);
+}
+
+void GLSLCodeGenerator::writeExtension(const String& name, bool require) {
+ fExtensions.writeText("#extension ");
+ fExtensions.write(name.c_str(), name.length());
+ fExtensions.writeText(require ? " : require\n" : " : enable\n");
+}
+
+bool GLSLCodeGenerator::usesPrecisionModifiers() const {
+ return fProgram.fSettings.fCaps->usesPrecisionModifiers();
+}
+
+String GLSLCodeGenerator::getTypeName(const Type& type) {
+ switch (type.kind()) {
+ case Type::kVector_Kind: {
+ Type component = type.componentType();
+ String result;
+ if (component == *fContext.fFloat_Type || component == *fContext.fHalf_Type) {
+ result = "vec";
+ }
+ else if (component == *fContext.fDouble_Type) {
+ result = "dvec";
+ }
+ else if (component.isSigned()) {
+ result = "ivec";
+ }
+ else if (component.isUnsigned()) {
+ result = "uvec";
+ }
+ else if (component == *fContext.fBool_Type) {
+ result = "bvec";
+ }
+ else {
+ ABORT("unsupported vector type");
+ }
+ result += to_string(type.columns());
+ return result;
+ }
+ case Type::kMatrix_Kind: {
+ String result;
+ Type component = type.componentType();
+ if (component == *fContext.fFloat_Type || component == *fContext.fHalf_Type) {
+ result = "mat";
+ }
+ else if (component == *fContext.fDouble_Type) {
+ result = "dmat";
+ }
+ else {
+ ABORT("unsupported matrix type");
+ }
+ result += to_string(type.columns());
+ if (type.columns() != type.rows()) {
+ result += "x";
+ result += to_string(type.rows());
+ }
+ return result;
+ }
+ case Type::kArray_Kind: {
+ String result = this->getTypeName(type.componentType()) + "[";
+ if (type.columns() != -1) {
+ result += to_string(type.columns());
+ }
+ result += "]";
+ return result;
+ }
+ case Type::kScalar_Kind: {
+ if (type == *fContext.fHalf_Type) {
+ return "float";
+ }
+ else if (type == *fContext.fShort_Type) {
+ return "int";
+ }
+ else if (type == *fContext.fUShort_Type) {
+ return "uint";
+ }
+ else if (type == *fContext.fByte_Type) {
+ return "int";
+ }
+ else if (type == *fContext.fUByte_Type) {
+ return "uint";
+ }
+ else {
+ return type.name();
+ }
+ break;
+ }
+ default:
+ return type.name();
+ }
+}
+
+void GLSLCodeGenerator::writeType(const Type& type) {
+ if (type.kind() == Type::kStruct_Kind) {
+ for (const Type* search : fWrittenStructs) {
+ if (*search == type) {
+ // already written
+ this->write(type.fName);
+ return;
+ }
+ }
+ fWrittenStructs.push_back(&type);
+ this->write("struct ");
+ this->write(type.fName);
+ this->writeLine(" {");
+ fIndentation++;
+ for (const auto& f : type.fields()) {
+ this->writeModifiers(f.fModifiers, false);
+ this->writeTypePrecision(*f.fType);
+ // sizes (which must be static in structs) are part of the type name here
+ this->writeType(*f.fType);
+ this->write(" ");
+ this->write(f.fName);
+ this->writeLine(";");
+ }
+ fIndentation--;
+ this->write("}");
+ } else {
+ this->write(this->getTypeName(type));
+ }
+}
+
+void GLSLCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
+ switch (expr.fKind) {
+ case Expression::kBinary_Kind:
+ this->writeBinaryExpression((BinaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kBoolLiteral_Kind:
+ this->writeBoolLiteral((BoolLiteral&) expr);
+ break;
+ case Expression::kConstructor_Kind:
+ this->writeConstructor((Constructor&) expr, parentPrecedence);
+ break;
+ case Expression::kIntLiteral_Kind:
+ this->writeIntLiteral((IntLiteral&) expr);
+ break;
+ case Expression::kFieldAccess_Kind:
+ this->writeFieldAccess(((FieldAccess&) expr));
+ break;
+ case Expression::kFloatLiteral_Kind:
+ this->writeFloatLiteral(((FloatLiteral&) expr));
+ break;
+ case Expression::kFunctionCall_Kind:
+ this->writeFunctionCall((FunctionCall&) expr);
+ break;
+ case Expression::kPrefix_Kind:
+ this->writePrefixExpression((PrefixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kPostfix_Kind:
+ this->writePostfixExpression((PostfixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kSetting_Kind:
+ this->writeSetting((Setting&) expr);
+ break;
+ case Expression::kSwizzle_Kind:
+ this->writeSwizzle((Swizzle&) expr);
+ break;
+ case Expression::kVariableReference_Kind:
+ this->writeVariableReference((VariableReference&) expr);
+ break;
+ case Expression::kTernary_Kind:
+ this->writeTernaryExpression((TernaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kIndex_Kind:
+ this->writeIndexExpression((IndexExpression&) expr);
+ break;
+ default:
+ ABORT("unsupported expression: %s", expr.description().c_str());
+ }
+}
+
+static bool is_abs(Expression& expr) {
+ if (expr.fKind != Expression::kFunctionCall_Kind) {
+ return false;
+ }
+ return ((FunctionCall&) expr).fFunction.fName == "abs";
+}
+
+// turns min(abs(x), y) into ((tmpVar1 = abs(x)) < (tmpVar2 = y) ? tmpVar1 : tmpVar2) to avoid a
+// Tegra3 compiler bug.
+void GLSLCodeGenerator::writeMinAbsHack(Expression& absExpr, Expression& otherExpr) {
+ SkASSERT(!fProgram.fSettings.fCaps->canUseMinAndAbsTogether());
+ String tmpVar1 = "minAbsHackVar" + to_string(fVarCount++);
+ String tmpVar2 = "minAbsHackVar" + to_string(fVarCount++);
+ this->fFunctionHeader += String(" ") + this->getTypePrecision(absExpr.fType) +
+ this->getTypeName(absExpr.fType) + " " + tmpVar1 + ";\n";
+ this->fFunctionHeader += String(" ") + this->getTypePrecision(otherExpr.fType) +
+ this->getTypeName(otherExpr.fType) + " " + tmpVar2 + ";\n";
+ this->write("((" + tmpVar1 + " = ");
+ this->writeExpression(absExpr, kTopLevel_Precedence);
+ this->write(") < (" + tmpVar2 + " = ");
+ this->writeExpression(otherExpr, kAssignment_Precedence);
+ this->write(") ? " + tmpVar1 + " : " + tmpVar2 + ")");
+}
+
+void GLSLCodeGenerator::writeInverseSqrtHack(const Expression& x) {
+ this->write("(1.0 / sqrt(");
+ this->writeExpression(x, kTopLevel_Precedence);
+ this->write("))");
+}
+
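+// Emulates determinant() on targets whose GLSL lacks it: emits, once per matrix size, a
+// helper (_determinant2/_determinant3/_determinant4) that expands the determinant by
+// cofactors, then writes a call to that helper.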
+void GLSLCodeGenerator::writeDeterminantHack(const Expression& mat) {
+ String name;
+ if (mat.fType == *fContext.fFloat2x2_Type || mat.fType == *fContext.fHalf2x2_Type) {
+ name = "_determinant2";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "float " + name + "(mat2 m) {"
+ " return m[0][0] * m[1][1] - m[0][1] * m[1][0];"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat3x3_Type || mat.fType == *fContext.fHalf3x3_Type) {
+ name = "_determinant3";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "float " + name + "(mat3 m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2];"
+ " float b01 = a22 * a11 - a12 * a21;"
+ " float b11 = -a22 * a10 + a12 * a20;"
+ " float b21 = a21 * a10 - a11 * a20;"
+ " return a00 * b01 + a01 * b11 + a02 * b21;"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat4x4_Type || mat.fType == *fContext.fHalf4x4_Type) {
+ name = "_determinant3";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "mat4 " + name + "(mat4 m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3];"
+ " float a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3];"
+ " float b00 = a00 * a11 - a01 * a10;"
+ " float b01 = a00 * a12 - a02 * a10;"
+ " float b02 = a00 * a13 - a03 * a10;"
+ " float b03 = a01 * a12 - a02 * a11;"
+ " float b04 = a01 * a13 - a03 * a11;"
+ " float b05 = a02 * a13 - a03 * a12;"
+ " float b06 = a20 * a31 - a21 * a30;"
+ " float b07 = a20 * a32 - a22 * a30;"
+ " float b08 = a20 * a33 - a23 * a30;"
+ " float b09 = a21 * a32 - a22 * a31;"
+ " float b10 = a21 * a33 - a23 * a31;"
+ " float b11 = a22 * a33 - a23 * a32;"
+ " return b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;"
+ "}"
+ ).c_str());
+ }
+ }
+ else {
+ SkASSERT(false);
+ }
+ this->write(name + "(");
+ this->writeExpression(mat, kTopLevel_Precedence);
+ this->write(")");
+}
+
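+// Same pattern as writeDeterminantHack, but for inverse(): emits a one-time helper
+// (_inverse2/_inverse3/_inverse4) that computes the classical adjugate divided by the
+// determinant, then writes a call to it.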
+void GLSLCodeGenerator::writeInverseHack(const Expression& mat) {
+ String name;
+ if (mat.fType == *fContext.fFloat2x2_Type || mat.fType == *fContext.fHalf2x2_Type) {
+ name = "_inverse2";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "mat2 " + name + "(mat2 m) {"
+ " return mat2(m[1][1], -m[0][1], -m[1][0], m[0][0]) / "
+ "(m[0][0] * m[1][1] - m[0][1] * m[1][0]);"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat3x3_Type || mat.fType == *fContext.fHalf3x3_Type) {
+ name = "_inverse3";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "mat3 " + name + "(mat3 m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2];"
+ " float b01 = a22 * a11 - a12 * a21;"
+ " float b11 = -a22 * a10 + a12 * a20;"
+ " float b21 = a21 * a10 - a11 * a20;"
+ " float det = a00 * b01 + a01 * b11 + a02 * b21;"
+ " return mat3(b01, (-a22 * a01 + a02 * a21), (a12 * a01 - a02 * a11),"
+ " b11, (a22 * a00 - a02 * a20), (-a12 * a00 + a02 * a10),"
+ " b21, (-a21 * a00 + a01 * a20), (a11 * a00 - a01 * a10)) / det;"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat4x4_Type || mat.fType == *fContext.fHalf4x4_Type) {
+ name = "_inverse4";
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "mat4 " + name + "(mat4 m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3];"
+ " float a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3];"
+ " float b00 = a00 * a11 - a01 * a10;"
+ " float b01 = a00 * a12 - a02 * a10;"
+ " float b02 = a00 * a13 - a03 * a10;"
+ " float b03 = a01 * a12 - a02 * a11;"
+ " float b04 = a01 * a13 - a03 * a11;"
+ " float b05 = a02 * a13 - a03 * a12;"
+ " float b06 = a20 * a31 - a21 * a30;"
+ " float b07 = a20 * a32 - a22 * a30;"
+ " float b08 = a20 * a33 - a23 * a30;"
+ " float b09 = a21 * a32 - a22 * a31;"
+ " float b10 = a21 * a33 - a23 * a31;"
+ " float b11 = a22 * a33 - a23 * a32;"
+ " float det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - "
+ " b04 * b07 + b05 * b06;"
+ " return mat4("
+ " a11 * b11 - a12 * b10 + a13 * b09,"
+ " a02 * b10 - a01 * b11 - a03 * b09,"
+ " a31 * b05 - a32 * b04 + a33 * b03,"
+ " a22 * b04 - a21 * b05 - a23 * b03,"
+ " a12 * b08 - a10 * b11 - a13 * b07,"
+ " a00 * b11 - a02 * b08 + a03 * b07,"
+ " a32 * b02 - a30 * b05 - a33 * b01,"
+ " a20 * b05 - a22 * b02 + a23 * b01,"
+ " a10 * b10 - a11 * b08 + a13 * b06,"
+ " a01 * b08 - a00 * b10 - a03 * b06,"
+ " a30 * b04 - a31 * b02 + a33 * b00,"
+ " a21 * b02 - a20 * b04 - a23 * b00,"
+ " a11 * b07 - a10 * b09 - a12 * b06,"
+ " a00 * b09 - a01 * b07 + a02 * b06,"
+ " a31 * b01 - a30 * b03 - a32 * b00,"
+ " a20 * b03 - a21 * b01 + a22 * b00) / det;"
+ "}"
+ ).c_str());
+ }
+ }
+ else {
+ SkASSERT(false);
+ }
+ this->write(name + "(");
+ this->writeExpression(mat, kTopLevel_Precedence);
+ this->write(")");
+}
+
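+// Emulates transpose() by emitting a one-time helper named after the matrix dimensions.
+// For a float2x3 argument, for example, this writes (modulo whitespace)
+//     mat3x2 transpose23(mat2x3 m) {
+//         return mat3x2(m[0][0], m[1][0], m[0][1], m[1][1], m[0][2], m[1][2]);
+//     }
+// and then writes a call to it.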
+void GLSLCodeGenerator::writeTransposeHack(const Expression& mat) {
+ String name = "transpose" + to_string(mat.fType.columns()) + to_string(mat.fType.rows());
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ String type = this->getTypeName(mat.fType);
+ const Type& base = mat.fType.componentType();
+ String transposed = this->getTypeName(base.toCompound(fContext,
+ mat.fType.rows(),
+ mat.fType.columns()));
+ fExtraFunctions.writeText((transposed + " " + name + "(" + type + " m) {\nreturn " +
+ transposed + "(").c_str());
+ const char* separator = "";
+ for (int row = 0; row < mat.fType.rows(); ++row) {
+ for (int column = 0; column < mat.fType.columns(); ++column) {
+ fExtraFunctions.writeText(separator);
+ fExtraFunctions.writeText(("m[" + to_string(column) + "][" + to_string(row) +
+ "]").c_str());
+ separator = ", ";
+ }
+ }
+ fExtraFunctions.writeText("); }");
+ }
+ this->write(name + "(");
+ this->writeExpression(mat, kTopLevel_Precedence);
+ this->write(")");
+}
+
+std::unordered_map<StringFragment, GLSLCodeGenerator::FunctionClass>*
+ GLSLCodeGenerator::fFunctionClasses = nullptr;
+
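+// Writes a function call, first consulting a lazily built table of intrinsics that need
+// driver-specific handling (determinant/inverse/transpose emulation, abs/atan/fract/pow
+// workarounds, texture naming, etc.); anything not in the table is written as a plain call.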
+void GLSLCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+#ifdef SKSL_STANDALONE
+ if (!fFunctionClasses) {
+#else
+ static SkOnce once;
+ once([] {
+#endif
+ fFunctionClasses = new std::unordered_map<StringFragment, FunctionClass>();
+ (*fFunctionClasses)["abs"] = FunctionClass::kAbs;
+ (*fFunctionClasses)["atan"] = FunctionClass::kAtan;
+ (*fFunctionClasses)["determinant"] = FunctionClass::kDeterminant;
+ (*fFunctionClasses)["dFdx"] = FunctionClass::kDFdx;
+ (*fFunctionClasses)["dFdy"] = FunctionClass::kDFdy;
+ (*fFunctionClasses)["fwidth"] = FunctionClass::kFwidth;
+ (*fFunctionClasses)["fma"] = FunctionClass::kFMA;
+ (*fFunctionClasses)["fract"] = FunctionClass::kFract;
+ (*fFunctionClasses)["inverse"] = FunctionClass::kInverse;
+ (*fFunctionClasses)["inverseSqrt"] = FunctionClass::kInverseSqrt;
+ (*fFunctionClasses)["min"] = FunctionClass::kMin;
+ (*fFunctionClasses)["pow"] = FunctionClass::kPow;
+ (*fFunctionClasses)["saturate"] = FunctionClass::kSaturate;
+ (*fFunctionClasses)["sample"] = FunctionClass::kTexture;
+ (*fFunctionClasses)["transpose"] = FunctionClass::kTranspose;
+ }
+#ifndef SKSL_STANDALONE
+ );
+#endif
+ const auto found = c.fFunction.fBuiltin ? fFunctionClasses->find(c.fFunction.fName) :
+ fFunctionClasses->end();
+ bool isTextureFunctionWithBias = false;
+ bool nameWritten = false;
+ if (found != fFunctionClasses->end()) {
+ switch (found->second) {
+ case FunctionClass::kAbs: {
+ if (!fProgram.fSettings.fCaps->emulateAbsIntFunction())
+ break;
+ SkASSERT(c.fArguments.size() == 1);
+ if (c.fArguments[0]->fType != *fContext.fInt_Type)
+ break;
+ // abs(int) on Intel OSX is incorrect, so emulate it:
+ String name = "_absemulation";
+ this->write(name);
+ nameWritten = true;
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ "int " + name + "(int x) {\n"
+ " return x * sign(x);\n"
+ "}\n"
+ ).c_str());
+ }
+ break;
+ }
+ case FunctionClass::kAtan:
+ if (fProgram.fSettings.fCaps->mustForceNegatedAtanParamToFloat() &&
+ c.fArguments.size() == 2 &&
+ c.fArguments[1]->fKind == Expression::kPrefix_Kind) {
+ const PrefixExpression& p = (PrefixExpression&) *c.fArguments[1];
+ if (p.fOperator == Token::MINUS) {
+ this->write("atan(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(", -1.0 * ");
+ this->writeExpression(*p.fOperand, kMultiplicative_Precedence);
+ this->write(")");
+ return;
+ }
+ }
+ break;
+ case FunctionClass::kDFdy:
+ if (fProgram.fSettings.fFlipY) {
+ // Flipping Y also negates the Y derivatives.
+ this->write("-dFdy");
+ nameWritten = true;
+ }
+ // fallthru
+ case FunctionClass::kDFdx:
+ case FunctionClass::kFwidth:
+ if (!fFoundDerivatives &&
+ fProgram.fSettings.fCaps->shaderDerivativeExtensionString()) {
+ SkASSERT(fProgram.fSettings.fCaps->shaderDerivativeSupport());
+ this->writeExtension(fProgram.fSettings.fCaps->shaderDerivativeExtensionString());
+ fFoundDerivatives = true;
+ }
+ break;
+ case FunctionClass::kDeterminant:
+ if (fProgram.fSettings.fCaps->generation() < k150_GrGLSLGeneration) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->writeDeterminantHack(*c.fArguments[0]);
+ return;
+ }
+ break;
+ case FunctionClass::kFMA:
+ if (!fProgram.fSettings.fCaps->builtinFMASupport()) {
+ SkASSERT(c.fArguments.size() == 3);
+ this->write("((");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(") * (");
+ this->writeExpression(*c.fArguments[1], kSequence_Precedence);
+ this->write(") + (");
+ this->writeExpression(*c.fArguments[2], kSequence_Precedence);
+ this->write("))");
+ return;
+ }
+ break;
+ case FunctionClass::kFract:
+ if (!fProgram.fSettings.fCaps->canUseFractForNegativeValues()) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->write("(0.5 - sign(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(") * (0.5 - fract(abs(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write("))))");
+ return;
+ }
+ break;
+ case FunctionClass::kInverse:
+ if (fProgram.fSettings.fCaps->generation() < k140_GrGLSLGeneration) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->writeInverseHack(*c.fArguments[0]);
+ return;
+ }
+ break;
+ case FunctionClass::kInverseSqrt:
+ if (fProgram.fSettings.fCaps->generation() < k130_GrGLSLGeneration) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->writeInverseSqrtHack(*c.fArguments[0]);
+ return;
+ }
+ break;
+ case FunctionClass::kMin:
+ if (!fProgram.fSettings.fCaps->canUseMinAndAbsTogether()) {
+ SkASSERT(c.fArguments.size() == 2);
+ if (is_abs(*c.fArguments[0])) {
+ this->writeMinAbsHack(*c.fArguments[0], *c.fArguments[1]);
+ return;
+ }
+ if (is_abs(*c.fArguments[1])) {
+ // note that this violates the GLSL left-to-right evaluation semantics.
+ // I doubt it will ever end up mattering, but it's worth calling out.
+ this->writeMinAbsHack(*c.fArguments[1], *c.fArguments[0]);
+ return;
+ }
+ }
+ break;
+ case FunctionClass::kPow:
+ if (!fProgram.fSettings.fCaps->removePowWithConstantExponent()) {
+ break;
+ }
+ // pow(x, y) on some NVIDIA drivers causes crashes if y is a
+ // constant. It's hard to tell what constitutes "constant" here
+ // so just replace in all cases.
+
+ // Change pow(x, y) into exp2(y * log2(x))
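+                // e.g. 'pow(x, 2.0)' is written as 'exp2(2.0 * log2(x))'.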
+ this->write("exp2(");
+ this->writeExpression(*c.fArguments[1], kMultiplicative_Precedence);
+ this->write(" * log2(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write("))");
+ return;
+ case FunctionClass::kSaturate:
+ SkASSERT(c.fArguments.size() == 1);
+ this->write("clamp(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(", 0.0, 1.0)");
+ return;
+ case FunctionClass::kTexture: {
+ const char* dim = "";
+ bool proj = false;
+ switch (c.fArguments[0]->fType.dimensions()) {
+ case SpvDim1D:
+ dim = "1D";
+ isTextureFunctionWithBias = true;
+ if (c.fArguments[1]->fType == *fContext.fFloat_Type) {
+ proj = false;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat2_Type);
+ proj = true;
+ }
+ break;
+ case SpvDim2D:
+ dim = "2D";
+ if (c.fArguments[0]->fType != *fContext.fSamplerExternalOES_Type) {
+ isTextureFunctionWithBias = true;
+ }
+ if (c.fArguments[1]->fType == *fContext.fFloat2_Type) {
+ proj = false;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat3_Type);
+ proj = true;
+ }
+ break;
+ case SpvDim3D:
+ dim = "3D";
+ isTextureFunctionWithBias = true;
+ if (c.fArguments[1]->fType == *fContext.fFloat3_Type) {
+ proj = false;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat4_Type);
+ proj = true;
+ }
+ break;
+ case SpvDimCube:
+ dim = "Cube";
+ isTextureFunctionWithBias = true;
+ proj = false;
+ break;
+ case SpvDimRect:
+ dim = "2DRect";
+ proj = false;
+ break;
+ case SpvDimBuffer:
+ SkASSERT(false); // doesn't exist
+ dim = "Buffer";
+ proj = false;
+ break;
+ case SpvDimSubpassData:
+ SkASSERT(false); // doesn't exist
+ dim = "SubpassData";
+ proj = false;
+ break;
+ }
+ if (fTextureFunctionOverride != "") {
+ this->write(fTextureFunctionOverride.c_str());
+ } else {
+ this->write("texture");
+ if (fProgram.fSettings.fCaps->generation() < k130_GrGLSLGeneration) {
+ this->write(dim);
+ }
+ if (proj) {
+ this->write("Proj");
+ }
+ }
+ nameWritten = true;
+ break;
+ }
+ case FunctionClass::kTranspose:
+ if (fProgram.fSettings.fCaps->generation() < k130_GrGLSLGeneration) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->writeTransposeHack(*c.fArguments[0]);
+ return;
+ }
+ break;
+ }
+ }
+ if (!nameWritten) {
+ this->write(c.fFunction.fName);
+ }
+ this->write("(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ if (fProgram.fSettings.fSharpenTextures && isTextureFunctionWithBias) {
+ this->write(", -0.5");
+ }
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeConstructor(const Constructor& c, Precedence parentPrecedence) {
+ if (c.fArguments.size() == 1 &&
+ (this->getTypeName(c.fType) == this->getTypeName(c.fArguments[0]->fType) ||
+ (c.fType.kind() == Type::kScalar_Kind &&
+ c.fArguments[0]->fType == *fContext.fFloatLiteral_Type))) {
+ // in cases like half(float), they're different types as far as SkSL is concerned but the
+ // same type as far as GLSL is concerned. We avoid a redundant float(float) by just writing
+ // out the inner expression here.
+ this->writeExpression(*c.fArguments[0], parentPrecedence);
+ return;
+ }
+ this->writeType(c.fType);
+ this->write("(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ this->write(")");
+}
+
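+// Writes the GLSL equivalent of sk_FragCoord. When gl_FragCoord is unusable it is
+// reconstructed (as sk_FragCoord_Resolved) from the sk_FragCoord_Workaround varying; when
+// the program flips Y, either layout(origin_upper_left) or a manual flip against the render
+// target height is used, depending on extension support.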
+void GLSLCodeGenerator::writeFragCoord() {
+ if (!fProgram.fSettings.fCaps->canUseFragCoord()) {
+ if (!fSetupFragCoordWorkaround) {
+ const char* precision = usesPrecisionModifiers() ? "highp " : "";
+ fFunctionHeader += precision;
+ fFunctionHeader += " float sk_FragCoord_InvW = 1. / sk_FragCoord_Workaround.w;\n";
+ fFunctionHeader += precision;
+ fFunctionHeader += " vec4 sk_FragCoord_Resolved = "
+ "vec4(sk_FragCoord_Workaround.xyz * sk_FragCoord_InvW, sk_FragCoord_InvW);\n";
+ // Ensure that we get exact .5 values for x and y.
+ fFunctionHeader += " sk_FragCoord_Resolved.xy = floor(sk_FragCoord_Resolved.xy) + "
+ "vec2(.5);\n";
+ fSetupFragCoordWorkaround = true;
+ }
+ this->write("sk_FragCoord_Resolved");
+ return;
+ }
+
+ // We only declare "gl_FragCoord" when we're in the case where we want to use layout qualifiers
+ // to reverse y. Otherwise it isn't necessary and whether the "in" qualifier appears in the
+ // declaration varies in earlier GLSL specs. So it is simpler to omit it.
+ if (!fProgram.fSettings.fFlipY) {
+ this->write("gl_FragCoord");
+ } else if (const char* extension =
+ fProgram.fSettings.fCaps->fragCoordConventionsExtensionString()) {
+ if (!fSetupFragPositionGlobal) {
+ if (fProgram.fSettings.fCaps->generation() < k150_GrGLSLGeneration) {
+ this->writeExtension(extension);
+ }
+ fGlobals.writeText("layout(origin_upper_left) in vec4 gl_FragCoord;\n");
+ fSetupFragPositionGlobal = true;
+ }
+ this->write("gl_FragCoord");
+ } else {
+ if (!fSetupFragPositionLocal) {
+ fFunctionHeader += usesPrecisionModifiers() ? "highp " : "";
+ fFunctionHeader += " vec4 sk_FragCoord = vec4(gl_FragCoord.x, " SKSL_RTHEIGHT_NAME
+ " - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);\n";
+ fSetupFragPositionLocal = true;
+ }
+ this->write("sk_FragCoord");
+ }
+}
+
+void GLSLCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ switch (ref.fVariable.fModifiers.fLayout.fBuiltin) {
+ case SK_FRAGCOLOR_BUILTIN:
+ if (fProgram.fSettings.fCaps->mustDeclareFragmentShaderOutput()) {
+ this->write("sk_FragColor");
+ } else {
+ this->write("gl_FragColor");
+ }
+ break;
+ case SK_FRAGCOORD_BUILTIN:
+ this->writeFragCoord();
+ break;
+ case SK_WIDTH_BUILTIN:
+ this->write("u_skRTWidth");
+ break;
+ case SK_HEIGHT_BUILTIN:
+ this->write("u_skRTHeight");
+ break;
+ case SK_CLOCKWISE_BUILTIN:
+ this->write(fProgram.fSettings.fFlipY ? "(!gl_FrontFacing)" : "gl_FrontFacing");
+ break;
+ case SK_VERTEXID_BUILTIN:
+ this->write("gl_VertexID");
+ break;
+ case SK_INSTANCEID_BUILTIN:
+ this->write("gl_InstanceID");
+ break;
+ case SK_CLIPDISTANCE_BUILTIN:
+ this->write("gl_ClipDistance");
+ break;
+ case SK_IN_BUILTIN:
+ this->write("gl_in");
+ break;
+ case SK_INVOCATIONID_BUILTIN:
+ this->write("gl_InvocationID");
+ break;
+ case SK_LASTFRAGCOLOR_BUILTIN:
+ this->write(fProgram.fSettings.fCaps->fbFetchColorName());
+ break;
+ default:
+ this->write(ref.fVariable.fName);
+ }
+}
+
+void GLSLCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+ this->writeExpression(*expr.fBase, kPostfix_Precedence);
+ this->write("[");
+ this->writeExpression(*expr.fIndex, kTopLevel_Precedence);
+ this->write("]");
+}
+
+static bool is_sk_position(const FieldAccess& f) {
+ return "sk_Position" == f.fBase->fType.fields()[f.fFieldIndex].fName;
+}
+
+void GLSLCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+ if (f.fOwnerKind == FieldAccess::kDefault_OwnerKind) {
+ this->writeExpression(*f.fBase, kPostfix_Precedence);
+ this->write(".");
+ }
+ switch (f.fBase->fType.fields()[f.fFieldIndex].fModifiers.fLayout.fBuiltin) {
+ case SK_CLIPDISTANCE_BUILTIN:
+ this->write("gl_ClipDistance");
+ break;
+ default:
+ StringFragment name = f.fBase->fType.fields()[f.fFieldIndex].fName;
+ if (name == "sk_Position") {
+ this->write("gl_Position");
+ } else if (name == "sk_PointSize") {
+ this->write("gl_PointSize");
+ } else {
+ this->write(f.fBase->fType.fields()[f.fFieldIndex].fName);
+ }
+ }
+}
+
+void GLSLCodeGenerator::writeConstantSwizzle(const Swizzle& swizzle, const String& constants) {
+ this->writeType(swizzle.fType);
+ this->write("(");
+ this->write(constants);
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeSwizzleMask(const Swizzle& swizzle, const String& mask) {
+ this->writeExpression(*swizzle.fBase, kPostfix_Precedence);
+ this->write(".");
+ this->write(mask);
+}
+
+void GLSLCodeGenerator::writeSwizzleConstructor(const Swizzle& swizzle, const String& constants,
+ const String& mask,
+ GLSLCodeGenerator::SwizzleOrder order) {
+ this->writeType(swizzle.fType);
+ this->write("(");
+ if (order == SwizzleOrder::CONSTANTS_FIRST) {
+ this->write(constants);
+ this->write(", ");
+ this->writeSwizzleMask(swizzle, mask);
+ } else {
+ this->writeSwizzleMask(swizzle, mask);
+ this->write(", ");
+ this->write(constants);
+ }
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeSwizzleConstructor(const Swizzle& swizzle, const String& constants,
+ const String& mask, const String& reswizzle) {
+ this->writeSwizzleConstructor(swizzle, constants, mask, SwizzleOrder::MASK_FIRST);
+ this->write(".");
+ this->write(reswizzle);
+}
+
+// Writing a swizzle is complicated due to the handling of constant swizzle components. The most
+// problematic case is a mask like '.r00a'. A naive approach might turn that into
+// 'vec4(base.r, 0, 0, base.a)', but that would cause 'base' to be evaluated twice. We instead
+// group the swizzle mask ('ra') and constants ('0, 0') together and use a secondary swizzle to put
+// them back into the right order, so in this case we end up with something like
+// 'vec4(base.ra, 0, 0).rbag'.
+void GLSLCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+ // has a 1 bit in every position for which the swizzle mask is a constant, so 'r0b1' would
+ // yield binary 0101.
+ int constantBits = 0;
+ String mask;
+ String constants;
+ // compute mask ("ra") and constant ("0, 0") strings, and fill in constantBits
+ for (int c : swizzle.fComponents) {
+ constantBits <<= 1;
+ switch (c) {
+ case SKSL_SWIZZLE_0:
+ constantBits |= 1;
+ if (constants.length() > 0) {
+ constants += ", ";
+ }
+ constants += "0";
+ break;
+ case SKSL_SWIZZLE_1:
+ constantBits |= 1;
+ if (constants.length() > 0) {
+ constants += ", ";
+ }
+ constants += "1";
+ break;
+ case 0:
+ mask += "x";
+ break;
+ case 1:
+ mask += "y";
+ break;
+ case 2:
+ mask += "z";
+ break;
+ case 3:
+ mask += "w";
+ break;
+ default:
+ SkASSERT(false);
+ }
+ }
+ switch (swizzle.fComponents.size()) {
+ case 1:
+ if (constantBits == 1) {
+ this->write(constants);
+ }
+ else {
+ this->writeSwizzleMask(swizzle, mask);
+ }
+ break;
+ case 2:
+ switch (constantBits) {
+ case 0: // 00
+ this->writeSwizzleMask(swizzle, mask);
+ break;
+ case 1: // 01
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::MASK_FIRST);
+ break;
+ case 2: // 10
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::CONSTANTS_FIRST);
+ break;
+ case 3: // 11
+ this->writeConstantSwizzle(swizzle, constants);
+ break;
+ default:
+ SkASSERT(false);
+ }
+ break;
+ case 3:
+ switch (constantBits) {
+ case 0: // 000
+ this->writeSwizzleMask(swizzle, mask);
+ break;
+ case 1: // 001
+ case 3: // 011
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::MASK_FIRST);
+ break;
+ case 4: // 100
+ case 6: // 110
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::CONSTANTS_FIRST);
+ break;
+ case 2: // 010
+ this->writeSwizzleConstructor(swizzle, constants, mask, "xzy");
+ break;
+ case 5: // 101
+ this->writeSwizzleConstructor(swizzle, constants, mask, "yxz");
+ break;
+ case 7: // 111
+ this->writeConstantSwizzle(swizzle, constants);
+ break;
+ }
+ break;
+ case 4:
+ switch (constantBits) {
+ case 0: // 0000
+ this->writeSwizzleMask(swizzle, mask);
+ break;
+ case 1: // 0001
+ case 3: // 0011
+ case 7: // 0111
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::MASK_FIRST);
+ break;
+ case 8: // 1000
+ case 12: // 1100
+ case 14: // 1110
+ this->writeSwizzleConstructor(swizzle, constants, mask,
+ SwizzleOrder::CONSTANTS_FIRST);
+ break;
+ case 2: // 0010
+ this->writeSwizzleConstructor(swizzle, constants, mask, "xywz");
+ break;
+ case 4: // 0100
+ this->writeSwizzleConstructor(swizzle, constants, mask, "xwyz");
+ break;
+ case 5: // 0101
+ this->writeSwizzleConstructor(swizzle, constants, mask, "xzyw");
+ break;
+ case 6: // 0110
+ this->writeSwizzleConstructor(swizzle, constants, mask, "xzwy");
+ break;
+ case 9: // 1001
+ this->writeSwizzleConstructor(swizzle, constants, mask, "zxyw");
+ break;
+ case 10: // 1010
+ this->writeSwizzleConstructor(swizzle, constants, mask, "zxwy");
+ break;
+ case 11: // 1011
+ this->writeSwizzleConstructor(swizzle, constants, mask, "yxzw");
+ break;
+ case 13: // 1101
+ this->writeSwizzleConstructor(swizzle, constants, mask, "yzxw");
+ break;
+ case 15: // 1111
+ this->writeConstantSwizzle(swizzle, constants);
+ break;
+ }
+ }
+}
+
+GLSLCodeGenerator::Precedence GLSLCodeGenerator::GetBinaryPrecedence(Token::Kind op) {
+ switch (op) {
+ case Token::STAR: // fall through
+ case Token::SLASH: // fall through
+ case Token::PERCENT: return GLSLCodeGenerator::kMultiplicative_Precedence;
+ case Token::PLUS: // fall through
+ case Token::MINUS: return GLSLCodeGenerator::kAdditive_Precedence;
+ case Token::SHL: // fall through
+ case Token::SHR: return GLSLCodeGenerator::kShift_Precedence;
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ: return GLSLCodeGenerator::kRelational_Precedence;
+ case Token::EQEQ: // fall through
+ case Token::NEQ: return GLSLCodeGenerator::kEquality_Precedence;
+ case Token::BITWISEAND: return GLSLCodeGenerator::kBitwiseAnd_Precedence;
+ case Token::BITWISEXOR: return GLSLCodeGenerator::kBitwiseXor_Precedence;
+ case Token::BITWISEOR: return GLSLCodeGenerator::kBitwiseOr_Precedence;
+ case Token::LOGICALAND: return GLSLCodeGenerator::kLogicalAnd_Precedence;
+ case Token::LOGICALXOR: return GLSLCodeGenerator::kLogicalXor_Precedence;
+ case Token::LOGICALOR: return GLSLCodeGenerator::kLogicalOr_Precedence;
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEOREQ: return GLSLCodeGenerator::kAssignment_Precedence;
+ case Token::COMMA: return GLSLCodeGenerator::kSequence_Precedence;
+ default: ABORT("unsupported binary operator");
+ }
+}
+
+void GLSLCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ if (fProgram.fSettings.fCaps->unfoldShortCircuitAsTernary() &&
+ (b.fOperator == Token::LOGICALAND || b.fOperator == Token::LOGICALOR)) {
+ this->writeShortCircuitWorkaroundExpression(b, parentPrecedence);
+ return;
+ }
+
+ Precedence precedence = GetBinaryPrecedence(b.fOperator);
+ if (precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ bool positionWorkaround = fProgramKind == Program::Kind::kVertex_Kind &&
+ Compiler::IsAssignment(b.fOperator) &&
+ Expression::kFieldAccess_Kind == b.fLeft->fKind &&
+ is_sk_position((FieldAccess&) *b.fLeft) &&
+ !strstr(b.fRight->description().c_str(), "sk_RTAdjust") &&
+ !fProgram.fSettings.fCaps->canUseFragCoord();
+ if (positionWorkaround) {
+ this->write("sk_FragCoord_Workaround = (");
+ }
+ this->writeExpression(*b.fLeft, precedence);
+ this->write(" ");
+ this->write(Compiler::OperatorName(b.fOperator));
+ this->write(" ");
+ this->writeExpression(*b.fRight, precedence);
+ if (positionWorkaround) {
+ this->write(")");
+ }
+ if (precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writeShortCircuitWorkaroundExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+
+ // Transform:
+ // a && b => a ? b : false
+ // a || b => a ? true : b
+ this->writeExpression(*b.fLeft, kTernary_Precedence);
+ this->write(" ? ");
+ if (b.fOperator == Token::LOGICALAND) {
+ this->writeExpression(*b.fRight, kTernary_Precedence);
+ } else {
+ BoolLiteral boolTrue(fContext, -1, true);
+ this->writeBoolLiteral(boolTrue);
+ }
+ this->write(" : ");
+ if (b.fOperator == Token::LOGICALAND) {
+ BoolLiteral boolFalse(fContext, -1, false);
+ this->writeBoolLiteral(boolFalse);
+ } else {
+ this->writeExpression(*b.fRight, kTernary_Precedence);
+ }
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*t.fTest, kTernary_Precedence);
+ this->write(" ? ");
+ this->writeExpression(*t.fIfTrue, kTernary_Precedence);
+ this->write(" : ");
+ this->writeExpression(*t.fIfFalse, kTernary_Precedence);
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->write(Compiler::OperatorName(p.fOperator));
+ this->writeExpression(*p.fOperand, kPrefix_Precedence);
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*p.fOperand, kPostfix_Precedence);
+ this->write(Compiler::OperatorName(p.fOperator));
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
+ this->write(b.fValue ? "true" : "false");
+}
+
+void GLSLCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ if (i.fType == *fContext.fUInt_Type) {
+ this->write(to_string(i.fValue & 0xffffffff) + "u");
+ } else if (i.fType == *fContext.fUShort_Type) {
+ this->write(to_string(i.fValue & 0xffff) + "u");
+ } else if (i.fType == *fContext.fUByte_Type) {
+ this->write(to_string(i.fValue & 0xff) + "u");
+ } else {
+ this->write(to_string((int32_t) i.fValue));
+ }
+}
+
+void GLSLCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ this->write(to_string(f.fValue));
+}
+
+void GLSLCodeGenerator::writeSetting(const Setting& s) {
+ ABORT("internal error; setting was not folded to a constant during compilation\n");
+}
+
+void GLSLCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ fSetupFragPositionLocal = false;
+ fSetupFragCoordWorkaround = false;
+ if (fProgramKind != Program::kPipelineStage_Kind) {
+ this->writeTypePrecision(f.fDeclaration.fReturnType);
+ this->writeType(f.fDeclaration.fReturnType);
+ this->write(" " + f.fDeclaration.fName + "(");
+ const char* separator = "";
+ for (const auto& param : f.fDeclaration.fParameters) {
+ this->write(separator);
+ separator = ", ";
+ this->writeModifiers(param->fModifiers, false);
+ std::vector<int> sizes;
+ const Type* type = &param->fType;
+ while (type->kind() == Type::kArray_Kind) {
+ sizes.push_back(type->columns());
+ type = &type->componentType();
+ }
+ this->writeTypePrecision(*type);
+ this->writeType(*type);
+ this->write(" " + param->fName);
+ for (int s : sizes) {
+ if (s <= 0) {
+ this->write("[]");
+ } else {
+ this->write("[" + to_string(s) + "]");
+ }
+ }
+ }
+ this->writeLine(") {");
+ fIndentation++;
+ }
+ fFunctionHeader = "";
+ OutputStream* oldOut = fOut;
+ StringStream buffer;
+ fOut = &buffer;
+ this->writeStatements(((Block&) *f.fBody).fStatements);
+ if (fProgramKind != Program::kPipelineStage_Kind) {
+ fIndentation--;
+ this->writeLine("}");
+ }
+
+ fOut = oldOut;
+ this->write(fFunctionHeader);
+ this->write(buffer.str());
+}
+
+void GLSLCodeGenerator::writeModifiers(const Modifiers& modifiers,
+ bool globalContext) {
+ if (modifiers.fFlags & Modifiers::kFlat_Flag) {
+ this->write("flat ");
+ }
+ if (modifiers.fFlags & Modifiers::kNoPerspective_Flag) {
+ this->write("noperspective ");
+ }
+ String layout = modifiers.fLayout.description();
+ if (layout.size()) {
+ this->write(layout + " ");
+ }
+ if (modifiers.fFlags & Modifiers::kReadOnly_Flag) {
+ this->write("readonly ");
+ }
+ if (modifiers.fFlags & Modifiers::kWriteOnly_Flag) {
+ this->write("writeonly ");
+ }
+ if (modifiers.fFlags & Modifiers::kCoherent_Flag) {
+ this->write("coherent ");
+ }
+ if (modifiers.fFlags & Modifiers::kVolatile_Flag) {
+ this->write("volatile ");
+ }
+ if (modifiers.fFlags & Modifiers::kRestrict_Flag) {
+ this->write("restrict ");
+ }
+ if ((modifiers.fFlags & Modifiers::kIn_Flag) &&
+ (modifiers.fFlags & Modifiers::kOut_Flag)) {
+ this->write("inout ");
+ } else if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ if (globalContext &&
+ fProgram.fSettings.fCaps->generation() < GrGLSLGeneration::k130_GrGLSLGeneration) {
+ this->write(fProgramKind == Program::kVertex_Kind ? "attribute "
+ : "varying ");
+ } else {
+ this->write("in ");
+ }
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ if (globalContext &&
+ fProgram.fSettings.fCaps->generation() < GrGLSLGeneration::k130_GrGLSLGeneration) {
+ this->write("varying ");
+ } else {
+ this->write("out ");
+ }
+ }
+ if (modifiers.fFlags & Modifiers::kUniform_Flag) {
+ this->write("uniform ");
+ }
+ if (modifiers.fFlags & Modifiers::kConst_Flag) {
+ this->write("const ");
+ }
+ if (modifiers.fFlags & Modifiers::kPLS_Flag) {
+ this->write("__pixel_localEXT ");
+ }
+ if (modifiers.fFlags & Modifiers::kPLSIn_Flag) {
+ this->write("__pixel_local_inEXT ");
+ }
+ if (modifiers.fFlags & Modifiers::kPLSOut_Flag) {
+ this->write("__pixel_local_outEXT ");
+ }
+ switch (modifiers.fLayout.fFormat) {
+ case Layout::Format::kUnspecified:
+ break;
+ case Layout::Format::kRGBA32F: // fall through
+ case Layout::Format::kR32F:
+ this->write("highp ");
+ break;
+ case Layout::Format::kRGBA16F: // fall through
+ case Layout::Format::kR16F: // fall through
+ case Layout::Format::kLUMINANCE16F: // fall through
+ case Layout::Format::kRG16F:
+ this->write("mediump ");
+ break;
+ case Layout::Format::kRGBA8: // fall through
+ case Layout::Format::kR8: // fall through
+ case Layout::Format::kRGBA8I: // fall through
+ case Layout::Format::kR8I:
+ this->write("lowp ");
+ break;
+ }
+}
+
+void GLSLCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ if (intf.fTypeName == "sk_PerVertex") {
+ return;
+ }
+ this->writeModifiers(intf.fVariable.fModifiers, true);
+ this->writeLine(intf.fTypeName + " {");
+ fIndentation++;
+ const Type* structType = &intf.fVariable.fType;
+ while (structType->kind() == Type::kArray_Kind) {
+ structType = &structType->componentType();
+ }
+ for (const auto& f : structType->fields()) {
+ this->writeModifiers(f.fModifiers, false);
+ this->writeTypePrecision(*f.fType);
+ this->writeType(*f.fType);
+ this->writeLine(" " + f.fName + ";");
+ }
+ fIndentation--;
+ this->write("}");
+ if (intf.fInstanceName.size()) {
+ this->write(" ");
+ this->write(intf.fInstanceName);
+ for (const auto& size : intf.fSizes) {
+ this->write("[");
+ if (size) {
+ this->writeExpression(*size, kTopLevel_Precedence);
+ }
+ this->write("]");
+ }
+ }
+ this->writeLine(";");
+}
+
+void GLSLCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
+ this->writeExpression(value, kTopLevel_Precedence);
+}
+
+const char* GLSLCodeGenerator::getTypePrecision(const Type& type) {
+ if (usesPrecisionModifiers()) {
+ switch (type.kind()) {
+ case Type::kScalar_Kind:
+ if (type == *fContext.fShort_Type || type == *fContext.fUShort_Type ||
+ type == *fContext.fByte_Type || type == *fContext.fUByte_Type) {
+ if (fProgram.fSettings.fForceHighPrecision ||
+ fProgram.fSettings.fCaps->incompleteShortIntPrecision()) {
+ return "highp ";
+ }
+ return "mediump ";
+ }
+ if (type == *fContext.fHalf_Type) {
+ return fProgram.fSettings.fForceHighPrecision ? "highp " : "mediump ";
+ }
+ if (type == *fContext.fFloat_Type || type == *fContext.fInt_Type ||
+ type == *fContext.fUInt_Type) {
+ return "highp ";
+ }
+ return "";
+ case Type::kVector_Kind: // fall through
+ case Type::kMatrix_Kind:
+ return this->getTypePrecision(type.componentType());
+ default:
+ break;
+ }
+ }
+ return "";
+}
+
+void GLSLCodeGenerator::writeTypePrecision(const Type& type) {
+ this->write(this->getTypePrecision(type));
+}
+
+void GLSLCodeGenerator::writeVarDeclarations(const VarDeclarations& decl, bool global) {
+ if (!decl.fVars.size()) {
+ return;
+ }
+ bool wroteType = false;
+ for (const auto& stmt : decl.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ if (wroteType) {
+ this->write(", ");
+ } else {
+ this->writeModifiers(var.fVar->fModifiers, global);
+ this->writeTypePrecision(decl.fBaseType);
+ this->writeType(decl.fBaseType);
+ this->write(" ");
+ wroteType = true;
+ }
+ this->write(var.fVar->fName);
+ for (const auto& size : var.fSizes) {
+ this->write("[");
+ if (size) {
+ this->writeExpression(*size, kTopLevel_Precedence);
+ }
+ this->write("]");
+ }
+ if (var.fValue) {
+ this->write(" = ");
+ this->writeVarInitializer(*var.fVar, *var.fValue);
+ }
+ if (!fFoundExternalSamplerDecl && var.fVar->fType == *fContext.fSamplerExternalOES_Type) {
+ if (fProgram.fSettings.fCaps->externalTextureExtensionString()) {
+ this->writeExtension(fProgram.fSettings.fCaps->externalTextureExtensionString());
+ }
+ if (fProgram.fSettings.fCaps->secondExternalTextureExtensionString()) {
+ this->writeExtension(
+ fProgram.fSettings.fCaps->secondExternalTextureExtensionString());
+ }
+ fFoundExternalSamplerDecl = true;
+ }
+ if (!fFoundRectSamplerDecl && var.fVar->fType == *fContext.fSampler2DRect_Type) {
+ fFoundRectSamplerDecl = true;
+ }
+ }
+ if (wroteType) {
+ this->write(";");
+ }
+}
+
+void GLSLCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, kTopLevel_Precedence);
+ this->write(";");
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration, false);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s);
+ break;
+ case Statement::kWhile_Kind:
+ this->writeWhileStatement((WhileStatement&) s);
+ break;
+ case Statement::kDo_Kind:
+ this->writeDoStatement((DoStatement&) s);
+ break;
+ case Statement::kSwitch_Kind:
+ this->writeSwitchStatement((SwitchStatement&) s);
+ break;
+ case Statement::kBreak_Kind:
+ this->write("break;");
+ break;
+ case Statement::kContinue_Kind:
+ this->write("continue;");
+ break;
+ case Statement::kDiscard_Kind:
+ this->write("discard;");
+ break;
+ case Statement::kNop_Kind:
+ this->write(";");
+ break;
+ default:
+ ABORT("unsupported statement: %s", s.description().c_str());
+ }
+}
+
+void GLSLCodeGenerator::writeStatements(const std::vector<std::unique_ptr<Statement>>& statements) {
+ for (const auto& s : statements) {
+ if (!s->isEmpty()) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ }
+}
+
+void GLSLCodeGenerator::writeBlock(const Block& b) {
+ this->writeLine("{");
+ fIndentation++;
+ this->writeStatements(b.fStatements);
+ fIndentation--;
+ this->write("}");
+}
+
+void GLSLCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+ this->write("if (");
+ this->writeExpression(*stmt.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*stmt.fIfTrue);
+ if (stmt.fIfFalse) {
+ this->write(" else ");
+ this->writeStatement(*stmt.fIfFalse);
+ }
+}
+
+void GLSLCodeGenerator::writeForStatement(const ForStatement& f) {
+ this->write("for (");
+ if (f.fInitializer && !f.fInitializer->isEmpty()) {
+ this->writeStatement(*f.fInitializer);
+ } else {
+ this->write("; ");
+ }
+ if (f.fTest) {
+ if (fProgram.fSettings.fCaps->addAndTrueToLoopCondition()) {
+ std::unique_ptr<Expression> and_true(new BinaryExpression(
+ -1, f.fTest->clone(), Token::LOGICALAND,
+ std::unique_ptr<BoolLiteral>(new BoolLiteral(fContext, -1,
+ true)),
+ *fContext.fBool_Type));
+ this->writeExpression(*and_true, kTopLevel_Precedence);
+ } else {
+ this->writeExpression(*f.fTest, kTopLevel_Precedence);
+ }
+ }
+ this->write("; ");
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, kTopLevel_Precedence);
+ }
+ this->write(") ");
+ this->writeStatement(*f.fStatement);
+}
+
+void GLSLCodeGenerator::writeWhileStatement(const WhileStatement& w) {
+ this->write("while (");
+ this->writeExpression(*w.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*w.fStatement);
+}
+
+void GLSLCodeGenerator::writeDoStatement(const DoStatement& d) {
+ if (!fProgram.fSettings.fCaps->rewriteDoWhileLoops()) {
+ this->write("do ");
+ this->writeStatement(*d.fStatement);
+ this->write(" while (");
+ this->writeExpression(*d.fTest, kTopLevel_Precedence);
+ this->write(");");
+ return;
+ }
+
+    // Otherwise, apply the do-while loop workaround, rewriting loops of the form:
+ // do {
+ // CODE;
+ // } while (CONDITION)
+ //
+ // to loops of the form
+ // bool temp = false;
+ // while (true) {
+ // if (temp) {
+ // if (!CONDITION) {
+ // break;
+ // }
+ // }
+ // temp = true;
+ // CODE;
+ // }
+ String tmpVar = "_tmpLoopSeenOnce" + to_string(fVarCount++);
+ this->write("bool ");
+ this->write(tmpVar);
+ this->writeLine(" = false;");
+ this->writeLine("while (true) {");
+ fIndentation++;
+ this->write("if (");
+ this->write(tmpVar);
+ this->writeLine(") {");
+ fIndentation++;
+ this->write("if (!");
+ this->writeExpression(*d.fTest, kPrefix_Precedence);
+ this->writeLine(") {");
+ fIndentation++;
+ this->writeLine("break;");
+ fIndentation--;
+ this->writeLine("}");
+ fIndentation--;
+ this->writeLine("}");
+ this->write(tmpVar);
+ this->writeLine(" = true;");
+ this->writeStatement(*d.fStatement);
+ this->writeLine();
+ fIndentation--;
+ this->write("}");
+}
+
+void GLSLCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ this->write("switch (");
+ this->writeExpression(*s.fValue, kTopLevel_Precedence);
+ this->writeLine(") {");
+ fIndentation++;
+ for (const auto& c : s.fCases) {
+ if (c->fValue) {
+ this->write("case ");
+ this->writeExpression(*c->fValue, kTopLevel_Precedence);
+ this->writeLine(":");
+ } else {
+ this->writeLine("default:");
+ }
+ fIndentation++;
+ for (const auto& stmt : c->fStatements) {
+ this->writeStatement(*stmt);
+ this->writeLine();
+ }
+ fIndentation--;
+ }
+ fIndentation--;
+ this->write("}");
+}
+
+void GLSLCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ this->write("return");
+ if (r.fExpression) {
+ this->write(" ");
+ this->writeExpression(*r.fExpression, kTopLevel_Precedence);
+ }
+ this->write(";");
+}
+
+void GLSLCodeGenerator::writeHeader() {
+ this->write(fProgram.fSettings.fCaps->versionDeclString());
+ this->writeLine();
+}
+
+void GLSLCodeGenerator::writeProgramElement(const ProgramElement& e) {
+ switch (e.fKind) {
+ case ProgramElement::kExtension_Kind:
+ this->writeExtension(((Extension&) e).fName);
+ break;
+ case ProgramElement::kVar_Kind: {
+ VarDeclarations& decl = (VarDeclarations&) e;
+ if (decl.fVars.size() > 0) {
+ int builtin = ((VarDeclaration&) *decl.fVars[0]).fVar->fModifiers.fLayout.fBuiltin;
+ if (builtin == -1) {
+ // normal var
+ this->writeVarDeclarations(decl, true);
+ this->writeLine();
+ } else if (builtin == SK_FRAGCOLOR_BUILTIN &&
+ fProgram.fSettings.fCaps->mustDeclareFragmentShaderOutput() &&
+ ((VarDeclaration&) *decl.fVars[0]).fVar->fWriteCount) {
+ if (fProgram.fSettings.fFragColorIsInOut) {
+ this->write("inout ");
+ } else {
+ this->write("out ");
+ }
+ if (usesPrecisionModifiers()) {
+ this->write("mediump ");
+ }
+ this->writeLine("vec4 sk_FragColor;");
+ }
+ }
+ break;
+ }
+ case ProgramElement::kInterfaceBlock_Kind:
+ this->writeInterfaceBlock((InterfaceBlock&) e);
+ break;
+ case ProgramElement::kFunction_Kind:
+ this->writeFunction((FunctionDefinition&) e);
+ break;
+ case ProgramElement::kModifiers_Kind: {
+ const Modifiers& modifiers = ((ModifiersDeclaration&) e).fModifiers;
+ if (!fFoundGSInvocations && modifiers.fLayout.fInvocations >= 0) {
+ if (fProgram.fSettings.fCaps->gsInvocationsExtensionString()) {
+ this->writeExtension(fProgram.fSettings.fCaps->gsInvocationsExtensionString());
+ }
+ fFoundGSInvocations = true;
+ }
+ this->writeModifiers(modifiers, true);
+ this->writeLine(";");
+ break;
+ }
+ case ProgramElement::kEnum_Kind:
+ break;
+ default:
+ printf("%s\n", e.description().c_str());
+ ABORT("unsupported program element");
+ }
+}
+
+void GLSLCodeGenerator::writeInputVars() {
+ if (fProgram.fInputs.fRTWidth) {
+ const char* precision = usesPrecisionModifiers() ? "highp " : "";
+ fGlobals.writeText("uniform ");
+ fGlobals.writeText(precision);
+ fGlobals.writeText("float " SKSL_RTWIDTH_NAME ";\n");
+ }
+ if (fProgram.fInputs.fRTHeight) {
+ const char* precision = usesPrecisionModifiers() ? "highp " : "";
+ fGlobals.writeText("uniform ");
+ fGlobals.writeText(precision);
+ fGlobals.writeText("float " SKSL_RTHEIGHT_NAME ";\n");
+ }
+}
+
+bool GLSLCodeGenerator::generateCode() {
+ if (fProgramKind != Program::kPipelineStage_Kind) {
+ this->writeHeader();
+ }
+ if (Program::kGeometry_Kind == fProgramKind &&
+ fProgram.fSettings.fCaps->geometryShaderExtensionString()) {
+ this->writeExtension(fProgram.fSettings.fCaps->geometryShaderExtensionString());
+ }
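+    // Generate the program body into a side buffer first, so that any extensions, globals,
+    // and helper functions it triggers can be flushed ahead of it below.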
+ OutputStream* rawOut = fOut;
+ StringStream body;
+ fOut = &body;
+ for (const auto& e : fProgram) {
+ this->writeProgramElement(e);
+ }
+ fOut = rawOut;
+
+ write_stringstream(fExtensions, *rawOut);
+ this->writeInputVars();
+ write_stringstream(fGlobals, *rawOut);
+
+ if (!fProgram.fSettings.fCaps->canUseFragCoord()) {
+ Layout layout;
+ switch (fProgram.fKind) {
+ case Program::kVertex_Kind: {
+ Modifiers modifiers(layout, Modifiers::kOut_Flag);
+ this->writeModifiers(modifiers, true);
+ if (this->usesPrecisionModifiers()) {
+ this->write("highp ");
+ }
+ this->write("vec4 sk_FragCoord_Workaround;\n");
+ break;
+ }
+ case Program::kFragment_Kind: {
+ Modifiers modifiers(layout, Modifiers::kIn_Flag);
+ this->writeModifiers(modifiers, true);
+ if (this->usesPrecisionModifiers()) {
+ this->write("highp ");
+ }
+ this->write("vec4 sk_FragCoord_Workaround;\n");
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (this->usesPrecisionModifiers()) {
+ this->writeLine("precision mediump float;");
+ this->writeLine("precision mediump sampler2D;");
+ if (fFoundExternalSamplerDecl &&
+ !fProgram.fSettings.fCaps->noDefaultPrecisionForExternalSamplers()) {
+ this->writeLine("precision mediump samplerExternalOES;");
+ }
+ if (fFoundRectSamplerDecl) {
+ this->writeLine("precision mediump sampler2DRect;");
+ }
+ }
+ write_stringstream(fExtraFunctions, *rawOut);
+ write_stringstream(body, *rawOut);
+ return true;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h
new file mode 100644
index 0000000000..9977cf8dcf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_GLSLCODEGENERATOR
+#define SKSL_GLSLCODEGENERATOR
+
+#include <stack>
+#include <tuple>
+#include <unordered_map>
+
+#include "src/sksl/SkSLCodeGenerator.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+/**
+ * Converts a Program into GLSL code.
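+ * Typical usage (sketch): construct with a Context, the Program to translate, an
+ * ErrorReporter, and a destination OutputStream, then call generateCode() once.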
+ */
+class GLSLCodeGenerator : public CodeGenerator {
+public:
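+    // Operator precedence levels, tightest-binding first. Expression writers emit parentheses
+    // around a subexpression whenever its precedence value is >= its parent's; passing
+    // kTopLevel_Precedence as the parent therefore suppresses parentheses for everything
+    // except comma expressions.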
+ enum Precedence {
+ kParentheses_Precedence = 1,
+ kPostfix_Precedence = 2,
+ kPrefix_Precedence = 3,
+ kMultiplicative_Precedence = 4,
+ kAdditive_Precedence = 5,
+ kShift_Precedence = 6,
+ kRelational_Precedence = 7,
+ kEquality_Precedence = 8,
+ kBitwiseAnd_Precedence = 9,
+ kBitwiseXor_Precedence = 10,
+ kBitwiseOr_Precedence = 11,
+ kLogicalAnd_Precedence = 12,
+ kLogicalXor_Precedence = 13,
+ kLogicalOr_Precedence = 14,
+ kTernary_Precedence = 15,
+ kAssignment_Precedence = 16,
+ kSequence_Precedence = 17,
+ kTopLevel_Precedence = kSequence_Precedence
+ };
+
+ GLSLCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ OutputStream* out)
+ : INHERITED(program, errors, out)
+ , fLineEnding("\n")
+ , fContext(*context)
+ , fProgramKind(program->fKind) {}
+
+ bool generateCode() override;
+
+protected:
+ enum class SwizzleOrder {
+ MASK_FIRST,
+ CONSTANTS_FIRST
+ };
+
+ void write(const char* s);
+
+ void writeLine();
+
+ void writeLine(const char* s);
+
+ void write(const String& s);
+
+ void write(StringFragment s);
+
+ void writeLine(const String& s);
+
+ virtual void writeHeader();
+
+ virtual bool usesPrecisionModifiers() const;
+
+ virtual String getTypeName(const Type& type);
+
+ void writeType(const Type& type);
+
+ void writeExtension(const String& name);
+
+ void writeExtension(const String& name, bool require);
+
+ void writeInterfaceBlock(const InterfaceBlock& intf);
+
+ void writeFunctionStart(const FunctionDeclaration& f);
+
+ void writeFunctionDeclaration(const FunctionDeclaration& f);
+
+ virtual void writeFunction(const FunctionDefinition& f);
+
+ void writeLayout(const Layout& layout);
+
+ void writeModifiers(const Modifiers& modifiers, bool globalContext);
+
+ virtual void writeInputVars();
+
+ virtual void writeVarInitializer(const Variable& var, const Expression& value);
+
+ const char* getTypePrecision(const Type& type);
+
+ void writeTypePrecision(const Type& type);
+
+ void writeVarDeclarations(const VarDeclarations& decl, bool global);
+
+ void writeFragCoord();
+
+ virtual void writeVariableReference(const VariableReference& ref);
+
+ void writeExpression(const Expression& expr, Precedence parentPrecedence);
+
+ void writeIntrinsicCall(const FunctionCall& c);
+
+ void writeMinAbsHack(Expression& absExpr, Expression& otherExpr);
+
+ void writeDeterminantHack(const Expression& mat);
+
+ void writeInverseHack(const Expression& mat);
+
+ void writeTransposeHack(const Expression& mat);
+
+ void writeInverseSqrtHack(const Expression& x);
+
+ virtual void writeFunctionCall(const FunctionCall& c);
+
+ void writeConstructor(const Constructor& c, Precedence parentPrecedence);
+
+ virtual void writeFieldAccess(const FieldAccess& f);
+
+ void writeConstantSwizzle(const Swizzle& swizzle, const String& constants);
+
+ void writeSwizzleMask(const Swizzle& swizzle, const String& mask);
+
+ void writeSwizzleConstructor(const Swizzle& swizzle, const String& constants,
+ const String& mask, SwizzleOrder order);
+
+ void writeSwizzleConstructor(const Swizzle& swizzle, const String& constants,
+ const String& mask, const String& reswizzle);
+ virtual void writeSwizzle(const Swizzle& swizzle);
+
+ static Precedence GetBinaryPrecedence(Token::Kind op);
+
+ virtual void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+ void writeShortCircuitWorkaroundExpression(const BinaryExpression& b,
+ Precedence parentPrecedence);
+
+ void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+
+ virtual void writeIndexExpression(const IndexExpression& expr);
+
+ void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);
+
+ void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);
+
+ void writeBoolLiteral(const BoolLiteral& b);
+
+ virtual void writeIntLiteral(const IntLiteral& i);
+
+ void writeFloatLiteral(const FloatLiteral& f);
+
+ virtual void writeSetting(const Setting& s);
+
+ void writeStatement(const Statement& s);
+
+ void writeStatements(const std::vector<std::unique_ptr<Statement>>& statements);
+
+ void writeBlock(const Block& b);
+
+ virtual void writeIfStatement(const IfStatement& stmt);
+
+ void writeForStatement(const ForStatement& f);
+
+ void writeWhileStatement(const WhileStatement& w);
+
+ void writeDoStatement(const DoStatement& d);
+
+ virtual void writeSwitchStatement(const SwitchStatement& s);
+
+ virtual void writeReturnStatement(const ReturnStatement& r);
+
+ virtual void writeProgramElement(const ProgramElement& e);
+
+ const char* fLineEnding;
+ const Context& fContext;
+ StringStream fExtensions;
+ StringStream fGlobals;
+ StringStream fExtraFunctions;
+ String fFunctionHeader;
+ Program::Kind fProgramKind;
+ int fVarCount = 0;
+ int fIndentation = 0;
+ bool fAtLineStart = false;
+ // Keeps track of which struct types we have written. Given that we are unlikely to ever write
+ // more than one or two structs per shader, a simple linear search will be faster than anything
+ // fancier.
+ std::vector<const Type*> fWrittenStructs;
+ std::set<String> fWrittenIntrinsics;
+ // true if we have run into usages of dFdx / dFdy
+ bool fFoundDerivatives = false;
+ bool fFoundExternalSamplerDecl = false;
+ bool fFoundRectSamplerDecl = false;
+ bool fFoundGSInvocations = false;
+ bool fSetupFragPositionGlobal = false;
+ bool fSetupFragPositionLocal = false;
+ bool fSetupFragCoordWorkaround = false;
+ // if non-empty, replace all texture / texture2D / textureProj / etc. calls with this name
+ String fTextureFunctionOverride;
+
+    // We map function names to a function class so that we can quickly deal with function calls
+    // that need extra processing.
+ enum class FunctionClass {
+ kAbs,
+ kAtan,
+ kDeterminant,
+ kDFdx,
+ kDFdy,
+ kFwidth,
+ kFMA,
+ kFract,
+ kInverse,
+ kInverseSqrt,
+ kMin,
+ kPow,
+ kSaturate,
+ kTexture,
+ kTranspose
+ };
+ static std::unordered_map<StringFragment, FunctionClass>* fFunctionClasses;
+
+ typedef CodeGenerator INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.cpp
new file mode 100644
index 0000000000..31cc890c41
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.cpp
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLHCodeGenerator.h"
+
+#include "src/sksl/SkSLParser.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLEnum.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLSection.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#include <set>
+
+namespace SkSL {
+
+HCodeGenerator::HCodeGenerator(const Context* context, const Program* program,
+ ErrorReporter* errors, String name, OutputStream* out)
+: INHERITED(program, errors, out)
+, fContext(*context)
+, fName(std::move(name))
+, fFullName(String::printf("Gr%s", fName.c_str()))
+, fSectionAndParameterHelper(program, *errors) {}
+
+String HCodeGenerator::ParameterType(const Context& context, const Type& type,
+ const Layout& layout) {
+ Layout::CType ctype = ParameterCType(context, type, layout);
+ if (ctype != Layout::CType::kDefault) {
+ return Layout::CTypeToStr(ctype);
+ }
+ return type.name();
+}
+
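+// Maps an SkSL parameter type onto the C++ type exposed in the generated
+// header: float/half scalars become float, the small integer types int32_t,
+// float2/half2 SkPoint, int2-style vectors SkIPoint, float4/half4 SkRect,
+// int4-style vectors SkIRect, and 3x3/4x4 matrices SkMatrix/SkMatrix44. An
+// explicit layout ctype always takes precedence, and nullable types map via
+// their component type.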
+Layout::CType HCodeGenerator::ParameterCType(const Context& context, const Type& type,
+ const Layout& layout) {
+ if (layout.fCType != Layout::CType::kDefault) {
+ return layout.fCType;
+ }
+ if (type.kind() == Type::kNullable_Kind) {
+ return ParameterCType(context, type.componentType(), layout);
+ } else if (type == *context.fFloat_Type || type == *context.fHalf_Type) {
+ return Layout::CType::kFloat;
+ } else if (type == *context.fInt_Type ||
+ type == *context.fShort_Type ||
+ type == *context.fByte_Type) {
+ return Layout::CType::kInt32;
+ } else if (type == *context.fFloat2_Type || type == *context.fHalf2_Type) {
+ return Layout::CType::kSkPoint;
+ } else if (type == *context.fInt2_Type ||
+ type == *context.fShort2_Type ||
+ type == *context.fByte2_Type) {
+ return Layout::CType::kSkIPoint;
+ } else if (type == *context.fInt4_Type ||
+ type == *context.fShort4_Type ||
+ type == *context.fByte4_Type) {
+ return Layout::CType::kSkIRect;
+ } else if (type == *context.fFloat4_Type || type == *context.fHalf4_Type) {
+ return Layout::CType::kSkRect;
+ } else if (type == *context.fFloat3x3_Type || type == *context.fHalf3x3_Type) {
+ return Layout::CType::kSkMatrix;
+ } else if (type == *context.fFloat4x4_Type || type == *context.fHalf4x4_Type) {
+ return Layout::CType::kSkMatrix44;
+ } else if (type.kind() == Type::kSampler_Kind) {
+ return Layout::CType::kGrTextureProxy;
+ } else if (type == *context.fFragmentProcessor_Type) {
+ return Layout::CType::kGrFragmentProcessor;
+ }
+ return Layout::CType::kDefault;
+}
+
+String HCodeGenerator::FieldType(const Context& context, const Type& type,
+ const Layout& layout) {
+ if (type.kind() == Type::kSampler_Kind) {
+ return "TextureSampler";
+ } else if (type == *context.fFragmentProcessor_Type) {
+        // we don't store fragment processors in fields; they are registered via
+        // registerChildProcessor instead
+ SkASSERT(false);
+ return "<error>";
+ }
+ return ParameterType(context, type, layout);
+}
+
+String HCodeGenerator::AccessType(const Context& context, const Type& type,
+ const Layout& layout) {
+ static const std::set<String> primitiveTypes = { "int32_t", "float", "bool", "SkPMColor" };
+
+ String fieldType = FieldType(context, type, layout);
+ bool isPrimitive = primitiveTypes.find(fieldType) != primitiveTypes.end();
+ if (isPrimitive) {
+ return fieldType;
+ } else {
+ return String::printf("const %s&", fieldType.c_str());
+ }
+}
+
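+// Formats into a fixed-size stack buffer first and only falls back to a heap
+// allocation when vsnprintf reports that the output did not fit; the va_list
+// is copied up front because it is consumed a second time on that path.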
+void HCodeGenerator::writef(const char* s, va_list va) {
+ static constexpr int BUFFER_SIZE = 1024;
+ va_list copy;
+ va_copy(copy, va);
+ char buffer[BUFFER_SIZE];
+ int length = vsnprintf(buffer, BUFFER_SIZE, s, va);
+ if (length < BUFFER_SIZE) {
+ fOut->write(buffer, length);
+ } else {
+ std::unique_ptr<char[]> heap(new char[length + 1]);
+ vsprintf(heap.get(), s, copy);
+ fOut->write(heap.get(), length);
+ }
+ va_end(copy);
+}
+
+void HCodeGenerator::writef(const char* s, ...) {
+ va_list va;
+ va_start(va, s);
+ this->writef(s, va);
+ va_end(va);
+}
+
+bool HCodeGenerator::writeSection(const char* name, const char* prefix) {
+ const Section* s = fSectionAndParameterHelper.getSection(name);
+ if (s) {
+ this->writef("%s%s", prefix, s->fText.c_str());
+ return true;
+ }
+ return false;
+}
+
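+// Worked example: given a @constructorParams section of 'int x, float y', this
+// emits "<separator>x, y". A hypothetical parameter like 'std::map<int, float> m'
+// would defeat the last-token-before-comma heuristic described below.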
+void HCodeGenerator::writeExtraConstructorParams(const char* separator) {
+    // A super-simple parse: just assume the last token before a comma is the name of a parameter
+    // (true as long as no multi-parameter template types are involved). This can be replaced with
+    // something more robust if the need arises.
+ const Section* section = fSectionAndParameterHelper.getSection(CONSTRUCTOR_PARAMS_SECTION);
+ if (section) {
+ const char* s = section->fText.c_str();
+ #define BUFFER_SIZE 64
+ char lastIdentifier[BUFFER_SIZE];
+ int lastIdentifierLength = 0;
+ bool foundBreak = false;
+ while (*s) {
+ char c = *s;
+ ++s;
+ if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') ||
+ c == '_') {
+ if (foundBreak) {
+ lastIdentifierLength = 0;
+ foundBreak = false;
+ }
+ SkASSERT(lastIdentifierLength < BUFFER_SIZE);
+ lastIdentifier[lastIdentifierLength] = c;
+ ++lastIdentifierLength;
+ } else {
+ foundBreak = true;
+ if (c == ',') {
+ SkASSERT(lastIdentifierLength < BUFFER_SIZE);
+ lastIdentifier[lastIdentifierLength] = 0;
+ this->writef("%s%s", separator, lastIdentifier);
+ separator = ", ";
+ } else if (c != ' ' && c != '\t' && c != '\n' && c != '\r') {
+ lastIdentifierLength = 0;
+ }
+ }
+ }
+ if (lastIdentifierLength) {
+ SkASSERT(lastIdentifierLength < BUFFER_SIZE);
+ lastIdentifier[lastIdentifierLength] = 0;
+ this->writef("%s%s", separator, lastIdentifier);
+ }
+ }
+}
+
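+// Unless the .fp file supplies its own @make section, this emits a factory of
+// the form (sketch):
+//     static std::unique_ptr<GrFragmentProcessor> Make(<params>) {
+//         return std::unique_ptr<GrFragmentProcessor>(new Gr<Name>(<args>));
+//     }
+// where fragmentProcessor-typed arguments are passed through std::move().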
+void HCodeGenerator::writeMake() {
+ const char* separator;
+ if (!this->writeSection(MAKE_SECTION)) {
+ this->writef(" static std::unique_ptr<GrFragmentProcessor> Make(");
+ separator = "";
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ this->writef("%s%s %s", separator, ParameterType(fContext, param->fType,
+ param->fModifiers.fLayout).c_str(),
+ String(param->fName).c_str());
+ separator = ", ";
+ }
+ this->writeSection(CONSTRUCTOR_PARAMS_SECTION, separator);
+ this->writef(") {\n"
+ " return std::unique_ptr<GrFragmentProcessor>(new %s(",
+ fFullName.c_str());
+ separator = "";
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ this->writef("%sstd::move(%s)", separator, String(param->fName).c_str());
+ } else {
+ this->writef("%s%s", separator, String(param->fName).c_str());
+ }
+ separator = ", ";
+ }
+ this->writeExtraConstructorParams(separator);
+ this->writef("));\n"
+ " }\n");
+ }
+}
+
+void HCodeGenerator::failOnSection(const char* section, const char* msg) {
+ std::vector<const Section*> s = fSectionAndParameterHelper.getSections(section);
+ if (s.size()) {
+ fErrors.error(s[0]->fOffset, String("@") + section + " " + msg);
+ }
+}
+
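+// Emits the generated constructor unless an @constructor section overrides it:
+// the initializer list chains INHERITED with the class ID and optimization
+// flags, then coord transforms and one field per parameter; the body registers
+// child processors, sets the sampler count, and adds the coord transforms.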
+void HCodeGenerator::writeConstructor() {
+ if (this->writeSection(CONSTRUCTOR_SECTION)) {
+ const char* msg = "may not be present when constructor is overridden";
+ this->failOnSection(CONSTRUCTOR_CODE_SECTION, msg);
+ this->failOnSection(CONSTRUCTOR_PARAMS_SECTION, msg);
+ this->failOnSection(INITIALIZERS_SECTION, msg);
+ this->failOnSection(OPTIMIZATION_FLAGS_SECTION, msg);
+ return;
+ }
+ this->writef(" %s(", fFullName.c_str());
+ const char* separator = "";
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ this->writef("%s%s %s", separator, ParameterType(fContext, param->fType,
+ param->fModifiers.fLayout).c_str(),
+ String(param->fName).c_str());
+ separator = ", ";
+ }
+ this->writeSection(CONSTRUCTOR_PARAMS_SECTION, separator);
+ this->writef(")\n"
+ " : INHERITED(k%s_ClassID", fFullName.c_str());
+ if (!this->writeSection(OPTIMIZATION_FLAGS_SECTION, ", (OptimizationFlags) ")) {
+ this->writef(", kNone_OptimizationFlags");
+ }
+ this->writef(")");
+ this->writeSection(INITIALIZERS_SECTION, "\n , ");
+ const auto transforms = fSectionAndParameterHelper.getSections(COORD_TRANSFORM_SECTION);
+ for (size_t i = 0; i < transforms.size(); ++i) {
+ const Section& s = *transforms[i];
+ String field = CoordTransformName(s.fArgument.c_str(), i);
+ if (s.fArgument.size()) {
+ this->writef("\n , %s(%s, %s.get())", field.c_str(), s.fText.c_str(),
+ FieldName(s.fArgument.c_str()).c_str());
+        } else {
+ this->writef("\n , %s(%s)", field.c_str(), s.fText.c_str());
+ }
+ }
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ String nameString(param->fName);
+ const char* name = nameString.c_str();
+ const Type& type = param->fType.nonnullable();
+ if (type.kind() == Type::kSampler_Kind) {
+ this->writef("\n , %s(std::move(%s)", FieldName(name).c_str(), name);
+ for (const Section* s : fSectionAndParameterHelper.getSections(
+ SAMPLER_PARAMS_SECTION)) {
+ if (s->fArgument == name) {
+ this->writef(", %s", s->fText.c_str());
+ }
+ }
+ this->writef(")");
+ } else if (type == *fContext.fFragmentProcessor_Type) {
+ // do nothing
+ } else {
+ this->writef("\n , %s(%s)", FieldName(name).c_str(), name);
+ }
+ }
+ this->writef(" {\n");
+ this->writeSection(CONSTRUCTOR_CODE_SECTION);
+ int samplerCount = 0;
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.kind() == Type::kSampler_Kind) {
+ ++samplerCount;
+ } else if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ if (param->fType.kind() == Type::kNullable_Kind) {
+ this->writef(" if (%s) {\n", String(param->fName).c_str());
+ } else {
+ this->writef(" SkASSERT(%s);", String(param->fName).c_str());
+ }
+ this->writef(" %s_index = this->numChildProcessors();",
+ FieldName(String(param->fName).c_str()).c_str());
+ if (fSectionAndParameterHelper.hasCoordOverrides(*param)) {
+ this->writef(" %s->setComputeLocalCoordsInVertexShader(false);",
+ String(param->fName).c_str());
+ }
+ this->writef(" this->registerChildProcessor(std::move(%s));",
+ String(param->fName).c_str());
+ if (param->fType.kind() == Type::kNullable_Kind) {
+ this->writef(" }");
+ }
+ }
+ }
+ if (samplerCount) {
+ this->writef(" this->setTextureSamplerCnt(%d);", samplerCount);
+ }
+ for (size_t i = 0; i < transforms.size(); ++i) {
+ const Section& s = *transforms[i];
+ String field = CoordTransformName(s.fArgument.c_str(), i);
+ this->writef(" this->addCoordTransform(&%s);\n", field.c_str());
+ }
+ this->writef(" }\n");
+}
+
+void HCodeGenerator::writeFields() {
+ this->writeSection(FIELDS_SECTION);
+ const auto transforms = fSectionAndParameterHelper.getSections(COORD_TRANSFORM_SECTION);
+ for (size_t i = 0; i < transforms.size(); ++i) {
+ const Section& s = *transforms[i];
+ this->writef(" GrCoordTransform %s;\n",
+ CoordTransformName(s.fArgument.c_str(), i).c_str());
+ }
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ String name = FieldName(String(param->fName).c_str());
+ if (param->fType.nonnullable() == *fContext.fFragmentProcessor_Type) {
+ this->writef(" int %s_index = -1;\n", name.c_str());
+ } else {
+ this->writef(" %s %s;\n", FieldType(fContext, param->fType,
+ param->fModifiers.fLayout).c_str(),
+ name.c_str());
+ }
+ }
+}
+
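+// Returns the file's leading block comment (skipping only whitespace before
+// it), or the empty string if the source does not begin with one.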
+String HCodeGenerator::GetHeader(const Program& program, ErrorReporter& errors) {
+ SymbolTable types(&errors);
+ Parser parser(program.fSource->c_str(), program.fSource->length(), types, errors);
+ for (;;) {
+ Token header = parser.nextRawToken();
+ switch (header.fKind) {
+ case Token::WHITESPACE:
+ break;
+ case Token::BLOCK_COMMENT:
+ return String(program.fSource->c_str() + header.fOffset, header.fLength);
+ default:
+ return "";
+ }
+ }
+}
+
+bool HCodeGenerator::generateCode() {
+ this->writef("%s\n", GetHeader(fProgram, fErrors).c_str());
+ this->writef(kFragmentProcessorHeader, fFullName.c_str());
+ this->writef("#ifndef %s_DEFINED\n"
+ "#define %s_DEFINED\n",
+ fFullName.c_str(),
+ fFullName.c_str());
+ this->writef("#include \"include/core/SkTypes.h\"\n");
+ this->writeSection(HEADER_SECTION);
+ this->writef("\n"
+ "#include \"src/gpu/GrCoordTransform.h\"\n"
+ "#include \"src/gpu/GrFragmentProcessor.h\"\n");
+ this->writef("class %s : public GrFragmentProcessor {\n"
+ "public:\n",
+ fFullName.c_str());
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kEnum_Kind == p.fKind && !((Enum&) p).fBuiltin) {
+ this->writef("%s\n", p.description().c_str());
+ }
+ }
+ this->writeSection(CLASS_SECTION);
+ this->writeMake();
+ this->writef(" %s(const %s& src);\n"
+ " std::unique_ptr<GrFragmentProcessor> clone() const override;\n"
+ " const char* name() const override { return \"%s\"; }\n",
+ fFullName.c_str(), fFullName.c_str(), fName.c_str());
+ this->writeFields();
+ this->writef("private:\n");
+ this->writeConstructor();
+ this->writef(" GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;\n"
+ " void onGetGLSLProcessorKey(const GrShaderCaps&,"
+ "GrProcessorKeyBuilder*) const override;\n"
+ " bool onIsEqual(const GrFragmentProcessor&) const override;\n");
+ for (const auto& param : fSectionAndParameterHelper.getParameters()) {
+ if (param->fType.kind() == Type::kSampler_Kind) {
+ this->writef(" const TextureSampler& onTextureSampler(int) const override;");
+ break;
+ }
+ }
+ this->writef(" GR_DECLARE_FRAGMENT_PROCESSOR_TEST\n");
+ this->writef(" typedef GrFragmentProcessor INHERITED;\n"
+ "};\n");
+ this->writeSection(HEADER_END_SECTION);
+ this->writef("#endif\n");
+ return 0 == fErrors.errorCount();
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.h
new file mode 100644
index 0000000000..03ab038a6d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLHCodeGenerator.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_HCODEGENERATOR
+#define SKSL_HCODEGENERATOR
+
+#include "src/sksl/SkSLCodeGenerator.h"
+#include "src/sksl/SkSLSectionAndParameterHelper.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <cctype>
+
+constexpr const char* kFragmentProcessorHeader =
+R"(
+/**************************************************************************************************
+ *** This file was autogenerated from %s.fp; do not modify.
+ **************************************************************************************************/
+)";
+
+namespace SkSL {
+
+class HCodeGenerator : public CodeGenerator {
+public:
+ HCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ String name, OutputStream* out);
+
+ bool generateCode() override;
+
+ static String ParameterType(const Context& context, const Type& type, const Layout& layout);
+
+ static Layout::CType ParameterCType(const Context& context, const Type& type,
+ const Layout& layout);
+
+ static String FieldType(const Context& context, const Type& type, const Layout& layout);
+
+    // Either the field type, or a const reference to the field type if the field type is complex.
+ static String AccessType(const Context& context, const Type& type, const Layout& layout);
+
+ static String FieldName(const char* varName) {
+ return String(varName);
+ }
+
+ static String CoordTransformName(const String& arg, int index) {
+ if (arg.size()) {
+ return HCodeGenerator::FieldName(arg.c_str()) + "CoordTransform";
+ }
+ return "fCoordTransform" + to_string(index);
+ }
+
+ static String GetHeader(const Program& program, ErrorReporter& errors);
+
+private:
+ void writef(const char* s, va_list va) SKSL_PRINTF_LIKE(2, 0);
+
+ void writef(const char* s, ...) SKSL_PRINTF_LIKE(2, 3);
+
+ bool writeSection(const char* name, const char* prefix = "");
+
+    // Given a @constructorParams section such as 'int x, float y', writes out "<separator>x, y".
+    // Writes nothing (not even the separator) if there is no @constructorParams section.
+ void writeExtraConstructorParams(const char* separator);
+
+ void writeMake();
+
+ void writeConstructor();
+
+ void writeFields();
+
+ void failOnSection(const char* section, const char* msg);
+
+ const Context& fContext;
+ String fName;
+ String fFullName;
+ SectionAndParameterHelper fSectionAndParameterHelper;
+
+ typedef CodeGenerator INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp
new file mode 100644
index 0000000000..9b71c17ffd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp
@@ -0,0 +1,2493 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLIRGenerator.h"
+
+#include "limits.h"
+#include <unordered_set>
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLParser.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLBreakStatement.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLContinueStatement.h"
+#include "src/sksl/ir/SkSLDiscardStatement.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLEnum.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExternalFunctionCall.h"
+#include "src/sksl/ir/SkSLExternalValueReference.h"
+#include "src/sksl/ir/SkSLField.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLFunctionReference.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLLayout.h"
+#include "src/sksl/ir/SkSLNullLiteral.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLUnresolvedFunction.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
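+// RAII helpers: each pushes a piece of per-scope IRGenerator state (symbol
+// table, loop depth, switch depth) on construction and restores it on
+// destruction, so early returns in the convert* methods cannot leak state.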
+class AutoSymbolTable {
+public:
+ AutoSymbolTable(IRGenerator* ir)
+ : fIR(ir)
+ , fPrevious(fIR->fSymbolTable) {
+ fIR->pushSymbolTable();
+ }
+
+ ~AutoSymbolTable() {
+ fIR->popSymbolTable();
+ SkASSERT(fPrevious == fIR->fSymbolTable);
+ }
+
+ IRGenerator* fIR;
+ std::shared_ptr<SymbolTable> fPrevious;
+};
+
+class AutoLoopLevel {
+public:
+ AutoLoopLevel(IRGenerator* ir)
+ : fIR(ir) {
+ fIR->fLoopLevel++;
+ }
+
+ ~AutoLoopLevel() {
+ fIR->fLoopLevel--;
+ }
+
+ IRGenerator* fIR;
+};
+
+class AutoSwitchLevel {
+public:
+ AutoSwitchLevel(IRGenerator* ir)
+ : fIR(ir) {
+ fIR->fSwitchLevel++;
+ }
+
+ ~AutoSwitchLevel() {
+ fIR->fSwitchLevel--;
+ }
+
+ IRGenerator* fIR;
+};
+
+IRGenerator::IRGenerator(const Context* context, std::shared_ptr<SymbolTable> symbolTable,
+ ErrorReporter& errorReporter)
+: fContext(*context)
+, fCurrentFunction(nullptr)
+, fRootSymbolTable(symbolTable)
+, fSymbolTable(symbolTable)
+, fLoopLevel(0)
+, fSwitchLevel(0)
+, fTmpCount(0)
+, fErrors(errorReporter) {}
+
+void IRGenerator::pushSymbolTable() {
+ fSymbolTable.reset(new SymbolTable(std::move(fSymbolTable), &fErrors));
+}
+
+void IRGenerator::popSymbolTable() {
+ fSymbolTable = fSymbolTable->fParent;
+}
+
+static void fill_caps(const SKSL_CAPS_CLASS& caps,
+ std::unordered_map<String, Program::Settings::Value>* capsMap) {
+#define CAP(name) \
+ capsMap->insert(std::make_pair(String(#name), Program::Settings::Value(caps.name())))
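+    // e.g. CAP(integerSupport) expands to an insert of the key "integerSupport"
+    // mapped to Program::Settings::Value(caps.integerSupport()).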
+ CAP(fbFetchSupport);
+ CAP(fbFetchNeedsCustomOutput);
+ CAP(flatInterpolationSupport);
+ CAP(noperspectiveInterpolationSupport);
+ CAP(sampleVariablesSupport);
+ CAP(externalTextureSupport);
+ CAP(mustEnableAdvBlendEqs);
+ CAP(mustEnableSpecificAdvBlendEqs);
+ CAP(mustDeclareFragmentShaderOutput);
+ CAP(mustDoOpBetweenFloorAndAbs);
+ CAP(atan2ImplementedAsAtanYOverX);
+ CAP(canUseAnyFunctionInShader);
+ CAP(floatIs32Bits);
+ CAP(integerSupport);
+#undef CAP
+}
+
+void IRGenerator::start(const Program::Settings* settings,
+ std::vector<std::unique_ptr<ProgramElement>>* inherited) {
+ if (fStarted) {
+ this->popSymbolTable();
+ }
+ fSettings = settings;
+ fCapsMap.clear();
+ if (settings->fCaps) {
+ fill_caps(*settings->fCaps, &fCapsMap);
+ } else {
+ fCapsMap.insert(std::make_pair(String("integerSupport"),
+ Program::Settings::Value(true)));
+ }
+ this->pushSymbolTable();
+ fInvocations = -1;
+ fInputs.reset();
+ fSkPerVertex = nullptr;
+ fRTAdjust = nullptr;
+ fRTAdjustInterfaceBlock = nullptr;
+ if (inherited) {
+ for (const auto& e : *inherited) {
+ if (e->fKind == ProgramElement::kInterfaceBlock_Kind) {
+ InterfaceBlock& intf = (InterfaceBlock&) *e;
+ if (intf.fVariable.fName == Compiler::PERVERTEX_NAME) {
+ SkASSERT(!fSkPerVertex);
+ fSkPerVertex = &intf.fVariable;
+ }
+ }
+ }
+ }
+}
+
+std::unique_ptr<Extension> IRGenerator::convertExtension(int offset, StringFragment name) {
+ return std::unique_ptr<Extension>(new Extension(offset, name));
+}
+
+void IRGenerator::finish() {
+ this->popSymbolTable();
+ fSettings = nullptr;
+}
+
+std::unique_ptr<Statement> IRGenerator::convertStatement(const ASTNode& statement) {
+ switch (statement.fKind) {
+ case ASTNode::Kind::kBlock:
+ return this->convertBlock(statement);
+ case ASTNode::Kind::kVarDeclarations:
+ return this->convertVarDeclarationStatement(statement);
+ case ASTNode::Kind::kIf:
+ return this->convertIf(statement);
+ case ASTNode::Kind::kFor:
+ return this->convertFor(statement);
+ case ASTNode::Kind::kWhile:
+ return this->convertWhile(statement);
+ case ASTNode::Kind::kDo:
+ return this->convertDo(statement);
+ case ASTNode::Kind::kSwitch:
+ return this->convertSwitch(statement);
+ case ASTNode::Kind::kReturn:
+ return this->convertReturn(statement);
+ case ASTNode::Kind::kBreak:
+ return this->convertBreak(statement);
+ case ASTNode::Kind::kContinue:
+ return this->convertContinue(statement);
+ case ASTNode::Kind::kDiscard:
+ return this->convertDiscard(statement);
+ default:
+ // it's an expression
+ std::unique_ptr<Statement> result = this->convertExpressionStatement(statement);
+ if (fRTAdjust && Program::kGeometry_Kind == fKind) {
+ SkASSERT(result->fKind == Statement::kExpression_Kind);
+ Expression& expr = *((ExpressionStatement&) *result).fExpression;
+ if (expr.fKind == Expression::kFunctionCall_Kind) {
+ FunctionCall& fc = (FunctionCall&) expr;
+ if (fc.fFunction.fBuiltin && fc.fFunction.fName == "EmitVertex") {
+ std::vector<std::unique_ptr<Statement>> statements;
+ statements.push_back(getNormalizeSkPositionCode());
+ statements.push_back(std::move(result));
+ return std::unique_ptr<Block>(new Block(statement.fOffset,
+ std::move(statements),
+ fSymbolTable));
+ }
+ }
+ }
+ return result;
+ }
+}
+
+std::unique_ptr<Block> IRGenerator::convertBlock(const ASTNode& block) {
+ SkASSERT(block.fKind == ASTNode::Kind::kBlock);
+ AutoSymbolTable table(this);
+ std::vector<std::unique_ptr<Statement>> statements;
+ for (const auto& child : block) {
+ std::unique_ptr<Statement> statement = this->convertStatement(child);
+ if (!statement) {
+ return nullptr;
+ }
+ statements.push_back(std::move(statement));
+ }
+ return std::unique_ptr<Block>(new Block(block.fOffset, std::move(statements), fSymbolTable));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertVarDeclarationStatement(const ASTNode& s) {
+ SkASSERT(s.fKind == ASTNode::Kind::kVarDeclarations);
+ auto decl = this->convertVarDeclarations(s, Variable::kLocal_Storage);
+ if (!decl) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new VarDeclarationsStatement(std::move(decl)));
+}
+
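+// Converts a declaration list such as 'int x[3] = ..., y;': the base type is
+// resolved once, each declarator then wraps it in array types as needed, and
+// special names (sk_RTAdjust, sk_FragColor) receive dedicated handling.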
+std::unique_ptr<VarDeclarations> IRGenerator::convertVarDeclarations(const ASTNode& decls,
+ Variable::Storage storage) {
+ SkASSERT(decls.fKind == ASTNode::Kind::kVarDeclarations);
+ auto iter = decls.begin();
+ const Modifiers& modifiers = iter++->getModifiers();
+ const ASTNode& rawType = *(iter++);
+ std::vector<std::unique_ptr<VarDeclaration>> variables;
+ const Type* baseType = this->convertType(rawType);
+ if (!baseType) {
+ return nullptr;
+ }
+ if (fKind != Program::kFragmentProcessor_Kind &&
+ (modifiers.fFlags & Modifiers::kIn_Flag) &&
+ baseType->kind() == Type::Kind::kMatrix_Kind) {
+ fErrors.error(decls.fOffset, "'in' variables may not have matrix type");
+ }
+ if (modifiers.fLayout.fWhen.fLength && fKind != Program::kFragmentProcessor_Kind &&
+ fKind != Program::kPipelineStage_Kind) {
+ fErrors.error(decls.fOffset, "'when' is only permitted within fragment processors");
+ }
+ if (modifiers.fLayout.fKey) {
+ if (fKind != Program::kFragmentProcessor_Kind && fKind != Program::kPipelineStage_Kind) {
+ fErrors.error(decls.fOffset, "'key' is only permitted within fragment processors");
+ }
+ if ((modifiers.fFlags & Modifiers::kUniform_Flag) != 0) {
+ fErrors.error(decls.fOffset, "'key' is not permitted on 'uniform' variables");
+ }
+ }
+ for (; iter != decls.end(); ++iter) {
+ const ASTNode& varDecl = *iter;
+ if (modifiers.fLayout.fLocation == 0 && modifiers.fLayout.fIndex == 0 &&
+ (modifiers.fFlags & Modifiers::kOut_Flag) && fKind == Program::kFragment_Kind &&
+ varDecl.getVarData().fName != "sk_FragColor") {
+ fErrors.error(varDecl.fOffset,
+ "out location=0, index=0 is reserved for sk_FragColor");
+ }
+ const ASTNode::VarData& varData = varDecl.getVarData();
+ const Type* type = baseType;
+ std::vector<std::unique_ptr<Expression>> sizes;
+ auto iter = varDecl.begin();
+ for (size_t i = 0; i < varData.fSizeCount; ++i, ++iter) {
+ const ASTNode& rawSize = *iter;
+ if (rawSize) {
+ auto size = this->coerce(this->convertExpression(rawSize), *fContext.fInt_Type);
+ if (!size) {
+ return nullptr;
+ }
+ String name(type->fName);
+ int64_t count;
+ if (size->fKind == Expression::kIntLiteral_Kind) {
+ count = ((IntLiteral&) *size).fValue;
+ if (count <= 0) {
+ fErrors.error(size->fOffset, "array size must be positive");
+ return nullptr;
+ }
+ name += "[" + to_string(count) + "]";
+ } else {
+ fErrors.error(size->fOffset, "array size must be specified");
+ return nullptr;
+ }
+ type = (Type*) fSymbolTable->takeOwnership(
+ std::unique_ptr<Symbol>(new Type(name,
+ Type::kArray_Kind,
+ *type,
+ (int) count)));
+ sizes.push_back(std::move(size));
+ } else {
+ type = (Type*) fSymbolTable->takeOwnership(
+ std::unique_ptr<Symbol>(new Type(type->name() + "[]",
+ Type::kArray_Kind,
+ *type,
+ -1)));
+ sizes.push_back(nullptr);
+ }
+ }
+ auto var = std::unique_ptr<Variable>(new Variable(varDecl.fOffset, modifiers,
+ varData.fName, *type, storage));
+ if (var->fName == Compiler::RTADJUST_NAME) {
+ SkASSERT(!fRTAdjust);
+ SkASSERT(var->fType == *fContext.fFloat4_Type);
+ fRTAdjust = var.get();
+ }
+ std::unique_ptr<Expression> value;
+ if (iter != varDecl.end()) {
+ value = this->convertExpression(*iter);
+ if (!value) {
+ return nullptr;
+ }
+ value = this->coerce(std::move(value), *type);
+ if (!value) {
+ return nullptr;
+ }
+ var->fWriteCount = 1;
+ var->fInitialValue = value.get();
+ }
+ if (storage == Variable::kGlobal_Storage && var->fName == "sk_FragColor" &&
+ (*fSymbolTable)[var->fName]) {
+ // already defined, ignore
+ } else if (storage == Variable::kGlobal_Storage && (*fSymbolTable)[var->fName] &&
+ (*fSymbolTable)[var->fName]->fKind == Symbol::kVariable_Kind &&
+ ((Variable*) (*fSymbolTable)[var->fName])->fModifiers.fLayout.fBuiltin >= 0) {
+ // already defined, just update the modifiers
+ Variable* old = (Variable*) (*fSymbolTable)[var->fName];
+ old->fModifiers = var->fModifiers;
+ } else {
+ variables.emplace_back(new VarDeclaration(var.get(), std::move(sizes),
+ std::move(value)));
+ StringFragment name = var->fName;
+ fSymbolTable->add(name, std::move(var));
+ }
+ }
+ return std::unique_ptr<VarDeclarations>(new VarDeclarations(decls.fOffset,
+ baseType,
+ std::move(variables)));
+}
+
+std::unique_ptr<ModifiersDeclaration> IRGenerator::convertModifiersDeclaration(const ASTNode& m) {
+ SkASSERT(m.fKind == ASTNode::Kind::kModifiers);
+ Modifiers modifiers = m.getModifiers();
+ if (modifiers.fLayout.fInvocations != -1) {
+ if (fKind != Program::kGeometry_Kind) {
+ fErrors.error(m.fOffset, "'invocations' is only legal in geometry shaders");
+ return nullptr;
+ }
+ fInvocations = modifiers.fLayout.fInvocations;
+ if (fSettings->fCaps && !fSettings->fCaps->gsInvocationsSupport()) {
+ modifiers.fLayout.fInvocations = -1;
+ Variable* invocationId = (Variable*) (*fSymbolTable)["sk_InvocationID"];
+ SkASSERT(invocationId);
+ invocationId->fModifiers.fFlags = 0;
+ invocationId->fModifiers.fLayout.fBuiltin = -1;
+ if (modifiers.fLayout.description() == "") {
+ return nullptr;
+ }
+ }
+ }
+ if (modifiers.fLayout.fMaxVertices != -1 && fInvocations > 0 && fSettings->fCaps &&
+ !fSettings->fCaps->gsInvocationsSupport()) {
+ modifiers.fLayout.fMaxVertices *= fInvocations;
+ }
+ return std::unique_ptr<ModifiersDeclaration>(new ModifiersDeclaration(modifiers));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertIf(const ASTNode& n) {
+ SkASSERT(n.fKind == ASTNode::Kind::kIf);
+ auto iter = n.begin();
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*(iter++)),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> ifTrue = this->convertStatement(*(iter++));
+ if (!ifTrue) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> ifFalse;
+ if (iter != n.end()) {
+ ifFalse = this->convertStatement(*(iter++));
+ if (!ifFalse) {
+ return nullptr;
+ }
+ }
+ if (test->fKind == Expression::kBoolLiteral_Kind) {
+ // static boolean value, fold down to a single branch
+ if (((BoolLiteral&) *test).fValue) {
+ return ifTrue;
+ } else if (ifFalse) {
+ return ifFalse;
+ } else {
+ // False & no else clause. Not an error, so don't return null!
+ std::vector<std::unique_ptr<Statement>> empty;
+ return std::unique_ptr<Statement>(new Block(n.fOffset, std::move(empty),
+ fSymbolTable));
+ }
+ }
+ return std::unique_ptr<Statement>(new IfStatement(n.fOffset, n.getBool(), std::move(test),
+ std::move(ifTrue), std::move(ifFalse)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertFor(const ASTNode& f) {
+ SkASSERT(f.fKind == ASTNode::Kind::kFor);
+ AutoLoopLevel level(this);
+ AutoSymbolTable table(this);
+ std::unique_ptr<Statement> initializer;
+ auto iter = f.begin();
+ if (*iter) {
+ initializer = this->convertStatement(*iter);
+ if (!initializer) {
+ return nullptr;
+ }
+ }
+ ++iter;
+ std::unique_ptr<Expression> test;
+ if (*iter) {
+ test = this->coerce(this->convertExpression(*iter), *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ }
+ ++iter;
+ std::unique_ptr<Expression> next;
+ if (*iter) {
+ next = this->convertExpression(*iter);
+ if (!next) {
+ return nullptr;
+ }
+ this->checkValid(*next);
+ }
+ ++iter;
+ std::unique_ptr<Statement> statement = this->convertStatement(*iter);
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new ForStatement(f.fOffset, std::move(initializer),
+ std::move(test), std::move(next),
+ std::move(statement), fSymbolTable));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertWhile(const ASTNode& w) {
+ SkASSERT(w.fKind == ASTNode::Kind::kWhile);
+ AutoLoopLevel level(this);
+ auto iter = w.begin();
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*(iter++)),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> statement = this->convertStatement(*(iter++));
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new WhileStatement(w.fOffset, std::move(test),
+ std::move(statement)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertDo(const ASTNode& d) {
+ SkASSERT(d.fKind == ASTNode::Kind::kDo);
+ AutoLoopLevel level(this);
+ auto iter = d.begin();
+ std::unique_ptr<Statement> statement = this->convertStatement(*(iter++));
+ if (!statement) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*(iter++)),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new DoStatement(d.fOffset, std::move(statement),
+ std::move(test)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertSwitch(const ASTNode& s) {
+ SkASSERT(s.fKind == ASTNode::Kind::kSwitch);
+ AutoSwitchLevel level(this);
+ auto iter = s.begin();
+ std::unique_ptr<Expression> value = this->convertExpression(*(iter++));
+ if (!value) {
+ return nullptr;
+ }
+ if (value->fType != *fContext.fUInt_Type && value->fType.kind() != Type::kEnum_Kind) {
+ value = this->coerce(std::move(value), *fContext.fInt_Type);
+ if (!value) {
+ return nullptr;
+ }
+ }
+ AutoSymbolTable table(this);
+ std::unordered_set<int> caseValues;
+ std::vector<std::unique_ptr<SwitchCase>> cases;
+ for (; iter != s.end(); ++iter) {
+ const ASTNode& c = *iter;
+ SkASSERT(c.fKind == ASTNode::Kind::kSwitchCase);
+ std::unique_ptr<Expression> caseValue;
+ auto childIter = c.begin();
+ if (*childIter) {
+ caseValue = this->convertExpression(*childIter);
+ if (!caseValue) {
+ return nullptr;
+ }
+ caseValue = this->coerce(std::move(caseValue), value->fType);
+ if (!caseValue) {
+ return nullptr;
+ }
+ if (!caseValue->isConstant()) {
+ fErrors.error(caseValue->fOffset, "case value must be a constant");
+ return nullptr;
+ }
+ int64_t v;
+ this->getConstantInt(*caseValue, &v);
+ if (caseValues.find(v) != caseValues.end()) {
+ fErrors.error(caseValue->fOffset, "duplicate case value");
+ }
+ caseValues.insert(v);
+ }
+ ++childIter;
+ std::vector<std::unique_ptr<Statement>> statements;
+ for (; childIter != c.end(); ++childIter) {
+ std::unique_ptr<Statement> converted = this->convertStatement(*childIter);
+ if (!converted) {
+ return nullptr;
+ }
+ statements.push_back(std::move(converted));
+ }
+ cases.emplace_back(new SwitchCase(c.fOffset, std::move(caseValue),
+ std::move(statements)));
+ }
+ return std::unique_ptr<Statement>(new SwitchStatement(s.fOffset, s.getBool(),
+ std::move(value), std::move(cases),
+ fSymbolTable));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertExpressionStatement(const ASTNode& s) {
+ std::unique_ptr<Expression> e = this->convertExpression(s);
+ if (!e) {
+ return nullptr;
+ }
+ this->checkValid(*e);
+ return std::unique_ptr<Statement>(new ExpressionStatement(std::move(e)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertReturn(const ASTNode& r) {
+ SkASSERT(r.fKind == ASTNode::Kind::kReturn);
+ SkASSERT(fCurrentFunction);
+ // early returns from a vertex main function will bypass the sk_Position normalization, so
+ // SkASSERT that we aren't doing that. It is of course possible to fix this by adding a
+ // normalization before each return, but it will probably never actually be necessary.
+ SkASSERT(Program::kVertex_Kind != fKind || !fRTAdjust || "main" != fCurrentFunction->fName);
+ if (r.begin() != r.end()) {
+ std::unique_ptr<Expression> result = this->convertExpression(*r.begin());
+ if (!result) {
+ return nullptr;
+ }
+ if (fCurrentFunction->fReturnType == *fContext.fVoid_Type) {
+ fErrors.error(result->fOffset, "may not return a value from a void function");
+ } else {
+ result = this->coerce(std::move(result), fCurrentFunction->fReturnType);
+ if (!result) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Statement>(new ReturnStatement(std::move(result)));
+ } else {
+ if (fCurrentFunction->fReturnType != *fContext.fVoid_Type) {
+ fErrors.error(r.fOffset, "expected function to return '" +
+ fCurrentFunction->fReturnType.description() + "'");
+ }
+ return std::unique_ptr<Statement>(new ReturnStatement(r.fOffset));
+ }
+}
+
+std::unique_ptr<Statement> IRGenerator::convertBreak(const ASTNode& b) {
+ SkASSERT(b.fKind == ASTNode::Kind::kBreak);
+ if (fLoopLevel > 0 || fSwitchLevel > 0) {
+ return std::unique_ptr<Statement>(new BreakStatement(b.fOffset));
+ } else {
+ fErrors.error(b.fOffset, "break statement must be inside a loop or switch");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Statement> IRGenerator::convertContinue(const ASTNode& c) {
+ SkASSERT(c.fKind == ASTNode::Kind::kContinue);
+ if (fLoopLevel > 0) {
+ return std::unique_ptr<Statement>(new ContinueStatement(c.fOffset));
+ } else {
+ fErrors.error(c.fOffset, "continue statement must be inside a loop");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Statement> IRGenerator::convertDiscard(const ASTNode& d) {
+ SkASSERT(d.fKind == ASTNode::Kind::kDiscard);
+ return std::unique_ptr<Statement>(new DiscardStatement(d.fOffset));
+}
+
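+// When the driver lacks geometry-shader instancing support, this moves the body
+// of main() into a helper (_invoke) and returns a replacement body that is, in
+// effect:
+//     for (sk_InvocationID = 0; sk_InvocationID < fInvocations; sk_InvocationID++) {
+//         _invoke();
+//         EndPrimitive();
+//     }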
+std::unique_ptr<Block> IRGenerator::applyInvocationIDWorkaround(std::unique_ptr<Block> main) {
+ Layout invokeLayout;
+ Modifiers invokeModifiers(invokeLayout, Modifiers::kHasSideEffects_Flag);
+ FunctionDeclaration* invokeDecl = new FunctionDeclaration(-1,
+ invokeModifiers,
+ "_invoke",
+ std::vector<const Variable*>(),
+ *fContext.fVoid_Type);
+ fProgramElements->push_back(std::unique_ptr<ProgramElement>(
+ new FunctionDefinition(-1, *invokeDecl, std::move(main))));
+ fSymbolTable->add(invokeDecl->fName, std::unique_ptr<FunctionDeclaration>(invokeDecl));
+
+ std::vector<std::unique_ptr<VarDeclaration>> variables;
+ Variable* loopIdx = (Variable*) (*fSymbolTable)["sk_InvocationID"];
+ SkASSERT(loopIdx);
+ std::unique_ptr<Expression> test(new BinaryExpression(-1,
+ std::unique_ptr<Expression>(new VariableReference(-1, *loopIdx)),
+ Token::LT,
+ std::unique_ptr<IntLiteral>(new IntLiteral(fContext, -1, fInvocations)),
+ *fContext.fBool_Type));
+ std::unique_ptr<Expression> next(new PostfixExpression(
+ std::unique_ptr<Expression>(
+ new VariableReference(-1,
+ *loopIdx,
+ VariableReference::kReadWrite_RefKind)),
+ Token::PLUSPLUS));
+ ASTNode endPrimitiveID(&fFile->fNodes, -1, ASTNode::Kind::kIdentifier, "EndPrimitive");
+ std::unique_ptr<Expression> endPrimitive = this->convertExpression(endPrimitiveID);
+ SkASSERT(endPrimitive);
+
+ std::vector<std::unique_ptr<Statement>> loopBody;
+ std::vector<std::unique_ptr<Expression>> invokeArgs;
+ loopBody.push_back(std::unique_ptr<Statement>(new ExpressionStatement(
+ this->call(-1,
+ *invokeDecl,
+ std::vector<std::unique_ptr<Expression>>()))));
+ loopBody.push_back(std::unique_ptr<Statement>(new ExpressionStatement(
+ this->call(-1,
+ std::move(endPrimitive),
+ std::vector<std::unique_ptr<Expression>>()))));
+ std::unique_ptr<Expression> assignment(new BinaryExpression(-1,
+ std::unique_ptr<Expression>(new VariableReference(-1, *loopIdx)),
+ Token::EQ,
+ std::unique_ptr<IntLiteral>(new IntLiteral(fContext, -1, 0)),
+ *fContext.fInt_Type));
+ std::unique_ptr<Statement> initializer(new ExpressionStatement(std::move(assignment)));
+ std::unique_ptr<Statement> loop = std::unique_ptr<Statement>(
+ new ForStatement(-1,
+ std::move(initializer),
+ std::move(test),
+ std::move(next),
+ std::unique_ptr<Block>(new Block(-1, std::move(loopBody))),
+ fSymbolTable));
+ std::vector<std::unique_ptr<Statement>> children;
+ children.push_back(std::move(loop));
+ return std::unique_ptr<Block>(new Block(-1, std::move(children)));
+}
+
+std::unique_ptr<Statement> IRGenerator::getNormalizeSkPositionCode() {
+ // sk_Position = float4(sk_Position.xy * rtAdjust.xz + sk_Position.ww * rtAdjust.yw,
+ // 0,
+ // sk_Position.w);
+ SkASSERT(fSkPerVertex && fRTAdjust);
+ #define REF(var) std::unique_ptr<Expression>(\
+ new VariableReference(-1, *var, VariableReference::kRead_RefKind))
+ #define FIELD(var, idx) std::unique_ptr<Expression>(\
+ new FieldAccess(REF(var), idx, FieldAccess::kAnonymousInterfaceBlock_OwnerKind))
+ #define POS std::unique_ptr<Expression>(new FieldAccess(REF(fSkPerVertex), 0, \
+ FieldAccess::kAnonymousInterfaceBlock_OwnerKind))
+ #define ADJUST (fRTAdjustInterfaceBlock ? \
+ FIELD(fRTAdjustInterfaceBlock, fRTAdjustFieldIndex) : \
+ REF(fRTAdjust))
+ #define SWIZZLE(expr, ...) std::unique_ptr<Expression>(new Swizzle(fContext, expr, \
+ { __VA_ARGS__ }))
+ #define OP(left, op, right) std::unique_ptr<Expression>( \
+ new BinaryExpression(-1, left, op, right, \
+ *fContext.fFloat2_Type))
+ std::vector<std::unique_ptr<Expression>> children;
+ children.push_back(OP(OP(SWIZZLE(POS, 0, 1), Token::STAR, SWIZZLE(ADJUST, 0, 2)),
+ Token::PLUS,
+ OP(SWIZZLE(POS, 3, 3), Token::STAR, SWIZZLE(ADJUST, 1, 3))));
+ children.push_back(std::unique_ptr<Expression>(new FloatLiteral(fContext, -1, 0.0)));
+ children.push_back(SWIZZLE(POS, 3));
+ std::unique_ptr<Expression> result = OP(POS, Token::EQ,
+ std::unique_ptr<Expression>(new Constructor(-1,
+ *fContext.fFloat4_Type,
+ std::move(children))));
+ return std::unique_ptr<Statement>(new ExpressionStatement(std::move(result)));
+}
+
+void IRGenerator::convertFunction(const ASTNode& f) {
+ auto iter = f.begin();
+ const Type* returnType = this->convertType(*(iter++));
+ if (!returnType) {
+ return;
+ }
+ const ASTNode::FunctionData& fd = f.getFunctionData();
+ std::vector<const Variable*> parameters;
+ for (size_t i = 0; i < fd.fParameterCount; ++i) {
+ const ASTNode& param = *(iter++);
+ SkASSERT(param.fKind == ASTNode::Kind::kParameter);
+ ASTNode::ParameterData pd = param.getParameterData();
+ auto paramIter = param.begin();
+ const Type* type = this->convertType(*(paramIter++));
+ if (!type) {
+ return;
+ }
+ for (int j = (int) pd.fSizeCount; j >= 1; j--) {
+ int size = (param.begin() + j)->getInt();
+ String name = type->name() + "[" + to_string(size) + "]";
+ type = (Type*) fSymbolTable->takeOwnership(
+ std::unique_ptr<Symbol>(new Type(std::move(name),
+ Type::kArray_Kind,
+ *type,
+ size)));
+ }
+ StringFragment name = pd.fName;
+ Variable* var = (Variable*) fSymbolTable->takeOwnership(
+ std::unique_ptr<Symbol>(new Variable(param.fOffset,
+ pd.fModifiers,
+ name,
+ *type,
+ Variable::kParameter_Storage)));
+ parameters.push_back(var);
+ }
+
+ if (fd.fName == "main") {
+ switch (fKind) {
+ case Program::kPipelineStage_Kind: {
+ bool valid;
+ switch (parameters.size()) {
+ case 3:
+ valid = parameters[0]->fType == *fContext.fFloat_Type &&
+ parameters[0]->fModifiers.fFlags == 0 &&
+ parameters[1]->fType == *fContext.fFloat_Type &&
+ parameters[1]->fModifiers.fFlags == 0 &&
+ parameters[2]->fType == *fContext.fHalf4_Type &&
+ parameters[2]->fModifiers.fFlags == (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag);
+ break;
+ case 1:
+ valid = parameters[0]->fType == *fContext.fHalf4_Type &&
+ parameters[0]->fModifiers.fFlags == (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag);
+ break;
+ default:
+ valid = false;
+ }
+ if (!valid) {
+ fErrors.error(f.fOffset, "pipeline stage 'main' must be declared main(float, "
+ "float, inout half4) or main(inout half4)");
+ return;
+ }
+ break;
+ }
+ case Program::kGeneric_Kind:
+ break;
+ default:
+ if (parameters.size()) {
+ fErrors.error(f.fOffset, "shader 'main' must have zero parameters");
+ }
+ }
+ }
+
+ // find existing declaration
+ const FunctionDeclaration* decl = nullptr;
+ auto entry = (*fSymbolTable)[fd.fName];
+ if (entry) {
+ std::vector<const FunctionDeclaration*> functions;
+ switch (entry->fKind) {
+ case Symbol::kUnresolvedFunction_Kind:
+ functions = ((UnresolvedFunction*) entry)->fFunctions;
+ break;
+ case Symbol::kFunctionDeclaration_Kind:
+ functions.push_back((FunctionDeclaration*) entry);
+ break;
+ default:
+ fErrors.error(f.fOffset, "symbol '" + fd.fName + "' was already defined");
+ return;
+ }
+ for (const auto& other : functions) {
+ SkASSERT(other->fName == fd.fName);
+ if (parameters.size() == other->fParameters.size()) {
+ bool match = true;
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->fType != other->fParameters[i]->fType) {
+ match = false;
+ break;
+ }
+ }
+ if (match) {
+ if (*returnType != other->fReturnType) {
+ FunctionDeclaration newDecl(f.fOffset, fd.fModifiers, fd.fName, parameters,
+ *returnType);
+ fErrors.error(f.fOffset, "functions '" + newDecl.description() +
+ "' and '" + other->description() +
+ "' differ only in return type");
+ return;
+ }
+ decl = other;
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->fModifiers != other->fParameters[i]->fModifiers) {
+ fErrors.error(f.fOffset, "modifiers on parameter " +
+ to_string((uint64_t) i + 1) +
+ " differ between declaration and "
+ "definition");
+ return;
+ }
+ }
+ if (other->fDefined) {
+ fErrors.error(f.fOffset, "duplicate definition of " +
+ other->description());
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (!decl) {
+ // couldn't find an existing declaration
+ auto newDecl = std::unique_ptr<FunctionDeclaration>(new FunctionDeclaration(f.fOffset,
+ fd.fModifiers,
+ fd.fName,
+ parameters,
+ *returnType));
+ decl = newDecl.get();
+ fSymbolTable->add(decl->fName, std::move(newDecl));
+ }
+ if (iter != f.end()) {
+ // compile body
+ SkASSERT(!fCurrentFunction);
+ fCurrentFunction = decl;
+ decl->fDefined = true;
+ std::shared_ptr<SymbolTable> old = fSymbolTable;
+ AutoSymbolTable table(this);
+ if (fd.fName == "main" && fKind == Program::kPipelineStage_Kind) {
+ if (parameters.size() == 3) {
+ parameters[0]->fModifiers.fLayout.fBuiltin = SK_MAIN_X_BUILTIN;
+ parameters[1]->fModifiers.fLayout.fBuiltin = SK_MAIN_Y_BUILTIN;
+ parameters[2]->fModifiers.fLayout.fBuiltin = SK_OUTCOLOR_BUILTIN;
+ } else {
+ SkASSERT(parameters.size() == 1);
+ parameters[0]->fModifiers.fLayout.fBuiltin = SK_OUTCOLOR_BUILTIN;
+ }
+ }
+ for (size_t i = 0; i < parameters.size(); i++) {
+ fSymbolTable->addWithoutOwnership(parameters[i]->fName, decl->fParameters[i]);
+ }
+ bool needInvocationIDWorkaround = fInvocations != -1 && fd.fName == "main" &&
+ fSettings->fCaps &&
+ !fSettings->fCaps->gsInvocationsSupport();
+ SkASSERT(!fExtraVars.size());
+ std::unique_ptr<Block> body = this->convertBlock(*iter);
+ for (auto& v : fExtraVars) {
+ body->fStatements.insert(body->fStatements.begin(), std::move(v));
+ }
+ fExtraVars.clear();
+ fCurrentFunction = nullptr;
+ if (!body) {
+ return;
+ }
+ if (needInvocationIDWorkaround) {
+ body = this->applyInvocationIDWorkaround(std::move(body));
+ }
+ // conservatively assume all user-defined functions have side effects
+ ((Modifiers&) decl->fModifiers).fFlags |= Modifiers::kHasSideEffects_Flag;
+ if (Program::kVertex_Kind == fKind && fd.fName == "main" && fRTAdjust) {
+ body->fStatements.insert(body->fStatements.end(), this->getNormalizeSkPositionCode());
+ }
+ fProgramElements->push_back(std::unique_ptr<FunctionDefinition>(
+ new FunctionDefinition(f.fOffset, *decl, std::move(body))));
+ }
+}
+
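+// Converts an interface block, collecting its declarations into a new struct
+// type; a named instance is added to the symbol table as a single variable,
+// while an anonymous block exposes each field directly as a Field symbol.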
+std::unique_ptr<InterfaceBlock> IRGenerator::convertInterfaceBlock(const ASTNode& intf) {
+ SkASSERT(intf.fKind == ASTNode::Kind::kInterfaceBlock);
+ ASTNode::InterfaceBlockData id = intf.getInterfaceBlockData();
+ std::shared_ptr<SymbolTable> old = fSymbolTable;
+ this->pushSymbolTable();
+ std::shared_ptr<SymbolTable> symbols = fSymbolTable;
+ std::vector<Type::Field> fields;
+ bool haveRuntimeArray = false;
+ bool foundRTAdjust = false;
+ auto iter = intf.begin();
+ for (size_t i = 0; i < id.fDeclarationCount; ++i) {
+ std::unique_ptr<VarDeclarations> decl = this->convertVarDeclarations(
+ *(iter++),
+ Variable::kInterfaceBlock_Storage);
+ if (!decl) {
+ return nullptr;
+ }
+ for (const auto& stmt : decl->fVars) {
+ VarDeclaration& vd = (VarDeclaration&) *stmt;
+ if (haveRuntimeArray) {
+ fErrors.error(decl->fOffset,
+ "only the last entry in an interface block may be a runtime-sized "
+ "array");
+ }
+ if (vd.fVar == fRTAdjust) {
+ foundRTAdjust = true;
+ SkASSERT(vd.fVar->fType == *fContext.fFloat4_Type);
+ fRTAdjustFieldIndex = fields.size();
+ }
+ fields.push_back(Type::Field(vd.fVar->fModifiers, vd.fVar->fName,
+ &vd.fVar->fType));
+ if (vd.fValue) {
+ fErrors.error(decl->fOffset,
+ "initializers are not permitted on interface block fields");
+ }
+ if (vd.fVar->fModifiers.fFlags & (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag |
+ Modifiers::kUniform_Flag |
+ Modifiers::kBuffer_Flag |
+ Modifiers::kConst_Flag)) {
+ fErrors.error(decl->fOffset,
+ "interface block fields may not have storage qualifiers");
+ }
+ if (vd.fVar->fType.kind() == Type::kArray_Kind &&
+ vd.fVar->fType.columns() == -1) {
+ haveRuntimeArray = true;
+ }
+ }
+ }
+ this->popSymbolTable();
+ Type* type = (Type*) old->takeOwnership(std::unique_ptr<Symbol>(new Type(intf.fOffset,
+ id.fTypeName,
+ fields)));
+ std::vector<std::unique_ptr<Expression>> sizes;
+ for (size_t i = 0; i < id.fSizeCount; ++i) {
+ const ASTNode& size = *(iter++);
+ if (size) {
+ std::unique_ptr<Expression> converted = this->convertExpression(size);
+ if (!converted) {
+ return nullptr;
+ }
+ String name = type->fName;
+ int64_t count;
+ if (converted->fKind == Expression::kIntLiteral_Kind) {
+ count = ((IntLiteral&) *converted).fValue;
+ if (count <= 0) {
+ fErrors.error(converted->fOffset, "array size must be positive");
+ return nullptr;
+ }
+ name += "[" + to_string(count) + "]";
+ } else {
+ fErrors.error(intf.fOffset, "array size must be specified");
+ return nullptr;
+ }
+ type = (Type*) symbols->takeOwnership(std::unique_ptr<Symbol>(
+ new Type(name,
+ Type::kArray_Kind,
+ *type,
+ (int) count)));
+ sizes.push_back(std::move(converted));
+ } else {
+ fErrors.error(intf.fOffset, "array size must be specified");
+ return nullptr;
+ }
+ }
+ Variable* var = (Variable*) old->takeOwnership(std::unique_ptr<Symbol>(
+ new Variable(intf.fOffset,
+ id.fModifiers,
+ id.fInstanceName.fLength ? id.fInstanceName : id.fTypeName,
+ *type,
+ Variable::kGlobal_Storage)));
+ if (foundRTAdjust) {
+ fRTAdjustInterfaceBlock = var;
+ }
+ if (id.fInstanceName.fLength) {
+ old->addWithoutOwnership(id.fInstanceName, var);
+ } else {
+ for (size_t i = 0; i < fields.size(); i++) {
+ old->add(fields[i].fName, std::unique_ptr<Field>(new Field(intf.fOffset, *var,
+ (int) i)));
+ }
+ }
+ return std::unique_ptr<InterfaceBlock>(new InterfaceBlock(intf.fOffset,
+ var,
+ id.fTypeName,
+ id.fInstanceName,
+ std::move(sizes),
+ symbols));
+}
+
+void IRGenerator::getConstantInt(const Expression& value, int64_t* out) {
+ switch (value.fKind) {
+ case Expression::kIntLiteral_Kind:
+ *out = ((const IntLiteral&) value).fValue;
+ break;
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference&) value).fVariable;
+ if ((var.fModifiers.fFlags & Modifiers::kConst_Flag) &&
+ var.fInitialValue) {
+ this->getConstantInt(*var.fInitialValue, out);
+ }
+ break;
+ }
+ default:
+ fErrors.error(value.fOffset, "expected a constant int");
+ }
+}
+
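+// Enum cases without an explicit initializer continue counting from the
+// previous case, so 'enum class E { A, B = 5, C };' yields A = 0, B = 5, C = 6.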
+void IRGenerator::convertEnum(const ASTNode& e) {
+ SkASSERT(e.fKind == ASTNode::Kind::kEnum);
+ std::vector<Variable*> variables;
+ int64_t currentValue = 0;
+ Layout layout;
+ ASTNode enumType(e.fNodes, e.fOffset, ASTNode::Kind::kType,
+ ASTNode::TypeData(e.getString(), false, false));
+ const Type* type = this->convertType(enumType);
+ Modifiers modifiers(layout, Modifiers::kConst_Flag);
+ std::shared_ptr<SymbolTable> symbols(new SymbolTable(fSymbolTable, &fErrors));
+ fSymbolTable = symbols;
+ for (auto iter = e.begin(); iter != e.end(); ++iter) {
+ const ASTNode& child = *iter;
+ SkASSERT(child.fKind == ASTNode::Kind::kEnumCase);
+ std::unique_ptr<Expression> value;
+ if (child.begin() != child.end()) {
+ value = this->convertExpression(*child.begin());
+ if (!value) {
+ fSymbolTable = symbols->fParent;
+ return;
+ }
+ this->getConstantInt(*value, &currentValue);
+ }
+ value = std::unique_ptr<Expression>(new IntLiteral(fContext, e.fOffset, currentValue));
+ ++currentValue;
+ auto var = std::unique_ptr<Variable>(new Variable(e.fOffset, modifiers, child.getString(),
+ *type, Variable::kGlobal_Storage,
+ value.get()));
+ variables.push_back(var.get());
+ symbols->add(child.getString(), std::move(var));
+ symbols->takeOwnership(std::move(value));
+ }
+ fProgramElements->push_back(std::unique_ptr<ProgramElement>(new Enum(e.fOffset, e.getString(),
+ symbols)));
+ fSymbolTable = symbols->fParent;
+}
+
+const Type* IRGenerator::convertType(const ASTNode& type) {
+ ASTNode::TypeData td = type.getTypeData();
+ const Symbol* result = (*fSymbolTable)[td.fName];
+ if (result && result->fKind == Symbol::kType_Kind) {
+ if (td.fIsNullable) {
+ if (((Type&) *result) == *fContext.fFragmentProcessor_Type) {
+ if (type.begin() != type.end()) {
+ fErrors.error(type.fOffset, "type '" + td.fName + "' may not be used in "
+ "an array");
+ }
+ result = fSymbolTable->takeOwnership(std::unique_ptr<Symbol>(
+ new Type(String(result->fName) + "?",
+ Type::kNullable_Kind,
+ (const Type&) *result)));
+ } else {
+ fErrors.error(type.fOffset, "type '" + td.fName + "' may not be nullable");
+ }
+ }
+ for (const auto& size : type) {
+ String name(result->fName);
+ name += "[";
+ if (size) {
+ name += to_string(size.getInt());
+ }
+ name += "]";
+ result = (Type*) fSymbolTable->takeOwnership(std::unique_ptr<Symbol>(
+ new Type(name,
+ Type::kArray_Kind,
+ (const Type&) *result,
+ size ? size.getInt()
+ : 0)));
+ }
+ return (const Type*) result;
+ }
+ fErrors.error(type.fOffset, "unknown type '" + td.fName + "'");
+ return nullptr;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertExpression(const ASTNode& expr) {
+ switch (expr.fKind) {
+ case ASTNode::Kind::kBinary:
+ return this->convertBinaryExpression(expr);
+ case ASTNode::Kind::kBool:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, expr.fOffset,
+ expr.getBool()));
+ case ASTNode::Kind::kCall:
+ return this->convertCallExpression(expr);
+ case ASTNode::Kind::kField:
+ return this->convertFieldExpression(expr);
+ case ASTNode::Kind::kFloat:
+ return std::unique_ptr<Expression>(new FloatLiteral(fContext, expr.fOffset,
+ expr.getFloat()));
+ case ASTNode::Kind::kIdentifier:
+ return this->convertIdentifier(expr);
+ case ASTNode::Kind::kIndex:
+ return this->convertIndexExpression(expr);
+ case ASTNode::Kind::kInt:
+ return std::unique_ptr<Expression>(new IntLiteral(fContext, expr.fOffset,
+ expr.getInt()));
+ case ASTNode::Kind::kNull:
+ return std::unique_ptr<Expression>(new NullLiteral(fContext, expr.fOffset));
+ case ASTNode::Kind::kPostfix:
+ return this->convertPostfixExpression(expr);
+ case ASTNode::Kind::kPrefix:
+ return this->convertPrefixExpression(expr);
+ case ASTNode::Kind::kTernary:
+ return this->convertTernaryExpression(expr);
+ default:
+ ABORT("unsupported expression: %s\n", expr.description().c_str());
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertIdentifier(const ASTNode& identifier) {
+ SkASSERT(identifier.fKind == ASTNode::Kind::kIdentifier);
+ const Symbol* result = (*fSymbolTable)[identifier.getString()];
+ if (!result) {
+ fErrors.error(identifier.fOffset, "unknown identifier '" + identifier.getString() + "'");
+ return nullptr;
+ }
+ switch (result->fKind) {
+ case Symbol::kFunctionDeclaration_Kind: {
+ std::vector<const FunctionDeclaration*> f = {
+ (const FunctionDeclaration*) result
+ };
+ return std::unique_ptr<FunctionReference>(new FunctionReference(fContext,
+ identifier.fOffset,
+ f));
+ }
+ case Symbol::kUnresolvedFunction_Kind: {
+ const UnresolvedFunction* f = (const UnresolvedFunction*) result;
+ return std::unique_ptr<FunctionReference>(new FunctionReference(fContext,
+ identifier.fOffset,
+ f->fFunctions));
+ }
+ case Symbol::kVariable_Kind: {
+ const Variable* var = (const Variable*) result;
+ switch (var->fModifiers.fLayout.fBuiltin) {
+ case SK_WIDTH_BUILTIN:
+ fInputs.fRTWidth = true;
+ break;
+ case SK_HEIGHT_BUILTIN:
+ fInputs.fRTHeight = true;
+ break;
+#ifndef SKSL_STANDALONE
+ case SK_FRAGCOORD_BUILTIN:
+ fInputs.fFlipY = true;
+ if (fSettings->fFlipY &&
+ (!fSettings->fCaps ||
+ !fSettings->fCaps->fragCoordConventionsExtensionString())) {
+ fInputs.fRTHeight = true;
+ }
+#endif
+ }
+ if (fKind == Program::kFragmentProcessor_Kind &&
+ (var->fModifiers.fFlags & Modifiers::kIn_Flag) &&
+ !(var->fModifiers.fFlags & Modifiers::kUniform_Flag) &&
+ !var->fModifiers.fLayout.fKey &&
+ var->fModifiers.fLayout.fBuiltin == -1 &&
+ var->fType.nonnullable() != *fContext.fFragmentProcessor_Type &&
+ var->fType.kind() != Type::kSampler_Kind) {
+ bool valid = false;
+ for (const auto& decl : fFile->root()) {
+ if (decl.fKind == ASTNode::Kind::kSection) {
+ ASTNode::SectionData section = decl.getSectionData();
+ if (section.fName == "setData") {
+ valid = true;
+ break;
+ }
+ }
+ }
+ if (!valid) {
+ fErrors.error(identifier.fOffset, "'in' variable must be either 'uniform' or "
+ "'layout(key)', or there must be a custom "
+ "@setData function");
+ }
+ }
+ // default to kRead_RefKind; this will be corrected later if the variable is written to
+ return std::unique_ptr<VariableReference>(new VariableReference(
+ identifier.fOffset,
+ *var,
+ VariableReference::kRead_RefKind));
+ }
+ case Symbol::kField_Kind: {
+ const Field* field = (const Field*) result;
+ VariableReference* base = new VariableReference(identifier.fOffset, field->fOwner,
+ VariableReference::kRead_RefKind);
+ return std::unique_ptr<Expression>(new FieldAccess(
+ std::unique_ptr<Expression>(base),
+ field->fFieldIndex,
+ FieldAccess::kAnonymousInterfaceBlock_OwnerKind));
+ }
+ case Symbol::kType_Kind: {
+ const Type* t = (const Type*) result;
+ return std::unique_ptr<TypeReference>(new TypeReference(fContext, identifier.fOffset,
+ *t));
+ }
+ case Symbol::kExternal_Kind: {
+ ExternalValue* r = (ExternalValue*) result;
+ return std::unique_ptr<ExternalValueReference>(
+ new ExternalValueReference(identifier.fOffset, r));
+ }
+ default:
+ ABORT("unsupported symbol type %d\n", result->fKind);
+ }
+}
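+
+// Example of the builtin bookkeeping above (editorial note): referencing
+// 'sk_FragCoord' sets fInputs.fFlipY, and -- when the program is compiled with
+// fFlipY and the fragment-coord-conventions extension is unavailable -- also
+// fInputs.fRTHeight, so later code generation knows it must flip the y
+// coordinate using the render-target height. sk_Width / sk_Height similarly
+// record fRTWidth / fRTHeight.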
+
+std::unique_ptr<Section> IRGenerator::convertSection(const ASTNode& s) {
+ ASTNode::SectionData section = s.getSectionData();
+ return std::unique_ptr<Section>(new Section(s.fOffset, section.fName, section.fArgument,
+ section.fText));
+}
+
+std::unique_ptr<Expression> IRGenerator::coerce(std::unique_ptr<Expression> expr,
+ const Type& type) {
+ if (!expr) {
+ return nullptr;
+ }
+ if (expr->fType == type) {
+ return expr;
+ }
+ this->checkValid(*expr);
+ if (expr->fType == *fContext.fInvalid_Type) {
+ return nullptr;
+ }
+ if (expr->coercionCost(type) == INT_MAX) {
+ fErrors.error(expr->fOffset, "expected '" + type.description() + "', but found '" +
+ expr->fType.description() + "'");
+ return nullptr;
+ }
+ if (type.kind() == Type::kScalar_Kind) {
+ std::vector<std::unique_ptr<Expression>> args;
+ args.push_back(std::move(expr));
+ std::unique_ptr<Expression> ctor;
+ if (type == *fContext.fFloatLiteral_Type) {
+ ctor = this->convertIdentifier(ASTNode(&fFile->fNodes, -1, ASTNode::Kind::kIdentifier,
+ "float"));
+ } else if (type == *fContext.fIntLiteral_Type) {
+ ctor = this->convertIdentifier(ASTNode(&fFile->fNodes, -1, ASTNode::Kind::kIdentifier,
+ "int"));
+ } else {
+ ctor = this->convertIdentifier(ASTNode(&fFile->fNodes, -1, ASTNode::Kind::kIdentifier,
+ type.fName));
+ }
+ if (!ctor) {
+ printf("error, null identifier: %s\n", String(type.fName).c_str());
+ }
+ SkASSERT(ctor);
+ return this->call(-1, std::move(ctor), std::move(args));
+ }
+ if (expr->fKind == Expression::kNullLiteral_Kind) {
+ SkASSERT(type.kind() == Type::kNullable_Kind);
+ return std::unique_ptr<Expression>(new NullLiteral(expr->fOffset, type));
+ }
+ std::vector<std::unique_ptr<Expression>> args;
+ args.push_back(std::move(expr));
+ return std::unique_ptr<Expression>(new Constructor(-1, type, std::move(args)));
+}
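+
+// Illustrative outcomes of coerce() above (editor's sketch): coercing the int
+// literal in 'float x = 1;' takes the scalar branch and is lowered as if the
+// source had written 'float x = float(1);'. A 'null' literal coerced to a
+// nullable type becomes a typed NullLiteral, and any other legal coercion,
+// e.g. half4 -> float4, is wrapped in a single-argument Constructor.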
+
+static bool is_matrix_multiply(const Type& left, const Type& right) {
+ if (left.kind() == Type::kMatrix_Kind) {
+ return right.kind() == Type::kMatrix_Kind || right.kind() == Type::kVector_Kind;
+ }
+ return left.kind() == Type::kVector_Kind && right.kind() == Type::kMatrix_Kind;
+}
+
+/**
+ * Determines the operand and result types of a binary expression. Returns true if the expression is
+ * legal, false otherwise. If false, the values of the out parameters are undefined.
+ */
+static bool determine_binary_type(const Context& context,
+ Token::Kind op,
+ const Type& left,
+ const Type& right,
+ const Type** outLeftType,
+ const Type** outRightType,
+ const Type** outResultType,
+ bool tryFlipped) {
+ bool isLogical;
+ bool validMatrixOrVectorOp;
+ switch (op) {
+ case Token::EQ:
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = &left;
+ return right.canCoerceTo(left);
+ case Token::EQEQ: // fall through
+ case Token::NEQ:
+ if (right.canCoerceTo(left)) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = context.fBool_Type.get();
+ return true;
+            } else if (left.canCoerceTo(right)) {
+ *outLeftType = &right;
+ *outRightType = &right;
+ *outResultType = context.fBool_Type.get();
+ return true;
+ }
+ return false;
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ:
+ isLogical = true;
+ validMatrixOrVectorOp = false;
+ break;
+ case Token::LOGICALOR: // fall through
+ case Token::LOGICALAND: // fall through
+ case Token::LOGICALXOR: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ:
+ *outLeftType = context.fBool_Type.get();
+ *outRightType = context.fBool_Type.get();
+ *outResultType = context.fBool_Type.get();
+ return left.canCoerceTo(*context.fBool_Type) &&
+ right.canCoerceTo(*context.fBool_Type);
+ case Token::STAREQ:
+ if (left.kind() == Type::kScalar_Kind) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = &left;
+ return right.canCoerceTo(left);
+ }
+ // fall through
+ case Token::STAR:
+ if (is_matrix_multiply(left, right)) {
+ // determine final component type
+ if (determine_binary_type(context, Token::STAR, left.componentType(),
+ right.componentType(), outLeftType, outRightType,
+ outResultType, false)) {
+ *outLeftType = &(*outResultType)->toCompound(context, left.columns(),
+ left.rows());
+ *outRightType = &(*outResultType)->toCompound(context, right.columns(),
+ right.rows());
+ int leftColumns = left.columns();
+ int leftRows = left.rows();
+ int rightColumns;
+ int rightRows;
+ if (right.kind() == Type::kVector_Kind) {
+ // matrix * vector treats the vector as a column vector, so we need to
+ // transpose it
+ rightColumns = right.rows();
+ rightRows = right.columns();
+ SkASSERT(rightColumns == 1);
+ } else {
+ rightColumns = right.columns();
+ rightRows = right.rows();
+ }
+ if (rightColumns > 1) {
+ *outResultType = &(*outResultType)->toCompound(context, rightColumns,
+ leftRows);
+ } else {
+ // result was a column vector, transpose it back to a row
+ *outResultType = &(*outResultType)->toCompound(context, leftRows,
+ rightColumns);
+ }
+ return leftColumns == rightRows;
+ } else {
+ return false;
+ }
+ }
+ isLogical = false;
+ validMatrixOrVectorOp = true;
+ break;
+ case Token::PLUSEQ:
+ case Token::MINUSEQ:
+ case Token::SLASHEQ:
+ case Token::PERCENTEQ:
+ case Token::SHLEQ:
+ case Token::SHREQ:
+ if (left.kind() == Type::kScalar_Kind) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = &left;
+ return right.canCoerceTo(left);
+ }
+ // fall through
+ case Token::PLUS: // fall through
+ case Token::MINUS: // fall through
+        case Token::SLASH:
+ isLogical = false;
+ validMatrixOrVectorOp = true;
+ break;
+ case Token::COMMA:
+ *outLeftType = &left;
+ *outRightType = &right;
+ *outResultType = &right;
+ return true;
+ default:
+ isLogical = false;
+ validMatrixOrVectorOp = false;
+ }
+ bool isVectorOrMatrix = left.kind() == Type::kVector_Kind || left.kind() == Type::kMatrix_Kind;
+ if (left.kind() == Type::kScalar_Kind && right.kind() == Type::kScalar_Kind &&
+ right.canCoerceTo(left)) {
+ if (left.priority() > right.priority()) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ } else {
+ *outLeftType = &right;
+ *outRightType = &right;
+ }
+ if (isLogical) {
+ *outResultType = context.fBool_Type.get();
+ } else {
+ *outResultType = &left;
+ }
+ return true;
+ }
+ if (right.canCoerceTo(left) && isVectorOrMatrix && validMatrixOrVectorOp) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ if (isLogical) {
+ *outResultType = context.fBool_Type.get();
+ } else {
+ *outResultType = &left;
+ }
+ return true;
+ }
+ if ((left.kind() == Type::kVector_Kind || left.kind() == Type::kMatrix_Kind) &&
+ (right.kind() == Type::kScalar_Kind)) {
+ if (determine_binary_type(context, op, left.componentType(), right, outLeftType,
+ outRightType, outResultType, false)) {
+ *outLeftType = &(*outLeftType)->toCompound(context, left.columns(), left.rows());
+ if (!isLogical) {
+ *outResultType = &(*outResultType)->toCompound(context, left.columns(),
+ left.rows());
+ }
+ return true;
+ }
+ return false;
+ }
+ if (tryFlipped) {
+ return determine_binary_type(context, op, right, left, outRightType, outLeftType,
+ outResultType, false);
+ }
+ return false;
+}
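+
+// Worked example for the matrix-multiply case above (editor's sketch): given
+//
+//     float3x2 m;   // 3 columns, 2 rows
+//     float3   v;
+//
+// 'm * v' treats v as a 3x1 column vector, so leftColumns (3) == rightRows (3)
+// and the product collapses to a vector of leftRows components, i.e. float2.
+// 'm * float2(...)' is rejected because the inner dimensions (3 vs. 2) differ.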
+
+static std::unique_ptr<Expression> short_circuit_boolean(const Context& context,
+ const Expression& left,
+ Token::Kind op,
+ const Expression& right) {
+ SkASSERT(left.fKind == Expression::kBoolLiteral_Kind);
+ bool leftVal = ((BoolLiteral&) left).fValue;
+ if (op == Token::LOGICALAND) {
+ // (true && expr) -> (expr) and (false && expr) -> (false)
+ return leftVal ? right.clone()
+ : std::unique_ptr<Expression>(new BoolLiteral(context, left.fOffset, false));
+ } else if (op == Token::LOGICALOR) {
+ // (true || expr) -> (true) and (false || expr) -> (expr)
+ return leftVal ? std::unique_ptr<Expression>(new BoolLiteral(context, left.fOffset, true))
+ : right.clone();
+ } else {
+ // Can't short circuit XOR
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::constantFold(const Expression& left,
+ Token::Kind op,
+ const Expression& right) const {
+    // If the left side is a constant boolean literal, the right side does not need to be constant
+    // for short-circuit optimizations to fold the expression.
+ if (left.fKind == Expression::kBoolLiteral_Kind && !right.isConstant()) {
+ return short_circuit_boolean(fContext, left, op, right);
+ } else if (right.fKind == Expression::kBoolLiteral_Kind && !left.isConstant()) {
+        // Expressions in SkSL have no side effects, so (left OP right) is equivalent to
+        // (right OP left) for short-circuit optimizations.
+ return short_circuit_boolean(fContext, right, op, left);
+ }
+
+ // Other than the short-circuit cases above, constant folding requires both sides to be constant
+ if (!left.isConstant() || !right.isConstant()) {
+ return nullptr;
+ }
+ // Note that we expressly do not worry about precision and overflow here -- we use the maximum
+ // precision to calculate the results and hope the result makes sense. The plan is to move the
+ // Skia caps into SkSL, so we have access to all of them including the precisions of the various
+ // types, which will let us be more intelligent about this.
+ if (left.fKind == Expression::kBoolLiteral_Kind &&
+ right.fKind == Expression::kBoolLiteral_Kind) {
+ bool leftVal = ((BoolLiteral&) left).fValue;
+ bool rightVal = ((BoolLiteral&) right).fValue;
+ bool result;
+ switch (op) {
+ case Token::LOGICALAND: result = leftVal && rightVal; break;
+ case Token::LOGICALOR: result = leftVal || rightVal; break;
+ case Token::LOGICALXOR: result = leftVal ^ rightVal; break;
+ default: return nullptr;
+ }
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, left.fOffset, result));
+ }
+ #define RESULT(t, op) std::unique_ptr<Expression>(new t ## Literal(fContext, left.fOffset, \
+ leftVal op rightVal))
+ if (left.fKind == Expression::kIntLiteral_Kind && right.fKind == Expression::kIntLiteral_Kind) {
+ int64_t leftVal = ((IntLiteral&) left).fValue;
+ int64_t rightVal = ((IntLiteral&) right).fValue;
+ switch (op) {
+ case Token::PLUS: return RESULT(Int, +);
+ case Token::MINUS: return RESULT(Int, -);
+ case Token::STAR: return RESULT(Int, *);
+ case Token::SLASH:
+ if (rightVal) {
+ return RESULT(Int, /);
+ }
+ fErrors.error(right.fOffset, "division by zero");
+ return nullptr;
+ case Token::PERCENT:
+ if (rightVal) {
+ return RESULT(Int, %);
+ }
+ fErrors.error(right.fOffset, "division by zero");
+ return nullptr;
+ case Token::BITWISEAND: return RESULT(Int, &);
+ case Token::BITWISEOR: return RESULT(Int, |);
+ case Token::BITWISEXOR: return RESULT(Int, ^);
+ case Token::EQEQ: return RESULT(Bool, ==);
+ case Token::NEQ: return RESULT(Bool, !=);
+ case Token::GT: return RESULT(Bool, >);
+ case Token::GTEQ: return RESULT(Bool, >=);
+ case Token::LT: return RESULT(Bool, <);
+ case Token::LTEQ: return RESULT(Bool, <=);
+ case Token::SHL:
+ if (rightVal >= 0 && rightVal <= 31) {
+ return RESULT(Int, <<);
+ }
+ fErrors.error(right.fOffset, "shift value out of range");
+ return nullptr;
+ case Token::SHR:
+ if (rightVal >= 0 && rightVal <= 31) {
+ return RESULT(Int, >>);
+ }
+ fErrors.error(right.fOffset, "shift value out of range");
+ return nullptr;
+
+ default:
+ return nullptr;
+ }
+ }
+ if (left.fKind == Expression::kFloatLiteral_Kind &&
+ right.fKind == Expression::kFloatLiteral_Kind) {
+ double leftVal = ((FloatLiteral&) left).fValue;
+ double rightVal = ((FloatLiteral&) right).fValue;
+ switch (op) {
+ case Token::PLUS: return RESULT(Float, +);
+ case Token::MINUS: return RESULT(Float, -);
+ case Token::STAR: return RESULT(Float, *);
+ case Token::SLASH:
+ if (rightVal) {
+ return RESULT(Float, /);
+ }
+ fErrors.error(right.fOffset, "division by zero");
+ return nullptr;
+ case Token::EQEQ: return RESULT(Bool, ==);
+ case Token::NEQ: return RESULT(Bool, !=);
+ case Token::GT: return RESULT(Bool, >);
+ case Token::GTEQ: return RESULT(Bool, >=);
+ case Token::LT: return RESULT(Bool, <);
+ case Token::LTEQ: return RESULT(Bool, <=);
+ default: return nullptr;
+ }
+ }
+ if (left.fType.kind() == Type::kVector_Kind && left.fType.componentType().isFloat() &&
+ left.fType == right.fType) {
+ std::vector<std::unique_ptr<Expression>> args;
+ #define RETURN_VEC_COMPONENTWISE_RESULT(op) \
+ for (int i = 0; i < left.fType.columns(); i++) { \
+ float value = left.getFVecComponent(i) op \
+ right.getFVecComponent(i); \
+ args.emplace_back(new FloatLiteral(fContext, -1, value)); \
+ } \
+ return std::unique_ptr<Expression>(new Constructor(-1, left.fType, \
+ std::move(args)))
+ switch (op) {
+ case Token::EQEQ:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, -1,
+ left.compareConstant(fContext, right)));
+ case Token::NEQ:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, -1,
+ !left.compareConstant(fContext, right)));
+ case Token::PLUS: RETURN_VEC_COMPONENTWISE_RESULT(+);
+ case Token::MINUS: RETURN_VEC_COMPONENTWISE_RESULT(-);
+ case Token::STAR: RETURN_VEC_COMPONENTWISE_RESULT(*);
+ case Token::SLASH:
+ for (int i = 0; i < left.fType.columns(); i++) {
+ SKSL_FLOAT rvalue = right.getFVecComponent(i);
+ if (rvalue == 0.0) {
+ fErrors.error(right.fOffset, "division by zero");
+ return nullptr;
+ }
+ float value = left.getFVecComponent(i) / rvalue;
+ args.emplace_back(new FloatLiteral(fContext, -1, value));
+ }
+ return std::unique_ptr<Expression>(new Constructor(-1, left.fType,
+ std::move(args)));
+ default: return nullptr;
+ }
+ }
+ if (left.fType.kind() == Type::kMatrix_Kind &&
+ right.fType.kind() == Type::kMatrix_Kind &&
+ left.fKind == right.fKind) {
+ switch (op) {
+ case Token::EQEQ:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, -1,
+ left.compareConstant(fContext, right)));
+ case Token::NEQ:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, -1,
+ !left.compareConstant(fContext, right)));
+ default:
+ return nullptr;
+ }
+ }
+ #undef RESULT
+ return nullptr;
+}
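+
+// A few concrete folds performed above (editorial examples): '3 + 4' becomes
+// IntLiteral(7); '1 / 0' reports "division by zero" instead of folding;
+// 'true || expr' short-circuits to 'true' even when 'expr' is not a constant;
+// and 'half2(1, 2) + half2(3, 4)' folds component-wise into the constructor
+// 'half2(4, 6)'.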
+
+std::unique_ptr<Expression> IRGenerator::convertBinaryExpression(const ASTNode& expression) {
+ SkASSERT(expression.fKind == ASTNode::Kind::kBinary);
+ auto iter = expression.begin();
+ std::unique_ptr<Expression> left = this->convertExpression(*(iter++));
+ if (!left) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> right = this->convertExpression(*(iter++));
+ if (!right) {
+ return nullptr;
+ }
+ const Type* leftType;
+ const Type* rightType;
+ const Type* resultType;
+ const Type* rawLeftType;
+ if (left->fKind == Expression::kIntLiteral_Kind && right->fType.isInteger()) {
+ rawLeftType = &right->fType;
+ } else {
+ rawLeftType = &left->fType;
+ }
+ const Type* rawRightType;
+ if (right->fKind == Expression::kIntLiteral_Kind && left->fType.isInteger()) {
+ rawRightType = &left->fType;
+ } else {
+ rawRightType = &right->fType;
+ }
+ Token::Kind op = expression.getToken().fKind;
+ if (!determine_binary_type(fContext, op, *rawLeftType, *rawRightType, &leftType, &rightType,
+ &resultType, !Compiler::IsAssignment(op))) {
+ fErrors.error(expression.fOffset, String("type mismatch: '") +
+ Compiler::OperatorName(expression.getToken().fKind) +
+ "' cannot operate on '" + left->fType.description() +
+ "', '" + right->fType.description() + "'");
+ return nullptr;
+ }
+ if (Compiler::IsAssignment(op)) {
+ this->setRefKind(*left, op != Token::EQ ? VariableReference::kReadWrite_RefKind :
+ VariableReference::kWrite_RefKind);
+ }
+ left = this->coerce(std::move(left), *leftType);
+ right = this->coerce(std::move(right), *rightType);
+ if (!left || !right) {
+ return nullptr;
+ }
+    std::unique_ptr<Expression> result = this->constantFold(*left, op, *right);
+ if (!result) {
+ result = std::unique_ptr<Expression>(new BinaryExpression(expression.fOffset,
+ std::move(left),
+ op,
+ std::move(right),
+ *resultType));
+ }
+ return result;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertTernaryExpression(const ASTNode& node) {
+ SkASSERT(node.fKind == ASTNode::Kind::kTernary);
+ auto iter = node.begin();
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*(iter++)),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> ifTrue = this->convertExpression(*(iter++));
+ if (!ifTrue) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> ifFalse = this->convertExpression(*(iter++));
+ if (!ifFalse) {
+ return nullptr;
+ }
+ const Type* trueType;
+ const Type* falseType;
+ const Type* resultType;
+ if (!determine_binary_type(fContext, Token::EQEQ, ifTrue->fType, ifFalse->fType, &trueType,
+ &falseType, &resultType, true) || trueType != falseType) {
+ fErrors.error(node.fOffset, "ternary operator result mismatch: '" +
+ ifTrue->fType.description() + "', '" +
+ ifFalse->fType.description() + "'");
+ return nullptr;
+ }
+ ifTrue = this->coerce(std::move(ifTrue), *trueType);
+ if (!ifTrue) {
+ return nullptr;
+ }
+ ifFalse = this->coerce(std::move(ifFalse), *falseType);
+ if (!ifFalse) {
+ return nullptr;
+ }
+ if (test->fKind == Expression::kBoolLiteral_Kind) {
+ // static boolean test, just return one of the branches
+ if (((BoolLiteral&) *test).fValue) {
+ return ifTrue;
+ } else {
+ return ifFalse;
+ }
+ }
+ return std::unique_ptr<Expression>(new TernaryExpression(node.fOffset,
+ std::move(test),
+ std::move(ifTrue),
+ std::move(ifFalse)));
+}
+
+std::unique_ptr<Expression> IRGenerator::call(int offset,
+ const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments) {
+ if (function.fParameters.size() != arguments.size()) {
+ String msg = "call to '" + function.fName + "' expected " +
+ to_string((uint64_t) function.fParameters.size()) +
+ " argument";
+ if (function.fParameters.size() != 1) {
+ msg += "s";
+ }
+ msg += ", but found " + to_string((uint64_t) arguments.size());
+ fErrors.error(offset, msg);
+ return nullptr;
+ }
+ std::vector<const Type*> types;
+ const Type* returnType;
+ if (!function.determineFinalTypes(arguments, &types, &returnType)) {
+ String msg = "no match for " + function.fName + "(";
+ String separator;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ msg += separator;
+ separator = ", ";
+ msg += arguments[i]->fType.description();
+ }
+ msg += ")";
+ fErrors.error(offset, msg);
+ return nullptr;
+ }
+ for (size_t i = 0; i < arguments.size(); i++) {
+ arguments[i] = this->coerce(std::move(arguments[i]), *types[i]);
+ if (!arguments[i]) {
+ return nullptr;
+ }
+ if (arguments[i] && (function.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag)) {
+ this->setRefKind(*arguments[i],
+ function.fParameters[i]->fModifiers.fFlags & Modifiers::kIn_Flag ?
+ VariableReference::kReadWrite_RefKind :
+ VariableReference::kPointer_RefKind);
+ }
+ }
+ return std::unique_ptr<FunctionCall>(new FunctionCall(offset, *returnType, function,
+ std::move(arguments)));
+}
+
+/**
+ * Determines the cost of coercing the arguments of a function to the required types. Cost has no
+ * particular meaning other than "lower costs are preferred". Returns INT_MAX if the call is not
+ * valid.
+ */
+int IRGenerator::callCost(const FunctionDeclaration& function,
+ const std::vector<std::unique_ptr<Expression>>& arguments) {
+ if (function.fParameters.size() != arguments.size()) {
+ return INT_MAX;
+ }
+ int total = 0;
+ std::vector<const Type*> types;
+ const Type* ignored;
+ if (!function.determineFinalTypes(arguments, &types, &ignored)) {
+ return INT_MAX;
+ }
+ for (size_t i = 0; i < arguments.size(); i++) {
+ int cost = arguments[i]->coercionCost(*types[i]);
+ if (cost != INT_MAX) {
+ total += cost;
+ } else {
+ return INT_MAX;
+ }
+ }
+ return total;
+}
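+
+// Overload-resolution sketch (editorial): given 'void f(int x)' and
+// 'void f(float x)', a call 'f(i)' with an int variable scores each candidate
+// by summing per-argument coercion costs; the int overload costs 0 while the
+// float overload pays for the int -> float conversion, so IRGenerator::call
+// below selects the int overload as the cheapest match.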
+
+std::unique_ptr<Expression> IRGenerator::call(int offset,
+ std::unique_ptr<Expression> functionValue,
+ std::vector<std::unique_ptr<Expression>> arguments) {
+ switch (functionValue->fKind) {
+ case Expression::kTypeReference_Kind:
+ return this->convertConstructor(offset,
+ ((TypeReference&) *functionValue).fValue,
+ std::move(arguments));
+ case Expression::kExternalValue_Kind: {
+ ExternalValue* v = ((ExternalValueReference&) *functionValue).fValue;
+ if (!v->canCall()) {
+ fErrors.error(offset, "this external value is not a function");
+ return nullptr;
+ }
+ int count = v->callParameterCount();
+ if (count != (int) arguments.size()) {
+ fErrors.error(offset, "external function expected " + to_string(count) +
+ " arguments, but found " + to_string((int) arguments.size()));
+ return nullptr;
+ }
+ static constexpr int PARAMETER_MAX = 16;
+ SkASSERT(count < PARAMETER_MAX);
+ const Type* types[PARAMETER_MAX];
+ v->getCallParameterTypes(types);
+ for (int i = 0; i < count; ++i) {
+ arguments[i] = this->coerce(std::move(arguments[i]), *types[i]);
+ if (!arguments[i]) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Expression>(new ExternalFunctionCall(offset, v->callReturnType(),
+ v, std::move(arguments)));
+ }
+ case Expression::kFunctionReference_Kind: {
+ FunctionReference* ref = (FunctionReference*) functionValue.get();
+ int bestCost = INT_MAX;
+ const FunctionDeclaration* best = nullptr;
+ if (ref->fFunctions.size() > 1) {
+ for (const auto& f : ref->fFunctions) {
+ int cost = this->callCost(*f, arguments);
+ if (cost < bestCost) {
+ bestCost = cost;
+ best = f;
+ }
+ }
+ if (best) {
+ return this->call(offset, *best, std::move(arguments));
+ }
+ String msg = "no match for " + ref->fFunctions[0]->fName + "(";
+ String separator;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ msg += separator;
+ separator = ", ";
+ msg += arguments[i]->fType.description();
+ }
+ msg += ")";
+ fErrors.error(offset, msg);
+ return nullptr;
+ }
+ return this->call(offset, *ref->fFunctions[0], std::move(arguments));
+ }
+ default:
+ fErrors.error(offset, "'" + functionValue->description() + "' is not a function");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertNumberConstructor(
+ int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> args) {
+ SkASSERT(type.isNumber());
+ if (args.size() != 1) {
+        fErrors.error(offset, "invalid arguments to '" + type.description() +
+                              "' constructor (expected exactly 1 argument, but found " +
+                              to_string((uint64_t) args.size()) + ")");
+ return nullptr;
+ }
+ if (type == args[0]->fType) {
+ return std::move(args[0]);
+ }
+ if (type.isFloat() && args.size() == 1 && args[0]->fKind == Expression::kFloatLiteral_Kind) {
+ double value = ((FloatLiteral&) *args[0]).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(offset, value, &type));
+ }
+ if (type.isFloat() && args.size() == 1 && args[0]->fKind == Expression::kIntLiteral_Kind) {
+ int64_t value = ((IntLiteral&) *args[0]).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(offset, (double) value, &type));
+ }
+ if (args[0]->fKind == Expression::kIntLiteral_Kind && (type == *fContext.fInt_Type ||
+ type == *fContext.fUInt_Type)) {
+ return std::unique_ptr<Expression>(new IntLiteral(offset,
+ ((IntLiteral&) *args[0]).fValue,
+ &type));
+ }
+ if (args[0]->fType == *fContext.fBool_Type) {
+ std::unique_ptr<IntLiteral> zero(new IntLiteral(fContext, offset, 0));
+ std::unique_ptr<IntLiteral> one(new IntLiteral(fContext, offset, 1));
+ return std::unique_ptr<Expression>(
+ new TernaryExpression(offset, std::move(args[0]),
+ this->coerce(std::move(one), type),
+ this->coerce(std::move(zero),
+ type)));
+ }
+ if (!args[0]->fType.isNumber()) {
+ fErrors.error(offset, "invalid argument to '" + type.description() +
+ "' constructor (expected a number or bool, but found '" +
+ args[0]->fType.description() + "')");
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new Constructor(offset, type, std::move(args)));
+}
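+
+// e.g. (editorial): 'float(3)' folds directly into the literal 3.0, and
+// 'float(b)' for a bool b lowers to the ternary 'b ? 1.0 : 0.0' built from the
+// coerced IntLiterals above.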
+
+static int component_count(const Type& type) {
+ switch (type.kind()) {
+ case Type::kVector_Kind:
+ return type.columns();
+ case Type::kMatrix_Kind:
+ return type.columns() * type.rows();
+ default:
+ return 1;
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertCompoundConstructor(
+ int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> args) {
+ SkASSERT(type.kind() == Type::kVector_Kind || type.kind() == Type::kMatrix_Kind);
+ if (type.kind() == Type::kMatrix_Kind && args.size() == 1 &&
+ args[0]->fType.kind() == Type::kMatrix_Kind) {
+ // matrix from matrix is always legal
+ return std::unique_ptr<Expression>(new Constructor(offset, type, std::move(args)));
+ }
+ int actual = 0;
+ int expected = type.rows() * type.columns();
+ if (args.size() != 1 || expected != component_count(args[0]->fType) ||
+ type.componentType().isNumber() != args[0]->fType.componentType().isNumber()) {
+ for (size_t i = 0; i < args.size(); i++) {
+ if (args[i]->fType.kind() == Type::kVector_Kind) {
+ if (type.componentType().isNumber() !=
+ args[i]->fType.componentType().isNumber()) {
+ fErrors.error(offset, "'" + args[i]->fType.description() + "' is not a valid "
+ "parameter to '" + type.description() +
+ "' constructor");
+ return nullptr;
+ }
+ actual += args[i]->fType.columns();
+ } else if (args[i]->fType.kind() == Type::kScalar_Kind) {
+ actual += 1;
+ if (type.kind() != Type::kScalar_Kind) {
+ args[i] = this->coerce(std::move(args[i]), type.componentType());
+ if (!args[i]) {
+ return nullptr;
+ }
+ }
+ } else {
+ fErrors.error(offset, "'" + args[i]->fType.description() + "' is not a valid "
+ "parameter to '" + type.description() + "' constructor");
+ return nullptr;
+ }
+ }
+ if (actual != 1 && actual != expected) {
+ fErrors.error(offset, "invalid arguments to '" + type.description() +
+ "' constructor (expected " + to_string(expected) +
+ " scalars, but found " + to_string(actual) + ")");
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Expression>(new Constructor(offset, type, std::move(args)));
+}
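+
+// Examples of the component-counting rules above (editor's sketch):
+// 'float3(1, 2, 3)' and 'float3(float2(1, 2), 3)' both supply the expected 3
+// components; 'float3(1)' is accepted as the single-scalar splat case
+// (actual == 1); 'float3(1, 2)' fails with "expected 3 scalars, but found 2".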
+
+std::unique_ptr<Expression> IRGenerator::convertConstructor(
+ int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> args) {
+ // FIXME: add support for structs
+ Type::Kind kind = type.kind();
+ if (args.size() == 1 && args[0]->fType == type) {
+ // argument is already the right type, just return it
+ return std::move(args[0]);
+ }
+ if (type.isNumber()) {
+ return this->convertNumberConstructor(offset, type, std::move(args));
+ } else if (kind == Type::kArray_Kind) {
+ const Type& base = type.componentType();
+ for (size_t i = 0; i < args.size(); i++) {
+ args[i] = this->coerce(std::move(args[i]), base);
+ if (!args[i]) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Expression>(new Constructor(offset, type, std::move(args)));
+ } else if (kind == Type::kVector_Kind || kind == Type::kMatrix_Kind) {
+ return this->convertCompoundConstructor(offset, type, std::move(args));
+ } else {
+ fErrors.error(offset, "cannot construct '" + type.description() + "'");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertPrefixExpression(const ASTNode& expression) {
+ SkASSERT(expression.fKind == ASTNode::Kind::kPrefix);
+ std::unique_ptr<Expression> base = this->convertExpression(*expression.begin());
+ if (!base) {
+ return nullptr;
+ }
+ switch (expression.getToken().fKind) {
+ case Token::PLUS:
+ if (!base->fType.isNumber() && base->fType.kind() != Type::kVector_Kind &&
+ base->fType != *fContext.fFloatLiteral_Type) {
+ fErrors.error(expression.fOffset,
+ "'+' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ return base;
+ case Token::MINUS:
+ if (base->fKind == Expression::kIntLiteral_Kind) {
+ return std::unique_ptr<Expression>(new IntLiteral(fContext, base->fOffset,
+ -((IntLiteral&) *base).fValue));
+ }
+ if (base->fKind == Expression::kFloatLiteral_Kind) {
+ double value = -((FloatLiteral&) *base).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(fContext, base->fOffset,
+ value));
+ }
+ if (!base->fType.isNumber() && base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(expression.fOffset,
+ "'-' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new PrefixExpression(Token::MINUS, std::move(base)));
+ case Token::PLUSPLUS:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fOffset,
+ String("'") + Compiler::OperatorName(expression.getToken().fKind) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->setRefKind(*base, VariableReference::kReadWrite_RefKind);
+ break;
+ case Token::MINUSMINUS:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fOffset,
+ String("'") + Compiler::OperatorName(expression.getToken().fKind) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->setRefKind(*base, VariableReference::kReadWrite_RefKind);
+ break;
+ case Token::LOGICALNOT:
+ if (base->fType != *fContext.fBool_Type) {
+ fErrors.error(expression.fOffset,
+ String("'") + Compiler::OperatorName(expression.getToken().fKind) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ if (base->fKind == Expression::kBoolLiteral_Kind) {
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, base->fOffset,
+ !((BoolLiteral&) *base).fValue));
+ }
+ break;
+ case Token::BITWISENOT:
+ if (base->fType != *fContext.fInt_Type && base->fType != *fContext.fUInt_Type) {
+ fErrors.error(expression.fOffset,
+ String("'") + Compiler::OperatorName(expression.getToken().fKind) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ break;
+ default:
+ ABORT("unsupported prefix operator\n");
+ }
+ return std::unique_ptr<Expression>(new PrefixExpression(expression.getToken().fKind,
+ std::move(base)));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertIndex(std::unique_ptr<Expression> base,
+ const ASTNode& index) {
+ if (base->fKind == Expression::kTypeReference_Kind) {
+ if (index.fKind == ASTNode::Kind::kInt) {
+ const Type& oldType = ((TypeReference&) *base).fValue;
+ SKSL_INT size = index.getInt();
+ Type* newType = (Type*) fSymbolTable->takeOwnership(std::unique_ptr<Symbol>(
+ new Type(oldType.name() + "[" + to_string(size) + "]",
+ Type::kArray_Kind, oldType, size)));
+ return std::unique_ptr<Expression>(new TypeReference(fContext, base->fOffset,
+ *newType));
+ } else {
+ fErrors.error(base->fOffset, "array size must be a constant");
+ return nullptr;
+ }
+ }
+ if (base->fType.kind() != Type::kArray_Kind && base->fType.kind() != Type::kMatrix_Kind &&
+ base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(base->fOffset, "expected array, but found '" + base->fType.description() +
+ "'");
+ return nullptr;
+ }
+ std::unique_ptr<Expression> converted = this->convertExpression(index);
+ if (!converted) {
+ return nullptr;
+ }
+ if (converted->fType != *fContext.fUInt_Type) {
+ converted = this->coerce(std::move(converted), *fContext.fInt_Type);
+ if (!converted) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Expression>(new IndexExpression(fContext, std::move(base),
+ std::move(converted)));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertField(std::unique_ptr<Expression> base,
+ StringFragment field) {
+ if (base->fKind == Expression::kExternalValue_Kind) {
+ ExternalValue& ev = *((ExternalValueReference&) *base).fValue;
+ ExternalValue* result = ev.getChild(String(field).c_str());
+ if (!result) {
+ fErrors.error(base->fOffset, "external value does not have a child named '" + field +
+ "'");
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new ExternalValueReference(base->fOffset, result));
+ }
+ auto fields = base->fType.fields();
+ for (size_t i = 0; i < fields.size(); i++) {
+ if (fields[i].fName == field) {
+ return std::unique_ptr<Expression>(new FieldAccess(std::move(base), (int) i));
+ }
+ }
+    fErrors.error(base->fOffset, "type '" + base->fType.description() + "' does not have a "
+                                 "field named '" + field + "'");
+ return nullptr;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertSwizzle(std::unique_ptr<Expression> base,
+ StringFragment fields) {
+ if (base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(base->fOffset, "cannot swizzle type '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ std::vector<int> swizzleComponents;
+ for (size_t i = 0; i < fields.fLength; i++) {
+ switch (fields[i]) {
+ case '0':
+ swizzleComponents.push_back(SKSL_SWIZZLE_0);
+ break;
+ case '1':
+ swizzleComponents.push_back(SKSL_SWIZZLE_1);
+ break;
+ case 'x':
+ case 'r':
+ case 's':
+ case 'L':
+ swizzleComponents.push_back(0);
+ break;
+ case 'y':
+ case 'g':
+ case 't':
+ case 'T':
+ if (base->fType.columns() >= 2) {
+ swizzleComponents.push_back(1);
+ break;
+ }
+ // fall through
+ case 'z':
+ case 'b':
+ case 'p':
+ case 'R':
+ if (base->fType.columns() >= 3) {
+ swizzleComponents.push_back(2);
+ break;
+ }
+ // fall through
+ case 'w':
+ case 'a':
+ case 'q':
+ case 'B':
+ if (base->fType.columns() >= 4) {
+ swizzleComponents.push_back(3);
+ break;
+ }
+ // fall through
+ default:
+ fErrors.error(base->fOffset, String::printf("invalid swizzle component '%c'",
+ fields[i]));
+ return nullptr;
+ }
+ }
+ SkASSERT(swizzleComponents.size() > 0);
+ if (swizzleComponents.size() > 4) {
+ fErrors.error(base->fOffset, "too many components in swizzle mask '" + fields + "'");
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new Swizzle(fContext, std::move(base), swizzleComponents));
+}
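+
+// Swizzle mask examples (editorial): for 'half4 v', 'v.xyz' selects components
+// 0,1,2 and 'v.rgb' / 'v.stp' name the same slots; 'v.x0y1' mixes in the
+// constant components SKSL_SWIZZLE_0 / SKSL_SWIZZLE_1; 'v.xyzxy' is rejected
+// for having more than four components; and '.w' on a half2 falls through to
+// the "invalid swizzle component" error.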
+
+std::unique_ptr<Expression> IRGenerator::getCap(int offset, String name) {
+ auto found = fCapsMap.find(name);
+ if (found == fCapsMap.end()) {
+ fErrors.error(offset, "unknown capability flag '" + name + "'");
+ return nullptr;
+ }
+ String fullName = "sk_Caps." + name;
+ return std::unique_ptr<Expression>(new Setting(offset, fullName,
+ found->second.literal(fContext, offset)));
+}
+
+std::unique_ptr<Expression> IRGenerator::getArg(int offset, String name) const {
+ auto found = fSettings->fArgs.find(name);
+ if (found == fSettings->fArgs.end()) {
+ return nullptr;
+ }
+ String fullName = "sk_Args." + name;
+ return std::unique_ptr<Expression>(new Setting(offset,
+ fullName,
+ found->second.literal(fContext, offset)));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertTypeField(int offset, const Type& type,
+ StringFragment field) {
+ std::unique_ptr<Expression> result;
+ for (const auto& e : *fProgramElements) {
+ if (e->fKind == ProgramElement::kEnum_Kind && type.name() == ((Enum&) *e).fTypeName) {
+ std::shared_ptr<SymbolTable> old = fSymbolTable;
+ fSymbolTable = ((Enum&) *e).fSymbols;
+            result = this->convertIdentifier(ASTNode(&fFile->fNodes, offset,
+                                                     ASTNode::Kind::kIdentifier, field));
+ fSymbolTable = old;
+ }
+ }
+ if (!result) {
+ fErrors.error(offset, "type '" + type.fName + "' does not have a field named '" + field +
+ "'");
+ }
+ return result;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertIndexExpression(const ASTNode& index) {
+ SkASSERT(index.fKind == ASTNode::Kind::kIndex);
+ auto iter = index.begin();
+ std::unique_ptr<Expression> base = this->convertExpression(*(iter++));
+ if (!base) {
+ return nullptr;
+ }
+ if (iter != index.end()) {
+ return this->convertIndex(std::move(base), *(iter++));
+ } else if (base->fKind == Expression::kTypeReference_Kind) {
+ const Type& oldType = ((TypeReference&) *base).fValue;
+ Type* newType = (Type*) fSymbolTable->takeOwnership(std::unique_ptr<Symbol>(
+ new Type(oldType.name() + "[]",
+ Type::kArray_Kind,
+ oldType,
+ -1)));
+ return std::unique_ptr<Expression>(new TypeReference(fContext, base->fOffset,
+ *newType));
+ }
+ fErrors.error(index.fOffset, "'[]' must follow a type name");
+ return nullptr;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertCallExpression(const ASTNode& callNode) {
+ SkASSERT(callNode.fKind == ASTNode::Kind::kCall);
+ auto iter = callNode.begin();
+ std::unique_ptr<Expression> base = this->convertExpression(*(iter++));
+ if (!base) {
+ return nullptr;
+ }
+ std::vector<std::unique_ptr<Expression>> arguments;
+ for (; iter != callNode.end(); ++iter) {
+ std::unique_ptr<Expression> converted = this->convertExpression(*iter);
+ if (!converted) {
+ return nullptr;
+ }
+ arguments.push_back(std::move(converted));
+ }
+ return this->call(callNode.fOffset, std::move(base), std::move(arguments));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertFieldExpression(const ASTNode& fieldNode) {
+ std::unique_ptr<Expression> base = this->convertExpression(*fieldNode.begin());
+ if (!base) {
+ return nullptr;
+ }
+ StringFragment field = fieldNode.getString();
+ if (base->fType == *fContext.fSkCaps_Type) {
+ return this->getCap(fieldNode.fOffset, field);
+ }
+ if (base->fType == *fContext.fSkArgs_Type) {
+ return this->getArg(fieldNode.fOffset, field);
+ }
+ if (base->fKind == Expression::kTypeReference_Kind) {
+ return this->convertTypeField(base->fOffset, ((TypeReference&) *base).fValue,
+ field);
+ }
+ if (base->fKind == Expression::kExternalValue_Kind) {
+ return this->convertField(std::move(base), field);
+ }
+ switch (base->fType.kind()) {
+ case Type::kVector_Kind:
+ return this->convertSwizzle(std::move(base), field);
+ case Type::kOther_Kind:
+ case Type::kStruct_Kind:
+ return this->convertField(std::move(base), field);
+ default:
+ fErrors.error(base->fOffset, "cannot swizzle value of type '" +
+ base->fType.description() + "'");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertPostfixExpression(const ASTNode& expression) {
+ std::unique_ptr<Expression> base = this->convertExpression(*expression.begin());
+ if (!base) {
+ return nullptr;
+ }
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fOffset,
+ "'" + String(Compiler::OperatorName(expression.getToken().fKind)) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->setRefKind(*base, VariableReference::kReadWrite_RefKind);
+ return std::unique_ptr<Expression>(new PostfixExpression(std::move(base),
+ expression.getToken().fKind));
+}
+
+void IRGenerator::checkValid(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kFunctionReference_Kind:
+ fErrors.error(expr.fOffset, "expected '(' to begin function call");
+ break;
+ case Expression::kTypeReference_Kind:
+ fErrors.error(expr.fOffset, "expected '(' to begin constructor invocation");
+ break;
+ default:
+ if (expr.fType == *fContext.fInvalid_Type) {
+ fErrors.error(expr.fOffset, "invalid expression");
+ }
+ }
+}
+
+bool IRGenerator::checkSwizzleWrite(const Swizzle& swizzle) {
+ int bits = 0;
+ for (int idx : swizzle.fComponents) {
+ if (idx < 0) {
+ fErrors.error(swizzle.fOffset, "cannot write to a swizzle mask containing a constant");
+ return false;
+ }
+ SkASSERT(idx <= 3);
+ int bit = 1 << idx;
+ if (bits & bit) {
+ fErrors.error(swizzle.fOffset,
+ "cannot write to the same swizzle field more than once");
+ return false;
+ }
+ bits |= bit;
+ }
+ return true;
+}
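+
+// The bit mask above detects duplicates in constant time per component
+// (editorial note): writing 'v.xy' sets bits 0b01 and then 0b10, while 'v.xx'
+// fails on the second 'x' because bit 0b01 is already set. Constant components
+// ('0'/'1') reach this function as negative indices and are rejected outright.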
+
+void IRGenerator::setRefKind(const Expression& expr, VariableReference::RefKind kind) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference&) expr).fVariable;
+ if (var.fModifiers.fFlags & (Modifiers::kConst_Flag | Modifiers::kUniform_Flag)) {
+ fErrors.error(expr.fOffset,
+ "cannot modify immutable variable '" + var.fName + "'");
+ }
+ ((VariableReference&) expr).setRefKind(kind);
+ break;
+ }
+ case Expression::kFieldAccess_Kind:
+ this->setRefKind(*((FieldAccess&) expr).fBase, kind);
+ break;
+ case Expression::kSwizzle_Kind: {
+ const Swizzle& swizzle = (Swizzle&) expr;
+ this->checkSwizzleWrite(swizzle);
+ this->setRefKind(*swizzle.fBase, kind);
+ break;
+ }
+ case Expression::kIndex_Kind:
+ this->setRefKind(*((IndexExpression&) expr).fBase, kind);
+ break;
+ case Expression::kTernary_Kind: {
+ TernaryExpression& t = (TernaryExpression&) expr;
+ this->setRefKind(*t.fIfTrue, kind);
+ this->setRefKind(*t.fIfFalse, kind);
+ break;
+ }
+ case Expression::kExternalValue_Kind: {
+ const ExternalValue& v = *((ExternalValueReference&) expr).fValue;
+ if (!v.canWrite()) {
+ fErrors.error(expr.fOffset,
+ "cannot modify immutable external value '" + v.fName + "'");
+ }
+ break;
+ }
+ default:
+ fErrors.error(expr.fOffset, "cannot assign to '" + expr.description() + "'");
+ break;
+ }
+}
+
+void IRGenerator::convertProgram(Program::Kind kind,
+ const char* text,
+ size_t length,
+ SymbolTable& types,
+ std::vector<std::unique_ptr<ProgramElement>>* out) {
+ fKind = kind;
+ fProgramElements = out;
+ Parser parser(text, length, types, fErrors);
+ fFile = parser.file();
+ if (fErrors.errorCount()) {
+ return;
+ }
+ SkASSERT(fFile);
+ for (const auto& decl : fFile->root()) {
+ switch (decl.fKind) {
+ case ASTNode::Kind::kVarDeclarations: {
+ std::unique_ptr<VarDeclarations> s = this->convertVarDeclarations(
+ decl,
+ Variable::kGlobal_Storage);
+ if (s) {
+ fProgramElements->push_back(std::move(s));
+ }
+ break;
+ }
+ case ASTNode::Kind::kEnum: {
+ this->convertEnum(decl);
+ break;
+ }
+ case ASTNode::Kind::kFunction: {
+ this->convertFunction(decl);
+ break;
+ }
+ case ASTNode::Kind::kModifiers: {
+ std::unique_ptr<ModifiersDeclaration> f = this->convertModifiersDeclaration(decl);
+ if (f) {
+ fProgramElements->push_back(std::move(f));
+ }
+ break;
+ }
+ case ASTNode::Kind::kInterfaceBlock: {
+ std::unique_ptr<InterfaceBlock> i = this->convertInterfaceBlock(decl);
+ if (i) {
+ fProgramElements->push_back(std::move(i));
+ }
+ break;
+ }
+ case ASTNode::Kind::kExtension: {
+ std::unique_ptr<Extension> e = this->convertExtension(decl.fOffset,
+ decl.getString());
+ if (e) {
+ fProgramElements->push_back(std::move(e));
+ }
+ break;
+ }
+ case ASTNode::Kind::kSection: {
+ std::unique_ptr<Section> s = this->convertSection(decl);
+ if (s) {
+ fProgramElements->push_back(std::move(s));
+ }
+ break;
+ }
+ default:
+ ABORT("unsupported declaration: %s\n", decl.description().c_str());
+ }
+ }
+}
+
+}  // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLIRGenerator.h b/gfx/skia/skia/src/sksl/SkSLIRGenerator.h
new file mode 100644
index 0000000000..20a556f4e3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIRGenerator.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRGENERATOR
+#define SKSL_IRGENERATOR
+
+#include "src/sksl/SkSLASTFile.h"
+#include "src/sksl/SkSLASTNode.h"
+#include "src/sksl/SkSLErrorReporter.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSection.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLTypeReference.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+struct Swizzle;
+
+/**
+ * Performs semantic analysis on an abstract syntax tree (AST) and produces the corresponding
+ * (unoptimized) intermediate representation (IR).
+ */
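+// Typical driving sequence (editorial sketch -- in practice SkSL::Compiler owns
+// the IRGenerator and wraps these calls):
+//
+//     IRGenerator ir(&context, rootSymbolTable, errorReporter);
+//     std::vector<std::unique_ptr<ProgramElement>> elements;
+//     ir.convertProgram(Program::kFragment_Kind, text, strlen(text),
+//                       typeTable, &elements);
+//     // on success, 'elements' holds the unoptimized IR for the program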
+class IRGenerator {
+public:
+ IRGenerator(const Context* context, std::shared_ptr<SymbolTable> root,
+ ErrorReporter& errorReporter);
+
+ void convertProgram(Program::Kind kind,
+ const char* text,
+ size_t length,
+ SymbolTable& types,
+ std::vector<std::unique_ptr<ProgramElement>>* result);
+
+ /**
+ * If both operands are compile-time constants and can be folded, returns an expression
+ * representing the folded value. Otherwise, returns null. Note that unlike most other functions
+ * here, null does not represent a compilation error.
+ */
+ std::unique_ptr<Expression> constantFold(const Expression& left,
+ Token::Kind op,
+ const Expression& right) const;
+
+ std::unique_ptr<Expression> getArg(int offset, String name) const;
+
+ Program::Inputs fInputs;
+ const Program::Settings* fSettings;
+ const Context& fContext;
+ Program::Kind fKind;
+
+private:
+ /**
+ * Prepare to compile a program. Resets state, pushes a new symbol table, and installs the
+ * settings.
+ */
+ void start(const Program::Settings* settings,
+ std::vector<std::unique_ptr<ProgramElement>>* inherited);
+
+ /**
+ * Performs cleanup after compilation is complete.
+ */
+ void finish();
+
+ void pushSymbolTable();
+ void popSymbolTable();
+
+ std::unique_ptr<VarDeclarations> convertVarDeclarations(const ASTNode& decl,
+ Variable::Storage storage);
+ void convertFunction(const ASTNode& f);
+ std::unique_ptr<Statement> convertStatement(const ASTNode& statement);
+ std::unique_ptr<Expression> convertExpression(const ASTNode& expression);
+ std::unique_ptr<ModifiersDeclaration> convertModifiersDeclaration(const ASTNode& m);
+
+ const Type* convertType(const ASTNode& type);
+ std::unique_ptr<Expression> call(int offset,
+ const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments);
+ int callCost(const FunctionDeclaration& function,
+ const std::vector<std::unique_ptr<Expression>>& arguments);
+ std::unique_ptr<Expression> call(int offset, std::unique_ptr<Expression> function,
+ std::vector<std::unique_ptr<Expression>> arguments);
+ int coercionCost(const Expression& expr, const Type& type);
+ std::unique_ptr<Expression> coerce(std::unique_ptr<Expression> expr, const Type& type);
+ std::unique_ptr<Block> convertBlock(const ASTNode& block);
+ std::unique_ptr<Statement> convertBreak(const ASTNode& b);
+ std::unique_ptr<Expression> convertNumberConstructor(
+ int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> params);
+ std::unique_ptr<Expression> convertCompoundConstructor(
+ int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> params);
+ std::unique_ptr<Expression> convertConstructor(int offset,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> params);
+ std::unique_ptr<Statement> convertContinue(const ASTNode& c);
+ std::unique_ptr<Statement> convertDiscard(const ASTNode& d);
+ std::unique_ptr<Statement> convertDo(const ASTNode& d);
+ std::unique_ptr<Statement> convertSwitch(const ASTNode& s);
+ std::unique_ptr<Expression> convertBinaryExpression(const ASTNode& expression);
+ std::unique_ptr<Extension> convertExtension(int offset, StringFragment name);
+ std::unique_ptr<Statement> convertExpressionStatement(const ASTNode& s);
+ std::unique_ptr<Statement> convertFor(const ASTNode& f);
+ std::unique_ptr<Expression> convertIdentifier(const ASTNode& identifier);
+ std::unique_ptr<Statement> convertIf(const ASTNode& s);
+ std::unique_ptr<Expression> convertIndex(std::unique_ptr<Expression> base,
+ const ASTNode& index);
+ std::unique_ptr<InterfaceBlock> convertInterfaceBlock(const ASTNode& s);
+ Modifiers convertModifiers(const Modifiers& m);
+ std::unique_ptr<Expression> convertPrefixExpression(const ASTNode& expression);
+ std::unique_ptr<Statement> convertReturn(const ASTNode& r);
+ std::unique_ptr<Section> convertSection(const ASTNode& e);
+ std::unique_ptr<Expression> getCap(int offset, String name);
+ std::unique_ptr<Expression> convertCallExpression(const ASTNode& expression);
+ std::unique_ptr<Expression> convertFieldExpression(const ASTNode& expression);
+ std::unique_ptr<Expression> convertIndexExpression(const ASTNode& expression);
+ std::unique_ptr<Expression> convertPostfixExpression(const ASTNode& expression);
+ std::unique_ptr<Expression> convertTypeField(int offset, const Type& type,
+ StringFragment field);
+ std::unique_ptr<Expression> convertField(std::unique_ptr<Expression> base,
+ StringFragment field);
+ std::unique_ptr<Expression> convertSwizzle(std::unique_ptr<Expression> base,
+ StringFragment fields);
+ std::unique_ptr<Expression> convertTernaryExpression(const ASTNode& expression);
+ std::unique_ptr<Statement> convertVarDeclarationStatement(const ASTNode& s);
+ std::unique_ptr<Statement> convertWhile(const ASTNode& w);
+ void convertEnum(const ASTNode& e);
+ std::unique_ptr<Block> applyInvocationIDWorkaround(std::unique_ptr<Block> main);
+ // returns a statement which converts sk_Position from device to normalized coordinates
+ std::unique_ptr<Statement> getNormalizeSkPositionCode();
+
+ void checkValid(const Expression& expr);
+ void setRefKind(const Expression& expr, VariableReference::RefKind kind);
+ void getConstantInt(const Expression& value, int64_t* out);
+ bool checkSwizzleWrite(const Swizzle& swizzle);
+
+ std::unique_ptr<ASTFile> fFile;
+ const FunctionDeclaration* fCurrentFunction;
+ std::unordered_map<String, Program::Settings::Value> fCapsMap;
+ std::shared_ptr<SymbolTable> fRootSymbolTable;
+ std::shared_ptr<SymbolTable> fSymbolTable;
+ // holds extra temp variable declarations needed for the current function
+ std::vector<std::unique_ptr<Statement>> fExtraVars;
+ int fLoopLevel;
+ int fSwitchLevel;
+ // count of temporary variables we have created
+ int fTmpCount;
+ ErrorReporter& fErrors;
+ int fInvocations;
+ std::vector<std::unique_ptr<ProgramElement>>* fProgramElements;
+ const Variable* fSkPerVertex = nullptr;
+ Variable* fRTAdjust;
+ Variable* fRTAdjustInterfaceBlock;
+ int fRTAdjustFieldIndex;
+ bool fStarted = false;
+
+ friend class AutoSymbolTable;
+ friend class AutoLoopLevel;
+ friend class AutoSwitchLevel;
+ friend class Compiler;
+};
+
+}  // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLLexer.cpp b/gfx/skia/skia/src/sksl/SkSLLexer.cpp
new file mode 100644
index 0000000000..2815408341
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLLexer.cpp
@@ -0,0 +1,1037 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+/*****************************************************************************************
+ ******************** This file was generated by sksllex. Do not edit. *******************
+ *****************************************************************************************/
+#include "src/sksl/SkSLLexer.h"
+
+namespace SkSL {
+
+static const uint8_t INVALID_CHAR = 18;
+static int8_t mappings[127] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 4, 3, 5, 6, 7, 8, 3, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 26, 26, 26, 27, 26, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 28, 6, 6, 6,
+ 29, 6, 6, 30, 3, 31, 32, 33, 3, 34, 35, 36, 37, 38, 39, 6, 40, 41, 6, 42, 43, 44,
+ 45, 46, 47, 6, 48, 49, 50, 51, 52, 53, 54, 55, 6, 56, 57, 58, 59};
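+// Reader's note (editorial, not produced by sksllex): mappings[] collapses each
+// 7-bit input character into one of 60 equivalence classes so that the DFA
+// below needs one column per class rather than per character;
+// transitions[class][state] then yields the next state, with state 0 apparently
+// serving as the dead state that ends the current token.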
+static int16_t transitions[60][304] = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 35, 36, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 44, 0, 0, 47, 0,
+ 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 26, 0, 0, 0, 0, 0, 32, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 44, 0, 0, 47, 0,
+ 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0,
+ 0, 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 38, 35, 37, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 40, 0, 0, 0, 0, 0, 0, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 33, 33, 33, 0, 35, 35, 0, 38, 0, 49, 42,
+ 42, 45, 45, 45, 48, 48, 48, 49, 52, 52, 52, 54, 54, 49, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 55, 0, 0, 0, 0, 0, 0, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 33, 33, 33, 0, 35, 35, 0, 38, 0, 49, 42,
+ 42, 45, 45, 45, 48, 48, 48, 49, 52, 52, 52, 54, 54, 49, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 63, 0, 0, 0, 6, 0, 0, 0, 0, 0, 12, 0, 16, 15, 0, 0, 0, 0, 20, 0, 23, 0, 0,
+ 0, 27, 0, 0, 0, 0, 0, 0, 0, 0, 39, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62, 61, 0, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, 0, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 299, 0, 301, 0, 0, 0,
+ },
+ {
+ 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 50, 46, 43, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 54, 54, 50, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 99, 10, 10, 10, 10, 10, 105, 10, 10, 10, 10, 10, 111, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 101, 10, 10, 10, 10, 10, 107, 10, 10, 10, 10, 10, 113, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 100, 10, 10, 10, 10, 10, 106, 10, 10, 10, 10, 10, 112, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 86, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 87, 10, 10, 10, 10, 10, 93, 10, 10,
+ 10, 10, 10, 102, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 238, 10, 10, 10, 242, 10, 10, 10, 10, 247,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 97, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 117, 10, 10, 10, 10, 10, 10, 10, 125, 10, 10, 10, 129, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 150, 10, 10,
+ 10, 10, 10, 10, 157, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 169, 10, 10,
+ 10, 10, 174, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 185, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 220, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 240, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 279, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 114, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 124, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 78, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 96, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 156, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 198, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 212, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 230, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 252, 10, 10, 10, 10, 10, 258, 10, 10, 10, 10, 263, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 147, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 159, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 221, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 245, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 161, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 50, 46, 43, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 54,
+ 54, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71,
+ 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10,
+ 91, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 116, 10, 10, 10, 10, 10, 122, 10, 10, 10, 10,
+ 127, 10, 10, 10, 10, 10, 10, 134, 10, 136, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 146, 10, 148, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 164, 10, 10, 10, 10, 10, 10, 10, 172, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 193, 10, 10, 10, 197, 10,
+ 10, 10, 10, 202, 10, 10, 10, 10, 10, 10, 10, 10, 211, 10, 10, 10, 10, 10,
+ 10, 10, 219, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 246, 10, 248, 10, 10, 251, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 268, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 283, 10, 10, 10, 10, 288,
+ 10, 10, 10, 292, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 168, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 54, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 73, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 120, 121, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 149, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 179, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 249, 250, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 272, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 79, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 133,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 239, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 264, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 285,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 178, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 72, 71,
+ 71, 71, 71, 76, 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 89, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 103, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 143, 10,
+ 10, 10, 10, 154, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 200, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 213, 10, 215, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 229, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 244, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 261, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 271, 10, 10, 10, 10, 10, 10, 10, 10, 10, 281, 10, 10, 10, 10, 286, 10, 10,
+ 10, 290, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 118, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 237, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 184, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71,
+ 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10,
+ 10, 92, 10, 94, 10, 10, 10, 98, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 128, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 152, 10, 10, 10, 10, 10, 10, 10, 10, 10, 162,
+ 10, 10, 10, 10, 10, 10, 173, 170, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 204, 205, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 224, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 278, 10, 10, 10, 282, 10, 10, 10, 10, 287, 10,
+ 10, 10, 10, 10, 10, 10, 295, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 167, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 275, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 190, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 104, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 139,
+ 10, 10, 10, 137, 10, 10, 10, 10, 10, 10, 144, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 165, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 180, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 217, 10, 10, 10, 10, 10, 223, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 235, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 270, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 294, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 206, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71,
+ 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10,
+ 10, 10, 10, 10, 95, 10, 10, 10, 10, 10, 10, 10, 108, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 132, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 160, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 176, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 181, 10, 10, 10, 10, 10, 187, 10, 10, 10, 191, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 216,
+ 10, 10, 10, 10, 10, 222, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 273, 10, 10, 10, 277, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 293, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 209, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 88, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 192, 10, 10, 10, 196, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 218, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 115, 10, 10, 10, 10, 10, 10, 10, 123, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 135, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 158, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 177, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 194, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 210, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 228,
+ 10, 10, 10, 10, 10, 234, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 256, 10, 10, 10, 10, 10, 10, 10, 10, 10, 266,
+ 10, 10, 10, 10, 10, 10, 10, 274, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 289,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 236, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 71,
+ 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 126,
+ 10, 10, 10, 130, 131, 10, 10, 10, 10, 10, 10, 10, 10, 140, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 155, 10, 10, 10, 10, 10, 10, 10,
+ 163, 10, 10, 10, 10, 10, 10, 10, 171, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 195, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 214, 10, 10,
+ 10, 10, 10, 226, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 241, 10, 243, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 254, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 265, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35,
+ 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71,
+ 71, 71, 71, 71, 77, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 110, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 138, 10, 142, 141, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 153, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 175, 10, 10, 10, 10, 10,
+ 10, 10, 183, 10, 10, 10, 10, 10, 189, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 199, 10, 10, 10, 10, 10, 10, 10, 10, 208, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 232, 10, 10, 10, 10, 10, 10, 227, 10, 10, 10, 231, 10, 10, 10,
+ 10, 10, 255, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 253, 10, 10, 10, 10, 10, 259, 10, 10, 262, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 280, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 291, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 269, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 109, 10, 10, 10, 10, 10,
+ 119, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 145, 10, 10, 10, 10, 10, 151, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 166, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 182, 10, 10, 10, 10, 10, 188, 10, 10,
+ 203, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 207, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 233, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 257, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 267, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 276, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 201, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 284, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 75, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 260, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 53, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 0, 0, 0, 0,
+ 0, 0, 10, 10, 10, 90, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 9, 0, 0, 0, 0, 0, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0,
+ 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 71, 71, 71, 71, 71,
+ 71, 71, 71, 71, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 186, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 225, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 296, 10, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 297, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 298, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 300, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 302, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 35, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+};
+
+static int8_t accepts[304] = {
+ -1, -1, 95, 95, 98, 69, 75, 98, 43, 42, 42, 59, 84, 64, 68, 92, 89, 45, 46, 57, 82, 55,
+ 53, 80, 52, 56, 54, 81, 94, 51, 1, -1, -1, 1, 58, -1, -1, 97, 96, 83, 2, 1, 1, -1,
+ -1, 1, -1, -1, 1, 2, -1, -1, 1, -1, 2, 2, 72, 71, 93, 77, 60, 85, 79, 73, 74, 76,
+ 78, 61, 86, 70, 98, 44, 44, 6, 44, 44, 44, 44, 44, 12, 49, 50, 63, 88, 67, 91, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 34, 42, 42, 42, 42, 42, 35, 42, 42,
+ 42, 42, 42, 36, 42, 42, 42, 42, 15, 42, 42, 42, 42, 32, 42, 42, 42, 13, 42, 42, 42, 41,
+ 42, 42, 42, 42, 42, 42, 29, 42, 42, 24, 42, 42, 42, 42, 16, 42, 42, 42, 42, 42, 42, 14,
+ 42, 42, 42, 42, 42, 17, 10, 42, 42, 42, 7, 42, 42, 40, 42, 42, 42, 42, 4, 42, 42, 25,
+ 42, 8, 42, 5, 20, 42, 42, 22, 42, 42, 42, 42, 42, 38, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 26, 42, 42, 19, 42, 42, 21, 42, 42, 42, 42, 42, 42, 42, 42, 39, 42, 42,
+ 42, 42, 42, 42, 42, 27, 42, 42, 42, 42, 42, 31, 42, 42, 42, 18, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 33, 42, 42, 42, 42, 37, 42, 42, 42, 42,
+ 11, 42, 42, 42, 3, 42, 42, 42, 42, 42, 42, 23, 42, 42, 42, 42, 42, 42, 42, 30, 42, 42,
+ 42, 42, 9, 42, 42, 42, 42, 42, 42, 42, 28, 47, 62, 87, 66, 90, 48, 65,
+};
+
+Token Lexer::next() {
+ // note that we cheat here: normally a lexer needs to worry about the case
+ // where a token has a prefix which is not itself a valid token - for instance,
+ // maybe we have a valid token 'while', but 'w', 'wh', etc. are not valid
+ // tokens. Our grammar doesn't have this property, so we can simplify the logic
+ // a bit.
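+    // (e.g. every prefix of 'while' -- 'w', 'wh', 'whi', ... -- is itself a valid
+    // IDENTIFIER token in SkSL, so the state we stop in is always an accepting one)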
+ int32_t startOffset = fOffset;
+ if (startOffset == fLength) {
+ return Token(Token::END_OF_FILE, startOffset, 0);
+ }
+ int16_t state = 1;
+ for (;;) {
+ if (fOffset >= fLength) {
+ if (accepts[state] == -1) {
+ return Token(Token::END_OF_FILE, startOffset, 0);
+ }
+ break;
+ }
+ uint8_t c = (uint8_t)fText[fOffset];
+ if (c <= 8 || c >= 127) {
+ c = INVALID_CHAR;
+ }
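+        // map the raw byte to its character class, then step the DFA; a transition
+        // to state 0 means "no transition", so the current token ends here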
+ int16_t newState = transitions[mappings[c]][state];
+ if (!newState) {
+ break;
+ }
+ state = newState;
+ ++fOffset;
+ }
+ Token::Kind kind = (Token::Kind)accepts[state];
+ return Token(kind, startOffset, fOffset - startOffset);
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLLexer.h b/gfx/skia/skia/src/sksl/SkSLLexer.h
new file mode 100644
index 0000000000..233ccf9509
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLLexer.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+/*****************************************************************************************
+ ******************** This file was generated by sksllex. Do not edit. *******************
+ *****************************************************************************************/
+#ifndef SKSL_Lexer
+#define SKSL_Lexer
+#include <cstddef>
+#include <cstdint>
+namespace SkSL {
+
+struct Token {
+ enum Kind {
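+        // each name is #undef'd first so that a platform macro with the same name
+        // (e.g. IN/OUT from Windows headers) cannot break the enumerators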
+#undef END_OF_FILE
+ END_OF_FILE,
+#undef FLOAT_LITERAL
+ FLOAT_LITERAL,
+#undef INT_LITERAL
+ INT_LITERAL,
+#undef TRUE_LITERAL
+ TRUE_LITERAL,
+#undef FALSE_LITERAL
+ FALSE_LITERAL,
+#undef IF
+ IF,
+#undef STATIC_IF
+ STATIC_IF,
+#undef ELSE
+ ELSE,
+#undef FOR
+ FOR,
+#undef WHILE
+ WHILE,
+#undef DO
+ DO,
+#undef SWITCH
+ SWITCH,
+#undef STATIC_SWITCH
+ STATIC_SWITCH,
+#undef CASE
+ CASE,
+#undef DEFAULT
+ DEFAULT,
+#undef BREAK
+ BREAK,
+#undef CONTINUE
+ CONTINUE,
+#undef DISCARD
+ DISCARD,
+#undef RETURN
+ RETURN,
+#undef NULL_LITERAL
+ NULL_LITERAL,
+#undef IN
+ IN,
+#undef OUT
+ OUT,
+#undef INOUT
+ INOUT,
+#undef UNIFORM
+ UNIFORM,
+#undef CONST
+ CONST,
+#undef FLAT
+ FLAT,
+#undef NOPERSPECTIVE
+ NOPERSPECTIVE,
+#undef READONLY
+ READONLY,
+#undef WRITEONLY
+ WRITEONLY,
+#undef COHERENT
+ COHERENT,
+#undef VOLATILE
+ VOLATILE,
+#undef RESTRICT
+ RESTRICT,
+#undef BUFFER
+ BUFFER,
+#undef HASSIDEEFFECTS
+ HASSIDEEFFECTS,
+#undef PLS
+ PLS,
+#undef PLSIN
+ PLSIN,
+#undef PLSOUT
+ PLSOUT,
+#undef STRUCT
+ STRUCT,
+#undef LAYOUT
+ LAYOUT,
+#undef PRECISION
+ PRECISION,
+#undef ENUM
+ ENUM,
+#undef CLASS
+ CLASS,
+#undef IDENTIFIER
+ IDENTIFIER,
+#undef DIRECTIVE
+ DIRECTIVE,
+#undef SECTION
+ SECTION,
+#undef LPAREN
+ LPAREN,
+#undef RPAREN
+ RPAREN,
+#undef LBRACE
+ LBRACE,
+#undef RBRACE
+ RBRACE,
+#undef LBRACKET
+ LBRACKET,
+#undef RBRACKET
+ RBRACKET,
+#undef DOT
+ DOT,
+#undef COMMA
+ COMMA,
+#undef PLUSPLUS
+ PLUSPLUS,
+#undef MINUSMINUS
+ MINUSMINUS,
+#undef PLUS
+ PLUS,
+#undef MINUS
+ MINUS,
+#undef STAR
+ STAR,
+#undef SLASH
+ SLASH,
+#undef PERCENT
+ PERCENT,
+#undef SHL
+ SHL,
+#undef SHR
+ SHR,
+#undef BITWISEOR
+ BITWISEOR,
+#undef BITWISEXOR
+ BITWISEXOR,
+#undef BITWISEAND
+ BITWISEAND,
+#undef BITWISENOT
+ BITWISENOT,
+#undef LOGICALOR
+ LOGICALOR,
+#undef LOGICALXOR
+ LOGICALXOR,
+#undef LOGICALAND
+ LOGICALAND,
+#undef LOGICALNOT
+ LOGICALNOT,
+#undef QUESTION
+ QUESTION,
+#undef COLONCOLON
+ COLONCOLON,
+#undef COLON
+ COLON,
+#undef EQ
+ EQ,
+#undef EQEQ
+ EQEQ,
+#undef NEQ
+ NEQ,
+#undef GT
+ GT,
+#undef LT
+ LT,
+#undef GTEQ
+ GTEQ,
+#undef LTEQ
+ LTEQ,
+#undef PLUSEQ
+ PLUSEQ,
+#undef MINUSEQ
+ MINUSEQ,
+#undef STAREQ
+ STAREQ,
+#undef SLASHEQ
+ SLASHEQ,
+#undef PERCENTEQ
+ PERCENTEQ,
+#undef SHLEQ
+ SHLEQ,
+#undef SHREQ
+ SHREQ,
+#undef BITWISEOREQ
+ BITWISEOREQ,
+#undef BITWISEXOREQ
+ BITWISEXOREQ,
+#undef BITWISEANDEQ
+ BITWISEANDEQ,
+#undef LOGICALOREQ
+ LOGICALOREQ,
+#undef LOGICALXOREQ
+ LOGICALXOREQ,
+#undef LOGICALANDEQ
+ LOGICALANDEQ,
+#undef SEMICOLON
+ SEMICOLON,
+#undef ARROW
+ ARROW,
+#undef WHITESPACE
+ WHITESPACE,
+#undef LINE_COMMENT
+ LINE_COMMENT,
+#undef BLOCK_COMMENT
+ BLOCK_COMMENT,
+#undef INVALID
+ INVALID,
+ };
+
+ Token() : fKind(Kind::INVALID), fOffset(-1), fLength(-1) {}
+
+ Token(Kind kind, int32_t offset, int32_t length)
+ : fKind(kind), fOffset(offset), fLength(length) {}
+
+ Kind fKind;
+    int32_t fOffset;
+    int32_t fLength;
+};
+
+class Lexer {
+public:
+ void start(const char* text, int32_t length) {
+ fText = text;
+ fLength = length;
+ fOffset = 0;
+ }
+
+ Token next();
+
+private:
+ const char* fText;
+ int32_t fLength;
+ int32_t fOffset;
+};
+
+} // namespace
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMain.cpp b/gfx/skia/skia/src/sksl/SkSLMain.cpp
new file mode 100644
index 0000000000..b8009cddee
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMain.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <fstream>
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLFileOutputStream.h"
+
+// Given the path to a file (e.g. src/gpu/effects/GrFooFragmentProcessor.fp) and the expected
+// filename prefix and suffix (e.g. "Gr" and ".fp"), returns the "base name" of the
+// file (in this case, 'FooFragmentProcessor'). If no match, returns the empty string.
+static SkSL::String base_name(const char* fpPath, const char* prefix, const char* suffix) {
+ SkSL::String result;
+ const char* end = fpPath + strlen(fpPath);
+ const char* fileName = end;
+ // back up until we find a slash
+ while (fileName != fpPath && '/' != *(fileName - 1) && '\\' != *(fileName - 1)) {
+ --fileName;
+ }
+ if (!strncmp(fileName, prefix, strlen(prefix)) &&
+ !strncmp(end - strlen(suffix), suffix, strlen(suffix))) {
+ result.append(fileName + strlen(prefix), end - fileName - strlen(prefix) - strlen(suffix));
+ }
+ return result;
+}
+
+/**
+ * Very simple standalone executable to facilitate testing.
+ */
+int main(int argc, const char** argv) {
+ if (argc != 3) {
+ printf("usage: skslc <input> <output>\n");
+ exit(1);
+ }
+ SkSL::Program::Kind kind;
+ SkSL::String input(argv[1]);
+ if (input.endsWith(".vert")) {
+ kind = SkSL::Program::kVertex_Kind;
+ } else if (input.endsWith(".frag")) {
+ kind = SkSL::Program::kFragment_Kind;
+ } else if (input.endsWith(".geom")) {
+ kind = SkSL::Program::kGeometry_Kind;
+ } else if (input.endsWith(".fp")) {
+ kind = SkSL::Program::kFragmentProcessor_Kind;
+ } else if (input.endsWith(".stage")) {
+ kind = SkSL::Program::kPipelineStage_Kind;
+ } else {
+ printf("input filename must end in '.vert', '.frag', '.geom', '.fp', or '.stage'\n");
+ exit(1);
+ }
+
+ std::ifstream in(argv[1]);
+ std::string stdText((std::istreambuf_iterator<char>(in)),
+ std::istreambuf_iterator<char>());
+ SkSL::String text(stdText.c_str());
+ if (in.rdstate()) {
+ printf("error reading '%s'\n", argv[1]);
+ exit(2);
+ }
+ SkSL::Program::Settings settings;
+ settings.fArgs.insert(std::make_pair("gpImplementsDistanceVector", 1));
+ SkSL::String name(argv[2]);
+ if (name.endsWith(".spirv")) {
+ SkSL::FileOutputStream out(argv[2]);
+ SkSL::Compiler compiler;
+ if (!out.isValid()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ std::unique_ptr<SkSL::Program> program = compiler.convertProgram(kind, text, settings);
+ if (!program || !compiler.toSPIRV(*program, out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (!out.close()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ } else if (name.endsWith(".glsl")) {
+ SkSL::FileOutputStream out(argv[2]);
+ SkSL::Compiler compiler;
+ if (!out.isValid()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ std::unique_ptr<SkSL::Program> program = compiler.convertProgram(kind, text, settings);
+ if (!program || !compiler.toGLSL(*program, out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (!out.close()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ } else if (name.endsWith(".metal")) {
+ SkSL::FileOutputStream out(argv[2]);
+ SkSL::Compiler compiler;
+ if (!out.isValid()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ std::unique_ptr<SkSL::Program> program = compiler.convertProgram(kind, text, settings);
+ if (!program || !compiler.toMetal(*program, out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (!out.close()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ } else if (name.endsWith(".h")) {
+ SkSL::FileOutputStream out(argv[2]);
+ SkSL::Compiler compiler(SkSL::Compiler::kPermitInvalidStaticTests_Flag);
+ if (!out.isValid()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ settings.fReplaceSettings = false;
+ std::unique_ptr<SkSL::Program> program = compiler.convertProgram(kind, text, settings);
+ if (!program || !compiler.toH(*program, base_name(argv[1], "Gr", ".fp"), out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (!out.close()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ } else if (name.endsWith(".cpp")) {
+ SkSL::FileOutputStream out(argv[2]);
+ SkSL::Compiler compiler(SkSL::Compiler::kPermitInvalidStaticTests_Flag);
+ if (!out.isValid()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ settings.fReplaceSettings = false;
+ std::unique_ptr<SkSL::Program> program = compiler.convertProgram(kind, text, settings);
+ if (!program || !compiler.toCPP(*program, base_name(argv[1], "Gr", ".fp"), out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (!out.close()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+ } else {
+ printf("expected output filename to end with '.spirv', '.glsl', '.cpp', '.h', or '.metal'");
+ exit(1);
+ }
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h b/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h
new file mode 100644
index 0000000000..4b3227601d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKIASL_MEMORYLAYOUT
+#define SKIASL_MEMORYLAYOUT
+
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+class MemoryLayout {
+public:
+ enum Standard {
+ k140_Standard,
+ k430_Standard,
+ kMetal_Standard
+ };
+
+ MemoryLayout(Standard std)
+ : fStd(std) {}
+
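+    // Rounds odd column counts up to the next even count, e.g. float2 -> 8 bytes,
+    // float3 -> 16 bytes, float4 -> 16 bytes.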
+ static size_t vector_alignment(size_t componentSize, int columns) {
+ return componentSize * (columns + columns % 2);
+ }
+
+ /**
+ * Rounds up to the nearest multiple of 16 if in std140, otherwise returns the parameter
+ * unchanged (std140 requires various things to be rounded up to the nearest multiple of 16,
+ * std430 does not).
+ */
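+    // e.g. an array of floats has a 16-byte stride in std140, so roundUpIfNeeded(4)
+    // returns 16 there, while std430 and Metal leave it at 4.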
+ size_t roundUpIfNeeded(size_t raw) const {
+ switch (fStd) {
+ case k140_Standard: return (raw + 15) & ~15;
+ case k430_Standard: return raw;
+ case kMetal_Standard: return raw;
+ }
+ ABORT("unreachable");
+ }
+
+ /**
+ * Returns a type's required alignment when used as a standalone variable.
+ */
+ size_t alignment(const Type& type) const {
+ // See OpenGL Spec 7.6.2.2 Standard Uniform Block Layout
+ switch (type.kind()) {
+ case Type::kScalar_Kind:
+ return this->size(type);
+ case Type::kVector_Kind:
+ return vector_alignment(this->size(type.componentType()), type.columns());
+ case Type::kMatrix_Kind:
+ return this->roundUpIfNeeded(vector_alignment(this->size(type.componentType()),
+ type.rows()));
+ case Type::kArray_Kind:
+ return this->roundUpIfNeeded(this->alignment(type.componentType()));
+ case Type::kStruct_Kind: {
+ size_t result = 0;
+ for (const auto& f : type.fields()) {
+ size_t alignment = this->alignment(*f.fType);
+ if (alignment > result) {
+ result = alignment;
+ }
+ }
+ return this->roundUpIfNeeded(result);
+ }
+ default:
+ ABORT("cannot determine size of type %s", type.name().c_str());
+ }
+ }
+
+ /**
+ * For matrices and arrays, returns the number of bytes from the start of one entry (row, in
+ * the case of matrices) to the start of the next.
+ */
+ size_t stride(const Type& type) const {
+ switch (type.kind()) {
+ case Type::kMatrix_Kind: {
+ size_t base = vector_alignment(this->size(type.componentType()), type.rows());
+ return this->roundUpIfNeeded(base);
+ }
+ case Type::kArray_Kind: {
+ int align = this->alignment(type.componentType());
+ int stride = this->size(type.componentType()) + align - 1;
+ stride -= stride % align;
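+                // e.g. a float3 array in std430: element size 12 rounded up to the
+                // 16-byte alignment yields a 16-byte stride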
+ return this->roundUpIfNeeded(stride);
+ }
+ default:
+ ABORT("type does not have a stride");
+ }
+ }
+
+ /**
+ * Returns the size of a type in bytes.
+ */
+ size_t size(const Type& type) const {
+ switch (type.kind()) {
+ case Type::kScalar_Kind:
+ if (type.name() == "bool") {
+ return 1;
+ }
+ // FIXME need to take precision into account, once we figure out how we want to
+ // handle it...
+ return 4;
+ case Type::kVector_Kind:
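+                // Metal pads non-packed 3-component vectors to 4 components, so a
+                // float3 occupies 16 bytes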
+ if (fStd == kMetal_Standard && type.columns() == 3) {
+ return 4 * this->size(type.componentType());
+ }
+ return type.columns() * this->size(type.componentType());
+ case Type::kMatrix_Kind: // fall through
+ case Type::kArray_Kind:
+ return type.columns() * this->stride(type);
+ case Type::kStruct_Kind: {
+ size_t total = 0;
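+                // pad each field up to its own alignment, then round the final size
+                // up to the struct's alignment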
+ for (const auto& f : type.fields()) {
+ size_t alignment = this->alignment(*f.fType);
+ if (total % alignment != 0) {
+ total += alignment - total % alignment;
+ }
+ SkASSERT(total % alignment == 0);
+ total += this->size(*f.fType);
+ }
+ size_t alignment = this->alignment(type);
+ SkASSERT(!type.fields().size() ||
+ (0 == alignment % this->alignment(*type.fields()[0].fType)));
+ return (total + alignment - 1) & ~(alignment - 1);
+ }
+ default:
+ ABORT("cannot determine size of type %s", type.name().c_str());
+ }
+ }
+
+ const Standard fStd;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.cpp
new file mode 100644
index 0000000000..641e4c0435
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.cpp
@@ -0,0 +1,1702 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLMetalCodeGenerator.h"
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#ifdef SK_MOLTENVK
+ static const uint32_t MVKMagicNum = 0x19960412;
+#endif
+
+namespace SkSL {
+
+void MetalCodeGenerator::setupIntrinsics() {
+#define METAL(x) std::make_pair(kMetal_IntrinsicKind, k ## x ## _MetalIntrinsic)
+#define SPECIAL(x) std::make_pair(kSpecial_IntrinsicKind, k ## x ## _SpecialIntrinsic)
+ fIntrinsicMap[String("sample")] = SPECIAL(Texture);
+ fIntrinsicMap[String("mod")] = SPECIAL(Mod);
+ fIntrinsicMap[String("equal")] = METAL(Equal);
+ fIntrinsicMap[String("notEqual")] = METAL(NotEqual);
+ fIntrinsicMap[String("lessThan")] = METAL(LessThan);
+ fIntrinsicMap[String("lessThanEqual")] = METAL(LessThanEqual);
+ fIntrinsicMap[String("greaterThan")] = METAL(GreaterThan);
+ fIntrinsicMap[String("greaterThanEqual")] = METAL(GreaterThanEqual);
+}
+
+void MetalCodeGenerator::write(const char* s) {
+ if (!s[0]) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->writeText(s);
+ fAtLineStart = false;
+}
+
+void MetalCodeGenerator::writeLine(const char* s) {
+ this->write(s);
+ fOut->writeText(fLineEnding);
+ fAtLineStart = true;
+}
+
+void MetalCodeGenerator::write(const String& s) {
+ this->write(s.c_str());
+}
+
+void MetalCodeGenerator::writeLine(const String& s) {
+ this->writeLine(s.c_str());
+}
+
+void MetalCodeGenerator::writeLine() {
+ this->writeLine("");
+}
+
+void MetalCodeGenerator::writeExtension(const Extension& ext) {
+ this->writeLine("#extension " + ext.fName + " : enable");
+}
+
+void MetalCodeGenerator::writeType(const Type& type) {
+ switch (type.kind()) {
+ case Type::kStruct_Kind:
+ for (const Type* search : fWrittenStructs) {
+ if (*search == type) {
+ // already written
+ this->write(type.name());
+ return;
+ }
+ }
+ fWrittenStructs.push_back(&type);
+ this->writeLine("struct " + type.name() + " {");
+ fIndentation++;
+ this->writeFields(type.fields(), type.fOffset);
+ fIndentation--;
+ this->write("}");
+ break;
+ case Type::kVector_Kind:
+ this->writeType(type.componentType());
+ this->write(to_string(type.columns()));
+ break;
+ case Type::kMatrix_Kind:
+ this->writeType(type.componentType());
+ this->write(to_string(type.columns()));
+ this->write("x");
+ this->write(to_string(type.rows()));
+ break;
+ case Type::kSampler_Kind:
+ this->write("texture2d<float> "); // FIXME - support other texture types;
+ break;
+ default:
+ if (type == *fContext.fHalf_Type) {
+ // FIXME - Currently only supporting floats in MSL to avoid type coercion issues.
+ this->write(fContext.fFloat_Type->name());
+ } else if (type == *fContext.fByte_Type) {
+ this->write("char");
+ } else if (type == *fContext.fUByte_Type) {
+ this->write("uchar");
+ } else {
+ this->write(type.name());
+ }
+ }
+}
+
+void MetalCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
+ switch (expr.fKind) {
+ case Expression::kBinary_Kind:
+ this->writeBinaryExpression((BinaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kBoolLiteral_Kind:
+ this->writeBoolLiteral((BoolLiteral&) expr);
+ break;
+ case Expression::kConstructor_Kind:
+ this->writeConstructor((Constructor&) expr, parentPrecedence);
+ break;
+ case Expression::kIntLiteral_Kind:
+ this->writeIntLiteral((IntLiteral&) expr);
+ break;
+ case Expression::kFieldAccess_Kind:
+ this->writeFieldAccess(((FieldAccess&) expr));
+ break;
+ case Expression::kFloatLiteral_Kind:
+ this->writeFloatLiteral(((FloatLiteral&) expr));
+ break;
+ case Expression::kFunctionCall_Kind:
+ this->writeFunctionCall((FunctionCall&) expr);
+ break;
+ case Expression::kPrefix_Kind:
+ this->writePrefixExpression((PrefixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kPostfix_Kind:
+ this->writePostfixExpression((PostfixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kSetting_Kind:
+ this->writeSetting((Setting&) expr);
+ break;
+ case Expression::kSwizzle_Kind:
+ this->writeSwizzle((Swizzle&) expr);
+ break;
+ case Expression::kVariableReference_Kind:
+ this->writeVariableReference((VariableReference&) expr);
+ break;
+ case Expression::kTernary_Kind:
+ this->writeTernaryExpression((TernaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kIndex_Kind:
+ this->writeIndexExpression((IndexExpression&) expr);
+ break;
+ default:
+ ABORT("unsupported expression: %s", expr.description().c_str());
+ }
+}
+
+void MetalCodeGenerator::writeIntrinsicCall(const FunctionCall& c) {
+ auto i = fIntrinsicMap.find(c.fFunction.fName);
+ SkASSERT(i != fIntrinsicMap.end());
+ Intrinsic intrinsic = i->second;
+ int32_t intrinsicId = intrinsic.second;
+ switch (intrinsic.first) {
+ case kSpecial_IntrinsicKind:
+ return this->writeSpecialIntrinsic(c, (SpecialIntrinsic) intrinsicId);
+ break;
+ case kMetal_IntrinsicKind:
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ switch ((MetalIntrinsic) intrinsicId) {
+ case kEqual_MetalIntrinsic:
+ this->write(" == ");
+ break;
+ case kNotEqual_MetalIntrinsic:
+ this->write(" != ");
+ break;
+ case kLessThan_MetalIntrinsic:
+ this->write(" < ");
+ break;
+ case kLessThanEqual_MetalIntrinsic:
+ this->write(" <= ");
+ break;
+ case kGreaterThan_MetalIntrinsic:
+ this->write(" > ");
+ break;
+ case kGreaterThanEqual_MetalIntrinsic:
+ this->write(" >= ");
+ break;
+ default:
+ ABORT("unsupported metal intrinsic kind");
+ }
+ this->writeExpression(*c.fArguments[1], kSequence_Precedence);
+ break;
+ default:
+ ABORT("unsupported intrinsic kind");
+ }
+}
+
+void MetalCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+ const auto& entry = fIntrinsicMap.find(c.fFunction.fName);
+ if (entry != fIntrinsicMap.end()) {
+ this->writeIntrinsicCall(c);
+ return;
+ }
+ if (c.fFunction.fBuiltin && "atan" == c.fFunction.fName && 2 == c.fArguments.size()) {
+ this->write("atan2");
+ } else if (c.fFunction.fBuiltin && "inversesqrt" == c.fFunction.fName) {
+ this->write("rsqrt");
+ } else if (c.fFunction.fBuiltin && "inverse" == c.fFunction.fName) {
+ SkASSERT(c.fArguments.size() == 1);
+ this->writeInverseHack(*c.fArguments[0]);
+ } else if (c.fFunction.fBuiltin && "dFdx" == c.fFunction.fName) {
+ this->write("dfdx");
+ } else if (c.fFunction.fBuiltin && "dFdy" == c.fFunction.fName) {
+ // Flipping Y also negates the Y derivatives.
+ this->write((fProgram.fSettings.fFlipY) ? "-dfdy" : "dfdy");
+ } else {
+ this->writeName(c.fFunction.fName);
+ }
+ this->write("(");
+ const char* separator = "";
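+    // Thread through any implicit state the callee needs -- stage inputs/outputs,
+    // uniforms, globals, and the fragment coordinate -- as extra leading arguments.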
+ if (this->requirements(c.fFunction) & kInputs_Requirement) {
+ this->write("_in");
+ separator = ", ";
+ }
+ if (this->requirements(c.fFunction) & kOutputs_Requirement) {
+ this->write(separator);
+ this->write("_out");
+ separator = ", ";
+ }
+ if (this->requirements(c.fFunction) & kUniforms_Requirement) {
+ this->write(separator);
+ this->write("_uniforms");
+ separator = ", ";
+ }
+ if (this->requirements(c.fFunction) & kGlobals_Requirement) {
+ this->write(separator);
+ this->write("_globals");
+ separator = ", ";
+ }
+ if (this->requirements(c.fFunction) & kFragCoord_Requirement) {
+ this->write(separator);
+ this->write("_fragCoord");
+ separator = ", ";
+ }
+ for (size_t i = 0; i < c.fArguments.size(); ++i) {
+ const Expression& arg = *c.fArguments[i];
+ this->write(separator);
+ separator = ", ";
+ if (c.fFunction.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ this->write("&");
+ }
+ this->writeExpression(arg, kSequence_Precedence);
+ }
+ this->write(")");
+}
+
+void MetalCodeGenerator::writeInverseHack(const Expression& mat) {
+ String typeName = mat.fType.name();
+ String name = typeName + "_inverse";
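+    // The Metal standard library has no matrix inverse(), so a helper computing the
+    // classical adjugate divided by the determinant is emitted on first use.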
+ if (mat.fType == *fContext.fFloat2x2_Type || mat.fType == *fContext.fHalf2x2_Type) {
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ typeName + " " + name + "(" + typeName + " m) {"
+ " return float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]) * (1/determinant(m));"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat3x3_Type || mat.fType == *fContext.fHalf3x3_Type) {
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ typeName + " " + name + "(" + typeName + " m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2];"
+ " float b01 = a22 * a11 - a12 * a21;"
+ " float b11 = -a22 * a10 + a12 * a20;"
+ " float b21 = a21 * a10 - a11 * a20;"
+ " float det = a00 * b01 + a01 * b11 + a02 * b21;"
+ " return " + typeName +
+ " (b01, (-a22 * a01 + a02 * a21), (a12 * a01 - a02 * a11),"
+ " b11, (a22 * a00 - a02 * a20), (-a12 * a00 + a02 * a10),"
+ " b21, (-a21 * a00 + a01 * a20), (a11 * a00 - a01 * a10)) * "
+ " (1/det);"
+ "}"
+ ).c_str());
+ }
+ }
+ else if (mat.fType == *fContext.fFloat4x4_Type || mat.fType == *fContext.fHalf4x4_Type) {
+ if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
+ fWrittenIntrinsics.insert(name);
+ fExtraFunctions.writeText((
+ typeName + " " + name + "(" + typeName + " m) {"
+ " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3];"
+ " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3];"
+ " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3];"
+ " float a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3];"
+ " float b00 = a00 * a11 - a01 * a10;"
+ " float b01 = a00 * a12 - a02 * a10;"
+ " float b02 = a00 * a13 - a03 * a10;"
+ " float b03 = a01 * a12 - a02 * a11;"
+ " float b04 = a01 * a13 - a03 * a11;"
+ " float b05 = a02 * a13 - a03 * a12;"
+ " float b06 = a20 * a31 - a21 * a30;"
+ " float b07 = a20 * a32 - a22 * a30;"
+ " float b08 = a20 * a33 - a23 * a30;"
+ " float b09 = a21 * a32 - a22 * a31;"
+ " float b10 = a21 * a33 - a23 * a31;"
+ " float b11 = a22 * a33 - a23 * a32;"
+ " float det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - "
+ " b04 * b07 + b05 * b06;"
+ " return " + typeName + "(a11 * b11 - a12 * b10 + a13 * b09,"
+ " a02 * b10 - a01 * b11 - a03 * b09,"
+ " a31 * b05 - a32 * b04 + a33 * b03,"
+ " a22 * b04 - a21 * b05 - a23 * b03,"
+ " a12 * b08 - a10 * b11 - a13 * b07,"
+ " a00 * b11 - a02 * b08 + a03 * b07,"
+ " a32 * b02 - a30 * b05 - a33 * b01,"
+ " a20 * b05 - a22 * b02 + a23 * b01,"
+ " a10 * b10 - a11 * b08 + a13 * b06,"
+ " a01 * b08 - a00 * b10 - a03 * b06,"
+ " a30 * b04 - a31 * b02 + a33 * b00,"
+ " a21 * b02 - a20 * b04 - a23 * b00,"
+ " a11 * b07 - a10 * b09 - a12 * b06,"
+ " a00 * b09 - a01 * b07 + a02 * b06,"
+ " a31 * b01 - a30 * b03 - a32 * b00,"
+ " a20 * b03 - a21 * b01 + a22 * b00) / det;"
+ "}"
+ ).c_str());
+ }
+ }
+ this->write(name);
+}
+
+void MetalCodeGenerator::writeSpecialIntrinsic(const FunctionCall & c, SpecialIntrinsic kind) {
+ switch (kind) {
+ case kTexture_SpecialIntrinsic:
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(".sample(");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(SAMPLER_SUFFIX);
+ this->write(", ");
+ this->writeExpression(*c.fArguments[1], kSequence_Precedence);
+ if (c.fArguments[1]->fType == *fContext.fFloat3_Type) {
+ this->write(".xy)"); // FIXME - add projection functionality
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat2_Type);
+ this->write(")");
+ }
+ break;
+ case kMod_SpecialIntrinsic:
+ // fmod(x, y) in metal calculates x - y * trunc(x / y) instead of x - y * floor(x / y)
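+            // e.g. mod(-1.0, 3.0) is 2.0 under the floor-based definition, but
+            // fmod(-1.0, 3.0) is -1.0, hence the expansion below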
+ this->write("((");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(") - (");
+ this->writeExpression(*c.fArguments[1], kSequence_Precedence);
+ this->write(") * floor((");
+ this->writeExpression(*c.fArguments[0], kSequence_Precedence);
+ this->write(") / (");
+ this->writeExpression(*c.fArguments[1], kSequence_Precedence);
+ this->write(")))");
+ break;
+ default:
+ ABORT("unsupported special intrinsic kind");
+ }
+}
+
+// If it hasn't already been written, writes a constructor for 'matrix' which takes a single value
+// of type 'arg'.
+String MetalCodeGenerator::getMatrixConstructHelper(const Type& matrix, const Type& arg) {
+ String key = matrix.name() + arg.name();
+ auto found = fHelpers.find(key);
+ if (found != fHelpers.end()) {
+ return found->second;
+ }
+ String name;
+ int columns = matrix.columns();
+ int rows = matrix.rows();
+ if (arg.isNumber()) {
+ // creating a matrix from a single scalar value
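+        // (the scalar lands on the diagonal with zeros elsewhere, matching GLSL's
+        // matN(x) semantics)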
+ name = "float" + to_string(columns) + "x" + to_string(rows) + "_from_float";
+ fExtraFunctions.printf("float%dx%d %s(float x) {\n",
+ columns, rows, name.c_str());
+ fExtraFunctions.printf(" return float%dx%d(", columns, rows);
+ for (int i = 0; i < columns; ++i) {
+ if (i > 0) {
+ fExtraFunctions.writeText(", ");
+ }
+ fExtraFunctions.printf("float%d(", rows);
+ for (int j = 0; j < rows; ++j) {
+ if (j > 0) {
+ fExtraFunctions.writeText(", ");
+ }
+ if (i == j) {
+ fExtraFunctions.writeText("x");
+ } else {
+ fExtraFunctions.writeText("0");
+ }
+ }
+ fExtraFunctions.writeText(")");
+ }
+ fExtraFunctions.writeText(");\n}\n");
+ } else if (arg.kind() == Type::kMatrix_Kind) {
+ // creating a matrix from another matrix
+ int argColumns = arg.columns();
+ int argRows = arg.rows();
+ name = "float" + to_string(columns) + "x" + to_string(rows) + "_from_float" +
+ to_string(argColumns) + "x" + to_string(argRows);
+ fExtraFunctions.printf("float%dx%d %s(float%dx%d m) {\n",
+ columns, rows, name.c_str(), argColumns, argRows);
+ fExtraFunctions.printf(" return float%dx%d(", columns, rows);
+ for (int i = 0; i < columns; ++i) {
+ if (i > 0) {
+ fExtraFunctions.writeText(", ");
+ }
+ fExtraFunctions.printf("float%d(", rows);
+ for (int j = 0; j < rows; ++j) {
+ if (j > 0) {
+ fExtraFunctions.writeText(", ");
+ }
+ if (i < argColumns && j < argRows) {
+ fExtraFunctions.printf("m[%d][%d]", i, j);
+ } else {
+ fExtraFunctions.writeText("0");
+ }
+ }
+ fExtraFunctions.writeText(")");
+ }
+ fExtraFunctions.writeText(");\n}\n");
+ } else if (matrix.rows() == 2 && matrix.columns() == 2 && arg == *fContext.fFloat4_Type) {
+ // float2x2(float4) doesn't work, need to split it into float2x2(float2, float2)
+ name = "float2x2_from_float4";
+ fExtraFunctions.printf(
+ "float2x2 %s(float4 v) {\n"
+ " return float2x2(float2(v[0], v[1]), float2(v[2], v[3]));\n"
+ "}\n",
+ name.c_str()
+ );
+ } else {
+ SkASSERT(false);
+ name = "<error>";
+ }
+ fHelpers[key] = name;
+ return name;
+}
+
+bool MetalCodeGenerator::canCoerce(const Type& t1, const Type& t2) {
+ if (t1.columns() != t2.columns() || t1.rows() != t2.rows()) {
+ return false;
+ }
+ if (t1.columns() > 1) {
+ return this->canCoerce(t1.componentType(), t2.componentType());
+ }
+ return t1.isFloat() && t2.isFloat();
+}
+
+void MetalCodeGenerator::writeConstructor(const Constructor& c, Precedence parentPrecedence) {
+ if (c.fArguments.size() == 1 && this->canCoerce(c.fType, c.fArguments[0]->fType)) {
+ this->writeExpression(*c.fArguments[0], parentPrecedence);
+ return;
+ }
+ if (c.fType.kind() == Type::kMatrix_Kind && c.fArguments.size() == 1) {
+ const Expression& arg = *c.fArguments[0];
+ String name = this->getMatrixConstructHelper(c.fType, arg.fType);
+ this->write(name);
+ this->write("(");
+ this->writeExpression(arg, kSequence_Precedence);
+ this->write(")");
+ } else {
+ this->writeType(c.fType);
+ this->write("(");
+ const char* separator = "";
+ int scalarCount = 0;
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ if (Type::kMatrix_Kind == c.fType.kind() && arg->fType.columns() != c.fType.rows()) {
+ // merge scalars and smaller vectors together
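+                // e.g. float2x2(a, b, c, d) is emitted as
+                // float2x2(float2(a, b), float2(c, d)), since Metal matrix
+                // constructors take column vectors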
+ if (!scalarCount) {
+ this->writeType(c.fType.componentType());
+ this->write(to_string(c.fType.rows()));
+ this->write("(");
+ }
+ scalarCount += arg->fType.columns();
+ }
+ this->writeExpression(*arg, kSequence_Precedence);
+ if (scalarCount && scalarCount == c.fType.rows()) {
+ this->write(")");
+ scalarCount = 0;
+ }
+ }
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writeFragCoord() {
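+    // If the render-target height uniform is available, emit a Y-flipped coordinate
+    // (height - y); otherwise pass _fragCoord through with z pinned to 0.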
+ if (fRTHeightName.length()) {
+ this->write("float4(_fragCoord.x, ");
+ this->write(fRTHeightName.c_str());
+ this->write(" - _fragCoord.y, 0.0, _fragCoord.w)");
+ } else {
+ this->write("float4(_fragCoord.x, _fragCoord.y, 0.0, _fragCoord.w)");
+ }
+}
+
+void MetalCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ switch (ref.fVariable.fModifiers.fLayout.fBuiltin) {
+ case SK_FRAGCOLOR_BUILTIN:
+ this->write("_out->sk_FragColor");
+ break;
+ case SK_FRAGCOORD_BUILTIN:
+ this->writeFragCoord();
+ break;
+ case SK_VERTEXID_BUILTIN:
+ this->write("sk_VertexID");
+ break;
+ case SK_INSTANCEID_BUILTIN:
+ this->write("sk_InstanceID");
+ break;
+ case SK_CLOCKWISE_BUILTIN:
+            // The front-facing winding in the MTLRenderCommandEncoder is set to
+            // counter-clockwise to match Skia's convention. This is also the default
+            // in MoltenVK.
+ this->write(fProgram.fSettings.fFlipY ? "_frontFacing" : "(!_frontFacing)");
+ break;
+ default:
+ if (Variable::kGlobal_Storage == ref.fVariable.fStorage) {
+ if (ref.fVariable.fModifiers.fFlags & Modifiers::kIn_Flag) {
+ this->write("_in.");
+ } else if (ref.fVariable.fModifiers.fFlags & Modifiers::kOut_Flag) {
+ this->write("_out->");
+ } else if (ref.fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag &&
+ ref.fVariable.fType.kind() != Type::kSampler_Kind) {
+ this->write("_uniforms.");
+ } else {
+ this->write("_globals->");
+ }
+ }
+ this->writeName(ref.fVariable.fName);
+ }
+}
+
+void MetalCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+ this->writeExpression(*expr.fBase, kPostfix_Precedence);
+ this->write("[");
+ this->writeExpression(*expr.fIndex, kTopLevel_Precedence);
+ this->write("]");
+}
+
+void MetalCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+ const Type::Field* field = &f.fBase->fType.fields()[f.fFieldIndex];
+ if (FieldAccess::kDefault_OwnerKind == f.fOwnerKind) {
+ this->writeExpression(*f.fBase, kPostfix_Precedence);
+ this->write(".");
+ }
+ switch (field->fModifiers.fLayout.fBuiltin) {
+ case SK_CLIPDISTANCE_BUILTIN:
+ this->write("gl_ClipDistance");
+ break;
+ case SK_POSITION_BUILTIN:
+ this->write("_out->sk_Position");
+ break;
+ default:
+ if (field->fName == "sk_PointSize") {
+ this->write("_out->sk_PointSize");
+ } else {
+ if (FieldAccess::kAnonymousInterfaceBlock_OwnerKind == f.fOwnerKind) {
+ this->write("_globals->");
+ this->write(fInterfaceBlockNameMap[fInterfaceBlockMap[field]]);
+ this->write("->");
+ }
+ this->writeName(field->fName);
+ }
+ }
+}
+
+void MetalCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
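+    // Constant swizzle components are lowered to an explicit constructor,
+    // e.g. v.xy0 becomes float3(v.xy, 0).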
+ int last = swizzle.fComponents.back();
+ if (last == SKSL_SWIZZLE_0 || last == SKSL_SWIZZLE_1) {
+ this->writeType(swizzle.fType);
+ this->write("(");
+ }
+ this->writeExpression(*swizzle.fBase, kPostfix_Precedence);
+ this->write(".");
+ for (int c : swizzle.fComponents) {
+ if (c >= 0) {
+ this->write(&("x\0y\0z\0w\0"[c * 2]));
+ }
+ }
+ if (last == SKSL_SWIZZLE_0) {
+ this->write(", 0)");
+ }
+ else if (last == SKSL_SWIZZLE_1) {
+ this->write(", 1)");
+ }
+}
+
+MetalCodeGenerator::Precedence MetalCodeGenerator::GetBinaryPrecedence(Token::Kind op) {
+ switch (op) {
+ case Token::STAR: // fall through
+ case Token::SLASH: // fall through
+ case Token::PERCENT: return MetalCodeGenerator::kMultiplicative_Precedence;
+ case Token::PLUS: // fall through
+ case Token::MINUS: return MetalCodeGenerator::kAdditive_Precedence;
+ case Token::SHL: // fall through
+ case Token::SHR: return MetalCodeGenerator::kShift_Precedence;
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ: return MetalCodeGenerator::kRelational_Precedence;
+ case Token::EQEQ: // fall through
+ case Token::NEQ: return MetalCodeGenerator::kEquality_Precedence;
+ case Token::BITWISEAND: return MetalCodeGenerator::kBitwiseAnd_Precedence;
+ case Token::BITWISEXOR: return MetalCodeGenerator::kBitwiseXor_Precedence;
+ case Token::BITWISEOR: return MetalCodeGenerator::kBitwiseOr_Precedence;
+ case Token::LOGICALAND: return MetalCodeGenerator::kLogicalAnd_Precedence;
+ case Token::LOGICALXOR: return MetalCodeGenerator::kLogicalXor_Precedence;
+ case Token::LOGICALOR: return MetalCodeGenerator::kLogicalOr_Precedence;
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEOREQ: return MetalCodeGenerator::kAssignment_Precedence;
+ case Token::COMMA: return MetalCodeGenerator::kSequence_Precedence;
+ default: ABORT("unsupported binary operator");
+ }
+}
+
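+// Metal does not provide operator*= for matrix types, so synthesize an overload on
+// demand (emitted at most once per type pair).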
+void MetalCodeGenerator::writeMatrixTimesEqualHelper(const Type& left, const Type& right,
+ const Type& result) {
+ String key = "TimesEqual" + left.name() + right.name();
+    if (fHelpers.find(key) == fHelpers.end()) {
+        fHelpers[key] = key;  // remember the key so the helper is emitted only once
+ fExtraFunctions.printf("%s operator*=(thread %s& left, thread const %s& right) {\n"
+ " left = left * right;\n"
+ " return left;\n"
+ "}", result.name().c_str(), left.name().c_str(),
+ right.name().c_str());
+ }
+}
+
+void MetalCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ Precedence precedence = GetBinaryPrecedence(b.fOperator);
+ bool needParens = precedence >= parentPrecedence;
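+    // In Metal, == and != on vectors yield a bool vector; wrap the comparison in
+    // all()/any() to reduce it to the scalar bool SkSL expects.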
+ switch (b.fOperator) {
+ case Token::EQEQ:
+ if (b.fLeft->fType.kind() == Type::kVector_Kind) {
+ this->write("all");
+ needParens = true;
+ }
+ break;
+ case Token::NEQ:
+ if (b.fLeft->fType.kind() == Type::kVector_Kind) {
+ this->write("any");
+ needParens = true;
+ }
+ break;
+ default:
+ break;
+ }
+ if (needParens) {
+ this->write("(");
+ }
+ if (Compiler::IsAssignment(b.fOperator) &&
+ Expression::kVariableReference_Kind == b.fLeft->fKind &&
+ Variable::kParameter_Storage == ((VariableReference&) *b.fLeft).fVariable.fStorage &&
+ (((VariableReference&) *b.fLeft).fVariable.fModifiers.fFlags & Modifiers::kOut_Flag)) {
+ // writing to an out parameter. Since we have to turn those into pointers, we have to
+ // dereference it here.
+ this->write("*");
+ }
+ if (b.fOperator == Token::STAREQ && b.fLeft->fType.kind() == Type::kMatrix_Kind &&
+ b.fRight->fType.kind() == Type::kMatrix_Kind) {
+ this->writeMatrixTimesEqualHelper(b.fLeft->fType, b.fRight->fType, b.fType);
+ }
+ this->writeExpression(*b.fLeft, precedence);
+ if (b.fOperator != Token::EQ && Compiler::IsAssignment(b.fOperator) &&
+ Expression::kSwizzle_Kind == b.fLeft->fKind && !b.fLeft->hasSideEffects()) {
+ // This doesn't compile in Metal:
+ // float4 x = float4(1);
+ // x.xy *= float2x2(...);
+ // with the error message "non-const reference cannot bind to vector element",
+        // but switching it to x.xy = x.xy * float2x2(...) fixes it. We perform this transformation
+ // as long as the LHS has no side effects, and hope for the best otherwise.
+ this->write(" = ");
+ this->writeExpression(*b.fLeft, kAssignment_Precedence);
+ this->write(" ");
+ String op = Compiler::OperatorName(b.fOperator);
+ SkASSERT(op.endsWith("="));
+ this->write(op.substr(0, op.size() - 1).c_str());
+ this->write(" ");
+ } else {
+ this->write(String(" ") + Compiler::OperatorName(b.fOperator) + " ");
+ }
+ this->writeExpression(*b.fRight, precedence);
+ if (needParens) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*t.fTest, kTernary_Precedence);
+ this->write(" ? ");
+ this->writeExpression(*t.fIfTrue, kTernary_Precedence);
+ this->write(" : ");
+ this->writeExpression(*t.fIfFalse, kTernary_Precedence);
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->write(Compiler::OperatorName(p.fOperator));
+ this->writeExpression(*p.fOperand, kPrefix_Precedence);
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*p.fOperand, kPostfix_Precedence);
+ this->write(Compiler::OperatorName(p.fOperator));
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
+ this->write(b.fValue ? "true" : "false");
+}
+
+void MetalCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ if (i.fType == *fContext.fUInt_Type) {
+ this->write(to_string(i.fValue & 0xffffffff) + "u");
+ } else {
+ this->write(to_string((int32_t) i.fValue));
+ }
+}
+
+void MetalCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ this->write(to_string(f.fValue));
+}
+
+void MetalCodeGenerator::writeSetting(const Setting& s) {
+ ABORT("internal error; setting was not folded to a constant during compilation\n");
+}
+
+void MetalCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ fRTHeightName = fProgram.fInputs.fRTHeight ? "_globals->_anonInterface0->u_skRTHeight" : "";
+ const char* separator = "";
+ if ("main" == f.fDeclaration.fName) {
+ switch (fProgram.fKind) {
+ case Program::kFragment_Kind:
+#ifdef SK_MOLTENVK
+ this->write("fragment Outputs main0");
+#else
+ this->write("fragment Outputs fragmentMain");
+#endif
+ break;
+ case Program::kVertex_Kind:
+#ifdef SK_MOLTENVK
+ this->write("vertex Outputs main0");
+#else
+ this->write("vertex Outputs vertexMain");
+#endif
+ break;
+ default:
+ SkASSERT(false);
+ }
+ this->write("(Inputs _in [[stage_in]]");
+ if (-1 != fUniformBuffer) {
+ this->write(", constant Uniforms& _uniforms [[buffer(" +
+ to_string(fUniformBuffer) + ")]]");
+ }
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kVar_Kind == e.fKind) {
+ VarDeclarations& decls = (VarDeclarations&) e;
+ if (!decls.fVars.size()) {
+ continue;
+ }
+ for (const auto& stmt: decls.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ if (var.fVar->fType.kind() == Type::kSampler_Kind) {
+ this->write(", texture2d<float> "); // FIXME - support other texture types
+ this->writeName(var.fVar->fName);
+ this->write("[[texture(");
+ this->write(to_string(var.fVar->fModifiers.fLayout.fBinding));
+ this->write(")]]");
+ this->write(", sampler ");
+ this->writeName(var.fVar->fName);
+ this->write(SAMPLER_SUFFIX);
+ this->write("[[sampler(");
+ this->write(to_string(var.fVar->fModifiers.fLayout.fBinding));
+ this->write(")]]");
+ }
+ }
+ } else if (ProgramElement::kInterfaceBlock_Kind == e.fKind) {
+ InterfaceBlock& intf = (InterfaceBlock&) e;
+ if ("sk_PerVertex" == intf.fTypeName) {
+ continue;
+ }
+ this->write(", constant ");
+ this->writeType(intf.fVariable.fType);
+ this->write("& " );
+ this->write(fInterfaceBlockNameMap[&intf]);
+ this->write(" [[buffer(");
+#ifdef SK_MOLTENVK
+ this->write(to_string(intf.fVariable.fModifiers.fLayout.fSet));
+#else
+ this->write(to_string(intf.fVariable.fModifiers.fLayout.fBinding));
+#endif
+ this->write(")]]");
+ }
+ }
+ if (fProgram.fKind == Program::kFragment_Kind) {
+ if (fProgram.fInputs.fRTHeight && fInterfaceBlockNameMap.empty()) {
+#ifdef SK_MOLTENVK
+ this->write(", constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(0)]]");
+#else
+ this->write(", constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(1)]]");
+#endif
+ fRTHeightName = "_anonInterface0.u_skRTHeight";
+ }
+ this->write(", bool _frontFacing [[front_facing]]");
+ this->write(", float4 _fragCoord [[position]]");
+ } else if (fProgram.fKind == Program::kVertex_Kind) {
+ this->write(", uint sk_VertexID [[vertex_id]], uint sk_InstanceID [[instance_id]]");
+ }
+ separator = ", ";
+ } else {
+ this->writeType(f.fDeclaration.fReturnType);
+ this->write(" ");
+ this->writeName(f.fDeclaration.fName);
+ this->write("(");
+ Requirements requirements = this->requirements(f.fDeclaration);
+ if (requirements & kInputs_Requirement) {
+ this->write("Inputs _in");
+ separator = ", ";
+ }
+ if (requirements & kOutputs_Requirement) {
+ this->write(separator);
+ this->write("thread Outputs* _out");
+ separator = ", ";
+ }
+ if (requirements & kUniforms_Requirement) {
+ this->write(separator);
+ this->write("Uniforms _uniforms");
+ separator = ", ";
+ }
+ if (requirements & kGlobals_Requirement) {
+ this->write(separator);
+ this->write("thread Globals* _globals");
+ separator = ", ";
+ }
+ if (requirements & kFragCoord_Requirement) {
+ this->write(separator);
+ this->write("float4 _fragCoord");
+ separator = ", ";
+ }
+ }
+ for (const auto& param : f.fDeclaration.fParameters) {
+ this->write(separator);
+ separator = ", ";
+ this->writeModifiers(param->fModifiers, false);
+ std::vector<int> sizes;
+ const Type* type = &param->fType;
+ while (Type::kArray_Kind == type->kind()) {
+ sizes.push_back(type->columns());
+ type = &type->componentType();
+ }
+ this->writeType(*type);
+ if (param->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ this->write("*");
+ }
+ this->write(" ");
+ this->writeName(param->fName);
+ for (int s : sizes) {
+ if (s <= 0) {
+ this->write("[]");
+ } else {
+ this->write("[" + to_string(s) + "]");
+ }
+ }
+ }
+ this->writeLine(") {");
+
+ SkASSERT(!fProgram.fSettings.fFragColorIsInOut);
+
+ if ("main" == f.fDeclaration.fName) {
+ if (fNeedsGlobalStructInit) {
+ this->writeLine(" Globals globalStruct;");
+ this->writeLine(" thread Globals* _globals = &globalStruct;");
+ for (const auto& intf: fInterfaceBlockNameMap) {
+ const auto& intfName = intf.second;
+ this->write(" _globals->");
+ this->writeName(intfName);
+ this->write(" = &");
+ this->writeName(intfName);
+ this->write(";\n");
+ }
+ for (const auto& var: fInitNonConstGlobalVars) {
+ this->write(" _globals->");
+ this->writeName(var->fVar->fName);
+ this->write(" = ");
+ this->writeVarInitializer(*var->fVar, *var->fValue);
+ this->writeLine(";");
+ }
+ for (const auto& texture: fTextures) {
+ this->write(" _globals->");
+ this->writeName(texture->fName);
+ this->write(" = ");
+ this->writeName(texture->fName);
+ this->write(";\n");
+ this->write(" _globals->");
+ this->writeName(texture->fName);
+ this->write(SAMPLER_SUFFIX);
+ this->write(" = ");
+ this->writeName(texture->fName);
+ this->write(SAMPLER_SUFFIX);
+ this->write(";\n");
+ }
+ }
+ this->writeLine(" Outputs _outputStruct;");
+ this->writeLine(" thread Outputs* _out = &_outputStruct;");
+ }
+ fFunctionHeader = "";
+ OutputStream* oldOut = fOut;
+ StringStream buffer;
+ fOut = &buffer;
+ fIndentation++;
+ this->writeStatements(((Block&) *f.fBody).fStatements);
+ if ("main" == f.fDeclaration.fName) {
+ switch (fProgram.fKind) {
+ case Program::kFragment_Kind:
+ this->writeLine("return *_out;");
+ break;
+ case Program::kVertex_Kind:
+ this->writeLine("_out->sk_Position.y = -_out->sk_Position.y;");
+ this->writeLine("return *_out;"); // FIXME - detect if function already has return
+ break;
+ default:
+ SkASSERT(false);
+ }
+ }
+ fIndentation--;
+ this->writeLine("}");
+
+ fOut = oldOut;
+ this->write(fFunctionHeader);
+ this->write(buffer.str());
+}
+
+void MetalCodeGenerator::writeModifiers(const Modifiers& modifiers,
+ bool globalContext) {
+ if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ this->write("thread ");
+ }
+ if (modifiers.fFlags & Modifiers::kConst_Flag) {
+ this->write("constant ");
+ }
+}
+
+void MetalCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ if ("sk_PerVertex" == intf.fTypeName) {
+ return;
+ }
+ this->writeModifiers(intf.fVariable.fModifiers, true);
+ this->write("struct ");
+ this->writeLine(intf.fTypeName + " {");
+ const Type* structType = &intf.fVariable.fType;
+ fWrittenStructs.push_back(structType);
+ while (Type::kArray_Kind == structType->kind()) {
+ structType = &structType->componentType();
+ }
+ fIndentation++;
+ writeFields(structType->fields(), structType->fOffset, &intf);
+ if (fProgram.fInputs.fRTHeight) {
+ this->writeLine("float u_skRTHeight;");
+ }
+ fIndentation--;
+ this->write("}");
+ if (intf.fInstanceName.size()) {
+ this->write(" ");
+ this->write(intf.fInstanceName);
+ for (const auto& size : intf.fSizes) {
+ this->write("[");
+ if (size) {
+ this->writeExpression(*size, kTopLevel_Precedence);
+ }
+ this->write("]");
+ }
+ fInterfaceBlockNameMap[&intf] = intf.fInstanceName;
+ } else {
+ fInterfaceBlockNameMap[&intf] = "_anonInterface" + to_string(fAnonInterfaceCount++);
+ }
+ this->writeLine(";");
+}
+
+void MetalCodeGenerator::writeFields(const std::vector<Type::Field>& fields, int parentOffset,
+ const InterfaceBlock* parentIntf) {
+#ifdef SK_MOLTENVK
+ MemoryLayout memoryLayout(MemoryLayout::k140_Standard);
+#else
+ MemoryLayout memoryLayout(MemoryLayout::kMetal_Standard);
+#endif
+ int currentOffset = 0;
+ for (const auto& field: fields) {
+ int fieldOffset = field.fModifiers.fLayout.fOffset;
+ const Type* fieldType = field.fType;
+ if (fieldOffset != -1) {
+ if (currentOffset > fieldOffset) {
+ fErrors.error(parentOffset,
+ "offset of field '" + field.fName + "' must be at least " +
+ to_string((int) currentOffset));
+ } else if (currentOffset < fieldOffset) {
+ this->write("char pad");
+ this->write(to_string(fPaddingCount++));
+ this->write("[");
+ this->write(to_string(fieldOffset - currentOffset));
+ this->writeLine("];");
+ currentOffset = fieldOffset;
+ }
+ int alignment = memoryLayout.alignment(*fieldType);
+ if (fieldOffset % alignment) {
+ fErrors.error(parentOffset,
+ "offset of field '" + field.fName + "' must be a multiple of " +
+ to_string((int) alignment));
+ }
+ }
+#ifdef SK_MOLTENVK
+ if (fieldType->kind() == Type::kVector_Kind &&
+ fieldType->columns() == 3) {
+ SkASSERT(memoryLayout.size(*fieldType) == 3);
+ // Pack all vec3 types so that their size in bytes will match what was expected in the
+ // original SkSL code since MSL has vec3 sizes equal to 4 * component type, while SkSL
+ // has vec3 equal to 3 * component type.
+
+ // FIXME - Packed vectors can't be accessed by swizzles, but can be indexed into. A
+ // combination of this being a problem which only occurs when using MoltenVK and the
+ // fact that we haven't swizzled a vec3 yet means that this problem hasn't been
+ // addressed.
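+ // For example, MSL's default layout gives sizeof(float3) == 16 while
+ // sizeof(packed_float3) == 12, so only the packed form matches the
+ // 3 * sizeof(float) that the SkSL layout computed for a vec3 field.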
+ this->write(PACKED_PREFIX);
+ }
+#endif
+ currentOffset += memoryLayout.size(*fieldType);
+ std::vector<int> sizes;
+ while (fieldType->kind() == Type::kArray_Kind) {
+ sizes.push_back(fieldType->columns());
+ fieldType = &fieldType->componentType();
+ }
+ this->writeModifiers(field.fModifiers, false);
+ this->writeType(*fieldType);
+ this->write(" ");
+ this->writeName(field.fName);
+ for (int s : sizes) {
+ if (s <= 0) {
+ this->write("[]");
+ } else {
+ this->write("[" + to_string(s) + "]");
+ }
+ }
+ this->writeLine(";");
+ if (parentIntf) {
+ fInterfaceBlockMap[&field] = parentIntf;
+ }
+ }
+}
+
+void MetalCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
+ this->writeExpression(value, kTopLevel_Precedence);
+}
+
+void MetalCodeGenerator::writeName(const String& name) {
+ if (fReservedWords.find(name) != fReservedWords.end()) {
+ this->write("_"); // adding underscore before name to avoid conflict with reserved words
+ }
+ this->write(name);
+}
+
+void MetalCodeGenerator::writeVarDeclarations(const VarDeclarations& decl, bool global) {
+ SkASSERT(decl.fVars.size() > 0);
+ bool wroteType = false;
+ for (const auto& stmt : decl.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ if (global && !(var.fVar->fModifiers.fFlags & Modifiers::kConst_Flag)) {
+ continue;
+ }
+ if (wroteType) {
+ this->write(", ");
+ } else {
+ this->writeModifiers(var.fVar->fModifiers, global);
+ this->writeType(decl.fBaseType);
+ this->write(" ");
+ wroteType = true;
+ }
+ this->writeName(var.fVar->fName);
+ for (const auto& size : var.fSizes) {
+ this->write("[");
+ if (size) {
+ this->writeExpression(*size, kTopLevel_Precedence);
+ }
+ this->write("]");
+ }
+ if (var.fValue) {
+ this->write(" = ");
+ this->writeVarInitializer(*var.fVar, *var.fValue);
+ }
+ }
+ if (wroteType) {
+ this->write(";");
+ }
+}
+
+void MetalCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, kTopLevel_Precedence);
+ this->write(";");
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration, false);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s);
+ break;
+ case Statement::kWhile_Kind:
+ this->writeWhileStatement((WhileStatement&) s);
+ break;
+ case Statement::kDo_Kind:
+ this->writeDoStatement((DoStatement&) s);
+ break;
+ case Statement::kSwitch_Kind:
+ this->writeSwitchStatement((SwitchStatement&) s);
+ break;
+ case Statement::kBreak_Kind:
+ this->write("break;");
+ break;
+ case Statement::kContinue_Kind:
+ this->write("continue;");
+ break;
+ case Statement::kDiscard_Kind:
+ this->write("discard_fragment();");
+ break;
+ case Statement::kNop_Kind:
+ this->write(";");
+ break;
+ default:
+ ABORT("unsupported statement: %s", s.description().c_str());
+ }
+}
+
+void MetalCodeGenerator::writeStatements(const std::vector<std::unique_ptr<Statement>>& statements) {
+ for (const auto& s : statements) {
+ if (!s->isEmpty()) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ }
+}
+
+void MetalCodeGenerator::writeBlock(const Block& b) {
+ this->writeLine("{");
+ fIndentation++;
+ this->writeStatements(b.fStatements);
+ fIndentation--;
+ this->write("}");
+}
+
+void MetalCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+ this->write("if (");
+ this->writeExpression(*stmt.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*stmt.fIfTrue);
+ if (stmt.fIfFalse) {
+ this->write(" else ");
+ this->writeStatement(*stmt.fIfFalse);
+ }
+}
+
+void MetalCodeGenerator::writeForStatement(const ForStatement& f) {
+ this->write("for (");
+ if (f.fInitializer && !f.fInitializer->isEmpty()) {
+ this->writeStatement(*f.fInitializer);
+ } else {
+ this->write("; ");
+ }
+ if (f.fTest) {
+ this->writeExpression(*f.fTest, kTopLevel_Precedence);
+ }
+ this->write("; ");
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, kTopLevel_Precedence);
+ }
+ this->write(") ");
+ this->writeStatement(*f.fStatement);
+}
+
+void MetalCodeGenerator::writeWhileStatement(const WhileStatement& w) {
+ this->write("while (");
+ this->writeExpression(*w.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*w.fStatement);
+}
+
+void MetalCodeGenerator::writeDoStatement(const DoStatement& d) {
+ this->write("do ");
+ this->writeStatement(*d.fStatement);
+ this->write(" while (");
+ this->writeExpression(*d.fTest, kTopLevel_Precedence);
+ this->write(");");
+}
+
+void MetalCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ this->write("switch (");
+ this->writeExpression(*s.fValue, kTopLevel_Precedence);
+ this->writeLine(") {");
+ fIndentation++;
+ for (const auto& c : s.fCases) {
+ if (c->fValue) {
+ this->write("case ");
+ this->writeExpression(*c->fValue, kTopLevel_Precedence);
+ this->writeLine(":");
+ } else {
+ this->writeLine("default:");
+ }
+ fIndentation++;
+ for (const auto& stmt : c->fStatements) {
+ this->writeStatement(*stmt);
+ this->writeLine();
+ }
+ fIndentation--;
+ }
+ fIndentation--;
+ this->write("}");
+}
+
+void MetalCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ this->write("return");
+ if (r.fExpression) {
+ this->write(" ");
+ this->writeExpression(*r.fExpression, kTopLevel_Precedence);
+ }
+ this->write(";");
+}
+
+void MetalCodeGenerator::writeHeader() {
+ this->write("#include <metal_stdlib>\n");
+ this->write("#include <simd/simd.h>\n");
+ this->write("using namespace metal;\n");
+}
+
+void MetalCodeGenerator::writeUniformStruct() {
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kVar_Kind == e.fKind) {
+ VarDeclarations& decls = (VarDeclarations&) e;
+ if (!decls.fVars.size()) {
+ continue;
+ }
+ const Variable& first = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if (first.fModifiers.fFlags & Modifiers::kUniform_Flag &&
+ first.fType.kind() != Type::kSampler_Kind) {
+ if (-1 == fUniformBuffer) {
+ this->write("struct Uniforms {\n");
+ fUniformBuffer = first.fModifiers.fLayout.fSet;
+ if (-1 == fUniformBuffer) {
+ fErrors.error(decls.fOffset, "Metal uniforms must have 'layout(set=...)'");
+ }
+ } else if (first.fModifiers.fLayout.fSet != fUniformBuffer) {
+ fErrors.error(decls.fOffset, "Metal backend requires all uniforms to have "
+ "the same 'layout(set=...)'");
+ }
+ this->write(" ");
+ this->writeType(first.fType);
+ this->write(" ");
+ for (const auto& stmt : decls.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ this->writeName(var.fVar->fName);
+ }
+ this->write(";\n");
+ }
+ }
+ }
+ if (-1 != fUniformBuffer) {
+ this->write("};\n");
+ }
+}
+
+void MetalCodeGenerator::writeInputStruct() {
+ this->write("struct Inputs {\n");
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kVar_Kind == e.fKind) {
+ VarDeclarations& decls = (VarDeclarations&) e;
+ if (!decls.fVars.size()) {
+ continue;
+ }
+ const Variable& first = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if (first.fModifiers.fFlags & Modifiers::kIn_Flag &&
+ -1 == first.fModifiers.fLayout.fBuiltin) {
+ this->write(" ");
+ this->writeType(first.fType);
+ this->write(" ");
+ for (const auto& stmt : decls.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ this->writeName(var.fVar->fName);
+ if (-1 != var.fVar->fModifiers.fLayout.fLocation) {
+ if (fProgram.fKind == Program::kVertex_Kind) {
+ this->write(" [[attribute(" +
+ to_string(var.fVar->fModifiers.fLayout.fLocation) + ")]]");
+ } else if (fProgram.fKind == Program::kFragment_Kind) {
+ this->write(" [[user(locn" +
+ to_string(var.fVar->fModifiers.fLayout.fLocation) + ")]]");
+ }
+ }
+ }
+ this->write(";\n");
+ }
+ }
+ }
+ this->write("};\n");
+}
+
+void MetalCodeGenerator::writeOutputStruct() {
+ this->write("struct Outputs {\n");
+ if (fProgram.fKind == Program::kVertex_Kind) {
+ this->write(" float4 sk_Position [[position]];\n");
+ } else if (fProgram.fKind == Program::kFragment_Kind) {
+ this->write(" float4 sk_FragColor [[color(0)]];\n");
+ }
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kVar_Kind == e.fKind) {
+ VarDeclarations& decls = (VarDeclarations&) e;
+ if (!decls.fVars.size()) {
+ continue;
+ }
+ const Variable& first = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if (first.fModifiers.fFlags & Modifiers::kOut_Flag &&
+ -1 == first.fModifiers.fLayout.fBuiltin) {
+ this->write(" ");
+ this->writeType(first.fType);
+ this->write(" ");
+ for (const auto& stmt : decls.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ this->writeName(var.fVar->fName);
+ if (fProgram.fKind == Program::kVertex_Kind) {
+ this->write(" [[user(locn" +
+ to_string(var.fVar->fModifiers.fLayout.fLocation) + ")]]");
+ } else if (fProgram.fKind == Program::kFragment_Kind) {
+ this->write(" [[color(" +
+ to_string(var.fVar->fModifiers.fLayout.fLocation) + ")");
+ int colorIndex = var.fVar->fModifiers.fLayout.fIndex;
+ if (colorIndex) {
+ this->write(", index(" + to_string(colorIndex) + ")");
+ }
+ this->write("]]");
+ }
+ }
+ this->write(";\n");
+ }
+ }
+ }
+ if (fProgram.fKind == Program::kVertex_Kind) {
+ this->write(" float sk_PointSize;\n");
+ }
+ this->write("};\n");
+}
+
+void MetalCodeGenerator::writeInterfaceBlocks() {
+ bool wroteInterfaceBlock = false;
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kInterfaceBlock_Kind == e.fKind) {
+ this->writeInterfaceBlock((InterfaceBlock&) e);
+ wroteInterfaceBlock = true;
+ }
+ }
+ if (!wroteInterfaceBlock && fProgram.fInputs.fRTHeight) {
+ this->writeLine("struct sksl_synthetic_uniforms {");
+ this->writeLine(" float u_skRTHeight;");
+ this->writeLine("};");
+ }
+}
+
+void MetalCodeGenerator::writeGlobalStruct() {
+ bool wroteStructDecl = false;
+ for (const auto& intf : fInterfaceBlockNameMap) {
+ if (!wroteStructDecl) {
+ this->write("struct Globals {\n");
+ wroteStructDecl = true;
+ }
+ fNeedsGlobalStructInit = true;
+ const auto& intfType = intf.first;
+ const auto& intfName = intf.second;
+ this->write(" constant ");
+ this->write(intfType->fTypeName);
+ this->write("* ");
+ this->writeName(intfName);
+ this->write(";\n");
+ }
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kVar_Kind == e.fKind) {
+ VarDeclarations& decls = (VarDeclarations&) e;
+ if (!decls.fVars.size()) {
+ continue;
+ }
+ const Variable& first = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if ((!first.fModifiers.fFlags && -1 == first.fModifiers.fLayout.fBuiltin) ||
+ first.fType.kind() == Type::kSampler_Kind) {
+ if (!wroteStructDecl) {
+ this->write("struct Globals {\n");
+ wroteStructDecl = true;
+ }
+ fNeedsGlobalStructInit = true;
+ this->write(" ");
+ this->writeType(first.fType);
+ this->write(" ");
+ for (const auto& stmt : decls.fVars) {
+ VarDeclaration& var = (VarDeclaration&) *stmt;
+ this->writeName(var.fVar->fName);
+ if (var.fVar->fType.kind() == Type::kSampler_Kind) {
+ fTextures.push_back(var.fVar);
+ this->write(";\n");
+ this->write(" sampler ");
+ this->writeName(var.fVar->fName);
+ this->write(SAMPLER_SUFFIX);
+ }
+ if (var.fValue) {
+ fInitNonConstGlobalVars.push_back(&var);
+ }
+ }
+ this->write(";\n");
+ }
+ }
+ }
+ if (wroteStructDecl) {
+ this->write("};\n");
+ }
+}
+
+void MetalCodeGenerator::writeProgramElement(const ProgramElement& e) {
+ switch (e.fKind) {
+ case ProgramElement::kExtension_Kind:
+ break;
+ case ProgramElement::kVar_Kind: {
+ VarDeclarations& decl = (VarDeclarations&) e;
+ if (decl.fVars.size() > 0) {
+ int builtin = ((VarDeclaration&) *decl.fVars[0]).fVar->fModifiers.fLayout.fBuiltin;
+ if (-1 == builtin) {
+ // normal var
+ this->writeVarDeclarations(decl, true);
+ this->writeLine();
+ } else if (SK_FRAGCOLOR_BUILTIN == builtin) {
+ // ignore
+ }
+ }
+ break;
+ }
+ case ProgramElement::kInterfaceBlock_Kind:
+ // handled in writeInterfaceBlocks, do nothing
+ break;
+ case ProgramElement::kFunction_Kind:
+ this->writeFunction((FunctionDefinition&) e);
+ break;
+ case ProgramElement::kModifiers_Kind:
+ this->writeModifiers(((ModifiersDeclaration&) e).fModifiers, true);
+ this->writeLine(";");
+ break;
+ default:
+ printf("%s\n", e.description().c_str());
+ ABORT("unsupported program element");
+ }
+}
+
+MetalCodeGenerator::Requirements MetalCodeGenerator::requirements(const Expression& e) {
+ switch (e.fKind) {
+ case Expression::kFunctionCall_Kind: {
+ const FunctionCall& f = (const FunctionCall&) e;
+ Requirements result = this->requirements(f.fFunction);
+ for (const auto& e : f.fArguments) {
+ result |= this->requirements(*e);
+ }
+ return result;
+ }
+ case Expression::kConstructor_Kind: {
+ const Constructor& c = (const Constructor&) e;
+ Requirements result = kNo_Requirements;
+ for (const auto& e : c.fArguments) {
+ result |= this->requirements(*e);
+ }
+ return result;
+ }
+ case Expression::kFieldAccess_Kind: {
+ const FieldAccess& f = (const FieldAccess&) e;
+ if (FieldAccess::kAnonymousInterfaceBlock_OwnerKind == f.fOwnerKind) {
+ return kGlobals_Requirement;
+ }
+ return this->requirements(*((const FieldAccess&) e).fBase);
+ }
+ case Expression::kSwizzle_Kind:
+ return this->requirements(*((const Swizzle&) e).fBase);
+ case Expression::kBinary_Kind: {
+ const BinaryExpression& b = (const BinaryExpression&) e;
+ return this->requirements(*b.fLeft) | this->requirements(*b.fRight);
+ }
+ case Expression::kIndex_Kind: {
+ const IndexExpression& idx = (const IndexExpression&) e;
+ return this->requirements(*idx.fBase) | this->requirements(*idx.fIndex);
+ }
+ case Expression::kPrefix_Kind:
+ return this->requirements(*((const PrefixExpression&) e).fOperand);
+ case Expression::kPostfix_Kind:
+ return this->requirements(*((const PostfixExpression&) e).fOperand);
+ case Expression::kTernary_Kind: {
+ const TernaryExpression& t = (const TernaryExpression&) e;
+ return this->requirements(*t.fTest) | this->requirements(*t.fIfTrue) |
+ this->requirements(*t.fIfFalse);
+ }
+ case Expression::kVariableReference_Kind: {
+ const VariableReference& v = (const VariableReference&) e;
+ Requirements result = kNo_Requirements;
+ if (v.fVariable.fModifiers.fLayout.fBuiltin == SK_FRAGCOORD_BUILTIN) {
+ result = kGlobals_Requirement | kFragCoord_Requirement;
+ } else if (Variable::kGlobal_Storage == v.fVariable.fStorage) {
+ if (v.fVariable.fModifiers.fFlags & Modifiers::kIn_Flag) {
+ result = kInputs_Requirement;
+ } else if (v.fVariable.fModifiers.fFlags & Modifiers::kOut_Flag) {
+ result = kOutputs_Requirement;
+ } else if (v.fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag &&
+ v.fVariable.fType.kind() != Type::kSampler_Kind) {
+ result = kUniforms_Requirement;
+ } else {
+ result = kGlobals_Requirement;
+ }
+ }
+ return result;
+ }
+ default:
+ return kNo_Requirements;
+ }
+}
+
+MetalCodeGenerator::Requirements MetalCodeGenerator::requirements(const Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind: {
+ Requirements result = kNo_Requirements;
+ for (const auto& child : ((const Block&) s).fStatements) {
+ result |= this->requirements(*child);
+ }
+ return result;
+ }
+ case Statement::kVarDeclaration_Kind: {
+ Requirements result = kNo_Requirements;
+ const VarDeclaration& var = (const VarDeclaration&) s;
+ if (var.fValue) {
+ result = this->requirements(*var.fValue);
+ }
+ return result;
+ }
+ case Statement::kVarDeclarations_Kind: {
+ Requirements result = kNo_Requirements;
+ const VarDeclarations& decls = *((const VarDeclarationsStatement&) s).fDeclaration;
+ for (const auto& stmt : decls.fVars) {
+ result |= this->requirements(*stmt);
+ }
+ return result;
+ }
+ case Statement::kExpression_Kind:
+ return this->requirements(*((const ExpressionStatement&) s).fExpression);
+ case Statement::kReturn_Kind: {
+ const ReturnStatement& r = (const ReturnStatement&) s;
+ if (r.fExpression) {
+ return this->requirements(*r.fExpression);
+ }
+ return kNo_Requirements;
+ }
+ case Statement::kIf_Kind: {
+ const IfStatement& i = (const IfStatement&) s;
+ return this->requirements(*i.fTest) |
+ this->requirements(*i.fIfTrue) |
+ (i.fIfFalse ? this->requirements(*i.fIfFalse) : 0);
+ }
+ case Statement::kFor_Kind: {
+ const ForStatement& f = (const ForStatement&) s;
+ return (f.fInitializer ? this->requirements(*f.fInitializer) : 0) |
+ (f.fTest ? this->requirements(*f.fTest) : 0) |
+ (f.fNext ? this->requirements(*f.fNext) : 0) |
+ this->requirements(*f.fStatement);
+ }
+ case Statement::kWhile_Kind: {
+ const WhileStatement& w = (const WhileStatement&) s;
+ return this->requirements(*w.fTest) |
+ this->requirements(*w.fStatement);
+ }
+ case Statement::kDo_Kind: {
+ const DoStatement& d = (const DoStatement&) s;
+ return this->requirements(*d.fTest) |
+ this->requirements(*d.fStatement);
+ }
+ case Statement::kSwitch_Kind: {
+ const SwitchStatement& sw = (const SwitchStatement&) s;
+ Requirements result = this->requirements(*sw.fValue);
+ for (const auto& c : sw.fCases) {
+ for (const auto& st : c->fStatements) {
+ result |= this->requirements(*st);
+ }
+ }
+ return result;
+ }
+ default:
+ return kNo_Requirements;
+ }
+}
+
+MetalCodeGenerator::Requirements MetalCodeGenerator::requirements(const FunctionDeclaration& f) {
+ if (f.fBuiltin) {
+ return kNo_Requirements;
+ }
+ auto found = fRequirements.find(&f);
+ if (found == fRequirements.end()) {
+ fRequirements[&f] = kNo_Requirements;
+ for (const auto& e : fProgram) {
+ if (ProgramElement::kFunction_Kind == e.fKind) {
+ const FunctionDefinition& def = (const FunctionDefinition&) e;
+ if (&def.fDeclaration == &f) {
+ Requirements reqs = this->requirements(*def.fBody);
+ fRequirements[&f] = reqs;
+ return reqs;
+ }
+ }
+ }
+ // no definition for this function exists in the program, so it imposes no
+ // requirements ('found' still points at end() and must not be dereferenced)
+ return kNo_Requirements;
+ }
+ return found->second;
+}
+
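+// Output is assembled from three streams: declarations are written to fHeader
+// up front, program elements are rendered into a temporary body stream (which
+// may append helpers to fExtraFunctions along the way), and the streams are
+// then concatenated into the real output in order.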
+bool MetalCodeGenerator::generateCode() {
+ OutputStream* rawOut = fOut;
+ fOut = &fHeader;
+#ifdef SK_MOLTENVK
+ fOut->write((const char*) &MVKMagicNum, sizeof(MVKMagicNum));
+#endif
+ fProgramKind = fProgram.fKind;
+ this->writeHeader();
+ this->writeUniformStruct();
+ this->writeInputStruct();
+ this->writeOutputStruct();
+ this->writeInterfaceBlocks();
+ this->writeGlobalStruct();
+ StringStream body;
+ fOut = &body;
+ for (const auto& e : fProgram) {
+ this->writeProgramElement(e);
+ }
+ fOut = rawOut;
+
+ write_stringstream(fHeader, *rawOut);
+ write_stringstream(fExtraFunctions, *rawOut);
+ write_stringstream(body, *rawOut);
+#ifdef SK_MOLTENVK
+ this->write("\0");
+#endif
+ return true;
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.h
new file mode 100644
index 0000000000..ad7ea1e949
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMetalCodeGenerator.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_METALCODEGENERATOR
+#define SKSL_METALCODEGENERATOR
+
+#include <set>
+#include <stack>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "src/sksl/SkSLCodeGenerator.h"
+#include "src/sksl/SkSLMemoryLayout.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+/**
+ * Converts a Program into Metal code.
+ */
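+/*
+ * Minimal usage sketch (illustrative only, not part of the Skia sources):
+ * assuming 'context', 'program' and 'errors' have already been produced by
+ * the surrounding SkSL compiler pipeline,
+ *
+ *     SkSL::StringStream out;
+ *     SkSL::MetalCodeGenerator cg(&context, program.get(), &errors, &out);
+ *     if (cg.generateCode()) {
+ *         SkSL::String msl = out.str();  // the finished Metal source text
+ *     }
+ */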
+class MetalCodeGenerator : public CodeGenerator {
+public:
+ static constexpr const char* SAMPLER_SUFFIX = "Smplr";
+ static constexpr const char* PACKED_PREFIX = "packed_";
+
+ enum Precedence {
+ kParentheses_Precedence = 1,
+ kPostfix_Precedence = 2,
+ kPrefix_Precedence = 3,
+ kMultiplicative_Precedence = 4,
+ kAdditive_Precedence = 5,
+ kShift_Precedence = 6,
+ kRelational_Precedence = 7,
+ kEquality_Precedence = 8,
+ kBitwiseAnd_Precedence = 9,
+ kBitwiseXor_Precedence = 10,
+ kBitwiseOr_Precedence = 11,
+ kLogicalAnd_Precedence = 12,
+ kLogicalXor_Precedence = 13,
+ kLogicalOr_Precedence = 14,
+ kTernary_Precedence = 15,
+ kAssignment_Precedence = 16,
+ kSequence_Precedence = 17,
+ kTopLevel_Precedence = kSequence_Precedence
+ };
+
+ MetalCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ OutputStream* out)
+ : INHERITED(program, errors, out)
+ , fReservedWords({"atan2", "rsqrt", "dfdx", "dfdy", "vertex", "fragment"})
+ , fLineEnding("\n")
+ , fContext(*context) {
+ this->setupIntrinsics();
+ }
+
+ bool generateCode() override;
+
+protected:
+ typedef int Requirements;
+ static constexpr Requirements kNo_Requirements = 0;
+ static constexpr Requirements kInputs_Requirement = 1 << 0;
+ static constexpr Requirements kOutputs_Requirement = 1 << 1;
+ static constexpr Requirements kUniforms_Requirement = 1 << 2;
+ static constexpr Requirements kGlobals_Requirement = 1 << 3;
+ static constexpr Requirements kFragCoord_Requirement = 1 << 4;
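+ // e.g. a function that reads sk_FragCoord and writes sk_FragColor carries
+ // kGlobals_Requirement | kFragCoord_Requirement | kOutputs_Requirement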
+
+ enum IntrinsicKind {
+ kSpecial_IntrinsicKind,
+ kMetal_IntrinsicKind,
+ };
+
+ enum SpecialIntrinsic {
+ kTexture_SpecialIntrinsic,
+ kMod_SpecialIntrinsic,
+ };
+
+ enum MetalIntrinsic {
+ kEqual_MetalIntrinsic,
+ kNotEqual_MetalIntrinsic,
+ kLessThan_MetalIntrinsic,
+ kLessThanEqual_MetalIntrinsic,
+ kGreaterThan_MetalIntrinsic,
+ kGreaterThanEqual_MetalIntrinsic,
+ };
+
+ void setupIntrinsics();
+
+ void write(const char* s);
+
+ void writeLine();
+
+ void writeLine(const char* s);
+
+ void write(const String& s);
+
+ void writeLine(const String& s);
+
+ void writeHeader();
+
+ void writeUniformStruct();
+
+ void writeInputStruct();
+
+ void writeOutputStruct();
+
+ void writeInterfaceBlocks();
+
+ void writeFields(const std::vector<Type::Field>& fields, int parentOffset,
+ const InterfaceBlock* parentIntf = nullptr);
+
+ int size(const Type* type, bool isPacked) const;
+
+ int alignment(const Type* type, bool isPacked) const;
+
+ void writeGlobalStruct();
+
+ void writePrecisionModifier();
+
+ void writeType(const Type& type);
+
+ void writeExtension(const Extension& ext);
+
+ void writeInterfaceBlock(const InterfaceBlock& intf);
+
+ void writeFunctionStart(const FunctionDeclaration& f);
+
+ void writeFunctionDeclaration(const FunctionDeclaration& f);
+
+ void writeFunction(const FunctionDefinition& f);
+
+ void writeLayout(const Layout& layout);
+
+ void writeModifiers(const Modifiers& modifiers, bool globalContext);
+
+ void writeGlobalVars(const VarDeclaration& vs);
+
+ void writeVarInitializer(const Variable& var, const Expression& value);
+
+ void writeName(const String& name);
+
+ void writeVarDeclarations(const VarDeclarations& decl, bool global);
+
+ void writeFragCoord();
+
+ void writeVariableReference(const VariableReference& ref);
+
+ void writeExpression(const Expression& expr, Precedence parentPrecedence);
+
+ void writeIntrinsicCall(const FunctionCall& c);
+
+ void writeMinAbsHack(Expression& absExpr, Expression& otherExpr);
+
+ void writeFunctionCall(const FunctionCall& c);
+
+ void writeInverseHack(const Expression& mat);
+
+ String getMatrixConstructHelper(const Type& matrix, const Type& arg);
+
+ void writeMatrixTimesEqualHelper(const Type& left, const Type& right, const Type& result);
+
+ void writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind);
+
+ bool canCoerce(const Type& t1, const Type& t2);
+
+ void writeConstructor(const Constructor& c, Precedence parentPrecedence);
+
+ void writeFieldAccess(const FieldAccess& f);
+
+ void writeSwizzle(const Swizzle& swizzle);
+
+ static Precedence GetBinaryPrecedence(Token::Kind op);
+
+ void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+
+ void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+
+ void writeIndexExpression(const IndexExpression& expr);
+
+ void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);
+
+ void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);
+
+ void writeBoolLiteral(const BoolLiteral& b);
+
+ void writeIntLiteral(const IntLiteral& i);
+
+ void writeFloatLiteral(const FloatLiteral& f);
+
+ void writeSetting(const Setting& s);
+
+ void writeStatement(const Statement& s);
+
+ void writeStatements(const std::vector<std::unique_ptr<Statement>>& statements);
+
+ void writeBlock(const Block& b);
+
+ void writeIfStatement(const IfStatement& stmt);
+
+ void writeForStatement(const ForStatement& f);
+
+ void writeWhileStatement(const WhileStatement& w);
+
+ void writeDoStatement(const DoStatement& d);
+
+ void writeSwitchStatement(const SwitchStatement& s);
+
+ void writeReturnStatement(const ReturnStatement& r);
+
+ void writeProgramElement(const ProgramElement& e);
+
+ Requirements requirements(const FunctionDeclaration& f);
+
+ Requirements requirements(const Expression& e);
+
+ Requirements requirements(const Statement& e);
+
+ typedef std::pair<IntrinsicKind, int32_t> Intrinsic;
+ std::unordered_map<String, Intrinsic> fIntrinsicMap;
+ std::unordered_set<String> fReservedWords;
+ std::vector<const VarDeclaration*> fInitNonConstGlobalVars;
+ std::vector<const Variable*> fTextures;
+ std::unordered_map<const Type::Field*, const InterfaceBlock*> fInterfaceBlockMap;
+ std::unordered_map<const InterfaceBlock*, String> fInterfaceBlockNameMap;
+ int fAnonInterfaceCount = 0;
+ int fPaddingCount = 0;
+ bool fNeedsGlobalStructInit = false;
+ const char* fLineEnding;
+ const Context& fContext;
+ StringStream fHeader;
+ String fFunctionHeader;
+ StringStream fExtraFunctions;
+ Program::Kind fProgramKind;
+ int fVarCount = 0;
+ int fIndentation = 0;
+ bool fAtLineStart = false;
+ // Keeps track of which struct types we have written. Given that we are unlikely to ever write
+ // more than one or two structs per shader, a simple linear search will be faster than anything
+ // fancier.
+ std::vector<const Type*> fWrittenStructs;
+ std::set<String> fWrittenIntrinsics;
+ // true if we have run into usages of dFdx / dFdy
+ bool fFoundDerivatives = false;
+ std::unordered_map<const FunctionDeclaration*, Requirements> fRequirements;
+ bool fSetupFragPositionGlobal = false;
+ bool fSetupFragPositionLocal = false;
+ std::unordered_map<String, String> fHelpers;
+ int fUniformBuffer = -1;
+ String fRTHeightName;
+
+ typedef CodeGenerator INHERITED;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp b/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp
new file mode 100644
index 0000000000..f72637d4cb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLOutputStream.h"
+
+namespace SkSL {
+
+void OutputStream::writeString(String s) {
+ this->write(s.c_str(), s.size());
+}
+
+void OutputStream::printf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ this->appendVAList(format, args);
+ va_end(args);
+}
+
+void OutputStream::appendVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ int length = vsnprintf(buffer, kBufferSize, format, args);
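+ // vsnprintf reports the length the formatted text requires; in debug builds
+ // the assert below catches output that would have been truncated to fit
+ // kBufferSize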
+ SkASSERT(length >= 0 && length < (int) kBufferSize);
+ this->write(buffer, length);
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLOutputStream.h b/gfx/skia/skia/src/sksl/SkSLOutputStream.h
new file mode 100644
index 0000000000..c7e979b273
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLOutputStream.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_OUTPUTSTREAM
+#define SKSL_OUTPUTSTREAM
+
+#include "src/sksl/SkSLDefines.h"
+#include "src/sksl/SkSLString.h"
+
+namespace SkSL {
+
+class OutputStream {
+public:
+ virtual bool isValid() const {
+ return true;
+ }
+
+ virtual void write8(uint8_t b) = 0;
+
+ virtual void writeText(const char* s) = 0;
+
+ virtual void write(const void* s, size_t size) = 0;
+
+ void writeString(String s);
+
+ void printf(const char format[], ...) SKSL_PRINTF_LIKE(2, 3);
+
+ void appendVAList(const char format[], va_list args);
+
+ virtual ~OutputStream() {}
+
+private:
+ static const int kBufferSize = 1024;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.cpp b/gfx/skia/skia/src/sksl/SkSLParser.cpp
new file mode 100644
index 0000000000..ddca218c38
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.cpp
@@ -0,0 +1,2150 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "stdio.h"
+#include "src/sksl/SkSLASTNode.h"
+#include "src/sksl/SkSLParser.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#ifndef SKSL_STANDALONE
+#include "include/private/SkOnce.h"
+#endif
+
+namespace SkSL {
+
+#define MAX_PARSE_DEPTH 50
+
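+// RAII guard that bounds recursion while parsing: each nested construct calls
+// increase(), which reports an error once MAX_PARSE_DEPTH is exceeded, and the
+// destructor restores the parser's depth count on scope exit.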
+class AutoDepth {
+public:
+ AutoDepth(Parser* p)
+ : fParser(p)
+ , fDepth(0) {}
+
+ ~AutoDepth() {
+ fParser->fDepth -= fDepth;
+ }
+
+ bool increase() {
+ ++fDepth;
+ ++fParser->fDepth;
+ if (fParser->fDepth > MAX_PARSE_DEPTH) {
+ fParser->error(fParser->peek(), String("exceeded max parse depth"));
+ return false;
+ }
+ return true;
+ }
+
+private:
+ Parser* fParser;
+ int fDepth;
+};
+
+std::unordered_map<String, Parser::LayoutToken>* Parser::layoutTokens;
+
+void Parser::InitLayoutMap() {
+ layoutTokens = new std::unordered_map<String, LayoutToken>;
+ #define TOKEN(name, text) (*layoutTokens)[text] = LayoutToken::name
+ TOKEN(LOCATION, "location");
+ TOKEN(OFFSET, "offset");
+ TOKEN(BINDING, "binding");
+ TOKEN(INDEX, "index");
+ TOKEN(SET, "set");
+ TOKEN(BUILTIN, "builtin");
+ TOKEN(INPUT_ATTACHMENT_INDEX, "input_attachment_index");
+ TOKEN(ORIGIN_UPPER_LEFT, "origin_upper_left");
+ TOKEN(OVERRIDE_COVERAGE, "override_coverage");
+ TOKEN(BLEND_SUPPORT_ALL_EQUATIONS, "blend_support_all_equations");
+ TOKEN(BLEND_SUPPORT_MULTIPLY, "blend_support_multiply");
+ TOKEN(BLEND_SUPPORT_SCREEN, "blend_support_screen");
+ TOKEN(BLEND_SUPPORT_OVERLAY, "blend_support_overlay");
+ TOKEN(BLEND_SUPPORT_DARKEN, "blend_support_darken");
+ TOKEN(BLEND_SUPPORT_LIGHTEN, "blend_support_lighten");
+ TOKEN(BLEND_SUPPORT_COLORDODGE, "blend_support_colordodge");
+ TOKEN(BLEND_SUPPORT_COLORBURN, "blend_support_colorburn");
+ TOKEN(BLEND_SUPPORT_HARDLIGHT, "blend_support_hardlight");
+ TOKEN(BLEND_SUPPORT_SOFTLIGHT, "blend_support_softlight");
+ TOKEN(BLEND_SUPPORT_DIFFERENCE, "blend_support_difference");
+ TOKEN(BLEND_SUPPORT_EXCLUSION, "blend_support_exclusion");
+ TOKEN(BLEND_SUPPORT_HSL_HUE, "blend_support_hsl_hue");
+ TOKEN(BLEND_SUPPORT_HSL_SATURATION, "blend_support_hsl_saturation");
+ TOKEN(BLEND_SUPPORT_HSL_COLOR, "blend_support_hsl_color");
+ TOKEN(BLEND_SUPPORT_HSL_LUMINOSITY, "blend_support_hsl_luminosity");
+ TOKEN(PUSH_CONSTANT, "push_constant");
+ TOKEN(POINTS, "points");
+ TOKEN(LINES, "lines");
+ TOKEN(LINE_STRIP, "line_strip");
+ TOKEN(LINES_ADJACENCY, "lines_adjacency");
+ TOKEN(TRIANGLES, "triangles");
+ TOKEN(TRIANGLE_STRIP, "triangle_strip");
+ TOKEN(TRIANGLES_ADJACENCY, "triangles_adjacency");
+ TOKEN(MAX_VERTICES, "max_vertices");
+ TOKEN(INVOCATIONS, "invocations");
+ TOKEN(WHEN, "when");
+ TOKEN(KEY, "key");
+ TOKEN(TRACKED, "tracked");
+ TOKEN(CTYPE, "ctype");
+ TOKEN(SKPMCOLOR4F, "SkPMColor4f");
+ TOKEN(SKVECTOR4, "SkVector4");
+ TOKEN(SKRECT, "SkRect");
+ TOKEN(SKIRECT, "SkIRect");
+ TOKEN(SKPMCOLOR, "SkPMColor");
+ TOKEN(SKMATRIX44, "SkMatrix44");
+ TOKEN(BOOL, "bool");
+ TOKEN(INT, "int");
+ TOKEN(FLOAT, "float");
+ #undef TOKEN
+}
+
+Parser::Parser(const char* text, size_t length, SymbolTable& types, ErrorReporter& errors)
+: fText(text)
+, fPushback(Token::INVALID, -1, -1)
+, fTypes(types)
+, fErrors(errors) {
+ fLexer.start(text, length);
+ static const bool layoutMapInitialized = []{ return (void)InitLayoutMap(), true; }();
+ (void) layoutMapInitialized;
+}
+
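+// The following helpers build the AST in place: a node's ID is simply its
+// index in fFile->fNodes, so each macro records the vector size before
+// emplacing the new node and, for the CHILD variants, immediately links it
+// under 'target'.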
+#define CREATE_NODE(result, ...) \
+ ASTNode::ID result(fFile->fNodes.size()); \
+ fFile->fNodes.emplace_back(&fFile->fNodes, __VA_ARGS__)
+
+#define RETURN_NODE(...) \
+ do { \
+ CREATE_NODE(result, __VA_ARGS__); \
+ return result; \
+ } while (false)
+
+#define CREATE_CHILD(child, target, ...) \
+ CREATE_NODE(child, __VA_ARGS__); \
+ fFile->fNodes[target.fValue].addChild(child)
+
+#define CREATE_EMPTY_CHILD(target) \
+ do { \
+ ASTNode::ID child(fFile->fNodes.size()); \
+ fFile->fNodes.emplace_back(); \
+ fFile->fNodes[target.fValue].addChild(child); \
+ } while (false)
+
+/* (directive | section | declaration)* END_OF_FILE */
+std::unique_ptr<ASTFile> Parser::file() {
+ fFile.reset(new ASTFile());
+ CREATE_NODE(result, 0, ASTNode::Kind::kFile);
+ fFile->fRoot = result;
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::END_OF_FILE:
+ return std::move(fFile);
+ case Token::DIRECTIVE: {
+ ASTNode::ID dir = this->directive();
+ if (fErrors.errorCount()) {
+ return nullptr;
+ }
+ if (dir) {
+ getNode(result).addChild(dir);
+ }
+ break;
+ }
+ case Token::SECTION: {
+ ASTNode::ID section = this->section();
+ if (fErrors.errorCount()) {
+ return nullptr;
+ }
+ if (section) {
+ getNode(result).addChild(section);
+ }
+ break;
+ }
+ default: {
+ ASTNode::ID decl = this->declaration();
+ if (fErrors.errorCount()) {
+ return nullptr;
+ }
+ if (decl) {
+ getNode(result).addChild(decl);
+ }
+ }
+ }
+ }
+ return std::move(fFile);
+}
+
+Token Parser::nextRawToken() {
+ if (fPushback.fKind != Token::INVALID) {
+ Token result = fPushback;
+ fPushback.fKind = Token::INVALID;
+ return result;
+ }
+ Token result = fLexer.next();
+ return result;
+}
+
+Token Parser::nextToken() {
+ Token token = this->nextRawToken();
+ while (token.fKind == Token::WHITESPACE || token.fKind == Token::LINE_COMMENT ||
+ token.fKind == Token::BLOCK_COMMENT) {
+ token = this->nextRawToken();
+ }
+ return token;
+}
+
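+// The parser keeps a single token of lookahead in fPushback: pushback() stores
+// a token for re-reading, and peek() fills the slot without consuming it.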
+void Parser::pushback(Token t) {
+ SkASSERT(fPushback.fKind == Token::INVALID);
+ fPushback = std::move(t);
+}
+
+Token Parser::peek() {
+ if (fPushback.fKind == Token::INVALID) {
+ fPushback = this->nextToken();
+ }
+ return fPushback;
+}
+
+bool Parser::checkNext(Token::Kind kind, Token* result) {
+ if (fPushback.fKind != Token::INVALID && fPushback.fKind != kind) {
+ return false;
+ }
+ Token next = this->nextToken();
+ if (next.fKind == kind) {
+ if (result) {
+ *result = next;
+ }
+ return true;
+ }
+ this->pushback(std::move(next));
+ return false;
+}
+
+bool Parser::expect(Token::Kind kind, const char* expected, Token* result) {
+ Token next = this->nextToken();
+ if (next.fKind == kind) {
+ if (result) {
+ *result = std::move(next);
+ }
+ return true;
+ } else {
+ this->error(next, "expected " + String(expected) + ", but found '" +
+ this->text(next) + "'");
+ return false;
+ }
+}
+
+StringFragment Parser::text(Token token) {
+ return StringFragment(fText + token.fOffset, token.fLength);
+}
+
+void Parser::error(Token token, String msg) {
+ this->error(token.fOffset, msg);
+}
+
+void Parser::error(int offset, String msg) {
+ fErrors.error(offset, msg);
+}
+
+bool Parser::isType(StringFragment name) {
+ return nullptr != fTypes[name];
+}
+
+/* DIRECTIVE(#version) INT_LITERAL ("es" | "compatibility")? |
+ DIRECTIVE(#extension) IDENTIFIER COLON IDENTIFIER */
+ASTNode::ID Parser::directive() {
+ Token start;
+ if (!this->expect(Token::DIRECTIVE, "a directive", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ StringFragment text = this->text(start);
+ if (text == "#extension") {
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::COLON, "':'")) {
+ return ASTNode::ID::Invalid();
+ }
+ // FIXME: need to start paying attention to this token
+ if (!this->expect(Token::IDENTIFIER, "an identifier")) {
+ return ASTNode::ID::Invalid();
+ }
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kExtension, this->text(name));
+ } else {
+ this->error(start, "unsupported directive '" + this->text(start) + "'");
+ return ASTNode::ID::Invalid();
+ }
+}
+
+/* SECTION LBRACE (LPAREN IDENTIFIER RPAREN)? <any sequence of tokens with balanced braces>
+ RBRACE */
+ASTNode::ID Parser::section() {
+ Token start;
+ if (!this->expect(Token::SECTION, "a section token", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ StringFragment argument;
+ if (this->peek().fKind == Token::LPAREN) {
+ this->nextToken();
+ Token argToken;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &argToken)) {
+ return ASTNode::ID::Invalid();
+ }
+ argument = this->text(argToken);
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ }
+ if (!this->expect(Token::LBRACE, "'{'")) {
+ return ASTNode::ID::Invalid();
+ }
+ StringFragment text;
+ Token codeStart = this->nextRawToken();
+ size_t startOffset = codeStart.fOffset;
+ this->pushback(codeStart);
+ text.fChars = fText + startOffset;
+ int level = 1;
+ for (;;) {
+ Token next = this->nextRawToken();
+ switch (next.fKind) {
+ case Token::LBRACE:
+ ++level;
+ break;
+ case Token::RBRACE:
+ --level;
+ break;
+ case Token::END_OF_FILE:
+ this->error(start, "reached end of file while parsing section");
+ return ASTNode::ID::Invalid();
+ default:
+ break;
+ }
+ if (!level) {
+ text.fLength = next.fOffset - startOffset;
+ break;
+ }
+ }
+ StringFragment name = this->text(start);
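+ // strip the leading '@' from the section token to get the bare section name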
+ ++name.fChars;
+ --name.fLength;
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kSection,
+ ASTNode::SectionData(name, argument, text));
+}
+
+/* ENUM CLASS IDENTIFIER LBRACE (IDENTIFIER (EQ expression)? (COMMA IDENTIFIER (EQ expression))*)?
+ RBRACE */
+ASTNode::ID Parser::enumDeclaration() {
+ Token start;
+ if (!this->expect(Token::ENUM, "'enum'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::CLASS, "'class'")) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LBRACE, "'{'")) {
+ return ASTNode::ID::Invalid();
+ }
+ fTypes.add(this->text(name), std::unique_ptr<Symbol>(new Type(this->text(name),
+ Type::kEnum_Kind)));
+ CREATE_NODE(result, name.fOffset, ASTNode::Kind::kEnum, this->text(name));
+ if (!this->checkNext(Token::RBRACE)) {
+ Token id;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &id)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (this->checkNext(Token::EQ)) {
+ ASTNode::ID value = this->assignmentExpression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_CHILD(child, result, id.fOffset, ASTNode::Kind::kEnumCase, this->text(id));
+ getNode(child).addChild(value);
+ } else {
+ CREATE_CHILD(child, result, id.fOffset, ASTNode::Kind::kEnumCase, this->text(id));
+ }
+ while (!this->checkNext(Token::RBRACE)) {
+ if (!this->expect(Token::COMMA, "','")) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &id)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (this->checkNext(Token::EQ)) {
+ ASTNode::ID value = this->assignmentExpression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_CHILD(child, result, id.fOffset, ASTNode::Kind::kEnumCase, this->text(id));
+ getNode(child).addChild(value);
+ } else {
+ CREATE_CHILD(child, result, id.fOffset, ASTNode::Kind::kEnumCase, this->text(id));
+ }
+ }
+ }
+ this->expect(Token::SEMICOLON, "';'");
+ return result;
+}
+
+/* enumDeclaration | modifiers (structVarDeclaration | type IDENTIFIER ((LPAREN parameter
+ (COMMA parameter)* RPAREN (block | SEMICOLON)) | SEMICOLON) | interfaceBlock) */
+ASTNode::ID Parser::declaration() {
+ Token lookahead = this->peek();
+ if (lookahead.fKind == Token::ENUM) {
+ return this->enumDeclaration();
+ }
+ Modifiers modifiers = this->modifiers();
+ lookahead = this->peek();
+ if (lookahead.fKind == Token::IDENTIFIER && !this->isType(this->text(lookahead))) {
+ // we have an identifier that's not a type, could be the start of an interface block
+ return this->interfaceBlock(modifiers);
+ }
+ if (lookahead.fKind == Token::STRUCT) {
+ return this->structVarDeclaration(modifiers);
+ }
+ if (lookahead.fKind == Token::SEMICOLON) {
+ this->nextToken();
+ RETURN_NODE(lookahead.fOffset, ASTNode::Kind::kModifiers, modifiers);
+ }
+ ASTNode::ID type = this->type();
+ if (!type) {
+ return ASTNode::ID::Invalid();
+ }
+ if (getNode(type).getTypeData().fIsStructDeclaration && this->checkNext(Token::SEMICOLON)) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (this->checkNext(Token::LPAREN)) {
+ CREATE_NODE(result, name.fOffset, ASTNode::Kind::kFunction);
+ ASTNode::FunctionData fd(modifiers, this->text(name), 0);
+ getNode(result).addChild(type);
+ if (this->peek().fKind != Token::RPAREN) {
+ for (;;) {
+ ASTNode::ID parameter = this->parameter();
+ if (!parameter) {
+ return ASTNode::ID::Invalid();
+ }
+ ++fd.fParameterCount;
+ getNode(result).addChild(parameter);
+ if (!this->checkNext(Token::COMMA)) {
+ break;
+ }
+ }
+ }
+ getNode(result).setFunctionData(fd);
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID body;
+ if (!this->checkNext(Token::SEMICOLON)) {
+ body = this->block();
+ if (!body) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(body);
+ }
+ return result;
+ } else {
+ return this->varDeclarationEnd(modifiers, type, this->text(name));
+ }
+}
+
+/* modifiers type IDENTIFIER varDeclarationEnd */
+ASTNode::ID Parser::varDeclarations() {
+ Modifiers modifiers = this->modifiers();
+ ASTNode::ID type = this->type();
+ if (!type) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ return this->varDeclarationEnd(modifiers, type, this->text(name));
+}
+
+/* STRUCT IDENTIFIER LBRACE varDeclaration* RBRACE */
+ASTNode::ID Parser::structDeclaration() {
+ if (!this->expect(Token::STRUCT, "'struct'")) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LBRACE, "'{'")) {
+ return ASTNode::ID::Invalid();
+ }
+ std::vector<Type::Field> fields;
+ while (this->peek().fKind != Token::RBRACE) {
+ ASTNode::ID decls = this->varDeclarations();
+ if (!decls) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode& declsNode = getNode(decls);
+ auto type = (const Type*) fTypes[(declsNode.begin() + 1)->getTypeData().fName];
+ for (auto iter = declsNode.begin() + 2; iter != declsNode.end(); ++iter) {
+ ASTNode& var = *iter;
+ ASTNode::VarData vd = var.getVarData();
+ for (int j = vd.fSizeCount - 1; j >= 0; j--) {
+ const ASTNode& size = *(var.begin() + j);
+ if (!size || size.fKind != ASTNode::Kind::kInt) {
+ this->error(declsNode.fOffset, "array size in struct field must be a constant");
+ return ASTNode::ID::Invalid();
+ }
+ uint64_t columns = size.getInt();
+ String name = type->name() + "[" + to_string(columns) + "]";
+ type = (Type*) fTypes.takeOwnership(std::unique_ptr<Symbol>(
+ new Type(name,
+ Type::kArray_Kind,
+ *type,
+ (int) columns)));
+ }
+ fields.push_back(Type::Field(declsNode.begin()->getModifiers(), vd.fName, type));
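+ // any child left over after the size expressions is an initializer, which
+ // struct fields may not have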
+ if (vd.fSizeCount ? (var.begin() + (vd.fSizeCount - 1))->fNext : var.fFirstChild) {
+ this->error(declsNode.fOffset, "initializers are not permitted on struct fields");
+ }
+ }
+ }
+ if (!this->expect(Token::RBRACE, "'}'")) {
+ return ASTNode::ID::Invalid();
+ }
+ fTypes.add(this->text(name), std::unique_ptr<Type>(new Type(name.fOffset, this->text(name),
+ fields)));
+ RETURN_NODE(name.fOffset, ASTNode::Kind::kType,
+ ASTNode::TypeData(this->text(name), true, false));
+}
+
+/* structDeclaration ((IDENTIFIER varDeclarationEnd) | SEMICOLON) */
+ASTNode::ID Parser::structVarDeclaration(Modifiers modifiers) {
+ ASTNode::ID type = this->structDeclaration();
+ if (!type) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (this->checkNext(Token::IDENTIFIER, &name)) {
+ return this->varDeclarationEnd(modifiers, std::move(type), this->text(name));
+ }
+ this->expect(Token::SEMICOLON, "';'");
+ return ASTNode::ID::Invalid();
+}
+
+/* (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)? (COMMA IDENTIFIER
+ (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)?)* SEMICOLON */
+ASTNode::ID Parser::varDeclarationEnd(Modifiers mods, ASTNode::ID type, StringFragment name) {
+ CREATE_NODE(result, -1, ASTNode::Kind::kVarDeclarations);
+ CREATE_CHILD(modifiers, result, -1, ASTNode::Kind::kModifiers, mods);
+ getNode(result).addChild(type);
+ CREATE_NODE(currentVar, -1, ASTNode::Kind::kVarDeclaration);
+ ASTNode::VarData vd(name, 0);
+ getNode(result).addChild(currentVar);
+ while (this->checkNext(Token::LBRACKET)) {
+ if (this->checkNext(Token::RBRACKET)) {
+ CREATE_EMPTY_CHILD(currentVar);
+ } else {
+ ASTNode::ID size = this->expression();
+ if (!size) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(currentVar).addChild(size);
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return ASTNode::ID::Invalid();
+ }
+ }
+ ++vd.fSizeCount;
+ }
+ getNode(currentVar).setVarData(vd);
+ if (this->checkNext(Token::EQ)) {
+ ASTNode::ID value = this->assignmentExpression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(currentVar).addChild(value);
+ }
+ while (this->checkNext(Token::COMMA)) {
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ currentVar = ASTNode::ID(fFile->fNodes.size());
+ vd = ASTNode::VarData(this->text(name), 0);
+ fFile->fNodes.emplace_back(&fFile->fNodes, -1, ASTNode::Kind::kVarDeclaration);
+ getNode(result).addChild(currentVar);
+ while (this->checkNext(Token::LBRACKET)) {
+ if (this->checkNext(Token::RBRACKET)) {
+ CREATE_EMPTY_CHILD(currentVar);
+ } else {
+ ASTNode::ID size = this->expression();
+ if (!size) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(currentVar).addChild(size);
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return ASTNode::ID::Invalid();
+ }
+ }
+ ++vd.fSizeCount;
+ }
+ getNode(currentVar).setVarData(vd);
+ if (this->checkNext(Token::EQ)) {
+ ASTNode::ID value = this->assignmentExpression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(currentVar).addChild(value);
+ }
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ return result;
+}
+
+/* modifiers type IDENTIFIER (LBRACKET INT_LITERAL RBRACKET)? */
+ASTNode::ID Parser::parameter() {
+ Modifiers modifiers = this->modifiersWithDefaults(0);
+ ASTNode::ID type = this->type();
+ if (!type) {
+ return ASTNode::ID::Invalid();
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, name.fOffset, ASTNode::Kind::kParameter);
+ ASTNode::ParameterData pd(modifiers, this->text(name), 0);
+ getNode(result).addChild(type);
+ while (this->checkNext(Token::LBRACKET)) {
+ Token sizeToken;
+ if (!this->expect(Token::INT_LITERAL, "a positive integer", &sizeToken)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_CHILD(child, result, sizeToken.fOffset, ASTNode::Kind::kInt,
+ SkSL::stoi(this->text(sizeToken)));
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ++pd.fSizeCount;
+ }
+ getNode(result).setParameterData(pd);
+ return result;
+}
+
+/** EQ INT_LITERAL */
+int Parser::layoutInt() {
+ if (!this->expect(Token::EQ, "'='")) {
+ return -1;
+ }
+ Token resultToken;
+ if (this->expect(Token::INT_LITERAL, "a non-negative integer", &resultToken)) {
+ return SkSL::stoi(this->text(resultToken));
+ }
+ return -1;
+}
+
+/** EQ IDENTIFIER */
+StringFragment Parser::layoutIdentifier() {
+ if (!this->expect(Token::EQ, "'='")) {
+ return StringFragment();
+ }
+ Token resultToken;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &resultToken)) {
+ return StringFragment();
+ }
+ return this->text(resultToken);
+}
+
+/** EQ <any sequence of tokens with balanced parentheses and no top-level comma> */
+StringFragment Parser::layoutCode() {
+ if (!this->expect(Token::EQ, "'='")) {
+ return "";
+ }
+ Token start = this->nextRawToken();
+ this->pushback(start);
+ StringFragment code;
+ code.fChars = fText + start.fOffset;
+ int level = 1;
+ bool done = false;
+ while (!done) {
+ Token next = this->nextRawToken();
+ switch (next.fKind) {
+ case Token::LPAREN:
+ ++level;
+ break;
+ case Token::RPAREN:
+ --level;
+ break;
+ case Token::COMMA:
+ if (level == 1) {
+ done = true;
+ }
+ break;
+ case Token::END_OF_FILE:
+ this->error(start, "reached end of file while parsing layout");
+ return "";
+ default:
+ break;
+ }
+ if (!level) {
+ done = true;
+ }
+ if (done) {
+ code.fLength = next.fOffset - start.fOffset;
+ this->pushback(std::move(next));
+ }
+ }
+ return code;
+}
+
+/** (EQ IDENTIFIER('identity'))? */
+Layout::Key Parser::layoutKey() {
+ if (this->peek().fKind == Token::EQ) {
+ this->expect(Token::EQ, "'='");
+ Token key;
+ if (this->expect(Token::IDENTIFIER, "an identifier", &key)) {
+ if (this->text(key) == "identity") {
+ return Layout::kIdentity_Key;
+ } else {
+ this->error(key, "unsupported layout key");
+ }
+ }
+ }
+ return Layout::kKey_Key;
+}
+
+Layout::CType Parser::layoutCType() {
+ if (this->expect(Token::EQ, "'='")) {
+ Token t = this->nextToken();
+ String text = this->text(t);
+ auto found = layoutTokens->find(text);
+ if (found != layoutTokens->end()) {
+ switch (found->second) {
+ case LayoutToken::SKPMCOLOR4F:
+ return Layout::CType::kSkPMColor4f;
+ case LayoutToken::SKVECTOR4:
+ return Layout::CType::kSkVector4;
+ case LayoutToken::SKRECT:
+ return Layout::CType::kSkRect;
+ case LayoutToken::SKIRECT:
+ return Layout::CType::kSkIRect;
+ case LayoutToken::SKPMCOLOR:
+ return Layout::CType::kSkPMColor;
+ case LayoutToken::BOOL:
+ return Layout::CType::kBool;
+ case LayoutToken::INT:
+ return Layout::CType::kInt32;
+ case LayoutToken::FLOAT:
+ return Layout::CType::kFloat;
+ case LayoutToken::SKMATRIX44:
+ return Layout::CType::kSkMatrix44;
+ default:
+ break;
+ }
+ }
+ this->error(t, "unsupported ctype");
+ }
+ return Layout::CType::kDefault;
+}
+
+/* LAYOUT LPAREN IDENTIFIER (EQ INT_LITERAL)? (COMMA IDENTIFIER (EQ INT_LITERAL)?)* RPAREN */
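+// e.g. accepts qualifier lists such as 'layout(location = 0, set = 1)'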
+Layout Parser::layout() {
+ int flags = 0;
+ int location = -1;
+ int offset = -1;
+ int binding = -1;
+ int index = -1;
+ int set = -1;
+ int builtin = -1;
+ int inputAttachmentIndex = -1;
+ Layout::Format format = Layout::Format::kUnspecified;
+ Layout::Primitive primitive = Layout::kUnspecified_Primitive;
+ int maxVertices = -1;
+ int invocations = -1;
+ StringFragment when;
+ Layout::Key key = Layout::kNo_Key;
+ Layout::CType ctype = Layout::CType::kDefault;
+ if (this->checkNext(Token::LAYOUT)) {
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return Layout(flags, location, offset, binding, index, set, builtin,
+ inputAttachmentIndex, format, primitive, maxVertices, invocations, when,
+ key, ctype);
+ }
+ for (;;) {
+ Token t = this->nextToken();
+ String text = this->text(t);
+ auto found = layoutTokens->find(text);
+ if (found != layoutTokens->end()) {
+ switch (found->second) {
+ case LayoutToken::LOCATION:
+ location = this->layoutInt();
+ break;
+ case LayoutToken::OFFSET:
+ offset = this->layoutInt();
+ break;
+ case LayoutToken::BINDING:
+ binding = this->layoutInt();
+ break;
+ case LayoutToken::INDEX:
+ index = this->layoutInt();
+ break;
+ case LayoutToken::SET:
+ set = this->layoutInt();
+ break;
+ case LayoutToken::BUILTIN:
+ builtin = this->layoutInt();
+ break;
+ case LayoutToken::INPUT_ATTACHMENT_INDEX:
+ inputAttachmentIndex = this->layoutInt();
+ break;
+ case LayoutToken::ORIGIN_UPPER_LEFT:
+ flags |= Layout::kOriginUpperLeft_Flag;
+ break;
+ case LayoutToken::OVERRIDE_COVERAGE:
+ flags |= Layout::kOverrideCoverage_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_ALL_EQUATIONS:
+ flags |= Layout::kBlendSupportAllEquations_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_MULTIPLY:
+ flags |= Layout::kBlendSupportMultiply_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_SCREEN:
+ flags |= Layout::kBlendSupportScreen_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_OVERLAY:
+ flags |= Layout::kBlendSupportOverlay_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_DARKEN:
+ flags |= Layout::kBlendSupportDarken_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_LIGHTEN:
+ flags |= Layout::kBlendSupportLighten_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_COLORDODGE:
+ flags |= Layout::kBlendSupportColorDodge_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_COLORBURN:
+ flags |= Layout::kBlendSupportColorBurn_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_HARDLIGHT:
+ flags |= Layout::kBlendSupportHardLight_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_SOFTLIGHT:
+ flags |= Layout::kBlendSupportSoftLight_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_DIFFERENCE:
+ flags |= Layout::kBlendSupportDifference_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_EXCLUSION:
+ flags |= Layout::kBlendSupportExclusion_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_HSL_HUE:
+ flags |= Layout::kBlendSupportHSLHue_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_HSL_SATURATION:
+ flags |= Layout::kBlendSupportHSLSaturation_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_HSL_COLOR:
+ flags |= Layout::kBlendSupportHSLColor_Flag;
+ break;
+ case LayoutToken::BLEND_SUPPORT_HSL_LUMINOSITY:
+ flags |= Layout::kBlendSupportHSLLuminosity_Flag;
+ break;
+ case LayoutToken::PUSH_CONSTANT:
+ flags |= Layout::kPushConstant_Flag;
+ break;
+ case LayoutToken::TRACKED:
+ flags |= Layout::kTracked_Flag;
+ break;
+ case LayoutToken::POINTS:
+ primitive = Layout::kPoints_Primitive;
+ break;
+ case LayoutToken::LINES:
+ primitive = Layout::kLines_Primitive;
+ break;
+ case LayoutToken::LINE_STRIP:
+ primitive = Layout::kLineStrip_Primitive;
+ break;
+ case LayoutToken::LINES_ADJACENCY:
+ primitive = Layout::kLinesAdjacency_Primitive;
+ break;
+ case LayoutToken::TRIANGLES:
+ primitive = Layout::kTriangles_Primitive;
+ break;
+ case LayoutToken::TRIANGLE_STRIP:
+ primitive = Layout::kTriangleStrip_Primitive;
+ break;
+ case LayoutToken::TRIANGLES_ADJACENCY:
+ primitive = Layout::kTrianglesAdjacency_Primitive;
+ break;
+ case LayoutToken::MAX_VERTICES:
+ maxVertices = this->layoutInt();
+ break;
+ case LayoutToken::INVOCATIONS:
+ invocations = this->layoutInt();
+ break;
+ case LayoutToken::WHEN:
+ when = this->layoutCode();
+ break;
+ case LayoutToken::KEY:
+ key = this->layoutKey();
+ break;
+ case LayoutToken::CTYPE:
+ ctype = this->layoutCType();
+ break;
+ default:
+ this->error(t, ("'" + text + "' is not a valid layout qualifier").c_str());
+ break;
+ }
+ } else if (Layout::ReadFormat(text, &format)) {
+ // Layout::ReadFormat stored the result in 'format'.
+ } else {
+ this->error(t, ("'" + text + "' is not a valid layout qualifier").c_str());
+ }
+ if (this->checkNext(Token::RPAREN)) {
+ break;
+ }
+ if (!this->expect(Token::COMMA, "','")) {
+ break;
+ }
+ }
+ }
+ return Layout(flags, location, offset, binding, index, set, builtin, inputAttachmentIndex,
+ format, primitive, maxVertices, invocations, when, key, ctype);
+}
+
+/* layout? (UNIFORM | CONST | IN | OUT | INOUT | LOWP | MEDIUMP | HIGHP | FLAT | NOPERSPECTIVE |
+ READONLY | WRITEONLY | COHERENT | VOLATILE | RESTRICT | BUFFER | PLS | PLSIN |
+ PLSOUT)* */
+Modifiers Parser::modifiers() {
+ Layout layout = this->layout();
+ int flags = 0;
+ for (;;) {
+ // TODO: handle duplicate / incompatible flags
+ switch (peek().fKind) {
+ case Token::UNIFORM:
+ this->nextToken();
+ flags |= Modifiers::kUniform_Flag;
+ break;
+ case Token::CONST:
+ this->nextToken();
+ flags |= Modifiers::kConst_Flag;
+ break;
+ case Token::IN:
+ this->nextToken();
+ flags |= Modifiers::kIn_Flag;
+ break;
+ case Token::OUT:
+ this->nextToken();
+ flags |= Modifiers::kOut_Flag;
+ break;
+ case Token::INOUT:
+ this->nextToken();
+ flags |= Modifiers::kIn_Flag;
+ flags |= Modifiers::kOut_Flag;
+ break;
+ case Token::FLAT:
+ this->nextToken();
+ flags |= Modifiers::kFlat_Flag;
+ break;
+ case Token::NOPERSPECTIVE:
+ this->nextToken();
+ flags |= Modifiers::kNoPerspective_Flag;
+ break;
+ case Token::READONLY:
+ this->nextToken();
+ flags |= Modifiers::kReadOnly_Flag;
+ break;
+ case Token::WRITEONLY:
+ this->nextToken();
+ flags |= Modifiers::kWriteOnly_Flag;
+ break;
+ case Token::COHERENT:
+ this->nextToken();
+ flags |= Modifiers::kCoherent_Flag;
+ break;
+ case Token::VOLATILE:
+ this->nextToken();
+ flags |= Modifiers::kVolatile_Flag;
+ break;
+ case Token::RESTRICT:
+ this->nextToken();
+ flags |= Modifiers::kRestrict_Flag;
+ break;
+ case Token::BUFFER:
+ this->nextToken();
+ flags |= Modifiers::kBuffer_Flag;
+ break;
+ case Token::HASSIDEEFFECTS:
+ this->nextToken();
+ flags |= Modifiers::kHasSideEffects_Flag;
+ break;
+ case Token::PLS:
+ this->nextToken();
+ flags |= Modifiers::kPLS_Flag;
+ break;
+ case Token::PLSIN:
+ this->nextToken();
+ flags |= Modifiers::kPLSIn_Flag;
+ break;
+ case Token::PLSOUT:
+ this->nextToken();
+ flags |= Modifiers::kPLSOut_Flag;
+ break;
+ default:
+ return Modifiers(layout, flags);
+ }
+ }
+}
+
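+// Parses modifiers(); if the parse yielded no modifier flags, substitutes defaultFlags (the
+// parsed layout is kept either way).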
+Modifiers Parser::modifiersWithDefaults(int defaultFlags) {
+ Modifiers result = this->modifiers();
+ if (!result.fFlags) {
+ return Modifiers(result.fLayout, defaultFlags);
+ }
+ return result;
+}
+
+/* ifStatement | forStatement | doStatement | whileStatement | switchStatement | returnStatement |
+   breakStatement | continueStatement | discardStatement | block | varDeclarations |
+   expressionStatement | SEMICOLON */
+ASTNode::ID Parser::statement() {
+ Token start = this->nextToken();
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ this->pushback(start);
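+    // The first token was read only so we could dispatch on its kind; it has been pushed back,
+    // so each case below re-parses the statement from its beginning.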
+ switch (start.fKind) {
+ case Token::IF: // fall through
+ case Token::STATIC_IF:
+ return this->ifStatement();
+ case Token::FOR:
+ return this->forStatement();
+ case Token::DO:
+ return this->doStatement();
+ case Token::WHILE:
+ return this->whileStatement();
+ case Token::SWITCH: // fall through
+ case Token::STATIC_SWITCH:
+ return this->switchStatement();
+ case Token::RETURN:
+ return this->returnStatement();
+ case Token::BREAK:
+ return this->breakStatement();
+ case Token::CONTINUE:
+ return this->continueStatement();
+ case Token::DISCARD:
+ return this->discardStatement();
+ case Token::LBRACE:
+ return this->block();
+ case Token::SEMICOLON:
+ this->nextToken();
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kBlock);
+ case Token::CONST:
+ return this->varDeclarations();
+ case Token::IDENTIFIER:
+ if (this->isType(this->text(start))) {
+ return this->varDeclarations();
+ }
+ // fall through
+ default:
+ return this->expressionStatement();
+ }
+}
+
+/* IDENTIFIER(type) (LBRACKET intLiteral? RBRACKET)* QUESTION? */
+ASTNode::ID Parser::type() {
+ Token type;
+ if (!this->expect(Token::IDENTIFIER, "a type", &type)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->isType(this->text(type))) {
+ this->error(type, ("no type named '" + this->text(type) + "'").c_str());
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, type.fOffset, ASTNode::Kind::kType);
+ ASTNode::TypeData td(this->text(type), false, false);
+ while (this->checkNext(Token::LBRACKET)) {
+ if (this->peek().fKind != Token::RBRACKET) {
+ SKSL_INT i;
+ if (this->intLiteral(&i)) {
+ CREATE_CHILD(child, result, -1, ASTNode::Kind::kInt, i);
+ } else {
+ return ASTNode::ID::Invalid();
+ }
+ } else {
+ CREATE_EMPTY_CHILD(result);
+ }
+ this->expect(Token::RBRACKET, "']'");
+ }
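+    // A trailing '?' marks the type as nullable (e.g. 'fragmentProcessor?').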
+ td.fIsNullable = this->checkNext(Token::QUESTION);
+ getNode(result).setTypeData(td);
+ return result;
+}
+
+/* IDENTIFIER LBRACE varDeclaration* RBRACE (IDENTIFIER (LBRACKET expression? RBRACKET)*)? */
+ASTNode::ID Parser::interfaceBlock(Modifiers mods) {
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (peek().fKind != Token::LBRACE) {
+ // we only get into interfaceBlock if we found a top-level identifier which was not a type.
+ // 99% of the time, the user was not actually intending to create an interface block, so
+ // it's better to report it as an unknown type
+ this->error(name, "no type named '" + this->text(name) + "'");
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, name.fOffset, ASTNode::Kind::kInterfaceBlock);
+ ASTNode::InterfaceBlockData id(mods, this->text(name), 0, "", 0);
+ this->nextToken();
+ while (this->peek().fKind != Token::RBRACE) {
+ ASTNode::ID decl = this->varDeclarations();
+ if (!decl) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(decl);
+ ++id.fDeclarationCount;
+ }
+ this->nextToken();
+    Token instanceNameToken;
+ if (this->checkNext(Token::IDENTIFIER, &instanceNameToken)) {
+ id.fInstanceName = this->text(instanceNameToken);
+ while (this->checkNext(Token::LBRACKET)) {
+ if (this->peek().fKind != Token::RBRACKET) {
+ ASTNode::ID size = this->expression();
+ if (!size) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(size);
+ } else {
+ CREATE_EMPTY_CHILD(result);
+ }
+ ++id.fSizeCount;
+ this->expect(Token::RBRACKET, "']'");
+ }
+ }
+ getNode(result).setInterfaceBlockData(id);
+ this->expect(Token::SEMICOLON, "';'");
+ return result;
+}
+
+/* IF LPAREN expression RPAREN statement (ELSE statement)? */
+ASTNode::ID Parser::ifStatement() {
+ Token start;
+ bool isStatic = this->checkNext(Token::STATIC_IF, &start);
+ if (!isStatic && !this->expect(Token::IF, "'if'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kIf, isStatic);
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID test = this->expression();
+ if (!test) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(test);
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID ifTrue = this->statement();
+ if (!ifTrue) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(ifTrue);
+ ASTNode::ID ifFalse;
+ if (this->checkNext(Token::ELSE)) {
+ ifFalse = this->statement();
+ if (!ifFalse) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(ifFalse);
+ }
+ return result;
+}
+
+/* DO statement WHILE LPAREN expression RPAREN SEMICOLON */
+ASTNode::ID Parser::doStatement() {
+ Token start;
+ if (!this->expect(Token::DO, "'do'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kDo);
+ ASTNode::ID statement = this->statement();
+ if (!statement) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(statement);
+ if (!this->expect(Token::WHILE, "'while'")) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID test = this->expression();
+ if (!test) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(test);
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ return result;
+}
+
+/* WHILE LPAREN expression RPAREN statement */
+ASTNode::ID Parser::whileStatement() {
+ Token start;
+ if (!this->expect(Token::WHILE, "'while'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kWhile);
+ ASTNode::ID test = this->expression();
+ if (!test) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(test);
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID statement = this->statement();
+ if (!statement) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(statement);
+ return result;
+}
+
+/* CASE expression COLON statement* */
+ASTNode::ID Parser::switchCase() {
+ Token start;
+ if (!this->expect(Token::CASE, "'case'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kSwitchCase);
+ ASTNode::ID value = this->expression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::COLON, "':'")) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(value);
+ while (this->peek().fKind != Token::RBRACE && this->peek().fKind != Token::CASE &&
+ this->peek().fKind != Token::DEFAULT) {
+ ASTNode::ID s = this->statement();
+ if (!s) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(s);
+ }
+ return result;
+}
+
+/* SWITCH LPAREN expression RPAREN LBRACE switchCase* (DEFAULT COLON statement*)? RBRACE */
+ASTNode::ID Parser::switchStatement() {
+ Token start;
+ bool isStatic = this->checkNext(Token::STATIC_SWITCH, &start);
+ if (!isStatic && !this->expect(Token::SWITCH, "'switch'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID value = this->expression();
+ if (!value) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LBRACE, "'{'")) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kSwitch, isStatic);
+ getNode(result).addChild(value);
+ while (this->peek().fKind == Token::CASE) {
+ ASTNode::ID c = this->switchCase();
+ if (!c) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(c);
+ }
+ // Requiring default: to be last (in defiance of C and GLSL) was a deliberate decision. Other
+ // parts of the compiler may rely upon this assumption.
+ if (this->peek().fKind == Token::DEFAULT) {
+ Token defaultStart;
+ SkAssertResult(this->expect(Token::DEFAULT, "'default'", &defaultStart));
+ if (!this->expect(Token::COLON, "':'")) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_CHILD(defaultCase, result, defaultStart.fOffset, ASTNode::Kind::kSwitchCase);
+ CREATE_EMPTY_CHILD(defaultCase); // empty test to signify default case
+ while (this->peek().fKind != Token::RBRACE) {
+ ASTNode::ID s = this->statement();
+ if (!s) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(defaultCase).addChild(s);
+ }
+ }
+ if (!this->expect(Token::RBRACE, "'}'")) {
+ return ASTNode::ID::Invalid();
+ }
+ return result;
+}
+
+/* FOR LPAREN (declaration | expression)? SEMICOLON expression? SEMICOLON expression? RPAREN
+   statement */
+ASTNode::ID Parser::forStatement() {
+ Token start;
+ if (!this->expect(Token::FOR, "'for'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kFor);
+ ASTNode::ID initializer;
+ Token nextToken = this->peek();
+ switch (nextToken.fKind) {
+ case Token::SEMICOLON:
+ this->nextToken();
+ CREATE_EMPTY_CHILD(result);
+ break;
+ case Token::CONST: {
+ initializer = this->varDeclarations();
+ if (!initializer) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(initializer);
+ break;
+ }
+ case Token::IDENTIFIER: {
+ if (this->isType(this->text(nextToken))) {
+ initializer = this->varDeclarations();
+ if (!initializer) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(initializer);
+ break;
+ }
+ } // fall through
+ default:
+ initializer = this->expressionStatement();
+ if (!initializer) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(initializer);
+ }
+ ASTNode::ID test;
+ if (this->peek().fKind != Token::SEMICOLON) {
+ test = this->expression();
+ if (!test) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(test);
+ } else {
+ CREATE_EMPTY_CHILD(result);
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID next;
+ if (this->peek().fKind != Token::RPAREN) {
+ next = this->expression();
+ if (!next) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(next);
+ } else {
+ CREATE_EMPTY_CHILD(result);
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID statement = this->statement();
+ if (!statement) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(statement);
+ return result;
+}
+
+/* RETURN expression? SEMICOLON */
+ASTNode::ID Parser::returnStatement() {
+ Token start;
+ if (!this->expect(Token::RETURN, "'return'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kReturn);
+ if (this->peek().fKind != Token::SEMICOLON) {
+ ASTNode::ID expression = this->expression();
+ if (!expression) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(expression);
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ return result;
+}
+
+/* BREAK SEMICOLON */
+ASTNode::ID Parser::breakStatement() {
+ Token start;
+ if (!this->expect(Token::BREAK, "'break'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kBreak);
+}
+
+/* CONTINUE SEMICOLON */
+ASTNode::ID Parser::continueStatement() {
+ Token start;
+ if (!this->expect(Token::CONTINUE, "'continue'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kContinue);
+}
+
+/* DISCARD SEMICOLON */
+ASTNode::ID Parser::discardStatement() {
+ Token start;
+ if (!this->expect(Token::DISCARD, "'continue'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return ASTNode::ID::Invalid();
+ }
+ RETURN_NODE(start.fOffset, ASTNode::Kind::kDiscard);
+}
+
+/* LBRACE statement* RBRACE */
+ASTNode::ID Parser::block() {
+ Token start;
+ if (!this->expect(Token::LBRACE, "'{'", &start)) {
+ return ASTNode::ID::Invalid();
+ }
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, start.fOffset, ASTNode::Kind::kBlock);
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::RBRACE:
+ this->nextToken();
+ return result;
+ case Token::END_OF_FILE:
+ this->error(this->peek(), "expected '}', but found end of file");
+ return ASTNode::ID::Invalid();
+ default: {
+ ASTNode::ID statement = this->statement();
+ if (!statement) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(statement);
+ }
+ }
+ }
+ return result;
+}
+
+/* expression SEMICOLON */
+ASTNode::ID Parser::expressionStatement() {
+ ASTNode::ID expr = this->expression();
+ if (expr) {
+ if (this->expect(Token::SEMICOLON, "';'")) {
+ return expr;
+ }
+ }
+ return ASTNode::ID::Invalid();
+}
+
+/* assignmentExpression (COMMA assignmentExpression)* */
+ASTNode::ID Parser::expression() {
+ ASTNode::ID result = this->assignmentExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::COMMA, &t)) {
+ ASTNode::ID right = this->assignmentExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, t.fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* ternaryExpression ((EQ | STAREQ | SLASHEQ | PERCENTEQ | PLUSEQ | MINUSEQ | SHLEQ | SHREQ |
+ BITWISEANDEQ | BITWISEXOREQ | BITWISEOREQ | LOGICALANDEQ | LOGICALXOREQ | LOGICALOREQ)
+ assignmentExpression)*
+ */
+ASTNode::ID Parser::assignmentExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->ternaryExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::EQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALOREQ: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->assignmentExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* logicalOrExpression ('?' expression ':' assignmentExpression)? */
+ASTNode::ID Parser::ternaryExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID base = this->logicalOrExpression();
+ if (!base) {
+ return ASTNode::ID::Invalid();
+ }
+ if (this->checkNext(Token::QUESTION)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID trueExpr = this->expression();
+ if (!trueExpr) {
+ return ASTNode::ID::Invalid();
+ }
+ if (this->expect(Token::COLON, "':'")) {
+ ASTNode::ID falseExpr = this->assignmentExpression();
+ if (!falseExpr) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(ternary, getNode(base).fOffset, ASTNode::Kind::kTernary);
+ getNode(ternary).addChild(base);
+ getNode(ternary).addChild(trueExpr);
+ getNode(ternary).addChild(falseExpr);
+ return ternary;
+ }
+ return ASTNode::ID::Invalid();
+ }
+ return base;
+}
+
+/* logicalXorExpression (LOGICALOR logicalXorExpression)* */
+ASTNode::ID Parser::logicalOrExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->logicalXorExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::LOGICALOR, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->logicalXorExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* logicalAndExpression (LOGICALXOR logicalAndExpression)* */
+ASTNode::ID Parser::logicalXorExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->logicalAndExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::LOGICALXOR, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->logicalAndExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* bitwiseOrExpression (LOGICALAND bitwiseOrExpression)* */
+ASTNode::ID Parser::logicalAndExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->bitwiseOrExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::LOGICALAND, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->bitwiseOrExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* bitwiseXorExpression (BITWISEOR bitwiseXorExpression)* */
+ASTNode::ID Parser::bitwiseOrExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->bitwiseXorExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::BITWISEOR, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->bitwiseXorExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* bitwiseAndExpression (BITWISEXOR bitwiseAndExpression)* */
+ASTNode::ID Parser::bitwiseXorExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->bitwiseAndExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::BITWISEXOR, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->bitwiseAndExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* equalityExpression (BITWISEAND equalityExpression)* */
+ASTNode::ID Parser::bitwiseAndExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->equalityExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t;
+ while (this->checkNext(Token::BITWISEAND, &t)) {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID right = this->equalityExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary, std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ }
+ return result;
+}
+
+/* relationalExpression ((EQEQ | NEQ) relationalExpression)* */
+ASTNode::ID Parser::equalityExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->relationalExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::EQEQ: // fall through
+ case Token::NEQ: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->relationalExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* shiftExpression ((LT | GT | LTEQ | GTEQ) shiftExpression)* */
+ASTNode::ID Parser::relationalExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->shiftExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->shiftExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* additiveExpression ((SHL | SHR) additiveExpression)* */
+ASTNode::ID Parser::shiftExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->additiveExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::SHL: // fall through
+ case Token::SHR: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->additiveExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* multiplicativeExpression ((PLUS | MINUS) multiplicativeExpression)* */
+ASTNode::ID Parser::additiveExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->multiplicativeExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::PLUS: // fall through
+ case Token::MINUS: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->multiplicativeExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* unaryExpression ((STAR | SLASH | PERCENT) unaryExpression)* */
+ASTNode::ID Parser::multiplicativeExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->unaryExpression();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::STAR: // fall through
+ case Token::SLASH: // fall through
+ case Token::PERCENT: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID right = this->unaryExpression();
+ if (!right) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(newResult, getNode(result).fOffset, ASTNode::Kind::kBinary,
+ std::move(t));
+ getNode(newResult).addChild(result);
+ getNode(newResult).addChild(right);
+ result = newResult;
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* postfixExpression | (PLUS | MINUS | NOT | PLUSPLUS | MINUSMINUS) unaryExpression */
+ASTNode::ID Parser::unaryExpression() {
+ AutoDepth depth(this);
+ switch (this->peek().fKind) {
+ case Token::PLUS: // fall through
+ case Token::MINUS: // fall through
+ case Token::LOGICALNOT: // fall through
+ case Token::BITWISENOT: // fall through
+ case Token::PLUSPLUS: // fall through
+ case Token::MINUSMINUS: {
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ Token t = this->nextToken();
+ ASTNode::ID expr = this->unaryExpression();
+ if (!expr) {
+ return ASTNode::ID::Invalid();
+ }
+ CREATE_NODE(result, t.fOffset, ASTNode::Kind::kPrefix, std::move(t));
+ getNode(result).addChild(expr);
+ return result;
+ }
+ default:
+ return this->postfixExpression();
+ }
+}
+
+/* term suffix* */
+ASTNode::ID Parser::postfixExpression() {
+ AutoDepth depth(this);
+ ASTNode::ID result = this->term();
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ for (;;) {
+ Token t = this->peek();
+ switch (t.fKind) {
+ case Token::FLOAT_LITERAL:
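+                // A float literal can only continue a postfix expression if it begins with '.',
+                // i.e. it is really a swizzle such as '.0r' (see suffix()).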
+ if (this->text(t)[0] != '.') {
+ return result;
+ }
+ // fall through
+ case Token::LBRACKET:
+ case Token::DOT:
+ case Token::LPAREN:
+ case Token::PLUSPLUS:
+ case Token::MINUSMINUS:
+ case Token::COLONCOLON:
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ result = this->suffix(result);
+ if (!result) {
+ return ASTNode::ID::Invalid();
+ }
+ break;
+ default:
+ return result;
+ }
+ }
+}
+
+/* LBRACKET expression? RBRACKET | DOT IDENTIFIER | LPAREN parameters RPAREN |
+ PLUSPLUS | MINUSMINUS | COLONCOLON IDENTIFIER | FLOAT_LITERAL [IDENTIFIER] */
+ASTNode::ID Parser::suffix(ASTNode::ID base) {
+ SkASSERT(base);
+ Token next = this->nextToken();
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ switch (next.fKind) {
+ case Token::LBRACKET: {
+ if (this->checkNext(Token::RBRACKET)) {
+ CREATE_NODE(result, next.fOffset, ASTNode::Kind::kIndex);
+ getNode(result).addChild(base);
+ return result;
+ }
+ ASTNode::ID e = this->expression();
+ if (!e) {
+ return ASTNode::ID::Invalid();
+ }
+ this->expect(Token::RBRACKET, "']' to complete array access expression");
+ CREATE_NODE(result, next.fOffset, ASTNode::Kind::kIndex);
+ getNode(result).addChild(base);
+ getNode(result).addChild(e);
+ return result;
+ }
+ case Token::DOT: // fall through
+ case Token::COLONCOLON: {
+ int offset = this->peek().fOffset;
+ StringFragment text;
+ if (this->identifier(&text)) {
+ CREATE_NODE(result, offset, ASTNode::Kind::kField, std::move(text));
+ getNode(result).addChild(base);
+ return result;
+            }
+            // identifier() has already reported an error; don't fall through to the swizzle case.
+            return ASTNode::ID::Invalid();
+        }
+ case Token::FLOAT_LITERAL: {
+ // Swizzles that start with a constant number, e.g. '.000r', will be tokenized as
+ // floating point literals, possibly followed by an identifier. Handle that here.
+ StringFragment field = this->text(next);
+ SkASSERT(field.fChars[0] == '.');
+ ++field.fChars;
+ --field.fLength;
+ for (size_t i = 0; i < field.fLength; ++i) {
+ if (field.fChars[i] != '0' && field.fChars[i] != '1') {
+ this->error(next, "invalid swizzle");
+ return ASTNode::ID::Invalid();
+ }
+ }
+ // use the next *raw* token so we don't ignore whitespace - we only care about
+ // identifiers that directly follow the float
+ Token id = this->nextRawToken();
+ if (id.fKind == Token::IDENTIFIER) {
+ field.fLength += id.fLength;
+ } else {
+ this->pushback(id);
+ }
+ CREATE_NODE(result, next.fOffset, ASTNode::Kind::kField, field);
+ getNode(result).addChild(base);
+ return result;
+ }
+ case Token::LPAREN: {
+ CREATE_NODE(result, next.fOffset, ASTNode::Kind::kCall);
+ getNode(result).addChild(base);
+ if (this->peek().fKind != Token::RPAREN) {
+ for (;;) {
+ ASTNode::ID expr = this->assignmentExpression();
+ if (!expr) {
+ return ASTNode::ID::Invalid();
+ }
+ getNode(result).addChild(expr);
+ if (!this->checkNext(Token::COMMA)) {
+ break;
+ }
+ }
+ }
+ this->expect(Token::RPAREN, "')' to complete function parameters");
+ return result;
+ }
+ case Token::PLUSPLUS: // fall through
+ case Token::MINUSMINUS: {
+ CREATE_NODE(result, next.fOffset, ASTNode::Kind::kPostfix, next);
+ getNode(result).addChild(base);
+ return result;
+ }
+ default: {
+ this->error(next, "expected expression suffix, but found '" + this->text(next) + "'");
+ return ASTNode::ID::Invalid();
+ }
+ }
+}
+
+/* IDENTIFIER | intLiteral | floatLiteral | boolLiteral | NULL_LITERAL | '(' expression ')' */
+ASTNode::ID Parser::term() {
+ Token t = this->peek();
+ switch (t.fKind) {
+ case Token::IDENTIFIER: {
+ StringFragment text;
+ if (this->identifier(&text)) {
+ RETURN_NODE(t.fOffset, ASTNode::Kind::kIdentifier, std::move(text));
+            }
+            break;
+        }
+ case Token::INT_LITERAL: {
+ SKSL_INT i;
+ if (this->intLiteral(&i)) {
+ RETURN_NODE(t.fOffset, ASTNode::Kind::kInt, i);
+ }
+ break;
+ }
+ case Token::FLOAT_LITERAL: {
+ SKSL_FLOAT f;
+ if (this->floatLiteral(&f)) {
+ RETURN_NODE(t.fOffset, ASTNode::Kind::kFloat, f);
+ }
+ break;
+ }
+ case Token::TRUE_LITERAL: // fall through
+ case Token::FALSE_LITERAL: {
+ bool b;
+ if (this->boolLiteral(&b)) {
+ RETURN_NODE(t.fOffset, ASTNode::Kind::kBool, b);
+ }
+ break;
+ }
+ case Token::NULL_LITERAL:
+ this->nextToken();
+ RETURN_NODE(t.fOffset, ASTNode::Kind::kNull);
+ case Token::LPAREN: {
+ this->nextToken();
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return ASTNode::ID::Invalid();
+ }
+ ASTNode::ID result = this->expression();
+ if (result) {
+ this->expect(Token::RPAREN, "')' to complete expression");
+ return result;
+ }
+ break;
+ }
+ default:
+ this->nextToken();
+ this->error(t.fOffset, "expected expression, but found '" + this->text(t) + "'");
+ }
+ return ASTNode::ID::Invalid();
+}
+
+/* INT_LITERAL */
+bool Parser::intLiteral(SKSL_INT* dest) {
+ Token t;
+ if (this->expect(Token::INT_LITERAL, "integer literal", &t)) {
+ *dest = SkSL::stol(this->text(t));
+ return true;
+ }
+ return false;
+}
+
+/* FLOAT_LITERAL */
+bool Parser::floatLiteral(SKSL_FLOAT* dest) {
+ Token t;
+ if (this->expect(Token::FLOAT_LITERAL, "float literal", &t)) {
+ *dest = SkSL::stod(this->text(t));
+ return true;
+ }
+ return false;
+}
+
+/* TRUE_LITERAL | FALSE_LITERAL */
+bool Parser::boolLiteral(bool* dest) {
+ Token t = this->nextToken();
+ switch (t.fKind) {
+ case Token::TRUE_LITERAL:
+ *dest = true;
+ return true;
+ case Token::FALSE_LITERAL:
+ *dest = false;
+ return true;
+ default:
+ this->error(t, "expected 'true' or 'false', but found '" + this->text(t) + "'");
+ return false;
+ }
+}
+
+/* IDENTIFIER */
+bool Parser::identifier(StringFragment* dest) {
+ Token t;
+ if (this->expect(Token::IDENTIFIER, "identifier", &t)) {
+ *dest = this->text(t);
+ return true;
+ }
+ return false;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.h b/gfx/skia/skia/src/sksl/SkSLParser.h
new file mode 100644
index 0000000000..33beb7fdef
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PARSER
+#define SKSL_PARSER
+
+#include <vector>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+#include "src/sksl/SkSLASTFile.h"
+#include "src/sksl/SkSLASTNode.h"
+#include "src/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/ir/SkSLLayout.h"
+
+struct yy_buffer_state;
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+
+namespace SkSL {
+
+struct Modifiers;
+class SymbolTable;
+
+/**
+ * Consumes .sksl text and produces an abstract syntax tree describing the contents.
+ */
+class Parser {
+public:
+ enum class LayoutToken {
+ LOCATION,
+ OFFSET,
+ BINDING,
+ INDEX,
+ SET,
+ BUILTIN,
+ INPUT_ATTACHMENT_INDEX,
+ ORIGIN_UPPER_LEFT,
+ OVERRIDE_COVERAGE,
+ BLEND_SUPPORT_ALL_EQUATIONS,
+ BLEND_SUPPORT_MULTIPLY,
+ BLEND_SUPPORT_SCREEN,
+ BLEND_SUPPORT_OVERLAY,
+ BLEND_SUPPORT_DARKEN,
+ BLEND_SUPPORT_LIGHTEN,
+ BLEND_SUPPORT_COLORDODGE,
+ BLEND_SUPPORT_COLORBURN,
+ BLEND_SUPPORT_HARDLIGHT,
+ BLEND_SUPPORT_SOFTLIGHT,
+ BLEND_SUPPORT_DIFFERENCE,
+ BLEND_SUPPORT_EXCLUSION,
+ BLEND_SUPPORT_HSL_HUE,
+ BLEND_SUPPORT_HSL_SATURATION,
+ BLEND_SUPPORT_HSL_COLOR,
+ BLEND_SUPPORT_HSL_LUMINOSITY,
+ PUSH_CONSTANT,
+ POINTS,
+ LINES,
+ LINE_STRIP,
+ LINES_ADJACENCY,
+ TRIANGLES,
+ TRIANGLE_STRIP,
+ TRIANGLES_ADJACENCY,
+ MAX_VERTICES,
+ INVOCATIONS,
+ WHEN,
+ KEY,
+ TRACKED,
+ CTYPE,
+ SKPMCOLOR4F,
+ SKVECTOR4,
+ SKRECT,
+ SKIRECT,
+ SKPMCOLOR,
+ SKMATRIX44,
+ BOOL,
+ INT,
+ FLOAT,
+ };
+
+ Parser(const char* text, size_t length, SymbolTable& types, ErrorReporter& errors);
+
+ /**
+ * Consumes a complete .sksl file and returns the parse tree. Errors are reported via the
+ * ErrorReporter; the return value may contain some declarations even when errors have occurred.
+ */
+ std::unique_ptr<ASTFile> file();
+
+ StringFragment text(Token token);
+
+ Position position(Token token);
+
+private:
+ static void InitLayoutMap();
+
+ /**
+ * Return the next token, including whitespace tokens, from the parse stream.
+ */
+ Token nextRawToken();
+
+ /**
+ * Return the next non-whitespace token from the parse stream.
+ */
+ Token nextToken();
+
+ /**
+ * Push a token back onto the parse stream, so that it is the next one read. Only a single level
+ * of pushback is supported (that is, it is an error to call pushback() twice in a row without
+ * an intervening nextToken()).
+ */
+ void pushback(Token t);
+
+ /**
+ * Returns the next non-whitespace token without consuming it from the stream.
+ */
+ Token peek();
+
+ /**
+ * Checks to see if the next token is of the specified type. If so, stores it in result (if
+ * result is non-null) and returns true. Otherwise, pushes it back and returns false.
+ */
+ bool checkNext(Token::Kind kind, Token* result = nullptr);
+
+ /**
+ * Reads the next non-whitespace token and generates an error if it is not the expected type.
+ * The 'expected' string is part of the error message, which reads:
+ *
+ * "expected <expected>, but found '<actual text>'"
+ *
+ * If 'result' is non-null, it is set to point to the token that was read.
+ * Returns true if the read token was as expected, false otherwise.
+ */
+ bool expect(Token::Kind kind, const char* expected, Token* result = nullptr);
+ bool expect(Token::Kind kind, String expected, Token* result = nullptr);
+
+ void error(Token token, String msg);
+ void error(int offset, String msg);
+ /**
+ * Returns true if the 'name' identifier refers to a type name. For instance, isType("int") will
+ * always return true.
+ */
+ bool isType(StringFragment name);
+
+    // The returned reference may be invalidated by modifying the fNodes vector
+ ASTNode& getNode(ASTNode::ID id) {
+ SkASSERT(id.fValue >= 0 && id.fValue < (int) fFile->fNodes.size());
+ return fFile->fNodes[id.fValue];
+ }
+
+ // these functions parse individual grammar rules from the current parse position; you probably
+ // don't need to call any of these outside of the parser. The function declarations in the .cpp
+ // file have comments describing the grammar rules.
+
+ ASTNode::ID precision();
+
+ ASTNode::ID directive();
+
+ ASTNode::ID section();
+
+ ASTNode::ID enumDeclaration();
+
+ ASTNode::ID declaration();
+
+ ASTNode::ID varDeclarations();
+
+ ASTNode::ID structDeclaration();
+
+ ASTNode::ID structVarDeclaration(Modifiers modifiers);
+
+ ASTNode::ID varDeclarationEnd(Modifiers modifiers, ASTNode::ID type, StringFragment name);
+
+ ASTNode::ID parameter();
+
+ int layoutInt();
+
+ StringFragment layoutIdentifier();
+
+ StringFragment layoutCode();
+
+ Layout::Key layoutKey();
+
+ Layout::CType layoutCType();
+
+ Layout layout();
+
+ Modifiers modifiers();
+
+ Modifiers modifiersWithDefaults(int defaultFlags);
+
+ ASTNode::ID statement();
+
+ ASTNode::ID type();
+
+ ASTNode::ID interfaceBlock(Modifiers mods);
+
+ ASTNode::ID ifStatement();
+
+ ASTNode::ID doStatement();
+
+ ASTNode::ID whileStatement();
+
+ ASTNode::ID forStatement();
+
+ ASTNode::ID switchCase();
+
+ ASTNode::ID switchStatement();
+
+ ASTNode::ID returnStatement();
+
+ ASTNode::ID breakStatement();
+
+ ASTNode::ID continueStatement();
+
+ ASTNode::ID discardStatement();
+
+ ASTNode::ID block();
+
+ ASTNode::ID expressionStatement();
+
+ ASTNode::ID expression();
+
+ ASTNode::ID assignmentExpression();
+
+ ASTNode::ID ternaryExpression();
+
+ ASTNode::ID logicalOrExpression();
+
+ ASTNode::ID logicalXorExpression();
+
+ ASTNode::ID logicalAndExpression();
+
+ ASTNode::ID bitwiseOrExpression();
+
+ ASTNode::ID bitwiseXorExpression();
+
+ ASTNode::ID bitwiseAndExpression();
+
+ ASTNode::ID equalityExpression();
+
+ ASTNode::ID relationalExpression();
+
+ ASTNode::ID shiftExpression();
+
+ ASTNode::ID additiveExpression();
+
+ ASTNode::ID multiplicativeExpression();
+
+ ASTNode::ID unaryExpression();
+
+ ASTNode::ID postfixExpression();
+
+ ASTNode::ID suffix(ASTNode::ID base);
+
+ ASTNode::ID term();
+
+ bool intLiteral(SKSL_INT* dest);
+
+ bool floatLiteral(SKSL_FLOAT* dest);
+
+ bool boolLiteral(bool* dest);
+
+ bool identifier(StringFragment* dest);
+
+ static std::unordered_map<String, LayoutToken>* layoutTokens;
+
+ const char* fText;
+ Lexer fLexer;
+ YY_BUFFER_STATE fBuffer;
+ // current parse depth, used to enforce a recursion limit to try to keep us from overflowing the
+ // stack on pathological inputs
+ int fDepth = 0;
+ Token fPushback;
+ SymbolTable& fTypes;
+ ErrorReporter& fErrors;
+
+ std::unique_ptr<ASTFile> fFile;
+
+ friend class AutoDepth;
+ friend class HCodeGenerator;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.cpp
new file mode 100644
index 0000000000..dde55f64d1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLPipelineStageCodeGenerator.h"
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLHCodeGenerator.h"
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+
+namespace SkSL {
+
+PipelineStageCodeGenerator::PipelineStageCodeGenerator(
+ const Context* context,
+ const Program* program,
+ ErrorReporter* errors,
+ OutputStream* out,
+ std::vector<Compiler::FormatArg>* outFormatArgs,
+ std::vector<Compiler::GLSLFunction>* outFunctions)
+: INHERITED(context, program, errors, out)
+, fName("Temp")
+, fFullName(String::printf("Gr%s", fName.c_str()))
+, fSectionAndParameterHelper(program, *errors)
+, fFormatArgs(outFormatArgs)
+, fFunctions(outFunctions) {}
+
+void PipelineStageCodeGenerator::writef(const char* s, va_list va) {
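+    // Format into a fixed-size stack buffer when possible; fall back to a heap allocation
+    // (using the copied va_list) when the formatted output does not fit.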
+ static constexpr int BUFFER_SIZE = 1024;
+ va_list copy;
+ va_copy(copy, va);
+ char buffer[BUFFER_SIZE];
+ int length = vsnprintf(buffer, BUFFER_SIZE, s, va);
+ if (length < BUFFER_SIZE) {
+ fOut->write(buffer, length);
+ } else {
+ std::unique_ptr<char[]> heap(new char[length + 1]);
+ vsprintf(heap.get(), s, copy);
+ fOut->write(heap.get(), length);
+ }
+ va_end(copy);
+}
+
+void PipelineStageCodeGenerator::writef(const char* s, ...) {
+ va_list va;
+ va_start(va, s);
+ this->writef(s, va);
+ va_end(va);
+}
+
+void PipelineStageCodeGenerator::writeHeader() {
+}
+
+bool PipelineStageCodeGenerator::usesPrecisionModifiers() const {
+ return false;
+}
+
+String PipelineStageCodeGenerator::getTypeName(const Type& type) {
+ return type.name();
+}
+
+void PipelineStageCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ if (b.fOperator == Token::PERCENT) {
+        // need to use "%%" instead of "%" because the code will end up inside a printf format
+        // string
+ Precedence precedence = GetBinaryPrecedence(b.fOperator);
+ if (precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*b.fLeft, precedence);
+ this->write(" %% ");
+ this->writeExpression(*b.fRight, precedence);
+ if (precedence >= parentPrecedence) {
+ this->write(")");
+ }
+ } else {
+ INHERITED::writeBinaryExpression(b, parentPrecedence);
+ }
+}
+
+void PipelineStageCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+ if (c.fFunction.fBuiltin && c.fFunction.fName == "sample" &&
+ c.fArguments[0]->fType.kind() != Type::Kind::kSampler_Kind) {
+ SkASSERT(c.fArguments.size() == 1);
+ SkASSERT("fragmentProcessor" == c.fArguments[0]->fType.name() ||
+ "fragmentProcessor?" == c.fArguments[0]->fType.name());
+ SkASSERT(Expression::kVariableReference_Kind == c.fArguments[0]->fKind);
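+        // Determine the child processor's index by counting fragmentProcessor variables declared
+        // before the referenced one, in program order.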
+ int index = 0;
+ bool found = false;
+ for (const auto& p : fProgram) {
+ if (ProgramElement::kVar_Kind == p.fKind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ VarDeclaration& decl = (VarDeclaration&) *raw;
+ if (decl.fVar == &((VariableReference&) *c.fArguments[0]).fVariable) {
+                        found = true;
+                        break;
+ } else if (decl.fVar->fType == *fContext.fFragmentProcessor_Type) {
+ ++index;
+ }
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ SkASSERT(found);
+ this->write("%s");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kChildProcessor,
+ index));
+ return;
+ }
+ if (c.fFunction.fBuiltin) {
+ INHERITED::writeFunctionCall(c);
+ } else {
+ this->write("%s");
+ int index = 0;
+ for (const auto& e : fProgram) {
+ if (e.fKind == ProgramElement::kFunction_Kind) {
+ if (&((FunctionDefinition&) e).fDeclaration == &c.fFunction) {
+ break;
+ }
+ ++index;
+ }
+ }
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kFunctionName,
+ index));
+ this->write("(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ this->write(")");
+ }
+}
+
+void PipelineStageCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ this->write(to_string((int32_t) i.fValue));
+}
+
+void PipelineStageCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ switch (ref.fVariable.fModifiers.fLayout.fBuiltin) {
+ case SK_INCOLOR_BUILTIN:
+ this->write("%s");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kInput));
+ break;
+ case SK_OUTCOLOR_BUILTIN:
+ this->write("%s");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kOutput));
+ break;
+ case SK_MAIN_X_BUILTIN:
+ this->write("%s");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kCoordX));
+ break;
+ case SK_MAIN_Y_BUILTIN:
+ this->write("%s");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kCoordY));
+ break;
+ default:
+ if (ref.fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag) {
+ this->write("%s");
+ int index = 0;
+ bool found = false;
+ for (const auto& e : fProgram) {
+ if (found) {
+ break;
+ }
+ if (e.fKind == ProgramElement::Kind::kVar_Kind) {
+ const VarDeclarations& decls = (const VarDeclarations&) e;
+ for (const auto& decl : decls.fVars) {
+ const Variable& var = *((VarDeclaration&) *decl).fVar;
+ if (&var == &ref.fVariable) {
+ found = true;
+ break;
+ }
+ if (var.fModifiers.fFlags & Modifiers::kUniform_Flag) {
+ ++index;
+ }
+ }
+ }
+ }
+ SkASSERT(found);
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kUniform,
+ index));
+ } else {
+ this->write(ref.fVariable.fName);
+ }
+ }
+}
+
+void PipelineStageCodeGenerator::writeIfStatement(const IfStatement& s) {
+ if (s.fIsStatic) {
+ this->write("@");
+ }
+ INHERITED::writeIfStatement(s);
+}
+
+void PipelineStageCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ if (s.fIsStatic) {
+ this->write("@");
+ }
+ INHERITED::writeSwitchStatement(s);
+}
+
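+// Maps an SkSL type to the corresponding GrSLType; unsupported types assert and return void.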
+static GrSLType glsltype(const Context& context, const Type& type) {
+ if (type == *context.fFloat_Type) {
+ return GrSLType::kFloat_GrSLType;
+ } else if (type == *context.fHalf_Type) {
+ return GrSLType::kHalf_GrSLType;
+ } else if (type == *context.fFloat2_Type) {
+ return GrSLType::kFloat2_GrSLType;
+ } else if (type == *context.fHalf2_Type) {
+ return GrSLType::kHalf2_GrSLType;
+ } else if (type == *context.fFloat4_Type) {
+ return GrSLType::kFloat4_GrSLType;
+ } else if (type == *context.fHalf4_Type) {
+ return GrSLType::kHalf4_GrSLType;
+ } else if (type == *context.fFloat4x4_Type) {
+ return GrSLType::kFloat4x4_GrSLType;
+ } else if (type == *context.fHalf4x4_Type) {
+ return GrSLType::kHalf4x4_GrSLType;
+ } else if (type == *context.fVoid_Type) {
+ return GrSLType::kVoid_GrSLType;
+ }
+ SkASSERT(false);
+ return GrSLType::kVoid_GrSLType;
+}
+
+
+void PipelineStageCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ fCurrentFunction = &f.fDeclaration;
+ fFunctionHeader = "";
+ OutputStream* oldOut = fOut;
+ StringStream buffer;
+ fOut = &buffer;
+ if (f.fDeclaration.fName == "main") {
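+        // main() is emitted inline; it begins by copying the input color to the output color.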
+ this->write("%s = %s;\n");
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kOutput));
+ fFormatArgs->push_back(Compiler::FormatArg(Compiler::FormatArg::Kind::kInput));
+ for (const auto& s : ((Block&) *f.fBody).fStatements) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ fOut = oldOut;
+ this->write(fFunctionHeader);
+ this->writef("%s", buffer.str().c_str());
+ } else {
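+        // Other functions are captured as standalone GLSLFunction records rather than emitted
+        // inline.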
+ const FunctionDeclaration& decl = f.fDeclaration;
+ Compiler::GLSLFunction result;
+ result.fReturnType = glsltype(fContext, decl.fReturnType);
+ result.fName = decl.fName;
+ for (const Variable* v : decl.fParameters) {
+ result.fParameters.emplace_back(v->fName, glsltype(fContext, v->fType));
+ }
+ for (const auto& s : ((Block&) *f.fBody).fStatements) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ fOut = oldOut;
+ result.fBody = buffer.str();
+ fFunctions->push_back(result);
+ }
+}
+
+bool PipelineStageCodeGenerator::writeSection(const char* name, const char* prefix) {
+ const Section* s = fSectionAndParameterHelper.getSection(name);
+ if (s) {
+ this->writef("%s%s", prefix, s->fText.c_str());
+ return true;
+ }
+ return false;
+}
+
+void PipelineStageCodeGenerator::writeProgramElement(const ProgramElement& p) {
+ if (p.fKind == ProgramElement::kSection_Kind) {
+ return;
+ }
+ if (p.fKind == ProgramElement::kVar_Kind) {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ if (!decls.fVars.size()) {
+ return;
+ }
+ const Variable& var = *((VarDeclaration&) *decls.fVars[0]).fVar;
+ if (var.fModifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kUniform_Flag) ||
+ -1 != var.fModifiers.fLayout.fBuiltin) {
+ return;
+ }
+ }
+ INHERITED::writeProgramElement(p);
+}
+
+} // namespace
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.h
new file mode 100644
index 0000000000..183b66b752
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPipelineStageCodeGenerator.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PIPELINESTAGECODEGENERATOR
+#define SKSL_PIPELINESTAGECODEGENERATOR
+
+#include "src/sksl/SkSLGLSLCodeGenerator.h"
+#include "src/sksl/SkSLSectionAndParameterHelper.h"
+
+#include <set>
+
+#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
+
+namespace SkSL {
+
+class PipelineStageCodeGenerator : public GLSLCodeGenerator {
+public:
+ PipelineStageCodeGenerator(const Context* context, const Program* program,
+ ErrorReporter* errors, OutputStream* out,
+ std::vector<Compiler::FormatArg>* outFormatArgs,
+ std::vector<Compiler::GLSLFunction>* outFunctions);
+
+private:
+ void writef(const char* s, va_list va) SKSL_PRINTF_LIKE(2, 0);
+
+ void writef(const char* s, ...) SKSL_PRINTF_LIKE(2, 3);
+
+ bool writeSection(const char* name, const char* prefix = "");
+
+ void writeHeader() override;
+
+ bool usesPrecisionModifiers() const override;
+
+ String getTypeName(const Type& type) override;
+
+ void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence) override;
+
+ void writeFunctionCall(const FunctionCall& c) override;
+
+ void writeIntLiteral(const IntLiteral& i) override;
+
+ void writeVariableReference(const VariableReference& ref) override;
+
+ void writeIfStatement(const IfStatement& s) override;
+
+ void writeSwitchStatement(const SwitchStatement& s) override;
+
+ void writeFunction(const FunctionDefinition& f) override;
+
+ void writeProgramElement(const ProgramElement& p) override;
+
+ bool writeEmitCode(std::vector<const Variable*>& uniforms);
+
+ String fName;
+ String fFullName;
+ SectionAndParameterHelper fSectionAndParameterHelper;
+ std::set<int> fWrittenTransformedCoords;
+ std::vector<Compiler::FormatArg>* fFormatArgs;
+ std::vector<Compiler::GLSLFunction>* fFunctions;
+ const FunctionDeclaration* fCurrentFunction;
+
+ typedef GLSLCodeGenerator INHERITED;
+};
+
+} // namespace
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPosition.h b/gfx/skia/skia/src/sksl/SkSLPosition.h
new file mode 100644
index 0000000000..96a2f4b919
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPosition.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSITION
+#define SKSL_POSITION
+
+#include "src/sksl/SkSLString.h"
+#include "src/sksl/SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Represents a position in the source code. Both line and column are one-based. Column is currently
+ * ignored.
+ */
+struct Position {
+ Position()
+ : fLine(-1)
+ , fColumn(-1) {}
+
+ Position(int line, int column)
+ : fLine(line)
+ , fColumn(column) {}
+
+ String description() const {
+ return to_string(fLine);
+ }
+
+ int fLine;
+ int fColumn;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp
new file mode 100644
index 0000000000..b7ffd1c7de
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp
@@ -0,0 +1,3257 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLSPIRVCodeGenerator.h"
+
+#include "src/sksl/GLSL.std.450.h"
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkCaps.h"
+#endif
+
+namespace SkSL {
+
+static const int32_t SKSL_MAGIC = 0x0; // FIXME: we should probably register a magic number
+
+void SPIRVCodeGenerator::setupIntrinsics() {
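+// Each entry maps an intrinsic name to a tuple of (kind, float op, signed-int op, unsigned-int
+// op, bool op); SpvOpUndef marks type/op combinations that are not supported.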
+#define ALL_GLSL(x) std::make_tuple(kGLSL_STD_450_IntrinsicKind, GLSLstd450 ## x, GLSLstd450 ## x, \
+ GLSLstd450 ## x, GLSLstd450 ## x)
+#define BY_TYPE_GLSL(ifFloat, ifInt, ifUInt) std::make_tuple(kGLSL_STD_450_IntrinsicKind, \
+ GLSLstd450 ## ifFloat, \
+ GLSLstd450 ## ifInt, \
+ GLSLstd450 ## ifUInt, \
+ SpvOpUndef)
+#define ALL_SPIRV(x) std::make_tuple(kSPIRV_IntrinsicKind, SpvOp ## x, SpvOp ## x, SpvOp ## x, \
+ SpvOp ## x)
+#define SPECIAL(x) std::make_tuple(kSpecial_IntrinsicKind, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic)
+ fIntrinsicMap[String("round")] = ALL_GLSL(Round);
+ fIntrinsicMap[String("roundEven")] = ALL_GLSL(RoundEven);
+ fIntrinsicMap[String("trunc")] = ALL_GLSL(Trunc);
+ fIntrinsicMap[String("abs")] = BY_TYPE_GLSL(FAbs, SAbs, SAbs);
+ fIntrinsicMap[String("sign")] = BY_TYPE_GLSL(FSign, SSign, SSign);
+ fIntrinsicMap[String("floor")] = ALL_GLSL(Floor);
+ fIntrinsicMap[String("ceil")] = ALL_GLSL(Ceil);
+ fIntrinsicMap[String("fract")] = ALL_GLSL(Fract);
+ fIntrinsicMap[String("radians")] = ALL_GLSL(Radians);
+ fIntrinsicMap[String("degrees")] = ALL_GLSL(Degrees);
+ fIntrinsicMap[String("sin")] = ALL_GLSL(Sin);
+ fIntrinsicMap[String("cos")] = ALL_GLSL(Cos);
+ fIntrinsicMap[String("tan")] = ALL_GLSL(Tan);
+ fIntrinsicMap[String("asin")] = ALL_GLSL(Asin);
+ fIntrinsicMap[String("acos")] = ALL_GLSL(Acos);
+ fIntrinsicMap[String("atan")] = SPECIAL(Atan);
+ fIntrinsicMap[String("sinh")] = ALL_GLSL(Sinh);
+ fIntrinsicMap[String("cosh")] = ALL_GLSL(Cosh);
+ fIntrinsicMap[String("tanh")] = ALL_GLSL(Tanh);
+ fIntrinsicMap[String("asinh")] = ALL_GLSL(Asinh);
+ fIntrinsicMap[String("acosh")] = ALL_GLSL(Acosh);
+ fIntrinsicMap[String("atanh")] = ALL_GLSL(Atanh);
+ fIntrinsicMap[String("pow")] = ALL_GLSL(Pow);
+ fIntrinsicMap[String("exp")] = ALL_GLSL(Exp);
+ fIntrinsicMap[String("log")] = ALL_GLSL(Log);
+ fIntrinsicMap[String("exp2")] = ALL_GLSL(Exp2);
+ fIntrinsicMap[String("log2")] = ALL_GLSL(Log2);
+ fIntrinsicMap[String("sqrt")] = ALL_GLSL(Sqrt);
+ fIntrinsicMap[String("inverse")] = ALL_GLSL(MatrixInverse);
+ fIntrinsicMap[String("transpose")] = ALL_SPIRV(Transpose);
+ fIntrinsicMap[String("inversesqrt")] = ALL_GLSL(InverseSqrt);
+ fIntrinsicMap[String("determinant")] = ALL_GLSL(Determinant);
+ fIntrinsicMap[String("matrixInverse")] = ALL_GLSL(MatrixInverse);
+ fIntrinsicMap[String("mod")] = SPECIAL(Mod);
+ fIntrinsicMap[String("min")] = SPECIAL(Min);
+ fIntrinsicMap[String("max")] = SPECIAL(Max);
+ fIntrinsicMap[String("clamp")] = SPECIAL(Clamp);
+ fIntrinsicMap[String("saturate")] = SPECIAL(Saturate);
+ fIntrinsicMap[String("dot")] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDot,
+ SpvOpUndef, SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap[String("mix")] = SPECIAL(Mix);
+ fIntrinsicMap[String("step")] = ALL_GLSL(Step);
+ fIntrinsicMap[String("smoothstep")] = ALL_GLSL(SmoothStep);
+ fIntrinsicMap[String("fma")] = ALL_GLSL(Fma);
+ fIntrinsicMap[String("frexp")] = ALL_GLSL(Frexp);
+ fIntrinsicMap[String("ldexp")] = ALL_GLSL(Ldexp);
+
+#define PACK(type) fIntrinsicMap[String("pack" #type)] = ALL_GLSL(Pack ## type); \
+ fIntrinsicMap[String("unpack" #type)] = ALL_GLSL(Unpack ## type)
+ PACK(Snorm4x8);
+ PACK(Unorm4x8);
+ PACK(Snorm2x16);
+ PACK(Unorm2x16);
+ PACK(Half2x16);
+ PACK(Double2x32);
+ fIntrinsicMap[String("length")] = ALL_GLSL(Length);
+ fIntrinsicMap[String("distance")] = ALL_GLSL(Distance);
+ fIntrinsicMap[String("cross")] = ALL_GLSL(Cross);
+ fIntrinsicMap[String("normalize")] = ALL_GLSL(Normalize);
+ fIntrinsicMap[String("faceForward")] = ALL_GLSL(FaceForward);
+ fIntrinsicMap[String("reflect")] = ALL_GLSL(Reflect);
+ fIntrinsicMap[String("refract")] = ALL_GLSL(Refract);
+ fIntrinsicMap[String("findLSB")] = ALL_GLSL(FindILsb);
+ fIntrinsicMap[String("findMSB")] = BY_TYPE_GLSL(FindSMsb, FindSMsb, FindUMsb);
+ fIntrinsicMap[String("dFdx")] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDPdx,
+ SpvOpUndef, SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap[String("dFdy")] = SPECIAL(DFdy);
+ fIntrinsicMap[String("fwidth")] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpFwidth,
+ SpvOpUndef, SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap[String("makeSampler2D")] = SPECIAL(SampledImage);
+
+ fIntrinsicMap[String("sample")] = SPECIAL(Texture);
+ fIntrinsicMap[String("subpassLoad")] = SPECIAL(SubpassLoad);
+
+ fIntrinsicMap[String("any")] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef, SpvOpAny);
+ fIntrinsicMap[String("all")] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef, SpvOpAll);
+ fIntrinsicMap[String("equal")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdEqual, SpvOpIEqual,
+ SpvOpIEqual, SpvOpLogicalEqual);
+ fIntrinsicMap[String("notEqual")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdNotEqual, SpvOpINotEqual,
+ SpvOpINotEqual,
+ SpvOpLogicalNotEqual);
+ fIntrinsicMap[String("lessThan")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdLessThan, SpvOpSLessThan,
+ SpvOpULessThan, SpvOpUndef);
+ fIntrinsicMap[String("lessThanEqual")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdLessThanEqual,
+ SpvOpSLessThanEqual,
+ SpvOpULessThanEqual,
+ SpvOpUndef);
+ fIntrinsicMap[String("greaterThan")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdGreaterThan,
+ SpvOpSGreaterThan,
+ SpvOpUGreaterThan,
+ SpvOpUndef);
+ fIntrinsicMap[String("greaterThanEqual")] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpFOrdGreaterThanEqual,
+ SpvOpSGreaterThanEqual,
+ SpvOpUGreaterThanEqual,
+ SpvOpUndef);
+ fIntrinsicMap[String("EmitVertex")] = ALL_SPIRV(EmitVertex);
+ fIntrinsicMap[String("EndPrimitive")] = ALL_SPIRV(EndPrimitive);
+// interpolateAt* not yet supported...
+}
+
+void SPIRVCodeGenerator::writeWord(int32_t word, OutputStream& out) {
+ out.write((const char*) &word, sizeof(word));
+}
+
+static bool is_float(const Context& context, const Type& type) {
+ if (type.columns() > 1) {
+ return is_float(context, type.componentType());
+ }
+ return type == *context.fFloat_Type || type == *context.fHalf_Type ||
+ type == *context.fDouble_Type;
+}
+
+static bool is_signed(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_signed(context, type.componentType());
+ }
+ return type == *context.fInt_Type || type == *context.fShort_Type ||
+ type == *context.fByte_Type;
+}
+
+static bool is_unsigned(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_unsigned(context, type.componentType());
+ }
+ return type == *context.fUInt_Type || type == *context.fUShort_Type ||
+ type == *context.fUByte_Type;
+}
+
+static bool is_bool(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_bool(context, type.componentType());
+ }
+ return type == *context.fBool_Type;
+}
+
+static bool is_out(const Variable& var) {
+ return (var.fModifiers.fFlags & Modifiers::kOut_Flag) != 0;
+}
+
+void SPIRVCodeGenerator::writeOpCode(SpvOp_ opCode, int length, OutputStream& out) {
+ SkASSERT(opCode != SpvOpLoad || &out != &fConstantBuffer);
+ SkASSERT(opCode != SpvOpUndef);
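+ // Track basic-block state as instructions are written: block terminators clear fCurrentBlock,
+ // type/constant/decoration instructions may appear outside of any block, and every other
+ // opcode must land inside one.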
+ switch (opCode) {
+ case SpvOpReturn: // fall through
+ case SpvOpReturnValue: // fall through
+ case SpvOpKill: // fall through
+ case SpvOpBranch: // fall through
+ case SpvOpBranchConditional:
+ SkASSERT(fCurrentBlock);
+ fCurrentBlock = 0;
+ break;
+ case SpvOpConstant: // fall through
+ case SpvOpConstantTrue: // fall through
+ case SpvOpConstantFalse: // fall through
+ case SpvOpConstantComposite: // fall through
+ case SpvOpTypeVoid: // fall through
+ case SpvOpTypeInt: // fall through
+ case SpvOpTypeFloat: // fall through
+ case SpvOpTypeBool: // fall through
+ case SpvOpTypeVector: // fall through
+ case SpvOpTypeMatrix: // fall through
+ case SpvOpTypeArray: // fall through
+ case SpvOpTypePointer: // fall through
+ case SpvOpTypeFunction: // fall through
+ case SpvOpTypeRuntimeArray: // fall through
+ case SpvOpTypeStruct: // fall through
+ case SpvOpTypeImage: // fall through
+ case SpvOpTypeSampledImage: // fall through
+ case SpvOpTypeSampler: // fall through
+ case SpvOpVariable: // fall through
+ case SpvOpFunction: // fall through
+ case SpvOpFunctionParameter: // fall through
+ case SpvOpFunctionEnd: // fall through
+ case SpvOpExecutionMode: // fall through
+ case SpvOpMemoryModel: // fall through
+ case SpvOpCapability: // fall through
+ case SpvOpExtInstImport: // fall through
+ case SpvOpEntryPoint: // fall through
+ case SpvOpSource: // fall through
+ case SpvOpSourceExtension: // fall through
+ case SpvOpName: // fall through
+ case SpvOpMemberName: // fall through
+ case SpvOpDecorate: // fall through
+ case SpvOpMemberDecorate:
+ break;
+ default:
+ SkASSERT(fCurrentBlock);
+ }
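+ // the first word of an instruction packs the total word count into the high 16 bits and the
+ // opcode into the low 16 bits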
+ this->writeWord((length << 16) | opCode, out);
+}
+
+void SPIRVCodeGenerator::writeLabel(SpvId label, OutputStream& out) {
+ fCurrentBlock = label;
+ this->writeInstruction(SpvOpLabel, label, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, OutputStream& out) {
+ this->writeOpCode(opCode, 1, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, OutputStream& out) {
+ this->writeOpCode(opCode, 2, out);
+ this->writeWord(word1, out);
+}
+
+void SPIRVCodeGenerator::writeString(const char* string, size_t length, OutputStream& out) {
+ out.write(string, length);
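+ // SPIR-V strings are nul-terminated and padded to a 4-byte boundary, so emit one to four zero
+ // bytes depending on the length written so far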
+ switch (length % 4) {
+ case 1:
+ out.write8(0);
+ // fall through
+ case 2:
+ out.write8(0);
+ // fall through
+ case 3:
+ out.write8(0);
+ break;
+ default:
+ this->writeWord(0, out);
+ }
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, StringFragment string, OutputStream& out) {
+ this->writeOpCode(opCode, 1 + (string.fLength + 4) / 4, out);
+ this->writeString(string.fChars, string.fLength, out);
+}
+
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, StringFragment string,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 2 + (string.fLength + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeString(string.fChars, string.fLength, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ StringFragment string, OutputStream& out) {
+ this->writeOpCode(opCode, 3 + (string.fLength + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeString(string.fChars, string.fLength, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 3, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, OutputStream& out) {
+ this->writeOpCode(opCode, 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, OutputStream& out) {
+ this->writeOpCode(opCode, 5, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 6, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, OutputStream& out) {
+ this->writeOpCode(opCode, 7, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, OutputStream& out) {
+ this->writeOpCode(opCode, 8, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, int32_t word8,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 9, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+ this->writeWord(word8, out);
+}
+
+void SPIRVCodeGenerator::writeCapabilities(OutputStream& out) {
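+ // fCapabilities is a bitfield indexed by SpvCapability value; emit an OpCapability for each
+ // capability bit recorded during code generation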
+ for (uint64_t i = 0, bit = 1; i <= kLast_Capability; i++, bit <<= 1) {
+ if (fCapabilities & bit) {
+ this->writeInstruction(SpvOpCapability, (SpvId) i, out);
+ }
+ }
+ if (fProgram.fKind == Program::kGeometry_Kind) {
+ this->writeInstruction(SpvOpCapability, SpvCapabilityGeometry, out);
+ }
+ else {
+ this->writeInstruction(SpvOpCapability, SpvCapabilityShader, out);
+ }
+}
+
+SpvId SPIRVCodeGenerator::nextId() {
+ return fIdCount++;
+}
+
+void SPIRVCodeGenerator::writeStruct(const Type& type, const MemoryLayout& memoryLayout,
+ SpvId resultId) {
+ this->writeInstruction(SpvOpName, resultId, type.name().c_str(), fNameBuffer);
+ // go ahead and write all of the field types, so we don't inadvertently write them while we're
+ // in the middle of writing the struct instruction
+ std::vector<SpvId> types;
+ for (const auto& f : type.fields()) {
+ types.push_back(this->getType(*f.fType, memoryLayout));
+ }
+ this->writeOpCode(SpvOpTypeStruct, 2 + (int32_t) types.size(), fConstantBuffer);
+ this->writeWord(resultId, fConstantBuffer);
+ for (SpvId id : types) {
+ this->writeWord(id, fConstantBuffer);
+ }
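+ // assign an offset to each field, honoring an explicit layout(offset = ...) when present and
+ // otherwise rounding the running offset up to the field's required alignment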
+ size_t offset = 0;
+ for (int32_t i = 0; i < (int32_t) type.fields().size(); i++) {
+ const Type::Field& field = type.fields()[i];
+ size_t size = memoryLayout.size(*field.fType);
+ size_t alignment = memoryLayout.alignment(*field.fType);
+ const Layout& fieldLayout = field.fModifiers.fLayout;
+ if (fieldLayout.fOffset >= 0) {
+ if (fieldLayout.fOffset < (int) offset) {
+ fErrors.error(type.fOffset,
+ "offset of field '" + field.fName + "' must be at "
+ "least " + to_string((int) offset));
+ }
+ if (fieldLayout.fOffset % alignment) {
+ fErrors.error(type.fOffset,
+ "offset of field '" + field.fName + "' must be a multiple"
+ " of " + to_string((int) alignment));
+ }
+ offset = fieldLayout.fOffset;
+ } else {
+ size_t mod = offset % alignment;
+ if (mod) {
+ offset += alignment - mod;
+ }
+ }
+ this->writeInstruction(SpvOpMemberName, resultId, i, field.fName, fNameBuffer);
+ this->writeLayout(fieldLayout, resultId, i);
+ if (field.fModifiers.fLayout.fBuiltin < 0) {
+ this->writeInstruction(SpvOpMemberDecorate, resultId, (SpvId) i, SpvDecorationOffset,
+ (SpvId) offset, fDecorationBuffer);
+ }
+ if (field.fType->kind() == Type::kMatrix_Kind) {
+ this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationColMajor,
+ fDecorationBuffer);
+ this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationMatrixStride,
+ (SpvId) memoryLayout.stride(*field.fType),
+ fDecorationBuffer);
+ }
+ if (!field.fType->highPrecision()) {
+ this->writeInstruction(SpvOpMemberDecorate, resultId, (SpvId) i,
+ SpvDecorationRelaxedPrecision, fDecorationBuffer);
+ }
+ offset += size;
+ Type::Kind kind = field.fType->kind();
+ if ((kind == Type::kArray_Kind || kind == Type::kStruct_Kind) && offset % alignment != 0) {
+ offset += alignment - offset % alignment;
+ }
+ }
+}
+
+Type SPIRVCodeGenerator::getActualType(const Type& type) {
+ if (type.isFloat()) {
+ return *fContext.fFloat_Type;
+ }
+ if (type.isSigned()) {
+ return *fContext.fInt_Type;
+ }
+ if (type.isUnsigned()) {
+ return *fContext.fUInt_Type;
+ }
+ if (type.kind() == Type::kMatrix_Kind || type.kind() == Type::kVector_Kind) {
+ if (type.componentType() == *fContext.fHalf_Type) {
+ return fContext.fFloat_Type->toCompound(fContext, type.columns(), type.rows());
+ }
+ if (type.componentType() == *fContext.fShort_Type ||
+ type.componentType() == *fContext.fByte_Type) {
+ return fContext.fInt_Type->toCompound(fContext, type.columns(), type.rows());
+ }
+ if (type.componentType() == *fContext.fUShort_Type ||
+ type.componentType() == *fContext.fUByte_Type) {
+ return fContext.fUInt_Type->toCompound(fContext, type.columns(), type.rows());
+ }
+ }
+ return type;
+}
+
+SpvId SPIRVCodeGenerator::getType(const Type& type) {
+ return this->getType(type, fDefaultLayout);
+}
+
+SpvId SPIRVCodeGenerator::getType(const Type& rawType, const MemoryLayout& layout) {
+ Type type = this->getActualType(rawType);
+ String key = type.name() + to_string((int) layout.fStd);
+ auto entry = fTypeMap.find(key);
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ switch (type.kind()) {
+ case Type::kScalar_Kind:
+ if (type == *fContext.fBool_Type) {
+ this->writeInstruction(SpvOpTypeBool, result, fConstantBuffer);
+ } else if (type == *fContext.fInt_Type || type == *fContext.fShort_Type ||
+ type == *fContext.fIntLiteral_Type) {
+ this->writeInstruction(SpvOpTypeInt, result, 32, 1, fConstantBuffer);
+ } else if (type == *fContext.fUInt_Type || type == *fContext.fUShort_Type) {
+ this->writeInstruction(SpvOpTypeInt, result, 32, 0, fConstantBuffer);
+ } else if (type == *fContext.fFloat_Type || type == *fContext.fHalf_Type ||
+ type == *fContext.fFloatLiteral_Type) {
+ this->writeInstruction(SpvOpTypeFloat, result, 32, fConstantBuffer);
+ } else if (type == *fContext.fDouble_Type) {
+ this->writeInstruction(SpvOpTypeFloat, result, 64, fConstantBuffer);
+ } else {
+ SkASSERT(false);
+ }
+ break;
+ case Type::kVector_Kind:
+ this->writeInstruction(SpvOpTypeVector, result,
+ this->getType(type.componentType(), layout),
+ type.columns(), fConstantBuffer);
+ break;
+ case Type::kMatrix_Kind:
+ this->writeInstruction(SpvOpTypeMatrix, result,
+ this->getType(index_type(fContext, type), layout),
+ type.columns(), fConstantBuffer);
+ break;
+ case Type::kStruct_Kind:
+ this->writeStruct(type, layout, result);
+ break;
+ case Type::kArray_Kind: {
+ if (type.columns() > 0) {
+ IntLiteral count(fContext, -1, type.columns());
+ this->writeInstruction(SpvOpTypeArray, result,
+ this->getType(type.componentType(), layout),
+ this->writeIntLiteral(count), fConstantBuffer);
+ this->writeInstruction(SpvOpDecorate, result, SpvDecorationArrayStride,
+ (int32_t) layout.stride(type),
+ fDecorationBuffer);
+ } else {
+ SkASSERT(false); // we shouldn't have any runtime-sized arrays right now
+ this->writeInstruction(SpvOpTypeRuntimeArray, result,
+ this->getType(type.componentType(), layout),
+ fConstantBuffer);
+ this->writeInstruction(SpvOpDecorate, result, SpvDecorationArrayStride,
+ (int32_t) layout.stride(type),
+ fDecorationBuffer);
+ }
+ break;
+ }
+ case Type::kSampler_Kind: {
+ SpvId image = result;
+ if (SpvDimSubpassData != type.dimensions()) {
+ image = this->getType(type.textureType(), layout);
+ }
+ if (SpvDimBuffer == type.dimensions()) {
+ fCapabilities |= (((uint64_t) 1) << SpvCapabilitySampledBuffer);
+ }
+ if (SpvDimSubpassData != type.dimensions()) {
+ this->writeInstruction(SpvOpTypeSampledImage, result, image, fConstantBuffer);
+ }
+ break;
+ }
+ case Type::kSeparateSampler_Kind: {
+ this->writeInstruction(SpvOpTypeSampler, result, fConstantBuffer);
+ break;
+ }
+ case Type::kTexture_Kind: {
+ this->writeInstruction(SpvOpTypeImage, result,
+ this->getType(*fContext.fFloat_Type, layout),
+ type.dimensions(), type.isDepth(), type.isArrayed(),
+ type.isMultisampled(), type.isSampled() ? 1 : 2,
+ SpvImageFormatUnknown, fConstantBuffer);
+ fImageTypeMap[key] = result;
+ break;
+ }
+ default:
+ if (type == *fContext.fVoid_Type) {
+ this->writeInstruction(SpvOpTypeVoid, result, fConstantBuffer);
+ } else {
+ ABORT("invalid type: %s", type.description().c_str());
+ }
+ }
+ fTypeMap[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::getImageType(const Type& type) {
+ SkASSERT(type.kind() == Type::kSampler_Kind);
+ this->getType(type);
+ String key = type.name() + to_string((int) fDefaultLayout.fStd);
+ SkASSERT(fImageTypeMap.find(key) != fImageTypeMap.end());
+ return fImageTypeMap[key];
+}
+
+SpvId SPIRVCodeGenerator::getFunctionType(const FunctionDeclaration& function) {
+ String key = function.fReturnType.description() + "(";
+ String separator;
+ for (size_t i = 0; i < function.fParameters.size(); i++) {
+ key += separator;
+ separator = ", ";
+ key += function.fParameters[i]->fType.description();
+ }
+ key += ")";
+ auto entry = fTypeMap.find(key);
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ int32_t length = 3 + (int32_t) function.fParameters.size();
+ SpvId returnType = this->getType(function.fReturnType);
+ std::vector<SpvId> parameterTypes;
+ for (size_t i = 0; i < function.fParameters.size(); i++) {
+ // glslang seems to treat all function arguments as pointers whether they need to be or
+ // not. I was initially puzzled by this until I ran into bizarre failures with certain
+ // patterns of function calls and control constructs, as exemplified by this minimal
+ // failure case:
+ //
+ // void sphere(float x) {
+ // }
+ //
+ // void map() {
+ // sphere(1.0);
+ // }
+ //
+ // void main() {
+ // for (int i = 0; i < 1; i++) {
+ // map();
+ // }
+ // }
+ //
+ // As of this writing, compiling this in the "obvious" way (with sphere taking a float)
+ // crashes. Making it take a float* and storing the argument in a temporary variable,
+ // as glslang does, fixes it. It's entirely possible I simply missed whichever part of
+ // the spec makes this make sense.
+// if (is_out(*function.fParameters[i])) {
+ parameterTypes.push_back(this->getPointerType(function.fParameters[i]->fType,
+ SpvStorageClassFunction));
+// } else {
+// parameterTypes.push_back(this->getType(function.fParameters[i]->fType));
+// }
+ }
+ this->writeOpCode(SpvOpTypeFunction, length, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ this->writeWord(returnType, fConstantBuffer);
+ for (SpvId id : parameterTypes) {
+ this->writeWord(id, fConstantBuffer);
+ }
+ fTypeMap[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::getPointerType(const Type& type, SpvStorageClass_ storageClass) {
+ return this->getPointerType(type, fDefaultLayout, storageClass);
+}
+
+SpvId SPIRVCodeGenerator::getPointerType(const Type& rawType, const MemoryLayout& layout,
+ SpvStorageClass_ storageClass) {
+ Type type = this->getActualType(rawType);
+ String key = type.description() + "*" + to_string(layout.fStd) + to_string(storageClass);
+ auto entry = fTypeMap.find(key);
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpTypePointer, result, storageClass,
+ this->getType(type), fConstantBuffer);
+ fTypeMap[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::writeExpression(const Expression& expr, OutputStream& out) {
+ switch (expr.fKind) {
+ case Expression::kBinary_Kind:
+ return this->writeBinaryExpression((BinaryExpression&) expr, out);
+ case Expression::kBoolLiteral_Kind:
+ return this->writeBoolLiteral((BoolLiteral&) expr);
+ case Expression::kConstructor_Kind:
+ return this->writeConstructor((Constructor&) expr, out);
+ case Expression::kIntLiteral_Kind:
+ return this->writeIntLiteral((IntLiteral&) expr);
+ case Expression::kFieldAccess_Kind:
+ return this->writeFieldAccess(((FieldAccess&) expr), out);
+ case Expression::kFloatLiteral_Kind:
+ return this->writeFloatLiteral(((FloatLiteral&) expr));
+ case Expression::kFunctionCall_Kind:
+ return this->writeFunctionCall((FunctionCall&) expr, out);
+ case Expression::kPrefix_Kind:
+ return this->writePrefixExpression((PrefixExpression&) expr, out);
+ case Expression::kPostfix_Kind:
+ return this->writePostfixExpression((PostfixExpression&) expr, out);
+ case Expression::kSwizzle_Kind:
+ return this->writeSwizzle((Swizzle&) expr, out);
+ case Expression::kVariableReference_Kind:
+ return this->writeVariableReference((VariableReference&) expr, out);
+ case Expression::kTernary_Kind:
+ return this->writeTernaryExpression((TernaryExpression&) expr, out);
+ case Expression::kIndex_Kind:
+ return this->writeIndexExpression((IndexExpression&) expr, out);
+ default:
+ ABORT("unsupported expression: %s", expr.description().c_str());
+ }
+ return -1;
+}
+
+SpvId SPIRVCodeGenerator::writeIntrinsicCall(const FunctionCall& c, OutputStream& out) {
+ auto intrinsic = fIntrinsicMap.find(c.fFunction.fName);
+ SkASSERT(intrinsic != fIntrinsicMap.end());
+ int32_t intrinsicId;
+ if (c.fArguments.size() > 0) {
+ const Type& type = c.fArguments[0]->fType;
+ if (std::get<0>(intrinsic->second) == kSpecial_IntrinsicKind || is_float(fContext, type)) {
+ intrinsicId = std::get<1>(intrinsic->second);
+ } else if (is_signed(fContext, type)) {
+ intrinsicId = std::get<2>(intrinsic->second);
+ } else if (is_unsigned(fContext, type)) {
+ intrinsicId = std::get<3>(intrinsic->second);
+ } else if (is_bool(fContext, type)) {
+ intrinsicId = std::get<4>(intrinsic->second);
+ } else {
+ intrinsicId = std::get<1>(intrinsic->second);
+ }
+ } else {
+ intrinsicId = std::get<1>(intrinsic->second);
+ }
+ switch (std::get<0>(intrinsic->second)) {
+ case kGLSL_STD_450_IntrinsicKind: {
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ if (c.fFunction.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ arguments.push_back(this->getLValue(*c.fArguments[i], out)->getPointer());
+ } else {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ }
+ this->writeOpCode(SpvOpExtInst, 5 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+ this->writeWord(intrinsicId, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+ }
+ case kSPIRV_IntrinsicKind: {
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ if (c.fFunction.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ arguments.push_back(this->getLValue(*c.fArguments[i], out)->getPointer());
+ } else {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ }
+ if (c.fType != *fContext.fVoid_Type) {
+ this->writeOpCode((SpvOp_) intrinsicId, 3 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ } else {
+ this->writeOpCode((SpvOp_) intrinsicId, 1 + (int32_t) arguments.size(), out);
+ }
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+ }
+ case kSpecial_IntrinsicKind:
+ return this->writeSpecialIntrinsic(c, (SpecialIntrinsic) intrinsicId, out);
+ default:
+ ABORT("unsupported intrinsic kind");
+ }
+}
+
+std::vector<SpvId> SPIRVCodeGenerator::vectorize(
+ const std::vector<std::unique_ptr<Expression>>& args,
+ OutputStream& out) {
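+ // determine the vector size (if any) of the arguments; scalar arguments are splatted out to
+ // vectors of matching size so that mixed scalar/vector intrinsic calls type-check in SPIR-V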
+ int vectorSize = 0;
+ for (const auto& a : args) {
+ if (a->fType.kind() == Type::kVector_Kind) {
+ if (vectorSize) {
+ SkASSERT(a->fType.columns() == vectorSize);
+ } else {
+ vectorSize = a->fType.columns();
+ }
+ }
+ }
+ std::vector<SpvId> result;
+ for (const auto& a : args) {
+ SpvId raw = this->writeExpression(*a, out);
+ if (vectorSize && a->fType.kind() == Type::kScalar_Kind) {
+ SpvId vector = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + vectorSize, out);
+ this->writeWord(this->getType(a->fType.toCompound(fContext, vectorSize, 1)), out);
+ this->writeWord(vector, out);
+ for (int i = 0; i < vectorSize; i++) {
+ this->writeWord(raw, out);
+ }
+ this->writePrecisionModifier(a->fType, vector);
+ result.push_back(vector);
+ } else {
+ result.push_back(raw);
+ }
+ }
+ return result;
+}
+
+void SPIRVCodeGenerator::writeGLSLExtendedInstruction(const Type& type, SpvId id, SpvId floatInst,
+ SpvId signedInst, SpvId unsignedInst,
+ const std::vector<SpvId>& args,
+ OutputStream& out) {
+ this->writeOpCode(SpvOpExtInst, 5 + args.size(), out);
+ this->writeWord(this->getType(type), out);
+ this->writeWord(id, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+
+ if (is_float(fContext, type)) {
+ this->writeWord(floatInst, out);
+ } else if (is_signed(fContext, type)) {
+ this->writeWord(signedInst, out);
+ } else if (is_unsigned(fContext, type)) {
+ this->writeWord(unsignedInst, out);
+ } else {
+ SkASSERT(false);
+ }
+ for (SpvId a : args) {
+ this->writeWord(a, out);
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind,
+ OutputStream& out) {
+ SpvId result = this->nextId();
+ switch (kind) {
+ case kAtan_SpecialIntrinsic: {
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ this->writeOpCode(SpvOpExtInst, 5 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+ this->writeWord(arguments.size() == 2 ? GLSLstd450Atan2 : GLSLstd450Atan, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ break;
+ }
+ case kSampledImage_SpecialIntrinsic: {
+ SkASSERT(2 == c.fArguments.size());
+ SpvId img = this->writeExpression(*c.fArguments[0], out);
+ SpvId sampler = this->writeExpression(*c.fArguments[1], out);
+ this->writeInstruction(SpvOpSampledImage,
+ this->getType(c.fType),
+ result,
+ img,
+ sampler,
+ out);
+ break;
+ }
+ case kSubpassLoad_SpecialIntrinsic: {
+ SpvId img = this->writeExpression(*c.fArguments[0], out);
+ std::vector<std::unique_ptr<Expression>> args;
+ args.emplace_back(new FloatLiteral(fContext, -1, 0.0));
+ args.emplace_back(new FloatLiteral(fContext, -1, 0.0));
+ Constructor ctor(-1, *fContext.fFloat2_Type, std::move(args));
+ SpvId coords = this->writeConstantVector(ctor);
+ if (1 == c.fArguments.size()) {
+ this->writeInstruction(SpvOpImageRead,
+ this->getType(c.fType),
+ result,
+ img,
+ coords,
+ out);
+ } else {
+ SkASSERT(2 == c.fArguments.size());
+ SpvId sample = this->writeExpression(*c.fArguments[1], out);
+ this->writeInstruction(SpvOpImageRead,
+ this->getType(c.fType),
+ result,
+ img,
+ coords,
+ SpvImageOperandsSampleMask,
+ sample,
+ out);
+ }
+ break;
+ }
+ case kTexture_SpecialIntrinsic: {
+ SpvOp_ op = SpvOpImageSampleImplicitLod;
+ switch (c.fArguments[0]->fType.dimensions()) {
+ case SpvDim1D:
+ if (c.fArguments[1]->fType == *fContext.fFloat2_Type) {
+ op = SpvOpImageSampleProjImplicitLod;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat_Type);
+ }
+ break;
+ case SpvDim2D:
+ if (c.fArguments[1]->fType == *fContext.fFloat3_Type) {
+ op = SpvOpImageSampleProjImplicitLod;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat2_Type);
+ }
+ break;
+ case SpvDim3D:
+ if (c.fArguments[1]->fType == *fContext.fFloat4_Type) {
+ op = SpvOpImageSampleProjImplicitLod;
+ } else {
+ SkASSERT(c.fArguments[1]->fType == *fContext.fFloat3_Type);
+ }
+ break;
+ case SpvDimCube: // fall through
+ case SpvDimRect: // fall through
+ case SpvDimBuffer: // fall through
+ case SpvDimSubpassData:
+ break;
+ }
+ SpvId type = this->getType(c.fType);
+ SpvId sampler = this->writeExpression(*c.fArguments[0], out);
+ SpvId uv = this->writeExpression(*c.fArguments[1], out);
+ if (c.fArguments.size() == 3) {
+ this->writeInstruction(op, type, result, sampler, uv,
+ SpvImageOperandsBiasMask,
+ this->writeExpression(*c.fArguments[2], out),
+ out);
+ } else {
+ SkASSERT(c.fArguments.size() == 2);
+ if (fProgram.fSettings.fSharpenTextures) {
+ FloatLiteral lodBias(fContext, -1, -0.5);
+ this->writeInstruction(op, type, result, sampler, uv,
+ SpvImageOperandsBiasMask,
+ this->writeFloatLiteral(lodBias),
+ out);
+ } else {
+ this->writeInstruction(op, type, result, sampler, uv,
+ out);
+ }
+ }
+ break;
+ }
+ case kMod_SpecialIntrinsic: {
+ std::vector<SpvId> args = this->vectorize(c.fArguments, out);
+ SkASSERT(args.size() == 2);
+ const Type& operandType = c.fArguments[0]->fType;
+ SpvOp_ op;
+ if (is_float(fContext, operandType)) {
+ op = SpvOpFMod;
+ } else if (is_signed(fContext, operandType)) {
+ op = SpvOpSMod;
+ } else if (is_unsigned(fContext, operandType)) {
+ op = SpvOpUMod;
+ } else {
+ SkASSERT(false);
+ return 0;
+ }
+ this->writeOpCode(op, 5, out);
+ this->writeWord(this->getType(operandType), out);
+ this->writeWord(result, out);
+ this->writeWord(args[0], out);
+ this->writeWord(args[1], out);
+ break;
+ }
+ case kDFdy_SpecialIntrinsic: {
+ SpvId fn = this->writeExpression(*c.fArguments[0], out);
+ this->writeOpCode(SpvOpDPdy, 4, out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(fn, out);
+ if (fProgram.fSettings.fFlipY) {
+ // Flipping Y also negates the Y derivatives.
+ SpvId flipped = this->nextId();
+ this->writeInstruction(SpvOpFNegate, this->getType(c.fType), flipped, result, out);
+ this->writePrecisionModifier(c.fType, flipped);
+ return flipped;
+ }
+ break;
+ }
+ case kClamp_SpecialIntrinsic: {
+ std::vector<SpvId> args = this->vectorize(c.fArguments, out);
+ SkASSERT(args.size() == 3);
+ this->writeGLSLExtendedInstruction(c.fType, result, GLSLstd450FClamp, GLSLstd450SClamp,
+ GLSLstd450UClamp, args, out);
+ break;
+ }
+ case kMax_SpecialIntrinsic: {
+ std::vector<SpvId> args = this->vectorize(c.fArguments, out);
+ SkASSERT(args.size() == 2);
+ this->writeGLSLExtendedInstruction(c.fType, result, GLSLstd450FMax, GLSLstd450SMax,
+ GLSLstd450UMax, args, out);
+ break;
+ }
+ case kMin_SpecialIntrinsic: {
+ std::vector<SpvId> args = this->vectorize(c.fArguments, out);
+ SkASSERT(args.size() == 2);
+ this->writeGLSLExtendedInstruction(c.fType, result, GLSLstd450FMin, GLSLstd450SMin,
+ GLSLstd450UMin, args, out);
+ break;
+ }
+ case kMix_SpecialIntrinsic: {
+ std::vector<SpvId> args = this->vectorize(c.fArguments, out);
+ SkASSERT(args.size() == 3);
+ this->writeGLSLExtendedInstruction(c.fType, result, GLSLstd450FMix, SpvOpUndef,
+ SpvOpUndef, args, out);
+ break;
+ }
+ case kSaturate_SpecialIntrinsic: {
+ SkASSERT(c.fArguments.size() == 1);
+ std::vector<std::unique_ptr<Expression>> finalArgs;
+ finalArgs.push_back(c.fArguments[0]->clone());
+ finalArgs.emplace_back(new FloatLiteral(fContext, -1, 0));
+ finalArgs.emplace_back(new FloatLiteral(fContext, -1, 1));
+ std::vector<SpvId> spvArgs = this->vectorize(finalArgs, out);
+ this->writeGLSLExtendedInstruction(c.fType, result, GLSLstd450FClamp, GLSLstd450SClamp,
+ GLSLstd450UClamp, spvArgs, out);
+ break;
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeFunctionCall(const FunctionCall& c, OutputStream& out) {
+ const auto& entry = fFunctionMap.find(&c.fFunction);
+ if (entry == fFunctionMap.end()) {
+ return this->writeIntrinsicCall(c, out);
+ }
+ // stores (temp var, type, lvalue) tuples; each out-argument's value is read back out of its
+ // temp var and stored through its lvalue after the function call completes
+ std::vector<std::tuple<SpvId, const Type*, std::unique_ptr<LValue>>> lvalues;
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ // id of temporary variable that we will use to hold this argument, or 0 if it is being
+ // passed directly
+ SpvId tmpVar;
+ // if we need a temporary var to store this argument, this is the value to store in the var
+ SpvId tmpValueId;
+ if (is_out(*c.fFunction.fParameters[i])) {
+ std::unique_ptr<LValue> lv = this->getLValue(*c.fArguments[i], out);
+ SpvId ptr = lv->getPointer();
+ if (ptr) {
+ arguments.push_back(ptr);
+ continue;
+ } else {
+ // lvalue cannot simply be read and written via a pointer (e.g. a swizzle). Need to
+ // copy it into a temp, call the function, read the value out of the temp, and then
+ // update the lvalue.
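+ // (e.g. passing a swizzle like v.yx as an out argument takes this path)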
+ tmpValueId = lv->load(out);
+ tmpVar = this->nextId();
+ lvalues.push_back(std::make_tuple(tmpVar, &c.fArguments[i]->fType, std::move(lv)));
+ }
+ } else {
+ // see getFunctionType for an explanation of why we're always using pointer parameters
+ tmpValueId = this->writeExpression(*c.fArguments[i], out);
+ tmpVar = this->nextId();
+ }
+ this->writeInstruction(SpvOpVariable,
+ this->getPointerType(c.fArguments[i]->fType,
+ SpvStorageClassFunction),
+ tmpVar,
+ SpvStorageClassFunction,
+ fVariableBuffer);
+ this->writeInstruction(SpvOpStore, tmpVar, tmpValueId, out);
+ arguments.push_back(tmpVar);
+ }
+ SpvId result = this->nextId();
+ this->writeOpCode(SpvOpFunctionCall, 4 + (int32_t) c.fArguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(entry->second, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ // now that the call is complete, we may need to update some lvalues with the new values of out
+ // arguments
+ for (const auto& tuple : lvalues) {
+ SpvId load = this->nextId();
+ this->writeInstruction(SpvOpLoad, getType(*std::get<1>(tuple)), load, std::get<0>(tuple),
+ out);
+ this->writePrecisionModifier(*std::get<1>(tuple), load);
+ std::get<2>(tuple)->store(load, out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeConstantVector(const Constructor& c) {
+ SkASSERT(c.fType.kind() == Type::kVector_Kind && c.isConstant());
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], fConstantBuffer));
+ }
+ SpvId type = this->getType(c.fType);
+ if (c.fArguments.size() == 1) {
+ // with a single argument, a vector will have all of its entries equal to the argument
+ this->writeOpCode(SpvOpConstantComposite, 3 + c.fType.columns(), fConstantBuffer);
+ this->writeWord(type, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ for (int i = 0; i < c.fType.columns(); i++) {
+ this->writeWord(arguments[0], fConstantBuffer);
+ }
+ } else {
+ this->writeOpCode(SpvOpConstantComposite, 3 + (int32_t) c.fArguments.size(),
+ fConstantBuffer);
+ this->writeWord(type, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ for (SpvId id : arguments) {
+ this->writeWord(id, fConstantBuffer);
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeFloatConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.isFloat());
+ SkASSERT(c.fArguments.size() == 1);
+ SkASSERT(c.fArguments[0]->fType.isNumber());
+ SpvId result = this->nextId();
+ SpvId parameter = this->writeExpression(*c.fArguments[0], out);
+ if (c.fArguments[0]->fType.isSigned()) {
+ this->writeInstruction(SpvOpConvertSToF, this->getType(c.fType), result, parameter,
+ out);
+ } else {
+ SkASSERT(c.fArguments[0]->fType.isUnsigned());
+ this->writeInstruction(SpvOpConvertUToF, this->getType(c.fType), result, parameter,
+ out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeIntConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.isSigned());
+ SkASSERT(c.fArguments.size() == 1);
+ SkASSERT(c.fArguments[0]->fType.isNumber());
+ SpvId result = this->nextId();
+ SpvId parameter = this->writeExpression(*c.fArguments[0], out);
+ if (c.fArguments[0]->fType.isFloat()) {
+ this->writeInstruction(SpvOpConvertFToS, this->getType(c.fType), result, parameter,
+ out);
+ } else {
+ SkASSERT(c.fArguments[0]->fType.isUnsigned());
+ this->writeInstruction(SpvOpBitcast, this->getType(c.fType), result, parameter,
+ out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeUIntConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.isUnsigned());
+ SkASSERT(c.fArguments.size() == 1);
+ SkASSERT(c.fArguments[0]->fType.isNumber());
+ SpvId result = this->nextId();
+ SpvId parameter = this->writeExpression(*c.fArguments[0], out);
+ if (c.fArguments[0]->fType.isFloat()) {
+ this->writeInstruction(SpvOpConvertFToU, this->getType(c.fType), result, parameter,
+ out);
+ } else {
+ SkASSERT(c.fArguments[0]->fType.isSigned());
+ this->writeInstruction(SpvOpBitcast, this->getType(c.fType), result, parameter,
+ out);
+ }
+ return result;
+}
+
+void SPIRVCodeGenerator::writeUniformScaleMatrix(SpvId id, SpvId diagonal, const Type& type,
+ OutputStream& out) {
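+ // construct the matrix column by column; each column holds `diagonal` in the row matching the
+ // column index and zero everywhere else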
+ FloatLiteral zero(fContext, -1, 0);
+ SpvId zeroId = this->writeFloatLiteral(zero);
+ std::vector<SpvId> columnIds;
+ for (int column = 0; column < type.columns(); column++) {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + type.rows(),
+ out);
+ this->writeWord(this->getType(type.componentType().toCompound(fContext, type.rows(), 1)),
+ out);
+ SpvId columnId = this->nextId();
+ this->writeWord(columnId, out);
+ columnIds.push_back(columnId);
+ for (int row = 0; row < type.rows(); row++) {
+ this->writeWord(row == column ? diagonal : zeroId, out);
+ }
+ this->writePrecisionModifier(type, columnId);
+ }
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + type.columns(),
+ out);
+ this->writeWord(this->getType(type), out);
+ this->writeWord(id, out);
+ for (SpvId id : columnIds) {
+ this->writeWord(id, out);
+ }
+ this->writePrecisionModifier(type, id);
+}
+
+void SPIRVCodeGenerator::writeMatrixCopy(SpvId id, SpvId src, const Type& srcType,
+ const Type& dstType, OutputStream& out) {
+ SkASSERT(srcType.kind() == Type::kMatrix_Kind);
+ SkASSERT(dstType.kind() == Type::kMatrix_Kind);
+ SkASSERT(srcType.componentType() == dstType.componentType());
+ SpvId srcColumnType = this->getType(srcType.componentType().toCompound(fContext,
+ srcType.rows(),
+ 1));
+ SpvId dstColumnType = this->getType(dstType.componentType().toCompound(fContext,
+ dstType.rows(),
+ 1));
+ SpvId zeroId;
+ if (dstType.componentType() == *fContext.fFloat_Type) {
+ FloatLiteral zero(fContext, -1, 0.0);
+ zeroId = this->writeFloatLiteral(zero);
+ } else if (dstType.componentType() == *fContext.fInt_Type) {
+ IntLiteral zero(fContext, -1, 0);
+ zeroId = this->writeIntLiteral(zero);
+ } else {
+ ABORT("unsupported matrix component type");
+ }
+ SpvId zeroColumn = 0;
+ SpvId columns[4];
+ for (int i = 0; i < dstType.columns(); i++) {
+ if (i < srcType.columns()) {
+ // we're still inside the src matrix, copy the column
+ SpvId srcColumn = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, srcColumnType, srcColumn, src, i, out);
+ this->writePrecisionModifier(dstType, srcColumn);
+ SpvId dstColumn;
+ if (srcType.rows() == dstType.rows()) {
+ // columns are equal size, don't need to do anything
+ dstColumn = srcColumn;
+ } else if (dstType.rows() > srcType.rows()) {
+ // dst column is bigger, need to zero-pad it
+ dstColumn = this->nextId();
+ int delta = dstType.rows() - srcType.rows();
+ this->writeOpCode(SpvOpCompositeConstruct, 4 + delta, out);
+ this->writeWord(dstColumnType, out);
+ this->writeWord(dstColumn, out);
+ this->writeWord(srcColumn, out);
+ for (int i = 0; i < delta; ++i) {
+ this->writeWord(zeroId, out);
+ }
+ this->writePrecisionModifier(dstType, dstColumn);
+ } else {
+ // dst column is smaller, need to swizzle the src column
+ dstColumn = this->nextId();
+ int count = dstType.rows();
+ this->writeOpCode(SpvOpVectorShuffle, 5 + count, out);
+ this->writeWord(dstColumnType, out);
+ this->writeWord(dstColumn, out);
+ this->writeWord(srcColumn, out);
+ this->writeWord(srcColumn, out);
+ for (int i = 0; i < count; i++) {
+ this->writeWord(i, out);
+ }
+ this->writePrecisionModifier(dstType, dstColumn);
+ }
+ columns[i] = dstColumn;
+ } else {
+ // we're past the end of the src matrix, need a vector of zeroes
+ if (!zeroColumn) {
+ zeroColumn = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + dstType.rows(), out);
+ this->writeWord(dstColumnType, out);
+ this->writeWord(zeroColumn, out);
+ for (int i = 0; i < dstType.rows(); ++i) {
+ this->writeWord(zeroId, out);
+ }
+ this->writePrecisionModifier(dstType, zeroColumn);
+ }
+ columns[i] = zeroColumn;
+ }
+ }
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + dstType.columns(), out);
+ this->writeWord(this->getType(dstType), out);
+ this->writeWord(id, out);
+ for (int i = 0; i < dstType.columns(); i++) {
+ this->writeWord(columns[i], out);
+ }
+ this->writePrecisionModifier(dstType, id);
+}
+
+void SPIRVCodeGenerator::addColumnEntry(SpvId columnType, Precision precision,
+ std::vector<SpvId>* currentColumn,
+ std::vector<SpvId>* columnIds,
+ int* currentCount, int rows, SpvId entry,
+ OutputStream& out) {
+ SkASSERT(*currentCount < rows);
+ ++(*currentCount);
+ currentColumn->push_back(entry);
+ if (*currentCount == rows) {
+ *currentCount = 0;
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + currentColumn->size(), out);
+ this->writeWord(columnType, out);
+ SpvId columnId = this->nextId();
+ this->writeWord(columnId, out);
+ columnIds->push_back(columnId);
+ for (SpvId id : *currentColumn) {
+ this->writeWord(id, out);
+ }
+ currentColumn->clear();
+ this->writePrecisionModifier(precision, columnId);
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeMatrixConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.kind() == Type::kMatrix_Kind);
+ // go ahead and write the arguments so we don't try to write new instructions in the middle of
+ // an instruction
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ SpvId result = this->nextId();
+ int rows = c.fType.rows();
+ int columns = c.fType.columns();
+ if (arguments.size() == 1 && c.fArguments[0]->fType.kind() == Type::kScalar_Kind) {
+ this->writeUniformScaleMatrix(result, arguments[0], c.fType, out);
+ } else if (arguments.size() == 1 && c.fArguments[0]->fType.kind() == Type::kMatrix_Kind) {
+ this->writeMatrixCopy(result, arguments[0], c.fArguments[0]->fType, c.fType, out);
+ } else if (arguments.size() == 1 && c.fArguments[0]->fType.kind() == Type::kVector_Kind) {
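+ // a single 4-component vector constructs a 2x2 matrix: extract the components and regroup
+ // them into two 2-component columns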
+ SkASSERT(c.fType.rows() == 2 && c.fType.columns() == 2);
+ SkASSERT(c.fArguments[0]->fType.columns() == 4);
+ SpvId componentType = this->getType(c.fType.componentType());
+ SpvId v[4];
+ for (int i = 0; i < 4; ++i) {
+ v[i] = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, componentType, v[i], arguments[0], i, out);
+ }
+ SpvId columnType = this->getType(c.fType.componentType().toCompound(fContext, 2, 1));
+ SpvId column1 = this->nextId();
+ this->writeInstruction(SpvOpCompositeConstruct, columnType, column1, v[0], v[1], out);
+ SpvId column2 = this->nextId();
+ this->writeInstruction(SpvOpCompositeConstruct, columnType, column2, v[2], v[3], out);
+ this->writeInstruction(SpvOpCompositeConstruct, this->getType(c.fType), result, column1,
+ column2, out);
+ } else {
+ SpvId columnType = this->getType(c.fType.componentType().toCompound(fContext, rows, 1));
+ std::vector<SpvId> columnIds;
+ // ids of vectors and scalars we have written to the current column so far
+ std::vector<SpvId> currentColumn;
+ // the total number of scalars represented by currentColumn's entries
+ int currentCount = 0;
+ Precision precision = c.fType.highPrecision() ? Precision::kHigh : Precision::kLow;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ if (currentCount == 0 && c.fArguments[i]->fType.kind() == Type::kVector_Kind &&
+ c.fArguments[i]->fType.columns() == c.fType.rows()) {
+ // this is a complete column by itself
+ columnIds.push_back(arguments[i]);
+ } else {
+ if (c.fArguments[i]->fType.columns() == 1) {
+ this->addColumnEntry(columnType, precision, &currentColumn, &columnIds,
+ &currentCount, rows, arguments[i], out);
+ } else {
+ SpvId componentType = this->getType(c.fArguments[i]->fType.componentType());
+ for (int j = 0; j < c.fArguments[i]->fType.columns(); ++j) {
+ SpvId swizzle = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, componentType, swizzle,
+ arguments[i], j, out);
+ this->addColumnEntry(columnType, precision, &currentColumn, &columnIds,
+ &currentCount, rows, swizzle, out);
+ }
+ }
+ }
+ }
+ SkASSERT(columnIds.size() == (size_t) columns);
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + columns, out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : columnIds) {
+ this->writeWord(id, out);
+ }
+ }
+ this->writePrecisionModifier(c.fType, result);
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeVectorConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.kind() == Type::kVector_Kind);
+ if (c.isConstant()) {
+ return this->writeConstantVector(c);
+ }
+ // go ahead and write the arguments so we don't try to write new instructions in the middle of
+ // an instruction
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ if (c.fArguments[i]->fType.kind() == Type::kVector_Kind) {
+ // SPIR-V doesn't support vector(vector-of-different-type) directly, so in that case we need
+ // to extract the components and convert them manually. On top of that, as of this writing
+ // there's a bug in the Intel Vulkan driver where OpCompositeConstruct doesn't handle vector
+ // arguments at all, so we always extract vector components and pass them into
+ // OpCompositeConstruct individually.
+ SpvId vec = this->writeExpression(*c.fArguments[i], out);
+ SpvOp_ op = SpvOpUndef;
+ const Type& src = c.fArguments[i]->fType.componentType();
+ const Type& dst = c.fType.componentType();
+ if (dst == *fContext.fFloat_Type || dst == *fContext.fHalf_Type) {
+ if (src == *fContext.fFloat_Type || src == *fContext.fHalf_Type) {
+ if (c.fArguments.size() == 1) {
+ return vec;
+ }
+ } else if (src == *fContext.fInt_Type ||
+ src == *fContext.fShort_Type ||
+ src == *fContext.fByte_Type) {
+ op = SpvOpConvertSToF;
+ } else if (src == *fContext.fUInt_Type ||
+ src == *fContext.fUShort_Type ||
+ src == *fContext.fUByte_Type) {
+ op = SpvOpConvertUToF;
+ } else {
+ SkASSERT(false);
+ }
+ } else if (dst == *fContext.fInt_Type ||
+ dst == *fContext.fShort_Type ||
+ dst == *fContext.fByte_Type) {
+ if (src == *fContext.fFloat_Type || src == *fContext.fHalf_Type) {
+ op = SpvOpConvertFToS;
+ } else if (src == *fContext.fInt_Type ||
+ src == *fContext.fShort_Type ||
+ src == *fContext.fByte_Type) {
+ if (c.fArguments.size() == 1) {
+ return vec;
+ }
+ } else if (src == *fContext.fUInt_Type ||
+ src == *fContext.fUShort_Type ||
+ src == *fContext.fUByte_Type) {
+ op = SpvOpBitcast;
+ } else {
+ SkASSERT(false);
+ }
+ } else if (dst == *fContext.fUInt_Type ||
+ dst == *fContext.fUShort_Type ||
+ dst == *fContext.fUByte_Type) {
+ if (src == *fContext.fFloat_Type || src == *fContext.fHalf_Type) {
+ // float -> unsigned uses OpConvertFToU, matching writeUIntConstructor
+ op = SpvOpConvertFToU;
+ } else if (src == *fContext.fInt_Type ||
+ src == *fContext.fShort_Type ||
+ src == *fContext.fByte_Type) {
+ op = SpvOpBitcast;
+ } else if (src == *fContext.fUInt_Type ||
+ src == *fContext.fUShort_Type ||
+ src == *fContext.fUByte_Type) {
+ if (c.fArguments.size() == 1) {
+ return vec;
+ }
+ } else {
+ SkASSERT(false);
+ }
+ }
+ for (int j = 0; j < c.fArguments[i]->fType.columns(); j++) {
+ SpvId swizzle = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(src), swizzle, vec, j,
+ out);
+ if (op != SpvOpUndef) {
+ SpvId cast = this->nextId();
+ this->writeInstruction(op, this->getType(dst), cast, swizzle, out);
+ arguments.push_back(cast);
+ } else {
+ arguments.push_back(swizzle);
+ }
+ }
+ } else {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ }
+ SpvId result = this->nextId();
+ if (arguments.size() == 1 && c.fArguments[0]->fType.kind() == Type::kScalar_Kind) {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + c.fType.columns(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (int i = 0; i < c.fType.columns(); i++) {
+ this->writeWord(arguments[0], out);
+ }
+ } else {
+ SkASSERT(arguments.size() > 1);
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeArrayConstructor(const Constructor& c, OutputStream& out) {
+ SkASSERT(c.fType.kind() == Type::kArray_Kind);
+ // go ahead and write the arguments so we don't try to write new instructions in the middle of
+ // an instruction
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ SpvId result = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + (int32_t) c.fArguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeConstructor(const Constructor& c, OutputStream& out) {
+ if (c.fArguments.size() == 1 &&
+ this->getActualType(c.fType) == this->getActualType(c.fArguments[0]->fType)) {
+ return this->writeExpression(*c.fArguments[0], out);
+ }
+ if (c.fType == *fContext.fFloat_Type || c.fType == *fContext.fHalf_Type) {
+ return this->writeFloatConstructor(c, out);
+ } else if (c.fType == *fContext.fInt_Type ||
+ c.fType == *fContext.fShort_Type ||
+ c.fType == *fContext.fByte_Type) {
+ return this->writeIntConstructor(c, out);
+ } else if (c.fType == *fContext.fUInt_Type ||
+ c.fType == *fContext.fUShort_Type ||
+ c.fType == *fContext.fUByte_Type) {
+ return this->writeUIntConstructor(c, out);
+ }
+ switch (c.fType.kind()) {
+ case Type::kVector_Kind:
+ return this->writeVectorConstructor(c, out);
+ case Type::kMatrix_Kind:
+ return this->writeMatrixConstructor(c, out);
+ case Type::kArray_Kind:
+ return this->writeArrayConstructor(c, out);
+ default:
+ ABORT("unsupported constructor: %s", c.description().c_str());
+ }
+}
+
+SpvStorageClass_ get_storage_class(const Modifiers& modifiers) {
+ if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ SkASSERT(!(modifiers.fLayout.fFlags & Layout::kPushConstant_Flag));
+ return SpvStorageClassInput;
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ SkASSERT(!(modifiers.fLayout.fFlags & Layout::kPushConstant_Flag));
+ return SpvStorageClassOutput;
+ } else if (modifiers.fFlags & Modifiers::kUniform_Flag) {
+ if (modifiers.fLayout.fFlags & Layout::kPushConstant_Flag) {
+ return SpvStorageClassPushConstant;
+ }
+ return SpvStorageClassUniform;
+ } else {
+ return SpvStorageClassFunction;
+ }
+}
+
+SpvStorageClass_ get_storage_class(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference&) expr).fVariable;
+ if (var.fStorage != Variable::kGlobal_Storage) {
+ return SpvStorageClassFunction;
+ }
+ SpvStorageClass_ result = get_storage_class(var.fModifiers);
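+ // a global without an explicit in/out/uniform qualifier lives in Private storage rather
+ // than Function storage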
+ if (result == SpvStorageClassFunction) {
+ result = SpvStorageClassPrivate;
+ }
+ return result;
+ }
+ case Expression::kFieldAccess_Kind:
+ return get_storage_class(*((FieldAccess&) expr).fBase);
+ case Expression::kIndex_Kind:
+ return get_storage_class(*((IndexExpression&) expr).fBase);
+ default:
+ return SpvStorageClassFunction;
+ }
+}
+
+std::vector<SpvId> SPIRVCodeGenerator::getAccessChain(const Expression& expr, OutputStream& out) {
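+ // flatten nested index and field-access expressions into the base pointer plus the ordered
+ // list of indices that OpAccessChain expects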
+ std::vector<SpvId> chain;
+ switch (expr.fKind) {
+ case Expression::kIndex_Kind: {
+ IndexExpression& indexExpr = (IndexExpression&) expr;
+ chain = this->getAccessChain(*indexExpr.fBase, out);
+ chain.push_back(this->writeExpression(*indexExpr.fIndex, out));
+ break;
+ }
+ case Expression::kFieldAccess_Kind: {
+ FieldAccess& fieldExpr = (FieldAccess&) expr;
+ chain = this->getAccessChain(*fieldExpr.fBase, out);
+ IntLiteral index(fContext, -1, fieldExpr.fFieldIndex);
+ chain.push_back(this->writeIntLiteral(index));
+ break;
+ }
+ default: {
+ SpvId id = this->getLValue(expr, out)->getPointer();
+ SkASSERT(id != 0);
+ chain.push_back(id);
+ }
+ }
+ return chain;
+}
+
+class PointerLValue : public SPIRVCodeGenerator::LValue {
+public:
+ PointerLValue(SPIRVCodeGenerator& gen, SpvId pointer, SpvId type,
+ SPIRVCodeGenerator::Precision precision)
+ : fGen(gen)
+ , fPointer(pointer)
+ , fType(type)
+ , fPrecision(precision) {}
+
+ virtual SpvId getPointer() override {
+ return fPointer;
+ }
+
+ virtual SpvId load(OutputStream& out) override {
+ SpvId result = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fType, result, fPointer, out);
+ fGen.writePrecisionModifier(fPrecision, result);
+ return result;
+ }
+
+ virtual void store(SpvId value, OutputStream& out) override {
+ fGen.writeInstruction(SpvOpStore, fPointer, value, out);
+ }
+
+private:
+ SPIRVCodeGenerator& fGen;
+ const SpvId fPointer;
+ const SpvId fType;
+ const SPIRVCodeGenerator::Precision fPrecision;
+};
+
+class SwizzleLValue : public SPIRVCodeGenerator::LValue {
+public:
+ SwizzleLValue(SPIRVCodeGenerator& gen, SpvId vecPointer, const std::vector<int>& components,
+ const Type& baseType, const Type& swizzleType,
+ SPIRVCodeGenerator::Precision precision)
+ : fGen(gen)
+ , fVecPointer(vecPointer)
+ , fComponents(components)
+ , fBaseType(baseType)
+ , fSwizzleType(swizzleType)
+ , fPrecision(precision) {}
+
+ virtual SpvId getPointer() override {
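+ // a swizzle's components aren't contiguous in memory, so there is no single pointer to
+ // return; callers must fall back to load() and store()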
+ return 0;
+ }
+
+ virtual SpvId load(OutputStream& out) override {
+ SpvId base = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fGen.getType(fBaseType), base, fVecPointer, out);
+ fGen.writePrecisionModifier(fPrecision, base);
+ SpvId result = fGen.nextId();
+ fGen.writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) fComponents.size(), out);
+ fGen.writeWord(fGen.getType(fSwizzleType), out);
+ fGen.writeWord(result, out);
+ fGen.writeWord(base, out);
+ fGen.writeWord(base, out);
+ for (int component : fComponents) {
+ fGen.writeWord(component, out);
+ }
+ fGen.writePrecisionModifier(fPrecision, result);
+ return result;
+ }
+
+ virtual void store(SpvId value, OutputStream& out) override {
+ // use OpVectorShuffle to mix and match the vector components. We effectively create
+ // a virtual vector out of the concatenation of the left and right vectors, and then
+ // select components from this virtual vector to make the result vector. For
+ // instance, given:
+ // float3L = ...;
+ // float3R = ...;
+ // L.xz = R.xy;
+ // we end up with the virtual vector (L.x, L.y, L.z, R.x, R.y, R.z). Then we want
+ // our result vector to look like (R.x, L.y, R.y), so we need to select indices
+ // (3, 1, 4).
+ SpvId base = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fGen.getType(fBaseType), base, fVecPointer, out);
+ SpvId shuffle = fGen.nextId();
+ fGen.writeOpCode(SpvOpVectorShuffle, 5 + fBaseType.columns(), out);
+ fGen.writeWord(fGen.getType(fBaseType), out);
+ fGen.writeWord(shuffle, out);
+ fGen.writeWord(base, out);
+ fGen.writeWord(value, out);
+ for (int i = 0; i < fBaseType.columns(); i++) {
+ // current offset into the virtual vector, defaults to pulling the unmodified
+ // value from the left side
+ int offset = i;
+ // check to see if we are writing this component
+ for (size_t j = 0; j < fComponents.size(); j++) {
+ if (fComponents[j] == i) {
+ // we're writing to this component, so adjust the offset to pull from
+ // the correct component of the right side instead of preserving the
+ // value from the left
+ offset = (int) (j + fBaseType.columns());
+ break;
+ }
+ }
+ fGen.writeWord(offset, out);
+ }
+ fGen.writePrecisionModifier(fPrecision, shuffle);
+ fGen.writeInstruction(SpvOpStore, fVecPointer, shuffle, out);
+ }
+
+private:
+ SPIRVCodeGenerator& fGen;
+ const SpvId fVecPointer;
+ const std::vector<int>& fComponents;
+ const Type& fBaseType;
+ const Type& fSwizzleType;
+ const SPIRVCodeGenerator::Precision fPrecision;
+};
+
+std::unique_ptr<SPIRVCodeGenerator::LValue> SPIRVCodeGenerator::getLValue(const Expression& expr,
+ OutputStream& out) {
+ Precision precision = expr.fType.highPrecision() ? Precision::kHigh : Precision::kLow;
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ SpvId type;
+ const Variable& var = ((VariableReference&) expr).fVariable;
+ if (var.fModifiers.fLayout.fBuiltin == SK_IN_BUILTIN) {
+ type = this->getType(Type("sk_in", Type::kArray_Kind, var.fType.componentType(),
+ fSkInCount));
+ } else {
+ type = this->getType(expr.fType);
+ }
+ auto entry = fVariableMap.find(&var);
+ SkASSERT(entry != fVariableMap.end());
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(*this,
+ entry->second,
+ type,
+ precision));
+ }
+ case Expression::kIndex_Kind: // fall through
+ case Expression::kFieldAccess_Kind: {
+ std::vector<SpvId> chain = this->getAccessChain(expr, out);
+ SpvId member = this->nextId();
+ this->writeOpCode(SpvOpAccessChain, (SpvId) (3 + chain.size()), out);
+ this->writeWord(this->getPointerType(expr.fType, get_storage_class(expr)), out);
+ this->writeWord(member, out);
+ for (SpvId idx : chain) {
+ this->writeWord(idx, out);
+ }
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ member,
+ this->getType(expr.fType),
+ precision));
+ }
+ case Expression::kSwizzle_Kind: {
+ Swizzle& swizzle = (Swizzle&) expr;
+ size_t count = swizzle.fComponents.size();
+ SpvId base = this->getLValue(*swizzle.fBase, out)->getPointer();
+ SkASSERT(base);
+ if (count == 1) {
+ IntLiteral index(fContext, -1, swizzle.fComponents[0]);
+ SpvId member = this->nextId();
+ this->writeInstruction(SpvOpAccessChain,
+ this->getPointerType(swizzle.fType,
+ get_storage_class(*swizzle.fBase)),
+ member,
+ base,
+ this->writeIntLiteral(index),
+ out);
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ member,
+ this->getType(expr.fType),
+ precision));
+ } else {
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new SwizzleLValue(
+ *this,
+ base,
+ swizzle.fComponents,
+ swizzle.fBase->fType,
+ expr.fType,
+ precision));
+ }
+ }
+ case Expression::kTernary_Kind: {
+ TernaryExpression& t = (TernaryExpression&) expr;
+ SpvId test = this->writeExpression(*t.fTest, out);
+ SpvId end = this->nextId();
+ SpvId ifTrueLabel = this->nextId();
+ SpvId ifFalseLabel = this->nextId();
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrueLabel, ifFalseLabel, out);
+ this->writeLabel(ifTrueLabel, out);
+ SpvId ifTrue = this->getLValue(*t.fIfTrue, out)->getPointer();
+ SkASSERT(ifTrue);
+ ifTrueLabel = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(ifFalseLabel, out);
+ SpvId ifFalse = this->getLValue(*t.fIfFalse, out)->getPointer();
+ SkASSERT(ifFalse);
+ ifFalseLabel = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpPhi, this->getType(*fContext.fBool_Type), result, ifTrue,
+ ifTrueLabel, ifFalse, ifFalseLabel, out);
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ result,
+ this->getType(expr.fType),
+ precision));
+ }
+ default:
+ // expr isn't actually an lvalue, create a dummy variable for it. This case happens due
+ // to the need to store values in temporary variables during function calls (see
+ // comments in getFunctionType); erroneous uses of rvalues as lvalues should have been
+ // caught by IRGenerator
+ SpvId result = this->nextId();
+ SpvId type = this->getPointerType(expr.fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpVariable, type, result, SpvStorageClassFunction,
+ fVariableBuffer);
+ this->writeInstruction(SpvOpStore, result, this->writeExpression(expr, out), out);
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ result,
+ this->getType(expr.fType),
+ precision));
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeVariableReference(const VariableReference& ref, OutputStream& out) {
+ SpvId result = this->nextId();
+ auto entry = fVariableMap.find(&ref.fVariable);
+ SkASSERT(entry != fVariableMap.end());
+ SpvId var = entry->second;
+ this->writeInstruction(SpvOpLoad, this->getType(ref.fVariable.fType), result, var, out);
+ this->writePrecisionModifier(ref.fVariable.fType, result);
+ if (ref.fVariable.fModifiers.fLayout.fBuiltin == SK_FRAGCOORD_BUILTIN &&
+ fProgram.fSettings.fFlipY) {
+ // need to remap to a top-left coordinate system
+ if (fRTHeightStructId == (SpvId) -1) {
+ // height variable hasn't been written yet
+ std::shared_ptr<SymbolTable> st(new SymbolTable(&fErrors));
+ SkASSERT(fRTHeightFieldIndex == (SpvId) -1);
+ std::vector<Type::Field> fields;
+ SkASSERT(fProgram.fSettings.fRTHeightOffset >= 0);
+ fields.emplace_back(Modifiers(Layout(0, -1, fProgram.fSettings.fRTHeightOffset, -1,
+ -1, -1, -1, -1, Layout::Format::kUnspecified,
+ Layout::kUnspecified_Primitive, -1, -1, "",
+ Layout::kNo_Key, Layout::CType::kDefault), 0),
+ SKSL_RTHEIGHT_NAME, fContext.fFloat_Type.get());
+ StringFragment name("sksl_synthetic_uniforms");
+ Type intfStruct(-1, name, fields);
+ int binding;
+ int set;
+#ifdef SK_VULKAN
+ const GrVkCaps* vkCaps = fProgram.fSettings.fVkCaps;
+ SkASSERT(vkCaps);
+ binding = vkCaps->getFragmentUniformBinding();
+ set = vkCaps->getFragmentUniformSet();
+#else
+ binding = 0;
+ set = 0;
+#endif
+ Layout layout(0, -1, -1, binding, -1, set, -1, -1, Layout::Format::kUnspecified,
+ Layout::kUnspecified_Primitive, -1, -1, "", Layout::kNo_Key,
+ Layout::CType::kDefault);
+ Variable* intfVar = (Variable*) fSynthetics.takeOwnership(std::unique_ptr<Symbol>(
+ new Variable(-1,
+ Modifiers(layout, Modifiers::kUniform_Flag),
+ name,
+ intfStruct,
+ Variable::kGlobal_Storage)));
+ InterfaceBlock intf(-1, intfVar, name, String(""),
+ std::vector<std::unique_ptr<Expression>>(), st);
+ fRTHeightStructId = this->writeInterfaceBlock(intf);
+ fRTHeightFieldIndex = 0;
+ }
+ SkASSERT(fRTHeightFieldIndex != (SpvId) -1);
+ // write float4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, 0.0, gl_FragCoord.w)
+ SpvId xId = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(*fContext.fFloat_Type), xId,
+ result, 0, out);
+ IntLiteral fieldIndex(fContext, -1, fRTHeightFieldIndex);
+ SpvId fieldIndexId = this->writeIntLiteral(fieldIndex);
+ SpvId heightPtr = this->nextId();
+ this->writeOpCode(SpvOpAccessChain, 5, out);
+ this->writeWord(this->getPointerType(*fContext.fFloat_Type, SpvStorageClassUniform), out);
+ this->writeWord(heightPtr, out);
+ this->writeWord(fRTHeightStructId, out);
+ this->writeWord(fieldIndexId, out);
+ SpvId heightRead = this->nextId();
+ this->writeInstruction(SpvOpLoad, this->getType(*fContext.fFloat_Type), heightRead,
+ heightPtr, out);
+ SpvId rawYId = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(*fContext.fFloat_Type), rawYId,
+ result, 1, out);
+ SpvId flippedYId = this->nextId();
+ this->writeInstruction(SpvOpFSub, this->getType(*fContext.fFloat_Type), flippedYId,
+ heightRead, rawYId, out);
+ FloatLiteral zero(fContext, -1, 0.0);
+ SpvId zeroId = writeFloatLiteral(zero);
+ FloatLiteral one(fContext, -1, 1.0);
+ SpvId wId = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(*fContext.fFloat_Type), wId,
+ result, 3, out);
+ SpvId flipped = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 7, out);
+ this->writeWord(this->getType(*fContext.fFloat4_Type), out);
+ this->writeWord(flipped, out);
+ this->writeWord(xId, out);
+ this->writeWord(flippedYId, out);
+ this->writeWord(zeroId, out);
+ this->writeWord(wId, out);
+ return flipped;
+ }
+ if (ref.fVariable.fModifiers.fLayout.fBuiltin == SK_CLOCKWISE_BUILTIN &&
+ !fProgram.fSettings.fFlipY) {
+ // FrontFacing in Vulkan is defined in terms of a top-down render target. In Skia, we use
+ // the default convention of "counter-clockwise face is front".
+ SpvId inverse = this->nextId();
+ this->writeInstruction(SpvOpLogicalNot, this->getType(*fContext.fBool_Type), inverse,
+ result, out);
+ return inverse;
+ }
+ return result;
+}
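+
+// A quick sanity check of the flip above (illustrative numbers, not from the code): with
+// fFlipY set and a render target 100 pixels tall, a bottom-up gl_FragCoord.y of 10 is
+// rewritten to u_skRTHeight - y = 90, while x and w pass through unchanged and z is pinned
+// to 0.0.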
+
+SpvId SPIRVCodeGenerator::writeIndexExpression(const IndexExpression& expr, OutputStream& out) {
+ if (expr.fBase->fType.kind() == Type::Kind::kVector_Kind) {
+ SpvId base = this->writeExpression(*expr.fBase, out);
+ SpvId index = this->writeExpression(*expr.fIndex, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpVectorExtractDynamic, this->getType(expr.fType), result, base,
+ index, out);
+ return result;
+ }
+ return getLValue(expr, out)->load(out);
+}
+
+SpvId SPIRVCodeGenerator::writeFieldAccess(const FieldAccess& f, OutputStream& out) {
+ return getLValue(f, out)->load(out);
+}
+
+SpvId SPIRVCodeGenerator::writeSwizzle(const Swizzle& swizzle, OutputStream& out) {
+ SpvId base = this->writeExpression(*swizzle.fBase, out);
+ SpvId result = this->nextId();
+ size_t count = swizzle.fComponents.size();
+ if (count == 1) {
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(swizzle.fType), result, base,
+ swizzle.fComponents[0], out);
+ } else {
+ this->writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) count, out);
+ this->writeWord(this->getType(swizzle.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(base, out);
+ SpvId other = base;
+ for (int c : swizzle.fComponents) {
+ if (c < 0) {
+ if (!fConstantZeroOneVector) {
+ FloatLiteral zero(fContext, -1, 0);
+ SpvId zeroId = this->writeFloatLiteral(zero);
+ FloatLiteral one(fContext, -1, 1);
+ SpvId oneId = this->writeFloatLiteral(one);
+ SpvId type = this->getType(*fContext.fFloat2_Type);
+ fConstantZeroOneVector = this->nextId();
+ this->writeOpCode(SpvOpConstantComposite, 5, fConstantBuffer);
+ this->writeWord(type, fConstantBuffer);
+ this->writeWord(fConstantZeroOneVector, fConstantBuffer);
+ this->writeWord(zeroId, fConstantBuffer);
+ this->writeWord(oneId, fConstantBuffer);
+ }
+ other = fConstantZeroOneVector;
+ break;
+ }
+ }
+ this->writeWord(other, out);
+ for (int component : swizzle.fComponents) {
+ if (component == SKSL_SWIZZLE_0) {
+ this->writeWord(swizzle.fBase->fType.columns(), out);
+ } else if (component == SKSL_SWIZZLE_1) {
+ this->writeWord(swizzle.fBase->fType.columns() + 1, out);
+ } else {
+ this->writeWord(component, out);
+ }
+ }
+ }
+ return result;
+}
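+
+// For illustration: a swizzle with constant components, e.g. v.xy01 on a float3 v, shuffles
+// the virtual vector (v.x, v.y, v.z, 0.0, 1.0) -- v concatenated with the cached (0, 1)
+// constant float2 -- using indices (0, 1, 3, 4).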
+
+SpvId SPIRVCodeGenerator::writeBinaryOperation(const Type& resultType,
+ const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt,
+ SpvOp_ ifUInt, SpvOp_ ifBool, OutputStream& out) {
+ SpvId result = this->nextId();
+ if (is_float(fContext, operandType)) {
+ this->writeInstruction(ifFloat, this->getType(resultType), result, lhs, rhs, out);
+ } else if (is_signed(fContext, operandType)) {
+ this->writeInstruction(ifInt, this->getType(resultType), result, lhs, rhs, out);
+ } else if (is_unsigned(fContext, operandType)) {
+ this->writeInstruction(ifUInt, this->getType(resultType), result, lhs, rhs, out);
+ } else if (operandType == *fContext.fBool_Type) {
+ this->writeInstruction(ifBool, this->getType(resultType), result, lhs, rhs, out);
+ return result; // skip RelaxedPrecision check
+ } else {
+ ABORT("invalid operandType: %s", operandType.description().c_str());
+ }
+ if (getActualType(resultType) == operandType && !resultType.highPrecision()) {
+ this->writeInstruction(SpvOpDecorate, result, SpvDecorationRelaxedPrecision,
+ fDecorationBuffer);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::foldToBool(SpvId id, const Type& operandType, SpvOp op,
+ OutputStream& out) {
+ if (operandType.kind() == Type::kVector_Kind) {
+ SpvId result = this->nextId();
+ this->writeInstruction(op, this->getType(*fContext.fBool_Type), result, id, out);
+ return result;
+ }
+ return id;
+}
+
+SpvId SPIRVCodeGenerator::writeMatrixComparison(const Type& operandType, SpvId lhs, SpvId rhs,
+ SpvOp_ floatOperator, SpvOp_ intOperator,
+ SpvOp_ vectorMergeOperator, SpvOp_ mergeOperator,
+ OutputStream& out) {
+ SpvOp_ compareOp = is_float(fContext, operandType) ? floatOperator : intOperator;
+ SkASSERT(operandType.kind() == Type::kMatrix_Kind);
+ SpvId columnType = this->getType(operandType.componentType().toCompound(fContext,
+ operandType.rows(),
+ 1));
+ SpvId bvecType = this->getType(fContext.fBool_Type->toCompound(fContext,
+ operandType.rows(),
+ 1));
+ SpvId boolType = this->getType(*fContext.fBool_Type);
+ SpvId result = 0;
+ for (int i = 0; i < operandType.columns(); i++) {
+ SpvId columnL = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, columnType, columnL, lhs, i, out);
+ SpvId columnR = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, columnType, columnR, rhs, i, out);
+ SpvId compare = this->nextId();
+ this->writeInstruction(compareOp, bvecType, compare, columnL, columnR, out);
+ SpvId merge = this->nextId();
+ this->writeInstruction(vectorMergeOperator, boolType, merge, compare, out);
+ if (result != 0) {
+ SpvId next = this->nextId();
+ this->writeInstruction(mergeOperator, boolType, next, result, merge, out);
+ result = next;
+ }
+ else {
+ result = merge;
+ }
+ }
+ return result;
+}
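+
+// For illustration: comparing two float2x2 matrices a == b expands to roughly
+// all(equal(a[0], b[0])) && all(equal(a[1], b[1]))
+// with OpFOrdEqual as the per-column compare, OpAll as the vector merge operator, and
+// OpLogicalAnd folding the per-column bools into the final result.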
+
+SpvId SPIRVCodeGenerator::writeComponentwiseMatrixBinary(const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ floatOperator,
+ SpvOp_ intOperator,
+ OutputStream& out) {
+ SpvOp_ op = is_float(fContext, operandType) ? floatOperator : intOperator;
+ SkASSERT(operandType.kind() == Type::kMatrix_Kind);
+ SpvId columnType = this->getType(operandType.componentType().toCompound(fContext,
+ operandType.rows(),
+ 1));
+ SpvId columns[4];
+ for (int i = 0; i < operandType.columns(); i++) {
+ SpvId columnL = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, columnType, columnL, lhs, i, out);
+ SpvId columnR = this->nextId();
+ this->writeInstruction(SpvOpCompositeExtract, columnType, columnR, rhs, i, out);
+ columns[i] = this->nextId();
+ this->writeInstruction(op, columnType, columns[i], columnL, columnR, out);
+ }
+ SpvId result = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + operandType.columns(), out);
+ this->writeWord(this->getType(operandType), out);
+ this->writeWord(result, out);
+ for (int i = 0; i < operandType.columns(); i++) {
+ this->writeWord(columns[i], out);
+ }
+ return result;
+}
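+
+// Note: the columns[4] buffer above relies on matrices having at most four columns, which
+// holds for every matrix type SkSL supports. For float2x2 addition, for example, this emits
+// two OpCompositeExtract pairs, two OpFAdd instructions, and one OpCompositeConstruct.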
+
+std::unique_ptr<Expression> create_literal_1(const Context& context, const Type& type) {
+ if (type.isInteger()) {
+ return std::unique_ptr<Expression>(new IntLiteral(-1, 1, &type));
+ }
+ else if (type.isFloat()) {
+ return std::unique_ptr<Expression>(new FloatLiteral(-1, 1.0, &type));
+ } else {
+ ABORT("math is unsupported on type '%s'", type.name().c_str());
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeBinaryExpression(const Type& leftType, SpvId lhs, Token::Kind op,
+ const Type& rightType, SpvId rhs,
+ const Type& resultType, OutputStream& out) {
+ Type tmp("<invalid>");
+ // overall type we are operating on: float2, int, uint4...
+ const Type* operandType;
+ // IR allows mismatched types in expressions (e.g. float2 * float), but they need special
+ // handling in SPIR-V
+ if (this->getActualType(leftType) != this->getActualType(rightType)) {
+ if (leftType.kind() == Type::kVector_Kind && rightType.isNumber()) {
+ if (op == Token::SLASH) {
+ SpvId one = this->writeExpression(*create_literal_1(fContext, rightType), out);
+ SpvId inverse = this->nextId();
+ this->writeInstruction(SpvOpFDiv, this->getType(rightType), inverse, one, rhs, out);
+ rhs = inverse;
+ op = Token::STAR;
+ }
+ if (op == Token::STAR) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpVectorTimesScalar, this->getType(resultType),
+ result, lhs, rhs, out);
+ return result;
+ }
+ // promote number to vector
+ SpvId vec = this->nextId();
+ const Type& vecType = leftType;
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + vecType.columns(), out);
+ this->writeWord(this->getType(vecType), out);
+ this->writeWord(vec, out);
+ for (int i = 0; i < vecType.columns(); i++) {
+ this->writeWord(rhs, out);
+ }
+ rhs = vec;
+ operandType = &leftType;
+ } else if (rightType.kind() == Type::kVector_Kind && leftType.isNumber()) {
+ if (op == Token::STAR) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpVectorTimesScalar, this->getType(resultType),
+ result, rhs, lhs, out);
+ return result;
+ }
+ // promote number to vector
+ SpvId vec = this->nextId();
+ const Type& vecType = rightType;
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + vecType.columns(), out);
+ this->writeWord(this->getType(vecType), out);
+ this->writeWord(vec, out);
+ for (int i = 0; i < vecType.columns(); i++) {
+ this->writeWord(lhs, out);
+ }
+ lhs = vec;
+ operandType = &rightType;
+ } else if (leftType.kind() == Type::kMatrix_Kind) {
+ SpvOp_ spvop;
+ if (rightType.kind() == Type::kMatrix_Kind) {
+ spvop = SpvOpMatrixTimesMatrix;
+ } else if (rightType.kind() == Type::kVector_Kind) {
+ spvop = SpvOpMatrixTimesVector;
+ } else {
+ SkASSERT(rightType.kind() == Type::kScalar_Kind);
+ spvop = SpvOpMatrixTimesScalar;
+ }
+ SpvId result = this->nextId();
+ this->writeInstruction(spvop, this->getType(resultType), result, lhs, rhs, out);
+ return result;
+ } else if (rightType.kind() == Type::kMatrix_Kind) {
+ SpvId result = this->nextId();
+ if (leftType.kind() == Type::kVector_Kind) {
+ this->writeInstruction(SpvOpVectorTimesMatrix, this->getType(resultType), result,
+ lhs, rhs, out);
+ } else {
+ SkASSERT(leftType.kind() == Type::kScalar_Kind);
+ this->writeInstruction(SpvOpMatrixTimesScalar, this->getType(resultType), result,
+ rhs, lhs, out);
+ }
+ return result;
+ } else {
+ SkASSERT(false);
+ return -1;
+ }
+ } else {
+ tmp = this->getActualType(leftType);
+ operandType = &tmp;
+ SkASSERT(*operandType == this->getActualType(rightType));
+ }
+ switch (op) {
+ case Token::EQEQ: {
+ if (operandType->kind() == Type::kMatrix_Kind) {
+ return this->writeMatrixComparison(*operandType, lhs, rhs, SpvOpFOrdEqual,
+ SpvOpIEqual, SpvOpAll, SpvOpLogicalAnd, out);
+ }
+ SkASSERT(resultType == *fContext.fBool_Type);
+ const Type* tmpType;
+ if (operandType->kind() == Type::kVector_Kind) {
+ tmpType = &fContext.fBool_Type->toCompound(fContext,
+ operandType->columns(),
+ operandType->rows());
+ } else {
+ tmpType = &resultType;
+ }
+ return this->foldToBool(this->writeBinaryOperation(*tmpType, *operandType, lhs, rhs,
+ SpvOpFOrdEqual, SpvOpIEqual,
+ SpvOpIEqual, SpvOpLogicalEqual, out),
+ *operandType, SpvOpAll, out);
+ }
+ case Token::NEQ:
+ if (operandType->kind() == Type::kMatrix_Kind) {
+ return this->writeMatrixComparison(*operandType, lhs, rhs, SpvOpFOrdNotEqual,
+ SpvOpINotEqual, SpvOpAny, SpvOpLogicalOr, out);
+ }
+ SkASSERT(resultType == *fContext.fBool_Type);
+ const Type* tmpType;
+ if (operandType->kind() == Type::kVector_Kind) {
+ tmpType = &fContext.fBool_Type->toCompound(fContext,
+ operandType->columns(),
+ operandType->rows());
+ } else {
+ tmpType = &resultType;
+ }
+ return this->foldToBool(this->writeBinaryOperation(*tmpType, *operandType, lhs, rhs,
+ SpvOpFOrdNotEqual, SpvOpINotEqual,
+ SpvOpINotEqual, SpvOpLogicalNotEqual,
+ out),
+ *operandType, SpvOpAny, out);
+ case Token::GT:
+ SkASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdGreaterThan, SpvOpSGreaterThan,
+ SpvOpUGreaterThan, SpvOpUndef, out);
+ case Token::LT:
+ SkASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFOrdLessThan,
+ SpvOpSLessThan, SpvOpULessThan, SpvOpUndef, out);
+ case Token::GTEQ:
+ SkASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdGreaterThanEqual, SpvOpSGreaterThanEqual,
+ SpvOpUGreaterThanEqual, SpvOpUndef, out);
+ case Token::LTEQ:
+ SkASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdLessThanEqual, SpvOpSLessThanEqual,
+ SpvOpULessThanEqual, SpvOpUndef, out);
+ case Token::PLUS:
+ if (leftType.kind() == Type::kMatrix_Kind &&
+ rightType.kind() == Type::kMatrix_Kind) {
+ SkASSERT(leftType == rightType);
+ return this->writeComponentwiseMatrixBinary(leftType, lhs, rhs,
+ SpvOpFAdd, SpvOpIAdd, out);
+ }
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFAdd,
+ SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+ case Token::MINUS:
+ if (leftType.kind() == Type::kMatrix_Kind &&
+ rightType.kind() == Type::kMatrix_Kind) {
+ SkASSERT(leftType == rightType);
+ return this->writeComponentwiseMatrixBinary(leftType, lhs, rhs,
+ SpvOpFSub, SpvOpISub, out);
+ }
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFSub,
+ SpvOpISub, SpvOpISub, SpvOpUndef, out);
+ case Token::STAR:
+ if (leftType.kind() == Type::kMatrix_Kind &&
+ rightType.kind() == Type::kMatrix_Kind) {
+ // matrix multiply
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpMatrixTimesMatrix, this->getType(resultType), result,
+ lhs, rhs, out);
+ return result;
+ }
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMul,
+ SpvOpIMul, SpvOpIMul, SpvOpUndef, out);
+ case Token::SLASH:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFDiv,
+ SpvOpSDiv, SpvOpUDiv, SpvOpUndef, out);
+ case Token::PERCENT:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMod,
+ SpvOpSMod, SpvOpUMod, SpvOpUndef, out);
+ case Token::SHL:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+ SpvOpShiftLeftLogical, SpvOpShiftLeftLogical,
+ SpvOpUndef, out);
+ case Token::SHR:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+ SpvOpShiftRightArithmetic, SpvOpShiftRightLogical,
+ SpvOpUndef, out);
+ case Token::BITWISEAND:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+ SpvOpBitwiseAnd, SpvOpBitwiseAnd, SpvOpUndef, out);
+ case Token::BITWISEOR:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+ SpvOpBitwiseOr, SpvOpBitwiseOr, SpvOpUndef, out);
+ case Token::BITWISEXOR:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+ SpvOpBitwiseXor, SpvOpBitwiseXor, SpvOpUndef, out);
+ case Token::COMMA:
+ return rhs;
+ default:
+ SkASSERT(false);
+ return -1;
+ }
+}
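+
+// For illustration of the mixed-type handling above: float2 v / float s does not splat s;
+// it emits OpFDiv to compute 1.0 / s and then a single OpVectorTimesScalar, whereas
+// e.g. float2 v + float s splats s into a float2 with OpCompositeConstruct first.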
+
+SpvId SPIRVCodeGenerator::writeBinaryExpression(const BinaryExpression& b, OutputStream& out) {
+ // handle cases where we don't necessarily evaluate both LHS and RHS
+ switch (b.fOperator) {
+ case Token::EQ: {
+ SpvId rhs = this->writeExpression(*b.fRight, out);
+ this->getLValue(*b.fLeft, out)->store(rhs, out);
+ return rhs;
+ }
+ case Token::LOGICALAND:
+ return this->writeLogicalAnd(b, out);
+ case Token::LOGICALOR:
+ return this->writeLogicalOr(b, out);
+ default:
+ break;
+ }
+
+ std::unique_ptr<LValue> lvalue;
+ SpvId lhs;
+ if (is_assignment(b.fOperator)) {
+ lvalue = this->getLValue(*b.fLeft, out);
+ lhs = lvalue->load(out);
+ } else {
+ lvalue = nullptr;
+ lhs = this->writeExpression(*b.fLeft, out);
+ }
+ SpvId rhs = this->writeExpression(*b.fRight, out);
+ SpvId result = this->writeBinaryExpression(b.fLeft->fType, lhs, remove_assignment(b.fOperator),
+ b.fRight->fType, rhs, b.fType, out);
+ if (lvalue) {
+ lvalue->store(result, out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeLogicalAnd(const BinaryExpression& a, OutputStream& out) {
+ SkASSERT(a.fOperator == Token::LOGICALAND);
+ BoolLiteral falseLiteral(fContext, -1, false);
+ SpvId falseConstant = this->writeBoolLiteral(falseLiteral);
+ SpvId lhs = this->writeExpression(*a.fLeft, out);
+ SpvId rhsLabel = this->nextId();
+ SpvId end = this->nextId();
+ SpvId lhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, lhs, rhsLabel, end, out);
+ this->writeLabel(rhsLabel, out);
+ SpvId rhs = this->writeExpression(*a.fRight, out);
+ SpvId rhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpPhi, this->getType(*fContext.fBool_Type), result, falseConstant,
+ lhsBlock, rhs, rhsBlock, out);
+ return result;
+}
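+
+// The control flow emitted above is (block names are placeholders):
+// %lhsBlock: OpSelectionMerge %end; OpBranchConditional %lhs %rhsLabel %end
+// %rhsLabel: <evaluate rhs>; OpBranch %end
+// %end: %result = OpPhi %bool %false %lhsBlock %rhs %rhsBlock
+// so the right-hand side is only evaluated when the left-hand side is true.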
+
+SpvId SPIRVCodeGenerator::writeLogicalOr(const BinaryExpression& o, OutputStream& out) {
+ SkASSERT(o.fOperator == Token::LOGICALOR);
+ BoolLiteral trueLiteral(fContext, -1, true);
+ SpvId trueConstant = this->writeBoolLiteral(trueLiteral);
+ SpvId lhs = this->writeExpression(*o.fLeft, out);
+ SpvId rhsLabel = this->nextId();
+ SpvId end = this->nextId();
+ SpvId lhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, lhs, end, rhsLabel, out);
+ this->writeLabel(rhsLabel, out);
+ SpvId rhs = this->writeExpression(*o.fRight, out);
+ SpvId rhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpPhi, this->getType(*fContext.fBool_Type), result, trueConstant,
+ lhsBlock, rhs, rhsBlock, out);
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeTernaryExpression(const TernaryExpression& t, OutputStream& out) {
+ SpvId test = this->writeExpression(*t.fTest, out);
+ if (t.fIfTrue->fType.columns() == 1 && t.fIfTrue->isConstant() && t.fIfFalse->isConstant()) {
+ // both true and false are constants, can just use OpSelect
+ SpvId result = this->nextId();
+ SpvId trueId = this->writeExpression(*t.fIfTrue, out);
+ SpvId falseId = this->writeExpression(*t.fIfFalse, out);
+ this->writeInstruction(SpvOpSelect, this->getType(t.fType), result, test, trueId, falseId,
+ out);
+ return result;
+ }
+ // We originally used OpPhi to choose the result, but for some reason that was crashing on
+ // Adreno. We now store the result in a temp variable instead, as glslang does.
+ SpvId var = this->nextId();
+ this->writeInstruction(SpvOpVariable, this->getPointerType(t.fType, SpvStorageClassFunction),
+ var, SpvStorageClassFunction, fVariableBuffer);
+ SpvId trueLabel = this->nextId();
+ SpvId falseLabel = this->nextId();
+ SpvId end = this->nextId();
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, trueLabel, falseLabel, out);
+ this->writeLabel(trueLabel, out);
+ this->writeInstruction(SpvOpStore, var, this->writeExpression(*t.fIfTrue, out), out);
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(falseLabel, out);
+ this->writeInstruction(SpvOpStore, var, this->writeExpression(*t.fIfFalse, out), out);
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpLoad, this->getType(t.fType), result, var, out);
+ this->writePrecisionModifier(t.fType, result);
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writePrefixExpression(const PrefixExpression& p, OutputStream& out) {
+ if (p.fOperator == Token::MINUS) {
+ SpvId result = this->nextId();
+ SpvId typeId = this->getType(p.fType);
+ SpvId expr = this->writeExpression(*p.fOperand, out);
+ if (is_float(fContext, p.fType)) {
+ this->writeInstruction(SpvOpFNegate, typeId, result, expr, out);
+ } else if (is_signed(fContext, p.fType)) {
+ this->writeInstruction(SpvOpSNegate, typeId, result, expr, out);
+ } else {
+ ABORT("unsupported prefix expression %s", p.description().c_str());
+ }
+ this->writePrecisionModifier(p.fType, result);
+ return result;
+ }
+ switch (p.fOperator) {
+ case Token::PLUS:
+ return this->writeExpression(*p.fOperand, out);
+ case Token::PLUSPLUS: {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ SpvId result = this->writeBinaryOperation(p.fType, p.fType, lv->load(out), one,
+ SpvOpFAdd, SpvOpIAdd, SpvOpIAdd, SpvOpUndef,
+ out);
+ lv->store(result, out);
+ return result;
+ }
+ case Token::MINUSMINUS: {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ SpvId result = this->writeBinaryOperation(p.fType, p.fType, lv->load(out), one,
+ SpvOpFSub, SpvOpISub, SpvOpISub, SpvOpUndef,
+ out);
+ lv->store(result, out);
+ return result;
+ }
+ case Token::LOGICALNOT: {
+ SkASSERT(p.fOperand->fType == *fContext.fBool_Type);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpLogicalNot, this->getType(p.fOperand->fType), result,
+ this->writeExpression(*p.fOperand, out), out);
+ return result;
+ }
+ case Token::BITWISENOT: {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpNot, this->getType(p.fOperand->fType), result,
+ this->writeExpression(*p.fOperand, out), out);
+ return result;
+ }
+ default:
+ ABORT("unsupported prefix expression: %s", p.description().c_str());
+ }
+}
+
+SpvId SPIRVCodeGenerator::writePostfixExpression(const PostfixExpression& p, OutputStream& out) {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId result = lv->load(out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ switch (p.fOperator) {
+ case Token::PLUSPLUS: {
+ SpvId temp = this->writeBinaryOperation(p.fType, p.fType, result, one, SpvOpFAdd,
+ SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+ lv->store(temp, out);
+ return result;
+ }
+ case Token::MINUSMINUS: {
+ SpvId temp = this->writeBinaryOperation(p.fType, p.fType, result, one, SpvOpFSub,
+ SpvOpISub, SpvOpISub, SpvOpUndef, out);
+ lv->store(temp, out);
+ return result;
+ }
+ default:
+ ABORT("unsupported postfix expression %s", p.description().c_str());
+ }
+}
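+
+// Note that, unlike the prefix cases above, the postfix operators return the value loaded
+// before the increment/decrement: i++ stores i + 1 but yields the original i.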
+
+SpvId SPIRVCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
+ if (b.fValue) {
+ if (fBoolTrue == 0) {
+ fBoolTrue = this->nextId();
+ this->writeInstruction(SpvOpConstantTrue, this->getType(b.fType), fBoolTrue,
+ fConstantBuffer);
+ }
+ return fBoolTrue;
+ } else {
+ if (fBoolFalse == 0) {
+ fBoolFalse = this->nextId();
+ this->writeInstruction(SpvOpConstantFalse, this->getType(b.fType), fBoolFalse,
+ fConstantBuffer);
+ }
+ return fBoolFalse;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ ConstantType type;
+ if (i.fType == *fContext.fInt_Type) {
+ type = ConstantType::kInt;
+ } else if (i.fType == *fContext.fUInt_Type) {
+ type = ConstantType::kUInt;
+ } else if (i.fType == *fContext.fShort_Type || i.fType == *fContext.fByte_Type) {
+ type = ConstantType::kShort;
+ } else if (i.fType == *fContext.fUShort_Type || i.fType == *fContext.fUByte_Type) {
+ type = ConstantType::kUShort;
+ } else {
+ SkASSERT(false);
+ }
+ std::pair<ConstantValue, ConstantType> key(i.fValue, type);
+ auto entry = fNumberConstants.find(key);
+ if (entry == fNumberConstants.end()) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpConstant, this->getType(i.fType), result, (SpvId) i.fValue,
+ fConstantBuffer);
+ fNumberConstants[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ if (f.fType != *fContext.fDouble_Type) {
+ ConstantType type;
+ if (f.fType == *fContext.fHalf_Type) {
+ type = ConstantType::kHalf;
+ } else {
+ type = ConstantType::kFloat;
+ }
+ float value = (float) f.fValue;
+ std::pair<ConstantValue, ConstantType> key(f.fValue, type);
+ auto entry = fNumberConstants.find(key);
+ if (entry == fNumberConstants.end()) {
+ SpvId result = this->nextId();
+ uint32_t bits;
+ SkASSERT(sizeof(bits) == sizeof(value));
+ memcpy(&bits, &value, sizeof(bits));
+ this->writeInstruction(SpvOpConstant, this->getType(f.fType), result, bits,
+ fConstantBuffer);
+ fNumberConstants[key] = result;
+ return result;
+ }
+ return entry->second;
+ } else {
+ std::pair<ConstantValue, ConstantType> key(f.fValue, ConstantType::kDouble);
+ auto entry = fNumberConstants.find(key);
+ if (entry == fNumberConstants.end()) {
+ SpvId result = this->nextId();
+ uint64_t bits;
+ SkASSERT(sizeof(bits) == sizeof(f.fValue));
+ memcpy(&bits, &f.fValue, sizeof(bits));
+ this->writeInstruction(SpvOpConstant, this->getType(f.fType), result,
+ bits & 0xffffffff, bits >> 32, fConstantBuffer);
+ fNumberConstants[key] = result;
+ return result;
+ }
+ return entry->second;
+ }
+}
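+
+// Both literal writers above deduplicate constants through fNumberConstants, so each
+// distinct (value, type) pair is emitted into fConstantBuffer exactly once. Float bits are
+// obtained with memcpy rather than a numeric cast to preserve the exact bit pattern; doubles
+// are split into two words, low 32 bits first, per the SPIR-V encoding of 64-bit literals.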
+
+SpvId SPIRVCodeGenerator::writeFunctionStart(const FunctionDeclaration& f, OutputStream& out) {
+ SpvId result = fFunctionMap[&f];
+ this->writeInstruction(SpvOpFunction, this->getType(f.fReturnType), result,
+ SpvFunctionControlMaskNone, this->getFunctionType(f), out);
+ this->writeInstruction(SpvOpName, result, f.fName, fNameBuffer);
+ for (size_t i = 0; i < f.fParameters.size(); i++) {
+ SpvId id = this->nextId();
+ fVariableMap[f.fParameters[i]] = id;
+ SpvId type;
+ type = this->getPointerType(f.fParameters[i]->fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpFunctionParameter, type, id, out);
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeFunction(const FunctionDefinition& f, OutputStream& out) {
+ fVariableBuffer.reset();
+ SpvId result = this->writeFunctionStart(f.fDeclaration, out);
+ this->writeLabel(this->nextId(), out);
+ StringStream bodyBuffer;
+ this->writeBlock((Block&) *f.fBody, bodyBuffer);
+ write_stringstream(fVariableBuffer, out);
+ if (f.fDeclaration.fName == "main") {
+ write_stringstream(fGlobalInitializersBuffer, out);
+ }
+ write_stringstream(bodyBuffer, out);
+ if (fCurrentBlock) {
+ if (f.fDeclaration.fReturnType == *fContext.fVoid_Type) {
+ this->writeInstruction(SpvOpReturn, out);
+ } else {
+ this->writeInstruction(SpvOpUnreachable, out);
+ }
+ }
+ this->writeInstruction(SpvOpFunctionEnd, out);
+ return result;
+}
+
+void SPIRVCodeGenerator::writeLayout(const Layout& layout, SpvId target) {
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationLocation, layout.fLocation,
+ fDecorationBuffer);
+ }
+ if (layout.fBinding >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBinding, layout.fBinding,
+ fDecorationBuffer);
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationIndex, layout.fIndex,
+ fDecorationBuffer);
+ }
+ if (layout.fSet >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationDescriptorSet, layout.fSet,
+ fDecorationBuffer);
+ }
+ if (layout.fInputAttachmentIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationInputAttachmentIndex,
+ layout.fInputAttachmentIndex, fDecorationBuffer);
+ fCapabilities |= (((uint64_t) 1) << SpvCapabilityInputAttachment);
+ }
+ if (layout.fBuiltin >= 0 && layout.fBuiltin != SK_FRAGCOLOR_BUILTIN &&
+ layout.fBuiltin != SK_IN_BUILTIN && layout.fBuiltin != SK_OUT_BUILTIN) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBuiltIn, layout.fBuiltin,
+ fDecorationBuffer);
+ }
+}
+
+void SPIRVCodeGenerator::writeLayout(const Layout& layout, SpvId target, int member) {
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationLocation,
+ layout.fLocation, fDecorationBuffer);
+ }
+ if (layout.fBinding >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationBinding,
+ layout.fBinding, fDecorationBuffer);
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationIndex,
+ layout.fIndex, fDecorationBuffer);
+ }
+ if (layout.fSet >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationDescriptorSet,
+ layout.fSet, fDecorationBuffer);
+ }
+ if (layout.fInputAttachmentIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, member, SpvDecorationInputAttachmentIndex,
+ layout.fInputAttachmentIndex, fDecorationBuffer);
+ }
+ if (layout.fBuiltin >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationBuiltIn,
+ layout.fBuiltin, fDecorationBuffer);
+ }
+}
+
+static void update_sk_in_count(const Modifiers& m, int* outSkInCount) {
+ switch (m.fLayout.fPrimitive) {
+ case Layout::kPoints_Primitive:
+ *outSkInCount = 1;
+ break;
+ case Layout::kLines_Primitive:
+ *outSkInCount = 2;
+ break;
+ case Layout::kLinesAdjacency_Primitive:
+ *outSkInCount = 4;
+ break;
+ case Layout::kTriangles_Primitive:
+ *outSkInCount = 3;
+ break;
+ case Layout::kTrianglesAdjacency_Primitive:
+ *outSkInCount = 6;
+ break;
+ default:
+ return;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ bool isBuffer = (0 != (intf.fVariable.fModifiers.fFlags & Modifiers::kBuffer_Flag));
+ bool pushConstant = (0 != (intf.fVariable.fModifiers.fLayout.fFlags &
+ Layout::kPushConstant_Flag));
+ MemoryLayout memoryLayout = (pushConstant || isBuffer) ?
+ MemoryLayout(MemoryLayout::k430_Standard) :
+ fDefaultLayout;
+ SpvId result = this->nextId();
+ const Type* type = &intf.fVariable.fType;
+ if (fProgram.fInputs.fRTHeight) {
+ SkASSERT(fRTHeightStructId == (SpvId) -1);
+ SkASSERT(fRTHeightFieldIndex == (SpvId) -1);
+ std::vector<Type::Field> fields = type->fields();
+ fRTHeightStructId = result;
+ fRTHeightFieldIndex = fields.size();
+ fields.emplace_back(Modifiers(), StringFragment(SKSL_RTHEIGHT_NAME), fContext.fFloat_Type.get());
+ type = new Type(type->fOffset, type->name(), fields);
+ }
+ SpvId typeId;
+ if (intf.fVariable.fModifiers.fLayout.fBuiltin == SK_IN_BUILTIN) {
+ for (const auto& e : fProgram) {
+ if (e.fKind == ProgramElement::kModifiers_Kind) {
+ const Modifiers& m = ((ModifiersDeclaration&) e).fModifiers;
+ update_sk_in_count(m, &fSkInCount);
+ }
+ }
+ typeId = this->getType(Type("sk_in", Type::kArray_Kind, intf.fVariable.fType.componentType(),
+ fSkInCount), memoryLayout);
+ } else {
+ typeId = this->getType(*type, memoryLayout);
+ }
+ if (intf.fVariable.fModifiers.fFlags & Modifiers::kBuffer_Flag) {
+ this->writeInstruction(SpvOpDecorate, typeId, SpvDecorationBufferBlock, fDecorationBuffer);
+ } else if (intf.fVariable.fModifiers.fLayout.fBuiltin == -1) {
+ this->writeInstruction(SpvOpDecorate, typeId, SpvDecorationBlock, fDecorationBuffer);
+ }
+ SpvStorageClass_ storageClass = get_storage_class(intf.fVariable.fModifiers);
+ SpvId ptrType = this->nextId();
+ this->writeInstruction(SpvOpTypePointer, ptrType, storageClass, typeId, fConstantBuffer);
+ this->writeInstruction(SpvOpVariable, ptrType, result, storageClass, fConstantBuffer);
+ Layout layout = intf.fVariable.fModifiers.fLayout;
+ if (intf.fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag && layout.fSet == -1) {
+ layout.fSet = 0;
+ }
+ this->writeLayout(layout, result);
+ fVariableMap[&intf.fVariable] = result;
+ if (fProgram.fInputs.fRTHeight) {
+ delete type;
+ }
+ return result;
+}
+
+void SPIRVCodeGenerator::writePrecisionModifier(const Type& type, SpvId id) {
+ this->writePrecisionModifier(type.highPrecision() ? Precision::kHigh : Precision::kLow, id);
+}
+
+void SPIRVCodeGenerator::writePrecisionModifier(Precision precision, SpvId id) {
+ if (precision == Precision::kLow) {
+ this->writeInstruction(SpvOpDecorate, id, SpvDecorationRelaxedPrecision, fDecorationBuffer);
+ }
+}
+
+#define BUILTIN_IGNORE 9999
+void SPIRVCodeGenerator::writeGlobalVars(Program::Kind kind, const VarDeclarations& decl,
+ OutputStream& out) {
+ for (size_t i = 0; i < decl.fVars.size(); i++) {
+ if (decl.fVars[i]->fKind == Statement::kNop_Kind) {
+ continue;
+ }
+ const VarDeclaration& varDecl = (VarDeclaration&) *decl.fVars[i];
+ const Variable* var = varDecl.fVar;
+ // These haven't been implemented in our SPIR-V generator yet and we only currently use them
+ // in the OpenGL backend.
+ SkASSERT(!(var->fModifiers.fFlags & (Modifiers::kReadOnly_Flag |
+ Modifiers::kWriteOnly_Flag |
+ Modifiers::kCoherent_Flag |
+ Modifiers::kVolatile_Flag |
+ Modifiers::kRestrict_Flag)));
+ if (var->fModifiers.fLayout.fBuiltin == BUILTIN_IGNORE) {
+ continue;
+ }
+ if (var->fModifiers.fLayout.fBuiltin == SK_FRAGCOLOR_BUILTIN &&
+ kind != Program::kFragment_Kind) {
+ SkASSERT(!fProgram.fSettings.fFragColorIsInOut);
+ continue;
+ }
+ if (!var->fReadCount && !var->fWriteCount &&
+ !(var->fModifiers.fFlags & (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag |
+ Modifiers::kUniform_Flag |
+ Modifiers::kBuffer_Flag))) {
+ // variable is dead and not an input / output var (the Vulkan debug layers complain if
+ // we elide an interface var, even if it's dead)
+ continue;
+ }
+ SpvStorageClass_ storageClass;
+ if (var->fModifiers.fFlags & Modifiers::kIn_Flag) {
+ storageClass = SpvStorageClassInput;
+ } else if (var->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ storageClass = SpvStorageClassOutput;
+ } else if (var->fModifiers.fFlags & Modifiers::kUniform_Flag) {
+ if (var->fType.kind() == Type::kSampler_Kind ||
+ var->fType.kind() == Type::kSeparateSampler_Kind ||
+ var->fType.kind() == Type::kTexture_Kind) {
+ storageClass = SpvStorageClassUniformConstant;
+ } else {
+ storageClass = SpvStorageClassUniform;
+ }
+ } else {
+ storageClass = SpvStorageClassPrivate;
+ }
+ SpvId id = this->nextId();
+ fVariableMap[var] = id;
+ SpvId type;
+ if (var->fModifiers.fLayout.fBuiltin == SK_IN_BUILTIN) {
+ type = this->getPointerType(Type("sk_in", Type::kArray_Kind,
+ var->fType.componentType(), fSkInCount),
+ storageClass);
+ } else {
+ type = this->getPointerType(var->fType, storageClass);
+ }
+ this->writeInstruction(SpvOpVariable, type, id, storageClass, fConstantBuffer);
+ this->writeInstruction(SpvOpName, id, var->fName, fNameBuffer);
+ this->writePrecisionModifier(var->fType, id);
+ if (varDecl.fValue) {
+ SkASSERT(!fCurrentBlock);
+ fCurrentBlock = -1;
+ SpvId value = this->writeExpression(*varDecl.fValue, fGlobalInitializersBuffer);
+ this->writeInstruction(SpvOpStore, id, value, fGlobalInitializersBuffer);
+ fCurrentBlock = 0;
+ }
+ this->writeLayout(var->fModifiers.fLayout, id);
+ if (var->fModifiers.fFlags & Modifiers::kFlat_Flag) {
+ this->writeInstruction(SpvOpDecorate, id, SpvDecorationFlat, fDecorationBuffer);
+ }
+ if (var->fModifiers.fFlags & Modifiers::kNoPerspective_Flag) {
+ this->writeInstruction(SpvOpDecorate, id, SpvDecorationNoPerspective,
+ fDecorationBuffer);
+ }
+ }
+}
+
+void SPIRVCodeGenerator::writeVarDeclarations(const VarDeclarations& decl, OutputStream& out) {
+ for (const auto& stmt : decl.fVars) {
+ SkASSERT(stmt->fKind == Statement::kVarDeclaration_Kind);
+ VarDeclaration& varDecl = (VarDeclaration&) *stmt;
+ const Variable* var = varDecl.fVar;
+ // These haven't been implemented in our SPIR-V generator yet and we only currently use them
+ // in the OpenGL backend.
+ SkASSERT(!(var->fModifiers.fFlags & (Modifiers::kReadOnly_Flag |
+ Modifiers::kWriteOnly_Flag |
+ Modifiers::kCoherent_Flag |
+ Modifiers::kVolatile_Flag |
+ Modifiers::kRestrict_Flag)));
+ SpvId id = this->nextId();
+ fVariableMap[var] = id;
+ SpvId type = this->getPointerType(var->fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpVariable, type, id, SpvStorageClassFunction, fVariableBuffer);
+ this->writeInstruction(SpvOpName, id, var->fName, fNameBuffer);
+ if (varDecl.fValue) {
+ SpvId value = this->writeExpression(*varDecl.fValue, out);
+ this->writeInstruction(SpvOpStore, id, value, out);
+ }
+ }
+}
+
+void SPIRVCodeGenerator::writeStatement(const Statement& s, OutputStream& out) {
+ switch (s.fKind) {
+ case Statement::kNop_Kind:
+ break;
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s, out);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, out);
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s, out);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration, out);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s, out);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s, out);
+ break;
+ case Statement::kWhile_Kind:
+ this->writeWhileStatement((WhileStatement&) s, out);
+ break;
+ case Statement::kDo_Kind:
+ this->writeDoStatement((DoStatement&) s, out);
+ break;
+ case Statement::kSwitch_Kind:
+ this->writeSwitchStatement((SwitchStatement&) s, out);
+ break;
+ case Statement::kBreak_Kind:
+ this->writeInstruction(SpvOpBranch, fBreakTarget.top(), out);
+ break;
+ case Statement::kContinue_Kind:
+ this->writeInstruction(SpvOpBranch, fContinueTarget.top(), out);
+ break;
+ case Statement::kDiscard_Kind:
+ this->writeInstruction(SpvOpKill, out);
+ break;
+ default:
+ ABORT("unsupported statement: %s", s.description().c_str());
+ }
+}
+
+void SPIRVCodeGenerator::writeBlock(const Block& b, OutputStream& out) {
+ for (size_t i = 0; i < b.fStatements.size(); i++) {
+ this->writeStatement(*b.fStatements[i], out);
+ }
+}
+
+void SPIRVCodeGenerator::writeIfStatement(const IfStatement& stmt, OutputStream& out) {
+ SpvId test = this->writeExpression(*stmt.fTest, out);
+ SpvId ifTrue = this->nextId();
+ SpvId ifFalse = this->nextId();
+ if (stmt.fIfFalse) {
+ SpvId end = this->nextId();
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, out);
+ this->writeStatement(*stmt.fIfTrue, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(ifFalse, out);
+ this->writeStatement(*stmt.fIfFalse, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(end, out);
+ } else {
+ this->writeInstruction(SpvOpSelectionMerge, ifFalse, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, out);
+ this->writeStatement(*stmt.fIfTrue, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, ifFalse, out);
+ }
+ this->writeLabel(ifFalse, out);
+ }
+}
+
+void SPIRVCodeGenerator::writeForStatement(const ForStatement& f, OutputStream& out) {
+ if (f.fInitializer) {
+ this->writeStatement(*f.fInitializer, out);
+ }
+ SpvId header = this->nextId();
+ SpvId start = this->nextId();
+ SpvId body = this->nextId();
+ SpvId next = this->nextId();
+ fContinueTarget.push(next);
+ SpvId end = this->nextId();
+ fBreakTarget.push(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, out);
+ this->writeInstruction(SpvOpLoopMerge, end, next, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, out);
+ if (f.fTest) {
+ SpvId test = this->writeExpression(*f.fTest, out);
+ this->writeInstruction(SpvOpBranchConditional, test, body, end, out);
+ }
+ this->writeLabel(body, out);
+ this->writeStatement(*f.fStatement, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, next, out);
+ }
+ this->writeLabel(next, out);
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, out);
+ }
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(end, out);
+ fBreakTarget.pop();
+ fContinueTarget.pop();
+}
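+
+// The loop structure emitted above, with placeholder block names:
+// %header: OpLoopMerge %end %next; OpBranch %start
+// %start: <test>; OpBranchConditional %test %body %end (omitted when the loop has no test)
+// %body: <statement>; OpBranch %next
+// %next: <f.fNext>; OpBranch %header
+// %end: merge block; 'continue' branches to %next and 'break' branches to %end.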
+
+void SPIRVCodeGenerator::writeWhileStatement(const WhileStatement& w, OutputStream& out) {
+ SpvId header = this->nextId();
+ SpvId start = this->nextId();
+ SpvId body = this->nextId();
+ SpvId continueTarget = this->nextId();
+ fContinueTarget.push(continueTarget);
+ SpvId end = this->nextId();
+ fBreakTarget.push(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, out);
+ this->writeInstruction(SpvOpLoopMerge, end, continueTarget, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, out);
+ SpvId test = this->writeExpression(*w.fTest, out);
+ this->writeInstruction(SpvOpBranchConditional, test, body, end, out);
+ this->writeLabel(body, out);
+ this->writeStatement(*w.fStatement, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, continueTarget, out);
+ }
+ this->writeLabel(continueTarget, out);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(end, out);
+ fBreakTarget.pop();
+ fContinueTarget.pop();
+}
+
+void SPIRVCodeGenerator::writeDoStatement(const DoStatement& d, OutputStream& out) {
+ // We believe the do-loop code below will work, but Skia doesn't actually use do loops, and
+ // adequately testing this code when nothing exercises it isn't straightforward. For the time
+ // being, we just fail with an error due to the lack of testing. If you encounter this
+ // message, simply remove the error call below to see whether our do-loop support actually
+ // works.
+ fErrors.error(d.fOffset, "internal error: do loop support has been disabled in SPIR-V, see "
+ "SkSLSPIRVCodeGenerator.cpp for details");
+
+ SpvId header = this->nextId();
+ SpvId start = this->nextId();
+ SpvId next = this->nextId();
+ SpvId continueTarget = this->nextId();
+ fContinueTarget.push(continueTarget);
+ SpvId end = this->nextId();
+ fBreakTarget.push(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, out);
+ this->writeInstruction(SpvOpLoopMerge, end, continueTarget, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, out);
+ this->writeStatement(*d.fStatement, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, next, out);
+ }
+ this->writeLabel(next, out);
+ SpvId test = this->writeExpression(*d.fTest, out);
+ this->writeInstruction(SpvOpBranchConditional, test, continueTarget, end, out);
+ this->writeLabel(continueTarget, out);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(end, out);
+ fBreakTarget.pop();
+ fContinueTarget.pop();
+}
+
+void SPIRVCodeGenerator::writeSwitchStatement(const SwitchStatement& s, OutputStream& out) {
+ SpvId value = this->writeExpression(*s.fValue, out);
+ std::vector<SpvId> labels;
+ SpvId end = this->nextId();
+ SpvId defaultLabel = end;
+ fBreakTarget.push(end);
+ int size = 3;
+ for (const auto& c : s.fCases) {
+ SpvId label = this->nextId();
+ labels.push_back(label);
+ if (c->fValue) {
+ size += 2;
+ } else {
+ defaultLabel = label;
+ }
+ }
+ labels.push_back(end);
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeOpCode(SpvOpSwitch, size, out);
+ this->writeWord(value, out);
+ this->writeWord(defaultLabel, out);
+ for (size_t i = 0; i < s.fCases.size(); ++i) {
+ if (!s.fCases[i]->fValue) {
+ continue;
+ }
+ SkASSERT(s.fCases[i]->fValue->fKind == Expression::kIntLiteral_Kind);
+ this->writeWord(((IntLiteral&) *s.fCases[i]->fValue).fValue, out);
+ this->writeWord(labels[i], out);
+ }
+ for (size_t i = 0; i < s.fCases.size(); ++i) {
+ this->writeLabel(labels[i], out);
+ for (const auto& stmt : s.fCases[i]->fStatements) {
+ this->writeStatement(*stmt, out);
+ }
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, labels[i + 1], out);
+ }
+ }
+ this->writeLabel(end, out);
+ fBreakTarget.pop();
+}
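+
+// Word-count bookkeeping above: OpSwitch needs 3 words (opcode word, selector, default label)
+// plus 2 per literal case (value, label). Because 'end' is appended to 'labels', a case body
+// that reaches its end branches to labels[i + 1] -- the next case's label -- preserving
+// C-style fallthrough.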
+
+void SPIRVCodeGenerator::writeReturnStatement(const ReturnStatement& r, OutputStream& out) {
+ if (r.fExpression) {
+ this->writeInstruction(SpvOpReturnValue, this->writeExpression(*r.fExpression, out),
+ out);
+ } else {
+ this->writeInstruction(SpvOpReturn, out);
+ }
+}
+
+void SPIRVCodeGenerator::writeGeometryShaderExecutionMode(SpvId entryPoint, OutputStream& out) {
+ SkASSERT(fProgram.fKind == Program::kGeometry_Kind);
+ int invocations = 1;
+ for (const auto& e : fProgram) {
+ if (e.fKind == ProgramElement::kModifiers_Kind) {
+ const Modifiers& m = ((ModifiersDeclaration&) e).fModifiers;
+ if (m.fFlags & Modifiers::kIn_Flag) {
+ if (m.fLayout.fInvocations != -1) {
+ invocations = m.fLayout.fInvocations;
+ }
+ SpvId input;
+ switch (m.fLayout.fPrimitive) {
+ case Layout::kPoints_Primitive:
+ input = SpvExecutionModeInputPoints;
+ break;
+ case Layout::kLines_Primitive:
+ input = SpvExecutionModeInputLines;
+ break;
+ case Layout::kLinesAdjacency_Primitive:
+ input = SpvExecutionModeInputLinesAdjacency;
+ break;
+ case Layout::kTriangles_Primitive:
+ input = SpvExecutionModeTriangles;
+ break;
+ case Layout::kTrianglesAdjacency_Primitive:
+ input = SpvExecutionModeInputTrianglesAdjacency;
+ break;
+ default:
+ input = 0;
+ break;
+ }
+ update_sk_in_count(m, &fSkInCount);
+ if (input) {
+ this->writeInstruction(SpvOpExecutionMode, entryPoint, input, out);
+ }
+ } else if (m.fFlags & Modifiers::kOut_Flag) {
+ SpvId output;
+ switch (m.fLayout.fPrimitive) {
+ case Layout::kPoints_Primitive:
+ output = SpvExecutionModeOutputPoints;
+ break;
+ case Layout::kLineStrip_Primitive:
+ output = SpvExecutionModeOutputLineStrip;
+ break;
+ case Layout::kTriangleStrip_Primitive:
+ output = SpvExecutionModeOutputTriangleStrip;
+ break;
+ default:
+ output = 0;
+ break;
+ }
+ if (output) {
+ this->writeInstruction(SpvOpExecutionMode, entryPoint, output, out);
+ }
+ if (m.fLayout.fMaxVertices != -1) {
+ this->writeInstruction(SpvOpExecutionMode, entryPoint,
+ SpvExecutionModeOutputVertices, m.fLayout.fMaxVertices,
+ out);
+ }
+ }
+ }
+ }
+ this->writeInstruction(SpvOpExecutionMode, entryPoint, SpvExecutionModeInvocations,
+ invocations, out);
+}
+
+void SPIRVCodeGenerator::writeInstructions(const Program& program, OutputStream& out) {
+ fGLSLExtendedInstructions = this->nextId();
+ StringStream body;
+ std::set<SpvId> interfaceVars;
+ // assign IDs to functions, determine sk_in size
+ int skInSize = -1;
+ for (const auto& e : program) {
+ switch (e.fKind) {
+ case ProgramElement::kFunction_Kind: {
+ FunctionDefinition& f = (FunctionDefinition&) e;
+ fFunctionMap[&f.fDeclaration] = this->nextId();
+ break;
+ }
+ case ProgramElement::kModifiers_Kind: {
+ Modifiers& m = ((ModifiersDeclaration&) e).fModifiers;
+ if (m.fFlags & Modifiers::kIn_Flag) {
+ switch (m.fLayout.fPrimitive) {
+ case Layout::kPoints_Primitive: // fall through
+ case Layout::kLines_Primitive:
+ skInSize = 1;
+ break;
+ case Layout::kLinesAdjacency_Primitive:
+ skInSize = 2;
+ break;
+ case Layout::kTriangles_Primitive: // fall through
+ case Layout::kTrianglesAdjacency_Primitive:
+ skInSize = 3;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ for (const auto& e : program) {
+ if (e.fKind == ProgramElement::kInterfaceBlock_Kind) {
+ InterfaceBlock& intf = (InterfaceBlock&) e;
+ if (SK_IN_BUILTIN == intf.fVariable.fModifiers.fLayout.fBuiltin) {
+ SkASSERT(skInSize != -1);
+ intf.fSizes.emplace_back(new IntLiteral(fContext, -1, skInSize));
+ }
+ SpvId id = this->writeInterfaceBlock(intf);
+ if (((intf.fVariable.fModifiers.fFlags & Modifiers::kIn_Flag) ||
+ (intf.fVariable.fModifiers.fFlags & Modifiers::kOut_Flag)) &&
+ intf.fVariable.fModifiers.fLayout.fBuiltin == -1) {
+ interfaceVars.insert(id);
+ }
+ }
+ }
+ for (const auto& e : program) {
+ if (e.fKind == ProgramElement::kVar_Kind) {
+ this->writeGlobalVars(program.fKind, ((VarDeclarations&) e), body);
+ }
+ }
+ for (const auto& e : program) {
+ if (e.fKind == ProgramElement::kFunction_Kind) {
+ this->writeFunction(((FunctionDefinition&) e), body);
+ }
+ }
+ const FunctionDeclaration* main = nullptr;
+ for (auto entry : fFunctionMap) {
+ if (entry.first->fName == "main") {
+ main = entry.first;
+ }
+ }
+ if (!main) {
+ fErrors.error(0, "program does not contain a main() function");
+ return;
+ }
+ for (auto entry : fVariableMap) {
+ const Variable* var = entry.first;
+ if (var->fStorage == Variable::kGlobal_Storage &&
+ ((var->fModifiers.fFlags & Modifiers::kIn_Flag) ||
+ (var->fModifiers.fFlags & Modifiers::kOut_Flag))) {
+ interfaceVars.insert(entry.second);
+ }
+ }
+ this->writeCapabilities(out);
+ this->writeInstruction(SpvOpExtInstImport, fGLSLExtendedInstructions, "GLSL.std.450", out);
+ this->writeInstruction(SpvOpMemoryModel, SpvAddressingModelLogical, SpvMemoryModelGLSL450, out);
+ this->writeOpCode(SpvOpEntryPoint, (SpvId) (3 + (main->fName.fLength + 4) / 4) +
+ (int32_t) interfaceVars.size(), out);
+ switch (program.fKind) {
+ case Program::kVertex_Kind:
+ this->writeWord(SpvExecutionModelVertex, out);
+ break;
+ case Program::kFragment_Kind:
+ this->writeWord(SpvExecutionModelFragment, out);
+ break;
+ case Program::kGeometry_Kind:
+ this->writeWord(SpvExecutionModelGeometry, out);
+ break;
+ default:
+ ABORT("cannot write this kind of program to SPIR-V\n");
+ }
+ SpvId entryPoint = fFunctionMap[main];
+ this->writeWord(entryPoint, out);
+ this->writeString(main->fName.fChars, main->fName.fLength, out);
+ for (int var : interfaceVars) {
+ this->writeWord(var, out);
+ }
+ if (program.fKind == Program::kGeometry_Kind) {
+ this->writeGeometryShaderExecutionMode(entryPoint, out);
+ }
+ if (program.fKind == Program::kFragment_Kind) {
+ this->writeInstruction(SpvOpExecutionMode,
+ fFunctionMap[main],
+ SpvExecutionModeOriginUpperLeft,
+ out);
+ }
+ for (const auto& e : program) {
+ if (e.fKind == ProgramElement::kExtension_Kind) {
+ this->writeInstruction(SpvOpSourceExtension, ((Extension&) e).fName.c_str(), out);
+ }
+ }
+
+ write_stringstream(fExtraGlobalsBuffer, out);
+ write_stringstream(fNameBuffer, out);
+ write_stringstream(fDecorationBuffer, out);
+ write_stringstream(fConstantBuffer, out);
+ write_stringstream(fExternalFunctionsBuffer, out);
+ write_stringstream(body, out);
+}
+
+bool SPIRVCodeGenerator::generateCode() {
+ SkASSERT(!fErrors.errorCount());
+ this->writeWord(SpvMagicNumber, *fOut);
+ this->writeWord(SpvVersion, *fOut);
+ this->writeWord(SKSL_MAGIC, *fOut);
+ StringStream buffer;
+ this->writeInstructions(fProgram, buffer);
+ this->writeWord(fIdCount, *fOut);
+ this->writeWord(0, *fOut); // reserved, always zero
+ write_stringstream(buffer, *fOut);
+ return 0 == fErrors.errorCount();
+}
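+
+// The words above form the standard SPIR-V module header: magic number, version, generator
+// magic (SKSL_MAGIC), the ID bound, and a reserved zero. The instructions are generated into
+// a temporary buffer first so that fIdCount is final before the bound word is written.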
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h
new file mode 100644
index 0000000000..8f14b46fb5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SPIRVCODEGENERATOR
+#define SKSL_SPIRVCODEGENERATOR
+
+#include <stack>
+#include <tuple>
+#include <unordered_map>
+
+#include "src/sksl/SkSLCodeGenerator.h"
+#include "src/sksl/SkSLMemoryLayout.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+#include "src/sksl/spirv.h"
+
+union ConstantValue {
+ ConstantValue(int64_t i)
+ : fInt(i) {}
+
+ ConstantValue(double d)
+ : fDouble(d) {}
+
+ bool operator==(const ConstantValue& other) const {
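+        // a ConstantValue is only ever compared against another constant of the same type, so
+        // comparing the raw 64-bit payload covers both the int and the double case (note that
+        // this makes distinct NaN bit patterns unequal and -0.0 unequal to 0.0)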
+ return fInt == other.fInt;
+ }
+
+ int64_t fInt;
+ double fDouble;
+};
+
+enum class ConstantType {
+ kInt,
+ kUInt,
+ kShort,
+ kUShort,
+ kFloat,
+ kDouble,
+ kHalf,
+};
+
+namespace std {
+
+template <>
+struct hash<std::pair<ConstantValue, ConstantType>> {
+ size_t operator()(const std::pair<ConstantValue, ConstantType>& key) const {
+ return key.first.fInt ^ (int) key.second;
+ }
+};
+
+}
+
+namespace SkSL {
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+/**
+ * Converts a Program into a SPIR-V binary.
+ */
+class SPIRVCodeGenerator : public CodeGenerator {
+public:
+ class LValue {
+ public:
+ virtual ~LValue() {}
+
+ // returns a pointer to the lvalue, if possible. If the lvalue cannot be directly referenced
+ // by a pointer (e.g. vector swizzles), returns 0.
+ virtual SpvId getPointer() = 0;
+
+ virtual SpvId load(OutputStream& out) = 0;
+
+ virtual void store(SpvId value, OutputStream& out) = 0;
+ };
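+
+    // Illustrative sketch only (these locals do not appear in this file): a compound
+    // assignment such as 'x += y' is expected to be lowered through an LValue roughly as
+    //     std::unique_ptr<LValue> lv = this->getLValue(x, out);
+    //     SpvId sum = ...;  // result of the binary operation applied to lv->load(out)
+    //     lv->store(sum, out);
+    // which works even for swizzle lvalues that cannot produce a pointer.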
+
+ SPIRVCodeGenerator(const Context* context, const Program* program, ErrorReporter* errors,
+ OutputStream* out)
+ : INHERITED(program, errors, out)
+ , fContext(*context)
+ , fDefaultLayout(MemoryLayout::k140_Standard)
+ , fCapabilities(0)
+ , fIdCount(1)
+ , fBoolTrue(0)
+ , fBoolFalse(0)
+ , fSetupFragPosition(false)
+ , fCurrentBlock(0)
+ , fSynthetics(nullptr, errors) {
+ this->setupIntrinsics();
+ }
+
+ bool generateCode() override;
+
+private:
+ enum IntrinsicKind {
+ kGLSL_STD_450_IntrinsicKind,
+ kSPIRV_IntrinsicKind,
+ kSpecial_IntrinsicKind
+ };
+
+ enum SpecialIntrinsic {
+ kAtan_SpecialIntrinsic,
+ kClamp_SpecialIntrinsic,
+ kMax_SpecialIntrinsic,
+ kMin_SpecialIntrinsic,
+ kMix_SpecialIntrinsic,
+ kMod_SpecialIntrinsic,
+ kDFdy_SpecialIntrinsic,
+ kSaturate_SpecialIntrinsic,
+ kSampledImage_SpecialIntrinsic,
+ kSubpassLoad_SpecialIntrinsic,
+ kTexture_SpecialIntrinsic,
+ };
+
+ enum class Precision {
+ kLow,
+ kHigh,
+ };
+
+ void setupIntrinsics();
+
+ SpvId nextId();
+
+ Type getActualType(const Type& type);
+
+ SpvId getType(const Type& type);
+
+ SpvId getType(const Type& type, const MemoryLayout& layout);
+
+ SpvId getImageType(const Type& type);
+
+ SpvId getFunctionType(const FunctionDeclaration& function);
+
+ SpvId getPointerType(const Type& type, SpvStorageClass_ storageClass);
+
+ SpvId getPointerType(const Type& type, const MemoryLayout& layout,
+ SpvStorageClass_ storageClass);
+
+ void writePrecisionModifier(Precision precision, SpvId id);
+
+ void writePrecisionModifier(const Type& type, SpvId id);
+
+ std::vector<SpvId> getAccessChain(const Expression& expr, OutputStream& out);
+
+ void writeLayout(const Layout& layout, SpvId target);
+
+ void writeLayout(const Layout& layout, SpvId target, int member);
+
+ void writeStruct(const Type& type, const MemoryLayout& layout, SpvId resultId);
+
+ void writeProgramElement(const ProgramElement& pe, OutputStream& out);
+
+ SpvId writeInterfaceBlock(const InterfaceBlock& intf);
+
+ SpvId writeFunctionStart(const FunctionDeclaration& f, OutputStream& out);
+
+ SpvId writeFunctionDeclaration(const FunctionDeclaration& f, OutputStream& out);
+
+ SpvId writeFunction(const FunctionDefinition& f, OutputStream& out);
+
+ void writeGlobalVars(Program::Kind kind, const VarDeclarations& v, OutputStream& out);
+
+ void writeVarDeclarations(const VarDeclarations& decl, OutputStream& out);
+
+ SpvId writeVariableReference(const VariableReference& ref, OutputStream& out);
+
+ std::unique_ptr<LValue> getLValue(const Expression& value, OutputStream& out);
+
+ SpvId writeExpression(const Expression& expr, OutputStream& out);
+
+ SpvId writeIntrinsicCall(const FunctionCall& c, OutputStream& out);
+
+ SpvId writeFunctionCall(const FunctionCall& c, OutputStream& out);
+
+ void writeGLSLExtendedInstruction(const Type& type, SpvId id, SpvId floatInst,
+ SpvId signedInst, SpvId unsignedInst,
+ const std::vector<SpvId>& args, OutputStream& out);
+
+ /**
+ * Given a list of potentially mixed scalars and vectors, promotes the scalars to match the
+ * size of the vectors and returns the ids of the written expressions. e.g. given (float, vec2),
+ * returns (vec2(float), vec2). It is an error to use mismatched vector sizes, e.g. (float,
+ * vec2, vec3).
+ */
+ std::vector<SpvId> vectorize(const std::vector<std::unique_ptr<Expression>>& args,
+ OutputStream& out);
+
+ SpvId writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind, OutputStream& out);
+
+ SpvId writeConstantVector(const Constructor& c);
+
+ SpvId writeFloatConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeIntConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeUIntConstructor(const Constructor& c, OutputStream& out);
+
+ /**
+ * Writes a matrix with the diagonal entries all equal to the provided expression, and all other
+ * entries equal to zero.
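+     * For example, a float3x3 with diagonal d is written with columns (d, 0, 0), (0, d, 0),
+     * (0, 0, d).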
+ */
+ void writeUniformScaleMatrix(SpvId id, SpvId diagonal, const Type& type, OutputStream& out);
+
+ /**
+ * Writes a potentially-different-sized copy of a matrix. Entries which do not exist in the
+ * source matrix are filled with zero; entries which do not exist in the destination matrix are
+ * ignored.
+ */
+ void writeMatrixCopy(SpvId id, SpvId src, const Type& srcType, const Type& dstType,
+ OutputStream& out);
+
+ void addColumnEntry(SpvId columnType, Precision precision, std::vector<SpvId>* currentColumn,
+ std::vector<SpvId>* columnIds, int* currentCount, int rows, SpvId entry,
+ OutputStream& out);
+
+ SpvId writeMatrixConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeVectorConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeArrayConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeConstructor(const Constructor& c, OutputStream& out);
+
+ SpvId writeFieldAccess(const FieldAccess& f, OutputStream& out);
+
+ SpvId writeSwizzle(const Swizzle& swizzle, OutputStream& out);
+
+ /**
+ * Folds the potentially-vector result of a logical operation down to a single bool. If
+ * operandType is a vector type, assumes that the intermediate result in id is a bvec of the
+     * same dimensions, and applies all() to it to fold it down to a single bool value. Otherwise,
+ * returns the original id value.
+ */
+ SpvId foldToBool(SpvId id, const Type& operandType, SpvOp op, OutputStream& out);
+
+ SpvId writeMatrixComparison(const Type& operandType, SpvId lhs, SpvId rhs, SpvOp_ floatOperator,
+ SpvOp_ intOperator, SpvOp_ vectorMergeOperator,
+ SpvOp_ mergeOperator, OutputStream& out);
+
+ SpvId writeComponentwiseMatrixBinary(const Type& operandType, SpvId lhs, SpvId rhs,
+ SpvOp_ floatOperator, SpvOp_ intOperator,
+ OutputStream& out);
+
+ SpvId writeBinaryOperation(const Type& resultType, const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt, SpvOp_ ifUInt,
+ SpvOp_ ifBool, OutputStream& out);
+
+ SpvId writeBinaryOperation(const BinaryExpression& expr, SpvOp_ ifFloat, SpvOp_ ifInt,
+ SpvOp_ ifUInt, OutputStream& out);
+
+ SpvId writeBinaryExpression(const Type& leftType, SpvId lhs, Token::Kind op,
+ const Type& rightType, SpvId rhs, const Type& resultType,
+ OutputStream& out);
+
+ SpvId writeBinaryExpression(const BinaryExpression& b, OutputStream& out);
+
+ SpvId writeTernaryExpression(const TernaryExpression& t, OutputStream& out);
+
+ SpvId writeIndexExpression(const IndexExpression& expr, OutputStream& out);
+
+ SpvId writeLogicalAnd(const BinaryExpression& b, OutputStream& out);
+
+ SpvId writeLogicalOr(const BinaryExpression& o, OutputStream& out);
+
+ SpvId writePrefixExpression(const PrefixExpression& p, OutputStream& out);
+
+ SpvId writePostfixExpression(const PostfixExpression& p, OutputStream& out);
+
+ SpvId writeBoolLiteral(const BoolLiteral& b);
+
+ SpvId writeIntLiteral(const IntLiteral& i);
+
+ SpvId writeFloatLiteral(const FloatLiteral& f);
+
+ void writeStatement(const Statement& s, OutputStream& out);
+
+ void writeBlock(const Block& b, OutputStream& out);
+
+ void writeIfStatement(const IfStatement& stmt, OutputStream& out);
+
+ void writeForStatement(const ForStatement& f, OutputStream& out);
+
+ void writeWhileStatement(const WhileStatement& w, OutputStream& out);
+
+ void writeDoStatement(const DoStatement& d, OutputStream& out);
+
+ void writeSwitchStatement(const SwitchStatement& s, OutputStream& out);
+
+ void writeReturnStatement(const ReturnStatement& r, OutputStream& out);
+
+ void writeCapabilities(OutputStream& out);
+
+ void writeInstructions(const Program& program, OutputStream& out);
+
+ void writeOpCode(SpvOp_ opCode, int length, OutputStream& out);
+
+ void writeWord(int32_t word, OutputStream& out);
+
+ void writeString(const char* string, size_t length, OutputStream& out);
+
+ void writeLabel(SpvId id, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, StringFragment string, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, StringFragment string, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, StringFragment string,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, int32_t word8,
+ OutputStream& out);
+
+ void writeGeometryShaderExecutionMode(SpvId entryPoint, OutputStream& out);
+
+ const Context& fContext;
+ const MemoryLayout fDefaultLayout;
+
+ uint64_t fCapabilities;
+ SpvId fIdCount;
+ SpvId fGLSLExtendedInstructions;
+ typedef std::tuple<IntrinsicKind, int32_t, int32_t, int32_t, int32_t> Intrinsic;
+ std::unordered_map<String, Intrinsic> fIntrinsicMap;
+ std::unordered_map<const FunctionDeclaration*, SpvId> fFunctionMap;
+ std::unordered_map<const Variable*, SpvId> fVariableMap;
+ std::unordered_map<const Variable*, int32_t> fInterfaceBlockMap;
+ std::unordered_map<String, SpvId> fImageTypeMap;
+ std::unordered_map<String, SpvId> fTypeMap;
+ StringStream fCapabilitiesBuffer;
+ StringStream fGlobalInitializersBuffer;
+ StringStream fConstantBuffer;
+ StringStream fExtraGlobalsBuffer;
+ StringStream fExternalFunctionsBuffer;
+ StringStream fVariableBuffer;
+ StringStream fNameBuffer;
+ StringStream fDecorationBuffer;
+
+ SpvId fBoolTrue;
+ SpvId fBoolFalse;
+ std::unordered_map<std::pair<ConstantValue, ConstantType>, SpvId> fNumberConstants;
+ // The constant float2(0, 1), used in swizzling
+ SpvId fConstantZeroOneVector = 0;
+ bool fSetupFragPosition;
+ // label of the current block, or 0 if we are not in a block
+ SpvId fCurrentBlock;
+ std::stack<SpvId> fBreakTarget;
+ std::stack<SpvId> fContinueTarget;
+ SpvId fRTHeightStructId = (SpvId) -1;
+ SpvId fRTHeightFieldIndex = (SpvId) -1;
+ // holds variables synthesized during output, for lifetime purposes
+ SymbolTable fSynthetics;
+ int fSkInCount = 1;
+
+ friend class PointerLValue;
+ friend class SwizzleLValue;
+
+ typedef CodeGenerator INHERITED;
+};
+
+}
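+
+// Minimal usage sketch (assumes a Context, Program, and ErrorReporter built elsewhere; the
+// locals below are illustrative, not part of this file):
+//
+//     SkSL::StringStream out;
+//     SkSL::SPIRVCodeGenerator cg(&context, &program, &errors, &out);
+//     if (cg.generateCode()) {
+//         // out.str() now holds the SPIR-V binary
+//     }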
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.cpp b/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.cpp
new file mode 100644
index 0000000000..edd9fbe42b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLSectionAndParameterHelper.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarationsStatement.h"
+#include "src/sksl/ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+SectionAndParameterHelper::SectionAndParameterHelper(const Program* program, ErrorReporter& errors)
+ : fProgram(*program) {
+ for (const auto& p : fProgram) {
+ switch (p.fKind) {
+ case ProgramElement::kVar_Kind: {
+ const VarDeclarations& decls = (const VarDeclarations&) p;
+ for (const auto& raw : decls.fVars) {
+ const VarDeclaration& decl = (VarDeclaration&) *raw;
+ if (IsParameter(*decl.fVar)) {
+ fParameters.push_back(decl.fVar);
+ }
+ }
+ break;
+ }
+ case ProgramElement::kSection_Kind: {
+ const Section& s = (const Section&) p;
+ if (IsSupportedSection(s.fName.c_str())) {
+ if (SectionRequiresArgument(s.fName.c_str()) && !s.fArgument.size()) {
+ errors.error(s.fOffset,
+ ("section '@" + s.fName +
+ "' requires one parameter").c_str());
+ }
+ if (!SectionAcceptsArgument(s.fName.c_str()) && s.fArgument.size()) {
+ errors.error(s.fOffset,
+ ("section '@" + s.fName + "' has no parameters").c_str());
+ }
+ } else {
+ errors.error(s.fOffset,
+ ("unsupported section '@" + s.fName + "'").c_str());
+ }
+ if (!SectionPermitsDuplicates(s.fName.c_str()) &&
+ fSections.find(s.fName) != fSections.end()) {
+ errors.error(s.fOffset,
+ ("duplicate section '@" + s.fName + "'").c_str());
+ }
+ fSections[s.fName].push_back(&s);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+bool SectionAndParameterHelper::hasCoordOverrides(const Variable& fp) {
+ for (const auto& pe : fProgram) {
+ if (this->hasCoordOverrides(pe, fp)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SectionAndParameterHelper::hasCoordOverrides(const ProgramElement& pe, const Variable& fp) {
+ if (pe.fKind == ProgramElement::kFunction_Kind) {
+ return this->hasCoordOverrides(*((const FunctionDefinition&) pe).fBody, fp);
+ }
+ return false;
+}
+
+bool SectionAndParameterHelper::hasCoordOverrides(const Expression& e, const Variable& fp) {
+ switch (e.fKind) {
+ case Expression::kFunctionCall_Kind: {
+ const FunctionCall& fc = (const FunctionCall&) e;
+ const FunctionDeclaration& f = fc.fFunction;
+ if (f.fBuiltin && f.fName == "sample" && fc.fArguments.size() >= 2 &&
+ fc.fArguments.back()->fType == *fProgram.fContext->fFloat2_Type &&
+ fc.fArguments[0]->fKind == Expression::kVariableReference_Kind &&
+ &((VariableReference&) *fc.fArguments[0]).fVariable == &fp) {
+ return true;
+ }
+ for (const auto& e : fc.fArguments) {
+ if (this->hasCoordOverrides(*e, fp)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case Expression::kConstructor_Kind: {
+ const Constructor& c = (const Constructor&) e;
+ for (const auto& e : c.fArguments) {
+ if (this->hasCoordOverrides(*e, fp)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case Expression::kFieldAccess_Kind: {
+ return this->hasCoordOverrides(*((const FieldAccess&) e).fBase, fp);
+ }
+ case Expression::kSwizzle_Kind:
+ return this->hasCoordOverrides(*((const Swizzle&) e).fBase, fp);
+ case Expression::kBinary_Kind: {
+ const BinaryExpression& b = (const BinaryExpression&) e;
+ return this->hasCoordOverrides(*b.fLeft, fp) ||
+ this->hasCoordOverrides(*b.fRight, fp);
+ }
+ case Expression::kIndex_Kind: {
+ const IndexExpression& idx = (const IndexExpression&) e;
+ return this->hasCoordOverrides(*idx.fBase, fp) ||
+ this->hasCoordOverrides(*idx.fIndex, fp);
+ }
+ case Expression::kPrefix_Kind:
+ return this->hasCoordOverrides(*((const PrefixExpression&) e).fOperand, fp);
+ case Expression::kPostfix_Kind:
+ return this->hasCoordOverrides(*((const PostfixExpression&) e).fOperand, fp);
+ case Expression::kTernary_Kind: {
+ const TernaryExpression& t = (const TernaryExpression&) e;
+ return this->hasCoordOverrides(*t.fTest, fp) ||
+ this->hasCoordOverrides(*t.fIfTrue, fp) ||
+ this->hasCoordOverrides(*t.fIfFalse, fp);
+ }
+ case Expression::kVariableReference_Kind:
+ return false;
+ case Expression::kBoolLiteral_Kind:
+ case Expression::kDefined_Kind:
+ case Expression::kExternalFunctionCall_Kind:
+ case Expression::kExternalValue_Kind:
+ case Expression::kFloatLiteral_Kind:
+ case Expression::kFunctionReference_Kind:
+ case Expression::kIntLiteral_Kind:
+ case Expression::kNullLiteral_Kind:
+ case Expression::kSetting_Kind:
+ case Expression::kTypeReference_Kind:
+ return false;
+ }
+ SkASSERT(false);
+ return false;
+}
+
+bool SectionAndParameterHelper::hasCoordOverrides(const Statement& s, const Variable& fp) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind: {
+ for (const auto& child : ((const Block&) s).fStatements) {
+ if (this->hasCoordOverrides(*child, fp)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case Statement::kVarDeclaration_Kind: {
+ const VarDeclaration& var = (const VarDeclaration&) s;
+ if (var.fValue) {
+ return hasCoordOverrides(*var.fValue, fp);
+ }
+ return false;
+ }
+ case Statement::kVarDeclarations_Kind: {
+ const VarDeclarations& decls = *((const VarDeclarationsStatement&) s).fDeclaration;
+ for (const auto& stmt : decls.fVars) {
+ if (this->hasCoordOverrides(*stmt, fp)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case Statement::kExpression_Kind:
+ return this->hasCoordOverrides(*((const ExpressionStatement&) s).fExpression, fp);
+ case Statement::kReturn_Kind: {
+ const ReturnStatement& r = (const ReturnStatement&) s;
+ if (r.fExpression) {
+ return this->hasCoordOverrides(*r.fExpression, fp);
+ }
+ return false;
+ }
+ case Statement::kIf_Kind: {
+ const IfStatement& i = (const IfStatement&) s;
+ return this->hasCoordOverrides(*i.fTest, fp) ||
+ this->hasCoordOverrides(*i.fIfTrue, fp) ||
+ (i.fIfFalse && this->hasCoordOverrides(*i.fIfFalse, fp));
+ }
+ case Statement::kFor_Kind: {
+ const ForStatement& f = (const ForStatement&) s;
+            // the initializer, test, and next clauses may each be null, e.g. in 'for (;;)'
+            return (f.fInitializer && this->hasCoordOverrides(*f.fInitializer, fp)) ||
+                   (f.fTest && this->hasCoordOverrides(*f.fTest, fp)) ||
+                   (f.fNext && this->hasCoordOverrides(*f.fNext, fp)) ||
+                   this->hasCoordOverrides(*f.fStatement, fp);
+ }
+ case Statement::kWhile_Kind: {
+ const WhileStatement& w = (const WhileStatement&) s;
+ return this->hasCoordOverrides(*w.fTest, fp) ||
+ this->hasCoordOverrides(*w.fStatement, fp);
+ }
+ case Statement::kDo_Kind: {
+ const DoStatement& d = (const DoStatement&) s;
+ return this->hasCoordOverrides(*d.fTest, fp) ||
+ this->hasCoordOverrides(*d.fStatement, fp);
+ }
+ case Statement::kSwitch_Kind: {
+ const SwitchStatement& sw = (const SwitchStatement&) s;
+ for (const auto& c : sw.fCases) {
+ for (const auto& st : c->fStatements) {
+ if (this->hasCoordOverrides(*st, fp)) {
+ return true;
+ }
+ }
+ }
+ return this->hasCoordOverrides(*sw.fValue, fp);
+ }
+ case Statement::kBreak_Kind:
+ case Statement::kContinue_Kind:
+ case Statement::kDiscard_Kind:
+ case Statement::kGroup_Kind:
+ case Statement::kNop_Kind:
+ return false;
+ }
+ SkASSERT(false);
+ return false;
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.h b/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.h
new file mode 100644
index 0000000000..3251024f2e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSectionAndParameterHelper.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SECTIONANDPARAMETERHELPER
+#define SKSL_SECTIONANDPARAMETERHELPER
+
+#include "src/sksl/SkSLErrorReporter.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSection.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include <unordered_map>
+#include <vector>
+
+namespace SkSL {
+
+#define CLASS_SECTION "class"
+#define CLONE_SECTION "clone"
+#define CONSTRUCTOR_SECTION "constructor"
+#define CONSTRUCTOR_CODE_SECTION "constructorCode"
+#define CONSTRUCTOR_PARAMS_SECTION "constructorParams"
+#define COORD_TRANSFORM_SECTION "coordTransform"
+#define CPP_SECTION "cpp"
+#define CPP_END_SECTION "cppEnd"
+#define HEADER_SECTION "header"
+#define HEADER_END_SECTION "headerEnd"
+#define EMIT_CODE_SECTION "emitCode"
+#define FIELDS_SECTION "fields"
+#define INITIALIZERS_SECTION "initializers"
+#define MAKE_SECTION "make"
+#define OPTIMIZATION_FLAGS_SECTION "optimizationFlags"
+#define SAMPLER_PARAMS_SECTION "samplerParams"
+#define SET_DATA_SECTION "setData"
+#define TEST_CODE_SECTION "test"
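+
+// These names correspond to '@section' blocks in .fp files, e.g. (hypothetical)
+// '@class { ... }', or '@setData(pdman) { ... }' for a section that takes an argument.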
+
+class SectionAndParameterHelper {
+public:
+ SectionAndParameterHelper(const Program* program, ErrorReporter& errors);
+
+ const Section* getSection(const char* name) {
+ SkASSERT(!SectionPermitsDuplicates(name));
+ auto found = fSections.find(name);
+ if (found == fSections.end()) {
+ return nullptr;
+ }
+ SkASSERT(found->second.size() == 1);
+ return found->second[0];
+ }
+
+ std::vector<const Section*> getSections(const char* name) {
+ auto found = fSections.find(name);
+ if (found == fSections.end()) {
+ return std::vector<const Section*>();
+ }
+ return found->second;
+ }
+
+ const std::vector<const Variable*>& getParameters() {
+ return fParameters;
+ }
+
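+    // Returns true if the program ever calls the builtin sample() on fp with an explicit
+    // float2 coordinate argument, e.g. (hypothetical SkSL) 'sample(fp, float2(x, y))'.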
+ bool hasCoordOverrides(const Variable& fp);
+
+ static bool IsParameter(const Variable& var) {
+ return (var.fModifiers.fFlags & Modifiers::kIn_Flag) &&
+ -1 == var.fModifiers.fLayout.fBuiltin;
+ }
+
+ static bool IsSupportedSection(const char* name) {
+ return !strcmp(name, CLASS_SECTION) ||
+ !strcmp(name, CLONE_SECTION) ||
+ !strcmp(name, CONSTRUCTOR_SECTION) ||
+ !strcmp(name, CONSTRUCTOR_CODE_SECTION) ||
+ !strcmp(name, CONSTRUCTOR_PARAMS_SECTION) ||
+ !strcmp(name, COORD_TRANSFORM_SECTION) ||
+ !strcmp(name, CPP_SECTION) ||
+ !strcmp(name, CPP_END_SECTION) ||
+ !strcmp(name, EMIT_CODE_SECTION) ||
+ !strcmp(name, FIELDS_SECTION) ||
+ !strcmp(name, HEADER_SECTION) ||
+ !strcmp(name, HEADER_END_SECTION) ||
+ !strcmp(name, INITIALIZERS_SECTION) ||
+ !strcmp(name, MAKE_SECTION) ||
+ !strcmp(name, OPTIMIZATION_FLAGS_SECTION) ||
+ !strcmp(name, SAMPLER_PARAMS_SECTION) ||
+ !strcmp(name, SET_DATA_SECTION) ||
+ !strcmp(name, TEST_CODE_SECTION);
+ }
+
+ static bool SectionAcceptsArgument(const char* name) {
+ return !strcmp(name, COORD_TRANSFORM_SECTION) ||
+ !strcmp(name, SAMPLER_PARAMS_SECTION) ||
+ !strcmp(name, SET_DATA_SECTION) ||
+ !strcmp(name, TEST_CODE_SECTION);
+ }
+
+ static bool SectionRequiresArgument(const char* name) {
+ return !strcmp(name, SAMPLER_PARAMS_SECTION) ||
+ !strcmp(name, SET_DATA_SECTION) ||
+ !strcmp(name, TEST_CODE_SECTION);
+ }
+
+ static bool SectionPermitsDuplicates(const char* name) {
+ return !strcmp(name, COORD_TRANSFORM_SECTION) ||
+ !strcmp(name, SAMPLER_PARAMS_SECTION);
+ }
+
+private:
+ bool hasCoordOverrides(const Statement& s, const Variable& fp);
+
+ bool hasCoordOverrides(const Expression& e, const Variable& fp);
+
+ bool hasCoordOverrides(const ProgramElement& p, const Variable& fp);
+
+ const Program& fProgram;
+ std::vector<const Variable*> fParameters;
+ std::unordered_map<String, std::vector<const Section*>> fSections;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLString.cpp b/gfx/skia/skia/src/sksl/SkSLString.cpp
new file mode 100644
index 0000000000..88eb1c7d3b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLString.cpp
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLString.h"
+
+#include "src/sksl/SkSLUtil.h"
+#include <algorithm>
+#include <errno.h>
+#include <limits.h>
+#include <locale>
+#include <sstream>
+#include <string>
+
+namespace SkSL {
+
+String String::printf(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ String result;
+ result.vappendf(fmt, args);
+ va_end(args);
+ return result;
+}
+
+void String::appendf(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ this->vappendf(fmt, args);
+ va_end(args);
+}
+
+void String::reset() {
+ this->clear();
+}
+
+int String::findLastOf(const char c) const {
+ // Rely on find_last_of and remap the output
+ size_t index = this->find_last_of(c);
+ return (index == std::string::npos ? -1 : index);
+}
+
+void String::vappendf(const char* fmt, va_list args) {
+#ifdef SKSL_BUILD_FOR_WIN
+ #define VSNPRINTF _vsnprintf
+#else
+ #define VSNPRINTF vsnprintf
+#endif
+ #define BUFFER_SIZE 256
+ char buffer[BUFFER_SIZE];
+ va_list reuse;
+ va_copy(reuse, args);
+    size_t size = VSNPRINTF(buffer, BUFFER_SIZE, fmt, args);
+    // vsnprintf returns the full (untruncated) length; the stack buffer holds the complete
+    // result only when size < BUFFER_SIZE, since one byte is reserved for the NUL terminator
+    if (size < BUFFER_SIZE) {
+ this->append(buffer, size);
+ } else {
+ auto newBuffer = std::unique_ptr<char[]>(new char[size + 1]);
+ VSNPRINTF(newBuffer.get(), size + 1, fmt, reuse);
+ this->append(newBuffer.get(), size);
+ }
+ va_end(reuse);
+}
+
+
+bool String::startsWith(const char* s) const {
+ return !strncmp(c_str(), s, strlen(s));
+}
+
+bool String::endsWith(const char* s) const {
+ size_t len = strlen(s);
+ if (size() < len) {
+ return false;
+ }
+ return !strncmp(c_str() + size() - len, s, len);
+}
+
+int String::find(const String& substring, int fromPos) const {
+ return find(substring.c_str(), fromPos);
+}
+
+int String::find(const char* substring, int fromPos) const {
+ SkASSERT(fromPos >= 0);
+ size_t found = INHERITED::find(substring, (size_t) fromPos);
+ return found == std::string::npos ? -1 : found;
+}
+
+String String::operator+(const char* s) const {
+ String result(*this);
+ result.append(s);
+ return result;
+}
+
+String String::operator+(const String& s) const {
+ String result(*this);
+ result.append(s);
+ return result;
+}
+
+String String::operator+(StringFragment s) const {
+ String result(*this);
+ result.append(s.fChars, s.fLength);
+ return result;
+}
+
+String& String::operator+=(char c) {
+ INHERITED::operator+=(c);
+ return *this;
+}
+
+String& String::operator+=(const char* s) {
+ INHERITED::operator+=(s);
+ return *this;
+}
+
+String& String::operator+=(const String& s) {
+ INHERITED::operator+=(s);
+ return *this;
+}
+
+String& String::operator+=(StringFragment s) {
+ this->append(s.fChars, s.fLength);
+ return *this;
+}
+
+bool String::operator==(const String& s) const {
+ return this->size() == s.size() && !memcmp(c_str(), s.c_str(), this->size());
+}
+
+bool String::operator!=(const String& s) const {
+ return !(*this == s);
+}
+
+bool String::operator==(const char* s) const {
+ return this->size() == strlen(s) && !memcmp(c_str(), s, this->size());
+}
+
+bool String::operator!=(const char* s) const {
+ return !(*this == s);
+}
+
+String operator+(const char* s1, const String& s2) {
+ String result(s1);
+ result.append(s2);
+ return result;
+}
+
+bool operator==(const char* s1, const String& s2) {
+ return s2 == s1;
+}
+
+bool operator!=(const char* s1, const String& s2) {
+ return s2 != s1;
+}
+
+bool StringFragment::operator==(StringFragment s) const {
+ if (fLength != s.fLength) {
+ return false;
+ }
+ return !memcmp(fChars, s.fChars, fLength);
+}
+
+bool StringFragment::operator!=(StringFragment s) const {
+ if (fLength != s.fLength) {
+ return true;
+ }
+ return memcmp(fChars, s.fChars, fLength);
+}
+
+bool StringFragment::operator==(const char* s) const {
+ for (size_t i = 0; i < fLength; ++i) {
+ if (fChars[i] != s[i]) {
+ return false;
+ }
+ }
+ return 0 == s[fLength];
+}
+
+bool StringFragment::operator!=(const char* s) const {
+ for (size_t i = 0; i < fLength; ++i) {
+ if (fChars[i] != s[i]) {
+ return true;
+ }
+ }
+ return 0 != s[fLength];
+}
+
+bool StringFragment::operator<(StringFragment other) const {
+ int comparison = strncmp(fChars, other.fChars, std::min(fLength, other.fLength));
+ if (comparison) {
+ return comparison < 0;
+ }
+ return fLength < other.fLength;
+}
+
+bool operator==(const char* s1, StringFragment s2) {
+ return s2 == s1;
+}
+
+bool operator!=(const char* s1, StringFragment s2) {
+ return s2 != s1;
+}
+
+String to_string(int32_t value) {
+ return SkSL::String::printf("%d", value);
+}
+
+String to_string(uint32_t value) {
+ return SkSL::String::printf("%u", value);
+}
+
+String to_string(int64_t value) {
+ std::stringstream buffer;
+ buffer << value;
+ return String(buffer.str().c_str());
+}
+
+String to_string(uint64_t value) {
+ std::stringstream buffer;
+ buffer << value;
+ return String(buffer.str().c_str());
+}
+
+String to_string(double value) {
+ std::stringstream buffer;
+ buffer.imbue(std::locale::classic());
+ buffer.precision(17);
+ buffer << value;
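+    // if the printed form contains neither a decimal point nor an exponent, append ".0" so
+    // that it reads back as a floating-point literal rather than an integer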
+ bool needsDotZero = true;
+ const std::string str = buffer.str();
+ for (int i = str.size() - 1; i >= 0; --i) {
+ char c = str[i];
+ if (c == '.' || c == 'e') {
+ needsDotZero = false;
+ break;
+ }
+ }
+ if (needsDotZero) {
+ buffer << ".0";
+ }
+ return String(buffer.str().c_str());
+}
+
+SKSL_INT stoi(const String& s) {
+ char* p;
+ SkDEBUGCODE(errno = 0;)
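+    // base-0 strtoul also accepts hex constants (e.g. 0xFFFFFFFF) that would overflow strtol
+    // on platforms where long is 32 bits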
+ long result = strtoul(s.c_str(), &p, 0);
+ SkASSERT(*p == 0);
+ SkASSERT(!errno);
+ return result;
+}
+
+SKSL_FLOAT stod(const String& s) {
+ double result;
+ std::string str(s.c_str(), s.size());
+ std::stringstream buffer(str);
+ buffer.imbue(std::locale::classic());
+ buffer >> result;
+ SkASSERT(!buffer.fail());
+ return result;
+}
+
+long stol(const String& s) {
+ char* p;
+ SkDEBUGCODE(errno = 0;)
+ long result = strtoul(s.c_str(), &p, 0);
+ SkASSERT(*p == 0);
+ SkASSERT(!errno);
+ return result;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLString.h b/gfx/skia/skia/src/sksl/SkSLString.h
new file mode 100644
index 0000000000..c60a74877d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLString.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRING
+#define SKSL_STRING
+
+#include "src/sksl/SkSLDefines.h"
+#include <cstring>
+#include <stdarg.h>
+#include <string>
+
+#ifndef SKSL_STANDALONE
+#include "include/core/SkString.h"
+#endif
+
+namespace SkSL {
+
+// Represents a (not necessarily null-terminated) slice of a string.
+struct StringFragment {
+ StringFragment()
+ : fChars("")
+ , fLength(0) {}
+
+ StringFragment(const char* chars)
+ : fChars(chars)
+ , fLength(strlen(chars)) {}
+
+ StringFragment(const char* chars, size_t length)
+ : fChars(chars)
+ , fLength(length) {}
+
+ char operator[](size_t idx) const {
+ return fChars[idx];
+ }
+
+ bool operator==(const char* s) const;
+ bool operator!=(const char* s) const;
+ bool operator==(StringFragment s) const;
+ bool operator!=(StringFragment s) const;
+ bool operator<(StringFragment s) const;
+
+#ifndef SKSL_STANDALONE
+ operator SkString() const { return SkString(fChars, fLength); }
+#endif
+
+ const char* fChars;
+ size_t fLength;
+};
+
+bool operator==(const char* s1, StringFragment s2);
+
+bool operator!=(const char* s1, StringFragment s2);
+
+class SK_API String : public std::string {
+public:
+ String() = default;
+ String(const String&) = default;
+ String(String&&) = default;
+ String& operator=(const String&) = default;
+ String& operator=(String&&) = default;
+
+ String(const char* s)
+ : INHERITED(s) {}
+
+ String(const char* s, size_t size)
+ : INHERITED(s, size) {}
+
+ String(StringFragment s)
+ : INHERITED(s.fChars, s.fLength) {}
+
+ static String printf(const char* fmt, ...);
+
+ void appendf(const char* fmt, ...);
+    // For API compatibility with SkString's reset() (vs. std::string's clear())
+    void reset();
+    // For API compatibility with SkString's findLastOf() (vs. find_last_of(), which returns
+    // size_t rather than int)
+ int findLastOf(const char c) const;
+
+ void vappendf(const char* fmt, va_list va);
+
+ bool startsWith(const char* s) const;
+ bool endsWith(const char* s) const;
+
+ int find(const char* substring, int fromPos = 0) const;
+ int find(const String& substring, int fromPos = 0) const;
+
+ String operator+(const char* s) const;
+ String operator+(const String& s) const;
+ String operator+(StringFragment s) const;
+ String& operator+=(char c);
+ String& operator+=(const char* s);
+ String& operator+=(const String& s);
+ String& operator+=(StringFragment s);
+ bool operator==(const char* s) const;
+ bool operator!=(const char* s) const;
+ bool operator==(const String& s) const;
+ bool operator!=(const String& s) const;
+ friend String operator+(const char* s1, const String& s2);
+ friend bool operator==(const char* s1, const String& s2);
+ friend bool operator!=(const char* s1, const String& s2);
+
+#ifndef SKSL_STANDALONE
+ operator SkString() const { return SkString(c_str()); }
+#endif
+
+private:
+ typedef std::string INHERITED;
+};
+
+String operator+(const char* s1, const String& s2);
+bool operator!=(const char* s1, const String& s2);
+
+String to_string(double value);
+
+String to_string(int32_t value);
+
+String to_string(uint32_t value);
+
+String to_string(int64_t value);
+
+String to_string(uint64_t value);
+
+SKSL_INT stoi(const String& s);
+
+SKSL_FLOAT stod(const String& s);
+
+long stol(const String& s);
+
+} // namespace SkSL
+
+namespace std {
+ template<> struct hash<SkSL::StringFragment> {
+ size_t operator()(const SkSL::StringFragment& s) const {
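+            // simple polynomial rolling hash (base 101) over the fragment's bytes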
+ size_t result = 0;
+ for (size_t i = 0; i < s.fLength; ++i) {
+ result = result * 101 + s.fChars[i];
+ }
+ return result;
+ }
+ };
+
+ template<> struct hash<SkSL::String> {
+ size_t operator()(const SkSL::String& s) const {
+ return hash<std::string>{}(s);
+ }
+ };
+} // namespace std
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLStringStream.h b/gfx/skia/skia/src/sksl/SkSLStringStream.h
new file mode 100644
index 0000000000..d34d08dd3d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLStringStream.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRINGSTREAM
+#define SKSL_STRINGSTREAM
+
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLString.h"
+
+#ifdef SKSL_STANDALONE
+
+namespace SkSL {
+
+class StringStream : public OutputStream {
+public:
+ void write8(uint8_t b) override {
+ fBuffer += (char) b;
+ }
+
+ void writeText(const char* s) override {
+ fBuffer += s;
+ }
+
+ void write(const void* s, size_t size) override {
+ fBuffer.append((const char*) s, size);
+ }
+
+ const String& str() const {
+ return fBuffer;
+ }
+
+ void reset() {
+ fBuffer = "";
+ }
+
+private:
+ String fBuffer;
+};
+
+#else
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+
+namespace SkSL {
+
+class StringStream : public OutputStream {
+public:
+ void write8(uint8_t b) override {
+ fStream.write8(b);
+ }
+
+ void writeText(const char* s) override {
+ fStream.writeText(s);
+ }
+
+ void write(const void* s, size_t size) override {
+ fStream.write(s, size);
+ }
+
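+    // note: the first call to str() detaches the stream's accumulated data into fString, so
+    // writes made after that call will not appear in subsequent str() results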
+ const String& str() const {
+ if (!fString.size()) {
+ sk_sp<SkData> data = fStream.detachAsData();
+ fString = String((const char*) data->data(), data->size());
+ }
+ return fString;
+ }
+
+ void reset() {
+ fStream.reset();
+ fString = "";
+ }
+
+private:
+ mutable SkDynamicMemoryWStream fStream;
+ mutable String fString;
+};
+
+#endif // SKSL_STANDALONE
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.cpp b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
new file mode 100644
index 0000000000..b2c516245d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLUtil.h"
+
+#include "src/sksl/SkSLStringStream.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+namespace SkSL {
+
+#if defined(SKSL_STANDALONE) || !SK_SUPPORT_GPU
+StandaloneShaderCaps standaloneCaps;
+#endif
+
+void sksl_abort() {
+#ifdef SKSL_STANDALONE
+ abort();
+#else
+ sk_abort_no_print();
+ exit(1);
+#endif
+}
+
+void write_stringstream(const StringStream& s, OutputStream& out) {
+ out.write(s.str().c_str(), s.str().size());
+}
+
+bool is_assignment(Token::Kind op) {
+ switch (op) {
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALANDEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Token::Kind remove_assignment(Token::Kind op) {
+ switch (op) {
+ case Token::PLUSEQ: return Token::PLUS;
+ case Token::MINUSEQ: return Token::MINUS;
+ case Token::STAREQ: return Token::STAR;
+ case Token::SLASHEQ: return Token::SLASH;
+ case Token::PERCENTEQ: return Token::PERCENT;
+ case Token::SHLEQ: return Token::SHL;
+ case Token::SHREQ: return Token::SHR;
+ case Token::BITWISEOREQ: return Token::BITWISEOR;
+ case Token::BITWISEXOREQ: return Token::BITWISEXOR;
+ case Token::BITWISEANDEQ: return Token::BITWISEAND;
+ case Token::LOGICALOREQ: return Token::LOGICALOR;
+ case Token::LOGICALXOREQ: return Token::LOGICALXOR;
+ case Token::LOGICALANDEQ: return Token::LOGICALAND;
+ default: return op;
+ }
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.h b/gfx/skia/skia/src/sksl/SkSLUtil.h
new file mode 100644
index 0000000000..60057b6c09
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.h
@@ -0,0 +1,396 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_UTIL
+#define SKSL_UTIL
+
+#include <cstdarg>
+#include <memory>
+#include "stdlib.h"
+#include "string.h"
+#include "src/sksl/SkSLDefines.h"
+#include "src/sksl/SkSLLexer.h"
+
+#ifndef SKSL_STANDALONE
+#include "include/core/SkTypes.h"
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/GrShaderCaps.h"
+#endif // SK_SUPPORT_GPU
+#endif // SKSL_STANDALONE
+
+class GrShaderCaps;
+
+namespace SkSL {
+
+class OutputStream;
+class StringStream;
+
+#if defined(SKSL_STANDALONE) || !SK_SUPPORT_GPU
+
+// we're being compiled standalone, so we don't have access to caps...
+enum GrGLSLGeneration {
+ k110_GrGLSLGeneration,
+ k130_GrGLSLGeneration,
+ k140_GrGLSLGeneration,
+ k150_GrGLSLGeneration,
+ k330_GrGLSLGeneration,
+ k400_GrGLSLGeneration,
+ k420_GrGLSLGeneration,
+ k310es_GrGLSLGeneration,
+ k320es_GrGLSLGeneration,
+};
+
+#define SKSL_CAPS_CLASS StandaloneShaderCaps
+class StandaloneShaderCaps {
+public:
+ GrGLSLGeneration generation() const {
+ return k400_GrGLSLGeneration;
+ }
+
+ bool atan2ImplementedAsAtanYOverX() const {
+ return false;
+ }
+
+ bool canUseMinAndAbsTogether() const {
+ return true;
+ }
+
+ bool mustForceNegatedAtanParamToFloat() const {
+ return false;
+ }
+
+ bool shaderDerivativeSupport() const {
+ return true;
+ }
+
+ bool usesPrecisionModifiers() const {
+ return true;
+ }
+
+ bool mustDeclareFragmentShaderOutput() const {
+ return true;
+ }
+
+ bool fbFetchSupport() const {
+ return true;
+ }
+
+ bool fbFetchNeedsCustomOutput() const {
+ return false;
+ }
+
+ bool flatInterpolationSupport() const {
+ return true;
+ }
+
+ bool noperspectiveInterpolationSupport() const {
+ return true;
+ }
+
+ bool multisampleInterpolationSupport() const {
+ return true;
+ }
+
+ bool sampleVariablesSupport() const {
+ return true;
+ }
+
+ bool externalTextureSupport() const {
+ return true;
+ }
+
+ bool mustDoOpBetweenFloorAndAbs() const {
+ return false;
+ }
+
+ bool mustEnableAdvBlendEqs() const {
+ return false;
+ }
+
+ bool mustEnableSpecificAdvBlendEqs() const {
+ return false;
+ }
+
+ bool canUseAnyFunctionInShader() const {
+ return false;
+ }
+
+ bool noDefaultPrecisionForExternalSamplers() const {
+ return false;
+ }
+
+ bool floatIs32Bits() const {
+ return true;
+ }
+
+ bool integerSupport() const {
+ return false;
+ }
+
+ bool builtinFMASupport() const {
+ return true;
+ }
+
+ const char* shaderDerivativeExtensionString() const {
+ return nullptr;
+ }
+
+ const char* fragCoordConventionsExtensionString() const {
+ return nullptr;
+ }
+
+ const char* geometryShaderExtensionString() const {
+ return nullptr;
+ }
+
+ const char* gsInvocationsExtensionString() const {
+ return nullptr;
+ }
+
+ const char* externalTextureExtensionString() const {
+ return nullptr;
+ }
+
+ const char* secondExternalTextureExtensionString() const {
+ return nullptr;
+ }
+
+ const char* versionDeclString() const {
+ return "";
+ }
+
+ bool gsInvocationsSupport() const {
+ return true;
+ }
+
+ bool canUseFractForNegativeValues() const {
+ return true;
+ }
+
+ bool canUseFragCoord() const {
+ return true;
+ }
+
+ bool incompleteShortIntPrecision() const {
+ return false;
+ }
+
+ bool addAndTrueToLoopCondition() const {
+ return false;
+ }
+
+ bool unfoldShortCircuitAsTernary() const {
+ return false;
+ }
+
+ bool emulateAbsIntFunction() const {
+ return false;
+ }
+
+ bool rewriteDoWhileLoops() const {
+ return false;
+ }
+
+ bool removePowWithConstantExponent() const {
+ return false;
+ }
+
+ const char* fbFetchColorName() const {
+ return nullptr;
+ }
+};
+
+extern StandaloneShaderCaps standaloneCaps;
+
+#else
+
+#define SKSL_CAPS_CLASS GrShaderCaps
+// Various sets of caps for use in tests
+class ShaderCapsFactory {
+public:
+ static sk_sp<GrShaderCaps> Default() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fShaderDerivativeSupport = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> Version450Core() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 450 core";
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> Version110() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 110";
+ result->fGLSLGeneration = GrGLSLGeneration::k110_GrGLSLGeneration;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> UsesPrecisionModifiers() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fUsesPrecisionModifiers = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> CannotUseMinAndAbsTogether() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fCanUseMinAndAbsTogether = false;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> CannotUseFractForNegativeValues() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fCanUseFractForNegativeValues = false;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> MustForceNegatedAtanParamToFloat() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fMustForceNegatedAtanParamToFloat = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> ShaderDerivativeExtensionString() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fShaderDerivativeSupport = true;
+ result->fShaderDerivativeExtensionString = "GL_OES_standard_derivatives";
+ result->fUsesPrecisionModifiers = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> FragCoordsOld() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 110";
+ result->fGLSLGeneration = GrGLSLGeneration::k110_GrGLSLGeneration;
+ result->fFragCoordConventionsExtensionString = "GL_ARB_fragment_coord_conventions";
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> FragCoordsNew() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fFragCoordConventionsExtensionString = "GL_ARB_fragment_coord_conventions";
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> GeometryShaderSupport() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fGeometryShaderSupport = true;
+ result->fGSInvocationsSupport = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> NoGSInvocationsSupport() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fGeometryShaderSupport = true;
+ result->fGSInvocationsSupport = false;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> GeometryShaderExtensionString() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 310es";
+ result->fGeometryShaderSupport = true;
+ result->fGeometryShaderExtensionString = "GL_EXT_geometry_shader";
+ result->fGSInvocationsSupport = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> GSInvocationsExtensionString() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fGeometryShaderSupport = true;
+ result->fGSInvocationsSupport = true;
+ result->fGSInvocationsExtensionString = "GL_ARB_gpu_shader5";
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> VariousCaps() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fExternalTextureSupport = true;
+ result->fFBFetchSupport = false;
+ result->fCanUseAnyFunctionInShader = false;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> CannotUseFragCoord() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fCanUseFragCoord = false;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> IncompleteShortIntPrecision() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 310es";
+ result->fUsesPrecisionModifiers = true;
+ result->fIncompleteShortIntPrecision = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> AddAndTrueToLoopCondition() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fAddAndTrueToLoopCondition = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> UnfoldShortCircuitAsTernary() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fUnfoldShortCircuitAsTernary = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> EmulateAbsIntFunction() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fEmulateAbsIntFunction = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> RewriteDoWhileLoops() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fRewriteDoWhileLoops = true;
+ return result;
+ }
+
+ static sk_sp<GrShaderCaps> RemovePowWithConstantExponent() {
+ sk_sp<GrShaderCaps> result = sk_make_sp<GrShaderCaps>(GrContextOptions());
+ result->fVersionDeclString = "#version 400";
+ result->fRemovePowWithConstantExponent = true;
+ return result;
+ }
+};
+#endif
+
+void write_stringstream(const StringStream& d, OutputStream& out);
+
+// Returns true if op is '=' or any compound assignment operator ('+=', '-=', etc.)
+bool is_assignment(Token::Kind op);
+
+// Given a compound assignment operator, returns the non-assignment version of the operator (e.g.
+// '+=' becomes '+')
+Token::Kind remove_assignment(Token::Kind op);
+
+NORETURN void sksl_abort();
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
new file mode 100644
index 0000000000..4ab25d2287
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BINARYEXPRESSION
+#define SKSL_BINARYEXPRESSION
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A binary operation.
+ */
+struct BinaryExpression : public Expression {
+ BinaryExpression(int offset, std::unique_ptr<Expression> left, Token::Kind op,
+ std::unique_ptr<Expression> right, const Type& type)
+ : INHERITED(offset, kBinary_Kind, type)
+ , fLeft(std::move(left))
+ , fOperator(op)
+ , fRight(std::move(right)) {}
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override {
+ return irGenerator.constantFold(*fLeft,
+ fOperator,
+ *fRight);
+ }
+
+ bool hasSideEffects() const override {
+ return Compiler::IsAssignment(fOperator) || fLeft->hasSideEffects() ||
+ fRight->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new BinaryExpression(fOffset, fLeft->clone(), fOperator,
+ fRight->clone(), fType));
+ }
+
+ String description() const override {
+ return "(" + fLeft->description() + " " + Compiler::OperatorName(fOperator) + " " +
+ fRight->description() + ")";
+ }
+
+ std::unique_ptr<Expression> fLeft;
+ const Token::Kind fOperator;
+ std::unique_ptr<Expression> fRight;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
new file mode 100644
index 0000000000..8a4449a01c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BLOCK
+#define SKSL_BLOCK
+
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A block of multiple statements functioning as a single statement.
+ */
+struct Block : public Statement {
+ Block(int offset, std::vector<std::unique_ptr<Statement>> statements,
+ const std::shared_ptr<SymbolTable> symbols = nullptr)
+ : INHERITED(offset, kBlock_Kind)
+ , fSymbols(std::move(symbols))
+ , fStatements(std::move(statements)) {}
+
+ bool isEmpty() const override {
+ for (const auto& s : fStatements) {
+ if (!s->isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ std::vector<std::unique_ptr<Statement>> cloned;
+ for (const auto& s : fStatements) {
+ cloned.push_back(s->clone());
+ }
+ return std::unique_ptr<Statement>(new Block(fOffset, std::move(cloned), fSymbols));
+ }
+
+ String description() const override {
+ String result("{");
+ for (size_t i = 0; i < fStatements.size(); i++) {
+ result += "\n";
+ result += fStatements[i]->description();
+ }
+ result += "\n}\n";
+ return result;
+ }
+
+ // it's important to keep fStatements defined after (and thus destroyed before) fSymbols,
+ // because destroying statements can modify reference counts in symbols
+ const std::shared_ptr<SymbolTable> fSymbols;
+ std::vector<std::unique_ptr<Statement>> fStatements;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h
new file mode 100644
index 0000000000..b99aec89c4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BOOLLITERAL
+#define SKSL_BOOLLITERAL
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents 'true' or 'false'.
+ */
+struct BoolLiteral : public Expression {
+ BoolLiteral(const Context& context, int offset, bool value)
+ : INHERITED(offset, kBoolLiteral_Kind, *context.fBool_Type)
+ , fValue(value) {}
+
+ String description() const override {
+ return String(fValue ? "true" : "false");
+ }
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ bool compareConstant(const Context& context, const Expression& other) const override {
+ BoolLiteral& b = (BoolLiteral&) other;
+ return fValue == b.fValue;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new BoolLiteral(fOffset, fValue, &fType));
+ }
+
+ const bool fValue;
+
+ typedef Expression INHERITED;
+
+private:
+ BoolLiteral(int offset, bool value, const Type* type)
+ : INHERITED(offset, kBoolLiteral_Kind, *type)
+ , fValue(value) {}
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
new file mode 100644
index 0000000000..ae0c1987e9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BREAKSTATEMENT
+#define SKSL_BREAKSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'break' statement.
+ */
+struct BreakStatement : public Statement {
+ BreakStatement(int offset)
+ : INHERITED(offset, kBreak_Kind) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new BreakStatement(fOffset));
+ }
+
+ String description() const override {
+ return String("break;");
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
new file mode 100644
index 0000000000..8d7a4989da
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR
+#define SKSL_CONSTRUCTOR
+
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents the construction of a compound type, such as "float2(x, y)".
+ *
+ * Vector constructors will always consist of either exactly 1 scalar, or a collection of vectors
+ * and scalars totalling exactly the right number of scalar components.
+ *
+ * Matrix constructors will always consist of either exactly 1 scalar, exactly 1 matrix, or a
+ * collection of vectors and scalars totalling exactly the right number of scalar components.
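+ *
+ * For example (in SkSL), 'float3(1.0)' is a vector constructor with exactly one scalar, while
+ * 'float4(float2(x, y), 0, 1)' combines a vector and scalars totalling four components.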
+ */
+struct Constructor : public Expression {
+ Constructor(int offset, const Type& type, std::vector<std::unique_ptr<Expression>> arguments)
+ : INHERITED(offset, kConstructor_Kind, type)
+ , fArguments(std::move(arguments)) {}
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override {
+ if (fArguments.size() == 1 && fArguments[0]->fKind == Expression::kIntLiteral_Kind) {
+ if (fType.isFloat()) {
+ // promote float(1) to 1.0
+ int64_t intValue = ((IntLiteral&) *fArguments[0]).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(irGenerator.fContext,
+ fOffset,
+ intValue));
+ } else if (fType.isInteger()) {
+ // promote uint(1) to 1u
+ int64_t intValue = ((IntLiteral&) *fArguments[0]).fValue;
+ return std::unique_ptr<Expression>(new IntLiteral(fOffset,
+ intValue,
+ &fType));
+ }
+ }
+ return nullptr;
+ }
+
+ bool hasSideEffects() const override {
+ for (const auto& arg : fArguments) {
+ if (arg->hasSideEffects()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ std::vector<std::unique_ptr<Expression>> cloned;
+ for (const auto& arg : fArguments) {
+ cloned.push_back(arg->clone());
+ }
+ return std::unique_ptr<Expression>(new Constructor(fOffset, fType, std::move(cloned)));
+ }
+
+ String description() const override {
+ String result = fType.description() + "(";
+ String separator;
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ result += separator;
+ result += fArguments[i]->description();
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+
+ bool isConstant() const override {
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ if (!fArguments[i]->isConstant()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool compareConstant(const Context& context, const Expression& other) const override {
+ SkASSERT(other.fKind == Expression::kConstructor_Kind && other.fType == fType);
+ Constructor& c = (Constructor&) other;
+ if (c.fType.kind() == Type::kVector_Kind) {
+ bool isFloat = c.fType.columns() > 1 ? c.fType.componentType().isFloat()
+ : c.fType.isFloat();
+ for (int i = 0; i < fType.columns(); i++) {
+ if (isFloat) {
+ if (this->getFVecComponent(i) != c.getFVecComponent(i)) {
+ return false;
+ }
+ } else if (this->getIVecComponent(i) != c.getIVecComponent(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ // shouldn't be possible to have a constant constructor that isn't a vector or matrix;
+ // a constant scalar constructor should have been collapsed down to the appropriate
+ // literal
+ SkASSERT(fType.kind() == Type::kMatrix_Kind);
+ for (int col = 0; col < fType.columns(); col++) {
+ for (int row = 0; row < fType.rows(); row++) {
+ if (getMatComponent(col, row) != c.getMatComponent(col, row)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
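+    /**
+     * For a constant vector constructor, returns the n'th scalar component, recursing into
+     * nested constructors (and negated constructors) as necessary. For example, for
+     * 'float3(float2(1, 2), 3)', component 1 is 2.
+     */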
+ template<typename type>
+ type getVecComponent(int index) const {
+ SkASSERT(fType.kind() == Type::kVector_Kind);
+ if (fArguments.size() == 1 && fArguments[0]->fType.kind() == Type::kScalar_Kind) {
+ if (std::is_floating_point<type>::value) {
+ return fArguments[0]->getConstantFloat();
+ } else {
+ return fArguments[0]->getConstantInt();
+ }
+ }
+ int current = 0;
+ for (const auto& arg : fArguments) {
+ SkASSERT(current <= index);
+ if (arg->fType.kind() == Type::kScalar_Kind) {
+ if (index == current) {
+ if (std::is_floating_point<type>::value) {
+ return arg.get()->getConstantFloat();
+ } else {
+ return arg.get()->getConstantInt();
+ }
+ }
+ current++;
+ } else if (arg->fKind == kConstructor_Kind) {
+ if (current + arg->fType.columns() > index) {
+ return ((const Constructor&) *arg).getVecComponent<type>(index - current);
+ }
+ current += arg->fType.columns();
+ } else {
+ if (current + arg->fType.columns() > index) {
+ SkASSERT(arg->fKind == kPrefix_Kind);
+ const PrefixExpression& p = (PrefixExpression&) *arg;
+ const Constructor& c = (const Constructor&) *p.fOperand;
+ return -c.getVecComponent<type>(index - current);
+ }
+ current += arg->fType.columns();
+ }
+ }
+ ABORT("failed to find vector component %d in %s\n", index, description().c_str());
+ }
+
+ SKSL_FLOAT getFVecComponent(int n) const override {
+ return this->getVecComponent<SKSL_FLOAT>(n);
+ }
+
+ /**
+ * For a literal vector expression, return the integer value of the n'th vector component. It is
+ * an error to call this method on an expression which is not a literal vector.
+ */
+ SKSL_INT getIVecComponent(int n) const override {
+ return this->getVecComponent<SKSL_INT>(n);
+ }
+
+ SKSL_FLOAT getMatComponent(int col, int row) const override {
+ SkASSERT(this->isConstant());
+ SkASSERT(fType.kind() == Type::kMatrix_Kind);
+ SkASSERT(col < fType.columns() && row < fType.rows());
+ if (fArguments.size() == 1) {
+ if (fArguments[0]->fType.kind() == Type::kScalar_Kind) {
+ // single scalar argument, so matrix is of the form:
+ // x 0 0
+ // 0 x 0
+ // 0 0 x
+ // return x if col == row
+ return col == row ? fArguments[0]->getConstantFloat() : 0.0;
+ }
+ if (fArguments[0]->fType.kind() == Type::kMatrix_Kind) {
+ SkASSERT(fArguments[0]->fKind == Expression::kConstructor_Kind);
+ // single matrix argument. make sure we're within the argument's bounds.
+ const Type& argType = ((Constructor&) *fArguments[0]).fType;
+ if (col < argType.columns() && row < argType.rows()) {
+ // within bounds, defer to argument
+ return ((Constructor&) *fArguments[0]).getMatComponent(col, row);
+ }
+ // out of bounds
+ return 0.0;
+ }
+ }
+ int currentIndex = 0;
+ int targetIndex = col * fType.rows() + row;
+ for (const auto& arg : fArguments) {
+ SkASSERT(targetIndex >= currentIndex);
+ SkASSERT(arg->fType.rows() == 1);
+ if (currentIndex + arg->fType.columns() > targetIndex) {
+ if (arg->fType.columns() == 1) {
+ return arg->getConstantFloat();
+ } else {
+ return arg->getFVecComponent(targetIndex - currentIndex);
+ }
+ }
+ currentIndex += arg->fType.columns();
+ }
+ ABORT("can't happen, matrix component out of bounds");
+ }
+
+ std::vector<std::unique_ptr<Expression>> fArguments;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
new file mode 100644
index 0000000000..ecd9c3f497
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTINUESTATEMENT
+#define SKSL_CONTINUESTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'continue' statement.
+ */
+struct ContinueStatement : public Statement {
+ ContinueStatement(int offset)
+ : INHERITED(offset, kContinue_Kind) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new ContinueStatement(fOffset));
+ }
+
+ String description() const override {
+ return String("continue;");
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
new file mode 100644
index 0000000000..40f625c178
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DISCARDSTATEMENT
+#define SKSL_DISCARDSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'discard' statement.
+ */
+struct DiscardStatement : public Statement {
+ DiscardStatement(int offset)
+ : INHERITED(offset, kDiscard_Kind) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new DiscardStatement(fOffset));
+ }
+
+ String description() const override {
+ return String("discard;");
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
new file mode 100644
index 0000000000..5cab5c8bcd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DOSTATEMENT
+#define SKSL_DOSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'do' statement.
+ */
+struct DoStatement : public Statement {
+ DoStatement(int offset, std::unique_ptr<Statement> statement,
+ std::unique_ptr<Expression> test)
+ : INHERITED(offset, kDo_Kind)
+ , fStatement(std::move(statement))
+ , fTest(std::move(test)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new DoStatement(fOffset, fStatement->clone(),
+ fTest->clone()));
+ }
+
+ String description() const override {
+ return "do " + fStatement->description() + " while (" + fTest->description() + ");";
+ }
+
+ std::unique_ptr<Statement> fStatement;
+ std::unique_ptr<Expression> fTest;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLEnum.h b/gfx/skia/skia/src/sksl/ir/SkSLEnum.h
new file mode 100644
index 0000000000..a22db36577
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLEnum.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ENUM
+#define SKSL_ENUM
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <algorithm>
+#include <vector>
+
+namespace SkSL {
+
+struct Symbol;
+
+struct Enum : public ProgramElement {
+ Enum(int offset, StringFragment typeName, std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(offset, kEnum_Kind)
+ , fTypeName(typeName)
+ , fSymbols(std::move(symbols)) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new Enum(fOffset, fTypeName, fSymbols));
+ }
+
+ String description() const override {
+ String result = "enum class " + fTypeName + " {\n";
+ String separator;
+ std::vector<const Symbol*> sortedSymbols;
+ for (const auto& pair : *fSymbols) {
+ sortedSymbols.push_back(pair.second);
+ }
+ std::sort(sortedSymbols.begin(), sortedSymbols.end(),
+ [](const Symbol* a, const Symbol* b) { return a->fName < b->fName; });
+ for (const auto& s : sortedSymbols) {
+ result += separator + " " + s->fName + " = " +
+ ((Variable*) s)->fInitialValue->description();
+ separator = ",\n";
+ }
+ result += "\n};";
+ return result;
+ }
+
+ bool fBuiltin = false;
+ const StringFragment fTypeName;
+ const std::shared_ptr<SymbolTable> fSymbols;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
new file mode 100644
index 0000000000..928295dc72
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSION
+#define SKSL_EXPRESSION
+
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <unordered_map>
+
+namespace SkSL {
+
+struct Expression;
+class IRGenerator;
+
+typedef std::unordered_map<const Variable*, std::unique_ptr<Expression>*> DefinitionMap;
+
+/**
+ * Abstract supertype of all expressions.
+ */
+struct Expression : public IRNode {
+ enum Kind {
+ kBinary_Kind,
+ kBoolLiteral_Kind,
+ kConstructor_Kind,
+ kExternalFunctionCall_Kind,
+ kExternalValue_Kind,
+ kIntLiteral_Kind,
+ kFieldAccess_Kind,
+ kFloatLiteral_Kind,
+ kFunctionReference_Kind,
+ kFunctionCall_Kind,
+ kIndex_Kind,
+ kNullLiteral_Kind,
+ kPrefix_Kind,
+ kPostfix_Kind,
+ kSetting_Kind,
+ kSwizzle_Kind,
+ kVariableReference_Kind,
+ kTernary_Kind,
+ kTypeReference_Kind,
+ kDefined_Kind
+ };
+
+ Expression(int offset, Kind kind, const Type& type)
+ : INHERITED(offset)
+ , fKind(kind)
+    , fType(type) {}
+
+ /**
+ * Returns true if this expression is constant. compareConstant must be implemented for all
+ * constants!
+ */
+ virtual bool isConstant() const {
+ return false;
+ }
+
+ /**
+ * Compares this constant expression against another constant expression of the same type. It is
+ * an error to call this on non-constant expressions, or if the types of the expressions do not
+ * match.
+ */
+ virtual bool compareConstant(const Context& context, const Expression& other) const {
+ ABORT("cannot call compareConstant on this type");
+ }
+
+ /**
+ * For an expression which evaluates to a constant int, returns the value. Otherwise calls
+ * ABORT.
+ */
+ virtual int64_t getConstantInt() const {
+ ABORT("not a constant int");
+ }
+
+ /**
+ * For an expression which evaluates to a constant float, returns the value. Otherwise calls
+ * ABORT.
+ */
+ virtual double getConstantFloat() const {
+ ABORT("not a constant float");
+ }
+
+ /**
+     * Returns true if evaluating the expression potentially has side effects. An expression must
+     * never return false if it actually has side effects, but it is legal (though suboptimal) to
+     * return true even when there are no side effects.
+ */
+ virtual bool hasSideEffects() const = 0;
+
+ /**
+ * Given a map of known constant variable values, substitute them in for references to those
+ * variables occurring in this expression and its subexpressions. Similar simplifications, such
+ * as folding a constant binary expression down to a single value, may also be performed.
+ * Returns a new expression which replaces this expression, or null if no replacements were
+ * made. If a new expression is returned, this expression is no longer valid.
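+     * For example, if 'x' is known to have the constant value 2, the expression 'x * 3' may be
+     * replaced with an IntLiteral with value 6.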
+ */
+ virtual std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) {
+ return nullptr;
+ }
+
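+    /**
+     * Returns the cost of coercing this expression to the given target type (lower is a better
+     * match). By default this simply defers to the type's own coercion cost.
+     */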
+ virtual int coercionCost(const Type& target) const {
+ return fType.coercionCost(target);
+ }
+
+ /**
+ * For a literal vector expression, return the floating point value of the n'th vector
+ * component. It is an error to call this method on an expression which is not a literal vector.
+ */
+ virtual SKSL_FLOAT getFVecComponent(int n) const {
+ SkASSERT(false);
+ return 0;
+ }
+
+ /**
+ * For a literal vector expression, return the integer value of the n'th vector component. It is
+ * an error to call this method on an expression which is not a literal vector.
+ */
+ virtual SKSL_INT getIVecComponent(int n) const {
+ SkASSERT(false);
+ return 0;
+ }
+
+ /**
+ * For a literal matrix expression, return the floating point value of the component at
+ * [col][row]. It is an error to call this method on an expression which is not a literal
+ * matrix.
+ */
+ virtual SKSL_FLOAT getMatComponent(int col, int row) const {
+ SkASSERT(false);
+ return 0;
+ }
+
+ virtual std::unique_ptr<Expression> clone() const = 0;
+
+ const Kind fKind;
+ const Type& fType;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
new file mode 100644
index 0000000000..80a8d316c7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSIONSTATEMENT
+#define SKSL_EXPRESSIONSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A lone expression being used as a statement.
+ */
+struct ExpressionStatement : public Statement {
+ ExpressionStatement(std::unique_ptr<Expression> expression)
+ : INHERITED(expression->fOffset, kExpression_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new ExpressionStatement(fExpression->clone()));
+ }
+
+ String description() const override {
+ return fExpression->description() + ";";
+ }
+
+ std::unique_ptr<Expression> fExpression;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExtension.h b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
new file mode 100644
index 0000000000..5eed97508f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTENSION
+#define SKSL_EXTENSION
+
+#include "src/sksl/ir/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * An extension declaration.
+ */
+struct Extension : public ProgramElement {
+ Extension(int offset, String name)
+ : INHERITED(offset, kExtension_Kind)
+ , fName(std::move(name)) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new Extension(fOffset, fName));
+ }
+
+ String description() const override {
+ return "#extension " + fName + " : enable";
+ }
+
+ const String fName;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExternalFunctionCall.h b/gfx/skia/skia/src/sksl/ir/SkSLExternalFunctionCall.h
new file mode 100644
index 0000000000..3c52a10de6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExternalFunctionCall.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTERNALFUNCTIONCALL
+#define SKSL_EXTERNALFUNCTIONCALL
+
+#include "src/sksl/SkSLExternalValue.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An external function invocation.
+ */
+struct ExternalFunctionCall : public Expression {
+ ExternalFunctionCall(int offset, const Type& type, ExternalValue* function,
+ std::vector<std::unique_ptr<Expression>> arguments)
+ : INHERITED(offset, kExternalFunctionCall_Kind, type)
+ , fFunction(function)
+ , fArguments(std::move(arguments)) {}
+
+ bool hasSideEffects() const override {
+ return true;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ std::vector<std::unique_ptr<Expression>> cloned;
+ for (const auto& arg : fArguments) {
+ cloned.push_back(arg->clone());
+ }
+ return std::unique_ptr<Expression>(new ExternalFunctionCall(fOffset,
+ fType,
+ fFunction,
+ std::move(cloned)));
+ }
+
+ String description() const override {
+ String result = String(fFunction->fName) + "(";
+ String separator;
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ result += separator;
+ result += fArguments[i]->description();
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+
+ ExternalValue* fFunction;
+ std::vector<std::unique_ptr<Expression>> fArguments;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExternalValueReference.h b/gfx/skia/skia/src/sksl/ir/SkSLExternalValueReference.h
new file mode 100644
index 0000000000..ee8e81e12a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExternalValueReference.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTERNALVALUEREFERENCE
+#define SKSL_EXTERNALVALUEREFERENCE
+
+#include "src/sksl/SkSLExternalValue.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents an identifier referring to an ExternalValue.
+ */
+struct ExternalValueReference : public Expression {
+ ExternalValueReference(int offset, ExternalValue* ev)
+ : INHERITED(offset, kExternalValue_Kind, ev->type())
+ , fValue(ev) {}
+
+ bool hasSideEffects() const override {
+ return true;
+ }
+
+ String description() const override {
+ return String(fValue->fName);
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new ExternalValueReference(fOffset, fValue));
+ }
+
+ ExternalValue* fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLField.h b/gfx/skia/skia/src/sksl/ir/SkSLField.h
new file mode 100644
index 0000000000..d187a8032b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLField.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELD
+#define SKSL_FIELD
+
+#include "src/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLSymbol.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A symbol which should be interpreted as a field access. Fields are added to the symbol table
+ * whenever a bare reference to an identifier should refer to a struct field; in GLSL, this is the
+ * result of declaring anonymous interface blocks.
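+ *
+ * For example, 'sk_Position' declared inside the anonymous 'sk_PerVertex' interface block can be
+ * referenced as a bare identifier; such a reference resolves to a Field.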
+ */
+struct Field : public Symbol {
+ Field(int offset, const Variable& owner, int fieldIndex)
+ : INHERITED(offset, kField_Kind, owner.fType.fields()[fieldIndex].fName)
+ , fOwner(owner)
+ , fFieldIndex(fieldIndex) {}
+
+    String description() const override {
+ return fOwner.description() + "." + fOwner.fType.fields()[fFieldIndex].fName;
+ }
+
+ const Variable& fOwner;
+ const int fFieldIndex;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
new file mode 100644
index 0000000000..7576c0a31e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELDACCESS
+#define SKSL_FIELDACCESS
+
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * An expression which extracts a field from a struct, as in 'foo.bar'.
+ */
+struct FieldAccess : public Expression {
+ enum OwnerKind {
+ kDefault_OwnerKind,
+ // this field access is to a field of an anonymous interface block (and thus, the field name
+ // is actually in global scope, so only the field name needs to be written in GLSL)
+ kAnonymousInterfaceBlock_OwnerKind
+ };
+
+ FieldAccess(std::unique_ptr<Expression> base, int fieldIndex,
+ OwnerKind ownerKind = kDefault_OwnerKind)
+ : INHERITED(base->fOffset, kFieldAccess_Kind, *base->fType.fields()[fieldIndex].fType)
+ , fBase(std::move(base))
+ , fFieldIndex(fieldIndex)
+ , fOwnerKind(ownerKind) {}
+
+ bool hasSideEffects() const override {
+ return fBase->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new FieldAccess(fBase->clone(), fFieldIndex,
+ fOwnerKind));
+ }
+
+ String description() const override {
+ return fBase->description() + "." + fBase->fType.fields()[fFieldIndex].fName;
+ }
+
+ std::unique_ptr<Expression> fBase;
+ const int fFieldIndex;
+ const OwnerKind fOwnerKind;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h
new file mode 100644
index 0000000000..2ed2b796b1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FLOATLITERAL
+#define SKSL_FLOATLITERAL
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal floating point number.
+ */
+struct FloatLiteral : public Expression {
+ FloatLiteral(const Context& context, int offset, double value)
+ : INHERITED(offset, kFloatLiteral_Kind, *context.fFloatLiteral_Type)
+ , fValue(value) {}
+
+ FloatLiteral(int offset, double value, const Type* type)
+ : INHERITED(offset, kFloatLiteral_Kind, *type)
+ , fValue(value) {}
+
+ String description() const override {
+ return to_string(fValue);
+ }
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
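+    // a float literal coerces to any floating point type at no cost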
+ int coercionCost(const Type& target) const override {
+ if (target.isFloat()) {
+ return 0;
+ }
+ return INHERITED::coercionCost(target);
+ }
+
+ bool compareConstant(const Context& context, const Expression& other) const override {
+ FloatLiteral& f = (FloatLiteral&) other;
+ return fValue == f.fValue;
+ }
+
+ double getConstantFloat() const override {
+ return fValue;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new FloatLiteral(fOffset, fValue, &fType));
+ }
+
+ const double fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
new file mode 100644
index 0000000000..4906e192a6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FORSTATEMENT
+#define SKSL_FORSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A 'for' statement.
+ */
+struct ForStatement : public Statement {
+ ForStatement(int offset, std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test, std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement, std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(offset, kFor_Kind)
+ , fSymbols(symbols)
+ , fInitializer(std::move(initializer))
+ , fTest(std::move(test))
+ , fNext(std::move(next))
+ , fStatement(std::move(statement)) {}
+
+    std::unique_ptr<Statement> clone() const override {
+        // fInitializer, fTest, and fNext are optional, so null-check them before cloning
+        return std::unique_ptr<Statement>(new ForStatement(
+                fOffset,
+                fInitializer ? fInitializer->clone() : nullptr,
+                fTest ? fTest->clone() : nullptr,
+                fNext ? fNext->clone() : nullptr,
+                fStatement->clone(),
+                fSymbols));
+    }
+
+ String description() const override {
+ String result("for (");
+ if (fInitializer) {
+ result += fInitializer->description();
+ }
+ result += " ";
+ if (fTest) {
+ result += fTest->description();
+ }
+ result += "; ";
+ if (fNext) {
+ result += fNext->description();
+ }
+ result += ") " + fStatement->description();
+ return result;
+ }
+
+ // it's important to keep fSymbols defined first (and thus destroyed last) because destroying
+ // the other fields can update symbol reference counts
+ const std::shared_ptr<SymbolTable> fSymbols;
+ std::unique_ptr<Statement> fInitializer;
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Expression> fNext;
+ std::unique_ptr<Statement> fStatement;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
new file mode 100644
index 0000000000..90c173f66a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONCALL
+#define SKSL_FUNCTIONCALL
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A function invocation.
+ */
+struct FunctionCall : public Expression {
+ FunctionCall(int offset, const Type& type, const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments)
+ : INHERITED(offset, kFunctionCall_Kind, type)
+    , fFunction(function)
+ , fArguments(std::move(arguments)) {}
+
+ bool hasSideEffects() const override {
+ for (const auto& arg : fArguments) {
+ if (arg->hasSideEffects()) {
+ return true;
+ }
+ }
+ return fFunction.fModifiers.fFlags & Modifiers::kHasSideEffects_Flag;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ std::vector<std::unique_ptr<Expression>> cloned;
+ for (const auto& arg : fArguments) {
+ cloned.push_back(arg->clone());
+ }
+ return std::unique_ptr<Expression>(new FunctionCall(fOffset, fType, fFunction,
+ std::move(cloned)));
+ }
+
+ String description() const override {
+ String result = String(fFunction.fName) + "(";
+ String separator;
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ result += separator;
+ result += fArguments[i]->description();
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+
+ const FunctionDeclaration& fFunction;
+ std::vector<std::unique_ptr<Expression>> fArguments;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
new file mode 100644
index 0000000000..9b6d25e483
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDECLARATION
+#define SKSL_FUNCTIONDECLARATION
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLSymbol.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A function declaration (not a definition -- does not contain a body).
+ */
+struct FunctionDeclaration : public Symbol {
+ FunctionDeclaration(int offset, Modifiers modifiers, StringFragment name,
+ std::vector<const Variable*> parameters, const Type& returnType)
+ : INHERITED(offset, kFunctionDeclaration_Kind, std::move(name))
+ , fDefined(false)
+ , fBuiltin(false)
+ , fModifiers(modifiers)
+ , fParameters(std::move(parameters))
+ , fReturnType(returnType) {}
+
+ String description() const override {
+ String result = fReturnType.description() + " " + fName + "(";
+ String separator;
+ for (auto p : fParameters) {
+ result += separator;
+ separator = ", ";
+ result += p->description();
+ }
+ result += ")";
+ return result;
+ }
+
+ bool matches(const FunctionDeclaration& f) const {
+ if (fName != f.fName) {
+ return false;
+ }
+ if (fParameters.size() != f.fParameters.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < fParameters.size(); i++) {
+ if (fParameters[i]->fType != f.fParameters[i]->fType) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Determine the effective types of this function's parameters and return value when called with
+ * the given arguments. This is relevant for functions with generic parameter types, where this
+ * will collapse the generic types down into specific concrete types.
+ *
+     * Returns true if it was able to select a concrete set of types for the generic function, or
+     * false if there is no possible way this can match the argument types. Note that even a true
+     * return does not guarantee that the function can be successfully called with those
+     * arguments; it merely indicates that an attempt should be made. If false is returned, the
+     * state of outParameterTypes and outReturnType is undefined.
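+     * For example, a generic parameter whose coercible types are (float, float2, float3), called
+     * with a float2 argument, collapses to float2 for that parameter (and for the return type,
+     * if the return type is also generic).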
+ */
+ bool determineFinalTypes(const std::vector<std::unique_ptr<Expression>>& arguments,
+ std::vector<const Type*>* outParameterTypes,
+ const Type** outReturnType) const {
+ SkASSERT(arguments.size() == fParameters.size());
+ int genericIndex = -1;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ if (fParameters[i]->fType.kind() == Type::kGeneric_Kind) {
+ std::vector<const Type*> types = fParameters[i]->fType.coercibleTypes();
+ if (genericIndex == -1) {
+ for (size_t j = 0; j < types.size(); j++) {
+ if (arguments[i]->fType.canCoerceTo(*types[j])) {
+ genericIndex = j;
+ break;
+ }
+ }
+ if (genericIndex == -1) {
+ return false;
+ }
+ }
+ outParameterTypes->push_back(types[genericIndex]);
+ } else {
+ outParameterTypes->push_back(&fParameters[i]->fType);
+ }
+ }
+ if (fReturnType.kind() == Type::kGeneric_Kind) {
+ if (genericIndex == -1) {
+ return false;
+ }
+ *outReturnType = fReturnType.coercibleTypes()[genericIndex];
+ } else {
+ *outReturnType = &fReturnType;
+ }
+ return true;
+ }
+
+ mutable bool fDefined;
+ bool fBuiltin;
+ Modifiers fModifiers;
+ const std::vector<const Variable*> fParameters;
+ const Type& fReturnType;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
new file mode 100644
index 0000000000..7344373bdf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDEFINITION
+#define SKSL_FUNCTIONDEFINITION
+
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * A function definition (a declaration plus an associated block of code).
+ */
+struct FunctionDefinition : public ProgramElement {
+ FunctionDefinition(int offset, const FunctionDeclaration& declaration,
+ std::unique_ptr<Statement> body)
+ : INHERITED(offset, kFunction_Kind)
+ , fDeclaration(declaration)
+ , fBody(std::move(body)) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new FunctionDefinition(fOffset, fDeclaration,
+ fBody->clone()));
+ }
+
+ String description() const override {
+ return fDeclaration.description() + " " + fBody->description();
+ }
+
+ const FunctionDeclaration& fDeclaration;
+ std::unique_ptr<Statement> fBody;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
new file mode 100644
index 0000000000..e78f1cd641
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONREFERENCE
+#define SKSL_FUNCTIONREFERENCE
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An identifier referring to a function name. This is an intermediate value: FunctionReferences are
+ * always eventually replaced by FunctionCalls in valid programs.
+ */
+struct FunctionReference : public Expression {
+ FunctionReference(const Context& context, int offset,
+ std::vector<const FunctionDeclaration*> function)
+ : INHERITED(offset, kFunctionReference_Kind, *context.fInvalid_Type)
+ , fFunctions(function) {}
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new FunctionReference(fOffset, fFunctions, &fType));
+ }
+
+ String description() const override {
+ return String("<function>");
+ }
+
+ const std::vector<const FunctionDeclaration*> fFunctions;
+
+ typedef Expression INHERITED;
+
+private:
+ FunctionReference(int offset, std::vector<const FunctionDeclaration*> function,
+ const Type* type)
+ : INHERITED(offset, kFunctionReference_Kind, *type)
+    , fFunctions(function) {}
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h b/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h
new file mode 100644
index 0000000000..ca9ea99998
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRNODE
+#define SKSL_IRNODE
+
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/SkSLString.h"
+
+namespace SkSL {
+
+/**
+ * Represents a node in the intermediate representation (IR) tree. The IR is a fully-resolved
+ * version of the program (all types determined, everything validated), ready for code generation.
+ */
+struct IRNode {
+ IRNode(int offset)
+ : fOffset(offset) {}
+
+ virtual ~IRNode() {}
+
+ virtual String description() const = 0;
+
+ // character offset of this element within the program being compiled, for error reporting
+ // purposes
+ int fOffset;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
new file mode 100644
index 0000000000..5d0a22b647
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IFSTATEMENT
+#define SKSL_IFSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * An 'if' statement.
+ */
+struct IfStatement : public Statement {
+ IfStatement(int offset, bool isStatic, std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue, std::unique_ptr<Statement> ifFalse)
+ : INHERITED(offset, kIf_Kind)
+ , fIsStatic(isStatic)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new IfStatement(fOffset, fIsStatic, fTest->clone(),
+ fIfTrue->clone(), fIfFalse ? fIfFalse->clone() : nullptr));
+ }
+
+ String description() const override {
+ String result;
+ if (fIsStatic) {
+ result += "@";
+ }
+ result += "if (" + fTest->description() + ") " + fIfTrue->description();
+ if (fIfFalse) {
+ result += " else " + fIfFalse->description();
+ }
+ return result;
+ }
+
+ bool fIsStatic;
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Statement> fIfTrue;
+ // may be null
+ std::unique_ptr<Statement> fIfFalse;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
new file mode 100644
index 0000000000..7c5c1290b8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INDEX
+#define SKSL_INDEX
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Given a type, returns the type that results from indexing into it, as in 'm[2]': for a matrix,
+ * this is the corresponding column vector type (e.g. indexing a float4x4 yields a float4);
+ * otherwise it is the component type.
+ */
+static const Type& index_type(const Context& context, const Type& type) {
+ if (type.kind() == Type::kMatrix_Kind) {
+ if (type.componentType() == *context.fFloat_Type) {
+ switch (type.rows()) {
+ case 2: return *context.fFloat2_Type;
+ case 3: return *context.fFloat3_Type;
+ case 4: return *context.fFloat4_Type;
+ default: SkASSERT(false);
+ }
+ } else if (type.componentType() == *context.fHalf_Type) {
+ switch (type.rows()) {
+ case 2: return *context.fHalf2_Type;
+ case 3: return *context.fHalf3_Type;
+ case 4: return *context.fHalf4_Type;
+ default: SkASSERT(false);
+ }
+ } else {
+ SkASSERT(type.componentType() == *context.fDouble_Type);
+ switch (type.rows()) {
+ case 2: return *context.fDouble2_Type;
+ case 3: return *context.fDouble3_Type;
+ case 4: return *context.fDouble4_Type;
+ default: SkASSERT(false);
+ }
+ }
+ }
+ return type.componentType();
+}
+
+/**
+ * An expression which extracts a value from an array or matrix, as in 'm[2]'.
+ */
+struct IndexExpression : public Expression {
+ IndexExpression(const Context& context, std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index)
+ : INHERITED(base->fOffset, kIndex_Kind, index_type(context, base->fType))
+ , fBase(std::move(base))
+ , fIndex(std::move(index)) {
+ SkASSERT(fIndex->fType == *context.fInt_Type || fIndex->fType == *context.fUInt_Type);
+ }
+
+ bool hasSideEffects() const override {
+ return fBase->hasSideEffects() || fIndex->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new IndexExpression(fBase->clone(), fIndex->clone(),
+ &fType));
+ }
+
+ String description() const override {
+ return fBase->description() + "[" + fIndex->description() + "]";
+ }
+
+ std::unique_ptr<Expression> fBase;
+ std::unique_ptr<Expression> fIndex;
+
+ typedef Expression INHERITED;
+
+private:
+ IndexExpression(std::unique_ptr<Expression> base, std::unique_ptr<Expression> index,
+ const Type* type)
+ : INHERITED(base->fOffset, kIndex_Kind, *type)
+ , fBase(std::move(base))
+ , fIndex(std::move(index)) {}
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h
new file mode 100644
index 0000000000..a95875c3ea
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTLITERAL
+#define SKSL_INTLITERAL
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal integer.
+ */
+struct IntLiteral : public Expression {
+ // FIXME: we will need to revisit this if/when we add full support for both signed and unsigned
+ // 64-bit integers, but for right now an int64_t will hold every value we care about
+ IntLiteral(const Context& context, int offset, int64_t value)
+ : INHERITED(offset, kIntLiteral_Kind, *context.fInt_Type)
+ , fValue(value) {}
+
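+    // note: despite the default argument, 'type' must never actually be null here; it is
+    // dereferenced immediately in the initializer list below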
+ IntLiteral(int offset, int64_t value, const Type* type = nullptr)
+ : INHERITED(offset, kIntLiteral_Kind, *type)
+ , fValue(value) {}
+
+ String description() const override {
+ return to_string(fValue);
+ }
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ bool compareConstant(const Context& context, const Expression& other) const override {
+ IntLiteral& i = (IntLiteral&) other;
+ return fValue == i.fValue;
+ }
+
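+    // an int literal coerces to any signed, unsigned, or floating point type at no cost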
+ int coercionCost(const Type& target) const override {
+ if (target.isSigned() || target.isUnsigned() || target.isFloat()) {
+ return 0;
+ }
+ return INHERITED::coercionCost(target);
+ }
+
+ int64_t getConstantInt() const override {
+ return fValue;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new IntLiteral(fOffset, fValue, &fType));
+ }
+
+ const int64_t fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
new file mode 100644
index 0000000000..3dfd9fdf29
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTERFACEBLOCK
+#define SKSL_INTERFACEBLOCK
+
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+namespace SkSL {
+
+/**
+ * An interface block, as in:
+ *
+ * out sk_PerVertex {
+ * layout(builtin=0) float4 sk_Position;
+ * layout(builtin=1) float sk_PointSize;
+ * };
+ *
+ * At the IR level, this is represented by a single variable of struct type.
+ */
+struct InterfaceBlock : public ProgramElement {
+ InterfaceBlock(int offset, const Variable* var, String typeName, String instanceName,
+ std::vector<std::unique_ptr<Expression>> sizes,
+ std::shared_ptr<SymbolTable> typeOwner)
+ : INHERITED(offset, kInterfaceBlock_Kind)
+ , fVariable(*var)
+ , fTypeName(std::move(typeName))
+ , fInstanceName(std::move(instanceName))
+ , fSizes(std::move(sizes))
+ , fTypeOwner(typeOwner) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ std::vector<std::unique_ptr<Expression>> sizesClone;
+ for (const auto& s : fSizes) {
+ sizesClone.push_back(s->clone());
+ }
+ return std::unique_ptr<ProgramElement>(new InterfaceBlock(fOffset, &fVariable, fTypeName,
+ fInstanceName,
+ std::move(sizesClone),
+ fTypeOwner));
+ }
+
+ String description() const override {
+ String result = fVariable.fModifiers.description() + fTypeName + " {\n";
+ const Type* structType = &fVariable.fType;
+ while (structType->kind() == Type::kArray_Kind) {
+ structType = &structType->componentType();
+ }
+ for (const auto& f : structType->fields()) {
+ result += f.description() + "\n";
+ }
+ result += "}";
+ if (fInstanceName.size()) {
+ result += " " + fInstanceName;
+ for (const auto& size : fSizes) {
+ result += "[";
+ if (size) {
+ result += size->description();
+ }
+ result += "]";
+ }
+ }
+ return result + ";";
+ }
+
+ const Variable& fVariable;
+ const String fTypeName;
+ const String fInstanceName;
+ std::vector<std::unique_ptr<Expression>> fSizes;
+ const std::shared_ptr<SymbolTable> fTypeOwner;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLLayout.h b/gfx/skia/skia/src/sksl/ir/SkSLLayout.h
new file mode 100644
index 0000000000..143aff3d8f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLLayout.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LAYOUT
+#define SKSL_LAYOUT
+
+#include "src/sksl/SkSLString.h"
+#include "src/sksl/SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Represents a layout block appearing before a variable declaration, as in:
+ *
+ * layout (location = 0) int x;
+ */
+struct Layout {
+ enum Flag {
+ kOriginUpperLeft_Flag = 1 << 0,
+ kOverrideCoverage_Flag = 1 << 1,
+ kPushConstant_Flag = 1 << 2,
+ kBlendSupportAllEquations_Flag = 1 << 3,
+ kBlendSupportMultiply_Flag = 1 << 4,
+ kBlendSupportScreen_Flag = 1 << 5,
+ kBlendSupportOverlay_Flag = 1 << 6,
+ kBlendSupportDarken_Flag = 1 << 7,
+ kBlendSupportLighten_Flag = 1 << 8,
+ kBlendSupportColorDodge_Flag = 1 << 9,
+ kBlendSupportColorBurn_Flag = 1 << 10,
+ kBlendSupportHardLight_Flag = 1 << 11,
+ kBlendSupportSoftLight_Flag = 1 << 12,
+ kBlendSupportDifference_Flag = 1 << 13,
+ kBlendSupportExclusion_Flag = 1 << 14,
+ kBlendSupportHSLHue_Flag = 1 << 15,
+ kBlendSupportHSLSaturation_Flag = 1 << 16,
+ kBlendSupportHSLColor_Flag = 1 << 17,
+ kBlendSupportHSLLuminosity_Flag = 1 << 18,
+ kTracked_Flag = 1 << 19
+ };
+
+ enum Primitive {
+ kUnspecified_Primitive = -1,
+ kPoints_Primitive,
+ kLines_Primitive,
+ kLineStrip_Primitive,
+ kLinesAdjacency_Primitive,
+ kTriangles_Primitive,
+ kTriangleStrip_Primitive,
+ kTrianglesAdjacency_Primitive
+ };
+
+ // These are used by images in GLSL. We only support a subset of what GL supports.
+ enum class Format {
+ kUnspecified = -1,
+ kRGBA32F,
+ kR32F,
+ kRGBA16F,
+ kR16F,
+ kLUMINANCE16F,
+ kRGBA8,
+ kR8,
+ kRGBA8I,
+ kR8I,
+ kRG16F,
+ };
+
+ // used by SkSL processors
+ enum Key {
+ // field is not a key
+ kNo_Key,
+ // field is a key
+ kKey_Key,
+ // key is 0 or 1 depending on whether the matrix is an identity matrix
+ kIdentity_Key,
+ };
+
+ enum class CType {
+ kDefault,
+ kBool,
+ kFloat,
+ kInt32,
+ kSkRect,
+ kSkIRect,
+ kSkPMColor4f,
+ kSkPMColor,
+ kSkVector4,
+ kSkPoint,
+ kSkIPoint,
+ kSkMatrix,
+ kSkMatrix44,
+ kGrTextureProxy,
+ kGrFragmentProcessor,
+ };
+
+ static const char* FormatToStr(Format format) {
+ switch (format) {
+ case Format::kUnspecified: return "";
+ case Format::kRGBA32F: return "rgba32f";
+ case Format::kR32F: return "r32f";
+ case Format::kRGBA16F: return "rgba16f";
+ case Format::kR16F: return "r16f";
+ case Format::kLUMINANCE16F: return "lum16f";
+ case Format::kRGBA8: return "rgba8";
+ case Format::kR8: return "r8";
+ case Format::kRGBA8I: return "rgba8i";
+ case Format::kR8I: return "r8i";
+ case Format::kRG16F: return "rg16f";
+ }
+ ABORT("Unexpected format");
+ }
+
+ static bool ReadFormat(String str, Format* format) {
+ if (str == "rgba32f") {
+ *format = Format::kRGBA32F;
+ return true;
+ } else if (str == "r32f") {
+ *format = Format::kR32F;
+ return true;
+ } else if (str == "rgba16f") {
+ *format = Format::kRGBA16F;
+ return true;
+ } else if (str == "r16f") {
+ *format = Format::kR16F;
+ return true;
+ } else if (str == "lum16f") {
+ *format = Format::kLUMINANCE16F;
+ return true;
+ } else if (str == "rgba8") {
+ *format = Format::kRGBA8;
+ return true;
+ } else if (str == "r8") {
+ *format = Format::kR8;
+ return true;
+ } else if (str == "rgba8i") {
+ *format = Format::kRGBA8I;
+ return true;
+ } else if (str == "r8i") {
+ *format = Format::kR8I;
+ return true;
+ } else if (str == "rg16f") {
+ *format = Format::kRG16F;
+ return true;
+ }
+ return false;
+ }
+
+ static const char* CTypeToStr(CType ctype) {
+ switch (ctype) {
+ case CType::kDefault:
+ return nullptr;
+ case CType::kFloat:
+ return "float";
+ case CType::kInt32:
+ return "int32_t";
+ case CType::kSkRect:
+ return "SkRect";
+ case CType::kSkIRect:
+ return "SkIRect";
+ case CType::kSkPMColor4f:
+ return "SkPMColor4f";
+ case CType::kSkPMColor:
+ return "SkPMColor";
+ case CType::kSkVector4:
+ return "SkVector4";
+ case CType::kSkPoint:
+ return "SkPoint";
+ case CType::kSkIPoint:
+ return "SkIPoint";
+ case CType::kSkMatrix:
+ return "SkMatrix";
+ case CType::kSkMatrix44:
+ return "SkMatrix44";
+ case CType::kGrTextureProxy:
+ return "sk_sp<GrTextureProxy>";
+ case CType::kGrFragmentProcessor:
+ return "std::unique_ptr<GrFragmentProcessor>";
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ }
+
+ Layout(int flags, int location, int offset, int binding, int index, int set, int builtin,
+ int inputAttachmentIndex, Format format, Primitive primitive, int maxVertices,
+ int invocations, StringFragment when, Key key, CType ctype)
+ : fFlags(flags)
+ , fLocation(location)
+ , fOffset(offset)
+ , fBinding(binding)
+ , fIndex(index)
+ , fSet(set)
+ , fBuiltin(builtin)
+ , fInputAttachmentIndex(inputAttachmentIndex)
+ , fFormat(format)
+ , fPrimitive(primitive)
+ , fMaxVertices(maxVertices)
+ , fInvocations(invocations)
+ , fWhen(when)
+ , fKey(key)
+ , fCType(ctype) {}
+
+ Layout()
+ : fFlags(0)
+ , fLocation(-1)
+ , fOffset(-1)
+ , fBinding(-1)
+ , fIndex(-1)
+ , fSet(-1)
+ , fBuiltin(-1)
+ , fInputAttachmentIndex(-1)
+ , fFormat(Format::kUnspecified)
+ , fPrimitive(kUnspecified_Primitive)
+ , fMaxVertices(-1)
+ , fInvocations(-1)
+ , fKey(kNo_Key)
+ , fCType(CType::kDefault) {}
+
+ String description() const {
+ String result;
+ String separator;
+ if (fLocation >= 0) {
+ result += separator + "location = " + to_string(fLocation);
+ separator = ", ";
+ }
+ if (fOffset >= 0) {
+ result += separator + "offset = " + to_string(fOffset);
+ separator = ", ";
+ }
+ if (fBinding >= 0) {
+ result += separator + "binding = " + to_string(fBinding);
+ separator = ", ";
+ }
+ if (fIndex >= 0) {
+ result += separator + "index = " + to_string(fIndex);
+ separator = ", ";
+ }
+ if (fSet >= 0) {
+ result += separator + "set = " + to_string(fSet);
+ separator = ", ";
+ }
+ if (fBuiltin >= 0) {
+ result += separator + "builtin = " + to_string(fBuiltin);
+ separator = ", ";
+ }
+ if (fInputAttachmentIndex >= 0) {
+ result += separator + "input_attachment_index = " + to_string(fInputAttachmentIndex);
+ separator = ", ";
+ }
+ if (Format::kUnspecified != fFormat) {
+ result += separator + FormatToStr(fFormat);
+ separator = ", ";
+ }
+ if (fFlags & kOriginUpperLeft_Flag) {
+ result += separator + "origin_upper_left";
+ separator = ", ";
+ }
+ if (fFlags & kOverrideCoverage_Flag) {
+ result += separator + "override_coverage";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportAllEquations_Flag) {
+ result += separator + "blend_support_all_equations";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportMultiply_Flag) {
+ result += separator + "blend_support_multiply";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportScreen_Flag) {
+ result += separator + "blend_support_screen";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportOverlay_Flag) {
+ result += separator + "blend_support_overlay";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportDarken_Flag) {
+ result += separator + "blend_support_darken";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportLighten_Flag) {
+ result += separator + "blend_support_lighten";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportColorDodge_Flag) {
+ result += separator + "blend_support_colordodge";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportColorBurn_Flag) {
+ result += separator + "blend_support_colorburn";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportHardLight_Flag) {
+ result += separator + "blend_support_hardlight";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportSoftLight_Flag) {
+ result += separator + "blend_support_softlight";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportDifference_Flag) {
+ result += separator + "blend_support_difference";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportExclusion_Flag) {
+ result += separator + "blend_support_exclusion";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportHSLHue_Flag) {
+ result += separator + "blend_support_hsl_hue";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportHSLSaturation_Flag) {
+ result += separator + "blend_support_hsl_saturation";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportHSLColor_Flag) {
+ result += separator + "blend_support_hsl_color";
+ separator = ", ";
+ }
+ if (fFlags & kBlendSupportHSLLuminosity_Flag) {
+ result += separator + "blend_support_hsl_luminosity";
+ separator = ", ";
+ }
+ if (fFlags & kPushConstant_Flag) {
+ result += separator + "push_constant";
+ separator = ", ";
+ }
+ if (fFlags & kTracked_Flag) {
+ result += separator + "tracked";
+ separator = ", ";
+ }
+ switch (fPrimitive) {
+ case kPoints_Primitive:
+ result += separator + "points";
+ separator = ", ";
+ break;
+ case kLines_Primitive:
+ result += separator + "lines";
+ separator = ", ";
+ break;
+ case kLineStrip_Primitive:
+ result += separator + "line_strip";
+ separator = ", ";
+ break;
+ case kLinesAdjacency_Primitive:
+ result += separator + "lines_adjacency";
+ separator = ", ";
+ break;
+ case kTriangles_Primitive:
+ result += separator + "triangles";
+ separator = ", ";
+ break;
+ case kTriangleStrip_Primitive:
+ result += separator + "triangle_strip";
+ separator = ", ";
+ break;
+ case kTrianglesAdjacency_Primitive:
+ result += separator + "triangles_adjacency";
+ separator = ", ";
+ break;
+ case kUnspecified_Primitive:
+ break;
+ }
+ if (fMaxVertices >= 0) {
+ result += separator + "max_vertices = " + to_string(fMaxVertices);
+ separator = ", ";
+ }
+ if (fInvocations >= 0) {
+ result += separator + "invocations = " + to_string(fInvocations);
+ separator = ", ";
+ }
+ if (fWhen.fLength) {
+ result += separator + "when = " + fWhen;
+ separator = ", ";
+ }
+ if (result.size() > 0) {
+ result = "layout (" + result + ")";
+ }
+ if (fKey) {
+ result += "/* key */";
+ }
+ return result;
+ }
+
+ bool operator==(const Layout& other) const {
+ return fFlags == other.fFlags &&
+ fLocation == other.fLocation &&
+ fOffset == other.fOffset &&
+ fBinding == other.fBinding &&
+ fIndex == other.fIndex &&
+ fSet == other.fSet &&
+ fBuiltin == other.fBuiltin &&
+ fInputAttachmentIndex == other.fInputAttachmentIndex &&
+ fFormat == other.fFormat &&
+ fPrimitive == other.fPrimitive &&
+ fMaxVertices == other.fMaxVertices &&
+ fInvocations == other.fInvocations;
+ }
+
+ bool operator!=(const Layout& other) const {
+ return !(*this == other);
+ }
+
+ int fFlags;
+ int fLocation;
+ int fOffset;
+ int fBinding;
+ int fIndex;
+ int fSet;
+ // builtin comes from SPIR-V and identifies which particular builtin value this object
+ // represents.
+ int fBuiltin;
+    // input_attachment_index comes from Vulkan/SPIR-V to connect a shader variable to the
+    // corresponding attachment on the subpass in which the shader is being used.
+ int fInputAttachmentIndex;
+ Format fFormat;
+ Primitive fPrimitive;
+ int fMaxVertices;
+ int fInvocations;
+ StringFragment fWhen;
+ Key fKey;
+ CType fCType;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h
new file mode 100644
index 0000000000..171ea82387
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERS
+#define SKSL_MODIFIERS
+
+#include "src/sksl/ir/SkSLLayout.h"
+
+namespace SkSL {
+
+/**
+ * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration.
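+ * For example, in a declaration such as 'layout(location = 0) in float2 coord;', the
+ * modifiers are 'layout(location = 0) in'.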
+ */
+struct Modifiers {
+ enum Flag {
+ kNo_Flag = 0,
+ kConst_Flag = 1 << 0,
+ kIn_Flag = 1 << 1,
+ kOut_Flag = 1 << 2,
+ kUniform_Flag = 1 << 3,
+ kFlat_Flag = 1 << 4,
+ kNoPerspective_Flag = 1 << 5,
+ kReadOnly_Flag = 1 << 6,
+ kWriteOnly_Flag = 1 << 7,
+ kCoherent_Flag = 1 << 8,
+ kVolatile_Flag = 1 << 9,
+ kRestrict_Flag = 1 << 10,
+ kBuffer_Flag = 1 << 11,
+ kHasSideEffects_Flag = 1 << 12,
+ kPLS_Flag = 1 << 13,
+ kPLSIn_Flag = 1 << 14,
+ kPLSOut_Flag = 1 << 15,
+ };
+
+ Modifiers()
+ : fLayout(Layout())
+ , fFlags(0) {}
+
+ Modifiers(const Layout& layout, int flags)
+ : fLayout(layout)
+ , fFlags(flags) {}
+
+ String description() const {
+ String result = fLayout.description();
+ if (fFlags & kUniform_Flag) {
+ result += "uniform ";
+ }
+ if (fFlags & kConst_Flag) {
+ result += "const ";
+ }
+ if (fFlags & kFlat_Flag) {
+ result += "flat ";
+ }
+ if (fFlags & kNoPerspective_Flag) {
+ result += "noperspective ";
+ }
+ if (fFlags & kReadOnly_Flag) {
+ result += "readonly ";
+ }
+ if (fFlags & kWriteOnly_Flag) {
+ result += "writeonly ";
+ }
+ if (fFlags & kCoherent_Flag) {
+ result += "coherent ";
+ }
+ if (fFlags & kVolatile_Flag) {
+ result += "volatile ";
+ }
+ if (fFlags & kRestrict_Flag) {
+ result += "restrict ";
+ }
+ if (fFlags & kBuffer_Flag) {
+ result += "buffer ";
+ }
+ if (fFlags & kHasSideEffects_Flag) {
+ result += "sk_has_side_effects ";
+ }
+ if (fFlags & kPLS_Flag) {
+ result += "__pixel_localEXT ";
+ }
+ if (fFlags & kPLSIn_Flag) {
+ result += "__pixel_local_inEXT ";
+ }
+ if (fFlags & kPLSOut_Flag) {
+ result += "__pixel_local_outEXT ";
+ }
+ if ((fFlags & kIn_Flag) && (fFlags & kOut_Flag)) {
+ result += "inout ";
+ } else if (fFlags & kIn_Flag) {
+ result += "in ";
+ } else if (fFlags & kOut_Flag) {
+ result += "out ";
+ }
+
+ return result;
+ }
+
+ bool operator==(const Modifiers& other) const {
+ return fLayout == other.fLayout && fFlags == other.fFlags;
+ }
+
+ bool operator!=(const Modifiers& other) const {
+ return !(*this == other);
+ }
+
+ Layout fLayout;
+ int fFlags;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h
new file mode 100644
index 0000000000..1dc74149e6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERDECLARATION
+#define SKSL_MODIFIERDECLARATION
+
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * A declaration that consists only of modifiers, e.g.:
+ *
+ * layout(blend_support_all_equations) out;
+ */
+struct ModifiersDeclaration : public ProgramElement {
+ ModifiersDeclaration(Modifiers modifiers)
+ : INHERITED(-1, kModifiers_Kind)
+ , fModifiers(modifiers) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new ModifiersDeclaration(fModifiers));
+ }
+
+ String description() const override {
+ return fModifiers.description() + ";";
+ }
+
+ Modifiers fModifiers;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLNop.h b/gfx/skia/skia/src/sksl/ir/SkSLNop.h
new file mode 100644
index 0000000000..2ead371f87
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLNop.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NOP
+#define SKSL_NOP
+
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A no-op statement that does nothing.
+ */
+struct Nop : public Statement {
+ Nop()
+ : INHERITED(-1, kNop_Kind) {}
+
+ virtual bool isEmpty() const override {
+ return true;
+ }
+
+ String description() const override {
+ return String(";");
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new Nop());
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLNullLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLNullLiteral.h
new file mode 100644
index 0000000000..2d8816d6a2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLNullLiteral.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NULLLITERAL
+#define SKSL_NULLLITERAL
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents 'null'.
+ */
+struct NullLiteral : public Expression {
+ NullLiteral(const Context& context, int offset)
+ : INHERITED(offset, kNullLiteral_Kind, *context.fNull_Type) {}
+
+ NullLiteral(int offset, const Type& type)
+ : INHERITED(offset, kNullLiteral_Kind, type) {}
+
+ String description() const override {
+ return "null";
+ }
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ bool compareConstant(const Context& context, const Expression& other) const override {
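+        // any two null literals are equal, so this can unconditionally return true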
+ return true;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new NullLiteral(fOffset, fType));
+ }
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
new file mode 100644
index 0000000000..c11e2085e4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSTFIXEXPRESSION
+#define SKSL_POSTFIXEXPRESSION
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * An expression modified by a unary operator appearing after it, such as 'i++'.
+ */
+struct PostfixExpression : public Expression {
+ PostfixExpression(std::unique_ptr<Expression> operand, Token::Kind op)
+ : INHERITED(operand->fOffset, kPostfix_Kind, operand->fType)
+ , fOperand(std::move(operand))
+ , fOperator(op) {}
+
+ bool hasSideEffects() const override {
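+        // the only postfix operators are '++' and '--', both of which modify their operand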
+ return true;
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new PostfixExpression(fOperand->clone(), fOperator));
+ }
+
+ String description() const override {
+ return fOperand->description() + Compiler::OperatorName(fOperator);
+ }
+
+ std::unique_ptr<Expression> fOperand;
+ const Token::Kind fOperator;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
new file mode 100644
index 0000000000..408f1d0864
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PREFIXEXPRESSION
+#define SKSL_PREFIXEXPRESSION
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+
+namespace SkSL {
+
+/**
+ * An expression modified by a unary operator appearing before it, such as '!flag'.
+ */
+struct PrefixExpression : public Expression {
+ PrefixExpression(Token::Kind op, std::unique_ptr<Expression> operand)
+ : INHERITED(operand->fOffset, kPrefix_Kind, operand->fType)
+ , fOperand(std::move(operand))
+ , fOperator(op) {}
+
+ bool isConstant() const override {
+ return fOperator == Token::MINUS && fOperand->isConstant();
+ }
+
+ bool hasSideEffects() const override {
+ return fOperator == Token::PLUSPLUS || fOperator == Token::MINUSMINUS ||
+ fOperand->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override {
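+        // fold negation of a float literal into a new literal; a float-literal operand here
+        // presumably implies the '-' operator, since '++' and '--' require lvalues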
+ if (fOperand->fKind == Expression::kFloatLiteral_Kind) {
+ return std::unique_ptr<Expression>(new FloatLiteral(
+ irGenerator.fContext,
+ fOffset,
+ -((FloatLiteral&) *fOperand).fValue));
+
+ }
+ return nullptr;
+ }
+
+ SKSL_FLOAT getFVecComponent(int index) const override {
+ SkASSERT(fOperator == Token::Kind::MINUS);
+ return -fOperand->getFVecComponent(index);
+ }
+
+ SKSL_INT getIVecComponent(int index) const override {
+ SkASSERT(fOperator == Token::Kind::MINUS);
+ return -fOperand->getIVecComponent(index);
+ }
+
+ SKSL_FLOAT getMatComponent(int col, int row) const override {
+ SkASSERT(fOperator == Token::Kind::MINUS);
+ return -fOperand->getMatComponent(col, row);
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new PrefixExpression(fOperator, fOperand->clone()));
+ }
+
+ String description() const override {
+ return Compiler::OperatorName(fOperator) + fOperand->description();
+ }
+
+ std::unique_ptr<Expression> fOperand;
+ const Token::Kind fOperator;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgram.h b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
new file mode 100644
index 0000000000..c62bd0b11d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAM
+#define SKSL_PROGRAM
+
+#include <vector>
+#include <memory>
+
+#include "src/sksl/ir/SkSLBoolLiteral.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLIntLiteral.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#ifdef SK_VULKAN
+#include "src/gpu/vk/GrVkCaps.h"
+#endif
+
+// name of the render target width uniform
+#define SKSL_RTWIDTH_NAME "u_skRTWidth"
+
+// name of the render target height uniform
+#define SKSL_RTHEIGHT_NAME "u_skRTHeight"
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * Represents a fully-digested program, ready for code generation.
+ */
+struct Program {
+ struct Settings {
+ struct Value {
+ Value(bool b)
+ : fKind(kBool_Kind)
+ , fValue(b) {}
+
+ Value(int i)
+ : fKind(kInt_Kind)
+ , fValue(i) {}
+
+ Value(unsigned int i)
+ : fKind(kInt_Kind)
+ , fValue(i) {}
+
+ Value(float f)
+ : fKind(kFloat_Kind)
+ , fValue(f) {}
+
+ std::unique_ptr<Expression> literal(const Context& context, int offset) const {
+ switch (fKind) {
+ case Program::Settings::Value::kBool_Kind:
+ return std::unique_ptr<Expression>(new BoolLiteral(context,
+ offset,
+ fValue));
+ case Program::Settings::Value::kInt_Kind:
+ return std::unique_ptr<Expression>(new IntLiteral(context,
+ offset,
+ fValue));
+ case Program::Settings::Value::kFloat_Kind:
+ return std::unique_ptr<Expression>(new FloatLiteral(context,
+ offset,
+ fValue));
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ }
+
+ enum {
+ kBool_Kind,
+ kInt_Kind,
+ kFloat_Kind,
+ } fKind;
+
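+            // note: stored as an int, so the float constructor truncates fractional values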
+ int fValue;
+ };
+
+#if defined(SKSL_STANDALONE) || !SK_SUPPORT_GPU
+ const StandaloneShaderCaps* fCaps = &standaloneCaps;
+#else
+ const GrShaderCaps* fCaps = nullptr;
+#ifdef SK_VULKAN
+ const GrVkCaps* fVkCaps = nullptr;
+#endif
+#endif
+ // if false, sk_FragCoord is exactly the same as gl_FragCoord. If true, the y coordinate
+ // must be flipped.
+ bool fFlipY = false;
+        // if true, the destination fragment color can be read from sk_FragColor; it must be
+        // declared inout.
+ bool fFragColorIsInOut = false;
+ // if true, Setting objects (e.g. sk_Caps.fbFetchSupport) should be replaced with their
+ // constant equivalents during compilation
+ bool fReplaceSettings = true;
+ // if true, all halfs are forced to be floats
+ bool fForceHighPrecision = false;
+ // if true, add -0.5 bias to LOD of all texture lookups
+ bool fSharpenTextures = false;
+ // if the program needs to create an RTHeight uniform, this is its offset in the uniform
+ // buffer
+ int fRTHeightOffset = -1;
+ std::unordered_map<String, Value> fArgs;
+ };
+
+ struct Inputs {
+ // if true, this program requires the render target width uniform to be defined
+ bool fRTWidth;
+
+ // if true, this program requires the render target height uniform to be defined
+ bool fRTHeight;
+
+ // if true, this program must be recompiled if the flipY setting changes. If false, the
+ // program will compile to the same code regardless of the flipY setting.
+ bool fFlipY;
+
+ void reset() {
+ fRTWidth = false;
+ fRTHeight = false;
+ fFlipY = false;
+ }
+
+ bool isEmpty() {
+ return !fRTWidth && !fRTHeight && !fFlipY;
+ }
+ };
+
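+    // Iterates over every element in the program, visiting the inherited elements (if any)
+    // before the program's own elements.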
+ class iterator {
+ public:
+ ProgramElement& operator*() {
+ if (fIter1 != fEnd1) {
+ return **fIter1;
+ }
+ return **fIter2;
+ }
+
+ iterator& operator++() {
+ if (fIter1 != fEnd1) {
+ ++fIter1;
+ return *this;
+ }
+ ++fIter2;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const {
+ return fIter1 == other.fIter1 && fIter2 == other.fIter2;
+ }
+
+ bool operator!=(const iterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ using inner = std::vector<std::unique_ptr<ProgramElement>>::iterator;
+
+ iterator(inner begin1, inner end1, inner begin2, inner end2)
+ : fIter1(begin1)
+ , fEnd1(end1)
+ , fIter2(begin2)
+ , fEnd2(end2) {}
+
+ inner fIter1;
+ inner fEnd1;
+ inner fIter2;
+ inner fEnd2;
+
+ friend struct Program;
+ };
+
+ class const_iterator {
+ public:
+ const ProgramElement& operator*() {
+ if (fIter1 != fEnd1) {
+ return **fIter1;
+ }
+ return **fIter2;
+ }
+
+ const_iterator& operator++() {
+ if (fIter1 != fEnd1) {
+ ++fIter1;
+ return *this;
+ }
+ ++fIter2;
+ return *this;
+ }
+
+ bool operator==(const const_iterator& other) const {
+ return fIter1 == other.fIter1 && fIter2 == other.fIter2;
+ }
+
+ bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ using inner = std::vector<std::unique_ptr<ProgramElement>>::const_iterator;
+
+ const_iterator(inner begin1, inner end1, inner begin2, inner end2)
+ : fIter1(begin1)
+ , fEnd1(end1)
+ , fIter2(begin2)
+ , fEnd2(end2) {}
+
+ inner fIter1;
+ inner fEnd1;
+ inner fIter2;
+ inner fEnd2;
+
+ friend struct Program;
+ };
+
+ enum Kind {
+ kFragment_Kind,
+ kVertex_Kind,
+ kGeometry_Kind,
+ kFragmentProcessor_Kind,
+ kPipelineStage_Kind,
+ kGeneric_Kind,
+ };
+
+ Program(Kind kind,
+ std::unique_ptr<String> source,
+ Settings settings,
+ std::shared_ptr<Context> context,
+ std::vector<std::unique_ptr<ProgramElement>>* inheritedElements,
+ std::vector<std::unique_ptr<ProgramElement>> elements,
+ std::shared_ptr<SymbolTable> symbols,
+ Inputs inputs)
+ : fKind(kind)
+ , fSource(std::move(source))
+ , fSettings(settings)
+ , fContext(context)
+ , fSymbols(symbols)
+ , fInputs(inputs)
+ , fInheritedElements(inheritedElements)
+ , fElements(std::move(elements)) {}
+
+ iterator begin() {
+ if (fInheritedElements) {
+ return iterator(fInheritedElements->begin(), fInheritedElements->end(),
+ fElements.begin(), fElements.end());
+ }
+ return iterator(fElements.begin(), fElements.end(), fElements.end(), fElements.end());
+ }
+
+ iterator end() {
+ if (fInheritedElements) {
+ return iterator(fInheritedElements->end(), fInheritedElements->end(),
+ fElements.end(), fElements.end());
+ }
+ return iterator(fElements.end(), fElements.end(), fElements.end(), fElements.end());
+ }
+
+ const_iterator begin() const {
+ if (fInheritedElements) {
+ return const_iterator(fInheritedElements->begin(), fInheritedElements->end(),
+ fElements.begin(), fElements.end());
+ }
+ return const_iterator(fElements.begin(), fElements.end(), fElements.end(), fElements.end());
+ }
+
+ const_iterator end() const {
+ if (fInheritedElements) {
+ return const_iterator(fInheritedElements->end(), fInheritedElements->end(),
+ fElements.end(), fElements.end());
+ }
+ return const_iterator(fElements.end(), fElements.end(), fElements.end(), fElements.end());
+ }
+
+ Kind fKind;
+ std::unique_ptr<String> fSource;
+ Settings fSettings;
+ std::shared_ptr<Context> fContext;
+ // it's important to keep fElements defined after (and thus destroyed before) fSymbols,
+ // because destroying elements can modify reference counts in symbols
+ std::shared_ptr<SymbolTable> fSymbols;
+ Inputs fInputs;
+ bool fIsOptimized = false;
+
+private:
+ std::vector<std::unique_ptr<ProgramElement>>* fInheritedElements;
+ std::vector<std::unique_ptr<ProgramElement>> fElements;
+
+ friend class Compiler;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h b/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h
new file mode 100644
index 0000000000..4caaebe97a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMELEMENT
+#define SKSL_PROGRAMELEMENT
+
+#include "src/sksl/ir/SkSLIRNode.h"
+
+#include <memory>
+
+namespace SkSL {
+
+/**
+ * Represents a top-level element (e.g. function or global variable) in a program.
+ */
+struct ProgramElement : public IRNode {
+ enum Kind {
+ kEnum_Kind,
+ kExtension_Kind,
+ kFunction_Kind,
+ kInterfaceBlock_Kind,
+ kModifiers_Kind,
+ kSection_Kind,
+ kVar_Kind
+ };
+
+ ProgramElement(int offset, Kind kind)
+ : INHERITED(offset)
+ , fKind(kind) {}
+
+ Kind fKind;
+
+ virtual std::unique_ptr<ProgramElement> clone() const = 0;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
new file mode 100644
index 0000000000..e61fa36c74
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_RETURNSTATEMENT
+#define SKSL_RETURNSTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'return' statement.
+ */
+struct ReturnStatement : public Statement {
+ ReturnStatement(int offset)
+ : INHERITED(offset, kReturn_Kind) {}
+
+ ReturnStatement(std::unique_ptr<Expression> expression)
+ : INHERITED(expression->fOffset, kReturn_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ if (fExpression) {
+ return std::unique_ptr<Statement>(new ReturnStatement(fExpression->clone()));
+ }
+ return std::unique_ptr<Statement>(new ReturnStatement(fOffset));
+ }
+
+ String description() const override {
+ if (fExpression) {
+ return "return " + fExpression->description() + ";";
+ } else {
+ return String("return;");
+ }
+ }
+
+ std::unique_ptr<Expression> fExpression;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSection.h b/gfx/skia/skia/src/sksl/ir/SkSLSection.h
new file mode 100644
index 0000000000..20856127ca
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSection.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SECTION
+#define SKSL_SECTION
+
+#include "src/sksl/ir/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * A section declaration (e.g. @body { body code here }).
+ */
+struct Section : public ProgramElement {
+ Section(int offset, String name, String arg, String text)
+ : INHERITED(offset, kSection_Kind)
+ , fName(std::move(name))
+ , fArgument(std::move(arg))
+ , fText(std::move(text)) {}
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new Section(fOffset, fName, fArgument, fText));
+ }
+
+ String description() const override {
+ String result = "@" + fName;
+ if (fArgument.size()) {
+ result += "(" + fArgument + ")";
+ }
+ result += " { " + fText + " }";
+ return result;
+ }
+
+ const String fName;
+ const String fArgument;
+ const String fText;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp
new file mode 100644
index 0000000000..1cb0d4afd3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+std::unique_ptr<Expression> Setting::constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) {
+ if (irGenerator.fSettings->fReplaceSettings) {
+ return VariableReference::copy_constant(irGenerator, fValue.get());
+ }
+ return nullptr;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSetting.h b/gfx/skia/skia/src/sksl/ir/SkSLSetting.h
new file mode 100644
index 0000000000..33c8a31acc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSetting.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SETTING
+#define SKSL_SETTING
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents a compile-time constant setting, such as sk_Caps.fbFetchSupport. These are generally
+ * collapsed down to their constant representations during the compilation process.
+ */
+struct Setting : public Expression {
+ Setting(int offset, String name, std::unique_ptr<Expression> value)
+ : INHERITED(offset, kSetting_Kind, value->fType)
+ , fName(std::move(name))
+ , fValue(std::move(value)) {
+ SkASSERT(fValue->isConstant());
+ }
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override;
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new Setting(fOffset, fName, fValue->clone()));
+ }
+
+ String description() const override {
+ return fName;
+ }
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ const String fName;
+ std::unique_ptr<Expression> fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLStatement.h
new file mode 100644
index 0000000000..ed8d33b142
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLStatement.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STATEMENT
+#define SKSL_STATEMENT
+
+#include "src/sksl/ir/SkSLIRNode.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all statements.
+ */
+struct Statement : public IRNode {
+ enum Kind {
+ kBlock_Kind,
+ kBreak_Kind,
+ kContinue_Kind,
+ kDiscard_Kind,
+ kDo_Kind,
+ kExpression_Kind,
+ kFor_Kind,
+ kGroup_Kind,
+ kIf_Kind,
+ kNop_Kind,
+ kReturn_Kind,
+ kSwitch_Kind,
+ kVarDeclaration_Kind,
+ kVarDeclarations_Kind,
+ kWhile_Kind
+ };
+
+ Statement(int offset, Kind kind)
+ : INHERITED(offset)
+ , fKind(kind) {}
+
+ virtual bool isEmpty() const {
+ return false;
+ }
+
+ virtual std::unique_ptr<Statement> clone() const = 0;
+
+ const Kind fKind;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h b/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h
new file mode 100644
index 0000000000..b1ddb012ec
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWITCHCASE
+#define SKSL_SWITCHCASE
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A single case of a 'switch' statement.
+ */
+struct SwitchCase : public Statement {
+ SwitchCase(int offset, std::unique_ptr<Expression> value,
+ std::vector<std::unique_ptr<Statement>> statements)
+ : INHERITED(offset, kSwitch_Kind)
+ , fValue(std::move(value))
+ , fStatements(std::move(statements)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ std::vector<std::unique_ptr<Statement>> cloned;
+ for (const auto& s : fStatements) {
+ cloned.push_back(s->clone());
+ }
+ return std::unique_ptr<Statement>(new SwitchCase(fOffset,
+ fValue ? fValue->clone() : nullptr,
+ std::move(cloned)));
+ }
+
+ String description() const override {
+ String result;
+ if (fValue) {
+ result.appendf("case %s:\n", fValue->description().c_str());
+ } else {
+ result += "default:\n";
+ }
+ for (const auto& s : fStatements) {
+ result += s->description() + "\n";
+ }
+ return result;
+ }
+
+ // null value implies "default" case
+ std::unique_ptr<Expression> fValue;
+ std::vector<std::unique_ptr<Statement>> fStatements;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h
new file mode 100644
index 0000000000..0777c5c5c6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWITCHSTATEMENT
+#define SKSL_SWITCHSTATEMENT
+
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+
+namespace SkSL {
+
+class SymbolTable;
+
+/**
+ * A 'switch' statement.
+ */
+struct SwitchStatement : public Statement {
+ SwitchStatement(int offset, bool isStatic, std::unique_ptr<Expression> value,
+ std::vector<std::unique_ptr<SwitchCase>> cases,
+ const std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(offset, kSwitch_Kind)
+ , fIsStatic(isStatic)
+ , fValue(std::move(value))
+ , fSymbols(std::move(symbols))
+ , fCases(std::move(cases)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ std::vector<std::unique_ptr<SwitchCase>> cloned;
+ for (const auto& s : fCases) {
+ cloned.push_back(std::unique_ptr<SwitchCase>((SwitchCase*) s->clone().release()));
+ }
+ return std::unique_ptr<Statement>(new SwitchStatement(fOffset, fIsStatic, fValue->clone(),
+ std::move(cloned), fSymbols));
+ }
+
+ String description() const override {
+ String result;
+ if (fIsStatic) {
+ result += "@";
+ }
+ result += String::printf("switch (%s) {\n", fValue->description().c_str());
+ for (const auto& c : fCases) {
+ result += c->description();
+ }
+ result += "}";
+ return result;
+ }
+
+ bool fIsStatic;
+ std::unique_ptr<Expression> fValue;
+ // it's important to keep fCases defined after (and thus destroyed before) fSymbols, because
+ // destroying statements can modify reference counts in symbols
+ const std::shared_ptr<SymbolTable> fSymbols;
+ std::vector<std::unique_ptr<SwitchCase>> fCases;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
new file mode 100644
index 0000000000..5067b844a4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWIZZLE
+#define SKSL_SWIZZLE
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+// represents a swizzle component of constant 0, as in x.rgb0
+const int SKSL_SWIZZLE_0 = -2;
+
+// represents a swizzle component of constant 1, as in x.rgb1
+const int SKSL_SWIZZLE_1 = -1;
+
+/**
+ * Given a type and a swizzle component count, returns the type that will result from swizzling. For
+ * instance, swizzling a float3 with two components will result in a float2. It is possible to
+ * swizzle with more components than the source vector, as in 'float2(1).xxxx'.
+ */
+static const Type& get_type(const Context& context, Expression& value, size_t count) {
+ const Type& base = value.fType.componentType();
+ if (count == 1) {
+ return base;
+ }
+ if (base == *context.fFloat_Type) {
+ switch (count) {
+ case 2: return *context.fFloat2_Type;
+ case 3: return *context.fFloat3_Type;
+ case 4: return *context.fFloat4_Type;
+ }
+ } else if (base == *context.fHalf_Type) {
+ switch (count) {
+ case 2: return *context.fHalf2_Type;
+ case 3: return *context.fHalf3_Type;
+ case 4: return *context.fHalf4_Type;
+ }
+ } else if (base == *context.fDouble_Type) {
+ switch (count) {
+ case 2: return *context.fDouble2_Type;
+ case 3: return *context.fDouble3_Type;
+ case 4: return *context.fDouble4_Type;
+ }
+ } else if (base == *context.fInt_Type) {
+ switch (count) {
+ case 2: return *context.fInt2_Type;
+ case 3: return *context.fInt3_Type;
+ case 4: return *context.fInt4_Type;
+ }
+ } else if (base == *context.fShort_Type) {
+ switch (count) {
+ case 2: return *context.fShort2_Type;
+ case 3: return *context.fShort3_Type;
+ case 4: return *context.fShort4_Type;
+ }
+ } else if (base == *context.fByte_Type) {
+ switch (count) {
+ case 2: return *context.fByte2_Type;
+ case 3: return *context.fByte3_Type;
+ case 4: return *context.fByte4_Type;
+ }
+ } else if (base == *context.fUInt_Type) {
+ switch (count) {
+ case 2: return *context.fUInt2_Type;
+ case 3: return *context.fUInt3_Type;
+ case 4: return *context.fUInt4_Type;
+ }
+ } else if (base == *context.fUShort_Type) {
+ switch (count) {
+ case 2: return *context.fUShort2_Type;
+ case 3: return *context.fUShort3_Type;
+ case 4: return *context.fUShort4_Type;
+ }
+ } else if (base == *context.fUByte_Type) {
+ switch (count) {
+ case 2: return *context.fUByte2_Type;
+ case 3: return *context.fUByte3_Type;
+ case 4: return *context.fUByte4_Type;
+ }
+ } else if (base == *context.fBool_Type) {
+ switch (count) {
+ case 2: return *context.fBool2_Type;
+ case 3: return *context.fBool3_Type;
+ case 4: return *context.fBool4_Type;
+ }
+ }
+ ABORT("cannot swizzle %s\n", value.description().c_str());
+}
+
+/**
+ * Represents a vector swizzle operation such as 'float2(1, 2, 3).zyx'.
+ */
+struct Swizzle : public Expression {
+ Swizzle(const Context& context, std::unique_ptr<Expression> base, std::vector<int> components)
+ : INHERITED(base->fOffset, kSwizzle_Kind, get_type(context, *base, components.size()))
+ , fBase(std::move(base))
+ , fComponents(std::move(components)) {
+ SkASSERT(fComponents.size() >= 1 && fComponents.size() <= 4);
+ }
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override {
+ if (fBase->fKind == Expression::kConstructor_Kind && fBase->isConstant()) {
+ // we're swizzling a constant vector, e.g. float4(1).x. Simplify it.
+ SkASSERT(fBase->fKind == Expression::kConstructor_Kind);
+ if (fType.isInteger()) {
+ SkASSERT(fComponents.size() == 1);
+ int64_t value = ((Constructor&) *fBase).getIVecComponent(fComponents[0]);
+ return std::unique_ptr<Expression>(new IntLiteral(irGenerator.fContext,
+ -1,
+ value));
+ } else if (fType.isFloat()) {
+ SkASSERT(fComponents.size() == 1);
+ double value = ((Constructor&) *fBase).getFVecComponent(fComponents[0]);
+ return std::unique_ptr<Expression>(new FloatLiteral(irGenerator.fContext,
+ -1,
+ value));
+ }
+ }
+ return nullptr;
+ }
+
+ bool hasSideEffects() const override {
+ return fBase->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new Swizzle(fType, fBase->clone(), fComponents));
+ }
+
+ String description() const override {
+ String result = fBase->description() + ".";
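+        // component indices -2 and -1 denote the constants 0 and 1; 0 through 3 map to xyzw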
+ for (int x : fComponents) {
+ result += "01xyzw"[x + 2];
+ }
+ return result;
+ }
+
+ std::unique_ptr<Expression> fBase;
+ std::vector<int> fComponents;
+
+ typedef Expression INHERITED;
+
+private:
+ Swizzle(const Type& type, std::unique_ptr<Expression> base, std::vector<int> components)
+ : INHERITED(base->fOffset, kSwizzle_Kind, type)
+ , fBase(std::move(base))
+ , fComponents(std::move(components)) {
+ SkASSERT(fComponents.size() >= 1 && fComponents.size() <= 4);
+ }
+
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h b/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h
new file mode 100644
index 0000000000..4ee4d3d90d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOL
+#define SKSL_SYMBOL
+
+#include "src/sksl/ir/SkSLIRNode.h"
+
+namespace SkSL {
+
+/**
+ * Represents a symbol table entry.
+ */
+struct Symbol : public IRNode {
+ enum Kind {
+ kFunctionDeclaration_Kind,
+ kUnresolvedFunction_Kind,
+ kType_Kind,
+ kVariable_Kind,
+ kField_Kind,
+ kExternal_Kind
+ };
+
+ Symbol(int offset, Kind kind, StringFragment name)
+ : INHERITED(offset)
+ , fKind(kind)
+ , fName(name) {}
+
+ virtual ~Symbol() {}
+
+ Kind fKind;
+ StringFragment fName;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
new file mode 100644
index 0000000000..08bd6c2a65
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLUnresolvedFunction.h"
+
+namespace SkSL {
+
+std::vector<const FunctionDeclaration*> SymbolTable::GetFunctions(const Symbol& s) {
+ switch (s.fKind) {
+ case Symbol::kFunctionDeclaration_Kind:
+ return { &((FunctionDeclaration&) s) };
+ case Symbol::kUnresolvedFunction_Kind:
+ return ((UnresolvedFunction&) s).fFunctions;
+ default:
+ return std::vector<const FunctionDeclaration*>();
+ }
+}
+
+const Symbol* SymbolTable::operator[](StringFragment name) {
+ const auto& entry = fSymbols.find(name);
+ if (entry == fSymbols.end()) {
+ if (fParent) {
+ return (*fParent)[name];
+ }
+ return nullptr;
+ }
+ if (fParent) {
+ auto functions = GetFunctions(*entry->second);
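+        // the name resolves to one or more functions in this scope; merge in any
+        // parent-scope overloads whose signatures are not already present here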
+ if (functions.size() > 0) {
+ bool modified = false;
+ const Symbol* previous = (*fParent)[name];
+ if (previous) {
+ auto previousFunctions = GetFunctions(*previous);
+ for (const FunctionDeclaration* prev : previousFunctions) {
+ bool found = false;
+ for (const FunctionDeclaration* current : functions) {
+ if (current->matches(*prev)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ functions.push_back(prev);
+ modified = true;
+ }
+ }
+ if (modified) {
+ SkASSERT(functions.size() > 1);
+ return this->takeOwnership(std::unique_ptr<Symbol>(
+ new UnresolvedFunction(functions)));
+ }
+ }
+ }
+ }
+ return entry->second;
+}
+
+Symbol* SymbolTable::takeOwnership(std::unique_ptr<Symbol> s) {
+ Symbol* result = s.get();
+ fOwnedSymbols.push_back(std::move(s));
+ return result;
+}
+
+IRNode* SymbolTable::takeOwnership(std::unique_ptr<IRNode> n) {
+ IRNode* result = n.get();
+ fOwnedNodes.push_back(std::move(n));
+ return result;
+}
+
+void SymbolTable::add(StringFragment name, std::unique_ptr<Symbol> symbol) {
+ this->addWithoutOwnership(name, symbol.get());
+ this->takeOwnership(std::move(symbol));
+}
+
+void SymbolTable::addWithoutOwnership(StringFragment name, const Symbol* symbol) {
+ const auto& existing = fSymbols.find(name);
+ if (existing == fSymbols.end()) {
+ fSymbols[name] = symbol;
+ } else if (symbol->fKind == Symbol::kFunctionDeclaration_Kind) {
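+        // the name is being redefined by another function; combine the declarations into
+        // a single UnresolvedFunction overload set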
+ const Symbol* oldSymbol = existing->second;
+ if (oldSymbol->fKind == Symbol::kFunctionDeclaration_Kind) {
+ std::vector<const FunctionDeclaration*> functions;
+ functions.push_back((const FunctionDeclaration*) oldSymbol);
+ functions.push_back((const FunctionDeclaration*) symbol);
+ std::unique_ptr<Symbol> u = std::unique_ptr<Symbol>(new UnresolvedFunction(std::move(
+ functions)));
+ fSymbols[name] = this->takeOwnership(std::move(u));
+ } else if (oldSymbol->fKind == Symbol::kUnresolvedFunction_Kind) {
+ std::vector<const FunctionDeclaration*> functions;
+ for (const auto* f : ((UnresolvedFunction&) *oldSymbol).fFunctions) {
+ functions.push_back(f);
+ }
+ functions.push_back((const FunctionDeclaration*) symbol);
+ std::unique_ptr<Symbol> u = std::unique_ptr<Symbol>(new UnresolvedFunction(std::move(
+ functions)));
+ fSymbols[name] = this->takeOwnership(std::move(u));
+ }
+ } else {
+ fErrorReporter.error(symbol->fOffset, "symbol '" + name + "' was already defined");
+ }
+}
+
+void SymbolTable::markAllFunctionsBuiltin() {
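+    // a function that is declared but not defined at this point presumably came from a
+    // pre-included module rather than user code, so flag it as a builtin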
+ for (const auto& pair : fSymbols) {
+ switch (pair.second->fKind) {
+ case Symbol::kFunctionDeclaration_Kind:
+ if (!((FunctionDeclaration&)*pair.second).fDefined) {
+ ((FunctionDeclaration&)*pair.second).fBuiltin = true;
+ }
+ break;
+ case Symbol::kUnresolvedFunction_Kind:
+ for (auto& f : ((UnresolvedFunction&) *pair.second).fFunctions) {
+ if (!((FunctionDeclaration*)f)->fDefined) {
+ ((FunctionDeclaration*)f)->fBuiltin = true;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+std::unordered_map<StringFragment, const Symbol*>::iterator SymbolTable::begin() {
+ return fSymbols.begin();
+}
+
+std::unordered_map<StringFragment, const Symbol*>::iterator SymbolTable::end() {
+ return fSymbols.end();
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
new file mode 100644
index 0000000000..6969ba5e08
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOLTABLE
+#define SKSL_SYMBOLTABLE
+
+#include <unordered_map>
+#include <memory>
+#include <vector>
+#include "src/sksl/SkSLErrorReporter.h"
+#include "src/sksl/ir/SkSLSymbol.h"
+
+namespace SkSL {
+
+struct FunctionDeclaration;
+
+/**
+ * Maps identifiers to symbols. Functions, in particular, are mapped to either FunctionDeclaration
+ * or UnresolvedFunction depending on whether they are overloaded or not.
+ */
+class SymbolTable {
+public:
+ SymbolTable(ErrorReporter* errorReporter)
+ : fErrorReporter(*errorReporter) {}
+
+ SymbolTable(std::shared_ptr<SymbolTable> parent, ErrorReporter* errorReporter)
+ : fParent(parent)
+ , fErrorReporter(*errorReporter) {}
+
+ const Symbol* operator[](StringFragment name);
+
+ void add(StringFragment name, std::unique_ptr<Symbol> symbol);
+
+ void addWithoutOwnership(StringFragment name, const Symbol* symbol);
+
+ Symbol* takeOwnership(std::unique_ptr<Symbol> s);
+
+ IRNode* takeOwnership(std::unique_ptr<IRNode> n);
+
+ void markAllFunctionsBuiltin();
+
+ std::unordered_map<StringFragment, const Symbol*>::iterator begin();
+
+ std::unordered_map<StringFragment, const Symbol*>::iterator end();
+
+ const std::shared_ptr<SymbolTable> fParent;
+
+private:
+ static std::vector<const FunctionDeclaration*> GetFunctions(const Symbol& s);
+
+ std::vector<std::unique_ptr<Symbol>> fOwnedSymbols;
+
+ std::vector<std::unique_ptr<IRNode>> fOwnedNodes;
+
+ std::unordered_map<StringFragment, const Symbol*> fSymbols;
+
+ ErrorReporter& fErrorReporter;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
new file mode 100644
index 0000000000..e2bf9ed28b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TERNARYEXPRESSION
+#define SKSL_TERNARYEXPRESSION
+
+#include "src/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A ternary expression (test ? ifTrue : ifFalse).
+ */
+struct TernaryExpression : public Expression {
+ TernaryExpression(int offset, std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> ifTrue, std::unique_ptr<Expression> ifFalse)
+ : INHERITED(offset, kTernary_Kind, ifTrue->fType)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {
+ SkASSERT(fIfTrue->fType == fIfFalse->fType);
+ }
+
+ bool hasSideEffects() const override {
+ return fTest->hasSideEffects() || fIfTrue->hasSideEffects() || fIfFalse->hasSideEffects();
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new TernaryExpression(fOffset, fTest->clone(),
+ fIfTrue->clone(),
+ fIfFalse->clone()));
+ }
+
+ String description() const override {
+ return "(" + fTest->description() + " ? " + fIfTrue->description() + " : " +
+ fIfFalse->description() + ")";
+ }
+
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Expression> fIfTrue;
+ std::unique_ptr<Expression> fIfFalse;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.cpp b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
new file mode 100644
index 0000000000..5d735fc11f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+int Type::coercionCost(const Type& other) const {
+ if (*this == other) {
+ return 0;
+ }
+ if (this->kind() == kNullable_Kind && other.kind() != kNullable_Kind) {
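+        // coercing away nullability costs one step more than converting the base type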
+ int result = this->componentType().coercionCost(other);
+ if (result != INT_MAX) {
+ ++result;
+ }
+ return result;
+ }
+ if (this->fName == "null" && other.kind() == kNullable_Kind) {
+ return 0;
+ }
+ if (this->kind() == kVector_Kind && other.kind() == kVector_Kind) {
+ if (this->columns() == other.columns()) {
+ return this->componentType().coercionCost(other.componentType());
+ }
+ return INT_MAX;
+ }
+ if (this->kind() == kMatrix_Kind) {
+ if (this->columns() == other.columns() && this->rows() == other.rows()) {
+ return this->componentType().coercionCost(other.componentType());
+ }
+ return INT_MAX;
+ }
+ if (this->isNumber() && other.isNumber() && other.priority() > this->priority()) {
+ return other.priority() - this->priority();
+ }
+ for (size_t i = 0; i < fCoercibleTypes.size(); i++) {
+ if (*fCoercibleTypes[i] == other) {
+ return (int) i + 1;
+ }
+ }
+ return INT_MAX;
+}
+
+const Type& Type::toCompound(const Context& context, int columns, int rows) const {
+ SkASSERT(this->kind() == Type::kScalar_Kind);
+ if (columns == 1 && rows == 1) {
+ return *this;
+ }
+ if (*this == *context.fFloat_Type || *this == *context.fFloatLiteral_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fFloat2_Type;
+ case 3: return *context.fFloat3_Type;
+ case 4: return *context.fFloat4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fFloat2x2_Type;
+ case 3: return *context.fFloat3x2_Type;
+ case 4: return *context.fFloat4x2_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fFloat2x3_Type;
+ case 3: return *context.fFloat3x3_Type;
+ case 4: return *context.fFloat4x3_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fFloat2x4_Type;
+ case 3: return *context.fFloat3x4_Type;
+ case 4: return *context.fFloat4x4_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fHalf_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fHalf2_Type;
+ case 3: return *context.fHalf3_Type;
+ case 4: return *context.fHalf4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fHalf2x2_Type;
+ case 3: return *context.fHalf3x2_Type;
+ case 4: return *context.fHalf4x2_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fHalf2x3_Type;
+ case 3: return *context.fHalf3x3_Type;
+ case 4: return *context.fHalf4x3_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fHalf2x4_Type;
+ case 3: return *context.fHalf3x4_Type;
+ case 4: return *context.fHalf4x4_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fDouble_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fDouble2_Type;
+ case 3: return *context.fDouble3_Type;
+ case 4: return *context.fDouble4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fDouble2x2_Type;
+ case 3: return *context.fDouble3x2_Type;
+ case 4: return *context.fDouble4x2_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fDouble2x3_Type;
+ case 3: return *context.fDouble3x3_Type;
+ case 4: return *context.fDouble4x3_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fDouble2x4_Type;
+ case 3: return *context.fDouble3x4_Type;
+ case 4: return *context.fDouble4x4_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fInt_Type || *this == *context.fIntLiteral_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fInt2_Type;
+ case 3: return *context.fInt3_Type;
+ case 4: return *context.fInt4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fShort_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fShort2_Type;
+ case 3: return *context.fShort3_Type;
+ case 4: return *context.fShort4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fByte_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fByte2_Type;
+ case 3: return *context.fByte3_Type;
+ case 4: return *context.fByte4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fUInt_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fUInt2_Type;
+ case 3: return *context.fUInt3_Type;
+ case 4: return *context.fUInt4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fUShort_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fUShort2_Type;
+ case 3: return *context.fUShort3_Type;
+ case 4: return *context.fUShort4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fUByte_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fUByte2_Type;
+ case 3: return *context.fUByte3_Type;
+ case 4: return *context.fUByte4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fBool_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fBool2_Type;
+ case 3: return *context.fBool3_Type;
+ case 4: return *context.fBool4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ }
+ ABORT("unsupported scalar_to_compound type %s", this->description().c_str());
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.h b/gfx/skia/skia/src/sksl/ir/SkSLType.h
new file mode 100644
index 0000000000..49ea8a5531
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKIASL_TYPE
+#define SKIASL_TYPE
+
+#include "src/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLSymbol.h"
+#include "src/sksl/spirv.h"
+#include <climits>
+#include <vector>
+#include <memory>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * Represents a type, such as int or float4.
+ */
+class Type : public Symbol {
+public:
+ struct Field {
+ Field(Modifiers modifiers, StringFragment name, const Type* type)
+ : fModifiers(modifiers)
+ , fName(name)
+ , fType(std::move(type)) {}
+
+ const String description() const {
+ return fType->description() + " " + fName + ";";
+ }
+
+ Modifiers fModifiers;
+ StringFragment fName;
+ const Type* fType;
+ };
+
+ enum Kind {
+ kArray_Kind,
+ kEnum_Kind,
+ kGeneric_Kind,
+ kNullable_Kind,
+ kMatrix_Kind,
+ kOther_Kind,
+ kSampler_Kind,
+ kSeparateSampler_Kind,
+ kScalar_Kind,
+ kStruct_Kind,
+ kTexture_Kind,
+ kVector_Kind
+ };
+
+ enum NumberKind {
+ kFloat_NumberKind,
+ kSigned_NumberKind,
+ kUnsigned_NumberKind,
+ kNonnumeric_NumberKind
+ };
+
+ // Create an "other" (special) type with the given name. These types cannot be directly
+ // referenced from user code.
+ Type(const char* name)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kOther_Kind)
+ , fNumberKind(kNonnumeric_NumberKind) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create an "other" (special) type that supports field access.
+ Type(const char* name, std::vector<Field> fields)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kOther_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fFields(std::move(fields)) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a simple type.
+ Type(String name, Kind kind)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(std::move(name))
+ , fTypeKind(kind)
+ , fNumberKind(kNonnumeric_NumberKind) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a generic type which maps to the listed types.
+ Type(const char* name, std::vector<const Type*> types)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kGeneric_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fCoercibleTypes(std::move(types)) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a struct type with the given fields.
+ Type(int offset, String name, std::vector<Field> fields)
+ : INHERITED(offset, kType_Kind, StringFragment())
+ , fNameString(std::move(name))
+ , fTypeKind(kStruct_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fFields(std::move(fields)) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a scalar type.
+ Type(const char* name, NumberKind numberKind, int priority, bool highPrecision = false)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kScalar_Kind)
+ , fNumberKind(numberKind)
+ , fPriority(priority)
+ , fColumns(1)
+ , fRows(1)
+ , fHighPrecision(highPrecision) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a scalar type which can be coerced to the listed types.
+ Type(const char* name,
+ NumberKind numberKind,
+ int priority,
+ std::vector<const Type*> coercibleTypes)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kScalar_Kind)
+ , fNumberKind(numberKind)
+ , fPriority(priority)
+ , fCoercibleTypes(std::move(coercibleTypes))
+ , fColumns(1)
+ , fRows(1) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a nullable type.
+ Type(String name, Kind kind, const Type& componentType)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(std::move(name))
+ , fTypeKind(kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fComponentType(&componentType)
+ , fColumns(1)
+ , fRows(1)
+ , fDimensions(SpvDim1D) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a vector type.
+ Type(const char* name, const Type& componentType, int columns)
+ : Type(name, kVector_Kind, componentType, columns) {}
+
+ // Create a vector or array type.
+ Type(String name, Kind kind, const Type& componentType, int columns)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(std::move(name))
+ , fTypeKind(kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fComponentType(&componentType)
+ , fColumns(columns)
+ , fRows(1)
+ , fDimensions(SpvDim1D) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a matrix type.
+ Type(const char* name, const Type& componentType, int columns, int rows)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kMatrix_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fComponentType(&componentType)
+ , fColumns(columns)
+ , fRows(rows)
+ , fDimensions(SpvDim1D) {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a texture type.
+ Type(const char* name, SpvDim_ dimensions, bool isDepth, bool isArrayed, bool isMultisampled,
+ bool isSampled)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kTexture_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fDimensions(dimensions)
+ , fIsDepth(isDepth)
+ , fIsArrayed(isArrayed)
+ , fIsMultisampled(isMultisampled)
+ , fIsSampled(isSampled)
+ {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ // Create a sampler type.
+ Type(const char* name, const Type& textureType)
+ : INHERITED(-1, kType_Kind, StringFragment())
+ , fNameString(name)
+ , fTypeKind(kSampler_Kind)
+ , fNumberKind(kNonnumeric_NumberKind)
+ , fDimensions(textureType.dimensions())
+ , fIsDepth(textureType.isDepth())
+ , fIsArrayed(textureType.isArrayed())
+ , fIsMultisampled(textureType.isMultisampled())
+ , fIsSampled(textureType.isSampled())
+ , fTextureType(&textureType)
+ {
+ fName.fChars = fNameString.c_str();
+ fName.fLength = fNameString.size();
+ }
+
+ const String& name() const {
+ return fNameString;
+ }
+
+ String description() const override {
+ if (fNameString == "$floatLiteral") {
+ return "float";
+ }
+ if (fNameString == "$intLiteral") {
+ return "int";
+ }
+ return fNameString;
+ }
+
+ bool operator==(const Type& other) const {
+ return fName == other.fName;
+ }
+
+ bool operator!=(const Type& other) const {
+ return fName != other.fName;
+ }
+
+ /**
+ * Returns the category (scalar, vector, matrix, etc.) of this type.
+ */
+ Kind kind() const {
+ return fTypeKind;
+ }
+
+ /**
+ * Returns true if this is a numeric scalar type.
+ */
+ bool isNumber() const {
+ return fNumberKind != kNonnumeric_NumberKind;
+ }
+
+ /**
+ * Returns true if this is a floating-point scalar type (float, half, or double).
+ */
+ bool isFloat() const {
+ return fNumberKind == kFloat_NumberKind;
+ }
+
+ /**
+ * Returns true if this is a signed scalar type (int or short).
+ */
+ bool isSigned() const {
+ return fNumberKind == kSigned_NumberKind;
+ }
+
+ /**
+ * Returns true if this is an unsigned scalar type (uint or ushort).
+ */
+ bool isUnsigned() const {
+ return fNumberKind == kUnsigned_NumberKind;
+ }
+
+ /**
+ * Returns true if this is a signed or unsigned integer.
+ */
+ bool isInteger() const {
+ return isSigned() || isUnsigned();
+ }
+
+ /**
+ * Returns the "priority" of a number type, in order of double > float > half > int > short.
+ * When operating on two number types, the result is the higher-priority type.
+ */
+ int priority() const {
+ return fPriority;
+ }
+
+ /**
+ * Returns true if an instance of this type can be freely coerced (implicitly converted) to
+ * another type.
+ */
+ bool canCoerceTo(const Type& other) const {
+ return coercionCost(other) != INT_MAX;
+ }
+
+ /**
+ * Determines the "cost" of coercing (implicitly converting) this type to another type. The cost
+ * is a number with no particular meaning other than that lower costs are preferable to higher
+ * costs. Returns INT_MAX if the coercion is not possible.
+ */
+ int coercionCost(const Type& other) const;
+
+ /**
+ * For matrices and vectors, returns the type of individual cells (e.g. mat2 has a component
+ * type of kFloat_Type). For all other types, causes an SkASSERTion failure.
+ */
+ const Type& componentType() const {
+ SkASSERT(fComponentType);
+ return *fComponentType;
+ }
+
+ /**
+     * For texture samplers, returns the type of texture it samples (e.g., sampler2D has
+ * a texture type of texture2D).
+ */
+ const Type& textureType() const {
+ SkASSERT(fTextureType);
+ return *fTextureType;
+ }
+
+ /**
+ * For nullable types, returns the base type, otherwise returns the type itself.
+ */
+ const Type& nonnullable() const {
+ if (fTypeKind == kNullable_Kind) {
+ return this->componentType();
+ }
+ return *this;
+ }
+
+ /**
+     * For matrices and vectors, returns the number of columns (e.g. both mat3 and float3 return 3).
+ * For scalars, returns 1. For arrays, returns either the size of the array (if known) or -1.
+ * For all other types, causes an SkASSERTion failure.
+ */
+ int columns() const {
+ SkASSERT(fTypeKind == kScalar_Kind || fTypeKind == kVector_Kind ||
+ fTypeKind == kMatrix_Kind || fTypeKind == kArray_Kind);
+ return fColumns;
+ }
+
+ /**
+ * For matrices, returns the number of rows (e.g. mat2x4 returns 4). For vectors and scalars,
+ * returns 1. For all other types, causes an SkASSERTion failure.
+ */
+ int rows() const {
+ SkASSERT(fRows > 0);
+ return fRows;
+ }
+
+ const std::vector<Field>& fields() const {
+ SkASSERT(fTypeKind == kStruct_Kind || fTypeKind == kOther_Kind);
+ return fFields;
+ }
+
+ /**
+ * For generic types, returns the types that this generic type can substitute for. For other
+ * types, returns a list of other types that this type can be coerced into.
+ */
+ const std::vector<const Type*>& coercibleTypes() const {
+ SkASSERT(fCoercibleTypes.size() > 0);
+ return fCoercibleTypes;
+ }
+
+ SpvDim_ dimensions() const {
+ SkASSERT(kSampler_Kind == fTypeKind || kTexture_Kind == fTypeKind);
+ return fDimensions;
+ }
+
+ bool isDepth() const {
+ SkASSERT(kSampler_Kind == fTypeKind || kTexture_Kind == fTypeKind);
+ return fIsDepth;
+ }
+
+ bool isArrayed() const {
+ SkASSERT(kSampler_Kind == fTypeKind || kTexture_Kind == fTypeKind);
+ return fIsArrayed;
+ }
+
+ bool isMultisampled() const {
+ SkASSERT(kSampler_Kind == fTypeKind || kTexture_Kind == fTypeKind);
+ return fIsMultisampled;
+ }
+
+ bool isSampled() const {
+ SkASSERT(kSampler_Kind == fTypeKind || kTexture_Kind == fTypeKind);
+ return fIsSampled;
+ }
+
+ bool highPrecision() const {
+ if (fComponentType) {
+ return fComponentType->highPrecision();
+ }
+ return fHighPrecision;
+ }
+
+ /**
+ * Returns the corresponding vector or matrix type with the specified number of columns and
+ * rows.
+ */
+ const Type& toCompound(const Context& context, int columns, int rows) const;
+
+private:
+ typedef Symbol INHERITED;
+
+ String fNameString;
+ Kind fTypeKind;
+ // always kNonnumeric_NumberKind for non-scalar values
+ NumberKind fNumberKind;
+ int fPriority = -1;
+ const Type* fComponentType = nullptr;
+ std::vector<const Type*> fCoercibleTypes;
+ int fColumns = -1;
+ int fRows = -1;
+ std::vector<Field> fFields;
+ SpvDim_ fDimensions = SpvDim1D;
+ bool fIsDepth = false;
+ bool fIsArrayed = false;
+ bool fIsMultisampled = false;
+ bool fIsSampled = false;
+ bool fHighPrecision = false;
+ const Type* fTextureType = nullptr;
+};
+
+} // namespace
+
+#endif
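A hedged aside (not part of the imported sources): the priority() rule above is what drives mixed-type arithmetic. A minimal sketch of how a front end might pick the result type of a binary expression, assuming both operands are numeric scalars; the helper name binaryResultType is hypothetical:

    // Sketch only: 'half + float' yields float, because float has the higher
    // priority (double > float > half > int > short).
    static const SkSL::Type& binaryResultType(const SkSL::Type& a, const SkSL::Type& b) {
        SkASSERT(a.isNumber() && b.isNumber());
        return a.priority() >= b.priority() ? a : b;
    }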
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
new file mode 100644
index 0000000000..20a533aeb0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TYPEREFERENCE
+#define SKSL_TYPEREFERENCE
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents an identifier referring to a type. This is an intermediate value: TypeReferences are
+ * always eventually replaced by Constructors in valid programs.
+ */
+struct TypeReference : public Expression {
+ TypeReference(const Context& context, int offset, const Type& value)
+ : INHERITED(offset, kTypeReference_Kind, *context.fInvalid_Type)
+ , fValue(value) {}
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ String description() const override {
+ return String(fValue.fName);
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new TypeReference(fOffset, fValue, &fType));
+ }
+
+ const Type& fValue;
+
+ typedef Expression INHERITED;
+
+private:
+ TypeReference(int offset, const Type& value, const Type* type)
+ : INHERITED(offset, kTypeReference_Kind, *type)
+ , fValue(value) {}
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h b/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h
new file mode 100644
index 0000000000..d32cd42856
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_UNRESOLVEDFUNCTION
+#define SKSL_UNRESOLVEDFUNCTION
+
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A symbol representing multiple functions with the same name.
+ */
+struct UnresolvedFunction : public Symbol {
+ UnresolvedFunction(std::vector<const FunctionDeclaration*> funcs)
+ : INHERITED(-1, kUnresolvedFunction_Kind, funcs[0]->fName)
+ , fFunctions(std::move(funcs)) {
+#ifdef DEBUG
+        // iterate over fFunctions, not funcs: funcs has already been moved from
+        for (auto func : fFunctions) {
+ SkASSERT(func->fName == fName);
+ }
+#endif
+ }
+
+ String description() const override {
+ return fName;
+ }
+
+ const std::vector<const FunctionDeclaration*> fFunctions;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h
new file mode 100644
index 0000000000..82dbd8616d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARDECLARATIONS
+#define SKSL_VARDECLARATIONS
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A single variable declaration within a var declaration statement. For instance, the statement
+ * 'int x = 2, y[3];' is a VarDeclarations statement containing two individual VarDeclaration
+ * instances.
+ */
+struct VarDeclaration : public Statement {
+ VarDeclaration(const Variable* var,
+ std::vector<std::unique_ptr<Expression>> sizes,
+ std::unique_ptr<Expression> value)
+ : INHERITED(var->fOffset, Statement::kVarDeclaration_Kind)
+ , fVar(var)
+ , fSizes(std::move(sizes))
+ , fValue(std::move(value)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ std::vector<std::unique_ptr<Expression>> sizesClone;
+ for (const auto& s : fSizes) {
+ if (s) {
+ sizesClone.push_back(s->clone());
+ } else {
+ sizesClone.push_back(nullptr);
+ }
+ }
+ return std::unique_ptr<Statement>(new VarDeclaration(fVar, std::move(sizesClone),
+ fValue ? fValue->clone() : nullptr));
+ }
+
+ String description() const override {
+ String result = fVar->fName;
+ for (const auto& size : fSizes) {
+ if (size) {
+ result += "[" + size->description() + "]";
+ } else {
+ result += "[]";
+ }
+ }
+ if (fValue) {
+ result += " = " + fValue->description();
+ }
+ return result;
+ }
+
+ const Variable* fVar;
+ std::vector<std::unique_ptr<Expression>> fSizes;
+ std::unique_ptr<Expression> fValue;
+
+ typedef Statement INHERITED;
+};
+
+/**
+ * A variable declaration statement, which may consist of one or more individual variables.
+ */
+struct VarDeclarations : public ProgramElement {
+ VarDeclarations(int offset, const Type* baseType,
+ std::vector<std::unique_ptr<VarDeclaration>> vars)
+ : INHERITED(offset, kVar_Kind)
+ , fBaseType(*baseType) {
+ for (auto& var : vars) {
+ fVars.push_back(std::unique_ptr<Statement>(var.release()));
+ }
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ std::vector<std::unique_ptr<VarDeclaration>> cloned;
+ for (const auto& v : fVars) {
+ cloned.push_back(std::unique_ptr<VarDeclaration>(
+ (VarDeclaration*) v->clone().release()));
+ }
+ return std::unique_ptr<ProgramElement>(new VarDeclarations(fOffset, &fBaseType,
+ std::move(cloned)));
+ }
+
+ String description() const override {
+ if (!fVars.size()) {
+ return String();
+ }
+ String result = ((VarDeclaration&) *fVars[0]).fVar->fModifiers.description() +
+ fBaseType.description() + " ";
+ String separator;
+ for (const auto& var : fVars) {
+ result += separator;
+ separator = ", ";
+ result += var->description();
+ }
+ return result;
+ }
+
+ const Type& fBaseType;
+ // this *should* be a vector of unique_ptr<VarDeclaration>, but it significantly simplifies the
+ // CFG to only have to worry about unique_ptr<Statement>
+ std::vector<std::unique_ptr<Statement>> fVars;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationsStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationsStatement.h
new file mode 100644
index 0000000000..465b691936
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationsStatement.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARDECLARATIONSSTATEMENT
+#define SKSL_VARDECLARATIONSSTATEMENT
+
+#include "src/sksl/ir/SkSLStatement.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+namespace SkSL {
+
+/**
+ * One or more variable declarations appearing as a statement within a function.
+ */
+struct VarDeclarationsStatement : public Statement {
+ VarDeclarationsStatement(std::unique_ptr<VarDeclarations> decl)
+ : INHERITED(decl->fOffset, kVarDeclarations_Kind)
+ , fDeclaration(std::move(decl)) {}
+
+ bool isEmpty() const override {
+ for (const auto& s : fDeclaration->fVars) {
+ if (!s->isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ std::unique_ptr<VarDeclarations> cloned((VarDeclarations*) fDeclaration->clone().release());
+ return std::unique_ptr<Statement>(new VarDeclarationsStatement(std::move(cloned)));
+ }
+
+ String description() const override {
+ return fDeclaration->description() + ";";
+ }
+
+ std::unique_ptr<VarDeclarations> fDeclaration;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariable.h b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
new file mode 100644
index 0000000000..bdf5cd8f79
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLE
+#define SKSL_VARIABLE
+
+#include "src/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLModifiers.h"
+#include "src/sksl/ir/SkSLSymbol.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+struct Expression;
+
+/**
+ * Represents a variable, whether local, global, or a function parameter. This represents the
+ * variable itself (the storage location), which is shared between all VariableReferences which
+ * read or write that storage location.
+ */
+struct Variable : public Symbol {
+ enum Storage {
+ kGlobal_Storage,
+ kInterfaceBlock_Storage,
+ kLocal_Storage,
+ kParameter_Storage
+ };
+
+ Variable(int offset, Modifiers modifiers, StringFragment name, const Type& type,
+ Storage storage, Expression* initialValue = nullptr)
+ : INHERITED(offset, kVariable_Kind, name)
+ , fModifiers(modifiers)
+ , fType(type)
+ , fStorage(storage)
+ , fInitialValue(initialValue)
+ , fReadCount(0)
+ , fWriteCount(initialValue ? 1 : 0) {}
+
+ ~Variable() override {
+ // can't destroy a variable while there are remaining references to it
+ if (fInitialValue) {
+ --fWriteCount;
+ }
+ SkASSERT(!fReadCount && !fWriteCount);
+ }
+
+ virtual String description() const override {
+ return fModifiers.description() + fType.fName + " " + fName;
+ }
+
+ bool dead() const {
+ if ((fStorage != kLocal_Storage && fReadCount) ||
+ (fModifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag |
+ Modifiers::kUniform_Flag))) {
+ return false;
+ }
+ return !fWriteCount ||
+ (!fReadCount && !(fModifiers.fFlags & (Modifiers::kPLS_Flag |
+ Modifiers::kPLSOut_Flag)));
+ }
+
+ mutable Modifiers fModifiers;
+ const Type& fType;
+ const Storage fStorage;
+
+ Expression* fInitialValue = nullptr;
+
+ // Tracks how many sites read from the variable. If this is zero for a non-out variable (or
+ // becomes zero during optimization), the variable is dead and may be eliminated.
+ mutable int fReadCount;
+ // Tracks how many sites write to the variable. If this is zero, the variable is dead and may be
+ // eliminated.
+ mutable int fWriteCount;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp
new file mode 100644
index 0000000000..2de8410ee5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include "src/sksl/SkSLIRGenerator.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLFloatLiteral.h"
+#include "src/sksl/ir/SkSLSetting.h"
+
+namespace SkSL {
+
+VariableReference::VariableReference(int offset, const Variable& variable, RefKind refKind)
+: INHERITED(offset, kVariableReference_Kind, variable.fType)
+, fVariable(variable)
+, fRefKind(refKind) {
+ if (refKind != kRead_RefKind) {
+ fVariable.fWriteCount++;
+ }
+ if (refKind != kWrite_RefKind) {
+ fVariable.fReadCount++;
+ }
+}
+
+VariableReference::~VariableReference() {
+ if (fRefKind != kRead_RefKind) {
+ fVariable.fWriteCount--;
+ }
+ if (fRefKind != kWrite_RefKind) {
+ fVariable.fReadCount--;
+ }
+}
+
+void VariableReference::setRefKind(RefKind refKind) {
+ if (fRefKind != kRead_RefKind) {
+ fVariable.fWriteCount--;
+ }
+ if (fRefKind != kWrite_RefKind) {
+ fVariable.fReadCount--;
+ }
+ if (refKind != kRead_RefKind) {
+ fVariable.fWriteCount++;
+ }
+ if (refKind != kWrite_RefKind) {
+ fVariable.fReadCount++;
+ }
+ fRefKind = refKind;
+}
+
+std::unique_ptr<Expression> VariableReference::copy_constant(const IRGenerator& irGenerator,
+ const Expression* expr) {
+ SkASSERT(expr->isConstant());
+ switch (expr->fKind) {
+ case Expression::kIntLiteral_Kind:
+ return std::unique_ptr<Expression>(new IntLiteral(irGenerator.fContext,
+ -1,
+ ((IntLiteral*) expr)->fValue));
+ case Expression::kFloatLiteral_Kind:
+ return std::unique_ptr<Expression>(new FloatLiteral(
+ irGenerator.fContext,
+ -1,
+ ((FloatLiteral*) expr)->fValue));
+ case Expression::kBoolLiteral_Kind:
+ return std::unique_ptr<Expression>(new BoolLiteral(irGenerator.fContext,
+ -1,
+ ((BoolLiteral*) expr)->fValue));
+ case Expression::kConstructor_Kind: {
+ const Constructor* c = (const Constructor*) expr;
+ std::vector<std::unique_ptr<Expression>> args;
+ for (const auto& arg : c->fArguments) {
+ args.push_back(copy_constant(irGenerator, arg.get()));
+ }
+ return std::unique_ptr<Expression>(new Constructor(-1, c->fType,
+ std::move(args)));
+ }
+ case Expression::kSetting_Kind: {
+ const Setting* s = (const Setting*) expr;
+ return std::unique_ptr<Expression>(new Setting(-1, s->fName,
+ copy_constant(irGenerator,
+ s->fValue.get())));
+ }
+ default:
+ ABORT("unsupported constant\n");
+ }
+}
+
+std::unique_ptr<Expression> VariableReference::constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) {
+ if (fRefKind != kRead_RefKind) {
+ return nullptr;
+ }
+ if (irGenerator.fKind == Program::kPipelineStage_Kind &&
+ fVariable.fStorage == Variable::kGlobal_Storage &&
+ (fVariable.fModifiers.fFlags & Modifiers::kIn_Flag) &&
+ !(fVariable.fModifiers.fFlags & Modifiers::kUniform_Flag)) {
+ return irGenerator.getArg(fOffset, fVariable.fName);
+ }
+ if ((fVariable.fModifiers.fFlags & Modifiers::kConst_Flag) && fVariable.fInitialValue &&
+ fVariable.fInitialValue->isConstant() && fType.kind() != Type::kArray_Kind) {
+ return copy_constant(irGenerator, fVariable.fInitialValue);
+ }
+ auto exprIter = definitions.find(&fVariable);
+ if (exprIter != definitions.end() && exprIter->second &&
+ (*exprIter->second)->isConstant()) {
+ return copy_constant(irGenerator, exprIter->second->get());
+ }
+ return nullptr;
+}
+
+} // namespace
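A minimal sketch (hypothetical setup, not in the sources) of how the constructor, destructor, and setRefKind() above keep a Variable's counters balanced; assume v is a Variable whose fReadCount and fWriteCount both start at 0:

    {
        SkSL::VariableReference r(/*offset=*/-1, v);  // default kRead_RefKind: fReadCount -> 1
        SkSL::VariableReference w(/*offset=*/-1, v, SkSL::VariableReference::kWrite_RefKind);
        // v.fReadCount == 1, v.fWriteCount == 1
        w.setRefKind(SkSL::VariableReference::kReadWrite_RefKind);
        // v.fReadCount == 2, v.fWriteCount == 1
    }
    // after both destructors run: v.fReadCount == 0, v.fWriteCount == 0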
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
new file mode 100644
index 0000000000..6c8a92305d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLEREFERENCE
+#define SKSL_VARIABLEREFERENCE
+
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+class IRGenerator;
+
+/**
+ * A reference to a variable, through which it can be read or written. In the statement:
+ *
+ * x = x + 1;
+ *
+ * there is only one Variable 'x', but two VariableReferences to it.
+ */
+struct VariableReference : public Expression {
+ enum RefKind {
+ kRead_RefKind,
+ kWrite_RefKind,
+ kReadWrite_RefKind,
+ // taking the address of a variable - we consider this a read & write but don't complain if
+ // the variable was not previously assigned
+ kPointer_RefKind
+ };
+
+ VariableReference(int offset, const Variable& variable, RefKind refKind = kRead_RefKind);
+
+ ~VariableReference() override;
+
+ RefKind refKind() const {
+ return fRefKind;
+ }
+
+ void setRefKind(RefKind refKind);
+
+ bool hasSideEffects() const override {
+ return false;
+ }
+
+ bool isConstant() const override {
+ return 0 != (fVariable.fModifiers.fFlags & Modifiers::kConst_Flag);
+ }
+
+ std::unique_ptr<Expression> clone() const override {
+ return std::unique_ptr<Expression>(new VariableReference(fOffset, fVariable, fRefKind));
+ }
+
+ String description() const override {
+ return fVariable.fName;
+ }
+
+ static std::unique_ptr<Expression> copy_constant(const IRGenerator& irGenerator,
+ const Expression* expr);
+
+ std::unique_ptr<Expression> constantPropagate(const IRGenerator& irGenerator,
+ const DefinitionMap& definitions) override;
+
+ const Variable& fVariable;
+ RefKind fRefKind;
+
+private:
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h
new file mode 100644
index 0000000000..8e311a090a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_WHILESTATEMENT
+#define SKSL_WHILESTATEMENT
+
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'while' loop.
+ */
+struct WhileStatement : public Statement {
+ WhileStatement(int offset, std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> statement)
+ : INHERITED(offset, kWhile_Kind)
+ , fTest(std::move(test))
+ , fStatement(std::move(statement)) {}
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::unique_ptr<Statement>(new WhileStatement(fOffset, fTest->clone(),
+ fStatement->clone()));
+ }
+
+ String description() const override {
+ return "while (" + fTest->description() + ") " + fStatement->description();
+ }
+
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Statement> fStatement;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/DFA.h b/gfx/skia/skia/src/sksl/lex/DFA.h
new file mode 100644
index 0000000000..1fab51f921
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/DFA.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DFA
+#define SKSL_DFA
+
+#include <string>
+#include <vector>
+
+/**
+ * Tables representing a deterministic finite automaton for matching regular expressions.
+ */
+struct DFA {
+ DFA(std::vector<int> charMappings, std::vector<std::vector<int>> transitions,
+ std::vector<int> accepts)
+ : fCharMappings(charMappings)
+ , fTransitions(transitions)
+ , fAccepts(accepts) {}
+
+ // maps chars to the row index of fTransitions, as multiple characters may map to the same row.
+ // starting from state s and looking at char c, the new state is
+ // fTransitions[fCharMappings[c]][s].
+ std::vector<int> fCharMappings;
+
+ // one row per character mapping, one column per state
+ std::vector<std::vector<int>> fTransitions;
+
+ // contains, for each state, the token id we should report when matching ends in that state (-1
+ // for no match)
+ std::vector<int> fAccepts;
+};
+
+#endif
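For clarity, a small sketch (not in the sources) of how a consumer steps this DFA, following the indexing scheme documented above:

    // Advance one character: map c to a transition row, then index by current state.
    static int step(const DFA& dfa, int state, char c) {
        return dfa.fTransitions[dfa.fCharMappings[(int) c]][state];
    }
    // step() returning 0 means the reject state: the match ends there, and the
    // token is dfa.fAccepts[state] (or -1 if that state accepts nothing).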
diff --git a/gfx/skia/skia/src/sksl/lex/DFAState.h b/gfx/skia/skia/src/sksl/lex/DFAState.h
new file mode 100644
index 0000000000..141078a769
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/DFAState.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DFASTATE
+#define SKSL_DFASTATE
+
+#include "src/sksl/lex/LexUtil.h"
+
+#include <vector>
+#include <string>
+
+struct DFAState {
+ struct Label {
+ std::vector<int> fStates;
+
+ Label(std::vector<int> states)
+ : fStates(std::move(states)) {}
+
+ bool operator==(const Label& other) const {
+ return fStates == other.fStates;
+ }
+
+ bool operator!=(const Label& other) const {
+ return !(*this == other);
+ }
+
+ std::string description() const {
+ std::string result = "<";
+ const char* separator = "";
+ for (int s : fStates) {
+ result += separator;
+ result += std::to_string(s);
+ separator = ", ";
+ }
+ result += ">";
+ return result;
+ }
+ };
+
+ DFAState()
+ : fId(INVALID)
+ , fLabel({}) {}
+
+ DFAState(int id, Label label)
+ : fId(id)
+ , fLabel(std::move(label)) {}
+
+ DFAState(const DFAState& other) = delete;
+
+ int fId;
+
+ Label fLabel;
+
+ bool fIsScanned = false;
+};
+
+namespace std {
+ template<> struct hash<DFAState::Label> {
+ size_t operator()(const DFAState::Label& s) const {
+ size_t result = 0;
+ for (int i : s.fStates) {
+ result = result * 101 + i;
+ }
+ return result;
+ }
+ };
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/LexUtil.h b/gfx/skia/skia/src/sksl/lex/LexUtil.h
new file mode 100644
index 0000000000..338b864646
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/LexUtil.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LEXUTIL
+#define SKSL_LEXUTIL
+
+#include <cstdlib>
+
+#define INVALID -1
+
+#define ABORT(...) (fprintf(stderr, __VA_ARGS__), abort())
+#define SkASSERT(x) (void)((x) || (ABORT("failed SkASSERT(%s): %s:%d\n", #x, __FILE__, __LINE__), 0))
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/Main.cpp b/gfx/skia/skia/src/sksl/lex/Main.cpp
new file mode 100644
index 0000000000..903e464e7c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/Main.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/NFAtoDFA.h"
+#include "src/sksl/lex/RegexParser.h"
+
+#include <fstream>
+#include <sstream>
+#include <string>
+
+/**
+ * Processes a .lex file and produces .h and .cpp files which implement a lexical analyzer. The .lex
+ * file is a text file with one token definition per line. Each line is of the form:
+ * <TOKEN_NAME> = <pattern>
+ * where <pattern> is either a regular expression (e.g [0-9]) or a double-quoted literal string.
+ */
+
+static constexpr const char* HEADER =
+ "/*\n"
+ " * Copyright 2017 Google Inc.\n"
+ " *\n"
+ " * Use of this source code is governed by a BSD-style license that can be\n"
+ " * found in the LICENSE file.\n"
+ " */\n"
+ "/*****************************************************************************************\n"
+ " ******************** This file was generated by sksllex. Do not edit. *******************\n"
+ " *****************************************************************************************/\n";
+
+void writeH(const DFA& dfa, const char* lexer, const char* token,
+ const std::vector<std::string>& tokens, const char* hPath) {
+ std::ofstream out(hPath);
+ SkASSERT(out.good());
+ out << HEADER;
+ out << "#ifndef SKSL_" << lexer << "\n";
+ out << "#define SKSL_" << lexer << "\n";
+ out << "#include <cstddef>\n";
+ out << "#include <cstdint>\n";
+ out << "namespace SkSL {\n";
+ out << "\n";
+ out << "struct " << token << " {\n";
+ out << " enum Kind {\n";
+ for (const std::string& t : tokens) {
+ out << " #undef " << t << "\n";
+ out << " " << t << ",\n";
+ }
+ out << " };\n";
+ out << "\n";
+ out << " " << token << "()\n";
+ out << " : fKind(Kind::INVALID)\n";
+ out << " , fOffset(-1)\n";
+ out << " , fLength(-1) {}\n";
+ out << "\n";
+ out << " " << token << "(Kind kind, int32_t offset, int32_t length)\n";
+ out << " : fKind(kind)\n";
+ out << " , fOffset(offset)\n";
+ out << " , fLength(length) {}\n";
+ out << "\n";
+ out << " Kind fKind;\n";
+ out << " int fOffset;\n";
+ out << " int fLength;\n";
+ out << "};\n";
+ out << "\n";
+ out << "class " << lexer << " {\n";
+ out << "public:\n";
+ out << " void start(const char* text, int32_t length) {\n";
+ out << " fText = text;\n";
+ out << " fLength = length;\n";
+ out << " fOffset = 0;\n";
+ out << " }\n";
+ out << "\n";
+ out << " " << token << " next();\n";
+ out << "\n";
+ out << "private:\n";
+ out << " const char* fText;\n";
+ out << " int32_t fLength;\n";
+ out << " int32_t fOffset;\n";
+ out << "};\n";
+ out << "\n";
+ out << "} // namespace\n";
+ out << "#endif\n";
+}
+
+void writeCPP(const DFA& dfa, const char* lexer, const char* token, const char* include,
+ const char* cppPath) {
+ std::ofstream out(cppPath);
+ SkASSERT(out.good());
+ out << HEADER;
+ out << "#include \"" << include << "\"\n";
+ out << "\n";
+ out << "namespace SkSL {\n";
+ out << "\n";
+
+ size_t states = 0;
+ for (const auto& row : dfa.fTransitions) {
+ states = std::max(states, row.size());
+ }
+ // arbitrarily-chosen character which is greater than START_CHAR and should not appear in actual
+ // input
+ out << "static const uint8_t INVALID_CHAR = 18;";
+ out << "static int8_t mappings[" << dfa.fCharMappings.size() << "] = {\n ";
+ const char* separator = "";
+ for (int m : dfa.fCharMappings) {
+ out << separator << std::to_string(m);
+ separator = ", ";
+ }
+ out << "\n};\n";
+ out << "static int16_t transitions[" << dfa.fTransitions.size() << "][" << states << "] = {\n";
+ for (size_t c = 0; c < dfa.fTransitions.size(); ++c) {
+ out << " {";
+ for (size_t j = 0; j < states; ++j) {
+ if ((size_t) c < dfa.fTransitions.size() && j < dfa.fTransitions[c].size()) {
+ out << " " << dfa.fTransitions[c][j] << ",";
+ } else {
+ out << " 0,";
+ }
+ }
+ out << " },\n";
+ }
+ out << "};\n";
+ out << "\n";
+
+ out << "static int8_t accepts[" << states << "] = {";
+ for (size_t i = 0; i < states; ++i) {
+ if (i < dfa.fAccepts.size()) {
+ out << " " << dfa.fAccepts[i] << ",";
+ } else {
+ out << " " << INVALID << ",";
+ }
+ }
+ out << " };\n";
+ out << "\n";
+
+ out << token << " " << lexer << "::next() {\n";
+ out << " // note that we cheat here: normally a lexer needs to worry about the case\n";
+ out << " // where a token has a prefix which is not itself a valid token - for instance, \n";
+ out << " // maybe we have a valid token 'while', but 'w', 'wh', etc. are not valid\n";
+ out << " // tokens. Our grammar doesn't have this property, so we can simplify the logic\n";
+ out << " // a bit.\n";
+ out << " int32_t startOffset = fOffset;\n";
+ out << " if (startOffset == fLength) {\n";
+ out << " return " << token << "(" << token << "::END_OF_FILE, startOffset, 0);\n";
+ out << " }\n";
+ out << " int16_t state = 1;\n";
+ out << " for (;;) {\n";
+ out << " if (fOffset >= fLength) {\n";
+ out << " if (accepts[state] == -1) {\n";
+ out << " return Token(Token::END_OF_FILE, startOffset, 0);\n";
+ out << " }\n";
+ out << " break;\n";
+ out << " }\n";
+ out << " uint8_t c = (uint8_t) fText[fOffset];";
+ out << " if (c <= 8 || c >= " << dfa.fCharMappings.size() << ") {";
+ out << " c = INVALID_CHAR;";
+ out << " }";
+ out << " int16_t newState = transitions[mappings[c]][state];\n";
+ out << " if (!newState) {\n";
+ out << " break;\n";
+ out << " }\n";
+ out << " state = newState;";
+ out << " ++fOffset;\n";
+ out << " }\n";
+ out << " Token::Kind kind = (" << token << "::Kind) accepts[state];\n";
+ out << " return " << token << "(kind, startOffset, fOffset - startOffset);\n";
+ out << "}\n";
+ out << "\n";
+ out << "} // namespace\n";
+}
+
+void process(const char* inPath, const char* lexer, const char* token, const char* hPath,
+ const char* cppPath) {
+ NFA nfa;
+ std::vector<std::string> tokens;
+ tokens.push_back("END_OF_FILE");
+ std::string line;
+ std::ifstream in(inPath);
+ while (std::getline(in, line)) {
+ std::istringstream split(line);
+ std::string name, delimiter, pattern;
+ if (split >> name >> delimiter >> pattern) {
+ SkASSERT(split.eof());
+ SkASSERT(name != "");
+ SkASSERT(delimiter == "=");
+ SkASSERT(pattern != "");
+ tokens.push_back(name);
+ if (pattern[0] == '"') {
+ SkASSERT(pattern.size() > 2 && pattern[pattern.size() - 1] == '"');
+ RegexNode node = RegexNode(RegexNode::kChar_Kind, pattern[1]);
+ for (size_t i = 2; i < pattern.size() - 1; ++i) {
+ node = RegexNode(RegexNode::kConcat_Kind, node,
+ RegexNode(RegexNode::kChar_Kind, pattern[i]));
+ }
+ nfa.addRegex(node);
+ }
+ else {
+ nfa.addRegex(RegexParser().parse(pattern));
+ }
+ }
+ }
+ NFAtoDFA converter(&nfa);
+ DFA dfa = converter.convert();
+ writeH(dfa, lexer, token, tokens, hPath);
+ writeCPP(dfa, lexer, token, (std::string("src/sksl/SkSL") + lexer + ".h").c_str(), cppPath);
+}
+
+int main(int argc, const char** argv) {
+ if (argc != 6) {
+ printf("usage: sksllex <input.lex> <lexername> <tokenname> <output.h> <output.cpp>\n");
+ exit(1);
+ }
+ process(argv[1], argv[2], argv[3], argv[4], argv[5]);
+ return 0;
+}
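As a concrete (hypothetical) invocation of the usage line above, the SkSL lexer in this tree would be regenerated along these lines, matching the include path that writeCPP() constructs:

    sksllex src/sksl/lex/sksl.lex Lexer Token src/sksl/SkSLLexer.h src/sksl/SkSLLexer.cpp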
diff --git a/gfx/skia/skia/src/sksl/lex/NFA.cpp b/gfx/skia/skia/src/sksl/lex/NFA.cpp
new file mode 100644
index 0000000000..0c94bf7234
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFA.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/NFA.h"
+
+int NFA::match(std::string s) const {
+ std::vector<int> states = fStartStates;
+ for (size_t i = 0; i < s.size(); ++i) {
+ std::vector<int> next;
+ for (int id : states) {
+ if (fStates[id].accept(s[i])) {
+ for (int nextId : fStates[id].fNext) {
+ if (fStates[nextId].fKind != NFAState::kRemapped_Kind) {
+ next.push_back(nextId);
+ } else {
+ next.insert(next.end(), fStates[nextId].fData.begin(),
+ fStates[nextId].fData.end());
+ }
+ }
+ }
+ }
+ if (!next.size()) {
+ return INVALID;
+ }
+ states = next;
+ }
+ int accept = INVALID;
+ for (int id : states) {
+ if (fStates[id].fKind == NFAState::kAccept_Kind) {
+ int result = fStates[id].fData[0];
+ if (accept == INVALID || result < accept) {
+ accept = result;
+ }
+ }
+ }
+ return accept;
+}
diff --git a/gfx/skia/skia/src/sksl/lex/NFA.h b/gfx/skia/skia/src/sksl/lex/NFA.h
new file mode 100644
index 0000000000..49d18fcf83
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFA.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NFA
+#define SKSL_NFA
+
+#include "src/sksl/lex/NFAState.h"
+#include "src/sksl/lex/RegexNode.h"
+
+/**
+ * A nondeterministic finite automaton for matching regular expressions. The NFA is initialized with
+ * a number of regular expressions, and then matches a string against all of them simultaneously.
+ */
+struct NFA {
+ /**
+ * Adds a new regular expression to the set of expressions matched by this automaton, returning
+ * its index.
+ */
+ int addRegex(const RegexNode& regex) {
+ std::vector<int> accept;
+ // we reserve token 0 for END_OF_FILE, so this starts at 1
+ accept.push_back(this->addState(NFAState(++fRegexCount)));
+ std::vector<int> startStates = regex.createStates(this, accept);
+ fStartStates.insert(fStartStates.end(), startStates.begin(), startStates.end());
+ return fStartStates.size() - 1;
+ }
+
+ /**
+ * Adds a new state to the NFA, returning its index.
+ */
+ int addState(NFAState s) {
+ fStates.push_back(std::move(s));
+ return fStates.size() - 1;
+ }
+
+ /**
+ * Matches a string against all of the regexes added to this NFA. Returns the index of the first
+ * (in addRegex order) matching expression, or -1 if no match. This is relatively slow and used
+ * only for debugging purposes; the NFA should be converted to a DFA before actual use.
+ */
+ int match(std::string s) const;
+
+ int fRegexCount = 0;
+
+ std::vector<NFAState> fStates;
+
+ std::vector<int> fStartStates;
+};
+
+#endif
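A hedged usage sketch of the debugging-oriented match() described above, assuming RegexParser from this directory:

    NFA nfa;
    nfa.addRegex(RegexParser().parse("while"));   // accepts as token 1
    nfa.addRegex(RegexParser().parse("[a-z]+"));  // accepts as token 2
    // "while" matches both regexes; the earliest-added one wins:
    SkASSERT(nfa.match("while") == 1);
    SkASSERT(nfa.match("hello") == 2);
    SkASSERT(nfa.match("123") == INVALID);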
diff --git a/gfx/skia/skia/src/sksl/lex/NFAState.h b/gfx/skia/skia/src/sksl/lex/NFAState.h
new file mode 100644
index 0000000000..6f6e289b7f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFAState.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NFASTATE
+#define SKSL_NFASTATE
+
+#include <string>
+#include <vector>
+
+#include "src/sksl/lex/LexUtil.h"
+
+struct NFAState {
+ enum Kind {
+ // represents an accept state - if the NFA ends up in this state, we have successfully
+ // matched the token indicated by fData[0]
+ kAccept_Kind,
+ // matches the single character fChar
+ kChar_Kind,
+ // the regex '.'; matches any char but '\n'
+ kDot_Kind,
+ // a state which serves as a placeholder for the states indicated in fData. When we
+ // transition to this state, we instead transition to all of the fData states.
+ kRemapped_Kind,
+ // contains a list of true/false values in fData. fData[c] tells us whether we accept the
+ // character c.
+ kTable_Kind
+ };
+
+ NFAState(Kind kind, std::vector<int> next)
+ : fKind(kind)
+ , fNext(std::move(next)) {}
+
+ NFAState(char c, std::vector<int> next)
+ : fKind(kChar_Kind)
+ , fChar(c)
+ , fNext(std::move(next)) {}
+
+ NFAState(std::vector<int> states)
+ : fKind(kRemapped_Kind)
+ , fData(std::move(states)) {}
+
+ NFAState(bool inverse, std::vector<bool> accepts, std::vector<int> next)
+ : fKind(kTable_Kind)
+ , fInverse(inverse)
+ , fNext(std::move(next)) {
+ for (bool b : accepts) {
+ fData.push_back(b);
+ }
+ }
+
+ NFAState(int token)
+ : fKind(kAccept_Kind) {
+ fData.push_back(token);
+ }
+
+ bool accept(char c) const {
+ switch (fKind) {
+ case kAccept_Kind:
+ return false;
+ case kChar_Kind:
+ return c == fChar;
+ case kDot_Kind:
+ return c != '\n';
+ case kTable_Kind: {
+ bool value;
+ if ((size_t) c < fData.size()) {
+ value = fData[c];
+ } else {
+ value = false;
+ }
+ return value != fInverse;
+ }
+ default:
+ ABORT("unreachable");
+ }
+ }
+
+ std::string description() const {
+ switch (fKind) {
+ case kAccept_Kind:
+ return "Accept(" + std::to_string(fData[0]) + ")";
+ case kChar_Kind: {
+ std::string result = "Char('" + std::string(1, fChar) + "'";
+ for (int v : fNext) {
+ result += ", ";
+ result += std::to_string(v);
+ }
+ result += ")";
+ return result;
+ }
+ case kDot_Kind: {
+ std::string result = "Dot(";
+ const char* separator = "";
+ for (int v : fNext) {
+ result += separator;
+ result += std::to_string(v);
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+ case kRemapped_Kind: {
+ std::string result = "Remapped(";
+ const char* separator = "";
+ for (int v : fData) {
+ result += separator;
+ result += std::to_string(v);
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+ case kTable_Kind: {
+ std::string result = std::string("Table(") + (fInverse ? "true" : "false") + ", [";
+ const char* separator = "";
+ for (int v : fData) {
+ result += separator;
+ result += v ? "true" : "false";
+ separator = ", ";
+ }
+ result += "]";
+ for (int n : fNext) {
+ result += ", ";
+ result += std::to_string(n);
+ }
+ result += ")";
+ return result;
+ }
+ default:
+ ABORT("unreachable");
+ }
+ }
+
+ Kind fKind;
+
+ char fChar = 0;
+
+ bool fInverse = false;
+
+ std::vector<int> fData;
+
+    // states we transition to upon a successful match from this state
+ std::vector<int> fNext;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h b/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h
new file mode 100644
index 0000000000..3c9038006d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef NFAtoDFA_DEFINED
+#define NFAtoDFA_DEFINED
+
+#include "src/sksl/lex/DFA.h"
+#include "src/sksl/lex/DFAState.h"
+#include "src/sksl/lex/NFA.h"
+#include "src/sksl/lex/NFAState.h"
+
+#include <algorithm>
+#include <climits>
+#include <memory>
+#include <unordered_map>
+#include <set>
+#include <vector>
+
+/**
+ * Converts a nondeterministic finite automaton to a deterministic finite automaton. Since NFAs and
+ * DFAs differ only in that an NFA allows multiple states at the same time, we can find each
+ * possible combination of simultaneous NFA states and give this combination a label. These labelled
+ * nodes are our DFA nodes, since we can only be in one such unique set of NFA states at a time.
+ *
+ * As an NFA can end up in multiple accept states at the same time (for instance, the token "while"
+ * is valid for both WHILE and IDENTIFIER), we disambiguate by preferring the first matching regex
+ * (in terms of the order in which they were added to the NFA).
+ */
+class NFAtoDFA {
+public:
+ static constexpr char START_CHAR = 9;
+ static constexpr char END_CHAR = 126;
+
+ NFAtoDFA(NFA* nfa)
+ : fNFA(*nfa) {}
+
+ /**
+ * Returns a DFA created from the NFA.
+ */
+ DFA convert() {
+ // create state 0, the "reject" state
+ getState(DFAState::Label({}));
+ // create a state representing being in all of the NFA's start states at once
+ std::vector<int> startStates = fNFA.fStartStates;
+ std::sort(startStates.begin(), startStates.end());
+ // this becomes state 1, our start state
+ DFAState* start = getState(DFAState::Label(startStates));
+ this->scanState(start);
+
+ this->computeMappings();
+
+ int stateCount = 0;
+ for (const auto& row : fTransitions) {
+ stateCount = std::max(stateCount, (int) row.size());
+ }
+ return DFA(fCharMappings, fTransitions, fAccepts);
+ }
+
+private:
+ /**
+ * Returns an existing state with the given label, or creates a new one and returns it.
+ */
+ DFAState* getState(DFAState::Label label) {
+ auto found = fStates.find(label);
+ if (found == fStates.end()) {
+ int id = fStates.size();
+ fStates[label] = std::unique_ptr<DFAState>(new DFAState(id, label));
+ return fStates[label].get();
+ }
+ return found->second.get();
+ }
+
+ void add(int nfaState, std::vector<int>* states) {
+ NFAState state = fNFA.fStates[nfaState];
+ if (state.fKind == NFAState::kRemapped_Kind) {
+ for (int next : state.fData) {
+ this->add(next, states);
+ }
+ } else {
+ for (int state : *states) {
+ if (nfaState == state) {
+ return;
+ }
+ }
+ states->push_back(nfaState);
+ }
+ }
+
+ void addTransition(char c, int start, int next) {
+ while (fTransitions.size() <= (size_t) c) {
+ fTransitions.push_back(std::vector<int>());
+ }
+ std::vector<int>& row = fTransitions[c];
+ while (row.size() <= (size_t) start) {
+ row.push_back(INVALID);
+ }
+ row[start] = next;
+ }
+
+ void scanState(DFAState* state) {
+ state->fIsScanned = true;
+ for (char c = START_CHAR; c <= END_CHAR; ++c) {
+ std::vector<int> next;
+ int bestAccept = INT_MAX;
+ for (int idx : state->fLabel.fStates) {
+ const NFAState& nfaState = fNFA.fStates[idx];
+ if (nfaState.accept(c)) {
+ for (int nextState : nfaState.fNext) {
+ if (fNFA.fStates[nextState].fKind == NFAState::kAccept_Kind) {
+ bestAccept = std::min(bestAccept, fNFA.fStates[nextState].fData[0]);
+ }
+ this->add(nextState, &next);
+ }
+ }
+ }
+ std::sort(next.begin(), next.end());
+ DFAState* nextState = this->getState(DFAState::Label(next));
+ this->addTransition(c, state->fId, nextState->fId);
+ if (bestAccept != INT_MAX) {
+ while (fAccepts.size() <= (size_t) nextState->fId) {
+ fAccepts.push_back(INVALID);
+ }
+ fAccepts[nextState->fId] = bestAccept;
+ }
+ if (!nextState->fIsScanned) {
+ this->scanState(nextState);
+ }
+ }
+ }
+
+ // collapse rows with the same transitions to a single row. This is common, as each row
+ // represents a character and often there are many characters for which all transitions are
+ // identical (e.g. [0-9] are treated the same way by all lexer rules)
+ void computeMappings() {
+ // mappings[<input row>] = <output row>
+ std::vector<std::vector<int>*> uniques;
+ // this could be done more efficiently, but O(n^2) is plenty fast for our purposes
+ for (size_t i = 0; i < fTransitions.size(); ++i) {
+ int found = -1;
+ for (size_t j = 0; j < uniques.size(); ++j) {
+ if (*uniques[j] == fTransitions[i]) {
+ found = j;
+ break;
+ }
+ }
+ if (found == -1) {
+ found = (int) uniques.size();
+ uniques.push_back(&fTransitions[i]);
+ }
+ fCharMappings.push_back(found);
+ }
+ std::vector<std::vector<int>> newTransitions;
+ for (std::vector<int>* row : uniques) {
+ newTransitions.push_back(*row);
+ }
+ fTransitions = newTransitions;
+ }
+
+ const NFA& fNFA;
+ std::unordered_map<DFAState::Label, std::unique_ptr<DFAState>> fStates;
+ std::vector<std::vector<int>> fTransitions;
+ std::vector<int> fCharMappings;
+ std::vector<int> fAccepts;
+};
+#endif // NFAtoDFA_DEFINED
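Putting the pieces together, a sketch of the pipeline this class sits in (mirroring process() in Main.cpp above):

    NFA nfa;
    nfa.addRegex(RegexParser().parse("[0-9]+"));
    NFAtoDFA converter(&nfa);
    DFA dfa = converter.convert();  // tables ready to be emitted by writeH/writeCPP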
diff --git a/gfx/skia/skia/src/sksl/lex/RegexNode.cpp b/gfx/skia/skia/src/sksl/lex/RegexNode.cpp
new file mode 100644
index 0000000000..90eeda2bff
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexNode.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/RegexNode.h"
+
+#include "src/sksl/lex/NFA.h"
+
+std::vector<int> RegexNode::createStates(NFA* nfa, const std::vector<int>& accept) const {
+ std::vector<int> result;
+ switch (fKind) {
+ case kChar_Kind:
+ result.push_back(nfa->addState(NFAState(fPayload.fChar, accept)));
+ break;
+ case kCharset_Kind: {
+ std::vector<bool> chars;
+ for (const RegexNode& child : fChildren) {
+ if (child.fKind == kChar_Kind) {
+ while (chars.size() <= (size_t) child.fPayload.fChar) {
+ chars.push_back(false);
+ }
+ chars[child.fPayload.fChar] = true;
+ } else {
+ SkASSERT(child.fKind == kRange_Kind);
+ while (chars.size() <= (size_t) child.fChildren[1].fPayload.fChar) {
+ chars.push_back(false);
+ }
+ for (char c = child.fChildren[0].fPayload.fChar;
+ c <= child.fChildren[1].fPayload.fChar;
+ ++c) {
+ chars[c] = true;
+ }
+ }
+ }
+ result.push_back(nfa->addState(NFAState(fPayload.fBool, chars, accept)));
+ break;
+ }
+ case kConcat_Kind: {
+ std::vector<int> right = fChildren[1].createStates(nfa, accept);
+ result = fChildren[0].createStates(nfa, right);
+ break;
+ }
+ case kDot_Kind:
+ result.push_back(nfa->addState(NFAState(NFAState::kDot_Kind, accept)));
+ break;
+ case kOr_Kind: {
+ std::vector<int> states = fChildren[0].createStates(nfa, accept);
+ result.insert(result.end(), states.begin(), states.end());
+ states = fChildren[1].createStates(nfa, accept);
+ result.insert(result.end(), states.begin(), states.end());
+ break;
+ }
+ case kPlus_Kind: {
+ std::vector<int> next = accept;
+ std::vector<int> placeholder;
+ int id = nfa->addState(NFAState(placeholder));
+ next.push_back(id);
+ result = fChildren[0].createStates(nfa, next);
+ nfa->fStates[id] = NFAState(result);
+ break;
+ }
+ case kQuestion_Kind:
+ result = fChildren[0].createStates(nfa, accept);
+ result.insert(result.end(), accept.begin(), accept.end());
+ break;
+ case kRange_Kind:
+ ABORT("unreachable");
+ case kStar_Kind: {
+ std::vector<int> next = accept;
+ std::vector<int> placeholder;
+ int id = nfa->addState(NFAState(placeholder));
+ next.push_back(id);
+ result = fChildren[0].createStates(nfa, next);
+ result.insert(result.end(), accept.begin(), accept.end());
+ nfa->fStates[id] = NFAState(result);
+ break;
+ }
+ }
+ return result;
+}
+
+std::string RegexNode::description() const {
+ switch (fKind) {
+ case kChar_Kind:
+ return std::string(1, fPayload.fChar);
+ case kCharset_Kind: {
+ std::string result("[");
+ if (fPayload.fBool) {
+ result += "^";
+ }
+ for (const RegexNode& c : fChildren) {
+ result += c.description();
+ }
+ result += "]";
+ return result;
+ }
+ case kConcat_Kind:
+ return fChildren[0].description() + fChildren[1].description();
+ case kDot_Kind:
+ return ".";
+ case kOr_Kind:
+ return "(" + fChildren[0].description() + "|" + fChildren[1].description() + ")";
+ case kPlus_Kind:
+ return fChildren[0].description() + "+";
+ case kQuestion_Kind:
+ return fChildren[0].description() + "?";
+ case kRange_Kind:
+ return fChildren[0].description() + "-" + fChildren[1].description();
+ case kStar_Kind:
+ return fChildren[0].description() + "*";
+ default:
+ return "<" + std::to_string(fKind) + ">";
+ }
+}
diff --git a/gfx/skia/skia/src/sksl/lex/RegexNode.h b/gfx/skia/skia/src/sksl/lex/RegexNode.h
new file mode 100644
index 0000000000..e3aa65b3d1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexNode.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_REGEXNODE
+#define SKSL_REGEXNODE
+
+#include <string>
+#include <vector>
+
+struct NFA;
+
+/**
+ * Represents a node in the parse tree of a regular expression.
+ */
+struct RegexNode {
+ enum Kind {
+ kChar_Kind,
+ kCharset_Kind,
+ kConcat_Kind,
+ kDot_Kind,
+ kOr_Kind,
+ kPlus_Kind,
+ kRange_Kind,
+ kQuestion_Kind,
+ kStar_Kind
+ };
+
+ RegexNode(Kind kind)
+ : fKind(kind) {}
+
+ RegexNode(Kind kind, char payload)
+ : fKind(kind) {
+ fPayload.fChar = payload;
+ }
+
+ RegexNode(Kind kind, const char* children)
+ : fKind(kind) {
+ fPayload.fBool = false;
+ while (*children != '\0') {
+ fChildren.emplace_back(kChar_Kind, *children);
+ ++children;
+ }
+ }
+
+ RegexNode(Kind kind, RegexNode child)
+ : fKind(kind) {
+ fChildren.push_back(std::move(child));
+ }
+
+ RegexNode(Kind kind, RegexNode child1, RegexNode child2)
+ : fKind(kind) {
+ fChildren.push_back(std::move(child1));
+ fChildren.push_back(std::move(child2));
+ }
+
+ /**
+ * Creates NFA states for this node, with a successful match against this node resulting in a
+ * transition to all of the states in the accept vector.
+ */
+ std::vector<int> createStates(NFA* nfa, const std::vector<int>& accept) const;
+
+ std::string description() const;
+
+ Kind fKind;
+
+ union Payload {
+ char fChar;
+ bool fBool;
+ } fPayload;
+
+ std::vector<RegexNode> fChildren;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/RegexParser.cpp b/gfx/skia/skia/src/sksl/lex/RegexParser.cpp
new file mode 100644
index 0000000000..9b5445e1a3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexParser.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/RegexParser.h"
+
+#include "src/sksl/lex/LexUtil.h"
+
+RegexNode RegexParser::parse(std::string source) {
+ fSource = source;
+ fIndex = 0;
+ SkASSERT(fStack.size() == 0);
+ this->regex();
+ SkASSERT(fStack.size() == 1);
+ SkASSERT(fIndex == source.size());
+ return this->pop();
+}
+
+char RegexParser::peek() {
+ if (fIndex >= fSource.size()) {
+ return END;
+ }
+ return fSource[fIndex];
+}
+
+void RegexParser::expect(char c) {
+ if (this->peek() != c) {
+ printf("expected '%c' at index %d, but found '%c'", c, (int) fIndex, this->peek());
+ exit(1);
+ }
+ ++fIndex;
+}
+
+RegexNode RegexParser::pop() {
+ RegexNode result = fStack.top();
+ fStack.pop();
+ return result;
+}
+
+void RegexParser::term() {
+ switch (this->peek()) {
+ case '(': this->group(); break;
+ case '[': this->set(); break;
+ case '.': this->dot(); break;
+ default: this->literal();
+ }
+}
+
+void RegexParser::quantifiedTerm() {
+ this->term();
+ switch (this->peek()) {
+ case '*': fStack.push(RegexNode(RegexNode::kStar_Kind, this->pop())); ++fIndex; break;
+ case '+': fStack.push(RegexNode(RegexNode::kPlus_Kind, this->pop())); ++fIndex; break;
+ case '?': fStack.push(RegexNode(RegexNode::kQuestion_Kind, this->pop())); ++fIndex; break;
+ default: break;
+ }
+}
+
+void RegexParser::sequence() {
+ this->quantifiedTerm();
+ for (;;) {
+ switch (this->peek()) {
+ case END: // fall through
+ case '|': // fall through
+ case ')': return;
+ default:
+ this->sequence();
+ RegexNode right = this->pop();
+ RegexNode left = this->pop();
+ fStack.emplace(RegexNode::kConcat_Kind, std::move(left), std::move(right));
+ }
+ }
+}
+
+RegexNode RegexParser::escapeSequence(char c) {
+ switch (c) {
+ case 'n': return RegexNode(RegexNode::kChar_Kind, '\n');
+ case 'r': return RegexNode(RegexNode::kChar_Kind, '\r');
+ case 't': return RegexNode(RegexNode::kChar_Kind, '\t');
+ case 's': return RegexNode(RegexNode::kCharset_Kind, " \t\n\r");
+ default: return RegexNode(RegexNode::kChar_Kind, c);
+ }
+}
+
+void RegexParser::literal() {
+ char c = this->peek();
+ if (c == '\\') {
+ ++fIndex;
+ fStack.push(this->escapeSequence(peek()));
+ ++fIndex;
+ }
+ else {
+ fStack.push(RegexNode(RegexNode::kChar_Kind, c));
+ ++fIndex;
+ }
+}
+
+void RegexParser::dot() {
+ this->expect('.');
+ fStack.push(RegexNode(RegexNode::kDot_Kind));
+}
+
+void RegexParser::group() {
+ this->expect('(');
+ this->regex();
+ this->expect(')');
+}
+
+void RegexParser::setItem() {
+ this->literal();
+ if (this->peek() == '-') {
+ ++fIndex;
+ if (peek() == ']') {
+ fStack.push(RegexNode(RegexNode::kChar_Kind, '-'));
+ }
+ else {
+ literal();
+ RegexNode end = this->pop();
+ SkASSERT(end.fKind == RegexNode::kChar_Kind);
+ RegexNode start = this->pop();
+ SkASSERT(start.fKind == RegexNode::kChar_Kind);
+ fStack.push(RegexNode(RegexNode::kRange_Kind, std::move(start), std::move(end)));
+ }
+ }
+}
+
+void RegexParser::set() {
+ expect('[');
+ size_t depth = fStack.size();
+ RegexNode set(RegexNode::kCharset_Kind);
+ if (this->peek() == '^') {
+ ++fIndex;
+ set.fPayload.fBool = true;
+ }
+ else {
+ set.fPayload.fBool = false;
+ }
+ for (;;) {
+ switch (this->peek()) {
+ case ']':
+ ++fIndex;
+ while (fStack.size() > depth) {
+ set.fChildren.push_back(this->pop());
+ }
+ fStack.push(std::move(set));
+ return;
+ case END:
+ printf("unterminated character set\n");
+ exit(1);
+ default:
+ this->setItem();
+ }
+ }
+}
+
+void RegexParser::regex() {
+ this->sequence();
+ switch (this->peek()) {
+ case '|': {
+ ++fIndex;
+ this->regex();
+ RegexNode right = this->pop();
+ RegexNode left = this->pop();
+ fStack.push(RegexNode(RegexNode::kOr_Kind, left, right));
+ break;
+ }
+ case END: // fall through
+ case ')':
+ return;
+ default:
+ SkASSERT(false);
+ }
+}
diff --git a/gfx/skia/skia/src/sksl/lex/RegexParser.h b/gfx/skia/skia/src/sksl/lex/RegexParser.h
new file mode 100644
index 0000000000..c1753de368
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexParser.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_REGEXPARSER
+#define SKSL_REGEXPARSER
+
+#include "src/sksl/lex/RegexNode.h"
+
+#include <stack>
+#include <string>
+
+/**
+ * Turns a simple regular expression into a parse tree. The regular expression syntax supports only
+ * the basic quantifiers ('*', '+', and '?'), alternation ('|'), character sets ('[a-z]'), and
+ * groups ('()').
+ */
+class RegexParser {
+public:
+ RegexNode parse(std::string source);
+
+private:
+ static constexpr char END = '\0';
+
+ char peek();
+
+ void expect(char c);
+
+ RegexNode pop();
+
+ /**
+ * Matches a char literal, parenthesized group, character set, or dot ('.').
+ */
+ void term();
+
+ /**
+ * Matches a term followed by an optional quantifier ('*', '+', or '?').
+ */
+ void quantifiedTerm();
+
+ /**
+ * Matches a sequence of quantifiedTerms.
+ */
+ void sequence();
+
+ /**
+ * Returns a node representing the given escape character (e.g. escapeSequence('n') returns a
+ * node which matches a newline character).
+ */
+ RegexNode escapeSequence(char c);
+
+ /**
+ * Matches a literal character or escape sequence.
+ */
+ void literal();
+
+ /**
+ * Matches a dot ('.').
+ */
+ void dot();
+
+ /**
+ * Matches a parenthesized group.
+ */
+ void group();
+
+ /**
+ * Matches a literal character, escape sequence, or character range from a character set.
+ */
+ void setItem();
+
+ /**
+ * Matches a character set.
+ */
+ void set();
+
+ void regex();
+
+ std::string fSource;
+
+ size_t fIndex;
+
+ std::stack<RegexNode> fStack;
+};
+
+#endif
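A short usage sketch (not in the sources): parse() builds the tree and description() prints an equivalent pattern back for debugging (groups are not preserved as nodes, so parentheses may drop):

    RegexNode node = RegexParser().parse("[a-z_]([a-z0-9_])*");
    printf("%s\n", node.description().c_str());  // prints "[a-z_][a-z0-9_]*"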
diff --git a/gfx/skia/skia/src/sksl/lex/sksl.lex b/gfx/skia/skia/src/sksl/lex/sksl.lex
new file mode 100644
index 0000000000..3ed96fc1b1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/sksl.lex
@@ -0,0 +1,98 @@
+FLOAT_LITERAL = [0-9]*\.[0-9]+([eE][+-]?[0-9]+)?|[0-9]+\.[0-9]*([eE][+-]?[0-9]+)?|[0-9]+([eE][+-]?[0-9]+)
+INT_LITERAL = [0-9]+|0x[0-9a-fA-F]+
+TRUE_LITERAL = "true"
+FALSE_LITERAL = "false"
+IF = "if"
+STATIC_IF = "@if"
+ELSE = "else"
+FOR = "for"
+WHILE = "while"
+DO = "do"
+SWITCH = "switch"
+STATIC_SWITCH = "@switch"
+CASE = "case"
+DEFAULT = "default"
+BREAK = "break"
+CONTINUE = "continue"
+DISCARD = "discard"
+RETURN = "return"
+NULL_LITERAL = "null"
+IN = "in"
+OUT = "out"
+INOUT = "inout"
+UNIFORM = "uniform"
+CONST = "const"
+FLAT = "flat"
+NOPERSPECTIVE = "noperspective"
+READONLY = "readonly"
+WRITEONLY = "writeonly"
+COHERENT = "coherent"
+VOLATILE = "volatile"
+RESTRICT = "restrict"
+BUFFER = "buffer"
+HASSIDEEFFECTS = "sk_has_side_effects"
+PLS = "__pixel_localEXT"
+PLSIN = "__pixel_local_inEXT"
+PLSOUT = "__pixel_local_outEXT"
+STRUCT = "struct"
+LAYOUT = "layout"
+PRECISION = "precision"
+ENUM = "enum"
+CLASS = "class"
+IDENTIFIER = [a-zA-Z_$]([0-9]|[a-zA-Z_$])*
+DIRECTIVE = #[a-zA-Z_$]([0-9]|[a-zA-Z_$])*
+SECTION = @[a-zA-Z_$]([0-9]|[a-zA-Z_$])*
+LPAREN = "("
+RPAREN = ")"
+LBRACE = "{"
+RBRACE = "}"
+LBRACKET = "["
+RBRACKET = "]"
+DOT = "."
+COMMA = ","
+PLUSPLUS = "++"
+MINUSMINUS = "--"
+PLUS = "+"
+MINUS = "-"
+STAR = "*"
+SLASH = "/"
+PERCENT = "%"
+SHL = "<<"
+SHR = ">>"
+BITWISEOR = "|"
+BITWISEXOR = "^"
+BITWISEAND = "&"
+BITWISENOT = "~"
+LOGICALOR = "||"
+LOGICALXOR = "^^"
+LOGICALAND = "&&"
+LOGICALNOT = "!"
+QUESTION = "?"
+COLONCOLON = "::"
+COLON = ":"
+EQ = "="
+EQEQ = "=="
+NEQ = "!="
+GT = ">"
+LT = "<"
+GTEQ = ">="
+LTEQ = "<="
+PLUSEQ = "+="
+MINUSEQ = "-="
+STAREQ = "*="
+SLASHEQ = "/="
+PERCENTEQ = "%="
+SHLEQ = "<<="
+SHREQ = ">>="
+BITWISEOREQ = "|="
+BITWISEXOREQ = "^="
+BITWISEANDEQ = "&="
+LOGICALOREQ = "||="
+LOGICALXOREQ = "^^="
+LOGICALANDEQ = "&&="
+SEMICOLON = ";"
+ARROW = "->"
+WHITESPACE = \s+
+LINE_COMMENT = //.*
+BLOCK_COMMENT = /\*([^*]|\*[^/])*\*/
+INVALID = .
\ No newline at end of file
diff --git a/gfx/skia/skia/src/sksl/sksl_enums.inc b/gfx/skia/skia/src/sksl/sksl_enums.inc
new file mode 100644
index 0000000000..eb60b3b0c5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_enums.inc
@@ -0,0 +1,35 @@
+R"(/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+/*************************************************************************************************/
+/* This file is used from both C++ and SkSL, so we need to stick to syntax compatible with both. */
+/*************************************************************************************************/
+
+/**
+ * We have coverage effects that clip rendering to the edge of some geometric primitive.
+ * This enum specifies how that clipping is performed. Not all factories that take a
+ * GrProcessorEdgeType will succeed with all values and it is up to the caller to check for
+ * a NULL return.
+ */
+enum class GrClipEdgeType {
+ kFillBW,
+ kFillAA,
+ kInverseFillBW,
+ kInverseFillAA,
+ kHairlineAA,
+
+ kLast = kHairlineAA
+};
+
+enum class PMConversion {
+ kToPremul = 0,
+ kToUnpremul = 1,
+ kPMConversionCnt = 2
+};
+
+)"
diff --git a/gfx/skia/skia/src/sksl/sksl_fp.inc b/gfx/skia/skia/src/sksl/sksl_fp.inc
new file mode 100644
index 0000000000..0608230302
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_fp.inc
@@ -0,0 +1,34 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkSL fragment shaders
+
+layout(builtin=15) in float4 sk_FragCoord;
+layout(builtin=3) float sk_ClipDistance[1];
+
+// 9999 is a temporary value that causes us to ignore these declarations beyond
+// adding them to the symbol table. This works fine in GLSL (where they do not
+// require any further handling) but will fail in SPIR-V. We'll have a better
+// solution for this soon.
+layout(builtin=9999) float4 gl_LastFragData[1];
+layout(builtin=9999) half4 gl_LastFragColor;
+layout(builtin=9999) half4 gl_LastFragColorARM;
+layout(builtin=9999) int gl_SampleMaskIn[1];
+layout(builtin=9999) out int gl_SampleMask[1];
+layout(builtin=9999) half4 gl_SecondaryFragColorEXT;
+
+layout(builtin=10003) half4 sk_InColor;
+layout(builtin=10004) out half4 sk_OutColor;
+layout(builtin=10005) float2[] sk_TransformedCoords2D;
+layout(builtin=10006) sampler2D[] sk_TextureSamplers;
+layout(builtin=10011) half sk_Width;
+layout(builtin=10012) half sk_Height;
+
+half4 sample(fragmentProcessor fp);
+half4 sample(fragmentProcessor fp, float2 coords);
+half4 sample(fragmentProcessor fp, half4 input);
+half4 sample(fragmentProcessor fp, half4 input, float2 coords);
+half4 sample(fragmentProcessor? fp);
+half4 sample(fragmentProcessor? fp, float2 coords);
+half4 sample(fragmentProcessor? fp, half4 input);
+half4 sample(fragmentProcessor? fp, half4 input, float2 coords);
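+
+// Illustrative .fp usage (a hypothetical sketch, not part of this interface):
+//   in fragmentProcessor child;
+//   void main() { sk_OutColor = sample(child, sk_TransformedCoords2D[0]); }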
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_frag.inc b/gfx/skia/skia/src/sksl/sksl_frag.inc
new file mode 100644
index 0000000000..5bc5f55dce
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_frag.inc
@@ -0,0 +1,23 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkSL fragment shaders
+
+// See "enum SpvBuiltIn_" in ./spirv.h
+layout(builtin=15) in float4 sk_FragCoord;
+layout(builtin=17) in bool sk_Clockwise; // Similar to gl_FrontFacing, but defined in device space.
+layout(builtin=3) float sk_ClipDistance[1];
+
+// 9999 is a temporary value that causes us to ignore these declarations beyond
+// adding them to the symbol table. This works fine in GLSL (where they do not
+// require any further handling) but will fail in SPIR-V. We'll have a better
+// solution for this soon.
+layout(builtin=9999) int gl_SampleMaskIn[1];
+layout(builtin=9999) out int gl_SampleMask[1];
+layout(builtin=9999) out half4 gl_SecondaryFragColorEXT;
+
+layout(location=0,index=0,builtin=10001) out half4 sk_FragColor;
+layout(builtin=10008) half4 sk_LastFragColor;
+layout(builtin=10011) half sk_Width;
+layout(builtin=10012) half sk_Height;
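+
+// Illustrative fragment entry point (a hypothetical sketch):
+//   void main() { sk_FragColor = half4(1); } // solid opaque white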
+
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_geom.inc b/gfx/skia/skia/src/sksl/sksl_geom.inc
new file mode 100644
index 0000000000..6d535d26a3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_geom.inc
@@ -0,0 +1,24 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkSL geometry shaders
+
+layout(builtin=10002) in sk_PerVertex {
+ layout(builtin=0) float4 sk_Position;
+ layout(builtin=1) float sk_PointSize;
+ layout(builtin=3) float sk_ClipDistance[1];
+} sk_in[1];
+
+layout(builtin=10007) out sk_PerVertex {
+ layout(builtin=0) float4 sk_Position;
+ layout(builtin=1) float sk_PointSize;
+ layout(builtin=3) float sk_ClipDistance[1];
+};
+
+layout(builtin=8) in int sk_InvocationID;
+
+sk_has_side_effects void EmitStreamVertex(int stream);
+sk_has_side_effects void EndStreamPrimitive(int stream);
+sk_has_side_effects void EmitVertex();
+sk_has_side_effects void EndPrimitive();
+
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_gpu.inc b/gfx/skia/skia/src/sksl/sksl_gpu.inc
new file mode 100644
index 0000000000..c027c2cc33
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_gpu.inc
@@ -0,0 +1,299 @@
+STRINGIFY(
+
+// defines built-in functions supported by SkSL when running on a GPU
+
+$genType radians($genType degrees);
+$genType sin($genType angle);
+$genType cos($genType angle);
+$genType tan($genType angle);
+$genType asin($genType x);
+$genType acos($genType x);
+$genType atan($genType y, $genType x);
+$genType atan($genType y_over_x);
+$genType sinh($genType x);
+$genType cosh($genType x);
+$genType tanh($genType x);
+$genType asinh($genType x);
+$genType acosh($genType x);
+$genType atanh($genType x);
+$genType pow($genType x, $genType y);
+$genType exp($genType x);
+$genType log($genType x);
+$genType exp2($genType x);
+$genType log2($genType x);
+$genType sqrt($genType x);
+$genHType radians($genHType degrees);
+$genHType sin($genHType angle);
+$genHType cos($genHType angle);
+$genHType tan($genHType angle);
+$genHType asin($genHType x);
+$genHType acos($genHType x);
+$genHType atan($genHType y, $genHType x);
+$genHType atan($genHType y_over_x);
+$genHType sinh($genHType x);
+$genHType cosh($genHType x);
+$genHType tanh($genHType x);
+$genHType asinh($genHType x);
+$genHType acosh($genHType x);
+$genHType atanh($genHType x);
+$genHType pow($genHType x, $genHType y);
+$genHType exp($genHType x);
+$genHType log($genHType x);
+$genHType exp2($genHType x);
+$genHType log2($genHType x);
+$genHType sqrt($genHType x);
+$genType inversesqrt($genType x);
+$genType abs($genType x);
+$genHType abs($genHType x);
+$genIType abs($genIType x);
+$genType sign($genType x);
+$genHType sign($genHType x);
+$genIType sign($genIType x);
+$genType floor($genType x);
+$genHType floor($genHType x);
+$genType trunc($genType x);
+$genHType trunc($genHType x);
+$genType round($genType x);
+$genHType round($genHType x);
+$genType roundEven($genType x);
+$genHType roundEven($genHType x);
+$genType ceil($genType x);
+$genHType ceil($genHType x);
+$genType fract($genType x);
+$genHType fract($genHType x);
+$genType mod($genType x, float y);
+$genType mod($genType x, $genType y);
+$genHType mod($genHType x, half y);
+$genHType mod($genHType x, $genHType y);
+$genType modf($genType x, out $genType i);
+$genHType modf($genHType x, out $genHType i);
+$genType min($genType x, $genType y);
+$genType min($genType x, float y);
+$genHType min($genHType x, $genHType y);
+$genHType min($genHType x, half y);
+$genIType min($genIType x, $genIType y);
+$genIType min($genIType x, int y);
+$genType max($genType x, $genType y);
+$genType max($genType x, float y);
+$genHType max($genHType x, $genHType y);
+$genHType max($genHType x, half y);
+$genIType max($genIType x, $genIType y);
+$genIType max($genIType x, int y);
+$genType clamp($genType x, $genType minVal, $genType maxVal);
+$genType clamp($genType x, float minVal, float maxVal);
+$genHType clamp($genHType x, $genHType minVal, $genHType maxVal);
+$genHType clamp($genHType x, half minVal, half maxVal);
+$genIType clamp($genIType x, $genIType minVal, $genIType maxVal);
+$genIType clamp($genIType x, int minVal, int maxVal);
+$genType saturate($genType x);
+$genHType saturate($genHType x);
+$genType mix($genType x, $genType y, $genType a);
+$genType mix($genType x, $genType y, float a);
+$genHType mix($genHType x, $genHType y, $genHType a);
+$genHType mix($genHType x, $genHType y, half a);
+$genType mix($genType x, $genType y, $genBType a);
+$genIType mix($genIType x, $genIType y, $genBType a);
+$genBType mix($genBType x, $genBType y, $genBType a);
+$genType step($genType edge, $genType x);
+$genType step(float edge, $genType x);
+$genHType step($genHType edge, $genHType x);
+$genHType step(half edge, $genHType x);
+$genType smoothstep($genType edge0, $genType edge1, $genType x);
+$genType smoothstep(float edge0, float edge1, $genType x);
+$genHType smoothstep($genHType edge0, $genHType edge1, $genHType x);
+$genHType smoothstep(half edge0, half edge1, $genHType x);
+$genBType isnan($genType x);
+$genBType isnan($genDType x);
+$genBType isinf($genType x);
+$genBType isinf($genDType x);
+$genIType floatBitsToInt($genType value);
+$genType intBitsToFloat($genIType value);
+$genType uintBitsToFloat($genUType value);
+$genType fma($genType a, $genType b, $genType c);
+$genHType fma($genHType a, $genHType b, $genHType c);
+$genDType fma($genDType a, $genDType b, $genDType c);
+sk_has_side_effects $genType frexp($genType x, out $genIType exp);
+$genType ldexp($genType x, in $genIType exp);
+uint packUnorm2x16(float2 v);
+uint packSnorm2x16(float2 v);
+uint packUnorm4x8(float4 v);
+uint packSnorm4x8(float4 v);
+float2 unpackUnorm2x16(uint p);
+float2 unpackSnorm2x16(uint p);
+float4 unpackUnorm4x8(uint p);
+float4 unpackSnorm4x8(uint p);
+uint2 unpackDouble2x32(double v);
+uint packHalf2x16(float2 v);
+float2 unpackHalf2x16(uint v);
+float length($genType x);
+half length($genHType x);
+double length($genDType x);
+float distance($genType p0, $genType p1);
+half distance($genHType p0, $genHType p1);
+double distance($genDType p0, $genDType p1);
+float dot($genType x, $genType y);
+half dot($genHType x, $genHType y);
+double dot($genDType x, $genDType y);
+float3 cross(float3 x, float3 y);
+half3 cross(half3 x, half3 y);
+double3 cross(double3 x, double3 y);
+$genType normalize($genType x);
+$genHType normalize($genHType x);
+$genDType normalize($genDType x);
+float4 ftransform();
+$genType faceforward($genType N, $genType I, $genType Nref);
+$genHType faceforward($genHType N, $genHType I, $genHType Nref);
+$genDType faceforward($genDType N, $genDType I, $genDType Nref);
+$genType reflect($genType I, $genType N);
+$genHType reflect($genHType I, $genHType N);
+$genDType reflect($genDType I, $genDType N);
+$genType refract($genType I, $genType N, float eta);
+$genHType refract($genHType I, $genHType N, float eta);
+$genDType refract($genDType I, $genDType N, float eta);
+$mat matrixCompMult($mat x, $mat y);
+float2x2 outerProduct(float2 c, float2 r);
+float3x3 outerProduct(float3 c, float3 r);
+float4x4 outerProduct(float4 c, float4 r);
+float2x3 outerProduct(float3 c, float2 r);
+float3x2 outerProduct(float2 c, float3 r);
+float2x4 outerProduct(float4 c, float2 r);
+float4x2 outerProduct(float2 c, float4 r);
+float3x4 outerProduct(float4 c, float3 r);
+float4x3 outerProduct(float3 c, float4 r);
+half2x2 outerProduct(half2 c, half2 r);
+half3x3 outerProduct(half3 c, half3 r);
+half4x4 outerProduct(half4 c, half4 r);
+half2x3 outerProduct(half3 c, half2 r);
+half3x2 outerProduct(half2 c, half3 r);
+half2x4 outerProduct(half4 c, half2 r);
+half4x2 outerProduct(half2 c, half4 r);
+half3x4 outerProduct(half4 c, half3 r);
+half4x3 outerProduct(half3 c, half4 r);
+float2x2 transpose(float2x2 m);
+float3x3 transpose(float3x3 m);
+float4x4 transpose(float4x4 m);
+float2x3 transpose(float3x2 m);
+float3x2 transpose(float2x3 m);
+float2x4 transpose(float4x2 m);
+float4x2 transpose(float2x4 m);
+float3x4 transpose(float4x3 m);
+float4x3 transpose(float3x4 m);
+half2x2 transpose(half2x2 m);
+half3x3 transpose(half3x3 m);
+half4x4 transpose(half4x4 m);
+half2x3 transpose(half3x2 m);
+half3x2 transpose(half2x3 m);
+half2x4 transpose(half4x2 m);
+half4x2 transpose(half2x4 m);
+half3x4 transpose(half4x3 m);
+half4x3 transpose(half3x4 m);
+float determinant(float2x2 m);
+float determinant(float3x3 m);
+float determinant(float4x4 m);
+half determinant(half2x2 m);
+half determinant(half3x3 m);
+half determinant(half4x4 m);
+float2x2 inverse(float2x2 m);
+float3x3 inverse(float3x3 m);
+float4x4 inverse(float4x4 m);
+half2x2 inverse(half2x2 m);
+half3x3 inverse(half3x3 m);
+half4x4 inverse(half4x4 m);
+$bvec lessThan($vec x, $vec y);
+$bvec lessThan($hvec x, $hvec y);
+$bvec lessThan($dvec x, $dvec y);
+$bvec lessThan($ivec x, $ivec y);
+$bvec lessThan($svec x, $svec y);
+$bvec lessThan($usvec x, $usvec y);
+$bvec lessThan($uvec x, $uvec y);
+$bvec lessThanEqual($vec x, $vec y);
+$bvec lessThanEqual($hvec x, $hvec y);
+$bvec lessThanEqual($dvec x, $dvec y);
+$bvec lessThanEqual($ivec x, $ivec y);
+$bvec lessThanEqual($uvec x, $uvec y);
+$bvec lessThanEqual($svec x, $svec y);
+$bvec lessThanEqual($usvec x, $usvec y);
+$bvec greaterThan($vec x, $vec y);
+$bvec greaterThan($hvec x, $hvec y);
+$bvec greaterThan($dvec x, $dvec y);
+$bvec greaterThan($ivec x, $ivec y);
+$bvec greaterThan($uvec x, $uvec y);
+$bvec greaterThan($svec x, $svec y);
+$bvec greaterThan($usvec x, $usvec y);
+$bvec greaterThanEqual($vec x, $vec y);
+$bvec greaterThanEqual($hvec x, $hvec y);
+$bvec greaterThanEqual($dvec x, $dvec y);
+$bvec greaterThanEqual($ivec x, $ivec y);
+$bvec greaterThanEqual($uvec x, $uvec y);
+$bvec greaterThanEqual($svec x, $svec y);
+$bvec greaterThanEqual($usvec x, $usvec y);
+$bvec equal($vec x, $vec y);
+$bvec equal($hvec x, $hvec y);
+$bvec equal($dvec x, $dvec y);
+$bvec equal($ivec x, $ivec y);
+$bvec equal($uvec x, $uvec y);
+$bvec equal($svec x, $svec y);
+$bvec equal($usvec x, $usvec y);
+$bvec equal($bvec x, $bvec y);
+$bvec notEqual($vec x, $vec y);
+$bvec notEqual($hvec x, $hvec y);
+$bvec notEqual($dvec x, $dvec y);
+$bvec notEqual($ivec x, $ivec y);
+$bvec notEqual($uvec x, $uvec y);
+$bvec notEqual($svec x, $svec y);
+$bvec notEqual($usvec x, $usvec y);
+$bvec notEqual($bvec x, $bvec y);
+bool any($bvec x);
+bool all($bvec x);
+$bvec not($bvec x);
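+// e.g. any(bool2(true, false)) == true; all(bool2(true, false)) == false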
+
+$genIType bitCount($genIType value);
+$genIType bitCount($genUType value);
+$genIType findLSB($genIType value);
+$genIType findLSB($genUType value);
+$genIType findMSB($genIType value);
+$genIType findMSB($genUType value);
+
+sampler2D makeSampler2D(texture2D texture, sampler sampler);
+int2 textureSize($gsampler2DRect sampler);
+
+half4 sample($gsampler1D sampler, float P);
+half4 sample($gsampler1D sampler, float P, float bias);
+half4 sample($gsampler2D sampler, float2 P);
+// The declarations above currently only expand to handle the float/fixed case, so we also
+// declare this integer version of sample().
+int4 sample(isampler2D sampler, float2 P);
+half4 sample(samplerExternalOES sampler, float2 P, float bias);
+half4 sample(samplerExternalOES sampler, float2 P);
+
+half4 sample($gsampler2DRect sampler, float2 P);
+half4 sample($gsampler2DRect sampler, float3 P);
+
+// We do not yet support generic types for loading subpassInput, so we declare the explicit
+// versions that we currently use.
+float4 subpassLoad(subpassInput subpass);
+float4 subpassLoad(subpassInputMS subpass, int sample);
+
+half4 sample($gsampler1D sampler, float2 P);
+half4 sample($gsampler1D sampler, float2 P, float bias);
+half4 sample($gsampler2D sampler, float3 P);
+half4 sample($gsampler2D sampler, float3 P, float bias);
+
+float4 imageLoad(image2D image, int2 P);
+int4 imageLoad(iimage2D image, int2 P);
+$genType dFdx($genType p);
+$genType dFdy($genType p);
+$genHType dFdx($genHType p);
+$genHType dFdy($genHType p);
+$genType fwidth($genType p);
+$genHType fwidth($genHType p);
+float interpolateAtSample(float interpolant, int sample);
+float2 interpolateAtSample(float2 interpolant, int sample);
+float3 interpolateAtSample(float3 interpolant, int sample);
+float4 interpolateAtSample(float4 interpolant, int sample);
+float interpolateAtOffset(float interpolant, float2 offset);
+float2 interpolateAtOffset(float2 interpolant, float2 offset);
+float3 interpolateAtOffset(float3 interpolant, float2 offset);
+float4 interpolateAtOffset(float4 interpolant, float2 offset);
+
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_interp.inc b/gfx/skia/skia/src/sksl/sksl_interp.inc
new file mode 100644
index 0000000000..f43f05f8bf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_interp.inc
@@ -0,0 +1,53 @@
+STRINGIFY(
+
+$genType cos($genType y);
+$genHType cos($genHType y);
+float dot($genType x, $genType y);
+float2x2 inverse(float2x2 m);
+float3x3 inverse(float3x3 m);
+float4x4 inverse(float4x4 m);
+$genType sin($genType x);
+$genHType sin($genHType x);
+$genType sqrt($genType x);
+$genHType sqrt($genHType x);
+$genType tan($genType x);
+$genHType tan($genHType x);
+
+float degrees(float rad) { return rad * 57.2957795; }
+float2 degrees(float2 rad) { return rad * 57.2957795; }
+float3 degrees(float3 rad) { return rad * 57.2957795; }
+float4 degrees(float4 rad) { return rad * 57.2957795; }
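+// (57.2957795 is approximately 180/pi; the 0.0174532925 below is its reciprocal, pi/180.)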
+
+float radians(float deg) { return deg * 0.0174532925; }
+float2 radians(float2 deg) { return deg * 0.0174532925; }
+float3 radians(float3 deg) { return deg * 0.0174532925; }
+float4 radians(float4 deg) { return deg * 0.0174532925; }
+
+float length(float2 v) { return sqrt(dot(v, v)); }
+float length(float3 v) { return sqrt(dot(v, v)); }
+float length(float4 v) { return sqrt(dot(v, v)); }
+
+float distance(float2 a, float2 b) { return length(a - b); }
+float distance(float3 a, float3 b) { return length(a - b); }
+float distance(float4 a, float4 b) { return length(a - b); }
+
+float2 normalize(float2 v) { return v / length(v); }
+float3 normalize(float3 v) { return v / length(v); }
+float4 normalize(float4 v) { return v / length(v); }
+
+float mix(float x, float y, float t) { return x * (1 - t) + y * t; }
+float2 mix(float2 x, float2 y, float t) { return x * (1 - t) + y * t; }
+float3 mix(float3 x, float3 y, float t) { return x * (1 - t) + y * t; }
+float4 mix(float4 x, float4 y, float t) { return x * (1 - t) + y * t; }
+
+float2 mix(float2 x, float2 y, float2 t) { return x * (1 - t) + y * t; }
+float3 mix(float3 x, float3 y, float3 t) { return x * (1 - t) + y * t; }
+float4 mix(float4 x, float4 y, float4 t) { return x * (1 - t) + y * t; }
+
+float3 cross(float3 a, float3 b) {
+ return float3(a.y * b.z - a.z * b.y,
+ a.z * b.x - a.x * b.z,
+ a.x * b.y - a.y * b.x);
+}
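+// e.g. cross(float3(1, 0, 0), float3(0, 1, 0)) == float3(0, 0, 1)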
+
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_pipeline.inc b/gfx/skia/skia/src/sksl/sksl_pipeline.inc
new file mode 100644
index 0000000000..56f189e317
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_pipeline.inc
@@ -0,0 +1,3 @@
+STRINGIFY(
+ half4 sample(fragmentProcessor fp);
+)
diff --git a/gfx/skia/skia/src/sksl/sksl_vert.inc b/gfx/skia/skia/src/sksl/sksl_vert.inc
new file mode 100644
index 0000000000..86db5d2c24
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_vert.inc
@@ -0,0 +1,14 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkSL vertex shaders
+
+out sk_PerVertex {
+ layout(builtin=0) float4 sk_Position;
+ layout(builtin=1) float sk_PointSize;
+ layout(builtin=3) float sk_ClipDistance[1];
+};
+
+layout(builtin=42) in int sk_VertexID;
+layout(builtin=43) in int sk_InstanceID;
+
+)
diff --git a/gfx/skia/skia/src/sksl/spirv.h b/gfx/skia/skia/src/sksl/spirv.h
new file mode 100644
index 0000000000..22821ed862
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/spirv.h
@@ -0,0 +1,870 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+** C, C++, C++11, JSON, Lua, Python
+**
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 4
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010000;
+static const unsigned int SpvRevision = 4;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
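+
+/* Illustrative decode of an instruction's first word (a sketch, not part of the generated
+ * header):
+ *   SpvOp op = (SpvOp)(firstWord & SpvOpCodeMask);
+ *   unsigned int wordCount = firstWord >> SpvWordCountShift;
+ */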
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL_C = 3,
+ SpvSourceLanguageOpenCL_CPP = 4,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL = 2,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeOriginLowerLeft = 8,
+ SpvExecutionModeEarlyFragmentTests = 9,
+ SpvExecutionModePointMode = 10,
+ SpvExecutionModeXfb = 11,
+ SpvExecutionModeDepthReplacing = 12,
+ SpvExecutionModeDepthGreater = 14,
+ SpvExecutionModeDepthLess = 15,
+ SpvExecutionModeDepthUnchanged = 16,
+ SpvExecutionModeLocalSize = 17,
+ SpvExecutionModeLocalSizeHint = 18,
+ SpvExecutionModeInputPoints = 19,
+ SpvExecutionModeInputLines = 20,
+ SpvExecutionModeInputLinesAdjacency = 21,
+ SpvExecutionModeTriangles = 22,
+ SpvExecutionModeInputTrianglesAdjacency = 23,
+ SpvExecutionModeQuads = 24,
+ SpvExecutionModeIsolines = 25,
+ SpvExecutionModeOutputVertices = 26,
+ SpvExecutionModeOutputPoints = 27,
+ SpvExecutionModeOutputLineStrip = 28,
+ SpvExecutionModeOutputTriangleStrip = 29,
+ SpvExecutionModeVecTypeHint = 30,
+ SpvExecutionModeContractionOff = 31,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroup = 4,
+ SpvStorageClassCrossWorkgroup = 5,
+ SpvStorageClassPrivate = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPushConstant = 9,
+ SpvStorageClassAtomicCounter = 10,
+ SpvStorageClassImage = 11,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+ SpvDimSubpassData = 6,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+ SpvImageFormatUnknown = 0,
+ SpvImageFormatRgba32f = 1,
+ SpvImageFormatRgba16f = 2,
+ SpvImageFormatR32f = 3,
+ SpvImageFormatRgba8 = 4,
+ SpvImageFormatRgba8Snorm = 5,
+ SpvImageFormatRg32f = 6,
+ SpvImageFormatRg16f = 7,
+ SpvImageFormatR11fG11fB10f = 8,
+ SpvImageFormatR16f = 9,
+ SpvImageFormatRgba16 = 10,
+ SpvImageFormatRgb10A2 = 11,
+ SpvImageFormatRg16 = 12,
+ SpvImageFormatRg8 = 13,
+ SpvImageFormatR16 = 14,
+ SpvImageFormatR8 = 15,
+ SpvImageFormatRgba16Snorm = 16,
+ SpvImageFormatRg16Snorm = 17,
+ SpvImageFormatRg8Snorm = 18,
+ SpvImageFormatR16Snorm = 19,
+ SpvImageFormatR8Snorm = 20,
+ SpvImageFormatRgba32i = 21,
+ SpvImageFormatRgba16i = 22,
+ SpvImageFormatRgba8i = 23,
+ SpvImageFormatR32i = 24,
+ SpvImageFormatRg32i = 25,
+ SpvImageFormatRg16i = 26,
+ SpvImageFormatRg8i = 27,
+ SpvImageFormatR16i = 28,
+ SpvImageFormatR8i = 29,
+ SpvImageFormatRgba32ui = 30,
+ SpvImageFormatRgba16ui = 31,
+ SpvImageFormatRgba8ui = 32,
+ SpvImageFormatR32ui = 33,
+ SpvImageFormatRgb10a2ui = 34,
+ SpvImageFormatRg32ui = 35,
+ SpvImageFormatRg16ui = 36,
+ SpvImageFormatRg8ui = 37,
+ SpvImageFormatR16ui = 38,
+ SpvImageFormatR8ui = 39,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+ SpvImageChannelOrderR = 0,
+ SpvImageChannelOrderA = 1,
+ SpvImageChannelOrderRG = 2,
+ SpvImageChannelOrderRA = 3,
+ SpvImageChannelOrderRGB = 4,
+ SpvImageChannelOrderRGBA = 5,
+ SpvImageChannelOrderBGRA = 6,
+ SpvImageChannelOrderARGB = 7,
+ SpvImageChannelOrderIntensity = 8,
+ SpvImageChannelOrderLuminance = 9,
+ SpvImageChannelOrderRx = 10,
+ SpvImageChannelOrderRGx = 11,
+ SpvImageChannelOrderRGBx = 12,
+ SpvImageChannelOrderDepth = 13,
+ SpvImageChannelOrderDepthStencil = 14,
+ SpvImageChannelOrdersRGB = 15,
+ SpvImageChannelOrdersRGBx = 16,
+ SpvImageChannelOrdersRGBA = 17,
+ SpvImageChannelOrdersBGRA = 18,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+ SpvImageChannelDataTypeSnormInt8 = 0,
+ SpvImageChannelDataTypeSnormInt16 = 1,
+ SpvImageChannelDataTypeUnormInt8 = 2,
+ SpvImageChannelDataTypeUnormInt16 = 3,
+ SpvImageChannelDataTypeUnormShort565 = 4,
+ SpvImageChannelDataTypeUnormShort555 = 5,
+ SpvImageChannelDataTypeUnormInt101010 = 6,
+ SpvImageChannelDataTypeSignedInt8 = 7,
+ SpvImageChannelDataTypeSignedInt16 = 8,
+ SpvImageChannelDataTypeSignedInt32 = 9,
+ SpvImageChannelDataTypeUnsignedInt8 = 10,
+ SpvImageChannelDataTypeUnsignedInt16 = 11,
+ SpvImageChannelDataTypeUnsignedInt32 = 12,
+ SpvImageChannelDataTypeHalfFloat = 13,
+ SpvImageChannelDataTypeFloat = 14,
+ SpvImageChannelDataTypeUnormInt24 = 15,
+ SpvImageChannelDataTypeUnormInt101010_2 = 16,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+ SpvImageOperandsBiasShift = 0,
+ SpvImageOperandsLodShift = 1,
+ SpvImageOperandsGradShift = 2,
+ SpvImageOperandsConstOffsetShift = 3,
+ SpvImageOperandsOffsetShift = 4,
+ SpvImageOperandsConstOffsetsShift = 5,
+ SpvImageOperandsSampleShift = 6,
+ SpvImageOperandsMinLodShift = 7,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+ SpvImageOperandsMaskNone = 0,
+ SpvImageOperandsBiasMask = 0x00000001,
+ SpvImageOperandsLodMask = 0x00000002,
+ SpvImageOperandsGradMask = 0x00000004,
+ SpvImageOperandsConstOffsetMask = 0x00000008,
+ SpvImageOperandsOffsetMask = 0x00000010,
+ SpvImageOperandsConstOffsetsMask = 0x00000020,
+ SpvImageOperandsSampleMask = 0x00000040,
+ SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeNoWrite = 6,
+ SpvFunctionParameterAttributeNoReadWrite = 7,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationRelaxedPrecision = 0,
+ SpvDecorationSpecId = 1,
+ SpvDecorationBlock = 2,
+ SpvDecorationBufferBlock = 3,
+ SpvDecorationRowMajor = 4,
+ SpvDecorationColMajor = 5,
+ SpvDecorationArrayStride = 6,
+ SpvDecorationMatrixStride = 7,
+ SpvDecorationGLSLShared = 8,
+ SpvDecorationGLSLPacked = 9,
+ SpvDecorationCPacked = 10,
+ SpvDecorationBuiltIn = 11,
+ SpvDecorationNoPerspective = 13,
+ SpvDecorationFlat = 14,
+ SpvDecorationPatch = 15,
+ SpvDecorationCentroid = 16,
+ SpvDecorationSample = 17,
+ SpvDecorationInvariant = 18,
+ SpvDecorationRestrict = 19,
+ SpvDecorationAliased = 20,
+ SpvDecorationVolatile = 21,
+ SpvDecorationConstant = 22,
+ SpvDecorationCoherent = 23,
+ SpvDecorationNonWritable = 24,
+ SpvDecorationNonReadable = 25,
+ SpvDecorationUniform = 26,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationXfbBuffer = 36,
+ SpvDecorationXfbStride = 37,
+ SpvDecorationFuncParamAttr = 38,
+ SpvDecorationFPRoundingMode = 39,
+ SpvDecorationFPFastMathMode = 40,
+ SpvDecorationLinkageAttributes = 41,
+ SpvDecorationNoContraction = 42,
+ SpvDecorationInputAttachmentIndex = 43,
+ SpvDecorationAlignment = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+ SpvBuiltInVertexIndex = 42,
+ SpvBuiltInInstanceIndex = 43,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsAcquireShift = 1,
+ SpvMemorySemanticsReleaseShift = 2,
+ SpvMemorySemanticsAcquireReleaseShift = 3,
+ SpvMemorySemanticsSequentiallyConsistentShift = 4,
+ SpvMemorySemanticsUniformMemoryShift = 6,
+ SpvMemorySemanticsSubgroupMemoryShift = 7,
+ SpvMemorySemanticsWorkgroupMemoryShift = 8,
+ SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+ SpvMemorySemanticsImageMemoryShift = 11,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsAcquireMask = 0x00000002,
+ SpvMemorySemanticsReleaseMask = 0x00000004,
+ SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+ SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+ SpvMemoryAccessNontemporalShift = 2,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+ SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+ SpvScopeCrossDevice = 0,
+ SpvScopeDevice = 1,
+ SpvScopeWorkgroup = 2,
+ SpvScopeSubgroup = 3,
+ SpvScopeInvocation = 4,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+ SpvCapabilityMatrix = 0,
+ SpvCapabilityShader = 1,
+ SpvCapabilityGeometry = 2,
+ SpvCapabilityTessellation = 3,
+ SpvCapabilityAddresses = 4,
+ SpvCapabilityLinkage = 5,
+ SpvCapabilityKernel = 6,
+ SpvCapabilityVector16 = 7,
+ SpvCapabilityFloat16Buffer = 8,
+ SpvCapabilityFloat16 = 9,
+ SpvCapabilityFloat64 = 10,
+ SpvCapabilityInt64 = 11,
+ SpvCapabilityInt64Atomics = 12,
+ SpvCapabilityImageBasic = 13,
+ SpvCapabilityImageReadWrite = 14,
+ SpvCapabilityImageMipmap = 15,
+ SpvCapabilityPipes = 17,
+ SpvCapabilityGroups = 18,
+ SpvCapabilityDeviceEnqueue = 19,
+ SpvCapabilityLiteralSampler = 20,
+ SpvCapabilityAtomicStorage = 21,
+ SpvCapabilityInt16 = 22,
+ SpvCapabilityTessellationPointSize = 23,
+ SpvCapabilityGeometryPointSize = 24,
+ SpvCapabilityImageGatherExtended = 25,
+ SpvCapabilityStorageImageMultisample = 27,
+ SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+ SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+ SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+ SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+ SpvCapabilityClipDistance = 32,
+ SpvCapabilityCullDistance = 33,
+ SpvCapabilityImageCubeArray = 34,
+ SpvCapabilitySampleRateShading = 35,
+ SpvCapabilityImageRect = 36,
+ SpvCapabilitySampledRect = 37,
+ SpvCapabilityGenericPointer = 38,
+ SpvCapabilityInt8 = 39,
+ SpvCapabilityInputAttachment = 40,
+ SpvCapabilitySparseResidency = 41,
+ SpvCapabilityMinLod = 42,
+ SpvCapabilitySampled1D = 43,
+ SpvCapabilityImage1D = 44,
+ SpvCapabilitySampledCubeArray = 45,
+ SpvCapabilitySampledBuffer = 46,
+ SpvCapabilityImageBuffer = 47,
+ SpvCapabilityImageMSArray = 48,
+ SpvCapabilityStorageImageExtendedFormats = 49,
+ SpvCapabilityImageQuery = 50,
+ SpvCapabilityDerivativeControl = 51,
+ SpvCapabilityInterpolationFunction = 52,
+ SpvCapabilityTransformFeedback = 53,
+ SpvCapabilityGeometryStreams = 54,
+ SpvCapabilityStorageImageReadWithoutFormat = 55,
+ SpvCapabilityStorageImageWriteWithoutFormat = 56,
+ SpvCapabilityMultiViewport = 57,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpUndef = 1,
+ SpvOpSourceContinued = 2,
+ SpvOpSource = 3,
+ SpvOpSourceExtension = 4,
+ SpvOpName = 5,
+ SpvOpMemberName = 6,
+ SpvOpString = 7,
+ SpvOpLine = 8,
+ SpvOpExtension = 10,
+ SpvOpExtInstImport = 11,
+ SpvOpExtInst = 12,
+ SpvOpMemoryModel = 14,
+ SpvOpEntryPoint = 15,
+ SpvOpExecutionMode = 16,
+ SpvOpCapability = 17,
+ SpvOpTypeVoid = 19,
+ SpvOpTypeBool = 20,
+ SpvOpTypeInt = 21,
+ SpvOpTypeFloat = 22,
+ SpvOpTypeVector = 23,
+ SpvOpTypeMatrix = 24,
+ SpvOpTypeImage = 25,
+ SpvOpTypeSampler = 26,
+ SpvOpTypeSampledImage = 27,
+ SpvOpTypeArray = 28,
+ SpvOpTypeRuntimeArray = 29,
+ SpvOpTypeStruct = 30,
+ SpvOpTypeOpaque = 31,
+ SpvOpTypePointer = 32,
+ SpvOpTypeFunction = 33,
+ SpvOpTypeEvent = 34,
+ SpvOpTypeDeviceEvent = 35,
+ SpvOpTypeReserveId = 36,
+ SpvOpTypeQueue = 37,
+ SpvOpTypePipe = 38,
+ SpvOpTypeForwardPointer = 39,
+ SpvOpConstantTrue = 41,
+ SpvOpConstantFalse = 42,
+ SpvOpConstant = 43,
+ SpvOpConstantComposite = 44,
+ SpvOpConstantSampler = 45,
+ SpvOpConstantNull = 46,
+ SpvOpSpecConstantTrue = 48,
+ SpvOpSpecConstantFalse = 49,
+ SpvOpSpecConstant = 50,
+ SpvOpSpecConstantComposite = 51,
+ SpvOpSpecConstantOp = 52,
+ SpvOpFunction = 54,
+ SpvOpFunctionParameter = 55,
+ SpvOpFunctionEnd = 56,
+ SpvOpFunctionCall = 57,
+ SpvOpVariable = 59,
+ SpvOpImageTexelPointer = 60,
+ SpvOpLoad = 61,
+ SpvOpStore = 62,
+ SpvOpCopyMemory = 63,
+ SpvOpCopyMemorySized = 64,
+ SpvOpAccessChain = 65,
+ SpvOpInBoundsAccessChain = 66,
+ SpvOpPtrAccessChain = 67,
+ SpvOpArrayLength = 68,
+ SpvOpGenericPtrMemSemantics = 69,
+ SpvOpInBoundsPtrAccessChain = 70,
+ SpvOpDecorate = 71,
+ SpvOpMemberDecorate = 72,
+ SpvOpDecorationGroup = 73,
+ SpvOpGroupDecorate = 74,
+ SpvOpGroupMemberDecorate = 75,
+ SpvOpVectorExtractDynamic = 77,
+ SpvOpVectorInsertDynamic = 78,
+ SpvOpVectorShuffle = 79,
+ SpvOpCompositeConstruct = 80,
+ SpvOpCompositeExtract = 81,
+ SpvOpCompositeInsert = 82,
+ SpvOpCopyObject = 83,
+ SpvOpTranspose = 84,
+ SpvOpSampledImage = 86,
+ SpvOpImageSampleImplicitLod = 87,
+ SpvOpImageSampleExplicitLod = 88,
+ SpvOpImageSampleDrefImplicitLod = 89,
+ SpvOpImageSampleDrefExplicitLod = 90,
+ SpvOpImageSampleProjImplicitLod = 91,
+ SpvOpImageSampleProjExplicitLod = 92,
+ SpvOpImageSampleProjDrefImplicitLod = 93,
+ SpvOpImageSampleProjDrefExplicitLod = 94,
+ SpvOpImageFetch = 95,
+ SpvOpImageGather = 96,
+ SpvOpImageDrefGather = 97,
+ SpvOpImageRead = 98,
+ SpvOpImageWrite = 99,
+ SpvOpImage = 100,
+ SpvOpImageQueryFormat = 101,
+ SpvOpImageQueryOrder = 102,
+ SpvOpImageQuerySizeLod = 103,
+ SpvOpImageQuerySize = 104,
+ SpvOpImageQueryLod = 105,
+ SpvOpImageQueryLevels = 106,
+ SpvOpImageQuerySamples = 107,
+ SpvOpConvertFToU = 109,
+ SpvOpConvertFToS = 110,
+ SpvOpConvertSToF = 111,
+ SpvOpConvertUToF = 112,
+ SpvOpUConvert = 113,
+ SpvOpSConvert = 114,
+ SpvOpFConvert = 115,
+ SpvOpQuantizeToF16 = 116,
+ SpvOpConvertPtrToU = 117,
+ SpvOpSatConvertSToU = 118,
+ SpvOpSatConvertUToS = 119,
+ SpvOpConvertUToPtr = 120,
+ SpvOpPtrCastToGeneric = 121,
+ SpvOpGenericCastToPtr = 122,
+ SpvOpGenericCastToPtrExplicit = 123,
+ SpvOpBitcast = 124,
+ SpvOpSNegate = 126,
+ SpvOpFNegate = 127,
+ SpvOpIAdd = 128,
+ SpvOpFAdd = 129,
+ SpvOpISub = 130,
+ SpvOpFSub = 131,
+ SpvOpIMul = 132,
+ SpvOpFMul = 133,
+ SpvOpUDiv = 134,
+ SpvOpSDiv = 135,
+ SpvOpFDiv = 136,
+ SpvOpUMod = 137,
+ SpvOpSRem = 138,
+ SpvOpSMod = 139,
+ SpvOpFRem = 140,
+ SpvOpFMod = 141,
+ SpvOpVectorTimesScalar = 142,
+ SpvOpMatrixTimesScalar = 143,
+ SpvOpVectorTimesMatrix = 144,
+ SpvOpMatrixTimesVector = 145,
+ SpvOpMatrixTimesMatrix = 146,
+ SpvOpOuterProduct = 147,
+ SpvOpDot = 148,
+ SpvOpIAddCarry = 149,
+ SpvOpISubBorrow = 150,
+ SpvOpUMulExtended = 151,
+ SpvOpSMulExtended = 152,
+ SpvOpAny = 154,
+ SpvOpAll = 155,
+ SpvOpIsNan = 156,
+ SpvOpIsInf = 157,
+ SpvOpIsFinite = 158,
+ SpvOpIsNormal = 159,
+ SpvOpSignBitSet = 160,
+ SpvOpLessOrGreater = 161,
+ SpvOpOrdered = 162,
+ SpvOpUnordered = 163,
+ SpvOpLogicalEqual = 164,
+ SpvOpLogicalNotEqual = 165,
+ SpvOpLogicalOr = 166,
+ SpvOpLogicalAnd = 167,
+ SpvOpLogicalNot = 168,
+ SpvOpSelect = 169,
+ SpvOpIEqual = 170,
+ SpvOpINotEqual = 171,
+ SpvOpUGreaterThan = 172,
+ SpvOpSGreaterThan = 173,
+ SpvOpUGreaterThanEqual = 174,
+ SpvOpSGreaterThanEqual = 175,
+ SpvOpULessThan = 176,
+ SpvOpSLessThan = 177,
+ SpvOpULessThanEqual = 178,
+ SpvOpSLessThanEqual = 179,
+ SpvOpFOrdEqual = 180,
+ SpvOpFUnordEqual = 181,
+ SpvOpFOrdNotEqual = 182,
+ SpvOpFUnordNotEqual = 183,
+ SpvOpFOrdLessThan = 184,
+ SpvOpFUnordLessThan = 185,
+ SpvOpFOrdGreaterThan = 186,
+ SpvOpFUnordGreaterThan = 187,
+ SpvOpFOrdLessThanEqual = 188,
+ SpvOpFUnordLessThanEqual = 189,
+ SpvOpFOrdGreaterThanEqual = 190,
+ SpvOpFUnordGreaterThanEqual = 191,
+ SpvOpShiftRightLogical = 194,
+ SpvOpShiftRightArithmetic = 195,
+ SpvOpShiftLeftLogical = 196,
+ SpvOpBitwiseOr = 197,
+ SpvOpBitwiseXor = 198,
+ SpvOpBitwiseAnd = 199,
+ SpvOpNot = 200,
+ SpvOpBitFieldInsert = 201,
+ SpvOpBitFieldSExtract = 202,
+ SpvOpBitFieldUExtract = 203,
+ SpvOpBitReverse = 204,
+ SpvOpBitCount = 205,
+ SpvOpDPdx = 207,
+ SpvOpDPdy = 208,
+ SpvOpFwidth = 209,
+ SpvOpDPdxFine = 210,
+ SpvOpDPdyFine = 211,
+ SpvOpFwidthFine = 212,
+ SpvOpDPdxCoarse = 213,
+ SpvOpDPdyCoarse = 214,
+ SpvOpFwidthCoarse = 215,
+ SpvOpEmitVertex = 218,
+ SpvOpEndPrimitive = 219,
+ SpvOpEmitStreamVertex = 220,
+ SpvOpEndStreamPrimitive = 221,
+ SpvOpControlBarrier = 224,
+ SpvOpMemoryBarrier = 225,
+ SpvOpAtomicLoad = 227,
+ SpvOpAtomicStore = 228,
+ SpvOpAtomicExchange = 229,
+ SpvOpAtomicCompareExchange = 230,
+ SpvOpAtomicCompareExchangeWeak = 231,
+ SpvOpAtomicIIncrement = 232,
+ SpvOpAtomicIDecrement = 233,
+ SpvOpAtomicIAdd = 234,
+ SpvOpAtomicISub = 235,
+ SpvOpAtomicSMin = 236,
+ SpvOpAtomicUMin = 237,
+ SpvOpAtomicSMax = 238,
+ SpvOpAtomicUMax = 239,
+ SpvOpAtomicAnd = 240,
+ SpvOpAtomicOr = 241,
+ SpvOpAtomicXor = 242,
+ SpvOpPhi = 245,
+ SpvOpLoopMerge = 246,
+ SpvOpSelectionMerge = 247,
+ SpvOpLabel = 248,
+ SpvOpBranch = 249,
+ SpvOpBranchConditional = 250,
+ SpvOpSwitch = 251,
+ SpvOpKill = 252,
+ SpvOpReturn = 253,
+ SpvOpReturnValue = 254,
+ SpvOpUnreachable = 255,
+ SpvOpLifetimeStart = 256,
+ SpvOpLifetimeStop = 257,
+ SpvOpGroupAsyncCopy = 259,
+ SpvOpGroupWaitEvents = 260,
+ SpvOpGroupAll = 261,
+ SpvOpGroupAny = 262,
+ SpvOpGroupBroadcast = 263,
+ SpvOpGroupIAdd = 264,
+ SpvOpGroupFAdd = 265,
+ SpvOpGroupFMin = 266,
+ SpvOpGroupUMin = 267,
+ SpvOpGroupSMin = 268,
+ SpvOpGroupFMax = 269,
+ SpvOpGroupUMax = 270,
+ SpvOpGroupSMax = 271,
+ SpvOpReadPipe = 274,
+ SpvOpWritePipe = 275,
+ SpvOpReservedReadPipe = 276,
+ SpvOpReservedWritePipe = 277,
+ SpvOpReserveReadPipePackets = 278,
+ SpvOpReserveWritePipePackets = 279,
+ SpvOpCommitReadPipe = 280,
+ SpvOpCommitWritePipe = 281,
+ SpvOpIsValidReserveId = 282,
+ SpvOpGetNumPipePackets = 283,
+ SpvOpGetMaxPipePackets = 284,
+ SpvOpGroupReserveReadPipePackets = 285,
+ SpvOpGroupReserveWritePipePackets = 286,
+ SpvOpGroupCommitReadPipe = 287,
+ SpvOpGroupCommitWritePipe = 288,
+ SpvOpEnqueueMarker = 291,
+ SpvOpEnqueueKernel = 292,
+ SpvOpGetKernelNDrangeSubGroupCount = 293,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+ SpvOpGetKernelWorkGroupSize = 295,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ SpvOpRetainEvent = 297,
+ SpvOpReleaseEvent = 298,
+ SpvOpCreateUserEvent = 299,
+ SpvOpIsValidEvent = 300,
+ SpvOpSetUserEventStatus = 301,
+ SpvOpCaptureEventProfilingInfo = 302,
+ SpvOpGetDefaultQueue = 303,
+ SpvOpBuildNDRange = 304,
+ SpvOpImageSparseSampleImplicitLod = 305,
+ SpvOpImageSparseSampleExplicitLod = 306,
+ SpvOpImageSparseSampleDrefImplicitLod = 307,
+ SpvOpImageSparseSampleDrefExplicitLod = 308,
+ SpvOpImageSparseSampleProjImplicitLod = 309,
+ SpvOpImageSparseSampleProjExplicitLod = 310,
+ SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+ SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+ SpvOpImageSparseFetch = 313,
+ SpvOpImageSparseGather = 314,
+ SpvOpImageSparseDrefGather = 315,
+ SpvOpImageSparseTexelsResident = 316,
+ SpvOpNoLine = 317,
+ SpvOpAtomicFlagTestAndSet = 318,
+ SpvOpAtomicFlagClear = 319,
+ SpvOpImageSparseRead = 320,
+} SpvOp;
+
+#endif // #ifndef spirv_H
diff --git a/gfx/skia/skia/src/svg/SkSVGCanvas.cpp b/gfx/skia/skia/src/svg/SkSVGCanvas.cpp
new file mode 100644
index 0000000000..30c9198c5b
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGCanvas.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/svg/SkSVGCanvas.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/svg/SkSVGDevice.h"
+#include "src/xml/SkXMLWriter.h"
+
+std::unique_ptr<SkCanvas> SkSVGCanvas::Make(const SkRect& bounds, SkWStream* writer,
+ uint32_t flags) {
+ // TODO: pass full bounds to the device
+ const auto size = bounds.roundOut().size();
+ const auto xml_flags = (flags & kNoPrettyXML_Flag) ? SkToU32(SkXMLStreamWriter::kNoPretty_Flag)
+ : 0;
+
+ auto svgDevice = SkSVGDevice::Make(size,
+ skstd::make_unique<SkXMLStreamWriter>(writer, xml_flags),
+ flags);
+
+ return svgDevice ? skstd::make_unique<SkCanvas>(std::move(svgDevice))
+ : nullptr;
+}
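+
+// Illustrative usage (a sketch; the output file name is hypothetical):
+//   SkFILEWStream stream("out.svg");
+//   std::unique_ptr<SkCanvas> canvas = SkSVGCanvas::Make(SkRect::MakeWH(256, 256), &stream, 0);
+//   canvas->drawCircle(128, 128, 64, SkPaint());
+//   canvas.reset(); // destroying the canvas finalizes the SVG document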
diff --git a/gfx/skia/skia/src/svg/SkSVGDevice.cpp b/gfx/skia/skia/src/svg/SkSVGDevice.cpp
new file mode 100644
index 0000000000..85e0b1bbd3
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGDevice.cpp
@@ -0,0 +1,1048 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/svg/SkSVGDevice.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/SkTHash.h"
+#include "include/private/SkTo.h"
+#include "include/svg/SkSVGCanvas.h"
+#include "include/utils/SkBase64.h"
+#include "include/utils/SkParsePath.h"
+#include "src/codec/SkJpegCodec.h"
+#include "src/codec/SkPngCodec.h"
+#include "src/core/SkAnnotationKeys.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkUtils.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/xml/SkXMLWriter.h"
+
+namespace {
+
+static SkString svg_color(SkColor color) {
+ // https://www.w3.org/TR/css-color-3/#html4
+ auto named_color = [](SkColor c) -> const char* {
+ switch (c & 0xffffff) {
+ case 0x000000: return "black";
+ case 0x000080: return "navy";
+ case 0x0000ff: return "blue";
+ case 0x008000: return "green";
+ case 0x008080: return "teal";
+ case 0x00ff00: return "lime";
+ case 0x00ffff: return "aqua";
+ case 0x800000: return "maroon";
+ case 0x800080: return "purple";
+ case 0x808000: return "olive";
+ case 0x808080: return "gray";
+ case 0xc0c0c0: return "silver";
+ case 0xff0000: return "red";
+ case 0xff00ff: return "fuchsia";
+ case 0xffff00: return "yellow";
+ case 0xffffff: return "white";
+ default: break;
+ }
+
+ return nullptr;
+ };
+
+ if (const auto* nc = named_color(color)) {
+ return SkString(nc);
+ }
+
+ return SkStringPrintf("rgb(%u,%u,%u)",
+ SkColorGetR(color),
+ SkColorGetG(color),
+ SkColorGetB(color));
+}
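+// e.g. svg_color(0xff123456) yields "rgb(18,52,86)", and 0xffff0000 yields "red".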
+
+static SkScalar svg_opacity(SkColor color) {
+ return SkIntToScalar(SkColorGetA(color)) / SK_AlphaOPAQUE;
+}
+
+// Keep in sync with SkPaint::Cap
+static const char* cap_map[] = {
+ nullptr, // kButt_Cap (default)
+ "round", // kRound_Cap
+ "square" // kSquare_Cap
+};
+static_assert(SK_ARRAY_COUNT(cap_map) == SkPaint::kCapCount, "missing_cap_map_entry");
+
+static const char* svg_cap(SkPaint::Cap cap) {
+ SkASSERT(cap < SK_ARRAY_COUNT(cap_map));
+ return cap_map[cap];
+}
+
+// Keep in sync with SkPaint::Join
+static const char* join_map[] = {
+ nullptr, // kMiter_Join (default)
+ "round", // kRound_Join
+ "bevel" // kBevel_Join
+};
+static_assert(SK_ARRAY_COUNT(join_map) == SkPaint::kJoinCount, "missing_join_map_entry");
+
+static const char* svg_join(SkPaint::Join join) {
+ SkASSERT(join < SK_ARRAY_COUNT(join_map));
+ return join_map[join];
+}
+
+static SkString svg_transform(const SkMatrix& t) {
+ SkASSERT(!t.isIdentity());
+
+ SkString tstr;
+ switch (t.getType()) {
+ case SkMatrix::kPerspective_Mask:
+ // TODO: handle perspective matrices?
+ break;
+ case SkMatrix::kTranslate_Mask:
+ tstr.printf("translate(%g %g)", t.getTranslateX(), t.getTranslateY());
+ break;
+ case SkMatrix::kScale_Mask:
+ tstr.printf("scale(%g %g)", t.getScaleX(), t.getScaleY());
+ break;
+ default:
+ // http://www.w3.org/TR/SVG/coords.html#TransformMatrixDefined
+ // | a c e |
+ // | b d f |
+ // | 0 0 1 |
+ tstr.printf("matrix(%g %g %g %g %g %g)",
+ t.getScaleX(), t.getSkewY(),
+ t.getSkewX(), t.getScaleY(),
+ t.getTranslateX(), t.getTranslateY());
+ break;
+ }
+
+ return tstr;
+}
+
+struct Resources {
+ Resources(const SkPaint& paint)
+ : fPaintServer(svg_color(paint.getColor())) {}
+
+ SkString fPaintServer;
+ SkString fColorFilter;
+};
+
+// Determine if the paint requires us to reset the viewport.
+// Currently, we do this whenever the paint shader calls
+// for a repeating image.
+bool RequiresViewportReset(const SkPaint& paint) {
+ SkShader* shader = paint.getShader();
+ if (!shader)
+ return false;
+
+ SkTileMode xy[2];
+ SkImage* image = shader->isAImage(nullptr, xy);
+
+ if (!image)
+ return false;
+
+ for (int i = 0; i < 2; i++) {
+ if (xy[i] == SkTileMode::kRepeat)
+ return true;
+ }
+ return false;
+}
+
+SkPath GetPath(const SkGlyphRun& glyphRun, const SkPoint& offset) {
+ SkPath path;
+
+ struct Rec {
+ SkPath* fPath;
+ const SkPoint fOffset;
+ const SkPoint* fPos;
+ } rec = { &path, offset, glyphRun.positions().data() };
+
+ glyphRun.font().getPaths(glyphRun.glyphsIDs().data(), SkToInt(glyphRun.glyphsIDs().size()),
+ [](const SkPath* path, const SkMatrix& mx, void* ctx) {
+ Rec* rec = reinterpret_cast<Rec*>(ctx);
+ if (path) {
+ SkMatrix total = mx;
+ total.postTranslate(rec->fPos->fX + rec->fOffset.fX,
+ rec->fPos->fY + rec->fOffset.fY);
+ rec->fPath->addPath(*path, total);
+ } else {
+ // TODO: this is going to drop color emojis.
+ }
+ rec->fPos += 1; // move to the next glyph's position
+ }, &rec);
+
+ return path;
+}
+
+} // namespace
+
+// For now all this does is serve unique serial IDs, but it will eventually evolve to track
+// and deduplicate resources.
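+// (Successive calls currently yield ids like "gradient_0", "gradient_1", ...)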
+class SkSVGDevice::ResourceBucket : ::SkNoncopyable {
+public:
+ ResourceBucket()
+ : fGradientCount(0)
+ , fPathCount(0)
+ , fImageCount(0)
+ , fPatternCount(0)
+ , fColorFilterCount(0) {}
+
+ SkString addLinearGradient() {
+ return SkStringPrintf("gradient_%d", fGradientCount++);
+ }
+
+ SkString addPath() {
+ return SkStringPrintf("path_%d", fPathCount++);
+ }
+
+ SkString addImage() {
+ return SkStringPrintf("img_%d", fImageCount++);
+ }
+
+ SkString addColorFilter() { return SkStringPrintf("cfilter_%d", fColorFilterCount++); }
+
+ SkString addPattern() {
+ return SkStringPrintf("pattern_%d", fPatternCount++);
+ }
+
+private:
+ uint32_t fGradientCount;
+ uint32_t fPathCount;
+ uint32_t fImageCount;
+ uint32_t fPatternCount;
+ uint32_t fColorFilterCount;
+};
+
+struct SkSVGDevice::MxCp {
+ const SkMatrix* fMatrix;
+ const SkClipStack* fClipStack;
+
+ MxCp(const SkMatrix* mx, const SkClipStack* cs) : fMatrix(mx), fClipStack(cs) {}
+ MxCp(SkSVGDevice* device) : fMatrix(&device->ctm()), fClipStack(&device->cs()) {}
+};
+
+class SkSVGDevice::AutoElement : ::SkNoncopyable {
+public:
+ AutoElement(const char name[], SkXMLWriter* writer)
+ : fWriter(writer)
+ , fResourceBucket(nullptr) {
+ fWriter->startElement(name);
+ }
+
+ AutoElement(const char name[], const std::unique_ptr<SkXMLWriter>& writer)
+ : AutoElement(name, writer.get()) {}
+
+ AutoElement(const char name[], SkSVGDevice* svgdev,
+ ResourceBucket* bucket, const MxCp& mc, const SkPaint& paint)
+ : fWriter(svgdev->fWriter.get())
+ , fResourceBucket(bucket) {
+
+ svgdev->syncClipStack(*mc.fClipStack);
+ Resources res = this->addResources(mc, paint);
+
+ fWriter->startElement(name);
+
+ this->addPaint(paint, res);
+
+ if (!mc.fMatrix->isIdentity()) {
+ this->addAttribute("transform", svg_transform(*mc.fMatrix));
+ }
+ }
+
+ ~AutoElement() {
+ fWriter->endElement();
+ }
+
+ void addAttribute(const char name[], const char val[]) {
+ fWriter->addAttribute(name, val);
+ }
+
+ void addAttribute(const char name[], const SkString& val) {
+ fWriter->addAttribute(name, val.c_str());
+ }
+
+ void addAttribute(const char name[], int32_t val) {
+ fWriter->addS32Attribute(name, val);
+ }
+
+ void addAttribute(const char name[], SkScalar val) {
+ fWriter->addScalarAttribute(name, val);
+ }
+
+ void addText(const SkString& text) {
+ fWriter->addText(text.c_str(), text.size());
+ }
+
+ void addRectAttributes(const SkRect&);
+ void addPathAttributes(const SkPath&);
+ void addTextAttributes(const SkFont&);
+
+private:
+ Resources addResources(const MxCp&, const SkPaint& paint);
+ void addShaderResources(const SkPaint& paint, Resources* resources);
+ void addGradientShaderResources(const SkShader* shader, const SkPaint& paint,
+ Resources* resources);
+ void addColorFilterResources(const SkColorFilter& cf, Resources* resources);
+ void addImageShaderResources(const SkShader* shader, const SkPaint& paint,
+ Resources* resources);
+
+ void addPatternDef(const SkBitmap& bm);
+
+ void addPaint(const SkPaint& paint, const Resources& resources);
+
+
+ SkString addLinearGradientDef(const SkShader::GradientInfo& info, const SkShader* shader);
+
+ SkXMLWriter* fWriter;
+ ResourceBucket* fResourceBucket;
+};
+
+void SkSVGDevice::AutoElement::addPaint(const SkPaint& paint, const Resources& resources) {
+ SkPaint::Style style = paint.getStyle();
+ if (style == SkPaint::kFill_Style || style == SkPaint::kStrokeAndFill_Style) {
+ static constexpr char kDefaultFill[] = "black";
+ if (!resources.fPaintServer.equals(kDefaultFill)) {
+ this->addAttribute("fill", resources.fPaintServer);
+
+ if (SK_AlphaOPAQUE != SkColorGetA(paint.getColor())) {
+ this->addAttribute("fill-opacity", svg_opacity(paint.getColor()));
+ }
+ }
+ } else {
+ SkASSERT(style == SkPaint::kStroke_Style);
+ this->addAttribute("fill", "none");
+ }
+
+ if (!resources.fColorFilter.isEmpty()) {
+ this->addAttribute("filter", resources.fColorFilter.c_str());
+ }
+
+ if (style == SkPaint::kStroke_Style || style == SkPaint::kStrokeAndFill_Style) {
+ this->addAttribute("stroke", resources.fPaintServer);
+
+ SkScalar strokeWidth = paint.getStrokeWidth();
+ if (strokeWidth == 0) {
+ // Hairline stroke
+ strokeWidth = 1;
+ this->addAttribute("vector-effect", "non-scaling-stroke");
+ }
+ this->addAttribute("stroke-width", strokeWidth);
+
+ if (const char* cap = svg_cap(paint.getStrokeCap())) {
+ this->addAttribute("stroke-linecap", cap);
+ }
+
+ if (const char* join = svg_join(paint.getStrokeJoin())) {
+ this->addAttribute("stroke-linejoin", join);
+ }
+
+ if (paint.getStrokeJoin() == SkPaint::kMiter_Join) {
+ this->addAttribute("stroke-miterlimit", paint.getStrokeMiter());
+ }
+
+ if (SK_AlphaOPAQUE != SkColorGetA(paint.getColor())) {
+ this->addAttribute("stroke-opacity", svg_opacity(paint.getColor()));
+ }
+ } else {
+ SkASSERT(style == SkPaint::kFill_Style);
+ // SVG default stroke value is "none".
+ }
+}
+
+Resources SkSVGDevice::AutoElement::addResources(const MxCp& mc, const SkPaint& paint) {
+ Resources resources(paint);
+
+ if (paint.getShader()) {
+ AutoElement defs("defs", fWriter);
+
+ this->addShaderResources(paint, &resources);
+ }
+
+ if (const SkColorFilter* cf = paint.getColorFilter()) {
+ // TODO: Implement skia color filters for blend modes other than SrcIn
+ SkBlendMode mode;
+ if (cf->asAColorMode(nullptr, &mode) && mode == SkBlendMode::kSrcIn) {
+ this->addColorFilterResources(*cf, &resources);
+ }
+ }
+
+ return resources;
+}
+
+void SkSVGDevice::AutoElement::addGradientShaderResources(const SkShader* shader,
+ const SkPaint& paint,
+ Resources* resources) {
+ SkShader::GradientInfo grInfo;
+ grInfo.fColorCount = 0;
+ if (SkShader::kLinear_GradientType != shader->asAGradient(&grInfo)) {
+ // TODO: non-linear gradient support
+ return;
+ }
+
+ SkAutoSTArray<16, SkColor> grColors(grInfo.fColorCount);
+ SkAutoSTArray<16, SkScalar> grOffsets(grInfo.fColorCount);
+ grInfo.fColors = grColors.get();
+ grInfo.fColorOffsets = grOffsets.get();
+
+ // One more call to get the actual colors/offsets.
+ shader->asAGradient(&grInfo);
+ SkASSERT(grInfo.fColorCount <= grColors.count());
+ SkASSERT(grInfo.fColorCount <= grOffsets.count());
+
+ resources->fPaintServer.printf("url(#%s)", addLinearGradientDef(grInfo, shader).c_str());
+}
+
+void SkSVGDevice::AutoElement::addColorFilterResources(const SkColorFilter& cf,
+ Resources* resources) {
+ SkString colorfilterID = fResourceBucket->addColorFilter();
+ {
+ AutoElement filterElement("filter", fWriter);
+ filterElement.addAttribute("id", colorfilterID);
+ filterElement.addAttribute("x", "0%");
+ filterElement.addAttribute("y", "0%");
+ filterElement.addAttribute("width", "100%");
+ filterElement.addAttribute("height", "100%");
+
+ SkColor filterColor;
+ SkBlendMode mode;
+ bool asAColorMode = cf.asAColorMode(&filterColor, &mode);
+ SkAssertResult(asAColorMode);
+ SkASSERT(mode == SkBlendMode::kSrcIn);
+
+ {
+ // first flood with filter color
+ AutoElement floodElement("feFlood", fWriter);
+ floodElement.addAttribute("flood-color", svg_color(filterColor));
+ floodElement.addAttribute("flood-opacity", svg_opacity(filterColor));
+ floodElement.addAttribute("result", "flood");
+ }
+
+ {
+            // composite the flood with the source using operator "in",
+            // implementing the SrcIn blend asserted above
+ AutoElement compositeElement("feComposite", fWriter);
+ compositeElement.addAttribute("in", "flood");
+ compositeElement.addAttribute("operator", "in");
+ }
+ }
+ resources->fColorFilter.printf("url(#%s)", colorfilterID.c_str());
+}
+
+// Returns a data URI built from the image's encoded bytes.
+// Reuses the image's cached encoded data (PNG or JPEG) when available;
+// otherwise re-encodes the image as PNG.
+sk_sp<SkData> AsDataUri(SkImage* image) {
+ sk_sp<SkData> imageData = image->encodeToData();
+ if (!imageData) {
+ return nullptr;
+ }
+
+ const char* src = (char*)imageData->data();
+ const char* selectedPrefix = nullptr;
+ size_t selectedPrefixLength = 0;
+
+ const static char pngDataPrefix[] = "data:image/png;base64,";
+ const static char jpgDataPrefix[] = "data:image/jpeg;base64,";
+
+ if (SkJpegCodec::IsJpeg(src, imageData->size())) {
+ selectedPrefix = jpgDataPrefix;
+ selectedPrefixLength = sizeof(jpgDataPrefix);
+ } else {
+ if (!SkPngCodec::IsPng(src, imageData->size())) {
+ imageData = image->encodeToData(SkEncodedImageFormat::kPNG, 100);
+ }
+ selectedPrefix = pngDataPrefix;
+ selectedPrefixLength = sizeof(pngDataPrefix);
+ }
+
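+    // Note: selectedPrefixLength includes the prefix string's trailing NUL.
+    // The base64 payload is written starting at (selectedPrefixLength - 1),
+    // overwriting that NUL, and the final byte of the buffer then serves as
+    // the URI's terminator.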
+ size_t b64Size = SkBase64::Encode(imageData->data(), imageData->size(), nullptr);
+ sk_sp<SkData> dataUri = SkData::MakeUninitialized(selectedPrefixLength + b64Size);
+ char* dest = (char*)dataUri->writable_data();
+ memcpy(dest, selectedPrefix, selectedPrefixLength);
+ SkBase64::Encode(imageData->data(), imageData->size(), dest + selectedPrefixLength - 1);
+ dest[dataUri->size() - 1] = 0;
+ return dataUri;
+}
+
+void SkSVGDevice::AutoElement::addImageShaderResources(const SkShader* shader, const SkPaint& paint,
+ Resources* resources) {
+ SkMatrix outMatrix;
+
+ SkTileMode xy[2];
+ SkImage* image = shader->isAImage(&outMatrix, xy);
+ SkASSERT(image);
+
+ SkString patternDims[2]; // width, height
+
+ sk_sp<SkData> dataUri = AsDataUri(image);
+ if (!dataUri) {
+ return;
+ }
+ SkIRect imageSize = image->bounds();
+ for (int i = 0; i < 2; i++) {
+ int imageDimension = i == 0 ? imageSize.width() : imageSize.height();
+ switch (xy[i]) {
+ case SkTileMode::kRepeat:
+ patternDims[i].appendScalar(imageDimension);
+ break;
+ default:
+ // TODO: other tile modes?
+ patternDims[i] = "100%";
+ }
+ }
+
+ SkString patternID = fResourceBucket->addPattern();
+ {
+ AutoElement pattern("pattern", fWriter);
+ pattern.addAttribute("id", patternID);
+ pattern.addAttribute("patternUnits", "userSpaceOnUse");
+ pattern.addAttribute("patternContentUnits", "userSpaceOnUse");
+ pattern.addAttribute("width", patternDims[0]);
+ pattern.addAttribute("height", patternDims[1]);
+ pattern.addAttribute("x", 0);
+ pattern.addAttribute("y", 0);
+
+ {
+ SkString imageID = fResourceBucket->addImage();
+ AutoElement imageTag("image", fWriter);
+ imageTag.addAttribute("id", imageID);
+ imageTag.addAttribute("x", 0);
+ imageTag.addAttribute("y", 0);
+ imageTag.addAttribute("width", image->width());
+ imageTag.addAttribute("height", image->height());
+ imageTag.addAttribute("xlink:href", static_cast<const char*>(dataUri->data()));
+ }
+ }
+ resources->fPaintServer.printf("url(#%s)", patternID.c_str());
+}
+
+void SkSVGDevice::AutoElement::addShaderResources(const SkPaint& paint, Resources* resources) {
+ const SkShader* shader = paint.getShader();
+ SkASSERT(shader);
+
+ if (shader->asAGradient(nullptr) != SkShader::kNone_GradientType) {
+ this->addGradientShaderResources(shader, paint, resources);
+ } else if (shader->isAImage()) {
+ this->addImageShaderResources(shader, paint, resources);
+ }
+ // TODO: other shader types?
+}
+
+SkString SkSVGDevice::AutoElement::addLinearGradientDef(const SkShader::GradientInfo& info,
+ const SkShader* shader) {
+ SkASSERT(fResourceBucket);
+ SkString id = fResourceBucket->addLinearGradient();
+
+ {
+ AutoElement gradient("linearGradient", fWriter);
+
+ gradient.addAttribute("id", id);
+ gradient.addAttribute("gradientUnits", "userSpaceOnUse");
+ gradient.addAttribute("x1", info.fPoint[0].x());
+ gradient.addAttribute("y1", info.fPoint[0].y());
+ gradient.addAttribute("x2", info.fPoint[1].x());
+ gradient.addAttribute("y2", info.fPoint[1].y());
+
+ if (!as_SB(shader)->getLocalMatrix().isIdentity()) {
+ this->addAttribute("gradientTransform", svg_transform(as_SB(shader)->getLocalMatrix()));
+ }
+
+ SkASSERT(info.fColorCount >= 2);
+ for (int i = 0; i < info.fColorCount; ++i) {
+ SkColor color = info.fColors[i];
+ SkString colorStr(svg_color(color));
+
+ {
+ AutoElement stop("stop", fWriter);
+ stop.addAttribute("offset", info.fColorOffsets[i]);
+ stop.addAttribute("stop-color", colorStr.c_str());
+
+ if (SK_AlphaOPAQUE != SkColorGetA(color)) {
+ stop.addAttribute("stop-opacity", svg_opacity(color));
+ }
+ }
+ }
+ }
+
+ return id;
+}
+
+void SkSVGDevice::AutoElement::addRectAttributes(const SkRect& rect) {
+ // x, y default to 0
+ if (rect.x() != 0) {
+ this->addAttribute("x", rect.x());
+ }
+ if (rect.y() != 0) {
+ this->addAttribute("y", rect.y());
+ }
+
+ this->addAttribute("width", rect.width());
+ this->addAttribute("height", rect.height());
+}
+
+void SkSVGDevice::AutoElement::addPathAttributes(const SkPath& path) {
+ SkString pathData;
+ SkParsePath::ToSVGString(path, &pathData);
+ this->addAttribute("d", pathData);
+}
+
+void SkSVGDevice::AutoElement::addTextAttributes(const SkFont& font) {
+ this->addAttribute("font-size", font.getSize());
+
+ SkString familyName;
+ SkTHashSet<SkString> familySet;
+ sk_sp<SkTypeface> tface = font.refTypefaceOrDefault();
+
+ SkASSERT(tface);
+ SkFontStyle style = tface->fontStyle();
+ if (style.slant() == SkFontStyle::kItalic_Slant) {
+ this->addAttribute("font-style", "italic");
+ } else if (style.slant() == SkFontStyle::kOblique_Slant) {
+ this->addAttribute("font-style", "oblique");
+ }
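+    // Bucket the weight into hundreds (e.g. 400 -> index 3); index 3 maps to
+    // the SVG default "normal" and is omitted.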
+ int weightIndex = (SkTPin(style.weight(), 100, 900) - 50) / 100;
+ if (weightIndex != 3) {
+ static constexpr const char* weights[] = {
+ "100", "200", "300", "normal", "400", "500", "600", "bold", "800", "900"
+ };
+ this->addAttribute("font-weight", weights[weightIndex]);
+ }
+ int stretchIndex = style.width() - 1;
+ if (stretchIndex != 4) {
+ static constexpr const char* stretches[] = {
+ "ultra-condensed", "extra-condensed", "condensed", "semi-condensed",
+ "normal",
+ "semi-expanded", "expanded", "extra-expanded", "ultra-expanded"
+ };
+ this->addAttribute("font-stretch", stretches[stretchIndex]);
+ }
+
+ sk_sp<SkTypeface::LocalizedStrings> familyNameIter(tface->createFamilyNameIterator());
+ SkTypeface::LocalizedString familyString;
+ if (familyNameIter) {
+ while (familyNameIter->next(&familyString)) {
+ if (familySet.contains(familyString.fString)) {
+ continue;
+ }
+ familySet.add(familyString.fString);
+ familyName.appendf((familyName.isEmpty() ? "%s" : ", %s"), familyString.fString.c_str());
+ }
+ }
+ if (!familyName.isEmpty()) {
+ this->addAttribute("font-family", familyName);
+ }
+}
+
+sk_sp<SkBaseDevice> SkSVGDevice::Make(const SkISize& size, std::unique_ptr<SkXMLWriter> writer,
+ uint32_t flags) {
+ return writer ? sk_sp<SkBaseDevice>(new SkSVGDevice(size, std::move(writer), flags))
+ : nullptr;
+}
+
+SkSVGDevice::SkSVGDevice(const SkISize& size, std::unique_ptr<SkXMLWriter> writer, uint32_t flags)
+ : INHERITED(SkImageInfo::MakeUnknown(size.fWidth, size.fHeight),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fWriter(std::move(writer))
+ , fResourceBucket(new ResourceBucket)
+ , fFlags(flags)
+{
+ SkASSERT(fWriter);
+
+ fWriter->writeHeader();
+
+ // The root <svg> tag gets closed by the destructor.
+ fRootElement.reset(new AutoElement("svg", fWriter));
+
+ fRootElement->addAttribute("xmlns", "http://www.w3.org/2000/svg");
+ fRootElement->addAttribute("xmlns:xlink", "http://www.w3.org/1999/xlink");
+ fRootElement->addAttribute("width", size.width());
+ fRootElement->addAttribute("height", size.height());
+}
+
+SkSVGDevice::~SkSVGDevice() {
+ // Pop order is important.
+ while (!fClipStack.empty()) {
+ fClipStack.pop_back();
+ }
+}
+
+void SkSVGDevice::syncClipStack(const SkClipStack& cs) {
+ SkClipStack::B2TIter iter(cs);
+
+ const SkClipStack::Element* elem;
+ size_t rec_idx = 0;
+
+ // First, find/preserve the common bottom.
+ while ((elem = iter.next()) && (rec_idx < fClipStack.size())) {
+ if (fClipStack[SkToInt(rec_idx)].fGenID != elem->getGenID()) {
+ break;
+ }
+ rec_idx++;
+ }
+
+ // Discard out-of-date stack top.
+ while (fClipStack.size() > rec_idx) {
+ fClipStack.pop_back();
+ }
+
+ auto define_clip = [this](const SkClipStack::Element* e) {
+ const auto cid = SkStringPrintf("cl_%x", e->getGenID());
+
+ AutoElement clip_path("clipPath", fWriter);
+ clip_path.addAttribute("id", cid);
+
+ // TODO: handle non-intersect clips.
+
+ switch (e->getDeviceSpaceType()) {
+ case SkClipStack::Element::DeviceSpaceType::kEmpty: {
+ // TODO: can we skip this?
+ AutoElement rect("rect", fWriter);
+ } break;
+ case SkClipStack::Element::DeviceSpaceType::kRect: {
+ AutoElement rect("rect", fWriter);
+ rect.addRectAttributes(e->getDeviceSpaceRect());
+ } break;
+ case SkClipStack::Element::DeviceSpaceType::kRRect: {
+ // TODO: complex rrect handling?
+ const auto& rr = e->getDeviceSpaceRRect();
+ const auto radii = rr.getSimpleRadii();
+
+ AutoElement rrect("rect", fWriter);
+ rrect.addRectAttributes(rr.rect());
+ rrect.addAttribute("rx", radii.x());
+ rrect.addAttribute("ry", radii.y());
+ } break;
+ case SkClipStack::Element::DeviceSpaceType::kPath: {
+ const auto& p = e->getDeviceSpacePath();
+ AutoElement path("path", fWriter);
+ path.addPathAttributes(p);
+ if (p.getFillType() == SkPath::kEvenOdd_FillType) {
+ path.addAttribute("clip-rule", "evenodd");
+ }
+ } break;
+ }
+
+ return cid;
+ };
+
+ // Rebuild the top.
+ while (elem) {
+ const auto cid = define_clip(elem);
+
+ auto clip_grp = skstd::make_unique<AutoElement>("g", fWriter);
+ clip_grp->addAttribute("clip-path", SkStringPrintf("url(#%s)", cid.c_str()));
+
+ fClipStack.push_back({ std::move(clip_grp), elem->getGenID() });
+
+ elem = iter.next();
+ }
+}
+
+void SkSVGDevice::drawPaint(const SkPaint& paint) {
+ AutoElement rect("rect", this, fResourceBucket.get(), MxCp(this), paint);
+ rect.addRectAttributes(SkRect::MakeWH(SkIntToScalar(this->width()),
+ SkIntToScalar(this->height())));
+}
+
+void SkSVGDevice::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ if (!value) {
+ return;
+ }
+
+ if (!strcmp(SkAnnotationKeys::URL_Key(), key) ||
+ !strcmp(SkAnnotationKeys::Link_Named_Dest_Key(), key)) {
+ this->cs().save();
+ this->cs().clipRect(rect, this->ctm(), kIntersect_SkClipOp, true);
+ SkRect transformedRect = this->cs().bounds(this->getGlobalBounds());
+ this->cs().restore();
+ if (transformedRect.isEmpty()) {
+ return;
+ }
+
+ SkString url(static_cast<const char*>(value->data()), value->size() - 1);
+ AutoElement a("a", fWriter);
+ a.addAttribute("xlink:href", url.c_str());
+ {
+ AutoElement r("rect", fWriter);
+ r.addAttribute("fill-opacity", "0.0");
+ r.addRectAttributes(transformedRect);
+ }
+ }
+}
+
+void SkSVGDevice::drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ SkPath path;
+
+    switch (mode) {
+        case SkCanvas::kPoints_PointMode:
+            // TODO: point mode is not supported yet.
+            break;
+ case SkCanvas::kLines_PointMode:
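+            // Emit one <path> element per point pair; decrementing count first
+            // guards against reading past the array for an odd point count.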
+ count -= 1;
+ for (size_t i = 0; i < count; i += 2) {
+ path.rewind();
+ path.moveTo(pts[i]);
+ path.lineTo(pts[i+1]);
+ AutoElement elem("path", this, fResourceBucket.get(), MxCp(this), paint);
+ elem.addPathAttributes(path);
+ }
+ break;
+ case SkCanvas::kPolygon_PointMode:
+ if (count > 1) {
+ path.addPoly(pts, SkToInt(count), false);
+ path.moveTo(pts[0]);
+ AutoElement elem("path", this, fResourceBucket.get(), MxCp(this), paint);
+ elem.addPathAttributes(path);
+ }
+ break;
+ }
+}
+
+void SkSVGDevice::drawRect(const SkRect& r, const SkPaint& paint) {
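+    // Shaders with repeating images need a fresh viewport: wrap the rect in a
+    // nested <svg> element sized to the draw area so the pattern tiles
+    // relative to it.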
+ std::unique_ptr<AutoElement> svg;
+ if (RequiresViewportReset(paint)) {
+ svg.reset(new AutoElement("svg", this, fResourceBucket.get(), MxCp(this), paint));
+ svg->addRectAttributes(r);
+ }
+
+ AutoElement rect("rect", this, fResourceBucket.get(), MxCp(this), paint);
+
+ if (svg) {
+ rect.addAttribute("x", 0);
+ rect.addAttribute("y", 0);
+ rect.addAttribute("width", "100%");
+ rect.addAttribute("height", "100%");
+ } else {
+ rect.addRectAttributes(r);
+ }
+}
+
+void SkSVGDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
+ AutoElement ellipse("ellipse", this, fResourceBucket.get(), MxCp(this), paint);
+ ellipse.addAttribute("cx", oval.centerX());
+ ellipse.addAttribute("cy", oval.centerY());
+ ellipse.addAttribute("rx", oval.width() / 2);
+ ellipse.addAttribute("ry", oval.height() / 2);
+}
+
+void SkSVGDevice::drawRRect(const SkRRect& rr, const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(rr);
+
+ AutoElement elem("path", this, fResourceBucket.get(), MxCp(this), paint);
+ elem.addPathAttributes(path);
+}
+
+void SkSVGDevice::drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) {
+ AutoElement elem("path", this, fResourceBucket.get(), MxCp(this), paint);
+ elem.addPathAttributes(path);
+
+ // TODO: inverse fill types?
+ if (path.getFillType() == SkPath::kEvenOdd_FillType) {
+ elem.addAttribute("fill-rule", "evenodd");
+ }
+}
+
+static sk_sp<SkData> encode(const SkBitmap& src) {
+ SkDynamicMemoryWStream buf;
+ return SkEncodeImage(&buf, src, SkEncodedImageFormat::kPNG, 80) ? buf.detachAsData() : nullptr;
+}
+
+void SkSVGDevice::drawBitmapCommon(const MxCp& mc, const SkBitmap& bm, const SkPaint& paint) {
+ sk_sp<SkData> pngData = encode(bm);
+ if (!pngData) {
+ return;
+ }
+
+ size_t b64Size = SkBase64::Encode(pngData->data(), pngData->size(), nullptr);
+ SkAutoTMalloc<char> b64Data(b64Size);
+ SkBase64::Encode(pngData->data(), pngData->size(), b64Data.get());
+
+ SkString svgImageData("data:image/png;base64,");
+ svgImageData.append(b64Data.get(), b64Size);
+
+ SkString imageID = fResourceBucket->addImage();
+ {
+ AutoElement defs("defs", fWriter);
+ {
+ AutoElement image("image", fWriter);
+ image.addAttribute("id", imageID);
+ image.addAttribute("width", bm.width());
+ image.addAttribute("height", bm.height());
+ image.addAttribute("xlink:href", svgImageData);
+ }
+ }
+
+ {
+ AutoElement imageUse("use", this, fResourceBucket.get(), mc, paint);
+ imageUse.addAttribute("xlink:href", SkStringPrintf("#%s", imageID.c_str()));
+ }
+}
+
+void SkSVGDevice::drawSprite(const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) {
+ MxCp mc(this);
+ SkMatrix adjustedMatrix = *mc.fMatrix;
+ adjustedMatrix.preTranslate(SkIntToScalar(x), SkIntToScalar(y));
+ mc.fMatrix = &adjustedMatrix;
+
+ drawBitmapCommon(mc, bitmap, paint);
+}
+
+void SkSVGDevice::drawBitmapRect(const SkBitmap& bm, const SkRect* srcOrNull,
+ const SkRect& dst, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) {
+ SkClipStack* cs = &this->cs();
+ SkClipStack::AutoRestore ar(cs, false);
+ if (srcOrNull && *srcOrNull != SkRect::Make(bm.bounds())) {
+ cs->save();
+ cs->clipRect(dst, this->ctm(), kIntersect_SkClipOp, paint.isAntiAlias());
+ }
+
+ SkMatrix adjustedMatrix;
+ adjustedMatrix.setRectToRect(srcOrNull ? *srcOrNull : SkRect::Make(bm.bounds()),
+ dst,
+ SkMatrix::kFill_ScaleToFit);
+ adjustedMatrix.postConcat(this->ctm());
+
+ drawBitmapCommon(MxCp(&adjustedMatrix, cs), bm, paint);
+}
+
+class SVGTextBuilder : SkNoncopyable {
+public:
+ SVGTextBuilder(SkPoint origin, const SkGlyphRun& glyphRun)
+ : fOrigin(origin) {
+ auto runSize = glyphRun.runSize();
+ SkAutoSTArray<64, SkUnichar> unichars(runSize);
+ SkFontPriv::GlyphsToUnichars(glyphRun.font(), glyphRun.glyphsIDs().data(),
+ runSize, unichars.get());
+ auto positions = glyphRun.positions();
+ for (size_t i = 0; i < runSize; ++i) {
+ this->appendUnichar(unichars[i], positions[i]);
+ }
+ }
+
+ const SkString& text() const { return fText; }
+ const SkString& posX() const { return fPosXStr; }
+ const SkString& posY() const { return fHasConstY ? fConstYStr : fPosYStr; }
+
+private:
+ void appendUnichar(SkUnichar c, SkPoint position) {
+ bool discardPos = false;
+ bool isWhitespace = false;
+
+ switch(c) {
+ case ' ':
+ case '\t':
+ // consolidate whitespace to match SVG's xml:space=default munging
+ // (http://www.w3.org/TR/SVG/text.html#WhiteSpace)
+ if (fLastCharWasWhitespace) {
+ discardPos = true;
+ } else {
+ fText.appendUnichar(c);
+ }
+ isWhitespace = true;
+ break;
+ case '\0':
+ // SkPaint::glyphsToUnichars() returns \0 for inconvertible glyphs, but these
+ // are not legal XML characters (http://www.w3.org/TR/REC-xml/#charsets)
+ discardPos = true;
+ isWhitespace = fLastCharWasWhitespace; // preserve whitespace consolidation
+ break;
+ case '&':
+ fText.append("&amp;");
+ break;
+ case '"':
+ fText.append("&quot;");
+ break;
+ case '\'':
+ fText.append("&apos;");
+ break;
+ case '<':
+ fText.append("&lt;");
+ break;
+ case '>':
+ fText.append("&gt;");
+ break;
+ default:
+ fText.appendUnichar(c);
+ break;
+ }
+
+ fLastCharWasWhitespace = isWhitespace;
+
+ if (discardPos) {
+ return;
+ }
+
+ position += fOrigin;
+ fPosXStr.appendf("%.8g, ", position.fX);
+ fPosYStr.appendf("%.8g, ", position.fY);
+
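+        // Track whether every glyph shares one baseline; if so, a single "y"
+        // value (fConstYStr) is emitted instead of a per-glyph list.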
+ if (fConstYStr.isEmpty()) {
+ fConstYStr = fPosYStr;
+ fConstY = position.fY;
+ } else {
+ fHasConstY &= SkScalarNearlyEqual(fConstY, position.fY);
+ }
+ }
+
+ const SkPoint fOrigin;
+
+ SkString fText,
+ fPosXStr, fPosYStr,
+ fConstYStr;
+ SkScalar fConstY;
+ bool fLastCharWasWhitespace = true, // start off in whitespace mode to strip leading space
+ fHasConstY = true;
+};
+
+void SkSVGDevice::drawGlyphRunAsPath(const SkGlyphRun& glyphRun, const SkPoint& origin,
+ const SkPaint& runPaint) {
+ this->drawPath(GetPath(glyphRun, origin), runPaint);
+}
+
+void SkSVGDevice::drawGlyphRunAsText(const SkGlyphRun& glyphRun, const SkPoint& origin,
+ const SkPaint& runPaint) {
+ AutoElement elem("text", this, fResourceBucket.get(), MxCp(this), runPaint);
+ elem.addTextAttributes(glyphRun.font());
+
+ SVGTextBuilder builder(origin, glyphRun);
+ elem.addAttribute("x", builder.posX());
+ elem.addAttribute("y", builder.posY());
+ elem.addText(builder.text());
+}
+
+void SkSVGDevice::drawGlyphRunList(const SkGlyphRunList& glyphRunList) {
+ const auto processGlyphRun = (fFlags & SkSVGCanvas::kConvertTextToPaths_Flag)
+ ? &SkSVGDevice::drawGlyphRunAsPath
+ : &SkSVGDevice::drawGlyphRunAsText;
+
+ for (auto& glyphRun : glyphRunList) {
+ (this->*processGlyphRun)(glyphRun, glyphRunList.origin(), glyphRunList.paint());
+ }
+}
+
+void SkSVGDevice::drawVertices(const SkVertices*, const SkVertices::Bone[], int, SkBlendMode,
+ const SkPaint&) {
+ // todo
+}
+
+void SkSVGDevice::drawDevice(SkBaseDevice*, int x, int y,
+ const SkPaint&) {
+ // todo
+}
diff --git a/gfx/skia/skia/src/svg/SkSVGDevice.h b/gfx/skia/skia/src/svg/SkSVGDevice.h
new file mode 100644
index 0000000000..fb850caaef
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGDevice.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSVGDevice_DEFINED
+#define SkSVGDevice_DEFINED
+
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkClipStackDevice.h"
+
+class SkXMLWriter;
+
+class SkSVGDevice final : public SkClipStackDevice {
+public:
+ static sk_sp<SkBaseDevice> Make(const SkISize& size, std::unique_ptr<SkXMLWriter>,
+ uint32_t flags);
+
+protected:
+ void drawPaint(const SkPaint& paint) override;
+ void drawAnnotation(const SkRect& rect, const char key[], SkData* value) override;
+ void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr, const SkPaint& paint) override;
+ void drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable = false) override;
+
+ void drawSprite(const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) override;
+ void drawBitmapRect(const SkBitmap&,
+ const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint) override;
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override;
+ void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const SkPaint& paint) override;
+
+ void drawDevice(SkBaseDevice*, int x, int y,
+ const SkPaint&) override;
+
+private:
+ SkSVGDevice(const SkISize& size, std::unique_ptr<SkXMLWriter>, uint32_t);
+ ~SkSVGDevice() override;
+
+ void drawGlyphRunAsText(const SkGlyphRun&, const SkPoint&, const SkPaint&);
+ void drawGlyphRunAsPath(const SkGlyphRun&, const SkPoint&, const SkPaint&);
+
+ struct MxCp;
+ void drawBitmapCommon(const MxCp&, const SkBitmap& bm, const SkPaint& paint);
+
+ void syncClipStack(const SkClipStack&);
+
+ class AutoElement;
+ class ResourceBucket;
+
+ const std::unique_ptr<SkXMLWriter> fWriter;
+ const std::unique_ptr<ResourceBucket> fResourceBucket;
+ const uint32_t fFlags;
+
+ struct ClipRec {
+ std::unique_ptr<AutoElement> fClipPathElem;
+ uint32_t fGenID;
+ };
+
+ std::unique_ptr<AutoElement> fRootElement;
+ SkTArray<ClipRec> fClipStack;
+
+ typedef SkClipStackDevice INHERITED;
+};
+
+#endif // SkSVGDevice_DEFINED
diff --git a/gfx/skia/skia/src/utils/Sk3D.cpp b/gfx/skia/skia/src/utils/Sk3D.cpp
new file mode 100644
index 0000000000..0df42b5fa9
--- /dev/null
+++ b/gfx/skia/skia/src/utils/Sk3D.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/Sk3D.h"
+
+static void set_col(SkMatrix44* m, int col, const SkPoint3& v) {
+ m->set(0, col, v.fX);
+ m->set(1, col, v.fY);
+ m->set(2, col, v.fZ);
+}
+
+static SkPoint3 cross(const SkPoint3& a, const SkPoint3& b) {
+ return {
+ a.fY * b.fZ - a.fZ * b.fY,
+ a.fZ * b.fX - a.fX * b.fZ,
+ a.fX * b.fY - a.fY * b.fX,
+ };
+}
+
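+// Builds a look-at view matrix: construct the camera basis (side, corrected
+// up, negated forward) as columns plus the eye position, then invert to map
+// world space into camera space.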
+void Sk3LookAt(SkMatrix44* dst, const SkPoint3& eye, const SkPoint3& center, const SkPoint3& up) {
+ SkPoint3 f = center - eye;
+ f.normalize();
+ SkPoint3 u = up;
+ u.normalize();
+ SkPoint3 s = cross(f, u);
+ s.normalize();
+ u = cross(s, f);
+
+ dst->setIdentity();
+ set_col(dst, 0, s);
+ set_col(dst, 1, u);
+ set_col(dst, 2, -f);
+ set_col(dst, 3, eye);
+ dst->invert(dst);
+}
+
+bool Sk3Perspective(SkMatrix44* dst, float near, float far, float angle) {
+ SkASSERT(far > near);
+
+ float denomInv = sk_ieee_float_divide(1, far - near);
+ float halfAngle = angle * 0.5f;
+ float cot = sk_float_cos(halfAngle) / sk_float_sin(halfAngle);
+
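+    // Perspective projection: cot(angle/2) is the focal-length scale for x
+    // and y; rows 2 and 3 map z into the clip range and set up the
+    // homogeneous divide.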
+ dst->setIdentity();
+ dst->set(0, 0, cot);
+ dst->set(1, 1, cot);
+ dst->set(2, 2, (far + near) * denomInv);
+ dst->set(2, 3, 2 * far * near * denomInv);
+ dst->set(3, 2, -1);
+ return true;
+}
+
+void Sk3MapPts(SkPoint dst[], const SkMatrix44& m4, const SkPoint3 src[], int count) {
+ for (int i = 0; i < count; ++i) {
+ SkVector4 v = m4 * SkVector4{ src[i].fX, src[i].fY, src[i].fZ, 1 };
+        // TODO: clip v against the view volume before the divide.
+ dst[i] = { v.fData[0] / v.fData[3], v.fData[1] / v.fData[3] };
+ }
+}
+
diff --git a/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp b/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp
new file mode 100644
index 0000000000..dc827e5f6f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/utils/SkAnimCodecPlayer.h"
+#include "src/codec/SkCodecImageGenerator.h"
+#include <algorithm>
+
+SkAnimCodecPlayer::SkAnimCodecPlayer(std::unique_ptr<SkCodec> codec) : fCodec(std::move(codec)) {
+ fImageInfo = fCodec->getInfo();
+ fFrameInfos = fCodec->getFrameInfo();
+ fImages.resize(fFrameInfos.size());
+
+    // Change the interpretation of fDuration to an end-time for that frame.
+ size_t dur = 0;
+ for (auto& f : fFrameInfos) {
+ dur += f.fDuration;
+ f.fDuration = dur;
+ }
+ fTotalDuration = dur;
+
+ if (!fTotalDuration) {
+ // Static image -- may or may not have returned a single frame info.
+ fFrameInfos.clear();
+ fImages.clear();
+ fImages.push_back(SkImage::MakeFromGenerator(
+ SkCodecImageGenerator::MakeFromCodec(std::move(fCodec))));
+ }
+}
+
+SkAnimCodecPlayer::~SkAnimCodecPlayer() {}
+
+SkISize SkAnimCodecPlayer::dimensions() {
+ return { fImageInfo.width(), fImageInfo.height() };
+}
+
+sk_sp<SkImage> SkAnimCodecPlayer::getFrameAt(int index) {
+ SkASSERT((unsigned)index < fFrameInfos.size());
+
+ if (fImages[index]) {
+ return fImages[index];
+ }
+
+ size_t rb = fImageInfo.minRowBytes();
+ size_t size = fImageInfo.computeByteSize(rb);
+ auto data = SkData::MakeUninitialized(size);
+
+ SkCodec::Options opts;
+ opts.fFrameIndex = index;
+
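+    // If this frame builds on an earlier frame, seed the destination buffer
+    // with that frame's pixels and let the codec start from it.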
+ const int requiredFrame = fFrameInfos[index].fRequiredFrame;
+ if (requiredFrame != SkCodec::kNoFrame) {
+ auto requiredImage = fImages[requiredFrame];
+ SkPixmap requiredPM;
+ if (requiredImage && requiredImage->peekPixels(&requiredPM)) {
+ sk_careful_memcpy(data->writable_data(), requiredPM.addr(), size);
+ opts.fPriorFrame = requiredFrame;
+ }
+ }
+ if (SkCodec::kSuccess == fCodec->getPixels(fImageInfo, data->writable_data(), rb, &opts)) {
+ return fImages[index] = SkImage::MakeRasterData(fImageInfo, std::move(data), rb);
+ }
+ return nullptr;
+}
+
+sk_sp<SkImage> SkAnimCodecPlayer::getFrame() {
+ SkASSERT(fTotalDuration > 0 || fImages.size() == 1);
+
+ return fTotalDuration > 0
+ ? this->getFrameAt(fCurrIndex)
+ : fImages.front();
+}
+
+bool SkAnimCodecPlayer::seek(uint32_t msec) {
+ if (!fTotalDuration) {
+ return false;
+ }
+
+ msec %= fTotalDuration;
+
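+    // The constructor rewrote each fDuration as a cumulative end time, so a
+    // binary search finds the frame whose interval covers msec.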
+ auto lower = std::lower_bound(fFrameInfos.begin(), fFrameInfos.end(), msec,
+ [](const SkCodec::FrameInfo& info, uint32_t msec) {
+ return (uint32_t)info.fDuration < msec;
+ });
+ int prevIndex = fCurrIndex;
+ fCurrIndex = lower - fFrameInfos.begin();
+ return fCurrIndex != prevIndex;
+}
+
+
diff --git a/gfx/skia/skia/src/utils/SkBase64.cpp b/gfx/skia/skia/src/utils/SkBase64.cpp
new file mode 100644
index 0000000000..225cb56c14
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBase64.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/utils/SkBase64.h"
+
+#define DecodePad -2
+#define EncodePad 64
+
+static const char default_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/=";
+
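+// Maps input bytes in ['+', 'z'] (indexed by byte - '+') to their 6-bit
+// values; -1 marks bytes outside the base64 alphabet and DecodePad marks '='.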
+static const signed char decodeData[] = {
+ 62, -1, -1, -1, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, DecodePad, -1, -1,
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
+};
+
+SkBase64::SkBase64() : fLength((size_t) -1), fData(nullptr) {
+}
+
+#if defined _WIN32 // disable 'two', etc. may be used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+SkBase64::Error SkBase64::decode(const void* srcPtr, size_t size, bool writeDestination) {
+ unsigned char* dst = (unsigned char*) fData;
+ const unsigned char* dstStart = (const unsigned char*) fData;
+ const unsigned char* src = (const unsigned char*) srcPtr;
+ bool padTwo = false;
+ bool padThree = false;
+ const unsigned char* end = src + size;
+ while (src < end) {
+ unsigned char bytes[4];
+ int byte = 0;
+ do {
+ unsigned char srcByte = *src++;
+ if (srcByte == 0)
+ goto goHome;
+ if (srcByte <= ' ')
+ continue; // treat as white space
+ if (srcByte < '+' || srcByte > 'z')
+ return kBadCharError;
+ signed char decoded = decodeData[srcByte - '+'];
+ bytes[byte] = decoded;
+ if (decoded < 0) {
+ if (decoded == DecodePad)
+ goto handlePad;
+ return kBadCharError;
+ } else
+ byte++;
+ if (*src)
+ continue;
+ if (byte == 0)
+ goto goHome;
+ if (byte == 4)
+ break;
+handlePad:
+ if (byte < 2)
+ return kPadError;
+ padThree = true;
+ if (byte == 2)
+ padTwo = true;
+ break;
+ } while (byte < 4);
+ int two = 0;
+ int three = 0;
+ if (writeDestination) {
+ int one = (uint8_t) (bytes[0] << 2);
+ two = bytes[1];
+ one |= two >> 4;
+ two = (uint8_t) ((two << 4) & 0xFF);
+ three = bytes[2];
+ two |= three >> 2;
+ three = (uint8_t) ((three << 6) & 0xFF);
+ three |= bytes[3];
+ SkASSERT(one < 256 && two < 256 && three < 256);
+ *dst = (unsigned char) one;
+ }
+ dst++;
+ if (padTwo)
+ break;
+ if (writeDestination)
+ *dst = (unsigned char) two;
+ dst++;
+ if (padThree)
+ break;
+ if (writeDestination)
+ *dst = (unsigned char) three;
+ dst++;
+ }
+goHome:
+ fLength = dst - dstStart;
+ return kNoError;
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+size_t SkBase64::Encode(const void* srcPtr, size_t length, void* dstPtr, const char* encodeMap) {
+ const char* encode;
+ if (nullptr == encodeMap) {
+ encode = default_encode;
+ } else {
+ encode = encodeMap;
+ }
+ const unsigned char* src = (const unsigned char*) srcPtr;
+ unsigned char* dst = (unsigned char*) dstPtr;
+ if (dst) {
+ size_t remainder = length % 3;
+ const unsigned char* end = &src[length - remainder];
+ while (src < end) {
+ unsigned a = *src++;
+ unsigned b = *src++;
+ unsigned c = *src++;
+ int d = c & 0x3F;
+ c = (c >> 6 | b << 2) & 0x3F;
+ b = (b >> 4 | a << 4) & 0x3F;
+ a = a >> 2;
+ *dst++ = encode[a];
+ *dst++ = encode[b];
+ *dst++ = encode[c];
+ *dst++ = encode[d];
+ }
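+        // Encode the final one- or two-byte group, padding with '=' as needed.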
+ if (remainder > 0) {
+ int k1 = 0;
+ int k2 = EncodePad;
+ int a = (uint8_t) *src++;
+            if (remainder == 2) {
+ int b = *src++;
+ k1 = b >> 4;
+ k2 = (b << 2) & 0x3F;
+ }
+ *dst++ = encode[a >> 2];
+ *dst++ = encode[(k1 | a << 4) & 0x3F];
+ *dst++ = encode[k2];
+ *dst++ = encode[EncodePad];
+ }
+ }
+ return (length + 2) / 3 * 4;
+}
+
+SkBase64::Error SkBase64::decode(const char* src, size_t len) {
+ Error err = decode(src, len, false);
+ SkASSERT(err == kNoError);
+ if (err != kNoError)
+ return err;
+ fData = new char[fLength]; // should use sk_malloc/sk_free
+ decode(src, len, true);
+ return kNoError;
+}
diff --git a/gfx/skia/skia/src/utils/SkBitSet.h b/gfx/skia/skia/src/utils/SkBitSet.h
new file mode 100644
index 0000000000..770b0d8bce
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBitSet.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitSet_DEFINED
+#define SkBitSet_DEFINED
+
+#include "include/private/SkTemplates.h"
+
+class SkBitSet {
+public:
+ explicit SkBitSet(int numberOfBits) {
+ SkASSERT(numberOfBits >= 0);
+ fDwordCount = (numberOfBits + 31) / 32; // Round up size to 32-bit boundary.
+ if (fDwordCount > 0) {
+ fBitData.reset((uint32_t*)sk_calloc_throw(fDwordCount * sizeof(uint32_t)));
+ }
+ }
+
+ /** Set the value of the index-th bit to true. */
+ void set(int index) {
+ uint32_t mask = 1 << (index & 31);
+ uint32_t* chunk = this->internalGet(index);
+ SkASSERT(chunk);
+ *chunk |= mask;
+ }
+
+ bool has(int index) const {
+ const uint32_t* chunk = this->internalGet(index);
+ uint32_t mask = 1 << (index & 31);
+ return chunk && SkToBool(*chunk & mask);
+ }
+
+ // Calls f(unsigned) for each set value.
+ template<typename FN>
+ void getSetValues(FN f) const {
+ const uint32_t* data = fBitData.get();
+ for (unsigned i = 0; i < fDwordCount; ++i) {
+ if (uint32_t value = data[i]) { // There are set bits
+ unsigned index = i * 32;
+ for (unsigned j = 0; j < 32; ++j) {
+ if (0x1 & (value >> j)) {
+ f(index | j);
+ }
+ }
+ }
+ }
+ }
+
+private:
+ std::unique_ptr<uint32_t, SkFunctionWrapper<void(void*), sk_free>> fBitData;
+ size_t fDwordCount; // Dword (32-bit) count of the bitset.
+
+ uint32_t* internalGet(int index) const {
+ size_t internalIndex = index / 32;
+ if (internalIndex >= fDwordCount) {
+ return nullptr;
+ }
+ return fBitData.get() + internalIndex;
+ }
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCallableTraits.h b/gfx/skia/skia/src/utils/SkCallableTraits.h
new file mode 100644
index 0000000000..003db21280
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCallableTraits.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCallableTraits_DEFINED
+#define SkCallableTraits_DEFINED
+
+#include <type_traits>
+#include <tuple>
+
+template <typename R, typename... Args> struct sk_base_callable_traits {
+ using return_type = R;
+ static constexpr std::size_t arity = sizeof...(Args);
+ template <std::size_t N> struct argument {
+ static_assert(N < arity, "");
+ using type = typename std::tuple_element<N, std::tuple<Args...>>::type;
+ };
+};
+
+#define SK_CALLABLE_TRAITS__COMMA ,
+
+#define SK_CALLABLE_TRAITS__VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__INSTANCE(quals,) \
+SK_CALLABLE_TRAITS__INSTANCE(quals, SK_CALLABLE_TRAITS__COMMA ...)
+
+#ifdef __cpp_noexcept_function_type
+#define SK_CALLABLE_TRAITS__NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__VARARGS(quals,) \
+SK_CALLABLE_TRAITS__VARARGS(quals noexcept,)
+#else
+#define SK_CALLABLE_TRAITS__NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__VARARGS(quals,)
+#endif
+
+#define SK_CALLABLE_TRAITS__REF_NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals,) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals &,) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals &&,)
+
+#define SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS() \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(const,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(volatile,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(const volatile,)
+
+/** Infer the return_type and argument<N> of a callable type T. */
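+//
+// Illustrative use (hypothetical function, not part of this header):
+//   int fn(double);
+//   static_assert(std::is_same<SkCallableTraits<decltype(fn)>::return_type,
+//                              int>::value, "");
+//   static_assert(std::is_same<SkCallableTraits<decltype(fn)>::argument<0>::type,
+//                              double>::value, "");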
+template <typename T> struct SkCallableTraits : SkCallableTraits<decltype(&T::operator())> {};
+
+// function (..., (const, volatile), (&, &&), noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename R, typename... Args> \
+struct SkCallableTraits<R(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS()
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to function (..., noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename R, typename... Args> \
+struct SkCallableTraits<R(*)(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__NE_VARARGS(,)
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to method (..., (const, volatile), (&, &&), noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename T, typename R, typename... Args> \
+struct SkCallableTraits<R(T::*)(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS()
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to field
+template <typename T, typename R>
+struct SkCallableTraits<R T::*> : sk_base_callable_traits<typename std::add_lvalue_reference<R>::type> {};
+
+#undef SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS
+#undef SK_CALLABLE_TRAITS__REF_NE_VARARGS
+#undef SK_CALLABLE_TRAITS__NE_VARARGS
+#undef SK_CALLABLE_TRAITS__VARARGS
+#undef SK_CALLABLE_TRAITS__COMMA
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCamera.cpp b/gfx/skia/skia/src/utils/SkCamera.cpp
new file mode 100644
index 0000000000..63d1c804fa
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCamera.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkCamera.h"
+
+static SkScalar SkScalarDotDiv(int count, const SkScalar a[], int step_a,
+ const SkScalar b[], int step_b,
+ SkScalar denom) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; i++) {
+ prod += a[0] * b[0];
+ a += step_a;
+ b += step_b;
+ }
+ return prod / denom;
+}
+
+static SkScalar SkScalarDot(int count, const SkScalar a[], int step_a,
+ const SkScalar b[], int step_b) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; i++) {
+ prod += a[0] * b[0];
+ a += step_a;
+ b += step_b;
+ }
+ return prod;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkPoint3D::normalize(SkUnit3D* unit) const {
+ SkScalar mag = SkScalarSqrt(fX*fX + fY*fY + fZ*fZ);
+ if (mag) {
+ SkScalar scale = SkScalarInvert(mag);
+ unit->fX = fX * scale;
+ unit->fY = fY * scale;
+ unit->fZ = fZ * scale;
+ } else {
+ unit->fX = unit->fY = unit->fZ = 0;
+ }
+ return mag;
+}
+
+SkScalar SkUnit3D::Dot(const SkUnit3D& a, const SkUnit3D& b) {
+ return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ;
+}
+
+void SkUnit3D::Cross(const SkUnit3D& a, const SkUnit3D& b, SkUnit3D* cross) {
+ SkASSERT(cross);
+
+ // use x,y,z, in case &a == cross or &b == cross
+
+ SkScalar x = a.fY * b.fZ - a.fZ * b.fY;
+    SkScalar y = a.fZ * b.fX - a.fX * b.fZ;
+ SkScalar z = a.fX * b.fY - a.fY * b.fX;
+
+ cross->set(x, y, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPatch3D::SkPatch3D() {
+ this->reset();
+}
+
+void SkPatch3D::reset() {
+ fOrigin.set(0, 0, 0);
+ fU.set(SK_Scalar1, 0, 0);
+ fV.set(0, -SK_Scalar1, 0);
+}
+
+void SkPatch3D::transform(const SkMatrix3D& m, SkPatch3D* dst) const {
+ if (dst == nullptr) {
+ dst = (SkPatch3D*)this;
+ }
+ m.mapVector(fU, &dst->fU);
+ m.mapVector(fV, &dst->fV);
+ m.mapPoint(fOrigin, &dst->fOrigin);
+}
+
+SkScalar SkPatch3D::dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const {
+ SkScalar cx = fU.fY * fV.fZ - fU.fZ * fV.fY;
+    SkScalar cy = fU.fZ * fV.fX - fU.fX * fV.fZ;
+ SkScalar cz = fU.fX * fV.fY - fU.fY * fV.fX;
+
+ return cx * dx + cy * dy + cz * dz;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix3D::reset() {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[0][0] = fMat[1][1] = fMat[2][2] = SK_Scalar1;
+}
+
+void SkMatrix3D::setTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[0][0] = x;
+ fMat[1][1] = y;
+ fMat[2][2] = z;
+}
+
+void SkMatrix3D::setRotateX(SkScalar degX) {
+ SkScalar r = SkDegreesToRadians(degX),
+ s = SkScalarSin(r),
+ c = SkScalarCos(r);
+ this->setRow(0, SK_Scalar1, 0, 0);
+ this->setRow(1, 0, c, -s);
+ this->setRow(2, 0, s, c);
+}
+
+void SkMatrix3D::setRotateY(SkScalar degY) {
+ SkScalar r = SkDegreesToRadians(degY),
+ s = SkScalarSin(r),
+ c = SkScalarCos(r);
+ this->setRow(0, c, 0, -s);
+ this->setRow(1, 0, SK_Scalar1, 0);
+ this->setRow(2, s, 0, c);
+}
+
+void SkMatrix3D::setRotateZ(SkScalar degZ) {
+ SkScalar r = SkDegreesToRadians(degZ),
+ s = SkScalarSin(r),
+ c = SkScalarCos(r);
+ this->setRow(0, c, -s, 0);
+ this->setRow(1, s, c, 0);
+ this->setRow(2, 0, 0, SK_Scalar1);
+}
+
+void SkMatrix3D::preTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ SkScalar col[3] = { x, y, z};
+
+ for (int i = 0; i < 3; i++) {
+ fMat[i][3] += SkScalarDot(3, &fMat[i][0], 1, col, 1);
+ }
+}
+
+void SkMatrix3D::preRotateX(SkScalar degX) {
+ SkMatrix3D m;
+ m.setRotateX(degX);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::preRotateY(SkScalar degY) {
+ SkMatrix3D m;
+ m.setRotateY(degY);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::preRotateZ(SkScalar degZ) {
+ SkMatrix3D m;
+ m.setRotateZ(degZ);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::setConcat(const SkMatrix3D& a, const SkMatrix3D& b) {
+ SkMatrix3D tmp;
+ SkMatrix3D* c = this;
+
+ if (this == &a || this == &b) {
+ c = &tmp;
+ }
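+    // fMat is a row-major 3x4 affine matrix; a step of 4 in SkScalarDot walks
+    // down a column of b, so each entry is a row(a) . column(b) product.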
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 3; j++) {
+ c->fMat[i][j] = SkScalarDot(3, &a.fMat[i][0], 1, &b.fMat[0][j], 4);
+ }
+ c->fMat[i][3] = SkScalarDot(3, &a.fMat[i][0], 1,
+ &b.fMat[0][3], 4) + a.fMat[i][3];
+ }
+
+ if (c == &tmp) {
+ *this = tmp;
+ }
+}
+
+void SkMatrix3D::mapPoint(const SkPoint3D& src, SkPoint3D* dst) const {
+ SkScalar x = SkScalarDot(3, &fMat[0][0], 1, &src.fX, 1) + fMat[0][3];
+ SkScalar y = SkScalarDot(3, &fMat[1][0], 1, &src.fX, 1) + fMat[1][3];
+ SkScalar z = SkScalarDot(3, &fMat[2][0], 1, &src.fX, 1) + fMat[2][3];
+ dst->set(x, y, z);
+}
+
+void SkMatrix3D::mapVector(const SkVector3D& src, SkVector3D* dst) const {
+ SkScalar x = SkScalarDot(3, &fMat[0][0], 1, &src.fX, 1);
+ SkScalar y = SkScalarDot(3, &fMat[1][0], 1, &src.fX, 1);
+ SkScalar z = SkScalarDot(3, &fMat[2][0], 1, &src.fX, 1);
+ dst->set(x, y, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCamera3D::SkCamera3D() {
+ this->reset();
+}
+
+void SkCamera3D::reset() {
+ fLocation.set(0, 0, -SkIntToScalar(576)); // 8 inches backward
+ fAxis.set(0, 0, SK_Scalar1); // forward
+ fZenith.set(0, -SK_Scalar1, 0); // up
+
+ fObserver.set(0, 0, fLocation.fZ);
+
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::update() {
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::doUpdate() const {
+ SkUnit3D axis, zenith, cross;
+
+    // construct an orthonormal basis of cross (x), zenith (y), and axis (z)
+ fAxis.normalize(&axis);
+
+ {
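+        // Gram-Schmidt: remove zenith's component along the view axis, then
+        // renormalize it.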
+ SkScalar dot = SkUnit3D::Dot(SkUnit3D{fZenith.fX, fZenith.fY, fZenith.fZ}, axis);
+
+ zenith.fX = fZenith.fX - dot * axis.fX;
+ zenith.fY = fZenith.fY - dot * axis.fY;
+ zenith.fZ = fZenith.fZ - dot * axis.fZ;
+
+ SkPoint3D{zenith.fX, zenith.fY, zenith.fZ}.normalize(&zenith);
+ }
+
+ SkUnit3D::Cross(axis, zenith, &cross);
+
+ {
+ SkMatrix* orien = &fOrientation;
+ SkScalar x = fObserver.fX;
+ SkScalar y = fObserver.fY;
+ SkScalar z = fObserver.fZ;
+
+ // Looking along the view axis we have:
+ //
+ // /|\ zenith
+ // |
+ // |
+ // | * observer (projected on XY plane)
+ // |
+ // |____________\ cross
+ // /
+ //
+ // So this does a z-shear along the view axis based on the observer's x and y values,
+ // and scales in x and y relative to the negative of the observer's z value
+ // (the observer is in the negative z direction).
+
+ orien->set(SkMatrix::kMScaleX, x * axis.fX - z * cross.fX);
+ orien->set(SkMatrix::kMSkewX, x * axis.fY - z * cross.fY);
+ orien->set(SkMatrix::kMTransX, x * axis.fZ - z * cross.fZ);
+ orien->set(SkMatrix::kMSkewY, y * axis.fX - z * zenith.fX);
+ orien->set(SkMatrix::kMScaleY, y * axis.fY - z * zenith.fY);
+ orien->set(SkMatrix::kMTransY, y * axis.fZ - z * zenith.fZ);
+ orien->set(SkMatrix::kMPersp0, axis.fX);
+ orien->set(SkMatrix::kMPersp1, axis.fY);
+ orien->set(SkMatrix::kMPersp2, axis.fZ);
+ }
+}
+
+void SkCamera3D::patchToMatrix(const SkPatch3D& quilt, SkMatrix* matrix) const {
+ if (fNeedToUpdate) {
+ this->doUpdate();
+ fNeedToUpdate = false;
+ }
+
+ const SkScalar* mapPtr = (const SkScalar*)(const void*)&fOrientation;
+ const SkScalar* patchPtr;
+ SkPoint3D diff;
+ SkScalar dot;
+
+ diff.fX = quilt.fOrigin.fX - fLocation.fX;
+ diff.fY = quilt.fOrigin.fY - fLocation.fY;
+ diff.fZ = quilt.fOrigin.fZ - fLocation.fZ;
+
+ dot = SkUnit3D::Dot(SkUnit3D{diff.fX, diff.fY, diff.fZ},
+ SkUnit3D{mapPtr[6], mapPtr[7], mapPtr[8]});
+
+ // This multiplies fOrientation by the matrix [quilt.fU quilt.fV diff] -- U, V, and diff are
+ // column vectors in the matrix -- then divides by the length of the projection of diff onto
+ // the view axis (which is 'dot'). This transforms the patch (which transforms from local path
+ // space to world space) into view space (since fOrientation transforms from world space to
+ // view space).
+ //
+ // The divide by 'dot' isn't strictly necessary as the homogeneous divide would do much the
+ // same thing (it's just scaling the entire matrix by 1/dot). It looks like it's normalizing
+ // the matrix into some canonical space.
+ patchPtr = (const SkScalar*)&quilt;
+ matrix->set(SkMatrix::kMScaleX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMSkewY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp0, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr += 3;
+ matrix->set(SkMatrix::kMSkewX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMScaleY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp1, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr = (const SkScalar*)(const void*)&diff;
+ matrix->set(SkMatrix::kMTransX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMTransY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp2, SK_Scalar1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+Sk3DView::Sk3DView() {
+ fInitialRec.fMatrix.reset();
+ fRec = &fInitialRec;
+}
+
+Sk3DView::~Sk3DView() {
+ Rec* rec = fRec;
+ while (rec != &fInitialRec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+void Sk3DView::save() {
+ Rec* rec = new Rec;
+ rec->fNext = fRec;
+ rec->fMatrix = fRec->fMatrix;
+ fRec = rec;
+}
+
+void Sk3DView::restore() {
+ SkASSERT(fRec != &fInitialRec);
+ Rec* next = fRec->fNext;
+ delete fRec;
+ fRec = next;
+}
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+void Sk3DView::setCameraLocation(SkScalar x, SkScalar y, SkScalar z) {
+ // the camera location is passed in inches, set in pt
+ SkScalar lz = z * 72.0f;
+ fCamera.fLocation.set(x * 72.0f, y * 72.0f, lz);
+ fCamera.fObserver.set(0, 0, lz);
+ fCamera.update();
+}
+
+SkScalar Sk3DView::getCameraLocationX() const {
+ return fCamera.fLocation.fX / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationY() const {
+ return fCamera.fLocation.fY / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationZ() const {
+ return fCamera.fLocation.fZ / 72.0f;
+}
+#endif
+
+void Sk3DView::translate(SkScalar x, SkScalar y, SkScalar z) {
+ fRec->fMatrix.preTranslate(x, y, z);
+}
+
+void Sk3DView::rotateX(SkScalar deg) {
+ fRec->fMatrix.preRotateX(deg);
+}
+
+void Sk3DView::rotateY(SkScalar deg) {
+ fRec->fMatrix.preRotateY(deg);
+}
+
+void Sk3DView::rotateZ(SkScalar deg) {
+ fRec->fMatrix.preRotateZ(deg);
+}
+
+SkScalar Sk3DView::dotWithNormal(SkScalar x, SkScalar y, SkScalar z) const {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ return patch.dotWith(x, y, z);
+}
+
+void Sk3DView::getMatrix(SkMatrix* matrix) const {
+ if (matrix != nullptr) {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ fCamera.patchToMatrix(patch, matrix);
+ }
+}
+
+#include "include/core/SkCanvas.h"
+
+void Sk3DView::applyToCanvas(SkCanvas* canvas) const {
+ SkMatrix matrix;
+
+ this->getMatrix(&matrix);
+ canvas->concat(matrix);
+}
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.cpp b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
new file mode 100644
index 0000000000..93612e2724
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/SkCanvasStack.h"
+
+SkCanvasStack::SkCanvasStack(int width, int height)
+ : INHERITED(width, height) {}
+
+SkCanvasStack::~SkCanvasStack() {
+ this->removeAll();
+}
+
+void SkCanvasStack::pushCanvas(std::unique_ptr<SkCanvas> canvas, const SkIPoint& origin) {
+ if (canvas) {
+ // compute the bounds of this canvas
+ const SkIRect canvasBounds = SkIRect::MakeSize(canvas->getBaseLayerSize());
+
+ // push the canvas onto the stack
+ this->INHERITED::addCanvas(canvas.get());
+
+ // push the canvas data onto the stack
+ CanvasData* data = &fCanvasData.push_back();
+ data->origin = origin;
+ data->requiredClip.setRect(canvasBounds);
+ data->ownedCanvas = std::move(canvas);
+
+ // subtract this region from the canvas objects already on the stack.
+ // This ensures they do not draw into the space occupied by the layers
+ // above them.
+ for (int i = fList.count() - 1; i > 0; --i) {
+ SkIRect localBounds = canvasBounds;
+ localBounds.offset(origin - fCanvasData[i-1].origin);
+
+ fCanvasData[i-1].requiredClip.op(localBounds, SkRegion::kDifference_Op);
+ fList[i-1]->clipRegion(fCanvasData[i-1].requiredClip);
+ }
+ }
+ SkASSERT(fList.count() == fCanvasData.count());
+}
+
+void SkCanvasStack::removeAll() {
+ this->INHERITED::removeAll(); // call the baseclass *before* we actually delete the canvases
+ fCanvasData.reset();
+}
+
+/**
+ * Traverse all canvases (e.g. layers) in the stack and ensure that they are clipped
+ * to their bounds and that the area covered by any canvas higher in the stack is
+ * also clipped out.
+ */
+void SkCanvasStack::clipToZOrderedBounds() {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+ fList[i]->clipRegion(fCanvasData[i].requiredClip);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need to handle setMatrix specially as it overwrites the matrix in each
+ * canvas unlike all other matrix operations (i.e. translate, scale, etc) which
+ * just pre-concatenate with the existing matrix.
+ */
+void SkCanvasStack::didSetMatrix(const SkMatrix& matrix) {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+
+ SkMatrix tempMatrix = matrix;
+ tempMatrix.postTranslate(SkIntToScalar(-fCanvasData[i].origin.x()),
+ SkIntToScalar(-fCanvasData[i].origin.y()));
+ fList[i]->setMatrix(tempMatrix);
+ }
+ this->SkCanvas::didSetMatrix(matrix);
+}
+
+void SkCanvasStack::onClipRect(const SkRect& r, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRect(r, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipRRect(const SkRRect& rr, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRRect(rr, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipPath(const SkPath& p, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipPath(p, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+ SkRegion tempRegion;
+ deviceRgn.translate(-fCanvasData[i].origin.x(),
+ -fCanvasData[i].origin.y(), &tempRegion);
+ tempRegion.op(fCanvasData[i].requiredClip, SkRegion::kIntersect_Op);
+ fList[i]->clipRegion(tempRegion, op);
+ }
+ this->SkCanvas::onClipRegion(deviceRgn, op);
+}
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.h b/gfx/skia/skia/src/utils/SkCanvasStack.h
new file mode 100644
index 0000000000..ae293c7d77
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStack_DEFINED
+#define SkCanvasStack_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "include/private/SkTArray.h"
+#include "include/utils/SkNWayCanvas.h"
+
+/**
+ * Like NWayCanvas, in that it forwards all canvas methods to each sub-canvas that is "pushed".
+ *
+ * Unlike NWayCanvas, this takes ownership of each subcanvas, and deletes them when this canvas
+ * is deleted.
+ */
+class SkCanvasStack : public SkNWayCanvas {
+public:
+ SkCanvasStack(int width, int height);
+ ~SkCanvasStack() override;
+
+ void pushCanvas(std::unique_ptr<SkCanvas>, const SkIPoint& origin);
+ void removeAll() override;
+
+ /*
+ * The following add/remove canvas methods are overrides from SkNWayCanvas
+ * that do not make sense in the context of our CanvasStack, but since we
+ * can share most of the other implementation of NWay we override those
+ * methods to be no-ops.
+ */
+ void addCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+ void removeCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+
+protected:
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+
+private:
+ void clipToZOrderedBounds();
+
+ struct CanvasData {
+ SkIPoint origin;
+ SkRegion requiredClip;
+ std::unique_ptr<SkCanvas> ownedCanvas;
+ };
+
+ SkTArray<CanvasData> fCanvasData;
+
+ typedef SkNWayCanvas INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
new file mode 100644
index 0000000000..73fb5caa96
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkCanvasStateUtils.h"
+
+#include "include/core/SkCanvas.h"
+#include "src/core/SkClipOpPriv.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkWriter32.h"
+#include "src/utils/SkCanvasStack.h"
+
+/*
+ * WARNING: The structs below are part of a stable ABI and as such we explicitly
+ * use unambiguous primitives (e.g. int32_t instead of an enum).
+ *
+ * ANY CHANGES TO THE STRUCTS BELOW THAT IMPACT THE ABI SHOULD RESULT IN A
+ * NEW SUBCLASS OF SkCanvasState. SUCH CHANGES SHOULD ONLY BE MADE IF ABSOLUTELY
+ * NECESSARY!
+ *
+ * In order to test changes, run the CanvasState tests. gyp/canvas_state_lib.gyp
+ * describes how to create a library to pass to the CanvasState tests. The tests
+ * should succeed when building the library with your changes and passing that to
+ * the tests running in the unchanged Skia.
+ */
+enum RasterConfigs {
+ kUnknown_RasterConfig = 0,
+ kRGB_565_RasterConfig = 1,
+ kARGB_8888_RasterConfig = 2
+};
+typedef int32_t RasterConfig;
+
+enum CanvasBackends {
+ kUnknown_CanvasBackend = 0,
+ kRaster_CanvasBackend = 1,
+ kGPU_CanvasBackend = 2,
+ kPDF_CanvasBackend = 3
+};
+typedef int32_t CanvasBackend;
+
+struct ClipRect {
+ int32_t left, top, right, bottom;
+};
+
+struct SkMCState {
+ float matrix[9];
+ // NOTE: this only works for non-antialiased clips
+ int32_t clipRectCount;
+ ClipRect* clipRects;
+};
+
+// NOTE: If you add more members, create a new subclass of SkCanvasState with a
+// new CanvasState::version.
+struct SkCanvasLayerState {
+ CanvasBackend type;
+ int32_t x, y;
+ int32_t width;
+ int32_t height;
+
+ SkMCState mcState;
+
+ union {
+ struct {
+ RasterConfig config; // pixel format: a value from RasterConfigs.
+ uint64_t rowBytes; // Number of bytes from start of one line to next.
+ void* pixels; // The pixels, all (height * rowBytes) of them.
+ } raster;
+ struct {
+ int32_t textureID;
+ } gpu;
+ };
+};
+
+class SkCanvasState {
+public:
+ SkCanvasState(int32_t version, SkCanvas* canvas) {
+ SkASSERT(canvas);
+ this->version = version;
+ width = canvas->getBaseLayerSize().width();
+ height = canvas->getBaseLayerSize().height();
+ }
+
+ /**
+ * The version this struct was built with. This field must always appear
+ * first in the struct so that when the versions don't match (and the
+ * remaining contents and size are potentially different) we can still
+ * compare the version numbers.
+ */
+ int32_t version;
+ int32_t width;
+ int32_t height;
+ int32_t alignmentPadding;
+};
+
+class SkCanvasState_v1 : public SkCanvasState {
+public:
+ static const int32_t kVersion = 1;
+
+ SkCanvasState_v1(SkCanvas* canvas) : INHERITED(kVersion, canvas) {
+ layerCount = 0;
+ layers = nullptr;
+ mcState.clipRectCount = 0;
+ mcState.clipRects = nullptr;
+ originalCanvas = canvas;
+ }
+
+ ~SkCanvasState_v1() {
+ // loop through the layers and free the data allocated to the clipRects
+ for (int i = 0; i < layerCount; ++i) {
+ sk_free(layers[i].mcState.clipRects);
+ }
+
+ sk_free(mcState.clipRects);
+ sk_free(layers);
+ }
+
+ SkMCState mcState;
+
+ int32_t layerCount;
+ SkCanvasLayerState* layers;
+private:
+ SkCanvas* originalCanvas;
+ typedef SkCanvasState INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void setup_MC_state(SkMCState* state, const SkMatrix& matrix, const SkIRect& clip) {
+ // initialize the struct
+ state->clipRectCount = 0;
+
+ // capture the matrix
+ for (int i = 0; i < 9; i++) {
+ state->matrix[i] = matrix.get(i);
+ }
+
+ /*
+ * We only support a single clipRect, so we take the clip's bounds. Clients have long made
+ * this assumption anyway, so this restriction is fine.
+ */
+
+ if (!clip.isEmpty()) {
+ state->clipRectCount = 1;
+ state->clipRects = (ClipRect*)sk_malloc_throw(sizeof(ClipRect));
+ state->clipRects->left = clip.fLeft;
+ state->clipRects->top = clip.fTop;
+ state->clipRects->right = clip.fRight;
+ state->clipRects->bottom = clip.fBottom;
+ }
+}
+
+SkCanvasState* SkCanvasStateUtils::CaptureCanvasState(SkCanvas* canvas) {
+ SkASSERT(canvas);
+
+ // Check the clip can be decomposed into rectangles (i.e. no soft clips).
+ if (canvas->androidFramework_isClipAA()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkCanvasState_v1> canvasState(new SkCanvasState_v1(canvas));
+
+ setup_MC_state(&canvasState->mcState, canvas->getTotalMatrix(), canvas->getDeviceClipBounds());
+
+ /*
+ * decompose the layers
+ *
+ * storage is allocated on the stack for the first 3 layers. It is common in
+ * some view systems (e.g. Android) that a few non-clipped layers are present
+ * and we will not need to malloc any additional memory in those cases.
+ */
+ SkSWriter32<3*sizeof(SkCanvasLayerState)> layerWriter;
+ int layerCount = 0;
+ for (SkCanvas::LayerIter layer(canvas); !layer.done(); layer.next()) {
+ // we currently only work for bitmap backed devices
+ SkPixmap pmap;
+ if (!layer.device()->accessPixels(&pmap) || 0 == pmap.width() || 0 == pmap.height()) {
+ return nullptr;
+ }
+
+ SkCanvasLayerState* layerState =
+ (SkCanvasLayerState*) layerWriter.reserve(sizeof(SkCanvasLayerState));
+ layerState->type = kRaster_CanvasBackend;
+ layerState->x = layer.x();
+ layerState->y = layer.y();
+ layerState->width = pmap.width();
+ layerState->height = pmap.height();
+
+ switch (pmap.colorType()) {
+ case kN32_SkColorType:
+ layerState->raster.config = kARGB_8888_RasterConfig;
+ break;
+ case kRGB_565_SkColorType:
+ layerState->raster.config = kRGB_565_RasterConfig;
+ break;
+ default:
+ return nullptr;
+ }
+ layerState->raster.rowBytes = pmap.rowBytes();
+ layerState->raster.pixels = pmap.writable_addr();
+
+ setup_MC_state(&layerState->mcState, layer.matrix(), layer.clipBounds());
+ layerCount++;
+ }
+
+ // allocate memory for the layers and then copy them into the struct
+ SkASSERT(layerWriter.bytesWritten() == layerCount * sizeof(SkCanvasLayerState));
+ canvasState->layerCount = layerCount;
+ canvasState->layers = (SkCanvasLayerState*) sk_malloc_throw(layerWriter.bytesWritten());
+ layerWriter.flatten(canvasState->layers);
+
+ return canvasState.release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void setup_canvas_from_MC_state(const SkMCState& state, SkCanvas* canvas) {
+ // reconstruct the matrix
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix.set(i, state.matrix[i]);
+ }
+
+ // We only really support 1 rect, so if the caller (legacy?) sent us more, we just take the bounds
+ // of what they sent.
+ SkIRect bounds = SkIRect::MakeEmpty();
+ if (state.clipRectCount > 0) {
+ bounds.setLTRB(state.clipRects[0].left,
+ state.clipRects[0].top,
+ state.clipRects[0].right,
+ state.clipRects[0].bottom);
+ for (int i = 1; i < state.clipRectCount; ++i) {
+ bounds.join({state.clipRects[i].left,
+ state.clipRects[i].top,
+ state.clipRects[i].right,
+ state.clipRects[i].bottom});
+ }
+ }
+
+ canvas->clipRect(SkRect::Make(bounds));
+ canvas->concat(matrix);
+}
+
+static std::unique_ptr<SkCanvas>
+make_canvas_from_canvas_layer(const SkCanvasLayerState& layerState) {
+ SkASSERT(kRaster_CanvasBackend == layerState.type);
+
+ SkBitmap bitmap;
+ SkColorType colorType =
+ layerState.raster.config == kARGB_8888_RasterConfig ? kN32_SkColorType :
+ layerState.raster.config == kRGB_565_RasterConfig ? kRGB_565_SkColorType :
+ kUnknown_SkColorType;
+
+ if (colorType == kUnknown_SkColorType) {
+ return nullptr;
+ }
+
+ bitmap.installPixels(SkImageInfo::Make(layerState.width, layerState.height,
+ colorType, kPremul_SkAlphaType),
+ layerState.raster.pixels, (size_t) layerState.raster.rowBytes);
+
+ SkASSERT(!bitmap.empty());
+ SkASSERT(!bitmap.isNull());
+
+ std::unique_ptr<SkCanvas> canvas(new SkCanvas(bitmap));
+
+ // setup the matrix and clip
+ setup_canvas_from_MC_state(layerState.mcState, canvas.get());
+
+ return canvas;
+}
+
+std::unique_ptr<SkCanvas> SkCanvasStateUtils::MakeFromCanvasState(const SkCanvasState* state) {
+ SkASSERT(state);
+ // Currently there is only one possible version.
+ SkASSERT(SkCanvasState_v1::kVersion == state->version);
+
+ const SkCanvasState_v1* state_v1 = static_cast<const SkCanvasState_v1*>(state);
+
+ if (state_v1->layerCount < 1) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkCanvasStack> canvas(new SkCanvasStack(state->width, state->height));
+
+ // setup the matrix and clip on the n-way canvas
+ setup_canvas_from_MC_state(state_v1->mcState, canvas.get());
+
+ // Iterate over the layers and add them to the n-way canvas
+ for (int i = state_v1->layerCount - 1; i >= 0; --i) {
+ std::unique_ptr<SkCanvas> canvasLayer = make_canvas_from_canvas_layer(state_v1->layers[i]);
+ if (!canvasLayer.get()) {
+ return nullptr;
+ }
+ canvas->pushCanvas(std::move(canvasLayer), SkIPoint::Make(state_v1->layers[i].x,
+ state_v1->layers[i].y));
+ }
+
+ return canvas;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkCanvasStateUtils::ReleaseCanvasState(SkCanvasState* state) {
+ SkASSERT(!state || SkCanvasState_v1::kVersion == state->version);
+ // Upcast to the correct version of SkCanvasState. This avoids having a virtual destructor on
+ // SkCanvasState. That would be strange since SkCanvasState has no other virtual functions, and
+ // instead uses the field "version" to determine how to behave.
+ delete static_cast<SkCanvasState_v1*>(state);
+}
diff --git a/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp b/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp
new file mode 100644
index 0000000000..a4f459cd44
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkTFitsIn.h"
+#include "src/utils/SkCharToGlyphCache.h"
+
+SkCharToGlyphCache::SkCharToGlyphCache() {
+ this->reset();
+}
+
+SkCharToGlyphCache::~SkCharToGlyphCache() {}
+
+void SkCharToGlyphCache::reset() {
+ fK32.reset();
+ fV16.reset();
+
+ // Add sentinels so we can always rely on these to stop linear searches (in either direction)
+ // Neither is a legal unichar, so we don't care what glyphID we use.
+ //
+ *fK32.append() = 0x80000000; *fV16.append() = 0;
+ *fK32.append() = 0x7FFFFFFF; *fV16.append() = 0;
+
+ fDenom = 0;
+}
+
+// Determined experimentally. For N much larger, the slope technique is faster.
+// For N much smaller, a simple search is faster.
+//
+constexpr int kSmallCountLimit = 16;
+
+// To use slope technique we need at least 2 real entries (+2 sentinels) hence the min of 4
+//
+constexpr int kMinCountForSlope = 4;
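+
+// Illustrative guess (not part of the source): with sentinels at base[0] and
+// base[count - 1], and denom precomputed as 1.0 / (base[count - 2] - base[1]),
+// the first probe for a key k is
+//   index = 1 + (int)(denom * (count - 2) * (k - base[1]))
+// i.e. linear interpolation between the first and last real entries, refined
+// by a short linear scan in either direction.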
+
+static int find_simple(const SkUnichar base[], int count, SkUnichar value) {
+ int index;
+ for (index = 0;; ++index) {
+ if (value <= base[index]) {
+ if (value < base[index]) {
+ index = ~index; // not found
+ }
+ break;
+ }
+ }
+ return index;
+}
+
+static int find_with_slope(const SkUnichar base[], int count, SkUnichar value, double denom) {
+ SkASSERT(count >= kMinCountForSlope);
+
+ int index;
+ if (value <= base[1]) {
+ index = 1;
+ if (value < base[index]) {
+ index = ~index;
+ }
+ } else if (value >= base[count - 2]) {
+ index = count - 2;
+ if (value > base[index]) {
+ index = ~(index + 1);
+ }
+ } else {
+ // make our guess based on the "slope" of the current values
+// index = 1 + (int64_t)(count - 2) * (value - base[1]) / (base[count - 2] - base[1]);
+ index = 1 + (int)(denom * (count - 2) * (value - base[1]));
+ SkASSERT(index >= 1 && index <= count - 2);
+
+ if (value >= base[index]) {
+ for (;; ++index) {
+ if (value <= base[index]) {
+ if (value < base[index]) {
+ index = ~index; // not found
+ }
+ break;
+ }
+ }
+ } else {
+ for (--index;; --index) {
+ SkASSERT(index >= 0);
+ if (value >= base[index]) {
+ if (value > base[index]) {
+ index = ~(index + 1);
+ }
+ break;
+ }
+ }
+ }
+ }
+ return index;
+}
+
+int SkCharToGlyphCache::findGlyphIndex(SkUnichar unichar) const {
+ const int count = fK32.count();
+ int index;
+ if (count <= kSmallCountLimit) {
+ index = find_simple(fK32.begin(), count, unichar);
+ } else {
+ index = find_with_slope(fK32.begin(), count, unichar, fDenom);
+ }
+ if (index >= 0) {
+ return fV16[index];
+ }
+ return index;
+}
+
+void SkCharToGlyphCache::insertCharAndGlyph(int index, SkUnichar unichar, SkGlyphID glyph) {
+ SkASSERT(fK32.size() == fV16.size());
+ SkASSERT((unsigned)index < fK32.size());
+ SkASSERT(unichar < fK32[index]);
+
+ *fK32.insert(index) = unichar;
+ *fV16.insert(index) = glyph;
+
+ // if we've changed the first [1] or last [count-2] entry, recompute our slope
+ const int count = fK32.count();
+ if (count >= kMinCountForSlope && (index == 1 || index == count - 2)) {
+ SkASSERT(index >= 1 && index <= count - 2);
+ fDenom = 1.0 / ((double)fK32[count - 2] - fK32[1]);
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 1; i < fK32.count(); ++i) {
+ SkASSERT(fK32[i-1] < fK32[i]);
+ }
+#endif
+}
diff --git a/gfx/skia/skia/src/utils/SkCharToGlyphCache.h b/gfx/skia/skia/src/utils/SkCharToGlyphCache.h
new file mode 100644
index 0000000000..6e0a47a930
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCharToGlyphCache.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCharToGlyphCache_DEFINED
+#define SkCharToGlyphCache_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkTDArray.h"
+
+class SkCharToGlyphCache {
+public:
+ SkCharToGlyphCache();
+ ~SkCharToGlyphCache();
+
+ // return number of unichars cached
+ int count() const {
+ return fK32.count();
+ }
+
+ void reset(); // forget all cache entries (to save memory)
+
+ /**
+ * Given a unichar, return its glyphID (if the return value is non-negative), else return
+ * ~index of where to insert the computed glyphID.
+ *
+ * int result = cache.findGlyphIndex(unichar);
+ * if (result >= 0) {
+ * glyphID = result;
+ * } else {
+ * glyphID = compute_glyph_using_typeface(unichar);
+ * cache.insertCharAndGlyph(~result, unichar, glyphID);
+ * }
+ */
+ int findGlyphIndex(SkUnichar c) const;
+
+ /**
+ * Insert a new char/glyph pair into the cache at the specified index.
+ * See findGlyphIndex() for how to compute the bit-not of the index.
+ */
+ void insertCharAndGlyph(int index, SkUnichar, SkGlyphID);
+
+ // helper to pre-seed an entry in the cache
+ void addCharAndGlyph(SkUnichar unichar, SkGlyphID glyph) {
+ int index = this->findGlyphIndex(unichar);
+ if (index >= 0) {
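+ // Note (clarifying comment): a non-negative result from findGlyphIndex()
+ // is the glyphID itself, not an array index, so it can be compared
+ // directly against the caller-supplied glyph.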
+ SkASSERT(SkToU16(index) == glyph);
+ } else {
+ this->insertCharAndGlyph(~index, unichar, glyph);
+ }
+ }
+
+private:
+ SkTDArray<int32_t> fK32;
+ SkTDArray<uint16_t> fV16;
+ double fDenom;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkDashPath.cpp b/gfx/skia/skia/src/utils/SkDashPath.cpp
new file mode 100644
index 0000000000..92e4bdbda8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPath.cpp
@@ -0,0 +1,461 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkPointPriv.h"
+#include "src/utils/SkDashPathPriv.h"
+
+#include <utility>
+
+static inline int is_even(int x) {
+ return !(x & 1);
+}
+
+static SkScalar find_first_interval(const SkScalar intervals[], SkScalar phase,
+ int32_t* index, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkScalar gap = intervals[i];
+ if (phase > gap || (phase == gap && gap)) {
+ phase -= gap;
+ } else {
+ *index = i;
+ return gap - phase;
+ }
+ }
+ // If we get here, phase "appears" to be larger than our length. This
+ // shouldn't happen with perfect precision, but we can accumulate errors
+ // during the initial length computation (rounding can make our sum be too
+ // big or too small). In that event, we just have to eat the error here.
+ *index = 0;
+ return intervals[0];
+}
+
+void SkDashPath::CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase) {
+ SkScalar len = 0;
+ for (int i = 0; i < count; i++) {
+ len += intervals[i];
+ }
+ *intervalLength = len;
+ // Adjust phase to be between 0 and len, "flipping" phase if negative.
+ // e.g., if len is 100, then phase of -20 (or -120) is equivalent to 80
+ if (adjustedPhase) {
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > len) {
+ phase = SkScalarMod(phase, len);
+ }
+ phase = len - phase;
+
+ // Due to finite precision, it's possible that phase == len,
+ // even after the subtract (if len >>> phase), so fix that here.
+ // This fixes http://crbug.com/124652 .
+ SkASSERT(phase <= len);
+ if (phase == len) {
+ phase = 0;
+ }
+ } else if (phase >= len) {
+ phase = SkScalarMod(phase, len);
+ }
+ *adjustedPhase = phase;
+ }
+ SkASSERT(phase >= 0 && phase < len);
+
+ *initialDashLength = find_first_interval(intervals, phase,
+ initialDashIndex, count);
+
+ SkASSERT(*initialDashLength >= 0);
+ SkASSERT(*initialDashIndex >= 0 && *initialDashIndex < count);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius *= rec.getMiter();
+ }
+ rect->outset(radius, radius);
+}
+
+// If line is zero-length, bump out the end by a tiny amount
+// to draw endcaps. The bump factor is sized so that
+// SkPoint::Distance() computes a non-zero length.
+// Offsets SK_ScalarNearlyZero or smaller create empty paths when Iter measures length.
+// Large values are scaled by SK_ScalarNearlyZero so significant bits change.
+static void adjust_zero_length_line(SkPoint pts[2]) {
+ SkASSERT(pts[0] == pts[1]);
+ pts[1].fX += SkTMax(1.001f, pts[1].fX) * SK_ScalarNearlyZero;
+}
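+
+// Illustrative numbers (not part of the source): for pts[1].fX == 0 the bump
+// is 1.001f * SK_ScalarNearlyZero; for pts[1].fX == 4096 it is
+// 4096 * SK_ScalarNearlyZero, large enough to survive float rounding at that
+// magnitude, so SkPoint::Distance() stays non-zero.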
+
+static bool clip_line(SkPoint pts[2], const SkRect& bounds, SkScalar intervalLength,
+ SkScalar priorPhase) {
+ SkVector dxy = pts[1] - pts[0];
+
+ // only horizontal or vertical lines
+ if (dxy.fX && dxy.fY) {
+ return false;
+ }
+ int xyOffset = SkToBool(dxy.fY); // 0 to adjust horizontal, 1 to adjust vertical
+
+ SkScalar minXY = (&pts[0].fX)[xyOffset];
+ SkScalar maxXY = (&pts[1].fX)[xyOffset];
+ bool swapped = maxXY < minXY;
+ if (swapped) {
+ using std::swap;
+ swap(minXY, maxXY);
+ }
+
+ SkASSERT(minXY <= maxXY);
+ SkScalar leftTop = (&bounds.fLeft)[xyOffset];
+ SkScalar rightBottom = (&bounds.fRight)[xyOffset];
+ if (maxXY < leftTop || minXY > rightBottom) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the left/top and
+ // right/bottom of the bounds (keeping our new line "in phase" with the dash,
+ // hence the (mod intervalLength).
+
+ if (minXY < leftTop) {
+ minXY = leftTop - SkScalarMod(leftTop - minXY, intervalLength);
+ if (!swapped) {
+ minXY -= priorPhase; // for rectangles, adjust by prior phase
+ }
+ }
+ if (maxXY > rightBottom) {
+ maxXY = rightBottom + SkScalarMod(maxXY - rightBottom, intervalLength);
+ if (swapped) {
+ maxXY += priorPhase; // for rectangles, adjust by prior phase
+ }
+ }
+
+ SkASSERT(maxXY >= minXY);
+ if (swapped) {
+ using std::swap;
+ swap(minXY, maxXY);
+ }
+ (&pts[0].fX)[xyOffset] = minXY;
+ (&pts[1].fX)[xyOffset] = maxXY;
+
+ if (minXY == maxXY) {
+ adjust_zero_length_line(pts);
+ }
+ return true;
+}
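+
+// Worked example (illustrative): clipping a horizontal line starting at
+// x == 0 against bounds with fLeft == 105 and intervalLength == 20 moves the
+// start to 105 - ((105 - 0) % 20) == 100, a whole number of intervals from
+// the original start, so the surviving segment stays in phase with the dash.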
+
+// Handles only lines and rects.
+// If cull_path() returns true, dstPath is the new smaller path,
+// otherwise dstPath may have been changed but you should ignore it.
+static bool cull_path(const SkPath& srcPath, const SkStrokeRec& rec,
+ const SkRect* cullRect, SkScalar intervalLength, SkPath* dstPath) {
+ if (!cullRect) {
+ SkPoint pts[2];
+ if (srcPath.isLine(pts) && pts[0] == pts[1]) {
+ adjust_zero_length_line(pts);
+ dstPath->moveTo(pts[0]);
+ dstPath->lineTo(pts[1]);
+ return true;
+ }
+ return false;
+ }
+
+ SkRect bounds;
+ bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ {
+ SkPoint pts[2];
+ if (srcPath.isLine(pts)) {
+ if (clip_line(pts, bounds, intervalLength, 0)) {
+ dstPath->moveTo(pts[0]);
+ dstPath->lineTo(pts[1]);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ if (srcPath.isRect(nullptr)) {
+ // We'll break the rect into four lines, culling each separately.
+ SkPath::Iter iter(srcPath, false);
+
+ SkPoint pts[4]; // Rects are all moveTo and lineTo, so we'll only use pts[0] and pts[1].
+ SkAssertResult(SkPath::kMove_Verb == iter.next(pts));
+
+ SkScalar accum = 0; // Sum of unculled edge lengths to keep the phase correct.
+ while (iter.next(pts) == SkPath::kLine_Verb) {
+ // Notice this vector v and accum work with the original unclipped length.
+ SkVector v = pts[1] - pts[0];
+
+ if (clip_line(pts, bounds, intervalLength, SkScalarMod(accum, intervalLength))) {
+ // pts[0] may have just been changed by clip_line().
+ // If that's not where we ended the previous lineTo(), we need to moveTo() there.
+ SkPoint last;
+ if (!dstPath->getLastPt(&last) || last != pts[0]) {
+ dstPath->moveTo(pts[0]);
+ }
+ dstPath->lineTo(pts[1]);
+ }
+
+ // We either just traveled v.fX horizontally or v.fY vertically.
+ SkASSERT(v.fX == 0 || v.fY == 0);
+ accum += SkScalarAbs(v.fX + v.fY);
+ }
+ return !dstPath->isEmpty();
+ }
+
+ return false;
+}
+
+class SpecialLineRec {
+public:
+ bool init(const SkPath& src, SkPath* dst, SkStrokeRec* rec,
+ int intervalCount, SkScalar intervalLength) {
+ if (rec->isHairlineStyle() || !src.isLine(fPts)) {
+ return false;
+ }
+
+ // can relax this in the future, if we handle square and round caps
+ if (SkPaint::kButt_Cap != rec->getCap()) {
+ return false;
+ }
+
+ SkScalar pathLength = SkPoint::Distance(fPts[0], fPts[1]);
+
+ fTangent = fPts[1] - fPts[0];
+ if (fTangent.isZero()) {
+ return false;
+ }
+
+ fPathLength = pathLength;
+ fTangent.scale(SkScalarInvert(pathLength));
+ SkPointPriv::RotateCCW(fTangent, &fNormal);
+ fNormal.scale(SkScalarHalf(rec->getWidth()));
+
+ // now estimate how many quads will be added to the path
+ // resulting segments = pathLen * intervalCount / intervalLen
+ // resulting points = 4 * segments
+
+ SkScalar ptCount = pathLength * intervalCount / (float)intervalLength;
+ ptCount = SkTMin(ptCount, SkDashPath::kMaxDashCount);
+ int n = SkScalarCeilToInt(ptCount) << 2;
+ dst->incReserve(n);
+
+ // we will take care of the stroking
+ rec->setFillStyle();
+ return true;
+ }
+
+ void addSegment(SkScalar d0, SkScalar d1, SkPath* path) const {
+ SkASSERT(d0 <= fPathLength);
+ // clamp the segment to our length
+ if (d1 > fPathLength) {
+ d1 = fPathLength;
+ }
+
+ SkScalar x0 = fPts[0].fX + fTangent.fX * d0;
+ SkScalar x1 = fPts[0].fX + fTangent.fX * d1;
+ SkScalar y0 = fPts[0].fY + fTangent.fY * d0;
+ SkScalar y1 = fPts[0].fY + fTangent.fY * d1;
+
+ SkPoint pts[4];
+ pts[0].set(x0 + fNormal.fX, y0 + fNormal.fY); // moveTo
+ pts[1].set(x1 + fNormal.fX, y1 + fNormal.fY); // lineTo
+ pts[2].set(x1 - fNormal.fX, y1 - fNormal.fY); // lineTo
+ pts[3].set(x0 - fNormal.fX, y0 - fNormal.fY); // lineTo
+
+ path->addPoly(pts, SK_ARRAY_COUNT(pts), false);
+ }
+
+private:
+ SkPoint fPts[2];
+ SkVector fTangent;
+ SkVector fNormal;
+ SkScalar fPathLength;
+};
+
+bool SkDashPath::InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength,
+ StrokeRecApplication strokeRecApplication) {
+ // we must always have an even number of intervals
+ SkASSERT(is_even(count));
+
+ // we do nothing if the src wants to be filled
+ SkStrokeRec::Style style = rec->getStyle();
+ if (SkStrokeRec::kFill_Style == style || SkStrokeRec::kStrokeAndFill_Style == style) {
+ return false;
+ }
+
+ const SkScalar* intervals = aIntervals;
+ SkScalar dashCount = 0;
+ int segCount = 0;
+
+ SkPath cullPathStorage;
+ const SkPath* srcPtr = &src;
+ if (cull_path(src, *rec, cullRect, intervalLength, &cullPathStorage)) {
+ // if rect is closed, starts in a dash, and ends in a dash, add the initial join
+ // potentially a better fix is described here: bug.skia.org/7445
+ if (src.isRect(nullptr) && src.isLastContourClosed() && is_even(initialDashIndex)) {
+ SkScalar pathLength = SkPathMeasure(src, false, rec->getResScale()).getLength();
+ SkScalar endPhase = SkScalarMod(pathLength + initialDashLength, intervalLength);
+ int index = 0;
+ while (endPhase > intervals[index]) {
+ endPhase -= intervals[index++];
+ SkASSERT(index <= count);
+ if (index == count) {
+ // We have run out of intervals. endPhase "should" never get to this point,
+ // but it could if the subtracts underflowed. Hence we will pin it as if it
+ // perfectly ran through the intervals.
+ // See crbug.com/875494 (and skbug.com/8274)
+ endPhase = 0;
+ break;
+ }
+ }
+ // if dash ends inside "on", or ends at beginning of "off"
+ if (is_even(index) == (endPhase > 0)) {
+ SkPoint midPoint = src.getPoint(0);
+ // get vector at end of rect
+ int last = src.countPoints() - 1;
+ while (midPoint == src.getPoint(last)) {
+ --last;
+ SkASSERT(last >= 0);
+ }
+ // get vector at start of rect
+ int next = 1;
+ while (midPoint == src.getPoint(next)) {
+ ++next;
+ SkASSERT(next < last);
+ }
+ SkVector v = midPoint - src.getPoint(last);
+ const SkScalar kTinyOffset = SK_ScalarNearlyZero;
+ // scale vector to make start of tiny right angle
+ v *= kTinyOffset;
+ cullPathStorage.moveTo(midPoint - v);
+ cullPathStorage.lineTo(midPoint);
+ v = midPoint - src.getPoint(next);
+ // scale vector to make end of tiny right angle
+ v *= kTinyOffset;
+ cullPathStorage.lineTo(midPoint - v);
+ }
+ }
+ srcPtr = &cullPathStorage;
+ }
+
+ SpecialLineRec lineRec;
+ bool specialLine = (StrokeRecApplication::kAllow == strokeRecApplication) &&
+ lineRec.init(*srcPtr, dst, rec, count >> 1, intervalLength);
+
+ SkPathMeasure meas(*srcPtr, false, rec->getResScale());
+
+ do {
+ bool skipFirstSegment = meas.isClosed();
+ bool addedSegment = false;
+ SkScalar length = meas.getLength();
+ int index = initialDashIndex;
+
+ // Since the path length / dash length ratio may be arbitrarily large, we can exert
+ // significant memory pressure while attempting to build the filtered path. To avoid this,
+ // we simply give up dashing beyond a certain threshold.
+ //
+ // The original bug report (http://crbug.com/165432) is based on a path yielding more than
+ // 90 million dash segments and crashing the memory allocator. A limit of 1 million
+ // segments seems reasonable: at 2 verbs per segment * 9 bytes per verb, this caps the
+ // maximum dash memory overhead at roughly 17MB per path.
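+ // (Arithmetic check: 10^6 segments * 2 verbs * 9 bytes = 18e6 bytes,
+ // roughly 17 MiB.)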
+ dashCount += length * (count >> 1) / intervalLength;
+ if (dashCount > kMaxDashCount) {
+ dst->reset();
+ return false;
+ }
+
+ // Using double precision to avoid looping indefinitely due to single precision rounding
+ // (for extreme path_length/dash_length ratios). See test_infinite_dash() unittest.
+ double distance = 0;
+ double dlen = initialDashLength;
+
+ while (distance < length) {
+ SkASSERT(dlen >= 0);
+ addedSegment = false;
+ if (is_even(index) && !skipFirstSegment) {
+ addedSegment = true;
+ ++segCount;
+
+ if (specialLine) {
+ lineRec.addSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst);
+ } else {
+ meas.getSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst, true);
+ }
+ }
+ distance += dlen;
+
+ // clear this so we only respect it the first time around
+ skipFirstSegment = false;
+
+ // wrap around our intervals array if necessary
+ index += 1;
+ SkASSERT(index <= count);
+ if (index == count) {
+ index = 0;
+ }
+
+ // fetch our next dlen
+ dlen = intervals[index];
+ }
+
+ // extend if we ended on a segment and we need to join up with the (skipped) initial segment
+ if (meas.isClosed() && is_even(initialDashIndex) &&
+ initialDashLength >= 0) {
+ meas.getSegment(0, initialDashLength, dst, !addedSegment);
+ ++segCount;
+ }
+ } while (meas.nextContour());
+
+ if (segCount > 1) {
+ dst->setConvexity(SkPath::kConcave_Convexity);
+ }
+
+ return true;
+}
+
+bool SkDashPath::FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkPathEffect::DashInfo& info) {
+ if (!ValidDashPath(info.fPhase, info.fIntervals, info.fCount)) {
+ return false;
+ }
+ SkScalar initialDashLength = 0;
+ int32_t initialDashIndex = 0;
+ SkScalar intervalLength = 0;
+ CalcDashParameters(info.fPhase, info.fIntervals, info.fCount,
+ &initialDashLength, &initialDashIndex, &intervalLength);
+ return InternalFilter(dst, src, rec, cullRect, info.fIntervals, info.fCount, initialDashLength,
+ initialDashIndex, intervalLength);
+}
+
+bool SkDashPath::ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count) {
+ if (count < 2 || !SkIsAlign2(count)) {
+ return false;
+ }
+ SkScalar length = 0;
+ for (int i = 0; i < count; i++) {
+ if (intervals[i] < 0) {
+ return false;
+ }
+ length += intervals[i];
+ }
+ // watch out for values that might make us go out of bounds
+ return length > 0 && SkScalarIsFinite(phase) && SkScalarIsFinite(length);
+}
diff --git a/gfx/skia/skia/src/utils/SkDashPathPriv.h b/gfx/skia/skia/src/utils/SkDashPathPriv.h
new file mode 100644
index 0000000000..5c6a5db23e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPathPriv.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathPriv_DEFINED
+#define SkDashPathPriv_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+namespace SkDashPath {
+ /**
+ * Calculates the initialDashLength, initialDashIndex, and intervalLength based on the
+ * input phase and intervals. If adjustedPhase is passed in, then the phase will be
+ * adjusted to be between 0 and intervalLength. The result will be stored in adjustedPhase.
+ * If adjustedPhase is nullptr, it is assumed phase is already between 0 and intervalLength.
+ *
+ * Caller should have already used ValidDashPath to exclude invalid data.
+ */
+ void CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase = nullptr);
+
+ bool FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkPathEffect::DashInfo& info);
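+
+ // Usage sketch (illustrative; `paint`, `src`, and `info` are hypothetical):
+ //
+ //   SkStrokeRec rec(paint);
+ //   SkPath dashed;
+ //   if (SkDashPath::FilterDashPath(&dashed, src, &rec, nullptr, info)) {
+ //       // draw `dashed` with the (possibly updated) stroke rec
+ //   }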
+
+ const SkScalar kMaxDashCount = 1000000;
+
+ /** See comments for InternalFilter */
+ enum class StrokeRecApplication {
+ kDisallow,
+ kAllow,
+ };
+
+ /**
+ * Caller should have already used ValidDashPath to exclude invalid data. Typically, this leaves
+ * the strokeRec unmodified. However, for some simple shapes (e.g. a line) it may directly
+ * evaluate the dash and stroke to produce a stroked output path with a fill strokeRec.
+ * Passing StrokeRecApplication::kDisallow turns this behavior off.
+ */
+ bool InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength,
+ StrokeRecApplication = StrokeRecApplication::kAllow);
+
+ bool ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkEventTracer.cpp b/gfx/skia/skia/src/utils/SkEventTracer.cpp
new file mode 100644
index 0000000000..2574306f6c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkEventTracer.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkOnce.h"
+#include "include/utils/SkEventTracer.h"
+#include <atomic>
+
+#include <stdlib.h>
+
+class SkDefaultEventTracer : public SkEventTracer {
+ SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) override { return 0; }
+
+ void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) override {}
+
+ const uint8_t* getCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+ const char* getCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* dummy = "dummy";
+ return dummy;
+ }
+};
+
+// We prefer gUserTracer if it's been set; otherwise we fall back on a default tracer.
+static std::atomic<SkEventTracer*> gUserTracer{nullptr};
+
+bool SkEventTracer::SetInstance(SkEventTracer* tracer) {
+ SkEventTracer* expected = nullptr;
+ if (!gUserTracer.compare_exchange_strong(expected, tracer)) {
+ delete tracer;
+ return false;
+ }
+ atexit([]() { delete gUserTracer.load(); });
+ return true;
+}
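+
+// Usage sketch (illustrative; MyTracer is a hypothetical SkEventTracer
+// subclass): install a tracer once at startup. Ownership transfers on
+// success; on failure (another tracer is already installed) the argument is
+// deleted and false is returned.
+//
+//   bool installed = SkEventTracer::SetInstance(new MyTracer);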
+
+SkEventTracer* SkEventTracer::GetInstance() {
+ if (auto tracer = gUserTracer.load(std::memory_order_acquire)) {
+ return tracer;
+ }
+ static SkOnce once;
+ static SkDefaultEventTracer* defaultTracer;
+ once([] { defaultTracer = new SkDefaultEventTracer; });
+ return defaultTracer;
+}
diff --git a/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp b/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp
new file mode 100644
index 0000000000..4bfcdadda2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkFloatToDecimal.h"
+
+#include <cfloat>
+#include <climits>
+#include <cmath>
+
+#include "include/core/SkTypes.h"
+
+// returns `value * pow(base, e)`, assuming `e` is positive.
+static double pow_by_squaring(double value, double base, int e) {
+ // https://en.wikipedia.org/wiki/Exponentiation_by_squaring
+ SkASSERT(e > 0);
+ while (true) {
+ if (e & 1) {
+ value *= base;
+ }
+ e >>= 1;
+ if (0 == e) {
+ return value;
+ }
+ base *= base;
+ }
+}
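+
+// Worked example (illustrative): pow_by_squaring(1.0, 10.0, 5) proceeds as
+//   e == 5 (odd):  value = 10,     base = 100
+//   e == 2 (even):                 base = 10000
+//   e == 1 (odd):  value = 100000, e becomes 0, returning 1e5.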
+
+// Return pow(10.0, e), optimized for common cases.
+static double pow10(int e) {
+ switch (e) {
+ case 0: return 1.0; // common cases
+ case 1: return 10.0;
+ case 2: return 100.0;
+ case 3: return 1e+03;
+ case 4: return 1e+04;
+ case 5: return 1e+05;
+ case 6: return 1e+06;
+ case 7: return 1e+07;
+ case 8: return 1e+08;
+ case 9: return 1e+09;
+ case 10: return 1e+10;
+ case 11: return 1e+11;
+ case 12: return 1e+12;
+ case 13: return 1e+13;
+ case 14: return 1e+14;
+ case 15: return 1e+15;
+ default:
+ if (e > 15) {
+ return pow_by_squaring(1e+15, 10.0, e - 15);
+ } else {
+ SkASSERT(e < 0);
+ return pow_by_squaring(1.0, 0.1, -e);
+ }
+ }
+}
+
+/** Write a string into output, including a terminating '\0' (for
+ unit testing). Return strlen(output) (for SkWStream::write). The
+ resulting string will be in the form /[-]?([0-9]*\.)?[0-9]+/ and
+ sscanf(output, "%f", &x) will return the original value iff the
+ value is finite. This function accepts all possible input values.
+
+ Motivation: "PDF does not support [numbers] in exponential format
+ (such as 6.02e23)." Otherwise, this function would rely on a
+ sprintf-type function from the standard library. */
+unsigned SkFloatToDecimal(float value, char output[kMaximumSkFloatToDecimalLength]) {
+ /* The longest result is -FLT_MIN.
+ We serialize it as "-.0000000000000000000000000000000000000117549435"
+ which has 48 characters plus a terminating '\0'. */
+
+ static_assert(kMaximumSkFloatToDecimalLength == 49, "");
+ // 3 = '-', '.', and '\0' characters.
+ // 9 = number of significant digits
+ // abs(FLT_MIN_10_EXP) = number of zeros in FLT_MIN
+ static_assert(kMaximumSkFloatToDecimalLength == 3 + 9 - FLT_MIN_10_EXP, "");
+
+ /* section C.1 of the PDF1.4 spec (http://goo.gl/0SCswJ) says that
+ most PDF rasterizers will use fixed-point scalars that lack the
+ dynamic range of floats. Even if this is the case, I want to
+ serialize these (uncommon) very small and very large scalar
+ values with enough precision to allow a floating-point
+ rasterizer to read them in with perfect accuracy.
+ Experimentally, rasterizers such as pdfium do seem to benefit
+ from this. Rasterizers that rely on fixed-point scalars should
+ gracefully ignore these values that they can not parse. */
+ char* output_ptr = &output[0];
+ const char* const end = &output[kMaximumSkFloatToDecimalLength - 1];
+ // subtract one to leave space for '\0'.
+
+ /* This function is written to accept any possible input value,
+ including non-finite values such as INF and NAN. In that case,
+ we ignore value-correctness and output a syntactically valid
+ number. */
+ if (value == INFINITY) {
+ value = FLT_MAX; // nearest finite float.
+ }
+ if (value == -INFINITY) {
+ value = -FLT_MAX; // nearest finite float.
+ }
+ if (!std::isfinite(value) || value == 0.0f) {
+ // NAN is unsupported in PDF. Always output a valid number.
+ // Also catch zero here, as a special case.
+ *output_ptr++ = '0';
+ *output_ptr = '\0';
+ return static_cast<unsigned>(output_ptr - output);
+ }
+ if (value < 0.0) {
+ *output_ptr++ = '-';
+ value = -value;
+ }
+ SkASSERT(value >= 0.0f);
+
+ int binaryExponent;
+ (void)std::frexp(value, &binaryExponent);
+ static const double kLog2 = 0.3010299956639812; // log10(2.0);
+ int decimalExponent = static_cast<int>(std::floor(kLog2 * binaryExponent));
+ int decimalShift = decimalExponent - 8;
+ double power = pow10(-decimalShift);
+ SkASSERT(value * power <= (double)INT_MAX);
+ int d = static_cast<int>(value * power + 0.5);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ SkASSERT(d <= 999999999);
+ if (d > 167772159) { // floor(pow(10,1+log10(1<<24)))
+ // need one fewer decimal digits for 24-bit precision.
+ decimalShift = decimalExponent - 7;
+ // SkASSERT(power * 0.1 == pow10(-decimalShift));
+ // recalculate to get rounding right.
+ d = static_cast<int>(value * (power * 0.1) + 0.5);
+ SkASSERT(d <= 99999999);
+ }
+ while (d % 10 == 0) {
+ d /= 10;
+ ++decimalShift;
+ }
+ SkASSERT(d > 0);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ unsigned char buffer[9]; // decimal value buffer.
+ int bufferIndex = 0;
+ do {
+ buffer[bufferIndex++] = d % 10;
+ d /= 10;
+ } while (d != 0);
+ SkASSERT(bufferIndex <= (int)sizeof(buffer) && bufferIndex > 0);
+ if (decimalShift >= 0) {
+ do {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ } while (bufferIndex);
+ for (int i = 0; i < decimalShift; ++i) {
+ *output_ptr++ = '0';
+ }
+ } else {
+ int placesBeforeDecimal = bufferIndex + decimalShift;
+ if (placesBeforeDecimal > 0) {
+ while (placesBeforeDecimal-- > 0) {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ }
+ *output_ptr++ = '.';
+ } else {
+ *output_ptr++ = '.';
+ int placesAfterDecimal = -placesBeforeDecimal;
+ while (placesAfterDecimal-- > 0) {
+ *output_ptr++ = '0';
+ }
+ }
+ while (bufferIndex > 0) {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ if (output_ptr == end) {
+ break; // denormalized: don't need extra precision.
+ // Note: denormalized numbers will not have the same number of
+ // significantDigits, but do not need them to round-trip.
+ }
+ }
+ }
+ SkASSERT(output_ptr <= end);
+ *output_ptr = '\0';
+ return static_cast<unsigned>(output_ptr - output);
+}
diff --git a/gfx/skia/skia/src/utils/SkFloatToDecimal.h b/gfx/skia/skia/src/utils/SkFloatToDecimal.h
new file mode 100644
index 0000000000..ac1042dbfb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatToDecimal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatToDecimal_DEFINED
+#define SkFloatToDecimal_DEFINED
+
+constexpr unsigned kMaximumSkFloatToDecimalLength = 49;
+
+/** \fn SkFloatToDecimal
+ Convert a float into a decimal string.
+
+ The resulting string will be in the form `[-]?([0-9]*\.)?[0-9]+` (It does
+ not use scientific notation.) and `sscanf(output, "%f", &x)` will return
+ the original value if the value is finite. This function accepts all
+ possible input values.
+
+ INFINITY and -INFINITY are rounded to FLT_MAX and -FLT_MAX.
+
+ NAN values are converted to 0.
+
+ This function will always add a terminating '\0' to the output.
+
+ @param value Any floating-point number
+ @param output The buffer to write the string into. Must be non-null.
+
+ @return strlen(output)
+*/
+unsigned SkFloatToDecimal(float value, char output[kMaximumSkFloatToDecimalLength]);
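+
+// Usage sketch (illustrative; `stream` is a hypothetical SkWStream*):
+//
+//   char buffer[kMaximumSkFloatToDecimalLength];
+//   unsigned len = SkFloatToDecimal(0.25f, buffer);  // buffer holds ".25"
+//   stream->write(buffer, len);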
+
+#endif // SkFloatToDecimal_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkFloatUtils.h b/gfx/skia/skia/src/utils/SkFloatUtils.h
new file mode 100644
index 0000000000..f89a77254e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatUtils.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatUtils_DEFINED
+#define SkFloatUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+#include <limits.h>
+#include <float.h>
+
+template <size_t size>
+class SkTypeWithSize {
+public:
+ // Prevents using SkTypeWithSize<N> with non-specialized N.
+ typedef void UInt;
+};
+
+template <>
+class SkTypeWithSize<32> {
+public:
+ typedef uint32_t UInt;
+};
+
+template <>
+class SkTypeWithSize<64> {
+public:
+ typedef uint64_t UInt;
+};
+
+template <typename RawType>
+struct SkNumericLimits {
+ static const int digits = 0;
+};
+
+template <>
+struct SkNumericLimits<double> {
+ static const int digits = DBL_MANT_DIG;
+};
+
+template <>
+struct SkNumericLimits<float> {
+ static const int digits = FLT_MANT_DIG;
+};
+
+//See
+//http://stackoverflow.com/questions/17333/most-effective-way-for-float-and-double-comparison/3423299#3423299
+//http://code.google.com/p/googletest/source/browse/trunk/include/gtest/internal/gtest-internal.h
+//http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+
+template <typename RawType, unsigned int ULPs>
+class SkFloatingPoint {
+public:
+ /** Bits is an unsigned integer the same size as the floating point number. */
+ typedef typename SkTypeWithSize<sizeof(RawType) * CHAR_BIT>::UInt Bits;
+
+ /** # of bits in a number. */
+ static const size_t kBitCount = CHAR_BIT * sizeof(RawType);
+
+ /** # of fraction bits in a number. */
+ static const size_t kFractionBitCount = SkNumericLimits<RawType>::digits - 1;
+
+ /** # of exponent bits in a number. */
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ /** The mask for the sign bit. */
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ /** The mask for the fraction bits. */
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ /** The mask for the exponent bits. */
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ /** How many ULP's (Units in the Last Place) to tolerate when comparing. */
+ static const size_t kMaxUlps = ULPs;
+
+ /**
+ * Constructs a FloatingPoint from a raw floating-point number.
+ *
+ * On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ * around may change its bits, although the new value is guaranteed
+ * to be also a NAN. Therefore, don't expect this constructor to
+ * preserve the bits in x when x is a NAN.
+ */
+ explicit SkFloatingPoint(const RawType& x) { fU.value = x; }
+
+ /** Returns the exponent bits of this number. */
+ Bits exponent_bits() const { return kExponentBitMask & fU.bits; }
+
+ /** Returns the fraction bits of this number. */
+ Bits fraction_bits() const { return kFractionBitMask & fU.bits; }
+
+ /** Returns true iff this is NAN (not a number). */
+ bool is_nan() const {
+ // It's a NAN if both of the following are true:
+ // * the exponent bits are all ones
+ // * the fraction bits are not all zero.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ /**
+ * Returns true iff this number is at most kMaxUlps ULPs away from rhs.
+ * In particular, this function:
+ * - returns false if either number is (or both are) NAN.
+ * - treats really large numbers as almost equal to infinity.
+ * - thinks +0.0 and -0.0 are 0 ULPs apart.
+ */
+ bool AlmostEquals(const SkFloatingPoint& rhs) const {
+ // Any comparison operation involving a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ const Bits dist = DistanceBetweenSignAndMagnitudeNumbers(fU.bits,
+ rhs.fU.bits);
+ //SkDEBUGF("(%f, %f, %d) ", u_.value_, rhs.u_.value_, dist);
+ return dist <= kMaxUlps;
+ }
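+
+ // Usage sketch (illustrative): with ULPs == 4, values one representable step
+ // apart compare as equal:
+ //
+ //   SkFloatingPoint<float, 4> a(1.0f), b(1.0f + FLT_EPSILON);
+ //   a.AlmostEquals(b);   // true: exactly 1 ULP apart
+ //   SkFloatingPoint<float, 4> c(1.0001f);
+ //   a.AlmostEquals(c);   // false: hundreds of ULPs apart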
+
+private:
+ /** The data type used to store the actual floating-point number. */
+ union FloatingPointUnion {
+ /** The raw floating-point number. */
+ RawType value;
+ /** The bits that represent the number. */
+ Bits bits;
+ };
+
+ /**
+ * Converts an integer from the sign-and-magnitude representation to
+ * the biased representation. More precisely, let N be 2 to the
+ * power of (kBitCount - 1); an integer x is represented by the
+ * unsigned number x + N.
+ *
+ * For instance,
+ *
+ * -N + 1 (the most negative number representable using
+ * sign-and-magnitude) is represented by 1;
+ * 0 is represented by N; and
+ * N - 1 (the biggest number representable using
+ * sign-and-magnitude) is represented by 2N - 1.
+ *
+ * Read http://en.wikipedia.org/wiki/Signed_number_representations
+ * for more details on signed number representations.
+ */
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ /**
+ * Given two numbers in the sign-and-magnitude representation,
+ * returns the distance between them as an unsigned number.
+ */
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion fU;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp b/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp
new file mode 100644
index 0000000000..6c2d310677
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkTemplates.h"
+#include "include/utils/SkFrontBufferedStream.h"
+
+class FrontBufferedStream : public SkStreamRewindable {
+public:
+ // Called by Make.
+ FrontBufferedStream(std::unique_ptr<SkStream>, size_t bufferSize);
+
+ size_t read(void* buffer, size_t size) override;
+
+ size_t peek(void* buffer, size_t size) const override;
+
+ bool isAtEnd() const override;
+
+ bool rewind() override;
+
+ bool hasLength() const override { return fHasLength; }
+
+ size_t getLength() const override { return fLength; }
+
+private:
+ SkStreamRewindable* onDuplicate() const override { return nullptr; }
+
+ std::unique_ptr<SkStream> fStream;
+ const bool fHasLength;
+ const size_t fLength;
+ // Current offset into the stream. Always >= 0.
+ size_t fOffset;
+ // Amount that has been buffered by calls to read. Will always be less than
+ // fBufferSize.
+ size_t fBufferedSoFar;
+ // Total size of the buffer.
+ const size_t fBufferSize;
+ // FIXME: SkAutoTMalloc throws on failure. Instead, Create should return a
+ // nullptr stream.
+ SkAutoTMalloc<char> fBuffer;
+
+ // Read up to size bytes from already buffered data, and copy to
+ // dst, if non-nullptr. Updates fOffset. Assumes that fOffset is less
+ // than fBufferedSoFar.
+ size_t readFromBuffer(char* dst, size_t size);
+
+ // Buffer up to size bytes from the stream, and copy to dst if non-
+ // nullptr. Updates fOffset and fBufferedSoFar. Assumes that fOffset is
+ // less than fBufferedSoFar, and size is greater than 0.
+ size_t bufferAndWriteTo(char* dst, size_t size);
+
+ // Read up to size bytes directly from the stream and into dst if non-
+ // nullptr. Updates fOffset. Assumes fOffset is at or beyond the buffered
+ // data, and size is greater than 0.
+ size_t readDirectlyFromStream(char* dst, size_t size);
+
+ typedef SkStream INHERITED;
+};
+
+std::unique_ptr<SkStreamRewindable> SkFrontBufferedStream::Make(std::unique_ptr<SkStream> stream,
+ size_t bufferSize) {
+ if (!stream) {
+ return nullptr;
+ }
+ return std::unique_ptr<SkStreamRewindable>(new FrontBufferedStream(std::move(stream),
+ bufferSize));
+}
+
+FrontBufferedStream::FrontBufferedStream(std::unique_ptr<SkStream> stream, size_t bufferSize)
+ : fStream(std::move(stream))
+ , fHasLength(fStream->hasPosition() && fStream->hasLength())
+ , fLength(fStream->getLength() - fStream->getPosition())
+ , fOffset(0)
+ , fBufferedSoFar(0)
+ , fBufferSize(bufferSize)
+ , fBuffer(bufferSize) {}
+
+bool FrontBufferedStream::isAtEnd() const {
+ if (fOffset < fBufferedSoFar) {
+ // Even if the underlying stream is at the end, this stream has been
+ // rewound after buffering, so it is not at the end.
+ return false;
+ }
+
+ return fStream->isAtEnd();
+}
+
+bool FrontBufferedStream::rewind() {
+ // Only allow a rewind if we have not exceeded the buffer.
+ if (fOffset <= fBufferSize) {
+ fOffset = 0;
+ return true;
+ }
+ return false;
+}
+
+size_t FrontBufferedStream::readFromBuffer(char* dst, size_t size) {
+ SkASSERT(fOffset < fBufferedSoFar);
+ // Some data has already been copied to fBuffer. Read up to the
+ // lesser of the size requested and the remainder of the buffered
+ // data.
+ const size_t bytesToCopy = SkTMin(size, fBufferedSoFar - fOffset);
+ if (dst != nullptr) {
+ memcpy(dst, fBuffer + fOffset, bytesToCopy);
+ }
+
+ // Update fOffset to the new position. It is guaranteed to be
+ // within the buffered data.
+ fOffset += bytesToCopy;
+ SkASSERT(fOffset <= fBufferedSoFar);
+
+ return bytesToCopy;
+}
+
+size_t FrontBufferedStream::bufferAndWriteTo(char* dst, size_t size) {
+ SkASSERT(size > 0);
+ SkASSERT(fOffset >= fBufferedSoFar);
+ SkASSERT(fBuffer);
+ // Data needs to be buffered. Buffer up to the lesser of the size requested
+ // and the remainder of the max buffer size.
+ const size_t bytesToBuffer = SkTMin(size, fBufferSize - fBufferedSoFar);
+ char* buffer = fBuffer + fOffset;
+ const size_t buffered = fStream->read(buffer, bytesToBuffer);
+
+ fBufferedSoFar += buffered;
+ fOffset = fBufferedSoFar;
+ SkASSERT(fBufferedSoFar <= fBufferSize);
+
+ // Copy the buffer to the destination buffer and update the amount read.
+ if (dst != nullptr) {
+ memcpy(dst, buffer, buffered);
+ }
+
+ return buffered;
+}
+
+size_t FrontBufferedStream::readDirectlyFromStream(char* dst, size_t size) {
+ SkASSERT(size > 0);
+ // If we get here, we have buffered all that can be buffered.
+ SkASSERT(fBufferSize == fBufferedSoFar && fOffset >= fBufferSize);
+
+ const size_t bytesReadDirectly = fStream->read(dst, size);
+ fOffset += bytesReadDirectly;
+
+ // If we have read past the end of the buffer, rewinding is no longer
+ // supported, so we can go ahead and free the memory.
+ if (bytesReadDirectly > 0) {
+ sk_free(fBuffer.release());
+ }
+
+ return bytesReadDirectly;
+}
+
+size_t FrontBufferedStream::peek(void* dst, size_t size) const {
+ // Keep track of the offset so we can return to it.
+ const size_t start = fOffset;
+
+ if (start >= fBufferSize) {
+ // This stream is not able to buffer.
+ return 0;
+ }
+
+ size = SkTMin(size, fBufferSize - start);
+ FrontBufferedStream* nonConstThis = const_cast<FrontBufferedStream*>(this);
+ const size_t bytesRead = nonConstThis->read(dst, size);
+ nonConstThis->fOffset = start;
+ return bytesRead;
+}
+
+size_t FrontBufferedStream::read(void* voidDst, size_t size) {
+ // Cast voidDst to a char* for easy addition.
+ char* dst = reinterpret_cast<char*>(voidDst);
+ SkDEBUGCODE(const size_t totalSize = size;)
+ const size_t start = fOffset;
+
+ // First, read any data that was previously buffered.
+ if (fOffset < fBufferedSoFar) {
+ const size_t bytesCopied = this->readFromBuffer(dst, size);
+
+ // Update the remaining number of bytes needed to read
+ // and the destination buffer.
+ size -= bytesCopied;
+ SkASSERT(size + (fOffset - start) == totalSize);
+ if (dst != nullptr) {
+ dst += bytesCopied;
+ }
+ }
+
+ // Buffer any more data that should be buffered, and copy it to the
+ // destination.
+ if (size > 0 && fBufferedSoFar < fBufferSize && !fStream->isAtEnd()) {
+ const size_t buffered = this->bufferAndWriteTo(dst, size);
+
+ // Update the remaining number of bytes needed to read
+ // and the destination buffer.
+ size -= buffered;
+ SkASSERT(size + (fOffset - start) == totalSize);
+ if (dst != nullptr) {
+ dst += buffered;
+ }
+ }
+
+ if (size > 0 && !fStream->isAtEnd()) {
+ SkDEBUGCODE(const size_t bytesReadDirectly =) this->readDirectlyFromStream(dst, size);
+ SkDEBUGCODE(size -= bytesReadDirectly;)
+ SkASSERT(size + (fOffset - start) == totalSize);
+ }
+
+ return fOffset - start;
+}
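+
+// Illustrative read sequence (not part of the source), with bufferSize == 6:
+//   read(dst, 4) -> 4 bytes buffered and returned        (fOffset == 4)
+//   rewind()     -> succeeds; fOffset reset to 0
+//   read(dst, 8) -> 4 bytes replayed from the buffer, 2 more buffered, then
+//                   2 read directly; the buffer is freed and rewind() will
+//                   now fail.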
diff --git a/gfx/skia/skia/src/utils/SkInterpolator.cpp b/gfx/skia/skia/src/utils/SkInterpolator.cpp
new file mode 100644
index 0000000000..6bf2335f03
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkInterpolator.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkInterpolator.h"
+
+#include "include/core/SkMath.h"
+#include "include/private/SkFixed.h"
+#include "include/private/SkMalloc.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkTSearch.h"
+
+SkInterpolatorBase::SkInterpolatorBase() {
+ fStorage = nullptr;
+ fTimes = nullptr;
+ SkDEBUGCODE(fTimesArray = nullptr;)
+}
+
+SkInterpolatorBase::~SkInterpolatorBase() {
+ if (fStorage) {
+ sk_free(fStorage);
+ }
+}
+
+void SkInterpolatorBase::reset(int elemCount, int frameCount) {
+ fFlags = 0;
+ fElemCount = SkToU8(elemCount);
+ fFrameCount = SkToS16(frameCount);
+ fRepeat = SK_Scalar1;
+ if (fStorage) {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ fTimes = nullptr;
+ SkDEBUGCODE(fTimesArray = nullptr);
+ }
+}
+
+/* Each value[] run is formatted as:
+ <time (in msec)>
+ <blend>
+ <data[fElemCount]>
+
+ Totaling fElemCount+2 entries per keyframe
+*/
+
+bool SkInterpolatorBase::getDuration(SkMSec* startTime, SkMSec* endTime) const {
+ if (fFrameCount == 0) {
+ return false;
+ }
+
+ if (startTime) {
+ *startTime = fTimes[0].fTime;
+ }
+ if (endTime) {
+ *endTime = fTimes[fFrameCount - 1].fTime;
+ }
+ return true;
+}
+
+SkScalar SkInterpolatorBase::ComputeRelativeT(SkMSec time, SkMSec prevTime,
+ SkMSec nextTime, const SkScalar blend[4]) {
+ SkASSERT(time > prevTime && time < nextTime);
+
+ SkScalar t = (SkScalar)(time - prevTime) / (SkScalar)(nextTime - prevTime);
+ return blend ?
+ SkUnitCubicInterp(t, blend[0], blend[1], blend[2], blend[3]) : t;
+}
+
+SkInterpolatorBase::Result SkInterpolatorBase::timeToT(SkMSec time, SkScalar* T,
+ int* indexPtr, bool* exactPtr) const {
+ SkASSERT(fFrameCount > 0);
+ Result result = kNormal_Result;
+ if (fRepeat != SK_Scalar1) {
+ SkMSec startTime = 0, endTime = 0; // initialize to avoid warning
+ this->getDuration(&startTime, &endTime);
+ SkMSec totalTime = endTime - startTime;
+ SkMSec offsetTime = time - startTime;
+ endTime = SkScalarFloorToInt(fRepeat * totalTime);
+ if (offsetTime >= endTime) {
+ SkScalar fraction = SkScalarFraction(fRepeat);
+ offsetTime = fraction == 0 && fRepeat > 0 ? totalTime :
+ (SkMSec) SkScalarFloorToInt(fraction * totalTime);
+ result = kFreezeEnd_Result;
+ } else {
+ int mirror = fFlags & kMirror;
+ offsetTime = offsetTime % (totalTime << mirror);
+ if (offsetTime > totalTime) { // can only be true if fMirror is true
+ offsetTime = (totalTime << 1) - offsetTime;
+ }
+ }
+ time = offsetTime + startTime;
+ }
+
+ int index = SkTSearch<SkMSec>(&fTimes[0].fTime, fFrameCount, time,
+ sizeof(SkTimeCode));
+
+ bool exact = true;
+
+ if (index < 0) {
+ index = ~index;
+ if (index == 0) {
+ result = kFreezeStart_Result;
+ } else if (index == fFrameCount) {
+ if (fFlags & kReset) {
+ index = 0;
+ } else {
+ index -= 1;
+ }
+ result = kFreezeEnd_Result;
+ } else {
+ exact = false;
+ }
+ }
+ SkASSERT(index < fFrameCount);
+ const SkTimeCode* nextTime = &fTimes[index];
+ SkMSec nextT = nextTime[0].fTime;
+ if (exact) {
+ *T = 0;
+ } else {
+ SkMSec prevT = nextTime[-1].fTime;
+ *T = ComputeRelativeT(time, prevT, nextT, nextTime[-1].fBlend);
+ }
+ *indexPtr = index;
+ *exactPtr = exact;
+ return result;
+}
+
+
+SkInterpolator::SkInterpolator() {
+ INHERITED::reset(0, 0);
+ fValues = nullptr;
+ SkDEBUGCODE(fScalarsArray = nullptr;)
+}
+
+SkInterpolator::SkInterpolator(int elemCount, int frameCount) {
+ SkASSERT(elemCount > 0);
+ this->reset(elemCount, frameCount);
+}
+
+void SkInterpolator::reset(int elemCount, int frameCount) {
+ INHERITED::reset(elemCount, frameCount);
+ fStorage = sk_malloc_throw((sizeof(SkScalar) * elemCount +
+ sizeof(SkTimeCode)) * frameCount);
+ fTimes = (SkTimeCode*) fStorage;
+ fValues = (SkScalar*) ((char*) fStorage + sizeof(SkTimeCode) * frameCount);
+#ifdef SK_DEBUG
+ fTimesArray = (SkTimeCode(*)[10]) fTimes;
+ fScalarsArray = (SkScalar(*)[10]) fValues;
+#endif
+}
+
+#define SK_Fixed1Third (SK_Fixed1/3)
+#define SK_Fixed2Third (SK_Fixed1*2/3)
+
+static const SkScalar gIdentityBlend[4] = {
+ 0.33333333f, 0.33333333f, 0.66666667f, 0.66666667f
+};
+
+bool SkInterpolator::setKeyFrame(int index, SkMSec time,
+ const SkScalar values[], const SkScalar blend[4]) {
+ SkASSERT(values != nullptr);
+
+ if (blend == nullptr) {
+ blend = gIdentityBlend;
+ }
+
+ bool success = ~index == SkTSearch<SkMSec>(&fTimes->fTime, index, time,
+ sizeof(SkTimeCode));
+ SkASSERT(success);
+ if (success) {
+ SkTimeCode* timeCode = &fTimes[index];
+ timeCode->fTime = time;
+ memcpy(timeCode->fBlend, blend, sizeof(timeCode->fBlend));
+ SkScalar* dst = &fValues[fElemCount * index];
+ memcpy(dst, values, fElemCount * sizeof(SkScalar));
+ }
+ return success;
+}
+
+SkInterpolator::Result SkInterpolator::timeToValues(SkMSec time,
+ SkScalar values[]) const {
+ SkScalar T;
+ int index;
+ bool exact;
+ Result result = timeToT(time, &T, &index, &exact);
+ if (values) {
+ const SkScalar* nextSrc = &fValues[index * fElemCount];
+
+ if (exact) {
+ memcpy(values, nextSrc, fElemCount * sizeof(SkScalar));
+ } else {
+ SkASSERT(index > 0);
+
+ const SkScalar* prevSrc = nextSrc - fElemCount;
+
+ for (int i = fElemCount - 1; i >= 0; --i) {
+ values[i] = SkScalarInterp(prevSrc[i], nextSrc[i], T);
+ }
+ }
+ }
+ return result;
+}
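+
+// Example usage (a minimal sketch; the times and values are illustrative):
+//
+//   SkInterpolator interp(1, 2);             // one scalar, two keyframes
+//   SkScalar v0 = 0, v1 = 100;
+//   interp.setKeyFrame(0, 0, &v0, nullptr);
+//   interp.setKeyFrame(1, 1000, &v1, nullptr);
+//   SkScalar out;
+//   interp.timeToValues(500, &out);          // out ~= 50 with the default blend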
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int Dot14;
+#define Dot14_ONE (1 << 14)
+#define Dot14_HALF (1 << 13)
+
+#define Dot14ToFloat(x) ((x) / 16384.f)
+
+static inline Dot14 Dot14Mul(Dot14 a, Dot14 b) {
+ return (a * b + Dot14_HALF) >> 14;
+}
+
+static inline Dot14 eval_cubic(Dot14 t, Dot14 A, Dot14 B, Dot14 C) {
+ return Dot14Mul(Dot14Mul(Dot14Mul(C, t) + B, t) + A, t);
+}
+
+static inline Dot14 pin_and_convert(SkScalar x) {
+ if (x <= 0) {
+ return 0;
+ }
+ if (x >= SK_Scalar1) {
+ return Dot14_ONE;
+ }
+ return SkScalarToFixed(x) >> 2;
+}
+
+SkScalar SkUnitCubicInterp(SkScalar value, SkScalar bx, SkScalar by,
+ SkScalar cx, SkScalar cy) {
+ // pin to the unit-square, and convert to 2.14
+ Dot14 x = pin_and_convert(value);
+
+ if (x == 0) return 0;
+ if (x == Dot14_ONE) return SK_Scalar1;
+
+ Dot14 b = pin_and_convert(bx);
+ Dot14 c = pin_and_convert(cx);
+
+ // Now compute our coefficients from the control points
+ // t -> 3b
+ // t^2 -> 3c - 6b
+ // t^3 -> 3b - 3c + 1
+ Dot14 A = 3*b;
+ Dot14 B = 3*(c - 2*b);
+ Dot14 C = 3*(b - c) + Dot14_ONE;
+
+ // Now search for a t value given x
+ Dot14 t = Dot14_HALF;
+ Dot14 dt = Dot14_HALF;
+ for (int i = 0; i < 13; i++) {
+ dt >>= 1;
+ Dot14 guess = eval_cubic(t, A, B, C);
+ if (x < guess) {
+ t -= dt;
+ } else {
+ t += dt;
+ }
+ }
+
+ // Now we have t, so compute the coeff for Y and evaluate
+ b = pin_and_convert(by);
+ c = pin_and_convert(cy);
+ A = 3*b;
+ B = 3*(c - 2*b);
+ C = 3*(b - c) + Dot14_ONE;
+ return SkFixedToScalar(eval_cubic(t, A, B, C) << 2);
+}
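+
+// Example (a minimal sketch): evaluating a CSS-style cubic-bezier(0.25, 0.1,
+// 0.25, 1.0) timing curve at its halfway point. The control points here are
+// illustrative, not part of this API.
+//
+//   SkScalar eased = SkUnitCubicInterp(0.5f, 0.25f, 0.1f, 0.25f, 1.0f);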
diff --git a/gfx/skia/skia/src/utils/SkJSON.cpp b/gfx/skia/skia/src/utils/SkJSON.cpp
new file mode 100644
index 0000000000..b758f1fa91
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSON.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkJSON.h"
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkMalloc.h"
+#include "include/utils/SkParse.h"
+#include "src/utils/SkUTF.h"
+
+#include <cmath>
+#include <tuple>
+#include <vector>
+
+namespace skjson {
+
+// #define SK_JSON_REPORT_ERRORS
+
+static_assert( sizeof(Value) == 8, "");
+static_assert(alignof(Value) == 8, "");
+
+static constexpr size_t kRecAlign = alignof(Value);
+
+void Value::init_tagged(Tag t) {
+ memset(fData8, 0, sizeof(fData8));
+ fData8[Value::kTagOffset] = SkTo<uint8_t>(t);
+ SkASSERT(this->getTag() == t);
+}
+
+// Pointer values store a type (in the upper kTagBits bits) and a pointer.
+void Value::init_tagged_pointer(Tag t, void* p) {
+ *this->cast<uintptr_t>() = reinterpret_cast<uintptr_t>(p);
+
+ if (sizeof(Value) == sizeof(uintptr_t)) {
+ // For 64-bit, we rely on the pointer upper bits being unused/zero.
+ SkASSERT(!(fData8[kTagOffset] & kTagMask));
+ fData8[kTagOffset] |= SkTo<uint8_t>(t);
+ } else {
+ // For 32-bit, we need to zero-initialize the upper 32 bits
+ SkASSERT(sizeof(Value) == sizeof(uintptr_t) * 2);
+ this->cast<uintptr_t>()[kTagOffset >> 2] = 0;
+ fData8[kTagOffset] = SkTo<uint8_t>(t);
+ }
+
+ SkASSERT(this->getTag() == t);
+ SkASSERT(this->ptr<void>() == p);
+}
+
+NullValue::NullValue() {
+ this->init_tagged(Tag::kNull);
+ SkASSERT(this->getTag() == Tag::kNull);
+}
+
+BoolValue::BoolValue(bool b) {
+ this->init_tagged(Tag::kBool);
+ *this->cast<bool>() = b;
+ SkASSERT(this->getTag() == Tag::kBool);
+}
+
+NumberValue::NumberValue(int32_t i) {
+ this->init_tagged(Tag::kInt);
+ *this->cast<int32_t>() = i;
+ SkASSERT(this->getTag() == Tag::kInt);
+}
+
+NumberValue::NumberValue(float f) {
+ this->init_tagged(Tag::kFloat);
+ *this->cast<float>() = f;
+ SkASSERT(this->getTag() == Tag::kFloat);
+}
+
+// Vector recs point to externally allocated slabs with the following layout:
+//
+// [size_t n] [REC_0] ... [REC_n-1] [optional extra trailing storage]
+//
+// Long strings use extra_alloc_size == 1 to store the \0 terminator.
+//
+template <typename T, size_t extra_alloc_size = 0>
+static void* MakeVector(const void* src, size_t size, SkArenaAlloc& alloc) {
+ // The Ts are already in memory, so their size should be safe.
+ const auto total_size = sizeof(size_t) + size * sizeof(T) + extra_alloc_size;
+ auto* size_ptr = reinterpret_cast<size_t*>(alloc.makeBytesAlignedTo(total_size, kRecAlign));
+
+ *size_ptr = size;
+ sk_careful_memcpy(size_ptr + 1, src, size * sizeof(T));
+
+ return size_ptr;
+}
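+
+// E.g. a 3-element vector occupies one contiguous slab,
+//
+//   [size_t 3][REC_0][REC_1][REC_2]
+//
+// and the returned pointer addresses the leading size_t.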
+
+ArrayValue::ArrayValue(const Value* src, size_t size, SkArenaAlloc& alloc) {
+ this->init_tagged_pointer(Tag::kArray, MakeVector<Value>(src, size, alloc));
+ SkASSERT(this->getTag() == Tag::kArray);
+}
+
+// Strings have two flavors:
+//
+// -- short strings (len <= 7) -> these are stored inline, in the record
+// (one byte reserved for null terminator/type):
+//
+// [str] [\0]|[max_len - actual_len]
+//
+// Storing [max_len - actual_len] allows the 'len' field to double-up as a
+// null terminator when size == max_len (this works because kShortString == 0).
+//
+// -- long strings (len > 7) -> these are externally allocated vectors (VectorRec<char>).
+//
+// The string data plus a null-char terminator are copied over.
+//
+namespace {
+
+// An internal string builder with a fast 8 byte short string load path
+// (for the common case where the string is not at the end of the stream).
+class FastString final : public Value {
+public:
+ FastString(const char* src, size_t size, const char* eos, SkArenaAlloc& alloc) {
+ SkASSERT(src <= eos);
+
+ if (size > kMaxInlineStringSize) {
+ this->initLongString(src, size, alloc);
+ SkASSERT(this->getTag() == Tag::kString);
+ return;
+ }
+
+ static_assert(static_cast<uint8_t>(Tag::kShortString) == 0, "please don't break this");
+ static_assert(sizeof(Value) == 8, "");
+
+ // TODO: LIKELY
+ if (src + 7 <= eos) {
+ this->initFastShortString(src, size);
+ } else {
+ this->initShortString(src, size);
+ }
+
+ SkASSERT(this->getTag() == Tag::kShortString);
+ }
+
+private:
+ static constexpr size_t kMaxInlineStringSize = sizeof(Value) - 1;
+
+ void initLongString(const char* src, size_t size, SkArenaAlloc& alloc) {
+ SkASSERT(size > kMaxInlineStringSize);
+
+ this->init_tagged_pointer(Tag::kString, MakeVector<char, 1>(src, size, alloc));
+
+ auto* data = this->cast<VectorValue<char, Value::Type::kString>>()->begin();
+ const_cast<char*>(data)[size] = '\0';
+ }
+
+ void initShortString(const char* src, size_t size) {
+ SkASSERT(size <= kMaxInlineStringSize);
+
+ this->init_tagged(Tag::kShortString);
+ sk_careful_memcpy(this->cast<char>(), src, size);
+ // Null terminator provided by init_tagged() above (fData8 is zero-initialized).
+ }
+
+ void initFastShortString(const char* src, size_t size) {
+ SkASSERT(size <= kMaxInlineStringSize);
+
+ // Load 8 chars and mask out the tag and \0 terminator.
+ uint64_t* s64 = this->cast<uint64_t>();
+ memcpy(s64, src, 8);
+
+#if defined(SK_CPU_LENDIAN)
+ *s64 &= 0x00ffffffffffffffULL >> ((kMaxInlineStringSize - size) * 8);
+#else
+ static_assert(false, "Big-endian builds are not supported at this time.");
+#endif
+ }
+};
+
+} // namespace
+
+StringValue::StringValue(const char* src, size_t size, SkArenaAlloc& alloc) {
+ new (this) FastString(src, size, src, alloc);
+}
+
+ObjectValue::ObjectValue(const Member* src, size_t size, SkArenaAlloc& alloc) {
+ this->init_tagged_pointer(Tag::kObject, MakeVector<Member>(src, size, alloc));
+ SkASSERT(this->getTag() == Tag::kObject);
+}
+
+
+// Boring public Value glue.
+
+static int inline_strcmp(const char a[], const char b[]) {
+ for (;;) {
+ char c = *a++;
+ if (c == 0) {
+ break;
+ }
+ if (c != *b++) {
+ return 1;
+ }
+ }
+ return *b != 0;
+}
+
+const Value& ObjectValue::operator[](const char* key) const {
+ // Reverse search to resolve duplicate keys (policy: return the last match).
+ const auto* begin = this->begin();
+ const auto* member = this->end();
+
+ while (member > begin) {
+ --member;
+ if (0 == inline_strcmp(key, member->fKey.as<StringValue>().begin())) {
+ return member->fValue;
+ }
+ }
+
+ static const Value g_null = NullValue();
+ return g_null;
+}
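+
+// E.g. with duplicate keys, parsing {"a": 1, "a": 2} and reading obj["a"]
+// yields 2 (the last occurrence); a missing key yields the shared null value.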
+
+namespace {
+
+// Lexer/parser inspired by rapidjson [1], sajson [2] and pjson [3].
+//
+// [1] https://github.com/Tencent/rapidjson/
+// [2] https://github.com/chadaustin/sajson
+// [3] https://pastebin.com/hnhSTL3h
+
+
+// bit 0 (0x01) - plain ASCII string character
+// bit 1 (0x02) - whitespace
+// bit 2 (0x04) - string terminator (" \\ \0 [control chars] **AND } ]** <- see matchString notes)
+// bit 3 (0x08) - 0-9
+// bit 4 (0x10) - 0-9 e E .
+// bit 5 (0x20) - scope terminator (} ])
+static constexpr uint8_t g_token_flags[256] = {
+ // 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 4, 6, 4, 4, // 0
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 1
+ 3, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0x11,1, // 2
+ 0x19,0x19,0x19,0x19,0x19,0x19,0x19,0x19, 0x19,0x19, 1, 1, 1, 1, 1, 1, // 3
+ 1, 1, 1, 1, 1, 0x11,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4,0x25, 1, 1, // 5
+ 1, 1, 1, 1, 1, 0x11,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,0x25, 1, 1, // 7
+
+ // 128-255
+ 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0
+};
+
+static inline bool is_ws(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x02; }
+static inline bool is_eostring(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x04; }
+static inline bool is_digit(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x08; }
+static inline bool is_numeric(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x10; }
+static inline bool is_eoscope(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x20; }
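+
+// E.g. '9' carries flags 0x19 (plain char | digit | numeric), while '}'
+// carries 0x25 (plain char | string terminator | scope terminator).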
+
+static inline const char* skip_ws(const char* p) {
+ while (is_ws(*p)) ++p;
+ return p;
+}
+
+static inline float pow10(int32_t exp) {
+ static constexpr float g_pow10_table[63] =
+ {
+ 1.e-031f, 1.e-030f, 1.e-029f, 1.e-028f, 1.e-027f, 1.e-026f, 1.e-025f, 1.e-024f,
+ 1.e-023f, 1.e-022f, 1.e-021f, 1.e-020f, 1.e-019f, 1.e-018f, 1.e-017f, 1.e-016f,
+ 1.e-015f, 1.e-014f, 1.e-013f, 1.e-012f, 1.e-011f, 1.e-010f, 1.e-009f, 1.e-008f,
+ 1.e-007f, 1.e-006f, 1.e-005f, 1.e-004f, 1.e-003f, 1.e-002f, 1.e-001f, 1.e+000f,
+ 1.e+001f, 1.e+002f, 1.e+003f, 1.e+004f, 1.e+005f, 1.e+006f, 1.e+007f, 1.e+008f,
+ 1.e+009f, 1.e+010f, 1.e+011f, 1.e+012f, 1.e+013f, 1.e+014f, 1.e+015f, 1.e+016f,
+ 1.e+017f, 1.e+018f, 1.e+019f, 1.e+020f, 1.e+021f, 1.e+022f, 1.e+023f, 1.e+024f,
+ 1.e+025f, 1.e+026f, 1.e+027f, 1.e+028f, 1.e+029f, 1.e+030f, 1.e+031f
+ };
+
+ static constexpr int32_t k_exp_offset = SK_ARRAY_COUNT(g_pow10_table) / 2;
+
+ // We only support negative exponents for now.
+ SkASSERT(exp <= 0);
+
+ return (exp >= -k_exp_offset) ? g_pow10_table[exp + k_exp_offset]
+ : std::pow(10.0f, static_cast<float>(exp));
+}
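+
+// E.g. pow10(-2) returns 1e-2f straight from the table, while exponents
+// below -31 fall back to std::pow.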
+
+class DOMParser {
+public:
+ explicit DOMParser(SkArenaAlloc& alloc)
+ : fAlloc(alloc) {
+ fValueStack.reserve(kValueStackReserve);
+ fUnescapeBuffer.reserve(kUnescapeBufferReserve);
+ }
+
+ const Value parse(const char* p, size_t size) {
+ if (!size) {
+ return this->error(NullValue(), p, "invalid empty input");
+ }
+
+ const char* p_stop = p + size - 1;
+
+ // We're only checking for end-of-stream on object/array close('}',']'),
+ // so we must trim any whitespace from the buffer tail.
+ while (p_stop > p && is_ws(*p_stop)) --p_stop;
+
+ SkASSERT(p_stop >= p && p_stop < p + size);
+ if (!is_eoscope(*p_stop)) {
+ return this->error(NullValue(), p_stop, "invalid top-level value");
+ }
+
+ p = skip_ws(p);
+
+ switch (*p) {
+ case '{':
+ goto match_object;
+ case '[':
+ goto match_array;
+ default:
+ return this->error(NullValue(), p, "invalid top-level value");
+ }
+
+ match_object:
+ SkASSERT(*p == '{');
+ p = skip_ws(p + 1);
+
+ this->pushObjectScope();
+
+ if (*p == '}') goto pop_object;
+
+ // goto match_object_key;
+ match_object_key:
+ p = skip_ws(p);
+ if (*p != '"') return this->error(NullValue(), p, "expected object key");
+
+ p = this->matchString(p, p_stop, [this](const char* key, size_t size, const char* eos) {
+ this->pushObjectKey(key, size, eos);
+ });
+ if (!p) return NullValue();
+
+ p = skip_ws(p);
+ if (*p != ':') return this->error(NullValue(), p, "expected ':' separator");
+
+ ++p;
+
+ // goto match_value;
+ match_value:
+ p = skip_ws(p);
+
+ switch (*p) {
+ case '\0':
+ return this->error(NullValue(), p, "unexpected input end");
+ case '"':
+ p = this->matchString(p, p_stop, [this](const char* str, size_t size, const char* eos) {
+ this->pushString(str, size, eos);
+ });
+ break;
+ case '[':
+ goto match_array;
+ case 'f':
+ p = this->matchFalse(p);
+ break;
+ case 'n':
+ p = this->matchNull(p);
+ break;
+ case 't':
+ p = this->matchTrue(p);
+ break;
+ case '{':
+ goto match_object;
+ default:
+ p = this->matchNumber(p);
+ break;
+ }
+
+ if (!p) return NullValue();
+
+ // goto match_post_value;
+ match_post_value:
+ SkASSERT(!this->inTopLevelScope());
+
+ p = skip_ws(p);
+ switch (*p) {
+ case ',':
+ ++p;
+ if (this->inObjectScope()) {
+ goto match_object_key;
+ } else {
+ SkASSERT(this->inArrayScope());
+ goto match_value;
+ }
+ case ']':
+ goto pop_array;
+ case '}':
+ goto pop_object;
+ default:
+ return this->error(NullValue(), p - 1, "unexpected value-trailing token");
+ }
+
+ // unreachable
+ SkASSERT(false);
+
+ pop_object:
+ SkASSERT(*p == '}');
+
+ if (this->inArrayScope()) {
+ return this->error(NullValue(), p, "unexpected object terminator");
+ }
+
+ this->popObjectScope();
+
+ // goto pop_common
+ pop_common:
+ SkASSERT(is_eoscope(*p));
+
+ if (this->inTopLevelScope()) {
+ SkASSERT(fValueStack.size() == 1);
+
+ // Success condition: parsed the top level element and reached the stop token.
+ return p == p_stop
+ ? fValueStack.front()
+ : this->error(NullValue(), p + 1, "trailing root garbage");
+ }
+
+ if (p == p_stop) {
+ return this->error(NullValue(), p, "unexpected end-of-input");
+ }
+
+ ++p;
+
+ goto match_post_value;
+
+ match_array:
+ SkASSERT(*p == '[');
+ p = skip_ws(p + 1);
+
+ this->pushArrayScope();
+
+ if (*p != ']') goto match_value;
+
+ // goto pop_array;
+ pop_array:
+ SkASSERT(*p == ']');
+
+ if (this->inObjectScope()) {
+ return this->error(NullValue(), p, "unexpected array terminator");
+ }
+
+ this->popArrayScope();
+
+ goto pop_common;
+
+ SkASSERT(false);
+ return NullValue();
+ }
+
+ std::tuple<const char*, const SkString> getError() const {
+ return std::make_tuple(fErrorToken, fErrorMessage);
+ }
+
+private:
+ SkArenaAlloc& fAlloc;
+
+ // Pending values stack.
+ static constexpr size_t kValueStackReserve = 256;
+ std::vector<Value> fValueStack;
+
+ // String unescape buffer.
+ static constexpr size_t kUnescapeBufferReserve = 512;
+ std::vector<char> fUnescapeBuffer;
+
+ // Tracks the current object/array scope, as an index into fStack:
+ //
+ // - for objects: fScopeIndex = (index of first value in scope)
+ // - for arrays : fScopeIndex = -(index of first value in scope)
+ //
+ // fScopeIndex == 0 IFF we are at the top level (no current/active scope).
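+ //
+ // E.g. while parsing the inner array of {"a": [1, 2]}, fScopeIndex is
+ // negative (array scope), and its magnitude indexes the first array value
+ // on fValueStack.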
+ intptr_t fScopeIndex = 0;
+
+ // Error reporting.
+ const char* fErrorToken = nullptr;
+ SkString fErrorMessage;
+
+ bool inTopLevelScope() const { return fScopeIndex == 0; }
+ bool inObjectScope() const { return fScopeIndex > 0; }
+ bool inArrayScope() const { return fScopeIndex < 0; }
+
+ // Helper for masquerading raw primitive types as Values (bypassing tagging, etc).
+ template <typename T>
+ class RawValue final : public Value {
+ public:
+ explicit RawValue(T v) {
+ static_assert(sizeof(T) <= sizeof(Value), "");
+ *this->cast<T>() = v;
+ }
+
+ T operator *() const { return *this->cast<T>(); }
+ };
+
+ template <typename VectorT>
+ void popScopeAsVec(size_t scope_start) {
+ SkASSERT(scope_start > 0);
+ SkASSERT(scope_start <= fValueStack.size());
+
+ using T = typename VectorT::ValueT;
+ static_assert( sizeof(T) >= sizeof(Value), "");
+ static_assert( sizeof(T) % sizeof(Value) == 0, "");
+ static_assert(alignof(T) == alignof(Value), "");
+
+ const auto scope_count = fValueStack.size() - scope_start,
+ count = scope_count / (sizeof(T) / sizeof(Value));
+ SkASSERT(scope_count % (sizeof(T) / sizeof(Value)) == 0);
+
+ const auto* begin = reinterpret_cast<const T*>(fValueStack.data() + scope_start);
+
+ // Restore the previous scope index from saved placeholder value,
+ // and instantiate as a vector of values in scope.
+ auto& placeholder = fValueStack[scope_start - 1];
+ fScopeIndex = *static_cast<RawValue<intptr_t>&>(placeholder);
+ placeholder = VectorT(begin, count, fAlloc);
+
+ // Drop the (consumed) values in scope.
+ fValueStack.resize(scope_start);
+ }
+
+ void pushObjectScope() {
+ // Save a scope index now, and then later we'll overwrite this value as the Object itself.
+ fValueStack.push_back(RawValue<intptr_t>(fScopeIndex));
+
+ // New object scope.
+ fScopeIndex = SkTo<intptr_t>(fValueStack.size());
+ }
+
+ void popObjectScope() {
+ SkASSERT(this->inObjectScope());
+ this->popScopeAsVec<ObjectValue>(SkTo<size_t>(fScopeIndex));
+
+ SkDEBUGCODE(
+ const auto& obj = fValueStack.back().as<ObjectValue>();
+ SkASSERT(obj.is<ObjectValue>());
+ for (const auto& member : obj) {
+ SkASSERT(member.fKey.is<StringValue>());
+ }
+ )
+ }
+
+ void pushArrayScope() {
+ // Save a scope index now, and then later we'll overwrite this value as the Array itself.
+ fValueStack.push_back(RawValue<intptr_t>(fScopeIndex));
+
+ // New array scope.
+ fScopeIndex = -SkTo<intptr_t>(fValueStack.size());
+ }
+
+ void popArrayScope() {
+ SkASSERT(this->inArrayScope());
+ this->popScopeAsVec<ArrayValue>(SkTo<size_t>(-fScopeIndex));
+
+ SkDEBUGCODE(
+ const auto& arr = fValueStack.back().as<ArrayValue>();
+ SkASSERT(arr.is<ArrayValue>());
+ )
+ }
+
+ void pushObjectKey(const char* key, size_t size, const char* eos) {
+ SkASSERT(this->inObjectScope());
+ SkASSERT(fValueStack.size() >= SkTo<size_t>(fScopeIndex));
+ SkASSERT(!((fValueStack.size() - SkTo<size_t>(fScopeIndex)) & 1));
+ this->pushString(key, size, eos);
+ }
+
+ void pushTrue() {
+ fValueStack.push_back(BoolValue(true));
+ }
+
+ void pushFalse() {
+ fValueStack.push_back(BoolValue(false));
+ }
+
+ void pushNull() {
+ fValueStack.push_back(NullValue());
+ }
+
+ void pushString(const char* s, size_t size, const char* eos) {
+ fValueStack.push_back(FastString(s, size, eos, fAlloc));
+ }
+
+ void pushInt32(int32_t i) {
+ fValueStack.push_back(NumberValue(i));
+ }
+
+ void pushFloat(float f) {
+ fValueStack.push_back(NumberValue(f));
+ }
+
+ template <typename T>
+ T error(T&& ret_val, const char* p, const char* msg) {
+#if defined(SK_JSON_REPORT_ERRORS)
+ fErrorToken = p;
+ fErrorMessage.set(msg);
+#endif
+ return ret_val;
+ }
+
+ const char* matchTrue(const char* p) {
+ SkASSERT(p[0] == 't');
+
+ if (p[1] == 'r' && p[2] == 'u' && p[3] == 'e') {
+ this->pushTrue();
+ return p + 4;
+ }
+
+ return this->error(nullptr, p, "invalid token");
+ }
+
+ const char* matchFalse(const char* p) {
+ SkASSERT(p[0] == 'f');
+
+ if (p[1] == 'a' && p[2] == 'l' && p[3] == 's' && p[4] == 'e') {
+ this->pushFalse();
+ return p + 5;
+ }
+
+ return this->error(nullptr, p, "invalid token");
+ }
+
+ const char* matchNull(const char* p) {
+ SkASSERT(p[0] == 'n');
+
+ if (p[1] == 'u' && p[2] == 'l' && p[3] == 'l') {
+ this->pushNull();
+ return p + 4;
+ }
+
+ return this->error(nullptr, p, "invalid token");
+ }
+
+ const std::vector<char>* unescapeString(const char* begin, const char* end) {
+ fUnescapeBuffer.clear();
+
+ for (const auto* p = begin; p != end; ++p) {
+ if (*p != '\\') {
+ fUnescapeBuffer.push_back(*p);
+ continue;
+ }
+
+ if (++p == end) {
+ return nullptr;
+ }
+
+ switch (*p) {
+ case '"': fUnescapeBuffer.push_back( '"'); break;
+ case '\\': fUnescapeBuffer.push_back('\\'); break;
+ case '/': fUnescapeBuffer.push_back( '/'); break;
+ case 'b': fUnescapeBuffer.push_back('\b'); break;
+ case 'f': fUnescapeBuffer.push_back('\f'); break;
+ case 'n': fUnescapeBuffer.push_back('\n'); break;
+ case 'r': fUnescapeBuffer.push_back('\r'); break;
+ case 't': fUnescapeBuffer.push_back('\t'); break;
+ case 'u': {
+ if (p + 4 >= end) {
+ return nullptr;
+ }
+
+ uint32_t hexed;
+ const char hex_str[] = {p[1], p[2], p[3], p[4], '\0'};
+ const auto* eos = SkParse::FindHex(hex_str, &hexed);
+ if (!eos || *eos) {
+ return nullptr;
+ }
+
+ char utf8[SkUTF::kMaxBytesInUTF8Sequence];
+ const auto utf8_len = SkUTF::ToUTF8(SkTo<SkUnichar>(hexed), utf8);
+ fUnescapeBuffer.insert(fUnescapeBuffer.end(), utf8, utf8 + utf8_len);
+ p += 4;
+ } break;
+ default: return nullptr;
+ }
+ }
+
+ return &fUnescapeBuffer;
+ }
+
+ template <typename MatchFunc>
+ const char* matchString(const char* p, const char* p_stop, MatchFunc&& func) {
+ SkASSERT(*p == '"');
+ const auto* s_begin = p + 1;
+ bool requires_unescape = false;
+
+ do {
+ // Consume string chars.
+ // This is the fast path, and hopefully we only hit it once then quick-exit below.
+ for (p = p + 1; !is_eostring(*p); ++p);
+
+ if (*p == '"') {
+ // Valid string found.
+ if (!requires_unescape) {
+ func(s_begin, p - s_begin, p_stop);
+ } else {
+ // Slow unescape. We could avoid this extra copy with some effort,
+ // but in practice escaped strings should be rare.
+ const auto* buf = this->unescapeString(s_begin, p);
+ if (!buf) {
+ break;
+ }
+
+ SkASSERT(!buf->empty());
+ func(buf->data(), buf->size(), buf->data() + buf->size() - 1);
+ }
+ return p + 1;
+ }
+
+ if (*p == '\\') {
+ requires_unescape = true;
+ ++p;
+ continue;
+ }
+
+ // End-of-scope chars are special: we use them to tag the end of the input.
+ // Thus they cannot be consumed indiscriminately -- we need to check if we hit the
+ // end of the input. To that effect, we treat them as string terminators above,
+ // then we catch them here.
+ if (is_eoscope(*p)) {
+ continue;
+ }
+
+ // Invalid/unexpected char.
+ break;
+ } while (p != p_stop);
+
+ // Premature end-of-input, or illegal string char.
+ return this->error(nullptr, s_begin - 1, "invalid string");
+ }
+
+ const char* matchFastFloatDecimalPart(const char* p, int sign, float f, int exp) {
+ SkASSERT(exp <= 0);
+
+ for (;;) {
+ if (!is_digit(*p)) break;
+ f = f * 10.f + (*p++ - '0'); --exp;
+ if (!is_digit(*p)) break;
+ f = f * 10.f + (*p++ - '0'); --exp;
+ }
+
+ const auto decimal_scale = pow10(exp);
+ if (is_numeric(*p) || !decimal_scale) {
+ SkASSERT((*p == '.' || *p == 'e' || *p == 'E') || !decimal_scale);
+ // Malformed input, or an (unsupported) exponent, or a collapsed decimal factor.
+ return nullptr;
+ }
+
+ this->pushFloat(sign * f * decimal_scale);
+
+ return p;
+ }
+
+ const char* matchFastFloatPart(const char* p, int sign, float f) {
+ for (;;) {
+ if (!is_digit(*p)) break;
+ f = f * 10.f + (*p++ - '0');
+ if (!is_digit(*p)) break;
+ f = f * 10.f + (*p++ - '0');
+ }
+
+ if (!is_numeric(*p)) {
+ // Matched (integral) float.
+ this->pushFloat(sign * f);
+ return p;
+ }
+
+ return (*p == '.') ? this->matchFastFloatDecimalPart(p + 1, sign, f, 0)
+ : nullptr;
+ }
+
+ const char* matchFast32OrFloat(const char* p) {
+ int sign = 1;
+ if (*p == '-') {
+ sign = -1;
+ ++p;
+ }
+
+ const auto* digits_start = p;
+
+ int32_t n32 = 0;
+
+ // This is the largest absolute int32 value we can handle before
+ // risking overflow *on the next digit* (214748363).
+ static constexpr int32_t kMaxInt32 = (std::numeric_limits<int32_t>::max() - 9) / 10;
+
+ if (is_digit(*p)) {
+ n32 = (*p++ - '0');
+ for (;;) {
+ if (!is_digit(*p) || n32 > kMaxInt32) break;
+ n32 = n32 * 10 + (*p++ - '0');
+ }
+ }
+
+ if (!is_numeric(*p)) {
+ // Did we actually match any digits?
+ if (p > digits_start) {
+ this->pushInt32(sign * n32);
+ return p;
+ }
+ return nullptr;
+ }
+
+ if (*p == '.') {
+ const auto* decimals_start = ++p;
+
+ int exp = 0;
+
+ for (;;) {
+ if (!is_digit(*p) || n32 > kMaxInt32) break;
+ n32 = n32 * 10 + (*p++ - '0'); --exp;
+ if (!is_digit(*p) || n32 > kMaxInt32) break;
+ n32 = n32 * 10 + (*p++ - '0'); --exp;
+ }
+
+ if (!is_numeric(*p)) {
+ // Did we actually match any digits?
+ if (p > decimals_start) {
+ this->pushFloat(sign * n32 * pow10(exp));
+ return p;
+ }
+ return nullptr;
+ }
+
+ if (n32 > kMaxInt32) {
+ // we ran out of int32 precision
+ return this->matchFastFloatDecimalPart(p, sign, n32, exp);
+ }
+ }
+
+ return this->matchFastFloatPart(p, sign, n32);
+ }
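+
+ // E.g. "42," takes the pure-int32 path and pushes an int, "3.5," pushes a
+ // float via the decimal path, and "1e3," is rejected here (exponents fall
+ // through to the strtof fallback in matchNumber()).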
+
+ const char* matchNumber(const char* p) {
+ if (const auto* fast = this->matchFast32OrFloat(p)) return fast;
+
+ // slow fallback
+ char* matched;
+ float f = strtof(p, &matched);
+ if (matched > p) {
+ this->pushFloat(f);
+ return matched;
+ }
+ return this->error(nullptr, p, "invalid numeric token");
+ }
+};
+
+void Write(const Value& v, SkWStream* stream) {
+ switch (v.getType()) {
+ case Value::Type::kNull:
+ stream->writeText("null");
+ break;
+ case Value::Type::kBool:
+ stream->writeText(*v.as<BoolValue>() ? "true" : "false");
+ break;
+ case Value::Type::kNumber:
+ stream->writeScalarAsText(*v.as<NumberValue>());
+ break;
+ case Value::Type::kString:
+ stream->writeText("\"");
+ stream->writeText(v.as<StringValue>().begin());
+ stream->writeText("\"");
+ break;
+ case Value::Type::kArray: {
+ const auto& array = v.as<ArrayValue>();
+ stream->writeText("[");
+ bool first_value = true;
+ for (const auto& v : array) {
+ if (!first_value) stream->writeText(",");
+ Write(v, stream);
+ first_value = false;
+ }
+ stream->writeText("]");
+ break;
+ }
+ case Value::Type::kObject:
+ const auto& object = v.as<ObjectValue>();
+ stream->writeText("{");
+ bool first_member = true;
+ for (const auto& member : object) {
+ SkASSERT(member.fKey.getType() == Value::Type::kString);
+ if (!first_member) stream->writeText(",");
+ Write(member.fKey, stream);
+ stream->writeText(":");
+ Write(member.fValue, stream);
+ first_member = false;
+ }
+ stream->writeText("}");
+ break;
+ }
+}
+
+} // namespace
+
+SkString Value::toString() const {
+ SkDynamicMemoryWStream wstream;
+ Write(*this, &wstream);
+ const auto data = wstream.detachAsData();
+ // TODO: is there a better way to pass data around without copying?
+ return SkString(static_cast<const char*>(data->data()), data->size());
+}
+
+static constexpr size_t kMinChunkSize = 4096;
+
+DOM::DOM(const char* data, size_t size)
+ : fAlloc(kMinChunkSize) {
+ DOMParser parser(fAlloc);
+
+ fRoot = parser.parse(data, size);
+}
+
+void DOM::write(SkWStream* stream) const {
+ Write(fRoot, stream);
+}
+
+} // namespace skjson
diff --git a/gfx/skia/skia/src/utils/SkJSON.h b/gfx/skia/skia/src/utils/SkJSON.h
new file mode 100644
index 0000000000..931b30f0a4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSON.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJSON_DEFINED
+#define SkJSON_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+
+#include <cstring>
+
+class SkString;
+class SkWStream;
+
+namespace skjson {
+
+/**
+ * A fast and likely non-conforming JSON parser.
+ *
+ * Some known limitations/compromises:
+ *
+ * -- single-precision FP numbers
+ *
+ *   -- string unescaping is handled, but escaped strings take a slower parse path
+ *
+ *
+ * Values are opaque, fixed-size (64 bits), immutable records.
+ *
+ * They can be converted to facade types for type-specific functionality.
+ *
+ * E.g.:
+ *
+ * if (v.is<ArrayValue>()) {
+ * for (const auto& item : v.as<ArrayValue>()) {
+ * if (const NumberValue* n = item) {
+ * printf("Found number: %f", **n);
+ * }
+ * }
+ * }
+ *
+ * if (v.is<ObjectValue>()) {
+ * const StringValue* id = v.as<ObjectValue>()["id"];
+ * if (id) {
+ * printf("Found object ID: %s", id->begin());
+ * } else {
+ * printf("Missing object ID");
+ * }
+ * }
+ */
+class alignas(8) Value {
+public:
+ enum class Type {
+ kNull,
+ kBool,
+ kNumber,
+ kString,
+ kArray,
+ kObject,
+ };
+
+ /**
+ * @return The type of this value.
+ */
+ Type getType() const;
+
+ /**
+ * @return True if the record matches the facade type T.
+ */
+ template <typename T>
+ bool is() const { return this->getType() == T::kType; }
+
+ /**
+ * Unguarded conversion to facade types.
+ *
+ * @return The record cast as facade type T&.
+ */
+ template <typename T>
+ const T& as() const {
+ SkASSERT(this->is<T>());
+ return *reinterpret_cast<const T*>(this);
+ }
+
+ /**
+ * Guarded conversion to facade types.
+ *
+ * @return The record cast as facade type T*.
+ */
+ template <typename T>
+ operator const T*() const {
+ return this->is<T>() ? &this->as<T>() : nullptr;
+ }
+
+ /**
+ * @return The string representation of this value.
+ */
+ SkString toString() const;
+
+protected:
+ /*
+ Value implementation notes:
+
+ -- fixed 64-bit size
+
+ -- 8-byte aligned
+
+ -- union of:
+
+ bool
+ int32
+ float
+ char[8] (short string storage)
+ external payload (tagged) pointer
+
+ -- highest 3 bits reserved for type storage
+
+ */
+ enum class Tag : uint8_t {
+ // We picked kShortString == 0 so that tag 0x00 and stored max_size-size (7-7=0)
+ // conveniently overlap the '\0' terminator, allowing us to store a 7 character
+ // C string inline.
+ kShortString = 0b00000000, // inline payload
+ kNull = 0b00100000, // no payload
+ kBool = 0b01000000, // inline payload
+ kInt = 0b01100000, // inline payload
+ kFloat = 0b10000000, // inline payload
+ kString = 0b10100000, // ptr to external storage
+ kArray = 0b11000000, // ptr to external storage
+ kObject = 0b11100000, // ptr to external storage
+ };
+ static constexpr uint8_t kTagMask = 0b11100000;
+
+ void init_tagged(Tag);
+ void init_tagged_pointer(Tag, void*);
+
+ Tag getTag() const {
+ return static_cast<Tag>(fData8[kTagOffset] & kTagMask);
+ }
+
+ // Access the record data as T.
+ //
+ // This is also used to access the payload for inline records. Since the record type lives in
+ // the high bits, sizeof(T) must be less than sizeof(Value) when accessing inline payloads.
+ //
+ // E.g.
+ //
+ // uint8_t
+ // -----------------------------------------------------------------------
+ // | val8 | val8 | val8 | val8 | val8 | val8 | val8 | TYPE|
+ // -----------------------------------------------------------------------
+ //
+ // uint32_t
+ // -----------------------------------------------------------------------
+ // | val32 | unused | TYPE|
+ // -----------------------------------------------------------------------
+ //
+ // T* (64b)
+ // -----------------------------------------------------------------------
+ // | T* (kTypeShift bits) |TYPE|
+ // -----------------------------------------------------------------------
+ //
+ template <typename T>
+ const T* cast() const {
+ static_assert(sizeof (T) <= sizeof(Value), "");
+ static_assert(alignof(T) <= alignof(Value), "");
+ return reinterpret_cast<const T*>(this);
+ }
+
+ template <typename T>
+ T* cast() { return const_cast<T*>(const_cast<const Value*>(this)->cast<T>()); }
+
+ // Access the pointer payload.
+ template <typename T>
+ const T* ptr() const {
+ static_assert(sizeof(uintptr_t) == sizeof(Value) ||
+ sizeof(uintptr_t) * 2 == sizeof(Value), "");
+
+ return (sizeof(uintptr_t) < sizeof(Value))
+ // For 32-bit, pointers are stored unmodified.
+ ? *this->cast<const T*>()
+ // For 64-bit, we use the high bits of the pointer as tag storage.
+ : reinterpret_cast<T*>(*this->cast<uintptr_t>() & kTagPointerMask);
+ }
+
+private:
+ static constexpr size_t kValueSize = 8;
+
+ uint8_t fData8[kValueSize];
+
+#if defined(SK_CPU_LENDIAN)
+ static constexpr size_t kTagOffset = kValueSize - 1;
+
+ static constexpr uintptr_t kTagPointerMask =
+ ~(static_cast<uintptr_t>(kTagMask) << ((sizeof(uintptr_t) - 1) * 8));
+#else
+ // The current value layout assumes LE and will take some tweaking for BE.
+ static_assert(false, "Big-endian builds are not supported at this time.");
+#endif
+};
+
+class NullValue final : public Value {
+public:
+ static constexpr Type kType = Type::kNull;
+
+ NullValue();
+};
+
+class BoolValue final : public Value {
+public:
+ static constexpr Type kType = Type::kBool;
+
+ explicit BoolValue(bool);
+
+ bool operator *() const {
+ SkASSERT(this->getTag() == Tag::kBool);
+ return *this->cast<bool>();
+ }
+};
+
+class NumberValue final : public Value {
+public:
+ static constexpr Type kType = Type::kNumber;
+
+ explicit NumberValue(int32_t);
+ explicit NumberValue(float);
+
+ double operator *() const {
+ SkASSERT(this->getTag() == Tag::kInt ||
+ this->getTag() == Tag::kFloat);
+
+ return this->getTag() == Tag::kInt
+ ? static_cast<double>(*this->cast<int32_t>())
+ : static_cast<double>(*this->cast<float>());
+ }
+};
+
+template <typename T, Value::Type vtype>
+class VectorValue : public Value {
+public:
+ using ValueT = T;
+ static constexpr Type kType = vtype;
+
+ size_t size() const {
+ SkASSERT(this->getType() == kType);
+ return *this->ptr<size_t>();
+ }
+
+ const T* begin() const {
+ SkASSERT(this->getType() == kType);
+ const auto* size_ptr = this->ptr<size_t>();
+ return reinterpret_cast<const T*>(size_ptr + 1);
+ }
+
+ const T* end() const {
+ SkASSERT(this->getType() == kType);
+ const auto* size_ptr = this->ptr<size_t>();
+ return reinterpret_cast<const T*>(size_ptr + 1) + *size_ptr;
+ }
+
+ const T& operator[](size_t i) const {
+ SkASSERT(this->getType() == kType);
+ SkASSERT(i < this->size());
+
+ return *(this->begin() + i);
+ }
+};
+
+class ArrayValue final : public VectorValue<Value, Value::Type::kArray> {
+public:
+ ArrayValue(const Value* src, size_t size, SkArenaAlloc& alloc);
+};
+
+class StringValue final : public Value {
+public:
+ static constexpr Type kType = Type::kString;
+
+ StringValue();
+ StringValue(const char* src, size_t size, SkArenaAlloc& alloc);
+
+ size_t size() const {
+ switch (this->getTag()) {
+ case Tag::kShortString:
+ // We don't bother storing a length for short strings on the assumption
+ // that strlen is fast in this case. If this becomes problematic, we
+ // can either go back to storing (7-len) in the tag byte or write a fast
+ // short_strlen.
+ return strlen(this->cast<char>());
+ case Tag::kString:
+ return this->cast<VectorValue<char, Value::Type::kString>>()->size();
+ default:
+ return 0;
+ }
+ }
+
+ const char* begin() const {
+ return this->getTag() == Tag::kShortString
+ ? this->cast<char>()
+ : this->cast<VectorValue<char, Value::Type::kString>>()->begin();
+ }
+
+ const char* end() const {
+ return this->getTag() == Tag::kShortString
+ ? strchr(this->cast<char>(), '\0')
+ : this->cast<VectorValue<char, Value::Type::kString>>()->end();
+ }
+};
+
+struct Member {
+ StringValue fKey;
+ Value fValue;
+};
+
+class ObjectValue final : public VectorValue<Member, Value::Type::kObject> {
+public:
+ ObjectValue(const Member* src, size_t size, SkArenaAlloc& alloc);
+
+ const Value& operator[](const char*) const;
+
+private:
+ // Not particularly interesting - hiding for disambiguation.
+ const Member& operator[](size_t i) const = delete;
+};
+
+class DOM final : public SkNoncopyable {
+public:
+ DOM(const char*, size_t);
+
+ const Value& root() const { return fRoot; }
+
+ void write(SkWStream*) const;
+
+private:
+ SkArenaAlloc fAlloc;
+ Value fRoot;
+};
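+
+// Example (a minimal sketch): parse a document and read a numeric field.
+//
+//   DOM dom(json, strlen(json));
+//   if (const ObjectValue* obj = dom.root()) {
+//       if (const NumberValue* n = (*obj)["id"]) {
+//           SkDebugf("id: %f\n", **n);
+//       }
+//   }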
+
+inline Value::Type Value::getType() const {
+ switch (this->getTag()) {
+ case Tag::kNull: return Type::kNull;
+ case Tag::kBool: return Type::kBool;
+ case Tag::kInt: return Type::kNumber;
+ case Tag::kFloat: return Type::kNumber;
+ case Tag::kShortString: return Type::kString;
+ case Tag::kString: return Type::kString;
+ case Tag::kArray: return Type::kArray;
+ case Tag::kObject: return Type::kObject;
+ }
+
+ SkASSERT(false); // unreachable
+ return Type::kNull;
+}
+
+} // namespace skjson
+
+#endif // SkJSON_DEFINED
+
diff --git a/gfx/skia/skia/src/utils/SkJSONWriter.cpp b/gfx/skia/skia/src/utils/SkJSONWriter.cpp
new file mode 100644
index 0000000000..40b9be4404
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSONWriter.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Make sure that the PRI format string macros are defined
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <stdarg.h>
+
+#include "src/utils/SkJSONWriter.h"
+
+void SkJSONWriter::appendS64(int64_t value) {
+ this->beginValue();
+ this->appendf("%" PRId64, value);
+}
+
+void SkJSONWriter::appendU64(uint64_t value) {
+ this->beginValue();
+ this->appendf("%" PRIu64, value);
+}
+
+void SkJSONWriter::appendHexU64(uint64_t value) {
+ this->beginValue();
+ this->appendf("\"0x%" PRIx64 "\"", value);
+}
+
+void SkJSONWriter::appendf(const char* fmt, ...) {
+ const int kBufferSize = 1024;
+ char buffer[kBufferSize];
+ va_list argp;
+ va_start(argp, fmt);
+#ifdef SK_BUILD_FOR_WIN
+ int length = _vsnprintf_s(buffer, kBufferSize, _TRUNCATE, fmt, argp);
+#else
+ int length = vsnprintf(buffer, kBufferSize, fmt, argp);
+#endif
+ SkASSERT(length >= 0 && length < kBufferSize);
+ va_end(argp);
+ this->write(buffer, length);
+}
diff --git a/gfx/skia/skia/src/utils/SkJSONWriter.h b/gfx/skia/skia/src/utils/SkJSONWriter.h
new file mode 100644
index 0000000000..decb901c13
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSONWriter.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJSONWriter_DEFINED
+#define SkJSONWriter_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTArray.h"
+
+/**
+ * Lightweight class for writing properly structured JSON data. No random-access, everything must
+ * be generated in-order. The resulting JSON is written directly to the SkWStream supplied at
+ * construction time. Output is buffered, so writing to disk (via an SkFILEWStream) is ideal.
+ *
+ * There is a basic state machine to ensure that JSON is structured correctly, and to allow for
+ * (optional) pretty formatting.
+ *
+ * This class adheres to the RFC-4627 usage of JSON (not ECMA-404). In other words, all JSON
+ * created with this class must have a top-level object or array. Free-floating values of other
+ * types are not considered valid.
+ *
+ * Note that all error checking is in the form of asserts - invalid usage in a non-debug build
+ * will simply produce invalid JSON.
+ */
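+//
+// Example (a minimal sketch):
+//
+//   SkDynamicMemoryWStream stream;
+//   SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
+//   writer.beginObject();            // {
+//   writer.appendS32("width", 800);  //   "width": 800,
+//   writer.appendS32("height", 600); //   "height": 600
+//   writer.endObject();              // }
+//   writer.flush();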
+class SkJSONWriter : SkNoncopyable {
+public:
+ enum class Mode {
+ /**
+ * Output the minimal amount of text. No additional whitespace (including newlines) is
+ * generated. The resulting JSON is suitable for fast parsing and machine consumption.
+ */
+ kFast,
+
+ /**
+ * Output human-readable JSON, with indented objects and arrays, and one value per line.
+ * Slightly slower than kFast, and produces data that is somewhat larger.
+ */
+ kPretty
+ };
+
+ /**
+ * Construct a JSON writer that will serialize all the generated JSON to 'stream'.
+ */
+ SkJSONWriter(SkWStream* stream, Mode mode = Mode::kFast)
+ : fBlock(new char[kBlockSize])
+ , fWrite(fBlock)
+ , fBlockEnd(fBlock + kBlockSize)
+ , fStream(stream)
+ , fMode(mode)
+ , fState(State::kStart) {
+ fScopeStack.push_back(Scope::kNone);
+ fNewlineStack.push_back(true);
+ }
+
+ ~SkJSONWriter() {
+ this->flush();
+ delete[] fBlock;
+ SkASSERT(fScopeStack.count() == 1);
+ SkASSERT(fNewlineStack.count() == 1);
+ }
+
+ /**
+ * Force all buffered output to be flushed to the underlying stream.
+ */
+ void flush() {
+ if (fWrite != fBlock) {
+ fStream->write(fBlock, fWrite - fBlock);
+ fWrite = fBlock;
+ }
+ }
+
+ /**
+ * Append the name (key) portion of an object member. Must be called between beginObject() and
+ * endObject(). If you have both the name and value of an object member, you can simply call
+ * the two argument versions of the other append functions.
+ */
+ void appendName(const char* name) {
+ if (!name) {
+ return;
+ }
+ SkASSERT(Scope::kObject == this->scope());
+ SkASSERT(State::kObjectBegin == fState || State::kObjectValue == fState);
+ if (State::kObjectValue == fState) {
+ this->write(",", 1);
+ }
+ this->separator(this->multiline());
+ this->write("\"", 1);
+ this->write(name, strlen(name));
+ this->write("\":", 2);
+ fState = State::kObjectName;
+ }
+
+ /**
+ * Adds a new object. A name must be supplied when called between beginObject() and
+ * endObject(). Calls to beginObject() must be balanced by corresponding calls to endObject().
+ * By default, objects are written out with one named value per line (when in kPretty mode).
+ * This can be overridden for a particular object by passing false for multiline, which will
+ * keep the entire object on a single line. This can help with readability in some situations.
+ * In kFast mode, this parameter is ignored.
+ */
+ void beginObject(const char* name = nullptr, bool multiline = true) {
+ this->appendName(name);
+ this->beginValue(true);
+ this->write("{", 1);
+ fScopeStack.push_back(Scope::kObject);
+ fNewlineStack.push_back(multiline);
+ fState = State::kObjectBegin;
+ }
+
+ /**
+ * Ends an object that was previously started with beginObject().
+ */
+ void endObject() {
+ SkASSERT(Scope::kObject == this->scope());
+ SkASSERT(State::kObjectBegin == fState || State::kObjectValue == fState);
+ bool emptyObject = State::kObjectBegin == fState;
+ bool wasMultiline = this->multiline();
+ this->popScope();
+ if (!emptyObject) {
+ this->separator(wasMultiline);
+ }
+ this->write("}", 1);
+ }
+
+ /**
+ * Adds a new array. A name must be supplied when called between beginObject() and
+ * endObject(). Calls to beginArray() must be balanced by corresponding calls to endArray().
+ * By default, arrays are written out with one value per line (when in kPretty mode).
+ * This can be overridden for a particular array by passing false for multiline, which will
+ * keep the entire array on a single line. This can help with readability in some situations.
+ * In kFast mode, this parameter is ignored.
+ */
+ void beginArray(const char* name = nullptr, bool multiline = true) {
+ this->appendName(name);
+ this->beginValue(true);
+ this->write("[", 1);
+ fScopeStack.push_back(Scope::kArray);
+ fNewlineStack.push_back(multiline);
+ fState = State::kArrayBegin;
+ }
+
+ /**
+ * Ends an array that was previously started with beginArray().
+ */
+ void endArray() {
+ SkASSERT(Scope::kArray == this->scope());
+ SkASSERT(State::kArrayBegin == fState || State::kArrayValue == fState);
+ bool emptyArray = State::kArrayBegin == fState;
+ bool wasMultiline = this->multiline();
+ this->popScope();
+ if (!emptyArray) {
+ this->separator(wasMultiline);
+ }
+ this->write("]", 1);
+ }
+
+ /**
+ * Functions for adding values of various types. The single argument versions add un-named
+ * values, so must be called either
+ * - Between beginArray() and endArray() -or-
+ * - Between beginObject() and endObject(), after calling appendName()
+ */
+ void appendString(const char* value) {
+ this->beginValue();
+ this->write("\"", 1);
+ if (value) {
+ while (*value) {
+ switch (*value) {
+ case '"': this->write("\\\"", 2); break;
+ case '\\': this->write("\\\\", 2); break;
+ case '\b': this->write("\\b", 2); break;
+ case '\f': this->write("\\f", 2); break;
+ case '\n': this->write("\\n", 2); break;
+ case '\r': this->write("\\r", 2); break;
+ case '\t': this->write("\\t", 2); break;
+ default: this->write(value, 1); break;
+ }
+ value++;
+ }
+ }
+ this->write("\"", 1);
+ }
+
+ void appendPointer(const void* value) { this->beginValue(); this->appendf("\"%p\"", value); }
+ void appendBool(bool value) {
+ this->beginValue();
+ if (value) {
+ this->write("true", 4);
+ } else {
+ this->write("false", 5);
+ }
+ }
+ void appendS32(int32_t value) { this->beginValue(); this->appendf("%d", value); }
+ void appendS64(int64_t value);
+ void appendU32(uint32_t value) { this->beginValue(); this->appendf("%u", value); }
+ void appendU64(uint64_t value);
+ void appendFloat(float value) { this->beginValue(); this->appendf("%g", value); }
+ void appendDouble(double value) { this->beginValue(); this->appendf("%g", value); }
+ void appendFloatDigits(float value, int digits) {
+ this->beginValue();
+ this->appendf("%.*g", digits, value);
+ }
+ void appendDoubleDigits(double value, int digits) {
+ this->beginValue();
+ this->appendf("%.*g", digits, value);
+ }
+ void appendHexU32(uint32_t value) { this->beginValue(); this->appendf("\"0x%x\"", value); }
+ void appendHexU64(uint64_t value);
+
+#define DEFINE_NAMED_APPEND(function, type) \
+ void function(const char* name, type value) { this->appendName(name); this->function(value); }
+
+ /**
+ * Functions for adding named values of various types. These add a name field, so must be
+ * called between beginObject() and endObject().
+ */
+ DEFINE_NAMED_APPEND(appendString, const char *)
+ DEFINE_NAMED_APPEND(appendPointer, const void *)
+ DEFINE_NAMED_APPEND(appendBool, bool)
+ DEFINE_NAMED_APPEND(appendS32, int32_t)
+ DEFINE_NAMED_APPEND(appendS64, int64_t)
+ DEFINE_NAMED_APPEND(appendU32, uint32_t)
+ DEFINE_NAMED_APPEND(appendU64, uint64_t)
+ DEFINE_NAMED_APPEND(appendFloat, float)
+ DEFINE_NAMED_APPEND(appendDouble, double)
+ DEFINE_NAMED_APPEND(appendHexU32, uint32_t)
+ DEFINE_NAMED_APPEND(appendHexU64, uint64_t)
+
+#undef DEFINE_NAMED_APPEND
+
+ void appendFloatDigits(const char* name, float value, int digits) {
+ this->appendName(name);
+ this->appendFloatDigits(value, digits);
+ }
+ void appendDoubleDigits(const char* name, double value, int digits) {
+ this->appendName(name);
+ this->appendDoubleDigits(value, digits);
+ }
+
+private:
+ enum {
+ // Using a 32k scratch block gives big performance wins, but we see diminishing returns going
+ // any larger. Even with a 1MB block, time to write a large (~300 MB) JSON file only drops
+ // another ~10%.
+ kBlockSize = 32 * 1024,
+ };
+
+ enum class Scope {
+ kNone,
+ kObject,
+ kArray
+ };
+
+ enum class State {
+ kStart,
+ kEnd,
+ kObjectBegin,
+ kObjectName,
+ kObjectValue,
+ kArrayBegin,
+ kArrayValue,
+ };
+
+ void appendf(const char* fmt, ...);
+
+ void beginValue(bool structure = false) {
+ SkASSERT(State::kObjectName == fState ||
+ State::kArrayBegin == fState ||
+ State::kArrayValue == fState ||
+ (structure && State::kStart == fState));
+ if (State::kArrayValue == fState) {
+ this->write(",", 1);
+ }
+ if (Scope::kArray == this->scope()) {
+ this->separator(this->multiline());
+ } else if (Scope::kObject == this->scope() && Mode::kPretty == fMode) {
+ this->write(" ", 1);
+ }
+ // We haven't added the value yet, but all (non-structure) callers emit something
+ // immediately, so transition state, to simplify the calling code.
+ if (!structure) {
+ fState = Scope::kArray == this->scope() ? State::kArrayValue : State::kObjectValue;
+ }
+ }
+
+ void separator(bool multiline) {
+ if (Mode::kPretty == fMode) {
+ if (multiline) {
+ this->write("\n", 1);
+ for (int i = 0; i < fScopeStack.count() - 1; ++i) {
+ this->write(" ", 3);
+ }
+ } else {
+ this->write(" ", 1);
+ }
+ }
+ }
+
+ void write(const char* buf, size_t length) {
+ if (static_cast<size_t>(fBlockEnd - fWrite) < length) {
+ // Don't worry about splitting writes that overflow our block.
+ this->flush();
+ }
+ if (length > kBlockSize) {
+ // Send particularly large writes straight through to the stream (unbuffered).
+ fStream->write(buf, length);
+ } else {
+ memcpy(fWrite, buf, length);
+ fWrite += length;
+ }
+ }
+
+ Scope scope() const {
+ SkASSERT(!fScopeStack.empty());
+ return fScopeStack.back();
+ }
+
+ bool multiline() const {
+ SkASSERT(!fNewlineStack.empty());
+ return fNewlineStack.back();
+ }
+
+ void popScope() {
+ fScopeStack.pop_back();
+ fNewlineStack.pop_back();
+ switch (this->scope()) {
+ case Scope::kNone:
+ fState = State::kEnd;
+ break;
+ case Scope::kObject:
+ fState = State::kObjectValue;
+ break;
+ case Scope::kArray:
+ fState = State::kArrayValue;
+ break;
+ default:
+ SkDEBUGFAIL("Invalid scope");
+ break;
+ }
+ }
+
+ char* fBlock;
+ char* fWrite;
+ char* fBlockEnd;
+
+ SkWStream* fStream;
+ Mode fMode;
+ State fState;
+ SkSTArray<16, Scope, true> fScopeStack;
+ SkSTArray<16, bool, true> fNewlineStack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkLua.cpp b/gfx/skia/skia/src/utils/SkLua.cpp
new file mode 100644
index 0000000000..03318bb282
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkLua.cpp
@@ -0,0 +1,1993 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkLua.h"
+
+#if SK_SUPPORT_GPU
+//#include "src/gpu/GrReducedClip.h"
+#endif
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypeface.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/effects/SkGradientShader.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/SkTo.h"
+#include "modules/skshaper/include/SkShaper.h"
+#include "src/core/SkMakeUnique.h"
+#include <new>
+
+extern "C" {
+ #include "lua.h"
+ #include "lualib.h"
+ #include "lauxlib.h"
+}
+
+struct DocHolder {
+ sk_sp<SkDocument> fDoc;
+ std::unique_ptr<SkWStream> fStream;
+};
+
+// return the metatable name for a given class
+template <typename T> const char* get_mtname();
+#define DEF_MTNAME(T) \
+ template <> const char* get_mtname<T>() { \
+ return #T "_LuaMetaTableName"; \
+ }
+
+DEF_MTNAME(SkCanvas)
+DEF_MTNAME(SkColorFilter)
+DEF_MTNAME(DocHolder)
+DEF_MTNAME(SkFont)
+DEF_MTNAME(SkImage)
+DEF_MTNAME(SkImageFilter)
+DEF_MTNAME(SkMatrix)
+DEF_MTNAME(SkRRect)
+DEF_MTNAME(SkPath)
+DEF_MTNAME(SkPaint)
+DEF_MTNAME(SkPathEffect)
+DEF_MTNAME(SkPicture)
+DEF_MTNAME(SkPictureRecorder)
+DEF_MTNAME(SkShader)
+DEF_MTNAME(SkSurface)
+DEF_MTNAME(SkTextBlob)
+DEF_MTNAME(SkTypeface)
+DEF_MTNAME(SkFontStyle)
+
+template <typename T, typename... Args> T* push_new(lua_State* L, Args&&... args) {
+ T* addr = (T*)lua_newuserdata(L, sizeof(T));
+ new (addr) T(std::forward<Args>(args)...);
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+ return addr;
+}
+
+template <typename T> void push_obj(lua_State* L, const T& obj) {
+ new (lua_newuserdata(L, sizeof(T))) T(obj);
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+}
+
+template <typename T> T* push_ptr(lua_State* L, T* ptr) {
+ *(T**)lua_newuserdata(L, sizeof(T*)) = ptr;
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+ return ptr;
+}
+
+template <typename T> T* push_ref(lua_State* L, T* ref) {
+ *(T**)lua_newuserdata(L, sizeof(T*)) = SkSafeRef(ref);
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+ return ref;
+}
+
+template <typename T> void push_ref(lua_State* L, sk_sp<T> sp) {
+ *(T**)lua_newuserdata(L, sizeof(T*)) = sp.release();
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+}
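+
+// E.g. (a minimal sketch) handing a ref-counted Skia object to Lua:
+//
+//   push_ref(L, SkTypeface::MakeDefault());   // Lua takes ownership of a ref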
+
+template <typename T> T* get_ref(lua_State* L, int index) {
+ return *(T**)luaL_checkudata(L, index, get_mtname<T>());
+}
+
+template <typename T> T* get_obj(lua_State* L, int index) {
+ return (T*)luaL_checkudata(L, index, get_mtname<T>());
+}
+
+static bool lua2bool(lua_State* L, int index) {
+ return !!lua_toboolean(L, index);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLua::SkLua(const char termCode[]) : fTermCode(termCode), fWeOwnL(true) {
+ fL = luaL_newstate();
+ luaL_openlibs(fL);
+ SkLua::Load(fL);
+}
+
+SkLua::SkLua(lua_State* L) : fL(L), fWeOwnL(false) {}
+
+SkLua::~SkLua() {
+ if (fWeOwnL) {
+ if (fTermCode.size() > 0) {
+ lua_getglobal(fL, fTermCode.c_str());
+ if (lua_pcall(fL, 0, 0, 0) != LUA_OK) {
+ SkDebugf("lua err: %s\n", lua_tostring(fL, -1));
+ }
+ }
+ lua_close(fL);
+ }
+}
+
+bool SkLua::runCode(const char code[]) {
+ int err = luaL_loadstring(fL, code) || lua_pcall(fL, 0, 0, 0);
+ if (err) {
+ SkDebugf("--- lua failed: %s\n", lua_tostring(fL, -1));
+ return false;
+ }
+ return true;
+}
+
+bool SkLua::runCode(const void* code, size_t size) {
+ SkString str((const char*)code, size);
+ return this->runCode(str.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define CHECK_SETFIELD(key) do if (key) lua_setfield(fL, -2, key); while (0)
+
+static void setfield_bool_if(lua_State* L, const char key[], bool pred) {
+ if (pred) {
+ lua_pushboolean(L, true);
+ lua_setfield(L, -2, key);
+ }
+}
+
+static void setfield_string(lua_State* L, const char key[], const char value[]) {
+ lua_pushstring(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_number(lua_State* L, const char key[], double value) {
+ lua_pushnumber(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_boolean(lua_State* L, const char key[], bool value) {
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_scalar(lua_State* L, const char key[], SkScalar value) {
+ setfield_number(L, key, SkScalarToLua(value));
+}
+
+static void setfield_function(lua_State* L,
+ const char key[], lua_CFunction value) {
+ lua_pushcfunction(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int lua2int_def(lua_State* L, int index, int defaultValue) {
+ if (lua_isnumber(L, index)) {
+ return (int)lua_tonumber(L, index);
+ } else {
+ return defaultValue;
+ }
+}
+
+static SkScalar lua2scalar(lua_State* L, int index) {
+ SkASSERT(lua_isnumber(L, index));
+ return SkLuaToScalar(lua_tonumber(L, index));
+}
+
+static SkScalar lua2scalar_def(lua_State* L, int index, SkScalar defaultValue) {
+ if (lua_isnumber(L, index)) {
+ return SkLuaToScalar(lua_tonumber(L, index));
+ } else {
+ return defaultValue;
+ }
+}
+
+static SkScalar getarray_scalar(lua_State* L, int stackIndex, int arrayIndex) {
+ SkASSERT(lua_istable(L, stackIndex));
+ lua_rawgeti(L, stackIndex, arrayIndex);
+
+ SkScalar value = lua2scalar(L, -1);
+ lua_pop(L, 1);
+ return value;
+}
+
+static void getarray_scalars(lua_State* L, int stackIndex, SkScalar dst[], int count) {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = getarray_scalar(L, stackIndex, i + 1);
+ }
+}
+
+static void getarray_points(lua_State* L, int stackIndex, SkPoint pts[], int count) {
+ getarray_scalars(L, stackIndex, &pts[0].fX, count * 2);
+}
+
+static void setarray_number(lua_State* L, int index, double value) {
+ lua_pushnumber(L, value);
+ lua_rawseti(L, -2, index);
+}
+
+static void setarray_scalar(lua_State* L, int index, SkScalar value) {
+ setarray_number(L, index, SkScalarToLua(value));
+}
+
+static void setarray_string(lua_State* L, int index, const char str[]) {
+ lua_pushstring(L, str);
+ lua_rawseti(L, -2, index);
+}
+
+void SkLua::pushBool(bool value, const char key[]) {
+ lua_pushboolean(fL, value);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const char str[], const char key[]) {
+ lua_pushstring(fL, str);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const char str[], size_t length, const char key[]) {
+ // TODO: how to do this w/o making a copy?
+ SkString s(str, length);
+ lua_pushstring(fL, s.c_str());
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const SkString& str, const char key[]) {
+ lua_pushstring(fL, str.c_str());
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushColor(SkColor color, const char key[]) {
+ lua_newtable(fL);
+ setfield_number(fL, "a", SkColorGetA(color) / 255.0);
+ setfield_number(fL, "r", SkColorGetR(color) / 255.0);
+ setfield_number(fL, "g", SkColorGetG(color) / 255.0);
+ setfield_number(fL, "b", SkColorGetB(color) / 255.0);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushU32(uint32_t value, const char key[]) {
+ lua_pushnumber(fL, (double)value);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushScalar(SkScalar value, const char key[]) {
+ lua_pushnumber(fL, SkScalarToLua(value));
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayU16(const uint16_t array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ setarray_number(fL, i + 1, (double)array[i]);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayPoint(const SkPoint array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ lua_newtable(fL);
+ this->pushScalar(array[i].fX, "x");
+ this->pushScalar(array[i].fY, "y");
+ lua_rawseti(fL, -2, i + 1);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayScalar(const SkScalar array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ setarray_scalar(fL, i + 1, array[i]);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushRect(const SkRect& r, const char key[]) {
+ lua_newtable(fL);
+ setfield_scalar(fL, "left", r.fLeft);
+ setfield_scalar(fL, "top", r.fTop);
+ setfield_scalar(fL, "right", r.fRight);
+ setfield_scalar(fL, "bottom", r.fBottom);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushRRect(const SkRRect& rr, const char key[]) {
+ push_obj(fL, rr);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushDash(const SkPathEffect::DashInfo& info, const char key[]) {
+ lua_newtable(fL);
+ setfield_scalar(fL, "phase", info.fPhase);
+ this->pushArrayScalar(info.fIntervals, info.fCount, "intervals");
+ CHECK_SETFIELD(key);
+}
+
+
+void SkLua::pushMatrix(const SkMatrix& matrix, const char key[]) {
+ push_obj(fL, matrix);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushPaint(const SkPaint& paint, const char key[]) {
+ push_obj(fL, paint);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushPath(const SkPath& path, const char key[]) {
+ push_obj(fL, path);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushCanvas(SkCanvas* canvas, const char key[]) {
+ push_ptr(fL, canvas);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushTextBlob(const SkTextBlob* blob, const char key[]) {
+ push_ref(fL, const_cast<SkTextBlob*>(blob));
+ CHECK_SETFIELD(key);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static SkScalar getfield_scalar(lua_State* L, int index, const char key[]) {
+ SkASSERT(lua_istable(L, index));
+ lua_pushstring(L, key);
+ lua_gettable(L, index);
+
+ SkScalar value = lua2scalar(L, -1);
+ lua_pop(L, 1);
+ return value;
+}
+
+static SkScalar getfield_scalar_default(lua_State* L, int index, const char key[], SkScalar def) {
+ SkASSERT(lua_istable(L, index));
+ lua_pushstring(L, key);
+ lua_gettable(L, index);
+
+ SkScalar value;
+ if (lua_isnil(L, -1)) {
+ value = def;
+ } else {
+ value = lua2scalar(L, -1);
+ }
+ lua_pop(L, 1);
+ return value;
+}
+
+static SkScalar byte2unit(U8CPU byte) {
+ return byte / 255.0f;
+}
+
+static U8CPU unit2byte(SkScalar x) {
+ if (x <= 0) {
+ return 0;
+ } else if (x >= 1) {
+ return 255;
+ } else {
+ return SkScalarRoundToInt(x * 255);
+ }
+}
+
+static SkColor lua2color(lua_State* L, int index) {
+ return SkColorSetARGB(unit2byte(getfield_scalar_default(L, index, "a", 1)),
+ unit2byte(getfield_scalar_default(L, index, "r", 0)),
+ unit2byte(getfield_scalar_default(L, index, "g", 0)),
+ unit2byte(getfield_scalar_default(L, index, "b", 0)));
+}
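+
+// Worked example of the color convention: colors cross the boundary as tables
+// of unit floats, so { a=1, r=0.5, g=0, b=0 } becomes
+// SkColorSetARGB(255, 128, 0, 0) == 0xFF800000 (unit2byte rounds 0.5*255 up).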
+
+static SkRect* lua2rect(lua_State* L, int index, SkRect* rect) {
+ rect->setLTRB(getfield_scalar_default(L, index, "left", 0),
+ getfield_scalar_default(L, index, "top", 0),
+ getfield_scalar(L, index, "right"),
+ getfield_scalar(L, index, "bottom"));
+ return rect;
+}
+
+static int lcanvas_clear(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->clear(0);
+ return 0;
+}
+
+static int lcanvas_drawColor(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawColor(lua2color(L, 2));
+ return 0;
+}
+
+static int lcanvas_drawPaint(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawPaint(*get_obj<SkPaint>(L, 2));
+ return 0;
+}
+
+static int lcanvas_drawRect(lua_State* L) {
+ SkRect rect;
+ lua2rect(L, 2, &rect);
+ const SkPaint* paint = get_obj<SkPaint>(L, 3);
+ get_ref<SkCanvas>(L, 1)->drawRect(rect, *paint);
+ return 0;
+}
+
+static int lcanvas_drawOval(lua_State* L) {
+ SkRect rect;
+ get_ref<SkCanvas>(L, 1)->drawOval(*lua2rect(L, 2, &rect),
+ *get_obj<SkPaint>(L, 3));
+ return 0;
+}
+
+static int lcanvas_drawCircle(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawCircle(lua2scalar(L, 2),
+ lua2scalar(L, 3),
+ lua2scalar(L, 4),
+ *get_obj<SkPaint>(L, 5));
+ return 0;
+}
+
+static SkPaint* lua2OptionalPaint(lua_State* L, int index, SkPaint* paint) {
+ if (lua_isnumber(L, index)) {
+ paint->setAlpha(SkScalarRoundToInt(lua2scalar(L, index) * 255));
+ return paint;
+ } else if (lua_isuserdata(L, index)) {
+ const SkPaint* ptr = get_obj<SkPaint>(L, index);
+ if (ptr) {
+ *paint = *ptr;
+ return paint;
+ }
+ }
+ return nullptr;
+}
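+
+// Convention for the optional trailing paint argument used by several draw
+// methods below: a number is treated as an opacity applied to the caller's
+// default paint, a SkPaint userdata is copied wholesale, and anything else
+// yields nullptr, i.e. "no paint".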
+
+static int lcanvas_drawImage(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkImage* image = get_ref<SkImage>(L, 2);
+ if (nullptr == image) {
+ return 0;
+ }
+ SkScalar x = lua2scalar(L, 3);
+ SkScalar y = lua2scalar(L, 4);
+
+ SkPaint paint;
+ canvas->drawImage(image, x, y, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawImageRect(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkImage* image = get_ref<SkImage>(L, 2);
+ if (nullptr == image) {
+ return 0;
+ }
+
+ SkRect srcR, dstR;
+ SkRect* srcRPtr = nullptr;
+ if (!lua_isnil(L, 3)) {
+ srcRPtr = lua2rect(L, 3, &srcR);
+ }
+ lua2rect(L, 4, &dstR);
+
+ SkPaint paint;
+ canvas->legacy_drawImageRect(image, srcRPtr, dstR, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawPatch(lua_State* L) {
+ SkPoint cubics[12];
+ SkColor colorStorage[4];
+ SkPoint texStorage[4];
+
+ const SkColor* colors = nullptr;
+ const SkPoint* texs = nullptr;
+
+ getarray_points(L, 2, cubics, 12);
+
+ colorStorage[0] = SK_ColorRED;
+ colorStorage[1] = SK_ColorGREEN;
+ colorStorage[2] = SK_ColorBLUE;
+ colorStorage[3] = SK_ColorGRAY;
+
+ if (lua_isnil(L, 4)) {
+ colors = colorStorage;
+ } else {
+ getarray_points(L, 4, texStorage, 4);
+ texs = texStorage;
+ }
+
+ get_ref<SkCanvas>(L, 1)->drawPatch(cubics, colors, texs, *get_obj<SkPaint>(L, 5));
+ return 0;
+}
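+
+// Note: as written, drawPatch never reads per-corner colors from Lua; argument
+// 3 is unused. A nil argument 4 selects the hard-coded red/green/blue/gray
+// corner colors, while a table there is read as texture coordinates (leaving
+// colors nullptr).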
+
+static int lcanvas_drawPath(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawPath(*get_obj<SkPath>(L, 2),
+ *get_obj<SkPaint>(L, 3));
+ return 0;
+}
+
+// drawPicture(pic, x, y, paint)
+static int lcanvas_drawPicture(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkPicture* picture = get_ref<SkPicture>(L, 2);
+ SkScalar x = lua2scalar_def(L, 3, 0);
+ SkScalar y = lua2scalar_def(L, 4, 0);
+ SkMatrix matrix, *matrixPtr = nullptr;
+ if (x || y) {
+ matrix.setTranslate(x, y);
+ matrixPtr = &matrix;
+ }
+ SkPaint paint;
+ canvas->drawPicture(picture, matrixPtr, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawText(lua_State* L) {
+ if (lua_gettop(L) < 5) {
+ return 0;
+ }
+
+ // TODO: restore this logic based on SkFont instead of SkPaint
+#if 0
+ if (lua_isstring(L, 2) && lua_isnumber(L, 3) && lua_isnumber(L, 4)) {
+ size_t len;
+ const char* text = lua_tolstring(L, 2, &len);
+ get_ref<SkCanvas>(L, 1)->drawSimpleText(
+ text, len, SkTextEncoding::kUTF8,
+ lua2scalar(L, 3), lua2scalar(L, 4),
+ SkFont::LEGACY_ExtractFromPaint(*get_obj<SkPaint>(L, 5)),
+ *get_obj<SkPaint>(L, 5));
+ }
+#endif
+ return 0;
+}
+
+static int lcanvas_drawTextBlob(lua_State* L) {
+ const SkTextBlob* blob = get_ref<SkTextBlob>(L, 2);
+ SkScalar x = lua2scalar(L, 3);
+ SkScalar y = lua2scalar(L, 4);
+ const SkPaint& paint = *get_obj<SkPaint>(L, 5);
+ get_ref<SkCanvas>(L, 1)->drawTextBlob(blob, x, y, paint);
+ return 0;
+}
+
+static int lcanvas_getSaveCount(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkCanvas>(L, 1)->getSaveCount());
+ return 1;
+}
+
+static int lcanvas_getTotalMatrix(lua_State* L) {
+ SkLua(L).pushMatrix(get_ref<SkCanvas>(L, 1)->getTotalMatrix());
+ return 1;
+}
+
+static int lcanvas_save(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkCanvas>(L, 1)->save());
+ return 1;
+}
+
+static int lcanvas_saveLayer(lua_State* L) {
+ SkPaint paint;
+ lua_pushinteger(L, get_ref<SkCanvas>(L, 1)->saveLayer(nullptr, lua2OptionalPaint(L, 2, &paint)));
+ return 1;
+}
+
+static int lcanvas_restore(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->restore();
+ return 0;
+}
+
+static int lcanvas_scale(lua_State* L) {
+ SkScalar sx = lua2scalar_def(L, 2, 1);
+ SkScalar sy = lua2scalar_def(L, 3, sx);
+ get_ref<SkCanvas>(L, 1)->scale(sx, sy);
+ return 0;
+}
+
+static int lcanvas_translate(lua_State* L) {
+ SkScalar tx = lua2scalar_def(L, 2, 0);
+ SkScalar ty = lua2scalar_def(L, 3, 0);
+ get_ref<SkCanvas>(L, 1)->translate(tx, ty);
+ return 0;
+}
+
+static int lcanvas_rotate(lua_State* L) {
+ SkScalar degrees = lua2scalar_def(L, 2, 0);
+ get_ref<SkCanvas>(L, 1)->rotate(degrees);
+ return 0;
+}
+
+static int lcanvas_concat(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->concat(*get_obj<SkMatrix>(L, 2));
+ return 0;
+}
+
+static int lcanvas_newSurface(lua_State* L) {
+ int width = lua2int_def(L, 2, 0);
+ int height = lua2int_def(L, 3, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ auto surface = get_ref<SkCanvas>(L, 1)->makeSurface(info);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lcanvas_gc(lua_State* L) {
+    // nothing to collect here: canvases are pushed via push_ptr, so Lua does not own them
+ return 0;
+}
+
+const struct luaL_Reg gSkCanvas_Methods[] = {
+ { "clear", lcanvas_clear },
+ { "drawColor", lcanvas_drawColor },
+ { "drawPaint", lcanvas_drawPaint },
+ { "drawRect", lcanvas_drawRect },
+ { "drawOval", lcanvas_drawOval },
+ { "drawCircle", lcanvas_drawCircle },
+ { "drawImage", lcanvas_drawImage },
+ { "drawImageRect", lcanvas_drawImageRect },
+ { "drawPatch", lcanvas_drawPatch },
+ { "drawPath", lcanvas_drawPath },
+ { "drawPicture", lcanvas_drawPicture },
+ { "drawText", lcanvas_drawText },
+ { "drawTextBlob", lcanvas_drawTextBlob },
+ { "getSaveCount", lcanvas_getSaveCount },
+ { "getTotalMatrix", lcanvas_getTotalMatrix },
+ { "save", lcanvas_save },
+ { "saveLayer", lcanvas_saveLayer },
+ { "restore", lcanvas_restore },
+ { "scale", lcanvas_scale },
+ { "translate", lcanvas_translate },
+ { "rotate", lcanvas_rotate },
+ { "concat", lcanvas_concat },
+
+ { "newSurface", lcanvas_newSurface },
+
+ { "__gc", lcanvas_gc },
+ { nullptr, nullptr }
+};
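+
+// Illustrative Lua usage of the methods registered above (a sketch):
+//
+//   local surface = Sk.newRasterSurface(256, 256)
+//   local canvas  = surface:getCanvas()
+//   local paint   = Sk.newPaint()
+//   paint:setAntiAlias(true)
+//   paint:setColor({ a = 1, r = 1, g = 0, b = 0 })
+//   canvas:drawOval({ left = 10, top = 10, right = 90, bottom = 60 }, paint)
+//   canvas:translate(20, 20)
+//   canvas:drawCircle(40, 40, 15, paint)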
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ldocument_beginPage(lua_State* L) {
+ const SkRect* contentPtr = nullptr;
+ push_ptr(L, get_obj<DocHolder>(L, 1)->fDoc->beginPage(lua2scalar(L, 2),
+ lua2scalar(L, 3),
+ contentPtr));
+ return 1;
+}
+
+static int ldocument_endPage(lua_State* L) {
+ get_obj<DocHolder>(L, 1)->fDoc->endPage();
+ return 0;
+}
+
+static int ldocument_close(lua_State* L) {
+ get_obj<DocHolder>(L, 1)->fDoc->close();
+ return 0;
+}
+
+static int ldocument_gc(lua_State* L) {
+ get_obj<DocHolder>(L, 1)->~DocHolder();
+ return 0;
+}
+
+static const struct luaL_Reg gDocHolder_Methods[] = {
+ { "beginPage", ldocument_beginPage },
+ { "endPage", ldocument_endPage },
+ { "close", ldocument_close },
+ { "__gc", ldocument_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpaint_isAntiAlias(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isAntiAlias());
+ return 1;
+}
+
+static int lpaint_setAntiAlias(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setAntiAlias(lua2bool(L, 2));
+ return 0;
+}
+
+static int lpaint_isDither(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isDither());
+ return 1;
+}
+
+static int lpaint_setDither(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setDither(lua2bool(L, 2));
+ return 0;
+}
+
+static int lpaint_getAlpha(lua_State* L) {
+ SkLua(L).pushScalar(byte2unit(get_obj<SkPaint>(L, 1)->getAlpha()));
+ return 1;
+}
+
+static int lpaint_setAlpha(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setAlpha(unit2byte(lua2scalar(L, 2)));
+ return 0;
+}
+
+static int lpaint_getColor(lua_State* L) {
+ SkLua(L).pushColor(get_obj<SkPaint>(L, 1)->getColor());
+ return 1;
+}
+
+static int lpaint_setColor(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setColor(lua2color(L, 2));
+ return 0;
+}
+
+static int lpaint_getFilterQuality(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getFilterQuality());
+ return 1;
+}
+
+static int lpaint_setFilterQuality(lua_State* L) {
+ int level = lua2int_def(L, 2, -1);
+ if (level >= 0 && level <= 3) {
+ get_obj<SkPaint>(L, 1)->setFilterQuality((SkFilterQuality)level);
+ }
+ return 0;
+}
+
+static int lpaint_getStroke(lua_State* L) {
+ lua_pushboolean(L, SkPaint::kStroke_Style == get_obj<SkPaint>(L, 1)->getStyle());
+ return 1;
+}
+
+static int lpaint_setStroke(lua_State* L) {
+ SkPaint::Style style;
+
+ if (lua_toboolean(L, 2)) {
+ style = SkPaint::kStroke_Style;
+ } else {
+ style = SkPaint::kFill_Style;
+ }
+ get_obj<SkPaint>(L, 1)->setStyle(style);
+ return 0;
+}
+
+static int lpaint_getStrokeCap(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getStrokeCap());
+ return 1;
+}
+
+static int lpaint_getStrokeJoin(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getStrokeJoin());
+ return 1;
+}
+
+static int lpaint_getStrokeWidth(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getStrokeWidth());
+ return 1;
+}
+
+static int lpaint_setStrokeWidth(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setStrokeWidth(lua2scalar(L, 2));
+ return 0;
+}
+
+static int lpaint_getStrokeMiter(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getStrokeMiter());
+ return 1;
+}
+
+static int lpaint_getEffects(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+
+ lua_newtable(L);
+ setfield_bool_if(L, "pathEffect", !!paint->getPathEffect());
+ setfield_bool_if(L, "maskFilter", !!paint->getMaskFilter());
+ setfield_bool_if(L, "shader", !!paint->getShader());
+ setfield_bool_if(L, "colorFilter", !!paint->getColorFilter());
+ setfield_bool_if(L, "imageFilter", !!paint->getImageFilter());
+ return 1;
+}
+
+static int lpaint_getColorFilter(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkColorFilter* cf = paint->getColorFilter();
+ if (cf) {
+ push_ref(L, cf);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setColorFilter(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setColorFilter(sk_ref_sp(get_ref<SkColorFilter>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getImageFilter(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkImageFilter* imf = paint->getImageFilter();
+ if (imf) {
+ push_ref(L, imf);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setImageFilter(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setImageFilter(sk_ref_sp(get_ref<SkImageFilter>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getShader(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkShader* shader = paint->getShader();
+ if (shader) {
+ push_ref(L, shader);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setShader(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setShader(sk_ref_sp(get_ref<SkShader>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getPathEffect(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkPathEffect* pe = paint->getPathEffect();
+ if (pe) {
+ push_ref(L, pe);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_getFillPath(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ const SkPath* path = get_obj<SkPath>(L, 2);
+
+ SkPath fillpath;
+ paint->getFillPath(*path, &fillpath);
+
+ SkLua lua(L);
+ lua.pushPath(fillpath);
+
+ return 1;
+}
+
+static int lpaint_gc(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->~SkPaint();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPaint_Methods[] = {
+ { "isAntiAlias", lpaint_isAntiAlias },
+ { "setAntiAlias", lpaint_setAntiAlias },
+ { "isDither", lpaint_isDither },
+ { "setDither", lpaint_setDither },
+ { "getFilterQuality", lpaint_getFilterQuality },
+ { "setFilterQuality", lpaint_setFilterQuality },
+ { "getAlpha", lpaint_getAlpha },
+ { "setAlpha", lpaint_setAlpha },
+ { "getColor", lpaint_getColor },
+ { "setColor", lpaint_setColor },
+ { "getStroke", lpaint_getStroke },
+ { "setStroke", lpaint_setStroke },
+ { "getStrokeCap", lpaint_getStrokeCap },
+ { "getStrokeJoin", lpaint_getStrokeJoin },
+ { "getStrokeWidth", lpaint_getStrokeWidth },
+ { "setStrokeWidth", lpaint_setStrokeWidth },
+ { "getStrokeMiter", lpaint_getStrokeMiter },
+ { "getEffects", lpaint_getEffects },
+ { "getColorFilter", lpaint_getColorFilter },
+ { "setColorFilter", lpaint_setColorFilter },
+ { "getImageFilter", lpaint_getImageFilter },
+ { "setImageFilter", lpaint_setImageFilter },
+ { "getShader", lpaint_getShader },
+ { "setShader", lpaint_setShader },
+ { "getPathEffect", lpaint_getPathEffect },
+ { "getFillPath", lpaint_getFillPath },
+ { "__gc", lpaint_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lfont_getSize(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkFont>(L, 1)->getSize());
+ return 1;
+}
+
+static int lfont_getScaleX(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkFont>(L, 1)->getScaleX());
+ return 1;
+}
+
+static int lfont_getSkewX(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkFont>(L, 1)->getSkewX());
+ return 1;
+}
+
+static int lfont_setSize(lua_State* L) {
+ get_obj<SkFont>(L, 1)->setSize(lua2scalar(L, 2));
+ return 0;
+}
+
+static int lfont_getTypeface(lua_State* L) {
+ push_ref(L, get_obj<SkFont>(L, 1)->getTypefaceOrDefault());
+ return 1;
+}
+
+static int lfont_setTypeface(lua_State* L) {
+ get_obj<SkFont>(L, 1)->setTypeface(sk_ref_sp(get_ref<SkTypeface>(L, 2)));
+ return 0;
+}
+
+static int lfont_getHinting(lua_State* L) {
+ SkLua(L).pushU32((unsigned)get_obj<SkFont>(L, 1)->getHinting());
+ return 1;
+}
+
+static int lfont_getFontID(lua_State* L) {
+ SkTypeface* face = get_obj<SkFont>(L, 1)->getTypefaceOrDefault();
+ SkLua(L).pushU32(SkTypeface::UniqueID(face));
+ return 1;
+}
+
+static int lfont_measureText(lua_State* L) {
+ if (lua_isstring(L, 2)) {
+ size_t len;
+ const char* text = lua_tolstring(L, 2, &len);
+ SkLua(L).pushScalar(get_obj<SkFont>(L, 1)->measureText(text, len, SkTextEncoding::kUTF8));
+ return 1;
+ }
+ return 0;
+}
+
+static int lfont_getMetrics(lua_State* L) {
+ SkFontMetrics fm;
+ SkScalar height = get_obj<SkFont>(L, 1)->getMetrics(&fm);
+
+ lua_newtable(L);
+ setfield_scalar(L, "top", fm.fTop);
+ setfield_scalar(L, "ascent", fm.fAscent);
+ setfield_scalar(L, "descent", fm.fDescent);
+ setfield_scalar(L, "bottom", fm.fBottom);
+ setfield_scalar(L, "leading", fm.fLeading);
+ SkLua(L).pushScalar(height);
+ return 2;
+}
+
+static int lfont_gc(lua_State* L) {
+ get_obj<SkFont>(L, 1)->~SkFont();
+ return 0;
+}
+
+static const struct luaL_Reg gSkFont_Methods[] = {
+ { "getSize", lfont_getSize },
+ { "setSize", lfont_setSize },
+ { "getScaleX", lfont_getScaleX },
+ { "getSkewX", lfont_getSkewX },
+ { "getTypeface", lfont_getTypeface },
+ { "setTypeface", lfont_setTypeface },
+ { "getHinting", lfont_getHinting },
+ { "getFontID", lfont_getFontID },
+ { "measureText", lfont_measureText },
+ { "getMetrics", lfont_getMetrics },
+ { "__gc", lfont_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char* mode2string(SkTileMode mode) {
+ static const char* gNames[] = { "clamp", "repeat", "mirror", "decal" };
+ SkASSERT((unsigned)mode < SK_ARRAY_COUNT(gNames));
+ return gNames[static_cast<int>(mode)];
+}
+
+static const char* gradtype2string(SkShader::GradientType t) {
+ static const char* gNames[] = {
+ "none", "color", "linear", "radial", "radial2", "sweep", "conical"
+ };
+ SkASSERT((unsigned)t < SK_ARRAY_COUNT(gNames));
+ return gNames[t];
+}
+
+static int lshader_isOpaque(lua_State* L) {
+    SkShader* shader = get_ref<SkShader>(L, 1);
+    // A lua_CFunction's return value is the number of results, so push an
+    // explicit boolean rather than returning the test itself.
+    lua_pushboolean(L, shader && shader->isOpaque());
+    return 1;
+}
+
+static int lshader_isAImage(lua_State* L) {
+ SkShader* shader = get_ref<SkShader>(L, 1);
+ if (shader) {
+ SkMatrix matrix;
+ SkTileMode modes[2];
+ if (SkImage* image = shader->isAImage(&matrix, modes)) {
+ lua_newtable(L);
+ setfield_number(L, "id", image->uniqueID());
+ setfield_number(L, "width", image->width());
+ setfield_number(L, "height", image->height());
+ setfield_string(L, "tileX", mode2string(modes[0]));
+ setfield_string(L, "tileY", mode2string(modes[1]));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lshader_asAGradient(lua_State* L) {
+ SkShader* shader = get_ref<SkShader>(L, 1);
+ if (shader) {
+ SkShader::GradientInfo info;
+ sk_bzero(&info, sizeof(info));
+
+ SkShader::GradientType t = shader->asAGradient(&info);
+
+ if (SkShader::kNone_GradientType != t) {
+ SkAutoTArray<SkScalar> pos(info.fColorCount);
+ info.fColorOffsets = pos.get();
+ shader->asAGradient(&info);
+
+ lua_newtable(L);
+ setfield_string(L, "type", gradtype2string(t));
+ setfield_string(L, "tile", mode2string((SkTileMode)info.fTileMode));
+ setfield_number(L, "colorCount", info.fColorCount);
+
+ lua_newtable(L);
+ for (int i = 0; i < info.fColorCount; i++) {
+ // Lua uses 1-based indexing
+ setarray_scalar(L, i+1, pos[i]);
+ }
+ lua_setfield(L, -2, "positions");
+
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lshader_gc(lua_State* L) {
+ get_ref<SkShader>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkShader_Methods[] = {
+ { "isOpaque", lshader_isOpaque },
+ { "isAImage", lshader_isAImage },
+ { "asAGradient", lshader_asAGradient },
+ { "__gc", lshader_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpatheffect_asADash(lua_State* L) {
+ SkPathEffect* pe = get_ref<SkPathEffect>(L, 1);
+ if (pe) {
+ SkPathEffect::DashInfo info;
+ SkPathEffect::DashType dashType = pe->asADash(&info);
+ if (SkPathEffect::kDash_DashType == dashType) {
+ SkAutoTArray<SkScalar> intervals(info.fCount);
+ info.fIntervals = intervals.get();
+ pe->asADash(&info);
+ SkLua(L).pushDash(info);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lpatheffect_gc(lua_State* L) {
+ get_ref<SkPathEffect>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPathEffect_Methods[] = {
+ { "asADash", lpatheffect_asADash },
+ { "__gc", lpatheffect_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpcolorfilter_gc(lua_State* L) {
+ get_ref<SkColorFilter>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkColorFilter_Methods[] = {
+ { "__gc", lpcolorfilter_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpimagefilter_gc(lua_State* L) {
+ get_ref<SkImageFilter>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkImageFilter_Methods[] = {
+ { "__gc", lpimagefilter_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lmatrix_getType(lua_State* L) {
+ SkMatrix::TypeMask mask = get_obj<SkMatrix>(L, 1)->getType();
+
+ lua_newtable(L);
+ setfield_boolean(L, "translate", SkToBool(mask & SkMatrix::kTranslate_Mask));
+ setfield_boolean(L, "scale", SkToBool(mask & SkMatrix::kScale_Mask));
+ setfield_boolean(L, "affine", SkToBool(mask & SkMatrix::kAffine_Mask));
+ setfield_boolean(L, "perspective", SkToBool(mask & SkMatrix::kPerspective_Mask));
+ return 1;
+}
+
+static int lmatrix_getScaleX(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getScaleX());
+ return 1;
+}
+
+static int lmatrix_getScaleY(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getScaleY());
+ return 1;
+}
+
+static int lmatrix_getTranslateX(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getTranslateX());
+ return 1;
+}
+
+static int lmatrix_getTranslateY(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getTranslateY());
+ return 1;
+}
+
+static int lmatrix_invert(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkMatrix>(L, 1)->invert(get_obj<SkMatrix>(L, 2)));
+ return 1;
+}
+
+static int lmatrix_mapXY(lua_State* L) {
+ SkPoint pt = { lua2scalar(L, 2), lua2scalar(L, 3) };
+ get_obj<SkMatrix>(L, 1)->mapPoints(&pt, &pt, 1);
+ lua_pushnumber(L, pt.x());
+ lua_pushnumber(L, pt.y());
+ return 2;
+}
+
+static int lmatrix_setRectToRect(lua_State* L) {
+ SkMatrix* matrix = get_obj<SkMatrix>(L, 1);
+ SkRect srcR, dstR;
+ lua2rect(L, 2, &srcR);
+ lua2rect(L, 3, &dstR);
+ const char* scaleToFitStr = lua_tostring(L, 4);
+ SkMatrix::ScaleToFit scaleToFit = SkMatrix::kFill_ScaleToFit;
+
+ if (scaleToFitStr) {
+ const struct {
+ const char* fName;
+ SkMatrix::ScaleToFit fScaleToFit;
+ } rec[] = {
+ { "fill", SkMatrix::kFill_ScaleToFit },
+ { "start", SkMatrix::kStart_ScaleToFit },
+ { "center", SkMatrix::kCenter_ScaleToFit },
+ { "end", SkMatrix::kEnd_ScaleToFit },
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(rec); ++i) {
+ if (strcmp(rec[i].fName, scaleToFitStr) == 0) {
+ scaleToFit = rec[i].fScaleToFit;
+ break;
+ }
+ }
+ }
+
+ matrix->setRectToRect(srcR, dstR, scaleToFit);
+ return 0;
+}
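+
+// Illustrative Lua call for the method above; unknown or missing fit strings
+// fall back to "fill":
+//
+//   matrix:setRectToRect({ left=0, top=0, right=1, bottom=1 },
+//                        { left=0, top=0, right=100, bottom=50 }, "center")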
+
+static const struct luaL_Reg gSkMatrix_Methods[] = {
+ { "getType", lmatrix_getType },
+ { "getScaleX", lmatrix_getScaleX },
+ { "getScaleY", lmatrix_getScaleY },
+ { "getTranslateX", lmatrix_getTranslateX },
+ { "getTranslateY", lmatrix_getTranslateY },
+ { "setRectToRect", lmatrix_setRectToRect },
+ { "invert", lmatrix_invert },
+ { "mapXY", lmatrix_mapXY },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpath_getBounds(lua_State* L) {
+ SkLua(L).pushRect(get_obj<SkPath>(L, 1)->getBounds());
+ return 1;
+}
+
+static const char* fill_type_to_str(SkPath::FillType fill) {
+ switch (fill) {
+ case SkPath::kEvenOdd_FillType:
+ return "even-odd";
+ case SkPath::kWinding_FillType:
+ return "winding";
+ case SkPath::kInverseEvenOdd_FillType:
+ return "inverse-even-odd";
+ case SkPath::kInverseWinding_FillType:
+ return "inverse-winding";
+ }
+ return "unknown";
+}
+
+static int lpath_getFillType(lua_State* L) {
+ SkPath::FillType fill = get_obj<SkPath>(L, 1)->getFillType();
+ SkLua(L).pushString(fill_type_to_str(fill));
+ return 1;
+}
+
+static SkString segment_masks_to_str(uint32_t segmentMasks) {
+ SkString result;
+ bool first = true;
+ if (SkPath::kLine_SegmentMask & segmentMasks) {
+ result.append("line");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kLine_SegmentMask;)
+ }
+ if (SkPath::kQuad_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("quad");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kQuad_SegmentMask;)
+ }
+ if (SkPath::kConic_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("conic");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kConic_SegmentMask;)
+ }
+ if (SkPath::kCubic_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("cubic");
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kCubic_SegmentMask;)
+ }
+ SkASSERT(0 == segmentMasks);
+ return result;
+}
+
+static int lpath_getSegmentTypes(lua_State* L) {
+ uint32_t segMasks = get_obj<SkPath>(L, 1)->getSegmentMasks();
+ SkLua(L).pushString(segment_masks_to_str(segMasks));
+ return 1;
+}
+
+static int lpath_isConvex(lua_State* L) {
+ bool isConvex = SkPath::kConvex_Convexity == get_obj<SkPath>(L, 1)->getConvexity();
+ SkLua(L).pushBool(isConvex);
+ return 1;
+}
+
+static int lpath_isEmpty(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPath>(L, 1)->isEmpty());
+ return 1;
+}
+
+static int lpath_isRect(lua_State* L) {
+ SkRect r;
+ bool pred = get_obj<SkPath>(L, 1)->isRect(&r);
+ int ret_count = 1;
+ lua_pushboolean(L, pred);
+ if (pred) {
+ SkLua(L).pushRect(r);
+ ret_count += 1;
+ }
+ return ret_count;
+}
+
+static int lpath_countPoints(lua_State* L) {
+ lua_pushinteger(L, get_obj<SkPath>(L, 1)->countPoints());
+ return 1;
+}
+
+static int lpath_getVerbs(lua_State* L) {
+ const SkPath* path = get_obj<SkPath>(L, 1);
+ SkPath::Iter iter(*path, false);
+ SkPoint pts[4];
+
+ lua_newtable(L);
+
+ bool done = false;
+ int i = 0;
+ do {
+ switch (iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ setarray_string(L, ++i, "move");
+ break;
+ case SkPath::kClose_Verb:
+ setarray_string(L, ++i, "close");
+ break;
+ case SkPath::kLine_Verb:
+ setarray_string(L, ++i, "line");
+ break;
+ case SkPath::kQuad_Verb:
+ setarray_string(L, ++i, "quad");
+ break;
+ case SkPath::kConic_Verb:
+ setarray_string(L, ++i, "conic");
+ break;
+ case SkPath::kCubic_Verb:
+ setarray_string(L, ++i, "cubic");
+ break;
+ case SkPath::kDone_Verb:
+ setarray_string(L, ++i, "done");
+ done = true;
+ break;
+ }
+ } while (!done);
+
+ return 1;
+}
+
+static int lpath_reset(lua_State* L) {
+ get_obj<SkPath>(L, 1)->reset();
+ return 0;
+}
+
+static int lpath_moveTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->moveTo(lua2scalar(L, 2), lua2scalar(L, 3));
+ return 0;
+}
+
+static int lpath_lineTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->lineTo(lua2scalar(L, 2), lua2scalar(L, 3));
+ return 0;
+}
+
+static int lpath_quadTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->quadTo(lua2scalar(L, 2), lua2scalar(L, 3),
+ lua2scalar(L, 4), lua2scalar(L, 5));
+ return 0;
+}
+
+static int lpath_cubicTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->cubicTo(lua2scalar(L, 2), lua2scalar(L, 3),
+ lua2scalar(L, 4), lua2scalar(L, 5),
+ lua2scalar(L, 6), lua2scalar(L, 7));
+ return 0;
+}
+
+static int lpath_close(lua_State* L) {
+ get_obj<SkPath>(L, 1)->close();
+ return 0;
+}
+
+static int lpath_gc(lua_State* L) {
+ get_obj<SkPath>(L, 1)->~SkPath();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPath_Methods[] = {
+ { "getBounds", lpath_getBounds },
+ { "getFillType", lpath_getFillType },
+ { "getSegmentTypes", lpath_getSegmentTypes },
+ { "getVerbs", lpath_getVerbs },
+ { "isConvex", lpath_isConvex },
+ { "isEmpty", lpath_isEmpty },
+ { "isRect", lpath_isRect },
+ { "countPoints", lpath_countPoints },
+ { "reset", lpath_reset },
+ { "moveTo", lpath_moveTo },
+ { "lineTo", lpath_lineTo },
+ { "quadTo", lpath_quadTo },
+ { "cubicTo", lpath_cubicTo },
+ { "close", lpath_close },
+ { "__gc", lpath_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char* rrect_type(const SkRRect& rr) {
+ switch (rr.getType()) {
+ case SkRRect::kEmpty_Type: return "empty";
+ case SkRRect::kRect_Type: return "rect";
+ case SkRRect::kOval_Type: return "oval";
+ case SkRRect::kSimple_Type: return "simple";
+ case SkRRect::kNinePatch_Type: return "nine-patch";
+ case SkRRect::kComplex_Type: return "complex";
+ }
+ SkDEBUGFAIL("never get here");
+ return "";
+}
+
+static int lrrect_rect(lua_State* L) {
+ SkLua(L).pushRect(get_obj<SkRRect>(L, 1)->rect());
+ return 1;
+}
+
+static int lrrect_type(lua_State* L) {
+ lua_pushstring(L, rrect_type(*get_obj<SkRRect>(L, 1)));
+ return 1;
+}
+
+static int lrrect_radii(lua_State* L) {
+ int corner = SkToInt(lua_tointeger(L, 2));
+ SkVector v;
+ if (corner < 0 || corner > 3) {
+        SkDebugf("bad corner index %d\n", corner);
+ v.set(0, 0);
+ } else {
+ v = get_obj<SkRRect>(L, 1)->radii((SkRRect::Corner)corner);
+ }
+ lua_pushnumber(L, v.fX);
+ lua_pushnumber(L, v.fY);
+ return 2;
+}
+
+static int lrrect_gc(lua_State* L) {
+ get_obj<SkRRect>(L, 1)->~SkRRect();
+ return 0;
+}
+
+static const struct luaL_Reg gSkRRect_Methods[] = {
+ { "rect", lrrect_rect },
+ { "type", lrrect_type },
+ { "radii", lrrect_radii },
+ { "__gc", lrrect_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int limage_width(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkImage>(L, 1)->width());
+ return 1;
+}
+
+static int limage_height(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkImage>(L, 1)->height());
+ return 1;
+}
+
+static int limage_newShader(lua_State* L) {
+ SkTileMode tmode = SkTileMode::kClamp;
+ const SkMatrix* localM = nullptr;
+ push_ref(L, get_ref<SkImage>(L, 1)->makeShader(tmode, tmode, localM));
+ return 1;
+}
+
+static int limage_gc(lua_State* L) {
+ get_ref<SkImage>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkImage_Methods[] = {
+ { "width", limage_width },
+ { "height", limage_height },
+ { "newShader", limage_newShader },
+ { "__gc", limage_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lsurface_width(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkSurface>(L, 1)->width());
+ return 1;
+}
+
+static int lsurface_height(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkSurface>(L, 1)->height());
+ return 1;
+}
+
+static int lsurface_getCanvas(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkSurface>(L, 1)->getCanvas();
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ } else {
+ push_ptr(L, canvas);
+        // note: we don't unref the canvas, since getCanvas did not ref it.
+        // warning: this is fragile: Lua now holds a bare pointer to this
+        // canvas, so if the script drops the real owner (the surface) while
+        // keeping the canvas, that pointer dangles. Ideally we would ref the
+        // surface again and mark the wrapper as canvas-like.
+ }
+ return 1;
+}
+
+static int lsurface_newImageSnapshot(lua_State* L) {
+ sk_sp<SkImage> image = get_ref<SkSurface>(L, 1)->makeImageSnapshot();
+ if (!image) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, image);
+ }
+ return 1;
+}
+
+static int lsurface_newSurface(lua_State* L) {
+ int width = lua2int_def(L, 2, 0);
+ int height = lua2int_def(L, 3, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ auto surface = get_ref<SkSurface>(L, 1)->makeSurface(info);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lsurface_gc(lua_State* L) {
+ get_ref<SkSurface>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkSurface_Methods[] = {
+ { "width", lsurface_width },
+ { "height", lsurface_height },
+ { "getCanvas", lsurface_getCanvas },
+ { "newImageSnapshot", lsurface_newImageSnapshot },
+ { "newSurface", lsurface_newSurface },
+ { "__gc", lsurface_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpicturerecorder_beginRecording(lua_State* L) {
+ const SkScalar w = lua2scalar_def(L, 2, -1);
+ const SkScalar h = lua2scalar_def(L, 3, -1);
+ if (w <= 0 || h <= 0) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ SkCanvas* canvas = get_obj<SkPictureRecorder>(L, 1)->beginRecording(w, h);
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ push_ptr(L, canvas);
+ return 1;
+}
+
+static int lpicturerecorder_getCanvas(lua_State* L) {
+ SkCanvas* canvas = get_obj<SkPictureRecorder>(L, 1)->getRecordingCanvas();
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ return 1;
+ }
+ push_ptr(L, canvas);
+ return 1;
+}
+
+static int lpicturerecorder_endRecording(lua_State* L) {
+ sk_sp<SkPicture> pic = get_obj<SkPictureRecorder>(L, 1)->finishRecordingAsPicture();
+ if (!pic) {
+ lua_pushnil(L);
+ return 1;
+ }
+ push_ref(L, std::move(pic));
+ return 1;
+}
+
+static int lpicturerecorder_gc(lua_State* L) {
+ get_obj<SkPictureRecorder>(L, 1)->~SkPictureRecorder();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPictureRecorder_Methods[] = {
+ { "beginRecording", lpicturerecorder_beginRecording },
+ { "getCanvas", lpicturerecorder_getCanvas },
+ { "endRecording", lpicturerecorder_endRecording },
+ { "__gc", lpicturerecorder_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpicture_width(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkPicture>(L, 1)->cullRect().width());
+ return 1;
+}
+
+static int lpicture_height(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkPicture>(L, 1)->cullRect().height());
+ return 1;
+}
+
+static int lpicture_gc(lua_State* L) {
+ get_ref<SkPicture>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPicture_Methods[] = {
+ { "width", lpicture_width },
+ { "height", lpicture_height },
+ { "__gc", lpicture_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ltextblob_bounds(lua_State* L) {
+ SkLua(L).pushRect(get_ref<SkTextBlob>(L, 1)->bounds());
+ return 1;
+}
+
+static int ltextblob_gc(lua_State* L) {
+ SkSafeUnref(get_ref<SkTextBlob>(L, 1));
+ return 0;
+}
+
+static const struct luaL_Reg gSkTextBlob_Methods[] = {
+ { "bounds", ltextblob_bounds },
+ { "__gc", ltextblob_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ltypeface_getFamilyName(lua_State* L) {
+ SkString str;
+ get_ref<SkTypeface>(L, 1)->getFamilyName(&str);
+ lua_pushstring(L, str.c_str());
+ return 1;
+}
+
+static int ltypeface_getStyle(lua_State* L) {
+ push_obj(L, get_ref<SkTypeface>(L, 1)->fontStyle());
+ return 1;
+}
+
+static int ltypeface_gc(lua_State* L) {
+ SkSafeUnref(get_ref<SkTypeface>(L, 1));
+ return 0;
+}
+
+static const struct luaL_Reg gSkTypeface_Methods[] = {
+ { "getFamilyName", ltypeface_getFamilyName },
+ { "getStyle", ltypeface_getStyle },
+ { "__gc", ltypeface_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lfontstyle_weight(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkFontStyle>(L, 1)->weight());
+ return 1;
+}
+
+static int lfontstyle_width(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkFontStyle>(L, 1)->width());
+ return 1;
+}
+
+static int lfontstyle_slant(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkFontStyle>(L, 1)->slant());
+ return 1;
+}
+
+static int lfontstyle_gc(lua_State* L) {
+ get_obj<SkFontStyle>(L, 1)->~SkFontStyle();
+ return 0;
+}
+
+static const struct luaL_Reg gSkFontStyle_Methods[] = {
+ { "weight", lfontstyle_weight },
+ { "width", lfontstyle_width },
+ { "slant", lfontstyle_slant },
+ { "__gc", lfontstyle_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class AutoCallLua {
+public:
+ AutoCallLua(lua_State* L, const char func[], const char verb[]) : fL(L) {
+ lua_getglobal(L, func);
+ if (!lua_isfunction(L, -1)) {
+ int t = lua_type(L, -1);
+ SkDebugf("--- expected function %d\n", t);
+ }
+
+ lua_newtable(L);
+ setfield_string(L, "verb", verb);
+ }
+
+    ~AutoCallLua() {
+        if (lua_pcall(fL, 1, 0, 0) != LUA_OK) {
+            SkDebugf("lua err: %s\n", lua_tostring(fL, -1));
+            lua_pop(fL, 1);  // pop the error message so the stack stays balanced
+        }
+    }
+
+private:
+ lua_State* fL;
+};
+
+#define AUTO_LUA(verb) AutoCallLua acl(fL, fFunc.c_str(), verb)
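+
+// AutoCallLua packages one scripted callback as RAII: the constructor pushes
+// the named global function plus a fresh table tagged with a "verb" field,
+// the caller may add more fields to that table, and the destructor invokes
+// the function with the table as its only argument. SkLuaCanvas reuses this
+// same pattern to report each canvas operation.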
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lsk_newDocumentPDF(lua_State* L) {
+ const char* filename = nullptr;
+ if (lua_gettop(L) > 0 && lua_isstring(L, 1)) {
+ filename = lua_tolstring(L, 1, nullptr);
+ }
+ if (!filename) {
+ return 0;
+ }
+ auto file = skstd::make_unique<SkFILEWStream>(filename);
+ if (!file->isValid()) {
+ return 0;
+ }
+ auto doc = SkPDF::MakeDocument(file.get());
+ if (!doc) {
+ return 0;
+ }
+ push_ptr(L, new DocHolder{std::move(doc), std::move(file)});
+ return 1;
+}
+
+static int lsk_newBlurImageFilter(lua_State* L) {
+ SkScalar sigmaX = lua2scalar_def(L, 1, 0);
+ SkScalar sigmaY = lua2scalar_def(L, 2, 0);
+ sk_sp<SkImageFilter> imf(SkImageFilters::Blur(sigmaX, sigmaY, nullptr));
+ if (!imf) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, std::move(imf));
+ }
+ return 1;
+}
+
+static int lsk_newLinearGradient(lua_State* L) {
+ SkScalar x0 = lua2scalar_def(L, 1, 0);
+ SkScalar y0 = lua2scalar_def(L, 2, 0);
+ SkColor c0 = lua2color(L, 3);
+ SkScalar x1 = lua2scalar_def(L, 4, 0);
+ SkScalar y1 = lua2scalar_def(L, 5, 0);
+ SkColor c1 = lua2color(L, 6);
+
+ SkPoint pts[] = { { x0, y0 }, { x1, y1 } };
+ SkColor colors[] = { c0, c1 };
+ sk_sp<SkShader> s(SkGradientShader::MakeLinear(pts, colors, nullptr, 2, SkTileMode::kClamp));
+ if (!s) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, std::move(s));
+ }
+ return 1;
+}
+
+static int lsk_newMatrix(lua_State* L) {
+ push_new<SkMatrix>(L)->reset();
+ return 1;
+}
+
+static int lsk_newPaint(lua_State* L) {
+ push_new<SkPaint>(L);
+ return 1;
+}
+
+static int lsk_newPath(lua_State* L) {
+ push_new<SkPath>(L);
+ return 1;
+}
+
+static int lsk_newPictureRecorder(lua_State* L) {
+ push_new<SkPictureRecorder>(L);
+ return 1;
+}
+
+static int lsk_newRRect(lua_State* L) {
+ push_new<SkRRect>(L)->setEmpty();
+ return 1;
+}
+
+// Sk.newTextBlob(text, rect, paint)
+static int lsk_newTextBlob(lua_State* L) {
+ const char* text = lua_tolstring(L, 1, nullptr);
+ SkRect bounds;
+ lua2rect(L, 2, &bounds);
+
+ std::unique_ptr<SkShaper> shaper = SkShaper::Make();
+
+ // TODO: restore this logic based on SkFont instead of SkPaint
+#if 0
+ const SkPaint& paint = *get_obj<SkPaint>(L, 3);
+ SkFont font = SkFont::LEGACY_ExtractFromPaint(paint);
+#else
+ SkFont font;
+#endif
+ SkTextBlobBuilderRunHandler builder(text, { bounds.left(), bounds.top() });
+ shaper->shape(text, strlen(text), font, true, bounds.width(), &builder);
+
+ push_ref<SkTextBlob>(L, builder.makeBlob());
+ SkLua(L).pushScalar(builder.endPoint().fY);
+ return 2;
+}
+
+static int lsk_newTypeface(lua_State* L) {
+ const char* name = nullptr;
+ SkFontStyle style;
+
+ int count = lua_gettop(L);
+ if (count > 0 && lua_isstring(L, 1)) {
+ name = lua_tolstring(L, 1, nullptr);
+ if (count > 1) {
+ SkFontStyle* passedStyle = get_obj<SkFontStyle>(L, 2);
+ if (passedStyle) {
+ style = *passedStyle;
+ }
+ }
+ }
+
+ sk_sp<SkTypeface> face(SkTypeface::MakeFromName(name, style));
+// SkDebugf("---- name <%s> style=%d, face=%p ref=%d\n", name, style, face, face->getRefCnt());
+ if (nullptr == face) {
+ face = SkTypeface::MakeDefault();
+ }
+ push_ref(L, std::move(face));
+ return 1;
+}
+
+static int lsk_newFontStyle(lua_State* L) {
+ int count = lua_gettop(L);
+ int weight = SkFontStyle::kNormal_Weight;
+ int width = SkFontStyle::kNormal_Width;
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ if (count >= 1 && lua_isnumber(L, 1)) {
+ weight = lua_tointegerx(L, 1, nullptr);
+ }
+ if (count >= 2 && lua_isnumber(L, 2)) {
+ width = lua_tointegerx(L, 2, nullptr);
+ }
+ if (count >= 3 && lua_isnumber(L, 3)) {
+ slant = static_cast<SkFontStyle::Slant>(lua_tointegerx(L, 3, nullptr));
+ }
+ push_new<SkFontStyle>(L, weight, width, slant);
+ return 1;
+}
+
+static int lsk_newRasterSurface(lua_State* L) {
+ int width = lua2int_def(L, 1, 0);
+ int height = lua2int_def(L, 2, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ auto surface = SkSurface::MakeRaster(info, &props);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lsk_loadImage(lua_State* L) {
+ if (lua_gettop(L) > 0 && lua_isstring(L, 1)) {
+ const char* name = lua_tolstring(L, 1, nullptr);
+ sk_sp<SkData> data(SkData::MakeFromFileName(name));
+ if (data) {
+ auto image = SkImage::MakeFromEncoded(std::move(data));
+ if (image) {
+ push_ref(L, std::move(image));
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void register_Sk(lua_State* L) {
+ lua_newtable(L);
+ lua_pushvalue(L, -1);
+ lua_setglobal(L, "Sk");
+ // the Sk table is still on top
+
+ setfield_function(L, "newDocumentPDF", lsk_newDocumentPDF);
+ setfield_function(L, "loadImage", lsk_loadImage);
+ setfield_function(L, "newBlurImageFilter", lsk_newBlurImageFilter);
+ setfield_function(L, "newLinearGradient", lsk_newLinearGradient);
+ setfield_function(L, "newMatrix", lsk_newMatrix);
+ setfield_function(L, "newPaint", lsk_newPaint);
+ setfield_function(L, "newPath", lsk_newPath);
+ setfield_function(L, "newPictureRecorder", lsk_newPictureRecorder);
+ setfield_function(L, "newRRect", lsk_newRRect);
+ setfield_function(L, "newRasterSurface", lsk_newRasterSurface);
+ setfield_function(L, "newTextBlob", lsk_newTextBlob);
+ setfield_function(L, "newTypeface", lsk_newTypeface);
+ setfield_function(L, "newFontStyle", lsk_newFontStyle);
+ lua_pop(L, 1); // pop off the Sk table
+}
+
+#define REG_CLASS(L, C) \
+ do { \
+ luaL_newmetatable(L, get_mtname<C>()); \
+ lua_pushvalue(L, -1); \
+ lua_setfield(L, -2, "__index"); \
+ luaL_setfuncs(L, g##C##_Methods, 0); \
+ lua_pop(L, 1); /* pop off the meta-table */ \
+ } while (0)
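+
+// REG_CLASS is the usual Lua OO registration idiom; per class C it performs
+// roughly the following (Lua sketch):
+//
+//   local mt = {}       -- luaL_newmetatable(L, get_mtname<C>())
+//   mt.__index = mt     -- method lookup on the userdata falls back to mt
+//   -- luaL_setfuncs(L, gC_Methods, 0) then copies the methods into mt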
+
+void SkLua::Load(lua_State* L) {
+ register_Sk(L);
+ REG_CLASS(L, SkCanvas);
+ REG_CLASS(L, SkColorFilter);
+ REG_CLASS(L, DocHolder);
+ REG_CLASS(L, SkFont);
+ REG_CLASS(L, SkImage);
+ REG_CLASS(L, SkImageFilter);
+ REG_CLASS(L, SkMatrix);
+ REG_CLASS(L, SkPaint);
+ REG_CLASS(L, SkPath);
+ REG_CLASS(L, SkPathEffect);
+ REG_CLASS(L, SkPicture);
+ REG_CLASS(L, SkPictureRecorder);
+ REG_CLASS(L, SkRRect);
+ REG_CLASS(L, SkShader);
+ REG_CLASS(L, SkSurface);
+ REG_CLASS(L, SkTextBlob);
+ REG_CLASS(L, SkTypeface);
+ REG_CLASS(L, SkFontStyle);
+}
+
+extern "C" int luaopen_skia(lua_State* L);
+extern "C" int luaopen_skia(lua_State* L) {
+ SkLua::Load(L);
+ return 0;
+}
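+
+// luaopen_skia follows Lua's C-module naming convention, so a host linking
+// this code can load it with require("skia"); note that it returns no module
+// table and instead installs the global "Sk" via register_Sk.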
diff --git a/gfx/skia/skia/src/utils/SkLuaCanvas.cpp b/gfx/skia/skia/src/utils/SkLuaCanvas.cpp
new file mode 100644
index 0000000000..f756ee6c6e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkLuaCanvas.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkLuaCanvas.h"
+
+#include "include/private/SkTo.h"
+#include "include/utils/SkLua.h"
+#include "src/core/SkStringUtils.h"
+
+extern "C" {
+ #include "lua.h"
+ #include "lauxlib.h"
+}
+
+class AutoCallLua : public SkLua {
+public:
+ AutoCallLua(lua_State* L, const char func[], const char verb[]) : INHERITED(L) {
+ lua_getglobal(L, func);
+ if (!lua_isfunction(L, -1)) {
+ int t = lua_type(L, -1);
+ SkDebugf("--- expected function %d\n", t);
+ }
+
+ lua_newtable(L);
+ this->pushString(verb, "verb");
+ }
+
+    ~AutoCallLua() {
+        lua_State* L = this->get();
+        if (lua_pcall(L, 1, 0, 0) != LUA_OK) {
+            SkDebugf("lua err: %s\n", lua_tostring(L, -1));
+            lua_pop(L, 1);  // pop the error message so the stack stays balanced
+        }
+    }
+
+ void pushEncodedText(SkTextEncoding, const void*, size_t);
+
+private:
+ typedef SkLua INHERITED;
+};
+
+#define AUTO_LUA(verb) AutoCallLua lua(fL, fFunc.c_str(), verb)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void AutoCallLua::pushEncodedText(SkTextEncoding enc, const void* text, size_t length) {
+ switch (enc) {
+ case SkTextEncoding::kUTF8:
+ this->pushString((const char*)text, length, "text");
+ break;
+ case SkTextEncoding::kUTF16:
+ this->pushString(SkStringFromUTF16((const uint16_t*)text, length), "text");
+ break;
+ case SkTextEncoding::kGlyphID:
+ this->pushArrayU16((const uint16_t*)text, SkToInt(length >> 1),
+ "glyphs");
+ break;
+ case SkTextEncoding::kUTF32:
+ break;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLuaCanvas::pushThis() {
+ SkLua(fL).pushCanvas(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLuaCanvas::SkLuaCanvas(int width, int height, lua_State* L, const char func[])
+ : INHERITED(width, height)
+ , fL(L)
+ , fFunc(func) {
+}
+
+SkLuaCanvas::~SkLuaCanvas() {}
+
+void SkLuaCanvas::willSave() {
+ AUTO_LUA("save");
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkLuaCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ AUTO_LUA("saveLayer");
+ if (rec.fBounds) {
+ lua.pushRect(*rec.fBounds, "bounds");
+ }
+ if (rec.fPaint) {
+ lua.pushPaint(*rec.fPaint, "paint");
+ }
+
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkLuaCanvas::onDoSaveBehind(const SkRect*) {
+ // TODO
+ return false;
+}
+
+void SkLuaCanvas::willRestore() {
+ AUTO_LUA("restore");
+ this->INHERITED::willRestore();
+}
+
+void SkLuaCanvas::didConcat(const SkMatrix& matrix) {
+ switch (matrix.getType()) {
+ case SkMatrix::kTranslate_Mask: {
+ AUTO_LUA("translate");
+ lua.pushScalar(matrix.getTranslateX(), "dx");
+ lua.pushScalar(matrix.getTranslateY(), "dy");
+ break;
+ }
+ case SkMatrix::kScale_Mask: {
+ AUTO_LUA("scale");
+ lua.pushScalar(matrix.getScaleX(), "sx");
+ lua.pushScalar(matrix.getScaleY(), "sy");
+ break;
+ }
+ default: {
+ AUTO_LUA("concat");
+            // pushMatrix was added in https://codereview.chromium.org/203203004/,
+            // but it never appears to have worked correctly, so it stays disabled:
+            // lua.pushMatrix(matrix);
+ break;
+ }
+ }
+
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkLuaCanvas::didSetMatrix(const SkMatrix& matrix) {
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkLuaCanvas::onClipRect(const SkRect& r, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipRect");
+ lua.pushRect(r, "rect");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipRect(r, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipRRect");
+ lua.pushRRect(rrect, "rrect");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipPath");
+ lua.pushPath(path, "path");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ AUTO_LUA("clipRegion");
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkLuaCanvas::onDrawPaint(const SkPaint& paint) {
+ AUTO_LUA("drawPaint");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPoints(PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ AUTO_LUA("drawPoints");
+ lua.pushArrayPoint(pts, SkToInt(count), "points");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ AUTO_LUA("drawOval");
+ lua.pushRect(rect, "rect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ AUTO_LUA("drawArc");
+ lua.pushRect(rect, "rect");
+ lua.pushScalar(startAngle, "startAngle");
+ lua.pushScalar(sweepAngle, "sweepAngle");
+ lua.pushBool(useCenter, "useCenter");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ AUTO_LUA("drawRect");
+ lua.pushRect(rect, "rect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ AUTO_LUA("drawRRect");
+ lua.pushRRect(rrect, "rrect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ AUTO_LUA("drawDRRect");
+ lua.pushRRect(outer, "outer");
+ lua.pushRRect(inner, "inner");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ AUTO_LUA("drawPath");
+ lua.pushPath(path, "path");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ AUTO_LUA("drawBitmap");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ AUTO_LUA("drawBitmapRect");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ AUTO_LUA("drawBitmapNine");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ AUTO_LUA("drawImage");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ AUTO_LUA("drawImageRect");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawTextBlob(const SkTextBlob *blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ AUTO_LUA("drawTextBlob");
+ lua.pushTextBlob(blob, "blob");
+ lua.pushScalar(x, "x");
+ lua.pushScalar(y, "y");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ AUTO_LUA("drawPicture");
+ // call through so we can see the nested picture ops
+ this->INHERITED::onDrawPicture(picture, matrix, paint);
+}
+
+void SkLuaCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ AUTO_LUA("drawDrawable");
+ // call through so we can see the nested ops
+ this->INHERITED::onDrawDrawable(drawable, matrix);
+}
+
+void SkLuaCanvas::onDrawVerticesObject(const SkVertices*, const SkVertices::Bone[], int,
+ SkBlendMode, const SkPaint& paint) {
+ AUTO_LUA("drawVertices");
+ lua.pushPaint(paint, "paint");
+}
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.cpp b/gfx/skia/skia/src/utils/SkMatrix22.cpp
new file mode 100644
index 0000000000..b04c502d83
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "src/utils/SkMatrix22.h"
+
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G) {
+ const SkScalar& a = h.fX;
+ const SkScalar& b = h.fY;
+ SkScalar c, s;
+ if (0 == b) {
+ c = SkScalarCopySign(SK_Scalar1, a);
+ s = 0;
+ //r = SkScalarAbs(a);
+ } else if (0 == a) {
+ c = 0;
+ s = -SkScalarCopySign(SK_Scalar1, b);
+ //r = SkScalarAbs(b);
+ } else if (SkScalarAbs(b) > SkScalarAbs(a)) {
+ SkScalar t = a / b;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), b);
+ s = -SK_Scalar1 / u;
+ c = -s * t;
+ //r = b * u;
+ } else {
+ SkScalar t = b / a;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), a);
+ c = SK_Scalar1 / u;
+ s = -c * t;
+ //r = a * u;
+ }
+
+ G->setSinCos(s, c);
+}
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.h b/gfx/skia/skia/src/utils/SkMatrix22.h
new file mode 100644
index 0000000000..c8bcd5f6bd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix22_DEFINED
+#define SkMatrix22_DEFINED
+
+#include "include/core/SkPoint.h"
+
+class SkMatrix;
+
+/** Find the Givens matrix G, which is the rotation matrix
+ *  that rotates the vector h to the positive horizontal axis.
+ * G * h = [hypot(h), 0]
+ *
+ * This is equivalent to
+ *
+ * SkScalar r = h.length();
+ * SkScalar r_inv = r ? SkScalarInvert(r) : 0;
+ * h.scale(r_inv);
+ * G->setSinCos(-h.fY, h.fX);
+ *
+ * but has better numerical stability by using (partial) hypot,
+ * and saves a multiply by not computing r.
+ */
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G);
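+
+/* Worked example (a numeric check, not additional API): for h = (3, 4) the
+ * implementation yields c = 3/5 and s = -4/5, so the resulting G maps
+ * (3, 4) to (5, 0) == (hypot(3, 4), 0).
+ */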
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
new file mode 100644
index 0000000000..25be716243
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkMultiPictureDocument.h"
+
+#include "include/core/SkPicture.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTo.h"
+#include "include/utils/SkNWayCanvas.h"
+#include "src/utils/SkMultiPictureDocumentPriv.h"
+
+#include <limits.h>
+
+/*
+ File format:
+ BEGINNING_OF_FILE:
+ kMagic
+ uint32_t version_number (==2)
+ uint32_t page_count
+ {
+ float sizeX
+ float sizeY
+ } * page_count
+ skp file
+*/
+
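+// A minimal write-side sketch (hypothetical file name; 612x792 page):
+//   SkFILEWStream out("pages.mskp");
+//   sk_sp<SkDocument> doc = SkMakeMultiPictureDocument(&out);
+//   SkCanvas* page = doc->beginPage(612, 792);
+//   page->drawColor(SK_ColorWHITE);
+//   doc->endPage();
+//   doc->close();   // writes the format described above
+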
+namespace {
+// The unique file signature for this file type.
+static constexpr char kMagic[] = "Skia Multi-Picture Doc\n\n";
+
+static constexpr char kEndPage[] = "SkMultiPictureEndPage";
+
+const uint32_t kVersion = 2;
+
+static SkSize join(const SkTArray<SkSize>& sizes) {
+ SkSize joined = {0, 0};
+ for (SkSize s : sizes) {
+ joined = SkSize{SkTMax(joined.width(), s.width()), SkTMax(joined.height(), s.height())};
+ }
+ return joined;
+}
+
+struct MultiPictureDocument final : public SkDocument {
+ const SkSerialProcs fProcs;
+ SkPictureRecorder fPictureRecorder;
+ SkSize fCurrentPageSize;
+ SkTArray<sk_sp<SkPicture>> fPages;
+ SkTArray<SkSize> fSizes;
+ MultiPictureDocument(SkWStream* s, const SkSerialProcs* procs)
+ : SkDocument(s)
+ , fProcs(procs ? *procs : SkSerialProcs())
+ {}
+ ~MultiPictureDocument() override { this->close(); }
+
+ SkCanvas* onBeginPage(SkScalar w, SkScalar h) override {
+ fCurrentPageSize.set(w, h);
+ return fPictureRecorder.beginRecording(w, h);
+ }
+ void onEndPage() override {
+ fSizes.push_back(fCurrentPageSize);
+ fPages.push_back(fPictureRecorder.finishRecordingAsPicture());
+ }
+ void onClose(SkWStream* wStream) override {
+ SkASSERT(wStream);
+ SkASSERT(wStream->bytesWritten() == 0);
+ wStream->writeText(kMagic);
+ wStream->write32(kVersion);
+ wStream->write32(SkToU32(fPages.count()));
+ for (SkSize s : fSizes) {
+ wStream->write(&s, sizeof(s));
+ }
+ SkSize bigsize = join(fSizes);
+ SkCanvas* c = fPictureRecorder.beginRecording(SkRect::MakeSize(bigsize));
+ for (const sk_sp<SkPicture>& page : fPages) {
+ c->drawPicture(page);
+ // Annotations must include some data.
+ c->drawAnnotation(SkRect::MakeEmpty(), kEndPage, SkData::MakeWithCString("X"));
+ }
+ sk_sp<SkPicture> p = fPictureRecorder.finishRecordingAsPicture();
+ p->serialize(wStream, &fProcs);
+ fPages.reset();
+ fSizes.reset();
+ return;
+ }
+ void onAbort() override {
+ fPages.reset();
+ fSizes.reset();
+ }
+};
+}
+
+sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* wStream, const SkSerialProcs* procs) {
+ return sk_make_sp<MultiPictureDocument>(wStream, procs);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+int SkMultiPictureDocumentReadPageCount(SkStreamSeekable* stream) {
+ if (!stream) {
+ return 0;
+ }
+ stream->seek(0);
+ const size_t size = sizeof(kMagic) - 1;
+ char buffer[size];
+ if (size != stream->read(buffer, size) || 0 != memcmp(kMagic, buffer, size)) {
+ stream = nullptr;
+ return 0;
+ }
+ uint32_t versionNumber;
+ if (!stream->readU32(&versionNumber) || versionNumber != kVersion) {
+ return 0;
+ }
+ uint32_t pageCount;
+ if (!stream->readU32(&pageCount) || pageCount > INT_MAX) {
+ return 0;
+ }
+ // leave stream position right here.
+ return SkTo<int>(pageCount);
+}
+
+bool SkMultiPictureDocumentReadPageSizes(SkStreamSeekable* stream,
+ SkDocumentPage* dstArray,
+ int dstArrayCount) {
+ if (!dstArray || dstArrayCount < 1) {
+ return false;
+ }
+ int pageCount = SkMultiPictureDocumentReadPageCount(stream);
+ if (pageCount < 1 || pageCount != dstArrayCount) {
+ return false;
+ }
+ for (int i = 0; i < pageCount; ++i) {
+ SkSize& s = dstArray[i].fSize;
+ if (sizeof(s) != stream->read(&s, sizeof(s))) {
+ return false;
+ }
+ }
+ // leave stream position right here.
+ return true;
+}
+
+namespace {
+struct PagerCanvas : public SkNWayCanvas {
+ SkPictureRecorder fRecorder;
+ SkDocumentPage* fDst;
+ int fCount;
+ int fIndex = 0;
+ PagerCanvas(SkISize wh, SkDocumentPage* dst, int count)
+ : SkNWayCanvas(wh.width(), wh.height()), fDst(dst), fCount(count) {
+ this->nextCanvas();
+ }
+ void nextCanvas() {
+ if (fIndex < fCount) {
+ SkRect bounds = SkRect::MakeSize(fDst[fIndex].fSize);
+ this->addCanvas(fRecorder.beginRecording(bounds));
+ }
+ }
+ void onDrawAnnotation(const SkRect& r, const char* key, SkData* d) override {
+ if (0 == strcmp(key, kEndPage)) {
+ this->removeAll();
+ if (fIndex < fCount) {
+ fDst[fIndex].fPicture = fRecorder.finishRecordingAsPicture();
+ ++fIndex;
+ }
+ this->nextCanvas();
+ } else {
+ this->SkNWayCanvas::onDrawAnnotation(r, key, d);
+ }
+ }
+};
+} // namespace
+
+bool SkMultiPictureDocumentRead(SkStreamSeekable* stream,
+ SkDocumentPage* dstArray,
+ int dstArrayCount,
+ const SkDeserialProcs* procs) {
+ if (!SkMultiPictureDocumentReadPageSizes(stream, dstArray, dstArrayCount)) {
+ return false;
+ }
+ SkSize joined = {0.0f, 0.0f};
+ for (int i = 0; i < dstArrayCount; ++i) {
+ joined = SkSize{SkTMax(joined.width(), dstArray[i].fSize.width()),
+ SkTMax(joined.height(), dstArray[i].fSize.height())};
+ }
+
+ auto picture = SkPicture::MakeFromStream(stream, procs);
+
+ PagerCanvas canvas(joined.toCeil(), dstArray, dstArrayCount);
+ // Must call playback(), not drawPicture() to reach
+ // PagerCanvas::onDrawAnnotation().
+ picture->playback(&canvas);
+ if (canvas.fIndex != dstArrayCount) {
+ SkDEBUGF("Malformed SkMultiPictureDocument: canvas.fIndex=%d dstArrayCount=%d\n",
+ canvas.fIndex, dstArrayCount);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.h b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
new file mode 100644
index 0000000000..b6d7be8429
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDocument_DEFINED
+#define SkMultiPictureDocument_DEFINED
+
+#include "include/core/SkDocument.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkSize.h"
+
+struct SkDeserialProcs;
+struct SkSerialProcs;
+class SkStreamSeekable;
+
+/**
+ * Writes into a file format that is similar to SkPicture::serialize()
+ */
+SK_API sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* dst, const SkSerialProcs* = nullptr);
+
+struct SkDocumentPage {
+ sk_sp<SkPicture> fPicture;
+ SkSize fSize;
+};
+
+/**
+ * Returns the number of pages in the SkMultiPictureDocument.
+ */
+SK_API int SkMultiPictureDocumentReadPageCount(SkStreamSeekable* src);
+
+/**
+ * Read the SkMultiPictureDocument into the provided array of pages.
+ * dstArrayCount must equal SkMultiPictureDocumentReadPageCount().
+ * Return false on error.
+ */
+SK_API bool SkMultiPictureDocumentRead(SkStreamSeekable* src,
+ SkDocumentPage* dstArray,
+ int dstArrayCount,
+ const SkDeserialProcs* = nullptr);
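+//
+// A read-side sketch (hypothetical seekable stream):
+//   int n = SkMultiPictureDocumentReadPageCount(&stream);
+//   std::vector<SkDocumentPage> pages(n > 0 ? n : 0);
+//   if (n > 0 && SkMultiPictureDocumentRead(&stream, pages.data(), n)) {
+//       // pages[i].fPicture and pages[i].fSize are now populated.
+//   }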
+
+#endif // SkMultiPictureDocument_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
new file mode 100644
index 0000000000..a33bcd99c1
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDocumentPriv_DEFINED
+#define SkMultiPictureDocumentPriv_DEFINED
+
+#include "src/utils/SkMultiPictureDocument.h"
+
+/**
+ *  Additional API that allows one to read the array of page sizes without
+ *  parsing the entire file. Used by DM.
+ */
+bool SkMultiPictureDocumentReadPageSizes(SkStreamSeekable* src,
+ SkDocumentPage* dstArray,
+ int dstArrayCount);
+
+#endif // SkMultiPictureDocumentPriv_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkNWayCanvas.cpp b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
new file mode 100644
index 0000000000..1ce6318e5c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkNWayCanvas.h"
+#include "src/core/SkCanvasPriv.h"
+
+SkNWayCanvas::SkNWayCanvas(int width, int height) : INHERITED(width, height) {}
+
+SkNWayCanvas::~SkNWayCanvas() {
+ this->removeAll();
+}
+
+void SkNWayCanvas::addCanvas(SkCanvas* canvas) {
+ if (canvas) {
+ *fList.append() = canvas;
+ }
+}
+
+void SkNWayCanvas::removeCanvas(SkCanvas* canvas) {
+ int index = fList.find(canvas);
+ if (index >= 0) {
+ fList.removeShuffle(index);
+ }
+}
+
+void SkNWayCanvas::removeAll() {
+ fList.reset();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// These are forwarded to the N canvases we're referencing
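+//
+// A minimal usage sketch (canvasA/canvasB are hypothetical targets):
+//   SkNWayCanvas nway(256, 256);
+//   nway.addCanvas(&canvasA);
+//   nway.addCanvas(&canvasB);
+//   nway.drawRect(SkRect::MakeWH(10, 10), SkPaint());  // replayed on both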
+
+class SkNWayCanvas::Iter {
+public:
+ Iter(const SkTDArray<SkCanvas*>& list) : fList(list) {
+ fIndex = 0;
+ }
+ bool next() {
+ if (fIndex < fList.count()) {
+ fCanvas = fList[fIndex++];
+ return true;
+ }
+ return false;
+ }
+ SkCanvas* operator->() { return fCanvas; }
+ SkCanvas* get() const { return fCanvas; }
+
+private:
+ const SkTDArray<SkCanvas*>& fList;
+ int fIndex;
+ SkCanvas* fCanvas;
+};
+
+void SkNWayCanvas::willSave() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->save();
+ }
+
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkNWayCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->saveLayer(rec);
+ }
+
+ this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkNWayCanvas::onDoSaveBehind(const SkRect* bounds) {
+ Iter iter(fList);
+ while (iter.next()) {
+ SkCanvasPriv::SaveBehind(iter.get(), bounds);
+ }
+ this->INHERITED::onDoSaveBehind(bounds);
+ return false;
+}
+
+void SkNWayCanvas::willRestore() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->restore();
+ }
+ this->INHERITED::willRestore();
+}
+
+void SkNWayCanvas::didConcat(const SkMatrix& matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->concat(matrix);
+ }
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkNWayCanvas::didSetMatrix(const SkMatrix& matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->setMatrix(matrix);
+ }
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkNWayCanvas::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipPath(path, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRegion(deviceRgn, op);
+ }
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkNWayCanvas::onDrawPaint(const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPaint(paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBehind(const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ SkCanvasPriv::DrawBehind(iter.get(), paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPoints(mode, count, pts, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRect(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRegion(region, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawOval(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawArc(rect, startAngle, sweepAngle, useCenter, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRRect(rrect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawDRRect(outer, inner, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPath(path, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawBitmap(bitmap, x, y, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->legacy_drawBitmapRect(bitmap, src, dst, paint, (SrcRectConstraint)constraint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawBitmapNine(bitmap, center, dst, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawBitmapLattice(bitmap, lattice, dst, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImage(image, left, top, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->legacy_drawImageRect(image, src, dst, paint, constraint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImageNine(image, center, dst, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImageLattice(image, lattice, dst, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawTextBlob(blob, x, y, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPicture(picture, matrix, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawDrawable(drawable, matrix);
+ }
+}
+
+void SkNWayCanvas::onDrawVerticesObject(const SkVertices* vertices, const SkVertices::Bone bones[],
+ int boneCount, SkBlendMode bmode, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawVertices(vertices, bones, boneCount, bmode, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPatch(cubics, colors, texCoords, bmode, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawAtlas(const SkImage* image, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode bmode,
+ const SkRect* cull, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawAtlas(image, xform, tex, colors, count, bmode, cull, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->private_draw_shadow_rec(path, rec);
+ }
+}
+
+void SkNWayCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* data) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawAnnotation(rect, key, data);
+ }
+}
+
+void SkNWayCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->experimental_DrawEdgeAAQuad(rect, clip, aa, color, mode);
+ }
+}
+
+void SkNWayCanvas::onDrawEdgeAAImageSet(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->experimental_DrawEdgeAAImageSet(
+ set, count, dstClips, preViewMatrices, paint, constraint);
+ }
+}
+
+void SkNWayCanvas::onFlush() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->flush();
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkNullCanvas.cpp b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
new file mode 100644
index 0000000000..b01a4c42f7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkNullCanvas.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/utils/SkNWayCanvas.h"
+#include "src/core/SkMakeUnique.h"
+
+std::unique_ptr<SkCanvas> SkMakeNullCanvas() {
+    // An N-Way canvas forwards calls to N canvases. When N == 0 it's
+ // effectively a null canvas.
+ return std::unique_ptr<SkCanvas>(new SkNWayCanvas(0, 0));
+}
diff --git a/gfx/skia/skia/src/utils/SkOSPath.cpp b/gfx/skia/skia/src/utils/SkOSPath.cpp
new file mode 100644
index 0000000000..293e6a41b5
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOSPath.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkOSPath.h"
+
+SkString SkOSPath::Join(const char *rootPath, const char *relativePath) {
+ SkString result(rootPath);
+ if (!result.endsWith(SEPARATOR) && !result.isEmpty()) {
+ result.appendUnichar(SEPARATOR);
+ }
+ result.append(relativePath);
+ return result;
+}
+
+SkString SkOSPath::Basename(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* filename = strrchr(fullPath, SEPARATOR);
+ if (nullptr == filename) {
+ filename = fullPath;
+ } else {
+ ++filename;
+ }
+ return SkString(filename);
+}
+
+SkString SkOSPath::Dirname(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* end = strrchr(fullPath, SEPARATOR);
+ if (nullptr == end) {
+ return SkString();
+ }
+ if (end == fullPath) {
+ SkASSERT(fullPath[0] == SEPARATOR);
+ ++end;
+ }
+ return SkString(fullPath, end - fullPath);
+}
diff --git a/gfx/skia/skia/src/utils/SkOSPath.h b/gfx/skia/skia/src/utils/SkOSPath.h
new file mode 100644
index 0000000000..e0f9ef31ad
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOSPath.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSPath_DEFINED
+#define SkOSPath_DEFINED
+
+#include "include/core/SkString.h"
+
+/**
+ * Functions for modifying SkStrings which represent paths on the filesystem.
+ */
+class SkOSPath {
+public:
+#ifdef _WIN32
+ const static char SEPARATOR = '\\';
+#else
+ const static char SEPARATOR = '/';
+#endif
+
+ /**
+ * Assembles rootPath and relativePath into a single path, like this:
+ * rootPath/relativePath.
+     *  It is okay to call with a NULL rootPath and/or relativePath; a
+     *  separator is inserted only when rootPath is non-empty and does not
+     *  already end with one.
+     *
+     *  Uses the platform-specific SEPARATOR, so it works on all platforms.
+ */
+ static SkString Join(const char* rootPath, const char* relativePath);
+
+ /**
+ * Return the name of the file, ignoring the directory structure.
+ * Behaves like python's os.path.basename. If the fullPath is
+ * /dir/subdir/, an empty string is returned.
+ * @param fullPath Full path to the file.
+ * @return SkString The basename of the file - anything beyond the
+ * final slash, or the full name if there is no slash.
+ */
+ static SkString Basename(const char* fullPath);
+
+ /**
+ * Given a qualified file name returns the directory.
+     *  Behaves like python's os.path.dirname. If the fullPath is
+     *  /dir/subdir/, the return will be /dir/subdir.
+     *  @param fullPath Full path to the file.
+     *  @return SkString The dir containing the file - anything preceding the
+     *          final slash, or an empty string if there is no slash.
+ */
+ static SkString Dirname(const char* fullPath);
+};
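+
+// Example behavior (hypothetical paths, POSIX separator assumed):
+//   SkOSPath::Join("/tmp", "a.txt")   -> "/tmp/a.txt"
+//   SkOSPath::Basename("/tmp/a.txt")  -> "a.txt"
+//   SkOSPath::Dirname("/tmp/a.txt")   -> "/tmp"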
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
new file mode 100644
index 0000000000..bc89faebdd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkPaintFilterCanvas.h"
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkSurface.h"
+#include "src/core/SkTLazy.h"
+
+class SkPaintFilterCanvas::AutoPaintFilter {
+public:
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, const SkPaint* paint)
+ : fPaint(paint ? *paint : SkPaint()) {
+ fShouldDraw = canvas->onFilter(fPaint);
+ }
+
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, const SkPaint& paint)
+ : AutoPaintFilter(canvas, &paint) { }
+
+ const SkPaint& paint() const { return fPaint; }
+
+ bool shouldDraw() const { return fShouldDraw; }
+
+private:
+ SkPaint fPaint;
+ bool fShouldDraw;
+};
+
+SkPaintFilterCanvas::SkPaintFilterCanvas(SkCanvas *canvas)
+ : SkCanvasVirtualEnforcer<SkNWayCanvas>(canvas->imageInfo().width(),
+ canvas->imageInfo().height()) {
+
+ // Transfer matrix & clip state before adding the target canvas.
+ this->clipRect(SkRect::Make(canvas->getDeviceClipBounds()));
+ this->setMatrix(canvas->getTotalMatrix());
+
+ this->addCanvas(canvas);
+}
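+
+// A minimal subclass sketch (hypothetical NoMaskFilterCanvas; onFilter is the
+// hook that AutoPaintFilter above drives):
+//   class NoMaskFilterCanvas : public SkPaintFilterCanvas {
+//   public:
+//       using SkPaintFilterCanvas::SkPaintFilterCanvas;
+//   protected:
+//       bool onFilter(SkPaint& paint) const override {
+//           paint.setMaskFilter(nullptr);  // strip mask filters
+//           return true;                   // true = still draw the op
+//       }
+//   };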
+
+void SkPaintFilterCanvas::onDrawPaint(const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPaint(apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBehind(const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBehind(apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPoints(mode, count, pts, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRect(rect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRRect(rrect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawDRRect(outer, inner, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRegion(region, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawOval(rect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawArc(rect, startAngle, sweepAngle, useCenter, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPath(path, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmap(const SkBitmap& bm, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBitmap(bm, left, top, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmapRect(const SkBitmap& bm, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBitmapRect(bm, src, dst, &apf.paint(), constraint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmapNine(const SkBitmap& bm, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBitmapNine(bm, center, dst, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBitmapLattice(bitmap, lattice, dst, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImage(image, left, top, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageRect(const SkImage* image, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImageRect(image, src, dst, &apf.paint(), constraint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImageNine(image, center, dst, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImageLattice(image, lattice, dst, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawVerticesObject(const SkVertices* vertices,
+ const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode bmode, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawVerticesObject(vertices, bones, boneCount, bmode, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPatch(const SkPoint cubics[], const SkColor colors[],
+ const SkPoint texCoords[], SkBlendMode bmode,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPatch(cubics, colors, texCoords, bmode, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* m,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPicture(picture, m, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // There is no paint to filter in this case, but we can still filter on type.
+    // Subclasses need to unroll the drawable explicitly (by overriding this method) in
+ // order to actually filter nested content.
+ AutoPaintFilter apf(this, nullptr);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawDrawable(drawable, matrix);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawTextBlob(blob, x, y, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawAtlas(const SkImage* image, const SkRSXform xform[],
+ const SkRect tex[], const SkColor colors[], int count,
+ SkBlendMode bmode, const SkRect* cull, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawAtlas(image, xform, tex, colors, count, bmode, cull, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ this->SkNWayCanvas::onDrawAnnotation(rect, key, value);
+}
+
+void SkPaintFilterCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ this->SkNWayCanvas::onDrawShadowRec(path, rec);
+}
+
+void SkPaintFilterCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ SkPaint paint;
+ paint.setColor(color);
+ paint.setBlendMode(mode);
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawEdgeAAQuad(rect, clip, aa, apf.paint().getColor4f(),
+ apf.paint().getBlendMode());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawEdgeAAImageSet(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawEdgeAAImageSet(
+ set, count, dstClips, preViewMatrices, &apf.paint(), constraint);
+ }
+}
+
+sk_sp<SkSurface> SkPaintFilterCanvas::onNewSurface(const SkImageInfo& info,
+ const SkSurfaceProps& props) {
+ return proxy()->makeSurface(info, &props);
+}
+
+bool SkPaintFilterCanvas::onPeekPixels(SkPixmap* pixmap) {
+ return proxy()->peekPixels(pixmap);
+}
+
+bool SkPaintFilterCanvas::onAccessTopLayerPixels(SkPixmap* pixmap) {
+ SkImageInfo info;
+ size_t rowBytes;
+
+ void* addr = proxy()->accessTopLayerPixels(&info, &rowBytes);
+ if (!addr) {
+ return false;
+ }
+
+ pixmap->reset(info, addr, rowBytes);
+ return true;
+}
+
+SkImageInfo SkPaintFilterCanvas::onImageInfo() const {
+ return proxy()->imageInfo();
+}
+
+bool SkPaintFilterCanvas::onGetProps(SkSurfaceProps* props) const {
+ return proxy()->getProps(props);
+}
diff --git a/gfx/skia/skia/src/utils/SkParse.cpp b/gfx/skia/skia/src/utils/SkParse.cpp
new file mode 100644
index 0000000000..4cbdaa5822
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParse.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/utils/SkParse.h"
+
+#include <stdlib.h>
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c)
+{
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c)
+{
+ return is_ws(c) || c == ',' || c == ';';
+}
+
+static int to_hex(int c)
+{
+ if (is_digit(c))
+ return c - '0';
+
+ c |= 0x20; // make us lower-case
+ if (is_between(c, 'a', 'f'))
+ return c + 10 - 'a';
+ else
+ return -1;
+}
+
+static inline bool is_hex(int c)
+{
+ return to_hex(c) >= 0;
+}
+
+static const char* skip_ws(const char str[])
+{
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[])
+{
+ SkASSERT(str);
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+int SkParse::Count(const char str[])
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c) == false);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c));
+ } while (true);
+goHome:
+ return count;
+}
+
+int SkParse::Count(const char str[], char separator)
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c != separator);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c == separator);
+ } while (true);
+goHome:
+ return count;
+}
+
+const char* SkParse::FindHex(const char str[], uint32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ if (!is_hex(*str))
+ return nullptr;
+
+ uint32_t n = 0;
+ int max_digits = 8;
+ int digit;
+
+ while ((digit = to_hex(*str)) >= 0)
+ {
+ if (--max_digits < 0)
+ return nullptr;
+ n = (n << 4) | digit;
+ str += 1;
+ }
+
+ if (*str == 0 || is_ws(*str))
+ {
+ if (value)
+ *value = n;
+ return str;
+ }
+ return nullptr;
+}
+
+const char* SkParse::FindS32(const char str[], int32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 0;
+ if (*str == '-')
+ {
+ sign = -1;
+ str += 1;
+ }
+
+ if (!is_digit(*str))
+ return nullptr;
+
+ int n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ }
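+    // (n ^ sign) - sign negates n when sign == -1 and leaves it unchanged
+    // when sign == 0 (two's-complement identity).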
+ if (value)
+ *value = (n ^ sign) - sign;
+ return str;
+}
+
+const char* SkParse::FindMSec(const char str[], SkMSec* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 0;
+ if (*str == '-')
+ {
+ sign = -1;
+ str += 1;
+ }
+
+ if (!is_digit(*str))
+ return nullptr;
+
+ int n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ }
+ int remaining10s = 3;
+ if (*str == '.') {
+ str++;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ if (--remaining10s == 0)
+ break;
+ }
+ }
+ while (--remaining10s >= 0)
+ n *= 10;
+ if (value)
+ *value = (n ^ sign) - sign;
+ return str;
+}
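+
+// Example (hypothetical input): FindMSec("1.5", &ms) stores 1500; the value
+// is read as seconds with up to three decimal digits of milliseconds.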
+
+const char* SkParse::FindScalar(const char str[], SkScalar* value) {
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ char* stop;
+ float v = (float)strtod(str, &stop);
+ if (str == stop) {
+ return nullptr;
+ }
+ if (value) {
+ *value = v;
+ }
+ return stop;
+}
+
+const char* SkParse::FindScalars(const char str[], SkScalar value[], int count)
+{
+ SkASSERT(count >= 0);
+
+ if (count > 0)
+ {
+ for (;;)
+ {
+ str = SkParse::FindScalar(str, value);
+ if (--count == 0 || str == nullptr)
+ break;
+
+ // keep going
+ str = skip_sep(str);
+ if (value)
+ value += 1;
+ }
+ }
+ return str;
+}
+
+static bool lookup_str(const char str[], const char** table, int count)
+{
+ while (--count >= 0)
+ if (!strcmp(str, table[count]))
+ return true;
+ return false;
+}
+
+bool SkParse::FindBool(const char str[], bool* value)
+{
+ static const char* gYes[] = { "yes", "1", "true" };
+ static const char* gNo[] = { "no", "0", "false" };
+
+ if (lookup_str(str, gYes, SK_ARRAY_COUNT(gYes)))
+ {
+ if (value) *value = true;
+ return true;
+ }
+ else if (lookup_str(str, gNo, SK_ARRAY_COUNT(gNo)))
+ {
+ if (value) *value = false;
+ return true;
+ }
+ return false;
+}
+
+int SkParse::FindList(const char target[], const char list[])
+{
+ size_t len = strlen(target);
+ int index = 0;
+
+ for (;;)
+ {
+ const char* end = strchr(list, ',');
+ size_t entryLen;
+
+ if (end == nullptr) // last entry
+ entryLen = strlen(list);
+ else
+ entryLen = end - list;
+
+ if (entryLen == len && memcmp(target, list, len) == 0)
+ return index;
+ if (end == nullptr)
+ break;
+
+ list = end + 1; // skip the ','
+ index += 1;
+ }
+ return -1;
+}
diff --git a/gfx/skia/skia/src/utils/SkParseColor.cpp b/gfx/skia/skia/src/utils/SkParseColor.cpp
new file mode 100644
index 0000000000..ba7c51048f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParseColor.cpp
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/utils/SkParse.h"
+
+static const unsigned int gColorNames[] = {
+0x85891945, 0x32a50000, 0x00f0f8ff, // aliceblue
+0x85d44c6b, 0x16e84d0a, 0x00faebd7, // antiquewhite
+0x86350800, 0x0000ffff, // aqua
+0x86350b43, 0x492e2800, 0x007fffd4, // aquamarine
+0x87559140, 0x00f0ffff, // azure
+0x88a93940, 0x00f5f5dc, // beige
+0x89338d4a, 0x00ffe4c4, // bisque
+0x89811ac0, 0x00000000, // black
+0x898170d1, 0x1481635f, 0x38800000, 0x00ffebcd, // blanchedalmond
+0x89952800, 0x000000ff, // blue
+0x89952d93, 0x3d85a000, 0x008a2be2, // blueviolet
+0x8a4fbb80, 0x00a52a2a, // brown
+0x8ab2666f, 0x3de40000, 0x00deb887, // burlywood
+0x8c242d05, 0x32a50000, 0x005f9ea0, // cadetblue
+0x8d019525, 0x16b32800, 0x007fff00, // chartreuse
+0x8d0f1bd9, 0x06850000, 0x00d2691e, // chocolate
+0x8df20b00, 0x00ff7f50, // coral
+0x8df27199, 0x3ee59099, 0x54a00000, 0x006495ed, // cornflowerblue
+0x8df274d3, 0x31600000, 0x00fff8dc, // cornsilk
+0x8e496cdf, 0x38000000, 0x00dc143c, // crimson
+0x8f217000, 0x0000ffff, // cyan
+0x90325899, 0x54a00000, 0x0000008b, // darkblue
+0x903258f3, 0x05c00000, 0x00008b8b, // darkcyan
+0x903259df, 0x3085749f, 0x10000000, 0x00b8860b, // darkgoldenrod
+0x903259e5, 0x07200000, 0x00a9a9a9, // darkgray
+0x903259e5, 0x14ae0000, 0x00006400, // darkgreen
+0x90325ad1, 0x05690000, 0x00bdb76b, // darkkhaki
+0x90325b43, 0x1caea040, 0x008b008b, // darkmagenta
+0x90325bd9, 0x26c53c8b, 0x15c00000, 0x00556b2f, // darkolivegreen
+0x90325be5, 0x05c72800, 0x00ff8c00, // darkorange
+0x90325be5, 0x0d092000, 0x009932cc, // darkorchid
+0x90325c8b, 0x10000000, 0x008b0000, // darkred
+0x90325cc3, 0x31af7000, 0x00e9967a, // darksalmon
+0x90325ccb, 0x04f2295c, 0x008fbc8f, // darkseagreen
+0x90325cd9, 0x0685132b, 0x14000000, 0x00483d8b, // darkslateblue
+0x90325cd9, 0x06853c83, 0x64000000, 0x002f4f4f, // darkslategray
+0x90325d2b, 0x4a357a67, 0x14000000, 0x0000ced1, // darkturquoise
+0x90325d93, 0x3d85a000, 0x009400d3, // darkviolet
+0x90a58413, 0x39600000, 0x00ff1493, // deeppink
+0x90a584d7, 0x644ca940, 0x0000bfff, // deepskyblue
+0x912d3c83, 0x64000000, 0x00696969, // dimgray
+0x91e43965, 0x09952800, 0x001e90ff, // dodgerblue
+0x993228a5, 0x246b0000, 0x00b22222, // firebrick
+0x998f9059, 0x5d09a140, 0x00fffaf0, // floralwhite
+0x99f22ce9, 0x1e452b80, 0x00228b22, // forestgreen
+0x9aa344d3, 0x04000000, 0x00ff00ff, // fuchsia
+0x9c2974c5, 0x3e4f0000, 0x00dcdcdc, // gainsboro
+0x9d0f9d2f, 0x21342800, 0x00f8f8ff, // ghostwhite
+0x9dec2000, 0x00ffd700, // gold
+0x9dec215d, 0x49e40000, 0x00daa520, // goldenrod
+0x9e41c800, 0x00808080, // gray
+0x9e452b80, 0x00008000, // green
+0x9e452bb3, 0x158c7dc0, 0x00adff2f, // greenyellow
+0xa1ee2e49, 0x16e00000, 0x00f0fff0, // honeydew
+0xa1f4825d, 0x2c000000, 0x00ff69b4, // hotpink
+0xa5c4485d, 0x48a40000, 0x00cd5c5c, // indianred
+0xa5c449de, 0x004b0082, // indigo
+0xa6cf9640, 0x00fffff0, // ivory
+0xad015a40, 0x00f0e68c, // khaki
+0xb0362b89, 0x16400000, 0x00e6e6fa, // lavender
+0xb0362b89, 0x16426567, 0x20000000, 0x00fff0f5, // lavenderblush
+0xb03771e5, 0x14ae0000, 0x007cfc00, // lawngreen
+0xb0ad7b87, 0x212633dc, 0x00fffacd, // lemonchiffon
+0xb1274505, 0x32a50000, 0x00add8e6, // lightblue
+0xb1274507, 0x3e416000, 0x00f08080, // lightcoral
+0xb1274507, 0x642e0000, 0x00e0ffff, // lightcyan
+0xb127450f, 0x3d842ba5, 0x3c992b19, 0x3ee00000, 0x00fafad2, // lightgoldenrodyellow
+0xb127450f, 0x48a57000, 0x0090ee90, // lightgreen
+0xb127450f, 0x48b90000, 0x00d3d3d3, // lightgrey
+0xb1274521, 0x25cb0000, 0x00ffb6c1, // lightpink
+0xb1274527, 0x058d7b80, 0x00ffa07a, // lightsalmon
+0xb1274527, 0x1427914b, 0x38000000, 0x0020b2aa, // lightseagreen
+0xb1274527, 0x2f22654a, 0x0087cefa, // lightskyblue
+0xb1274527, 0x303429e5, 0x07200000, 0x00778899, // lightslategray
+0xb1274527, 0x50a56099, 0x54a00000, 0x00b0c4de, // lightsteelblue
+0xb1274533, 0x158c7dc0, 0x00ffffe0, // lightyellow
+0xb12d2800, 0x0000ff00, // lime
+0xb12d29e5, 0x14ae0000, 0x0032cd32, // limegreen
+0xb12e2b80, 0x00faf0e6, // linen
+0xb4272ba9, 0x04000000, 0x00ff00ff, // magenta
+0xb4327bdc, 0x00800000, // maroon
+0xb4a44d5b, 0x06350b43, 0x492e2800, 0x0066cdaa, // mediumaquamarine
+0xb4a44d5b, 0x09952800, 0x000000cd, // mediumblue
+0xb4a44d5b, 0x3e434248, 0x00ba55d3, // mediumorchid
+0xb4a44d5b, 0x42b2830a, 0x009370db, // mediumpurple
+0xb4a44d5b, 0x4ca13c8b, 0x15c00000, 0x003cb371, // mediumseagreen
+0xb4a44d5b, 0x4d81a145, 0x32a50000, 0x007b68ee, // mediumslateblue
+0xb4a44d5b, 0x4e124b8f, 0x1e452b80, 0x0000fa9a, // mediumspringgreen
+0xb4a44d5b, 0x52b28d5f, 0x26650000, 0x0048d1cc, // mediumturquoise
+0xb4a44d5b, 0x592f6169, 0x48a40000, 0x00c71585, // mediumvioletred
+0xb524724f, 0x2282654a, 0x00191970, // midnightblue
+0xb52ea0e5, 0x142d0000, 0x00f5fffa, // mintcream
+0xb533a665, 0x3e650000, 0x00ffe4e1, // mistyrose
+0xb5e31867, 0x25c00000, 0x00ffe4b5, // moccasin
+0xb8360a9f, 0x5d09a140, 0x00ffdead, // navajowhite
+0xb836c800, 0x00000080, // navy
+0xbd846047, 0x14000000, 0x00fdf5e6, // oldlace
+0xbd89b140, 0x00808000, // olive
+0xbd89b149, 0x48220000, 0x006b8e23, // olivedrab
+0xbe4171ca, 0x00ffa500, // orange
+0xbe4171cb, 0x48a40000, 0x00ff4500, // orangered
+0xbe434248, 0x00da70d6, // orchid
+0xc02c29df, 0x3085749f, 0x10000000, 0x00eee8aa, // palegoldenrod
+0xc02c29e5, 0x14ae0000, 0x0098fb98, // palegreen
+0xc02c2d2b, 0x4a357a67, 0x14000000, 0x00afeeee, // paleturquoise
+0xc02c2d93, 0x3d85a48b, 0x10000000, 0x00db7093, // palevioletred
+0xc0300e43, 0x5d098000, 0x00ffefd5, // papayawhip
+0xc0a11a21, 0x54c60000, 0x00ffdab9, // peachpuff
+0xc0b2a800, 0x00cd853f, // peru
+0xc12e5800, 0x00ffc0cb, // pink
+0xc1956800, 0x00dda0dd, // plum
+0xc1f72165, 0x09952800, 0x00b0e0e6, // powderblue
+0xc2b2830a, 0x00800080, // purple
+0xc8a40000, 0x00ff0000, // red
+0xc9f3c8a5, 0x3eee0000, 0x00bc8f8f, // rosybrown
+0xc9f90b05, 0x32a50000, 0x004169e1, // royalblue
+0xcc24230b, 0x0a4fbb80, 0x008b4513, // saddlebrown
+0xcc2c6bdc, 0x00fa8072, // salmon
+0xcc2e2645, 0x49f77000, 0x00f4a460, // sandybrown
+0xcca13c8b, 0x15c00000, 0x002e8b57, // seagreen
+0xcca19a0b, 0x31800000, 0x00fff5ee, // seashell
+0xcd257382, 0x00a0522d, // sienna
+0xcd2cb164, 0x00c0c0c0, // silver
+0xcd79132b, 0x14000000, 0x0087ceeb, // skyblue
+0xcd81a145, 0x32a50000, 0x006a5acd, // slateblue
+0xcd81a14f, 0x48390000, 0x00708090, // slategray
+0xcdcfb800, 0x00fffafa, // snow
+0xce124b8f, 0x1e452b80, 0x0000ff7f, // springgreen
+0xce852b05, 0x32a50000, 0x004682b4, // steelblue
+0xd02e0000, 0x00d2b48c, // tan
+0xd0a16000, 0x00008080, // teal
+0xd1099d19, 0x14000000, 0x00d8bfd8, // thistle
+0xd1ed0d1e, 0x00ff6347, // tomato
+0xd2b28d5f, 0x26650000, 0x0040e0d0, // turquoise
+0xd92f6168, 0x00ee82ee, // violet
+0xdd050d00, 0x00f5deb3, // wheat
+0xdd09a140, 0x00ffffff, // white
+0xdd09a167, 0x35eb2800, 0x00f5f5f5, // whitesmoke
+0xe4ac63ee, 0x00ffff00, // yellow
+0xe4ac63ef, 0x1e452b80, 0x009acd32 // yellowgreen
+}; // original = 2505 : replacement = 1616
+
+
+const char* SkParse::FindNamedColor(const char* name, size_t len, SkColor* color) {
+ const char* namePtr = name;
+ unsigned int sixMatches[4];
+ unsigned int* sixMatchPtr = sixMatches;
+ bool first = true;
+ bool last = false;
+ char ch;
+ do {
+ unsigned int sixMatch = 0;
+ for (int chIndex = 0; chIndex < 6; chIndex++) {
+ sixMatch <<= 5;
+ ch = *namePtr | 0x20;
+ if (ch < 'a' || ch > 'z')
+ ch = 0;
+ else {
+ ch = ch - 'a' + 1;
+ namePtr++;
+ }
+            sixMatch |= ch;  // append this letter's 5-bit code (0 if not a letter)
+ }
+ sixMatch <<= 1;
+ sixMatch |= 1;
+ if (first) {
+ sixMatch |= 0x80000000;
+ first = false;
+ }
+ ch = *namePtr | 0x20;
+ last = ch < 'a' || ch > 'z';
+ if (last)
+ sixMatch &= ~1;
+ len -= 6;
+ *sixMatchPtr++ = sixMatch;
+ } while (last == false && len > 0);
+ const int colorNameSize = sizeof(gColorNames) / sizeof(unsigned int);
+ int lo = 0;
+ int hi = colorNameSize - 3; // back off to beginning of yellowgreen
+ while (lo <= hi) {
+ int mid = (hi + lo) >> 1;
+ while ((int) gColorNames[mid] >= 0)
+ --mid;
+ sixMatchPtr = sixMatches;
+ while (gColorNames[mid] == *sixMatchPtr) {
+ ++mid;
+ if ((*sixMatchPtr & 1) == 0) { // last
+ *color = gColorNames[mid] | 0xFF000000;
+ return namePtr;
+ }
+ ++sixMatchPtr;
+ }
+ int sixMask = *sixMatchPtr & ~0x80000000;
+ int midMask = gColorNames[mid] & ~0x80000000;
+ if (sixMask > midMask) {
+ lo = mid + 2; // skip color
+ while ((int) gColorNames[lo] >= 0)
+ ++lo;
+ } else if (hi == mid)
+ return nullptr;
+ else
+ hi = mid;
+ }
+ return nullptr;
+}
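+
+// Example (a check against the table above): FindNamedColor("red", 3, &c)
+// matches the packed entry 0xc8a40000 and stores c = 0xFFFF0000 (opaque red).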
+
+// !!! move to char utilities
+//static int count_separators(const char* str, const char* sep) {
+// char c;
+// int separators = 0;
+// while ((c = *str++) != '\0') {
+// if (strchr(sep, c) == nullptr)
+// continue;
+// do {
+// if ((c = *str++) == '\0')
+// goto goHome;
+// } while (strchr(sep, c) != nullptr);
+// separators++;
+// }
+//goHome:
+// return separators;
+//}
+
+static inline unsigned nib2byte(unsigned n)
+{
+ SkASSERT((n & ~0xF) == 0);
+ return (n << 4) | n;
+}
+
+const char* SkParse::FindColor(const char* value, SkColor* colorPtr) {
+ unsigned int oldAlpha = SkColorGetA(*colorPtr);
+ if (value[0] == '#') {
+ uint32_t hex;
+ const char* end = SkParse::FindHex(value + 1, &hex);
+// SkASSERT(end);
+ if (end == nullptr)
+ return end;
+ size_t len = end - value - 1;
+ if (len == 3 || len == 4) {
+ unsigned a = len == 4 ? nib2byte(hex >> 12) : oldAlpha;
+ unsigned r = nib2byte((hex >> 8) & 0xF);
+ unsigned g = nib2byte((hex >> 4) & 0xF);
+ unsigned b = nib2byte(hex & 0xF);
+ *colorPtr = SkColorSetARGB(a, r, g, b);
+ return end;
+ } else if (len == 6 || len == 8) {
+ if (len == 6)
+ hex |= oldAlpha << 24;
+ *colorPtr = hex;
+ return end;
+ } else {
+// SkASSERT(0);
+ return nullptr;
+ }
+// } else if (strchr(value, ',')) {
+// SkScalar array[4];
+// int count = count_separators(value, ",") + 1; // !!! count commas, add 1
+// SkASSERT(count == 3 || count == 4);
+// array[0] = SK_Scalar1 * 255;
+// const char* end = SkParse::FindScalars(value, &array[4 - count], count);
+// if (end == nullptr)
+// return nullptr;
+ // !!! range check for errors?
+// *colorPtr = SkColorSetARGB(SkScalarRoundToInt(array[0]), SkScalarRoundToInt(array[1]),
+// SkScalarRoundToInt(array[2]), SkScalarRoundToInt(array[3]));
+// return end;
+ } else
+ return FindNamedColor(value, strlen(value), colorPtr);
+}
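+
+// Example (hypothetical values): starting from an opaque *colorPtr,
+//   SkParse::FindColor("#F80", &color);  // color == 0xFFFF8800
+// each hex nibble is doubled by nib2byte() and the old alpha is preserved.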
diff --git a/gfx/skia/skia/src/utils/SkParsePath.cpp b/gfx/skia/skia/src/utils/SkParsePath.cpp
new file mode 100644
index 0000000000..8c9469b98f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParsePath.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/utils/SkParse.h"
+#include "include/utils/SkParsePath.h"
+
+static inline bool is_between(int c, int min, int max) {
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c) {
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c) {
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c) {
+ return is_ws(c) || c == ',';
+}
+
+static inline bool is_lower(int c) {
+ return is_between(c, 'a', 'z');
+}
+
+static inline int to_upper(int c) {
+ return c - 'a' + 'A';
+}
+
+static const char* skip_ws(const char str[]) {
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[]) {
+ if (!str) {
+ return nullptr;
+ }
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+static const char* find_points(const char str[], SkPoint value[], int count,
+ bool isRelative, SkPoint* relative) {
+ str = SkParse::FindScalars(str, &value[0].fX, count * 2);
+ if (isRelative) {
+ for (int index = 0; index < count; index++) {
+ value[index].fX += relative->fX;
+ value[index].fY += relative->fY;
+ }
+ }
+ return str;
+}
+
+static const char* find_scalar(const char str[], SkScalar* value,
+ bool isRelative, SkScalar relative) {
+ str = SkParse::FindScalar(str, value);
+ if (!str) {
+ return nullptr;
+ }
+ if (isRelative) {
+ *value += relative;
+ }
+ str = skip_sep(str);
+ return str;
+}
+
+bool SkParsePath::FromSVGString(const char data[], SkPath* result) {
+ SkPath path;
+ SkPoint first = {0, 0};
+ SkPoint c = {0, 0};
+ SkPoint lastc = {0, 0};
+ SkPoint points[3];
+ char op = '\0';
+ char previousOp = '\0';
+ bool relative = false;
+ for (;;) {
+ if (!data) {
+ // Truncated data
+ return false;
+ }
+ data = skip_ws(data);
+ if (data[0] == '\0') {
+ break;
+ }
+ char ch = data[0];
+ if (is_digit(ch) || ch == '-' || ch == '+' || ch == '.') {
+ if (op == '\0') {
+ return false;
+ }
+ } else if (is_sep(ch)) {
+ data = skip_sep(data);
+ } else {
+ op = ch;
+ relative = false;
+ if (is_lower(op)) {
+ op = (char) to_upper(op);
+ relative = true;
+ }
+ data++;
+ data = skip_sep(data);
+ }
+ switch (op) {
+ case 'M':
+ data = find_points(data, points, 1, relative, &c);
+ path.moveTo(points[0]);
+ previousOp = '\0';
+ op = 'L';
+ c = points[0];
+ break;
+ case 'L':
+ data = find_points(data, points, 1, relative, &c);
+ path.lineTo(points[0]);
+ c = points[0];
+ break;
+ case 'H': {
+ SkScalar x;
+ data = find_scalar(data, &x, relative, c.fX);
+ path.lineTo(x, c.fY);
+ c.fX = x;
+ } break;
+ case 'V': {
+ SkScalar y;
+ data = find_scalar(data, &y, relative, c.fY);
+ path.lineTo(c.fX, y);
+ c.fY = y;
+ } break;
+ case 'C':
+ data = find_points(data, points, 3, relative, &c);
+ goto cubicCommon;
+ case 'S':
+ data = find_points(data, &points[1], 2, relative, &c);
+ points[0] = c;
+ if (previousOp == 'C' || previousOp == 'S') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ cubicCommon:
+ path.cubicTo(points[0], points[1], points[2]);
+ lastc = points[1];
+ c = points[2];
+ break;
+ case 'Q': // Quadratic Bezier Curve
+ data = find_points(data, points, 2, relative, &c);
+ goto quadraticCommon;
+ case 'T':
+ data = find_points(data, &points[1], 1, relative, &c);
+ points[0] = c;
+ if (previousOp == 'Q' || previousOp == 'T') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ quadraticCommon:
+ path.quadTo(points[0], points[1]);
+ lastc = points[0];
+ c = points[1];
+ break;
+ case 'A': {
+ SkPoint radii;
+ SkScalar angle, largeArc, sweep;
+ if ((data = find_points(data, &radii, 1, false, nullptr))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &angle, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &largeArc, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &sweep, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_points(data, &points[0], 1, relative, &c))) {
+ path.arcTo(radii, angle, (SkPath::ArcSize) SkToBool(largeArc),
+ (SkPath::Direction) !SkToBool(sweep), points[0]);
+ path.getLastPt(&c);
+ }
+ } break;
+ case 'Z':
+ path.close();
+ c = first;
+ break;
+ case '~': {
+ SkPoint args[2];
+ data = find_points(data, args, 2, false, nullptr);
+ path.moveTo(args[0].fX, args[0].fY);
+ path.lineTo(args[1].fX, args[1].fY);
+ } break;
+ default:
+ return false;
+ }
+ if (previousOp == 0) {
+ first = c;
+ }
+ previousOp = op;
+ }
+ // we're good, go ahead and swap in the result
+ result->swap(path);
+ return true;
+}
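+
+// A minimal usage sketch (hypothetical path data):
+//   SkPath p;
+//   if (SkParsePath::FromSVGString("M0 0 L10 0 L10 10 Z", &p)) {
+//       // p now holds the closed triangle described by the string.
+//   }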
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "src/core/SkGeometry.h"
+
+static void write_scalar(SkWStream* stream, SkScalar value) {
+ char buffer[64];
+#ifdef SK_BUILD_FOR_WIN
+ int len = _snprintf(buffer, sizeof(buffer), "%g", value);
+#else
+ int len = snprintf(buffer, sizeof(buffer), "%g", value);
+#endif
+ char* stop = buffer + len;
+ stream->write(buffer, stop - buffer);
+}
+
+static void append_scalars(SkWStream* stream, char verb, const SkScalar data[],
+ int count) {
+ stream->write(&verb, 1);
+ write_scalar(stream, data[0]);
+ for (int i = 1; i < count; i++) {
+ stream->write(" ", 1);
+ write_scalar(stream, data[i]);
+ }
+}
+
+void SkParsePath::ToSVGString(const SkPath& path, SkString* str) {
+ SkDynamicMemoryWStream stream;
+
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+
+ for (;;) {
+ switch (iter.next(pts)) {
+ case SkPath::kConic_Verb: {
+ const SkScalar tol = SK_Scalar1 / 1024; // how close to a quad
+ SkAutoConicToQuads quadder;
+ const SkPoint* quadPts = quadder.computeQuads(pts, iter.conicWeight(), tol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ append_scalars(&stream, 'Q', &quadPts[i*2 + 1].fX, 4);
+ }
+ } break;
+ case SkPath::kMove_Verb:
+ append_scalars(&stream, 'M', &pts[0].fX, 2);
+ break;
+ case SkPath::kLine_Verb:
+ append_scalars(&stream, 'L', &pts[1].fX, 2);
+ break;
+ case SkPath::kQuad_Verb:
+ append_scalars(&stream, 'Q', &pts[1].fX, 4);
+ break;
+ case SkPath::kCubic_Verb:
+ append_scalars(&stream, 'C', &pts[1].fX, 6);
+ break;
+ case SkPath::kClose_Verb:
+ stream.write("Z", 1);
+ break;
+ case SkPath::kDone_Verb:
+ str->resize(stream.bytesWritten());
+ stream.copyTo(str->writable_str());
+ return;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.cpp b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
new file mode 100644
index 0000000000..93873318b9
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkPatchUtils.h"
+
+#include "include/private/SkColorData.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkGeometry.h"
+
+namespace {
+ enum CubicCtrlPts {
+ kTopP0_CubicCtrlPts = 0,
+ kTopP1_CubicCtrlPts = 1,
+ kTopP2_CubicCtrlPts = 2,
+ kTopP3_CubicCtrlPts = 3,
+
+ kRightP0_CubicCtrlPts = 3,
+ kRightP1_CubicCtrlPts = 4,
+ kRightP2_CubicCtrlPts = 5,
+ kRightP3_CubicCtrlPts = 6,
+
+ kBottomP0_CubicCtrlPts = 9,
+ kBottomP1_CubicCtrlPts = 8,
+ kBottomP2_CubicCtrlPts = 7,
+ kBottomP3_CubicCtrlPts = 6,
+
+ kLeftP0_CubicCtrlPts = 0,
+ kLeftP1_CubicCtrlPts = 11,
+ kLeftP2_CubicCtrlPts = 10,
+ kLeftP3_CubicCtrlPts = 9,
+ };
+
+    // Enum for the corners, also in clockwise order.
+ enum Corner {
+ kTopLeft_Corner = 0,
+ kTopRight_Corner,
+ kBottomRight_Corner,
+ kBottomLeft_Corner
+ };
+}
+
+/**
+ * Evaluator to sample the values of a cubic bezier using forward differences.
+ * Forward differences is a method for evaluating an nth-degree polynomial at a uniform step by
+ * only adding precalculated values.
+ * For a linear example, take the function f(t) = m*t + b; its value at t+h is
+ * f(t+h) = m*(t+h) + b. The uniform step that must be added to the first evaluation f(t) is
+ * f(t+h) - f(t) = m*t + m*h + b - (m*t + b) = m*h. After obtaining this value (m*h) we can just
+ * add this constant step to each sampled point to compute the next one.
+ *
+ * For the cubic case, the first difference is a quadratic polynomial; applying forward
+ * differences to it again yields a linear function, and once more a constant difference. This is
+ * why we keep an array of size 4: the 0th position keeps the sampled value while the next ones
+ * keep the quadratic, linear and constant difference values.
+ */
+
+class FwDCubicEvaluator {
+
+public:
+
+ /**
+ * Receives the 4 control points of the cubic bezier.
+ */
+
+ explicit FwDCubicEvaluator(const SkPoint points[4])
+ : fCoefs(points) {
+ memcpy(fPoints, points, 4 * sizeof(SkPoint));
+
+ this->restart(1);
+ }
+
+ /**
+ * Restarts the forward differences evaluator to the first value of t = 0.
+ */
+ void restart(int divisions) {
+ fDivisions = divisions;
+ fCurrent = 0;
+ fMax = fDivisions + 1;
+ Sk2s h = Sk2s(1.f / fDivisions);
+ Sk2s h2 = h * h;
+ Sk2s h3 = h2 * h;
+ Sk2s fwDiff3 = Sk2s(6) * fCoefs.fA * h3;
+ fFwDiff[3] = to_point(fwDiff3);
+ fFwDiff[2] = to_point(fwDiff3 + times_2(fCoefs.fB) * h2);
+ fFwDiff[1] = to_point(fCoefs.fA * h3 + fCoefs.fB * h2 + fCoefs.fC * h);
+ fFwDiff[0] = to_point(fCoefs.fD);
+ }
+
+ /**
+     * Returns true once the evaluator has consumed all samples in the range 0 <= t <= 1.
+ */
+ bool done() const {
+ return fCurrent > fMax;
+ }
+
+ /**
+ * Call next to obtain the SkPoint sampled and move to the next one.
+ */
+ SkPoint next() {
+ SkPoint point = fFwDiff[0];
+ fFwDiff[0] += fFwDiff[1];
+ fFwDiff[1] += fFwDiff[2];
+ fFwDiff[2] += fFwDiff[3];
+ fCurrent++;
+ return point;
+ }
+
+ const SkPoint* getCtrlPoints() const {
+ return fPoints;
+ }
+
+private:
+ SkCubicCoeff fCoefs;
+ int fMax, fCurrent, fDivisions;
+ SkPoint fFwDiff[4], fPoints[4];
+};
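+
+// A minimal scalar sketch of the same scheme (illustrative only; sample_cubic is a
+// hypothetical helper, not used elsewhere), for f(t) = a*t^3 + b*t^2 + c*t + d
+// sampled at uniform steps of h = 1/n:
+#if 0
+static void sample_cubic(float a, float b, float c, float d, int n, float out[]) {
+    float h = 1.0f / n;
+    float f0 = d;                       // f(0)
+    float f1 = a*h*h*h + b*h*h + c*h;   // first difference at t = 0
+    float f2 = 6*a*h*h*h + 2*b*h*h;     // second difference at t = 0
+    float f3 = 6*a*h*h*h;               // third (constant) difference
+    for (int i = 0; i <= n; ++i) {
+        out[i] = f0;                    // f(i*h)
+        f0 += f1;                       // advance each difference order
+        f1 += f2;
+        f2 += f3;
+    }
+}
+#endif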
+
+////////////////////////////////////////////////////////////////////////////////
+
+// size in pixels of each partition per axis, adjust this knob
+static const int kPartitionSize = 10;
+
+/**
+ * Approximate a bezier curve's arc length by summing the distances between its control points.
+ * Returns -1 if the computed length is non-finite.
+ */
+static SkScalar approx_arc_length(const SkPoint points[], int count) {
+ if (count < 2) {
+ return 0;
+ }
+ SkScalar arcLength = 0;
+ for (int i = 0; i < count - 1; i++) {
+ arcLength += SkPoint::Distance(points[i], points[i + 1]);
+ }
+ return SkScalarIsFinite(arcLength) ? arcLength : -1;
+}
+
+static SkScalar bilerp(SkScalar tx, SkScalar ty, SkScalar c00, SkScalar c10, SkScalar c01,
+ SkScalar c11) {
+ SkScalar a = c00 * (1.f - tx) + c10 * tx;
+ SkScalar b = c01 * (1.f - tx) + c11 * tx;
+ return a * (1.f - ty) + b * ty;
+}
+
+static Sk4f bilerp(SkScalar tx, SkScalar ty,
+ const Sk4f& c00, const Sk4f& c10, const Sk4f& c01, const Sk4f& c11) {
+ Sk4f a = c00 * (1.f - tx) + c10 * tx;
+ Sk4f b = c01 * (1.f - tx) + c11 * tx;
+ return a * (1.f - ty) + b * ty;
+}
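+
+// e.g. bilerp(0.5f, 0.5f, c00, c10, c01, c11) returns the average of the four
+// corner values: tx blends along x first, then ty blends the two results.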
+
+SkISize SkPatchUtils::GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix) {
+ // Approximate length of each cubic.
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::GetTopCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar topLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetBottomCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar bottomLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetLeftCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar leftLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetRightCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar rightLength = approx_arc_length(pts, kNumPtsCubic);
+
+ if (topLength < 0 || bottomLength < 0 || leftLength < 0 || rightLength < 0) {
+ return {0, 0}; // negative length is a sentinel for bad length (i.e. non-finite)
+ }
+
+ // Level of detail per axis, based on the larger side between top and bottom or left and right
+ int lodX = static_cast<int>(SkMaxScalar(topLength, bottomLength) / kPartitionSize);
+ int lodY = static_cast<int>(SkMaxScalar(leftLength, rightLength) / kPartitionSize);
+
+ return SkISize::Make(SkMax32(8, lodX), SkMax32(8, lodY));
+}
+
+void SkPatchUtils::GetTopCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kTopP0_CubicCtrlPts];
+ points[1] = cubics[kTopP1_CubicCtrlPts];
+ points[2] = cubics[kTopP2_CubicCtrlPts];
+ points[3] = cubics[kTopP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetBottomCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kBottomP0_CubicCtrlPts];
+ points[1] = cubics[kBottomP1_CubicCtrlPts];
+ points[2] = cubics[kBottomP2_CubicCtrlPts];
+ points[3] = cubics[kBottomP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetLeftCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kLeftP0_CubicCtrlPts];
+ points[1] = cubics[kLeftP1_CubicCtrlPts];
+ points[2] = cubics[kLeftP2_CubicCtrlPts];
+ points[3] = cubics[kLeftP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetRightCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kRightP0_CubicCtrlPts];
+ points[1] = cubics[kRightP1_CubicCtrlPts];
+ points[2] = cubics[kRightP2_CubicCtrlPts];
+ points[3] = cubics[kRightP3_CubicCtrlPts];
+}
+
+static void skcolor_to_float(SkPMColor4f* dst, const SkColor* src, int count, SkColorSpace* dstCS) {
+ SkImageInfo srcInfo = SkImageInfo::Make(count, 1, kBGRA_8888_SkColorType,
+ kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkImageInfo dstInfo = SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType,
+ kPremul_SkAlphaType, sk_ref_sp(dstCS));
+ SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0);
+}
+
+static void float_to_skcolor(SkColor* dst, const SkPMColor4f* src, int count, SkColorSpace* srcCS) {
+ SkImageInfo srcInfo = SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType,
+ kPremul_SkAlphaType, sk_ref_sp(srcCS));
+ SkImageInfo dstInfo = SkImageInfo::Make(count, 1, kBGRA_8888_SkColorType,
+ kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0);
+}
+
+sk_sp<SkVertices> SkPatchUtils::MakeVertices(const SkPoint cubics[12], const SkColor srcColors[4],
+ const SkPoint srcTexCoords[4], int lodX, int lodY,
+ SkColorSpace* colorSpace) {
+ if (lodX < 1 || lodY < 1 || nullptr == cubics) {
+ return nullptr;
+ }
+
+ // check for overflow in multiplication
+ const int64_t lodX64 = (lodX + 1),
+ lodY64 = (lodY + 1),
+ mult64 = lodX64 * lodY64;
+ if (mult64 > SK_MaxS32) {
+ return nullptr;
+ }
+
+ // Treat null interpolation space as sRGB.
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+
+ int vertexCount = SkToS32(mult64);
+ // it is recommended to generate draw calls of no more than 65536 indices, so we never generate
+ // more than 60000 indices. To accomplish that we resize the LOD and vertex count
+ if (vertexCount > 10000 || lodX > 200 || lodY > 200) {
+ float weightX = static_cast<float>(lodX) / (lodX + lodY);
+ float weightY = static_cast<float>(lodY) / (lodX + lodY);
+
+        // 200 comes from 100 * 2, where 100 is the per-axis cap on the LOD implied by the
+        // 60000-index limit: indexCount = lodX * lodY * 6, so sqrt(60000 / 6) = 100 per axis.
+        // Need a min of 1 since we later divide by lod
+ lodX = std::max(1, sk_float_floor2int_no_saturate(weightX * 200));
+ lodY = std::max(1, sk_float_floor2int_no_saturate(weightY * 200));
+ vertexCount = (lodX + 1) * (lodY + 1);
+ }
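+    // e.g. (illustrative): lodX = 300, lodY = 100 above gives weights 0.75 and 0.25,
+    // so lodX -> 150, lodY -> 50, vertexCount = 151*51 = 7701, and
+    // indexCount = 150*50*6 = 45000 -- safely under the 60000-index budget.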
+ const int indexCount = lodX * lodY * 6;
+ uint32_t flags = 0;
+ if (srcTexCoords) {
+ flags |= SkVertices::kHasTexCoords_BuilderFlag;
+ }
+ if (srcColors) {
+ flags |= SkVertices::kHasColors_BuilderFlag;
+ }
+
+ SkSTArenaAlloc<2048> alloc;
+ SkPMColor4f* cornerColors = srcColors ? alloc.makeArray<SkPMColor4f>(4) : nullptr;
+ SkPMColor4f* tmpColors = srcColors ? alloc.makeArray<SkPMColor4f>(vertexCount) : nullptr;
+
+ SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, vertexCount, indexCount, flags);
+ SkPoint* pos = builder.positions();
+ SkPoint* texs = builder.texCoords();
+ uint16_t* indices = builder.indices();
+
+ if (cornerColors) {
+ skcolor_to_float(cornerColors, srcColors, kNumCorners, colorSpace);
+ }
+
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::GetBottomCubic(cubics, pts);
+ FwDCubicEvaluator fBottom(pts);
+ SkPatchUtils::GetTopCubic(cubics, pts);
+ FwDCubicEvaluator fTop(pts);
+ SkPatchUtils::GetLeftCubic(cubics, pts);
+ FwDCubicEvaluator fLeft(pts);
+ SkPatchUtils::GetRightCubic(cubics, pts);
+ FwDCubicEvaluator fRight(pts);
+
+ fBottom.restart(lodX);
+ fTop.restart(lodX);
+
+ SkScalar u = 0.0f;
+ int stride = lodY + 1;
+ for (int x = 0; x <= lodX; x++) {
+ SkPoint bottom = fBottom.next(), top = fTop.next();
+ fLeft.restart(lodY);
+ fRight.restart(lodY);
+ SkScalar v = 0.f;
+ for (int y = 0; y <= lodY; y++) {
+ int dataIndex = x * (lodY + 1) + y;
+
+ SkPoint left = fLeft.next(), right = fRight.next();
+
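+            // This is the standard bilinearly-blended Coons patch: blend the
+            // top/bottom boundary curves (s0) with the left/right ones (s1),
+            // then subtract the bilinear interpolation of the four corners
+            // (s2), which both blends count twice.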
+ SkPoint s0 = SkPoint::Make((1.0f - v) * top.x() + v * bottom.x(),
+ (1.0f - v) * top.y() + v * bottom.y());
+ SkPoint s1 = SkPoint::Make((1.0f - u) * left.x() + u * right.x(),
+ (1.0f - u) * left.y() + u * right.y());
+ SkPoint s2 = SkPoint::Make(
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].x()
+ + u * fTop.getCtrlPoints()[3].x())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].x()
+ + u * fBottom.getCtrlPoints()[3].x()),
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].y()
+ + u * fTop.getCtrlPoints()[3].y())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].y()
+ + u * fBottom.getCtrlPoints()[3].y()));
+ pos[dataIndex] = s0 + s1 - s2;
+
+ if (cornerColors) {
+ bilerp(u, v, Sk4f::Load(cornerColors[kTopLeft_Corner].vec()),
+ Sk4f::Load(cornerColors[kTopRight_Corner].vec()),
+ Sk4f::Load(cornerColors[kBottomLeft_Corner].vec()),
+ Sk4f::Load(cornerColors[kBottomRight_Corner].vec()))
+ .store(tmpColors[dataIndex].vec());
+ }
+
+ if (texs) {
+ texs[dataIndex] = SkPoint::Make(bilerp(u, v, srcTexCoords[kTopLeft_Corner].x(),
+ srcTexCoords[kTopRight_Corner].x(),
+ srcTexCoords[kBottomLeft_Corner].x(),
+ srcTexCoords[kBottomRight_Corner].x()),
+ bilerp(u, v, srcTexCoords[kTopLeft_Corner].y(),
+ srcTexCoords[kTopRight_Corner].y(),
+ srcTexCoords[kBottomLeft_Corner].y(),
+ srcTexCoords[kBottomRight_Corner].y()));
+
+ }
+
+            if (x < lodX && y < lodY) {
+ int i = 6 * (x * lodY + y);
+ indices[i] = x * stride + y;
+ indices[i + 1] = x * stride + 1 + y;
+ indices[i + 2] = (x + 1) * stride + 1 + y;
+ indices[i + 3] = indices[i];
+ indices[i + 4] = indices[i + 2];
+ indices[i + 5] = (x + 1) * stride + y;
+ }
+ v = SkScalarClampMax(v + 1.f / lodY, 1);
+ }
+ u = SkScalarClampMax(u + 1.f / lodX, 1);
+ }
+
+ if (tmpColors) {
+ float_to_skcolor(builder.colors(), tmpColors, vertexCount, colorSpace);
+ }
+ return builder.detach();
+}
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.h b/gfx/skia/skia/src/utils/SkPatchUtils.h
new file mode 100644
index 0000000000..41b491241f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPatchUtils_DEFINED
+#define SkPatchUtils_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+
+class SkColorSpace;
+
+class SK_API SkPatchUtils {
+
+public:
+ // Enums for control points based on the order specified in the constructor (clockwise).
+ enum {
+ kNumCtrlPts = 12,
+ kNumCorners = 4,
+ kNumPtsCubic = 4
+ };
+
+ /**
+ * Get the points corresponding to the top cubic of cubics.
+ */
+ static void GetTopCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the bottom cubic of cubics.
+ */
+ static void GetBottomCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the left cubic of cubics.
+ */
+ static void GetLeftCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the right cubic of cubics.
+ */
+ static void GetRightCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+     * Method that calculates a level of detail (number of subdivisions) for a patch in both axes.
+ */
+ static SkISize GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix);
+
+ static sk_sp<SkVertices> MakeVertices(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], int lodX, int lodY,
+ SkColorSpace* colorSpace = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkPolyUtils.cpp b/gfx/skia/skia/src/utils/SkPolyUtils.cpp
new file mode 100644
index 0000000000..f8f1954652
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.cpp
@@ -0,0 +1,1838 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkPolyUtils.h"
+
+#include <limits>
+
+#include "include/private/SkNx.h"
+#include "include/private/SkTArray.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkTDPQueue.h"
+#include "src/core/SkTInternalLList.h"
+
+//////////////////////////////////////////////////////////////////////////////////
+// Helper data structures and functions
+
+struct OffsetSegment {
+ SkPoint fP0;
+ SkVector fV;
+};
+
+constexpr SkScalar kCrossTolerance = SK_ScalarNearlyZero * SK_ScalarNearlyZero;
+
+// Computes perpDot for point p compared to segment defined by origin p0 and vector v.
+// A positive value means the point is to the left of the segment,
+// negative is to the right, 0 is collinear.
+static int compute_side(const SkPoint& p0, const SkVector& v, const SkPoint& p) {
+ SkVector w = p - p0;
+ SkScalar perpDot = v.cross(w);
+ if (!SkScalarNearlyZero(perpDot, kCrossTolerance)) {
+ return ((perpDot > 0) ? 1 : -1);
+ }
+
+ return 0;
+}
+
+// Returns the winding of the polygon: 1 if its signed area is positive (ccw in the y-up/math
+// convention), -1 if negative (cw), and 0 if the signed area is zero (either degenerate or
+// self-intersecting)
+int SkGetPolygonWinding(const SkPoint* polygonVerts, int polygonSize) {
+ if (polygonSize < 3) {
+ return 0;
+ }
+
+ // compute area and use sign to determine winding
+ SkScalar quadArea = 0;
+ SkVector v0 = polygonVerts[1] - polygonVerts[0];
+ for (int curr = 2; curr < polygonSize; ++curr) {
+ SkVector v1 = polygonVerts[curr] - polygonVerts[0];
+ quadArea += v0.cross(v1);
+ v0 = v1;
+ }
+ if (SkScalarNearlyZero(quadArea, kCrossTolerance)) {
+ return 0;
+ }
+ // 1 == ccw, -1 == cw
+ return (quadArea > 0) ? 1 : -1;
+}
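+
+// e.g. (illustrative): the square {(0,0), (1,0), (1,1), (0,1)} has positive
+// signed area (quadArea == 2), so SkGetPolygonWinding returns 1; reversing the
+// vertex order returns -1.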
+
+// Compute difference vector to offset p0-p1 'offset' units in direction specified by 'side'
+bool compute_offset_vector(const SkPoint& p0, const SkPoint& p1, SkScalar offset, int side,
+ SkPoint* vector) {
+ SkASSERT(side == -1 || side == 1);
+    // offset along the perpendicular to p0-p1, scaled to the requested distance
+ SkVector perp = SkVector::Make(p0.fY - p1.fY, p1.fX - p0.fX);
+ if (!perp.setLength(offset*side)) {
+ return false;
+ }
+ *vector = perp;
+ return true;
+}
+
+// check interval to see if intersection is in segment
+static inline bool outside_interval(SkScalar numer, SkScalar denom, bool denomPositive) {
+ return (denomPositive && (numer < 0 || numer > denom)) ||
+ (!denomPositive && (numer > 0 || numer < denom));
+}
+
+// Compute the intersection 'p' between segments s0 and s1, if any.
+// 's' is the parametric value for the intersection along 's0' & 't' is the same for 's1'.
+// Returns false if there is no intersection.
+static bool compute_intersection(const OffsetSegment& s0, const OffsetSegment& s1,
+ SkPoint* p, SkScalar* s, SkScalar* t) {
+ const SkVector& v0 = s0.fV;
+ const SkVector& v1 = s1.fV;
+ SkVector w = s1.fP0 - s0.fP0;
+ SkScalar denom = v0.cross(v1);
+ bool denomPositive = (denom > 0);
+ SkScalar sNumer, tNumer;
+ if (SkScalarNearlyZero(denom, kCrossTolerance)) {
+ // segments are parallel, but not collinear
+ if (!SkScalarNearlyZero(w.cross(v0), kCrossTolerance) ||
+ !SkScalarNearlyZero(w.cross(v1), kCrossTolerance)) {
+ return false;
+ }
+
+ // Check for zero-length segments
+ if (!SkPointPriv::CanNormalize(v0.fX, v0.fY)) {
+ // Both are zero-length
+ if (!SkPointPriv::CanNormalize(v1.fX, v1.fY)) {
+ // Check if they're the same point
+ if (!SkPointPriv::CanNormalize(w.fX, w.fY)) {
+ *p = s0.fP0;
+ *s = 0;
+ *t = 0;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ // Otherwise project segment0's origin onto segment1
+ tNumer = v1.dot(-w);
+ denom = v1.dot(v1);
+ if (outside_interval(tNumer, denom, true)) {
+ return false;
+ }
+ sNumer = 0;
+ } else {
+ // Project segment1's endpoints onto segment0
+ sNumer = v0.dot(w);
+ denom = v0.dot(v0);
+ tNumer = 0;
+ if (outside_interval(sNumer, denom, true)) {
+ // The first endpoint doesn't lie on segment0
+ // If segment1 is degenerate, then there's no collision
+ if (!SkPointPriv::CanNormalize(v1.fX, v1.fY)) {
+ return false;
+ }
+
+ // Otherwise try the other one
+ SkScalar oldSNumer = sNumer;
+ sNumer = v0.dot(w + v1);
+ tNumer = denom;
+ if (outside_interval(sNumer, denom, true)) {
+ // it's possible that segment1's interval surrounds segment0
+ // this is false if params have the same signs, and in that case no collision
+ if (sNumer*oldSNumer > 0) {
+ return false;
+ }
+ // otherwise project segment0's endpoint onto segment1 instead
+ sNumer = 0;
+ tNumer = v1.dot(-w);
+ denom = v1.dot(v1);
+ }
+ }
+ }
+ } else {
+ sNumer = w.cross(v1);
+ if (outside_interval(sNumer, denom, denomPositive)) {
+ return false;
+ }
+ tNumer = w.cross(v0);
+ if (outside_interval(tNumer, denom, denomPositive)) {
+ return false;
+ }
+ }
+
+ SkScalar localS = sNumer/denom;
+ SkScalar localT = tNumer/denom;
+
+ *p = s0.fP0 + v0*localS;
+ *s = localS;
+ *t = localT;
+
+ return true;
+}
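+
+// e.g. (illustrative): s0 = {(0,0), v = (2,0)} and s1 = {(1,-1), v = (0,2)} give
+// denom = 4 and sNumer = tNumer = 2, so the segments intersect at p = (1,0)
+// with s = t = 0.5.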
+
+bool SkIsConvexPolygon(const SkPoint* polygonVerts, int polygonSize) {
+ if (polygonSize < 3) {
+ return false;
+ }
+
+ SkScalar lastArea = 0;
+ SkScalar lastPerpDot = 0;
+
+ int prevIndex = polygonSize - 1;
+ int currIndex = 0;
+ int nextIndex = 1;
+ SkPoint origin = polygonVerts[0];
+ SkVector v0 = polygonVerts[currIndex] - polygonVerts[prevIndex];
+ SkVector v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ SkVector w0 = polygonVerts[currIndex] - origin;
+ SkVector w1 = polygonVerts[nextIndex] - origin;
+ for (int i = 0; i < polygonSize; ++i) {
+ if (!polygonVerts[i].isFinite()) {
+ return false;
+ }
+
+ // Check that winding direction is always the same (otherwise we have a reflex vertex)
+ SkScalar perpDot = v0.cross(v1);
+ if (lastPerpDot*perpDot < 0) {
+ return false;
+ }
+ if (0 != perpDot) {
+ lastPerpDot = perpDot;
+ }
+
+ // If the signed area ever flips it's concave
+ // TODO: see if we can verify convexity only with signed area
+ SkScalar quadArea = w0.cross(w1);
+ if (quadArea*lastArea < 0) {
+ return false;
+ }
+ if (0 != quadArea) {
+ lastArea = quadArea;
+ }
+
+ prevIndex = currIndex;
+ currIndex = nextIndex;
+ nextIndex = (currIndex + 1) % polygonSize;
+ v0 = v1;
+ v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ w0 = w1;
+ w1 = polygonVerts[nextIndex] - origin;
+ }
+
+ return true;
+}
+
+struct OffsetEdge {
+ OffsetEdge* fPrev;
+ OffsetEdge* fNext;
+ OffsetSegment fOffset;
+ SkPoint fIntersection;
+ SkScalar fTValue;
+ uint16_t fIndex;
+ uint16_t fEnd;
+
+ void init(uint16_t start = 0, uint16_t end = 0) {
+ fIntersection = fOffset.fP0;
+ fTValue = SK_ScalarMin;
+ fIndex = start;
+ fEnd = end;
+ }
+
+ // special intersection check that looks for endpoint intersection
+ bool checkIntersection(const OffsetEdge* that,
+ SkPoint* p, SkScalar* s, SkScalar* t) {
+ if (this->fEnd == that->fIndex) {
+ SkPoint p1 = this->fOffset.fP0 + this->fOffset.fV;
+ if (SkPointPriv::EqualsWithinTolerance(p1, that->fOffset.fP0)) {
+ *p = p1;
+ *s = SK_Scalar1;
+ *t = 0;
+ return true;
+ }
+ }
+
+ return compute_intersection(this->fOffset, that->fOffset, p, s, t);
+ }
+
+ // computes the line intersection and then the "distance" from that to this
+ // this is really a signed squared distance, where negative means that
+ // the intersection lies inside this->fOffset
+ SkScalar computeCrossingDistance(const OffsetEdge* that) {
+ const OffsetSegment& s0 = this->fOffset;
+ const OffsetSegment& s1 = that->fOffset;
+ const SkVector& v0 = s0.fV;
+ const SkVector& v1 = s1.fV;
+
+ SkScalar denom = v0.cross(v1);
+ if (SkScalarNearlyZero(denom, kCrossTolerance)) {
+ // segments are parallel
+ return SK_ScalarMax;
+ }
+
+ SkVector w = s1.fP0 - s0.fP0;
+ SkScalar localS = w.cross(v1) / denom;
+ if (localS < 0) {
+ localS = -localS;
+ } else {
+ localS -= SK_Scalar1;
+ }
+
+ localS *= SkScalarAbs(localS);
+ localS *= v0.dot(v0);
+
+ return localS;
+ }
+
+};
+
+static void remove_node(const OffsetEdge* node, OffsetEdge** head) {
+ // remove from linked list
+ node->fPrev->fNext = node->fNext;
+ node->fNext->fPrev = node->fPrev;
+ if (node == *head) {
+ *head = (node->fNext == node) ? nullptr : node->fNext;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+
+// The objective here is to inset all of the edges by the given distance, and then
+// remove any invalid inset edges by detecting right-hand turns. In a ccw polygon,
+// we should only be making left-hand turns (for cw polygons, we use the winding
+// parameter to reverse this). We detect this by checking whether the second intersection
+// on an edge is closer to its tail than the first one.
+//
+// We might also have the case that there is no intersection between two neighboring inset edges.
+// In this case, one edge will lie to the right of the other and should be discarded along with
+// its previous intersection (if any).
+//
+// Note: the assumption is that inputPolygon is convex and has no coincident points.
+//
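+// A minimal usage sketch (illustrative only):
+//
+//   SkPoint quad[4] = {{0, 0}, {10, 0}, {10, 10}, {0, 10}};
+//   SkTDArray<SkPoint> inset;
+//   if (SkInsetConvexPolygon(quad, 4, 1.0f, &inset)) {
+//       // inset now holds the corners of the 8x8 square centered in the original
+//   }
+//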
+bool SkInsetConvexPolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ SkScalar inset, SkTDArray<SkPoint>* insetPolygon) {
+ if (inputPolygonSize < 3) {
+ return false;
+ }
+
+ // restrict this to match other routines
+ // practically we don't want anything bigger than this anyway
+ if (inputPolygonSize > std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ // can't inset by a negative or non-finite amount
+ if (inset < -SK_ScalarNearlyZero || !SkScalarIsFinite(inset)) {
+ return false;
+ }
+
+ // insetting close to zero just returns the original poly
+ if (inset <= SK_ScalarNearlyZero) {
+ for (int i = 0; i < inputPolygonSize; ++i) {
+ *insetPolygon->push() = inputPolygonVerts[i];
+ }
+ return true;
+ }
+
+ // get winding direction
+ int winding = SkGetPolygonWinding(inputPolygonVerts, inputPolygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // set up
+ SkAutoSTMalloc<64, OffsetEdge> edgeData(inputPolygonSize);
+ int prev = inputPolygonSize - 1;
+ for (int curr = 0; curr < inputPolygonSize; prev = curr, ++curr) {
+ int next = (curr + 1) % inputPolygonSize;
+ if (!inputPolygonVerts[curr].isFinite()) {
+ return false;
+ }
+ // check for convexity just to be sure
+ if (compute_side(inputPolygonVerts[prev], inputPolygonVerts[curr] - inputPolygonVerts[prev],
+ inputPolygonVerts[next])*winding < 0) {
+ return false;
+ }
+ SkVector v = inputPolygonVerts[next] - inputPolygonVerts[curr];
+ SkVector perp = SkVector::Make(-v.fY, v.fX);
+ perp.setLength(inset*winding);
+ edgeData[curr].fPrev = &edgeData[prev];
+ edgeData[curr].fNext = &edgeData[next];
+ edgeData[curr].fOffset.fP0 = inputPolygonVerts[curr] + perp;
+ edgeData[curr].fOffset.fV = v;
+ edgeData[curr].init();
+ }
+
+ OffsetEdge* head = &edgeData[0];
+ OffsetEdge* currEdge = head;
+ OffsetEdge* prevEdge = currEdge->fPrev;
+ int insetVertexCount = inputPolygonSize;
+ unsigned int iterations = 0;
+ unsigned int maxIterations = inputPolygonSize * inputPolygonSize;
+ while (head && prevEdge != currEdge) {
+ ++iterations;
+ // we should check each edge against each other edge at most once
+ if (iterations > maxIterations) {
+ return false;
+ }
+
+ SkScalar s, t;
+ SkPoint intersection;
+ if (compute_intersection(prevEdge->fOffset, currEdge->fOffset,
+ &intersection, &s, &t)) {
+ // if new intersection is further back on previous inset from the prior intersection
+ if (s < prevEdge->fTValue) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --insetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ // we've already considered this intersection, we're done
+ } else if (currEdge->fTValue > SK_ScalarMin &&
+ SkPointPriv::EqualsWithinTolerance(intersection,
+ currEdge->fIntersection,
+ 1.0e-6f)) {
+ break;
+ } else {
+ // add intersection
+ currEdge->fIntersection = intersection;
+ currEdge->fTValue = t;
+
+ // go to next segment
+ prevEdge = currEdge;
+ currEdge = currEdge->fNext;
+ }
+ } else {
+ // if prev to right side of curr
+ int side = winding*compute_side(currEdge->fOffset.fP0,
+ currEdge->fOffset.fV,
+ prevEdge->fOffset.fP0);
+ if (side < 0 &&
+ side == winding*compute_side(currEdge->fOffset.fP0,
+ currEdge->fOffset.fV,
+ prevEdge->fOffset.fP0 + prevEdge->fOffset.fV)) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --insetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ } else {
+ // move to next segment
+ remove_node(currEdge, &head);
+ --insetVertexCount;
+ currEdge = currEdge->fNext;
+ }
+ }
+ }
+
+ // store all the valid intersections that aren't nearly coincident
+ // TODO: look at the main algorithm and see if we can detect these better
+ insetPolygon->reset();
+ if (!head) {
+ return false;
+ }
+
+ static constexpr SkScalar kCleanupTolerance = 0.01f;
+ if (insetVertexCount >= 0) {
+ insetPolygon->setReserve(insetVertexCount);
+ }
+ int currIndex = 0;
+ *insetPolygon->push() = head->fIntersection;
+ currEdge = head->fNext;
+ while (currEdge != head) {
+ if (!SkPointPriv::EqualsWithinTolerance(currEdge->fIntersection,
+ (*insetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ *insetPolygon->push() = currEdge->fIntersection;
+ currIndex++;
+ }
+ currEdge = currEdge->fNext;
+ }
+ // make sure the first and last points aren't coincident
+ if (currIndex >= 1 &&
+ SkPointPriv::EqualsWithinTolerance((*insetPolygon)[0], (*insetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ insetPolygon->pop();
+ }
+
+ return SkIsConvexPolygon(insetPolygon->begin(), insetPolygon->count());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// compute the number of points needed for a circular join when offsetting a reflex vertex
+bool SkComputeRadialSteps(const SkVector& v1, const SkVector& v2, SkScalar offset,
+ SkScalar* rotSin, SkScalar* rotCos, int* n) {
+ const SkScalar kRecipPixelsPerArcSegment = 0.25f;
+
+ SkScalar rCos = v1.dot(v2);
+ if (!SkScalarIsFinite(rCos)) {
+ return false;
+ }
+ SkScalar rSin = v1.cross(v2);
+ if (!SkScalarIsFinite(rSin)) {
+ return false;
+ }
+ SkScalar theta = SkScalarATan2(rSin, rCos);
+
+ SkScalar floatSteps = SkScalarAbs(offset*theta*kRecipPixelsPerArcSegment);
+ // limit the number of steps to at most max uint16_t (that's all we can index)
+ // knock one value off the top to account for rounding
+ if (floatSteps >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+ int steps = SkScalarRoundToInt(floatSteps);
+
+ SkScalar dTheta = steps > 0 ? theta / steps : 0;
+ *rotSin = SkScalarSin(dTheta);
+ *rotCos = SkScalarCos(dTheta);
+ *n = steps;
+ return true;
+}
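+
+// e.g. (illustrative): for a 90-degree turn between normals (theta = pi/2) at
+// offset = 10, floatSteps = 10 * (pi/2) * 0.25 ~= 3.93, so *n = 4 and each step
+// rotates by dTheta = (pi/2) / 4.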
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// a point is "left" to another if its x-coord is less, or if equal, its y-coord is greater
+static bool left(const SkPoint& p0, const SkPoint& p1) {
+ return p0.fX < p1.fX || (!(p0.fX > p1.fX) && p0.fY > p1.fY);
+}
+
+// a point is "right" to another if its x-coord is greater, or if equal, its y-coord is less
+static bool right(const SkPoint& p0, const SkPoint& p1) {
+ return p0.fX > p1.fX || (!(p0.fX < p1.fX) && p0.fY < p1.fY);
+}
+
+struct Vertex {
+ static bool Left(const Vertex& qv0, const Vertex& qv1) {
+ return left(qv0.fPosition, qv1.fPosition);
+ }
+
+    // packed to fit into 16 bytes (four vertices per typical 64-byte cache line)
+ SkPoint fPosition;
+ uint16_t fIndex; // index in unsorted polygon
+ uint16_t fPrevIndex; // indices for previous and next vertex in unsorted polygon
+ uint16_t fNextIndex;
+ uint16_t fFlags;
+};
+
+enum VertexFlags {
+ kPrevLeft_VertexFlag = 0x1,
+ kNextLeft_VertexFlag = 0x2,
+};
+
+struct ActiveEdge {
+ ActiveEdge() : fChild{ nullptr, nullptr }, fAbove(nullptr), fBelow(nullptr), fRed(false) {}
+ ActiveEdge(const SkPoint& p0, const SkVector& v, uint16_t index0, uint16_t index1)
+ : fSegment({ p0, v })
+ , fIndex0(index0)
+ , fIndex1(index1)
+ , fAbove(nullptr)
+ , fBelow(nullptr)
+ , fRed(true) {
+ fChild[0] = nullptr;
+ fChild[1] = nullptr;
+ }
+
+ // Returns true if "this" is above "that", assuming this->p0 is to the left of that->p0
+ // This is only used to verify the edgelist -- the actual test for insertion/deletion is much
+ // simpler because we can make certain assumptions then.
+ bool aboveIfLeft(const ActiveEdge* that) const {
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkPoint& q0 = that->fSegment.fP0;
+ SkASSERT(p0.fX <= q0.fX);
+ SkVector d = q0 - p0;
+ const SkVector& v = this->fSegment.fV;
+ const SkVector& w = that->fSegment.fV;
+ // The idea here is that if the vector between the origins of the two segments (d)
+ // rotates counterclockwise up to the vector representing the "this" segment (v),
+ // then we know that "this" is above "that". If the result is clockwise we say it's below.
+ if (this->fIndex0 != that->fIndex0) {
+ SkScalar cross = d.cross(v);
+ if (cross > kCrossTolerance) {
+ return true;
+ } else if (cross < -kCrossTolerance) {
+ return false;
+ }
+ } else if (this->fIndex1 == that->fIndex1) {
+ return false;
+ }
+ // At this point either the two origins are nearly equal or the origin of "that"
+ // lies on dv. So then we try the same for the vector from the tail of "this"
+ // to the head of "that". Again, ccw means "this" is above "that".
+ // d = that.P1 - this.P0
+ // = that.fP0 + that.fV - this.fP0
+ // = that.fP0 - this.fP0 + that.fV
+ // = old_d + that.fV
+ d += w;
+ SkScalar cross = d.cross(v);
+ if (cross > kCrossTolerance) {
+ return true;
+ } else if (cross < -kCrossTolerance) {
+ return false;
+ }
+ // If the previous check fails, the two segments are nearly collinear
+ // First check y-coord of first endpoints
+ if (p0.fX < q0.fX) {
+ return (p0.fY >= q0.fY);
+ } else if (p0.fY > q0.fY) {
+ return true;
+ } else if (p0.fY < q0.fY) {
+ return false;
+ }
+ // The first endpoints are the same, so check the other endpoint
+ SkPoint p1 = p0 + v;
+ SkPoint q1 = q0 + w;
+ if (p1.fX < q1.fX) {
+ return (p1.fY >= q1.fY);
+ } else {
+ return (p1.fY > q1.fY);
+ }
+ }
+
+    // same as aboveIfLeft(), but handles either horizontal ordering of the two origins
+ bool above(const ActiveEdge* that) const {
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkPoint& q0 = that->fSegment.fP0;
+ if (right(p0, q0)) {
+ return !that->aboveIfLeft(this);
+ } else {
+ return this->aboveIfLeft(that);
+ }
+ }
+
+ bool intersect(const SkPoint& q0, const SkVector& w, uint16_t index0, uint16_t index1) const {
+ // check first to see if these edges are neighbors in the polygon
+ if (this->fIndex0 == index0 || this->fIndex1 == index0 ||
+ this->fIndex0 == index1 || this->fIndex1 == index1) {
+ return false;
+ }
+
+ // We don't need the exact intersection point so we can do a simpler test here.
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkVector& v = this->fSegment.fV;
+ SkPoint p1 = p0 + v;
+ SkPoint q1 = q0 + w;
+
+ // We assume some x-overlap due to how the edgelist works
+ // This allows us to simplify our test
+ // We need some slop here because storing the vector and recomputing the second endpoint
+        // doesn't necessarily give us the original result in floating point.
+ // TODO: Store vector as double? Store endpoint as well?
+ SkASSERT(q0.fX <= p1.fX + SK_ScalarNearlyZero);
+
+ // if each segment straddles the other (i.e., the endpoints have different sides)
+ // then they intersect
+ bool result;
+ if (p0.fX < q0.fX) {
+ if (q1.fX < p1.fX) {
+ result = (compute_side(p0, v, q0)*compute_side(p0, v, q1) < 0);
+ } else {
+ result = (compute_side(p0, v, q0)*compute_side(q0, w, p1) > 0);
+ }
+ } else {
+ if (p1.fX < q1.fX) {
+ result = (compute_side(q0, w, p0)*compute_side(q0, w, p1) < 0);
+ } else {
+ result = (compute_side(q0, w, p0)*compute_side(p0, v, q1) > 0);
+ }
+ }
+ return result;
+ }
+
+ bool intersect(const ActiveEdge* edge) {
+ return this->intersect(edge->fSegment.fP0, edge->fSegment.fV, edge->fIndex0, edge->fIndex1);
+ }
+
+ bool lessThan(const ActiveEdge* that) const {
+ SkASSERT(!this->above(this));
+ SkASSERT(!that->above(that));
+ SkASSERT(!(this->above(that) && that->above(this)));
+ return this->above(that);
+ }
+
+ bool equals(uint16_t index0, uint16_t index1) const {
+ return (this->fIndex0 == index0 && this->fIndex1 == index1);
+ }
+
+ OffsetSegment fSegment;
+ uint16_t fIndex0; // indices for previous and next vertex in polygon
+ uint16_t fIndex1;
+ ActiveEdge* fChild[2];
+ ActiveEdge* fAbove;
+ ActiveEdge* fBelow;
+ int32_t fRed;
+};
+
+class ActiveEdgeList {
+public:
+ ActiveEdgeList(int maxEdges) {
+ fAllocation = (char*) sk_malloc_throw(sizeof(ActiveEdge)*maxEdges);
+ fCurrFree = 0;
+ fMaxFree = maxEdges;
+ }
+ ~ActiveEdgeList() {
+ fTreeHead.fChild[1] = nullptr;
+ sk_free(fAllocation);
+ }
+
+ bool insert(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ SkVector v = p1 - p0;
+ if (!v.isFinite()) {
+ return false;
+ }
+ // empty tree case -- easy
+ if (!fTreeHead.fChild[1]) {
+ ActiveEdge* root = fTreeHead.fChild[1] = this->allocate(p0, v, index0, index1);
+ SkASSERT(root);
+ if (!root) {
+ return false;
+ }
+ root->fRed = false;
+ return true;
+ }
+
+ // set up helpers
+ ActiveEdge* top = &fTreeHead;
+ ActiveEdge *grandparent = nullptr;
+ ActiveEdge *parent = nullptr;
+ ActiveEdge *curr = top->fChild[1];
+ int dir = 0;
+        int last = 0;   // direction taken from parent to curr on the previous step
+ // predecessor and successor, for intersection check
+ ActiveEdge* pred = nullptr;
+ ActiveEdge* succ = nullptr;
+
+ // search down the tree
+ while (true) {
+ if (!curr) {
+ // check for intersection with predecessor and successor
+ if ((pred && pred->intersect(p0, v, index0, index1)) ||
+ (succ && succ->intersect(p0, v, index0, index1))) {
+ return false;
+ }
+ // insert new node at bottom
+ parent->fChild[dir] = curr = this->allocate(p0, v, index0, index1);
+ SkASSERT(curr);
+ if (!curr) {
+ return false;
+ }
+ curr->fAbove = pred;
+ curr->fBelow = succ;
+ if (pred) {
+ pred->fBelow = curr;
+ }
+ if (succ) {
+ succ->fAbove = curr;
+ }
+ if (IsRed(parent)) {
+ int dir2 = (top->fChild[1] == grandparent);
+ if (curr == parent->fChild[last]) {
+ top->fChild[dir2] = SingleRotation(grandparent, !last);
+ } else {
+ top->fChild[dir2] = DoubleRotation(grandparent, !last);
+ }
+ }
+ break;
+ } else if (IsRed(curr->fChild[0]) && IsRed(curr->fChild[1])) {
+ // color flip
+ curr->fRed = true;
+ curr->fChild[0]->fRed = false;
+ curr->fChild[1]->fRed = false;
+ if (IsRed(parent)) {
+ int dir2 = (top->fChild[1] == grandparent);
+ if (curr == parent->fChild[last]) {
+ top->fChild[dir2] = SingleRotation(grandparent, !last);
+ } else {
+ top->fChild[dir2] = DoubleRotation(grandparent, !last);
+ }
+ }
+ }
+
+ last = dir;
+ int side;
+ // check to see if segment is above or below
+ if (curr->fIndex0 == index0) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+
+ if (0 == dir) {
+ succ = curr;
+ } else {
+ pred = curr;
+ }
+
+ // update helpers
+ if (grandparent) {
+ top = grandparent;
+ }
+ grandparent = parent;
+ parent = curr;
+ curr = curr->fChild[dir];
+ }
+
+ // update root and make it black
+ fTreeHead.fChild[1]->fRed = false;
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+ // replaces edge p0p1 with p1p2
+ bool replace(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ uint16_t index0, uint16_t index1, uint16_t index2) {
+ if (!fTreeHead.fChild[1]) {
+ return false;
+ }
+
+ SkVector v = p2 - p1;
+ ActiveEdge* curr = &fTreeHead;
+ ActiveEdge* found = nullptr;
+ int dir = 1;
+
+ // search
+ while (curr->fChild[dir] != nullptr) {
+ // update helpers
+ curr = curr->fChild[dir];
+ // save found node
+ if (curr->equals(index0, index1)) {
+ found = curr;
+ break;
+ } else {
+ // check to see if segment is above or below
+ int side;
+ if (curr->fIndex1 == index1) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+ }
+ }
+
+ if (!found) {
+ return false;
+ }
+
+ // replace if found
+ ActiveEdge* pred = found->fAbove;
+ ActiveEdge* succ = found->fBelow;
+ // check deletion and insert intersection cases
+ if (pred && (pred->intersect(found) || pred->intersect(p1, v, index1, index2))) {
+ return false;
+ }
+ if (succ && (succ->intersect(found) || succ->intersect(p1, v, index1, index2))) {
+ return false;
+ }
+ found->fSegment.fP0 = p1;
+ found->fSegment.fV = v;
+ found->fIndex0 = index1;
+ found->fIndex1 = index2;
+ // above and below should stay the same
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+ bool remove(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ if (!fTreeHead.fChild[1]) {
+ return false;
+ }
+
+ ActiveEdge* curr = &fTreeHead;
+ ActiveEdge* parent = nullptr;
+ ActiveEdge* grandparent = nullptr;
+ ActiveEdge* found = nullptr;
+ int dir = 1;
+
+ // search and push a red node down
+ while (curr->fChild[dir] != nullptr) {
+ int last = dir;
+
+ // update helpers
+ grandparent = parent;
+ parent = curr;
+ curr = curr->fChild[dir];
+ // save found node
+ if (curr->equals(index0, index1)) {
+ found = curr;
+ dir = 0;
+ } else {
+ // check to see if segment is above or below
+ int side;
+ if (curr->fIndex1 == index1) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+ }
+
+ // push the red node down
+ if (!IsRed(curr) && !IsRed(curr->fChild[dir])) {
+ if (IsRed(curr->fChild[!dir])) {
+ parent = parent->fChild[last] = SingleRotation(curr, dir);
+ } else {
+ ActiveEdge *s = parent->fChild[!last];
+
+                    if (s != nullptr) {
+ if (!IsRed(s->fChild[!last]) && !IsRed(s->fChild[last])) {
+ // color flip
+ parent->fRed = false;
+ s->fRed = true;
+ curr->fRed = true;
+ } else {
+ int dir2 = (grandparent->fChild[1] == parent);
+
+ if (IsRed(s->fChild[last])) {
+ grandparent->fChild[dir2] = DoubleRotation(parent, last);
+ } else if (IsRed(s->fChild[!last])) {
+ grandparent->fChild[dir2] = SingleRotation(parent, last);
+ }
+
+ // ensure correct coloring
+ curr->fRed = grandparent->fChild[dir2]->fRed = true;
+ grandparent->fChild[dir2]->fChild[0]->fRed = false;
+ grandparent->fChild[dir2]->fChild[1]->fRed = false;
+ }
+ }
+ }
+ }
+ }
+
+ // replace and remove if found
+ if (found) {
+ ActiveEdge* pred = found->fAbove;
+ ActiveEdge* succ = found->fBelow;
+ if ((pred && pred->intersect(found)) || (succ && succ->intersect(found))) {
+ return false;
+ }
+ if (found != curr) {
+ found->fSegment = curr->fSegment;
+ found->fIndex0 = curr->fIndex0;
+ found->fIndex1 = curr->fIndex1;
+ found->fAbove = curr->fAbove;
+ pred = found->fAbove;
+ // we don't need to set found->fBelow here
+ } else {
+ if (succ) {
+ succ->fAbove = pred;
+ }
+ }
+ if (pred) {
+ pred->fBelow = curr->fBelow;
+ }
+ parent->fChild[parent->fChild[1] == curr] = curr->fChild[!curr->fChild[0]];
+
+            // no need to free the node -- it lives in our preallocated block;
+            // poison its links to catch stale use
+ curr->fAbove = reinterpret_cast<ActiveEdge*>(0xdeadbeefll);
+ curr->fBelow = reinterpret_cast<ActiveEdge*>(0xdeadbeefll);
+ if (fTreeHead.fChild[1]) {
+ fTreeHead.fChild[1]->fRed = false;
+ }
+ }
+
+ // update root and make it black
+ if (fTreeHead.fChild[1]) {
+ fTreeHead.fChild[1]->fRed = false;
+ }
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+private:
+ // allocator
+ ActiveEdge * allocate(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ if (fCurrFree >= fMaxFree) {
+ return nullptr;
+ }
+ char* bytes = fAllocation + sizeof(ActiveEdge)*fCurrFree;
+ ++fCurrFree;
+ return new(bytes) ActiveEdge(p0, p1, index0, index1);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////
+ // Red-black tree methods
+ ///////////////////////////////////////////////////////////////////////////////////
+ static bool IsRed(const ActiveEdge* node) {
+ return node && node->fRed;
+ }
+
+ static ActiveEdge* SingleRotation(ActiveEdge* node, int dir) {
+ ActiveEdge* tmp = node->fChild[!dir];
+
+ node->fChild[!dir] = tmp->fChild[dir];
+ tmp->fChild[dir] = node;
+
+ node->fRed = true;
+ tmp->fRed = false;
+
+ return tmp;
+ }
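+
+    // e.g. SingleRotation(node, 0) rotates left:
+    //
+    //      node                tmp
+    //     /    \              /   \
+    //    A     tmp    ->    node   C
+    //         /   \        /    \
+    //        B     C      A      B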
+
+ static ActiveEdge* DoubleRotation(ActiveEdge* node, int dir) {
+ node->fChild[!dir] = SingleRotation(node->fChild[!dir], !dir);
+
+ return SingleRotation(node, dir);
+ }
+
+ // returns black link count
+ static int VerifyTree(const ActiveEdge* tree) {
+ if (!tree) {
+ return 1;
+ }
+
+ const ActiveEdge* left = tree->fChild[0];
+ const ActiveEdge* right = tree->fChild[1];
+
+ // no consecutive red links
+ if (IsRed(tree) && (IsRed(left) || IsRed(right))) {
+ SkASSERT(false);
+ return 0;
+ }
+
+ // check secondary links
+ if (tree->fAbove) {
+ SkASSERT(tree->fAbove->fBelow == tree);
+ SkASSERT(tree->fAbove->lessThan(tree));
+ }
+ if (tree->fBelow) {
+ SkASSERT(tree->fBelow->fAbove == tree);
+ SkASSERT(tree->lessThan(tree->fBelow));
+ }
+
+ // violates binary tree order
+ if ((left && tree->lessThan(left)) || (right && right->lessThan(tree))) {
+ SkASSERT(false);
+ return 0;
+ }
+
+ int leftCount = VerifyTree(left);
+ int rightCount = VerifyTree(right);
+
+ // return black link count
+ if (leftCount != 0 && rightCount != 0) {
+ // black height mismatch
+ if (leftCount != rightCount) {
+ SkASSERT(false);
+ return 0;
+ }
+ return IsRed(tree) ? leftCount : leftCount + 1;
+ } else {
+ return 0;
+ }
+ }
+
+ ActiveEdge fTreeHead;
+ char* fAllocation;
+ int fCurrFree;
+ int fMaxFree;
+};
+
+// Here we implement a sweep line algorithm to determine whether the provided points
+// represent a simple polygon, i.e., the polygon is non-self-intersecting.
+// We first insert the vertices into a priority queue sorting horizontally from left to right.
+// Then as we pop the vertices from the queue we generate events which indicate that an edge
+// should be added or removed from an edge list. If any intersections are detected in the edge
+// list, then we know the polygon is self-intersecting and hence not simple.
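+//
+// A minimal usage sketch (illustrative only):
+//
+//   SkPoint bowtie[4] = {{0, 0}, {1, 1}, {1, 0}, {0, 1}};  // edges cross at (0.5, 0.5)
+//   SkASSERT(!SkIsSimplePolygon(bowtie, 4));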
+bool SkIsSimplePolygon(const SkPoint* polygon, int polygonSize) {
+ if (polygonSize < 3) {
+ return false;
+ }
+
+ // If it's convex, it's simple
+ if (SkIsConvexPolygon(polygon, polygonSize)) {
+ return true;
+ }
+
+ // practically speaking, it takes too long to process large polygons
+ if (polygonSize > 2048) {
+ return false;
+ }
+
+    SkTDPQueue<Vertex, Vertex::Left> vertexQueue(polygonSize);
+ for (int i = 0; i < polygonSize; ++i) {
+ Vertex newVertex;
+ if (!polygon[i].isFinite()) {
+ return false;
+ }
+ newVertex.fPosition = polygon[i];
+ newVertex.fIndex = i;
+ newVertex.fPrevIndex = (i - 1 + polygonSize) % polygonSize;
+ newVertex.fNextIndex = (i + 1) % polygonSize;
+ newVertex.fFlags = 0;
+ if (left(polygon[newVertex.fPrevIndex], polygon[i])) {
+ newVertex.fFlags |= kPrevLeft_VertexFlag;
+ }
+ if (left(polygon[newVertex.fNextIndex], polygon[i])) {
+ newVertex.fFlags |= kNextLeft_VertexFlag;
+ }
+ vertexQueue.insert(newVertex);
+ }
+
+ // pop each vertex from the queue and generate events depending on
+ // where it lies relative to its neighboring edges
+ ActiveEdgeList sweepLine(polygonSize);
+ while (vertexQueue.count() > 0) {
+ const Vertex& v = vertexQueue.peek();
+
+ // both to the right -- insert both
+ if (v.fFlags == 0) {
+ if (!sweepLine.insert(v.fPosition, polygon[v.fPrevIndex], v.fIndex, v.fPrevIndex)) {
+ break;
+ }
+ if (!sweepLine.insert(v.fPosition, polygon[v.fNextIndex], v.fIndex, v.fNextIndex)) {
+ break;
+ }
+ // both to the left -- remove both
+ } else if (v.fFlags == (kPrevLeft_VertexFlag | kNextLeft_VertexFlag)) {
+ if (!sweepLine.remove(polygon[v.fPrevIndex], v.fPosition, v.fPrevIndex, v.fIndex)) {
+ break;
+ }
+ if (!sweepLine.remove(polygon[v.fNextIndex], v.fPosition, v.fNextIndex, v.fIndex)) {
+ break;
+ }
+ // one to left and right -- replace one with another
+ } else {
+ if (v.fFlags & kPrevLeft_VertexFlag) {
+ if (!sweepLine.replace(polygon[v.fPrevIndex], v.fPosition, polygon[v.fNextIndex],
+ v.fPrevIndex, v.fIndex, v.fNextIndex)) {
+ break;
+ }
+ } else {
+ SkASSERT(v.fFlags & kNextLeft_VertexFlag);
+ if (!sweepLine.replace(polygon[v.fNextIndex], v.fPosition, polygon[v.fPrevIndex],
+ v.fNextIndex, v.fIndex, v.fPrevIndex)) {
+ break;
+ }
+ }
+ }
+
+ vertexQueue.pop();
+ }
+
+ return (vertexQueue.count() == 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// helper function for SkOffsetSimplePolygon
+static void setup_offset_edge(OffsetEdge* currEdge,
+ const SkPoint& endpoint0, const SkPoint& endpoint1,
+ uint16_t startIndex, uint16_t endIndex) {
+ currEdge->fOffset.fP0 = endpoint0;
+ currEdge->fOffset.fV = endpoint1 - endpoint0;
+ currEdge->init(startIndex, endIndex);
+}
+
+static bool is_reflex_vertex(const SkPoint* inputPolygonVerts, int winding, SkScalar offset,
+ uint16_t prevIndex, uint16_t currIndex, uint16_t nextIndex) {
+ int side = compute_side(inputPolygonVerts[prevIndex],
+ inputPolygonVerts[currIndex] - inputPolygonVerts[prevIndex],
+ inputPolygonVerts[nextIndex]);
+ // if reflex point, we need to add extra edges
+ return (side*winding*offset < 0);
+}
+
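+// A minimal usage sketch (illustrative only; a positive offset insets the polygon
+// toward its interior, per the half-bounds check below):
+//
+//   SkPoint quad[4] = {{0, 0}, {10, 0}, {10, 10}, {0, 10}};
+//   SkTDArray<SkPoint> offsetPoly;
+//   if (SkOffsetSimplePolygon(quad, 4, SkRect::MakeWH(10, 10), 1.0f,
+//                             &offsetPoly, nullptr)) {
+//       // offsetPoly approximates the 8x8 square centered in the original
+//   }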
+bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ const SkRect& bounds, SkScalar offset,
+ SkTDArray<SkPoint>* offsetPolygon, SkTDArray<int>* polygonIndices) {
+ if (inputPolygonSize < 3) {
+ return false;
+ }
+
+ // need to be able to represent all the vertices in the 16-bit indices
+ if (inputPolygonSize >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ if (!SkScalarIsFinite(offset)) {
+ return false;
+ }
+
+    // can't inset by more than half the bounds of the polygon
+ if (offset > SkTMin(SkTAbs(SK_ScalarHalf*bounds.width()),
+ SkTAbs(SK_ScalarHalf*bounds.height()))) {
+ return false;
+ }
+
+ // offsetting close to zero just returns the original poly
+ if (SkScalarNearlyZero(offset)) {
+ for (int i = 0; i < inputPolygonSize; ++i) {
+ *offsetPolygon->push() = inputPolygonVerts[i];
+ if (polygonIndices) {
+ *polygonIndices->push() = i;
+ }
+ }
+ return true;
+ }
+
+ // get winding direction
+ int winding = SkGetPolygonWinding(inputPolygonVerts, inputPolygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // build normals
+ SkAutoSTMalloc<64, SkVector> normals(inputPolygonSize);
+ unsigned int numEdges = 0;
+ for (int currIndex = 0, prevIndex = inputPolygonSize - 1;
+ currIndex < inputPolygonSize;
+ prevIndex = currIndex, ++currIndex) {
+ if (!inputPolygonVerts[currIndex].isFinite()) {
+ return false;
+ }
+ int nextIndex = (currIndex + 1) % inputPolygonSize;
+ if (!compute_offset_vector(inputPolygonVerts[currIndex], inputPolygonVerts[nextIndex],
+ offset, winding, &normals[currIndex])) {
+ return false;
+ }
+ if (currIndex > 0) {
+ // if reflex point, we need to add extra edges
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset,
+ prevIndex, currIndex, nextIndex)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(normals[prevIndex], normals[currIndex], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ numEdges += SkTMax(numSteps, 1);
+ }
+ }
+ numEdges++;
+ }
+ // finish up the edge counting
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset, inputPolygonSize-1, 0, 1)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(normals[inputPolygonSize-1], normals[0], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ numEdges += SkTMax(numSteps, 1);
+ }
+
+ // Make sure we don't overflow the max array count.
+ // We shouldn't overflow numEdges, as SkComputeRadialSteps returns a max of 2^16-1,
+ // and we have a max of 2^16-1 original vertices.
+ if (numEdges > (unsigned int)std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+
+ // build initial offset edge list
+ SkSTArray<64, OffsetEdge> edgeData(numEdges);
+ OffsetEdge* prevEdge = nullptr;
+ for (int currIndex = 0, prevIndex = inputPolygonSize - 1;
+ currIndex < inputPolygonSize;
+ prevIndex = currIndex, ++currIndex) {
+ int nextIndex = (currIndex + 1) % inputPolygonSize;
+ // if reflex point, fill in curve
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset,
+ prevIndex, currIndex, nextIndex)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ SkVector prevNormal = normals[prevIndex];
+ if (!SkComputeRadialSteps(prevNormal, normals[currIndex], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ auto currEdge = edgeData.push_back_n(SkTMax(numSteps, 1));
+ for (int i = 0; i < numSteps - 1; ++i) {
+ SkVector currNormal = SkVector::Make(prevNormal.fX*rotCos - prevNormal.fY*rotSin,
+ prevNormal.fY*rotCos + prevNormal.fX*rotSin);
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + prevNormal,
+ inputPolygonVerts[currIndex] + currNormal,
+ currIndex, currIndex);
+ prevNormal = currNormal;
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ ++currEdge;
+ }
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + prevNormal,
+ inputPolygonVerts[currIndex] + normals[currIndex],
+ currIndex, currIndex);
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ }
+
+ // Add the edge
+ auto currEdge = edgeData.push_back_n(1);
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + normals[currIndex],
+ inputPolygonVerts[nextIndex] + normals[currIndex],
+ currIndex, nextIndex);
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ }
+ // close up the linked list
+ SkASSERT(prevEdge);
+ prevEdge->fNext = &edgeData[0];
+ edgeData[0].fPrev = prevEdge;
+
+ // now clip edges
+ SkASSERT(edgeData.count() == (int)numEdges);
+ auto head = &edgeData[0];
+ auto currEdge = head;
+ unsigned int offsetVertexCount = numEdges;
+ unsigned long long iterations = 0;
+ unsigned long long maxIterations = (unsigned long long)(numEdges) * numEdges;
+ while (head && prevEdge != currEdge && offsetVertexCount > 0) {
+ ++iterations;
+ // we should check each edge against each other edge at most once
+ if (iterations > maxIterations) {
+ return false;
+ }
+
+ SkScalar s, t;
+ SkPoint intersection;
+ if (prevEdge->checkIntersection(currEdge, &intersection, &s, &t)) {
+ // if new intersection is further back on previous inset from the prior intersection
+ if (s < prevEdge->fTValue) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --offsetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ // we've already considered this intersection, we're done
+ } else if (currEdge->fTValue > SK_ScalarMin &&
+ SkPointPriv::EqualsWithinTolerance(intersection,
+ currEdge->fIntersection,
+ 1.0e-6f)) {
+ break;
+ } else {
+ // add intersection
+ currEdge->fIntersection = intersection;
+ currEdge->fTValue = t;
+ currEdge->fIndex = prevEdge->fEnd;
+
+ // go to next segment
+ prevEdge = currEdge;
+ currEdge = currEdge->fNext;
+ }
+ } else {
+ // If there is no intersection, we want to minimize the distance between
+ // the point where the segment lines cross and the segments themselves.
+ OffsetEdge* prevPrevEdge = prevEdge->fPrev;
+ OffsetEdge* currNextEdge = currEdge->fNext;
+ SkScalar dist0 = currEdge->computeCrossingDistance(prevPrevEdge);
+ SkScalar dist1 = prevEdge->computeCrossingDistance(currNextEdge);
+ // if both lead to direct collision
+ if (dist0 < 0 && dist1 < 0) {
+ // check first to see if either represent parts of one contour
+ SkPoint p1 = prevPrevEdge->fOffset.fP0 + prevPrevEdge->fOffset.fV;
+ bool prevSameContour = SkPointPriv::EqualsWithinTolerance(p1,
+ prevEdge->fOffset.fP0);
+ p1 = currEdge->fOffset.fP0 + currEdge->fOffset.fV;
+ bool currSameContour = SkPointPriv::EqualsWithinTolerance(p1,
+ currNextEdge->fOffset.fP0);
+
+ // want to step along contour to find intersections rather than jump to new one
+ if (currSameContour && !prevSameContour) {
+ remove_node(currEdge, &head);
+ currEdge = currNextEdge;
+ --offsetVertexCount;
+ continue;
+ } else if (prevSameContour && !currSameContour) {
+ remove_node(prevEdge, &head);
+ prevEdge = prevPrevEdge;
+ --offsetVertexCount;
+ continue;
+ }
+ }
+
+ // otherwise minimize collision distance along segment
+ if (dist0 < dist1) {
+ remove_node(prevEdge, &head);
+ prevEdge = prevPrevEdge;
+ } else {
+ remove_node(currEdge, &head);
+ currEdge = currNextEdge;
+ }
+ --offsetVertexCount;
+ }
+ }
+
+ // store all the valid intersections that aren't nearly coincident
+ // TODO: look at the main algorithm and see if we can detect these better
+ offsetPolygon->reset();
+ if (!head || offsetVertexCount == 0 ||
+ offsetVertexCount >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ static constexpr SkScalar kCleanupTolerance = 0.01f;
+ offsetPolygon->setReserve(offsetVertexCount);
+ int currIndex = 0;
+ *offsetPolygon->push() = head->fIntersection;
+ if (polygonIndices) {
+ *polygonIndices->push() = head->fIndex;
+ }
+ currEdge = head->fNext;
+ while (currEdge != head) {
+ if (!SkPointPriv::EqualsWithinTolerance(currEdge->fIntersection,
+ (*offsetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ *offsetPolygon->push() = currEdge->fIntersection;
+ if (polygonIndices) {
+ *polygonIndices->push() = currEdge->fIndex;
+ }
+ currIndex++;
+ }
+ currEdge = currEdge->fNext;
+ }
+ // make sure the first and last points aren't coincident
+ if (currIndex >= 1 &&
+ SkPointPriv::EqualsWithinTolerance((*offsetPolygon)[0], (*offsetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ offsetPolygon->pop();
+ if (polygonIndices) {
+ polygonIndices->pop();
+ }
+ }
+
+ // check winding of offset polygon (it should be same as the original polygon)
+ SkScalar offsetWinding = SkGetPolygonWinding(offsetPolygon->begin(), offsetPolygon->count());
+
+ return (winding*offsetWinding > 0 &&
+ SkIsSimplePolygon(offsetPolygon->begin(), offsetPolygon->count()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+struct TriangulationVertex {
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(TriangulationVertex);
+
+ enum class VertexType { kConvex, kReflex };
+
+ SkPoint fPosition;
+ VertexType fVertexType;
+ uint16_t fIndex;
+ uint16_t fPrevIndex;
+ uint16_t fNextIndex;
+};
+
+static void compute_triangle_bounds(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkRect* bounds) {
+ Sk4s min, max;
+ min = max = Sk4s(p0.fX, p0.fY, p0.fX, p0.fY);
+ Sk4s xy(p1.fX, p1.fY, p2.fX, p2.fY);
+ min = Sk4s::Min(min, xy);
+ max = Sk4s::Max(max, xy);
+ bounds->setLTRB(SkTMin(min[0], min[2]), SkTMin(min[1], min[3]),
+ SkTMax(max[0], max[2]), SkTMax(max[1], max[3]));
+}
+
+// test to see if point p is in triangle p0p1p2.
+// for now assuming strictly inside -- if on the edge it's outside
+static bool point_in_triangle(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p) {
+ SkVector v0 = p1 - p0;
+ SkVector v1 = p2 - p1;
+ SkScalar n = v0.cross(v1);
+
+ SkVector w0 = p - p0;
+ if (n*v0.cross(w0) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ SkVector w1 = p - p1;
+ if (n*v1.cross(w1) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ SkVector v2 = p0 - p2;
+ SkVector w2 = p - p2;
+ if (n*v2.cross(w2) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ return true;
+}
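+
+// e.g. (illustrative): with p0 = (0,0), p1 = (2,0), p2 = (0,2), the point
+// (0.5, 0.5) is reported inside, while (1, 0) -- on the edge p0p1 -- is
+// reported outside.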
+
+// Data structure to track reflex vertices and check whether any are inside a given triangle
+class ReflexHash {
+public:
+ bool init(const SkRect& bounds, int vertexCount) {
+ fBounds = bounds;
+ fNumVerts = 0;
+ SkScalar width = bounds.width();
+ SkScalar height = bounds.height();
+ if (!SkScalarIsFinite(width) || !SkScalarIsFinite(height)) {
+ return false;
+ }
+
+ // We want vertexCount grid cells, roughly distributed to match the bounds ratio
+ SkScalar hCount = SkScalarSqrt(sk_ieee_float_divide(vertexCount*width, height));
+ if (!SkScalarIsFinite(hCount)) {
+ return false;
+ }
+ fHCount = SkTMax(SkTMin(SkScalarRoundToInt(hCount), vertexCount), 1);
+ fVCount = vertexCount/fHCount;
+ fGridConversion.set(sk_ieee_float_divide(fHCount - 0.001f, width),
+ sk_ieee_float_divide(fVCount - 0.001f, height));
+ if (!fGridConversion.isFinite()) {
+ return false;
+ }
+
+ fGrid.setCount(fHCount*fVCount);
+ for (int i = 0; i < fGrid.count(); ++i) {
+ fGrid[i].reset();
+ }
+
+ return true;
+ }
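+
+    // e.g. (illustrative): bounds of 100x50 with vertexCount = 32 gives
+    // hCount = sqrt(32*100/50) = 8 and vCount = 32/8 = 4, i.e. an 8x4 grid
+    // of 32 square cells matching the 2:1 bounds ratio.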
+
+ void add(TriangulationVertex* v) {
+ int index = hash(v);
+ fGrid[index].addToTail(v);
+ ++fNumVerts;
+ }
+
+ void remove(TriangulationVertex* v) {
+ int index = hash(v);
+ fGrid[index].remove(v);
+ --fNumVerts;
+ }
+
+ bool checkTriangle(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ uint16_t ignoreIndex0, uint16_t ignoreIndex1) const {
+ if (!fNumVerts) {
+ return false;
+ }
+
+ SkRect triBounds;
+ compute_triangle_bounds(p0, p1, p2, &triBounds);
+ int h0 = (triBounds.fLeft - fBounds.fLeft)*fGridConversion.fX;
+ int h1 = (triBounds.fRight - fBounds.fLeft)*fGridConversion.fX;
+ int v0 = (triBounds.fTop - fBounds.fTop)*fGridConversion.fY;
+ int v1 = (triBounds.fBottom - fBounds.fTop)*fGridConversion.fY;
+
+ for (int v = v0; v <= v1; ++v) {
+ for (int h = h0; h <= h1; ++h) {
+ int i = v * fHCount + h;
+ for (SkTInternalLList<TriangulationVertex>::Iter reflexIter = fGrid[i].begin();
+ reflexIter != fGrid[i].end(); ++reflexIter) {
+ TriangulationVertex* reflexVertex = *reflexIter;
+ if (reflexVertex->fIndex != ignoreIndex0 &&
+ reflexVertex->fIndex != ignoreIndex1 &&
+ point_in_triangle(p0, p1, p2, reflexVertex->fPosition)) {
+ return true;
+ }
+ }
+
+ }
+ }
+
+ return false;
+ }
+
+private:
+ int hash(TriangulationVertex* vert) const {
+ int h = (vert->fPosition.fX - fBounds.fLeft)*fGridConversion.fX;
+ int v = (vert->fPosition.fY - fBounds.fTop)*fGridConversion.fY;
+ SkASSERT(v*fHCount + h >= 0);
+ return v*fHCount + h;
+ }
+
+ SkRect fBounds;
+ int fHCount;
+ int fVCount;
+ int fNumVerts;
+ // converts distance from the origin to a grid location (when cast to int)
+ SkVector fGridConversion;
+ SkTDArray<SkTInternalLList<TriangulationVertex>> fGrid;
+};
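+// A hedged numeric sketch of the grid above: with bounds 100x50 and
+// vertexCount 32, init() picks fHCount = round(sqrt(32*100/50)) = 8 and
+// fVCount = 4, so fGridConversion ~= (7.999/100, 3.999/50). A vertex at
+// (50, 25) relative to the bounds origin then hashes to h = 3, v = 1,
+// i.e. cell index 1*8 + 3 = 11.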
+
+// Check to see if a reflex vertex has become a convex vertex after clipping an ear
+static void reclassify_vertex(TriangulationVertex* p, const SkPoint* polygonVerts,
+ int winding, ReflexHash* reflexHash,
+ SkTInternalLList<TriangulationVertex>* convexList) {
+ if (TriangulationVertex::VertexType::kReflex == p->fVertexType) {
+ SkVector v0 = p->fPosition - polygonVerts[p->fPrevIndex];
+ SkVector v1 = polygonVerts[p->fNextIndex] - p->fPosition;
+ if (winding*v0.cross(v1) > SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ p->fVertexType = TriangulationVertex::VertexType::kConvex;
+ reflexHash->remove(p);
+ p->fPrev = p->fNext = nullptr;
+ convexList->addToTail(p);
+ }
+ }
+}
+
+bool SkTriangulateSimplePolygon(const SkPoint* polygonVerts, uint16_t* indexMap, int polygonSize,
+ SkTDArray<uint16_t>* triangleIndices) {
+ if (polygonSize < 3) {
+ return false;
+ }
+ // need to be able to represent all the vertices in the 16-bit indices
+ if (polygonSize >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ // get bounds
+ SkRect bounds;
+ if (!bounds.setBoundsCheck(polygonVerts, polygonSize)) {
+ return false;
+ }
+ // get winding direction
+ // TODO: we do this for all the polygon routines -- might be better to have the client
+ // compute it and pass it in
+ int winding = SkGetPolygonWinding(polygonVerts, polygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // Set up vertices
+ SkAutoSTMalloc<64, TriangulationVertex> triangulationVertices(polygonSize);
+ int prevIndex = polygonSize - 1;
+ SkVector v0 = polygonVerts[0] - polygonVerts[prevIndex];
+ for (int currIndex = 0; currIndex < polygonSize; ++currIndex) {
+ int nextIndex = (currIndex + 1) % polygonSize;
+
+ SkDEBUGCODE(memset(&triangulationVertices[currIndex], 0, sizeof(TriangulationVertex)));
+ triangulationVertices[currIndex].fPosition = polygonVerts[currIndex];
+ triangulationVertices[currIndex].fIndex = currIndex;
+ triangulationVertices[currIndex].fPrevIndex = prevIndex;
+ triangulationVertices[currIndex].fNextIndex = nextIndex;
+ SkVector v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ if (winding*v0.cross(v1) > SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ triangulationVertices[currIndex].fVertexType = TriangulationVertex::VertexType::kConvex;
+ } else {
+ triangulationVertices[currIndex].fVertexType = TriangulationVertex::VertexType::kReflex;
+ }
+
+ prevIndex = currIndex;
+ v0 = v1;
+ }
+
+ // Classify initial vertices into a list of convex vertices and a hash of reflex vertices
+ // TODO: possibly sort the convexList in some way to get better triangles
+ SkTInternalLList<TriangulationVertex> convexList;
+ ReflexHash reflexHash;
+ if (!reflexHash.init(bounds, polygonSize)) {
+ return false;
+ }
+ prevIndex = polygonSize - 1;
+ for (int currIndex = 0; currIndex < polygonSize; prevIndex = currIndex, ++currIndex) {
+ TriangulationVertex::VertexType currType = triangulationVertices[currIndex].fVertexType;
+ if (TriangulationVertex::VertexType::kConvex == currType) {
+ int nextIndex = (currIndex + 1) % polygonSize;
+ TriangulationVertex::VertexType prevType = triangulationVertices[prevIndex].fVertexType;
+ TriangulationVertex::VertexType nextType = triangulationVertices[nextIndex].fVertexType;
+ // We prioritize clipping vertices with neighboring reflex vertices.
+ // The intent here is that it will cull reflex vertices more quickly.
+ if (TriangulationVertex::VertexType::kReflex == prevType ||
+ TriangulationVertex::VertexType::kReflex == nextType) {
+ convexList.addToHead(&triangulationVertices[currIndex]);
+ } else {
+ convexList.addToTail(&triangulationVertices[currIndex]);
+ }
+ } else {
+ // We treat near-collinear vertices as reflex
+ reflexHash.add(&triangulationVertices[currIndex]);
+ }
+ }
+
+ // The general concept: We are trying to find three neighboring vertices where
+ // no other vertex lies inside the triangle (an "ear"). If we find one, we clip
+ // that ear off, and then repeat on the new polygon. Once we get down to three vertices
+ // we have triangulated the entire polygon.
+ // In the worst case this is an n^2 algorithm. We can cut down the search space somewhat by
+ // noting that only convex vertices can be potential ears, and we only need to check whether
+ // any reflex vertices lie inside the ear.
+ triangleIndices->setReserve(triangleIndices->count() + 3 * (polygonSize - 2));
+ int vertexCount = polygonSize;
+ while (vertexCount > 3) {
+ bool success = false;
+ TriangulationVertex* earVertex = nullptr;
+ TriangulationVertex* p0 = nullptr;
+ TriangulationVertex* p2 = nullptr;
+ // find a convex vertex to clip
+ for (SkTInternalLList<TriangulationVertex>::Iter convexIter = convexList.begin();
+ convexIter != convexList.end(); ++convexIter) {
+ earVertex = *convexIter;
+ SkASSERT(TriangulationVertex::VertexType::kReflex != earVertex->fVertexType);
+
+ p0 = &triangulationVertices[earVertex->fPrevIndex];
+ p2 = &triangulationVertices[earVertex->fNextIndex];
+
+ // see if any reflex vertices are inside the ear
+ bool failed = reflexHash.checkTriangle(p0->fPosition, earVertex->fPosition,
+ p2->fPosition, p0->fIndex, p2->fIndex);
+ if (failed) {
+ continue;
+ }
+
+ // found one we can clip
+ success = true;
+ break;
+ }
+ // If we can't find any ears to clip, this probably isn't a simple polygon
+ if (!success) {
+ return false;
+ }
+
+ // add indices
+ auto indices = triangleIndices->append(3);
+ indices[0] = indexMap[p0->fIndex];
+ indices[1] = indexMap[earVertex->fIndex];
+ indices[2] = indexMap[p2->fIndex];
+
+ // clip the ear
+ convexList.remove(earVertex);
+ --vertexCount;
+
+ // reclassify reflex verts
+ p0->fNextIndex = earVertex->fNextIndex;
+ reclassify_vertex(p0, polygonVerts, winding, &reflexHash, &convexList);
+
+ p2->fPrevIndex = earVertex->fPrevIndex;
+ reclassify_vertex(p2, polygonVerts, winding, &reflexHash, &convexList);
+ }
+
+ // output indices
+ for (SkTInternalLList<TriangulationVertex>::Iter vertexIter = convexList.begin();
+ vertexIter != convexList.end(); ++vertexIter) {
+ TriangulationVertex* vertex = *vertexIter;
+ *triangleIndices->push() = indexMap[vertex->fIndex];
+ }
+
+ return true;
+}
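+// A minimal usage sketch for the routine above; the caller-side names are
+// illustrative, not part of the API:
+//
+//     SkPoint quad[4] = {{0, 0}, {10, 0}, {10, 10}, {0, 10}};
+//     uint16_t indexMap[4] = {0, 1, 2, 3};  // identity remapping
+//     SkTDArray<uint16_t> indices;
+//     if (SkTriangulateSimplePolygon(quad, indexMap, 4, &indices)) {
+//         // 'indices' now holds 3*(4 - 2) = 6 entries forming two triangles
+//     }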
+
+///////////
+
+static double crs(SkVector a, SkVector b) {
+ return a.fX * b.fY - a.fY * b.fX;
+}
+
+static int sign(SkScalar v) {
+ return v < 0 ? -1 : (v > 0);
+}
+
+struct SignTracker {
+ int fSign;
+ int fSignChanges;
+
+ void reset() {
+ fSign = 0;
+ fSignChanges = 0;
+ }
+
+ void init(int s) {
+ SkASSERT(fSignChanges == 0);
+ SkASSERT(s == 1 || s == -1 || s == 0);
+ fSign = s;
+ fSignChanges = 1;
+ }
+
+ void update(int s) {
+ if (s) {
+ if (fSign != s) {
+ fSignChanges += 1;
+ fSign = s;
+ }
+ }
+ }
+};
+
+struct ConvexTracker {
+ SkVector fFirst, fPrev;
+ SignTracker fDSign, fCSign;
+ int fVecCounter;
+ bool fIsConcave;
+
+ ConvexTracker() { this->reset(); }
+
+ void reset() {
+ fPrev = {0, 0};
+ fDSign.reset();
+ fCSign.reset();
+ fVecCounter = 0;
+ fIsConcave = false;
+ }
+
+ void addVec(SkPoint p1, SkPoint p0) {
+ this->addVec(p1 - p0);
+ }
+ void addVec(SkVector v) {
+ if (v.fX == 0 && v.fY == 0) {
+ return;
+ }
+
+ fVecCounter += 1;
+ if (fVecCounter == 1) {
+ fFirst = fPrev = v;
+ fDSign.update(sign(v.fX));
+ return;
+ }
+
+ SkScalar d = v.fX;
+ SkScalar c = crs(fPrev, v);
+ int sign_c;
+ if (c) {
+ sign_c = sign(c);
+ } else {
+ if (d >= 0) {
+ sign_c = fCSign.fSign;
+ } else {
+ sign_c = -fCSign.fSign;
+ }
+ }
+
+ fDSign.update(sign(d));
+ fCSign.update(sign_c);
+ fPrev = v;
+
+ if (fDSign.fSignChanges > 3 || fCSign.fSignChanges > 1) {
+ fIsConcave = true;
+ }
+ }
+
+ void finalCross() {
+ this->addVec(fFirst);
+ }
+};
+
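+// Intuition for the thresholds used below (a hedged illustration, not a
+// proof): traversing a convex polygon once, consecutive-edge cross products
+// keep a single sign, so fCSign records exactly one change, while the edge
+// x-component can reverse at most twice. For the square
+// (0,0)->(1,0)->(1,1)->(0,1) the nonzero edge x-components run +1, -1, +1
+// (three recorded changes, right at the fDSign limit) and every nonzero
+// cross product is +1.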
+bool SkIsPolyConvex_experimental(const SkPoint pts[], int count) {
+ if (count <= 3) {
+ return true;
+ }
+
+ ConvexTracker tracker;
+
+ for (int i = 0; i < count - 1; ++i) {
+ tracker.addVec(pts[i + 1], pts[i]);
+ if (tracker.fIsConcave) {
+ return false;
+ }
+ }
+ tracker.addVec(pts[0], pts[count - 1]);
+ tracker.finalCross();
+ return !tracker.fIsConcave;
+}
+
diff --git a/gfx/skia/skia/src/utils/SkPolyUtils.h b/gfx/skia/skia/src/utils/SkPolyUtils.h
new file mode 100644
index 0000000000..6c3403e733
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOffsetPolygon_DEFINED
+#define SkOffsetPolygon_DEFINED
+
+#include <functional>
+
+#include "include/core/SkPoint.h"
+#include "include/private/SkTDArray.h"
+
+struct SkRect;
+
+/**
+ * Generates a polygon that is inset a constant distance from the boundary of a given
+ * convex polygon.
+ *
+ * @param inputPolygonVerts Array of points representing the vertices of the original polygon.
+ * It should be convex and have no coincident points.
+ * @param inputPolygonSize Number of vertices in the original polygon.
+ * @param inset How far we wish to inset the polygon. This should be a positive value.
+ * @param insetPolygon The resulting inset polygon, if any.
+ * @return true if an inset polygon exists, false otherwise.
+ */
+bool SkInsetConvexPolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ SkScalar inset, SkTDArray<SkPoint>* insetPolygon);
+
+/**
+ * Generates a simple polygon (if possible) that is offset a constant distance from the boundary
+ * of a given simple polygon.
+ * The input polygon must be simple and have no coincident vertices or collinear edges.
+ *
+ * @param inputPolygonVerts Array of points representing the vertices of the original polygon.
+ * @param inputPolygonSize Number of vertices in the original polygon.
+ * @param bounds Bounding rectangle for the original polygon.
+ * @param offset How far we wish to offset the polygon.
+ * Positive values indicate insetting, negative values outsetting.
+ * @param offsetPolygon   The resulting offset polygon, if any.
+ * @param polygonIndices The indices of the original polygon that map to the new one.
+ * @return true if an offset simple polygon exists, false otherwise.
+ */
+bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ const SkRect& bounds, SkScalar offset, SkTDArray<SkPoint>* offsetPolygon,
+ SkTDArray<int>* polygonIndices = nullptr);
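+// A minimal usage sketch (names are illustrative): inset a simple polygon by 2.
+//
+//     SkRect bounds;
+//     bounds.setBounds(verts, vertCount);
+//     SkTDArray<SkPoint> inset;
+//     if (SkOffsetSimplePolygon(verts, vertCount, bounds, 2.0f, &inset)) {
+//         // 'inset' holds a simple polygon offset inward by 2 units
+//     }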
+
+/**
+ * Compute the number of points needed for a circular join when offsetting a vertex.
+ * The lengths of offset0 and offset1 don't have to equal |offset| -- only the direction matters.
+ * The segment lengths will be approximately four pixels.
+ *
+ * @param offset0 Starting offset vector direction.
+ * @param offset1 Ending offset vector direction.
+ * @param offset Offset value (can be negative).
+ * @param rotSin Sine of rotation delta per step.
+ * @param rotCos Cosine of rotation delta per step.
+ * @param n Number of steps to fill out the arc.
+ * @return true for success, false otherwise
+ */
+bool SkComputeRadialSteps(const SkVector& offset0, const SkVector& offset1, SkScalar offset,
+ SkScalar* rotSin, SkScalar* rotCos, int* n);
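+// Rough worked example, using the ~4 pixel segment length noted above: for a
+// 90 degree turn between offset0 and offset1 at offset 16, the arc length is
+// about 16*(pi/2) ~= 25.1 pixels, so n comes out near 6 and rotSin/rotCos
+// describe a per-step rotation of roughly 15 degrees.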
+
+/**
+ * Determine winding direction for a polygon.
+ * The input polygon must be simple or the result will be meaningless.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return 1 for clockwise winding, -1 for counterclockwise, and 0 if zero signed area
+ * (either degenerate or self-intersecting).
+ * The y-axis is assumed to be pointing down.
+ */
+int SkGetPolygonWinding(const SkPoint* polygonVerts, int polygonSize);
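+// Example (recall the y-axis points down): the triangle (0,0), (10,0), (0,10)
+// has a positive shoelace sum (0 + 100 + 0), reads as clockwise on screen,
+// and so returns 1.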
+
+/**
+ * Determine whether a polygon is convex or not.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return true if the polygon is convex, false otherwise.
+ */
+bool SkIsConvexPolygon(const SkPoint* polygonVerts, int polygonSize);
+
+/**
+ * Determine whether a polygon is simple (i.e., not self-intersecting) or not.
+ * The input polygon must have no coincident vertices or the test will fail.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return true if the polygon is simple, false otherwise.
+ */
+bool SkIsSimplePolygon(const SkPoint* polygonVerts, int polygonSize);
+
+/**
+ * Compute indices to triangulate the given polygon.
+ * The input polygon must be simple (i.e. it is not self-intersecting)
+ * and have no coincident vertices or collinear edges.
+ *
+ * @param polygonVerts  Array of points representing the vertices of the polygon.
+ * @param indexMap  Mapping from index in the given array to the final index in the triangulation.
+ * @param polygonSize  Number of vertices in the polygon.
+ * @param triangleIndices  Indices of the resulting triangulation.
+ * @return true if successful, false otherwise.
+ */
+bool SkTriangulateSimplePolygon(const SkPoint* polygonVerts, uint16_t* indexMap, int polygonSize,
+                                SkTDArray<uint16_t>* triangleIndices);
+
+// Experiment: doesn't handle really big floats (returns false), always returns true for count <= 3
+bool SkIsPolyConvex_experimental(const SkPoint[], int count);
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShadowTessellator.cpp b/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
new file mode 100644
index 0000000000..3e237b04fc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
@@ -0,0 +1,1169 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+#include "src/utils/SkPolyUtils.h"
+#include "src/utils/SkShadowTessellator.h"
+
+#if SK_SUPPORT_GPU
+#include "src/gpu/geometry/GrPathUtils.h"
+#endif
+
+
+/**
+ * Base class
+ */
+class SkBaseShadowTessellator {
+public:
+ SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds, bool transparent);
+ virtual ~SkBaseShadowTessellator() {}
+
+ sk_sp<SkVertices> releaseVertices() {
+ if (!fSucceeded) {
+ return nullptr;
+ }
+ return SkVertices::MakeCopy(SkVertices::kTriangles_VertexMode, this->vertexCount(),
+ fPositions.begin(), nullptr, fColors.begin(),
+ this->indexCount(), fIndices.begin());
+ }
+
+protected:
+ static constexpr auto kMinHeight = 0.1f;
+ static constexpr auto kPenumbraColor = SK_ColorTRANSPARENT;
+ static constexpr auto kUmbraColor = SK_ColorBLACK;
+
+ int vertexCount() const { return fPositions.count(); }
+ int indexCount() const { return fIndices.count(); }
+
+ // initialization methods
+ bool accumulateCentroid(const SkPoint& c, const SkPoint& n);
+ bool checkConvexity(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2);
+ void finishPathPolygon();
+
+ // convex shadow methods
+ bool computeConvexShadow(SkScalar inset, SkScalar outset, bool doClip);
+ void computeClipVectorsAndTestCentroid();
+ bool clipUmbraPoint(const SkPoint& umbraPoint, const SkPoint& centroid, SkPoint* clipPoint);
+ void addEdge(const SkVector& nextPoint, const SkVector& nextNormal, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon, bool lastEdge, bool doClip);
+ bool addInnerPoint(const SkPoint& pathPoint, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon, int* currUmbraIndex);
+ int getClosestUmbraIndex(const SkPoint& point, const SkTDArray<SkPoint>& umbraPolygon);
+
+ // concave shadow methods
+ bool computeConcaveShadow(SkScalar inset, SkScalar outset);
+ void stitchConcaveRings(const SkTDArray<SkPoint>& umbraPolygon,
+ SkTDArray<int>* umbraIndices,
+ const SkTDArray<SkPoint>& penumbraPolygon,
+ SkTDArray<int>* penumbraIndices);
+
+ void handleLine(const SkPoint& p);
+ void handleLine(const SkMatrix& m, SkPoint* p);
+
+ void handleQuad(const SkPoint pts[3]);
+ void handleQuad(const SkMatrix& m, SkPoint pts[3]);
+
+ void handleCubic(const SkMatrix& m, SkPoint pts[4]);
+
+ void handleConic(const SkMatrix& m, SkPoint pts[3], SkScalar w);
+
+ bool addArc(const SkVector& nextNormal, SkScalar offset, bool finishArc);
+
+ void appendTriangle(uint16_t index0, uint16_t index1, uint16_t index2);
+ void appendQuad(uint16_t index0, uint16_t index1, uint16_t index2, uint16_t index3);
+
+ SkScalar heightFunc(SkScalar x, SkScalar y) {
+ return fZPlaneParams.fX*x + fZPlaneParams.fY*y + fZPlaneParams.fZ;
+ }
+
+ SkPoint3 fZPlaneParams;
+
+ // temporary buffer
+ SkTDArray<SkPoint> fPointBuffer;
+
+ SkTDArray<SkPoint> fPositions;
+ SkTDArray<SkColor> fColors;
+ SkTDArray<uint16_t> fIndices;
+
+ SkTDArray<SkPoint> fPathPolygon;
+ SkTDArray<SkPoint> fClipPolygon;
+ SkTDArray<SkVector> fClipVectors;
+
+ SkRect fPathBounds;
+ SkPoint fCentroid;
+ SkScalar fArea;
+ SkScalar fLastArea;
+ SkScalar fLastCross;
+
+ int fFirstVertexIndex;
+ SkVector fFirstOutset;
+ SkPoint fFirstPoint;
+
+ bool fSucceeded;
+ bool fTransparent;
+ bool fIsConvex;
+ bool fValidUmbra;
+
+ SkScalar fDirection;
+ int fPrevUmbraIndex;
+ int fCurrUmbraIndex;
+ int fCurrClipIndex;
+ bool fPrevUmbraOutside;
+ bool fFirstUmbraOutside;
+ SkVector fPrevOutset;
+ SkPoint fPrevPoint;
+};
+
+// make external linkage happy
+constexpr SkColor SkBaseShadowTessellator::kUmbraColor;
+constexpr SkColor SkBaseShadowTessellator::kPenumbraColor;
+
+static bool compute_normal(const SkPoint& p0, const SkPoint& p1, SkScalar dir,
+ SkVector* newNormal) {
+ SkVector normal;
+ // compute perpendicular
+ normal.fX = p0.fY - p1.fY;
+ normal.fY = p1.fX - p0.fX;
+ normal *= dir;
+ if (!normal.normalize()) {
+ return false;
+ }
+ *newNormal = normal;
+ return true;
+}
+
+static bool duplicate_pt(const SkPoint& p0, const SkPoint& p1) {
+ static constexpr SkScalar kClose = (SK_Scalar1 / 16);
+ static constexpr SkScalar kCloseSqd = kClose * kClose;
+
+ SkScalar distSq = SkPointPriv::DistanceToSqd(p0, p1);
+ return distSq < kCloseSqd;
+}
+
+static SkScalar perp_dot(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ SkVector v0 = p1 - p0;
+ SkVector v1 = p2 - p1;
+ return v0.cross(v1);
+}
+
+SkBaseShadowTessellator::SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds,
+ bool transparent)
+ : fZPlaneParams(zPlaneParams)
+ , fPathBounds(bounds)
+ , fCentroid({0, 0})
+ , fArea(0)
+ , fLastArea(0)
+ , fLastCross(0)
+ , fFirstVertexIndex(-1)
+ , fSucceeded(false)
+ , fTransparent(transparent)
+ , fIsConvex(true)
+ , fValidUmbra(true)
+ , fDirection(1)
+ , fPrevUmbraIndex(-1)
+ , fCurrUmbraIndex(0)
+ , fCurrClipIndex(0)
+ , fPrevUmbraOutside(false)
+ , fFirstUmbraOutside(false) {
+ // child classes will set reserve for positions, colors and indices
+}
+
+bool SkBaseShadowTessellator::accumulateCentroid(const SkPoint& curr, const SkPoint& next) {
+ if (duplicate_pt(curr, next)) {
+ return false;
+ }
+
+ SkASSERT(fPathPolygon.count() > 0);
+ SkVector v0 = curr - fPathPolygon[0];
+ SkVector v1 = next - fPathPolygon[0];
+ SkScalar quadArea = v0.cross(v1);
+ fCentroid.fX += (v0.fX + v1.fX) * quadArea;
+ fCentroid.fY += (v0.fY + v1.fY) * quadArea;
+ fArea += quadArea;
+ // convexity check
+ if (quadArea*fLastArea < 0) {
+ fIsConvex = false;
+ }
+ if (0 != quadArea) {
+ fLastArea = quadArea;
+ }
+
+ return true;
+}
+
+bool SkBaseShadowTessellator::checkConvexity(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2) {
+ SkScalar cross = perp_dot(p0, p1, p2);
+ // skip collinear point
+ if (SkScalarNearlyZero(cross)) {
+ return false;
+ }
+
+ // check for convexity
+ if (fLastCross*cross < 0) {
+ fIsConvex = false;
+ }
+ if (0 != cross) {
+ fLastCross = cross;
+ }
+
+ return true;
+}
+
+void SkBaseShadowTessellator::finishPathPolygon() {
+ if (fPathPolygon.count() > 1) {
+ if (!this->accumulateCentroid(fPathPolygon[fPathPolygon.count() - 1], fPathPolygon[0])) {
+ // remove coincident point
+ fPathPolygon.pop();
+ }
+ }
+
+ if (fPathPolygon.count() > 2) {
+ // do this before the final convexity check, so we use the correct fPathPolygon[0]
+ fCentroid *= sk_ieee_float_divide(1, 3 * fArea);
+ fCentroid += fPathPolygon[0];
+ if (!checkConvexity(fPathPolygon[fPathPolygon.count() - 2],
+ fPathPolygon[fPathPolygon.count() - 1],
+ fPathPolygon[0])) {
+ // remove collinear point
+ fPathPolygon[0] = fPathPolygon[fPathPolygon.count() - 1];
+ fPathPolygon.pop();
+ }
+ }
+
+ // if area is positive, winding is ccw
+ fDirection = fArea > 0 ? -1 : 1;
+}
+
+bool SkBaseShadowTessellator::computeConvexShadow(SkScalar inset, SkScalar outset, bool doClip) {
+ if (doClip) {
+ this->computeClipVectorsAndTestCentroid();
+ }
+
+ // adjust inset distance and umbra color if necessary
+ auto umbraColor = kUmbraColor;
+ SkScalar minDistSq = SkPointPriv::DistanceToLineSegmentBetweenSqd(fCentroid,
+ fPathPolygon[0],
+ fPathPolygon[1]);
+ SkRect bounds;
+ bounds.setBounds(&fPathPolygon[0], fPathPolygon.count());
+ for (int i = 1; i < fPathPolygon.count(); ++i) {
+ int j = i + 1;
+ if (i == fPathPolygon.count() - 1) {
+ j = 0;
+ }
+ SkPoint currPoint = fPathPolygon[i];
+ SkPoint nextPoint = fPathPolygon[j];
+ SkScalar distSq = SkPointPriv::DistanceToLineSegmentBetweenSqd(fCentroid, currPoint,
+ nextPoint);
+ if (distSq < minDistSq) {
+ minDistSq = distSq;
+ }
+ }
+
+ SkTDArray<SkPoint> insetPolygon;
+ if (inset > SK_ScalarNearlyZero) {
+ static constexpr auto kTolerance = 1.0e-2f;
+ if (minDistSq < (inset + kTolerance)*(inset + kTolerance)) {
+ // if the umbra would collapse, we back off a bit on inner blur and adjust the alpha
+ auto newInset = SkScalarSqrt(minDistSq) - kTolerance;
+ auto ratio = 128 * (newInset / inset + 1);
+ SkASSERT(SkScalarIsFinite(ratio));
+ // they aren't PMColors, but the interpolation algorithm is the same
+ umbraColor = SkPMLerp(kUmbraColor, kPenumbraColor, (unsigned)ratio);
+ inset = newInset;
+ }
+
+ // generate inner ring
+ if (!SkInsetConvexPolygon(&fPathPolygon[0], fPathPolygon.count(), inset,
+ &insetPolygon)) {
+ // not ideal, but in this case we'll inset using the centroid
+ fValidUmbra = false;
+ }
+ }
+ const SkTDArray<SkPoint>& umbraPolygon = (inset > SK_ScalarNearlyZero) ? insetPolygon
+ : fPathPolygon;
+
+ // walk around the path polygon, generate outer ring and connect to inner ring
+ if (fTransparent) {
+ fPositions.push_back(fCentroid);
+ fColors.push_back(umbraColor);
+ }
+ fCurrUmbraIndex = 0;
+
+ // initial setup
+ // add first quad
+ int polyCount = fPathPolygon.count();
+ if (!compute_normal(fPathPolygon[polyCount - 1], fPathPolygon[0], fDirection, &fFirstOutset)) {
+ // polygon should be sanitized by this point, so this is unrecoverable
+ return false;
+ }
+
+ fFirstOutset *= outset;
+ fFirstPoint = fPathPolygon[polyCount - 1];
+ fFirstVertexIndex = fPositions.count();
+ fPrevOutset = fFirstOutset;
+ fPrevPoint = fFirstPoint;
+ fPrevUmbraIndex = -1;
+
+ this->addInnerPoint(fFirstPoint, umbraColor, umbraPolygon, &fPrevUmbraIndex);
+
+ if (!fTransparent && doClip) {
+ SkPoint clipPoint;
+ bool isOutside = this->clipUmbraPoint(fPositions[fFirstVertexIndex],
+ fCentroid, &clipPoint);
+ if (isOutside) {
+ fPositions.push_back(clipPoint);
+ fColors.push_back(umbraColor);
+ }
+ fPrevUmbraOutside = isOutside;
+ fFirstUmbraOutside = isOutside;
+ }
+
+ SkPoint newPoint = fFirstPoint + fFirstOutset;
+ fPositions.push_back(newPoint);
+ fColors.push_back(kPenumbraColor);
+ this->addEdge(fPathPolygon[0], fFirstOutset, umbraColor, umbraPolygon, false, doClip);
+
+ for (int i = 1; i < polyCount; ++i) {
+ SkVector normal;
+ if (!compute_normal(fPrevPoint, fPathPolygon[i], fDirection, &normal)) {
+ return false;
+ }
+ normal *= outset;
+ this->addArc(normal, outset, true);
+ this->addEdge(fPathPolygon[i], normal, umbraColor, umbraPolygon,
+ i == polyCount - 1, doClip);
+ }
+ SkASSERT(this->indexCount());
+
+ // final fan
+ SkASSERT(fPositions.count() >= 3);
+ if (this->addArc(fFirstOutset, outset, false)) {
+ if (fFirstUmbraOutside) {
+ this->appendTriangle(fFirstVertexIndex, fPositions.count() - 1,
+ fFirstVertexIndex + 2);
+ } else {
+ this->appendTriangle(fFirstVertexIndex, fPositions.count() - 1,
+ fFirstVertexIndex + 1);
+ }
+ } else {
+ // no arc added, fix up by setting first penumbra point position to last one
+ if (fFirstUmbraOutside) {
+ fPositions[fFirstVertexIndex + 2] = fPositions[fPositions.count() - 1];
+ } else {
+ fPositions[fFirstVertexIndex + 1] = fPositions[fPositions.count() - 1];
+ }
+ }
+
+ return true;
+}
+
+void SkBaseShadowTessellator::computeClipVectorsAndTestCentroid() {
+ SkASSERT(fClipPolygon.count() >= 3);
+ fCurrClipIndex = fClipPolygon.count() - 1;
+
+ // init clip vectors
+ SkVector v0 = fClipPolygon[1] - fClipPolygon[0];
+ SkVector v1 = fClipPolygon[2] - fClipPolygon[0];
+ fClipVectors.push_back(v0);
+
+ // init centroid check
+ bool hiddenCentroid = true;
+ v1 = fCentroid - fClipPolygon[0];
+ SkScalar initCross = v0.cross(v1);
+
+ for (int p = 1; p < fClipPolygon.count(); ++p) {
+ // add to clip vectors
+ v0 = fClipPolygon[(p + 1) % fClipPolygon.count()] - fClipPolygon[p];
+ fClipVectors.push_back(v0);
+ // Determine if transformed centroid is inside clipPolygon.
+ v1 = fCentroid - fClipPolygon[p];
+ if (initCross*v0.cross(v1) <= 0) {
+ hiddenCentroid = false;
+ }
+ }
+ SkASSERT(fClipVectors.count() == fClipPolygon.count());
+
+ fTransparent = fTransparent || !hiddenCentroid;
+}
+
+void SkBaseShadowTessellator::addEdge(const SkPoint& nextPoint, const SkVector& nextNormal,
+ SkColor umbraColor, const SkTDArray<SkPoint>& umbraPolygon,
+ bool lastEdge, bool doClip) {
+ // add next umbra point
+ int currUmbraIndex;
+ bool duplicate;
+ if (lastEdge) {
+ duplicate = false;
+ currUmbraIndex = fFirstVertexIndex;
+ fPrevPoint = nextPoint;
+ } else {
+ duplicate = this->addInnerPoint(nextPoint, umbraColor, umbraPolygon, &currUmbraIndex);
+ }
+ int prevPenumbraIndex = duplicate || (currUmbraIndex == fFirstVertexIndex)
+ ? fPositions.count() - 1
+ : fPositions.count() - 2;
+ if (!duplicate) {
+ // add to center fan if transparent or centroid showing
+ if (fTransparent) {
+ this->appendTriangle(0, fPrevUmbraIndex, currUmbraIndex);
+ // otherwise add to clip ring
+ } else if (doClip) {
+ SkPoint clipPoint;
+ bool isOutside = lastEdge ? fFirstUmbraOutside
+ : this->clipUmbraPoint(fPositions[currUmbraIndex], fCentroid,
+ &clipPoint);
+ if (isOutside) {
+ if (!lastEdge) {
+ fPositions.push_back(clipPoint);
+ fColors.push_back(umbraColor);
+ }
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex, currUmbraIndex + 1);
+ if (fPrevUmbraOutside) {
+ // fill out quad
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex + 1,
+ fPrevUmbraIndex + 1);
+ }
+ } else if (fPrevUmbraOutside) {
+ // add tri
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex, fPrevUmbraIndex + 1);
+ }
+
+ fPrevUmbraOutside = isOutside;
+ }
+ }
+
+ // add next penumbra point and quad
+ SkPoint newPoint = nextPoint + nextNormal;
+ fPositions.push_back(newPoint);
+ fColors.push_back(kPenumbraColor);
+
+ if (!duplicate) {
+ this->appendTriangle(fPrevUmbraIndex, prevPenumbraIndex, currUmbraIndex);
+ }
+ this->appendTriangle(prevPenumbraIndex, fPositions.count() - 1, currUmbraIndex);
+
+ fPrevUmbraIndex = currUmbraIndex;
+ fPrevOutset = nextNormal;
+}
+
+bool SkBaseShadowTessellator::clipUmbraPoint(const SkPoint& umbraPoint, const SkPoint& centroid,
+ SkPoint* clipPoint) {
+ SkVector segmentVector = centroid - umbraPoint;
+
+ int startClipPoint = fCurrClipIndex;
+ do {
+ SkVector dp = umbraPoint - fClipPolygon[fCurrClipIndex];
+ SkScalar denom = fClipVectors[fCurrClipIndex].cross(segmentVector);
+ SkScalar t_num = dp.cross(segmentVector);
+ // if line segments are nearly parallel
+ if (SkScalarNearlyZero(denom)) {
+ // and collinear
+ if (SkScalarNearlyZero(t_num)) {
+ return false;
+ }
+ // otherwise they are separate; we'll try the next poly segment
+ // else if crossing lies within poly segment
+ } else if (t_num >= 0 && t_num <= denom) {
+ SkScalar s_num = dp.cross(fClipVectors[fCurrClipIndex]);
+ // if umbra point is inside the clip polygon
+ if (s_num >= 0 && s_num <= denom) {
+ segmentVector *= s_num / denom;
+ *clipPoint = umbraPoint + segmentVector;
+ return true;
+ }
+ }
+ fCurrClipIndex = (fCurrClipIndex + 1) % fClipPolygon.count();
+ } while (fCurrClipIndex != startClipPoint);
+
+ return false;
+}
+
+bool SkBaseShadowTessellator::addInnerPoint(const SkPoint& pathPoint, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon,
+ int* currUmbraIndex) {
+ SkPoint umbraPoint;
+ if (!fValidUmbra) {
+ SkVector v = fCentroid - pathPoint;
+ v *= 0.95f;
+ umbraPoint = pathPoint + v;
+ } else {
+ umbraPoint = umbraPolygon[this->getClosestUmbraIndex(pathPoint, umbraPolygon)];
+ }
+
+ fPrevPoint = pathPoint;
+
+ // merge "close" points
+ if (fPrevUmbraIndex == -1 ||
+ !duplicate_pt(umbraPoint, fPositions[fPrevUmbraIndex])) {
+ // if we've wrapped around, don't add a new point
+ if (fPrevUmbraIndex >= 0 && duplicate_pt(umbraPoint, fPositions[fFirstVertexIndex])) {
+ *currUmbraIndex = fFirstVertexIndex;
+ } else {
+ *currUmbraIndex = fPositions.count();
+ fPositions.push_back(umbraPoint);
+ fColors.push_back(umbraColor);
+ }
+ return false;
+ } else {
+ *currUmbraIndex = fPrevUmbraIndex;
+ return true;
+ }
+}
+
+int SkBaseShadowTessellator::getClosestUmbraIndex(const SkPoint& p,
+ const SkTDArray<SkPoint>& umbraPolygon) {
+ SkScalar minDistance = SkPointPriv::DistanceToSqd(p, umbraPolygon[fCurrUmbraIndex]);
+ int index = fCurrUmbraIndex;
+ int dir = 1;
+ int next = (index + dir) % umbraPolygon.count();
+
+ // init travel direction
+ SkScalar distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ if (distance < minDistance) {
+ index = next;
+ minDistance = distance;
+ } else {
+ dir = umbraPolygon.count() - 1;
+ }
+
+ // iterate until we find a point that increases the distance
+ next = (index + dir) % umbraPolygon.count();
+ distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ while (distance < minDistance) {
+ index = next;
+ minDistance = distance;
+ next = (index + dir) % umbraPolygon.count();
+ distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ }
+
+ fCurrUmbraIndex = index;
+ return index;
+}
+
+bool SkBaseShadowTessellator::computeConcaveShadow(SkScalar inset, SkScalar outset) {
+ if (!SkIsSimplePolygon(&fPathPolygon[0], fPathPolygon.count())) {
+ return false;
+ }
+
+ // generate inner ring
+ SkTDArray<SkPoint> umbraPolygon;
+ SkTDArray<int> umbraIndices;
+ umbraIndices.setReserve(fPathPolygon.count());
+ if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), fPathBounds, inset,
+ &umbraPolygon, &umbraIndices)) {
+ // TODO: figure out how to handle this case
+ return false;
+ }
+
+ // generate outer ring
+ SkTDArray<SkPoint> penumbraPolygon;
+ SkTDArray<int> penumbraIndices;
+ penumbraPolygon.setReserve(umbraPolygon.count());
+ penumbraIndices.setReserve(umbraPolygon.count());
+ if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), fPathBounds, -outset,
+ &penumbraPolygon, &penumbraIndices)) {
+ // TODO: figure out how to handle this case
+ return false;
+ }
+
+ if (!umbraPolygon.count() || !penumbraPolygon.count()) {
+ return false;
+ }
+
+ // attach the rings together
+ this->stitchConcaveRings(umbraPolygon, &umbraIndices, penumbraPolygon, &penumbraIndices);
+
+ return true;
+}
+
+void SkBaseShadowTessellator::stitchConcaveRings(const SkTDArray<SkPoint>& umbraPolygon,
+ SkTDArray<int>* umbraIndices,
+ const SkTDArray<SkPoint>& penumbraPolygon,
+ SkTDArray<int>* penumbraIndices) {
+ // TODO: only create and fill indexMap when fTransparent is true?
+ SkAutoSTMalloc<64, uint16_t> indexMap(umbraPolygon.count());
+
+ // find minimum indices
+ int minIndex = 0;
+ int min = (*penumbraIndices)[0];
+ for (int i = 1; i < (*penumbraIndices).count(); ++i) {
+ if ((*penumbraIndices)[i] < min) {
+ min = (*penumbraIndices)[i];
+ minIndex = i;
+ }
+ }
+ int currPenumbra = minIndex;
+
+ minIndex = 0;
+ min = (*umbraIndices)[0];
+ for (int i = 1; i < (*umbraIndices).count(); ++i) {
+ if ((*umbraIndices)[i] < min) {
+ min = (*umbraIndices)[i];
+ minIndex = i;
+ }
+ }
+ int currUmbra = minIndex;
+
+ // now find a case where the indices are equal (there should be at least one)
+ int maxPenumbraIndex = fPathPolygon.count() - 1;
+ int maxUmbraIndex = fPathPolygon.count() - 1;
+ while ((*penumbraIndices)[currPenumbra] != (*umbraIndices)[currUmbra]) {
+ if ((*penumbraIndices)[currPenumbra] < (*umbraIndices)[currUmbra]) {
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.count();
+ maxPenumbraIndex = (*penumbraIndices)[currPenumbra];
+ currPenumbra = (currPenumbra + 1) % penumbraPolygon.count();
+ } else {
+ (*umbraIndices)[currUmbra] += fPathPolygon.count();
+ maxUmbraIndex = (*umbraIndices)[currUmbra];
+ currUmbra = (currUmbra + 1) % umbraPolygon.count();
+ }
+ }
+
+ fPositions.push_back(penumbraPolygon[currPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int prevPenumbraIndex = 0;
+ fPositions.push_back(umbraPolygon[currUmbra]);
+ fColors.push_back(kUmbraColor);
+ fPrevUmbraIndex = 1;
+ indexMap[currUmbra] = 1;
+
+ int nextPenumbra = (currPenumbra + 1) % penumbraPolygon.count();
+ int nextUmbra = (currUmbra + 1) % umbraPolygon.count();
+ while ((*penumbraIndices)[nextPenumbra] <= maxPenumbraIndex ||
+ (*umbraIndices)[nextUmbra] <= maxUmbraIndex) {
+
+ if ((*umbraIndices)[nextUmbra] == (*penumbraIndices)[nextPenumbra]) {
+ // advance both one step
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.count() - 1;
+
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.count() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendQuad(prevPenumbraIndex, currPenumbraIndex,
+ fPrevUmbraIndex, currUmbraIndex);
+
+ prevPenumbraIndex = currPenumbraIndex;
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.count();
+ currPenumbra = nextPenumbra;
+ nextPenumbra = (currPenumbra + 1) % penumbraPolygon.count();
+
+ fPrevUmbraIndex = currUmbraIndex;
+ (*umbraIndices)[currUmbra] += fPathPolygon.count();
+ currUmbra = nextUmbra;
+ nextUmbra = (currUmbra + 1) % umbraPolygon.count();
+ }
+
+ while ((*penumbraIndices)[nextPenumbra] < (*umbraIndices)[nextUmbra] &&
+ (*penumbraIndices)[nextPenumbra] <= maxPenumbraIndex) {
+ // fill out penumbra arc
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.count() - 1;
+
+ this->appendTriangle(prevPenumbraIndex, currPenumbraIndex, fPrevUmbraIndex);
+
+ prevPenumbraIndex = currPenumbraIndex;
+ // this ensures the ordering when we wrap around
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.count();
+ currPenumbra = nextPenumbra;
+ nextPenumbra = (currPenumbra + 1) % penumbraPolygon.count();
+ }
+
+ while ((*umbraIndices)[nextUmbra] < (*penumbraIndices)[nextPenumbra] &&
+ (*umbraIndices)[nextUmbra] <= maxUmbraIndex) {
+ // fill out umbra arc
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.count() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendTriangle(fPrevUmbraIndex, prevPenumbraIndex, currUmbraIndex);
+
+ fPrevUmbraIndex = currUmbraIndex;
+ // this ensures the ordering when we wrap around
+ (*umbraIndices)[currUmbra] += fPathPolygon.count();
+ currUmbra = nextUmbra;
+ nextUmbra = (currUmbra + 1) % umbraPolygon.count();
+ }
+ }
+ // finish up by advancing both one step
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.count() - 1;
+
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.count() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendQuad(prevPenumbraIndex, currPenumbraIndex,
+ fPrevUmbraIndex, currUmbraIndex);
+
+ if (fTransparent) {
+ SkTriangulateSimplePolygon(umbraPolygon.begin(), indexMap, umbraPolygon.count(),
+ &fIndices);
+ }
+}
+
+
+// tessellation tolerance values, in device space pixels
+#if SK_SUPPORT_GPU
+static const SkScalar kQuadTolerance = 0.2f;
+static const SkScalar kCubicTolerance = 0.2f;
+#endif
+static const SkScalar kConicTolerance = 0.25f;
+
+// clamps the point to the nearest 16th of a pixel
+static void sanitize_point(const SkPoint& in, SkPoint* out) {
+ out->fX = SkScalarRoundToScalar(16.f*in.fX)*0.0625f;
+ out->fY = SkScalarRoundToScalar(16.f*in.fY)*0.0625f;
+}
+
+void SkBaseShadowTessellator::handleLine(const SkPoint& p) {
+ SkPoint pSanitized;
+ sanitize_point(p, &pSanitized);
+
+ if (fPathPolygon.count() > 0) {
+ if (!this->accumulateCentroid(fPathPolygon[fPathPolygon.count() - 1], pSanitized)) {
+ // skip coincident point
+ return;
+ }
+ }
+
+ if (fPathPolygon.count() > 1) {
+ if (!checkConvexity(fPathPolygon[fPathPolygon.count() - 2],
+ fPathPolygon[fPathPolygon.count() - 1],
+ pSanitized)) {
+ // remove collinear point
+ fPathPolygon.pop();
+ // it's possible that the previous point is coincident with the new one now
+ if (duplicate_pt(fPathPolygon[fPathPolygon.count() - 1], pSanitized)) {
+ fPathPolygon.pop();
+ }
+ }
+ }
+
+ fPathPolygon.push_back(pSanitized);
+}
+
+void SkBaseShadowTessellator::handleLine(const SkMatrix& m, SkPoint* p) {
+ m.mapPoints(p, 1);
+
+ this->handleLine(*p);
+}
+
+void SkBaseShadowTessellator::handleQuad(const SkPoint pts[3]) {
+#if SK_SUPPORT_GPU
+ // check for degeneracy
+ SkVector v0 = pts[1] - pts[0];
+ SkVector v1 = pts[2] - pts[0];
+ if (SkScalarNearlyZero(v0.cross(v1))) {
+ return;
+ }
+ // TODO: Pull PathUtils out of Ganesh?
+ int maxCount = GrPathUtils::quadraticPointCount(pts, kQuadTolerance);
+ fPointBuffer.setCount(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+ kQuadTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count; i++) {
+ this->handleLine(fPointBuffer[i]);
+ }
+#else
+ // for now, just to draw something
+ this->handleLine(pts[1]);
+ this->handleLine(pts[2]);
+#endif
+}
+
+void SkBaseShadowTessellator::handleQuad(const SkMatrix& m, SkPoint pts[3]) {
+ m.mapPoints(pts, 3);
+ this->handleQuad(pts);
+}
+
+void SkBaseShadowTessellator::handleCubic(const SkMatrix& m, SkPoint pts[4]) {
+ m.mapPoints(pts, 4);
+#if SK_SUPPORT_GPU
+ // TODO: Pull PathUtils out of Ganesh?
+ int maxCount = GrPathUtils::cubicPointCount(pts, kCubicTolerance);
+ fPointBuffer.setCount(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+ kCubicTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count; i++) {
+ this->handleLine(fPointBuffer[i]);
+ }
+#else
+ // for now, just to draw something
+ this->handleLine(pts[1]);
+ this->handleLine(pts[2]);
+ this->handleLine(pts[3]);
+#endif
+}
+
+void SkBaseShadowTessellator::handleConic(const SkMatrix& m, SkPoint pts[3], SkScalar w) {
+ if (m.hasPerspective()) {
+ w = SkConic::TransformW(pts, w, m);
+ }
+ m.mapPoints(pts, 3);
+ SkAutoConicToQuads quadder;
+ const SkPoint* quads = quadder.computeQuads(pts, w, kConicTolerance);
+ SkPoint lastPoint = *(quads++);
+ int count = quadder.countQuads();
+ for (int i = 0; i < count; ++i) {
+ SkPoint quadPts[3];
+ quadPts[0] = lastPoint;
+ quadPts[1] = quads[0];
+ quadPts[2] = i == count - 1 ? pts[2] : quads[1];
+ this->handleQuad(quadPts);
+ lastPoint = quadPts[2];
+ quads += 2;
+ }
+}
+
+bool SkBaseShadowTessellator::addArc(const SkVector& nextNormal, SkScalar offset, bool finishArc) {
+ // fill in fan from previous quad
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(fPrevOutset, nextNormal, offset, &rotSin, &rotCos, &numSteps)) {
+ // recover as best we can
+ numSteps = 0;
+ }
+ SkVector prevNormal = fPrevOutset;
+ for (int i = 0; i < numSteps-1; ++i) {
+ SkVector currNormal;
+ currNormal.fX = prevNormal.fX*rotCos - prevNormal.fY*rotSin;
+ currNormal.fY = prevNormal.fY*rotCos + prevNormal.fX*rotSin;
+ fPositions.push_back(fPrevPoint + currNormal);
+ fColors.push_back(kPenumbraColor);
+ this->appendTriangle(fPrevUmbraIndex, fPositions.count() - 1, fPositions.count() - 2);
+
+ prevNormal = currNormal;
+ }
+ if (finishArc && numSteps) {
+ fPositions.push_back(fPrevPoint + nextNormal);
+ fColors.push_back(kPenumbraColor);
+ this->appendTriangle(fPrevUmbraIndex, fPositions.count() - 1, fPositions.count() - 2);
+ }
+ fPrevOutset = nextNormal;
+
+ return (numSteps > 0);
+}
+
+void SkBaseShadowTessellator::appendTriangle(uint16_t index0, uint16_t index1, uint16_t index2) {
+ auto indices = fIndices.append(3);
+
+ indices[0] = index0;
+ indices[1] = index1;
+ indices[2] = index2;
+}
+
+void SkBaseShadowTessellator::appendQuad(uint16_t index0, uint16_t index1,
+ uint16_t index2, uint16_t index3) {
+ auto indices = fIndices.append(6);
+
+ indices[0] = index0;
+ indices[1] = index1;
+ indices[2] = index2;
+
+ indices[3] = index2;
+ indices[4] = index1;
+ indices[5] = index3;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkAmbientShadowTessellator : public SkBaseShadowTessellator {
+public:
+ SkAmbientShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams, bool transparent);
+
+private:
+ bool computePathPolygon(const SkPath& path, const SkMatrix& ctm);
+
+ typedef SkBaseShadowTessellator INHERITED;
+};
+
+SkAmbientShadowTessellator::SkAmbientShadowTessellator(const SkPath& path,
+ const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams,
+ bool transparent)
+ : INHERITED(zPlaneParams, path.getBounds(), transparent) {
+ // Set base colors
+ auto baseZ = heightFunc(fPathBounds.centerX(), fPathBounds.centerY());
+ // umbraColor is the interior value, penumbraColor the exterior value.
+ auto outset = SkDrawShadowMetrics::AmbientBlurRadius(baseZ);
+ auto inset = outset * SkDrawShadowMetrics::AmbientRecipAlpha(baseZ) - outset;
+ inset = SkScalarPin(inset, 0, SkTMin(path.getBounds().width(),
+ path.getBounds().height()));
+
+ if (!this->computePathPolygon(path, ctm)) {
+ return;
+ }
+ if (fPathPolygon.count() < 3 || !SkScalarIsFinite(fArea)) {
+ fSucceeded = true; // We don't want to try to blur these cases, so we will
+ // return an empty SkVertices instead.
+ return;
+ }
+
+ // Outer ring: 3*numPts
+ // Middle ring: numPts
+ fPositions.setReserve(4 * path.countPoints());
+ fColors.setReserve(4 * path.countPoints());
+ // Outer ring: 12*numPts
+ // Middle ring: 0
+ fIndices.setReserve(12 * path.countPoints());
+
+ if (fIsConvex) {
+ fSucceeded = this->computeConvexShadow(inset, outset, false);
+ } else {
+ fSucceeded = this->computeConcaveShadow(inset, outset);
+ }
+}
+
+bool SkAmbientShadowTessellator::computePathPolygon(const SkPath& path, const SkMatrix& ctm) {
+ fPathPolygon.setReserve(path.countPoints());
+
+ // walk around the path, tessellate and generate outer ring
+ // if original path is transparent, will accumulate sum of points for centroid
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ bool verbSeen = false;
+ bool closeSeen = false;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (closeSeen) {
+ return false;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ this->handleLine(ctm, &pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ this->handleQuad(ctm, pts);
+ break;
+ case SkPath::kCubic_Verb:
+ this->handleCubic(ctm, pts);
+ break;
+ case SkPath::kConic_Verb:
+ this->handleConic(ctm, pts, iter.conicWeight());
+ break;
+ case SkPath::kMove_Verb:
+ if (verbSeen) {
+ return false;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ closeSeen = true;
+ break;
+ }
+ verbSeen = true;
+ }
+
+ this->finishPathPolygon();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkSpotShadowTessellator : public SkBaseShadowTessellator {
+public:
+ SkSpotShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams, const SkPoint3& lightPos,
+ SkScalar lightRadius, bool transparent);
+
+private:
+ bool computeClipAndPathPolygons(const SkPath& path, const SkMatrix& ctm,
+ const SkMatrix& shadowTransform);
+ void addToClip(const SkVector& nextPoint);
+
+ typedef SkBaseShadowTessellator INHERITED;
+};
+
+SkSpotShadowTessellator::SkSpotShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ bool transparent)
+ : INHERITED(zPlaneParams, path.getBounds(), transparent) {
+
+ // Compute the blur radius, scale and translation for the spot shadow.
+ SkMatrix shadowTransform;
+ SkScalar outset;
+ if (!SkDrawShadowMetrics::GetSpotShadowTransform(lightPos, lightRadius,
+ ctm, zPlaneParams, path.getBounds(),
+ &shadowTransform, &outset)) {
+ return;
+ }
+ SkScalar inset = outset;
+
+ // compute rough clip bounds for umbra, plus offset polygon, plus centroid
+ if (!this->computeClipAndPathPolygons(path, ctm, shadowTransform)) {
+ return;
+ }
+ if (fClipPolygon.count() < 3 || fPathPolygon.count() < 3 || !SkScalarIsFinite(fArea)) {
+ fSucceeded = true; // We don't want to try to blur these cases, so we will
+ // return an empty SkVertices instead.
+ return;
+ }
+
+ // TODO: calculate these reserves better
+ // Penumbra ring: 3*numPts
+ // Umbra ring: numPts
+ // Inner ring: numPts
+ fPositions.setReserve(5 * path.countPoints());
+ fColors.setReserve(5 * path.countPoints());
+ // Penumbra ring: 12*numPts
+ // Umbra ring: 3*numPts
+ fIndices.setReserve(15 * path.countPoints());
+
+ if (fIsConvex) {
+ fSucceeded = this->computeConvexShadow(inset, outset, true);
+ } else {
+ fSucceeded = this->computeConcaveShadow(inset, outset);
+ }
+
+ if (!fSucceeded) {
+ return;
+ }
+
+ fSucceeded = true;
+}
+
+bool SkSpotShadowTessellator::computeClipAndPathPolygons(const SkPath& path, const SkMatrix& ctm,
+ const SkMatrix& shadowTransform) {
+
+ fPathPolygon.setReserve(path.countPoints());
+ fClipPolygon.setReserve(path.countPoints());
+
+ // Walk around the path and compute clip polygon and path polygon.
+ // Will also accumulate sum of areas for centroid.
+ // For Bezier curves, we compute additional interior points on curve.
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPoint clipPts[4];
+ SkPath::Verb verb;
+
+ // coefficients to compute cubic Bezier at t = 5/16
+ static constexpr SkScalar kA = 0.32495117187f;
+ static constexpr SkScalar kB = 0.44311523437f;
+ static constexpr SkScalar kC = 0.20141601562f;
+ static constexpr SkScalar kD = 0.03051757812f;
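+    // (these are the cubic Bernstein weights (1-t)^3, 3(1-t)^2*t, 3(1-t)*t^2, t^3
+    // evaluated at t = 5/16; e.g. kA = 0.6875^3 ~= 0.32495117187)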
+
+ SkPoint curvePoint;
+ SkScalar w;
+ bool closeSeen = false;
+ bool verbSeen = false;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (closeSeen) {
+ return false;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ ctm.mapPoints(clipPts, &pts[1], 1);
+ this->addToClip(clipPts[0]);
+ this->handleLine(shadowTransform, &pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ ctm.mapPoints(clipPts, pts, 3);
+ // point at t = 1/2
+ curvePoint.fX = 0.25f*clipPts[0].fX + 0.5f*clipPts[1].fX + 0.25f*clipPts[2].fX;
+ curvePoint.fY = 0.25f*clipPts[0].fY + 0.5f*clipPts[1].fY + 0.25f*clipPts[2].fY;
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[2]);
+ this->handleQuad(shadowTransform, pts);
+ break;
+ case SkPath::kConic_Verb:
+ ctm.mapPoints(clipPts, pts, 3);
+ w = iter.conicWeight();
+ // point at t = 1/2
+ curvePoint.fX = 0.25f*clipPts[0].fX + w*0.5f*clipPts[1].fX + 0.25f*clipPts[2].fX;
+ curvePoint.fY = 0.25f*clipPts[0].fY + w*0.5f*clipPts[1].fY + 0.25f*clipPts[2].fY;
+ curvePoint *= SkScalarInvert(0.5f + 0.5f*w);
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[2]);
+ this->handleConic(shadowTransform, pts, w);
+ break;
+ case SkPath::kCubic_Verb:
+ ctm.mapPoints(clipPts, pts, 4);
+ // point at t = 5/16
+ curvePoint.fX = kA*clipPts[0].fX + kB*clipPts[1].fX
+ + kC*clipPts[2].fX + kD*clipPts[3].fX;
+ curvePoint.fY = kA*clipPts[0].fY + kB*clipPts[1].fY
+ + kC*clipPts[2].fY + kD*clipPts[3].fY;
+ this->addToClip(curvePoint);
+ // point at t = 11/16
+ curvePoint.fX = kD*clipPts[0].fX + kC*clipPts[1].fX
+ + kB*clipPts[2].fX + kA*clipPts[3].fX;
+ curvePoint.fY = kD*clipPts[0].fY + kC*clipPts[1].fY
+ + kB*clipPts[2].fY + kA*clipPts[3].fY;
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[3]);
+ this->handleCubic(shadowTransform, pts);
+ break;
+ case SkPath::kMove_Verb:
+ if (verbSeen) {
+ return false;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ closeSeen = true;
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ verbSeen = true;
+ }
+
+ this->finishPathPolygon();
+ return true;
+}
+
+void SkSpotShadowTessellator::addToClip(const SkPoint& point) {
+ if (fClipPolygon.isEmpty() || !duplicate_pt(point, fClipPolygon[fClipPolygon.count() - 1])) {
+ fClipPolygon.push_back(point);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkVertices> SkShadowTessellator::MakeAmbient(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, bool transparent) {
+ if (!ctm.mapRect(path.getBounds()).isFinite() || !zPlane.isFinite()) {
+ return nullptr;
+ }
+ SkAmbientShadowTessellator ambientTess(path, ctm, zPlane, transparent);
+ return ambientTess.releaseVertices();
+}
+
+sk_sp<SkVertices> SkShadowTessellator::MakeSpot(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, const SkPoint3& lightPos,
+ SkScalar lightRadius, bool transparent) {
+ if (!ctm.mapRect(path.getBounds()).isFinite() || !zPlane.isFinite() ||
+ !lightPos.isFinite() || !(lightPos.fZ >= SK_ScalarNearlyZero) ||
+ !SkScalarIsFinite(lightRadius) || !(lightRadius >= SK_ScalarNearlyZero)) {
+ return nullptr;
+ }
+ SkSpotShadowTessellator spotTess(path, ctm, zPlane, lightPos, lightRadius, transparent);
+ return spotTess.releaseVertices();
+}
diff --git a/gfx/skia/skia/src/utils/SkShadowTessellator.h b/gfx/skia/skia/src/utils/SkShadowTessellator.h
new file mode 100644
index 0000000000..92faccccf7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowTessellator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowTessellator_DEFINED
+#define SkShadowTessellator_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include <functional> // std::function
+
+class SkMatrix;
+class SkPath;
+struct SkPoint3;
+class SkVertices;
+
+namespace SkShadowTessellator {
+
+typedef std::function<SkScalar(SkScalar, SkScalar)> HeightFunc;
+
+/**
+ * This function generates an ambient shadow mesh for a path by walking the path, outsetting by
+ * the radius, and setting inner and outer colors to umbraColor and penumbraColor, respectively.
+ * If transparent is true, then the center of the ambient shadow will be filled in.
+ */
+sk_sp<SkVertices> MakeAmbient(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, bool transparent);
+
+/**
+ * This function generates a spot shadow mesh for a path by walking the transformed path,
+ * further transforming by the scale and translation, and outsetting and insetting by a radius.
+ * The center will be clipped against the original path unless transparent is true.
+ */
+sk_sp<SkVertices> MakeSpot(const SkPath& path, const SkMatrix& ctm, const SkPoint3& zPlane,
+ const SkPoint3& lightPos, SkScalar lightRadius, bool transparent);
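+// A minimal usage sketch (the path, ctm, and light values here are
+// illustrative):
+//
+//     SkPoint3 zPlane = SkPoint3::Make(0, 0, 4);        // flat occluder at height 4
+//     SkPoint3 lightPos = SkPoint3::Make(250, 0, 600);  // device-space point light
+//     sk_sp<SkVertices> ambient =
+//             SkShadowTessellator::MakeAmbient(path, ctm, zPlane, /*transparent=*/false);
+//     sk_sp<SkVertices> spot =
+//             SkShadowTessellator::MakeSpot(path, ctm, zPlane, lightPos, 800, false);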
+
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShadowUtils.cpp b/gfx/skia/skia/src/utils/SkShadowUtils.cpp
new file mode 100644
index 0000000000..1b47af5523
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowUtils.cpp
@@ -0,0 +1,766 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkString.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+#include "include/utils/SkRandom.h"
+#include "include/utils/SkShadowUtils.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkTLazy.h"
+#include "src/utils/SkShadowTessellator.h"
+#include <new>
+#if SK_SUPPORT_GPU
+#include "src/gpu/effects/generated/GrBlurredEdgeFragmentProcessor.h"
+#include "src/gpu/geometry/GrShape.h"
+#endif
+
+/**
+* Gaussian color filter -- produces a Gaussian ramp based on the color's B value,
+* then blends with the color's G value.
+* Final result is black with alpha of Gaussian(B)*G.
+* The assumption is that the original color's alpha is 1.
+*/
+class SkGaussianColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make() {
+ return sk_sp<SkColorFilter>(new SkGaussianColorFilter);
+ }
+
+#if SK_SUPPORT_GPU
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ const GrColorInfo&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override {}
+ bool onAppendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ rec.fPipeline->append(SkRasterPipeline::gauss_a_to_rgba);
+ return true;
+ }
+private:
+ SK_FLATTENABLE_HOOKS(SkGaussianColorFilter)
+
+ SkGaussianColorFilter() : INHERITED() {}
+
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkGaussianColorFilter::CreateProc(SkReadBuffer&) {
+ return Make();
+}
+
+#if SK_SUPPORT_GPU
+
+std::unique_ptr<GrFragmentProcessor> SkGaussianColorFilter::asFragmentProcessor(
+ GrRecordingContext*, const GrColorInfo&) const {
+ return GrBlurredEdgeFragmentProcessor::Make(GrBlurredEdgeFragmentProcessor::Mode::kGaussian);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+uint64_t resource_cache_shared_id() {
+ return 0x2020776f64616873llu; // 'shadow '
+}
+
+/** Factory for an ambient shadow mesh with particular shadow properties. */
+struct AmbientVerticesFactory {
+ SkScalar fOccluderHeight = SK_ScalarNaN; // NaN so that isCompatible will fail until init'ed.
+ bool fTransparent;
+ SkVector fOffset;
+
+ bool isCompatible(const AmbientVerticesFactory& that, SkVector* translate) const {
+ if (fOccluderHeight != that.fOccluderHeight || fTransparent != that.fTransparent) {
+ return false;
+ }
+ *translate = that.fOffset;
+ return true;
+ }
+
+ sk_sp<SkVertices> makeVertices(const SkPath& path, const SkMatrix& ctm,
+ SkVector* translate) const {
+ SkPoint3 zParams = SkPoint3::Make(0, 0, fOccluderHeight);
+ // pick a canonical place to generate shadow
+ SkMatrix noTrans(ctm);
+ if (!ctm.hasPerspective()) {
+ noTrans[SkMatrix::kMTransX] = 0;
+ noTrans[SkMatrix::kMTransY] = 0;
+ }
+ *translate = fOffset;
+ return SkShadowTessellator::MakeAmbient(path, noTrans, zParams, fTransparent);
+ }
+};
+
+/** Factory for a spot shadow mesh with particular shadow properties. */
+struct SpotVerticesFactory {
+ enum class OccluderType {
+ // The umbra cannot be dropped out because either the occluder is not opaque,
+ // or the center of the umbra is visible.
+ kTransparent,
+ // The umbra can be dropped where it is occluded.
+ kOpaquePartialUmbra,
+ // It is known that the entire umbra is occluded.
+ kOpaqueNoUmbra
+ };
+
+ SkVector fOffset;
+ SkPoint fLocalCenter;
+ SkScalar fOccluderHeight = SK_ScalarNaN; // NaN so that isCompatible will fail until init'ed.
+ SkPoint3 fDevLightPos;
+ SkScalar fLightRadius;
+ OccluderType fOccluderType;
+
+ bool isCompatible(const SpotVerticesFactory& that, SkVector* translate) const {
+ if (fOccluderHeight != that.fOccluderHeight || fDevLightPos.fZ != that.fDevLightPos.fZ ||
+ fLightRadius != that.fLightRadius || fOccluderType != that.fOccluderType) {
+ return false;
+ }
+ switch (fOccluderType) {
+ case OccluderType::kTransparent:
+ case OccluderType::kOpaqueNoUmbra:
+ // 'this' and 'that' will either both have no umbra removed or both have all the
+ // umbra removed.
+ *translate = that.fOffset;
+ return true;
+ case OccluderType::kOpaquePartialUmbra:
+ // In this case we partially remove the umbra differently for 'this' and 'that'
+ // if the offsets don't match.
+ if (fOffset == that.fOffset) {
+ translate->set(0, 0);
+ return true;
+ }
+ return false;
+ }
+ SK_ABORT("Uninitialized occluder type?");
+ }
+
+ sk_sp<SkVertices> makeVertices(const SkPath& path, const SkMatrix& ctm,
+ SkVector* translate) const {
+ bool transparent = OccluderType::kTransparent == fOccluderType;
+ SkPoint3 zParams = SkPoint3::Make(0, 0, fOccluderHeight);
+ if (ctm.hasPerspective() || OccluderType::kOpaquePartialUmbra == fOccluderType) {
+ translate->set(0, 0);
+ return SkShadowTessellator::MakeSpot(path, ctm, zParams,
+ fDevLightPos, fLightRadius, transparent);
+ } else {
+ // pick a canonical place to generate shadow, with light centered over path
+ SkMatrix noTrans(ctm);
+ noTrans[SkMatrix::kMTransX] = 0;
+ noTrans[SkMatrix::kMTransY] = 0;
+ SkPoint devCenter(fLocalCenter);
+ noTrans.mapPoints(&devCenter, 1);
+ SkPoint3 centerLightPos = SkPoint3::Make(devCenter.fX, devCenter.fY, fDevLightPos.fZ);
+ *translate = fOffset;
+ return SkShadowTessellator::MakeSpot(path, noTrans, zParams,
+ centerLightPos, fLightRadius, transparent);
+ }
+ }
+};
+
+/**
+ * This manages a set of tessellations for a given shape in the cache. Because SkResourceCache
+ * records are immutable this is not itself a Rec. When we need to update it we return this on
+ * the FindVisitor and let the cache destroy the Rec. We'll update the tessellations and then add
+ * a new Rec with an adjusted size for any deletions/additions.
+ */
+class CachedTessellations : public SkRefCnt {
+public:
+ size_t size() const { return fAmbientSet.size() + fSpotSet.size(); }
+
+ sk_sp<SkVertices> find(const AmbientVerticesFactory& ambient, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fAmbientSet.find(ambient, matrix, translate);
+ }
+
+ sk_sp<SkVertices> add(const SkPath& devPath, const AmbientVerticesFactory& ambient,
+ const SkMatrix& matrix, SkVector* translate) {
+ return fAmbientSet.add(devPath, ambient, matrix, translate);
+ }
+
+ sk_sp<SkVertices> find(const SpotVerticesFactory& spot, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fSpotSet.find(spot, matrix, translate);
+ }
+
+ sk_sp<SkVertices> add(const SkPath& devPath, const SpotVerticesFactory& spot,
+ const SkMatrix& matrix, SkVector* translate) {
+ return fSpotSet.add(devPath, spot, matrix, translate);
+ }
+
+private:
+ template <typename FACTORY, int MAX_ENTRIES>
+ class Set {
+ public:
+ size_t size() const { return fSize; }
+
+ sk_sp<SkVertices> find(const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) const {
+ for (int i = 0; i < MAX_ENTRIES; ++i) {
+ if (fEntries[i].fFactory.isCompatible(factory, translate)) {
+ const SkMatrix& m = fEntries[i].fMatrix;
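+                    // A translation-only difference is fine: the caller re-positions
+                    // the cached mesh via 'translate'. Perspective matrices (and any
+                    // change in scale/skew) must match exactly.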
+ if (matrix.hasPerspective() || m.hasPerspective()) {
+ if (matrix != fEntries[i].fMatrix) {
+ continue;
+ }
+ } else if (matrix.getScaleX() != m.getScaleX() ||
+ matrix.getSkewX() != m.getSkewX() ||
+ matrix.getScaleY() != m.getScaleY() ||
+ matrix.getSkewY() != m.getSkewY()) {
+ continue;
+ }
+ return fEntries[i].fVertices;
+ }
+ }
+ return nullptr;
+ }
+
+ sk_sp<SkVertices> add(const SkPath& path, const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) {
+ sk_sp<SkVertices> vertices = factory.makeVertices(path, matrix, translate);
+ if (!vertices) {
+ return nullptr;
+ }
+ int i;
+ if (fCount < MAX_ENTRIES) {
+ i = fCount++;
+ } else {
+ i = fRandom.nextULessThan(MAX_ENTRIES);
+ fSize -= fEntries[i].fVertices->approximateSize();
+ }
+ fEntries[i].fFactory = factory;
+ fEntries[i].fVertices = vertices;
+ fEntries[i].fMatrix = matrix;
+ fSize += vertices->approximateSize();
+ return vertices;
+ }
+
+ private:
+ struct Entry {
+ FACTORY fFactory;
+ sk_sp<SkVertices> fVertices;
+ SkMatrix fMatrix;
+ };
+ Entry fEntries[MAX_ENTRIES];
+ int fCount = 0;
+ size_t fSize = 0;
+ SkRandom fRandom;
+ };
+
+ Set<AmbientVerticesFactory, 4> fAmbientSet;
+ Set<SpotVerticesFactory, 4> fSpotSet;
+};
+
+/**
+ * A record of shadow vertices stored in SkResourceCache of CachedTessellations for a particular
+ * path. The key represents the path's geometry and not any shadow params.
+ */
+class CachedTessellationsRec : public SkResourceCache::Rec {
+public:
+ CachedTessellationsRec(const SkResourceCache::Key& key,
+ sk_sp<CachedTessellations> tessellations)
+ : fTessellations(std::move(tessellations)) {
+ fKey.reset(new uint8_t[key.size()]);
+ memcpy(fKey.get(), &key, key.size());
+ }
+
+ const Key& getKey() const override {
+ return *reinterpret_cast<SkResourceCache::Key*>(fKey.get());
+ }
+
+ size_t bytesUsed() const override { return fTessellations->size(); }
+
+ const char* getCategory() const override { return "tessellated shadow masks"; }
+
+ sk_sp<CachedTessellations> refTessellations() const { return fTessellations; }
+
+ template <typename FACTORY>
+ sk_sp<SkVertices> find(const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fTessellations->find(factory, matrix, translate);
+ }
+
+private:
+ std::unique_ptr<uint8_t[]> fKey;
+ sk_sp<CachedTessellations> fTessellations;
+};
+
+/**
+ * Used by FindVisitor to determine whether a cache entry can be reused and if so returns the
+ * vertices and a translation vector. If the CachedTessellations does not contain a suitable
+ * mesh then we inform SkResourceCache to destroy the Rec and we return the CachedTessellations
+ * to the caller. The caller will update it and reinsert it back into the cache.
+ */
+template <typename FACTORY>
+struct FindContext {
+ FindContext(const SkMatrix* viewMatrix, const FACTORY* factory)
+ : fViewMatrix(viewMatrix), fFactory(factory) {}
+ const SkMatrix* const fViewMatrix;
+ // If this is valid after Find is called then we found the vertices and they should be drawn
+ // with fTranslate applied.
+ sk_sp<SkVertices> fVertices;
+ SkVector fTranslate = {0, 0};
+
+ // If this is valid after Find then the caller should add the vertices to the tessellation set
+ // and create a new CachedTessellationsRec and insert it into SkResourceCache.
+ sk_sp<CachedTessellations> fTessellationsOnFailure;
+
+ const FACTORY* fFactory;
+};
+
+/**
+ * Function called by SkResourceCache when a matching cache key is found. The FACTORY and matrix of
+ * the FindContext are used to determine if the vertices are reusable. If so the vertices and
+ * necessary translation vector are set on the FindContext.
+ */
+template <typename FACTORY>
+bool FindVisitor(const SkResourceCache::Rec& baseRec, void* ctx) {
+ FindContext<FACTORY>* findContext = (FindContext<FACTORY>*)ctx;
+ const CachedTessellationsRec& rec = static_cast<const CachedTessellationsRec&>(baseRec);
+ findContext->fVertices =
+ rec.find(*findContext->fFactory, *findContext->fViewMatrix, &findContext->fTranslate);
+ if (findContext->fVertices) {
+ return true;
+ }
+ // We ref the tessellations and let the cache destroy the Rec. Once the tessellations have been
+ // manipulated we will add a new Rec.
+ findContext->fTessellationsOnFailure = rec.refTessellations();
+ return false;
+}
+
+class ShadowedPath {
+public:
+ ShadowedPath(const SkPath* path, const SkMatrix* viewMatrix)
+ : fPath(path)
+ , fViewMatrix(viewMatrix)
+#if SK_SUPPORT_GPU
+ , fShapeForKey(*path, GrStyle::SimpleFill())
+#endif
+ {}
+
+ const SkPath& path() const { return *fPath; }
+ const SkMatrix& viewMatrix() const { return *fViewMatrix; }
+#if SK_SUPPORT_GPU
+ /** Negative means the vertices should not be cached for this path. */
+ int keyBytes() const { return fShapeForKey.unstyledKeySize() * sizeof(uint32_t); }
+ void writeKey(void* key) const {
+ fShapeForKey.writeUnstyledKey(reinterpret_cast<uint32_t*>(key));
+ }
+ bool isRRect(SkRRect* rrect) { return fShapeForKey.asRRect(rrect, nullptr, nullptr, nullptr); }
+#else
+ int keyBytes() const { return -1; }
+ void writeKey(void* key) const { SK_ABORT("Should never be called"); }
+ bool isRRect(SkRRect* rrect) { return false; }
+#endif
+
+private:
+ const SkPath* fPath;
+ const SkMatrix* fViewMatrix;
+#if SK_SUPPORT_GPU
+ GrShape fShapeForKey;
+#endif
+};
+
+// This creates a domain of keys in SkResourceCache used by this file.
+static void* kNamespace;
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class ShadowInvalidator : public SkPathRef::GenIDChangeListener {
+public:
+ ShadowInvalidator(const SkResourceCache::Key& key) {
+ fKey.reset(new uint8_t[key.size()]);
+ memcpy(fKey.get(), &key, key.size());
+ }
+
+private:
+ const SkResourceCache::Key& getKey() const {
+ return *reinterpret_cast<SkResourceCache::Key*>(fKey.get());
+ }
+
+ // always purge
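+    // (a visitor that returns false tells SkResourceCache the Rec is stale,
+    // so Find() purges it)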
+ static bool FindVisitor(const SkResourceCache::Rec&, void*) {
+ return false;
+ }
+
+ void onChange() override {
+ SkResourceCache::Find(this->getKey(), ShadowInvalidator::FindVisitor, nullptr);
+ }
+
+ std::unique_ptr<uint8_t[]> fKey;
+};
+
+/**
+ * Draws a shadow to 'canvas'. The vertices used to draw the shadow are created by 'factory' unless
+ * they are first found in SkResourceCache.
+ */
+template <typename FACTORY>
+bool draw_shadow(const FACTORY& factory,
+ std::function<void(const SkVertices*, SkBlendMode, const SkPaint&,
+                                    SkScalar tx, SkScalar ty, bool)> drawProc,
+                 ShadowedPath& path, SkColor color) {
+ FindContext<FACTORY> context(&path.viewMatrix(), &factory);
+
+ SkResourceCache::Key* key = nullptr;
+ SkAutoSTArray<32 * 4, uint8_t> keyStorage;
+ int keyDataBytes = path.keyBytes();
+ if (keyDataBytes >= 0) {
+ keyStorage.reset(keyDataBytes + sizeof(SkResourceCache::Key));
+ key = new (keyStorage.begin()) SkResourceCache::Key();
+ path.writeKey((uint32_t*)(keyStorage.begin() + sizeof(*key)));
+ key->init(&kNamespace, resource_cache_shared_id(), keyDataBytes);
+ SkResourceCache::Find(*key, FindVisitor<FACTORY>, &context);
+ }
+
+ sk_sp<SkVertices> vertices;
+ bool foundInCache = SkToBool(context.fVertices);
+ if (foundInCache) {
+ vertices = std::move(context.fVertices);
+ } else {
+ // TODO: handle transforming the path as part of the tessellator
+ if (key) {
+ // Update or initialize a tessellation set and add it to the cache.
+ sk_sp<CachedTessellations> tessellations;
+ if (context.fTessellationsOnFailure) {
+ tessellations = std::move(context.fTessellationsOnFailure);
+ } else {
+ tessellations.reset(new CachedTessellations());
+ }
+ vertices = tessellations->add(path.path(), factory, path.viewMatrix(),
+ &context.fTranslate);
+ if (!vertices) {
+ return false;
+ }
+ auto rec = new CachedTessellationsRec(*key, std::move(tessellations));
+ SkPathPriv::AddGenIDChangeListener(path.path(), sk_make_sp<ShadowInvalidator>(*key));
+ SkResourceCache::Add(rec);
+ } else {
+ vertices = factory.makeVertices(path.path(), path.viewMatrix(),
+ &context.fTranslate);
+ if (!vertices) {
+ return false;
+ }
+ }
+ }
+
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the grayscale result of
+ // that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(color, SkBlendMode::kModulate)->makeComposed(
+ SkGaussianColorFilter::Make()));
+
+ drawProc(vertices.get(), SkBlendMode::kModulate, paint,
+ context.fTranslate.fX, context.fTranslate.fY, path.viewMatrix().hasPerspective());
+
+ return true;
+}
+}  // namespace
+
+static bool tilted(const SkPoint3& zPlaneParams) {
+ return !SkScalarNearlyZero(zPlaneParams.fX) || !SkScalarNearlyZero(zPlaneParams.fY);
+}
+
+static SkPoint3 map(const SkMatrix& m, const SkPoint3& pt) {
+ SkPoint3 result;
+ m.mapXY(pt.fX, pt.fY, (SkPoint*)&result.fX);
+ result.fZ = pt.fZ;
+ return result;
+}
+
+void SkShadowUtils::ComputeTonalColors(SkColor inAmbientColor, SkColor inSpotColor,
+ SkColor* outAmbientColor, SkColor* outSpotColor) {
+ // For tonal color we only compute color values for the spot shadow.
+ // The ambient shadow is greyscale only.
+
+ // Ambient
+ *outAmbientColor = SkColorSetARGB(SkColorGetA(inAmbientColor), 0, 0, 0);
+
+ // Spot
+ int spotR = SkColorGetR(inSpotColor);
+ int spotG = SkColorGetG(inSpotColor);
+ int spotB = SkColorGetB(inSpotColor);
+ int max = SkTMax(SkTMax(spotR, spotG), spotB);
+ int min = SkTMin(SkTMin(spotR, spotG), spotB);
+ SkScalar luminance = 0.5f*(max + min)/255.f;
+ SkScalar origA = SkColorGetA(inSpotColor)/255.f;
+
+ // We compute a color alpha value based on the luminance of the color, scaled by an
+ // adjusted alpha value. We want the following properties to match the UX examples
+ // (assuming a = 0.25) and to ensure that we have reasonable results when the color
+ // is black and/or the alpha is 0:
+ // f(0, a) = 0
+ // f(luminance, 0) = 0
+ // f(1, 0.25) = .5
+ // f(0.5, 0.25) = .4
+ // f(1, 1) = 1
+ // The following functions match this as closely as possible.
+ SkScalar alphaAdjust = (2.6f + (-2.66667f + 1.06667f*origA)*origA)*origA;
+ SkScalar colorAlpha = (3.544762f + (-4.891428f + 2.3466f*luminance)*luminance)*luminance;
+ colorAlpha = SkTPin(alphaAdjust*colorAlpha, 0.0f, 1.0f);
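+    // Spot check of the fit (hypothetical inputs): at luminance = 1, origA = 0.25
+    // these give alphaAdjust ~= 0.5 and colorAlpha ~= 1.0, so the pinned product
+    // is ~0.5, matching f(1, 0.25) = .5 above.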
+
+ // Similarly, we set the greyscale alpha based on luminance and alpha so that
+ // f(0, a) = a
+ // f(luminance, 0) = 0
+ // f(1, 0.25) = 0.15
+ SkScalar greyscaleAlpha = SkTPin(origA*(1 - 0.4f*luminance), 0.0f, 1.0f);
+
+ // The final color we want to emulate is generated by rendering a color shadow (C_rgb) using an
+ // alpha computed from the color's luminance (C_a), and then a black shadow with alpha (S_a)
+ // which is an adjusted value of 'a'. Assuming SrcOver, a background color of B_rgb, and
+ // ignoring edge falloff, this becomes
+ //
+ // (C_a - S_a*C_a)*C_rgb + (1 - (S_a + C_a - S_a*C_a))*B_rgb
+ //
+ // Assuming premultiplied alpha, this means we scale the color by (C_a - S_a*C_a) and
+ // set the alpha to (S_a + C_a - S_a*C_a).
+ SkScalar colorScale = colorAlpha*(SK_Scalar1 - greyscaleAlpha);
+ SkScalar tonalAlpha = colorScale + greyscaleAlpha;
+ SkScalar unPremulScale = colorScale / tonalAlpha;
+ *outSpotColor = SkColorSetARGB(tonalAlpha*255.999f,
+ unPremulScale*spotR,
+ unPremulScale*spotG,
+ unPremulScale*spotB);
+}
+
+// Draw an offset spot shadow and outlining ambient shadow for the given path.
+void SkShadowUtils::DrawShadow(SkCanvas* canvas, const SkPath& path, const SkPoint3& zPlaneParams,
+ const SkPoint3& devLightPos, SkScalar lightRadius,
+ SkColor ambientColor, SkColor spotColor,
+ uint32_t flags) {
+ SkMatrix inverse;
+ if (!canvas->getTotalMatrix().invert(&inverse)) {
+ return;
+ }
+ SkPoint pt = inverse.mapXY(devLightPos.fX, devLightPos.fY);
+
+ SkDrawShadowRec rec;
+ rec.fZPlaneParams = zPlaneParams;
+ rec.fLightPos = { pt.fX, pt.fY, devLightPos.fZ };
+ rec.fLightRadius = lightRadius;
+ rec.fAmbientColor = ambientColor;
+ rec.fSpotColor = spotColor;
+ rec.fFlags = flags;
+
+ canvas->private_draw_shadow_rec(path, rec);
+}
+
+static bool validate_rec(const SkDrawShadowRec& rec) {
+ return rec.fLightPos.isFinite() && rec.fZPlaneParams.isFinite() &&
+ SkScalarIsFinite(rec.fLightRadius);
+}
+
+void SkBaseDevice::drawShadow(const SkPath& path, const SkDrawShadowRec& rec) {
+ auto drawVertsProc = [this](const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint,
+ SkScalar tx, SkScalar ty, bool hasPerspective) {
+ if (vertices->vertexCount()) {
+ // For perspective shadows we've already computed the shadow in world space,
+ // and we can't translate it without changing it. Otherwise we concat the
+ // change in translation from the cached version.
+ SkAutoDeviceCTMRestore adr(
+ this,
+ hasPerspective ? SkMatrix::I()
+ : SkMatrix::Concat(this->ctm(), SkMatrix::MakeTrans(tx, ty)));
+ this->drawVertices(vertices, nullptr, 0, mode, paint);
+ }
+ };
+
+ if (!validate_rec(rec)) {
+ return;
+ }
+
+ SkMatrix viewMatrix = this->ctm();
+ SkAutoDeviceCTMRestore adr(this, SkMatrix::I());
+
+ ShadowedPath shadowedPath(&path, &viewMatrix);
+
+ bool tiltZPlane = tilted(rec.fZPlaneParams);
+ bool transparent = SkToBool(rec.fFlags & SkShadowFlags::kTransparentOccluder_ShadowFlag);
+ bool uncached = tiltZPlane || path.isVolatile();
+
+ SkPoint3 zPlaneParams = rec.fZPlaneParams;
+ SkPoint3 devLightPos = map(viewMatrix, rec.fLightPos);
+ float lightRadius = rec.fLightRadius;
+
+ if (SkColorGetA(rec.fAmbientColor) > 0) {
+ bool success = false;
+ if (uncached) {
+ sk_sp<SkVertices> vertices = SkShadowTessellator::MakeAmbient(path, viewMatrix,
+ zPlaneParams,
+ transparent);
+ if (vertices) {
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the
+ // grayscale result of that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(rec.fAmbientColor,
+ SkBlendMode::kModulate)->makeComposed(
+ SkGaussianColorFilter::Make()));
+ this->drawVertices(vertices.get(), nullptr, 0, SkBlendMode::kModulate, paint);
+ success = true;
+ }
+ }
+
+ if (!success) {
+ AmbientVerticesFactory factory;
+ factory.fOccluderHeight = zPlaneParams.fZ;
+ factory.fTransparent = transparent;
+ if (viewMatrix.hasPerspective()) {
+ factory.fOffset.set(0, 0);
+ } else {
+ factory.fOffset.fX = viewMatrix.getTranslateX();
+ factory.fOffset.fY = viewMatrix.getTranslateY();
+ }
+
+ if (!draw_shadow(factory, drawVertsProc, shadowedPath, rec.fAmbientColor)) {
+ // Pretransform the path to avoid transforming the stroke, below.
+ SkPath devSpacePath;
+ path.transform(viewMatrix, &devSpacePath);
+ devSpacePath.setIsVolatile(true);
+
+                // The tessellator outsets by AmbientBlurRadius (or 'r') to get the outer ring of
+                // the tessellation, and sets the alpha on the path to 1/AmbientRecipAlpha (or 'a').
+ //
+ // We want to emulate this with a blur. The full blur width (2*blurRadius or 'f')
+ // can be calculated by interpolating:
+ //
+ // original edge outer edge
+ // | |<---------- r ------>|
+ // |<------|--- f -------------->|
+ // | | |
+ // alpha = 1 alpha = a alpha = 0
+ //
+ // Taking ratios, f/1 = r/a, so f = r/a and blurRadius = f/2.
+ //
+ // We now need to outset the path to place the new edge in the center of the
+ // blur region:
+ //
+ // original new
+ // | |<------|--- r ------>|
+ // |<------|--- f -|------------>|
+ // | |<- o ->|<--- f/2 --->|
+ //
+ // r = o + f/2, so o = r - f/2
+ //
+ // We outset by using the stroker, so the strokeWidth is o/2.
+ //
+ SkScalar devSpaceOutset = SkDrawShadowMetrics::AmbientBlurRadius(zPlaneParams.fZ);
+ SkScalar oneOverA = SkDrawShadowMetrics::AmbientRecipAlpha(zPlaneParams.fZ);
+ SkScalar blurRadius = 0.5f*devSpaceOutset*oneOverA;
+ SkScalar strokeWidth = 0.5f*(devSpaceOutset - blurRadius);
+
+ // Now draw with blur
+ SkPaint paint;
+ paint.setColor(rec.fAmbientColor);
+ paint.setStrokeWidth(strokeWidth);
+ paint.setStyle(SkPaint::kStrokeAndFill_Style);
+ SkScalar sigma = SkBlurMask::ConvertRadiusToSigma(blurRadius);
+ bool respectCTM = false;
+ paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, respectCTM));
+ this->drawPath(devSpacePath, paint);
+ }
+ }
+ }
+
+ if (SkColorGetA(rec.fSpotColor) > 0) {
+ bool success = false;
+ if (uncached) {
+ sk_sp<SkVertices> vertices = SkShadowTessellator::MakeSpot(path, viewMatrix,
+ zPlaneParams,
+ devLightPos, lightRadius,
+ transparent);
+ if (vertices) {
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the
+ // grayscale result of that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(rec.fSpotColor,
+ SkBlendMode::kModulate)->makeComposed(
+ SkGaussianColorFilter::Make()));
+ this->drawVertices(vertices.get(), nullptr, 0, SkBlendMode::kModulate, paint);
+ success = true;
+ }
+ }
+
+ if (!success) {
+ SpotVerticesFactory factory;
+ factory.fOccluderHeight = zPlaneParams.fZ;
+ factory.fDevLightPos = devLightPos;
+ factory.fLightRadius = lightRadius;
+
+ SkPoint center = SkPoint::Make(path.getBounds().centerX(), path.getBounds().centerY());
+ factory.fLocalCenter = center;
+ viewMatrix.mapPoints(&center, 1);
+ SkScalar radius, scale;
+ SkDrawShadowMetrics::GetSpotParams(zPlaneParams.fZ, devLightPos.fX - center.fX,
+ devLightPos.fY - center.fY, devLightPos.fZ,
+ lightRadius, &radius, &scale, &factory.fOffset);
+ SkRect devBounds;
+ viewMatrix.mapRect(&devBounds, path.getBounds());
+ if (transparent ||
+ SkTAbs(factory.fOffset.fX) > 0.5f*devBounds.width() ||
+ SkTAbs(factory.fOffset.fY) > 0.5f*devBounds.height()) {
+ // if the translation of the shadow is big enough we're going to end up
+ // filling the entire umbra, so we can treat these as all the same
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kTransparent;
+ } else if (factory.fOffset.length()*scale + scale < radius) {
+ // if we don't translate more than the blur distance, can assume umbra is covered
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kOpaqueNoUmbra;
+ } else if (path.isConvex()) {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kOpaquePartialUmbra;
+ } else {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kTransparent;
+ }
+ // need to add this after we classify the shadow
+ factory.fOffset.fX += viewMatrix.getTranslateX();
+ factory.fOffset.fY += viewMatrix.getTranslateY();
+
+ SkColor color = rec.fSpotColor;
+#ifdef DEBUG_SHADOW_CHECKS
+ switch (factory.fOccluderType) {
+ case SpotVerticesFactory::OccluderType::kTransparent:
+ color = 0xFFD2B48C; // tan for transparent
+ break;
+ case SpotVerticesFactory::OccluderType::kOpaquePartialUmbra:
+ color = 0xFFFFA500; // orange for opaque
+ break;
+ case SpotVerticesFactory::OccluderType::kOpaqueNoUmbra:
+ color = 0xFFE5E500; // corn yellow for covered
+ break;
+ }
+#endif
+ if (!draw_shadow(factory, drawVertsProc, shadowedPath, color)) {
+ // draw with blur
+ SkMatrix shadowMatrix;
+ if (!SkDrawShadowMetrics::GetSpotShadowTransform(devLightPos, lightRadius,
+ viewMatrix, zPlaneParams,
+ path.getBounds(),
+ &shadowMatrix, &radius)) {
+ return;
+ }
+ SkAutoDeviceCTMRestore adr(this, shadowMatrix);
+
+ SkPaint paint;
+ paint.setColor(rec.fSpotColor);
+ SkScalar sigma = SkBlurMask::ConvertRadiusToSigma(radius);
+ bool respectCTM = false;
+ paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, respectCTM));
+ this->drawPath(path, paint);
+ }
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkShaperJSONWriter.cpp b/gfx/skia/skia/src/utils/SkShaperJSONWriter.cpp
new file mode 100644
index 0000000000..bbeb4c337b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShaperJSONWriter.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkShaperJSONWriter.h"
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "src/utils/SkJSONWriter.h"
+#include "src/utils/SkUTF.h"
+
+SkShaperJSONWriter::SkShaperJSONWriter(SkJSONWriter* JSONWriter, const char* utf8, size_t size)
+ : fJSONWriter{JSONWriter}
+ , fUTF8{utf8, size} {}
+
+void SkShaperJSONWriter::beginLine() { }
+
+void SkShaperJSONWriter::runInfo(const SkShaper::RunHandler::RunInfo& info) { }
+
+void SkShaperJSONWriter::commitRunInfo() { }
+
+SkShaper::RunHandler::Buffer
+SkShaperJSONWriter::runBuffer(const SkShaper::RunHandler::RunInfo& info) {
+ fGlyphs.resize(info.glyphCount);
+ fPositions.resize(info.glyphCount);
+ fClusters.resize(info.glyphCount);
+ return {fGlyphs.data(), fPositions.data(), nullptr, fClusters.data(), {0, 0}};
+}
+
+static bool is_one_to_one(const char utf8[], size_t utf8Begin, size_t utf8End,
+ std::vector<uint32_t>& clusters) {
+ size_t lastUtf8Index = utf8End;
+
+ auto checkCluster = [&](size_t clusterIndex) {
+ if (clusters[clusterIndex] >= lastUtf8Index) {
+ return false;
+ }
+ size_t utf8ClusterSize = lastUtf8Index - clusters[clusterIndex];
+ if (SkUTF::CountUTF8(&utf8[clusters[clusterIndex]], utf8ClusterSize) != 1) {
+ return false;
+ }
+ lastUtf8Index = clusters[clusterIndex];
+ return true;
+ };
+
+ if (clusters.front() <= clusters.back()) {
+ // left-to-right clusters
+ size_t clusterCursor = clusters.size();
+ while (clusterCursor > 0) {
+ if (!checkCluster(--clusterCursor)) { return false; }
+ }
+ } else {
+ // right-to-left clusters
+ size_t clusterCursor = 0;
+ while (clusterCursor < clusters.size()) {
+ if (!checkCluster(clusterCursor++)) { return false; }
+ }
+ }
+
+ return true;
+}
+
+void SkShaperJSONWriter::commitRunBuffer(const SkShaper::RunHandler::RunInfo& info) {
+ fJSONWriter->beginObject("run", true);
+
+ // Font name
+ SkString fontName;
+ info.fFont.getTypeface()->getFamilyName(&fontName);
+ fJSONWriter->appendString("font name", fontName.c_str());
+
+ // Font size
+ fJSONWriter->appendFloat("font size", info.fFont.getSize());
+
+ if (info.fBidiLevel > 0) {
+ std::string bidiType = info.fBidiLevel % 2 == 0 ? "left-to-right" : "right-to-left";
+ std::string bidiOutput = bidiType + " lvl " + std::to_string(info.fBidiLevel);
+ fJSONWriter->appendString("BiDi", bidiOutput.c_str());
+ }
+
+ if (is_one_to_one(fUTF8.c_str(), info.utf8Range.begin(), info.utf8Range.end(), fClusters)) {
+ std::string utf8{&fUTF8[info.utf8Range.begin()], info.utf8Range.size()};
+ fJSONWriter->appendString("UTF8", utf8.c_str());
+
+ fJSONWriter->beginArray("glyphs", false);
+ for (auto glyphID : fGlyphs) {
+ fJSONWriter->appendU32(glyphID);
+ }
+ fJSONWriter->endArray();
+
+ fJSONWriter->beginArray("clusters", false);
+ for (auto cluster : fClusters) {
+ fJSONWriter->appendU32(cluster);
+ }
+ fJSONWriter->endArray();
+ } else {
+ VisualizeClusters(fUTF8.c_str(),
+ info.utf8Range.begin(), info.utf8Range.end(),
+ SkMakeSpan(fGlyphs),
+ SkMakeSpan(fClusters),
+ [this](size_t codePointCount, SkSpan<const char> utf1to1,
+ SkSpan<const SkGlyphID> glyph1to1) {
+ this->displayMToN(codePointCount, utf1to1, glyph1to1);
+ });
+ }
+
+ if (info.glyphCount > 1) {
+ fJSONWriter->beginArray("horizontal positions", false);
+ for (auto position : fPositions) {
+ fJSONWriter->appendFloat(position.x());
+ }
+ fJSONWriter->endArray();
+ }
+
+ fJSONWriter->beginArray("advances", false);
+ for (size_t i = 1; i < info.glyphCount; i++) {
+ fJSONWriter->appendFloat(fPositions[i].fX - fPositions[i-1].fX);
+ }
+ SkPoint lastAdvance = info.fAdvance - (fPositions.back() - fPositions.front());
+ fJSONWriter->appendFloat(lastAdvance.fX);
+ fJSONWriter->endArray();
+
+ fJSONWriter->endObject();
+}
+
+void SkShaperJSONWriter::BreakupClusters(size_t utf8Begin, size_t utf8End,
+ SkSpan<const uint32_t> clusters,
+                                         const BreakupClustersCallback& processMToN) {
+
+ if (clusters.front() <= clusters.back()) {
+ // Handle left-to-right text direction
+ size_t glyphStartIndex = 0;
+ for (size_t glyphEndIndex = 0; glyphEndIndex < clusters.size(); glyphEndIndex++) {
+
+ if (clusters[glyphStartIndex] == clusters[glyphEndIndex]) { continue; }
+
+ processMToN(glyphStartIndex, glyphEndIndex,
+ clusters[glyphStartIndex], clusters[glyphEndIndex]);
+
+ glyphStartIndex = glyphEndIndex;
+ }
+
+ processMToN(glyphStartIndex, clusters.size(), clusters[glyphStartIndex], utf8End);
+
+ } else {
+ // Handle right-to-left text direction.
+ SkASSERT(clusters.size() >= 2);
+ size_t glyphStartIndex = 0;
+ uint32_t utf8EndIndex = utf8End;
+ for (size_t glyphEndIndex = 0; glyphEndIndex < clusters.size(); glyphEndIndex++) {
+
+ if (clusters[glyphStartIndex] == clusters[glyphEndIndex]) { continue; }
+
+ processMToN(glyphStartIndex, glyphEndIndex,
+ clusters[glyphStartIndex], utf8EndIndex);
+
+ utf8EndIndex = clusters[glyphStartIndex];
+ glyphStartIndex = glyphEndIndex;
+ }
+ processMToN(glyphStartIndex, clusters.size(), utf8Begin, clusters[glyphStartIndex-1]);
+ }
+}
+
+void SkShaperJSONWriter::VisualizeClusters(const char* utf8, size_t utf8Begin, size_t utf8End,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const uint32_t> clusters,
+ const VisualizeClustersCallback& processMToN) {
+
+ size_t glyphRangeStart, glyphRangeEnd;
+ uint32_t utf8RangeStart, utf8RangeEnd;
+
+ auto resetRanges = [&]() {
+ glyphRangeStart = std::numeric_limits<size_t>::max();
+ glyphRangeEnd = 0;
+ utf8RangeStart = std::numeric_limits<uint32_t>::max();
+ utf8RangeEnd = 0;
+ };
+
+ auto checkRangesAndProcess = [&]() {
+ if (glyphRangeStart < glyphRangeEnd) {
+ size_t glyphRangeCount = glyphRangeEnd - glyphRangeStart;
+ SkSpan<const char> utf8Span{&utf8[utf8RangeStart], utf8RangeEnd - utf8RangeStart};
+ SkSpan<const SkGlyphID> glyphSpan{&glyphIDs[glyphRangeStart], glyphRangeCount};
+
+ // Glyph count is the same as codepoint count for 1:1.
+ processMToN(glyphRangeCount, utf8Span, glyphSpan);
+ }
+ resetRanges();
+ };
+
+ auto gatherRuns = [&](size_t glyphStartIndex, size_t glyphEndIndex,
+ uint32_t utf8StartIndex, uint32_t utf8EndIndex) {
+ int possibleCount = SkUTF::CountUTF8(&utf8[utf8StartIndex], utf8EndIndex - utf8StartIndex);
+ if (possibleCount == -1) { return; }
+ size_t codePointCount = SkTo<size_t>(possibleCount);
+ if (codePointCount == 1 && glyphEndIndex - glyphStartIndex == 1) {
+ glyphRangeStart = std::min(glyphRangeStart, glyphStartIndex);
+ glyphRangeEnd = std::max(glyphRangeEnd, glyphEndIndex );
+ utf8RangeStart = std::min(utf8RangeStart, utf8StartIndex );
+ utf8RangeEnd = std::max(utf8RangeEnd, utf8EndIndex );
+ } else {
+ checkRangesAndProcess();
+
+ SkSpan<const char> utf8Span{&utf8[utf8StartIndex], utf8EndIndex - utf8StartIndex};
+ SkSpan<const SkGlyphID> glyphSpan{&glyphIDs[glyphStartIndex],
+ glyphEndIndex - glyphStartIndex};
+
+ processMToN(codePointCount, utf8Span, glyphSpan);
+ }
+ };
+
+ resetRanges();
+ BreakupClusters(utf8Begin, utf8End, clusters, gatherRuns);
+ checkRangesAndProcess();
+}
+
+void SkShaperJSONWriter::displayMToN(size_t codePointCount,
+ SkSpan<const char> utf8,
+ SkSpan<const SkGlyphID> glyphIDs) {
+ std::string nString = std::to_string(codePointCount);
+ std::string mString = std::to_string(glyphIDs.size());
+ std::string clusterName = "cluster " + nString + " to " + mString;
+ fJSONWriter->beginObject(clusterName.c_str(), true);
+ std::string utf8String{utf8.data(), utf8.size()};
+ fJSONWriter->appendString("UTF", utf8String.c_str());
+ fJSONWriter->beginArray("glyphsIDs", false);
+ for (auto glyphID : glyphIDs) {
+ fJSONWriter->appendU32(glyphID);
+ }
+ fJSONWriter->endArray();
+ fJSONWriter->endObject();
+}
diff --git a/gfx/skia/skia/src/utils/SkShaperJSONWriter.h b/gfx/skia/skia/src/utils/SkShaperJSONWriter.h
new file mode 100644
index 0000000000..bca0cd138b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShaperJSONWriter.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaperJSONWriter_DEFINED
+#define SkShaperJSONWriter_DEFINED
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <string>
+#include <vector>
+
+#include "modules/skshaper/include/SkShaper.h"
+#include "src/core/SkSpan.h"
+
+class SkJSONWriter;
+
+class SkShaperJSONWriter final : public SkShaper::RunHandler {
+public:
+ SkShaperJSONWriter(SkJSONWriter* JSONWriter, const char* utf8, size_t size);
+
+ void beginLine() override;
+ void runInfo(const RunInfo& info) override;
+ void commitRunInfo() override;
+
+ Buffer runBuffer(const RunInfo& info) override;
+
+ void commitRunBuffer(const RunInfo& info) override;
+
+ void commitLine() override {}
+
+    using BreakupClustersCallback =
+ std::function<void(size_t, size_t, uint32_t, uint32_t)>;
+
+ // Break up cluster into a set of ranges for the UTF8, and the glyphIDs.
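+    // For example (a hypothetical left-to-right run), clusters {0, 0, 3, 5} with
+    // utf8End == 9 produce the calls (0,2, 0,3), (2,3, 3,5), and (3,4, 5,9).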
+ static void BreakupClusters(size_t utf8Begin, size_t utf8End,
+ SkSpan<const uint32_t> clusters,
+                                const BreakupClustersCallback& processMToN);
+
+
+ using VisualizeClustersCallback =
+ std::function<void(size_t, SkSpan<const char>, SkSpan<const SkGlyphID>)>;
+
+ // Gather runs of 1:1 into larger runs, and display M:N as single entries.
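+    // e.g. a hypothetical "ffi" ligature (three codepoints shaped into a single
+    // glyph) would be passed to processMToN as one 3:1 entry.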
+ static void VisualizeClusters(const char utf8[],
+ size_t utf8Begin, size_t utf8End,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const uint32_t> clusters,
+ const VisualizeClustersCallback& processMToN);
+
+private:
+ void displayMToN(size_t codePointCount,
+ SkSpan<const char> utf8,
+ SkSpan<const SkGlyphID> glyphIDs);
+
+ SkJSONWriter* fJSONWriter;
+ std::vector<SkGlyphID> fGlyphs;
+ std::vector<SkPoint> fPositions;
+ std::vector<uint32_t> fClusters;
+
+ std::string fUTF8;
+};
+
+#endif // SkShaperJSONWriter_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextUtils.cpp b/gfx/skia/skia/src/utils/SkTextUtils.cpp
new file mode 100644
index 0000000000..f5c4bc037a
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextUtils.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkTextBlob.h"
+#include "include/utils/SkTextUtils.h"
+#include "src/core/SkFontPriv.h"
+
+void SkTextUtils::Draw(SkCanvas* canvas, const void* text, size_t size, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint,
+ Align align) {
+ if (align != kLeft_Align) {
+ SkScalar width = font.measureText(text, size, encoding);
+ if (align == kCenter_Align) {
+ width *= 0.5f;
+ }
+ x -= width;
+ }
+
+ canvas->drawTextBlob(SkTextBlob::MakeFromText(text, size, font, encoding), x, y, paint);
+}
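+
+// Hypothetical usage sketch: draw "hi" horizontally centered on x:
+//     SkTextUtils::Draw(canvas, "hi", 2, SkTextEncoding::kUTF8,
+//                       50, 50, font, paint, SkTextUtils::kCenter_Align);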
+
+void SkTextUtils::GetPath(const void* text, size_t length, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, SkPath* path) {
+ SkAutoToGlyphs ag(font, text, length, encoding);
+ SkAutoTArray<SkPoint> pos(ag.count());
+ font.getPos(ag.glyphs(), ag.count(), pos.get(), {x, y});
+
+ struct Rec {
+ SkPath* fDst;
+ const SkPoint* fPos;
+ } rec = { path, pos.get() };
+
+ path->reset();
+ font.getPaths(ag.glyphs(), ag.count(), [](const SkPath* src, const SkMatrix& mx, void* ctx) {
+ Rec* rec = (Rec*)ctx;
+ if (src) {
+ SkMatrix m(mx);
+ m.postTranslate(rec->fPos->fX, rec->fPos->fY);
+ rec->fDst->addPath(*src, m);
+ }
+ rec->fPos += 1;
+ }, &rec);
+}
+
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp
new file mode 100644
index 0000000000..ae4db9ae55
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Nothing to see here.
+//
+// We just need to keep this file around until we do the song and dance
+// to stop explicitly removing it from Chromium's GN build.
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp b/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp
new file mode 100644
index 0000000000..ae4db9ae55
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Nothing to see here.
+//
+// We just need to keep this file around until we do the song and dance
+// to stop explicitly removing it from Chromium's GN build.
diff --git a/gfx/skia/skia/src/utils/SkUTF.cpp b/gfx/skia/skia/src/utils/SkUTF.cpp
new file mode 100644
index 0000000000..8a9d5bd1f7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkUTF.cpp
@@ -0,0 +1,253 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/utils/SkUTF.h"
+
+#include <climits>
+
+static constexpr inline int32_t left_shift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+template <typename T> static constexpr bool is_align2(T x) { return 0 == (x & 1); }
+
+template <typename T> static constexpr bool is_align4(T x) { return 0 == (x & 3); }
+
+static constexpr inline bool utf16_is_high_surrogate(uint16_t c) { return (c & 0xFC00) == 0xD800; }
+
+static constexpr inline bool utf16_is_low_surrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }
+
+/** @returns -1 iff invalid UTF8 byte,
+ 0 iff UTF8 continuation byte,
+ 1 iff ASCII byte,
+ 2 iff leading byte of 2-byte sequence,
+ 3 iff leading byte of 3-byte sequence, and
+ 4 iff leading byte of 4-byte sequence.
+ I.e.: if return value > 0, then gives length of sequence.
+*/
+static int utf8_byte_type(uint8_t c) {
+ if (c < 0x80) {
+ return 1;
+ } else if (c < 0xC0) {
+ return 0;
+ } else if (c >= 0xF5 || (c & 0xFE) == 0xC0) { // "octet values c0, c1, f5 to ff never appear"
+ return -1;
+ } else {
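+        // 0xe5 packs four 2-bit entries, one per leading nibble 0xC..0xF; the
+        // arithmetic shift by 2*(c >> 4) selects an entry, and +1 maps it to
+        // the sequence length (2..4).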
+ int value = (((0xe5 << 24) >> ((unsigned)c >> 4 << 1)) & 3) + 1;
+ // assert(value >= 2 && value <=4);
+ return value;
+ }
+}
+static bool utf8_type_is_valid_leading_byte(int type) { return type > 0; }
+
+static bool utf8_byte_is_continuation(uint8_t c) { return utf8_byte_type(c) == 0; }
+
+////////////////////////////////////////////////////////////////////////////////
+
+int SkUTF::CountUTF8(const char* utf8, size_t byteLength) {
+ if (!utf8) {
+ return -1;
+ }
+ int count = 0;
+ const char* stop = utf8 + byteLength;
+ while (utf8 < stop) {
+ int type = utf8_byte_type(*(const uint8_t*)utf8);
+ if (!utf8_type_is_valid_leading_byte(type) || utf8 + type > stop) {
+ return -1; // Sequence extends beyond end.
+ }
+ while(type-- > 1) {
+ ++utf8;
+ if (!utf8_byte_is_continuation(*(const uint8_t*)utf8)) {
+ return -1;
+ }
+ }
+ ++utf8;
+ ++count;
+ }
+ return count;
+}
+
+int SkUTF::CountUTF16(const uint16_t* utf16, size_t byteLength) {
+ if (!utf16 || !is_align2(intptr_t(utf16)) || !is_align2(byteLength)) {
+ return -1;
+ }
+ const uint16_t* src = (const uint16_t*)utf16;
+ const uint16_t* stop = src + (byteLength >> 1);
+ int count = 0;
+ while (src < stop) {
+ unsigned c = *src++;
+ if (utf16_is_low_surrogate(c)) {
+ return -1;
+ }
+ if (utf16_is_high_surrogate(c)) {
+ if (src >= stop) {
+ return -1;
+ }
+ c = *src++;
+ if (!utf16_is_low_surrogate(c)) {
+ return -1;
+ }
+ }
+ count += 1;
+ }
+ return count;
+}
+
+int SkUTF::CountUTF32(const int32_t* utf32, size_t byteLength) {
+ if (!is_align4(intptr_t(utf32)) || !is_align4(byteLength) || byteLength >> 2 > INT_MAX) {
+ return -1;
+ }
+ const uint32_t kInvalidUnicharMask = 0xFF000000; // unichar fits in 24 bits
+ const uint32_t* ptr = (const uint32_t*)utf32;
+ const uint32_t* stop = ptr + (byteLength >> 2);
+ while (ptr < stop) {
+ if (*ptr & kInvalidUnicharMask) {
+ return -1;
+ }
+ ptr += 1;
+ }
+ return (int)(byteLength >> 2);
+}
+
+template <typename T>
+static SkUnichar next_fail(const T** ptr, const T* end) {
+ *ptr = end;
+ return -1;
+}
+
+SkUnichar SkUTF::NextUTF8(const char** ptr, const char* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const uint8_t* p = (const uint8_t*)*ptr;
+ if (!p || p >= (const uint8_t*)end) {
+ return next_fail(ptr, end);
+ }
+ int c = *p;
+ int hic = c << 24;
+
+ if (!utf8_type_is_valid_leading_byte(utf8_byte_type(c))) {
+ return next_fail(ptr, end);
+ }
+ if (hic < 0) {
+ uint32_t mask = (uint32_t)~0x3F;
+ hic = left_shift(hic, 1);
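+        // 'hic' carries the leading byte in the top bits; shifting once per
+        // continuation byte keeps the sign bit set until the sequence is consumed.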
+ do {
+ ++p;
+ if (p >= (const uint8_t*)end) {
+ return next_fail(ptr, end);
+ }
+ // check before reading off end of array.
+ uint8_t nextByte = *p;
+ if (!utf8_byte_is_continuation(nextByte)) {
+ return next_fail(ptr, end);
+ }
+ c = (c << 6) | (nextByte & 0x3F);
+ mask <<= 5;
+ } while ((hic = left_shift(hic, 1)) < 0);
+ c &= ~mask;
+ }
+ *ptr = (char*)p + 1;
+ return c;
+}
+
+SkUnichar SkUTF::NextUTF16(const uint16_t** ptr, const uint16_t* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const uint16_t* src = *ptr;
+ if (!src || src + 1 > end || !is_align2(intptr_t(src))) {
+ return next_fail(ptr, end);
+ }
+ uint16_t c = *src++;
+ SkUnichar result = c;
+ if (utf16_is_low_surrogate(c)) {
+ return next_fail(ptr, end); // srcPtr should never point at low surrogate.
+ }
+ if (utf16_is_high_surrogate(c)) {
+ if (src + 1 > end) {
+ return next_fail(ptr, end); // Truncated string.
+ }
+ uint16_t low = *src++;
+ if (!utf16_is_low_surrogate(low)) {
+ return next_fail(ptr, end);
+ }
+ /*
+ [paraphrased from wikipedia]
+ Take the high surrogate and subtract 0xD800, then multiply by 0x400.
+ Take the low surrogate and subtract 0xDC00. Add these two results
+ together, and finally add 0x10000 to get the final decoded codepoint.
+
+ unicode = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
+ unicode = (high * 0x400) - (0xD800 * 0x400) + low - 0xDC00 + 0x10000
+ unicode = (high << 10) - (0xD800 << 10) + low - 0xDC00 + 0x10000
+ unicode = (high << 10) + low - ((0xD800 << 10) + 0xDC00 - 0x10000)
+ */
+ result = (result << 10) + (SkUnichar)low - ((0xD800 << 10) + 0xDC00 - 0x10000);
+ }
+ *ptr = src;
+ return result;
+}
+
+SkUnichar SkUTF::NextUTF32(const int32_t** ptr, const int32_t* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const int32_t* s = *ptr;
+ if (!s || s + 1 > end || !is_align4(intptr_t(s))) {
+ return next_fail(ptr, end);
+ }
+ int32_t value = *s;
+ const uint32_t kInvalidUnicharMask = 0xFF000000; // unichar fits in 24 bits
+ if (value & kInvalidUnicharMask) {
+ return next_fail(ptr, end);
+ }
+ *ptr = s + 1;
+ return value;
+}
+
+size_t SkUTF::ToUTF8(SkUnichar uni, char utf8[SkUTF::kMaxBytesInUTF8Sequence]) {
+ if ((uint32_t)uni > 0x10FFFF) {
+ return 0;
+ }
+ if (uni <= 127) {
+ if (utf8) {
+ *utf8 = (char)uni;
+ }
+ return 1;
+ }
+ char tmp[4];
+ char* p = tmp;
+ size_t count = 1;
+ while (uni > 0x7F >> count) {
+ *p++ = (char)(0x80 | (uni & 0x3F));
+ uni >>= 6;
+ count += 1;
+ }
+ if (utf8) {
+ p = tmp;
+ utf8 += count;
+ while (p < tmp + count - 1) {
+ *--utf8 = *p++;
+ }
+ *--utf8 = (char)(~(0xFF >> count) | uni);
+ }
+ return count;
+}
+
+size_t SkUTF::ToUTF16(SkUnichar uni, uint16_t utf16[2]) {
+ if ((uint32_t)uni > 0x10FFFF) {
+ return 0;
+ }
+ int extra = (uni > 0xFFFF);
+ if (utf16) {
+ if (extra) {
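+            // high surrogate = 0xD800 + ((uni - 0x10000) >> 10); since
+            // 0x10000 >> 10 == 64, this folds to (0xD800 - 64) + (uni >> 10).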
+ utf16[0] = (uint16_t)((0xD800 - 64) + (uni >> 10));
+ utf16[1] = (uint16_t)(0xDC00 | (uni & 0x3FF));
+ } else {
+ utf16[0] = (uint16_t)uni;
+ }
+ }
+ return 1 + extra;
+}
+
diff --git a/gfx/skia/skia/src/utils/SkUTF.h b/gfx/skia/skia/src/utils/SkUTF.h
new file mode 100644
index 0000000000..385102aadb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkUTF.h
@@ -0,0 +1,68 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkUTF_DEFINED
+#define SkUTF_DEFINED
+
+#include <cstddef>
+#include <cstdint>
+
+typedef int32_t SkUnichar;
+
+namespace SkUTF {
+
+/** Given a sequence of UTF-8 bytes, return the number of unicode codepoints.
+ If the sequence is invalid UTF-8, return -1.
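+    For example, CountUTF8("h\xC3\xA9", 3) returns 2 (two codepoints).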
+*/
+int CountUTF8(const char* utf8, size_t byteLength);
+
+/** Given a sequence of aligned UTF-16 characters in machine-endian form,
+ return the number of unicode codepoints. If the sequence is invalid
+ UTF-16, return -1.
+*/
+int CountUTF16(const uint16_t* utf16, size_t byteLength);
+
+/** Given a sequence of aligned UTF-32 characters in machine-endian form,
+ return the number of unicode codepoints. If the sequence is invalid
+ UTF-32, return -1.
+*/
+int CountUTF32(const int32_t* utf32, size_t byteLength);
+
+/** Given a sequence of UTF-8 bytes, return the first unicode codepoint.
+ The pointer will be incremented to point at the next codepoint's start. If
+ invalid UTF-8 is encountered, set *ptr to end and return -1.
+*/
+SkUnichar NextUTF8(const char** ptr, const char* end);
+
+/** Given a sequence of aligned UTF-16 characters in machine-endian form,
+ return the first unicode codepoint. The pointer will be incremented to
+ point at the next codepoint's start. If invalid UTF-16 is encountered,
+ set *ptr to end and return -1.
+*/
+SkUnichar NextUTF16(const uint16_t** ptr, const uint16_t* end);
+
+/** Given a sequence of aligned UTF-32 characters in machine-endian form,
+ return the first unicode codepoint. The pointer will be incremented to
+ point at the next codepoint's start. If invalid UTF-32 is encountered,
+ set *ptr to end and return -1.
+*/
+SkUnichar NextUTF32(const int32_t** ptr, const int32_t* end);
+
+constexpr unsigned kMaxBytesInUTF8Sequence = 4;
+
+/** Convert the unicode codepoint into UTF-8. If `utf8` is non-null, place the
+ result in that array. Return the number of bytes in the result. If `utf8`
+ is null, simply return the number of bytes that would be used. For invalid
+ unicode codepoints, return 0.
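+    For example, ToUTF8(0xE9, buf) writes {0xC3, 0xA9} into buf and returns 2.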
+*/
+size_t ToUTF8(SkUnichar uni, char utf8[kMaxBytesInUTF8Sequence] = nullptr);
+
+/** Convert the unicode codepoint into UTF-16. If `utf16` is non-null, place
+ the result in that array. Return the number of UTF-16 code units in the
+ result (1 or 2). If `utf16` is null, simply return the number of code
+ units that would be used. For invalid unicode codepoints, return 0.
+*/
+size_t ToUTF16(SkUnichar uni, uint16_t utf16[2] = nullptr);
+
+} // namespace SkUTF
+
+#endif // SkUTF_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc b/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc
new file mode 100644
index 0000000000..1f177aca7e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * GenerateChecksums() in ../../src/utils/SkWhitelistTypefaces.cpp generated SkWhitelistChecksums.inc.
+ * Run 'whitelist_typefaces --generate' to create anew.
+ */
+
+#include "include/private/SkTDArray.h"
+
+struct Whitelist {
+ const char* fFontName;
+ uint32_t fChecksum;
+ bool fSerializedNameOnly;
+ bool fSerializedSub;
+};
+
+static Whitelist whitelist[] = {
+ { "Aegean", 0x639a35c7, false, false },
+ { "Analecta", 0x639a35c7, false, false },
+ { "Arial", 0xbc28cb14, false, false },
+ { "DejaVu Sans", 0x639a35c7, false, false },
+ { "DejaVu Sans Mono", 0xbc29a5d9, false, false },
+ { "DejaVu Serif", 0x9db67efe, false, false },
+ { "FreeMono", 0x724884f4, false, false },
+ { "FreeSans", 0x7dfc48a3, false, false },
+ { "FreeSerif", 0xa1ae8c77, false, false },
+ { "Khmer OS", 0x917c40aa, false, false },
+ { "Kochi Gothic", 0x962132dd, false, false },
+ { "Lohit Kannada", 0x0b6ce863, false, false },
+ { "Lohit Marathi", 0x0eb0a941, false, false },
+ { "Lohit Oriya", 0xf3e9d313, false, false },
+ { "Lohit Punjabi", 0xfd8b26e0, false, false },
+ { "Lohit Tamil", 0xa8111d99, false, false },
+ { "Lohit Telugu", 0xd34299e0, false, false },
+ { "Meera", 0xe3e16220, false, false },
+ { "Mukti Narrow", 0x53f7d053, false, false },
+ { "NanumBarunGothic", 0x639a35c7, false, false },
+ { "NanumGothic", 0xff8d773d, false, false },
+ { "OpenSymbol", 0x4fcaf331, false, false },
+ { "Symbola", 0x639a35c7, false, false },
+ { "TakaoPGothic", 0x068c405a, false, false },
+ { "Waree", 0x6a2bfca8, false, false },
+ { "WenQuanYi Micro Hei", 0xcdec08a3, false, false },
+ { "padmaa", 0x09eb1865, false, false },
+};
+
+static const int whitelistCount = (int) SK_ARRAY_COUNT(whitelist);
diff --git a/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp b/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp
new file mode 100644
index 0000000000..3d3dfcccfd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkOpts.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/utils/SkUTF.h"
+
+#include "SkWhitelistChecksums.inc"
+
+#define WHITELIST_DEBUG 0
+
+extern void WhitelistSerializeTypeface(const SkTypeface*, SkWStream* );
+sk_sp<SkTypeface> WhitelistDeserializeTypeface(SkStream* );
+extern bool CheckChecksums();
+extern bool GenerateChecksums();
+
+#if WHITELIST_DEBUG
+static bool timesNewRomanSerializedNameOnly = false;
+#endif
+
+#define SUBNAME_PREFIX "sk_"
+
+static bool font_name_is_local(const char* fontName, SkFontStyle style) {
+ if (!strcmp(fontName, "DejaVu Sans")) {
+ return true;
+ }
+ sk_sp<SkTypeface> defaultFace(SkTypeface::MakeFromName(nullptr, style));
+ sk_sp<SkTypeface> foundFace(SkTypeface::MakeFromName(fontName, style));
+ return defaultFace != foundFace;
+}
+
+static int whitelist_name_index(const SkTypeface* tf) {
+
+ SkString fontNameStr;
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*tf);
+ SkTypeface::LocalizedString familyNameLocalized;
+ while (nameIter->next(&familyNameLocalized)) {
+ fontNameStr = familyNameLocalized.fString;
+ // check against permissible list of names
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (fontNameStr.equals(whitelist[i].fFontName)) {
+ return i;
+ }
+ }
+ }
+#if WHITELIST_DEBUG
+ sk_sp<SkTypeface::LocalizedStrings> debugIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*tf);
+ while (debugIter->next(&familyNameLocalized)) {
+ SkDebugf("no match fontName=\"%s\"\n", familyNameLocalized.fString.c_str());
+ }
+#endif
+ return -1;
+}
+
+static uint32_t compute_checksum(const SkTypeface* tf) {
+ std::unique_ptr<SkFontData> fontData = tf->makeFontData();
+ if (!fontData) {
+ return 0;
+ }
+ SkStreamAsset* fontStream = fontData->getStream();
+ if (!fontStream) {
+ return 0;
+ }
+ SkTDArray<char> data;
+ size_t length = fontStream->getLength();
+ if (!length) {
+ return 0;
+ }
+ data.setCount((int) length);
+ if (!fontStream->peek(data.begin(), length)) {
+ return 0;
+ }
+ return SkOpts::hash(data.begin(), length);
+}
+
+static void serialize_sub(const char* fontName, SkFontStyle style, SkWStream* wstream) {
+ SkFontDescriptor desc;
+ SkString subName(SUBNAME_PREFIX);
+ subName.append(fontName);
+ const char* familyName = subName.c_str();
+ desc.setFamilyName(familyName);
+ desc.setStyle(style);
+ desc.serialize(wstream);
+#if WHITELIST_DEBUG
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (!strcmp(fontName, whitelist[i].fFontName)) {
+ if (!whitelist[i].fSerializedSub) {
+ whitelist[i].fSerializedSub = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ break;
+ }
+ }
+#endif
+}
+
+static bool is_local(const SkTypeface* tf) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+ return isLocal;
+}
+
+static void serialize_full(const SkTypeface* tf, SkWStream* wstream) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+
+ // Embed font data if it's a local font.
+ if (isLocal && !desc.hasFontData()) {
+ desc.setFontData(tf->makeFontData());
+ }
+ desc.serialize(wstream);
+}
+
+static void serialize_name_only(const SkTypeface* tf, SkWStream* wstream) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+ SkASSERT(!isLocal);
+#if WHITELIST_DEBUG
+ const char* familyName = desc.getFamilyName();
+ if (familyName) {
+ if (!strcmp(familyName, "Times New Roman")) {
+ if (!timesNewRomanSerializedNameOnly) {
+ timesNewRomanSerializedNameOnly = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ } else {
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (!strcmp(familyName, whitelist[i].fFontName)) {
+ if (!whitelist[i].fSerializedNameOnly) {
+ whitelist[i].fSerializedNameOnly = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ break;
+ }
+ }
+ }
+ }
+#endif
+ desc.serialize(wstream);
+}
+
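+// Strategy overview: non-local typefaces serialize by name only; local typefaces
+// that are not whitelisted (or whose whitelisted name cannot be resolved locally)
+// embed the full font data; whitelisted local typefaces serialize under a
+// SUBNAME_PREFIX'ed name so the receiver substitutes its own local copy.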
+void WhitelistSerializeTypeface(const SkTypeface* tf, SkWStream* wstream) {
+ if (!is_local(tf)) {
+ serialize_name_only(tf, wstream);
+ return;
+ }
+ int whitelistIndex = whitelist_name_index(tf);
+ if (whitelistIndex < 0) {
+ serialize_full(tf, wstream);
+ return;
+ }
+ const char* fontName = whitelist[whitelistIndex].fFontName;
+ if (!font_name_is_local(fontName, tf->fontStyle())) {
+#if WHITELIST_DEBUG
+ SkDebugf("name not found locally \"%s\" style=%d\n", fontName, tf->style());
+#endif
+ serialize_full(tf, wstream);
+ return;
+ }
+ uint32_t checksum = compute_checksum(tf);
+ if (whitelist[whitelistIndex].fChecksum != checksum) {
+#if WHITELIST_DEBUG
+ if (whitelist[whitelistIndex].fChecksum) {
+ SkDebugf("!!! checksum changed !!!\n");
+ }
+ SkDebugf("checksum updated\n");
+ SkDebugf(" { \"%s\", 0x%08x },\n", fontName, checksum);
+#endif
+ whitelist[whitelistIndex].fChecksum = checksum;
+ }
+ serialize_sub(fontName, tf->fontStyle(), wstream);
+}
+
+sk_sp<SkTypeface> WhitelistDeserializeTypeface(SkStream* stream) {
+ SkFontDescriptor desc;
+ if (!SkFontDescriptor::Deserialize(stream, &desc)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkFontData> data = desc.detachFontData();
+ if (data) {
+ sk_sp<SkTypeface> typeface(SkTypeface::MakeFromFontData(std::move(data)));
+ if (typeface) {
+ return typeface;
+ }
+ }
+ const char* familyName = desc.getFamilyName();
+ if (!strncmp(SUBNAME_PREFIX, familyName, sizeof(SUBNAME_PREFIX) - 1)) {
+ familyName += sizeof(SUBNAME_PREFIX) - 1;
+ }
+ return SkTypeface::MakeFromName(familyName, desc.getStyle());
+}
+
+bool CheckChecksums() {
+ for (int i = 0; i < whitelistCount; ++i) {
+ const char* fontName = whitelist[i].fFontName;
+ sk_sp<SkTypeface> tf(SkTypeface::MakeFromName(fontName, SkFontStyle()));
+ uint32_t checksum = compute_checksum(tf.get());
+ if (whitelist[i].fChecksum != checksum) {
+ return false;
+ }
+ }
+ return true;
+}
+
+const char checksumFileName[] = "SkWhitelistChecksums.inc";
+
+const char checksumHeader[] =
+"/*" "\n"
+" * Copyright 2015 Google Inc." "\n"
+" *" "\n"
+" * Use of this source code is governed by a BSD-style license that can be" "\n"
+" * found in the LICENSE file." "\n"
+" *" "\n"
+" * %s() in %s generated %s." "\n"
+" * Run 'whitelist_typefaces --generate' to create anew." "\n"
+" */" "\n"
+"" "\n"
+"#include \"SkTDArray.h\"" "\n"
+"" "\n"
+"struct Whitelist {" "\n"
+" const char* fFontName;" "\n"
+" uint32_t fChecksum;" "\n"
+" bool fSerializedNameOnly;" "\n"
+" bool fSerializedSub;" "\n"
+"};" "\n"
+"" "\n"
+"static Whitelist whitelist[] = {" "\n";
+
+const char checksumEntry[] =
+" { \"%s\", 0x%08x, false, false }," "\n";
+
+const char checksumTrailer[] =
+"};" "\n"
+"" "\n"
+"static const int whitelistCount = (int) SK_ARRAY_COUNT(whitelist);" "\n";
+
+
+#include "src/core/SkOSFile.h"
+
+bool GenerateChecksums() {
+ FILE* file = sk_fopen(checksumFileName, kWrite_SkFILE_Flag);
+ if (!file) {
+ SkDebugf("Can't open %s for writing.\n", checksumFileName);
+ return false;
+ }
+ SkString line;
+ line.printf(checksumHeader, __FUNCTION__, __FILE__, checksumFileName);
+ sk_fwrite(line.c_str(), line.size(), file);
+ for (int i = 0; i < whitelistCount; ++i) {
+ const char* fontName = whitelist[i].fFontName;
+ sk_sp<SkTypeface> tf(SkTypeface::MakeFromName(fontName, SkFontStyle()));
+ uint32_t checksum = compute_checksum(tf.get());
+ line.printf(checksumEntry, fontName, checksum);
+ sk_fwrite(line.c_str(), line.size(), file);
+ }
+ sk_fwrite(checksumTrailer, sizeof(checksumTrailer) - 1, file);
+ sk_fclose(file);
+ return true;
+}
diff --git a/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
new file mode 100644
index 0000000000..5423efe341
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkBitmap.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkMacros.h"
+#include "include/private/SkTo.h"
+#include "include/utils/mac/SkCGUtils.h"
+
+static CGBitmapInfo ComputeCGAlphaInfo_RGBA(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Big;
+ switch (at) {
+ case kUnknown_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ info |= kCGImageAlphaNoneSkipLast;
+ break;
+ case kPremul_SkAlphaType:
+ info |= kCGImageAlphaPremultipliedLast;
+ break;
+ case kUnpremul_SkAlphaType:
+ info |= kCGImageAlphaLast;
+ break;
+ }
+ return info;
+}
+
+static CGBitmapInfo ComputeCGAlphaInfo_BGRA(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Little;
+ switch (at) {
+ case kUnknown_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ info |= kCGImageAlphaNoneSkipFirst;
+ break;
+ case kPremul_SkAlphaType:
+ info |= kCGImageAlphaPremultipliedFirst;
+ break;
+ case kUnpremul_SkAlphaType:
+ info |= kCGImageAlphaFirst;
+ break;
+ }
+ return info;
+}
+
+static void SkBitmap_ReleaseInfo(void* info, const void* pixelData, size_t size) {
+ SkBitmap* bitmap = reinterpret_cast<SkBitmap*>(info);
+ delete bitmap;
+}
+
+static bool getBitmapInfo(const SkBitmap& bm,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info,
+ bool* upscaleTo32) {
+ if (upscaleTo32) {
+ *upscaleTo32 = false;
+ }
+
+ switch (bm.colorType()) {
+ case kRGB_565_SkColorType:
+#if 0
+            // doesn't seem quite right. Are they thinking 1555?
+ *bitsPerComponent = 5;
+ *info = kCGBitmapByteOrder16Little | kCGImageAlphaNone;
+#else
+ if (upscaleTo32) {
+ *upscaleTo32 = true;
+ }
+ // now treat like RGBA
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_RGBA(kOpaque_SkAlphaType);
+#endif
+ break;
+ case kRGBA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_RGBA(bm.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_BGRA(bm.alphaType());
+ break;
+ case kARGB_4444_SkColorType:
+ *bitsPerComponent = 4;
+ *info = kCGBitmapByteOrder16Little;
+ if (bm.isOpaque()) {
+ *info |= kCGImageAlphaNoneSkipLast;
+ } else {
+ *info |= kCGImageAlphaPremultipliedLast;
+ }
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static SkBitmap* prepareForImageRef(const SkBitmap& bm,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info) {
+ bool upscaleTo32;
+ if (!getBitmapInfo(bm, bitsPerComponent, info, &upscaleTo32)) {
+ return nullptr;
+ }
+
+ SkBitmap* copy;
+ if (upscaleTo32) {
+ copy = new SkBitmap;
+ // here we make a deep copy of the pixels, since CG won't take our
+ // 565 directly
+ copy->allocPixels(bm.info().makeColorType(kN32_SkColorType));
+ bm.readPixels(copy->info(), copy->getPixels(), copy->rowBytes(), 0, 0);
+ } else {
+ copy = new SkBitmap(bm);
+ }
+ return copy;
+}
+
+CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef colorSpace) {
+ size_t bitsPerComponent SK_INIT_TO_AVOID_WARNING;
+ CGBitmapInfo info SK_INIT_TO_AVOID_WARNING;
+
+ SkBitmap* bitmap = prepareForImageRef(bm, &bitsPerComponent, &info);
+ if (nullptr == bitmap) {
+ return nullptr;
+ }
+
+ const int w = bitmap->width();
+ const int h = bitmap->height();
+ const size_t s = bitmap->computeByteSize();
+
+ // our provider "owns" the bitmap*, and will take care of deleting it
+ CGDataProviderRef dataRef = CGDataProviderCreateWithData(bitmap, bitmap->getPixels(), s,
+ SkBitmap_ReleaseInfo);
+
+ bool releaseColorSpace = false;
+ if (nullptr == colorSpace) {
+ colorSpace = CGColorSpaceCreateDeviceRGB();
+ releaseColorSpace = true;
+ }
+
+ CGImageRef ref = CGImageCreate(w, h, bitsPerComponent,
+ bitmap->bytesPerPixel() * 8,
+ bitmap->rowBytes(), colorSpace, info, dataRef,
+ nullptr, false, kCGRenderingIntentDefault);
+
+ if (releaseColorSpace) {
+ CGColorSpaceRelease(colorSpace);
+ }
+ CGDataProviderRelease(dataRef);
+ return ref;
+}
+
+void SkCGDrawBitmap(CGContextRef cg, const SkBitmap& bm, float x, float y) {
+ CGImageRef img = SkCreateCGImageRef(bm);
+
+ if (img) {
+ CGRect r = CGRectMake(0, 0, bm.width(), bm.height());
+
+ CGContextSaveGState(cg);
+ CGContextTranslateCTM(cg, x, r.size.height + y);
+ CGContextScaleCTM(cg, 1, -1);
+
+ CGContextDrawImage(cg, r, img);
+
+ CGContextRestoreGState(cg);
+
+ CGImageRelease(img);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+CGContextRef SkCreateCGContext(const SkPixmap& pmap) {
+ CGBitmapInfo cg_bitmap_info = 0;
+ size_t bitsPerComponent = 0;
+ switch (pmap.colorType()) {
+ case kRGBA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_RGBA(pmap.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_BGRA(pmap.alphaType());
+ break;
+ default:
+ return nullptr; // no other colortypes are supported (for now)
+ }
+
+ size_t rb = pmap.addr() ? pmap.rowBytes() : 0;
+ CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+ CGContextRef cg = CGBitmapContextCreate(pmap.writable_addr(), pmap.width(), pmap.height(),
+ bitsPerComponent, rb, cs, cg_bitmap_info);
+ CFRelease(cs);
+ return cg;
+}
+
+bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* pixels,
+ CGImageRef image) {
+ CGBitmapInfo cg_bitmap_info = 0;
+ size_t bitsPerComponent = 0;
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_RGBA(info.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_BGRA(info.alphaType());
+ break;
+ default:
+ return false; // no other colortypes are supported (for now)
+ }
+
+ CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+ CGContextRef cg = CGBitmapContextCreate(pixels, info.width(), info.height(), bitsPerComponent,
+ rowBytes, cs, cg_bitmap_info);
+ CFRelease(cs);
+ if (nullptr == cg) {
+ return false;
+ }
+
+    // Use this blend mode to avoid having to erase the pixels first, and to avoid CG performing
+    // any blending (which could introduce errors and be slower).
+ CGContextSetBlendMode(cg, kCGBlendModeCopy);
+
+ CGContextDrawImage(cg, CGRectMake(0, 0, info.width(), info.height()), image);
+ CGContextRelease(cg);
+ return true;
+}
+
+bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef image) {
+ const int width = SkToInt(CGImageGetWidth(image));
+ const int height = SkToInt(CGImageGetHeight(image));
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return false;
+ }
+
+ if (!SkCopyPixelsFromCGImage(tmp.info(), tmp.rowBytes(), tmp.getPixels(), image)) {
+ return false;
+ }
+
+ CGImageAlphaInfo cgInfo = CGImageGetAlphaInfo(image);
+ switch (cgInfo) {
+ case kCGImageAlphaNone:
+ case kCGImageAlphaNoneSkipLast:
+ case kCGImageAlphaNoneSkipFirst:
+ SkASSERT(SkBitmap::ComputeIsOpaque(tmp));
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ break;
+ default:
+ // we don't know if we're opaque or not, so compute it.
+ if (SkBitmap::ComputeIsOpaque(tmp)) {
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ }
+ }
+
+ *dst = tmp;
+ return true;
+}
+
+sk_sp<SkImage> SkMakeImageFromCGImage(CGImageRef src) {
+ SkBitmap bm;
+ if (!SkCreateBitmapFromCGImage(&bm, src)) {
+ return nullptr;
+ }
+
+ bm.setImmutable();
+ return SkImage::MakeFromBitmap(bm);
+}
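+
+// Illustrative only: a sketch of going from a CGImageRef to a drawn SkImage
+// with the helper above. The SkCanvas parameter (and its SkCanvas.h include)
+// is an assumption of the example.
+#if 0
+static void draw_cg_image(CGImageRef cgImage, SkCanvas* canvas) {
+    sk_sp<SkImage> image = SkMakeImageFromCGImage(cgImage);
+    if (image) {
+        canvas->drawImage(image, 0, 0);  // pixels were copied, so cgImage may be released
+    }
+}
+#endif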
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp b/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp
new file mode 100644
index 0000000000..30f8e1eddc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkStream.h"
+#include "include/private/SkMalloc.h"
+#include "include/utils/mac/SkCGUtils.h"
+
+// These are used by CGDataProviderCreateWithData
+
+static void unref_proc(void* info, const void* addr, size_t size) {
+ SkASSERT(info);
+ ((SkRefCnt*)info)->unref();
+}
+
+static void delete_stream_proc(void* info, const void* addr, size_t size) {
+ SkASSERT(info);
+ SkStream* stream = (SkStream*)info;
+ SkASSERT(stream->getMemoryBase() == addr);
+ SkASSERT(stream->getLength() == size);
+ delete stream;
+}
+
+// These are used by CGDataProviderSequentialCallbacks
+
+static size_t get_bytes_proc(void* info, void* buffer, size_t bytes) {
+ SkASSERT(info);
+ return ((SkStream*)info)->read(buffer, bytes);
+}
+
+static off_t skip_forward_proc(void* info, off_t bytes) {
+ return ((SkStream*)info)->skip((size_t) bytes);
+}
+
+static void rewind_proc(void* info) {
+ SkASSERT(info);
+ ((SkStream*)info)->rewind();
+}
+
+// Used when info is an SkStream.
+static void release_info_proc(void* info) {
+ SkASSERT(info);
+ delete (SkStream*)info;
+}
+
+CGDataProviderRef SkCreateDataProviderFromStream(std::unique_ptr<SkStreamRewindable> stream) {
+ // TODO: Replace with SkStream::getData() when that is added. Then we only
+ // have one version of CGDataProviderCreateWithData (i.e. same release proc)
+ const void* addr = stream->getMemoryBase();
+ if (addr) {
+ // special-case when the stream is just a block of ram
+ size_t size = stream->getLength();
+ return CGDataProviderCreateWithData(stream.release(), addr, size, delete_stream_proc);
+ }
+
+ CGDataProviderSequentialCallbacks rec;
+ sk_bzero(&rec, sizeof(rec));
+ rec.version = 0;
+ rec.getBytes = get_bytes_proc;
+ rec.skipForward = skip_forward_proc;
+ rec.rewind = rewind_proc;
+ rec.releaseInfo = release_info_proc;
+ return CGDataProviderCreateSequential(stream.release(), &rec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkData.h"
+
+CGDataProviderRef SkCreateDataProviderFromData(sk_sp<SkData> data) {
+ const void* addr = data->data();
+ size_t size = data->size();
+ return CGDataProviderCreateWithData(data.release(), addr, size, unref_proc);
+}
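+
+// Illustrative only: a sketch handing SkData-backed PNG bytes to CoreGraphics.
+// CGImageCreateWithPNGDataProvider is a CoreGraphics API; the assumption here
+// is that 'data' holds a complete PNG stream.
+#if 0
+static CGImageRef decode_png_via_cg(sk_sp<SkData> data) {
+    CGDataProviderRef provider = SkCreateDataProviderFromData(std::move(data));
+    CGImageRef image = CGImageCreateWithPNGDataProvider(provider, nullptr, false,
+                                                        kCGRenderingIntentDefault);
+    CGDataProviderRelease(provider);  // the image retains what it still needs
+    return image;
+}
+#endif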
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h b/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h
new file mode 100644
index 0000000000..80640de412
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUniqueCFRef_DEFINED
+#define SkUniqueCFRef_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/private/SkTLogic.h"
+#include "include/private/SkTemplates.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <memory>
+
+template <typename CFRef> using SkUniqueCFRef =
+ std::unique_ptr<skstd::remove_pointer_t<CFRef>,
+ SkFunctionWrapper<decltype(CFRelease), CFRelease>>;
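+
+// Illustrative only: the deleter invokes CFRelease exactly once when the
+// wrapper goes out of scope, e.g. with a plain CoreFoundation string:
+#if 0
+static void example() {
+    SkUniqueCFRef<CFStringRef> str(
+        CFStringCreateWithCString(kCFAllocatorDefault, "example", kCFStringEncodingUTF8));
+    // ... use str.get() ...
+}   // CFRelease(str.get()) runs automatically here.
+#endif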
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
new file mode 100644
index 0000000000..5b0f3f9745
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/utils/win/SkAutoCoInitialize.h"
+
+#include <objbase.h>
+#include <winerror.h>
+
+SkAutoCoInitialize::SkAutoCoInitialize() :
+ fHR(
+ CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE)
+ )
+{ }
+
+SkAutoCoInitialize::~SkAutoCoInitialize() {
+ if (SUCCEEDED(this->fHR)) {
+ CoUninitialize();
+ }
+}
+
+bool SkAutoCoInitialize::succeeded() {
+ return SUCCEEDED(this->fHR) || RPC_E_CHANGED_MODE == this->fHR;
+}
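+
+// Illustrative only: typical RAII usage, assuming COM is needed for the
+// duration of one scope.
+#if 0
+static void example() {
+    SkAutoCoInitialize scopedCo;
+    if (!scopedCo.succeeded()) {
+        return;  // COM is unavailable in a compatible mode; bail out.
+    }
+    // ... use COM APIs; the destructor balances the CoInitializeEx call.
+}
+#endif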
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
new file mode 100644
index 0000000000..592c2682d9
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoCo_DEFINED
+#define SkAutoCo_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "include/private/SkNoncopyable.h"
+#include "src/core/SkLeanWindows.h"
+
+/**
+ * An instance of this class initializes COM on creation
+ * and closes the COM library on destruction.
+ */
+class SkAutoCoInitialize : SkNoncopyable {
+private:
+ HRESULT fHR;
+public:
+ SkAutoCoInitialize();
+ ~SkAutoCoInitialize();
+ bool succeeded();
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkAutoCo_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.cpp b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
new file mode 100644
index 0000000000..dceb453eb6
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkString.h"
+#include "include/private/SkOnce.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkHRESULT.h"
+
+#include <dwrite.h>
+
+static IDWriteFactory* gDWriteFactory = nullptr;
+
+static void release_dwrite_factory() {
+ if (gDWriteFactory) {
+ gDWriteFactory->Release();
+ }
+}
+
+static void create_dwrite_factory(IDWriteFactory** factory) {
+ typedef decltype(DWriteCreateFactory)* DWriteCreateFactoryProc;
+ DWriteCreateFactoryProc dWriteCreateFactoryProc = reinterpret_cast<DWriteCreateFactoryProc>(
+ GetProcAddress(LoadLibraryW(L"dwrite.dll"), "DWriteCreateFactory"));
+
+ if (!dWriteCreateFactoryProc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ HRVM(hr, "Could not get DWriteCreateFactory proc.");
+ }
+
+ HRVM(dWriteCreateFactoryProc(DWRITE_FACTORY_TYPE_SHARED,
+ __uuidof(IDWriteFactory),
+ reinterpret_cast<IUnknown**>(factory)),
+ "Could not create DirectWrite factory.");
+ atexit(release_dwrite_factory);
+}
+
+IDWriteFactory* sk_get_dwrite_factory() {
+ static SkOnce once;
+ once(create_dwrite_factory, &gDWriteFactory);
+ return gDWriteFactory;
+}
+
+static IDWriteRenderingParams* gDWriteRenderingParams = nullptr;
+
+static void release_dwrite_rendering_params() {
+ if (gDWriteRenderingParams) {
+ gDWriteRenderingParams->Release();
+ }
+}
+
+static void create_dwrite_rendering_params(IDWriteRenderingParams** params) {
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (!factory) {
+ return;
+ }
+ HRVM(factory->CreateRenderingParams(params),
+ "Could not create DWrite default rendering params");
+ atexit(release_dwrite_rendering_params);
+}
+
+IDWriteRenderingParams* sk_get_dwrite_default_rendering_params() {
+ static SkOnce once;
+ once(create_dwrite_rendering_params, &gDWriteRenderingParams);
+ return gDWriteRenderingParams;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name) {
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, nullptr, 0);
+ if (0 == wlen) {
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+            "Could not get length for utf-8 to wchar conversion.");
+ }
+ name->reset(wlen);
+ wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, name->get(), wlen);
+ if (0 == wlen) {
+        HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert utf-8 to wchar.");
+ }
+ return S_OK;
+}
+
+/** Converts a WCHAR string to a utf8 string. */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname) {
+ int len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, nullptr, 0, nullptr, nullptr);
+ if (0 == len) {
+ if (nameLen <= 0) {
+ skname->reset();
+ return S_OK;
+ }
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+            "Could not get length for wchar to utf-8 conversion.");
+ }
+ skname->resize(len);
+
+ len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, skname->writable_str(), len, nullptr, nullptr);
+ if (0 == len) {
+        HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert wchar to utf-8.");
+ }
+ return S_OK;
+}
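+
+// Illustrative only: round-tripping a UTF-8 font family name through the two
+// helpers above (the name is arbitrary; wcslen would need <wchar.h>).
+#if 0
+static HRESULT round_trip_name(SkString* roundTripped) {
+    SkSMallocWCHAR wideName;
+    HR(sk_cstring_to_wchar("Segoe UI", &wideName));
+    int wideLen = static_cast<int>(wcslen(wideName.get()));
+    return sk_wchar_to_skstring(wideName.get(), wideLen, roundTripped);
+}
+#endif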
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+HRESULT sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname) {
+ UINT32 nameIndex = 0;
+ if (preferedLocale) {
+ // Ignore any errors and continue with index 0 if there is a problem.
+ BOOL nameExists = FALSE;
+ (void)names->FindLocaleName(preferedLocale, &nameIndex, &nameExists);
+ if (!nameExists) {
+ nameIndex = 0;
+ }
+ }
+
+ UINT32 nameLen;
+ HRM(names->GetStringLength(nameIndex, &nameLen), "Could not get name length.");
+
+ SkSMallocWCHAR name(nameLen + 1);
+ HRM(names->GetString(nameIndex, name.get(), nameLen + 1), "Could not get string.");
+
+ HR(sk_wchar_to_skstring(name.get(), nameLen, skname));
+ return S_OK;
+}
+
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc) {
+ *proc = reinterpret_cast<SkGetUserDefaultLocaleNameProc>(
+ GetProcAddress(LoadLibraryW(L"Kernel32.dll"), "GetUserDefaultLocaleName")
+ );
+ if (!*proc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ return hr;
+ }
+ return S_OK;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.h b/gfx/skia/skia/src/utils/win/SkDWrite.h
new file mode 100644
index 0000000000..d9958344d8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWrite_DEFINED
+#define SkDWrite_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/private/SkTemplates.h"
+
+#include <dwrite.h>
+#include <winsdkver.h>
+
+class SkString;
+
+////////////////////////////////////////////////////////////////////////////////
+// Factory
+
+IDWriteFactory* sk_get_dwrite_factory();
+IDWriteRenderingParams* sk_get_dwrite_default_rendering_params();
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Prefer to use this type to prevent template proliferation. */
+typedef SkAutoSTMalloc<16, WCHAR> SkSMallocWCHAR;
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name);
+
+/** Converts a WCHAR string to a utf8 string.
+ * @param nameLen the number of WCHARs in the name.
+ */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname);
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+HRESULT sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname);
+
+typedef int (WINAPI *SkGetUserDefaultLocaleNameProc)(LPWSTR, int);
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc);
+
+////////////////////////////////////////////////////////////////////////////////
+// Table handling
+
+class AutoDWriteTable {
+public:
+ AutoDWriteTable(IDWriteFontFace* fontFace, UINT32 beTag) : fExists(FALSE), fFontFace(fontFace) {
+        // Any errors are ignored; the user must check fExists anyway.
+ fontFace->TryGetFontTable(beTag,
+ reinterpret_cast<const void **>(&fData), &fSize, &fLock, &fExists);
+ }
+ ~AutoDWriteTable() {
+ if (fExists) {
+ fFontFace->ReleaseFontTable(fLock);
+ }
+ }
+
+ const uint8_t* fData;
+ UINT32 fSize;
+ BOOL fExists;
+private:
+ // Borrowed reference, the user must ensure the fontFace stays alive.
+ IDWriteFontFace* fFontFace;
+ void* fLock;
+};
+template<typename T> class AutoTDWriteTable : public AutoDWriteTable {
+public:
+ static const UINT32 tag = DWRITE_MAKE_OPENTYPE_TAG(T::TAG0, T::TAG1, T::TAG2, T::TAG3);
+ AutoTDWriteTable(IDWriteFontFace* fontFace) : AutoDWriteTable(fontFace, tag) { }
+
+ const T* get() const { return reinterpret_cast<const T*>(fData); }
+ const T* operator->() const { return reinterpret_cast<const T*>(fData); }
+};
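+
+// Illustrative only: reading one raw OpenType table through the RAII helpers
+// above; 'head' is a standard table tag and fontFace is assumed to be valid.
+#if 0
+static void read_head_table(IDWriteFontFace* fontFace) {
+    AutoDWriteTable headTable(fontFace, DWRITE_MAKE_OPENTYPE_TAG('h','e','a','d'));
+    if (headTable.fExists) {
+        // headTable.fData points at headTable.fSize bytes of the 'head' table;
+        // the table is released again when headTable goes out of scope.
+    }
+}
+#endif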
+
+////////////////////////////////////////////////////////////////////////////////
+// Style conversion
+
+struct DWriteStyle {
+ explicit DWriteStyle(const SkFontStyle& pattern) {
+ fWeight = (DWRITE_FONT_WEIGHT)pattern.weight();
+ fWidth = (DWRITE_FONT_STRETCH)pattern.width();
+ switch (pattern.slant()) {
+ case SkFontStyle::kUpright_Slant: fSlant = DWRITE_FONT_STYLE_NORMAL ; break;
+ case SkFontStyle::kItalic_Slant: fSlant = DWRITE_FONT_STYLE_ITALIC ; break;
+ case SkFontStyle::kOblique_Slant: fSlant = DWRITE_FONT_STYLE_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+ }
+ DWRITE_FONT_WEIGHT fWeight;
+ DWRITE_FONT_STRETCH fWidth;
+ DWRITE_FONT_STYLE fSlant;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
new file mode 100644
index 0000000000..a3448d35c2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/private/SkTFitsIn.h"
+#include "include/private/SkTemplates.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+///////////////////////////////////////////////////////////////////////////////
+// SkDWriteFontFileStream
+
+SkDWriteFontFileStream::SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream)
+ : fFontFileStream(SkRefComPtr(fontFileStream))
+ , fPos(0)
+ , fLockedMemory(nullptr)
+ , fFragmentLock(nullptr) {
+}
+
+SkDWriteFontFileStream::~SkDWriteFontFileStream() {
+ if (fFragmentLock) {
+ fFontFileStream->ReleaseFileFragment(fFragmentLock);
+ }
+}
+
+size_t SkDWriteFontFileStream::read(void* buffer, size_t size) {
+ HRESULT hr = S_OK;
+
+ if (nullptr == buffer) {
+ size_t fileSize = this->getLength();
+
+ if (fPos + size > fileSize) {
+ size_t skipped = fileSize - fPos;
+ fPos = fileSize;
+ return skipped;
+ } else {
+ fPos += size;
+ return size;
+ }
+ }
+
+ const void* start;
+ void* fragmentLock;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, size, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, size);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos += size;
+ return size;
+ }
+
+    // The read may have failed because we asked for too much data.
+ size_t fileSize = this->getLength();
+ if (fPos + size <= fileSize) {
+        // This means we were within bounds, but failed for some other reason.
+ return 0;
+ }
+
+ size_t read = fileSize - fPos;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, read, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, read);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos = fileSize;
+ return read;
+ }
+
+ return 0;
+}
+
+bool SkDWriteFontFileStream::isAtEnd() const {
+ return fPos == this->getLength();
+}
+
+bool SkDWriteFontFileStream::rewind() {
+ fPos = 0;
+ return true;
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::onDuplicate() const {
+ return new SkDWriteFontFileStream(fFontFileStream.get());
+}
+
+size_t SkDWriteFontFileStream::getPosition() const {
+ return fPos;
+}
+
+bool SkDWriteFontFileStream::seek(size_t position) {
+ size_t length = this->getLength();
+ fPos = (position > length) ? length : position;
+ return true;
+}
+
+bool SkDWriteFontFileStream::move(long offset) {
+ return seek(fPos + offset);
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::onFork() const {
+ std::unique_ptr<SkDWriteFontFileStream> that(this->duplicate());
+ that->seek(fPos);
+ return that.release();
+}
+
+size_t SkDWriteFontFileStream::getLength() const {
+ HRESULT hr = S_OK;
+ UINT64 realFileSize = 0;
+ hr = fFontFileStream->GetFileSize(&realFileSize);
+ if (!SkTFitsIn<size_t>(realFileSize)) {
+ return 0;
+ }
+ return static_cast<size_t>(realFileSize);
+}
+
+const void* SkDWriteFontFileStream::getMemoryBase() {
+ if (fLockedMemory) {
+ return fLockedMemory;
+ }
+
+ UINT64 fileSize;
+ HRNM(fFontFileStream->GetFileSize(&fileSize), "Could not get file size");
+ HRNM(fFontFileStream->ReadFileFragment(&fLockedMemory, 0, fileSize, &fFragmentLock),
+ "Could not lock file fragment.");
+ return fLockedMemory;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SkDWriteFontFileStreamWrapper
+
+HRESULT SkDWriteFontFileStreamWrapper::Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream)
+{
+ *streamFontFileStream = new SkDWriteFontFileStreamWrapper(stream);
+ if (nullptr == *streamFontFileStream) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+}
+
+SkDWriteFontFileStreamWrapper::SkDWriteFontFileStreamWrapper(SkStreamAsset* stream)
+ : fRefCount(1), fStream(stream) {
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileStream)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteFontFileStreamWrapper::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteFontFileStreamWrapper::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext)
+{
+ // The loader is responsible for doing a bounds check.
+ UINT64 fileSize;
+ this->GetFileSize(&fileSize);
+ if (fileOffset > fileSize || fragmentSize > fileSize - fileOffset) {
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+ return E_FAIL;
+ }
+
+ if (!SkTFitsIn<size_t>(fileOffset + fragmentSize)) {
+ return E_FAIL;
+ }
+
+ const void* data = fStream->getMemoryBase();
+ if (data) {
+ *fragmentStart = static_cast<BYTE const*>(data) + static_cast<size_t>(fileOffset);
+ *fragmentContext = nullptr;
+
+ } else {
+ // May be called from multiple threads.
+ SkAutoMutexExclusive ama(fStreamMutex);
+
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+
+ if (!fStream->seek(static_cast<size_t>(fileOffset))) {
+ return E_FAIL;
+ }
+ SkAutoTMalloc<uint8_t> streamData(static_cast<size_t>(fragmentSize));
+ if (fStream->read(streamData.get(), static_cast<size_t>(fragmentSize)) != fragmentSize) {
+ return E_FAIL;
+ }
+
+ *fragmentStart = streamData.get();
+ *fragmentContext = streamData.release();
+ }
+ return S_OK;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteFontFileStreamWrapper::ReleaseFileFragment(void* fragmentContext) {
+ sk_free(fragmentContext);
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::GetFileSize(UINT64* fileSize) {
+ *fileSize = fStream->getLength();
+ return S_OK;
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::GetLastWriteTime(UINT64* lastWriteTime) {
+ // The concept of last write time does not apply to this loader.
+ *lastWriteTime = 0;
+ return E_NOTIMPL;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
new file mode 100644
index 0000000000..d39b021db4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteFontFileStream_DEFINED
+#define SkDWriteFontFileStream_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/SkMutex.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+/**
+ * An SkStream backed by an IDWriteFontFileStream.
+ * This allows Skia code to read an IDWriteFontFileStream.
+ */
+class SkDWriteFontFileStream : public SkStreamMemory {
+public:
+ explicit SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream);
+ ~SkDWriteFontFileStream() override;
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+ bool rewind() override;
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+ size_t getLength() const override;
+ const void* getMemoryBase() override;
+
+ std::unique_ptr<SkDWriteFontFileStream> duplicate() const {
+ return std::unique_ptr<SkDWriteFontFileStream>(this->onDuplicate());
+ }
+ std::unique_ptr<SkDWriteFontFileStream> fork() const {
+ return std::unique_ptr<SkDWriteFontFileStream>(this->onFork());
+ }
+
+private:
+ SkDWriteFontFileStream* onDuplicate() const override;
+ SkDWriteFontFileStream* onFork() const override;
+
+ SkTScopedComPtr<IDWriteFontFileStream> fFontFileStream;
+ size_t fPos;
+ const void* fLockedMemory;
+ void* fFragmentLock;
+};
+
+/**
+ * An IDWriteFontFileStream backed by an SkStream.
+ * This allows DirectWrite to read an SkStream.
+ */
+class SkDWriteFontFileStreamWrapper : public IDWriteFontFileStream {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileStream methods
+ SK_STDMETHODIMP ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext) override;
+
+ SK_STDMETHODIMP_(void) ReleaseFileFragment(void* fragmentContext) override;
+ SK_STDMETHODIMP GetFileSize(UINT64* fileSize) override;
+ SK_STDMETHODIMP GetLastWriteTime(UINT64* lastWriteTime) override;
+
+ static HRESULT Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream);
+
+private:
+ explicit SkDWriteFontFileStreamWrapper(SkStreamAsset* stream);
+ virtual ~SkDWriteFontFileStreamWrapper() { }
+
+ ULONG fRefCount;
+ std::unique_ptr<SkStreamAsset> fStream;
+ SkMutex fStreamMutex;
+};
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
new file mode 100644
index 0000000000..65da6ae476
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkPath.h"
+#include "src/utils/SkFloatUtils.h"
+#include "src/utils/win/SkDWriteGeometrySink.h"
+#include "src/utils/win/SkObjBase.h"
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+SkDWriteGeometrySink::SkDWriteGeometrySink(SkPath* path) : fRefCount(1), fPath(path) { }
+
+SkDWriteGeometrySink::~SkDWriteGeometrySink() { }
+
+SK_STDMETHODIMP SkDWriteGeometrySink::QueryInterface(REFIID iid, void **object) {
+ if (nullptr == object) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown) || iid == __uuidof(IDWriteGeometrySink)) {
+ *object = static_cast<IDWriteGeometrySink*>(this);
+ this->AddRef();
+ return S_OK;
+ } else {
+ *object = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteGeometrySink::AddRef(void) {
+ return static_cast<ULONG>(InterlockedIncrement(&fRefCount));
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteGeometrySink::Release(void) {
+ ULONG res = static_cast<ULONG>(InterlockedDecrement(&fRefCount));
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::SetFillMode(D2D1_FILL_MODE fillMode) {
+ switch (fillMode) {
+ case D2D1_FILL_MODE_ALTERNATE:
+ fPath->setFillType(SkPath::kEvenOdd_FillType);
+ break;
+ case D2D1_FILL_MODE_WINDING:
+ fPath->setFillType(SkPath::kWinding_FillType);
+ break;
+ default:
+ SkDEBUGFAIL("Unknown D2D1_FILL_MODE.");
+ break;
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) {
+ if (vertexFlags == D2D1_PATH_SEGMENT_NONE || vertexFlags == D2D1_PATH_SEGMENT_FORCE_ROUND_LINE_JOIN) {
+ SkDEBUGFAIL("Invalid D2D1_PATH_SEGMENT value.");
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) {
+ fPath->moveTo(startPoint.x, startPoint.y);
+ if (figureBegin == D2D1_FIGURE_BEGIN_HOLLOW) {
+ SkDEBUGFAIL("Invalid D2D1_FIGURE_BEGIN value.");
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::AddLines(const D2D1_POINT_2F *points, UINT pointsCount) {
+ for (const D2D1_POINT_2F *end = &points[pointsCount]; points < end; ++points) {
+ fPath->lineTo(points->x, points->y);
+ }
+}
+
+static bool approximately_equal(float a, float b) {
+ const SkFloatingPoint<float, 10> lhs(a), rhs(b);
+ return lhs.AlmostEquals(rhs);
+}
+
+typedef struct {
+ float x;
+ float y;
+} Cubic[4], Quadratic[3];
+
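+// A cubic that is a degree-elevated quadratic with control point C satisfies
+// P1 = P0 + (2/3)(C - P0) and P2 = P3 + (2/3)(C - P3), so C can be recovered
+// from either end: C = P0 + (3/2)(P1 - P0) = P3 + (3/2)(P2 - P3).
+// check_quadratic() below tests whether the two recovered candidates agree to
+// within floating-point tolerance and, if so, fills in the equivalent quadratic.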
+static bool check_quadratic(const Cubic& cubic, Quadratic& reduction) {
+ float dx10 = cubic[1].x - cubic[0].x;
+ float dx23 = cubic[2].x - cubic[3].x;
+ float midX = cubic[0].x + dx10 * 3 / 2;
+ //NOTE: !approximately_equal(midX - cubic[3].x, dx23 * 3 / 2)
+ //does not work as subnormals get in between the left side and 0.
+ if (!approximately_equal(midX, (dx23 * 3 / 2) + cubic[3].x)) {
+ return false;
+ }
+ float dy10 = cubic[1].y - cubic[0].y;
+ float dy23 = cubic[2].y - cubic[3].y;
+ float midY = cubic[0].y + dy10 * 3 / 2;
+ if (!approximately_equal(midY, (dy23 * 3 / 2) + cubic[3].y)) {
+ return false;
+ }
+ reduction[0] = cubic[0];
+ reduction[1].x = midX;
+ reduction[1].y = midY;
+ reduction[2] = cubic[3];
+ return true;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) {
+ SkPoint lastPt;
+ fPath->getLastPt(&lastPt);
+ D2D1_POINT_2F prevPt = { SkScalarToFloat(lastPt.fX), SkScalarToFloat(lastPt.fY) };
+
+ for (const D2D1_BEZIER_SEGMENT *end = &beziers[beziersCount]; beziers < end; ++beziers) {
+ Cubic cubic = { { prevPt.x, prevPt.y },
+ { beziers->point1.x, beziers->point1.y },
+ { beziers->point2.x, beziers->point2.y },
+ { beziers->point3.x, beziers->point3.y }, };
+ Quadratic quadratic;
+ if (check_quadratic(cubic, quadratic)) {
+ fPath->quadTo(quadratic[1].x, quadratic[1].y,
+ quadratic[2].x, quadratic[2].y);
+ } else {
+ fPath->cubicTo(beziers->point1.x, beziers->point1.y,
+ beziers->point2.x, beziers->point2.y,
+ beziers->point3.x, beziers->point3.y);
+ }
+ prevPt = beziers->point3;
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::EndFigure(D2D1_FIGURE_END figureEnd) {
+ fPath->close();
+}
+
+SK_STDMETHODIMP SkDWriteGeometrySink::Close() {
+ return S_OK;
+}
+
+HRESULT SkDWriteGeometrySink::Create(SkPath* path, IDWriteGeometrySink** geometryToPath) {
+ *geometryToPath = new SkDWriteGeometrySink(path);
+ return S_OK;
+}
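+
+// Illustrative only: a sketch of extracting one glyph's outline into an SkPath
+// through the sink above. IDWriteFontFace::GetGlyphRunOutline is the standard
+// DirectWrite entry point; the SkTScopedComPtr and SkHRESULT includes are
+// assumptions of the example.
+#if 0
+static HRESULT glyph_to_path(IDWriteFontFace* fontFace, UINT16 glyphId,
+                             FLOAT emSize, SkPath* path) {
+    SkTScopedComPtr<IDWriteGeometrySink> sink;
+    HR(SkDWriteGeometrySink::Create(path, &sink));
+    return fontFace->GetGlyphRunOutline(emSize, &glyphId, nullptr, nullptr, 1,
+                                        FALSE, FALSE, sink.get());
+}
+#endif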
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
new file mode 100644
index 0000000000..019539b191
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteToPath_DEFINED
+#define SkDWriteToPath_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/utils/win/SkObjBase.h"
+
+class SkPath;
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+class SkDWriteGeometrySink : public IDWriteGeometrySink {
+private:
+ LONG fRefCount;
+ SkPath* fPath;
+
+protected:
+ explicit SkDWriteGeometrySink(SkPath* path);
+ virtual ~SkDWriteGeometrySink();
+
+public:
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void **object) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ SK_STDMETHODIMP_(void) SetFillMode(D2D1_FILL_MODE fillMode) override;
+ SK_STDMETHODIMP_(void) SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) override;
+ SK_STDMETHODIMP_(void) BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) override;
+ SK_STDMETHODIMP_(void) AddLines(const D2D1_POINT_2F *points, UINT pointsCount) override;
+ SK_STDMETHODIMP_(void) AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) override;
+ SK_STDMETHODIMP_(void) EndFigure(D2D1_FIGURE_END figureEnd) override;
+ SK_STDMETHODIMP Close() override;
+
+ static HRESULT Create(SkPath* path, IDWriteGeometrySink** geometryToPath);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h b/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h
new file mode 100644
index 0000000000..d95486b328
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteNTDDI_VERSION_DEFINED
+#define SkDWriteNTDDI_VERSION_DEFINED
+
+// More strictly, this header should be the first thing in a translation unit,
+// since it is effectively negating build flags.
+#if defined(_WINDOWS_) || defined(DWRITE_3_H_INCLUDED)
+#error Must include SkDWriteNTDDI_VERSION.h before any Windows or DWrite headers.
+#endif
+
+// If the build defines NTDDI_VERSION, pretend it didn't.
+// This also requires resetting _WIN32_WINNT and WINVER.
+// dwrite_3.h guards enum, macro, and interface declarations behind NTDDI_VERSION,
+// but it is not clear this is correct since these are all immutable.
+#if defined(NTDDI_VERSION)
+# undef NTDDI_VERSION
+# if defined(_WIN32_WINNT)
+# undef _WIN32_WINNT
+# endif
+# if defined(WINVER)
+# undef WINVER
+# endif
+#endif
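+
+// Illustrative only: a translation unit that wants the full dwrite_3.h surface
+// regardless of the build's NTDDI_VERSION would include this header before any
+// Windows or DWrite header:
+//
+//   #include "src/utils/win/SkDWriteNTDDI_VERSION.h"  // must come first
+//   #include <dwrite_3.h>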
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.cpp b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
new file mode 100644
index 0000000000..2be633132d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/utils/win/SkHRESULT.h"
+
+void SkTraceHR(const char* file, unsigned long line, HRESULT hr, const char* msg) {
+ if (msg) {
+ SkDebugf("%s\n", msg);
+ }
+ SkDebugf("%s(%lu) : error 0x%x: ", file, line, hr);
+
+ LPSTR errorText = nullptr;
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr,
+ hr,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR) &errorText,
+ 0,
+ nullptr
+ );
+
+ if (nullptr == errorText) {
+ SkDebugf("<unknown>\n");
+ } else {
+ SkDebugf("%s", errorText);
+ LocalFree(errorText);
+ errorText = nullptr;
+ }
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.h b/gfx/skia/skia/src/utils/win/SkHRESULT.h
new file mode 100644
index 0000000000..80d046dfdc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHRESULT_DEFINED
+#define SkHRESULT_DEFINED
+
+#include "include/core/SkTypes.h"
+#ifdef SK_BUILD_FOR_WIN
+
+#include "src/core/SkLeanWindows.h"
+
+void SkTraceHR(const char* file, unsigned long line,
+ HRESULT hr, const char* msg);
+
+#ifdef SK_DEBUG
+#define SK_TRACEHR(_hr, _msg) SkTraceHR(__FILE__, __LINE__, _hr, _msg)
+#else
+#define SK_TRACEHR(_hr, _msg) sk_ignore_unused_variable(_hr)
+#endif
+
+#define HR_GENERAL(_ex, _msg, _ret) do {\
+ HRESULT _hr = _ex;\
+ if (FAILED(_hr)) {\
+ SK_TRACEHR(_hr, _msg);\
+ return _ret;\
+ }\
+} while(false)
+
+//@{
+/**
+These macros are for reporting HRESULT errors.
+The expression will be evaluated.
+If the resulting HRESULT SUCCEEDED then execution will continue normally.
+If the HRESULT FAILED then the macro will return from the current function.
+In variants ending with 'M' the given message will be traced when FAILED.
+The HR variants will return the HRESULT when FAILED.
+The HRB variants will return false when FAILED.
+The HRN variants will return nullptr when FAILED.
+The HRV variants will simply return when FAILED.
+The HRZ variants will return 0 when FAILED.
+*/
+#define HR(ex) HR_GENERAL(ex, nullptr, _hr)
+#define HRM(ex, msg) HR_GENERAL(ex, msg, _hr)
+
+#define HRB(ex) HR_GENERAL(ex, nullptr, false)
+#define HRBM(ex, msg) HR_GENERAL(ex, msg, false)
+
+#define HRN(ex) HR_GENERAL(ex, nullptr, nullptr)
+#define HRNM(ex, msg) HR_GENERAL(ex, msg, nullptr)
+
+#define HRV(ex) HR_GENERAL(ex, nullptr, )
+#define HRVM(ex, msg) HR_GENERAL(ex, msg, )
+
+#define HRZ(ex) HR_GENERAL(ex, nullptr, 0)
+#define HRZM(ex, msg) HR_GENERAL(ex, msg, 0)
+//@}
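+
+// Illustrative only: the variant is chosen to match the enclosing return type.
+// Here a bool-returning function uses HRBM, which traces the message in debug
+// builds and returns false if the expression FAILED (dwrite.h is assumed).
+#if 0
+static bool example(IDWriteFactory* factory, IDWriteRenderingParams** params) {
+    HRBM(factory->CreateRenderingParams(params),
+         "Could not create rendering params.");
+    return true;
+}
+#endif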
+#endif // SK_BUILD_FOR_WIN
+#endif // SkHRESULT_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.cpp b/gfx/skia/skia/src/utils/win/SkIStream.cpp
new file mode 100644
index 0000000000..93b483bf78
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkStream.h"
+#include "src/utils/win/SkIStream.h"
+
+/**
+ * SkBaseIStream
+ */
+SkBaseIStream::SkBaseIStream() : _refcount(1) { }
+SkBaseIStream::~SkBaseIStream() { }
+
+SK_STDMETHODIMP SkBaseIStream::QueryInterface(REFIID iid, void ** ppvObject) {
+ if (nullptr == ppvObject) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown)
+ || iid == __uuidof(IStream)
+ || iid == __uuidof(ISequentialStream))
+ {
+ *ppvObject = static_cast<IStream*>(this);
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkBaseIStream::AddRef() {
+ return (ULONG)InterlockedIncrement(&_refcount);
+}
+
+SK_STDMETHODIMP_(ULONG) SkBaseIStream::Release() {
+ ULONG res = (ULONG) InterlockedDecrement(&_refcount);
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkBaseIStream::Read(void* pv, ULONG cb, ULONG* pcbRead)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten)
+{ return E_NOTIMPL; }
+
+// IStream Interface
+SK_STDMETHODIMP SkBaseIStream::SetSize(ULARGE_INTEGER)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::CopyTo(IStream*, ULARGE_INTEGER, ULARGE_INTEGER*, ULARGE_INTEGER*)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Commit(DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Revert()
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Clone(IStream**)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag)
+{ return E_NOTIMPL; }
+
+
+/**
+ * SkIStream
+ */
+SkIStream::SkIStream(std::unique_ptr<SkStreamAsset> stream)
+ : SkBaseIStream()
+ , fSkStream(std::move(stream))
+ , fLocation()
+{
+ this->fSkStream->rewind();
+}
+
+SkIStream::~SkIStream() {}
+
+HRESULT SkIStream::CreateFromSkStream(std::unique_ptr<SkStreamAsset> stream, IStream** ppStream) {
+ if (nullptr == stream) {
+ return E_INVALIDARG;
+ }
+ *ppStream = new SkIStream(std::move(stream));
+ return S_OK;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkIStream::Read(void* pv, ULONG cb, ULONG* pcbRead) {
+ *pcbRead = static_cast<ULONG>(this->fSkStream->read(pv, cb));
+ this->fLocation.QuadPart += *pcbRead;
+ return (*pcbRead == cb) ? S_OK : S_FALSE;
+}
+
+SK_STDMETHODIMP SkIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten) {
+ return STG_E_CANTSAVE;
+}
+
+// IStream Interface
+SK_STDMETHODIMP SkIStream::Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer)
+{
+ HRESULT hr = S_OK;
+
+ switch(dwOrigin) {
+ case STREAM_SEEK_SET: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ size_t skip = static_cast<size_t>(liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart = skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ case STREAM_SEEK_CUR: {
+ size_t skip = static_cast<size_t>(liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart += skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ break;
+ }
+ case STREAM_SEEK_END: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ size_t skip = static_cast<size_t>(this->fSkStream->getLength() +
+ liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart = skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ default:
+ hr = STG_E_INVALIDFUNCTION;
+ break;
+ }
+
+ if (lpNewFilePointer) {
+ lpNewFilePointer->QuadPart = this->fLocation.QuadPart;
+ }
+ return hr;
+}
+
+SK_STDMETHODIMP SkIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag) {
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ pStatstg->cbSize.QuadPart = this->fSkStream->getLength();
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_READ;
+ return S_OK;
+}
+
+
+/**
+ * SkWIStream
+ */
+SkWIStream::SkWIStream(SkWStream* stream)
+ : SkBaseIStream()
+ , fSkWStream(stream)
+{ }
+
+SkWIStream::~SkWIStream() {
+ if (this->fSkWStream) {
+ this->fSkWStream->flush();
+ }
+}
+
+HRESULT SkWIStream::CreateFromSkWStream(SkWStream* stream, IStream ** ppStream) {
+ *ppStream = new SkWIStream(stream);
+ return S_OK;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkWIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten) {
+ HRESULT hr = S_OK;
+ bool wrote = this->fSkWStream->write(pv, cb);
+ if (wrote) {
+ *pcbWritten = cb;
+ } else {
+ *pcbWritten = 0;
+ hr = S_FALSE;
+ }
+ return hr;
+}
+
+// IStream Interface
+SK_STDMETHODIMP SkWIStream::Commit(DWORD) {
+ this->fSkWStream->flush();
+ return S_OK;
+}
+
+SK_STDMETHODIMP SkWIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag) {
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ pStatstg->cbSize.QuadPart = 0;
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_WRITE;
+ return S_OK;
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.h b/gfx/skia/skia/src/utils/win/SkIStream.h
new file mode 100644
index 0000000000..3c1af968a3
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIStream_DEFINED
+#define SkIStream_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "src/core/SkLeanWindows.h"
+#include "src/utils/win/SkObjBase.h"
+#include <ole2.h>
+
+class SkStream;
+class SkWStream;
+
+/**
+ * A bare IStream implementation which properly reference counts
+ * but returns E_NOTIMPL for all ISequentialStream and IStream methods.
+ */
+class SkBaseIStream : public IStream {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void ** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // ISequentialStream methods
+ SK_STDMETHODIMP Read(void* pv, ULONG cb, ULONG* pcbRead) override;
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+
+ // IStream methods
+ SK_STDMETHODIMP SetSize(ULARGE_INTEGER) override;
+ SK_STDMETHODIMP CopyTo(IStream*, ULARGE_INTEGER, ULARGE_INTEGER*, ULARGE_INTEGER*) override;
+ SK_STDMETHODIMP Commit(DWORD) override;
+ SK_STDMETHODIMP Revert() override;
+ SK_STDMETHODIMP LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override;
+ SK_STDMETHODIMP UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override;
+ SK_STDMETHODIMP Clone(IStream**) override;
+ SK_STDMETHODIMP Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+protected:
+ explicit SkBaseIStream();
+ virtual ~SkBaseIStream();
+
+private:
+ LONG _refcount;
+};
+
+/**
+ * A minimal read-only IStream implementation which wraps an SkStream.
+ */
+class SkIStream : public SkBaseIStream {
+public:
+ HRESULT static CreateFromSkStream(std::unique_ptr<SkStreamAsset>, IStream** ppStream);
+
+ SK_STDMETHODIMP Read(void* pv, ULONG cb, ULONG* pcbRead) override;
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+ SK_STDMETHODIMP Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+private:
+ const std::unique_ptr<SkStream> fSkStream;
+ ULARGE_INTEGER fLocation;
+
+ explicit SkIStream(std::unique_ptr<SkStreamAsset>);
+ ~SkIStream() override;
+};
+
+/**
+ * A minimal write-only IStream implementation which wraps an SkWStream.
+ */
+class SkWIStream : public SkBaseIStream {
+public:
+ HRESULT static CreateFromSkWStream(SkWStream* stream, IStream ** ppStream);
+
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+ SK_STDMETHODIMP Commit(DWORD) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+private:
+ SkWStream *fSkWStream;
+
+ SkWIStream(SkWStream* stream);
+ ~SkWIStream() override;
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkIStream_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkObjBase.h b/gfx/skia/skia/src/utils/win/SkObjBase.h
new file mode 100644
index 0000000000..a85cc901c1
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkObjBase.h
@@ -0,0 +1,25 @@
+/*
+* Copyright 2019 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkObjBase_DEFINED
+#define SkObjBase_DEFINED
+
+#include "src/core/SkLeanWindows.h"
+#include <objbase.h>
+
+// STDMETHOD uses COM_DECLSPEC_NOTHROW, but STDMETHODIMP does not. This leads to attribute mismatch
+// between interfaces and implementations which produces warnings. In theory a COM component should
+// never throw a c++ exception, but COM_DECLSPEC_NOTHROW allows tweaking that (as it may be useful
+// for internal-only implementations within a single project). The behavior of the attribute nothrow
+// and the keyword noexcept differs slightly, so use COM_DECLSPEC_NOTHROW instead of noexcept.
+// Older interfaces like IUnknown and IStream do not currently specify COM_DECLSPEC_NOTHROW, but it
+// is not harmful to mark the implementation more exception strict than the interface.
+
+#define SK_STDMETHODIMP COM_DECLSPEC_NOTHROW STDMETHODIMP
+#define SK_STDMETHODIMP_(type) COM_DECLSPEC_NOTHROW STDMETHODIMP_(type)
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
new file mode 100644
index 0000000000..d8cba3cb7a
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTScopedComPtr_DEFINED
+#define SkTScopedComPtr_DEFINED
+
+#include "src/core/SkLeanWindows.h"
+#include "src/utils/win/SkObjBase.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+template<typename T> T* SkRefComPtr(T* ptr) {
+ ptr->AddRef();
+ return ptr;
+}
+
+template<typename T> T* SkSafeRefComPtr(T* ptr) {
+ if (ptr) {
+ ptr->AddRef();
+ }
+ return ptr;
+}
+
+template<typename T>
+class SkTScopedComPtr {
+private:
+ T *fPtr;
+
+public:
+ constexpr SkTScopedComPtr() : fPtr(nullptr) {}
+ constexpr SkTScopedComPtr(std::nullptr_t) : fPtr(nullptr) {}
+ explicit SkTScopedComPtr(T *ptr) : fPtr(ptr) {}
+ SkTScopedComPtr(SkTScopedComPtr&& that) : fPtr(that.release()) {}
+ SkTScopedComPtr(const SkTScopedComPtr&) = delete;
+
+ ~SkTScopedComPtr() { this->reset();}
+
+ SkTScopedComPtr& operator=(SkTScopedComPtr&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+ SkTScopedComPtr& operator=(const SkTScopedComPtr&) = delete;
+ SkTScopedComPtr& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ T &operator*() const { SkASSERT(fPtr != nullptr); return *fPtr; }
+
+ explicit operator bool() const { return fPtr != nullptr; }
+
+ T *operator->() const { return fPtr; }
+
+ /**
+ * Returns the address of the underlying pointer.
+ * This is dangerous -- it breaks encapsulation and the reference escapes.
+ * Must only be used on instances currently pointing to NULL,
+ * and only to initialize the instance.
+ */
+ T **operator&() { SkASSERT(fPtr == nullptr); return &fPtr; }
+
+ T *get() const { return fPtr; }
+
+ void reset(T* ptr = nullptr) {
+ if (fPtr) {
+ fPtr->Release();
+ }
+ fPtr = ptr;
+ }
+
+ void swap(SkTScopedComPtr<T>& that) {
+ T* temp = this->fPtr;
+ this->fPtr = that.fPtr;
+ that.fPtr = temp;
+ }
+
+ T* release() {
+ T* temp = this->fPtr;
+ this->fPtr = nullptr;
+ return temp;
+ }
+};
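+
+// Illustrative only: operator& hands a COM factory the address of the internal
+// pointer, so the smart pointer must start out empty (dwrite.h is assumed).
+#if 0
+static void example() {
+    SkTScopedComPtr<IDWriteFactory> factory;
+    DWriteCreateFactory(DWRITE_FACTORY_TYPE_SHARED, __uuidof(IDWriteFactory),
+                        reinterpret_cast<IUnknown**>(&factory));
+}   // the destructor calls Release() here if creation succeeded.
+#endif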
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTScopedComPtr_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkWGL.h b/gfx/skia/skia/src/utils/win/SkWGL.h
new file mode 100644
index 0000000000..3d347a0760
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+
+#ifndef SkWGL_DEFINED
+#define SkWGL_DEFINED
+
+#include "src/core/SkLeanWindows.h"
+
+/**
+ * Working with WGL extensions can be a pain. Among the reasons is that you must
+ * have a GL context to get the proc addresses, but you want to use the procs to
+ * create a context in the first place. So you have to create a dummy GL context
+ * to get the proc addresses.
+ *
+ * This file helps by providing SkWGLExtensions, a class that initializes and
+ * exposes the WGL extension entry points as function pointers. It also has a
+ * helper to query for WGL extensions, and it handles the fact that
+ * wglGetExtensionsString is itself an extension.
+ */
+
+#define SK_WGL_DRAW_TO_WINDOW 0x2001
+#define SK_WGL_ACCELERATION 0x2003
+#define SK_WGL_SUPPORT_OPENGL 0x2010
+#define SK_WGL_DOUBLE_BUFFER 0x2011
+#define SK_WGL_COLOR_BITS 0x2014
+#define SK_WGL_RED_BITS 0x2015
+#define SK_WGL_GREEN_BITS 0x2017
+#define SK_WGL_BLUE_BITS 0x2019
+#define SK_WGL_ALPHA_BITS 0x201B
+#define SK_WGL_STENCIL_BITS 0x2023
+#define SK_WGL_FULL_ACCELERATION 0x2027
+#define SK_WGL_SAMPLE_BUFFERS 0x2041
+#define SK_WGL_SAMPLES 0x2042
+#define SK_WGL_CONTEXT_MAJOR_VERSION 0x2091
+#define SK_WGL_CONTEXT_MINOR_VERSION 0x2092
+#define SK_WGL_CONTEXT_LAYER_PLANE 0x2093
+#define SK_WGL_CONTEXT_FLAGS 0x2094
+#define SK_WGL_CONTEXT_PROFILE_MASK 0x9126
+#define SK_WGL_CONTEXT_DEBUG_BIT 0x0001
+#define SK_WGL_CONTEXT_FORWARD_COMPATIBLE_BIT 0x0002
+#define SK_WGL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define SK_WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define SK_WGL_CONTEXT_ES2_PROFILE_BIT 0x00000004
+#define SK_ERROR_INVALID_VERSION 0x2095
+#define SK_ERROR_INVALID_PROFILE 0x2096
+
+DECLARE_HANDLE(HPBUFFER);
+
+class SkWGLExtensions {
+public:
+ SkWGLExtensions();
+ /**
+     * Determines if an extension is available for a given DC.
+ * WGL_extensions_string is considered a prerequisite for all other
+ * extensions. It is necessary to check this before calling other class
+ * functions.
+ */
+ bool hasExtension(HDC dc, const char* ext) const;
+
+ const char* getExtensionsString(HDC hdc) const;
+ BOOL choosePixelFormat(HDC hdc, const int*, const FLOAT*, UINT, int*, UINT*) const;
+ BOOL getPixelFormatAttribiv(HDC, int, int, UINT, const int*, int*) const;
+ BOOL getPixelFormatAttribfv(HDC hdc, int, int, UINT, const int*, FLOAT*) const;
+ HGLRC createContextAttribs(HDC, HGLRC, const int *) const;
+
+ BOOL swapInterval(int interval) const;
+
+    HPBUFFER createPbuffer(HDC, int, int, int, const int*) const;
+ HDC getPbufferDC(HPBUFFER) const;
+ int releasePbufferDC(HPBUFFER, HDC) const;
+ BOOL destroyPbuffer(HPBUFFER) const;
+
+ /**
+ * WGL doesn't have precise rules for the ordering of formats returned
+ * by wglChoosePixelFormat. This function helps choose among that set of
+ * formats. The rules, in decreasing priority, are:
+ * * Choose formats with the smallest sample count that is >=
+ * desiredSampleCount (or the largest sample count if all formats have
+ * fewer samples than desiredSampleCount). If desiredSampleCount is 1 then
+ * all MSAA formats are excluded from consideration.
+ * * Choose formats with the fewest color samples when coverage sampling
+ * is available.
+ * * If the above rules leave multiple formats, choose the one that
+ * appears first in the formats array parameter.
+ */
+ int selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const;
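+ // For example (illustrative): given formats with sample counts
+ // {1, 4, 8, 16} and desiredSampleCount == 4, the 4-sample format is
+ // chosen; with desiredSampleCount == 32, the 16-sample format (the
+ // largest available) is chosen. Ties on sample count go to the format
+ // listed earlier in 'formats'.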
+private:
+ typedef const char* (WINAPI *GetExtensionsStringProc)(HDC);
+ typedef BOOL (WINAPI *ChoosePixelFormatProc)(HDC, const int *, const FLOAT *, UINT, int *, UINT *);
+ typedef BOOL (WINAPI *GetPixelFormatAttribivProc)(HDC, int, int, UINT, const int*, int*);
+ typedef BOOL (WINAPI *GetPixelFormatAttribfvProc)(HDC, int, int, UINT, const int*, FLOAT*);
+ typedef HGLRC (WINAPI *CreateContextAttribsProc)(HDC, HGLRC, const int *);
+ typedef BOOL (WINAPI* SwapIntervalProc)(int);
+ typedef HPBUFFER (WINAPI* CreatePbufferProc)(HDC, int, int, int, const int*);
+ typedef HDC (WINAPI* GetPbufferDCProc)(HPBUFFER);
+ typedef int (WINAPI* ReleasePbufferDCProc)(HPBUFFER, HDC);
+ typedef BOOL (WINAPI* DestroyPbufferProc)(HPBUFFER);
+
+ static GetExtensionsStringProc fGetExtensionsString;
+ static ChoosePixelFormatProc fChoosePixelFormat;
+ static GetPixelFormatAttribfvProc fGetPixelFormatAttribfv;
+ static GetPixelFormatAttribivProc fGetPixelFormatAttribiv;
+ static CreateContextAttribsProc fCreateContextAttribs;
+ static SwapIntervalProc fSwapInterval;
+ static CreatePbufferProc fCreatePbuffer;
+ static GetPbufferDCProc fGetPbufferDC;
+ static ReleasePbufferDCProc fReleasePbufferDC;
+ static DestroyPbufferProc fDestroyPbuffer;
+};
+
+enum SkWGLContextRequest {
+ /** Requests to create core profile context if possible, otherwise
+ compatibility profile. */
+ kGLPreferCoreProfile_SkWGLContextRequest,
+ /** Requests to create compatibility profile context if possible, otherwise
+ core profile. */
+ kGLPreferCompatibilityProfile_SkWGLContextRequest,
+ /** Requests to create GL ES profile context. */
+ kGLES_SkWGLContextRequest
+};
+/**
+ * Helper to create an OpenGL context for a DC using WGL. Configs with a sample count >=
+ * msaaSampleCount are preferred, but if none is available then a context with a lower sample count
+ * (including non-MSAA) will be created. If msaaSampleCount is 1 then this will fail if a non-MSAA
+ * context cannot be created. If a core profile is requested but cannot be created, a
+ * compatibility profile context will be created instead.
+ */
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor, SkWGLContextRequest context,
+ HGLRC shareContext = nullptr);
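+
+// e.g. (illustrative sketch; 'dc' is an assumed, valid HDC): request a 4x MSAA
+// core profile context with no sharing:
+//
+// HGLRC glrc = SkCreateWGLContext(dc, 4, false,
+// kGLPreferCoreProfile_SkWGLContextRequest);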
+
+/**
+ * Helper class for creating a pbuffer context and deleting all the handles when finished. This
+ * requires that a device context has been created. However, the pbuffer gets its own device
+ * context. The original device context can be released once the pbuffer context is created.
+ */
+class SkWGLPbufferContext : public SkRefCnt {
+public:
+ static sk_sp<SkWGLPbufferContext> Create(HDC parentDC, SkWGLContextRequest contextType,
+ HGLRC shareContext);
+
+ virtual ~SkWGLPbufferContext();
+
+ HDC getDC() const { return fDC; }
+ HGLRC getGLRC() const { return fGLRC; }
+
+private:
+ SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc);
+
+ HPBUFFER fPbuffer;
+ HDC fDC;
+ HGLRC fGLRC;
+ SkWGLExtensions fExtensions;
+};
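+
+// Offscreen sketch (illustrative only; 'parentDC' is an assumed, valid HDC):
+//
+// sk_sp<SkWGLPbufferContext> pbc = SkWGLPbufferContext::Create(
+// parentDC, kGLPreferCompatibilityProfile_SkWGLContextRequest, nullptr);
+// if (pbc) {
+// wglMakeCurrent(pbc->getDC(), pbc->getGLRC());
+// }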
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkWGL_win.cpp b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
new file mode 100644
index 0000000000..559fc4a25b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN) && !defined(_M_ARM64)
+
+#include "src/utils/win/SkWGL.h"
+
+#include "include/private/SkOnce.h"
+#include "include/private/SkTDArray.h"
+#include "src/core/SkTSearch.h"
+#include "src/core/SkTSort.h"
+
+bool SkWGLExtensions::hasExtension(HDC dc, const char* ext) const {
+ if (nullptr == this->fGetExtensionsString) {
+ return false;
+ }
+ if (!strcmp("WGL_ARB_extensions_string", ext)) {
+ return true;
+ }
+ const char* extensionString = this->getExtensionsString(dc);
+ size_t extLength = strlen(ext);
+
+ while (true) {
+ size_t n = strcspn(extensionString, " ");
+ if (n == extLength && 0 == strncmp(ext, extensionString, n)) {
+ return true;
+ }
+ if (0 == extensionString[n]) {
+ return false;
+ }
+ extensionString += n+1;
+ }
+
+ return false;
+}
+
+const char* SkWGLExtensions::getExtensionsString(HDC hdc) const {
+ return fGetExtensionsString(hdc);
+}
+
+BOOL SkWGLExtensions::choosePixelFormat(HDC hdc,
+ const int* piAttribIList,
+ const FLOAT* pfAttribFList,
+ UINT nMaxFormats,
+ int* piFormats,
+ UINT* nNumFormats) const {
+ return fChoosePixelFormat(hdc, piAttribIList, pfAttribFList,
+ nMaxFormats, piFormats, nNumFormats);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribiv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ int *piValues) const {
+ return fGetPixelFormatAttribiv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, piValues);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribfv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ float *pfValues) const {
+ return fGetPixelFormatAttribfv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, pfValues);
+}
+HGLRC SkWGLExtensions::createContextAttribs(HDC hDC,
+ HGLRC hShareContext,
+ const int *attribList) const {
+ return fCreateContextAttribs(hDC, hShareContext, attribList);
+}
+
+BOOL SkWGLExtensions::swapInterval(int interval) const {
+ return fSwapInterval(interval);
+}
+
+HPBUFFER SkWGLExtensions::createPbuffer(HDC hDC,
+ int iPixelFormat,
+ int iWidth,
+ int iHeight,
+ const int *piAttribList) const {
+ return fCreatePbuffer(hDC, iPixelFormat, iWidth, iHeight, piAttribList);
+}
+
+HDC SkWGLExtensions::getPbufferDC(HPBUFFER hPbuffer) const {
+ return fGetPbufferDC(hPbuffer);
+}
+
+int SkWGLExtensions::releasePbufferDC(HPBUFFER hPbuffer, HDC hDC) const {
+ return fReleasePbufferDC(hPbuffer, hDC);
+}
+
+BOOL SkWGLExtensions::destroyPbuffer(HPBUFFER hPbuffer) const {
+ return fDestroyPbuffer(hPbuffer);
+}
+
+namespace {
+
+struct PixelFormat {
+ int fFormat;
+ int fSampleCnt;
+ int fChoosePixelFormatRank;
+};
+
+bool pf_less(const PixelFormat& a, const PixelFormat& b) {
+ if (a.fSampleCnt < b.fSampleCnt) {
+ return true;
+ } else if (b.fSampleCnt < a.fSampleCnt) {
+ return false;
+ } else if (a.fChoosePixelFormatRank < b.fChoosePixelFormatRank) {
+ return true;
+ }
+ return false;
+}
+} // namespace
+
+int SkWGLExtensions::selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const {
+ SkASSERT(desiredSampleCount >= 1);
+ if (formatCount <= 0) {
+ return -1;
+ }
+ PixelFormat desiredFormat = {
+ 0,
+ desiredSampleCount,
+ 0,
+ };
+ SkTDArray<PixelFormat> rankedFormats;
+ rankedFormats.setCount(formatCount);
+ for (int i = 0; i < formatCount; ++i) {
+ static const int kQueryAttr = SK_WGL_SAMPLES;
+ int numSamples;
+ this->getPixelFormatAttribiv(dc,
+ formats[i],
+ 0,
+ 1,
+ &kQueryAttr,
+ &numSamples);
+ rankedFormats[i].fFormat = formats[i];
+ rankedFormats[i].fSampleCnt = SkTMax(1, numSamples);
+ rankedFormats[i].fChoosePixelFormatRank = i;
+ }
+ SkTQSort(rankedFormats.begin(),
+ rankedFormats.begin() + rankedFormats.count() - 1,
+ SkTLessFunctionToFunctorAdaptor<PixelFormat, pf_less>());
+ int idx = SkTSearch<PixelFormat, pf_less>(rankedFormats.begin(),
+ rankedFormats.count(),
+ desiredFormat,
+ sizeof(PixelFormat));
+ if (idx < 0) {
+ idx = ~idx;
+ }
+ // The insertion point can be past the end when every format has fewer
+ // samples than desiredSampleCount; fall back to the format with the most
+ // samples, per the documented selection rules.
+ if (idx >= rankedFormats.count()) {
+ idx = rankedFormats.count() - 1;
+ }
+ // If the caller asked for non-MSAA, fail if the closest format has MSAA.
+ if (desiredSampleCount == 1 && rankedFormats[idx].fSampleCnt != 1) {
+ return -1;
+ }
+ return rankedFormats[idx].fFormat;
+}
+
+
+namespace {
+
+#if defined(UNICODE)
+ #define STR_LIT(X) L## #X
+#else
+ #define STR_LIT(X) #X
+#endif
+
+#define DUMMY_CLASS STR_LIT("DummyClass")
+
+HWND create_dummy_window() {
+ HMODULE module = GetModuleHandle(nullptr);
+ HWND dummy;
+ RECT windowRect;
+ windowRect.left = 0;
+ windowRect.right = 8;
+ windowRect.top = 0;
+ windowRect.bottom = 8;
+
+ WNDCLASS wc;
+
+ wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
+ wc.lpfnWndProc = (WNDPROC) DefWindowProc;
+ wc.cbClsExtra = 0;
+ wc.cbWndExtra = 0;
+ wc.hInstance = module;
+ wc.hIcon = LoadIcon(nullptr, IDI_WINLOGO);
+ wc.hCursor = LoadCursor(nullptr, IDC_ARROW);
+ wc.hbrBackground = nullptr;
+ wc.lpszMenuName = nullptr;
+ wc.lpszClassName = DUMMY_CLASS;
+
+ if (!RegisterClass(&wc)) {
+ return nullptr;
+ }
+
+ DWORD style, exStyle;
+ exStyle = WS_EX_CLIENTEDGE;
+ style = WS_SYSMENU;
+
+ AdjustWindowRectEx(&windowRect, style, false, exStyle);
+ if (!(dummy = CreateWindowEx(exStyle,
+ DUMMY_CLASS,
+ STR_LIT("DummyWindow"),
+ WS_CLIPSIBLINGS | WS_CLIPCHILDREN | style,
+ 0, 0,
+ windowRect.right-windowRect.left,
+ windowRect.bottom-windowRect.top,
+ nullptr, nullptr,
+ module,
+ nullptr))) {
+ UnregisterClass(DUMMY_CLASS, module);
+ return nullptr;
+ }
+ ShowWindow(dummy, SW_HIDE);
+
+ return dummy;
+}
+
+void destroy_dummy_window(HWND dummy) {
+ DestroyWindow(dummy);
+ HMODULE module = GetModuleHandle(nullptr);
+ UnregisterClass(DUMMY_CLASS, module);
+}
+} // namespace
+
+#define GET_PROC(NAME, SUFFIX) f##NAME = \
+ (NAME##Proc) wglGetProcAddress("wgl" #NAME #SUFFIX)
+
+
+SkWGLExtensions::GetExtensionsStringProc SkWGLExtensions::fGetExtensionsString = nullptr;
+SkWGLExtensions::ChoosePixelFormatProc SkWGLExtensions::fChoosePixelFormat = nullptr;
+SkWGLExtensions::GetPixelFormatAttribfvProc SkWGLExtensions::fGetPixelFormatAttribfv = nullptr;
+SkWGLExtensions::GetPixelFormatAttribivProc SkWGLExtensions::fGetPixelFormatAttribiv = nullptr;
+SkWGLExtensions::CreateContextAttribsProc SkWGLExtensions::fCreateContextAttribs = nullptr;
+SkWGLExtensions::SwapIntervalProc SkWGLExtensions::fSwapInterval = nullptr;
+SkWGLExtensions::CreatePbufferProc SkWGLExtensions::fCreatePbuffer = nullptr;
+SkWGLExtensions::GetPbufferDCProc SkWGLExtensions::fGetPbufferDC = nullptr;
+SkWGLExtensions::ReleasePbufferDCProc SkWGLExtensions::fReleasePbufferDC = nullptr;
+SkWGLExtensions::DestroyPbufferProc SkWGLExtensions::fDestroyPbuffer = nullptr;
+
+SkWGLExtensions::SkWGLExtensions() {
+ // We cache these function pointers once, and then reuse them. That's possibly incorrect if
+ // there are multiple GPUs, or if we intend to use these for rendering contexts of different
+ // pixel formats (where wglGetProcAddress is not guaranteed to return the same pointer).
+ static SkOnce once;
+ once([] {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ PIXELFORMATDESCRIPTOR dummyPFD;
+
+ ZeroMemory(&dummyPFD, sizeof(dummyPFD));
+ dummyPFD.nSize = sizeof(dummyPFD);
+ dummyPFD.nVersion = 1;
+ dummyPFD.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL;
+ dummyPFD.iPixelType = PFD_TYPE_RGBA;
+ dummyPFD.cColorBits = 32;
+ dummyPFD.cDepthBits = 0;
+ dummyPFD.cStencilBits = 8;
+ dummyPFD.iLayerType = PFD_MAIN_PLANE;
+ HWND dummyWND = create_dummy_window();
+ if (dummyWND) {
+ HDC dummyDC = GetDC(dummyWND);
+ int dummyFormat = ChoosePixelFormat(dummyDC, &dummyPFD);
+ SetPixelFormat(dummyDC, dummyFormat, &dummyPFD);
+ HGLRC dummyGLRC = wglCreateContext(dummyDC);
+ SkASSERT(dummyGLRC);
+ wglMakeCurrent(dummyDC, dummyGLRC);
+
+ GET_PROC(GetExtensionsString, ARB);
+ GET_PROC(ChoosePixelFormat, ARB);
+ GET_PROC(GetPixelFormatAttribiv, ARB);
+ GET_PROC(GetPixelFormatAttribfv, ARB);
+ GET_PROC(CreateContextAttribs, ARB);
+ GET_PROC(SwapInterval, EXT);
+ GET_PROC(CreatePbuffer, ARB);
+ GET_PROC(GetPbufferDC, ARB);
+ GET_PROC(ReleasePbufferDC, ARB);
+ GET_PROC(DestroyPbuffer, ARB);
+
+ wglMakeCurrent(dummyDC, nullptr);
+ wglDeleteContext(dummyGLRC);
+ destroy_dummy_window(dummyWND);
+ }
+
+ wglMakeCurrent(prevDC, prevGLRC);
+ });
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void get_pixel_formats_to_try(HDC dc, const SkWGLExtensions& extensions,
+ bool doubleBuffered, int msaaSampleCount, bool deepColor,
+ int formatsToTry[2]) {
+ auto appendAttr = [](SkTDArray<int>& attrs, int attr, int value) {
+ attrs.push_back(attr);
+ attrs.push_back(value);
+ };
+
+ SkTDArray<int> iAttrs;
+ appendAttr(iAttrs, SK_WGL_DRAW_TO_WINDOW, TRUE);
+ appendAttr(iAttrs, SK_WGL_DOUBLE_BUFFER, (doubleBuffered ? TRUE : FALSE));
+ appendAttr(iAttrs, SK_WGL_ACCELERATION, SK_WGL_FULL_ACCELERATION);
+ appendAttr(iAttrs, SK_WGL_SUPPORT_OPENGL, TRUE);
+ if (deepColor) {
+ appendAttr(iAttrs, SK_WGL_RED_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_GREEN_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_BLUE_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 2);
+ } else {
+ appendAttr(iAttrs, SK_WGL_COLOR_BITS, 24);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 8);
+ }
+ appendAttr(iAttrs, SK_WGL_STENCIL_BITS, 8);
+
+ float fAttrs[] = {0, 0};
+
+ // Get a MSAA format if requested and possible.
+ if (msaaSampleCount > 0 &&
+ extensions.hasExtension(dc, "WGL_ARB_multisample")) {
+ SkTDArray<int> msaaIAttrs = iAttrs;
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLE_BUFFERS, TRUE);
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLES, msaaSampleCount);
+ appendAttr(msaaIAttrs, 0, 0);
+ unsigned int num;
+ int formats[64];
+ extensions.choosePixelFormat(dc, msaaIAttrs.begin(), fAttrs, 64, formats, &num);
+ num = SkTMin(num, 64U);
+ formatsToTry[0] = extensions.selectFormat(formats, num, dc, msaaSampleCount);
+ }
+
+ // Get a non-MSAA format
+ int* format = -1 == formatsToTry[0] ? &formatsToTry[0] : &formatsToTry[1];
+ unsigned int num;
+ appendAttr(iAttrs, 0, 0);
+ extensions.choosePixelFormat(dc, iAttrs.begin(), fAttrs, 1, format, &num);
+}
+
+static HGLRC create_gl_context(HDC dc, const SkWGLExtensions& extensions,
+ SkWGLContextRequest contextType, HGLRC shareContext) {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ HGLRC glrc = nullptr;
+ if (kGLES_SkWGLContextRequest == contextType) {
+ if (!extensions.hasExtension(dc, "WGL_EXT_create_context_es2_profile")) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ static const int glesAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, 3,
+ SK_WGL_CONTEXT_MINOR_VERSION, 0,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_ES2_PROFILE_BIT,
+ 0,
+ };
+ glrc = extensions.createContextAttribs(dc, shareContext, glesAttribs);
+ if (nullptr == glrc) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ } else {
+ if (kGLPreferCoreProfile_SkWGLContextRequest == contextType &&
+ extensions.hasExtension(dc, "WGL_ARB_create_context")) {
+ static const int kCoreGLVersions[] = {
+ 4, 3,
+ 4, 2,
+ 4, 1,
+ 4, 0,
+ 3, 3,
+ 3, 2,
+ };
+ int coreProfileAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, -1,
+ SK_WGL_CONTEXT_MINOR_VERSION, -1,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_CORE_PROFILE_BIT,
+ 0,
+ };
+ for (size_t v = 0; v < SK_ARRAY_COUNT(kCoreGLVersions) / 2; ++v) {
+ coreProfileAttribs[1] = kCoreGLVersions[2 * v];
+ coreProfileAttribs[3] = kCoreGLVersions[2 * v + 1];
+ glrc = extensions.createContextAttribs(dc, shareContext, coreProfileAttribs);
+ if (glrc) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (nullptr == glrc) {
+ glrc = wglCreateContext(dc);
+ if (shareContext) {
+ if (!wglShareLists(shareContext, glrc)) {
+ wglDeleteContext(glrc);
+ return nullptr;
+ }
+ }
+ }
+ SkASSERT(glrc);
+
+ wglMakeCurrent(prevDC, prevGLRC);
+
+ return glrc;
+}
+
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor,
+ SkWGLContextRequest contextType, HGLRC shareContext) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(dc, "WGL_ARB_pixel_format")) {
+ return nullptr;
+ }
+
+ BOOL set = FALSE;
+
+ int pixelFormatsToTry[] = { -1, -1 };
+ get_pixel_formats_to_try(dc, extensions, true, msaaSampleCount, deepColor, pixelFormatsToTry);
+ // Check the bound on f before indexing into pixelFormatsToTry.
+ for (size_t f = 0;
+ !set && f < SK_ARRAY_COUNT(pixelFormatsToTry) && -1 != pixelFormatsToTry[f];
+ ++f) {
+ PIXELFORMATDESCRIPTOR pfd;
+ DescribePixelFormat(dc, pixelFormatsToTry[f], sizeof(pfd), &pfd);
+ set = SetPixelFormat(dc, pixelFormatsToTry[f], &pfd);
+ }
+
+ if (!set) {
+ return nullptr;
+ }
+
+ return create_gl_context(dc, extensions, contextType, shareContext);
+}
+
+sk_sp<SkWGLPbufferContext> SkWGLPbufferContext::Create(HDC parentDC,
+ SkWGLContextRequest contextType,
+ HGLRC shareContext) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(parentDC, "WGL_ARB_pixel_format") ||
+ !extensions.hasExtension(parentDC, "WGL_ARB_pbuffer")) {
+ return nullptr;
+ }
+
+ // We cache the pixel formats once, and then reuse them. That's possibly incorrect if
+ // there are multiple GPUs, but this function is always called with a freshly made,
+ // identically constructed HDC (see WinGLTestContext).
+ //
+ // We only store two potential pixel formats, one for single buffer, one for double buffer.
+ // We never ask for MSAA, so we don't need the second pixel format for each buffering state.
+ static int gPixelFormats[2] = { -1, -1 };
+ static SkOnce once;
+ once([=] {
+ {
+ // Single buffer
+ int pixelFormatsToTry[2] = { -1, -1 };
+ get_pixel_formats_to_try(parentDC, extensions, false, 0, false, pixelFormatsToTry);
+ gPixelFormats[0] = pixelFormatsToTry[0];
+ }
+ {
+ // Double buffer
+ int pixelFormatsToTry[2] = { -1, -1 };
+ get_pixel_formats_to_try(parentDC, extensions, true, 0, false, pixelFormatsToTry);
+ gPixelFormats[1] = pixelFormatsToTry[0];
+ }
+ });
+
+ // try for single buffer first
+ for (int pixelFormat : gPixelFormats) {
+ if (-1 == pixelFormat) {
+ continue;
+ }
+ HPBUFFER pbuf = extensions.createPbuffer(parentDC, pixelFormat, 1, 1, nullptr);
+ if (0 != pbuf) {
+ HDC dc = extensions.getPbufferDC(pbuf);
+ if (dc) {
+ HGLRC glrc = create_gl_context(dc, extensions, contextType, shareContext);
+ if (glrc) {
+ return sk_sp<SkWGLPbufferContext>(new SkWGLPbufferContext(pbuf, dc, glrc));
+ }
+ extensions.releasePbufferDC(pbuf, dc);
+ }
+ extensions.destroyPbuffer(pbuf);
+ }
+ }
+ return nullptr;
+}
+
+SkWGLPbufferContext::~SkWGLPbufferContext() {
+ SkASSERT(fExtensions.hasExtension(fDC, "WGL_ARB_pbuffer"));
+ wglDeleteContext(fGLRC);
+ fExtensions.releasePbufferDC(fPbuffer, fDC);
+ fExtensions.destroyPbuffer(fPbuffer);
+}
+
+SkWGLPbufferContext::SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc)
+ : fPbuffer(pbuffer)
+ , fDC(dc)
+ , fGLRC(glrc) {
+}
+
+#endif // defined(SK_BUILD_FOR_WIN) && !defined(_M_ARM64)
diff --git a/gfx/skia/skia/src/xml/SkDOM.cpp b/gfx/skia/skia/src/xml/SkDOM.cpp
new file mode 100644
index 0000000000..0f391354a0
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkDOM.cpp
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/xml/SkDOM.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/SkTo.h"
+#include "src/xml/SkXMLParser.h"
+#include "src/xml/SkXMLWriter.h"
+
+bool SkXMLParser::parse(const SkDOM& dom, const SkDOMNode* node) {
+ const char* elemName = dom.getName(node);
+
+ if (this->startElement(elemName)) {
+ return false;
+ }
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name, *value;
+
+ while ((name = iter.next(&value)) != nullptr) {
+ if (this->addAttribute(name, value)) {
+ return false;
+ }
+ }
+
+ if ((node = dom.getFirstChild(node)) != nullptr) {
+ do {
+ if (!this->parse(dom, node)) {
+ return false;
+ }
+ } while ((node = dom.getNextSibling(node)) != nullptr);
+ }
+ return !this->endElement(elemName);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+struct SkDOMAttr {
+ const char* fName;
+ const char* fValue;
+};
+
+struct SkDOMNode {
+ const char* fName;
+ SkDOMNode* fFirstChild;
+ SkDOMNode* fNextSibling;
+ SkDOMAttr* fAttrs;
+ uint16_t fAttrCount;
+ uint8_t fType;
+ uint8_t fPad;
+
+ const SkDOMAttr* attrs() const {
+ return fAttrs;
+ }
+
+ SkDOMAttr* attrs() {
+ return fAttrs;
+ }
+};
+
+/////////////////////////////////////////////////////////////////////////
+
+#define kMinChunkSize 4096
+
+SkDOM::SkDOM() : fAlloc(kMinChunkSize), fRoot(nullptr) {}
+
+SkDOM::~SkDOM() {}
+
+const SkDOM::Node* SkDOM::getRootNode() const {
+ return fRoot;
+}
+
+const SkDOM::Node* SkDOM::getFirstChild(const Node* node, const char name[]) const {
+ SkASSERT(node);
+ const Node* child = node->fFirstChild;
+
+ if (name) {
+ for (; child != nullptr; child = child->fNextSibling) {
+ if (!strcmp(name, child->fName)) {
+ break;
+ }
+ }
+ }
+ return child;
+}
+
+const SkDOM::Node* SkDOM::getNextSibling(const Node* node, const char name[]) const {
+ SkASSERT(node);
+ const Node* sibling = node->fNextSibling;
+ if (name) {
+ for (; sibling != nullptr; sibling = sibling->fNextSibling) {
+ if (!strcmp(name, sibling->fName)) {
+ break;
+ }
+ }
+ }
+ return sibling;
+}
+
+SkDOM::Type SkDOM::getType(const Node* node) const {
+ SkASSERT(node);
+ return (Type)node->fType;
+}
+
+const char* SkDOM::getName(const Node* node) const {
+ SkASSERT(node);
+ return node->fName;
+}
+
+const char* SkDOM::findAttr(const Node* node, const char name[]) const {
+ SkASSERT(node);
+ const Attr* attr = node->attrs();
+ const Attr* stop = attr + node->fAttrCount;
+
+ while (attr < stop) {
+ if (!strcmp(attr->fName, name)) {
+ return attr->fValue;
+ }
+ attr += 1;
+ }
+ return nullptr;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+const SkDOM::Attr* SkDOM::getFirstAttr(const Node* node) const {
+ return node->fAttrCount ? node->attrs() : nullptr;
+}
+
+const SkDOM::Attr* SkDOM::getNextAttr(const Node* node, const Attr* attr) const {
+ SkASSERT(node);
+ if (attr == nullptr) {
+ return nullptr;
+ }
+ return (attr - node->attrs() + 1) < node->fAttrCount ? attr + 1 : nullptr;
+}
+
+const char* SkDOM::getAttrName(const Node* node, const Attr* attr) const {
+ SkASSERT(node);
+ SkASSERT(attr);
+ return attr->fName;
+}
+
+const char* SkDOM::getAttrValue(const Node* node, const Attr* attr) const {
+ SkASSERT(node);
+ SkASSERT(attr);
+ return attr->fValue;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+SkDOM::AttrIter::AttrIter(const SkDOM&, const SkDOM::Node* node) {
+ SkASSERT(node);
+ fAttr = node->attrs();
+ fStop = fAttr + node->fAttrCount;
+}
+
+const char* SkDOM::AttrIter::next(const char** value) {
+ const char* name = nullptr;
+
+ if (fAttr < fStop) {
+ name = fAttr->fName;
+ if (value)
+ *value = fAttr->fValue;
+ fAttr += 1;
+ }
+ return name;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#include "include/private/SkTDArray.h"
+#include "src/xml/SkXMLParser.h"
+
+static char* dupstr(SkArenaAlloc* chunk, const char src[]) {
+ SkASSERT(chunk && src);
+ size_t len = strlen(src);
+ char* dst = chunk->makeArrayDefault<char>(len + 1);
+ memcpy(dst, src, len + 1);
+ return dst;
+}
+
+class SkDOMParser : public SkXMLParser {
+public:
+ SkDOMParser(SkArenaAlloc* chunk) : SkXMLParser(&fParserError), fAlloc(chunk) {
+ fAlloc->reset();
+ fRoot = nullptr;
+ fLevel = 0;
+ fNeedToFlush = true;
+ }
+ SkDOM::Node* getRoot() const { return fRoot; }
+ SkXMLParserError fParserError;
+
+protected:
+ void flushAttributes() {
+ SkASSERT(fLevel > 0);
+
+ int attrCount = fAttrs.count();
+
+ SkDOMAttr* attrs = fAlloc->makeArrayDefault<SkDOMAttr>(attrCount);
+ SkDOM::Node* node = fAlloc->make<SkDOM::Node>();
+
+ node->fName = fElemName;
+ node->fFirstChild = nullptr;
+ node->fAttrCount = SkToU16(attrCount);
+ node->fAttrs = attrs;
+ node->fType = fElemType;
+
+ if (fRoot == nullptr) {
+ node->fNextSibling = nullptr;
+ fRoot = node;
+ } else { // This adds siblings in reverse order; it gets corrected in onEndElement().
+ SkDOM::Node* parent = fParentStack.top();
+ SkASSERT(fRoot && parent);
+ node->fNextSibling = parent->fFirstChild;
+ parent->fFirstChild = node;
+ }
+ *fParentStack.push() = node;
+
+ sk_careful_memcpy(node->attrs(), fAttrs.begin(), attrCount * sizeof(SkDOM::Attr));
+ fAttrs.reset();
+
+ }
+
+ bool onStartElement(const char elem[]) override {
+ this->startCommon(elem, SkDOM::kElement_Type);
+ return false;
+ }
+
+ bool onAddAttribute(const char name[], const char value[]) override {
+ SkDOM::Attr* attr = fAttrs.append();
+ attr->fName = dupstr(fAlloc, name);
+ attr->fValue = dupstr(fAlloc, value);
+ return false;
+ }
+
+ bool onEndElement(const char elem[]) override {
+ --fLevel;
+ if (fNeedToFlush)
+ this->flushAttributes();
+ fNeedToFlush = false;
+
+ SkDOM::Node* parent;
+
+ fParentStack.pop(&parent);
+
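+ // flushAttributes() prepended each child, so the list is currently in
+ // reverse document order; reverse it in place to restore the original
+ // order.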
+ SkDOM::Node* child = parent->fFirstChild;
+ SkDOM::Node* prev = nullptr;
+ while (child) {
+ SkDOM::Node* next = child->fNextSibling;
+ child->fNextSibling = prev;
+ prev = child;
+ child = next;
+ }
+ parent->fFirstChild = prev;
+ return false;
+ }
+
+ bool onText(const char text[], int len) override {
+ SkString str(text, len);
+ this->startCommon(str.c_str(), SkDOM::kText_Type);
+ this->SkDOMParser::onEndElement(str.c_str());
+
+ return false;
+ }
+
+private:
+ void startCommon(const char elem[], SkDOM::Type type) {
+ if (fLevel > 0 && fNeedToFlush) {
+ this->flushAttributes();
+ }
+ fNeedToFlush = true;
+ fElemName = dupstr(fAlloc, elem);
+ fElemType = type;
+ ++fLevel;
+ }
+
+ SkTDArray<SkDOM::Node*> fParentStack;
+ SkArenaAlloc* fAlloc;
+ SkDOM::Node* fRoot;
+ bool fNeedToFlush;
+
+ // state needed for flushAttributes()
+ SkTDArray<SkDOM::Attr> fAttrs;
+ char* fElemName;
+ SkDOM::Type fElemType;
+ int fLevel;
+};
+
+const SkDOM::Node* SkDOM::build(SkStream& docStream) {
+ SkDOMParser parser(&fAlloc);
+ if (!parser.parse(docStream)) {
+ SkDEBUGCODE(SkDebugf("xml parse error, line %d\n", parser.fParserError.getLineNumber());)
+ fRoot = nullptr;
+ fAlloc.reset();
+ return nullptr;
+ }
+ fRoot = parser.getRoot();
+ return fRoot;
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+static void walk_dom(const SkDOM& dom, const SkDOM::Node* node, SkXMLParser* parser) {
+ const char* elem = dom.getName(node);
+ if (dom.getType(node) == SkDOM::kText_Type) {
+ SkASSERT(dom.countChildren(node) == 0);
+ parser->text(elem, SkToInt(strlen(elem)));
+ return;
+ }
+
+ parser->startElement(elem);
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name;
+ const char* value;
+ while ((name = iter.next(&value)) != nullptr)
+ parser->addAttribute(name, value);
+
+ node = dom.getFirstChild(node, nullptr);
+ while (node) {
+ walk_dom(dom, node, parser);
+ node = dom.getNextSibling(node, nullptr);
+ }
+
+ parser->endElement(elem);
+}
+
+const SkDOM::Node* SkDOM::copy(const SkDOM& dom, const SkDOM::Node* node) {
+ SkDOMParser parser(&fAlloc);
+
+ walk_dom(dom, node, &parser);
+
+ fRoot = parser.getRoot();
+ return fRoot;
+}
+
+SkXMLParser* SkDOM::beginParsing() {
+ SkASSERT(!fParser);
+ fParser.reset(new SkDOMParser(&fAlloc));
+
+ return fParser.get();
+}
+
+const SkDOM::Node* SkDOM::finishParsing() {
+ SkASSERT(fParser);
+ fRoot = fParser->getRoot();
+ fParser.reset();
+
+ return fRoot;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+int SkDOM::countChildren(const Node* node, const char elem[]) const {
+ int count = 0;
+
+ node = this->getFirstChild(node, elem);
+ while (node) {
+ count += 1;
+ node = this->getNextSibling(node, elem);
+ }
+ return count;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+#include "include/utils/SkParse.h"
+
+bool SkDOM::findS32(const Node* node, const char name[], int32_t* value) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindS32(vstr, value);
+}
+
+bool SkDOM::findScalars(const Node* node, const char name[], SkScalar value[], int count) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindScalars(vstr, value, count);
+}
+
+bool SkDOM::findHex(const Node* node, const char name[], uint32_t* value) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindHex(vstr, value);
+}
+
+bool SkDOM::findBool(const Node* node, const char name[], bool* value) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindBool(vstr, value);
+}
+
+int SkDOM::findList(const Node* node, const char name[], const char list[]) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr ? SkParse::FindList(vstr, list) : -1;
+}
+
+bool SkDOM::hasAttr(const Node* node, const char name[], const char value[]) const {
+ const char* vstr = this->findAttr(node, name);
+ return vstr && !strcmp(vstr, value);
+}
+
+bool SkDOM::hasS32(const Node* node, const char name[], int32_t target) const {
+ const char* vstr = this->findAttr(node, name);
+ int32_t value;
+ return vstr && SkParse::FindS32(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasScalar(const Node* node, const char name[], SkScalar target) const {
+ const char* vstr = this->findAttr(node, name);
+ SkScalar value;
+ return vstr && SkParse::FindScalar(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasHex(const Node* node, const char name[], uint32_t target) const {
+ const char* vstr = this->findAttr(node, name);
+ uint32_t value;
+ return vstr && SkParse::FindHex(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasBool(const Node* node, const char name[], bool target) const {
+ const char* vstr = this->findAttr(node, name);
+ bool value;
+ return vstr && SkParse::FindBool(vstr, &value) && value == target;
+}
diff --git a/gfx/skia/skia/src/xml/SkDOM.h b/gfx/skia/skia/src/xml/SkDOM.h
new file mode 100644
index 0000000000..01d51578ae
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkDOM.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDOM_DEFINED
+#define SkDOM_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkTemplates.h"
+#include "src/core/SkArenaAlloc.h"
+
+struct SkDOMNode;
+struct SkDOMAttr;
+
+class SkDOMParser;
+class SkStream;
+class SkXMLParser;
+
+class SkDOM : public SkNoncopyable {
+public:
+ SkDOM();
+ ~SkDOM();
+
+ typedef SkDOMNode Node;
+ typedef SkDOMAttr Attr;
+
+ /** Returns null on failure
+ */
+ const Node* build(SkStream&);
+ const Node* copy(const SkDOM& dom, const Node* node);
+
+ const Node* getRootNode() const;
+
+ SkXMLParser* beginParsing();
+ const Node* finishParsing();
+
+ enum Type {
+ kElement_Type,
+ kText_Type
+ };
+ Type getType(const Node*) const;
+
+ const char* getName(const Node*) const;
+ const Node* getFirstChild(const Node*, const char elem[] = nullptr) const;
+ const Node* getNextSibling(const Node*, const char elem[] = nullptr) const;
+
+ const char* findAttr(const Node*, const char attrName[]) const;
+ const Attr* getFirstAttr(const Node*) const;
+ const Attr* getNextAttr(const Node*, const Attr*) const;
+ const char* getAttrName(const Node*, const Attr*) const;
+ const char* getAttrValue(const Node*, const Attr*) const;
+
+ // helpers for walking children
+ int countChildren(const Node* node, const char elem[] = nullptr) const;
+
+ // helpers for calling SkParse
+ bool findS32(const Node*, const char name[], int32_t* value) const;
+ bool findScalars(const Node*, const char name[], SkScalar value[], int count) const;
+ bool findHex(const Node*, const char name[], uint32_t* value) const;
+ bool findBool(const Node*, const char name[], bool*) const;
+ int findList(const Node*, const char name[], const char list[]) const;
+
+ bool findScalar(const Node* node, const char name[], SkScalar value[]) const {
+ return this->findScalars(node, name, value, 1);
+ }
+
+ bool hasAttr(const Node*, const char name[], const char value[]) const;
+ bool hasS32(const Node*, const char name[], int32_t value) const;
+ bool hasScalar(const Node*, const char name[], SkScalar value) const;
+ bool hasHex(const Node*, const char name[], uint32_t value) const;
+ bool hasBool(const Node*, const char name[], bool value) const;
+
+ class AttrIter {
+ public:
+ AttrIter(const SkDOM&, const Node*);
+ const char* next(const char** value);
+ private:
+ const Attr* fAttr;
+ const Attr* fStop;
+ };
+
+private:
+ SkArenaAlloc fAlloc;
+ Node* fRoot;
+ std::unique_ptr<SkDOMParser> fParser;
+
+ typedef SkNoncopyable INHERITED;
+};
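+
+// A minimal usage sketch (illustrative only; 'xml' is an assumed,
+// NUL-terminated document):
+//
+// SkDOM dom;
+// SkMemoryStream stream(xml, strlen(xml));
+// if (const SkDOM::Node* root = dom.build(stream)) {
+// for (const SkDOM::Node* child = dom.getFirstChild(root); child;
+// child = dom.getNextSibling(child)) {
+// SkDebugf("%s\n", dom.getName(child));
+// }
+// }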
+
+#endif
diff --git a/gfx/skia/skia/src/xml/SkXMLParser.cpp b/gfx/skia/skia/src/xml/SkXMLParser.cpp
new file mode 100644
index 0000000000..2e258da6b0
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLParser.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/xml/SkXMLParser.h"
+
+#include "expat.h"
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkTo.h"
+
+static char const* const gErrorStrings[] = {
+ "empty or missing file ",
+ "unknown element ",
+ "unknown attribute name ",
+ "error in attribute value ",
+ "duplicate ID ",
+ "unknown error "
+};
+
+SkXMLParserError::SkXMLParserError() : fCode(kNoError), fLineNumber(-1),
+ fNativeCode(-1)
+{
+ reset();
+}
+
+SkXMLParserError::~SkXMLParserError()
+{
+ // need a virtual destructor for our subclasses
+}
+
+void SkXMLParserError::getErrorString(SkString* str) const
+{
+ SkASSERT(str);
+ SkString temp;
+ if (fCode != kNoError) {
+ // fCode is 1-based here (kNoError was excluded above), so the last
+ // entry (kUnknownError) must pass the bounds check.
+ if ((unsigned)fCode <= SK_ARRAY_COUNT(gErrorStrings))
+ temp.set(gErrorStrings[fCode - 1]);
+ temp.append(fNoun);
+ } else
+ SkXMLParser::GetNativeErrorString(fNativeCode, &temp);
+ str->append(temp);
+}
+
+void SkXMLParserError::reset() {
+ fCode = kNoError;
+ fLineNumber = -1;
+ fNativeCode = -1;
+}
+
+////////////////
+
+namespace {
+
+const XML_Memory_Handling_Suite sk_XML_alloc = {
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free
+};
+
+struct ParsingContext {
+ ParsingContext(SkXMLParser* parser)
+ : fParser(parser)
+ , fXMLParser(XML_ParserCreate_MM(nullptr, &sk_XML_alloc, nullptr)) { }
+
+ void flushText() {
+ if (!fBufferedText.isEmpty()) {
+ fParser->text(fBufferedText.c_str(), SkTo<int>(fBufferedText.size()));
+ fBufferedText.reset();
+ }
+ }
+
+ void appendText(const char* txt, size_t len) {
+ fBufferedText.append(txt, len);
+ }
+
+ SkXMLParser* fParser;
+ SkAutoTCallVProc<skstd::remove_pointer_t<XML_Parser>, XML_ParserFree> fXMLParser;
+
+private:
+ SkString fBufferedText;
+};
+
+#define HANDLER_CONTEXT(arg, name) ParsingContext* name = static_cast<ParsingContext*>(arg)
+
+void XMLCALL start_element_handler(void *data, const char* tag, const char** attributes) {
+ HANDLER_CONTEXT(data, ctx);
+ ctx->flushText();
+
+ ctx->fParser->startElement(tag);
+
+ for (size_t i = 0; attributes[i]; i += 2) {
+ ctx->fParser->addAttribute(attributes[i], attributes[i + 1]);
+ }
+}
+
+void XMLCALL end_element_handler(void* data, const char* tag) {
+ HANDLER_CONTEXT(data, ctx);
+ ctx->flushText();
+
+ ctx->fParser->endElement(tag);
+}
+
+void XMLCALL text_handler(void *data, const char* txt, int len) {
+ HANDLER_CONTEXT(data, ctx);
+
+ ctx->appendText(txt, SkTo<size_t>(len));
+}
+
+void XMLCALL entity_decl_handler(void *data,
+ const XML_Char *entityName,
+ int is_parameter_entity,
+ const XML_Char *value,
+ int value_length,
+ const XML_Char *base,
+ const XML_Char *systemId,
+ const XML_Char *publicId,
+ const XML_Char *notationName) {
+ HANDLER_CONTEXT(data, ctx);
+
+ SkDebugf("'%s' entity declaration found, stopping processing", entityName);
+ XML_StopParser(ctx->fXMLParser, XML_FALSE);
+}
+
+} // anonymous namespace
+
+SkXMLParser::SkXMLParser(SkXMLParserError* parserError) : fParser(nullptr), fError(parserError)
+{
+}
+
+SkXMLParser::~SkXMLParser()
+{
+}
+
+bool SkXMLParser::parse(SkStream& docStream)
+{
+ ParsingContext ctx(this);
+ if (!ctx.fXMLParser) {
+ SkDebugf("could not create XML parser\n");
+ return false;
+ }
+
+ XML_SetUserData(ctx.fXMLParser, &ctx);
+ XML_SetElementHandler(ctx.fXMLParser, start_element_handler, end_element_handler);
+ XML_SetCharacterDataHandler(ctx.fXMLParser, text_handler);
+
+ // Disable entity processing, to inhibit internal entity expansion. See expat CVE-2013-0340.
+ XML_SetEntityDeclHandler(ctx.fXMLParser, entity_decl_handler);
+
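+ // In debug builds the buffer is only 5 bytes (512 - 507), stressing the
+ // chunked parsing loop below.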
+ static const int kBufferSize = 512 SkDEBUGCODE( - 507);
+ bool done = false;
+ do {
+ void* buffer = XML_GetBuffer(ctx.fXMLParser, kBufferSize);
+ if (!buffer) {
+ SkDebugf("could not buffer enough to continue\n");
+ return false;
+ }
+
+ size_t len = docStream.read(buffer, kBufferSize);
+ done = docStream.isAtEnd();
+ XML_Status status = XML_ParseBuffer(ctx.fXMLParser, SkToS32(len), done);
+ if (XML_STATUS_ERROR == status) {
+ XML_Error error = XML_GetErrorCode(ctx.fXMLParser);
+ int line = XML_GetCurrentLineNumber(ctx.fXMLParser);
+ int column = XML_GetCurrentColumnNumber(ctx.fXMLParser);
+ const XML_LChar* errorString = XML_ErrorString(error);
+ SkDebugf("parse error @%d:%d: %d (%s).\n", line, column, error, errorString);
+ return false;
+ }
+ } while (!done);
+
+ return true;
+}
+
+bool SkXMLParser::parse(const char doc[], size_t len)
+{
+ SkMemoryStream docStream(doc, len);
+ return this->parse(docStream);
+}
+
+void SkXMLParser::GetNativeErrorString(int error, SkString* str)
+{
+
+}
+
+bool SkXMLParser::startElement(const char elem[])
+{
+ return this->onStartElement(elem);
+}
+
+bool SkXMLParser::addAttribute(const char name[], const char value[])
+{
+ return this->onAddAttribute(name, value);
+}
+
+bool SkXMLParser::endElement(const char elem[])
+{
+ return this->onEndElement(elem);
+}
+
+bool SkXMLParser::text(const char text[], int len)
+{
+ return this->onText(text, len);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkXMLParser::onStartElement(const char elem[]) { return false; }
+bool SkXMLParser::onAddAttribute(const char name[], const char value[]) { return false; }
+bool SkXMLParser::onEndElement(const char elem[]) { return false; }
+bool SkXMLParser::onText(const char text[], int len) { return false; }
diff --git a/gfx/skia/skia/src/xml/SkXMLParser.h b/gfx/skia/skia/src/xml/SkXMLParser.h
new file mode 100644
index 0000000000..3fe317a4fa
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLParser.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkXMLParser_DEFINED
+#define SkXMLParser_DEFINED
+
+#include "include/core/SkString.h"
+
+class SkStream;
+
+class SkDOM;
+struct SkDOMNode;
+
+class SkXMLParserError {
+public:
+ enum ErrorCode {
+ kNoError,
+ kEmptyFile,
+ kUnknownElement,
+ kUnknownAttributeName,
+ kErrorInAttributeValue,
+ kDuplicateIDs,
+ kUnknownError
+ };
+
+ SkXMLParserError();
+ virtual ~SkXMLParserError();
+ ErrorCode getErrorCode() const { return fCode; }
+ virtual void getErrorString(SkString* str) const;
+ int getLineNumber() const { return fLineNumber; }
+ int getNativeCode() const { return fNativeCode; }
+ bool hasError() const { return fCode != kNoError || fNativeCode != -1; }
+ bool hasNoun() const { return fNoun.size() > 0; }
+ void reset();
+ void setCode(ErrorCode code) { fCode = code; }
+ void setNoun(const SkString& str) { fNoun.set(str); }
+ void setNoun(const char* ch) { fNoun.set(ch); }
+ void setNoun(const char* ch, size_t len) { fNoun.set(ch, len); }
+protected:
+ ErrorCode fCode;
+private:
+ int fLineNumber;
+ int fNativeCode;
+ SkString fNoun;
+ friend class SkXMLParser;
+};
+
+class SkXMLParser {
+public:
+ SkXMLParser(SkXMLParserError* parserError = nullptr);
+ virtual ~SkXMLParser();
+
+ /** Returns true for success
+ */
+ bool parse(const char doc[], size_t len);
+ bool parse(SkStream& docStream);
+ bool parse(const SkDOM&, const SkDOMNode*);
+
+ static void GetNativeErrorString(int nativeErrorCode, SkString* str);
+
+protected:
+ // override in subclasses; return true to stop parsing
+ virtual bool onStartElement(const char elem[]);
+ virtual bool onAddAttribute(const char name[], const char value[]);
+ virtual bool onEndElement(const char elem[]);
+ virtual bool onText(const char text[], int len);
+
+public:
+ // public for ported implementation, not meant for clients to call
+ bool startElement(const char elem[]);
+ bool addAttribute(const char name[], const char value[]);
+ bool endElement(const char elem[]);
+ bool text(const char text[], int len);
+ void* fParser;
+protected:
+ SkXMLParserError* fError;
+private:
+ void reportError(void* parser);
+};
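+
+// A minimal subclass sketch (illustrative only): count the elements in a
+// document. Returning false from a callback means "keep parsing".
+//
+// class ElementCounter : public SkXMLParser {
+// public:
+// int fCount = 0;
+// protected:
+// bool onStartElement(const char[]) override { ++fCount; return false; }
+// };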
+
+#endif
diff --git a/gfx/skia/skia/src/xml/SkXMLWriter.cpp b/gfx/skia/skia/src/xml/SkXMLWriter.cpp
new file mode 100644
index 0000000000..4640d73d93
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLWriter.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/xml/SkXMLWriter.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/SkTo.h"
+
+SkXMLWriter::SkXMLWriter(bool doEscapeMarkup) : fDoEscapeMarkup(doEscapeMarkup)
+{}
+
+SkXMLWriter::~SkXMLWriter() {
+ SkASSERT(fElems.count() == 0);
+}
+
+void SkXMLWriter::flush() {
+ while (fElems.count()) {
+ this->endElement();
+ }
+}
+
+void SkXMLWriter::addAttribute(const char name[], const char value[]) {
+ this->addAttributeLen(name, value, strlen(value));
+}
+
+void SkXMLWriter::addS32Attribute(const char name[], int32_t value) {
+ SkString tmp;
+ tmp.appendS32(value);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addHexAttribute(const char name[], uint32_t value, int minDigits) {
+ SkString tmp("0x");
+ tmp.appendHex(value, minDigits);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addScalarAttribute(const char name[], SkScalar value) {
+ SkString tmp;
+ tmp.appendScalar(value);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addText(const char text[], size_t length) {
+ if (fElems.isEmpty()) {
+ return;
+ }
+
+ this->onAddText(text, length);
+
+ fElems.top()->fHasText = true;
+}
+
+void SkXMLWriter::doEnd(Elem* elem) {
+ delete elem;
+}
+
+bool SkXMLWriter::doStart(const char name[], size_t length) {
+ int level = fElems.count();
+ bool firstChild = level > 0 && !fElems[level-1]->fHasChildren;
+ if (firstChild) {
+ fElems[level-1]->fHasChildren = true;
+ }
+ Elem** elem = fElems.push();
+ *elem = new Elem(name, length);
+ return firstChild;
+}
+
+SkXMLWriter::Elem* SkXMLWriter::getEnd() {
+ Elem* elem;
+ fElems.pop(&elem);
+ return elem;
+}
+
+const char* SkXMLWriter::getHeader() {
+ static const char gHeader[] = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>";
+ return gHeader;
+}
+
+void SkXMLWriter::startElement(const char name[]) {
+ this->startElementLen(name, strlen(name));
+}
+
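+// Each gEscapeChars entry stores the character to escape in its first byte,
+// followed immediately by its replacement entity; escape_char() returns either
+// that entity (skipping the first byte) or the original character via
+// 'storage'.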
+static const char* escape_char(char c, char storage[2]) {
+ static const char* gEscapeChars[] = {
+ "<&lt;",
+ ">&gt;",
+ //"\"&quot;",
+ //"'&apos;",
+ "&&amp;"
+ };
+
+ const char** array = gEscapeChars;
+ for (unsigned i = 0; i < SK_ARRAY_COUNT(gEscapeChars); i++) {
+ if (array[i][0] == c) {
+ return &array[i][1];
+ }
+ }
+ storage[0] = c;
+ storage[1] = 0;
+ return storage;
+}
+
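+// escape_markup() does double duty: with dst == nullptr it only measures,
+// returning how many extra bytes the escaped form needs beyond 'length';
+// with a non-null dst it writes the escaped text. addAttributeLen() below
+// calls it once to size the buffer and once to fill it.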
+static size_t escape_markup(char dst[], const char src[], size_t length) {
+ size_t extra = 0;
+ const char* stop = src + length;
+
+ while (src < stop) {
+ char orig[2];
+ const char* seq = escape_char(*src, orig);
+ size_t seqSize = strlen(seq);
+
+ if (dst) {
+ memcpy(dst, seq, seqSize);
+ dst += seqSize;
+ }
+
+ // now record the extra size needed
+ extra += seqSize - 1; // minus one to subtract the original char
+
+ // bump to the next src char
+ src += 1;
+ }
+ return extra;
+}
+
+void SkXMLWriter::addAttributeLen(const char name[], const char value[], size_t length) {
+ SkString valueStr;
+
+ if (fDoEscapeMarkup) {
+ size_t extra = escape_markup(nullptr, value, length);
+ if (extra) {
+ valueStr.resize(length + extra);
+ (void)escape_markup(valueStr.writable_str(), value, length);
+ value = valueStr.c_str();
+ length += extra;
+ }
+ }
+ this->onAddAttributeLen(name, value, length);
+}
+
+void SkXMLWriter::startElementLen(const char elem[], size_t length) {
+ this->onStartElementLen(elem, length);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+static void write_dom(const SkDOM& dom, const SkDOM::Node* node, SkXMLWriter* w, bool skipRoot) {
+ if (!skipRoot) {
+ const char* elem = dom.getName(node);
+ if (dom.getType(node) == SkDOM::kText_Type) {
+ SkASSERT(dom.countChildren(node) == 0);
+ w->addText(elem, strlen(elem));
+ return;
+ }
+
+ w->startElement(elem);
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name;
+ const char* value;
+ while ((name = iter.next(&value)) != nullptr) {
+ w->addAttribute(name, value);
+ }
+ }
+
+ node = dom.getFirstChild(node, nullptr);
+ while (node) {
+ write_dom(dom, node, w, false);
+ node = dom.getNextSibling(node, nullptr);
+ }
+
+ if (!skipRoot) {
+ w->endElement();
+ }
+}
+
+void SkXMLWriter::writeDOM(const SkDOM& dom, const SkDOM::Node* node, bool skipRoot) {
+ if (node) {
+ write_dom(dom, node, this, skipRoot);
+ }
+}
+
+void SkXMLWriter::writeHeader()
+{}
+
+// SkXMLStreamWriter
+
+SkXMLStreamWriter::SkXMLStreamWriter(SkWStream* stream, uint32_t flags)
+ : fStream(*stream)
+ , fFlags(flags) {}
+
+SkXMLStreamWriter::~SkXMLStreamWriter() {
+ this->flush();
+}
+
+void SkXMLStreamWriter::onAddAttributeLen(const char name[], const char value[], size_t length) {
+ SkASSERT(!fElems.top()->fHasChildren && !fElems.top()->fHasText);
+ fStream.writeText(" ");
+ fStream.writeText(name);
+ fStream.writeText("=\"");
+ fStream.write(value, length);
+ fStream.writeText("\"");
+}
+
+void SkXMLStreamWriter::onAddText(const char text[], size_t length) {
+ Elem* elem = fElems.top();
+
+ if (!elem->fHasChildren && !elem->fHasText) {
+ fStream.writeText(">");
+ this->newline();
+ }
+
+ this->tab(fElems.count() + 1);
+ fStream.write(text, length);
+ this->newline();
+}
+
+void SkXMLStreamWriter::onEndElement() {
+ Elem* elem = getEnd();
+ if (elem->fHasChildren || elem->fHasText) {
+ this->tab(fElems.count());
+ fStream.writeText("</");
+ fStream.writeText(elem->fName.c_str());
+ fStream.writeText(">");
+ } else {
+ fStream.writeText("/>");
+ }
+ this->newline();
+ doEnd(elem);
+}
+
+void SkXMLStreamWriter::onStartElementLen(const char name[], size_t length) {
+ int level = fElems.count();
+ if (this->doStart(name, length)) {
+ // the first child, need to close with >
+ fStream.writeText(">");
+ this->newline();
+ }
+
+ this->tab(level);
+ fStream.writeText("<");
+ fStream.write(name, length);
+}
+
+void SkXMLStreamWriter::writeHeader() {
+ const char* header = getHeader();
+ fStream.write(header, strlen(header));
+ this->newline();
+}
+
+void SkXMLStreamWriter::newline() {
+ if (!(fFlags & kNoPretty_Flag)) {
+ fStream.newline();
+ }
+}
+
+void SkXMLStreamWriter::tab(int level) {
+ if (!(fFlags & kNoPretty_Flag)) {
+ for (int i = 0; i < level; i++) {
+ fStream.writeText("\t");
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/xml/SkXMLParser.h"
+
+SkXMLParserWriter::SkXMLParserWriter(SkXMLParser* parser)
+ : SkXMLWriter(false), fParser(*parser)
+{
+}
+
+SkXMLParserWriter::~SkXMLParserWriter() {
+ this->flush();
+}
+
+void SkXMLParserWriter::onAddAttributeLen(const char name[], const char value[], size_t length) {
+ SkASSERT(fElems.count() == 0 || (!fElems.top()->fHasChildren && !fElems.top()->fHasText));
+ SkString str(value, length);
+ fParser.addAttribute(name, str.c_str());
+}
+
+void SkXMLParserWriter::onAddText(const char text[], size_t length) {
+ fParser.text(text, SkToInt(length));
+}
+
+void SkXMLParserWriter::onEndElement() {
+ Elem* elem = this->getEnd();
+ fParser.endElement(elem->fName.c_str());
+ this->doEnd(elem);
+}
+
+void SkXMLParserWriter::onStartElementLen(const char name[], size_t length) {
+ (void)this->doStart(name, length);
+ SkString str(name, length);
+ fParser.startElement(str.c_str());
+}
diff --git a/gfx/skia/skia/src/xml/SkXMLWriter.h b/gfx/skia/skia/src/xml/SkXMLWriter.h
new file mode 100644
index 0000000000..a189169f04
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLWriter.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXMLWriter_DEFINED
+#define SkXMLWriter_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/private/SkTDArray.h"
+#include "src/xml/SkDOM.h"
+
+class SkWStream;
+class SkXMLParser;
+
+class SkXMLWriter {
+public:
+ SkXMLWriter(bool doEscapeMarkup = true);
+ virtual ~SkXMLWriter();
+
+ void addS32Attribute(const char name[], int32_t value);
+ void addAttribute(const char name[], const char value[]);
+ void addAttributeLen(const char name[], const char value[], size_t length);
+ void addHexAttribute(const char name[], uint32_t value, int minDigits = 0);
+ void addScalarAttribute(const char name[], SkScalar value);
+ void addText(const char text[], size_t length);
+ void endElement() { this->onEndElement(); }
+ void startElement(const char elem[]);
+ void startElementLen(const char elem[], size_t length);
+ void writeDOM(const SkDOM&, const SkDOM::Node*, bool skipRoot);
+ void flush();
+ virtual void writeHeader();
+
+protected:
+ virtual void onStartElementLen(const char elem[], size_t length) = 0;
+ virtual void onAddAttributeLen(const char name[], const char value[], size_t length) = 0;
+ virtual void onAddText(const char text[], size_t length) = 0;
+ virtual void onEndElement() = 0;
+
+ struct Elem {
+ Elem(const char name[], size_t len)
+ : fName(name, len)
+ , fHasChildren(false)
+ , fHasText(false) {}
+
+ SkString fName;
+ bool fHasChildren;
+ bool fHasText;
+ };
+ void doEnd(Elem* elem);
+ bool doStart(const char name[], size_t length);
+ Elem* getEnd();
+ const char* getHeader();
+ SkTDArray<Elem*> fElems;
+
+private:
+ bool fDoEscapeMarkup;
+ // illegal
+ SkXMLWriter& operator=(const SkXMLWriter&);
+};
+
+class SkXMLStreamWriter : public SkXMLWriter {
+public:
+ enum : uint32_t {
+ kNoPretty_Flag = 0x01,
+ };
+
+ SkXMLStreamWriter(SkWStream*, uint32_t flags = 0);
+ ~SkXMLStreamWriter() override;
+ void writeHeader() override;
+
+protected:
+ void onStartElementLen(const char elem[], size_t length) override;
+ void onEndElement() override;
+ void onAddAttributeLen(const char name[], const char value[], size_t length) override;
+ void onAddText(const char text[], size_t length) override;
+
+private:
+ void newline();
+ void tab(int lvl);
+
+ SkWStream& fStream;
+ const uint32_t fFlags;
+};
+
+class SkXMLParserWriter : public SkXMLWriter {
+public:
+ SkXMLParserWriter(SkXMLParser*);
+ ~SkXMLParserWriter() override;
+protected:
+ void onStartElementLen(const char elem[], size_t length) override;
+ void onEndElement() override;
+ void onAddAttributeLen(const char name[], const char value[], size_t length) override;
+ void onAddText(const char text[], size_t length) override;
+private:
+ SkXMLParser& fParser;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/xps/SkXPSDevice.cpp b/gfx/skia/skia/src/xps/SkXPSDevice.cpp
new file mode 100644
index 0000000000..cebf60d564
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkXPSDevice.cpp
@@ -0,0 +1,2018 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/core/SkLeanWindows.h"
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+#ifndef _UNICODE
+#define _UNICODE
+#endif
+#include <ObjBase.h>
+#include <XpsObjectModel.h>
+#include <T2EmbApi.h>
+#include <FontSub.h>
+#include <limits>
+
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkVertices.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/SkTDArray.h"
+#include "include/private/SkTo.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEndian.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMakeUnique.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTLazy.h"
+#include "src/core/SkTypefacePriv.h"
+#include "src/core/SkUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/sfnt/SkTTCFHeader.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkIStream.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+#include "src/xps/SkXPSDevice.h"
+
+//Windows defines a FLOAT type,
+//make it clear when converting a scalar that this is what is wanted.
+#define SkScalarToFLOAT(n) SkScalarToFloat(n)
+
+//Dummy representation of a GUID from createId.
+#define L_GUID_ID L"XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX"
+//Length of GUID representation from createId, including nullptr terminator.
+#define GUID_ID_LEN SK_ARRAY_COUNT(L_GUID_ID)
+
+/**
+ Formats a GUID and places it into buffer.
+ buffer should have space for at least GUID_ID_LEN wide characters.
+ The string will always be null-terminated.
+ XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX0
+ @return -1 if there was an error, > 0 if success.
+ */
+static int format_guid(const GUID& guid,
+ wchar_t* buffer, size_t bufferSize,
+ wchar_t sep = '-') {
+ SkASSERT(bufferSize >= GUID_ID_LEN);
+ return swprintf_s(buffer,
+ bufferSize,
+ L"%08lX%c%04X%c%04X%c%02X%02X%c%02X%02X%02X%02X%02X%02X",
+ guid.Data1,
+ sep,
+ guid.Data2,
+ sep,
+ guid.Data3,
+ sep,
+ guid.Data4[0],
+ guid.Data4[1],
+ sep,
+ guid.Data4[2],
+ guid.Data4[3],
+ guid.Data4[4],
+ guid.Data4[5],
+ guid.Data4[6],
+ guid.Data4[7]);
+}
+
+HRESULT SkXPSDevice::createId(wchar_t* buffer, size_t bufferSize, wchar_t sep) {
+ GUID guid = {};
+#ifdef SK_XPS_USE_DETERMINISTIC_IDS
+ guid.Data1 = fNextId++;
+ // The following make this a valid version 4 (random) UUID.
+ guid.Data3 = 0x4000;
+ guid.Data4[0] = 0x80;
+#else
+ HRM(CoCreateGuid(&guid), "Could not create GUID for id.");
+#endif
+
+ if (format_guid(guid, buffer, bufferSize, sep) == -1) {
+ HRM(E_UNEXPECTED, "Could not format GUID into id.");
+ }
+
+ return S_OK;
+}
+
+SkXPSDevice::SkXPSDevice(SkISize s)
+ : INHERITED(SkImageInfo::MakeUnknown(s.width(), s.height()),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fCurrentPage(0) {}
+
+SkXPSDevice::~SkXPSDevice() {}
+
+bool SkXPSDevice::beginPortfolio(SkWStream* outputStream, IXpsOMObjectFactory* factory) {
+ SkASSERT(factory);
+ fXpsFactory.reset(SkRefComPtr(factory));
+ HRB(SkWIStream::CreateFromSkWStream(outputStream, &this->fOutputStream));
+ return true;
+}
+
+bool SkXPSDevice::beginSheet(
+ const SkVector& unitsPerMeter,
+ const SkVector& pixelsPerMeter,
+ const SkSize& trimSize,
+ const SkRect* mediaBox,
+ const SkRect* bleedBox,
+ const SkRect* artBox,
+ const SkRect* cropBox) {
+ ++this->fCurrentPage;
+
+ //For simplicity, just write everything out in geometry units,
+ //then have a base canvas do the scale to physical units.
+ this->fCurrentCanvasSize = trimSize;
+ this->fCurrentUnitsPerMeter = unitsPerMeter;
+ this->fCurrentPixelsPerMeter = pixelsPerMeter;
+ return this->createCanvasForLayer();
+}
+
+bool SkXPSDevice::createCanvasForLayer() {
+ SkASSERT(fXpsFactory);
+ fCurrentXpsCanvas.reset();
+ HRB(fXpsFactory->CreateCanvas(&fCurrentXpsCanvas));
+ return true;
+}
+
+template <typename T> static constexpr size_t sk_digits_in() {
+ return static_cast<size_t>(std::numeric_limits<T>::digits10 + 1);
+}
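+// e.g. sk_digits_in<uint32_t>() == 10 (digits10 is 9, plus one), the maximum
+// number of decimal digits a 32-bit unsigned value can occupy.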
+
+HRESULT SkXPSDevice::createXpsThumbnail(IXpsOMPage* page,
+ const unsigned int pageNum,
+ IXpsOMImageResource** image) {
+ SkTScopedComPtr<IXpsOMThumbnailGenerator> thumbnailGenerator;
+ HRM(CoCreateInstance(
+ CLSID_XpsOMThumbnailGenerator,
+ nullptr,
+ CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&thumbnailGenerator)),
+ "Could not create thumbnail generator.");
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ constexpr size_t size = SkTMax(
+ SK_ARRAY_COUNT(L"/Documents/1/Metadata/.png") + sk_digits_in<decltype(pageNum)>(),
+ SK_ARRAY_COUNT(L"/Metadata/" L_GUID_ID L".png"));
+ wchar_t buffer[size];
+ if (pageNum > 0) {
+ swprintf_s(buffer, size, L"/Documents/1/Metadata/%u.png", pageNum);
+ } else {
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Metadata/%s.png", id);
+ }
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create thumbnail part uri.");
+
+ HRM(thumbnailGenerator->GenerateThumbnail(page,
+ XPS_IMAGE_TYPE_PNG,
+ XPS_THUMBNAIL_SIZE_LARGE,
+ partUri.get(),
+ image),
+ "Could not generate thumbnail.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsPage(const XPS_SIZE& pageSize,
+ IXpsOMPage** page) {
+ constexpr size_t size =
+ SK_ARRAY_COUNT(L"/Documents/1/Pages/.fpage")
+ + sk_digits_in<decltype(fCurrentPage)>();
+ wchar_t buffer[size];
+ swprintf_s(buffer, size, L"/Documents/1/Pages/%u.fpage",
+ this->fCurrentPage);
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create page part uri.");
+
+ //If the language is unknown, use "und" (XPS Spec 2.3.5.1).
+ HRM(this->fXpsFactory->CreatePage(&pageSize,
+ L"und",
+ partUri.get(),
+ page),
+ "Could not create page.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::initXpsDocumentWriter(IXpsOMImageResource* image) {
+ //Create package writer.
+ {
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(L"/FixedDocumentSequence.fdseq",
+ &partUri),
+ "Could not create document sequence part uri.");
+ HRM(this->fXpsFactory->CreatePackageWriterOnStream(
+ this->fOutputStream.get(),
+ TRUE,
+ XPS_INTERLEAVING_OFF, //XPS_INTERLEAVING_ON,
+ partUri.get(),
+ nullptr,
+ image,
+ nullptr,
+ nullptr,
+ &this->fPackageWriter),
+ "Could not create package writer.");
+ }
+
+ //Begin the lone document.
+ {
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(
+ L"/Documents/1/FixedDocument.fdoc",
+ &partUri),
+ "Could not create fixed document part uri.");
+ HRM(this->fPackageWriter->StartNewDocument(partUri.get(),
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr),
+ "Could not start document.");
+ }
+
+ return S_OK;
+}
+
+bool SkXPSDevice::endSheet() {
+ //XPS is fixed at 96dpi (XPS Spec 11.1).
+ static const float xpsDPI = 96.0f;
+ static const float inchesPerMeter = 10000.0f / 254.0f;
+ static const float targetUnitsPerMeter = xpsDPI * inchesPerMeter;
+ const float scaleX = targetUnitsPerMeter
+ / SkScalarToFLOAT(this->fCurrentUnitsPerMeter.fX);
+ const float scaleY = targetUnitsPerMeter
+ / SkScalarToFLOAT(this->fCurrentUnitsPerMeter.fY);
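+ //For example, if the current units per meter already equal the XPS target
+ //of 96 * 10000 / 254 ~= 3779.53, then scaleX and scaleY are 1.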
+
+ //Create the scale canvas.
+ SkTScopedComPtr<IXpsOMCanvas> scaleCanvas;
+ HRBM(this->fXpsFactory->CreateCanvas(&scaleCanvas),
+ "Could not create scale canvas.");
+ SkTScopedComPtr<IXpsOMVisualCollection> scaleCanvasVisuals;
+ HRBM(scaleCanvas->GetVisuals(&scaleCanvasVisuals),
+ "Could not get scale canvas visuals.");
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> geomToPhys;
+ XPS_MATRIX rawGeomToPhys = { scaleX, 0, 0, scaleY, 0, 0, };
+ HRBM(this->fXpsFactory->CreateMatrixTransform(&rawGeomToPhys, &geomToPhys),
+ "Could not create geometry to physical transform.");
+ HRBM(scaleCanvas->SetTransformLocal(geomToPhys.get()),
+ "Could not set transform on scale canvas.");
+
+ //Add the content canvas to the scale canvas.
+ HRBM(scaleCanvasVisuals->Append(this->fCurrentXpsCanvas.get()),
+ "Could not add base canvas to scale canvas.");
+
+ //Create the page.
+ XPS_SIZE pageSize = {
+ SkScalarToFLOAT(this->fCurrentCanvasSize.width()) * scaleX,
+ SkScalarToFLOAT(this->fCurrentCanvasSize.height()) * scaleY,
+ };
+ SkTScopedComPtr<IXpsOMPage> page;
+ HRB(this->createXpsPage(pageSize, &page));
+
+ SkTScopedComPtr<IXpsOMVisualCollection> pageVisuals;
+ HRBM(page->GetVisuals(&pageVisuals), "Could not get page visuals.");
+
+ //Add the scale canvas to the page.
+ HRBM(pageVisuals->Append(scaleCanvas.get()),
+ "Could not add scale canvas to page.");
+
+ //Create the package writer if it hasn't been created yet.
+ if (nullptr == this->fPackageWriter.get()) {
+ SkTScopedComPtr<IXpsOMImageResource> image;
+ //Ignore return, thumbnail is completely optional.
+ this->createXpsThumbnail(page.get(), 0, &image);
+
+ HRB(this->initXpsDocumentWriter(image.get()));
+ }
+
+ HRBM(this->fPackageWriter->AddPage(page.get(),
+ &pageSize,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr),
+ "Could not write the page.");
+ this->fCurrentXpsCanvas.reset();
+
+ return true;
+}
+
+static HRESULT subset_typeface(const SkXPSDevice::TypefaceUse& current) {
+ //CreateFontPackage wants unsigned short.
+ //Microsoft, Y U NO stdint.h?
+ std::vector<unsigned short> keepList;
+ current.glyphsUsed.getSetValues([&keepList](unsigned v) {
+ keepList.push_back((unsigned short)v);
+ });
+
+ int ttcCount = (current.ttcIndex + 1);
+
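+ //CreateFontPackage subsets the font to just the glyphs in keepList;
+ //TTFCFP_FLAGS_TTC is added when the source came from a TrueType collection.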
+ //The following are declared with the types required by CreateFontPackage.
+ unsigned char *fontPackageBufferRaw = nullptr;
+ unsigned long fontPackageBufferSize;
+ unsigned long bytesWritten;
+ unsigned long result = CreateFontPackage(
+ (unsigned char *) current.fontData->getMemoryBase(),
+ (unsigned long) current.fontData->getLength(),
+ &fontPackageBufferRaw,
+ &fontPackageBufferSize,
+ &bytesWritten,
+ TTFCFP_FLAGS_SUBSET | TTFCFP_FLAGS_GLYPHLIST | (ttcCount > 0 ? TTFCFP_FLAGS_TTC : 0),
+ current.ttcIndex,
+ TTFCFP_SUBSET,
+ 0,
+ 0,
+ 0,
+ keepList.data(),
+ SkTo<unsigned short>(keepList.size()),
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free,
+ nullptr);
+ SkAutoTMalloc<unsigned char> fontPackageBuffer(fontPackageBufferRaw);
+ if (result != NO_ERROR) {
+ SkDEBUGF("CreateFontPackage Error %lu", result);
+ return E_UNEXPECTED;
+ }
+
+ // If it was originally a ttc, keep it a ttc.
+ // CreateFontPackage over-allocates, realloc usually decreases the size substantially.
+ size_t extra;
+ if (ttcCount > 0) {
+ // Create space for a ttc header.
+ extra = sizeof(SkTTCFHeader) + (ttcCount * sizeof(SK_OT_ULONG));
+ fontPackageBuffer.realloc(bytesWritten + extra);
+ //overlap is certain, use memmove
+ memmove(fontPackageBuffer.get() + extra, fontPackageBuffer.get(), bytesWritten);
+
+ // Write the ttc header.
+ SkTTCFHeader* ttcfHeader = reinterpret_cast<SkTTCFHeader*>(fontPackageBuffer.get());
+ ttcfHeader->ttcTag = SkTTCFHeader::TAG;
+ ttcfHeader->version = SkTTCFHeader::version_1;
+ ttcfHeader->numOffsets = SkEndian_SwapBE32(ttcCount);
+ SK_OT_ULONG* offsetPtr = SkTAfter<SK_OT_ULONG>(ttcfHeader);
+ for (int i = 0; i < ttcCount; ++i, ++offsetPtr) {
+ *offsetPtr = SkEndian_SwapBE32(SkToU32(extra));
+ }
+
+ // Fix up offsets in sfnt table entries.
+ SkSFNTHeader* sfntHeader = SkTAddOffset<SkSFNTHeader>(fontPackageBuffer.get(), extra);
+ int numTables = SkEndian_SwapBE16(sfntHeader->numTables);
+ SkSFNTHeader::TableDirectoryEntry* tableDirectory =
+ SkTAfter<SkSFNTHeader::TableDirectoryEntry>(sfntHeader);
+ for (int i = 0; i < numTables; ++i, ++tableDirectory) {
+ tableDirectory->offset = SkEndian_SwapBE32(
+ SkToU32(SkEndian_SwapBE32(SkToU32(tableDirectory->offset)) + extra));
+ }
+ } else {
+ extra = 0;
+ fontPackageBuffer.realloc(bytesWritten);
+ }
+
+ std::unique_ptr<SkMemoryStream> newStream(new SkMemoryStream());
+ newStream->setMemoryOwned(fontPackageBuffer.release(), bytesWritten + extra);
+
+ SkTScopedComPtr<IStream> newIStream;
+ SkIStream::CreateFromSkStream(std::move(newStream), &newIStream);
+
+ XPS_FONT_EMBEDDING embedding;
+ HRM(current.xpsFont->GetEmbeddingOption(&embedding),
+ "Could not get embedding option from font.");
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(current.xpsFont->GetPartName(&partUri),
+ "Could not get part uri from font.");
+
+ HRM(current.xpsFont->SetContent(
+ newIStream.get(),
+ embedding,
+ partUri.get()),
+ "Could not set new stream for subsetted font.");
+
+ return S_OK;
+}
+
+bool SkXPSDevice::endPortfolio() {
+ //Subset fonts
+ for (const TypefaceUse& current : this->fTypefaces) {
+ //Ignore return for now, if it didn't subset, let it be.
+ subset_typeface(current);
+ }
+
+ if (this->fPackageWriter) {
+ HRBM(this->fPackageWriter->Close(), "Could not close writer.");
+ }
+
+ return true;
+}
+
+static XPS_COLOR xps_color(const SkColor skColor) {
+ //XPS uses non-pre-multiplied alpha (XPS Spec 11.4).
+ XPS_COLOR xpsColor;
+ xpsColor.colorType = XPS_COLOR_TYPE_SRGB;
+ xpsColor.value.sRGB.alpha = SkColorGetA(skColor);
+ xpsColor.value.sRGB.red = SkColorGetR(skColor);
+ xpsColor.value.sRGB.green = SkColorGetG(skColor);
+ xpsColor.value.sRGB.blue = SkColorGetB(skColor);
+
+ return xpsColor;
+}
+
+static XPS_POINT xps_point(const SkPoint& point) {
+ XPS_POINT xpsPoint = {
+ SkScalarToFLOAT(point.fX),
+ SkScalarToFLOAT(point.fY),
+ };
+ return xpsPoint;
+}
+
+static XPS_POINT xps_point(const SkPoint& point, const SkMatrix& matrix) {
+ SkPoint skTransformedPoint;
+ matrix.mapXY(point.fX, point.fY, &skTransformedPoint);
+ return xps_point(skTransformedPoint);
+}
+
+static XPS_SPREAD_METHOD xps_spread_method(SkTileMode tileMode) {
+ switch (tileMode) {
+ case SkTileMode::kClamp:
+ return XPS_SPREAD_METHOD_PAD;
+ case SkTileMode::kRepeat:
+ return XPS_SPREAD_METHOD_REPEAT;
+ case SkTileMode::kMirror:
+ return XPS_SPREAD_METHOD_REFLECT;
+ case SkTileMode::kDecal:
+ // TODO: fake
+ return XPS_SPREAD_METHOD_PAD;
+ default:
+ SkDEBUGFAIL("Unknown tile mode.");
+ }
+ return XPS_SPREAD_METHOD_PAD;
+}
+
+static void transform_offsets(SkScalar* stopOffsets, const int numOffsets,
+ const SkPoint& start, const SkPoint& end,
+ const SkMatrix& transform) {
+ SkPoint startTransformed;
+ transform.mapXY(start.fX, start.fY, &startTransformed);
+ SkPoint endTransformed;
+ transform.mapXY(end.fX, end.fY, &endTransformed);
+
+ //Signed sum of the x and y deltas (a Manhattan-style distance) between
+ //the transformed start and end.
+ SkScalar startToEnd = (endTransformed.fX - startTransformed.fX)
+ + (endTransformed.fY - startTransformed.fY);
+ if (SkScalarNearlyZero(startToEnd)) {
+ for (int i = 0; i < numOffsets; ++i) {
+ stopOffsets[i] = 0;
+ }
+ return;
+ }
+
+ for (int i = 0; i < numOffsets; ++i) {
+ SkPoint stop;
+ stop.fX = (end.fX - start.fX) * stopOffsets[i];
+ stop.fY = (end.fY - start.fY) * stopOffsets[i];
+
+ SkPoint stopTransformed;
+ transform.mapXY(stop.fX, stop.fY, &stopTransformed);
+
+ //Signed sum of the x and y deltas between transformed start and stop.
+ SkScalar startToStop = (stopTransformed.fX - startTransformed.fX)
+ + (stopTransformed.fY - startTransformed.fY);
+ //Percentage along transformed line.
+ stopOffsets[i] = startToStop / startToEnd;
+ }
+}
+
+HRESULT SkXPSDevice::createXpsTransform(const SkMatrix& matrix,
+ IXpsOMMatrixTransform** xpsTransform) {
+ SkScalar affine[6];
+ if (!matrix.asAffine(affine)) {
+ *xpsTransform = nullptr;
+ return S_FALSE;
+ }
+ XPS_MATRIX rawXpsMatrix = {
+ SkScalarToFLOAT(affine[SkMatrix::kAScaleX]),
+ SkScalarToFLOAT(affine[SkMatrix::kASkewY]),
+ SkScalarToFLOAT(affine[SkMatrix::kASkewX]),
+ SkScalarToFLOAT(affine[SkMatrix::kAScaleY]),
+ SkScalarToFLOAT(affine[SkMatrix::kATransX]),
+ SkScalarToFLOAT(affine[SkMatrix::kATransY]),
+ };
+ HRM(this->fXpsFactory->CreateMatrixTransform(&rawXpsMatrix, xpsTransform),
+ "Could not create transform.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createPath(IXpsOMGeometryFigure* figure,
+ IXpsOMVisualCollection* visuals,
+ IXpsOMPath** path) {
+ SkTScopedComPtr<IXpsOMGeometry> geometry;
+ HRM(this->fXpsFactory->CreateGeometry(&geometry),
+ "Could not create geometry.");
+
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> figureCollection;
+ HRM(geometry->GetFigures(&figureCollection), "Could not get figures.");
+ HRM(figureCollection->Append(figure), "Could not add figure.");
+
+ HRM(this->fXpsFactory->CreatePath(path), "Could not create path.");
+ HRM((*path)->SetGeometryLocal(geometry.get()), "Could not set geometry.");
+
+ HRM(visuals->Append(*path), "Could not add path to visuals.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsSolidColorBrush(const SkColor skColor,
+ const SkAlpha alpha,
+ IXpsOMBrush** xpsBrush) {
+ XPS_COLOR xpsColor = xps_color(skColor);
+ SkTScopedComPtr<IXpsOMSolidColorBrush> solidBrush;
+ HRM(this->fXpsFactory->CreateSolidColorBrush(&xpsColor, nullptr, &solidBrush),
+ "Could not create solid color brush.");
+ HRM(solidBrush->SetOpacity(alpha / 255.0f), "Could not set opacity.");
+ HRM(solidBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI Fail.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::sideOfClamp(const SkRect& areaToFill,
+ const XPS_RECT& imageViewBox,
+ IXpsOMImageResource* image,
+ IXpsOMVisualCollection* visuals) {
+ SkTScopedComPtr<IXpsOMGeometryFigure> areaToFillFigure;
+ HR(this->createXpsRect(areaToFill, FALSE, TRUE, &areaToFillFigure));
+
+ SkTScopedComPtr<IXpsOMPath> areaToFillPath;
+ HR(this->createPath(areaToFillFigure.get(), visuals, &areaToFillPath));
+
+ SkTScopedComPtr<IXpsOMImageBrush> areaToFillBrush;
+ HRM(this->fXpsFactory->CreateImageBrush(image,
+ &imageViewBox,
+ &imageViewBox,
+ &areaToFillBrush),
+ "Could not create brush for side of clamp.");
+ HRM(areaToFillBrush->SetTileMode(XPS_TILE_MODE_FLIPXY),
+ "Could not set tile mode for side of clamp.");
+ HRM(areaToFillPath->SetFillBrushLocal(areaToFillBrush.get()),
+ "Could not set brush for side of clamp");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::cornerOfClamp(const SkRect& areaToFill,
+ const SkColor color,
+ IXpsOMVisualCollection* visuals) {
+ SkTScopedComPtr<IXpsOMGeometryFigure> areaToFillFigure;
+ HR(this->createXpsRect(areaToFill, FALSE, TRUE, &areaToFillFigure));
+
+ SkTScopedComPtr<IXpsOMPath> areaToFillPath;
+ HR(this->createPath(areaToFillFigure.get(), visuals, &areaToFillPath));
+
+ SkTScopedComPtr<IXpsOMBrush> areaToFillBrush;
+ HR(this->createXpsSolidColorBrush(color, 0xFF, &areaToFillBrush));
+ HRM(areaToFillPath->SetFillBrushLocal(areaToFillBrush.get()),
+ "Could not set brush for corner of clamp.");
+
+ return S_OK;
+}
+
+static const XPS_TILE_MODE XTM_N = XPS_TILE_MODE_NONE;
+static const XPS_TILE_MODE XTM_T = XPS_TILE_MODE_TILE;
+static const XPS_TILE_MODE XTM_X = XPS_TILE_MODE_FLIPX;
+static const XPS_TILE_MODE XTM_Y = XPS_TILE_MODE_FLIPY;
+static const XPS_TILE_MODE XTM_XY = XPS_TILE_MODE_FLIPXY;
+
+//TODO(bungeman): In the future, should skia add None,
+//handle None+Mirror and None+Repeat correctly.
+//None is currently an internal hack so masks don't repeat (None+None only).
+static XPS_TILE_MODE gSkToXpsTileMode[kSkTileModeCount+1]
+ [kSkTileModeCount+1] = {
+ //Clamp //Repeat //Mirror //None
+ /*Clamp */ {XTM_N, XTM_T, XTM_Y, XTM_N},
+ /*Repeat*/ {XTM_T, XTM_T, XTM_Y, XTM_N},
+ /*Mirror*/ {XTM_X, XTM_X, XTM_XY, XTM_X},
+ /*None */ {XTM_N, XTM_N, XTM_Y, XTM_N},
+};
+
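+//For example, SkToXpsTileMode(SkTileMode::kClamp, SkTileMode::kRepeat) reads
+//row Clamp, column Repeat of the table above and yields XPS_TILE_MODE_TILE.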
+static XPS_TILE_MODE SkToXpsTileMode(SkTileMode tmx, SkTileMode tmy) {
+ return gSkToXpsTileMode[(unsigned)tmx][(unsigned)tmy];
+}
+
+HRESULT SkXPSDevice::createXpsImageBrush(
+ const SkBitmap& bitmap,
+ const SkMatrix& localMatrix,
+ const SkTileMode (&xy)[2],
+ const SkAlpha alpha,
+ IXpsOMTileBrush** xpsBrush) {
+ SkDynamicMemoryWStream write;
+ if (!SkEncodeImage(&write, bitmap, SkEncodedImageFormat::kPNG, 100)) {
+ HRM(E_FAIL, "Unable to encode bitmap as png.");
+ }
+ SkTScopedComPtr<IStream> read;
+ HRM(SkIStream::CreateFromSkStream(write.detachAsStream(), &read),
+ "Could not create stream from png data.");
+
+ const size_t size =
+ SK_ARRAY_COUNT(L"/Documents/1/Resources/Images/" L_GUID_ID L".png");
+ wchar_t buffer[size];
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Documents/1/Resources/Images/%s.png", id);
+
+ SkTScopedComPtr<IOpcPartUri> imagePartUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &imagePartUri),
+ "Could not create image part uri.");
+
+ SkTScopedComPtr<IXpsOMImageResource> imageResource;
+ HRM(this->fXpsFactory->CreateImageResource(
+ read.get(),
+ XPS_IMAGE_TYPE_PNG,
+ imagePartUri.get(),
+ &imageResource),
+ "Could not create image resource.");
+
+ XPS_RECT bitmapRect = {
+ 0.0, 0.0,
+ static_cast<FLOAT>(bitmap.width()), static_cast<FLOAT>(bitmap.height())
+ };
+ SkTScopedComPtr<IXpsOMImageBrush> xpsImageBrush;
+ HRM(this->fXpsFactory->CreateImageBrush(imageResource.get(),
+ &bitmapRect, &bitmapRect,
+ &xpsImageBrush),
+ "Could not create image brush.");
+
+ if (SkTileMode::kClamp != xy[0] &&
+ SkTileMode::kClamp != xy[1]) {
+
+ HRM(xpsImageBrush->SetTileMode(SkToXpsTileMode(xy[0], xy[1])),
+ "Could not set image tile mode");
+ HRM(xpsImageBrush->SetOpacity(alpha / 255.0f),
+ "Could not set image opacity.");
+ HRM(xpsImageBrush->QueryInterface(xpsBrush), "QI failed.");
+ } else {
+ //TODO(bungeman): compute how big this really needs to be.
+ const SkScalar BIG = SkIntToScalar(1000); //SK_ScalarMax;
+ const FLOAT BIG_F = SkScalarToFLOAT(BIG);
+ const SkScalar bWidth = SkIntToScalar(bitmap.width());
+ const SkScalar bHeight = SkIntToScalar(bitmap.height());
+
+ //create brush canvas
+ SkTScopedComPtr<IXpsOMCanvas> brushCanvas;
+ HRM(this->fXpsFactory->CreateCanvas(&brushCanvas),
+ "Could not create image brush canvas.");
+ SkTScopedComPtr<IXpsOMVisualCollection> brushVisuals;
+ HRM(brushCanvas->GetVisuals(&brushVisuals),
+ "Could not get image brush canvas visuals collection.");
+
+ //create central figure
+ const SkRect bitmapPoints = SkRect::MakeLTRB(0, 0, bWidth, bHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> centralFigure;
+ HR(this->createXpsRect(bitmapPoints, FALSE, TRUE, &centralFigure));
+
+ SkTScopedComPtr<IXpsOMPath> centralPath;
+ HR(this->createPath(centralFigure.get(),
+ brushVisuals.get(),
+ &centralPath));
+ HRM(xpsImageBrush->SetTileMode(XPS_TILE_MODE_FLIPXY),
+ "Could not set tile mode for image brush central path.");
+ HRM(centralPath->SetFillBrushLocal(xpsImageBrush.get()),
+ "Could not set fill brush for image brush central path.");
+
+ //add left/right
+ if (SkTileMode::kClamp == xy[0]) {
+ SkRect leftArea = SkRect::MakeLTRB(-BIG, 0, 0, bHeight);
+ XPS_RECT leftImageViewBox = {
+ 0.0, 0.0,
+ 1.0, static_cast<FLOAT>(bitmap.height()),
+ };
+ HR(this->sideOfClamp(leftArea, leftImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+
+ SkRect rightArea = SkRect::MakeLTRB(bWidth, 0, BIG, bHeight);
+ XPS_RECT rightImageViewBox = {
+ bitmap.width() - 1.0f, 0.0f,
+ 1.0f, static_cast<FLOAT>(bitmap.height()),
+ };
+ HR(this->sideOfClamp(rightArea, rightImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+ }
+
+ //add top/bottom
+ if (SkTileMode::kClamp == xy[1]) {
+ SkRect topArea = SkRect::MakeLTRB(0, -BIG, bWidth, 0);
+ XPS_RECT topImageViewBox = {
+ 0.0, 0.0,
+ static_cast<FLOAT>(bitmap.width()), 1.0,
+ };
+ HR(this->sideOfClamp(topArea, topImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+
+ SkRect bottomArea = SkRect::MakeLTRB(0, bHeight, bWidth, BIG);
+ XPS_RECT bottomImageViewBox = {
+ 0.0f, bitmap.height() - 1.0f,
+ static_cast<FLOAT>(bitmap.width()), 1.0f,
+ };
+ HR(this->sideOfClamp(bottomArea, bottomImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+ }
+
+ //add tl, tr, bl, br
+ if (SkTileMode::kClamp == xy[0] &&
+ SkTileMode::kClamp == xy[1]) {
+
+ const SkColor tlColor = bitmap.getColor(0,0);
+ const SkRect tlArea = SkRect::MakeLTRB(-BIG, -BIG, 0, 0);
+ HR(this->cornerOfClamp(tlArea, tlColor, brushVisuals.get()));
+
+ const SkColor trColor = bitmap.getColor(bitmap.width()-1,0);
+ const SkRect trArea = SkRect::MakeLTRB(bWidth, -BIG, BIG, 0);
+ HR(this->cornerOfClamp(trArea, trColor, brushVisuals.get()));
+
+ const SkColor brColor = bitmap.getColor(bitmap.width()-1,
+ bitmap.height()-1);
+ const SkRect brArea = SkRect::MakeLTRB(bWidth, bHeight, BIG, BIG);
+ HR(this->cornerOfClamp(brArea, brColor, brushVisuals.get()));
+
+ const SkColor blColor = bitmap.getColor(0,bitmap.height()-1);
+ const SkRect blArea = SkRect::MakeLTRB(-BIG, bHeight, 0, BIG);
+ HR(this->cornerOfClamp(blArea, blColor, brushVisuals.get()));
+ }
+
+ //create visual brush from canvas
+ XPS_RECT bound = {};
+ if (SkTileMode::kClamp == xy[0] &&
+ SkTileMode::kClamp == xy[1]) {
+
+ bound.x = BIG_F / -2;
+ bound.y = BIG_F / -2;
+ bound.width = BIG_F;
+ bound.height = BIG_F;
+ } else if (SkTileMode::kClamp == xy[0]) {
+ bound.x = BIG_F / -2;
+ bound.y = 0.0f;
+ bound.width = BIG_F;
+ bound.height = static_cast<FLOAT>(bitmap.height());
+ } else if (SkTileMode::kClamp == xy[1]) {
+ bound.x = 0;
+ bound.y = BIG_F / -2;
+ bound.width = static_cast<FLOAT>(bitmap.width());
+ bound.height = BIG_F;
+ }
+ SkTScopedComPtr<IXpsOMVisualBrush> clampBrush;
+ HRM(this->fXpsFactory->CreateVisualBrush(&bound, &bound, &clampBrush),
+ "Could not create visual brush for image brush.");
+ HRM(clampBrush->SetVisualLocal(brushCanvas.get()),
+ "Could not set canvas on visual brush for image brush.");
+ HRM(clampBrush->SetTileMode(SkToXpsTileMode(xy[0], xy[1])),
+ "Could not set tile mode on visual brush for image brush.");
+ HRM(clampBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity on visual brush for image brush.");
+
+ HRM(clampBrush->QueryInterface(xpsBrush), "QI failed.");
+ }
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(localMatrix, &xpsMatrixToUse));
+ if (xpsMatrixToUse.get()) {
+ HRM((*xpsBrush)->SetTransformLocal(xpsMatrixToUse.get()),
+ "Could not set transform for image brush.");
+ } else {
+ //TODO(bungeman): perspective bitmaps in general.
+ }
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsGradientStop(const SkColor skColor,
+ const SkScalar offset,
+ IXpsOMGradientStop** xpsGradStop) {
+ XPS_COLOR gradStopXpsColor = xps_color(skColor);
+ HRM(this->fXpsFactory->CreateGradientStop(&gradStopXpsColor,
+ nullptr,
+ SkScalarToFLOAT(offset),
+ xpsGradStop),
+ "Could not create gradient stop.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsLinearGradient(SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrix,
+ IXpsOMBrush** xpsBrush) {
+ XPS_POINT startPoint;
+ XPS_POINT endPoint;
+ if (xpsMatrix) {
+ startPoint = xps_point(info.fPoint[0]);
+ endPoint = xps_point(info.fPoint[1]);
+ } else {
+ transform_offsets(info.fColorOffsets, info.fColorCount,
+ info.fPoint[0], info.fPoint[1],
+ localMatrix);
+ startPoint = xps_point(info.fPoint[0], localMatrix);
+ endPoint = xps_point(info.fPoint[1], localMatrix);
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop0;
+ HR(createXpsGradientStop(info.fColors[0],
+ info.fColorOffsets[0],
+ &gradStop0));
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop1;
+ HR(createXpsGradientStop(info.fColors[1],
+ info.fColorOffsets[1],
+ &gradStop1));
+
+ SkTScopedComPtr<IXpsOMLinearGradientBrush> gradientBrush;
+ HRM(this->fXpsFactory->CreateLinearGradientBrush(gradStop0.get(),
+ gradStop1.get(),
+ &startPoint,
+ &endPoint,
+ &gradientBrush),
+ "Could not create linear gradient brush.");
+ if (xpsMatrix) {
+ HRM(gradientBrush->SetTransformLocal(xpsMatrix),
+ "Could not set transform on linear gradient brush.");
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStopCollection> gradStopCollection;
+ HRM(gradientBrush->GetGradientStops(&gradStopCollection),
+ "Could not get linear gradient stop collection.");
+ for (int i = 2; i < info.fColorCount; ++i) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop;
+ HR(createXpsGradientStop(info.fColors[i],
+ info.fColorOffsets[i],
+ &gradStop));
+ HRM(gradStopCollection->Append(gradStop.get()),
+ "Could not add linear gradient stop.");
+ }
+
+ HRM(gradientBrush->SetSpreadMethod(xps_spread_method((SkTileMode)info.fTileMode)),
+ "Could not set spread method of linear gradient.");
+
+ HRM(gradientBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity of linear gradient brush.");
+ HRM(gradientBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI failed");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsRadialGradient(SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrix,
+ IXpsOMBrush** xpsBrush) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop0;
+ HR(createXpsGradientStop(info.fColors[0],
+ info.fColorOffsets[0],
+ &gradStop0));
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop1;
+ HR(createXpsGradientStop(info.fColors[1],
+ info.fColorOffsets[1],
+ &gradStop1));
+
+ //TODO: figure out how to fake better if not affine
+ XPS_POINT centerPoint;
+ XPS_POINT gradientOrigin;
+ XPS_SIZE radiiSizes;
+ if (xpsMatrix) {
+ centerPoint = xps_point(info.fPoint[0]);
+ gradientOrigin = xps_point(info.fPoint[0]);
+ radiiSizes.width = SkScalarToFLOAT(info.fRadius[0]);
+ radiiSizes.height = SkScalarToFLOAT(info.fRadius[0]);
+ } else {
+ centerPoint = xps_point(info.fPoint[0], localMatrix);
+ gradientOrigin = xps_point(info.fPoint[0], localMatrix);
+
+ SkScalar radius = info.fRadius[0];
+ SkVector vec[2];
+
+ vec[0].set(radius, 0);
+ vec[1].set(0, radius);
+ localMatrix.mapVectors(vec, 2);
+
+ SkScalar d0 = vec[0].length();
+ SkScalar d1 = vec[1].length();
+
+ radiiSizes.width = SkScalarToFLOAT(d0);
+ radiiSizes.height = SkScalarToFLOAT(d1);
+ }
+
+ SkTScopedComPtr<IXpsOMRadialGradientBrush> gradientBrush;
+ HRM(this->fXpsFactory->CreateRadialGradientBrush(gradStop0.get(),
+ gradStop1.get(),
+ &centerPoint,
+ &gradientOrigin,
+ &radiiSizes,
+ &gradientBrush),
+ "Could not create radial gradient brush.");
+ if (xpsMatrix) {
+ HRM(gradientBrush->SetTransformLocal(xpsMatrix),
+ "Could not set transform on radial gradient brush.");
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStopCollection> gradStopCollection;
+ HRM(gradientBrush->GetGradientStops(&gradStopCollection),
+ "Could not get radial gradient stop collection.");
+ for (int i = 2; i < info.fColorCount; ++i) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop;
+ HR(createXpsGradientStop(info.fColors[i],
+ info.fColorOffsets[i],
+ &gradStop));
+ HRM(gradStopCollection->Append(gradStop.get()),
+ "Could not add radial gradient stop.");
+ }
+
+ HRM(gradientBrush->SetSpreadMethod(xps_spread_method((SkTileMode)info.fTileMode)),
+ "Could not set spread method of radial gradient.");
+
+ HRM(gradientBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity of radial gradient brush.");
+ HRM(gradientBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI failed.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsBrush(const SkPaint& skPaint,
+ IXpsOMBrush** brush,
+ const SkMatrix* parentTransform) {
+ const SkShader *shader = skPaint.getShader();
+ if (nullptr == shader) {
+ HR(this->createXpsSolidColorBrush(skPaint.getColor(), 0xFF, brush));
+ return S_OK;
+ }
+
+ //Gradient shaders.
+ SkShader::GradientInfo info;
+ info.fColorCount = 0;
+ info.fColors = nullptr;
+ info.fColorOffsets = nullptr;
+ SkShader::GradientType gradientType = shader->asAGradient(&info);
+
+ if (SkShader::kNone_GradientType == gradientType) {
+ //Nothing to see, move along.
+
+ } else if (SkShader::kColor_GradientType == gradientType) {
+ SkASSERT(1 == info.fColorCount);
+ SkColor color;
+ info.fColors = &color;
+ shader->asAGradient(&info);
+ SkAlpha alpha = skPaint.getAlpha();
+ HR(this->createXpsSolidColorBrush(color, alpha, brush));
+ return S_OK;
+
+ } else {
+ if (info.fColorCount == 0) {
+ const SkColor color = skPaint.getColor();
+ HR(this->createXpsSolidColorBrush(color, 0xFF, brush));
+ return S_OK;
+ }
+
+ SkAutoTArray<SkColor> colors(info.fColorCount);
+ SkAutoTArray<SkScalar> colorOffsets(info.fColorCount);
+ info.fColors = colors.get();
+ info.fColorOffsets = colorOffsets.get();
+ shader->asAGradient(&info);
+
+ if (1 == info.fColorCount) {
+ SkColor color = info.fColors[0];
+ SkAlpha alpha = skPaint.getAlpha();
+ HR(this->createXpsSolidColorBrush(color, alpha, brush));
+ return S_OK;
+ }
+
+ SkMatrix localMatrix = as_SB(shader)->getLocalMatrix();
+ if (parentTransform) {
+ localMatrix.preConcat(*parentTransform);
+ }
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(localMatrix, &xpsMatrixToUse));
+
+ if (SkShader::kLinear_GradientType == gradientType) {
+ HR(this->createXpsLinearGradient(info,
+ skPaint.getAlpha(),
+ localMatrix,
+ xpsMatrixToUse.get(),
+ brush));
+ return S_OK;
+ }
+
+ if (SkShader::kRadial_GradientType == gradientType) {
+ HR(this->createXpsRadialGradient(info,
+ skPaint.getAlpha(),
+ localMatrix,
+ xpsMatrixToUse.get(),
+ brush));
+ return S_OK;
+ }
+
+ if (SkShader::kConical_GradientType == gradientType) {
+ //Simple if affine and one radius is zero; otherwise it would have to be
+ //faked. For now, fall through to the image/solid-color fallback below.
+ }
+
+ if (SkShader::kSweep_GradientType == gradientType) {
+ //No XPS equivalent; would have to be faked. Falls through as well.
+ }
+ }
+
+ SkBitmap outTexture;
+ SkMatrix outMatrix;
+ SkTileMode xy[2];
+ SkImage* image = shader->isAImage(&outMatrix, xy);
+ if (image && image->asLegacyBitmap(&outTexture)) {
+ //TODO: outMatrix??
+ SkMatrix localMatrix = as_SB(shader)->getLocalMatrix();
+ if (parentTransform) {
+ localMatrix.postConcat(*parentTransform);
+ }
+
+ SkTScopedComPtr<IXpsOMTileBrush> tileBrush;
+ HR(this->createXpsImageBrush(outTexture,
+ localMatrix,
+ xy,
+ skPaint.getAlpha(),
+ &tileBrush));
+
+ HRM(tileBrush->QueryInterface<IXpsOMBrush>(brush), "QI failed.");
+ } else {
+ HR(this->createXpsSolidColorBrush(skPaint.getColor(), 0xFF, brush));
+ }
+ return S_OK;
+}
+
+static bool rect_must_be_pathed(const SkPaint& paint, const SkMatrix& matrix) {
+ const bool zeroWidth = (0 == paint.getStrokeWidth());
+ const bool stroke = (SkPaint::kFill_Style != paint.getStyle());
+
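+ //For example, a hairline (zero width) stroke under perspective can still
+ //use the rect fast path, while a stroked rect whose miter limit is below
+ //sqrt(2) falls back to the full path pipeline.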
+ return paint.getPathEffect() ||
+ paint.getMaskFilter() ||
+ (stroke && (
+ (matrix.hasPerspective() && !zeroWidth) ||
+ SkPaint::kMiter_Join != paint.getStrokeJoin() ||
+ (SkPaint::kMiter_Join == paint.getStrokeJoin() &&
+ paint.getStrokeMiter() < SK_ScalarSqrt2)
+ ))
+ ;
+}
+
+HRESULT SkXPSDevice::createXpsRect(const SkRect& rect, BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsRect) {
+ const SkPoint points[4] = {
+ { rect.fLeft, rect.fTop },
+ { rect.fRight, rect.fTop },
+ { rect.fRight, rect.fBottom },
+ { rect.fLeft, rect.fBottom },
+ };
+ return this->createXpsQuad(points, stroke, fill, xpsRect);
+}
+HRESULT SkXPSDevice::createXpsQuad(const SkPoint (&points)[4],
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsQuad) {
+ // Define the start point.
+ XPS_POINT startPoint = xps_point(points[0]);
+
+ // Create the figure.
+ HRM(this->fXpsFactory->CreateGeometryFigure(&startPoint, xpsQuad),
+ "Could not create quad geometry figure.");
+
+ // Define the type of each segment.
+ XPS_SEGMENT_TYPE segmentTypes[3] = {
+ XPS_SEGMENT_TYPE_LINE,
+ XPS_SEGMENT_TYPE_LINE,
+ XPS_SEGMENT_TYPE_LINE,
+ };
+
+ // Define the x and y coordinates of each corner of the figure.
+ FLOAT segmentData[6] = {
+ SkScalarToFLOAT(points[1].fX), SkScalarToFLOAT(points[1].fY),
+ SkScalarToFLOAT(points[2].fX), SkScalarToFLOAT(points[2].fY),
+ SkScalarToFLOAT(points[3].fX), SkScalarToFLOAT(points[3].fY),
+ };
+
+ // Describe if the segments are stroked.
+ BOOL segmentStrokes[3] = {
+ stroke, stroke, stroke,
+ };
+
+ // Add the segment data to the figure.
+ HRM((*xpsQuad)->SetSegments(
+ 3, 6,
+ segmentTypes , segmentData, segmentStrokes),
+ "Could not add segment data to quad.");
+
+ // Set the closed and filled properties of the figure.
+ HRM((*xpsQuad)->SetIsClosed(stroke), "Could not set quad close.");
+ HRM((*xpsQuad)->SetIsFilled(fill), "Could not set quad fill.");
+
+ return S_OK;
+}
+
+void SkXPSDevice::drawPoints(SkCanvas::PointMode mode,
+ size_t count, const SkPoint points[],
+ const SkPaint& paint) {
+ //TODO
+}
+
+void SkXPSDevice::drawVertices(const SkVertices* v, const SkVertices::Bone bones[], int boneCount,
+ SkBlendMode blendMode, const SkPaint& paint) {
+ //TODO
+}
+
+void SkXPSDevice::drawPaint(const SkPaint& origPaint) {
+ const SkRect r = SkRect::MakeSize(this->fCurrentCanvasSize);
+
+ //If trying to paint with a stroke, ignore that and fill.
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+ if (paint->getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+
+ this->internalDrawRect(r, false, *paint);
+}
+
+void SkXPSDevice::drawRect(const SkRect& r,
+ const SkPaint& paint) {
+ this->internalDrawRect(r, true, paint);
+}
+
+void SkXPSDevice::drawRRect(const SkRRect& rr,
+ const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(rr);
+ this->drawPath(path, paint, true);
+}
+
+static SkIRect size(const SkBaseDevice& dev) { return {0, 0, dev.width(), dev.height()}; }
+
+void SkXPSDevice::internalDrawRect(const SkRect& r,
+ bool transformRect,
+ const SkPaint& paint) {
+ //Exit early if there is nothing to draw.
+ if (this->cs().isEmpty(size(*this)) ||
+ (paint.getAlpha() == 0 && paint.isSrcOver())) {
+ return;
+ }
+
+ //Path the rect if we can't optimize it.
+ if (rect_must_be_pathed(paint, this->ctm())) {
+ SkPath tmp;
+ tmp.addRect(r);
+ tmp.setFillType(SkPath::kWinding_FillType);
+ this->drawPath(tmp, paint, true);
+ return;
+ }
+
+ //Create the shaded path.
+ SkTScopedComPtr<IXpsOMPath> shadedPath;
+ HRVM(this->fXpsFactory->CreatePath(&shadedPath),
+ "Could not create shaded path for rect.");
+
+ //Create the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRVM(this->fXpsFactory->CreateGeometry(&shadedGeometry),
+ "Could not create shaded geometry for rect.");
+
+ //Add the geometry to the shaded path.
+ HRVM(shadedPath->SetGeometryLocal(shadedGeometry.get()),
+ "Could not set shaded geometry for rect.");
+
+ //Set the brushes.
+ BOOL fill = FALSE;
+ BOOL stroke = FALSE;
+ HRV(this->shadePath(shadedPath.get(), paint, this->ctm(), &fill, &stroke));
+
+ bool xpsTransformsPath = true;
+ //Transform the geometry.
+ if (transformRect && xpsTransformsPath) {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ HRV(this->createXpsTransform(this->ctm(), &xpsTransform));
+ if (xpsTransform.get()) {
+ HRVM(shadedGeometry->SetTransformLocal(xpsTransform.get()),
+ "Could not set transform for rect.");
+ } else {
+ xpsTransformsPath = false;
+ }
+ }
+
+ //Create the figure.
+ SkTScopedComPtr<IXpsOMGeometryFigure> rectFigure;
+ {
+ SkPoint points[4] = {
+ { r.fLeft, r.fTop },
+ { r.fLeft, r.fBottom },
+ { r.fRight, r.fBottom },
+ { r.fRight, r.fTop },
+ };
+ if (!xpsTransformsPath && transformRect) {
+ this->ctm().mapPoints(points, SK_ARRAY_COUNT(points));
+ }
+ HRV(this->createXpsQuad(points, stroke, fill, &rectFigure));
+ }
+
+ //Get the figures of the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRVM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get shaded figures for rect.");
+
+ //Add the figure to the shaded geometry figures.
+ HRVM(shadedFigures->Append(rectFigure.get()),
+ "Could not add shaded figure for rect.");
+
+ HRV(this->clip(shadedPath.get()));
+
+ //Add the shaded path to the current visuals.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for rect.");
+ HRVM(currentVisuals->Append(shadedPath.get()),
+ "Could not add rect to current visuals.");
+}
+
+static HRESULT close_figure(const SkTDArray<XPS_SEGMENT_TYPE>& segmentTypes,
+ const SkTDArray<BOOL>& segmentStrokes,
+ const SkTDArray<FLOAT>& segmentData,
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure* figure,
+ IXpsOMGeometryFigureCollection* figures) {
+ // Add the segment data to the figure.
+ HRM(figure->SetSegments(segmentTypes.count(), segmentData.count(),
+ segmentTypes.begin() , segmentData.begin(),
+ segmentStrokes.begin()),
+ "Could not set path segments.");
+
+ // Set the closed and filled properties of the figure.
+ HRM(figure->SetIsClosed(stroke), "Could not set path closed.");
+ HRM(figure->SetIsFilled(fill), "Could not set path fill.");
+
+ // Add the figure created above to this geometry.
+ HRM(figures->Append(figure), "Could not add path to geometry.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::addXpsPathGeometry(
+ IXpsOMGeometryFigureCollection* xpsFigures,
+ BOOL stroke, BOOL fill, const SkPath& path) {
+ SkTDArray<XPS_SEGMENT_TYPE> segmentTypes;
+ SkTDArray<BOOL> segmentStrokes;
+ SkTDArray<FLOAT> segmentData;
+
+ SkTScopedComPtr<IXpsOMGeometryFigure> xpsFigure;
+ SkPath::Iter iter(path, true);
+ SkPoint points[4];
+ SkPath::Verb verb;
+ while ((verb = iter.next(points)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb: {
+ if (xpsFigure.get()) {
+ HR(close_figure(segmentTypes, segmentStrokes, segmentData,
+ stroke, fill,
+ xpsFigure.get() , xpsFigures));
+ xpsFigure.reset();
+ segmentTypes.rewind();
+ segmentStrokes.rewind();
+ segmentData.rewind();
+ }
+ // Define the start point.
+ XPS_POINT startPoint = xps_point(points[0]);
+ // Create the figure.
+ HRM(this->fXpsFactory->CreateGeometryFigure(&startPoint,
+ &xpsFigure),
+ "Could not create path geometry figure.");
+ break;
+ }
+ case SkPath::kLine_Verb:
+ if (iter.isCloseLine()) break; //ignore the line, auto-closed
+ segmentTypes.push_back(XPS_SEGMENT_TYPE_LINE);
+ segmentStrokes.push_back(stroke);
+ segmentData.push_back(SkScalarToFLOAT(points[1].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[1].fY));
+ break;
+ case SkPath::kQuad_Verb:
+ segmentTypes.push_back(XPS_SEGMENT_TYPE_QUADRATIC_BEZIER);
+ segmentStrokes.push_back(stroke);
+ segmentData.push_back(SkScalarToFLOAT(points[1].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[1].fY));
+ segmentData.push_back(SkScalarToFLOAT(points[2].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[2].fY));
+ break;
+ case SkPath::kCubic_Verb:
+ segmentTypes.push_back(XPS_SEGMENT_TYPE_BEZIER);
+ segmentStrokes.push_back(stroke);
+ segmentData.push_back(SkScalarToFLOAT(points[1].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[1].fY));
+ segmentData.push_back(SkScalarToFLOAT(points[2].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[2].fY));
+ segmentData.push_back(SkScalarToFLOAT(points[3].fX));
+ segmentData.push_back(SkScalarToFLOAT(points[3].fY));
+ break;
+ case SkPath::kConic_Verb: {
+ const SkScalar tol = SK_Scalar1 / 4;
+ SkAutoConicToQuads converter;
+ const SkPoint* quads =
+ converter.computeQuads(points, iter.conicWeight(), tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ segmentTypes.push_back(XPS_SEGMENT_TYPE_QUADRATIC_BEZIER);
+ segmentStrokes.push_back(stroke);
+ segmentData.push_back(SkScalarToFLOAT(quads[2 * i + 1].fX));
+ segmentData.push_back(SkScalarToFLOAT(quads[2 * i + 1].fY));
+ segmentData.push_back(SkScalarToFLOAT(quads[2 * i + 2].fX));
+ segmentData.push_back(SkScalarToFLOAT(quads[2 * i + 2].fY));
+ }
+ break;
+ }
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ if (xpsFigure.get()) {
+ HR(close_figure(segmentTypes, segmentStrokes, segmentData,
+ stroke, fill,
+ xpsFigure.get(), xpsFigures));
+ }
+ return S_OK;
+}
+
+void SkXPSDevice::convertToPpm(const SkMaskFilter* filter,
+ SkMatrix* matrix,
+ SkVector* ppuScale,
+ const SkIRect& clip, SkIRect* clipIRect) {
+ //This action is in unit space, but the ppm is specified in physical space.
+ ppuScale->set(fCurrentPixelsPerMeter.fX / fCurrentUnitsPerMeter.fX,
+ fCurrentPixelsPerMeter.fY / fCurrentUnitsPerMeter.fY);
+
+ matrix->postScale(ppuScale->fX, ppuScale->fY);
+
+ const SkIRect& irect = clip;
+ SkRect clipRect = SkRect::MakeLTRB(SkIntToScalar(irect.fLeft) * ppuScale->fX,
+ SkIntToScalar(irect.fTop) * ppuScale->fY,
+ SkIntToScalar(irect.fRight) * ppuScale->fX,
+ SkIntToScalar(irect.fBottom) * ppuScale->fY);
+ clipRect.roundOut(clipIRect);
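+ //For example (illustrative numbers): pixels per meter of (7200, 7200) with
+ //units per meter of (3600, 3600) gives a ppuScale of (2, 2), so a clip of
+ //(0, 0, 100, 100) rounds out to (0, 0, 200, 200).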
+}
+
+HRESULT SkXPSDevice::applyMask(const SkMask& mask,
+ const SkVector& ppuScale,
+ IXpsOMPath* shadedPath) {
+ //Get the geometry object.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRM(shadedPath->GetGeometry(&shadedGeometry),
+ "Could not get mask shaded geometry.");
+
+ //Get the figures from the geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get mask shaded figures.");
+
+ SkMatrix m;
+ m.reset();
+ m.setTranslate(SkIntToScalar(mask.fBounds.fLeft),
+ SkIntToScalar(mask.fBounds.fTop));
+ m.postScale(SkScalarInvert(ppuScale.fX), SkScalarInvert(ppuScale.fY));
+
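+ //(SkTileMode)3 selects the internal "None" column of gSkToXpsTileMode, so
+ //the mask brush does not tile (see the TODO above that table).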
+ SkTileMode xy[2];
+ xy[0] = (SkTileMode)3;
+ xy[1] = (SkTileMode)3;
+
+ SkBitmap bm;
+ bm.installMaskPixels(mask);
+
+ SkTScopedComPtr<IXpsOMTileBrush> maskBrush;
+ HR(this->createXpsImageBrush(bm, m, xy, 0xFF, &maskBrush));
+ HRM(shadedPath->SetOpacityMaskBrushLocal(maskBrush.get()),
+ "Could not set mask.");
+
+ const SkRect universeRect = SkRect::MakeLTRB(0, 0,
+ this->fCurrentCanvasSize.fWidth, this->fCurrentCanvasSize.fHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> shadedFigure;
+ HRM(this->createXpsRect(universeRect, FALSE, TRUE, &shadedFigure),
+ "Could not create mask shaded figure.");
+ HRM(shadedFigures->Append(shadedFigure.get()),
+ "Could not add mask shaded figure.");
+
+ HR(this->clip(shadedPath));
+
+ //Add the path to the active visual collection.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get mask current visuals.");
+ HRM(currentVisuals->Append(shadedPath),
+ "Could not add masked shaded path to current visuals.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::shadePath(IXpsOMPath* shadedPath,
+ const SkPaint& shaderPaint,
+ const SkMatrix& matrix,
+ BOOL* fill, BOOL* stroke) {
+ *fill = FALSE;
+ *stroke = FALSE;
+
+ const SkPaint::Style style = shaderPaint.getStyle();
+ const bool hasFill = SkPaint::kFill_Style == style
+ || SkPaint::kStrokeAndFill_Style == style;
+ const bool hasStroke = SkPaint::kStroke_Style == style
+ || SkPaint::kStrokeAndFill_Style == style;
+
+ //TODO(bungeman): use dictionaries and lookups.
+ if (hasFill) {
+ *fill = TRUE;
+ SkTScopedComPtr<IXpsOMBrush> fillBrush;
+ HR(this->createXpsBrush(shaderPaint, &fillBrush, &matrix));
+ HRM(shadedPath->SetFillBrushLocal(fillBrush.get()),
+ "Could not set fill for shaded path.");
+ }
+
+ if (hasStroke) {
+ *stroke = TRUE;
+ SkTScopedComPtr<IXpsOMBrush> strokeBrush;
+ HR(this->createXpsBrush(shaderPaint, &strokeBrush, &matrix));
+ HRM(shadedPath->SetStrokeBrushLocal(strokeBrush.get()),
+ "Could not set stroke brush for shaded path.");
+ HRM(shadedPath->SetStrokeThickness(
+ SkScalarToFLOAT(shaderPaint.getStrokeWidth())),
+ "Could not set shaded path stroke thickness.");
+
+ if (0 == shaderPaint.getStrokeWidth()) {
+ //XPS hair width is a hack. (XPS Spec 11.6.12).
+ SkTScopedComPtr<IXpsOMDashCollection> dashes;
+ HRM(shadedPath->GetStrokeDashes(&dashes),
+ "Could not set dashes for shaded path.");
+ XPS_DASH dash;
+ dash.length = 1.0;
+ dash.gap = 0.0;
+ HRM(dashes->Append(&dash), "Could not add dashes to shaded path.");
+ HRM(shadedPath->SetStrokeDashOffset(-2.0),
+ "Could not set dash offset for shaded path.");
+ }
+ }
+ return S_OK;
+}
+
+void SkXPSDevice::drawPath(const SkPath& platonicPath,
+ const SkPaint& origPaint,
+ bool pathIsMutable) {
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ // nothing to draw
+ if (this->cs().isEmpty(size(*this)) ||
+ (paint->getAlpha() == 0 && paint->isSrcOver())) {
+ return;
+ }
+
+ SkPath modifiedPath;
+ const bool paintHasPathEffect = paint->getPathEffect()
+ || paint->getStyle() != SkPaint::kFill_Style;
+
+ //Apply pre-path matrix [Platonic-path -> Skeletal-path].
+ SkMatrix matrix = this->ctm();
+ SkPath* skeletalPath = const_cast<SkPath*>(&platonicPath);
+
+ //Apply path effect [Skeletal-path -> Fillable-path].
+ SkPath* fillablePath = skeletalPath;
+ if (paintHasPathEffect) {
+ if (!pathIsMutable) {
+ fillablePath = &modifiedPath;
+ pathIsMutable = true;
+ }
+ bool fill = paint->getFillPath(*skeletalPath, fillablePath);
+
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setPathEffect(nullptr);
+ if (fill) {
+ writablePaint->setStyle(SkPaint::kFill_Style);
+ } else {
+ writablePaint->setStyle(SkPaint::kStroke_Style);
+ writablePaint->setStrokeWidth(0);
+ }
+ }
+
+ //Create the shaded path. This will be the path which is painted.
+ SkTScopedComPtr<IXpsOMPath> shadedPath;
+ HRVM(this->fXpsFactory->CreatePath(&shadedPath),
+ "Could not create shaded path for path.");
+
+ //Create the geometry for the shaded path.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRVM(this->fXpsFactory->CreateGeometry(&shadedGeometry),
+ "Could not create shaded geometry for path.");
+
+ //Add the geometry to the shaded path.
+ HRVM(shadedPath->SetGeometryLocal(shadedGeometry.get()),
+ "Could not add the shaded geometry to shaded path.");
+
+ SkMaskFilter* filter = paint->getMaskFilter();
+
+ //Determine if we will draw or shade and mask.
+ if (filter) {
+ if (paint->getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+ }
+
+ //Set the brushes.
+ BOOL fill;
+ BOOL stroke;
+ HRV(this->shadePath(shadedPath.get(),
+ *paint,
+ this->ctm(),
+ &fill,
+ &stroke));
+
+ //Mask filter
+ if (filter) {
+ SkIRect clipIRect;
+ SkVector ppuScale;
+ this->convertToPpm(filter,
+ &matrix,
+ &ppuScale,
+ this->cs().bounds(size(*this)).roundOut(),
+ &clipIRect);
+
+ //[Fillable-path -> Pixel-path]
+ SkPath* pixelPath = pathIsMutable ? fillablePath : &modifiedPath;
+ fillablePath->transform(matrix, pixelPath);
+
+ SkMask* mask = nullptr;
+
+ SkASSERT(SkPaint::kFill_Style == paint->getStyle() ||
+ (SkPaint::kStroke_Style == paint->getStyle() && 0 == paint->getStrokeWidth()));
+ SkStrokeRec::InitStyle style = (SkPaint::kFill_Style == paint->getStyle())
+ ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ //[Pixel-path -> Mask]
+ SkMask rasteredMask;
+ if (SkDraw::DrawToMask(
+ *pixelPath,
+ &clipIRect,
+ filter, //just to compute how much to draw.
+ &matrix,
+ &rasteredMask,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ style)) {
+
+ SkAutoMaskFreeImage rasteredAmi(rasteredMask.fImage);
+ mask = &rasteredMask;
+
+ //[Mask -> Mask]
+ SkMask filteredMask;
+ if (as_MFB(filter)->filterMask(&filteredMask, rasteredMask, matrix, nullptr)) {
+ mask = &filteredMask;
+ }
+ SkAutoMaskFreeImage filteredAmi(filteredMask.fImage);
+
+ //Draw mask.
+ HRV(this->applyMask(*mask, ppuScale, shadedPath.get()));
+ }
+ return;
+ }
+
+ //Get the figures from the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRVM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get shaded figures for shaded path.");
+
+ bool xpsTransformsPath = true;
+
+ //Set the fill rule.
+ SkPath* xpsCompatiblePath = fillablePath;
+ XPS_FILL_RULE xpsFillRule;
+ switch (fillablePath->getFillType()) {
+ case SkPath::kWinding_FillType:
+ xpsFillRule = XPS_FILL_RULE_NONZERO;
+ break;
+ case SkPath::kEvenOdd_FillType:
+ xpsFillRule = XPS_FILL_RULE_EVENODD;
+ break;
+ case SkPath::kInverseWinding_FillType: {
+ //[Fillable-path (inverse winding) -> XPS-path (inverse even odd)]
+ if (!pathIsMutable) {
+ xpsCompatiblePath = &modifiedPath;
+ pathIsMutable = true;
+ }
+ if (!Simplify(*fillablePath, xpsCompatiblePath)) {
+ SkDEBUGF("Could not simplify inverse winding path.");
+ return;
+ }
+ }
+ // The xpsCompatiblePath is now inverse even odd, so fall through.
+ case SkPath::kInverseEvenOdd_FillType: {
+ const SkRect universe = SkRect::MakeLTRB(
+ 0, 0,
+ this->fCurrentCanvasSize.fWidth,
+ this->fCurrentCanvasSize.fHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> addOneFigure;
+ HRV(this->createXpsRect(universe, FALSE, TRUE, &addOneFigure));
+ HRVM(shadedFigures->Append(addOneFigure.get()),
+ "Could not add even-odd flip figure to shaded path.");
+ xpsTransformsPath = false;
+ xpsFillRule = XPS_FILL_RULE_EVENODD;
+ break;
+ }
+ default:
+ SkDEBUGFAIL("Unknown SkPath::FillType.");
+ }
+ HRVM(shadedGeometry->SetFillRule(xpsFillRule),
+ "Could not set fill rule for shaded path.");
+
+ //Create the XPS transform, if possible.
+ if (xpsTransformsPath) {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ HRV(this->createXpsTransform(matrix, &xpsTransform));
+
+ if (xpsTransform.get()) {
+ HRVM(shadedGeometry->SetTransformLocal(xpsTransform.get()),
+ "Could not set transform on shaded path.");
+ } else {
+ xpsTransformsPath = false;
+ }
+ }
+
+ SkPath* devicePath = xpsCompatiblePath;
+ if (!xpsTransformsPath) {
+ //[Fillable-path -> Device-path]
+ devicePath = pathIsMutable ? xpsCompatiblePath : &modifiedPath;
+ xpsCompatiblePath->transform(matrix, devicePath);
+ }
+ HRV(this->addXpsPathGeometry(shadedFigures.get(),
+ stroke, fill, *devicePath));
+
+ HRV(this->clip(shadedPath.get()));
+
+ //Add the path to the active visual collection.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for shaded path.");
+ HRVM(currentVisuals->Append(shadedPath.get()),
+ "Could not add shaded path to current visuals.");
+}
+
+HRESULT SkXPSDevice::clip(IXpsOMVisual* xpsVisual) {
+ if (this->cs().isWideOpen()) {
+ return S_OK;
+ }
+ SkPath clipPath;
+ // clipPath.addRect(this->cs().bounds(size(*this)));
+ (void)this->cs().asPath(&clipPath);
+ // TODO: handle all the kinds of paths, like drawPath does
+ return this->clipToPath(xpsVisual, clipPath, XPS_FILL_RULE_EVENODD);
+}
+HRESULT SkXPSDevice::clipToPath(IXpsOMVisual* xpsVisual,
+ const SkPath& clipPath,
+ XPS_FILL_RULE fillRule) {
+ //Create the geometry.
+ SkTScopedComPtr<IXpsOMGeometry> clipGeometry;
+ HRM(this->fXpsFactory->CreateGeometry(&clipGeometry),
+ "Could not create clip geometry.");
+
+ //Get the figure collection of the geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> clipFigures;
+ HRM(clipGeometry->GetFigures(&clipFigures),
+ "Could not get the clip figures.");
+
+ //Create the figures into the geometry.
+ HR(this->addXpsPathGeometry(
+ clipFigures.get(),
+ FALSE, TRUE, clipPath));
+
+ HRM(clipGeometry->SetFillRule(fillRule),
+ "Could not set fill rule.");
+ HRM(xpsVisual->SetClipGeometryLocal(clipGeometry.get()),
+ "Could not set clip geometry.");
+
+ return S_OK;
+}
+
+void SkXPSDevice::drawSprite(const SkBitmap& bitmap, int x, int y, const SkPaint& paint) {
+ //TODO: override this for XPS
+ SkDEBUGF("XPS drawSprite not yet implemented.");
+}
+
+HRESULT SkXPSDevice::CreateTypefaceUse(const SkFont& font,
+ TypefaceUse** typefaceUse) {
+ SkAutoResolveDefaultTypeface typeface(font.getTypeface());
+
+ //Check cache.
+ const SkFontID typefaceID = typeface->uniqueID();
+ for (TypefaceUse& current : this->fTypefaces) {
+ if (current.typefaceId == typefaceID) {
+ *typefaceUse = &current;
+ return S_OK;
+ }
+ }
+
+ //TODO: create glyph only fonts
+ //and let the host deal with what kind of font we're looking at.
+ XPS_FONT_EMBEDDING embedding = XPS_FONT_EMBEDDING_RESTRICTED;
+
+ SkTScopedComPtr<IStream> fontStream;
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> fontData = typeface->openStream(&ttcIndex);
+ if (!fontData) {
+ return E_NOTIMPL;
+ }
+ //TODO: cannot handle FON fonts.
+ HRM(SkIStream::CreateFromSkStream(fontData->duplicate(), &fontStream),
+ "Could not create font stream.");
+
+ const size_t size =
+ SK_ARRAY_COUNT(L"/Resources/Fonts/" L_GUID_ID L".odttf");
+ wchar_t buffer[size];
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Resources/Fonts/%s.odttf", id);
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create font resource part uri.");
+
+ SkTScopedComPtr<IXpsOMFontResource> xpsFontResource;
+ HRM(this->fXpsFactory->CreateFontResource(fontStream.get(),
+ embedding,
+ partUri.get(),
+ FALSE,
+ &xpsFontResource),
+ "Could not create font resource.");
+
+ //TODO: change openStream to return -1 for non-ttc, get rid of this.
+ uint8_t* data = (uint8_t*)fontData->getMemoryBase();
+ bool isTTC = (data &&
+ fontData->getLength() >= sizeof(SkTTCFHeader) &&
+ ((SkTTCFHeader*)data)->ttcTag == SkTTCFHeader::TAG);
+
+ int glyphCount = typeface->countGlyphs();
+
+ TypefaceUse& newTypefaceUse = this->fTypefaces.emplace_back(
+ typefaceID,
+ isTTC ? ttcIndex : -1,
+ std::move(fontData),
+ std::move(xpsFontResource),
+ glyphCount);
+
+ *typefaceUse = &newTypefaceUse;
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::AddGlyphs(IXpsOMObjectFactory* xpsFactory,
+ IXpsOMCanvas* canvas,
+ const TypefaceUse* font,
+ LPCWSTR text,
+ XPS_GLYPH_INDEX* xpsGlyphs,
+ UINT32 xpsGlyphsLen,
+ XPS_POINT *origin,
+ FLOAT fontSize,
+ XPS_STYLE_SIMULATION sims,
+ const SkMatrix& transform,
+ const SkPaint& paint) {
+ SkTScopedComPtr<IXpsOMGlyphs> glyphs;
+ HRM(xpsFactory->CreateGlyphs(font->xpsFont.get(), &glyphs), "Could not create glyphs.");
+ HRM(glyphs->SetFontFaceIndex(font->ttcIndex), "Could not set glyph font face index.");
+
+ //XPS uses affine transformations for everything...
+ //...except positioning text.
+ bool useCanvasForClip;
+ if (transform.isTranslate()) {
+ origin->x += SkScalarToFLOAT(transform.getTranslateX());
+ origin->y += SkScalarToFLOAT(transform.getTranslateY());
+ useCanvasForClip = false;
+ } else {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(transform, &xpsMatrixToUse));
+ if (xpsMatrixToUse.get()) {
+ HRM(glyphs->SetTransformLocal(xpsMatrixToUse.get()),
+ "Could not set transform matrix.");
+ useCanvasForClip = true;
+ } else {
+ SkDEBUGFAIL("Attempt to add glyphs in perspective.");
+ useCanvasForClip = false;
+ }
+ }
+
+ SkTScopedComPtr<IXpsOMGlyphsEditor> glyphsEditor;
+ HRM(glyphs->GetGlyphsEditor(&glyphsEditor), "Could not get glyph editor.");
+
+ if (text) {
+ HRM(glyphsEditor->SetUnicodeString(text),
+ "Could not set unicode string.");
+ }
+
+ if (xpsGlyphs) {
+ HRM(glyphsEditor->SetGlyphIndices(xpsGlyphsLen, xpsGlyphs),
+ "Could not set glyphs.");
+ }
+
+ HRM(glyphsEditor->ApplyEdits(), "Could not apply glyph edits.");
+
+ SkTScopedComPtr<IXpsOMBrush> xpsFillBrush;
+ HR(this->createXpsBrush(
+ paint,
+ &xpsFillBrush,
+ useCanvasForClip ? nullptr : &transform));
+
+ HRM(glyphs->SetFillBrushLocal(xpsFillBrush.get()),
+ "Could not set fill brush.");
+
+ HRM(glyphs->SetOrigin(origin), "Could not set glyph origin.");
+
+ HRM(glyphs->SetFontRenderingEmSize(fontSize),
+ "Could not set font size.");
+
+ HRM(glyphs->SetStyleSimulations(sims),
+ "Could not set style simulations.");
+
+ SkTScopedComPtr<IXpsOMVisualCollection> visuals;
+ HRM(canvas->GetVisuals(&visuals), "Could not get glyph canvas visuals.");
+
+ if (!useCanvasForClip) {
+ HR(this->clip(glyphs.get()));
+ HRM(visuals->Append(glyphs.get()), "Could not add glyphs to canvas.");
+ } else {
+ SkTScopedComPtr<IXpsOMCanvas> glyphCanvas;
+ HRM(this->fXpsFactory->CreateCanvas(&glyphCanvas),
+ "Could not create glyph canvas.");
+
+ SkTScopedComPtr<IXpsOMVisualCollection> glyphCanvasVisuals;
+ HRM(glyphCanvas->GetVisuals(&glyphCanvasVisuals),
+ "Could not get glyph visuals collection.");
+
+ HRM(glyphCanvasVisuals->Append(glyphs.get()),
+ "Could not add glyphs to page.");
+ HR(this->clip(glyphCanvas.get()));
+
+ HRM(visuals->Append(glyphCanvas.get()),
+ "Could not add glyph canvas to page.");
+ }
+
+ return S_OK;
+}
+
+static bool text_must_be_pathed(const SkPaint& paint, const SkMatrix& matrix) {
+ const SkPaint::Style style = paint.getStyle();
+ return matrix.hasPerspective()
+ || SkPaint::kStroke_Style == style
+ || SkPaint::kStrokeAndFill_Style == style
+ || paint.getMaskFilter()
+ ;
+}
+
+void SkXPSDevice::drawGlyphRunList(const SkGlyphRunList& glyphRunList) {
+
+ const SkPaint& paint = glyphRunList.paint();
+ for (const auto& run : glyphRunList) {
+ const SkGlyphID* glyphIDs = run.glyphsIDs().data();
+ size_t glyphCount = run.glyphsIDs().size();
+ const SkFont& font = run.font();
+
+ if (!glyphCount || !glyphIDs || font.getSize() <= 0) {
+ continue;
+ }
+
+ TypefaceUse* typeface;
+ if (FAILED(CreateTypefaceUse(font, &typeface)) || text_must_be_pathed(paint, this->ctm())) {
+ SkPath path;
+ //TODO: make this work, Draw currently does not handle as well.
+ //paint.getTextPath(text, byteLength, x, y, &path);
+ //this->drawPath(path, paint, nullptr, true);
+ //TODO: add automation "text"
+ continue;
+ }
+
+ //TODO: handle font scale and skew in x (text_scale_skew)
+
+ // Advance width and offsets for glyphs measured in hundredths of the font em size
+ // (XPS Spec 5.1.3).
+ FLOAT centemPerUnit = 100.0f / SkScalarToFLOAT(font.getSize());
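+ //For example, a 20 point font gives centemPerUnit 5, so a glyph positioned
+ //40 units to the right gets horizontalOffset 200, i.e. two full em units.
+ //(Illustrative numbers.)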
+ SkAutoSTMalloc<32, XPS_GLYPH_INDEX> xpsGlyphs(glyphCount);
+
+ for (size_t i = 0; i < glyphCount; ++i) {
+ const SkPoint& position = run.positions()[i];
+ XPS_GLYPH_INDEX& xpsGlyph = xpsGlyphs[i];
+ xpsGlyph.index = glyphIDs[i];
+ xpsGlyph.advanceWidth = 0.0f;
+ xpsGlyph.horizontalOffset = (SkScalarToFloat(position.fX) * centemPerUnit);
+ xpsGlyph.verticalOffset = (SkScalarToFloat(position.fY) * -centemPerUnit);
+ typeface->glyphsUsed.set(xpsGlyph.index);
+ }
+
+ XPS_POINT origin = {
+ glyphRunList.origin().x(),
+ glyphRunList.origin().y(),
+ };
+
+ HRV(AddGlyphs(this->fXpsFactory.get(),
+ this->fCurrentXpsCanvas.get(),
+ typeface,
+ nullptr,
+ xpsGlyphs.get(), glyphCount,
+ &origin,
+ SkScalarToFLOAT(font.getSize()),
+ XPS_STYLE_SIMULATION_NONE,
+ this->ctm(),
+ paint));
+ }
+}
+
+void SkXPSDevice::drawDevice( SkBaseDevice* dev,
+ int x, int y,
+ const SkPaint&) {
+ SkXPSDevice* that = static_cast<SkXPSDevice*>(dev);
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ // TODO(halcanary): assert that current transform is identity rather than calling setter.
+ XPS_MATRIX rawTransform = {1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f};
+ HRVM(this->fXpsFactory->CreateMatrixTransform(&rawTransform, &xpsTransform),
+ "Could not create layer transform.");
+ HRVM(that->fCurrentXpsCanvas->SetTransformLocal(xpsTransform.get()),
+ "Could not set layer transform.");
+
+ //Get the current visual collection and add the layer to it.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for layer.");
+ HRVM(currentVisuals->Append(that->fCurrentXpsCanvas.get()),
+ "Could not add layer to current visuals.");
+}
+
+SkBaseDevice* SkXPSDevice::onCreateDevice(const CreateInfo& info, const SkPaint*) {
+//Conditional for bug compatibility with PDF device.
+#if 0
+ if (SkBaseDevice::kGeneral_Usage == info.fUsage) {
+ return nullptr;
+ //To what stream do we write?
+ //SkXPSDevice* dev = new SkXPSDevice(this);
+ //SkSize s = SkSize::Make(width, height);
+ //dev->BeginCanvas(s, s, SkMatrix::I());
+ //return dev;
+ }
+#endif
+ SkXPSDevice* dev = new SkXPSDevice(info.fInfo.dimensions());
+    // TODO(halcanary) implement copy constructor on SkTScopedComPtr
+ dev->fXpsFactory.reset(SkRefComPtr(fXpsFactory.get()));
+ SkAssertResult(dev->createCanvasForLayer());
+ return dev;
+}
+
+void SkXPSDevice::drawOval( const SkRect& o, const SkPaint& p) {
+ SkPath path;
+ path.addOval(o);
+ this->drawPath(path, p, true);
+}
+
+void SkXPSDevice::drawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkRect bitmapBounds = SkRect::Make(bitmap.bounds());
+ SkRect srcBounds = src ? *src : bitmapBounds;
+ SkMatrix matrix = SkMatrix::MakeRectToRect(srcBounds, dst, SkMatrix::kFill_ScaleToFit);
+ SkRect actualDst;
+ if (!src || bitmapBounds.contains(*src)) {
+ actualDst = dst;
+ } else {
+ if (!srcBounds.intersect(bitmapBounds)) {
+ return;
+ }
+ matrix.mapRect(&actualDst, srcBounds);
+ }
+ auto bitmapShader = SkMakeBitmapShaderForPaint(paint, bitmap, SkTileMode::kClamp,
+ SkTileMode::kClamp, &matrix,
+ kNever_SkCopyPixelsMode);
+ SkASSERT(bitmapShader);
+ if (!bitmapShader) { return; }
+ SkPaint paintWithShader(paint);
+ paintWithShader.setStyle(SkPaint::kFill_Style);
+ paintWithShader.setShader(std::move(bitmapShader));
+ this->drawRect(actualDst, paintWithShader);
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/xps/SkXPSDevice.h b/gfx/skia/skia/src/xps/SkXPSDevice.h
new file mode 100644
index 0000000000..f37bb2bcaf
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkXPSDevice.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXPSDevice_DEFINED
+#define SkXPSDevice_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include <ObjBase.h>
+#include <XpsObjectModel.h>
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkTArray.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkClipStackDevice.h"
+#include "src/utils/SkBitSet.h"
+#include "src/utils/win/SkAutoCoInitialize.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+class SkGlyphRunList;
+
+//#define SK_XPS_USE_DETERMINISTIC_IDS
+
+/** \class SkXPSDevice
+
+ The drawing context for the XPS backend.
+*/
+class SkXPSDevice : public SkClipStackDevice {
+public:
+ SK_API SkXPSDevice(SkISize);
+ SK_API ~SkXPSDevice() override;
+
+ bool beginPortfolio(SkWStream* outputStream, IXpsOMObjectFactory*);
+ /**
+ @param unitsPerMeter converts geometry units into physical units.
+ @param pixelsPerMeter resolution to use when geometry must be rasterized.
+ @param trimSize final page size in physical units.
+ The top left of the trim is the origin of physical space.
+ @param mediaBox The size of the physical media in physical units.
+                        The top and left must be less than or equal to zero.
+                        The bottom and right must be greater than or equal to the trimSize.
+ The default is to coincide with the trimSize.
+ @param bleedBox The size of the bleed box in physical units.
+ Must be contained within the mediaBox.
+ The default is to coincide with the mediaBox.
+ @param artBox The size of the content box in physical units.
+ Must be contained within the trimSize.
+ The default is to coincide with the trimSize.
+ @param cropBox The size of the recommended view port in physical units.
+ Must be contained within the mediaBox.
+ The default is to coincide with the mediaBox.
+ */
+ bool beginSheet(
+ const SkVector& unitsPerMeter,
+ const SkVector& pixelsPerMeter,
+ const SkSize& trimSize,
+ const SkRect* mediaBox = NULL,
+ const SkRect* bleedBox = NULL,
+ const SkRect* artBox = NULL,
+ const SkRect* cropBox = NULL);
+
+ bool endSheet();
+ bool endPortfolio();
+
+protected:
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ void drawRect(const SkRect& r,
+ const SkPaint& paint) override;
+ void drawOval(const SkRect& oval,
+ const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr,
+ const SkPaint& paint) override;
+ void drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable = false) override;
+ void drawSprite(const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) override;
+ void drawBitmapRect(const SkBitmap&,
+ const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) override;
+ void drawGlyphRunList(const SkGlyphRunList& glyphRunList) override;
+ void drawVertices(const SkVertices*, const SkVertices::Bone bones[], int boneCount, SkBlendMode,
+ const SkPaint&) override;
+ void drawDevice(SkBaseDevice*, int x, int y,
+ const SkPaint&) override;
+
+private:
+ class TypefaceUse {
+ public:
+ TypefaceUse(SkFontID id, int index, std::unique_ptr<SkStream> data,
+ SkTScopedComPtr<IXpsOMFontResource> xps, size_t numGlyphs)
+ : typefaceId(id), ttcIndex(index), fontData(std::move(data))
+ , xpsFont(std::move(xps)), glyphsUsed(numGlyphs) {}
+ const SkFontID typefaceId;
+ const int ttcIndex;
+ const std::unique_ptr<SkStream> fontData;
+ const SkTScopedComPtr<IXpsOMFontResource> xpsFont;
+ SkBitSet glyphsUsed;
+ };
+ friend HRESULT subset_typeface(const TypefaceUse& current);
+
+ bool createCanvasForLayer();
+
+ SkTScopedComPtr<IXpsOMObjectFactory> fXpsFactory;
+ SkTScopedComPtr<IStream> fOutputStream;
+ SkTScopedComPtr<IXpsOMPackageWriter> fPackageWriter;
+
+ unsigned int fCurrentPage;
+ SkTScopedComPtr<IXpsOMCanvas> fCurrentXpsCanvas;
+ SkSize fCurrentCanvasSize;
+ SkVector fCurrentUnitsPerMeter;
+ SkVector fCurrentPixelsPerMeter;
+
+ SkTArray<TypefaceUse, true> fTypefaces;
+
+ /** Creates a GUID based id and places it into buffer.
+ buffer should have space for at least GUID_ID_LEN wide characters.
+ The string will always be wchar null terminated.
+ XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX0
+ The string may begin with a digit,
+ and so may not be suitable as a bare resource key.
+ */
+ HRESULT createId(wchar_t* buffer, size_t bufferSize, wchar_t sep = '-');
+#ifdef SK_XPS_USE_DETERMINISTIC_IDS
+ decltype(GUID::Data1) fNextId = 0;
+#endif
+
+ HRESULT initXpsDocumentWriter(IXpsOMImageResource* image);
+
+ HRESULT createXpsPage(
+ const XPS_SIZE& pageSize,
+ IXpsOMPage** page);
+
+ HRESULT createXpsThumbnail(
+ IXpsOMPage* page, const unsigned int pageNumber,
+ IXpsOMImageResource** image);
+
+ void internalDrawRect(
+ const SkRect& r,
+ bool transformRect,
+ const SkPaint& paint);
+
+ HRESULT createXpsBrush(
+ const SkPaint& skPaint,
+ IXpsOMBrush** xpsBrush,
+ const SkMatrix* parentTransform = NULL);
+
+ HRESULT createXpsSolidColorBrush(
+ const SkColor skColor, const SkAlpha alpha,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsImageBrush(
+ const SkBitmap& bitmap,
+ const SkMatrix& localMatrix,
+ const SkTileMode (&xy)[2],
+ const SkAlpha alpha,
+ IXpsOMTileBrush** xpsBrush);
+
+ HRESULT createXpsLinearGradient(
+ SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrixToUse,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsRadialGradient(
+ SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrixToUse,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsGradientStop(
+ const SkColor skColor,
+ const SkScalar offset,
+ IXpsOMGradientStop** xpsGradStop);
+
+ HRESULT createXpsTransform(
+ const SkMatrix& matrix,
+ IXpsOMMatrixTransform ** xpsTransform);
+
+ HRESULT createXpsRect(
+ const SkRect& rect,
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsRect);
+
+ HRESULT createXpsQuad(
+ const SkPoint (&points)[4],
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsQuad);
+
+ HRESULT CreateTypefaceUse(
+ const SkFont& font,
+ TypefaceUse** fontResource);
+
+ HRESULT AddGlyphs(
+ IXpsOMObjectFactory* xpsFactory,
+ IXpsOMCanvas* canvas,
+ const TypefaceUse* font,
+ LPCWSTR text,
+ XPS_GLYPH_INDEX* xpsGlyphs,
+ UINT32 xpsGlyphsLen,
+ XPS_POINT *origin,
+ FLOAT fontSize,
+ XPS_STYLE_SIMULATION sims,
+ const SkMatrix& transform,
+ const SkPaint& paint);
+
+ HRESULT addXpsPathGeometry(
+ IXpsOMGeometryFigureCollection* figures,
+ BOOL stroke, BOOL fill, const SkPath& path);
+
+ HRESULT createPath(
+ IXpsOMGeometryFigure* figure,
+ IXpsOMVisualCollection* visuals,
+ IXpsOMPath** path);
+
+ HRESULT sideOfClamp(
+ const SkRect& leftPoints, const XPS_RECT& left,
+ IXpsOMImageResource* imageResource,
+ IXpsOMVisualCollection* visuals);
+
+ HRESULT cornerOfClamp(
+ const SkRect& tlPoints,
+ const SkColor color,
+ IXpsOMVisualCollection* visuals);
+
+ HRESULT clip(IXpsOMVisual* xpsVisual);
+
+ HRESULT clipToPath(
+ IXpsOMVisual* xpsVisual,
+ const SkPath& clipPath,
+ XPS_FILL_RULE fillRule);
+
+ HRESULT drawInverseWindingPath(
+ const SkPath& devicePath,
+ IXpsOMPath* xpsPath);
+
+ HRESULT shadePath(
+ IXpsOMPath* shadedPath,
+ const SkPaint& shaderPaint,
+ const SkMatrix& matrix,
+ BOOL* fill, BOOL* stroke);
+
+ void convertToPpm(
+ const SkMaskFilter* filter,
+ SkMatrix* matrix,
+ SkVector* ppuScale,
+ const SkIRect& clip, SkIRect* clipIRect);
+
+ HRESULT applyMask(
+ const SkMask& mask,
+ const SkVector& ppuScale,
+ IXpsOMPath* shadedPath);
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ // Disable the default copy and assign implementation.
+ SkXPSDevice(const SkXPSDevice&);
+ void operator=(const SkXPSDevice&);
+
+ typedef SkClipStackDevice INHERITED;
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkXPSDevice_DEFINED
diff --git a/gfx/skia/skia/src/xps/SkXPSDocument.cpp b/gfx/skia/skia/src/xps/SkXPSDocument.cpp
new file mode 100644
index 0000000000..b7000bfca1
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkXPSDocument.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/docs/SkXPSDocument.h"
+
+#include "include/core/SkStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+#include "src/xps/SkXPSDevice.h"
+
+#include <XpsObjectModel.h>
+
+namespace {
+struct SkXPSDocument final : public SkDocument {
+ SkTScopedComPtr<IXpsOMObjectFactory> fXpsFactory;
+ SkXPSDevice fDevice;
+ std::unique_ptr<SkCanvas> fCanvas;
+ SkVector fUnitsPerMeter;
+ SkVector fPixelsPerMeter;
+
+ SkXPSDocument(SkWStream*, SkScalar dpi, SkTScopedComPtr<IXpsOMObjectFactory>);
+ ~SkXPSDocument() override;
+ SkCanvas* onBeginPage(SkScalar w, SkScalar h) override;
+ void onEndPage() override;
+ void onClose(SkWStream*) override;
+ void onAbort() override;
+};
+}
+
+SkXPSDocument::SkXPSDocument(SkWStream* stream,
+ SkScalar dpi,
+ SkTScopedComPtr<IXpsOMObjectFactory> xpsFactory)
+ : SkDocument(stream)
+ , fXpsFactory(std::move(xpsFactory))
+ , fDevice(SkISize{10000, 10000})
+{
+ const SkScalar kPointsPerMeter = SkDoubleToScalar(360000.0 / 127.0);
+ fUnitsPerMeter.set(kPointsPerMeter, kPointsPerMeter);
+ SkScalar pixelsPerMeterScale = SkDoubleToScalar(dpi * 5000.0 / 127.0);
+ fPixelsPerMeter.set(pixelsPerMeterScale, pixelsPerMeterScale);
+ SkASSERT(fXpsFactory);
+ fDevice.beginPortfolio(stream, fXpsFactory.get());
+}
+
+SkXPSDocument::~SkXPSDocument() {
+ // subclasses must call close() in their destructors
+ this->close();
+}
+
+SkCanvas* SkXPSDocument::onBeginPage(SkScalar width, SkScalar height) {
+ fDevice.beginSheet(fUnitsPerMeter, fPixelsPerMeter, {width, height});
+ fCanvas.reset(new SkCanvas(sk_ref_sp(&fDevice)));
+ return fCanvas.get();
+}
+
+void SkXPSDocument::onEndPage() {
+ SkASSERT(fCanvas.get());
+ fCanvas.reset(nullptr);
+ fDevice.endSheet();
+}
+
+void SkXPSDocument::onClose(SkWStream*) {
+ SkASSERT(!fCanvas.get());
+ (void)fDevice.endPortfolio();
+}
+
+void SkXPSDocument::onAbort() {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDocument> SkXPS::MakeDocument(SkWStream* stream,
+ IXpsOMObjectFactory* factoryPtr,
+ SkScalar dpi) {
+ SkTScopedComPtr<IXpsOMObjectFactory> factory(SkSafeRefComPtr(factoryPtr));
+ return stream && factory
+ ? sk_make_sp<SkXPSDocument>(stream, dpi, std::move(factory))
+ : nullptr;
+}
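+
+// Usage sketch (illustrative only, hence guarded out; the helper name and the
+// 72 dpi value are arbitrary assumptions): create the COM factory, hand it to
+// SkXPS::MakeDocument, then drive the usual SkDocument page loop. Assumes COM
+// is already initialized on this thread.
+#if 0
+static void example_write_xps(SkWStream* stream) {
+    IXpsOMObjectFactory* factory = nullptr;
+    if (FAILED(CoCreateInstance(CLSID_XpsOMObjectFactory, nullptr,
+                                CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&factory)))) {
+        return;
+    }
+    if (sk_sp<SkDocument> doc = SkXPS::MakeDocument(stream, factory, /*dpi=*/72.0f)) {
+        SkCanvas* page = doc->beginPage(612, 792);  // US Letter, in points
+        SkPaint paint;
+        page->drawCircle(306, 396, 100, paint);     // sample content
+        doc->endPage();
+        doc->close();
+    }
+    factory->Release();
+}
+#endif
+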
+#endif // defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/third_party/skcms/LICENSE b/gfx/skia/skia/third_party/skcms/LICENSE
new file mode 100644
index 0000000000..6c7c5be360
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/LICENSE
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
diff --git a/gfx/skia/skia/third_party/skcms/README.chromium b/gfx/skia/skia/third_party/skcms/README.chromium
new file mode 100644
index 0000000000..046f6b1d19
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/README.chromium
@@ -0,0 +1,5 @@
+Name: skcms
+URL: https://skia.org/
+Version: unknown
+Security Critical: yes
+License: BSD
diff --git a/gfx/skia/skia/third_party/skcms/skcms.cc b/gfx/skia/skia/third_party/skcms/skcms.cc
new file mode 100644
index 0000000000..6b4d87b15d
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/skcms.cc
@@ -0,0 +1,2570 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "skcms.h"
+#include "skcms_internal.h"
+#include <assert.h>
+#include <float.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(__ARM_NEON)
+ #include <arm_neon.h>
+#elif defined(__SSE__)
+ #include <immintrin.h>
+
+ #if defined(__clang__)
+ // That #include <immintrin.h> is usually enough, but Clang's headers
+ // "helpfully" skip including the whole kitchen sink when _MSC_VER is
+ // defined, because lots of programs on Windows would include that and
+ // it'd be a lot slower. But we want all those headers included so we
+ // can use their features after runtime checks later.
+ #include <smmintrin.h>
+ #include <avxintrin.h>
+ #include <avx2intrin.h>
+ #include <avx512fintrin.h>
+ #include <avx512dqintrin.h>
+ #endif
+#endif
+
+// sizeof(x) will return size_t, which is 32-bit on some machines and 64-bit on others.
+// We have better testing on 64-bit machines, so force 32-bit machines to behave like 64-bit.
+//
+// Please do not use sizeof() directly, and use size_t only when required.
+// (We have no way of enforcing these requests...)
+#define SAFE_SIZEOF(x) ((uint64_t)sizeof(x))
+
+// Same sort of thing for _Layout structs with a variable sized array at the end (named "variable").
+#define SAFE_FIXED_SIZE(type) ((uint64_t)offsetof(type, variable))
+
+static const union {
+ uint32_t bits;
+ float f;
+} inf_ = { 0x7f800000 };
+#define INFINITY_ inf_.f
+
+#if defined(__clang__) || defined(__GNUC__)
+ #define small_memcpy __builtin_memcpy
+#else
+ #define small_memcpy memcpy
+#endif
+
+static float log2f_(float x) {
+ // The first approximation of log2(x) is its exponent 'e', minus 127.
+ int32_t bits;
+ small_memcpy(&bits, &x, sizeof(bits));
+
+ float e = (float)bits * (1.0f / (1<<23));
+
+    // If we use the mantissa too we can reduce the error significantly.
+ int32_t m_bits = (bits & 0x007fffff) | 0x3f000000;
+ float m;
+ small_memcpy(&m, &m_bits, sizeof(m));
+
+ return (e - 124.225514990f
+ - 1.498030302f*m
+ - 1.725879990f/(0.3520887068f + m));
+}
+static float logf_(float x) {
+ const float ln2 = 0.69314718f;
+ return ln2*log2f_(x);
+}
+
+static float exp2f_(float x) {
+ float fract = x - floorf_(x);
+
+ float fbits = (1.0f * (1<<23)) * (x + 121.274057500f
+ - 1.490129070f*fract
+ + 27.728023300f/(4.84252568f - fract));
+
+ // Before we cast fbits to int32_t, check for out of range values to pacify UBSAN.
+ // INT_MAX is not exactly representable as a float, so exclude it as effectively infinite.
+ // INT_MIN is a power of 2 and exactly representable as a float, so it's fine.
+ if (fbits >= (float)INT_MAX) {
+ return INFINITY_;
+ } else if (fbits < (float)INT_MIN) {
+ return -INFINITY_;
+ }
+
+ int32_t bits = (int32_t)fbits;
+ small_memcpy(&x, &bits, sizeof(x));
+ return x;
+}
+
+// Not static, as it's used by some test tools.
+float powf_(float x, float y) {
+ assert (x >= 0);
+ return (x == 0) || (x == 1) ? x
+ : exp2f_(log2f_(x) * y);
+}
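+
+// Worked example (values approximate): for x == 8.0f the exponent estimate is
+// e == 130 and the mantissa is m == 0.5, so
+// log2f_(8) ~= 130 - 124.2255 - 1.4980*0.5 - 1.7259/(0.3521 + 0.5) ~= 3.0,
+// and powf_(8.0f, 2.0f) == exp2f_(2 * 3.0) ~= 64.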
+
+static float expf_(float x) {
+ const float log2_e = 1.4426950408889634074f;
+ return exp2f_(log2_e * x);
+}
+
+static float fmaxf_(float x, float y) { return x > y ? x : y; }
+static float fminf_(float x, float y) { return x < y ? x : y; }
+
+static bool isfinitef_(float x) { return 0 == x*0; }
+
+static float minus_1_ulp(float x) {
+ int32_t bits;
+ memcpy(&bits, &x, sizeof(bits));
+ bits = bits - 1;
+ memcpy(&x, &bits, sizeof(bits));
+ return x;
+}
+
+// Most transfer functions we work with are sRGBish.
+// For exotic HDR transfer functions, we encode them using a tf.g that makes no sense,
+// and repurpose the other fields to hold the parameters of the HDR functions.
+enum TFKind { Bad, sRGBish, PQish, HLGish, HLGinvish };
+struct TF_PQish { float A,B,C,D,E,F; };
+struct TF_HLGish { float R,G,a,b,c; };
+
+static float TFKind_marker(TFKind kind) {
+ // We'd use different NaNs, but those aren't guaranteed to be preserved by WASM.
+ return -(float)kind;
+}
+
+static TFKind classify(const skcms_TransferFunction& tf, TF_PQish* pq = nullptr
+ , TF_HLGish* hlg = nullptr) {
+ if (tf.g < 0 && (int)tf.g == tf.g) {
+ // TODO: sanity checks for PQ/HLG like we do for sRGBish.
+ switch (-(int)tf.g) {
+ case PQish: if (pq ) { memcpy(pq , &tf.a, sizeof(*pq )); } return PQish;
+ case HLGish: if (hlg) { memcpy(hlg, &tf.a, sizeof(*hlg)); } return HLGish;
+ case HLGinvish: if (hlg) { memcpy(hlg, &tf.a, sizeof(*hlg)); } return HLGinvish;
+ }
+ return Bad;
+ }
+
+ // Basic sanity checks for sRGBish transfer functions.
+ if (isfinitef_(tf.a + tf.b + tf.c + tf.d + tf.e + tf.f + tf.g)
+ // a,c,d,g should be non-negative to make any sense.
+ && tf.a >= 0
+ && tf.c >= 0
+ && tf.d >= 0
+ && tf.g >= 0
+ // Raising a negative value to a fractional tf->g produces complex numbers.
+ && tf.a * tf.d + tf.b >= 0) {
+ return sRGBish;
+ }
+
+ return Bad;
+}
+
+bool skcms_TransferFunction_makePQish(skcms_TransferFunction* tf,
+ float A, float B, float C,
+ float D, float E, float F) {
+ *tf = { TFKind_marker(PQish), A,B,C,D,E,F };
+ assert(classify(*tf) == PQish);
+ return true;
+}
+
+bool skcms_TransferFunction_makeHLGish(skcms_TransferFunction* tf,
+ float R, float G,
+ float a, float b, float c) {
+ *tf = { TFKind_marker(HLGish), R,G, a,b,c, 0 };
+ assert(classify(*tf) == HLGish);
+ return true;
+}
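+
+// Example of the encoding above: skcms_TransferFunction_makePQish() stores
+// g = TFKind_marker(PQish) == -2.0f and packs A..F into the a..f slots, so
+// classify() recovers the kind from -(int)tf.g and can memcpy the parameters
+// straight back into a TF_PQish.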
+
+float skcms_TransferFunction_eval(const skcms_TransferFunction* tf, float x) {
+ float sign = x < 0 ? -1.0f : 1.0f;
+ x *= sign;
+
+ TF_PQish pq;
+ TF_HLGish hlg;
+ switch (classify(*tf, &pq, &hlg)) {
+ case Bad: break;
+
+ case HLGish: return sign * (x*hlg.R <= 1 ? powf_(x*hlg.R, hlg.G)
+ : expf_((x-hlg.c)*hlg.a) + hlg.b);
+
+ // skcms_TransferFunction_invert() inverts R, G, and a for HLGinvish so this math is fast.
+ case HLGinvish: return sign * (x <= 1 ? hlg.R * powf_(x, hlg.G)
+ : hlg.a * logf_(x - hlg.b) + hlg.c);
+
+
+ case sRGBish: return sign * (x < tf->d ? tf->c * x + tf->f
+ : powf_(tf->a * x + tf->b, tf->g) + tf->e);
+
+ case PQish: return sign * powf_(fmaxf_(pq.A + pq.B * powf_(x, pq.C), 0)
+ / (pq.D + pq.E * powf_(x, pq.C)),
+ pq.F);
+ }
+ return 0;
+}
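+
+// Worked example for the sRGBish case above: with the canonical sRGB
+// parameters (g=2.4, a=1/1.055, b=0.055/1.055, c=1/12.92, d=0.04045, e=f=0),
+// eval(0.02) == 0.02/12.92 ~= 0.00155 (linear piece, x < d), while
+// eval(0.5) == ((0.5 + 0.055)/1.055)^2.4 ~= 0.214 (power piece).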
+
+
+static float eval_curve(const skcms_Curve* curve, float x) {
+ if (curve->table_entries == 0) {
+ return skcms_TransferFunction_eval(&curve->parametric, x);
+ }
+
+ float ix = fmaxf_(0, fminf_(x, 1)) * (curve->table_entries - 1);
+ int lo = (int) ix ,
+ hi = (int)(float)minus_1_ulp(ix + 1.0f);
+ float t = ix - (float)lo;
+
+ float l, h;
+ if (curve->table_8) {
+ l = curve->table_8[lo] * (1/255.0f);
+ h = curve->table_8[hi] * (1/255.0f);
+ } else {
+ uint16_t be_l, be_h;
+ memcpy(&be_l, curve->table_16 + 2*lo, 2);
+ memcpy(&be_h, curve->table_16 + 2*hi, 2);
+ uint16_t le_l = ((be_l << 8) | (be_l >> 8)) & 0xffff;
+ uint16_t le_h = ((be_h << 8) | (be_h >> 8)) & 0xffff;
+ l = le_l * (1/65535.0f);
+ h = le_h * (1/65535.0f);
+ }
+ return l + (h-l)*t;
+}
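+
+// Worked example of the table path above: for a 256-entry table and x == 0.5,
+// ix == 127.5, so lo == 127, hi == 128, and t == 0.5. The minus_1_ulp() on
+// (ix + 1.0f) keeps hi in range when x lands exactly on the last entry
+// (x == 1 gives ix == 255 and still hi == 255).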
+
+static float max_roundtrip_error(const skcms_Curve* curve, const skcms_TransferFunction* inv_tf) {
+ uint32_t N = curve->table_entries > 256 ? curve->table_entries : 256;
+ const float dx = 1.0f / (N - 1);
+ float err = 0;
+ for (uint32_t i = 0; i < N; i++) {
+ float x = i * dx,
+ y = eval_curve(curve, x);
+ err = fmaxf_(err, fabsf_(x - skcms_TransferFunction_eval(inv_tf, y)));
+ }
+ return err;
+}
+
+bool skcms_AreApproximateInverses(const skcms_Curve* curve, const skcms_TransferFunction* inv_tf) {
+ return max_roundtrip_error(curve, inv_tf) < (1/512.0f);
+}
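+
+// The 1/512 tolerance is half an 8-bit quantization step (1/256), so curves
+// that pass are indistinguishable at 8-bit precision; e.g. the canonical sRGB
+// TRC and skcms_sRGB_Inverse_TransferFunction() are expected to round-trip
+// within it.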
+
+// Additional ICC signature values that are only used internally
+enum {
+ // File signature
+ skcms_Signature_acsp = 0x61637370,
+
+ // Tag signatures
+ skcms_Signature_rTRC = 0x72545243,
+ skcms_Signature_gTRC = 0x67545243,
+ skcms_Signature_bTRC = 0x62545243,
+ skcms_Signature_kTRC = 0x6B545243,
+
+ skcms_Signature_rXYZ = 0x7258595A,
+ skcms_Signature_gXYZ = 0x6758595A,
+ skcms_Signature_bXYZ = 0x6258595A,
+
+ skcms_Signature_A2B0 = 0x41324230,
+ skcms_Signature_A2B1 = 0x41324231,
+ skcms_Signature_mAB = 0x6D414220,
+
+ skcms_Signature_CHAD = 0x63686164,
+
+ // Type signatures
+ skcms_Signature_curv = 0x63757276,
+ skcms_Signature_mft1 = 0x6D667431,
+ skcms_Signature_mft2 = 0x6D667432,
+ skcms_Signature_para = 0x70617261,
+ skcms_Signature_sf32 = 0x73663332,
+ // XYZ is also a PCS signature, so it's defined in skcms.h
+ // skcms_Signature_XYZ = 0x58595A20,
+};
+
+static uint16_t read_big_u16(const uint8_t* ptr) {
+ uint16_t be;
+ memcpy(&be, ptr, sizeof(be));
+#if defined(_MSC_VER)
+ return _byteswap_ushort(be);
+#else
+ return __builtin_bswap16(be);
+#endif
+}
+
+static uint32_t read_big_u32(const uint8_t* ptr) {
+ uint32_t be;
+ memcpy(&be, ptr, sizeof(be));
+#if defined(_MSC_VER)
+ return _byteswap_ulong(be);
+#else
+ return __builtin_bswap32(be);
+#endif
+}
+
+static int32_t read_big_i32(const uint8_t* ptr) {
+ return (int32_t)read_big_u32(ptr);
+}
+
+static float read_big_fixed(const uint8_t* ptr) {
+ return read_big_i32(ptr) * (1.0f / 65536.0f);
+}
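+
+// Worked example: ICC s15Fixed16 stores 1.0 as big-endian 0x00010000, so
+// read_big_fixed() on the bytes {0x00, 0x01, 0x00, 0x00} returns
+// 65536 * (1.0f / 65536.0f) == 1.0f; negative values fall out of the
+// int32_t cast in read_big_i32().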
+
+// Maps to an in-memory profile so that fields line up to the locations specified
+// in ICC.1:2010, section 7.2
+typedef struct {
+ uint8_t size [ 4];
+ uint8_t cmm_type [ 4];
+ uint8_t version [ 4];
+ uint8_t profile_class [ 4];
+ uint8_t data_color_space [ 4];
+ uint8_t pcs [ 4];
+ uint8_t creation_date_time [12];
+ uint8_t signature [ 4];
+ uint8_t platform [ 4];
+ uint8_t flags [ 4];
+ uint8_t device_manufacturer [ 4];
+ uint8_t device_model [ 4];
+ uint8_t device_attributes [ 8];
+ uint8_t rendering_intent [ 4];
+ uint8_t illuminant_X [ 4];
+ uint8_t illuminant_Y [ 4];
+ uint8_t illuminant_Z [ 4];
+ uint8_t creator [ 4];
+ uint8_t profile_id [16];
+ uint8_t reserved [28];
+ uint8_t tag_count [ 4]; // Technically not part of header, but required
+} header_Layout;
+
+typedef struct {
+ uint8_t signature [4];
+ uint8_t offset [4];
+ uint8_t size [4];
+} tag_Layout;
+
+static const tag_Layout* get_tag_table(const skcms_ICCProfile* profile) {
+ return (const tag_Layout*)(profile->buffer + SAFE_SIZEOF(header_Layout));
+}
+
+// s15Fixed16ArrayType is technically variable sized, holding N values. However, the only valid
+// use of the type is for the CHAD tag that stores exactly nine values.
+typedef struct {
+ uint8_t type [ 4];
+ uint8_t reserved [ 4];
+ uint8_t values [36];
+} sf32_Layout;
+
+bool skcms_GetCHAD(const skcms_ICCProfile* profile, skcms_Matrix3x3* m) {
+ skcms_ICCTag tag;
+ if (!skcms_GetTagBySignature(profile, skcms_Signature_CHAD, &tag)) {
+ return false;
+ }
+
+ if (tag.type != skcms_Signature_sf32 || tag.size < SAFE_SIZEOF(sf32_Layout)) {
+ return false;
+ }
+
+ const sf32_Layout* sf32Tag = (const sf32_Layout*)tag.buf;
+ const uint8_t* values = sf32Tag->values;
+ for (int r = 0; r < 3; ++r)
+ for (int c = 0; c < 3; ++c, values += 4) {
+ m->vals[r][c] = read_big_fixed(values);
+ }
+ return true;
+}
+
+// XYZType is technically variable sized, holding N XYZ triples. However, the only valid uses of
+// the type are for tags/data that store exactly one triple.
+typedef struct {
+ uint8_t type [4];
+ uint8_t reserved [4];
+ uint8_t X [4];
+ uint8_t Y [4];
+ uint8_t Z [4];
+} XYZ_Layout;
+
+static bool read_tag_xyz(const skcms_ICCTag* tag, float* x, float* y, float* z) {
+ if (tag->type != skcms_Signature_XYZ || tag->size < SAFE_SIZEOF(XYZ_Layout)) {
+ return false;
+ }
+
+ const XYZ_Layout* xyzTag = (const XYZ_Layout*)tag->buf;
+
+ *x = read_big_fixed(xyzTag->X);
+ *y = read_big_fixed(xyzTag->Y);
+ *z = read_big_fixed(xyzTag->Z);
+ return true;
+}
+
+static bool read_to_XYZD50(const skcms_ICCTag* rXYZ, const skcms_ICCTag* gXYZ,
+ const skcms_ICCTag* bXYZ, skcms_Matrix3x3* toXYZ) {
+ return read_tag_xyz(rXYZ, &toXYZ->vals[0][0], &toXYZ->vals[1][0], &toXYZ->vals[2][0]) &&
+ read_tag_xyz(gXYZ, &toXYZ->vals[0][1], &toXYZ->vals[1][1], &toXYZ->vals[2][1]) &&
+ read_tag_xyz(bXYZ, &toXYZ->vals[0][2], &toXYZ->vals[1][2], &toXYZ->vals[2][2]);
+}
+
+typedef struct {
+ uint8_t type [4];
+ uint8_t reserved_a [4];
+ uint8_t function_type [2];
+ uint8_t reserved_b [2];
+ uint8_t variable [1/*variable*/]; // 1, 3, 4, 5, or 7 s15.16, depending on function_type
+} para_Layout;
+
+static bool read_curve_para(const uint8_t* buf, uint32_t size,
+ skcms_Curve* curve, uint32_t* curve_size) {
+ if (size < SAFE_FIXED_SIZE(para_Layout)) {
+ return false;
+ }
+
+ const para_Layout* paraTag = (const para_Layout*)buf;
+
+ enum { kG = 0, kGAB = 1, kGABC = 2, kGABCD = 3, kGABCDEF = 4 };
+ uint16_t function_type = read_big_u16(paraTag->function_type);
+ if (function_type > kGABCDEF) {
+ return false;
+ }
+
+ static const uint32_t curve_bytes[] = { 4, 12, 16, 20, 28 };
+ if (size < SAFE_FIXED_SIZE(para_Layout) + curve_bytes[function_type]) {
+ return false;
+ }
+
+ if (curve_size) {
+ *curve_size = SAFE_FIXED_SIZE(para_Layout) + curve_bytes[function_type];
+ }
+
+ curve->table_entries = 0;
+ curve->parametric.a = 1.0f;
+ curve->parametric.b = 0.0f;
+ curve->parametric.c = 0.0f;
+ curve->parametric.d = 0.0f;
+ curve->parametric.e = 0.0f;
+ curve->parametric.f = 0.0f;
+ curve->parametric.g = read_big_fixed(paraTag->variable);
+
+ switch (function_type) {
+ case kGAB:
+ curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+ curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+ if (curve->parametric.a == 0) {
+ return false;
+ }
+ curve->parametric.d = -curve->parametric.b / curve->parametric.a;
+ break;
+ case kGABC:
+ curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+ curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+ curve->parametric.e = read_big_fixed(paraTag->variable + 12);
+ if (curve->parametric.a == 0) {
+ return false;
+ }
+ curve->parametric.d = -curve->parametric.b / curve->parametric.a;
+ curve->parametric.f = curve->parametric.e;
+ break;
+ case kGABCD:
+ curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+ curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+ curve->parametric.c = read_big_fixed(paraTag->variable + 12);
+ curve->parametric.d = read_big_fixed(paraTag->variable + 16);
+ break;
+ case kGABCDEF:
+ curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+ curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+ curve->parametric.c = read_big_fixed(paraTag->variable + 12);
+ curve->parametric.d = read_big_fixed(paraTag->variable + 16);
+ curve->parametric.e = read_big_fixed(paraTag->variable + 20);
+ curve->parametric.f = read_big_fixed(paraTag->variable + 24);
+ break;
+ }
+ return classify(curve->parametric) == sRGBish;
+}
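+
+// Note on the kGAB/kGABC mappings above: ICC's two- and three-parameter forms
+// are Y = (aX + b)^g above X = -b/a and Y = 0 (respectively Y = c) below it,
+// so setting d = -b/a (and f = e) re-expresses them exactly in the full
+// seven-parameter form used by skcms_TransferFunction.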
+
+typedef struct {
+ uint8_t type [4];
+ uint8_t reserved [4];
+ uint8_t value_count [4];
+ uint8_t variable [1/*variable*/]; // value_count, 8.8 if 1, uint16 (n*65535) if > 1
+} curv_Layout;
+
+static bool read_curve_curv(const uint8_t* buf, uint32_t size,
+ skcms_Curve* curve, uint32_t* curve_size) {
+ if (size < SAFE_FIXED_SIZE(curv_Layout)) {
+ return false;
+ }
+
+ const curv_Layout* curvTag = (const curv_Layout*)buf;
+
+ uint32_t value_count = read_big_u32(curvTag->value_count);
+ if (size < SAFE_FIXED_SIZE(curv_Layout) + value_count * SAFE_SIZEOF(uint16_t)) {
+ return false;
+ }
+
+ if (curve_size) {
+ *curve_size = SAFE_FIXED_SIZE(curv_Layout) + value_count * SAFE_SIZEOF(uint16_t);
+ }
+
+ if (value_count < 2) {
+ curve->table_entries = 0;
+ curve->parametric.a = 1.0f;
+ curve->parametric.b = 0.0f;
+ curve->parametric.c = 0.0f;
+ curve->parametric.d = 0.0f;
+ curve->parametric.e = 0.0f;
+ curve->parametric.f = 0.0f;
+ if (value_count == 0) {
+ // Empty tables are a shorthand for an identity curve
+ curve->parametric.g = 1.0f;
+ } else {
+ // Single entry tables are a shorthand for simple gamma
+ curve->parametric.g = read_big_u16(curvTag->variable) * (1.0f / 256.0f);
+ }
+ } else {
+ curve->table_8 = nullptr;
+ curve->table_16 = curvTag->variable;
+ curve->table_entries = value_count;
+ }
+
+ return true;
+}
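+
+// Worked example of the shorthands above: a 'curv' tag with value_count == 1
+// and the u8.8 value 0x0233 (== 563) decodes to a plain gamma of
+// 563 / 256.0 ~= 2.2, while value_count == 0 means the identity curve (g == 1).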
+
+// Parses both curveType and parametricCurveType data. Ensures that at most 'size' bytes are read.
+// If curve_size is not nullptr, writes the number of bytes used by the curve in (*curve_size).
+static bool read_curve(const uint8_t* buf, uint32_t size,
+ skcms_Curve* curve, uint32_t* curve_size) {
+ if (!buf || size < 4 || !curve) {
+ return false;
+ }
+
+ uint32_t type = read_big_u32(buf);
+ if (type == skcms_Signature_para) {
+ return read_curve_para(buf, size, curve, curve_size);
+ } else if (type == skcms_Signature_curv) {
+ return read_curve_curv(buf, size, curve, curve_size);
+ }
+
+ return false;
+}
+
+// mft1 and mft2 share a large chunk of data
+typedef struct {
+ uint8_t type [ 4];
+ uint8_t reserved_a [ 4];
+ uint8_t input_channels [ 1];
+ uint8_t output_channels [ 1];
+ uint8_t grid_points [ 1];
+ uint8_t reserved_b [ 1];
+ uint8_t matrix [36];
+} mft_CommonLayout;
+
+typedef struct {
+ mft_CommonLayout common [1];
+
+ uint8_t variable [1/*variable*/];
+} mft1_Layout;
+
+typedef struct {
+ mft_CommonLayout common [1];
+
+ uint8_t input_table_entries [2];
+ uint8_t output_table_entries [2];
+ uint8_t variable [1/*variable*/];
+} mft2_Layout;
+
+static bool read_mft_common(const mft_CommonLayout* mftTag, skcms_A2B* a2b) {
+ // MFT matrices are applied before the first set of curves, but must be identity unless the
+ // input is PCSXYZ. We don't support PCSXYZ profiles, so we ignore this matrix. Note that the
+ // matrix in skcms_A2B is applied later in the pipe, so supporting this would require another
+ // field/flag.
+ a2b->matrix_channels = 0;
+
+ a2b->input_channels = mftTag->input_channels[0];
+ a2b->output_channels = mftTag->output_channels[0];
+
+ // We require exactly three (ie XYZ/Lab/RGB) output channels
+ if (a2b->output_channels != ARRAY_COUNT(a2b->output_curves)) {
+ return false;
+ }
+ // We require at least one, and no more than four (ie CMYK) input channels
+ if (a2b->input_channels < 1 || a2b->input_channels > ARRAY_COUNT(a2b->input_curves)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < a2b->input_channels; ++i) {
+ a2b->grid_points[i] = mftTag->grid_points[0];
+ }
+ // The grid only makes sense with at least two points along each axis
+ if (a2b->grid_points[0] < 2) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool init_a2b_tables(const uint8_t* table_base, uint64_t max_tables_len, uint32_t byte_width,
+ uint32_t input_table_entries, uint32_t output_table_entries,
+ skcms_A2B* a2b) {
+ // byte_width is 1 or 2, [input|output]_table_entries are in [2, 4096], so no overflow
+ uint32_t byte_len_per_input_table = input_table_entries * byte_width;
+ uint32_t byte_len_per_output_table = output_table_entries * byte_width;
+
+ // [input|output]_channels are <= 4, so still no overflow
+ uint32_t byte_len_all_input_tables = a2b->input_channels * byte_len_per_input_table;
+ uint32_t byte_len_all_output_tables = a2b->output_channels * byte_len_per_output_table;
+
+ uint64_t grid_size = a2b->output_channels * byte_width;
+ for (uint32_t axis = 0; axis < a2b->input_channels; ++axis) {
+ grid_size *= a2b->grid_points[axis];
+ }
+
+ if (max_tables_len < byte_len_all_input_tables + grid_size + byte_len_all_output_tables) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < a2b->input_channels; ++i) {
+ a2b->input_curves[i].table_entries = input_table_entries;
+ if (byte_width == 1) {
+ a2b->input_curves[i].table_8 = table_base + i * byte_len_per_input_table;
+ a2b->input_curves[i].table_16 = nullptr;
+ } else {
+ a2b->input_curves[i].table_8 = nullptr;
+ a2b->input_curves[i].table_16 = table_base + i * byte_len_per_input_table;
+ }
+ }
+
+ if (byte_width == 1) {
+ a2b->grid_8 = table_base + byte_len_all_input_tables;
+ a2b->grid_16 = nullptr;
+ } else {
+ a2b->grid_8 = nullptr;
+ a2b->grid_16 = table_base + byte_len_all_input_tables;
+ }
+
+ const uint8_t* output_table_base = table_base + byte_len_all_input_tables + grid_size;
+ for (uint32_t i = 0; i < a2b->output_channels; ++i) {
+ a2b->output_curves[i].table_entries = output_table_entries;
+ if (byte_width == 1) {
+ a2b->output_curves[i].table_8 = output_table_base + i * byte_len_per_output_table;
+ a2b->output_curves[i].table_16 = nullptr;
+ } else {
+ a2b->output_curves[i].table_8 = nullptr;
+ a2b->output_curves[i].table_16 = output_table_base + i * byte_len_per_output_table;
+ }
+ }
+
+ return true;
+}
+
+static bool read_tag_mft1(const skcms_ICCTag* tag, skcms_A2B* a2b) {
+ if (tag->size < SAFE_FIXED_SIZE(mft1_Layout)) {
+ return false;
+ }
+
+ const mft1_Layout* mftTag = (const mft1_Layout*)tag->buf;
+ if (!read_mft_common(mftTag->common, a2b)) {
+ return false;
+ }
+
+ uint32_t input_table_entries = 256;
+ uint32_t output_table_entries = 256;
+
+ return init_a2b_tables(mftTag->variable, tag->size - SAFE_FIXED_SIZE(mft1_Layout), 1,
+ input_table_entries, output_table_entries, a2b);
+}
+
+static bool read_tag_mft2(const skcms_ICCTag* tag, skcms_A2B* a2b) {
+ if (tag->size < SAFE_FIXED_SIZE(mft2_Layout)) {
+ return false;
+ }
+
+ const mft2_Layout* mftTag = (const mft2_Layout*)tag->buf;
+ if (!read_mft_common(mftTag->common, a2b)) {
+ return false;
+ }
+
+ uint32_t input_table_entries = read_big_u16(mftTag->input_table_entries);
+ uint32_t output_table_entries = read_big_u16(mftTag->output_table_entries);
+
+ // ICC spec mandates that 2 <= table_entries <= 4096
+ if (input_table_entries < 2 || input_table_entries > 4096 ||
+ output_table_entries < 2 || output_table_entries > 4096) {
+ return false;
+ }
+
+ return init_a2b_tables(mftTag->variable, tag->size - SAFE_FIXED_SIZE(mft2_Layout), 2,
+ input_table_entries, output_table_entries, a2b);
+}
+
+static bool read_curves(const uint8_t* buf, uint32_t size, uint32_t curve_offset,
+ uint32_t num_curves, skcms_Curve* curves) {
+ for (uint32_t i = 0; i < num_curves; ++i) {
+ if (curve_offset > size) {
+ return false;
+ }
+
+ uint32_t curve_bytes;
+ if (!read_curve(buf + curve_offset, size - curve_offset, &curves[i], &curve_bytes)) {
+ return false;
+ }
+
+ if (curve_bytes > UINT32_MAX - 3) {
+ return false;
+ }
+ curve_bytes = (curve_bytes + 3) & ~3U;
+
+ uint64_t new_offset_64 = (uint64_t)curve_offset + curve_bytes;
+ curve_offset = (uint32_t)new_offset_64;
+ if (new_offset_64 != curve_offset) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+typedef struct {
+ uint8_t type [ 4];
+ uint8_t reserved_a [ 4];
+ uint8_t input_channels [ 1];
+ uint8_t output_channels [ 1];
+ uint8_t reserved_b [ 2];
+ uint8_t b_curve_offset [ 4];
+ uint8_t matrix_offset [ 4];
+ uint8_t m_curve_offset [ 4];
+ uint8_t clut_offset [ 4];
+ uint8_t a_curve_offset [ 4];
+} mAB_Layout;
+
+typedef struct {
+ uint8_t grid_points [16];
+ uint8_t grid_byte_width [ 1];
+ uint8_t reserved [ 3];
+ uint8_t variable [1/*variable*/];
+} mABCLUT_Layout;
+
+static bool read_tag_mab(const skcms_ICCTag* tag, skcms_A2B* a2b, bool pcs_is_xyz) {
+ if (tag->size < SAFE_SIZEOF(mAB_Layout)) {
+ return false;
+ }
+
+ const mAB_Layout* mABTag = (const mAB_Layout*)tag->buf;
+
+ a2b->input_channels = mABTag->input_channels[0];
+ a2b->output_channels = mABTag->output_channels[0];
+
+ // We require exactly three (ie XYZ/Lab/RGB) output channels
+ if (a2b->output_channels != ARRAY_COUNT(a2b->output_curves)) {
+ return false;
+ }
+ // We require no more than four (ie CMYK) input channels
+ if (a2b->input_channels > ARRAY_COUNT(a2b->input_curves)) {
+ return false;
+ }
+
+ uint32_t b_curve_offset = read_big_u32(mABTag->b_curve_offset);
+ uint32_t matrix_offset = read_big_u32(mABTag->matrix_offset);
+ uint32_t m_curve_offset = read_big_u32(mABTag->m_curve_offset);
+ uint32_t clut_offset = read_big_u32(mABTag->clut_offset);
+ uint32_t a_curve_offset = read_big_u32(mABTag->a_curve_offset);
+
+ // "B" curves must be present
+ if (0 == b_curve_offset) {
+ return false;
+ }
+
+ if (!read_curves(tag->buf, tag->size, b_curve_offset, a2b->output_channels,
+ a2b->output_curves)) {
+ return false;
+ }
+
+ // "M" curves and Matrix must be used together
+ if (0 != m_curve_offset) {
+ if (0 == matrix_offset) {
+ return false;
+ }
+ a2b->matrix_channels = a2b->output_channels;
+ if (!read_curves(tag->buf, tag->size, m_curve_offset, a2b->matrix_channels,
+ a2b->matrix_curves)) {
+ return false;
+ }
+
+ // Read matrix, which is stored as a row-major 3x3, followed by the fourth column
+ if (tag->size < matrix_offset + 12 * SAFE_SIZEOF(uint32_t)) {
+ return false;
+ }
+ float encoding_factor = pcs_is_xyz ? 65535 / 32768.0f : 1.0f;
+ const uint8_t* mtx_buf = tag->buf + matrix_offset;
+ a2b->matrix.vals[0][0] = encoding_factor * read_big_fixed(mtx_buf + 0);
+ a2b->matrix.vals[0][1] = encoding_factor * read_big_fixed(mtx_buf + 4);
+ a2b->matrix.vals[0][2] = encoding_factor * read_big_fixed(mtx_buf + 8);
+ a2b->matrix.vals[1][0] = encoding_factor * read_big_fixed(mtx_buf + 12);
+ a2b->matrix.vals[1][1] = encoding_factor * read_big_fixed(mtx_buf + 16);
+ a2b->matrix.vals[1][2] = encoding_factor * read_big_fixed(mtx_buf + 20);
+ a2b->matrix.vals[2][0] = encoding_factor * read_big_fixed(mtx_buf + 24);
+ a2b->matrix.vals[2][1] = encoding_factor * read_big_fixed(mtx_buf + 28);
+ a2b->matrix.vals[2][2] = encoding_factor * read_big_fixed(mtx_buf + 32);
+ a2b->matrix.vals[0][3] = encoding_factor * read_big_fixed(mtx_buf + 36);
+ a2b->matrix.vals[1][3] = encoding_factor * read_big_fixed(mtx_buf + 40);
+ a2b->matrix.vals[2][3] = encoding_factor * read_big_fixed(mtx_buf + 44);
+ } else {
+ if (0 != matrix_offset) {
+ return false;
+ }
+ a2b->matrix_channels = 0;
+ }
+
+ // "A" curves and CLUT must be used together
+ if (0 != a_curve_offset) {
+ if (0 == clut_offset) {
+ return false;
+ }
+ if (!read_curves(tag->buf, tag->size, a_curve_offset, a2b->input_channels,
+ a2b->input_curves)) {
+ return false;
+ }
+
+ if (tag->size < clut_offset + SAFE_FIXED_SIZE(mABCLUT_Layout)) {
+ return false;
+ }
+ const mABCLUT_Layout* clut = (const mABCLUT_Layout*)(tag->buf + clut_offset);
+
+ if (clut->grid_byte_width[0] == 1) {
+ a2b->grid_8 = clut->variable;
+ a2b->grid_16 = nullptr;
+ } else if (clut->grid_byte_width[0] == 2) {
+ a2b->grid_8 = nullptr;
+ a2b->grid_16 = clut->variable;
+ } else {
+ return false;
+ }
+
+ uint64_t grid_size = a2b->output_channels * clut->grid_byte_width[0];
+ for (uint32_t i = 0; i < a2b->input_channels; ++i) {
+ a2b->grid_points[i] = clut->grid_points[i];
+ // The grid only makes sense with at least two points along each axis
+ if (a2b->grid_points[i] < 2) {
+ return false;
+ }
+ grid_size *= a2b->grid_points[i];
+ }
+ if (tag->size < clut_offset + SAFE_FIXED_SIZE(mABCLUT_Layout) + grid_size) {
+ return false;
+ }
+ } else {
+ if (0 != clut_offset) {
+ return false;
+ }
+
+ // If there is no CLUT, the number of input and output channels must match
+ if (a2b->input_channels != a2b->output_channels) {
+ return false;
+ }
+
+ // Zero out the number of input channels to signal that we're skipping this stage
+ a2b->input_channels = 0;
+ }
+
+ return true;
+}
+
+// If you pass f, we'll fit a possibly-non-zero value for *f.
+// If you pass nullptr, we'll assume you want *f to be treated as zero.
+static int fit_linear(const skcms_Curve* curve, int N, float tol,
+ float* c, float* d, float* f = nullptr) {
+ assert(N > 1);
+ // We iteratively fit the first points to the TF's linear piece.
+ // We want the cx + f line to pass through the first and last points we fit exactly.
+ //
+ // As we walk along the points we find the minimum and maximum slope of the line before the
+ // error would exceed our tolerance. We stop when the range [slope_min, slope_max] becomes
+    // empty, when we definitely can't add any more points.
+ //
+ // Some points' error intervals may intersect the running interval but not lie fully
+ // within it. So we keep track of the last point we saw that is a valid end point candidate,
+ // and once the search is done, back up to build the line through *that* point.
+ const float dx = 1.0f / (N - 1);
+
+ int lin_points = 1;
+
+ float f_zero = 0.0f;
+ if (f) {
+ *f = eval_curve(curve, 0);
+ } else {
+ f = &f_zero;
+ }
+
+
+ float slope_min = -INFINITY_;
+ float slope_max = +INFINITY_;
+ for (int i = 1; i < N; ++i) {
+ float x = i * dx;
+ float y = eval_curve(curve, x);
+
+ float slope_max_i = (y + tol - *f) / x,
+ slope_min_i = (y - tol - *f) / x;
+ if (slope_max_i < slope_min || slope_max < slope_min_i) {
+ // Slope intervals would no longer overlap.
+ break;
+ }
+ slope_max = fminf_(slope_max, slope_max_i);
+ slope_min = fmaxf_(slope_min, slope_min_i);
+
+ float cur_slope = (y - *f) / x;
+ if (slope_min <= cur_slope && cur_slope <= slope_max) {
+ lin_points = i + 1;
+ *c = cur_slope;
+ }
+ }
+
+ // Set D to the last point that met our tolerance.
+ *d = (lin_points - 1) * dx;
+ return lin_points;
+}
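+
+// Usage sketch (hypothetical call, not made here): asking for the longest
+// linear prefix of a 256-entry curve within half an 8-bit step:
+//     float c, d, f;
+//     int n = fit_linear(curve, 256, 0.5f / 255, &c, &d, &f);
+// For an sRGB-shaped table this recovers roughly c == 1/12.92 and
+// d == 0.04045, the linear toe of sRGB.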
+
+static bool read_a2b(const skcms_ICCTag* tag, skcms_A2B* a2b, bool pcs_is_xyz) {
+ bool ok = false;
+ if (tag->type == skcms_Signature_mft1) {
+ ok = read_tag_mft1(tag, a2b);
+ } else if (tag->type == skcms_Signature_mft2) {
+ ok = read_tag_mft2(tag, a2b);
+ } else if (tag->type == skcms_Signature_mAB) {
+ ok = read_tag_mab(tag, a2b, pcs_is_xyz);
+ }
+ if (!ok) {
+ return false;
+ }
+
+ // Detect and canonicalize identity tables.
+ skcms_Curve* curves[] = {
+ a2b->input_channels > 0 ? a2b->input_curves + 0 : nullptr,
+ a2b->input_channels > 1 ? a2b->input_curves + 1 : nullptr,
+ a2b->input_channels > 2 ? a2b->input_curves + 2 : nullptr,
+ a2b->input_channels > 3 ? a2b->input_curves + 3 : nullptr,
+ a2b->matrix_channels > 0 ? a2b->matrix_curves + 0 : nullptr,
+ a2b->matrix_channels > 1 ? a2b->matrix_curves + 1 : nullptr,
+ a2b->matrix_channels > 2 ? a2b->matrix_curves + 2 : nullptr,
+ a2b->output_channels > 0 ? a2b->output_curves + 0 : nullptr,
+ a2b->output_channels > 1 ? a2b->output_curves + 1 : nullptr,
+ a2b->output_channels > 2 ? a2b->output_curves + 2 : nullptr,
+ };
+
+ for (int i = 0; i < ARRAY_COUNT(curves); i++) {
+ skcms_Curve* curve = curves[i];
+
+ if (curve && curve->table_entries && curve->table_entries <= (uint32_t)INT_MAX) {
+ int N = (int)curve->table_entries;
+
+ float c = 0.0f, d = 0.0f, f = 0.0f;
+ if (N == fit_linear(curve, N, 1.0f/(2*N), &c,&d,&f)
+ && c == 1.0f
+ && f == 0.0f) {
+ curve->table_entries = 0;
+ curve->table_8 = nullptr;
+ curve->table_16 = nullptr;
+ curve->parametric = skcms_TransferFunction{1,1,0,0,0,0,0};
+ }
+ }
+ }
+
+ return true;
+}
+
+void skcms_GetTagByIndex(const skcms_ICCProfile* profile, uint32_t idx, skcms_ICCTag* tag) {
+ if (!profile || !profile->buffer || !tag) { return; }
+    if (idx >= profile->tag_count) { return; }
+ const tag_Layout* tags = get_tag_table(profile);
+ tag->signature = read_big_u32(tags[idx].signature);
+ tag->size = read_big_u32(tags[idx].size);
+ tag->buf = read_big_u32(tags[idx].offset) + profile->buffer;
+ tag->type = read_big_u32(tag->buf);
+}
+
+bool skcms_GetTagBySignature(const skcms_ICCProfile* profile, uint32_t sig, skcms_ICCTag* tag) {
+ if (!profile || !profile->buffer || !tag) { return false; }
+ const tag_Layout* tags = get_tag_table(profile);
+ for (uint32_t i = 0; i < profile->tag_count; ++i) {
+ if (read_big_u32(tags[i].signature) == sig) {
+ tag->signature = sig;
+ tag->size = read_big_u32(tags[i].size);
+ tag->buf = read_big_u32(tags[i].offset) + profile->buffer;
+ tag->type = read_big_u32(tag->buf);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool usable_as_src(const skcms_ICCProfile* profile) {
+ return profile->has_A2B
+ || (profile->has_trc && profile->has_toXYZD50);
+}
+
+bool skcms_Parse(const void* buf, size_t len, skcms_ICCProfile* profile) {
+ assert(SAFE_SIZEOF(header_Layout) == 132);
+
+ if (!profile) {
+ return false;
+ }
+ memset(profile, 0, SAFE_SIZEOF(*profile));
+
+ if (len < SAFE_SIZEOF(header_Layout)) {
+ return false;
+ }
+
+ // Byte-swap all header fields
+ const header_Layout* header = (const header_Layout*)buf;
+ profile->buffer = (const uint8_t*)buf;
+ profile->size = read_big_u32(header->size);
+ uint32_t version = read_big_u32(header->version);
+ profile->data_color_space = read_big_u32(header->data_color_space);
+ profile->pcs = read_big_u32(header->pcs);
+ uint32_t signature = read_big_u32(header->signature);
+ float illuminant_X = read_big_fixed(header->illuminant_X);
+ float illuminant_Y = read_big_fixed(header->illuminant_Y);
+ float illuminant_Z = read_big_fixed(header->illuminant_Z);
+ profile->tag_count = read_big_u32(header->tag_count);
+
+ // Validate signature, size (smaller than buffer, large enough to hold tag table),
+ // and major version
+ uint64_t tag_table_size = profile->tag_count * SAFE_SIZEOF(tag_Layout);
+ if (signature != skcms_Signature_acsp ||
+ profile->size > len ||
+ profile->size < SAFE_SIZEOF(header_Layout) + tag_table_size ||
+ (version >> 24) > 4) {
+ return false;
+ }
+
+ // Validate that illuminant is D50 white
+ if (fabsf_(illuminant_X - 0.9642f) > 0.0100f ||
+ fabsf_(illuminant_Y - 1.0000f) > 0.0100f ||
+ fabsf_(illuminant_Z - 0.8249f) > 0.0100f) {
+ return false;
+ }
+
+ // Validate that all tag entries have sane offset + size
+ const tag_Layout* tags = get_tag_table(profile);
+ for (uint32_t i = 0; i < profile->tag_count; ++i) {
+ uint32_t tag_offset = read_big_u32(tags[i].offset);
+ uint32_t tag_size = read_big_u32(tags[i].size);
+ uint64_t tag_end = (uint64_t)tag_offset + (uint64_t)tag_size;
+ if (tag_size < 4 || tag_end > profile->size) {
+ return false;
+ }
+ }
+
+ if (profile->pcs != skcms_Signature_XYZ && profile->pcs != skcms_Signature_Lab) {
+ return false;
+ }
+
+ bool pcs_is_xyz = profile->pcs == skcms_Signature_XYZ;
+
+ // Pre-parse commonly used tags.
+ skcms_ICCTag kTRC;
+ if (profile->data_color_space == skcms_Signature_Gray &&
+ skcms_GetTagBySignature(profile, skcms_Signature_kTRC, &kTRC)) {
+ if (!read_curve(kTRC.buf, kTRC.size, &profile->trc[0], nullptr)) {
+ // Malformed tag
+ return false;
+ }
+ profile->trc[1] = profile->trc[0];
+ profile->trc[2] = profile->trc[0];
+ profile->has_trc = true;
+
+ if (pcs_is_xyz) {
+ profile->toXYZD50.vals[0][0] = illuminant_X;
+ profile->toXYZD50.vals[1][1] = illuminant_Y;
+ profile->toXYZD50.vals[2][2] = illuminant_Z;
+ profile->has_toXYZD50 = true;
+ }
+ } else {
+ skcms_ICCTag rTRC, gTRC, bTRC;
+ if (skcms_GetTagBySignature(profile, skcms_Signature_rTRC, &rTRC) &&
+ skcms_GetTagBySignature(profile, skcms_Signature_gTRC, &gTRC) &&
+ skcms_GetTagBySignature(profile, skcms_Signature_bTRC, &bTRC)) {
+ if (!read_curve(rTRC.buf, rTRC.size, &profile->trc[0], nullptr) ||
+ !read_curve(gTRC.buf, gTRC.size, &profile->trc[1], nullptr) ||
+ !read_curve(bTRC.buf, bTRC.size, &profile->trc[2], nullptr)) {
+ // Malformed TRC tags
+ return false;
+ }
+ profile->has_trc = true;
+ }
+
+ skcms_ICCTag rXYZ, gXYZ, bXYZ;
+ if (skcms_GetTagBySignature(profile, skcms_Signature_rXYZ, &rXYZ) &&
+ skcms_GetTagBySignature(profile, skcms_Signature_gXYZ, &gXYZ) &&
+ skcms_GetTagBySignature(profile, skcms_Signature_bXYZ, &bXYZ)) {
+ if (!read_to_XYZD50(&rXYZ, &gXYZ, &bXYZ, &profile->toXYZD50)) {
+ // Malformed XYZ tags
+ return false;
+ }
+ profile->has_toXYZD50 = true;
+ }
+ }
+
+ skcms_ICCTag a2b_tag;
+
+ // For now, we're preferring A2B0, like Skia does and the ICC spec tells us to.
+    // TODO: prefer A2B1 (relative colorimetric) over A2B0 (perceptual)?
+    // This breaks with the ICC spec, but we think it's a good idea, given that TRC curves
+    // and all our known users think exclusively in terms of relative colorimetric rendering.
+ const uint32_t sigs[] = { skcms_Signature_A2B0, skcms_Signature_A2B1 };
+ for (int i = 0; i < ARRAY_COUNT(sigs); i++) {
+ if (skcms_GetTagBySignature(profile, sigs[i], &a2b_tag)) {
+ if (!read_a2b(&a2b_tag, &profile->A2B, pcs_is_xyz)) {
+ // Malformed A2B tag
+ return false;
+ }
+ profile->has_A2B = true;
+ break;
+ }
+ }
+
+ return usable_as_src(profile);
+}
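+
+// Usage sketch (illustrative): the parsed profile borrows the caller's buffer,
+// so the bytes must outlive it:
+//     skcms_ICCProfile profile;
+//     if (skcms_Parse(icc_bytes, icc_len, &profile)) {
+//         // profile.has_trc / has_toXYZD50 / has_A2B now say what's usable.
+//     }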
+
+
+const skcms_ICCProfile* skcms_sRGB_profile() {
+ static const skcms_ICCProfile sRGB_profile = {
+ nullptr, // buffer, moot here
+
+ 0, // size, moot here
+ skcms_Signature_RGB, // data_color_space
+ skcms_Signature_XYZ, // pcs
+ 0, // tag count, moot here
+
+ // We choose to represent sRGB with its canonical transfer function,
+ // and with its canonical XYZD50 gamut matrix.
+ true, // has_trc, followed by the 3 trc curves
+ {
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ },
+
+ true, // has_toXYZD50, followed by 3x3 toXYZD50 matrix
+ {{
+ { 0.436065674f, 0.385147095f, 0.143066406f },
+ { 0.222488403f, 0.716873169f, 0.060607910f },
+ { 0.013916016f, 0.097076416f, 0.714096069f },
+ }},
+
+ false, // has_A2B, followed by a2b itself which we don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+ };
+ return &sRGB_profile;
+}
+
+const skcms_ICCProfile* skcms_XYZD50_profile() {
+ // Just like sRGB above, but with identity transfer functions and toXYZD50 matrix.
+ static const skcms_ICCProfile XYZD50_profile = {
+ nullptr, // buffer, moot here
+
+ 0, // size, moot here
+ skcms_Signature_RGB, // data_color_space
+ skcms_Signature_XYZ, // pcs
+ 0, // tag count, moot here
+
+ true, // has_trc, followed by the 3 trc curves
+ {
+ {{0, {1,1, 0,0,0,0,0}}},
+ {{0, {1,1, 0,0,0,0,0}}},
+ {{0, {1,1, 0,0,0,0,0}}},
+ },
+
+ true, // has_toXYZD50, followed by 3x3 toXYZD50 matrix
+ {{
+ { 1,0,0 },
+ { 0,1,0 },
+ { 0,0,1 },
+ }},
+
+ false, // has_A2B, followed by a2b itself which we don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+ };
+
+ return &XYZD50_profile;
+}
+
+const skcms_TransferFunction* skcms_sRGB_TransferFunction() {
+ return &skcms_sRGB_profile()->trc[0].parametric;
+}
+
+const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction() {
+ static const skcms_TransferFunction sRGB_inv =
+ {0.416666657f, 1.137283325f, -0.0f, 12.920000076f, 0.003130805f, -0.054969788f, -0.0f};
+ return &sRGB_inv;
+}
+
+const skcms_TransferFunction* skcms_Identity_TransferFunction() {
+ static const skcms_TransferFunction identity = {1,1,0,0,0,0,0};
+ return &identity;
+}
+
+const uint8_t skcms_252_random_bytes[] = {
+ 8, 179, 128, 204, 253, 38, 134, 184, 68, 102, 32, 138, 99, 39, 169, 215,
+ 119, 26, 3, 223, 95, 239, 52, 132, 114, 74, 81, 234, 97, 116, 244, 205, 30,
+ 154, 173, 12, 51, 159, 122, 153, 61, 226, 236, 178, 229, 55, 181, 220, 191,
+ 194, 160, 126, 168, 82, 131, 18, 180, 245, 163, 22, 246, 69, 235, 252, 57,
+ 108, 14, 6, 152, 240, 255, 171, 242, 20, 227, 177, 238, 96, 85, 16, 211,
+ 70, 200, 149, 155, 146, 127, 145, 100, 151, 109, 19, 165, 208, 195, 164,
+ 137, 254, 182, 248, 64, 201, 45, 209, 5, 147, 207, 210, 113, 162, 83, 225,
+ 9, 31, 15, 231, 115, 37, 58, 53, 24, 49, 197, 56, 120, 172, 48, 21, 214,
+ 129, 111, 11, 50, 187, 196, 34, 60, 103, 71, 144, 47, 203, 77, 80, 232,
+ 140, 222, 250, 206, 166, 247, 139, 249, 221, 72, 106, 27, 199, 117, 54,
+ 219, 135, 118, 40, 79, 41, 251, 46, 93, 212, 92, 233, 148, 28, 121, 63,
+ 123, 158, 105, 59, 29, 42, 143, 23, 0, 107, 176, 87, 104, 183, 156, 193,
+ 189, 90, 188, 65, 190, 17, 198, 7, 186, 161, 1, 124, 78, 125, 170, 133,
+ 174, 218, 67, 157, 75, 101, 89, 217, 62, 33, 141, 228, 25, 35, 91, 230, 4,
+ 2, 13, 73, 86, 167, 237, 84, 243, 44, 185, 66, 130, 110, 150, 142, 216, 88,
+ 112, 36, 224, 136, 202, 76, 94, 98, 175, 213
+};
+
+bool skcms_ApproximatelyEqualProfiles(const skcms_ICCProfile* A, const skcms_ICCProfile* B) {
+ // Test for exactly equal profiles first.
+ if (A == B || 0 == memcmp(A,B, sizeof(skcms_ICCProfile))) {
+ return true;
+ }
+
+    // For now this is essentially the same strategy we use in test_only.c
+ // for our skcms_Transform() smoke tests:
+ // 1) transform A to XYZD50
+ // 2) transform B to XYZD50
+ // 3) return true if they're similar enough
+ // Our current criterion in 3) is maximum 1 bit error per XYZD50 byte.
+
+ // skcms_252_random_bytes are 252 of a random shuffle of all possible bytes.
+ // 252 is evenly divisible by 3 and 4. Only 192, 10, 241, and 43 are missing.
+
+ if (A->data_color_space != B->data_color_space) {
+ return false;
+ }
+
+ // Interpret as RGB_888 if data color space is RGB or GRAY, RGBA_8888 if CMYK.
+ // TODO: working with RGBA_8888 either way is probably fastest.
+ skcms_PixelFormat fmt = skcms_PixelFormat_RGB_888;
+ size_t npixels = 84;
+ if (A->data_color_space == skcms_Signature_CMYK) {
+ fmt = skcms_PixelFormat_RGBA_8888;
+ npixels = 63;
+ }
+
+ // TODO: if A or B is a known profile (skcms_sRGB_profile, skcms_XYZD50_profile),
+ // use pre-canned results and skip that skcms_Transform() call?
+ uint8_t dstA[252],
+ dstB[252];
+ if (!skcms_Transform(
+ skcms_252_random_bytes, fmt, skcms_AlphaFormat_Unpremul, A,
+ dstA, skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul, skcms_XYZD50_profile(),
+ npixels)) {
+ return false;
+ }
+ if (!skcms_Transform(
+ skcms_252_random_bytes, fmt, skcms_AlphaFormat_Unpremul, B,
+ dstB, skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul, skcms_XYZD50_profile(),
+ npixels)) {
+ return false;
+ }
+
+ // TODO: make sure this final check has reasonable codegen.
+ for (size_t i = 0; i < 252; i++) {
+ if (abs((int)dstA[i] - (int)dstB[i]) > 1) {
+ return false;
+ }
+ }
+ return true;
+}
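+// A minimal usage sketch (illustrative only, not part of skcms): assuming two
+// ICC blobs bufA/lenA and bufB/lenB supplied by the caller, the equivalence
+// test above is typically reached via skcms_Parse():
+//
+//     skcms_ICCProfile a, b;
+//     if (skcms_Parse(bufA, lenA, &a) && skcms_Parse(bufB, lenB, &b) &&
+//         skcms_ApproximatelyEqualProfiles(&a, &b)) {
+//         // The two profiles transform colors near-identically,
+//         // so a caller may substitute one for the other.
+//     }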
+
+bool skcms_TRCs_AreApproximateInverse(const skcms_ICCProfile* profile,
+ const skcms_TransferFunction* inv_tf) {
+ if (!profile || !profile->has_trc) {
+ return false;
+ }
+
+ return skcms_AreApproximateInverses(&profile->trc[0], inv_tf) &&
+ skcms_AreApproximateInverses(&profile->trc[1], inv_tf) &&
+ skcms_AreApproximateInverses(&profile->trc[2], inv_tf);
+}
+
+static bool is_zero_to_one(float x) {
+ return 0 <= x && x <= 1;
+}
+
+typedef struct { float vals[3]; } skcms_Vector3;
+
+static skcms_Vector3 mv_mul(const skcms_Matrix3x3* m, const skcms_Vector3* v) {
+ skcms_Vector3 dst = {{0,0,0}};
+ for (int row = 0; row < 3; ++row) {
+ dst.vals[row] = m->vals[row][0] * v->vals[0]
+ + m->vals[row][1] * v->vals[1]
+ + m->vals[row][2] * v->vals[2];
+ }
+ return dst;
+}
+
+bool skcms_PrimariesToXYZD50(float rx, float ry,
+ float gx, float gy,
+ float bx, float by,
+ float wx, float wy,
+ skcms_Matrix3x3* toXYZD50) {
+ if (!is_zero_to_one(rx) || !is_zero_to_one(ry) ||
+ !is_zero_to_one(gx) || !is_zero_to_one(gy) ||
+ !is_zero_to_one(bx) || !is_zero_to_one(by) ||
+ !is_zero_to_one(wx) || !is_zero_to_one(wy) ||
+ !toXYZD50) {
+ return false;
+ }
+
+ // First, we need to convert xy values (primaries) to XYZ.
+ skcms_Matrix3x3 primaries = {{
+ { rx, gx, bx },
+ { ry, gy, by },
+ { 1 - rx - ry, 1 - gx - gy, 1 - bx - by },
+ }};
+ skcms_Matrix3x3 primaries_inv;
+ if (!skcms_Matrix3x3_invert(&primaries, &primaries_inv)) {
+ return false;
+ }
+
+ // Assumes that Y is 1.0f.
+ skcms_Vector3 wXYZ = { { wx / wy, 1, (1 - wx - wy) / wy } };
+ skcms_Vector3 XYZ = mv_mul(&primaries_inv, &wXYZ);
+
+ skcms_Matrix3x3 toXYZ = {{
+ { XYZ.vals[0], 0, 0 },
+ { 0, XYZ.vals[1], 0 },
+ { 0, 0, XYZ.vals[2] },
+ }};
+ toXYZ = skcms_Matrix3x3_concat(&primaries, &toXYZ);
+
+ // Now convert toXYZ matrix to toXYZD50.
+ skcms_Vector3 wXYZD50 = { { 0.96422f, 1.0f, 0.82521f } };
+
+ // Calculate the chromatic adaptation matrix. We will use the Bradford method, thus
+ // the matrices below. The Bradford method is used by Adobe and is widely considered
+ // to be the best.
+ skcms_Matrix3x3 xyz_to_lms = {{
+ { 0.8951f, 0.2664f, -0.1614f },
+ { -0.7502f, 1.7135f, 0.0367f },
+ { 0.0389f, -0.0685f, 1.0296f },
+ }};
+ skcms_Matrix3x3 lms_to_xyz = {{
+ { 0.9869929f, -0.1470543f, 0.1599627f },
+ { 0.4323053f, 0.5183603f, 0.0492912f },
+ { -0.0085287f, 0.0400428f, 0.9684867f },
+ }};
+
+ skcms_Vector3 srcCone = mv_mul(&xyz_to_lms, &wXYZ);
+ skcms_Vector3 dstCone = mv_mul(&xyz_to_lms, &wXYZD50);
+
+ skcms_Matrix3x3 DXtoD50 = {{
+ { dstCone.vals[0] / srcCone.vals[0], 0, 0 },
+ { 0, dstCone.vals[1] / srcCone.vals[1], 0 },
+ { 0, 0, dstCone.vals[2] / srcCone.vals[2] },
+ }};
+ DXtoD50 = skcms_Matrix3x3_concat(&DXtoD50, &xyz_to_lms);
+ DXtoD50 = skcms_Matrix3x3_concat(&lms_to_xyz, &DXtoD50);
+
+ *toXYZD50 = skcms_Matrix3x3_concat(&DXtoD50, &toXYZ);
+ return true;
+}
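+// Worked example (illustrative): the sRGB primaries with the D65 white point
+// should recover, up to rounding, the familiar sRGB toXYZD50 matrix
+// (first row roughly { 0.4360, 0.3851, 0.1431 }).
+//
+//     skcms_Matrix3x3 m;
+//     bool ok = skcms_PrimariesToXYZD50(0.64f,   0.33f,    // red xy
+//                                       0.30f,   0.60f,    // green xy
+//                                       0.15f,   0.06f,    // blue xy
+//                                       0.3127f, 0.3290f,  // D65 white xy
+//                                       &m);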
+
+
+bool skcms_Matrix3x3_invert(const skcms_Matrix3x3* src, skcms_Matrix3x3* dst) {
+ double a00 = src->vals[0][0],
+ a01 = src->vals[1][0],
+ a02 = src->vals[2][0],
+ a10 = src->vals[0][1],
+ a11 = src->vals[1][1],
+ a12 = src->vals[2][1],
+ a20 = src->vals[0][2],
+ a21 = src->vals[1][2],
+ a22 = src->vals[2][2];
+
+ double b0 = a00*a11 - a01*a10,
+ b1 = a00*a12 - a02*a10,
+ b2 = a01*a12 - a02*a11,
+ b3 = a20,
+ b4 = a21,
+ b5 = a22;
+
+ double determinant = b0*b5
+ - b1*b4
+ + b2*b3;
+
+ if (determinant == 0) {
+ return false;
+ }
+
+ double invdet = 1.0 / determinant;
+ if (invdet > +FLT_MAX || invdet < -FLT_MAX || !isfinitef_((float)invdet)) {
+ return false;
+ }
+
+ b0 *= invdet;
+ b1 *= invdet;
+ b2 *= invdet;
+ b3 *= invdet;
+ b4 *= invdet;
+ b5 *= invdet;
+
+ dst->vals[0][0] = (float)( a11*b5 - a12*b4 );
+ dst->vals[1][0] = (float)( a02*b4 - a01*b5 );
+ dst->vals[2][0] = (float)( + b2 );
+ dst->vals[0][1] = (float)( a12*b3 - a10*b5 );
+ dst->vals[1][1] = (float)( a00*b5 - a02*b3 );
+ dst->vals[2][1] = (float)( - b1 );
+ dst->vals[0][2] = (float)( a10*b4 - a11*b3 );
+ dst->vals[1][2] = (float)( a01*b3 - a00*b4 );
+ dst->vals[2][2] = (float)( + b0 );
+
+ for (int r = 0; r < 3; ++r)
+ for (int c = 0; c < 3; ++c) {
+ if (!isfinitef_(dst->vals[r][c])) {
+ return false;
+ }
+ }
+ return true;
+}
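+// Typical use (illustrative): inverting a profile's gamut matrix to map
+// XYZD50 back into the profile's RGB, exactly as prep_for_destination()
+// does further below.
+//
+//     skcms_Matrix3x3 fromXYZD50;
+//     if (skcms_Matrix3x3_invert(&profile->toXYZD50, &fromXYZD50)) {
+//         // fromXYZD50 * toXYZD50 ≈ identity, up to float rounding.
+//     }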
+
+skcms_Matrix3x3 skcms_Matrix3x3_concat(const skcms_Matrix3x3* A, const skcms_Matrix3x3* B) {
+ skcms_Matrix3x3 m = { { { 0,0,0 },{ 0,0,0 },{ 0,0,0 } } };
+ for (int r = 0; r < 3; r++)
+ for (int c = 0; c < 3; c++) {
+ m.vals[r][c] = A->vals[r][0] * B->vals[0][c]
+ + A->vals[r][1] * B->vals[1][c]
+ + A->vals[r][2] * B->vals[2][c];
+ }
+ return m;
+}
+
+#if defined(__clang__)
+ [[clang::no_sanitize("float-divide-by-zero")]] // Checked for by classify() on the way out.
+#endif
+bool skcms_TransferFunction_invert(const skcms_TransferFunction* src, skcms_TransferFunction* dst) {
+ TF_PQish pq;
+ TF_HLGish hlg;
+ switch (classify(*src, &pq, &hlg)) {
+ case Bad: return false;
+ case sRGBish: break; // handled below
+
+ case PQish:
+ *dst = { TFKind_marker(PQish), -pq.A, pq.D, 1.0f/pq.F
+ , pq.B, -pq.E, 1.0f/pq.C};
+ return true;
+
+ case HLGish:
+ *dst = { TFKind_marker(HLGinvish), 1.0f/hlg.R, 1.0f/hlg.G
+ , 1.0f/hlg.a, hlg.b, hlg.c, 0 };
+ return true;
+
+ case HLGinvish:
+ *dst = { TFKind_marker(HLGish), 1.0f/hlg.R, 1.0f/hlg.G
+ , 1.0f/hlg.a, hlg.b, hlg.c, 0 };
+ return true;
+ }
+
+ assert (classify(*src) == sRGBish);
+
+ // We're inverting this function, solving for x in terms of y.
+ // y = (cx + f) x < d
+ // (ax + b)^g + e x ≥ d
+ // The inverse of this function can be expressed in the same piecewise form.
+ skcms_TransferFunction inv = {0,0,0,0,0,0,0};
+
+ // We'll start by finding the new threshold inv.d.
+ // In principle we should be able to find that by solving for y at x=d from either side.
+ // (If those two d values aren't the same, it's a discontinuous transfer function.)
+ float d_l = src->c * src->d + src->f,
+ d_r = powf_(src->a * src->d + src->b, src->g) + src->e;
+ if (fabsf_(d_l - d_r) > 1/512.0f) {
+ return false;
+ }
+ inv.d = d_l; // TODO(mtklein): better in practice to choose d_r?
+
+ // When d=0, the linear section collapses to a point. We leave c,d,f all zero in that case.
+ if (inv.d > 0) {
+ // Inverting the linear section is pretty straightforward:
+ // y = cx + f
+ // y - f = cx
+ // (1/c)y - f/c = x
+ inv.c = 1.0f/src->c;
+ inv.f = -src->f/src->c;
+ }
+
+ // The interesting part is inverting the nonlinear section:
+ // y = (ax + b)^g + e.
+ // y - e = (ax + b)^g
+ // (y - e)^1/g = ax + b
+ // (y - e)^1/g - b = ax
+ // (1/a)(y - e)^1/g - b/a = x
+ //
+ // To make that fit our form, we need to move the (1/a) term inside the exponentiation:
+ // let k = (1/a)^g
+ // (1/a)( y - e)^1/g - b/a = x
+ // (ky - ke)^1/g - b/a = x
+
+ float k = powf_(src->a, -src->g); // (1/a)^g == a^-g
+ inv.g = 1.0f / src->g;
+ inv.a = k;
+ inv.b = -k * src->e;
+ inv.e = -src->b / src->a;
+
+ // We need to enforce the same constraints here that we do when fitting a curve,
+ // a >= 0 and ad+b >= 0. These constraints are checked by classify(), so they're true
+ // of the source function if we're here.
+
+ // Just like when fitting the curve, there's really no way to rescue a < 0.
+ if (inv.a < 0) {
+ return false;
+ }
+ // On the other hand we can rescue an ad+b that's gone slightly negative here.
+ if (inv.a * inv.d + inv.b < 0) {
+ inv.b = -inv.a * inv.d;
+ }
+
+ // That should usually make classify(inv) == sRGBish true, but there are a couple situations
+ // where we might still fail here, like non-finite parameter values.
+ if (classify(inv) != sRGBish) {
+ return false;
+ }
+
+ assert (inv.a >= 0);
+ assert (inv.a * inv.d + inv.b >= 0);
+
+ // Now in principle we're done.
+ // But to preserve the valuable invariant inv(src(1.0f)) == 1.0f, we'll tweak
+ // e or f of the inverse, depending on which segment contains src(1.0f).
+ float s = skcms_TransferFunction_eval(src, 1.0f);
+ if (!isfinitef_(s)) {
+ return false;
+ }
+
+ float sign = s < 0 ? -1.0f : 1.0f;
+ s *= sign;
+ if (s < inv.d) {
+ inv.f = 1.0f - sign * inv.c * s;
+ } else {
+ inv.e = 1.0f - sign * powf_(inv.a * s + inv.b, inv.g);
+ }
+
+ *dst = inv;
+ return classify(*dst) == sRGBish;
+}
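+// Sanity sketch (illustrative): inverting the standard sRGB parametric curve,
+// written in this {g,a,b,c,d,e,f} layout, and round-tripping a value.
+//
+//     skcms_TransferFunction srgb = { 2.4f, 1/1.055f, 0.055f/1.055f,
+//                                     1/12.92f, 0.04045f, 0, 0 },
+//                            inv;
+//     if (skcms_TransferFunction_invert(&srgb, &inv)) {
+//         float y = skcms_TransferFunction_eval(&srgb, 0.5f),
+//               x = skcms_TransferFunction_eval(&inv,  y);  // x ≈ 0.5f
+//     }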
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+
+// From here below we're approximating an skcms_Curve with an skcms_TransferFunction{g,a,b,c,d,e,f}:
+//
+// tf(x) = cx + f x < d
+// tf(x) = (ax + b)^g + e x ≥ d
+//
+// When fitting, we add the additional constraint that both pieces meet at d:
+//
+// cd + f = (ad + b)^g + e
+//
+// Solving for e and folding it through gives an alternate formulation of the non-linear piece:
+//
+// tf(x) = cx + f x < d
+// tf(x) = (ax + b)^g - (ad + b)^g + cd + f x ≥ d
+//
+// Our overall strategy is then:
+// For a couple tolerances,
+// - fit_linear(): fit c,d,f iteratively to as many points as our tolerance allows
+// - invert c,d,f
+// - fit_nonlinear(): fit g,a,b using Gauss-Newton given those inverted c,d,f
+// (and by constraint, inverted e) to the inverse of the table.
+// Return the parameters with least maximum error.
+//
+// To run Gauss-Newton to find g,a,b, we'll also need the gradient of the residuals
+// of round-trip f_inv(x), the inverse of the non-linear piece of f(x).
+//
+// let y = Table(x)
+// r(x) = x - f_inv(y)
+//
+// ∂r/∂g = ln(ay + b)*(ay + b)^g
+// - ln(ad + b)*(ad + b)^g
+// ∂r/∂a = yg(ay + b)^(g-1)
+// - dg(ad + b)^(g-1)
+// ∂r/∂b = g(ay + b)^(g-1)
+// - g(ad + b)^(g-1)
+
+// Return the residual of roundtripping skcms_Curve(x) through f_inv(y) with parameters P,
+// and fill out the gradient of the residual into dfdP.
+static float rg_nonlinear(float x,
+ const skcms_Curve* curve,
+ const skcms_TransferFunction* tf,
+ const float P[3],
+ float dfdP[3]) {
+ const float y = eval_curve(curve, x);
+
+ const float g = P[0], a = P[1], b = P[2],
+ c = tf->c, d = tf->d, f = tf->f;
+
+ const float Y = fmaxf_(a*y + b, 0.0f),
+ D = a*d + b;
+ assert (D >= 0);
+
+ // The gradient.
+ dfdP[0] = logf_(Y)*powf_(Y, g)
+ - logf_(D)*powf_(D, g);
+ dfdP[1] = y*g*powf_(Y, g-1)
+ - d*g*powf_(D, g-1);
+ dfdP[2] = g*powf_(Y, g-1)
+ - g*powf_(D, g-1);
+
+ // The residual.
+ const float f_inv = powf_(Y, g)
+ - powf_(D, g)
+ + c*d + f;
+ return x - f_inv;
+}
+
+static bool gauss_newton_step(const skcms_Curve* curve,
+ const skcms_TransferFunction* tf,
+ float P[3],
+ float x0, float dx, int N) {
+ // We'll sample x from the range [x0,x1] (both inclusive) N times with even spacing.
+ //
+ // We want to do P' = P + (Jf^T Jf)^-1 Jf^T r(P),
+ // where r(P) is the residual vector
+ // and Jf is the Jacobian matrix of f(), ∂r/∂P.
+ //
+ // Let's review the shape of each of these expressions:
+ // r(P) is [N x 1], a column vector with one entry per value of x tested
+ // Jf is [N x 3], a matrix with an entry for each (x,P) pair
+ // Jf^T is [3 x N], the transpose of Jf
+ //
+ // Jf^T Jf is [3 x N] * [N x 3] == [3 x 3], a 3x3 matrix,
+ // and so is its inverse (Jf^T Jf)^-1
+ // Jf^T r(P) is [3 x N] * [N x 1] == [3 x 1], a column vector with the same shape as P
+ //
+ // Our implementation strategy to get to the final ∆P is
+ // 1) evaluate Jf^T Jf, call that lhs
+ // 2) evaluate Jf^T r(P), call that rhs
+ // 3) invert lhs
+ // 4) multiply inverse lhs by rhs
+ //
+ // This is a friendly implementation strategy because we don't need any
+ // buffers that scale with N and, equally nice, we never have to perform any
+ // variable-size matrix operations.
+ //
+ // Other implementation strategies could trade this off, e.g. evaluating the
+ // pseudoinverse of Jf ( (Jf^T Jf)^-1 Jf^T ) directly, then multiplying that by
+ // the residuals. That would probably require implementing singular value
+ // decomposition, and would create a [3 x N] matrix to be multiplied by the
+ // [N x 1] residual vector, but on the upside I think that'd eliminate the
+ // possibility of this gauss_newton_step() function ever failing.
+
+ // 0) start off with lhs and rhs safely zeroed.
+ skcms_Matrix3x3 lhs = {{ {0,0,0}, {0,0,0}, {0,0,0} }};
+ skcms_Vector3 rhs = { {0,0,0} };
+
+ // 1,2) evaluate lhs and evaluate rhs
+ // We want to evaluate Jf only once, but both lhs and rhs involve Jf^T,
+ // so we'll have to update lhs and rhs at the same time.
+ for (int i = 0; i < N; i++) {
+ float x = x0 + i*dx;
+
+ float dfdP[3] = {0,0,0};
+ float resid = rg_nonlinear(x,curve,tf,P, dfdP);
+
+ for (int r = 0; r < 3; r++) {
+ for (int c = 0; c < 3; c++) {
+ lhs.vals[r][c] += dfdP[r] * dfdP[c];
+ }
+ rhs.vals[r] += dfdP[r] * resid;
+ }
+ }
+
+ // If any of the 3 P parameters are unused, this matrix will be singular.
+ // Detect those cases and fix them up to identity instead, so we can invert.
+ for (int k = 0; k < 3; k++) {
+ if (lhs.vals[0][k]==0 && lhs.vals[1][k]==0 && lhs.vals[2][k]==0 &&
+ lhs.vals[k][0]==0 && lhs.vals[k][1]==0 && lhs.vals[k][2]==0) {
+ lhs.vals[k][k] = 1;
+ }
+ }
+
+ // 3) invert lhs
+ skcms_Matrix3x3 lhs_inv;
+ if (!skcms_Matrix3x3_invert(&lhs, &lhs_inv)) {
+ return false;
+ }
+
+ // 4) multiply inverse lhs by rhs
+ skcms_Vector3 dP = mv_mul(&lhs_inv, &rhs);
+ P[0] += dP.vals[0];
+ P[1] += dP.vals[1];
+ P[2] += dP.vals[2];
+ return isfinitef_(P[0]) && isfinitef_(P[1]) && isfinitef_(P[2]);
+}
+
+
+// Fit the points in [L,N) to the non-linear piece of tf, or return false if we can't.
+static bool fit_nonlinear(const skcms_Curve* curve, int L, int N, skcms_TransferFunction* tf) {
+ float P[3] = { tf->g, tf->a, tf->b };
+
+ // No matter where we start, dx should always represent N even steps from 0 to 1.
+ const float dx = 1.0f / (N-1);
+
+ // As far as we can tell, 1 Gauss-Newton step won't converge, and 3 steps is no better than 2.
+ for (int j = 0; j < 2; j++) {
+ // These extra constraints a >= 0 and ad+b >= 0 are not modeled in the optimization.
+ // We don't really know how to fix up a if it goes negative.
+ if (P[1] < 0) {
+ return false;
+ }
+ // If ad+b goes negative, we feel just barely not uneasy enough to tweak b so ad+b is zero.
+ if (P[1] * tf->d + P[2] < 0) {
+ P[2] = -P[1] * tf->d;
+ }
+ assert (P[1] >= 0 &&
+ P[1] * tf->d + P[2] >= 0);
+
+ if (!gauss_newton_step(curve, tf,
+ P,
+ L*dx, dx, N-L)) {
+ return false;
+ }
+ }
+
+ // We need to apply our fixups one last time
+ if (P[1] < 0) {
+ return false;
+ }
+ if (P[1] * tf->d + P[2] < 0) {
+ P[2] = -P[1] * tf->d;
+ }
+
+ assert (P[1] >= 0 &&
+ P[1] * tf->d + P[2] >= 0);
+
+ tf->g = P[0];
+ tf->a = P[1];
+ tf->b = P[2];
+ tf->e = tf->c*tf->d + tf->f
+ - powf_(tf->a*tf->d + tf->b, tf->g);
+ return true;
+}
+
+bool skcms_ApproximateCurve(const skcms_Curve* curve,
+ skcms_TransferFunction* approx,
+ float* max_error) {
+ if (!curve || !approx || !max_error) {
+ return false;
+ }
+
+ if (curve->table_entries == 0) {
+ // No point approximating an skcms_TransferFunction with an skcms_TransferFunction!
+ return false;
+ }
+
+ if (curve->table_entries == 1 || curve->table_entries > (uint32_t)INT_MAX) {
+ // We need at least two points, and must put some reasonable cap on the maximum number.
+ return false;
+ }
+
+ int N = (int)curve->table_entries;
+ const float dx = 1.0f / (N - 1);
+
+ *max_error = INFINITY_;
+ const float kTolerances[] = { 1.5f / 65535.0f, 1.0f / 512.0f };
+ for (int t = 0; t < ARRAY_COUNT(kTolerances); t++) {
+ skcms_TransferFunction tf,
+ tf_inv;
+
+ // It's problematic to fit curves with non-zero f, so always force it to zero explicitly.
+ tf.f = 0.0f;
+ int L = fit_linear(curve, N, kTolerances[t], &tf.c, &tf.d);
+
+ if (L == N) {
+ // If the entire data set was linear, move the coefficients to the nonlinear portion
+ // with G == 1. This lets us use a canonical representation with d == 0.
+ tf.g = 1;
+ tf.a = tf.c;
+ tf.b = tf.f;
+ tf.c = tf.d = tf.e = tf.f = 0;
+ } else if (L == N - 1) {
+ // Degenerate case with only two points in the nonlinear segment. Solve directly.
+ tf.g = 1;
+ tf.a = (eval_curve(curve, (N-1)*dx) -
+ eval_curve(curve, (N-2)*dx))
+ / dx;
+ tf.b = eval_curve(curve, (N-2)*dx)
+ - tf.a * (N-2)*dx;
+ tf.e = 0;
+ } else {
+ // Start by guessing a gamma-only curve through the midpoint.
+ int mid = (L + N) / 2;
+ float mid_x = mid / (N - 1.0f);
+ float mid_y = eval_curve(curve, mid_x);
+ tf.g = log2f_(mid_y) / log2f_(mid_x);
+ tf.a = 1;
+ tf.b = 0;
+ tf.e = tf.c*tf.d + tf.f
+ - powf_(tf.a*tf.d + tf.b, tf.g);
+
+
+ if (!skcms_TransferFunction_invert(&tf, &tf_inv) ||
+ !fit_nonlinear(curve, L,N, &tf_inv)) {
+ continue;
+ }
+
+ // We fit tf_inv, so calculate tf to keep in sync.
+ if (!skcms_TransferFunction_invert(&tf_inv, &tf)) {
+ continue;
+ }
+ }
+
+ // We'd better have a sane, sRGB-ish TF by now.
+ // Other non-Bad TFs would be fine, but we know we've only ever tried to fit sRGBish;
+ // anything else is just some accident of math and the way we pun tf.g as a type flag.
+ if (sRGBish != classify(tf)) {
+ continue;
+ }
+
+ // We find our error by roundtripping the table through tf_inv.
+ //
+ // (The most likely use case for this approximation is to be inverted and
+ // used as the transfer function for a destination color space.)
+ //
+ // We've kept tf and tf_inv in sync above, but we can't guarantee that tf is
+ // invertible, so re-verify that here (and use the new inverse for testing).
+ if (!skcms_TransferFunction_invert(&tf, &tf_inv)) {
+ continue;
+ }
+
+ float err = max_roundtrip_error(curve, &tf_inv);
+ if (*max_error > err) {
+ *max_error = err;
+ *approx = tf;
+ }
+ }
+ return isfinitef_(*max_error);
+}
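+// Usage sketch (illustrative): approximating an 8-bit table curve. `table` is
+// a hypothetical buffer the caller fills with a monotonic ramp (say, gamma 2.2);
+// on success *max_error reports the worst round-trip error actually achieved.
+//
+//     uint8_t table[256];  // filled by the caller
+//     skcms_Curve curve;
+//     curve.table_entries = 256;
+//     curve.table_8       = table;
+//     curve.table_16      = nullptr;
+//
+//     skcms_TransferFunction tf;
+//     float err;
+//     if (skcms_ApproximateCurve(&curve, &tf, &err)) {
+//         // tf approximates the table to within err.
+//     }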
+
+// ~~~~ Impl. of skcms_Transform() ~~~~
+
+typedef enum {
+ Op_load_a8,
+ Op_load_g8,
+ Op_load_8888_palette8,
+ Op_load_4444,
+ Op_load_565,
+ Op_load_888,
+ Op_load_8888,
+ Op_load_1010102,
+ Op_load_161616LE,
+ Op_load_16161616LE,
+ Op_load_161616BE,
+ Op_load_16161616BE,
+ Op_load_hhh,
+ Op_load_hhhh,
+ Op_load_fff,
+ Op_load_ffff,
+
+ Op_swap_rb,
+ Op_clamp,
+ Op_invert,
+ Op_force_opaque,
+ Op_premul,
+ Op_unpremul,
+ Op_matrix_3x3,
+ Op_matrix_3x4,
+ Op_lab_to_xyz,
+
+ Op_tf_r,
+ Op_tf_g,
+ Op_tf_b,
+ Op_tf_a,
+
+ Op_pq_r,
+ Op_pq_g,
+ Op_pq_b,
+ Op_pq_a,
+
+ Op_hlg_r,
+ Op_hlg_g,
+ Op_hlg_b,
+ Op_hlg_a,
+
+ Op_hlginv_r,
+ Op_hlginv_g,
+ Op_hlginv_b,
+ Op_hlginv_a,
+
+ Op_table_r,
+ Op_table_g,
+ Op_table_b,
+ Op_table_a,
+
+ Op_clut,
+
+ Op_store_a8,
+ Op_store_g8,
+ Op_store_4444,
+ Op_store_565,
+ Op_store_888,
+ Op_store_8888,
+ Op_store_1010102,
+ Op_store_161616LE,
+ Op_store_16161616LE,
+ Op_store_161616BE,
+ Op_store_16161616BE,
+ Op_store_hhh,
+ Op_store_hhhh,
+ Op_store_fff,
+ Op_store_ffff,
+} Op;
+
+#if defined(__clang__)
+ template <int N, typename T> using Vec = T __attribute__((ext_vector_type(N)));
+#elif defined(__GNUC__)
+ // For some reason GCC accepts this nonsense, but not the more straightforward version,
+ // template <int N, typename T> using Vec = T __attribute__((vector_size(N*sizeof(T))));
+ template <int N, typename T>
+ struct VecHelper { typedef T __attribute__((vector_size(N*sizeof(T)))) V; };
+
+ template <int N, typename T> using Vec = typename VecHelper<N,T>::V;
+#endif
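+// With either definition, Vec<4,float> is a 16-byte vector of four floats,
+// and the usual operators (+, *, comparisons) apply lane-wise.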
+
+// First, instantiate our default exec_ops() implementation using the default compilation target.
+
+namespace baseline {
+#if defined(SKCMS_PORTABLE) || !(defined(__clang__) || defined(__GNUC__)) \
+ || (defined(__EMSCRIPTEN_major__) && !defined(__wasm_simd128__))
+ #define N 1
+ template <typename T> using V = T;
+ using Color = float;
+#elif defined(__AVX512F__)
+ #define N 16
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#elif defined(__AVX__)
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(SKCMS_OPT_INTO_NEON_FP16)
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = _Float16;
+#else
+ #define N 4
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#endif
+
+ #include "src/Transform_inl.h"
+ #undef N
+}
+
+// Now, instantiate any other versions of run_program() we may want for runtime detection.
+#if !defined(SKCMS_PORTABLE) && \
+ !defined(SKCMS_NO_RUNTIME_CPU_DETECTION) && \
+ (( defined(__clang__) && __clang_major__ >= 5) || \
+ (!defined(__clang__) && defined(__GNUC__))) \
+ && defined(__x86_64__)
+
+ #if !defined(__AVX2__)
+ #if defined(__clang__)
+ #pragma clang attribute push(__attribute__((target("avx2,f16c"))), apply_to=function)
+ #elif defined(__GNUC__)
+ #pragma GCC push_options
+ #pragma GCC target("avx2,f16c")
+ #endif
+
+ namespace hsw {
+ #define USING_AVX
+ #define USING_AVX_F16C
+ #define USING_AVX2
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+
+ #include "src/Transform_inl.h"
+
+ // src/Transform_inl.h will undefine USING_* for us.
+ #undef N
+ }
+
+ #if defined(__clang__)
+ #pragma clang attribute pop
+ #elif defined(__GNUC__)
+ #pragma GCC pop_options
+ #endif
+
+ #define TEST_FOR_HSW
+ #endif
+
+ #if !defined(__AVX512F__)
+ #if defined(__clang__)
+ #pragma clang attribute push(__attribute__((target("avx512f,avx512dq,avx512cd,avx512bw,avx512vl"))), apply_to=function)
+ #elif defined(__GNUC__)
+ #pragma GCC push_options
+ #pragma GCC target("avx512f,avx512dq,avx512cd,avx512bw,avx512vl")
+ #endif
+
+ namespace skx {
+ #define USING_AVX512F
+ #define N 16
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+
+ #include "src/Transform_inl.h"
+
+ // src/Transform_inl.h will undefine USING_* for us.
+ #undef N
+ }
+
+ #if defined(__clang__)
+ #pragma clang attribute pop
+ #elif defined(__GNUC__)
+ #pragma GCC pop_options
+ #endif
+
+ #define TEST_FOR_SKX
+ #endif
+
+ #if defined(TEST_FOR_HSW) || defined(TEST_FOR_SKX)
+ enum class CpuType { None, HSW, SKX };
+ static CpuType cpu_type() {
+ static const CpuType type = []{
+ // See http://www.sandpile.org/x86/cpuid.htm
+
+ // First, a basic cpuid(1) lets us check prerequisites for HSW, SKX.
+ uint32_t eax, ebx, ecx, edx;
+ __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ : "0"(1), "2"(0));
+ if ((edx & (1u<<25)) && // SSE
+ (edx & (1u<<26)) && // SSE2
+ (ecx & (1u<< 0)) && // SSE3
+ (ecx & (1u<< 9)) && // SSSE3
+ (ecx & (1u<<12)) && // FMA (N.B. not used, avoided even)
+ (ecx & (1u<<19)) && // SSE4.1
+ (ecx & (1u<<20)) && // SSE4.2
+ (ecx & (1u<<26)) && // XSAVE
+ (ecx & (1u<<27)) && // OSXSAVE
+ (ecx & (1u<<28)) && // AVX
+ (ecx & (1u<<29))) { // F16C
+
+ // Call cpuid(7) to check for AVX2 and AVX-512 bits.
+ __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ : "0"(7), "2"(0));
+ // eax from xgetbv(0) will tell us whether XMM, YMM, and ZMM state is saved.
+ uint32_t xcr0, dont_need_edx;
+ __asm__ __volatile__("xgetbv" : "=a"(xcr0), "=d"(dont_need_edx) : "c"(0));
+
+ if ((xcr0 & (1u<<1)) && // XMM register state saved?
+ (xcr0 & (1u<<2)) && // YMM register state saved?
+ (ebx & (1u<<5))) { // AVX2
+ // At this point we're at least HSW. Continue checking for SKX.
+ if ((xcr0 & (1u<< 5)) && // Opmasks state saved?
+ (xcr0 & (1u<< 6)) && // First 16 ZMM registers saved?
+ (xcr0 & (1u<< 7)) && // High 16 ZMM registers saved?
+ (ebx & (1u<<16)) && // AVX512F
+ (ebx & (1u<<17)) && // AVX512DQ
+ (ebx & (1u<<28)) && // AVX512CD
+ (ebx & (1u<<30)) && // AVX512BW
+ (ebx & (1u<<31))) { // AVX512VL
+ return CpuType::SKX;
+ }
+ return CpuType::HSW;
+ }
+ }
+ return CpuType::None;
+ }();
+ return type;
+ }
+ #endif
+
+#endif
+
+typedef struct {
+ Op op;
+ const void* arg;
+} OpAndArg;
+
+static OpAndArg select_curve_op(const skcms_Curve* curve, int channel) {
+ static const struct { Op sRGBish, PQish, HLGish, HLGinvish, table; } ops[] = {
+ { Op_tf_r, Op_pq_r, Op_hlg_r, Op_hlginv_r, Op_table_r },
+ { Op_tf_g, Op_pq_g, Op_hlg_g, Op_hlginv_g, Op_table_g },
+ { Op_tf_b, Op_pq_b, Op_hlg_b, Op_hlginv_b, Op_table_b },
+ { Op_tf_a, Op_pq_a, Op_hlg_a, Op_hlginv_a, Op_table_a },
+ };
+ const auto& op = ops[channel];
+
+ if (curve->table_entries == 0) {
+ const OpAndArg noop = { Op_load_a8/*doesn't matter*/, nullptr };
+
+ const skcms_TransferFunction& tf = curve->parametric;
+
+ if (tf.g == 1 && tf.a == 1 &&
+ tf.b == 0 && tf.c == 0 && tf.d == 0 && tf.e == 0 && tf.f == 0) {
+ return noop;
+ }
+
+ switch (classify(tf)) {
+ case Bad: return noop;
+ case sRGBish: return OpAndArg{op.sRGBish, &tf};
+ case PQish: return OpAndArg{op.PQish, &tf};
+ case HLGish: return OpAndArg{op.HLGish, &tf};
+ case HLGinvish: return OpAndArg{op.HLGinvish, &tf};
+ }
+ }
+ return OpAndArg{op.table, curve};
+}
+
+static size_t bytes_per_pixel(skcms_PixelFormat fmt) {
+ switch (fmt >> 1) { // ignore rgb/bgr
+ case skcms_PixelFormat_A_8 >> 1: return 1;
+ case skcms_PixelFormat_G_8 >> 1: return 1;
+ case skcms_PixelFormat_RGBA_8888_Palette8 >> 1: return 1;
+ case skcms_PixelFormat_ABGR_4444 >> 1: return 2;
+ case skcms_PixelFormat_RGB_565 >> 1: return 2;
+ case skcms_PixelFormat_RGB_888 >> 1: return 3;
+ case skcms_PixelFormat_RGBA_8888 >> 1: return 4;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: return 4;
+ case skcms_PixelFormat_RGB_161616LE >> 1: return 6;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: return 8;
+ case skcms_PixelFormat_RGB_161616BE >> 1: return 6;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: return 8;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: return 6;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: return 8;
+ case skcms_PixelFormat_RGB_hhh >> 1: return 6;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: return 8;
+ case skcms_PixelFormat_RGB_fff >> 1: return 12;
+ case skcms_PixelFormat_RGBA_ffff >> 1: return 16;
+ }
+ assert(false);
+ return 0;
+}
+
+static bool prep_for_destination(const skcms_ICCProfile* profile,
+ skcms_Matrix3x3* fromXYZD50,
+ skcms_TransferFunction* invR,
+ skcms_TransferFunction* invG,
+ skcms_TransferFunction* invB) {
+ // We only support destinations with parametric transfer functions
+ // and with gamuts that can be transformed from XYZD50.
+ return profile->has_trc
+ && profile->has_toXYZD50
+ && profile->trc[0].table_entries == 0
+ && profile->trc[1].table_entries == 0
+ && profile->trc[2].table_entries == 0
+ && skcms_TransferFunction_invert(&profile->trc[0].parametric, invR)
+ && skcms_TransferFunction_invert(&profile->trc[1].parametric, invG)
+ && skcms_TransferFunction_invert(&profile->trc[2].parametric, invB)
+ && skcms_Matrix3x3_invert(&profile->toXYZD50, fromXYZD50);
+}
+
+bool skcms_Transform(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels) {
+ return skcms_TransformWithPalette(src, srcFmt, srcAlpha, srcProfile,
+ dst, dstFmt, dstAlpha, dstProfile,
+ npixels, nullptr);
+}
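+// A minimal call (illustrative): converting RGBA to BGRA with no color-space
+// change. Null profiles mean sRGB on both sides, so only the pixel format
+// conversion happens. src_px and dst_px are hypothetical 64-pixel buffers.
+//
+//     skcms_Transform(src_px, skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul, nullptr,
+//                     dst_px, skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_Unpremul, nullptr,
+//                     64);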
+
+bool skcms_TransformWithPalette(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t nz,
+ const void* palette) {
+ const size_t dst_bpp = bytes_per_pixel(dstFmt),
+ src_bpp = bytes_per_pixel(srcFmt);
+ // Let's just refuse if the request is absurdly big.
+ if (nz * dst_bpp > INT_MAX || nz * src_bpp > INT_MAX) {
+ return false;
+ }
+ int n = (int)nz;
+
+ // Null profiles default to sRGB. Passing null for both is handy when doing format conversion.
+ if (!srcProfile) {
+ srcProfile = skcms_sRGB_profile();
+ }
+ if (!dstProfile) {
+ dstProfile = skcms_sRGB_profile();
+ }
+
+ // We can't transform in place unless the PixelFormats are the same size.
+ if (dst == src && dst_bpp != src_bpp) {
+ return false;
+ }
+ // TODO: more careful alias rejection (like, dst == src + 1)?
+
+ if (needs_palette(srcFmt) && !palette) {
+ return false;
+ }
+
+ Op program [32];
+ const void* arguments[32];
+
+ Op* ops = program;
+ const void** args = arguments;
+
+ // These are always parametric curves of some sort.
+ skcms_Curve dst_curves[3];
+ dst_curves[0].table_entries =
+ dst_curves[1].table_entries =
+ dst_curves[2].table_entries = 0;
+
+ skcms_Matrix3x3 from_xyz;
+
+ switch (srcFmt >> 1) {
+ default: return false;
+ case skcms_PixelFormat_A_8 >> 1: *ops++ = Op_load_a8; break;
+ case skcms_PixelFormat_G_8 >> 1: *ops++ = Op_load_g8; break;
+ case skcms_PixelFormat_ABGR_4444 >> 1: *ops++ = Op_load_4444; break;
+ case skcms_PixelFormat_RGB_565 >> 1: *ops++ = Op_load_565; break;
+ case skcms_PixelFormat_RGB_888 >> 1: *ops++ = Op_load_888; break;
+ case skcms_PixelFormat_RGBA_8888 >> 1: *ops++ = Op_load_8888; break;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: *ops++ = Op_load_1010102; break;
+ case skcms_PixelFormat_RGB_161616LE >> 1: *ops++ = Op_load_161616LE; break;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: *ops++ = Op_load_16161616LE; break;
+ case skcms_PixelFormat_RGB_161616BE >> 1: *ops++ = Op_load_161616BE; break;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: *ops++ = Op_load_16161616BE; break;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: *ops++ = Op_load_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: *ops++ = Op_load_hhhh; break;
+ case skcms_PixelFormat_RGB_hhh >> 1: *ops++ = Op_load_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: *ops++ = Op_load_hhhh; break;
+ case skcms_PixelFormat_RGB_fff >> 1: *ops++ = Op_load_fff; break;
+ case skcms_PixelFormat_RGBA_ffff >> 1: *ops++ = Op_load_ffff; break;
+
+ case skcms_PixelFormat_RGBA_8888_Palette8 >> 1: *ops++ = Op_load_8888_palette8;
+ *args++ = palette;
+ break;
+ }
+ if (srcFmt == skcms_PixelFormat_RGB_hhh_Norm ||
+ srcFmt == skcms_PixelFormat_RGBA_hhhh_Norm) {
+ *ops++ = Op_clamp;
+ }
+ if (srcFmt & 1) {
+ *ops++ = Op_swap_rb;
+ }
+ skcms_ICCProfile gray_dst_profile;
+ if ((dstFmt >> 1) == (skcms_PixelFormat_G_8 >> 1)) {
+ // When transforming to gray, stop at XYZ (by setting toXYZ to identity), then transform
+ // luminance (Y) by the destination transfer function.
+ gray_dst_profile = *dstProfile;
+ skcms_SetXYZD50(&gray_dst_profile, &skcms_XYZD50_profile()->toXYZD50);
+ dstProfile = &gray_dst_profile;
+ }
+
+ if (srcProfile->data_color_space == skcms_Signature_CMYK) {
+ // Photoshop creates CMYK images as inverse CMYK.
+ // These happen to be the only ones we've _ever_ seen.
+ *ops++ = Op_invert;
+ // With CMYK, ignore the alpha type, to avoid changing K or conflating CMY with K.
+ srcAlpha = skcms_AlphaFormat_Unpremul;
+ }
+
+ if (srcAlpha == skcms_AlphaFormat_Opaque) {
+ *ops++ = Op_force_opaque;
+ } else if (srcAlpha == skcms_AlphaFormat_PremulAsEncoded) {
+ *ops++ = Op_unpremul;
+ }
+
+ if (dstProfile != srcProfile) {
+
+ if (!prep_for_destination(dstProfile,
+ &from_xyz,
+ &dst_curves[0].parametric,
+ &dst_curves[1].parametric,
+ &dst_curves[2].parametric)) {
+ return false;
+ }
+
+ if (srcProfile->has_A2B) {
+ if (srcProfile->A2B.input_channels) {
+ for (int i = 0; i < (int)srcProfile->A2B.input_channels; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.input_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ *ops++ = Op_clamp;
+ *ops++ = Op_clut;
+ *args++ = &srcProfile->A2B;
+ }
+
+ if (srcProfile->A2B.matrix_channels == 3) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.matrix_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+
+ static const skcms_Matrix3x4 I = {{
+ {1,0,0,0},
+ {0,1,0,0},
+ {0,0,1,0},
+ }};
+ if (0 != memcmp(&I, &srcProfile->A2B.matrix, sizeof(I))) {
+ *ops++ = Op_matrix_3x4;
+ *args++ = &srcProfile->A2B.matrix;
+ }
+ }
+
+ if (srcProfile->A2B.output_channels == 3) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.output_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+
+ if (srcProfile->pcs == skcms_Signature_Lab) {
+ *ops++ = Op_lab_to_xyz;
+ }
+
+ } else if (srcProfile->has_trc && srcProfile->has_toXYZD50) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->trc[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ } else {
+ return false;
+ }
+
+ // A2B sources should already be in XYZD50 at this point.
+ // Others still need to be transformed using their toXYZD50 matrix.
+ // N.B. There are profiles that contain both A2B tags and toXYZD50 matrices.
+ // If we use the A2B tags, we need to ignore the XYZD50 matrix entirely.
+ assert (srcProfile->has_A2B || srcProfile->has_toXYZD50);
+ static const skcms_Matrix3x3 I = {{
+ { 1.0f, 0.0f, 0.0f },
+ { 0.0f, 1.0f, 0.0f },
+ { 0.0f, 0.0f, 1.0f },
+ }};
+ const skcms_Matrix3x3* to_xyz = srcProfile->has_A2B ? &I : &srcProfile->toXYZD50;
+
+ // There's a chance the source and destination gamuts are identical,
+ // in which case we can skip the gamut transform.
+ if (0 != memcmp(&dstProfile->toXYZD50, to_xyz, sizeof(skcms_Matrix3x3))) {
+ // Concat the entire gamut transform into from_xyz,
+ // now slightly misnamed but it's a handy spot to stash the result.
+ from_xyz = skcms_Matrix3x3_concat(&from_xyz, to_xyz);
+ *ops++ = Op_matrix_3x3;
+ *args++ = &from_xyz;
+ }
+
+ // Encode back to dst RGB using its parametric transfer functions.
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(dst_curves+i, i);
+ if (oa.arg) {
+ assert (oa.op != Op_table_r &&
+ oa.op != Op_table_g &&
+ oa.op != Op_table_b &&
+ oa.op != Op_table_a);
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+
+ // Clamp here before premul to make sure we're clamping to normalized values _and_ gamut,
+ // not just to values that fit in [0,1].
+ //
+ // E.g. r = 1.1, a = 0.5 would fit fine in fixed point after premul (ra=0.55,a=0.5),
+ // but would be carrying r > 1, which is really unexpected for downstream consumers.
+ if (dstFmt < skcms_PixelFormat_RGB_hhh) {
+ *ops++ = Op_clamp;
+ }
+ if (dstAlpha == skcms_AlphaFormat_Opaque) {
+ *ops++ = Op_force_opaque;
+ } else if (dstAlpha == skcms_AlphaFormat_PremulAsEncoded) {
+ *ops++ = Op_premul;
+ }
+ if (dstFmt & 1) {
+ *ops++ = Op_swap_rb;
+ }
+ switch (dstFmt >> 1) {
+ default: return false;
+ case skcms_PixelFormat_A_8 >> 1: *ops++ = Op_store_a8; break;
+ case skcms_PixelFormat_G_8 >> 1: *ops++ = Op_store_g8; break;
+ case skcms_PixelFormat_ABGR_4444 >> 1: *ops++ = Op_store_4444; break;
+ case skcms_PixelFormat_RGB_565 >> 1: *ops++ = Op_store_565; break;
+ case skcms_PixelFormat_RGB_888 >> 1: *ops++ = Op_store_888; break;
+ case skcms_PixelFormat_RGBA_8888 >> 1: *ops++ = Op_store_8888; break;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: *ops++ = Op_store_1010102; break;
+ case skcms_PixelFormat_RGB_161616LE >> 1: *ops++ = Op_store_161616LE; break;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: *ops++ = Op_store_16161616LE; break;
+ case skcms_PixelFormat_RGB_161616BE >> 1: *ops++ = Op_store_161616BE; break;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: *ops++ = Op_store_16161616BE; break;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: *ops++ = Op_store_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: *ops++ = Op_store_hhhh; break;
+ case skcms_PixelFormat_RGB_hhh >> 1: *ops++ = Op_store_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: *ops++ = Op_store_hhhh; break;
+ case skcms_PixelFormat_RGB_fff >> 1: *ops++ = Op_store_fff; break;
+ case skcms_PixelFormat_RGBA_ffff >> 1: *ops++ = Op_store_ffff; break;
+ }
+
+ auto run = baseline::run_program;
+#if defined(TEST_FOR_HSW)
+ switch (cpu_type()) {
+ case CpuType::None: break;
+ case CpuType::HSW: run = hsw::run_program; break;
+ case CpuType::SKX: run = hsw::run_program; break;
+ }
+#endif
+#if defined(TEST_FOR_SKX)
+ switch (cpu_type()) {
+ case CpuType::None: break;
+ case CpuType::HSW: break;
+ case CpuType::SKX: run = skx::run_program; break;
+ }
+#endif
+ run(program, arguments, (const char*)src, (char*)dst, n, src_bpp,dst_bpp);
+ return true;
+}
+
+static void assert_usable_as_destination(const skcms_ICCProfile* profile) {
+#if defined(NDEBUG)
+ (void)profile;
+#else
+ skcms_Matrix3x3 fromXYZD50;
+ skcms_TransferFunction invR, invG, invB;
+ assert(prep_for_destination(profile, &fromXYZD50, &invR, &invG, &invB));
+#endif
+}
+
+bool skcms_MakeUsableAsDestination(skcms_ICCProfile* profile) {
+ skcms_Matrix3x3 fromXYZD50;
+ if (!profile->has_trc || !profile->has_toXYZD50
+ || !skcms_Matrix3x3_invert(&profile->toXYZD50, &fromXYZD50)) {
+ return false;
+ }
+
+ skcms_TransferFunction tf[3];
+ for (int i = 0; i < 3; i++) {
+ skcms_TransferFunction inv;
+ if (profile->trc[i].table_entries == 0
+ && skcms_TransferFunction_invert(&profile->trc[i].parametric, &inv)) {
+ tf[i] = profile->trc[i].parametric;
+ continue;
+ }
+
+ float max_error;
+ // Parametric curves from skcms_ApproximateCurve() are guaranteed to be invertible.
+ if (!skcms_ApproximateCurve(&profile->trc[i], &tf[i], &max_error)) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < 3; ++i) {
+ profile->trc[i].table_entries = 0;
+ profile->trc[i].parametric = tf[i];
+ }
+
+ assert_usable_as_destination(profile);
+ return true;
+}
+
+bool skcms_MakeUsableAsDestinationWithSingleCurve(skcms_ICCProfile* profile) {
+ // Operate on a copy of profile, so we can choose the best TF for the original curves
+ skcms_ICCProfile result = *profile;
+ if (!skcms_MakeUsableAsDestination(&result)) {
+ return false;
+ }
+
+ int best_tf = 0;
+ float min_max_error = INFINITY_;
+ for (int i = 0; i < 3; i++) {
+ skcms_TransferFunction inv;
+ if (!skcms_TransferFunction_invert(&result.trc[i].parametric, &inv)) {
+ return false;
+ }
+
+ float err = 0;
+ for (int j = 0; j < 3; ++j) {
+ err = fmaxf_(err, max_roundtrip_error(&profile->trc[j], &inv));
+ }
+ if (min_max_error > err) {
+ min_max_error = err;
+ best_tf = i;
+ }
+ }
+
+ for (int i = 0; i < 3; i++) {
+ result.trc[i].parametric = result.trc[best_tf].parametric;
+ }
+
+ *profile = result;
+ assert_usable_as_destination(profile);
+ return true;
+}
diff --git a/gfx/skia/skia/third_party/skcms/skcms.gni b/gfx/skia/skia/third_party/skcms/skcms.gni
new file mode 100644
index 0000000000..c0ebdbe334
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/skcms.gni
@@ -0,0 +1,8 @@
+# Copyright 2018 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+skcms_sources = [
+ "skcms.cc",
+]
diff --git a/gfx/skia/skia/third_party/skcms/skcms_internal.h b/gfx/skia/skia/third_party/skcms/skcms_internal.h
new file mode 100644
index 0000000000..551128a8bc
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/skcms_internal.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+// skcms_internal.h contains APIs shared by skcms' internals and its test tools.
+// Please don't use this header from outside the skcms repo.
+
+#include "skcms.h"
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ~~~~ General Helper Macros ~~~~
+ #define ARRAY_COUNT(arr) (int)(sizeof((arr)) / sizeof(*(arr)))
+
+// ~~~~ skcms_ICCProfile ~~~~
+ bool skcms_GetCHAD(const skcms_ICCProfile* profile, skcms_Matrix3x3* m);
+
+ // 252 of a random shuffle of all possible bytes.
+ // 252 is evenly divisible by 3 and 4. Only 192, 10, 241, and 43 are missing.
+ // Used for ICC profile equivalence testing.
+ extern const uint8_t skcms_252_random_bytes[252];
+
+// ~~~~ Portable Math ~~~~
+ static inline float floorf_(float x) {
+ float roundtrip = (float)((int)x);
+ return roundtrip > x ? roundtrip - 1 : roundtrip;
+ }
+ static inline float fabsf_(float x) { return x < 0 ? -x : x; }
+ float powf_(float, float);
+
+// ~~~~ Does this pixel format need a palette pointer to be usable? ~~~~
+ static inline bool needs_palette(skcms_PixelFormat fmt) {
+ return (fmt >> 1) == (skcms_PixelFormat_RGBA_8888_Palette8 >> 1);
+ }
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/gfx/skia/skia/third_party/skcms/src/Transform_inl.h b/gfx/skia/skia/third_party/skcms/src/Transform_inl.h
new file mode 100644
index 0000000000..c4b312286a
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/src/Transform_inl.h
@@ -0,0 +1,1545 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Intentionally NO #pragma once... included multiple times.
+
+// This file is included from skcms.cc in a namespace with some pre-defines:
+// - N: depth of all vectors, 1,4,8, or 16 (preprocessor define)
+// - V<T>: a template to create a vector of N T's.
+
+using F = V<Color>; // Called F for historic reasons... maybe rename C?
+using I32 = V<int32_t>;
+using U64 = V<uint64_t>;
+using U32 = V<uint32_t>;
+using U16 = V<uint16_t>;
+using U8 = V<uint8_t>;
+
+
+#if defined(__GNUC__) && !defined(__clang__)
+ // Once again, GCC is kind of weird, not allowing vector = scalar directly.
+ static constexpr F F0 = F() + 0.0f,
+ F1 = F() + 1.0f;
+#else
+ static constexpr F F0 = 0.0f,
+ F1 = 1.0f;
+#endif
+
+// Instead of checking __AVX__ below, we'll check USING_AVX.
+// This lets skcms.cc set USING_AVX to force us in even if the compiler's not set that way.
+// Same deal for __F16C__ and __AVX2__ ~~~> USING_AVX_F16C, USING_AVX2.
+
+#if !defined(USING_AVX) && N == 8 && defined(__AVX__)
+ #define USING_AVX
+#endif
+#if !defined(USING_AVX_F16C) && defined(USING_AVX) && defined(__F16C__)
+ #define USING_AVX_F16C
+#endif
+#if !defined(USING_AVX2) && defined(USING_AVX) && defined(__AVX2__)
+ #define USING_AVX2
+#endif
+#if !defined(USING_AVX512F) && N == 16 && defined(__AVX512F__)
+ #define USING_AVX512F
+#endif
+
+// Similar to the AVX+ features, we define USING_NEON and USING_NEON_F16C.
+// This is more for organizational clarity... skcms.cc doesn't force these.
+#if N > 1 && defined(__ARM_NEON)
+ #define USING_NEON
+ #if __ARM_FP & 2
+ #define USING_NEON_F16C
+ #endif
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(SKCMS_OPT_INTO_NEON_FP16)
+ #define USING_NEON_FP16
+ #endif
+#endif
+
+// These -Wvector-conversion warnings seem to trigger in very bogus situations,
+// like vst3q_f32() expecting a 16x char rather than a 4x float vector. :/
+#if defined(USING_NEON) && defined(__clang__)
+ #pragma clang diagnostic ignored "-Wvector-conversion"
+#endif
+
+// GCC warns us about returning U64 on x86 because it's larger than a register.
+// You'd see warnings like, "using AVX even though AVX is not enabled".
+// We stifle these warnings... our helpers that return U64 are always inlined.
+#if defined(__SSE__) && defined(__GNUC__) && !defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wpsabi"
+#endif
+
+#if defined(__clang__)
+ #define FALLTHROUGH [[clang::fallthrough]]
+#else
+ #define FALLTHROUGH
+#endif
+
+// We tag most helper functions as SI, to enforce good code generation
+// but also work around what we think is a bug in GCC: when targeting 32-bit
+// x86, GCC tends to pass U16 (4x uint16_t vector) function arguments in the
+// MMX mm0 register, which seems to mess with unrelated code that later uses
+// x87 FP instructions (MMX's mm0 is an alias for x87's st0 register).
+//
+// It helps codegen to call __builtin_memcpy() when we know the byte count at compile time.
+#if defined(__clang__) || defined(__GNUC__)
+ #define SI static inline __attribute__((always_inline))
+#else
+ #define SI static inline
+#endif
+
+template <typename T, typename P>
+SI T load(const P* ptr) {
+ T val;
+ small_memcpy(&val, ptr, sizeof(val));
+ return val;
+}
+template <typename T, typename P>
+SI void store(P* ptr, const T& val) {
+ small_memcpy(ptr, &val, sizeof(val));
+}
+
+// (T)v is a cast when N == 1 and a bit-pun when N>1,
+// so we use cast<T>(v) to actually cast or bit_pun<T>(v) to bit-pun.
+template <typename D, typename S>
+SI D cast(const S& v) {
+#if N == 1
+ return (D)v;
+#elif defined(__clang__)
+ return __builtin_convertvector(v, D);
+#else
+ D d;
+ for (int i = 0; i < N; i++) {
+ d[i] = v[i];
+ }
+ return d;
+#endif
+}
+
+template <typename D, typename S>
+SI D bit_pun(const S& v) {
+ static_assert(sizeof(D) == sizeof(v), "");
+ return load<D>(&v);
+}
+
+// When we convert from float to fixed point, it's very common to want to round,
+// and for some reason compilers generate better code when converting to int32_t.
+// To serve both those ends, we use this function to_fixed() instead of direct cast().
+#if defined(USING_NEON_FP16)
+ // NEON's got an F16 -> U16 instruction, so this should be fine without going via I16.
+ SI U16 to_fixed(F f) { return cast<U16>(f + 0.5f); }
+#else
+ SI U32 to_fixed(F f) { return (U32)cast<I32>(f + 0.5f); }
+#endif
+
+
+// Sometimes we do something crazy on one branch of a conditional,
+// like divide by zero or convert a huge float to an integer,
+// but then harmlessly select the other side. That trips up N==1
+// sanitizer builds, so we make if_then_else() a macro to avoid
+// evaluating the unused side.
+
+#if N == 1
+ #define if_then_else(cond, t, e) ((cond) ? (t) : (e))
+#else
+ template <typename C, typename T>
+ SI T if_then_else(C cond, T t, T e) {
+ return bit_pun<T>( ( cond & bit_pun<C>(t)) |
+ (~cond & bit_pun<C>(e)) );
+ }
+#endif
+
+
+SI F F_from_Half(U16 half) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<F>(half);
+#elif defined(USING_NEON_F16C)
+ return vcvt_f32_f16((float16x4_t)half);
+#elif defined(USING_AVX512F)
+ return (F)_mm512_cvtph_ps((__m256i)half);
+#elif defined(USING_AVX_F16C)
+ typedef int16_t __attribute__((vector_size(16))) I16;
+ return __builtin_ia32_vcvtph2ps256((I16)half);
+#else
+ U32 wide = cast<U32>(half);
+ // A half is 1-5-10 sign-exponent-mantissa, with 15 exponent bias.
+ U32 s = wide & 0x8000,
+ em = wide ^ s;
+
+ // Constructing the float is easy if the half is not denormalized.
+ F norm = bit_pun<F>( (s<<16) + (em<<13) + ((127-15)<<23) );
+
+ // Simply flush all denorm half floats to zero.
+ return if_then_else(em < 0x0400, F0, norm);
+#endif
+}
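+// Worked example for the portable path above: the half 0x3C00 (1.0) has s = 0
+// and em = 0x3C00, so (em<<13) + ((127-15)<<23) = 0x07800000 + 0x38000000
+// = 0x3F800000, which is exactly 1.0f.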
+
+#if defined(__clang__)
+ // The -((127-15)<<10) underflows that side of the math when
+ // we pass a denorm half float. It's harmless... we'll take the 0 side anyway.
+ __attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+SI U16 Half_from_F(F f) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<U16>(f);
+#elif defined(USING_NEON_F16C)
+ return (U16)vcvt_f16_f32(f);
+#elif defined(USING_AVX512F)
+ return (U16)_mm512_cvtps_ph((__m512 )f, _MM_FROUND_CUR_DIRECTION );
+#elif defined(USING_AVX_F16C)
+ return (U16)__builtin_ia32_vcvtps2ph256(f, 0x04/*_MM_FROUND_CUR_DIRECTION*/);
+#else
+ // A float is 1-8-23 sign-exponent-mantissa, with 127 exponent bias.
+ U32 sem = bit_pun<U32>(f),
+ s = sem & 0x80000000,
+ em = sem ^ s;
+
+ // For simplicity we flush denorm half floats (including all denorm floats) to zero.
+ return cast<U16>(if_then_else(em < 0x38800000, (U32)F0
+ , (s>>16) + (em>>13) - ((127-15)<<10)));
+#endif
+}
+
+// Swap high and low bytes of 16-bit lanes, converting between big-endian and little-endian.
+#if defined(USING_NEON_FP16)
+ SI U16 swap_endian_16(U16 v) {
+ return (U16)vrev16q_u8((uint8x16_t) v);
+ }
+#elif defined(USING_NEON)
+ SI U16 swap_endian_16(U16 v) {
+ return (U16)vrev16_u8((uint8x8_t) v);
+ }
+#endif
+
+SI U64 swap_endian_16x4(const U64& rgba) {
+ return (rgba & 0x00ff00ff00ff00ff) << 8
+ | (rgba & 0xff00ff00ff00ff00) >> 8;
+}
+
+#if defined(USING_NEON_FP16)
+ SI F min_(F x, F y) { return (F)vminq_f16((float16x8_t)x, (float16x8_t)y); }
+ SI F max_(F x, F y) { return (F)vmaxq_f16((float16x8_t)x, (float16x8_t)y); }
+#elif defined(USING_NEON)
+ SI F min_(F x, F y) { return (F)vminq_f32((float32x4_t)x, (float32x4_t)y); }
+ SI F max_(F x, F y) { return (F)vmaxq_f32((float32x4_t)x, (float32x4_t)y); }
+#else
+ SI F min_(F x, F y) { return if_then_else(x > y, y, x); }
+ SI F max_(F x, F y) { return if_then_else(x < y, y, x); }
+#endif
+
+SI F floor_(F x) {
+#if N == 1
+ return floorf_(x);
+#elif defined(USING_NEON_FP16)
+ return vrndmq_f16(x);
+#elif defined(__aarch64__)
+ return vrndmq_f32(x);
+#elif defined(USING_AVX512F)
+ // Clang's _mm512_floor_ps() passes its mask as -1, not (__mmask16)-1,
+ // and integer sanitizer catches that this implicit cast changes the
+ // value from -1 to 65535. We'll cast manually to work around it.
+ // Read this as `return _mm512_floor_ps(x)`.
+ return _mm512_mask_floor_ps(x, (__mmask16)-1, x);
+#elif defined(USING_AVX)
+ return __builtin_ia32_roundps256(x, 0x01/*_MM_FROUND_FLOOR*/);
+#elif defined(__SSE4_1__)
+ return _mm_floor_ps(x);
+#else
+ // Round trip through integers with a truncating cast.
+ F roundtrip = cast<F>(cast<I32>(x));
+ // If x is negative, truncating gives the ceiling instead of the floor.
+ return roundtrip - if_then_else(roundtrip > x, F1, F0);
+
+ // This implementation fails for values of x that are outside
+ // the range an integer can represent. We expect most x to be small.
+#endif
+}
+
+SI F approx_log2(F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ return x;
+#else
+ // The first approximation of log2(x) is its exponent 'e', minus 127.
+ I32 bits = bit_pun<I32>(x);
+
+ F e = cast<F>(bits) * (1.0f / (1<<23));
+
+ // If we use the mantissa too we can refine the error significantly.
+ F m = bit_pun<F>( (bits & 0x007fffff) | 0x3f000000 );
+
+ return e - 124.225514990f
+ - 1.498030302f*m
+ - 1.725879990f/(0.3520887068f + m);
+#endif
+}
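+// Spot check for the approximation above: at x = 1.0f the bits are 0x3F800000,
+// so e = 127.0 and m = 0.5, giving 127 - 124.225515 - 0.749015 - 2.025470 ≈ 0,
+// matching log2(1) = 0.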
+
+SI F approx_log(F x) {
+ const float ln2 = 0.69314718f;
+ return ln2 * approx_log2(x);
+}
+
+SI F approx_exp2(F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ return x;
+#else
+ F fract = x - floor_(x);
+
+ I32 bits = cast<I32>((1.0f * (1<<23)) * (x + 121.274057500f
+ - 1.490129070f*fract
+ + 27.728023300f/(4.84252568f - fract)));
+ return bit_pun<F>(bits);
+#endif
+}
+
+SI F approx_pow(F x, float y) {
+ return if_then_else((x == F0) | (x == F1), x
+ , approx_exp2(approx_log2(x) * y));
+}
+
+SI F approx_exp(F x) {
+ const float log2_e = 1.4426950408889634074f;
+ return approx_exp2(log2_e * x);
+}
+
+// Return tf(x).
+SI F apply_tf(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ // Peel off the sign bit and set x = |x|.
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ // The transfer function has a linear part up to d, exponential at d and after.
+ F v = if_then_else(x < tf->d, tf->c*x + tf->f
+ , approx_pow(tf->a*x + tf->b, tf->g) + tf->e);
+
+ // Tack the sign bit back on.
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_pq(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ F v = approx_pow(max_(tf->a + tf->b * approx_pow(x, tf->c), F0)
+ / (tf->d + tf->e * approx_pow(x, tf->c)),
+ tf->f);
+
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_hlg(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ const float R = tf->a, G = tf->b,
+ a = tf->c, b = tf->d, c = tf->e;
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ F v = if_then_else(x*R <= 1, approx_pow(x*R, G)
+ , approx_exp((x-c)*a) + b);
+
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_hlginv(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ const float R = tf->a, G = tf->b,
+ a = tf->c, b = tf->d, c = tf->e;
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ F v = if_then_else(x <= 1, R * approx_pow(x, G)
+ , a * approx_log(x - b) + c);
+
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+
+// Strided loads and stores of N values, starting from p.
+template <typename T, typename P>
+SI T load_3(const P* p) {
+#if N == 1
+ return (T)p[0];
+#elif N == 4
+ return T{p[ 0],p[ 3],p[ 6],p[ 9]};
+#elif N == 8
+ return T{p[ 0],p[ 3],p[ 6],p[ 9], p[12],p[15],p[18],p[21]};
+#elif N == 16
+ return T{p[ 0],p[ 3],p[ 6],p[ 9], p[12],p[15],p[18],p[21],
+ p[24],p[27],p[30],p[33], p[36],p[39],p[42],p[45]};
+#endif
+}
+
+template <typename T, typename P>
+SI T load_4(const P* p) {
+#if N == 1
+ return (T)p[0];
+#elif N == 4
+ return T{p[ 0],p[ 4],p[ 8],p[12]};
+#elif N == 8
+ return T{p[ 0],p[ 4],p[ 8],p[12], p[16],p[20],p[24],p[28]};
+#elif N == 16
+ return T{p[ 0],p[ 4],p[ 8],p[12], p[16],p[20],p[24],p[28],
+ p[32],p[36],p[40],p[44], p[48],p[52],p[56],p[60]};
+#endif
+}
+
+template <typename T, typename P>
+SI void store_3(P* p, const T& v) {
+#if N == 1
+ p[0] = v;
+#elif N == 4
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+#elif N == 8
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+ p[12] = v[ 4]; p[15] = v[ 5]; p[18] = v[ 6]; p[21] = v[ 7];
+#elif N == 16
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+ p[12] = v[ 4]; p[15] = v[ 5]; p[18] = v[ 6]; p[21] = v[ 7];
+ p[24] = v[ 8]; p[27] = v[ 9]; p[30] = v[10]; p[33] = v[11];
+ p[36] = v[12]; p[39] = v[13]; p[42] = v[14]; p[45] = v[15];
+#endif
+}
+
+template <typename T, typename P>
+SI void store_4(P* p, const T& v) {
+#if N == 1
+ p[0] = v;
+#elif N == 4
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+#elif N == 8
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+ p[16] = v[ 4]; p[20] = v[ 5]; p[24] = v[ 6]; p[28] = v[ 7];
+#elif N == 16
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+ p[16] = v[ 4]; p[20] = v[ 5]; p[24] = v[ 6]; p[28] = v[ 7];
+ p[32] = v[ 8]; p[36] = v[ 9]; p[40] = v[10]; p[44] = v[11];
+ p[48] = v[12]; p[52] = v[13]; p[56] = v[14]; p[60] = v[15];
+#endif
+}
+
+
+SI U8 gather_8(const uint8_t* p, I32 ix) {
+#if N == 1
+ U8 v = p[ix];
+#elif N == 4
+ U8 v = { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]] };
+#elif N == 8
+ U8 v = { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
+ p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]] };
+#elif N == 16
+ U8 v = { p[ix[ 0]], p[ix[ 1]], p[ix[ 2]], p[ix[ 3]],
+ p[ix[ 4]], p[ix[ 5]], p[ix[ 6]], p[ix[ 7]],
+ p[ix[ 8]], p[ix[ 9]], p[ix[10]], p[ix[11]],
+ p[ix[12]], p[ix[13]], p[ix[14]], p[ix[15]] };
+#endif
+ return v;
+}
+
+SI U16 gather_16(const uint8_t* p, I32 ix) {
+ // Load the i'th 16-bit value from p.
+ auto load_16 = [p](int i) {
+ return load<uint16_t>(p + 2*i);
+ };
+#if N == 1
+ U16 v = load_16(ix);
+#elif N == 4
+ U16 v = { load_16(ix[0]), load_16(ix[1]), load_16(ix[2]), load_16(ix[3]) };
+#elif N == 8
+ U16 v = { load_16(ix[0]), load_16(ix[1]), load_16(ix[2]), load_16(ix[3]),
+ load_16(ix[4]), load_16(ix[5]), load_16(ix[6]), load_16(ix[7]) };
+#elif N == 16
+ U16 v = { load_16(ix[ 0]), load_16(ix[ 1]), load_16(ix[ 2]), load_16(ix[ 3]),
+ load_16(ix[ 4]), load_16(ix[ 5]), load_16(ix[ 6]), load_16(ix[ 7]),
+ load_16(ix[ 8]), load_16(ix[ 9]), load_16(ix[10]), load_16(ix[11]),
+ load_16(ix[12]), load_16(ix[13]), load_16(ix[14]), load_16(ix[15]) };
+#endif
+ return v;
+}
+
+SI U32 gather_32(const uint8_t* p, I32 ix) {
+ // Load the i'th 32-bit value from p.
+ auto load_32 = [p](int i) {
+ return load<uint32_t>(p + 4*i);
+ };
+#if N == 1
+ U32 v = load_32(ix);
+#elif N == 4
+ U32 v = { load_32(ix[0]), load_32(ix[1]), load_32(ix[2]), load_32(ix[3]) };
+#elif N == 8
+ U32 v = { load_32(ix[0]), load_32(ix[1]), load_32(ix[2]), load_32(ix[3]),
+ load_32(ix[4]), load_32(ix[5]), load_32(ix[6]), load_32(ix[7]) };
+#elif N == 16
+ U32 v = { load_32(ix[ 0]), load_32(ix[ 1]), load_32(ix[ 2]), load_32(ix[ 3]),
+ load_32(ix[ 4]), load_32(ix[ 5]), load_32(ix[ 6]), load_32(ix[ 7]),
+ load_32(ix[ 8]), load_32(ix[ 9]), load_32(ix[10]), load_32(ix[11]),
+ load_32(ix[12]), load_32(ix[13]), load_32(ix[14]), load_32(ix[15]) };
+#endif
+ // TODO: AVX2 and AVX-512 gathers (c.f. gather_24).
+ return v;
+}
+
+SI U32 gather_24(const uint8_t* p, I32 ix) {
+ // First, back up a byte. Any place we're gathering from has a safe junk byte to read
+ // in front of it, either a previous table value, or some tag metadata.
+ p -= 1;
+
+ // Load the i'th 24-bit value from p, and 1 extra byte.
+ auto load_24_32 = [p](int i) {
+ return load<uint32_t>(p + 3*i);
+ };
+
+ // Now load multiples of 4 bytes (a junk byte, then r,g,b).
+#if N == 1
+ U32 v = load_24_32(ix);
+#elif N == 4
+ U32 v = { load_24_32(ix[0]), load_24_32(ix[1]), load_24_32(ix[2]), load_24_32(ix[3]) };
+#elif N == 8 && !defined(USING_AVX2)
+ U32 v = { load_24_32(ix[0]), load_24_32(ix[1]), load_24_32(ix[2]), load_24_32(ix[3]),
+ load_24_32(ix[4]), load_24_32(ix[5]), load_24_32(ix[6]), load_24_32(ix[7]) };
+#elif N == 8
+ (void)load_24_32;
+ // The gather instruction here doesn't need any particular alignment,
+ // but the intrinsic takes a const int*.
+ const int* p4 = bit_pun<const int*>(p);
+ I32 zero = { 0, 0, 0, 0, 0, 0, 0, 0},
+ mask = {-1,-1,-1,-1, -1,-1,-1,-1};
+ #if defined(__clang__)
+ U32 v = (U32)__builtin_ia32_gatherd_d256(zero, p4, 3*ix, mask, 1);
+ #elif defined(__GNUC__)
+ U32 v = (U32)__builtin_ia32_gathersiv8si(zero, p4, 3*ix, mask, 1);
+ #endif
+#elif N == 16
+ (void)load_24_32;
+ // The intrinsic is supposed to take const void* now, but it takes const int*, just like AVX2.
+ // And AVX-512 swapped the order of arguments. :/
+ const int* p4 = bit_pun<const int*>(p);
+ U32 v = (U32)_mm512_i32gather_epi32((__m512i)(3*ix), p4, 1);
+#endif
+
+ // Shift off the junk byte, leaving r,g,b in low 24 bits (and zero in the top 8).
+ return v >> 8;
+}
+
+#if !defined(__arm__)
+ SI void gather_48(const uint8_t* p, I32 ix, U64* v) {
+ // As in gather_24(), with everything doubled.
+ p -= 2;
+
+ // Load the i'th 48-bit value from p, and 2 extra bytes.
+ auto load_48_64 = [p](int i) {
+ return load<uint64_t>(p + 6*i);
+ };
+
+ #if N == 1
+ *v = load_48_64(ix);
+ #elif N == 4
+ *v = U64{
+ load_48_64(ix[0]), load_48_64(ix[1]), load_48_64(ix[2]), load_48_64(ix[3]),
+ };
+ #elif N == 8 && !defined(USING_AVX2)
+ *v = U64{
+ load_48_64(ix[0]), load_48_64(ix[1]), load_48_64(ix[2]), load_48_64(ix[3]),
+ load_48_64(ix[4]), load_48_64(ix[5]), load_48_64(ix[6]), load_48_64(ix[7]),
+ };
+ #elif N == 8
+ (void)load_48_64;
+ typedef int32_t __attribute__((vector_size(16))) Half_I32;
+ typedef long long __attribute__((vector_size(32))) Half_I64;
+
+ // The gather instruction here doesn't need any particular alignment,
+ // but the intrinsic takes a const long long*.
+ const long long int* p8 = bit_pun<const long long int*>(p);
+
+ Half_I64 zero = { 0, 0, 0, 0},
+ mask = {-1,-1,-1,-1};
+
+ ix *= 6;
+ Half_I32 ix_lo = { ix[0], ix[1], ix[2], ix[3] },
+ ix_hi = { ix[4], ix[5], ix[6], ix[7] };
+
+ #if defined(__clang__)
+ Half_I64 lo = (Half_I64)__builtin_ia32_gatherd_q256(zero, p8, ix_lo, mask, 1),
+ hi = (Half_I64)__builtin_ia32_gatherd_q256(zero, p8, ix_hi, mask, 1);
+ #elif defined(__GNUC__)
+ Half_I64 lo = (Half_I64)__builtin_ia32_gathersiv4di(zero, p8, ix_lo, mask, 1),
+ hi = (Half_I64)__builtin_ia32_gathersiv4di(zero, p8, ix_hi, mask, 1);
+ #endif
+ store((char*)v + 0, lo);
+ store((char*)v + 32, hi);
+ #elif N == 16
+ (void)load_48_64;
+ const long long int* p8 = bit_pun<const long long int*>(p);
+ __m512i lo = _mm512_i32gather_epi64(_mm512_extracti32x8_epi32((__m512i)(6*ix), 0), p8, 1),
+ hi = _mm512_i32gather_epi64(_mm512_extracti32x8_epi32((__m512i)(6*ix), 1), p8, 1);
+ store((char*)v + 0, lo);
+ store((char*)v + 64, hi);
+ #endif
+
+ *v >>= 16;
+ }
+#endif
+
+SI F F_from_U8(U8 v) {
+ return cast<F>(v) * (1/255.0f);
+}
+
+SI F F_from_U16_BE(U16 v) {
+ // All 16-bit ICC values are big-endian, so we byte swap before converting to float.
+ // MSVC catches the "loss" of data here in the portable path, so we also make sure to mask.
+ U16 lo = (v >> 8),
+ hi = (v << 8) & 0xffff;
+ return cast<F>(lo|hi) * (1/65535.0f);
+}
+
+SI U16 U16_from_F(F v) {
+ // 65535 == inf in FP16, so promote to FP32 before converting.
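+    // (cast<U16> truncates, so the +0.5f rounds to nearest.)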
+ return cast<U16>(cast<V<float>>(v) * 65535 + 0.5f);
+}
+
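+// Return the largest representable float less than v (for positive finite v),
+// by nudging its bit pattern down by one.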
+SI F minus_1_ulp(F v) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<F>( bit_pun<U16>(v) - 1 );
+#else
+ return bit_pun<F>( bit_pun<U32>(v) - 1 );
+#endif
+}
+
+SI F table(const skcms_Curve* curve, F v) {
+ // Clamp the input to [0,1], then scale to a table index.
+ F ix = max_(F0, min_(v, F1)) * (float)(curve->table_entries - 1);
+
+ // We'll look up (equal or adjacent) entries at lo and hi, then lerp by t between the two.
+ I32 lo = cast<I32>( ix ),
+ hi = cast<I32>(minus_1_ulp(ix+1.0f));
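+    // (When ix is exactly integral, minus_1_ulp() makes hi truncate back to lo,
+    // so we never index one past the end of the table.)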
+ F t = ix - cast<F>(lo); // i.e. the fractional part of ix.
+
+ // TODO: can we load l and h simultaneously? Each entry in 'h' is either
+    // the same as in 'l' or adjacent. We have a rough idea that it'd always be safe
+ // to read adjacent entries and perhaps underflow the table by a byte or two
+ // (it'd be junk, but always safe to read). Not sure how to lerp yet.
+ F l,h;
+ if (curve->table_8) {
+ l = F_from_U8(gather_8(curve->table_8, lo));
+ h = F_from_U8(gather_8(curve->table_8, hi));
+ } else {
+ l = F_from_U16_BE(gather_16(curve->table_16, lo));
+ h = F_from_U16_BE(gather_16(curve->table_16, hi));
+ }
+ return l + (h-l)*t;
+}
+
+SI void sample_clut_8(const skcms_A2B* a2b, I32 ix, F* r, F* g, F* b) {
+ U32 rgb = gather_24(a2b->grid_8, ix);
+
+ *r = cast<F>((rgb >> 0) & 0xff) * (1/255.0f);
+ *g = cast<F>((rgb >> 8) & 0xff) * (1/255.0f);
+ *b = cast<F>((rgb >> 16) & 0xff) * (1/255.0f);
+}
+
+SI void sample_clut_16(const skcms_A2B* a2b, I32 ix, F* r, F* g, F* b) {
+#if defined(__arm__)
+ // This is up to 2x faster on 32-bit ARM than the #else-case fast path.
+ *r = F_from_U16_BE(gather_16(a2b->grid_16, 3*ix+0));
+ *g = F_from_U16_BE(gather_16(a2b->grid_16, 3*ix+1));
+ *b = F_from_U16_BE(gather_16(a2b->grid_16, 3*ix+2));
+#else
+ // This strategy is much faster for 64-bit builds, and fine for 32-bit x86 too.
+ U64 rgb;
+ gather_48(a2b->grid_16, ix, &rgb);
+ rgb = swap_endian_16x4(rgb);
+
+ *r = cast<F>((rgb >> 0) & 0xffff) * (1/65535.0f);
+ *g = cast<F>((rgb >> 16) & 0xffff) * (1/65535.0f);
+ *b = cast<F>((rgb >> 32) & 0xffff) * (1/65535.0f);
+#endif
+}
+
+// GCC 7.2.0 hits an internal compiler error with -finline-functions (or -O3)
+// when targeting MIPS 64, i386, or s390x, apparently while attempting to inline
+// clut() into exec_ops().
+#if 1 && defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__mips64) || defined(__i386) || defined(__s390x__))
+ #define MAYBE_NOINLINE __attribute__((noinline))
+#else
+ #define MAYBE_NOINLINE
+#endif
+
+MAYBE_NOINLINE
+static void clut(const skcms_A2B* a2b, F* r, F* g, F* b, F a) {
+ const int dim = (int)a2b->input_channels;
+ assert (0 < dim && dim <= 4);
+
+ // For each of these arrays, think foo[2*dim], but we use foo[8] since we know dim <= 4.
+ I32 index [8]; // Index contribution by dimension, first low from 0, then high from 4.
+ F weight[8]; // Weight for each contribution, again first low, then high.
+
+ // O(dim) work first: calculate index,weight from r,g,b,a.
+ const F inputs[] = { *r,*g,*b,a };
+ for (int i = dim-1, stride = 1; i >= 0; i--) {
+ // x is where we logically want to sample the grid in the i-th dimension.
+ F x = inputs[i] * (float)(a2b->grid_points[i] - 1);
+
+ // But we can't index at floats. lo and hi are the two integer grid points surrounding x.
+ I32 lo = cast<I32>( x ), // i.e. trunc(x) == floor(x) here.
+ hi = cast<I32>(minus_1_ulp(x+1.0f));
+ // Notice how we fold in the accumulated stride across previous dimensions here.
+ index[i+0] = lo * stride;
+ index[i+4] = hi * stride;
+ stride *= a2b->grid_points[i];
+
+ // We'll interpolate between those two integer grid points by t.
+ F t = x - cast<F>(lo); // i.e. fract(x)
+ weight[i+0] = 1-t;
+ weight[i+4] = t;
+ }
+
+ *r = *g = *b = F0;
+
+ // We'll sample 2^dim == 1<<dim table entries per pixel,
+ // in all combinations of low and high in each dimension.
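+    // E.g. with dim == 2 this is bilinear sampling: combo 0 blends (lo,lo) with
+    // weight (1-t0)(1-t1), combo 1 (hi,lo) with t0(1-t1), combo 2 (lo,hi) with
+    // (1-t0)t1, and combo 3 (hi,hi) with t0*t1.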
+ for (int combo = 0; combo < (1<<dim); combo++) { // This loop can be done in any order.
+
+        // Each of the (combo&N)*K expressions below evaluates to 0 or 4,
+ // where 0 selects the low index contribution and its weight 1-t,
+ // or 4 the high index contribution and its weight t.
+
+ // Since 0<dim≤4, we can always just start off with the 0-th channel,
+ // then handle the others conditionally.
+ I32 ix = index [0 + (combo&1)*4];
+ F w = weight[0 + (combo&1)*4];
+
+ switch ((dim-1)&3) { // This lets the compiler know there are no other cases to handle.
+ case 3: ix += index [3 + (combo&8)/2];
+ w *= weight[3 + (combo&8)/2];
+ FALLTHROUGH;
+ // fall through
+
+ case 2: ix += index [2 + (combo&4)*1];
+ w *= weight[2 + (combo&4)*1];
+ FALLTHROUGH;
+ // fall through
+
+ case 1: ix += index [1 + (combo&2)*2];
+ w *= weight[1 + (combo&2)*2];
+ }
+
+ F R,G,B;
+ if (a2b->grid_8) {
+ sample_clut_8 (a2b,ix, &R,&G,&B);
+ } else {
+ sample_clut_16(a2b,ix, &R,&G,&B);
+ }
+
+ *r += w*R;
+ *g += w*G;
+ *b += w*B;
+ }
+}
+
+static void exec_ops(const Op* ops, const void** args,
+ const char* src, char* dst, int i) {
+ F r = F0, g = F0, b = F0, a = F1;
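+    // The ops form a simple tape: run each in order, pulling any operand from
+    // 'args', until one of the store ops writes dst and returns.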
+ while (true) {
+ switch (*ops++) {
+ case Op_load_a8:{
+ a = F_from_U8(load<U8>(src + 1*i));
+ } break;
+
+ case Op_load_g8:{
+ r = g = b = F_from_U8(load<U8>(src + 1*i));
+ } break;
+
+ case Op_load_4444:{
+ U16 abgr = load<U16>(src + 2*i);
+
+ r = cast<F>((abgr >> 12) & 0xf) * (1/15.0f);
+ g = cast<F>((abgr >> 8) & 0xf) * (1/15.0f);
+ b = cast<F>((abgr >> 4) & 0xf) * (1/15.0f);
+ a = cast<F>((abgr >> 0) & 0xf) * (1/15.0f);
+ } break;
+
+ case Op_load_565:{
+ U16 rgb = load<U16>(src + 2*i);
+
+ r = cast<F>(rgb & (uint16_t)(31<< 0)) * (1.0f / (31<< 0));
+ g = cast<F>(rgb & (uint16_t)(63<< 5)) * (1.0f / (63<< 5));
+ b = cast<F>(rgb & (uint16_t)(31<<11)) * (1.0f / (31<<11));
+ } break;
+
+ case Op_load_888:{
+ const uint8_t* rgb = (const uint8_t*)(src + 3*i);
+ #if defined(USING_NEON_FP16)
+ // See the explanation under USING_NEON below. This is that doubled up.
+ uint8x16x3_t v = {{ vdupq_n_u8(0), vdupq_n_u8(0), vdupq_n_u8(0) }};
+ v = vld3q_lane_u8(rgb+ 0, v, 0);
+ v = vld3q_lane_u8(rgb+ 3, v, 2);
+ v = vld3q_lane_u8(rgb+ 6, v, 4);
+ v = vld3q_lane_u8(rgb+ 9, v, 6);
+
+ v = vld3q_lane_u8(rgb+12, v, 8);
+ v = vld3q_lane_u8(rgb+15, v, 10);
+ v = vld3q_lane_u8(rgb+18, v, 12);
+ v = vld3q_lane_u8(rgb+21, v, 14);
+
+ r = cast<F>((U16)v.val[0]) * (1/255.0f);
+ g = cast<F>((U16)v.val[1]) * (1/255.0f);
+ b = cast<F>((U16)v.val[2]) * (1/255.0f);
+ #elif defined(USING_NEON)
+ // There's no uint8x4x3_t or vld3 load for it, so we'll load each rgb pixel one at
+ // a time. Since we're doing that, we might as well load them into 16-bit lanes.
+ // (We'd even load into 32-bit lanes, but that's not possible on ARMv7.)
+ uint8x8x3_t v = {{ vdup_n_u8(0), vdup_n_u8(0), vdup_n_u8(0) }};
+ v = vld3_lane_u8(rgb+0, v, 0);
+ v = vld3_lane_u8(rgb+3, v, 2);
+ v = vld3_lane_u8(rgb+6, v, 4);
+ v = vld3_lane_u8(rgb+9, v, 6);
+
+ // Now if we squint, those 3 uint8x8_t we constructed are really U16s, easy to
+                // convert to F.  (Again, U32 would be even better here if we drop
+                // ARMv7 or split the ARMv7 and ARMv8 impls.)
+ r = cast<F>((U16)v.val[0]) * (1/255.0f);
+ g = cast<F>((U16)v.val[1]) * (1/255.0f);
+ b = cast<F>((U16)v.val[2]) * (1/255.0f);
+ #else
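+                // The portable path gathers every 3rd byte, so rgb+0, rgb+1, and
+                // rgb+2 pick out the interleaved r, g, and b channels.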
+ r = cast<F>(load_3<U32>(rgb+0) ) * (1/255.0f);
+ g = cast<F>(load_3<U32>(rgb+1) ) * (1/255.0f);
+ b = cast<F>(load_3<U32>(rgb+2) ) * (1/255.0f);
+ #endif
+ } break;
+
+ case Op_load_8888:{
+ U32 rgba = load<U32>(src + 4*i);
+
+ r = cast<F>((rgba >> 0) & 0xff) * (1/255.0f);
+ g = cast<F>((rgba >> 8) & 0xff) * (1/255.0f);
+ b = cast<F>((rgba >> 16) & 0xff) * (1/255.0f);
+ a = cast<F>((rgba >> 24) & 0xff) * (1/255.0f);
+ } break;
+
+ case Op_load_8888_palette8:{
+ const uint8_t* palette = (const uint8_t*) *args++;
+ I32 ix = cast<I32>(load<U8>(src + 1*i));
+ U32 rgba = gather_32(palette, ix);
+
+ r = cast<F>((rgba >> 0) & 0xff) * (1/255.0f);
+ g = cast<F>((rgba >> 8) & 0xff) * (1/255.0f);
+ b = cast<F>((rgba >> 16) & 0xff) * (1/255.0f);
+ a = cast<F>((rgba >> 24) & 0xff) * (1/255.0f);
+ } break;
+
+ case Op_load_1010102:{
+ U32 rgba = load<U32>(src + 4*i);
+
+ r = cast<F>((rgba >> 0) & 0x3ff) * (1/1023.0f);
+ g = cast<F>((rgba >> 10) & 0x3ff) * (1/1023.0f);
+ b = cast<F>((rgba >> 20) & 0x3ff) * (1/1023.0f);
+ a = cast<F>((rgba >> 30) & 0x3 ) * (1/ 3.0f);
+ } break;
+
+ case Op_load_161616LE:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ #else
+ r = cast<F>(load_3<U32>(rgb+0)) * (1/65535.0f);
+ g = cast<F>(load_3<U32>(rgb+1)) * (1/65535.0f);
+ b = cast<F>(load_3<U32>(rgb+2)) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_16161616LE:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ a = cast<F>((U16)v.val[3]) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ a = cast<F>((U16)v.val[3]) * (1/65535.0f);
+ #else
+ U64 px = load<U64>(rgba);
+
+ r = cast<F>((px >> 0) & 0xffff) * (1/65535.0f);
+ g = cast<F>((px >> 16) & 0xffff) * (1/65535.0f);
+ b = cast<F>((px >> 32) & 0xffff) * (1/65535.0f);
+ a = cast<F>((px >> 48) & 0xffff) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_161616BE:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ #else
+ U32 R = load_3<U32>(rgb+0),
+ G = load_3<U32>(rgb+1),
+ B = load_3<U32>(rgb+2);
+ // R,G,B are big-endian 16-bit, so byte swap them before converting to float.
+ r = cast<F>((R & 0x00ff)<<8 | (R & 0xff00)>>8) * (1/65535.0f);
+ g = cast<F>((G & 0x00ff)<<8 | (G & 0xff00)>>8) * (1/65535.0f);
+ b = cast<F>((B & 0x00ff)<<8 | (B & 0xff00)>>8) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_16161616BE:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ a = cast<F>(swap_endian_16((U16)v.val[3])) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ a = cast<F>(swap_endian_16((U16)v.val[3])) * (1/65535.0f);
+ #else
+ U64 px = swap_endian_16x4(load<U64>(rgba));
+
+ r = cast<F>((px >> 0) & 0xffff) * (1/65535.0f);
+ g = cast<F>((px >> 16) & 0xffff) * (1/65535.0f);
+ b = cast<F>((px >> 32) & 0xffff) * (1/65535.0f);
+ a = cast<F>((px >> 48) & 0xffff) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_hhh:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2];
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2];
+ #else
+ U16 R = load_3<U16>(rgb+0),
+ G = load_3<U16>(rgb+1),
+ B = load_3<U16>(rgb+2);
+ #endif
+ r = F_from_Half(R);
+ g = F_from_Half(G);
+ b = F_from_Half(B);
+ } break;
+
+ case Op_load_hhhh:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2],
+ A = (U16)v.val[3];
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2],
+ A = (U16)v.val[3];
+ #else
+ U64 px = load<U64>(rgba);
+ U16 R = cast<U16>((px >> 0) & 0xffff),
+ G = cast<U16>((px >> 16) & 0xffff),
+ B = cast<U16>((px >> 32) & 0xffff),
+ A = cast<U16>((px >> 48) & 0xffff);
+ #endif
+ r = F_from_Half(R);
+ g = F_from_Half(G);
+ b = F_from_Half(B);
+ a = F_from_Half(A);
+ } break;
+
+ case Op_load_fff:{
+ uintptr_t ptr = (uintptr_t)(src + 12*i);
+ assert( (ptr & 3) == 0 ); // src must be 4-byte aligned for this
+ const float* rgb = (const float*)ptr; // cast to const float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x3_t lo = vld3q_f32(rgb + 0),
+ hi = vld3q_f32(rgb + 12);
+ r = (F)vcombine_f16(vcvt_f16_f32(lo.val[0]), vcvt_f16_f32(hi.val[0]));
+ g = (F)vcombine_f16(vcvt_f16_f32(lo.val[1]), vcvt_f16_f32(hi.val[1]));
+ b = (F)vcombine_f16(vcvt_f16_f32(lo.val[2]), vcvt_f16_f32(hi.val[2]));
+ #elif defined(USING_NEON)
+ float32x4x3_t v = vld3q_f32(rgb);
+ r = (F)v.val[0];
+ g = (F)v.val[1];
+ b = (F)v.val[2];
+ #else
+ r = load_3<F>(rgb+0);
+ g = load_3<F>(rgb+1);
+ b = load_3<F>(rgb+2);
+ #endif
+ } break;
+
+ case Op_load_ffff:{
+ uintptr_t ptr = (uintptr_t)(src + 16*i);
+ assert( (ptr & 3) == 0 ); // src must be 4-byte aligned for this
+ const float* rgba = (const float*)ptr; // cast to const float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x4_t lo = vld4q_f32(rgba + 0),
+ hi = vld4q_f32(rgba + 16);
+ r = (F)vcombine_f16(vcvt_f16_f32(lo.val[0]), vcvt_f16_f32(hi.val[0]));
+ g = (F)vcombine_f16(vcvt_f16_f32(lo.val[1]), vcvt_f16_f32(hi.val[1]));
+ b = (F)vcombine_f16(vcvt_f16_f32(lo.val[2]), vcvt_f16_f32(hi.val[2]));
+ a = (F)vcombine_f16(vcvt_f16_f32(lo.val[3]), vcvt_f16_f32(hi.val[3]));
+ #elif defined(USING_NEON)
+ float32x4x4_t v = vld4q_f32(rgba);
+ r = (F)v.val[0];
+ g = (F)v.val[1];
+ b = (F)v.val[2];
+ a = (F)v.val[3];
+ #else
+ r = load_4<F>(rgba+0);
+ g = load_4<F>(rgba+1);
+ b = load_4<F>(rgba+2);
+ a = load_4<F>(rgba+3);
+ #endif
+ } break;
+
+ case Op_swap_rb:{
+ F t = r;
+ r = b;
+ b = t;
+ } break;
+
+ case Op_clamp:{
+ r = max_(F0, min_(r, F1));
+ g = max_(F0, min_(g, F1));
+ b = max_(F0, min_(b, F1));
+ a = max_(F0, min_(a, F1));
+ } break;
+
+ case Op_invert:{
+ r = F1 - r;
+ g = F1 - g;
+ b = F1 - b;
+ a = F1 - a;
+ } break;
+
+ case Op_force_opaque:{
+ a = F1;
+ } break;
+
+ case Op_premul:{
+ r *= a;
+ g *= a;
+ b *= a;
+ } break;
+
+ case Op_unpremul:{
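+                // 1/a overflows to +inf when a == 0; zero those lanes instead of
+                // producing inf/NaN.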
+ F scale = if_then_else(F1 / a < INFINITY_, F1 / a, F0);
+ r *= scale;
+ g *= scale;
+ b *= scale;
+ } break;
+
+ case Op_matrix_3x3:{
+ const skcms_Matrix3x3* matrix = (const skcms_Matrix3x3*) *args++;
+ const float* m = &matrix->vals[0][0];
+
+ F R = m[0]*r + m[1]*g + m[2]*b,
+ G = m[3]*r + m[4]*g + m[5]*b,
+ B = m[6]*r + m[7]*g + m[8]*b;
+
+ r = R;
+ g = G;
+ b = B;
+ } break;
+
+ case Op_matrix_3x4:{
+ const skcms_Matrix3x4* matrix = (const skcms_Matrix3x4*) *args++;
+ const float* m = &matrix->vals[0][0];
+
+ F R = m[0]*r + m[1]*g + m[ 2]*b + m[ 3],
+ G = m[4]*r + m[5]*g + m[ 6]*b + m[ 7],
+ B = m[8]*r + m[9]*g + m[10]*b + m[11];
+
+ r = R;
+ g = G;
+ b = B;
+ } break;
+
+ case Op_lab_to_xyz:{
+ // The L*a*b values are in r,g,b, but normalized to [0,1]. Reconstruct them:
+ F L = r * 100.0f,
+ A = g * 255.0f - 128.0f,
+ B = b * 255.0f - 128.0f;
+
+ // Convert to CIE XYZ.
+ F Y = (L + 16.0f) * (1/116.0f),
+ X = Y + A*(1/500.0f),
+ Z = Y - B*(1/200.0f);
+
+ X = if_then_else(X*X*X > 0.008856f, X*X*X, (X - (16/116.0f)) * (1/7.787f));
+ Y = if_then_else(Y*Y*Y > 0.008856f, Y*Y*Y, (Y - (16/116.0f)) * (1/7.787f));
+ Z = if_then_else(Z*Z*Z > 0.008856f, Z*Z*Z, (Z - (16/116.0f)) * (1/7.787f));
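+                // (0.008856 ~ (6/29)^3 and 1/7.787 ~ 3*(6/29)^2, the standard
+                // CIE Lab cusp constants.)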
+
+ // Adjust to XYZD50 illuminant, and stuff back into r,g,b for the next op.
+ r = X * 0.9642f;
+ g = Y ;
+ b = Z * 0.8249f;
+ } break;
+
+ case Op_tf_r:{ r = apply_tf((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_tf_g:{ g = apply_tf((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_tf_b:{ b = apply_tf((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_tf_a:{ a = apply_tf((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_pq_r:{ r = apply_pq((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_pq_g:{ g = apply_pq((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_pq_b:{ b = apply_pq((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_pq_a:{ a = apply_pq((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_hlg_r:{ r = apply_hlg((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_hlg_g:{ g = apply_hlg((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_hlg_b:{ b = apply_hlg((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_hlg_a:{ a = apply_hlg((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_hlginv_r:{ r = apply_hlginv((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_hlginv_g:{ g = apply_hlginv((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_hlginv_b:{ b = apply_hlginv((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_hlginv_a:{ a = apply_hlginv((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_table_r: { r = table((const skcms_Curve*)*args++, r); } break;
+ case Op_table_g: { g = table((const skcms_Curve*)*args++, g); } break;
+ case Op_table_b: { b = table((const skcms_Curve*)*args++, b); } break;
+ case Op_table_a: { a = table((const skcms_Curve*)*args++, a); } break;
+
+ case Op_clut: {
+ const skcms_A2B* a2b = (const skcms_A2B*) *args++;
+ clut(a2b, &r,&g,&b,a);
+
+ if (a2b->input_channels == 4) {
+ // CMYK is opaque.
+ a = F1;
+ }
+ } break;
+
+ // Notice, from here on down the store_ ops all return, ending the loop.
+
+ case Op_store_a8: {
+ store(dst + 1*i, cast<U8>(to_fixed(a * 255)));
+ } return;
+
+ case Op_store_g8: {
+ // g should be holding luminance (Y) (r,g,b ~~~> X,Y,Z)
+ store(dst + 1*i, cast<U8>(to_fixed(g * 255)));
+ } return;
+
+ case Op_store_4444: {
+ store<U16>(dst + 2*i, cast<U16>(to_fixed(r * 15) << 12)
+ | cast<U16>(to_fixed(g * 15) << 8)
+ | cast<U16>(to_fixed(b * 15) << 4)
+ | cast<U16>(to_fixed(a * 15) << 0));
+ } return;
+
+ case Op_store_565: {
+ store<U16>(dst + 2*i, cast<U16>(to_fixed(r * 31) << 0 )
+ | cast<U16>(to_fixed(g * 63) << 5 )
+ | cast<U16>(to_fixed(b * 31) << 11 ));
+ } return;
+
+ case Op_store_888: {
+ uint8_t* rgb = (uint8_t*)dst + 3*i;
+ #if defined(USING_NEON_FP16)
+ // See the explanation under USING_NEON below. This is that doubled up.
+ U16 R = to_fixed(r * 255),
+ G = to_fixed(g * 255),
+ B = to_fixed(b * 255);
+
+ uint8x16x3_t v = {{ (uint8x16_t)R, (uint8x16_t)G, (uint8x16_t)B }};
+ vst3q_lane_u8(rgb+ 0, v, 0);
+ vst3q_lane_u8(rgb+ 3, v, 2);
+ vst3q_lane_u8(rgb+ 6, v, 4);
+ vst3q_lane_u8(rgb+ 9, v, 6);
+
+ vst3q_lane_u8(rgb+12, v, 8);
+ vst3q_lane_u8(rgb+15, v, 10);
+ vst3q_lane_u8(rgb+18, v, 12);
+ vst3q_lane_u8(rgb+21, v, 14);
+ #elif defined(USING_NEON)
+ // Same deal as load_888 but in reverse... we'll store using uint8x8x3_t, but
+ // get there via U16 to save some instructions converting to float. And just
+ // like load_888, we'd prefer to go via U32 but for ARMv7 support.
+ U16 R = cast<U16>(to_fixed(r * 255)),
+ G = cast<U16>(to_fixed(g * 255)),
+ B = cast<U16>(to_fixed(b * 255));
+
+ uint8x8x3_t v = {{ (uint8x8_t)R, (uint8x8_t)G, (uint8x8_t)B }};
+ vst3_lane_u8(rgb+0, v, 0);
+ vst3_lane_u8(rgb+3, v, 2);
+ vst3_lane_u8(rgb+6, v, 4);
+ vst3_lane_u8(rgb+9, v, 6);
+ #else
+ store_3(rgb+0, cast<U8>(to_fixed(r * 255)) );
+ store_3(rgb+1, cast<U8>(to_fixed(g * 255)) );
+ store_3(rgb+2, cast<U8>(to_fixed(b * 255)) );
+ #endif
+ } return;
+
+ case Op_store_8888: {
+ store(dst + 4*i, cast<U32>(to_fixed(r * 255)) << 0
+ | cast<U32>(to_fixed(g * 255)) << 8
+ | cast<U32>(to_fixed(b * 255)) << 16
+ | cast<U32>(to_fixed(a * 255)) << 24);
+ } return;
+
+ case Op_store_1010102: {
+ store(dst + 4*i, cast<U32>(to_fixed(r * 1023)) << 0
+ | cast<U32>(to_fixed(g * 1023)) << 10
+ | cast<U32>(to_fixed(b * 1023)) << 20
+ | cast<U32>(to_fixed(a * 3)) << 30);
+ } return;
+
+ case Op_store_161616LE: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)U16_from_F(r),
+ (uint16x8_t)U16_from_F(g),
+ (uint16x8_t)U16_from_F(b),
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)U16_from_F(r),
+ (uint16x4_t)U16_from_F(g),
+ (uint16x4_t)U16_from_F(b),
+ }};
+ vst3_u16(rgb, v);
+ #else
+ store_3(rgb+0, U16_from_F(r));
+ store_3(rgb+1, U16_from_F(g));
+ store_3(rgb+2, U16_from_F(b));
+ #endif
+
+ } return;
+
+ case Op_store_16161616LE: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)U16_from_F(r),
+ (uint16x8_t)U16_from_F(g),
+ (uint16x8_t)U16_from_F(b),
+ (uint16x8_t)U16_from_F(a),
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)U16_from_F(r),
+ (uint16x4_t)U16_from_F(g),
+ (uint16x4_t)U16_from_F(b),
+ (uint16x4_t)U16_from_F(a),
+ }};
+ vst4_u16(rgba, v);
+ #else
+ U64 px = cast<U64>(to_fixed(r * 65535)) << 0
+ | cast<U64>(to_fixed(g * 65535)) << 16
+ | cast<U64>(to_fixed(b * 65535)) << 32
+ | cast<U64>(to_fixed(a * 65535)) << 48;
+ store(rgba, px);
+ #endif
+ } return;
+
+ case Op_store_161616BE: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)swap_endian_16(U16_from_F(r)),
+ (uint16x8_t)swap_endian_16(U16_from_F(g)),
+ (uint16x8_t)swap_endian_16(U16_from_F(b)),
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(r))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(g))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(b))),
+ }};
+ vst3_u16(rgb, v);
+ #else
+ U32 R = to_fixed(r * 65535),
+ G = to_fixed(g * 65535),
+ B = to_fixed(b * 65535);
+ store_3(rgb+0, cast<U16>((R & 0x00ff) << 8 | (R & 0xff00) >> 8) );
+ store_3(rgb+1, cast<U16>((G & 0x00ff) << 8 | (G & 0xff00) >> 8) );
+ store_3(rgb+2, cast<U16>((B & 0x00ff) << 8 | (B & 0xff00) >> 8) );
+ #endif
+
+ } return;
+
+ case Op_store_16161616BE: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)swap_endian_16(U16_from_F(r)),
+ (uint16x8_t)swap_endian_16(U16_from_F(g)),
+ (uint16x8_t)swap_endian_16(U16_from_F(b)),
+ (uint16x8_t)swap_endian_16(U16_from_F(a)),
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(r))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(g))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(b))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(a))),
+ }};
+ vst4_u16(rgba, v);
+ #else
+ U64 px = cast<U64>(to_fixed(r * 65535)) << 0
+ | cast<U64>(to_fixed(g * 65535)) << 16
+ | cast<U64>(to_fixed(b * 65535)) << 32
+ | cast<U64>(to_fixed(a * 65535)) << 48;
+ store(rgba, swap_endian_16x4(px));
+ #endif
+ } return;
+
+ case Op_store_hhh: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+
+ U16 R = Half_from_F(r),
+ G = Half_from_F(g),
+ B = Half_from_F(b);
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)R,
+ (uint16x8_t)G,
+ (uint16x8_t)B,
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)R,
+ (uint16x4_t)G,
+ (uint16x4_t)B,
+ }};
+ vst3_u16(rgb, v);
+ #else
+ store_3(rgb+0, R);
+ store_3(rgb+1, G);
+ store_3(rgb+2, B);
+ #endif
+ } return;
+
+ case Op_store_hhhh: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+
+ U16 R = Half_from_F(r),
+ G = Half_from_F(g),
+ B = Half_from_F(b),
+ A = Half_from_F(a);
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)R,
+ (uint16x8_t)G,
+ (uint16x8_t)B,
+ (uint16x8_t)A,
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)R,
+ (uint16x4_t)G,
+ (uint16x4_t)B,
+ (uint16x4_t)A,
+ }};
+ vst4_u16(rgba, v);
+ #else
+ store(rgba, cast<U64>(R) << 0
+ | cast<U64>(G) << 16
+ | cast<U64>(B) << 32
+ | cast<U64>(A) << 48);
+ #endif
+
+ } return;
+
+ case Op_store_fff: {
+ uintptr_t ptr = (uintptr_t)(dst + 12*i);
+ assert( (ptr & 3) == 0 ); // The dst pointer must be 4-byte aligned
+ float* rgb = (float*)ptr; // for this cast to float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x3_t lo = {{
+ vcvt_f32_f16(vget_low_f16(r)),
+ vcvt_f32_f16(vget_low_f16(g)),
+ vcvt_f32_f16(vget_low_f16(b)),
+ }}, hi = {{
+ vcvt_f32_f16(vget_high_f16(r)),
+ vcvt_f32_f16(vget_high_f16(g)),
+ vcvt_f32_f16(vget_high_f16(b)),
+ }};
+ vst3q_f32(rgb + 0, lo);
+ vst3q_f32(rgb + 12, hi);
+ #elif defined(USING_NEON)
+ float32x4x3_t v = {{
+ (float32x4_t)r,
+ (float32x4_t)g,
+ (float32x4_t)b,
+ }};
+ vst3q_f32(rgb, v);
+ #else
+ store_3(rgb+0, r);
+ store_3(rgb+1, g);
+ store_3(rgb+2, b);
+ #endif
+ } return;
+
+ case Op_store_ffff: {
+ uintptr_t ptr = (uintptr_t)(dst + 16*i);
+ assert( (ptr & 3) == 0 ); // The dst pointer must be 4-byte aligned
+ float* rgba = (float*)ptr; // for this cast to float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x4_t lo = {{
+ vcvt_f32_f16(vget_low_f16(r)),
+ vcvt_f32_f16(vget_low_f16(g)),
+ vcvt_f32_f16(vget_low_f16(b)),
+ vcvt_f32_f16(vget_low_f16(a)),
+ }}, hi = {{
+ vcvt_f32_f16(vget_high_f16(r)),
+ vcvt_f32_f16(vget_high_f16(g)),
+ vcvt_f32_f16(vget_high_f16(b)),
+ vcvt_f32_f16(vget_high_f16(a)),
+ }};
+ vst4q_f32(rgba + 0, lo);
+ vst4q_f32(rgba + 16, hi);
+ #elif defined(USING_NEON)
+ float32x4x4_t v = {{
+ (float32x4_t)r,
+ (float32x4_t)g,
+ (float32x4_t)b,
+ (float32x4_t)a,
+ }};
+ vst4q_f32(rgba, v);
+ #else
+ store_4(rgba+0, r);
+ store_4(rgba+1, g);
+ store_4(rgba+2, b);
+ store_4(rgba+3, a);
+ #endif
+ } return;
+ }
+ }
+}
+
+
+static void run_program(const Op* program, const void** arguments,
+ const char* src, char* dst, int n,
+ const size_t src_bpp, const size_t dst_bpp) {
+ int i = 0;
+ while (n >= N) {
+ exec_ops(program, arguments, src, dst, i);
+ i += N;
+ n -= N;
+ }
+ if (n > 0) {
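+        // Fewer than N pixels left: copy the tail into a zero-padded scratch
+        // buffer big enough for N pixels of the widest format (4 channels x 4
+        // bytes each), transform in place, and copy the result back out.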
+ char tmp[4*4*N] = {0};
+
+ memcpy(tmp, (const char*)src + (size_t)i*src_bpp, (size_t)n*src_bpp);
+ exec_ops(program, arguments, tmp, tmp, 0);
+ memcpy((char*)dst + (size_t)i*dst_bpp, tmp, (size_t)n*dst_bpp);
+ }
+}
+
+// Clean up any #defines we may have set so that we can be #included again.
+#if defined(USING_AVX)
+ #undef USING_AVX
+#endif
+#if defined(USING_AVX_F16C)
+ #undef USING_AVX_F16C
+#endif
+#if defined(USING_AVX2)
+ #undef USING_AVX2
+#endif
+#if defined(USING_AVX512F)
+ #undef USING_AVX512F
+#endif
+
+#if defined(USING_NEON)
+ #undef USING_NEON
+#endif
+#if defined(USING_NEON_F16C)
+ #undef USING_NEON_F16C
+#endif
+#if defined(USING_NEON_FP16)
+ #undef USING_NEON_FP16
+#endif
+
+#undef FALLTHROUGH
diff --git a/gfx/skia/skia/third_party/skcms/version.sha1 b/gfx/skia/skia/third_party/skcms/version.sha1
new file mode 100755
index 0000000000..f6db96d095
--- /dev/null
+++ b/gfx/skia/skia/third_party/skcms/version.sha1
@@ -0,0 +1 @@
+8e28e18b5c9e38265362171570ccfedcbf662761
\ No newline at end of file